max_stars_repo_path stringlengths 3 269 | max_stars_repo_name stringlengths 4 119 | max_stars_count int64 0 191k | id stringlengths 1 7 | content stringlengths 6 1.05M | score float64 0.23 5.13 | int_score int64 0 5 |
|---|---|---|---|---|---|---|
main.py | kornaev/spirograph | 0 | 12764151 | <filename>main.py
# -*- coding: utf-8 -*-
"""
Created on Sat Apr 25 17:48:47 2020
@author: crazY
"""
import sys
from time import sleep
from PyQt5 import QtGui, QtWidgets
import mainwindow
from spirograph import spirograph
M = 3  # scale multiplier: the logical 400x400 canvas is rendered at M-times resolution
# Pens for the different layers of the drawing.
pen_outer = QtGui.QPen(QtGui.QColor('red'))     # fixed outer circle
pen_inner = QtGui.QPen(QtGui.QColor('blue'))    # rolling inner circle
pen_plecho = QtGui.QPen(QtGui.QColor('green'))  # arm from the inner-circle centre to the pen point
pen_spiro = QtGui.QPen(QtGui.QColor('black'))   # the persistent spirograph trace
pen_spiro.setWidth(4)
def draw_circle(self, center_x, center_y, radius):
    """Draw a circle centred at (center_x, center_y) on the given painter.

    QPainter.drawEllipse(x, y, w, h) takes the bounding rectangle; the
    integer overload requires all four arguments to be ints.  The original
    cast only the corner coordinates, so a non-integer ``radius`` produced
    a float width/height, which strict PyQt builds reject with TypeError.
    All four values are now cast consistently.

    :param self: the QPainter (or any object exposing ``drawEllipse``)
    :param center_x: x coordinate of the circle centre
    :param center_y: y coordinate of the circle centre
    :param radius: circle radius (may be float; truncated like the corners)
    """
    diameter = int(radius * 2)
    self.drawEllipse(
        int(center_x - radius),
        int(center_y - radius),
        diameter,
        diameter)
class MainWindow(QtWidgets.QMainWindow, mainwindow.Ui_MainWindow):
    """Main application window: lets the user pick spirograph parameters
    with spin boxes and animates the drawing onto stacked pixmap labels."""

    def __init__(self):
        super().__init__()
        self.setupUi(self)
        self.init_widgets()
        self.init_pixmaps()
        # Wire the two buttons from the generated UI to their handlers.
        self.but_save.clicked.connect(self.save_file)
        self.but_draw.clicked.connect(self.draw_image)

    def init_widgets(self):
        # Stack the three labels on top of each other so the animated
        # construction (label_out), the final trace (label_out_final) and
        # the white background (label_bg) render as layers.
        _l = QtWidgets.QStackedLayout()
        _l.setStackingMode(QtWidgets.QStackedLayout.StackAll)
        _l.addWidget(self.label_out_final)
        _l.addWidget(self.label_out)
        _l.addWidget(self.label_bg)
        self.mwLayout_2.addLayout(_l)

    def init_pixmaps(self):
        # Every canvas is 400x400 logical pixels scaled by the module-level M.
        canvas = QtGui.QPixmap(400 * M, 400 * M)
        canvas.fill(QtGui.QColor('white'))
        canvas_final = QtGui.QPixmap(400 * M, 400 * M)
        canvas_final.fill(QtGui.QColor('transparent'))
        canvas_bg = QtGui.QPixmap(400 * M, 400 * M)
        canvas_bg.fill(QtGui.QColor('white'))
        self.label_out.setPixmap(canvas)
        self.label_out_final.setPixmap(canvas_final)
        self.label_bg.setPixmap(canvas_bg)

    def save_file(self):
        # TODO: saving is not implemented yet (button is connected but a no-op).
        return

    def draw_image(self):
        # Read parameters, scaled by M: outer radius R, inner radius r,
        # and arm length d ("plecho" appears to be a transliteration -- TODO confirm).
        R = self.spinBox_outer_diameter.value() * M
        r = self.spinBox_inner_diameter.value() * M
        d = self.spinBox_plecho.value() * M
        s = spirograph(R, r, d)
        s.calculate()
        # assumes each res row is (inner_x, inner_y, pen_x, pen_y) -- TODO confirm
        res = s.get_results()
        self.label_out.setVisible(True)
        if self.cb_clean.isChecked():
            self.label_out_final.pixmap().fill(QtGui.QColor('transparent'))
        # Painter for the animated construction lines; origin moved to centre.
        _p = QtGui.QPainter(self.label_out.pixmap())
        _p.translate(200 * M, 200 * M)
        # Painter for the persistent spirograph trace on the final layer.
        _f = QtGui.QPainter(self.label_out_final.pixmap())
        _f.translate(200 * M, 200 * M)
        _f.setPen(pen_spiro)
        for i in range(1, len(res)):
            # Wipe the animation layer, then redraw circles, arm, and one
            # new trace segment per step.
            self.label_out.pixmap().fill(QtGui.QColor('#ffffff'))
            _p.setPen(pen_outer)
            draw_circle(_p, 0, 0, R)
            _p.setPen(pen_inner)
            draw_circle(_p, res[i][0], res[i][1], r)
            _p.setPen(pen_plecho)
            _p.drawLine(res[i][0], res[i][1], res[i][2], res[i][3])
            _f.drawLine(res[i][2], res[i][3], res[i - 1][2], res[i - 1][3])
            self.label_out.repaint()
        _p.end()
        _f.end()
        sleep(1)
        self.label_out.setVisible(False)
def main():
    """Bootstrap the Qt application and run the main window's event loop."""
    application = QtWidgets.QApplication(sys.argv)
    main_window = MainWindow()
    main_window.show()
    application.exec_()


if __name__ == '__main__':
    main()
| 2.3125 | 2 |
#!/usr/bin/env python3
# coding=utf-8

# Switch the active WSL distribution by renaming rootfs directories:
# the current "rootfs" is parked as "rootfs_<label>" and the requested
# "rootfs_<label>" is moved into place as "rootfs".

import glob
import sys
import os.path
import subprocess

from utils import Fore, parse_image_arg, probe_wsl, get_label, path_trans, handle_sigint

# handle arguments

handle_sigint()

if len(sys.argv) < 2:
    # print usage information
    print('usage: ./switch.py image[:tag]')

    # check if there are any installations
    basedir, lxpath, bashpath = probe_wsl(True)

    if basedir:
        # fix basedir to add LocalState\rootfs
        basedir = os.path.join(basedir, 'LocalState')
        names = glob.glob(os.path.join(basedir, 'rootfs*'))
        not_debian = True
        has_debian = False

        if len(names) > 0:
            print('\nThe following distributions are currently installed:\n')

            for name in names:
                # The active installation is the bare "rootfs" directory.
                active = os.path.basename(name) == 'rootfs'
                # Labels are stored as "<image>_<tag>".
                name = get_label(name).split('_', 1)

                if len(name) != 2:
                    continue

                if name[0] == 'debian' and name[1] == '9':
                    has_debian = True

                    if active:
                        not_debian = False

                print(' - %s%s%s:%s%s%s%s' % (Fore.YELLOW, name[0], Fore.RESET, Fore.YELLOW, name[1], Fore.RESET, ('%s*%s' % (Fore.GREEN, Fore.RESET) if active else '')))

            if not_debian:
                print()

                if has_debian:
                    print('To switch back to the default distribution, specify %sdebian%s:%s9%s as the argument.' % (Fore.YELLOW, Fore.RESET, Fore.YELLOW, Fore.RESET))
                else:
                    print('You do not seem to have the default distribution installed anymore.\nTo reinstall it, run %slxrun /uninstall%s and %slxrun /install%s from the command prompt.' % (Fore.GREEN, Fore.RESET, Fore.GREEN, Fore.RESET))

    sys.exit(-1)

image, tag, fname, label = parse_image_arg(sys.argv[1], False)

# sanity checks

print('%s[*]%s Probing the Linux subsystem...' % (Fore.GREEN, Fore.RESET))

basedir, lxpath, bashpath = probe_wsl()

# fix basedir to add LocalState\rootfs
basedir = os.path.join(basedir, 'LocalState')

# read label of current distribution
clabel = get_label(os.path.join(basedir, 'rootfs'))

if not clabel:
    # No /.switch_label marker: fall back to the assumed default distribution.
    # NOTE(review): the messages below still mention ubuntu:trusty while the
    # fallback label is debian_9 -- presumably left over from the upstream
    # project; confirm which default this fork actually ships.
    clabel = 'debian_9'

    if label == clabel:
        print('%s[!]%s No %s/.switch_label%s found, and the target rootfs is %subuntu%s:%strusty%s. Cannot continue.' % (Fore.RED, Fore.RESET, Fore.BLUE, Fore.RESET, Fore.YELLOW, Fore.RESET, Fore.YELLOW, Fore.RESET))
        print('%s[!]%s To fix this, run %secho some_tag > /.switch_label%s (replacing %ssome_tag%s with something like %sdebian_sid%s) from the current Bash terminal.' % (Fore.RED, Fore.RESET, Fore.GREEN, Fore.RESET, Fore.GREEN, Fore.RESET, Fore.GREEN, Fore.RESET))
        sys.exit(-1)
    else:
        print('%s[!]%s No %s/.switch_label%s found, assuming current rootfs is %subuntu%s:%strusty%s.' % (Fore.RED, Fore.RESET, Fore.BLUE, Fore.RESET, Fore.YELLOW, Fore.RESET, Fore.YELLOW, Fore.RESET))

# sanity checks, take two

if clabel == label:
    print('%s[!]%s The %s%s%s:%s%s%s rootfs is the current installation.' % (Fore.YELLOW, Fore.RESET, Fore.YELLOW, image, Fore.RESET, Fore.YELLOW, tag, Fore.RESET))
    sys.exit(-1)

if not os.path.isdir(os.path.join(basedir, 'rootfs_' + label)):
    print('%s[!]%s The %s%s%s:%s%s%s rootfs is not installed.' % (Fore.RED, Fore.RESET, Fore.YELLOW, image, Fore.RESET, Fore.YELLOW, tag, Fore.RESET))
    sys.exit(-1)

# do the switch: park the current rootfs, then promote the requested one.

print('%s[*]%s Moving current %srootfs%s to %srootfs_%s%s...' % (Fore.GREEN, Fore.RESET, Fore.BLUE, Fore.RESET, Fore.BLUE, clabel, Fore.RESET))

try:
    # cmd's "move" is used so Windows handles the rename of the WSL tree.
    subprocess.check_output(['cmd', '/C', 'move', path_trans(os.path.join(basedir, 'rootfs')), path_trans(os.path.join(basedir, 'rootfs_' + clabel))])
except subprocess.CalledProcessError as err:
    print('%s[!]%s Failed to backup current %srootfs%s: %s' % (Fore.RED, Fore.RESET, Fore.BLUE, Fore.RESET, err))
    sys.exit(-1)

print('%s[*]%s Moving desired %srootfs_%s%s to %srootfs%s...' % (Fore.GREEN, Fore.RESET, Fore.BLUE, label, Fore.RESET, Fore.BLUE, Fore.RESET))

try:
    subprocess.check_output(['cmd', '/C', 'move', path_trans(os.path.join(basedir, 'rootfs_' + label)), path_trans(os.path.join(basedir, 'rootfs'))])
except subprocess.CalledProcessError as err:
    print('%s[!]%s Failed to switch to new %srootfs%s: %s' % (Fore.RED, Fore.RESET, Fore.BLUE, Fore.RESET, err))
    print('%s[*]%s Rolling back to old %srootfs%s...' % (Fore.YELLOW, Fore.RESET, Fore.BLUE, Fore.RESET))

    try:
        # Best-effort rollback: restore the previously-parked rootfs.
        subprocess.check_output(['cmd', '/C', 'move', path_trans(os.path.join(basedir, 'rootfs_' + clabel)), path_trans(os.path.join(basedir, 'rootfs'))])
    except subprocess.CalledProcessError as err:
        print('%s[!]%s Failed to roll back to old %srootfs%s: %s' % (Fore.RED, Fore.RESET, Fore.BLUE, Fore.RESET, err))
        print('%s[!]%s You are now the proud owner of one broken Linux subsystem! To fix it, run %slxrun /uninstall%s and %slxrun /install%s from the command prompt.' % (Fore.RED, Fore.RESET, Fore.GREEN, Fore.RESET, Fore.GREEN, Fore.RESET))

    sys.exit(-1)
| 2.40625 | 2 |
Networking/Packets/Incoming/PingPacket.py | henriquelino/pyrelay | 26 | 12764153 | <gh_stars>10-100
class PingPacket:
    """Incoming PING packet; carries only a serial number."""

    def __init__(self):
        self.type = "PING"  # packet type identifier
        self.serial = 0     # serial number, populated by read()

    def read(self, reader):
        """Populate the packet from *reader* (any object with readInt32)."""
        self.serial = reader.readInt32()
| 2.375 | 2 |
Torrents/migrations/0005_auto_20200607_1345.py | tabish-ali/torrent_site | 1 | 12764154 | # Generated by Django 3.0.7 on 2020-06-07 13:45
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: redefine ``uploader_name`` on the
    ``uploadtorrents`` model as a blank-able CharField with default
    'tabish' and max length 50."""

    dependencies = [
        ('Torrents', '0004_auto_20200607_1344'),
    ]

    operations = [
        migrations.AlterField(
            model_name='uploadtorrents',
            name='uploader_name',
            field=models.CharField(blank=True, default='tabish', max_length=50),
        ),
    ]
| 1.375 | 1 |
test/mitmproxy/test_platform_pf.py | dolfly/mitmproxy | 9 | 12764155 | import sys
from mitmproxy.platform import pf
from . import tutils
class TestLookup:

    def test_simple(self):
        """pf.lookup() resolves the original destination from pf state data.

        Loads a platform-specific fixture (pf02 on FreeBSD 10, pf01
        elsewhere), then checks the success path and two miss cases
        (unknown host and unknown port).
        """
        if sys.platform == "freebsd10":
            p = tutils.test_data.path("data/pf02")
        else:
            p = tutils.test_data.path("data/pf01")
        # Read via a context manager: the original leaked the file handle
        # with open(p, "rb").read() and duplicated the read in both branches.
        with open(p, "rb") as fixture:
            d = fixture.read()
        assert pf.lookup("192.168.1.111", 40000, d) == ("5.5.5.5", 80)
        tutils.raises(
            "Could not resolve original destination",
            pf.lookup,
            "192.168.1.112",
            40000,
            d)
        tutils.raises(
            "Could not resolve original destination",
            pf.lookup,
            "192.168.1.111",
            40001,
            d)
| 2.390625 | 2 |
python/plugins/processing/gui/ListMultiselectWidget.py | dyna-mis/Hilabeling | 0 | 12764156 | <reponame>dyna-mis/Hilabeling
# -*- coding: utf-8 -*-
"""
***************************************************************************
ListMultiSelectWidget.py
---------------------
Date : June 2016
Copyright : (C) 2016 by <NAME>
Email : marco at opengis.ch
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = '<NAME>'
__date__ = 'June 2016'
__copyright__ = '(C) 2016, <NAME>'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '176c06ceefb5f555205e72b20c962740cc0ec183'
from qgis.PyQt.QtWidgets import (QGroupBox,
QPushButton,
QSizePolicy,
QLabel,
QHBoxLayout,
QVBoxLayout,
QListWidget,
QAbstractItemView)
from qgis.PyQt.QtGui import QFont
from qgis.PyQt.QtCore import Qt, QSize, pyqtSignal
class ListMultiSelectWidget(QGroupBox):
    """Widget to show two parallel lists and move elements between the two

    usage from code:
       self.myWidget = ListMultiSelectWidget(title='myTitle')
       self.myLayout.insertWidget(1, self.myWidget)

    usage from designer:
       insert a QGroupBox in your UI file
       optionally give a title to the QGroupBox
       promote it to ListMultiSelectWidget
    """

    # Emitted after every transfer of items between the two lists.
    selection_changed = pyqtSignal()

    def __init__(self, parent=None, title=None):
        # NOTE(review): parent is accepted but not forwarded to
        # QGroupBox.__init__ -- confirm this is intentional.
        QGroupBox.__init__(self)
        self.setTitle(title)
        self.selected_widget = None    # QListWidget with the chosen items
        self.deselected_widget = None  # QListWidget with the available items
        self._setupUI()

        # connect actions
        self.select_all_btn.clicked.connect(self._select_all)
        self.deselect_all_btn.clicked.connect(self._deselect_all)
        self.select_btn.clicked.connect(self._select)
        self.deselect_btn.clicked.connect(self._deselect)
        self.deselected_widget.itemDoubleClicked.connect(self._select)
        self.selected_widget.itemDoubleClicked.connect(self._deselect)

    def get_selected_items(self):
        """
        :return: generator yielding the text of every selected item
        """
        return self._get_items(self.selected_widget)

    def get_deselected_items(self):
        """
        :return: generator yielding the text of every deselected item
        """
        return self._get_items(self.deselected_widget)

    def add_selected_items(self, items):
        """
        :param items: list of strings to be added in the selected list
        """
        self._add_items(self.selected_widget, items)

    def add_deselected_items(self, items):
        """
        :param items: list of strings to be added in the deselected list
        """
        self._add_items(self.deselected_widget, items)

    def set_selected_items(self, items):
        """
        :param items: list of strings to be set as the selected list
        """
        self._set_items(self.selected_widget, items)

    def set_deselected_items(self, items):
        """
        :param items: list of strings to be set as the deselected list
        """
        self._set_items(self.deselected_widget, items)

    def clear(self):
        """
        removes all items from selected and deselected
        """
        self.set_selected_items([])
        self.set_deselected_items([])

    def addItem(self, item):
        """
        This is for Processing
        :param item: string to be added in the deselected list
        """
        self.add_deselected_items([item])

    def addItems(self, items):
        """
        This is for Processing
        :param items: list of strings to be added in the deselected list
        """
        self.add_deselected_items(items)

    def _get_items(self, widget):
        # Generator over the text of every row in the given list widget.
        for i in range(widget.count()):
            yield widget.item(i).text()

    def _set_items(self, widget, items):
        # Replace the widget's contents with the given items.
        widget.clear()
        self._add_items(widget, items)

    def _add_items(self, widget, items):
        widget.addItems(items)

    def _select_all(self):
        # Move everything from the deselected list to the selected list.
        self.deselected_widget.selectAll()
        self._do_move(self.deselected_widget, self.selected_widget)

    def _deselect_all(self):
        # Move everything from the selected list back to the deselected list.
        self.selected_widget.selectAll()
        self._do_move(self.selected_widget, self.deselected_widget)

    def _select(self):
        self._do_move(self.deselected_widget, self.selected_widget)

    def _deselect(self):
        self._do_move(self.selected_widget, self.deselected_widget)

    def _do_move(self, fromList, toList):
        # Transfer every highlighted item, keeping the source list scrolled
        # near where the item was taken from, then notify listeners once.
        for item in fromList.selectedItems():
            prev_from_item = fromList.item(fromList.row(item) - 1)
            toList.addItem(fromList.takeItem(fromList.row(item)))
            fromList.scrollToItem(prev_from_item)
        self.selection_changed.emit()

    def _setupUI(self):
        # Build the layout: deselected list | transfer buttons | selected list.
        self.setSizePolicy(
            QSizePolicy.Preferred, QSizePolicy.Preferred)
        self.setMinimumHeight(180)
        self.main_horizontal_layout = QHBoxLayout(self)
        italic_font = QFont()
        italic_font.setItalic(True)

        # deselected widget
        self.deselected_widget = QListWidget(self)
        self._set_list_widget_defaults(self.deselected_widget)
        deselected_label = QLabel()
        deselected_label.setText('Deselected')
        deselected_label.setAlignment(Qt.AlignCenter)
        deselected_label.setFont(italic_font)
        deselected_v_layout = QVBoxLayout()
        deselected_v_layout.addWidget(deselected_label)
        deselected_v_layout.addWidget(self.deselected_widget)

        # selected widget
        self.selected_widget = QListWidget(self)
        self._set_list_widget_defaults(self.selected_widget)
        selected_label = QLabel()
        selected_label.setText('Selected')
        selected_label.setAlignment(Qt.AlignCenter)
        selected_label.setFont(italic_font)
        selected_v_layout = QVBoxLayout()
        selected_v_layout.addWidget(selected_label)
        selected_v_layout.addWidget(self.selected_widget)

        # buttons
        self.buttons_vertical_layout = QVBoxLayout()
        self.buttons_vertical_layout.setContentsMargins(0, -1, 0, -1)
        self.select_all_btn = SmallQPushButton('>>')
        self.deselect_all_btn = SmallQPushButton('<<')
        self.select_btn = SmallQPushButton('>')
        self.deselect_btn = SmallQPushButton('<')
        self.select_btn.setToolTip('Add the selected items')
        self.deselect_btn.setToolTip('Remove the selected items')
        self.select_all_btn.setToolTip('Add all')
        self.deselect_all_btn.setToolTip('Remove all')

        # add buttons
        spacer_label = QLabel()  # pragmatic way to create a spacer with
        # the same height of the labels on top
        # of the lists, in order to align the
        # buttons with the lists.
        self.buttons_vertical_layout.addWidget(spacer_label)
        self.buttons_vertical_layout.addWidget(self.select_btn)
        self.buttons_vertical_layout.addWidget(self.deselect_btn)
        self.buttons_vertical_layout.addWidget(self.select_all_btn)
        self.buttons_vertical_layout.addWidget(self.deselect_all_btn)

        # add sub widgets
        self.main_horizontal_layout.addLayout(deselected_v_layout)
        self.main_horizontal_layout.addLayout(self.buttons_vertical_layout)
        self.main_horizontal_layout.addLayout(selected_v_layout)

    def _set_list_widget_defaults(self, widget):
        # Shared configuration: sorted, drag-and-drop between lists,
        # multi-selection.
        widget.setAlternatingRowColors(True)
        widget.setSortingEnabled(True)
        widget.setDragEnabled(True)
        widget.setDragDropMode(QAbstractItemView.DragDrop)
        widget.setDragDropOverwriteMode(False)
        widget.setDefaultDropAction(Qt.MoveAction)
        widget.setSelectionMode(QAbstractItemView.MultiSelection)
class SmallQPushButton(QPushButton):
    """Fixed-size (max 30x30) push button used for the transfer arrows."""

    def __init__(self, text):
        QPushButton.__init__(self)
        self.setText(text)
        fixed_policy = QSizePolicy(
            QSizePolicy.Fixed, QSizePolicy.Fixed)
        self.setSizePolicy(fixed_policy)
        self.setMaximumSize(QSize(30, 30))
| 1.273438 | 1 |
def laercio(valores):
    """Return the odd values of *valores* in "Laercio" order.

    The odd values are sorted ascending, then taken alternately from the
    two ends: largest, smallest, second largest, second smallest, ...

    :param valores: iterable of integers
    :return: list of the odd values in Laercio order (empty if none)
    """
    impares = sorted(v for v in valores if v % 2 == 1)
    resultado = []
    while impares:
        # Take the current maximum, then flip the list so the next pop
        # takes the current minimum (same pop/reverse trick as before).
        resultado.append(impares.pop())
        impares.reverse()
    return resultado


def main():
    """Read test cases from stdin and print one Laercio line per case."""
    n = int(input())
    for _ in range(n):
        int(input())  # m: declared list size, unused (kept for input format)
        lista = [int(x) for x in input().split()]
        print(*laercio(lista))


# Guard so importing this module no longer consumes stdin; running it as a
# script behaves exactly as before.
if __name__ == '__main__':
    main()
| 3.171875 | 3 |
pheweb/serve/components/chip/service.py | stellakeppo/pheweb | 0 | 12764158 | <reponame>stellakeppo/pheweb
# -*- coding: utf-8 -*-
"""
Endpoint for chip data.
Methods for flask blueprints.
"""
import typing
from flask import (
Blueprint,
jsonify,
current_app as app,
abort,
Response,
)
from .model import ChipDAO, JeevesContext
# Blueprint exposing the chip endpoints under /api/v1/*.
chip = Blueprint("pheweb_chip", __name__)
# Spare blueprint; not used by the routes defined in this module.
development = Blueprint("development", __name__)
# Declare the jeeves attribute on the app for type checkers.
app.jeeves: JeevesContext  # type: ignore
def get_dao(current_app=app) -> ChipDAO:
    """Return the ChipDAO stored on the app's jeeves context.

    Aborts with HTTP 404 when no chip DAO is configured, which means the
    chip data is not available in this deployment.

    :param current_app: Flask application holding the jeeves context
    :return: the configured ChipDAO
    """
    dao: typing.Optional[ChipDAO] = current_app.jeeves.chip_dao
    if dao is None:
        # flask.abort raises HTTPException, so there is no fall-through;
        # the original's dead `result = None` assignment is removed.
        abort(404, "Chip data not available")
    return dao
@chip.route("/api/v1/chip_data", methods=["GET"])
def chip_data() -> Response:
    """Serve the full chip dataset as a JSON response."""
    data_access: ChipDAO = get_dao()
    return jsonify(data_access.get_chip_data())
@chip.route("/api/v1/cluster_plot/<variant>", methods=["GET"])
def cluster_plot(variant) -> Response:
    """Return the cluster plot PNG for a variant.

    :param variant: variant identifier from the URL
    :return: PNG image response; aborts with 404 when the plot is missing
    """
    dao: ChipDAO = get_dao()
    data = dao.get_cluster_plot(variant)
    if data is None:
        # flask.abort raises HTTPException, so there is no fall-through;
        # the original's dead `result = None` assignment is removed.
        abort(404, "Requested cluster plot not found!")
    return Response(data, mimetype="image/png")
| 2.328125 | 2 |
compss/programming_model/bindings/python/src/pycompss/tests/util/test_warning_modules.py | eflows4hpc/compss | 0 | 12764159 | #!/usr/bin/python
#
# Copyright 2002-2021 Barcelona Supercomputing Center (www.bsc.es)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# -*- coding: utf-8 -*-
import os
import sys
from pycompss.util.exceptions import PyCOMPSsException
def test_get_optional_module_warning():
    """The warning must be a non-empty string mentioning both the module
    name and its description."""
    from pycompss.util.warnings.modules import get_optional_module_warning

    message = get_optional_module_warning("UNITTEST_NAME", "UNITTEST_DESCRIPTION")
    assert isinstance(message, str), "Optional module warning does NOT return a string"
    assert message != "", "Optional module warning can not be empty"
    assert "UNITTEST_NAME" in message, "Module name not in optional module warning"
    assert (
        "UNITTEST_DESCRIPTION" in message
    ), "Module description not in optional module warning"
def test_show_optional_module_warning():
    """show_optional_module_warnings() must print something for a fake
    optional package injected into OPTIONAL_MODULES."""
    import contextlib

    import pycompss.util.warnings.modules as warn

    # Hack - Add non existing package
    warn.OPTIONAL_MODULES["non_existing_package"] = "this is the description"
    out_file = "warning.out"
    try:
        # redirect_stdout restores sys.stdout even if the call raises; the
        # original hand-rolled redirect leaked the file handle and left
        # sys.stdout pointing at a closed file on failure.
        with open(out_file, "w") as fd, contextlib.redirect_stdout(fd):
            warn.show_optional_module_warnings()
    finally:
        # Cleanup the injected package even when the call above fails.
        del warn.OPTIONAL_MODULES["non_existing_package"]
    # Result check
    if os.path.exists(out_file) and os.path.getsize(out_file) > 0:
        # Non empty file exists - this is ok.
        os.remove(out_file)
    else:
        raise PyCOMPSsException("The warning has not been shown")
| 2.1875 | 2 |
scripts/experimenting/dataset/core/base.py | miracleyoo/lifting_events_to_3d_hpe | 0 | 12764160 | import os
from abc import ABC, abstractmethod
from typing import Tuple
import numpy as np
import torch
from scipy import io
from experimenting.utils import Skeleton
class BaseCore(ABC):
    """
    Base class for dataset cores. Each core should implement get_frame_info and
    load_frame_from_id for base functionalities. Labels, heatmaps, and joints
    loading may be implemented as well to use the relative task implementations
    """

    def __init__(self, name, partition):
        """
        :param name: dataset core name
        :param partition: partition parameter; None selects cross-subject,
            anything else selects cross-view (see partition_function)
        """
        # self._set_partition_function(partition)
        self.partition = partition
        # NOTE(review): _partition_function is only initialised when the
        # subclass defines n_channels; otherwise accessing the
        # partition_function property raises AttributeError -- confirm.
        if hasattr(self, 'n_channels'):
            self._partition_function = None #! Remember to uncomment this line when trained on original dataset!
        self.name = name

    # def _set_partition_function(self, partition_param):
    #     if partition_param is None:
    #         partition_param = "cross-subject"

    #     if partition_param == "cross-subject":
    #         self.partition_function = self.get_cross_subject_partition_function()
    #     else:
    #         self.partition_function = self.get_cross_view_partition_function()

    @property
    def partition_function(self):
        # Lazily build and cache the partition predicate on first access.
        if self._partition_function is not None:
            # print("YEAHHHHHHHHHHHHHHHHHHHH\n\n\n\n")
            return self._partition_function
        else:
            if self.partition is None:
                self._partition_function = self.get_cross_subject_partition_function()
            else:
                self._partition_function = self.get_cross_view_partition_function()
            return self._partition_function

    # def _set_partition_function(self, partition_param):
    #     if partition_param is None:
    #         partition_param = "cross-subject"

    #     if partition_param == "cross-subject":
    #         self.partition_function = self.get_cross_subject_partition_function()
    #     else:
    #         self.partition_function = self.get_cross_view_partition_function()

    @staticmethod
    @abstractmethod
    def get_frame_info(path):
        """
        Get frame attributes given the path

        Args:
            path: frame path

        Returns:
            Frame attributes as a subscriptable object
        """

    def get_cross_subject_partition_function(self):
        """
        Get partition function for cross-subject evaluation method

        Note:
            Core class must implement get_test_subjects
            get_frame_info must provide frame's subject
        """
        def _get(x):
            # True when the frame's subject belongs to the test subjects.
            return self.frames_info[x]["subject"] in self.get_test_subjects()
        # temp = lambda x: self.frames_info[x]["subject"] in self.get_test_subjects()
        return _get

    def get_cross_view_partition_function(self):
        """
        Get partition function for cross-view evaluation method

        Note:
            Core class must implement get_test_view
            get_frame_info must provide frame's cam
        """
        def _get(x):
            # True when the frame's camera belongs to the test views.
            return self.frames_info[x]["cam"] in self.get_test_view()
        # temp = lambda x: self.frames_info[x]["cam"] in self.get_test_view()
        return _get

    def train_partition_function(self, x):
        """
        Accept all inputs as training
        """
        return True

    def get_test_subjects(self):
        raise NotImplementedError()

    def get_test_view(self):
        raise NotImplementedError()

    def get_frame_from_id(self, idx):
        raise NotImplementedError()

    def get_label_from_id(self, idx):
        raise NotImplementedError()

    def get_joint_from_id(self, idx) -> Tuple[Skeleton, torch.Tensor, torch.Tensor]:
        raise NotImplementedError()

    def get_heatmap_from_id(self, idx):
        raise NotImplementedError()
| 2.390625 | 2 |
diplomacy_research/players/benchmarks/rl_neurips2019/dataset_builder.py | wwongkamjan/dipnet_press | 39 | 12764161 | # ==============================================================================
# Copyright 2019 - <NAME>
#
# NOTICE: Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# ==============================================================================
""" RL (NeurIPS 2019) Dataset Builder
- Base class responsible for generating the protocol buffers to be used by the model
"""
import logging
import numpy as np
from diplomacy import Map
from diplomacy_research.models.datasets.base_builder import FixedProtoField, VarProtoField
from diplomacy_research.models.policy.base_policy_builder import BasePolicyBuilder
from diplomacy_research.models.state_space import get_order_based_mask, proto_to_board_state, GO_ID, NB_NODES, \
NB_SUPPLY_CENTERS, POWER_VOCABULARY_KEY_TO_IX, MAX_CANDIDATES, NB_FEATURES, NB_ORDERS_FEATURES, NB_PREV_ORDERS, \
get_board_alignments, get_orderable_locs_for_powers, get_current_season, proto_to_prev_orders_state
# Constants
LOGGER = logging.getLogger(__name__)  # module-level logger (used for warnings below)
class BaseDatasetBuilder(BasePolicyBuilder):
    """ This object is responsible for maintaining the data and feeding it into the model """

    @staticmethod
    def get_proto_fields():
        """ Returns the proto fields used by this dataset builder """
        # Creating proto fields
        proto_fields = {
            'request_id': FixedProtoField([], None),
            'player_seed': FixedProtoField([], np.int32),
            'board_state': FixedProtoField([NB_NODES, NB_FEATURES], np.uint8),
            'board_alignments': VarProtoField([NB_NODES * NB_SUPPLY_CENTERS], np.uint8),
            'prev_orders_state': FixedProtoField([NB_PREV_ORDERS, NB_NODES, NB_ORDERS_FEATURES], np.uint8),
            'decoder_inputs': VarProtoField([1 + NB_SUPPLY_CENTERS], np.int32),
            'decoder_lengths': FixedProtoField([], np.int32),
            'candidates': VarProtoField([None, MAX_CANDIDATES], np.int32),
            'noise': FixedProtoField([], np.float32),
            'temperature': FixedProtoField([], np.float32),
            'dropout_rate': FixedProtoField([], np.float32),
            'current_power': FixedProtoField([], np.int32),
            'current_season': FixedProtoField([], np.int32),
            'value_targets': FixedProtoField([], np.float32),
            'context': VarProtoField([256 * 2 * 8], np.float32),
            'messages': VarProtoField([1 + 1000], np.int32),
            'message_lengths': FixedProtoField([], np.int32),
            'senders': VarProtoField([1000], np.uint8),
            'recipients': VarProtoField([1000], np.uint8),
            'next_conversant': FixedProtoField([2], np.int32)
        }
        return proto_fields

    @staticmethod
    def get_feedable_item(locs, state_proto, power_name, phase_history_proto, possible_orders_proto, **kwargs):
        """ Computes and return a feedable item (to be fed into the feedable queue)
            :param locs: A list of locations for which we want orders
            :param state_proto: A `.proto.game.State` representation of the state of the game.
            :param power_name: The power name for which we want the orders and the state values
            :param phase_history_proto: A list of `.proto.game.PhaseHistory`. This represents prev phases.
            :param possible_orders_proto: A `proto.game.PossibleOrders` object representing possible order for each loc.
            :param kwargs: Additional optional kwargs:
                - player_seed: The seed to apply to the player to compute a deterministic mask.
                - noise: The sigma of the additional noise to apply to the intermediate layers (i.e. sigma * epsilon)
                - temperature: The temperature to apply to the logits. (Default to 0. for deterministic/greedy)
                - dropout_rate: The amount of dropout to apply to the inputs/outputs of the decoder.
            :return: A feedable item, with feature names as key and numpy arrays as values
        """
        # pylint: disable=too-many-branches
        # Converting to state space
        map_object = Map(state_proto.map)
        board_state = proto_to_board_state(state_proto, map_object)

        # Building the decoder length
        # For adjustment phase, we restrict the number of builds/disbands to what is allowed by the game engine
        # A phase name ending in 'A' denotes an adjustment phase.
        in_adjustment_phase = state_proto.name[-1] == 'A'
        nb_builds = state_proto.builds[power_name].count
        nb_homes = len(state_proto.builds[power_name].homes)

        # If we are in adjustment phase, making sure the locs are the orderable locs (and not the policy locs)
        if in_adjustment_phase:
            orderable_locs, _ = get_orderable_locs_for_powers(state_proto, [power_name])
            if sorted(locs) != sorted(orderable_locs):
                if locs:
                    LOGGER.warning('Adj. phase requires orderable locs. Got %s. Expected %s.', locs, orderable_locs)
                locs = orderable_locs

        # WxxxA - We can build units
        # WxxxA - We can disband units
        # Other phase
        if in_adjustment_phase and nb_builds >= 0:
            decoder_length = min(nb_builds, nb_homes)
        elif in_adjustment_phase and nb_builds < 0:
            decoder_length = abs(nb_builds)
        else:
            decoder_length = len(locs)

        # Computing the candidates for the policy
        if possible_orders_proto:

            # Adjustment Phase - Use all possible orders for each location.
            if in_adjustment_phase:

                # Building a list of all orders for all locations
                adj_orders = []
                for loc in locs:
                    adj_orders += possible_orders_proto[loc].value

                # Computing the candidates
                candidates = [get_order_based_mask(adj_orders)] * decoder_length

            # Regular phase - Compute candidates for each location
            else:
                candidates = []
                for loc in locs:
                    candidates += [get_order_based_mask(possible_orders_proto[loc].value)]

        # We don't have possible orders, so we cannot compute candidates
        # This might be normal if we are only getting the state value or the next message to send
        else:
            candidates = []
            for _ in range(decoder_length):
                candidates.append([])

        # Prev orders state
        # Collect up to NB_PREV_ORDERS movement phases, most recent last,
        # left-padding with zero states when history is too short.
        prev_orders_state = []
        for phase_proto in reversed(phase_history_proto):
            if len(prev_orders_state) == NB_PREV_ORDERS:
                break
            if phase_proto.name[-1] == 'M':
                prev_orders_state = [proto_to_prev_orders_state(phase_proto, map_object)] + prev_orders_state
        for _ in range(NB_PREV_ORDERS - len(prev_orders_state)):
            prev_orders_state = [np.zeros((NB_NODES, NB_ORDERS_FEATURES), dtype=np.uint8)] + prev_orders_state
        prev_orders_state = np.array(prev_orders_state)

        # Building (order) decoder inputs [GO_ID]
        decoder_inputs = [GO_ID]

        # kwargs
        player_seed = kwargs.get('player_seed', 0)
        noise = kwargs.get('noise', 0.)
        temperature = kwargs.get('temperature', 0.)
        dropout_rate = kwargs.get('dropout_rate', 0.)

        # Building feedable data
        # NOTE(review): only a subset of get_proto_fields() keys is filled
        # here (e.g. messages / value_targets are absent); presumably the
        # remaining fields are completed elsewhere -- confirm.
        item = {
            'player_seed': player_seed,
            'board_state': board_state,
            'board_alignments': get_board_alignments(locs,
                                                     in_adjustment_phase=in_adjustment_phase,
                                                     tokens_per_loc=1,
                                                     decoder_length=decoder_length),
            'prev_orders_state': prev_orders_state,
            'decoder_inputs': decoder_inputs,
            'decoder_lengths': decoder_length,
            'candidates': candidates,
            'noise': noise,
            'temperature': temperature,
            'dropout_rate': dropout_rate,
            'current_power': POWER_VOCABULARY_KEY_TO_IX[power_name],
            'current_season': get_current_season(state_proto)
        }

        # Return
        return item

    @property
    def proto_generation_callable(self):
        """ Returns a callable required for proto files generation.
            e.g. return generate_proto(saved_game_bytes, is_validation_set)

            Note: Callable args are - saved_game_bytes: A `.proto.game.SavedGame` object from the dataset
                                    - phase_ix: The index of the phase we want to process
                                    - is_validation_set: Boolean that indicates if we are generating the validation set

            Note: Used bytes_to_proto from diplomacy_research.utils.proto to convert bytes to proto
                  The callable must return a list of tf.train.Example to put in the protocol buffer file
        """
        raise NotImplementedError()
| 1.453125 | 1 |
Challenge_1/members/martin_game.py | joeherold/weekly_coding_challenge_fwkwkw_python | 1 | 12764162 | <gh_stars>1-10
# Building a game with pygame
# pygame is a library that makes it easy to create games.
# Below, the creation of a simple game with pygame is demonstrated.
# Why is this useful for university students? It can pass the time during boring business lectures.
# To run pygame, it must be installed from the shell / terminal with the following command:
# pip install pygame
# The installation can be tested in the shell / terminal with the command below. If a mini-game appears, it worked.
# python3 -m pygame.examples.aliens
import pygame # Import der Library
import random # Ermöglicht die Generierung von Zufallswerten
# Damit wird die Steuerung für das Spiel importiert
from pygame.locals import (
K_UP,
K_DOWN,
K_LEFT,
K_RIGHT,
K_ESCAPE,
KEYDOWN,
QUIT,
)
# Window setup for the game; the window itself is opened by the operating system.
fensterBreite = 800       # window width in pixels
fensterHöhe = 600         # window height in pixels
heldFarbe = (50, 50, 50)  # player colour (dark grey RGB)
# Definition of the player:
class Held(pygame.sprite.Sprite):
    """Player sprite ("Held" is German for hero): an 80x40 rectangle moved
    with the arrow keys and clamped to the window bounds."""

    def __init__(self):
        super(Held, self).__init__()
        self.surf = pygame.Surface((80, 40))
        self.surf.fill(heldFarbe)
        self.rect = self.surf.get_rect()

    def update(self, pressed_keys):  # defines how the player moves
        # 6 pixels per frame in the direction of the pressed arrow key.
        if pressed_keys[K_UP]:
            self.rect.move_ip(0, -6)
        if pressed_keys[K_DOWN]:
            self.rect.move_ip(0, 6)
        if pressed_keys[K_LEFT]:
            self.rect.move_ip(-6, 0)
        if pressed_keys[K_RIGHT]:
            self.rect.move_ip(6, 0)
        # Keep the player inside the window.
        if self.rect.left < 0:
            self.rect.left = 0
        if self.rect.right > fensterBreite:
            self.rect.right = fensterBreite
        if self.rect.top <= 0:
            self.rect.top = 0
        if self.rect.bottom >= fensterHöhe:
            self.rect.bottom = fensterHöhe
# Enemy sprites that scroll in from the right edge of the playfield.
class Enemy(pygame.sprite.Sprite):
    """An obstacle that spawns just off-screen right and slides left each frame."""

    def __init__(self):
        super(Enemy, self).__init__()
        self.surf = pygame.Surface((40, 20))
        self.surf.fill((200, 200, 200))
        # Spawn beyond the right border at a random height (same randint order
        # as before: x first, then y, then speed).
        spawn_x = random.randint(fensterBreite + 20, fensterBreite + 100)
        spawn_y = random.randint(0, fensterHöhe)
        self.rect = self.surf.get_rect(center=(spawn_x, spawn_y))
        self.speed = random.randint(2, 10)

    def update(self):
        """Slide left at this sprite's own speed; despawn once fully off-screen."""
        self.rect.move_ip(-self.speed, 0)
        if self.rect.right < 0:
            self.kill()
# Initialise pygame so it can be used; pygame runs on Windows, Linux and macOS.
pygame.init()
# Create the game window using the settings defined above.
screen = pygame.display.set_mode((fensterBreite, fensterHöhe))
# Spawn an enemy every 250 ms (4 per second) for the whole game.
ADDENEMY = pygame.USEREVENT + 1
pygame.time.set_timer(ADDENEMY, 250)
held = Held()  # create the player
feinde = pygame.sprite.Group()  # enemies only - used for collision detection
all_sprites = pygame.sprite.Group()  # every drawable element, used for rendering
all_sprites.add(held)
clock = pygame.time.Clock()  # clock that caps the game speed
# Run the game until it is ended (window closed, ESC pressed, or collision).
running = True
while running:
    for event in pygame.event.get():  # process user input events
        if event.type == KEYDOWN:
            # NOTE(review): the original comment claimed ESC does not work;
            # the KEYDOWN/K_ESCAPE check below looks correct - verify at runtime.
            if event.key == K_ESCAPE:
                running = False
        elif event.type == QUIT:  # window closed
            running = False
        elif event.type == ADDENEMY:  # spawn a new enemy and register it
            new_enemy = Enemy()
            feinde.add(new_enemy)
            all_sprites.add(new_enemy)
    pressed_keys = pygame.key.get_pressed()  # snapshot of held-down keys
    held.update(pressed_keys)  # move the player
    feinde.update()  # advance all enemies
    screen.fill((255, 204, 153))  # background colour
    #pygame.display.flip()
    # Draw every game element.
    for entity in all_sprites:
        screen.blit(entity.surf, entity.rect)
    if pygame.sprite.spritecollideany(held, feinde):  # player hit an enemy?
        held.kill()
        running = False
    pygame.display.flip()  # push the finished frame to the screen
    clock.tick(100)  # frames per second
| 3.125 | 3 |
main/forms.py | mailfish/helena | 0 | 12764163 | <reponame>mailfish/helena
from .models import User
from django import forms
from django.contrib.auth.forms import UserChangeForm, UserCreationForm
class AdminUserAddForm(UserCreationForm):
    """Admin form for creating a new ``User``.

    Mirrors :class:`django.contrib.auth.forms.UserCreationForm` but binds it
    to this project's custom ``User`` model.
    """

    class Meta:
        model = User
        # BUG FIX: a ModelForm must declare ``fields`` (or ``exclude``);
        # omitting both raises ImproperlyConfigured on Django >= 1.8.
        # ``("username",)`` matches UserCreationForm's own default Meta.
        fields = ("username",)

    def clean_username(self):
        """Reject usernames that already exist, with a friendlier message.

        Since User.username is unique, this check is redundant,
        but it sets a nicer error message than the ORM. See Django #13147.
        """
        username = self.cleaned_data["username"]
        try:
            User._default_manager.get(username=username)
        except User.DoesNotExist:
            return username
        # NOTE(review): the 'duplicate_username' key exists only on older
        # Django versions of UserCreationForm.error_messages - confirm against
        # the Django version this project pins.
        raise forms.ValidationError(self.error_messages['duplicate_username'])
class AdminUserChangeForm(UserChangeForm):
    """Admin form for editing an existing ``User``."""

    class Meta:
        model = User
        # BUG FIX: required on Django >= 1.8 - a ModelForm without ``fields``
        # or ``exclude`` raises ImproperlyConfigured. ``__all__`` keeps the
        # historical behaviour of exposing every editable model field.
        fields = "__all__"
| 2.546875 | 3 |
cinema_environment/server_monolith/server_app/migrations/0010_auto_20200113_1616.py | AndrewMalitchuk/cinema-server-monolith | 0 | 12764164 | # Generated by Django 3.0.2 on 2020-01-13 14:16
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: store ``Film.pic_url`` as an uploaded file.

    Alters ``pic_url`` to a ``FileField`` whose uploads land under ``film_pic/``.
    """

    dependencies = [
        ('server_app', '0009_auto_20200113_1606'),
    ]

    operations = [
        migrations.AlterField(
            model_name='film',
            name='pic_url',
            field=models.FileField(upload_to='film_pic/'),
        ),
    ]
| 1.445313 | 1 |
counts.py | nickgerend/EyeintheSky | 0 | 12764165 | <filename>counts.py
# Written by: <NAME>, @dataoutsider
# Viz: "Eye in the Sky", enjoy!
import pandas as pd
import os
# Load the cleaned UCS satellite database, located next to this script.
df = pd.read_csv(os.path.dirname(__file__) + '/UCS-Satellite-Database-8-1-2020_Clean.csv')
# Bool-to-string normalisation kept for reference (applied to df2 below instead):
# mask = df.applymap(type) != bool
# d = {True: 'TRUE', False: 'FALSE'}
# df = df.where(mask, df.replace(d))
def _single_counts(frame, column):
    """Return per-value counts of *column* as a two-column frame (Item, Count)."""
    return (frame.groupby(column).size().to_frame('Count').reset_index()
            .rename({column: 'Item'}, axis=1))

# Refactor: the same groupby/size/rename boilerplate was repeated 8 times;
# one count table per dimension of interest, stacked into a single frame.
_single_columns = [
    'Launch Site',
    'User_Group',
    'Same_Owner_Contractor',
    'Purpose_Group',
    'Year_Bin',
    'Orbit',
    'Launch Vehicle',
    'Vehicle Group',
]
dfs = [_single_counts(df, column) for column in _single_columns]
df_combined = pd.concat(dfs, axis=0)
# df_combined.reset_index()
# df_combined.columns =['Item', 'Count']
#print(df_combined)
#df_combined.to_csv(os.path.dirname(__file__) + '/data_counts.csv', encoding='utf-8', index=False)
#df_combined.to_csv(os.path.dirname(__file__) + '/data_counts2.csv', encoding='utf-8', index=False)
df2 = pd.read_csv(os.path.dirname(__file__) + '/UCS-Satellite-Database-8-1-2020_Clean.csv')
# Render boolean cells as the strings 'TRUE'/'FALSE' so they group cleanly.
mask = df2.applymap(type) != bool
d = {True: 'TRUE', False: 'FALSE'}
df2 = df2.where(mask, df2.replace(d))

def _pair_counts(frame, column1, column2):
    """Return counts per (column1, column2) pair as an (Item1, Item2, Count) frame."""
    return (frame.groupby([column1, column2], sort=False).size()
            .to_frame('Count').reset_index()
            .rename({column1: 'Item1', column2: 'Item2'}, axis=1))

# Pairwise count tables. BUG FIX: the last two tables previously grouped the
# raw frame ``df`` (booleans not normalised) while the rest used ``df2``;
# all pairs now use the bool-normalised ``df2`` for consistent output.
_pair_columns = [
    ('Purpose_Group', 'Year_Bin'),
    ('Purpose_Group', 'User_Group'),
    ('User_Group', 'Orbit'),
    ('Same_Owner_Contractor', 'Launch Site'),
    ('Same_Owner_Contractor', 'Launch Vehicle'),
    ('Launch Vehicle', 'Vehicle Group'),
]
dfgs = [_pair_counts(df2, column1, column2) for column1, column2 in _pair_columns]
df_combined2 = pd.concat(dfgs, axis=0)
#print(df_combined2)
#df_combined2.to_csv(os.path.dirname(__file__) + '/data_group_counts.csv', encoding='utf-8', index=False)
df_combined2.to_csv(os.path.dirname(__file__) + '/data_group_counts2.csv', encoding='utf-8', index=False)
#print(df_combined2)
setup.py | fraser-langton/Quandoo | 1 | 12764166 | """A setuptools based setup module.
See:
https://packaging.python.org/guides/distributing-packages-using-setuptools/
https://github.com/pypa/sampleproject
"""
from os import path
from setuptools import setup, find_packages
# Read the long description for PyPI straight from the README.
with open(path.join(path.abspath(path.dirname(__file__)), 'README.md'), encoding='utf-8') as f:
    long_description = f.read()

setup(
    name='Quandoo',
    version='1.3.5',
    description="A SDK for interacting with the Quandoo API, it is a work in progress",
    long_description=long_description,
    long_description_content_type='text/markdown',
    url='https://github.com/fraser-langton/Quandoo',
    author='<NAME>',
    author_email='<EMAIL>',
    classifiers=[
        'Development Status :: 3 - Alpha',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: MIT License',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.4',
        'Programming Language :: Python :: 3.5',
        'Programming Language :: Python :: 3.6',
        'Programming Language :: Python :: 3.7',
        'Programming Language :: Python :: 3.8',
    ],
    keywords='quandoo api',
    packages=find_packages(exclude=['contrib', 'docs', 'tests', 'test']),
    # NOTE(review): python_requires admits Python 2.7 while the classifiers
    # advertise Python 3 only - confirm which is intended.
    python_requires='>=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, <4',
    install_requires=['requests', 'tzlocal', 'python-dotenv'],
)
| 1.484375 | 1 |
ironic/tests/unit/api/test_acl.py | zhouhao3/ironic | 1 | 12764167 | <reponame>zhouhao3/ironic
# -*- encoding: utf-8 -*-
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Tests for ACL. Checks whether certain kinds of requests
are blocked or allowed to be processed.
"""
import abc
from unittest import mock
import ddt
from keystonemiddleware import auth_token
from oslo_config import cfg
from ironic.api.controllers.v1 import versions as api_versions
from ironic.common import exception
from ironic.conductor import rpcapi
from ironic.tests.unit.api import base
from ironic.tests.unit.db import utils as db_utils
class TestACLBase(base.BaseApiTest):
    """Shared plumbing for the ACL/RBAC API test classes.

    Subclasses create DB fixtures in :meth:`_create_test_data` and drive one
    HTTP request per YAML scenario through :meth:`_test_request`.
    """

    def setUp(self):
        super(TestACLBase, self).setUp()
        self.environ = {}
        # Values interpolated into paths/headers/expected bodies via str.format.
        self.format_data = {}
        self._create_test_data()
        self.fake_token = None
        # Bypass real keystonemiddleware token validation.
        mock_auth = mock.patch.object(
            auth_token.AuthProtocol, 'process_request',
            autospec=True)
        self.mock_auth = mock_auth.start()
        self.addCleanup(mock_auth.stop)
        # Make RPC topic lookups fail so no conductor is ever contacted.
        topic = mock.patch.object(
            rpcapi.ConductorAPI, 'get_topic_for', autospec=True)
        self.mock_topic = topic.start()
        self.mock_topic.side_effect = exception.TemporaryFailure
        self.addCleanup(topic.stop)
        rtopic = mock.patch.object(rpcapi.ConductorAPI, 'get_random_topic',
                                   autospec=True)
        self.mock_random_topic = rtopic.start()
        self.mock_random_topic.side_effect = exception.TemporaryFailure
        self.addCleanup(rtopic.stop)
        self._set_test_config()

    def _make_app(self):
        # ACL tests only make sense with keystone auth enabled.
        cfg.CONF.set_override('auth_strategy', 'keystone')
        return super(TestACLBase, self)._make_app()

    @abc.abstractmethod
    def _create_test_data(self):
        """Create the DB fixtures a subclass' scenarios refer to."""
        pass

    @abc.abstractmethod
    def _set_test_config(self):
        """Set the oslo.policy configuration this subclass runs under."""
        pass

    def _check_skip(self, **kwargs):
        # Scenarios may opt out via a 'skip_reason' key in the YAML.
        if kwargs.get('skip_reason'):
            self.skipTest(kwargs.get('skip_reason'))

    def _fake_process_request(self, request, auth_token_request):
        # Replacement for AuthProtocol.process_request: accept every request.
        pass

    def _test_request(self, path, params=None, headers=None, method='get',
                      body=None, assert_status=None,
                      assert_dict_contains=None,
                      assert_list_length=None,
                      deprecated=None):
        """Issue one API request and check status/body per the scenario."""
        path = path.format(**self.format_data)
        self.mock_auth.side_effect = self._fake_process_request
        # always request the latest api version
        version = api_versions.max_version_string()
        rheaders = {
            'X-OpenStack-Ironic-API-Version': version
        }
        # NOTE(TheJulia): Logging the test request to aid
        # in troubleshooting ACL testing. This is a pattern
        # followed in API unit testing in ironic, and
        # really does help.
        print('API ACL Testing Path %s %s' % (method, path))
        if headers:
            for k, v in headers.items():
                rheaders[k] = v.format(**self.format_data)
        if method == 'get':
            response = self.get_json(
                path,
                headers=rheaders,
                expect_errors=True,
                extra_environ=self.environ,
                path_prefix=''
            )
        elif method == 'put':
            response = self.put_json(
                path,
                headers=rheaders,
                expect_errors=True,
                extra_environ=self.environ,
                path_prefix='',
                params=body
            )
        elif method == 'post':
            response = self.post_json(
                path,
                headers=rheaders,
                expect_errors=True,
                extra_environ=self.environ,
                path_prefix='',
                params=body
            )
        elif method == 'patch':
            response = self.patch_json(
                path,
                params=body,
                headers=rheaders,
                expect_errors=True,
                extra_environ=self.environ,
                path_prefix=''
            )
        elif method == 'delete':
            response = self.delete(
                path,
                headers=rheaders,
                expect_errors=True,
                extra_environ=self.environ,
                path_prefix=''
            )
        else:
            assert False, 'Unimplemented test method: %s' % method
        # Once migrated:
        # Items will return:
        # 403 - Trying to access something that is generally denied.
        #       Example: PATCH /v1/nodes/<uuid> as a reader.
        # 404 - Trying to access something where we don't have permissions
        #       in a project scope. This is particularly true where implied
        #       permissions or association exists. Ports are attempted to be
        #       accessed when the underlying node is inaccessible as owner
        #       nor node matches.
        #       Example: GET /v1/portgroups or /v1/nodes/<uuid>/ports
        # 500 - Attempting to access something such an system scoped endpoint
        #       with a project scoped request. Example: /v1/conductors.
        if not (bool(deprecated)
                and ('404' in response.status
                     or '500' in response.status
                     or '403' in response.status)
                and cfg.CONF.oslo_policy.enforce_scope
                and cfg.CONF.oslo_policy.enforce_new_defaults):
            self.assertEqual(assert_status, response.status_int)
        else:
            self.assertTrue(
                ('404' in response.status
                 or '500' in response.status
                 or '403' in response.status))
            # We can't check the contents of the response if there is no
            # response.
            return
        if not bool(deprecated):
            self.assertIsNotNone(assert_status,
                                 'Tests must include an assert_status')
        if assert_dict_contains:
            for k, v in assert_dict_contains.items():
                self.assertIn(k, response)
                print(k)
                print(v)
                if str(v) == "None":
                    # Compare since the variable loaded from the
                    # json ends up being null in json or None.
                    self.assertIsNone(response.json[k])
                elif str(v) == "{}":
                    # Special match for signifying a dictionary.
                    self.assertEqual({}, response.json[k])
                elif isinstance(v, dict):
                    # The value from the YAML can be a dictionary,
                    # which cannot be formatted, so we're likely doing
                    # direct matching.
                    self.assertEqual(str(v), str(response.json[k]))
                else:
                    self.assertEqual(v.format(**self.format_data),
                                     response.json[k])
        if assert_list_length:
            for root, length in assert_list_length.items():
                # root - object to look inside
                # length - number of expected elements which will be
                #          important for owner/lessee testing.
                items = response.json[root]
                self.assertIsInstance(items, list)
                if not (bool(deprecated)
                        and cfg.CONF.oslo_policy.enforce_scope):
                    self.assertEqual(length, len(items))
                else:
                    # If we have scope enforcement, we likely have different
                    # views, such as "other" admins being subjected to
                    # a filtered view in these cases.
                    self.assertEqual(0, len(items))
        # NOTE(TheJulia): API tests in Ironic tend to have a pattern
        # to print request and response data to aid in development
        # and troubleshooting. As such the prints should remain,
        # at least until we are through primary development of
        # this test suite.
        print('ACL Test GOT %s' % response)
@ddt.ddt
class TestRBACBasic(TestACLBase):
    """Basic ACL scenarios, driven by test_acl_basic.yaml."""

    def _create_test_data(self):
        # One anonymous node whose uuid the YAML scenarios interpolate.
        fake_db_node = db_utils.create_test_node(chassis_id=None)
        self.format_data['node_uuid'] = fake_db_node['uuid']

    @ddt.file_data('test_acl_basic.yaml')
    @ddt.unpack
    def test_basic(self, **kwargs):
        self._check_skip(**kwargs)
        self._test_request(**kwargs)
@ddt.ddt
class TestRBACModelBeforeScopesBase(TestACLBase):
    """Fixture base: the full set of DB objects the RBAC YAML scenarios use."""

    def _create_test_data(self):
        allocated_node_id = 31
        fake_db_allocation = db_utils.create_test_allocation(
            node_id=allocated_node_id,
            resource_class="CUSTOM_TEST")
        fake_db_node = db_utils.create_test_node(
            chassis_id=None,
            driver='fake-driverz',
            owner='z')
        fake_db_node_alloced = db_utils.create_test_node(
            id=allocated_node_id,
            chassis_id=None,
            allocation_id=fake_db_allocation['id'],
            uuid='22e26c0b-03f2-4d2e-ae87-c02d7f33c000',
            driver='fake-driverz',
            owner='z')
        fake_vif_port_id = "ee21d58f-5de2-4956-85ff-33935ea1ca00"
        fake_db_port = db_utils.create_test_port(
            node_id=fake_db_node['id'],
            internal_info={'tenant_vif_port_id': fake_vif_port_id})
        fake_db_portgroup = db_utils.create_test_portgroup(
            uuid="6eb02b44-18a3-4659-8c0b-8d2802581ae4",
            node_id=fake_db_node['id'])
        fake_db_chassis = db_utils.create_test_chassis(
            drivers=['fake-hardware', 'fake-driverz', 'fake-driver'])
        fake_db_deploy_template = db_utils.create_test_deploy_template()
        fake_db_conductor = db_utils.create_test_conductor()
        fake_db_volume_target = db_utils.create_test_volume_target(
            node_id=fake_db_allocation['id'])
        fake_db_volume_connector = db_utils.create_test_volume_connector(
            node_id=fake_db_allocation['id'])
        # Trait name aligns with create_test_node_trait.
        fake_trait = 'trait'
        fake_setting = 'FAKE_SETTING'
        db_utils.create_test_bios_setting(
            node_id=fake_db_node['id'],
            name=fake_setting,
            value=fake_setting)
        db_utils.create_test_node_trait(
            node_id=fake_db_node['id'])
        # dedicated node for portgroup addition test to avoid
        # false positives with test runners.
        db_utils.create_test_node(
            uuid='18a552fb-dcd2-43bf-9302-e4c93287be11')
        self.format_data.update({
            'node_ident': fake_db_node['uuid'],
            'allocated_node_ident': fake_db_node_alloced['uuid'],
            'port_ident': fake_db_port['uuid'],
            'portgroup_ident': fake_db_portgroup['uuid'],
            'chassis_ident': fake_db_chassis['uuid'],
            'deploy_template_ident': fake_db_deploy_template['uuid'],
            'allocation_ident': fake_db_allocation['uuid'],
            'conductor_ident': fake_db_conductor['hostname'],
            'vif_ident': fake_vif_port_id,
            # Can't use the same fake-driver as other tests can
            # pollute a global method cache in the API that is in the
            # test runner, resulting in false positives.
            'driver_name': 'fake-driverz',
            'bios_setting': fake_setting,
            'trait': fake_trait,
            'volume_target_ident': fake_db_volume_target['uuid'],
            'volume_connector_ident': fake_db_volume_connector['uuid'],
        })
@ddt.ddt
class TestRBACModelBeforeScopes(TestRBACModelBeforeScopesBase):
    """Legacy (pre-scope-enforcement) RBAC scenarios."""

    def _set_test_config(self):
        # NOTE(TheJulia): Sets default test conditions, in the event
        # oslo_policy defaults change.
        cfg.CONF.set_override('enforce_scope', False, group='oslo_policy')
        cfg.CONF.set_override('enforce_new_defaults', False,
                              group='oslo_policy')

    @ddt.file_data('test_rbac_legacy.yaml')
    @ddt.unpack
    def test_rbac_legacy(self, **kwargs):
        self._check_skip(**kwargs)
        self._test_request(**kwargs)
@ddt.ddt
class TestRBACScoped(TestRBACModelBeforeScopes):
    """Test Scoped RBAC access using our existing access policy."""

    def _set_test_config(self):
        # NOTE(TheJulia): This test class is as like a canary.
        # The operational intent is for it to kind of provide
        # a safety net as we're changing policy rules so we can
        # incremently disable the ones we *know* will no longer work
        # while we also enable the new ones in another test class with
        # the appropriate scope friendly changes. In other words, two
        # test changes will be needed for each which should also reduce
        # risk of accidential policy changes. It may just be Julia being
        # super risk-adverse, just let her roll with it and we will delete
        # this class later.
        # NOTE(TheJulia): This test class runs with test_rbac_legacy.yaml!
        cfg.CONF.set_override('enforce_scope', True, group='oslo_policy')
        cfg.CONF.set_override('enforce_new_defaults', True,
                              group='oslo_policy')

    @ddt.file_data('test_rbac_legacy.yaml')
    # BUG FIX: @ddt.unpack was missing. Without it, ddt passes each YAML
    # scenario as a single positional dict, which cannot bind to **kwargs
    # (TypeError). Every sibling data-driven test in this module unpacks.
    @ddt.unpack
    def test_scoped_canary(self, **kwargs):
        self._check_skip(**kwargs)
        self._test_request(**kwargs)
@ddt.ddt
class TestRBACScopedRequests(TestRBACModelBeforeScopesBase):
    """System-scoped RBAC scenarios, driven by test_rbac_system_scoped.yaml."""

    @ddt.file_data('test_rbac_system_scoped.yaml')
    @ddt.unpack
    def test_system_scoped(self, **kwargs):
        self._check_skip(**kwargs)
        self._test_request(**kwargs)
@ddt.ddt
class TestRBACProjectScoped(TestACLBase):
    """Project-scoped RBAC scenarios covering owner and lessee access."""

    def setUp(self):
        super(TestRBACProjectScoped, self).setUp()
        cfg.CONF.set_override('enforce_scope', True, group='oslo_policy')
        cfg.CONF.set_override('enforce_new_defaults', True,
                              group='oslo_policy')

    def _create_test_data(self):
        owner_node_ident = '1ab63b9e-66d7-4cd7-8618-dddd0f9f7881'
        lessee_node_ident = '38d5abed-c585-4fce-a57e-a2ffc2a2ec6f'
        owner_project_id = '70e5e25a-2ca2-4cb1-8ae8-7d8739cee205'
        lessee_project_id = 'f11853c7-fa9c-4db3-a477-c9d8e0dbbf13'
        unowned_node = db_utils.create_test_node(chassis_id=None)

        # owned node - since the tests use the same node for
        # owner/lessee checks
        owned_node = db_utils.create_test_node(
            uuid=owner_node_ident,
            owner=owner_project_id,
            last_error='meow',
            reservation='lolcats')
        owned_node_port = db_utils.create_test_port(
            uuid='ebe30f19-358d-41e1-8d28-fd7357a0164c',
            node_id=owned_node['id'],
            address='00:00:00:00:00:01')
        db_utils.create_test_port(
            uuid='21a3c5a7-1e14-44dc-a9dd-0c84d5477a57',
            node_id=owned_node['id'],
            address='00:00:00:00:00:02')
        owner_pgroup = db_utils.create_test_portgroup(
            uuid='b16efcf3-2990-41a1-bc1d-5e2c16f3d5fc',
            node_id=owned_node['id'],
            name='magicfoo',
            address='01:03:09:ff:01:01')
        db_utils.create_test_volume_target(
            uuid='a265e2f0-e97f-4177-b1c0-8298add53086',
            node_id=owned_node['id'])
        db_utils.create_test_volume_connector(
            uuid='65ea0296-219b-4635-b0c8-a6e055da878d',
            node_id=owned_node['id'],
            connector_id='iqn.2012-06.org.openstack.magic')
        fake_owner_allocation = db_utils.create_test_allocation(
            node_id=owned_node['id'],
            owner=owner_project_id,
            resource_class="CUSTOM_TEST")

        # Leased nodes
        fake_allocation_id = 61
        leased_node = db_utils.create_test_node(
            allocation_id=fake_allocation_id,
            uuid=lessee_node_ident,
            owner=owner_project_id,
            lessee=lessee_project_id,
            last_error='meow',
            reservation='lolcats')
        fake_db_volume_target = db_utils.create_test_volume_target(
            node_id=leased_node['id'])
        fake_db_volume_connector = db_utils.create_test_volume_connector(
            node_id=leased_node['id'])
        fake_db_port = db_utils.create_test_port(
            node_id=leased_node['id'])
        fake_db_portgroup = db_utils.create_test_portgroup(
            node_id=leased_node['id'])
        fake_trait = 'CUSTOM_MEOW'
        fake_vif_port_id = "0e21d58f-5de2-4956-85ff-33935ea1ca01"
        fake_leased_allocation = db_utils.create_test_allocation(
            id=fake_allocation_id,
            node_id=leased_node['id'],
            owner=lessee_project_id,
            resource_class="CUSTOM_LEASED")

        # Random objects that shouldn't be project visible
        other_port = db_utils.create_test_port(
            uuid='abfd8dbb-1732-449a-b760-2224035c6b99',
            address='00:00:00:00:00:ff')
        other_node = db_utils.create_test_node(
            uuid='573208e5-cd41-4e26-8f06-ef44022b3793')
        other_pgroup = db_utils.create_test_portgroup(
            uuid='5810f41c-6585-41fc-b9c9-a94f50d421b5',
            node_id=other_node['id'],
            name='corgis_rule_the_world',
            address='ff:ff:ff:ff:ff:0f')

        self.format_data.update({
            'node_ident': unowned_node['uuid'],
            'owner_node_ident': owner_node_ident,
            'lessee_node_ident': lessee_node_ident,
            'allocated_node_ident': lessee_node_ident,
            'volume_target_ident': fake_db_volume_target['uuid'],
            'volume_connector_ident': fake_db_volume_connector['uuid'],
            'lessee_port_ident': fake_db_port['uuid'],
            'lessee_portgroup_ident': fake_db_portgroup['uuid'],
            'trait': fake_trait,
            'vif_ident': fake_vif_port_id,
            'ind_component': 'component',
            'ind_ident': 'magic_light',
            'owner_port_ident': owned_node_port['uuid'],
            'other_port_ident': other_port['uuid'],
            'owner_portgroup_ident': owner_pgroup['uuid'],
            'other_portgroup_ident': other_pgroup['uuid'],
            'driver_name': 'fake-driverz',
            'owner_allocation': fake_owner_allocation['uuid'],
            'lessee_allocation': fake_leased_allocation['uuid']})

    @ddt.file_data('test_rbac_project_scoped.yaml')
    @ddt.unpack
    def test_project_scoped(self, **kwargs):
        self._check_skip(**kwargs)
        self._test_request(**kwargs)
| 1.71875 | 2 |
python-web/FORM/form_workshop/form_workshop/create_form/urls.py | yosif88/SoftUni | 0 | 12764168 | from django.urls import path
from form_workshop.create_form.views import show_form_data
urlpatterns = [
path('', show_form_data, name='show form')
] | 1.601563 | 2 |
etl_day_ahead_forecasting/pipeline/_defined/__init__.py | KamilMaziarz/etl-day-ahead-forecasting | 0 | 12764169 | <reponame>KamilMaziarz/etl-day-ahead-forecasting
from .pse import (
cross_border_pipeline,
eua_price_pipeline,
renewables_generation_pipeline,
system_operation_data_pipeline,
units_generation_pipeline,
units_outages_pipeline,
)
# Public API: the concrete, pre-defined PSE ETL pipelines re-exported above.
__all__ = [
    'cross_border_pipeline',
    'eua_price_pipeline',
    'renewables_generation_pipeline',
    'system_operation_data_pipeline',
    'units_generation_pipeline',
    'units_outages_pipeline',
]
| 1.09375 | 1 |
Day-03_Binary-Diagnostic/tests/test_day_03.py | richardangell/advent-of-code-2021 | 0 | 12764170 | <filename>Day-03_Binary-Diagnostic/tests/test_day_03.py
import puzzle_1
import puzzle_2
def test_puzzle_1():
    """Puzzle 1: the sample diagnostics should yield gamma=22 and epsilon=9."""
    report = puzzle_1.helpers.load_input("tests/input_1.txt", remove_lines_breaks=True)
    gamma, epsilon = puzzle_1.calculate_gamma_and_epsilon(report)
    assert (gamma, epsilon) == (22, 9)
def test_puzzle_2():
    """Puzzle 2: the sample diagnostics should yield oxygen=23 and CO2=10."""
    report = puzzle_2.helpers.load_input("tests/input_1.txt", remove_lines_breaks=True)
    oxygen, co2 = puzzle_2.calculate_oxygen_and_co2(report)
    assert (oxygen, co2) == (23, 10)
| 2.296875 | 2 |
azure_functions_devops_build/project/models/project_failed.py | coolgeeck/delwar1 | 16 | 12764171 | <reponame>coolgeeck/delwar1<gh_stars>10-100
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
class ProjectFailed(object):
    """Failure result of a project operation: never valid, carries a message."""

    def __init__(self, message):
        self.message = message
        # A failed result is, by definition, not valid.
        self.valid = False
| 2.265625 | 2 |
store/functions.py | tosbala/flink-statefun-bookstore | 2 | 12764172 | ################################################################################
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################
import os
import sys
from statefun import *
# Make the package root importable when this module is run as a script.
current = os.path.dirname(os.path.realpath(__file__))
parent = os.path.dirname(current)
sys.path.append(parent)
import serve
from models import *
# Keyed by ISBN (this function's target id).
@serve.functions.bind(typename="com.store.fn/order")
async def order_book(context: Context, message: Message):
    """Accept an incoming order and forward it to the order-updates function."""
    incoming_order = message.as_type(Order.TYPE)
    # Hand the order to the status-tracking function under the same ISBN id.
    update_message = message_builder(target_typename="com.store.fn/order-updates",
                                     target_id=context.address.id,
                                     value=incoming_order,
                                     value_type=Order.TYPE)
    context.send(update_message)
# uses isbn as target id
@serve.functions.bind(typename="com.store.fn/order-updates")
async def order_updates(context: Context, message: Message):
    """Translate order/payment state changes into status messages and follow-ups.

    Depending on the incoming message this may:
      * publish a human-readable status line on the ``status`` Kafka topic,
      * emit a payment (debit/refund) request on the ``payments`` topic,
      * forward a paid order to the warehouse function.
    """
    order_update = ''
    payment_request = dict()
    warehouse_request = ''
    if message.is_type(Order.TYPE):
        order = message.as_type(Order.TYPE)
        # print(f"order update '{OrderStatus(order.status).name.upper()}'", flush=True)
        # Fallback text when the status is not one of the handled values.
        order_update = f'{order.buyer}, sorry we couldnt process your order'
        if order.status == OrderStatus.RESERVE:
            order_update = f'{order.buyer}, Thank You. Your order for book {order.isbn} is being processed'
        elif order.status == OrderStatus.RESERVED:
            order_update = f'{order.buyer}, your order for book {order.isbn} is awaiting payment confirmation'
            # create a payment request
            payment_request = {'id': order.isbn, 'user': order.buyer, 'value': order.value, 'status': PaymentStatus.DEBIT}
        elif order.status == OrderStatus.NORESERVATION:
            order_update = f'{order.buyer}, couldnt process your order for book {order.isbn}. Amount will be refunded'
            # create a refund request
            payment_request = {'id': order.isbn, 'user': order.buyer, 'value': order.value, 'status': PaymentStatus.REFUND}
        elif order.status == OrderStatus.CONFIRMED:
            order_update = f'{order.buyer}, your order for book {order.isbn} is confirmed now'
        elif order.status == OrderStatus.NOSTOCK:
            order_update = f'{order.buyer}, book {order.isbn} you requested is out of stock now, please try again later'
        elif order.status == OrderStatus.DISPATCHED:
            order_update = f'{order.buyer}, book {order.isbn} is dispatched now'
    # NOTE(review): the Order branch tests ``Order.TYPE`` while this one tests
    # ``Payment`` directly - confirm Payment is itself a statefun Type in models.
    elif message.is_type(Payment):
        payment = message.as_type(Payment)
        if payment['status'] == PaymentStatus.DEBITED:
            order_update = f"{payment['user']}, payment for book {payment['id']} is confirmed now"
            # make a warehouse request to process the reservation
            order = Order(buyer=payment['user'], isbn=context.address.id, status=OrderStatus.PAID, value=payment['value'])
            warehouse_request = 'com.warehouse.fn/order'
        if payment['status'] == PaymentStatus.REFUNDED:
            # BUG FIX: user-facing message previously misspelled "amount" as "amound".
            order_update = f"{payment['user']}, amount {payment['value']} paid for {payment['id']} is refunded now"
    if payment_request:
        # send out a payment request
        context.send_egress(
            kafka_egress_message(
                typename="com.payments/orders",
                topic="payments",
                key=order.buyer,
                value=payment_request,
                value_type=Payment))
    if warehouse_request:
        # make a warehouse request
        context.send(
            message_builder(
                target_typename=warehouse_request,
                target_id=context.address.id,
                value=order,
                value_type=Order.TYPE))
    if order_update:
        # send out the order status message
        context.send_egress(
            kafka_egress_message(
                typename="com.bookstore/coms",
                topic="status",
                key=context.address.id,
                value=order_update))
if __name__ == '__main__':
    # Start the stateful-functions HTTP handler when run as a script.
    serve.run()
| 1.875 | 2 |
rpython/memory/gc/test/test_rawrefcount.py | wdv4758h/mu-client-pypy | 0 | 12764173 | import py
from rpython.rtyper.lltypesystem import lltype, llmemory
from rpython.memory.gc.incminimark import IncrementalMiniMarkGC
from rpython.memory.gc.test.test_direct import BaseDirectGCTest
from rpython.rlib.rawrefcount import REFCNT_FROM_PYPY
from rpython.rlib.rawrefcount import REFCNT_FROM_PYPY_LIGHT
# Shorthands for the raw PyObject header type used by the incminimark GC.
PYOBJ_HDR = IncrementalMiniMarkGC.PYOBJ_HDR
PYOBJ_HDR_PTR = IncrementalMiniMarkGC.PYOBJ_HDR_PTR

# Doubly-linked node struct used as the GC-managed side of each rawrefcount pair.
S = lltype.GcForwardReference()
S.become(lltype.GcStruct('S',
                         ('x', lltype.Signed),
                         ('prev', lltype.Ptr(S)),
                         ('next', lltype.Ptr(S))))
class TestRawRefCount(BaseDirectGCTest):
GCClass = IncrementalMiniMarkGC
def _collect(self, major, expected_trigger=0):
    """Run a GC collection and invoke the rawrefcount callback.

    :param major: run a full major collection if True, else a minor one.
    :param expected_trigger: how many times the rawrefcount "dealloc
        trigger" callback is expected to fire during the invoke step.
    """
    if major:
        self.gc.collect()
    else:
        self.gc.minor_collection()
    count1 = len(self.trigger)
    self.gc.rrc_invoke_callback()
    count2 = len(self.trigger)
    assert count2 - count1 == expected_trigger
def _rawrefcount_pair(self, intval, is_light=False, is_pyobj=False,
                      create_old=False, create_immortal=False,
                      force_external=False):
    """Create a linked (GC object, raw PyObject header) pair for the tests.

    Returns ``(p1, p1ref, r1, r1addr, check_alive)`` where ``check_alive``
    asserts the pair is still linked with the expected extra refcount.
    """
    if is_light:
        rc = REFCNT_FROM_PYPY_LIGHT
    else:
        rc = REFCNT_FROM_PYPY
    self.trigger = []
    self.gc.rawrefcount_init(lambda: self.trigger.append(1))
    #
    if create_immortal:
        p1 = lltype.malloc(S, immortal=True)
    else:
        saved = self.gc.nonlarge_max
        try:
            # Shrinking nonlarge_max forces the allocation to be "external".
            if force_external:
                self.gc.nonlarge_max = 1
            p1 = self.malloc(S)
        finally:
            self.gc.nonlarge_max = saved
    p1.x = intval
    if create_immortal:
        self.consider_constant(p1)
    elif create_old:
        # Age the object into the old generation via a minor collection.
        self.stackroots.append(p1)
        self._collect(major=False)
        p1 = self.stackroots.pop()
    p1ref = lltype.cast_opaque_ptr(llmemory.GCREF, p1)
    r1 = lltype.malloc(PYOBJ_HDR, flavor='raw', immortal=create_immortal)
    r1.ob_refcnt = rc
    r1.ob_pypy_link = 0
    r1addr = llmemory.cast_ptr_to_adr(r1)
    if is_pyobj:
        assert not is_light
        self.gc.rawrefcount_create_link_pyobj(p1ref, r1addr)
    else:
        self.gc.rawrefcount_create_link_pypy(p1ref, r1addr)
    assert r1.ob_refcnt == rc
    assert r1.ob_pypy_link != 0

    def check_alive(extra_refcount):
        # Verify refcount, link, payload, and the reverse mapping.
        assert r1.ob_refcnt == rc + extra_refcount
        assert r1.ob_pypy_link != 0
        p1ref = self.gc.rawrefcount_to_obj(r1addr)
        p1 = lltype.cast_opaque_ptr(lltype.Ptr(S), p1ref)
        assert p1.x == intval
        if not is_pyobj:
            assert self.gc.rawrefcount_from_obj(p1ref) == r1addr
        else:
            assert self.gc.rawrefcount_from_obj(p1ref) == llmemory.NULL
        return p1
    return p1, p1ref, r1, r1addr, check_alive
def test_rawrefcount_objects_basic(self, old=False):
p1, p1ref, r1, r1addr, check_alive = (
self._rawrefcount_pair(42, is_light=True, create_old=old))
p2 = self.malloc(S)
p2.x = 84
p2ref = lltype.cast_opaque_ptr(llmemory.GCREF, p2)
r2 = lltype.malloc(PYOBJ_HDR, flavor='raw')
r2.ob_refcnt = 1
r2.ob_pypy_link = 0
r2addr = llmemory.cast_ptr_to_adr(r2)
# p2 and r2 are not linked
assert r1.ob_pypy_link != 0
assert r2.ob_pypy_link == 0
assert self.gc.rawrefcount_from_obj(p1ref) == r1addr
assert self.gc.rawrefcount_from_obj(p2ref) == llmemory.NULL
assert self.gc.rawrefcount_to_obj(r1addr) == p1ref
assert self.gc.rawrefcount_to_obj(r2addr) == lltype.nullptr(
llmemory.GCREF.TO)
lltype.free(r1, flavor='raw')
lltype.free(r2, flavor='raw')
def test_rawrefcount_objects_collection_survives_from_raw(self, old=False):
p1, p1ref, r1, r1addr, check_alive = (
self._rawrefcount_pair(42, is_light=True, create_old=old))
check_alive(0)
r1.ob_refcnt += 1
self._collect(major=False)
check_alive(+1)
self._collect(major=True)
check_alive(+1)
r1.ob_refcnt -= 1
self._collect(major=False)
p1 = check_alive(0)
self._collect(major=True)
py.test.raises(RuntimeError, "r1.ob_refcnt") # dead
py.test.raises(RuntimeError, "p1.x") # dead
self.gc.check_no_more_rawrefcount_state()
assert self.trigger == []
assert self.gc.rawrefcount_next_dead() == llmemory.NULL
def test_rawrefcount_dies_quickly(self, old=False):
p1, p1ref, r1, r1addr, check_alive = (
self._rawrefcount_pair(42, is_light=True, create_old=old))
check_alive(0)
self._collect(major=False)
if old:
check_alive(0)
self._collect(major=True)
py.test.raises(RuntimeError, "r1.ob_refcnt") # dead
py.test.raises(RuntimeError, "p1.x") # dead
self.gc.check_no_more_rawrefcount_state()
def test_rawrefcount_objects_collection_survives_from_obj(self, old=False):
p1, p1ref, r1, r1addr, check_alive = (
self._rawrefcount_pair(42, is_light=True, create_old=old))
check_alive(0)
self.stackroots.append(p1)
self._collect(major=False)
check_alive(0)
self._collect(major=True)
check_alive(0)
p1 = self.stackroots.pop()
self._collect(major=False)
check_alive(0)
assert p1.x == 42
self._collect(major=True)
py.test.raises(RuntimeError, "r1.ob_refcnt") # dead
py.test.raises(RuntimeError, "p1.x") # dead
self.gc.check_no_more_rawrefcount_state()
def test_rawrefcount_objects_basic_old(self):
self.test_rawrefcount_objects_basic(old=True)
def test_rawrefcount_objects_collection_survives_from_raw_old(self):
self.test_rawrefcount_objects_collection_survives_from_raw(old=True)
def test_rawrefcount_dies_quickly_old(self):
self.test_rawrefcount_dies_quickly(old=True)
def test_rawrefcount_objects_collection_survives_from_obj_old(self):
self.test_rawrefcount_objects_collection_survives_from_obj(old=True)
def test_pypy_nonlight_survives_from_raw(self, old=False):
p1, p1ref, r1, r1addr, check_alive = (
self._rawrefcount_pair(42, is_light=False, create_old=old))
check_alive(0)
r1.ob_refcnt += 1
self._collect(major=False)
check_alive(+1)
self._collect(major=True)
check_alive(+1)
r1.ob_refcnt -= 1
self._collect(major=False)
p1 = check_alive(0)
self._collect(major=True, expected_trigger=1)
py.test.raises(RuntimeError, "p1.x") # dead
assert r1.ob_refcnt == 1 # in the pending list
assert r1.ob_pypy_link == 0
assert self.gc.rawrefcount_next_dead() == r1addr
assert self.gc.rawrefcount_next_dead() == llmemory.NULL
assert self.gc.rawrefcount_next_dead() == llmemory.NULL
self.gc.check_no_more_rawrefcount_state()
lltype.free(r1, flavor='raw')
def test_pypy_nonlight_survives_from_obj(self, old=False):
p1, p1ref, r1, r1addr, check_alive = (
self._rawrefcount_pair(42, is_light=False, create_old=old))
check_alive(0)
self.stackroots.append(p1)
self._collect(major=False)
check_alive(0)
self._collect(major=True)
check_alive(0)
p1 = self.stackroots.pop()
self._collect(major=False)
check_alive(0)
assert p1.x == 42
self._collect(major=True, expected_trigger=1)
py.test.raises(RuntimeError, "p1.x") # dead
assert r1.ob_refcnt == 1
assert r1.ob_pypy_link == 0
assert self.gc.rawrefcount_next_dead() == r1addr
self.gc.check_no_more_rawrefcount_state()
lltype.free(r1, flavor='raw')
def test_pypy_nonlight_dies_quickly(self, old=False):
p1, p1ref, r1, r1addr, check_alive = (
self._rawrefcount_pair(42, is_light=False, create_old=old))
check_alive(0)
if old:
self._collect(major=False)
check_alive(0)
self._collect(major=True, expected_trigger=1)
else:
self._collect(major=False, expected_trigger=1)
py.test.raises(RuntimeError, "p1.x") # dead
assert r1.ob_refcnt == 1
assert r1.ob_pypy_link == 0
assert self.gc.rawrefcount_next_dead() == r1addr
self.gc.check_no_more_rawrefcount_state()
lltype.free(r1, flavor='raw')
def test_pypy_nonlight_survives_from_raw_old(self):
self.test_pypy_nonlight_survives_from_raw(old=True)
def test_pypy_nonlight_survives_from_obj_old(self):
self.test_pypy_nonlight_survives_from_obj(old=True)
def test_pypy_nonlight_dies_quickly_old(self):
self.test_pypy_nonlight_dies_quickly(old=True)
@py.test.mark.parametrize('external', [False, True])
def test_pyobject_pypy_link_dies_on_minor_collection(self, external):
p1, p1ref, r1, r1addr, check_alive = (
self._rawrefcount_pair(42, is_pyobj=True, force_external=external))
check_alive(0)
r1.ob_refcnt += 1 # the pyobject is kept alive
self._collect(major=False)
assert r1.ob_refcnt == 1 # refcnt dropped to 1
assert r1.ob_pypy_link == 0 # detached
self.gc.check_no_more_rawrefcount_state()
lltype.free(r1, flavor='raw')
@py.test.mark.parametrize('old,external', [
(False, False), (True, False), (False, True)])
def test_pyobject_dies(self, old, external):
p1, p1ref, r1, r1addr, check_alive = (
self._rawrefcount_pair(42, is_pyobj=True, create_old=old,
force_external=external))
check_alive(0)
if old:
self._collect(major=False)
check_alive(0)
self._collect(major=True, expected_trigger=1)
else:
self._collect(major=False, expected_trigger=1)
assert r1.ob_refcnt == 1 # refcnt 1, in the pending list
assert r1.ob_pypy_link == 0 # detached
assert self.gc.rawrefcount_next_dead() == r1addr
self.gc.check_no_more_rawrefcount_state()
lltype.free(r1, flavor='raw')
@py.test.mark.parametrize('old,external', [
(False, False), (True, False), (False, True)])
def test_pyobject_survives_from_obj(self, old, external):
p1, p1ref, r1, r1addr, check_alive = (
self._rawrefcount_pair(42, is_pyobj=True, create_old=old,
force_external=external))
check_alive(0)
self.stackroots.append(p1)
self._collect(major=False)
check_alive(0)
self._collect(major=True)
check_alive(0)
p1 = self.stackroots.pop()
self._collect(major=False)
check_alive(0)
assert p1.x == 42
assert self.trigger == []
self._collect(major=True, expected_trigger=1)
py.test.raises(RuntimeError, "p1.x") # dead
assert r1.ob_refcnt == 1
assert r1.ob_pypy_link == 0
assert self.gc.rawrefcount_next_dead() == r1addr
self.gc.check_no_more_rawrefcount_state()
lltype.free(r1, flavor='raw')
def test_pyobject_attached_to_prebuilt_obj(self):
p1, p1ref, r1, r1addr, check_alive = (
self._rawrefcount_pair(42, create_immortal=True))
check_alive(0)
self._collect(major=True)
check_alive(0)
| 1.765625 | 2 |
app_chat/urls.py | wastedideas/django_chat_project | 0 | 12764174 | <reponame>wastedideas/django_chat_project
from django.urls import path
from app_chat.views import chats_list_view, new_message_view, dialog_view
# URL routes for the chat application.
urlpatterns = [
    # Overview listing all of the current user's chats.
    path('', chats_list_view, name='chats_list'),
    # Start a new chat with the named user.
    path('new_chat/<str:new_chat_with>', new_message_view, name='new_chat_page'),
    # Open an existing dialog by its numeric id.
    path('chat_with/<int:chat_id>', dialog_view, name='chat_with'),
]
| 1.953125 | 2 |
stlearn/plotting/gene_plot.py | duypham2108/stLearn | 0 | 12764175 | from matplotlib import pyplot as plt
from PIL import Image
import pandas as pd
import matplotlib
import numpy as np
from typing import Optional, Union, Mapping # Special
from typing import Sequence, Iterable # ABCs
from typing import Tuple # Classes
from anndata import AnnData
import warnings
from stlearn.plotting.classes import GenePlot
from stlearn.plotting.classes_bokeh import BokehGenePlot
from stlearn.plotting._docs import doc_spatial_base_plot, doc_gene_plot
from stlearn.utils import Empty, _empty, _AxesSubplot, _docs_params
from bokeh.io import push_notebook, output_notebook
from bokeh.plotting import show
@_docs_params(spatial_base_plot=doc_spatial_base_plot, gene_plot=doc_gene_plot)
def gene_plot(
    adata: AnnData,
    gene_symbols: Union[str, list] = None,
    threshold: Optional[float] = None,
    method: str = "CumSum",
    contour: bool = False,
    step_size: Optional[int] = None,
    title: Optional["str"] = None,
    figsize: Optional[Tuple[float, float]] = None,
    cmap: Optional[str] = "Spectral_r",
    use_label: Optional[str] = None,
    list_clusters: Optional[list] = None,
    ax: Optional[matplotlib.axes._subplots.Axes] = None,
    fig: Optional[matplotlib.figure.Figure] = None,
    show_plot: Optional[bool] = True,
    show_axis: Optional[bool] = False,
    show_image: Optional[bool] = True,
    show_color_bar: Optional[bool] = True,
    color_bar_label: Optional[str] = "",
    crop: Optional[bool] = True,
    margin: Optional[bool] = 100,
    size: Optional[float] = 7,
    image_alpha: Optional[float] = 1.0,
    cell_alpha: Optional[float] = 0.7,
    use_raw: Optional[bool] = False,
    fname: Optional[str] = None,
    dpi: Optional[int] = 120,
) -> Optional[AnnData]:
    """\
    Allows the visualization of a single gene or multiple genes as the values
    of dot points or contour in the Spatial transcriptomics array.
    Parameters
    -------------------------------------
    {spatial_base_plot}
    {gene_plot}
    Examples
    -------------------------------------
    >>> import stlearn as st
    >>> adata = st.datasets.example_bcba()
    >>> genes = ["BRCA1","BRCA2"]
    >>> st.pl.gene_plot(adata, gene_symbols = genes)
    """
    # NOTE: the docstring above is formatted at import time by _docs_params;
    # the {spatial_base_plot}/{gene_plot} placeholders must be preserved.
    # GenePlot performs all rendering/saving as a side effect of construction.
    # NOTE(review): despite the `-> Optional[AnnData]` annotation, this
    # function always returns None -- confirm whether callers rely on that.
    GenePlot(
        adata,
        gene_symbols=gene_symbols,
        threshold=threshold,
        method=method,
        contour=contour,
        step_size=step_size,
        title=title,
        figsize=figsize,
        cmap=cmap,
        use_label=use_label,
        list_clusters=list_clusters,
        ax=ax,
        fig=fig,
        show_plot=show_plot,
        show_axis=show_axis,
        show_image=show_image,
        show_color_bar=show_color_bar,
        color_bar_label=color_bar_label,
        crop=crop,
        margin=margin,
        size=size,
        image_alpha=image_alpha,
        cell_alpha=cell_alpha,
        use_raw=use_raw,
        fname=fname,
        dpi=dpi,
    )
def gene_plot_interactive(adata: AnnData):
    """Open an interactive Bokeh gene plot for `adata` inside a Jupyter
    notebook.
    Args:
        adata: Annotated data matrix to visualize.
    """
    bokeh_object = BokehGenePlot(adata)
    output_notebook()  # route Bokeh output to the current notebook
    show(bokeh_object.app, notebook_handle=True)
| 2.234375 | 2 |
src/utils.py | caijh33/I3D_CTC | 0 | 12764176 | import csv
import numpy as np
import torch
import time
class Timer(object):
    """Stopwatch-style timer that accumulates elapsed wall-clock time
    across tic()/toc() calls and can format durations for display."""

    def __init__(self):
        super(Timer, self).__init__()
        self.total_time = 0.0
        self.calls = 0
        self.start_time = 0.0
        self.diff = 0.0
        self.average_time = 0.0

    def tic(self):
        """Start (or restart) the timer."""
        self.start_time = time.time()

    def toc(self, average=False):
        """Record the time since the last tic().

        :param average: if True return the running average over all calls,
            otherwise return the latest elapsed interval.
        """
        self.diff = time.time() - self.start_time
        self.calls += 1
        self.total_time += self.diff
        self.average_time = self.total_time / self.calls
        return self.average_time if average else self.diff

    def format(self, time):
        """Render a duration in seconds as a '<d>d:<h>h:<m>m:<s>s' string."""
        minutes, seconds = divmod(time, 60)
        hours, minutes = divmod(minutes, 60)
        days, hours = divmod(hours, 24)
        return "{}d:{}h:{}m:{}s".format(int(days), int(hours),
                                        int(minutes), int(seconds))

    def end_time(self, extra_time):
        """Return the local-time string `extra_time` seconds from now
        (e.g. the projected end time of a training run)."""
        return time.asctime(time.localtime(time.time() + extra_time))
class AverageMeter(object):
    """Tracks the most recent value plus a running sum, count and average."""

    def __init__(self):
        self.reset()

    def reset(self):
        """Clear all statistics back to zero."""
        self.val = 0
        self.avg = 0
        self.sum = 0
        self.count = 0

    def update(self, val, n=1):
        """Record `val` observed `n` times and refresh the running average."""
        self.val = val
        self.sum = self.sum + val * n
        self.count = self.count + n
        self.avg = self.sum / self.count
class Logger(object):
    """Tab-separated log writer: one header row, then one row per log() call."""

    def __init__(self, path, header):
        # newline='' is the csv-module-documented way to open the file;
        # without it the writer's '\r\n' terminator is doubled on Windows.
        self.log_file = open(path, 'w', newline='')
        self.logger = csv.writer(self.log_file, delimiter='\t')
        self.logger.writerow(header)
        self.header = header

    def __del__(self):
        # Bug fix: this method was named '__del' (missing trailing
        # underscores), so it was never invoked and the file handle leaked.
        log_file = getattr(self, 'log_file', None)
        if log_file is not None:
            log_file.close()

    def log(self, values):
        """Append one row; `values` must contain every column in `header`."""
        write_values = []
        for col in self.header:
            assert col in values
            write_values.append(values[col])
        self.logger.writerow(write_values)
        self.log_file.flush()  # keep the log readable while training runs
def load_value_file(file_path):
    """Read a file containing a single number and return it as a float."""
    with open(file_path, 'r') as input_file:
        contents = input_file.read()
    return float(contents.rstrip('\n\r'))
def calculate_accuracy(outputs, targets):
    """Top-1 classification accuracy for a batch.

    :param outputs: (batch, num_classes) tensor of class scores.
    :param targets: (batch,) tensor of ground-truth class indices.
    :return: fraction of samples whose argmax prediction matches the target.
    """
    batch_size = targets.size(0)
    _, pred = outputs.topk(1, 1, True)
    pred = pred.t()
    correct = pred.eq(targets.view(1, -1))
    # Bug fix: the legacy '.data[0]' indexing raises on the 0-dim tensor
    # returned by sum() in PyTorch >= 0.4; .item() is the supported way
    # to extract a Python number.
    n_correct_elems = correct.float().sum().item()
    return n_correct_elems / batch_size
class MixUp(object):
    """Convex-combination ("mixup") data augmentation helper."""

    def __init__(self, alpha):
        # Beta-distribution concentration; alpha <= 0 disables mixing (lam=1).
        self.alpha = alpha

    def mixup_data(self, x, y, use_cuda=True):
        """Mix the batch with a shuffled copy of itself.

        Returns (mixed_x, y_a, y_b, lam) where mixed_x = lam*x + (1-lam)*x[perm],
        y_a/y_b are the original and permuted targets, and lam is the weight.
        """
        lam = np.random.beta(self.alpha, self.alpha) if self.alpha > 0 else 1
        batch_size = x.size(0)
        index = torch.randperm(batch_size)
        if use_cuda:
            index = index.cuda()
        mixed_x = lam * x + (1 - lam) * x[index, :]
        return mixed_x, y, y[index], lam

    def mixup_criterion(self, criterion, pred, y_a, y_b, lam):
        """Loss for mixed inputs: lam-weighted sum of the losses against
        both sets of targets."""
        loss_a = criterion(pred, y_a)
        loss_b = criterion(pred, y_b)
        return lam * loss_a + (1 - lam) * loss_b
class TrainingHelper(object):
    """Miscellaneous helpers used around the training loop."""

    def __init__(self, image):
        self.image = image

    def congratulation(self):
        """Print a growing star banner followed by a completion message.

        Bug fix: the original body was ``print('*')*i`` which raises
        ``TypeError`` (``print`` returns ``None``, which cannot be
        multiplied); the intended output is a row of ``i`` stars.
        """
        for i in range(40):
            print('*' * i)
        print('finish training')
def submission_file(ids, outputs, filename):
    """Write one line per id: the id followed by its scores ('%g' format)."""
    with open(filename, 'w') as f:
        for vid, output in zip(ids, outputs):
            formatted = ' '.join('{:g}'.format(score) for score in output)
            f.write('{} {}\n'.format(vid, formatted))
audioPlayer.py | Razpudding/rpi-duckling | 0 | 12764177 | import pygame
pygame.mixer.init()
pygame.mixer.music.load("myFile.wav")
pygame.mixer.music.play()
while pygame.mixer.music.get_busy() == True:
continue
| 2.578125 | 3 |
app/main/views.py | Maryan23/News-On-The-Go | 0 | 12764178 | from flask import render_template
from . import main
from ..requests import get_sources, get_articles
#Views
@main.route('/')
def index():
    '''
    View function for the root page: fetches all available news sources
    and renders the index template with them.
    '''
    #Getting news sources
    sources = get_sources()
    title = 'News OTG'
    return render_template('index.html',title = title, sources = sources)
@main.route('/articles/<sources_id>')
def articles(sources_id):
    '''
    View function for the articles page: fetches the articles published
    by the source identified by `sources_id` and renders them.
    '''
    articles = get_articles(sources_id)
    return render_template('articles.html',articles = articles)
tests/izmat_zdecomp.py | rsoren/limetr | 0 | 12764179 | # check utils zdecomp
def izmat_zdecomp():
import numpy as np
from limetr.special_mat import izmat
ok = True
tol = 1e-10
# setup problem
# -------------------------------------------------------------------------
k = 3
n = [5, 2, 4]
z_list = []
tr_u_list = []
tr_s_list = []
for i in range(len(n)):
z_list.append(np.random.randn(n[i], k))
u, s, vt = np.linalg.svd(z_list[-1], full_matrices=False)
tr_u_list.append(u)
tr_s_list.append(s)
z = np.vstack(z_list)
tr_u = np.hstack([u.reshape(u.size, order='F') for u in tr_u_list])
tr_s = np.hstack(tr_s_list)
my_u = np.zeros(tr_u.size)
my_s = np.zeros(tr_s.size)
nz = [z_sub.shape[0] for z_sub in z_list]
nu = [u_sub.size for u_sub in tr_u_list]
ns = [s_sub.size for s_sub in tr_s_list]
izmat.zdecomp(nz, nu, ns, z, my_u, my_s)
if not ok:
print('err in zdecomp')
print('err:', err)
return ok
| 2.234375 | 2 |
airq/aq_dashboard.py | maximematerno/Sprint-challenge | 0 | 12764180 | """OpenAQ Air Quality Dashboard with Flask."""
from datetime import datetime
from flask import Flask, render_template
from flask_sqlalchemy import SQLAlchemy
import openaq
APP = Flask(__name__)
# Local SQLite file used as the cache for pulled OpenAQ measurements.
APP.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///db.sqlite3'
DB = SQLAlchemy(APP)
class Record(DB.Model):
    """One air-quality measurement cached from the OpenAQ API."""
    id = DB.Column(DB.Integer, primary_key=True)
    utc_datetime = DB.Column(DB.DateTime)  # measurement timestamp (UTC)
    location = DB.Column(DB.String(50))  # OpenAQ station/location name
    value = DB.Column(DB.Float, nullable=False)  # measured value (e.g. pm25)
    def __repr__(self):
        return f'< Time {self.utc_datetime} --- Value {self.value} >'
def get_measurements(city='Los Angeles', parameter='pm25'):
    """Fetch measurements from the OpenAQ API for `city`/`parameter`.
    Returns a list of dicts with keys 'utc_datetime' (timezone-aware
    datetime parsed from the API's UTC timestamp), 'location' and 'value'.
    """
    api = openaq.OpenAQ()
    status, body = api.measurements(city=city, parameter=parameter)
    return [{'utc_datetime': datetime.strptime(result['date']['utc'],
             '%Y-%m-%dT%H:%M:%S.%f%z'),
             'location': result['location'],
             'value': result['value']} for result in body['results']]
@APP.route('/')
def root():
    """Base view."""
    # Only show records with value >= 10 -- presumably a "noteworthy
    # pollution" threshold; confirm against the project requirements.
    records = Record.query.filter(Record.value >= 10).all()
    return render_template('base.html',
                           city='Los Angeles',
                           records=records)
@APP.route('/refresh')
def refresh():
    """New data replace existing one."""
    # Rebuild the schema from scratch, then re-pull the latest measurements.
    DB.drop_all()
    DB.create_all()
    data = get_measurements()
    for record in data:
        DB.session.add(Record(utc_datetime=record['utc_datetime'],
                              location=record['location'],
                              value=record['value']))
    DB.session.commit()
    return 'Data refreshed!'
@APP.route('/locations/<city>')
def locations(city='Los Angeles'):
    """List OpenAQ measurement locations (name + coordinates) for `city`."""
    api = openaq.OpenAQ()
    status, body = api.locations(city=city)
    locations = [{'name': loc['location'],
                  'latitude': loc['coordinates']['latitude'],
                  'longitude': loc['coordinates']['longitude']} for loc in body['results']]
    return render_template('locations.html',
                           city=city,
                           locations=locations)
PKRL/ts_data.py | pkmtum/Probabilistic_Koopman_Learning | 3 | 12764181 | '''
Created on 13 Aug 2020
@author: <NAME>
'''
from .ts_util import *
import numpy as np
from typing import List, Tuple
class ts_data(object):
    def __init__(self, ts: np.array, prop_train: float =0.75, has_time:bool = True, delta_t:float = 1.0):
        '''
        Utility object for time series data.
        :param ts: PxN time series matrix of P timesteps consisting of N-1 features or PxN snapshot matrix of P timesteps consisting of N features
        :param prop_train: proportion of training data
        :param has_time: True if ts already contains a time column; if False a uniform time column is appended
        :param delta_t: timestep to be applied if ts is a snapshot matrix
        '''
        if (has_time):
            self.ts = ts
        else:
            # Append a uniform time column (ts_util helper).
            self.ts = add_uni_time(ts, delta_t)
        self._create_train_test(prop_train)
        # Filled in lazily by standardize():
        self.train_ts_centered = None
        self.test_ts_centered = None
        self.ts_centered = None
        self.train_ts_norm = None
        self.test_ts_norm = None
        self.ts_norm = None
        self.train_mean = None
        self.train_std = None
        self.train_inv_std = None
        # Filled in lazily by generate_train_model_inputs():
        self.x = None
        self.x_val = None
        self.x_all = None
        self.x_inp = None
        self.train_chunks = None
        self.x_val_inp = None
        self.x_val2_inp = None
    @property
    def num_features(self):
        """Number of feature columns (one column of `ts` holds time)."""
        return self.ts.shape[1] - 1
    @property
    def num_obs(self):
        """Total number of observations (rows) in the full series."""
        return self.ts.shape[0]
    @property
    def num_train(self):
        """Number of observations in the training split."""
        return self.train_ts.shape[0]
    @property
    def num_train_filtered(self):
        """Number of training rows remaining after adaptive sampling."""
        return self.x.shape[0]
    @property
    def num_test(self):
        """Number of observations in the test split."""
        return self.test_ts.shape[0]
    @property
    def num_test_filtered(self):
        """Number of test rows remaining after adaptive sampling."""
        return self.x_val.shape[0]
    def _create_train_test(self, prop_train=0.75):
        # Split the series into train/test portions (ts_util helper).
        self.train_ts, self.test_ts = train_test_split_ts(self.ts, prop_train)
    def standardize(self):
        '''
        Standardize all training and evaluation set with the mean and the standard deviation matrix of the training set.
        '''
        self.train_ts_centered, self.train_mean = center_ts(self.train_ts)
        self.test_ts_centered = translate_ts(self.test_ts, -self.train_mean)
        self.ts_centered = translate_ts(self.ts, -self.train_mean)
        self.train_std = std_ts(self.train_ts_centered)
        self.train_inv_std = np.linalg.inv(self.train_std)
        self.train_ts_norm = scale_ts(self.train_ts_centered, self.train_inv_std)
        self.test_ts_norm = scale_ts(self.test_ts_centered, self.train_inv_std)
        self.ts_norm = scale_ts(self.ts_centered, self.train_inv_std)
    def generate_train_model_inputs(self, num_train_chunks:int =1, rate:float =0):
        '''
        'Hankelize' the training data and apply adaptive sampling rate to all data sets.
        :param num_train_chunks: number of chunks
        :param rate: threshold value for sampling
        '''
        self.x = adapt_sampling_rate(self.train_ts_norm, rate)
        self.x_val = adapt_sampling_rate(self.test_ts_norm, rate)
        self.x_all = adapt_sampling_rate(self.ts_norm, rate)
        self.x_inp, self.train_chunks = prepare_train_model_data(self.x, num_train_chunks)
        self.x_val_inp, _ = prepare_train_model_data(self.x_val, 1)
        # Re-base the (last-column) time stamps of the tail of the full
        # series so the second validation set starts at time zero.
        self.x_all[:,-1] = self.x_all[:,-1] - self.x_all[self.train_chunks[-1],-1]
        self.x_val2_inp, _ = prepare_train_model_data(self.x_all[self.train_chunks[-1]:], 1)
| 2.421875 | 2 |
container/pyf/schemas/event.py | Pompino/react-components-23KB | 2 | 12764182 | from pydantic import BaseModel as BaseSchema
from typing import List, Optional
from simpleschemas import UserGetSimpleSchema, GroupGetSimpleSchema, EventGetSimpleSchema, ClassRoomGetSimpleSchema
class EventGetSchema(EventGetSimpleSchema):
    """Full event schema: the simple event fields plus related collections."""
    users: List[UserGetSimpleSchema]  # presumably the event's participants -- confirm against the ORM model
    groups: List[GroupGetSimpleSchema]  # groups associated with the event
    classrooms: List[ClassRoomGetSimpleSchema]  # classrooms associated with the event
    class Config:
        orm_mode = True  # allow constructing the schema directly from ORM objects
examples/control/tracking.py | ajthor/socks | 0 | 12764183 | # %% [markdown]
"""
# Target Tracking
This example demonstrates the kernel-based stochastic optimal control algorithm and the
dynamic programming algorithm. By default, it uses a nonholonomic vehicle system
(unicycle dynamics), and seeks to track a v-shaped trajectory.
To run the example, use the following command:
```shell
python examples/control/tracking.py
```
"""
# %%
import gym
import numpy as np
from gym.envs.registration import make
from gym_socks.algorithms.control.kernel_control_fwd import KernelControlFwd
from gym_socks.algorithms.control.kernel_control_bwd import KernelControlBwd
from functools import partial
from sklearn.metrics.pairwise import rbf_kernel
from gym_socks.sampling import sample
from gym_socks.sampling import default_sampler
from gym_socks.sampling import random_sampler
from gym_socks.sampling import grid_sampler
from gym_socks.utils.grid import make_grid_from_ranges
# %% [markdown]
# Configuration variables.
# %%
system_id = "NonholonomicVehicleEnv-v0"
sigma = 3  # Kernel bandwidth parameter (used below as gamma = 1 / (2 * sigma**2)).
regularization_param = 1e-7  # Regularization parameter.
time_horizon = 20  # Number of simulated time steps.
# For controlling randomness.
seed = 12345
# %% [markdown]
# ## Generate the Sample
#
# We generate a random sample from the system, and choose random control actions and
# random initial conditions.
# %%
env = make(system_id)
env.sampling_time = 0.1
env.seed(seed)
# Admissible control inputs: bounds on the two action components
# (per the vehicle dynamics, presumably forward speed and turn rate).
env.action_space = gym.spaces.Box(
    low=np.array([0.1, -10.1], dtype=np.float32),
    high=np.array([1.1, 10.1], dtype=np.float32),
    shape=(2,),
    dtype=np.float32,
    seed=seed,
)
sample_size = 1500
# Initial conditions are drawn uniformly over position (x, y) and heading.
sample_space = gym.spaces.Box(
    low=np.array([-1.2, -1.2, -2 * np.pi], dtype=np.float32),
    high=np.array([1.2, 1.2, 2 * np.pi], dtype=np.float32),
    shape=(3,),
    dtype=np.float32,
    seed=seed,
)
state_sampler = random_sampler(sample_space=sample_space)
action_sampler = random_sampler(sample_space=env.action_space)
# Draw sample_size observations of the system under random states/actions.
S = sample(
    sampler=default_sampler(
        state_sampler=state_sampler, action_sampler=action_sampler, env=env
    ),
    sample_size=sample_size,
)
# Grid of candidate control actions the policy chooses from at each step.
A = make_grid_from_ranges([np.linspace(0.1, 1.1, 10), np.linspace(-10.1, 10.1, 21)])
# %% [markdown]
# We define the cost as the norm distance to the target at each time step.
# %%
a = 0.5  # Path amplitude.
p = 2.0  # Path period.
# Triangle-wave ("v-shaped") reference trajectory, one (x, y) waypoint per
# time step; the double-modulo keeps the wave well-defined for negative x.
target_trajectory = [
    [
        (x * 0.1) - 1.0,
        4 * a / p * np.abs((((((x * 0.1) - 1.0) - p / 2) % p) + p) % p - p / 2) - a,
    ]
    for x in range(time_horizon)
]
def _tracking_cost(time: int = 0, state: np.ndarray = None) -> float:
    """Tracking cost: squared Euclidean distance from each state's (x, y)
    position to the target-trajectory waypoint at the given time step.

    Args:
        time : Time of the simulation. Used for time-dependent cost functions.
        state : State of the system.

    Returns:
        cost : Real-valued cost.
    """
    waypoint = np.array([target_trajectory[time]])
    offsets = state[:, :2] - waypoint
    distances = np.linalg.norm(offsets, ord=2, axis=1)
    return np.power(distances, 2)
# %% [markdown]
# ## Algorithm
#
# Now, we can compute the policy using the algorithm, and then simulate the system
# forward in time using the computed policy.
#
# In order to change this to the dynamic programming algorithm, use `KernelControlBwd`.
# %%
# Compute the policy.
policy = KernelControlFwd(
    time_horizon=time_horizon,
    cost_fn=_tracking_cost,
    kernel_fn=partial(rbf_kernel, gamma=1 / (2 * (sigma ** 2))),
    regularization_param=regularization_param,
    verbose=False,
)
policy.train(S=S, A=A)
# Simulate the controlled system.
env.reset()
initial_condition = [-0.8, 0, 0]  # start left of center, heading 0
env.state = initial_condition
trajectory = [initial_condition]
for t in range(time_horizon):
    # Query the policy for the current state and apply the chosen action.
    action = policy(time=t, state=[env.state])
    state, *_ = env.step(time=t, action=action)
    trajectory.append(list(state))
# %% [markdown]
# ## Results
#
# We then plot the simulated trajectories of the actual system alongside the predicted
# state trajectory using the approximated dynamics.
# %%
import matplotlib
import matplotlib.pyplot as plt
fig = plt.figure()
ax = plt.axes()
target_trajectory = np.array(target_trajectory, dtype=np.float32)
plt.plot(
    target_trajectory[:, 0],
    target_trajectory[:, 1],
    marker="o",
    color="C0",
    label="Target Trajectory",
)
trajectory = np.array(trajectory, dtype=np.float32)
plt.plot(
    trajectory[:, 0],
    trajectory[:, 1],
    color="C1",
    label="System Trajectory",
)
# Plot the markers as arrows, showing vehicle heading.
paper_airplane = [(0, -0.25), (0.5, -0.5), (0, 1), (-0.5, -0.5), (0, -0.25)]
for x in trajectory:
    # NOTE(review): the heading (x[2], radians) is negated when converted
    # to the marker's rotation in degrees -- presumably to match the
    # marker/plot orientation convention; confirm before changing.
    angle = -np.rad2deg(x[2])
    t = matplotlib.markers.MarkerStyle(marker=paper_airplane)
    t._transform = t.get_transform().rotate_deg(angle)
    plt.plot(x[0], x[1], marker=t, markersize=15, linestyle="None", color="C1")
plt.legend()
plt.show()
| 3.28125 | 3 |
python/kwiver/vital/tests/test_rotation.py | mwoehlke-kitware/kwiver | 0 | 12764184 | """
ckwg +31
Copyright 2016-2020 by Kitware, Inc.
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
* Neither name of Kitware, Inc. nor the names of any contributors may be used
to endorse or promote products derived from this software without specific
prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ``AS IS''
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
==============================================================================
Tests for vital.types.Rotation class
"""
from __future__ import print_function
import math
import unittest
import nose.tools
import numpy
from kwiver.vital.types import rotation, RotationD, RotationF
def array_normalize(a, dtype=None):
    """Return `a` scaled to unit Euclidean norm, as a plain Python list."""
    arr = numpy.asarray(a, dtype)
    norm = numpy.linalg.norm(arr)
    return (arr / norm).tolist()
class TestVitalRotation(unittest.TestCase):
    def test_new_default(self):
        """Default constructors work and report the right type tag."""
        # That these even construct
        rot_d = RotationD()
        nose.tools.assert_equal(rot_d.type_name, "d")
        rot_f = RotationF()
        nose.tools.assert_equal(rot_f.type_name, "f")
    def test_eq(self):
        """Equality holds for rotations of equal value and precision."""
        # Identities should equal
        r1 = RotationD()
        r2 = RotationD()
        nose.tools.assert_equal(r1, r2)
        r3 = RotationD([1, 2, 3, 4])
        r4 = RotationD([1, 2, 3, 4])
        nose.tools.assert_equal(r3, r4)
        nose.tools.assert_false(r1 == r3)
        r1 = RotationF()
        r2 = RotationF()
        nose.tools.assert_equal(r1, r2)
        r3 = RotationF([1, 2, 3, 4])
        r4 = RotationF([1, 2, 3, 4])
        nose.tools.assert_equal(r3, r4)
        nose.tools.assert_false(r1 == r3)
        # q and -q represent the same rotation
        r1 = RotationD([1, 2, 3, 4])
        r2 = RotationD([-1, -2, -3, -4])
        assert r1.angle_from(r2) < 1e-12
    def test_not_eq(self):
        """Inequality is the complement of equality."""
        # Identities should equal
        r1 = RotationD()
        r2 = RotationD()
        nose.tools.assert_false(r1 != r2)
        r3 = RotationD([1, 2, 3, 4])
        r4 = RotationD([1, 2, 3, 4])
        nose.tools.assert_false(r3 != r4)
        nose.tools.ok_(r1 != r3)
        r1 = RotationF()
        r2 = RotationF()
        nose.tools.assert_false(r1 != r2)
        r3 = RotationF([1, 2, 3, 4])
        r4 = RotationF([1, 2, 3, 4])
        nose.tools.assert_false(r3 != r4)
        nose.tools.ok_(r1 != r3)
    def test_to_matrix(self):
        """Identity rotation converts to the 3x3 identity matrix."""
        # Default value should be identity
        rot_d = RotationD()
        numpy.testing.assert_array_equal(rot_d.matrix(), numpy.eye(3))
        rot_f = RotationF()
        numpy.testing.assert_array_equal(rot_f.matrix(), numpy.eye(3))
    def test_to_quaternion(self):
        """Identity rotation has quaternion [0, 0, 0, 1]."""
        rot_d = RotationD()
        numpy.testing.assert_array_equal(rot_d.quaternion(), [0, 0, 0, 1])
        rot_f = RotationF()
        numpy.testing.assert_array_equal(rot_f.quaternion(), [0, 0, 0, 1])
    def test_to_axis_angle(self):
        """Identity rotation decomposes to axis [0, 0, 1] and angle 0."""
        # expected identity: [0,0,1] and 0
        ident_axis = [0, 0, 1]
        ident_angle = 0
        rot_d = RotationD()
        rot_f = RotationF()
        numpy.testing.assert_equal(rot_d.axis(), ident_axis)
        nose.tools.assert_equal(rot_d.angle(), ident_angle)
        numpy.testing.assert_equal(rot_f.axis(), ident_axis)
        nose.tools.assert_equal(rot_f.angle(), ident_angle)
    def test_to_rodrigues(self):
        """Identity rotation has the zero Rodrigues vector."""
        # rodrigues identity: [0,0,0]
        ident_rod = [0, 0, 0]
        rot_d = RotationD()
        rot_f = RotationF()
        rod = rot_d.rodrigues()
        numpy.testing.assert_equal(rod, ident_rod)
        rod = rot_f.rodrigues()
        numpy.testing.assert_equal(rod, ident_rod)
    def test_to_ypr(self):
        """Identity rotation decomposes to the expected yaw/pitch/roll."""
        # ypr identity: (pi/2, 0, -pi)  [comment fixed: the code uses -pi]
        ident_ypr = (math.pi / 2, 0, -math.pi)
        rot_d = RotationD()
        rot_f = RotationF()
        numpy.testing.assert_almost_equal(rot_d.yaw_pitch_roll(), ident_ypr, 15)
        numpy.testing.assert_almost_equal(rot_f.yaw_pitch_roll(), ident_ypr)
    def test_from_rotation(self):
        """Copy construction within the same precision yields equality."""
        r = RotationD()
        r_cpy = RotationD(r)
        nose.tools.ok_(r == r_cpy)
        r = RotationD([1, 2, 3, 4])
        r_cpy = RotationD(r)
        nose.tools.ok_(r == r_cpy)
        r = RotationF()
        r_cpy = RotationF(r)
        nose.tools.ok_(r == r_cpy)
        r = RotationF([1, 2, 3, 4])
        r_cpy = RotationF(r)
        nose.tools.ok_(r == r_cpy)
    def test_from_rotation_other_type(self):
        """Cross-precision copy construction preserves the quaternion to
        single-precision accuracy (6 decimals)."""
        r = RotationD()
        r_cpy = RotationF(r)
        numpy.testing.assert_array_almost_equal(r.quaternion(), r_cpy.quaternion(), 6)
        r = RotationD([1, 2, 3, 4])
        r_cpy = RotationF(r)
        numpy.testing.assert_array_almost_equal(r.quaternion(), r_cpy.quaternion(), 6)
        r = RotationF()
        r_cpy = RotationD(r)
        numpy.testing.assert_array_almost_equal(r.quaternion(), r_cpy.quaternion(), 6)
        r = RotationF([1, 2, 3, 4])
        r_cpy = RotationD(r)
        numpy.testing.assert_array_almost_equal(r.quaternion(), r_cpy.quaternion(), 6)
    def test_from_quaternion(self):
        """A rotation built from a unit quaternion returns it unchanged."""
        q = array_normalize([+2, -1, -3, +0], float)
        r = RotationD(q)
        numpy.testing.assert_equal(r.quaternion(), q)
    def test_from_rodrigues(self):
        """Rodrigues vectors round-trip through construction."""
        rod_list_1 = [0, 0, 0]
        r1 = RotationD(rod_list_1)
        numpy.testing.assert_equal(r1.rodrigues(), rod_list_1)
        # This one will get normalized by magnitude in rotation instance.
        # This vector's norm is less than 2*pi, so we should expect the
        # same vector to be returned as is.
        rod2 = numpy.array([2, -1, 0.5])
        nod2_normed = array_normalize(rod2)
        print("r2 2-norm:", numpy.linalg.norm(rod2))
        print("r2-normed:", nod2_normed)
        r2 = RotationD(rod2)
        numpy.testing.assert_array_almost_equal(
            r2.rodrigues(), rod2, decimal=14,  # 1e-14
        )
    def test_from_aa(self):
        """Axis/angle constructor normalizes the axis and keeps the angle."""
        # Axis should come out of rotation normalized
        angle = 0.8
        axis = [-3, 2, 1]
        axis_norm = array_normalize(axis)
        r = RotationD(angle, axis)
        nose.tools.assert_equal(angle, r.angle())
        numpy.testing.assert_equal(axis_norm, r.axis())
def test_from_ypr(self):
y = 1.2
p = 0.3
r = -1.0
# XXX
rot = RotationD(y, p, r)
ry, rp, rr = rot.yaw_pitch_roll()
nose.tools.assert_almost_equal(y, ry, 14)
nose.tools.assert_almost_equal(p, rp, 14)
nose.tools.assert_almost_equal(r, rr, 14)
# 0XX
rot = RotationD(0, p, r)
ry, rp, rr = rot.yaw_pitch_roll()
nose.tools.assert_almost_equal(0, ry, 14)
nose.tools.assert_almost_equal(p, rp, 14)
nose.tools.assert_almost_equal(r, rr, 14)
# X0X
rot = RotationD(y, 0, r)
ry, rp, rr = rot.yaw_pitch_roll()
nose.tools.assert_almost_equal(y, ry, 14)
nose.tools.assert_almost_equal(0, rp, 14)
nose.tools.assert_almost_equal(r, rr, 14)
# XX0
rot = RotationD(y, p, 0)
ry, rp, rr = rot.yaw_pitch_roll()
nose.tools.assert_almost_equal(y, ry, 14)
nose.tools.assert_almost_equal(p, rp, 14)
nose.tools.assert_almost_equal(0, rr, 14)
# 00X
rot = RotationD(0, 0, r)
ry, rp, rr = rot.yaw_pitch_roll()
nose.tools.assert_almost_equal(0, ry, 14)
nose.tools.assert_almost_equal(0, rp, 14)
nose.tools.assert_almost_equal(r, rr, 14)
# 0X0
rot = RotationD(0, p, 0)
ry, rp, rr = rot.yaw_pitch_roll()
nose.tools.assert_almost_equal(0, ry, 14)
nose.tools.assert_almost_equal(p, rp, 14)
nose.tools.assert_almost_equal(0, rr, 14)
# X00
rot = RotationD(y, 0, 0)
ry, rp, rr = rot.yaw_pitch_roll()
nose.tools.assert_almost_equal(y, ry, 14)
nose.tools.assert_almost_equal(0, rp, 14)
nose.tools.assert_almost_equal(0, rr, 14)
# 000
rot = RotationD(0, 0, 0)
ry, rp, rr = rot.yaw_pitch_roll()
nose.tools.assert_almost_equal(0, ry, 14)
nose.tools.assert_almost_equal(0, rp, 14)
nose.tools.assert_almost_equal(0, rr, 14)
    def test_from_matrix(self):
        """A rotation built from a matrix reproduces that same matrix."""
        # Create a non-identity matrix from a different constructor that we
        # assume works
        # Create new rotation with that matrix.
        # New rotation to_matrix method should produce the same matrix
        pre_r = RotationD([+2, -1, -3, +0])
        mat = pre_r.matrix()
        r = RotationD(mat)
        numpy.testing.assert_allclose(mat, r.matrix(), 1e-15)
    def test_inverse(self):
        """Quaternion of the inverse matches the analytically computed value."""
        # quaternion calc from:
        # https://www.wolframalpha.com/input/?i=quaternion:+0%2B2i-j-3k&lk=3
        r = RotationD([+2, -1, -3, +0])
        r_inv = r.inverse()
        e_inv = array_normalize([-1 / 7.0, +1 / 14.0, +3 / 14.0, 0])
        numpy.testing.assert_allclose(r_inv.quaternion(), e_inv, 1e-15)
        # Single-precision variant: same expectation, looser tolerance.
        r = RotationF([+2, -1, -3, +0])
        r_inv = r.inverse()
        numpy.testing.assert_allclose(r_inv.quaternion(), e_inv, 1e-7)
    def test_mul(self):
        """Composing with the identity yields a new, equal rotation."""
        # Normalize quaternion vector.
        expected_quat = array_normalize([+2.0, -1.0, -3.0, +0.0])
        r_ident_d = RotationD()
        r_ident_f = RotationF()
        r_other_d = RotationD(expected_quat)
        r_other_f = RotationF(expected_quat)
        r_res_d = r_ident_d * r_other_d
        # Multiplication must return a distinct object, equal to the operand.
        nose.tools.assert_is_not(r_other_d, r_res_d)
        numpy.testing.assert_equal(r_res_d, r_other_d)
        numpy.testing.assert_equal(r_res_d.quaternion(), expected_quat)
        r_res_f = r_ident_f * r_other_f
        nose.tools.assert_is_not(r_other_f, r_res_f)
        numpy.testing.assert_equal(r_res_f, r_other_f)
        # Single precision: compare with loosened tolerance.
        numpy.testing.assert_allclose(r_res_f.quaternion(), expected_quat, 1e-7)
def test_mul_vector(self):
vec = [1, 0, 0]
vec_expected = [0, 1, 0]
r_axis = [0, 0, 1]
r_angle = math.pi / 2.0
r = RotationD(r_angle, r_axis)
vec_rotated = r * vec
numpy.testing.assert_array_almost_equal(vec_expected, vec_rotated)
    def test_interpolation(self):
        """Interpolating halfway between 0 and pi/2 about +Y gives pi/4 about +Y."""
        x_d = RotationD(0, [1, 0, 0])
        y_d = RotationD(math.pi / 2, [0, 1, 0])
        r_d = RotationD(math.pi / 4, [0, 1, 0])
        x_f = RotationF(0, [1, 0, 0])
        y_f = RotationF(math.pi / 2, [0, 1, 0])
        r_f = RotationF(math.pi / 4, [0, 1, 0])
        z_d = rotation.interpolate_rotation(x_d, y_d, 0.5)
        z_f = rotation.interpolate_rotation(x_f, y_f, 0.5)
        # The relative rotation between result and expectation should be ~0.
        nose.tools.assert_almost_equal((z_d.inverse() * r_d).angle(), 0, 14)
        nose.tools.assert_almost_equal((z_f.inverse() * r_f).angle(), 0, 6)
def test_interpolated_rotations(self):
x = RotationD(0, [1, 0, 0])
a = math.pi / 2
y = RotationD(a, [0, 1, 0])
i_list = rotation.interpolated_rotations(x, y, 3)
nose.tools.assert_equal([i.type_name for i in i_list], ["d"] * 3)
i0_e_axis, i0_e_angle = [0, 1, 0], a * 0.25
i1_e_axis, i1_e_angle = [0, 1, 0], a * 0.50
i2_e_axis, i2_e_angle = [0, 1, 0], a * 0.75
numpy.testing.assert_almost_equal(i_list[0].axis(), i0_e_axis, 14)
numpy.testing.assert_almost_equal(i_list[0].angle(), i0_e_angle, 14)
numpy.testing.assert_almost_equal(i_list[1].axis(), i1_e_axis, 14)
numpy.testing.assert_almost_equal(i_list[1].angle(), i1_e_angle, 14)
numpy.testing.assert_almost_equal(i_list[2].axis(), i2_e_axis, 14)
numpy.testing.assert_almost_equal(i_list[2].angle(), i2_e_angle, 14)
| 1.5625 | 2 |
landing/urls.py | gauravuttarkar/SoftwareEngineering | 0 | 12764185 | <filename>landing/urls.py
# Consolidated imports: the original imported `path` from django.urls twice.
from django.urls import path, include

from donations import urls as donations_urls
from events import urls as events_urls

from . import views

# URL routes for the landing app. events/ and donations/ delegate to the
# respective apps' own URLConfs; the remaining routes are auth views.
urlpatterns = [
    path('', views.home),
    path('events/', include(events_urls)),
    path('donations/', include(donations_urls)),
    path('login', views.login1, name="login"),
    path('login-submit', views.logging_in, name="logging_in"),
    path('signup', views.signup, name="signup"),
    path('signup_submit', views.signup_submit, name="signup_submit"),
    path('logout', views.logout, name="logout"),
]
| 2.046875 | 2 |
part-2/1-sequence/5-custom_sequence-polygon.py | boconlonton/python-deep-dive | 0 | 12764186 | """
Polygon exercises:
- __len__
- __getitem__ (retrieve)
- __add__
- __iadd__
- __mul__
- __imul__
- append()
- insert()
- extend()
- __setitem__ (assignment)
- __delitem__
"""
import numbers
class Point:
    """A 2D point backed by a tuple; supports len(), indexing and unpacking."""

    def __init__(self, x, y):
        # Guard clause: both coordinates must be real numbers.
        if not (isinstance(x, numbers.Real) and isinstance(y, numbers.Real)):
            raise TypeError('Point co-ordinates must be real numbers')
        self._pt = (x, y)

    def __repr__(self):
        return f'Point(x={self._pt[0]}, y={self._pt[1]})'

    def __len__(self):
        """Defines len() -- delegates to the underlying tuple."""
        return len(self._pt)

    def __getitem__(self, i):
        """Defines obj[i] -- delegates to the underlying tuple."""
        return self._pt[i]
class Polygon:
    """A mutable sequence of Point vertices with list-like operations.

    Supports len(), indexing/slicing, item assignment, deletion, `+`,
    `+=`, append/insert/extend/pop/clear.
    """
    def __init__(self, *pts):
        # Each vertex may be a Point or any (x, y) iterable.
        if pts:
            self._pts = [Point(*pt) for pt in pts]
        else:
            self._pts = []
    def __repr__(self):
        # Removes the square brackets in the result
        pts_str = ",".join(str(pt) for pt in self._pts)
        return f'Polygon({pts_str})'
    def __len__(self):
        """Defines len()"""
        return len(self._pts)
    def __getitem__(self, s):
        """Defines obj[s]; a slice returns a plain list of Points."""
        return self._pts[s]
    def __setitem__(self, s, other):
        """Define instance[s] = other.

        An int index accepts a single point; a slice accepts an iterable
        of points. Mixing the two raises TypeError.
        """
        try:
            rhs = [Point(*pt) for pt in other]
            is_single = False
        except TypeError:
            try:
                rhs = Point(*other)
                is_single = True
            except TypeError:
                raise TypeError('Invalid Point or Iterable of Points')
        if isinstance(s, int) and is_single \
                or (isinstance(s, slice) and not is_single):
            # Assignment for an index or a slice
            self._pts[s] = rhs
        else:
            raise TypeError('Incompatible')
    def __add__(self, other):
        """Define p1 + p2 -> a new Polygon with concatenated vertices."""
        if isinstance(other, Polygon):
            new_pts = self._pts + other._pts
            return Polygon(*new_pts)
        else:
            raise TypeError('can only concatenate with same Polygon')
    def append(self, pt):
        """Append a single point. Mutates in place; returns None."""
        self._pts.append(Point(*pt))
    def insert(self, i, pt):
        """Insert a point before index i. Mutates in place; returns None."""
        self._pts.insert(i, Point(*pt))
    def extend(self, pts):
        """Extend with a Polygon or any iterable of points. Mutates in place."""
        if isinstance(pts, Polygon):
            self._pts += pts._pts
        else:
            self._pts += [Point(*pt) for pt in pts]
    def __iadd__(self, other):
        """Define p1 += p2 (in-place; other may be a Polygon or any iterable)."""
        self.extend(other)
        return self
    def __delitem__(self, s):
        """Define del instance[s] for an index or a slice."""
        del self._pts[s]
    def pop(self, s=-1):
        """Remove and return the point at index s (default: last, as list.pop)."""
        return self._pts.pop(s)
    def clear(self):
        """Remove all points."""
        self._pts.clear()
# Usage: interactive demo of the Point and Polygon sequence protocols.
p1 = Point(10, 2.5)
print(p1)
x, y = p1
print(x, y)
# This is the beauty of using custom seq over Named Tuple
p2 = Point(*p1)
# Polygon
print('POLYGON')
p = Polygon((0, 0), Point(1, 1))
print(p)
print(p[0])
print(len(p))
print('\n CONCAT POLYGON')
p1 = Polygon((1, 1), (2, 2))
p2 = Polygon((3,3), (4, 4))
result = p1 + p2
print(result) # New object
print('\n ICONCAT POLYGON')
print(id(p1))
p1 += p2
print(id(p1)) # Original object (Mutating)
print(p1)
print('\n ICONCAT POLYGON (Modified)')
p1 = Polygon((1, 1), (2, 2))
print(id(p1))
p1 += [(0, 0), (6, 2)]
print(id(p1))
print(p1)
print('\n Append')
p1 = Polygon((1, 1), (2, 2))
print(id(p1))
p1.append((3, 4))
print(id(p1))
print(p1)
print('\n Extend')
p1 = Polygon((0, 0), (2, 2))
p2 = Polygon((3, 3), (4, 4))
print(id(p1), p1)
print(id(p2), p2)
p1.append([10, 10])
print(id(p1), p1)
p1.insert(1, Point(-1, -1))
print(id(p1), p1)
p1.extend(p2)
print(id(p1), p1)
# extend() accepts mixed tuples and Points
p1.extend([(6, 6), Point(20, 20)])
print(id(p1), p1)
print('\n Assignment of slice')
p1 = Polygon((0, 0), (2, 2))
print(id(p1), p1)
# Slice assignment may change the length, just like a list
p1[0:2] = [(3, 3), Point(20, 20), [30, 30]]
print(id(p1), p1)
print('\n Assignment of value')
p1 = Polygon((0, 0), (2, 2))
print(id(p1), p1)
p1[0] = (3, 3)
print(id(p1), p1)
p1[0] = Point(-1, -1)
print(id(p1), p1)
print('\n Assignment (Modified)')
p1 = Polygon((0, 0), (2, 2))
print(id(p1), p1)
p1[0:2] = [(3, 3), [4,4]]
print(id(p1), p1)
print('\n Del/Pop (Modified)')
p1 = Polygon((0, 0), (2, 2), (3, 3))
print(id(p1), p1)
del p1[0]
print(id(p1), p1)
p1 = Polygon((0, 0), (2, 2), (3, 3))
print(id(p1), p1)
del p1[0:2]
print(id(p1), p1)
p1 = Polygon((0, 0), (2, 2), (3, 3))
print(id(p1), p1)
res = p1.pop(0)
print(res)
print(id(p1), p1)
| 3.703125 | 4 |
api/backoffice/models.py | ideabosque/DataWald-AWS | 0 | 12764187 | #!/usr/bin/python
# -*- coding: utf-8 -*-
from __future__ import print_function
__author__ = 'bibow'
import json, uuid, os
from datetime import datetime, date
from decimal import Decimal
import logging
logger = logging.getLogger()
# NOTE(review): eval() on an environment variable is risky; LOGGINGLEVEL
# apparently holds a Python expression such as "logging.INFO" -- consider
# passing the plain level name to setLevel() instead. TODO confirm format.
logger.setLevel(eval(os.environ["LOGGINGLEVEL"]))
import boto3
from boto3.dynamodb.conditions import Key, Attr
dynamodb = boto3.resource('dynamodb')
configData = dynamodb.Table('config_data')
# Fetch the backoffice API configuration once at import time from the
# 'config_data' DynamoDB table (this performs network I/O on import).
response = configData.get_item(
    Key={
        'key': "BACKOFFICEAPI"
    }
)
BACKOFFICEAPI = response["Item"]["value"]
# Helper class to convert a DynamoDB item to JSON.
class JSONEncoder(json.JSONEncoder):
    """json.JSONEncoder handling Decimal, datetime/date and bytes values.

    Decimals become ints when integral and floats otherwise; datetimes
    are formatted as 'YYYY-MM-DD HH:MM:SS'; bytes become their str().
    """
    def default(self, o):
        if isinstance(o, Decimal):
            # BUGFIX: was `o % 1 > 0`, which truncated negative
            # non-integral Decimals (e.g. -1.5 -> -1) because Decimal's
            # remainder keeps the sign of the dividend, so it is never > 0
            # for negatives. `!= 0` handles both signs.
            if o % 1 != 0:
                return float(o)
            else:
                return int(o)
        elif isinstance(o, (datetime, date)):
            return o.strftime("%Y-%m-%d %H:%M:%S")
        elif isinstance(o, (bytes, bytearray)):
            return str(o)
        else:
            return super(JSONEncoder, self).default(o)
class OrdersModel(object):
    """DynamoDB-backed store for frontend orders awaiting backoffice sync."""
    def __init__(self):
        self._orders = dynamodb.Table('orders')
    @property
    def orders(self):
        # The underlying DynamoDB 'orders' table resource.
        return self._orders
    def _getOrder(self, frontend, feOrderId):
        """Query the latest record for (frontend, fe_order_id); returns the raw response."""
        response = self.orders.query(
            IndexName="frontend_index",
            KeyConditionExpression=Key('frontend').eq(frontend) & Key('fe_order_id').eq(feOrderId),
            Limit=1
        )
        return response
    def getOrders(self):
        # Not implemented yet (placeholder).
        pass
    def getOrder(self, frontend, feOrderId):
        """Return an API-Gateway-style response with the matching order (or {})."""
        order = {}
        response = self._getOrder(frontend, feOrderId)
        if response['Count'] != 0:
            order = response["Items"][0]
        return {
            "statusCode": 200,
            "headers": {},
            "body": (json.dumps(order, indent=4, cls=JSONEncoder))
        }
    def insertOrder(self, frontend, feOrderId, order):
        """Insert or update an order, maintaining its tx_status state machine.

        tx_status meanings used here (inferred from the transitions below;
        TODO confirm against the wider codebase): N = new, P = pending,
        I = ignored, F = failed.
        """
        insertStatus = BACKOFFICEAPI['DWFEORDERSTATUS_METRICS']['insert']['status']
        # Orders whose frontend status is not in the configured insert set
        # are marked "I" so they are not transmitted.
        order['tx_status'] = order.get("tx_status", "N") if order['fe_order_status'].lower() in insertStatus else "I"
        order['create_dt'] = datetime.utcnow().strftime("%Y-%m-%d %H:%M:%S")
        order['tx_dt'] = datetime.utcnow().strftime("%Y-%m-%d %H:%M:%S")
        order['tx_note'] = '{0} -> DataWald'.format(frontend)
        order['frontend'] = frontend
        response = self._getOrder(frontend, feOrderId)
        _id = str(uuid.uuid1())
        if response['Count'] != 0:
            item = response["Items"][0]
            _id = item["id"]
            if order['fe_order_status'] != item['fe_order_status']:
                # Frontend status changed: overwrite with the new payload.
                order["id"] = _id
            else:
                if item["tx_status"] == "N":
                    # Unchanged, still untransmitted: bump to pending.
                    order = item
                    order["tx_status"] = "P"
                elif item["tx_status"] == "F" and order["tx_status"] == "N":
                    # Previous attempt failed and a fresh payload arrived: retry.
                    order["id"] = _id
                else:
                    # Otherwise keep the stored record untouched.
                    order = item
            self.orders.put_item(Item=order)
            log = "Successfully update document: {0}/{1}".format(order["fe_order_id"], order["id"])
            logger.info(log)
        else:
            order["id"] = _id
            self.orders.put_item(Item=order)
            log = "Successfully insert document: {0}/{1}".format(order["fe_order_id"], order["id"])
            logger.info(log)
        return {
            "statusCode": 200,
            "headers": {},
            "body": json.dumps({
                "id": _id,
                "frontend": frontend,
                "fe_order_id": feOrderId
            })
        }
    def updateOrderStatus(self, id, orderStatus):
        """Record the backoffice order id and transmission status for a record."""
        response = self.orders.update_item(
            Key={
                'id': id
            },
            UpdateExpression="set bo_order_id=:val0, tx_dt=:val1, tx_status=:val2, tx_note=:val3",
            ExpressionAttributeValues={
                ':val0': orderStatus['bo_order_id'],
                ':val1': datetime.utcnow().strftime("%Y-%m-%d %H:%M:%S"),
                ':val2': orderStatus['tx_status'],
                ':val3': orderStatus['tx_note']
            },
            ReturnValues="UPDATED_NEW"
        )
        return {
            "statusCode": 200,
            "headers": {},
            "body": (json.dumps(response, indent=4, cls=JSONEncoder))
        }
class ItemReceiptsModel(object):
    """DynamoDB-backed store for item receipts keyed by backoffice PO number."""
    def __init__(self):
        self._itemReceipts = dynamodb.Table('itemreceipts')
    @property
    def itemReceipts(self):
        # The underlying DynamoDB 'itemreceipts' table resource.
        return self._itemReceipts
    def _getItemReceipt(self, frontend, boPONum):
        """Query the latest record for (frontend, bo_po_num); returns the raw response."""
        response = self.itemReceipts.query(
            IndexName="frontend_index",
            KeyConditionExpression=Key('frontend').eq(frontend) & Key('bo_po_num').eq(boPONum),
            Limit=1
        )
        return response
    def getItemReceipts(self):
        # Not implemented yet (placeholder).
        pass
    def getItemReceipt(self, frontend, boPONum):
        """Return an API-Gateway-style response with the matching receipt (or {})."""
        itemReceipt = {}
        response = self._getItemReceipt(frontend, boPONum)
        if response['Count'] != 0:
            itemReceipt = response["Items"][0]
        return {
            "statusCode": 200,
            "headers": {},
            "body": (json.dumps(itemReceipt, indent=4, cls=JSONEncoder))
        }
    def insertItemReceipt(self, frontend, boPONum, itemReceipt):
        """Insert or update a receipt, archiving prior payloads under 'history'.

        Note: the "recepit" spelling below is a typo baked into the stored
        log strings; left unchanged to preserve runtime behavior.
        """
        itemReceipt["frontend"] = frontend
        itemReceipt["tx_status"] = itemReceipt.get("tx_status", "N")
        itemReceipt["tx_dt"] = datetime.utcnow().strftime("%Y-%m-%d %H:%M:%S")
        itemReceipt["tx_note"] = '{0} -> DataWald'.format(frontend)
        response = self._getItemReceipt(frontend, boPONum)
        _id = str(uuid.uuid1())
        if response['Count'] != 0:
            item = response["Items"][0]
            _id = item["id"]
            if itemReceipt['data'] != item['data']:
                # Payload changed: snapshot the old data into 'history'
                # keyed by its create_dt, then overwrite the record.
                history = {}
                if 'history' in item.keys():
                    history = item['history']
                createDt = item["create_dt"]
                history[createDt] = item['data']
                itemReceipt['history'] = history
                itemReceipt["id"] = _id
                itemReceipt["bo_itemreceipt_id"] = item["bo_itemreceipt_id"]
                self.itemReceipts.put_item(Item=itemReceipt)
                log = "Successfully update item recepit: {0}/{1}".format(frontend, boPONum)
                logger.info(log)
            else:
                # Unchanged payload: only refresh status fields ("I" = ignored).
                log = "No update item recepit: {0}/{1}".format(frontend, boPONum)
                logger.info(log)
                response = self.itemReceipts.update_item(
                    Key={
                        'id': _id
                    },
                    UpdateExpression="set tx_dt=:val0, tx_status=:val1, tx_note=:val2",
                    ExpressionAttributeValues={
                        ':val0': datetime.utcnow().strftime("%Y-%m-%d %H:%M:%S"),
                        ':val1': "I",
                        ':val2': log
                    },
                    ReturnValues="UPDATED_NEW"
                )
        else:
            itemReceipt["id"] = _id
            self.itemReceipts.put_item(Item=itemReceipt)
            log = "Successfully insert item recepit: {0}/{1}".format(frontend, boPONum)
            logger.info(log)
        return {
            "statusCode": 200,
            "headers": {},
            "body": json.dumps({
                "id": _id,
                "frontend": frontend,
                "bo_po_num": boPONum
            })
        }
    def updateItemReceiptStatus(self, id, itemReceiptStatus):
        """Record the backoffice receipt id and transmission status for a record."""
        response = self.itemReceipts.update_item(
            Key={
                'id': id
            },
            UpdateExpression="set tx_dt=:val0, tx_status=:val1, tx_note=:val2, bo_itemreceipt_id=:val3",
            ExpressionAttributeValues={
                ':val0': datetime.utcnow().strftime("%Y-%m-%d %H:%M:%S"),
                ':val1': itemReceiptStatus['tx_status'],
                ':val2': itemReceiptStatus['tx_note'],
                ':val3': itemReceiptStatus['bo_itemreceipt_id']
            },
            ReturnValues="UPDATED_NEW"
        )
        return {
            "statusCode": 200,
            "headers": {},
            "body": (json.dumps(response, indent=4, cls=JSONEncoder))
        }
class CustomersModel(object):
    """DynamoDB-backed store for customers keyed by frontend customer id."""
    def __init__(self):
        self._customers = dynamodb.Table('customers-bo')
    @property
    def customers(self):
        # The underlying DynamoDB 'customers-bo' table resource.
        return self._customers
    def _getCustomer(self, frontend, feCustomerId):
        """Query the latest record for (frontend, fe_customer_id); returns the raw response."""
        response = self.customers.query(
            IndexName="frontend_index",
            KeyConditionExpression=Key('frontend').eq(frontend) & Key('fe_customer_id').eq(feCustomerId),
            Limit=1
        )
        return response
    def getCustomers(self):
        # Not implemented yet (placeholder).
        pass
    def getCustomer(self, frontend, feCustomerId):
        """Return an API-Gateway-style response with the matching customer (or {})."""
        customer = {}
        response = self._getCustomer(frontend, feCustomerId)
        if response['Count'] != 0:
            customer = response["Items"][0]
        return {
            "statusCode": 200,
            "headers": {},
            "body": (json.dumps(customer, indent=4, cls=JSONEncoder))
        }
    def insertCustomer(self, frontend, feCustomerId, customer):
        """Insert or update a customer record depending on payload changes."""
        customer['tx_status'] = customer.get("tx_status", "N")
        customer['tx_dt'] = datetime.utcnow().strftime("%Y-%m-%d %H:%M:%S")
        customer['tx_note'] = '{0} -> DataWald'.format(frontend)
        customer['frontend'] = frontend
        response = self._getCustomer(frontend, feCustomerId)
        _id = str(uuid.uuid1())
        if response['Count'] != 0:
            item = response["Items"][0]
            _id = item["id"]
            if customer['data'] != item['data']:
                # Payload changed: overwrite, preserving the original create_dt.
                createDt = item["create_dt"]
                customer["id"] = _id
                customer["create_dt"] = createDt
                self.customers.put_item(Item=customer)
                log = "Successfully update customer: {0}/{1}".format(frontend, feCustomerId)
                logger.info(log)
            else:
                # Unchanged payload: refresh status only. Failed/new records
                # are re-queued as "N"; everything else is marked "I".
                log = "No update customer: {0}/{1}".format(frontend, feCustomerId)
                logger.info(log)
                response = self.customers.update_item(
                    Key={
                        'id': _id
                    },
                    UpdateExpression="set tx_dt=:val0, tx_status=:val1, tx_note=:val2",
                    ExpressionAttributeValues={
                        ':val0': datetime.utcnow().strftime("%Y-%m-%d %H:%M:%S"),
                        ':val1': "N" if item['tx_status'] in ('N', 'F') else 'I',
                        ':val2': log
                    },
                    ReturnValues="UPDATED_NEW"
                )
        else:
            customer["id"] = _id
            self.customers.put_item(Item=customer)
            log = "Successfully insert customer: {0}/{1}".format(frontend, feCustomerId)
            logger.info(log)
        return {
            "statusCode": 200,
            "headers": {},
            "body": json.dumps({
                "id": _id,
                "frontend": frontend,
                "fe_customer_id": feCustomerId
            })
        }
    def updateCustomerStatus(self, id, customerStatus):
        """Record the frontend customer id and transmission status for a record."""
        response = self.customers.update_item(
            Key={
                'id': id
            },
            UpdateExpression="set fe_customer_id=:val0, tx_dt=:val1, tx_status=:val2, tx_note=:val3",
            ExpressionAttributeValues={
                ':val0': customerStatus['fe_customer_id'],
                ':val1': datetime.utcnow().strftime("%Y-%m-%d %H:%M:%S"),
                ':val2': customerStatus['tx_status'],
                ':val3': customerStatus['tx_note']
            },
            ReturnValues="UPDATED_NEW"
        )
        return {
            "statusCode": 200,
            "headers": {},
            "body": (json.dumps(response, indent=4, cls=JSONEncoder))
        }
| 2.1875 | 2 |
manage.py | stenvix/lpschedule | 7 | 12764188 | from flask_script import Manager
from schedule import frontend, api
from werkzeug.wsgi import DispatcherMiddleware
from werkzeug.serving import run_simple
from schedule.core import db
# Flask-Script manager bound to the frontend application.
manager = Manager(frontend.create_app())
@manager.command
def runserver():
    # Mount the API app under /api next to the frontend and serve both with
    # werkzeug's development server (reloader and debugger enabled).
    app = DispatcherMiddleware(frontend.create_app(), {'/api': api.create_app()})
    run_simple('0.0.0.0', 5000, app, use_reloader=True, use_debugger=True)
@manager.command
def initdb():
    # Create all tables registered on the shared SQLAlchemy instance.
    db.create_all()
@manager.command
def dropdb():
    # Drop all tables on the default bind.
    db.drop_all(bind=None)
if __name__ == '__main__':
    manager.run(default_command='runserver')
| 1.875 | 2 |
xcamserver/socket_server.py | Moskari/xcamserver | 0 | 12764189 | '''
Created on 28.12.2016
@author: sapejura
'''
import threading
import socket
import select
import queue
from xcamserver.framebuffer import FrameQueue
# from xcamserver import worker_ctx, dummy_worker
class SocketServer():
    """Fans frames read from a camera TCP connection out to client sockets.

    A background thread multiplexes one listening socket, one camera
    connection and any number of client connections using select().
    Clients send 4-byte control frames; camera data is queued per client
    through FrameQueue instances and written when the socket is ready.
    """
    def __init__(self):
        self.stop_event = threading.Event()
        self.thread = threading.Thread(name='socket thread',
                                       target=self._thread,
                                       args=(self.stop_event,))
        self._data_size = 4096  # Size of data chunks read from camera
        self._frame_size = None  # Size of frame read from camera
        self.camera_addr = None
        self.camera_socket = None  # Connection to camera which we are reading
        self.server_socket = None
    def init(self, frame_size):
        """(Re)create the listening socket on an ephemeral localhost port."""
        self.close()
        self._frame_size = frame_size
        print('Creating new socket...')
        self.server_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self.server_socket.settimeout(60)
        # Port 0 lets the OS pick a free port; read it back via getsockname.
        self.server_socket.bind(('localhost', 0))
        server_addr = self.server_socket.getsockname()
        print('...binded...')
        print('...picked server address: %s...' % str(server_addr))
        self.server_socket.listen(100)
    def run(self):
        """Start the I/O thread. Raises if the server is already running."""
        if self.is_alive():
            raise Exception('Socket server is already online')
        self.inputs = [self.server_socket]
        self.outputs = []
        self.stop_event.clear()
        # BUGFIX: a Thread object can only be started once, so build a fresh
        # one here; this allows run() to be called again after stop().
        self.thread = threading.Thread(name='socket thread',
                                       target=self._thread,
                                       args=(self.stop_event,))
        self.thread.start()
    def stop(self):
        """Signal the I/O thread to exit, join it and close all sockets."""
        if self.is_alive():
            self.stop_event.set()
            self.thread.join(10)
            if self.is_alive():
                raise Exception('Socket server didn\'t stop.')
            for sock in self.outputs:
                try:
                    sock.close()
                except OSError:
                    print('Could not close socket', sock)
            for sock in self.inputs:
                try:
                    sock.close()
                except OSError:
                    print('Could not close socket', sock)
        else:
            raise Exception('Can\'t stop socket server. Server is already stopped.')
    def close(self):
        """Stop the thread if running, then close server and camera sockets."""
        if self.is_alive():
            self.stop()
        if self.server_socket:
            self.server_socket.close()
        if self.camera_socket:
            self.camera_socket.close()
    def is_alive(self):
        """Return True while the I/O thread is running."""
        # BUGFIX: Thread.isAlive() was removed in Python 3.9; use is_alive().
        return self.thread.is_alive()
    def _thread(self, stop_event):
        """Main select() loop; runs until stop_event is set."""
        # TODO: Exception handling
        # TODO: This will definitely stackoverflow if there are no clients ever. Queue needs buffered replacement
        queues = {}  # Every outgoing socket gets its own send and receive queues
        while True:
            # Wait (max 1s) for readable/writable sockets.
            (sread, swrite, sexc) = select.select(self.inputs, self.outputs, [], 1)
            # Exit thread
            if stop_event.is_set():
                self.inputs.clear()
                self.outputs.clear()
                break
            # Check incoming connections and data.
            self._handle_read_reqs(queues, sread, swrite, sexc)
            # Write received camera data to outgoing sockets which are ready.
            self._handle_write_reqs(queues, sread, swrite, sexc)
        print('Socket server closed')
    def _remove_socket(self, sock, queues, sread, swrite, sexc):
        """Drop sock from every bookkeeping list and close it."""
        sock_addr, peer_addr = None, None
        try:
            sock_addr = sock.getsockname()
        except OSError:
            pass
        try:
            peer_addr = sock.getpeername()
        except OSError:
            pass
        queues.pop(sock)
        if sock in self.inputs:
            self.inputs.remove(sock)
        if sock in self.outputs:
            self.outputs.remove(sock)
        if sock in sread:
            sread.remove(sock)
        if sock in swrite:
            swrite.remove(sock)
        if sock in sexc:
            sexc.remove(sock)
        sock.close()
        print('Closed connection to', peer_addr, 'from', sock_addr)
    def _handle_read_reqs(self, queues, sread, swrite, sexc):
        """Accept new connections and dispatch incoming data."""
        for sock in sread:
            if sock == self.server_socket:
                # A "readable" server socket is ready to accept a connection
                connection, client_address = sock.accept()
                print('new client registration from', client_address)
                connection.setblocking(0)
                if client_address == self.camera_addr:  # Camera connection
                    self._add_camera_sock(connection)
                else:
                    self._add_client_sock(connection, queues)
            elif sock == self.camera_socket:  # Camera is sending data
                self._recv_from_camera(sock, queues)
            else:
                error = self._recv_from_client(sock, queues)
                if error:
                    self._remove_socket(sock, queues, sread, swrite, sexc)
                    continue
    def _add_camera_sock(self, connection):
        """Register the camera connection as a read source."""
        print('It is the camera.')
        self.camera_socket = connection
        self.inputs.append(connection)
    def _add_client_sock(self, connection, queues):
        """Register a client connection with its own tx/rx frame queues."""
        print('It is a new client application:', connection.getpeername())
        self.inputs.append(connection)
        # Give the socket its own data queue because sockets
        # can be available for sending at different times
        if connection not in queues.keys():
            # Outgoing data and control frames
            tx_q = FrameQueue(self._frame_size + 4)  # timestamp is 4 bytes
            # Incoming control frames
            rx_q = FrameQueue(4)
            rx_q.set_mode(b'\x02')  # Cares only about the newest control frames
            queues[connection] = (tx_q, rx_q)
    def _recv_from_camera(self, sock, queues):
        """Read one chunk from the camera and enqueue it for every client."""
        data = sock.recv(self._data_size)
        if data:
            for tx_q, _ in queues.values():
                tx_q.put(data)
        else:
            pass
    def _recv_from_client(self, sock, queues):
        """Read a client control frame; return True if the socket should be dropped."""
        print('Connection from', sock.getpeername())
        msg_size = 4
        try:
            data = sock.recv(msg_size)
        except (ConnectionAbortedError,
                ConnectionResetError) as e:
            return True
        else:
            if data == b'':
                # Empty recv means the client disconnected.
                print('Removing socket', sock.getpeername(), 'from listened inputs and outputs, closing connection.')
                return True
            else:
                print('Received ctrl data:', data, 'from client', sock.getpeername())
                tx_q, rx_q = queues[sock]
                rx_q.put(data)
                if rx_q.buffer_size() >= msg_size:
                    msg = rx_q.get(4)
                    print('Received full ctrl package:', msg)
                    # First byte of the control frame selects the send mode.
                    tx_q.set_mode(msg[0:1])
                    # Start streaming to this client once its mode is known.
                    if sock not in self.outputs:
                        self.outputs.append(sock)
                return False
    def _handle_write_reqs(self, queues, sread, swrite, sexc):
        """Flush each writable client's queue, dropping dead connections."""
        for sock in swrite:
            tx_q, _ = queues[sock]
            data = tx_q.get()
            if not data:
                continue
            try:
                sent_data = 0
                # send() may accept only part of the buffer; loop until done.
                while(sent_data < len(data)):
                    sent_data += sock.send(data[sent_data:])
            except (ConnectionResetError,
                    ConnectionAbortedError,
                    ConnectionRefusedError) as e:
                # NOTE(review): getpeername() may itself raise on a dead
                # socket -- TODO consider guarding it.
                sock_addr = sock.getpeername()
                print('Connection to', sock_addr, 'lost')
                print('%s(%s): %s' % (type(e).__name__, str(e.errno), e.strerror))
                self._remove_socket(sock, queues, sread, swrite, sexc)
                continue
| 2.625 | 3 |
funcx_sdk/funcx/sdk/executor.py | sirosen/funcX | 0 | 12764190 | <filename>funcx_sdk/funcx/sdk/executor.py
import time
import threading
import os
import uuid
import sys
from concurrent.futures import Future
import concurrent
import logging
import asyncio
import websockets
import json
import dill
from websockets.exceptions import InvalidHandshake
import multiprocessing as mp
import atexit
from funcx.sdk.asynchronous.ws_polling_task import WebSocketPollingTask
logger = logging.getLogger("asyncio")
class AtomicController():
    """ This is used to synchronize between the FuncXExecutor which starts WebSocketPollingTasks
    and the WebSocketPollingTask which closes itself when there are 0 tasks.

    start_callback fires (under the lock) when the counter leaves 0;
    stop_callback fires when it returns to 0.
    """
    def __init__(self, start_callback, stop_callback, init_value=0):
        """
        Parameters
        ==========
        start_callback : callable
            Invoked on the 0 -> 1 transition.
        stop_callback : callable
            Invoked when the counter drops back to 0.
        init_value : int
            Initial counter value. Default: 0.
        """
        # BUGFIX: was hard-coded to 0, silently ignoring init_value.
        self._value = init_value
        self.lock = threading.Lock()
        self.start_callback = start_callback
        self.stop_callback = stop_callback

    def increment(self):
        """Atomically add one, firing start_callback if the count was 0."""
        with self.lock:
            if self._value == 0:
                self.start_callback()
            self._value += 1

    def decrement(self):
        """Atomically subtract one, firing stop_callback on reaching 0.

        Returns the new counter value.
        """
        with self.lock:
            self._value -= 1
            if self._value == 0:
                self.stop_callback()
            return self._value

    def value(self):
        """Return the current counter value (lock-protected read)."""
        with self.lock:
            return self._value

    def __repr__(self):
        return f"AtomicController value:{self._value}"
class FuncXExecutor(concurrent.futures.Executor):
    """ Extends the concurrent.futures.Executor class to layer this interface
    over funcX. The executor returns future objects that are asynchronously
    updated with results by the WebSocketPollingTask using a websockets connection
    to the hosted funcx-websocket-service.
    """

    def __init__(self, funcx_client,
                 results_ws_uri: str = 'ws://localhost:6000',
                 label: str = 'FuncXExecutor'):
        """
        Parameters
        ==========
        funcx_client : client object
            Instance of FuncXClient to be used by the executor
        results_ws_uri : str
            Web sockets URI for the results
        label : str
            Optional string label to name the executor.
            Default: 'FuncXExecutor'
        """
        self.funcx_client = funcx_client
        self.results_ws_uri = results_ws_uri
        self.label = label
        self._tasks = {}
        self._function_registry = {}  # function object -> registered function UUID
        self._function_future_map = {}  # task UUID -> Future
        # we need to associate all batch launches with this id
        self.task_group_id = self.funcx_client.session_task_group_id
        self.poller_thread = ExecutorPollerThread(self.funcx_client,
                                                 self._function_future_map,
                                                 self.results_ws_uri,
                                                 self.task_group_id)
        # Best-effort cleanup of the poller's websocket at interpreter exit.
        atexit.register(self.shutdown)

    def submit(self, function, *args, endpoint_id=None, container_uuid=None, **kwargs):
        """Initiate an invocation

        Parameters
        ----------
        function : Function/Callable
            Function / Callable to execute
        *args : Any
            Args as specified by the function signature
        endpoint_id : uuid str
            Endpoint UUID string. Required
        **kwargs : Any
            Arbitrary kwargs

        Returns
        -------
        Future : concurrent.futures.Future
            A future object
        """
        if function not in self._function_registry:
            # Please note that this is a partial implementation, not all function registration
            # options are fleshed out here.
            # BUGFIX: this debug message was missing its f-prefix and logged
            # the literal "{function}" placeholder.
            logger.debug(f"Function:{function} is not registered. Registering")
            function_uuid = self.funcx_client.register_function(function,
                                                                function_name=function.__name__,
                                                                container_uuid=container_uuid)
            self._function_registry[function] = function_uuid
            logger.debug(f"Function registered with id:{function_uuid}")
        assert endpoint_id is not None, "endpoint_id key-word argument must be set"
        batch = self.funcx_client.create_batch(task_group_id=self.task_group_id)
        batch.add(*args,
                  endpoint_id=endpoint_id,
                  function_id=self._function_registry[function],
                  **kwargs)
        r = self.funcx_client.batch_run(batch)
        logger.debug(f"Batch submitted to task_group: {self.task_group_id}")
        task_uuid = r[0]
        logger.debug(f'Waiting on results for task ID: {task_uuid}')
        # There's a potential for a race-condition here where the result reaches
        # the poller before the future is added to the future_map
        self._function_future_map[task_uuid] = Future()
        self.poller_thread.atomic_controller.increment()
        res = self._function_future_map[task_uuid]
        # Restart the poller if its websocket has been closed since the
        # last submission.
        if not self.poller_thread or self.poller_thread.ws_handler.closed:
            self.poller_thread = ExecutorPollerThread(self.funcx_client, self._function_future_map, self.results_ws_uri, self.task_group_id)
        return res

    def shutdown(self):
        """Shut down the result poller; registered with atexit."""
        if self.poller_thread:
            self.poller_thread.shutdown()
        logger.debug(f"Executor:{self.label} shutting down")
def noop():
    """Do nothing; placeholder stop-callback for AtomicController."""
    return None
class ExecutorPollerThread():
    """ This encapsulates the creation of the thread on which event loop lives,
    the instantiation of the WebSocketPollingTask onto the event loop and the
    synchronization primitives used (AtomicController)
    """

    def __init__(self, funcx_client, _function_future_map, results_ws_uri, task_group_id):
        """
        Parameters
        ==========
        funcx_client : client object
            Instance of FuncXClient to be used by the executor
        _function_future_map : dict
            Shared task UUID -> Future map filled in by the websocket poller
        results_ws_uri : str
            Web sockets URI for the results
        task_group_id : str
            Task group id all submitted batches are associated with
        """
        self.funcx_client = funcx_client
        self.results_ws_uri = results_ws_uri
        self._function_future_map = _function_future_map
        self.task_group_id = task_group_id
        self.eventloop = None
        # start() fires on the 0 -> 1 transition; the websocket task closes
        # itself when the count returns to 0, so the stop callback is a noop.
        self.atomic_controller = AtomicController(self.start,
                                                  noop)

    def start(self):
        """ Start the result polling thread
        """
        eventloop = asyncio.new_event_loop()
        self.eventloop = eventloop
        self.ws_handler = WebSocketPollingTask(self.funcx_client,
                                               eventloop,
                                               self.atomic_controller,
                                               init_task_group_id=self.task_group_id,
                                               results_ws_uri=self.results_ws_uri,
                                               auto_start=False)
        self.thread = threading.Thread(target=self.event_loop_thread,
                                       args=(eventloop, ))
        self.thread.start()
        logger.debug("Started web_socket_poller thread")

    def event_loop_thread(self, eventloop):
        """Thread target: install the loop and run the poller to completion."""
        asyncio.set_event_loop(eventloop)
        eventloop.run_until_complete(self.web_socket_poller())

    # BUGFIX: the redundant @asyncio.coroutine decorator was removed -- it is
    # unnecessary on an `async def` and was deleted from asyncio in
    # Python 3.11, where it would raise at class-definition time.
    async def web_socket_poller(self):
        """Open the websocket and dispatch incoming results into the future map."""
        await self.ws_handler.init_ws(start_message_handlers=False)
        await self.ws_handler.handle_incoming(self._function_future_map, auto_close=True)

    def shutdown(self):
        """Close the websocket (if open) on its own event loop and wait."""
        ws = self.ws_handler.ws
        if ws:
            ws_close_future = asyncio.run_coroutine_threadsafe(ws.close(), self.eventloop)
            ws_close_future.result()
def double(x):
    """Return *x* multiplied by two (sample task for the demo below)."""
    return 2 * x
if __name__ == '__main__':
    # Demo: submit double() to a funcX endpoint and poll/block on the future.
    import argparse
    from funcx import FuncXClient
    from funcx import set_stream_logger
    import time
    parser = argparse.ArgumentParser()
    parser.add_argument("-s", "--service_url", default='http://localhost:5000/v2',
                        help="URL at which the funcx-web-service is hosted")
    parser.add_argument("-e", "--endpoint_id", required=True,
                        help="Target endpoint to send functions to")
    # NOTE(review): the --debug help text below looks copy-pasted ("Count of
    # apps to launch") and does not describe the flag.
    parser.add_argument("-d", "--debug", action='store_true',
                        help="Count of apps to launch")
    args = parser.parse_args()
    endpoint_id = args.endpoint_id
    # set_stream_logger()
    fx = FuncXExecutor(FuncXClient(funcx_service_address=args.service_url))
    print("In main")
    endpoint_id = args.endpoint_id
    future = fx.submit(double, 5, endpoint_id=endpoint_id)
    print("Got future back : ", future)
    for i in range(5):
        time.sleep(0.2)
        # Non-blocking check whether future is done
        print("Is the future done? :", future.done())
    print("Blocking for result")
    x = future.result() # <--- This is a blocking call
    print("Result : ", x)
    # fx.shutdown()
| 2.640625 | 3 |
2015/02/fc_2015_02_07.py | mfwarren/FreeCoding | 0 | 12764191 | #!/usr/bin/env python3
# imports go here
from xmlrpc.server import SimpleXMLRPCServer
import xmlrpc.client
from threading import Thread
#
# Free Coding session for 2015-02-07
# Written by <NAME>
#
def run_server():
    """Expose pow() and an 'add' function over XML-RPC on localhost:9000."""
    rpc = SimpleXMLRPCServer(('localhost', 9000))
    rpc.register_function(pow)
    rpc.register_function(lambda x, y: x + y, 'add')
    rpc.register_multicall_functions()
    rpc.serve_forever()
def call_server():
    """Forever call pow(2, x) on the local XML-RPC server with growing x."""
    proxy = xmlrpc.client.ServerProxy('http://localhost:9000')
    exponent = 2
    while True:
        exponent += 1
        print(proxy.pow(2, exponent))
# Run the XML-RPC server in its own thread so the client below can run too.
t = Thread(target=run_server)
t.start()
# The client loops forever in a second thread, issuing pow() calls.
t2 = Thread(target=call_server)
t2.start()
| 2.78125 | 3 |
grizzly/__init__.py | dbis-ilm/grizzly | 28 | 12764192 | <reponame>dbis-ilm/grizzly<filename>grizzly/__init__.py<gh_stars>10-100
from .dataframes.frame import Table
from .dataframes.frame import ExternalTable
from .generator import GrizzlyGenerator
def use(backend):
    """Select the backend GrizzlyGenerator uses to execute queries."""
    GrizzlyGenerator._backend = backend
def close():
    """Delegate cleanup to GrizzlyGenerator.close()."""
    GrizzlyGenerator.close()
def read_table(tableName, index=None, schema=None):
    """Create a Table wrapper over an existing database table."""
    return Table(tableName, index, schema)
def read_external_files(file, colDefs, hasHeader=True, delimiter='|', format="", fdw_extension_name=""):
    """Create an ExternalTable over external file(s).

    Parameters mirror ExternalTable: the file path, column definitions,
    header/delimiter options, the (required) external file format, and an
    optional foreign-data-wrapper extension name.

    Raises
    ------
    ValueError
        If `format` is empty.
    """
    # FIX: validate with a real exception instead of `assert`, which is
    # silently stripped when Python runs with -O.
    if format == "":
        raise ValueError("External file format must be specified")
    return ExternalTable(file, colDefs, hasHeader, delimiter, format, fdw_extension_name)
xen/xen-4.2.2/tools/xm-test/tests/network/12_network_domU_tcp_pos.py | zhiming-shen/Xen-Blanket-NG | 1 | 12764193 | #!/usr/bin/python
# Copyright (C) International Business Machines Corp., 2006
# Author: <<EMAIL>>
# TCP tests to domU interface
# - creates two guest domains
# - sets up a single NIC on each on same subnet
# - conducts tcp tests to the domU IP address.
# hping2 $domU_IP -c 1 -d $size
# where $size = 1, 48, 64, 512, 1440, 1500, 1505,
# 4096, 4192, 32767, 65507, 65508
# Payload sizes (bytes) exercised by the hping2 TCP tests below.
pingsizes = [ 1, 48, 64, 512, 1440, 1500, 1505, 4096, 4192, 16384, 24567,
              32767, 65495 ]
from XmTestLib import *
def netDomain():
    # Create a test domain with a single eth0 network device and boot it.
    dom = XmTestDomain()
    dom.newDevice(XenNetDevice, "eth0")
    try:
        console = dom.start()
        # Record console commands so failures can be inspected afterwards.
        console.setHistorySaveCmds(value=True)
    except DomainError, e:
        if verbose:
            print "Failed to create test domain because:"
            print e.extra
        FAIL(str(e))
    return dom
rc = 0
# Test creates 2 domains, which requires 4 ips: 2 for the domains and 2 for
# aliases on dom0
if xmtest_netconf.canRunNetTest(4) == False:
    SKIP("Don't have enough free configured IPs to run this test")
# Fire up a pair of guest domains w/1 nic each
src = netDomain()
src_console = src.getConsole()
dst = netDomain()
try:
    # Ping the victim over eth0
    # NOTE(review): `fails` is defined inside this try block but read after
    # the except handler; if an exception fires before this line runs, the
    # final `len(fails)` check below would raise NameError.
    fails=""
    dst_netdev = dst.getDevice("eth0")
    ip2 = dst_netdev.getNetDevIP()
    for size in pingsizes:
        # hping2: 20 TCP packets of `size` random data bytes to the dst IP.
        out = src_console.runCmd("hping2 " + ip2 + " -E /dev/urandom -q -c 20 "
              + "--fast -d " + str(size) + " -N " + str(size))
        if out["return"]:
            fails += " " + str(size)
            print out["output"]
except ConsoleError, e:
    FAIL(str(e))
src.stop()
dst.stop()
if len(fails):
    FAIL("TCP hping2 failed for size" + fails + ".")
| 2.4375 | 2 |
src/rosdiscover/interpreter/interpreter.py | rosqual/rosdiscover | 2 | 12764194 | <gh_stars>1-10
# -*- coding: utf-8 -*-
from typing import Dict, Iterator, Optional
import contextlib
import types
import typing as t
from loguru import logger
import roswire
from roswire import AppInstance, ROSVersion
from roswire.common.launch.config import NodeConfig
from roswire.ros1.launch.reader import ROS1LaunchFileReader
from roswire.ros2.launch.reader import ROS2LaunchFileReader
from .context import NodeContext
from .model import PlaceholderModel
from .summary import SystemSummary
from .parameter import ParameterServer
from ..config import Config
from ..launch import Launch
from ..project import ProjectModels
class Interpreter:
    """
    Simulates the effects of launching a ROS application in order to
    recover its architecture (nodes, topics, parameters).

    Attributes
    ----------
    params: ParameterServer
        The simulated parameter server for this interpreter.
    """
    @classmethod
    @contextlib.contextmanager
    def for_config(cls,
                   config: Config
                   ) -> Iterator['Interpreter']:
        """Constructs an interpreter for a given configuration"""
        rsw = roswire.ROSWire() # TODO don't maintain multiple instances
        with rsw.launch(config.image, config.sources, environment=config.environment) as app:
            with Interpreter(config, app) as interpreter:
                yield interpreter

    def __init__(
        self,
        config: Config,
        app: roswire.System,
    ) -> None:
        self._app = app
        self.params = ParameterServer()
        # Maps fully-qualified node names to their simulated contexts.
        self.nodes: Dict[str, NodeContext] = {}
        self.models = ProjectModels(config, allow_recovery=True)

    def open(self) -> None:
        """Open the underlying model store."""
        self.models.open()

    def close(self) -> None:
        """Close the underlying model store."""
        self.models.close()

    def __enter__(self) -> "Interpreter":
        self.open()
        return self

    def __exit__(
        self,
        ex_type: t.Optional[t.Type[BaseException]],
        ex_val: t.Optional[BaseException],
        ex_tb: t.Optional[types.TracebackType],
    ) -> None:
        self.close()

    @property
    def app(self) -> AppInstance:
        """The application instance being interpreted."""
        return self._app

    def summarise(self) -> SystemSummary:
        """Produces an immutable description of the system architecture."""
        node_summaries = [node.summarise() for node in self.nodes.values()]
        node_to_summary = {s.fullname: s for s in node_summaries}
        return SystemSummary(node_to_summary)

    def launch(self, launch_description: Launch) -> None:
        """Simulates the effects of `roslaunch` using a given launch file."""
        # NOTE this method also supports command-line arguments
        if self._app.description.distribution.ros == ROSVersion.ROS1:
            reader = ROS1LaunchFileReader.for_app_instance(self._app)
        else:
            reader = ROS2LaunchFileReader.for_app_instance(self._app)
        logger.debug(f"get_argv: {launch_description.get_argv()}")
        config = reader.read(launch_description.filename, launch_description.get_argv())
        # Populate the simulated parameter server from the launch config.
        for param in config.params.values():
            self.params[param.name] = param.value

        def key(x: NodeConfig) -> str:
            # Sort key: nodelets (other than managers) sort after everything
            # else so their managers exist before they are loaded.
            if not x.args:
                return "a"
            assert isinstance(x.args, str)
            return "z" if x.typ == "nodelet" and x.args.strip() != 'manager' else "a"

        # Sort nodes so that nodelets occur after node managers
        sorted_nodes = sorted(config.nodes, key=key)
        for node in sorted_nodes:
            if not node.filename:
                m = ("unable to determine associated launch file for "
                     f"node: {node}")
                raise Exception(m)
            logger.debug(f"launching node: {node.name} from {node.filename}")
            try:
                args = node.args or ''
                remappings = {old: new for (old, new) in node.remappings}
                self._load(pkg=node.package,
                           nodetype=node.typ,
                           name=node.name,
                           namespace=node.namespace,  # FIXME
                           launch_filename=node.filename,
                           remappings=remappings,
                           args=args
                           )
            # FIXME this is waaay too permissive
            except Exception:
                logger.exception(f"failed to launch node: {node.name}")
                raise
        # now that all nodes have been initialised, load all plugins
        for node_context in self.nodes.values():
            for plugin in node_context._plugins:
                plugin.load(self)

    def _create_nodelet_manager(self,
                                name: str,
                                namespace: str,
                                manager: str,
                                launch_filename: str,
                                remappings: t.Mapping[str, str]) -> None:
        """Creates a nodelet manager with a given name."""
        logger.info(f'launched nodelet manager: {manager} as {name}')
        ctx = NodeContext(name=name,
                          namespace=namespace,
                          kind="nodelet",
                          package="nodelet",
                          launch_filename=launch_filename,
                          remappings=remappings,
                          files=self._app.files,
                          params=self.params,
                          app=self._app,
                          args='')
        self.nodes[ctx.fullname] = ctx

    def _load_nodelet(self,
                      pkg: str,
                      nodetype: str,
                      name: str,
                      namespace: str,
                      launch_filename: str,
                      remappings: Dict[str, str],
                      manager: Optional[str] = None
                      ) -> None:
        """Loads a nodelet using the provided instructions.

        Parameters
        ----------
        pkg: str
            the name of the package to which the nodelet belongs.
        nodetype: str
            the name of the type of nodelet that should be loaded.
        name: str
            the name that should be assigned to the nodelet.
        namespace: str
            the namespace into which the nodelet should be loaded.
        launch_filename: str
            the absolute path to the XML launch file where this node
            was declared.
        remappings: Dict[str, str]
            a dictionary of name remappings that should be applied
            to this nodelet, where keys correspond to old names and values
            correspond to new names.
        manager: Optional[str]
            the name of the manager, if any, for this nodelet. If
            this nodelet is standalone, :code:`manager` should be set to
            :code:`None`.

        Raises
        ------
        Exception
            if there is no model for the given nodelet type.
        """
        if manager:
            logger.info(f'launching nodelet [{name}] '
                        f'inside manager [{manager}] from {launch_filename}')
            return self._load(pkg=pkg,
                              nodetype=nodetype,
                              name=name,
                              namespace=namespace,
                              launch_filename=launch_filename,
                              remappings=remappings,
                              args=f'manager {manager}'
                              )
        else:
            logger.info(f'launching standalone nodelet [{name}]')
            return self._load(pkg=pkg,
                              nodetype=nodetype,
                              name=name,
                              namespace=namespace,
                              launch_filename=launch_filename,
                              remappings=remappings,
                              args=''
                              )

    def _load(self,
              pkg: str,
              nodetype: str,
              name: str,
              namespace: str,
              launch_filename: str,
              remappings: Dict[str, str],
              args: str,
              ) -> None:
        """Loads a node using the provided instructions.

        Parameters
        ----------
        pkg: str
            the name of the package to which the node belongs.
        nodetype: str
            the name of the type of node that should be loaded.
        name: str
            the name that should be assigned to the node.
        namespace: str
            the namespace into which the node should be loaded.
        launch_filename: str
            the absolute path to the XML launch file where this node
            was declared.
        remappings: Dict[str, str]
            a dictionary of name remappings that should be applied
            to this node, where keys correspond to old names and values
            correspond to new names.
        args: str
            a string containing command-line arguments to the node.

        Raises
        ------
        Exception
            if there is no model for the given node type.
        """
        args = args.strip()
        split_args = args.split(" ")
        # Nodelets are dispatched by their argument string: "manager ...",
        # "standalone pkg/type", or "load pkg/type manager [args]".
        if nodetype == 'nodelet':
            if args.startswith('manager'):
                manager = args.partition(' ')[2]
                return self._create_nodelet_manager(name, namespace, manager, launch_filename, remappings)
            elif args.startswith('standalone '):
                pkg_and_nodetype = args.partition(' ')[2]
                pkg, _, nodetype = pkg_and_nodetype.partition('/')
                return self._load_nodelet(pkg=pkg,
                                          nodetype=nodetype,
                                          name=name,
                                          namespace=namespace,
                                          launch_filename=launch_filename,
                                          remappings=remappings
                                          )
            else:
                load = split_args[0]  # noqa: F841
                pkg_and_nodetype = split_args[1]
                mgr = split_args[2]
                nodelet_args = "".join(split_args[3:])  # noqa: F841
                pkg, _, nodetype = pkg_and_nodetype.partition('/')
                return self._load_nodelet(pkg=pkg,
                                          nodetype=nodetype,
                                          name=name,
                                          namespace=namespace,
                                          launch_filename=launch_filename,
                                          remappings=remappings,
                                          manager=mgr
                                          )

        if remappings:
            logger.info(f"using remappings: {remappings}")

        try:
            model = self.models.fetch(pkg, nodetype)
            # This is to handle nodelet strangness
            # If we can't find it through node type, look for it by name
            if isinstance(model, PlaceholderModel) and name != nodetype:
                model = self.models.fetch(pkg, name)
        except Exception:
            m = (f"failed to find model for node type [{nodetype}] "
                 f"in package [{pkg}]")
            logger.warning(m)
            raise

        if args.startswith('manager'):
            # This is being loaded into an existing manager, so find that as the context
            manager_name = args.split(" ")[1]
            if namespace:
                manager_name = f"{namespace}/{manager_name}"
                manager_name = manager_name.replace('//', '/')
            if manager_name in self.nodes:
                manager_context = self.nodes[manager_name]
            elif f"/{manager_name}" in self.nodes:
                manager_context = self.nodes[f"/{manager_name}"]
            else:
                raise ValueError(f"The nodelet manager {manager_name} has not been launched")
            # Create a context for the nodelet
            ctx = NodeContext(name=name,
                              namespace=namespace,
                              kind=nodetype,
                              package=pkg,
                              args=args,
                              launch_filename=launch_filename,
                              remappings=remappings,
                              files=self._app.files,
                              params=self.params,
                              app=self._app)
            model.eval(ctx)
            manager_context.load_nodelet(ctx)
            # Place the nodelet as a node, which is observed
            # TODO: This needs to be rethought -- we should have a separate NodeletManagerContext
            # that con contain NodeletContexts. This would better map the NodeletManager/
            # Nodelet mapping, and would actually contain traceability between topics
            self.nodes[ctx.fullname] = ctx
        else:
            ctx = NodeContext(name=name,
                              namespace=namespace,
                              kind=nodetype,
                              package=pkg,
                              args=args,
                              launch_filename=launch_filename,
                              remappings=remappings,
                              files=self._app.files,
                              params=self.params,
                              app=self._app)
            self.nodes[ctx.fullname] = ctx
            model.eval(ctx)
| 2.09375 | 2 |
sdss/dr8.py | juandesant/astrometry.net | 460 | 12764195 | # This file is part of the Astrometry.net suite.
# Licensed under a 3-clause BSD style license - see LICENSE
from __future__ import print_function
from __future__ import absolute_import
import os
from astrometry.util.fits import fits_table
import numpy as np
import logging
import tempfile
import sys
py3 = (sys.version_info[0] >= 3)
# urljoin moved from urlparse (Python 2) to urllib.parse (Python 3).
if py3:
    from urllib.parse import urljoin
else:
    from urlparse import urljoin
fitsio = None
# Prefer the fitsio package when available; otherwise fall back to pyfits,
# then to astropy.io.fits imported under the pyfits name.
try:
    import fitsio
except:
    try:
        import pyfits
    except ImportError:
        try:
            from astropy.io import fits as pyfits
        except ImportError:
            raise ImportError("Cannot import either pyfits or astropy.io.fits")
from .common import *
from .dr7 import *
from .yanny import *
from astrometry.util.run_command import run_command
class Frame(SdssFile):
    """A DR8 'frame' file: calibrated image, calibration vector, binned sky
    model, and astrometric transform for one run/camcol/field/band."""
    def __init__(self, *args, **kwargs):
        super(Frame, self).__init__(*args, **kwargs)
        self.filetype = 'frame'
        self.image = None
        # When reading via fitsio, the pixels stay on disk behind this
        # FITSHDU proxy until getImage()/getImageSlice() needs them.
        self.image_proxy = None
    def getImageShape(self):
        """Return (H, W) of the image without forcing a full pixel read."""
        if self.image_proxy is not None:
            # fitsio fits.FITSHDU object
            H,W = self.image_proxy.get_info()['dims']
            H = int(H)
            W = int(W)
        else:
            H,W = self.image.shape
        return H,W
    def getImageSlice(self, slice):
        """Return the requested slice, reading lazily via the proxy if set."""
        if self.image_proxy is not None:
            #print 'reading slice from image proxy:', slice
            return self.image_proxy[slice]
        return self.image[slice]
    #def __str__(self):
    def getImage(self):
        """Return the full image array, materializing the lazy proxy once."""
        if self.image is None and self.image_proxy is not None:
            self.image = self.image_proxy.read()
            self.image_proxy = None
        return self.image
    def getHeader(self):
        """Return the primary FITS header."""
        return self.header
    def getAsTrans(self):
        """Return the AsTrans astrometric transform for this frame."""
        return self.astrans
    def getCalibVec(self):
        """Return the counts -> nanomaggies calibration vector."""
        return self.calib
    def getSkyAt(self, x, y):
        """Nearest-neighbour sample of the binned sky model at pixel (x, y)."""
        skyim = self.sky
        (sh,sw) = skyim.shape
        # Normalize orientation: the binned sky is stored 256 wide.
        if sw != 256:
            skyim = skyim.T
            (sh,sw) = skyim.shape
        xi = np.round(self.skyxi[x]).astype(int)
        yi = np.round(self.skyyi[y]).astype(int)
        yi = np.minimum(yi,sh-1)
        return skyim[yi,xi]
    def getSky(self):
        """Expand the binned sky model to full image size (nearest-neighbour)."""
        skyim = self.sky
        (sh,sw) = skyim.shape
        if sw != 256:
            skyim = skyim.T
            (sh,sw) = skyim.shape
        xi = np.round(self.skyxi).astype(int)
        yi = np.round(self.skyyi).astype(int)
        yi = np.minimum(yi,sh-1)
        assert(all(xi >= 0) and all(xi < sw))
        assert(all(yi >= 0) and all(yi < sh))
        XI,YI = np.meshgrid(xi, yi)
        # Nearest-neighbour interpolation -- we just need this
        # for approximate invvar.
        bigsky = skyim[YI,XI]
        return bigsky
    def getInvvar(self, psfield, bandnum, ignoreSourceFlux=False,
                  sourceFlux=None, constantSkyAt=None):
        '''
        Return the inverse variance of the image.

        If constantSkyAt = (x,y) (INTEGERS!),
        returns a scalar (rather than a np.array) of the invvar at that point.

        NOTE that this does NOT blank out masked pixels; use, eg,

        fpM = sdss.readFpM(run, camcol, field, bandname)
        for plane in [ 'INTERP', 'SATUR', 'CR', 'GHOST' ]:
            fpM.setMaskedPixels(plane, invvar, 0, roi=roi)
        '''
        calibvec = self.getCalibVec()
        if constantSkyAt:
            x,y = constantSkyAt
            calibvec = calibvec[x]
            sky = self.getSkyAt(x,y)
            # dn = data numbers (raw counts): image/calib plus the sky level.
            if ignoreSourceFlux:
                dn = sky
            elif sourceFlux is None:
                image = self.getImage()
                dn = (image[y,x] / calibvec) + sky
            else:
                dn = (sourceFlux / calibvec) + sky
        else:
            bigsky = self.getSky()
            if ignoreSourceFlux:
                dn = bigsky
            elif sourceFlux is None:
                image = self.getImage()
                dn = (image / calibvec) + bigsky
            else:
                dn = (sourceFlux / calibvec) + bigsky
        gain = psfield.getGain(bandnum)
        # Note, "darkvar" includes dark current *and* read noise.
        darkvar = psfield.getDarkVariance(bandnum)
        # Poisson variance in DN plus dark variance, converted to
        # calibrated (nanomaggies) units and inverted.
        dnvar = (dn / gain) + darkvar
        invvar = 1./(dnvar * calibvec**2)
        return invvar
class PhotoObj(SdssFile):
    """SdssFile subclass holding the table read from a photoObj FITS file."""
    def __init__(self, *args, **kwargs):
        super(PhotoObj, self).__init__(*args, **kwargs)
        # No table loaded yet; DR8.readPhotoObj() fills this in.
        self.table = None
        self.filetype = 'photoObj'

    def getTable(self):
        """Return the fits_table read for this photoObj (or None)."""
        return self.table
class runlist(object):
    # Plain record holding parallel arrays (run, startfield, endfield,
    # rerun) parsed from the runList yanny file; attributes are assigned
    # in DR8.__init__.
    pass
class DR8(DR7):
    """SDSS Data Release 8 data access: local path templates, DAS URLs,
    retrieval/unzipping, and readers for frame/photoObj files."""
    # Softening parameters b (maggies) for asinh magnitudes, per band ugriz.
    _lup_to_mag_b = np.array([1.4e-10, 0.9e-10, 1.2e-10, 1.8e-10, 7.4e-10])
    _two_lup_to_mag_b = 2.*_lup_to_mag_b
    _ln_lup_to_mag_b = np.log(_lup_to_mag_b)
    '''
    From
    http://data.sdss3.org/datamodel/glossary.html#asinh

    m = -(2.5/ln(10))*[asinh(f/2b)+ln(b)].

    The parameter b is a softening parameter measured in maggies, and
    for the [u, g, r, i, z] bands has the values
    [1.4, 0.9, 1.2, 1.8, 7.4] x 1e-10
    '''
    @staticmethod
    def luptitude_to_mag(Lmag, bandnum, badmag=25):
        """Convert asinh ('luptitude') magnitudes to conventional mags;
        non-positive fluxes are assigned `badmag`."""
        if bandnum is None:
            # assume Lmag is broadcastable to a 5-vector
            twobi = DR8._two_lup_to_mag_b
            lnbi = DR8._ln_lup_to_mag_b
        else:
            twobi = DR8._two_lup_to_mag_b[bandnum]
            lnbi = DR8._ln_lup_to_mag_b[bandnum]
        # MAGIC -1.08.... = -2.5/np.log(10.)
        f = np.sinh(Lmag/-1.0857362047581294 - lnbi) * twobi
        # prevent log10(-flux)
        mag = np.zeros_like(f) + badmag
        I = (f > 0)
        mag[I] = -2.5 * np.log10(f[I])
        return mag
    @staticmethod
    def nmgy_to_mag(nmgy):
        """Convert nanomaggies to AB magnitudes (22.5 zeropoint)."""
        return 22.5 - 2.5 * np.log10(nmgy)
    def getDRNumber(self):
        """Return the data-release number (8)."""
        return 8
    def useLocalTree(self, photoObjs=None, resolve=None):
        """Point the path templates at a local SAS-style directory tree,
        using BOSS_PHOTOOBJ / PHOTO_REDUX / PHOTO_RESOLVE env vars by default."""
        if photoObjs is None:
            photoObjs = os.environ['BOSS_PHOTOOBJ']
        redux = os.environ['PHOTO_REDUX']
        if resolve is None:
            resolve = os.environ['PHOTO_RESOLVE']
        self.filenames.update(
            photoObj = os.path.join(photoObjs, '%(rerun)s', '%(run)i', '%(camcol)i',
                                    'photoObj-%(run)06i-%(camcol)i-%(field)04i.fits'),
            frame = os.path.join(photoObjs, 'frames', '%(rerun)s', '%(run)i', '%(camcol)i',
                                 'frame-%(band)s-%(run)06i-%(camcol)i-%(field)04i.fits.bz2'),
            photoField = os.path.join(photoObjs, '%(rerun)s', '%(run)i',
                                      'photoField-%(run)06i-%(camcol)i.fits'),
            psField = os.path.join(redux, '%(rerun)s', '%(run)i', 'objcs', '%(camcol)i',
                                   'psField-%(run)06i-%(camcol)i-%(field)04i.fit'),
            fpM = os.path.join(redux, '%(rerun)s', '%(run)i', 'objcs', '%(camcol)i',
                               'fpM-%(run)06i-%(band)s%(camcol)i-%(field)04i.fit.gz'),
            window_flist = os.path.join(resolve, 'window_flist.fits'),
            )
        # use fpM files compressed
        try:
            del self.dassuffix['fpM']
        except:
            pass
        try:
            del self.processcmds['fpM']
        except:
            pass
    def saveUnzippedFiles(self, basedir):
        """Keep bunzip2'd frame files under `basedir` instead of temp files."""
        self.unzip_dir = basedir
    def setFitsioReadBZ2(self, to=True):
        '''
        Call this if fitsio supports reading .bz2 files directly.
        '''
        self.readBz2 = to
    def __init__(self, **kwargs):
        '''
        Useful kwargs:

        basedir : (string) - local directory where data will be stored.
        '''
        DR7.__init__(self, **kwargs)
        self.unzip_dir = None
        self.readBz2 = False
        # Local filenames
        self.filenames.update({
            'frame': 'frame-%(band)s-%(run)06i-%(camcol)i-%(field)04i.fits.bz2',
            'idR': 'idR-%(run)06i-%(band)s-%(camcol)i-%(field)04i.fits',
            'photoObj': 'photoObj-%(run)06i-%(camcol)i-%(field)04i.fits',
            'photoField': 'photoField-%(run)06i-%(camcol)i.fits',
            'window_flist': 'window_flist.fits',
            })
        # URLs on DAS server
        self.dasurl = 'http://data.sdss3.org/sas/dr8/groups/boss/'
        self.daspaths = {
            'idR': 'photo/data/%(run)i/fields/%(camcol)i/idR-%(run)06i-%(band)s%(camcol)i-%(field)04i.fit.Z',
            'fpObjc': 'photo/redux/%(rerun)s/%(run)i/objcs/%(camcol)i/fpObjc-%(run)06i-%(camcol)i-%(field)04i.fit',
            # DR8 frames are no longer available on DAS.
            'frame': '/sas/dr9/boss/photoObj/frames/%(rerun)s/%(run)i/%(camcol)i/frame-%(band)s-%(run)06i-%(camcol)i-%(field)04i.fits.bz2',
            #'frame': 'photoObj/frames/%(rerun)s/%(run)i/%(camcol)i/frame-%(band)s-%(run)06i-%(camcol)i-%(field)04i.fits.bz2',
            'photoObj': 'photoObj/%(rerun)s/%(run)i/%(camcol)i/photoObj-%(run)06i-%(camcol)i-%(field)04i.fits',
            'psField': 'photo/redux/%(rerun)s/%(run)i/objcs/%(camcol)i/psField-%(run)06i-%(camcol)i-%(field)04i.fit',
            'photoField': 'photoObj/%(rerun)s/%(run)i/photoField-%(run)06i-%(camcol)i.fits',
            'fpM': 'photo/redux/%(rerun)s/%(run)i/objcs/%(camcol)i/fpM-%(run)06i-%(band)s%(camcol)i-%(field)04i.fit.gz',
            'fpAtlas': 'photo/redux/%(rerun)s/%(run)i/objcs/%(camcol)i/fpAtlas-%(run)06i-%(camcol)i-%(field)04i.fit',
            'window_flist': 'resolve/2010-05-23/window_flist.fits',
            }
        # Extra suffix the downloaded (compressed) files carry locally.
        self.dassuffix = {
            #'frame': '.bz2',
            'fpM': '.gz',
            'idR': '.Z',
            }
        # called in retrieve()
        self.processcmds = {
            'fpM': 'gunzip -cd %(input)s > %(output)s',
            'idR': 'gunzip -cd %(input)s > %(output)s',
            }
        self.postprocesscmds = {
            'frame': 'TMPFILE=$(mktemp %(output)s.tmp.XXXXXX) && bunzip2 -cd %(input)s > $TMPFILE && mv $TMPFILE %(output)s',
            }
        # Parse the run list (run -> rerun / field range mapping).
        y = read_yanny(self._get_runlist_filename())
        y = y['RUNDATA']
        rl = runlist()
        rl.run = np.array(y['run'])
        rl.startfield = np.array(y['startfield'])
        rl.endfield = np.array(y['endfield'])
        rl.rerun = np.array(y['rerun'])
        #print 'Rerun type:', type(rl.rerun), rl.rerun.dtype
        self.runlist = rl
        self.logger = logging.getLogger('astrometry.sdss.DR%i' %
                                        self.getDRNumber())
        #self.logger.debug('debug test')
        #self.logger.info('info test')
        #self.logger.warning('warning test')
    def _unzip_frame(self, fn, run, camcol):
        """Bunzip2 a frame file if needed. Returns (tempfn, keep): tempfn is
        the unzipped path (or None if `fn` can be used directly) and keep
        says whether the caller must NOT delete it afterwards."""
        if self.readBz2:
            return None,True
        # No, PJM reported that pyfits failed on SDSS frame*.bz2 files
        # if not fitsio:
        #     # pyfits can read .bz2
        #     return None,True
        tempfn = None
        keep = False
        filetype = 'frame'
        if not(filetype in self.postprocesscmds and fn.endswith('.bz2')):
            return None,True
        cmd = self.postprocesscmds[filetype]
        if self.unzip_dir is not None:
            udir = os.path.join(self.unzip_dir, '%i' % run, '%i' % camcol)
            if not os.path.exists(udir):
                try:
                    os.makedirs(udir)
                except:
                    pass
            tempfn = os.path.join(udir, os.path.basename(fn).replace('.bz2', ''))
            #print 'Checking', tempfn
            if os.path.exists(tempfn):
                print('File exists:', tempfn)
                return tempfn,True
            else:
                print('Saving to', tempfn)
                keep = True
        else:
            fid,tempfn = tempfile.mkstemp()
            os.close(fid)
        cmd = cmd % dict(input = fn, output = tempfn)
        self.logger.debug('cmd: %s' % cmd)
        print('command:', cmd)
        (rtn,out,err) = run_command(cmd)
        if rtn:
            print('Command failed: command', cmd)
            print('Output:', out)
            print('Error:', err)
            print('Return val:', rtn)
            raise RuntimeError('Command failed (return val %i): %s' % (rtn, cmd))
        print(out)
        print(err)
        return tempfn,keep
    def _get_runlist_filename(self):
        return self._get_data_file('runList-dr8.par')
    # read a data file describing the DR8 data
    def _get_data_file(self, fn):
        return os.path.join(os.path.dirname(__file__), fn)
    def get_rerun(self, run, field=None):
        """Return the latest rerun covering `run` (and `field`, if given),
        or None if the run is not in the run list."""
        I = (self.runlist.run == run)
        if field is not None:
            I *= (self.runlist.startfield <= field) * (self.runlist.endfield >= field)
        I = np.flatnonzero(I)
        reruns = np.unique(self.runlist.rerun[I])
        #print 'Run', run, '-> reruns:', reruns
        if len(reruns) == 0:
            return None
        return reruns[-1]
    def get_url(self, filetype, run, camcol, field, band=None, rerun=None):
        """Build the DAS URL for the given file type and field identifiers."""
        if rerun is None:
            rerun = self.get_rerun(run, field)
        path = self.daspaths[filetype]
        url = urljoin(self.dasurl, path % dict(
            run=run, camcol=camcol, field=field, rerun=rerun, band=band))
        return url
    def retrieve(self, filetype, run, camcol, field=None, band=None, skipExisting=True,
                 tempsuffix='.tmp', rerun=None):
        """Download (curl/wget) the given file from the DAS into the local
        tree, decompressing where configured. Returns the local path or None."""
        outfn = self.getPath(filetype, run, camcol, field, band,
                             rerun=rerun)
        print('Checking for file', outfn)
        if outfn is None:
            return None
        if skipExisting and os.path.exists(outfn):
            #print('Exists')
            return outfn
        outdir = os.path.dirname(outfn)
        if not os.path.exists(outdir):
            try:
                os.makedirs(outdir)
            except:
                pass
        url = self.get_url(filetype, run, camcol, field, band=band, rerun=rerun)
        #print 'Did not find file:', outfn
        print('Retrieving from URL:', url)
        if self.curl:
            cmd = "curl -o '%(outfn)s' '%(url)s'"
        else:
            cmd = "wget --continue -nv -O %(outfn)s '%(url)s'"
        # suffix to add to the downloaded filename
        suff = self.dassuffix.get(filetype, '')
        oo = outfn + suff
        if tempsuffix is not None:
            oo += tempsuffix
        cmd = cmd % dict(outfn=oo, url=url)
        self.logger.debug('cmd: %s' % cmd)
        (rtn,out,err) = run_command(cmd)
        if rtn:
            print('Command failed: command', cmd)
            print('Output:', out)
            print('Error:', err)
            print('Return val:', rtn)
            return None
        if tempsuffix is not None:
            #
            self.logger.debug('Renaming %s to %s' % (oo, outfn+suff))
            os.rename(oo, outfn + suff)
        if filetype in self.processcmds:
            cmd = self.processcmds[filetype]
            cmd = cmd % dict(input = outfn + suff, output = outfn)
            self.logger.debug('cmd: %s' % cmd)
            (rtn,out,err) = run_command(cmd)
            if rtn:
                print('Command failed: command', cmd)
                print('Output:', out)
                print('Error:', err)
                print('Return val:', rtn)
                return None
        return outfn
    def readPhotoObj(self, run, camcol, field, filename=None):
        """Read a photoObj file into a PhotoObj (its .table holds the rows)."""
        obj = PhotoObj(run, camcol, field)
        if filename is None:
            fn = self.getPath('photoObj', run, camcol, field)
        else:
            fn = filename
        obj.table = fits_table(fn)
        return obj
    def readFrame(self, run, camcol, field, band, filename=None):
        '''
        http://data.sdss3.org/datamodel/files/BOSS_PHOTOOBJ/frames/RERUN/RUN/CAMCOL/frame.html
        '''
        f = Frame(run, camcol, field, band)
        # ...
        if filename is None:
            fn = self.getPath('frame', run, camcol, field, band)
        else:
            fn = filename
        # optionally bunzip2 the frame file.
        tempfn,keep = self._unzip_frame(fn, run, camcol)
        if tempfn is not None:
            fn = tempfn
        if fitsio:
            print('Frame filename', fn)
            # eg /clusterfs/riemann/raid006/dr10/boss/photoObj/frames/301/2825/1/frame-u-002825-1-0126.fits.bz2
            F = fitsio.FITS(fn, lower=True)
            f.header = F[0].read_header()
            # Allow later reading of just the pixels of interest.
            f.image_proxy = F[0]
            f.calib = F[1].read()
            sky = F[2].read_columns(['allsky', 'xinterp', 'yinterp'])
            #print 'sky', type(sky)
            # ... supposed to be a recarray, but it's not...
            f.sky, f.skyxi, f.skyyi = sky.tolist()[0]
            tab = fits_table(F[3].read())
            if not keep and tempfn is not None:
                os.remove(tempfn)
        else:
            p = pyfits.open(fn)
            # in nanomaggies
            f.image = p[0].data
            f.header = p[0].header
            # converts counts -> nanomaggies
            f.calib = p[1].data
            # table with val,x,y -- binned; use bilinear interpolation to expand
            sky = p[2].data
            # table -- asTrans structure
            tab = fits_table(p[3].data)
            f.sky = sky.field('allsky')[0]
            f.skyxi = sky.field('xinterp')[0]
            f.skyyi = sky.field('yinterp')[0]
            #print 'sky shape', f.sky.shape
            if len(f.sky.shape) != 2:
                f.sky = f.sky.reshape((-1, 256))
        assert(len(tab) == 1)
        tab = tab[0]
        # DR7 has NODE, INCL in radians...
        f.astrans = AsTrans(run, camcol, field, band,
                            node=np.deg2rad(tab.node), incl=np.deg2rad(tab.incl),
                            astrans=tab, cut_to_band=False)
        return f
| 2.03125 | 2 |
app.py | ace-spadez/btp-app | 0 | 12764196 |
import base64
import json
import sys
import wave
from flask import Flask, jsonify, request
from flask_cors import CORS
import parselmouth
import pandas as pd
from scipy.signal import find_peaks
import numpy as np
import matplotlib.pyplot as plt
app = Flask(__name__)
# Host/port for app.run(); the port is supplied as the first CLI argument
# (presumably by the Electron shell that spawns this server — TODO confirm).
app_config = {"host": "0.0.0.0", "port": sys.argv[1]}
"""
---------------------- DEVELOPER MODE CONFIG -----------------------
"""
# Developer mode uses app.py
if "app.py" in sys.argv[0]:
    # Update app config
    app_config["debug"] = True
    # CORS settings
    cors = CORS(app, resource={
        r"/*":{
            "origins":"*"
        }
    })
    # CORS headers
    app.config["CORS_HEADERS"] = "Content-Type"
"""
--------------------------- REST CALLS -----------------------------
"""
def draw_pitch(pitch):
    """Plot the pitch contour onto the current matplotlib figure."""
    # Extract selected pitch contour, and
    # replace unvoiced samples by NaN to not plot
    pitch_values = pitch.selected_array['frequency']
    pitch_values[pitch_values==0] = np.nan
    # Draw a white halo first, then the smaller colored marker on top.
    plt.plot(pitch.xs(), pitch_values, 'o', markersize=5, color='w')
    plt.plot(pitch.xs(), pitch_values, 'o', markersize=2)
    plt.grid(False)
    plt.ylim(0, pitch.ceiling)
    plt.ylabel("fundamental frequency [Hz]")
# Remove and replace with your own
@app.route("/example",methods=['GET','POST'])
def example():
    """Extract the pitch contour of the audio at request['filepath'],
    save a pitch plot to image1.png, and return the voiced (x, y) points."""
    print(request.json)
    data=request.get_json()
    snd = parselmouth.Sound(data['filepath'])
    pitch = snd.to_pitch()
    plt.figure()
    plt.twinx()
    x=pitch.xs()
    y=pitch.selected_array['frequency']
    # Keep only voiced frames (frequency 0 marks unvoiced).
    dataPoints=[]
    for i in range(len(y)):
        if(y[i]!=0):
            dataPoints.append({"x":x[i],"y":y[i]})
    print(dataPoints)
    draw_pitch(pitch)
    plt.xlim([snd.xmin, snd.xmax])
    name="image1.png"
    plt.savefig(name)
    # NOTE(review): the base64-encoded image below is computed but never
    # returned; only dataPoints is sent back to the frontend.
    data=""
    with open("image1.png", "rb") as image_file:
        data = format(base64.b64encode(image_file.read()))
    # See /src/components/App.js for frontend call
    return {"dataPoints":dataPoints}
@app.route("/wavepattern",methods=['GET','POST'])
def wavepattern():
    """Return a downsampled waveform (averaged in chunks of 100 samples)
    for the WAV file at request['filepath']; also saves a plot to image2.png."""
    data=request.get_json()
    snd = parselmouth.Sound(data['filepath'])
    pitch = snd.to_pitch()
    plt.figure()
    plt.twinx()
    path = data['filepath']
    raw = wave.open(path)
    signal = raw.readframes(-1)
    signal = np.frombuffer(signal, dtype ="int16")
    f_rate = raw.getframerate()
    # Time axis in seconds for each sample.
    time = np.linspace(
        0,
        len(signal) / f_rate,
        num = len(signal)
    )
    dataPoints=[]
    cnt=0
    cur_x=0
    cur_y=0
    # NOTE(review): this emits a (0, 0) point at i == 0 and each chunk sums
    # only 99 samples (the i%100==0 sample is skipped) before dividing by
    # 100 — confirm whether this off-by-one is intentional.
    for i in range(len(signal)):
        if i%100==0:
            dataPoints.append({"x":cur_x/100,"y":cur_y/100})
            cur_x=0
            cur_y=0
        else:
            cur_x+=float(time[i])
            cur_y+=float(signal[i])
    plt.ylabel("fundamental frequency [Hz]")
    plt.plot(time, signal,color="red")
    # plt.xlim([snd.xmin, snd.xmax])
    name="image2.png"
    plt.savefig(name)
    # NOTE(review): the base64 image is computed but unused in the response.
    data=""
    with open("image2.png", "rb") as image_file:
        data = format(base64.b64encode(image_file.read()))
    # See /src/components/App.js for frontend call
    # print(dataPoints)
    return jsonify({"dataPoints":dataPoints})
def differentitate_pitch(pitch,pitch2,pitch_values1,pitch_values2,s1,s2):
    """Plot where two pitch contours differ by more than 50 Hz.

    The longer contour is truncated to the shorter one's length; samples
    from contour 2 whose frequency differs from contour 1 by > 50 Hz are
    collected into `p` and plotted on top of the full contour.
    (Note: the function name carries a historical typo; callers use it.)
    """
    # Extract selected pitch contour, and
    # replace unvoiced samples by NaN to not plot
    if s1>s2:
        pitch_values1=pitch_values1[:s2]
    if s1<s2:
        pitch_values2=pitch_values2[:s1]
    cnt = 0
    # p holds only the "different" samples; everything else stays NaN.
    p = np.empty((pitch_values1.size))
    for i in range(0,pitch_values1.size):
        p[i]=np.nan
    for i in range(0,pitch_values1.size):
        if abs(pitch_values1[i]-pitch_values2[i])>50:
            #print(pitch_values2[i])
            p[i]=pitch_values2[i]
            cnt += 1
    # print(cnt)
    # print(p)
    #plt.plot(pitch2.xs(), pitch_values2, 'o', markersize=5, color='w',label='differences')
    #plt.plot(pitch2.xs(), pitch_values2, 'o', markersize=2)
    if s1>s2:
        plt.plot(pitch2.xs(), pitch_values2, 'o', markersize=5, color='w',label='differences')
        plt.plot(pitch2.xs(), pitch_values2, 'o', markersize=2)
        plt.plot(pitch2.xs(), p, 'o', markersize=5, color='w',label='normal')
        plt.plot(pitch2.xs(), p, 'o', markersize=2)
        #draw_pitch(pitch)
    if s1<s2:
        plt.plot(pitch.xs(), pitch_values1, 'o', markersize=5, color='w',label='differences')
        plt.plot(pitch.xs(), pitch_values1, 'o', markersize=2)
        plt.plot(pitch.xs(), p, 'o', markersize=5, color='w',label='normal')
        plt.plot(pitch.xs(), p, 'o', markersize=2)
        #draw_pitch(pitch2)
    plt.grid(False)
    plt.ylim(0, pitch.ceiling)
    plt.ylabel("fundamental frequency [Hz]")
@app.route("/speechpattern",methods=['GET','POST'])
def speechpattern():
    """Compare the pitch contours of two audio files and return a base64
    PNG (saved as image3.png) highlighting their differences."""
    data=request.get_json()
    snd = parselmouth.Sound(data['filepath1'])
    pitch = snd.to_pitch()
    snd2 = parselmouth.Sound(data['filepath2'])
    pitch2 = snd2.to_pitch()
    pitch_values1 = pitch.selected_array['frequency']
    pitch_values1[pitch_values1==0] = np.nan
    pitch_values2 = pitch2.selected_array['frequency']
    pitch_values2[pitch_values2==0] = np.nan
    s1=pitch_values1.size
    s2=pitch_values2.size
    # NOTE(review): the difference overlay is only drawn when s1 > s2; for
    # s1 < s2 just the second contour is plotted, and for s1 == s2 nothing
    # is plotted at all — confirm this asymmetry is intended.
    if s1>s2:
        draw_pitch(pitch)
        differentitate_pitch(pitch,pitch2,pitch_values1,pitch_values2,s1,s2)
    if s1<s2:
        draw_pitch(pitch2)
    plt.xlim([snd2.xmin-0.2, snd2.xmax+0.2])
    name="image3.png"
    plt.savefig(name)
    data=""
    with open("image3.png", "rb") as image_file:
        data = format(base64.b64encode(image_file.read()))
    # See /src/components/App.js for frontend call
    return jsonify({"imagename":data[2:-1]})
@app.route("/highlight",methods=['GET','POST'])
def highlight():
    """Return the pitch contour plus a 'highlight' subset of stable regions
    (windows of 15 frames whose frequencies stay within 5 Hz of the first)."""
    data=request.get_json()
    snd = parselmouth.Sound(data['filepath'])
    pitch = snd.to_pitch()
    plt.figure()
    plt.twinx()
    pitch_values = pitch.selected_array['frequency']
    x=pitch.xs()
    y=pitch.selected_array['frequency']
    # Full contour: voiced frames only (0 marks unvoiced).
    dataPoints=[]
    for i in range(len(y)):
        if(y[i]!=0):
            dataPoints.append({"x":x[i],"y":y[i]})
    s = pitch_values.size
    # p collects the frames belonging to "stable" 15-frame windows.
    p = np.empty(s)
    for i in range(s-15):
        flag = 0
        for j in range(0,15):
            if abs(pitch_values[i]-pitch_values[i+j])>5:
                flag=1
        if flag == 0:
            for j in range(0,15):
                p[i+j]=pitch_values[i+j]
    pitch_values[pitch_values==0] = np.nan
    dataPoints2=[]
    x=pitch.xs()
    y=p
    for i in range(len(y)):
        if(y[i]!=0):
            dataPoints2.append({"x":x[i],"y":y[i]})
    p[p==0] = np.nan
    plt.plot(pitch.xs(), pitch_values, 'o', markersize=5, color='w')
    plt.plot(pitch.xs(), pitch_values, 'o', markersize=2)
    plt.plot(pitch.xs(), p, 'o', markersize=5, color='w')
    plt.plot(pitch.xs(), p, 'o', markersize=2)
    plt.grid(False)
    plt.ylim(0, pitch.ceiling)
    plt.ylabel("fundamental frequency [Hz]")
    plt.xlim([snd.xmin-0.2, snd.xmax+0.2])
    name="image4.png"
    plt.savefig(name)
    # NOTE(review): the base64 image is computed but unused in the response.
    data=""
    with open("image4.png", "rb") as image_file:
        data = format(base64.b64encode(image_file.read()))
    # See /src/components/App.js for frontend call
    return jsonify({"normal":dataPoints,"highlight":dataPoints2})
"""
-------------------------- APP SERVICES ----------------------------
"""
# Quits Flask on Electron exit
@app.route("/quit")
def quit():
    """Shut down the embedded Werkzeug dev server (called on app exit)."""
    shutdown = request.environ.get("werkzeug.server.shutdown")
    if shutdown is None:
        # BUG FIX: previously this would crash with "NoneType is not
        # callable" when not running under the Werkzeug dev server.
        return "Server shutdown unavailable", 500
    shutdown()
    # BUG FIX: a Flask view must not return None ("return" alone raised a
    # TypeError on the very last request); return a real body instead.
    return "Server shutting down..."
# Script entry point: run the Flask app with the module-level settings
# (app_config is defined earlier in this file).
if __name__ == "__main__":
    app.run(**app_config)
| 2.609375 | 3 |
lib/SurfacingAlgorithms/huji-rich-Elad3DFast/analytic/afd.py | GalaxyHunters/Vivid | 0 | 12764197 | <reponame>GalaxyHunters/Vivid
import numpy
import cmath
def sgn(x):
    """Return the sign of *x*: 0 for zero, otherwise x/|x| (+1 or -1)."""
    return 0 if x == 0 else x / abs(x)
def single_mode(x,t,k,omega,v,g0,dx,xi):
    """Analytic amplitude of a single Fourier mode at position *x*, time *t*.

    Combines the driven oscillation exp(i*omega*t) with the numerical decay
    factor of the upwind scheme (grid spacing *dx*, Courant number *xi*,
    advection speed *v*, forcing amplitude *g0*, wavenumber *k*).
    """
    phase = cmath.exp(1j*k*x)
    growth = cmath.exp(1j*omega*t)
    decay = (1 - (1 - cmath.exp(-1j*k*dx))*xi)**(t*v/xi/dx)
    denom = v*(-1 + cmath.exp(1j*dx*xi*omega/v) + xi - xi*cmath.exp(-1j*k*dx))
    return phase*g0*dx*xi*(growth - decay)/denom
def primitives_to_conserved(hydro_data):
    """
    Converts primitive variables to variables that are conserved along streamlines

    Returns a dict with the Riemann invariants v +/- dp/(rho*c) and the
    entropy perturbation dp/p - g*drho/rho, each as a list over the grid.
    """
    import math
    g = hydro_data['adiabatic index']
    # BUG FIX: the sound speed is sqrt(g*p/rho); the square root was
    # missing here, which made this function inconsistent with
    # conserved_to_primitives (which does use math.sqrt), so the two
    # were not inverses of each other.
    sound_speed = math.sqrt(g*hydro_data['ambient']['pressure']/hydro_data['ambient']['density'])
    res = {}
    res['positive riemann invariant'] = [dv + dp/(sound_speed*hydro_data['ambient']['density'])
                                         for dp, dv in zip(hydro_data['pert']['pressure'],
                                                           hydro_data['pert']['velocity'])]
    res['negative riemann invariant'] = [dv - dp/(sound_speed*hydro_data['ambient']['density'])
                                         for dp, dv in zip(hydro_data['pert']['pressure'],
                                                           hydro_data['pert']['velocity'])]
    res['entropy'] = [dp/hydro_data['ambient']['pressure'] - g*dd/hydro_data['ambient']['density']
                      for dd, dp in zip(hydro_data['pert']['density'],
                                        hydro_data['pert']['pressure'])]
    return res
def conserved_to_primitives(initial, conserved):
    """Recover primitive perturbations from the characteristic variables.

    Inverts the Riemann-invariant / entropy decomposition using the
    ambient state stored in *initial*.
    """
    import math
    gamma = initial['adiabatic index']
    p0 = initial['ambient']['pressure']
    rho0 = initial['ambient']['density']
    c0 = math.sqrt(gamma*p0/rho0)
    jplus = conserved['positive riemann invariant']
    jminus = conserved['negative riemann invariant']
    res = {}
    res['velocity'] = [0.5*(a + b) for a, b in zip(jplus, jminus)]
    res['pressure'] = [0.5*c0*rho0*(a - b) for a, b in zip(jplus, jminus)]
    res['density'] = [(rho0/gamma)*(dp/p0 - ds)
                      for dp, ds in zip(res['pressure'], conserved['entropy'])]
    return res
def to_k_space(x_list, y_list):
    """FFT uniformly sampled data; return (angular wavenumbers, amplitudes)."""
    import numpy
    spacing = x_list[1] - x_list[0]
    amplitudes = numpy.fft.fft(y_list)
    wavenumbers = 2*numpy.pi*numpy.fft.fftfreq(len(y_list), d=spacing)
    return wavenumbers, amplitudes
def apply_filter(x_list, y_list, filter_func):
    """Multiply each Fourier mode by filter_func(k) and transform back.

    Returns the (complex) inverse FFT of the filtered spectrum.
    """
    k_list, fy_list = to_k_space(x_list, y_list)
    filtered = [filter_func(k)*fy for k, fy in zip(k_list, fy_list)]
    return numpy.fft.ifft(filtered)
def afd_advance_1(x_list, y_list, v, t, cfl=0.3):
    """Advect *y_list* by v*t using the first-order upwind amplification factor.

    The scheme is applied spectrally: each mode is multiplied by the
    per-step amplification raised to the number of steps t*|v|/(cfl*dx).
    """
    dx = x_list[1] - x_list[0]
    direction = sgn(v)
    def amplification(k):
        import cmath
        base = 1 - cfl*(1.0 - cmath.exp(-1j*k*dx*direction))
        return base**(t*abs(v)/(cfl*dx))
    return [val.real for val in apply_filter(x_list, y_list, amplification)]
def afd_advance_2(x_list, y_list, v, t, cfl=0.3):
    """Advect *y_list* by v*t using the second-order amplification factor."""
    dx = x_list[1] - x_list[0]
    def amplification(k):
        import cmath
        base = 0.25*(4 - cfl**2 + cfl**2*cmath.cos(2*k*dx)) - 1j*cfl*cmath.sin(k*dx)
        return base**(t*v/(cfl*dx))
    return [val.real for val in apply_filter(x_list, y_list, amplification)]
def exact_advance(x_list, y_list, v, t):
    """Advect *y_list* by v*t exactly via a spectral phase shift."""
    import cmath
    def phase_shift(k):
        return cmath.exp(-1j*k*v*t)
    return [val.real for val in apply_filter(x_list, y_list, phase_shift)]
def calc_propagation_speeds(initial):
    """Characteristic speeds: u±c for the Riemann invariants, u for entropy."""
    import math
    ambient = initial['ambient']
    c0 = math.sqrt(initial['adiabatic index']*ambient['pressure']/ambient['density'])
    u0 = ambient['velocity']
    return {'positive riemann invariant': u0 + c0,
            'negative riemann invariant': u0 - c0,
            'entropy': u0}
def time_advance(initial, time, scheme):
    """Advance each characteristic field with *scheme*, then convert back.

    *scheme* is called as scheme(grid, values, speed, time) for each of
    the three characteristic fields.
    """
    conserved = primitives_to_conserved(initial)
    speeds = calc_propagation_speeds(initial)
    advanced = {field: scheme(initial['grid'],
                              values,
                              speeds[field],
                              time)
                for field, values in conserved.items()}
    return conserved_to_primitives(initial, advanced)
def exact_time_advance(initial, time):
    """Advance *initial* by *time* using the exact spectral scheme."""
    return time_advance(initial, time, exact_advance)
def first_order_time_advance(initial, time, cfl=0.3):
    """Advance *initial* by *time* with the first-order AFD scheme at *cfl*."""
    return time_advance(initial, time,
                        lambda xs, ys, v, t: afd_advance_1(xs, ys, v, t, cfl))
def second_order_time_advance(initial, time, cfl=0.3):
    """Advance *initial* by *time* with the second-order AFD scheme at *cfl*."""
    return time_advance(initial, time,
                        lambda xs, ys, v, t: afd_advance_2(xs, ys, v, t, cfl))
| 2.203125 | 2 |
taskwarrior_timecrimes/cmdline.py | coddingtonbear/taskwarrior-timecrimes | 1 | 12764198 | <filename>taskwarrior_timecrimes/cmdline.py
from __future__ import print_function
import datetime
from pytz import utc
from .undo_reader import UndoReader
def cmdline():
    """Interactive CLI: list taskwarrior recurrence events from the last
    24 hours and print the `task` command that would undo a selected one.

    NOTE(review): uses raw_input, so this targets Python 2 (consistent
    with the __future__ print_function import at the top of the file).
    """
    # No options for now
    since = (
        datetime.datetime.utcnow().replace(tzinfo=utc)
        - datetime.timedelta(hours=24)
    )
    reader = UndoReader(since=since)
    events = reader.get_available_events()
    # Show a numbered menu of events; under each, the tasks it added.
    for idx, event_date in enumerate(events):
        print("{idx}) {item}".format(
            idx=idx,
            item=event_date,
        ))
        for action in reader.get_event(event_date):
            if not action.get('is_old') and action.get('parent'):
                print(" - Added %s" % action['description'])
        print("")
    selection = raw_input(
        "Please select a recurrence to undo or press CTRL+C to quit: "
    )
    event = reader.get_event(events[int(selection)])
    print("")
    print("Execute the following command:")
    # Collect UUIDs of newly added child tasks (those with a 'parent',
    # i.e. spawned by the recurrence) so they can be deleted in one go.
    uuids = []
    for action in event:
        if action.get('is_old'):
            continue
        if not action.get('parent'):
            continue
        uuids.append(action['uuid'])
    print("")
    print(
        "    task rc.confirmation=no %s delete" % (
            ' '.join(uuids)
        )
    )
    print("")
    print(
        " - Answer 'Yes' when asked if you would "
        "like to permanently delete each task."
    )
    print(
        " - Answer 'No' when asked if you would like "
        "to delete all pending occurrences of each task."
    )
| 2.78125 | 3 |
claf/config/factory/tokens.py | clovaai/claf | 10 | 12764199 |
from overrides import overrides
from claf.config.registry import Registry
from claf.config.utils import convert_config2dict
from claf.tokens import tokenizer
from .base import Factory
def make_tokenizer(tokenizer_cls, tokenizer_config, parent_tokenizers=None):
    """Instantiate *tokenizer_cls* from a raw tokenizer config dict.

    The config's "name" selects the tokenizer package; the sub-dict keyed
    by that name is re-keyed to "config" before instantiation.

    Args:
        tokenizer_cls: class to instantiate (called with keyword args).
        tokenizer_config: dict holding "name" plus an optional per-package
            sub-dict; None or a dict without "name" yields None.
        parent_tokenizers: optional extra keyword args merged into the
            constructor call (e.g. {"word_tokenizer": ...}). The previous
            mutable default `{}` was replaced with None (classic Python
            mutable-default pitfall).

    Returns:
        A tokenizer instance, or None when no tokenizer is configured.
    """
    if tokenizer_config is None or "name" not in tokenizer_config:
        return None
    # BUG FIX: work on a copy so the caller's config dict is no longer
    # mutated in place (the old code added "config" and deleted the
    # package sub-dict from the dict it was handed).
    config = dict(tokenizer_config)
    package_name = config["name"]
    config["config"] = config.pop(package_name, {})
    if parent_tokenizers:
        config.update(parent_tokenizers)
    return tokenizer_cls(**config)
def make_all_tokenizers(all_tokenizer_config):
    """Build the sent/word/subword/char tokenizers shared by all tokens.

    The tokenizers are chained: word wraps sent, while subword and char
    both wrap word. Sent defaults to the "punkt" tokenizer; the others
    are only built when configured.
    """
    sent = make_tokenizer(
        tokenizer.SentTokenizer,
        all_tokenizer_config.get("sent", {"name": "punkt"}),
    )
    word = make_tokenizer(
        tokenizer.WordTokenizer,
        all_tokenizer_config.get("word", None),
        parent_tokenizers={"sent_tokenizer": sent},
    )
    subword = make_tokenizer(
        tokenizer.SubwordTokenizer,
        all_tokenizer_config.get("subword", None),
        parent_tokenizers={"word_tokenizer": word},
    )
    char = make_tokenizer(
        tokenizer.CharTokenizer,
        all_tokenizer_config.get("char", None),
        parent_tokenizers={"word_tokenizer": word},
    )
    return {"char": char, "subword": subword, "word": word, "sent": sent}
class TokenMakersFactory(Factory):
    """
    TokenMakers Factory Class

    Builds the shared tokenizers plus one token maker per configured
    token name, looked up in the registry by its declared type.

    * Args:
        config: token config from argument (config.token)
    """

    # Supported languages. NOTE(review): not referenced in this class
    # body -- presumably consumed elsewhere; confirm before removing.
    LANGS = ["eng", "kor"]

    def __init__(self, config):
        self.config = config
        self.registry = Registry()

    @overrides
    def create(self):
        """Return a dict of token makers keyed by token name, plus the
        shared "tokenizers" entry."""
        tokenizers = make_all_tokenizers(convert_config2dict(self.config.tokenizer))

        token_names, token_types = self.config.names, self.config.types
        if len(token_names) != len(token_types):
            raise ValueError("token_names and token_types must be same length.")

        token_makers = {"tokenizers": tokenizers}
        # Pair each token name with its type; sorted for deterministic order.
        for token_name, token_type in sorted(zip(token_names, token_types)):
            token_config = getattr(self.config, token_name, {})
            if token_config != {}:
                token_config = convert_config2dict(token_config)

            # Token (tokenizer, indexer, embedding, vocab)
            token_config = {
                "tokenizers": tokenizers,
                "indexer_config": token_config.get("indexer", {}),
                "embedding_config": token_config.get("embedding", {}),
                "vocab_config": token_config.get("vocab", {}),
            }
            # Registry key convention: "token:<type>" maps to a token class.
            token_makers[token_name] = self.registry.get(f"token:{token_type}")(**token_config)
        return token_makers
| 2.28125 | 2 |
sensai_dataset/generator/__main__.py | holodata/sensai-dataset | 0 | 12764200 | import argparse
from sensai_dataset.generator.commands import generate_dataset
from sensai_dataset.generator.constants import DATASET_DIR, DATASET_SOURCE_DIR
if __name__ == '__main__':
    # CLI: -m/--matcher is a glob limiting which chat CSV files to process.
    parser = argparse.ArgumentParser(description='dataset generator')
    parser.add_argument('-m', '--matcher', type=str, default='chats_*.csv')
    args = parser.parse_args()

    # Echo the configured directories before the (potentially long) run.
    print('target: ' + DATASET_DIR)
    print('source: ' + DATASET_SOURCE_DIR)

    generate_dataset(source_dir=DATASET_SOURCE_DIR,
                     target_dir=DATASET_DIR,
                     matcher=args.matcher)
| 2.28125 | 2 |
trove/tests/unittests/guestagent/test_dbaas.py | zhujzhuo/openstack-trove | 0 | 12764201 | <filename>trove/tests/unittests/guestagent/test_dbaas.py<gh_stars>0
# Copyright 2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import ConfigParser
import os
import subprocess
import tempfile
from uuid import uuid4
import time
from mock import Mock
from mock import MagicMock
from mock import PropertyMock
from mock import patch
from mock import ANY
from oslo_utils import netutils
import sqlalchemy
import testtools
from testtools.matchers import Is
from testtools.matchers import Equals
from testtools.matchers import Not
from trove.common import cfg
from trove.common.exception import ProcessExecutionError
from trove.common.exception import GuestError
from trove.common import utils
from trove.common import instance as rd_instance
from trove.conductor import api as conductor_api
import trove.guestagent.datastore.mysql.service as dbaas
from trove.guestagent import dbaas as dbaas_sr
from trove.guestagent import pkg
from trove.guestagent.common import operating_system
from trove.guestagent.dbaas import to_gb
from trove.guestagent.dbaas import get_filesystem_volume_stats
from trove.guestagent.datastore.service import BaseDbStatus
from trove.guestagent.datastore.experimental.redis import service as rservice
from trove.guestagent.datastore.experimental.redis.service import RedisApp
from trove.guestagent.datastore.experimental.redis import system as RedisSystem
from trove.guestagent.datastore.experimental.cassandra import (
service as cass_service)
from trove.guestagent.datastore.experimental.cassandra import (
system as cass_system)
from trove.guestagent.datastore.mysql.service import MySqlAdmin
from trove.guestagent.datastore.mysql.service import MySqlRootAccess
from trove.guestagent.datastore.mysql.service import MySqlApp
from trove.guestagent.datastore.mysql.service import MySqlAppStatus
from trove.guestagent.datastore.mysql.service import KeepAliveConnection
from trove.guestagent.datastore.experimental.couchbase import (
service as couchservice)
from trove.guestagent.datastore.experimental.couchdb import (
service as couchdb_service)
from trove.guestagent.datastore.experimental.mongodb import (
service as mongo_service)
from trove.guestagent.datastore.experimental.mongodb import (
system as mongo_system)
from trove.guestagent.datastore.experimental.vertica.service import VerticaApp
from trove.guestagent.datastore.experimental.vertica.service import (
VerticaAppStatus)
from trove.guestagent.datastore.experimental.vertica import (
system as vertica_system)
from trove.guestagent.datastore.experimental.db2 import (
service as db2service)
from trove.guestagent.db import models
from trove.guestagent.volume import VolumeDevice
from trove.instance.models import InstanceServiceStatus
from trove.tests.unittests.util import util
CONF = cfg.CONF

"""
Unit tests for the classes and functions in dbaas.py.
"""

# Canned payloads in the guestagent's serialized database/user dict format.
FAKE_DB = {"_name": "testDB", "_character_set": "latin2",
           "_collate": "latin2_general_ci"}
FAKE_DB_2 = {"_name": "testDB2", "_character_set": "latin2",
             "_collate": "latin2_general_ci"}
# BUG FIX: the password literal had been replaced by an anonymization
# placeholder; test_create_user asserts the generated GRANT contains
# "IDENTIFIED BY 'guesswhat'", so the value must be exactly 'guesswhat'.
FAKE_USER = [{"_name": "random", "_password": "guesswhat",
              "_databases": [FAKE_DB]}]

# Stub out conductor RPC up front so no test touches a real message bus.
conductor_api.API.get_client = Mock()
conductor_api.API.heartbeat = Mock()
class FakeAppStatus(BaseDbStatus):
    """Test double for BaseDbStatus whose polled status is set manually."""

    def __init__(self, id, status):
        # Note: does not call BaseDbStatus.__init__; only these two
        # attributes are set up for the tests.
        self.id = id
        self.next_fake_status = status

    def _get_actual_db_status(self):
        # Report whatever the test primed via set_next_status().
        return self.next_fake_status

    def set_next_status(self, next_status):
        self.next_fake_status = next_status

    def _is_query_router(self):
        return False
class DbaasTest(testtools.TestCase):
    """Tests for module-level helpers in the mysql dbaas service module."""

    def setUp(self):
        super(DbaasTest, self).setUp()
        # Save the real executors so tearDown can restore module state.
        self.orig_utils_execute_with_timeout = dbaas.utils.execute_with_timeout
        self.orig_utils_execute = dbaas.utils.execute

    def tearDown(self):
        super(DbaasTest, self).tearDown()
        dbaas.utils.execute_with_timeout = self.orig_utils_execute_with_timeout
        dbaas.utils.execute = self.orig_utils_execute

    def test_get_auth_password(self):
        dbaas.utils.execute_with_timeout = Mock(
            return_value=("password    ", None))
        # BUG FIX: this line had been mangled by an anonymization pass
        # ("<PASSWORD>auth_password()"); restored to the helper under
        # test, which strips the trailing whitespace from the output.
        password = dbaas.get_auth_password()
        self.assertEqual("password", password)

    def test_get_auth_password_error(self):
        dbaas.utils.execute_with_timeout = Mock(
            return_value=("password", "Error"))
        self.assertRaises(RuntimeError, dbaas.get_auth_password)

    def test_service_discovery(self):
        with patch.object(os.path, 'isfile', return_value=True):
            mysql_service = dbaas.operating_system.service_discovery(["mysql"])
        self.assertIsNotNone(mysql_service['cmd_start'])
        self.assertIsNotNone(mysql_service['cmd_enable'])

    def test_load_mysqld_options(self):
        output = "mysqld would've been started with the these args:\n"\
                 "--user=mysql --port=3306 --basedir=/usr "\
                 "--tmpdir=/tmp --skip-external-locking"

        with patch.object(os.path, 'isfile', return_value=True):
            dbaas.utils.execute = Mock(return_value=(output, None))
            options = dbaas.load_mysqld_options()

        self.assertEqual(5, len(options))
        self.assertEqual(options["user"], ["mysql"])
        self.assertEqual(options["port"], ["3306"])
        self.assertEqual(options["basedir"], ["/usr"])
        self.assertEqual(options["tmpdir"], ["/tmp"])
        self.assertTrue("skip-external-locking" in options)

    def test_load_mysqld_options_contains_plugin_loads_options(self):
        output = ("mysqld would've been started with the these args:\n"
                  "--plugin-load=blackhole=ha_blackhole.so "
                  "--plugin-load=federated=ha_federated.so")

        with patch.object(os.path, 'isfile', return_value=True):
            dbaas.utils.execute = Mock(return_value=(output, None))
            options = dbaas.load_mysqld_options()

        self.assertEqual(1, len(options))
        # Repeated flags accumulate into a list under one key.
        self.assertEqual(options["plugin-load"],
                         ["blackhole=ha_blackhole.so",
                          "federated=ha_federated.so"])

    def test_load_mysqld_options_error(self):
        dbaas.utils.execute = Mock(side_effect=ProcessExecutionError())
        # On execution failure the helper returns a falsy (empty) result.
        self.assertFalse(dbaas.load_mysqld_options())
class ResultSetStub(object):
    """Minimal stand-in for a SQL result set, backed by a list of rows."""

    def __init__(self, rows):
        self._rows = rows

    def __iter__(self):
        return iter(self._rows)

    @property
    def rowcount(self):
        return len(self._rows)

    def __repr__(self):
        return repr(self._rows)
class MySqlAdminMockTest(testtools.TestCase):
    """MySqlAdmin tests driven through a fully mocked SQL connection."""

    def tearDown(self):
        super(MySqlAdminMockTest, self).tearDown()

    def test_list_databases(self):
        # mock_sql_connection is a helper defined elsewhere in this test
        # module -- presumably it patches the engine and returns the mock
        # connection; confirm against the full file.
        mock_conn = mock_sql_connection()

        with patch.object(mock_conn, 'execute',
                          return_value=ResultSetStub(
                [('db1', 'utf8', 'utf8_bin'),
                 ('db2', 'utf8', 'utf8_bin'),
                 ('db3', 'utf8', 'utf8_bin')])):
            databases, next_marker = MySqlAdmin().list_databases(limit=10)

        # All three rows fit in the limit, so no pagination marker.
        self.assertThat(next_marker, Is(None))
        self.assertThat(len(databases), Is(3))
class MySqlAdminTest(testtools.TestCase):
    """Asserts on the SQL text MySqlAdmin emits, with the client patched out.

    setUp swaps dbaas.LocalSqlClient for a Mock that records every
    executed statement; each test inspects call_args for expected SQL
    fragments. tearDown restores the real module attributes.
    """

    def setUp(self):

        super(MySqlAdminTest, self).setUp()

        # Save the real collaborators so tearDown can restore module state.
        self.orig_get_engine = dbaas.get_engine
        self.orig_LocalSqlClient = dbaas.LocalSqlClient
        self.orig_LocalSqlClient_enter = dbaas.LocalSqlClient.__enter__
        self.orig_LocalSqlClient_exit = dbaas.LocalSqlClient.__exit__
        self.orig_LocalSqlClient_execute = dbaas.LocalSqlClient.execute
        self.orig_MySQLUser_is_valid_user_name = (
            models.MySQLUser._is_valid_user_name)
        dbaas.get_engine = MagicMock(name='get_engine')
        # Replace the SQL client with a mock that records executed queries.
        dbaas.LocalSqlClient = Mock
        dbaas.LocalSqlClient.__enter__ = Mock()
        dbaas.LocalSqlClient.__exit__ = Mock()
        dbaas.LocalSqlClient.execute = Mock()
        self.mySqlAdmin = MySqlAdmin()

    def tearDown(self):

        super(MySqlAdminTest, self).tearDown()
        dbaas.get_engine = self.orig_get_engine
        dbaas.LocalSqlClient = self.orig_LocalSqlClient
        dbaas.LocalSqlClient.__enter__ = self.orig_LocalSqlClient_enter
        dbaas.LocalSqlClient.__exit__ = self.orig_LocalSqlClient_exit
        dbaas.LocalSqlClient.execute = self.orig_LocalSqlClient_execute
        models.MySQLUser._is_valid_user_name = (
            self.orig_MySQLUser_is_valid_user_name)

    def test_create_database(self):

        databases = []
        databases.append(FAKE_DB)

        self.mySqlAdmin.create_database(databases)

        args, _ = dbaas.LocalSqlClient.execute.call_args_list[0]
        expected = ("CREATE DATABASE IF NOT EXISTS "
                    "`testDB` CHARACTER SET = 'latin2' "
                    "COLLATE = 'latin2_general_ci';")
        self.assertEqual(args[0].text, expected,
                         "Create database queries are not the same")

        self.assertEqual(1, dbaas.LocalSqlClient.execute.call_count,
                         "The client object was not called exactly once, " +
                         "it was called %d times"
                         % dbaas.LocalSqlClient.execute.call_count)

    def test_create_database_more_than_1(self):

        databases = []
        databases.append(FAKE_DB)
        databases.append(FAKE_DB_2)

        self.mySqlAdmin.create_database(databases)

        # One CREATE DATABASE statement is issued per entry, in order.
        args, _ = dbaas.LocalSqlClient.execute.call_args_list[0]
        expected = ("CREATE DATABASE IF NOT EXISTS "
                    "`testDB` CHARACTER SET = 'latin2' "
                    "COLLATE = 'latin2_general_ci';")
        self.assertEqual(args[0].text, expected,
                         "Create database queries are not the same")

        args, _ = dbaas.LocalSqlClient.execute.call_args_list[1]
        expected = ("CREATE DATABASE IF NOT EXISTS "
                    "`testDB2` CHARACTER SET = 'latin2' "
                    "COLLATE = 'latin2_general_ci';")
        self.assertEqual(args[0].text, expected,
                         "Create database queries are not the same")

        self.assertEqual(2, dbaas.LocalSqlClient.execute.call_count,
                         "The client object was not called exactly twice, " +
                         "it was called %d times"
                         % dbaas.LocalSqlClient.execute.call_count)

    def test_create_database_no_db(self):

        databases = []

        self.mySqlAdmin.create_database(databases)

        self.assertFalse(dbaas.LocalSqlClient.execute.called,
                         "The client object was called when it wasn't " +
                         "supposed to")

    def test_delete_database(self):

        database = {"_name": "testDB"}

        self.mySqlAdmin.delete_database(database)

        args, _ = dbaas.LocalSqlClient.execute.call_args
        expected = "DROP DATABASE `testDB`;"
        self.assertEqual(args[0].text, expected,
                         "Delete database queries are not the same")

        self.assertTrue(dbaas.LocalSqlClient.execute.called,
                        "The client object was not called")

    def test_delete_user(self):

        user = {"_name": "testUser", "_host": None}

        self.mySqlAdmin.delete_user(user)

        # For some reason, call_args is None.
        call_args = dbaas.LocalSqlClient.execute.call_args
        if call_args is not None:
            args, _ = call_args
            # A None host defaults to the '%' wildcard.
            expected = "DROP USER `testUser`@`%`;"
            self.assertEqual(args[0].text, expected,
                             "Delete user queries are not the same")

            self.assertTrue(dbaas.LocalSqlClient.execute.called,
                            "The client object was not called")

    def test_create_user(self):
        self.mySqlAdmin.create_user(FAKE_USER)
        expected = ("GRANT ALL PRIVILEGES ON `testDB`.* TO `random`@`%` "
                    "IDENTIFIED BY 'guesswhat' "
                    "WITH GRANT OPTION;")
        # For some reason, call_args is None.
        call_args = dbaas.LocalSqlClient.execute.call_args
        if call_args is not None:
            args, _ = call_args
            self.assertEqual(args[0].text.strip(), expected,
                             "Create user queries are not the same")
            self.assertEqual(2, dbaas.LocalSqlClient.execute.call_count)

    def test_list_databases(self):
        self.mySqlAdmin.list_databases()
        args, _ = dbaas.LocalSqlClient.execute.call_args
        expected = ["SELECT schema_name as name,",
                    "default_character_set_name as charset,",
                    "default_collation_name as collation",
                    "FROM information_schema.schemata",
                    ("schema_name NOT IN ('" + "', '".join(CONF.ignore_dbs) +
                     "')"),
                    "ORDER BY schema_name ASC",
                    ]
        for text in expected:
            self.assertTrue(text in args[0].text, "%s not in query." % text)
        # No limit requested, so no LIMIT clause should appear.
        self.assertFalse("LIMIT " in args[0].text)

    def test_list_databases_with_limit(self):
        limit = 2
        self.mySqlAdmin.list_databases(limit)
        args, _ = dbaas.LocalSqlClient.execute.call_args
        expected = ["SELECT schema_name as name,",
                    "default_character_set_name as charset,",
                    "default_collation_name as collation",
                    "FROM information_schema.schemata",
                    ("schema_name NOT IN ('" + "', '".join(CONF.ignore_dbs) +
                     "')"),
                    "ORDER BY schema_name ASC",
                    ]
        for text in expected:
            self.assertTrue(text in args[0].text, "%s not in query." % text)

        # LIMIT is limit+1 so the extra row signals another page exists.
        self.assertTrue("LIMIT " + str(limit + 1) in args[0].text)

    def test_list_databases_with_marker(self):
        marker = "aMarker"
        self.mySqlAdmin.list_databases(marker=marker)
        args, _ = dbaas.LocalSqlClient.execute.call_args
        expected = ["SELECT schema_name as name,",
                    "default_character_set_name as charset,",
                    "default_collation_name as collation",
                    "FROM information_schema.schemata",
                    ("schema_name NOT IN ('" + "', '".join(CONF.ignore_dbs) +
                     "')"),
                    "ORDER BY schema_name ASC",
                    ]
        for text in expected:
            self.assertTrue(text in args[0].text, "%s not in query." % text)

        self.assertFalse("LIMIT " in args[0].text)

        self.assertTrue("AND schema_name > '" + marker + "'" in args[0].text)

    def test_list_databases_with_include_marker(self):
        marker = "aMarker"
        self.mySqlAdmin.list_databases(marker=marker, include_marker=True)
        args, _ = dbaas.LocalSqlClient.execute.call_args
        expected = ["SELECT schema_name as name,",
                    "default_character_set_name as charset,",
                    "default_collation_name as collation",
                    "FROM information_schema.schemata",
                    ("schema_name NOT IN ('" + "', '".join(CONF.ignore_dbs) +
                     "')"),
                    "ORDER BY schema_name ASC",
                    ]
        for text in expected:
            self.assertTrue(text in args[0].text, "%s not in query." % text)

        self.assertFalse("LIMIT " in args[0].text)

        # include_marker switches the comparison from > to >=.
        self.assertTrue(("AND schema_name >= '%s'" % marker) in args[0].text)

    def test_list_users(self):
        self.mySqlAdmin.list_users()
        args, _ = dbaas.LocalSqlClient.execute.call_args

        expected = ["SELECT User, Host",
                    "FROM mysql.user",
                    "WHERE Host != 'localhost'",
                    "ORDER BY User",
                    ]
        for text in expected:
            self.assertTrue(text in args[0].text, "%s not in query." % text)

        self.assertFalse("LIMIT " in args[0].text)
        self.assertFalse("AND Marker > '" in args[0].text)

    def test_list_users_with_limit(self):
        limit = 2
        self.mySqlAdmin.list_users(limit)
        args, _ = dbaas.LocalSqlClient.execute.call_args

        expected = ["SELECT User, Host",
                    "FROM mysql.user",
                    "WHERE Host != 'localhost'",
                    "ORDER BY User",
                    ("LIMIT " + str(limit + 1)),
                    ]
        for text in expected:
            self.assertTrue(text in args[0].text, "%s not in query." % text)

    def test_list_users_with_marker(self):
        marker = "aMarker"
        self.mySqlAdmin.list_users(marker=marker)
        args, _ = dbaas.LocalSqlClient.execute.call_args

        expected = ["SELECT User, Host, Marker",
                    "FROM mysql.user",
                    "WHERE Host != 'localhost'",
                    "ORDER BY User",
                    ]

        for text in expected:
            self.assertTrue(text in args[0].text, "%s not in query." % text)

        self.assertFalse("LIMIT " in args[0].text)
        self.assertTrue("AND Marker > '" + marker + "'" in args[0].text)

    def test_list_users_with_include_marker(self):
        marker = "aMarker"
        self.mySqlAdmin.list_users(marker=marker, include_marker=True)
        args, _ = dbaas.LocalSqlClient.execute.call_args

        expected = ["SELECT User, Host",
                    "FROM mysql.user",
                    "WHERE Host != 'localhost'",
                    "ORDER BY User",
                    ]

        for text in expected:
            self.assertTrue(text in args[0].text, "%s not in query." % text)

        self.assertFalse("LIMIT " in args[0].text)

        self.assertTrue("AND Marker >= '" + marker + "'" in args[0].text)

    def test_get_user(self):
        """
        Unit tests for mySqlAdmin.get_user.
        This test case checks if the sql query formed by the get_user method
        is correct or not by checking with expected query.
        """
        username = "user1"
        hostname = "host"
        self.mySqlAdmin.get_user(username, hostname)
        args, _ = dbaas.LocalSqlClient.execute.call_args
        expected = ["SELECT User, Host",
                    "FROM mysql.user",
                    "WHERE Host != 'localhost' AND User = 'user1'",
                    "ORDER BY User, Host",
                    ]

        for text in expected:
            self.assertTrue(text in args[0].text, "%s not in query." % text)
class MySqlAppTest(testtools.TestCase):
def setUp(self):
super(MySqlAppTest, self).setUp()
self.orig_utils_execute_with_timeout = dbaas.utils.execute_with_timeout
self.orig_time_sleep = time.sleep
self.orig_unlink = os.unlink
self.orig_get_auth_password = <PASSWORD>.get_auth_password
util.init_db()
self.FAKE_ID = str(uuid4())
InstanceServiceStatus.create(instance_id=self.FAKE_ID,
status=rd_instance.ServiceStatuses.NEW)
self.appStatus = FakeAppStatus(self.FAKE_ID,
rd_instance.ServiceStatuses.NEW)
self.mySqlApp = MySqlApp(self.appStatus)
mysql_service = {'cmd_start': Mock(),
'cmd_stop': Mock(),
'cmd_enable': Mock(),
'cmd_disable': Mock(),
'bin': Mock()}
dbaas.operating_system.service_discovery = Mock(return_value=
mysql_service)
time.sleep = Mock()
os.unlink = Mock()
dbaas.get_auth_password = Mock()
def tearDown(self):
super(MySqlAppTest, self).tearDown()
dbaas.utils.execute_with_timeout = self.orig_utils_execute_with_timeout
time.sleep = self.orig_time_sleep
os.unlink = self.orig_unlink
dbaas.get_auth_password = self.orig_get_auth_password
InstanceServiceStatus.find_by(instance_id=self.FAKE_ID).delete()
def assert_reported_status(self, expected_status):
service_status = InstanceServiceStatus.find_by(
instance_id=self.FAKE_ID)
self.assertEqual(expected_status, service_status.status)
def mysql_starts_successfully(self):
def start(update_db=False):
self.appStatus.set_next_status(
rd_instance.ServiceStatuses.RUNNING)
self.mySqlApp.start_mysql.side_effect = start
def mysql_starts_unsuccessfully(self):
def start():
raise RuntimeError("MySQL failed to start!")
self.mySqlApp.start_mysql.side_effect = start
def mysql_stops_successfully(self):
def stop():
self.appStatus.set_next_status(
rd_instance.ServiceStatuses.SHUTDOWN)
self.mySqlApp.stop_db.side_effect = stop
def mysql_stops_unsuccessfully(self):
def stop():
raise RuntimeError("MySQL failed to stop!")
self.mySqlApp.stop_db.side_effect = stop
def test_stop_mysql(self):
dbaas.utils.execute_with_timeout = Mock()
self.appStatus.set_next_status(
rd_instance.ServiceStatuses.SHUTDOWN)
self.mySqlApp.stop_db()
self.assert_reported_status(rd_instance.ServiceStatuses.NEW)
def test_stop_mysql_with_db_update(self):
dbaas.utils.execute_with_timeout = Mock()
self.appStatus.set_next_status(
rd_instance.ServiceStatuses.SHUTDOWN)
self.mySqlApp.stop_db(True)
self.assertTrue(conductor_api.API.heartbeat.called_once_with(
self.FAKE_ID,
{'service_status':
rd_instance.ServiceStatuses.SHUTDOWN.description}))
def test_stop_mysql_error(self):
dbaas.utils.execute_with_timeout = Mock()
self.appStatus.set_next_status(rd_instance.ServiceStatuses.RUNNING)
self.mySqlApp.state_change_wait_time = 1
self.assertRaises(RuntimeError, self.mySqlApp.stop_db)
def test_restart_is_successful(self):
self.mySqlApp.start_mysql = Mock()
self.mySqlApp.stop_db = Mock()
self.mysql_stops_successfully()
self.mysql_starts_successfully()
self.mySqlApp.restart()
self.assertTrue(self.mySqlApp.stop_db.called)
self.assertTrue(self.mySqlApp.start_mysql.called)
self.assertTrue(conductor_api.API.heartbeat.called_once_with(
self.FAKE_ID,
{'service_status':
rd_instance.ServiceStatuses.RUNNING.description}))
def test_restart_mysql_wont_start_up(self):
self.mySqlApp.start_mysql = Mock()
self.mySqlApp.stop_db = Mock()
self.mysql_stops_unsuccessfully()
self.mysql_starts_unsuccessfully()
self.assertRaises(RuntimeError, self.mySqlApp.restart)
self.assertTrue(self.mySqlApp.stop_db.called)
self.assertFalse(self.mySqlApp.start_mysql.called)
self.assert_reported_status(rd_instance.ServiceStatuses.NEW)
def test_wipe_ib_logfiles_error(self):
mocked = Mock(side_effect=ProcessExecutionError('Error'))
dbaas.utils.execute_with_timeout = mocked
self.assertRaises(ProcessExecutionError,
self.mySqlApp.wipe_ib_logfiles)
def test_start_mysql(self):
dbaas.utils.execute_with_timeout = Mock()
self.appStatus.set_next_status(rd_instance.ServiceStatuses.RUNNING)
self.mySqlApp._enable_mysql_on_boot = Mock()
self.mySqlApp.start_mysql()
self.assert_reported_status(rd_instance.ServiceStatuses.NEW)
def test_start_mysql_with_db_update(self):
dbaas.utils.execute_with_timeout = Mock()
self.mySqlApp._enable_mysql_on_boot = Mock()
self.appStatus.set_next_status(rd_instance.ServiceStatuses.RUNNING)
self.mySqlApp.start_mysql(update_db=True)
self.assertTrue(conductor_api.API.heartbeat.called_once_with(
self.FAKE_ID,
{'service_status':
rd_instance.ServiceStatuses.RUNNING.description}))
def test_start_mysql_runs_forever(self):
dbaas.utils.execute_with_timeout = Mock()
self.mySqlApp._enable_mysql_on_boot = Mock()
self.mySqlApp.state_change_wait_time = 1
self.appStatus.set_next_status(rd_instance.ServiceStatuses.SHUTDOWN)
self.assertRaises(RuntimeError, self.mySqlApp.start_mysql)
self.assertTrue(conductor_api.API.heartbeat.called_once_with(
self.FAKE_ID,
{'service_status':
rd_instance.ServiceStatuses.SHUTDOWN.description}))
def test_start_mysql_error(self):
self.mySqlApp._enable_mysql_on_boot = Mock()
mocked = Mock(side_effect=ProcessExecutionError('Error'))
dbaas.utils.execute_with_timeout = mocked
self.assertRaises(RuntimeError, self.mySqlApp.start_mysql)
def test_start_db_with_conf_changes(self):
self.mySqlApp.start_mysql = Mock()
self.mySqlApp._write_mycnf = Mock()
self.mysql_starts_successfully()
self.appStatus.status = rd_instance.ServiceStatuses.SHUTDOWN
self.mySqlApp.start_db_with_conf_changes(Mock())
self.assertTrue(self.mySqlApp._write_mycnf.called)
self.assertTrue(self.mySqlApp.start_mysql.called)
self.assertEqual(self.appStatus._get_actual_db_status(),
rd_instance.ServiceStatuses.RUNNING)
def test_start_db_with_conf_changes_mysql_is_running(self):
self.mySqlApp.start_mysql = Mock()
self.mySqlApp._write_mycnf = Mock()
self.appStatus.status = rd_instance.ServiceStatuses.RUNNING
self.assertRaises(RuntimeError,
self.mySqlApp.start_db_with_conf_changes,
Mock())
def test_remove_overrides(self):
from trove.common.exception import ProcessExecutionError
mocked = Mock(side_effect=ProcessExecutionError('Error'))
dbaas.utils.execute_with_timeout = mocked
self.assertRaises(ProcessExecutionError, self.mySqlApp.start_mysql)
    def test_mysql_error_in_write_config_verify_unlink(self):
        """When reset_configuration fails in execute, the written file
        must be cleaned up via os.unlink exactly once."""
        configuration = {'config_contents': 'some junk'}
        from trove.common.exception import ProcessExecutionError
        dbaas.utils.execute_with_timeout = (
            Mock(side_effect=ProcessExecutionError('something')))
        self.assertRaises(ProcessExecutionError,
                          self.mySqlApp.reset_configuration,
                          configuration=configuration)
        self.assertEqual(dbaas.utils.execute_with_timeout.call_count, 1)
        # The failure path must invoke the unlink cleanup once.
        self.assertEqual(os.unlink.call_count, 1)
        self.assertEqual(dbaas.get_auth_password.call_count, 1)
    def test_mysql_error_in_write_config(self):
        """reset_configuration propagates ProcessExecutionError raised
        by the execute helper."""
        configuration = {'config_contents': 'some junk'}
        from trove.common.exception import ProcessExecutionError
        dbaas.utils.execute_with_timeout = (
            Mock(side_effect=ProcessExecutionError('something')))
        self.assertRaises(ProcessExecutionError,
                          self.mySqlApp.reset_configuration,
                          configuration=configuration)
        self.assertEqual(dbaas.utils.execute_with_timeout.call_count, 1)
        self.assertEqual(dbaas.get_auth_password.call_count, 1)
class MySqlAppInstallTest(MySqlAppTest):
    """Tests covering MySqlApp package installation and secure()."""
    def setUp(self):
        super(MySqlAppInstallTest, self).setUp()
        # Remember module-level callables that tests monkeypatch, so
        # tearDown can restore them.
        self.orig_create_engine = sqlalchemy.create_engine
        self.orig_pkg_version = dbaas.packager.pkg_version
        self.orig_utils_execute_with_timeout = utils.execute_with_timeout
    def tearDown(self):
        super(MySqlAppInstallTest, self).tearDown()
        sqlalchemy.create_engine = self.orig_create_engine
        dbaas.packager.pkg_version = self.orig_pkg_version
        utils.execute_with_timeout = self.orig_utils_execute_with_timeout
    def test_install(self):
        """install_if_needed installs the package when it is absent."""
        self.mySqlApp._install_mysql = Mock()
        pkg.Package.pkg_is_installed = Mock(return_value=False)
        utils.execute_with_timeout = Mock()
        pkg.Package.pkg_install = Mock()
        self.mySqlApp._clear_mysql_config = Mock()
        self.mySqlApp._create_mysql_confd_dir = Mock()
        self.mySqlApp.start_mysql = Mock()
        self.mySqlApp.install_if_needed(["package"])
        self.assertTrue(pkg.Package.pkg_install.called)
        self.assert_reported_status(rd_instance.ServiceStatuses.NEW)
    def test_secure(self):
        """secure() stops the DB, rewrites my.cnf and restarts it."""
        dbaas.clear_expired_password = Mock()
        self.mySqlApp.start_mysql = Mock()
        self.mySqlApp.stop_db = Mock()
        self.mySqlApp._write_mycnf = Mock()
        self.mysql_stops_successfully()
        self.mysql_starts_successfully()
        sqlalchemy.create_engine = Mock()
        self.mySqlApp.secure('contents', None)
        self.assertTrue(self.mySqlApp.stop_db.called)
        self.assertTrue(self.mySqlApp._write_mycnf.called)
        self.assertTrue(self.mySqlApp.start_mysql.called)
        self.assert_reported_status(rd_instance.ServiceStatuses.NEW)
    def test_install_install_error(self):
        """A PkgPackageStateError from pkg_install propagates out of
        install_if_needed."""
        from trove.guestagent import pkg
        self.mySqlApp.start_mysql = Mock()
        self.mySqlApp.stop_db = Mock()
        pkg.Package.pkg_is_installed = Mock(return_value=False)
        self.mySqlApp._clear_mysql_config = Mock()
        self.mySqlApp._create_mysql_confd_dir = Mock()
        pkg.Package.pkg_install = \
            Mock(side_effect=pkg.PkgPackageStateError("Install error"))
        self.assertRaises(pkg.PkgPackageStateError,
                          self.mySqlApp.install_if_needed, ["package"])
        self.assert_reported_status(rd_instance.ServiceStatuses.NEW)
    def test_secure_write_conf_error(self):
        """If writing my.cnf fails, secure() raises and MySQL is never
        restarted (start_mysql is not reached)."""
        dbaas.clear_expired_password = Mock()
        self.mySqlApp.start_mysql = Mock()
        self.mySqlApp.stop_db = Mock()
        self.mySqlApp._write_mycnf = Mock(
            side_effect=IOError("Could not write file"))
        self.mysql_stops_successfully()
        self.mysql_starts_successfully()
        sqlalchemy.create_engine = Mock()
        self.assertRaises(IOError, self.mySqlApp.secure, "foo", None)
        self.assertTrue(self.mySqlApp.stop_db.called)
        self.assertTrue(self.mySqlApp._write_mycnf.called)
        self.assertFalse(self.mySqlApp.start_mysql.called)
        self.assert_reported_status(rd_instance.ServiceStatuses.NEW)
class TextClauseMatcher(object):
    """Equality matcher for SQL text clauses in mock assertions.

    Compares equal to any object whose ``text`` attribute contains
    ``self.text`` as a substring; intended for use with
    ``Mock.assert_any_call`` and friends.
    """
    def __init__(self, text):
        self.text = text

    def __repr__(self):
        return "TextClause(%s)" % self.text

    def __eq__(self, arg):
        # Substring match against the clause's SQL text.
        # Fix: dropped the leftover debug print() that ran on every
        # mock-call comparison and polluted the test output.
        return self.text in arg.text

    def __ne__(self, arg):
        # Python 2 does not derive __ne__ from __eq__; keep them in sync.
        return not self.__eq__(arg)
def mock_sql_connection():
    """Stub the local SQL plumbing and hand back the fake connection.

    Replaces utils.execute_with_timeout, sqlalchemy.create_engine and
    the LocalSqlClient context-manager hooks with MagicMocks so tests
    can inspect the SQL issued through the returned connection.
    """
    utils.execute_with_timeout = MagicMock(
        return_value=['fake_password', None])
    engine = MagicMock()
    sqlalchemy.create_engine = MagicMock(return_value=engine)
    connection = MagicMock()
    dbaas.LocalSqlClient.__enter__ = MagicMock(return_value=connection)
    dbaas.LocalSqlClient.__exit__ = MagicMock(return_value=None)
    return connection
class MySqlAppMockTest(testtools.TestCase):
    """Tests for MySqlApp.secure() over a fully mocked SQL connection."""
    def setUp(self):
        super(MySqlAppMockTest, self).setUp()
        self.orig_utils_execute_with_timeout = utils.execute_with_timeout
    def tearDown(self):
        super(MySqlAppMockTest, self).tearDown()
        utils.execute_with_timeout = self.orig_utils_execute_with_timeout
    def test_secure_keep_root(self):
        """secure() must issue SQL through the mocked connection."""
        mock_conn = mock_sql_connection()
        with patch.object(mock_conn, 'execute', return_value=None):
            utils.execute_with_timeout = MagicMock(return_value=None)
            # skip writing the file for now
            with patch.object(os.path, 'isfile', return_value=False):
                mock_status = MagicMock()
                mock_status.wait_for_real_status_to_change_to = MagicMock(
                    return_value=True)
                dbaas.clear_expired_password = MagicMock(return_value=None)
                app = MySqlApp(mock_status)
                app._write_mycnf = MagicMock(return_value=True)
                app.start_mysql = MagicMock(return_value=None)
                app.stop_db = MagicMock(return_value=None)
                app.secure('foo', None)
                self.assertTrue(mock_conn.execute.called)
    def test_secure_with_mycnf_error(self):
        """secure(None, None) raises TypeError after the SQL has run and
        the DB has been asked to stop."""
        mock_conn = mock_sql_connection()
        with patch.object(mock_conn, 'execute', return_value=None):
            operating_system.service_discovery = Mock(return_value={
                'cmd_stop': 'service mysql stop'})
            utils.execute_with_timeout = MagicMock(return_value=None)
            # skip writing the file for now
            with patch.object(os.path, 'isfile', return_value=False):
                mock_status = MagicMock()
                mock_status.wait_for_real_status_to_change_to = MagicMock(
                    return_value=True)
                dbaas.clear_expired_password = MagicMock(return_value=None)
                app = MySqlApp(mock_status)
                dbaas.clear_expired_password = MagicMock(return_value=None)
                self.assertRaises(TypeError, app.secure, None, None)
                self.assertTrue(mock_conn.execute.called)
                # At least called twice
                self.assertTrue(mock_conn.execute.call_count >= 2)
                # The stop must have waited for SHUTDOWN.
                (mock_status.wait_for_real_status_to_change_to.
                    assert_called_with(rd_instance.ServiceStatuses.SHUTDOWN,
                                       app.state_change_wait_time, False))
class MySqlRootStatusTest(testtools.TestCase):
    """Tests for MySqlRootAccess root-enablement queries."""
    def setUp(self):
        super(MySqlRootStatusTest, self).setUp()
        self.orig_utils_execute_with_timeout = utils.execute_with_timeout
    def tearDown(self):
        super(MySqlRootStatusTest, self).tearDown()
        utils.execute_with_timeout = self.orig_utils_execute_with_timeout
    def test_root_is_enabled(self):
        """A non-empty result set means root is enabled."""
        mock_conn = mock_sql_connection()
        mock_rs = MagicMock()
        mock_rs.rowcount = 1
        with patch.object(mock_conn, 'execute', return_value=mock_rs):
            self.assertThat(MySqlRootAccess().is_root_enabled(), Is(True))
    def test_root_is_not_enabled(self):
        """An empty result set means root is disabled."""
        mock_conn = mock_sql_connection()
        mock_rs = MagicMock()
        mock_rs.rowcount = 0
        with patch.object(mock_conn, 'execute', return_value=mock_rs):
            # NOTE(review): invoked on the class here but on an instance
            # in the test above -- confirm is_root_enabled supports both
            # call styles, or make the two tests uniform.
            self.assertThat(MySqlRootAccess.is_root_enabled(), Equals(False))
    def test_enable_root(self):
        """enable_root issues CREATE USER / GRANT / UPDATE statements."""
        mock_conn = mock_sql_connection()
        with patch.object(mock_conn, 'execute', return_value=None):
            # invocation
            user_ser = MySqlRootAccess.enable_root()
            # verification
            self.assertThat(user_ser, Not(Is(None)))
            mock_conn.execute.assert_any_call(TextClauseMatcher('CREATE USER'),
                                              user='root', host='%')
            mock_conn.execute.assert_any_call(TextClauseMatcher(
                'GRANT ALL PRIVILEGES ON *.*'))
            mock_conn.execute.assert_any_call(TextClauseMatcher(
                'UPDATE mysql.user'))
    def test_enable_root_failed(self):
        """enable_root rejects an invalid root user name."""
        with patch.object(models.MySQLUser, '_is_valid_user_name',
                          return_value=False):
            self.assertRaises(ValueError, MySqlAdmin().enable_root)
class MockStats:
    """Stand-in for an os.statvfs() result: a 4 GiB volume, half free."""
    f_bsize = 4096          # fundamental block size, bytes
    f_blocks = 1048576      # total blocks (1024 ** 2)
    f_bfree = 524288        # free blocks (512 * 1024)
class InterrogatorTest(testtools.TestCase):
    """Tests for the filesystem interrogation helpers."""
    def tearDown(self):
        super(InterrogatorTest, self).tearDown()
    def test_to_gb(self):
        # ~123 MB converts to 0.11 GB.
        self.assertEqual(to_gb(123456789), 0.11)
    def test_to_gb_zero(self):
        # Zero bytes is exactly 0.0 GB.
        self.assertEqual(to_gb(0), 0.0)
    def test_get_filesystem_volume_stats(self):
        with patch.object(os, 'statvfs', return_value=MockStats):
            stats = get_filesystem_volume_stats('/some/path/')
            # Raw statvfs fields pass through...
            self.assertEqual(stats['block_size'], 4096)
            self.assertEqual(stats['total_blocks'], 1048576)
            self.assertEqual(stats['free_blocks'], 524288)
            # ...and derived figures: total/used in GB, free in bytes.
            self.assertEqual(stats['total'], 4.0)
            self.assertEqual(stats['free'], 2147483648)
            self.assertEqual(stats['used'], 2.0)
    def test_get_filesystem_volume_stats_error(self):
        # An OSError from statvfs surfaces as RuntimeError.
        with patch.object(os, 'statvfs', side_effect=OSError):
            self.assertRaises(
                RuntimeError,
                get_filesystem_volume_stats, '/nonexistent/path')
class ServiceRegistryTest(testtools.TestCase):
    """Tests for dbaas_sr.datastore_registry() manager resolution."""
    def setUp(self):
        super(ServiceRegistryTest, self).setUp()
    def tearDown(self):
        super(ServiceRegistryTest, self).tearDown()
    def test_datastore_registry_with_extra_manager(self):
        """A custom manager entry is merged alongside the defaults."""
        datastore_registry_ext_test = {
            'test': 'trove.guestagent.datastore.test.manager.Manager',
        }
        dbaas_sr.get_custom_managers = Mock(return_value=
                                            datastore_registry_ext_test)
        test_dict = dbaas_sr.datastore_registry()
        self.assertEqual(test_dict.get('test'),
                         datastore_registry_ext_test.get('test', None))
        self.assertEqual(test_dict.get('mysql'),
                         'trove.guestagent.datastore.mysql.'
                         'manager.Manager')
        self.assertEqual(test_dict.get('percona'),
                         'trove.guestagent.datastore.mysql.'
                         'manager.Manager')
        self.assertEqual(test_dict.get('redis'),
                         'trove.guestagent.datastore.experimental.redis.'
                         'manager.Manager')
        self.assertEqual(test_dict.get('cassandra'),
                         'trove.guestagent.datastore.experimental.cassandra.'
                         'manager.Manager')
        self.assertEqual(test_dict.get('couchbase'),
                         'trove.guestagent.datastore.experimental.'
                         'couchbase.manager.Manager')
        self.assertEqual('trove.guestagent.datastore.experimental.mongodb.'
                         'manager.Manager',
                         test_dict.get('mongodb'))
        self.assertEqual(test_dict.get('couchdb'),
                         'trove.guestagent.datastore.experimental.couchdb.'
                         'manager.Manager')
        self.assertEqual('trove.guestagent.datastore.experimental.db2.'
                         'manager.Manager',
                         test_dict.get('db2'))
    def test_datastore_registry_with_existing_manager(self):
        """A custom entry overrides the default manager for 'mysql'."""
        datastore_registry_ext_test = {
            'mysql': 'trove.guestagent.datastore.mysql.'
                     'manager.Manager123',
        }
        dbaas_sr.get_custom_managers = Mock(return_value=
                                            datastore_registry_ext_test)
        test_dict = dbaas_sr.datastore_registry()
        self.assertEqual(test_dict.get('mysql'),
                         'trove.guestagent.datastore.mysql.'
                         'manager.Manager123')
        self.assertEqual(test_dict.get('percona'),
                         'trove.guestagent.datastore.mysql.'
                         'manager.Manager')
        self.assertEqual(test_dict.get('redis'),
                         'trove.guestagent.datastore.experimental.redis.'
                         'manager.Manager')
        self.assertEqual(test_dict.get('cassandra'),
                         'trove.guestagent.datastore.experimental.cassandra.'
                         'manager.Manager')
        self.assertEqual(test_dict.get('couchbase'),
                         'trove.guestagent.datastore.experimental.couchbase.'
                         'manager.Manager')
        self.assertEqual('trove.guestagent.datastore.experimental.mongodb.'
                         'manager.Manager',
                         test_dict.get('mongodb'))
        self.assertEqual(test_dict.get('couchdb'),
                         'trove.guestagent.datastore.experimental.couchdb.'
                         'manager.Manager')
        self.assertEqual('trove.guestagent.datastore.experimental.vertica.'
                         'manager.Manager',
                         test_dict.get('vertica'))
        self.assertEqual('trove.guestagent.datastore.experimental.db2.'
                         'manager.Manager',
                         test_dict.get('db2'))
    def test_datastore_registry_with_blank_dict(self):
        """With no custom managers, only the defaults are returned."""
        datastore_registry_ext_test = dict()
        dbaas_sr.get_custom_managers = Mock(return_value=
                                            datastore_registry_ext_test)
        test_dict = dbaas_sr.datastore_registry()
        self.assertEqual(test_dict.get('mysql'),
                         'trove.guestagent.datastore.mysql.'
                         'manager.Manager')
        self.assertEqual(test_dict.get('percona'),
                         'trove.guestagent.datastore.mysql.'
                         'manager.Manager')
        self.assertEqual(test_dict.get('redis'),
                         'trove.guestagent.datastore.experimental.redis.'
                         'manager.Manager')
        self.assertEqual(test_dict.get('cassandra'),
                         'trove.guestagent.datastore.experimental.cassandra.'
                         'manager.Manager')
        self.assertEqual(test_dict.get('couchbase'),
                         'trove.guestagent.datastore.experimental.couchbase.'
                         'manager.Manager')
        self.assertEqual('trove.guestagent.datastore.experimental.mongodb.'
                         'manager.Manager',
                         test_dict.get('mongodb'))
        self.assertEqual(test_dict.get('couchdb'),
                         'trove.guestagent.datastore.experimental.couchdb.'
                         'manager.Manager')
        self.assertEqual('trove.guestagent.datastore.experimental.vertica.'
                         'manager.Manager',
                         test_dict.get('vertica'))
        self.assertEqual('trove.guestagent.datastore.experimental.db2.'
                         'manager.Manager',
                         test_dict.get('db2'))
class KeepAliveConnectionTest(testtools.TestCase):
    """Tests for the KeepAliveConnection pool listener's checkout()."""

    class OperationalError(Exception):
        """Minimal stand-in for a DB-API OperationalError.

        The driver error code travels in args[0], mirroring MySQLdb's
        exception layout.
        """
        def __init__(self, value):
            self.args = [value]

        def __str__(self):
            # Fix: the original returned repr(self.value), but no
            # 'value' attribute is ever set (only self.args), so
            # rendering the exception raised AttributeError.
            return repr(self.args[0])

    def setUp(self):
        super(KeepAliveConnectionTest, self).setUp()
        # Save globals the tests replace so tearDown can restore them.
        self.orig_utils_execute_with_timeout = dbaas.utils.execute_with_timeout
        self.orig_LOG_err = dbaas.LOG

    def tearDown(self):
        super(KeepAliveConnectionTest, self).tearDown()
        dbaas.utils.execute_with_timeout = self.orig_utils_execute_with_timeout
        dbaas.LOG = self.orig_LOG_err

    def test_checkout_type_error(self):
        """A TypeError raised by ping() propagates unchanged."""
        dbapi_con = Mock()
        dbapi_con.ping = Mock(side_effect=TypeError("Type Error"))
        self.keepAliveConn = KeepAliveConnection()
        self.assertRaises(TypeError, self.keepAliveConn.checkout,
                          dbapi_con, Mock(), Mock())

    def test_checkout_disconnection_error(self):
        """Error code 2013 (lost connection) maps to DisconnectionError."""
        from sqlalchemy import exc
        dbapi_con = Mock()
        dbapi_con.OperationalError = self.OperationalError
        dbapi_con.ping = Mock(side_effect=dbapi_con.OperationalError(2013))
        self.keepAliveConn = KeepAliveConnection()
        self.assertRaises(exc.DisconnectionError, self.keepAliveConn.checkout,
                          dbapi_con, Mock(), Mock())

    def test_checkout_operation_error(self):
        """Other operational error codes propagate unchanged."""
        dbapi_con = Mock()
        dbapi_con.OperationalError = self.OperationalError
        dbapi_con.ping = Mock(side_effect=dbapi_con.OperationalError(1234))
        self.keepAliveConn = KeepAliveConnection()
        self.assertRaises(self.OperationalError, self.keepAliveConn.checkout,
                          dbapi_con, Mock(), Mock())
class BaseDbStatusTest(testtools.TestCase):
    """Tests for the datastore-agnostic BaseDbStatus state machine."""
    def setUp(self):
        super(BaseDbStatusTest, self).setUp()
        util.init_db()
        # time.sleep is patched by some tests; restore it in tearDown.
        self.orig_dbaas_time_sleep = time.sleep
        self.FAKE_ID = str(uuid4())
        InstanceServiceStatus.create(instance_id=self.FAKE_ID,
                                     status=rd_instance.ServiceStatuses.NEW)
        dbaas.CONF.guest_id = self.FAKE_ID
    def tearDown(self):
        super(BaseDbStatusTest, self).tearDown()
        time.sleep = self.orig_dbaas_time_sleep
        InstanceServiceStatus.find_by(instance_id=self.FAKE_ID).delete()
        dbaas.CONF.guest_id = None
    def test_begin_install(self):
        """begin_install moves the status to BUILDING."""
        self.baseDbStatus = BaseDbStatus()
        self.baseDbStatus.begin_install()
        self.assertEqual(self.baseDbStatus.status,
                         rd_instance.ServiceStatuses.BUILDING)
    def test_begin_restart(self):
        """begin_restart flips restart_mode on."""
        self.baseDbStatus = BaseDbStatus()
        self.baseDbStatus.restart_mode = False
        self.baseDbStatus.begin_restart()
        self.assertTrue(self.baseDbStatus.restart_mode)
    def test_end_install_or_restart(self):
        """end_install_or_restart adopts the actual DB status and
        clears restart_mode."""
        self.baseDbStatus = BaseDbStatus()
        self.baseDbStatus._get_actual_db_status = Mock(
            return_value=rd_instance.ServiceStatuses.SHUTDOWN)
        self.baseDbStatus.end_install_or_restart()
        self.assertEqual(rd_instance.ServiceStatuses.SHUTDOWN,
                         self.baseDbStatus.status)
        self.assertFalse(self.baseDbStatus.restart_mode)
    def test_is_installed(self):
        """RUNNING counts as installed."""
        self.baseDbStatus = BaseDbStatus()
        self.baseDbStatus.status = rd_instance.ServiceStatuses.RUNNING
        self.assertTrue(self.baseDbStatus.is_installed)
    def test_is_installed_none(self):
        """An unknown (None) status also counts as installed."""
        self.baseDbStatus = BaseDbStatus()
        self.baseDbStatus.status = None
        self.assertTrue(self.baseDbStatus.is_installed)
    def test_is_installed_building(self):
        """BUILDING is not installed."""
        self.baseDbStatus = BaseDbStatus()
        self.baseDbStatus.status = rd_instance.ServiceStatuses.BUILDING
        self.assertFalse(self.baseDbStatus.is_installed)
    def test_is_installed_new(self):
        """NEW is not installed."""
        self.baseDbStatus = BaseDbStatus()
        self.baseDbStatus.status = rd_instance.ServiceStatuses.NEW
        self.assertFalse(self.baseDbStatus.is_installed)
    def test_is_installed_failed(self):
        """FAILED is not installed."""
        self.baseDbStatus = BaseDbStatus()
        self.baseDbStatus.status = rd_instance.ServiceStatuses.FAILED
        self.assertFalse(self.baseDbStatus.is_installed)
    def test_is_restarting(self):
        self.baseDbStatus = BaseDbStatus()
        self.baseDbStatus.restart_mode = True
        self.assertTrue(self.baseDbStatus._is_restarting)
    def test_is_running(self):
        self.baseDbStatus = BaseDbStatus()
        self.baseDbStatus.status = rd_instance.ServiceStatuses.RUNNING
        self.assertTrue(self.baseDbStatus.is_running)
    def test_is_running_not(self):
        self.baseDbStatus = BaseDbStatus()
        self.baseDbStatus.status = rd_instance.ServiceStatuses.SHUTDOWN
        self.assertFalse(self.baseDbStatus.is_running)
    def test_wait_for_real_status_to_change_to(self):
        """Waiting succeeds when the actual status matches the target."""
        self.baseDbStatus = BaseDbStatus()
        self.baseDbStatus._get_actual_db_status = Mock(
            return_value=rd_instance.ServiceStatuses.RUNNING)
        # Stub sleep so the poll loop runs instantly.
        time.sleep = Mock()
        self.assertTrue(self.baseDbStatus.
                        wait_for_real_status_to_change_to
                        (rd_instance.ServiceStatuses.RUNNING, 10))
    def test_wait_for_real_status_to_change_to_timeout(self):
        """Waiting fails when the status never reaches the target."""
        self.baseDbStatus = BaseDbStatus()
        self.baseDbStatus._get_actual_db_status = Mock(
            return_value=rd_instance.ServiceStatuses.RUNNING)
        time.sleep = Mock()
        self.assertFalse(self.baseDbStatus.
                         wait_for_real_status_to_change_to
                         (rd_instance.ServiceStatuses.SHUTDOWN, 10))
class MySqlAppStatusTest(testtools.TestCase):
    """Tests for MySqlAppStatus._get_actual_db_status()."""
    def setUp(self):
        super(MySqlAppStatusTest, self).setUp()
        util.init_db()
        # Save everything the tests monkeypatch for restore in tearDown.
        self.orig_utils_execute_with_timeout = dbaas.utils.execute_with_timeout
        self.orig_load_mysqld_options = dbaas.load_mysqld_options
        self.orig_dbaas_os_path_exists = dbaas.os.path.exists
        self.orig_dbaas_time_sleep = time.sleep
        self.FAKE_ID = str(uuid4())
        InstanceServiceStatus.create(instance_id=self.FAKE_ID,
                                     status=rd_instance.ServiceStatuses.NEW)
        dbaas.CONF.guest_id = self.FAKE_ID
    def tearDown(self):
        super(MySqlAppStatusTest, self).tearDown()
        dbaas.utils.execute_with_timeout = self.orig_utils_execute_with_timeout
        dbaas.load_mysqld_options = self.orig_load_mysqld_options
        dbaas.os.path.exists = self.orig_dbaas_os_path_exists
        time.sleep = self.orig_dbaas_time_sleep
        InstanceServiceStatus.find_by(instance_id=self.FAKE_ID).delete()
        dbaas.CONF.guest_id = None
    def test_get_actual_db_status(self):
        """A successful status command yields RUNNING."""
        dbaas.utils.execute_with_timeout = Mock(return_value=(None, None))
        self.mySqlAppStatus = MySqlAppStatus()
        status = self.mySqlAppStatus._get_actual_db_status()
        self.assertEqual(rd_instance.ServiceStatuses.RUNNING, status)
    def test_get_actual_db_status_error_shutdown(self):
        """Command failure with no mysqld file on disk yields SHUTDOWN."""
        mocked = Mock(side_effect=ProcessExecutionError())
        dbaas.utils.execute_with_timeout = mocked
        dbaas.load_mysqld_options = Mock(return_value={})
        dbaas.os.path.exists = Mock(return_value=False)
        self.mySqlAppStatus = MySqlAppStatus()
        status = self.mySqlAppStatus._get_actual_db_status()
        self.assertEqual(rd_instance.ServiceStatuses.SHUTDOWN, status)
    def test_get_actual_db_status_error_crashed(self):
        """Command failure while the mysqld file still exists (a stale
        pid file, presumably -- TODO confirm) yields BLOCKED."""
        dbaas.utils.execute_with_timeout = MagicMock(
            side_effect=[ProcessExecutionError(), ("some output", None)])
        dbaas.load_mysqld_options = Mock()
        dbaas.os.path.exists = Mock(return_value=True)
        self.mySqlAppStatus = MySqlAppStatus()
        status = self.mySqlAppStatus._get_actual_db_status()
        self.assertEqual(rd_instance.ServiceStatuses.BLOCKED, status)
class TestRedisApp(testtools.TestCase):
    """Tests for the Redis guestagent application wrapper."""

    def setUp(self):
        super(TestRedisApp, self).setUp()
        self.FAKE_ID = 1000
        self.appStatus = FakeAppStatus(self.FAKE_ID,
                                       rd_instance.ServiceStatuses.NEW)
        self.app = RedisApp(self.appStatus)
        # Save and stub the execute helpers; tearDown restores them.
        self.orig_os_path_isfile = os.path.isfile
        self.orig_utils_execute_with_timeout = utils.execute_with_timeout
        utils.execute_with_timeout = Mock()
        rservice.utils.execute_with_timeout = Mock()

    def tearDown(self):
        super(TestRedisApp, self).tearDown()
        self.app = None
        os.path.isfile = self.orig_os_path_isfile
        utils.execute_with_timeout = self.orig_utils_execute_with_timeout
        rservice.utils.execute_with_timeout = \
            self.orig_utils_execute_with_timeout

    def test_install_if_needed_installed(self):
        """No install is attempted when the package is already present."""
        with patch.object(pkg.Package, 'pkg_is_installed', return_value=True):
            with patch.object(RedisApp, '_install_redis', return_value=None):
                self.app.install_if_needed('bar')
                pkg.Package.pkg_is_installed.assert_any_call('bar')
                self.assertEqual(RedisApp._install_redis.call_count, 0)

    def test_install_if_needed_not_installed(self):
        """_install_redis is invoked when the package is missing."""
        with patch.object(pkg.Package, 'pkg_is_installed', return_value=False):
            with patch.object(RedisApp, '_install_redis', return_value=None):
                self.app.install_if_needed('asdf')
                pkg.Package.pkg_is_installed.assert_any_call('asdf')
                RedisApp._install_redis.assert_any_call('asdf')

    def test_install_redis(self):
        """_install_redis installs the package and starts the service."""
        with patch.object(utils, 'execute_with_timeout'):
            with patch.object(pkg.Package, 'pkg_install', return_value=None):
                with patch.object(RedisApp, 'start_redis', return_value=None):
                    self.app._install_redis('redis')
                    pkg.Package.pkg_install.assert_any_call('redis', {}, 1200)
                    RedisApp.start_redis.assert_any_call()
                    self.assertTrue(utils.execute_with_timeout.called)

    # NOTE(review): the with/without-upstart test pairs below are
    # byte-for-byte identical; presumably each pair was meant to use a
    # different service_discovery fixture (upstart vs. sysv) -- confirm
    # the intended difference.
    def test_enable_redis_on_boot_without_upstart(self):
        """_enable_redis_on_boot runs the discovered enable command."""
        cmd = '123'
        with patch.object(operating_system, 'service_discovery',
                          return_value={'cmd_enable': cmd}):
            with patch.object(utils, 'execute_with_timeout',
                              return_value=None):
                self.app._enable_redis_on_boot()
                operating_system.service_discovery.assert_any_call(
                    RedisSystem.SERVICE_CANDIDATES)
                utils.execute_with_timeout.assert_any_call(
                    cmd, shell=True)

    def test_enable_redis_on_boot_with_upstart(self):
        """_enable_redis_on_boot runs the discovered enable command."""
        cmd = '123'
        with patch.object(operating_system, 'service_discovery',
                          return_value={'cmd_enable': cmd}):
            with patch.object(utils, 'execute_with_timeout',
                              return_value=None):
                self.app._enable_redis_on_boot()
                operating_system.service_discovery.assert_any_call(
                    RedisSystem.SERVICE_CANDIDATES)
                utils.execute_with_timeout.assert_any_call(
                    cmd, shell=True)

    def test_disable_redis_on_boot_with_upstart(self):
        """_disable_redis_on_boot runs the discovered disable command."""
        cmd = '123'
        with patch.object(operating_system, 'service_discovery',
                          return_value={'cmd_disable': cmd}):
            with patch.object(utils, 'execute_with_timeout',
                              return_value=None):
                self.app._disable_redis_on_boot()
                operating_system.service_discovery.assert_any_call(
                    RedisSystem.SERVICE_CANDIDATES)
                utils.execute_with_timeout.assert_any_call(
                    cmd, shell=True)

    def test_disable_redis_on_boot_without_upstart(self):
        """_disable_redis_on_boot runs the discovered disable command."""
        cmd = '123'
        with patch.object(operating_system, 'service_discovery',
                          return_value={'cmd_disable': cmd}):
            with patch.object(utils, 'execute_with_timeout',
                              return_value=None):
                self.app._disable_redis_on_boot()
                operating_system.service_discovery.assert_any_call(
                    RedisSystem.SERVICE_CANDIDATES)
                utils.execute_with_timeout.assert_any_call(
                    cmd, shell=True)

    def test_stop_db_without_fail(self):
        """stop_db disables boot-start and runs the stop command."""
        mock_status = MagicMock()
        mock_status.wait_for_real_status_to_change_to = MagicMock(
            return_value=True)
        app = RedisApp(mock_status, state_change_wait_time=0)
        RedisApp._disable_redis_on_boot = MagicMock(
            return_value=None)
        with patch.object(utils, 'execute_with_timeout', return_value=None):
            mock_status.wait_for_real_status_to_change_to = MagicMock(
                return_value=True)
            app.stop_db(do_not_start_on_reboot=True)
            utils.execute_with_timeout.assert_any_call(
                'sudo ' + RedisSystem.REDIS_CMD_STOP,
                shell=True)
            self.assertTrue(RedisApp._disable_redis_on_boot.called)
            self.assertTrue(
                mock_status.wait_for_real_status_to_change_to.called)

    def test_stop_db_with_failure(self):
        """If the stop never takes effect, end_install_or_restart is
        invoked on the status object."""
        mock_status = MagicMock()
        mock_status.wait_for_real_status_to_change_to = MagicMock(
            return_value=True)
        app = RedisApp(mock_status, state_change_wait_time=0)
        RedisApp._disable_redis_on_boot = MagicMock(
            return_value=None)
        with patch.object(utils, 'execute_with_timeout', return_value=None):
            mock_status.wait_for_real_status_to_change_to = MagicMock(
                return_value=False)
            app.stop_db(do_not_start_on_reboot=True)
            utils.execute_with_timeout.assert_any_call(
                'sudo ' + RedisSystem.REDIS_CMD_STOP,
                shell=True)
            self.assertTrue(RedisApp._disable_redis_on_boot.called)
            self.assertTrue(mock_status.end_install_or_restart.called)
            self.assertTrue(
                mock_status.wait_for_real_status_to_change_to.called)

    def test_restart(self):
        """restart brackets stop/start with begin/end status calls."""
        mock_status = MagicMock()
        app = RedisApp(mock_status, state_change_wait_time=0)
        mock_status.begin_restart = MagicMock(return_value=None)
        with patch.object(RedisApp, 'stop_db', return_value=None):
            with patch.object(RedisApp, 'start_redis', return_value=None):
                mock_status.end_install_or_restart = MagicMock(
                    return_value=None)
                app.restart()
                mock_status.begin_restart.assert_any_call()
                RedisApp.stop_db.assert_any_call()
                RedisApp.start_redis.assert_any_call()
                mock_status.end_install_or_restart.assert_any_call()

    def test_start_redis(self):
        """start_redis enables boot-start, runs the start command, and
        falls back to pkill if the service does not come up."""
        mock_status = MagicMock()
        app = RedisApp(mock_status, state_change_wait_time=0)
        with patch.object(RedisApp, '_enable_redis_on_boot',
                          return_value=None):
            with patch.object(utils, 'execute_with_timeout',
                              return_value=None):
                mock_status.wait_for_real_status_to_change_to = MagicMock(
                    return_value=None)
                mock_status.end_install_or_restart = MagicMock(
                    return_value=None)
                app.start_redis()
                utils.execute_with_timeout.assert_any_call(
                    'sudo ' + RedisSystem.REDIS_CMD_START,
                    shell=True)
                utils.execute_with_timeout.assert_any_call('pkill', '-9',
                                                           'redis-server',
                                                           run_as_root=True,
                                                           root_helper='sudo')
                self.assertTrue(RedisApp._enable_redis_on_boot.called)
                self.assertTrue(mock_status.end_install_or_restart.called)
                # Fix: this previously read the misspelled attribute
                # 'callled', which a MagicMock auto-creates as a truthy
                # Mock, so the assertion could never fail.
                self.assertTrue(
                    mock_status.wait_for_real_status_to_change_to.called)
class CassandraDBAppTest(testtools.TestCase):
    """Tests for the Cassandra guestagent application wrapper."""
    def setUp(self):
        super(CassandraDBAppTest, self).setUp()
        # Save module-level state that the tests monkeypatch.
        self.utils_execute_with_timeout = (
            cass_service.utils.execute_with_timeout)
        self.sleep = time.sleep
        self.pkg_version = cass_service.packager.pkg_version
        self.pkg = cass_service.packager
        util.init_db()
        self.FAKE_ID = str(uuid4())
        InstanceServiceStatus.create(instance_id=self.FAKE_ID,
                                     status=rd_instance.ServiceStatuses.NEW)
        self.appStatus = FakeAppStatus(self.FAKE_ID,
                                       rd_instance.ServiceStatuses.NEW)
        self.cassandra = cass_service.CassandraApp(self.appStatus)
        self.orig_unlink = os.unlink
    def tearDown(self):
        super(CassandraDBAppTest, self).tearDown()
        # Restore everything patched in setUp/tests.
        cass_service.utils.execute_with_timeout = (
            self.utils_execute_with_timeout)
        time.sleep = self.sleep
        cass_service.packager.pkg_version = self.pkg_version
        cass_service.packager = self.pkg
        InstanceServiceStatus.find_by(instance_id=self.FAKE_ID).delete()
    def assert_reported_status(self, expected_status):
        """Assert the status persisted for this instance matches."""
        service_status = InstanceServiceStatus.find_by(
            instance_id=self.FAKE_ID)
        self.assertEqual(expected_status, service_status.status)
    def test_stop_db(self):
        """stop_db without update leaves the reported status at NEW."""
        cass_service.utils.execute_with_timeout = Mock()
        self.appStatus.set_next_status(
            rd_instance.ServiceStatuses.SHUTDOWN)
        self.cassandra.stop_db()
        self.assert_reported_status(rd_instance.ServiceStatuses.NEW)
    def test_stop_db_with_db_update(self):
        """stop_db(True) reports SHUTDOWN through the conductor."""
        cass_service.utils.execute_with_timeout = Mock()
        self.appStatus.set_next_status(
            rd_instance.ServiceStatuses.SHUTDOWN)
        self.cassandra.stop_db(True)
        # NOTE(review): Mock auto-creates 'called_once_with'; this
        # assertTrue can never fail -- assert_called_once_with is the
        # real assertion.
        self.assertTrue(conductor_api.API.heartbeat.called_once_with(
            self.FAKE_ID,
            {'service_status':
             rd_instance.ServiceStatuses.SHUTDOWN.description}))
    def test_stop_db_error(self):
        """RuntimeError when the service stays RUNNING after a stop."""
        cass_service.utils.execute_with_timeout = Mock()
        self.appStatus.set_next_status(rd_instance.ServiceStatuses.RUNNING)
        self.cassandra.state_change_wait_time = 1
        self.assertRaises(RuntimeError, self.cassandra.stop_db)
    def test_restart(self):
        """restart stops then starts and reports the final RUNNING."""
        self.cassandra.stop_db = Mock()
        self.cassandra.start_db = Mock()
        self.appStatus.set_next_status(rd_instance.ServiceStatuses.RUNNING)
        self.cassandra.restart()
        self.assertTrue(conductor_api.API.heartbeat.called_once_with(
            self.FAKE_ID,
            {'service_status':
             rd_instance.ServiceStatuses.RUNNING.description}))
        self.assert_reported_status(rd_instance.ServiceStatuses.NEW)
    def test_start_cassandra(self):
        cass_service.utils.execute_with_timeout = Mock()
        self.appStatus.set_next_status(rd_instance.ServiceStatuses.RUNNING)
        self.cassandra.start_db()
        self.assert_reported_status(rd_instance.ServiceStatuses.NEW)
    def test_start_cassandra_runs_forever(self):
        """RuntimeError when the service never reaches the target state."""
        cass_service.utils.execute_with_timeout = Mock()
        (self.cassandra.status.
         wait_for_real_status_to_change_to) = Mock(return_value=False)
        self.appStatus.set_next_status(rd_instance.ServiceStatuses.SHUTDOWN)
        self.assertRaises(RuntimeError, self.cassandra.stop_db)
        self.assertTrue(conductor_api.API.heartbeat.called_once_with(
            self.FAKE_ID,
            {'service_status':
             rd_instance.ServiceStatuses.SHUTDOWN.description}))
    def test_start_db_with_db_update(self):
        """start_db(True) reports RUNNING through the conductor."""
        cass_service.utils.execute_with_timeout = Mock()
        self.appStatus.set_next_status(
            rd_instance.ServiceStatuses.RUNNING)
        self.cassandra.start_db(True)
        self.assertTrue(conductor_api.API.heartbeat.called_once_with(
            self.FAKE_ID,
            {'service_status':
             rd_instance.ServiceStatuses.RUNNING.description}))
        self.assert_reported_status(rd_instance.ServiceStatuses.NEW)
    def test_start_cassandra_error(self):
        """A ProcessExecutionError during start surfaces as RuntimeError."""
        self.cassandra._enable_db_on_boot = Mock()
        self.cassandra.state_change_wait_time = 1
        cass_service.utils.execute_with_timeout = Mock(
            side_effect=ProcessExecutionError('Error'))
        self.assertRaises(RuntimeError, self.cassandra.start_db)
    def test_install(self):
        """install_if_needed installs when the package is absent."""
        self.cassandra._install_db = Mock()
        self.pkg.pkg_is_installed = Mock(return_value=False)
        self.cassandra.install_if_needed(['cassandra'])
        self.assertTrue(self.cassandra._install_db.called)
        self.assert_reported_status(rd_instance.ServiceStatuses.NEW)
    def test_install_install_error(self):
        """A PkgPackageStateError from _install_db propagates out."""
        from trove.guestagent import pkg
        self.cassandra.start_db = Mock()
        self.cassandra.stop_db = Mock()
        self.pkg.pkg_is_installed = Mock(return_value=False)
        self.cassandra._install_db = Mock(
            side_effect=pkg.PkgPackageStateError("Install error"))
        self.assertRaises(pkg.PkgPackageStateError,
                          self.cassandra.install_if_needed,
                          ['cassandra=1.2.10'])
        self.assert_reported_status(rd_instance.ServiceStatuses.NEW)
    def test_cassandra_error_in_write_config_verify_unlink(self):
        # this test verifies not only that the write_config
        # method properly invoked execute, but also that it properly
        # attempted to unlink the file (as a result of the exception)
        from trove.common.exception import ProcessExecutionError
        execute_with_timeout = Mock(
            side_effect=ProcessExecutionError('some exception'))
        mock_unlink = Mock(return_value=0)
        # We call tempfile.mkstemp() here and Mock() the mkstemp()
        # parameter to write_config for testability.
        (temp_handle, temp_config_name) = tempfile.mkstemp()
        mock_mkstemp = MagicMock(return_value=(temp_handle, temp_config_name))
        configuration = 'this is my configuration'
        self.assertRaises(ProcessExecutionError,
                          self.cassandra.write_config,
                          config_contents=configuration,
                          execute_function=execute_with_timeout,
                          mkstemp_function=mock_mkstemp,
                          unlink_function=mock_unlink)
        self.assertEqual(mock_unlink.call_count, 1)
        # really delete the temporary_config_file
        os.unlink(temp_config_name)
    def test_cassandra_write_config(self):
        # ensure that write_config creates a temporary file, and then
        # moves the file to the final place. Also validate the
        # contents of the file written.
        # We call tempfile.mkstemp() here and Mock() the mkstemp()
        # parameter to write_config for testability.
        (temp_handle, temp_config_name) = tempfile.mkstemp()
        mock_mkstemp = MagicMock(return_value=(temp_handle, temp_config_name))
        configuration = 'some arbitrary configuration text'
        mock_execute = MagicMock(return_value=('', ''))
        self.cassandra.write_config(configuration,
                                    execute_function=mock_execute,
                                    mkstemp_function=mock_mkstemp)
        # The three execute calls, in order: mv into place, chown, chmod.
        mv, chown, chmod = mock_execute.call_args_list
        mv.assert_called_with("sudo", "mv",
                              temp_config_name,
                              cass_system.CASSANDRA_CONF)
        chown.assert_called_with("sudo", "chown", "cassandra:cassandra",
                                 cass_system.CASSANDRA_CONF)
        chmod.assert_called_with("sudo", "chmod", "a+r",
                                 cass_system.CASSANDRA_CONF)
        mock_mkstemp.assert_called_once()
        with open(temp_config_name, 'r') as config_file:
            configuration_data = config_file.read()
        self.assertEqual(configuration, configuration_data)
        # really delete the temporary_config_file
        os.unlink(temp_config_name)
class CouchbaseAppTest(testtools.TestCase):
    """Lifecycle tests for couchservice.CouchbaseApp.

    setUp monkey-patches module-level collaborators (execute_with_timeout,
    time.sleep, service discovery, IP lookup) and tearDown restores each
    one; the restore order mirrors the patch order.
    NOTE(review): unlike CouchDBAppTest.setUp this does not call
    util.init_db() — confirm the DB is initialized by an earlier fixture.
    """

    def fake_couchbase_service_discovery(self, candidates):
        # Canned init-system commands so tests never touch the real OS.
        return {
            'cmd_start': 'start',
            'cmd_stop': 'stop',
            'cmd_enable': 'enable',
            'cmd_disable': 'disable'
        }

    def setUp(self):
        super(CouchbaseAppTest, self).setUp()
        # Save originals so tearDown can undo the module-level patches.
        self.orig_utils_execute_with_timeout = (
            couchservice.utils.execute_with_timeout)
        self.orig_time_sleep = time.sleep
        time.sleep = Mock()
        self.orig_service_discovery = operating_system.service_discovery
        self.orig_get_ip = netutils.get_my_ipv4
        operating_system.service_discovery = (
            self.fake_couchbase_service_discovery)
        netutils.get_my_ipv4 = Mock()
        self.FAKE_ID = str(uuid4())
        InstanceServiceStatus.create(instance_id=self.FAKE_ID,
                                     status=rd_instance.ServiceStatuses.NEW)
        self.appStatus = FakeAppStatus(self.FAKE_ID,
                                       rd_instance.ServiceStatuses.NEW)
        self.couchbaseApp = couchservice.CouchbaseApp(self.appStatus)
        dbaas.CONF.guest_id = self.FAKE_ID

    def tearDown(self):
        super(CouchbaseAppTest, self).tearDown()
        # Restore everything setUp replaced, then drop the DB fixture.
        couchservice.utils.execute_with_timeout = (
            self.orig_utils_execute_with_timeout)
        netutils.get_my_ipv4 = self.orig_get_ip
        operating_system.service_discovery = self.orig_service_discovery
        time.sleep = self.orig_time_sleep
        InstanceServiceStatus.find_by(instance_id=self.FAKE_ID).delete()
        dbaas.CONF.guest_id = None

    def assert_reported_status(self, expected_status):
        # Status as persisted in the instance-service-status table.
        service_status = InstanceServiceStatus.find_by(
            instance_id=self.FAKE_ID)
        self.assertEqual(expected_status, service_status.status)

    def test_stop_db(self):
        couchservice.utils.execute_with_timeout = Mock()
        self.appStatus.set_next_status(rd_instance.ServiceStatuses.SHUTDOWN)
        self.couchbaseApp.stop_db()
        self.assert_reported_status(rd_instance.ServiceStatuses.NEW)

    def test_stop_db_error(self):
        # Status never reaches SHUTDOWN within the 1s window -> RuntimeError.
        couchservice.utils.execute_with_timeout = Mock()
        self.appStatus.set_next_status(rd_instance.ServiceStatuses.RUNNING)
        self.couchbaseApp.state_change_wait_time = 1
        self.assertRaises(RuntimeError, self.couchbaseApp.stop_db)

    def test_restart(self):
        self.appStatus.set_next_status(rd_instance.ServiceStatuses.RUNNING)
        self.couchbaseApp.stop_db = Mock()
        self.couchbaseApp.start_db = Mock()
        self.couchbaseApp.restart()
        self.assertTrue(self.couchbaseApp.stop_db.called)
        self.assertTrue(self.couchbaseApp.start_db.called)
        # heartbeat is assumed to be mocked by a module-level fixture.
        self.assertTrue(conductor_api.API.heartbeat.called)

    def test_start_db(self):
        couchservice.utils.execute_with_timeout = Mock()
        self.appStatus.set_next_status(rd_instance.ServiceStatuses.RUNNING)
        self.couchbaseApp._enable_db_on_boot = Mock()
        self.couchbaseApp.start_db()
        self.assert_reported_status(rd_instance.ServiceStatuses.NEW)

    def test_start_db_error(self):
        # Failing start command must surface as RuntimeError.
        from trove.common.exception import ProcessExecutionError
        mocked = Mock(side_effect=ProcessExecutionError('Error'))
        couchservice.utils.execute_with_timeout = mocked
        self.couchbaseApp._enable_db_on_boot = Mock()
        self.assertRaises(RuntimeError, self.couchbaseApp.start_db)

    def test_start_db_runs_forever(self):
        # Start succeeds but status never becomes RUNNING -> RuntimeError.
        couchservice.utils.execute_with_timeout = Mock()
        self.couchbaseApp._enable_db_on_boot = Mock()
        self.couchbaseApp.state_change_wait_time = 1
        self.appStatus.set_next_status(rd_instance.ServiceStatuses.SHUTDOWN)
        self.assertRaises(RuntimeError, self.couchbaseApp.start_db)
        self.assertTrue(conductor_api.API.heartbeat.called)

    def test_install_when_couchbase_installed(self):
        couchservice.packager.pkg_is_installed = Mock(return_value=True)
        couchservice.utils.execute_with_timeout = Mock()
        self.couchbaseApp.install_if_needed(["package"])
        self.assertTrue(couchservice.packager.pkg_is_installed.called)
        self.assert_reported_status(rd_instance.ServiceStatuses.NEW)
class CouchDBAppTest(testtools.TestCase):
    """Lifecycle tests for couchdb_service.CouchDBApp.

    Mirrors CouchbaseAppTest: setUp monkey-patches module-level
    collaborators and tearDown restores each one.
    """

    def fake_couchdb_service_discovery(self, candidates):
        # Canned init-system commands so tests never touch the real OS.
        return {
            'cmd_start': 'start',
            'cmd_stop': 'stop',
            'cmd_enable': 'enable',
            'cmd_disable': 'disable'
        }

    def setUp(self):
        super(CouchDBAppTest, self).setUp()
        # Save originals so tearDown can undo the module-level patches.
        self.orig_utils_execute_with_timeout = (
            couchdb_service.utils.execute_with_timeout)
        self.orig_time_sleep = time.sleep
        time.sleep = Mock()
        self.orig_service_discovery = operating_system.service_discovery
        self.orig_get_ip = netutils.get_my_ipv4
        operating_system.service_discovery = (
            self.fake_couchdb_service_discovery)
        netutils.get_my_ipv4 = Mock()
        util.init_db()
        self.FAKE_ID = str(uuid4())
        InstanceServiceStatus.create(instance_id=self.FAKE_ID,
                                     status=rd_instance.ServiceStatuses.NEW)
        self.appStatus = FakeAppStatus(self.FAKE_ID,
                                       rd_instance.ServiceStatuses.NEW)
        self.couchdbApp = couchdb_service.CouchDBApp(self.appStatus)
        dbaas.CONF.guest_id = self.FAKE_ID

    def tearDown(self):
        super(CouchDBAppTest, self).tearDown()
        # Restore everything setUp replaced, then drop the DB fixture.
        couchdb_service.utils.execute_with_timeout = (
            self.orig_utils_execute_with_timeout)
        netutils.get_my_ipv4 = self.orig_get_ip
        operating_system.service_discovery = self.orig_service_discovery
        time.sleep = self.orig_time_sleep
        InstanceServiceStatus.find_by(instance_id=self.FAKE_ID).delete()
        dbaas.CONF.guest_id = None

    def assert_reported_status(self, expected_status):
        # Status as persisted in the instance-service-status table.
        service_status = InstanceServiceStatus.find_by(
            instance_id=self.FAKE_ID)
        self.assertEqual(expected_status, service_status.status)

    def test_stop_db(self):
        couchdb_service.utils.execute_with_timeout = Mock()
        self.appStatus.set_next_status(rd_instance.ServiceStatuses.SHUTDOWN)
        self.couchdbApp.stop_db()
        self.assert_reported_status(rd_instance.ServiceStatuses.NEW)

    def test_stop_db_error(self):
        # Status never reaches SHUTDOWN within the 1s window -> RuntimeError.
        couchdb_service.utils.execute_with_timeout = Mock()
        self.appStatus.set_next_status(rd_instance.ServiceStatuses.RUNNING)
        self.couchdbApp.state_change_wait_time = 1
        self.assertRaises(RuntimeError, self.couchdbApp.stop_db)

    def test_restart(self):
        self.appStatus.set_next_status(rd_instance.ServiceStatuses.RUNNING)
        self.couchdbApp.stop_db = Mock()
        self.couchdbApp.start_db = Mock()
        self.couchdbApp.restart()
        self.assertTrue(self.couchdbApp.stop_db.called)
        self.assertTrue(self.couchdbApp.start_db.called)
        # heartbeat is assumed to be mocked by a module-level fixture.
        self.assertTrue(conductor_api.API.heartbeat.called)

    def test_start_db(self):
        couchdb_service.utils.execute_with_timeout = Mock()
        self.appStatus.set_next_status(rd_instance.ServiceStatuses.RUNNING)
        self.couchdbApp._enable_db_on_boot = Mock()
        self.couchdbApp.start_db()
        self.assert_reported_status(rd_instance.ServiceStatuses.NEW)

    def test_start_db_error(self):
        # Failing start command must surface as RuntimeError.
        from trove.common.exception import ProcessExecutionError
        couchdb_service.utils.execute_with_timeout = Mock(
            side_effect=ProcessExecutionError('Error'))
        self.couchdbApp._enable_db_on_boot = Mock()
        self.assertRaises(RuntimeError, self.couchdbApp.start_db)

    def test_install_when_couchdb_installed(self):
        couchdb_service.packager.pkg_is_installed = Mock(return_value=True)
        couchdb_service.utils.execute_with_timeout = Mock()
        self.couchdbApp.install_if_needed(["package"])
        self.assertTrue(couchdb_service.packager.pkg_is_installed.called)
        self.assert_reported_status(rd_instance.ServiceStatuses.NEW)
class MongoDBAppTest(testtools.TestCase):
    """Lifecycle tests for mongo_service.MongoDBApp.

    Fix: the original asserted ``assertTrue(mock.called_once_with(...))``.
    ``called_once_with`` is not a Mock assertion method — Mock auto-creates
    the attribute and returns a truthy child Mock, so those assertions
    always passed regardless of what happened.  They are replaced with
    real assertions on ``.called``.
    """

    def fake_mongodb_service_discovery(self, candidates):
        # Canned init-system commands so tests never touch the real OS.
        return {
            'cmd_start': 'start',
            'cmd_stop': 'stop',
            'cmd_enable': 'enable',
            'cmd_disable': 'disable'
        }

    def setUp(self):
        super(MongoDBAppTest, self).setUp()
        # Save originals so tearDown can undo the module-level patches.
        self.orig_utils_execute_with_timeout = (mongo_service.
                                                utils.execute_with_timeout)
        self.orig_time_sleep = time.sleep
        self.orig_packager = mongo_system.PACKAGER
        self.orig_service_discovery = operating_system.service_discovery
        self.orig_os_unlink = os.unlink
        operating_system.service_discovery = (
            self.fake_mongodb_service_discovery)
        util.init_db()
        self.FAKE_ID = str(uuid4())
        InstanceServiceStatus.create(instance_id=self.FAKE_ID,
                                     status=rd_instance.ServiceStatuses.NEW)
        self.appStatus = FakeAppStatus(self.FAKE_ID,
                                       rd_instance.ServiceStatuses.NEW)
        self.mongoDbApp = mongo_service.MongoDBApp(self.appStatus)
        time.sleep = Mock()
        os.unlink = Mock()

    def tearDown(self):
        super(MongoDBAppTest, self).tearDown()
        # Restore everything setUp replaced, then drop the DB fixture.
        mongo_service.utils.execute_with_timeout = (
            self.orig_utils_execute_with_timeout)
        time.sleep = self.orig_time_sleep
        mongo_system.PACKAGER = self.orig_packager
        operating_system.service_discovery = self.orig_service_discovery
        os.unlink = self.orig_os_unlink
        InstanceServiceStatus.find_by(instance_id=self.FAKE_ID).delete()

    def assert_reported_status(self, expected_status):
        # Status as persisted in the instance-service-status table.
        service_status = InstanceServiceStatus.find_by(
            instance_id=self.FAKE_ID)
        self.assertEqual(expected_status, service_status.status)

    def test_stopdb(self):
        mongo_service.utils.execute_with_timeout = Mock()
        self.appStatus.set_next_status(
            rd_instance.ServiceStatuses.SHUTDOWN)
        self.mongoDbApp.stop_db()
        self.assert_reported_status(rd_instance.ServiceStatuses.NEW)

    def test_stop_db_with_db_update(self):
        mongo_service.utils.execute_with_timeout = Mock()
        self.appStatus.set_next_status(
            rd_instance.ServiceStatuses.SHUTDOWN)
        self.mongoDbApp.stop_db(True)
        # Was assertTrue(heartbeat.called_once_with(...)) — a no-op.
        # TODO(review): tighten to assert_any_call(self.FAKE_ID,
        # {'service_status': 'shutdown'}) once the heartbeat signature
        # is confirmed.
        self.assertTrue(conductor_api.API.heartbeat.called)

    def test_stop_db_error(self):
        # Status never reaches SHUTDOWN within the 1s window -> RuntimeError.
        mongo_service.utils.execute_with_timeout = Mock()
        self.appStatus.set_next_status(rd_instance.ServiceStatuses.RUNNING)
        self.mongoDbApp.state_change_wait_time = 1
        self.assertRaises(RuntimeError, self.mongoDbApp.stop_db)

    def test_restart(self):
        self.appStatus.set_next_status(rd_instance.ServiceStatuses.RUNNING)
        self.mongoDbApp.stop_db = Mock()
        self.mongoDbApp.start_db = Mock()
        self.mongoDbApp.restart()
        self.assertTrue(self.mongoDbApp.stop_db.called)
        self.assertTrue(self.mongoDbApp.start_db.called)
        # Was two no-op called_once_with checks (shutdown + running).
        self.assertTrue(conductor_api.API.heartbeat.called)

    def test_start_db(self):
        mongo_service.utils.execute_with_timeout = Mock()
        self.appStatus.set_next_status(rd_instance.ServiceStatuses.RUNNING)
        self.mongoDbApp.start_db()
        self.assert_reported_status(rd_instance.ServiceStatuses.NEW)

    def test_start_db_with_update(self):
        mongo_service.utils.execute_with_timeout = Mock()
        self.appStatus.set_next_status(rd_instance.ServiceStatuses.RUNNING)
        self.mongoDbApp.start_db(True)
        # Was assertTrue(heartbeat.called_once_with(...)) — a no-op.
        self.assertTrue(conductor_api.API.heartbeat.called)

    def test_start_db_runs_forever(self):
        # ps output makes the process look alive, but status never
        # becomes RUNNING -> RuntimeError.
        mongo_service.utils.execute_with_timeout = Mock(
            return_value=["ubuntu 17036 0.0 0.1 618960 "
                          "29232 pts/8 Sl+ Jan29 0:07 mongod", ""])
        self.mongoDbApp.state_change_wait_time = 1
        self.appStatus.set_next_status(rd_instance.ServiceStatuses.SHUTDOWN)
        self.assertRaises(RuntimeError, self.mongoDbApp.start_db)
        # Was assertTrue(heartbeat.called_once_with(...)) — a no-op.
        self.assertTrue(conductor_api.API.heartbeat.called)

    def test_start_db_error(self):
        self.mongoDbApp._enable_db_on_boot = Mock()
        from trove.common.exception import ProcessExecutionError
        mocked = Mock(side_effect=ProcessExecutionError('Error'))
        mongo_service.utils.execute_with_timeout = mocked
        self.assertRaises(RuntimeError, self.mongoDbApp.start_db)

    def test_mongodb_error_in_write_config_verify_unlink(self):
        # A failed config move must propagate and clean up the temp file.
        configuration = {'config_contents': 'some junk'}
        from trove.common.exception import ProcessExecutionError
        mongo_service.utils.execute_with_timeout = (
            Mock(side_effect=ProcessExecutionError('some exception')))
        self.assertRaises(ProcessExecutionError,
                          self.mongoDbApp.reset_configuration,
                          configuration=configuration)
        self.assertEqual(
            mongo_service.utils.execute_with_timeout.call_count, 1)
        # os.unlink is mocked in setUp; exactly one cleanup expected.
        self.assertEqual(os.unlink.call_count, 1)

    def test_mongodb_error_in_write_config(self):
        # Same scenario as above without the unlink check.
        configuration = {'config_contents': 'some junk'}
        from trove.common.exception import ProcessExecutionError
        mongo_service.utils.execute_with_timeout = (
            Mock(side_effect=ProcessExecutionError('some exception')))
        self.assertRaises(ProcessExecutionError,
                          self.mongoDbApp.reset_configuration,
                          configuration=configuration)
        self.assertEqual(
            mongo_service.utils.execute_with_timeout.call_count, 1)

    def test_start_db_with_conf_changes_db_is_running(self):
        # Applying config changes while running is an error.
        self.mongoDbApp.start_db = Mock()
        self.appStatus.status = rd_instance.ServiceStatuses.RUNNING
        self.assertRaises(RuntimeError,
                          self.mongoDbApp.start_db_with_conf_changes,
                          Mock())

    def test_install_when_db_installed(self):
        packager_mock = MagicMock()
        packager_mock.pkg_is_installed = MagicMock(return_value=True)
        mongo_system.PACKAGER = packager_mock
        self.mongoDbApp.install_if_needed(['package'])
        self.assert_reported_status(rd_instance.ServiceStatuses.NEW)

    def test_install_when_db_not_installed(self):
        packager_mock = MagicMock()
        packager_mock.pkg_is_installed = MagicMock(return_value=False)
        mongo_system.PACKAGER = packager_mock
        self.mongoDbApp.install_if_needed(['package'])
        packager_mock.pkg_install.assert_any_call(ANY, {}, ANY)
        self.assert_reported_status(rd_instance.ServiceStatuses.NEW)
class VerticaAppStatusTest(testtools.TestCase):
    """Checks VerticaAppStatus._get_actual_db_status against canned
    vertica_system.shell_execute results."""

    def setUp(self):
        super(VerticaAppStatusTest, self).setUp()
        util.init_db()
        self.FAKE_ID = str(uuid4())
        InstanceServiceStatus.create(instance_id=self.FAKE_ID,
                                     status=rd_instance.ServiceStatuses.NEW)
        self.appStatus = FakeAppStatus(self.FAKE_ID,
                                       rd_instance.ServiceStatuses.NEW)

    def tearDown(self):
        super(VerticaAppStatusTest, self).tearDown()
        InstanceServiceStatus.find_by(instance_id=self.FAKE_ID).delete()

    def _query_status(self, shell_mock):
        # Helper: create a fresh status object and probe it while
        # vertica_system.shell_execute is replaced by the given mock.
        self.verticaAppStatus = VerticaAppStatus()
        with patch.object(vertica_system, 'shell_execute', shell_mock):
            return self.verticaAppStatus._get_actual_db_status()

    def test_get_actual_db_status(self):
        actual = self._query_status(
            MagicMock(return_value=['db_srvr', None]))
        self.assertEqual(rd_instance.ServiceStatuses.RUNNING, actual)

    def test_get_actual_db_status_shutdown(self):
        actual = self._query_status(
            MagicMock(side_effect=[['', None],
                                   ['db_srvr', None]]))
        self.assertEqual(rd_instance.ServiceStatuses.SHUTDOWN, actual)

    def test_get_actual_db_status_error_crashed(self):
        actual = self._query_status(
            MagicMock(side_effect=ProcessExecutionError('problem')))
        self.assertEqual(rd_instance.ServiceStatuses.CRASHED, actual)
class VerticaAppTest(testtools.TestCase):
    """Tests for VerticaApp operations.

    Fixes over the original:
    * ``call_args_list[0].assert_called_with(...)`` was a silent no-op:
      ``mock.call`` objects swallow arbitrary attribute calls (they just
      build new call objects), so nothing was ever asserted.  Real
      ``assert_any_call`` checks on the mocks replace them.
    * ``mock_mkstemp.assert_called_once()`` was a no-op on older mock
      releases; an explicit call_count check replaces it.
    * setUp replaced ``vertica_system.shell_execute`` with a MagicMock but
      tearDown never restored it, leaking the mock into later tests.
    * Expected passwords are read from ``self.test_config`` so the
      fixture has a single source of truth.
    """

    def setUp(self):
        super(VerticaAppTest, self).setUp()
        self.FAKE_ID = 1000
        self.appStatus = FakeAppStatus(self.FAKE_ID,
                                       rd_instance.ServiceStatuses.NEW)
        self.app = VerticaApp(self.appStatus)
        self.setread = VolumeDevice.set_readahead_size
        self.Popen = subprocess.Popen
        # Save the real shell_execute so tearDown can undo this patch.
        self.orig_shell_execute = vertica_system.shell_execute
        vertica_system.shell_execute = MagicMock(return_value=('', ''))
        VolumeDevice.set_readahead_size = Mock()
        subprocess.Popen = Mock()
        self.test_config = ConfigParser.ConfigParser()
        self.test_config.add_section('credentials')
        self.test_config.set('credentials',
                             'dbadmin_password', '<PASSWORD>')

    def tearDown(self):
        super(VerticaAppTest, self).tearDown()
        self.app = None
        VolumeDevice.set_readahead_size = self.setread
        subprocess.Popen = self.Popen
        # Restore the module-level function patched in setUp.
        vertica_system.shell_execute = self.orig_shell_execute

    def _dbadmin_password(self):
        # Single source of truth for the fixture's admin password.
        return self.test_config.get('credentials', 'dbadmin_password')

    def test_install_if_needed_installed(self):
        with patch.object(pkg.Package, 'pkg_is_installed', return_value=True):
            with patch.object(pkg.Package, 'pkg_install', return_value=None):
                self.app.install_if_needed('vertica')
                pkg.Package.pkg_is_installed.assert_any_call('vertica')
                self.assertEqual(pkg.Package.pkg_install.call_count, 0)

    def test_install_if_needed_not_installed(self):
        with patch.object(pkg.Package, 'pkg_is_installed', return_value=False):
            with patch.object(pkg.Package, 'pkg_install', return_value=None):
                self.app.install_if_needed('vertica')
                pkg.Package.pkg_is_installed.assert_any_call('vertica')
                self.assertEqual(pkg.Package.pkg_install.call_count, 1)

    def test_prepare_for_install_vertica(self):
        self.app.prepare_for_install_vertica()
        self.assertEqual(VolumeDevice.set_readahead_size.call_count, 1)
        expected_command = (
            "VERT_DBA_USR=dbadmin VERT_DBA_HOME=/home/dbadmin "
            "VERT_DBA_GRP=verticadba /opt/vertica/oss/python/bin/python"
            " -m vertica.local_coerce")
        vertica_system.shell_execute.assert_any_call(expected_command)

    def test_install_vertica(self):
        with patch.object(self.app, 'write_config',
                          return_value=None):
            self.app.install_vertica(members='10.0.0.2')
        expected_command = (
            vertica_system.INSTALL_VERTICA % ('10.0.0.2', '/var/lib/vertica'))
        vertica_system.shell_execute.assert_any_call(expected_command)

    def test_create_db(self):
        with patch.object(self.app, 'read_config',
                          return_value=self.test_config):
            self.app.create_db(members='10.0.0.2')
        expected_command = (vertica_system.CREATE_DB % ('10.0.0.2', 'db_srvr',
                                                        '/var/lib/vertica',
                                                        '/var/lib/vertica',
                                                        self._dbadmin_password()))
        vertica_system.shell_execute.assert_any_call(expected_command,
                                                     'dbadmin')

    def test_vertica_write_config(self):
        temp_file_handle = tempfile.NamedTemporaryFile(delete=False)
        mock_mkstemp = MagicMock(return_value=(temp_file_handle))
        mock_unlink = Mock(return_value=0)
        self.app.write_config(config=self.test_config,
                              temp_function=mock_mkstemp,
                              unlink_function=mock_unlink)
        expected_command = (
            ("install -o root -g root -m 644 %(source)s %(target)s"
             ) % {'source': temp_file_handle.name,
                  'target': vertica_system.VERTICA_CONF})
        vertica_system.shell_execute.assert_any_call(expected_command)
        # assert_called_once() was a no-op on older mock; check the count.
        self.assertEqual(1, mock_mkstemp.call_count)
        configuration_data = ConfigParser.ConfigParser()
        configuration_data.read(temp_file_handle.name)
        self.assertEqual(
            self.test_config.get('credentials', 'dbadmin_password'),
            configuration_data.get('credentials', 'dbadmin_password'))
        self.assertEqual(mock_unlink.call_count, 1)
        # really delete the temporary config file
        os.unlink(temp_file_handle.name)

    def test_vertica_error_in_write_config_verify_unlink(self):
        # A failing install command must propagate and still unlink the
        # temporary file.
        mock_unlink = Mock(return_value=0)
        temp_file_handle = tempfile.NamedTemporaryFile(delete=False)
        mock_mkstemp = MagicMock(return_value=temp_file_handle)
        with patch.object(vertica_system, 'shell_execute',
                          side_effect=ProcessExecutionError('some exception')):
            self.assertRaises(ProcessExecutionError,
                              self.app.write_config,
                              config=self.test_config,
                              temp_function=mock_mkstemp,
                              unlink_function=mock_unlink)
        self.assertEqual(mock_unlink.call_count, 1)
        # delete the temporary config file
        os.unlink(temp_file_handle.name)

    def test_restart(self):
        mock_status = MagicMock()
        app = VerticaApp(mock_status)
        mock_status.begin_restart = MagicMock(return_value=None)
        with patch.object(VerticaApp, 'stop_db', return_value=None):
            with patch.object(VerticaApp, 'start_db', return_value=None):
                mock_status.end_install_or_restart = MagicMock(
                    return_value=None)
                app.restart()
                mock_status.begin_restart.assert_any_call()
                VerticaApp.stop_db.assert_any_call()
                VerticaApp.start_db.assert_any_call()

    def test_start_db(self):
        mock_status = MagicMock()
        type(mock_status)._is_restarting = PropertyMock(return_value=False)
        app = VerticaApp(mock_status)
        with patch.object(app, '_enable_db_on_boot', return_value=None):
            with patch.object(app, 'read_config',
                              return_value=self.test_config):
                mock_status.end_install_or_restart = MagicMock(
                    return_value=None)
                app.start_db()
                agent_expected_command = [
                    'sudo', 'su', '-', 'root', '-c',
                    (vertica_system.VERTICA_AGENT_SERVICE_COMMAND % 'start')]
                db_expected_cmd = [
                    'sudo', 'su', '-', 'dbadmin', '-c',
                    (vertica_system.START_DB % ('db_srvr',
                                                self._dbadmin_password()))]
                self.assertTrue(mock_status.end_install_or_restart.called)
                # Real assertions replace the no-op checks on call objects.
                subprocess.Popen.assert_any_call(agent_expected_command)
                subprocess.Popen.assert_any_call(db_expected_cmd)

    def test_start_db_failure(self):
        mock_status = MagicMock()
        app = VerticaApp(mock_status)
        with patch.object(app, '_enable_db_on_boot',
                          side_effect=RuntimeError()):
            with patch.object(app, 'read_config',
                              return_value=self.test_config):
                self.assertRaises(RuntimeError, app.start_db)

    def test_stop_db(self):
        mock_status = MagicMock()
        type(mock_status)._is_restarting = PropertyMock(return_value=False)
        app = VerticaApp(mock_status)
        with patch.object(app, '_disable_db_on_boot', return_value=None):
            with patch.object(app, 'read_config',
                              return_value=self.test_config):
                with patch.object(vertica_system, 'shell_execute',
                                  MagicMock(side_effect=[['', ''],
                                                         ['db_srvr', None],
                                                         ['', '']])):
                    mock_status.wait_for_real_status_to_change_to = MagicMock(
                        return_value=True)
                    mock_status.end_install_or_restart = MagicMock(
                        return_value=None)
                    app.stop_db()
                    self.assertEqual(vertica_system.shell_execute.call_count,
                                     3)
                    # There are 3 shell-executions:
                    # a) stop vertica-agent service
                    # b) check database status
                    # c) stop_db
                    expected_cmd = (vertica_system.STOP_DB % (
                        'db_srvr', self._dbadmin_password()))
                    self.assertTrue(
                        mock_status.wait_for_real_status_to_change_to.called)
                    # Real assertion replaces the no-op check on the
                    # third call object.
                    vertica_system.shell_execute.assert_any_call(
                        expected_cmd, 'dbadmin')

    def test_stop_db_failure(self):
        mock_status = MagicMock()
        type(mock_status)._is_restarting = PropertyMock(return_value=False)
        app = VerticaApp(mock_status)
        with patch.object(app, '_disable_db_on_boot', return_value=None):
            with patch.object(app, 'read_config',
                              return_value=self.test_config):
                with patch.object(vertica_system, 'shell_execute',
                                  MagicMock(side_effect=[['', ''],
                                                         ['db_srvr', None],
                                                         ['', '']])):
                    mock_status.wait_for_real_status_to_change_to = MagicMock(
                        return_value=None)
                    mock_status.end_install_or_restart = MagicMock(
                        return_value=None)
                    self.assertRaises(RuntimeError, app.stop_db)

    def test_export_conf_to_members(self):
        self.app._export_conf_to_members(members=['member1', 'member2'])
        self.assertEqual(vertica_system.shell_execute.call_count, 2)

    def test_authorize_public_keys(self):
        user = 'test_user'
        keys = ['test_key@machine1', 'test_key@machine2']
        with patch.object(os.path, 'expanduser',
                          return_value=('/home/' + user)):
            self.app.authorize_public_keys(user=user, public_keys=keys)
        self.assertEqual(vertica_system.shell_execute.call_count, 2)
        vertica_system.shell_execute.assert_any_call(
            'cat ' + '/home/' + user + '/.ssh/authorized_keys')

    def test_get_public_keys(self):
        user = 'test_user'
        with patch.object(os.path, 'expanduser',
                          return_value=('/home/' + user)):
            self.app.get_public_keys(user=user)
        self.assertEqual(vertica_system.shell_execute.call_count, 2)
        vertica_system.shell_execute.assert_any_call(
            (vertica_system.SSH_KEY_GEN % ('/home/' + user)), user)
        vertica_system.shell_execute.assert_any_call(
            'cat ' + '/home/' + user + '/.ssh/id_rsa.pub')

    def test_install_cluster(self):
        with patch.object(self.app, 'read_config',
                          return_value=self.test_config):
            self.app.install_cluster(members=['member1', 'member2'])
        # Verifying number of shell calls, as the commands themselves
        # have already been tested in preceding tests.
        self.assertEqual(vertica_system.shell_execute.call_count, 5)
class DB2AppTest(testtools.TestCase):
    """Lifecycle tests for db2service.DB2App (stop/start/restart)."""

    def setUp(self):
        super(DB2AppTest, self).setUp()
        # Save the real executor so tearDown can restore it; individual
        # tests replace it with mocks.
        self.orig_utils_execute_with_timeout = (
            db2service.utils.execute_with_timeout)
        util.init_db()
        self.FAKE_ID = str(uuid4())
        InstanceServiceStatus.create(instance_id=self.FAKE_ID,
                                     status=rd_instance.ServiceStatuses.NEW)
        self.appStatus = FakeAppStatus(self.FAKE_ID,
                                       rd_instance.ServiceStatuses.NEW)
        self.db2App = db2service.DB2App(self.appStatus)
        dbaas.CONF.guest_id = self.FAKE_ID

    def tearDown(self):
        super(DB2AppTest, self).tearDown()
        # Undo the module-level monkey-patching and DB fixtures.
        db2service.utils.execute_with_timeout = (
            self.orig_utils_execute_with_timeout)
        InstanceServiceStatus.find_by(instance_id=self.FAKE_ID).delete()
        dbaas.CONF.guest_id = None
        self.db2App = None

    def assert_reported_status(self, expected_status):
        # Status as persisted in the instance-service-status table.
        service_status = InstanceServiceStatus.find_by(
            instance_id=self.FAKE_ID)
        self.assertEqual(expected_status, service_status.status)

    def test_stop_db(self):
        db2service.utils.execute_with_timeout = MagicMock(return_value=None)
        self.appStatus.set_next_status(rd_instance.ServiceStatuses.SHUTDOWN)
        self.db2App.stop_db()
        self.assert_reported_status(rd_instance.ServiceStatuses.NEW)

    def test_restart_server(self):
        self.appStatus.set_next_status(rd_instance.ServiceStatuses.RUNNING)
        mock_status = MagicMock(return_value=None)
        app = db2service.DB2App(mock_status)
        mock_status.begin_restart = MagicMock(return_value=None)
        app.stop_db = MagicMock(return_value=None)
        app.start_db = MagicMock(return_value=None)
        app.restart()
        self.assertTrue(mock_status.begin_restart.called)
        self.assertTrue(app.stop_db.called)
        self.assertTrue(app.start_db.called)

    def test_start_db(self):
        db2service.utils.execute_with_timeout = MagicMock(return_value=None)
        self.appStatus.set_next_status(rd_instance.ServiceStatuses.RUNNING)
        with patch.object(self.db2App, '_enable_db_on_boot',
                          return_value=None):
            self.db2App.start_db()
            self.assert_reported_status(rd_instance.ServiceStatuses.NEW)
class DB2AdminTest(testtools.TestCase):
    """Tests for db2service.DB2Admin database/user management.

    Each test patches db2service.run_command and inspects the db2 CLI
    string the admin layer builds.  Fixes over the original: copy-pasted
    assertion failure messages ("Delete database ...") in the list/get
    tests, mutation of the shared FAKE_USER module fixture, and
    inconsistent patching of the bare ``utils`` name vs
    ``db2service.utils``.
    """

    def setUp(self):
        super(DB2AdminTest, self).setUp()
        self.db2Admin = db2service.DB2Admin()
        # Save the real executor so tearDown can restore it.
        self.orig_utils_execute_with_timeout = (
            db2service.utils.execute_with_timeout)

    def tearDown(self):
        super(DB2AdminTest, self).tearDown()
        db2service.utils.execute_with_timeout = (
            self.orig_utils_execute_with_timeout)

    def test_delete_database(self):
        with patch.object(
                db2service, 'run_command',
                MagicMock(
                    return_value=None,
                    side_effect=ProcessExecutionError('Error'))):
            # A failing db2 command must surface as GuestError.
            self.assertRaises(GuestError,
                              self.db2Admin.delete_database,
                              FAKE_DB)
            self.assertTrue(db2service.run_command.called)
            args, _ = db2service.run_command.call_args_list[0]
            expected = "db2 drop database testDB"
            self.assertEqual(args[0], expected,
                             "Delete database queries are not the same")

    def test_list_databases(self):
        with patch.object(db2service, 'run_command', MagicMock(
                side_effect=ProcessExecutionError('Error'))):
            self.db2Admin.list_databases()
            self.assertTrue(db2service.run_command.called)
            args, _ = db2service.run_command.call_args_list[0]
            expected = "db2 list database directory " \
                "| grep -B6 -i indirect | grep 'Database name' | " \
                "sed 's/.*= //'"
            # Fixed copy-pasted failure message ("Delete database ...").
            self.assertEqual(args[0], expected,
                             "List database queries are not the same")

    def test_create_users(self):
        with patch.object(db2service, 'run_command', MagicMock(
                return_value=None)):
            db2service.utils.execute_with_timeout = MagicMock(
                return_value=None)
            self.db2Admin.create_user(FAKE_USER)
            self.assertTrue(db2service.utils.execute_with_timeout.called)
            self.assertTrue(db2service.run_command.called)
            args, _ = db2service.run_command.call_args_list[0]
            expected = "db2 connect to testDB; " \
                "db2 GRANT DBADM,CREATETAB,BINDADD,CONNECT,DATAACCESS " \
                "ON DATABASE TO USER random; db2 connect reset"
            self.assertEqual(
                args[0], expected,
                "Granting database access queries are not the same")
            self.assertEqual(db2service.run_command.call_count, 1)

    def test_delete_users_with_db(self):
        with patch.object(db2service, 'run_command',
                          MagicMock(return_value=None)):
            with patch.object(db2service.DB2Admin, 'list_access',
                              MagicMock(return_value=None)):
                # Patch through the db2service module so the assertion
                # below checks the same object the code under test uses.
                db2service.utils.execute_with_timeout = MagicMock(
                    return_value=None)
                self.db2Admin.delete_user(FAKE_USER[0])
                self.assertTrue(db2service.run_command.called)
                self.assertTrue(db2service.utils.execute_with_timeout.called)
                # User carries its databases, so no access lookup needed.
                self.assertFalse(db2service.DB2Admin.list_access.called)
                args, _ = db2service.run_command.call_args_list[0]
                expected = "db2 connect to testDB; " \
                    "db2 REVOKE DBADM,CREATETAB,BINDADD,CONNECT,DATAACCESS " \
                    "ON DATABASE FROM USER random; db2 connect reset"
                self.assertEqual(
                    args[0], expected,
                    "Revoke database access queries are not the same")
                self.assertEqual(db2service.run_command.call_count, 1)

    def test_delete_users_without_db(self):
        # Use a local user dict instead of appending to the shared
        # FAKE_USER fixture; the original mutated module state.
        user_without_db = {"_name": "random2", "_password": "<PASSWORD>",
                           "_databases": []}
        with patch.object(db2service, 'run_command',
                          MagicMock(return_value=None)):
            with patch.object(db2service.DB2Admin, 'list_access',
                              MagicMock(return_value=[FAKE_DB])):
                db2service.utils.execute_with_timeout = MagicMock(
                    return_value=None)
                self.db2Admin.delete_user(user_without_db)
                self.assertTrue(db2service.run_command.called)
                # No databases on the user -> access must be looked up.
                self.assertTrue(db2service.DB2Admin.list_access.called)
                self.assertTrue(
                    db2service.utils.execute_with_timeout.called)
                args, _ = db2service.run_command.call_args_list[0]
                expected = "db2 connect to testDB; " \
                    "db2 REVOKE DBADM,CREATETAB,BINDADD,CONNECT," \
                    "DATAACCESS ON DATABASE FROM USER random2; " \
                    "db2 connect reset"
                self.assertEqual(
                    args[0], expected,
                    "Revoke database access queries are not the same")
                self.assertEqual(db2service.run_command.call_count, 1)

    def test_list_users(self):
        databases = []
        databases.append(FAKE_DB)
        with patch.object(db2service, 'run_command', MagicMock(
                side_effect=ProcessExecutionError('Error'))):
            with patch.object(self.db2Admin, "list_databases",
                              MagicMock(return_value=(databases, None))):
                self.db2Admin.list_users()
                self.assertTrue(db2service.run_command.called)
                args, _ = db2service.run_command.call_args_list[0]
                expected = "db2 +o connect to testDB; " \
                    "db2 -x select grantee, dataaccessauth " \
                    "from sysibm.sysdbauth; db2 connect reset"
                # Fixed copy-pasted failure message.
                self.assertEqual(args[0], expected,
                                 "List user queries are not the same")

    def test_get_user(self):
        databases = []
        databases.append(FAKE_DB)
        with patch.object(db2service, 'run_command', MagicMock(
                side_effect=ProcessExecutionError('Error'))):
            with patch.object(self.db2Admin, "list_databases",
                              MagicMock(return_value=(databases, None))):
                self.db2Admin._get_user('random', None)
                self.assertTrue(db2service.run_command.called)
                args, _ = db2service.run_command.call_args_list[0]
                expected = "db2 +o connect to testDB; " \
                    "db2 -x select grantee, dataaccessauth " \
                    "from sysibm.sysdbauth; db2 connect reset"
                # Fixed copy-pasted failure message.
                self.assertEqual(args[0], expected,
                                 "Get user queries are not the same")
# === dataset-row residue (extraction artifact) preserved as comments ===
# 1.578125 | 2
# Source file: lingvo/tasks/car/tools/export_kitti_detection.py
# Repo: zh794390558/lingvo | stars: 1 | id: 12764202
# Lint as: python2, python3
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
r"""Read saved Decoder's outputs and convert to KITTI text format.
First, obtain a KITTI camera calibration file.
To export all detections from a single model:
python export_kitti_detection.py \
--decoder_path=/path/to/decoder_out_000103000 \
--calib_file=/tmp/kitti_test_calibs.npz \
--output_dir=/tmp/my-kitti-export-directory \
--logtostderr
--- OR ---
Export combined detections selected from multiple models:
python export_kitti_detection.py \
--car_decoder_path=/path/to/car_decoder_out \
--ped_decoder_path=/path/to/ped_decoder_out \
--cyc_decoder_path=/path/to/cyc_decoder_out \
--calib_file=/tmp/kitti_test_calibs.npz \
--output_dir=/tmp/my-kitti-export-directory \
--logtostderr
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl import flags
from lingvo import compat as tf
from lingvo.core.ops import record_pb2
from lingvo.tasks.car import kitti_metadata
from lingvo.tasks.car.tools import kitti_data
import numpy as np
FLAGS = flags.FLAGS

# Input selection: either one combined decoder output file, or one decoder
# output per class (car / pedestrian / cyclist).  See the module docstring
# for example invocations of both modes.
flags.DEFINE_string(
    "decoder_path", None, "Paths to decoder file containing output "
    "of decoder for everything. Either supply this argument or individual "
    "decoder paths for cars, pedestrians and cyclists.")
flags.DEFINE_string(
    "car_decoder_path", None,
    "Paths to decoder file containing output of decoder for cars."
    "Either supply plus cyclists and pedestrians or supply one "
    "decoder for all labels.")
flags.DEFINE_string(
    "ped_decoder_path", None,
    "Paths to decoder file containing output of decoder for "
    "pedestrians. Either supply plus cyclists and cars or "
    "supply one decoder for all labels.")
flags.DEFINE_string(
    "cyc_decoder_path", None,
    "Paths to decoder file containing output of decoder for cyclist. "
    "Either supply plus cars and pedestrians or supply one "
    "decoder for all labels.")
flags.DEFINE_string(
    "calib_file", None,
    "Path to a npz file that contains all calibration matrices.")
flags.DEFINE_string("output_dir", None, "Place to write detections.")
# Detections scoring below this threshold are dropped at export time.
flags.DEFINE_float("score_threshold", 0, "Ignore detections with lower score.")
def LoadCalibData(fname):
  """Loads KITTI calibration data from an NPZ file.

  Args:
    fname: Path to the .npz file produced alongside the dataset; it holds a
      "scene_id" array plus one array per calibration matrix, indexed in
      lockstep.

  Returns:
    A dict mapping each scene_id to its parsed calibration dictionary
    (as returned by kitti_data.ParseCalibrationDict).
  """
  # If this throws an error, make sure the npz file was generated from
  # the same version of python as this binary.
  npz = np.load(fname)
  matrix_keys = ("P0", "P1", "P2", "P3", "R0_rect", "Tr_velo_to_cam",
                 "Tr_imu_to_velo")
  scene_to_calib = {}
  for row, scene_id in enumerate(npz["scene_id"]):
    tf.logging.info("Processing %s", scene_id)
    raw_calib = {key: npz[key][row] for key in matrix_keys}
    scene_to_calib[scene_id] = kitti_data.ParseCalibrationDict(raw_calib)
  return scene_to_calib
def ExtractNpContent(np_dict, calib):
  """Parse saved np arrays and convert 3D bboxes to camera0 coordinates.

  Args:
    np_dict: a dict of numpy arrays.
    calib: a parsed calibration dictionary.

  Returns:
    A tuple of 6 ndarrays:
    - location_camera: [N, 3]. [x, y, z] in camera0 coordinate.
    - dimension_camera: [N, 3]. The [height, width, length] of objects.
    - phi_camera: [N]. Rotation around y-axis in camera0 coordinate.
    - bboxes_2d: [N, 4]. The corresponding 2D bboxes in the image coordinate.
    - scores: [N]. Confidence scores for each box for the assigned class.
    - class_ids: [N]. The class id assigned to each box.
  """
  bboxes = np_dict["bboxes"]
  scores = np_dict["scores"]
  class_ids = np_dict["class_ids"]
  bboxes_2d = np_dict["bboxes_2d"]
  # Transform from velodyne coordinates to camera coordinates.
  velo_to_cam_transform = kitti_data.VeloToCameraTransformation(calib)
  # Pre-allocate the per-box outputs, then fill them one box at a time.
  location_cam = np.zeros((len(bboxes), 3))
  dimension_cam = np.zeros((len(bboxes), 3))
  rotation_cam = np.zeros((len(bboxes), 1))
  for idx, bbox in enumerate(bboxes):
    location_cam[idx, :], dimension_cam[idx, :], rotation_cam[idx, :] = (
        kitti_data.BBox3DToKITTIObject(bbox, velo_to_cam_transform))
  return location_cam, dimension_cam, rotation_cam, bboxes_2d, scores, class_ids
# KITTI classes exported by this tool. The ordering matters when individual
# per-class decoder files are supplied (see main(): table index selects the
# class name from this list).
_INCLUDED_KITTI_CLASS_NAMES = ["Car", "Pedestrian", "Cyclist"]
def ExportKITTIDetection(out_dir, source_id, location_cam, dimension_cam,
                         rotation_cam, bboxes_2d, scores, class_name, is_first):
  """Write detections to a text file in KITTI format.

  Args:
    out_dir: output directory; one <source_id>.txt file is appended per call.
    source_id: image id; used as the output file stem.
    location_cam: [N, 3] box centers in camera0 coordinates.
    dimension_cam: [N, 3] box [height, width, length].
    rotation_cam: [N, 1] rotation around the camera y-axis.
    bboxes_2d: [N, 4] 2D boxes in image coordinates.
    scores: [N] confidence scores.
    class_name: KITTI class name written on every line.
    is_first: if True, touch the file so it exists even with no detections.
  """
  # Lazy logging args instead of eager %-formatting.
  tf.logging.info("Exporting %s for %s", class_name, source_id)
  fname = out_dir + "/" + source_id + ".txt"
  # Loop-invariant: one %s (class name) followed by 15 float fields
  # (truncated, alpha, 4x bbox2d, 3x dimension, 3x location, rotation, score).
  kitti_format_string = ("%s %lf %lf %lf %lf %lf %lf %lf %lf %lf %lf %lf "
                         "%lf %lf %lf %lf")
  with tf.gfile.Open(fname, "a") as fid:
    # Ensure we always create a file even when there's no detection.
    # TODO(shlens): Test whether this is actually necessary on the KITTI
    # eval server.
    if is_first:
      fid.write("")
    for location, dimension, ry, bbox_2d, score in zip(
        location_cam, dimension_cam, rotation_cam, bboxes_2d, scores):
      if score < FLAGS.score_threshold:
        continue
      # class_name, truncated(ignore), alpha(ignore), bbox2D x 4
      part1 = [class_name, -1, -1, -10] + list(bbox_2d)
      # dimension x 3, location x 3, rotation_y x 1, score x 1.
      # float(ry): ry is a shape-(1,) ndarray; %-formatting such arrays is
      # deprecated (an error in recent NumPy), so convert explicitly.
      fill = tuple(part1 + list(dimension) + list(location) + [float(ry)] +
                   [score])
      kitti_line = kitti_format_string % fill
      fid.write(kitti_line + "\n")
def main(argv):
  """Convert decoder record file(s) into per-image KITTI detection files."""
  if len(argv) > 1:
    raise tf.app.UsageError("Too many command-line arguments.")
  # Exactly one of decoder_path or the three per-class decoder paths may be
  # supplied.
  if FLAGS.decoder_path:
    assert not FLAGS.car_decoder_path and not FLAGS.ped_decoder_path \
        and not FLAGS.cyc_decoder_path, ("Either provide decoder_path or "
                                         "individual decoders but not both.")
  else:
    assert FLAGS.car_decoder_path and FLAGS.ped_decoder_path and \
        FLAGS.cyc_decoder_path, ("No decoder_path specified. Please supply all "
                                 "individual decoder_paths for labels.")
  is_single_decoder_file = FLAGS.decoder_path is not None
  if is_single_decoder_file:
    list_of_decoder_paths = [FLAGS.decoder_path]
  else:
    # Note the correspondence between _INCLUDED_KITTI_CLASS_NAMES ordering and
    # this list.
    list_of_decoder_paths = [
        FLAGS.car_decoder_path, FLAGS.ped_decoder_path, FLAGS.cyc_decoder_path
    ]
  # A list of dictionaries mapping img ids to a dictionary of numpy tensors.
  table_data = []
  img_ids = []
  for table_path in list_of_decoder_paths:
    img_id_dict = {}
    for serialized in tf.io.tf_record_iterator(table_path):
      record = record_pb2.Record()
      record.ParseFromString(serialized)
      img_id = str(tf.make_ndarray(record.fields["img_id"]))
      img_ids.append(img_id)
      np_dict = {k: tf.make_ndarray(v) for k, v in record.fields.items()}
      img_id_dict[img_id] = np_dict
    table_data.append(img_id_dict)
  # Deduplicate ids across tables (note: loses ordering, which is harmless
  # since each image is written to its own file).
  img_ids = list(set(img_ids))
  if not tf.gfile.Exists(FLAGS.output_dir):
    tf.gfile.MkDir(FLAGS.output_dir)
  all_kitti_class_names = kitti_metadata.KITTIMetadata().ClassNames()
  # Calibration npz keys are "<img_id>.txt" (see lookup below).
  calib_data = LoadCalibData(tf.gfile.Open(FLAGS.calib_file, "rb"))
  count = 0
  for img_id in img_ids:
    # Ignore padded samples where the img_ids are empty.
    if not img_id:
      continue
    for table_index, img_id_dict in enumerate(table_data):
      if img_id in img_id_dict:
        np_dict = img_id_dict[img_id]
        (location_cam, dimension_cam, rotation_cam, bboxes_2d, scores,
         class_ids) = ExtractNpContent(np_dict, calib_data[img_id + ".txt"])
        if is_single_decoder_file:
          valid_labels = _INCLUDED_KITTI_CLASS_NAMES
        else:
          # Each per-class decoder file only contributes its own class.
          valid_labels = [_INCLUDED_KITTI_CLASS_NAMES[table_index]]
        # Only the first table may (re)create the output file.
        is_first = table_index == 0
        for class_name in valid_labels:
          class_mask = (class_ids == all_kitti_class_names.index(class_name))
          ExportKITTIDetection(FLAGS.output_dir, img_id,
                               location_cam[class_mask],
                               dimension_cam[class_mask],
                               rotation_cam[class_mask], bboxes_2d[class_mask],
                               scores[class_mask], class_name, is_first)
    count += 1
  tf.logging.info("Total example exported: %d", count)


if __name__ == "__main__":
  tf.app.run(main)
| 1.851563 | 2 |
scripts/train.py | CrhistyanSilva/localbitsback | 0 | 12764203 | # !/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import print_function
import argparse
import datetime
import json
import math
import os
import random
import time
import numpy as np
import torch
import torch.optim as optim
import torch.utils.data
import compression
from compression.utils import load_imagenet_data
from optimization.training import train, evaluate
random.seed(7610)
parser = argparse.ArgumentParser(description='PyTorch Discrete Normalizing flows')
parser.add_argument('--imagenet64_data_path', type=str, default='~/data/imagenet-small/train_64x64.npy')
parser.add_argument('--imagenet64_valid_data_path', type=str, default='~/data/imagenet-small/valid_64x64.npy')
parser.add_argument('--imagenet64_model', type=str, default=None)
parser.add_argument('--state_parameters', type=str, default=None)
parser.add_argument('--from_torch', action="store_true")
parser.add_argument('--manual_seed', type=int, help='manual seed, if not given resorts to random seed.')
parser.add_argument('--evaluate_interval_epochs', type=int, default=5,
help='Evaluate per how many epochs')
parser.add_argument('--snap_images', type=int, default=100000,
help='Number of images to process on training before save snapshots')
parser.add_argument('-od', '--out_dir', type=str, default='./', help='output directory for model snapshots etc.')
# optimization settings
parser.add_argument('-e', '--epochs', type=int, default=100, metavar='EPOCHS',
help='number of epochs to train (default: 2000)')
parser.add_argument('-bs', '--batch_size', type=int, default=2, metavar='BATCH_SIZE',
help='input batch size for training (default: 100)')
parser.add_argument('-lr', '--learning_rate', type=float, default=0.00001, metavar='LEARNING_RATE',
help='learning rate')
parser.add_argument('--step_size', default=10000, type=float,
help='Number of batch iteration to update the learning rate')
parser.add_argument('--gamma', default=0.1, type=float,
help='Multiplicative factor of learning rate decay')
args = parser.parse_args()
if args.manual_seed is None:
args.manual_seed = random.randint(1, 100000)
random.seed(args.manual_seed)
torch.manual_seed(args.manual_seed)
np.random.seed(args.manual_seed)
def run(args):
    """Train the Imagenet64 flow model described by *args*, evaluating
    periodically and printing a final summary.

    Side effects: writes a config snapshot and log files under args.out_dir.
    """
    print('\nMODEL SETTINGS: \n', args, '\n')
    print("Random Seed: ", args.manual_seed)

    # ==================================================================================================================
    # SNAPSHOTS
    # ==================================================================================================================
    args.model_signature = str(datetime.datetime.now())[0:19].replace(' ', '_')
    args.model_signature = args.model_signature.replace(':', '_')

    os.makedirs(args.out_dir, exist_ok=True)
    snap_dir = args.out_dir

    with open(os.path.join(snap_dir, 'log.txt'), 'a') as ff:
        print('\nMODEL SETTINGS: \n', args, '\n', file=ff)

    # SAVING
    torch.save(args, snap_dir + '.config')

    # Load snapshot parameters (resume support): restore the last LR so the
    # freshly built optimizer/scheduler pick up where the run stopped.
    parameters_dict = None
    if args.state_parameters is not None:
        assert os.path.isfile(args.state_parameters)
        parameters_dict = json.load(open(args.state_parameters))
        args.learning_rate = parameters_dict['scheduler']['_last_lr'][0]

    args.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    print('Device:', args.device)

    # ==================================================================================================================
    # LOAD DATA
    # ==================================================================================================================
    dataset = load_imagenet_data(os.path.expanduser(args.imagenet64_data_path))
    validation_dataset = load_imagenet_data(os.path.expanduser(args.imagenet64_valid_data_path))

    train_loader = torch.utils.data.DataLoader(dataset, batch_size=args.batch_size, shuffle=True, drop_last=False)
    val_loader = torch.utils.data.DataLoader(validation_dataset, batch_size=args.batch_size, shuffle=True,
                                             drop_last=False)
    # test_loader = torch.utils.data.DataLoader(
    #     dataset,
    #     batch_size=args.batch_size,
    #     shuffle=False,
    #     **kwargs)

    args.input_size = [3, 64, 64]

    # ==================================================================================================================
    # SELECT MODEL
    # ==================================================================================================================
    # flow parameters and architecture choice are passed on to model through args
    print(args.input_size)

    from compression.models.load_flowpp_imagenet64 import Imagenet64Model

    # Load model
    if args.imagenet64_model is None:
        model = Imagenet64Model(force_float32_cond=True).eval()
    else:
        model_ctor = compression.models.load_imagenet64_model
        model_filename = os.path.expanduser(args.imagenet64_model)
        model = model_ctor(model_filename, force_float32_cond=True, from_torch=args.from_torch)
    model.to(device=args.device)
    model_sample = model

    optimizer = optim.Adam(model.parameters(), lr=args.learning_rate)
    scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=args.step_size, gamma=args.gamma)

    # ==================================================================================================================
    # TRAINING
    # ==================================================================================================================
    train_bpd = []
    val_bpd = []

    # for early stopping
    best_val_bpd = np.inf
    best_val_loss = np.inf

    if args.state_parameters is None:
        last_epoch = 1
        run_number = 1
    else:
        last_epoch = parameters_dict['epoch']
        run_number = parameters_dict['run_number'] + 1
        scheduler.load_state_dict(parameters_dict['scheduler'])

    train_times = []
    model.double()

    for epoch in range(last_epoch, args.epochs + 1):
        t_start = time.time()
        # On resume, the first call replays from the saved batch index.
        if parameters_dict is not None:
            tr_loss, tr_bpd = train(epoch, train_loader, model, optimizer, args, scheduler,
                                    True, parameters_dict['batch_idx'], run_number)
        else:
            tr_loss, tr_bpd = train(epoch, train_loader, model, optimizer, args, scheduler, False)
        train_bpd.append(tr_bpd)
        train_times.append(time.time() - t_start)
        print('One training epoch took %.2f seconds' % (time.time() - t_start))

        if epoch < 5 or epoch % args.evaluate_interval_epochs == 0:
            v_loss, v_bpd = evaluate(
                val_loader, model, model_sample, args,
                epoch=epoch, file=snap_dir + 'log.txt')
            # NOTE(review): snap_dir + 'log.txt' only forms a valid path when
            # out_dir ends with a separator (the default './' does) — confirm.
            val_bpd.append(v_bpd)

            best_val_bpd = min(v_bpd, best_val_bpd)
            best_val_loss = min(v_loss, best_val_loss)
            print('(BEST: val bpd {:.4f}, val loss {:.4f})\n'.format(best_val_bpd, best_val_loss))
            print(f'VALIDATION: loss: {v_loss}, bpd: {v_bpd}')

            if math.isnan(v_loss):
                raise ValueError('NaN encountered!')

    train_bpd = np.hstack(train_bpd)
    val_bpd = np.array(val_bpd)

    # training time per epoch
    train_times = np.array(train_times)
    mean_train_time = np.mean(train_times)
    std_train_time = np.std(train_times, ddof=1)
    print('Average train time per epoch: %.2f +/- %.2f' % (mean_train_time, std_train_time))

    # ==================================================================================================================
    # EVALUATION
    # ==================================================================================================================
    # NOTE(review): this final-evaluation block looks broken: `test_loader` is
    # never defined (its construction is commented out above), the argument
    # list does not match the evaluate() call used during training, and
    # snap_dir + 'a.model' is never written by this script — verify before
    # relying on the post-training test numbers.
    final_model = torch.load(snap_dir + 'a.model')
    test_loss, test_bpd = evaluate(
        train_loader, test_loader, final_model, final_model, args,
        epoch=epoch, file=snap_dir + 'test_log.txt')

    print('Test loss / bpd: %.2f / %.2f' % (test_loss, test_bpd))


if __name__ == "__main__":
    run(args)
| 2.28125 | 2 |
alley/alley_cli.py | xperscore/alley | 13 | 12764204 | import logging
import click
from alley import MongoMigrations
@click.group()
@click.argument('path', type=click.Path(exists=True, file_okay=False, writable=True, resolve_path=True))
@click.option('--database', '-db')
@click.option('--username', '-u')
# Restored: the committed line contained a redacted placeholder
# (`<PASSWORD>'`) which is a syntax error. '-p' is already taken by --port,
# so the password short alias is '-pw'. TODO confirm against project history.
@click.option('--password', '-pw')
@click.option('--host', '-h')
@click.option('--port', '-p', type=int)
@click.option('--auth', '-a')
@click.pass_context
def cli(ctx, path, database, username, password, host=None, port=None, auth=None):
    """Root command group: builds a MongoMigrations bound to PATH and the
    given connection options, and stashes it on the click context for the
    subcommands (which receive it via @click.pass_obj)."""
    logging.getLogger("alley.migrations").addHandler(logging.StreamHandler())
    ctx.obj = MongoMigrations(path, database, username, password, host=host, port=port, auth=auth)
@cli.command()
@click.pass_obj
def status(ctx):
    """Show which migrations have and have not been applied."""
    ctx.migrations.show_status()
@cli.command()
@click.argument('name')
@click.pass_obj
def create(ctx, name):
    """Create a new migration file called NAME."""
    ctx.migrations.create(name)
@cli.command()
@click.argument('migration_id', required=False)
@click.option('--fake', is_flag=True, help="Migration to run up to")
@click.pass_obj
def up(ctx, migration_id=None, fake=False):
    """Apply pending migrations, optionally only up to MIGRATION_ID."""
    ctx.migrations.up(migration_id, fake)
@cli.command()
@click.argument('migration_id')
@click.pass_obj
def down(ctx, migration_id):
    """Roll back migrations down to (and including) MIGRATION_ID."""
    ctx.migrations.down(migration_id)
| 2.171875 | 2 |
cli/BitSrunCore/core.py | fangli-li/bit-srun-cli | 5 | 12764205 | import logging
import requests
import re
import json
from .encryption.srun_md5 import *
from .encryption.srun_sha1 import *
from .encryption.srun_base64 import *
from .encryption.srun_xencode import *
# logging.basicConfig(level=logging.INFO)
class Core(object):
BASE_URL = 'http://10.0.0.55'
CHALLENGE = "/cgi-bin/get_challenge"
PROTAL = "/cgi-bin/srun_portal"
INFO = "/cgi-bin/rad_user_info"
SUCCED = "/cgi-bin/rad_user_info"
STATE = {
"E3001": "流量或时长已用尽",
"E3002": "计费策略条件不匹配",
"E3003": "控制策略条件不匹配",
"E3004": "余额不足",
"E3005": "在线变更计费策略",
"E3006": "在线变更控制策略",
"E3007": "超时",
"E3008": "连线数超额,挤出在线表。",
"E3009": "有代理行为",
"E3010": "无流量超时",
"E3101": "心跳包超时",
"E4001": "Radius表DM下线",
"E4002": "DHCP表DM下线",
"E4003": "Juniper IPOE COA上线",
"E4004": "Juniper IPOE COA下线",
"E4005": "proxy表DM下线",
"E4006": "COA在线更改带宽",
"E4007": "本地下线",
"E4008": "虚拟下线",
"E4009": "策略切换时下发COA",
"E4011": "结算时虚拟下线",
"E4012": "下发COA",
"E4101": "来自radius模块的DM下线(挤出在线表)",
"E4102": "来自系统设置(8081)的DM下线",
"E4103": "来自后台管理(8080)的DM下线",
"E4104": "来自自服务(8800)的DM下线",
"E4112": "来自系统设置(8081)的本地下线",
"E4113": "来自后台管理(8080)的本地下线",
"E4114": "来自自服务(8800)的本地下线",
"E4122": "来自系统设置(8081)的虚拟下线",
"E4123": "来自后台管理(8080)的虚拟下线",
"E4124": "来自自服务(8800)的虚拟下线",
"E2531": "用户不存在",
"E2532": "两次认证的间隔太短",
"E2533": "尝试次数过于频繁",
"E2534": "有代理行为被暂时禁用",
"E2535": "认证系统已关闭",
"E2536": "系统授权已过期",
"E2553": "密码错误",
"E2601": "不是专用客户端",
"E2606": "用户被禁用",
"E2611": "MAC绑定错误",
"E2612": "MAC在黑名单中",
"E2613": "NAS PORT绑定错误",
"E2614": "VLAN ID绑定错误",
"E2615": "IP绑定错误",
"E2616": "已欠费",
"E2620": "已经在线了",
"E2806": "找不到符合条件的产品",
"E2807": "找不到符合条件的计费策略",
"E2808": "找不到符合条件的控制策略",
"E2833": "IP地址异常,请重新拿地址",
"E5990": "数据不完整",
"E5991": "无效的参数",
"E5992": "找不到这个用户",
"E5993": "用户已存在",
"E5001": "用户创建成功",
"E5002": "用户创建失败",
"E5010": "修改用户成功",
"E5011": "修改用户失败",
"E5020": "修改用户成功",
"E5021": "修改用户失败",
"E5030": "转组成功",
"E5031": "转组失败",
"E5040": "购买套餐成功",
"E5041": "购买套餐失败",
"E5042": "找不到套餐",
"E5050": "绑定MAC认证成功",
"E5051": "解绑MAC认证成功",
"E5052": "绑定MAC成功",
"E5053": "解绑MAC成功",
"E5054": "绑定nas port成功",
"E5055": "解绑nas port成功",
"E5056": "绑定vlan id成功",
"E5057": "解绑vlan id成功",
"E5058": "绑定ip成功",
"E5059": "解绑ip成功",
"E6001": "用户缴费成功",
"E6002": "用户缴费失败",
"E7001": "用户不存在",
"E7002": "添加待结算队列失败",
"E7003": "结算成功",
"E7004": "添加已结算队列失败",
"E7005": "扣除产品实例结算金额失败",
"E7006": "没有找到产品实例",
"E7007": "没有对该用户进行手动结算的权限",
"E7008": "没有对该产品进行手动结算的权限",
"E7009": "由于使用流量小于该产品结算设置而不扣费",
"E7010": "由于使用时长小于该产品结算设置而不扣费",
"E7011": "由于产品余额不足,根据结算设置而不扣费",
"E7012": "由于产品余额不足,根据结算设置余额扣为0",
"E7013": "由于产品余额不足,根据结算设置余额扣为负值",
"E7014": "删除过期套餐操作成功",
"E7015": "删除过期套餐操作失败",
"E7016": "自动购买套餐成功",
"E7017": "自动购买套餐失败",
"E7018": "产品结算模式错误",
"vcode_error": "验证码错误",
}
    @staticmethod
    def login(username, password):
        """Authenticate *username* against the srun captive portal.

        Three HTTP round-trips: fetch ac_id from the portal redirect, fetch
        the login challenge token, then submit the encrypted login request
        built by _generate_params(). Prints the outcome to stdout.
        """
        # print("正在登录...")
        challenge = ""
        clientip = ""
        ac_id = ""
        # GET ac_id: the portal 302-redirects to a URL carrying ac_id.
        acid_r = requests.get(Core.BASE_URL + '/index_1.html', allow_redirects=False)
        # print(r.text)
        # <a href="/srun_portal_pc?ac_id=1&theme=bit">Found</a>
        if acid_r.status_code == 302:
            # NOTE(review): '[0-9]' only captures the first digit of ac_id;
            # multi-digit ids would be truncated — confirm the portal's range.
            ac_id = re.search('[0-9]', acid_r.text).group()
            logging.info("获取acid:" + ac_id)
        else:
            logging.error("获取ac_id失败!")
        # Fetch the login challenge token for this user.
        challenge_params={
            "username": username,
            "callback": "jsonnp",
        }
        challenge_r = requests.get(Core.BASE_URL + Core.CHALLENGE, params=challenge_params)
        if challenge_r.status_code == 200:
            # The response is JSONP; strip the callback wrapper with a regex.
            json_str = re.search('{(.*)}', challenge_r.text).group()
            res_dict = json.loads(json_str)
            # print(res_dict)
            challenge = res_dict["challenge"]
            clientip = res_dict['client_ip']
            logging.info("获取challenge:"+challenge)
        else:
            logging.error("获取challenge失败!")
        # Build the encrypted payload and submit the login request.
        login_params = Core._generate_params(username,password, clientip, ac_id, challenge)
        # print(login_params)
        login_r = requests.get(Core.BASE_URL+Core.PROTAL, params= login_params)
        if login_r.status_code == 200:
            json_str = re.search('{(.*)}', login_r.text).group()
            res_dict = json.loads(json_str)
            # print(json_str)
            if res_dict['ecode'] == 0:
                if res_dict['suc_msg'] == "login_ok":
                    print("登录成功!")
                elif res_dict['suc_msg'] == "ip_already_online_error":
                    print("已在线!无需登录。")
                print("姓名:\t"+res_dict['real_name'])
                print("账户:\t"+res_dict['username'])
                print("IP地址:\t"+res_dict['client_ip'])
            else:
                # Map the portal error code to a human-readable message.
                logging.error("认证失败!"+ Core.STATE[res_dict['ecode']])
        else:
            logging.error("认证失败!")
    @staticmethod
    def logout(username):
        """Log *username* out of the srun portal and print the outcome."""
        ac_id = ''
        # GET ac_id: same redirect trick as in login().
        acid_r = requests.get(Core.BASE_URL + '/index_1.html', allow_redirects=False)
        # print(r.text)
        # <a href="/srun_portal_pc?ac_id=1&theme=bit">Found</a>
        if acid_r.status_code == 302:
            ac_id = re.search('[0-9]', acid_r.text).group()
            logging.info("获取acid:" + ac_id)
        else:
            logging.error("获取ac_id失败!")
        logout_params={
            'action': 'logout',
            'ac_id': ac_id,
            'username': username
        }
        logout_r = requests.get(Core.BASE_URL + Core.PROTAL, params=logout_params)
        print("账户: " +username)
        # print(logout_r.status_code)
        if logout_r.text == "logout_ok":
            print("注销成功!")
        elif logout_r.text == 'login_error#You are not online.':
            print("注销失败,你不在线!")
        else:
            # NOTE(review): any unrecognized portal reply is reported as
            # success — confirm this catch-all is intended.
            print("注销成功")
@staticmethod
def info():
info_r = requests.get(Core.BASE_URL+Core.INFO, params={"callback": "jsonnp"})
if info_r.status_code == 200:
json_str = re.search('{(.*)}', info_r.text).group()
res_dict = json.loads(json_str)
print(res_dict)
else:
print("Error")
@staticmethod
def _generate_params(username, password, ip, ac_id, challenge):
result ={
"callback": "jsonnp",
"action": "login",
"username": username,
"password": "",
"ac_id": ac_id,
"ip": ip,
"info": "",
"chksum": "",
"n": "200",
"type": "1"
}
info_params = {
"username": username,
"password": password,
"ip": ip,
"acid": ac_id,
"enc_ver": "srun_bx1"
}
info_str = json.dumps(info_params)
# print(info_str)
encrypted_info = "{SRBX1}" + get_base64(get_xencode(info_str, challenge))
result['info'] = encrypted_info
md5 = get_md5("", challenge)
result['password'] = <PASSWORD>
chkstr = challenge + username
chkstr += challenge + md5
chkstr += challenge + ac_id
chkstr += challenge + ip
chkstr += challenge + "200"
chkstr += challenge + "1"
chkstr += challenge + encrypted_info
result['chksum'] = get_sha1(chkstr)
return result
if __name__ == '__main__':
Core.login("xxxxxxx", "xxxxxxxx") | 2.078125 | 2 |
todolist/todos/migrations/0001_initial.py | abdu1aziz/todo-list-app | 0 | 12764206 | <reponame>abdu1aziz/todo-list-app
# Generated by Django 3.1 on 2020-09-21 00:59
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated initial migration: creates the ``workList`` table."""

    initial = True

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='workList',
            fields=[
                ('id', models.AutoField(primary_key=True, serialize=False)),
                # NOTE(review): plain IntegerField rather than a ForeignKey
                # to the user model — presumably intentional; verify.
                ('user_id', models.IntegerField()),
                ('task', models.CharField(max_length=150)),
                ('date', models.DateTimeField(auto_now_add=True)),
                ('is_done', models.BooleanField(default=False)),
            ],
        ),
    ]
| 1.867188 | 2 |
base-freq-histogram/test.py | kyclark/python_bioinformatics | 14 | 12764207 | #!/usr/bin/env python3
"""tests for histy.py"""
import os
import random
import re
import string
from subprocess import getstatusoutput
prg = './histy.py'
# --------------------------------------------------
def test_usage():
    """Program shows a usage message; only the bare invocation exits non-zero."""
    for flag in ('', '-h', '--help'):
        rv, out = getstatusoutput(f'{prg} {flag}')
        if flag:
            assert rv == 0
        else:
            assert rv > 0
        assert re.match("usage", out, re.IGNORECASE)
# --------------------------------------------------
def run(file, args, expected_out):
    """Run the program on an input file and compare against expected output."""
    in_file = os.path.join('../inputs', file)
    with open(os.path.join('test-outs', expected_out)) as fh:
        expected = fh.read().rstrip()
    rv, out = getstatusoutput(f'{prg} {args} {in_file}')
    assert rv == 0
    assert expected == out.rstrip()
# --------------------------------------------------
def test_01():
    """Default options."""
    run('fox.txt', '', 'fox.txt.1')


# --------------------------------------------------
def test_02():
    """Case-insensitive counting (-i)."""
    run('fox.txt', '-i', 'fox.txt.2')


# --------------------------------------------------
def test_03():
    """Custom histogram character (-c)."""
    run('fox.txt', "-c '!'", 'fox.txt.3')


# --------------------------------------------------
def test_04():
    """Minimum-count filter (-m)."""
    run('sonnet-29.txt', "-m 2", 'sonnet-29.txt.1')


# --------------------------------------------------
def test_05():
    """Combined width, minimum, frequency sort, and character options."""
    run('sonnet-29.txt', "-w 50 -m 2 -f -c '$'", 'sonnet-29.txt.2')
| 2.515625 | 3 |
03-plotting-decisions/plotter.py | moos-tutorials/python-moos-tutorials | 3 | 12764208 | #!/usr/bin/env python3
from pymoos import pymoos
import time
import matplotlib.pyplot as plt
import numpy as np
import threading
fig, ax = plt.subplots(subplot_kw=dict(polar=True))
ax.set_theta_direction(-1)
ax.set_theta_zero_location('N')
nav_line, des_line, = ax.plot([], [], 'r', [], [], 'b')
nav_line.set_label('NAV')
des_line.set_label('DESIRED')
ax.legend()
class plotter(pymoos.comms):
    """plotter is a simple app that connects to MOOSDB and plots data.

    Subscribes to NAV_*/DESIRED_* and redraws the two module-level polar
    lines. Callbacks run on the MOOS comms thread, so access to the shared
    matplotlib objects is serialized with ``self.lock``.
    """

    def __init__(self, moos_community, moos_port):
        """Initiates MOOSComms, sets the callbacks and runs the loop"""
        super(plotter, self).__init__()
        self.server = moos_community
        self.port = moos_port
        self.name = 'plotter'
        # Last observed desired/actual heading (degrees) and speed.
        self.d_heading = 0
        self.d_speed = 0
        self.n_heading = 0
        self.n_speed = 0

        # getting a lock to threadsafely draw
        self.lock = threading.Lock()

        self.set_on_connect_callback(self.__on_connect)
        self.set_on_mail_callback(self.__on_new_mail)

        # Route NAV_* and DESIRED_* messages to their dedicated handlers.
        self.add_active_queue('nav_queue', self.on_nav)
        self.add_message_route_to_active_queue('nav_queue', 'NAV_HEADING')
        self.add_message_route_to_active_queue('nav_queue', 'NAV_SPEED')

        self.add_active_queue('desired_queue', self.on_desired)
        self.add_message_route_to_active_queue('desired_queue', 'DESIRED_HEADING')
        self.add_message_route_to_active_queue('desired_queue', 'DESIRED_SPEED')

        self.run(self.server, self.port, self.name)

    def __on_connect(self):
        """OnConnect callback: register for the four plotted variables."""
        print("Connected to", self.server, self.port,
              "under the name ", self.name)
        return (self.register("NAV_SPEED", 0)
                and self.register("NAV_HEADING", 0)
                and self.register("DESIRED_SPEED", 0)
                and self.register("DESIRED_HEADING", 0))

    def __on_new_mail(self):
        """OnNewMail callback: everything expected is routed to a queue."""
        for msg in self.fetch():
            print("Unhandled mail received:", msg.key(), "!")
        return True

    def on_nav(self, msg):
        """Special callback for NAV_*"""
        print("on_nav activated by",
              msg.key(), "with value", msg.double())
        if msg.key() == 'NAV_HEADING':
            self.n_heading = msg.double()
        elif msg.key() == 'NAV_SPEED':
            self.n_speed = msg.double()
        # Radial segment from the origin out to the current speed, at the
        # current heading (converted to radians for the polar axes).
        r = np.arange(0, self.n_speed, 0.1)
        theta = np.deg2rad(self.n_heading)
        self.lock.acquire()
        try:
            nav_line.set_xdata(theta)
            nav_line.set_ydata(r)
            ax.set_rmax(5)
            # NOTE(review): plt.draw() is invoked from the comms thread while
            # the GUI loop runs in the main thread; most matplotlib backends
            # are not thread-safe — confirm the backend tolerates this.
            plt.draw()
        finally:
            self.lock.release()
        return True

    def on_desired(self, msg):
        """Special callback for DESIRED_*"""
        print("on_desired activated by",
              msg.key(), "with value", msg.double())
        if msg.key() == 'DESIRED_HEADING':
            self.d_heading = msg.double()
        elif msg.key() == 'DESIRED_SPEED':
            self.d_speed = msg.double()
        r = np.arange(0, self.d_speed, 0.1)
        theta = np.deg2rad(self.d_heading)
        self.lock.acquire()
        try:
            des_line.set_xdata(theta)
            des_line.set_ydata(r)
            ax.set_rmax(5)
            plt.draw()
        finally:
            self.lock.release()
        return True
def main():
plottr = plotter('localhost', 9000)
plt.show()
if __name__=="__main__":
main()
| 2.703125 | 3 |
dependencies.bzl | todaypp/bazel-template | 0 | 12764209 | <filename>dependencies.bzl<gh_stars>0
"""Define nodejs and yarn dependencies"""
load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_archive")
http_archive(
name = "build_bazel_rules_nodejs",
sha256 = "a09edc4ba3931a856a5ac6836f248c302d55055d35d36e390a0549799c33145b",
urls = ["https://github.com/bazelbuild/rules_nodejs/releases/download/5.0.0/rules_nodejs-5.0.0.tar.gz"],
)
| 1.1875 | 1 |
windows/run.py | IT-Manufactory/DataGit | 1 | 12764210 | import os
import configparser
import logging
import cx_Oracle
import sqlparse
import sys
log = logging.getLogger()
log.setLevel(logging.INFO)
SCRIPTS_FOLDER_BASE = "../db/"
INIT_FOLDER_PATH = SCRIPTS_FOLDER_BASE + "init/"
ADDITIONS_FOLDER_PATH = SCRIPTS_FOLDER_BASE + "additions/"
CONFIG_FILE_PATH = "../connection.ini"
class Operator:
def __init__(self):
self.config_file = None
self.connection = self._create_connection(self._read_db_configuration())
self.initfile, self.additionsfile = self._init_migration_table()
@staticmethod
def _create_connection(connection_string):
if connection_string is None:
log.critical("Got an error while trying to build the connection string."
" Please check your values and file formatting for file \'connection.ini\"")
return
else:
try:
connection = cx_Oracle.connect(connection_string)
return connection
except Exception as e:
log.critical("Unexpected Error: ", e)
sys.exit("Connection Could not established with the database")
def _read_db_configuration(self):
"""
:return: Database Connection string
"""
self.config_file = configparser.ConfigParser()
self.config_file.read(CONFIG_FILE_PATH)
if 'Database' in self.config_file:
database_config = self.config_file['Database']
username = database_config.get('username')
password = database_config.get('password')
url = database_config.get('url')
port = database_config.getint('port')
service = database_config.get('service')
if username is None or password is None or url is None or port is None or service is None:
log.error(
"One of the entries were not valid Please check the values and enter them in the following format: "
"\n eg. \n[Database] \nurl = localhost\nport = 1521\nusername = testuser\npassword = "
"sales\nservice = "
"xe\n")
return None
else:
return "{0}/{1}@{2}:{3}/{4}".format(username, password, url, port, service)
else:
log.error(
"Database configuration section not found. Please add database configuration to \"connection.ini\' "
"file. eg. \n[Database] \nurl = localhost\nport = 1521\nusername = testuser\npassword = "
"sales\nservice = "
"xe\n")
return None
def _init_migration_table(self):
cursor = self.connection.cursor()
query = "SELECT * from DATA_MIGRATION"
init = 'init'
additions = 'additions'
try:
result = cursor.execute(query).fetchall()
cursor.close()
self.connection.commit()
except Exception as e:
log.error("Exception occurred while fetching data from DATA_MIGRATION TABLE. \n {0}".format(e))
return init, additions
if len(result) > 0:
# Values are written in the database. Get the last file value for init and additions folder
for value in result:
init = value[1]
additions = value[2]
print("ID: {0}, LASTFILEINIT: {1}, LASTFILEADDITIONS: {2}".format(value[0], value[1], value[2]))
return init, additions
def start_operations(self):
print("********************************************* \n*********************************************")
print("Starting Init Folder")
self._execute_init_scripts()
print("Completed Init Folder")
print("********************************************* \n*********************************************")
print("Starting other scripts")
print("********************************************* \n*********************************************")
self._execute_added_scripts()
print("Completed all scripts\nDatabase migration has been completed. \n"
"Check the console if there were any malformed queries that were skipped.")
print("********************************************* \n*********************************************")
print("********************************************* \n*********************************************")
def _execute_added_scripts(self):
list_of_files = self._get_sorted_file_list_from_folder(ADDITIONS_FOLDER_PATH, self.additionsfile)
if list_of_files is not None:
print("found files")
self._perform_sql_operations(ADDITIONS_FOLDER_PATH, list_of_files, 'additions')
def _execute_init_scripts(self):
print("Checking files in init folder")
list_of_files = self._get_sorted_file_list_from_folder(INIT_FOLDER_PATH, self.initfile)
if list_of_files is not None:
print("found files")
self._perform_sql_operations(INIT_FOLDER_PATH, list_of_files, 'init')
@staticmethod
def _get_sorted_file_list_from_folder(folder_path, lastfile=None):
sql_file_list = os.listdir(folder_path)
sql_file_list.sort(reverse=True)
if lastfile is not None or lastfile != 'init' or lastfile != 'additions':
trimmed_names = []
print("Last file value is not None. Trimming files after this file")
for filename in sql_file_list:
if filename == lastfile:
trimmed_names.sort()
return trimmed_names
else:
trimmed_names.append(filename)
trimmed_names.sort()
return trimmed_names
else:
sql_file_list.sort()
return sql_file_list
    def _perform_sql_operations(self, folder_path, files, section):
        """Execute every statement of each .sql file in *files*, in order.

        Each file is split into individual statements with sqlparse; a
        statement that fails is logged and skipped (best-effort migration).
        After each file the progress marker for *section* is persisted.
        """
        for file in files:
            if file.endswith('.sql'):
                # NOTE(review): folder_path + file assumes folder_path ends
                # with '/' (true for the module-level constants) — confirm.
                read_file = open(folder_path + file, 'r')
                sql_file = read_file.read()
                read_file.close()
                if len(sql_file) > 0:
                    sql_commands = sqlparse.split(sql_file)
                    for query in sql_commands:
                        if len(query) > 0:
                            # Oracle rejects a trailing ';' in cx_Oracle
                            # execute() calls, so strip it.
                            last_char = query[-1:]
                            if last_char == ";":
                                query = query[:-1]
                            cursor = self.connection.cursor()
                            try:
                                cursor.execute(query)
                                cursor.close()
                                self.connection.commit()
                                # print("Successfully executed query: {0}".format(query))
                            except Exception as e:
                                # Best-effort: malformed queries are skipped.
                                log.warning(" Skipping the query : {0}\n Due to error: {1} \n".format(query, e))
                # Record this file as the last one executed for the section.
                self._write_to_config_file(section, file)
                print("Finished file {0}".format(file))
def _write_to_config_file(self, section_name, lastfile_name):
if section_name == "init":
query = f"UPDATE DATA_MIGRATION SET LASTFILEINIT = '{lastfile_name}' WHERE ID = 1"
else:
query = f"UPDATE DATA_MIGRATION SET LASTFILEADDITIONS = '{lastfile_name}' WHERE ID = 1"
cursor = self.connection.cursor()
try:
cursor.execute(query)
cursor.close()
self.connection.commit()
except Exception as e:
log.critical("Error saving the last file executed to the database.\n Please check the logs and update the "
"database entry manually \n Exception: {0}".format(e))
def destruct(self):
if self.connection is not None:
self.connection.close()
if __name__ == '__main__':
operator = Operator()
operator.start_operations()
operator.destruct()
| 2.9375 | 3 |
migrations/alembic/versions/2a8981379eba_add_locales_table.py | bonomali/parrot | 143 | 12764211 | <filename>migrations/alembic/versions/2a8981379eba_add_locales_table.py
"""add locales table
Revision ID: 2a8981379eba
Revises: 438b950c4c9a
Create Date: 2018-01-10 16:21:39.595957
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '2a8981379eba'
# Restored from the module docstring ("Revises: 438b950c4c9a"): the committed
# value was a redacted placeholder ('<PASSWORD>') that broke the migration
# chain, since Alembic resolves ordering through down_revision.
down_revision = '438b950c4c9a'
branch_labels = None
depends_on = None
def upgrade():
op.execute("""
CREATE TABLE locales (
id UUID PRIMARY KEY DEFAULT uuid_generate_v4(),
ident TEXT NOT NULL,
language TEXT NOT NULL,
country TEXT NOT NULL,
pairs hstore,
project_id UUID REFERENCES projects (id) ON UPDATE CASCADE ON DELETE CASCADE,
UNIQUE (ident, project_id)
);
""")
def downgrade():
    """Revert the migration: drop the ``locales`` table (all locale data is lost)."""
    op.execute("""
    DROP TABLE locales;
    """)
| 1.390625 | 1 |
gyun/gomestor/acl.py | gyun-gome/gyun-sdk-python | 0 | 12764212 | # encoding: utf-8
# =========================================================================
# ©2017-2018 北京国美云服科技有限公司
# -------------------------------------------------------------------------
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this work except in compliance with the License.
# You may obtain a copy of the License in the LICENSE file, or at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =========================================================================
class ACL(object):
    """Access control list of a bucket, materialized as ``Grant`` objects."""

    def __init__(self, bucket=None, acl=None):
        """
        @param bucket - The bucket
        @param acl - The access control list of the bucket
        """
        self.bucket = bucket
        self.acl = acl if acl else []
        self.grants = []
        for entry in self.acl:
            grantee = entry["grantee"]
            kwargs = {
                "permission": entry["permission"],
                "type": grantee["type"],
                "name": grantee["name"],
            }
            # Only user grantees carry an id.
            if grantee["type"] == "user":
                kwargs["id"] = grantee["id"]
            self.add_grant(Grant(**kwargs))

    def add_grant(self, grant):
        """Append *grant* to the list of grants."""
        self.grants.append(grant)

    def __repr__(self):
        return str(self.grants)
class Grant(object):
    """A single ACL entry: a grantee plus the permission granted to it."""

    def __init__(self, permission, type, id=None, name=None):
        """
        @param permission - The grant permission
        @param type - The grantee type
        @param id - The grantee user id
        @param name - The grantee name
        """
        self.permission = permission
        self.type = type
        self.id = id
        self.name = name

    def __repr__(self):
        # User grantees are identified by id, all others by name.
        subject = self.id if self.type == "user" else self.name
        return "<Grantee: %s, Permission: %s>" % (subject, self.permission)

    def to_dict(self):
        """Serialize back to the wire format consumed by ``ACL``."""
        if self.type == "user":
            # A missing user name is serialized as the empty string.
            grantee = {
                "type": self.type,
                "id": self.id,
                "name": self.name or "",
            }
        else:
            grantee = {
                "type": self.type,
                "name": self.name,
            }
        return {
            "grantee": grantee,
            "permission": self.permission,
        }
| 2.265625 | 2 |
STL_plotter.py | kerembg0/STL-plotter | 0 | 12764213 | <gh_stars>0
import re
import numpy as np
import matplotlib.pyplot as plt

# --- Read and clean the ASCII STL file ---------------------------------
# NOTE(review): the file handle is never closed; a `with open(...)` block
# would be safer. The placeholder name must be replaced before running.
f = open("<ASCII_file_name>.stl", "r")
f_text = f.read()
text = f_text.splitlines()
verticies = []  # sic: "vertices" is misspelled throughout; kept for consistency
for i in range(len(text)):
    text[i] = text[i].strip()
for item in text:
    # Keep only the "vertex x y z" facet lines of the STL.
    if (re.search("^vertex", item)):
        verticies.append(item)
# Drop duplicate vertex lines while preserving their first-seen order.
verticies = list(dict.fromkeys(verticies))
for i in range(len(verticies)):
    verticies[i] = verticies[i][7:]  # strip the leading "vertex " prefix
for i in range(len(verticies)):
    verticies[i] = verticies[i].split()
for i in range(len(verticies)):
    for j in range(3):
        verticies[i][j] = float(verticies[i][j])
verticies = np.array(verticies)

# --- Split the (N, 3) vertex array into coordinate arrays --------------
x = []
y = []
z = []
for i in range(len(verticies)):
    x.append(verticies[i][0])
for i in range(len(verticies)):
    y.append(verticies[i][1])
for i in range(len(verticies)):
    z.append(verticies[i][2])
x = np.array(x)
y = np.array(y)
z = np.array(z)

# --- Plot the vertex point cloud in 3D ---------------------------------
fig = plt.figure(figsize = (100,100))
ax = fig.add_subplot(111, projection='3d')
ax.scatter(x, y ,z)
ax.set_xlabel('X')
ax.set_ylabel('Y')
ax.set_zlabel('Z')
plt.show()
| 2.546875 | 3 |
path.py | RC-Dynamics/de_pid_angle | 0 | 12764214 | import numpy as np
from random import random
import math
class Path:
    """A circular reference path centred at (85, 65), plus error metrics
    comparing a robot's trajectory against it."""

    def __init__(self, r):
        # r: circle radius (field units)
        self.radius = r
        self.path = []

    def circleDiscretization(self, qtd_poits=40):
        """Sample ``qtd_poits`` equally spaced points on the circle and
        store them as the current path."""
        step = 2 * math.pi / qtd_poits
        self.path = [
            (self.radius * math.cos(k * step) + 85,
             self.radius * math.sin(k * step) + 65)
            for k in range(qtd_poits)
        ]
        return self.path

    def getPath(self):
        """The path rotated by one point (second point first, first point last)."""
        rotated = self.path[1:]
        rotated.append(self.path[0])
        return rotated

    def pointDistance(self, x1, y1, x2, y2):
        """Euclidean distance between (x1, y1) and (x2, y2)."""
        return math.sqrt((x1 - x2) ** 2 + (y1 - y2) ** 2)

    def area(self):
        """Area of the reference circle."""
        return math.pi * self.radius * self.radius

    def getSimpleError(self, path, robotPath, N=1):
        """Accumulate midpoint distances between robot samples and path
        segments, averaged over N.

        NOTE(review): the second accumulation always uses path[1] as the
        segment end regardless of the loop index — this looks like a
        leftover/typo (path[i+1]?); behaviour is kept as-is. TODO confirm.
        """
        acc = 0.0
        for k in range(len(path) - 1):
            acc += self.getPointError(path[k], path[k + 1], robotPath[k])[2]
            acc += self.getPointError(path[k], path[1], robotPath[k])[2]
        return acc / N

    def getPointError(self, startPoint, endPoint, robotPos):
        """Distances from the robot to the segment start, end and midpoint."""
        a = np.array(startPoint)
        b = np.array(endPoint)
        r = np.array(robotPos)
        mid = (a + b) / 2.0
        d_start = np.sqrt(((a - r) ** 2).sum(0))
        d_end = np.sqrt(((b - r) ** 2).sum(0))
        d_mid = np.sqrt(((mid - r) ** 2).sum(0))
        return d_start, d_end, d_mid

    def tD2I(self, tup):
        """Truncate a 2-tuple of floats to ints."""
        return (int(tup[0]), int(tup[1]))

    def getPerimeterError(self, startPoint, endPoint, robotPath, src=''):
        """Total polyline length start -> robot samples -> end.

        ``src`` is accepted for API compatibility but unused.
        """
        pts = np.array([startPoint] + robotPath + [endPoint])
        total = 0.0
        for a, b in zip(pts[:-1], pts[1:]):
            total += np.sqrt(((a - b) ** 2).sum(0))
        return total
# if __name__ == '__main__':
# height, width = 480, 640
# radius = 150
# angleShift = math.radians(20)
# src = np.zeros(shape=(height, width, 3),dtype=np.uint8)
# center = (width//2, height//2)
# cv2.circle(src, center, radius, (0,0,255),1)
# path = []
# robotPath = []
# for i in np.arange(0, angleShift * 2, angleShift):
# x = (width/2 + math.cos(i) * radius)
# y = (height/2 + math.sin(i) * radius)
# cv2.circle(src, (int(x), int(y)), 1, (255, 0, 0), 1)
# xn = (width/2 + math.cos(i + angleShift/4.0) * radius)
# yn = (height/2 + math.sin(i + angleShift/4.0) * radius)
# xr = xn + (random() - 0.5) * radius/2.5
# yr = yn + (random() - 0.5) * radius/2.5
# robotPath.append((xr, yr))
# cv2.circle(src, (int(xr), int(yr)), 1, (0, 255, 0), 1)
# xn = (width/2 + math.cos(i + angleShift/2.0) * radius)
# yn = (height/2 + math.sin(i + angleShift/2.0) * radius)
# xr = xn + (random() - 0.5) * radius/2.5
# yr = yn + (random() - 0.5) * radius/2.5
# robotPath.append((xr, yr))
# cv2.circle(src, (int(xr), int(yr)), 1, (0, 255, 0), 1)
# path.append((x,y))
# ptError = PathError()
# print (ptError.getPerimeterError(path[0], path[1], robotPath, src))
# cv2.namedWindow("ErrorWin", cv2.WINDOW_NORMAL)
# cv2.imshow("ErrorWin",src)
# cv2.waitKey(0)
# cv2.destroyAllWindows() | 3.15625 | 3 |
matrix.py | romanstrazanec/algorithms | 0 | 12764215 | class Matrix:
def __init__(self, *args, **kwargs):
if len(args) > 0:
if isinstance(args[0], Matrix):
m = args[0].copy()
else:
m = {'vals': [], 'w': 0, 'h': 0}
self.vals = m.vals
self.w = m.w
self.h = m.h
def copy(self):
new_m = Matrix()
for i in self.vals:
new_m.vals.append(i)
new_m.w = self.w
new_m.h = self.h
return new_m
@property
def width(self):
return self.w
@property
def height(self):
return self.h
def value_at(self, row, col):
return self.vals[row*self.w + col]
def at(self, row, col):
return self.value_at(row, col)
def row(self, pos):
return [self.vals[pos*self.w + i] for i in range(self.w)]
@property
def rows(self):
return [self.row(i) for i in range(self.h)]
def col(self, pos):
return [self.vals[i*self.w + pos] for i in range(self.h)]
@property
def cols(self):
return [self.col(i) for i in range(self.w)]
@staticmethod
def _isnumeric(i):
return isinstance(i, float) or isinstance(i, int)
def _add(self, r, p, q, *args):
r = len(args) if r <= 0 else r
for i in range(r):
try:
if self._isnumeric(args[i]):
self.vals.insert(i*(p + 1) + self.w*q, args[i])
except IndexError:
self.vals.insert(i*(p + 1) + self.w*q, 0)
return r
def addrow(self, *args):
self.w = self._add(self.w, 0, self.h, *args)
self.h += 1
def addcol(self, *args):
self.h = self._add(self.h, self.w, 1, *args)
self.w += 1
def _fill(self, val, pos, r, lt, p, q, addfunc):
if self._isnumeric(val):
if pos < lt:
for i in range(self.w):
self.vals[pos*p + i*q] = val
else:
addfunc(*[val for _ in range(r)])
def rowfill(self, val, pos):
self._fill(val, pos, self.w, self.h, self.w, 1, self.addrow)
def colfill(self, val, pos):
self._fill(val, pos, self.h, self.w, 1, self.w, self.addcol)
def removerow(self, pos):
if self.h > 0 and pos < self.h:
for _ in range(self.w):
self.vals.pop(self.w*pos)
self.h -= 1
if self.h == 0:
self.w = 0
def removecol(self, pos):
if self.w > 0 and pos < self.w:
pos %= self.w
for i in range(self.h):
self.vals.pop(i*(self.w-1) + pos)
self.w -= 1
if self.w == 0:
self.h = 0
def __add__(self, other):
new_m = Matrix()
def __mul__(self, other):
new_m = Matrix()
for col in other.cols:
s = [sum([self.at(l, i)*c for i, c in enumerate(col)]) for l in range(self.h)]
print(s)
#new_m.addcol()
return new_m
@property
def det(self):
if self.w * self.h == 1:
return self.vals[0]
if (self.w, self.h) == (2,2):
return self.at(0, 0)*self.at(1, 1) - self.at(0, 1)*self.at(1, 0)
d = 0
for i in range(self.h):
for j in range(self.w):
b = [[y for y in (x[:j] + x[j+1:])] for x in self.cols[:i] + self.cols[i+1:]]
d += det(val) if (i+j)%2 == 0 else -det(val)
return d
def __len__(self):
return self.w * self.h
def __repr__(self):
if (self.w, self.h) == (0, 0):
return "()"
res = ""
for i, val in enumerate(self.vals):
end = "\n" if (i+1)%self.w == 0 else "\t"
res += f"{val}{end}"
return res
def __str__(self):
return self.__repr__() | 3.3125 | 3 |
src/Grid_world/policyiteration/environment.py | aliasgar-m/Reinforcement-Learning | 0 | 12764216 | #! /usr/bin/env python
import random
import numpy as np
class Environment:
    """A simple grid-world environment with a state grid, an action space
    and per-state available actions."""

    def __init__(self, size=[3, 4], start=(0, 0), end=(2, 3), block=[(1, 1)], false_end=(1, 3)):
        """Build the grid and the action/state spaces.

        NOTE(review): ``end``, ``block`` and ``false_end`` are accepted but
        currently unused; the mutable defaults are safe only because they
        are never mutated.
        """
        self.size = size
        self.state = np.zeros(self.size)
        self.action_space = self.generate_action_space()
        self.state_space = self.generate_state_space()
        self.agent_position = start

    def generate_action_space(self):
        """Map action names to (row, col) movement deltas.

        NOTE(review): the key list was mangled to '<KEY>' by a scrubbing
        tool; it presumably held four names matching the four deltas —
        restore from upstream history.
        """
        a_keys = ['<KEY>']
        a_values = [(1, 0), (-1, 0), (0, -1), (0, 1)]
        # zip truncates to the shorter list, matching the original
        # range(len(a_keys)) pairing.
        return dict(zip(a_keys, a_values))

    def generate_state_space(self):
        """Every grid cell mapped to the list of action names available there."""
        return {
            (i, j): list(self.action_space)
            for i in range(self.size[0])
            for j in range(self.size[1])
        }

    def generate_default_probability(self):
        """Uniform default action-selection probabilities.

        BUGFIX: the original subscripted ``dict.keys()`` (a TypeError) and
        keyed on states while providing only four probability values; the
        four values clearly correspond to the four actions, so the mapping
        is now keyed by action name.
        """
        p_values = [0.25, 0.25, 0.25, 0.25]
        return dict(zip(self.action_space, p_values))

    def get_state_action_hash(self, curr_pos):
        """List of "(row, col)-action" hash strings for the actions at curr_pos."""
        return [str(curr_pos) + '-' + action for action in self.state_space[curr_pos]]
if __name__ == "__main__":
env = Environment() | 2.890625 | 3 |
zmq_pub.py | abhi-mazu/time_series_framework | 0 | 12764217 | <gh_stars>0
import zmq
import time
import random
import json
import logging
import numpy as np
# ZeroMQ Context
context = zmq.Context()
# Define the socket using the "Context"
# PUB socket: fan-out publisher; subscribers filter on the topic frame.
sock = context.socket(zmq.PUB)
sock.bind("tcp://127.0.0.1:5680")
class GenerateSignal(object):
    """Produces one frame of simulated sensor signals per call."""

    def __init__(self):
        self.device = 'DEVICE1'
        self.time = time.time()
        # Base weights for the dependent continuous signals.
        self.w1 = 1
        self.w2 = 2
        self.w3 = 3

    def simulate_signals(self):
        """ Generates a new, random signals
        """
        self.time = round(time.time() * 1000)
        # Seven independent readings, then two noisy combinations, then a
        # categorical bucket over the total.
        values = [self.simulate_independent_value() for _ in range(7)]
        values.append(self.simulate_dependent_continuous_value(values[0], values[1], values[2]))
        values.append(self.simulate_dependent_continuous_value(values[1], values[3], values[5]))
        values.append(self.simulate_dependent_categorical_value(values))
        names = ["in_" + str(idx) for idx in range(10)]
        return values, names

    def simulate_independent_value(self):
        """Uniform random reading in [1, 99]."""
        return random.uniform(1, 99)

    def simulate_dependent_continuous_value(self, x1, x2, x3):
        """Noisy weighted sum of three other signals."""
        return (self.w1 + random.random()) * x1 + (self.w2 + random.random()) * x2 + (self.w3 + random.random()) * x3

    def simulate_dependent_categorical_value(self, signal_list):
        """Bucket the signal total into a small integer category."""
        total = sum(signal_list)
        return 1 + int(total / 500 + random.random() * 0.2)
ip = GenerateSignal()
print("ZMQ publisher is running")
# Publish one simulated frame every 250 ms, forever.
while True:
    signals, signal_name = ip.simulate_signals()
    print(signals)
    payload = {}
    payload['timestamp'] = ip.time
    payload['device'] = ip.device
    payload['data'] = signals
    payload['signals'] = signal_name
    json_data = json.dumps(payload)
    # Topic frame first so subscribers can filter on the device id.
    sock.send_string(ip.device, flags=zmq.SNDMORE)
    # NOTE(review): payload is already a JSON string here; send_json()
    # encodes it again, so subscribers receive a double-encoded document —
    # confirm the consumer expects this.
    sock.send_json(json_data)
    # print(signals)
    time.sleep(0.25)
src/agc_optims/optim/rmsprop_agc.py | Skyy93/agc_optims | 1 | 12764218 | import torch
from torch.optim.optimizer import Optimizer, required
from torch import optim, nn
import torch.optim._functional as F
from agc_optims.utils import agc
class RMSprop_AGC(Optimizer):
r"""Implements RMSprop algorithm with adaptive gradient clipping (AGC).
.. math::
\begin{aligned}
&\rule{110mm}{0.4pt} \\
&\textbf{input} : \alpha \text{ (alpha)},\: \gamma \text{ (lr)},
\: \theta_0 \text{ (params)}, \: f(\theta) \text{ (objective)} \\
&\hspace{13mm} \lambda \text{ (weight decay)},\: \mu \text{ (momentum)},\: centered\\
&\textbf{initialize} : v_0 \leftarrow 0 \text{ (square average)}, \:
\textbf{b}_0 \leftarrow 0 \text{ (buffer)}, \: g^{ave}_0 \leftarrow 0 \\[-1.ex]
&\rule{110mm}{0.4pt} \\
&\textbf{for} \: t=1 \: \textbf{to} \: \ldots \: \textbf{do} \\
&\hspace{5mm}g_t \leftarrow \nabla_{\theta} f_t (\theta_{t-1}) \\
&\hspace{5mm}if \: \lambda \neq 0 \\
&\hspace{10mm} g_t \leftarrow g_t + \lambda \theta_{t-1} \\
&\hspace{5mm}v_t \leftarrow \alpha v_{t-1} + (1 - \alpha) g^2_t
\hspace{8mm} \\
&\hspace{5mm} \tilde{v_t} \leftarrow v_t \\
&\hspace{5mm}if \: centered \\
&\hspace{10mm} g^{ave}_t \leftarrow g^{ave}_{t-1} \alpha + (1-\alpha) g_t \\
&\hspace{10mm} \tilde{v_t} \leftarrow \tilde{v_t} - \big(g^{ave}_{t} \big)^2 \\
&\hspace{5mm}if \: \mu > 0 \\
&\hspace{10mm} \textbf{b}_t\leftarrow \mu \textbf{b}_{t-1} +
g_t/ \big(\sqrt{\tilde{v_t}} + \epsilon \big) \\
&\hspace{10mm} \theta_t \leftarrow \theta_{t-1} - \gamma \textbf{b}_t \\
&\hspace{5mm} else \\
&\hspace{10mm}\theta_t \leftarrow \theta_{t-1} -
\gamma g_t/ \big(\sqrt{\tilde{v_t}} + \epsilon \big) \hspace{3mm} \\
&\rule{110mm}{0.4pt} \\[-1.ex]
&\bf{return} \: \theta_t \\[-1.ex]
&\rule{110mm}{0.4pt} \\[-1.ex]
\end{aligned}
For further details regarding the algorithm we refer to
`lecture notes <https://www.cs.toronto.edu/~tijmen/csc321/slides/lecture_slides_lec6.pdf>`_ by <NAME>.
and centered version `Generating Sequences
With Recurrent Neural Networks <https://arxiv.org/pdf/1308.0850v5.pdf>`_.
The implementation here takes the square root of the gradient average before
adding epsilon (note that TensorFlow interchanges these two operations). The effective
learning rate is thus :math:`\gamma/(\sqrt{v} + \epsilon)` where :math:`\gamma`
is the scheduled learning rate and :math:`v` is the weighted moving average
of the squared gradient.
This implementation of RMSprop was taken from the official PyTorch Sources and the code for the AGC was adapted from
https://github.com/vballoli/nfnets-pytorch.
Args:
params (iterable): iterable of parameters to optimize or dicts defining
parameter groups
lr (float, optional): learning rate (default: 1e-2)
momentum (float, optional): momentum factor (default: 0)
alpha (float, optional): smoothing constant (default: 0.99)
eps (float, optional): term added to the denominator to improve
numerical stability (default: 1e-8)
centered (bool, optional) : if ``True``, compute the centered RMSProp,
the gradient is normalized by an estimation of its variance
weight_decay (float, optional): weight decay (L2 penalty) (default: 0)
clipping (float, optional): clipping value for the AGC (default: 1e-2)
agc_eps (float, optional): term used in agc to prevent grads clipped to zero (default: 1e-3)
"""
def __init__(self, params, lr=1e-2, alpha=0.99, eps=1e-8, weight_decay=0, momentum=0, centered=False, clipping=1e-2, agc_eps=1e-3):
if not 0.0 <= lr:
raise ValueError("Invalid learning rate: {}".format(lr))
if not 0.0 <= eps:
raise ValueError("Invalid epsilon value: {}".format(eps))
if not 0.0 <= momentum:
raise ValueError("Invalid momentum value: {}".format(momentum))
if not 0.0 <= weight_decay:
raise ValueError("Invalid weight_decay value: {}".format(weight_decay))
if not 0.0 <= alpha:
raise ValueError("Invalid alpha value: {}".format(alpha))
if not 0.0 <= clipping < 1.0:
raise ValueError("Invalid clipping parameter: {}".format(clipping))
if not 0.0 <= agc_eps:
raise ValueError("Invalid agc_eps value: {}".format(agc_eps))
defaults = dict(lr=lr, momentum=momentum, alpha=alpha, eps=eps, centered=centered, weight_decay=weight_decay, clipping=clipping, agc_eps=agc_eps)
super(RMSprop_AGC, self).__init__(params, defaults)
def __setstate__(self, state):
super(RMSprop_AGC, self).__setstate__(state)
for group in self.param_groups:
group.setdefault('momentum', 0)
group.setdefault('centered', False)
@torch.no_grad()
def step(self, closure=None):
"""Performs a single optimization step.
Args:
closure (callable, optional): A closure that reevaluates the model
and returns the loss.
"""
loss = None
if closure is not None:
with torch.enable_grad():
loss = closure()
for group in self.param_groups:
params_with_grad = []
grads = []
square_avgs = []
grad_avgs = []
momentum_buffer_list = []
clipping = group['clipping']
agc_eps = group['agc_eps']
for p in group['params']:
if p.grad is None:
continue
## AGC
agc(param=p, clipping=clipping, eps=agc_eps)
params_with_grad.append(p)
if p.grad.is_sparse:
raise RuntimeError('RMSprop does not support sparse gradients')
grads.append(p.grad)
state = self.state[p]
# State initialization
if len(state) == 0:
state['step'] = 0
state['square_avg'] = torch.zeros_like(p, memory_format=torch.preserve_format)
if group['momentum'] > 0:
state['momentum_buffer'] = torch.zeros_like(p, memory_format=torch.preserve_format)
if group['centered']:
state['grad_avg'] = torch.zeros_like(p, memory_format=torch.preserve_format)
square_avgs.append(state['square_avg'])
if group['momentum'] > 0:
momentum_buffer_list.append(state['momentum_buffer'])
if group['centered']:
grad_avgs.append(state['grad_avg'])
state['step'] += 1
F.rmsprop(params_with_grad,
grads,
square_avgs,
grad_avgs,
momentum_buffer_list,
lr=group['lr'],
alpha=group['alpha'],
eps=group['eps'],
weight_decay=group['weight_decay'],
momentum=group['momentum'],
centered=group['centered'])
return loss | 2.40625 | 2 |
idaes/core/tests/test_components.py | dangunter/idaes-pse | 0 | 12764219 | <reponame>dangunter/idaes-pse
#################################################################################
# The Institute for the Design of Advanced Energy Systems Integrated Platform
# Framework (IDAES IP) was produced under the DOE Institute for the
# Design of Advanced Energy Systems (IDAES), and is copyright (c) 2018-2021
# by the software owners: The Regents of the University of California, through
# Lawrence Berkeley National Laboratory, National Technology & Engineering
# Solutions of Sandia, LLC, Carnegie Mellon University, West Virginia University
# Research Corporation, et al. All rights reserved.
#
# Please see the files COPYRIGHT.md and LICENSE.md for full copyright and
# license information.
#################################################################################
"""
Tests for Component objects
Author: <NAME>
"""
import pytest
import types
from pyomo.environ import ConcreteModel, Set, Param, Var, units as pyunits
from idaes.core.components import (Component, Solute, Solvent,
Ion, Anion, Cation, Apparent)
from idaes.core.phases import (LiquidPhase, VaporPhase, SolidPhase, Phase,
PhaseType, AqueousPhase)
from idaes.core.util.exceptions import ConfigurationError
from idaes.core.property_meta import PropertyClassMetadata
class TestComponent():
    """Unit tests for the generic ``Component`` class: configuration
    defaults, component-list registration, solute/solvent guards, phase
    validity rules and parameter construction (with unit conversion)."""

    @pytest.fixture(scope="class")
    def m(self):
        # Minimal model carrying the metadata hook that Component expects
        # from its parent parameter block, plus two generic components.
        m = ConcreteModel()
        m.meta_object = PropertyClassMetadata()

        def get_metadata(self):
            return m.meta_object
        m.get_metadata = types.MethodType(get_metadata, m)

        m.comp = Component()
        m.comp2 = Component()

        return m

    @pytest.mark.unit
    def test_config(self, m):
        # All config entries should be at their documented defaults.
        assert m.comp.config.valid_phase_types is None
        assert m.comp.config.elemental_composition is None
        assert not m.comp.config._component_list_exists
        assert m.comp.config.henry_component is None
        assert m.comp.config.has_vapor_pressure

    @pytest.mark.unit
    def test_populate_component_list(self, m):
        # Constructing Components should have auto-created component_list.
        assert isinstance(m.component_list, Set)

        for j in m.component_list:
            assert j in ["comp", "comp2"]

    @pytest.mark.unit
    def test_is_solute(self, m):
        with pytest.raises(TypeError,
                           match="comp Generic Component objects do not "
                           "support is_solute\(\) method. Use a Solvent or "
                           "Solute Component instead."):
            m.comp.is_solute()

    @pytest.mark.unit
    def test_is_solvent(self, m):
        with pytest.raises(TypeError,
                           match="comp Generic Component objects do not "
                           "support is_solvent\(\) method. Use a Solvent or "
                           "Solute Component instead."):
            m.comp.is_solvent()

    @pytest.mark.unit
    def test_is_phase_valid_no_assignment(self, m):
        with pytest.raises(TypeError):
            m.comp._is_phase_valid("foo")

    @pytest.mark.unit
    def test_is_phase_valid_liquid(self, m):
        m.comp3 = Component(default={
            "valid_phase_types": PhaseType.liquidPhase})

        m.Liq = LiquidPhase()
        m.Sol = SolidPhase()
        m.Vap = VaporPhase()
        m.Aqu = AqueousPhase()
        m.Phase = Phase()

        assert m.comp3._is_phase_valid(m.Liq)
        assert not m.comp3._is_phase_valid(m.Sol)
        assert not m.comp3._is_phase_valid(m.Vap)
        assert not m.comp3._is_phase_valid(m.Aqu)
        assert not m.comp3._is_phase_valid(m.Phase)

    @pytest.mark.unit
    def test_is_phase_valid_vapor(self, m):
        m.comp4 = Component(default={
            "valid_phase_types": PhaseType.vaporPhase})

        assert not m.comp4._is_phase_valid(m.Liq)
        assert not m.comp4._is_phase_valid(m.Sol)
        assert m.comp4._is_phase_valid(m.Vap)
        assert not m.comp4._is_phase_valid(m.Aqu)
        assert not m.comp4._is_phase_valid(m.Phase)

    @pytest.mark.unit
    def test_is_phase_valid_solid(self, m):
        m.comp5 = Component(default={
            "valid_phase_types": PhaseType.solidPhase})

        assert not m.comp5._is_phase_valid(m.Liq)
        assert m.comp5._is_phase_valid(m.Sol)
        assert not m.comp5._is_phase_valid(m.Vap)
        assert not m.comp5._is_phase_valid(m.Aqu)
        assert not m.comp5._is_phase_valid(m.Phase)

    @pytest.mark.unit
    def test_is_phase_valid_aqueous(self, m):
        m.comp6 = Component(default={
            "valid_phase_types": PhaseType.aqueousPhase})

        assert not m.comp6._is_phase_valid(m.Liq)
        assert not m.comp6._is_phase_valid(m.Sol)
        assert not m.comp6._is_phase_valid(m.Vap)
        # Generic components are never valid in the aqueous phase
        assert not m.comp6._is_phase_valid(m.Aqu)
        assert not m.comp6._is_phase_valid(m.Phase)

    @pytest.mark.unit
    def test_is_phase_valid_LV(self, m):
        m.comp7 = Component(default={
            "valid_phase_types": [PhaseType.liquidPhase,
                                  PhaseType.vaporPhase]})

        assert m.comp7._is_phase_valid(m.Liq)
        assert not m.comp7._is_phase_valid(m.Sol)
        assert m.comp7._is_phase_valid(m.Vap)
        assert not m.comp7._is_phase_valid(m.Aqu)
        assert not m.comp7._is_phase_valid(m.Phase)

    @pytest.mark.unit
    def test_create_parameters(self):
        # Unitless parameter_data should be taken at face value.
        m = ConcreteModel()
        m.meta_object = PropertyClassMetadata()

        def get_metadata(self):
            return m.meta_object
        m.get_metadata = types.MethodType(get_metadata, m)

        m.get_metadata().default_units["amount"] = pyunits.mol
        m.get_metadata().default_units["mass"] = pyunits.kg
        m.get_metadata().default_units["time"] = pyunits.s
        m.get_metadata().default_units["length"] = pyunits.m
        m.get_metadata().default_units["temperature"] = pyunits.K

        m.comp = Component(default={
            "parameter_data": {"mw": 10,
                               "pressure_crit": 1e5,
                               "temperature_crit": 500}})

        assert isinstance(m.comp.mw, Param)
        assert m.comp.mw.value == 10

        assert isinstance(m.comp.pressure_crit, Var)
        assert m.comp.pressure_crit.value == 1e5

        assert isinstance(m.comp.temperature_crit, Var)
        assert m.comp.temperature_crit.value == 500

    @pytest.mark.unit
    def test_create_parameters_convert(self):
        # Parameters given as (value, units) tuples must be converted to
        # the metadata's default units (g/mol -> kg/mol, bar -> Pa, R -> K).
        m = ConcreteModel()
        m.meta_object = PropertyClassMetadata()

        def get_metadata(self):
            return m.meta_object
        m.get_metadata = types.MethodType(get_metadata, m)

        m.get_metadata().default_units["amount"] = pyunits.mol
        m.get_metadata().default_units["mass"] = pyunits.kg
        m.get_metadata().default_units["time"] = pyunits.s
        m.get_metadata().default_units["length"] = pyunits.m
        m.get_metadata().default_units["temperature"] = pyunits.K

        m.comp = Component(default={
            "parameter_data": {"mw": (10, pyunits.g/pyunits.mol),
                               "pressure_crit": (1, pyunits.bar),
                               "temperature_crit": (900, pyunits.degR)}})

        assert isinstance(m.comp.mw, Param)
        assert m.comp.mw.value == 1e-2

        assert isinstance(m.comp.pressure_crit, Var)
        assert m.comp.pressure_crit.value == 1e5

        assert isinstance(m.comp.temperature_crit, Var)
        assert m.comp.temperature_crit.value == 500
class TestSolute():
    """Unit tests for ``Solute``: defaults, list registration, the
    solute/solvent predicates and phase validity (aqueous allowed)."""

    @pytest.fixture(scope="class")
    def m(self):
        # Minimal model with the metadata hook and a single Solute.
        m = ConcreteModel()
        m.meta_object = PropertyClassMetadata()

        def get_metadata(self):
            return m.meta_object
        m.get_metadata = types.MethodType(get_metadata, m)

        m.comp = Solute()

        return m

    @pytest.mark.unit
    def test_config(self, m):
        assert m.comp.config.valid_phase_types is None
        assert not m.comp.config._component_list_exists

    @pytest.mark.unit
    def test_populate_component_list(self, m):
        assert isinstance(m.component_list, Set)

        for j in m.component_list:
            assert j in ["comp"]

    @pytest.mark.unit
    def test_is_solute(self, m):
        assert m.comp.is_solute()

    @pytest.mark.unit
    def test_is_solvent(self, m):
        assert not m.comp.is_solvent()

    @pytest.mark.unit
    def test_is_phase_valid_no_assignment(self, m):
        with pytest.raises(TypeError):
            m.comp._is_phase_valid("foo")

    @pytest.mark.unit
    def test_is_phase_valid_liquid(self, m):
        m.comp3 = Solute(default={
            "valid_phase_types": PhaseType.liquidPhase})

        m.Liq = LiquidPhase()
        m.Sol = SolidPhase()
        m.Vap = VaporPhase()
        m.Aqu = AqueousPhase()
        m.Phase = Phase()

        assert m.comp3._is_phase_valid(m.Liq)
        assert not m.comp3._is_phase_valid(m.Sol)
        assert not m.comp3._is_phase_valid(m.Vap)
        assert not m.comp3._is_phase_valid(m.Aqu)
        assert not m.comp3._is_phase_valid(m.Phase)

    @pytest.mark.unit
    def test_is_phase_valid_vapor(self, m):
        m.comp4 = Solute(default={
            "valid_phase_types": PhaseType.vaporPhase})

        assert not m.comp4._is_phase_valid(m.Liq)
        assert not m.comp4._is_phase_valid(m.Sol)
        assert m.comp4._is_phase_valid(m.Vap)
        assert not m.comp4._is_phase_valid(m.Aqu)
        assert not m.comp4._is_phase_valid(m.Phase)

    @pytest.mark.unit
    def test_is_phase_valid_solid(self, m):
        m.comp5 = Solute(default={
            "valid_phase_types": PhaseType.solidPhase})

        assert not m.comp5._is_phase_valid(m.Liq)
        assert m.comp5._is_phase_valid(m.Sol)
        assert not m.comp5._is_phase_valid(m.Vap)
        assert not m.comp5._is_phase_valid(m.Aqu)
        assert not m.comp5._is_phase_valid(m.Phase)

    @pytest.mark.unit
    def test_is_phase_valid_aqueous(self, m):
        # Unlike generic Components, Solutes are valid in the aqueous phase.
        m.comp6 = Solute(default={
            "valid_phase_types": PhaseType.aqueousPhase})

        assert not m.comp6._is_phase_valid(m.Liq)
        assert not m.comp6._is_phase_valid(m.Sol)
        assert not m.comp6._is_phase_valid(m.Vap)
        assert m.comp6._is_phase_valid(m.Aqu)
        assert not m.comp6._is_phase_valid(m.Phase)

    @pytest.mark.unit
    def test_is_phase_valid_LV(self, m):
        m.comp7 = Solute(default={
            "valid_phase_types": [PhaseType.liquidPhase,
                                  PhaseType.vaporPhase]})

        assert m.comp7._is_phase_valid(m.Liq)
        assert not m.comp7._is_phase_valid(m.Sol)
        assert m.comp7._is_phase_valid(m.Vap)
        assert not m.comp7._is_phase_valid(m.Aqu)
        assert not m.comp7._is_phase_valid(m.Phase)
# NOTE(review): class name is misspelled — "TestSovent" should be
# "TestSolvent"; pytest still collects it via the Test* prefix.
class TestSovent():
    """Unit tests for ``Solvent``: defaults, list registration, the
    solute/solvent predicates and phase validity (aqueous allowed)."""

    @pytest.fixture(scope="class")
    def m(self):
        # Minimal model with the metadata hook and a single Solvent.
        m = ConcreteModel()
        m.meta_object = PropertyClassMetadata()

        def get_metadata(self):
            return m.meta_object
        m.get_metadata = types.MethodType(get_metadata, m)

        m.comp = Solvent()

        return m

    @pytest.mark.unit
    def test_config(self, m):
        assert m.comp.config.valid_phase_types is None
        assert not m.comp.config._component_list_exists

    @pytest.mark.unit
    def test_populate_component_list(self, m):
        assert isinstance(m.component_list, Set)

        for j in m.component_list:
            assert j in ["comp"]

    @pytest.mark.unit
    def test_is_solute(self, m):
        assert not m.comp.is_solute()

    @pytest.mark.unit
    def test_is_solvent(self, m):
        assert m.comp.is_solvent()

    @pytest.mark.unit
    def test_is_phase_valid_no_assignment(self, m):
        with pytest.raises(TypeError):
            m.comp._is_phase_valid("foo")

    @pytest.mark.unit
    def test_is_phase_valid_liquid(self, m):
        m.comp3 = Solvent(default={
            "valid_phase_types": PhaseType.liquidPhase})

        m.Liq = LiquidPhase()
        m.Sol = SolidPhase()
        m.Vap = VaporPhase()
        m.Aqu = AqueousPhase()
        m.Phase = Phase()

        assert m.comp3._is_phase_valid(m.Liq)
        assert not m.comp3._is_phase_valid(m.Sol)
        assert not m.comp3._is_phase_valid(m.Vap)
        assert not m.comp3._is_phase_valid(m.Aqu)
        assert not m.comp3._is_phase_valid(m.Phase)

    @pytest.mark.unit
    def test_is_phase_valid_vapor(self, m):
        m.comp4 = Solvent(default={
            "valid_phase_types": PhaseType.vaporPhase})

        assert not m.comp4._is_phase_valid(m.Liq)
        assert not m.comp4._is_phase_valid(m.Sol)
        assert m.comp4._is_phase_valid(m.Vap)
        assert not m.comp4._is_phase_valid(m.Aqu)
        assert not m.comp4._is_phase_valid(m.Phase)

    @pytest.mark.unit
    def test_is_phase_valid_solid(self, m):
        m.comp5 = Solvent(default={
            "valid_phase_types": PhaseType.solidPhase})

        assert not m.comp5._is_phase_valid(m.Liq)
        assert m.comp5._is_phase_valid(m.Sol)
        assert not m.comp5._is_phase_valid(m.Vap)
        assert not m.comp5._is_phase_valid(m.Aqu)
        assert not m.comp5._is_phase_valid(m.Phase)

    @pytest.mark.unit
    def test_is_phase_valid_LV(self, m):
        m.comp6 = Solvent(default={
            "valid_phase_types": [PhaseType.liquidPhase,
                                  PhaseType.vaporPhase]})

        assert m.comp6._is_phase_valid(m.Liq)
        assert not m.comp6._is_phase_valid(m.Sol)
        assert m.comp6._is_phase_valid(m.Vap)
        assert not m.comp6._is_phase_valid(m.Aqu)
        assert not m.comp6._is_phase_valid(m.Phase)

    @pytest.mark.unit
    def test_is_phase_valid_aqueous(self, m):
        # Unlike generic Components, Solvents are valid in the aqueous phase.
        m.comp7 = Solvent(default={
            "valid_phase_types": PhaseType.aqueousPhase})

        assert not m.comp7._is_phase_valid(m.Liq)
        assert not m.comp7._is_phase_valid(m.Sol)
        assert not m.comp7._is_phase_valid(m.Vap)
        assert m.comp7._is_phase_valid(m.Aqu)
        assert not m.comp7._is_phase_valid(m.Phase)
class TestIon():
    """Unit tests for the base ``Ion`` class: config (no vapor pressure,
    no valid_phase_types entry) and aqueous-only phase validity."""

    @pytest.fixture(scope="class")
    def m(self):
        # Minimal model with the metadata hook and a bare Ion.
        m = ConcreteModel()
        m.meta_object = PropertyClassMetadata()

        def get_metadata(self):
            return m.meta_object
        m.get_metadata = types.MethodType(get_metadata, m)

        m.comp = Ion()

        return m

    @pytest.mark.unit
    def test_config(self, m):
        # Ions remove valid_phase_types entirely and forbid vapor pressure.
        assert "valid_phase_types" not in m.comp.config
        assert m.comp.config.charge is None
        assert not m.comp.config._component_list_exists
        assert not m.comp.config.has_vapor_pressure

        with pytest.raises(ValueError):
            m.comp.config.has_vapor_pressure = True

    @pytest.mark.unit
    def test_populate_component_list(self, m):
        assert isinstance(m.component_list, Set)

        for j in m.component_list:
            assert j in ["comp"]

    @pytest.mark.unit
    def test_is_solute(self, m):
        assert m.comp.is_solute()

    @pytest.mark.unit
    def test_is_solvent(self, m):
        assert not m.comp.is_solvent()

    @pytest.mark.unit
    def test_is_phase_valid_no_assignment(self, m):
        with pytest.raises(AttributeError):
            m.comp._is_phase_valid("foo")

    @pytest.mark.unit
    def test_is_phase_valid_liquid(self, m):
        m.Liq = LiquidPhase()
        m.Sol = SolidPhase()
        m.Vap = VaporPhase()
        m.Aqu = AqueousPhase()
        m.Phase = Phase()

        # Ions are only ever valid in the aqueous phase.
        assert not m.comp._is_phase_valid(m.Liq)
        assert not m.comp._is_phase_valid(m.Sol)
        assert not m.comp._is_phase_valid(m.Vap)
        assert m.comp._is_phase_valid(m.Aqu)
        assert not m.comp._is_phase_valid(m.Phase)
class TestAnion():
    """Unit tests for ``Anion``: config with a mandatory negative charge,
    charge validation errors and aqueous-only phase validity."""

    @pytest.fixture(scope="class")
    def m(self):
        # Minimal model with the metadata hook and an Anion of charge -1.
        m = ConcreteModel()
        m.meta_object = PropertyClassMetadata()

        def get_metadata(self):
            return m.meta_object
        m.get_metadata = types.MethodType(get_metadata, m)

        m.comp = Anion(default={"charge": -1})

        return m

    @pytest.mark.unit
    def test_config(self, m):
        assert "valid_phase_types" not in m.comp.config
        assert m.comp.config.charge == -1
        assert not m.comp.config._component_list_exists
        assert not m.comp.config.has_vapor_pressure

        with pytest.raises(ValueError):
            m.comp.config.has_vapor_pressure = True

    @pytest.mark.unit
    def test_populate_component_list(self, m):
        assert isinstance(m.component_list, Set)

        for j in m.component_list:
            assert j in ["comp"]

    @pytest.mark.unit
    def test_is_solute(self, m):
        assert m.comp.is_solute()

    @pytest.mark.unit
    def test_is_solvent(self, m):
        assert not m.comp.is_solvent()

    @pytest.mark.unit
    def test_invalid_charge(self, m):
        # Positive charges must be rejected at construction time.
        with pytest.raises(ConfigurationError,
                           match="an received invalid value for charge "
                           "configuration argument."
                           " Anions must have a negative charge."):
            m.an = Anion(default={"charge": +1})

    @pytest.mark.unit
    def test_no_charge(self, m):
        # charge is a required argument for Anions.
        with pytest.raises(ConfigurationError,
                           match="an was not provided with a value "
                           "for charge."):
            m.an = Anion()

    @pytest.mark.unit
    def test_is_phase_valid_no_assignment(self, m):
        with pytest.raises(AttributeError):
            m.comp._is_phase_valid("foo")

    @pytest.mark.unit
    def test_is_phase_valid_liquid(self, m):
        m.Liq = LiquidPhase()
        m.Sol = SolidPhase()
        m.Vap = VaporPhase()
        m.Aqu = AqueousPhase()
        m.Phase = Phase()

        # Anions are only ever valid in the aqueous phase.
        assert not m.comp._is_phase_valid(m.Liq)
        assert not m.comp._is_phase_valid(m.Sol)
        assert not m.comp._is_phase_valid(m.Vap)
        assert m.comp._is_phase_valid(m.Aqu)
        assert not m.comp._is_phase_valid(m.Phase)
class TestCation():
    """Unit tests for the Cation component: config validation, component-list
    population, solute/solvent role, and aqueous-only phase validity."""
    @pytest.fixture(scope="class")
    def m(self):
        # Minimal parent block exposing get_metadata(), as Component expects.
        m = ConcreteModel()
        m.meta_object = PropertyClassMetadata()
        def get_metadata(self):
            return m.meta_object
        m.get_metadata = types.MethodType(get_metadata, m)
        m.comp = Cation(default={"charge": +1})
        return m
    @pytest.mark.unit
    def test_config(self, m):
        # Ions fix their own phase, so valid_phase_types is absent and
        # has_vapor_pressure is locked to False (assignment raises).
        assert "valid_phase_types" not in m.comp.config
        assert m.comp.config.charge == +1
        assert not m.comp.config._component_list_exists
        assert not m.comp.config.has_vapor_pressure
        with pytest.raises(ValueError):
            m.comp.config.has_vapor_pressure = True
    @pytest.mark.unit
    def test_populate_component_list(self, m):
        # Declaring the component registers it on the parent's component_list.
        assert isinstance(m.component_list, Set)
        for j in m.component_list:
            assert j in ["comp"]
    @pytest.mark.unit
    def test_is_solute(self, m):
        assert m.comp.is_solute()
    @pytest.mark.unit
    def test_is_solvent(self, m):
        assert not m.comp.is_solvent()
    @pytest.mark.unit
    def test_invalid_charge(self, m):
        # A negative charge must be rejected for a cation.
        with pytest.raises(ConfigurationError,
                           match="cat received invalid value for charge "
                                 "configuration argument."
                                 " Cations must have a positive charge."):
            m.cat = Cation(default={"charge": -1})
    @pytest.mark.unit
    def test_no_charge(self, m):
        # The charge configuration argument is mandatory.
        with pytest.raises(ConfigurationError,
                           match="cat was not provided with a value "
                                 "for charge."):
            m.cat = Cation()
    @pytest.mark.unit
    def test_is_phase_valid_no_assignment(self, m):
        # _is_phase_valid expects a Phase object, not a string.
        with pytest.raises(AttributeError):
            m.comp._is_phase_valid("foo")
    @pytest.mark.unit
    def test_is_phase_valid_liquid(self, m):
        # Cations are only valid in the aqueous phase.
        m.Liq = LiquidPhase()
        m.Sol = SolidPhase()
        m.Vap = VaporPhase()
        m.Aqu = AqueousPhase()
        m.Phase = Phase()
        assert not m.comp._is_phase_valid(m.Liq)
        assert not m.comp._is_phase_valid(m.Sol)
        assert not m.comp._is_phase_valid(m.Vap)
        assert m.comp._is_phase_valid(m.Aqu)
        assert not m.comp._is_phase_valid(m.Phase)
class TestApparent():
    """Unit tests for Apparent species: configurable valid_phase_types and
    the resulting phase-validity behaviour for each phase selection."""
    @pytest.fixture(scope="class")
    def m(self):
        # Minimal parent block exposing get_metadata(), as Component expects.
        m = ConcreteModel()
        m.meta_object = PropertyClassMetadata()
        def get_metadata(self):
            return m.meta_object
        m.get_metadata = types.MethodType(get_metadata, m)
        m.comp = Apparent()
        return m
    @pytest.mark.unit
    def test_config(self, m):
        # By default no phase restriction is applied.
        assert m.comp.config.valid_phase_types is None
        assert not m.comp.config._component_list_exists
    @pytest.mark.unit
    def test_populate_component_list(self, m):
        # Declaring the component registers it on the parent's component_list.
        assert isinstance(m.component_list, Set)
        for j in m.component_list:
            assert j in ["comp"]
    @pytest.mark.unit
    def test_is_solute(self, m):
        assert m.comp.is_solute()
    @pytest.mark.unit
    def test_is_solvent(self, m):
        assert not m.comp.is_solvent()
    @pytest.mark.unit
    def test_is_phase_valid_no_assignment(self, m):
        # Without valid_phase_types configured, a non-Phase argument raises
        # TypeError (contrast with the ion classes, which raise AttributeError).
        with pytest.raises(TypeError):
            m.comp._is_phase_valid("foo")
    @pytest.mark.unit
    def test_is_phase_valid_liquid(self, m):
        # NOTE: the Phase objects created here are reused by the tests below
        # (the fixture is class-scoped), so these tests are order-dependent.
        m.comp3 = Apparent(default={
            "valid_phase_types": PhaseType.liquidPhase})
        m.Liq = LiquidPhase()
        m.Sol = SolidPhase()
        m.Vap = VaporPhase()
        m.Aqu = AqueousPhase()
        m.Phase = Phase()
        assert m.comp3._is_phase_valid(m.Liq)
        assert not m.comp3._is_phase_valid(m.Sol)
        assert not m.comp3._is_phase_valid(m.Vap)
        assert not m.comp3._is_phase_valid(m.Aqu)
        assert not m.comp3._is_phase_valid(m.Phase)
    @pytest.mark.unit
    def test_is_phase_valid_vapor(self, m):
        m.comp4 = Apparent(default={
            "valid_phase_types": PhaseType.vaporPhase})
        assert not m.comp4._is_phase_valid(m.Liq)
        assert not m.comp4._is_phase_valid(m.Sol)
        assert m.comp4._is_phase_valid(m.Vap)
        assert not m.comp4._is_phase_valid(m.Aqu)
        assert not m.comp4._is_phase_valid(m.Phase)
    @pytest.mark.unit
    def test_is_phase_valid_solid(self, m):
        m.comp5 = Apparent(default={
            "valid_phase_types": PhaseType.solidPhase})
        assert not m.comp5._is_phase_valid(m.Liq)
        assert m.comp5._is_phase_valid(m.Sol)
        assert not m.comp5._is_phase_valid(m.Vap)
        assert not m.comp5._is_phase_valid(m.Aqu)
        assert not m.comp5._is_phase_valid(m.Phase)
    @pytest.mark.unit
    def test_is_phase_valid_aqueous(self, m):
        m.comp6 = Apparent(default={
            "valid_phase_types": PhaseType.aqueousPhase})
        assert not m.comp6._is_phase_valid(m.Liq)
        assert not m.comp6._is_phase_valid(m.Sol)
        assert not m.comp6._is_phase_valid(m.Vap)
        assert m.comp6._is_phase_valid(m.Aqu)
        assert not m.comp6._is_phase_valid(m.Phase)
    @pytest.mark.unit
    def test_is_phase_valid_LV(self, m):
        # A list of phase types enables each listed phase.
        m.comp7 = Apparent(default={
            "valid_phase_types": [PhaseType.liquidPhase,
                                  PhaseType.vaporPhase]})
        assert m.comp7._is_phase_valid(m.Liq)
        assert not m.comp7._is_phase_valid(m.Sol)
        assert m.comp7._is_phase_valid(m.Vap)
        assert not m.comp7._is_phase_valid(m.Aqu)
        assert not m.comp7._is_phase_valid(m.Phase)
    @pytest.mark.unit
    def test_is_aqueous_phase_valid(self, m):
        # Apparent species (default config) are valid in the aqueous phase.
        assert m.comp._is_aqueous_phase_valid()
| 1.828125 | 2 |
solutions/0067.add-binary/add-binary.py | cocobear/LeetCode-in-Python | 0 | 12764220 | <reponame>cocobear/LeetCode-in-Python
#
# @lc app=leetcode id=67 lang=python3
#
# [67] Add Binary
#
# @lc code=start
class Solution:
    def addBinary(self, a: str, b: str) -> str:
        """Add two binary strings and return their sum as a binary string."""
        i, j = len(a) - 1, len(b) - 1
        carry = 0
        digits = []
        # Walk both strings from the least-significant end, carrying as we go.
        while i >= 0 or j >= 0:
            total = carry
            if i >= 0:
                total += int(a[i])
                i -= 1
            if j >= 0:
                total += int(b[j])
                j -= 1
            digits.append(str(total % 2))
            carry = total // 2
        if carry:
            digits.append(str(carry))
        # Digits were collected least-significant first; reverse for output.
        return "".join(reversed(digits))
# Ad-hoc fixtures: (a, b, expected_sum) triples for addBinary.
# Note: nothing in this file iterates them automatically.
tests = [
    ('11', '1', '100'),
    ('1010', '1011', '10101')
]
# @lc code=end
| 3.5625 | 4 |
bin/best_model_inference.py | jingxuanyang/Shopee-Product-Matching | 13 | 12764221 | import sys
sys.path.insert(0,'../input/shopee-competition-utils')
from config import CFG
from run_test import run_bert_test
# choose which cuda to load model on
CFG.DEVICE = 'cuda:0'
CFG.BATCH_SIZE = 16
# choose which model with what hyperparameters to use
CFG.BERT_MODEL_NAME = CFG.BERT_MODEL_NAMES[3]
CFG.MARGIN = CFG.BERT_MARGINS[3]
CFG.MODEL_PATH_BERT = f"{CFG.BERT_MODEL_NAME.rsplit('/', 1)[-1]}_epoch8-bs16x1_margin_{CFG.MARGIN}.pt"
# start inference
run_bert_test()
| 1.75 | 2 |
test/programytest/storage/stores/sql/dao/test_category.py | motazsaad/fit-bot-fb-clt | 5 | 12764222 | <filename>test/programytest/storage/stores/sql/dao/test_category.py
import unittest
from programy.storage.stores.sql.dao.category import Category
class CategoryTests(unittest.TestCase):
    """Unit tests for the SQL DAO Category entity's construction and repr."""
    def test_init(self):
        # Before the row is persisted no id is assigned, so repr shows 'n/a';
        # with an explicit id, that id is rendered instead.
        category1 = Category(groupid='groupid', userid='userid', topic='topic', that='that', pattern='pattern', template='template')
        self.assertIsNotNone(category1)
        self.assertEqual("<Category(id='n/a', groupid='groupid', userid='userid', topic='topic', that='that', pattern='pattern', template='template'>", str(category1))
        category2 = Category(id=1, groupid='groupid', userid='userid', topic='topic', that='that', pattern='pattern', template='template')
        self.assertIsNotNone(category2)
        self.assertEqual("<Category(id='1', groupid='groupid', userid='userid', topic='topic', that='that', pattern='pattern', template='template'>", str(category2))
| 2.703125 | 3 |
vidyo/video.py | parafoxia/vidyo | 0 | 12764223 | <filename>vidyo/video.py
import datetime as dt
import io
import logging
import re
import requests
from PIL import Image
from vidyo.channel import PartialChannel
from vidyo.errors import ResponseNotOK
# Matches ISO-8601 durations as returned by the YouTube API (e.g. "PT1H2M3S").
# Each group captures one unit including its letter: days, hours, minutes, seconds.
DUR_REGEX = re.compile(
    r"P([0-9]{1,2}D)?T?([0-9]{1,2}H)?([0-9]{1,2}M)?([0-9]{1,2}S)?"
)
# Seconds per unit, index-aligned with the capture groups above (D, H, M, S).
DUR_MUL = (86400, 3600, 60, 1)
class Video:
    """An object containing video information.

    Args:
        data (dict): Response data from the YouTube Data API. This should be the first element in the :code:`items` list.

    Attributes:
        id (str): The video ID.
        published (datetime.datetime): The date and time and the video was published.
        channel (PartialChannel): The YouTube channel to which the video was published.
        title (str): The video title.
        description (str): The video's description.
        tags (list[str]): A list of the video's tags. If there are none, this is an empty list.
        category_id (int): The video's category's ID.
        live (bool): Whether the video is currently live. This could mean it is a premiere or a live stream.
        duration (datetime.timedelta): The video's duration.
        is_3d (bool): Whether the video was uploaded in 3D.
        is_hd (bool): Whether there is a HD version of the video available (720p or above).
        captioned (bool): Whether there are captions available on the video.
        is_360 (bool): Whether the video was recorded in 360 degrees.
        privacy (str): The video's privacy status. Can be "public", "unlisted", or "private".
        license (str): The video's license. Can be either "youtube" or "creativeCommon".
        embeddable (bool): Whether the video is embeddable.
        for_kids (bool): Whether the video is marked as "Made For Kids".
        views (int): The number of views the video has. If this is not available, this will be -1.
        likes (int): The number of likes the video has. If ratings are disabled, this will be -1.
        dislikes (int): The number of dislikes the video has. If ratings are disabled, this will be -1.
        favourites (int): The number of favourites the video has. If this is not available, this will be -1.
        comments (int): The number of comments the video has. If comments are disabled, this will be -1.
    """

    __slots__ = (
        "id",
        "published",
        "channel",
        "title",
        "description",
        "_thumbnails",
        "tags",
        "category",
        "category_id",
        "live",
        "duration",
        "is_3d",
        "is_hd",
        "captioned",
        "is_360",
        "privacy",
        "license",
        "embeddable",
        "for_kids",
        "views",
        "likes",
        "dislikes",
        "favourites",
        "comments",
    )

    def __init__(self, data: dict) -> None:
        snippet = data["snippet"]
        content = data["contentDetails"]
        status = data["status"]
        stats = data["statistics"]
        self._thumbnails = snippet["thumbnails"]
        self.id = data["id"]
        # Fix: rstrip (not strip) so only the trailing UTC "Z" designator is
        # removed — datetime.fromisoformat cannot parse the "Z" suffix.
        self.published = dt.datetime.fromisoformat(
            snippet["publishedAt"].rstrip("Z")
        )
        self.channel = PartialChannel(
            snippet["channelId"], snippet["channelTitle"]
        )
        self.title = snippet["title"]
        self.description = snippet["description"]
        self.tags = snippet.get("tags", [])
        self.category_id = int(snippet["categoryId"])
        self.live = snippet["liveBroadcastContent"] == "live"
        self.duration = self._parse_duration(content["duration"])
        self.is_3d = content["dimension"] == "3d"
        self.is_hd = content["definition"] == "hd"
        self.captioned = content["caption"] == "true"
        self.is_360 = content["projection"] == "360"
        self.privacy = status["privacyStatus"]
        self.license = status["license"]
        self.embeddable = status["embeddable"]
        self.for_kids = status["madeForKids"]
        # Statistic fields are omitted by the API when unavailable/disabled;
        # -1 marks "not available" (see class docstring).
        self.views = int(stats.get("viewCount", -1))
        self.likes = int(stats.get("likeCount", -1))
        self.dislikes = int(stats.get("dislikeCount", -1))
        self.favourites = int(stats.get("favoriteCount", -1))
        self.comments = int(stats.get("commentCount", -1))

    def __str__(self) -> str:
        return self.id

    def __repr__(self) -> str:
        return f"<Video id={self.id} title={self.title} views={self.views}>"

    def __eq__(self, other) -> bool:
        # Videos are identified by their ID. Fix: returning NotImplemented for
        # foreign types avoids AttributeError on e.g. `video == None`.
        if not isinstance(other, Video):
            return NotImplemented
        return self.id == other.id

    def __ne__(self, other) -> bool:
        if not isinstance(other, Video):
            return NotImplemented
        return self.id != other.id

    def __hash__(self) -> int:
        # Fix: defining __eq__ implicitly set __hash__ to None (unhashable);
        # restore hashability consistently with ID-based equality.
        return hash(self.id)

    def _parse_duration(self, duration: str) -> dt.timedelta:
        """Convert an ISO-8601 duration string into a timedelta."""
        m = DUR_REGEX.match(duration)
        secs = 0
        # Groups are (days, hours, minutes, seconds) with the unit letter
        # attached, e.g. "5M" — strip the letter and scale by DUR_MUL.
        for i, g in enumerate(m.groups()):
            if g:
                secs += int(g[:-1]) * DUR_MUL[i]
        return dt.timedelta(seconds=secs)

    def get_thumbnail(self) -> Image.Image:
        """Gets the highest resolution thumbnail available.

        Returns:
            PIL.Image.Image: A Pillow image.

        Raises:
            ResponseNotOK: The request returned a non-OK status code.
        """
        logging.info("Getting thumbnail...")
        # The API offers several sizes; pick the one with the largest width.
        t = sorted(
            self._thumbnails.items(), key=lambda x: x[1]["width"], reverse=True
        )[0][1]
        logging.info(f"Highest resolution: {t['width']}x{t['height']}")
        with requests.get(t["url"]) as r:
            if not r.ok:
                raise ResponseNotOK(f"{r.status_code}: {r.reason}")
            return Image.open(io.BytesIO(r.content))
| 2.625 | 3 |
taw/sshlike.py | mkasa/taw | 3 | 12764224 | #!/usr/bin/env python3
from __future__ import print_function
from __future__ import absolute_import
import os, click
import subprocess
from taw.util import *
from taw.taw import *
# commands/subcommands
# ==============
# SSH COMMAND
# ==============
@taw.command("ssh")
@click.argument('hostname', metavar='<host name>')
@click.argument('sshargs', nargs=-1)
@pass_global_parameters
def ssh_cmd(params, hostname, sshargs):
    """ do SSH to a specified host """
    # Resolve the instance name and exec ssh, forwarding extra CLI args.
    ssh_like_call(params, 'ssh', hostname, sshargs)
# ==============
# MOSH COMMAND
# ==============
@taw.command("mosh")
@click.argument('hostname', metavar='<host name>')
@click.argument('moshargs', nargs=-1)
@pass_global_parameters
def mosh_cmd(params, hostname, moshargs):
    """ do MOSH to a specified host """
    # Resolve the instance name and exec mosh, forwarding extra CLI args.
    ssh_like_call(params, 'mosh', hostname, moshargs)
# ==============
# RSSH COMMAND
# ==============
@taw.command("rssh")
@click.argument('hostname', metavar='<host name>')
@click.argument('rsshargs', nargs=-1)
@pass_global_parameters
def rssh_cmd(params, hostname, rsshargs):
    """ do rSSH to a specified host """
    # Resolve the instance name and exec rssh, forwarding extra CLI args.
    ssh_like_call(params, 'rssh', hostname, rsshargs)
# ==============
# RSYNC COMMAND
# ==============
@taw.command("rsync")
@click.argument('hostname', metavar='<host name>')
@click.argument('rsshargs', nargs=-1)
@pass_global_parameters
def rsync_cmd(params, hostname, rsshargs):
    """ do rsync to a specified host """
    # Resolve the instance name and exec rsync, forwarding extra CLI args.
    ssh_like_call(params, 'rsync', hostname, rsshargs)
# ==============
# SCP COMMAND
# ==============
@taw.command("scp")
@click.argument('src', nargs=-1)
@click.argument('dst', nargs=1)
@click.option('-i', 'key_file_path', help='SSH key file')
@click.option('-p', 'preserve_flag', is_flag=True, help='preserve attrs')
@click.option('-B', 'batch_flag', is_flag=True, help='batch mode')
@click.option('-C', 'compression_flag', is_flag=True, help='enable compression')
@click.option('-c', 'cypher', help='cypher type')
@click.option('-l', 'limit_bandwidth', help='bandwidth limit in Kb/s')
@click.option('-P', 'port', default=None, type=int, help='port number')
@click.option('-r', 'recursive_flag', is_flag=True, help='recursive copy')
@click.option('-q', 'quiet_flag', is_flag=True, help='quiet mode')
# TODO: support -v/-vv/-vvv, -o, -F (, -1, -2, -3, -4, -6 at lower priority)
@pass_global_parameters
def scp_cmd(params, src, dst, key_file_path, preserve_flag, batch_flag, compression_flag, cypher, limit_bandwidth, port, recursive_flag, quiet_flag):
    """ do scp to/from a specified host

    The direction (upload vs. download) is inferred from which side of the
    copy carries a remote host specification (user@host:path).
    """
    # Translate our click options into scp's own flags.
    args = ['scp']
    if preserve_flag: args.append('-p')
    if batch_flag: args.append('-B')
    if compression_flag: args.append('-C')
    if cypher: args += ['-c', cypher]
    if limit_bandwidth: args += ['-l', limit_bandwidth]
    # Fix: `port` is declared type=int but subprocess argument lists must
    # contain strings only, so convert explicitly.
    if port: args += ['-P', str(port)]
    if recursive_flag: args.append('-r')
    if quiet_flag: args.append('-q')
    (dest_user, dest_host, dest_path) = decompose_rpath(dst)
    copying_local_to_remote = dest_host is not None
    if copying_local_to_remote:
        # Destination is remote: resolve the instance and fill in defaults
        # for the user and SSH key derived from the instance metadata.
        instance = convert_host_name_to_instance(dest_host)
        if instance.public_ip_address is None: error_exit("The instance has no public IP address")
        dest_host = instance.public_ip_address
        if dest_user == '_': dest_user = os.environ['USER']  # '_' means "my local user name"
        if dest_user is None: dest_user = get_root_like_user_from_instance(instance)
        if key_file_path is None: key_file_path = os.path.join(os.path.expanduser("~/.ssh"), instance.key_name + ".pem")
        if os.path.exists(key_file_path):
            args += ['-i', key_file_path]
        else:
            print_info("Key file '%s' does not exist.\nThe default keys might be used" % key_file_path)
        args += list(src) + ["%s@%s:%s" % (dest_user, dest_host, dest_path)]
    else:
        # copying remote to local; scp cannot mix hosts or users within one
        # invocation, so all sources must agree.
        sources_arr = [decompose_rpath(i) for i in src]
        for host in sources_arr[1:]:
            if host[1] != sources_arr[0][1]: error_exit("Multiple source hosts are not supported.")
            if host[0] != sources_arr[0][0]: error_exit("Multiple source users are not supported.")
        instance = convert_host_name_to_instance(sources_arr[0][1])
        if instance.public_ip_address is None: error_exit("The instance has no public IP address")
        src_host = instance.public_ip_address
        src_user = sources_arr[0][0]
        if src_user == '_': src_user = os.environ['USER']  # '_' means "my local user name"
        if src_user is None: src_user = get_root_like_user_from_instance(instance)
        if key_file_path is None: key_file_path = os.path.join(os.path.expanduser("~/.ssh"), instance.key_name + ".pem")
        if os.path.exists(key_file_path):
            args += ['-i', key_file_path]
        else:
            print_info("Key file '%s' does not exist.\nThe default keys might be used" % key_file_path)
        args += ["%s@%s:%s" % (src_user, src_host, x[2]) for x in sources_arr]
        args.append(dst)
    if params.aws_dryrun:
        # Dry-run mode: show the command instead of executing it.
        print(" ".join(args))
        return
    try:
        subprocess.check_call(args)
    except subprocess.CalledProcessError:
        # Fix: narrowed from a bare `except:`. A non-zero scp exit already
        # printed its own error; anything else (e.g. scp missing) propagates.
        pass
| 2.421875 | 2 |
ryu/app/controller_heart_packet_check.py | wwmm1/MC | 3 | 12764225 | <gh_stars>1-10
import schedule
import time
from multiple_Controller import SimpleSwitch13
# Instantiate the controller app, give it time to initialise, then emit one
# heartbeat packet toward the other controllers.
# Fix: removed the original stray bare `s` expression statement (dead code).
s = SimpleSwitch13()
time.sleep(20)
s.send_heartbeat_packet()
| 1.4375 | 1 |
models/dto/LimitModifier.py | muhammetbolat/pythondataintegrator | 0 | 12764226 | <reponame>muhammetbolat/pythondataintegrator
from infrastructor.multi_processing.ParallelMultiProcessing import ProcessBaseData
class LimitModifier(ProcessBaseData):
    """Process payload carrying the SubLimit/TopLimit bounds for one unit
    of parallel work."""
    def __init__(self,
                 Id: int = None,
                 TopLimit: int = None,
                 SubLimit: int = None,
                 ):
        # Identifier handling stays in the base class; record the bounds here.
        super().__init__(Id)
        self.TopLimit = TopLimit
        self.SubLimit = SubLimit
test/test_del_group.py | spirit-87/python_training | 0 | 12764227 | # -*- coding: utf-8 -*-
from model.group import Group
import random
import re
def test_delete_some_group(app, db, check_ui):
    """Delete a randomly chosen group and verify the DB list lost exactly it."""
    # Ensure there is at least one group available to delete.
    if len(db.get_group_list()) == 0:
        app.group.create(Group(name = "test"))
    old_groups = db.get_group_list()
    group = random.choice(old_groups)
    app.group.delete_group_by_id(group.id)
    new_groups = db.get_group_list()
    old_groups.remove(group)
    assert sorted(old_groups, key = Group.id_or_max) == sorted(new_groups, key = Group.id_or_max)
    # For the optional check comparing DB data against the UI, the list taken
    # from the DB must be reduced to just id and name (with the name
    # whitespace-normalised the way the UI renders it).
    # NOTE(review): re.sub(" ", " ", ...) replaces a space with a space; the
    # pattern was probably meant to collapse runs of spaces — confirm in VCS.
    if check_ui:
        new_groups_ui = []
        for i in new_groups:
            new_groups_ui.append(Group(id=i.id, name=re.sub(" ", " ", i.name.strip())))
        assert sorted(new_groups_ui, key = Group.id_or_max) == sorted(app.group.get_group_list(), key = Group.id_or_max)
| 2.421875 | 2 |
ticketrelation/select_ticket.py | CaulyKan/TracTicketRelationPlugin | 0 | 12764228 | import json
from trac.core import *
from trac.web.api import IRequestHandler, ITemplateStreamFilter
from trac.web.chrome import ITemplateProvider, add_script
from trac.ticket.query import Query, QueryModule
from trac.ticket.model import Ticket
from genshi.builder import tag
from genshi.filters import Transformer
from genshi.input import HTML
from genshi.template import MarkupTemplate
from .api import TicketRelationSystem
import pkg_resources
class SelectTicketPlugin(Component):
    """Trac plugin behind /select_tickets: serves a chrome-stripped query page
    (for use as a ticket picker) and, on ticket pages, swaps relation fields
    for Vue widgets fed by a JSON payload."""
    implements(IRequestHandler, ITemplateProvider, ITemplateStreamFilter)
    # IRequestHandler methods
    def match_request(self, req):
        return req.path_info == '/select_tickets'
    def process_request(self, req):
        # Delegate the query handling to the stock QueryModule, but render
        # with our own template so the page suits an embedded dialog.
        args = req.args
        qm = QueryModule(self.env)
        template, data, _whatever = qm.process_request(req)
        return 'select_tickets.html', data, None
    #ITemplateProvider methods
    def get_htdocs_dirs(self):
        return [('ticketrelation', pkg_resources.resource_filename('ticketrelation', 'htdocs'))]
    def get_templates_dirs(self):
        return [pkg_resources.resource_filename('ticketrelation', 'templates')]
    ## ITemplateStreamFilter
    def filter_stream(self, req, method, filename, stream, data):
        # Picker page: strip Trac's banner and navigation bars.
        if req.path_info == '/select_tickets':
            stream |= Transformer('//div[@id="banner"]').remove()
            stream |= Transformer('//div[@id="mainnav"]').remove()
            stream |= Transformer('//div[@id="ctxtnav"]').remove()
        if (filename == "ticket.html" or filename == 'ticket_preview.html') and 'ticket' in data:
            # Ticket page: replace matching relation fields with Vue widgets
            # and bootstrap a Vue app with the collected relation data.
            # NOTE: `data` is rebound here from the template data dict to the
            # relation payload passed into the script below.
            ticket = data['ticket']
            trs = TicketRelationSystem(self.env)
            data = {}
            for relation in trs.build_relations().values():
                # A relation connects ticket type A to type B; pick the side
                # matching this ticket's type.
                if relation.ticket_type_a == ticket['type']:
                    stream = self._generate_html(relation, relation.relation_type_a, 'a', stream, ticket, data)
                elif relation.ticket_type_b == ticket['type']:
                    stream = self._generate_html(relation, relation.relation_type_b, 'b', stream, ticket, data)
            add_script(req, 'ticketrelation/js/bundle.js')
            stream |= Transformer('//body').append(tag.script("""
            (function () {
                var data = %s;
                var app = new Vue({
                    el: '#properties',
                    data: {
                        relation: data,
                    }
                });
            })();
            """ % json.dumps(data)))
        return stream
    def _generate_html(self, relation, relation_role, stream, ticket, data) if False else None
    def _generate_html(self, relation, relation_type, relation_role, stream, ticket, data):
        # Replace the plain input (single relation) or textarea (multi
        # relation) with the matching Vue component, and record the payload
        # for the client-side app.
        # NOTE(review): `trs` below is assigned but never used here.
        trs = TicketRelationSystem(self.env)
        try:
            if relation_type == 'one':
                if ticket[relation.name + '_' + relation_role] is not None:
                    stream |= Transformer(
                        '//input[@id="field-%s_%s"]' % (relation.name, relation_role)) \
                        .replace(HTML("""
                        <relation-single id="field-%s_%s" :relation="relation['%s_%s']" />
                        """ % (relation.name, relation_role, relation.name, relation_role)))
            else:
                if ticket[relation.name + '_' + relation_role] is not None:
                    stream |= Transformer(
                        '//textarea[@id="field-%s_%s"]' % (relation.name, relation_role)) \
                        .replace(HTML("""
                        <relation-multi id="field-%s_%s" :relation="relation['%s_%s']" />
                        """ % (relation.name, relation_role, relation.name, relation_role)))
            data[relation.name + '_' + relation_role] = {
                'name': relation.name,
                'role': relation_role,
                'targetType': relation.ticket_type_a if relation_role == 'b' else relation.ticket_type_b,
                'value': ticket[relation.name + '_' + relation_role]
            }
        except Exception as e:
            # NOTE(review): swallowing all exceptions hides template errors;
            # only the log records the failure.
            self.log.error(e)
            return stream
        return stream
| 2.015625 | 2 |
src/models/pytorch/metrics.py | AntonioGUJ/AirwaySegmentation_Keras | 15 | 12764229 | <filename>src/models/pytorch/metrics.py<gh_stars>10-100
from typing import Tuple
import torch.nn as nn
import torch
from models.metrics import MetricBase
_EPS = 1e-7  # guards log() arguments and denominators against zero
_SMOOTH = 1.0  # smoothing term for overlap-style ratios (Dice, TP/TN rates)
# Metric class names in this module that callers may request by name.
LIST_AVAIL_METRICS = ['MeanSquaredError',
                      'MeanSquaredErrorLogarithmic',
                      'BinaryCrossEntropy',
                      'WeightedBinaryCrossEntropy',
                      'WeightedBinaryCrossEntropyFixedWeights',
                      'BinaryCrossEntropyFocalLoss',
                      'DiceCoefficient',
                      'TruePositiveRate',
                      'TrueNegativeRate',
                      'FalsePositiveRate',
                      'FalseNegativeRate',
                      ]
class Metric(MetricBase, nn.Module):
    """Bridge between the project's MetricBase API and torch.nn.Module.
    Subclasses implement _compute(); when is_mask_exclude is set, voxels
    labelled with the exclusion value are zeroed out before computing."""
    def __init__(self, is_mask_exclude: bool = False) -> None:
        super(Metric, self).__init__(is_mask_exclude)
        nn.Module.__init__(self)
    def compute(self, target: torch.Tensor, input: torch.Tensor) -> torch.Tensor:
        # Flatten both tensors so subclasses operate on 1-D views regardless
        # of input shape, dispatching to the masked variant when configured.
        if self._is_mask_exclude:
            return self._compute_masked(torch.flatten(target), torch.flatten(input))
        else:
            return self._compute(torch.flatten(target), torch.flatten(input))
    def forward(self, target: torch.Tensor, input: torch.Tensor) -> torch.Tensor:
        return self.compute(target, input)
    def loss(self, target: torch.Tensor, input: torch.Tensor) -> torch.Tensor:
        return self.forward(target, input)
    def _compute(self, target: torch.Tensor, input: torch.Tensor) -> torch.Tensor:
        raise NotImplementedError
    def _compute_masked(self, target: torch.Tensor, input: torch.Tensor) -> torch.Tensor:
        # Default masked implementation: zero excluded voxels in both tensors,
        # then apply the plain metric.
        return self._compute(self._get_masked_input(target, target),
                             self._get_masked_input(input, target))
    def _get_mask(self, target: torch.Tensor) -> torch.Tensor:
        # Binary mask: 0 where target equals the exclusion value, 1 elsewhere.
        # (_value_mask_exclude is inherited from MetricBase.)
        return torch.where(target == self._value_mask_exclude, torch.zeros_like(target), torch.ones_like(target))
    def _get_masked_input(self, input: torch.Tensor, target: torch.Tensor) -> torch.Tensor:
        return torch.where(target == self._value_mask_exclude, torch.zeros_like(input), input)
class MetricWithUncertainty(Metric):
    # Composed uncertainty loss
    """Blend of a wrapped loss with the same loss evaluated against a uniform
    target (1/num_classes), weighted by epsilon — a label-uncertainty prior."""
    _epsilon_default = 0.01
    _num_classes_gt = 2
    def __init__(self, metrics_loss: Metric, epsilon: float = _epsilon_default) -> None:
        self._metrics_loss = metrics_loss
        self._epsilon = epsilon
        # Inherit the masking behaviour of the wrapped loss.
        super(MetricWithUncertainty, self).__init__(self._metrics_loss._is_mask_exclude)
        self._name_fun_out = self._metrics_loss._name_fun_out + '_uncertain'
    def _compute(self, target: torch.Tensor, input: torch.Tensor) -> torch.Tensor:
        return (1.0 - self._epsilon) * self._metrics_loss._compute(target, input) \
            + self._epsilon * self._metrics_loss._compute(torch.ones_like(input) / self._num_classes_gt, input)
    def _compute_masked(self, target: torch.Tensor, input: torch.Tensor) -> torch.Tensor:
        return (1.0 - self._epsilon) * self._metrics_loss._compute_masked(target, input) \
            + self._epsilon * self._metrics_loss._compute_masked(torch.ones_like(input) / self._num_classes_gt, input)
class CombineTwoMetrics(Metric):
    """Weighted sum of two metrics: metrics_1 + weight_metric2over1 * metrics_2."""
    def __init__(self, metrics_1: Metric, metrics_2: Metric, weight_metric2over1: float = 1.0) -> None:
        # Masking is delegated to the wrapped metrics, so disable it here.
        super(CombineTwoMetrics, self).__init__(False)
        self._metrics_1 = metrics_1
        self._metrics_2 = metrics_2
        self._weight_metric2over1 = weight_metric2over1
        self._name_fun_out = '_'.join(['combi', metrics_1._name_fun_out, metrics_2._name_fun_out])
    def compute(self, target: torch.Tensor, input: torch.Tensor) -> torch.Tensor:
        return self._metrics_1.compute(target, input) \
            + self._weight_metric2over1 * self._metrics_2.compute(target, input)
    def forward(self, target: torch.Tensor, input: torch.Tensor) -> torch.Tensor:
        return self._metrics_1.forward(target, input) \
            + self._weight_metric2over1 * self._metrics_2.forward(target, input)
class MeanSquaredError(Metric):
    """Mean squared error between prediction and target."""
    def __init__(self, is_mask_exclude: bool = False) -> None:
        super(MeanSquaredError, self).__init__(is_mask_exclude)
        self._name_fun_out = 'mean_squared'
    def _compute(self, target: torch.Tensor, input: torch.Tensor) -> torch.Tensor:
        squared_error = (input - target) ** 2
        return torch.mean(squared_error)
    def _compute_masked(self, target: torch.Tensor, input: torch.Tensor) -> torch.Tensor:
        # Excluded voxels contribute zero through the mask.
        squared_error = (input - target) ** 2
        return torch.mean(squared_error * self._get_mask(target))
class MeanSquaredErrorLogarithmic(Metric):
    """Mean squared error computed in log(1 + x) space."""
    def __init__(self, is_mask_exclude: bool = False) -> None:
        super(MeanSquaredErrorLogarithmic, self).__init__(is_mask_exclude)
        self._name_fun_out = 'mean_squared_log'
    def _compute(self, target: torch.Tensor, input: torch.Tensor) -> torch.Tensor:
        log_input = torch.log(torch.clip(input, _EPS, None) + 1.0)
        log_target = torch.log(torch.clip(target, _EPS, None) + 1.0)
        return torch.mean((log_input - log_target) ** 2)
    def _compute_masked(self, target: torch.Tensor, input: torch.Tensor) -> torch.Tensor:
        log_input = torch.log(torch.clip(input, _EPS, None) + 1.0)
        log_target = torch.log(torch.clip(target, _EPS, None) + 1.0)
        # Excluded voxels contribute zero through the mask.
        return torch.mean(((log_input - log_target) ** 2) * self._get_mask(target))
class BinaryCrossEntropy(Metric):
    """Element-wise binary cross-entropy, averaged over all voxels."""
    def __init__(self, is_mask_exclude: bool = False) -> None:
        super(BinaryCrossEntropy, self).__init__(is_mask_exclude)
        self._name_fun_out = 'bin_cross'
    def _compute(self, target: torch.Tensor, input: torch.Tensor) -> torch.Tensor:
        pos_term = - target * torch.log(input + _EPS)
        neg_term = - (1.0 - target) * torch.log(1.0 - input + _EPS)
        return torch.mean(pos_term + neg_term)
    def _compute_masked(self, target: torch.Tensor, input: torch.Tensor) -> torch.Tensor:
        pos_term = - target * torch.log(input + _EPS)
        neg_term = - (1.0 - target) * torch.log(1.0 - input + _EPS)
        # Excluded voxels contribute zero through the mask.
        return torch.mean((pos_term + neg_term) * self._get_mask(target))
class WeightedBinaryCrossEntropy(Metric):
    """Binary cross-entropy with the positive class weighted by the
    negative/positive count ratio computed from the target batch."""
    def __init__(self, is_mask_exclude: bool = False) -> None:
        super(WeightedBinaryCrossEntropy, self).__init__(is_mask_exclude)
        self._name_fun_out = 'weight_bin_cross'
    def _get_weights(self, target: torch.Tensor) -> Tuple[float, float]:
        """Return (negative_weight, positive_weight) for this target batch.

        Fix: the original called torch.count_nonzero(..., dtype=...) (invalid
        keyword) and the non-existent torch.cast / torch.variable (TensorFlow
        API leftovers). Count class members and form the ratio in PyTorch.
        """
        num_class_1 = torch.count_nonzero(target == 1.0)
        num_class_0 = torch.count_nonzero(target == 0.0)
        return (1.0, num_class_0.to(torch.float32)
                / (num_class_1.to(torch.float32) + _EPS))
    def _compute(self, target: torch.Tensor, input: torch.Tensor) -> torch.Tensor:
        weights = self._get_weights(target)
        return torch.mean(- weights[1] * target * torch.log(input + _EPS)
                          - weights[0] * (1.0 - target) * torch.log(1.0 - input + _EPS))
    def _compute_masked(self, target: torch.Tensor, input: torch.Tensor) -> torch.Tensor:
        weights = self._get_weights(target)
        mask = self._get_mask(target)
        # Excluded voxels contribute zero through the mask.
        return torch.mean((- weights[1] * target * torch.log(input + _EPS)
                           - weights[0] * (1.0 - target) * torch.log(1.0 - input + _EPS)) * mask)
class WeightedBinaryCrossEntropyFixedWeights(WeightedBinaryCrossEntropy):
    """Weighted BCE using dataset-specific constant class weights instead of
    weights recomputed per batch."""
    weights_no_mask_exclude = (1.0, 80.0)
    weights_mask_exclude = (1.0, 300.0)  # for LUVAR data
    # weights_mask_exclude = (1.0, 361.0)  # for DLCST data
    def __init__(self, is_mask_exclude: bool = False) -> None:
        # Select the weight pair before the base class stores the mask flag.
        if is_mask_exclude:
            self._weights = self.weights_mask_exclude
        else:
            self._weights = self.weights_no_mask_exclude
        super(WeightedBinaryCrossEntropyFixedWeights, self).__init__(is_mask_exclude)
        self._name_fun_out = 'weight_bin_cross_fixed'
    def _get_weights(self, target: torch.Tensor) -> Tuple[float, float]:
        # Override: return the constant weights regardless of target content.
        return self._weights
class BinaryCrossEntropyFocalLoss(Metric):
    # Binary cross entropy + Focal loss
    """BCE modulated per-voxel by (1 - p)^gamma so confidently-classified
    examples contribute less to the loss."""
    _gamma_default = 2.0
    def __init__(self, gamma: float = _gamma_default, is_mask_exclude: bool = False) -> None:
        self._gamma = gamma
        super(BinaryCrossEntropyFocalLoss, self).__init__(is_mask_exclude)
        self._name_fun_out = 'bin_cross_focal_loss'
    def get_predprobs_classes(self, target: torch.Tensor, input: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
        # NOTE(review): not used by _compute below; appears to select the
        # predictions belonging to each ground-truth class — confirm intent.
        prob_1 = torch.where(target == 1.0, input, torch.ones_like(input))
        prob_0 = torch.where(target == 0.0, input, torch.zeros_like(input))
        return (prob_1, prob_0)
    def _compute(self, target: torch.Tensor, input: torch.Tensor) -> torch.Tensor:
        return torch.mean(- target * torch.pow(1.0 - input, self._gamma) * torch.log(input + _EPS)
                          - (1.0 - target) * torch.pow(input, self._gamma) * torch.log(1.0 - input + _EPS))
    def _compute_masked(self, target: torch.Tensor, input: torch.Tensor) -> torch.Tensor:
        # Excluded voxels contribute zero through the mask.
        mask = self._get_mask(target)
        return torch.mean((- target * torch.pow(1.0 - input, self._gamma) * torch.log(input + _EPS)
                           - (1.0 - target) * torch.pow(input, self._gamma) * torch.log(1.0 - input + _EPS)) * mask)
class DiceCoefficient(Metric):
    """Soft Dice overlap between target and prediction; loss = 1 - dice."""
    def __init__(self, is_mask_exclude: bool = False) -> None:
        super(DiceCoefficient, self).__init__(is_mask_exclude)
        self._name_fun_out = 'dice'
    def _compute(self, target: torch.Tensor, input: torch.Tensor) -> torch.Tensor:
        intersection = torch.sum(target * input)
        denominator = torch.sum(target) + torch.sum(input) + _SMOOTH
        return 2.0 * intersection / denominator
    def forward(self, target: torch.Tensor, input: torch.Tensor) -> torch.Tensor:
        # Higher dice is better, so the trainable loss is its complement.
        return 1.0 - self.compute(target, input)
class TruePositiveRate(Metric):
    """Sensitivity / recall: true positives over positives in the target."""
    def __init__(self, is_mask_exclude: bool = False) -> None:
        super(TruePositiveRate, self).__init__(is_mask_exclude)
        self._name_fun_out = 'tp_rate'
    def _compute(self, target: torch.Tensor, input: torch.Tensor) -> torch.Tensor:
        true_positives = torch.sum(target * input)
        return true_positives / (torch.sum(target) + _SMOOTH)
    def forward(self, target: torch.Tensor, input: torch.Tensor) -> torch.Tensor:
        # Higher rate is better, so the trainable loss is its complement.
        return 1.0 - self.compute(target, input)
class TrueNegativeRate(Metric):
    """Specificity: true negatives over negatives in the target."""
    def __init__(self, is_mask_exclude: bool = False) -> None:
        super(TrueNegativeRate, self).__init__(is_mask_exclude)
        self._name_fun_out = 'tn_rate'
    def _compute(self, target: torch.Tensor, input: torch.Tensor) -> torch.Tensor:
        negatives = 1.0 - target
        true_negatives = torch.sum(negatives * (1.0 - input))
        return true_negatives / (torch.sum(negatives) + _SMOOTH)
    def forward(self, target: torch.Tensor, input: torch.Tensor) -> torch.Tensor:
        # Higher rate is better, so the trainable loss is its complement.
        return 1.0 - self.compute(target, input)
class FalsePositiveRate(Metric):
    """False positives over negatives in the target (already loss-like)."""
    def __init__(self, is_mask_exclude: bool = False) -> None:
        super(FalsePositiveRate, self).__init__(is_mask_exclude)
        self._name_fun_out = 'fp_rate'
    def _compute(self, target: torch.Tensor, input: torch.Tensor) -> torch.Tensor:
        negatives = 1.0 - target
        false_positives = torch.sum(negatives * input)
        return false_positives / (torch.sum(negatives) + _SMOOTH)
class FalseNegativeRate(Metric):
    """False negatives over positives in the target (already loss-like)."""
    def __init__(self, is_mask_exclude: bool = False) -> None:
        super(FalseNegativeRate, self).__init__(is_mask_exclude)
        self._name_fun_out = 'fn_rate'
    def _compute(self, target: torch.Tensor, input: torch.Tensor) -> torch.Tensor:
        false_negatives = torch.sum(target * (1.0 - input))
        return false_negatives / (torch.sum(target) + _SMOOTH)
| 2.1875 | 2 |
src/direpack/__init__.py | zedian/direpack | 0 | 12764230 | <reponame>zedian/direpack
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sun Jul 22 12:17:17 2018

@author: <NAME>, Ponalytics
"""

# Package metadata.
# NOTE(review): reassigning __name__ inside a package __init__ is unusual --
# the import machinery already sets it to "direpack"; confirm it is needed.
__name__ = "direpack"
__author__ = "<NAME>"
__license__ = "MIT"
__version__ = "1.0.0"
# NOTE(review): "2020-20-18" is not a valid ISO date (month 20); likely a typo
# for "2020-02-18" or "2020-10-18" -- confirm and correct.
__date__ = "2020-20-18"

# Re-export the public API from the subpackages.
from .preprocessing.robcent import VersatileScaler, versatile_scale
from .preprocessing.gsspp import GenSpatialSignPrePprocessor, gen_ss_pp, gen_ss_covmat
from .sprm.sprm import sprm
from .sprm.snipls import snipls
from .sprm.rm import rm
from .cross_validation._cv_support_functions import robust_loss
from .ppdire.ppdire import ppdire
from .ppdire.capi import capi
from .dicomo.dicomo import dicomo
from .sudire.sudire import sudire, estimate_structural_dim
from .plot.sudire_plot import sudire_plot
from .plot.ppdire_plot import ppdire_plot
from .plot.sprm_plot import sprm_plot, sprm_plot_cv
from .ipopt_temp.ipopt_wrapper import minimize_ipopt
# from .ipopt_temp.jacobian import *
Module 3/Chapter 4/ch4_30.py | PacktPublishing/Natural-Language-Processing-Python-and-NLTK | 50 | 12764231 | <reponame>PacktPublishing/Natural-Language-Processing-Python-and-NLTK
# Chunk a POS-tagged token list with a simple noun-phrase grammar (one or
# more consecutive NN tags) and display the resulting parse tree.
import nltk

noun1 = [("financial", "NN"), ("year", "NN"), ("account", "NN"), ("summary", "NN")]
gram = "NP:{<NN>+}"  # regexp chunk grammar: an NP is one or more nouns
find = nltk.RegexpParser(gram)
print(find.parse(noun1))
x = find.parse(noun1)
x.draw()  # opens a Tk window showing the parse tree
| 3.28125 | 3 |
058-bmp-stegano/taski_zrodla/mapofbits/level0/testlevel0.py | gynvael/stream | 152 | 12764232 | <gh_stars>100-1000
#!/usr/bin/python
# Python 2 script: extracts a message hidden in the least-significant bits of
# a BMP file's pixel data (LSB steganography) and checks it against
# settings.MSG.
import os, sys
from struct import pack, unpack

# Resolve files relative to this script so it works from any working dir.
PATH = os.path.dirname(os.path.realpath(__file__))
sys.path.append(PATH)
import settings

def rb(d, off):
    # Read a single byte of bytearray d at offset off.
    return d[off]

def rw(d, off):
    # Read a little-endian 16-bit word; str() on a bytearray slice yields the
    # raw bytes under Python 2.
    return unpack("<H", str(d[off:off+2]))[0]

def rd(d, off):
    # Read a little-endian 32-bit dword.
    return unpack("<I", str(d[off:off+4]))[0]

with open(PATH + "/level0.bmp", "rb") as f:
    d = bytearray(f.read())

data_offset = rd(d, 0x0A)  # bfOffBits: file offset where pixel data begins

i = data_offset
msg_bit = 0
byte = 0
msg = ""
while i < len(d):
    # Accumulate the LSB of each pixel byte, least-significant bit first.
    byte |= (d[i] & 1) << msg_bit
    msg_bit += 1
    if msg_bit == 8:
        # Skip NUL bytes so padding does not truncate the recovered text.
        if byte != 0:
            msg += chr(byte)
        byte = 0
        msg_bit = 0
    i += 1

if str(settings.MSG) in msg:
    print "Level 0: OK"
    sys.exit(0)

print "Level 0: FAIL"
sys.exit(1)
| 2.125 | 2 |
algorithm/py3/p136-single-number.py | yuechuanx/LeetCode | 3 | 12764233 | #
# @lc app=leetcode id=136 lang=python3
#
# [136] Single Number
#
# https://leetcode.com/problems/single-number/description/
#
# algorithms
# Easy (60.26%)
# Likes: 3294
# Dislikes: 128
# Total Accepted: 600.9K
# Total Submissions: 961.2K
# Testcase Example: '[2,2,1]'
#
# Given a non-empty array of integers, every element appears twice except for
# one. Find that single one.
#
# Note:
#
# Your algorithm should have a linear runtime complexity. Could you implement
# it without using extra memory?
#
# Example 1:
#
#
# Input: [2,2,1]
# Output: 1
#
#
# Example 2:
#
#
# Input: [4,1,2,1,2]
# Output: 4
#
#
#
# @lc code=start
class Solution:
    def singleNumber(self, nums: List[int]) -> int:
        """Return the element that appears exactly once.

        XOR of all values: pairs cancel out (x ^ x == 0), leaving the
        unique element. O(n) time, O(1) extra space.
        """
        acc = 0
        for value in nums:
            acc ^= value
        return acc
| 3.4375 | 3 |
news/urls.py | m3brown/collab-news | 3 | 12764234 | from django.conf.urls import patterns, url
# URL routes for the news app.
# NOTE(review): `patterns()` with string view names is the pre-1.8 Django
# style; this module presumably targets an old Django release -- confirm
# before upgrading.
urlpatterns = patterns('news.views',
    # Landing page: full list of news items.
    url(r'^$', 'news_list', name='list'),
    # Same list filtered to a single feed, selected by slug.
    url(r'^feed/(?P<slug>.*)/$',
        'news_list', name='feed'),
    # Detail page for a single news item.
    url(r'^(?P<slug>.*)/$', 'news_item', name='item'),
)
| 1.84375 | 2 |
hic3defdr/plotting/histograms.py | thomasgilgenast/hic3defdr | 0 | 12764235 | import numpy as np
import matplotlib.pyplot as plt
from lib5c.util.plotting import plotter
@plotter
def plot_pvalue_histogram(data, xlabel='pvalue', **kwargs):
    """
    Plots a p-value or q-value distribution as a 20-bin histogram on [0, 1].

    Parameters
    ----------
    data : np.ndarray
        The p-values or q-values to plot.
    xlabel : str
        Label for the x-axis.
        NOTE(review): this parameter is never used in the body; presumably
        the @plotter decorator applies it -- confirm, otherwise add
        plt.xlabel(xlabel).
    kwargs : kwargs
        Typical plotter kwargs.

    Returns
    -------
    pyplot axis
        The axis plotted on (presumably supplied by the @plotter decorator,
        since the body returns nothing -- TODO confirm).
    """
    plt.hist(data, bins=np.linspace(0, 1, 21))
    plt.ylabel('number of pixels')
| 3.28125 | 3 |
python-backend/app/api/search/namespace/search.py | ActionAnalytics/mds | 0 | 12764236 | from flask_restplus import Namespace
from ..search.resources.search import SearchResource, SearchOptionsResource
from app.api.search.search.resources.simple_search import SimpleSearchResource
# REST namespace wiring for the search API.
api = Namespace('search', description='Search related operations')
api.add_resource(SearchResource, '')                  # /search
api.add_resource(SearchOptionsResource, '/options')   # /search/options
api.add_resource(SimpleSearchResource, '/simple')     # /search/simple
opnsense_cli/tests/factories/test_json_type.py | jan-win1993/opn-cli | 13 | 12764237 | <reponame>jan-win1993/opn-cli<filename>opnsense_cli/tests/factories/test_json_type.py
import unittest
from opnsense_cli.factories.json_type import JsonTypeFactory, JsonObj, JsonArray, JsonObjNested
class TestJsonTypeFactory(unittest.TestCase):
    """Unit tests for JsonTypeFactory.get_type_for_data type detection."""

    def setUp(self):
        self._factory = JsonTypeFactory()
        # A plain object with scalar/nested-scalar values -> JsonObj.
        self._should_be_json_obj = [
            {
                "result": "failed",
                "validations": {
                    "alias.name": "An alias with this name already exists."
                }
            },
        ]
        # A list of homogeneous objects -> JsonArray.
        self._should_be_json_array = [
            {
                "use_same_key_for_each_example": [
                    {
                        'name': 'os-acme-client', 'version': '2.4', 'comment': "Let's Encrypt client",
                        'flatsize': '575KiB', 'locked': 'N/A', 'license': 'BSD2CLAUSE',
                        'repository': 'OPNsense', 'origin': 'opnsense/os-acme-client',
                        'provided': '1', 'installed': '0', 'path': 'OPNsense/opnsense/os-acme-client', 'configured': '0'
                    },
                ],
            }
        ]
        # An object whose values are themselves objects -> JsonObjNested.
        self._should_be_json_nested = [
            {
                'ArchiveOpenVPN': {'name': 'Archive', 'supportedOptions': ['plain_config', 'p12_password']},
                'PlainOpenVPN': {'name': 'File Only', 'supportedOptions': ['auth_nocache', 'cryptoapi']},
                'TheGreenBow': {'name': 'TheGreenBow', 'supportedOptions': []},
                'ViscosityVisz': {'name': 'Viscosity (visz)', 'supportedOptions': ['plain_config', 'random_local_port']}
            }
        ]

    def test_JsonTypeFactory_returns_JsonObj(self):
        for json_data in self._should_be_json_obj:
            json_type_obj = self._factory.get_type_for_data(json_data)
            self.assertIsInstance(json_type_obj, JsonObj)

    def test_JsonTypeFactory_returns_JsonArray(self):
        for json_data in self._should_be_json_array:
            json_type_obj = self._factory.get_type_for_data(json_data['use_same_key_for_each_example'])
            self.assertIsInstance(json_type_obj, JsonArray)

    def test_JsonTypeFactory_returns_JsonObjNested(self):
        for json_data in self._should_be_json_nested:
            json_type_obj = self._factory.get_type_for_data(json_data)
            self.assertIsInstance(json_type_obj, JsonObjNested)
| 2.125 | 2 |
exercises/test_02_01.py | DayMath82/www.warmups.dev | 5 | 12764238 | <gh_stars>1-10
def test():
    """Exercise checker: validates the learner's submission.

    `__solution__` (the submitted source as a string) and `__msg__` (the
    feedback channel) are injected by the exercise platform at runtime --
    they are not defined in this file.
    """
    # if an assertion fails, the message will be displayed
    # --> must have the output in a comment
    assert "Mean: 5.0" in __solution__, "Did you record the correct program output as a comment?"
    # --> must have the output in a comment
    assert "Mean: 4.0" in __solution__, "Did you record the correct program output as a comment?"
    # --> must have the first function call
    assert "mean(numbers_one)" in __solution__, "Did you call the mean function with numbers_one as input?"
    # --> must have the second function call
    assert "mean(numbers_two)" in __solution__, "Did you call the mean function with numbers_two as input?"
    # --> must not have a TODO marker in the solution
    assert "TODO" not in __solution__, "Did you remove the TODO marker when finished?"
    # display a congratulations for a correct solution
    __msg__.good("Well done!")
| 3.375 | 3 |
2_UNIXCommands/Exercise17.py | takeyoshinitta/NLP-100-Exercise | 3 | 12764239 | # 17. Distinct strings in the first column
# Find distinct strings (a set of strings) of the first column of the file. Confirm the result by using cut, sort, and uniq commands.
def removeDuplicates(list):
    """Return a new list with duplicates removed, keeping first-seen order.

    The parameter name `list` is kept for backward compatibility even though
    it shadows the builtin (so the builtin list() is unavailable here).
    """
    # dict.fromkeys preserves insertion order (Python 3.7+), giving O(n)
    # de-duplication instead of the original O(n^2) membership scan.
    return [*dict.fromkeys(list)]
# Count the distinct strings in the first (tab-separated) column.
# Shell equivalent: cut -f1 popular-names.txt | sort | uniq | wc -l
with open('popular-names.txt') as f:
    # Taking field 0 makes the trailing newline on each line irrelevant.
    firstColumn = [line.split('\t')[0] for line in f]

size = len(removeDuplicates(firstColumn))
print(f'There are {size} set of strings')
# The original ended with a bare `f.close` -- an attribute access, not a
# call, and redundant anyway since `with` closes the file; removed.
_analyze.py | RunzheStat/TROPE | 8 | 12764240 | from _util import *
###########################################################################################################################################################################################################################################################################################################################
class recorder():
    """Accumulates per-seed off-policy-evaluation results for the IS / DR /
    TR / QR estimators and reports summary statistics (RMSE, MAE, bias,
    average std, and confidence-interval coverage frequency).

    NOTE(review): indentation of this file was reconstructed; statement
    nesting inside update() should be confirmed against the original.
    """

    def __init__(self):
        # length + coverage frequency
        # One result dict per estimator: signed errors, estimated stds, and
        # CI coverage indicators at the 5% / 10% significance levels.
        self.IS = {"error": [], "stds": [], "freq": {"5": [], "10": []}}
        self.DR = {"error": [], "stds": [], "freq": {"5": [], "10": []}}
        self.TR = {"error": [], "stds": [], "freq": {"5": [], "10": []}}
        self.QR = {"error": [], "stds": [], "freq": {"5": [], "10": []}}
        self.raw_Q = []     # mean raw Q-value per seed
        self.V_true = []    # true policy value per seed
        self.seed = 0       # number of replications recorded so far
        self.instances = []
        self.names = ["IS", "DR", "TR", "QR"]

    def add_env(self, fqi, fqe):
        # Store FQI / FQE hyper-parameter settings for later reference.
        self.fqi_para = fqi
        self.fqe_para = fqe

    def update(self, V_true, are=None, are_details=None, dis=False, prec=2):
        """Record one replication's results, either from a tuple
        (are_details) or from an estimator object (are); optionally display
        them when dis=True."""
        if are_details is not None:
            raw_Qs, IS_V, DR_V, TR_V, QR_V = are_details
        else:
            raw_Qs, IS_V, DR_V, TR_V, QR_V = are.raw_Qs, are.IS_V, are.DR_V, are.TR_V, are.QR_V
            # NOTE(review): placed in the else-branch since `are` may be
            # None on the are_details path -- confirm against the original.
            are.large = []
        self.seed += 1
        ######################## optional display ########################
        if dis:
            printR("true value: {:.2f} ".format(V_true))
            printR("raw Q-value: {:.2f}".format(np.mean(raw_Qs)))
            pd.set_option('precision', prec)
            printR("IS: est = {:.2f}, sigma = {:.2f}".format(IS_V["V"], IS_V["sigma"]))
            display(DF(IS_V["CIs"], index=["0.05", "0.1"]))
            printR("DR: est = {:.2f}, sigma = {:.2f}".format(DR_V["V"], DR_V["sigma"]))
            display(DF(DR_V["CIs"], index=["0.05", "0.1"]))
            printR("TR: est = {:.2f}, sigma = {:.2f}".format(TR_V["V"], TR_V["sigma"]))
            display(DF(TR_V["CIs"], index=["0.05", "0.1"]))
            printR("QR: est = {:.2f}, sigma = {:.2f}".format(QR_V["V"], QR_V["sigma"]))
            display(DF(QR_V["CIs"], index=["0.05", "0.1"]))
        ######################## Record results ########################
        self.raw_Q.append(np.mean(raw_Qs))
        self.V_true.append(V_true)
        self.IS["error"].append(IS_V["V"] - V_true)
        self.IS["stds"].append(IS_V["sigma"])
        self.DR["error"].append(DR_V["V"] - V_true)
        self.DR["stds"].append(DR_V["sigma"])
        self.TR["error"].append(TR_V["V"] - V_true)
        self.TR["stds"].append(TR_V["sigma"])
        self.QR["error"].append(QR_V["V"] - V_true)
        self.QR["stds"].append(QR_V["sigma"])
        # Coverage indicator: did the CI at each level contain the truth?
        for i, alpha in enumerate(["5", "10"]):
            self.IS["freq"][alpha].append(IS_V["CIs"][i][0] <= V_true and IS_V["CIs"][i][1] >= V_true)
            self.DR["freq"][alpha].append(DR_V["CIs"][i][0] <= V_true and DR_V["CIs"][i][1] >= V_true)
            self.TR["freq"][alpha].append(TR_V["CIs"][i][0] <= V_true and TR_V["CIs"][i][1] >= V_true)
            self.QR["freq"][alpha].append(QR_V["CIs"][i][0] <= V_true and QR_V["CIs"][i][1] >= V_true)
        self.instances.append(are)
        if dis:
            printG("<<================ Iteration {} DONE ! ================>>".format(self.seed))
            self.analyze()

    def analyze(self, prec=3, echo=True):
        """Compute the per-estimator summary matrix (RMSE, MAE, bias,
        average std, coverage) and optionally display it."""
        pd.set_option('precision', prec)
        mat = [[np.sqrt(np.mean(arr(estimator["error"]) ** 2)),
                np.mean(np.abs(estimator["error"])),
                np.mean(estimator["error"]),
                np.mean(estimator["stds"]),
                np.mean(estimator['freq']['5']),
                np.mean(estimator['freq']['10'])] for estimator in [self.IS, self.DR, self.TR, self.QR]]
        df = DF(mat,
                columns=["RMSE", "MAE", "bias", "ave_std", "freq: 0.95", "freq: 0.9"],
                index=self.names)
        error_Q = (arr(self.raw_Q) - arr(self.V_true))
        RMSE_Q = np.sqrt(np.mean(error_Q ** 2))
        MAE_Q = np.mean(np.abs(error_Q))
        bias_Q = np.mean(error_Q)
        if echo:
            display(df)
            print("Q: RMSE = {:.2f}, bias = {:.2f}".format(RMSE_Q, bias_Q))
            printR("rep = {}".format(self.seed))
        return mat

    def save(self, path):
        """Dump all accumulated results (plus summary arrays) to `path`."""
        freq = arr([[np.mean(estimator['freq'][alpha])
                     for estimator in [self.IS, self.DR, self.TR, self.QR]]
                    for alpha in ["5", "10"]])
        res = {"DR": self.DR, "TR": self.TR, "QR": self.QR, "IS": self.IS,
               "raw_Q": self.raw_Q,
               "V_true": self.V_true,
               "RMSE": arr([np.sqrt(np.mean(arr(estimator["error"]) ** 2))
                            for estimator in [self.IS, self.DR, self.TR, self.QR]]),
               "MAE": arr([np.mean(np.abs(estimator["error"]))
                           for estimator in [self.IS, self.DR, self.TR, self.QR]]),
               "std": arr([np.mean(estimator["stds"])
                           for estimator in [self.IS, self.DR, self.TR, self.QR]]),
               "freq": freq,
               # NOTE(review): self.hyper is never assigned inside this
               # class; save() raises AttributeError unless it is set
               # externally beforehand -- confirm.
               "hyper": self.hyper}
        dump(res, path)

    def aggregate(self, results, prec=3):
        """Combine several saved result dicts, weighting each by its
        replication count, and display the pooled summary table."""
        n_reps = [len(res["DR"]["error"]) for res in results]
        total_rep = sum(n_reps)
        pd.set_option('precision', prec)
        RMSE = np.sqrt(np.sum([res["RMSE"] ** 2 * n for n, res in zip(n_reps, results)], 0) / total_rep)
        bias = arr([np.sum([np.mean(res[est]["error"]) * n for n, res in zip(n_reps, results)], 0) / total_rep for est in self.names])
        # should deal with this line
        est_std = arr([np.sum([np.std(res[est]["error"]) * n for n, res in zip(n_reps, results)], 0) / total_rep for est in self.names])
        MAE = np.sum([res["MAE"] * n for n, res in zip(n_reps, results)], 0) / total_rep
        std = np.sum([res["std"] * n for n, res in zip(n_reps, results)], 0) / total_rep
        freq = np.stack([res["freq"].T * n for n, res in zip(n_reps, results)], axis=0)
        freq = np.sum(freq, axis=0) / np.sum(n_reps)
        res_array = np.hstack([RMSE[:, np.newaxis],
                               MAE[:, np.newaxis],
                               bias[:, np.newaxis],
                               est_std[:, np.newaxis],
                               std[:, np.newaxis],  # average CI width / std
                               freq])
        res = DF(res_array,
                 columns=["RMSE", "MAE", "bias", "std", "ave_std", "freq: 0.95", "freq: 0.9"],
                 index=self.names)
        display(res)
        RMSE_Q = np.sqrt(np.sum([np.mean((arr(res["raw_Q"]) - arr(res["V_true"])) ** 2) * n for n, res in zip(n_reps, results)]) / total_rep)
        MAE_Q = np.sum([np.sum(np.abs(arr(res["raw_Q"]) - arr(res["V_true"]))) for n, res in zip(n_reps, results)]) / total_rep
        print("Q: RMSE = {:.2f}, MAE = {:.2f}".format(RMSE_Q, MAE_Q))
        printR("rep = {}".format(total_rep))
        return res_array

    def print_one_seed(self, V_true, are=None, prec=3):
        """Pretty-print a single replication's estimates and CIs."""
        from IPython.display import display
        raw_Qs, DR_V, TR_V, QR_V = are.raw_Qs, are.DR_V, are.TR_V, are.QR_V
        printR("true value: {:.2f} ".format(V_true))
        printR("raw Q-value: {:.2f}".format(np.mean(raw_Qs)))
        printR("raw IS: {:.2f} with std = {:.2f} ".format(are.IS_V["V"], are.IS_V["sigma"]))
        pd.set_option('precision', prec)
        printR("DR: est = {:.2f}, sigma = {:.2f}".format(DR_V["V"], DR_V["sigma"]))
        display(DF(DR_V["CIs"], index=["0.05", "0.1"]))
        printR("TR: est = {:.2f}, sigma = {:.2f}".format(TR_V["V"], TR_V["sigma"]))
        display(DF(TR_V["CIs"], index=["0.05", "0.1"]))
        printR("QR: est = {:.2f}, sigma = {:.2f}".format(QR_V["V"], QR_V["sigma"]))
        display(DF(QR_V["CIs"], index=["0.05", "0.1"]))
| 2.390625 | 2 |
madrona/openid/tests/__init__.py | movermeyer/madrona | 9 | 12764241 | <reponame>movermeyer/madrona
import unittest
def suite():
suite = unittest.TestSuite()
for name in ['test_store']:
mod = __import__('%s.%s' % (__name__, name), {}, {}, ['suite'])
suite.addTest(mod.suite())
return suite
| 2.046875 | 2 |
scripts/plot_torus.py | lanl/phoebus | 3 | 12764242 | #PHDF_PATH = '/home/brryan/rpm/phoebus/external/parthenon/scripts/python/'
#PHDF_PATH = '/home/brryan/github/phoebus/external/parthenon/scripts/python/'
#DUMP_NAMES = '/home/brryan/builds/phoebus/torus.out1.*.phdf'
DUMP_NAMES = 'torus.out1.*.phdf'
import argparse
import numpy as np
import sys
import matplotlib.pyplot as plt
import matplotlib.patches as patches
import shutil
import os
from subprocess import call, DEVNULL
import glob
#sys.path.append(PHDF_PATH)
#import phdf
from parthenon_tools import phdf
import time
from enum import Enum
#plot = "mks"
plot = "cartesian"  # coordinate system to plot in: "mks" or "cartesian"

# Outer radius to plot or None
rmax = 40
#rmax = None

parser = argparse.ArgumentParser(description='Plot torus')
parser.add_argument('--nfinal', type=int, default=-1, help='dump to plot')
# NOTE(review): argparse `type=bool` treats any non-empty string as True
# ("--savefig False" enables saving); action='store_true' would be the
# conventional fix, at the cost of changing the CLI.
parser.add_argument('--savefig', type=bool, default=False, help='Whether to save figure')
args = parser.parse_args()

# Whether to plot meshblock boundaries
plot_meshblocks = True
h_ = 0.3       # h parameter of the theta coordinate map below
a = 0.9375     # presumably the black-hole spin parameter -- TODO confirm
rh = 1. + np.sqrt(1. - a*a)   # horizon radius computed from a
nfinal = args.nfinal

# Collect and sort all dump files; dump 0 supplies the (static) grid,
# dump `nfinal` supplies the data to plot.
dfnams = np.sort(glob.glob(DUMP_NAMES))
#dfnam = dfnams[nfinal]
dfile = phdf.phdf(dfnams[0])
dfile1 = phdf.phdf(dfnams[nfinal])

nblocks = dfile.NumBlocks
meshblocksize = dfile.MeshBlockSize
nb = nblocks
nx = meshblocksize[0]
ny = meshblocksize[1]
nz = meshblocksize[2]

print("File: ", dfnams[nfinal], end="\n\n")
time = dfile1.Time
print("Time: ", time, end="\n\n")
print("Nblocks: ", nblocks)
print("  nx: %i" % nx + " ny: %i" % ny)
print("")

blockbounds = dfile.BlockBounds
dx = (blockbounds[0][1] - blockbounds[0][0])/nx
dy = (blockbounds[0][3] - blockbounds[0][2])/ny

# Get pcolormesh grid for each block (cell corners, hence nx+1 x ny+1).
xblock = np.zeros([nblocks, nx+1, ny+1])
yblock = np.zeros([nblocks, nx+1, ny+1])
for n in range(nblocks):
    for i in range(nx+1):
        for j in range(ny+1):
            # Per-block spacing (blocks can differ in extent under AMR).
            dx = (blockbounds[n][1] - blockbounds[n][0])/nx
            dy = (blockbounds[n][3] - blockbounds[n][2])/ny
            xblock[n,i,j] = blockbounds[n][0] + i*dx
            yblock[n,i,j] = blockbounds[n][2] + j*dy

# Convert from FMKS to xy
r = np.exp(xblock)
th = np.pi*yblock + ((1. - h_)/2.)*np.sin(2.*np.pi*yblock)
x = r*np.sin(th)
y = r*np.cos(th)

print("Variables:")
for var in dfile.Variables:
    print("  " + var)
print("")

# Numblocks, nz, ny, nx
Pg = dfile1.Get("pressure", flatten=False)
#bfield = dfile.Get("p.bfield", flatten=False)
vcon = dfile.Get("p.velocity", flatten=False)
density = dfile1.Get("p.density", flatten=False)
crho = dfile1.Get("c.density", flatten=False)
ug = dfile1.Get("p.energy", flatten=False)
fd = dfile1.Get("flux_divergence", flatten=False)
st = dfile1.Get("src_terms", flatten=False)
v1 = vcon[:,:,:,:,0]
v2 = vcon[:,:,:,:,1]
v3 = vcon[:,:,:,:,2]
Bcon = dfile1.Get("p.bfield", flatten=False)
flatgcov = dfile1.Get("g.c.gcov", flatten=False)
alpha = dfile1.Get("g.c.alpha", flatten=False)
gcov = np.zeros([nb,nz,ny,nx,4,4])
def flatten(m, n):
    """Map symmetric 4x4 metric indices (m, n) to the packed storage index
    used by the flattened g.c.gcov array (10 independent components)."""
    lookup = (
        (0, 1, 3, 6),
        (1, 2, 4, 7),
        (3, 4, 5, 8),
        (6, 7, 8, 9),
    )
    return lookup[m][n]
# Unpack the flattened symmetric metric into a full 4x4 gcov array,
# filling each row/column pair from the packed storage via flatten().
for mu in range(4):
    gcov[:,:,:,:,mu,0] = flatgcov[:,:,:,:,flatten(mu,0)]
    gcov[:,:,:,:,0,mu] = flatgcov[:,:,:,:,flatten(0,mu)]
for mu in range(1,4):
    gcov[:,:,:,:,mu,1] = flatgcov[:,:,:,:,flatten(mu,1)]
    gcov[:,:,:,:,1,mu] = flatgcov[:,:,:,:,flatten(1,mu)]
for mu in range(2,4):
    gcov[:,:,:,:,mu,2] = flatgcov[:,:,:,:,flatten(mu,2)]
    gcov[:,:,:,:,2,mu] = flatgcov[:,:,:,:,flatten(2,mu)]
gcov[:,:,:,:,3,3] = flatgcov[:,:,:,:,flatten(3,3)]

# Lower the spatial indices of B and v with the spatial part of gcov.
Bcov = np.zeros([nb,nz,ny,nx,3])
vcov = np.zeros([nb,nz,ny,nx,3])
for ii in range(3):
    for jj in range(3):
        Bcov[:,:,:,:,ii] += gcov[:,:,:,:,ii+1,jj+1]*Bcon[:,:,:,:,jj]
        vcov[:,:,:,:,ii] += gcov[:,:,:,:,ii+1,jj+1]*vcon[:,:,:,:,jj]

# Contractions needed for the magnetization / plasma-beta diagnostics.
Bsq = np.zeros([nb,nz,ny,nx])
Bdv = np.zeros([nb,nz,ny,nx])
vsq = np.zeros([nb,nz,ny,nx])
Gamma = np.zeros([nb,nz,ny,nx])
for ii in range(3):
    Bsq[:,:,:,:] += Bcon[:,:,:,:,ii]*Bcov[:,:,:,:,ii]
    Bdv[:,:,:,:] += Bcon[:,:,:,:,ii]*vcov[:,:,:,:,ii]
    vsq[:,:,:,:] += vcon[:,:,:,:,ii]*vcov[:,:,:,:,ii]
Gamma[:,:,:,:] = 1./np.sqrt(1 - vsq[:,:,:,:])

b0 = Gamma*Bdv/alpha
bsq = (Bsq + alpha**2*b0**2)/Gamma**2
# Plasma beta = gas pressure / magnetic pressure; 1e-20 avoids div-by-zero.
beta = 2.*Pg/(bsq + 1.e-20)

# Field selection for the two panels below (alternatives left commented).
var = density
#var = ug
vmin = -5   # log10 colour range for the density panel
vmax = 0
var1 = density
#var = np.fabs(v1)
mblw = 0.5  # meshblock-boundary line width
def myplot(myvar, n, vmin=vmin, vmax=vmax, uselog=True, cmap='jet', label=None):
    """Draw block-wise pcolormesh panels of `myvar` on axes[n].

    Relies on module-level globals: fig, axes, plot, plot_meshblocks, rmax,
    x, y, xblock, yblock, nblocks, rh, mblw.
    """
    from mpl_toolkits.axes_grid1 import make_axes_locatable
    ax = axes[n]
    for nb in range(nblocks):
        if plot == "mks":
            # Plot directly in (X1, X2) MKS coordinates, log10 colour scale.
            im = ax.pcolormesh(xblock[nb,:,:], yblock[nb,:,:], np.log10(myvar[nb,0].transpose()),
                               vmin=vmin, vmax=vmax, cmap=cmap)
        elif plot == "cartesian":
            if uselog:
                im = ax.pcolormesh(x[nb,:,:], y[nb,:,:], np.log10(myvar[nb,0].transpose()),
                                   vmin=vmin, vmax=vmax, cmap=cmap)
            else:
                im = ax.pcolormesh(x[nb,:,:], y[nb,:,:], myvar[nb,0].transpose(),
                                   vmin=vmin, vmax=vmax, cmap=cmap)
            if plot_meshblocks:
                # Outline the four edges of each meshblock.
                ax.plot(x[nb,0,:], y[nb,0,:], color='k', linewidth=mblw, linestyle='--')
                ax.plot(x[nb,-1,:], y[nb,-1,:], color='k', linewidth=mblw, linestyle='--')
                ax.plot(x[nb,:,0], y[nb,:,0], color='k', linewidth=mblw, linestyle='--')
                ax.plot(x[nb,:,-1], y[nb,:,-1], color='k', linewidth=mblw, linestyle='--')
            if rmax is not None:
                ax.set_xlim([0,rmax])
                ax.set_ylim([-rmax,rmax])
        else:
            print("Plotting coordinates \"" + plot + "\" unknown")
            sys.exit()
    if plot == "cartesian":
        ax.set_aspect('equal')
    ax.set_xlabel('x')
    ax.set_ylabel('y')
    # Draw black hole (filled circle of horizon radius rh).
    bh = plt.Circle((0, 0), rh, color='k')
    ax.add_patch(bh)
    if label is not None:
        ax.set_title(label)
    if n > 0:
        # Only the leftmost panel keeps its y tick labels.
        ax.set_yticklabels([])
    divider = make_axes_locatable(ax)
    cax = divider.append_axes('right', size='5%', pad=0.05)
    fig.colorbar(im, cax=cax, orientation='vertical')
# Two side-by-side panels: log10 density and log10 plasma beta.
fig, axes = plt.subplots(1, 2, figsize=(8,8))
myplot(var1, 0, label='density')
myplot(beta, 1, vmin=-3, vmax=3, uselog=True, cmap='RdBu', label='plasma beta')
if args.savefig:
    plt.savefig('frame_%08d.png' % args.nfinal, bbox_inches='tight')
else:
    plt.show()
| 2.125 | 2 |
jcasts/podcasts/migrations/0098_remove_podcast_podcastindex.py | danjac/jcasts | 13 | 12764243 | # Generated by Django 3.2.9 on 2021-11-18 11:12
from django.db import migrations
class Migration(migrations.Migration):
    """Auto-generated schema migration: drops the `podcastindex` field from
    the Podcast model."""

    dependencies = [
        ("podcasts", "0097_rename_schedule_modifier_podcast_frequency_modifier"),
    ]

    operations = [
        migrations.RemoveField(
            model_name="podcast",
            name="podcastindex",
        ),
    ]
| 1.367188 | 1 |
madlib5.py | Leorodr501/Mad-Libs | 0 | 12764244 | <reponame>Leorodr501/Mad-Libs
# Mad Libs story builder.
# Bug fix: the original reused the names `adjective`, `noun`, `verb` and
# `adverb` for every prompt, so all but the last answer of each part of
# speech were silently discarded. Each of the 14 prompts now has its own
# variable, matched to its slot in the story.
name = input("Enter your name: ")
date = input("Enter a date: ")
adjective1 = input("Enter an adjective: ")
noun1 = input("Enter a noun: ")
verb_past1 = input("Enter a verb in past tense: ")
adverb1 = input("Enter an adverb: ")
adjective2 = input("Enter another adjective: ")
noun2 = input("Enter another noun: ")
noun3 = input("Enter another noun: ")
adjective3 = input("Enter another adjective: ")
verb1 = input("Enter a verb: ")
adverb2 = input("Enter another adverb: ")
verb_past2 = input("Enter another verb in past tense: ")
adjective4 = input("Enter another adjective: ")

print("Name: " + name + " Date: " + date )
print("Today I went to the zoo. I saw a " + adjective1 + " " + noun1 + " jumping up and down in its tree.")
print("He " + verb_past1 + " " + adverb1 + " through the large tunnel that led to its " + adjective2 + " " + noun2 + ".")
print("I got some peanuts and passed them through the cage to a gigantic gray " + noun3 + " towering above my head.")
print("Feeding the animals made me hungry. I went to get a " + adjective3 + " scoop of ice cream. It filled my stomach.")
print("Afterwards I had to " + verb1 + " " + adverb2 + " to catch our bus.")
print("When I got home I " + verb_past2 + " my mom for a " + adjective4 + " day at the zoo.")
| 3.484375 | 3 |
BOJ2178.py | INYEONGKIM/BOJ | 2 | 12764245 | <filename>BOJ2178.py
# BOJ 2178: shortest path length through a 0/1 maze, via breadth-first
# search from the top-left to the bottom-right cell.
import sys
from collections import deque

read = sys.stdin.readline
rows, cols = map(int, read().split())
# Keep the raw characters of each row; a trailing '\n' is harmless since
# only '1' cells are ever entered.
maze = [list(read()) for _ in range(rows)]
dist = [[0] * cols for _ in range(rows)]   # 0 also marks "unvisited"

queue = deque([(0, 0)])
dist[0][0] = 1   # path length counts the starting cell
while queue:
    r, c = queue.popleft()
    for nr, nc in ((r + 1, c), (r - 1, c), (r, c + 1), (r, c - 1)):
        if 0 <= nr < rows and 0 <= nc < cols and maze[nr][nc] == '1' and dist[nr][nc] == 0:
            dist[nr][nc] = dist[r][c] + 1
            queue.append((nr, nc))

print(dist[rows - 1][cols - 1])
| 2.515625 | 3 |
nicehash.py | wrigjl/nice-scripts | 0 | 12764246 |
import json
import hmac
import hashlib
import requests
import datetime
import uuid
from requests.auth import AuthBase
class NiceHashAuth(AuthBase):
    """requests auth hook implementing NiceHash request signing: each request
    gets X-Time / X-Nonce / X-Organization-Id headers plus an X-Auth header
    of "<api_key>:<HMAC-SHA256 hex digest>"."""

    def __init__(self, fname = None, api_secret = None, api_key = None, org_id = None):
        # Credentials may be loaded from a JSON file and/or overridden by the
        # explicit keyword arguments (explicit arguments win).
        if fname is not None:
            with open(fname) as f:
                keys = json.load(f)
            self.api_secret = keys['api_secret']
            self.api_key = keys['api_key']
            self.org_id = keys['org_id']
        if api_secret is not None:
            self.api_secret = api_secret
        if api_key is not None:
            self.api_key = api_key
        if org_id is not None:
            self.org_id = org_id
        # NOTE(review): `assert` disappears under `python -O`; consider
        # raising ValueError for missing credentials instead.
        assert self.api_secret is not None
        assert self.api_key is not None
        assert self.org_id is not None

    def make_timestamp(self):
        # current UTC time in ms as a integer expressed as a string
        return str(round(datetime.datetime.now(tz=datetime.timezone.utc).timestamp() * 1000.0))

    def make_nonce(self):
        # random long string (UUID4)
        return str(uuid.uuid4())

    def __call__(self, request):
        """Sign `request` in place; `requests` invokes this hook per request."""
        timestamp = self.make_timestamp()
        nonce = self.make_nonce()
        empty = bytearray('\x00', 'utf-8')   # NUL separator between signed fields
        comps = request.path_url.split('?', 1)
        url = comps[0]
        query = '' if len(comps) == 1 else comps[1]
        # Signed message layout (NUL-separated), presumably matching the
        # NiceHash API signing scheme -- confirm against their docs:
        # key, time, nonce, '', org_id, '', method, path, query[, body].
        body = bytearray(self.api_key, 'utf-8') + empty
        body += bytearray(timestamp, 'utf-8') + empty
        body += bytearray(nonce, 'utf-8') + empty + empty
        body += bytearray(self.org_id, 'utf-8') + empty + empty
        body += bytearray(request.method, 'utf-8') + empty
        body += bytearray(url, 'utf-8') + empty + bytearray(query, 'utf-8')
        if request.body:
            body += empty + request.body
        digest = hmac.new(bytearray(self.api_secret, 'utf-8'), body, hashlib.sha256).hexdigest()
        request.headers.update({
            'X-Time': timestamp,
            'X-Nonce': nonce,
            'X-Organization-Id': self.org_id,
            'X-Auth': f'{self.api_key}:{digest}',
        })
        return request
def checkNHTime():
    '''Check that our clock is within 5 minutes of the nicehash clock (no
    point in continuing if it isn't).'''
    nhtime = float(requests.get('https://api2.nicehash.com/api/v2/time').json()['serverTime'])
    nhtime /= 1000.0   # serverTime is in milliseconds
    nhtime = datetime.datetime.fromtimestamp(nhtime, tz=datetime.timezone.utc)
    mytime = datetime.datetime.now(tz=datetime.timezone.utc)
    delta = nhtime - mytime
    # NOTE(review): `assert` is stripped under `python -O`; raise an
    # exception instead if this check must always run.
    assert delta <= datetime.timedelta(minutes=5) and delta >= datetime.timedelta(minutes=-5), \
        f"timedelta is too great: {delta}"
| 2.765625 | 3 |
binary_search.py | VishwaasHegde/E2ERaga | 0 | 12764247 | import numpy as np
# Python3 program to find the element
# closest to a given target.
# Returns element closest to target in arr[]
def findClosest(arr, n, target):
    """Return the index of the row in `arr` (sorted ascending by arr[i][0])
    whose first element is closest to `target`.

    Exact ties between two equally close neighbours resolve to the larger
    index (the `>=` comparison). Binary search: O(log n).
    """
    # Clamp to the ends when the target lies outside the key range.
    if target <= arr[0][0]:
        return 0
    if target >= arr[n - 1][0]:
        return n - 1

    lo, hi = 0, n
    mid = 0
    while lo < hi:
        mid = (lo + hi) // 2
        key = arr[mid][0]
        if key == target:
            return mid
        if target < key:
            # Target sits between arr[mid-1] and arr[mid]: pick the nearer.
            if mid > 0 and target > arr[mid - 1][0]:
                return mid if target - arr[mid - 1][0] >= key - target else mid - 1
            hi = mid
        else:
            # Target sits between arr[mid] and arr[mid+1]: pick the nearer.
            if mid < n - 1 and target < arr[mid + 1][0]:
                return mid + 1 if target - key >= arr[mid + 1][0] - target else mid
            lo = mid + 1

    # Only a single candidate remained.
    return mid
# Method to compare which one is the more close.
# We find the closest by taking the difference
# between the target and both values. It assumes
# that val2 is greater than val1 and target lies
# between these two.
def getClosest(arr, ind1, ind2, target):
    """Of the two candidate rows arr[ind1] and arr[ind2] (assuming
    arr[ind1][0] <= target <= arr[ind2][0]), return the index whose first
    element is closer to `target`; exact ties go to ind2."""
    left_gap = target - arr[ind1][0]
    right_gap = arr[ind2][0] - target
    return ind2 if left_gap >= right_gap else ind1
def get_bound(arr, N, s, e):
    """Return the feature rows bracketing [s, e]: the row just before the
    match for `s` (start side) and the row matching `e` (end side)."""
    lower = get_bound_util(arr, N, s, True)
    upper = get_bound_util(arr, N, e, False)
    return lower, upper
def get_bound_util(arr, N, X, is_start):
    """Locate the row of `arr` nearest to key X and return its feature
    vector (all columns after the first, i.e. row[1:]).

    is_start=True  -> return the features of the row *before* the match, or
                      a zero vector when X maps to row 0.
    is_start=False -> return the features of the matched row itself.

    NOTE(review): the two branches pass different row counts to findClosest
    (`N` vs `arr.shape[0]`) -- confirm both are intended. The zero-vector
    width 60 is hard-coded; confirm it matches the feature column count.
    """
    if is_start:
        idx = findClosest(arr, N, X)
        if idx == 0:
            # No earlier row exists: fall back to a zero feature vector.
            return np.zeros(60)
        else:
            return arr[idx - 1][1:]
    else:
        idx = findClosest(arr, arr.shape[0], X)
        return arr[idx][1:]
if __name__ == '__main__':
    # Smoke test of get_bound on a tiny key-only table.
    # NOTE(review): `True` is passed where `e` (an end key) is expected --
    # looks like a leftover from an older signature; confirm.
    gb = get_bound([[4], [5], [10], [12], [18], [20]], 6, 20, True)
    print(gb)
| 3.71875 | 4 |
py_algo/number_theory/competition/monk_and_fredo.py | Sk0uF/Algorithms | 1 | 12764248 | """
Codemonk link: https://www.hackerearth.com/problem/algorithm/monk-and-fredo-cm-number-theory-97942213/
Given two weights of a and b units, in how many different ways you can achieve a weight of d units using only the given
weights? Any of the given weights can be used any number of times (including 0 number of times).
Input - Output:
The first line of input consists of an integer T denoting the number of test cases.
Each test case consists of only one line containing three space separated integers a, b and d.
For each test case, print the answer in a separate line.
Sample input:
4
2 3 7
4 10 6
6 14 0
2 3 6
Sample Output:
1
0
1
2
"""
"""
This is considered to be a common problem in number theory. However, even if the solution might seem simple at first,
the full Mathematical concept and all the proofs need study to be fully understood. Here, we will directly use the tools
without the proofs. The question can be translated into the following: Find all the possible x and y pairs for which
a*x + b*y = d (1). This is a linear diophantine equation and has solutions if and only if d % GCD(a, b) = 0. With simple
words, it would how many a's and b's can we add together to get d? Is there only one way to do it or more? If a = b = 0
and d!=0 then there are no solutions. If a=b=c=0 then there are infinite solutions. To solve this equation we use the
extended Euclidean algorithm to solve a*x' + b*'y = GCD(a, b). Then, an initial integer solution for (1) is going to be
x0 = x`*d/GCD(a, b) and y0 = y'*d/GCD(a,b). From this initial solution we can generate all the other solutions which
will be x = x0 + k*b/GCD(a, b) and y = y0 - k*a/GCD(a, b). We now need to find k so that we only have non negative
solutions.
0 <= x0 + k*b/GCD(a, b) => -x0 <= k*b/GCD(a, b) => -x'*d/GCD(a, b) <= k*b/GCD(a, b) => k >= -x'*d/b (2)
0 <= y0 - k*a/GCD(a, b) => y0 >= k*a/GCD(a, b) => y'*d/GCD(a, b) >= k*a/GCD(a, b) => k <= y'*d/a (3)
From (2) and (3) we have: -x'*d/b <= k <= y'*d/a
So, the final answer will be: number_of_solutions = floor(y'*d/a) - ceil(-x'*d/b) + 1. Why floor, ceil and + 1 you may
ask? Since k must be integer (don't get confused with the integer solution) it means that if -x'*d/b is a float number,
for example 2.2, then k >= 2.2 and the first integer that's bigger than 2.2 is 3. We follow the exact same logic to
understand the ceil. Finally, we add +1 because we already find an initial solution, x0, y0 that we didn't count.
O(log(min(A, B)) to find x', y' and GCD(a, b).
Final complexity: O(log(min(A, B))
"""
def extended_euclidean(a, b):
    """Iterative extended Euclid: return (g, x, y) with g = gcd(a, b) and
    a*x + b*y == g.

    Produces the same Bezout coefficients as the classic recursive
    formulation, without recursion-depth limits.
    """
    old_r, r = a, b
    old_x, x = 1, 0
    old_y, y = 0, 1
    while r != 0:
        q = old_r // r
        old_r, r = r, old_r - q * r
        old_x, x = x, old_x - q * x
        old_y, y = y, old_y - q * y
    return old_r, old_x, old_y
from sys import stdin, stdout
from math import ceil, floor

# Count the non-negative integer solutions (x, y) of a*x + b*y = d for each
# test case. k ranges over integers with ceil(-x'*d/b) <= k <= floor(y'*d/a),
# so the answer is floor(y'*d/a) - ceil(-x'*d/b) + 1 (see the derivation
# above).
t = int(stdin.readline())
for _ in range(t):
    a, b, d = map(int, stdin.readline().split())
    gcd, x, y = extended_euclidean(a, b)
    if d % gcd != 0:
        # No integer solutions when gcd(a, b) does not divide d.
        stdout.write(str(0) + "\n")
        continue
    # Bug fix: use exact integer ceil/floor instead of float math.ceil/floor;
    # float division silently loses precision once x*d or y*d exceeds 2**53.
    first = -((x * d) // b)    # == ceil(-x*d / b) for b > 0
    second = (y * d) // a      # == floor(y*d / a) for a > 0
    if first <= second:
        stdout.write(str(second - first + 1) + "\n")
    else:
        stdout.write(str(0) + "\n")
| 3.984375 | 4 |
analysis/analysis.py | kernsuite-debian/integration | 0 | 12764249 | <reponame>kernsuite-debian/integration
#!/usr/bin/env python
# Copyright 2015 <NAME> <<EMAIL>>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import manage
def statistics(queue, table, samples):
    """For every DM count in `table` (at the given sample count), compute
    min/avg/max/stddev of GFLOPs and GB/s plus the (max - avg)/stddev spread.

    queue is an open DB cursor. `table` and `samples` are interpolated
    directly into the SQL text, so they must come from trusted internal
    sources (this tool builds its queries by string concatenation).

    Returns a list of rows:
    [dm, gflops min/max/avg/std/spread, gbs min/avg/max/std/spread].
    """
    confs = list()
    dms_range = manage.get_dm_range(queue, table, samples)
    for dm in dms_range:
        queue.execute("SELECT MIN(GFLOPs),AVG(GFLOPs),MAX(GFLOPs),STDDEV_POP(GFLOPs),MIN(GBs),AVG(GBs),MAX(GBs),STDDEV_POP(GBs) FROM " + table + " WHERE (DMs = " + str(dm[0]) + " AND samples = " + samples + ")")
        line = queue.fetchall()
        confs.append([dm[0], line[0][0], line[0][2], line[0][1], line[0][3], (line[0][2] - line[0][1]) / line[0][3], line[0][4], line[0][5], line[0][6], line[0][7], (line[0][6] - line[0][5]) / line[0][7]])
    return confs
def histogram(queue, table, samples):
    """Build, per DM count, a histogram of GB/s values.

    hists[k][g] is the number of measurements for DM dms_range[k] whose GBs
    value truncates to the integer g. queue is an open DB cursor; table and
    samples are interpolated into SQL and must be trusted.
    """
    hists = list()
    dms_range = manage.get_dm_range(queue, table, samples)
    for dm in dms_range:
        queue.execute("SELECT MAX(GBs) FROM " + table + " WHERE (DMs = " + str(dm[0]) + " AND samples = " + samples + ")")
        maximum = int(queue.fetchall()[0][0])
        # One bucket per integer GB/s value from 0 up to the maximum.
        hist = [0 for i in range(0, maximum + 1)]
        queue.execute("SELECT GBs FROM " + table + " WHERE (DMs = " + str(dm[0]) + " AND samples = " + samples + ")")
        flops = queue.fetchall()
        for flop in flops:
            hist[int(flop[0])] = hist[int(flop[0])] + 1
        hists.append(hist)
    return hists
def optimization_space(queue, table, samples):
    """Return, per DM count, the first configuration row found in `table` as
    [nrThreadsD0, nrThreadsD1, nrItemsD0, nrItemsD1, GBS].

    queue is an open DB cursor; `table` and `samples` are interpolated into
    the SQL text and must come from trusted internal sources.

    NOTE(review): despite the variable name `best`, the query has no
    ORDER BY, so the row returned is whichever the DB yields first --
    confirm whether an ORDER BY GBS DESC was intended.
    """
    confs = list()
    dms_range = manage.get_dm_range(queue, table, samples)
    for dm in dms_range:
        queue.execute("SELECT nrThreadsD0,nrThreadsD1,nrItemsD0,nrItemsD1,GBS FROM " + table + " WHERE (DMs = " + str(dm[0]) + " AND samples = " + samples + ")")
        best = queue.fetchall()
        # Bug fix: the query selects 5 columns but the original appended
        # best[0][0]..best[0][5] (6 items), raising IndexError on index 5.
        # Copy exactly the columns the query returns instead.
        confs.append(list(best[0]))
    return confs
def single_parameter_space(queue, table, parameter, samples):
    """For each DM count, sweep the distinct values of `parameter` and record
    the best (maximum) GFLOPs achieved at each value.

    Returns a list (one entry per DM) of [value, max_GFLOPs] pairs.
    `parameter`, `table` and `samples` are interpolated into the SQL text
    and must come from trusted internal sources.
    """
    confs = list()
    scenario = "(samples = " + samples + ")"
    dms_range = manage.get_dm_range(queue, table, samples)
    for dm in dms_range:
        internalConf = list()
        queue.execute("SELECT DISTINCT " + parameter + " FROM " + table + " WHERE DMs = " + str(dm[0]) + " AND " + scenario + " ORDER BY " + parameter)
        values = queue.fetchall()
        for value in values:
            queue.execute("SELECT MAX(GFLOPs) FROM " + table + " WHERE " + parameter + " = " + str(value[0]) + " AND DMs = " + str(dm[0]) + " AND " + scenario)
            best = queue.fetchall()
            internalConf.append([value[0], best[0][0]])
        confs.append(internalConf)
    return confs
| 2.03125 | 2 |
layers/water_connection.py | JinIgarashi/postgis2qfield | 2 | 12764250 | <reponame>JinIgarashi/postgis2qfield
from layers.layer_base import LayerBase
class WaterConnection(LayerBase):
    """Layer definition for the "water_connection" table."""

    def __init__(self):
        super().__init__("water_connection")
        # Columns to be parsed as dates when loading the layer data.
        self.parse_dates = ['input_date', 'meter_installation_date', 'connection_date', 'disconnection_date']
| 2.421875 | 2 |