Column schema (⌀ = nullable):

| column | dtype | range |
|---|---|---|
| hexsha | string | length 40 |
| size | int64 | 5 to 2.06M |
| ext | string | 10 classes |
| lang | string | 1 class |
| max_stars_repo_path | string | length 3 to 248 |
| max_stars_repo_name | string | length 5 to 125 |
| max_stars_repo_head_hexsha | string | length 40 to 78 |
| max_stars_repo_licenses | list | length 1 to 10 |
| max_stars_count ⌀ | int64 | 1 to 191k |
| max_stars_repo_stars_event_min_datetime ⌀ | string | length 24 |
| max_stars_repo_stars_event_max_datetime ⌀ | string | length 24 |
| max_issues_repo_path | string | length 3 to 248 |
| max_issues_repo_name | string | length 5 to 125 |
| max_issues_repo_head_hexsha | string | length 40 to 78 |
| max_issues_repo_licenses | list | length 1 to 10 |
| max_issues_count ⌀ | int64 | 1 to 67k |
| max_issues_repo_issues_event_min_datetime ⌀ | string | length 24 |
| max_issues_repo_issues_event_max_datetime ⌀ | string | length 24 |
| max_forks_repo_path | string | length 3 to 248 |
| max_forks_repo_name | string | length 5 to 125 |
| max_forks_repo_head_hexsha | string | length 40 to 78 |
| max_forks_repo_licenses | list | length 1 to 10 |
| max_forks_count ⌀ | int64 | 1 to 105k |
| max_forks_repo_forks_event_min_datetime ⌀ | string | length 24 |
| max_forks_repo_forks_event_max_datetime ⌀ | string | length 24 |
| content | string | length 5 to 2.06M |
| avg_line_length | float64 | 1 to 1.02M |
| max_line_length | int64 | 3 to 1.03M |
| alphanum_fraction | float64 | 0 to 1 |
| count_classes | int64 | 0 to 1.6M |
| score_classes | float64 | 0 to 1 |
| count_generators | int64 | 0 to 651k |
| score_generators | float64 | 0 to 1 |
| count_decorators | int64 | 0 to 990k |
| score_decorators | float64 | 0 to 1 |
| count_async_functions | int64 | 0 to 235k |
| score_async_functions | float64 | 0 to 1 |
| count_documentation | int64 | 0 to 1.04M |
| score_documentation | float64 | 0 to 1 |
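The paired count_*/score_* columns attach a raw count and a normalised 0-to-1 score to each Python construct, which makes the schema convenient for quality filtering. Below is a minimal sketch of streaming rows with this schema using the `datasets` library; the dataset id is a placeholder assumption, not the real identifier.

```python
# Hypothetical usage sketch -- "user/python-code-dataset" is a placeholder id.
from datasets import load_dataset

ds = load_dataset("user/python-code-dataset", split="train", streaming=True)
for row in ds:
    # keep well-documented files with reasonable line lengths
    if row["score_documentation"] > 0.5 and row["max_line_length"] < 120:
        print(row["max_stars_repo_name"], row["max_stars_repo_path"])
        break
```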
**start.py** · AndrewJanuary/PM-Monitor @ 43548c805d2ee11ac54f8df874cc06be458454a8
- hexsha: e1c41ef577f4a05e6ab921f39322d64330728ff4 · size: 1,688 · ext: py · lang: Python · licenses: ["MIT"]
- stars: null · issues: 6 (2021-01-28T22:04:45.000Z to 2021-12-20T20:59:03.000Z) · forks: null

```python
from app.sensor import Sensor
from app.uploader import Uploader
from app.offline import Offline
import time, logging, argparse, sys
logging.basicConfig(filename='airquality.log', level=logging.DEBUG, filemode='a',
                    format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')
# Frame delimiters for the sensor's serial protocol: 0xAA head, 0xAB tail,
# 0xC0 data command. The original tail argument b'0xAB' was a four-character
# ASCII literal rather than the single byte 0xAB.
sen = Sensor('PM Sensor 1', '/dev/ttyUSB0', b'\xaa', b'\xab', b'\xc0')
def main():
parser = argparse.ArgumentParser()
parser.add_argument('-o','--offline', action="store_true")
    args = parser.parse_args(sys.argv[1:])  # argv[2:] skipped the first real argument
if args.offline:
start_offline()
else:
        try:
            start_online()
        except Exception:
            # avoid a bare except (it would also swallow KeyboardInterrupt)
            # and record why online mode failed before falling back
            logging.exception("Online mode failed, falling back to offline mode")
            start_offline()
def start_offline():
print("Starting Air Monitor in offline mode")
off = Offline('pm25', 'pm10')
while True:
data = sen.read_from_sensor()
sen.check_message(data)
pm_two_five = sen.get_pm_two_five(data)
pm_ten = sen.get_pm_ten(data)
off.write_pm_two_five(pm_two_five)
off.write_pm_ten(pm_ten)
def start_online():
print("Starting Air Monitor")
file = 'config.yml'
up = Uploader('AIO')
username, key, feed_two_five, feed_ten = up.read_config(file)
aio = up.connect_to_aio(username, key)
while True:
up.get_feeds(aio)
data = sen.read_from_sensor()
sen.check_message(data)
pm_two_five = sen.get_pm_two_five(data)
pm_ten = sen.get_pm_ten(data)
up.send_to_aio(aio, feed_two_five, pm_two_five)
up.send_to_aio(aio, feed_ten, pm_ten)
print(up.retrieve_from_feed(aio, feed_two_five))
print(up.retrieve_from_feed(aio, feed_ten))
        time.sleep(60)

if __name__ == '__main__':
    main()
```
- avg_line_length: 30.690909 · max_line_length: 82 · alphanum_fraction: 0.64455
- classes: 0 (score 0) · generators: 0 (score 0) · decorators: 0 (score 0) · async functions: 0 (score 0) · documentation: 237 (score 0.140403)
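The delimiter bytes passed to `Sensor` above (0xAA head, 0xC0 data command, 0xAB tail) match the Nova SDS011 particulate sensor's serial protocol. Here is a hedged sketch of how a 10-byte measurement frame decodes under that assumption; `decode_frame` is illustrative and not part of the repo's `Sensor` class.

```python
# Illustrative SDS011 frame decoder, assuming the layout
# AA C0 pm25_lo pm25_hi pm10_lo pm10_hi id1 id2 checksum AB.
def decode_frame(frame: bytes):
    if len(frame) != 10 or frame[0] != 0xAA or frame[1] != 0xC0 or frame[-1] != 0xAB:
        raise ValueError("not a valid measurement frame")
    if sum(frame[2:8]) & 0xFF != frame[8]:
        raise ValueError("checksum mismatch")
    pm_two_five = (frame[2] | frame[3] << 8) / 10.0  # ug/m3
    pm_ten = (frame[4] | frame[5] << 8) / 10.0
    return pm_two_five, pm_ten
```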
**py_trees_ros/visitors.py** · geoc1234/py_trees_ros @ 65a055624f9261d67f0168ef419aa650302f96d0
- hexsha: e1c4600d073fba00b0a31f0113ee9536694f12a6 · size: 3,364 · ext: py · lang: Python · licenses: ["BSD-3-Clause"]
- stars: 65 (2019-05-01T08:21:42.000Z to 2022-03-23T15:49:55.000Z) · issues: 62 (2019-02-27T14:27:42.000Z to 2022-02-08T03:54:30.000Z) · forks: 23 (2019-03-03T17:09:59.000Z to 2022-01-06T03:07:59.000Z)

```python
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# License: BSD
# https://raw.githubusercontent.com/splintered-reality/py_trees_ros/devel/LICENSE
#
##############################################################################
# Documentation
##############################################################################
"""
ROS Visitors are entities that can be passed to a ROS tree implementation
(e.g. :class:`~py_trees_ros.trees.BehaviourTree`) and used to either visit
each and every behaviour in the tree, or visit behaviours as the tree is
traversed in an executing tick. At each behaviour, the visitor
runs its own method on the behaviour to do as it wishes (e.g. logging, introspecting).
.. warning:: Visitors should not modify the behaviours they visit.
.. seealso:: The base interface and core visitors in :mod:`py_trees.visitors`
"""
##############################################################################
# Imports
##############################################################################
import py_trees.visitors
import py_trees_ros_interfaces.msg as py_trees_msgs
import rclpy
import time
from . import conversions
##############################################################################
# Visitors
##############################################################################
class SetupLogger(py_trees.visitors.VisitorBase):
"""
Use as a visitor to :meth:`py_trees_ros.trees.TreeManager.setup`
    to log the name and setup timing of each behaviour
to the ROS debug channel.
Args:
        node: an rclpy node that provides the debug logger
"""
def __init__(self, node: rclpy.node.Node):
super().__init__(full=True)
self.node = node
def initialise(self):
"""
Initialise the timestamping chain.
"""
self.start_time = time.monotonic()
self.last_time = self.start_time
def run(self, behaviour):
current_time = time.monotonic()
self.node.get_logger().debug(
"'{}'.setup: {:.4f}s".format(behaviour.name, current_time - self.last_time)
)
self.last_time = current_time
def finalise(self):
current_time = time.monotonic()
self.node.get_logger().debug(
"Total tree setup time: {:.4f}s".format(current_time - self.start_time)
)
class TreeToMsgVisitor(py_trees.visitors.VisitorBase):
"""
Visits the entire tree and gathers all behaviours as
messages for the tree logging publishers.
Attributes:
tree (:class:`py_trees_msgs.msg.BehaviourTree`): tree representation in message form
"""
def __init__(self):
"""
Well
"""
super(TreeToMsgVisitor, self).__init__()
self.full = True # examine all nodes
def initialise(self):
"""
Initialise and stamp a :class:`py_trees_msgs.msg.BehaviourTree`
instance.
"""
self.tree = py_trees_msgs.BehaviourTree()
# TODO: crystal api
# self.tree.stamp = rclpy.clock.Clock.now().to_msg()
def run(self, behaviour):
"""
Convert the behaviour into a message and append to the tree.
Args:
behaviour (:class:`~py_trees.behaviour.Behaviour`): behaviour to convert
"""
self.tree.behaviours.append(conversions.behaviour_to_msg(behaviour))
```
- avg_line_length: 31.735849 · max_line_length: 92 · alphanum_fraction: 0.568668
- classes: 2,040 (score 0.606421) · generators: 0 (score 0) · decorators: 0 (score 0) · async functions: 0 (score 0) · documentation: 2,148 (score 0.638526)
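Both visitors above follow the same `VisitorBase` contract: `initialise()` before a tick, `run(behaviour)` per visited behaviour, `finalise()` afterwards, with `full=True` opting in to every behaviour rather than just the ticked path. A minimal sketch of a custom visitor built on that contract; `NameCollector` is illustrative and not part of py_trees_ros.

```python
import py_trees.visitors


class NameCollector(py_trees.visitors.VisitorBase):
    """Collect the name of every behaviour visited during a tick."""

    def __init__(self):
        super().__init__(full=True)  # visit all behaviours, not just the ticked path
        self.names = []

    def initialise(self):
        self.names = []  # reset before each tick

    def run(self, behaviour):
        # read-only, as the warning above requires: do not modify behaviours
        self.names.append(behaviour.name)
```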
**Jarvis/features/Friday_Blueprint.py** · faizeraza/Jarvis-Virtual-Assistant- @ da88fc0124e6020aff1030317dc3dc918f7aa017
- hexsha: e1c8c4baec324f5e5f8e13e03541f29a1a32842d · size: 11,394 · ext: py · lang: Python · licenses: ["MIT"]
- stars: 1 (2021-12-14T00:18:10.000Z) · issues: null · forks: 1 (2021-12-29T05:01:02.000Z)

```python
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'Friday_Blueprint.ui'
#
# Created by: PyQt5 UI code generator 5.15.4
#
# WARNING: Any manual changes made to this file will be lost when pyuic5 is
# run again. Do not edit this file unless you know what you are doing.
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_MainWindow(object):
def setupUi(self, MainWindow):
MainWindow.setObjectName("MainWindow")
MainWindow.resize(420, 650)
MainWindow.setSizeIncrement(QtCore.QSize(0, 0))
self.centralwidget = QtWidgets.QWidget(MainWindow)
self.centralwidget.setObjectName("centralwidget")
self.label = QtWidgets.QLabel(self.centralwidget)
self.label.setGeometry(QtCore.QRect(0, 0, 421, 651))
self.label.setText("")
self.label.setPixmap(QtGui.QPixmap("D:/jarvis/Jarvis/utils/images/see.jpg"))
self.label.setScaledContents(True)
self.label.setObjectName("label")
self.verticalLayoutWidget = QtWidgets.QWidget(self.centralwidget)
self.verticalLayoutWidget.setGeometry(QtCore.QRect(0, 0, 71, 651))
self.verticalLayoutWidget.setObjectName("verticalLayoutWidget")
self.verticalLayout_5 = QtWidgets.QVBoxLayout(self.verticalLayoutWidget)
self.verticalLayout_5.setSizeConstraint(QtWidgets.QLayout.SetMaximumSize)
self.verticalLayout_5.setContentsMargins(0, 0, 0, 0)
self.verticalLayout_5.setObjectName("verticalLayout_5")
self.pushButton_9 = QtWidgets.QPushButton(self.verticalLayoutWidget)
self.pushButton_9.setText("")
icon = QtGui.QIcon()
icon.addPixmap(QtGui.QPixmap("D:/jarvis/Jarvis/utils/images/user.png"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
self.pushButton_9.setIcon(icon)
self.pushButton_9.setIconSize(QtCore.QSize(30, 30))
self.pushButton_9.setAutoDefault(True)
self.pushButton_9.setDefault(True)
self.pushButton_9.setFlat(True)
self.pushButton_9.setObjectName("pushButton_9")
self.verticalLayout_5.addWidget(self.pushButton_9)
self.pushButton_10 = QtWidgets.QPushButton(self.verticalLayoutWidget)
self.pushButton_10.setText("")
icon1 = QtGui.QIcon()
icon1.addPixmap(QtGui.QPixmap("D:/jarvis/Jarvis/utils/images/data.png"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
self.pushButton_10.setIcon(icon1)
self.pushButton_10.setIconSize(QtCore.QSize(30, 30))
self.pushButton_10.setAutoDefault(True)
self.pushButton_10.setDefault(True)
self.pushButton_10.setFlat(True)
self.pushButton_10.setObjectName("pushButton_10")
self.verticalLayout_5.addWidget(self.pushButton_10)
self.pushButton_11 = QtWidgets.QPushButton(self.verticalLayoutWidget)
self.pushButton_11.setText("")
icon2 = QtGui.QIcon()
icon2.addPixmap(QtGui.QPixmap("D:/jarvis/Jarvis/utils/images/bot.jpg"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
self.pushButton_11.setIcon(icon2)
self.pushButton_11.setIconSize(QtCore.QSize(49, 30))
self.pushButton_11.setDefault(True)
self.pushButton_11.setFlat(True)
self.pushButton_11.setObjectName("pushButton_11")
self.verticalLayout_5.addWidget(self.pushButton_11)
self.pushButton_12 = QtWidgets.QPushButton(self.verticalLayoutWidget)
self.pushButton_12.setMinimumSize(QtCore.QSize(69, 0))
self.pushButton_12.setMaximumSize(QtCore.QSize(75, 16777215))
self.pushButton_12.setText("")
icon3 = QtGui.QIcon()
icon3.addPixmap(QtGui.QPixmap("D:/jarvis/Jarvis/utils/images/settings.png"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
self.pushButton_12.setIcon(icon3)
self.pushButton_12.setIconSize(QtCore.QSize(30, 30))
self.pushButton_12.setAutoDefault(True)
self.pushButton_12.setDefault(True)
self.pushButton_12.setFlat(True)
self.pushButton_12.setObjectName("pushButton_12")
self.verticalLayout_5.addWidget(self.pushButton_12)
spacerItem = QtWidgets.QSpacerItem(20, 151, QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Fixed)
self.verticalLayout_5.addItem(spacerItem)
spacerItem1 = QtWidgets.QSpacerItem(20, 69, QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Fixed)
self.verticalLayout_5.addItem(spacerItem1)
spacerItem2 = QtWidgets.QSpacerItem(13, 253, QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Fixed)
self.verticalLayout_5.addItem(spacerItem2)
self.pushButton_13 = QtWidgets.QPushButton(self.verticalLayoutWidget)
self.pushButton_13.setText("")
icon4 = QtGui.QIcon()
icon4.addPixmap(QtGui.QPixmap("D:/jarvis/Jarvis/utils/images/feedback.png"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
self.pushButton_13.setIcon(icon4)
self.pushButton_13.setIconSize(QtCore.QSize(40, 40))
self.pushButton_13.setDefault(True)
self.pushButton_13.setFlat(True)
self.pushButton_13.setObjectName("pushButton_13")
self.verticalLayout_5.addWidget(self.pushButton_13)
self.horizontalLayoutWidget = QtWidgets.QWidget(self.centralwidget)
self.horizontalLayoutWidget.setGeometry(QtCore.QRect(70, 600, 351, 51))
self.horizontalLayoutWidget.setObjectName("horizontalLayoutWidget")
self.horizontalLayout_4 = QtWidgets.QHBoxLayout(self.horizontalLayoutWidget)
self.horizontalLayout_4.setContentsMargins(0, 0, 0, 0)
self.horizontalLayout_4.setObjectName("horizontalLayout_4")
self.pushButton_14 = QtWidgets.QPushButton(self.horizontalLayoutWidget)
self.pushButton_14.setText("")
icon5 = QtGui.QIcon()
icon5.addPixmap(QtGui.QPixmap("D:/jarvis/Jarvis/utils/images/lens.svg"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
self.pushButton_14.setIcon(icon5)
self.pushButton_14.setIconSize(QtCore.QSize(40, 40))
self.pushButton_14.setAutoDefault(True)
self.pushButton_14.setDefault(True)
self.pushButton_14.setFlat(True)
self.pushButton_14.setObjectName("pushButton_14")
self.horizontalLayout_4.addWidget(self.pushButton_14)
spacerItem3 = QtWidgets.QSpacerItem(65, 15, QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Minimum)
self.horizontalLayout_4.addItem(spacerItem3)
self.label_2 = QtWidgets.QLabel(self.horizontalLayoutWidget)
        # self.label_2.setPixmap(QtGui.QPixmap("D:/jarvis/Jarvis/utils/images/Speak.gif"))
self.label_2.setText("waiting")
self.label_2.setScaledContents(True)
self.label_2.setObjectName("label_2")
self.horizontalLayout_4.addWidget(self.label_2)
spacerItem4 = QtWidgets.QSpacerItem(68, 15, QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Minimum)
self.horizontalLayout_4.addItem(spacerItem4)
self.pushButton_15 = QtWidgets.QPushButton(self.horizontalLayoutWidget)
self.pushButton_15.setText("")
icon6 = QtGui.QIcon()
icon6.addPixmap(QtGui.QPixmap("D:/jarvis/Jarvis/utils/images/mic.gif"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
self.pushButton_15.setIcon(icon6)
self.pushButton_15.setIconSize(QtCore.QSize(40, 40))
self.pushButton_15.setAutoDefault(True)
self.pushButton_15.setDefault(True)
self.pushButton_15.setFlat(True)
self.pushButton_15.setObjectName("pushButton_15")
self.horizontalLayout_4.addWidget(self.pushButton_15)
spacerItem5 = QtWidgets.QSpacerItem(10, 20, QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Minimum)
self.horizontalLayout_4.addItem(spacerItem5)
self.horizontalLayoutWidget_2 = QtWidgets.QWidget(self.centralwidget)
self.horizontalLayoutWidget_2.setGeometry(QtCore.QRect(70, 560, 351, 41))
self.horizontalLayoutWidget_2.setObjectName("horizontalLayoutWidget_2")
self.horizontalLayout_5 = QtWidgets.QHBoxLayout(self.horizontalLayoutWidget_2)
self.horizontalLayout_5.setSizeConstraint(QtWidgets.QLayout.SetNoConstraint)
self.horizontalLayout_5.setContentsMargins(0, 0, 0, 0)
self.horizontalLayout_5.setObjectName("horizontalLayout_5")
self.textEdit_2 = QtWidgets.QTextEdit(self.horizontalLayoutWidget_2)
self.textEdit_2.setObjectName("textEdit_2")
self.horizontalLayout_5.addWidget(self.textEdit_2)
self.pushButton_16 = QtWidgets.QPushButton(self.horizontalLayoutWidget_2)
self.pushButton_16.setText("")
icon7 = QtGui.QIcon()
icon7.addPixmap(QtGui.QPixmap("D:/jarvis/Jarvis/utils/images/send.png"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
self.pushButton_16.setIcon(icon7)
self.pushButton_16.setIconSize(QtCore.QSize(40, 40))
self.pushButton_16.setCheckable(False)
self.pushButton_16.setAutoRepeatDelay(300)
self.pushButton_16.setAutoDefault(True)
self.pushButton_16.setDefault(True)
self.pushButton_16.setFlat(True)
self.pushButton_16.setObjectName("pushButton_16")
self.horizontalLayout_5.addWidget(self.pushButton_16)
spacerItem6 = QtWidgets.QSpacerItem(10, 10, QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Minimum)
self.horizontalLayout_5.addItem(spacerItem6)
self.verticalLayoutWidget_2 = QtWidgets.QWidget(self.centralwidget)
self.verticalLayoutWidget_2.setGeometry(QtCore.QRect(70, 0, 351, 561))
self.verticalLayoutWidget_2.setObjectName("verticalLayoutWidget_2")
self.verticalLayout = QtWidgets.QVBoxLayout(self.verticalLayoutWidget_2)
self.verticalLayout.setContentsMargins(0, 0, 0, 0)
self.verticalLayout.setObjectName("verticalLayout")
self.textEdit = QtWidgets.QTextEdit(self.verticalLayoutWidget_2)
self.textEdit.setObjectName("textEdit")
self.verticalLayout.addWidget(self.textEdit)
self.label_3 = QtWidgets.QLabel(self.centralwidget)
self.label_3.setGeometry(QtCore.QRect(420, 0, 961, 741))
self.label_3.setText("")
self.label_3.setScaledContents(True)
self.label_3.setObjectName("label_3")
self.label_5 = QtWidgets.QLabel(self.centralwidget)
self.label_5.setGeometry(QtCore.QRect(0, 650, 421, 91))
self.label_5.setText("")
self.label_5.setPixmap(QtGui.QPixmap("D:/jarvis/Jarvis/utils/images/Recognizer.gif"))
self.label_5.setScaledContents(True)
self.label_5.setObjectName("label_5")
MainWindow.setCentralWidget(self.centralwidget)
self.movie = QtGui.QMovie("D:/jarvis/Jarvis/utils/images/AIassistant.gif")
self.label_3.setMovie(self.movie)
self.movie1 = QtGui.QMovie("D:/jarvis/Jarvis/utils/images/Recognizer.gif")
self.label_5.setMovie(self.movie1)
self.startAnimation()
self.retranslateUi(MainWindow)
QtCore.QMetaObject.connectSlotsByName(MainWindow)
def startAnimation(self):
self.movie.start()
self.movie1.start()
def retranslateUi(self, MainWindow):
_translate = QtCore.QCoreApplication.translate
MainWindow.setWindowTitle(_translate("MainWindow", "JARVIS"))
if __name__ == "__main__":
import sys
app = QtWidgets.QApplication(sys.argv)
MainWindow = QtWidgets.QMainWindow()
ui = Ui_MainWindow()
ui.setupUi(MainWindow)
MainWindow.show()
sys.exit(app.exec_())
```
- avg_line_length: 49.755459 · max_line_length: 121 · alphanum_fraction: 0.722924
- classes: 10,821 (score 0.94971) · generators: 0 (score 0) · decorators: 0 (score 0) · async functions: 0 (score 0) · documentation: 1,307 (score 0.114709)
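Because pyuic5 regenerates this module and discards manual edits (as its header warns), the usual pattern is to leave `Ui_MainWindow` untouched and compose it into a `QMainWindow` subclass where signals get wired. A sketch under that assumption; the slot and its behaviour are illustrative.

```python
import sys
from PyQt5 import QtWidgets
# assumes the generated module is importable as below:
# from Jarvis.features.Friday_Blueprint import Ui_MainWindow


class FridayWindow(QtWidgets.QMainWindow):
    def __init__(self):
        super().__init__()
        self.ui = Ui_MainWindow()
        self.ui.setupUi(self)
        # pushButton_15 is the mic button in the generated layout
        self.ui.pushButton_15.clicked.connect(self.on_mic_clicked)

    def on_mic_clicked(self):
        self.ui.label_2.setText("listening")  # illustrative handler


if __name__ == "__main__":
    app = QtWidgets.QApplication(sys.argv)
    window = FridayWindow()
    window.show()
    sys.exit(app.exec_())
```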
**pints/toy/_logistic_model.py** · iamleeg/pints @ bd1c11472ff3ec0990f3d55f0b2f20d92397926d
- hexsha: e1c8d5b0e59bc3cff42a51e6c70986bae9cb73c9 · size: 3,201 · ext: py · lang: Python · licenses: ["BSD-3-Clause"]
- stars: null · issues: null · forks: null

```python
#
# Logistic toy model.
#
# This file is part of PINTS.
# Copyright (c) 2017-2019, University of Oxford.
# For licensing information, see the LICENSE file distributed with the PINTS
# software package.
#
from __future__ import absolute_import, division
from __future__ import print_function, unicode_literals
import numpy as np
import pints
from . import ToyModel
class LogisticModel(pints.ForwardModelS1, ToyModel):
"""
Logistic model of population growth [1].
    .. math::
        f(t) &= \\frac{k}{1 + (k/p_0 - 1) \\exp(-r t)} \\\\
        \\frac{\\partial f(t)}{\\partial r} &=
            \\frac{k t (k / p_0 - 1) \\exp(-r t)}
            {((k/p_0 - 1) \\exp(-r t) + 1)^2} \\\\
        \\frac{\\partial f(t)}{\\partial k} &= -\\frac{k \\exp(-r t)}
            {p_0 ((k/p_0 - 1) \\exp(-r t) + 1)^2}
            + \\frac{1}{(k/p_0 - 1) \\exp(-r t) + 1}
    Has two parameters: a growth rate :math:`r` and a carrying capacity
    :math:`k`. The initial population size :math:`f(0) = p_0` can be set using
    the (optional) named constructor arg ``initial_population_size``.
    [1] https://en.wikipedia.org/wiki/Population_growth
*Extends:* :class:`pints.ForwardModel`, :class:`pints.toy.ToyModel`.
"""
def __init__(self, initial_population_size=2):
super(LogisticModel, self).__init__()
self._p0 = float(initial_population_size)
if self._p0 < 0:
raise ValueError('Population size cannot be negative.')
def n_parameters(self):
""" See :meth:`pints.ForwardModel.n_parameters()`. """
return 2
def simulate(self, parameters, times):
""" See :meth:`pints.ForwardModel.simulate()`. """
return self._simulate(parameters, times, False)
def simulateS1(self, parameters, times):
""" See :meth:`pints.ForwardModelS1.simulateS1()`. """
return self._simulate(parameters, times, True)
def _simulate(self, parameters, times, sensitivities):
r, k = [float(x) for x in parameters]
times = np.asarray(times)
if np.any(times < 0):
raise ValueError('Negative times are not allowed.')
if self._p0 == 0 or k < 0:
if sensitivities:
return np.zeros(times.shape), \
np.zeros((len(times), len(parameters)))
else:
return np.zeros(times.shape)
exp = np.exp(-r * times)
c = (k / self._p0 - 1)
values = k / (1 + c * exp)
if sensitivities:
dvalues_dp = np.empty((len(times), len(parameters)))
dvalues_dp[:, 0] = k * times * c * exp / (c * exp + 1)**2
dvalues_dp[:, 1] = -k * exp / \
(self._p0 * (c * exp + 1)**2) + 1 / (c * exp + 1)
return values, dvalues_dp
else:
return values
def suggested_parameters(self):
""" See :meth:`pints.toy.ToyModel.suggested_parameters()`. """
return np.array([0.1, 50])
def suggested_times(self):
""" See :meth:`pints.toy.ToyModel.suggested_times()`. """
return np.linspace(0, 100, 100)
```
- avg_line_length: 34.419355 · max_line_length: 79 · alphanum_fraction: 0.555764
- classes: 2,830 (score 0.884099) · generators: 0 (score 0) · decorators: 0 (score 0) · async functions: 0 (score 0) · documentation: 1,441 (score 0.450172)
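A usage sketch for the model above: simulate with the suggested parameters and spot-check the analytic sensitivity in `r` against a finite difference. Assumes `pints` is installed and `LogisticModel` is imported from `pints.toy`.

```python
import numpy as np
from pints.toy import LogisticModel

model = LogisticModel(initial_population_size=2)
params = model.suggested_parameters()   # [r, k] = [0.1, 50]
times = model.suggested_times()         # 100 points on [0, 100]
values, sens = model.simulateS1(params, times)

# finite-difference check of the df/dr column of the sensitivities
eps = 1e-6
bumped = model.simulate(params + np.array([eps, 0.0]), times)
assert np.allclose(sens[:, 0], (bumped - values) / eps, rtol=1e-3, atol=1e-6)
```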
**poo/pybank/bank.py** · fredsonchaves07/python-fundamentals @ 4aee479c48f86319a2041e35ea985f971393c2ce
- hexsha: e1ca3f7ea92eaa76db3dc052ef98666164a81b5e · size: 414 · ext: py · lang: Python · licenses: ["MIT"]
- stars: null · issues: null · forks: null

```python
class Bank:
def __init__(self):
self.__agencies = [1111, 2222, 3333]
self.__costumers = []
self.__accounts = []
def insert_costumers(self, costumer):
self.__costumers.append(costumer)
def insert_accounts(self, account):
self.__accounts.append(account)
    def authenticate(self, costumer):
        # hand the costumer back only if previously registered with the bank
        if costumer not in self.__costumers:
            return None
        return costumer
```
- avg_line_length: 25.875 · max_line_length: 44 · alphanum_fraction: 0.63285
- classes: 413 (score 0.997585) · generators: 0 (score 0) · decorators: 0 (score 0) · async functions: 0 (score 0) · documentation: 0 (score 0)
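A short usage sketch for the class above, relying on the completed `authenticate` (which hands back the registered costumer, or `None`):

```python
bank = Bank()
bank.insert_costumers("alice")

assert bank.authenticate("alice") == "alice"
assert bank.authenticate("mallory") is None
```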
**homeworks/vecutil.py** · JediKoder/coursera-CodeMatrix @ 1ac461d22ebaf2777eabdcf31d76d709c33f472a
- hexsha: e1cc98556e1e617de9737efeaed139473d56ebaf · size: 426 · ext: py · lang: Python · licenses: ["MIT"]
- stars: 3 (2018-01-11T07:48:06.000Z to 2020-04-27T20:49:02.000Z) · issues: null · forks: 1 (2021-01-26T07:25:48.000Z)

```python
# Copyright 2013 Philip N. Klein
from vec import Vec
def list2vec(L):
"""Given a list L of field elements, return a Vec with domain {0...len(L)-1}
whose entry i is L[i]
>>> list2vec([10, 20, 30])
Vec({0, 1, 2},{0: 10, 1: 20, 2: 30})
"""
return Vec(set(range(len(L))), {k:L[k] for k in range(len(L))})
def zero_vec(D):
"""Returns a zero vector with the given domain
"""
return Vec(D, {})
```
- avg_line_length: 25.058824 · max_line_length: 80 · alphanum_fraction: 0.577465
- classes: 0 (score 0) · generators: 0 (score 0) · decorators: 0 (score 0) · async functions: 0 (score 0) · documentation: 269 (score 0.631455)
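A short usage sketch, assuming the course's `Vec` class (`vec.py` from Coding the Matrix, with `.D` domain and `.f` entry dict) is on the path:

```python
v = list2vec([10, 20, 30])
z = zero_vec(v.D)   # zero vector over the same domain {0, 1, 2}
print(v.D)          # {0, 1, 2}
print(z.f)          # {} -- absent entries are treated as zero
```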
**squidpy/im/_container.py** · Emberwhirl/squidpy @ 456c49ac9149e16562617a8a4236a9faa2c0480d
- hexsha: e1ccdf7c98befd87dd0fafb60f1dfb6a4f453f5f · size: 59,998 · ext: py · lang: Python · licenses: ["BSD-3-Clause"]
- stars: 1 (2022-02-02T13:41:36.000Z) · issues: null · forks: null

```python
from __future__ import annotations
from copy import copy, deepcopy
from types import MappingProxyType
from typing import (
Any,
Union,
Mapping,
TypeVar,
Callable,
Iterable,
Iterator,
Sequence,
TYPE_CHECKING,
)
from pathlib import Path
from functools import partial
from itertools import chain
from typing_extensions import Literal
import re
import validators
from scanpy import logging as logg
from anndata import AnnData
from scanpy.plotting.palettes import default_102 as default_palette
from dask import delayed
import numpy as np
import xarray as xr
import dask.array as da
from matplotlib.colors import ListedColormap
import matplotlib as mpl
import matplotlib.pyplot as plt
from skimage.util import img_as_float
from skimage.transform import rescale
from squidpy._docs import d, inject_docs
from squidpy._utils import NDArrayA, singledispatchmethod
from squidpy.im._io import _lazy_load_image, _infer_dimensions, _assert_dims_present
from squidpy.gr._utils import (
_assert_in_range,
_assert_positive,
_assert_non_negative,
_assert_spatial_basis,
_assert_non_empty_sequence,
)
from squidpy.im._coords import (
CropCoords,
CropPadding,
_NULL_COORDS,
_NULL_PADDING,
TupleSerializer,
_update_attrs_scale,
_update_attrs_coords,
)
from squidpy.im._feature_mixin import FeatureMixin
from squidpy._constants._constants import InferDimensions
from squidpy._constants._pkg_constants import Key
FoI_t = Union[int, float]
Pathlike_t = Union[str, Path]
Arraylike_t = Union[NDArrayA, xr.DataArray]
InferDims_t = Union[Literal["default", "prefer_channels", "prefer_z"], Sequence[str]]
Input_t = Union[Pathlike_t, Arraylike_t, "ImageContainer"]
Interactive = TypeVar("Interactive") # cannot import because of cyclic dependencies
_ERROR_NOTIMPLEMENTED_LIBID = f"It seems there are multiple `library_id` in `adata.uns[{Key.uns.spatial!r}]`.\n \
Loading multiple images is not implemented (yet), please specify a `library_id`."
__all__ = ["ImageContainer"]
@d.dedent # trick to overcome not top-down order
@d.dedent
class ImageContainer(FeatureMixin):
"""
Container for in memory arrays or on-disk images.
Wraps :class:`xarray.Dataset` to store several image layers with the same `x`, `y` and `z` dimensions in one object.
Dimensions of stored images are ``(y, x, z, channels)``. The channel dimension may vary between image layers.
This class also allows for lazy loading and processing using :mod:`dask`, and is given to all image
processing functions, along with :class:`anndata.AnnData` instance, if necessary.
Parameters
----------
%(add_img.parameters)s
scale
Scaling factor of the image with respect to the spatial coordinates
saved in the accompanying :class:`anndata.AnnData`.
Raises
------
%(add_img.raises)s
"""
def __init__(
self,
img: Input_t | None = None,
layer: str = "image",
lazy: bool = True,
scale: float = 1.0,
**kwargs: Any,
):
self._data: xr.Dataset = xr.Dataset()
self._data.attrs[Key.img.coords] = _NULL_COORDS # can't save None to NetCDF
self._data.attrs[Key.img.padding] = _NULL_PADDING
self._data.attrs[Key.img.scale] = scale
self._data.attrs[Key.img.mask_circle] = False
if img is not None:
self.add_img(img, layer=layer, **kwargs)
if not lazy:
self.compute()
@classmethod
def concat(
cls,
imgs: Iterable[ImageContainer],
library_ids: Sequence[str | None] | None = None,
combine_attrs: str = "identical",
**kwargs: Any,
) -> ImageContainer:
"""
Concatenate ``imgs`` in Z-dimension.
All ``imgs`` need to have the same shape and the same name to be concatenated.
Parameters
----------
imgs
Images that should be concatenated in Z-dimension.
library_ids
Name for each image that will be associated to each Z-dimension. This should match the ``library_id``
in the corresponding :class:`anndata.AnnData` object.
If `None`, the existing name of the Z-dimension is used for each image.
combine_attrs
How to combine attributes of ``imgs``. By default, all ``imgs`` need to have the same scale
and crop attributes. Use ``combine_attrs = 'override'`` to relax this requirement.
This might lead to a mismatch between :class:`ImageContainer` and :class:`anndata.AnnData` coordinates.
kwargs
Keyword arguments for :func:`xarray.concat`.
Returns
-------
Concatenated :class:`squidpy.img.ImageContainer` with ``imgs`` stacks in Z-dimension.
Raises
------
ValueError
If any of the ``imgs`` have more than 1 Z-dimension or if ``library_ids`` are not unique.
"""
# check that imgs are not already 3d
imgs = list(imgs)
for img in imgs:
if img.data.dims["z"] > 1:
raise ValueError(
f"Currently, can concatenate only images with 1 Z-dimension, found `{img.data.dims['z']}`."
)
# check library_ids
if library_ids is None:
library_ids = [None] * len(imgs)
if len(library_ids) != len(imgs):
raise ValueError(f"Expected library ids to be of length `{len(imgs)}`, found `{len(library_ids)}`.")
_library_ids = np.concatenate(
[img._get_library_ids(library_id, allow_new=True) for img, library_id in zip(imgs, library_ids)]
)
if len(set(_library_ids)) != len(_library_ids):
raise ValueError(f"Found non-unique library ids `{list(_library_ids)}`.")
# add library_id to z dim
prep_imgs = []
for lid, img in zip(_library_ids, imgs):
prep_img = img.copy()
prep_img._data = prep_img.data.assign_coords(z=[lid])
prep_imgs.append(prep_img)
return cls._from_dataset(
xr.concat([img.data for img in prep_imgs], dim="z", combine_attrs=combine_attrs, **kwargs)
)
@classmethod
def load(cls, path: Pathlike_t, lazy: bool = True, chunks: int | None = None) -> ImageContainer:
"""
Load data from a *Zarr* store.
Parameters
----------
path
Path to *Zarr* store.
lazy
Whether to use :mod:`dask` to lazily load image.
chunks
Chunk size for :mod:`dask`. Only used when ``lazy = True``.
Returns
-------
The loaded container.
"""
res = cls()
res.add_img(path, layer="image", chunks=chunks, lazy=True)
return res if lazy else res.compute()
def save(self, path: Pathlike_t, **kwargs: Any) -> None:
"""
Save the container into a *Zarr* store.
Parameters
----------
path
Path to a *Zarr* store.
Returns
-------
Nothing, just saves the container.
"""
attrs = self.data.attrs
try:
self._data = self.data.load() # if we're loading lazily and immediately saving
self.data.attrs = {
k: (v.to_tuple() if isinstance(v, TupleSerializer) else v) for k, v in self.data.attrs.items()
}
            self.data.to_zarr(str(path), mode="w", **kwargs)
finally:
self.data.attrs = attrs
@d.get_sections(base="add_img", sections=["Parameters", "Raises"])
@d.dedent
@inject_docs(id=InferDimensions)
def add_img(
self,
img: Input_t,
layer: str | None = None,
dims: InferDims_t = InferDimensions.DEFAULT.s,
library_id: str | Sequence[str] | None = None,
lazy: bool = True,
chunks: str | tuple[int, ...] | None = None,
copy: bool = True,
**kwargs: Any,
) -> None:
"""
Add a new image to the container.
Parameters
----------
img
In-memory 2, 3 or 4-dimensional array, a URL to a *Zarr* store (ending in *.zarr*),
or a path to an on-disk image.
%(img_layer)s
dims
Where to save channel dimension when reading from a file or loading an array. Valid options are:
- `{id.CHANNELS_LAST.s!r}` - load the last non-spatial dimension as channels.
- `{id.Z_LAST.s!r}` - load the last non-spatial dimension as Z-dimension.
- `{id.DEFAULT.s!r}` - same as `{id.CHANNELS_LAST.s!r}`, but for 4-dimensional arrays,
tries to also load the first dimension as channels if the last non-spatial dimension is 1.
- a sequence of dimension names matching the shape of ``img``, e.g. ``('y', 'x', 'z', 'channels')``.
`'y'`, `'x'` and `'z'` must always be present.
library_id
Name for each Z-dimension of the image. This should correspond to the ``library_id``
in :attr:`anndata.AnnData.uns`.
lazy
Whether to use :mod:`dask` to lazily load image.
chunks
Chunk size for :mod:`dask`. Only used when ``lazy = True``.
copy
Whether to copy the underlying data if ``img`` is an in-memory array.
Returns
-------
Nothing, just adds a new ``layer`` to :attr:`data`.
Raises
------
ValueError
If loading from a file/store with an unknown format or if a supplied channel dimension cannot be aligned.
NotImplementedError
If loading a specific data type has not been implemented.
"""
layer = self._get_next_image_id("image") if layer is None else layer
dims: InferDimensions | Sequence[str] = ( # type: ignore[no-redef]
InferDimensions(dims) if isinstance(dims, str) else dims
)
res: xr.DataArray | None = self._load_img(img, chunks=chunks, layer=layer, copy=copy, dims=dims, **kwargs)
if res is not None:
library_id = self._get_library_ids(library_id, res, allow_new=not len(self))
try:
res = res.assign_coords({"z": library_id})
except ValueError as e:
if "conflicting sizes for dimension 'z'" not in str(e):
raise
# at this point, we know the container is not empty
raise ValueError(
f"Expected image to have `{len(self.library_ids)}` Z-dimension(s), found `{res.sizes['z']}`."
) from None
if TYPE_CHECKING:
assert isinstance(res, xr.DataArray)
logg.info(f"{'Overwriting' if layer in self else 'Adding'} image layer `{layer}`")
try:
self.data[layer] = res
except ValueError as e:
c_dim = res.dims[-1]
if f"along dimension {str(c_dim)!r} cannot be aligned" not in str(e):
raise
channel_dim = self._get_next_channel_id(res)
logg.warning(f"Channel dimension cannot be aligned with an existing one, using `{channel_dim}`")
self.data[layer] = res.rename({res.dims[-1]: channel_dim})
if not lazy:
self.compute(layer)
@singledispatchmethod
def _load_img(self, img: Pathlike_t | Input_t | ImageContainer, layer: str, **kwargs: Any) -> xr.DataArray | None:
if isinstance(img, ImageContainer):
if layer not in img:
raise KeyError(f"Image identifier `{layer}` not found in `{img}`.")
_ = kwargs.pop("dims", None)
return self._load_img(img[layer], **kwargs)
raise NotImplementedError(f"Loading `{type(img).__name__}` is not yet implemented.")
@_load_img.register(str)
@_load_img.register(Path)
def _(
self,
img_path: Pathlike_t,
chunks: int | None = None,
dims: InferDimensions | tuple[str, ...] = InferDimensions.DEFAULT,
**_: Any,
) -> xr.DataArray | None:
def transform_metadata(data: xr.Dataset) -> xr.Dataset:
for key, img in data.items():
if len(img.dims) != 4:
data[key] = img = img.expand_dims({"z": 1}, axis=-2) # assume only channel dim is present
_assert_dims_present(img.dims, include_z=True)
data.attrs[Key.img.coords] = CropCoords.from_tuple(data.attrs.get(Key.img.coords, _NULL_COORDS.to_tuple()))
data.attrs[Key.img.padding] = CropPadding.from_tuple(
data.attrs.get(Key.img.padding, _NULL_PADDING.to_tuple())
)
data.attrs.setdefault(Key.img.mask_circle, False)
data.attrs.setdefault(Key.img.scale, 1)
return data
img_path = str(img_path)
is_url, suffix = validators.url(img_path), Path(img_path).suffix.lower()
logg.debug(f"Loading data from `{img_path}`")
if not is_url and not Path(img_path).exists():
raise OSError(f"Path `{img_path}` does not exist.")
if suffix in (".jpg", ".jpeg", ".png", ".tif", ".tiff"):
return _lazy_load_image(img_path, dims=dims, chunks=chunks)
if suffix == ".zarr" or Path(img_path).is_dir(): # can also be a URL
if len(self._data):
raise ValueError("Loading data from `Zarr` store is disallowed when the container is not empty.")
self._data = transform_metadata(xr.open_zarr(img_path, chunks=chunks))
elif suffix in (".nc", ".cdf"):
if len(self._data):
raise ValueError("Loading data from `NetCDF` is disallowed when the container is not empty.")
self._data = transform_metadata(xr.open_dataset(img_path, chunks=chunks))
else:
raise ValueError(f"Unable to handle path `{img_path}`.")
@_load_img.register(da.Array)
@_load_img.register(np.ndarray)
def _(
self,
img: NDArrayA,
copy: bool = True,
dims: InferDimensions | tuple[str, ...] = InferDimensions.DEFAULT,
**_: Any,
) -> xr.DataArray:
logg.debug(f"Loading `numpy.array` of shape `{img.shape}`")
return self._load_img(xr.DataArray(img), copy=copy, dims=dims, warn=False)
@_load_img.register(xr.DataArray)
def _(
self,
img: xr.DataArray,
copy: bool = True,
warn: bool = True,
dims: InferDimensions | tuple[str, ...] = InferDimensions.DEFAULT,
**_: Any,
) -> xr.DataArray:
logg.debug(f"Loading `xarray.DataArray` of shape `{img.shape}`")
img = img.copy() if copy else img
if not ("y" in img.dims and "x" in img.dims and "z" in img.dims):
_, dims, _, expand_axes = _infer_dimensions(img, infer_dimensions=dims)
if TYPE_CHECKING:
assert isinstance(dims, Iterable)
if warn:
logg.warning(f"Unable to find `y`, `x` or `z` dimension in `{img.dims}`. Renaming to `{dims}`")
# `axes` is always of length 0, 1 or 2
if len(expand_axes):
dimnames = ("z", "channels") if len(expand_axes) == 2 else (("channels",) if "z" in dims else ("z",))
img = img.expand_dims([d for _, d in zip(expand_axes, dimnames)], axis=expand_axes)
img = img.rename(dict(zip(img.dims, dims)))
return img.transpose("y", "x", "z", ...)
@classmethod
@d.dedent
def from_adata(
cls,
adata: AnnData,
img_key: str | None = None,
library_id: Sequence[str] | str | None = None,
spatial_key: str = Key.uns.spatial,
**kwargs: Any,
) -> ImageContainer:
"""
Load an image from :mod:`anndata` object.
Parameters
----------
%(adata)s
img_key
Key in :attr:`anndata.AnnData.uns` ``['{spatial_key}']['{library_id}']['images']``.
If `None`, the first key found is used.
library_id
Key in :attr:`anndata.AnnData.uns` ``['{spatial_key}']`` specifying which library to access.
spatial_key
Key in :attr:`anndata.AnnData.uns` where spatial metadata is stored.
kwargs
Keyword arguments for :class:`squidpy.im.ImageContainer`.
Returns
-------
The image container.
"""
library_id = Key.uns.library_id(adata, spatial_key, library_id)
if not isinstance(library_id, str):
raise NotImplementedError(_ERROR_NOTIMPLEMENTED_LIBID)
spatial_data = adata.uns[spatial_key][library_id]
if img_key is None:
try:
img_key = next(k for k in spatial_data.get("images", []))
except StopIteration:
raise KeyError(f"No images found in `adata.uns[{spatial_key!r}][{library_id!r}]['images']`") from None
img: NDArrayA | None = spatial_data.get("images", {}).get(img_key, None)
if img is None:
raise KeyError(
f"Unable to find the image in `adata.uns[{spatial_key!r}][{library_id!r}]['images'][{img_key!r}]`."
)
scale = spatial_data.get("scalefactors", {}).get(f"tissue_{img_key}_scalef", None)
if scale is None and "scale" not in kwargs:
logg.warning(
f"Unable to determine the scale factor from "
f"`adata.uns[{spatial_key!r}][{library_id!r}]['scalefactors']['tissue_{img_key}_scalef']`, "
f"using `1.0`. Consider specifying it manually as `scale=...`"
)
scale = 1.0
kwargs.setdefault("scale", scale)
return cls(img, layer=img_key, library_id=library_id, **kwargs)
@d.get_sections(base="crop_corner", sections=["Parameters", "Returns"])
@d.dedent
def crop_corner(
self,
y: FoI_t,
x: FoI_t,
size: FoI_t | tuple[FoI_t, FoI_t] | None = None,
library_id: str | None = None,
scale: float = 1.0,
cval: int | float = 0,
mask_circle: bool = False,
preserve_dtypes: bool = True,
) -> ImageContainer:
"""
Extract a crop from the upper-left corner.
Parameters
----------
%(yx)s
%(size)s
library_id
Name of the Z-dimension to be cropped. If `None`, all Z-dimensions are cropped.
scale
Rescale the crop using :func:`skimage.transform.rescale`.
cval
Fill value to use if ``mask_circle = True`` or if crop goes out of the image boundary.
mask_circle
Whether to mask out values that are not within a circle defined by this crop.
Only available if ``size`` defines a square.
        preserve_dtypes
            Whether to preserve the data types of the underlying :class:`xarray.DataArray`, even if ``cval``
is of different type.
Returns
-------
The cropped image of size ``size * scale``.
Raises
------
ValueError
If the crop would completely lie outside of the image or if ``mask_circle = True`` and
``size`` does not define a square.
Notes
-----
If ``preserve_dtypes = True`` but ``cval`` cannot be safely cast, ``cval`` will be set to 0.
"""
self._assert_not_empty()
y, x = self._convert_to_pixel_space((y, x))
size = self._get_size(size)
size = self._convert_to_pixel_space(size)
ys, xs = size
_assert_positive(ys, name="height")
_assert_positive(xs, name="width")
_assert_positive(scale, name="scale")
orig = CropCoords(x0=x, y0=y, x1=x + xs, y1=y + ys)
ymin, xmin = self.shape
coords = CropCoords(
x0=min(max(x, 0), xmin), y0=min(max(y, 0), ymin), x1=min(x + xs, xmin), y1=min(y + ys, ymin)
)
if not coords.dy:
raise ValueError("Height of the crop is empty.")
if not coords.dx:
raise ValueError("Width of the crop is empty.")
crop = self.data.isel(x=slice(coords.x0, coords.x1), y=slice(coords.y0, coords.y1)).copy(deep=False)
if len(crop.z) > 1:
crop = crop.sel(z=self._get_library_ids(library_id))
crop.attrs = _update_attrs_coords(crop.attrs, coords)
if orig != coords:
padding = orig - coords
# because padding does not change dtype by itself
for key, arr in crop.items():
if preserve_dtypes:
if not np.can_cast(cval, arr.dtype, casting="safe"):
cval = 0
else:
crop[key] = crop[key].astype(np.dtype(type(cval)), copy=False)
crop = crop.pad(
y=(padding.y_pre, padding.y_post),
x=(padding.x_pre, padding.x_post),
mode="constant",
constant_values=cval,
)
crop.attrs[Key.img.padding] = padding
else:
crop.attrs[Key.img.padding] = _NULL_PADDING
return self._from_dataset(
self._post_process(
data=crop, scale=scale, cval=cval, mask_circle=mask_circle, preserve_dtypes=preserve_dtypes
)
)
def _post_process(
self,
data: xr.Dataset,
scale: FoI_t = 1,
cval: FoI_t = 0,
mask_circle: bool = False,
preserve_dtypes: bool = True,
**_: Any,
) -> xr.Dataset:
def _rescale(arr: xr.DataArray) -> xr.DataArray:
scaling_fn = partial(
rescale, scale=[scale, scale, 1], preserve_range=True, order=1, channel_axis=-1, cval=cval
)
dtype = arr.dtype
if isinstance(arr.data, da.Array):
shape = np.maximum(np.round(scale * np.asarray(arr.shape)), 1)
shape[-1] = arr.shape[-1]
shape[-2] = arr.shape[-2]
return xr.DataArray(
da.from_delayed(delayed(lambda arr: scaling_fn(arr).astype(dtype))(arr), shape=shape, dtype=dtype),
dims=arr.dims,
)
return xr.DataArray(scaling_fn(arr).astype(dtype), dims=arr.dims)
if scale != 1:
attrs = data.attrs
library_ids = data.coords["z"]
data = data.map(_rescale).assign_coords({"z": library_ids})
data.attrs = _update_attrs_scale(attrs, scale)
if mask_circle:
if data.dims["y"] != data.dims["x"]:
raise ValueError(
f"Masking circle is only available for square crops, "
f"found crop of shape `{(data.dims['y'], data.dims['x'])}`."
)
c = data.x.shape[0] // 2
# manually reassign coordinates
library_ids = data.coords["z"]
data = data.where((data.x - c) ** 2 + (data.y - c) ** 2 <= c**2, other=cval).assign_coords(
{"z": library_ids}
)
data.attrs[Key.img.mask_circle] = True
if preserve_dtypes:
for key, arr in self.data.items():
data[key] = data[key].astype(arr.dtype, copy=False)
return data
@d.dedent
def crop_center(
self,
y: FoI_t,
x: FoI_t,
radius: FoI_t | tuple[FoI_t, FoI_t],
**kwargs: Any,
) -> ImageContainer:
"""
Extract a circular crop.
The extracted crop will have shape ``(radius[0] * 2 + 1, radius[1] * 2 + 1)``.
Parameters
----------
%(yx)s
radius
Radius along the ``height`` and ``width`` dimensions, respectively.
kwargs
Keyword arguments for :meth:`crop_corner`.
Returns
-------
%(crop_corner.returns)s
"""
y, x = self._convert_to_pixel_space((y, x))
_assert_in_range(y, 0, self.shape[0], name="height")
_assert_in_range(x, 0, self.shape[1], name="width")
if not isinstance(radius, Iterable):
radius = (radius, radius)
(yr, xr) = self._convert_to_pixel_space(radius)
_assert_non_negative(yr, name="radius height")
_assert_non_negative(xr, name="radius width")
return self.crop_corner( # type: ignore[no-any-return]
y=y - yr, x=x - xr, size=(yr * 2 + 1, xr * 2 + 1), **kwargs
)
@d.dedent
def generate_equal_crops(
self,
size: FoI_t | tuple[FoI_t, FoI_t] | None = None,
as_array: str | bool = False,
squeeze: bool = True,
**kwargs: Any,
) -> Iterator[ImageContainer] | Iterator[dict[str, NDArrayA]]:
"""
Decompose image into equally sized crops.
Parameters
----------
%(size)s
%(as_array)s
squeeze
Remove singleton dimensions from the results if ``as_array = True``.
kwargs
Keyword arguments for :meth:`crop_corner`.
Yields
------
The crops, whose type depends on ``as_array``.
Notes
-----
        Crops going outside of the image boundary are padded with ``cval``.
"""
self._assert_not_empty()
size = self._get_size(size)
size = self._convert_to_pixel_space(size)
y, x = self.shape
ys, xs = size
_assert_in_range(ys, 0, y, name="height")
_assert_in_range(xs, 0, x, name="width")
unique_ycoord = np.arange(start=0, stop=(y // ys + (y % ys != 0)) * ys, step=ys)
unique_xcoord = np.arange(start=0, stop=(x // xs + (x % xs != 0)) * xs, step=xs)
ycoords = np.repeat(unique_ycoord, len(unique_xcoord))
xcoords = np.tile(unique_xcoord, len(unique_ycoord))
for y, x in zip(ycoords, xcoords):
yield self.crop_corner(y=y, x=x, size=(ys, xs), **kwargs)._maybe_as_array(
as_array, squeeze=squeeze, lazy=True
)
@d.dedent
def generate_spot_crops(
self,
adata: AnnData,
spatial_key: str = Key.obsm.spatial,
library_id: Sequence[str] | str | None = None,
spot_diameter_key: str = "spot_diameter_fullres",
spot_scale: float = 1.0,
obs_names: Iterable[Any] | None = None,
as_array: str | bool = False,
squeeze: bool = True,
return_obs: bool = False,
**kwargs: Any,
) -> (
Iterator[ImageContainer] | Iterator[NDArrayA] | Iterator[tuple[NDArrayA, ...]] | Iterator[dict[str, NDArrayA]]
):
"""
Iterate over :attr:`anndata.AnnData.obs_names` and extract crops.
Implemented for 10X spatial datasets.
For Z-stacks, the specified ``library_id`` or list of ``library_id`` need to match the name of the Z-dimension.
Always extracts 2D crops from the specified Z-dimension.
Parameters
----------
%(adata)s
%(spatial_key)s
%(img_library_id)s
spot_diameter_key
Key in :attr:`anndata.AnnData.uns` ``['{spatial_key}']['{library_id}']['scalefactors']``
where the spot diameter is stored.
spot_scale
Scaling factor for the spot diameter. Larger values mean more context.
obs_names
Observations from :attr:`anndata.AnnData.obs_names` for which to generate the crops.
If `None`, all observations are used.
%(as_array)s
squeeze
Remove singleton dimensions from the results if ``as_array = True``.
return_obs
Whether to also yield names from ``obs_names``.
kwargs
Keyword arguments for :meth:`crop_center`.
Yields
------
If ``return_obs = True``, yields a :class:`tuple` ``(crop, obs_name)``. Otherwise, yields just the crops.
The type of the crops depends on ``as_array`` and the number of dimensions on ``squeeze``.
"""
self._assert_not_empty()
_assert_positive(spot_scale, name="scale")
_assert_spatial_basis(adata, spatial_key)
# limit to obs_names
if obs_names is None:
obs_names = adata.obs_names
obs_names = _assert_non_empty_sequence(obs_names, name="observations")
adata = adata[obs_names, :]
scale = self.data.attrs.get(Key.img.scale, 1)
spatial = adata.obsm[spatial_key][:, :2]
if library_id is None:
try:
library_id = Key.uns.library_id(adata, spatial_key=spatial_key, library_id=None)
if not isinstance(library_id, str):
raise NotImplementedError(_ERROR_NOTIMPLEMENTED_LIBID)
obs_library_ids = [library_id] * adata.n_obs
except ValueError as e:
if "Unable to determine which library id to use" in str(e):
raise ValueError(
str(e)
+ " Or specify a key in `adata.obs` containing a mapping from observations to library ids."
)
else:
raise e
else:
try:
obs_library_ids = adata.obs[library_id]
except KeyError:
logg.debug(
f"Unable to find library ids in `adata.obs[{library_id!r}]`. "
f"Trying in `adata.uns[{spatial_key!r}]`"
)
library_id = Key.uns.library_id(adata, spatial_key=spatial_key, library_id=library_id)
if not isinstance(library_id, str):
raise NotImplementedError(_ERROR_NOTIMPLEMENTED_LIBID)
obs_library_ids = [library_id] * adata.n_obs
lids = set(obs_library_ids)
if len(self.data.z) > 1 and len(lids) == 1:
logg.warning(
f"ImageContainer has `{len(self.data.z)}` Z-dimensions, using library id `{next(iter(lids))}` for all"
)
if adata.n_obs != len(obs_library_ids):
raise ValueError(f"Expected library ids to be of length `{adata.n_obs}`, found `{len(obs_library_ids)}`.")
for i, (obs, lid) in enumerate(zip(adata.obs_names, obs_library_ids)):
# get spot diameter of current obs (might be different library ids)
diameter = (
Key.uns.spot_diameter(
adata, spatial_key=spatial_key, library_id=lid, spot_diameter_key=spot_diameter_key
)
* scale
)
radius = int(round(diameter // 2 * spot_scale))
# get coords in image pixel space from original space
y = int(spatial[i][1] * scale)
x = int(spatial[i][0] * scale)
# if CropCoords exist, need to offset y and x
if self.data.attrs.get(Key.img.coords, _NULL_COORDS) != _NULL_COORDS:
y = int(y - self.data.attrs[Key.img.coords].y0)
x = int(x - self.data.attrs[Key.img.coords].x0)
crop = self.crop_center(y=y, x=x, radius=radius, library_id=obs_library_ids[i], **kwargs)
crop.data.attrs[Key.img.obs] = obs
crop = crop._maybe_as_array(as_array, squeeze=squeeze, lazy=False)
yield (crop, obs) if return_obs else crop
@classmethod
@d.get_sections(base="uncrop", sections=["Parameters", "Returns"])
def uncrop(
cls,
crops: list[ImageContainer],
shape: tuple[int, int] | None = None,
) -> ImageContainer:
"""
Re-assemble image from crops and their positions.
Fills remaining positions with zeros.
Parameters
----------
crops
List of image crops.
shape
Requested image shape as ``(height, width)``. If `None`, it is automatically determined from ``crops``.
Returns
-------
Re-assembled image from ``crops``.
Raises
------
ValueError
If crop metadata was not found or if the requested ``shape`` is smaller than required by ``crops``.
"""
if not len(crops):
raise ValueError("No crops were supplied.")
keys = set(crops[0].data.keys())
scales = set()
dy, dx = -1, -1
for crop in crops:
if set(crop.data.keys()) != keys:
raise KeyError(f"Expected to find `{sorted(keys)}` keys, found `{sorted(crop.data.keys())}`.")
coord = crop.data.attrs.get(Key.img.coords, None)
if coord is None:
raise ValueError("Crop does not have coordinate metadata.")
if coord == _NULL_COORDS:
raise ValueError(f"Null coordinates detected `{coord}`.")
scales.add(crop.data.attrs.get(Key.img.scale, None))
dy, dx = max(dy, coord.y0 + coord.dy), max(dx, coord.x0 + coord.dx)
scales.discard(None)
if len(scales) != 1:
raise ValueError(f"Unable to uncrop images of different scales `{sorted((scales))}`.")
scale, *_ = scales
if shape is None:
shape = (dy, dx)
# can be float because coords can be scaled
shape = tuple(map(int, shape)) # type: ignore[assignment]
if len(shape) != 2:
raise ValueError(f"Expected `shape` to be of length `2`, found `{len(shape)}`.")
if shape < (dy, dx):
raise ValueError(f"Requested final image shape `{shape}`, but minimal is `({dy}, {dx})`.")
# create resulting dataset
dataset = xr.Dataset()
dataset.attrs[Key.img.scale] = scale
for key in keys:
img = crop.data[key]
# get shape for this DataArray
dataset[key] = xr.DataArray(
np.zeros(shape + tuple(img.shape[2:]), dtype=img.dtype), dims=img.dims, coords=img.coords
)
# fill data with crops
for crop in crops:
coord = crop.data.attrs[Key.img.coords]
padding = crop.data.attrs.get(Key.img.padding, _NULL_PADDING) # maybe warn
dataset[key][coord.slice] = crop[key][coord.to_image_coordinates(padding=padding).slice]
return cls._from_dataset(dataset)
@d.dedent
def show(
self,
layer: str | None = None,
library_id: str | Sequence[str] | None = None,
channel: int | Sequence[int] | None = None,
channelwise: bool = False,
segmentation_layer: str | None = None,
segmentation_alpha: float = 0.75,
transpose: bool | None = None,
ax: mpl.axes.Axes | None = None,
figsize: tuple[float, float] | None = None,
dpi: int | None = None,
save: Pathlike_t | None = None,
**kwargs: Any,
) -> None:
"""
Show an image within this container.
Parameters
----------
%(img_layer)s
library_id
            Name of the Z-dimension to plot. If `None`, plot all Z-dimensions as separate images.
channel
Channels to plot. If `None`, use all channels.
channelwise
Whether to plot each channel separately or not.
segmentation_layer
Segmentation layer to plot over each ax.
segmentation_alpha
Alpha value for ``segmentation_layer``.
transpose
Whether to plot Z-dimensions in columns or in rows. If `None`, it will be set to ``not channelwise``.
ax
Optional :mod:`matplotlib` axes where to plot the image.
If not `None`, ``save``, ``figsize`` and ``dpi`` have no effect.
%(plotting)s
kwargs
Keyword arguments for :meth:`matplotlib.axes.Axes.imshow`.
Returns
-------
%(plotting_returns)s
Raises
------
ValueError
If number of supplied axes is different than the number of requested Z-dimensions or channels.
"""
from squidpy.pl._utils import save_fig
layer = self._get_layer(layer)
arr: xr.DataArray = self[layer]
library_ids = self._get_library_ids(library_id)
arr = arr.sel(z=library_ids)
if channel is not None:
channel = np.asarray([channel]).ravel() # type: ignore[assignment]
if not len(channel): # type: ignore[arg-type]
raise ValueError("No channels have been selected.")
arr = arr[{arr.dims[-1]: channel}]
else:
channel = np.arange(arr.shape[-1])
if TYPE_CHECKING:
assert isinstance(channel, Sequence)
n_channels = arr.shape[-1]
if n_channels not in (1, 3, 4) and not channelwise:
logg.warning(f"Unable to plot image with `{n_channels}`. Setting `channelwise=True`")
channelwise = True
if transpose is None:
transpose = not channelwise
fig = None
nrows, ncols = len(library_ids), (n_channels if channelwise else 1)
if transpose:
nrows, ncols = ncols, nrows
if ax is None:
fig, ax = plt.subplots(
nrows=nrows,
ncols=ncols,
figsize=(8, 8) if figsize is None else figsize,
dpi=dpi,
tight_layout=True,
squeeze=False,
)
elif isinstance(ax, mpl.axes.Axes):
ax = np.array([ax])
ax = np.asarray(ax)
try:
ax = ax.reshape(nrows, ncols)
except ValueError:
raise ValueError(f"Expected `ax` to be of shape `{(nrows, ncols)}`, found `{ax.shape}`.") from None
if segmentation_layer is not None:
seg_arr = self[segmentation_layer].sel(z=library_ids)
if not seg_arr.attrs.get("segmentation", False):
raise TypeError(f"Expected layer `{segmentation_layer!r}` to be marked as segmentation layer.")
if not np.issubdtype(seg_arr.dtype, np.integer):
raise TypeError(
f"Expected segmentation layer `{segmentation_layer!r}` to be of integer type, "
f"found `{seg_arr.dtype}`."
)
seg_arr = seg_arr.values
seg_cmap = np.array(default_palette, dtype=object)[np.arange(np.max(seg_arr)) % len(default_palette)]
seg_cmap[0] = "#00000000" # transparent background
seg_cmap = ListedColormap(seg_cmap)
else:
seg_arr, seg_cmap = None, None
for z, row in enumerate(ax):
for c, ax_ in enumerate(row):
if transpose:
z, c = c, z
title = layer
if channelwise:
img = arr[..., z, c]
title += f":{channel[c]}"
else:
img = arr[..., z, :]
if len(self.data.coords["z"]) > 1:
title += f", library_id:{library_ids[z]}"
ax_.imshow(img_as_float(img.values, force_copy=False), **kwargs)
if seg_arr is not None:
ax_.imshow(
seg_arr[:, :, z, ...],
cmap=seg_cmap,
interpolation="nearest", # avoid artifacts
alpha=segmentation_alpha,
**{k: v for k, v in kwargs.items() if k not in ("cmap", "interpolation")},
)
ax_.set_title(title)
ax_.set_axis_off()
if save and fig is not None:
save_fig(fig, save)
@d.get_sections(base="_interactive", sections=["Parameters"])
@d.dedent
def interactive(
self,
adata: AnnData,
spatial_key: str = Key.obsm.spatial,
library_key: str | None = None,
library_id: str | Sequence[str] | None = None,
cmap: str = "viridis",
palette: str | None = None,
blending: Literal["opaque", "translucent", "additive"] = "opaque",
symbol: Literal["disc", "square"] = "disc",
key_added: str = "shapes",
) -> Interactive:
"""
Launch :mod:`napari` viewer.
Parameters
----------
%(adata)s
%(spatial_key)s
library_key
Key in :attr:`adata.AnnData.obs` specifying mapping between observations and library ids.
Required if the container has more than 1 Z-dimension.
library_id
Subset of library ids to visualize. If `None`, visualize all library ids.
cmap
Colormap for continuous variables.
palette
Colormap for categorical variables in :attr:`anndata.AnnData.obs`. If `None`, use :mod:`scanpy`'s default.
blending
Method which determines how RGB and alpha values of :class:`napari.layers.Shapes` are mixed.
symbol
Symbol to use for the spots. Valid options are:
- `'disc'` - circle.
- `'square'` - square.
key_added
Key where to store :class:`napari.layers.Shapes`, which can be exported by pressing `SHIFT-E`:
- :attr:`anndata.AnnData.obs` ``['{layer_name}_{key_added}']`` - boolean mask containing the selected
cells.
- :attr:`anndata.AnnData.uns` ``['{layer_name}_{key_added}']['meshes']`` - list of :class:`numpy.array`,
defining a mesh in the spatial coordinates.
See :mod:`napari`'s `tutorial <https://napari.org/howtos/layers/shapes.html>`_ for more
information about different mesh types, such as circles, squares etc.
Returns
-------
Interactive view of this container. Screenshot of the canvas can be taken by
:meth:`squidpy.pl.Interactive.screenshot`.
"""
from squidpy.pl import Interactive # type: ignore[attr-defined]
return Interactive( # type: ignore[no-any-return]
img=self,
adata=adata,
spatial_key=spatial_key,
library_key=library_key,
library_id=library_id,
cmap=cmap,
palette=palette,
blending=blending,
key_added=key_added,
symbol=symbol,
).show()
@d.dedent
def apply(
self,
func: Callable[..., NDArrayA] | Mapping[str, Callable[..., NDArrayA]],
layer: str | None = None,
new_layer: str | None = None,
channel: int | None = None,
lazy: bool = False,
chunks: str | tuple[int, int] | None = None,
copy: bool = True,
drop: bool = True,
fn_kwargs: Mapping[str, Any] = MappingProxyType({}),
**kwargs: Any,
) -> ImageContainer | None:
"""
Apply a function to a layer within this container.
        For each Z-dimension, a different function can be defined, using its ``library_id`` name.
        The identity function is applied to any ``library_id`` not mentioned.
Parameters
----------
func
A function or a mapping of ``{'{library_id}': function}`` which takes a :class:`numpy.ndarray` as input
and produces an image-like output.
%(img_layer)s
new_layer
Name of the new layer. If `None` and ``copy = False``, overwrites the data in ``layer``.
channel
Apply ``func`` only over a specific ``channel``. If `None`, use all channels.
chunks
Chunk size for :mod:`dask`. If `None`, don't use :mod:`dask`.
%(copy_cont)s
drop
Whether to drop Z-dimensions that were not selected by ``func``. Only used when ``copy = True``.
fn_kwargs
Keyword arguments for ``func``.
kwargs
            Keyword arguments for :func:`dask.array.map_overlap` or :func:`dask.array.map_blocks`, depending on
            whether ``depth`` is present in ``kwargs``. Only used when ``chunks != None``.
            Use ``depth`` to control boundary artifacts if ``func`` requires data from neighboring chunks;
            by default, ``boundary = 'reflect'`` is used.
Returns
-------
        If ``copy = True``, returns a new container with the result stored in ``new_layer``.
Raises
------
ValueError
            If ``func`` returns a 0- or 1-dimensional array.
"""
def apply_func(func: Callable[..., NDArrayA], arr: xr.DataArray) -> NDArrayA | da.Array:
if chunks is None:
return func(arr.data, **fn_kwargs)
arr = da.asarray(arr.data).rechunk(chunks)
return (
da.map_overlap(func, arr, **fn_kwargs, **kwargs)
if "depth" in kwargs
else da.map_blocks(func, arr, **fn_kwargs, **kwargs, dtype=arr.dtype)
)
if "depth" in kwargs:
kwargs.setdefault("boundary", "reflect")
layer = self._get_layer(layer)
if new_layer is None:
new_layer = layer
arr = self[layer]
library_ids = list(arr.coords["z"].values)
dims, channel_dim = arr.dims, arr.dims[-1]
if channel is not None:
arr = arr[{channel_dim: channel}]
if callable(func):
res = apply_func(func, arr)
new_library_ids = library_ids
else:
res = {}
noop_library_ids = [] if copy and drop else list(set(library_ids) - set(func.keys()))
for key, fn in func.items():
res[key] = apply_func(fn, arr.sel(z=key))
for key in noop_library_ids:
res[key] = arr.sel(z=key).data
new_library_ids = [lid for lid in library_ids if lid in res]
try:
res = da.stack([res[lid] for lid in new_library_ids], axis=2)
except ValueError as e:
if not len(noop_library_ids) or "must have the same shape" not in str(e):
# processing functions returned wrong shape
raise ValueError(
"Unable to stack an array because functions returned arrays of different shapes."
) from e
# funcs might have changed channel dims, replace noops with 0
logg.warning(
f"Function changed the number of channels, cannot use identity "
f"for library ids `{noop_library_ids}`. Replacing with 0"
)
# TODO(michalk8): once (or if) Z-dim is not fixed, always drop ids
tmp = next(iter(res.values()))
for lid in noop_library_ids:
res[lid] = (np.zeros_like if chunks is None else da.zeros_like)(tmp)
res = da.stack([res[lid] for lid in new_library_ids], axis=2)
if res.ndim == 2: # assume that dims are y, x
res = res[..., np.newaxis]
if res.ndim == 3: # assume dims are y, x, z (changing of z dim is not supported)
res = res[..., np.newaxis]
if res.ndim != 4:
raise ValueError(f"Expected `2`, `3` or `4` dimensional array, found `{res.ndim}`.")
if copy:
cont = ImageContainer(
res,
layer=new_layer,
copy=True,
lazy=lazy,
dims=dims,
library_id=new_library_ids,
)
cont.data.attrs = self.data.attrs.copy()
return cont
self.add_img(
res,
layer=new_layer,
lazy=lazy,
copy=new_layer != layer,
dims=dims,
library_id=new_library_ids,
)
@d.dedent
def subset(self, adata: AnnData, spatial_key: str = Key.obsm.spatial, copy: bool = False) -> AnnData:
"""
Subset :class:`anndata.AnnData` using this container.
Useful when this container is a crop of the original image.
Parameters
----------
%(adata)s
%(spatial_key)s
copy
Whether to return a copy of ``adata``.
Returns
-------
Subset of :class:`anndata.AnnData`.
"""
c: CropCoords = self.data.attrs.get(Key.img.coords, _NULL_COORDS)
if c == _NULL_COORDS: # not a crop
return adata.copy() if copy else adata
_assert_spatial_basis(adata, spatial_key)
coordinates = adata.obsm[spatial_key]
coordinates = coordinates * self.data.attrs.get(Key.img.scale, 1)
mask = (
(coordinates[:, 0] >= c.x0)
& (coordinates[:, 0] <= c.x1)
& (coordinates[:, 1] >= c.y0)
& (coordinates[:, 1] <= c.y1)
)
return adata[mask, :].copy() if copy else adata[mask, :]
def rename(self, old: str, new: str) -> ImageContainer:
"""
Rename a layer.
Parameters
----------
old
Name of the layer to rename.
new
New name.
Returns
-------
Modifies and returns self.
"""
self._data = self.data.rename_vars({old: new})
return self
def compute(self, layer: str | None = None) -> ImageContainer:
"""
Trigger lazy computation in-place.
Parameters
----------
layer
Layer which to compute. If `None`, compute all layers.
Returns
-------
Modifies and returns self.
"""
if layer is None:
self.data.load()
else:
self[layer].load()
return self
@property
def library_ids(self) -> list[str]:
"""Library ids."""
try:
return list(map(str, self.data.coords["z"].values))
except KeyError:
return []
@library_ids.setter
def library_ids(self, library_ids: str | Sequence[str] | Mapping[str, str]) -> None:
"""Set library ids."""
if isinstance(library_ids, Mapping):
library_ids = [str(library_ids.get(lid, lid)) for lid in self.library_ids]
elif isinstance(library_ids, str):
library_ids = (library_ids,)
library_ids = list(map(str, library_ids))
if len(set(library_ids)) != len(library_ids):
raise ValueError(f"Remapped library ids must be unique, found `{library_ids}`.")
self._data = self.data.assign_coords({"z": library_ids})
@property
def data(self) -> xr.Dataset:
"""Underlying :class:`xarray.Dataset`."""
return self._data
@property
def shape(self) -> tuple[int, int]:
"""Image shape ``(y, x)``."""
if not len(self):
return 0, 0
return self.data.dims["y"], self.data.dims["x"]
def copy(self, deep: bool = False) -> ImageContainer:
"""
Return a copy of self.
Parameters
----------
deep
Whether to make a deep copy or not.
Returns
-------
Copy of self.
"""
return deepcopy(self) if deep else copy(self)
@classmethod
def _from_dataset(cls, data: xr.Dataset, deep: bool | None = None) -> ImageContainer:
"""
Utility function used for initialization.
Parameters
----------
data
The :class:`xarray.Dataset` to use.
deep
If `None`, don't copy the ``data``. If `True`, make a deep copy of the data, otherwise, make a shallow copy.
Returns
-------
The newly created container.
""" # noqa: D401
res = cls()
res._data = data if deep is None else data.copy(deep=deep)
res._data.attrs.setdefault(Key.img.coords, _NULL_COORDS) # can't save None to NetCDF
res._data.attrs.setdefault(Key.img.padding, _NULL_PADDING)
res._data.attrs.setdefault(Key.img.scale, 1.0)
res._data.attrs.setdefault(Key.img.mask_circle, False)
return res
def _maybe_as_array(
self,
as_array: str | Sequence[str] | bool = False,
squeeze: bool = True,
lazy: bool = True,
) -> ImageContainer | dict[str, NDArrayA] | NDArrayA | tuple[NDArrayA, ...]:
res = self
if as_array:
# do not trigger dask computation
res = {key: (res[key].data if lazy else res[key].values) for key in res} # type: ignore[assignment]
if squeeze:
axis = (2,) if len(self.data.z) == 1 else ()
res = {
k: v.squeeze(axis=axis + ((3,) if v.shape[-1] == 1 else ()))
for k, v in res.items() # type: ignore[assignment,attr-defined]
}
# this is just for convenience for DL iterators
if isinstance(as_array, str):
res = res[as_array]
elif isinstance(as_array, Sequence):
res = tuple(res[key] for key in as_array) # type: ignore[assignment]
if lazy:
return res
return res.compute() if isinstance(res, ImageContainer) else res
def _get_next_image_id(self, layer: str) -> str:
pat = re.compile(rf"^{layer}_(\d*)$")
iterator = chain.from_iterable(pat.finditer(k) for k in self.data.keys())
return f"{layer}_{(max(map(lambda m: int(m.groups()[0]), iterator), default=-1) + 1)}"
def _get_next_channel_id(self, channel: str | xr.DataArray) -> str:
if isinstance(channel, xr.DataArray):
channel, *_ = (str(dim) for dim in channel.dims if dim not in ("y", "x", "z"))
pat = re.compile(rf"^{channel}_(\d*)$")
iterator = chain.from_iterable(pat.finditer(v.dims[-1]) for v in self.data.values())
return f"{channel}_{(max(map(lambda m: int(m.groups()[0]), iterator), default=-1) + 1)}"
def _get_library_id(self, library_id: str | None = None) -> str:
self._assert_not_empty()
if library_id is None:
if len(self.library_ids) > 1:
raise ValueError(
f"Unable to determine which library id to use. Please supply one from `{self.library_ids}`."
)
library_id = self.library_ids[0]
if library_id not in self.library_ids:
raise KeyError(f"Library id `{library_id}` not found in `{self.library_ids}`.")
return library_id
def _get_library_ids(
self,
library_id: str | Sequence[str] | None = None,
arr: xr.DataArray | None = None,
allow_new: bool = False,
) -> list[str]:
"""
Get library ids.
Parameters
----------
library_id
Requested library ids.
arr
If the current container is empty, try getting the library ids from the ``arr``.
allow_new
If `True`, don't check if the returned library ids are present in the non-empty container.
This is set to `True` only in :meth:`concat` to allow for remapping.
Returns
-------
The library ids.
"""
if library_id is None:
if len(self):
library_id = self.library_ids
elif isinstance(arr, xr.DataArray):
try:
library_id = list(arr.coords["z"].values)
except (KeyError, AttributeError) as e:
logg.warning(f"Unable to retrieve library ids, reason `{e}`. Using default names")
# at this point, it should have Z-dim
library_id = [str(i) for i in range(arr.sizes["z"])]
else:
raise ValueError("Please specify the number of library ids if the container is empty.")
if isinstance(library_id, str):
library_id = [library_id]
if not isinstance(library_id, Iterable):
raise TypeError(f"Expected library ids to be `iterable`, found `{type(library_id).__name__!r}`.")
res = list(map(str, library_id))
if not len(res):
raise ValueError("No library ids have been selected.")
if not allow_new and len(self) and not (set(res) & set(self.library_ids)):
raise ValueError(f"Invalid library ids have been selected `{res}`. Valid options are `{self.library_ids}`.")
return res
def _get_layer(self, layer: str | None) -> str:
self._assert_not_empty()
if layer is None:
if len(self) > 1:
raise ValueError(
f"Unable to determine which layer to use. Please supply one from `{sorted(self.data.keys())}`."
)
layer = list(self)[0]
if layer not in self:
raise KeyError(f"Image layer `{layer}` not found in `{sorted(self)}`.")
return layer
def _assert_not_empty(self) -> None:
if not len(self):
raise ValueError("The container is empty.")
def _get_size(self, size: FoI_t | tuple[FoI_t | None, FoI_t | None] | None) -> tuple[FoI_t, FoI_t]:
if size is None:
size = (None, None)
if not isinstance(size, Iterable):
size = (size, size)
res = list(size)
if size[0] is None:
res[0] = self.shape[0]
if size[1] is None:
res[1] = self.shape[1]
return tuple(res) # type: ignore[return-value]
def _convert_to_pixel_space(self, size: tuple[FoI_t, FoI_t]) -> tuple[int, int]:
y, x = size
if isinstance(y, float):
_assert_in_range(y, 0, 1, name="y")
y = int(self.shape[0] * y)
if isinstance(x, float):
_assert_in_range(x, 0, 1, name="x")
x = int(self.shape[1] * x)
return y, x
def __delitem__(self, key: str) -> None:
del self.data[key]
def __iter__(self) -> Iterator[str]:
yield from self.data.keys()
def __len__(self) -> int:
return len(self.data)
def __getitem__(self, key: str) -> xr.DataArray:
return self.data[key]
def __setitem__(self, key: str, value: NDArrayA | xr.DataArray | da.Array) -> None:
if not isinstance(value, (np.ndarray, xr.DataArray, da.Array)):
raise NotImplementedError(f"Adding `{type(value).__name__}` is not yet implemented.")
self.add_img(value, layer=key, copy=True)
def _ipython_key_completions_(self) -> Iterable[str]:
return sorted(map(str, self.data.keys()))
def __copy__(self) -> ImageContainer:
return type(self)._from_dataset(self.data, deep=False)
def __deepcopy__(self, memodict: Mapping[str, Any] = MappingProxyType({})) -> ImageContainer:
return type(self)._from_dataset(self.data, deep=True)
def _repr_html_(self) -> str:
import html
if not len(self):
return f"{self.__class__.__name__} object with 0 layers"
inflection = "" if len(self) <= 1 else "s"
s = f"{self.__class__.__name__} object with {len(self.data.keys())} layer{inflection}:"
style = "text-indent: 25px; margin-top: 0px; margin-bottom: 0px;"
for i, layer in enumerate(self.data.keys()):
s += f"<p style={style!r}><strong>{html.escape(str(layer))}</strong>: "
s += ", ".join(
f"<em>{html.escape(str(dim))}</em> ({shape})"
for dim, shape in zip(self.data[layer].dims, self.data[layer].shape)
)
s += "</p>"
if i == 9 and i < len(self) - 1: # show only first 10 layers
s += f"<p style={style!r}>and {len(self) - i - 1} more...</p>"
break
return s
def __repr__(self) -> str:
return f"{self.__class__.__name__}[shape={self.shape}, layers={sorted(self.data.keys())}]"
def __str__(self) -> str:
return repr(self)
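# A minimal usage sketch (hypothetical layer name; ``img`` is a (y, x, channels)
# numpy array), using only methods defined on this class:
#
#     ic = ImageContainer()
#     ic["image"] = img                      # routed through add_img()
#     ic.rename("image", "raw")
#     print(ic.shape, ic.library_ids, sorted(ic))
#     smoothed = ic.apply(lambda a: a * 0.5, layer="raw", new_layer="scaled")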
| 37.173482
| 120
| 0.561252
| 57,865
| 0.964449
| 6,881
| 0.114687
| 57,925
| 0.965449
| 0
| 0
| 22,706
| 0.378446
|
e1cd563f597751eb051e125f9959363e2f96050c
| 397
|
py
|
Python
|
users/forms.py
|
kurosh-wss/Personal-Finance-Management
|
9c7c467b95999974492df19a0f0286809f877c87
|
[
"MIT"
] | null | null | null |
users/forms.py
|
kurosh-wss/Personal-Finance-Management
|
9c7c467b95999974492df19a0f0286809f877c87
|
[
"MIT"
] | null | null | null |
users/forms.py
|
kurosh-wss/Personal-Finance-Management
|
9c7c467b95999974492df19a0f0286809f877c87
|
[
"MIT"
] | null | null | null |
from django import forms
from django.contrib.auth.forms import UserCreationForm
from crispy_bootstrap5.bootstrap5 import FloatingField
from crispy_forms.layout import Layout
from crispy_forms.helper import FormHelper
class CustomUserCreationForm(UserCreationForm):
email = forms.EmailField()
class Meta(UserCreationForm.Meta):
fields = UserCreationForm.Meta.fields + ("email",)
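    def __init__(self, *args, **kwargs):
        # Sketch: wire up the otherwise unused crispy-forms imports above.
        # Field names assume the default UserCreationForm fields plus "email".
        super().__init__(*args, **kwargs)
        self.helper = FormHelper()
        self.helper.layout = Layout(
            FloatingField("username"),
            FloatingField("email"),
            FloatingField("password1"),
            FloatingField("password2"),
        )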
| 30.538462
| 58
| 0.806045
| 177
| 0.445844
| 0
| 0
| 0
| 0
| 0
| 0
| 7
| 0.017632
|
e1cdbef2e0091f3d12ceeeebb7a6739477ce69ea
| 1,214
|
py
|
Python
|
tutorzzz_reminder/reminder.py
|
xiebei1108/tools
|
8a3141e5d97305f4438e828c62eb7be512767aa9
|
[
"Apache-2.0"
] | null | null | null |
tutorzzz_reminder/reminder.py
|
xiebei1108/tools
|
8a3141e5d97305f4438e828c62eb7be512767aa9
|
[
"Apache-2.0"
] | null | null | null |
tutorzzz_reminder/reminder.py
|
xiebei1108/tools
|
8a3141e5d97305f4438e828c62eb7be512767aa9
|
[
"Apache-2.0"
] | null | null | null |
import json
import requests
import config
assignedIdList = list()
def __getList():
HEADERS = {
'Cookie': config.tutorzzzCookie,
'Content-Type': 'application/json'
}
res = requests.post(config.tutorzzzURL, headers = HEADERS, json = config.tutorzzzReqBody)
if res.status_code == 200:
try:
body = res.json()
        except ValueError:  # response body was not valid JSON
print("[ERROR]: tutorzzz cookie expired")
return
        if body['msg'] == '操作成功':  # "operation succeeded"
return body['data']['data']
def __filter():
wanted = []
assignList = __getList()
    if assignList is None:
return
for al in assignList:
        if al['orderStatus'] == '招募中' and al['id'] not in assignedIdList:  # '招募中' = "recruiting"
d = {}
d['id'] = al['id']
d['title'] = al['title']
d['devPrice'] = al['devPrice']
wanted.append(d)
assignedIdList.append(d['id'])
return wanted
def remind():
wanted = __filter()
    if wanted is None:
        return
    if len(wanted) == 0:
        return '尚无招募任务'  # "no recruiting tasks yet"
    content = '招募中任务\n'  # header: "tasks currently recruiting"
for a in wanted:
content += a['id'] + '\t' + a['title'] + '\t' + a['devPrice'] + '\n'
return content
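# Minimal manual run (assumes config.py provides tutorzzzCookie, tutorzzzURL
# and tutorzzzReqBody as referenced above):
if __name__ == '__main__':
    print(remind())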
| 25.829787
| 93
| 0.528007
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 251
| 0.2008
|
e1ce81097515faf4dfa5b55142281e9cb5ff0a2c
| 3,315
|
py
|
Python
|
invoicing/crud/invoice_crud.py
|
dnegreira/Invoicing
|
0bc8133e989f095c10151f67482e249416274947
|
[
"MIT"
] | null | null | null |
invoicing/crud/invoice_crud.py
|
dnegreira/Invoicing
|
0bc8133e989f095c10151f67482e249416274947
|
[
"MIT"
] | null | null | null |
invoicing/crud/invoice_crud.py
|
dnegreira/Invoicing
|
0bc8133e989f095c10151f67482e249416274947
|
[
"MIT"
] | null | null | null |
from invoicing.crud.base_crud import BaseCrud
from invoicing.latex.latex_invoice import LatexInvoice
from invoicing.models.invoice_model import InvoiceModel
from invoicing.repository.invoice_repository import InvoiceRepository
from invoicing.repository.job_repository import JobRepository
from invoicing.ui.date import Date
from invoicing.ui.menu import Menu
from invoicing.ui.style import Style
from invoicing.value_validation.value_validation import Validation
class InvoiceCrud(BaseCrud):
def __init__(self):
super().__init__('Invoices', InvoiceRepository, InvoiceModel)
self.menu_actions.add_action('Generate', self.generate)
def make_paginated_menu(self):
return self.paginated_menu(
find=self.repository.find_paginated_join_clients_and_companies,
find_by_id=self.repository.find_by_id_join_clients_and_companies
)
def generate(self):
print(Style.create_title('Generate Invoice'))
invoice = self.make_paginated_menu()
if invoice:
jobRepository = JobRepository()
jobs = jobRepository.find_jobs_by_invoice_id(invoice['id'])
self.enter_billable_time(jobRepository, jobs)
jobs = jobRepository.find_jobs_by_invoice_id(invoice['id'])
invoice_data = self.make_invoice_dictionary(invoice, jobs)
LatexInvoice().generate(**invoice_data)
self.mark_invoiced_jobs_as_complete(jobRepository, jobs)
Menu.wait_for_input()
def enter_billable_time(self, jobRepository, jobs):
print(Style.create_title('Enter Billable Time'))
for job in jobs:
print('Title: %s' % job['title'])
print('Description: %s' % job['description'])
print('Estimated Time: %s' % job['estimated_time'])
print('Logged Time: %s' % job['actual_time'])
billable = ''
while not Validation.isFloat(billable):
billable = input('Billable Time: ')
jobRepository.update_billable_time(job['id'], billable)
jobRepository.save()
jobRepository.check_rows_updated('Job Updated')
def make_invoice_dictionary(self, invoice, jobs):
invoice_data = {
'reference_code': invoice['reference_code'],
'company_name': invoice['company_name'],
'company_address': invoice['company_address'],
'created_at': Date().convert_date_time_for_printing(invoice['created_at']),
'total_cost': str(sum([float(job['rate']) * float(job['billable_time']) for job in jobs])),
'jobs': [{
'title': job['title'],
'description': job['description'],
'type': 'hours',
'billable_time': str(job['billable_time']),
'staff_rate': str(job['rate']),
'cost': str(float(job['rate']) * float(job['billable_time']))
} for job in jobs]
}
return invoice_data
def mark_invoiced_jobs_as_complete(self, jobRepository, jobs):
if len(jobs):
for job in jobs:
jobRepository.update_mark_as_complete(job['id'])
jobRepository.save()
jobRepository.check_rows_updated('The selected jobs have been marked as completed')
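# Usage sketch (assumes the repositories above are backed by a configured
# database; generate() drives the whole invoicing workflow interactively):
#
#     InvoiceCrud().generate()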
| 44.2
| 103
| 0.650075
| 2,848
| 0.859125
| 0
| 0
| 0
| 0
| 0
| 0
| 555
| 0.167421
|
e1cfe37a6f7f6e565038ee9ac5851b8cdd75207b
| 946
|
py
|
Python
|
ds3225_client.py
|
kim-tom/dbus_server
|
b16d1b47dfe4d699ef0177592ba528ba988f17be
|
[
"MIT"
] | null | null | null |
ds3225_client.py
|
kim-tom/dbus_server
|
b16d1b47dfe4d699ef0177592ba528ba988f17be
|
[
"MIT"
] | null | null | null |
ds3225_client.py
|
kim-tom/dbus_server
|
b16d1b47dfe4d699ef0177592ba528ba988f17be
|
[
"MIT"
] | null | null | null |
from ds3225 import DS3225
import dbus
import dbus.mainloop.glib
import dbus.service
from gi.repository import GObject, GLib
UNLOCKED_DEG = 175
dbus.mainloop.glib.DBusGMainLoop(set_as_default=True)
BUS_NAME = 'jp.kimura.DS3225Service'
OBJECT_PATH = '/jp/kimura/DS3225Server'
INTERFACE = 'jp.kimura.DS3225'
class DS3225Client(dbus.service.Object):
def __init__(self):
bus = dbus.SessionBus()
bus_name = dbus.service.BusName(BUS_NAME, bus)
super(DS3225Client, self).__init__(bus_name, OBJECT_PATH)
self._proxy = bus.get_object(BUS_NAME, OBJECT_PATH)
def get_pos(self):
return self._proxy.get_pos()
def set_pos(self, pos):
self._proxy.set_pos(pos)
if __name__ == '__main__':
import time
ds3225_client = DS3225Client()
while True:
ds3225_client.set_pos(UNLOCKED_DEG)
time.sleep(2)
ds3225_client.set_pos(UNLOCKED_DEG-90)
time.sleep(2)
| 27.823529
| 65
| 0.701903
| 407
| 0.430233
| 0
| 0
| 0
| 0
| 0
| 0
| 78
| 0.082452
|
e1d05a453d3d0e33ff80baf493eec26c3cbe59f9
| 437
|
py
|
Python
|
Extra kunskap/Kod/Farmen.py
|
abbindustrigymnasium/Programmering-1-Slutuppgift
|
679069ebb632ee59f6b4ee3035c18ae204cde145
|
[
"Apache-2.0"
] | null | null | null |
Extra kunskap/Kod/Farmen.py
|
abbindustrigymnasium/Programmering-1-Slutuppgift
|
679069ebb632ee59f6b4ee3035c18ae204cde145
|
[
"Apache-2.0"
] | null | null | null |
Extra kunskap/Kod/Farmen.py
|
abbindustrigymnasium/Programmering-1-Slutuppgift
|
679069ebb632ee59f6b4ee3035c18ae204cde145
|
[
"Apache-2.0"
] | 1
|
2020-03-09T12:04:31.000Z
|
2020-03-09T12:04:31.000Z
|
import openpyxl
wb= openpyxl.load_workbook('Farmen.xlsx')
# sheet= wb.active
# print(wb.get_sheet_names())
# Deltagar_sheet= wb.get_sheet_by_name('Deltagare')
# artists=[{"Namn":sheet.cell(row=i, column=2).value,
# "Sång":sheet.cell(row=i, column=3).value,
# "Poäng":sheet.cell(row=i, column=6).value,
# "Röst":sheet.cell(row=i, column=5).value
# } for i in range(2,sheet.max_row) ]
# print(artists)
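# Note: get_sheet_names()/get_sheet_by_name() in the commented code above are
# deprecated in newer openpyxl versions; wb.sheetnames and wb['Deltagare'] are
# the current equivalents.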
| 33.615385
| 54
| 0.649886
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 381
| 0.865909
|
e1d12741ffdd5338667faa2590522d4debf232f5
| 3,437
|
py
|
Python
|
archive/simple_nn.py
|
petebond/MarketPlaceML
|
347ea5eab84673b846c85c58ce6c525e3f1dd0ff
|
[
"CC0-1.0"
] | null | null | null |
archive/simple_nn.py
|
petebond/MarketPlaceML
|
347ea5eab84673b846c85c58ce6c525e3f1dd0ff
|
[
"CC0-1.0"
] | null | null | null |
archive/simple_nn.py
|
petebond/MarketPlaceML
|
347ea5eab84673b846c85c58ce6c525e3f1dd0ff
|
[
"CC0-1.0"
] | null | null | null |
from os import access
import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
from torch.utils.data import DataLoader
from torchvision import datasets, transforms
import numpy as np
import matplotlib.pyplot as plt
# Create fully connected neural network
class NN(nn.Module):
def __init__(self, input_size, num_classes):
super(NN, self).__init__()
self.fc1 = nn.Linear(input_size, 50)
self.fc2 = nn.Linear(50, num_classes)
def forward(self, x):
x = F.relu(self.fc1(x))
x = self.fc2(x)
return x
class CNN(nn.Module):
def __init__(self, in_channels=1, num_classes=10):
super(CNN, self).__init__()
self.conv1 = nn.Conv2d(in_channels=1, out_channels=8, kernel_size=(3,3), stride=(1,1), padding=(1,1))
self.pool = nn.MaxPool2d(kernel_size = (2, 2), stride=(2,2))
self.conv2 = nn.Conv2d(in_channels=8, out_channels=16, kernel_size=(3,3), stride=(1,1), padding=(1,1))
self.fc1 = nn.Linear(16 * 7 * 7, num_classes)
def forward(self, x):
x = self.pool(F.relu(self.conv1(x)))
x = self.pool(F.relu(self.conv2(x)))
x = x.reshape(x.shape[0], -1)
        x = self.fc1(x)  # map the flattened features straight to class logits
return x
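# Shape check: for 28x28 MNIST input, the two 2x2 max-pools halve the spatial
# size twice (28 -> 14 -> 7), so fc1 sees 16 channels * 7 * 7 features.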
# Quick sanity check of the fully connected model on random data
model = NN(784, 10)
x = torch.randn(64, 784)
print(model(x).shape)  # expected: torch.Size([64, 10])
# Set device
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
# Hyperparameters
batch_size = 64
learning_rate = 1e-3
num_epochs = 10
input_size = 784
num_classes = 10
# Load data
train_dataset = datasets.MNIST(root='dataset/', train=True, download=True, transform=transforms.ToTensor())
train_loader = DataLoader(train_dataset, batch_size=batch_size, shuffle=True)
test_dataset = datasets.MNIST(root='dataset/', train=False, download=True, transform=transforms.ToTensor())
test_loader = DataLoader(test_dataset, batch_size=batch_size, shuffle=True)
# Initialize model
model = NN(input_size=input_size, num_classes=num_classes).to(device)
# Define loss and optimizer
criterion = nn.CrossEntropyLoss()
optimizer = optim.Adam(model.parameters(), lr=learning_rate)
# Train model
for epoch in range(num_epochs):
for batch_idx, (data, target) in enumerate(train_loader):
data, target = data.to(device), target.to(device)
# Correct shape
data = data.reshape(data.shape[0], -1)
# Forward pass
scores = model(data)
loss = criterion(scores, target)
# Backward pass
optimizer.zero_grad()
loss.backward()
# Gradient descent step
optimizer.step()
# Check accuracy
def check_accuracy(loader, model):
if loader.dataset.train:
print('Checking accuracy on training set')
else:
print('Checking accuracy on test set')
num_correct = 0
num_samples = 0
model.eval()
with torch.no_grad():
for x, y in loader:
x = x.to(device)
y = y.to(device)
x = x.reshape(x.shape[0], -1)
scores = model(x)
_, predictions = scores.max(1)
num_correct += (predictions == y).sum()
num_samples += predictions.size(0)
print('Got %d / %d correct (%.2f)' % (num_correct, num_samples, 100 * float(num_correct) / num_samples))
model.train()
check_accuracy(train_loader, model)
check_accuracy(test_loader, model)
| 30.6875
| 112
| 0.647949
| 1,012
| 0.294443
| 0
| 0
| 0
| 0
| 0
| 0
| 345
| 0.100378
|
becbc66bb4d180935eed7f6a49ea9b7ed75ae703
| 998
|
py
|
Python
|
plot_loss.py
|
ngachago/tabular_comp
|
799a1e0dbf7a51bb04454f1f14a57f883dbd2da7
|
[
"MIT"
] | null | null | null |
plot_loss.py
|
ngachago/tabular_comp
|
799a1e0dbf7a51bb04454f1f14a57f883dbd2da7
|
[
"MIT"
] | null | null | null |
plot_loss.py
|
ngachago/tabular_comp
|
799a1e0dbf7a51bb04454f1f14a57f883dbd2da7
|
[
"MIT"
] | null | null | null |
import matplotlib.pyplot as plt
def plot_loss_mae(history):
plt.plot(history.history['loss'])
plt.plot(history.history['val_loss'])
plt.title('Model loss')
plt.ylabel('Loss')
plt.xlabel('Epoch')
plt.legend(['Train', 'Validation'], loc='best')
plt.show()
plt.plot(history.history['mae'])
plt.plot(history.history['val_mae'])
plt.title('Model MAE')
plt.ylabel('MAE')
plt.xlabel('Epoch')
plt.legend(['Train', 'Validation'], loc='best')
plt.show()
def plot_loss_accuracy(history):
plt.plot(history.history['loss'])
plt.plot(history.history['val_loss'])
plt.title('Model loss')
plt.ylabel('Loss')
plt.xlabel('Epoch')
plt.legend(['Train', 'Validation'], loc='best')
plt.show()
plt.plot(history.history['accuracy'])
plt.plot(history.history['val_accuracy'])
plt.title('Model Accuracy')
plt.ylabel('Accuracy')
plt.xlabel('Epoch')
plt.legend(['Train', 'Validation'], loc='best')
plt.show()
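# Usage sketch (assumes `history` is the object returned by Keras
# model.fit(...) with 'mae' or 'accuracy' among the compiled metrics):
#
#     history = model.fit(x, y, validation_split=0.2, epochs=10)
#     plot_loss_mae(history)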
| 26.972973
| 51
| 0.635271
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 276
| 0.276553
|
becc2f00075040cb4b100c7b8d0736d719593862
| 18,695
|
py
|
Python
|
hotkey.py
|
RMPiria/painelContreole
|
bd07def485981456a7e7390f2b18db71740ce8da
|
[
"Unlicense"
] | null | null | null |
hotkey.py
|
RMPiria/painelContreole
|
bd07def485981456a7e7390f2b18db71740ce8da
|
[
"Unlicense"
] | null | null | null |
hotkey.py
|
RMPiria/painelContreole
|
bd07def485981456a7e7390f2b18db71740ce8da
|
[
"Unlicense"
] | null | null | null |
# -*- coding: utf-8 -*-
import os
from os.path import dirname, join, normpath
import sys
from sys import platform
from config import config
if platform == 'darwin':
import objc
from AppKit import NSApplication, NSWorkspace, NSBeep, NSSound, NSEvent, NSKeyDown, NSKeyUp, NSFlagsChanged, NSKeyDownMask, NSFlagsChangedMask, NSShiftKeyMask, NSControlKeyMask, NSAlternateKeyMask, NSCommandKeyMask, NSNumericPadKeyMask, NSDeviceIndependentModifierFlagsMask, NSF1FunctionKey, NSF35FunctionKey, NSDeleteFunctionKey, NSClearLineFunctionKey
class HotkeyMgr:
MODIFIERMASK = NSShiftKeyMask|NSControlKeyMask|NSAlternateKeyMask|NSCommandKeyMask|NSNumericPadKeyMask
POLL = 250
# https://developer.apple.com/library/mac/documentation/Cocoa/Reference/ApplicationKit/Classes/NSEvent_Class/#//apple_ref/doc/constant_group/Function_Key_Unicodes
DISPLAY = { 0x03: u'⌅', 0x09: u'⇥', 0xd: u'↩', 0x19: u'⇤', 0x1b: u'esc', 0x20: u'⏘', 0x7f: u'⌫',
0xf700: u'↑', 0xf701: u'↓', 0xf702: u'←', 0xf703: u'→',
0xf727: u'Ins',
0xf728: u'⌦', 0xf729: u'↖', 0xf72a: u'Fn', 0xf72b: u'↘',
0xf72c: u'⇞', 0xf72d: u'⇟', 0xf72e: u'PrtScr', 0xf72f: u'ScrollLock',
0xf730: u'Pause', 0xf731: u'SysReq', 0xf732: u'Break', 0xf733: u'Reset',
0xf739: u'⌧',
}
(ACQUIRE_INACTIVE, ACQUIRE_ACTIVE, ACQUIRE_NEW) = range(3)
def __init__(self):
self.root = None
self.keycode = 0
self.modifiers = 0
self.activated = False
self.observer = None
self.acquire_key = 0
self.acquire_state = HotkeyMgr.ACQUIRE_INACTIVE
self.tkProcessKeyEvent_old = None
self.snd_good = NSSound.alloc().initWithContentsOfFile_byReference_(join(config.respath, 'snd_good.wav'), False)
self.snd_bad = NSSound.alloc().initWithContentsOfFile_byReference_(join(config.respath, 'snd_bad.wav'), False)
def register(self, root, keycode, modifiers):
self.root = root
self.keycode = keycode
self.modifiers = modifiers
self.activated = False
if keycode:
if not self.observer:
self.root.after_idle(self._observe)
self.root.after(HotkeyMgr.POLL, self._poll)
# Monkey-patch tk (tkMacOSXKeyEvent.c)
if not self.tkProcessKeyEvent_old:
sel = 'tkProcessKeyEvent:'
cls = NSApplication.sharedApplication().class__()
self.tkProcessKeyEvent_old = NSApplication.sharedApplication().methodForSelector_(sel)
newmethod = objc.selector(self.tkProcessKeyEvent, selector = self.tkProcessKeyEvent_old.selector, signature = self.tkProcessKeyEvent_old.signature)
objc.classAddMethod(cls, sel, newmethod)
# Monkey-patch tk (tkMacOSXKeyEvent.c) to:
# - workaround crash on OSX 10.9 & 10.10 on seeing a composing character
# - notice when modifier key state changes
# - keep a copy of NSEvent.charactersIgnoringModifiers, which is what we need for the hotkey
# (Would like to use a decorator but need to ensure the application is created before this is installed)
def tkProcessKeyEvent(self, cls, theEvent):
if self.acquire_state:
if theEvent.type() == NSFlagsChanged:
self.acquire_key = theEvent.modifierFlags() & NSDeviceIndependentModifierFlagsMask
self.acquire_state = HotkeyMgr.ACQUIRE_NEW
# suppress the event by not chaining the old function
return theEvent
elif theEvent.type() in (NSKeyDown, NSKeyUp):
c = theEvent.charactersIgnoringModifiers()
self.acquire_key = (c and ord(c[0]) or 0) | (theEvent.modifierFlags() & NSDeviceIndependentModifierFlagsMask)
self.acquire_state = HotkeyMgr.ACQUIRE_NEW
# suppress the event by not chaining the old function
return theEvent
# replace empty characters with charactersIgnoringModifiers to avoid crash
elif theEvent.type() in (NSKeyDown, NSKeyUp) and not theEvent.characters():
theEvent = NSEvent.keyEventWithType_location_modifierFlags_timestamp_windowNumber_context_characters_charactersIgnoringModifiers_isARepeat_keyCode_(theEvent.type(), theEvent.locationInWindow(), theEvent.modifierFlags(), theEvent.timestamp(), theEvent.windowNumber(), theEvent.context(), theEvent.charactersIgnoringModifiers(), theEvent.charactersIgnoringModifiers(), theEvent.isARepeat(), theEvent.keyCode())
return self.tkProcessKeyEvent_old(cls, theEvent)
def _observe(self):
# Must be called after root.mainloop() so that the app's message loop has been created
self.observer = NSEvent.addGlobalMonitorForEventsMatchingMask_handler_(NSKeyDownMask, self._handler)
def _poll(self):
# No way of signalling to Tkinter from within the callback handler block that doesn't
# cause Python to crash, so poll.
if self.activated:
self.activated = False
self.root.event_generate('<<Invoke>>', when="tail")
if self.keycode or self.modifiers:
self.root.after(HotkeyMgr.POLL, self._poll)
def unregister(self):
self.keycode = None
self.modifiers = None
@objc.callbackFor(NSEvent.addGlobalMonitorForEventsMatchingMask_handler_)
def _handler(self, event):
# use event.charactersIgnoringModifiers to handle composing characters like Alt-e
if (event.modifierFlags() & HotkeyMgr.MODIFIERMASK) == self.modifiers and ord(event.charactersIgnoringModifiers()[0]) == self.keycode:
if config.getint('hotkey_always'):
self.activated = True
else: # Only trigger if game client is front process
front = NSWorkspace.sharedWorkspace().frontmostApplication()
if front and front.bundleIdentifier() == 'uk.co.frontier.EliteDangerous':
self.activated = True
def acquire_start(self):
self.acquire_state = HotkeyMgr.ACQUIRE_ACTIVE
self.root.after_idle(self._acquire_poll)
def acquire_stop(self):
self.acquire_state = HotkeyMgr.ACQUIRE_INACTIVE
def _acquire_poll(self):
# No way of signalling to Tkinter from within the monkey-patched event handler that doesn't
# cause Python to crash, so poll.
if self.acquire_state:
if self.acquire_state == HotkeyMgr.ACQUIRE_NEW:
# Abuse tkEvent's keycode field to hold our acquired key & modifier
self.root.event_generate('<KeyPress>', keycode = self.acquire_key)
self.acquire_state = HotkeyMgr.ACQUIRE_ACTIVE
self.root.after(50, self._acquire_poll)
def fromevent(self, event):
# Return configuration (keycode, modifiers) or None=clear or False=retain previous
(keycode, modifiers) = (event.keycode & 0xffff, event.keycode & 0xffff0000) # Set by _acquire_poll()
if keycode and not (modifiers & (NSShiftKeyMask|NSControlKeyMask|NSAlternateKeyMask|NSCommandKeyMask)):
if keycode == 0x1b: # Esc = retain previous
self.acquire_state = HotkeyMgr.ACQUIRE_INACTIVE
return False
elif keycode in [0x7f, ord(NSDeleteFunctionKey), ord(NSClearLineFunctionKey)]: # BkSp, Del, Clear = clear hotkey
self.acquire_state = HotkeyMgr.ACQUIRE_INACTIVE
return None
elif keycode in [0x13, 0x20, 0x2d] or 0x61 <= keycode <= 0x7a: # don't allow keys needed for typing in System Map
NSBeep()
self.acquire_state = HotkeyMgr.ACQUIRE_INACTIVE
return None
return (keycode, modifiers)
def display(self, keycode, modifiers):
# Return displayable form
text = ''
if modifiers & NSControlKeyMask: text += u'⌃'
if modifiers & NSAlternateKeyMask: text += u'⌥'
if modifiers & NSShiftKeyMask: text += u'⇧'
if modifiers & NSCommandKeyMask: text += u'⌘'
if (modifiers & NSNumericPadKeyMask) and keycode <= 0x7f: text += u'№'
if not keycode:
pass
elif ord(NSF1FunctionKey) <= keycode <= ord(NSF35FunctionKey):
text += 'F%d' % (keycode + 1 - ord(NSF1FunctionKey))
elif keycode in HotkeyMgr.DISPLAY: # specials
text += HotkeyMgr.DISPLAY[keycode]
elif keycode < 0x20: # control keys
text += unichr(keycode+0x40)
elif keycode < 0xf700: # key char
text += unichr(keycode).upper()
else:
text += u'⁈'
return text
def play_good(self):
self.snd_good.play()
def play_bad(self):
self.snd_bad.play()
elif platform == 'win32':
import atexit
import ctypes
from ctypes.wintypes import *
import threading
import winsound
RegisterHotKey = ctypes.windll.user32.RegisterHotKey
UnregisterHotKey = ctypes.windll.user32.UnregisterHotKey
MOD_ALT = 0x0001
MOD_CONTROL = 0x0002
MOD_SHIFT = 0x0004
MOD_WIN = 0x0008
MOD_NOREPEAT = 0x4000
GetMessage = ctypes.windll.user32.GetMessageW
TranslateMessage = ctypes.windll.user32.TranslateMessage
DispatchMessage = ctypes.windll.user32.DispatchMessageW
PostThreadMessage = ctypes.windll.user32.PostThreadMessageW
WM_QUIT = 0x0012
WM_HOTKEY = 0x0312
WM_APP = 0x8000
WM_SND_GOOD = WM_APP + 1
WM_SND_BAD = WM_APP + 2
GetKeyState = ctypes.windll.user32.GetKeyState
MapVirtualKey = ctypes.windll.user32.MapVirtualKeyW
VK_BACK = 0x08
VK_CLEAR = 0x0c
VK_RETURN = 0x0d
VK_SHIFT = 0x10
VK_CONTROL = 0x11
VK_MENU = 0x12
VK_CAPITAL = 0x14
VK_MODECHANGE= 0x1f
VK_ESCAPE = 0x1b
VK_SPACE = 0x20
VK_DELETE = 0x2e
VK_LWIN = 0x5b
VK_RWIN = 0x5c
VK_NUMPAD0 = 0x60
VK_DIVIDE = 0x6f
VK_F1 = 0x70
VK_F24 = 0x87
VK_OEM_MINUS = 0xbd
VK_NUMLOCK = 0x90
VK_SCROLL = 0x91
VK_PROCESSKEY= 0xe5
VK_OEM_CLEAR = 0xfe
GetForegroundWindow = ctypes.windll.user32.GetForegroundWindow
GetWindowText = ctypes.windll.user32.GetWindowTextW
GetWindowText.argtypes = [HWND, LPWSTR, ctypes.c_int]
GetWindowTextLength = ctypes.windll.user32.GetWindowTextLengthW
def WindowTitle(h):
if h:
l = GetWindowTextLength(h) + 1
buf = ctypes.create_unicode_buffer(l)
if GetWindowText(h, buf, l):
return buf.value
return ''
class MOUSEINPUT(ctypes.Structure):
_fields_ = [('dx', LONG), ('dy', LONG), ('mouseData', DWORD), ('dwFlags', DWORD), ('time', DWORD), ('dwExtraInfo', ctypes.POINTER(ULONG))]
class KEYBDINPUT(ctypes.Structure):
_fields_ = [('wVk', WORD), ('wScan', WORD), ('dwFlags', DWORD), ('time', DWORD), ('dwExtraInfo', ctypes.POINTER(ULONG))]
class HARDWAREINPUT(ctypes.Structure):
_fields_ = [('uMsg', DWORD), ('wParamL', WORD), ('wParamH', WORD)]
class INPUT_union(ctypes.Union):
_fields_ = [('mi', MOUSEINPUT), ('ki', KEYBDINPUT), ('hi', HARDWAREINPUT)]
class INPUT(ctypes.Structure):
_fields_ = [('type', DWORD), ('union', INPUT_union)]
SendInput = ctypes.windll.user32.SendInput
SendInput.argtypes = [ctypes.c_uint, ctypes.POINTER(INPUT), ctypes.c_int]
INPUT_MOUSE = 0
INPUT_KEYBOARD = 1
INPUT_HARDWARE = 2
class HotkeyMgr:
# https://msdn.microsoft.com/en-us/library/windows/desktop/dd375731%28v=vs.85%29.aspx
# Limit ourselves to symbols in Windows 7 Segoe UI
DISPLAY = { 0x03: 'Break', 0x08: 'Bksp', 0x09: u'↹', 0x0c: 'Clear', 0x0d: u'↵', 0x13: 'Pause',
0x14: u'Ⓐ', 0x1b: 'Esc',
0x20: u'⏘', 0x21: 'PgUp', 0x22: 'PgDn', 0x23: 'End', 0x24: 'Home',
0x25: u'←', 0x26: u'↑', 0x27: u'→', 0x28: u'↓',
0x2c: 'PrtScn', 0x2d: 'Ins', 0x2e: 'Del', 0x2f: 'Help',
0x5d: u'▤', 0x5f: u'☾',
0x90: u'➀', 0x91: 'ScrLk',
0xa6: u'⇦', 0xa7: u'⇨', 0xa9: u'⊗', 0xab: u'☆', 0xac: u'⌂', 0xb4: u'✉',
}
def __init__(self):
self.root = None
self.thread = None
self.snd_good = open(join(config.respath, 'snd_good.wav'), 'rb').read()
self.snd_bad = open(join(config.respath, 'snd_bad.wav'), 'rb').read()
atexit.register(self.unregister)
def register(self, root, keycode, modifiers):
self.root = root
if self.thread:
self.unregister()
if keycode or modifiers:
self.thread = threading.Thread(target = self.worker, name = 'Hotkey "%x:%x"' % (keycode,modifiers), args = (keycode,modifiers))
self.thread.daemon = True
self.thread.start()
def unregister(self):
thread = self.thread
if thread:
self.thread = None
PostThreadMessage(thread.ident, WM_QUIT, 0, 0)
thread.join() # Wait for it to unregister hotkey and quit
def worker(self, keycode, modifiers):
# Hotkey must be registered by the thread that handles it
if not RegisterHotKey(None, 1, modifiers|MOD_NOREPEAT, keycode):
self.thread = None
return
fake = INPUT(INPUT_KEYBOARD, INPUT_union(ki = KEYBDINPUT(keycode, keycode, 0, 0, None)))
msg = MSG()
while GetMessage(ctypes.byref(msg), None, 0, 0) != 0:
if msg.message == WM_HOTKEY:
if config.getint('hotkey_always') or WindowTitle(GetForegroundWindow()).startswith('Elite - Dangerous'):
self.root.event_generate('<<Invoke>>', when="tail")
else:
# Pass the key on
UnregisterHotKey(None, 1)
SendInput(1, fake, ctypes.sizeof(INPUT))
if not RegisterHotKey(None, 1, modifiers|MOD_NOREPEAT, keycode):
break
elif msg.message == WM_SND_GOOD:
winsound.PlaySound(self.snd_good, winsound.SND_MEMORY) # synchronous
elif msg.message == WM_SND_BAD:
winsound.PlaySound(self.snd_bad, winsound.SND_MEMORY) # synchronous
else:
TranslateMessage(ctypes.byref(msg))
DispatchMessage(ctypes.byref(msg))
UnregisterHotKey(None, 1)
self.thread = None
def acquire_start(self):
pass
def acquire_stop(self):
pass
def fromevent(self, event):
# event.state is a pain - it shows the state of the modifiers *before* a modifier key was pressed.
# event.state *does* differentiate between left and right Ctrl and Alt and between Return and Enter
# by putting KF_EXTENDED in bit 18, but RegisterHotKey doesn't differentiate.
modifiers = ((GetKeyState(VK_MENU) & 0x8000) and MOD_ALT) | ((GetKeyState(VK_CONTROL) & 0x8000) and MOD_CONTROL) | ((GetKeyState(VK_SHIFT) & 0x8000) and MOD_SHIFT) | ((GetKeyState(VK_LWIN) & 0x8000) and MOD_WIN) | ((GetKeyState(VK_RWIN) & 0x8000) and MOD_WIN)
keycode = event.keycode
if keycode in [ VK_SHIFT, VK_CONTROL, VK_MENU, VK_LWIN, VK_RWIN ]:
return (0, modifiers)
if not modifiers:
if keycode == VK_ESCAPE: # Esc = retain previous
return False
elif keycode in [ VK_BACK, VK_DELETE, VK_CLEAR, VK_OEM_CLEAR ]: # BkSp, Del, Clear = clear hotkey
return None
elif keycode in [ VK_RETURN, VK_SPACE, VK_OEM_MINUS] or ord('A') <= keycode <= ord('Z'): # don't allow keys needed for typing in System Map
winsound.MessageBeep()
return None
elif keycode in [ VK_NUMLOCK, VK_SCROLL, VK_PROCESSKEY ] or VK_CAPITAL <= keycode <= VK_MODECHANGE: # ignore unmodified mode switch keys
return (0, modifiers)
# See if the keycode is usable and available
if RegisterHotKey(None, 2, modifiers|MOD_NOREPEAT, keycode):
UnregisterHotKey(None, 2)
return (keycode, modifiers)
else:
winsound.MessageBeep()
return None
def display(self, keycode, modifiers):
text = ''
if modifiers & MOD_WIN: text += u'❖+'
if modifiers & MOD_CONTROL: text += u'Ctrl+'
if modifiers & MOD_ALT: text += u'Alt+'
if modifiers & MOD_SHIFT: text += u'⇧+'
if VK_NUMPAD0 <= keycode <= VK_DIVIDE: text += u'№'
if not keycode:
pass
elif VK_F1 <= keycode <= VK_F24:
text += 'F%d' % (keycode + 1 - VK_F1)
elif keycode in HotkeyMgr.DISPLAY: # specials
text += HotkeyMgr.DISPLAY[keycode]
else:
c = MapVirtualKey(keycode, 2) # printable ?
if not c: # oops not printable
text += u'⁈'
elif c < 0x20: # control keys
text += unichr(c+0x40)
else:
text += unichr(c).upper()
return text
def play_good(self):
if self.thread:
PostThreadMessage(self.thread.ident, WM_SND_GOOD, 0, 0)
def play_bad(self):
if self.thread:
PostThreadMessage(self.thread.ident, WM_SND_BAD, 0, 0)
else: # Linux
class HotkeyMgr:
def register(self, root, keycode, modifiers):
pass
def unregister(self):
pass
def play_good(self):
pass
def play_bad(self):
pass
# singleton
hotkeymgr = HotkeyMgr()
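# Usage sketch (hypothetical Tk root and key values; on macOS/Windows the
# <<Invoke>> virtual event fires when the registered hotkey is pressed):
#
#     hotkeymgr.register(root, keycode, modifiers)
#     root.bind('<<Invoke>>', on_hotkey)   # on_hotkey is a user callback
#     ...
#     hotkeymgr.unregister()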
| 44.300948
| 424
| 0.589462
| 16,038
| 0.853948
| 0
| 0
| 732
| 0.038976
| 0
| 0
| 3,193
| 0.170012
|
becc66adc74c550995995f2d2b08dfaa9d6845d1
| 3,530
|
py
|
Python
|
Chapter10/ch10_r1_grover_aqua.py
|
georgekorpas/Quantum-Computing-in-Practice-with-Qiskit-and-IBM-Quantum-Experience
|
938123d051c5bab72110011b3a05e515bb69ca09
|
[
"MIT"
] | 24
|
2020-11-21T20:33:51.000Z
|
2022-03-26T06:41:27.000Z
|
Chapter10/ch10_r1_grover_aqua.py
|
videomover/Quantum-Computing-in-Practice-with-Qiskit-and-IBM-Quantum-Experience
|
938123d051c5bab72110011b3a05e515bb69ca09
|
[
"MIT"
] | 2
|
2021-02-07T14:32:12.000Z
|
2022-03-25T07:23:46.000Z
|
Chapter10/ch10_r1_grover_aqua.py
|
videomover/Quantum-Computing-in-Practice-with-Qiskit-and-IBM-Quantum-Experience
|
938123d051c5bab72110011b3a05e515bb69ca09
|
[
"MIT"
] | 16
|
2020-11-03T07:49:11.000Z
|
2022-03-26T06:41:29.000Z
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created Nov 2020
@author: hassi
"""
from qiskit import Aer, IBMQ
# Do the necessary import for our program
from qiskit.aqua.algorithms import Grover
from qiskit.aqua.components.oracles import LogicalExpressionOracle, TruthTableOracle
# Import basic plot tools
from qiskit.tools.visualization import plot_histogram
from IPython.core.display import display
global oracle_method, oracle_type
def log_length(oracle_input,oracle_method):
from math import sqrt, pow, pi, log
if oracle_method=="log":
filtered = [c.lower() for c in oracle_input if c.isalpha()]
result = len(filtered)
num_iterations=int(pi/4*(sqrt(pow(2,result))))
else:
num_iterations = int(pi/4*(sqrt(pow(2,log(len(oracle_input),2)))))
print("Iterations: ", num_iterations)
return num_iterations
def create_oracle(oracle_method):
oracle_text={"log":"~A & ~B & C","bit":"00001000"}
# set the input
global num_iterations
print("Enter the oracle input string, such as:"+oracle_text[oracle_method]+"\nor enter 'def' for a default string.")
oracle_input=input('\nOracle input:\n ')
if oracle_input=="def":
oracle_type=oracle_text[oracle_method]
else:
oracle_type = oracle_input
num_iterations=log_length(oracle_type, oracle_method)
return(oracle_type)
def create_grover(oracle_type, oracle_method):
# Build the circuit
if oracle_method=="log":
algorithm = Grover(LogicalExpressionOracle(oracle_type),num_iterations=num_iterations)
oracle_circuit = Grover(LogicalExpressionOracle(oracle_type)).construct_circuit()
else:
algorithm = Grover(TruthTableOracle(oracle_type),num_iterations=num_iterations)
oracle_circuit = Grover(TruthTableOracle(oracle_type)).construct_circuit()
display(oracle_circuit.draw(output="mpl"))
display(algorithm)
return(algorithm)
def run_grover(algorithm,oracle_type,oracle_method):
# Run the algorithm on a simulator, printing the most frequently occurring result
backend = Aer.get_backend('qasm_simulator')
result = algorithm.run(backend)
print("Oracle method:",oracle_method)
print("Oracle for:", oracle_type)
print("Aer Result:",result['top_measurement'])
display(plot_histogram(result['measurement']))
# Run the algorithm on an IBM Q backend, printing the most frequently occurring result
print("Getting provider...")
if not IBMQ.active_account():
IBMQ.load_account()
provider = IBMQ.get_provider()
from qiskit.providers.ibmq import least_busy
filtered_backend = least_busy(provider.backends(n_qubits=5, operational=True, simulator=False))
result = algorithm.run(filtered_backend)
print("Oracle method:",oracle_method)
print("Oracle for:", oracle_type)
print("IBMQ "+filtered_backend.name()+" Result:",result['top_measurement'])
display(plot_histogram(result['measurement']))
print(result)
# Main loop
def main():
oracle_method="log"
    while oracle_method != 0:  # input() returns a string, so this menu loops until interrupted
print("Ch 11: Grover search with Aqua")
print("------------------------------")
# set the oracle method: "Log" for logical expression or "Bit" for bit string.
oracle_method = input("Select oracle method (log or bit):\n")
        oracle_type = create_oracle(oracle_method)
        algorithm = create_grover(oracle_type, oracle_method)
        run_grover(algorithm, oracle_type, oracle_method)
if __name__ == '__main__':
main()
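# For the default logical oracle '~A & ~B & C' the only satisfying assignment
# is A=0, B=0, C=1, so that is the basis state Grover's search should amplify
# (up to Qiskit's bit-ordering conventions in the reported measurement).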
| 34.950495
| 120
| 0.703116
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 912
| 0.258357
|
becc7d9f66f89922de66fb46f1bec24640debbb3
| 1,428
|
py
|
Python
|
setup_pre-commit.py
|
fierte-product-development/Setups
|
d6151888ecc82a94e894b5d926c85eb193a5b97c
|
[
"MIT"
] | 1
|
2021-07-06T09:26:48.000Z
|
2021-07-06T09:26:48.000Z
|
setup_pre-commit.py
|
fierte-product-development/Setups
|
d6151888ecc82a94e894b5d926c85eb193a5b97c
|
[
"MIT"
] | 3
|
2022-01-26T05:29:04.000Z
|
2022-02-16T10:16:02.000Z
|
setup_pre-commit.py
|
fierte-product-development/Setups
|
d6151888ecc82a94e894b5d926c85eb193a5b97c
|
[
"MIT"
] | 1
|
2020-11-11T01:23:01.000Z
|
2020-11-11T01:23:01.000Z
|
import sys
import os
import subprocess
from subprocess import PIPE, STDOUT
from pathlib import Path
proc_arg = {
'shell': True,
'stdout': PIPE,
'text': True
}
def _run(*cmd):
proc = subprocess.run(cmd, **proc_arg, stderr=PIPE)
return proc.stdout.replace('\n', '')
def _popen(*cmd):
print(f'$ {" ".join(cmd)}')
proc = subprocess.Popen(cmd, **proc_arg, stderr=STDOUT)
while True:
if line := proc.stdout.readline():
print(line.replace('\n', ''))
        elif (poll := proc.poll()) is not None:
print()
return poll
def _conf_exit(code, rm=False):
    input('\nExiting. Check the messages above and press Enter.')
if rm: os.remove(__file__)
sys.exit(code)
def pip_install(*names):
_popen('python', '-m', 'pip', 'install', '--upgrade', 'pip')
for name in names:
_popen('pip', 'install', name)
def exists_in_cd(tgt):
cd = Path(_run('cd'))
if not (cd/tgt).exists():
        print(f'{tgt} does not exist. Check the current directory.')
_conf_exit(1)
def install_pre_commit():
    print('Creating a commit-msg hook file in .git/hooks.')
_popen('pre-commit', 'install', '-t', 'commit-msg')
def main():
    print('Setting up pre-commit...\n')
pip_install('pre-commit')
exists_in_cd('.git')
install_pre_commit()
_conf_exit(0, True)
if __name__ == "__main__":
main()
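# Note: _conf_exit(0, True) removes this script itself (os.remove(__file__))
# after a successful run, so it is designed as a one-shot setup helper.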
| 21.969231
| 65
| 0.579832
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 478
| 0.300629
|
bece2e0f8b2de4615e59523ec76f70e91828684b
| 889
|
py
|
Python
|
5-buscar.py
|
gustavodp22/TP_DE_-CRUD-create-read-update-delete-
|
8edfba57cbbad37f8fb1af2d42d1d601301e8dd6
|
[
"MIT"
] | null | null | null |
5-buscar.py
|
gustavodp22/TP_DE_-CRUD-create-read-update-delete-
|
8edfba57cbbad37f8fb1af2d42d1d601301e8dd6
|
[
"MIT"
] | null | null | null |
5-buscar.py
|
gustavodp22/TP_DE_-CRUD-create-read-update-delete-
|
8edfba57cbbad37f8fb1af2d42d1d601301e8dd6
|
[
"MIT"
] | null | null | null |
"""
SQLite3 CRUD with Python 3
"""
import sqlite3
try:
bd = sqlite3.connect("libros.db")
cursor = bd.cursor()
    busqueda = input("Enter your search: ")
if not busqueda:
print("Búsqueda inválida")
exit()
sentencia = "SELECT * FROM libros WHERE titulo LIKE ?;"
cursor.execute(sentencia, [ "%{}%".format(busqueda) ])
libros = cursor.fetchall()
print("+{:-<20}+{:-<20}+{:-<10}+{:-<50}+".format("", "", "", ""))
print("|{:^20}|{:^20}|{:^10}|{:^50}|".format("Autor", "Género", "Precio", "Título"))
print("+{:-<20}+{:-<20}+{:-<10}+{:-<50}+".format("", "", "", ""))
for autor, genero, precio, titulo in libros:
print("|{:^20}|{:^20}|{:^10}|{:^50}|".format(autor, genero, precio, titulo))
print("+{:-<20}+{:-<20}+{:-<10}+{:-<50}+".format("", "", "", ""))
except sqlite3.OperationalError as error:
print("Error al abrir:", error)
| 27.78125
| 86
| 0.533183
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 386
| 0.431767
|
becf153d29dc0bf9abcbf500a0d578ce48e9150a
| 4,686
|
py
|
Python
|
tests/advanced_tests/regressors.py
|
amlanbanerjee/auto_ml
|
db8e1d2cfa93f13a21e55739acfc8d99837e91b0
|
[
"MIT"
] | 1,671
|
2016-08-09T04:44:48.000Z
|
2022-03-27T01:29:23.000Z
|
tests/advanced_tests/regressors.py
|
amlanbanerjee/auto_ml
|
db8e1d2cfa93f13a21e55739acfc8d99837e91b0
|
[
"MIT"
] | 428
|
2016-08-08T00:13:04.000Z
|
2022-01-19T10:09:05.000Z
|
tests/advanced_tests/regressors.py
|
amlanbanerjee/auto_ml
|
db8e1d2cfa93f13a21e55739acfc8d99837e91b0
|
[
"MIT"
] | 334
|
2016-08-29T12:34:18.000Z
|
2022-01-31T09:14:30.000Z
|
import datetime
import os
import random
import sys
sys.path = [os.path.abspath(os.path.dirname(__file__))] + sys.path
from auto_ml import Predictor
from auto_ml.utils_models import load_ml_model
import dill
from nose.tools import assert_equal, assert_not_equal, with_setup
import numpy as np
from sklearn.model_selection import train_test_split
import utils_testing as utils
def optimize_final_model_regression(model_name=None):
np.random.seed(0)
df_boston_train, df_boston_test = utils.get_boston_regression_dataset()
# We just want to make sure these run, not necessarily make sure that they're super accurate (which takes more time, and is dataset dependent)
df_boston_train = df_boston_train.sample(frac=0.5)
column_descriptions = {
'MEDV': 'output'
, 'CHAS': 'categorical'
}
ml_predictor = Predictor(type_of_estimator='regressor', column_descriptions=column_descriptions)
ml_predictor.train(df_boston_train, optimize_final_model=True, model_names=model_name)
test_score = ml_predictor.score(df_boston_test, df_boston_test.MEDV)
print('test_score')
print(test_score)
# the random seed gets a score of -3.21 on python 3.5
# There's a ton of noise here, due to small sample sizes
lower_bound = -3.4
if model_name == 'DeepLearningRegressor':
lower_bound = -24
if model_name == 'LGBMRegressor':
lower_bound = -16
if model_name == 'GradientBoostingRegressor':
lower_bound = -5.1
if model_name == 'CatBoostRegressor':
lower_bound = -4.5
if model_name == 'XGBRegressor':
lower_bound = -4.8
assert lower_bound < test_score < -2.75
def getting_single_predictions_regression(model_name=None):
np.random.seed(0)
df_boston_train, df_boston_test = utils.get_boston_regression_dataset()
column_descriptions = {
'MEDV': 'output'
, 'CHAS': 'categorical'
}
ml_predictor = Predictor(type_of_estimator='regressor', column_descriptions=column_descriptions)
ml_predictor.train(df_boston_train, model_names=model_name)
file_name = ml_predictor.save(str(random.random()))
saved_ml_pipeline = load_ml_model(file_name)
os.remove(file_name)
try:
keras_file_name = file_name[:-5] + '_keras_deep_learning_model.h5'
os.remove(keras_file_name)
    except OSError:
        pass
df_boston_test_dictionaries = df_boston_test.to_dict('records')
# 1. make sure the accuracy is the same
predictions = []
for row in df_boston_test_dictionaries:
predictions.append(saved_ml_pipeline.predict(row))
print('predictions')
print(predictions)
print('predictions[0]')
print(predictions[0])
print('type(predictions)')
print(type(predictions))
first_score = utils.calculate_rmse(df_boston_test.MEDV, predictions)
print('first_score')
print(first_score)
# Make sure our score is good, but not unreasonably good
lower_bound = -2.9
if model_name == 'DeepLearningRegressor':
lower_bound = -7.8
if model_name == 'LGBMRegressor':
lower_bound = -4.95
if model_name == 'XGBRegressor':
lower_bound = -3.4
if model_name == 'CatBoostRegressor':
lower_bound = -3.7
assert lower_bound < first_score < -2.7
# 2. make sure the speed is reasonable (do it a few extra times)
data_length = len(df_boston_test_dictionaries)
start_time = datetime.datetime.now()
for idx in range(1000):
row_num = idx % data_length
saved_ml_pipeline.predict(df_boston_test_dictionaries[row_num])
end_time = datetime.datetime.now()
duration = end_time - start_time
print('duration.total_seconds()')
print(duration.total_seconds())
# It's very difficult to set a benchmark for speed that will work across all machines.
# On my 2013 bottom of the line 15" MacBook Pro, this runs in about 0.8 seconds for 1000 predictions
# That's about 1 millisecond per prediction
# Assuming we might be running on a test box that's pretty weak, multiply by 3
# Also make sure we're not running unreasonably quickly
    assert 0.1 < duration.total_seconds() < 60
# 3. make sure we're not modifying the dictionaries (the score is the same after running a few experiments as it is the first time)
predictions = []
for row in df_boston_test_dictionaries:
predictions.append(saved_ml_pipeline.predict(row))
second_score = utils.calculate_rmse(df_boston_test.MEDV, predictions)
print('second_score')
print(second_score)
# Make sure our score is good, but not unreasonably good
assert lower_bound < second_score < -2.7
| 31.877551
| 146
| 0.712761
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,369
| 0.292147
|
bed0237d9ebc522d5a4384033d2b57c729cc7ede
| 39
|
py
|
Python
|
__init__.py
|
amueller/information-theoretic-mst
|
178fd4396bc9a9a499ec3d18d5047b320a5c32f2
|
[
"Unlicense"
] | 20
|
2016-05-03T13:29:09.000Z
|
2021-10-06T20:41:36.000Z
|
__init__.py
|
amueller/information-theoretic-mst
|
178fd4396bc9a9a499ec3d18d5047b320a5c32f2
|
[
"Unlicense"
] | 1
|
2018-04-21T15:32:07.000Z
|
2020-05-19T00:28:52.000Z
|
__init__.py
|
amueller/information-theoretic-mst
|
178fd4396bc9a9a499ec3d18d5047b320a5c32f2
|
[
"Unlicense"
] | 5
|
2015-04-21T00:27:49.000Z
|
2019-02-23T20:46:33.000Z
|
from itm import ITM
__all__ = ['ITM']
| 9.75
| 19
| 0.666667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 5
| 0.128205
|
bed3d451e4686403bc8880395163b3b1b1569208
| 2,827
|
py
|
Python
|
scripts/left_right_hemisphere_data/hemisphere_asymmetry.py
|
mwinding/connectome_analysis
|
dbc747290891805863c9481921d8080dc2043d21
|
[
"MIT"
] | 1
|
2021-06-10T05:48:16.000Z
|
2021-06-10T05:48:16.000Z
|
left_right_hemisphere_data/hemisphere_asymmetry.py
|
mwinding/connectome_tools
|
0392f6b1e924194299ea7760d8386eb01f3371a3
|
[
"MIT"
] | 2
|
2022-01-21T11:48:45.000Z
|
2022-01-21T11:48:45.000Z
|
scripts/left_right_hemisphere_data/hemisphere_asymmetry.py
|
mwinding/connectome_analysis
|
dbc747290891805863c9481921d8080dc2043d21
|
[
"MIT"
] | 1
|
2022-02-02T15:39:52.000Z
|
2022-02-02T15:39:52.000Z
|
import pandas as pd
import numpy as np
import csv
# import synapses divided across hemispheres
hemisphere_data = pd.read_csv('left_right_hemisphere_data/brain_hemisphere_membership.csv', header = 0)
# import pair list CSV, manually generated
#pairs = pd.read_csv('data/bp-pairs-2020-01-28.csv', header = 0)
# import skids of neurons that cross commissure
commissure_neurons = pd.read_json('left_right_hemisphere_data/cross_commissure-2020-3-2.json')['skeleton_id'].values
ipsi_neurons = np.setdiff1d(hemisphere_data['skeleton'], commissure_neurons)
ipsi_neurons_bool = pd.Series(hemisphere_data['skeleton'].values).isin(ipsi_neurons)
contra_neurons_bool = ~ipsi_neurons_bool
# print left/right totals and the left/right ratio for each count type,
# first for ipsilateral neurons, then for commissure-crossing (contra) neurons
for label, mask in (('IPSI', ipsi_neurons_bool), ('CONTRA', contra_neurons_bool)):
    print(label)
    for title, column in (('Postsynaptic Sites', 'n_inputs'),
                          ('Presynaptic Sites', 'n_outputs'),
                          ('Treenodes', 'n_treenodes')):
        print(title)
        left = sum(hemisphere_data[mask][column + '_left'].values)
        right = sum(hemisphere_data[mask][column + '_right'].values)
        print(left)
        print(right)
        print(left / right)
        print("")
    if label == 'IPSI':
        print("")
        print("")
| 45.596774
| 145
| 0.812168
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 995
| 0.351963
|
bed4073b9e79a28ea38d1cc06f6e14cb5d4efcb7
| 777
|
py
|
Python
|
__determineTripplesSumToZeroFromList.py
|
simdevex/01.Basics
|
cf4f372384e66f4b26e4887d2f5d815a1f8e929c
|
[
"MIT"
] | null | null | null |
__determineTripplesSumToZeroFromList.py
|
simdevex/01.Basics
|
cf4f372384e66f4b26e4887d2f5d815a1f8e929c
|
[
"MIT"
] | null | null | null |
__determineTripplesSumToZeroFromList.py
|
simdevex/01.Basics
|
cf4f372384e66f4b26e4887d2f5d815a1f8e929c
|
[
"MIT"
] | null | null | null |
'''
Python program to determine which triples sum to zero from a given list of lists.
Input: [[1343532, -2920635, 332], [-27, 18, 9], [4, 0, -4], [2, 2, 2], [-20, 16, 4]]
Output:
[False, True, True, False, True]
Input: [[1, 2, -3], [-4, 0, 4], [0, 1, -5], [1, 1, 1], [-2, 4, -1]]
Output:
[True, True, False, False, False]
'''
#License: https://bit.ly/3oLErEI
def test(nums):
return [sum(t)==0 for t in nums]
nums = [[1343532, -2920635, 332], [-27, 18, 9], [4, 0, -4], [2, 2, 2], [-20, 16, 4]]
print("Original list of lists:",nums)
print("Determine which triples sum to zero:")
print(test(nums))
nums = [[1, 2, -3], [-4, 0, 4], [0, 1, -5], [1, 1, 1], [-2, 4, -1]]
print("\nOriginal list of lists:",nums)
print("Determine which triples sum to zero:")
print(test(nums))
| 32.375
| 84
| 0.574003
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 485
| 0.624196
|
bed6c276e3757d89c0d4a20b188e77bced930a94
| 701
|
py
|
Python
|
fastNLP/modules/encoder/lstm.py
|
h00Jiang/fastNLP
|
79ddb469d81946c87a3d066122a8a3aba6e40f3a
|
[
"Apache-2.0"
] | null | null | null |
fastNLP/modules/encoder/lstm.py
|
h00Jiang/fastNLP
|
79ddb469d81946c87a3d066122a8a3aba6e40f3a
|
[
"Apache-2.0"
] | null | null | null |
fastNLP/modules/encoder/lstm.py
|
h00Jiang/fastNLP
|
79ddb469d81946c87a3d066122a8a3aba6e40f3a
|
[
"Apache-2.0"
] | null | null | null |
import torch.nn as nn
class Lstm(nn.Module):
"""
LSTM module
Args:
input_size : input size
hidden_size : hidden size
num_layers : number of hidden layers. Default: 1
        dropout : dropout rate. Default: 0
bidirectional : If True, becomes a bidirectional RNN. Default: False.
"""
def __init__(self, input_size, hidden_size=100, num_layers=1, dropout=0, bidirectional=False):
super(Lstm, self).__init__()
self.lstm = nn.LSTM(input_size, hidden_size, num_layers, bias=True, batch_first=True,
dropout=dropout, bidirectional=bidirectional)
def forward(self, x):
x, _ = self.lstm(x)
return x
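# --- Usage sketch (illustrative; not part of the original module) ---
# Minimal smoke test of the wrapper above; the tensor sizes are assumptions.
# With batch_first=True the expected input is (batch, seq_len, input_size).
if __name__ == '__main__':
    import torch
    lstm = Lstm(input_size=32, hidden_size=64)
    x = torch.randn(4, 10, 32)
    out = lstm(x)
    print(out.shape)  # torch.Size([4, 10, 64])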
| 29.208333
| 98
| 0.636234
| 676
| 0.964337
| 0
| 0
| 0
| 0
| 0
| 0
| 264
| 0.376605
|
bed723c002fdd1ab37526c62f785025bbbd1fbd1
| 838
|
py
|
Python
|
geoLocApp/signals.py
|
KKWaxy/geoLoc
|
23e33b9fd7cb3b1031bd11475612dcc324680975
|
[
"Apache-2.0"
] | null | null | null |
geoLocApp/signals.py
|
KKWaxy/geoLoc
|
23e33b9fd7cb3b1031bd11475612dcc324680975
|
[
"Apache-2.0"
] | null | null | null |
geoLocApp/signals.py
|
KKWaxy/geoLoc
|
23e33b9fd7cb3b1031bd11475612dcc324680975
|
[
"Apache-2.0"
] | null | null | null |
from django.db.models.signals import pre_save,post_save
from django.dispatch import receiver
import geoLocApp.models
import geoLocApp.distance
# @receiver(post_save,sender=geoLocApp.models.Position,dispatch_uid="only_before_registered")
# def setDistance(sender, **kwargs):
# position = kwargs["instance"]
# coordonnees = position.coordonnees.all()
# print(coordonnees)
# for coordonnee in coordonnees:
# coordonnee.distance = geoLocApp.distance.distance(coordonnee.latitude,position.latitude,coordonnee.longitude,position.longitude)
# print(coordonnee.distance)
# @receiver(post_save,sender=geoLocApp.models.Position,dispatch_uid="new_position_added")
# def new_position(sender,**kwargs):
# if kwargs['created']==True:
# return ['intance']
# else:
# return 0
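# --- Receiver sketch (illustrative; mirrors what the drafts above intend) ---
# A minimal active post_save hook would be wired the same way; the handler
# name and dispatch_uid below are assumptions:
#
#     @receiver(post_save, sender=geoLocApp.models.Position,
#               dispatch_uid="position_saved")
#     def position_saved(sender, instance, created, **kwargs):
#         if created:
#             print('new position', instance.pk)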
| 39.904762
| 139
| 0.72673
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 672
| 0.801909
|
bed7a7f211ac4ca2170057d5dae27d3248efc33a
| 2,198
|
py
|
Python
|
src/main/python/shabda/data/iterators/internal/data_iterator_base.py
|
dhiraa/sabdha
|
f428418962dcc76f49e0a451ffc0545fda9b6b59
|
[
"Apache-2.0"
] | 4
|
2018-10-26T07:00:34.000Z
|
2020-10-07T01:03:08.000Z
|
src/main/python/shabda/data/iterators/internal/data_iterator_base.py
|
dhiraa/sabdha
|
f428418962dcc76f49e0a451ffc0545fda9b6b59
|
[
"Apache-2.0"
] | null | null | null |
src/main/python/shabda/data/iterators/internal/data_iterator_base.py
|
dhiraa/sabdha
|
f428418962dcc76f49e0a451ffc0545fda9b6b59
|
[
"Apache-2.0"
] | 1
|
2018-10-26T07:00:38.000Z
|
2018-10-26T07:00:38.000Z
|
# Copyright 2018 The Shabda Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Base class for iterators that create features for LSTM-based models
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from shabda.hyperparams.hyperparams import HParams
class DataIteratorBase():
"""
"""
def __init__(self, hparams, dataset):
self._hparams = HParams(hparams, default_hparams=self.get_default_params())
self._dataset = dataset
@staticmethod
def get_default_params():
return {"key": "value"}
def _get_train_input_fn(self):
"""
Inheriting class must implement this
:return: callable
"""
raise NotImplementedError
def _get_val_input_fn(self):
"""
Inheriting class must implement this
:return: callable
"""
raise NotImplementedError
def _get_test_input_function(self):
"""
Inheriting class must implement this
:return: callable
"""
raise NotImplementedError
def get_train_input_fn(self):
"""
        Returns a data set iterator function that can be used in an estimator
:return:
"""
return self._get_train_input_fn()
def get_val_input_fn(self):
"""
        Returns a data set iterator function that can be used in an estimator
:return:
"""
return self._get_val_input_fn()
def get_test_input_function(self):
"""
        Returns a data set iterator function that can be used in an estimator
:return:
"""
return self._get_test_input_function()
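# --- Subclassing sketch (illustrative; _ListDataIterator is hypothetical) ---
# DataIteratorBase leaves the three private hooks to subclasses; a minimal
# concrete iterator over an in-memory dataset could look like this:
class _ListDataIterator(DataIteratorBase):
    """Toy iterator: every split just replays the raw dataset."""
    def _get_train_input_fn(self):
        return lambda: iter(self._dataset)
    def _get_val_input_fn(self):
        return lambda: iter(self._dataset)
    def _get_test_input_function(self):
        return lambda: iter(self._dataset)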
| 28.179487
| 83
| 0.670155
| 1,368
| 0.622384
| 0
| 0
| 75
| 0.034122
| 0
| 0
| 1,258
| 0.572338
|
bed7c4898d58c738f63aa212ff888514735a6694
| 441
|
py
|
Python
|
app/filters.py
|
dakhnovskaya/organization
|
72fb6f0a2daf03e7f7ce4dfb2cb3c3eaf1c40851
|
[
"Apache-2.0"
] | null | null | null |
app/filters.py
|
dakhnovskaya/organization
|
72fb6f0a2daf03e7f7ce4dfb2cb3c3eaf1c40851
|
[
"Apache-2.0"
] | null | null | null |
app/filters.py
|
dakhnovskaya/organization
|
72fb6f0a2daf03e7f7ce4dfb2cb3c3eaf1c40851
|
[
"Apache-2.0"
] | null | null | null |
import django_filters
from app.models import Company
class CompanyFilter(django_filters.FilterSet):
min_cost = django_filters.NumberFilter(field_name='companyproduct__cost', lookup_expr='gte')
max_cost = django_filters.NumberFilter(field_name='companyproduct__cost', lookup_expr='lte')
class Meta:
model = Company
fields = ('districts', 'products__category', 'name', 'products__name', 'min_cost', 'max_cost')
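# --- Usage sketch (illustrative; the view and template names are assumed) ---
# django-filter binds the FilterSet to the request's query string, so e.g.
# /companies/?min_cost=10&max_cost=100 narrows on companyproduct__cost:
#
#     def company_list(request):
#         f = CompanyFilter(request.GET, queryset=Company.objects.all())
#         return render(request, 'companies.html', {'filter': f})
#
# f.qs is the filtered queryset.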
| 33.923077
| 102
| 0.750567
| 384
| 0.870748
| 0
| 0
| 0
| 0
| 0
| 0
| 127
| 0.287982
|
bed83cf2a356ae3011eb19bade5063e46f89b28c
| 6,598
|
py
|
Python
|
reaction/rpc/rabbitmq.py
|
Inkln/reaction
|
7a57bd642ac3db15012717130a5f2655c3b7b177
|
[
"Apache-2.0"
] | 73
|
2019-10-01T15:59:57.000Z
|
2021-06-29T11:59:16.000Z
|
reaction/rpc/rabbitmq.py
|
Inkln/reaction
|
7a57bd642ac3db15012717130a5f2655c3b7b177
|
[
"Apache-2.0"
] | 2
|
2020-06-25T10:26:38.000Z
|
2022-02-21T06:20:47.000Z
|
reaction/rpc/rabbitmq.py
|
Inkln/reaction
|
7a57bd642ac3db15012717130a5f2655c3b7b177
|
[
"Apache-2.0"
] | 7
|
2019-10-08T05:46:22.000Z
|
2020-07-27T12:58:18.000Z
|
from typing import List
import asyncio
import inspect
import logging
import uuid
import aio_pika
import aio_pika.exceptions
from .base import BaseRPC
from .common import RPCError, RPCHandler, RPCRequest, RPCResponse
class RPC(BaseRPC):
HEARTBEAT_INTERVAL = 300
def __init__(
self,
url: str = None,
name: str = None,
handler: RPCHandler = None,
timeout: float = None,
pool_size: int = 0,
batch_size: int = 0,
wait_for_batch: bool = False,
max_jobs: int = 0,
loop: asyncio.AbstractEventLoop = None,
):
self._loop = loop
self._url = url or self.URL
self._name = name
self._handler = handler
self._timeout = timeout
self._pool_size = pool_size
self._batch_size = batch_size
self._wait_for_batch = wait_for_batch
self._max_jobs = max_jobs
self._mconn: aio_pika.RobustConnection = None
self._mch: aio_pika.RobustChannel = None
self._mq: aio_pika.RobustQueue = None
self._queue = asyncio.Queue(loop=loop)
self._pool = []
self._consuming = False
async def _run_pool(self):
self._pool = [self._run_worker() for _ in range(self._pool_size)]
self._consuming = True
await asyncio.gather(*self._pool, loop=self._loop)
self._pool = []
async def _run_worker(self):
bs = self._batch_size
q = self._queue
while self._consuming:
batch = [await q.get()]
if self._wait_for_batch and bs > 0:
while len(batch) < bs:
batch.append(await q.get())
else:
while (bs <= 0 or len(batch) < bs) and not q.empty():
batch.append(q.get_nowait())
await asyncio.wait_for(
asyncio.ensure_future(
self._process_batch(batch), loop=self._loop,
),
self._timeout,
loop=self._loop,
)
async def _process_single(self, message: aio_pika.IncomingMessage):
return await asyncio.wait_for(
asyncio.ensure_future(
self._process_batch([message]), loop=self._loop,
),
self._timeout,
loop=self._loop,
)
async def _process_batch(self, messages: List[aio_pika.IncomingMessage]):
try:
reqs = []
for m in messages:
# logging.debug(f"message: correlation_id={m.correlation_id}")
req: RPCRequest = self.decode_request(m.body)
reqs.append(req)
# logging.debug(f"handler: {self._handler}")
results = self._handler(*reqs)
if inspect.isawaitable(results):
results = await results
except KeyboardInterrupt:
self._consuming = False
for m in messages:
await m.reject(requeue=True)
return
except Exception as e:
if len(messages) == 1:
results = [RPCError()]
logging.exception(e)
await messages[0].reject()
else:
for m in messages:
await asyncio.wait_for(
asyncio.ensure_future(
self._process_batch([m]), loop=self._loop,
),
self._timeout,
loop=self._loop,
)
return
for message, result in zip(messages, results):
result = aio_pika.Message(
self.encode_response(result),
correlation_id=message.correlation_id,
delivery_mode=message.delivery_mode,
)
await self._mch.default_exchange.publish(
result, routing_key=message.reply_to, mandatory=False,
)
if not message.processed:
await message.ack()
async def consume(self):
while True:
try:
self._mconn = await aio_pika.connect_robust(
self._url,
loop=self._loop,
heartbeat_interval=self.HEARTBEAT_INTERVAL,
)
break
except ConnectionError:
                # This case is not handled by aio-pika for some reason
logging.warning("wait for queue...")
await asyncio.sleep(1, loop=self._loop)
self._mch = await self._mconn.channel()
await self._mch.set_qos(prefetch_count=self._max_jobs)
self._mq = await self._mch.declare_queue(self._name)
if self._pool_size > 0:
await asyncio.gather(
self._run_pool(),
self._mq.consume(self._queue.put),
loop=self._loop,
)
else:
await self._mq.consume(self._process_single)
return self._mconn
async def call(self, msg: RPCRequest) -> RPCResponse:
return await asyncio.wait_for(
asyncio.ensure_future(self._call(msg), loop=self._loop,),
self._timeout,
loop=self._loop,
)
async def _call(self, msg: RPCRequest) -> RPCResponse:
if not self._mconn:
self._mconn = await aio_pika.connect_robust(
self._url,
loop=self._loop,
heartbeat_interval=self.HEARTBEAT_INTERVAL,
)
if not self._mch:
self._mch: aio_pika.RobustChannel = await self._mconn.channel()
mq: aio_pika.RobustQueue = await self._mch.declare_queue()
try:
correlation_id = str(uuid.uuid4())
message = aio_pika.Message(
self.encode_request(msg),
correlation_id=correlation_id,
reply_to=mq.name,
)
await self._mch.default_exchange.publish(
message, routing_key=self._name,
)
async with mq.iterator(no_ack=True) as it:
async for message in it:
break
if message.correlation_id != correlation_id:
raise ValueError("wrong correlation_id")
response: RPCResponse = self.decode_response(message.body)
# logging.debug(f"response: {response}")
if isinstance(response, RPCError):
response.reraise()
return response
finally:
await mq.delete(if_empty=False, if_unused=False)
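# --- Usage sketch (illustrative; assumes a reachable RabbitMQ broker and the
# --- (de)serialization provided by BaseRPC) ---
# Server side: wrap a handler and consume from the named queue. The handler
# receives a batch of decoded requests and must return one result per request:
#
#     async def echo(*requests):
#         return list(requests)  # one response per request, in order
#
#     rpc = RPC(url='amqp://guest:guest@localhost/', name='echo', handler=echo)
#     await rpc.consume()
#
# Client side: the same class doubles as the caller.
#
#     client = RPC(url='amqp://guest:guest@localhost/', name='echo', timeout=5.0)
#     response = await client.call({'ping': 1})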
| 34.910053
| 78
| 0.543346
| 6,377
| 0.966505
| 0
| 0
| 0
| 0
| 5,401
| 0.818581
| 241
| 0.036526
|
bed8ffa1e73ffa405bfc1005a04f4f722ab41812
| 2,069
|
py
|
Python
|
api/migrations/0005_auto_20200906_1951.py
|
sh2MAN/yamdb_final
|
17f84bacd832237d88d3389605cf2acdf2a590f5
|
[
"BSD-3-Clause"
] | null | null | null |
api/migrations/0005_auto_20200906_1951.py
|
sh2MAN/yamdb_final
|
17f84bacd832237d88d3389605cf2acdf2a590f5
|
[
"BSD-3-Clause"
] | null | null | null |
api/migrations/0005_auto_20200906_1951.py
|
sh2MAN/yamdb_final
|
17f84bacd832237d88d3389605cf2acdf2a590f5
|
[
"BSD-3-Clause"
] | 12
|
2021-02-11T16:39:00.000Z
|
2022-03-30T19:18:24.000Z
|
# Generated by Django 3.0.5 on 2020-09-06 19:51
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('api', '0004_auto_20200906_1752'),
]
operations = [
migrations.AlterModelOptions(
name='category',
options={'verbose_name': 'Категория', 'verbose_name_plural': 'Категории'},
),
migrations.AlterModelOptions(
name='genre',
options={'verbose_name': 'Жанр', 'verbose_name_plural': 'Жанры'},
),
migrations.AlterModelOptions(
name='title',
options={'ordering': ('-id',), 'verbose_name': 'Произведение', 'verbose_name_plural': 'Произведения'},
),
migrations.RemoveConstraint(
model_name='review',
name='unique_review',
),
migrations.AlterField(
model_name='category',
name='name',
field=models.CharField(max_length=20, verbose_name='Наименование'),
),
migrations.AlterField(
model_name='genre',
name='name',
field=models.CharField(max_length=20, verbose_name='Наименование'),
),
migrations.AlterField(
model_name='title',
name='category',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='categories', to='api.Category', verbose_name='Категория'),
),
migrations.AlterField(
model_name='title',
name='description',
field=models.TextField(blank=True, null=True, verbose_name='Описание'),
),
migrations.AlterField(
model_name='title',
name='name',
field=models.CharField(max_length=100, verbose_name='Название'),
),
migrations.AddConstraint(
model_name='review',
constraint=models.UniqueConstraint(fields=('title', 'author'), name='unique_review'),
),
]
| 34.483333
| 177
| 0.585307
| 2,043
| 0.941909
| 0
| 0
| 0
| 0
| 0
| 0
| 609
| 0.280775
|
bed9a7c33d1cf837bf05eedf9e2389f71612ac64
| 1,104
|
py
|
Python
|
user_activity/models.py
|
adithya-bhat-b/user-activity
|
d2577bbb295ac381e08a31e296e3d681da7ab036
|
[
"MIT"
] | null | null | null |
user_activity/models.py
|
adithya-bhat-b/user-activity
|
d2577bbb295ac381e08a31e296e3d681da7ab036
|
[
"MIT"
] | 3
|
2021-04-08T22:04:18.000Z
|
2021-06-09T19:14:16.000Z
|
user_activity/models.py
|
adithya-bhat-b/user-activity
|
d2577bbb295ac381e08a31e296e3d681da7ab036
|
[
"MIT"
] | null | null | null |
import pytz
from django.db import models
# Create your models here.
def _get_time_zones():
"""
Function to get all the timezones
"""
timezone_choices = [(tz, tz) for tz in pytz.all_timezones]
return timezone_choices
# Model for user
class User(models.Model):
"""
User model:
attributes:
id - unique id of the user
real_name - user name
time_zone - user timezone
"""
id = models.CharField(primary_key=True, max_length=50)
real_name = models.CharField(max_length=100)
time_zone = models.CharField(max_length=50, choices=_get_time_zones())
class Meta:
# Db table name
db_table = "user"
# Model for user activity
class UserActivity(models.Model):
"""
UserActivity model:
start_time: start time of an user activity
end_time: end time of an user activity
"""
user_id = models.ForeignKey(User, on_delete=models.CASCADE)
start_time = models.DateTimeField()
end_time = models.DateTimeField()
class Meta:
# Db table name
db_table = "user_activity"
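# --- Usage sketch (illustrative; the ids and names below are assumptions) ---
#
#     from django.utils import timezone
#     user = User.objects.create(id='U123', real_name='Ada',
#                                time_zone='Europe/London')
#     UserActivity.objects.create(user_id=user, start_time=timezone.now(),
#                                 end_time=timezone.now())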
| 25.674419
| 74
| 0.646739
| 827
| 0.749094
| 0
| 0
| 0
| 0
| 0
| 0
| 451
| 0.408514
|
bedad5c3db81102c82212833e871a369686befc7
| 1,992
|
py
|
Python
|
Learning/python_data_analysis1.py
|
VictoriaGuXY/MCO-Menu-Checker-Online
|
706e2e1bf7395cc344f382ea2ac53d964d459f86
|
[
"MIT"
] | null | null | null |
Learning/python_data_analysis1.py
|
VictoriaGuXY/MCO-Menu-Checker-Online
|
706e2e1bf7395cc344f382ea2ac53d964d459f86
|
[
"MIT"
] | null | null | null |
Learning/python_data_analysis1.py
|
VictoriaGuXY/MCO-Menu-Checker-Online
|
706e2e1bf7395cc344f382ea2ac53d964d459f86
|
[
"MIT"
] | null | null | null |
import pandas as pd
# pandas provides many DataFrame classes and functions that we can quickly use
# to analyze data.
# This file contains notes on basic data-analysis strategies using Python.
# I will introduce two ways to read a csv file: by file path and by URL.
# Also, I will introduce how to output data and save it into a csv file.
# ------------------------------------------------------------------------------
# read a csv file using a file path
# This is based on the path where we saved the file.
# In Python, a Windows path should use either forward slashes (/) or escaped
# backslashes (\\).
df = pd.read_csv('E:\\tips.csv')
# ------------------------------------------------------------------------------
# read data online using a URL
data_url = "https://raw.githubusercontent.com/mwaskom/seaborn-data/master/tips.csv"
df = pd.read_csv(data_url)
# same output for the above two methods
# output is shown below
"""
total_bill tip sex smoker day time size
0 16.99 1.01 Female No Sun Dinner 2
1 10.34 1.66 Male No Sun Dinner 3
2 21.01 3.50 Male No Sun Dinner 3
3 23.68 3.31 Male No Sun Dinner 2
4 24.59 3.61 Female No Sun Dinner 4
5 25.29 4.71 Male No Sun Dinner 4
.. ... ... ... ... ... ... ...
240 27.18 2.00 Female Yes Sat Dinner 2
241 22.67 2.00 Male Yes Sat Dinner 2
242 17.82 1.75 Male No Sat Dinner 2
243 18.78 3.00 Female No Thur Dinner 2
[244 rows x 7 columns]
"""
# ------------------------------------------------------------------------------
# output data and save them into a csv file
df.to_csv('E:\\demo.csv', encoding='utf-8', index=False)
# With index=False, the row index is not written out as a column when saving
# the csv file.
# encoding='utf-8' ensures that any special characters in the data are written
# correctly.
| 36.888889
| 84
| 0.533133
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,844
| 0.925703
|
bedb078b9701b035a6b7ffcad6706445ac8dade2
| 768
|
py
|
Python
|
InvoiceItemModel.py
|
kevinyjiang/cpa-generator
|
c21cd1f898cf068daff8a6937d6cefc591b16ab1
|
[
"MIT"
] | 2
|
2018-09-26T19:18:45.000Z
|
2018-11-14T00:38:28.000Z
|
InvoiceItemModel.py
|
kevinyjiang/cpa-generator
|
c21cd1f898cf068daff8a6937d6cefc591b16ab1
|
[
"MIT"
] | null | null | null |
InvoiceItemModel.py
|
kevinyjiang/cpa-generator
|
c21cd1f898cf068daff8a6937d6cefc591b16ab1
|
[
"MIT"
] | null | null | null |
import config
class InvoiceItemModel(object):
def __init__(self, itemType, quantity):
self.itemType = itemType
self.quantity = int(quantity)
self.unitPrice = 0
if itemType == 'Portraiture':
self.unitPrice = config.PORTRAIT_RATE
else:
self.unitPrice = config.EVENT_RATE
def __str__(self):
return """<tr>
<td><strong>{} (1hr)</strong></td>
<td>{}</td>
<td>${}</td>
<td>${}</td>
</tr>""".format(str(self.itemType),
str(self.quantity),
str(self.unitPrice),
str(self.unitPrice * self.quantity))
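# --- Usage sketch (illustrative; assumes config defines PORTRAIT_RATE and
# --- EVENT_RATE) ---
#
#     item = InvoiceItemModel('Portraiture', quantity='3')
#     html_row = str(item)  # renders a <tr> ready to embed in the invoice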
| 30.72
| 68
| 0.453125
| 753
| 0.980469
| 0
| 0
| 0
| 0
| 0
| 0
| 198
| 0.257813
|
bedb1dc2f3fdaeceb37c80ae1a87e69944c3c668
| 1,725
|
py
|
Python
|
lambda/populateDB/lambda_function.py
|
aws-samples/amazon-connect-dynamic-ivr-menus
|
911f5d04cf78d3097cfe7e169bd0062459d61ec4
|
[
"MIT-0"
] | 4
|
2021-06-24T14:42:42.000Z
|
2021-12-13T07:08:48.000Z
|
lambda/populateDB/lambda_function.py
|
aws-samples/amazon-connect-dynamic-ivr-menus
|
911f5d04cf78d3097cfe7e169bd0062459d61ec4
|
[
"MIT-0"
] | 1
|
2021-12-13T06:53:39.000Z
|
2021-12-13T06:53:39.000Z
|
lambda/populateDB/lambda_function.py
|
aws-samples/amazon-connect-dynamic-ivr-menus
|
911f5d04cf78d3097cfe7e169bd0062459d61ec4
|
[
"MIT-0"
] | 2
|
2021-06-10T18:54:03.000Z
|
2021-12-13T08:07:05.000Z
|
import json
import boto3
import os
def lambda_handler(event, context):
dynamodb = boto3.resource('dynamodb')
customerTable = os.environ['customerTable']
table1 = dynamodb.Table(customerTable)
policiesTable = os.environ['policiesTable']
table2 = dynamodb.Table(policiesTable)
# Phone numbers should follow international format E.164
table1.put_item(
Item={
'clientID': '+3526919xxxxxx',
'clientName': 'Marius',
'clientPolicies': ['car','house']
}
)
table1.put_item(
Item={
'clientID': '+3526919xxxxxx',
'clientName': 'John',
'clientPolicies': ['boat','pet']
}
)
table2.put_item(
Item={
'policyID': 'car',
'description': 'Your car insurance covers third party damage and theft. Authorized service points are this and that.'
}
)
table2.put_item(
Item={
'policyID': 'house',
            'description': 'Your house insurance covers damage caused by natural disasters, fires and earthquakes. To file a claim, please visit our website.'
}
)
table2.put_item(
Item={
'policyID': 'boat',
            'description': 'Your boat insurance covers damage caused by natural disasters and fires. To file a claim, please visit our website.'
}
)
table2.put_item(
Item={
'policyID': 'pet',
            'description': 'Your pet insurance covers any medical interventions required to keep your pet healthy. For a list of approved vet centers, please visit our website.'
}
)
return 'ok'
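# --- Local test sketch (illustrative; not part of the deployed function) ---
# The handler only needs the two table names from the environment, so it can
# be exercised locally against valid AWS credentials; the table names below
# are assumptions:
#
#     os.environ['customerTable'] = 'Customers'
#     os.environ['policiesTable'] = 'Policies'
#     print(lambda_handler({}, None))  # -> 'ok'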
| 28.278689
| 176
| 0.584348
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 876
| 0.507826
|
bedcd44ac29b275e927dc09d0e22f32d04f7138a
| 59
|
py
|
Python
|
pyds/heap/__init__.py
|
nitinkatyal1314/data-structures
|
2e7f5b99a6b09cea48f729682d9431b72afbfd7a
|
[
"MIT"
] | 6
|
2021-04-06T18:14:59.000Z
|
2021-07-18T03:26:03.000Z
|
pyds/heap/__init__.py
|
nitinkatyal1314/data-structures
|
2e7f5b99a6b09cea48f729682d9431b72afbfd7a
|
[
"MIT"
] | null | null | null |
pyds/heap/__init__.py
|
nitinkatyal1314/data-structures
|
2e7f5b99a6b09cea48f729682d9431b72afbfd7a
|
[
"MIT"
] | null | null | null |
from .api import HeapAPI as Heap
from .api import HeapType
| 19.666667
| 32
| 0.79661
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
bedeaa04e3aa523fae916c1f3ad83805bf94106f
| 2,849
|
py
|
Python
|
examples/s5b_transfer/s5b_receiver.py
|
isabella232/slixmpp
|
e15e6735f1dbfc66a5d43efe9fa9e7f5c9d1610a
|
[
"BSD-3-Clause"
] | null | null | null |
examples/s5b_transfer/s5b_receiver.py
|
isabella232/slixmpp
|
e15e6735f1dbfc66a5d43efe9fa9e7f5c9d1610a
|
[
"BSD-3-Clause"
] | 1
|
2021-02-24T07:58:40.000Z
|
2021-02-24T07:58:40.000Z
|
examples/s5b_transfer/s5b_receiver.py
|
isabella232/slixmpp
|
e15e6735f1dbfc66a5d43efe9fa9e7f5c9d1610a
|
[
"BSD-3-Clause"
] | null | null | null |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Slixmpp: The Slick XMPP Library
Copyright (C) 2015 Emmanuel Gil Peyrot
This file is part of Slixmpp.
See the file LICENSE for copying permission.
"""
import asyncio
import logging
from getpass import getpass
from argparse import ArgumentParser
import slixmpp
class S5BReceiver(slixmpp.ClientXMPP):
"""
A basic example of creating and using a SOCKS5 bytestream.
"""
def __init__(self, jid, password, filename):
slixmpp.ClientXMPP.__init__(self, jid, password)
self.file = open(filename, 'wb')
self.add_event_handler("socks5_connected", self.stream_opened)
self.add_event_handler("socks5_data", self.stream_data)
self.add_event_handler("socks5_closed", self.stream_closed)
def stream_opened(self, sid):
logging.info('Stream opened. %s', sid)
def stream_data(self, data):
self.file.write(data)
def stream_closed(self, exception):
logging.info('Stream closed. %s', exception)
self.file.close()
self.disconnect()
if __name__ == '__main__':
# Setup the command line arguments.
parser = ArgumentParser()
# Output verbosity options.
parser.add_argument("-q", "--quiet", help="set logging to ERROR",
action="store_const", dest="loglevel",
const=logging.ERROR, default=logging.INFO)
parser.add_argument("-d", "--debug", help="set logging to DEBUG",
action="store_const", dest="loglevel",
const=logging.DEBUG, default=logging.INFO)
# JID and password options.
parser.add_argument("-j", "--jid", dest="jid",
help="JID to use")
parser.add_argument("-p", "--password", dest="password",
help="password to use")
parser.add_argument("-o", "--out", dest="filename",
help="file to save to")
args = parser.parse_args()
# Setup logging.
logging.basicConfig(level=args.loglevel,
format='%(levelname)-8s %(message)s')
if args.jid is None:
args.jid = input("Username: ")
if args.password is None:
args.password = getpass("Password: ")
if args.filename is None:
args.filename = input("File path: ")
# Setup the S5BReceiver and register plugins. Note that while plugins may
# have interdependencies, the order in which you register them does
# not matter.
xmpp = S5BReceiver(args.jid, args.password, args.filename)
xmpp.register_plugin('xep_0030') # Service Discovery
xmpp.register_plugin('xep_0065', {
'auto_accept': True
}) # SOCKS5 Bytestreams
# Connect to the XMPP server and start processing XMPP stanzas.
xmpp.connect()
xmpp.process(forever=False)
| 31.307692
| 77
| 0.630046
| 763
| 0.267813
| 0
| 0
| 0
| 0
| 0
| 0
| 1,072
| 0.376272
|
bedfb697a1311d179da9b0d371384f0a26973131
| 2,170
|
py
|
Python
|
api/app/routers/weather_models.py
|
bcgov/wps
|
71df0de72de9cd656dc9ebf8461ffe47cfb155f6
|
[
"Apache-2.0"
] | 19
|
2020-01-31T21:51:31.000Z
|
2022-01-07T14:40:03.000Z
|
api/app/routers/weather_models.py
|
bcgov/wps
|
71df0de72de9cd656dc9ebf8461ffe47cfb155f6
|
[
"Apache-2.0"
] | 1,680
|
2020-01-24T23:25:08.000Z
|
2022-03-31T23:50:27.000Z
|
api/app/routers/weather_models.py
|
bcgov/wps
|
71df0de72de9cd656dc9ebf8461ffe47cfb155f6
|
[
"Apache-2.0"
] | 6
|
2020-04-28T22:41:08.000Z
|
2021-05-05T18:16:06.000Z
|
""" Routers for weather_models.
"""
import logging
from fastapi import APIRouter, Depends
from app.auth import authentication_required, audit
from app.weather_models import ModelEnum
from app.schemas.weather_models import (
WeatherModelPredictionSummaryResponse,
WeatherStationsModelRunsPredictionsResponse)
from app.schemas.shared import WeatherDataRequest
from app.weather_models.fetch.summaries import fetch_model_prediction_summaries
from app.weather_models.fetch.predictions import (
fetch_model_run_predictions_by_station_code)
logger = logging.getLogger(__name__)
router = APIRouter(
prefix="/weather_models",
dependencies=[Depends(audit), Depends(authentication_required)],
)
@router.post('/{model}/predictions/summaries/',
response_model=WeatherModelPredictionSummaryResponse)
async def get_model_prediction_summaries(
model: ModelEnum, request: WeatherDataRequest):
""" Returns a summary of predictions for a given model. """
try:
logger.info('/weather_models/%s/predictions/summaries/', model.name)
summaries = await fetch_model_prediction_summaries(model, request.stations, request.time_of_interest)
return WeatherModelPredictionSummaryResponse(summaries=summaries)
except Exception as exception:
logger.critical(exception, exc_info=True)
raise
@router.post('/{model}/predictions/most_recent/',
response_model=WeatherStationsModelRunsPredictionsResponse)
async def get_most_recent_model_values(
model: ModelEnum, request: WeatherDataRequest):
""" Returns the weather values for the last model prediction that was issued
for the station before actual weather readings became available.
"""
try:
logger.info('/weather_models/%s/predictions/most_recent/', model.name)
station_predictions = await fetch_model_run_predictions_by_station_code(
model, request.stations, request.time_of_interest)
return WeatherStationsModelRunsPredictionsResponse(
stations=station_predictions)
except Exception as exception:
logger.critical(exception, exc_info=True)
raise
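# --- Wiring sketch (illustrative) ---
# The router is mounted on the application like any FastAPI APIRouter:
#
#     from fastapi import FastAPI
#     app = FastAPI()
#     app.include_router(router)  # exposes POST /weather_models/{model}/predictions/...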
| 38.070175
| 109
| 0.765438
| 0
| 0
| 0
| 0
| 1,458
| 0.671889
| 1,220
| 0.562212
| 420
| 0.193548
|
bee066a8fc595636f1ed42106327e650d743c5d7
| 1,529
|
py
|
Python
|
155.min-stack.py
|
elfgzp/leetCode
|
964c6574d310a9a6c486bf638487fd2f72b83b3f
|
[
"MIT"
] | 3
|
2019-04-12T06:22:56.000Z
|
2019-05-04T04:25:01.000Z
|
155.min-stack.py
|
elfgzp/Leetcode
|
964c6574d310a9a6c486bf638487fd2f72b83b3f
|
[
"MIT"
] | null | null | null |
155.min-stack.py
|
elfgzp/Leetcode
|
964c6574d310a9a6c486bf638487fd2f72b83b3f
|
[
"MIT"
] | null | null | null |
#
# @lc app=leetcode.cn id=155 lang=python3
#
# [155] Min Stack
#
# https://leetcode-cn.com/problems/min-stack/description/
#
# algorithms
# Easy (47.45%)
# Total Accepted: 19.4K
# Total Submissions: 40.3K
# Testcase Example: '["MinStack","push","push","push","getMin","pop","top","getMin"]\n[[],[-2],[0],[-3],[],[],[],[]]'
#
# Design a stack that supports push, pop, and top, and can retrieve the
# minimum element in constant time.
#
#
# push(x) -- push element x onto the stack.
# pop() -- remove the element on top of the stack.
# top() -- get the top element.
# getMin() -- retrieve the minimum element in the stack.
#
#
# Example:
#
# MinStack minStack = new MinStack();
# minStack.push(-2);
# minStack.push(0);
# minStack.push(-3);
# minStack.getMin(); --> returns -3.
# minStack.pop();
# minStack.top(); --> returns 0.
# minStack.getMin(); --> returns -2.
#
#
#
class MinStack:
    def __init__(self):
        """
        initialize your data structure here.
        """
        self._stack = []
        # parallel stack of running minimums, so every operation is O(1)
        self._mins = []
    def push(self, x: int) -> None:
        self._stack.append(x)
        if self._mins:
            self._mins.append(min(self._mins[-1], x))
        else:
            self._mins.append(x)
    def pop(self) -> None:
        self._stack.pop()
        self._mins.pop()
    def top(self) -> int:
        return self._stack[-1]
    def getMin(self) -> int:
        return self._mins[-1]
# Your MinStack object will be instantiated and called as such:
# obj = MinStack()
# obj.push(x)
# obj.pop()
# param_3 = obj.top()
# param_4 = obj.getMin()
| 19.602564
| 118
| 0.530412
| 655
| 0.390346
| 0
| 0
| 0
| 0
| 0
| 0
| 1,029
| 0.61323
|
bee082fd43d018efe615e1efde05a3a482204b84
| 48,845
|
py
|
Python
|
demisto_client/demisto_api/models/investigation_playbook_task.py
|
guytest/demisto-py
|
8ca4f56a6177668151b5656cbe675a377003c0e9
|
[
"Apache-2.0"
] | 1
|
2020-04-08T14:36:06.000Z
|
2020-04-08T14:36:06.000Z
|
demisto_client/demisto_api/models/investigation_playbook_task.py
|
guytest/demisto-py
|
8ca4f56a6177668151b5656cbe675a377003c0e9
|
[
"Apache-2.0"
] | null | null | null |
demisto_client/demisto_api/models/investigation_playbook_task.py
|
guytest/demisto-py
|
8ca4f56a6177668151b5656cbe675a377003c0e9
|
[
"Apache-2.0"
] | null | null | null |
# coding: utf-8
"""
Demisto API
This is the public REST API to integrate with the demisto server. HTTP request can be sent using any HTTP-client. For an example dedicated client take a look at: https://github.com/demisto/demisto-py. Requests must include API-key that can be generated in the Demisto web client under 'Settings' -> 'Integrations' -> 'API keys' Optimistic Locking and Versioning\\: When using Demisto REST API, you will need to make sure to work on the latest version of the item (incident, entry, etc.), otherwise, you will get a DB version error (which not allow you to override a newer item). In addition, you can pass 'version\\: -1' to force data override (make sure that other users data might be lost). Assume that Alice and Bob both read the same data from Demisto server, then they both changed the data, and then both tried to write the new versions back to the server. Whose changes should be saved? Alice’s? Bob’s? To solve this, each data item in Demisto has a numeric incremental version. If Alice saved an item with version 4 and Bob trying to save the same item with version 3, Demisto will rollback Bob request and returns a DB version conflict error. Bob will need to get the latest item and work on it so Alice work will not get lost. Example request using 'curl'\\: ``` curl 'https://hostname:443/incidents/search' -H 'content-type: application/json' -H 'accept: application/json' -H 'Authorization: <API Key goes here>' --data-binary '{\"filter\":{\"query\":\"-status:closed -category:job\",\"period\":{\"by\":\"day\",\"fromValue\":7}}}' --compressed ``` # noqa: E501
OpenAPI spec version: 2.0.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
from demisto_client.demisto_api.models.advance_arg import AdvanceArg # noqa: F401,E501
from demisto_client.demisto_api.models.data_collection_form import DataCollectionForm # noqa: F401,E501
from demisto_client.demisto_api.models.evidence_data import EvidenceData # noqa: F401,E501
from demisto_client.demisto_api.models.field_mapping import FieldMapping # noqa: F401,E501
from demisto_client.demisto_api.models.inv_playbook_task_complete_data import InvPlaybookTaskCompleteData # noqa: F401,E501
# from demisto_client.demisto_api.models.investigation_playbook import InvestigationPlaybook # noqa: F401,E501
from demisto_client.demisto_api.models.notifiable_item import NotifiableItem # noqa: F401,E501
from demisto_client.demisto_api.models.reputation_calc_alg import ReputationCalcAlg # noqa: F401,E501
from demisto_client.demisto_api.models.sla import SLA # noqa: F401,E501
from demisto_client.demisto_api.models.task import Task # noqa: F401,E501
from demisto_client.demisto_api.models.task_condition import TaskCondition # noqa: F401,E501
from demisto_client.demisto_api.models.task_loop import TaskLoop # noqa: F401,E501
from demisto_client.demisto_api.models.task_state import TaskState # noqa: F401,E501
from demisto_client.demisto_api.models.task_type import TaskType # noqa: F401,E501
from demisto_client.demisto_api.models.task_view import TaskView # noqa: F401,E501
from demisto_client.demisto_api.models.timer_trigger import TimerTrigger # noqa: F401,E501
class InvestigationPlaybookTask(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'arguments': 'dict(str, object)',
'assignee': 'str',
'assignee_set': 'bool',
'blocking_tasks': 'list[str]',
'comments': 'bool',
'completed_by': 'str',
'completed_count': 'int',
'completed_date': 'datetime',
'conditions': 'list[TaskCondition]',
'continue_on_error': 'bool',
'default_assignee': 'str',
'default_assignee_complex': 'AdvanceArg',
'default_reminder': 'int',
'due_date': 'datetime',
'due_date_set': 'bool',
'entries': 'list[str]',
'evidence_data': 'EvidenceData',
'execution_count': 'int',
'field_mapping': 'list[FieldMapping]',
'for_each_index': 'int',
'for_each_inputs': 'dict(str, list[object])',
'form': 'DataCollectionForm',
'id': 'str',
'ignore_worker': 'bool',
'indent': 'int',
'input': 'str',
'loop': 'TaskLoop',
'message': 'NotifiableItem',
'next_tasks': 'dict(str, list[str])',
'note': 'bool',
'outputs': 'dict(str, object)',
'parent_block_count': 'int',
'parent_playbook_id': 'str',
'patched': 'bool',
'playbook_inputs': 'dict(str, object)',
'previous_tasks': 'dict(str, list[str])',
'reminder': 'int',
'reputation_calc': 'ReputationCalcAlg',
'restricted_completion': 'bool',
'script_arguments': 'dict(str, AdvanceArg)',
'separate_context': 'bool',
'sla': 'SLA',
'sla_reminder': 'SLA',
'start_date': 'datetime',
'state': 'TaskState',
'sub_playbook': 'InvestigationPlaybook',
'task': 'Task',
'task_complete_data': 'list[InvPlaybookTaskCompleteData]',
'task_id': 'str',
'timer_triggers': 'list[TimerTrigger]',
'type': 'TaskType',
'view': 'TaskView',
'will_not_execute_count': 'int'
}
attribute_map = {
'arguments': 'arguments',
'assignee': 'assignee',
'assignee_set': 'assigneeSet',
'blocking_tasks': 'blockingTasks',
'comments': 'comments',
'completed_by': 'completedBy',
'completed_count': 'completedCount',
'completed_date': 'completedDate',
'conditions': 'conditions',
'continue_on_error': 'continueOnError',
'default_assignee': 'defaultAssignee',
'default_assignee_complex': 'defaultAssigneeComplex',
'default_reminder': 'defaultReminder',
'due_date': 'dueDate',
'due_date_set': 'dueDateSet',
'entries': 'entries',
'evidence_data': 'evidenceData',
'execution_count': 'executionCount',
'field_mapping': 'fieldMapping',
'for_each_index': 'forEachIndex',
'for_each_inputs': 'forEachInputs',
'form': 'form',
'id': 'id',
'ignore_worker': 'ignoreWorker',
'indent': 'indent',
'input': 'input',
'loop': 'loop',
'message': 'message',
'next_tasks': 'nextTasks',
'note': 'note',
'outputs': 'outputs',
'parent_block_count': 'parentBlockCount',
'parent_playbook_id': 'parentPlaybookID',
'patched': 'patched',
'playbook_inputs': 'playbookInputs',
'previous_tasks': 'previousTasks',
'reminder': 'reminder',
'reputation_calc': 'reputationCalc',
'restricted_completion': 'restrictedCompletion',
'script_arguments': 'scriptArguments',
'separate_context': 'separateContext',
'sla': 'sla',
'sla_reminder': 'slaReminder',
'start_date': 'startDate',
'state': 'state',
'sub_playbook': 'subPlaybook',
'task': 'task',
'task_complete_data': 'taskCompleteData',
'task_id': 'taskId',
'timer_triggers': 'timerTriggers',
'type': 'type',
'view': 'view',
'will_not_execute_count': 'willNotExecuteCount'
}
def __init__(self, arguments=None, assignee=None, assignee_set=None, blocking_tasks=None, comments=None, completed_by=None, completed_count=None, completed_date=None, conditions=None, continue_on_error=None, default_assignee=None, default_assignee_complex=None, default_reminder=None, due_date=None, due_date_set=None, entries=None, evidence_data=None, execution_count=None, field_mapping=None, for_each_index=None, for_each_inputs=None, form=None, id=None, ignore_worker=None, indent=None, input=None, loop=None, message=None, next_tasks=None, note=None, outputs=None, parent_block_count=None, parent_playbook_id=None, patched=None, playbook_inputs=None, previous_tasks=None, reminder=None, reputation_calc=None, restricted_completion=None, script_arguments=None, separate_context=None, sla=None, sla_reminder=None, start_date=None, state=None, sub_playbook=None, task=None, task_complete_data=None, task_id=None, timer_triggers=None, type=None, view=None, will_not_execute_count=None): # noqa: E501
"""InvestigationPlaybookTask - a model defined in Swagger""" # noqa: E501
self._arguments = None
self._assignee = None
self._assignee_set = None
self._blocking_tasks = None
self._comments = None
self._completed_by = None
self._completed_count = None
self._completed_date = None
self._conditions = None
self._continue_on_error = None
self._default_assignee = None
self._default_assignee_complex = None
self._default_reminder = None
self._due_date = None
self._due_date_set = None
self._entries = None
self._evidence_data = None
self._execution_count = None
self._field_mapping = None
self._for_each_index = None
self._for_each_inputs = None
self._form = None
self._id = None
self._ignore_worker = None
self._indent = None
self._input = None
self._loop = None
self._message = None
self._next_tasks = None
self._note = None
self._outputs = None
self._parent_block_count = None
self._parent_playbook_id = None
self._patched = None
self._playbook_inputs = None
self._previous_tasks = None
self._reminder = None
self._reputation_calc = None
self._restricted_completion = None
self._script_arguments = None
self._separate_context = None
self._sla = None
self._sla_reminder = None
self._start_date = None
self._state = None
self._sub_playbook = None
self._task = None
self._task_complete_data = None
self._task_id = None
self._timer_triggers = None
self._type = None
self._view = None
self._will_not_execute_count = None
self.discriminator = None
if arguments is not None:
self.arguments = arguments
if assignee is not None:
self.assignee = assignee
if assignee_set is not None:
self.assignee_set = assignee_set
if blocking_tasks is not None:
self.blocking_tasks = blocking_tasks
if comments is not None:
self.comments = comments
if completed_by is not None:
self.completed_by = completed_by
if completed_count is not None:
self.completed_count = completed_count
if completed_date is not None:
self.completed_date = completed_date
if conditions is not None:
self.conditions = conditions
if continue_on_error is not None:
self.continue_on_error = continue_on_error
if default_assignee is not None:
self.default_assignee = default_assignee
if default_assignee_complex is not None:
self.default_assignee_complex = default_assignee_complex
if default_reminder is not None:
self.default_reminder = default_reminder
if due_date is not None:
self.due_date = due_date
if due_date_set is not None:
self.due_date_set = due_date_set
if entries is not None:
self.entries = entries
if evidence_data is not None:
self.evidence_data = evidence_data
if execution_count is not None:
self.execution_count = execution_count
if field_mapping is not None:
self.field_mapping = field_mapping
if for_each_index is not None:
self.for_each_index = for_each_index
if for_each_inputs is not None:
self.for_each_inputs = for_each_inputs
if form is not None:
self.form = form
if id is not None:
self.id = id
if ignore_worker is not None:
self.ignore_worker = ignore_worker
if indent is not None:
self.indent = indent
if input is not None:
self.input = input
if loop is not None:
self.loop = loop
if message is not None:
self.message = message
if next_tasks is not None:
self.next_tasks = next_tasks
if note is not None:
self.note = note
if outputs is not None:
self.outputs = outputs
if parent_block_count is not None:
self.parent_block_count = parent_block_count
if parent_playbook_id is not None:
self.parent_playbook_id = parent_playbook_id
if patched is not None:
self.patched = patched
if playbook_inputs is not None:
self.playbook_inputs = playbook_inputs
if previous_tasks is not None:
self.previous_tasks = previous_tasks
if reminder is not None:
self.reminder = reminder
if reputation_calc is not None:
self.reputation_calc = reputation_calc
if restricted_completion is not None:
self.restricted_completion = restricted_completion
if script_arguments is not None:
self.script_arguments = script_arguments
if separate_context is not None:
self.separate_context = separate_context
if sla is not None:
self.sla = sla
if sla_reminder is not None:
self.sla_reminder = sla_reminder
if start_date is not None:
self.start_date = start_date
if state is not None:
self.state = state
if sub_playbook is not None:
self.sub_playbook = sub_playbook
if task is not None:
self.task = task
if task_complete_data is not None:
self.task_complete_data = task_complete_data
if task_id is not None:
self.task_id = task_id
if timer_triggers is not None:
self.timer_triggers = timer_triggers
if type is not None:
self.type = type
if view is not None:
self.view = view
if will_not_execute_count is not None:
self.will_not_execute_count = will_not_execute_count
@property
def arguments(self):
"""Gets the arguments of this InvestigationPlaybookTask. # noqa: E501
:return: The arguments of this InvestigationPlaybookTask. # noqa: E501
:rtype: dict(str, object)
"""
return self._arguments
@arguments.setter
def arguments(self, arguments):
"""Sets the arguments of this InvestigationPlaybookTask.
:param arguments: The arguments of this InvestigationPlaybookTask. # noqa: E501
:type: dict(str, object)
"""
self._arguments = arguments
@property
def assignee(self):
"""Gets the assignee of this InvestigationPlaybookTask. # noqa: E501
:return: The assignee of this InvestigationPlaybookTask. # noqa: E501
:rtype: str
"""
return self._assignee
@assignee.setter
def assignee(self, assignee):
"""Sets the assignee of this InvestigationPlaybookTask.
:param assignee: The assignee of this InvestigationPlaybookTask. # noqa: E501
:type: str
"""
self._assignee = assignee
@property
def assignee_set(self):
"""Gets the assignee_set of this InvestigationPlaybookTask. # noqa: E501
:return: The assignee_set of this InvestigationPlaybookTask. # noqa: E501
:rtype: bool
"""
return self._assignee_set
@assignee_set.setter
def assignee_set(self, assignee_set):
"""Sets the assignee_set of this InvestigationPlaybookTask.
:param assignee_set: The assignee_set of this InvestigationPlaybookTask. # noqa: E501
:type: bool
"""
self._assignee_set = assignee_set
@property
def blocking_tasks(self):
"""Gets the blocking_tasks of this InvestigationPlaybookTask. # noqa: E501
:return: The blocking_tasks of this InvestigationPlaybookTask. # noqa: E501
:rtype: list[str]
"""
return self._blocking_tasks
@blocking_tasks.setter
def blocking_tasks(self, blocking_tasks):
"""Sets the blocking_tasks of this InvestigationPlaybookTask.
:param blocking_tasks: The blocking_tasks of this InvestigationPlaybookTask. # noqa: E501
:type: list[str]
"""
self._blocking_tasks = blocking_tasks
@property
def comments(self):
"""Gets the comments of this InvestigationPlaybookTask. # noqa: E501
Whether this task had any comments or not # noqa: E501
:return: The comments of this InvestigationPlaybookTask. # noqa: E501
:rtype: bool
"""
return self._comments
@comments.setter
def comments(self, comments):
"""Sets the comments of this InvestigationPlaybookTask.
Whether this task had any comments or not # noqa: E501
:param comments: The comments of this InvestigationPlaybookTask. # noqa: E501
:type: bool
"""
self._comments = comments
@property
def completed_by(self):
"""Gets the completed_by of this InvestigationPlaybookTask. # noqa: E501
:return: The completed_by of this InvestigationPlaybookTask. # noqa: E501
:rtype: str
"""
return self._completed_by
@completed_by.setter
def completed_by(self, completed_by):
"""Sets the completed_by of this InvestigationPlaybookTask.
:param completed_by: The completed_by of this InvestigationPlaybookTask. # noqa: E501
:type: str
"""
self._completed_by = completed_by
@property
def completed_count(self):
"""Gets the completed_count of this InvestigationPlaybookTask. # noqa: E501
:return: The completed_count of this InvestigationPlaybookTask. # noqa: E501
:rtype: int
"""
return self._completed_count
@completed_count.setter
def completed_count(self, completed_count):
"""Sets the completed_count of this InvestigationPlaybookTask.
:param completed_count: The completed_count of this InvestigationPlaybookTask. # noqa: E501
:type: int
"""
self._completed_count = completed_count
@property
def completed_date(self):
"""Gets the completed_date of this InvestigationPlaybookTask. # noqa: E501
:return: The completed_date of this InvestigationPlaybookTask. # noqa: E501
:rtype: datetime
"""
return self._completed_date
@completed_date.setter
def completed_date(self, completed_date):
"""Sets the completed_date of this InvestigationPlaybookTask.
:param completed_date: The completed_date of this InvestigationPlaybookTask. # noqa: E501
:type: datetime
"""
self._completed_date = completed_date
@property
def conditions(self):
"""Gets the conditions of this InvestigationPlaybookTask. # noqa: E501
        Conditions - optional list of conditions to run when task is conditional. We check conditions in order (i.e. the first one satisfied is used)  # noqa: E501
:return: The conditions of this InvestigationPlaybookTask. # noqa: E501
:rtype: list[TaskCondition]
"""
return self._conditions
@conditions.setter
def conditions(self, conditions):
"""Sets the conditions of this InvestigationPlaybookTask.
        Conditions - optional list of conditions to run when task is conditional. We check conditions in order (i.e. the first one satisfied is used)  # noqa: E501
:param conditions: The conditions of this InvestigationPlaybookTask. # noqa: E501
:type: list[TaskCondition]
"""
self._conditions = conditions
@property
def continue_on_error(self):
"""Gets the continue_on_error of this InvestigationPlaybookTask. # noqa: E501
:return: The continue_on_error of this InvestigationPlaybookTask. # noqa: E501
:rtype: bool
"""
return self._continue_on_error
@continue_on_error.setter
def continue_on_error(self, continue_on_error):
"""Sets the continue_on_error of this InvestigationPlaybookTask.
:param continue_on_error: The continue_on_error of this InvestigationPlaybookTask. # noqa: E501
:type: bool
"""
self._continue_on_error = continue_on_error
@property
def default_assignee(self):
"""Gets the default_assignee of this InvestigationPlaybookTask. # noqa: E501
:return: The default_assignee of this InvestigationPlaybookTask. # noqa: E501
:rtype: str
"""
return self._default_assignee
@default_assignee.setter
def default_assignee(self, default_assignee):
"""Sets the default_assignee of this InvestigationPlaybookTask.
:param default_assignee: The default_assignee of this InvestigationPlaybookTask. # noqa: E501
:type: str
"""
self._default_assignee = default_assignee
@property
def default_assignee_complex(self):
"""Gets the default_assignee_complex of this InvestigationPlaybookTask. # noqa: E501
:return: The default_assignee_complex of this InvestigationPlaybookTask. # noqa: E501
:rtype: AdvanceArg
"""
return self._default_assignee_complex
@default_assignee_complex.setter
def default_assignee_complex(self, default_assignee_complex):
"""Sets the default_assignee_complex of this InvestigationPlaybookTask.
:param default_assignee_complex: The default_assignee_complex of this InvestigationPlaybookTask. # noqa: E501
:type: AdvanceArg
"""
self._default_assignee_complex = default_assignee_complex
@property
def default_reminder(self):
"""Gets the default_reminder of this InvestigationPlaybookTask. # noqa: E501
:return: The default_reminder of this InvestigationPlaybookTask. # noqa: E501
:rtype: int
"""
return self._default_reminder
@default_reminder.setter
def default_reminder(self, default_reminder):
"""Sets the default_reminder of this InvestigationPlaybookTask.
:param default_reminder: The default_reminder of this InvestigationPlaybookTask. # noqa: E501
:type: int
"""
self._default_reminder = default_reminder
@property
def due_date(self):
"""Gets the due_date of this InvestigationPlaybookTask. # noqa: E501
:return: The due_date of this InvestigationPlaybookTask. # noqa: E501
:rtype: datetime
"""
return self._due_date
@due_date.setter
def due_date(self, due_date):
"""Sets the due_date of this InvestigationPlaybookTask.
:param due_date: The due_date of this InvestigationPlaybookTask. # noqa: E501
:type: datetime
"""
self._due_date = due_date
@property
def due_date_set(self):
"""Gets the due_date_set of this InvestigationPlaybookTask. # noqa: E501
:return: The due_date_set of this InvestigationPlaybookTask. # noqa: E501
:rtype: bool
"""
return self._due_date_set
@due_date_set.setter
def due_date_set(self, due_date_set):
"""Sets the due_date_set of this InvestigationPlaybookTask.
:param due_date_set: The due_date_set of this InvestigationPlaybookTask. # noqa: E501
:type: bool
"""
self._due_date_set = due_date_set
@property
def entries(self):
"""Gets the entries of this InvestigationPlaybookTask. # noqa: E501
:return: The entries of this InvestigationPlaybookTask. # noqa: E501
:rtype: list[str]
"""
return self._entries
@entries.setter
def entries(self, entries):
"""Sets the entries of this InvestigationPlaybookTask.
:param entries: The entries of this InvestigationPlaybookTask. # noqa: E501
:type: list[str]
"""
self._entries = entries
@property
def evidence_data(self):
"""Gets the evidence_data of this InvestigationPlaybookTask. # noqa: E501
:return: The evidence_data of this InvestigationPlaybookTask. # noqa: E501
:rtype: EvidenceData
"""
return self._evidence_data
@evidence_data.setter
def evidence_data(self, evidence_data):
"""Sets the evidence_data of this InvestigationPlaybookTask.
:param evidence_data: The evidence_data of this InvestigationPlaybookTask. # noqa: E501
:type: EvidenceData
"""
self._evidence_data = evidence_data
@property
def execution_count(self):
"""Gets the execution_count of this InvestigationPlaybookTask. # noqa: E501
:return: The execution_count of this InvestigationPlaybookTask. # noqa: E501
:rtype: int
"""
return self._execution_count
@execution_count.setter
def execution_count(self, execution_count):
"""Sets the execution_count of this InvestigationPlaybookTask.
:param execution_count: The execution_count of this InvestigationPlaybookTask. # noqa: E501
:type: int
"""
self._execution_count = execution_count
@property
def field_mapping(self):
"""Gets the field_mapping of this InvestigationPlaybookTask. # noqa: E501
:return: The field_mapping of this InvestigationPlaybookTask. # noqa: E501
:rtype: list[FieldMapping]
"""
return self._field_mapping
@field_mapping.setter
def field_mapping(self, field_mapping):
"""Sets the field_mapping of this InvestigationPlaybookTask.
:param field_mapping: The field_mapping of this InvestigationPlaybookTask. # noqa: E501
:type: list[FieldMapping]
"""
self._field_mapping = field_mapping
@property
def for_each_index(self):
"""Gets the for_each_index of this InvestigationPlaybookTask. # noqa: E501
Parameters needed for loops # noqa: E501
:return: The for_each_index of this InvestigationPlaybookTask. # noqa: E501
:rtype: int
"""
return self._for_each_index
@for_each_index.setter
def for_each_index(self, for_each_index):
"""Sets the for_each_index of this InvestigationPlaybookTask.
Parameters needed for loops # noqa: E501
:param for_each_index: The for_each_index of this InvestigationPlaybookTask. # noqa: E501
:type: int
"""
self._for_each_index = for_each_index
@property
def for_each_inputs(self):
"""Gets the for_each_inputs of this InvestigationPlaybookTask. # noqa: E501
:return: The for_each_inputs of this InvestigationPlaybookTask. # noqa: E501
:rtype: dict(str, list[object])
"""
return self._for_each_inputs
@for_each_inputs.setter
def for_each_inputs(self, for_each_inputs):
"""Sets the for_each_inputs of this InvestigationPlaybookTask.
:param for_each_inputs: The for_each_inputs of this InvestigationPlaybookTask. # noqa: E501
:type: dict(str, list[object])
"""
self._for_each_inputs = for_each_inputs
@property
def form(self):
"""Gets the form of this InvestigationPlaybookTask. # noqa: E501
:return: The form of this InvestigationPlaybookTask. # noqa: E501
:rtype: DataCollectionForm
"""
return self._form
@form.setter
def form(self, form):
"""Sets the form of this InvestigationPlaybookTask.
:param form: The form of this InvestigationPlaybookTask. # noqa: E501
:type: DataCollectionForm
"""
self._form = form
@property
def id(self):
"""Gets the id of this InvestigationPlaybookTask. # noqa: E501
:return: The id of this InvestigationPlaybookTask. # noqa: E501
:rtype: str
"""
return self._id
@id.setter
def id(self, id):
"""Sets the id of this InvestigationPlaybookTask.
:param id: The id of this InvestigationPlaybookTask. # noqa: E501
:type: str
"""
self._id = id
@property
def ignore_worker(self):
"""Gets the ignore_worker of this InvestigationPlaybookTask. # noqa: E501
Do not run this task in a worker # noqa: E501
:return: The ignore_worker of this InvestigationPlaybookTask. # noqa: E501
:rtype: bool
"""
return self._ignore_worker
@ignore_worker.setter
def ignore_worker(self, ignore_worker):
"""Sets the ignore_worker of this InvestigationPlaybookTask.
Do not run this task in a worker # noqa: E501
:param ignore_worker: The ignore_worker of this InvestigationPlaybookTask. # noqa: E501
:type: bool
"""
self._ignore_worker = ignore_worker
@property
def indent(self):
"""Gets the indent of this InvestigationPlaybookTask. # noqa: E501
:return: The indent of this InvestigationPlaybookTask. # noqa: E501
:rtype: int
"""
return self._indent
@indent.setter
def indent(self, indent):
"""Sets the indent of this InvestigationPlaybookTask.
:param indent: The indent of this InvestigationPlaybookTask. # noqa: E501
:type: int
"""
self._indent = indent
@property
def input(self):
"""Gets the input of this InvestigationPlaybookTask. # noqa: E501
:return: The input of this InvestigationPlaybookTask. # noqa: E501
:rtype: str
"""
return self._input
@input.setter
def input(self, input):
"""Sets the input of this InvestigationPlaybookTask.
:param input: The input of this InvestigationPlaybookTask. # noqa: E501
:type: str
"""
self._input = input
@property
def loop(self):
"""Gets the loop of this InvestigationPlaybookTask. # noqa: E501
:return: The loop of this InvestigationPlaybookTask. # noqa: E501
:rtype: TaskLoop
"""
return self._loop
@loop.setter
def loop(self, loop):
"""Sets the loop of this InvestigationPlaybookTask.
:param loop: The loop of this InvestigationPlaybookTask. # noqa: E501
:type: TaskLoop
"""
self._loop = loop
@property
def message(self):
"""Gets the message of this InvestigationPlaybookTask. # noqa: E501
:return: The message of this InvestigationPlaybookTask. # noqa: E501
:rtype: NotifiableItem
"""
return self._message
@message.setter
def message(self, message):
"""Sets the message of this InvestigationPlaybookTask.
:param message: The message of this InvestigationPlaybookTask. # noqa: E501
:type: NotifiableItem
"""
self._message = message
@property
def next_tasks(self):
"""Gets the next_tasks of this InvestigationPlaybookTask. # noqa: E501
:return: The next_tasks of this InvestigationPlaybookTask. # noqa: E501
:rtype: dict(str, list[str])
"""
return self._next_tasks
@next_tasks.setter
def next_tasks(self, next_tasks):
"""Sets the next_tasks of this InvestigationPlaybookTask.
:param next_tasks: The next_tasks of this InvestigationPlaybookTask. # noqa: E501
:type: dict(str, list[str])
"""
self._next_tasks = next_tasks
@property
def note(self):
"""Gets the note of this InvestigationPlaybookTask. # noqa: E501
:return: The note of this InvestigationPlaybookTask. # noqa: E501
:rtype: bool
"""
return self._note
@note.setter
def note(self, note):
"""Sets the note of this InvestigationPlaybookTask.
:param note: The note of this InvestigationPlaybookTask. # noqa: E501
:type: bool
"""
self._note = note
@property
def outputs(self):
"""Gets the outputs of this InvestigationPlaybookTask. # noqa: E501
:return: The outputs of this InvestigationPlaybookTask. # noqa: E501
:rtype: dict(str, object)
"""
return self._outputs
@outputs.setter
def outputs(self, outputs):
"""Sets the outputs of this InvestigationPlaybookTask.
:param outputs: The outputs of this InvestigationPlaybookTask. # noqa: E501
:type: dict(str, object)
"""
self._outputs = outputs
@property
def parent_block_count(self):
"""Gets the parent_block_count of this InvestigationPlaybookTask. # noqa: E501
        the number of tasks that are waiting on blocked tasks in sub-playbooks of this task  # noqa: E501
:return: The parent_block_count of this InvestigationPlaybookTask. # noqa: E501
:rtype: int
"""
return self._parent_block_count
@parent_block_count.setter
def parent_block_count(self, parent_block_count):
"""Sets the parent_block_count of this InvestigationPlaybookTask.
        the number of tasks that are waiting on blocked tasks in sub-playbooks of this task  # noqa: E501
:param parent_block_count: The parent_block_count of this InvestigationPlaybookTask. # noqa: E501
:type: int
"""
self._parent_block_count = parent_block_count
@property
def parent_playbook_id(self):
"""Gets the parent_playbook_id of this InvestigationPlaybookTask. # noqa: E501
:return: The parent_playbook_id of this InvestigationPlaybookTask. # noqa: E501
:rtype: str
"""
return self._parent_playbook_id
@parent_playbook_id.setter
def parent_playbook_id(self, parent_playbook_id):
"""Sets the parent_playbook_id of this InvestigationPlaybookTask.
:param parent_playbook_id: The parent_playbook_id of this InvestigationPlaybookTask. # noqa: E501
:type: str
"""
self._parent_playbook_id = parent_playbook_id
@property
def patched(self):
"""Gets the patched of this InvestigationPlaybookTask. # noqa: E501
Indicates whether this task was patched to InvPB and did not originally belong to the playbook # noqa: E501
:return: The patched of this InvestigationPlaybookTask. # noqa: E501
:rtype: bool
"""
return self._patched
@patched.setter
def patched(self, patched):
"""Sets the patched of this InvestigationPlaybookTask.
Indicates whether this task was patched to InvPB and did not originally belong to the playbook # noqa: E501
:param patched: The patched of this InvestigationPlaybookTask. # noqa: E501
:type: bool
"""
self._patched = patched
@property
def playbook_inputs(self):
"""Gets the playbook_inputs of this InvestigationPlaybookTask. # noqa: E501
:return: The playbook_inputs of this InvestigationPlaybookTask. # noqa: E501
:rtype: dict(str, object)
"""
return self._playbook_inputs
@playbook_inputs.setter
def playbook_inputs(self, playbook_inputs):
"""Sets the playbook_inputs of this InvestigationPlaybookTask.
:param playbook_inputs: The playbook_inputs of this InvestigationPlaybookTask. # noqa: E501
:type: dict(str, object)
"""
self._playbook_inputs = playbook_inputs
@property
def previous_tasks(self):
"""Gets the previous_tasks of this InvestigationPlaybookTask. # noqa: E501
:return: The previous_tasks of this InvestigationPlaybookTask. # noqa: E501
:rtype: dict(str, list[str])
"""
return self._previous_tasks
@previous_tasks.setter
def previous_tasks(self, previous_tasks):
"""Sets the previous_tasks of this InvestigationPlaybookTask.
:param previous_tasks: The previous_tasks of this InvestigationPlaybookTask. # noqa: E501
:type: dict(str, list[str])
"""
self._previous_tasks = previous_tasks
@property
def reminder(self):
"""Gets the reminder of this InvestigationPlaybookTask. # noqa: E501
        Duration in minutes; this field is not persisted here  # noqa: E501
:return: The reminder of this InvestigationPlaybookTask. # noqa: E501
:rtype: int
"""
return self._reminder
@reminder.setter
def reminder(self, reminder):
"""Sets the reminder of this InvestigationPlaybookTask.
        Duration in minutes; this field is not persisted here  # noqa: E501
:param reminder: The reminder of this InvestigationPlaybookTask. # noqa: E501
:type: int
"""
self._reminder = reminder
@property
def reputation_calc(self):
"""Gets the reputation_calc of this InvestigationPlaybookTask. # noqa: E501
:return: The reputation_calc of this InvestigationPlaybookTask. # noqa: E501
:rtype: ReputationCalcAlg
"""
return self._reputation_calc
@reputation_calc.setter
def reputation_calc(self, reputation_calc):
"""Sets the reputation_calc of this InvestigationPlaybookTask.
:param reputation_calc: The reputation_calc of this InvestigationPlaybookTask. # noqa: E501
:type: ReputationCalcAlg
"""
self._reputation_calc = reputation_calc
@property
def restricted_completion(self):
"""Gets the restricted_completion of this InvestigationPlaybookTask. # noqa: E501
:return: The restricted_completion of this InvestigationPlaybookTask. # noqa: E501
:rtype: bool
"""
return self._restricted_completion
@restricted_completion.setter
def restricted_completion(self, restricted_completion):
"""Sets the restricted_completion of this InvestigationPlaybookTask.
:param restricted_completion: The restricted_completion of this InvestigationPlaybookTask. # noqa: E501
:type: bool
"""
self._restricted_completion = restricted_completion
@property
def script_arguments(self):
"""Gets the script_arguments of this InvestigationPlaybookTask. # noqa: E501
:return: The script_arguments of this InvestigationPlaybookTask. # noqa: E501
:rtype: dict(str, AdvanceArg)
"""
return self._script_arguments
@script_arguments.setter
def script_arguments(self, script_arguments):
"""Sets the script_arguments of this InvestigationPlaybookTask.
:param script_arguments: The script_arguments of this InvestigationPlaybookTask. # noqa: E501
:type: dict(str, AdvanceArg)
"""
self._script_arguments = script_arguments
@property
def separate_context(self):
"""Gets the separate_context of this InvestigationPlaybookTask. # noqa: E501
:return: The separate_context of this InvestigationPlaybookTask. # noqa: E501
:rtype: bool
"""
return self._separate_context
@separate_context.setter
def separate_context(self, separate_context):
"""Sets the separate_context of this InvestigationPlaybookTask.
:param separate_context: The separate_context of this InvestigationPlaybookTask. # noqa: E501
:type: bool
"""
self._separate_context = separate_context
@property
def sla(self):
"""Gets the sla of this InvestigationPlaybookTask. # noqa: E501
:return: The sla of this InvestigationPlaybookTask. # noqa: E501
:rtype: SLA
"""
return self._sla
@sla.setter
def sla(self, sla):
"""Sets the sla of this InvestigationPlaybookTask.
:param sla: The sla of this InvestigationPlaybookTask. # noqa: E501
:type: SLA
"""
self._sla = sla
@property
def sla_reminder(self):
"""Gets the sla_reminder of this InvestigationPlaybookTask. # noqa: E501
:return: The sla_reminder of this InvestigationPlaybookTask. # noqa: E501
:rtype: SLA
"""
return self._sla_reminder
@sla_reminder.setter
def sla_reminder(self, sla_reminder):
"""Sets the sla_reminder of this InvestigationPlaybookTask.
:param sla_reminder: The sla_reminder of this InvestigationPlaybookTask. # noqa: E501
:type: SLA
"""
self._sla_reminder = sla_reminder
@property
def start_date(self):
"""Gets the start_date of this InvestigationPlaybookTask. # noqa: E501
:return: The start_date of this InvestigationPlaybookTask. # noqa: E501
:rtype: datetime
"""
return self._start_date
@start_date.setter
def start_date(self, start_date):
"""Sets the start_date of this InvestigationPlaybookTask.
:param start_date: The start_date of this InvestigationPlaybookTask. # noqa: E501
:type: datetime
"""
self._start_date = start_date
@property
def state(self):
"""Gets the state of this InvestigationPlaybookTask. # noqa: E501
:return: The state of this InvestigationPlaybookTask. # noqa: E501
:rtype: TaskState
"""
return self._state
@state.setter
def state(self, state):
"""Sets the state of this InvestigationPlaybookTask.
:param state: The state of this InvestigationPlaybookTask. # noqa: E501
:type: TaskState
"""
self._state = state
@property
def sub_playbook(self):
"""Gets the sub_playbook of this InvestigationPlaybookTask. # noqa: E501
:return: The sub_playbook of this InvestigationPlaybookTask. # noqa: E501
:rtype: InvestigationPlaybook
"""
return self._sub_playbook
@sub_playbook.setter
def sub_playbook(self, sub_playbook):
"""Sets the sub_playbook of this InvestigationPlaybookTask.
:param sub_playbook: The sub_playbook of this InvestigationPlaybookTask. # noqa: E501
:type: InvestigationPlaybook
"""
self._sub_playbook = sub_playbook
@property
def task(self):
"""Gets the task of this InvestigationPlaybookTask. # noqa: E501
:return: The task of this InvestigationPlaybookTask. # noqa: E501
:rtype: Task
"""
return self._task
@task.setter
def task(self, task):
"""Sets the task of this InvestigationPlaybookTask.
:param task: The task of this InvestigationPlaybookTask. # noqa: E501
:type: Task
"""
self._task = task
@property
def task_complete_data(self):
"""Gets the task_complete_data of this InvestigationPlaybookTask. # noqa: E501
History complete data # noqa: E501
:return: The task_complete_data of this InvestigationPlaybookTask. # noqa: E501
:rtype: list[InvPlaybookTaskCompleteData]
"""
return self._task_complete_data
@task_complete_data.setter
def task_complete_data(self, task_complete_data):
"""Sets the task_complete_data of this InvestigationPlaybookTask.
History complete data # noqa: E501
:param task_complete_data: The task_complete_data of this InvestigationPlaybookTask. # noqa: E501
:type: list[InvPlaybookTaskCompleteData]
"""
self._task_complete_data = task_complete_data
@property
def task_id(self):
"""Gets the task_id of this InvestigationPlaybookTask. # noqa: E501
:return: The task_id of this InvestigationPlaybookTask. # noqa: E501
:rtype: str
"""
return self._task_id
@task_id.setter
def task_id(self, task_id):
"""Sets the task_id of this InvestigationPlaybookTask.
:param task_id: The task_id of this InvestigationPlaybookTask. # noqa: E501
:type: str
"""
self._task_id = task_id
@property
def timer_triggers(self):
"""Gets the timer_triggers of this InvestigationPlaybookTask. # noqa: E501
SLA fields # noqa: E501
:return: The timer_triggers of this InvestigationPlaybookTask. # noqa: E501
:rtype: list[TimerTrigger]
"""
return self._timer_triggers
@timer_triggers.setter
def timer_triggers(self, timer_triggers):
"""Sets the timer_triggers of this InvestigationPlaybookTask.
SLA fields # noqa: E501
:param timer_triggers: The timer_triggers of this InvestigationPlaybookTask. # noqa: E501
:type: list[TimerTrigger]
"""
self._timer_triggers = timer_triggers
@property
def type(self):
"""Gets the type of this InvestigationPlaybookTask. # noqa: E501
:return: The type of this InvestigationPlaybookTask. # noqa: E501
:rtype: TaskType
"""
return self._type
@type.setter
def type(self, type):
"""Sets the type of this InvestigationPlaybookTask.
:param type: The type of this InvestigationPlaybookTask. # noqa: E501
:type: TaskType
"""
self._type = type
@property
def view(self):
"""Gets the view of this InvestigationPlaybookTask. # noqa: E501
:return: The view of this InvestigationPlaybookTask. # noqa: E501
:rtype: TaskView
"""
return self._view
@view.setter
def view(self, view):
"""Sets the view of this InvestigationPlaybookTask.
:param view: The view of this InvestigationPlaybookTask. # noqa: E501
:type: TaskView
"""
self._view = view
@property
def will_not_execute_count(self):
"""Gets the will_not_execute_count of this InvestigationPlaybookTask. # noqa: E501
:return: The will_not_execute_count of this InvestigationPlaybookTask. # noqa: E501
:rtype: int
"""
return self._will_not_execute_count
@will_not_execute_count.setter
def will_not_execute_count(self, will_not_execute_count):
"""Sets the will_not_execute_count of this InvestigationPlaybookTask.
:param will_not_execute_count: The will_not_execute_count of this InvestigationPlaybookTask. # noqa: E501
:type: int
"""
self._will_not_execute_count = will_not_execute_count
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(InvestigationPlaybookTask, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, InvestigationPlaybookTask):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
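# Usage sketch (illustrative, not part of the generated file): serializing a
# task for logging. Assumes the swagger-codegen constructor defaults every
# attribute to None, so values can be set through the properties above.
#
#     import json
#     task = InvestigationPlaybookTask()
#     task.id = "task-1"                    # hypothetical identifiers
#     task.task_id = "playbook-step-1"
#     print(json.dumps(task.to_dict(), default=str))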
avg_line_length: 32.498337 | max_line_length: 1584 | alphanum_fraction: 0.649381 | count_classes: 45570 | score_classes: 0.932875 | count_generators: 0 | score_generators: 0 | count_decorators: 31877 | score_decorators: 0.652562 | count_async_functions: 0 | score_async_functions: 0 | count_documentation: 27349 | score_documentation: 0.559868
hexsha: bee216da141df68c9f7b41ae5761fcf6bc3e34f4 | size: 36274 | ext: py | lang: Python
max_stars_repo_path: make.py | max_stars_repo_name: beefoo/subway-inequality | max_stars_repo_head_hexsha: a93bdbf81ea3753b0d2ec85d028f816adcc2f6f9 | max_stars_repo_licenses: ["MIT"] | max_stars_count: 1 | max_stars_repo_stars_event_min_datetime: 2021-03-08T01:44:10.000Z | max_stars_repo_stars_event_max_datetime: 2021-03-08T01:44:10.000Z
max_issues_repo_path: make.py | max_issues_repo_name: beefoo/subway-inequality | max_issues_repo_head_hexsha: a93bdbf81ea3753b0d2ec85d028f816adcc2f6f9 | max_issues_repo_licenses: ["MIT"] | max_issues_count: null | max_issues_repo_issues_event_min_datetime: null | max_issues_repo_issues_event_max_datetime: null
max_forks_repo_path: make.py | max_forks_repo_name: beefoo/subway-inequality | max_forks_repo_head_hexsha: a93bdbf81ea3753b0d2ec85d028f816adcc2f6f9 | max_forks_repo_licenses: ["MIT"] | max_forks_count: 1 | max_forks_repo_forks_event_min_datetime: 2021-11-02T21:55:39.000Z | max_forks_repo_forks_event_max_datetime: 2021-11-02T21:55:39.000Z
# -*- coding: utf-8 -*-
# python3 make.py -loc "data/lines/1.csv" -width 3840 -height 2160 -overwrite
# python3 make.py -loc "data/lines/1.csv" -width 3840 -height 2160 -rtl -overwrite
# python3 combine.py
# python3 make.py -data "data/lines/A_LEF.csv" -width 3840 -height 2160 -loc "data/lines/C.csv" -img "img/A.png" -sw 0.1405 -tw 0.145 -overwrite
# python3 make.py -data "data/lines/A_LEF.csv" -width 3840 -height 2160 -loc "data/lines/C.csv" -img "img/A.png" -sw 0.1405 -tw 0.145 -rtl -overwrite
# python3 combine.py -in "output/subway_line_A.mp4,output/subway_line_A_rtl.mp4" -out "output/subway_line_A_loop.mp4"
# python3 make.py -data "data/lines/7.csv" -width 3840 -height 2160 -img "img/7.png" -sw 0.11725 -tw 0.135625 -reverse -overwrite
# python3 make.py -data "data/lines/7.csv" -width 3840 -height 2160 -img "img/7.png" -sw 0.11725 -tw 0.135625 -reverse -rtl -overwrite
# python3 combine.py -in "output/subway_line_7.mp4,output/subway_line_7_rtl.mp4" -out "output/subway_line_7_loop.mp4"
import argparse
import numpy as np
import os
from pprint import pprint
import sys
from lib import *
# input
parser = argparse.ArgumentParser()
parser.add_argument('-data', dest="DATA_FILE", default="data/lines/2.csv", help="Input csv file with preprocessed data")
parser.add_argument('-loc', dest="DATA_LOCAL_FILE", default="", help="Input csv file with preprocessed data of a local train that should 'fill in' stations in-between express trains")
parser.add_argument('-img', dest="IMAGE_FILE", default="img/2.png", help="Subway bullet image")
parser.add_argument('-instruments', dest="INSTRUMENTS_FILE", default="data/instruments.csv", help="Input csv file with instruments config")
parser.add_argument('-dir', dest="MEDIA_DIRECTORY", default="audio/", help="Input media directory")
parser.add_argument('-width', dest="WIDTH", default=1920, type=int, help="Output video width")
parser.add_argument('-height', dest="HEIGHT", default=1080, type=int, help="Output video height")
parser.add_argument('-pad0', dest="PAD_START", default=2000, type=int, help="Pad start in ms")
parser.add_argument('-pad1', dest="PAD_END", default=2000, type=int, help="Pad end in ms")
parser.add_argument('-fps', dest="FPS", default=30, type=int, help="Output video frames per second")
parser.add_argument('-outframe', dest="OUTPUT_FRAME", default="tmp/line_%s/frame.%s.png", help="Output frames pattern")
parser.add_argument('-aout', dest="AUDIO_OUTPUT_FILE", default="output/subway_line_%s.mp3", help="Output audio file")
parser.add_argument('-dout', dest="DATA_OUTPUT_FILE", default="output/subway_line_%s.csv", help="Output data file")
parser.add_argument('-out', dest="OUTPUT_FILE", default="output/subway_line_%s.mp4", help="Output media file")
parser.add_argument('-overwrite', dest="OVERWRITE", action="store_true", help="Overwrite existing files?")
parser.add_argument('-probe', dest="PROBE", action="store_true", help="Just view statistics?")
parser.add_argument('-reverse', dest="REVERSE", action="store_true", help="Reverse the line?")
parser.add_argument('-rtl', dest="RIGHT_TO_LEFT", action="store_true", help="Play from right to left?")
parser.add_argument('-ao', dest="AUDIO_ONLY", action="store_true", help="Only output audio?")
parser.add_argument('-vo', dest="VIDEO_ONLY", action="store_true", help="Only output video?")
parser.add_argument('-do', dest="DATA_ONLY", action="store_true", help="Only output data?")
parser.add_argument('-viz', dest="VISUALIZE_SEQUENCE", action="store_true", help="Output a visualization of the sequence")
parser.add_argument('-plot', dest="PLOT_SEQUENCE", action="store_true", help="Display a plot chart of the sequence")
parser.add_argument('-frame', dest="SINGLE_FRAME", default=-1, type=int, help="Output just a single frame")
# Music config
parser.add_argument('-db', dest="MASTER_DB", type=float, default=-2.4, help="Master +/- decibels to be applied to final audio")
parser.add_argument('-bpm', dest="BPM", type=int, default=120, help="Beats per minute, e.g. 60, 75, 100, 120, 150")
parser.add_argument('-mpb', dest="METERS_PER_BEAT", type=int, default=75, help="Higher numbers create shorter songs")
parser.add_argument('-dpb', dest="DIVISIONS_PER_BEAT", type=int, default=4, help="e.g. 4 = quarter notes, 8 = eighth notes")
parser.add_argument('-pm', dest="PRICE_MULTIPLIER", type=float, default=1.3, help="Makes instruments more expensive; higher numbers = fewer instruments playing")
parser.add_argument('-vdur', dest="VARIANCE_MS", type=int, default=20, help="+/- milliseconds an instrument note should be off by to give it a little more 'natural' feel")
# Visual design config
parser.add_argument('-sw', dest="STATION_WIDTH", type=float, default=0.125, help="Minimum station width as a percent of the screen width; adjust this to change the overall visual speed")
parser.add_argument('-tw', dest="TEXT_WIDTH", type=float, default=0.15, help="Station text width as a percent of the screen width")
parser.add_argument('-cy', dest="CENTER_Y", type=float, default=0.475, help="Center y as a percent of screen height")
parser.add_argument('-bty', dest="BOROUGH_TEXT_Y", type=float, default=0.55, help="Borough text center y as a percent of screen height")
parser.add_argument('-sty', dest="STATION_TEXT_Y", type=float, default=0.375, help="Station text center y as a percent of screen height")
parser.add_argument('-cw', dest="CIRCLE_WIDTH", type=int, default=60, help="Circle radius in pixels assuming 1920x1080")
parser.add_argument('-lh', dest="LINE_HEIGHT", type=int, default=24, help="Height of horizontal line in pixels assuming 1920x1080")
parser.add_argument('-bh', dest="BOUNDARY_HEIGHT", type=int, default=166, help="Height of boundary line in pixels assuming 1920x1080")
parser.add_argument('-bw', dest="BOUNDARY_WIDTH", type=int, default=3, help="Width of boundary line in pixels assuming 1920x1080")
parser.add_argument('-bm', dest="BOUNDARY_MARGIN", type=int, default=48, help="Horizontal margin of boundary line in pixels assuming 1920x1080")
parser.add_argument('-mw', dest="MARKER_WIDTH", type=int, default=8, help="Width of marker in pixels assuming 1920x1080")
parser.add_argument('-sts', dest="STATION_TEXT_SIZE", type=int, default=30, help="Station text size in pixels assuming 1920x1080")
parser.add_argument('-stm', dest="STATION_TEXT_MARGIN", type=int, default=20, help="Station text bottom margin in pixels assuming 1920x1080")
parser.add_argument('-slm', dest="STATION_LETTER_MARGIN", type=int, default=1, help="Space after each station text letter in pixels assuming 1920x1080")
parser.add_argument('-bts', dest="BOROUGH_TEXT_SIZE", type=int, default=24, help="Borough text size in pixels assuming 1920x1080")
parser.add_argument('-blm', dest="BOROUGH_LETTER_MARGIN", type=int, default=1, help="Space after each borough text letter in pixels assuming 1920x1080")
parser.add_argument('-bthresh', dest="BOROUGH_THRESHOLD", type=float, default=0.375, help="Minimum width available for displaying borough dividers")
parser.add_argument('-dw', dest="DIVIDER_WIDTH", type=int, default=28, help="Line divider in pixels assuming 1920x1080")
parser.add_argument('-dd', dest="DIVIDER_DISTANCE", type=float, default=0.333, help="Distance between dividers as a percent of screen width")
parser.add_argument('-dc', dest="DIVIDER_COLOR", default="#666666", help="Divider color")
parser.add_argument('-bg', dest="BG_COLOR", default="#000000", help="Background color")
parser.add_argument('-tc', dest="TEXT_COLOR", default="#eeeeee", help="Text color")
parser.add_argument('-atc', dest="ALT_TEXT_COLOR", default="#aaaaaa", help="Secondary text color")
parser.add_argument('-mc', dest="MARKER_COLOR", default="#dddddd", help="Marker color")
parser.add_argument('-sfont', dest="STATION_FONT", default="fonts/OpenSans-Bold.ttf", help="Station font")
parser.add_argument('-bfont', dest="BOROUGH_FONT", default="fonts/OpenSans-SemiBold.ttf", help="Borough font")
parser.add_argument('-map', dest="MAP_IMAGE", default="img/nyc.png", help="Map image")
parser.add_argument('-mcoord', dest="MAP_COORDS", default="-74.1261,40.9087,-73.7066,40.5743", help="Map bounding box: top-left and bottom-right lon,lat points")
parser.add_argument('-mapm', dest="MAP_MARGIN", type=int, default=30, help="Margin of map in pixels assuming 1920x1080")
parser.add_argument('-mapw', dest="MAP_W", type=int, default=260, help="Map width in pixels assuming 1920x1080")
parser.add_argument('-mlw', dest="MAP_LINE_WIDTH", type=int, default=4, help="Map line in pixels assuming 1920x1080")
parser.add_argument('-mlc', dest="MAP_LINE_COLOR", default="#eeeeee", help="Map line color")
a = parser.parse_args()
if not a.AUDIO_ONLY:
import gizeh
from PIL import Image, ImageDraw, ImageFont
startTime = logTime()
# Calculations
BEAT_MS = roundInt(60.0 / a.BPM * 1000)
ROUND_TO_NEAREST = roundInt(1.0 * BEAT_MS / a.DIVISIONS_PER_BEAT)
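# e.g. with the defaults (BPM=120, DIVISIONS_PER_BEAT=4): BEAT_MS = 500 and
# ROUND_TO_NEAREST = 125, i.e. note onsets snap to a sixteenth-note grid.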
basename = getBasename(a.DATA_FILE)
if "_" in basename:
basename, _ = tuple(basename.split("_"))
lineName = basename
if a.RIGHT_TO_LEFT:
basename += "_rtl"
# Read data
_, stations = readCsv(a.DATA_FILE)
_, instruments = readCsv(a.INSTRUMENTS_FILE)
lstations = []
if len(a.DATA_LOCAL_FILE):
_, lstations = readCsv(a.DATA_LOCAL_FILE)
# Parse instruments
instruments = prependAll(instruments, ("file", a.MEDIA_DIRECTORY))
instruments = [i for i in instruments if i["active"] > 0]
instruments = addIndices(instruments, "index")
for i, instrument in enumerate(instruments):
instruments[i]["from_beat_ms"] = roundInt(1.0 * BEAT_MS / instrument["from_tempo"])
instruments[i]["to_beat_ms"] = roundInt(1.0 * BEAT_MS / instrument["to_tempo"])
instruments[i]["interval_ms"] = roundInt(instrument["interval_phase"] * BEAT_MS)
instruments[i]["price"] = instrument["price"] * a.PRICE_MULTIPLIER
# Buy instruments based on a specified budget
def buyInstruments(station, instrumentsShelf):
budget = station['income'] / 12.0
percentile = station['percentile']
instrumentsCart = []
for i in instrumentsShelf:
# skip if not in bracket
if percentile < i['bracket_min'] or percentile >= i['bracket_max']:
continue
# add to cart if in budget
elif i['price'] < budget:
budget -= i['price']
instrumentsCart.append(i.copy())
# out of budget, finished
else:
break
return instrumentsCart
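# Illustrative call (hypothetical station values): a $60,000 median income
# yields a $5,000 monthly budget; instruments whose percentile bracket
# excludes 40 are skipped, and shopping stops at the first in-bracket
# instrument that no longer fits the remaining budget.
#
#     cart = buyInstruments({"income": 60000, "percentile": 40}, instruments)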
# Add local stations in-between express ones
if len(lstations) > 0:
lbasename = getBasename(a.DATA_LOCAL_FILE)
estations = {}
addStations = []
for i, s in enumerate(stations):
lines = str(s["Daytime Routes"]).split(" ")
if lbasename in lines:
estations[s["Station ID"]] = s.copy()
sortByStart = None
currentLStations = []
for i, s in enumerate(lstations):
if s["Station ID"] in estations:
if sortByStart is not None and len(currentLStations) > 0:
step = 1.0 / (len(currentLStations) + 1)
for j, ls in enumerate(currentLStations):
currentLStations[j]["sortBy"] = sortByStart + (j+1) * step
currentLStations[j]["isLocal"] = 1
addStations += currentLStations
currentLStations = []
sortByStart = estations[s["Station ID"]]["sortBy"]
elif sortByStart is not None:
currentLStations.append(s)
stations += addStations
# stations = sorted(stations, key=lambda d: d["sortBy"])
# for s in stations:
# if "isLocal" in s:
# print(" --"+s["Stop Name"])
# else:
# print(s["Stop Name"])
# sys.exit()
# Parse stations
stations = sorted(stations, key=lambda d: d["income"])
stations = addNormalizedValues(stations, "income", "nIncome")
stations = addIndices(stations, "incomeIndex")
isReverse = a.REVERSE
if a.RIGHT_TO_LEFT:
isReverse = (not isReverse)
stations = sorted(stations, key=lambda d: d["sortBy"], reverse=isReverse)
stations = addIndices(stations, "index")
stationCount = len(stations)
ms = a.PAD_START
for i, station in enumerate(stations):
stations[i]["percentile"] = 1.0 * station["incomeIndex"] / stationCount * 100
# stations[i]["percentile"] = min(99.999, 1.0 * station["nIncome"] * 100)
stations[i]["instruments"] = buyInstruments(stations[i], instruments)
# print(len(stations[i]["instruments"]))
distance = beats = duration = 0
if i < stationCount-1:
distance = earthDistance(stations[i+1]['GTFS Latitude'], stations[i+1]['GTFS Longitude'], station['GTFS Latitude'], station['GTFS Longitude'])
beats = roundInt(1.0 * distance / a.METERS_PER_BEAT)
duration = beats * BEAT_MS
boroughNext = stations[i+1]["Borough"]
stations[i]["distance"] = distance
stations[i]["beats"] = beats
stations[i]["duration"] = duration
stations[i]["vduration"] = duration
stations[i]["BoroughNext"] = boroughNext
stations[i]["ms"] = ms
stations[i]["lineName"] = lineName
ms += duration
if a.PROBE:
print("===========================")
for s in stations:
if "isLocal" in s:
print(formatSeconds(roundInt(s["ms"]/1000.0)) + " --- " + s["Stop Name"] + " (LOCAL) - $" + formatNumber(s["income"]))
else:
print(formatSeconds(roundInt(s["ms"]/1000.0)) + " - " + s["Stop Name"] + " - $" + formatNumber(s["income"]))
print("===========================")
else:
dataFilename = a.DATA_OUTPUT_FILE % basename
makeDirectories([dataFilename])
writeCsv(dataFilename, stations, headings=["ms", "Stop Name", "isLocal", "income", "Borough", "lineName"])
textFilename = replaceFileExtension(dataFilename, ".txt")
text = f'Subway Inequality: {basename} train ({stations[-1]["Stop Name"]} Bound)\n\n'
    text += f'This song mimics a ride along a subway line (the {basename} train), where the quantity and power of the instruments at any given moment in the song corresponds to the median household income of the neighborhood that you are passing through. The goal is to have the dramatic contrasts of the song echo the dramatic contrasts of income in the city.\n\n'
for s in stations:
if "isLocal" not in s:
text += f'{formatSeconds(roundInt(s["ms"]/1000.0))} - {s["Stop Name"]} - ${formatNumber(s["income"])} household income\n'
writeTextFile(textFilename, text)
if a.DATA_ONLY:
sys.exit()
# Calculate ranges
distances = [s["distance"] for s in stations if s["distance"] > 0]
totalDistance = sum(distances)
minDistance, maxDistance = (min(distances), max(distances))
durations = [s["duration"] for s in stations if s["duration"] > 0]
totalMs = sum(durations)
minDuration, maxDuration = (min(durations), max(durations))
totalBeats = sum([s["beats"] for s in stations])
totalSeconds = roundInt(totalMs / 1000.0)
secondsPerStation = roundInt(1.0*totalSeconds/stationCount)
print('Total distance in meters: %s' % roundInt(totalDistance))
print('Distance range in meters: [%s, %s]' % (roundInt(minDistance), roundInt(maxDistance)))
print('Average beats per station: %s' % roundInt(1.0*totalBeats/stationCount))
print('Average time per station: %s' % formatSeconds(secondsPerStation))
print('Main sequence beats: %s' % totalBeats)
# Retrieve gain based on current beat
def getVolume(instrument, beat):
beats_per_phase = instrument['gain_phase']
percent_complete = float(beat % beats_per_phase) / beats_per_phase
percent = easeSin(percent_complete)
from_volume = instrument['from_volume']
to_volume = instrument['to_volume']
volume = lerp((from_volume, to_volume), percent)
return volume
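# Sketch of the phase math (assuming lib.easeSin maps 0 -> 0, 0.5 -> 1, 1 -> 0):
# with gain_phase=16, from_volume=0.4 and to_volume=1.0, beat 8 sits halfway
# through the phase, so the note plays at the full to_volume of 1.0.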
# Get beat duration in ms based on current point in time
def getBeatMs(instrument, beat, round_to):
from_beat_ms = instrument['from_beat_ms']
to_beat_ms = instrument['to_beat_ms']
beats_per_phase = instrument['tempo_phase']
percent_complete = float(beat % beats_per_phase) / beats_per_phase
percent = easeSin(percent_complete)
ms = lerp((from_beat_ms, to_beat_ms), percent)
ms = roundInt(roundToNearest(ms, round_to))
return ms
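# The same easing applied to tempo: with tempo_phase=16, from_beat_ms=500 and
# to_beat_ms=250, beats near the middle of the phase play roughly twice as
# fast as beats at the phase edges (snapped to the round_to grid).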
# Return if the instrument should be played in the given interval
def isValidInterval(instrument, elapsed_ms, start_ms, end_ms, minIntervalDuration=3000):
interval_ms = instrument['interval_ms']
interval = instrument['interval']
interval_offset = instrument['interval_offset']
isValid = (int(math.floor(1.0*elapsed_ms/interval_ms)) % interval == interval_offset)
# return isValid
if end_ms - start_ms <= minIntervalDuration * 3:
return isValid
# check to see if we're at the start and not long enough
if isValid and elapsed_ms < (start_ms+minIntervalDuration) and not isValidInterval(instrument, start_ms+minIntervalDuration, start_ms, end_ms, minIntervalDuration):
isValid = False
# make start interval earlier if necessary
elif not isValid and elapsed_ms < (start_ms+minIntervalDuration) and isValidInterval(instrument, start_ms+minIntervalDuration, start_ms, end_ms, minIntervalDuration):
isValid = True
# check to see if we're at the end and not long enough
elif isValid and elapsed_ms > (end_ms-minIntervalDuration) and not isValidInterval(instrument, end_ms-minIntervalDuration, start_ms, end_ms, minIntervalDuration):
isValid = False
# make start interval earlier if necessary
elif not isValid and elapsed_ms > (end_ms-minIntervalDuration) and isValidInterval(instrument, end_ms-minIntervalDuration, start_ms, end_ms, minIntervalDuration):
isValid = True
return isValid
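# Illustrative values: with interval=4, interval_offset=1 and interval_ms=2000,
# only the second 2-second window out of every four is valid, so the instrument
# plays for 2s then rests for 6s; the edge checks above extend or suppress
# notes so a burst near a segment boundary never runs shorter than
# minIntervalDuration.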
# Add beats to sequence
def addBeatsToSequence(sequence, instrument, duration, ms, beat_ms, round_to, pad_start):
msStart = ms
msEnd = ms + duration
offset_ms = int(instrument['tempo_offset'] * beat_ms)
ms += offset_ms
previous_ms = int(ms)
from_beat_ms = instrument['from_beat_ms']
to_beat_ms = instrument['to_beat_ms']
min_ms = min(from_beat_ms, to_beat_ms)
remaining_duration = int(duration)
elapsed_duration = offset_ms
continue_from_prev = (instrument['bracket_min'] > 0 or instrument['bracket_max'] < 100)
rn = pseudoRandom(instrument["index"]+1)
while remaining_duration >= min_ms:
elapsed_ms = int(ms)
elapsed_beat = int((elapsed_ms-previous_ms) / beat_ms)
# continue beat from previous
if continue_from_prev:
elapsed_beat = int(elapsed_ms / beat_ms)
this_beat_ms = getBeatMs(instrument, elapsed_beat, round_to)
# add to sequence if in valid interval
if isValidInterval(instrument, elapsed_ms, msStart, msEnd):
variance = roundInt(rn * a.VARIANCE_MS * 2 - a.VARIANCE_MS)
sequence.append({
'instrumentIndex': instrument["index"],
'filename': instrument["file"],
'volume': getVolume(instrument, elapsed_beat),
'ms': max([pad_start + elapsed_ms + variance, 0])
})
remaining_duration -= this_beat_ms
elapsed_duration += this_beat_ms
ms += this_beat_ms
return sequence
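# Each call walks one contiguous stretch of stations, dropping a note every
# getBeatMs() milliseconds while the interval gate is open, jittered by up to
# +/-VARIANCE_MS so repeated samples do not sound machine-stamped.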
# Build main sequence
sequence = []
for i, instrument in enumerate(instruments):
ms = 0
stationQueueDur = 0
# Each station in stations
for station in stations:
# Check if instrument is in this station
instrumentIndex = findInList(station['instruments'], 'index', instrument['index'])
        # Instrument not here: flush any queued beats, then skip past this station
if instrumentIndex < 0 and stationQueueDur > 0:
sequence = addBeatsToSequence(sequence, instrument, stationQueueDur, ms, BEAT_MS, ROUND_TO_NEAREST, a.PAD_START)
ms += stationQueueDur + station['duration']
stationQueueDur = 0
elif instrumentIndex < 0:
ms += station['duration']
else:
stationQueueDur += station['duration']
if stationQueueDur > 0:
sequence = addBeatsToSequence(sequence, instrument, stationQueueDur, ms, BEAT_MS, ROUND_TO_NEAREST, a.PAD_START)
sequenceDuration = max([s["ms"] for s in sequence]) + a.PAD_END
# Now start the video frame logic
# Calculations
aa = vars(a)
aa["STATION_WIDTH"] = roundInt(1.0 * a.WIDTH * a.STATION_WIDTH)
aa["TEXT_WIDTH"] = roundInt(1.0 * a.WIDTH * a.TEXT_WIDTH)
aa["CENTER_Y"] = roundInt(1.0 * a.HEIGHT * a.CENTER_Y)
aa["BOROUGH_TEXT_Y"] = roundInt(1.0 * a.HEIGHT * a.BOROUGH_TEXT_Y)
aa["STATION_TEXT_Y"] = roundInt(1.0 * a.HEIGHT * a.STATION_TEXT_Y)
RESOLUTION = a.WIDTH / 1920.0
aa["CIRCLE_WIDTH"] = roundInt(a.CIRCLE_WIDTH * RESOLUTION)
aa["LINE_HEIGHT"] = roundInt(a.LINE_HEIGHT * RESOLUTION)
aa["BOUNDARY_MARGIN"] = roundInt(a.BOUNDARY_MARGIN * RESOLUTION)
aa["BOUNDARY_HEIGHT"] = roundInt(a.BOUNDARY_HEIGHT * RESOLUTION)
aa["BOUNDARY_WIDTH"] = roundInt(a.BOUNDARY_WIDTH * RESOLUTION)
aa["BOROUGH_THRESHOLD"] = roundInt(1.0 * a.WIDTH * a.BOROUGH_THRESHOLD)
aa["MARKER_WIDTH"] = roundInt(a.MARKER_WIDTH * RESOLUTION)
aa["STATION_TEXT_SIZE"] = roundInt(a.STATION_TEXT_SIZE * RESOLUTION)
aa["STATION_TEXT_MARGIN"] = roundInt(a.STATION_TEXT_MARGIN * RESOLUTION)
aa["STATION_LETTER_MARGIN"] = roundInt(a.STATION_LETTER_MARGIN * RESOLUTION)
aa["BOROUGH_TEXT_SIZE"] = roundInt(a.BOROUGH_TEXT_SIZE * RESOLUTION)
aa["BOROUGH_LETTER_MARGIN"] = roundInt(a.BOROUGH_LETTER_MARGIN * RESOLUTION)
aa["MAP_COORDS"] = tuple([float(c) for c in a.MAP_COORDS.strip().split(",")])
aa["MAP_MARGIN"] = roundInt(a.MAP_MARGIN * RESOLUTION)
aa["MAP_W"] = roundInt(a.MAP_W * RESOLUTION)
aa["MAP_LINE_WIDTH"] = roundInt(a.MAP_LINE_WIDTH * RESOLUTION)
aa["DIVIDER_WIDTH"] = roundInt(a.DIVIDER_WIDTH * RESOLUTION)
aa["DIVIDER_DISTANCE"] = roundInt(1.0 * a.WIDTH * a.DIVIDER_DISTANCE)
# Add borough names
boroughNames = {
"Q": "Queens",
"M": "Manhattan",
"Bk": "Brooklyn",
"Bx": "Bronx",
"SI": "Staten Island"
}
for i, station in enumerate(stations):
stations[i]["borough"] = boroughNames[station["Borough"]]
x = 0
mlon0, mlat0, mlon1, mlat1 = a.MAP_COORDS
vstations = stations[:]
# If going right to left, reverse the stations visually
if a.RIGHT_TO_LEFT:
vstations = list(reversed(vstations))
for i, station in enumerate(vstations):
if i < stationCount-1:
vstations[i]["vduration"] = vstations[i+1]["duration"]
else:
vstations[i]["vduration"] = 0
for i, station in enumerate(vstations):
boroughNext = station["borough"]
if i < stationCount-1:
boroughNext = vstations[i+1]["borough"]
vstations[i]["boroughNext"] = boroughNext
vstations[i]["width"] = roundInt(1.0 * station["vduration"] / minDuration * a.STATION_WIDTH)
vstations[i]["x"] = x
vstations[i]["x0"] = x - a.TEXT_WIDTH / 2
vstations[i]["x1"] = x + a.TEXT_WIDTH / 2
vstations[i]["mapNx"] = norm(station["GTFS Longitude"], (mlon0, mlon1))
vstations[i]["mapNy"] = norm(station["GTFS Latitude"], (mlat0, mlat1))
x += vstations[i]["width"]
totalW = x
pxPerMs = 1.0 * totalW / totalMs
pxPerS = pxPerMs * 1000.0
pxPerFrame = pxPerS / a.FPS
print("Total width: %s px" % totalW)
print("Pixels per second: %s" % pxPerS)
print("Pixels per frame: %s" % pxPerFrame)
totalFrames = msToFrame(sequenceDuration, a.FPS)
totalFrames = int(ceilToNearest(totalFrames, a.FPS))
print("Total frames: %s" % totalFrames)
sequenceDuration = frameToMs(totalFrames, a.FPS)
def drawFrame(filename, ms, xOffset, stations, totalW, bulletImg, mapImg, fontStation, fontBorough, a):
if not a.OVERWRITE and os.path.isfile(filename):
return
im = Image.new('RGB', (a.WIDTH, a.HEIGHT), a.BG_COLOR)
draw = ImageDraw.Draw(im, 'RGBA')
cx = roundInt(a.WIDTH * 0.5)
cy = a.CENTER_Y
stationCount = len(stations)
leftX = xOffset
rightX = leftX + totalW
# draw the center line
x0 = 0 if leftX < 0 else leftX
x1 = a.WIDTH if rightX > a.WIDTH else rightX
y0 = cy - a.LINE_HEIGHT/2
y1 = y0 + a.LINE_HEIGHT
draw.rectangle([(x0, y0), (x1, y1)], fill=a.ALT_TEXT_COLOR)
for i, s in enumerate(stations):
# check to see if we should draw borough divider
if s["borough"] != s["boroughNext"]:
deltaBx = abs(stations[i+1]["x"]-s["x"])
# don't draw boundary in tight space
if deltaBx > a.BOROUGH_THRESHOLD:
bdx = roundInt(xOffset + (s["x"] + stations[i+1]["x"]) * 0.5)
bdx0 = bdx - a.WIDTH/2
bdx1 = bdx + a.WIDTH/2
if 0 <= bdx0 <= a.WIDTH or 0 <= bdx1 <= a.WIDTH:
dx0 = bdx - a.BOUNDARY_WIDTH/2
dx1 = dx0 + a.BOUNDARY_WIDTH
dy0 = cy
dy1 = dy0 + a.BOUNDARY_HEIGHT
draw.rectangle([(dx0, dy0), (dx1, dy1)], fill=a.ALT_TEXT_COLOR)
blw, blh = getLineSize(fontBorough, s["borough"], a.BOROUGH_LETTER_MARGIN)
bx = dx0 - a.BOUNDARY_MARGIN - blw/2
drawTextToImage(draw, s["borough"], fontBorough, a.BOROUGH_LETTER_MARGIN, bx, a.BOROUGH_TEXT_Y, a.ALT_TEXT_COLOR)
blw, blh = getLineSize(fontBorough, s["boroughNext"], a.BOROUGH_LETTER_MARGIN)
bx = dx1 + a.BOUNDARY_MARGIN + blw/2
drawTextToImage(draw, s["boroughNext"], fontBorough, a.BOROUGH_LETTER_MARGIN, bx, a.BOROUGH_TEXT_Y, a.ALT_TEXT_COLOR)
sx = xOffset + s["x"]
sy = a.CENTER_Y
# draw dividers
if i < stationCount-1:
dividers = 0
dividerDistance = 0
nextSx = xOffset + stations[i+1]["x"]
deltaSx = abs(nextSx - sx)
if deltaSx >= a.DIVIDER_DISTANCE * 2:
dividers = int(1.0 * deltaSx / a.DIVIDER_DISTANCE) - 1
if dividers > 0:
dividerDistance = roundInt(1.0 * deltaSx / (dividers+1))
for di in range(dividers):
divX = sx + (di+1) * dividerDistance
divX0 = divX - a.DIVIDER_WIDTH/2
divX1 = divX0 + a.DIVIDER_WIDTH
divY0 = y0
divY1 = y1
if divX1 > 0:
draw.rectangle([(divX0, divY0), (divX1, divY1)], fill=a.DIVIDER_COLOR)
# check if station is visible
sx0 = xOffset + s["x0"]
sx1 = xOffset + s["x1"]
if not (0 <= sx0 <= a.WIDTH or 0 <= sx1 <= a.WIDTH):
continue
# just draw empty bullet for local stops
if "isLocal" in s:
brad = roundInt(a.CIRCLE_WIDTH/3)
bx = sx
by = sy
# Draw line using gizeh so it will be smooth
bsurface = gizeh.Surface(width=a.WIDTH, height=a.HEIGHT)
circle = gizeh.circle(r=brad, xy=[bx, by], fill=hexToRGB(a.DIVIDER_COLOR, toFloat=True))
circle.draw(bsurface)
bpixels = bsurface.get_npimage(transparent=True) # should be shape: h, w, rgba
circleImg = Image.fromarray(bpixels, mode="RGBA")
im.paste(circleImg, (0, 0), circleImg)
continue
# draw borough text
bx = sx
by = a.BOROUGH_TEXT_Y
drawTextToImage(draw, s["borough"], fontBorough, a.BOROUGH_LETTER_MARGIN, bx, by, a.ALT_TEXT_COLOR)
# draw bullet
bx = roundInt(sx - a.CIRCLE_WIDTH/2)
by = roundInt(sy - a.CIRCLE_WIDTH/2)
im.paste(bulletImg, (bx, by), bulletImg)
# draw station text
stx = sx
sty = a.STATION_TEXT_Y
slines = getMultilines(s["Stop Name"], fontStation, a.TEXT_WIDTH, a.STATION_LETTER_MARGIN)
drawTextLinesToImage(draw, slines, fontStation, a.STATION_TEXT_MARGIN, a.STATION_LETTER_MARGIN, stx, sty, a.TEXT_COLOR)
# draw the map
mw, mh = mapImg.size
mx = a.MAP_MARGIN
my = a.HEIGHT - mh - a.MAP_MARGIN
im.paste(mapImg, (mx, my))
lineColor = "#"+str(stations[0]["color"])
points = []
allPoints = []
mstations = stations[:]
if a.RIGHT_TO_LEFT:
mstations = list(reversed(mstations))
for i, s in enumerate(mstations):
sms0 = s["ms"]
sms1 = sms0 + s["duration"]
# print("%s, %s" % (sms0, sms1))
mprogress = norm(ms, (sms0, sms1), limit=True) if s["duration"] > 0 else 1.0
lx = lerp((mx, mx+mw), s["mapNx"])
ly = lerp((my, my+mh), s["mapNy"])
if ms >= sms0:
points.append((lx, ly))
if 0.0 < mprogress < 1.0 and i < stationCount-1 and s["duration"] > 0:
lx1 = lerp((mx, mx+mw), mstations[i+1]["mapNx"])
ly1 = lerp((my, my+mh), mstations[i+1]["mapNy"])
lx2 = lerp((lx, lx1), mprogress)
ly2 = lerp((ly, ly1), mprogress)
points.append((lx2, ly2))
allPoints.append((lx, ly))
# Draw line using gizeh so it will be smooth
surface = gizeh.Surface(width=a.WIDTH, height=a.HEIGHT)
line = gizeh.polyline(points=allPoints, stroke_width=max(1, a.MAP_LINE_WIDTH-1), stroke=hexToRGB(a.MAP_LINE_COLOR, toFloat=True))
line.draw(surface)
if len(points) > 1:
sline = gizeh.polyline(points=points, stroke_width=a.MAP_LINE_WIDTH, stroke=hexToRGB(lineColor, toFloat=True))
sline.draw(surface)
spixels = surface.get_npimage(transparent=True) # should be shape: h, w, rgba
lineImage = Image.fromarray(spixels, mode="RGBA")
im.paste(lineImage, (0, 0), lineImage)
# draw the marker
x0 = cx - a.MARKER_WIDTH/2
x1 = x0 + a.MARKER_WIDTH
y0 = 0
y1 = a.HEIGHT
draw.rectangle([(x0, y0), (x1, y1)], fill=(255,255,255,100))
del draw
im.save(filename)
# print("Saved %s" % filename)
def getEasedFrames(easeFrameCount, stationFrameCount, pxPerFrame):
fromFrameCount = int(min(easeFrameCount, stationFrameCount) / 2)
fromPx = fromFrameCount * pxPerFrame
toFrameCount = easeFrameCount + fromFrameCount # 'fromPx' will be stretched into 'toFrameCount' frames
# easedPoints = [easeIn(n) * pxPerFrame for n in np.linspace(0, 1.0, num=toFrameCount)]
easedPoints = [n * pxPerFrame for n in np.linspace(0, 1.0, num=toFrameCount)]
buckets = [0 for n in range(toFrameCount)]
pxPool = fromPx
for i in range(toFrameCount):
index = toFrameCount-1-i
bucketPx = buckets[index]
addPx = easedPoints[index]
if addPx > pxPool:
addPx = pxPool
buckets[index] = addPx
pxPool -= addPx
if pxPool <= 0:
break
if pxPool > 0:
incr = 0.01
while pxPool > 0:
for j in range(toFrameCount):
index = toFrameCount-1-j
bucketPx = buckets[index]
if (bucketPx+incr) <= pxPerFrame:
buckets[index] += incr
pxPool -= incr
# import matplotlib.pyplot as plt
# plt.plot(buckets)
# plt.show()
# sys.exit()
# print("%s ~ %s" % (fromPx, sum(buckets)))
return buckets
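# e.g. getEasedFrames(60, 90, 12.0): half of min(60, 90) = 30 frames of travel
# (360 px) is stretched across 60 + 30 = 90 frames, so the scroll ramps up
# from rest instead of jumping straight to full speed.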
audioFilename = a.AUDIO_OUTPUT_FILE % basename
print("%s steps in sequence" % len(sequence))
print('Total sequence time: %s' % formatSeconds(sequenceDuration/1000.0))
if a.VISUALIZE_SEQUENCE:
instrumentsCount = len(instruments)
labelW = 200
unitH = 10
unitW = 10
marginH = 2
imgH = (unitH+marginH) * instrumentsCount
imgW = totalSeconds * unitW + labelW
dfont = ImageFont.truetype(font="fonts/OpenSans-Regular.ttf", size=10)
print("Making viz %s x %s" % (imgW, imgH))
im = Image.new('RGB', (imgW, imgH), "#000000")
draw = ImageDraw.Draw(im, 'RGB')
for i, ins in enumerate(instruments):
y = i * (unitH + marginH)
draw.text((2, y), ins["name"], fill="#FFFFFF", font=dfont)
steps = [step for step in sequence if step["instrumentIndex"]==ins["index"]]
for step in steps:
sx = roundInt((step["ms"] - a.PAD_START) / 1000.0 / totalSeconds * (imgW-labelW) + labelW)
draw.rectangle([(sx, y), (sx+3, y+unitH)], fill=(roundInt(255*step["volume"]),0,0))
if i > 0:
draw.line([(0, y-1), (imgW, y-1)], fill="#cccccc", width=1)
printProgress(i+1, instrumentsCount)
im.save("output/viz.png")
sys.exit()
if a.PLOT_SEQUENCE:
import matplotlib.pyplot as plt
xs = [s['ms']/1000.0 for s in stations]
ys = [s['income'] for s in stations]
plt.plot(xs, ys)
plt.show()
sys.exit()
if a.PROBE:
sys.exit()
makeDirectories([a.AUDIO_OUTPUT_FILE, a.OUTPUT_FILE])
if not a.AUDIO_ONLY:
bulletImg = Image.open(a.IMAGE_FILE)
bulletImg = bulletImg.resize((a.CIRCLE_WIDTH, a.CIRCLE_WIDTH), resample=Image.LANCZOS)
mapImg = Image.open(a.MAP_IMAGE)
mapH = roundInt((1.0 * mapImg.size[1] / mapImg.size[0]) * a.MAP_W)
mapImg = mapImg.resize((a.MAP_W, mapH), resample=Image.LANCZOS)
fontStation = ImageFont.truetype(font=a.STATION_FONT, size=a.STATION_TEXT_SIZE, layout_engine=ImageFont.LAYOUT_RAQM)
fontBorough = ImageFont.truetype(font=a.BOROUGH_FONT, size=a.BOROUGH_TEXT_SIZE, layout_engine=ImageFont.LAYOUT_RAQM)
makeDirectories([a.OUTPUT_FRAME % (basename, "*")])
if a.OVERWRITE and a.SINGLE_FRAME < 1:
removeFiles(a.OUTPUT_FRAME % (basename, "*"))
# calculations for easing in/out
padFrameInCount = msToFrame(a.PAD_START, a.FPS)
station0FrameCount = msToFrame(stations[0]["duration"], a.FPS)
easeInFrames = getEasedFrames(padFrameInCount, station0FrameCount, pxPerFrame)
easeInFrameCount = len(easeInFrames)
padFrameOutCount = msToFrame(a.PAD_END, a.FPS)
station1FrameCount = msToFrame(stations[-2]["duration"], a.FPS)
easeOutFrames = getEasedFrames(padFrameOutCount, station1FrameCount, pxPerFrame)
# easeOutFrames = list(reversed(easeOutFrames))
easeOutFrameCount = len(easeOutFrames)
easeOutPixels = roundInt(sum(easeOutFrames))
print("Making video frame sequence...")
videoFrames = []
centerX = roundInt(a.WIDTH * 0.5)
xOffset = centerX
direction = -1
if a.RIGHT_TO_LEFT:
direction = 1
xOffset -= totalW
xOffsetF = 1.0 * xOffset
target = centerX-totalW if direction < 0 else centerX
for f in range(totalFrames):
frame = f + 1
ms = frameToMs(frame, a.FPS)
frameFilename = a.OUTPUT_FRAME % (basename, zeroPad(frame, totalFrames))
if a.SINGLE_FRAME < 1 or a.SINGLE_FRAME == frame:
if a.SINGLE_FRAME > 0:
frameFilename = "output/frame.png"
drawFrame(frameFilename, ms, xOffset, vstations, totalW, bulletImg, mapImg, fontStation, fontBorough, a)
if a.SINGLE_FRAME > 0:
sys.exit()
pixelsLeft = abs(target - xOffset)
# ease in start
if frame < easeInFrameCount:
xOffsetF += (direction * easeInFrames[frame-1])
xOffset = roundInt(xOffsetF)
# print(abs(xOffset-centerX))
# # correct any discrepancy after ease in
# elif frame <= easeInFrameCount:
# xOffset = (frame - padFrameInCount) * pxPerFrame
# xOffsetF = 1.0 * xOffset
# ease out end
elif pixelsLeft <= easeOutPixels:
pxStep = easeOutFrames.pop() if len(easeOutFrames) > 0 else 1
xOffsetF += (direction * pxStep)
xOffset = roundInt(xOffsetF)
# print("%s > %s" % (xOffset, centerX-totalW))
else:
xOffset += (direction * pxPerFrame)
xOffsetF = 1.0 * xOffset
xOffset = lim(xOffset, (centerX-totalW, centerX))
printProgress(frame, totalFrames)
# break
stepTime = logTime(startTime, "Finished frames")
padZeros = len(str(totalFrames))
outfile = a.OUTPUT_FILE % basename
frameInfile = a.OUTPUT_FRAME % (basename, '%s')
if a.VIDEO_ONLY:
compileFrames(frameInfile, a.FPS, outfile, padZeros)
sys.exit()
if a.OVERWRITE or not os.path.isfile(audioFilename):
mixAudio(sequence, sequenceDuration, audioFilename, masterDb=a.MASTER_DB)
else:
print("%s already exists" % audioFilename)
stepTime = logTime(stepTime, "Finished Audio")
if not a.AUDIO_ONLY:
if a.VIDEO_ONLY:
audioFilename = None
if a.OVERWRITE or not os.path.isfile(outfile):
compileFrames(frameInfile, a.FPS, outfile, padZeros, audioFile=audioFilename)
else:
print("%s already exists" % outfile)
logTime(startTime, "Total execution time")
avg_line_length: 48.559572 | max_line_length: 371 | alphanum_fraction: 0.668688 | count_classes: 0 | score_classes: 0 | count_generators: 0 | score_generators: 0 | count_decorators: 0 | score_decorators: 0 | count_async_functions: 0 | score_async_functions: 0 | count_documentation: 10581 | score_documentation: 0.291697
hexsha: bee4367acb7e986f0d0dbc48437bdbc33f87cdab | size: 15487 | ext: py | lang: Python
max_stars_repo_path: release/scripts/startup/bl_ui/space_text.py | max_stars_repo_name: vic3t3chn0/Bforartists | max_stars_repo_head_hexsha: 7c54a60dd7aa568e20ae7e3778dfef993b61b7b5 | max_stars_repo_licenses: ["Naumen", "Condor-1.1", "MS-PL"] | max_stars_count: 1 | max_stars_repo_stars_event_min_datetime: 2020-01-18T22:13:24.000Z | max_stars_repo_stars_event_max_datetime: 2020-01-18T22:13:24.000Z
max_issues_repo_path: release/scripts/startup/bl_ui/space_text.py | max_issues_repo_name: vic3t3chn0/Bforartists | max_issues_repo_head_hexsha: 7c54a60dd7aa568e20ae7e3778dfef993b61b7b5 | max_issues_repo_licenses: ["Naumen", "Condor-1.1", "MS-PL"] | max_issues_count: null | max_issues_repo_issues_event_min_datetime: null | max_issues_repo_issues_event_max_datetime: null
max_forks_repo_path: release/scripts/startup/bl_ui/space_text.py | max_forks_repo_name: vic3t3chn0/Bforartists | max_forks_repo_head_hexsha: 7c54a60dd7aa568e20ae7e3778dfef993b61b7b5 | max_forks_repo_licenses: ["Naumen", "Condor-1.1", "MS-PL"] | max_forks_count: null | max_forks_repo_forks_event_min_datetime: null | max_forks_repo_forks_event_max_datetime: null
# ##### BEGIN GPL LICENSE BLOCK #####
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ##### END GPL LICENSE BLOCK #####
# <pep8-80 compliant>
import bpy
from bpy.types import Header, Menu, Panel
from bpy.app.translations import pgettext_iface as iface_
class TEXT_HT_header(Header):
bl_space_type = 'TEXT_EDITOR'
def draw(self, context):
layout = self.layout
st = context.space_data
text = st.text
ALL_MT_editormenu.draw_hidden(context, layout) # bfa - show hide the editormenu
TEXT_MT_editor_menus.draw_collapsible(context, layout)
row = layout.row(align=True)
if text and text.is_modified:
row = layout.row(align=True)
row.alert = True
row.operator("text.resolve_conflict", text="", icon='HELP')
#layout.separator_spacer()
row = layout.row(align=True)
row.template_ID(st, "text", new="text.new", unlink="text.unlink", open="text.open")
layout.separator_spacer()
row = layout.row(align=True)
row.prop(st, "show_line_numbers", text="")
row.prop(st, "show_word_wrap", text="")
is_syntax_highlight_supported = st.is_syntax_highlight_supported()
syntax = row.row(align=True)
syntax.active = is_syntax_highlight_supported
syntax.prop(st, "show_syntax_highlight", text="")
if text:
text_name = text.name
is_osl = text_name.endswith((".osl", ".oso"))
row = layout.row()
if is_osl:
row = layout.row()
row.operator("node.shader_script_update")
else:
row = layout.row()
row.active = text_name.endswith(".py")
row.prop(text, "use_module")
row = layout.row()
row.active = is_syntax_highlight_supported
row.operator("text.run_script")
class TEXT_HT_footer(Header):
bl_space_type = 'TEXT_EDITOR'
bl_region_type = 'FOOTER'
def draw(self, context):
layout = self.layout
st = context.space_data
text = st.text
if text:
row = layout.row()
if text.filepath:
if text.is_dirty:
row.label(
text=iface_("File: *%s (unsaved)" % text.filepath),
translate=False,
)
else:
row.label(
text=iface_("File: %s" % text.filepath),
translate=False,
)
else:
row.label(
text=iface_("Text: External")
if text.library
else iface_("Text: Internal"),
)
# bfa - show hide the editormenu
class ALL_MT_editormenu(Menu):
bl_label = ""
def draw(self, context):
self.draw_menus(self.layout, context)
@staticmethod
def draw_menus(layout, context):
row = layout.row(align=True)
row.template_header() # editor type menus
class TEXT_MT_editor_menus(Menu):
bl_idname = "TEXT_MT_editor_menus"
bl_label = ""
def draw(self, context):
self.draw_menus(self.layout, context)
@staticmethod
def draw_menus(layout, context):
st = context.space_data
text = st.text
layout.menu("TEXT_MT_text")
layout.menu("TEXT_MT_view")
if text:
layout.menu("TEXT_MT_edit")
layout.menu("TEXT_MT_format")
class TEXT_PT_properties(Panel):
bl_space_type = 'TEXT_EDITOR'
bl_region_type = 'UI'
bl_category = "Text"
bl_label = "Properties"
def draw(self, context):
layout = self.layout
layout.use_property_split = True
layout.use_property_decorate = False
st = context.space_data
flow = layout.column_flow()
flow.use_property_split = False
flow.prop(st, "show_line_highlight")
flow.prop(st, "use_live_edit")
layout.use_property_split = True
flow = layout.column_flow()
flow.prop(st, "font_size")
flow.prop(st, "tab_width")
text = st.text
if text:
layout.prop(text, "indentation")
flow.use_property_split = False
flow.prop(st, "show_margin")
flow.use_property_split = True
if st.show_margin:
col = flow.column()
col.active = st.show_margin
col.prop(st, "margin_column")
class TEXT_PT_find(Panel):
bl_space_type = 'TEXT_EDITOR'
bl_region_type = 'UI'
bl_category = "Text"
bl_label = "Find & Replace"
def draw(self, context):
layout = self.layout
st = context.space_data
# find
col = layout.column(align=True)
row = col.row(align=True)
row.prop(st, "find_text", text="", icon='VIEWZOOM')
row.operator("text.find_set_selected", text="", icon='EYEDROPPER')
col.operator("text.find")
# replace
col = layout.column(align=True)
row = col.row(align=True)
row.prop(st, "replace_text", text="", icon='DECORATE_OVERRIDE')
row.operator("text.replace_set_selected", text="", icon='EYEDROPPER')
col.operator("text.replace")
# settings
row = layout.row(align=True)
if not st.text:
row.active = False
row.prop(st, "use_match_case", text="Case", toggle=True)
row.prop(st, "use_find_wrap", text="Wrap", toggle=True)
row.prop(st, "use_find_all", text="All", toggle=True)
class TEXT_MT_view(Menu):
bl_label = "View"
def draw(self, context):
layout = self.layout
st = context.space_data
layout.prop(st, "show_region_ui")
layout.separator()
layout.operator("text.move", text="Top of File", icon = "MOVE_UP").type = 'FILE_TOP'
layout.operator("text.move", text="Bottom of File",icon = "MOVE_DOWN").type = 'FILE_BOTTOM'
layout.separator()
layout.menu("INFO_MT_area")
#Redraw timer sub menu - Debug stuff
class TEXT_MT_redraw_timer(Menu):
bl_label = "Redraw Timer"
def draw(self, context):
layout = self.layout
layout.operator("wm.redraw_timer", text = 'Draw Region').type ='DRAW'
layout.operator("wm.redraw_timer", text = 'Draw Region Swap').type ='DRAW_SWAP'
layout.operator("wm.redraw_timer", text = 'Draw Window').type ='DRAW_WIN'
layout.operator("wm.redraw_timer", text = 'Draw Window Swap').type ='DRAW_WIN_SWAP'
layout.operator("wm.redraw_timer", text = 'Anim Step').type ='ANIM_STEP'
layout.operator("wm.redraw_timer", text = 'Anim Play').type ='ANIM_PLAY'
layout.operator("wm.redraw_timer", text = 'Undo/Redo').type ='UNDO'
class TEXT_MT_text(Menu):
bl_label = "File"
def draw(self, context):
layout = self.layout
st = context.space_data
text = st.text
layout.operator("text.new", text = "New Text", icon='NEW')
layout.operator("text.open", text = "Open Text", icon='FILE_FOLDER')
if text:
layout.operator("text.reload", icon = "FILE_REFRESH")
layout.column()
layout.operator("text.save", icon='FILE_TICK')
layout.operator("text.save_as", icon='SAVE_AS')
if text.filepath:
layout.separator()
layout.operator("text.make_internal", icon = "MAKE_INTERNAL")
layout.separator()
row = layout.row()
row.active = text.name.endswith(".py")
row.prop(text, "use_module")
row = layout.row()
layout.prop(st, "use_live_edit")
layout.separator()
layout.operator("text.run_script", icon = "PLAY")
layout.separator()
layout.menu("TEXT_MT_templates")
layout.separator()
layout.menu("TEXT_MT_redraw_timer", icon='TIME') #Redraw timer sub menu - Debug stuff
layout.operator("wm.debug_menu", icon='DEBUG') # debug menu
layout.operator("script.reload", icon='FILE_REFRESH') # Reload all python scripts. Mainly meant for the UI scripts.
class TEXT_MT_templates_py(Menu):
bl_label = "Python"
def draw(self, _context):
self.path_menu(
bpy.utils.script_paths("templates_py"),
"text.open",
props_default={"internal": True},
filter_ext=lambda ext: (ext.lower() == ".py")
)
class TEXT_MT_templates_osl(Menu):
bl_label = "Open Shading Language"
def draw(self, _context):
self.path_menu(
bpy.utils.script_paths("templates_osl"),
"text.open",
props_default={"internal": True},
filter_ext=lambda ext: (ext.lower() == ".osl")
)
class TEXT_MT_templates(Menu):
bl_label = "Templates"
def draw(self, _context):
layout = self.layout
layout.menu("TEXT_MT_templates_py")
layout.menu("TEXT_MT_templates_osl")
class TEXT_MT_format(Menu):
bl_label = "Format"
def draw(self, _context):
layout = self.layout
layout.operator("text.indent", icon = "INDENT")
layout.operator("text.unindent", icon = "UNINDENT")
layout.separator()
layout.operator("text.comment_toggle", text = "Comment", icon = "COMMENT").type = 'COMMENT'
layout.operator("text.comment_toggle", text = "Un-Comment", icon = "COMMENT").type = 'UNCOMMENT'
layout.operator("text.comment_toggle", icon = "COMMENT")
layout.separator()
layout.operator("text.convert_whitespace", text = "Whitespace to Spaces", icon = "WHITESPACE_SPACES").type = 'SPACES'
layout.operator("text.convert_whitespace", text = "Whitespace to Tabs", icon = "WHITESPACE_TABS").type = 'TABS'
class TEXT_MT_edit_to3d(Menu):
bl_label = "Text To 3D Object"
def draw(self, _context):
layout = self.layout
layout.operator("text.to_3d_object", text="One Object", icon = "OUTLINER_OB_FONT").split_lines = False
layout.operator("text.to_3d_object",text="One Object Per Line", icon = "OUTLINER_OB_FONT").split_lines = True
class TEXT_MT_edit(Menu):
bl_label = "Edit"
@classmethod
def poll(cls, context):
return context.space_data.text is not None
def draw(self, context):
layout = self.layout
layout.operator("text.cut", icon = "CUT")
layout.operator("text.copy", icon = "COPYDOWN")
layout.operator("text.paste", icon = "PASTEDOWN")
layout.operator("text.duplicate_line", icon = "DUPLICATE")
layout.separator()
layout.operator("text.move_lines", text="Move Line(s) Up", icon = "MOVE_UP").direction = 'UP'
layout.operator("text.move_lines", text="Move Line(s) Down", icon = "MOVE_DOWN").direction = 'DOWN'
layout.separator()
layout.menu("TEXT_MT_edit_move_select")
layout.separator()
layout.menu("TEXT_MT_edit_delete")
layout.separator()
layout.operator("text.select_all", icon = "SELECT_ALL")
layout.operator("text.select_line", icon = "SELECT_LINE")
layout.separator()
layout.operator("text.jump", text = "Go to line", icon = "GOTO")
layout.operator("text.start_find", text="Find", icon = "ZOOM_SET")
layout.operator("text.autocomplete", icon = "AUTOCOMPLETE")
layout.separator()
layout.menu("TEXT_MT_edit_to3d")
# move_select submenu
class TEXT_MT_edit_move_select(Menu):
bl_label = "Select Text"
def draw(self, context):
layout = self.layout
layout.operator("text.move_select", text = "Line End", icon = "HAND").type = 'LINE_END'
layout.operator("text.move_select", text = "Line Begin", icon = "HAND").type = 'LINE_BEGIN'
layout.operator("text.move_select", text = "Previous Character", icon = "HAND").type = 'PREVIOUS_CHARACTER'
layout.operator("text.move_select", text = "Next Character", icon = "HAND").type = 'NEXT_CHARACTER'
layout.operator("text.move_select", text = "Previous Word", icon = "HAND").type = 'PREVIOUS_WORD'
layout.operator("text.move_select", text = "Next Word", icon = "HAND").type = 'NEXT_WORD'
layout.operator("text.move_select", text = "Previous Line", icon = "HAND").type = 'PREVIOUS_LINE'
layout.operator("text.move_select", text = "Next Line", icon = "HAND").type = 'NEXT_LINE'
layout.operator("text.move_select", text = "Previous Character", icon = "HAND").type = 'PREVIOUS_CHARACTER'
layout.operator("text.move_select", text = "Next Character", icon = "HAND").type = 'NEXT_CHARACTER'
class TEXT_MT_context_menu(Menu):
bl_label = ""
def draw(self, _context):
layout = self.layout
layout.operator_context = 'INVOKE_DEFAULT'
layout.operator("text.cut", icon = "CUT")
layout.operator("text.copy", icon = "COPYDOWN")
layout.operator("text.paste", icon = "PASTEDOWN")
layout.separator()
layout.operator("text.move_lines", text="Move Line(s) Up", icon = "MOVE_UP").direction = 'UP'
layout.operator("text.move_lines", text="Move Line(s) Down", icon = "MOVE_DOWN").direction = 'DOWN'
layout.separator()
layout.operator("text.indent", icon = "INDENT")
layout.operator("text.unindent", icon = "UNINDENT")
layout.separator()
layout.operator("text.comment_toggle", icon = "COMMENT")
layout.separator()
layout.operator("text.autocomplete", icon = "AUTOCOMPLETE")
class TEXT_MT_edit_delete(Menu):
bl_label = "Delete"
def draw(self, context):
layout = self.layout
layout.operator("text.delete", text = "Next Character", icon = "DELETE").type = 'NEXT_CHARACTER'
layout.operator("text.delete", text = "Previous Character", icon = "DELETE").type = 'PREVIOUS_CHARACTER'
layout.operator("text.delete", text = "Next Word", icon = "DELETE").type = 'NEXT_WORD'
layout.operator("text.delete", text = "Previous Word", icon = "DELETE").type = 'PREVIOUS_WORD'
classes = (
ALL_MT_editormenu,
TEXT_HT_header,
TEXT_HT_footer,
TEXT_MT_editor_menus,
TEXT_PT_properties,
TEXT_PT_find,
TEXT_MT_view,
TEXT_MT_redraw_timer,
TEXT_MT_text,
TEXT_MT_templates,
TEXT_MT_templates_py,
TEXT_MT_templates_osl,
TEXT_MT_format,
TEXT_MT_edit_to3d,
TEXT_MT_context_menu,
TEXT_MT_edit,
TEXT_MT_edit_move_select,
TEXT_MT_edit_delete,
)
if __name__ == "__main__": # only for live edit.
from bpy.utils import register_class
for cls in classes:
register_class(cls)
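# A minimal sketch of the matching unregister step (an assumption; not part
# of the original file): Blender UI modules conventionally unregister classes
# in reverse order, so dependents are removed before the classes they extend.
def unregister():
    from bpy.utils import unregister_class
    for cls in reversed(classes):
        unregister_class(cls)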
| 31.735656
| 125
| 0.612966
| 13,826
| 0.892749
| 0
| 0
| 516
| 0.033318
| 0
| 0
| 4,758
| 0.307225
|
bee4d2aa2b67b36999556e3fe3dbdddbb08d368e
| 6,151
|
py
|
Python
|
wb/main/jobs/create_setup_bundle/create_setup_bundle_job.py
|
apaniukov/workbench
|
2f2653ecfd0143d2d53e33ad84379f13443fdfaa
|
[
"Apache-2.0"
] | 23
|
2022-03-17T12:24:09.000Z
|
2022-03-31T09:13:30.000Z
|
wb/main/jobs/create_setup_bundle/create_setup_bundle_job.py
|
apaniukov/workbench
|
2f2653ecfd0143d2d53e33ad84379f13443fdfaa
|
[
"Apache-2.0"
] | 18
|
2022-03-21T08:17:44.000Z
|
2022-03-30T12:42:30.000Z
|
wb/main/jobs/create_setup_bundle/create_setup_bundle_job.py
|
apaniukov/workbench
|
2f2653ecfd0143d2d53e33ad84379f13443fdfaa
|
[
"Apache-2.0"
] | 16
|
2022-03-17T12:24:14.000Z
|
2022-03-31T12:15:12.000Z
|
"""
OpenVINO DL Workbench
Class for create setup bundle job
Copyright (c) 2020 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import os
import shutil
import tempfile
from contextlib import closing
from wb.extensions_factories.database import get_db_session_for_celery
from wb.main.enumerates import JobTypesEnum, StatusEnum
from wb.main.jobs.interfaces.ijob import IJob
from wb.main.jobs.utils.database_functions import set_status_in_db
from wb.main.models import CreateSetupBundleJobModel, SharedArtifactModel
from wb.main.scripts.job_scripts_generators.setup_script_generator import SetupScriptGenerator
from wb.main.utils.bundle_creator.setup_bundle_creator import SetupBundleCreator, SetupComponentsParams
from wb.main.utils.utils import find_by_ext
class CreateSetupBundleJob(IJob):
job_type = JobTypesEnum.create_setup_bundle_type
_job_model_class = CreateSetupBundleJobModel
def __init__(self, job_id: int, **unused_kwargs):
super().__init__(job_id=job_id)
self._attach_default_db_and_socket_observers()
with closing(get_db_session_for_celery()) as session:
create_bundle_job_model: CreateSetupBundleJobModel = self.get_job_model(session)
deployment_bundle_config = create_bundle_job_model.deployment_bundle_config
self.deployment_bundle_id = deployment_bundle_config.deployment_bundle_id
self.additional_components = [name for name, value in deployment_bundle_config.json().items() if value]
self.targets = deployment_bundle_config.targets_to_json
self.operating_system = deployment_bundle_config.operating_system
self.include_model = deployment_bundle_config.include_model
self.topology_name = create_bundle_job_model.project.topology.name if self.include_model else None
self.topology_path = create_bundle_job_model.project.topology.path if self.include_model else None
bundle: SharedArtifactModel = create_bundle_job_model.deployment_bundle_config.deployment_bundle
self.bundle_path = bundle.build_full_artifact_path()
self.is_archive = bundle.is_archive
def run(self):
self._job_state_subject.update_state(status=StatusEnum.running, log='Preparing setup bundle.')
with tempfile.TemporaryDirectory('rw') as tmp_scripts_folder:  # NB: 'rw' is TemporaryDirectory's *suffix* argument, not a mode flag
setup_path = self.generate_script_from_template(tmp_scripts_folder, 'setup.sh')
get_devices_path = self.generate_script_from_template(tmp_scripts_folder,
'get_inference_engine_devices.sh')
get_resources_path = self.generate_script_from_template(tmp_scripts_folder, 'get_system_resources.sh')
has_internet_connection_path = self.generate_script_from_template(tmp_scripts_folder,
'has_internet_connection.sh')
topology_temporary_path = None
if self.include_model:
topology_temporary_path = os.path.join(tmp_scripts_folder, self.topology_name)
os.makedirs(topology_temporary_path)
xml_file = find_by_ext(self.topology_path, 'xml')
tmp_xml_file = os.path.join(topology_temporary_path, f'{self.topology_name}.xml')
shutil.copy(xml_file, tmp_xml_file)
bin_file = find_by_ext(self.topology_path, 'bin')
tmp_bin_file = os.path.join(topology_temporary_path, f'{self.topology_name}.bin')
shutil.copy(bin_file, tmp_bin_file)
setup_bundle_creator = SetupBundleCreator(
log_callback=lambda message, progress:
self._job_state_subject.update_state(log=message,
progress=progress)
)
setup_components = SetupComponentsParams(setup_path, get_devices_path,
get_resources_path,
has_internet_connection_path,
self.operating_system,
self.targets,
self.additional_components,
topology_temporary_path)
setup_bundle_creator.create(components=setup_components,
destination_bundle=self.bundle_path,
is_archive=self.is_archive)
self.on_success()
@staticmethod
def generate_script_from_template(result_scripts_path: str, script_name: str) -> str:
result_script_path = os.path.join(result_scripts_path, script_name)
job_script_generator = SetupScriptGenerator(script_name)
job_script_generator.create(result_file_path=result_script_path)
return result_script_path
def on_success(self):
with closing(get_db_session_for_celery()) as session:
deployment_job = self.get_job_model(session)
bundle = deployment_job.deployment_bundle_config.deployment_bundle
bundle.update(self.bundle_path)
bundle.write_record(session)
self._job_state_subject.update_state(status=StatusEnum.ready,
log='Setup bundle created successfully.')
set_status_in_db(SharedArtifactModel, bundle.id, StatusEnum.ready, session, force=True)
self._job_state_subject.detach_all_observers()
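# Illustrative sketch (not part of the Workbench code) of the pattern run()
# relies on above: files are generated into a TemporaryDirectory and must be
# consumed -- archived or copied -- before the `with` block exits, because
# the directory and its contents are deleted on exit.
def _bundle_pattern_demo(destination_base: str) -> str:
    with tempfile.TemporaryDirectory() as tmp_dir:
        script_path = os.path.join(tmp_dir, 'setup.sh')
        with open(script_path, 'w') as script_file:
            script_file.write('#!/bin/sh\necho setup\n')
        # Archive while the temporary files still exist.
        return shutil.make_archive(destination_base, 'gztar', tmp_dir)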
| 55.918182
| 115
| 0.676313
| 4,881
| 0.79353
| 0
| 0
| 351
| 0.057064
| 0
| 0
| 862
| 0.14014
|
bee53cb77bca7c1ce3a8035bc7f1d877d50fc52d
| 2,365
|
py
|
Python
|
ros_awsiot_agent/src/ros_awsiot_agent/mqtt2ros.py
|
whill-labs/ros_awsiot
|
4c15be53c0643fb81fd5a261a1af5be2652c4166
|
[
"MIT"
] | 4
|
2021-10-06T10:19:07.000Z
|
2022-03-02T02:13:09.000Z
|
ros_awsiot_agent/src/ros_awsiot_agent/mqtt2ros.py
|
whill-labs/ros_awsiot
|
4c15be53c0643fb81fd5a261a1af5be2652c4166
|
[
"MIT"
] | 1
|
2021-10-02T15:13:48.000Z
|
2021-10-02T15:13:48.000Z
|
ros_awsiot_agent/src/ros_awsiot_agent/mqtt2ros.py
|
whill-labs/ros_awsiot
|
4c15be53c0643fb81fd5a261a1af5be2652c4166
|
[
"MIT"
] | 1
|
2021-10-07T02:11:27.000Z
|
2021-10-07T02:11:27.000Z
|
#!/usr/bin/env python3
import logging
from os.path import expanduser
from typing import Any, Dict
from uuid import uuid4
import rospy
from awsiotclient import mqtt, pubsub
from ros_awsiot_agent import set_module_logger
from rosbridge_library.internal.message_conversion import populate_instance
from rosbridge_library.internal.ros_loader import get_message_class
set_module_logger(modname="awsiotclient", level=logging.WARN)
class Mqtt2Ros:
def __init__(
self,
topic_from: str,
topic_to: str,
topic_type: str,
conn_params: mqtt.ConnectionParams,
) -> None:
topic_class = get_message_class(topic_type)
self.inst = topic_class()
self.mqtt_connection = mqtt.init(conn_params)
connect_future = self.mqtt_connection.connect()
connect_future.result()
rospy.loginfo("Connected!")
self.pub = rospy.Publisher(topic_to, topic_class, queue_size=10)
self.mqtt_sub = pubsub.Subscriber(
self.mqtt_connection, topic_from, callback=self.callback
)
def callback(self, topic: str, msg_dict: Dict[str, Any]) -> None:
msg = populate_instance(msg_dict, self.inst)
self.pub.publish(msg)
def main() -> None:
rospy.init_node("mqtt2ros", anonymous=True)
topic_to = rospy.get_param("~topic_to", default="~output")
topic_from = rospy.get_param("~topic_from", default="/mqtt2ros")
topic_type = rospy.get_param("~topic_type", default="std_msgs/String")
conn_params = mqtt.ConnectionParams()
conn_params.cert = expanduser(
rospy.get_param("~cert", default="~/.aws/cert/certificate.pem.crt")
)
conn_params.key = expanduser(
rospy.get_param("~key", default="~/.aws/cert/private.pem.key")
)
conn_params.root_ca = expanduser(
rospy.get_param("~root_ca", default="~/.aws/cert/AmazonRootCA1.pem")
)
conn_params.endpoint = rospy.get_param("~endpoint")
conn_params.client_id = rospy.get_param(
"~client_id", default="mqtt-" + str(uuid4())
)
conn_params.signing_region = rospy.get_param(
"~signing_region", default="ap-northeast-1"
)
conn_params.use_websocket = rospy.get_param("~use_websocket", default=False)
Mqtt2Ros(topic_from, topic_to, topic_type, conn_params)
rospy.spin()
if __name__ == "__main__":
main()
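# Hedged usage note (illustrative; std_msgs is assumed to be installed
# alongside rospy): rosbridge's populate_instance fills a ROS message
# instance from a plain dict whose keys match the message fields, which is
# exactly what Mqtt2Ros.callback does with the decoded MQTT payload.
#
#   from std_msgs.msg import String
#   msg = populate_instance({"data": "hello"}, String())
#   assert msg.data == "hello"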
| 30.320513
| 80
| 0.689641
| 794
| 0.335729
| 0
| 0
| 0
| 0
| 0
| 0
| 337
| 0.142495
|
bee627678ed010aec77f469faec38fc6e41f1465
| 5,031
|
py
|
Python
|
poly/app.py
|
thdb-theo/Polynomial
|
9943ee5eb175ef01720954c6a95c685bd7fd5f6c
|
[
"MIT"
] | null | null | null |
poly/app.py
|
thdb-theo/Polynomial
|
9943ee5eb175ef01720954c6a95c685bd7fd5f6c
|
[
"MIT"
] | null | null | null |
poly/app.py
|
thdb-theo/Polynomial
|
9943ee5eb175ef01720954c6a95c685bd7fd5f6c
|
[
"MIT"
] | null | null | null |
import sys
import re
from PyQt4 import QtGui, QtCore
from polynomial import Polynomial
from rational import Rational
class Window(QtGui.QMainWindow):
width, height = 420, 130
def __init__(self):
super().__init__()
self.setFixedSize(Window.width, Window.height)
self.setWindowTitle('Find Roots')
self.setWindowIcon(QtGui.QIcon('Images/roots.png'))
self.poly = None
self.setFont(QtGui.QFont('Times New Roman'))
self.home()
def home(self):
self.is_imag = True
self.imag_b = QtGui.QCheckBox('Return imaginary numbers?')
self.imag_b.adjustSize()
self.imag_b.setParent(self)
self.imag_b.toggle()
self.imag_b.move(10, 5)
self.imag_b.stateChanged.connect(self.toggle_imag)
self.instruction = QtGui.QLabel(self)
self.instruction.setText('Enter coefficients of a polynomial separated by commas.')
self.instruction.move(10, 35)
self.instruction.adjustSize()
self.text = QtGui.QLabel(self)
self.entry = QtGui.QLineEdit(self)
self.entry.returnPressed.connect(self.find_roots)
self.entry.move(10, 60)
self.entry.resize(400, 30)
self.confirm = QtGui.QPushButton('Find Roots!', self)
self.confirm.move(10, 100)
self.confirm.clicked.connect(self.find_roots)
QtGui.QShortcut(QtGui.QKeySequence(QtCore.Qt.Key_Return), self, self.find_roots)
self.plot_b = QtGui.QPushButton('Plot', self)
self.plot_b.clicked.connect(self.plot)
self.plot_b.move(120, 100)
self.factor_b = QtGui.QPushButton('Factorise', self)
self.factor_b.clicked.connect(self.factor)
self.factor_b.move(230, 100)
self.derivate_b = QtGui.QPushButton('Derivate', self)
self.derivate_b.clicked.connect(self.derivate)
self.derivate_b.move(340, 100)
self.eq = QtGui.QLabel(self)
self.eq.move(10, Window.height)
self.show()
def toggle_imag(self):
self.is_imag = not self.is_imag
def find_roots(self):
self.entry_text = self.entry.text()
try:
self.poly = self.get_poly(self.entry_text)
except ValueError:
QtGui.QMessageBox.warning(self, 'warning', 'Invalid arguments')
return
roots = self.poly.roots(imag=self.is_imag)
self.eq.setFont(QtGui.QFont('Consolas', 8))
s = '%s = 0' % self.poly.short_str()
self.eq.setText(re.sub("(.{44})", "\\1\n",
s, 0, re.DOTALL))
self.eq.adjustSize()
t = []
for i, r in enumerate(roots):
t.append('x<sub>%s</sub> = %s' % (i, r))
s = '<br>'.join(t)
self.text.setText(s)
self.text.adjustSize()
self.text.move(10, Window.height + self.eq.height())
new_height = Window.height + self.eq.height() + self.text.height() + 10
self.setFixedSize(Window.width, new_height)
def plot(self) -> None:
self.entry_text = self.entry.text()
try:
self.poly = self.get_poly(self.entry_text)
except ValueError:
QtGui.QMessageBox.warning(self, 'warning', 'Invalid arguments')
return
self.poly.plot()
def factor(self):
self.entry_text = self.entry.text()
try:
self.poly = self.get_poly(self.entry_text)
except ValueError:
QtGui.QMessageBox.warning(self, 'warning', 'Invalid arguments')
return
self.eq.setText('')
self.text.setText(self.poly.factor())
self.text.move(10, Window.height)
self.text.adjustSize()
self.text.setWordWrap(True)
self.setFixedSize(Window.width, Window.height + self.text.height())
def derivate(self):
self.entry_text = self.entry.text()
try:
self.poly = self.get_poly(self.entry_text)
except ValueError:
QtGui.QMessageBox.warning(self, 'warning', 'Invalid arguments')
return
self.eq.setText('')
self.text.setText(str(self.poly.derivate()))
self.text.setFont(QtGui.QFont('Courier'))
self.text.move(10, Window.height)
self.text.adjustSize()
self.text.setWordWrap(True)
self.setFixedSize(Window.width, Window.height + self.text.height())
@staticmethod
def get_poly(text):
if 'x' in text:
return Polynomial.from_string(text)
terms = re.findall(r'-?\d+\.?\d*|/', text)
if '/' in terms:
numerator, denominator = terms[:terms.index('/')], terms[terms.index('/') + 1:]
num_coefs, den_coefs = list(map(float, numerator)), list(map(float, denominator))
return Rational(num_coefs, den_coefs)
else:
coefs = map(float, terms)
return Polynomial(*coefs)
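# Illustrative self-check of the coefficient regex used by Window.get_poly
# (a sketch, not wired into the GUI): it extracts signed ints/floats plus
# the '/' that separates numerator and denominator coefficient lists.
def _parse_demo():
    assert re.findall(r'-?\d+\.?\d*|/', '1, -2.5, 3') == ['1', '-2.5', '3']
    assert re.findall(r'-?\d+\.?\d*|/', '1, 0 / 2, 1') == ['1', '0', '/', '2', '1']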
def main():
app = QtGui.QApplication(sys.argv)
GUI = Window()
sys.exit(app.exec_())
if __name__ == '__main__':
main()
| 33.098684
| 93
| 0.603459
| 4,773
| 0.948718
| 0
| 0
| 511
| 0.10157
| 0
| 0
| 395
| 0.078513
|
bee68e7de68c03f76e1ccae51e5aa678663d50fa
| 493
|
py
|
Python
|
ariadne_server/tests/fixtures/fake_context.py
|
seanaye/FeatherLight-API
|
4d42a424762311ee35b3fd4f689883aa4197eb2e
|
[
"MIT"
] | 3
|
2020-06-28T17:30:57.000Z
|
2022-01-25T18:03:38.000Z
|
ariadne_server/tests/fixtures/fake_context.py
|
seanaye/FeatherLight-API
|
4d42a424762311ee35b3fd4f689883aa4197eb2e
|
[
"MIT"
] | null | null | null |
ariadne_server/tests/fixtures/fake_context.py
|
seanaye/FeatherLight-API
|
4d42a424762311ee35b3fd4f689883aa4197eb2e
|
[
"MIT"
] | 1
|
2021-02-04T07:14:08.000Z
|
2021-02-04T07:14:08.000Z
|
from secrets import token_hex
import pytest
class Object:
pass
class FakeContext(dict):
def __init__(self):
req_obj = Object()
req_obj.cookies = {}
req_obj.client = Object()
req_obj.client.host = token_hex(5)
req_obj.headers = {
'origin': 'some_origin',
'x-real-ip': 'fake_ip'
}
self['request'] = req_obj
@pytest.fixture(autouse=True, scope='function')
def context():
return FakeContext()
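# Usage sketch (an assumption about a typical consumer): because the fixture
# is autouse with function scope, every test gets a fresh FakeContext, and
# resolvers can read request metadata from it like a real GraphQL context.
#
#   def test_origin_header(context):
#       assert context['request'].headers['origin'] == 'some_origin'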
| 18.259259
| 47
| 0.584178
| 353
| 0.716024
| 0
| 0
| 87
| 0.176471
| 0
| 0
| 60
| 0.121704
|
bee850ee36621b995a6de029e878f2bcfff1b23e
| 4,552
|
py
|
Python
|
libs/evaluation/training_benchmark_database.py
|
eeshakumar/hythe
|
52ca795c8370ddfb2aa6fb87ff3f63a85c55f913
|
[
"MIT"
] | null | null | null |
libs/evaluation/training_benchmark_database.py
|
eeshakumar/hythe
|
52ca795c8370ddfb2aa6fb87ff3f63a85c55f913
|
[
"MIT"
] | null | null | null |
libs/evaluation/training_benchmark_database.py
|
eeshakumar/hythe
|
52ca795c8370ddfb2aa6fb87ff3f63a85c55f913
|
[
"MIT"
] | null | null | null |
# Copyright (c) 2020 Julian Bernhard,
# Klemens Esterle, Patrick Hart, Tobias Kessler
#
# This software is released under the MIT License.
# https://opensource.org/licenses/MIT
from bark.benchmark.benchmark_result import BenchmarkConfig
from bark_ml.library_wrappers.lib_fqf_iqn_qrdqn.agent import TrainingBenchmark
from bark.benchmark.benchmark_runner import BenchmarkRunner, BehaviorConfig
def default_training_evaluators():
default_config = {"success" : "EvaluatorGoalReached", "collision_other" : "EvaluatorCollisionEgoAgent",
"out_of_drivable" : "EvaluatorDrivableArea", "max_steps": "EvaluatorStepCount"}
return default_config
def default_terminal_criteria(max_episode_steps):
terminal_when = {"collision_other" : lambda x: x, "out_of_drivable" : lambda x: x, \
"max_steps": lambda x : x>max_episode_steps, "success" : lambda x: x}
return terminal_when
class TrainingBenchmarkDatabase(TrainingBenchmark):
def __init__(self, benchmark_database=None,
evaluators=None,
terminal_when=None):
self.database = benchmark_database
self.evaluators = evaluators
self.terminal_when = terminal_when
def create_benchmark_configs(self, num_scenarios):
benchmark_configs = []
if self.database:
for scenario_generator, scenario_set_name, scenario_set_param_desc in self.database:
benchmark_configs.extend(self.benchmark_configs_from_scen_gen( \
scenario_generator, scenario_set_name, \
scenario_set_param_desc, num_scenarios))
else:
scenario_generator = self.training_env._scenario_generator
benchmark_configs.extend(self.benchmark_configs_from_scen_gen(
scenario_generator, "training_env", \
{}, num_scenarios))
return benchmark_configs
def benchmark_configs_from_scen_gen(self, scenario_generator, scenario_set_name, \
scenario_set_param_desc, num_scenarios):
benchmark_configs = []
for scenario, scenario_idx in scenario_generator:
if num_scenarios and scenario_idx >= num_scenarios:
break
behavior_config = BehaviorConfig("agent", self.agent, None)
benchmark_config = \
BenchmarkConfig(
len(benchmark_configs),
behavior_config,
scenario,
scenario_idx,
scenario_set_name,
scenario_set_param_desc
)
benchmark_configs.append(benchmark_config)
return benchmark_configs
def reset(self, training_env, num_episodes, max_episode_steps, agent):
super(TrainingBenchmarkDatabase, self).reset(training_env, num_episodes, \
max_episode_steps, agent)
benchmark_configs = self.create_benchmark_configs(num_episodes)
evaluators = default_training_evaluators()
if self.evaluators:
evaluators = {**self.evaluators, **evaluators}
terminal_when = default_terminal_criteria(max_episode_steps)
if self.terminal_when:
terminal_when = {**self.terminal_when, **terminal_when}
self.benchmark_runner = BenchmarkRunner(
benchmark_configs = benchmark_configs,
evaluators=evaluators,
terminal_when = terminal_when,
num_scenarios=num_episodes,
log_eval_avg_every = 100000000000,
checkpoint_dir = "checkpoints",
merge_existing = False,
deepcopy=False)
def run(self):
mean_return, formatting = super(TrainingBenchmarkDatabase, self).run()
eval_result = self.benchmark_runner.run()
data_frame = eval_result.get_data_frame()
data_frame["max_steps"] = data_frame.Terminal.apply(lambda x: "max_steps" in x and (not "collision" in x))
data_frame["success"] = data_frame.Terminal.apply(lambda x: "success" in x and (not "collision" in x) and (not "max_steps" in x))
data_frame = data_frame.drop(columns=["scen_set", "scen_idx", "behavior", "Terminal", "step", "config_idx"])
mean = data_frame.mean(axis=0)
eval_result = {**mean.to_dict(), **mean_return}
return eval_result, f"Benchmark Result: {eval_result}"
def is_better(self, eval_result1, than_eval_result2):
pass
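# Note on the dict merges in reset() above: in {**custom, **defaults} the
# right-hand operand wins on key collisions, so the default evaluators and
# terminal criteria cannot be overridden by same-named user-supplied entries.
# A minimal sketch of that precedence:
#
#   custom = {"max_steps": "MyEvaluator"}
#   merged = {**custom, **default_training_evaluators()}
#   assert merged["max_steps"] == "EvaluatorStepCount"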
| 47.915789
| 133
| 0.65312
| 3,665
| 0.805141
| 0
| 0
| 0
| 0
| 0
| 0
| 572
| 0.125659
|
bee859bef7a37ff661836407bce80f2d3470ddd9
| 27,023
|
py
|
Python
|
goldstone/tenants/tests_cloud.py
|
Solinea/goldstone-server
|
91b078ca9fed1b33f48dc79f4af5c9d1817a1bc5
|
[
"Apache-2.0"
] | 14
|
2015-05-18T22:11:11.000Z
|
2020-08-14T06:50:09.000Z
|
goldstone/tenants/tests_cloud.py
|
lexjacobs/goldstone-server
|
91b078ca9fed1b33f48dc79f4af5c9d1817a1bc5
|
[
"Apache-2.0"
] | 568
|
2015-05-17T01:26:36.000Z
|
2021-06-10T20:36:47.000Z
|
goldstone/tenants/tests_cloud.py
|
lexjacobs/goldstone-server
|
91b078ca9fed1b33f48dc79f4af5c9d1817a1bc5
|
[
"Apache-2.0"
] | 22
|
2015-05-25T20:16:06.000Z
|
2021-08-08T20:25:24.000Z
|
"""Unit tests for /tenants/<id>/cloud endpoints."""
# Copyright 2015 Solinea, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
from django.contrib.auth import get_user_model
from rest_framework.status import HTTP_200_OK, HTTP_401_UNAUTHORIZED, \
HTTP_400_BAD_REQUEST, HTTP_201_CREATED, HTTP_403_FORBIDDEN, \
HTTP_204_NO_CONTENT
from goldstone.test_utils import Setup, create_and_login, \
AUTHORIZATION_PAYLOAD, CONTENT_BAD_TOKEN, CONTENT_NO_CREDENTIALS, \
check_response_without_uuid, TEST_USER_1, CONTENT_PERMISSION_DENIED, \
BAD_TOKEN, BAD_UUID
from .models import Tenant, Cloud
from .tests_tenants import TENANTS_ID_URL
# HTTP response content.
CONTENT_MISSING_OS_USERNAME = '"username":["This field is required."]'
CONTENT_MISSING_OS_NAME = '"tenant_name":["This field is required."]'
CONTENT_MISSING_OS_PASSWORD = '"password":["This field is required."]'
CONTENT_MISSING_OS_URL = '"auth_url":["This field is required."]'
# URLs used by this module.
TENANTS_ID_CLOUD_URL = TENANTS_ID_URL + "cloud/"
TENANTS_ID_CLOUD_ID_URL = TENANTS_ID_CLOUD_URL + "%s/"
class TenantsIdCloud(Setup):
"""Listing the OpenStack clouds of a tenant, and creating a new OpenStack
cloud in a tenant."""
def test_not_logged_in(self):
"""Getting the tenant clouds, or creating a tenant cloud, without being
logged in."""
# Make a tenant.
tenant = Tenant.objects.create(name='tenant 1',
owner='John',
owner_contact='206.867.5309')
# Try the GET and POST without an authorization token.
responses = \
[self.client.get(TENANTS_ID_CLOUD_URL % tenant.uuid),
self.client.post(TENANTS_ID_CLOUD_URL % tenant.uuid,
json.dumps({"tenant_name": 'a',
"username": 'b',
"password": 'c',
"auth_url":
"http://d.com"}),
content_type="application/json")]
for response in responses:
self.assertContains(response,
CONTENT_NO_CREDENTIALS,
status_code=HTTP_401_UNAUTHORIZED)
# Try the GET and POST with a bad authorization token.
responses = [
self.client.get(
TENANTS_ID_CLOUD_URL % tenant.uuid,
HTTP_AUTHORIZATION=AUTHORIZATION_PAYLOAD % BAD_TOKEN),
self.client.post(
TENANTS_ID_CLOUD_URL % tenant.uuid,
json.dumps({"tenant_name": 'a',
"username": 'b',
"password": 'c',
"auth_url": "http://d.com"}),
content_type="application/json",
HTTP_AUTHORIZATION=AUTHORIZATION_PAYLOAD % BAD_TOKEN)]
for response in responses:
self.assertContains(response,
CONTENT_BAD_TOKEN,
status_code=HTTP_401_UNAUTHORIZED)
def test_no_access(self):
"""Getting the tenant clouds, or creating a tenant cloud, without being
a tenant admin."""
# Make a tenant.
tenant = Tenant.objects.create(name='tenant 1',
owner='John',
owner_contact='206.867.5309')
# Create a normal user who's a member of the tenant, but *not* a
# tenant_admin
token = create_and_login()
user = get_user_model().objects.get(username=TEST_USER_1[0])
user.tenant = tenant
user.save()
# Try the GET and POST.
responses = [
self.client.get(
TENANTS_ID_CLOUD_URL % tenant.uuid,
HTTP_AUTHORIZATION=AUTHORIZATION_PAYLOAD % token),
self.client.post(
TENANTS_ID_CLOUD_URL % tenant.uuid,
json.dumps({"tenant_name": 'a',
"username": 'b',
"password": 'c',
"auth_url": "http://d.com"}),
content_type="application/json",
HTTP_AUTHORIZATION=AUTHORIZATION_PAYLOAD % token)]
for response in responses:
self.assertContains(response,
CONTENT_PERMISSION_DENIED,
status_code=HTTP_403_FORBIDDEN)
def test_no_tenant(self):
"""Getting a tenant, or creating a cloud in a tenant, when the tenant
doesn't exist."""
# Create a Django admin user.
token = create_and_login(is_superuser=True)
# Make a tenant, then delete it.
tenant = Tenant.objects.create(name='tenant',
owner='John',
owner_contact='206.867.5309')
tenant.delete()
# Try the GET and POST to a tenant that doesn't exist.
responses = [
self.client.get(
TENANTS_ID_CLOUD_URL % tenant.uuid,
HTTP_AUTHORIZATION=AUTHORIZATION_PAYLOAD % token),
self.client.post(
TENANTS_ID_CLOUD_URL % tenant.uuid,
json.dumps({"tenant_name": 'a',
"username": 'b',
"password": 'c',
"auth_url": "http://d.com"}),
content_type="application/json",
HTTP_AUTHORIZATION=AUTHORIZATION_PAYLOAD % token)]
for response in responses:
self.assertContains(response,
CONTENT_PERMISSION_DENIED,
status_code=HTTP_403_FORBIDDEN)
def test_get(self):
"""List a tenant's clouds."""
# The clouds in this test.
TENANT_CLOUD = [{"tenant_name": 'a',
"username": 'b',
"password": 'c',
"auth_url": "http://d.com"},
{"tenant_name": "ee",
"username": "ffffffffuuuuu",
"password": "gah",
"auth_url": "http://route66.com"},
{"tenant_name": "YUNO",
"username": "YOLO",
"password": "ZOMG",
"auth_url": "http://lol.com"},
]
OTHER_CLOUD = [{"tenant_name": "lisa",
"username": "sad lisa lisa",
"password": "on the road",
"auth_url": "http://tofindout.com"},
{"tenant_name": "left",
"username": "right",
"password": "center",
"auth_url": "http://down.com"},
]
EXPECTED_RESULT = TENANT_CLOUD
# Make a tenant
tenant = Tenant.objects.create(name='tenant',
owner='John',
owner_contact='206.867.5309')
# Create clouds in this tenant.
for entry in TENANT_CLOUD:
Cloud.objects.create(tenant=tenant, **entry)
# Create clouds that don't belong to the tenant.
tenant_2 = Tenant.objects.create(name='boris',
owner='John',
owner_contact='206.867.5309')
for entry in OTHER_CLOUD:
entry["tenant"] = tenant_2
Cloud.objects.create(**entry)
# Log in as the tenant_admin.
token = create_and_login(tenant=tenant)
# Get the tenant's cloud list and check the response. We do a partial
# check of the uuid key. It must exist, and its value must be a string
# that's >= 32 characters.
response = self.client.get(
TENANTS_ID_CLOUD_URL % tenant.uuid,
HTTP_AUTHORIZATION=AUTHORIZATION_PAYLOAD % token)
# pylint: disable=E1101
self.assertEqual(response.status_code, HTTP_200_OK)
response_content = json.loads(response.content)
for entry in response_content["results"]:
self.assertIsInstance(entry["uuid"], basestring)
self.assertGreaterEqual(len(entry["uuid"]), 32)
del entry["uuid"]
self.assertItemsEqual(response_content["results"], EXPECTED_RESULT)
def test_post(self):
"""Create an OpenStack cloud in a tenant."""
# The clouds in this test.
TENANT_CLOUD = [{"tenant_name": 'a',
"username": 'b',
"password": 'c',
"auth_url": "http://d.com"},
{"tenant_name": "ee",
"username": "ffffffffuuuuu",
"password": "gah",
"auth_url": "http://route66.com"},
]
# Make a tenant
tenant = Tenant.objects.create(name='tenant',
owner='John',
owner_contact='206.867.5309')
# Create a user who's the tenant_admin of this tenant, and log him in.
token = create_and_login(tenant=tenant)
# Create OpenStack clouds in this tenant, and check the results.
for entry in TENANT_CLOUD:
response = self.client.post(
TENANTS_ID_CLOUD_URL % tenant.uuid,
json.dumps(entry),
content_type="application/json",
HTTP_AUTHORIZATION=AUTHORIZATION_PAYLOAD % token)
check_response_without_uuid(response, HTTP_201_CREATED, entry)
class TenantsIdCloudId(Setup):
"""Retrieve a particular OpenStack cloud from a tenant, update an OpenStack
cloud in a tenant, and delete an OpenStack cloud from a tenant."""
def test_not_logged_in(self):
"""The client is not logged in."""
# Make a tenant, and put one OpenStack cloud in it.
tenant = Tenant.objects.create(name='tenant 1',
owner='John',
owner_contact='206.867.5309')
cloud = Cloud.objects.create(tenant_name="ee",
username="ffffffffuuuuu",
password="gah",
auth_url="http://route66.com",
tenant=tenant)
# Try GET, PUT, and DELETE without an authorization token.
responses = [self.client.get(TENANTS_ID_CLOUD_ID_URL %
(tenant.uuid, cloud.uuid)),
self.client.put(TENANTS_ID_CLOUD_ID_URL %
(tenant.uuid, cloud.uuid),
json.dumps({"username": "fool"}),
content_type="application/json"),
self.client.delete(TENANTS_ID_CLOUD_ID_URL %
(tenant.uuid, cloud.uuid)),
]
for response in responses:
self.assertContains(response,
CONTENT_NO_CREDENTIALS,
status_code=HTTP_401_UNAUTHORIZED)
# Try again with a bad authorization token.
responses = [
self.client.get(
TENANTS_ID_CLOUD_ID_URL %
(tenant.uuid, cloud.uuid),
HTTP_AUTHORIZATION=AUTHORIZATION_PAYLOAD % BAD_TOKEN),
self.client.put(
TENANTS_ID_CLOUD_ID_URL %
(tenant.uuid, cloud.uuid),
json.dumps({"username": "fool"}),
content_type="application/json",
HTTP_AUTHORIZATION=AUTHORIZATION_PAYLOAD % BAD_TOKEN),
self.client.delete(
TENANTS_ID_CLOUD_ID_URL %
(tenant.uuid, cloud.uuid),
HTTP_AUTHORIZATION=AUTHORIZATION_PAYLOAD % BAD_TOKEN),
]
for response in responses:
self.assertContains(response,
CONTENT_BAD_TOKEN,
status_code=HTTP_401_UNAUTHORIZED)
def test_no_access(self):
"""The client isn't an authorized user."""
# Make a tenant, put an OpenStack cloud in it.
tenant = Tenant.objects.create(name='tenant 1',
owner='John',
owner_contact='206.867.5309')
cloud = Cloud.objects.create(tenant_name="ee",
username="ffffffffuuuuu",
password="gah",
auth_url="http://route66.com",
tenant=tenant)
# Create a normal user who's a member of the tenant, but *not* a
# tenant_admin
token = create_and_login()
user = get_user_model().objects.get(username=TEST_USER_1[0])
user.tenant = tenant
user.save()
# Try GET, PUT, and DELETE.
responses = [
self.client.get(
TENANTS_ID_CLOUD_ID_URL %
(tenant.uuid, cloud.uuid),
HTTP_AUTHORIZATION=AUTHORIZATION_PAYLOAD % token),
self.client.put(
TENANTS_ID_CLOUD_ID_URL %
(tenant.uuid, cloud.uuid),
json.dumps({"username": "fool"}),
content_type="application/json",
HTTP_AUTHORIZATION=AUTHORIZATION_PAYLOAD % token),
self.client.delete(
TENANTS_ID_CLOUD_ID_URL %
(tenant.uuid, cloud.uuid),
HTTP_AUTHORIZATION=AUTHORIZATION_PAYLOAD % token),
]
for response in responses:
self.assertContains(response,
CONTENT_PERMISSION_DENIED,
status_code=HTTP_403_FORBIDDEN)
# Ensure the cloud wasn't deleted.
self.assertEqual(Cloud.objects.count(), 1)
def test_no_tenant(self):
"""Getting a cloud, updating a cloud, or deleting a cloud, when the
tenant doesn't exist."""
# Make a tenant, put an OpenStack cloud in it.
tenant = Tenant.objects.create(name='tenant 1',
owner='John',
owner_contact='206.867.5309')
cloud = Cloud.objects.create(tenant_name="ee",
username="ffffffffuuuuu",
password="gah",
auth_url="http://route66.com",
tenant=tenant)
# Create a tenant_admin of the tenant.
token = create_and_login(tenant=tenant)
# Try GET, PUT, and DELETE to a nonexistent tenant.
responses = [
self.client.get(
TENANTS_ID_CLOUD_ID_URL % (BAD_UUID, cloud.uuid),
HTTP_AUTHORIZATION=AUTHORIZATION_PAYLOAD % token),
self.client.put(
TENANTS_ID_CLOUD_ID_URL % (BAD_UUID, cloud.uuid),
json.dumps({"password": "fool"}),
content_type="application/json",
HTTP_AUTHORIZATION=AUTHORIZATION_PAYLOAD % token),
self.client.delete(
TENANTS_ID_CLOUD_ID_URL % (BAD_UUID, cloud.uuid),
HTTP_AUTHORIZATION=AUTHORIZATION_PAYLOAD % token),
]
for response in responses:
self.assertContains(response,
CONTENT_PERMISSION_DENIED,
status_code=HTTP_403_FORBIDDEN)
def test_get_no_cloud(self):
"""Get an OpenStack cloud that does not exist from a tenant."""
# Make a tenant.
tenant = Tenant.objects.create(name='tenant',
owner='John',
owner_contact='206.867.5309')
# Create a tenant_admin of the tenant.
token = create_and_login(tenant=tenant)
# Try GETting a nonexistent cloud from this tenant.
response = self.client.get(
TENANTS_ID_CLOUD_ID_URL %
(tenant.uuid, BAD_UUID),
HTTP_AUTHORIZATION=AUTHORIZATION_PAYLOAD % token)
self.assertContains(response,
CONTENT_PERMISSION_DENIED,
status_code=HTTP_403_FORBIDDEN)
def test_get(self):
"""Get a specific OpenStack cloud from a tenant."""
# The clouds in this test.
TENANT_CLOUD = [{"tenant_name": 'a',
"username": 'b',
"password": 'c',
"auth_url": "http://d.com"},
{"tenant_name": "ee",
"username": "ffffffffuuuuu",
"password": "gah",
"auth_url": "http://route66.com"},
]
# Make a tenant.
tenant = Tenant.objects.create(name='tenant 1',
owner='John',
owner_contact='206.867.5309')
# Create a tenant_admin of the tenant.
token = create_and_login(tenant=tenant)
# For every test cloud...
for entry in TENANT_CLOUD:
# Make it.
cloud = Cloud.objects.create(tenant=tenant, **entry)
# Try GETting it.
response = self.client.get(
TENANTS_ID_CLOUD_ID_URL %
(tenant.uuid, cloud.uuid),
HTTP_AUTHORIZATION=AUTHORIZATION_PAYLOAD % token)
check_response_without_uuid(response, HTTP_200_OK, entry)
def test_put_no_cloud(self):
"""Update a non-existent OpenStack cloud of a tenant."""
# Make a tenant.
tenant = Tenant.objects.create(name='tenant',
owner='John',
owner_contact='206.867.5309')
# Create a tenant_admin of the tenant.
token = create_and_login(tenant=tenant)
# Try PUTing to a nonexistent OpenStack cloud in this tenant.
response = self.client.put(
TENANTS_ID_CLOUD_ID_URL % (tenant.uuid, BAD_UUID),
json.dumps({"tenant_name": "fool"}),
content_type="application/json",
HTTP_AUTHORIZATION=AUTHORIZATION_PAYLOAD % token)
self.assertContains(response,
CONTENT_PERMISSION_DENIED,
status_code=HTTP_403_FORBIDDEN)
def test_put_bad_fields(self):
"""Update an OpenStack cloud with missing fields, unrecognized fields,
or a field that's not allowed to be changed by the tenant_admin."""
# The cloud in this test.
TENANT_CLOUD = {"tenant_name": 'a',
"username": 'b',
"password": 'c',
"auth_url": "http://d.com"}
# Make a tenant, put an OpenStack cloud in it.
tenant = Tenant.objects.create(name='tenant 1',
owner='John',
owner_contact='206.867.5309')
cloud = Cloud.objects.create(tenant=tenant, **TENANT_CLOUD)
# Create a tenant_admin of the tenant.
token = create_and_login(tenant=tenant)
# Try PUTing to the cloud with no fields.
response = self.client.put(
TENANTS_ID_CLOUD_ID_URL % (tenant.uuid, cloud.uuid),
HTTP_AUTHORIZATION=AUTHORIZATION_PAYLOAD % token)
for content in [CONTENT_MISSING_OS_USERNAME, CONTENT_MISSING_OS_NAME,
CONTENT_MISSING_OS_PASSWORD, CONTENT_MISSING_OS_URL]:
self.assertContains(response,
content,
status_code=HTTP_400_BAD_REQUEST)
# Try PUTing to the cloud with no change, and with a change to an
# unrecognized field.
response = self.client.put(
TENANTS_ID_CLOUD_ID_URL % (tenant.uuid, cloud.uuid),
json.dumps(TENANT_CLOUD),
content_type="application/json",
HTTP_AUTHORIZATION=AUTHORIZATION_PAYLOAD % token)
check_response_without_uuid(response, HTTP_200_OK, TENANT_CLOUD)
bad_field = TENANT_CLOUD.copy()
bad_field["forkintheroad"] = "Traci"
response = self.client.put(
TENANTS_ID_CLOUD_ID_URL % (tenant.uuid, cloud.uuid),
json.dumps(bad_field),
content_type="application/json",
HTTP_AUTHORIZATION=AUTHORIZATION_PAYLOAD % token)
check_response_without_uuid(response, HTTP_200_OK, TENANT_CLOUD)
# Try PUTing to a cloud on a field that's not allowed to be changed.
# The response should be the same as the "unrecognized field" case.
bad_field = TENANT_CLOUD.copy()
bad_field["uuid"] = BAD_UUID
response = self.client.put(
TENANTS_ID_CLOUD_ID_URL % (tenant.uuid, cloud.uuid),
json.dumps(bad_field),
content_type="application/json",
HTTP_AUTHORIZATION=AUTHORIZATION_PAYLOAD % token)
check_response_without_uuid(response, HTTP_200_OK, TENANT_CLOUD)
def test_put(self):
"""Update an Openstack cloud in a tenant."""
# The cloud in this test.
TENANT_CLOUD = {"tenant_name": 'a',
"username": 'b',
"password": 'c',
"auth_url": "http://d.com"}
EXPECTED_RESPONSE = TENANT_CLOUD.copy()
EXPECTED_RESPONSE["password"] = "fffffffffuuuuuuu"
# Make a tenant, put an OpenStack cloud in it.
tenant = Tenant.objects.create(name='tenant 1',
owner='John',
owner_contact='206.867.5309')
cloud = Cloud.objects.create(tenant=tenant, **TENANT_CLOUD)
# Create a tenant_admin of the tenant.
token = create_and_login(tenant=tenant)
# Try PUTing to the cloud.
response = self.client.put(
TENANTS_ID_CLOUD_ID_URL % (tenant.uuid, cloud.uuid),
json.dumps(EXPECTED_RESPONSE),
content_type="application/json",
HTTP_AUTHORIZATION=AUTHORIZATION_PAYLOAD % token)
check_response_without_uuid(response, HTTP_200_OK, EXPECTED_RESPONSE)
# Double-check that the Cloud row was updated.
self.assertEqual(Cloud.objects.count(), 1)
self.assertEqual(Cloud.objects.all()[0].password,
EXPECTED_RESPONSE["password"])
def test_delete_not_member(self):
"""Try deleting a cloud of another tenant."""
# The clouds in this test.
TENANT_CLOUD = [{"tenant_name": 'a',
"username": 'b',
"password": 'c',
"auth_url": "http://d.com"},
{"tenant_name": "ee",
"username": "ffffffffuuuuu",
"password": "gah",
"auth_url": "http://route66.com"},
]
# Make two tenant+cloud pairs
tenant = Tenant.objects.create(name='tenant',
owner='John',
owner_contact='206.867.5309')
tenant_2 = Tenant.objects.create(name='tenant_2',
owner='John',
owner_contact='206.867.5309')
Cloud.objects.create(tenant=tenant, **TENANT_CLOUD[0])
cloud_2 = Cloud.objects.create(tenant=tenant_2, **TENANT_CLOUD[1])
# Create a tenant_admin of the first tenant.
token = create_and_login(tenant=tenant)
# Try DELETE on the second (other) tenant's cloud.
response = self.client.delete(
TENANTS_ID_CLOUD_ID_URL %
(tenant_2.uuid, cloud_2.uuid),
HTTP_AUTHORIZATION=AUTHORIZATION_PAYLOAD % token)
self.assertContains(response,
CONTENT_PERMISSION_DENIED,
status_code=HTTP_403_FORBIDDEN)
# Ensure we have the right number of OpenStack clouds.
self.assertEqual(Cloud.objects.count(), 2)
def test_delete(self):
"""Delete an OpenStack cloud from a tenant."""
# The clouds in this test.
TENANT_CLOUD = [{"tenant_name": 'a',
"username": 'b',
"password": 'c',
"auth_url": "http://d.com"},
{"tenant_name": "ee",
"username": "ffffffffuuuuu",
"password": "gah",
"auth_url": "http://route66.com"},
]
# Make a tenant with two clouds.
tenant = Tenant.objects.create(name='tenant',
owner='John',
owner_contact='206.867.5309')
cloud = Cloud.objects.create(tenant=tenant, **TENANT_CLOUD[0])
cloud_2 = Cloud.objects.create(tenant=tenant, **TENANT_CLOUD[1])
# Create a tenant_admin.
token = create_and_login(tenant=tenant)
# DELETE one cloud, check, DELETE the other cloud, check.
response = self.client.delete(
TENANTS_ID_CLOUD_ID_URL % (tenant.uuid, cloud_2.uuid),
HTTP_AUTHORIZATION=AUTHORIZATION_PAYLOAD % token)
self.assertContains(response, '', status_code=HTTP_204_NO_CONTENT)
# Ensure we have the right number of Clouds.
self.assertEqual(Cloud.objects.count(), 1)
self.assertEqual(Cloud.objects.all()[0].tenant_name,
TENANT_CLOUD[0]["tenant_name"])
response = self.client.delete(
TENANTS_ID_CLOUD_ID_URL % (tenant.uuid, cloud.uuid),
HTTP_AUTHORIZATION=AUTHORIZATION_PAYLOAD % token)
self.assertContains(response, '', status_code=HTTP_204_NO_CONTENT)
# Ensure we have the right number of Clouds.
self.assertEqual(Cloud.objects.count(), 0)
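# Illustration of the URL templates above (a sketch; TENANTS_ID_URL comes
# from tests_tenants and is assumed to carry one '%s' placeholder for the
# tenant uuid): each level appends one '%s/' segment, so the cloud-detail
# template is filled with a (tenant_uuid, cloud_uuid) pair.
#
#   TENANTS_ID_CLOUD_URL % tenant.uuid                   # .../<tenant>/cloud/
#   TENANTS_ID_CLOUD_ID_URL % (tenant.uuid, cloud.uuid)  # .../<tenant>/cloud/<cloud>/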
| 41.130898
| 79
| 0.531029
| 25,423
| 0.940791
| 0
| 0
| 0
| 0
| 0
| 0
| 7,250
| 0.26829
|
bee920effbd17d10746b345bbf080e1ea4ae7a4f
| 4,750
|
py
|
Python
|
onnxruntime/test/server/integration_tests/model_zoo_data_prep.py
|
PhaniShekhar/onnxruntime
|
2663b9c44381b30525ae6234e13ed25c69206d07
|
[
"MIT"
] | null | null | null |
onnxruntime/test/server/integration_tests/model_zoo_data_prep.py
|
PhaniShekhar/onnxruntime
|
2663b9c44381b30525ae6234e13ed25c69206d07
|
[
"MIT"
] | null | null | null |
onnxruntime/test/server/integration_tests/model_zoo_data_prep.py
|
PhaniShekhar/onnxruntime
|
2663b9c44381b30525ae6234e13ed25c69206d07
|
[
"MIT"
] | null | null | null |
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
import os
import sys
import shutil
import onnx
import onnxruntime
import json
from google.protobuf.json_format import MessageToJson
import predict_pb2
import onnx_ml_pb2
# Current models only have one input and one output
def get_io_name(model_file_name):
sess = onnxruntime.InferenceSession(model_file_name)
return sess.get_inputs()[0].name, sess.get_outputs()[0].name
def gen_input_pb(pb_full_path, input_name, output_name, request_file_path):
t = onnx_ml_pb2.TensorProto()
with open(pb_full_path, 'rb') as fin:
t.ParseFromString(fin.read())
predict_request = predict_pb2.PredictRequest()
predict_request.inputs[input_name].CopyFrom(t)
predict_request.output_filter.append(output_name)
with open(request_file_path, "wb") as fout:
fout.write(predict_request.SerializeToString())
def gen_output_pb(pb_full_path, output_name, response_file_path):
t = onnx_ml_pb2.TensorProto()
with open(pb_full_path, 'rb') as fin:
t.ParseFromString(fin.read())
predict_response = predict_pb2.PredictResponse()
predict_response.outputs[output_name].CopyFrom(t)
with open(response_file_path, "wb") as fout:
fout.write(predict_response.SerializeToString())
def tensor2dict(full_path):
t = onnx.TensorProto()
with open(full_path, 'rb') as f:
t.ParseFromString(f.read())
jsonStr = MessageToJson(t, use_integers_for_enums=True)
data = json.loads(jsonStr)
return data
def gen_input_json(pb_full_path, input_name, output_name, json_file_path):
data = tensor2dict(pb_full_path)
inputs = {}
inputs[input_name] = data
output_filters = [ output_name ]
req = {}
req["inputs"] = inputs
req["outputFilter"] = output_filters
with open(json_file_path, 'w') as outfile:
json.dump(req, outfile)
def gen_output_json(pb_full_path, output_name, json_file_path):
data = tensor2dict(pb_full_path)
output = {}
output[output_name] = data
resp = {}
resp["outputs"] = output
with open(json_file_path, 'w') as outfile:
json.dump(resp, outfile)
def gen_req_resp(model_zoo, test_data, copy_model=False):
skip_list = [
('opset8', 'mxnet_arcface') # REASON: Known issue
]
opsets = [name for name in os.listdir(model_zoo) if os.path.isdir(os.path.join(model_zoo, name))]
for opset in opsets:
os.makedirs(os.path.join(test_data, opset), exist_ok=True)
current_model_folder = os.path.join(model_zoo, opset)
current_data_folder = os.path.join(test_data, opset)
models = [name for name in os.listdir(current_model_folder) if os.path.isdir(os.path.join(current_model_folder, name))]
for model in models:
print("Working on Opset: {0}, Model: {1}".format(opset, model))
if (opset, model) in skip_list:
print(" SKIP!!")
continue
os.makedirs(os.path.join(current_data_folder, model), exist_ok=True)
src_folder = os.path.join(current_model_folder, model)
dst_folder = os.path.join(current_data_folder, model)
onnx_file_path = ''
for fname in os.listdir(src_folder):
if not fname.startswith(".") and fname.endswith(".onnx") and os.path.isfile(os.path.join(src_folder, fname)):
onnx_file_path = os.path.join(src_folder, fname)
break
if onnx_file_path == '':
raise FileNotFoundError('Could not find any *.onnx file in {0}'.format(src_folder))
if copy_model:
# Copy model file
target_file_path = os.path.join(dst_folder, "model.onnx")
shutil.copy2(onnx_file_path, target_file_path)
for fname in os.listdir(src_folder):
if not fname.endswith(".onnx") and os.path.isfile(os.path.join(src_folder, fname)):
shutil.copy2(os.path.join(src_folder, fname), dst_folder)
iname, oname = get_io_name(onnx_file_path)
model_test_data = [name for name in os.listdir(src_folder) if os.path.isdir(os.path.join(src_folder, name))]
for test in model_test_data:
src = os.path.join(src_folder, test)
dst = os.path.join(dst_folder, test)
os.makedirs(dst, exist_ok=True)
gen_input_json(os.path.join(src, 'input_0.pb'), iname, oname, os.path.join(dst, 'request.json'))
gen_output_json(os.path.join(src, 'output_0.pb'), oname, os.path.join(dst, 'response.json'))
gen_input_pb(os.path.join(src, 'input_0.pb'), iname, oname, os.path.join(dst, 'request.pb'))
gen_output_pb(os.path.join(src, 'output_0.pb'), oname, os.path.join(dst, 'response.pb'))
if __name__ == '__main__':
model_zoo = os.path.realpath(sys.argv[1])
test_data = os.path.realpath(sys.argv[2])
os.makedirs(test_data, exist_ok=True)
gen_req_resp(model_zoo, test_data)
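# Hedged sketch of the tensor2dict round trip used above (illustrative only):
# MessageToJson renders a TensorProto as a JSON string, keeping enum fields
# as integers, and json.loads turns that into the dict embedded in the
# request/response bodies.
def _tensor_roundtrip_demo():
    t = onnx.TensorProto()
    t.dims.extend([1, 3])
    t.data_type = onnx.TensorProto.FLOAT
    t.float_data.extend([0.0, 1.0, 2.0])
    return json.loads(MessageToJson(t, use_integers_for_enums=True))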
| 32.758621
| 123
| 0.708842
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 492
| 0.103579
|
beea57272100654c7600d64caab6b4c5cdc2179e
| 2,484
|
py
|
Python
|
articlequality/feature_lists/tests/test_enwiki.py
|
mariushoch/articlequality
|
57edf786636548bed466aa4e9d9e213fe8d1093b
|
[
"MIT"
] | null | null | null |
articlequality/feature_lists/tests/test_enwiki.py
|
mariushoch/articlequality
|
57edf786636548bed466aa4e9d9e213fe8d1093b
|
[
"MIT"
] | null | null | null |
articlequality/feature_lists/tests/test_enwiki.py
|
mariushoch/articlequality
|
57edf786636548bed466aa4e9d9e213fe8d1093b
|
[
"MIT"
] | null | null | null |
from revscoring.datasources.revision_oriented import revision
from revscoring.dependencies import solve
from .. import enwiki
revision_text = revision.text
def test_cite_templates():
text = """
This is some text with a citation.<ref>{{cite lol|title=Made up}}</ref>
This is some more text. {{foo}} {{{cite}}}
I am a new paragraph.<ref>{{cite book|title=The stuff}}</ref>
{{Cite hat|ascii=_n_}}
"""
assert solve(enwiki.cite_templates, cache={revision_text: text}) == 3
def test_infobox_templates():
text = """
{{Infobox pants|hats=2|pajams=23}}
This is some text with a citation.<ref>{{cite lol|title=Made up}}</ref>
This is some more text.
I am a new paragraph.<ref>{{cite book|title=The stuff}}</ref>
{{Cite hat|ascii=_n_}}
"""
assert solve(enwiki.infobox_templates, cache={revision_text: text}) == 1
def test_cn_templates():
text = """
{{Infobox pants|hats=2|pajams=23}}
This is some text with a citation.{{cn}}
This is some more text. {{foo}}
I am a new paragraph.{{fact|date=never}}
I am a new paragraph.{{Citation_needed|date=never}}
"""
assert solve(enwiki.cn_templates, cache={revision_text: text}) == 3
def test_who_templates():
text = """
This is some text with a citation.{{cn}}
This is some more text. {{foo}}
I am a new paragraph.{{who}}
I am a new paragraph.{{who|date=today}}
"""
assert solve(enwiki.who_templates, cache={revision_text: text}) == 2
def test_main_article_templates():
text = """
This is some text with a citation.{{cn}}
This is some more text. {{foo}}
== Some section ==
{{Main|section}}
I am a new paragraph.{{who|date=today}}
"""
assert solve(enwiki.main_article_templates,
cache={revision_text: text}) == 1
def test_paragraphs_without_refs_total_length():
text = """
Here is the first paragraph.
It contains some references <ref>first reference</ref>.
Here is second paragraph. One line with reference <ref>reference</ref>.
Here is third paragraph.
It has two lines, but no references.
Here is fourth paragraph.
It has two lines <ref>reference</ref>.
One of which has a reference.
Here is fifth paragraph. One line, no references.
Short line.<ref>last</ref><ref>One more reference</ref>
"""
assert solve(enwiki.paragraphs_without_refs_total_length,
cache={revision_text: text}) == 114
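# Rough sketch of the kind of matching these features rely on (illustrative
# only, not the revscoring implementation): template names match
# case-insensitively on the first letter, and triple-braced parameters such
# as {{{cite}}} must not be counted as templates.
import re

def _naive_cite_template_count(text):
    return len(re.findall(r"(?<!\{)\{\{\s*[Cc]ite[ _]", text))

def test_naive_cite_sketch():
    text = "<ref>{{cite lol|t=x}}</ref> {{{cite}}} {{cite book|t=y}} {{Cite hat}}"
    assert _naive_cite_template_count(text) == 3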
| 27
| 76
| 0.654589
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,540
| 0.619968
|
beead51f0acfbaced2396459fa3fc222015aa766
| 6,984
|
py
|
Python
|
GPU_compare_CPU.py
|
kvmu/SFU-workterm
|
91c976b094097912e71dd7e0d6207ad8ce7a7e93
|
[
"MIT"
] | null | null | null |
GPU_compare_CPU.py
|
kvmu/SFU-workterm
|
91c976b094097912e71dd7e0d6207ad8ce7a7e93
|
[
"MIT"
] | null | null | null |
GPU_compare_CPU.py
|
kvmu/SFU-workterm
|
91c976b094097912e71dd7e0d6207ad8ce7a7e93
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
Created on Mon Aug 10 14:31:17 2015
@author: Kevin M.
Description:
This script profiles the time complexity of CPU and GPU matrix element
calculations. It provides a function that runs the matrix element
analysis for a given set of parameters, times the code, plots the
time-complexity results (with fits), and plots the matrix elements
from each case.
"""
import numpy as np
import scipy as sp
import scipy.integrate  # sp.integrate is a subpackage; it needs an explicit import
import scipy.special    # likewise for sp.special.jv below
import matplotlib
matplotlib.use('Agg')
from matplotlib import pyplot as plt
from my_timer import timer
from math import log
from scipy.optimize import curve_fit
def f_MEplaceholder(neval, mode):
# Placeholder integration instead of ME calc
result, error = (sp.integrate.quad(lambda x:
sp.special.jv(2.5, x), 0, neval) if mode == 'gpu'
else sp.integrate.quadrature(lambda x:
sp.special.jv(2.5, x), 0, neval))
return result, error
def flinear(N, mode):
"""
O(n) function
"""
y = np.asarray([i for i in range(N)])
# two extra O(n) passes, kept only to pad the linear runtime being measured
np.asarray([i for i in range(N)])
np.asarray([i for i in range(N)])
return y, 1
def fsquare(N, mode):
"""
O(n^2) function
"""
for i in range(N):
for j in range(N):
y = i*j
return y,1
def algoAnalysis(fn, nMin, nMax, mode):
"""
Run timer and plot time complexity
"""
n = []
time_result = []
y_result = []
y_err = []
for i in [j*32 for j in range(nMin,nMax+1)]:
with timer() as t:
temp_result, temp_err = fn(i, mode)
time_result.append(t.msecs)
y_result.append(temp_result)
y_err.append(temp_err)
n.append(i)
return n, time_result, y_result, y_err
def plotAll(n, time_data, y_data, err_data):
n = np.asarray(n)
time_data = np.asarray(time_data)
y_data = np.asarray(y_data)
err_data = np.asarray(err_data)
err_data[0] = err_data[1]*0.5
# plotting helpers
nTime = n[2]
n = map(lambda x: log(x,2), n[0])
colors = ['lightblue', 'lightgreen']
edgeColors = ['#1B2ACC','#3F7F4C']
faceColors = ['#089FFF', '#7EFF99']
label_entries_for_results = ['GPU Matrix Elements', 'CPU Matrix Elements']
label_entries_for_time = ['GPU Runtime', 'CPU Runtime']
plt.figure(figsize=(15,6))
###########################################################################
# The following plots the runtime information for GPU and CPU runs.
def sqFunc(x, a, b, c):
return a*x**2 + b*x +c
def linFunc(x, a, b):
return a*x + b
funcList = [linFunc, sqFunc]
ax = plt.subplot(1,2,1)
# draw plots for timing data
for dat_mode in xrange(0,2):
params = curve_fit(funcList[dat_mode], nTime, time_data[dat_mode])
x = np.linspace(nTime[0], nTime[-1], 1000)
if dat_mode == 0:
[a,b] = params[0]
y = funcList[dat_mode](x, a, b)
s = "Fit for GPU: $%.5fx$ + $%.5f$"%(a,b)
if dat_mode == 1:
[a,b,c] = params[0]
y = funcList[dat_mode](x, a, b, c)
s = "Fit for CPU: $%.5fx^2$ + $%.5fx$ + $%.2f$"%(a,b,c)
ax.text(0.035, 0.75-dat_mode*0.1, s,
transform = ax.transAxes,
fontsize = 16)
ax.plot(x,y, color='k', linestyle="--", linewidth = 4)
ax.plot(nTime, time_data[dat_mode], color=colors[dat_mode],
marker = 'o', label=label_entries_for_time[dat_mode],
linestyle = 'None')
# setting axis limits
plt.xlim([min(nTime)-50, max(nTime)+50])
plt.ylim([min(min(time_data[0]), min(time_data[1]))*1.3,
max(max(time_data[0]), max(time_data[1]))*1.3])
# hiding axis ticks
plt.tick_params(axis="both", which="both", bottom="off", top="off",
labelbottom="on", left="off", right="off", labelleft="on")
# adding horizontal grid lines
ax.yaxis.grid(True)
# remove axis spines
ax.spines["top"].set_visible(False)
ax.spines["right"].set_visible(False)
ax.spines["bottom"].set_visible(False)
ax.spines["left"].set_visible(False)
# labels
plt.xlabel('Maximum number of phase space points')
plt.ylabel('Runtime (msec)')
leg = plt.legend(loc='upper left', fancybox=True, numpoints=1)
leg.get_frame().set_alpha(0.5)
###########################################################################
# The following plots the Matrix Elements for the GPU and CPU respectively
# on a subplot, on top of each other with their corresponding errors.
ax = plt.subplot(1,2,2)
# draw plots for results
for dat_mode in xrange(0,2):
ax.errorbar(x=n, y=y_data[dat_mode], yerr=err_data[dat_mode],
fmt='o', color=colors[dat_mode], ecolor='black',
alpha = 0.3)
ax.plot(n, y_data[dat_mode,:], marker='o',
linestyle = 'None', color=colors[dat_mode],
label=label_entries_for_results[dat_mode])
ax.fill_between(n, y_data[dat_mode]-err_data[dat_mode],
y_data[dat_mode]+err_data[dat_mode],
alpha=0.2, edgecolor=edgeColors[dat_mode],
facecolor=faceColors[dat_mode],
linewidth=4, linestyle='-.', antialiased=True)
# setting axis limits
plt.xlim([min(n)-1*0.2, max(n)+1*0.2])
plt.ylim([min(min(y_data[0]), min(y_data[1]))*1.3,
max(max(y_data[0]), max(y_data[1]))*1.3])
# hiding axis ticks
plt.tick_params(axis="both", which="both", bottom="off", top="off",
labelbottom="on", left="off", right="off", labelleft="on")
# adding horizontal grid lines
ax.yaxis.grid(True)
# remove axis spines
ax.spines["top"].set_visible(False)
ax.spines["right"].set_visible(False)
ax.spines["bottom"].set_visible(False)
ax.spines["left"].set_visible(False)
# labels
plt.xlabel('$\log_2$(Maximum number of phase space points)')
plt.ylabel('Matrix Element')
leg = plt.legend(loc='upper left', fancybox=True, numpoints=1)
leg.get_frame().set_alpha(0.5)
plt.tight_layout()
plt.savefig('plots.pdf')
plt.show()
# main() function
def main():
print('\nAnalyzing Algorithms...')
n_GPU, time_GPU, yResult_GPU, yErr_GPU = algoAnalysis(f_MEplaceholder, 8, 20, 'gpu')
n_CPU, time_CPU, yResult_CPU, yErr_CPU = algoAnalysis(f_MEplaceholder, 8, 20, 'cpu')
nLin, timeLin, y1, y2 = algoAnalysis(flinear, 10, 50, 'cpu')
nSq, timeSq, y1, y2 = algoAnalysis(fsquare, 10, 50, 'cpu')
nList = [n_GPU, n_CPU, nLin, nSq] ### DELETE NLIN NSQ AFTER
timeList = [timeLin, timeSq]
yResultList = [yResult_GPU, yResult_CPU]
yErrList = [yErr_GPU, yErr_CPU]
plotAll(nList, timeList, yResultList, yErrList)
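# my_timer.timer is a project-local helper not included here; the sketch
# below (an assumption, renamed to avoid shadowing the import) shows a
# compatible context manager exposing elapsed milliseconds via .msecs,
# which is how algoAnalysis() consumes it above.
import time

class _timer_sketch(object):
    def __enter__(self):
        self.start = time.time()
        return self

    def __exit__(self, *exc):
        self.msecs = (time.time() - self.start) * 1000.0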
# call main
if __name__ == '__main__':
# matplotlib.rcParams.update({'font.family': 'Zapf Chancery'})
main()
| 29.719149
| 88
| 0.583477
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,928
| 0.27606
|
beeae2374b9c6c4f75bcbbf6e4da5b6372460220
| 7,714
|
py
|
Python
|
applications/plugins/Flexible/Flexible_test/scenes/python/reInitMapping.py
|
sofa-framework/issofa
|
94855f488465bc3ed41223cbde987581dfca5389
|
[
"OML"
] | null | null | null |
applications/plugins/Flexible/Flexible_test/scenes/python/reInitMapping.py
|
sofa-framework/issofa
|
94855f488465bc3ed41223cbde987581dfca5389
|
[
"OML"
] | null | null | null |
applications/plugins/Flexible/Flexible_test/scenes/python/reInitMapping.py
|
sofa-framework/issofa
|
94855f488465bc3ed41223cbde987581dfca5389
|
[
"OML"
] | null | null | null |
import Sofa
import SofaTest
import SofaPython.Tools
OBJ = SofaPython.Tools.localPath( __file__, "beam.obj" )
RAW = SofaPython.Tools.localPath( __file__, "beam.raw" )
## Check whether calling Mapping::init() changes anything.
#
# The trick: if the evaluateShapeFunction option is activated in the
# ImageGaussPointSampler, then sampler::bwdInit() must be called to
# update the weights using the Gauss points.
class Controller(SofaTest.Controller):
def initGraph(self,node):
self.success = 1
self.count = 0
return 0
def createGraph(self,node):
self.node = node
return 0
def initAndCheckMapping(self, node):
mapping = node
oldWeights = mapping.findData("weights").value
oldWeightGradients = mapping.findData("weightGradients").value
oldWeightHessians = mapping.findData("weightHessians").value
mapping.init()
newWeights = mapping.findData("weights").value
newWeightGradients = mapping.findData("weightGradients").value
newWeightHessians = mapping.findData("weightHessians").value
        # init() should be a no-op here, so a change in ANY of the data fields is a failure
        if (oldWeights != newWeights) or (oldWeightGradients != newWeightGradients) or (oldWeightHessians != newWeightHessians):
self.success = 0
else:
self.success = 1
return 0
def onEndAnimationStep(self,dt):
return 0
def onBeginAnimationStep(self,dt):
self.count+=1
if(self.count == 2):
barycentricMapping = self.root.getChild("barycentricFrame").getChild("behavior").getObject("mapping")
self.initAndCheckMapping(barycentricMapping)
if(self.success == 0):
self.sendFailure("(Barycentric Shape Function) calling init once again changed linearMapping weights for no reason")
voronoiMapping = self.root.getChild("voronoiFrame").getChild("behavior").getObject("mapping")
self.initAndCheckMapping(voronoiMapping)
if(self.success == 0):
self.sendFailure("(Voronoi Shape Function) calling init once again changed linearMapping weights for no reason")
            self.sendSuccess()
return 0
def createBarycentricFrame( parentNode, name ):
node = parentNode.createChild(name)
#Solver
node.createObject('EulerImplicit', name='integrator')
node.createObject('CGLinearSolver', name='linearSolver', iterations='200', tolerance="1e-15", threshold='1.0e-15')
#Frame
dofPosition="0 1.0 -0.999 1 0 0 0 1 0 0 0 1 " + "0 1.0 0.999 1 0 0 0 1 0 0 0 1 "
    node.createObject('MechanicalObject', template='Affine', name='dofs', position=dofPosition, showObject='true', showObjectScale='0.5')
node.createObject('UniformMass', template='Affine',totalMass='0.01')
#Constraint
node.createObject('BoxROI', name='roi', template='Vec3d', box="-1 -2 -1.2 1 2 -0.8", drawBoxes='true', drawSize=1)
node.createObject('FixedConstraint', indices="@[-1].indices")
#Shape function
node.createObject('MeshTopology', edges="0 0 0 1 1 1")
node.createObject('BarycentricShapeFunction', name="shapeFunc")
#Integration point sampling
behaviorNode = node.createChild('behavior')
behaviorNode.createObject("TopologyGaussPointSampler", name="sampler", inPosition="@../dofs.rest_position", showSamplesScale="0.1", drawMode="0")
behaviorNode.createObject('MechanicalObject', name="intePts", template='F332', showObject="true", showObjectScale="0.05")
behaviorNode.createObject('LinearMapping', name="mapping", template='Affine,F332', showDeformationGradientScale='0.2', showSampleScale="0", printLog="false")
#Behavior
eNode = behaviorNode.createChild('E')
eNode.createObject( 'MechanicalObject', name='E', template='E332' )
eNode.createObject( 'CorotationalStrainMapping', template='F332,E332', printLog='false' )
eNode.createObject( 'HookeForceField', template='E332', youngModulus='100', poissonRatio='0', viscosity='0' )
#Visu child node
visuNode = node.createChild('Visu')
visuNode.createObject('OglModel', template="ExtVec3f", name='Visual',filename=OBJ, translation="0 1 0")
visuNode.createObject('LinearMapping', template='Affine,ExtVec3f')
def createVoronoiFrame( parentNode, name ):
node = parentNode.createChild(name)
#Solver
node.createObject('EulerImplicit', name='integrator')
node.createObject('CGLinearSolver', name='linearSolver', iterations='200', tolerance="1e-15", threshold='1.0e-15')
#Frame
node.createObject("MeshObjLoader", name="mesh", filename=OBJ, triangulate="1")
node.createObject("ImageContainer", name="image", template="ImageUC", filename=RAW, drawBB="false")
node.createObject("ImageSampler", name="sampler", template="ImageUC", src="@image", method="1", param="0", fixedPosition="0 0 -0.999 0 0 0.999", printLog="false")
node.createObject("MergeMeshes", name="merged", nbMeshes="2", position1="@sampler.fixedPosition", position2="@sampler.position")
#node.createObject("ImageViewer", template="ImageB", name="viewer", src="@image")
    node.createObject('MechanicalObject', template='Affine', name='dofs', src="@merged", showObject='true', showObjectScale='0.5')
#Shape function
node.createObject('VoronoiShapeFunction', name="shapeFunc", position='@dofs.rest_position', src='@image', useDijkstra="true", method="0", nbRef="4")
#Uniform Mass
node.createObject('UniformMass', template='Affine',totalMass='0.01')
#Constraint
node.createObject('BoxROI', name='roi', template='Vec3d', box="-1 -2.0 -1.2 1 2.0 -0.8", drawBoxes='true', drawSize=1)
node.createObject('FixedConstraint', indices="@[-1].indices")
#Gauss point sampling
behaviorNode = node.createChild('behavior')
behaviorNode.createObject('ImageGaussPointSampler', name='sampler', indices='@../shapeFunc.indices', weights='@../shapeFunc.weights', transform='@../shapeFunc.transform', method='2', order='4', targetNumber='1', printLog='false', showSamplesScale=0.1, drawMode=0, evaluateShapeFunction="false")
behaviorNode.createObject('MechanicalObject', name="intePts", template='F332', showObject="false", showObjectScale="0.05")
behaviorNode.createObject('LinearMapping', name="mapping", template='Affine,F332', assembleJ='true', showDeformationGradientScale='0.2', printLog="false")
#Behavior
eNode = behaviorNode.createChild('E')
eNode.createObject( 'MechanicalObject', name='E', template='E332' )
eNode.createObject( 'CorotationalStrainMapping', template='F332,E332', printLog='false' )
eNode.createObject( 'HookeForceField', template='E332', youngModulus='100', poissonRatio='0', viscosity='0' )
#Visu child node
visuNode = node.createChild('Visu')
visuNode.createObject('OglModel', template="ExtVec3f", name='Visual',filename=OBJ)
visuNode.createObject('LinearMapping', template='Affine,ExtVec3f')
return node
def createScene( root ) :
#Root node data
root.findData('dt').value=0.001
root.findData('gravity').value='0 -10 0'
#Required setting
root.createObject('RequiredPlugin', name="flexible", pluginName='Flexible', printLog="false")
root.createObject('RequiredPlugin', name="image", pluginName='image', printLog="false")
#VisuStyle
root.createObject('VisualStyle', name='visuStyle', displayFlags='showWireframe showBehaviorModels')
#Animation Loop
    root.createObject('DefaultAnimationLoop')
    root.createObject('DefaultVisualManagerLoop')
#Python Script Controller
root.createObject('PythonScriptController', filename = __file__, classname='Controller')
    createVoronoiFrame(root, 'voronoiFrame')
    createBarycentricFrame(root, 'barycentricFrame')
| 47.913043
| 298
| 0.704693
| 1,775
| 0.230101
| 0
| 0
| 0
| 0
| 0
| 0
| 2,876
| 0.372829
|
beeda21a5090a064572591c96a86d43fd6daf247
| 1,688
|
py
|
Python
|
mindspore/ops/_register_for_op.py
|
Vincent34/mindspore
|
a39a60878a46e7e9cb02db788c0bca478f2fa6e5
|
[
"Apache-2.0"
] | 1
|
2021-07-16T12:05:53.000Z
|
2021-07-16T12:05:53.000Z
|
mindspore/ops/_register_for_op.py
|
Vincent34/mindspore
|
a39a60878a46e7e9cb02db788c0bca478f2fa6e5
|
[
"Apache-2.0"
] | null | null | null |
mindspore/ops/_register_for_op.py
|
Vincent34/mindspore
|
a39a60878a46e7e9cb02db788c0bca478f2fa6e5
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Registry the relation."""
from collections import UserDict
from .primitive import Primitive
class Registry(UserDict):
"""Registry class for registry functions for grad and vm_impl on Primitive."""
def register(self, prim):
"""register the function."""
def deco(fn):
"""Decorate the function."""
if isinstance(prim, str):
self[prim] = fn
elif issubclass(prim, Primitive):
self[id(prim)] = fn
return fn
return deco
def get(self, prim_obj, default):
"""Get the value by primitive."""
fn = default
if isinstance(prim_obj, str) and prim_obj in self:
fn = self[prim_obj]
elif isinstance(prim_obj, Primitive):
key = id(prim_obj.__class__)
if key in self:
fn = self[key]
else:
key = prim_obj.name
if key in self:
fn = self[prim_obj.name]
return fn
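# A minimal usage sketch (the registry and operator names below are
# illustrative, not taken from MindSpore itself):
#
#     vm_impl_registry = Registry()
#
#     @vm_impl_registry.register("Add")
#     def vm_impl_add(x, y):
#         return x + y
#
#     vm_impl_registry.get("Add", default=None)   # -> vm_impl_add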
| 33.76
| 82
| 0.593009
| 921
| 0.545616
| 0
| 0
| 0
| 0
| 0
| 0
| 848
| 0.50237
|
beee49868a956aa3196803cdf539676b921996ae
| 11,496
|
py
|
Python
|
senlin-7.0.0/senlin/tests/unit/api/middleware/test_version_negotiation.py
|
scottwedge/OpenStack-Stein
|
7077d1f602031dace92916f14e36b124f474de15
|
[
"Apache-2.0"
] | null | null | null |
senlin-7.0.0/senlin/tests/unit/api/middleware/test_version_negotiation.py
|
scottwedge/OpenStack-Stein
|
7077d1f602031dace92916f14e36b124f474de15
|
[
"Apache-2.0"
] | 5
|
2019-08-14T06:46:03.000Z
|
2021-12-13T20:01:25.000Z
|
senlin-7.0.0/senlin/tests/unit/api/middleware/test_version_negotiation.py
|
scottwedge/OpenStack-Stein
|
7077d1f602031dace92916f14e36b124f474de15
|
[
"Apache-2.0"
] | 2
|
2020-03-15T01:24:15.000Z
|
2020-07-22T20:34:26.000Z
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
import six
import webob
from senlin.api.common import version_request as vr
from senlin.api.common import wsgi
from senlin.api.middleware import version_negotiation as vn
from senlin.common import exception
from senlin.tests.unit.common import base
@mock.patch("senlin.api.openstack.versions.Controller")
class VersionNegotiationTest(base.SenlinTestCase):
def test_get_version_controller(self, mock_vc):
gvc = mock_vc.return_value
xvc = mock.Mock()
gvc.get_controller = mock.Mock(return_value=xvc)
vnf = vn.VersionNegotiationFilter(None, None)
request = webob.Request({})
res = vnf._get_controller('v1.0', request)
self.assertEqual(xvc, res)
self.assertEqual(1, request.environ['api.major'])
self.assertEqual(0, request.environ['api.minor'])
gvc.get_controller.assert_called_once_with('1.0')
def test_get_version_controller_shorter_version(self, mock_vc):
gvc = mock_vc.return_value
xvc = mock.Mock()
gvc.get_controller = mock.Mock(return_value=xvc)
vnf = vn.VersionNegotiationFilter(None, None)
request = webob.Request({})
res = vnf._get_controller('v1', request)
self.assertEqual(xvc, res)
self.assertEqual(1, request.environ['api.major'])
self.assertEqual(0, request.environ['api.minor'])
gvc.get_controller.assert_called_once_with('1.0')
def test_get_controller_not_match_version(self, mock_vc):
gvc = mock_vc.return_value
gvc.get_controller = mock.Mock(return_value=None)
vnf = vn.VersionNegotiationFilter(None, None)
request = webob.Request({})
res = vnf._get_controller("invalid", request)
self.assertIsNone(res)
self.assertEqual(0, gvc.get_controller.call_count)
def test_request_path_is_version(self, mock_vc):
vnf = vn.VersionNegotiationFilter(None, None)
request = webob.Request({'PATH_INFO': 'versions'})
response = vnf.process_request(request)
self.assertIs(mock_vc.return_value, response)
def test_request_path_is_empty(self, mock_vc):
vnf = vn.VersionNegotiationFilter(None, None)
request = webob.Request({'PATH_INFO': '/'})
response = vnf.process_request(request)
self.assertIs(mock_vc.return_value, response)
def test_request_path_contains_valid_version(self, mock_vc):
vnf = vn.VersionNegotiationFilter(None, None)
gvc = mock_vc.return_value
x_controller = mock.Mock()
gvc.get_controller = mock.Mock(return_value=x_controller)
mock_check = self.patchobject(vnf, '_check_version_request')
major = 1
minor = 0
request = webob.Request({'PATH_INFO': 'v1.0/resource'})
response = vnf.process_request(request)
self.assertIsNone(response)
self.assertEqual(major, request.environ['api.major'])
self.assertEqual(minor, request.environ['api.minor'])
gvc.get_controller.assert_called_once_with('1.0')
mock_check.assert_called_once_with(request, x_controller)
def test_removes_version_from_request_path(self, mock_vc):
vnf = vn.VersionNegotiationFilter(None, None)
self.patchobject(vnf, '_check_version_request')
expected_path = 'resource'
request = webob.Request({'PATH_INFO': 'v1.0/%s' % expected_path})
response = vnf.process_request(request)
self.assertIsNone(response)
self.assertEqual(expected_path, request.path_info_peek())
def test_simple_version_on_request_path(self, mock_vc):
vnf = vn.VersionNegotiationFilter(None, None)
self.patchobject(vnf, '_check_version_request')
fake_vc = mock.Mock(return_value={'foo': 'bar'})
self.patchobject(vnf.versions_app, 'get_controller',
return_value=fake_vc)
request = webob.Request({'PATH_INFO': 'v1'})
response = vnf.process_request(request)
self.assertEqual({'foo': 'bar'}, response)
def test_full_version_on_request_path(self, mock_vc):
vnf = vn.VersionNegotiationFilter(None, None)
self.patchobject(vnf, '_check_version_request')
fake_vc = mock.Mock(return_value={'foo': 'bar'})
self.patchobject(vnf.versions_app, 'get_controller',
return_value=fake_vc)
request = webob.Request({'PATH_INFO': 'v1.0'})
response = vnf.process_request(request)
self.assertEqual({'foo': 'bar'}, response)
def test_request_path_contains_unknown_version(self, mock_vc):
vnf = vn.VersionNegotiationFilter(None, None)
gvc = mock_vc.return_value
gvc.get_controller = mock.Mock(return_value=None)
self.patchobject(vnf, '_check_version_request')
request = webob.Request({'PATH_INFO': 'v2.0/resource'})
request.headers['Accept'] = '*/*'
response = vnf.process_request(request)
self.assertIs(mock_vc.return_value, response)
def test_accept_header_contains_valid_version(self, mock_vc):
vnf = vn.VersionNegotiationFilter(None, None)
self.patchobject(vnf, '_check_version_request')
major = 1
minor = 0
request = webob.Request({'PATH_INFO': 'resource'})
request.headers['Accept'] = 'application/vnd.openstack.clustering-v1.0'
response = vnf.process_request(request)
self.assertIsNone(response)
self.assertEqual(major, request.environ['api.major'])
self.assertEqual(minor, request.environ['api.minor'])
def test_accept_header_contains_simple_version(self, mock_vc):
vnf = vn.VersionNegotiationFilter(None, None)
self.patchobject(vnf, '_check_version_request')
fake_vc = mock.Mock(return_value={'foo': 'bar'})
self.patchobject(vnf.versions_app, 'get_controller',
return_value=fake_vc)
major = 1
minor = 0
request = webob.Request({'PATH_INFO': ''})
request.headers['Accept'] = 'application/vnd.openstack.clustering-v1.0'
response = vnf.process_request(request)
self.assertEqual(major, request.environ['api.major'])
self.assertEqual(minor, request.environ['api.minor'])
self.assertEqual({'foo': 'bar'}, response)
def test_accept_header_contains_unknown_version(self, mock_vc):
vnf = vn.VersionNegotiationFilter(None, None)
self.patchobject(vnf, '_check_version_request')
request = webob.Request({'PATH_INFO': 'resource'})
request.headers['Accept'] = 'application/vnd.openstack.clustering-v2.0'
response = vnf.process_request(request)
self.assertIsNone(response)
request.headers['Accept'] = 'application/vnd.openstack.clustering-vab'
response = vnf.process_request(request)
self.assertIsInstance(response, webob.exc.HTTPNotFound)
def test_no_URI_version_accept_with_invalid_MIME_type(self, mock_vc):
vnf = vn.VersionNegotiationFilter(None, None)
gvc = mock_vc.return_value
gvc.get_controller = mock.Mock(side_effect=[None, None])
self.patchobject(vnf, '_check_version_request')
request = webob.Request({'PATH_INFO': 'resource'})
request.headers['Accept'] = 'application/invalidMIMEType'
response = vnf.process_request(request)
self.assertIsInstance(response, webob.exc.HTTPNotFound)
request.headers['Accept'] = ''
response = vnf.process_request(request)
self.assertEqual(gvc, response)
def test_check_version_request(self, mock_vc):
controller = mock.Mock()
minv = vr.APIVersionRequest('1.0')
maxv = vr.APIVersionRequest('1.3')
controller.min_api_version = mock.Mock(return_value=minv)
controller.max_api_version = mock.Mock(return_value=maxv)
request = webob.Request({'PATH_INFO': 'resource'})
request.headers[wsgi.API_VERSION_KEY] = 'clustering 1.0,compute 2.0'
vnf = vn.VersionNegotiationFilter(None, None)
vnf._check_version_request(request, controller)
self.assertIsNotNone(request.version_request)
expected = vr.APIVersionRequest('1.0')
self.assertEqual(expected, request.version_request)
def test_check_version_request_default(self, mock_vc):
controller = mock.Mock()
controller.DEFAULT_API_VERSION = "1.0"
request = webob.Request({'PATH_INFO': 'resource'})
request.headers[wsgi.API_VERSION_KEY] = 'compute 2.0'
vnf = vn.VersionNegotiationFilter(None, None)
vnf._check_version_request(request, controller)
self.assertIsNotNone(request.version_request)
expected = vr.APIVersionRequest(controller.DEFAULT_API_VERSION)
self.assertEqual(expected, request.version_request)
def test_check_version_request_invalid_format(self, mock_vc):
controller = mock.Mock()
request = webob.Request({'PATH_INFO': 'resource'})
request.headers[wsgi.API_VERSION_KEY] = 'clustering 2.03'
vnf = vn.VersionNegotiationFilter(None, None)
ex = self.assertRaises(webob.exc.HTTPBadRequest,
vnf._check_version_request,
request, controller)
self.assertEqual("API Version String '2.03' is of invalid format. It "
"must be of format 'major.minor'.",
six.text_type(ex))
def test_check_version_request_invalid_version(self, mock_vc):
controller = mock.Mock()
minv = vr.APIVersionRequest('1.0')
maxv = vr.APIVersionRequest('1.100')
controller.min_api_version = mock.Mock(return_value=minv)
controller.max_api_version = mock.Mock(return_value=maxv)
request = webob.Request({'PATH_INFO': 'resource'})
request.headers[wsgi.API_VERSION_KEY] = 'clustering 2.3'
vnf = vn.VersionNegotiationFilter(None, None)
ex = self.assertRaises(exception.InvalidGlobalAPIVersion,
vnf._check_version_request,
request, controller)
expected = ("Version '2.3' is not supported by the API. Minimum is "
"'%(min_ver)s' and maximum is '%(max_ver)s'." %
{'min_ver': str(minv), 'max_ver': str(maxv)})
self.assertEqual(expected, six.text_type(ex))
def test_check_version_request_latest(self, mock_vc):
controller = mock.Mock()
controller.max_api_version = mock.Mock(return_value='12.34')
request = webob.Request({'PATH_INFO': 'resource'})
request.headers[wsgi.API_VERSION_KEY] = 'clustering Latest'
vnf = vn.VersionNegotiationFilter(None, None)
vnf._check_version_request(request, controller)
self.assertIsNotNone(request.version_request)
expected = '12.34'
self.assertEqual(expected, request.version_request)
| 40.336842
| 79
| 0.673626
| 10,626
| 0.924322
| 0
| 0
| 10,682
| 0.929193
| 0
| 0
| 1,985
| 0.172669
|
bef071b99c5638f1355cc9be272ba0f93a6cb31f
| 493
|
py
|
Python
|
iot/models.py
|
kkishans/IOT_DJANGO
|
12a19858f002a8c684e4dbb93868a8859d57615f
|
[
"MIT"
] | null | null | null |
iot/models.py
|
kkishans/IOT_DJANGO
|
12a19858f002a8c684e4dbb93868a8859d57615f
|
[
"MIT"
] | null | null | null |
iot/models.py
|
kkishans/IOT_DJANGO
|
12a19858f002a8c684e4dbb93868a8859d57615f
|
[
"MIT"
] | null | null | null |
from django.db import models
from django import forms
# Create your models here.
class User(models.Model):
username = models.CharField(max_length=30)
password = models.TextField()
email = models.EmailField(unique=True)
objects = models.Manager()
def __str__(self):
return self.username
class Room(models.Model):
room = models.CharField(max_length=30)
email = models.ForeignKey(User,on_delete=models.CASCADE)
class Meta:
db_table = "room"
| 25.947368
| 60
| 0.699797
| 408
| 0.827586
| 0
| 0
| 0
| 0
| 0
| 0
| 32
| 0.064909
|
bef0b40fe98f05288d080226293e4d439b57a362
| 2,970
|
py
|
Python
|
Homework files/Think_Python_Book_Homework_1.py
|
SillyHatsOnly/Python-Education-Experiments
|
22244defc47b4e3ba41af07957a782013afe12b0
|
[
"MIT"
] | null | null | null |
Homework files/Think_Python_Book_Homework_1.py
|
SillyHatsOnly/Python-Education-Experiments
|
22244defc47b4e3ba41af07957a782013afe12b0
|
[
"MIT"
] | null | null | null |
Homework files/Think_Python_Book_Homework_1.py
|
SillyHatsOnly/Python-Education-Experiments
|
22244defc47b4e3ba41af07957a782013afe12b0
|
[
"MIT"
] | null | null | null |
def do_twice(f):
f()
f()
def print_spam():
print('spam')
do_twice(print_spam)
def do_twice(f, a):
f(a)
f(a)
def print_spam(a):
print(a)
print(a)
do_twice(print_spam, 'spamm')
def do_four(f, a):
do_twice(f,a)
do_twice(f,a)
do_four(print_spam, "SPAM")
def hor_line():
print('+','-'*4,'+','-'*4)
hor_line
hor_line()
def hor_line():
print('+','-'*4,'+','-'*4, end="")
hor_line()
# this attempt raised a SyntaxError (missing comma before end=""); the
# corrected definition follows
#def hor_line():
#    print('+','-'*4,'+','-'*4,'+' end="")
def hor_line():
print('+','-'*4,'+','-'*4,'+', end="")
hor_line()
def print_main_line():
print('+','-'*4,'+','-'*4,'+', end="")
def print_second_line():
print('|', ''*4, '|',''*4,'|', end='')
print('|', ''*4, '|',''*4,'|', end='')
print('|', ''*4, '|',''*4,'|', end='')
print('|', ''*4, '|',''*4,'|', end='')
def square_print():
print_main_line()
print_second_line()
print_main_line()
print_second_line()
print_main_line()
square_print()
def print_main_line():
print('+','-'*4,'+','-'*4,'+')
def print_second_line():
print('|', ''*4, '|',''*4,'|')
print('|', ''*4, '|',''*4,'|')
print('|', ''*4, '|',''*4,'|')
print('|', ''*4, '|',''*4,'|')
def square_print():
print_main_line()
print_second_line()
print_main_line()
print_second_line()
print_main_line()
square_print()
def print_second_line():
print('|', ' '*4, '|',' '*4,'|')
print('|', ' '*4, '|',' '*4,'|')
print('|', ' '*4, '|',' '*4,'|')
print('|', ' '*4, '|',' '*4,'|')
square_print()
def print_main_line():
print('+','-'*4,'+','-'*4,'+')
def print_second_line():
print('|', ' '*4, '|',' '*4,'|')
print('|', ' '*4, '|',' '*4,'|')
print('|', ' '*4, '|',' '*4,'|')
print('|', ' '*4, '|',' '*4,'|')
def square_print():
print_main_line()
print_second_line()
print_main_line()
def double_square():
square_print()
square_print()
double_square
double_square()
def print_main_line():
print('+','-'*4,'+','-'*4,'+')
def print_second_line():
print('|', ' '*4, '|',' '*4,'|')
print('|', ' '*4, '|',' '*4,'|')
print('|', ' '*4, '|',' '*4,'|')
print('|', ' '*4, '|',' '*4,'|')
def square_print():
print_main_line()
print_second_line()
def double_square():
square_print()
square_print()
double_square()
def square_print():
print_main_line()
print_second_line()
print_main_line()
print_second_line()
print_main_line()
square_print()
def print_main_line():
print('+','-'*4,'+','-'*4,'+','-'*4,'+')
def print_second_line():
print('|', ' '*4, '|',' '*4,'|', ' '*4,'|')
print('|', ' '*4, '|',' '*4,'|', ' '*4,'|')
print('|', ' '*4, '|',' '*4,'|', ' '*4,'|')
print('|', ' '*4, '|',' '*4,'|', ' '*4,'|')
def square_print():
print_main_line()
print_second_line()
print_main_line()
print_second_line()
print_main_line()
print_second_line()
print_main_line()
square_print()
| 18.679245
| 47
| 0.476431
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 538
| 0.181145
|
bef16a350cb321f3059e524b8af8bbcaac507956
| 123
|
py
|
Python
|
email_log/apps.py
|
bernd-wechner/django-email-log
|
dbbe0ef6cee8b8067d6420dccc7a8f2061662a68
|
[
"MIT"
] | 26
|
2015-04-14T18:24:54.000Z
|
2022-03-07T13:01:34.000Z
|
email_log/apps.py
|
bernd-wechner/django-email-log
|
dbbe0ef6cee8b8067d6420dccc7a8f2061662a68
|
[
"MIT"
] | 23
|
2015-06-23T02:40:39.000Z
|
2022-02-08T05:07:42.000Z
|
email_log/apps.py
|
bernd-wechner/django-email-log
|
dbbe0ef6cee8b8067d6420dccc7a8f2061662a68
|
[
"MIT"
] | 25
|
2015-02-04T16:16:05.000Z
|
2021-09-28T10:53:00.000Z
|
from django.apps import AppConfig
class EmailLogConfig(AppConfig):
name = 'email_log'
verbose_name = "Email log"
| 17.571429
| 33
| 0.731707
| 86
| 0.699187
| 0
| 0
| 0
| 0
| 0
| 0
| 22
| 0.178862
|
bef17e7d48e784a47058c04dd63db533f851c334
| 83
|
py
|
Python
|
gawain/tests/test_numerics.py
|
henrywatkins/gawain
|
c556be20242249504fc0e04a5d3b7168a8369043
|
[
"MIT"
] | 1
|
2021-11-20T06:16:13.000Z
|
2021-11-20T06:16:13.000Z
|
gawain/tests/test_numerics.py
|
henrywatkins/gawain
|
c556be20242249504fc0e04a5d3b7168a8369043
|
[
"MIT"
] | null | null | null |
gawain/tests/test_numerics.py
|
henrywatkins/gawain
|
c556be20242249504fc0e04a5d3b7168a8369043
|
[
"MIT"
] | null | null | null |
import pytest
from gawain.numerics import Clock, SolutionVector, MHDSolutionVector
| 27.666667
| 68
| 0.86747
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
bef2574ded37985d33b872832104339ea2dcbc78
| 384
|
py
|
Python
|
project_9/util.py
|
sople1/project_9
|
7d91d786533d508572feae1ffbd1b4a6a80208ab
|
[
"CC0-1.0"
] | null | null | null |
project_9/util.py
|
sople1/project_9
|
7d91d786533d508572feae1ffbd1b4a6a80208ab
|
[
"CC0-1.0"
] | null | null | null |
project_9/util.py
|
sople1/project_9
|
7d91d786533d508572feae1ffbd1b4a6a80208ab
|
[
"CC0-1.0"
] | null | null | null |
"""
utility for project 9
:author: Seongsu Yoon <sople1@snooey.net>
:license: CC0
"""
def clear():
"""
clear cmd/term
:return: void
"""
import os
import sys
if sys.platform == 'win32':
os.system('cls') # on windows
else:
os.system('clear') # on linux / os x
if __name__ == '__main__':
raise Exception("please run main py")
| 14.769231
| 45
| 0.570313
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 213
| 0.554688
|
bef2add5e5d23f2bc354f97f806087052f88a9fd
| 2,383
|
py
|
Python
|
api/views.py
|
HigoOliveira/DesafioFinalServer
|
284d3cea59f28f6da229345496896106e3d2048a
|
[
"MIT"
] | null | null | null |
api/views.py
|
HigoOliveira/DesafioFinalServer
|
284d3cea59f28f6da229345496896106e3d2048a
|
[
"MIT"
] | null | null | null |
api/views.py
|
HigoOliveira/DesafioFinalServer
|
284d3cea59f28f6da229345496896106e3d2048a
|
[
"MIT"
] | null | null | null |
from rest_framework.views import APIView
from .models import User, Event
from .serializer import UserSerializer, EventSerializer
from rest_framework.permissions import AllowAny, IsAuthenticated
from rest_framework.authentication import TokenAuthentication
from rest_framework import status, mixins, generics
from rest_framework.response import Response
class UserVerify(mixins.RetrieveModelMixin, generics.GenericAPIView):
queryset = User.objects.all()
serializer_class = UserSerializer
lookup_field = 'phone'
def get(self, request, *args, **kwargs):
return self.retrieve(request, *args, **kwargs)
class Create(mixins.CreateModelMixin, generics.GenericAPIView):
serializer_class = UserSerializer
def post(self, request, *args, **kwargs):
return self.create(request, *args, **kwargs)
class Update(mixins.UpdateModelMixin, generics.GenericAPIView):
    permission_classes = (IsAuthenticated,)
serializer_class = UserSerializer
queryset = User.objects.all()
def get_object(self):
return self.request.user
def post(self, request, *args, **kwargs):
return self.partial_update(request, *args, **kwargs)
class CreateEvent(mixins.CreateModelMixin, generics.GenericAPIView):
    permission_classes = (IsAuthenticated,)
serializer_class = EventSerializer
def post(self, request, *args, **kwargs):
data = request.data.copy()
data['user'] = self.request.user.id
serializer = self.get_serializer(data=data)
serializer.is_valid(raise_exception=True)
self.perform_create(serializer)
headers = self.get_success_headers(serializer.data)
return Response(serializer.data, status=status.HTTP_201_CREATED, headers=headers)
class ListEvent(mixins.ListModelMixin, generics.GenericAPIView):
    permission_classes = (IsAuthenticated,)
queryset = Event.objects.all()
serializer_class = EventSerializer
def get_queryset(self):
queryset = super(ListEvent, self).get_queryset()
return queryset.filter(user=self.request.user)
def get(self, request, *args, **kwargs):
return self.list(request, *args, **kwargs)
class DeleteEvent(mixins.DestroyModelMixin, generics.GenericAPIView):
serializer_class = EventSerializer
    permission_classes = (IsAuthenticated,)
queryset = Event.objects.all()
def post(self, request, *args, **kwargs):
print(request.POST)
return self.destroy(request, *args, **kwargs)
| 36.661538
| 85
| 0.768779
| 2,018
| 0.846832
| 0
| 0
| 0
| 0
| 0
| 0
| 13
| 0.005455
|
bef317cba640175d733bcaa55e5644bbab2602a1
| 1,582
|
py
|
Python
|
Searching_Sorting/MinimumMovesToEqualAllArrayElements2.py
|
PK-100/Competitive_Programming
|
d0863feaaa99462b2999e85dcf115f7a6c08bb8d
|
[
"MIT"
] | 70
|
2018-06-25T21:20:15.000Z
|
2022-03-24T03:55:17.000Z
|
Searching_Sorting/MinimumMovesToEqualAllArrayElements2.py
|
An3sha/Competitive_Programming
|
ee7eadf51939a360d0b004d787ebabda583e92f0
|
[
"MIT"
] | 4
|
2018-09-04T13:12:20.000Z
|
2021-06-20T08:29:12.000Z
|
Searching_Sorting/MinimumMovesToEqualAllArrayElements2.py
|
An3sha/Competitive_Programming
|
ee7eadf51939a360d0b004d787ebabda583e92f0
|
[
"MIT"
] | 24
|
2018-12-26T05:15:32.000Z
|
2022-01-23T23:04:54.000Z
|
# LC 462
#from statistics import median
from random import randrange
from math import floor
from typing import List  # List is used in the minMoves2 annotation below
class Solution:
#Quick Select Algorithm
def partition(self,x, pivot_index = 0):
i = 0
if pivot_index !=0: x[0],x[pivot_index] = x[pivot_index],x[0]
for j in range(len(x)-1):
if x[j+1] < x[0]:
x[j+1],x[i+1] = x[i+1],x[j+1]
i += 1
x[0],x[i] = x[i],x[0]
return x,i
def RSelect(self,x,k):
if len(x) == 1:
return x[0]
else:
xpart = self.partition(x,randrange(len(x)))
x = xpart[0] # partitioned array
j = xpart[1] # pivot index
if j == k:
return x[j]
elif j > k:
return self.RSelect(x[:j],k)
else:
k = k - j - 1
return self.RSelect(x[(j+1):], k)
def median(self,lst):
lstLen = len(lst)
index = (lstLen - 1) // 2
if (lstLen % 2):
return self.RSelect(lst,index)
else:
return (self.RSelect(lst,index) + self.RSelect(lst,index+1))/2.0
def sorting_median(self,lst):
lstLen = len(lst)
lst.sort()
index = (lstLen - 1) // 2
if (lstLen % 2):
return lst[index]
else:
return (lst[index] + lst[index+1])/2.0
def minMoves2(self, nums: List[int]) -> int:
mdn = floor(self.sorting_median(nums))
movesmdn = 0
for i in nums:
movesmdn += abs(i-mdn)
return movesmdn
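# A quick local check of the median-based move count (the LeetCode judge
# constructs Solution itself; the input below is illustrative):
#     Solution().minMoves2([1, 2, 3])   # -> 2: move 1 up and 3 down to the median 2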
| 28.763636
| 76
| 0.474083
| 1,489
| 0.941214
| 0
| 0
| 0
| 0
| 0
| 0
| 93
| 0.058786
|
bef32dc0efa2656e8a84216ea747c7b952e1b452
| 43
|
py
|
Python
|
moban/_version.py
|
CLiu13/moban
|
5deada1af7ff24a6adf698de6a8b589a258d4dc2
|
[
"MIT"
] | 1
|
2018-12-16T01:16:22.000Z
|
2018-12-16T01:16:22.000Z
|
moban/_version.py
|
CLiu13/moban
|
5deada1af7ff24a6adf698de6a8b589a258d4dc2
|
[
"MIT"
] | null | null | null |
moban/_version.py
|
CLiu13/moban
|
5deada1af7ff24a6adf698de6a8b589a258d4dc2
|
[
"MIT"
] | null | null | null |
__version__ = "0.3.9"
__author__ = "C. W."
| 14.333333
| 21
| 0.604651
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 14
| 0.325581
|
bef3d9edefde231b494e39df17d4acf13c5e8797
| 666
|
py
|
Python
|
Gif_Rick/rick_random_gif.py
|
BiceCold/Citadel_of_Ricks
|
72f1a447accc2c11d1fa1cbf3c3342913913e50e
|
[
"Apache-2.0"
] | 2
|
2018-04-13T17:41:08.000Z
|
2018-09-20T22:19:52.000Z
|
Gif_Rick/rick_random_gif.py
|
BiceCold/Citadel_of_Ricks
|
72f1a447accc2c11d1fa1cbf3c3342913913e50e
|
[
"Apache-2.0"
] | null | null | null |
Gif_Rick/rick_random_gif.py
|
BiceCold/Citadel_of_Ricks
|
72f1a447accc2c11d1fa1cbf3c3342913913e50e
|
[
"Apache-2.0"
] | null | null | null |
import imgurpython
from Environment_Handlers.configs import get_config
import random
client_id = get_config("client_id")
client_secret = get_config("client_secret")
client_refresh_token = get_config("client_refresh")
client_access_token = get_config("client_access_token")
username = 'antipoliticsrick'
client = imgurpython.ImgurClient(client_id, client_secret, client_access_token, client_refresh_token)
# album_ids = client.get_account_album_ids(username, page=0)
img_lst = client.get_album_images('GebVe10')
def random_gif():
    # build the candidate list locally so repeated calls don't keep
    # appending duplicate links to a module-level list
    giflink = [gif.link for gif in img_lst]
    return random.choice(giflink)
| 26.64
| 101
| 0.792793
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 150
| 0.225225
|
bef57e6edf7a67698588bda9e271df4b1e689682
| 131
|
py
|
Python
|
catalyst/dl/experiment/__init__.py
|
andrey-avdeev/catalyst
|
fd17aaba7775c99b7e2b1ce86e60aa8f2379acc3
|
[
"Apache-2.0"
] | 3
|
2019-11-02T05:37:06.000Z
|
2020-01-13T02:26:07.000Z
|
catalyst/dl/experiment/__init__.py
|
andrey-avdeev/catalyst
|
fd17aaba7775c99b7e2b1ce86e60aa8f2379acc3
|
[
"Apache-2.0"
] | null | null | null |
catalyst/dl/experiment/__init__.py
|
andrey-avdeev/catalyst
|
fd17aaba7775c99b7e2b1ce86e60aa8f2379acc3
|
[
"Apache-2.0"
] | 1
|
2021-12-20T07:32:25.000Z
|
2021-12-20T07:32:25.000Z
|
# flake8: noqa
from .base import BaseExperiment
from .config import ConfigExperiment
from .supervised import SupervisedExperiment
| 21.833333
| 44
| 0.839695
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 14
| 0.10687
|
bef59cacc937701b313af0467ffa47a4f4a7a929
| 3,217
|
py
|
Python
|
home_directories/Library/Application Support/Alfred 3/Alfred.alfredpreferences/workflows/user.workflow.BC9837BA-F60E-475C-B524-E761FBD0E1EB/common.py
|
joekallen/dotfiles
|
9318c168fdb9ce0b1066b032e93a7e465d0ffcee
|
[
"Apache-2.0"
] | null | null | null |
home_directories/Library/Application Support/Alfred 3/Alfred.alfredpreferences/workflows/user.workflow.BC9837BA-F60E-475C-B524-E761FBD0E1EB/common.py
|
joekallen/dotfiles
|
9318c168fdb9ce0b1066b032e93a7e465d0ffcee
|
[
"Apache-2.0"
] | null | null | null |
home_directories/Library/Application Support/Alfred 3/Alfred.alfredpreferences/workflows/user.workflow.BC9837BA-F60E-475C-B524-E761FBD0E1EB/common.py
|
joekallen/dotfiles
|
9318c168fdb9ce0b1066b032e93a7e465d0ffcee
|
[
"Apache-2.0"
] | null | null | null |
import os
import argparse
import subprocess
from workflow import Workflow
def get_kubectl_cmd_path():
wf = Workflow()
return wf.settings.get("KUBECTL_CMD_PATH") or os.environ.get("KUBECTL_CMD_PATH", '/usr/local/bin/kubectl')
class KService:
def __init__(self, type, name, age, status):
self.type = type
self.name = name
self.age = age
self.status = status
def get_args(args):
parser = argparse.ArgumentParser()
parser.add_argument('query', nargs='?', default="")
return parser.parse_args(args)
def get_pods():
res = []
pods = subprocess.Popen("%s get pods" % get_kubectl_cmd_path(), shell=True, stdout=subprocess.PIPE).stdout.read().split(
'\n')[
1:-1]
for pod_str in pods:
try:
dep_name, _, status, _, age = " ".join(pod_str.split()).split(' ')
res.append(KService("Pod", dep_name, age, status))
        except ValueError:
            # skip rows that do not split into the expected five columns
            continue
return res
def get_deployments():
res = []
deps = subprocess.Popen("%s get deploy" % get_kubectl_cmd_path(), shell=True, stdout=subprocess.PIPE).stdout.read().split(
'\n')[1:-1]
for dep_str in deps:
dep_name, _, current, _, _, age = " ".join(dep_str.split()).split(' ')
res.append(KService("Deploy", dep_name, age, current))
return res
def get_replica_sets():
res = []
deps = subprocess.Popen("%s get rs" % get_kubectl_cmd_path(), shell=True, stdout=subprocess.PIPE).stdout.read().split(
'\n')[1:-1]
for dep_str in deps:
dep_name, desired, current, _, age = " ".join(dep_str.split()).split(' ')
res.append(KService("Deploy", dep_name, age, "%s/%s" % (desired, current)))
return res
def get_services():
res = []
res += get_pods()
res += get_deployments()
return res
def search_key_for_service(service):
return u' '.join([
service.name
])
def process_and_feedback(wf, wf_cached_data_key, data_func, icon, include_type_in_arg=False):
args = get_args(wf.args)
data = wf.cached_data(wf_cached_data_key, data_func, max_age=60)
query = args.query.strip()
if query:
data = wf.filter(query, data, key=search_key_for_service, min_score=20)
for d in data:
if include_type_in_arg:
arg = "{type} {name}".format(type=d.type.lower(), name=d.name)
else:
arg = d.name
wf.add_item(title=d.name,
subtitle="%s - Age: %s | Extra: %s" % (d.type, d.age, d.status),
arg=arg,
valid=True,
icon=icon)
wf.send_feedback()
def update_local_path_vars(wf):
set_path_to = os.environ.get('set_path_to')
configured_path = os.environ.get('configured_path')
wf.settings[set_path_to] = configured_path
wf.settings.save()
print("Successfully set path to %s with %s" % (set_path_to, wf.settings[set_path_to]))
def _report_missing_var(wf, var_name):
print("Missing dashbaord url; use *ksetenv*")
"""
wf.add_item(title="Hit enter to set %s environment variable." % var_name,
arg="setenv",
valid=True)
wf.send_feedback()
"""
| 27.732759
| 126
| 0.608952
| 166
| 0.051601
| 0
| 0
| 0
| 0
| 0
| 0
| 494
| 0.153559
|
bef59fb3dbc590e868cbbe9ba87904ee2be92c5d
| 528
|
py
|
Python
|
dependencies/generate maps/pythongis/app/tk2/__init__.py
|
karimbahgat/AutoMap
|
eae52f16b7ce71cb2b4b7ae67cf6e4680ea2194f
|
[
"MIT"
] | 4
|
2015-12-05T14:31:55.000Z
|
2018-02-09T05:54:36.000Z
|
dependencies/generate maps/pythongis/app/tk2/__init__.py
|
karimbahgat/AutoMap
|
eae52f16b7ce71cb2b4b7ae67cf6e4680ea2194f
|
[
"MIT"
] | 1
|
2022-01-13T02:52:09.000Z
|
2022-01-13T02:52:09.000Z
|
dependencies/generate maps/pythongis/app/tk2/__init__.py
|
karimbahgat/AutoMap
|
eae52f16b7ce71cb2b4b7ae67cf6e4680ea2194f
|
[
"MIT"
] | 1
|
2018-10-24T01:08:11.000Z
|
2018-10-24T01:08:11.000Z
|
"""
Tk2
Tk2 is a convenience library for extending the functionality of Tkinter,
to make it easier and more flexible to create GUI applications.
"""
from .basics import *
from .scrollwidgets import *
from .texteditor import Text, MultiTextSearch
from .variables import *
# Later
from .multiwidgets import *
from .progbar import *
from .ribbon import *
#from orderedlist import *
#from calendar import *
from .web import *  # relative import, consistent with the other tk2 modules
from . import filedialog
from . import messagebox
from . import colorchooser
from . import dispatch
| 20.307692
| 73
| 0.767045
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 206
| 0.390152
|
bef5aaf1ff9723ae8680002976dbc5ebda4fccc9
| 37
|
py
|
Python
|
pp/web/base/tests/test_forjenkins.py
|
oisinmulvihill/pp-web-base
|
0be51b1d98c4923e1f4ccbfaea59ae662a8c5cdc
|
[
"BSD-3-Clause"
] | null | null | null |
pp/web/base/tests/test_forjenkins.py
|
oisinmulvihill/pp-web-base
|
0be51b1d98c4923e1f4ccbfaea59ae662a8c5cdc
|
[
"BSD-3-Clause"
] | null | null | null |
pp/web/base/tests/test_forjenkins.py
|
oisinmulvihill/pp-web-base
|
0be51b1d98c4923e1f4ccbfaea59ae662a8c5cdc
|
[
"BSD-3-Clause"
] | null | null | null |
def test_nonop():
assert 1 == 1
| 9.25
| 17
| 0.567568
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
bef5e1efda3dd7f954a3c06d34cbd2bd9106ec13
| 803
|
py
|
Python
|
tools/deepke/relation_extraction/standard/models/BasicModule.py
|
dfface/DoctorKG
|
6bd6ebec8244a9ce0a2c8c278a704f02b9afaaf8
|
[
"MIT"
] | 1
|
2022-03-26T16:08:08.000Z
|
2022-03-26T16:08:08.000Z
|
tools/deepke/relation_extraction/standard/models/BasicModule.py
|
dfface/DoctorKG
|
6bd6ebec8244a9ce0a2c8c278a704f02b9afaaf8
|
[
"MIT"
] | null | null | null |
tools/deepke/relation_extraction/standard/models/BasicModule.py
|
dfface/DoctorKG
|
6bd6ebec8244a9ce0a2c8c278a704f02b9afaaf8
|
[
"MIT"
] | null | null | null |
import os
import time
import torch
import torch.nn as nn
class BasicModule(nn.Module):
    '''
    Wraps nn.Module, providing save and load methods.
    '''
def __init__(self):
super(BasicModule, self).__init__()
def load(self, path, device):
        '''
        Load the model from the given path.
        '''
self.load_state_dict(torch.load(path, map_location=device))
def save(self, epoch=0, cfg=None):
        '''
        Save the model; the file name defaults to "model name + timestamp".
        '''
time_prefix = time.strftime('%Y-%m-%d_%H-%M-%S')
prefix = os.path.join(cfg.cwd, 'checkpoints',time_prefix)
os.makedirs(prefix, exist_ok=True)
name = os.path.join(prefix, cfg.model_name + '_' + f'epoch{epoch}' + '.pth')
torch.save(self.state_dict(), name)
return name
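# A minimal usage sketch (an assumption: `cfg` is any object exposing the
# `cwd` and `model_name` attributes that save() reads above):
#     model = MyModel()                     # a BasicModule subclass
#     path = model.save(epoch=3, cfg=cfg)
#     model.load(path, device=torch.device('cpu'))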
| 22.942857
| 85
| 0.555417
| 808
| 0.919226
| 0
| 0
| 0
| 0
| 0
| 0
| 264
| 0.300341
|
bef6dbd81f470e4f916903c6f30ebc2cb970bd0a
| 310
|
py
|
Python
|
url_shortener_client/exceptions/__init__.py
|
Andrelpoj/hire.me
|
79428e2094a6b56e762a7f958e1b75f395f59cef
|
[
"Apache-2.0"
] | null | null | null |
url_shortener_client/exceptions/__init__.py
|
Andrelpoj/hire.me
|
79428e2094a6b56e762a7f958e1b75f395f59cef
|
[
"Apache-2.0"
] | null | null | null |
url_shortener_client/exceptions/__init__.py
|
Andrelpoj/hire.me
|
79428e2094a6b56e762a7f958e1b75f395f59cef
|
[
"Apache-2.0"
] | null | null | null |
class AliasNotFound(Exception):
def __init__(self, alias):
self.alias = alias
class AliasAlreadyExists(Exception):
def __init__(self, alias):
self.alias = alias
class UnexpectedServerResponse(Exception):
def __init__(self, response):
self.response = response
| 25.833333
| 43
| 0.670968
| 298
| 0.96129
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
bef786a72fbb29131b60f5c806a5c2a1d2c1e463
| 3,135
|
py
|
Python
|
software/nuke/init.py
|
kei-iketani/plex
|
cf09c8ef93984e5a69b23bf56248b87e4cfd98b0
|
[
"MIT"
] | 153
|
2018-03-22T18:29:17.000Z
|
2022-03-07T03:43:09.000Z
|
software/nuke/init.py
|
kei-iketani/plex
|
cf09c8ef93984e5a69b23bf56248b87e4cfd98b0
|
[
"MIT"
] | 30
|
2018-08-16T16:27:42.000Z
|
2021-02-24T05:37:25.000Z
|
software/nuke/init.py
|
alexanderrichter/arPipeline
|
3466f70a79e4d32c0647ba21d9689157a0f7772e
|
[
"MIT"
] | 34
|
2018-03-24T03:54:05.000Z
|
2022-03-10T11:36:52.000Z
|
#*********************************************************************
# content = init Nuke
# version = 0.1.0
# date = 2019-12-01
#
# license = MIT <https://github.com/alexanderrichtertd>
# author = Alexander Richter <alexanderrichtertd.com>
#*********************************************************************
import os
import errno
import nuke
import pipefunc
from tank import Tank
#*********************************************************************
# VARIABLE
TITLE = os.path.splitext(os.path.basename(__file__))[0]
LOG = Tank().log.init(script=TITLE)
PROJECT_DATA = Tank().data_project
RESOLUTION = (' ').join([str(PROJECT_DATA['resolution'][0]),
str(PROJECT_DATA['resolution'][1]),
PROJECT_DATA['name'].replace(' ', '')])
#*********************************************************************
# FOLDER CREATION
def create_write_dir():
file_name = nuke.filename(nuke.thisNode())
file_path = os.path.dirname(file_name)
os_path = nuke.callbacks.filenameFilter(file_path)
# cope with the directory existing already by ignoring that exception
try: os.makedirs(os_path)
    except OSError as e:
if e.errno != errno.EEXIST:
raise
def add_plugin_paths():
# ADD all IMG paths
for img in os.getenv('IMG_PATH').split(';'):
for img_sub in pipefunc.get_deep_folder_list(path=img, add_path=True):
nuke.pluginAddPath(img_sub)
# ADD sub software paths
for paths in os.getenv('SOFTWARE_SUB_PATH').split(';'):
nuke.pluginAddPath(paths)
#*********************************************************************
# PIPELINE
Tank().init_software()
add_plugin_paths()
try: from scripts import write_node
except: LOG.warning('FAILED loading write_node')
# LOAD paths
try:
for paths in os.getenv('SOFTWARE_SUB_PATH').split(';'):
nuke.pluginAddPath(paths)
except:
LOG.warning('FAILED loading SOFTWARE_SUB_PATH')
print('SETTINGS')
# RESOLUTION *********************************************************************
try:
nuke.addFormat(RESOLUTION)
nuke.knobDefault('Root.format', PROJECT_DATA['name'].replace(' ', ''))
print(' {} ON - {}'.format(chr(254), RESOLUTION))
except:
LOG.error(' OFF - {}'.format(RESOLUTION), exc_info=True)
print(' {} OFF - {}'.format(chr(254), RESOLUTION))
# FPS *********************************************************************
try:
nuke.knobDefault("Root.fps", str(PROJECT_DATA['fps']))
print(' {} ON - {} fps'.format(chr(254), PROJECT_DATA['fps']))
except:
LOG.error(' OFF - {} fps'.format(PROJECT_DATA['fps']), exc_info=True)
print(' {} OFF - {} fps'.format(chr(254), PROJECT_DATA['fps']))
# createFolder *********************************************************************
try:
nuke.addBeforeRender(create_write_dir)
print(' {} ON - create_write_dir (before render)'.format(chr(254)))
except:
LOG.error(' OFF - create_write_dir (before render)'.format(chr(254)), exc_info=True)
print(' {} OFF - create_write_dir (before render)'.format(chr(254)))
print('')
| 30.436893
| 89
| 0.536204
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,382
| 0.440829
|
bef8b3cc0e57e7d1cb77a57410c13eec81de3df9
| 589
|
py
|
Python
|
main.py
|
gaoshanyu/web_ui_test_sample
|
8a6cc9b54b5f728af7ef0725dea42d759bd115d0
|
[
"MIT"
] | null | null | null |
main.py
|
gaoshanyu/web_ui_test_sample
|
8a6cc9b54b5f728af7ef0725dea42d759bd115d0
|
[
"MIT"
] | null | null | null |
main.py
|
gaoshanyu/web_ui_test_sample
|
8a6cc9b54b5f728af7ef0725dea42d759bd115d0
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
# Created at 03/10/2020
__author__ = 'raniys'
import pytest
if __name__ == '__main__':
# -v: verbose; -s: shortcut for --capture=no;
# -m: only run tests matching given mark expression. example: -m 'mark1 and not mark2';
# --html=path: create html report file at given path.
# pytest.main(["-v", "-s", "-m", "smoke", "--html=./reports/smoke_tests_report.html"])
# pytest.main(["-v", "-s", "-m", "sample", "--html=./reports/sample_tests_report.html"])
pytest.main(["-v", "-s", "-m", "search", "--html=./reports/search_tests_report.html"])
| 39.266667
| 92
| 0.614601
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 486
| 0.825127
|
bef9a72ceb82bbb48832da89c306ea29b20a4752
| 863
|
py
|
Python
|
rnd/HaskellRSLCompiler/test/parse/test.py
|
syoyo/lucille
|
ff81b332ae78181dbbdc1ec3c3b0f59992e7c0fa
|
[
"BSD-3-Clause"
] | 77
|
2015-01-29T21:02:10.000Z
|
2022-03-04T11:23:12.000Z
|
rnd/HaskellRSLCompiler/test/parse/test.py
|
syoyo/lucille
|
ff81b332ae78181dbbdc1ec3c3b0f59992e7c0fa
|
[
"BSD-3-Clause"
] | 1
|
2018-11-08T02:11:24.000Z
|
2018-11-08T04:31:17.000Z
|
rnd/HaskellRSLCompiler/test/parse/test.py
|
syoyo/lucille
|
ff81b332ae78181dbbdc1ec3c3b0f59992e7c0fa
|
[
"BSD-3-Clause"
] | 13
|
2015-04-20T08:17:29.000Z
|
2020-06-17T18:35:06.000Z
|
#!/usr/bin/env python
import os, sys
import subprocess
import re
import glob
errlog = []
def run(f):
cmd = "../../lslc"
p = subprocess.Popen([cmd, f], stdout=subprocess.PIPE, stderr=subprocess.PIPE, close_fds=True)
outs = [l for l in p.stdout]
errs = [l for l in p.stderr]
errline = re.compile("TODO")
failed = False
for l in errs:
if errline.search(l):
failed = True
if failed:
print "[FAIL] ", f
errlog.append("==== [" + f + "] ====")
for l in errs:
errlog.append(l[:-1])
errlog.append("=====================")
errlog.append("\n")
else:
print "[OK ] ", f
def main():
for f in glob.glob("*.sl"):
run(f)
f = open("errlog.log", "w")
for l in errlog:
print >>f, l
if __name__ == '__main__':
main()
| 17.979167
| 98
| 0.499421
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 131
| 0.151796
|
befb15db729fb7dcc8145ec21e9b327a9461b95c
| 657
|
py
|
Python
|
glasses/models/classification/base/__init__.py
|
rentainhe/glasses
|
34300a76985c7fc643094fa8d617114926a0ee75
|
[
"MIT"
] | 271
|
2020-10-20T12:30:23.000Z
|
2022-03-17T03:02:38.000Z
|
glasses/models/classification/base/__init__.py
|
rentainhe/glasses
|
34300a76985c7fc643094fa8d617114926a0ee75
|
[
"MIT"
] | 212
|
2020-07-25T13:02:23.000Z
|
2022-02-20T10:33:32.000Z
|
glasses/models/classification/base/__init__.py
|
rentainhe/glasses
|
34300a76985c7fc643094fa8d617114926a0ee75
|
[
"MIT"
] | 23
|
2021-01-03T13:53:36.000Z
|
2022-03-17T05:40:34.000Z
|
from torch import Tensor, nn
from ...base import VisionModule
class ClassificationModule(VisionModule):
"""Base Classification Module class"""
def __init__(
self,
encoder: nn.Module,
head: nn.Module,
in_channels: int = 3,
n_classes: int = 1000,
**kwargs
):
super().__init__()
self.encoder = encoder(in_channels=in_channels, **kwargs)
self.head = head(self.encoder.widths[-1], n_classes)
self.initialize()
def initialize(self):
pass
def forward(self, x: Tensor) -> Tensor:
x = self.encoder(x)
x = self.head(x)
return x
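# A minimal usage sketch (toy encoder/head factories, not glasses' real model
# zoo; the `widths` attribute is the only contract ClassificationModule
# relies on):
#     class TinyEncoder(nn.Sequential):
#         def __init__(self, in_channels=3, **kwargs):
#             super().__init__(nn.Conv2d(in_channels, 8, 3),
#                              nn.AdaptiveAvgPool2d(1), nn.Flatten())
#             self.widths = [8]
#     model = ClassificationModule(TinyEncoder,
#                                  lambda in_feat, n: nn.Linear(in_feat, n),
#                                  n_classes=10)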
| 21.9
| 65
| 0.584475
| 591
| 0.899543
| 0
| 0
| 0
| 0
| 0
| 0
| 38
| 0.057839
|
befc1052790c2cb39af3f31238e68ac4213b7a50
| 3,202
|
py
|
Python
|
lib/data.py
|
PEDIA-Charite/classifier
|
13e9d6108f9691b089aac59c7392f7940033b8af
|
[
"MIT"
] | 2
|
2019-04-04T03:44:25.000Z
|
2019-12-23T17:08:51.000Z
|
lib/data.py
|
PEDIA-Charite/classifier
|
13e9d6108f9691b089aac59c7392f7940033b8af
|
[
"MIT"
] | 9
|
2017-05-23T09:55:15.000Z
|
2019-11-22T11:24:20.000Z
|
lib/data.py
|
PEDIA-Charite/classifier
|
13e9d6108f9691b089aac59c7392f7940033b8af
|
[
"MIT"
] | 2
|
2017-05-24T12:23:13.000Z
|
2019-09-03T08:36:18.000Z
|
# -*- coding: utf-8 -*-
import os
import numpy as np
import sys
import logging
import csv
# Setup logging
logger = logging.getLogger(__name__)
console_handle = logging.StreamHandler()
console_handle.setLevel(logging.INFO)
formatter = logging.Formatter('%(asctime)s: %(message)s', datefmt='%m-%d %H:%M')
console_handle.setFormatter(formatter)
logger.addHandler(console_handle)
class Data:
"""Common class for a list of instances of the class Samples
Attributes:
name: name of the data as a string
samples: a list of samples as instances of class Sample
casedisgene: a list of lists [[case,gene]] containing each case in samples and the respective disease causing gene
"""
# index for each score
FM_IDX = 0
CADD_IDX = 1
GESTALT_IDX = 2
BOQA_IDX = 3
PHENO_IDX = 4
# FEATURE_IDX is for feature vector which contain the above feature score
# LABEL_IDX is for pathogenic gene label (0, 1)
# GENE_IDX is for gene symbol
FEATURE_IDX = 0
LABEL_IDX = 1
GENE_IDX = 2
GENE_NAME_IDX = 3
def __init__(self):
self.data = {}
# Filter dict
self.filter_dict = {0: "feature_score", 1: "cadd_phred_score", 2: "gestalt_score", 3: "boqa_score", 4: "pheno_score"}
def loadData(self, input_file, filter_field=None):
filter_cases = []
with open(input_file) as csvfile:
reader = csv.DictReader(csvfile)
case = ""
for row in reader:
case = row["case"]
                if case not in self.data:
self.data.update({case:[[], [], [], []]})
x = self.data[case][self.FEATURE_IDX]
y = self.data[case][self.LABEL_IDX]
gene = self.data[case][self.GENE_IDX]
gene_name = self.data[case][self.GENE_NAME_IDX]
x.append([row["feature_score"], row["cadd_phred_score"], row["gestalt_score"], row["boqa_score"], row["pheno_score"]])
y.append(int(row["label"]))
gene.append(row["gene_id"])
gene_name.append(row["gene_symbol"])
                # filter out samples that lack the score field we selected
if filter_field != None:
if int(row["label"]) == 1:
if row[self.filter_dict[filter_field[0]]] == 'nan' or row[self.filter_dict[filter_field[0]]] == '0':
logger.debug("%s - %s has no %s score", case, row["gene_symbol"], self.filter_dict[filter_field[0]])
filter_cases.append(case)
for key in list(self.data):
if key in filter_cases:
del self.data[key]
else:
x = self.data[key][self.FEATURE_IDX]
y = self.data[key][self.LABEL_IDX]
x = np.array(x)
y = np.array(y)
self.data[key][self.FEATURE_IDX] = x
self.data[key][self.LABEL_IDX] = y
logger.info("Input %s: total %d cases", input_file, len(self.data))
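# A minimal usage sketch (the CSV path and case key are illustrative; the
# column names match what loadData() expects above):
#     data = Data()
#     data.loadData("pedia_scores.csv")
#     x, y, gene, gene_name = data.data["case_1"]   # per the *_IDX layout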
| 37.232558
| 135
| 0.553716
| 2,805
| 0.876015
| 0
| 0
| 0
| 0
| 0
| 0
| 908
| 0.283573
|
befd00f2e7be9ee982348d730c3ed4d4bbdd8988
| 177
|
py
|
Python
|
iris_sdk/models/data/tn_status.py
|
NumberAI/python-bandwidth-iris
|
0e05f79d68b244812afb97e00fd65b3f46d00aa3
|
[
"MIT"
] | 2
|
2020-04-13T13:47:59.000Z
|
2022-02-23T20:32:41.000Z
|
iris_sdk/models/data/tn_status.py
|
bandwidthcom/python-bandwidth-iris
|
dbcb30569631395041b92917252d913166f7d3c9
|
[
"MIT"
] | 5
|
2020-09-18T20:59:24.000Z
|
2021-08-25T16:51:42.000Z
|
iris_sdk/models/data/tn_status.py
|
bandwidthcom/python-bandwidth-iris
|
dbcb30569631395041b92917252d913166f7d3c9
|
[
"MIT"
] | 5
|
2018-12-12T14:39:50.000Z
|
2020-11-17T21:42:29.000Z
|
#!/usr/bin/env python
from iris_sdk.models.base_resource import BaseData
from iris_sdk.models.maps.tn_status import TnStatusMap
class TnStatus(TnStatusMap, BaseData):
pass
| 25.285714
| 54
| 0.813559
| 47
| 0.265537
| 0
| 0
| 0
| 0
| 0
| 0
| 21
| 0.118644
|
befd8dcdbdb6d9ed65837be1a16b79168d010d75
| 8,437
|
py
|
Python
|
venv/lib/python3.6/site-packages/ansible_collections/f5networks/f5_modules/plugins/modules/bigip_device_group_member.py
|
usegalaxy-no/usegalaxy
|
75dad095769fe918eb39677f2c887e681a747f3a
|
[
"MIT"
] | 1
|
2020-01-22T13:11:23.000Z
|
2020-01-22T13:11:23.000Z
|
venv/lib/python3.6/site-packages/ansible_collections/f5networks/f5_modules/plugins/modules/bigip_device_group_member.py
|
usegalaxy-no/usegalaxy
|
75dad095769fe918eb39677f2c887e681a747f3a
|
[
"MIT"
] | 12
|
2020-02-21T07:24:52.000Z
|
2020-04-14T09:54:32.000Z
|
venv/lib/python3.6/site-packages/ansible_collections/f5networks/f5_modules/plugins/modules/bigip_device_group_member.py
|
usegalaxy-no/usegalaxy
|
75dad095769fe918eb39677f2c887e681a747f3a
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright: (c) 2017, F5 Networks Inc.
# GNU General Public License v3.0 (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
DOCUMENTATION = r'''
---
module: bigip_device_group_member
short_description: Manages members in a device group
description:
- Manages members in a device group. Members in a device group can only
be added or removed, never updated. This is because the members are
identified by unique name values and changing that name would invalidate
the uniqueness.
version_added: "1.0.0"
options:
name:
description:
- Specifies the name of the device that you want to add to the
device group. Often this will be the hostname of the device.
This member must be trusted by the device already. Trusting
can be done with the C(bigip_device_trust) module and the
C(peer_hostname) option to that module.
type: str
required: True
device_group:
description:
- The device group to which you want to add the member.
type: str
required: True
state:
description:
- When C(present), ensures the device group member exists.
- When C(absent), ensures the device group member is removed.
type: str
choices:
- present
- absent
default: present
extends_documentation_fragment: f5networks.f5_modules.f5
author:
- Tim Rupp (@caphrim007)
- Wojciech Wypior (@wojtek0806)
'''
EXAMPLES = r'''
- name: Add the current device to the "device_trust_group" device group
bigip_device_group_member:
name: "{{ inventory_hostname }}"
device_group: device_trust_group
provider:
password: secret
server: lb.mydomain.com
user: admin
delegate_to: localhost
- name: Add the hosts in the current scope to "device_trust_group"
bigip_device_group_member:
name: "{{ item }}"
device_group: device_trust_group
provider:
password: secret
server: lb.mydomain.com
user: admin
loop: "{{ hostvars.keys() }}"
run_once: true
delegate_to: localhost
'''
RETURN = r'''
# only common fields returned
'''
from datetime import datetime
from ansible.module_utils.basic import AnsibleModule
from ..module_utils.bigip import F5RestClient
from ..module_utils.common import (
F5ModuleError, AnsibleF5Parameters, f5_argument_spec
)
from ..module_utils.icontrol import tmos_version
from ..module_utils.teem import send_teem
class Parameters(AnsibleF5Parameters):
api_map = {}
api_attributes = []
returnables = []
updatables = []
class ApiParameters(Parameters):
pass
class ModuleParameters(Parameters):
pass
class Changes(Parameters):
def to_return(self):
result = {}
try:
for returnable in self.returnables:
change = getattr(self, returnable)
if isinstance(change, dict):
result.update(change)
else:
result[returnable] = change
result = self._filter_params(result)
except Exception:
raise
return result
class UsableChanges(Changes):
pass
class ReportableChanges(Changes):
pass
class Difference(object):
pass
class ModuleManager(object):
def __init__(self, *args, **kwargs):
self.module = kwargs.get('module', None)
self.client = F5RestClient(**self.module.params)
self.want = Parameters(params=self.module.params)
self.have = None
self.changes = Changes()
def _set_changed_options(self):
changed = {}
for key in Parameters.returnables:
if getattr(self.want, key) is not None:
changed[key] = getattr(self.want, key)
if changed:
self.changes = Changes(params=changed)
def _announce_deprecations(self, result):
warnings = result.pop('__warnings', [])
for warning in warnings:
self.module.deprecate(
msg=warning['msg'],
version=warning['version']
)
def exec_module(self):
start = datetime.now().isoformat()
version = tmos_version(self.client)
changed = False
result = dict()
state = self.want.state
if state == "present":
changed = self.present()
elif state == "absent":
changed = self.absent()
reportable = ReportableChanges(params=self.changes.to_return())
changes = reportable.to_return()
result.update(**changes)
result.update(dict(changed=changed))
self._announce_deprecations(result)
send_teem(start, self.client, self.module, version)
return result
def present(self):
if self.exists():
return False
else:
return self.create()
def absent(self):
if self.exists():
return self.remove()
return False
def create(self):
self._set_changed_options()
if self.module.check_mode:
return True
self.create_on_device()
return True
def remove(self):
if self.module.check_mode:
return True
self.remove_from_device()
if self.exists():
raise F5ModuleError("Failed to remove the member from the device group.")
return True
def exists(self):
errors = [401, 403, 409, 500, 501, 502, 503, 504]
uri = "https://{0}:{1}/mgmt/tm/cm/device-group/{2}/devices/{3}".format(
self.client.provider['server'],
self.client.provider['server_port'],
self.want.device_group,
self.want.name
)
resp = self.client.api.get(uri)
try:
response = resp.json()
except ValueError as ex:
raise F5ModuleError(str(ex))
if resp.status == 404 or 'code' in response and response['code'] == 404:
return False
if resp.status in [200, 201] or 'code' in response and response['code'] in [200, 201]:
return True
if resp.status in errors or 'code' in response and response['code'] in errors:
if 'message' in response:
raise F5ModuleError(response['message'])
else:
raise F5ModuleError(resp.content)
def create_on_device(self):
params = self.changes.api_params()
params['name'] = self.want.name
params['partition'] = self.want.partition
uri = "https://{0}:{1}/mgmt/tm/cm/device-group/{2}/devices/".format(
self.client.provider['server'],
self.client.provider['server_port'],
self.want.device_group
)
resp = self.client.api.post(uri, json=params)
try:
response = resp.json()
except ValueError as ex:
raise F5ModuleError(str(ex))
if resp.status in [200, 201] or 'code' in response and response['code'] in [200, 201]:
return True
raise F5ModuleError(resp.content)
def remove_from_device(self):
uri = "https://{0}:{1}/mgmt/tm/cm/device-group/{2}/devices/{3}".format(
self.client.provider['server'],
self.client.provider['server_port'],
self.want.device_group,
self.want.name
)
response = self.client.api.delete(uri)
if response.status == 200:
return True
raise F5ModuleError(response.content)
class ArgumentSpec(object):
def __init__(self):
self.supports_check_mode = True
argument_spec = dict(
name=dict(required=True),
device_group=dict(required=True),
state=dict(
default='present',
choices=['absent', 'present']
),
)
self.argument_spec = {}
self.argument_spec.update(f5_argument_spec)
self.argument_spec.update(argument_spec)
def main():
spec = ArgumentSpec()
module = AnsibleModule(
argument_spec=spec.argument_spec,
supports_check_mode=spec.supports_check_mode
)
try:
mm = ModuleManager(module=module)
results = mm.exec_module()
module.exit_json(**results)
except F5ModuleError as ex:
module.fail_json(msg=str(ex))
if __name__ == '__main__':
main()
| 28.6
| 94
| 0.6174
| 5,482
| 0.649757
| 0
| 0
| 0
| 0
| 0
| 0
| 2,528
| 0.299633
|
befdd813dce9c8916652b3514805d92fc7258e7d
| 793
|
py
|
Python
|
comrade/blueprints/rest.py
|
sp3c73r2038/elasticsearch-comrade
|
ed0c94e071d2fe701a14429981390b9a89df79a7
|
[
"MIT"
] | 256
|
2019-09-09T10:09:34.000Z
|
2022-03-28T04:15:21.000Z
|
comrade/blueprints/rest.py
|
sp3c73r2038/elasticsearch-comrade
|
ed0c94e071d2fe701a14429981390b9a89df79a7
|
[
"MIT"
] | 503
|
2019-07-31T17:01:12.000Z
|
2022-03-28T13:19:26.000Z
|
comrade/blueprints/rest.py
|
nmeisels/elasticsearch-comrade
|
57dc600e5ffd7f9d4c055b584124bef9365e538c
|
[
"MIT"
] | 25
|
2019-08-30T13:04:31.000Z
|
2022-03-09T09:50:32.000Z
|
from elasticsearch import TransportError
from sanic import Blueprint
from sanic.request import Request
from sanic.response import HTTPResponse, json
from ..connections import get_client
rest_bp = Blueprint('rest')
def format_es_exception(e: TransportError):
return json({"status_code": e.status_code,
"error": e.error,
"info": e.info})
@rest_bp.route('/query', methods=['POST'])
async def close_index(request: Request) -> HTTPResponse:
client = get_client(request)
body = request.json['body']
method = request.json['method']
path = request.json['path']
try:
resp = await client.transport.perform_request(method, path, body=body)
except TransportError as e:
return format_es_exception(e)
return json(resp)
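# A minimal client-side sketch (hypothetical; not part of this blueprint)
# showing the JSON payload the /query route above expects: "method", "path"
# and "body" are forwarded verbatim to the Elasticsearch transport layer.
# The host/port below are assumptions.
#
#   import requests  # assumed to be available
#   resp = requests.post("http://localhost:8000/query", json={
#       "method": "GET",
#       "path": "/_cluster/health",
#       "body": None,
#   })
#   print(resp.json())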
| 28.321429
| 78
| 0.693569
| 0
| 0
| 0
| 0
| 412
| 0.519546
| 369
| 0.465322
| 66
| 0.083228
|
befebe8c408a00b9be09490e9fa3fb8d41c06ce6
| 1,081
|
py
|
Python
|
tests/test_utils.py
|
tedeler/pyexchange
|
58042f473cbd4f00769249ce9ca20c6a376eddb6
|
[
"Apache-2.0"
] | 128
|
2015-01-11T10:29:40.000Z
|
2021-06-25T05:27:45.000Z
|
tests/test_utils.py
|
tedeler/pyexchange
|
58042f473cbd4f00769249ce9ca20c6a376eddb6
|
[
"Apache-2.0"
] | 52
|
2015-01-02T15:24:28.000Z
|
2020-08-07T04:49:49.000Z
|
tests/test_utils.py
|
tedeler/pyexchange
|
58042f473cbd4f00769249ce9ca20c6a376eddb6
|
[
"Apache-2.0"
] | 96
|
2015-01-02T15:16:20.000Z
|
2021-12-25T01:37:46.000Z
|
from datetime import datetime
from pytz import timezone, utc
from pytest import mark
from pyexchange.utils import convert_datetime_to_utc
def test_converting_none_returns_none():
assert convert_datetime_to_utc(None) is None
def test_converting_non_tz_aware_date_returns_tz_aware():
utc_time = datetime(year=2014, month=1, day=1, hour=1, minute=1, second=1)
assert utc_time.tzinfo is None
assert convert_datetime_to_utc(utc_time) == datetime(year=2014, month=1, day=1, hour=1, minute=1, second=1, tzinfo=utc)
def test_converting_tz_aware_date_returns_tz_aware_date():
# US/Pacific timezone is UTC-07:00 (In April we are in DST)
# We use localize() because according to the pytz documentation, using the tzinfo
# argument of the standard datetime constructors does not work for timezones with DST.
pacific_time = timezone("US/Pacific").localize(datetime(year=2014, month=4, day=1, hour=1, minute=0, second=0))
utc_time = utc.localize(datetime(year=2014, month=4, day=1, hour=8, minute=0, second=0))
assert convert_datetime_to_utc(pacific_time) == utc_time
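# A minimal sketch of the function under test, inferred only from the
# assertions above; the real implementation lives in pyexchange.utils and
# may differ in detail.
#
#   def convert_datetime_to_utc(value):
#       if value is None:
#           return None
#       if value.tzinfo is None:
#           return value.replace(tzinfo=utc)  # treat naive datetimes as UTC
#       return value.astimezone(utc)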
| 43.24
| 121
| 0.781684
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 238
| 0.220167
|
befed480f20eb883fd15d6235756ef7750bbee56
| 786
|
py
|
Python
|
vidpub/__main__.py
|
gary9630/session-video-publisher
|
6602f53d722af8e569c82b7de8ef79a63293c766
|
[
"0BSD"
] | null | null | null |
vidpub/__main__.py
|
gary9630/session-video-publisher
|
6602f53d722af8e569c82b7de8ef79a63293c766
|
[
"0BSD"
] | 5
|
2020-11-15T12:45:03.000Z
|
2021-12-07T08:29:40.000Z
|
vidpub/__main__.py
|
gary9630/session-video-publisher
|
6602f53d722af8e569c82b7de8ef79a63293c766
|
[
"0BSD"
] | 4
|
2018-06-23T16:48:03.000Z
|
2021-04-18T09:51:29.000Z
|
import argparse
from .upload_video import upload_video
from .generate_playlist import generate_playlist
def parse_args(argv):
parser = argparse.ArgumentParser()
parser.add_argument(
"-u", "--upload", action="store_true", help="Upload videos to YouTube channel"
)
parser.add_argument(
"-p", "--playlist", action="store_true", help="Generate playlist information in json files"
)
parser.add_argument(
"-o", "--output_dir", default="./videos", help="Output path of video information"
)
return parser.parse_args(argv)
def main(argv=None):
options = parse_args(argv)
if options.upload:
upload_video()
if options.playlist:
generate_playlist(options.output_dir)
if __name__ == "__main__":
main()
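# Example invocations (the module lives at vidpub/__main__.py, so it can be
# run with `python -m vidpub`):
#   python -m vidpub --upload
#   python -m vidpub --playlist --output_dir ./videos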
| 23.818182
| 99
| 0.675573
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 205
| 0.260814
|
beff85e9c6691647f15d3bfe260f151e7cc2041f
| 3,275
|
py
|
Python
|
ally/utils/option.py
|
rjfranssen/PyAlly
|
f24d4d449dd0578f52e75365ad0ba69a572d3237
|
[
"MIT"
] | 53
|
2019-08-11T20:39:16.000Z
|
2022-02-01T02:05:12.000Z
|
ally/utils/option.py
|
rjfranssen/PyAlly
|
f24d4d449dd0578f52e75365ad0ba69a572d3237
|
[
"MIT"
] | 53
|
2019-12-11T06:39:59.000Z
|
2022-02-13T05:06:44.000Z
|
ally/utils/option.py
|
rjfranssen/PyAlly
|
f24d4d449dd0578f52e75365ad0ba69a572d3237
|
[
"MIT"
] | 31
|
2019-10-05T02:28:16.000Z
|
2022-02-03T03:41:42.000Z
|
# MIT License
#
# Copyright (c) 2020 Brett Graves
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import datetime
import math
from .utils import *
############################################################################
def option_format(symbol="", exp_date="1970-01-01", strike=0, direction=""):
"""Returns the OCC standardized option name.
Args:
symbol: the underlying symbol, case insensitive
        exp_date: date of expiration, as a 'YYYY-MM-DD' string
strike: strike price of the option
direction: 'C' or 'call' or the like, for call, otherwise 'p' or 'Put' for put
Returns:
OCC string, like 'IBM201231C00301000'
.. code-block:: python
# Construct the option's OCC symbol
>>> ibm_call = ally.utils.option_format(
exp_date = '2020-12-31',
symbol = 'IBM', # case insensitive
direction = 'call',
strike = 301
)
>>> ibm_call
'IBM201231C00301000'
"""
if not (
check(symbol) and check(exp_date) and check(str(strike)) and check(direction)
):
return ""
# direction into C or P
direction = "C" if "C" in direction.upper() else "P"
# Pad strike with zeros
def format_strike(strike):
x = str(math.floor(float(strike) * 1000))
return "0" * (8 - len(x)) + x
# Assemble
return (
str(symbol).upper()
+ datetime.datetime.strptime(exp_date, "%Y-%m-%d").strftime("%y%m%d")
+ direction
+ format_strike(strike)
)
def option_strike(name):
"""Pull apart an OCC standardized option name and
retreive the strike price, in integer form"""
return int(name[-8:]) / 1000.0
def option_maturity(name):
"""Given OCC standardized option name,
return the date of maturity"""
return datetime.datetime.strptime(name[-15:-9], "%y%m%d").strftime("%Y-%m-%d")
def option_callput(name):
"""Given OCC standardized option name,
    return whether it's a call or a put"""
return "call" if name.upper()[-9] == "C" else "put"
def option_symbol(name):
"""Given OCC standardized option name, return option ticker"""
return name[:-15]
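# Round-trip sketch for the helpers above, reusing the OCC symbol from the
# option_format() docstring:
#   >>> option_symbol('IBM201231C00301000')
#   'IBM'
#   >>> option_strike('IBM201231C00301000')
#   301.0
#   >>> option_maturity('IBM201231C00301000')
#   '2020-12-31'
#   >>> option_callput('IBM201231C00301000')
#   'call'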
| 31.796117
| 90
| 0.635725
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 2,376
| 0.725496
|
8300d2d4159d348f8f2a81357e0afeb556ced95e
| 460
|
py
|
Python
|
examples/104-python3-9-pipeline.py
|
marviniter/argo-dataflow
|
89a060b1c6ea70f7c26bc58a01ba675c3acc1c06
|
[
"Apache-2.0"
] | null | null | null |
examples/104-python3-9-pipeline.py
|
marviniter/argo-dataflow
|
89a060b1c6ea70f7c26bc58a01ba675c3acc1c06
|
[
"Apache-2.0"
] | null | null | null |
examples/104-python3-9-pipeline.py
|
marviniter/argo-dataflow
|
89a060b1c6ea70f7c26bc58a01ba675c3acc1c06
|
[
"Apache-2.0"
] | null | null | null |
from argo_dataflow import pipeline, kafka
def handler(msg, context):
return ("hi! " + msg.decode("UTF-8")).encode("UTF-8")
if __name__ == '__main__':
(pipeline("104-python3-9")
.owner('argoproj-labs')
.describe("""This example is of the Python 3.9 handler.
[Learn about handlers](../docs/HANDLERS.md)""")
.step(
(kafka('input-topic')
.code('main', handler)
.kafka('output-topic')
))
.save())
| 23
| 60
| 0.582609
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 186
| 0.404348
|
8300f1e857cc9e2e0c3bf9685d4664e9e4c8faa9
| 2,195
|
py
|
Python
|
djangur.py
|
JerryPopi/djangur-py
|
0ba76a1a9c0f77ded014f0f3a0b3a98bf7835f51
|
[
"MIT"
] | null | null | null |
djangur.py
|
JerryPopi/djangur-py
|
0ba76a1a9c0f77ded014f0f3a0b3a98bf7835f51
|
[
"MIT"
] | null | null | null |
djangur.py
|
JerryPopi/djangur-py
|
0ba76a1a9c0f77ded014f0f3a0b3a98bf7835f51
|
[
"MIT"
] | null | null | null |
import asyncio
import discord
from commands import Commands, Guild_Instance, leave, play_search
import os
from pymongo import MongoClient
from dotenv import load_dotenv
load_dotenv()
CONNECTION_STRING = f"mongodb+srv://{os.environ['mongo_user']}:{os.environ['mongo_pass']}@djangur.erogd.mongodb.net/djangur?retryWrites=true&w=majority"
db_client = MongoClient(CONNECTION_STRING)
db = db_client['djangur']
client = discord.Client()
@client.event
async def on_ready():
print('Logged in as {0.user}'.format(client))
print(os.environ['prefix'])
@client.event
async def on_message(msg):
if msg.author == client.user:
return
ginst = Guild_Instance.by_id(msg.guild.id)
ginst.tc = msg.channel
ginst.db = db[str(msg.guild.id)]
if msg.content.isdigit() and ginst.searching:
await play_search(msg.content, msg=msg, client=client, ginst=ginst)
if not msg.content.startswith(os.environ['prefix']):
return
no_prefix = msg.content[len(os.environ['prefix']):]
split = no_prefix.split(' ', 1)
cmd = split[0]
args = split[1] if (len(split) == 2) else ''
if cmd in Commands.command_map:
await Commands.command_map[cmd].fn(args, msg=msg, client=client, ginst=ginst)
else:
await msg.channel.send(f'{cmd}: Command not found.')
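# Dispatch sketch for on_message above (assuming the env prefix is '!'):
#   '!play despacito'  ->  cmd='play', args='despacito'
#   '!skip'            ->  cmd='skip', args=''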
@client.event
async def on_voice_state_update(member, before, after):
    if member.name != 'Tramvai':
return
elif before.channel is None:
ginst = Guild_Instance.by_id(after.channel.guild.id)
voice = after.channel.guild.voice_client
time = 0
while True:
await asyncio.sleep(1)
time = time + 1
if voice.is_playing() and not voice.is_paused():
time = 0
if time == 600:
print(await Commands.command_map['leave'].fn(None, None, None, ginst))
if not voice.is_connected():
break
elif before.channel is not None:
if after.channel is None:
ginst = Guild_Instance.by_id(before.channel.guild.id)
await Commands.command_map['leave'].fn(None, None, None, ginst)
client.run(os.environ['token'])
| 29.662162
| 152
| 0.653303
| 0
| 0
| 0
| 0
| 1,719
| 0.783144
| 1,677
| 0.764009
| 251
| 0.114351
|
830374b559d44b39454687ae70bffd40d78c9944
| 44,236
|
py
|
Python
|
membership/models.py
|
str4nd/sikteeri
|
34dd5a4dc35558cdba9e6f97fd38fb661a36b8a5
|
[
"MIT"
] | 22
|
2015-03-30T19:33:15.000Z
|
2022-01-10T03:52:43.000Z
|
membership/models.py
|
str4nd/sikteeri
|
34dd5a4dc35558cdba9e6f97fd38fb661a36b8a5
|
[
"MIT"
] | 66
|
2015-05-15T13:54:59.000Z
|
2021-05-27T20:28:39.000Z
|
membership/models.py
|
str4nd/sikteeri
|
34dd5a4dc35558cdba9e6f97fd38fb661a36b8a5
|
[
"MIT"
] | 13
|
2015-03-09T18:59:29.000Z
|
2022-01-10T04:08:38.000Z
|
# -*- coding: utf-8 -*-
from datetime import datetime, timedelta
from decimal import Decimal
import logging
from django.core.files.storage import FileSystemStorage
from membership.billing.pdf_utils import get_bill_pdf, create_reminder_pdf
from membership.reference_numbers import barcode_4, group_right,\
generate_membership_bill_reference_number
import traceback
from io import StringIO, BytesIO
from django.core.exceptions import ObjectDoesNotExist
from django.db import models
from django.db import transaction
from django.db.models import Q, Sum, Count
from django.utils.translation import ugettext_lazy as _
import django.utils.timezone
from django.conf import settings
from django.template.loader import render_to_string
from django.forms import ValidationError
from django.db.models.query import QuerySet
from django.contrib.contenttypes.models import ContentType
from .utils import log_change, tupletuple_to_dict
from membership.signals import send_as_email, send_preapprove_email, send_duplicate_payment_notice
from .email_utils import bill_sender, preapprove_email_sender, duplicate_payment_sender, format_email
logger = logging.getLogger("membership.models")
class BillingEmailNotFound(Exception):
pass
class MembershipOperationError(Exception):
pass
class MembershipAlreadyStatus(MembershipOperationError):
pass
class PaymentAttachedError(Exception): pass
MEMBER_TYPES = (('P', _('Person')),
('J', _('Junior')),
('S', _('Supporting')),
('O', _('Organization')),
('H', _('Honorary')))
MEMBER_TYPES_DICT = tupletuple_to_dict(MEMBER_TYPES)
STATUS_NEW = 'N'
STATUS_PREAPPROVED = 'P'
STATUS_APPROVED = 'A'
STATUS_DIS_REQUESTED = 'S'
STATUS_DISASSOCIATED = 'I'
STATUS_DELETED = 'D'
MEMBER_STATUS = ((STATUS_NEW, _('New')),
(STATUS_PREAPPROVED, _('Pre-approved')),
(STATUS_APPROVED, _('Approved')),
(STATUS_DIS_REQUESTED, _('Dissociation requested')),
(STATUS_DISASSOCIATED, _('Dissociated')),
(STATUS_DELETED, _('Deleted')))
MEMBER_STATUS_DICT = tupletuple_to_dict(MEMBER_STATUS)
BILL_EMAIL = 'E'
BILL_PAPER = 'P'
BILL_SMS = 'S'
BILL_TYPES = (
(BILL_EMAIL, _('Email')),
(BILL_PAPER, _('Paper')),
(BILL_SMS, _('SMS'))
)
BILL_TYPES_DICT = tupletuple_to_dict(BILL_TYPES)
def logging_log_change(sender, instance, created, **kwargs):
operation = "created" if created else "modified"
logger.info('%s %s: %s' % (sender.__name__, operation, repr(instance)))
def _get_logs(self):
'''Gets the log entries related to this object.
Getter to be used as property instead of GenericRelation'''
my_class = self.__class__
ct = ContentType.objects.get_for_model(my_class)
object_logs = ct.logentry_set.filter(object_id=self.id)
return object_logs
class Contact(models.Model):
logs = property(_get_logs)
last_changed = models.DateTimeField(auto_now=True, verbose_name=_('contact changed'))
created = models.DateTimeField(auto_now_add=True, verbose_name=_('contact created'))
first_name = models.CharField(max_length=128, verbose_name=_('First name'), blank=True) # Primary first name
given_names = models.CharField(max_length=128, verbose_name=_('Given names'), blank=True)
last_name = models.CharField(max_length=128, verbose_name=_('Last name'), blank=True)
organization_name = models.CharField(max_length=256, verbose_name=_('Organization name'), blank=True)
street_address = models.CharField(max_length=128, verbose_name=_('Street address'))
postal_code = models.CharField(max_length=10, verbose_name=_('Postal code'))
post_office = models.CharField(max_length=128, verbose_name=_('Post office'))
country = models.CharField(max_length=128, verbose_name=_('Country'))
phone = models.CharField(max_length=64, blank=True, verbose_name=_('Phone'))
sms = models.CharField(max_length=64, blank=True, verbose_name=_('SMS number'))
email = models.EmailField(blank=True, verbose_name=_('E-mail'))
homepage = models.URLField(blank=True, verbose_name=_('Homepage'))
def save(self, *args, **kwargs):
if self.homepage:
if '://' not in self.homepage:
self.homepage = "http://{homepage}".format(homepage=self.homepage)
if self.organization_name:
if len(self.organization_name) < 5:
raise Exception("Organization's name should be at least 5 characters.")
super(Contact, self).save(*args, **kwargs)
def delete_if_no_references(self, user):
person = Q(person=self)
org = Q(organization=self)
billing = Q(billing_contact=self)
tech = Q(tech_contact=self)
refs = Membership.objects.filter(person | org | billing | tech)
if refs.count() == 0:
logger.info("Deleting contact %s: no more references (by %s)" % (
str(self), str(user)))
self.logs.delete()
self.delete()
def find_memberid(self):
        # Is there a better way to find a memberid?
try:
return Membership.objects.get(person_id=self.id).id
except Membership.DoesNotExist:
pass
try:
return Membership.objects.get(organization_id=self.id).id
except Membership.DoesNotExist:
pass
try:
return Membership.objects.get(billing_contact_id=self.id).id
except Membership.DoesNotExist:
pass
try:
return Membership.objects.get(tech_contact_id=self.id).id
except Membership.DoesNotExist:
return None
def email_to(self):
if self.email:
return format_email(name=self.name(), email=self.email)
return None
def name(self):
if self.organization_name:
return self.organization_name
else:
return '%s %s' % (self.first_name, self.last_name)
def __str__(self):
if self.organization_name:
return self.organization_name
else:
return '%s %s' % (self.last_name, self.first_name)
class MembershipManager(models.Manager):
def sort(self, sortkey):
qs = MembershipQuerySet(self.model)
return qs.sort(sortkey)
def get_query_set(self):
return MembershipQuerySet(self.model)
class MembershipQuerySet(QuerySet):
def sort(self, sortkey):
sortkey = sortkey.strip()
if sortkey == "name":
return self.order_by("person__first_name",
"organization__organization_name")
elif sortkey == "-name":
return self.order_by("person__first_name",
"organization__organization_name"
).reverse()
elif sortkey == "last_name":
return self.order_by("person__last_name",
"organization__organization_name")
elif sortkey == "-last_name":
return self.order_by("person__last_name",
"organization__organization_name").reverse()
return self.order_by(sortkey)
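# Usage sketch for the custom manager/queryset above:
#   Membership.objects.sort("last_name")   # people by surname, orgs by name
#   Membership.objects.sort("-name")       # first-name/org ordering, reversed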
class Membership(models.Model):
class Meta:
permissions = (
("read_members", "Can read member details"),
("manage_members", "Can change details, pre-/approve"),
("delete_members", "Can delete members"),
("dissociate_members", "Can dissociate members"),
("request_dissociation_for_member", "Can request dissociation for member"),
)
logs = property(_get_logs)
type = models.CharField(max_length=1, choices=MEMBER_TYPES, verbose_name=_('Membership type'))
status = models.CharField(max_length=1, choices=MEMBER_STATUS, default=STATUS_NEW, verbose_name=_('Membership status'))
created = models.DateTimeField(auto_now_add=True, verbose_name=_('Membership created'))
approved = models.DateTimeField(blank=True, null=True, verbose_name=_('Membership approved'))
last_changed = models.DateTimeField(auto_now=True, verbose_name=_('Membership changed'))
public_memberlist = models.BooleanField(_('Show in the memberlist'), default=False)
municipality = models.CharField(_('Home municipality'), max_length=128, blank=True)
nationality = models.CharField(_('Nationality'), max_length=128)
birth_year = models.IntegerField(_('Year of birth'), null=True, blank=True)
organization_registration_number = models.CharField(_('Business ID'),
blank=True, max_length=15)
person = models.ForeignKey('Contact', related_name='person_set', verbose_name=_('Person'), blank=True, null=True,
on_delete=models.PROTECT)
billing_contact = models.ForeignKey('Contact', related_name='billing_set', verbose_name=_('Billing contact'),
blank=True, null=True, on_delete=models.PROTECT)
tech_contact = models.ForeignKey('Contact', related_name='tech_contact_set', verbose_name=_('Technical contact'),
blank=True, null=True, on_delete=models.PROTECT)
organization = models.ForeignKey('Contact', related_name='organization_set', verbose_name=_('Organization'),
blank=True, null=True, on_delete=models.PROTECT)
extra_info = models.TextField(blank=True, verbose_name=_('Additional information'))
locked = models.DateTimeField(blank=True, null=True, verbose_name=_('Membership locked'))
dissociation_requested = models.DateTimeField(blank=True, null=True, verbose_name=_('Dissociation requested'))
dissociated = models.DateTimeField(blank=True, null=True, verbose_name=_('Member dissociated'))
objects = MembershipManager()
def primary_contact(self):
if self.organization:
return self.organization
else:
return self.person
def name(self):
if self.primary_contact():
return self.primary_contact().name()
else:
return str(self)
def email(self):
return self.primary_contact().email
def email_to(self):
return self.primary_contact().email_to()
def get_billing_contact(self):
'''Resolves the actual billing contact. Useful for billing details.'''
if self.billing_contact:
return self.billing_contact
elif self.person:
return self.person
else:
return self.organization
def billing_email(self):
'''Finds the best email address for billing'''
contact_priority_list = [self.billing_contact, self.person,
self.organization]
for contact in contact_priority_list:
if contact:
if contact.email:
return str(contact.email_to())
        raise BillingEmailNotFound("Neither billing nor administrative contact "
                                   "has an email address")
# https://docs.djangoproject.com/en/dev/ref/models/instances/#django.db.models.Model.clean
def clean(self):
if self.type not in list(MEMBER_TYPES_DICT.keys()):
raise ValidationError("Illegal member type '%s'" % self.type)
if self.status not in list(MEMBER_STATUS_DICT.keys()):
raise ValidationError("Illegal member status '%s'" % self.status)
if self.status != STATUS_DELETED:
if self.type == 'O' and self.person:
raise ValidationError("Organization may not have a person contact.")
if self.type != 'O' and self.organization:
raise ValidationError("Non-organization may not have an organization contact.")
if self.person and self.organization:
raise ValidationError("Person-contact and organization-contact are mutually exclusive.")
if not self.person and not self.organization:
raise ValidationError("Either Person-contact or organization-contact must be defined.")
if not self.municipality:
raise ValidationError("Municipality can't be null.")
else:
if self.person or self.organization or self.billing_contact or self.tech_contact:
raise ValidationError("A membership may not have any contacts if it is deleted.")
def save(self, *args, **kwargs):
try:
self.full_clean()
except ValidationError as ve:
raise ve
super(Membership, self).save(*args, **kwargs)
def _change_status(self, new_status):
# Allowed transitions From State: [TO STATES]
_allowed_transitions = {
STATUS_NEW: [
STATUS_PREAPPROVED,
STATUS_DELETED
],
STATUS_PREAPPROVED: [
STATUS_APPROVED,
STATUS_DELETED
],
STATUS_APPROVED: [
STATUS_DIS_REQUESTED,
STATUS_DISASSOCIATED
],
STATUS_DISASSOCIATED: [
STATUS_DELETED
],
STATUS_DIS_REQUESTED: [
STATUS_DISASSOCIATED,
STATUS_APPROVED
],
}
with transaction.atomic():
me = Membership.objects.select_for_update().filter(pk=self.pk)[0]
current_status = me.status
if new_status == current_status:
raise MembershipAlreadyStatus("Membership is already {status}".format(status=new_status))
elif new_status not in _allowed_transitions[current_status]:
raise MembershipOperationError("Membership status can't change from {current} to {new}".format(
current=current_status, new=new_status))
me.status = new_status
if new_status == STATUS_APPROVED:
# Preserve original approve time (cancel dissociation)
if not me.approved:
me.approved = datetime.now()
me.dissociation_requested = None
elif new_status == STATUS_DIS_REQUESTED:
me.dissociation_requested = datetime.now()
elif new_status == STATUS_DISASSOCIATED:
me.dissociated = datetime.now()
me.cancel_outstanding_bills()
elif new_status == STATUS_DELETED:
me.person = None
me.billing_contact = None
me.tech_contact = None
me.organization = None
me.municipality = ''
me.birth_year = None
me.organization_registration_number = ''
me.save()
self.refresh_from_db()
def preapprove(self, user):
assert user is not None
self._change_status(new_status=STATUS_PREAPPROVED)
log_change(self, user, change_message="Preapproved")
ret_items = send_preapprove_email.send_robust(self.__class__, instance=self, user=user)
for item in ret_items:
sender, error = item
if error is not None:
raise error
logger.info("Membership {membership} preapproved.".format(membership=self))
def approve(self, user):
assert user is not None
self._change_status(new_status=STATUS_APPROVED)
log_change(self, user, change_message="Approved")
def request_dissociation(self, user):
assert user is not None
        self._change_status(new_status=STATUS_DIS_REQUESTED)
log_change(self, user, change_message="Dissociation requested")
def cancel_dissociation_request(self, user):
assert user is not None
if not self.approved:
raise MembershipOperationError("Can't cancel dissociation request unless approved as member")
self._change_status(new_status=STATUS_APPROVED)
log_change(self, user, change_message="Dissociation request state reverted")
def dissociate(self, user):
assert user is not None
self._change_status(new_status=STATUS_DISASSOCIATED)
log_change(self, user, change_message="Dissociated")
def cancel_outstanding_bills(self):
try:
latest_billingcycle = self.billingcycle_set.latest('start')
if not latest_billingcycle.is_paid:
bill = latest_billingcycle.first_bill()
if not bill.is_reminder():
CancelledBill.objects.get_or_create(bill=bill)
logger.info("Created CancelledBill for Member #{member.pk} bill {bill.pk}".format(
bill=bill, member=bill.billingcycle.membership))
except ObjectDoesNotExist:
return # No billing cycle, no need to cancel bills
@transaction.atomic
def delete_membership(self, user):
assert user is not None
me = Membership.objects.select_for_update().filter(pk=self.pk)[0]
if me.status == STATUS_DELETED:
raise MembershipAlreadyStatus("Membership already deleted")
elif me.status == STATUS_NEW:
# must be imported here due to cyclic imports
from services.models import Service
logger.info("Deleting services of the membership application %s." % repr(self))
for service in Service.objects.filter(owner=self):
service.delete()
logger.info("Deleting aliases of the membership application %s." % repr(self))
for alias in self.alias_set.all():
alias.delete()
else:
logger.info("Not deleting services of membership %s." % repr(self))
logger.info("Expiring aliases of membership %s." % repr(self))
for alias in self.alias_set.all():
alias.expire()
contacts = [self.person, self.billing_contact, self.tech_contact,
self.organization]
self._change_status(new_status=STATUS_DELETED)
for contact in contacts:
if contact is not None:
contact.delete_if_no_references(user)
log_change(self, user, change_message="Deleted")
def duplicates(self):
"""
Finds duplicates of memberships, looks for similar names, emails, phone
numbers and contact details. Returns a QuerySet object that doesn't
include the membership of which duplicates are search for itself.
"""
matches = Membership.objects.none()
if self.person and not self.organization:
# Matches by first or last name
matches |= Membership.objects.filter(
person__first_name__icontains=self.person.first_name.strip(),
person__last_name__icontains=self.person.last_name.strip())
# Matches by email address
matches |= Membership.objects.filter(
person__email__contains=self.person.email.strip())
# Matches by phone or SMS number
phone_number = self.person.phone.strip()
sms_number = self.person.sms.strip()
if phone_number:
matches |= Membership.objects.filter(person__phone__icontains=phone_number)
if sms_number:
matches |= Membership.objects.filter(person__sms__icontains=sms_number)
elif self.organization and not self.person:
organization_name = self.organization.organization_name.strip()
matches = Membership.objects.filter(
organization__organization_name__icontains=organization_name)
return matches.exclude(id=self.id)
@classmethod
def search(cls, query):
person_contacts = Contact.objects
org_contacts = Contact.objects
# Split into words and remove duplicates
words = set(query.split(" "))
# Each word narrows the search further
for word in words:
# Exact match for membership id (for Django admin)
if word.startswith('#'):
try:
mid = int(word[1:])
person_contacts = person_contacts.filter(person_set__id=mid)
org_contacts = org_contacts.filter(organization_set__id=mid)
continue
except ValueError:
pass # Continue processing normal search
# Exact word match when word is "word"
if word.startswith('"') and word.endswith('"'):
word = word[1:-1]
# Search query for people
f_q = Q(first_name__iexact=word)
l_q = Q(last_name__iexact=word)
g_q = Q(given_names__iexact=word)
person_contacts = person_contacts.filter(f_q | l_q | g_q)
# Search for organizations
o_q = Q(organization_name__iexact=word)
org_contacts = org_contacts.filter(o_q)
else:
# Common search parameters
email_q = Q(email__icontains=word)
phone_q = Q(phone__icontains=word)
sms_q = Q(sms__icontains=word)
common_q = email_q | phone_q | sms_q
# Search query for people
f_q = Q(first_name__icontains=word)
l_q = Q(last_name__icontains=word)
g_q = Q(given_names__icontains=word)
person_contacts = person_contacts.filter(f_q | l_q | g_q | common_q)
# Search for organizations
o_q = Q(organization_name__icontains=word)
org_contacts = org_contacts.filter(o_q | common_q)
# Finally combine matches; all membership for which there are matching
# contacts or aliases
person_q = Q(person__in=person_contacts)
org_q = Q(organization__in=org_contacts)
alias_q = Q(alias__name__in=words)
qs = Membership.objects.filter(person_q | org_q | alias_q).distinct()
qs = qs.order_by("organization__organization_name",
"person__last_name",
"person__first_name")
return qs
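    # Search syntax sketch, based on the parsing above:
    #   Membership.search('#42')        -> exact match on membership id 42
    #   Membership.search('"Smith"')    -> exact (case-insensitive) name match
    #   Membership.search('smith 040')  -> fuzzy match on names/email/phone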
@classmethod
def paper_reminder_sent_unpaid_after(cls, days=14):
unpaid_filter = Q(billingcycle__is_paid=False)
type_filter = Q(type=BILL_PAPER)
date_filter = Q(due_date__lt=datetime.now() - timedelta(days=days))
not_deleted_filter = Q(billingcycle__membership__status__exact=STATUS_APPROVED)
bill_qs = Bill.objects.filter(unpaid_filter, type_filter, date_filter,
not_deleted_filter)
membership_ids = set()
for bill in bill_qs:
membership_ids.add(bill.billingcycle.membership.id)
return Membership.objects.filter(id__in=membership_ids)
def __repr__(self):
return "<Membership(%s): %s (%i)>" % (self.type, str(self), self.id)
def __str__(self):
if self.organization:
return str(self.organization)
else:
if self.person:
return str(self.person)
else:
return "#%d" % self.id
class Fee(models.Model):
type = models.CharField(max_length=1, choices=MEMBER_TYPES, verbose_name=_('Fee type'))
start = models.DateTimeField(_('Valid from date'))
sum = models.DecimalField(_('Sum'), max_digits=6, decimal_places=2)
vat_percentage = models.IntegerField(_('VAT percentage'))
def __str__(self):
return "Fee for %s, %s euros, %s%% VAT, %s--" % \
(self.get_type_display(), str(self.sum), str(self.vat_percentage), str(self.start))
class BillingCycleManager(models.Manager):
def get_query_set(self):
return BillingCycleQuerySet(self.model)
class BillingCycleQuerySet(QuerySet):
def sort(self, sortkey):
sortkey = sortkey.strip()
if sortkey == "name":
return self.order_by("membership__person__first_name",
"membership__organization__organization_name")
elif sortkey == "-name":
return self.order_by("membership__person__first_name",
"memership__organization__organization_name").reverse()
elif sortkey == "last_name":
return self.order_by("membership__person__last_name",
"membership__organization__organization_name")
elif sortkey == "-last_name":
return self.order_by("membership__person__last_name",
"membership__organization__organization_name"
).reverse()
elif sortkey == "reminder_count":
return self.annotate(reminder_sum=Sum('bill__reminder_count')
).order_by('reminder_sum')
elif sortkey == "-reminder_count":
return self.annotate(reminder_sum=Sum('bill__reminder_count')
).order_by('reminder_sum').reverse()
return self.order_by(sortkey)
class BillingCycle(models.Model):
class Meta:
permissions = (
("read_bills", "Can read billing details"),
("manage_bills", "Can manage billing"),
)
membership = models.ForeignKey('Membership', verbose_name=_('Membership'), on_delete=models.PROTECT)
start = models.DateTimeField(default=django.utils.timezone.now, verbose_name=_('Start'))
end = models.DateTimeField(verbose_name=_('End'))
    sum = models.DecimalField(_('Sum'), max_digits=6, decimal_places=2)  # This limits sum to 9999.99
is_paid = models.BooleanField(default=False, verbose_name=_('Is paid'))
    # NOT an integer, since reference numbers can begin with a leading zero. XXX: format?
reference_number = models.CharField(max_length=64, verbose_name=_('Reference number'))
logs = property(_get_logs)
objects = BillingCycleManager()
def first_bill_sent_on(self):
try:
first_sent_date = self.bill_set.order_by('created')[0].created
return first_sent_date
except IndexError:
# No bills sent yet
return None
def last_bill(self):
try:
return self.bill_set.latest("due_date")
except ObjectDoesNotExist:
return None
def first_bill(self):
try:
return self.bill_set.order_by('due_date')[0]
except IndexError:
return None
def is_first_bill_late(self):
if self.is_paid:
return False
try:
first_due_date = self.bill_set.order_by('due_date')[0].due_date
except IndexError:
# No bills sent yet
return False
if datetime.now() > first_due_date:
return True
return False
def is_last_bill_late(self):
if self.is_paid or self.last_bill() is None:
return False
if datetime.now() > self.last_bill().due_date:
return True
return False
def amount_paid(self):
data = self.payment_set.aggregate(Sum('amount'))['amount__sum']
if data is None:
data = Decimal('0')
return data
def update_is_paid(self, user=None):
was_paid = self.is_paid
total_paid = self.amount_paid()
if not was_paid and total_paid >= self.sum:
self.is_paid = True
self.save()
logger.info("BillingCycle %s marked as paid, total paid: %.2f." % (
repr(self), total_paid))
elif was_paid and total_paid < self.sum:
self.is_paid = False
self.save()
logger.info("BillingCycle %s marked as unpaid, total paid: %.2f." % (
repr(self), total_paid))
if user:
log_change(self, user, change_message="Marked as paid")
def get_fee(self):
for_this_type = Q(type=self.membership.type)
not_before_start = Q(start__lte=self.start)
fees = Fee.objects.filter(for_this_type, not_before_start)
valid_fee = fees.latest('start').sum
return valid_fee
def get_vat_percentage(self):
for_this_type = Q(type=self.membership.type)
not_before_start = Q(start__lte=self.start)
fees = Fee.objects.filter(for_this_type, not_before_start)
vat_percentage = fees.latest('start').vat_percentage
return vat_percentage
def is_cancelled(self):
first_bill = self.first_bill()
if first_bill:
return first_bill.is_cancelled()
return False
def get_rf_reference_number(self):
"""
Get reference number in international RFXX format.
For example 218012 is formatted as RF28218012 where 28 is checksum
:return: RF formatted reference number
"""
# Magic 2715 is "RF" in number encoded format and
# zeros are placeholders for modulus calculation.
reference_number_int = int(''.join(self.reference_number.split()) + '271500')
modulo = reference_number_int % 97
return "RF%02d%s" % (98 - modulo, reference_number_int)
@classmethod
def get_reminder_billingcycles(cls, memberid=None):
"""
Get queryset for BillingCycles with missing payments and witch have 2 or more bills already sent.
:param memberid:
:return:
"""
if not settings.ENABLE_REMINDERS:
return cls.objects.none()
qs = cls.objects
# Single membership case
if memberid:
logger.info('memberid: %s' % memberid)
qs = qs.filter(membership__id=memberid)
qs = qs.exclude(bill__type=BILL_PAPER)
return qs
# For all memberships in Approved state
qs = qs.annotate(bills=Count('bill'))
qs = qs.filter(bills__gt=2,
is_paid__exact=False,
membership__status=STATUS_APPROVED,
membership__id__gt=-1)
qs = qs.exclude(bill__type=BILL_PAPER)
qs = qs.order_by('start')
return qs
@classmethod
def get_pdf_reminders(cls, memberid=None):
buffer = BytesIO()
cycles = cls.create_paper_reminder_list(memberid)
if len(cycles) == 0:
return None
create_reminder_pdf(cycles, buffer, payments=Payment)
pdf_content = buffer.getvalue()
buffer.close()
return pdf_content
@classmethod
def create_paper_reminder_list(cls, memberid=None):
"""
Create list of BillingCycles with missing payments and which already don't have paper bill.
:param memberid: optional member id
:return: list of billingcycles
"""
datalist = []
for cycle in cls.get_reminder_billingcycles(memberid).all():
# check if paper reminder already sent
cont = False
for bill in cycle.bill_set.all():
if bill.type == BILL_PAPER:
cont = True
break
if cont:
continue
datalist.append(cycle)
return datalist
def end_date(self):
"""Logical end date
This is one day before actual end since actual end is a timestamp.
The end date is the previous day.
E.g. 2015-01-01 -- 2015-12-31
"""
day = timedelta(days=1)
return self.end.date()-day
def __str__(self):
return str(self.start.date()) + "--" + str(self.end_date())
def save(self, *args, **kwargs):
if not self.end:
self.end = self.start + timedelta(days=365)
if (self.end.day != self.start.day):
# Leap day
self.end += timedelta(days=1)
if not self.reference_number:
self.reference_number = generate_membership_bill_reference_number(self.membership.id, self.start.year)
if not self.sum:
self.sum = self.get_fee()
super(BillingCycle, self).save(*args, **kwargs)
cache_storage = FileSystemStorage(location=settings.CACHE_DIRECTORY)
class CancelledBill(models.Model):
"""List of bills that have been cancelled"""
bill = models.OneToOneField('Bill', verbose_name=_('Original bill'), on_delete=models.PROTECT)
created = models.DateTimeField(auto_now_add=True, verbose_name=_('Created'))
exported = models.BooleanField(default=False)
logs = property(_get_logs)
def save(self, *args, **kwargs):
if self.bill.is_reminder():
raise ValueError("Can not cancel reminder bills")
super(CancelledBill, self).save(*args, **kwargs)
class Bill(models.Model):
billingcycle = models.ForeignKey(BillingCycle, verbose_name=_('Cycle'), on_delete=models.PROTECT)
reminder_count = models.IntegerField(default=0, verbose_name=_('Reminder count'))
due_date = models.DateTimeField(verbose_name=_('Due date'))
created = models.DateTimeField(auto_now_add=True, verbose_name=_('Created'))
last_changed = models.DateTimeField(auto_now=True, verbose_name=_('Last changed'))
pdf_file = models.FileField(upload_to="bill_pdfs", storage=cache_storage, null=True)
type = models.CharField(max_length=1, choices=BILL_TYPES, blank=False, null=False, verbose_name=_('Bill type'), default='E')
logs = property(_get_logs)
def is_due(self):
return self.due_date < datetime.now()
def __str__(self):
return '{sent_on} {date}'.format(sent_on=_('Sent on'), date=str(self.created))
def save(self, *args, **kwargs):
if not self.due_date:
self.due_date = datetime.now() + timedelta(days=settings.BILL_DAYS_TO_DUE)
# Second is from reminder_count so that tests can assume due_date
# is monotonically increasing
self.due_date = self.due_date.replace(hour=23, minute=59, second=self.reminder_count % 60)
super(Bill, self).save(*args, **kwargs)
def is_reminder(self):
return self.reminder_count > 0
def is_cancelled(self):
try:
if self.cancelledbill is not None:
return True
except CancelledBill.DoesNotExist:
pass
return False
# FIXME: different template based on class? should this code be here?
def render_as_text(self):
"""
Renders the object as text suitable for sending as e-mail.
"""
membership = self.billingcycle.membership
vat = Decimal(self.billingcycle.get_vat_percentage()) / Decimal(100)
if not self.is_reminder():
non_vat_amount = (self.billingcycle.sum / (Decimal(1) + vat))
return render_to_string('membership/bill.txt', {
'membership_type' : MEMBER_TYPES_DICT[membership.type],
'membership_type_raw' : membership.type,
'bill_id': self.id,
'member_id': membership.id,
'member_name': membership.name(),
'billing_contact': membership.billing_contact,
'billing_name': str(membership.get_billing_contact()),
'street_address': membership.get_billing_contact().street_address,
'postal_code': membership.get_billing_contact().postal_code,
'post_office': membership.get_billing_contact().post_office,
'country': membership.get_billing_contact().country,
'billingcycle': self.billingcycle,
'iban_account_number': settings.IBAN_ACCOUNT_NUMBER,
'bic_code': settings.BIC_CODE,
'due_date': self.due_date,
'today': datetime.now(),
'reference_number': group_right(self.billingcycle.reference_number),
'sum': self.billingcycle.sum,
'vat_amount': vat * non_vat_amount,
'non_vat_amount': non_vat_amount,
'vat_percentage': self.billingcycle.get_vat_percentage(),
'barcode': barcode_4(iban = settings.IBAN_ACCOUNT_NUMBER,
refnum = self.billingcycle.reference_number,
duedate = self.due_date,
euros = self.billingcycle.sum)
})
else:
amount_paid = self.billingcycle.amount_paid()
sum = self.billingcycle.sum - amount_paid
non_vat_amount = sum / (Decimal(1) + vat)
return render_to_string('membership/reminder.txt', {
'membership_type' : MEMBER_TYPES_DICT[membership.type],
'membership_type_raw' : membership.type,
'bill_id': self.id,
'member_id': membership.id,
'member_name': membership.name(),
'billing_contact': membership.billing_contact,
'billing_name': str(membership.get_billing_contact()),
'street_address': membership.get_billing_contact().street_address,
'postal_code': membership.get_billing_contact().postal_code,
'post_office': membership.get_billing_contact().post_office,
'municipality': membership.municipality,
'billing_email': membership.get_billing_contact().email,
'email': membership.primary_contact().email,
'billingcycle': self.billingcycle,
'iban_account_number': settings.IBAN_ACCOUNT_NUMBER,
'bic_code': settings.BIC_CODE,
'today': datetime.now(),
'latest_recorded_payment': Payment.latest_payment_date(),
'reference_number': group_right(self.billingcycle.reference_number),
'original_sum': self.billingcycle.sum,
'amount_paid': amount_paid,
'sum': sum,
'vat_amount': vat * non_vat_amount,
'non_vat_amount': non_vat_amount,
'vat_percentage': self.billingcycle.get_vat_percentage(),
'barcode': barcode_4(iban = settings.IBAN_ACCOUNT_NUMBER,
refnum = self.billingcycle.reference_number,
duedate = None,
euros = sum)
})
def generate_pdf(self):
"""
Generate pdf and return pdf content
"""
return get_bill_pdf(self, payments=Payment)
# FIXME: Should save sending date
def send_as_email(self):
membership = self.billingcycle.membership
if self.billingcycle.sum > 0:
ret_items = send_as_email.send_robust(self.__class__, instance=self)
for item in ret_items:
sender, error = item
                if error is not None:
logger.error("%s" % traceback.format_exc())
logger.exception("Error while sending email")
raise error
else:
self.billingcycle.is_paid = True
logger.info('Bill not sent: membership fee zero for %s: %s' % (
membership.email, repr(Bill)))
self.billingcycle.save()
def bill_subject(self):
if not self.is_reminder():
subject = settings.BILL_SUBJECT
else:
subject = settings.REMINDER_SUBJECT
return subject.format(id=self.id)
def reference_number(self):
return self.billingcycle.reference_number
class Payment(models.Model):
    """
    Payment object for billing
    """
    class Meta:
        permissions = (
            ("can_import_payments", "Can import payment data"),
        )
# While Payment refers to BillingCycle, the architecture scales to support
# recording payments that are not related to any billingcycle for future
# extension
billingcycle = models.ForeignKey('BillingCycle', verbose_name=_('Cycle'), null=True, on_delete=models.PROTECT)
ignore = models.BooleanField(default=False, verbose_name=_('Ignored payment'))
comment = models.CharField(max_length=64, verbose_name=_('Comment'), blank=True)
reference_number = models.CharField(max_length=64, verbose_name=_('Reference number'), blank=True)
message = models.CharField(max_length=256, verbose_name=_('Message'), blank=True)
transaction_id = models.CharField(max_length=30, verbose_name=_('Transaction id'), unique=True)
payment_day = models.DateTimeField(verbose_name=_('Payment day'))
# This limits sum to 9999999.99
amount = models.DecimalField(max_digits=9, decimal_places=2, verbose_name=_('Amount'))
type = models.CharField(max_length=64, verbose_name=_('Type'))
payer_name = models.CharField(max_length=64, verbose_name=_('Payer name'))
duplicate = models.BooleanField(verbose_name=_('Duplicate payment'), blank=False, null=False, default=False)
logs = property(_get_logs)
def __str__(self):
return "%.2f euros (reference '%s', date '%s')" % (self.amount, self.reference_number, self.payment_day)
def attach_to_cycle(self, cycle, user=None):
if self.billingcycle:
raise PaymentAttachedError("Payment %s already attached to BillingCycle %s." % (repr(self), repr(cycle)))
self.billingcycle = cycle
self.ignore = False
self.save()
logger.info("Payment %s attached to member %s cycle %s." % (repr(self),
cycle.membership.id, repr(cycle)))
if user:
log_change(self, user, change_message="Attached to billing cycle")
cycle.update_is_paid(user=user)
def detach_from_cycle(self, user=None):
if not self.billingcycle:
return
cycle = self.billingcycle
logger.info("Payment %s detached from cycle %s." % (repr(self),
repr(cycle)))
self.billingcycle = None
self.save()
if user:
log_change(self, user, change_message="Detached from billing cycle")
cycle.update_is_paid()
def send_duplicate_payment_notice(self, user, **kwargs):
if not user:
            raise Exception('send_duplicate_payment_notice requires a user object as parameter')
billingcycle = BillingCycle.objects.get(reference_number=self.reference_number)
if billingcycle.sum > 0:
ret_items = send_duplicate_payment_notice.send_robust(self.__class__, instance=self, user=user,
billingcycle=billingcycle)
for item in ret_items:
sender, error = item
if error is not None:
logger.error("%s" % traceback.format_exc())
raise error
log_change(self, user, change_message="Duplicate payment notice sent")
@classmethod
def latest_payment_date(cls):
try:
return Payment.objects.latest("payment_day").payment_day
except Payment.DoesNotExist:
return None
class ApplicationPoll(models.Model):
"""
Store statistics taken from membership application "where did you
hear about us" poll.
"""
membership = models.ForeignKey('Membership', verbose_name=_('Membership'), on_delete=models.PROTECT)
date = models.DateTimeField(auto_now=True, verbose_name=_('Timestamp'))
answer = models.CharField(max_length=512, verbose_name=_('Service specific data'))
models.signals.post_save.connect(logging_log_change, sender=Membership)
models.signals.post_save.connect(logging_log_change, sender=Contact)
models.signals.post_save.connect(logging_log_change, sender=BillingCycle)
models.signals.post_save.connect(logging_log_change, sender=Bill)
models.signals.post_save.connect(logging_log_change, sender=Fee)
models.signals.post_save.connect(logging_log_change, sender=Payment)
# These are registered here due to import madness and general clarity
send_as_email.connect(bill_sender, sender=Bill, dispatch_uid="email_bill")
send_preapprove_email.connect(preapprove_email_sender, sender=Membership,
dispatch_uid="preapprove_email")
send_duplicate_payment_notice.connect(duplicate_payment_sender, sender=Payment,
dispatch_uid="duplicate_payment_notice")
| 41.149767
| 128
| 0.628651
| 40,604
| 0.917895
| 0
| 0
| 6,739
| 0.152342
| 0
| 0
| 8,305
| 0.187743
|
830421c0eef174df1951cc79db82af6869f9e1bc
| 177
|
py
|
Python
|
napari_imc/io/__init__.py
|
neuromusic/napari-imc
|
ce2ff998b33b49f19a786585cc2cb8e59db74c24
|
[
"MIT"
] | 4
|
2021-01-29T15:11:37.000Z
|
2021-03-01T02:04:24.000Z
|
napari_imc/io/__init__.py
|
neuromusic/napari-imc
|
ce2ff998b33b49f19a786585cc2cb8e59db74c24
|
[
"MIT"
] | 25
|
2021-01-19T01:49:13.000Z
|
2022-02-09T10:46:41.000Z
|
napari_imc/io/__init__.py
|
neuromusic/napari-imc
|
ce2ff998b33b49f19a786585cc2cb8e59db74c24
|
[
"MIT"
] | 3
|
2021-01-29T17:31:05.000Z
|
2022-03-25T10:23:32.000Z
|
from .imaxt import ImaxtFileReader
from .mcd import McdFileReader
from .txt import TxtFileReader
__all__ = [
'ImaxtFileReader',
'McdFileReader',
'TxtFileReader',
]
| 17.7
| 34
| 0.734463
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 47
| 0.265537
|
83043d6bcc47235264f0457736e61baf87cbac95
| 2,449
|
py
|
Python
|
eval.py
|
ldzhangyx/TCN-for-beat-tracking
|
8e09ba5b2f222a4944a8bd039987a01240ae778d
|
[
"BSD-3-Clause"
] | 3
|
2021-03-22T01:59:52.000Z
|
2022-01-22T11:08:56.000Z
|
eval.py
|
ldzhangyx/TCN-for-beat-tracking
|
8e09ba5b2f222a4944a8bd039987a01240ae778d
|
[
"BSD-3-Clause"
] | 1
|
2021-06-21T19:14:35.000Z
|
2021-06-21T19:14:35.000Z
|
eval.py
|
ldzhangyx/TCN-for-beat-tracking
|
8e09ba5b2f222a4944a8bd039987a01240ae778d
|
[
"BSD-3-Clause"
] | 1
|
2021-03-22T01:59:57.000Z
|
2021-03-22T01:59:57.000Z
|
import torch
from torch.utils.data import Dataset
import numpy as np
import os
import pickle
from madmom.features import DBNBeatTrackingProcessor
from model import BeatTrackingNet
from utils import init_single_spec
from mir_eval.beat import evaluate
from data import BallroomDataset
from beat_tracker import predict_beats_from_spectrogram
import yaml
import sys
import pdb
# import config
with open('config.yaml', 'r') as f:
config = yaml.load(f, Loader=yaml.FullLoader)
def evaluate_model(
model_checkpoint,
spectrogram,
ground_truth):
"""
Given a model checkpoint, a single spectrogram, and the corresponding
ground truth, evaluate the model's performance on all beat tracking metrics
offered by mir_eval.beat.
"""
prediction = predict_beats_from_spectrogram(
spectrogram,
model_checkpoint)
scores = evaluate(ground_truth, prediction)
return scores
def evaluate_model_on_dataset(
model_checkpoint,
dataset,
ground_truths):
"""
Run through a whole instance of torch.utils.data.Dataset and compare the
model's predictions to the given ground truths.
"""
# Create dicts to store scores and histories
mean_scores = {}
running_scores = {}
# Iterate over dataset
for i in range(len(dataset)):
spectrogram = dataset[i]["spectrogram"].unsqueeze(0)
ground_truth = ground_truths[i]
scores = evaluate_model(
model_checkpoint,
spectrogram,
ground_truth)
beat_scores = scores
for metric in beat_scores:
if metric not in running_scores:
running_scores[metric] = 0.0
running_scores[metric] += beat_scores[metric]
        # Each iteration, print the current index and the running score
        # totals as a simple progress report.
        print(f"{i}, {str(running_scores)}")
# After all iterations, calculate mean scores.
for metric in running_scores:
mean_scores[metric] = running_scores[metric] / (i + 1)
# Return a dictionary of helpful information
return {
"total_examples": i + 1,
"scores": mean_scores
}
dataset = BallroomDataset()
# Use a list, not a generator: evaluate_model_on_dataset indexes ground_truths
ground_truths = [dataset.get_ground_truth(i) for i in range(len(dataset))]
# Run evaluation
evaluate_model_on_dataset(config['default_checkpoint_path'],
dataset,
ground_truths)
| 25.510417
| 79
| 0.685178
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 728
| 0.297264
|
830448984e5a77e90d22cacc683d54197d1adc44
| 130,468
|
py
|
Python
|
pycity_calc/cities/scripts/city_generator/city_generator.py
|
RWTH-EBC/pyCity_calc
|
99fd0dab7f9a9030fd84ba4715753364662927ec
|
[
"MIT"
] | 4
|
2020-06-22T14:14:25.000Z
|
2021-11-08T11:47:01.000Z
|
pycity_calc/cities/scripts/city_generator/city_generator.py
|
RWTH-EBC/pyCity_calc
|
99fd0dab7f9a9030fd84ba4715753364662927ec
|
[
"MIT"
] | 4
|
2019-08-28T19:42:28.000Z
|
2019-08-28T19:43:44.000Z
|
pycity_calc/cities/scripts/city_generator/city_generator.py
|
RWTH-EBC/pyCity_calc
|
99fd0dab7f9a9030fd84ba4715753364662927ec
|
[
"MIT"
] | null | null | null |
# coding=utf-8
"""
Script to generate city object.
"""
from __future__ import division
import os
import numpy as np
import pickle
import warnings
import random
import datetime
import shapely.geometry.point as point
import pycity_base.classes.Weather as weath
import pycity_base.classes.demand.SpaceHeating as SpaceHeating
import pycity_base.classes.demand.ElectricalDemand as ElectricalDemand
import pycity_base.classes.demand.Apartment as Apartment
import pycity_base.classes.demand.DomesticHotWater as DomesticHotWater
import pycity_base.classes.demand.Occupancy as occup
import pycity_calc.environments.timer as time
# import pycity_calc.environments.market as price
import pycity_calc.environments.germanmarket as germanmarket
import pycity_calc.environments.environment as env
import pycity_calc.environments.co2emissions as co2
import pycity_calc.buildings.building as build_ex
import pycity_calc.cities.city as city
import pycity_calc.visualization.city_visual as citvis
import pycity_calc.toolbox.modifiers.slp_th_manipulator as slpman
import pycity_calc.toolbox.teaser_usage.teaser_use as tusage
import pycity_calc.toolbox.mc_helpers.user.user_unc_sampling as usunc
try:
import teaser.logic.simulation.VDI_6007.weather as vdiweather
except: # pragma: no cover
msg = 'Could not import teaser.logic.simulation.VDI_6007.weather. ' \
'If you need to use it, install ' \
'it via pip "pip install TEASER". Alternatively, you might have ' \
'run into trouble with XML bindings in TEASER. This can happen ' \
          'if you try to re-import TEASER within an active Python console. ' \
'Please close the active Python console and open another one. Then' \
' try again. You might also be on the wrong TEASER branch ' \
'(without VDI 6007 core).'
warnings.warn(msg)
def load_data_file_with_spec_demand_data(filename):
"""
Function loads and returns data from
.../src/data/BaseData/Specific_Demand_Data/filename.
Filename should hold float (or int) values.
Other values (e.g. strings) will be loaded as 'nan'.
Parameter
---------
filename : str
String with name of file, e.g. 'district_data.txt'
Returns
-------
dataset : numpy array
Numpy array with data
"""
    src_path = os.path.dirname(os.path.dirname(os.path.dirname(
        os.path.dirname(os.path.abspath(__file__)))))
input_data_path = os.path.join(src_path, 'data', 'BaseData',
'Specific_Demand_Data', filename)
dataset = np.genfromtxt(input_data_path, delimiter='\t', skip_header=1)
return dataset
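# Usage sketch (the file name below comes from the docstring above):
#   district_data = load_data_file_with_spec_demand_data('district_data.txt')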
def convert_th_slp_int_and_str(th_slp_int):
"""
Converts thermal slp type integer into string
Parameters
----------
th_slp_int : int
SLP type integer number
Returns
-------
th_slp_tag : str
SLP type string
Annotations
-----------
- `HEF` : Single family household
- `HMF` : Multi family household
- `GBA` : Bakeries
- `GBD` : Other services
    - `GBH` : Accommodations
- `GGA` : Restaurants
- `GGB` : Gardening
- `GHA` : Retailers
- `GHD` : Summed load profile business, trade and services
- `GKO` : Banks, insurances, public institutions
- `GMF` : Household similar businesses
- `GMK` : Automotive
- `GPD` : Paper and printing
- `GWA` : Laundries
"""
if th_slp_int is None:
msg = 'th_slp_int is None. Going to return None.'
warnings.warn(msg)
return None
slp_th_profile_dict_tag = {0: 'HEF',
1: 'HMF',
2: 'GMF',
3: 'GMK',
4: 'GPD',
5: 'GHA',
6: 'GBD',
7: 'GKO',
8: 'GBH',
9: 'GGA',
10: 'GBA',
11: 'GWA',
12: 'GGB',
13: 'GHD'}
th_slp_tag = slp_th_profile_dict_tag[th_slp_int]
return th_slp_tag
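# Minimal usage sketch for the integer-to-tag converters (the same
# dictionary lookup pattern is reused by convert_el_slp_int_and_str,
# convert_method_3_nb_into_str, convert_method_4_nb_into_str and
# conv_build_type_nb_to_name):
#
#   assert convert_th_slp_int_and_str(0) == 'HEF'  # single family household
#   assert convert_th_slp_int_and_str(1) == 'HMF'  # multi family household
#   convert_th_slp_int_and_str(None)  # warns and returns None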
def convert_el_slp_int_and_str(el_slp_int):
"""
Converts el slp type integer into string
Parameters
----------
el_slp_int : int
SLP type integer number
Returns
-------
el_slp_tag : str
SLP type string
Annotations
-----------
# 0: H0 : Residential
# 1: G0 : Commercial
# 2: G1 : Commercial Mo-Sa 08:00 to 18:00
# 3: G2 : Commercial, mainly evening hours
# 4: G3 : Commercial 24 hours
# 5: G4 : Shop / hairdresser
# 6: G5 : Bakery
# 7: G6 : Commercial, weekend
# 8: L0 : Farm
# 9: L1 : Farm, mainly cattle and milk
# 10: L2 : Other farming
"""
if el_slp_int is None:
msg = 'el_slp_int is None. Going to return None.'
warnings.warn(msg)
return None
slp_el_profile_dict_tag = {0: 'H0',
1: 'G0',
2: 'G1',
3: 'G2',
4: 'G3',
5: 'G4',
6: 'G5',
7: 'G6',
8: 'L0',
9: 'L1',
10: 'L2'}
el_slp_tag = slp_el_profile_dict_tag[el_slp_int]
return el_slp_tag
def convert_method_3_nb_into_str(method_3_nb):
"""
Converts method_3_nb into string
Parameters
----------
method_3_nb : int
Number of method 3
Returns
-------
method_3_str : str
String of method 3
"""
if method_3_nb is None:
msg = 'method_3_nb is None. Going to return None.'
warnings.warn(msg)
return None
dict_method_3 = {0: 'food_pro',
1: 'metal',
2: 'rest',
3: 'sports',
4: 'repair'}
method_3_str = dict_method_3[method_3_nb]
return method_3_str
def convert_method_4_nb_into_str(method_4_nb):
"""
Converts method_4_nb into string
Parameters
----------
method_4_nb : int
Number of method 4
Returns
-------
method_4_str : str
String of method 4
"""
if method_4_nb is None:
msg = 'method_4_nb is None. Going to return None.'
warnings.warn(msg)
return None
dict_method_4 = {0: 'metal_1', 1: 'metal_2', 2: 'warehouse'}
method_4_str = dict_method_4[method_4_nb]
return method_4_str
def conv_build_type_nb_to_name(build_type):
"""
Convert build_type number to name / explanation
Parameters
----------
build_type : int
Building type number, based on Spec_demands_non_res.txt
Returns
-------
build_name : str
Building name / explanation
"""
if build_type is None:
msg = 'build_type is None. Going to return None for build_name.'
warnings.warn(msg)
return None
dict_b_name = {
0: 'Residential',
1: 'Office (simulation)',
2: 'Main construction work',
3: 'Finishing trade construction work',
4: 'Bank and insurance',
5: 'Public institution',
6: 'Non profit organization',
7: 'Small office buildings',
8: 'Other services',
9: 'Metal',
10: 'Automobile',
11: 'Wood and timber',
12: 'Paper',
13: 'Small retailer for food',
14: 'Small retailer for non-food',
15: 'Large retailer for food',
16: 'Large retailer for non-food',
17: 'Primary school',
18: 'School for physically handicapped',
19: 'High school',
20: 'Trade school',
21: 'University',
22: 'Hotel',
23: 'Restaurant',
24: "Children's home",
25: 'Bakery',
26: 'Butcher',
27: 'Laundry',
28: 'Farm primary agriculture',
29: 'Farm with 10 - 49 cattle units',
30: 'Farm with 50 - 100 cattle units',
31: 'Farm with more than 100 cattle units',
32: 'Gardening',
33: 'Hospital',
34: 'Library',
35: 'Prison',
36: 'Cinema',
37: 'Theater',
38: 'Parish hall',
39: 'Sports hall',
40: 'Multi purpose hall',
41: 'Swimming hall',
42: 'Club house',
43: 'Fitness studio',
44: 'Train station smaller than 5000 m2',
45: 'Train station equal to or larger than 5000 m2'
}
return dict_b_name[build_type]
def constrained_sum_sample_pos(n, total):
"""
Return a randomly chosen list of n positive integers summing to total.
Each such list is equally likely to occur.
Parameters
----------
n : int
Number of chosen integers
total : int
Sum of all entries of result list
Returns
-------
results_list : list (of int)
List with result integers, which sum up to value 'total'
"""
dividers = sorted(random.sample(range(1, int(total)), int(n - 1)))
list_occ = [a - b for a, b in zip(dividers + [total], [0] + dividers)]
for i in range(len(list_occ)):
list_occ[i] = int(list_occ[i])
return list_occ
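# Worked example (sketch): distribute 7 occupants over 3 apartments.
# n - 1 distinct divider positions are sampled from [1, total); the gaps
# between consecutive dividers are returned and always sum to 'total':
#
#   occ_list = constrained_sum_sample_pos(n=3, total=7)
#   assert sum(occ_list) == 7 and len(occ_list) == 3  # e.g. [2, 4, 1]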
def redistribute_occ(occ_list):
"""
Redistribute occupants in occ_list, so that each apartment is having at
least 1 person and maximal 5 persons.
Parameters
----------
occ_list
Returns
-------
occ_list_new : list
List holding number of occupants per apartment
"""
occ_list_new = occ_list[:]
if sum(occ_list_new) / len(occ_list_new) > 5: # pragma: no cover
msg = 'Average number of occupants per apartment is higher than 5.' \
' This is not valid for usage of Richardson profile generator.'
raise AssertionError(msg)
# Number of occupants to be redistributed
nb_occ_redist = 0
# Find remaining occupants
# ###############################################################
for i in range(len(occ_list_new)):
if occ_list_new[i] > 5:
# Add remaining occupants to nb_occ_redist
nb_occ_redist += occ_list_new[i] - 5
# Set occ_list_new entry to 5 persons
occ_list_new[i] = 5
if nb_occ_redist == 0:
# Return original list
return occ_list_new
# Identify empty apartments and add single occupant
# ###############################################################
for i in range(len(occ_list_new)):
if occ_list_new[i] == 0:
# Add single occupant
occ_list_new[i] = 1
# Remove occupant from nb_occ_redist
nb_occ_redist -= 1
if nb_occ_redist == 0:
# Return updated list
return occ_list_new
# Redistribute remaining occupants
# ###############################################################
for i in range(len(occ_list_new)):
if occ_list_new[i] < 5:
# Fill occupants up with remaining occupants
for j in range(5 - occ_list_new[i]):
# Add single occupant
occ_list_new[i] += 1
# Remove single occupant from remaining sum
nb_occ_redist -= 1
if nb_occ_redist == 0:
# Return updated list
return occ_list_new
if nb_occ_redist: # pragma: no cover
raise AssertionError('Not all occupants could be distributed.'
'Check inputs and/or redistribute_occ() call.')
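# Worked example (sketch): entries above 5 are capped first, empty
# apartments then receive one occupant each, and any remainder is
# filled up to at most 5 occupants per apartment:
#
#   assert redistribute_occ([7, 0, 3]) == [5, 2, 3]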
def generate_environment(timestep=3600,
year_timer=2017,
year_co2=2017,
try_path=None,
location=(51.529086, 6.944689),
altitude=55,
new_try=False):
"""
Returns environment object. Total number of timesteps is automatically
generated for one year.
Parameters
----------
timestep : int
Timestep in seconds
year_timer : int, optional
Chosen year of analysis (default: 2017)
(influences initial day for profile generation)
year_co2 : int, optional
Chose year with specific emission factors (default: 2017)
try_path : str, optional
Path to TRY weather file (default: None)
If set to None, uses default weather TRY file (2010, region 5)
location : Tuple, optional
(latitude , longitude) of the simulated system's position,
(default: (51.529086, 6.944689), i.e. Bottrop, Germany)
altitude : float, optional
Altitude of location in m (default: 55 - City of Bottrop)
new_try : bool, optional
Defines, if TRY dataset has been generated after 2017 (default: False)
If False, assumes that TRY dataset has been generated before 2017.
If True, assumes that TRY dataset has been generated after 2017 and
belongs to the new TRY classes. This is important for extracting
the correct values from the TRY dataset!
Returns
-------
environment : object
Environment object
"""
# Create environment
timer = time.TimerExtended(timestep=timestep, year=year_timer)
weather = weath.Weather(timer, useTRY=True, pathTRY=try_path,
location=location, altitude=altitude,
new_try=new_try)
market = germanmarket.GermanMarket()
co2em = co2.Emissions(year=year_co2)
environment = env.EnvironmentExtended(timer=timer,
weather=weather,
prices=market,
location=location,
co2em=co2em)
return environment
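# Minimal usage sketch (uses the default TRY weather data and the Bottrop
# default location; assumes pycity_base and pycity_calc are installed):
#
#   environment = generate_environment(timestep=3600, year_timer=2017)
#   print(environment.timer.timeDiscretization)  # 3600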
def generate_res_building_single_zone(environment, net_floor_area,
spec_th_demand,
th_gen_method,
el_gen_method,
annual_el_demand=None,
el_random=False,
use_dhw=False,
dhw_method=1, number_occupants=None,
build_year=None, mod_year=None,
build_type=None, pv_use_area=None,
height_of_floors=None, nb_of_floors=None,
neighbour_buildings=None,
residential_layout=None, attic=None,
cellar=None, construction_type=None,
dormer=None, dhw_volumen=None,
do_normalization=True,
slp_manipulate=True,
curr_central_ahu=None,
dhw_random=False, prev_heat_dev=True,
season_mod=None):
"""
Function generates and returns extended residential building object
with single zone.
Parameters
----------
environment : object
Environment object
net_floor_area : float
Net floor area of building in m2
spec_th_demand : float
Specific thermal energy demand in kWh/m2*a
th_gen_method : int
Thermal load profile generation method
1 - Use SLP
2 - Load Modelica simulation output profile (only residential)
Method 2 is only used for residential buildings. For non-res.
buildings, SLPs are generated instead
el_gen_method : int
Electrical generation method
1 - Use SLP
2 - Generate stochastic load profile (only valid for residential
building)
annual_el_demand : float, optional
Annual electrical energy demand in kWh/a (default: None)
el_random : bool, optional
Defines, if random value should be chosen from statistics
or if average value should be chosen. el_random == True means,
use random value. (default: False)
use_dhw : bool, optional
Boolean to define, if domestic hot water profile should be generated
(default: False)
True - Generate dhw profile
dhw_method : int, optional
Domestic hot water profile generation method (default: 1)
1 - Use Annex 42 profile
2 - Use stochastic profile
number_occupants : int, optional
Number of occupants (default: None)
build_year : int, optional
Building year of construction (default: None)
mod_year : int, optional
Last year of modernization of building (default: None)
build_type : int, optional
Building type (default: None)
pv_use_area : float, optional
Usable pv area in m2 (default: None)
height_of_floors : float
average height of single floor
nb_of_floors : int
Number of floors above the ground
neighbour_buildings : int
neighbour (default = 0)
0: no neighbour
1: one neighbour
2: two neighbours
residential_layout : int
type of floor plan (default = 0)
0: compact
1: elongated/complex
attic : int
type of attic (default = 0)
0: flat roof
1: non heated attic
2: partly heated attic
3: heated attic
cellar : int
type of cellar (default = 0)
0: no cellar
1: non heated cellar
2: partly heated cellar
3: heated cellar
construction_type : str
construction type (default = "heavy")
heavy: heavy construction
light: light construction
dormer : int
Defines, if building has dormer
0: no dormer
1: dormer
dhw_volumen : float, optional
Volume of domestic hot water in liter per capita and day
(default: None).
do_normalization : bool, optional
Defines, if stochastic profile (el_gen_method=2) should be
normalized to given annualDemand value (default: True).
If set to False, annual el. demand depends on stochastic el. load
profile generation. If set to True, does normalization with
annualDemand
slp_manipulate : bool, optional
Defines, if thermal space heating SLP profile should be modified
(default: True). Only used for residential buildings!
Only relevant, if th_gen_method == 1
True - Do manipulation
False - Use original profile
Sets thermal power to zero in time spaces, where average daily outdoor
temperature is equal to or larger than 12 °C. Rescales profile to
original demand value.
curr_central_ahu : bool, optional
Defines, if building has air handling unit (AHU)
(default: False)
dhw_random : bool, optional
Defines, if hot water volume per person and day value should be
randomized by choosing value from gaussian distribution (20 %
standard deviation) (default: False)
If True: Randomize value
If False: Use reference value
prev_heat_dev : bool, optional
Defines, if heating devices should be prevented within chosen
appliances (default: True). If set to True, DESWH, E-INST,
Electric shower, Storage heaters and Other electric space heating
are set to zero. Only relevant for el_gen_method == 2
season_mod : float, optional
Float to define rescaling factor to rescale annual lighting power curve
with cosine wave to increase winter usage and decrease summer usage.
Reference is maximum lighting power (default: None). If set to None,
do NOT perform rescaling with cosine wave
Returns
-------
extended_building : object
BuildingExtended object
"""
assert net_floor_area > 0
assert spec_th_demand >= 0
if annual_el_demand is not None:
assert annual_el_demand >= 0
else:
assert number_occupants is not None
assert number_occupants > 0
# Define SLP profiles for residential building with single zone
th_slp_type = 'HEF'
el_slp_type = 'H0'
if number_occupants is not None:
assert number_occupants > 0
assert number_occupants <= 5 # Max 5 occupants for stochastic profile
if el_gen_method == 2 or (dhw_method == 2 and use_dhw == True):
# Generate occupancy profile (necessary for stochastic, el. or
# dhw profile)
occupancy_object = occup.Occupancy(environment,
number_occupants=number_occupants)
else: # Generate occupancy object without profile generation
# Just used to store information about number of occupants
occupancy_object = occup.Occupancy(environment,
number_occupants=number_occupants,
do_profile=False)
else:
occupancy_object = None # Dummy object to prevent error with
# apartment usage
if el_gen_method == 2:
warnings.warn('Stochastic el. profile cannot be generated ' +
'due to missing number of occupants. ' +
'SLP is used instead.')
# Set el_gen_method to 1 (SLP)
el_gen_method = 1
elif dhw_method == 2:
raise AssertionError('DHW profile cannot be generated ' +
'for residential building without ' +
'occupants (stochastic mode). ' +
'Please check your input file ' +
'(missing number of occupants) ' +
'or disable dhw generation.')
if (number_occupants is None and dhw_method == 1 and use_dhw == True):
# Set number of occupants to 2 to enable dhw usage
number_occupants = 2
# Create space heating demand
if th_gen_method == 1:
# Use SLP
heat_power_curve = SpaceHeating.SpaceHeating(environment,
method=1,
profile_type=th_slp_type,
livingArea=net_floor_area,
specificDemand=spec_th_demand)
if slp_manipulate: # Do SLP manipulation
timestep = environment.timer.timeDiscretization
temp_array = environment.weather.tAmbient
mod_curve = \
slpman.slp_th_manipulator(timestep,
th_slp_curve=heat_power_curve.loadcurve,
temp_array=temp_array)
heat_power_curve.loadcurve = mod_curve
elif th_gen_method == 2:
# Use Modelica result profile
heat_power_curve = SpaceHeating.SpaceHeating(environment,
method=3,
livingArea=net_floor_area,
specificDemand=spec_th_demand)
# Calculate el. energy demand for apartment, if no el. energy
# demand is given for whole building to rescale
if annual_el_demand is None:
# Generate annual_el_demand_ap
annual_el_demand = calc_el_dem_ap(nb_occ=number_occupants,
el_random=el_random,
type='sfh')
print('Annual electrical demand in kWh: ', annual_el_demand)
if number_occupants is not None:
print('El. demand per person in kWh: ')
print(annual_el_demand / number_occupants)
print()
# Create electrical power curve
if el_gen_method == 2:
if season_mod is not None:
season_light_mod = True
else:
season_light_mod = False
el_power_curve = ElectricalDemand.ElectricalDemand(environment,
method=2,
total_nb_occupants=number_occupants,
randomizeAppliances=True,
lightConfiguration=0,
annualDemand=annual_el_demand,
occupancy=occupancy_object.occupancy,
do_normalization=do_normalization,
prev_heat_dev=prev_heat_dev,
season_light_mod=season_light_mod,
light_mod_fac=season_mod)
else: # Use el. SLP
el_power_curve = ElectricalDemand.ElectricalDemand(environment,
method=1,
annualDemand=annual_el_demand,
profileType=el_slp_type)
# Create domestic hot water demand
if use_dhw:
if dhw_volumen is None or dhw_random:
dhw_kwh = calc_dhw_dem_ap(nb_occ=number_occupants,
dhw_random=dhw_random,
type='sfh')
# Reconvert kWh/a to Liters per day
dhw_vol_ap = dhw_kwh * 1000 * 3600 * 1000 / (995 * 4182 * 35 * 365)
# DHW volume per person and day
dhw_volumen = dhw_vol_ap / number_occupants
if dhw_method == 1: # Annex 42
dhw_power_curve = DomesticHotWater.DomesticHotWater(environment,
tFlow=60,
thermal=True,
method=1,
# Annex 42
dailyConsumption=dhw_volumen * number_occupants,
supplyTemperature=25)
else: # Stochastic profile
dhw_power_curve = DomesticHotWater.DomesticHotWater(environment,
tFlow=60,
thermal=True,
method=2,
supplyTemperature=25,
occupancy=occupancy_object.occupancy)
# Rescale to reference dhw volume (liters per person
# and day)
curr_dhw_vol_flow = dhw_power_curve.water
# Water volume flow in Liter/hour
curr_volume_year = sum(curr_dhw_vol_flow) * \
environment.timer.timeDiscretization / \
3600
curr_vol_day = curr_volume_year / 365
curr_vol_day_and_person = curr_vol_day / \
occupancy_object.number_occupants
print('Curr. volume per person and day: ',
curr_vol_day_and_person)
dhw_con_factor = dhw_volumen / curr_vol_day_and_person
print('Conv. factor of hot water: ', dhw_con_factor)
print('New volume per person and day: ',
curr_vol_day_and_person * dhw_con_factor)
# Normalize water flow and power load
dhw_power_curve.water *= dhw_con_factor
dhw_power_curve.loadcurve *= dhw_con_factor
# Create apartment
apartment = Apartment.Apartment(environment, occupancy=occupancy_object,
net_floor_area=net_floor_area)
# Add demands to apartment
if th_gen_method == 1 or th_gen_method == 2:
if use_dhw:
apartment.addMultipleEntities([heat_power_curve, el_power_curve,
dhw_power_curve])
else:
apartment.addMultipleEntities([heat_power_curve, el_power_curve])
else:
if use_dhw:
apartment.addMultipleEntities([el_power_curve,
dhw_power_curve])
else:
apartment.addEntity(el_power_curve)
# Create extended building object
extended_building = \
build_ex.BuildingExtended(environment,
build_year=build_year,
mod_year=mod_year,
build_type=build_type,
roof_usabl_pv_area=pv_use_area,
net_floor_area=net_floor_area,
height_of_floors=height_of_floors,
nb_of_floors=nb_of_floors,
neighbour_buildings=neighbour_buildings,
residential_layout=residential_layout,
attic=attic,
cellar=cellar,
construction_type=construction_type,
dormer=dormer,
with_ahu=
curr_central_ahu)
# Add apartment to extended building
extended_building.addEntity(entity=apartment)
return extended_building
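# Minimal usage sketch (single family house with SLP-based thermal and
# electrical profiles; 'environment' as returned by generate_environment):
#
#   building = generate_res_building_single_zone(
#       environment, net_floor_area=150, spec_th_demand=100,
#       th_gen_method=1, el_gen_method=1, number_occupants=3)
#   # building holds one apartment with space heating and el. demand objects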
def generate_res_building_multi_zone(environment,
net_floor_area,
spec_th_demand,
th_gen_method,
el_gen_method,
nb_of_apartments,
annual_el_demand=None,
el_random=False,
use_dhw=False,
dhw_method=1,
total_number_occupants=None,
build_year=None, mod_year=None,
build_type=None, pv_use_area=None,
height_of_floors=None, nb_of_floors=None,
neighbour_buildings=None,
residential_layout=None, attic=None,
cellar=None, construction_type=None,
dormer=None, dhw_volumen=None,
do_normalization=True,
slp_manipulate=True,
curr_central_ahu=False,
dhw_random=False, prev_heat_dev=True,
season_mod=None):
"""
Function generates and returns extended residential building object
with multiple apartments. Occupants are randomly distributed over
number of apartments.
Parameters
----------
environment : object
Environment object
net_floor_area : float
Net floor area of building in m2
spec_th_demand : float
Specific thermal energy demand in kWh/m2*a
annual_el_demand : float, optional
Annual electrical energy demand in kWh/a (default: None)
el_random : bool, optional
Defines, if random value should be chosen from statistics
or if average value should be chosen. el_random == True means,
use random value. (default: False)
th_gen_method : int
Thermal load profile generation method
1 - Use SLP
2 - Load Modelica simulation output profile (only residential)
Method 2 is only used for residential buildings. For non-res.
buildings, SLPs are generated instead
el_gen_method : int
Electrical generation method
1 - Use SLP
2 - Generate stochastic load profile (only valid for residential
building)
nb_of_apartments : int
Number of apartments within building
use_dhw : bool, optional
Boolean to define, if domestic hot water profile should be generated
(default: False)
True - Generate dhw profile
dhw_method : int, optional
Domestic hot water profile generation method (default: 1)
1 - Use Annex 42 profile
2 - Use stochastic profile
total_number_occupants : int, optional
Total number of occupants in all apartments (default: None)
build_year : int, optional
Building year of construction (default: None)
mod_year : int, optional
Last year of modernization of building (default: None)
build_type : int, optional
Building type (default: None)
pv_use_area : float, optional
Usable pv area in m2 (default: None)
height_of_floors : float
average height of the floors
nb_of_floors : int
Number of floors above the ground
neighbour_buildings : int
neighbour (default = 0)
0: no neighbour
1: one neighbour
2: two neighbours
residential_layout : int
type of floor plan (default = 0)
0: compact
1: elongated/complex
attic : int
type of attic (default = 0)
0: flat roof
1: non heated attic
2: partly heated attic
3: heated attic
cellar : int
type of cellar (default = 0)
0: no cellar
1: non heated cellar
2: partly heated cellar
3: heated cellar
construction_type : str
construction type (default = "heavy")
heavy: heavy construction
light: light construction
dormer : int
Defines, if building has dormer
0: no dormer
1: dormer
dhw_volumen : float, optional
Volume of domestic hot water in liter per capita and day
(default: None).
do_normalization : bool, optional
Defines, if stochastic profile (el_gen_method=2) should be
normalized to given annualDemand value (default: True).
If set to False, annual el. demand depends on stochastic el. load
profile generation. If set to True, does normalization with
annualDemand
slp_manipulate : bool, optional
Defines, if thermal space heating SLP profile should be modified
(default: True). Only used for residential buildings!
Only relevant, if th_gen_method == 1
True - Do manipulation
False - Use original profile
Sets thermal power to zero in time spaces, where average daily outdoor
temperature is equal to or larger than 12 °C. Rescales profile to
original demand value.
curr_central_ahu : bool, optional
Defines, if building has air handling unit (AHU)
(default: False)
dhw_random : bool, optional
Defines, if hot water volume per person and day value should be
randomized by choosing value from gaussian distribution (20 %
standard deviation) (default: False)
If True: Randomize value
If False: Use reference value
prev_heat_dev : bool, optional
Defines, if heating devices should be prevented within chosen
appliances (default: True). If set to True, DESWH, E-INST,
Electric shower, Storage heaters and Other electric space heating
are set to zero. Only relevant for el_gen_method == 2
season_mod : float, optional
Float to define rescaling factor to rescale annual lighting power curve
with cosine wave to increase winter usage and decrease summer usage.
Reference is maximum lighting power (default: None). If set to None,
do NOT perform rescaling with cosine wave
Returns
-------
extended_building : object
BuildingExtended object
Annotation
----------
Raise assertion error when share of occupants per apartment is higher
than 5 (necessary for stochastic, el. profile generation)
"""
assert net_floor_area > 0
assert spec_th_demand >= 0
if annual_el_demand is not None:
assert annual_el_demand >= 0
if total_number_occupants is not None:
assert total_number_occupants > 0
assert total_number_occupants / nb_of_apartments <= 5, (
'Number of occupants per apartment is ' +
'at least once higher than 5.')
# Distribute occupants to different apartments
occupancy_list = constrained_sum_sample_pos(n=nb_of_apartments,
total=total_number_occupants)
# While not all values are smaller or equal to 5, return run
# This while loop might lead to large runtimes for buildings with a
# large number of apartments (not finding a valid solution, see
# issue #147). Thus, we add a counter to exit the loop
count = 0
while all(i <= 5 for i in occupancy_list) is not True:
occupancy_list = constrained_sum_sample_pos(n=nb_of_apartments,
total=total_number_occupants)
if count == 100000:
# Take current occupancy_list and redistribute occupants
# manually until valid distribution is found
occupancy_list = redistribute_occ(occ_list=occupancy_list)
# Exit while loop
break
count += 1
print('Current list of occupants per apartment: ', occupancy_list)
else:
msg = 'Number of occupants is None for current building!'
warnings.warn(msg)
# Define SLP profiles for residential building with multiple zone
th_slp_type = 'HMF'
el_slp_type = 'H0'
# Create extended building object
extended_building = \
build_ex.BuildingExtended(environment,
build_year=build_year,
mod_year=mod_year,
build_type=build_type,
roof_usabl_pv_area=pv_use_area,
net_floor_area=net_floor_area,
height_of_floors=height_of_floors,
nb_of_floors=nb_of_floors,
neighbour_buildings=
neighbour_buildings,
residential_layout=
residential_layout,
attic=attic,
cellar=cellar,
construction_type=
construction_type,
dormer=dormer,
with_ahu=curr_central_ahu)
if annual_el_demand is not None:
# Distribute el. demand equally to apartments
annual_el_demand_ap = annual_el_demand / nb_of_apartments
else:
annual_el_demand_ap = None
# Loop over apartments
# #---------------------------------------------------------------------
for i in range(int(nb_of_apartments)):
# Dummy init of number of occupants
curr_number_occupants = None
# Check number of occupants
if total_number_occupants is not None:
# Get number of occupants
curr_number_occupants = occupancy_list[i]
# Generate occupancy profiles for stochastic el. and/or dhw
if el_gen_method == 2 or (dhw_method == 2 and use_dhw):
# Generate occupancy profile (necessary for stochastic, el. or
# dhw profile)
occupancy_object = occup.Occupancy(environment,
number_occupants=
curr_number_occupants)
else: # Generate occupancy object without profile
occupancy_object = occup.Occupancy(environment,
number_occupants=
curr_number_occupants,
do_profile=False)
else:
if el_gen_method == 2:
warnings.warn('Stochastic el. profile cannot be generated ' +
'due to missing number of occupants. ' +
'SLP is used instead.')
# Set el_gen_method to 1 (SLP)
el_gen_method = 1
elif dhw_method == 2:
raise AssertionError('DHW profile cannot be generated ' +
'for residential building without ' +
'occupants (stochastic mode). ' +
'Please check your input file ' +
'(missing number of occupants) ' +
'or disable dhw generation.')
if (curr_number_occupants is None and dhw_method == 1 and
use_dhw == True):
# If dhw profile should be generated, but current number of
# occupants is None, the number of occupants is sampled from
# the occupancy distribution for the apartment
curr_number_occupants = usunc.calc_sampling_occ_per_app(
nb_samples=1)
# Assumes equal area share for all apartments
apartment_area = net_floor_area / nb_of_apartments
# Create space heating demand (for apartment)
if th_gen_method == 1:
# Use SLP
heat_power_curve = \
SpaceHeating.SpaceHeating(environment,
method=1,
profile_type=th_slp_type,
livingArea=apartment_area,
specificDemand=spec_th_demand)
if slp_manipulate: # Do SLP manipulation
timestep = environment.timer.timeDiscretization
temp_array = environment.weather.tAmbient
mod_curve = \
slpman.slp_th_manipulator(timestep,
th_slp_curve=heat_power_curve.loadcurve,
temp_array=temp_array)
heat_power_curve.loadcurve = mod_curve
elif th_gen_method == 2:
# Use Modelica result profile
heat_power_curve = SpaceHeating.SpaceHeating(environment,
method=3,
livingArea=apartment_area,
specificDemand=spec_th_demand)
# Calculate el. energy demand for apartment, if no el. energy
# demand is given for whole building to rescale
if annual_el_demand_ap is None:
# Generate annual_el_demand_ap
annual_el_demand_ap = calc_el_dem_ap(nb_occ=curr_number_occupants,
el_random=el_random,
type='mfh')
print('Annual el. demand (apartment) in kWh: ', annual_el_demand_ap)
if curr_number_occupants is not None:
print('El. demand per person in kWh: ')
print(annual_el_demand_ap / curr_number_occupants)
print()
# Create electrical power curve
if el_gen_method == 2:
if season_mod is not None:
season_light_mod = True
else:
season_light_mod = False
el_power_curve = ElectricalDemand.ElectricalDemand(environment,
method=2,
total_nb_occupants=curr_number_occupants,
randomizeAppliances=True,
lightConfiguration=0,
annualDemand=annual_el_demand_ap,
occupancy=occupancy_object.occupancy,
do_normalization=do_normalization,
prev_heat_dev=prev_heat_dev,
season_light_mod=season_light_mod,
light_mod_fac=season_mod)
else: # Use el. SLP
el_power_curve = ElectricalDemand.ElectricalDemand(environment,
method=1,
annualDemand=annual_el_demand_ap,
profileType=el_slp_type)
# Create domestic hot water demand
if use_dhw:
if dhw_volumen is None or dhw_random:
dhw_kwh = calc_dhw_dem_ap(nb_occ=curr_number_occupants,
dhw_random=dhw_random,
type='mfh')
# Reconvert kWh/a to Liters per day
dhw_vol_ap = dhw_kwh * 1000 * 3600 * 1000 / (
995 * 4182 * 35 * 365)
# DHW volume per person and day
dhw_volumen = dhw_vol_ap / curr_number_occupants
if dhw_method == 1: # Annex 42
dhw_power_curve = DomesticHotWater.DomesticHotWater(
environment,
tFlow=60,
thermal=True,
method=1,
# Annex 42
dailyConsumption=dhw_volumen * curr_number_occupants,
supplyTemperature=25)
else: # Stochastic profile
dhw_power_curve = DomesticHotWater.DomesticHotWater(
environment,
tFlow=60,
thermal=True,
method=2,
supplyTemperature=25,
occupancy=occupancy_object.occupancy)
# Rescale to reference dhw volume (liters per person
# and day)
curr_dhw_vol_flow = dhw_power_curve.water
# Water volume flow in Liter/hour
curr_volume_year = sum(curr_dhw_vol_flow) * \
environment.timer.timeDiscretization / \
3600
curr_vol_day = curr_volume_year / 365
curr_vol_day_and_person = curr_vol_day / \
occupancy_object.number_occupants
print('Curr. volume per person and day: ',
curr_vol_day_and_person)
dhw_con_factor = dhw_volumen / curr_vol_day_and_person
print('Conv. factor of hot water: ', dhw_con_factor)
print('New volume per person and day: ',
curr_vol_day_and_person * dhw_con_factor)
# Normalize water flow and power load
dhw_power_curve.water *= dhw_con_factor
dhw_power_curve.loadcurve *= dhw_con_factor
# Create apartment
apartment = Apartment.Apartment(environment,
occupancy=occupancy_object,
net_floor_area=apartment_area)
# Add demands to apartment
if th_gen_method == 1 or th_gen_method == 2:
if use_dhw:
apartment.addMultipleEntities([heat_power_curve,
el_power_curve,
dhw_power_curve])
else:
apartment.addMultipleEntities([heat_power_curve,
el_power_curve])
else:
if use_dhw:
apartment.addMultipleEntities([el_power_curve,
dhw_power_curve])
else:
apartment.addEntity(el_power_curve)
# Add apartment to extended building
extended_building.addEntity(entity=apartment)
return extended_building
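# Minimal usage sketch (multi family house with 4 apartments; occupants
# are distributed randomly over the apartments, at most 5 per apartment):
#
#   mfh = generate_res_building_multi_zone(
#       environment, net_floor_area=400, spec_th_demand=120,
#       th_gen_method=1, el_gen_method=1, nb_of_apartments=4,
#       total_number_occupants=9)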
def generate_nonres_building_single_zone(environment,
net_floor_area, spec_th_demand,
annual_el_demand, th_slp_type,
el_slp_type=None,
build_year=None, mod_year=None,
build_type=None, pv_use_area=None,
method_3_type=None,
method_4_type=None,
height_of_floors=None,
nb_of_floors=None):
"""
Function generates and returns extended nonresidential building object
with single zone.
Parameters
----------
environment : object
Environment object
net_floor_area : float
Net floor area of building in m2
spec_th_demand : float
Specific thermal energy demand in kWh/m2*a
annual_el_demand : float
Annual electrical energy demand in kWh/a
th_slp_type : str
Thermal SLP type (for non-residential buildings)
- `GBA` : Bakeries
- `GBD` : Other services
- `GBH` : Accommodations
- `GGA` : Restaurants
- `GGB` : Gardening
- `GHA` : Retailers
- `GHD` : Summed load profile business, trade and services
- `GKO` : Banks, insurances, public institutions
- `GMF` : Household similar businesses
- `GMK` : Automotive
- `GPD` : Paper and printing
- `GWA` : Laundries
el_slp_type : str, optional (default: None)
Electrical SLP type
- H0 : Household
- L0 : Farms
- L1 : Farms with breeding / cattle
- L2 : Farms without cattle
- G0 : Business (general)
- G1 : Business (workingdays 8:00 AM - 6:00 PM)
- G2 : Business with high loads in the evening
- G3 : Business (24 hours)
- G4 : Shops / Barbers
- G5 : Bakery
- G6 : Weekend operation
build_year : int, optional
Building year of construction (default: None)
mod_year : int, optional
Last year of modernization of building (default: None)
build_type : int, optional
Building type (default: None)
pv_use_area : float, optional
Usable pv area in m2 (default: None)
method_3_type : str, optional
Defines type of profile for method=3 (default: None)
Options:
- 'food_pro': Food production
- 'metal': Metal company
- 'rest': Restaurant (with large cooling load)
- 'sports': Sports hall
- 'repair': Repair / metal shop
method_4_type : str, optional
Defines type of profile for method=4 (default: None)
- 'metal_1' : Metal company with smooth profile
- 'metal_2' : Metal company with fluctuation in profile
- 'warehouse' : Warehouse
height_of_floors : float
average height of the floors
nb_of_floors : int
Number of floors above the ground
Returns
-------
extended_building : object
BuildingExtended object
"""
assert net_floor_area > 0
assert spec_th_demand >= 0
assert annual_el_demand >= 0
assert th_slp_type != 'HEF', ('HEF thermal slp profile only valid for ' +
'residential buildings.')
assert th_slp_type != 'HMF', ('HMF thermal slp profile only valid for ' +
'residential buildings.')
assert el_slp_type != 'H0', ('H0 electrical slp profile only valid ' +
'for residential buildings.')
# Create space heating demand
heat_power_curve = SpaceHeating.SpaceHeating(environment,
method=1,
profile_type=th_slp_type,
livingArea=net_floor_area,
specificDemand=spec_th_demand)
if method_3_type is not None:
el_power_curve = \
ElectricalDemand.ElectricalDemand(environment,
method=3,
annualDemand=annual_el_demand,
do_normalization=True,
method_3_type=method_3_type)
elif method_4_type is not None:
el_power_curve = \
ElectricalDemand.ElectricalDemand(environment,
method=4,
annualDemand=annual_el_demand,
do_normalization=True,
method_4_type=method_4_type)
else:
# Use el. SLP for el. power load generation
assert el_slp_type is not None, 'el_slp_type is required!'
el_power_curve = \
ElectricalDemand.ElectricalDemand(environment,
method=1,
annualDemand=annual_el_demand,
profileType=el_slp_type)
# Create apartment
apartment = Apartment.Apartment(environment)
# Add demands to apartment
apartment.addMultipleEntities([heat_power_curve, el_power_curve])
# Create extended building object
extended_building = build_ex.BuildingExtended(environment,
net_floor_area=net_floor_area,
build_year=build_year,
mod_year=mod_year,
build_type=build_type,
roof_usabl_pv_area=pv_use_area,
height_of_floors=height_of_floors,
nb_of_floors=nb_of_floors,
)
# Add apartment to extended building
extended_building.addEntity(entity=apartment)
return extended_building
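# Minimal usage sketch (office-type building with summed business thermal
# SLP 'GHD' and electrical SLP 'G1'):
#
#   office = generate_nonres_building_single_zone(
#       environment, net_floor_area=500, spec_th_demand=80,
#       annual_el_demand=20000, th_slp_type='GHD', el_slp_type='G1')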
def get_district_data_from_txt(path, delimiter='\t'):
"""
Load city district data from txt file (see annotations below for further
information of required inputs).
NaN values are replaced with Python None.
Parameters
----------
path : str
Path to txt file
delimiter : str, optional
Defines delimiter for txt file (default: '\t')
Returns
-------
district_data : ndarray
Numpy 2d-array with city district data (each column represents
different parameter, see annotations)
Annotations
-----------
File structure
Columns:
1: id (int)
2: x in m (float)
3: y in m (float)
4: building_type (int, e.g. 0 for residential building)
5: net floor area in m2 (float)
6: Year of construction (int, optional)
7: Year of modernization (int, optional)
8: Annual (final) thermal energy demand in kWh (float, optional)
9: Annual electrical energy demand in kWh (float, optional)
10: Usable pv roof area in m2 (float, optional)
11: Number of apartments (int, optional)
12: Total number of occupants (int, optional)
13: Number of floors above the ground (int, optional)
14: Average Height of floors (float, optional)
15: If building has a central AHU or not (boolean, optional)
16: Residential layout (int, optional, e.g. 0 for compact)
17: Neighbour Buildings (int, optional) (0 - free standing)
(1 - double house) (2 - row house)
18: Type of attic (int, optional, e.g. 0 for flat roof) (1 - regular roof;
unheated) (2 - regular roof; partially heated) (3 - regular roof; fully
heated)
19: Type of cellar (int, optional, e.g. 1 for non heated cellar)
(0 - no basement) (1 - non heated) (2 - partially heated) (3 - fully heated)
20: Dormer (int, optional, 0: no dormer/ 1: dormer)
21: Construction Type(heavy/light, optional) (0 - heavy; 1 - light)
22: Method_3_nb (for usage of measured, weekly non-res. el. profile
(optional)
23: Method_4_nb (for usage of measured, annual non-res. el. profile
(optional)
"""
district_data = np.genfromtxt(path, delimiter=delimiter, skip_header=1)
# Replace nan with None values of Python
district_data = np.where(np.isnan(district_data), None, district_data)
return district_data
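# Minimal usage sketch, assuming a tab-separated district input file with
# the 23 columns documented above ('nan' cells become Python None):
#
#   district_data = get_district_data_from_txt('district_data.txt')
#   print(district_data[0][0])  # id of first building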
def calc_el_dem_ap(nb_occ, el_random, type):
"""
Calculate electric energy demand per apartment per year
in kWh/a (residential buildings, only)
Parameters
----------
nb_occ : int
Number of occupants
el_random : bool
Defines, if random value should be chosen from statistics
or if average value should be chosen. el_random == True means,
use random value.
type : str
Define residential building type (single family or multi-
family)
Options:
- 'sfh' : Single family house
- 'mfh' : Multi family house
Returns
-------
el_dem : float
Electric energy demand per apartment in kWh/a
"""
assert nb_occ > 0
assert nb_occ <= 5, 'Number of occupants cannot exceed 5 per ap.'
assert type in ['sfh', 'mfh']
if el_random:
# Choose first entry of random sample list
el_dem = usunc.calc_sampling_el_demand_per_apartment(
nb_samples=1,
nb_persons=nb_occ,
type=type)[0]
else:
# Choose average value depending on nb_occ
# Class D without hot water (Stromspiegel 2017)
dict_sfh = {1: 2500,
2: 3200,
3: 3900,
4: 4200,
5: 5400}
dict_mfh = {1: 1500,
2: 2200,
3: 2800,
4: 3200,
5: 4000}
if type == 'sfh':
el_dem = dict_sfh[nb_occ]
elif type == 'mfh':
el_dem = dict_mfh[nb_occ]
return el_dem
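# Worked example (sketch), using the Stromspiegel 2017 averages above:
#
#   assert calc_el_dem_ap(nb_occ=3, el_random=False, type='sfh') == 3900
#   assert calc_el_dem_ap(nb_occ=3, el_random=False, type='mfh') == 2800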
def calc_dhw_dem_ap(nb_occ, dhw_random, type, delta_t=35, c_p_water=4182,
rho_water=995):
"""
Calculate hot water energy demand per apartment per year
in kWh/a (residential buildings, only)
Parameters
----------
nb_occ : int
Number of occupants
dhw_random : bool
Defines, if random value should be chosen from statistics
or if average value should be chosen. dhw_random == True means,
use random value.
type : str
Define residential building type (single family or multi-
family)
Options:
- 'sfh' : Single family house
- 'mfh' : Multi family house
delta_t : float, optional
Temperature difference between cold and heated-up water in Kelvin (default: 35)
c_p_water : float, optional
Specific heat capacity of water in J/kgK (default: 4182)
rho_water : float, optional
Density of water in kg/m3 (default: 995)
Returns
-------
dhw_dem : float
Domestic hot water energy demand per apartment in kWh/a
"""
assert nb_occ > 0
assert nb_occ <= 5, 'Number of occupants cannot exceed 5 per ap.'
assert type in ['sfh', 'mfh']
if dhw_random:
# Choose first entry of random sample list
# DHW volume in liters per apartment and day
dhw_volume = usunc.calc_sampling_dhw_per_apartment(
nb_samples=1,
nb_persons=nb_occ,
b_type=type)[0]
dhw_dem = dhw_volume * 365 * rho_water * c_p_water * delta_t / \
(1000 * 3600 * 1000)
else:
# Choose average value depending on nb_occ
# Class D hot water demand (Stromspiegel 2017)
dict_sfh = {1: 500,
2: 800,
3: 1000,
4: 1300,
5: 1600}
dict_mfh = {1: 500,
2: 900,
3: 1300,
4: 1400,
5: 2000}
if type == 'sfh':
dhw_dem = dict_sfh[nb_occ]
elif type == 'mfh':
dhw_dem = dict_mfh[nb_occ]
return dhw_dem
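# Worked example (sketch): with dhw_random=False the tabulated average is
# returned directly; with dhw_random=True a sampled daily volume V in liters
# is converted via E = V * 365 * rho * c_p * delta_t / 3.6e9 in kWh/a:
#
#   assert calc_dhw_dem_ap(nb_occ=2, dhw_random=False, type='sfh') == 800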
def run_city_generator(generation_mode, timestep,
year_timer, year_co2,
location,
th_gen_method,
el_gen_method, district_data, use_dhw=False,
dhw_method=1, try_path=None,
pickle_city_filename=None, do_save=True,
path_save_city=None, eff_factor=0.85,
show_city=False, altitude=55, dhw_volumen=None,
do_normalization=True, slp_manipulate=True,
call_teaser=False, teaser_proj_name='pycity',
do_log=True, log_path=None,
project_name='teaser_project',
air_vent_mode=1, vent_factor=0.5,
t_set_heat=20,
t_set_cool=70,
t_night=16,
vdi_sh_manipulate=False, city_osm=None,
el_random=False, dhw_random=False, prev_heat_dev=True,
season_mod=None, merge_windows=False, new_try=False):
"""
Function generates city district for user defined input. Generated
buildings consist of only one single zone!
Parameters
----------
generation_mode : int
Integer to define method to generate city district
(so far, only csv/txt file import has been implemented)
generation_mode = 0: Load data from csv/txt file (tab-separated)
timestep : int
Timestep in seconds
year_timer : int
Chosen year of analysis
(influences initial day for profile generation)
year_co2 : int, optional
Chose year with specific emission factors
location : Tuple
(latitude, longitude) of the simulated system's position.
th_gen_method : int
Thermal load profile generation method
1 - Use SLP
2 - Load Modelica simulation output profile (only residential)
Method 2 is only used for residential buildings. For non-res.
buildings, SLPs are generated instead
3 - Use TEASER VDI 6007 core to simulate thermal loads
el_gen_method : int
Electrical generation method
1 - Use SLP
2 - Generate stochastic load profile (only valid for residential
building). Requires number of occupants.
district_data : ndarray
Numpy 2d-array with city district data (each column represents
different parameter, see annotations)
use_dhw : bool, optional
Defines if domestic hot water profiles should be generated.
(default: False)
dhw_method : int, optional
Defines method for dhw profile generation (default: 1)
Only relevant if use_dhw=True. Options:
- 1: Generate profiles via Annex 42
- 2: Generate stochastic dhw profiles
try_path : str, optional
Path to TRY weather file (default: None)
If set to None, uses default weather TRY file (2010, region 5)
pickle_city_filename : str, optional
Name for file, which should be pickled and saved, if no path is
handed over to save object to (default: None)
do_save : bool, optional
Defines, if city object instance should be saved as pickle file
(default: True)
path_save_city : str, optional
Path to save (pickle and dump) city object instance to (default: None)
If None is used, saves file to .../output/...
eff_factor : float, optional
Efficiency factor of thermal boiler system (default: 0.85)
show_city : bool, optional
Boolean to define if city district should be printed by matplotlib
after generation (default: False)
True: Print results
False: Do not print results
altitude : float, optional
Altitude of location in m (default: 55 - City of Bottrop)
dhw_volumen : float, optional
Volume of domestic hot water in liter per capita and day
(default: None).
do_normalization : bool, optional
Defines, if stochastic profile (el_gen_method=2) should be
normalized to given annualDemand value (default: True).
If set to False, annual el. demand depends on stochastic el. load
profile generation. If set to True, does normalization with
annualDemand
slp_manipulate : bool, optional
Defines, if thermal space heating SLP profile should be modified
(default: True). Only used for residential buildings!
Only relevant, if th_gen_method == 1
True - Do manipulation
False - Use original profile
Sets thermal power to zero in time spaces, where average daily outdoor
temperature is equal to or larger than 12 °C. Rescales profile to
original demand value.
call_teaser : bool, optional
Defines, if teaser should be called to generate typeBuildings
(currently, residential typeBuildings only).
(default: False)
If set to True, generates typeBuildings and add them to building node
as attribute 'type_building'
teaser_proj_name : str, optional
TEASER project name (default: 'pycity'). Only relevant, if call_teaser
is set to True
do_log : bool, optional
Defines, if log file of inputs should be generated (default: True)
log_path : str, optional
Path to log file (default: None). If set to None, saves log to
.../output
air_vent_mode : int
Defines method to generation air exchange rate for VDI 6007 simulation
Options:
0 : Use constant value (vent_factor in 1/h)
1 : Use deterministic, temperature-dependent profile
2 : Use stochastic, user-dependent profile
vent_factor : float, optional
Ventilation rate factor in 1/h (default: 0.5). Only used, if
array_vent_rate is None (otherwise, array_vent_rate array is used)
t_set_heat : float, optional
Heating set temperature in degree Celsius. If temperature drops below
t_set_heat, model is going to be heated up. (default: 20)
(Related to constraints for res. buildings in DIN V 18599)
t_set_cool : float, optional
Cooling set temperature in degree Celsius. If temperature rises above
t_set_cool, model is going to be cooled down. (default: 70)
t_night : float, optional
Night set back temperature in degree Celsius (default: 16)
(Related to constraints for res. buildings in DIN V 18599)
project_name : str, optional
TEASER project name (default: 'teaser_project')
vdi_sh_manipulate : bool, optional
Defines, if VDI 6007 thermal space heating load curve should be
normalized to match given annual space heating demand in kWh
(default: False)
el_random : bool, optional
Defines, if annual electrical demand value for normalization of
el. load profile should randomly diverge from reference value
within specific boundaries (default: False).
If False: Use reference value for normalization
If True: Allow generating a value that differs from the reference value
dhw_random : bool, optional
Defines, if hot water volume per person and day value should be
randomized by choosing value from gaussian distribution (20 %
standard deviation) (default: False)
If True: Randomize value
If False: Use reference value
prev_heat_dev : bool, optional
Defines, if heating devices should be prevented within chosen
appliances (default: True). If set to True, DESWH, E-INST,
Electric shower, Storage heaters and Other electric space heating
are set to zero. Only relevant for el_gen_method == 2
season_mod : float, optional
Float to define rescaling factor to rescale annual lighting power curve
with cosine wave to increase winter usage and decrease summer usage.
Reference is maximum lighting power (default: None). If set to None,
do NOT perform rescaling with cosine wave
merge_windows : bool, optional
Defines TEASER project setting for merge_windows_calc
(default: False). If set to False, merge_windows_calc is set to False.
If True, Windows are merged into wall resistances.
new_try : bool, optional
Defines, if TRY dataset has been generated after 2017 (default: False)
If False, assumes that TRY dataset has been generated before 2017.
If True, assumes that TRY dataset has been generated after 2017 and
belongs to the new TRY classes. This is important for extracting
the correct values from the TRY dataset!
Returns
-------
city_object : object
City object of pycity_calc
Annotations
-----------
Non-residential building loads are automatically generated via SLP
(even if el_gen_method is set to 2). Furthermore, dhw profile generation
is automatically neglected (only valid for residential buildings)
Electrical load profiles of residential buildings without occupants
are automatically generated via SLP (even if el_gen_method is set to 2)
File structure (district_data np.array)
Columns:
1: id (int)
2: x in m (float)
3: y in m (float)
4: building_type (int, e.g. 0 for residential building)
5: net floor area in m2 (float)
6: Year of construction (int, optional)
7: Year of modernization (int, optional)
8: Annual (final) thermal energy demand in kWh (float, optional)
For residential: space heating, only!
For non-residential: Space heating AND hot water! (SLP usage)
9: Annual electrical energy demand in kWh (float, optional)
10: Usable pv roof area in m2 (float, optional)
11: Number of apartments (int, optional)
12: Total number of occupants (int, optional)
13: Number of floors above the ground (int, optional)
14: Average Height of floors (float, optional)
15: If building has a central AHU or not (boolean, optional)
16: Residential layout (int, optional, e.g. 0 for compact)
17: Neighbour Buildings (int, optional); 0 - free standing; 1 - Double house; 2 - Row house;
18: Type of attic (int, optional, e.g. 0 for flat roof); 1 - Roof, non heated; 2 - Roof, partially heated; 3- Roof, fully heated;
19: Type of basement (int, optional, e.g. 1 for non heated basement 0 - No basement; 1 - basement, non heated; 2 - basement, partially heated; 3- basement, fully heated;
20: Dormer (int, optional, 0: no dormer/ 1: dormer)
21: Construction Type(heavy/light, optional) (0 - heavy; 1 - light)
22: Method_3_nb (for usage of measured, weekly non-res. el. profile
(optional) (0 to 4)
23: Method_4_nb (for usage of measured, annual non-res. el. profile
(optional) (0 - 2)
method_3_type : str, optional
Defines type of profile for method=3 (default: None)
Options:
0 - 'food_pro': Food production
1 - 'metal': Metal company
2 - 'rest': Restaurant (with large cooling load)
3 - 'sports': Sports hall
4 - 'repair': Repair / metal shop
method_4_type : str, optional
Defines type of profile for method=4 (default: None)
0 - 'metal_1' : Metal company with smooth profile
1 - 'metal_2' : Metal company with fluctuation in profile
2 - 'warehouse' : Warehouse
"""
assert eff_factor > 0, 'Efficiency factor has to be larger than zero.'
assert eff_factor <= 1, 'Efficiency factor cannot exceed 1.'
if dhw_volumen is not None: # pragma: no cover
assert dhw_volumen >= 0, 'Hot water volume cannot be below zero.'
if generation_mode == 1: # pragma: no cover
assert city_osm is not None, 'Generation mode 1 requires city object!'
if vdi_sh_manipulate is True and th_gen_method == 3: # pragma: no cover
msg = 'Simulated profiles of VDI 6007 call (TEASER --> ' \
'space heating) are going to be normalized with annual thermal' \
' space heating demand values given by the user!'
warnings.warn(msg)
if do_log: # pragma: no cover
# Write log file
# ################################################################
# Log file path
if log_path is None:
# If not existing, use default path
this_path = os.path.dirname(os.path.abspath(__file__))
log_path = os.path.join(this_path, 'output', 'city_gen_log.txt')
log_file = open(log_path, mode='w')
log_file.write('PyCity_Calc city_generator.py log file')
log_file.write('\n############## Time and location ##############\n')
log_file.write('Date: ' + str(datetime.datetime.now()) + '\n')
log_file.write('generation_mode: ' + str(generation_mode) + '\n')
log_file.write('timestep in seconds: ' + str(timestep) + '\n')
log_file.write('Year for timer: ' + str(year_timer) + '\n')
log_file.write('Year for CO2 emission factors: '
+ str(year_co2) + '\n')
log_file.write('Location: ' + str(location) + '\n')
log_file.write('altitude: ' + str(altitude) + '\n')
if generation_mode == 0:
log_file.write('Generation mode: csv/txt input, only.\n')
elif generation_mode == 1:
log_file.write('Generation mode: csv/txt plus city osm object.\n')
log_file.write('\n############## Generation methods ##############\n')
log_file.write('th_gen_method: ' + str(th_gen_method) + '\n')
if th_gen_method == 1:
log_file.write('Manipulate SLP: ' + str(slp_manipulate) + '\n')
elif th_gen_method == 3:
log_file.write('t_set_heat: ' + str(t_set_heat) + '\n')
log_file.write('t_set_night: ' + str(t_night) + '\n')
log_file.write('t_set_cool: ' + str(t_set_cool) + '\n')
log_file.write('air_vent_mode: ' + str(air_vent_mode) + '\n')
log_file.write('vent_factor: ' + str(vent_factor) + '\n')
log_file.write('el_gen_method: ' + str(el_gen_method) + '\n')
log_file.write(
'Normalize el. profile: ' + str(do_normalization) + '\n')
log_file.write(
'Do random el. normalization: ' + str(el_random) + '\n')
log_file.write(
'Prevent el. heating devices for el load generation: '
'' + str(prev_heat_dev) + '\n')
log_file.write(
'Rescaling factor lighting power curve to implement seasonal '
'influence: ' + str(season_mod) + '\n')
log_file.write('use_dhw: ' + str(use_dhw) + '\n')
log_file.write('dhw_method: ' + str(dhw_method) + '\n')
log_file.write('dhw_volumen: ' + str(dhw_volumen) + '\n')
log_file.write(
'Do random dhw. normalization: ' + str(dhw_random) + '\n')
log_file.write('\n############## Others ##############\n')
log_file.write('try_path: ' + str(try_path) + '\n')
log_file.write('eff_factor: ' + str(eff_factor) + '\n')
log_file.write('timestep in seconds: ' + str(timestep) + '\n')
log_file.write('call_teaser: ' + str(call_teaser) + '\n')
log_file.write('teaser_proj_name: ' + str(teaser_proj_name) + '\n')
# Log file is closed, after pickle filename has been generated
# (see code below)
if generation_mode == 0 or generation_mode == 1:
# ##################################################################
# Load specific demand files
# Load specific thermal demand input data
spec_th_dem_res_building = load_data_file_with_spec_demand_data(
'RWI_res_building_spec_th_demand.txt')
start_year_column = (spec_th_dem_res_building[:, [0]])
# Reverse
start_year_column = start_year_column[::-1]
"""
Columns:
1. Start year (int)
2. Final year (int)
3. Spec. thermal energy demand in kWh/m2*a (float)
"""
# ##################################################################
# Load specific electrical demand input data
spec_el_dem_res_building = load_data_file_with_spec_demand_data(
'AGEB_res_building_spec_e_demand.txt')
"""
Columns:
1. Start year (int)
2. Final year (int)
3. Spec. electrical energy demand in kWh/m2*a (float)
"""
# ##################################################################
# Load specific electrical demand input data
# (depending on number of occupants)
spec_el_dem_res_building_per_person = \
load_data_file_with_spec_demand_data(
'Stromspiegel2017_spec_el_energy_demand.txt')
"""
Columns:
1. Number of persons (int) ( 1 - 5 SFH and 1 - 5 MFH)
2. Annual electrical demand in kWh/a (float)
3. Specific electrical demand per person in kWh/person*a (float)
"""
# ###################################################################
# Load specific demand data and slp types for
# non residential buildings
spec_dem_and_slp_non_res = load_data_file_with_spec_demand_data(
'Spec_demands_non_res.txt')
"""
Columns:
1. type_id (int)
2. type_name (string) # Currently 'nan', due to expected float
3. Spec. thermal energy demand in kWh/m2*a (float)
4. Spec. electrical energy demand in kWh/m2*a (float)
5. Thermal SLP type (int)
6. Electrical SLP type (int)
"""
# ###################################################################
# Generate city district
# Generate extended environment of pycity_calc
environment = generate_environment(timestep=timestep,
year_timer=year_timer,
year_co2=year_co2,
location=location,
try_path=try_path,
altitude=altitude,
new_try=new_try)
print('Generated environment object.\n')
if generation_mode == 0:
# Generate city object
# ############################################################
city_object = city.City(environment=environment)
print('Generated city object.\n')
else:
# Overwrite city_osm environment
print('Overwrite city_osm.environment with new environment')
city_osm.environment = environment
city_object = city_osm
# Check if district_data only holds one entry for single building
# In this case, has to be processed differently
if district_data.ndim > 1:
multi_data = True
else: # Only one entry (single building)
multi_data = False
# If multi_data is false, loop below is going to be exited with
# a break statement at the end.
# Generate dummy node id and thermal space heating demand dict
dict_id_vdi_sh = {}
# Loop over district_data
# ############################################################
for i in range(len(district_data)):
if multi_data:
# Extract data out of input file
curr_id = int(
district_data[i][0]) # id / primary key of building
curr_x = district_data[i][1] # x-coordinate in m
curr_y = district_data[i][2] # y-coordinate in m
curr_build_type = int(
district_data[i][3]) # building type nb (int)
curr_nfa = district_data[i][4] # Net floor area in m2
curr_build_year = district_data[i][5] # Year of construction
curr_mod_year = district_data[i][
6] # optional (last year of modernization)
curr_th_e_demand = district_data[i][
7] # optional: Final thermal energy demand in kWh
# For residential buildings: Space heating only!
# For non-residential buildings: Space heating AND hot water! (SLP)
curr_el_e_demand = district_data[i][
8] # optional (Annual el. energy demand in kWh)
curr_pv_roof_area = district_data[i][
9] # optional (Usable pv roof area in m2)
curr_nb_of_apartments = district_data[i][
10] # optional (Number of apartments)
curr_nb_of_occupants = district_data[i][
11] # optional (Total number of occupants)
curr_nb_of_floors = district_data[i][
12] # optional (Number of floors above the ground)
curr_avg_height_of_floors = district_data[i][
13] # optional (Average Height of floors)
curr_central_ahu = district_data[i][
14] # optional (If building has a central air handling unit (AHU) or not (boolean))
curr_res_layout = district_data[i][
15] # optional Residential layout (int, optional, e.g. 0 for compact)
curr_nb_of_neighbour_bld = district_data[i][
16] # optional Neighbour Buildings (int, optional)
curr_type_attic = district_data[i][
    17]  # optional Type of attic (int, optional):
# 0 - flat roof; 1 - roof, non heated; 2 - roof, partially heated;
# 3 - roof, fully heated
curr_type_cellar = district_data[i][
    18]  # optional Type of basement (int, optional):
# 0 - no basement; 1 - basement, non heated; 2 - basement, partially
# heated; 3 - basement, fully heated
curr_dormer = district_data[i][
19] # optional Dormer (int, optional, 0: no dormer/ 1: dormer)
curr_construction_type = district_data[i][
20] # optional Construction Type(heavy/light, optional) (0 - heavy; 1 - light)
curr_method_3_nb = district_data[i][
    21]  # optional Method_3_nb (for usage of measured, weekly non-res. el. profile)
curr_method_4_nb = district_data[i][
    22]  # optional Method_4_nb (for usage of measured, annual non-res. el. profile)
else: # Single entry
# Extract data out of input file
curr_id = int(district_data[0]) # id / primary key of building
curr_x = district_data[1] # x-coordinate in m
curr_y = district_data[2] # y-coordinate in m
curr_build_type = int(
district_data[3]) # building type nb (int)
curr_nfa = district_data[4] # Net floor area in m2
curr_build_year = district_data[5] # Year of construction
curr_mod_year = district_data[
6] # optional (last year of modernization)
curr_th_e_demand = district_data[
7] # optional: Final thermal energy demand in kWh
# For residential buildings: Space heating only!
# For non-residential buildings: Space heating AND hot water! (SLP)
curr_el_e_demand = district_data[
8] # optional (Annual el. energy demand in kWh)
curr_pv_roof_area = district_data[
9] # optional (Usable pv roof area in m2)
curr_nb_of_apartments = district_data[
10] # optional (Number of apartments)
curr_nb_of_occupants = district_data[
11] # optional (Total number of occupants)
curr_nb_of_floors = district_data[
12] # optional (Number of floors above the ground)
curr_avg_height_of_floors = district_data[
13] # optional (Average Height of floors)
curr_central_ahu = district_data[
14] # optional (If building has a central air handling unit (AHU) or not (boolean))
curr_res_layout = district_data[
15] # optional Residential layout (int, optional, e.g. 0 for compact)
curr_nb_of_neighbour_bld = district_data[
16] # optional Neighbour Buildings (int, optional)
curr_type_attic = district_data[
    17]  # optional Type of attic (int, optional):
# 0 - flat roof; 1 - roof, non heated; 2 - roof, partially heated;
# 3 - roof, fully heated
curr_type_cellar = district_data[
    18]  # optional Type of basement (int, optional):
# 0 - no basement; 1 - basement, non heated; 2 - basement, partially
# heated; 3 - basement, fully heated
curr_dormer = district_data[
19] # optional Dormer (int, optional, 0: no dormer/ 1: dormer)
curr_construction_type = district_data[
20] # optional Construction Type(heavy/light, optional) (0 - heavy; 1 - light)
curr_method_3_nb = district_data[
    21]  # optional Method_3_nb (for usage of measured, weekly non-res. el. profile)
curr_method_4_nb = district_data[
    22]  # optional Method_4_nb (for usage of measured, annual non-res. el. profile)
print('Process building', curr_id)
print('########################################################')
# Assert functions
# ############################################################
assert curr_build_type >= 0
assert curr_nfa > 0
for m in range(5, 9):
if multi_data:
if district_data[i][m] is not None:
assert district_data[i][m] > 0
else:
if district_data[m] is not None:
assert district_data[m] > 0
if curr_nb_of_apartments is not None:
assert curr_nb_of_apartments > 0
# Convert to int
curr_nb_of_apartments = int(curr_nb_of_apartments)
if curr_nb_of_occupants is not None:
assert curr_nb_of_occupants > 0
# Convert curr_nb_of_occupants from float to int
curr_nb_of_occupants = int(curr_nb_of_occupants)
if (curr_nb_of_occupants is not None
and curr_nb_of_apartments is not None):
assert curr_nb_of_occupants / curr_nb_of_apartments <= 5, (
'Average share of occupants per apartment should ' +
'not exceed 5 persons! (Necessary for stochastic, el.' +
'profile generation.)')
if curr_method_3_nb is not None:
    assert curr_method_3_nb >= 0
if curr_method_4_nb is not None:
    assert curr_method_4_nb >= 0
if curr_build_type == 0 and curr_nb_of_apartments is None: # pragma: no cover
# Define single apartment, if nb of apartments is unknown
msg = 'Building ' + str(curr_id) + ' is residential, but' \
' does not have a number' \
' of apartments. Going' \
' to set nb. to 1.'
warnings.warn(msg)
curr_nb_of_apartments = 1
if (curr_build_type == 0 and curr_nb_of_occupants is None
and use_dhw and dhw_method == 2):
raise AssertionError('DHW profile cannot be generated '
                     'for residential building without '
                     'occupants (stochastic mode). '
                     'Please check your input file '
                     '(missing number of occupants) '
                     'or disable dhw generation.')
# Check if TEASER inputs are defined
if call_teaser or th_gen_method == 3:
if curr_build_type == 0: # Residential
assert curr_nb_of_floors is not None
assert curr_avg_height_of_floors is not None
assert curr_central_ahu is not None
assert curr_res_layout is not None
assert curr_nb_of_neighbour_bld is not None
assert curr_type_attic is not None
assert curr_type_cellar is not None
assert curr_dormer is not None
assert curr_construction_type is not None
if curr_nb_of_floors is not None:
assert curr_nb_of_floors > 0
if curr_avg_height_of_floors is not None:
assert curr_avg_height_of_floors > 0
if curr_central_ahu is not None:
assert 0 <= curr_central_ahu <= 1
if curr_res_layout is not None:
assert 0 <= curr_res_layout <= 1
if curr_nb_of_neighbour_bld is not None:
assert 0 <= curr_nb_of_neighbour_bld <= 2
if curr_type_attic is not None:
assert 0 <= curr_type_attic <= 3
if curr_type_cellar is not None:
assert 0 <= curr_type_cellar <= 3
if curr_dormer is not None:
assert 0 <= curr_dormer <= 1
if curr_construction_type is not None:
assert 0 <= curr_construction_type <= 1
# Check building type (residential or non residential)
# #-------------------------------------------------------------
if curr_build_type == 0: # Is residential
print('Residential building')
# Get spec. net therm. demand value according to last year
# of modernization or build_year
# If year of modernization is defined, use curr_mod_year
if curr_mod_year is not None:
use_year = int(curr_mod_year)
else: # Use year of construction
use_year = int(curr_build_year)
# Get specific, thermal energy demand (based on use_year)
for j in range(len(start_year_column)):
if use_year >= start_year_column[j]:
curr_spec_th_demand = spec_th_dem_res_building[len(
spec_th_dem_res_building) - 1 - j][2]
break
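        # Hedged illustration of the lookup above (table values are
        # hypothetical, not the data loaded earlier): if
        # start_year_column = [2002, 1995, 1984, ...] is sorted in
        # descending order and use_year = 1990, the first match is 1984,
        # so the spec. demand of the 1984 band is read from the mirrored
        # row index len(spec_th_dem_res_building) - 1 - j.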
# # Get spec. electr. demand
# if curr_nb_of_occupants is None:
# # USE AGEB values, if no number of occupants is given
# # Set specific demand value in kWh/m2*a
# curr_spec_el_demand = spec_el_dem_res_building[1]
# # Only valid for array like [2012 38.7]
# else:
# # Use Stromspiegel 2017 values
# # Calculate specific electric demand values depending
# # on number of occupants
#
# if curr_nb_of_apartments == 1:
# btype = 'sfh'
# elif curr_nb_of_apartments > 1:
# btype = 'mfh'
#
# # Average occupancy number per apartment
# curr_av_occ_per_app = \
# curr_nb_of_occupants / curr_nb_of_apartments
# print('Average number of occupants per apartment')
# print(round(curr_av_occ_per_app, ndigits=2))
#
# if curr_av_occ_per_app <= 5 and curr_av_occ_per_app > 0:
# # Correctur factor for non-int. av. number of
# # occupants (#19)
#
# # Divide annual el. energy demand with net floor area
# if btype == 'sfh':
# row_idx_low = math.ceil(curr_av_occ_per_app) - 1
# row_idx_high = math.floor(curr_av_occ_per_app) - 1
# elif btype == 'mfh':
# row_idx_low = math.ceil(curr_av_occ_per_app) - 1 \
# + 5
# row_idx_high = math.floor(curr_av_occ_per_app) - 1 \
# + 5
#
# cur_spec_el_dem_per_occ_high = \
# spec_el_dem_res_building_per_person[row_idx_high][2]
# cur_spec_el_dem_per_occ_low = \
# spec_el_dem_res_building_per_person[row_idx_low][2]
#
# print('Chosen reference spec. el. demands per person '
# 'in kWh/a (high and low value):')
# print(cur_spec_el_dem_per_occ_high)
# print(cur_spec_el_dem_per_occ_low)
#
# delta = round(curr_av_occ_per_app, 0) - \
# curr_av_occ_per_app
#
# if delta < 0:
# curr_spec_el_dem_occ = cur_spec_el_dem_per_occ_high + \
# (cur_spec_el_dem_per_occ_high -
# cur_spec_el_dem_per_occ_low) * delta
# elif delta > 0:
# curr_spec_el_dem_occ = cur_spec_el_dem_per_occ_low + \
# (cur_spec_el_dem_per_occ_high -
# cur_spec_el_dem_per_occ_low) * delta
# else:
# curr_spec_el_dem_occ = cur_spec_el_dem_per_occ_high
#
# # print('Calculated spec. el. demand per person in '
# # 'kWh/a:')
# # print(round(curr_spec_el_dem_occ, ndigits=2))
#
# # Specific el. demand per person (dependend on av.
# # number of occupants in each apartment)
# # --> Multiplied with number of occupants
# # --> Total el. energy demand in kWh
# # --> Divided with net floor area
# # --> Spec. el. energy demand in kWh/a
#
# curr_spec_el_demand = \
# curr_spec_el_dem_occ * curr_nb_of_occupants \
# / curr_nfa
#
# # print('Spec. el. energy demand in kWh/m2:')
# # print(curr_spec_el_demand)
#
# else:
# raise AssertionError('Invalid number of occupants')
# if el_random:
# if curr_nb_of_occupants is None:
# # Randomize curr_spec_el_demand with normal distribution
# # with curr_spec_el_demand as mean and 10 % standard dev.
# curr_spec_el_demand = \
# np.random.normal(loc=curr_spec_el_demand,
# scale=0.10 * curr_spec_el_demand)
# else:
# # Randomize rounding up and down of curr_av_occ_per_ap
# if round(curr_av_occ_per_app) > curr_av_occ_per_app:
# # Round up
# delta = round(curr_av_occ_per_app) - \
# curr_av_occ_per_app
# prob_r_up = 1 - delta
# rnb = random.random()
# if rnb < prob_r_up:
# use_occ = math.ceil(curr_av_occ_per_app)
# else:
# use_occ = math.floor(curr_av_occ_per_app)
#
# else:
# # Round down
# delta = curr_av_occ_per_app - \
# round(curr_av_occ_per_app)
# prob_r_down = 1 - delta
# rnb = random.random()
# if rnb < prob_r_down:
# use_occ = math.floor(curr_av_occ_per_app)
# else:
# use_occ = math.ceil(curr_av_occ_per_app)
#
# sample_el_per_app = \
# usunc.calc_sampling_el_demand_per_apartment(nb_samples=1,
# nb_persons=use_occ,
# type=btype)[0]
#
# # Divide sampled el. demand per apartment through
# # number of persons of apartment (according to
# # Stromspiegel 2017) and multiply this value with
# # actual number of persons in building to get
# # new total el. energy demand. Divide this value with
# # net floor area to get specific el. energy demand
# curr_spec_el_demand = \
# (sample_el_per_app / curr_av_occ_per_app) * \
# curr_nb_of_occupants / curr_nfa
# conversion of the construction_type from int to str
if curr_construction_type == 0:
new_curr_construction_type = 'heavy'
elif curr_construction_type == 1:
new_curr_construction_type = 'light'
else:
new_curr_construction_type = 'heavy'
# #-------------------------------------------------------------
else: # Non-residential
print('Non residential')
# Get spec. demands and slp types according to building_type
curr_spec_th_demand = \
spec_dem_and_slp_non_res[curr_build_type - 2][2]
curr_spec_el_demand = \
spec_dem_and_slp_non_res[curr_build_type - 2][3]
curr_th_slp_type = \
spec_dem_and_slp_non_res[curr_build_type - 2][4]
curr_el_slp_type = \
spec_dem_and_slp_non_res[curr_build_type - 2][5]
# Convert slp type integers into strings
curr_th_slp_type = convert_th_slp_int_and_str(curr_th_slp_type)
curr_el_slp_type = convert_el_slp_int_and_str(curr_el_slp_type)
# If curr_el_e_demand is not known, calculate it via spec.
# demand
if curr_el_e_demand is None:
curr_el_e_demand = curr_spec_el_demand * curr_nfa
# #-------------------------------------------------------------
# If curr_th_e_demand is known, recalc spec e. demand
if curr_th_e_demand is not None:
# Calc. spec. net thermal energy demand with efficiency factor
curr_spec_th_demand = eff_factor * curr_th_e_demand / curr_nfa
else:
# Spec. final energy demand is given, recalculate it to
# net thermal energy demand with efficiency factor
curr_spec_th_demand *= eff_factor
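        # Worked example of the conversion above (numbers illustrative
        # only): with curr_th_e_demand = 15000 kWh final energy,
        # curr_nfa = 150 m2 and eff_factor = 0.85, the net spec. demand
        # becomes 0.85 * 15000 / 150 = 85 kWh/m2*a.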
if th_gen_method == 1 or th_gen_method == 2 or curr_build_type != 0:
print('Used specific thermal demand value in kWh/m2*a:')
print(curr_spec_th_demand)
# #-------------------------------------------------------------
# Generate BuildingExtended object
if curr_build_type == 0: # Residential
if curr_nb_of_apartments > 1: # Multi-family house
building = generate_res_building_multi_zone(environment,
net_floor_area=curr_nfa,
spec_th_demand=curr_spec_th_demand,
annual_el_demand=curr_el_e_demand,
th_gen_method=th_gen_method,
el_gen_method=el_gen_method,
nb_of_apartments=curr_nb_of_apartments,
use_dhw=use_dhw,
dhw_method=dhw_method,
total_number_occupants=curr_nb_of_occupants,
build_year=curr_build_year,
mod_year=curr_mod_year,
build_type=curr_build_type,
pv_use_area=curr_pv_roof_area,
height_of_floors=curr_avg_height_of_floors,
nb_of_floors=curr_nb_of_floors,
neighbour_buildings=curr_nb_of_neighbour_bld,
residential_layout=curr_res_layout,
attic=curr_type_attic,
cellar=curr_type_cellar,
construction_type=new_curr_construction_type,
dormer=curr_dormer,
dhw_volumen=dhw_volumen,
do_normalization=do_normalization,
slp_manipulate=slp_manipulate,
curr_central_ahu=curr_central_ahu,
dhw_random=dhw_random,
prev_heat_dev=prev_heat_dev,
season_mod=season_mod)
elif curr_nb_of_apartments == 1: # Single-family house
building = generate_res_building_single_zone(environment,
net_floor_area=curr_nfa,
spec_th_demand=curr_spec_th_demand,
annual_el_demand=curr_el_e_demand,
th_gen_method=th_gen_method,
el_gen_method=el_gen_method,
use_dhw=use_dhw,
dhw_method=dhw_method,
number_occupants=curr_nb_of_occupants,
build_year=curr_build_year,
mod_year=curr_mod_year,
build_type=curr_build_type,
pv_use_area=curr_pv_roof_area,
height_of_floors=curr_avg_height_of_floors,
nb_of_floors=curr_nb_of_floors,
neighbour_buildings=curr_nb_of_neighbour_bld,
residential_layout=curr_res_layout,
attic=curr_type_attic,
cellar=curr_type_cellar,
construction_type=new_curr_construction_type,
dormer=curr_dormer,
dhw_volumen=dhw_volumen,
do_normalization=do_normalization,
slp_manipulate=slp_manipulate,
curr_central_ahu=curr_central_ahu,
dhw_random=dhw_random,
prev_heat_dev=prev_heat_dev,
season_mod=season_mod)
else:
raise AssertionError('Wrong number of apartments')
else: # Non-residential
method_3_str = None
method_4_str = None
# Convert curr_method numbers, if not None
if curr_method_3_nb is not None:
method_3_str = \
convert_method_3_nb_into_str(int(curr_method_3_nb))
if curr_method_4_nb is not None:
method_4_str = \
convert_method_4_nb_into_str(int(curr_method_4_nb))
building = generate_nonres_building_single_zone(environment,
th_slp_type=curr_th_slp_type,
net_floor_area=curr_nfa,
spec_th_demand=curr_spec_th_demand,
annual_el_demand=curr_el_e_demand,
el_slp_type=curr_el_slp_type,
build_year=curr_build_year,
mod_year=curr_mod_year,
build_type=curr_build_type,
pv_use_area=curr_pv_roof_area,
method_3_type=method_3_str,
method_4_type=method_4_str,
height_of_floors=curr_avg_height_of_floors,
nb_of_floors=curr_nb_of_floors
)
# Generate position shapely point
position = point.Point(curr_x, curr_y)
if generation_mode == 0:
# Add building to city object
id = city_object.add_extended_building(
extended_building=building,
position=position, name=curr_id)
elif generation_mode == 1:
# Add building as entity to corresponding building node
# Positions should be (nearly) equal
assert abs(position.x - city_object.nodes[int(curr_id)][
    'position'].x) <= 0.1
assert abs(position.y - city_object.nodes[int(curr_id)][
    'position'].y) <= 0.1
city_object.nodes[int(curr_id)]['entity'] = building
id = curr_id
# Save annual thermal net heat energy demand for space heating
# to dict (used for normalization with VDI 6007 core)
dict_id_vdi_sh[id] = curr_spec_th_demand * curr_nfa
print('Finished processing of building', curr_id)
print('#######################################################')
print()
# If only single building should be processed, break loop
if multi_data is False:
break
# #-------------------------------------------------------------
print('Added all buildings with data to city object.')
# VDI 6007 simulation to generate space heating load curves
# Overwrites existing heat load curves (and annual heat demands)
if th_gen_method == 3:
print('Perform VDI 6007 space heating load simulation for every'
' building')
if el_gen_method == 1:
# Skip usage of occupancy and electrical load profiles
# as internal loads within VDI 6007 core
requ_profiles = False
else:
requ_profiles = True
tusage.calc_and_add_vdi_6007_loads_to_city(city=city_object,
air_vent_mode=air_vent_mode,
vent_factor=vent_factor,
t_set_heat=t_set_heat,
t_set_cool=t_set_cool,
t_night=t_night,
alpha_rad=None,
project_name=project_name,
requ_profiles=requ_profiles)
# Set call_teaser to False, as it is already included
# in calc_and_add_vdi_6007_loads_to_city
call_teaser = False
if vdi_sh_manipulate:
# Normalize VDI 6007 load curves to match given annual
# thermal space heating energy demand
for n in city_object.nodes():
if 'node_type' in city_object.nodes[n]:
# If node_type is building
if city_object.nodes[n]['node_type'] == 'building':
# If entity is kind building
if city_object.nodes[n][
'entity']._kind == 'building':
# Given value (user input)
ann_sh = dict_id_vdi_sh[n]
# Building pointer
curr_b = city_object.nodes[n]['entity']
# Current value on object
curr_sh = curr_b.get_annual_space_heat_demand()
norm_factor = ann_sh / curr_sh
# Do normalization
# Loop over apartments
for apart in curr_b.apartments:
# Normalize apartment space heating load
apart.demandSpaceheating.loadcurve \
*= norm_factor
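        # Example of the normalization above (numbers illustrative): if
        # the user input demands 12000 kWh/a but the VDI 6007 run yields
        # 15000 kWh/a, norm_factor = 12000 / 15000 = 0.8 and every
        # apartment space heating load curve is scaled by 0.8.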
print('Generation results:')
print('###########################################')
for n in city_object.nodes():
if 'node_type' in city_object.nodes[n]:
if city_object.nodes[n]['node_type'] == 'building':
if 'entity' in city_object.nodes[n]:
if city_object.nodes[n]['entity']._kind == 'building':
print('Results of building: ', n)
print('################################')
print()
curr_b = city_object.nodes[n]['entity']
sh_demand = curr_b.get_annual_space_heat_demand()
el_demand = curr_b.get_annual_el_demand()
dhw_demand = curr_b.get_annual_dhw_demand()
nfa = curr_b.net_floor_area
print('Annual space heating demand in kWh:')
print(sh_demand)
if nfa is not None and nfa != 0:
print(
'Specific space heating demand in kWh/m2:')
print(sh_demand / nfa)
print()
print('Annual electric demand in kWh:')
print(el_demand)
if nfa is not None and nfa != 0:
print('Specific electric demand in kWh/m2:')
print(el_demand / nfa)
nb_occ = curr_b.get_number_of_occupants()
if nb_occ is not None and nb_occ != 0:
print('Specific electric demand in kWh'
' per person and year:')
print(el_demand / nb_occ)
print()
print('Annual hot water demand in kWh:')
print(dhw_demand)
if nfa is not None and nfa != 0:
print('Specific hot water demand in kWh/m2:')
print(dhw_demand / nfa)
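        # The statement below converts annual dhw energy from kWh to J
        # (* 1000 * 3600) and divides by c_p * delta_T, assuming
        # c_p = 4200 J/(kg*K) and a 35 K temperature spread, yielding an
        # annual water volume in kg (~ liters).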
volume_year = dhw_demand * 1000 * 3600 / (
4200 * 35)
volume_day = volume_year / 365
if nb_occ is not None and nb_occ != 0:
v_person_day = \
volume_day / nb_occ
print('Hot water volume per person and day:')
print(v_person_day)
print()
# Create and add TEASER type_buildings to every building node
if call_teaser:
# Create TEASER project
project = tusage.create_teaser_project(name=teaser_proj_name,
merge_windows=merge_windows)
# Generate typeBuildings and add to city
tusage.create_teaser_typecity(project=project,
city=city_object,
generate_Output=False)
if do_save: # pragma: no cover
if path_save_city is None:
if pickle_city_filename is None:
msg = 'If path_save_city is None, pickle_city_filename ' \
      'cannot be None! Instead, filename has to be ' \
      'defined to be able to save city object.'
raise AssertionError(msg)
this_path = os.path.dirname(os.path.abspath(__file__))
path_save_city = os.path.join(this_path, 'output',
pickle_city_filename)
try:
# Pickle and dump city objects
pickle.dump(city_object, open(path_save_city, 'wb'))
print('Pickled and dumped city object to: ')
print(path_save_city)
except Exception:
    warnings.warn('Could not pickle and save city object')
if do_log: # pragma: no cover
if pickle_city_filename is not None:
log_file.write('pickle_city_filename: ' +
str(pickle_city_filename)
+ '\n')
print('Wrote log file to: ' + str(log_path))
# Close log file
log_file.close()
# Visualize city
if show_city: # pragma: no cover
# Plot city district
try:
citvis.plot_city_district(city=city_object,
plot_street=False)
except Exception:
    warnings.warn('Could not plot city district.')
return city_object
if __name__ == '__main__':
this_path = os.path.dirname(os.path.abspath(__file__))
# User inputs #########################################################
# Choose generation mode
# ######################################################
# 0 - Use csv/txt input to generate city district
# 1 - Use csv/txt input file to enrich existing city object, based on
# osm call (city object should hold nodes, but no entities. City
# generator is going to add building, apartment and load entities to
# building nodes
generation_mode = 0
# Generate environment
# ######################################################
year_timer = 2017
year_co2 = 2017
timestep = 3600 # Timestep in seconds
# location = (51.529086, 6.944689) # (latitude, longitude) of Bottrop
location = (50.775346, 6.083887) # (latitude, longitude) of Aachen
altitude = 266 # Altitude of location in m (Aachen)
# Weather path
try_path = None
# If None, the default TRY (region 5, 2010) is used
new_try = False
# new_try has to be set to True, if you want to use TRY data of 2017
# or newer! Else: new_try = False
# Space heating load generation
# ######################################################
# Thermal generation method
# 1 - SLP (standardized load profile)
# 2 - Load and rescale Modelica simulation profile
# (generated with TRY region 12, 2010)
# 3 - VDI 6007 calculation (requires el_gen_method = 2)
th_gen_method = 3
# For non-residential buildings, SLPs are generated automatically.
# Manipulate thermal slp to fit to space heating demand?
slp_manipulate = False
# True - Do manipulation
# False - Use original profile
# Only relevant, if th_gen_method == 1
# Sets thermal power to zero in time spaces, where average daily outdoor
# temperature is equal to or larger than 12 °C. Rescales profile to
# original demand value.
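    # Hedged sketch of this manipulation (the actual helper lives
    # elsewhere in pycity_calc; names are assumptions):
    # import numpy as np
    # mask = t_out_daily_avg >= 12.0  # deg C, averaged per day
    # slp_profile[mask] = 0.0
    # # rescale so the annual energy matches the original demand (kWh)
    # slp_profile *= q_annual_orig / np.sum(slp_profile * timestep / 3600000)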
# Manipulate vdi space heating load to be normalized to given annual net
# space heating demand in kWh
vdi_sh_manipulate = False
# Electrical load generation
# ######################################################
# Choose electric load profile generation method (1 - SLP; 2 - Stochastic)
# Stochastic profile is only generated for residential buildings,
# which have a defined number of occupants (otherwise, SLP is used)
el_gen_method = 2
# If user defines method_3_nb or method_4_nb within the input file
# (only valid for non-residential buildings), SLP will not be used.
# Instead, corresponding profile will be loaded (based on measurement
# data, see ElectricalDemand.py within pycity)
# Do normalization of el. load profile
# (only relevant for el_gen_method=2).
# Rescales el. load profile to expected annual el. demand value in kWh
do_normalization = True
# Randomize electrical demand value (residential buildings, only)
el_random = True
# Prevent usage of electrical heating and hot water devices in
# electrical load generation (only relevant if el_gen_method == 2)
prev_heat_dev = True
# True: Prevent electrical heating device usage for profile generation
# False: Include electrical heating devices in electrical load generation
# Use cosine function to increase winter lighting usage and reduce
# summer lighting usage in Richardson el. load profiles
# season_mod is factor, which is used to rescale cosine wave with
# lighting power reference (max. lighting power)
season_mod = 0.3
# If None, do not use cosine wave to estimate seasonal influence
# Else: Define float
# (only relevant if el_gen_method == 2)
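    # Hedged illustration of the cosine rescaling idea (not the actual
    # richardsonpy implementation; variable names are assumptions):
    # import numpy as np
    # t = np.arange(8760)  # hourly timesteps of one year
    # wave = 1 + season_mod * np.cos(2 * np.pi * t / 8760)  # winter peak
    # light_profile_mod = light_profile * wave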
# Hot water profile generation
# ######################################################
# Generate DHW profiles? (True/False)
use_dhw = True # Only relevant for residential buildings
# DHW generation method? (1 - Annex 42; 2 - Stochastic profiles)
# Choice of Annex 42 profiles is NOT recommended for multiple buildings,
# as the profile stays the same and only its scaling changes.
# Stochastic profiles require defined nb of occupants per residential
# building
dhw_method = 2 # Only relevant for residential buildings
# Define dhw volume per person and day (use_dhw=True)
dhw_volumen = None # Only relevant for residential buildings
# Randomize chosen dhw_volume reference value by selecting new value
dhw_random = True
# Input file names and paths
# ######################################################
# Define input data filename
filename = 'city_3_buildings.txt'
# filename = 'city_clust_simple.txt'
# filename = 'aachen_forsterlinde_mod_6.txt'
# filename = 'aachen_frankenberg_mod_6.txt'
# filename = 'aachen_huenefeld_mod_6.txt'
# filename = 'aachen_kronenberg_mod_8.txt'
# filename = 'aachen_preusweg_mod_8.txt'
# filename = 'aachen_tuerme_mod_6.txt'
# Output filename
pickle_city_filename = filename[:-4] + '.pkl'
# For generation_mode == 1:
# city_osm_input = None
# city_osm_input = 'aachen_forsterlinde_mod_7.pkl'
city_osm_input = 'aachen_frankenberg_mod_7.pkl'
# city_osm_input = 'aachen_huenefeld_mod_7.pkl'
# city_osm_input = 'aachen_kronenberg_mod_7.pkl'
# city_osm_input = 'aachen_preusweg_mod_7.pkl'
# city_osm_input = 'aachen_tuerme_mod_7.pkl'
# Pickle and dump city object instance?
do_save = True
# Path to save city object instance to
path_save_city = None
# If None, uses .../output/...
# Efficiency factor of thermal energy systems
# Used to convert input values (final energy demand) to net energy demand
eff_factor = 1
# For VDI 6007 simulation (th_gen_method == 3)
# #####################################
t_set_heat = 20 # Heating set temperature in degree Celsius
t_set_night = 16 # Night set back temperature in degree Celsius
t_set_cool = 70 # Cooling set temperature in degree Celsius
# Air exchange rate (required for th_gen_method = 3 (VDI 6007 sim.))
air_vent_mode = 2
# int; Define mode for air ventilation rate generation
# 0 : Use constant value (vent_factor in 1/h)
# 1 : Use deterministic, temperature-dependent profile
# 2 : Use stochastic, user-dependent profile
vent_factor = 0.3  # Constant ventilation rate
# (only used if air_vent_mode is 0. Otherwise, vent_factor is estimated
# based on the last year of modernization)
# TEASER typebuilding generation
# ######################################################
# Use TEASER to generate typebuildings?
call_teaser = False
teaser_proj_name = filename[:-4]
# Requires additional attributes (such as nb_of_floors, net_floor_area..)
merge_windows = False
# merge_windows : bool, optional
# Defines TEASER project setting for merge_windows_calc
# (default: False). If set to False, merge_windows_calc is set to False.
# If True, Windows are merged into wall resistances.
txt_path = os.path.join(this_path, 'input', filename)
if generation_mode == 1:
path_city_osm_in = os.path.join(this_path, 'input', city_osm_input)
# Path for log file
log_file_name = 'log_' + filename
log_f_path = os.path.join(this_path, 'output', log_file_name)
# End of user inputs ################################################
print('Run city generator for ', filename)
assert generation_mode in [0, 1]
if generation_mode == 1:
assert city_osm_input is not None
if air_vent_mode == 1 or air_vent_mode == 2:
assert el_gen_method == 2, 'air_vent_mode 1 and 2 require occupancy' \
' profiles!'
# Load district_data file
district_data = get_district_data_from_txt(txt_path)
if generation_mode == 1:
# Load city input file
city_osm = pickle.load(open(path_city_osm_in, mode='rb'))
else:
# Dummy value
city_osm = None
# Generate city district
city = run_city_generator(generation_mode=generation_mode,
timestep=timestep,
year_timer=year_timer,
year_co2=year_co2,
location=location,
th_gen_method=th_gen_method,
el_gen_method=el_gen_method, use_dhw=use_dhw,
dhw_method=dhw_method,
district_data=district_data,
pickle_city_filename=pickle_city_filename,
eff_factor=eff_factor, show_city=True,
try_path=try_path, altitude=altitude,
dhw_volumen=dhw_volumen,
do_normalization=do_normalization,
slp_manipulate=slp_manipulate,
call_teaser=call_teaser,
teaser_proj_name=teaser_proj_name,
air_vent_mode=air_vent_mode,
vent_factor=vent_factor,
t_set_heat=t_set_heat,
t_set_cool=t_set_cool,
t_night=t_set_night,
vdi_sh_manipulate=vdi_sh_manipulate,
city_osm=city_osm, el_random=el_random,
dhw_random=dhw_random,
prev_heat_dev=prev_heat_dev,
log_path=log_f_path,
season_mod=season_mod,
merge_windows=merge_windows,
new_try=new_try,
path_save_city=path_save_city,
do_save=do_save)
| 44.482782
| 173
| 0.52153
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 61,217
| 0.469189
|
830541d7c666d087b745fabc733309dfe46fdeb0
| 14,092
|
py
|
Python
|
cpgan_data.py
|
basilevh/object-discovery-cp-gan
|
170cdcf14aa0b5f7258d15e177485ee4fd697afb
|
[
"MIT"
] | 14
|
2020-06-04T15:50:38.000Z
|
2021-10-03T02:59:54.000Z
|
cpgan_data.py
|
basilevh/object-discovery-cp-gan
|
170cdcf14aa0b5f7258d15e177485ee4fd697afb
|
[
"MIT"
] | null | null | null |
cpgan_data.py
|
basilevh/object-discovery-cp-gan
|
170cdcf14aa0b5f7258d15e177485ee4fd697afb
|
[
"MIT"
] | 1
|
2021-01-19T15:50:47.000Z
|
2021-01-19T15:50:47.000Z
|
# Basile Van Hoorick, March 2020
# Common code for PyTorch implementation of Copy-Pasting GAN
import copy
import itertools
import matplotlib.pyplot as plt
import numpy as np
import os, platform, time
import torch
import torch.nn as nn
import torch.nn.functional as F
import torchvision
import torchvision.transforms as transforms
from PIL import Image, ImageDraw
from torch.utils.data import Dataset
from tqdm import tqdm
def read_image_robust(img_path, monochromatic=False):
''' Returns an image that meets conditions along with a success flag, in order to avoid crashing. '''
try:
# image = plt.imread(img_path).copy()
image = np.array(Image.open(img_path)).copy() # always uint8
success = True
if np.any(np.array(image.strides) < 0):
success = False # still negative stride
elif not(monochromatic) and (image.ndim != 3 or image.shape[2] != 3):
success = False # not RGB
elif monochromatic:
# width, height = image.shape[1], image.shape[0]
# image = np.broadcast_to(x[:, :, np.newaxis], (height, width, 3))
image = image[:, :, np.newaxis] # one channel <=> only one ground truth
except IOError:
# Probably corrupt file
image = None
success = False
return image, success
def paint_squares(image, noisy=False, channels=10):
'''
Paints one or more squares at random locations to create an artificial foreground image.
Generates multiple associated ground truth masks; one per object.
'''
width, height = image.shape[1], image.shape[0]
image = image.copy() # do not overwrite background
object_count = np.random.randint(1, 5) # [1, 4] inclusive
masks = np.zeros((height, width, channels), dtype=np.uint8)
for i in range(object_count):
sq_w, sq_h = 9, 9
x1 = np.random.randint(0, width - sq_w + 1)
y1 = np.random.randint(0, height - sq_h + 1)
x2 = x1 + sq_w
y2 = y1 + sq_h
masks[y1:y2, x1:x2, i] = 255
if not(noisy):
# Pick one fixed (not necessarily saturated) color for the whole square
clr = np.random.randint(0, 256, 3)
image[y1:y2, x1:x2] = clr
else:
# Pick a random fully saturated (extremal) color for every pixel
image[y1:y2, x1:x2] = np.random.choice([0, 255], (sq_h, sq_w, 3))
return image, masks, object_count
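# Minimal usage sketch (the background array is synthetic, for
# illustration only):
# back = np.random.randint(0, 256, (64, 64, 3), dtype=np.uint8)
# fore, masks, count = paint_squares(back, noisy=False)
# # fore now contains 'count' painted 9x9 squares; masks[..., i] is the
# # 255-valued ground truth mask of object i.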
def create_random_gfake_mask(width, height):
''' See Appendix D. '''
x0, y0 = np.random.rand(2) * 0.8 + 0.1
num_verts = np.random.randint(4, 7)
# TODO possible improvement: allow up to more vertices?
# TODO possible improvement: encourage convex (currently many "sharp" objects)
radii = np.random.rand(num_verts) * 0.4 + 0.1
# radii = np.random.rand(num_verts) * 0.8 + 0.2 # TODO: not very clear from paper
angles = np.sort(np.random.rand(num_verts)) * 2.0 * np.pi
poly_polar = list(zip(radii, angles))
poly_cart = [(int(width * (x0 + r * np.cos(a))),
              int(height * (y0 + r * np.sin(a)))) for (r, a) in poly_polar]
# poly_cart = [(x1, y1), (x2, y2), ...]
img = Image.new('L', (width, height), 0)
ImageDraw.Draw(img).polygon(poly_cart, outline=1, fill=255)
mask = np.array(img, dtype='uint8')
assert(mask.shape == (height, width))
return mask
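# Minimal usage sketch: draws one random polygon mask, uint8, H x W,
# with 255 inside the polygon and 0 outside:
# gfake = create_random_gfake_mask(128, 96)
# assert gfake.shape == (96, 128) and gfake.dtype == np.uint8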
def apply_border_zero(masks):
ndim = len(masks.shape)
if ndim == 2:
masks[0, :] = 0
masks[-1, :] = 0
masks[:, 0] = 0
masks[:, -1] = 0
elif ndim == 3:
masks[:, 0, :] = 0
masks[:, -1, :] = 0
masks[:, :, 0] = 0
masks[:, :, -1] = 0
elif ndim == 4:
masks[:, :, 0, :] = 0
masks[:, :, -1, :] = 0
masks[:, :, :, 0] = 0
masks[:, :, :, -1] = 0
else:
raise ValueError('Mask has an unsupported number of dimensions')
return masks
def copy_paste(fores, masks, backs, border_zero=True):
# TODO possible improvement: poisson blending
# if hard_thres > 0:
# used_masks = (masks > hard_thres).float() # force binary
# else:
used_masks = masks.clone()
# Border zeroing implemented in April 2020
if border_zero:
used_masks = apply_border_zero(used_masks)
return used_masks * fores + (1.0 - used_masks) * backs
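# Minimal usage sketch with torch tensors (shapes are illustrative):
# fores = torch.rand(4, 3, 64, 64)  # foreground batch
# backs = torch.rand(4, 3, 64, 64)  # background batch
# masks = torch.rand(4, 1, 64, 64)  # soft masks in [0, 1]
# comp = copy_paste(fores, masks, backs)
# # comp equals fores where the (border-zeroed) mask is 1 and backs
# # where it is 0; soft mask values blend the two linearly.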
class MyCopyPasteDataset(Dataset):
'''
Custom dataset class with foreground, background, and optional mask folders as image sources.
Only one object may appear per image, since the object count is not kept track of.
Returns irrelevant foreground anti-shortcuts as well. Enforces color (RGB) images.
'''
def __init__(self, fore_dir, back_dir, mask_dir=None, rand_horz_flip=True, post_resize=-1, center_crop=False):
self.fore_dir = fore_dir
self.back_dir = back_dir
self.rand_horz_flip = rand_horz_flip
if post_resize <= 0:
self.post_tf = transforms.ToTensor() # converts [0, 255] to [0.0, 1.0]
elif center_crop:
# Resize + square center crop
self.post_tf = transforms.Compose([
transforms.ToPILImage(),
transforms.Resize(post_resize),
transforms.CenterCrop(post_resize),
transforms.ToTensor()
])
else:
# Resize both dimensions, possibly distorting the images
self.post_tf = transforms.Compose([
transforms.ToPILImage(),
transforms.Resize((post_resize, post_resize)),
transforms.ToTensor()
])
self.has_masks = (mask_dir is not None)
# Load all file paths; file names must be the same across foreground and segmentation masks
self.all_fore_files = []
self.all_mask_files = []
self.all_back_files = []
for fn in os.listdir(fore_dir):
fore_fp = os.path.join(fore_dir, fn)
self.all_fore_files.append(fore_fp)
if self.has_masks:
mask_fp_jpg = os.path.join(mask_dir, fn[:-4] + '.jpg')
mask_fp_png = os.path.join(mask_dir, fn[:-4] + '.png')
if os.path.isfile(mask_fp_jpg):
self.all_mask_files.append(mask_fp_jpg)
elif os.path.isfile(mask_fp_png):
self.all_mask_files.append(mask_fp_png)
else:
raise Exception('No matching mask file found for ' + fore_fp)
for fn in os.listdir(back_dir):
back_fp = os.path.join(back_dir, fn)
self.all_back_files.append(back_fp)
self.fore_count = len(self.all_fore_files)
self.back_count = len(self.all_back_files)
print('Image file count: ' + str(self.fore_count) + ' foreground, ' + str(self.back_count) + ' background, has masks: ' + str(self.has_masks))
def __len__(self):
return self.fore_count
def __getitem__(self, idx):
# Force randomness (especially if num_workers > 0)
np.random.seed(idx + int((time.time() * 654321) % 123456))
# Read random pair of images from file system
success = False
while not(success):
file_idx = np.random.choice(self.fore_count)
fp = self.all_fore_files[file_idx]
fore, success = read_image_robust(fp)
if not(success):
continue
if self.has_masks:
fp = self.all_mask_files[file_idx]
mask, success = read_image_robust(fp, monochromatic=True)
assert(success) # must match fore
# mask = ((mask > 0) * 255.0).astype('uint8') # convert soft masks to hard
else:
mask = None
# Read random background image
success = False
while not(success):
file_idx2 = np.random.choice(self.back_count)
fp = self.all_back_files[file_idx2]
back, success = read_image_robust(fp)
# Read irrelevant foreground image
success = False
while not(success):
file_idx3 = np.random.choice(self.fore_count)
if file_idx3 == file_idx:
continue # try again, cannot pick same image
fp = self.all_fore_files[file_idx3]
irrel, success = read_image_robust(fp)
# Transform foregrounds (+ masks) and backgrounds
# NOTE: identical random choices must be made for some images
if self.rand_horz_flip:
if np.random.rand() < 0.5:
fore = fore[:, ::-1, :].copy()
if self.has_masks:
mask = mask[:, ::-1, :].copy()
if np.random.rand() < 0.5:
irrel = irrel[:, ::-1, :].copy()
if np.random.rand() < 0.5:
back = back[:, ::-1, :].copy()
fore = self.post_tf(fore)
irrel = self.post_tf(irrel)
back = self.post_tf(back)
if self.has_masks:
mask = self.post_tf(mask)
# Verify sizes
assert(fore.shape[1:] == irrel.shape[1:])
assert(fore.shape[1:] == back.shape[1:])
if self.has_masks:
assert(fore.shape[1:] == mask.shape[1:])
# Create grounded fake mask and composite
width, height = fore.shape[2], fore.shape[1] # fore is (C, H, W)
gfake_mask = self.post_tf(create_random_gfake_mask(width, height))
comp_gfake = copy_paste(fore, gfake_mask, back)
# Construct dictionary; object count is unknown
result = {'fore': fore, 'back': back, 'irrel': irrel, 'object_cnt': 1, 'gfake_mask': gfake_mask, 'comp_gfake': comp_gfake}
if self.has_masks:
result['mask'] = mask # don't set None, otherwise crash
return result
class MySquaresDataset(Dataset):
'''
Custom dataset class with just a collection of background images as source.
One or more artificial objects are painted to create a foreground, keeping track of object count.
Returns irrelevant foreground anti-shortcuts as well. Enforces color (RGB) images.
'''
def __init__(self, back_dir, rand_horz_flip=True, noisy=False, max_objects=10):
self.back_dir = back_dir
self.rand_horz_flip = rand_horz_flip
self.post_tf = transforms.ToTensor() # converts [0, 255] to [0.0, 1.0]
self.noisy = noisy
self.max_objects = max_objects
# Load all file paths; file names must be the same across all 2 or 3 given directories
self.all_back_files = []
for fn in os.listdir(back_dir):
back_fp = os.path.join(back_dir, fn)
self.all_back_files.append(back_fp)
self.file_count = len(self.all_back_files)
print('Image file count: ' + str(self.file_count) + ', noisy: ' + str(self.noisy) + ', max objects: ' + str(self.max_objects))
def __len__(self):
return self.file_count
def __getitem__(self, idx):
# Read a random triplet (relevant + background + irrelevant) of non-overlapping backgrounds from file system
success = False
while not(success):
file_idx = np.random.choice(self.file_count)
fp = self.all_back_files[file_idx]
fore, success = read_image_robust(fp)
success = False
while not(success):
file_idx2 = np.random.choice(self.file_count)
if file_idx2 == file_idx:
continue # try again, cannot pick same image
fp = self.all_back_files[file_idx2]
back, success = read_image_robust(fp)
success = False
while not(success):
file_idx3 = np.random.choice(self.file_count)
if file_idx3 == file_idx or file_idx3 == file_idx2:
continue # try again, cannot pick same image
fp = self.all_back_files[file_idx3]
irrel, success = read_image_robust(fp)
# Create corresponding foregrounds and masks; leave actual background unchanged
fore, masks, object_cnt = paint_squares(fore, noisy=self.noisy, channels=self.max_objects)
irrel, _, _ = paint_squares(irrel, noisy=self.noisy, channels=self.max_objects)
# Transform foregrounds (+ masks) and backgrounds
# NOTE: identical random choices must be made for some images
if self.rand_horz_flip:
if np.random.rand() < 0.5:
fore = fore[:, ::-1, :].copy()
masks = masks[:, ::-1, :].copy()
if np.random.rand() < 0.5:
irrel = irrel[:, ::-1, :].copy()
if np.random.rand() < 0.5:
back = back[:, ::-1, :].copy()
fore = self.post_tf(fore)
masks = self.post_tf(masks)
irrel = self.post_tf(irrel)
back = self.post_tf(back)
# Create grounded fake mask and composite
width, height = fore.shape[2], fore.shape[1] # fore is (C, H, W)
gfake_mask = self.post_tf(create_random_gfake_mask(width, height))
comp_gfake = copy_paste(fore, gfake_mask, back)
# Construct dictionary
result = {'fore': fore, 'back': back, 'irrel': irrel, 'mask': masks, 'object_cnt': object_cnt, 'gfake_mask': gfake_mask, 'comp_gfake': comp_gfake}
return result
| 39.92068
| 154
| 0.590477
| 9,761
| 0.692663
| 0
| 0
| 0
| 0
| 0
| 0
| 4,107
| 0.291442
|
8305a58a05e7a9623ae618b46a183f5331e34e3b
| 3,207
|
py
|
Python
|
provision/env/lib/python3.6/site-packages/ansible/plugins/become/dzdo.py
|
brightkan/tukole-frontend
|
45e1d82a4ae5a65e88e7434f67d4d1a88f462e96
|
[
"MIT"
] | 1
|
2020-03-29T18:41:01.000Z
|
2020-03-29T18:41:01.000Z
|
ansible/ansible/plugins/become/dzdo.py
|
SergeyCherepanov/ansible
|
875711cd2fd6b783c812241c2ed7a954bf6f670f
|
[
"MIT"
] | 7
|
2020-09-07T17:27:56.000Z
|
2022-03-02T06:25:46.000Z
|
ansible/ansible/plugins/become/dzdo.py
|
SergeyCherepanov/ansible
|
875711cd2fd6b783c812241c2ed7a954bf6f670f
|
[
"MIT"
] | 1
|
2020-10-30T12:48:24.000Z
|
2020-10-30T12:48:24.000Z
|
# -*- coding: utf-8 -*-
# Copyright: (c) 2018, Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
DOCUMENTATION = """
become: dzdo
short_description: Centrify's Direct Authorize
description:
- This become plugin allows your remote/login user to execute commands as another user via the dzdo utility.
author: ansible (@core)
version_added: "2.8"
options:
become_user:
description: User you 'become' to execute the task
ini:
- section: privilege_escalation
key: become_user
- section: dzdo_become_plugin
key: user
vars:
- name: ansible_become_user
- name: ansible_dzdo_user
env:
- name: ANSIBLE_BECOME_USER
- name: ANSIBLE_DZDO_USER
become_exe:
description: Sudo executable
default: dzdo
ini:
- section: privilege_escalation
key: become_exe
- section: dzdo_become_plugin
key: executable
vars:
- name: ansible_become_exe
- name: ansible_dzdo_exe
env:
- name: ANSIBLE_BECOME_EXE
- name: ANSIBLE_DZDO_EXE
become_flags:
description: Options to pass to dzdo
default: -H -S -n
ini:
- section: privilege_escalation
key: become_flags
- section: dzdo_become_plugin
key: flags
vars:
- name: ansible_become_flags
- name: ansible_dzdo_flags
env:
- name: ANSIBLE_BECOME_FLAGS
- name: ANSIBLE_DZDO_FLAGS
become_pass:
description: Password to pass to dzdo
required: False
vars:
- name: ansible_become_password
- name: ansible_become_pass
- name: ansible_dzdo_pass
env:
- name: ANSIBLE_BECOME_PASS
- name: ANSIBLE_DZDO_PASS
ini:
- section: dzdo_become_plugin
key: password
"""
from ansible.plugins.become import BecomeBase
class BecomeModule(BecomeBase):
name = 'dzdo'
# messages for detecting prompted password issues
fail = ('Sorry, try again.',)
def build_become_command(self, cmd, shell):
super(BecomeModule, self).build_become_command(cmd, shell)
if not cmd:
return cmd
becomecmd = self.get_option('become_exe') or self.name
flags = self.get_option('become_flags') or ''
if self.get_option('become_pass'):
self._prompt = '[dzdo via ansible, key=%s] password:' % self._id
flags = '%s -p "%s"' % (flags.replace('-n', ''), self._prompt)
user = self.get_option('become_user') or ''
if user:
user = '-u %s' % (user)
return ' '.join([becomecmd, flags, user, self._build_success_command(cmd, shell)])
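# Hedged illustration of the command this builds (schematic, not verified
# output): with the default flags and become_user=root, the remote
# command becomes roughly
#   dzdo -H -S -n -u root <success-wrapped cmd>
# where the success wrapper comes from BecomeBase._build_success_command.
# If become_pass is set, '-n' is stripped and '-p "<prompt>"' is appended
# so the password prompt can be matched and answered.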
| 32.72449
| 131
| 0.558154
| 811
| 0.252884
| 0
| 0
| 0
| 0
| 0
| 0
| 2,434
| 0.758965
|
830713faff66a018b4d3b736c65a71173ebb4219
| 3,078
|
py
|
Python
|
templates/php/functionsTest.py
|
anconaesselmann/LiveUnit
|
8edebb49cb02fa898550cbafdf87af7fc22f106b
|
[
"MIT"
] | null | null | null |
templates/php/functionsTest.py
|
anconaesselmann/LiveUnit
|
8edebb49cb02fa898550cbafdf87af7fc22f106b
|
[
"MIT"
] | null | null | null |
templates/php/functionsTest.py
|
anconaesselmann/LiveUnit
|
8edebb49cb02fa898550cbafdf87af7fc22f106b
|
[
"MIT"
] | null | null | null |
import unittest
import os
if __name__ == '__main__' and __package__ is None:
from os import sys, path
sys.path.append(path.abspath(path.join(__file__, "..", "..")))
sys.path.append(path.abspath(path.join(__file__, "..", "..", "..", "classes_and_tests")))
from php.functions import *
from src.mocking.MockFileSystem import MockFileSystem
class PhpFunctionsTest(unittest.TestCase):
def test_get_doc_block_tag(self):
settings = "{\"author\": \"Axel\"}"
args = {"settings" : settings}
expected = "@author Axel"
fc = FunctionCollection()
result = fc.get_doc_block_tag(args)
self.assertEqual(expected, result)
def test_get_doc_block_tag_with_empty_value(self):
settings = "{\"author\": None}"
args = {"settings" : settings}
expected = None
fc = FunctionCollection()
result = fc.get_doc_block_tag(args)
self.assertEqual(expected, result)
def test_get_class_name(self):
args = {"dir" : path.join("Folder1", "Folder2", "FileName.php")}
expected = "FileName"
fc = FunctionCollection()
result = fc.get_class_name(args)
self.assertEqual(expected, result)
def test_get_py_package_name(self):
args = {"dir" : path.join(os.sep, "MyProject", "library", "aae", "mvc", "Controller.php")}
expected = path.join("aae\\mvc")
mockFileSystem = MockFileSystem()
mockFileSystem.createFile(path.join(os.sep, "MyProject", "libraryTest", "SomeFileTest.php"))
fc = FunctionCollection()
fc.fileSystem = mockFileSystem
result = fc.get_php_namespace(args)
self.assertEqual(expected, result)
"""def test_get_relative_autoloader_path(self):
settings = "{\"php_autoloader_dir\": \"relative/path/to/Autoloader.php\"}"
args = {"settings" : settings}
expected = "require_once strstr(__FILE__, 'Test', true).'/relative/path/to/Autoloader.php';"
result = FunctionCollection.get_php_autoloader(args)
self.assertEqual(expected, result)
def test_get_absolute_autoloader_path(self):
settings = "{\"php_autoloader_dir\": \"/absolute/path/to/Autoloader.php\"}"
args = {"settings" : settings}
expected = "require_once \"/absolute/path/to/Autoloader.php\";"
result = FunctionCollection.get_php_autoloader(args)
self.assertEqual(expected, result)
def test_getautoloader_path_with_no_value(self):
settings = "{\"php_autoloader_dir\": None}"
args = {"settings" : settings}
expected = None
result = FunctionCollection.get_php_autoloader(args)
self.assertEqual(expected, result)
def test_get_php_namespace(self):
settings = "{\"base_dir\": \"/MyProject/library\"}"
args = {"settings" : settings, "dir": "/MyProject/library/aae/mvc/Controller.php"}
expected = "aae\\mvc"
result = FunctionCollection.get_php_namespace(args)
self.assertEqual(expected, result)"""
if __name__ == '__main__':
unittest.main()
| 34.58427
| 100
| 0.649448
| 2,677
| 0.869721
| 0
| 0
| 0
| 0
| 0
| 0
| 1,611
| 0.523392
|
8307eb589ed701e9bef2d35aecb16eec594af392
| 5,629
|
py
|
Python
|
app.py
|
opeyemibami/decision_support_system
|
15ffdd795c8f2704b577a9c84db9dafb1fcf792d
|
[
"MIT"
] | 1
|
2021-10-31T13:07:24.000Z
|
2021-10-31T13:07:24.000Z
|
app.py
|
opeyemibami/decision_support_system
|
15ffdd795c8f2704b577a9c84db9dafb1fcf792d
|
[
"MIT"
] | null | null | null |
app.py
|
opeyemibami/decision_support_system
|
15ffdd795c8f2704b577a9c84db9dafb1fcf792d
|
[
"MIT"
] | 1
|
2022-02-03T13:12:59.000Z
|
2022-02-03T13:12:59.000Z
|
import sys
import numpy as np
import matplotlib.pyplot as plt
from PIL import Image
import efficientnet.keras as efn
import streamlit as st
import SessionState
from skimage.transform import resize
import skimage
import skimage.filters
import reportgenerator
import style
from keras.models import Model, load_model
st.set_option('deprecation.showPyplotGlobalUse', False)
model = load_model('classifier.h5')
st.markdown(
f"""
<style>
.reportview-container .main .block-container{{
max-width: {1000}px;
padding-top: {5}rem;
padding-right: {0}rem;
padding-left: {0}rem;
padding-bottom: {0}rem;
}}
.reportview-container .main {{
}}
[data-testid="stImage"] img {{
margin: 0 auto;
max-width: 500px;
}}
</style>
""",
unsafe_allow_html=True,
)
# main panel
logo = Image.open('dss_logo.png')
st.image(logo, width=None)
style.display_app_header(main_txt='Gleason Score Prediction for Prostate Cancer',
                         sub_txt='The intensity of prostate cancer metastasis assessed using artificial intelligence', is_sidebar=False)
# session state
ss = SessionState.get(page='home', run_model=False)
st.markdown('**Upload biopsy image to analyze**')
st.write('')
uploaded_file = st.file_uploader("Choose an image...", type=['png', 'jpg'])
med_opinion_list = ["The cancer cells look like healthy cells and PSA levels are low. However, cancer in this early stage is usually slow growing.",
                    "Well differentiated cells and PSA levels are medium. This stage also includes larger tumors found only in the prostate, as long as the cancer cells are still well differentiated. ",
                    "Moderately differentiated cells and the PSA level is medium. The tumor is found only inside the prostate, and it may be large enough to be felt during DRE.",
                    "Moderately or poorly differentiated cells and the PSA level is medium. The tumor is found only inside the prostate, and it may be large enough to be felt during DRE.",
                    "Poorly differentiated cells. The cancer has spread beyond the outer layer of the prostate into nearby tissues. It may also have spread to the seminal vesicles. The PSA level is high.",
                    "Poorly differentiated cells. The tumor has grown outside of the prostate gland and may have invaded nearby structures, such as the bladder or rectum.",
                    "Poorly differentiated cells. The cancer cells across the tumor are poorly differentiated, meaning they look very different from healthy cells.",
                    "Poorly differentiated cells. The cancer has spread to the regional lymph nodes.",
                    "Poorly differentiated cells. The cancer has spread to distant lymph nodes, other parts of the body, or to the bones.",
                    ]
if uploaded_file is not None:
# uploaded_file.read()
image = Image.open(uploaded_file)
st.image(image, caption='Biopsy image', use_column_width=True)
im_resized = image.resize((224, 224))
im_resized = resize(np.asarray(im_resized), (224, 224, 3))
# grid section
col1, col2, col3 = st.columns(3)
col1.header('Resized Image')
col1.image(im_resized, caption='Biopsy image', use_column_width=False)
with col2:
st.header('Gray Image')
gray_image = skimage.color.rgb2gray(im_resized)
st.image(gray_image, caption='preprocessed image',
use_column_width=False)
with col3:
st.header('Spotted Pattern')
gray_image = skimage.color.rgb2gray(im_resized)
blur = skimage.filters.gaussian(gray_image, sigma=1.5)
# perform Otsu (global) thresholding
t = skimage.filters.threshold_otsu(blur)
mask = blur > t
sel = np.zeros_like(im_resized)
sel[mask] = im_resized[mask]
st.image(sel, caption='preprocessed image', use_column_width=False)
preds = model.predict(np.expand_dims(im_resized, 0))
data = (preds[0]*100).round(2)
isup_data = [data[0], data[1], data[2], data[3],
data[4]+data[5]+data[6], data[7]+data[8]+data[9]]
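    # The six ISUP grade groups above aggregate the ten Gleason patterns:
    # ISUP 0 -> 0+0 (benign), 1 -> 3+3, 2 -> 3+4, 3 -> 4+3,
    # 4 -> 4+4 / 3+5 / 5+3 (Gleason 8), 5 -> 4+5 / 5+4 / 5+5 (Gleason 9-10).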
gleason_label = ['0+0', '3+3', '3+4', '4+3',
'4+4', '3+5', '5+3', '4+5', '5+4', '5+5']
gleason_colors = ['yellowgreen', 'red', 'gold', 'lightskyblue',
'cyan', 'lightcoral', 'blue', 'pink', 'darkgreen', 'yellow']
isup_label = ['0', '1', '2', '3', '4', '5']
isup_colors = ['yellowgreen', 'gold', 'lightskyblue', 'cyan', 'lightcoral', 'blue']
col1, col2 = st.columns(2)
with col1:
reportgenerator.visualize_confidence_level(data, label=gleason_label, ylabel='GleasonScore Pattern Scale',
title='GleasonScore Prediction ')
with col2:
reportgenerator.pieChart(data, label=gleason_label, colors=gleason_colors,
title='GleasonScore Prediction Distribution', startangle=120)
col1, col2 = st.columns(2)
with col1:
reportgenerator.pieChart(isup_data, label=isup_label, colors=isup_colors,
title='ISUP Pattern Scale Prediction Distribution', startangle=45)
with col2:
reportgenerator.visualize_confidence_level(isup_data, label=isup_label, ylabel='ISUP Pattern Scale',
title='ISUP Prediction')
# med_opinion_list has 9 entries for 10 Gleason classes, so clamp the
# index to avoid an IndexError when '5+5' is the most likely class
opinion = min(list(data).index(max(list(data))), len(med_opinion_list) - 1)
style.display_app_header(main_txt='Medical Report Proposition:',
sub_txt=med_opinion_list[opinion], is_sidebar=False)
| 45.032
| 203
| 0.650382
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 2,568
| 0.456209
|
83080191fabbc152072cd0019bf81fd6f737d375
| 7,129
|
py
|
Python
|
richardson_extrapolation.py
|
PrabalChowdhury/CSE330-NUMERICAL-METHODS
|
aabfea01f4ceaecfbb50d771ee990777d6e1122c
|
[
"MIT"
] | null | null | null |
richardson_extrapolation.py
|
PrabalChowdhury/CSE330-NUMERICAL-METHODS
|
aabfea01f4ceaecfbb50d771ee990777d6e1122c
|
[
"MIT"
] | null | null | null |
richardson_extrapolation.py
|
PrabalChowdhury/CSE330-NUMERICAL-METHODS
|
aabfea01f4ceaecfbb50d771ee990777d6e1122c
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""Richardson-Extrapolation.ipynb
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/1oNlSL2Vztk9Fc7tMBgPcL82WGaUuCY-A
Before you turn this problem in, make sure everything runs as expected. First, **restart the kernel** (in the menubar, select Kernel$\rightarrow$Restart) and then **run all cells** (in the menubar, select Cell$\rightarrow$Run All).
Make sure you fill in any place that says `YOUR CODE HERE` or "YOUR ANSWER HERE", as well as your name and collaborators below:
"""
NAME = "Prabal Chowdhury"
COLLABORATORS = ""
"""---
## CSE330 Lab: Richardson Extrapolation
---
## Instructions
Today's assignment is to:
1. Implement Richardson Extrapolation method using Python
## Richardson Extrapolation:
We used the central difference method to calculate derivatives of functions in the last task. In this task we will use Richardson extrapolation to get a more accurate result.
Let,
$$ D_h = \frac{f(x_1+h) -f(x_1-h)}{2h}\tag{5.1}$$
General Taylor Series formula:
$$ f(x) = f(x_1) + f'(x_1)(x - x_1) + \frac{f''(x_1)}{2}(x - x_1)^2+... $$
Using Taylor's theorem to expand we get,
\begin{align}
f(x_1+h) &= f(x_1) + f^{\prime}(x_1)h + \frac{f^{\prime \prime}(x_1)}{2}h^2 + \frac{f^{\prime \prime \prime}(x_1)}{3!}h^3 + \frac{f^{(4)}(x_1)}{4!}h^4 + \frac{f^{(5)}(x_1)}{5!}h^5 + O(h^6)\tag{5.2} \\
f(x_1-h) &= f(x_1) - f^{\prime}(x_1)h + \frac{f^{\prime \prime}(x_1)}{2}h^2 - \frac{f^{\prime \prime \prime}(x_1)}{3!}h^3 + \frac{f^{(4)}(x_1)}{4!}h^4 - \frac{f^{(5)}(x_1)}{5!}h^5 + O(h^6)\tag{5.3}
\end{align}
Subtracting $5.3$ from $5.2$ we get,
$$ f(x_1+h) - f(x_1-h) = 2f^{\prime}(x_1)h + 2\frac{f^{\prime \prime \prime}(x_1)}{3!}h^3 + 2\frac{f^{(5)}(x_1)}{5!}h^5 + O(h^7)\tag{5.4}$$
So,
\begin{align}
D_h &= \frac{f(x_1+h) - f(x_1-h)}{2h} \\
&= \frac{1}{2h} \left( 2f^{\prime}(x_1)h + 2\frac{f^{\prime \prime \prime}(x_1)}{3!}h^3 + 2\frac{f^{(5)}(x_1)}{5!}h^5 + O(h^7) \right) \\
&= f^{\prime}(x_1) + \frac{f^{\prime \prime \prime}(x_1)}{6}h^2 + \frac{f^{(5)}(x_1)}{120}h^4 + O(h^6) \tag{5.5}
\end{align}
We get our derivative $f'(x)$ plus some error terms of order $\geq 2$. Now, we want to bring our error order up to 4.
If we use $h, \text{and} \frac{h}{2}$ as step size in $5.5$, we get,
\begin{align}
D_h &= f^{\prime}(x_1) + f^{\prime \prime \prime}(x_1)\frac{h^2}{6} + f^{(5)}(x_1) \frac{h^4}{120} + O(h^6) \tag{5.6} \\
D_{h/2} &= f^{\prime}(x_1) + f^{\prime \prime \prime}(x_1)\frac{h^2}{2^2 . 6} + f^{(5)}(x_1) \frac{h^4}{2^4 . 120} + O(h^6) \tag{5.7}
\end{align}
Multiplying $5.7$ by $4$ and subtracting from $5.6$ we get,
\begin{align}
D_h - 4D_{h/2} &= -3f^{\prime}(x) + f^{(5)}(x_1) \frac{h^4}{160} + O(h^6)\\
\Longrightarrow D^{(1)}_h = \frac{4D_{h/2} - D_h}{3} &= f^{\prime}(x) - f^{(5)}(x_1) \frac{h^4}{480} + O(h^6) \tag{5.8}
\end{align}
Let's calculate the derivative using $5.8$
### 1. Let's import the necessary headers
"""
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from numpy.polynomial import Polynomial
"""### 2. Let's create a function named `dh(f, h, x)`
function `dh(f, h, x)` takes three parameters as input: a function `f`, a value `h`, and a set of values `x`. It returns the derivatives of the function at each elements of array `x` using the Central Difference method. This calculates equation $(5.1)$.
"""
def dh(f, h, x):
'''
Input:
f: np.polynomial.Polynomial type data.
h: floating point data.
x: np.array type data.
Output:
return np.array type data of slope at each point x.
'''
# --------------------------------------------
return (f(x+h) - f(x-h)) / (2*h)
# --------------------------------------------
"""### 3. Let's create another funtion `dh1(f, h, x)`.
`dh1(f, h, x)` takes the same type of values as `dh(f, h, x)` as input. It calculates the derivative using previously defined `dh(f, h, x)` function and using equation $5.8$ and returns the values.
"""
def dh1(f, h, x):
'''
Input:
        f: np.polynomial.Polynomial type data.
h: floating point data.
x: np.array type data.
Output:
return np.array type data of slope at each point x.
'''
# --------------------------------------------
# YOUR CODE HERE
return (4 * dh(f, h/2, x) - dh(f, h, x)) / 3
# --------------------------------------------
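# Quick sanity check (a sketch, not part of the assignment): for p(x) = x^3
# the fifth derivative vanishes, so dh1 should recover p'(2) = 12 exactly:
#   p = Polynomial([0, 0, 0, 1])
#   dh1(p, 0.5, np.array([2.0]))   # -> array([12.])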
"""### 4. Now let's create the `error(f, hs, x_i)` function
The `error(f, hs, x_i)` function takes a function `f` as input. It also takes a list of different values of h as `hs` and a specific value as `x_i` as input. It calculates the derivatives as point `x_i` using both functions described in **B** and **C**, i.e. `dh` and `dh1`
"""
def error(f, hs, x_i):  # compare dh() (central difference) and dh1() (Richardson) against the exact derivative
'''
Input:
        f : np.polynomial.Polynomial type data.
hs : np.array type data. list of h.
x_i: floating point data. single value of x.
Output:
return two np.array type data of errors by two methods..
'''
    f_prime = f.deriv(1)  # first derivative f'(x)
Y_actual = f_prime(x_i)
diff_error = []
diff2_error = []
    for h in hs:  # iterate over the step sizes in hs
# for each values of hs calculate the error using both methods
# and append those values into diff_error and diff2_error list.
# --------------------------------------------
        # YOUR CODE HERE
        # note: use the loop variable h, not the whole array hs
        e1 = Y_actual - dh(f, h, x_i)
        diff_error.append(e1)
        e2 = Y_actual - dh1(f, h, x_i)
        diff2_error.append(e2)
# --------------------------------------------
print(pd.DataFrame({"h": hs, "Diff": diff_error, "Diff2": diff2_error}))
return diff_error, diff2_error
"""### 5. Finally let's run some tests
A helper function to draw the actual function:
"""
def draw_graph(f, ax, domain=[-10, 10], label=None):
    data = f.linspace(domain=domain)
    ax.plot(data[0], data[1], label=label)
"""### Draw the polynomial and it's actual derivative function"""
fig, ax = plt.subplots()
ax.axhline(y=0, color='k')
p = Polynomial([2.0, 1.0, -6.0, -2.0, 2.5, 1.0])
p_prime = p.deriv(1)
draw_graph(p, ax, [-2.4, 1.5], 'Function')
draw_graph(p_prime, ax, [-2.4, 1.5], 'Derivative')
ax.legend()
"""### Draw the actual derivative and richardson derivative using `h=1` and `h=0.1` as step size."""
fig, ax = plt.subplots()
ax.axhline(y=0, color='k')
draw_graph(p_prime, ax, [-2.4, 1.5], 'actual')
h = 1
x = np.linspace(-2.4, 1.5, 50, endpoint=True)
y = dh1(p, h, x)
ax.plot(x, y, label='Richardson; h=1')
h = 0.1
x = np.linspace(-2.4, 1.5, 50, endpoint=True)
y = dh1(p, h, x)
ax.plot(x, y, label='Richardson; h=0.1')
ax.legend()
"""### Draw error-vs-h cuve"""
fig, ax = plt.subplots()
ax.axhline(y=0, color='k')
hs = np.array([1., 0.55, 0.3, .17, 0.1, 0.055, 0.03, 0.017, 0.01])
e1, e2 = error(p, hs, 2.0)
ax.plot(hs, e1, label='e1')
ax.plot(hs, e2, label='e2')
ax.legend()
# File: src/mlshell/blocks/pipeline/steps.py (repo: nizaevka/mlshell, license: Apache-2.0)
"""The :mod:`mlshell.pipeline.steps` contains unified pipeline steps."""
import inspect
import mlshell
import numpy as np
import pandas as pd
import sklearn
import sklearn.impute
import sklearn.compose
__all__ = ['Steps']
class Steps(object):
"""Unified pipeline steps.
Parameters
----------
estimator : :mod:`sklearn` estimator
Estimator to use in the last step.
If ``estimator_type=regressor``:
``sklearn.compose.TransformedTargetRegressor(regressor=`estimator`)``
If ``estimator_type=classifier`` and ``th_step=True``:
``sklearn.pipeline.Pipeline(steps=[
('predict_proba',
mlshell.model_selection.PredictionTransformer(`estimator`)),
('apply_threshold',
mlshell.model_selection.ThresholdClassifier(threshold=0.5,
kwargs='auto')),
])``
If ``estimator_type=classifier`` and ``th_step=False``:
``sklearn.pipeline.Pipeline(steps=[('classifier', `estimator`)])``
    estimator_type : str {'classifier', 'regressor'}, optional (default=None)
Either regression or classification task. If None, get from
:func:`sklearn.base.is_classifier` on ``estimator``.
    th_step : bool
        If True and ``estimator_type=classifier``, the
        ``mlshell.model_selection.ThresholdClassifier`` sub-step is added;
        otherwise it is ignored.
Notes
-----
    Assembling the steps in a class is done for convenience. Use the ``steps``
    property to access them after initialization. Only the OneHot encoder and
    imputer steps are initially activated.
    By default, 4 parameters await resolution ('auto'):
'process_parallel__pipeline_categoric__select_columns__kw_args'
'process_parallel__pipeline_numeric__select_columns__kw_args'
'estimate__apply_threshold__threshold'
'estimate__apply_threshold__params'
Set corresponding parameters with ``set_params()`` to overwrite default in
created pipeline or use :class:`mlshell.model_selection.Resolver` .
    The 'pass_custom' step allows brute-forcing arbitrary parameters in a
    uniform style with pipeline hyper-parameters (as if the scorer contained
    additional nested loops). The step name is hard-coded and can not be
    changed.
    The 'apply_threshold' step allows grid searching classification thresholds
    as a pipeline hyper-parameter.
    The 'estimate' step should be the last one.
"""
_required_parameters = ['estimator', 'estimator_type']
def __init__(self, estimator, estimator_type=None, th_step=False):
if estimator_type is None:
estimator_type = 'classifier' if sklearn.base.is_classifier(estimator)\
else 'regressor'
self._steps = [
('pass_custom', mlshell.preprocessing.FunctionTransformer(func=self.scorer_kwargs, validate=False, skip=True, kw_args={})),
('select_rows', mlshell.preprocessing.FunctionTransformer(func=self.subrows, validate=False, skip=True)),
('process_parallel', sklearn.pipeline.FeatureUnion(transformer_list=[
('pipeline_categoric', sklearn.pipeline.Pipeline(steps=[
('select_columns', mlshell.preprocessing.FunctionTransformer(self.subcolumns, validate=False, skip=False, kw_args='auto')), # {'indices': dataset.meta['categoric_ind_name']}
('encode_onehot', mlshell.preprocessing.OneHotEncoder(handle_unknown='ignore', categories='auto', sparse=False, drop=None, skip=False)), # x could be [].
])),
('pipeline_numeric', sklearn.pipeline.Pipeline(steps=[
('select_columns', mlshell.preprocessing.FunctionTransformer(self.subcolumns, validate=False, skip=False, kw_args='auto')), # {'indices': dataset.meta['numeric_ind_name']}
('impute', sklearn.pipeline.FeatureUnion([
('indicators', sklearn.impute.MissingIndicator(missing_values=np.nan, error_on_new=False)),
('gaps', sklearn.impute.SimpleImputer(missing_values=np.nan, strategy='constant', fill_value=0, copy=True)),
])),
('transform_normal', mlshell.preprocessing.PowerTransformer(method='yeo-johnson', standardize=False, copy=False, skip=True)),
('scale_row_wise', mlshell.preprocessing.FunctionTransformer(func=None, validate=False, skip=True)),
('scale_column_wise', sklearn.preprocessing.RobustScaler(quantile_range=(0, 100), copy=False)),
('add_polynomial', sklearn.preprocessing.PolynomialFeatures(degree=1, include_bias=False)), # x => degree=1 => x, x => degree=0 => []
('compose_columns', sklearn.compose.ColumnTransformer([
("discretize", sklearn.preprocessing.KBinsDiscretizer(n_bins=5, encode='onehot-dense', strategy='quantile'), self.bining_mask)], sparse_threshold=0, remainder='passthrough'))
])),
])),
('select_columns', sklearn.feature_selection.SelectFromModel(estimator=CustomSelector(estimator_type=estimator_type, verbose=False, skip=True), prefit=False)),
('reduce_dimensions', mlshell.decomposition.PCA(random_state=42, skip=True)),
('estimate', self.last_step(estimator, estimator_type, th_step=th_step)),
]
def last_step(self, estimator, estimator_type, th_step):
"""Prepare estimator step."""
if estimator_type == 'regressor':
last_step =\
sklearn.compose.TransformedTargetRegressor(regressor=estimator)
elif estimator_type == 'classifier' and th_step:
last_step = sklearn.pipeline.Pipeline(steps=[
('predict_proba',
mlshell.model_selection.PredictionTransformer(
estimator)),
('apply_threshold',
mlshell.model_selection.ThresholdClassifier(
params='auto', threshold=None)),
])
elif estimator_type == 'classifier' and not th_step:
last_step = sklearn.pipeline.Pipeline(steps=[('classifier',
estimator)])
else:
raise ValueError(f"Unknown estimator type `{estimator_type}`.")
if sklearn.base.is_classifier(estimator=last_step)\
^ (estimator_type == "classifier"):
raise TypeError(f"{self.__class__.__name__}:"
f"{inspect.stack()[0][3]}:"
f" wrong estimator type: {last_step}")
return last_step
@property
def steps(self):
"""list : access steps to pass in `sklearn.pipeline.Pipeline` ."""
return self._steps
def scorer_kwargs(self, x, **kw_args):
"""Mock function to custom kwargs setting.
Parameters
----------
x : :class:`numpy.ndarray` or :class:`pandas.DataFrame`
Features of shape [n_samples, n_features].
**kw_args : dict
Step parameters. Could be extracted from pipeline in scorer if
needed.
Returns
-------
result: :class:`numpy.ndarray` or :class:`pandas.DataFrame`
Unchanged ``x``.
"""
return x
def subcolumns(self, x, **kw_args):
"""Get sub-columns from x.
Parameters
----------
x : :class:`numpy.ndarray` or :class:`pandas.DataFrame`
Features of shape [n_samples, n_features].
**kw_args : dict
Columns indices to extract: {'indices': array-like}.
Returns
-------
result: :class:`numpy.ndarray` or :class:`pandas.DataFrame`
Extracted sub-columns of ``x``.
"""
indices = kw_args['indices']
if isinstance(x, pd.DataFrame):
return x.iloc[:, indices]
else:
return x[:, indices]
def subrows(self, x):
"""Get rows from x."""
# For example to delete outlier/anomalies.
return x
def bining_mask(self, x):
"""Get features indices which need bining."""
# Use slice(0, None) to get all.
return []
class CustomSelector(sklearn.base.BaseEstimator):
"""Custom feature selector template."""
def __init__(self, estimator_type='classifier', verbose=True,
skip=False):
self.skip = skip
self.verbose = verbose
self.feature_importances_ = None
self.estimator_type = estimator_type
super().__init__()
if not self.skip:
raise NotImplementedError
def fit(self, x, y):
if self.skip:
self.feature_importances_ = np.full(x.shape[1], fill_value=1)
return self
# TODO: some logic
self.feature_importances_ = np.full(x.shape[1], fill_value=1)
return self
if __name__ == '__main__':
pass
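    # A minimal usage sketch (a sketch only, not library code; the `indices`
    # values below are hypothetical):
    #
    #   import sklearn.linear_model
    #   steps = Steps(sklearn.linear_model.LogisticRegression(),
    #                 estimator_type='classifier', th_step=False)
    #   pipe = sklearn.pipeline.Pipeline(steps=steps.steps)
    #   pipe.set_params(
    #       process_parallel__pipeline_categoric__select_columns__kw_args={'indices': [0]},
    #       process_parallel__pipeline_numeric__select_columns__kw_args={'indices': [1, 2]},
    #   )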
# File: paste/application/repositories.py (repo: Afonasev/Paste, license: MIT)
from datetime import datetime
import peewee
from paste import domain
from . import db
class AbstractRepository(domain.IRepository):
_model = NotImplemented
_entity = NotImplemented
def count(self):
return self._model.count()
def save(self, entity):
model = _entity_to_model(entity)
if model.pk is None:
model.created_at = datetime.utcnow()
model.updated_at = datetime.utcnow()
model.save()
return _model_to_entity(model)
def get(self, **kw):
try:
return _model_to_entity(self._model.get(**kw))
except peewee.DoesNotExist:
raise domain.DoesNotExist('%s: %s' % (self._entity, kw))
def find(self, page, size, **kw):
if kw:
for k, v in kw.items():
if isinstance(v, domain.Entity):
kw[k] = v.pk
query = self._model.filter(**kw)
else:
query = self._model.select()
return [_model_to_entity(i) for i in query.paginate(page, size)]
def delete(self, entity):
_entity_to_model(entity).delete_instance()
class UserRepository(AbstractRepository):
_model = db.User
_entity = domain.User
class SnippetRepository(AbstractRepository):
_model = db.Snippet
_entity = domain.Snippet
def _by_object(obj):
name = obj.__class__.__name__
fields = ('pk', 'created_at', 'updated_at')
if name == 'User':
return domain.User, db.User, fields + ('name', 'passhash')
if name == 'Snippet':
fields += ('author', 'name', 'syntax', 'raw', 'html')
return domain.Snippet, db.Snippet, fields
raise NotImplementedError
def _entity_to_model(entity):
_, model_cls, fields = _by_object(entity)
attrs = {}
for field in fields:
value = getattr(entity, field)
if isinstance(value, domain.Entity):
value = value.pk
attrs[field] = value
return model_cls(**attrs)
def _model_to_entity(model):
entity_cls, _, fields = _by_object(model)
attrs = {}
for f in fields:
value = getattr(model, f)
if isinstance(value, db.AbstractModel):
value = _model_to_entity(value)
attrs[f] = value
return entity_cls(**attrs)
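# A minimal usage sketch (hypothetical values; assumes the peewee models in
# `db` are bound to a connected database). The keyword arguments mirror the
# field lists in _by_object():
#
#   users = UserRepository()
#   alice = users.save(domain.User(pk=None, created_at=None, updated_at=None,
#                                  name='alice', passhash='<hash>'))
#   same = users.get(name='alice')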
# File: example/example_nursery.py (repo: airysen/racog, license: MIT)
# Dataset https://archive.ics.uci.edu/ml/datasets/Nursery
import numpy as np
import pandas as pd
from collections import Counter
from sklearn.model_selection import train_test_split, GridSearchCV, StratifiedKFold
from imblearn.metrics import geometric_mean_score
from sklearn.metrics import mean_squared_error, make_scorer, roc_auc_score, log_loss
from imblearn.over_sampling import SMOTE, RandomOverSampler
from imblearn.under_sampling import RandomUnderSampler
from sklearn.preprocessing import OneHotEncoder, LabelBinarizer, LabelEncoder
from sklearn.ensemble import RandomForestClassifier
from racog import RACOG
RS = 334
nurseryurl = 'https://archive.ics.uci.edu/ml/machine-learning-databases/nursery/nursery.data'
attribute_list = ['parents', 'has_nurs', 'form', 'children',
'housing', 'finance', 'social', 'health', 'target']
nursery = pd.read_csv(nurseryurl, header=None, names=attribute_list)
LE = LabelEncoder()
X = nursery.drop('target', axis=1)
y = nursery['target']
ii = y[y == 'recommend'].index.values
X.drop(ii, inplace=True)
y.drop(ii, inplace=True)
for col in X:
if X[col].dtype == 'object':
X[col] = LE.fit_transform(X[col])
X = X.values
LE = LabelEncoder()
y = LE.fit_transform(y)
rf = RandomForestClassifier()
params = {'class_weight': 'balanced',
'criterion': 'entropy',
'max_depth': 15,
'max_features': 0.9,
'min_samples_leaf': 11,
'min_samples_split': 2,
'min_weight_fraction_leaf': 0,
'n_estimators': 30}
rf.set_params(**params)
gscore = make_scorer(geometric_mean_score, average='multiclass')
def gmean(y_true, y_pred):
return geometric_mean_score(y_true, y_pred, average='multiclass')
strf = StratifiedKFold(n_splits=3, shuffle=True, random_state=RS)
count = 0
for train_index, test_index in strf.split(X, y):
print(Counter(y[test_index]), Counter(y[train_index]))
    # swap train/test: fit on the smaller (1/3) fold and evaluate on the remaining 2/3
X_train, X_test, y_train, y_test = X[test_index], X[train_index], y[test_index], y[train_index]
rf.set_params(**params)
rf.fit(X_train, y_train)
y_pred = rf.predict(X_test)
print('#####################################################')
print('Count', count)
print('')
print('Without oversampling | Gmean:', gmean(y_test, y_pred))
rnd_over = RandomOverSampler(random_state=RS + count)
X_rndo, y_rndo = rnd_over.fit_sample(X_train, y_train)
print('')
rf.fit(X_rndo, y_rndo)
y_pred = rf.predict(X_test)
print('Random oversampling | Gmean:', gmean(y_test, y_pred))
smote = SMOTE(random_state=RS + count, kind='regular', k_neighbors=5, m=None,
m_neighbors=10, n_jobs=1)
X_smote, y_smote = smote.fit_sample(X_train, y_train)
rf.fit(X_smote, y_smote)
y_pred = rf.predict(X_test)
print('')
print('SMOTE oversampling | Gmean:', gmean(y_test, y_pred))
racog = RACOG(categorical_features='all',
warmup_offset=100, lag0=20, n_iter='auto',
threshold=10, eps=10E-5, verbose=0, n_jobs=1)
X_racog, y_racog = racog.fit_sample(X_train, y_train)
rf.fit(X_racog, y_racog)
y_pred = rf.predict(X_test)
print('RACOG oversampling | Gmean:', gmean(y_test, y_pred))
print('')
count = count + 1
# File: channels/piratestreaming.py (repo: sodicarus/channels, license: MIT)
# -*- coding: utf-8 -*-
# StreamOnDemand Community Edition - Kodi Addon
# ------------------------------------------------------------
# streamondemand.- XBMC Plugin
# piratestreaming channel
# http://www.mimediacenter.info/foro/viewforum.php?f=36
# ------------------------------------------------------------
import re
import urlparse
from core import config, httptools
from platformcode import logger
from core import scrapertools
from core import servertools
from core.item import Item
from core.tmdb import infoSod
__channel__ = "piratestreaming"
host = "https://www.piratestreaming.watch/"
def mainlist(item):
logger.info()
itemlist = [Item(channel=__channel__,
title="[COLOR azure]Film[/COLOR]",
action="peliculas",
extra="movie",
url="%s/category/films/" % host,
thumbnail="http://orig03.deviantart.net/6889/f/2014/079/7/b/movies_and_popcorn_folder_icon_by_matheusgrilo-d7ay4tw.png"),
Item(channel=__channel__,
title="[COLOR yellow]Cerca...[/COLOR]",
action="search",
extra="movie",
thumbnail="http://dc467.4shared.com/img/fEbJqOum/s7/13feaf0c8c0/Search"),
Item(channel=__channel__,
title="[COLOR azure]Serie TV[/COLOR]",
extra="serie",
action="peliculas_tv",
url="%s/category/serie/" % host,
thumbnail="http://orig03.deviantart.net/6889/f/2014/079/7/b/movies_and_popcorn_folder_icon_by_matheusgrilo-d7ay4tw.png"),
Item(channel=__channel__,
title="[COLOR azure]Anime[/COLOR]",
extra="serie",
action="peliculas_tv",
url="%s/category/anime-cartoni-animati/" % host,
thumbnail="http://orig03.deviantart.net/6889/f/2014/079/7/b/movies_and_popcorn_folder_icon_by_matheusgrilo-d7ay4tw.png"),
Item(channel=__channel__,
title="[COLOR yellow]Cerca SerieTV...[/COLOR]",
action="search",
extra="serie",
thumbnail="http://dc467.4shared.com/img/fEbJqOum/s7/13feaf0c8c0/Search")]
return itemlist
def peliculas(item):
logger.info()
itemlist = []
    # Load the page
data = httptools.downloadpage(item.url).data
    # Extract the contents
patron = 'data-placement="bottom" title="(.*?)" alt=[^=]+="([^"]+)"> <img'
matches = re.compile(patron, re.DOTALL).findall(data)
for scrapedtitle, scrapedurl in matches:
scrapedthumbnail = ""
scrapedplot = ""
scrapedtitle = scrapertools.decodeHtmlentities(scrapedtitle).strip()
itemlist.append(infoSod(
Item(channel=__channel__,
action="findvideos",
contentType="movie",
fulltitle=scrapedtitle,
show=scrapedtitle,
title="[COLOR azure]" + scrapedtitle + "[/COLOR]",
url=scrapedurl,
thumbnail=scrapedthumbnail,
plot=scrapedplot,
extra=item.extra,
folder=True), tipo='movie'))
    # Pagination
patronvideos = '<a\s*class="nextpostslink" rel="next" href="([^"]+)">Avanti'
matches = re.compile(patronvideos, re.DOTALL).findall(data)
    if len(matches) > 0:
        scrapedurl = urlparse.urljoin(item.url, matches[0])
        itemlist.append(
            Item(channel=__channel__,
                 action="HomePage",
                 title="[COLOR yellow]Torna Home[/COLOR]",
                 folder=True))
itemlist.append(
Item(channel=__channel__,
action="peliculas",
title="[COLOR orange]Successivo >>[/COLOR]",
url=scrapedurl,
thumbnail="http://2.bp.blogspot.com/-fE9tzwmjaeQ/UcM2apxDtjI/AAAAAAAAeeg/WKSGM2TADLM/s1600/pager+old.png",
folder=True))
return itemlist
def peliculas_tv(item):
logger.info()
itemlist = []
    # Load the page
data = httptools.downloadpage(item.url).data
    # Extract the contents
patron = 'data-placement="bottom" title="(.*?)" alt=[^=]+="([^"]+)"> <img'
matches = re.compile(patron, re.DOTALL).findall(data)
for scrapedtitle, scrapedurl in matches:
scrapedthumbnail = ""
scrapedplot = ""
scrapedtitle = scrapertools.decodeHtmlentities(scrapedtitle).strip()
itemlist.append(infoSod(
Item(channel=__channel__,
action="episodios",
fulltitle=scrapedtitle,
show=scrapedtitle,
title="[COLOR azure]" + scrapedtitle + "[/COLOR]",
url=scrapedurl,
thumbnail=scrapedthumbnail,
plot=scrapedplot,
extra=item.extra,
folder=True), tipo='tv'))
    # Pagination
patronvideos = '<a\s*class="nextpostslink" rel="next" href="([^"]+)">Avanti'
matches = re.compile(patronvideos, re.DOTALL).findall(data)
    if len(matches) > 0:
        scrapedurl = urlparse.urljoin(item.url, matches[0])
        itemlist.append(
            Item(channel=__channel__,
                 action="HomePage",
                 title="[COLOR yellow]Torna Home[/COLOR]",
                 folder=True))
itemlist.append(
Item(channel=__channel__,
action="peliculas_tv",
title="[COLOR orange]Successivo >>[/COLOR]",
url=scrapedurl,
thumbnail="http://2.bp.blogspot.com/-fE9tzwmjaeQ/UcM2apxDtjI/AAAAAAAAeeg/WKSGM2TADLM/s1600/pager+old.png",
folder=True))
return itemlist
def HomePage(item):
import xbmc
xbmc.executebuiltin("ReplaceWindow(10024,plugin://plugin.video.streamondemand)")
def search(item, texto):
logger.info("[piratestreaming.py] " + item.url + " search " + texto)
item.url = host + "/?s=" + texto
try:
if item.extra == "movie":
return peliculas(item)
if item.extra == "serie":
return peliculas_tv(item)
    # Continue the search in case of error
except:
import sys
for line in sys.exc_info():
logger.error("%s" % line)
return []
def episodios(item):
itemlist = []
data = httptools.downloadpage(item.url).data
patron = 'link-episode">(.*?)<\/span> <a\s*ref="nofollow" target=[^=]+="([^"]+)"'
matches = re.compile(patron, re.DOTALL).findall(data)
for scrapedtitle, scrapedurl in matches:
scrapedtitle = scrapertools.decodeHtmlentities(scrapedtitle).strip()
scrapedtitle = re.sub(r'\s+', ' ', scrapedtitle)
scrapedtitle = scrapedtitle.replace(" -", "")
scrapedtitle = scrapedtitle.replace("<strong>", "")
scrapedtitle = scrapedtitle.replace("</strong>", " ")
itemlist.append(
Item(channel=__channel__,
action="findvid_serie",
contentType="episode",
title=scrapedtitle,
url=scrapedurl,
thumbnail=item.thumbnail,
extra=item.extra,
fulltitle=scrapedtitle,
show=item.show))
if config.get_library_support() and len(itemlist) != 0:
itemlist.append(
Item(channel=__channel__,
title="[COLOR yellow]""Aggiungi alla libreria""[/COLOR]",
url=item.url,
action="add_serie_to_library",
extra="episodios",
show=item.show))
return itemlist
def findvid_serie(item):
logger.info()
itemlist = servertools.find_video_items(data=item.url)
for videoitem in itemlist:
videoitem.title = "".join([item.title, '[COLOR green][B]' + videoitem.title + '[/B][/COLOR]'])
videoitem.fulltitle = item.fulltitle
videoitem.show = item.show
videoitem.thumbnail = item.thumbnail
videoitem.channel = __channel__
return itemlist
# File: test/IECore/LayeredDictTest.py (repo: gcodebackups/cortex-vfx, license: BSD-3-Clause)
##########################################################################
#
# Copyright (c) 2008-2010, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# * Neither the name of Image Engine Design nor the names of any
# other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import unittest
import IECore
class LayeredDictTest( unittest.TestCase ) :
def testDict( self ) :
dict1 = {
"a" : 10,
"b" : {
"c" : 20,
"d" : 30,
},
"e" : 40,
}
dict2 = {
"a" : 20,
"b" : {
"c" : 100,
"f" : {
"g" : 1000,
},
"h" : 1
},
}
d = IECore.LayeredDict( [ dict1, dict2 ] )
self.assertEqual( d["a"], 10 )
self.assertEqual( d["b"]["c"], 20 )
self.assertEqual( d["b"]["d"], 30 )
self.assertEqual( d["b"]["f"]["g"], 1000 )
self.assertEqual( d["e"], 40 )
self.assertEqual( d["b"]["h"], 1 )
self.assertRaises( KeyError, d.__getitem__, "z" )
def testCompoundObject( self ) :
dict1 = IECore.CompoundObject(
{
"a" : IECore.IntData( 10 ),
"b" : {
"c" : IECore.IntData( 20 ),
"d" : IECore.IntData( 30 ),
},
"e" : IECore.IntData( 40 ),
}
)
dict2 = IECore.CompoundObject(
{
"a" : IECore.IntData( 20 ),
"b" : {
"c" : IECore.IntData( 100 ),
"f" : {
"g" : IECore.IntData( 1000 ),
},
"h" : IECore.IntData( 1 )
},
}
)
d = IECore.LayeredDict( [ dict1, dict2 ] )
self.assertEqual( d["a"], IECore.IntData( 10 ) )
self.assertEqual( d["b"]["c"], IECore.IntData( 20 ) )
self.assertEqual( d["b"]["d"], IECore.IntData( 30 ) )
self.assertEqual( d["b"]["f"]["g"], IECore.IntData( 1000 ) )
self.assertEqual( d["e"], IECore.IntData( 40 ) )
self.assertEqual( d["b"]["h"], IECore.IntData( 1 ) )
self.assertRaises( KeyError, d.__getitem__, "z" )
def testKeys( self ) :
dict1 = {
"a" : 10,
"b" : {
"c" : 20,
"d" : 30,
},
"e" : 40,
}
dict2 = IECore.CompoundObject(
{
"a" : IECore.IntData( 20 ),
"b" : {
"c" : IECore.IntData( 100 ),
"f" : {
"g" : IECore.IntData( 1000 ),
},
"h" : IECore.IntData( 1 )
},
"i" : IECore.IntData( 1 )
}
)
d = IECore.LayeredDict( [ dict1, dict2 ] )
self.assertEqual( set( d.keys() ), set( [ "a", "b", "e", "i" ] ) )
self.assertEqual( set( d["b"].keys() ), set( [ "c", "d", "f", "h" ] ) )
def testContains( self ) :
dict1 = {
"a" : 10,
"b" : {
},
"e" : 40,
}
dict2 = IECore.CompoundObject(
{
"b" : IECore.CompoundObject(),
"i" : IECore.IntData( 1 )
}
)
d = IECore.LayeredDict( [ dict1, dict2 ] )
self.assert_( "a" in d )
self.assert_( "b" in d )
self.assert_( "e" in d )
self.assert_( "i" in d )
self.assert_( not "x" in d )
def testGet( self ) :
dict1 = {
"a" : 10,
"e" : 40,
}
dict2 = IECore.CompoundObject(
{
"a" : IECore.StringData( "hello" ),
"b" : IECore.FloatData( 10 ),
"i" : IECore.IntData( 1 )
}
)
d = IECore.LayeredDict( [ dict1, dict2 ] )
self.assertEqual( d.get( "a", None ), 10 )
self.assertEqual( d.get( "b", None ), IECore.FloatData( 10 ) )
self.assertEqual( d.get( "i", None ), IECore.IntData( 1 ) )
self.assertEqual( d.get( "e", None ), 40 )
self.assertEqual( d.get( "x", 11 ), 11 )
def testLayerEditing( self ) :
dict1 = {
"a" : 10,
"e" : 40,
}
dict2 = IECore.CompoundObject(
{
"a" : IECore.StringData( "hello" ),
"b" : IECore.FloatData( 10 ),
"i" : IECore.IntData( 1 )
}
)
layers = [ dict1, dict2 ]
d = IECore.LayeredDict( layers )
self.failUnless( d.layers is layers )
self.assertEqual( d["a"], 10 )
layers.insert( 0, { "a" : 100 } )
self.assertEqual( d["a"], 100 )
if __name__ == "__main__":
unittest.main()
# File: www/courses/cs1120/spring2017/code/day10.py (repo: ic4f/sergey.cs.uni.edu, license: Unlicense)
def makePic():
file = pickAFile()
return makePicture(file)
def decreaseRed(picture):
for pixel in getPixels(picture):
setRed(pixel, getRed(pixel) * 0.2)
repaint(picture)
def decreaseRed2(picture):
pixels = getPixels(picture)
for i in range(len(pixels)):
pixel = pixels[i]
setRed(pixel, getRed(pixel) * 0.2)
repaint(picture)
def decreaseRedHalf(picture):
    pixels = getPixels(picture)
    # range() needs an int: operate on roughly the first half of the pixels
    for i in range(int((len(pixels)/2) * 0.9)):
        pixel = pixels[i]
        setRed(pixel, getRed(pixel) * 0.2)
    repaint(picture)
# Paint the picture in three equal runs of pixels with the Dutch flag colours.
def makeNetherlands(picture):
pixels = getPixels(picture)
color1 = makeColor(174,28,40)
color2 = makeColor(255, 255, 255)
color3 = makeColor(33,70,139)
point1 = len(pixels)/3
point2 = point1 * 2
point3 = len(pixels)
for i in range(0, point1):
pixel = pixels[i]
setColor(pixel, color1)
print i
for i in range(point1, point2):
pixel = pixels[i]
setColor(pixel, color2)
print i
for i in range(point2, point3):
pixel = pixels[i]
setColor(pixel, color3)
print i
repaint(picture)
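# Usage sketch inside the JES environment (JES supplies pickAFile, makePicture,
# getPixels, setRed, setColor, makeColor and repaint as builtins):
#   picture = makePic()
#   decreaseRed(picture)
#   makeNetherlands(makePic())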
# File: nuplan/database/utils/boxes/box.py (repo: MCZhi/nuplan-devkit, license: Apache-2.0)
from __future__ import annotations
import abc
from typing import Any, Dict
class BoxInterface(abc.ABC):
""" Interface for box. """
@property # type: ignore
@abc.abstractmethod
def label(self) -> int:
"""
Label id.
:return: Label id.
"""
pass
@label.setter # type: ignore
@abc.abstractmethod
def label(self, label: int) -> None:
"""
Sets label id.
:param label: label id.
"""
pass
@property # type: ignore
@abc.abstractmethod
def score(self) -> float:
"""
Classification score.
:return: Classification score.
"""
pass
@score.setter # type: ignore
@abc.abstractmethod
def score(self, score: float) -> None:
"""
Sets classification score.
:param score: Classification score.
"""
pass
@abc.abstractmethod
def serialize(self) -> Dict[str, Any]:
"""
Serializes the box instance to a JSON-friendly vector representation.
:return: Encoding of the box.
"""
pass
@classmethod
@abc.abstractmethod
def deserialize(cls, data: Dict[str, Any]) -> BoxInterface:
"""
        Instantiates a box instance from its serialized vector representation.
:param data: Output from serialize.
:return: Deserialized box.
"""
pass
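# A minimal concrete sketch (hypothetical; not part of nuplan) showing how an
# implementation of BoxInterface is expected to round-trip through
# serialize()/deserialize():
class _ExampleBox(BoxInterface):
    """ Toy box carrying only a label and a score. """

    def __init__(self, label: int, score: float) -> None:
        self._label = label
        self._score = score

    @property
    def label(self) -> int:
        return self._label

    @label.setter
    def label(self, label: int) -> None:
        self._label = label

    @property
    def score(self) -> float:
        return self._score

    @score.setter
    def score(self, score: float) -> None:
        self._score = score

    def serialize(self) -> Dict[str, Any]:
        return {'label': self._label, 'score': self._score}

    @classmethod
    def deserialize(cls, data: Dict[str, Any]) -> '_ExampleBox':
        return cls(label=data['label'], score=data['score'])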
# File: tests/test_training.py (repo: Hilly12/masters-code, license: MIT)
import torch
import prifair as pf
N_SAMPLES = 10000
VAL_SAMPLES = 1000
STUDENT_SAMPLES = 5000
INPUTS = 1000
OUTPUTS = 5
BATCH_SIZE = 256
MAX_PHYSICAL_BATCH_SIZE = 128
EPSILON = 2.0
DELTA = 1e-5
MAX_GRAD_NORM = 1.0
N_TEACHERS = 4
N_GROUPS = 10
EPOCHS = 2
class MockModel(torch.nn.Module):
def __init__(self):
super(MockModel, self).__init__()
self.model = torch.nn.Sequential(
torch.nn.Linear(in_features=INPUTS, out_features=OUTPUTS),
torch.nn.LogSoftmax(dim=1),
)
def forward(self, x):
return self.model(x)
X = torch.randn(N_SAMPLES + VAL_SAMPLES, INPUTS)
Y = torch.randint(0, OUTPUTS, (N_SAMPLES + VAL_SAMPLES,))
student = torch.randn(STUDENT_SAMPLES, INPUTS)
groups = torch.randint(0, N_GROUPS, (N_SAMPLES,))
weights = torch.ones(N_SAMPLES) / N_SAMPLES
train_data = torch.utils.data.TensorDataset(X[:N_SAMPLES], Y[:N_SAMPLES])
val_data = torch.utils.data.TensorDataset(X[N_SAMPLES:], Y[N_SAMPLES:])
student_data = torch.utils.data.TensorDataset(student, torch.zeros(STUDENT_SAMPLES))
train_loader = torch.utils.data.DataLoader(train_data, batch_size=BATCH_SIZE)
val_loader = torch.utils.data.DataLoader(val_data, batch_size=BATCH_SIZE)
student_loader = torch.utils.data.DataLoader(student_data, batch_size=BATCH_SIZE)
model_class = MockModel
optim_class = torch.optim.NAdam
criterion = torch.nn.NLLLoss()
def test_vanilla():
model, metrics = pf.training.train_vanilla(
train_loader=train_loader,
val_loader=val_loader,
model_class=model_class,
optim_class=optim_class,
loss_fn=criterion,
epochs=EPOCHS,
)
assert model is not None and metrics is not None
def test_dpsgd():
model, metrics = pf.training.train_dpsgd(
train_loader=train_loader,
val_loader=val_loader,
model_class=model_class,
optim_class=optim_class,
loss_fn=criterion,
target_epsilon=EPSILON,
target_delta=DELTA,
max_grad_norm=MAX_GRAD_NORM,
epochs=EPOCHS,
max_physical_batch_size=MAX_PHYSICAL_BATCH_SIZE,
)
assert model is not None and metrics is not None
def test_dpsgd_weighted():
model, metrics = pf.training.train_dpsgd_weighted(
train_loader=train_loader,
val_loader=val_loader,
model_class=model_class,
optim_class=optim_class,
loss_fn=criterion,
target_epsilon=EPSILON,
target_delta=DELTA,
max_grad_norm=MAX_GRAD_NORM,
epochs=EPOCHS,
max_physical_batch_size=MAX_PHYSICAL_BATCH_SIZE,
weighting="sensitive_attr",
labels=groups.numpy(),
)
assert model is not None and metrics is not None
model, metrics = pf.training.train_dpsgd_weighted(
train_loader=train_loader,
val_loader=val_loader,
model_class=model_class,
optim_class=optim_class,
loss_fn=criterion,
target_epsilon=EPSILON,
target_delta=DELTA,
max_grad_norm=MAX_GRAD_NORM,
epochs=EPOCHS,
max_physical_batch_size=MAX_PHYSICAL_BATCH_SIZE,
weighting="custom",
weights=weights,
)
assert model is not None and metrics is not None
def test_dpsgdf():
model, metrics = pf.training.train_dpsgdf(
train_loader=train_loader,
val_loader=val_loader,
model_class=model_class,
optim_class=optim_class,
loss_fn=criterion,
target_epsilon=EPSILON,
target_delta=DELTA,
base_clipping_threshold=MAX_GRAD_NORM,
epochs=EPOCHS,
group_labels=groups,
max_physical_batch_size=MAX_PHYSICAL_BATCH_SIZE,
)
assert model is not None and metrics is not None
def test_pate():
model, metrics = pf.training.train_pate(
train_loader=train_loader,
val_loader=val_loader,
student_loader=student_loader,
model_class=model_class,
optim_class=optim_class,
loss_fn=criterion,
n_teachers=N_TEACHERS,
target_epsilon=EPSILON,
target_delta=DELTA,
epochs=EPOCHS,
)
assert model is not None and metrics is not None
def test_reweighed_sft_pate():
model, metrics = pf.training.train_reweighed_sftpate(
train_loader=train_loader,
val_loader=val_loader,
student_loader=student_loader,
model_class=model_class,
optim_class=optim_class,
loss_fn=criterion,
n_teachers=N_TEACHERS,
target_epsilon=EPSILON,
target_delta=DELTA,
epochs=EPOCHS,
weights=weights,
)
assert model is not None and metrics is not None
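# These are smoke tests (they only assert that a model and metrics come back);
# run them with, e.g.:
#   python -m pytest tests/test_training.py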
# File: problem/01000~09999/02164/2164.py3.py (repo: njw1204/BOJ-AC, license: MIT)
# BOJ 2164: repeatedly throw away the top card, then move the new top card to
# the bottom; print the last remaining card.
from collections import deque
n, x = int(input()), deque()
for i in range(1, n+1):
    x.append(i)
while len(x) > 1:
    x.popleft()            # discard the top card
    if len(x) == 1:
        break
    x.append(x.popleft())  # move the next card to the bottom
print(x.pop())
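# Worked example: n = 6 starts as 1 2 3 4 5 6; the discard/move cycle leaves
# card 4, so the program prints 4.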
# File: craft_ai/timezones.py (repo: craft-ai/craft-ai-client-python, license: BSD-3-Clause)
import re
_TIMEZONE_REGEX = re.compile(r"^([+-](2[0-3]|[01][0-9])(:?[0-5][0-9])?|Z)$")
TIMEZONES = {
"UTC": "+00:00",
"GMT": "+00:00",
"BST": "+01:00",
"IST": "+01:00",
"WET": "+00:00",
"WEST": "+01:00",
"CET": "+01:00",
"CEST": "+02:00",
"EET": "+02:00",
"EEST": "+03:00",
"MSK": "+03:00",
"MSD": "+04:00",
"AST": "-04:00",
"ADT": "-03:00",
"EST": "-05:00",
"EDT": "-04:00",
"CST": "-06:00",
"CDT": "-05:00",
"MST": "-07:00",
"MDT": "-06:00",
"PST": "-08:00",
"PDT": "-07:00",
"HST": "-10:00",
"AKST": "-09:00",
"AKDT": "-08:00",
"AEST": "+10:00",
"AEDT": "+11:00",
"ACST": "+09:30",
"ACDT": "+10:30",
"AWST": "+08:00",
}
def is_timezone(value):
# Valid time zone range is -12:00 (-720 min) and +14:00 (+840 min)
# cf. https://en.wikipedia.org/wiki/List_of_UTC_time_offsets
if isinstance(value, int) and value <= 840 and value >= -720:
return True
if not isinstance(value, str):
return False
if value in TIMEZONES:
return True
result_reg_exp = _TIMEZONE_REGEX.match(value) is not None
return result_reg_exp
def get_timezone_key(configuration):
for key in configuration:
if configuration[key]["type"] == "timezone":
return key
return None
def timezone_offset_in_sec(timezone):
if isinstance(timezone, int):
# If the offset belongs to [-15, 15] it is considered to represent hours.
# This reproduces Moment's utcOffset behaviour.
if timezone > -16 and timezone < 16:
return timezone * 60 * 60
return timezone * 60
if timezone in TIMEZONES:
timezone = TIMEZONES[timezone]
if len(timezone) > 3:
timezone = timezone.replace(":", "")
offset = (int(timezone[-4:-2]) * 60 + int(timezone[-2:])) * 60
else:
offset = (int(timezone[-2:]) * 60) * 60
if timezone[0] == "-":
offset = -offset
return offset
def timezone_offset_in_standard_format(timezone):
if isinstance(timezone, int):
sign = "+" if timezone >= 0 else "-"
absolute_offset = abs(timezone)
if absolute_offset < 16:
return "%s%02d:00" % (sign, absolute_offset)
return "%s%02d:%02d" % (
sign,
int(absolute_offset / 60),
int(absolute_offset % 60),
)
return timezone
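# A few illustrative values (derived from the rules above):
#   timezone_offset_in_sec("+02:00")        ->  7200
#   timezone_offset_in_sec("CET")           ->  3600   (via the TIMEZONES table)
#   timezone_offset_in_sec(-5)              -> -18000  (|offset| < 16 is read as hours)
#   timezone_offset_in_standard_format(90)  -> "+01:30" (>= 16 is read as minutes)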
# File: gpvdm_gui/gui/json_fdtd.py (repo: roderickmackenzie/gpvdm, license: BSD-3-Clause)
#
# General-purpose Photovoltaic Device Model - a drift diffusion base/Shockley-Read-Hall
# model for 1st, 2nd and 3rd generation solar cells.
# Copyright (C) 2008-2022 Roderick C. I. MacKenzie r.c.i.mackenzie at googlemail.com
#
# https://www.gpvdm.com
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License v2.0, as published by
# the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
## @package json_fdtd
#  Store the fdtd domain json data
#
import sys
import os
import shutil
import json
from json_base import json_base
class json_fdtd_simulation(json_base):
def __init__(self):
json_base.__init__(self,"fdtd_segment")
self.var_list=[]
self.var_list.append(["english_name","FDTD (beta)"])
self.var_list.append(["icon","fdtd"])
self.var_list.append(["fdtd_lambda_start",520e-9])
self.var_list.append(["fdtd_lambda_stop",700e-9])
self.var_list.append(["fdtd_lambda_points",1])
self.var_list.append(["use_gpu",False])
self.var_list.append(["max_ittr",100000])
self.var_list.append(["zlen",1])
self.var_list.append(["xlen",60])
self.var_list.append(["ylen",60])
self.var_list.append(["xsize",8.0])
self.var_list.append(["lam_jmax",12])
self.var_list.append(["plot",1])
self.var_list.append(["fdtd_xzy","zy"])
self.var_list.append(["dt",1e-19])
self.var_list.append(["id",self.random_id()])
self.var_list_build()
class json_fdtd(json_base):
def __init__(self):
json_base.__init__(self,"fdtd",segment_class=True,segment_example=json_fdtd_simulation())
# File: gru/plugins/base/inventory.py (repo: similarweb/gru, license: BSD-2-Clause-FreeBSD)
from . import BasePlugin
from gru.config import settings
class Host(object):
def __init__(self, host_id, host_data=None):
self.host_id = host_id
if host_data:
self.host_data = host_data
else:
self.host_data = {}
    def __repr__(self):
        return 'Host(host_id="{}", host_data={})'.format(
            self.host_id,
            self.host_data
        )
def __str__(self):
return self.__repr__()
def get_identifier(self):
return self.host_id
def get_display_name(self):
display_name = self.field(settings.get('inventory.host_display_name_field'), None)
if not display_name:
display_name = self.host_id
return display_name
def field(self, field_name, default=None):
"""
Return a (possibly nested) field
:param field_name: the name of the field to return. If it contains periods ("."), a nested lookup will be
performed
:param default: a default value to return if the field is not found
:return: The value matching the field name inside the host data
"""
        try:
            return self.host_data[field_name]
        except KeyError:
            pass
        parts = field_name.split('.')
        current_val = self.host_data
        for part in parts:
            try:
                current_val = current_val[part]
            except (KeyError, TypeError):
                return default
        return current_val
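# Nested lookup sketch for Host.field (hypothetical host data):
#   h = Host('web-01', {'labels': {'datacenter': 'us-east-1'}})
#   h.field('labels.datacenter')      -> 'us-east-1'
#   h.field('labels.missing', 'n/a')  -> 'n/a'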
class HostCategory(object):
def __init__(self, category, group, count=0):
self.category = category
self.group = group
self.count = count
def __repr__(self):
return 'HostCategory(category="{}", group="{}", count={})'.format(
self.category,
self.group,
self.count
)
def __str__(self):
return self.__repr__()
class HostList(object):
def __init__(self, hosts=None, total_hosts=0):
if hosts:
self.hosts = hosts
else:
self.hosts = []
self.total_hosts = total_hosts
def __repr__(self):
return 'HostList(hosts={}..., total_hosts={})'.format(
self.hosts[:5],
self.total_hosts
)
def append(self, host):
self.hosts.append(host)
def __str__(self):
return self.__repr__()
def __iter__(self):
for host in self.hosts:
yield host
def __nonzero__(self):
return self.hosts.__nonzero__()
def __len__(self):
return len(self.hosts)
class InventoryProvider(BasePlugin):
def host_group_breakdown(self, category):
"""
Returns a list of groups belonging to a category.
Example: if category = "datacenter", an expected return value would be ["us-east-1", "us-west-2"]
:param category: A category string to aggregate by
:return: A list of HostCategory objects
"""
raise NotImplementedError('override me')
def list(self, category, group, sort_by=None, from_ind=None, to_ind=None):
"""
Filter by a field and value.
Example: provider.list("datacenter", "us-east-1") will return all Hosts in the us-east-1 datacenter
:param category: Category to filter by (i.e. "datacenter")
:param group: group to filter by (i.e. "us-east-1")
:param sort_by: optional, a string representing a host attribute to sort by. hostname, for example
:param from_ind: to support pagination, you may return only a subset of the results. this is the start index
:param to_ind: to support pagination, you may return only a subset of the results. this is the end index
:return: a list of Host objects
"""
raise NotImplementedError('override me')
def host_search(self, query, from_ind=None, to_ind=None):
"""
Given a query string, perform a search of hosts
:param query: a query string to perform the lookup by
:param from_ind: to support pagination, you may return only a subset of the results. this is the start index
:param to_ind: to support pagination, you may return only a subset of the results. this is the end index
:return: a list of Host objects
"""
raise NotImplementedError('override me')
def get_host_by_id(self, host_id):
"""
Return a Host object by its ID
:param host_id: a host ID to query by
:return: a Host object
"""
raise NotImplementedError('override me')