hexsha stringlengths 40 40 | size int64 1 1.03M | ext stringclasses 10 values | lang stringclasses 1 value | max_stars_repo_path stringlengths 3 239 | max_stars_repo_name stringlengths 5 130 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 3 239 | max_issues_repo_name stringlengths 5 130 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 3 239 | max_forks_repo_name stringlengths 5 130 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 1 1.03M | avg_line_length float64 1 958k | max_line_length int64 1 1.03M | alphanum_fraction float64 0 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
ace594b6f52319dea2e29f7361ef7791291dbf5e | 182 | py | Python | Day0/weighted_mean.py | DavinciB/10_days_of_statistics | ce3f43a6fe5304e8e83a215dcf8462f63b941132 | [
"MIT"
] | null | null | null | Day0/weighted_mean.py | DavinciB/10_days_of_statistics | ce3f43a6fe5304e8e83a215dcf8462f63b941132 | [
"MIT"
] | null | null | null | Day0/weighted_mean.py | DavinciB/10_days_of_statistics | ce3f43a6fe5304e8e83a215dcf8462f63b941132 | [
"MIT"
] | null | null | null | N = int(input())
A = list(map(int, input().split()[:N]))
W = list(map(int, input().split()[:N]))
x = 0, y = 0
for i in range(N):
x += A[i] * W[i]
y += W[i]
print(round((x/y), 1)) | 22.75 | 39 | 0.494505 |
ace595b0f45e11236cc0f9015c5ce5201f04131d | 3,734 | py | Python | kubernetes/client/models/v1_host_alias.py | sgwilliams-ebsco/python | 35e6406536c96d4769ff7e2a02bf0fdcb902a509 | [
"Apache-2.0"
] | 1 | 2021-06-10T23:44:11.000Z | 2021-06-10T23:44:11.000Z | kubernetes/client/models/v1_host_alias.py | sgwilliams-ebsco/python | 35e6406536c96d4769ff7e2a02bf0fdcb902a509 | [
"Apache-2.0"
] | null | null | null | kubernetes/client/models/v1_host_alias.py | sgwilliams-ebsco/python | 35e6406536c96d4769ff7e2a02bf0fdcb902a509 | [
"Apache-2.0"
] | 1 | 2018-11-06T16:33:43.000Z | 2018-11-06T16:33:43.000Z | # coding: utf-8
"""
Kubernetes
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: v1.12.2
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
import re
class V1HostAlias(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'hostnames': 'list[str]',
'ip': 'str'
}
attribute_map = {
'hostnames': 'hostnames',
'ip': 'ip'
}
def __init__(self, hostnames=None, ip=None):
"""
V1HostAlias - a model defined in Swagger
"""
self._hostnames = None
self._ip = None
self.discriminator = None
if hostnames is not None:
self.hostnames = hostnames
if ip is not None:
self.ip = ip
@property
def hostnames(self):
"""
Gets the hostnames of this V1HostAlias.
Hostnames for the above IP address.
:return: The hostnames of this V1HostAlias.
:rtype: list[str]
"""
return self._hostnames
@hostnames.setter
def hostnames(self, hostnames):
"""
Sets the hostnames of this V1HostAlias.
Hostnames for the above IP address.
:param hostnames: The hostnames of this V1HostAlias.
:type: list[str]
"""
self._hostnames = hostnames
@property
def ip(self):
"""
Gets the ip of this V1HostAlias.
IP address of the host file entry.
:return: The ip of this V1HostAlias.
:rtype: str
"""
return self._ip
@ip.setter
def ip(self, ip):
"""
Sets the ip of this V1HostAlias.
IP address of the host file entry.
:param ip: The ip of this V1HostAlias.
:type: str
"""
self._ip = ip
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
if not isinstance(other, V1HostAlias):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
| 24.090323 | 105 | 0.532673 |
ace595e0863f1732318c175a80e0fb6ef8c52800 | 15,769 | py | Python | Section4/Designer_code/Video7_9_SQL_object_names_Design.py | Tom-Niesytto/Hands-On-Python-3.x-GUI-Programming | e84de864f6b5d02012e2a50b88ea041a1977a87d | [
"MIT"
] | 10 | 2019-03-24T12:09:49.000Z | 2021-10-01T22:06:02.000Z | Section4/Designer_code/Video7_9_SQL_object_names_Design.py | Tom-Niesytto/Hands-On-Python-3.x-GUI-Programming | e84de864f6b5d02012e2a50b88ea041a1977a87d | [
"MIT"
] | null | null | null | Section4/Designer_code/Video7_9_SQL_object_names_Design.py | Tom-Niesytto/Hands-On-Python-3.x-GUI-Programming | e84de864f6b5d02012e2a50b88ea041a1977a87d | [
"MIT"
] | 4 | 2019-03-24T12:09:52.000Z | 2020-05-26T10:37:55.000Z | # -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'Video6_9_SQL_final_object_names_Design.ui'
#
# Created by: PyQt5 UI code generator 5.11.3
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_MainWindow(object):
def setupUi(self, MainWindow):
MainWindow.setObjectName("MainWindow")
MainWindow.resize(366, 295)
self.centralwidget = QtWidgets.QWidget(MainWindow)
self.centralwidget.setStyleSheet("background-color: qlineargradient(spread:pad, x1:0, y1:0, x2:1, y2:0, stop:0 rgba(0, 0, 0, 255), stop:1 rgba(255, 255, 255, 255));")
self.centralwidget.setObjectName("centralwidget")
self.verticalLayout = QtWidgets.QVBoxLayout(self.centralwidget)
self.verticalLayout.setObjectName("verticalLayout")
self.tabWidget = QtWidgets.QTabWidget(self.centralwidget)
self.tabWidget.setStyleSheet("background-color: qlineargradient(spread:pad, x1:0, y1:0, x2:1, y2:0, stop:0 rgba(0, 0, 0, 255), stop:1 rgba(255, 255, 255, 255));")
self.tabWidget.setObjectName("tabWidget")
self.tab = QtWidgets.QWidget()
self.tab.setStyleSheet("QWidget{\n"
"background-color: rgb(0, 0, 255);\n"
"}\n"
"\n"
"QPushButton{\n"
"background-color: rgb(255, 255, 255);\n"
"}\n"
"\n"
"QPushButton#pushButton_set_label{\n"
"background-color: rgb(255, 0, 255);\n"
"}\n"
"")
self.tab.setObjectName("tab")
self.verticalLayoutWidget = QtWidgets.QWidget(self.tab)
self.verticalLayoutWidget.setGeometry(QtCore.QRect(0, 0, 341, 201))
self.verticalLayoutWidget.setObjectName("verticalLayoutWidget")
self.verticalLayout_2 = QtWidgets.QVBoxLayout(self.verticalLayoutWidget)
self.verticalLayout_2.setContentsMargins(0, 0, 0, 0)
self.verticalLayout_2.setObjectName("verticalLayout_2")
self.label = QtWidgets.QLabel(self.verticalLayoutWidget)
font = QtGui.QFont()
font.setPointSize(18)
self.label.setFont(font)
self.label.setAlignment(QtCore.Qt.AlignCenter)
self.label.setObjectName("label")
self.verticalLayout_2.addWidget(self.label)
self.horizontalLayout = QtWidgets.QHBoxLayout()
self.horizontalLayout.setObjectName("horizontalLayout")
self.pushButton_clear = QtWidgets.QPushButton(self.verticalLayoutWidget)
self.pushButton_clear.setObjectName("pushButton_clear")
self.horizontalLayout.addWidget(self.pushButton_clear)
self.pushButton_set_label = QtWidgets.QPushButton(self.verticalLayoutWidget)
self.pushButton_set_label.setObjectName("pushButton_set_label")
self.horizontalLayout.addWidget(self.pushButton_set_label)
self.verticalLayout_2.addLayout(self.horizontalLayout)
self.tabWidget.addTab(self.tab, "")
self.tab_2 = QtWidgets.QWidget()
self.tab_2.setAutoFillBackground(False)
self.tab_2.setObjectName("tab_2")
self.pushButton_convert = QtWidgets.QPushButton(self.tab_2)
self.pushButton_convert.setGeometry(QtCore.QRect(260, 180, 75, 23))
self.pushButton_convert.setStyleSheet("background-color: rgb(0, 170, 255);")
self.pushButton_convert.setObjectName("pushButton_convert")
self.groupBox = QtWidgets.QGroupBox(self.tab_2)
self.groupBox.setGeometry(QtCore.QRect(10, 69, 321, 101))
self.groupBox.setStyleSheet("background-color: rgb(255, 255, 255);")
self.groupBox.setObjectName("groupBox")
self.horizontalLayoutWidget = QtWidgets.QWidget(self.groupBox)
self.horizontalLayoutWidget.setGeometry(QtCore.QRect(0, 50, 321, 51))
self.horizontalLayoutWidget.setObjectName("horizontalLayoutWidget")
self.horizontalLayout_2 = QtWidgets.QHBoxLayout(self.horizontalLayoutWidget)
self.horizontalLayout_2.setContentsMargins(0, 0, 0, 0)
self.horizontalLayout_2.setObjectName("horizontalLayout_2")
self.label_convert = QtWidgets.QLabel(self.horizontalLayoutWidget)
self.label_convert.setObjectName("label_convert")
self.horizontalLayout_2.addWidget(self.label_convert)
self.lineEdit = QtWidgets.QLineEdit(self.horizontalLayoutWidget)
self.lineEdit.setObjectName("lineEdit")
self.horizontalLayout_2.addWidget(self.lineEdit)
self.toolButton = QtWidgets.QToolButton(self.horizontalLayoutWidget)
self.toolButton.setObjectName("toolButton")
self.horizontalLayout_2.addWidget(self.toolButton)
self.radioButton_ui = QtWidgets.QRadioButton(self.groupBox)
self.radioButton_ui.setGeometry(QtCore.QRect(10, 20, 82, 17))
self.radioButton_ui.setObjectName("radioButton_ui")
self.radioButton_exe = QtWidgets.QRadioButton(self.groupBox)
self.radioButton_exe.setGeometry(QtCore.QRect(120, 20, 82, 17))
self.radioButton_exe.setObjectName("radioButton_exe")
self.tabWidget.addTab(self.tab_2, "")
self.tab_3_network = QtWidgets.QWidget()
self.tab_3_network.setStyleSheet("")
self.tab_3_network.setObjectName("tab_3_network")
self.groupBox_2_client1 = QtWidgets.QGroupBox(self.tab_3_network)
self.groupBox_2_client1.setGeometry(QtCore.QRect(10, 10, 121, 191))
self.groupBox_2_client1.setStyleSheet("background-color: rgb(255, 255, 255);")
self.groupBox_2_client1.setObjectName("groupBox_2_client1")
self.verticalLayoutWidget_2 = QtWidgets.QWidget(self.groupBox_2_client1)
self.verticalLayoutWidget_2.setGeometry(QtCore.QRect(10, 19, 101, 161))
self.verticalLayoutWidget_2.setObjectName("verticalLayoutWidget_2")
self.verticalLayout_4 = QtWidgets.QVBoxLayout(self.verticalLayoutWidget_2)
self.verticalLayout_4.setContentsMargins(0, 0, 0, 0)
self.verticalLayout_4.setObjectName("verticalLayout_4")
self.textEdit_client1 = QtWidgets.QTextEdit(self.verticalLayoutWidget_2)
self.textEdit_client1.setObjectName("textEdit_client1")
self.verticalLayout_4.addWidget(self.textEdit_client1)
self.lineEdit_client1 = QtWidgets.QLineEdit(self.verticalLayoutWidget_2)
self.lineEdit_client1.setObjectName("lineEdit_client1")
self.verticalLayout_4.addWidget(self.lineEdit_client1)
self.pushButton_client1_send = QtWidgets.QPushButton(self.verticalLayoutWidget_2)
self.pushButton_client1_send.setStyleSheet("background-color: rgb(255, 0, 0);\n"
"color: rgb(255, 255, 255);\n"
"")
self.pushButton_client1_send.setObjectName("pushButton_client1_send")
self.verticalLayout_4.addWidget(self.pushButton_client1_send)
self.groupBox_3_client2 = QtWidgets.QGroupBox(self.tab_3_network)
self.groupBox_3_client2.setGeometry(QtCore.QRect(140, 10, 121, 191))
self.groupBox_3_client2.setStyleSheet("background-color: rgb(255, 255, 255);")
self.groupBox_3_client2.setObjectName("groupBox_3_client2")
self.verticalLayoutWidget_3 = QtWidgets.QWidget(self.groupBox_3_client2)
self.verticalLayoutWidget_3.setGeometry(QtCore.QRect(10, 19, 101, 161))
self.verticalLayoutWidget_3.setObjectName("verticalLayoutWidget_3")
self.verticalLayout_5 = QtWidgets.QVBoxLayout(self.verticalLayoutWidget_3)
self.verticalLayout_5.setContentsMargins(0, 0, 0, 0)
self.verticalLayout_5.setObjectName("verticalLayout_5")
self.textEdit_client2 = QtWidgets.QTextEdit(self.verticalLayoutWidget_3)
self.textEdit_client2.setObjectName("textEdit_client2")
self.verticalLayout_5.addWidget(self.textEdit_client2)
self.lineEdit_client2 = QtWidgets.QLineEdit(self.verticalLayoutWidget_3)
self.lineEdit_client2.setObjectName("lineEdit_client2")
self.verticalLayout_5.addWidget(self.lineEdit_client2)
self.pushButton_client2_send = QtWidgets.QPushButton(self.verticalLayoutWidget_3)
self.pushButton_client2_send.setStyleSheet("background-color: rgb(0, 0, 255);\n"
"color: rgb(255, 255, 255);")
self.pushButton_client2_send.setObjectName("pushButton_client2_send")
self.verticalLayout_5.addWidget(self.pushButton_client2_send)
self.groupBox_4_server = QtWidgets.QGroupBox(self.tab_3_network)
self.groupBox_4_server.setGeometry(QtCore.QRect(270, 100, 61, 101))
self.groupBox_4_server.setStyleSheet("background-color: qlineargradient(spread:pad, x1:0, y1:0, x2:1, y2:0, stop:0 rgba(255, 178, 102, 255), stop:0.55 rgba(235, 148, 61, 255), stop:0.98 rgba(0, 0, 0, 255), stop:1 rgba(0, 0, 0, 0));")
self.groupBox_4_server.setObjectName("groupBox_4_server")
self.label_2 = QtWidgets.QLabel(self.groupBox_4_server)
self.label_2.setGeometry(QtCore.QRect(10, 20, 47, 13))
self.label_2.setObjectName("label_2")
self.lineEdit_4_port_number = QtWidgets.QLineEdit(self.groupBox_4_server)
self.lineEdit_4_port_number.setGeometry(QtCore.QRect(10, 40, 41, 20))
self.lineEdit_4_port_number.setStyleSheet("background-color: rgb(255, 255, 255);")
self.lineEdit_4_port_number.setObjectName("lineEdit_4_port_number")
self.pushButton_3_start = QtWidgets.QPushButton(self.groupBox_4_server)
self.pushButton_3_start.setGeometry(QtCore.QRect(10, 70, 41, 23))
self.pushButton_3_start.setStyleSheet("background-color: rgb(0, 0, 0);\n"
"color: rgb(255, 255, 255);\n"
"")
self.pushButton_3_start.setObjectName("pushButton_3_start")
self.tabWidget.addTab(self.tab_3_network, "")
self.tab_4_SQL = QtWidgets.QWidget()
self.tab_4_SQL.setStyleSheet("background-color: rgb(170, 0, 0);\n"
"color: rgb(255, 255, 255);")
self.tab_4_SQL.setObjectName("tab_4_SQL")
self.tableView = QtWidgets.QTableView(self.tab_4_SQL)
self.tableView.setGeometry(QtCore.QRect(95, 11, 241, 191))
self.tableView.setStyleSheet("background-color: rgb(255, 255, 255);")
self.tableView.setObjectName("tableView")
self.verticalLayoutWidget_4 = QtWidgets.QWidget(self.tab_4_SQL)
self.verticalLayoutWidget_4.setGeometry(QtCore.QRect(10, 20, 81, 171))
self.verticalLayoutWidget_4.setObjectName("verticalLayoutWidget_4")
self.verticalLayout_3 = QtWidgets.QVBoxLayout(self.verticalLayoutWidget_4)
self.verticalLayout_3.setContentsMargins(0, 0, 0, 0)
self.verticalLayout_3.setObjectName("verticalLayout_3")
self.button_SQL_view_data = QtWidgets.QPushButton(self.verticalLayoutWidget_4)
self.button_SQL_view_data.setObjectName("button_SQL_view_data")
self.verticalLayout_3.addWidget(self.button_SQL_view_data)
self.button_SQL_add = QtWidgets.QPushButton(self.verticalLayoutWidget_4)
self.button_SQL_add.setObjectName("button_SQL_add")
self.verticalLayout_3.addWidget(self.button_SQL_add)
self.button_SQL_delete = QtWidgets.QPushButton(self.verticalLayoutWidget_4)
self.button_SQL_delete.setObjectName("button_SQL_delete")
self.verticalLayout_3.addWidget(self.button_SQL_delete)
self.button_SQL_create_db = QtWidgets.QPushButton(self.verticalLayoutWidget_4)
self.button_SQL_create_db.setObjectName("button_SQL_create_db")
self.verticalLayout_3.addWidget(self.button_SQL_create_db)
spacerItem = QtWidgets.QSpacerItem(20, 40, QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Expanding)
self.verticalLayout_3.addItem(spacerItem)
self.tabWidget.addTab(self.tab_4_SQL, "")
self.verticalLayout.addWidget(self.tabWidget)
MainWindow.setCentralWidget(self.centralwidget)
self.menubar = QtWidgets.QMenuBar(MainWindow)
self.menubar.setGeometry(QtCore.QRect(0, 0, 366, 21))
self.menubar.setObjectName("menubar")
self.menuFile = QtWidgets.QMenu(self.menubar)
self.menuFile.setObjectName("menuFile")
MainWindow.setMenuBar(self.menubar)
self.statusbar = QtWidgets.QStatusBar(MainWindow)
self.statusbar.setObjectName("statusbar")
MainWindow.setStatusBar(self.statusbar)
self.actionNew = QtWidgets.QAction(MainWindow)
icon = QtGui.QIcon()
icon.addPixmap(QtGui.QPixmap("../icons/new_icon.png"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
self.actionNew.setIcon(icon)
self.actionNew.setObjectName("actionNew")
self.actionExit = QtWidgets.QAction(MainWindow)
icon1 = QtGui.QIcon()
icon1.addPixmap(QtGui.QPixmap("../icons/exit_icon.png"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
self.actionExit.setIcon(icon1)
self.actionExit.setObjectName("actionExit")
self.menuFile.addAction(self.actionNew)
self.menuFile.addSeparator()
self.menuFile.addAction(self.actionExit)
self.menubar.addAction(self.menuFile.menuAction())
self.retranslateUi(MainWindow)
self.tabWidget.setCurrentIndex(3)
QtCore.QMetaObject.connectSlotsByName(MainWindow)
def retranslateUi(self, MainWindow):
_translate = QtCore.QCoreApplication.translate
MainWindow.setWindowTitle(_translate("MainWindow", "MainWindow"))
self.label.setText(_translate("MainWindow", "TextLabel"))
self.pushButton_clear.setText(_translate("MainWindow", "Clear label"))
self.pushButton_set_label.setText(_translate("MainWindow", "Set label"))
self.tabWidget.setTabText(self.tabWidget.indexOf(self.tab), _translate("MainWindow", "Label"))
self.pushButton_convert.setText(_translate("MainWindow", "Convert"))
self.groupBox.setTitle(_translate("MainWindow", "UI and PY conversions"))
self.label_convert.setText(_translate("MainWindow", "<click radio button>"))
self.toolButton.setText(_translate("MainWindow", "..."))
self.radioButton_ui.setText(_translate("MainWindow", "UI to PY"))
self.radioButton_exe.setText(_translate("MainWindow", "PY to EXE"))
self.tabWidget.setTabText(self.tabWidget.indexOf(self.tab_2), _translate("MainWindow", "Conversions"))
self.groupBox_2_client1.setTitle(_translate("MainWindow", "TCP Client"))
self.pushButton_client1_send.setText(_translate("MainWindow", "Connect to Server"))
self.groupBox_3_client2.setTitle(_translate("MainWindow", "TCP Client"))
self.pushButton_client2_send.setText(_translate("MainWindow", "Connect to Server"))
self.groupBox_4_server.setTitle(_translate("MainWindow", "Server"))
self.label_2.setText(_translate("MainWindow", "Port:"))
self.lineEdit_4_port_number.setText(_translate("MainWindow", "12345"))
self.pushButton_3_start.setText(_translate("MainWindow", "Start"))
self.tabWidget.setTabText(self.tabWidget.indexOf(self.tab_3_network), _translate("MainWindow", "Network"))
self.button_SQL_view_data.setText(_translate("MainWindow", "View Data"))
self.button_SQL_add.setText(_translate("MainWindow", "Add Row"))
self.button_SQL_delete.setText(_translate("MainWindow", "Delete Row"))
self.button_SQL_create_db.setText(_translate("MainWindow", "Create DB"))
self.tabWidget.setTabText(self.tabWidget.indexOf(self.tab_4_SQL), _translate("MainWindow", "SQL"))
self.menuFile.setTitle(_translate("MainWindow", "File"))
self.statusbar.setStatusTip(_translate("MainWindow", "Statusbar"))
self.actionNew.setText(_translate("MainWindow", "New"))
self.actionNew.setStatusTip(_translate("MainWindow", "New File"))
self.actionExit.setText(_translate("MainWindow", "Exit"))
if __name__ == "__main__":
import sys
app = QtWidgets.QApplication(sys.argv)
MainWindow = QtWidgets.QMainWindow()
ui = Ui_MainWindow()
ui.setupUi(MainWindow)
MainWindow.show()
sys.exit(app.exec_())
| 60.65 | 241 | 0.728328 |
ace59601f4b078c6d92e5e27c1af992ae56c3ed8 | 1,085 | py | Python | tests/console/Console/test_text_input.py | pybee/briefcase | d7e9aa7bf15aa2abbc71e97aef9bea287129fdaa | [
"BSD-3-Clause"
] | 522 | 2015-07-28T16:06:18.000Z | 2019-03-25T17:16:55.000Z | tests/console/Console/test_text_input.py | pybee/briefcase | d7e9aa7bf15aa2abbc71e97aef9bea287129fdaa | [
"BSD-3-Clause"
] | 154 | 2015-09-17T02:50:55.000Z | 2019-03-22T07:10:34.000Z | tests/console/Console/test_text_input.py | pybee/briefcase | d7e9aa7bf15aa2abbc71e97aef9bea287129fdaa | [
"BSD-3-Clause"
] | 105 | 2015-09-25T08:43:26.000Z | 2019-03-25T15:59:27.000Z | import pytest
from briefcase.console import InputDisabled
@pytest.mark.parametrize(
"value, expected",
[
("Value", "Value"),
("", "Default"),
],
)
def test_text_input(console, value, expected):
prompt = "> "
default = "Default"
console.rich_console.input.return_value = value
actual = console.text_input(prompt=prompt, default=default)
assert actual == expected
console.rich_console.input.assert_called_once_with(prompt, markup=False)
def test_disabled(disabled_console):
"""If input is disabled, the default is returned."""
prompt = "> "
actual = disabled_console.text_input(prompt=prompt, default="Default")
assert actual == "Default"
disabled_console.rich_console.input.assert_not_called()
def test_disabled_no_default(disabled_console):
"""If input is disabled and there is no default, an error is raised."""
prompt = "> "
with pytest.raises(InputDisabled):
disabled_console.text_input(prompt=prompt, default=None)
disabled_console.rich_console.input.assert_not_called()
| 25.232558 | 76 | 0.704147 |
ace59773f12efdda4f62f69df453f6a094da420b | 2,821 | py | Python | venv/Lib/site-packages/pyrogram/raw/types/update_pinned_dialogs.py | D1ne2021/jjhhhjj | a090da30983b3ef276dfe4cef2ded4526f36002a | [
"MIT"
] | 2 | 2021-12-13T07:09:55.000Z | 2022-01-12T12:15:20.000Z | venv/Lib/site-packages/pyrogram/raw/types/update_pinned_dialogs.py | hoangkiet1906/Botcie_ver1 | c133b915edde06dac690a7dc6ca160f6792fc4c8 | [
"MIT"
] | null | null | null | venv/Lib/site-packages/pyrogram/raw/types/update_pinned_dialogs.py | hoangkiet1906/Botcie_ver1 | c133b915edde06dac690a7dc6ca160f6792fc4c8 | [
"MIT"
] | null | null | null | # Pyrogram - Telegram MTProto API Client Library for Python
# Copyright (C) 2017-2021 Dan <https://github.com/delivrance>
#
# This file is part of Pyrogram.
#
# Pyrogram is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Pyrogram is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Pyrogram. If not, see <http://www.gnu.org/licenses/>.
from io import BytesIO
from pyrogram.raw.core.primitives import Int, Long, Int128, Int256, Bool, Bytes, String, Double, Vector
from pyrogram.raw.core import TLObject
from pyrogram import raw
from typing import List, Union, Any
# # # # # # # # # # # # # # # # # # # # # # # #
# !!! WARNING !!! #
# This is a generated file! #
# All changes made in this file will be lost! #
# # # # # # # # # # # # # # # # # # # # # # # #
class UpdatePinnedDialogs(TLObject): # type: ignore
"""This object is a constructor of the base type :obj:`~pyrogram.raw.base.Update`.
Details:
- Layer: ``126``
- ID: ``0xfa0f3ca2``
Parameters:
folder_id (optional): ``int`` ``32-bit``
order (optional): List of :obj:`DialogPeer <pyrogram.raw.base.DialogPeer>`
"""
__slots__: List[str] = ["folder_id", "order"]
ID = 0xfa0f3ca2
QUALNAME = "types.UpdatePinnedDialogs"
def __init__(self, *, folder_id: Union[None, int] = None, order: Union[None, List["raw.base.DialogPeer"]] = None) -> None:
self.folder_id = folder_id # flags.1?int
self.order = order # flags.0?Vector<DialogPeer>
@staticmethod
def read(data: BytesIO, *args: Any) -> "UpdatePinnedDialogs":
flags = Int.read(data)
folder_id = Int.read(data) if flags & (1 << 1) else None
order = TLObject.read(data) if flags & (1 << 0) else []
return UpdatePinnedDialogs(folder_id=folder_id, order=order)
def write(self) -> bytes:
data = BytesIO()
data.write(Int(self.ID, False))
flags = 0
flags |= (1 << 1) if self.folder_id is not None else 0
flags |= (1 << 0) if self.order is not None else 0
data.write(Int(flags))
if self.folder_id is not None:
data.write(Int(self.folder_id))
if self.order is not None:
data.write(Vector(self.order))
return data.getvalue()
| 35.708861 | 126 | 0.620347 |
ace598977edc8dbaa640e0ce0cd6f651544026f1 | 544 | py | Python | hub/urls.py | Benard18/PhotoHub | ca03591e2a1b0667ce24b10fc2b07b7f0e199532 | [
"MIT"
] | null | null | null | hub/urls.py | Benard18/PhotoHub | ca03591e2a1b0667ce24b10fc2b07b7f0e199532 | [
"MIT"
] | null | null | null | hub/urls.py | Benard18/PhotoHub | ca03591e2a1b0667ce24b10fc2b07b7f0e199532 | [
"MIT"
] | null | null | null | from django.conf import settings
from django.conf.urls.static import static
from django.conf.urls import url
from . import views
urlpatterns=[
url('^$',views.index,name='index'),
url(r'^search/', views.search_results,name='search_results'),
url(r'^image/(?P<image_id>\d+)',views.image,name ='image'),
url(r'^category/(?P<category_id>\d+)',views.category,name = 'category'),
url(r'^location/(?P<location_id>\d+)',views.location,name = 'location')
]
if settings.DEBUG:
urlpatterns+= static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT) | 38.857143 | 76 | 0.742647 |
ace598a009c5f217b3f2b6a4886de4daf4cf5e90 | 809 | py | Python | Lib/corpuscrawler/crawl_quw.py | cash/corpuscrawler | 8913fe1fb2b6bfdfbf2ba01d2ce88057b3b5ba3d | [
"Apache-2.0"
] | 95 | 2019-06-13T23:34:21.000Z | 2022-03-12T05:22:49.000Z | Lib/corpuscrawler/crawl_quw.py | sahwar/corpuscrawler | 8913fe1fb2b6bfdfbf2ba01d2ce88057b3b5ba3d | [
"Apache-2.0"
] | 31 | 2019-06-02T18:56:53.000Z | 2021-08-10T20:16:02.000Z | Lib/corpuscrawler/crawl_quw.py | sahwar/corpuscrawler | 8913fe1fb2b6bfdfbf2ba01d2ce88057b3b5ba3d | [
"Apache-2.0"
] | 35 | 2019-06-18T08:26:24.000Z | 2022-01-11T13:59:40.000Z | # Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, print_function, unicode_literals
from corpuscrawler.util import crawl_bibleis
def crawl(crawler):
out = crawler.get_output(language='quw')
crawl_bibleis(crawler, out, bible='QUWHLE')
| 36.772727 | 74 | 0.770087 |
ace598a84006d8bd9c0a3ab97794fb0c44b489c0 | 3,527 | py | Python | src/davinci_crawling/throttle/throttle.py | intellstartup/django-davinci-crawling | 638739855b63f02e318abf484d5daeab6c861e7a | [
"MIT"
] | null | null | null | src/davinci_crawling/throttle/throttle.py | intellstartup/django-davinci-crawling | 638739855b63f02e318abf484d5daeab6c861e7a | [
"MIT"
] | null | null | null | src/davinci_crawling/throttle/throttle.py | intellstartup/django-davinci-crawling | 638739855b63f02e318abf484d5daeab6c861e7a | [
"MIT"
] | 1 | 2020-03-16T20:25:53.000Z | 2020-03-16T20:25:53.000Z | # -*- coding: utf-8 -*-
# Copyright (c) 2019 BuildGroup Data Services Inc.
# https://quentin.pradet.me/blog/how-do-you-rate-limit-calls-with-aiohttp.html
import inspect
from abc import ABC, abstractmethod
from datetime import timedelta
from functools import wraps
from davinci_crawling.utils import get_class_from_name
from django.conf import settings
DEFAULT_THROTTLE_MANAGER = "davinci_crawling.throttle.memory_throttle.MemoryThrottle"
class Throttle(object):
"""
Decorator that prevents a function from being called more than once every
time period.
To create a function that cannot be called more than 10
requests per minute a minute:
@throttle(minutes=1, rate=10, max_tokens=10)
def my_fun():
pass
"""
manager = None
manager_clazz = None
def __init__(self, crawler_name, seconds=1, minutes=0, hours=0, rate=10, max_tokens=10, throttle_suffix_field=None):
self.throttle_period = timedelta(seconds=seconds, minutes=minutes, hours=hours)
self.rate = rate
self.max_tokens = max_tokens
self.crawler_name = crawler_name
self.suffix_field = throttle_suffix_field
@classmethod
def get_manager_clazz(cls):
if not cls.manager_clazz:
if (
hasattr(settings, "DAVINCI_CONF")
and "throttle" in settings.DAVINCI_CONF["architecture-params"]
and "implementation" in settings.DAVINCI_CONF["architecture-params"]["throttle"]
):
throttle_implementation = settings.DAVINCI_CONF["architecture-params"]["throttle"]["implementation"]
else:
throttle_implementation = DEFAULT_THROTTLE_MANAGER
cls.manager_clazz = get_class_from_name(throttle_implementation)
return cls.manager_clazz
def get_throttle_manager(self):
if not self.manager:
manager_clazz = self.get_manager_clazz()
self.manager = manager_clazz(
self.crawler_name, seconds=self.throttle_period.seconds, rate=self.rate, max_tokens=self.max_tokens
)
return self.manager
def __call__(self, fn):
@wraps(fn)
def wrapper(*args, **kwargs):
throttle_suffix = None
if self.suffix_field:
throttle_suffix = kwargs.get(self.suffix_field)
if not throttle_suffix:
arguments = inspect.getfullargspec(fn).args
if self.suffix_field in arguments:
argument_position = arguments.index(self.suffix_field)
if argument_position < len(args):
throttle_suffix = args[argument_position]
manager = self.get_throttle_manager()
if throttle_suffix:
throttle_key = "%s_%s_%s" % (manager.crawler_name, fn.__name__, throttle_suffix)
else:
throttle_key = "%s_%s" % (manager.crawler_name, fn.__name__)
manager.wait_for_token(throttle_key)
return fn(*args, **kwargs)
return wrapper
class ThrottleManager(ABC):
def __init__(self, crawler_name, seconds=1, minutes=0, hours=0, rate=10, max_tokens=10):
self.throttle_period = timedelta(seconds=seconds, minutes=minutes, hours=hours)
self.rate = rate
self.max_tokens = max_tokens
self.crawler_name = crawler_name
@abstractmethod
def wait_for_token(self, key):
raise NotImplementedError
| 35.626263 | 120 | 0.64871 |
ace59905d28ecad5b56494ef63bdc0c453420618 | 281 | py | Python | raceways/__init__.py | alecf/strava-raceways | 82e922996ddb73d80789ffc249baf5ce52525d0f | [
"MIT"
] | null | null | null | raceways/__init__.py | alecf/strava-raceways | 82e922996ddb73d80789ffc249baf5ce52525d0f | [
"MIT"
] | null | null | null | raceways/__init__.py | alecf/strava-raceways | 82e922996ddb73d80789ffc249baf5ce52525d0f | [
"MIT"
] | null | null | null | import os
import urllib
import jinja2
JINJA_ENVIRONMENT = jinja2.Environment(
loader=jinja2.FileSystemLoader(
os.path.join(os.path.dirname(os.path.dirname(__file__)),
'templates')),
extensions=['jinja2.ext.autoescape'],
autoescape=True)
| 21.615385 | 64 | 0.676157 |
ace599ddb7fdb8b352a44a927a75fb727b47a470 | 2,503 | py | Python | examples/stock.py | gitvipin/Pali | 5da870ab93de5166493519f263c83c9e8200a5a6 | [
"BSD-2-Clause"
] | null | null | null | examples/stock.py | gitvipin/Pali | 5da870ab93de5166493519f263c83c9e8200a5a6 | [
"BSD-2-Clause"
] | null | null | null | examples/stock.py | gitvipin/Pali | 5da870ab93de5166493519f263c83c9e8200a5a6 | [
"BSD-2-Clause"
] | null | null | null | '''
A simple python module to scan Stock Ticker symbols.
'''
import datetime
import json
import logging
import requests
from src import logger
from src import task
from src import worker
# Module-level logger (project wrapper around logging); also raise the level
# of the chatty third-party loggers so only warnings and above get through.
log = logger.getLogger(__name__)
logger.getLogger("requests").setLevel(logging.WARNING)
logger.getLogger("urllib").setLevel(logging.WARNING)
class StockTask(task.Task):
    """Task that fetches daily adjusted price data for one ticker symbol
    from the Alpha Vantage HTTP API and logs today's opening price."""

    FIN_DATA_URL = 'https://www.alphavantage.co'
    API_KEY = 'ADD_YOUR_KEY'
    FIN_DATA_TYPE = 'TIME_SERIES_DAILY_ADJUSTED'

    def __init__(self, tckr='TSLA'):
        super(StockTask, self).__init__()
        self.tckr = tckr
        self.data = None  # parsed JSON payload, populated by _run() on success
        # Today's date in the API's "YYYY-MM-DD" key format.
        self.date = datetime.datetime.now().strftime("%Y-%m-%d")

    def build_url(self):
        """Compose the Alpha Vantage query URL for this ticker into self.url."""
        self.url = '%s/query?apikey=%s&function=%s&symbol=%s' % (
            self.FIN_DATA_URL,
            self.API_KEY,
            self.FIN_DATA_TYPE,
            self.tckr)

    def _run(self):
        """Task entry point: download the quote payload and log today's open.

        Any failure (network, JSON, lookup) is logged with traceback and
        swallowed so a bad ticker does not kill the worker pool.
        """
        try:
            self.build_url()
            r = requests.get(self.url)
            if r.status_code == 200:
                self.data = json.loads(r.text)
                opening = self.open_today()
                log.info("%s opened at : %s", self.tckr, opening)
        except Exception:
            # log.exception records the active traceback; the bound exception
            # variable in the original was unused.
            log.exception("Tckr: %s , data: %s", self.tckr, self.data)

    def open_today(self):
        """Return today's opening price as a string, or None if unavailable.

        Uses dict.get() throughout: the payload may be an API error message
        without a "Time Series (Daily)" section, and today's date is absent
        on weekends/market holidays — the original raised KeyError then.
        """
        if not self.data:
            return None
        daily = self.data.get("Time Series (Daily)")
        if not daily:
            return None
        entry = daily.get(self.date)
        return entry.get("1. open") if entry else None
def simple():
    """Fetch opening prices for a fixed set of tickers on 2 worker threads,
    then log the (ticker, open) pairs that returned data."""
    tickers = ['VMW', 'AAPL', 'GOOG', 'TSLA', 'CRWD']
    tasks = [StockTask(tckr=x) for x in tickers]
    with worker.ThreadPool(2) as tpool:
        # Plain loop: the original used a list comprehension purely for its
        # side effect, discarding the built list.
        for t in tasks:
            tpool.append_task(t)
    # The pool context exit waits for completion, so results are ready here.
    open_today = [(x.tckr, x.open_today()) for x in tasks if x.data]
    log.info("Open today : %s", open_today)
def get_next_tckr():
    """Yield candidate ticker symbols.

    First every 4-letter combination (prefixes 'A'..'Z' over all 3-letter
    suffixes), then every plain 3-letter combination, all uppercase A-Z.
    """
    letters = [chr(code) for code in range(ord('a'), ord('a') + 26)]
    letters = [ch.upper() for ch in letters]
    for prefix in letters + ['_']:  # '_' marks the "no prefix" pass
        for first in letters:
            for second in letters:
                for third in letters:
                    suffix = first + second + third
                    if prefix == '_':
                        yield suffix
                    else:
                        yield prefix + suffix
def scrapper():
    """Enqueue a StockTask for every candidate ticker from get_next_tckr()
    onto a 15-thread worker pool."""
    log.info("======================")
    log.info(" Start Scrapping ")
    log.info("======================")
    # NOTE(review): max_queue_size is 26**4 but get_next_tckr() yields
    # 27 * 26**3 symbols (4-letter plus 3-letter); confirm the bound.
    with worker.ThreadPool(max_threads=15, max_queue_size=26**4) as tpool:
        for tckr in get_next_tckr():
            # log.info("Appending task for : %s", tckr)
            # Renamed from `task`, which shadowed the imported `task` module.
            stock_task = StockTask(tckr)
            tpool.append_task(stock_task)
if __name__ == '__main__':
    # Run the small fixed-ticker demo; use scrapper() for the full sweep.
    simple()
| 27.811111 | 74 | 0.562125 |
ace59c157ce1352e596790b8f9888c205bae2564 | 844 | py | Python | src/examples/splitters/main.py | robertkist/qtmodernredux | c7f791a1492ff855f3e4b963b8e9f20c46ba503f | [
"Apache-2.0"
] | 4 | 2021-04-12T19:30:47.000Z | 2022-02-11T18:24:16.000Z | src/examples/splitters/main.py | robertkist/qtmodernredux | c7f791a1492ff855f3e4b963b8e9f20c46ba503f | [
"Apache-2.0"
] | null | null | null | src/examples/splitters/main.py | robertkist/qtmodernredux | c7f791a1492ff855f3e4b963b8e9f20c46ba503f | [
"Apache-2.0"
] | null | null | null | import sys
from PySide2.QtWidgets import QMainWindow, QApplication
from PySide2.QtGui import QColor
from qtmodernredux import QtModernRedux
from mainwindow_ui import Ui_MainWindow
"""
This example demonstates the styling of the QSplitter element.
"""
__author__ = "Robert Kist"
class MainWindow(QMainWindow):
    """Main window whose layout (including the demonstrated QSplitter)
    comes from the Qt Designer-generated Ui_MainWindow class."""

    def __init__(self, parent=None):
        super().__init__(parent)
        # Instantiate the generated UI and attach its widgets to this window.
        self.__ui = Ui_MainWindow()
        self.__ui.setupUi(self)
        self.setWindowTitle('QSplitter Test')
if __name__ == "__main__":
app = QtModernRedux.QApplication(sys.argv)
mw = QtModernRedux.wrap(MainWindow(),
titlebar_color=QColor('#555555'),
window_buttons_position=QtModernRedux.WINDOW_BUTTONS_LEFT)
desktop = QApplication.desktop()
mw.show()
sys.exit(app.exec_())
| 25.575758 | 86 | 0.689573 |
ace59ea3eb72224222cd6fc615b45a56336ecb9b | 1,958 | py | Python | scripts/regrid.py | norlandrhagen/forest-risks | 2cbc87064ac05299dba952c9f0cb8022ffd8909a | [
"MIT"
] | 20 | 2021-05-01T18:08:07.000Z | 2022-03-09T10:24:53.000Z | scripts/regrid.py | norlandrhagen/forest-risks | 2cbc87064ac05299dba952c9f0cb8022ffd8909a | [
"MIT"
] | 15 | 2021-03-31T05:20:55.000Z | 2022-02-28T13:02:58.000Z | scripts/regrid.py | norlandrhagen/forest-risks | 2cbc87064ac05299dba952c9f0cb8022ffd8909a | [
"MIT"
] | 4 | 2020-10-26T20:52:30.000Z | 2021-02-19T07:42:52.000Z | import os
import sys
import numpy as np
import pandas as pd
import xarray as xr
from tqdm import tqdm
from carbonplan_forest_risks import fit, load, utils
# ---- CLI parsing: regrid.py <dataset> [store] ----
args = sys.argv
if len(args) < 2:
    raise ValueError('must specify dataset')
dataset = args[1]
# Optional second argument selects the storage backend (defaults to local).
if len(args) == 2:
    store = 'local'
else:
    store = args[2]

# NOTE(review): cmip_models is never referenced below — presumably it
# documents which models feed the per-column means; confirm before removing.
cmip_models = [
    'CanESM5-CanOE',
    'MIROC-ES2L',
    'ACCESS-CM2',
    'ACCESS-ESM1-5',
    'MRI-ESM2-0',
    'MPI-ESM1-2-LR',
]
scenarios = ['ssp245', 'ssp370', 'ssp585']
# Decadal targets 2010-2090 as strings, matching parquet column names.
# (Comprehension replaces the original map(lambda x: str(x), ...).)
targets = [str(x) for x in np.arange(2010, 2100, 10)]

pf = pd.read_parquet(f'data/{dataset}.parquet')
ds = xr.Dataset()

print(f'[{dataset}] filtering values')
pf = pf.dropna().reset_index(drop=True)

print(f'[{dataset}] computing multi model mean')
for scenario in scenarios:
    for target in targets:
        # Average every model column belonging to this scenario/decade.
        # Comprehension replaces the original filter/lambda chain and the
        # bitwise `&` on booleans.
        keys = [key for key in pf.columns if scenario in key and target in key]
        pf[scenario + '_' + target] = pf[keys].mean(axis=1)

print(f'[{dataset}] regridding predictions')
nlcd = load.nlcd(store=store, year=2016, classes=[41, 42, 43, 90])
final_mask = nlcd.sum('band')
final_mask.attrs['crs'] = nlcd.attrs['crs']
# Biomass datasets keep fractional cover (zeroed below 50%); other datasets
# use a plain boolean >50% mask.
if 'biomass' in dataset:
    final_mask.values = final_mask.values * (final_mask.values > 0.5)
else:
    final_mask.values = final_mask.values > 0.5

ds['historical'] = fit.interp(pf, final_mask, var='historical')
for scenario in tqdm(scenarios):
    results = []
    for target in targets:
        key = scenario + '_' + target
        gridded = fit.interp(pf, final_mask, var=key)
        results.append(gridded)
    da = xr.concat(results, dim=xr.Variable('year', targets))
    ds[scenario] = da

# Write the assembled dataset to the web results bucket as zarr.
account_key = os.environ.get('BLOB_ACCOUNT_KEY')
path = utils.get_store(
    'carbonplan-forests', f'risks/results/web/{dataset}.zarr', account_key=account_key
)
ds.to_zarr(path, mode='w')
| 25.428571 | 95 | 0.644535 |
ace59efc7fa5646679005e52c4c812cd8ad2a3b3 | 1,072 | py | Python | figlet.py | nick3499/scrape_dark_sky_nick3499 | 580bf64de8831f60f25d7bd002a53bea90b93618 | [
"MIT"
] | null | null | null | figlet.py | nick3499/scrape_dark_sky_nick3499 | 580bf64de8831f60f25d7bd002a53bea90b93618 | [
"MIT"
] | null | null | null | figlet.py | nick3499/scrape_dark_sky_nick3499 | 580bf64de8831f60f25d7bd002a53bea90b93618 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
'''Contains `get_figlet()` method which returns ASCII art title.
x1b[38;2;140;28;32m <-- dark red
x1b[38;2;119;121;174m <-- mostly desaturated dark blue
x1b[38;2;213;122;100m <-- moderate red
'''
def get_figlet():
    '''Return the ANSI-colored ASCII-art banner "Scrape Dark Sky" as a string.

    The \\x1b[38;2;R;G;Bm sequences switch truecolor foreground colors and
    \\x1b[0m resets at the end.
    '''
    # NOTE(review): the banner depends on exact spacing inside the literal;
    # whitespace may have been mangled in transit — verify against the
    # rendered terminal output.
    figlet = '''\x1b[38;2;140;28;32m
 _____ _____ _
 / ____| | __ \ | | / ____| |
| (___ ___ _ __ __ _ _ __ ___ | | | | __ _ _ __| | __ | (___ | | ___ _
\___ \ / __| '__/ _` | '_ \ / _ \ | | | |/ _` | '__| |/ / \___ \| |/ / | | |\x1b[38;2;119;121;174m
 ____) | (__| | | (_| | |_) | __/ | |__| | (_| | | | < ____) | <| |_| |
|_____/ \___|_| \__,_| .__/ \___| |_____/ \__,_|_| |_|\_\ |_____/|_|\_\___, |\x1b[38;2;140;28;32m
 | | __/ |
 |_| |___/\x1b[0m'''
    return figlet
if __name__ == '__main__':
    # NOTE(review): the returned banner is discarded here — presumably this
    # was meant to be print(get_figlet()); confirm intended behavior.
    get_figlet()
| 44.666667 | 102 | 0.402985 |
ace59fba5ab41f1cbc24412c4dd2b0146c38f432 | 2,005 | py | Python | data/cirq_new/cirq_program/startCirq_Class305.py | UCLA-SEAL/QDiff | d968cbc47fe926b7f88b4adf10490f1edd6f8819 | [
"BSD-3-Clause"
] | null | null | null | data/cirq_new/cirq_program/startCirq_Class305.py | UCLA-SEAL/QDiff | d968cbc47fe926b7f88b4adf10490f1edd6f8819 | [
"BSD-3-Clause"
] | null | null | null | data/cirq_new/cirq_program/startCirq_Class305.py | UCLA-SEAL/QDiff | d968cbc47fe926b7f88b4adf10490f1edd6f8819 | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 5/15/20 4:49 PM
# @File : grover.py
# qubit number=4
# total number=15
import cirq
import cirq.google as cg
from typing import Optional
import sys
from math import log2
import numpy as np
#thatsNoCode
def make_circuit(n: int, input_qubit):
    """Build the fixed 4-qubit Cirq circuit (auto-generated gate sequence).

    `n` is part of the generated signature but unused: the gate list below
    is hard-coded for 4 qubits.  The trailing "# number=N" comments are
    generation-order markers from the tool that emitted this file.
    """
    c = cirq.Circuit()  # circuit begin
    c.append(cirq.H.on(input_qubit[0]))  # number=1
    c.append(cirq.H.on(input_qubit[1]))  # number=2
    c.append(cirq.rx(1.6147786239451536).on(input_qubit[3]))  # number=5
    c.append(cirq.H.on(input_qubit[2]))  # number=3
    c.append(cirq.H.on(input_qubit[3]))  # number=4
    c.append(cirq.H.on(input_qubit[0]))  # number=8
    c.append(cirq.CZ.on(input_qubit[1],input_qubit[0]))  # number=9
    c.append(cirq.rx(0.666017642561036).on(input_qubit[2]))  # number=14
    c.append(cirq.H.on(input_qubit[0]))  # number=10
    c.append(cirq.CNOT.on(input_qubit[1],input_qubit[0]))  # number=7
    c.append(cirq.CNOT.on(input_qubit[1],input_qubit[0]))  # number=11
    c.append(cirq.CNOT.on(input_qubit[1],input_qubit[0]))  # number=12
    c.append(cirq.CNOT.on(input_qubit[2],input_qubit[1]))  # number=13
    # circuit end
    return c
def bitstring(bits):
    """Render an iterable of bit-like values as a compact '0'/'1' string."""
    digits = [str(int(bit)) for bit in bits]
    return ''.join(digits)
if __name__ == '__main__':
    qubit_count = 4
    input_qubits = [cirq.GridQubit(i, 0) for i in range(qubit_count)]
    circuit = make_circuit(qubit_count, input_qubits)
    circuit = cg.optimized_for_sycamore(circuit, optimizer_type='sqrt_iswap')
    circuit_sample_count = 2000  # NOTE(review): unused in this script
    # Exact simulation: derive measurement probabilities |amplitude|^2 from
    # the final state vector instead of sampling.
    info = cirq.final_state_vector(circuit)
    qubits = round(log2(len(info)))
    frequencies = {
        np.binary_repr(i, qubits): round((info[i] * (info[i].conjugate())).real, 3)
        for i in range(2 ** qubits)
    }
    # Context manager ensures the file is closed even if a write fails
    # (the original left it open on exception).
    with open("../data/startCirq_Class305.csv", "w+") as writefile:
        print(format(frequencies), file=writefile)
        print("results end", file=writefile)
        print(circuit.__len__(), file=writefile)
        print(circuit, file=writefile)
writefile.close() | 30.378788 | 80 | 0.674314 |
ace5a00700a8e766cc83bd7b78d936a7b31126ac | 2,014 | py | Python | cirq-web/cirq_web/bloch_sphere/bloch_sphere.py | sub-mersion/Cirq | 2b1e4070ea7bcf3c7c8f4fa694f85f6a0c0ff61f | [
"Apache-2.0"
] | 1 | 2020-08-27T08:50:38.000Z | 2020-08-27T08:50:38.000Z | cirq-web/cirq_web/bloch_sphere/bloch_sphere.py | xeedmm/Cirq | dc013726c9472d39bc2a3909208e188fb535e081 | [
"Apache-2.0"
] | 2 | 2020-10-15T15:36:40.000Z | 2021-01-28T16:18:55.000Z | cirq-web/cirq_web/bloch_sphere/bloch_sphere.py | xeedmm/Cirq | dc013726c9472d39bc2a3909208e188fb535e081 | [
"Apache-2.0"
] | null | null | null | # Copyright 2021 The Cirq Developers
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from cirq_web import widget
from cirq.qis.states import bloch_vector_from_state_vector
from cirq.qis.states import STATE_VECTOR_LIKE
class BlochSphere(widget.Widget):
    """Widget that renders a single-qubit state vector on a three.js
    Bloch sphere inside a notebook."""

    def __init__(
        self,
        sphere_radius: int = 5,
        state_vector: STATE_VECTOR_LIKE = None,
    ):
        """Initializes a BlochSphere, gathering all the user information and
        converting to JSON for output.

        Also initializes its parent class Widget with the bundle file provided.

        Args:
            sphere_radius: the radius of the bloch sphere in the three.js diagram.
                The default value is 5.
            state_vector: a state vector to pass in to be represented.

        Raises:
            ValueError: if the radius is not positive, or if no state
                vector is given (the default None is rejected).
        """
        super().__init__()
        if sphere_radius <= 0:
            raise ValueError('You must input a positive radius for the sphere')
        self.sphere_radius = sphere_radius
        if state_vector is None:
            raise ValueError('No state vector given in BlochSphere initialization')
        # Cartesian (x, y, z) Bloch vector of qubit index 0.
        self.bloch_vector = bloch_vector_from_state_vector(state_vector, 0)

    def get_client_code(self) -> str:
        # JavaScript evaluated on the client to draw the sphere and vector.
        return f"""
        <script>
        renderBlochSphere('{self.id}', {self.sphere_radius})
            .addVector({self.bloch_vector[0]}, {self.bloch_vector[1]}, {self.bloch_vector[2]});
        </script>
        """

    def get_widget_bundle_name(self) -> str:
        # Compiled three.js bundle served alongside the widget HTML.
        return 'bloch_sphere.bundle.js'
| 35.333333 | 95 | 0.684211 |
ace5a00b44d240e7ddda52abd6bfe3374f36f318 | 22,300 | py | Python | printrun/gviz.py | aurelianammon/flask-socketio-printer | addd318d1468891fdf46adb1f01f319ae33f2044 | [
"MIT"
] | null | null | null | printrun/gviz.py | aurelianammon/flask-socketio-printer | addd318d1468891fdf46adb1f01f319ae33f2044 | [
"MIT"
] | null | null | null | printrun/gviz.py | aurelianammon/flask-socketio-printer | addd318d1468891fdf46adb1f01f319ae33f2044 | [
"MIT"
] | null | null | null | # This file is part of the Printrun suite.
#
# Printrun is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Printrun is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Printrun. If not, see <http://www.gnu.org/licenses/>.
from queue import Queue
from collections import deque
import numpy
import wx
import time
from . import gcoder
from .injectgcode import injector, injector_edit
from .utils import imagefile, install_locale, get_home_pos
# Sets up the "_" translation helper used by the UI strings below
# (presumably gettext-based; see install_locale in printrun.utils).
install_locale('pronterface')
class GvizBaseFrame(wx.Frame):
    """Frame base class providing the toolbar, status bar and layer slider
    shared by G-code viewer windows."""

    def create_base_ui(self):
        """Build the common chrome; return (toolbar panel, its vertical sizer)."""
        self.CreateStatusBar(1)
        self.SetStatusText(_("Layer number and Z position show here when you scroll"))
        hpanel = wx.Panel(self, -1)
        hbox = wx.BoxSizer(wx.HORIZONTAL)
        panel = wx.Panel(hpanel, -1)
        vbox = wx.BoxSizer(wx.VERTICAL)
        # NOTE(review): duplicate assignment — the sizer above is immediately
        # overwritten; confirm before removing.
        vbox = wx.BoxSizer(wx.VERTICAL)
        self.toolbar = wx.ToolBar(panel, -1, style = wx.TB_HORIZONTAL | wx.NO_BORDER | wx.TB_HORZ_TEXT)
        # Tool ids 1-7 are bound to handlers by subclasses
        # (zoom in/out, layer up/down, reset view, inject, edit).
        self.toolbar.AddTool(1, '', wx.Image(imagefile('zoom_in.png'), wx.BITMAP_TYPE_PNG).ConvertToBitmap(), _("Zoom In [+]"),)
        self.toolbar.AddTool(2, '', wx.Image(imagefile('zoom_out.png'), wx.BITMAP_TYPE_PNG).ConvertToBitmap(), _("Zoom Out [-]"))
        self.toolbar.AddSeparator()
        self.toolbar.AddTool(3, '', wx.Image(imagefile('arrow_up.png'), wx.BITMAP_TYPE_PNG).ConvertToBitmap(), _("Move Up a Layer [U]"))
        self.toolbar.AddTool(4, '', wx.Image(imagefile('arrow_down.png'), wx.BITMAP_TYPE_PNG).ConvertToBitmap(), _("Move Down a Layer [D]"))
        self.toolbar.AddTool(5, " " + _("Reset view"), wx.Image(imagefile('reset.png'), wx.BITMAP_TYPE_PNG).ConvertToBitmap(), shortHelp = _("Reset view"))
        self.toolbar.AddSeparator()
        self.toolbar.AddTool(6, '', wx.Image(imagefile('inject.png'), wx.BITMAP_TYPE_PNG).ConvertToBitmap(), wx.NullBitmap, shortHelp = _("Inject G-Code"), longHelp = _("Insert code at the beginning of this layer"))
        self.toolbar.AddTool(7, '', wx.Image(imagefile('edit.png'), wx.BITMAP_TYPE_PNG).ConvertToBitmap(), wx.NullBitmap, shortHelp = _("Edit layer"), longHelp = _("Edit the G-Code of this layer"))
        vbox.Add(self.toolbar, 0, border = 5)
        panel.SetSizer(vbox)
        hbox.Add(panel, 1, flag = wx.EXPAND)
        # Vertical layer slider; SL_INVERSE so that up means a higher layer.
        self.layerslider = wx.Slider(hpanel, style = wx.SL_VERTICAL | wx.SL_AUTOTICKS | wx.SL_LEFT | wx.SL_INVERSE)
        self.layerslider.Bind(wx.EVT_SCROLL, self.process_slider)
        hbox.Add(self.layerslider, 0, border = 5, flag = wx.LEFT | wx.EXPAND)
        hpanel.SetSizer(hbox)
        return panel, vbox

    def setlayercb(self, layer):
        # Keep the slider in sync when the layer changes programmatically.
        self.layerslider.SetValue(layer)

    def process_slider(self, event):
        # Subclasses define how slider motion maps onto the visualization.
        raise NotImplementedError
# NOTE(review): these menu ids are not referenced anywhere in this file —
# presumably leftovers from an older menu bar; confirm before removing.
ID_ABOUT = 101
ID_EXIT = 110
class GvizWindow(GvizBaseFrame):
    """Standalone G-code viewer window: hosts a Gviz panel plus the shared
    toolbar/slider chrome, with keyboard, mouse-wheel and drag navigation."""

    def __init__(self, f = None, size = (600, 600), build_dimensions = [200, 200, 100, 0, 0, 0], grid = (10, 50), extrusion_width = 0.5, bgcolor = "#000000"):
        super(GvizWindow, self).__init__(None, title = _("Gcode view, shift to move view, mousewheel to set layer"), size = size)
        panel, vbox = self.create_base_ui()
        self.p = Gviz(panel, size = size, build_dimensions = build_dimensions, grid = grid, extrusion_width = extrusion_width, bgcolor = bgcolor, realparent = self)
        self.toolbar.Realize()
        vbox.Add(self.p, 1, wx.EXPAND)
        self.SetMinSize(self.ClientToWindowSize(vbox.GetMinSize()))
        # Toolbar ids 1-7 as created in create_base_ui().
        self.Bind(wx.EVT_TOOL, lambda x: self.p.zoom(-1, -1, 1.2), id = 1)
        self.Bind(wx.EVT_TOOL, lambda x: self.p.zoom(-1, -1, 1 / 1.2), id = 2)
        self.Bind(wx.EVT_TOOL, lambda x: self.p.layerup(), id = 3)
        self.Bind(wx.EVT_TOOL, lambda x: self.p.layerdown(), id = 4)
        self.Bind(wx.EVT_TOOL, self.resetview, id = 5)
        self.Bind(wx.EVT_TOOL, lambda x: self.p.inject(), id = 6)
        self.Bind(wx.EVT_TOOL, lambda x: self.p.editlayer(), id = 7)
        # Drag origin for panning; None while no drag is in progress.
        self.initpos = None
        # Bind on both the panel and the frame so events are caught
        # regardless of which widget has focus.
        self.p.Bind(wx.EVT_KEY_DOWN, self.key)
        self.Bind(wx.EVT_KEY_DOWN, self.key)
        self.p.Bind(wx.EVT_MOUSEWHEEL, self.zoom)
        self.Bind(wx.EVT_MOUSEWHEEL, self.zoom)
        self.p.Bind(wx.EVT_MOUSE_EVENTS, self.mouse)
        self.Bind(wx.EVT_MOUSE_EVENTS, self.mouse)
        if f:
            gcode = gcoder.GCode(f, get_home_pos(self.p.build_dimensions))
            self.p.addfile(gcode)

    def set_current_gline(self, gline):
        # No-op: this standalone window does not track the printing line.
        return

    def process_slider(self, event):
        """Slider callback: jump to the selected layer and refresh."""
        self.p.layerindex = self.layerslider.GetValue()
        z = self.p.get_currentz()
        wx.CallAfter(self.SetStatusText, _("Layer %d - Z = %.03f mm") % (self.p.layerindex + 1, z), 0)
        self.p.dirty = True
        wx.CallAfter(self.p.Refresh)

    def resetview(self, event):
        """Reset pan and zoom to the initial view."""
        self.p.translate = [0.0, 0.0]
        self.p.scale = self.p.basescale
        self.p.zoom(0, 0, 1.0)

    def mouse(self, event):
        """Pan the view while a mouse button is dragged."""
        if event.ButtonUp(wx.MOUSE_BTN_LEFT) or event.ButtonUp(wx.MOUSE_BTN_RIGHT):
            if self.initpos is not None:
                self.initpos = None
        elif event.Dragging():
            e = event.GetPosition()
            if self.initpos is None:
                # Remember where the drag started and the translation then.
                self.initpos = e
                self.basetrans = self.p.translate
            self.p.translate = [self.basetrans[0] + (e[0] - self.initpos[0]),
                                self.basetrans[1] + (e[1] - self.initpos[1])]
            self.p.dirty = True
            wx.CallAfter(self.p.Refresh)
        else:
            event.Skip()

    def key(self, event):
        """Keyboard navigation: U/D (and arrows) change layer, +/- zoom."""
        # Keycode definitions
        kup = [85, 315]  # Up keys
        kdo = [68, 317]  # Down Keys
        kzi = [388, 316, 61]  # Zoom In Keys
        kzo = [390, 314, 45]  # Zoom Out Keys
        x = event.GetKeyCode()
        cx, cy = self.p.translate
        if x in kup:
            self.p.layerup()
        if x in kdo:
            self.p.layerdown()
        if x in kzi:
            self.p.zoom(cx, cy, 1.2)
        if x in kzo:
            self.p.zoom(cx, cy, 1 / 1.2)

    def zoom(self, event):
        """Mouse wheel: zoom around the cursor, or change layer with Shift."""
        z = event.GetWheelRotation()
        if event.ShiftDown():
            if z > 0: self.p.layerdown()
            elif z < 0: self.p.layerup()
        else:
            if z > 0: self.p.zoom(event.GetX(), event.GetY(), 1.2)
            elif z < 0: self.p.zoom(event.GetX(), event.GetY(), 1 / 1.2)
from printrun.gui.viz import BaseViz
class Gviz(wx.Panel, BaseViz):
    """2D G-code visualization panel.

    Parses G-code into per-layer line/arc segment lists, renders the current
    layer (or all layers) onto a cached bitmap, and supports pan, zoom,
    per-layer navigation and highlighting of the currently printed line.
    """

    # Mark canvas as dirty when setting showall
    _showall = 0

    def _get_showall(self):
        return self._showall

    def _set_showall(self, showall):
        if showall != self._showall:
            self.dirty = True
            self._showall = showall
    showall = property(_get_showall, _set_showall)

    def __init__(self, parent, size = (200, 200), build_dimensions = [200, 200, 100, 0, 0, 0], grid = (10, 50), extrusion_width = 0.5, bgcolor = "#000000", realparent = None):
        wx.Panel.__init__(self, parent, -1)
        self.widget = self
        size = [max(1.0, x) for x in size]
        ratio = size[0] / size[1]
        self.SetMinSize((150, 150 / ratio))
        self.parent = realparent if realparent else parent
        self.size = size
        self.build_dimensions = build_dimensions
        self.grid = grid
        self.Bind(wx.EVT_PAINT, self.paint)
        self.Bind(wx.EVT_SIZE, self.resize)
        # Highlight segments already drawn, plus queues of pending ones.
        self.hilight = deque()
        self.hilightarcs = deque()
        self.hilightqueue = Queue(0)
        self.hilightarcsqueue = Queue(0)
        self.clear()
        self.filament_width = extrusion_width # set it to 0 to disable scaling lines with zoom
        self.update_basescale()
        self.scale = self.basescale
        penwidth = max(1.0, self.filament_width * ((self.scale[0] + self.scale[1]) / 2.0))
        self.translate = [0.0, 0.0]
        self.mainpen = wx.Pen(wx.Colour(0, 0, 0), penwidth)
        self.arcpen = wx.Pen(wx.Colour(255, 0, 0), penwidth)
        self.travelpen = wx.Pen(wx.Colour(10, 80, 80), penwidth)
        self.hlpen = wx.Pen(wx.Colour(200, 50, 50), penwidth)
        # Progressively lighter pens used to fade the previous 6 layers.
        self.fades = [wx.Pen(wx.Colour(int(250 - 0.6 ** i * 100), int(250 - 0.6 ** i * 100), int(200 - 0.4 ** i * 50)), penwidth) for i in range(6)]
        self.penslist = [self.mainpen, self.arcpen, self.travelpen, self.hlpen] + self.fades
        self.bgcolor = wx.Colour()
        self.bgcolor.Set(bgcolor)
        self.blitmap = wx.Bitmap(self.GetClientSize()[0], self.GetClientSize()[1], -1)
        self.paint_overlay = None

    def inject(self):
        """Open the G-code injection dialog for the current layer."""
        layer = self.layers[self.layerindex]
        injector(self.gcode, self.layerindex, layer)

    def editlayer(self):
        """Open the G-code edit dialog for the current layer."""
        layer = self.layers[self.layerindex]
        injector_edit(self.gcode, self.layerindex, layer)

    def clearhilights(self):
        """Drop all highlight segments, drawn and pending."""
        self.hilight.clear()
        self.hilightarcs.clear()
        while not self.hilightqueue.empty():
            self.hilightqueue.get_nowait()
        while not self.hilightarcsqueue.empty():
            self.hilightarcsqueue.get_nowait()

    def clear(self):
        """Reset all parsed G-code state and schedule a repaint."""
        self.gcode = None
        # Last position as [x, y, z, e, f, i, j].
        self.lastpos = [0, 0, 0, 0, 0, 0, 0]
        self.hilightpos = self.lastpos[:]
        self.lines = {}
        self.pens = {}
        self.arcs = {}
        self.arcpens = {}
        self.layers = {}
        self.layersz = []
        self.clearhilights()
        self.layerindex = 0
        self.showall = 0
        self.dirty = True
        self.partial = False
        self.painted_layers = set()
        wx.CallAfter(self.Refresh)

    def get_currentz(self):
        """Return the Z height of the current layer (0.0 if unknown)."""
        z = self.layersz[self.layerindex]
        z = 0. if z is None else z
        return z

    def layerup(self):
        """Move one layer up, updating status bar, slider and canvas."""
        if self.layerindex + 1 < len(self.layers):
            self.layerindex += 1
            z = self.get_currentz()
            wx.CallAfter(self.parent.SetStatusText, _("Layer %d - Going Up - Z = %.03f mm") % (self.layerindex + 1, z), 0)
            self.dirty = True
            self.parent.setlayercb(self.layerindex)
            wx.CallAfter(self.Refresh)

    def layerdown(self):
        """Move one layer down, updating status bar, slider and canvas."""
        if self.layerindex > 0:
            self.layerindex -= 1
            z = self.get_currentz()
            wx.CallAfter(self.parent.SetStatusText, _("Layer %d - Going Down - Z = %.03f mm") % (self.layerindex + 1, z), 0)
            self.dirty = True
            self.parent.setlayercb(self.layerindex)
            wx.CallAfter(self.Refresh)

    def setlayer(self, layer):
        """Jump to the given G-code layer index, leaving show-all mode."""
        if layer in self.layers:
            self.clearhilights()
            self.layerindex = self.layers[layer]
            self.dirty = True
            self.showall = 0
            wx.CallAfter(self.Refresh)

    def update_basescale(self):
        # Uniform pixels-per-mm scale fitting the build area into the panel.
        self.basescale = 2 * [min(float(self.size[0] - 1) / self.build_dimensions[0],
                                  float(self.size[1] - 1) / self.build_dimensions[1])]

    def resize(self, event):
        """Panel resize handler: rescale the view to keep the fit."""
        old_basescale = self.basescale
        width, height = self.GetClientSize()
        if width < 1 or height < 1:
            return
        self.size = (width, height)
        self.update_basescale()
        zoomratio = float(self.basescale[0]) / old_basescale[0]
        # Deferred so rapid resize events coalesce into one re-zoom.
        wx.CallLater(200, self.zoom, 0, 0, zoomratio)

    def zoom(self, x, y, factor):
        """Zoom by `factor` keeping point (x, y) fixed; (-1, -1) = center."""
        if x == -1 and y == -1:
            side = min(self.size)
            x = y = side / 2
        self.scale = [s * factor for s in self.scale]
        self.translate = [x - (x - self.translate[0]) * factor,
                          y - (y - self.translate[1]) * factor]
        penwidth = max(1.0, self.filament_width * ((self.scale[0] + self.scale[1]) / 2.0))
        for pen in self.penslist:
            pen.SetWidth(penwidth)
        self.dirty = True
        wx.CallAfter(self.Refresh)

    def _line_scaler(self, x):
        # (x1, y1, x2, y2) in mm -> pixels.
        return (self.scale[0] * x[0],
                self.scale[1] * x[1],
                self.scale[0] * x[2],
                self.scale[1] * x[3],)

    def _arc_scaler(self, x):
        # (x1, y1, x2, y2, cx, cy) in mm -> pixels.
        return (self.scale[0] * x[0],
                self.scale[1] * x[1],
                self.scale[0] * x[2],
                self.scale[1] * x[3],
                self.scale[0] * x[4],
                self.scale[1] * x[5],)

    def _drawlines(self, dc, lines, pens):
        scaled_lines = [self._line_scaler(l) for l in lines]
        dc.DrawLineList(scaled_lines, pens)

    def _drawarcs(self, dc, arcs, pens):
        scaled_arcs = [self._arc_scaler(a) for a in arcs]
        dc.SetBrush(wx.TRANSPARENT_BRUSH)
        for i in range(len(scaled_arcs)):
            # `pens` may be a per-arc array or a single shared pen.
            dc.SetPen(pens[i] if isinstance(pens, numpy.ndarray) else pens)
            dc.DrawArc(*scaled_arcs[i])

    def repaint_everything(self):
        """Redraw the whole cached bitmap: grid, gauge and layer geometry."""
        width = self.scale[0] * self.build_dimensions[0]
        height = self.scale[1] * self.build_dimensions[1]
        self.blitmap = wx.Bitmap(width + 1, height + 1, -1)
        dc = wx.MemoryDC()
        dc.SelectObject(self.blitmap)
        dc.SetBackground(wx.Brush((250, 250, 200)))
        dc.Clear()
        # Background grid at the configured minor/major spacings.
        dc.SetPen(wx.Pen(wx.Colour(180, 180, 150)))
        for grid_unit in self.grid:
            if grid_unit > 0:
                for x in range(int(self.build_dimensions[0] / grid_unit) + 1):
                    draw_x = self.scale[0] * x * grid_unit
                    dc.DrawLine(draw_x, 0, draw_x, height)
                for y in range(int(self.build_dimensions[1] / grid_unit) + 1):
                    draw_y = self.scale[1] * (self.build_dimensions[1] - y * grid_unit)
                    dc.DrawLine(0, draw_y, width, draw_y)
        dc.SetPen(wx.Pen(wx.Colour(0, 0, 0)))
        if not self.showall:
            # Draw layer gauge
            dc.SetBrush(wx.Brush((43, 144, 255)))
            dc.DrawRectangle(width - 15, 0, 15, height)
            dc.SetBrush(wx.Brush((0, 255, 0)))
            if self.layers:
                dc.DrawRectangle(width - 14, (1.0 - (1.0 * (self.layerindex + 1)) / len(self.layers)) * height, 13, height - 1)
        if self.showall:
            for i in range(len(self.layersz)):
                self.painted_layers.add(i)
                self._drawlines(dc, self.lines[i], self.pens[i])
                self._drawarcs(dc, self.arcs[i], self.arcpens[i])
            dc.SelectObject(wx.NullBitmap)
            return
        if self.layerindex < len(self.layers) and self.layerindex in self.lines:
            # Previous 6 layers are drawn faded, then the current layer on top.
            for layer_i in range(max(0, self.layerindex - 6), self.layerindex):
                self._drawlines(dc, self.lines[layer_i], self.fades[self.layerindex - layer_i - 1])
                self._drawarcs(dc, self.arcs[layer_i], self.fades[self.layerindex - layer_i - 1])
            self._drawlines(dc, self.lines[self.layerindex], self.pens[self.layerindex])
            self._drawarcs(dc, self.arcs[self.layerindex], self.arcpens[self.layerindex])
        self._drawlines(dc, self.hilight, self.hlpen)
        self._drawarcs(dc, self.hilightarcs, self.hlpen)
        self.paint_hilights(dc)
        dc.SelectObject(wx.NullBitmap)

    def repaint_partial(self):
        """Draw only layers parsed since the last repaint (show-all mode)."""
        if self.showall:
            dc = wx.MemoryDC()
            dc.SelectObject(self.blitmap)
            for i in set(range(len(self.layersz))).difference(self.painted_layers):
                self.painted_layers.add(i)
                self._drawlines(dc, self.lines[i], self.pens[i])
                self._drawarcs(dc, self.arcs[i], self.arcpens[i])
            dc.SelectObject(wx.NullBitmap)

    def paint_hilights(self, dc = None):
        """Flush queued highlight segments onto the cached bitmap."""
        if self.hilightqueue.empty() and self.hilightarcsqueue.empty():
            return
        hl = []
        if not dc:
            dc = wx.MemoryDC()
            dc.SelectObject(self.blitmap)
        while not self.hilightqueue.empty():
            hl.append(self.hilightqueue.get_nowait())
        self._drawlines(dc, hl, self.hlpen)
        hlarcs = []
        while not self.hilightarcsqueue.empty():
            hlarcs.append(self.hilightarcsqueue.get_nowait())
        self._drawarcs(dc, hlarcs, self.hlpen)
        dc.SelectObject(wx.NullBitmap)

    def paint(self, event):
        """EVT_PAINT handler: refresh the cached bitmap as needed, then blit."""
        if self.dirty:
            self.dirty = False
            self.partial = False
            self.repaint_everything()
        elif self.partial:
            self.partial = False
            self.repaint_partial()
        self.paint_hilights()
        dc = wx.PaintDC(self)
        dc.SetBackground(wx.Brush(self.bgcolor))
        dc.Clear()
        dc.DrawBitmap(self.blitmap, self.translate[0], self.translate[1])
        if self.paint_overlay:
            self.paint_overlay(dc)

    def addfile_perlayer(self, gcode, showall = False):
        """Generator: load `gcode`, yielding each layer index as it is parsed
        (and None when done), so callers can report progress."""
        self.clear()
        self.gcode = gcode
        self.showall = showall
        generator = self.add_parsed_gcodes(gcode)
        generator_output = next(generator)
        while generator_output is not None:
            yield generator_output
            generator_output = next(generator)
        max_layers = len(self.layers)
        if hasattr(self.parent, "layerslider"):
            self.parent.layerslider.SetRange(0, max_layers - 1)
            self.parent.layerslider.SetValue(0)
        yield None

    def addfile(self, gcode = None, showall = False):
        """Load `gcode` synchronously, draining the per-layer generator."""
        generator = self.addfile_perlayer(gcode, showall)
        while next(generator) is not None:
            continue

    def _get_movement(self, start_pos, gline):
        """Takes a start position and a gcode, and returns a 3-uple containing
        (final position, line, arc), with line and arc being None if not
        used"""
        target = start_pos[:]
        target[5] = 0.0
        target[6] = 0.0
        if gline.current_x is not None: target[0] = gline.current_x
        if gline.current_y is not None: target[1] = gline.current_y
        if gline.current_z is not None: target[2] = gline.current_z
        if gline.e is not None:
            if gline.relative_e:
                target[3] += gline.e
            else:
                target[3] = gline.e
        if gline.f is not None: target[4] = gline.f
        if gline.i is not None: target[5] = gline.i
        if gline.j is not None: target[6] = gline.j
        if gline.command in ["G0", "G1"]:
            line = [self._x(start_pos[0]),
                    self._y(start_pos[1]),
                    self._x(target[0]),
                    self._y(target[1])]
            return target, line, None
        elif gline.command in ["G2", "G3"]:
            # startpos, endpos, arc center
            arc = [self._x(start_pos[0]), self._y(start_pos[1]),
                   self._x(target[0]), self._y(target[1]),
                   self._x(start_pos[0] + target[5]), self._y(start_pos[1] + target[6])]
            if gline.command == "G2": # clockwise, reverse endpoints
                arc[0], arc[1], arc[2], arc[3] = arc[2], arc[3], arc[0], arc[1]
            return target, None, arc

    def _y(self, y):
        # Flip Y: G-code Y grows upward, screen Y grows downward.
        return self.build_dimensions[1] - (y - self.build_dimensions[4])

    def _x(self, x):
        return x - self.build_dimensions[3]

    def add_parsed_gcodes(self, gcode):
        """Generator: convert parsed layers into line/arc/pen arrays,
        yielding each source layer index (then None when finished)."""
        start_time = time.time()
        layer_idx = 0
        while layer_idx < len(gcode.all_layers):
            layer = gcode.all_layers[layer_idx]
            has_move = False
            for gline in layer:
                if gline.is_move:
                    has_move = True
                    break
            if not has_move:
                # Layers without movement get no visualization entry.
                yield layer_idx
                layer_idx += 1
                continue
            viz_layer = len(self.layers)
            self.lines[viz_layer] = []
            self.pens[viz_layer] = []
            self.arcs[viz_layer] = []
            self.arcpens[viz_layer] = []
            for gline in layer:
                if not gline.is_move:
                    continue
                target, line, arc = self._get_movement(self.lastpos[:], gline)
                if line is not None:
                    self.lines[viz_layer].append(line)
                    # Extruding moves use the main pen, travels the travel pen.
                    self.pens[viz_layer].append(self.mainpen if target[3] != self.lastpos[3] or gline.extruding else self.travelpen)
                elif arc is not None:
                    self.arcs[viz_layer].append(arc)
                    self.arcpens[viz_layer].append(self.arcpen)
                self.lastpos = target
            # Transform into a numpy array for memory efficiency
            self.lines[viz_layer] = numpy.asarray(self.lines[viz_layer], dtype = numpy.float32)
            self.pens[viz_layer] = numpy.asarray(self.pens[viz_layer])
            self.arcs[viz_layer] = numpy.asarray(self.arcs[viz_layer], dtype = numpy.float32)
            self.arcpens[viz_layer] = numpy.asarray(self.arcpens[viz_layer])
            # Only add layer to self.layers now to prevent the display of an
            # unfinished layer
            self.layers[layer_idx] = viz_layer
            self.layersz.append(layer.z)
            # Refresh display if more than 0.2s have passed
            if time.time() - start_time > 0.2:
                start_time = time.time()
                self.partial = True
                wx.CallAfter(self.Refresh)
            yield layer_idx
            layer_idx += 1
        self.dirty = True
        wx.CallAfter(self.Refresh)
        yield None

    def addgcodehighlight(self, gline):
        """Queue a highlight segment for a just-executed movement gcode."""
        if gline.command not in ["G0", "G1", "G2", "G3"]:
            return
        target, line, arc = self._get_movement(self.hilightpos[:], gline)
        if line is not None:
            self.hilight.append(line)
            self.hilightqueue.put_nowait(line)
        elif arc is not None:
            self.hilightarcs.append(arc)
            self.hilightarcsqueue.put_nowait(arc)
        self.hilightpos = target
        wx.CallAfter(self.Refresh)
if __name__ == '__main__':
    # Standalone usage: python gviz.py <file.gcode>
    import sys
    app = wx.App(False)
    # "rU" (universal-newlines) mode was deprecated since Python 3.4 and
    # removed in 3.11; plain "r" has the same behavior on Python 3.
    main = GvizWindow(open(sys.argv[1], "r"))
    main.Show()
    app.MainLoop()
| 40.107914 | 215 | 0.582063 |
ace5a066dda6cbd1b7da1a9bb784a2ea8039ba00 | 783 | py | Python | staicoin/util/chain_utils.py | d00kSI/staicoin-blockchain | 5783a48271c8145c8eea93169df13a9ed32817ad | [
"Apache-2.0"
] | 1 | 2021-12-03T02:39:29.000Z | 2021-12-03T02:39:29.000Z | staicoin/util/chain_utils.py | d00kSI/staicoin-blockchain | 5783a48271c8145c8eea93169df13a9ed32817ad | [
"Apache-2.0"
] | null | null | null | staicoin/util/chain_utils.py | d00kSI/staicoin-blockchain | 5783a48271c8145c8eea93169df13a9ed32817ad | [
"Apache-2.0"
] | null | null | null | from typing import List
from staicoin.types.blockchain_format.coin import Coin
from staicoin.types.blockchain_format.program import SerializedProgram
from staicoin.types.blockchain_format.sized_bytes import bytes32
from staicoin.util.condition_tools import (
conditions_dict_for_solution,
created_outputs_for_conditions_dict,
)
def additions_for_solution(
    coin_name: bytes32, puzzle_reveal: SerializedProgram, solution: SerializedProgram, max_cost: int
) -> List[Coin]:
    """
    Checks the conditions created by CoinSpend and returns the list of all coins created

    Args:
        coin_name: id of the coin being spent (parent of the created coins)
        puzzle_reveal: the spent coin's puzzle
        solution: the solution program for the puzzle
        max_cost: maximum allowed evaluation cost

    Returns an empty list when the conditions cannot be evaluated.
    """
    # Third element of the tuple is the evaluation cost, unused here.
    err, dic, _cost = conditions_dict_for_solution(puzzle_reveal, solution, max_cost)
    if err or dic is None:
        return []
    return created_outputs_for_conditions_dict(dic, coin_name)
| 35.590909 | 100 | 0.789272 |
ace5a123e3eba21b96a64ef6e11810c14507a909 | 75 | py | Python | Mundo01/Exercicios/ex002.py | molonti/CursoemVideo---Python | 4f6a7af648f7f619d11e95fa3dc7a33b28fcfa11 | [
"MIT"
] | null | null | null | Mundo01/Exercicios/ex002.py | molonti/CursoemVideo---Python | 4f6a7af648f7f619d11e95fa3dc7a33b28fcfa11 | [
"MIT"
] | null | null | null | Mundo01/Exercicios/ex002.py | molonti/CursoemVideo---Python | 4f6a7af648f7f619d11e95fa3dc7a33b28fcfa11 | [
"MIT"
] | null | null | null | nome = input('Digite seu nome: ')
# Greet the user by the name captured above (runtime strings are Portuguese).
print('Muito prazer, {}!'.format(nome))
| 18.75 | 39 | 0.64 |
ace5a195b9b99d8d70ea919fbb210ec5310b7e32 | 2,111 | py | Python | alphabet_detector/alphabet_detector.py | EliFinkelshteyn/alphabet-detector | 234682e439fa549cba9b2289c8d348f31daa7282 | [
"MIT"
] | 152 | 2015-03-03T20:39:28.000Z | 2022-02-04T19:47:11.000Z | alphabet_detector/alphabet_detector.py | EliFinkelshteyn/alphabet-detector | 234682e439fa549cba9b2289c8d348f31daa7282 | [
"MIT"
] | 10 | 2015-03-14T18:41:50.000Z | 2021-06-02T17:38:41.000Z | alphabet_detector/alphabet_detector.py | EliFinkelshteyn/alphabet-detector | 234682e439fa549cba9b2289c8d348f31daa7282 | [
"MIT"
] | 17 | 2015-03-06T11:08:20.000Z | 2021-09-30T14:28:48.000Z | import unicodedata as ud
from collections import defaultdict
class AlphabetDetector:
    """Detect which Unicode scripts a string's alphabetic characters use.

    A character counts as belonging to an alphabet when the alphabet's
    name (e.g. ``'LATIN'``) occurs in the character's Unicode name.
    Lookups are memoised per (alphabet, character) unless ``no_memory``
    is set.
    """

    def __init__(self, no_memory=False):
        # Cache of previous lookups: {alphabet: {character: bool}}.
        self.alphabet_letters = defaultdict(dict)
        self.no_memory = no_memory

    def is_in_alphabet(self, uchr, alphabet):
        """Return True if the single character `uchr` belongs to `alphabet`.

        NOTE: ``ud.name`` raises ValueError for characters without a
        Unicode name; callers are expected to pass named (alphabetic)
        characters.
        """
        if self.no_memory:
            return alphabet in ud.name(uchr)
        try:
            return self.alphabet_letters[alphabet][uchr]
        except KeyError:
            # First lookup for this character: compute and cache the answer.
            return self.alphabet_letters[alphabet].setdefault(
                uchr, alphabet in ud.name(uchr))

    def only_alphabet_chars(self, unistr, alphabet):
        """Return True if every alphabetic char of `unistr` is in `alphabet`.

        Non-alphabetic characters (digits, punctuation, whitespace) are
        ignored, so a string with no alphabetic characters returns True.
        """
        return all(self.is_in_alphabet(uchr, alphabet)
                   for uchr in unistr if uchr.isalpha())

    def detect_alphabet(self, unistr):
        """Return the set of script names used by `unistr`'s alphabetic chars."""
        return set(ud.name(char).split(' ')[0]
                   for char in unistr if char.isalpha())

    # The is_* helpers previously used the redundant
    # ``True if <bool> else False`` pattern; ``only_alphabet_chars``
    # already returns a bool (``all()``), so it is returned directly.
    def is_greek(self, unistr):
        return self.only_alphabet_chars(unistr, 'GREEK')

    def is_cyrillic(self, unistr):
        return self.only_alphabet_chars(unistr, 'CYRILLIC')

    def is_latin(self, unistr):
        return self.only_alphabet_chars(unistr, 'LATIN')

    def is_arabic(self, unistr):
        return self.only_alphabet_chars(unistr, 'ARABIC')

    def is_hebrew(self, unistr):
        return self.only_alphabet_chars(unistr, 'HEBREW')

    # NOTE: this only detects Chinese script characters (Hanzi/Kanji/Hanja).
    # it does not detect other CJK script characters like Hangul or Katakana
    def is_cjk(self, unistr):
        return self.only_alphabet_chars(unistr, 'CJK')

    def is_hangul(self, unistr):
        return self.only_alphabet_chars(unistr, 'HANGUL')

    def is_hiragana(self, unistr):
        return self.only_alphabet_chars(unistr, 'HIRAGANA')

    def is_katakana(self, unistr):
        return self.only_alphabet_chars(unistr, 'KATAKANA')

    def is_thai(self, unistr):
        return self.only_alphabet_chars(unistr, 'THAI')
| 36.396552 | 78 | 0.681667 |
ace5a321d0a713dbfce5fc0c1ab7a4d85903cd60 | 1,833 | py | Python | code/socialDistribution/urls.py | CMPUT404F21TEAM/social-distribution | c6775bfa3bf93025d426bc4601431128a51d4c48 | [
"W3C-20150513"
] | null | null | null | code/socialDistribution/urls.py | CMPUT404F21TEAM/social-distribution | c6775bfa3bf93025d426bc4601431128a51d4c48 | [
"W3C-20150513"
] | 173 | 2021-09-30T00:12:03.000Z | 2021-12-09T00:32:46.000Z | code/socialDistribution/urls.py | CMPUT404F21TEAM/social-distribution | c6775bfa3bf93025d426bc4601431128a51d4c48 | [
"W3C-20150513"
] | 3 | 2021-12-09T01:27:49.000Z | 2022-03-11T06:04:16.000Z | from django.urls import path
from . import views
# URL namespace for reversing, e.g. reverse('socialDistribution:home').
app_name = 'socialDistribution'

urlpatterns = [
    # Landing page and authentication.
    path('', views.index, name='index'),
    path('login/', views.loginPage, name='login'),
    path('register/', views.register, name='register'),
    path('logout/', views.logoutUser, name='logout'),
    # Inbox and author browsing.
    path('inbox/', views.inbox, name='inbox'),
    path('author/', views.authors, name='authors'),
    path('author/<uuid:author_id>/', views.author, name='author'),
    path('home/', views.home, name='home'),
    path('author/<uuid:author_id>/posts/', views.posts, name='posts'),
    path('unlisted/<uuid:post_id>', views.unlisted_post_image, name='unlisted-post-image'),
    # Friendship / follow actions.
    path('author/<uuid:author_id>/befriend/', views.befriend, name='befriend'),
    path('author/<uuid:author_id>/un-befriend/', views.un_befriend, name='un-befriend'),
    path('author/<uuid:author_id>/friend-request/<str:action>', views.friend_request, name='friend-request'),
    path('author/unlisted-posts', views.unlisted_posts, name='unlisted-posts'),
    # Current user's pages.
    path('create/', views.create, name='create'),
    path('profile/', views.profile, name='profile'),
    path('user/', views.user, name='user'),
    # Post viewing and interactions.
    path('posts/<str:post_type>/<uuid:id>/', views.single_post, name='single-post'),
    path('posts/<str:post_type>/<uuid:id>/like', views.like_post, name='like-post'),
    path('like-comment/', views.like_comment, name='like-comment'),
    # NOTE(review): plain <author_id> (matched as a string) here while the
    # sibling routes use <uuid:author_id> — confirm this is intentional.
    path('author/<author_id>/posts/<uuid:post_id>/comments/', views.post_comment, name='post-comment'),
    # Post management (delete/edit/share/link).
    path('delete-post/<uuid:id>', views.delete_post, name='delete-post'),
    path('edit-post/<uuid:id>', views.edit_post, name='edit-post'),
    path('share-post/<uuid:id>', views.share_post, name='share-post'),
    path('copy-link/<uuid:id>', views.copy_link, name='copy-link'),
    path('public-share/<str:id>/', views.public_share, name='public-share'),
]
| 49.540541 | 107 | 0.692853 |
ace5a48097e15725726c9e6cd8edbc5c70a3e204 | 1,944 | py | Python | pelicanconf.py | Goclis/pelican-blog | f42f4955d37ffbdb26c4445f85e3a13cab4a89f3 | [
"MIT"
] | null | null | null | pelicanconf.py | Goclis/pelican-blog | f42f4955d37ffbdb26c4445f85e3a13cab4a89f3 | [
"MIT"
] | null | null | null | pelicanconf.py | Goclis/pelican-blog | f42f4955d37ffbdb26c4445f85e3a13cab4a89f3 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*- #
from __future__ import unicode_literals

# Basic site information.
AUTHOR = u'Goclis Yao'
SITENAME = u'Logging'
SITEURL = 'http://goclis.github.io'
TIMEZONE = 'Asia/Shanghai'
DATE_FORMATS = {
    'zh_CN': '%Y-%m-%d',
}
DEFAULT_DATE_FORMAT = '%Y-%m-%d'
DEFAULT_DATE = 'fs' # use filesystem's mtime
DEFAULT_LANG = u'zh_CN'
FILENAME_METADATA = '(?P<slug>.*)'
DEFAULT_PAGINATION = False
THEME = "./themes/elegant-1.3-based" # theme directory
# Extensions for the Jinja2 template engine
JINJA_EXTENSIONS = ['jinja2.ext.do']
# Related links and social/contact information
LINKS = (('Pelican', 'http://getpelican.com/'),
         ('Python.org', 'http://python.org/'),
         ('Jinja2', 'http://jinja.pocoo.org/'),)
SOCIAL_PROFILE_LABEL = u'Contact me'
SOCIAL = (('Github', 'https://github.com/Goclis/'),
          ('Twitter', 'https://twitter.com/Goclis'),
          ('Email', 'mailto:goclisyyh@gmail.com'))
# Page settings, mainly for the About page
PAGE_PATHS = ['pages']
PAGE_URL = '{slug}.html'
PAGE_SAVE_AS = '{slug}.html'
DISPLAY_PAGES_ON_MENU = False
# Category settings
USE_FOLDER_AS_CATEGORY = True
# Use template pages to group articles by section
TEMPLATE_PAGES = {
    # 'tpages/tech.html': 'tech.html',  (replaced by index.html)
    'tpages/life.html': 'life.html',
    'tpages/note.html': 'note.html'
}
TECH_CATEGORIES = ['Tech', 'Python']
NOTE_CATEGORIES = ['Note']
LIFE_CATEGORIES = ['Life']
# Markdown extensions: syntax highlighting, tables/code extras, table of contents
MD_EXTENSIONS = ['codehilite(css_class=highlight)', 'extra', 'toc(permalink=true)']
# Plugins: table-of-contents extraction, sitemap, site search
PLUGIN_PATHS = ['plugins']
PLUGINS = ['extract_toc', 'sitemap', 'tipue_search']
# Article generation
STATIC_PATHS = ['images', 'pdfs'] # static file directories
ARTICLE_EXCLUDES = ['unposts', 'tpages'] # directories excluded from generation
ARTICLE_SAVE_AS = 'posts/{date:%Y}/{date:%m}/{date:%d}/{slug}.html'
ARTICLE_URL = 'posts/{date:%Y}/{date:%m}/{date:%d}/{slug}.html'
ARTICLE_LANG_SAVE_AS = 'posts/{date:%Y}/{date:%m}/{date:%d}/{slug}.html'
ARTICLE_LANG_URL = 'posts/{date:%Y}/{date:%m}/{date:%d}/{slug}.html'
# Disqus comment settings
DISQUS_SITENAME = "goclis"
COMMENTS_INTRO = u"有啥留个言呗~"
| 27.771429 | 83 | 0.662037 |
ace5a5246a6c7e36c985bd67a62ac64b9e0ebae0 | 7,263 | py | Python | LCM/scripts/Register.py | amitsara/PowerShell-DSC-for-Linux | 22694d09f1fe61228210aae9bdd53b6f3da4c2d1 | [
"MIT"
] | 2 | 2020-05-19T20:07:32.000Z | 2020-08-08T00:58:15.000Z | LCM/scripts/Register.py | amitsara/PowerShell-DSC-for-Linux | 22694d09f1fe61228210aae9bdd53b6f3da4c2d1 | [
"MIT"
] | null | null | null | LCM/scripts/Register.py | amitsara/PowerShell-DSC-for-Linux | 22694d09f1fe61228210aae9bdd53b6f3da4c2d1 | [
"MIT"
] | 4 | 2019-10-31T19:10:42.000Z | 2022-03-15T07:42:03.000Z | #!/usr/bin/python
import sys
import os
import os.path
import tempfile
import shutil
def usage():
    """Print command-line help for this script to stdout."""
    print("""Usage: Register.py [OPTIONS]
OPTIONS (case insensitive):
 --RegistrationKey KEY
 --ServerURL URL
 --ConfigurationName NAME
 --RefreshFrequencyMins NUM default=30
 --ConfigurationModeFrequencyMins NUM default=15
 --ConfigurationMode (ApplyAndMonitor,ApplyAndAutoCorrect,ApplyOnly) default=ApplyAndMonitor
 --RefreshMode (Pull|Push) default=Pull
 --Help
""")
# Apply a DSC meta configuration based on a template

# Parsed "--option value" pairs, keyed by lower-cased option name.
Variables = dict()

# --- Parse command line arguments -------------------------------------------
# Options take the form "--Name value"; the first bare (non "--") token ends
# option parsing and the remainder of argv is collected into `args`.
args = []
optlist = []  # NOTE(review): never used after initialisation
command_line_length = len(sys.argv)
i = 0
inArgument = False
currentArgument = ""
arg = ""
while i < command_line_length:
    arg = sys.argv[i]
    if i == 0:
        # skip the program name
        i += 1
        continue
    if inArgument:
        # The previous token was "--Option"; this token is its value.
        Variables[currentArgument] = arg
        inArgument = False
    else:
        if arg[0:2] == "--":
            inArgument = True
            # Option names are matched case-insensitively.
            currentArgument = arg[2:].lower()
        else:
            # The rest are not options
            args = sys.argv[i:]
            break
    i += 1

# A trailing option with no value (e.g. "--Help") stores the flag token
# itself as its own value; only the key's presence is checked below.
if inArgument:
    Variables[currentArgument] = arg

AcceptableOptions = ["registrationkey", "serverurl", "configurationname", "refreshfrequencymins", "configurationmodefrequencymins", "configurationmode", "refreshmode", "help", "regeneratecert"]

if "help" in Variables:
    usage()
    sys.exit(0)

# Reject any option name that is not in the accepted list.
optionsValid = True
for arg in Variables.keys():
    if arg.lower() not in AcceptableOptions:
        optionsValid = False
        print("Error: %s is not a valid option" % arg)

if len(Variables.keys()) == 0:
    if len(args) == 2:
        # Assume first parameter is RegistrationKey and second parameter is ServerURL in this case
        Variables["registrationkey"] = args[0]
        Variables["serverurl"] = args[1]
    else:
        print("Error: Unexpected (" + str(len(args)) + ") number of non-option arguments. Without options specified, we expect arguments to be RegistrationKey followed by ServerURL.")
        optionsValid = False

if optionsValid == False:
    usage()
    sys.exit(1)

# --- Extract option values (with defaults) ----------------------------------
ServerURL = ""
RegistrationKey = ""
ConfigurationName = ""

# If RefreshMode == Pull (which is default), then RegistrationKey and ServerURL are required.
RefreshMode = "Pull"
if "refreshmode" in Variables:
    RefreshMode = Variables["refreshmode"]

if RefreshMode == "Pull":
    if "registrationkey" not in Variables:
        print("Error: RegistrationKey must be specified for Pull mode")
        usage()
        sys.exit(1)
    if "serverurl" not in Variables:
        print("Error: ServerURL must be specified for Pull mode")
        usage()
        sys.exit(1)
    ServerURL = Variables["serverurl"]
    RegistrationKey = Variables["registrationkey"]
    if "configurationname" in Variables:
        ConfigurationName = Variables["configurationname"]

ConfigurationMode = "ApplyAndMonitor"
if "configurationmode" in Variables:
    ConfigurationMode = Variables["configurationmode"]

# Frequencies are kept as strings because they are substituted verbatim
# into the MOF text below.
RefreshFrequencyMins = "30"
if "refreshfrequencymins" in Variables:
    RefreshFrequencyMins = Variables["refreshfrequencymins"]

ConfigurationModeFrequencyMins = "15"
if "configurationmodefrequencymins" in Variables:
    ConfigurationModeFrequencyMins = Variables["configurationmodefrequencymins"]

RegenerateCert = False
if "regeneratecert" in Variables:
    RegenerateCert = True

# --- Build the meta-configuration MOF document ------------------------------
# The <PLACEHOLDER> tokens in these templates are replaced below.
metaConfig = ""
if RefreshMode == "Push":
    metaConfig = """
instance of MSFT_DSCMetaConfiguration as $MSFT_DSCMetaConfiguration1ref
{
    DownloadManagerName = "WebDownloadManager";
    RefreshMode = "<REFRESHMODE>";
    ConfigurationMode = "<CONFIGURATIONMODE>";
};

instance of OMI_ConfigurationDocument
{
    Version="1.0.0";
};
"""
else:
    metaConfig="""
instance of MSFT_WebDownloadManager as $MSFT_WebDownloadManager1ref
{
    ResourceID = "[ConfigurationRepositoryWeb]AzureAutomationDSC";
    SourceInfo = "C:\\\\OaaS-RegistrationMetaConfig2.ps1::20::9::ConfigurationRepositoryWeb";
    RegistrationKey = "<REGKEY>";
    ServerURL = "<SERVERURL>";
    ConfigurationNames = {"<CONFIGURATIONNAME>"};
};

instance of MSFT_WebResourceManager as $MSFT_WebResourceManager1ref
{
    SourceInfo = "C:\\\\OaaS-RegistrationMetaConfig2.ps1::27::9::ResourceRepositoryWeb";
    ResourceID = "[ResourceRepositoryWeb]AzureAutomationDSC";
    RegistrationKey = "<REGKEY>";
    ServerURL = "<SERVERURL>";
};

instance of MSFT_WebReportManager as $MSFT_WebReportManager1ref
{
    SourceInfo = "C:\\\\OaaS-RegistrationMetaConfig2.ps1::34::9::ReportServerWeb";
    ResourceID = "[ReportServerWeb]AzureAutomationDSC";
    RegistrationKey = "<REGKEY>";
    ServerURL = "<SERVERURL>";
};

instance of MSFT_DSCMetaConfiguration as $MSFT_DSCMetaConfiguration1ref
{
    RefreshMode = "<REFRESHMODE>";
    AllowModuleOverwrite = False;
    RefreshFrequencyMins = <REFRESHFREQUENCYMINS>;
    RebootNodeIfNeeded = False;
    ConfigurationModeFrequencyMins = <CONFIGURATIONMODEFREQUENCYMINS>;
    ConfigurationMode = "<CONFIGURATIONMODE>";
    ResourceModuleManagers = {
        $MSFT_WebResourceManager1ref
    };
    ReportManagers = {
        $MSFT_WebReportManager1ref
    };
    ConfigurationDownloadManagers = {
        $MSFT_WebDownloadManager1ref
    };
};

instance of OMI_ConfigurationDocument
{
    Version="2.0.0";
    MinimumCompatibleVersion = "2.0.0";
    CompatibleVersionAdditionalProperties= { "MSFT_DSCMetaConfiguration:StatusRetentionTimeInDays" };
    Author="azureautomation";
    Name="RegistrationMetaConfig";
};
"""

# Substitute the placeholder tokens with the values gathered above.
metaConfig = metaConfig.replace("<REFRESHMODE>", RefreshMode)
metaConfig = metaConfig.replace("<REFRESHFREQUENCYMINS>", RefreshFrequencyMins)
metaConfig = metaConfig.replace("<CONFIGURATIONMODEFREQUENCYMINS>", ConfigurationModeFrequencyMins)
metaConfig = metaConfig.replace("<CONFIGURATIONMODE>", ConfigurationMode)
metaConfig = metaConfig.replace("<SERVERURL>", ServerURL)
metaConfig = metaConfig.replace("<REGKEY>", RegistrationKey)
metaConfig = metaConfig.replace("<CONFIGURATIONNAME>", ConfigurationName)

# Write to file and run SendMetaConfigurationApply.py
tempdir = tempfile.mkdtemp()
meta_path = tempdir + "/metaconf.mof"
f = open(meta_path, "w")
f.write(metaConfig)
f.close()

# Generate new cert if specified
# NOTE(review): the <OAAS_*> and <DSC_SCRIPT_PATH> tokens below look like
# build/install-time placeholders substituted during packaging — confirm.
if RegenerateCert == True:
    OAAS_CERTPATH="<OAAS_CERTPATH>"
    OAAS_KEYPATH="<OAAS_KEYPATH>"
    OAAS_THUMBPRINT="<OAAS_THUMBPRINT>"
    # Pre-create key files with owner-only permissions before openssl writes them.
    os.system("touch " + OAAS_KEYPATH + "; chmod 0600 " + OAAS_KEYPATH);
    os.system("touch " + OAAS_KEYPATH + "_old; chmod 0600 " + OAAS_KEYPATH + "_old");
    # Generate a self-signed cert + key, then rewrite the key and record the thumbprint.
    os.system("openssl req -subj '/CN=DSC-OaaS' -new -newkey rsa:2048 -days 365 -nodes -x509 -keyout " + OAAS_KEYPATH + "_old -out " + OAAS_CERTPATH + " && openssl rsa -in " + OAAS_KEYPATH + "_old -out " + OAAS_KEYPATH + " && rm -f " + OAAS_KEYPATH + "_old");
    os.system("openssl x509 -noout -in " + OAAS_CERTPATH + " -fingerprint | sed 's/^.*=//' > " + OAAS_THUMBPRINT);

# Apply the generated meta configuration, then clean up the temp directory.
os.system("<DSC_SCRIPT_PATH>/SetDscLocalConfigurationManager.py -configurationmof " + meta_path)
shutil.rmtree(tempdir)
| 33.013636 | 260 | 0.68966 |
ace5a5cfce51ceca1c6a1e00288aa87bb9759f3d | 4,621 | py | Python | tensor2tensor/models/revnet_test.py | SamuelmsWong/tensor2tensor | 7172ad8dc5f1d8f8c0e21cbb831ae2657387a2af | [
"Apache-2.0"
] | 3 | 2021-01-19T20:21:15.000Z | 2021-01-19T21:36:37.000Z | tensor2tensor/models/revnet_test.py | SamuelmsWong/tensor2tensor | 7172ad8dc5f1d8f8c0e21cbb831ae2657387a2af | [
"Apache-2.0"
] | null | null | null | tensor2tensor/models/revnet_test.py | SamuelmsWong/tensor2tensor | 7172ad8dc5f1d8f8c0e21cbb831ae2657387a2af | [
"Apache-2.0"
] | 1 | 2020-06-19T17:36:10.000Z | 2020-06-19T17:36:10.000Z | # coding=utf-8
# Copyright 2020 The Tensor2Tensor Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for Revnet."""
from tensor2tensor.models import revnet
import tensorflow.compat.v1 as tf
class RevnetTest(tf.test.TestCase):
def testH(self):
rev_block_input = tf.random_uniform([1, 299, 299, 3])
rev_block_output = revnet.downsample_bottleneck(rev_block_input, 256)
self.assertEqual(rev_block_output.get_shape().as_list(), [1, 299, 299, 256])
def testHStride(self):
rev_block_input = tf.random_uniform([2, 299, 299, 256])
rev_block_output = revnet.downsample_bottleneck(
rev_block_input, 512, stride=2, scope='HStride')
self.assertEqual(rev_block_output.get_shape().as_list(), [2, 150, 150, 512])
def testInit(self):
images = tf.random_uniform([1, 299, 299, 3])
x1, x2 = revnet.init(images, 32)
self.assertEqual(x1.get_shape().as_list(), [1, 74, 74, 16])
self.assertEqual(x2.get_shape().as_list(), [1, 74, 74, 16])
def testInit3D(self):
images = tf.random_uniform([1, 299, 299, 299, 3])
x1, x2 = revnet.init(images, 32, dim='3d', scope='init3d')
self.assertEqual(x1.get_shape().as_list(), [1, 74, 74, 74, 16])
self.assertEqual(x2.get_shape().as_list(), [1, 74, 74, 74, 16])
def testUnit1(self):
x1 = tf.random_uniform([4, 74, 74, 256])
x2 = tf.random_uniform([4, 74, 74, 256])
x1, x2 = revnet.unit(x1, x2, block_num=1, depth=64,
first_batch_norm=True, num_layers=1)
self.assertEqual(x1.get_shape().as_list(), [4, 74, 74, 256])
self.assertEqual(x2.get_shape().as_list(), [4, 74, 74, 256])
def testUnit2(self):
x1 = tf.random_uniform([4, 74, 74, 256])
x2 = tf.random_uniform([4, 74, 74, 256])
x1, x2 = revnet.unit(x1, x2, block_num=2, depth=128,
num_layers=1, stride=2)
self.assertEqual(x1.get_shape().as_list(), [4, 37, 37, 512])
self.assertEqual(x2.get_shape().as_list(), [4, 37, 37, 512])
def testUnit3(self):
x1 = tf.random_uniform([1, 37, 37, 512])
x2 = tf.random_uniform([1, 37, 37, 512])
x1, x2 = revnet.unit(x1, x2, block_num=3, depth=256,
num_layers=10, stride=2)
self.assertEqual(x1.get_shape().as_list(), [1, 19, 19, 1024])
self.assertEqual(x2.get_shape().as_list(), [1, 19, 19, 1024])
def testUnit4(self):
x1 = tf.random_uniform([1, 19, 19, 1024])
x2 = tf.random_uniform([1, 19, 19, 1024])
x1, x2 = revnet.unit(x1, x2, block_num=4, depth=416,
num_layers=1, stride=2)
self.assertEqual(x1.get_shape().as_list(), [1, 10, 10, 1664])
self.assertEqual(x2.get_shape().as_list(), [1, 10, 10, 1664])
def testUnit3D(self):
x1 = tf.random_uniform([4, 74, 74, 74, 256])
x2 = tf.random_uniform([4, 74, 74, 74, 256])
x1, x2 = revnet.unit(x1, x2, block_num=5, depth=128,
num_layers=1, dim='3d', stride=2)
self.assertEqual(x1.get_shape().as_list(), [4, 37, 37, 37, 512])
self.assertEqual(x2.get_shape().as_list(), [4, 37, 37, 37, 512])
def testFinalBlock(self):
x1 = tf.random_uniform([5, 10, 10, 1024])
x2 = tf.random_uniform([5, 10, 10, 1024])
logits = revnet.final_block(x1, x2)
self.assertEqual(logits.shape, [5, 1, 1, 2048])
def testFinalBlock3D(self):
x1 = tf.random_uniform([5, 10, 10, 10, 1024])
x2 = tf.random_uniform([5, 10, 10, 10, 1024])
logits = revnet.final_block(x1, x2, dim='3d', scope='FinalBlock3D')
self.assertEqual(logits.shape, [5, 1, 1, 1, 2048])
def testEndToEnd(self):
images = tf.random_uniform([1, 299, 299, 3])
hparams = revnet.revnet_base()
hparams.mode = tf.estimator.ModeKeys.TRAIN
logits = revnet.revnet(images, hparams)
self.assertEqual(logits.shape, [1, 1, 1, 3328])
def testEndToEnd3D(self):
images = tf.random_uniform([1, 299, 299, 299, 3])
hparams = revnet.revnet_base()
hparams.dim = '3d'
hparams.mode = tf.estimator.ModeKeys.TRAIN
logits = revnet.revnet(images, hparams)
self.assertEqual(logits.shape, [1, 1, 1, 1, 3328])
if __name__ == '__main__':
tf.test.main()
| 39.836207 | 80 | 0.647912 |
ace5a5d02231e577bbe06617095bebc71317122d | 2,845 | py | Python | django-modal-forms/django_modal_forms/settings.py | lyosonernes/Django | d13bd504419b0d2630885c979f9f8413371b80e8 | [
"MIT"
] | null | null | null | django-modal-forms/django_modal_forms/settings.py | lyosonernes/Django | d13bd504419b0d2630885c979f9f8413371b80e8 | [
"MIT"
] | null | null | null | django-modal-forms/django_modal_forms/settings.py | lyosonernes/Django | d13bd504419b0d2630885c979f9f8413371b80e8 | [
"MIT"
] | null | null | null | """
Django settings for django_modal_forms project.
For more information on this file, see
https://docs.djangoproject.com/en/1.6/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.6/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.6/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'y)b@&v$e463h02#=dyr0^zrr^gi2d242wa0q*__07(4@4sjrv)'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
TEMPLATE_DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'crispy_forms',
'test_app',
)
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
# Insert your TEMPLATE_CONTEXT_PROCESSORS here or use this
# list if you haven't customized them:
'django.template.context_processors.debug',
'django.template.context_processors.i18n',
'django.template.context_processors.media',
'django.template.context_processors.static',
'django.template.context_processors.tz',
'django.contrib.messages.context_processors.messages',
],
},
},
]
CRISPY_TEMPLATE_PACK = 'bootstrap3'
MESSAGE_STORAGE = 'django.contrib.messages.storage.cookie.CookieStorage'
MIDDLEWARE = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'django_modal_forms.urls'
WSGI_APPLICATION = 'django_modal_forms.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.6/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.6/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.6/howto/static-files/
STATIC_URL = '/static/'
| 26.588785 | 74 | 0.6942 |
ace5a6615d63524a55cbb56f6f74fcf15cfdcaca | 1,742 | py | Python | flash_examples/video_classification.py | oojo12/lightning-flash | 9aa91eb1e07989912cc1c5529fa2c1d3d83c2586 | [
"Apache-2.0"
] | null | null | null | flash_examples/video_classification.py | oojo12/lightning-flash | 9aa91eb1e07989912cc1c5529fa2c1d3d83c2586 | [
"Apache-2.0"
] | null | null | null | flash_examples/video_classification.py | oojo12/lightning-flash | 9aa91eb1e07989912cc1c5529fa2c1d3d83c2586 | [
"Apache-2.0"
] | null | null | null | # Copyright The PyTorch Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
import flash
from flash.core.data.utils import download_data
from flash.video import VideoClassificationData, VideoClassifier
# 1. Create the DataModule
# Find more datasets at https://pytorchvideo.readthedocs.io/en/latest/data.html
download_data("https://pl-flash-data.s3.amazonaws.com/kinetics.zip", "./data")
datamodule = VideoClassificationData.from_folders(
    train_folder="data/kinetics/train",
    val_folder="data/kinetics/val",
    clip_sampler="uniform",
    clip_duration=1,
    decode_audio=False,
    batch_size=1,
)

# 2. Build the task
model = VideoClassifier(backbone="x3d_xs", labels=datamodule.labels, pretrained=False)

# 3. Create the trainer and finetune the model
# fast_dev_run=True runs only a single batch as a quick smoke test.
trainer = flash.Trainer(max_epochs=3, gpus=torch.cuda.device_count(), fast_dev_run=True)
trainer.finetune(model, datamodule=datamodule, strategy="freeze")

# 4. Make a prediction
datamodule = VideoClassificationData.from_folders(predict_folder="data/kinetics/predict", batch_size=1)
predictions = trainer.predict(model, datamodule=datamodule, output="labels")
print(predictions)

# 5. Save the model!
trainer.save_checkpoint("video_classification.pt")
| 37.06383 | 103 | 0.778416 |
ace5a6bd433a2cb129dbd24c305d4e6d36b0d101 | 9,751 | py | Python | env/lib/python3.6/site-packages/debug_toolbar/panels/templates/panel.py | anthowen/duplify | 846d01c1b21230937fdf0281b0cf8c0b08a8c24e | [
"MIT"
] | 5,079 | 2015-01-01T03:39:46.000Z | 2022-03-31T07:38:22.000Z | env/lib/python3.6/site-packages/debug_toolbar/panels/templates/panel.py | anthowen/duplify | 846d01c1b21230937fdf0281b0cf8c0b08a8c24e | [
"MIT"
] | 1,623 | 2015-01-01T08:06:24.000Z | 2022-03-30T19:48:52.000Z | env/lib/python3.6/site-packages/debug_toolbar/panels/templates/panel.py | anthowen/duplify | 846d01c1b21230937fdf0281b0cf8c0b08a8c24e | [
"MIT"
] | 2,033 | 2015-01-04T07:18:02.000Z | 2022-03-28T19:55:47.000Z | from __future__ import absolute_import, unicode_literals
from collections import OrderedDict
from contextlib import contextmanager
from os.path import normpath
from pprint import pformat
from django import http
from django.conf.urls import url
from django.core import signing
from django.db.models.query import QuerySet, RawQuerySet
from django.template import RequestContext, Template
from django.test.signals import template_rendered
from django.test.utils import instrumented_test_render
from django.utils import six
from django.utils.encoding import force_text
from django.utils.translation import ugettext_lazy as _
from debug_toolbar.panels import Panel
from debug_toolbar.panels.sql.tracking import SQLQueryTriggered, recording
from debug_toolbar.panels.templates import views
# Monkey-patch to enable the template_rendered signal. The receiver returns
# immediately when the panel is disabled to keep the overhead small.
# Code taken and adapted from Simon Willison and Django Snippets:
# https://www.djangosnippets.org/snippets/766/
# Patch only once; keep a reference to the unpatched renderer.
if Template._render != instrumented_test_render:
    Template.original_render = Template._render
    Template._render = instrumented_test_render


# Monkey-patch to store items added by template context processors. The
# overhead is sufficiently small to justify enabling it unconditionally.
@contextmanager
def _request_context_bind_template(self, template):
    """Replacement for ``RequestContext.bind_template`` that additionally
    records, per context processor, the variables it contributed
    (stored on the context as ``self.context_processors``)."""
    if self.template is not None:
        raise RuntimeError("Context is already bound to a template")
    self.template = template
    # Set context processors according to the template engine's settings.
    processors = (template.engine.template_context_processors +
                  self._processors)
    self.context_processors = OrderedDict()
    updates = {}
    for processor in processors:
        # Record each processor's contribution under its dotted path.
        name = '%s.%s' % (processor.__module__, processor.__name__)
        context = processor(self.request)
        self.context_processors[name] = context
        updates.update(context)
    self.dicts[self._processors_index] = updates
    try:
        yield
    finally:
        self.template = None
        # Unset context processors.
        self.dicts[self._processors_index] = {}


RequestContext.bind_template = _request_context_bind_template
class TemplatesPanel(Panel):
    """
    A panel that lists all templates used during processing of a response.
    """
    def __init__(self, *args, **kwargs):
        super(TemplatesPanel, self).__init__(*args, **kwargs)
        # One entry per rendered template (the template_rendered kwargs).
        self.templates = []
        # Refs GitHub issue #910
        # Hold a series of seen dictionaries within Contexts. A dictionary is
        # considered seen if it is `in` this list, requiring that the __eq__
        # for the dictionary matches. If *anything* in the dictionary is
        # different it is counted as a new layer.
        self.seen_layers = []
        # Holds all dictionaries which have been prettified for output.
        # This should align with the seen_layers such that an index here is
        # the same as the index there.
        self.pformat_layers = []

    def _store_template_info(self, sender, **kwargs):
        """Receiver for ``template_rendered``: records the template plus a
        sanitised, pretty-printed copy of each context layer."""
        template, context = kwargs['template'], kwargs['context']

        # Skip templates that we are generating through the debug toolbar.
        if (isinstance(template.name, six.string_types) and (
                template.name.startswith('debug_toolbar/') or
                template.name.startswith(
                    tuple(self.toolbar.config['SKIP_TEMPLATE_PREFIXES'])))):
            return

        context_list = []
        for context_layer in context.dicts:
            if hasattr(context_layer, 'items') and context_layer:
                # Refs GitHub issue #910
                # If we can find this layer in our pseudo-cache then find the
                # matching prettified version in the associated list.
                key_values = sorted(context_layer.items())
                if key_values in self.seen_layers:
                    index = self.seen_layers.index(key_values)
                    pformatted = self.pformat_layers[index]
                    context_list.append(pformatted)
                else:
                    temp_layer = {}
                    for key, value in context_layer.items():
                        # Replace any request elements - they have a large
                        # unicode representation and the request data is
                        # already made available from the Request panel.
                        if isinstance(value, http.HttpRequest):
                            temp_layer[key] = '<<request>>'
                        # Replace the debugging sql_queries element. The SQL
                        # data is already made available from the SQL panel.
                        elif key == 'sql_queries' and isinstance(value, list):
                            temp_layer[key] = '<<sql_queries>>'
                        # Replace LANGUAGES, which is available in i18n context processor
                        elif key == 'LANGUAGES' and isinstance(value, tuple):
                            temp_layer[key] = '<<languages>>'
                        # QuerySet would trigger the database: user can run the query from SQL Panel
                        elif isinstance(value, (QuerySet, RawQuerySet)):
                            model_name = "%s.%s" % (
                                value.model._meta.app_label, value.model.__name__)
                            temp_layer[key] = '<<%s of %s>>' % (
                                value.__class__.__name__.lower(), model_name)
                        else:
                            try:
                                # Disable SQL recording while probing the value.
                                recording(False)
                                force_text(value)  # this MAY trigger a db query
                            except SQLQueryTriggered:
                                temp_layer[key] = '<<triggers database query>>'
                            except UnicodeEncodeError:
                                temp_layer[key] = '<<unicode encode error>>'
                            except Exception:
                                temp_layer[key] = '<<unhandled exception>>'
                            else:
                                temp_layer[key] = value
                            finally:
                                recording(True)
                    # Refs GitHub issue #910
                    # If we've not seen the layer before then we will add it
                    # so that if we see it again we can skip formatting it.
                    self.seen_layers.append(key_values)
                    # Note: this *ought* to be len(...) - 1 but let's be safe.
                    index = self.seen_layers.index(key_values)
                    try:
                        pformatted = force_text(pformat(temp_layer))
                    except UnicodeEncodeError:
                        # NOTE(review): on this path seen_layers grows but
                        # pformat_layers does not, so the two lists can fall
                        # out of alignment — confirm whether this is a
                        # latent bug upstream.
                        pass
                    else:
                        # Note: this *ought* to be len(...) - 1 but let's be safe.
                        self.pformat_layers.insert(index, pformatted)
                        context_list.append(pformatted)
        kwargs['context'] = context_list
        kwargs['context_processors'] = getattr(context, 'context_processors', None)
        self.templates.append(kwargs)

    # Implement the Panel API

    nav_title = _("Templates")

    @property
    def title(self):
        """Panel title including the number of templates rendered."""
        num_templates = len(self.templates)
        return _("Templates (%(num_templates)s rendered)") % {'num_templates': num_templates}

    @property
    def nav_subtitle(self):
        """Name of the first rendered template, or '' if none were rendered."""
        if self.templates:
            return self.templates[0]['template'].name
        return ''

    template = 'debug_toolbar/panels/templates.html'

    @classmethod
    def get_urls(cls):
        """Extra URL for viewing a template's source in the toolbar."""
        return [
            url(r'^template_source/$', views.template_source, name='template_source'),
        ]

    def enable_instrumentation(self):
        # Start capturing template_rendered signals for this request.
        template_rendered.connect(self._store_template_info)

    def disable_instrumentation(self):
        template_rendered.disconnect(self._store_template_info)

    def generate_stats(self, request, response):
        """Assemble the recorded template/context data for panel display."""
        template_context = []
        for template_data in self.templates:
            info = {}
            # Clean up some info about templates
            template = template_data.get('template', None)
            if hasattr(template, 'origin') and template.origin and template.origin.name:
                template.origin_name = template.origin.name
                # Signed so the template_source view can safely load it back.
                template.origin_hash = signing.dumps(template.origin.name)
            else:
                template.origin_name = _('No origin')
                template.origin_hash = ''
            info['template'] = template
            # Clean up context for better readability
            if self.toolbar.config['SHOW_TEMPLATE_CONTEXT']:
                context_list = template_data.get('context', [])
                info['context'] = '\n'.join(context_list)
            template_context.append(info)

        # Fetch context_processors/template_dirs from any template
        if self.templates:
            context_processors = self.templates[0]['context_processors']
            template = self.templates[0]['template']
            # django templates have the 'engine' attribute, while jinja templates use 'backend'
            engine_backend = getattr(template, 'engine', None) or getattr(template, 'backend')
            template_dirs = engine_backend.dirs
        else:
            context_processors = None
            template_dirs = []

        self.record_stats({
            'templates': template_context,
            'template_dirs': [normpath(x) for x in template_dirs],
            'context_processors': context_processors,
        })
| 43.726457 | 100 | 0.60199 |
ace5a6c53c508181275dcbd23eba33a8b546d3bc | 13,756 | py | Python | myenv/lib/python2.7/site-packages/django_countries/tests/test_fields.py | dkumarlinux/saleor | e3a852fed7da38e4141b0755bd282012f508c7b9 | [
"BSD-3-Clause"
] | null | null | null | myenv/lib/python2.7/site-packages/django_countries/tests/test_fields.py | dkumarlinux/saleor | e3a852fed7da38e4141b0755bd282012f508c7b9 | [
"BSD-3-Clause"
] | 2 | 2022-02-10T16:51:56.000Z | 2022-02-10T18:23:52.000Z | myenv/lib/python2.7/site-packages/django_countries/tests/test_fields.py | dkumarlinux/saleor | e3a852fed7da38e4141b0755bd282012f508c7b9 | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.core import validators
from django.forms import Select
from django.forms.models import modelform_factory
from django.test import TestCase
from django.utils import translation
from django.utils.encoding import force_text
from django_countries import fields, countries
from django_countries.tests import forms, custom_countries
from django_countries.tests.models import Person, AllowNull, MultiCountry
class TestCountryField(TestCase):
def test_logic(self):
person = Person(name='Chris Beaven', country='NZ')
self.assertEqual(person.country, 'NZ')
self.assertNotEqual(person.country, 'ZZ')
self.assertTrue(person.country)
person.country = ''
self.assertFalse(person.country)
def test_only_from_instance(self):
self.assertRaises(AttributeError, lambda: Person.country)
def test_deconstruct(self):
field = Person._meta.get_field('country')
self.assertEqual(
field.deconstruct(),
('country', 'django_countries.fields.CountryField', [],
{'max_length': 2}))
def test_text(self):
person = Person(name='Chris Beaven', country='NZ')
self.assertEqual(force_text(person.country), 'NZ')
def test_name(self):
person = Person(name='Chris Beaven', country='NZ')
self.assertEqual(person.country.name, 'New Zealand')
def test_flag(self):
person = Person(name='Chris Beaven', country='NZ')
with self.settings(STATIC_URL='/static-assets/'):
self.assertEqual(
person.country.flag, '/static-assets/flags/nz.gif')
def test_custom_field_flag_url(self):
person = Person(name='Chris Beaven', country='NZ', other_country='US')
self.assertEqual(
person.other_country.flag, '//flags.example.com/us.gif')
def test_unicode_flags(self):
person = Person(
name='Matthew Schinckel', country='AU', other_country='DE')
self.assertEqual(person.country.unicode_flag, '🇦🇺')
self.assertEqual(person.other_country.unicode_flag, '🇩🇪')
def test_unicode_flag_blank(self):
person = Person(name='Matthew Schinckel')
self.assertEqual(person.country.unicode_flag, '')
def test_COUNTRIES_FLAG_URL_setting(self):
# Custom relative url
person = Person(name='Chris Beaven', country='NZ')
with self.settings(COUNTRIES_FLAG_URL='img/flag-{code_upper}.png',
STATIC_URL='/static-assets/'):
self.assertEqual(
person.country.flag, '/static-assets/img/flag-NZ.png')
# Custom absolute url
with self.settings(COUNTRIES_FLAG_URL='https://flags.example.com/'
'{code_upper}.PNG'):
self.assertEqual(
person.country.flag, 'https://flags.example.com/NZ.PNG')
def test_blank(self):
person = Person.objects.create(name='The Outsider')
self.assertEqual(person.country.code, '')
person = Person.objects.get(pk=person.pk)
self.assertEqual(person.country.code, '')
def test_null(self):
person = AllowNull.objects.create(country=None)
self.assertIsNone(person.country.code)
person = AllowNull.objects.get(pk=person.pk)
self.assertIsNone(person.country.code)
def test_deferred(self):
Person.objects.create(name='Person',
country='NZ')
person = Person.objects.defer('country').get(name='Person')
self.assertEqual(person.country.code, 'NZ')
def test_only(self):
Person.objects.create(name='Person',
country='NZ')
person = Person.objects.only('name').get()
self.assertEqual(person.country.code, 'NZ')
def test_nullable_deferred(self):
AllowNull.objects.create(country=None)
person = AllowNull.objects.defer('country').get()
self.assertIsNone(person.country.code)
def test_len(self):
person = Person(name='Chris Beaven', country='NZ')
self.assertEqual(len(person.country), 2)
person = Person(name='The Outsider', country=None)
self.assertEqual(len(person.country), 0)
def test_lookup_text(self):
Person.objects.create(name='Chris Beaven', country='NZ')
Person.objects.create(name='Pavlova', country='NZ')
Person.objects.create(name='Killer everything', country='AU')
lookup = Person.objects.filter(country='NZ')
names = lookup.order_by('name').values_list('name', flat=True)
self.assertEqual(list(names), ['Chris Beaven', 'Pavlova'])
def test_lookup_country(self):
Person.objects.create(name='Chris Beaven', country='NZ')
Person.objects.create(name='Pavlova', country='NZ')
Person.objects.create(name='Killer everything', country='AU')
oz = fields.Country(code='AU', flag_url='')
lookup = Person.objects.filter(country=oz)
names = lookup.values_list('name', flat=True)
self.assertEqual(list(names), ['Killer everything'])
def test_save_empty_country(self):
Person.objects.create(name='The Outsider')
person = Person.objects.get()
self.assertEqual(person.country.code, '')
def test_create_modelform(self):
Form = modelform_factory(Person, fields=['country'])
form_field = Form().fields['country']
self.assertTrue(isinstance(form_field.widget, Select))
def test_render_form(self):
Form = modelform_factory(Person, fields=['country'])
Form().as_p()
class TestValidation(TestCase):
def test_validate(self):
person = Person(name='Chris', country='NZ')
person.full_clean()
def test_validate_empty(self):
person = Person(name='Chris')
self.assertRaises(validators.ValidationError, person.full_clean)
def test_validate_invalid(self):
person = Person(name='Chris', country=':(')
self.assertRaises(validators.ValidationError, person.full_clean)
def test_validate_multiple(self):
person = MultiCountry(countries=['NZ', 'AU'])
person.full_clean()
def test_validate_multiple_empty(self):
person = MultiCountry()
self.assertRaises(validators.ValidationError, person.full_clean)
def test_validate_multiple_invalid(self):
person = MultiCountry(countries=[':(', 'AU'])
self.assertRaises(validators.ValidationError, person.full_clean)
def test_validate_multiple_uneditable(self):
person = MultiCountry(countries='NZ', uneditable_countries='xx')
person.full_clean()
class TestCountryCustom(TestCase):
def test_name(self):
person = Person(name='Chris Beaven', fantasy_country='NV')
self.assertEqual(person.fantasy_country.name, 'Neverland')
def test_field(self):
self.assertEqual(
list(Person._meta.get_field('fantasy_country').choices),
[('NV', 'Neverland'), ('NZ', 'New Zealand')])
def test_deconstruct(self):
field = Person._meta.get_field('fantasy_country')
self.assertEqual(
field.deconstruct(),
(
'fantasy_country',
'django_countries.fields.CountryField',
[],
{
'countries': custom_countries.FantasyCountries,
'blank': True,
'max_length': 2
}
))
class TestCountryMultiple(TestCase):
def test_empty(self):
obj = MultiCountry()
self.assertEqual(obj.countries, [])
def test_empty_save(self):
MultiCountry.objects.create()
def test_single(self):
obj = MultiCountry(countries='NZ')
self.assertEqual(len(obj.countries), 1)
self.assertTrue(isinstance(obj.countries[0], fields.Country))
self.assertEqual(obj.countries[0], 'NZ')
def test_multiple(self):
obj = MultiCountry(countries='AU,NZ')
self.assertEqual(len(obj.countries), 2)
for country in obj.countries:
self.assertTrue(isinstance(country, fields.Country))
self.assertEqual(obj.countries[0], 'AU')
self.assertEqual(obj.countries[1], 'NZ')
def test_set_text(self):
obj = MultiCountry()
obj.countries = 'NZ,AU'
self.assertEqual(obj.countries, ['NZ', 'AU'])
def test_set_list(self):
obj = MultiCountry()
obj.countries = ['NZ', 'AU']
self.assertEqual(obj.countries, ['NZ', 'AU'])
def test_set_country(self):
obj = MultiCountry()
obj.countries = fields.Country('NZ')
self.assertEqual(obj.countries, ['NZ'])
def test_set_countries(self):
obj = MultiCountry()
obj.countries = [fields.Country('NZ'), fields.Country('AU')]
self.assertEqual(obj.countries, ['NZ', 'AU'])
def test_all_countries(self):
all_codes = list(c[0] for c in countries)
MultiCountry.objects.create(countries=all_codes)
obj = MultiCountry.objects.get()
self.assertEqual(obj.countries, all_codes)
def test_deconstruct(self):
field = MultiCountry._meta.get_field('countries')
self.assertEqual(
field.deconstruct(),
(
'countries',
'django_countries.fields.CountryField',
[],
{'max_length': 599, 'multiple': True}
))
class TestCountryObject(TestCase):
def test_hash(self):
country = fields.Country(code='XX', flag_url='')
self.assertEqual(hash(country), hash('XX'))
def test_repr(self):
country1 = fields.Country(code='XX')
country2 = fields.Country(code='XX', flag_url='')
country3 = fields.Country(code='XX', str_attr='name')
self.assertEqual(
repr(country1),
'Country(code={0})'.format(repr('XX')))
self.assertEqual(
repr(country2),
'Country(code={0}, flag_url={1})'.format(repr('XX'), repr('')))
self.assertEqual(
repr(country3),
'Country(code={0}, str_attr={1})'.format(repr('XX'), repr('name')))
def test_str(self):
country = fields.Country(code='NZ')
self.assertEqual('%s' % country, 'NZ')
def test_str_attr(self):
country = fields.Country(code='NZ', str_attr='name')
self.assertEqual('%s' % country, 'New Zealand')
def test_flag_on_empty_code(self):
country = fields.Country(code='', flag_url='')
self.assertEqual(country.flag, '')
def test_ioc_code(self):
country = fields.Country(code='NL', flag_url='')
self.assertEqual(country.ioc_code, 'NED')
def test_country_from_ioc_code(self):
country = fields.Country.country_from_ioc('NED')
self.assertEqual(country, fields.Country('NL', flag_url=''))
def test_country_from_blank_ioc_code(self):
country = fields.Country.country_from_ioc('')
self.assertIsNone(country)
def test_country_from_nonexistence_ioc_code(self):
country = fields.Country.country_from_ioc('XXX')
self.assertIsNone(country)
def test_alpha3(self):
country = fields.Country(code='BN')
self.assertEqual(country.alpha3, 'BRN')
def test_alpha3_invalid(self):
country = fields.Country(code='XX')
self.assertEqual(country.alpha3, '')
def test_numeric(self):
country = fields.Country(code='BN')
self.assertEqual(country.numeric, 96)
def test_numeric_padded(self):
country = fields.Country(code='AL')
self.assertEqual(country.numeric_padded, '008')
country = fields.Country(code='BN')
self.assertEqual(country.numeric_padded, '096')
country = fields.Country(code='NZ')
self.assertEqual(country.numeric_padded, '554')
def test_numeric_invalid(self):
country = fields.Country(code='XX')
self.assertEqual(country.numeric, None)
def test_numeric_padded_invalid(self):
country = fields.Country(code='XX')
self.assertEqual(country.numeric_padded, None)
def test_empty_flag_url(self):
country = fields.Country(code='XX', flag_url='')
self.assertEqual(country.flag, '')
class TestModelForm(TestCase):
def test_translated_choices(self):
lang = translation.get_language()
translation.activate('eo')
form = forms.PersonForm()
try:
# This is just to prove that the language changed.
self.assertEqual(list(countries)[0][1], 'Afganio')
# If the choices aren't lazy, this wouldn't be translated. It's the
# second choice because the first one is the initial blank option.
self.assertEqual(
form.fields['country'].choices[1][1], 'Afganio')
self.assertEqual(
form.fields['country'].widget.choices[1][1], 'Afganio')
finally:
translation.activate(lang)
def test_blank_choice(self):
form = forms.PersonForm()
self.assertEqual(form.fields['country'].choices[0], ('', '---------'))
def test_no_blank_choice(self):
form = forms.PersonForm()
self.assertEqual(
form.fields['favourite_country'].choices[0], ('AF', 'Afghanistan'))
def test_blank_choice_label(self):
form = forms.AllowNullForm()
self.assertEqual(
form.fields['country'].choices[0], ('', '(select country)'))
def test_validation(self):
form = forms.MultiCountryForm(data={'countries': ['NZ', 'AU']})
self.assertEqual(form.errors, {})
| 35.637306 | 79 | 0.629543 |
ace5a7b06907dcf5ea1ded19274e7564ce2a846b | 12,773 | py | Python | common/common/common/util.py | JuroOravec/knwldg | 33235f78ae1ea6409883f312adcf8679c5bf2401 | [
"MIT"
] | null | null | null | common/common/common/util.py | JuroOravec/knwldg | 33235f78ae1ea6409883f312adcf8679c5bf2401 | [
"MIT"
] | null | null | null | common/common/common/util.py | JuroOravec/knwldg | 33235f78ae1ea6409883f312adcf8679c5bf2401 | [
"MIT"
] | null | null | null | import unicodedata
from datetime import datetime
import functools
from http import cookies
import inspect
import importlib
import itertools
from pathlib import Path
import re
from urllib import parse
import tempfile
import requests
def string_combinations(
seed="",
pattern=None,
start=None,
end=None,
valid_chars=None,
variants=None,
min_length=1,
index=None
):
'''
Generates string combinations from `seed`, until the string is at least of `min_length`.
Only combinations that are lexicographically equal to or after `start` and equal to
or before `end` are returned, if `start` or `end` are given.
If `pattern` is given, only those combinations are returned that match the pattern.
`valid_chars` specifies which characters can be added to the `seed` string.
If any of the valid characters can have multiple variants (such as `c` being `c` or `ch`),
these can be specified by `variants`. `variants` must be either a list of tuples or dict.
Keys must match characters that have multiple variants. Values must be a list of these variants.
These variants can be of any length.
Index specifies which character of the `seed` string is being considered. If `index` is out of range
of `seed` string, the new character is appended to the `seed` string
EXAMPLE:
>>> string_combinations(
>>> seed="hi",
>>> start='ho',
>>> end='hq',
>>> variants={'o': ['oh', 'ok', 'obuh']},
>>> min_length=4,
>>> index=1
>>> )
# From string 'hi', generates all strings that start with 'ho' and 'hq' (inclusive) and everything in between,
>>>
# whereas the string combinations start at index 1 ("i"). Generated string are of length 4, possibly except when
>>>
# strings containing 'o' were generated variants with 'oh', 'ok', or 'obuh' instead of 'o'.
>>>
'''
if index is not None \
and len(seed) >= min_length \
and (not(start) or seed[:len(start)] >= start)\
and (not(end) or seed[:len(end)] <= end):
yield seed
return
seed = bytearray(seed, "ascii")
index = len(seed) if index is None else index
valid_chars = valid_chars or 'abcdefghijklmnopqrstuvwxzy0123456789'
# variants should be {char: [list, of, variants]} or [(char, [list, of, variants])]
variants = variants or []
variants = variants.items() if isinstance(variants, dict) else variants
start_reached = False
for s in valid_chars:
# Skip if start is given and has not been reached yet
# or if end is given and has been already reached
if (start and not(start_reached) and len(start) >= (index + 1) and s != start[index]):
continue
# Prevent going into depth if we already have minimum length
# and start or end conditions are shorter than that
elif index > min_length - 1 and (start and index > len(start) - 1) and (end and index > len(end) - 1):
continue
if not start_reached:
start_reached = True
# workaround for "ch" being considered a separate char.
# uses (temp_seed + variant) as a final name for all variants
curr_variants = [s]
for case, v in variants:
if s == case:
curr_variants.extend(v)
for v in curr_variants:
temp_seed = seed.copy()
# Modify seed with current variant
for i, c in enumerate(v):
if len(temp_seed) < index + 1 + i:
temp_seed.append(ord(c))
else:
temp_seed[index] = ord(c)
temp_seed = temp_seed.decode()
# End reached
if end and temp_seed[:len(end)] > end:
return
# Skip seed if it does not match the pattern
if pattern and not re.search(pattern, temp_seed):
continue
# Go one level deeper (1 char longer seed)
results = string_combinations(
seed=temp_seed,
valid_chars=valid_chars,
pattern=pattern,
start=start,
end=end,
variants=variants,
min_length=min_length,
index=index + 1
)
for res in results:
yield res
def map_dict_val(fn, d):
return {
k: fn(v)
for k, v in d.items()
}
def unpack_url(url):
'''
Get URL object and Query object from a url, as returned by
urllib.parse.urlparse and urllib.parse.parse_qs, respectively.
Reverse of pack_url
'''
url_obj = parse.urlparse(url)
q_obj = parse.parse_qs(url_obj.query)
q_obj = map_dict_val(lambda l: l[0], q_obj)
return url_obj, q_obj
def pack_url(url_obj, q_obj):
'''
Get url string from URL object and Query object.
Reverse of unpack_url
'''
url_obj = url_obj._replace(query=parse.urlencode(q_obj))
url_string = parse.urlunparse(url_obj)
return url_string
def xpath_class(classes, operator="or"):
''''Format an XPath class condition'''
return f" {operator} ".join(
f"contains(concat(' ', normalize-space(@class),' '),' {cls} ')"
for cls in classes
)
def xpath_startswith(attr, s):
return f"@{attr} and starts-with(@{attr}, '{s}')"
def get_dir(obj):
path = inspect.getfile(obj)
return str(Path(path).parent.absolute())
def module_from_abs_path(name, path):
'''
See https://stackoverflow.com/questions/67631/how-to-import-a-module-given-the-full-path
# importing-a-source-file-directly
and https://docs.python.org/3/library/importlib.html
'''
spec = importlib.util.spec_from_file_location(name, path)
mdl = importlib.util.module_from_spec(spec)
spec.loader.exec_module(mdl)
return mdl
def local_module(cls, filename):
'''
Search the directory of a module where `cls` is defined and
look for file `filename`.
Raises `ValueError` if the file does not exist.
Raises `ModuleNotFoundError` if the import failed.
'''
cls_dir = get_dir(cls)
file_path = Path(cls_dir, filename)
file_abs_path = str(file_path.absolute())
if not file_path.exists():
raise ValueError('File not found: {}'.format(file_abs_path))
import_path = cls.__module__.rsplit('.', 1)[0]
module_path = "{}.{}".format(import_path, file_path.stem)
module = module_from_abs_path(module_path, file_abs_path)
return module
def pairwise(iterable):
'''See https://docs.python.org/3/library/itertools.html#itertools-recipes'''
a, b = itertools.tee(iterable)
next(b, None)
return itertools.zip_longest(a, b, fillvalue=object())
def remove_adjacent_dup(iterable):
'''
See https://stackoverflow.com/a/34986013/9788634
'''
return [x for x, y in pairwise(x) if x != y]
def soft_update(d1, *dicts, dict_mode='override', list_mode='override', copy=False):
'''
Update dictonary entries, overriding values if they are primitives
or if the type changes.
Returns the updated dictionary. If `copy` is `True`, the updates are made
to a copy.
If the values are dictionaries then one of the following modes apply:
- `update` - keep the nested dictionaries, and only update entries
- `override` - replace the nested dictonaries with new values
If the values are lists then one of the following modes apply:
- `append` - join elements from all occurences
- `set` - add new list member only if it is not present in the list already
- `override` - replace the list with new value
'''
if copy:
out = {}
the_dicts = [d1, *dicts]
else:
out = d1
the_dicts = dicts
for d in the_dicts:
for k, v in d.items():
if k not in out:
out[k] = v
continue
elif type(v) != type(out[k]):
out[k] = v
elif isinstance(v, dict):
if dict_mode == 'update':
out[k].update(v)
elif dict_mode == 'override':
out[k] = v
else:
raise ValueError(f'Unknown dict mode "{dict_mode}"')
elif isinstance(v, list):
if list_mode == 'append':
out[k].extend(v)
elif list_mode == 'set':
out[k].extend([i for i in v if i not in out[k]])
elif list_mode == 'override':
out[k] = v
else:
raise ValueError(f'Unknown list mode "{list_mode}"')
else:
out[k] = v
return out
def grouper(n, iterable, fillvalue=None):
"grouper(3, 'ABCDEFG', 'x') --> ABC DEF Gxx"
args = [iter(iterable)] * n
return itertools.zip_longest(fillvalue=fillvalue, *args)
def flatten_json(y):
out = {}
def flatten(x, name=''):
if type(x) is dict:
for a in x:
flatten(x[a], name + a + '_')
elif type(x) is list:
i = 0
for a in x:
flatten(a, name + str(i) + '_')
i += 1
else:
out[name[:-1]] = x
flatten(y)
return out
@functools.wraps(filter)
def lfilter(functionOrNone, iterable):
return list(filter(functionOrNone, iterable))
@functools.wraps(map)
def lmap(func, *iterables):
return list(map(func, *iterables))
@functools.wraps(map)
def map2str(*iterables):
return map(str, iterables)
@functools.wraps(map)
def map2int(*iterables):
return map(int, iterables)
def flatten(iterable):
t = type(iterable)
return t(i for grp in iterable for i in grp)
def lflatten(iterable):
return flatten(list(iterable))
def time_tag():
return datetime.now().strftime('%Y_%m_%d__%H_%M_%S')
def update_request_cookies(request, inplace=True, pattern=None):
c = cookies.SimpleCookie()
h = request.headers.copy() if not inplace else request.headers
for header in ['Cookie', 'Set-Cookie']:
for ck in h.getlist(header):
c.load(ck.decode('utf-8'))
h.pop('cookie', None)
h.pop('set-cookie', None)
for morsel in c.values():
if pattern is None or re.search(pattern, morsel.key):
h.appendlist('cookie', '{}={}'.format(morsel.key, morsel.value))
return h
def strip_accents(text):
'''https://stackoverflow.com/a/44433664/9788634'''
try:
text = unicode(text, 'utf-8')
except NameError: # unicode is a default on python 3
pass
text = unicodedata.normalize('NFD', text)\
.encode('ascii', 'ignore')\
.decode("utf-8")
return str(text)
def dol2lot(dol):
'''Convert dict of lists to list of (key, value) tuples'''
lot = []
for k, val in dol.items():
try:
vals = iter(val)
except TypeError:
vals = [val]
lot.extend((k, v) for v in vals)
return lot
def lot2dol(lot):
'''Convert list of (key, value) tuples to dict of lists'''
dol = {}
for k, val in lot:
if k not in dol:
dol[k] = []
dol[k].append(val)
return dol
def conditional_deco(deco, predicate):
'''
Decorator that takes another decorator and a predicate,
and applies the second decorator to a function only if the predicate
evaluates to True.a[@href and starts-with(@href, '/events/csv')]
'''
def deco_(function):
@functools.wraps(function)
def inner(*args, **kwargs):
if predicate(*args, **kwargs):
return deco(function)(*args, **kwargs)
return function(*args, **kwargs)
return inner
return deco_
def is_url(url):
try:
result = parse.urlparse(url)
return all([result.scheme, result.netloc])
except ValueError:
return False
def _parse_user_agent_url(url):
return requests.get(url).strip().split('\n')
def get_user_agent_list(brws):
# Taken from https://github.com/tamimibrahim17/List-of-user-agents
url_template = 'https://raw.githubusercontent.com/tamimibrahim17/List-of-user-agents/master/{}.txt'
ual = []
for brw in brws:
url = url_template.format(parse.quote(brw))
uas = [
ua
for ua in _parse_user_agent_url(url)[:-2]
if "user agents string" not in ua
]
ual.extend(uas)
tempfile.NamedTemporaryFile()
return ual
def get_proxy_list(urls=None, files=None):
proxies = [p for p_list in map(_parse_user_agent_url, urls) for p in p_list]
proxies.extend([
Path(f).read_text(encoding='utf-8') for f in files
])
return proxies | 29.295872 | 116 | 0.602678 |
ace5a866500834e7480a0d2d4919459e56561227 | 6,059 | py | Python | fos_regression.py | LedererLab/HDIM-Stratus | aca5765ae22e738e0b66213a76bc62ed02485ae8 | [
"MIT"
] | null | null | null | fos_regression.py | LedererLab/HDIM-Stratus | aca5765ae22e738e0b66213a76bc62ed02485ae8 | [
"MIT"
] | null | null | null | fos_regression.py | LedererLab/HDIM-Stratus | aca5765ae22e738e0b66213a76bc62ed02485ae8 | [
"MIT"
] | null | null | null | import importlib.util
import pandas as pd
import numpy as np
import json
from io import BytesIO
spec = importlib.util.spec_from_file_location("module.name", "/var/www/HDIM_Stratus/HDIM-Algo/Python_Wrapper/hdim.py")
hdim = importlib.util.module_from_spec(spec)
spec.loader.exec_module( hdim )
class multiFOS:
""" Base class that handles some of data preparation for FOS regressions.
This class is not responsible for processing raw POST request form-data.
It is assumed that the data has already been converted into a pandas DataFrame.
"""
def _process( self, raw_data, regression_var ):
""" Take in pre-processed data, run a regression via FOS and return the results.
Args:
raw_data: A pandas.DataFrame that contains both the design matrix and
vector of predictors along with any labels.
regression_var: Index of the column in raw_data that contains the vector
of predictors.
Returns:
A pandas.DataFrame containing the regression coefficients corresponding
to the support ( that is non-zero and signigiant coefficients ) along
with the intercept term.
"""
reg_idx = int( regression_var )
Y = raw_data.ix[:,reg_idx]
X = raw_data.ix[:, raw_data.columns != raw_data.columns[reg_idx] ]
col_names = list( raw_data.columns.values )
Y = Y.as_matrix()
X = X.as_matrix()
fos = hdim.X_FOS_d()
fos( X, Y, hdim.SolverType_screen_cd )
coefficients = fos.ReturnCoefficients()
intercept = fos.ReturnIntercept()
support = fos.ReturnSupport()
nz_indices = support.nonzero()[0]
support_coefs = coefficients[ nz_indices ]
# There seems to be an off by one error causing the column names to be
# selected incorrently. Hence the + 1.
col_names = [ col_names[ idx + 1 ] for idx in nz_indices ]
col_names.insert( 0, "Intercept" )
support_coefs = np.insert( support_coefs, 0, intercept )
return( pd.DataFrame( data = support_coefs , index = col_names ).to_json(orient='columns') )
class csvFOS( multiFOS ):
""" Functor that handles FOS regression for data supplied as a raw .csv
( Comma Seperated Value ) file.
"""
def __call__( self, file_contents, regression_var ):
""" Run the regression and return the results.
Args:
file_contents: The raw contents of an HTTP POST request where form-data
corresponds to a .csv file containing the design matrix, vector of predictors
and any headers.
regression_var: Index of the column that contains the vector
of predictors.
Returns:
A pandas.DataFrame containing the regression coefficients corresponding
to the support ( that is non-zero and signigiant coefficients ) along
with the intercept term.
"""
return super()._process( self.__load( file_contents ), regression_var )
def __load( self, raw_content ):
""" Dump form-data from an HTTP POST request into a pandas.DataFrame.
Args:
raw_content: Raw form-data from an HTTP POST request where the data
corresponds to a .csv formatted file.
Returns:
pandas.DataFrame containing the contents of the originally .csv file.
"""
return( pd.read_csv( BytesIO( raw_content ) ) )
class xlsxFOS( multiFOS ):
""" Functor that handles FOS regression for data supplied as a raw .xlsx
( Excel ) file.
"""
def __call__( self, file_contents, regression_var ):
""" Run the regression and return the results.
Args:
file_contents: The raw contents of an HTTP POST request where form-data
corresponds to a .xlsx file containing the design matrix, vector of predictors
and any headers.
regression_var: Index of the column that contains the vector
of predictors.
Returns:
A pandas.DataFrame containing the regression coefficients corresponding
to the support ( that is non-zero and signigiant coefficients ) along
with the intercept term.
"""
return super()._process( self.__load( file_contents ), regression_var )
def __load( self, raw_content ):
""" Dump form-data from an HTTP POST request into a pandas.DataFrame.
Args:
raw_content: Raw form-data from an HTTP POST request where the data
corresponds to a .xlsx formatted file.
Returns:
pandas.DataFrame containing the contents of the original .xlsx file.
"""
return( pd.read_excel( BytesIO( raw_content ) ) )
class jsonFOS( multiFOS ):
""" Functor that handles FOS regression for data supplied as a JSON string.
"""
def __call__( self, json_blob, regression_var ):
""" Run the regression and return the results.
Args:
file_contents: The raw contents of an HTTP POST request where form-data
corresponds to a JSON string containing the design matrix, vector of predictors
and any headers.
regression_var: Index of the column that contains the vector
of predictors.
Returns:
A pandas.DataFrame containing the regression coefficients corresponding
to the support ( that is non-zero and signigiant coefficients ) along
with the intercept term.
"""
return super()._process( self.__load( json_blob ), regression_var )
def __load( self, json_blob ):
""" Dump contents of JSON string into a pandas.DataFrame.
Args:
raw_content: Raw form-data from an HTTP POST request where the data
corresponds to a JSON string.
Returns:
pandas.DataFrame containing the contents of the original JSON string.
"""
return pd.read_json( json_blob, orient='split' )
| 36.721212 | 118 | 0.647962 |
ace5a88ca34d47417b38973df9ef0d151b1de2d8 | 8,389 | py | Python | tests/test_19_attribute_converter.py | tpazderka/pysaml2 | 20e701de74ca7fef61e85c4ea4b7db2f2f6e5ef0 | [
"BSD-2-Clause"
] | null | null | null | tests/test_19_attribute_converter.py | tpazderka/pysaml2 | 20e701de74ca7fef61e85c4ea4b7db2f2f6e5ef0 | [
"BSD-2-Clause"
] | null | null | null | tests/test_19_attribute_converter.py | tpazderka/pysaml2 | 20e701de74ca7fef61e85c4ea4b7db2f2f6e5ef0 | [
"BSD-2-Clause"
] | null | null | null | #!/usr/bin/env python
from saml2 import attribute_converter, saml
from attribute_statement_data import *
from pathutils import full_path
from saml2.attribute_converter import AttributeConverterNOOP
from saml2.attribute_converter import to_local
from saml2.saml import attribute_from_string
def _eq(l1, l2):
return set(l1) == set(l2)
# SAML 2.0 attribute NameFormat URIs exercised by these tests.
BASIC_NF = 'urn:oasis:names:tc:SAML:2.0:attrname-format:basic'
URI_NF = 'urn:oasis:names:tc:SAML:2.0:attrname-format:uri'
# Shibboleth 1.0 attribute namespace (SAML 1.x style attribute naming).
SAML1 = 'urn:mace:shibboleth:1.0:attributeNamespace:uri'
def test_default():
    """ac_factory() without a path must still return a non-empty
    collection of default attribute converters."""
    converters = attribute_converter.ac_factory()
    assert converters
class TestAC():
def setup_class(self):
self.acs = attribute_converter.ac_factory(full_path("attributemaps"))
def test_setup(self):
print self.acs
assert len(self.acs) == 3
assert _eq([a.name_format for a in self.acs], [BASIC_NF, URI_NF, SAML1])
def test_ava_fro_1(self):
ats = saml.attribute_statement_from_string(STATEMENT1)
#print ats
ava = None
for ac in self.acs:
try:
ava = ac.fro(ats)
except attribute_converter.UnknownNameFormat:
pass
# break if we have something
if ava:
break
print ava.keys()
assert _eq(ava.keys(), ['givenName', 'displayName', 'uid',
'eduPersonNickname', 'street',
'eduPersonScopedAffiliation',
'employeeType', 'eduPersonAffiliation',
'eduPersonPrincipalName', 'sn', 'postalCode',
'physicalDeliveryOfficeName', 'ou',
'eduPersonTargetedID', 'cn'])
def test_ava_fro_2(self):
ats = saml.attribute_statement_from_string(STATEMENT2)
#print ats
ava = {}
for ac in self.acs:
ava.update(ac.fro(ats))
print ava.keys()
assert _eq(ava.keys(), ['eduPersonEntitlement', 'eduPersonAffiliation',
'uid', 'mail', 'givenName', 'sn'])
def test_to_attrstat_1(self):
ava = {"givenName": "Roland", "sn": "Hedberg"}
statement = attribute_converter.from_local(self.acs, ava, BASIC_NF)
assert statement is not None
assert len(statement) == 2
a0 = statement[0]
a1 = statement[1]
if a0.friendly_name == 'sn':
assert a0.name == 'urn:mace:dir:attribute-def:sn'
assert a0.name_format == BASIC_NF
assert a1.friendly_name == "givenName"
assert a1.name == 'urn:mace:dir:attribute-def:givenName'
assert a1.name_format == BASIC_NF
elif a0.friendly_name == 'givenName':
assert a0.name == 'urn:mace:dir:attribute-def:givenName'
assert a0.name_format == BASIC_NF
assert a1.friendly_name == "sn"
assert a1.name == 'urn:mace:dir:attribute-def:sn'
assert a1.name_format == BASIC_NF
else:
assert False
def test_to_attrstat_2(self):
ava = {"givenName": "Roland", "sn": "Hedberg"}
statement = attribute_converter.from_local(self.acs, ava, URI_NF)
assert len(statement) == 2
a0 = statement[0]
a1 = statement[1]
if a0.friendly_name == 'sn':
assert a0.name == 'urn:oid:2.5.4.4'
assert a0.name_format == URI_NF
assert a1.friendly_name == "givenName"
assert a1.name == 'urn:oid:2.5.4.42'
assert a1.name_format == URI_NF
elif a0.friendly_name == 'givenName':
assert a0.name == 'urn:oid:2.5.4.42'
assert a0.name_format == URI_NF
assert a1.friendly_name == "sn"
assert a1.name == 'urn:oid:2.5.4.4'
assert a1.name_format == URI_NF
else:
assert False
def test_to_local_name(self):
attr = [
saml.Attribute(
friendly_name="surName",
name="urn:oid:2.5.4.4",
name_format="urn:oasis:names:tc:SAML:2.0:attrname-format:uri"),
saml.Attribute(
friendly_name="efternamn",
name="urn:oid:2.5.4.42",
name_format="urn:oasis:names:tc:SAML:2.0:attrname-format:uri"),
saml.Attribute(
friendly_name="titel",
name="urn:oid:2.5.4.12",
name_format="urn:oasis:names:tc:SAML:2.0:attrname-format:uri")]
lan = [attribute_converter.to_local_name(self.acs, a) for a in attr]
assert _eq(lan, ['sn', 'givenName', 'title'])
# def test_ava_fro_1(self):
#
# attr = [saml.Attribute(friendly_name="surName",
# name="urn:oid:2.5.4.4",
# name_format="urn:oasis:names:tc:SAML:2.0:attrname-format:uri"),
# saml.Attribute(friendly_name="efternamn",
# name="urn:oid:2.5.4.42",
# name_format="urn:oasis:names:tc:SAML:2.0:attrname-format:uri"),
# saml.Attribute(friendly_name="titel",
# name="urn:oid:2.5.4.12",
# name_format="urn:oasis:names:tc:SAML:2.0:attrname-format:uri")]
#
# result = attribute_converter.ava_fro(self.acs, attr)
#
# print result
# assert result == {'givenName': [], 'sn': [], 'title': []}
def test_to_local_name_from_basic(self):
    """An attribute with no friendly name resolves via its basic name."""
    attribute = saml.Attribute(
        name="urn:mace:dir:attribute-def:eduPersonPrimaryOrgUnitDN")
    resolved = attribute_converter.to_local_name(self.acs, attribute)
    assert _eq([resolved], ['eduPersonPrimaryOrgUnitDN'])
def test_to_and_for(self):
    """Round trip: local ava -> basic attribute statement -> local ava."""
    ava = {"givenName": "Roland", "surname": "Hedberg"}
    basic_converters = [ac for ac in self.acs if ac.name_format == BASIC_NF]
    basic_ac = basic_converters[0]
    attr_state = saml.AttributeStatement(basic_ac.to_(ava))
    oava = basic_ac.fro(attr_state)
    # Only the key set survives the round trip unchanged.
    assert _eq(ava.keys(), oava.keys())
def test_unspecified_name_format(self):
    """Attributes without a declared name format still convert locally."""
    statement = saml.attribute_statement_from_string(STATEMENT4)
    result = to_local(self.acs, statement)
    assert result == {'user_id': ['bob'], 'NameID': ['bobsnameagain']}
def test_mixed_attributes_1(self):
    """Unknown attributes are dropped unless allow_unknown is requested."""
    statement = saml.attribute_statement_from_string(STATEMENT_MIXED)
    known = {'eduPersonAffiliation': ['staff'],
             'givenName': ['Roland'], 'sn': ['Hedberg'],
             'uid': ['demouser'], 'user_id': ['bob']}
    assert to_local(self.acs, statement) == known
    # With allow_unknown=True the unmapped attributes are kept as well.
    expected = dict(known)
    expected.update({'swissEduPersonHomeOrganizationType': ['others'],
                     'urn:example:com:foo': ['Thing']})
    assert to_local(self.acs, statement, True) == expected
def test_noop_attribute_conversion():
    """AttributeConverterNOOP passes URI-named attributes through unchanged.

    Feeds a two-entry ava keyed by OID URNs into the NOOP converter and
    checks each resulting Attribute keeps its name, name format and single
    value.
    """
    ava = {"urn:oid:2.5.4.4": "Roland", "urn:oid:2.5.4.42": "Hedberg"}
    aconv = AttributeConverterNOOP(URI_NF)
    res = aconv.to_(ava)
    # BUGFIX: the original used the Python-2-only statement `print res`;
    # the call form works on both Python 2 and 3.
    print(res)
    assert len(res) == 2
    for attr in res:
        assert len(attr.attribute_value) == 1
        if attr.name == "urn:oid:2.5.4.42":
            assert attr.name_format == URI_NF
            assert attr.attribute_value[0].text == "Hedberg"
        elif attr.name == "urn:oid:2.5.4.4":
            assert attr.name_format == URI_NF
            assert attr.attribute_value[0].text == "Roland"
# Serialized SAML <Attribute> sample (schacHomeOrganization, addressed by
# its OID and the URI name format) consumed by test_schac below.
ava = ("<?xml version='1.0' encoding='UTF-8'?>"
       '<ns0:Attribute xmlns:ns0="urn:oasis:names:tc:SAML:2.0:assertion" '
       'xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" FriendlyName="schacHomeOrganization" '
       'Name="urn:oid:1.3.6.1.4.1.25178.1.2.9" NameFormat="urn:oasis:names:tc:SAML:2.0:attrname-format:uri">'
       '<ns0:AttributeValue xsi:nil="true" xsi:type="xs:string">uu.se</ns0:AttributeValue></ns0:Attribute>')
def test_schac():
    """Converters that know the schac OID map it to schacHomeOrganization."""
    attribute = attribute_from_string(ava)
    for converter in attribute_converter.ac_factory():
        try:
            result = converter.ava_from(attribute)
        except KeyError:
            # This converter does not know the OID; that is acceptable.
            continue
        assert result[0] == "schacHomeOrganization"
if __name__ == "__main__":
    # Ad-hoc manual run: only the module-level schac scenario is executed
    # directly; the class-based tests are meant for a test runner.
    # t = TestAC()
    # t.setup_class()
    # t.test_mixed_attributes_1()
    test_schac()
| 36.159483 | 109 | 0.575396 |
ace5a8f5de8307e30025004662ecc3d4ec7c009c | 3,270 | py | Python | qagent.py | foltman/Bebop_Controller_With_Reinforcement_Learning | aa1140768360b09433b0e252016ee013b7fbdf9a | [
"MIT"
] | 1 | 2019-08-23T08:11:04.000Z | 2019-08-23T08:11:04.000Z | qagent.py | foltman/Bebop_Controller_With_Reinforcement_Learning | aa1140768360b09433b0e252016ee013b7fbdf9a | [
"MIT"
] | 1 | 2019-08-23T08:19:48.000Z | 2019-08-23T08:19:48.000Z | qagent.py | foltman/Bebop_Controller_With_Reinforcement_Learning | aa1140768360b09433b0e252016ee013b7fbdf9a | [
"MIT"
] | null | null | null | #!/usr/bin/env python
import numpy as np
import random
import csv
class QAgent:
    """Tabular Q-learning agent backed by a numpy Q-matrix.

    The Q-matrix has shape (states, actions); cells equal to the sentinel
    value -10000 mark state/action pairs that are unavailable and are
    excluded from every computation.
    """

    # Sentinel marking an action as impossible in a given state.
    IMPOSSIBLE = -10000

    def __init__(self, alpha=0.8, gamma=0.8, epsilon=0.4, qmatrix=None, seed=123):
        """Configure the agent.

        alpha   -- learning rate.
        gamma   -- discount factor for future rewards.
        epsilon -- exploration probability for epsilon-greedy selection.
        qmatrix -- optional pre-built Q-matrix (default: empty).
        seed    -- seed for the module-level random generator.
        """
        # BUGFIX: the original used a mutable default argument (qmatrix=[]),
        # which is shared across instances; fall back to a fresh list.
        self.qmatrix = [] if qmatrix is None else qmatrix
        self.epsilon = epsilon
        self.gamma = gamma
        self.alpha = alpha
        self.seed = seed
        random.seed(self.seed)
        # random.seed() returns None; kept for backward compatibility with
        # code that reads this attribute.
        self.rnd = None

    def create_qmatrix(self, states_count, actions_count):
        """Create a zero-filled Q-matrix of the given dimensions."""
        self.qmatrix = np.zeros((states_count, actions_count))

    def qmatrix_from_file(self, filename):
        """Load the Q-matrix from a CSV file."""
        self.qmatrix = np.loadtxt(filename, dtype=float, delimiter=',')

    def qmatrix_from_list(self, list):
        """Load the Q-matrix from a list of lists.

        The parameter keeps its original (builtin-shadowing) name for
        backward compatibility with keyword callers.
        """
        self.qmatrix = np.array(list)

    def save_qmatrix(self, filename):
        """Persist the Q-matrix to *filename* as CSV."""
        # BUGFIX: use a context manager so the file is closed even when
        # writerows raises.
        with open(filename, 'w') as f:
            csv.writer(f).writerows(self.qmatrix)

    def chose_action(self, current_state):
        """Epsilon-greedy action selection for *current_state*.

        With probability epsilon a random possible action is taken;
        otherwise the possible action with the highest Q-value is chosen
        (falling back to a random choice when all Q-values sum to zero).
        """
        possible = self.get_action_ids(current_state)
        if random.random() < self.epsilon:
            return possible[random.randrange(len(possible))]
        if self.sum_possible_actions(current_state) != 0:
            rewards = [self.qmatrix[current_state, action]
                       for action in possible]
            return possible[int(np.argmax(rewards))]
        return possible[random.randrange(len(possible))]

    def update_q(self, current_state, action, next_state, reward):
        """Standard Q-learning update for (current_state, action)."""
        qsa = self.qmatrix[current_state, action]
        best_next = max(self.get_action_values(next_state))
        self.qmatrix[current_state, action] = qsa + self.alpha * (
            reward + self.gamma * best_next - qsa)

    def get_action_ids(self, state):
        """Indices of the actions not marked impossible in *state*."""
        return [i for i in range(np.shape(self.qmatrix)[1])
                if self.qmatrix[state, i] != self.IMPOSSIBLE]

    def sum_possible_actions(self, state):
        """Sum of Q-values over all possible actions in *state*.

        BUGFIX: the original iterated range(len(acts) - 1) and therefore
        always skipped the last possible action.
        """
        return sum(self.qmatrix[state, i] for i in self.get_action_ids(state))

    def get_action_values(self, state):
        """Q-values of the possible actions in *state*."""
        return [self.qmatrix[state, i]
                for i in range(np.shape(self.qmatrix)[1])
                if self.qmatrix[state, i] != self.IMPOSSIBLE]
ace5a9b2ac8dfca2edc0ebd8df6d1b601b978b2c | 2,573 | py | Python | userbot/format.py | iqbalfcksht/madafaka | f7b4c9b986cc2f6ae771df74242440c50e600c4a | [
"Naumen",
"Condor-1.1",
"MS-PL"
] | null | null | null | userbot/format.py | iqbalfcksht/madafaka | f7b4c9b986cc2f6ae771df74242440c50e600c4a | [
"Naumen",
"Condor-1.1",
"MS-PL"
] | null | null | null | userbot/format.py | iqbalfcksht/madafaka | f7b4c9b986cc2f6ae771df74242440c50e600c4a | [
"Naumen",
"Condor-1.1",
"MS-PL"
] | null | null | null | import datetime
from telethon.tl.tlobject import TLObject
from telethon.tl.types import MessageEntityPre
from telethon.utils import add_surrogate
def parse_pre(text):
    """Strip *text* and wrap all of it in a single `pre` message entity."""
    stripped = text.strip()
    entity = MessageEntityPre(
        offset=0, length=len(add_surrogate(stripped)), language="")
    return stripped, [entity]
def yaml_format(obj, indent=0, max_str_len=256, max_byte_len=64):
    """
    Pretty formats the given object as a YAML string which is returned.
    (based on TLObject.pretty_format)

    obj          -- object to render (TLObject, dict, str, bytes,
                    datetime, iterable, or anything with a repr).
    indent       -- current indentation level in spaces (used recursively).
    max_str_len  -- strings longer than this are truncated with an ellipsis.
    max_byte_len -- byte values longer than this render as a placeholder.
    """
    result = []
    if isinstance(obj, TLObject):
        obj = obj.to_dict()

    if isinstance(obj, dict):
        if not obj:
            return "dict:"
        items = obj.items()
        # Single-item dicts render inline; 3+ items get one line each.
        has_items = len(items) > 1
        has_multiple_items = len(items) > 2
        # The "_" key (TLObject type name) doubles as the mapping label.
        result.append(obj.get("_", "dict") + (":" if has_items else ""))
        if has_multiple_items:
            result.append("\n")
            indent += 2
        for k, v in items:
            # Skip the type marker and empty values entirely.
            if k == "_" or v is None:
                continue
            formatted = yaml_format(v, indent)
            if not formatted.strip():
                continue
            result.append(" " * (indent if has_multiple_items else 1))
            result.append(f"{k}:")
            if not formatted[0].isspace():
                result.append(" ")
            result.append(f"{formatted}")
            result.append("\n")
        if has_items:
            # Drop the trailing newline appended by the loop above.
            result.pop()
        if has_multiple_items:
            indent -= 2
    elif isinstance(obj, str):
        # truncate long strings and display elipsis
        result = repr(obj[:max_str_len])
        if len(obj) > max_str_len:
            result += "…"
        return result
    elif isinstance(obj, bytes):
        # repr() bytes if it's printable, hex like "FF EE BB" otherwise
        if all(0x20 <= c < 0x7F for c in obj):
            return repr(obj)
        else:
            return (
                "<…>" if len(obj) > max_byte_len else " ".join(f"{b:02X}" for b in obj)
            )
    elif isinstance(obj, datetime.datetime):
        # ISO-8601 without timezone offset (telethon dates are always UTC)
        return obj.strftime("%Y-%m-%d %H:%M:%S")
    elif hasattr(obj, "__iter__"):
        # display iterables one after another at the base indentation level
        result.append("\n")
        indent += 2
        for x in obj:
            result.append(f"{' ' * indent}- {yaml_format(x, indent + 2)}")
            result.append("\n")
        # Remove the newline added after the final element.
        result.pop()
        indent -= 2
    else:
        return repr(obj)

    return "".join(result)
| 32.1625 | 87 | 0.552274 |
ace5a9ddc20fa8d89861e066745f237d88f549fa | 118 | py | Python | tests/func/utils/predicates.py | Joacchim/BookMyComics | 21ed9c4ebfd7de46220f5638c7b4a0af60b4201a | [
"Apache-2.0"
] | null | null | null | tests/func/utils/predicates.py | Joacchim/BookMyComics | 21ed9c4ebfd7de46220f5638c7b4a0af60b4201a | [
"Apache-2.0"
] | 64 | 2018-09-05T13:36:55.000Z | 2020-08-16T19:56:20.000Z | tests/func/utils/predicates.py | BookMyComics-Developers/BookMyComics | e2639f5dff91176c84a6bb8c3b4d72f559b5f3ff | [
"Apache-2.0"
] | 1 | 2018-09-05T11:14:59.000Z | 2018-09-05T11:14:59.000Z | def with_next_page(self):
return self.has_next_page()
def with_prev_page(self):
    """True when the paginated view under test exposes a preceding page."""
    has_prev = self.has_prev_page()
    return has_prev
| 16.857143 | 31 | 0.745763 |
ace5ab06a2f24e624f0d65ea29357f42e3c2dec0 | 2,138 | py | Python | engine/BaseAgent.py | oceanprotocol/tokenspice0.1 | 78fc81b393408403d01acbf518a72c3108ee21a3 | [
"Apache-2.0"
] | 2 | 2022-01-03T00:15:01.000Z | 2022-03-25T08:59:55.000Z | engine/BaseAgent.py | tokenspice/tokenspice0.1 | 78fc81b393408403d01acbf518a72c3108ee21a3 | [
"Apache-2.0"
] | null | null | null | engine/BaseAgent.py | tokenspice/tokenspice0.1 | 78fc81b393408403d01acbf518a72c3108ee21a3 | [
"Apache-2.0"
] | null | null | null | import logging
log = logging.getLogger('baseagent')
from abc import ABC, abstractmethod
import enforce
import typing
from engine import BaseAgent, Wallet
from util.constants import SAFETY
from util.strutil import StrMixin
@enforce.runtime_validation
class BaseAgent(ABC, StrMixin):
    """Abstract market participant (data buyer, publisher, ...).

    Concrete subclasses implement takeStep(); this base class owns the
    wallet and the USD/OCEAN transfer plumbing.
    """

    def __init__(self, name: str, USD: float, OCEAN: float):
        self.name = name
        self._wallet = Wallet.Wallet(USD=USD, OCEAN=OCEAN)

    # ------------------------------------------------------------------
    @abstractmethod
    def takeStep(self, state):
        """Perform this agent's work for one simulation step."""

    # ----- USD side ---------------------------------------------------
    def USD(self) -> float:
        # Read the wallet's private field directly; the accessor call
        # (self._wallet.USD()) is slower.
        return self._wallet._USD

    def receiveUSD(self, amount: float) -> None:
        self._wallet.depositUSD(amount)

    def _transferUSD(self, receiving_agent,
                     amount: typing.Union[float, None]) -> None:
        """Send USD to receiving_agent; pass None to model pure spending."""
        if SAFETY:
            assert receiving_agent is None or \
                isinstance(receiving_agent, BaseAgent)
        if receiving_agent is not None:
            receiving_agent.receiveUSD(amount)
        self._wallet.withdrawUSD(amount)

    # ----- OCEAN side -------------------------------------------------
    def OCEAN(self) -> float:
        # Same direct-field shortcut as USD() above.
        return self._wallet._OCEAN

    def receiveOCEAN(self, amount: float) -> None:
        self._wallet.depositOCEAN(amount)

    def _transferOCEAN(self, receiving_agent,
                       amount: typing.Union[float, None]) -> None:
        """Send OCEAN to receiving_agent; pass None to model pure spending."""
        if SAFETY:
            assert receiving_agent is None or \
                isinstance(receiving_agent, BaseAgent)
        if receiving_agent is not None:
            receiving_agent.receiveOCEAN(amount)
        self._wallet.withdrawOCEAN(amount)
| 36.862069 | 86 | 0.591674 |
ace5ab0d4704a70f5e98330d806510dc96a98ecb | 13,117 | py | Python | azurefs.py | ahmetb/azurefs | efe4d2fefa541ae22a65ba50666640d6b53f1019 | [
"Apache-2.0"
] | 10 | 2017-03-04T21:19:28.000Z | 2022-03-31T13:26:46.000Z | azurefs.py | ahmetb/azurefs | efe4d2fefa541ae22a65ba50666640d6b53f1019 | [
"Apache-2.0"
] | null | null | null | azurefs.py | ahmetb/azurefs | efe4d2fefa541ae22a65ba50666640d6b53f1019 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
"""
A FUSE wrapper for locally mounting Azure blob storage
Ahmet Alp Balkan <ahmetalpbalkan at gmail.com>
"""
import math
import time
import logging
import random
import base64
from sys import argv, exit, maxint
from stat import S_IFDIR, S_IFREG
from errno import *
from os import getuid
from datetime import datetime
from fuse import FUSE, FuseOSError, Operations, LoggingMixIn
from azure.common import AzureException, AzureMissingResourceHttpError
from azure.storage.blob import BlobService
# RFC-1123-style timestamp format used by the blob service headers.
TIME_FORMAT = '%a, %d %b %Y %H:%M:%S %Z'

# BUGFIX: the original tested hasattr(__builtins__, 'bytes'), which breaks
# when __builtins__ is a dict (i.e. when this file is imported as a module).
# Probe the name directly instead.
try:
    bytes
except NameError:
    bytes = str

# BUGFIX: `log` must exist at module scope because the AzureFS methods log
# through it; the original only defined it under __main__, so importing the
# module produced NameError on the first log call.  Handler/level wiring is
# still script-only.
log = logging.getLogger()
if __name__ == '__main__':
    ch = logging.StreamHandler()
    log.addHandler(ch)
    log.setLevel(logging.DEBUG)
class AzureFS(LoggingMixIn, Operations):
    """Azure Blob Storage filesystem.

    Exposes each storage container as a top-level directory and each blob
    whose name contains no '/' as a file inside it.  Deeper hierarchies are
    not implemented (ENOSYS).  Metadata is cached in ``self.containers``
    and blob listings are fetched lazily.
    """

    blobs = None
    # <cname, dict(stat:dict, files:None|dict<fname, stat>)>
    containers = dict()
    # <fd, (path, bytes, dirty)>
    fds = dict()
    fd = 0

    def __init__(self, account, key):
        self.blobs = BlobService(account, key)
        self.rebuild_container_list()

    def convert_to_epoch(self, date):
        """Converts Tue, 31 Jul 2012 07:17:34 GMT format to epoch."""
        return int(time.mktime(time.strptime(date, TIME_FORMAT)))

    def rebuild_container_list(self):
        """Re-fetch the container listing and reset the metadata cache."""
        cmap = dict()
        cnames = set()
        for c in self.blobs.list_containers():
            date = c.properties.last_modified
            cstat = dict(st_mode=(S_IFDIR | 0o755), st_uid=getuid(), st_size=0,
                         st_mtime=self.convert_to_epoch(date))
            cname = c.name
            cmap['/' + cname] = dict(stat=cstat, files=None)
            cnames.add(cname)
        cmap['/'] = dict(files={},
                         stat=dict(st_mode=(S_IFDIR | 0o755),
                                   st_uid=getuid(), st_size=0,
                                   st_mtime=int(time.time())))
        # Replaces the whole tree, discarding any cached blob listings.
        self.containers = cmap

    def _parse_path(self, path):
        """Split *path* into (dir, file); file is None for directories."""
        if path.count('/') > 1:     # file
            return str(path[:path.rfind('/')]), str(path[path.rfind('/') + 1:])
        else:                       # dir
            pos = path.rfind('/', 1)
            if pos == -1:
                return path, None
            else:
                return str(path[:pos]), None

    def parse_container(self, path):
        """Return the container name portion of *path* (/abc/def/g -> abc)."""
        base_container = path[1:]
        if base_container.find('/') > -1:
            base_container = base_container[:base_container.find('/')]
        return str(base_container)

    def _get_dir(self, path, contents_required=False):
        """Return the cached directory node for *path*.

        When *contents_required* is set, the container's blob listing is
        fetched on demand and cached.  Raises ENOENT for unknown containers.
        """
        if not self.containers:
            self.rebuild_container_list()

        if path in self.containers and not (
                contents_required and self.containers[path]['files'] is None):
            return self.containers[path]

        cname = self.parse_container(path)
        if '/' + cname not in self.containers:
            raise FuseOSError(ENOENT)

        if self.containers['/' + cname]['files'] is None:
            # fetch contents of container
            log.info("------> CONTENTS NOT FOUND: %s" % cname)
            blobs = self.blobs.list_blobs(cname)
            self.containers['/' + cname]['files'] = dict()
            for f in blobs:
                blob_name = f.name
                blob_date = f.properties.last_modified
                blob_size = int(f.properties.content_length)
                node = dict(st_mode=(S_IFREG | 0o644), st_size=blob_size,
                            st_mtime=self.convert_to_epoch(blob_date),
                            st_uid=getuid())
                if blob_name.find('/') == -1:   # file just under container
                    self.containers['/' + cname]['files'][blob_name] = node
        return self.containers['/' + cname]

    def _get_file(self, path):
        """Return the cached stat node for a file, or None if unknown."""
        d, f = self._parse_path(path)
        directory = self._get_dir(d, True)
        if directory is not None and f in directory['files']:
            return directory['files'][f]

    def getattr(self, path, fh=None):
        """FUSE getattr: stat for either a directory or a cached file."""
        d, f = self._parse_path(path)
        if f is None:
            directory = self._get_dir(d)
            return directory['stat']
        else:
            node = self._get_file(path)
            if node:
                return node
        raise FuseOSError(ENOENT)

    # FUSE
    def mkdir(self, path, mode):
        """Create a container for a top-level mkdir; deeper levels ENOSYS."""
        if path.count('/') <= 1:    # create on root
            name = path[1:]
            if not 3 <= len(name) <= 63:
                log.error("Container names can be 3 through 63 chars long.")
                raise FuseOSError(ENAMETOOLONG)
            # BUGFIX: the original used `is not` (identity) for a string
            # comparison, which depends on interning; compare by value.
            if name != name.lower():
                log.error("Container names cannot contain uppercase "
                          "characters.")
                raise FuseOSError(EACCES)
            if name.count('--') > 0:
                log.error("Container names cannot contain consecutive "
                          "dashes (-).")
                raise FuseOSError(EAGAIN)
            # TODO handle all "-"s must be preceded by letter or numbers
            # TODO starts with only letter or number, can contain letter, nr,'-'

            resp = self.blobs.create_container(name)
            if resp:
                self.rebuild_container_list()
                log.info("CONTAINER %s CREATED" % name)
            else:
                # BUGFIX: log before raising; the original raised first,
                # leaving its error message unreachable.
                log.error("Invalid container name or container already "
                          "exists.")
                raise FuseOSError(EACCES)
        else:
            raise FuseOSError(ENOSYS)   # TODO support 2nd+ level mkdirs

    def rmdir(self, path):
        """Delete a container for a top-level rmdir; deeper levels ENOSYS."""
        if path.count('/') == 1:
            c_name = path[1:]
            resp = self.blobs.delete_container(c_name)
            if resp:
                if path in self.containers:
                    del self.containers[path]
            else:
                raise FuseOSError(EACCES)
        else:
            raise FuseOSError(ENOSYS)   # TODO support 2nd+ level rmdirs

    def create(self, path, mode):
        """Register an empty file node and hand out a descriptor for it."""
        node = dict(st_mode=(S_IFREG | mode), st_size=0, st_nlink=1,
                    st_uid=getuid(), st_mtime=time.time())
        d, f = self._parse_path(path)
        if not f:
            log.error("Cannot create files on root level: /")
            raise FuseOSError(ENOSYS)
        directory = self._get_dir(d, True)
        if not directory:
            raise FuseOSError(EIO)
        directory['files'][f] = node
        return self.open(path, data='')     # reusing handler provider

    def open(self, path, flags=0, data=None):
        """Allocate a descriptor; download the blob when *data* is absent."""
        if data is None:    # download contents
            c_name = self.parse_container(path)
            f_name = path[path.find('/', 1) + 1:]
            try:
                data = self.blobs.get_blob(c_name, f_name)
            except AzureMissingResourceHttpError:
                # Blob vanished server-side; drop it from the cache too.
                directory = self._get_dir('/' + c_name, True)
                if f_name in directory['files']:
                    del directory['files'][f_name]
                raise FuseOSError(ENOENT)
            except AzureException as e:
                log.error("Read blob failed: %s" % e)
                raise FuseOSError(EAGAIN)
        self.fd += 1
        self.fds[self.fd] = (path, data, False)
        return self.fd

    def flush(self, path, fh=None):
        """Upload the in-memory contents of *fh* back to Azure if dirty."""
        if not fh:
            raise FuseOSError(EIO)
        if fh not in self.fds:
            raise FuseOSError(EIO)
        path = self.fds[fh][0]
        data = self.fds[fh][1]
        dirty = self.fds[fh][2]

        if not dirty:
            return 0     # avoid redundant write

        d, f = self._parse_path(path)
        c_name = self.parse_container(path)
        if data is None:
            data = ''

        try:
            if len(data) < 64 * 1024 * 1024:    # 64 mb single-shot limit
                self.blobs.put_blob(c_name, f, data, 'BlockBlob')
            else:
                # divide file by blocks and upload
                block_size = 8 * 1024 * 1024
                num_blocks = int(math.ceil(len(data) * 1.0 / block_size))
                rd = str(random.randint(1, 1e8))
                block_ids = list()
                for i in range(num_blocks):
                    part = data[i * block_size:min((i + 1) * block_size,
                                                   len(data))]
                    # Block ids must be base64 and equal length; pad the
                    # index to 8 digits.  (base64.encodestring is the
                    # Python-2 spelling this module targets.)
                    block_id = base64.encodestring(
                        '%s_%s' % (rd, (8 - len(str(i))) * '0' + str(i)))
                    self.blobs.put_block(c_name, f, part, block_id)
                    block_ids.append(block_id)
                self.blobs.put_block_list(c_name, f, block_ids)
        except AzureException:
            raise FuseOSError(EAGAIN)

        directory = self._get_dir(d, True)
        if not directory or f not in directory['files']:
            raise FuseOSError(EIO)

        # update local metadata to match what was just uploaded
        directory['files'][f]['st_size'] = len(data)
        directory['files'][f]['st_mtime'] = time.time()
        self.fds[fh] = (path, data, False)   # mark as not dirty
        return 0

    def release(self, path, fh=None):
        """Forget the descriptor; contents must have been flushed already."""
        if fh is not None and fh in self.fds:
            del self.fds[fh]

    def truncate(self, path, length, fh=None):
        # NOTE(review): no-op — relies on write() replacing contents.
        return 0

    def write(self, path, data, offset, fh=None):
        """Buffer *data* at *offset* in memory and mark the fd dirty."""
        if not fh or fh not in self.fds:
            raise FuseOSError(ENOENT)
        buffered = self.fds[fh][1]
        if buffered is None:
            buffered = ""
        self.fds[fh] = (self.fds[fh][0], buffered[:offset] + data, True)
        return len(data)

    def unlink(self, path):
        """Delete the blob and evict it from the metadata cache."""
        c_name = self.parse_container(path)
        d, f = self._parse_path(path)
        try:
            self.blobs.delete_blob(c_name, f)
            _dir = self._get_dir(path, True)
            if _dir and f in _dir['files']:
                del _dir['files'][f]
            return 0
        except AzureMissingResourceHttpError:
            raise FuseOSError(ENOENT)
        except Exception:
            raise FuseOSError(EAGAIN)

    def readdir(self, path, fh):
        """List containers at '/', or the cached files of a container."""
        if path == '/':
            # BUGFIX: value comparison instead of the original identity
            # test (`x is not '/'`), which depends on string interning.
            return ['.', '..'] + [x[1:] for x in self.containers.keys()
                                  if x != '/']
        directory = self._get_dir(path, True)
        if not directory:
            raise FuseOSError(ENOENT)
        return ['.', '..'] + list(directory['files'].keys())

    def read(self, path, size, offset, fh):
        """Read *size* bytes at *offset*, refreshing the cached blob data."""
        if not fh or fh not in self.fds:
            raise FuseOSError(ENOENT)
        f_name = path[path.find('/', 1) + 1:]
        c_name = path[1:path.find('/', 1)]
        try:
            data = self.blobs.get_blob(c_name, f_name)
            self.fds[fh] = (self.fds[fh][0], data, False)
            return data[offset:offset + size]
        except AzureMissingResourceHttpError:
            # BUGFIX: the original used Python-2 `except URLError, e` with
            # an unimported URLError name (and a FUSEOSError typo), which
            # crashed with NameError on any failure; map the Azure SDK
            # exceptions instead, mirroring open().
            raise FuseOSError(ENOENT)
        except AzureException as e:
            log.error("Read blob failed: %s" % e)
            raise FuseOSError(EAGAIN)

    def statfs(self, path):
        """Report a dummy filesystem size (blob storage has no real quota)."""
        return dict(f_bsize=1024, f_blocks=1, f_bavail=maxint)

    def rename(self, old, new):
        """Three stage move operation because Azure do not have
        move or rename call."""
        od, of = self._parse_path(old)
        if of is None:  # move dir
            raise FuseOSError(ENOSYS)

        # BUGFIX: the original called a nonexistent _list_container_blobs;
        # use the cached directory contents for the source path instead.
        _dir = self._get_dir(od, True)
        if _dir is None or of not in _dir['files']:
            raise FuseOSError(ENOENT)
        src = _dir['files'][of]
        if src['st_mode'] & S_IFREG <= 0:   # move dir
            raise FuseOSError(ENOSYS)

        fh = self.open(old, 0)
        data = self.read(old, src['st_size'], 0, fh)
        self.flush(old, fh)

        fh = self.create(new, 0o644)
        # BUGFIX: the original tested `new < 0` (the path string) instead
        # of the descriptor returned by create().
        if fh < 0:
            raise FuseOSError(EIO)
        self.write(new, data, 0, fh)
        res = self.flush(new, fh)
        if res == 0:
            self.unlink(old)

    def symlink(self, target, source):
        raise FuseOSError(ENOSYS)

    def getxattr(self, path, name, position=0):
        # No extended attributes are stored.
        return ''

    def chmod(self, path, mode):
        # Permissions are synthesized; nothing to persist.
        pass

    def chown(self, path, uid, gid):
        # Ownership is synthesized; nothing to persist.
        pass
if __name__ == '__main__':
    # Usage: azurefs.py <mount_directory> <storage account> <access key>
    if len(argv) < 4:
        print('Usage: %s <mount_directory> <account> <secret_key>' % argv[0])
        exit(1)
    # Mount in the foreground with multithreading enabled (nothreads=False).
    fuse = FUSE(AzureFS(argv[2], argv[3]), argv[1], debug=False,
                nothreads=False, foreground=True)
| 32.874687 | 79 | 0.519479 |
ace5ab5202c1a79191a9ff66784b795a6fc6ccab | 1,141 | py | Python | dataset.py | tmaciulis22/DeepLearningAgriculture | 1bcd2fa868cff1526cda9591f28fa69a1e6a286c | [
"MIT"
] | null | null | null | dataset.py | tmaciulis22/DeepLearningAgriculture | 1bcd2fa868cff1526cda9591f28fa69a1e6a286c | [
"MIT"
] | null | null | null | dataset.py | tmaciulis22/DeepLearningAgriculture | 1bcd2fa868cff1526cda9591f28fa69a1e6a286c | [
"MIT"
] | 1 | 2021-07-08T19:12:05.000Z | 2021-07-08T19:12:05.000Z | from torch.utils.data import DataLoader
from torch.utils.data import Dataset as BaseDataset
import numpy as np
import cv2
from matplotlib import pyplot as plt
import os
class Dataset(BaseDataset):
    """Field-image dataset pairing each JPEG with per-class binary masks.

    Every image in ``images_dir`` yields a (3, H, W) float32 image tensor
    and a stacked float32 mask array, one channel per entry in CLASSES;
    missing mask files are replaced by 512x512 zero masks.
    """

    CLASSES = ['weed_cluster', 'water', 'nutrient_deficiency']

    def __init__(self, images_dir, labels_dir):
        self.ids = os.listdir(images_dir)
        self.images_fps = [os.path.join(images_dir, image_id)
                           for image_id in self.ids]
        self.labels_dir = labels_dir

    def __getitem__(self, i):
        # Load the image and convert OpenCV's BGR ordering to RGB.
        bgr = cv2.imread(self.images_fps[i])
        image = cv2.cvtColor(bgr, cv2.COLOR_BGR2RGB)

        mask_name = self.ids[i].replace('.jpg', '.png')
        masks = []
        for class_name in self.CLASSES:
            mask_path = os.path.join(self.labels_dir, class_name, mask_name)
            if os.path.exists(mask_path):
                # Grayscale read, scaled from {0, 255} to {0, 1}.
                masks.append(cv2.imread(mask_path, 0) / 255)
            else:
                masks.append(np.zeros((512, 512)))

        # HWC -> CHW for the image; masks stacked along a new class axis.
        return (image.transpose(2, 0, 1).astype('float32'),
                np.stack(masks).astype('float32'))

    def __len__(self):
        return len(self.ids)
ace5ac5ece7d6510a8e2c728a0c57dda287d9aa3 | 11,645 | py | Python | test/unit/pulled_search/main.py | mjpernot/pulled-search | 2c8f4dee420f0c2fc12867b2a642e3a530d2f24c | [
"MIT"
] | null | null | null | test/unit/pulled_search/main.py | mjpernot/pulled-search | 2c8f4dee420f0c2fc12867b2a642e3a530d2f24c | [
"MIT"
] | null | null | null | test/unit/pulled_search/main.py | mjpernot/pulled-search | 2c8f4dee420f0c2fc12867b2a642e3a530d2f24c | [
"MIT"
] | null | null | null | #!/usr/bin/python
# Classification (U)
"""Program: main.py
Description: Unit testing of main in pulled_search.py.
Usage:
test/unit/pulled_search/main.py
Arguments:
"""
# Libraries and Global Variables
# Standard
import sys
import os
if sys.version_info < (2, 7):
import unittest2 as unittest
else:
import unittest
# Third-party
import mock
# Local
sys.path.append(os.getcwd())
import pulled_search
import lib.gen_libs as gen_libs
import version
__version__ = version.__version__
class ProgramLock(object):

    """Test double standing in for gen_class.ProgramLock.

    Simply records the constructor arguments so the tests can hand an
    instance to the code under test without touching any real locking.
    """

    def __init__(self, cmdline, flavor):
        """Store the command line and lock flavor ID unchanged.

        cmdline -- argv-style command line list.
        flavor  -- lock flavor ID.
        """
        self.cmdline = cmdline
        self.flavor = flavor
class UnitTest(unittest.TestCase):
    """Class: UnitTest
    Description: Class which is a representation of a unit testing.
    Methods:
        setUp -> Unit testing initialization.
        test_help_true -> Test with help_func returns True.
        test_help_false -> Test with help_func returns False.
        test_require_true -> Test with arg_require returns True.
        test_require_false -> Test with arg_require returns False.
        test_con_req_or_false -> Test with arg_cond_req_or returns False.
        test_con_req_or_true -> Test with arg_cond_req_or returns True.
        test_dir_chk_crt_true -> Test with arg_dir_chk_crt returns True.
        test_dir_chk_crt_false -> Test with arg_dir_chk_crt returns False.
        test_xor_dict_false -> Test with arg_xor_dict returns False.
        test_xor_dict_true -> Test with arg_xor_dict returns True.
        test_run_program -> Test run_program function.
        test_programlock_true -> Test with ProgramLock returns True.
        test_programlock_false -> Test with ProgramLock returns False.
        test_programlock_id -> Test ProgramLock with flavor ID.
    """

    def setUp(self):
        """Function: setUp
        Description: Initialization for unit testing.
        Arguments:
        """
        # Minimal argument dictionaries fed to main() through the mocked
        # arg_parser; args2 adds the "-y" flavor-ID option.
        self.args = {"-c": "config_file", "-d": "config_dir", "-R": True}
        self.args2 = {"-c": "config_file", "-d": "config_dir", "-R": True,
                      "-y": "Flavor"}
        self.proglock = ProgramLock(["cmdline"], "FlavorID")

    @mock.patch("pulled_search.gen_libs.help_func")
    @mock.patch("pulled_search.arg_parser.arg_parse2")
    def test_help_true(self, mock_arg, mock_help):
        """Function: test_help_true
        Description: Test with help_func returns True.
        Arguments:
        """
        mock_arg.return_value = self.args
        mock_help.return_value = True
        self.assertFalse(pulled_search.main())

    @mock.patch("pulled_search.gen_libs.help_func")
    @mock.patch("pulled_search.arg_parser")
    def test_help_false(self, mock_arg, mock_help):
        """Function: test_help_false
        Description: Test with help_func returns False.
        Arguments:
        """
        mock_arg.arg_parse2.return_value = self.args
        mock_help.return_value = False
        mock_arg.arg_require.return_value = True
        self.assertFalse(pulled_search.main())

    @mock.patch("pulled_search.gen_libs.help_func")
    @mock.patch("pulled_search.arg_parser")
    def test_require_true(self, mock_arg, mock_help):
        """Function: test_require_true
        Description: Test with arg_require returns True.
        Arguments:
        """
        mock_arg.arg_parse2.return_value = self.args
        mock_help.return_value = False
        mock_arg.arg_require.return_value = True
        self.assertFalse(pulled_search.main())

    @mock.patch("pulled_search.gen_libs.help_func")
    @mock.patch("pulled_search.arg_parser")
    def test_require_false(self, mock_arg, mock_help):
        """Function: test_require_false
        Description: Test with arg_require returns False.
        Arguments:
        """
        mock_arg.arg_parse2.return_value = self.args
        mock_help.return_value = False
        mock_arg.arg_require.return_value = False
        mock_arg.arg_cond_req_or.return_value = False
        self.assertFalse(pulled_search.main())

    # NOTE(review): identical arrangement to test_require_false above;
    # the two tests exercise the same early-exit path.
    @mock.patch("pulled_search.gen_libs.help_func")
    @mock.patch("pulled_search.arg_parser")
    def test_con_req_or_false(self, mock_arg, mock_help):
        """Function: test_con_req_or_false
        Description: Test with arg_cond_req_or returns False.
        Arguments:
        """
        mock_arg.arg_parse2.return_value = self.args
        mock_help.return_value = False
        mock_arg.arg_require.return_value = False
        mock_arg.arg_cond_req_or.return_value = False
        self.assertFalse(pulled_search.main())

    @mock.patch("pulled_search.gen_libs.help_func")
    @mock.patch("pulled_search.arg_parser")
    def test_con_req_or_true(self, mock_arg, mock_help):
        """Function: test_con_req_or_true
        Description: Test with arg_cond_req_or returns True.
        Arguments:
        """
        # NOTE(review): arg_cond_req_or itself is not stubbed here; the
        # MagicMock default (truthy) supplies the "True" this test relies on.
        mock_arg.arg_parse2.return_value = self.args
        mock_help.return_value = False
        mock_arg.arg_require.return_value = False
        mock_arg.arg_xor_dict.return_value = True
        mock_arg.arg_dir_chk_crt.return_value = True
        self.assertFalse(pulled_search.main())

    @mock.patch("pulled_search.gen_libs.help_func")
    @mock.patch("pulled_search.arg_parser")
    def test_dir_chk_crt_true(self, mock_arg, mock_help):
        """Function: test_dir_chk_crt_true
        Description: Test with arg_dir_chk_crt returns True.
        Arguments:
        """
        mock_arg.arg_parse2.return_value = self.args
        mock_help.return_value = False
        mock_arg.arg_require.return_value = False
        mock_arg.arg_cond_req_or.return_value = True
        mock_arg.arg_dir_chk_crt.return_value = True
        self.assertFalse(pulled_search.main())

    @mock.patch("pulled_search.gen_libs.help_func")
    @mock.patch("pulled_search.arg_parser")
    def test_dir_chk_crt_false(self, mock_arg, mock_help):
        """Function: test_dir_chk_crt_false
        Description: Test with arg_dir_chk_crt returns False.
        Arguments:
        """
        mock_arg.arg_parse2.return_value = self.args
        mock_help.return_value = False
        mock_arg.arg_require.return_value = False
        mock_arg.arg_cond_req_or.return_value = True
        mock_arg.arg_dir_chk_crt.return_value = False
        mock_arg.arg_xor_dict.return_value = False
        self.assertFalse(pulled_search.main())

    @mock.patch("pulled_search.gen_libs.help_func")
    @mock.patch("pulled_search.arg_parser")
    def test_xor_dict_false(self, mock_arg, mock_help):
        """Function: test_xor_dict_false
        Description: Test with arg_xor_dict returns False.
        Arguments:
        """
        mock_arg.arg_parse2.return_value = self.args
        mock_help.return_value = False
        mock_arg.arg_require.return_value = False
        mock_arg.arg_cond_req_or.return_value = True
        mock_arg.arg_dir_chk_crt.return_value = False
        mock_arg.arg_xor_dict.return_value = False
        self.assertFalse(pulled_search.main())

    @mock.patch("pulled_search.gen_class.ProgramLock")
    @mock.patch("pulled_search.run_program")
    @mock.patch("pulled_search.gen_libs.help_func")
    @mock.patch("pulled_search.arg_parser")
    def test_xor_dict_true(self, mock_arg, mock_help, mock_run, mock_lock):
        """Function: test_xor_dict_true
        Description: Test with arg_xor_dict returns True.
        Arguments:
        """
        mock_lock.return_value = self.proglock
        mock_arg.arg_parse2.return_value = self.args
        mock_help.return_value = False
        mock_arg.arg_require.return_value = False
        mock_arg.arg_cond_req_or.return_value = True
        mock_arg.arg_dir_chk_crt.return_value = False
        mock_arg.arg_xor_dict.return_value = True
        mock_run.return_value = True
        self.assertFalse(pulled_search.main())

    # NOTE(review): test_run_program and test_programlock_true below are
    # identical arrangements to test_xor_dict_true; all three drive the
    # full successful path through main().
    @mock.patch("pulled_search.gen_class.ProgramLock")
    @mock.patch("pulled_search.run_program")
    @mock.patch("pulled_search.gen_libs.help_func")
    @mock.patch("pulled_search.arg_parser")
    def test_run_program(self, mock_arg, mock_help, mock_run, mock_lock):
        """Function: test_run_program
        Description: Test run_program function.
        Arguments:
        """
        mock_lock.return_value = self.proglock
        mock_arg.arg_parse2.return_value = self.args
        mock_help.return_value = False
        mock_arg.arg_require.return_value = False
        mock_arg.arg_cond_req_or.return_value = True
        mock_arg.arg_dir_chk_crt.return_value = False
        mock_arg.arg_xor_dict.return_value = True
        mock_run.return_value = True
        self.assertFalse(pulled_search.main())

    @mock.patch("pulled_search.gen_class.ProgramLock")
    @mock.patch("pulled_search.run_program")
    @mock.patch("pulled_search.gen_libs.help_func")
    @mock.patch("pulled_search.arg_parser")
    def test_programlock_true(self, mock_arg, mock_help, mock_run, mock_lock):
        """Function: test_programlock_true
        Description: Test with ProgramLock returns True.
        Arguments:
        """
        mock_lock.return_value = self.proglock
        mock_arg.arg_parse2.return_value = self.args
        mock_help.return_value = False
        mock_arg.arg_require.return_value = False
        mock_arg.arg_cond_req_or.return_value = True
        mock_arg.arg_dir_chk_crt.return_value = False
        mock_arg.arg_xor_dict.return_value = True
        mock_run.return_value = True
        self.assertFalse(pulled_search.main())

    @mock.patch("pulled_search.gen_class.ProgramLock")
    @mock.patch("pulled_search.gen_libs.help_func")
    @mock.patch("pulled_search.arg_parser")
    def test_programlock_false(self, mock_arg, mock_help, mock_lock):
        """Function: test_programlock_false
        Description: Test with ProgramLock returns False.
        Arguments:
        """
        # Simulate a second running instance: the lock constructor raises.
        mock_lock.side_effect = \
            pulled_search.gen_class.SingleInstanceException
        mock_arg.arg_parse2.return_value = self.args
        mock_help.return_value = False
        mock_arg.arg_require.return_value = False
        mock_arg.arg_cond_req_or.return_value = True
        mock_arg.arg_dir_chk_crt.return_value = False
        with gen_libs.no_std_out():
            self.assertFalse(pulled_search.main())

    @mock.patch("pulled_search.gen_class.ProgramLock")
    @mock.patch("pulled_search.run_program")
    @mock.patch("pulled_search.gen_libs.help_func")
    @mock.patch("pulled_search.arg_parser")
    def test_programlock_id(self, mock_arg, mock_help, mock_run, mock_lock):
        """Function: test_programlock_id
        Description: Test ProgramLock with flavor ID.
        Arguments:
        """
        mock_lock.return_value = self.proglock
        # args2 carries the "-y" flavor ID consumed by the ProgramLock call.
        mock_arg.arg_parse2.return_value = self.args2
        mock_help.return_value = False
        mock_arg.arg_require.return_value = False
        mock_arg.arg_cond_req_or.return_value = True
        mock_arg.arg_dir_chk_crt.return_value = False
        mock_arg.arg_xor_dict.return_value = True
        mock_run.return_value = True
        self.assertFalse(pulled_search.main())
# Allow the test module to be executed directly rather than via a runner.
if __name__ == "__main__":
    unittest.main()
| 28.753086 | 78 | 0.681408 |
ace5ad8fa01e7bdc96a6735d9263a2f2ef4dbde6 | 3,537 | py | Python | app/main.py | davidsean/slideshowbob | f4443efad6011ce62b24226891221d346dc389d4 | [
"MIT"
] | null | null | null | app/main.py | davidsean/slideshowbob | f4443efad6011ce62b24226891221d346dc389d4 | [
"MIT"
] | null | null | null | app/main.py | davidsean/slideshowbob | f4443efad6011ce62b24226891221d346dc389d4 | [
"MIT"
] | null | null | null | import os
import json
import logging
from json import JSONDecodeError
from flask import Flask, request, Response
from app.mqtt_helper import MQTTHelper
logging.basicConfig()
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
app = Flask(__name__)
# Disable strict trailing slashes in URLs
app.url_map.strict_slashes = False
@app.route('/', methods=['GET'])
def hello()-> Response:
return Response("Hi", status=200)
@app.route('/webhook', methods=['GET'])
def get_webhook()-> Response:
"""process GET webhook
Returns:
Response: respoonse object
"""
logger.info('GET webhook')
# extract query params
mode = request.args.get('hub.mode', None)
token = request.args.get('hub.verify_token', None)
challenge = request.args.get('hub.challenge', None)
logger.debug('mode: %s', mode)
logger.debug('token: %s', token)
logger.debug('challenge: %s', challenge)
if (mode is not None and token is not None):
if (mode=='subscribe' and token==VERIFY_TOKEN):
logger.info('Webhook Verified')
return Response(challenge, status=200)
return Response("Forbidden", status=402)
@app.route('/webhook', methods=['POST'])
def post_webhook():
    """Process a POST webhook event.

    Expects a page-subscription payload; for each entry containing a
    message, extracts the sender id and, for every image attachment,
    publishes ``{"url": ..., "sender_id": ...}`` to the configured MQTT
    topic.

    Returns:
        Response: 200 EVENT_RECEIVED on success, 400 for malformed payloads.
    """
    logger.debug('POST webhook')
    try:
        payload:dict = request.json
    except JSONDecodeError as err:
        # NOTE(review): flask's request.json typically raises a werkzeug
        # BadRequest on malformed bodies, not JSONDecodeError — confirm
        # this handler is actually reachable.
        logger.warning('Invalid payload: %s', err)
        return Response('Invalid payload', status=400)
    logger.debug(payload)
    if payload is None:
        return Response('Invalid payload', status=400)
    if ('object' in payload) and (payload['object']=='page') and ('entry' in payload):
        # iterate over possibly multiple entries
        for entry in payload['entry']:
            if 'messaging' in entry and ('message' in entry['messaging'][0]):
                logger.debug('got message')
                # should only get one
                message = entry['messaging'][0]['message']
                # mqtt = MQTTHelper()
                # mqtt.publish(os.environ.get('TOPIC'), message, 0)
                if 'sender' not in entry['messaging'][0] or 'id' not in entry['messaging'][0]['sender']:
                    return Response('Invalid payload', status=400)
                sender_id = entry['messaging'][0]['sender']['id']
                logger.info('got sender_id: %s', sender_id)
                if 'attachments' in message:
                    # process all attachments
                    for attachment in message['attachments']:
                        if 'type' in attachment and attachment['type'] == 'image':
                            #process image attachment
                            logger.debug('process image attachment')
                            if 'payload' in attachment:
                                url = attachment['payload']['url']
                                logger.debug('got url: %s', url)
                                # Note: 'payload' is reused here for the MQTT
                                # message body, shadowing the request payload.
                                payload = {
                                    'url':url,
                                    'sender_id':sender_id
                                }
                                mqtt = MQTTHelper()
                                mqtt.publish(os.environ.get('TOPIC'), json.dumps(payload), 0)
    else:
        logger.warning('payload.object is not page or entry not present. payload: %s', payload)
        return Response('Invalid payload', status=400)
    return Response("EVENT_RECEIVED", status=200)
ace5ae9c4444e37b91e38b0805d83a0c3f197a60 | 4,654 | py | Python | src/chat.py | Charcoal-SE/SmokeDetector2 | 02c1b32c05f4c9e44b5a8dd717288d9d567ff13f | [
"Apache-2.0",
"MIT"
] | 7 | 2017-03-29T19:46:55.000Z | 2021-11-11T04:26:25.000Z | src/chat.py | Charcoal-SE/SmokeDetector2 | 02c1b32c05f4c9e44b5a8dd717288d9d567ff13f | [
"Apache-2.0",
"MIT"
] | 27 | 2017-03-30T19:22:32.000Z | 2017-04-28T17:09:41.000Z | src/chat.py | Charcoal-SE/SmokeDetector2 | 02c1b32c05f4c9e44b5a8dd717288d9d567ff13f | [
"Apache-2.0",
"MIT"
] | 5 | 2017-03-30T18:02:20.000Z | 2021-11-11T04:26:18.000Z | # vim: set filetype=python tabstop=4 shiftwidth=4 expandtab:
from ChatExchange.chatexchange import client as chatclient, events
import collections
import itertools
import json
from database import SESSION, SmokeyMessage
from excepthook import excepthook
import command_dispatch
import config
import git
import secrets
_init = False
_clients = {
"stackexchange.com": None,
"stackoverflow.com": None,
"meta.stackexchange.com": None
}
_room_permissions = None
_rooms = {}
def require_chat(function):
    """Decorator asserting that init() has completed before *function* runs.

    Uses functools.wraps so the wrapped function keeps its name/docstring
    (the original wrapper reported every decorated function as ``f``).
    """
    import functools

    @functools.wraps(function)
    def f(*args, **kwargs):
        assert _init
        return function(*args, **kwargs)
    return f
@secrets.require_secrets
def init():
    """Log the chat clients in, join command rooms, and start watching.

    Populates the module-level _clients, _room_permissions, and _rooms, then
    flips _init so @require_chat functions become callable.

    Raises:
        Exception: if login to a site fails after all retries.
    """
    global _clients
    global _init
    global _room_permissions
    global _rooms

    for site in _clients.keys():
        client = chatclient.Client(site)
        # Chat login is flaky; retry a bounded number of times.
        for _ in range(10):
            try:
                client.login(secrets.email.value, secrets.pw.value)
                break
            # Fix: was a bare ``except:`` which also swallowed
            # KeyboardInterrupt/SystemExit.
            except Exception:
                pass
        else:
            raise Exception("Failed to log into " + site)
        _clients[site] = client

    _room_permissions = parse_room_config()

    for site, roomid in _room_permissions["commands"]:
        room = _clients[site].get_room(roomid)
        room.join()
        # Fix: bind ``room`` as a default argument — the original closure was
        # late-bound, so every callback would have seen the *last* room.
        room.watch_socket(
            excepthook(
                lambda msg, client, room=room: on_msg(msg, client, room)
            )
        )
        _rooms[(site, roomid)] = room

    _init = True
def parse_room_config():
    """Read ../config/rooms.json and invert it into {msg_type: {(site, roomid)}}."""
    with open("../config/rooms.json", "r") as room_config:
        room_dict = json.load(room_config)

    rooms = {}
    for site, site_rooms in room_dict.items():
        for roomid, room in site_rooms.items():
            identifier = (site, roomid)
            for perm in room["msg_types"]:
                rooms.setdefault(perm, set()).add(identifier)
    return rooms
@require_chat
def on_msg(msg, client, room):
    """Dispatch an incoming chat event to the appropriate command handler."""
    # Only react to new or edited messages; other events are ignored.
    if isinstance(msg, events.MessagePosted) or isinstance(msg, events.MessageEdited):
        message = msg.message
        if message.owner.id in config.my_ids:
            # Our own message: record it so replies can be matched later.
            SmokeyMessage.create(chat_site_url=client.host, room_id=room.id, message_id=message.id, is_report=False)
        elif message.parent and message.parent.owner.id in config.my_ids:
            # Reply to one of our messages: everything after the @-ping is the command.
            command = message.content.split(" ", 1)[1]
            message.reply(command_dispatch.dispatch_reply_command(message.parent, message, command))
        elif message.content.startswith(config.shorthand_prefix):
            message.reply(command_dispatch.dispatch_shorthand_command(message, room))
        elif message.content.startswith(config.command_prefix):
            message.reply(command_dispatch.dispatch_command(message))
@require_chat
def send_to_room(room, msg, **kwargs):
    """Post *msg* to *room*, optionally prefixed with the bot banner."""
    text = msg.rstrip()
    if kwargs.get('prefix'):
        text = "[ [SmokeDetector-ng]({}) ] ".format(config.github) + text
    room.send_message(text)
@require_chat
def tell_rooms_with(prop, msg, **kwargs):
    """Broadcast *msg* to every room that has permission *prop*."""
    tell_rooms(msg, has=(prop,), hasnt=(), **kwargs)
@require_chat
def tell_rooms_without(prop, msg, **kwargs):
    """Broadcast *msg* to every room that lacks permission *prop*."""
    tell_rooms(msg, has=(), hasnt=(prop,), **kwargs)
@require_chat
def tell_rooms(msg, has, hasnt, **kwargs):
    """Send *msg* to rooms having every property in *has* and none in *hasnt*.

    Joins (and caches in _rooms) any qualifying room not yet connected.
    """
    global _rooms

    target_rooms = set()
    for prop_has in has:
        for room in _room_permissions[prop_has]:
            # Exclude rooms that carry any of the forbidden properties.
            if all(map(lambda prop_hasnt: room not in _room_permissions[prop_hasnt], hasnt)):
                if room not in _rooms:
                    # Lazily join rooms we have not connected to yet.
                    site, roomid = room
                    new_room = _clients[site].get_room(roomid)
                    new_room.join()
                    _rooms[room] = new_room
                target_rooms.add(_rooms[room])

    for room in target_rooms:
        send_to_room(room, msg, **kwargs)
@require_chat
def handle_start():
    """Announce startup, linking the current git revision, to debug rooms."""
    commit_url = config.github + "/commit/" + git.rev()
    message = "SmokeDetector-ng started at revision [{}]({}).".format(
        git.short_rev(), commit_url)
    tell_rooms_with("debug", message, prefix=True)
@require_chat
def handle_signal(signal):
    """Announce recovery from OS signal *signal* to debug rooms."""
    tell_rooms_with("debug", "Recovered from signal %d." % (signal,))
@require_chat
def handle_err():
    """Announce recovery from an unhandled exception to debug rooms."""
    message = "Recovered from exception."
    tell_rooms_with("debug", message)
@require_chat
def get_last_messages(room, count):
    """Yield the most recent *count* Smokey messages in *room*, newest first.

    Queries our own message log (SmokeyMessage) and resolves each id back to
    a live chat Message via the room's client.
    """
    messages = SESSION.query(SmokeyMessage.message_id).filter(SmokeyMessage.chat_site_url == room._client.host,
                                                              SmokeyMessage.room_id == room.id)
    # Each row is a one-element tuple, hence the trailing comma unpack.
    for msg_id, in messages.order_by(SmokeyMessage.message_id.desc()).limit(count):
        yield room._client.get_message(msg_id)
| 27.05814 | 116 | 0.637086 |
ace5af3f624a21779f30cfaf6d054f5917b49449 | 6,074 | py | Python | src/main.py | pak1989/botty | 77e36a71c2c49912ed616b6fe71917591d4636cf | [
"MIT"
] | null | null | null | src/main.py | pak1989/botty | 77e36a71c2c49912ed616b6fe71917591d4636cf | [
"MIT"
] | null | null | null | src/main.py | pak1989/botty | 77e36a71c2c49912ed616b6fe71917591d4636cf | [
"MIT"
] | null | null | null | from bot import Bot
from game_recovery import GameRecovery
from game_stats import GameStats
from health_manager import HealthManager
from death_manager import DeathManager
from screen import Screen
from logger import Logger
import keyboard
import os
from config import Config
from utils.graphic_debugger import run_graphic_debugger
from version import __version__
from utils.auto_settings import adjust_settings
from utils.misc import kill_thread, send_discord
import threading
from beautifultable import BeautifulTable
import time
import logging
import cv2
import traceback
def run_bot(
    config: Config,
    screen: Screen,
    game_recovery: GameRecovery,
    game_stats: GameStats,
    death_manager: DeathManager,
    health_manager: HealthManager,
    pick_corpse: bool = False
):
    """Run one bot session in a worker thread and supervise it.

    Blocks until the session ends. On chicken, death, or max-game-length
    timeout it kills the bot thread, recovers to hero selection, and recurses
    to start the next game (picking up the corpse after a death). If recovery
    fails the whole process is terminated.
    """
    # Start bot thread
    bot = Bot(screen, game_stats, pick_corpse)
    bot_thread = threading.Thread(target=bot.start)
    bot_thread.daemon = True
    bot_thread.start()
    # Register that thread to the death and health manager so they can stop the bot thread if needed
    death_manager.set_callback(lambda: bot.stop() or kill_thread(bot_thread))
    health_manager.set_callback(lambda: bot.stop() or kill_thread(bot_thread))
    do_restart = False
    keyboard.add_hotkey(config.general["exit_key"], lambda: Logger.info(f'Force Exit') or os._exit(1))
    keyboard.add_hotkey(config.general['resume_key'], lambda: bot.toggle_pause())
    # Supervision loop: poll the monitors until a stop condition fires.
    while 1:
        health_manager.update_location(bot.get_curr_location())
        max_game_length_reached = game_stats.get_current_game_length() > config.general["max_game_length_s"]
        if max_game_length_reached or death_manager.died() or health_manager.did_chicken():
            # Some debug and logging
            if max_game_length_reached:
                Logger.info(f"Max game length reached. Attempting to restart {config.general['name']}!")
                if config.general["info_screenshots"]:
                    cv2.imwrite("./info_screenshots/info_max_game_length_reached_" + time.strftime("%Y%m%d_%H%M%S") + ".png", bot._screen.grab())
            elif death_manager.died():
                game_stats.log_death()
            elif health_manager.did_chicken():
                game_stats.log_chicken()
            bot.stop()
            kill_thread(bot_thread)
            # Try to recover from whatever situation we are and go back to hero selection
            do_restart = game_recovery.go_to_hero_selection()
            break
        time.sleep(0.5)
    bot_thread.join()
    if do_restart:
        # Reset flags before running a new bot
        death_manager.reset_death_flag()
        health_manager.reset_chicken_flag()
        game_stats.log_end_game(failed=max_game_length_reached)
        # Recurse to start the next game; pick_corpse=True handles a death.
        return run_bot(config, screen, game_recovery, game_stats, death_manager, health_manager, True)
    else:
        if config.general["info_screenshots"]:
            cv2.imwrite("./info_screenshots/info_could_not_recover_" + time.strftime("%Y%m%d_%H%M%S") + ".png", bot._screen.grab())
        Logger.error(f"{config.general['name']} could not recover from a max game length violation. Shutting down everything.")
        if config.general["custom_discord_hook"]:
            send_discord(f"{config.general['name']} got stuck and can not resume", config.general["custom_discord_hook"])
        # Hard exit: recovery failed, nothing sensible left to do.
        os._exit(1)
def main():
    """Entry point: wait for a hotkey, then launch the bot or a utility.

    Sets up logging and screenshot folders, prints the hotkey table, then
    polls the keyboard until the resume, auto-settings, or graphic-debugger
    hotkey is pressed.
    """
    config = Config(print_warnings=True)
    if config.general["logg_lvl"] == "info":
        Logger.init(logging.INFO)
    elif config.general["logg_lvl"] == "debug":
        Logger.init(logging.DEBUG)
    else:
        # NOTE(review): Logger stays uninitialized on this path — confirm
        # downstream logging tolerates an unknown logg_lvl.
        print(f"ERROR: Unkown logg_lvl {config.general['logg_lvl']}. Must be one of [info, debug]")
    # Create folders for debug screenshots if they don't exist yet.
    # Fix: os.makedirs(exist_ok=True) replaces the non-portable, racy
    # ``os.system("mkdir ...")`` shell-out.
    os.makedirs("info_screenshots", exist_ok=True)
    os.makedirs("loot_screenshots", exist_ok=True)
    keyboard.add_hotkey(config.general["exit_key"], lambda: Logger.info(f'Force Exit') or os._exit(1))
    print(f"============ Botty {__version__} [name: {config.general['name']}] ============")
    print("\nFor gettings started and documentation\nplease read https://github.com/aeon0/botty\n")
    table = BeautifulTable()
    table.rows.append([config.general['auto_settings_key'], "Adjust D2R settings"])
    table.rows.append([config.general['graphic_debugger_key'], "Graphic debugger"])
    table.rows.append([config.general['resume_key'], "Start / Pause Botty"])
    table.rows.append([config.general['exit_key'], "Stop bot"])
    table.columns.header = ["hotkey", "action"]
    print(table)
    print("\n")
    while 1:
        if keyboard.is_pressed(config.general['resume_key']):
            screen = Screen(config.general["monitor"])
            # Run health monitor thread
            health_manager = HealthManager(screen)
            health_monitor_thread = threading.Thread(target=health_manager.start_monitor)
            health_monitor_thread.daemon = True
            health_monitor_thread.start()
            # Run death monitor thread
            death_manager = DeathManager(screen)
            death_monitor_thread = threading.Thread(target=death_manager.start_monitor)
            death_monitor_thread.daemon = True
            death_monitor_thread.start()
            # Create other "global" instances
            game_recovery = GameRecovery(screen, death_manager)
            game_stats = GameStats()
            run_bot(config, screen, game_recovery, game_stats, death_manager, health_manager)
            break
        if keyboard.is_pressed(config.general['auto_settings_key']):
            adjust_settings()
            break
        elif keyboard.is_pressed(config.general['graphic_debugger_key']):
            run_graphic_debugger()
            break
        # Small sleep to keep the polling loop from pegging a CPU core.
        time.sleep(0.02)
if __name__ == "__main__":
    # To avoid cmd just closing down, except any errors and add a input() to the end
    try:
        main()
    except:  # noqa: E722 - intentionally broad so the console window stays open
        traceback.print_exc()
        print("Press Enter to exit ...")
        input()
| 43.385714 | 145 | 0.683899 |
ace5b2ee61c18512a976ded1285a92748ca21d5b | 3,399 | py | Python | uniback/db_interfaces/backup_sets.py | XXL6/Uniback | 8dea83b4ba75e7ad77b806253240e4e3e750a888 | [
"MIT"
] | null | null | null | uniback/db_interfaces/backup_sets.py | XXL6/Uniback | 8dea83b4ba75e7ad77b806253240e4e3e750a888 | [
"MIT"
] | null | null | null | uniback/db_interfaces/backup_sets.py | XXL6/Uniback | 8dea83b4ba75e7ad77b806253240e4e3e750a888 | [
"MIT"
] | null | null | null | from uniback.models.general import BackupSet, BackupObject
from uniback.tools.local_session import LocalSession
from uniback.dictionary.uniback_constants import BackupSetList, BackupSetTypes
import json
def delete_backup_set(id):
    """Delete backup set *id* together with its BackupObject rows.

    Consistency fix: delete_backup_sets() purges the set's BackupObject
    rows, but this single-set variant did not, leaving orphaned objects
    in the database.
    """
    with LocalSession() as session:
        session.query(BackupSet).filter_by(id=id).delete()
        session.query(BackupObject).filter_by(backup_set_id=id).delete()
        session.commit()
def delete_backup_sets(ids):
    """Delete every backup set in *ids* along with its backup objects."""
    with LocalSession() as session:
        for set_id in ids:
            session.query(BackupSet).filter_by(id=set_id).delete()
            session.query(BackupObject).filter_by(backup_set_id=set_id).delete()
        session.commit()
def get_backup_sets():
    """Return a list of {id, name, type} dicts for every stored backup set."""
    with LocalSession() as session:
        return [
            dict(
                id=backup_set.id,
                name=backup_set.name,
                type=BackupSetList.BACKUP_SETS[backup_set.type],
            )
            for backup_set in session.query(BackupSet)
        ]
def get_backup_sets_tuple():
    """Return (id, name) pairs for each backup set.

    Used to populate the choices of a select field on a form.
    """
    with LocalSession() as session:
        return [(bs.id, bs.name) for bs in session.query(BackupSet)]
def add_backup_set(data):
    """Create a backup set and its per-object rows from form *data*.

    Only the files/folders set type is currently supported: the JSON blob in
    data['backup_object_data']['file_list'] supplies both the object list and
    the tree display state persisted on the set.

    Raises:
        Exception: if data['type'] is not a supported backup set type.
    """
    with LocalSession() as session:
        if data['type'] == BackupSetTypes.BS_TYPE_FILESFOLDERS:
            json_object = json.loads(data['backup_object_data']['file_list'])
            backup_object_list = json_object['file_list']
            display_state = json.dumps(json_object['state'])
        else:
            raise Exception(f"Unsupported backup set {data['type']}")
        backup_set = (
            BackupSet(
                name=data['name'],
                type=data['type'],
                source=data['source'],
                data=display_state
            )
        )
        # (Fix: removed a leftover ``print(display_state)`` debug statement.)
        # Commit the set first so its generated id is available for the
        # child BackupObject rows.
        session.add(backup_set)
        session.commit()
        for backup_object in backup_object_list:
            new_backup_object = BackupObject(
                data=backup_object,
                backup_set_id=backup_set.id)
            session.add(new_backup_object)
        session.commit()
def get_backup_set_info(id):
    """Return (info_dict, item_data) describing backup set *id*.

    item_data is the list of raw BackupObject payloads belonging to the set.
    When the id is unknown, info_dict's fields are set to "UNDEFINED" (and
    the 'data' key is omitted, matching the defined-set shape only on hit).
    """
    with LocalSession() as session:
        backup_set = session.query(BackupSet).filter_by(id=id).first()
        items = session.query(BackupObject).filter_by(backup_set_id=id)
        set_item_list_data = [item.data for item in items]

        if backup_set:
            info_dict = dict(
                id=backup_set.id,
                name=backup_set.name,
                source=backup_set.source,
                type_name=BackupSetList.BACKUP_SETS[backup_set.type],
                data=backup_set.data,
                type=backup_set.type,
                time_added=backup_set.time_added,
            )
        else:
            info_dict = dict(
                id="UNDEFINED",
                name="UNDEFINED",
                source="UNDEFINED",
                type_name="UNDEFINED",
                type="UNDEFINED",
                time_added="UNDEFINED",
            )
    return info_dict, set_item_list_data
| 33.323529 | 79 | 0.589879 |
ace5b3a3fc82e37d1d6511f80ff8c497b7053c0c | 3,463 | py | Python | test/functional/mempool_limit.py | boris-gleec/GleecBTC | 219769f6ed50a5f0d54f0db7e1b7f6d03c1cf500 | [
"MIT"
] | 5 | 2020-06-19T11:29:40.000Z | 2021-08-20T08:57:24.000Z | test/functional/mempool_limit.py | boris-gleec/GleecBTC | 219769f6ed50a5f0d54f0db7e1b7f6d03c1cf500 | [
"MIT"
] | 3 | 2021-03-25T14:28:32.000Z | 2021-06-11T04:17:57.000Z | test/functional/mempool_limit.py | boris-gleec/GleecBTC | 219769f6ed50a5f0d54f0db7e1b7f6d03c1cf500 | [
"MIT"
] | 2 | 2020-04-17T15:50:08.000Z | 2020-08-27T15:32:34.000Z | #!/usr/bin/env python3
# Copyright (c) 2014-2020 The GleecBTC Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test mempool limiting together/eviction with the wallet."""
from decimal import Decimal
from test_framework.test_framework import GleecBTCTestFramework
from test_framework.util import assert_equal, assert_greater_than, assert_raises_rpc_error, create_confirmed_utxos, create_lots_of_big_transactions, gen_return_txouts
class MempoolLimitTest(GleecBTCTestFramework):
    """Fill a tiny mempool until eviction and check mempoolminfee rises."""

    def set_test_params(self):
        # Single node with a small 5 MB mempool so eviction triggers quickly;
        # spendzeroconfchange=0 stops the wallet chaining unconfirmed change.
        self.setup_clean_chain = True
        self.num_nodes = 1
        self.extra_args = [[
            "-acceptnonstdtxn=1",
            "-maxmempool=5",
            "-spendzeroconfchange=0",
        ]]

    def skip_test_if_missing_module(self):
        self.skip_if_no_wallet()

    def run_test(self):
        txouts = gen_return_txouts()
        relayfee = self.nodes[0].getnetworkinfo()['relayfee']

        self.log.info('Check that mempoolminfee is minrelytxfee')
        assert_equal(self.nodes[0].getmempoolinfo()['minrelaytxfee'], Decimal('0.00001000'))
        assert_equal(self.nodes[0].getmempoolinfo()['mempoolminfee'], Decimal('0.00001000'))

        txids = []
        utxos = create_confirmed_utxos(relayfee, self.nodes[0], 91)

        self.log.info('Create a mempool tx that will be evicted')
        us0 = utxos.pop()
        inputs = [{ "txid" : us0["txid"], "vout" : us0["vout"]}]
        outputs = {self.nodes[0].getnewaddress() : 0.0001}
        tx = self.nodes[0].createrawtransaction(inputs, outputs)
        self.nodes[0].settxfee(relayfee) # specifically fund this tx with low fee
        txF = self.nodes[0].fundrawtransaction(tx)
        self.nodes[0].settxfee(0) # return to automatic fee selection
        txFS = self.nodes[0].signrawtransactionwithwallet(txF['hex'])
        txid = self.nodes[0].sendrawtransaction(txFS['hex'])

        relayfee = self.nodes[0].getnetworkinfo()['relayfee']
        base_fee = relayfee*100
        # Flood the mempool with batches of large, increasingly high-fee
        # transactions until the low-fee tx above gets evicted.
        for i in range (3):
            txids.append([])
            txids[i] = create_lots_of_big_transactions(self.nodes[0], txouts, utxos[30*i:30*i+30], 30, (i+1)*base_fee)

        self.log.info('The tx should be evicted by now')
        assert txid not in self.nodes[0].getrawmempool()
        txdata = self.nodes[0].gettransaction(txid)
        assert txdata['confirmations'] == 0 #confirmation should still be 0

        self.log.info('Check that mempoolminfee is larger than minrelytxfee')
        assert_equal(self.nodes[0].getmempoolinfo()['minrelaytxfee'], Decimal('0.00001000'))
        assert_greater_than(self.nodes[0].getmempoolinfo()['mempoolminfee'], Decimal('0.00001000'))

        self.log.info('Create a mempool tx that will not pass mempoolminfee')
        us0 = utxos.pop()
        inputs = [{ "txid" : us0["txid"], "vout" : us0["vout"]}]
        outputs = {self.nodes[0].getnewaddress() : 0.0001}
        tx = self.nodes[0].createrawtransaction(inputs, outputs)
        # specifically fund this tx with a fee < mempoolminfee, >= than minrelaytxfee
        txF = self.nodes[0].fundrawtransaction(tx, {'feeRate': relayfee})
        txFS = self.nodes[0].signrawtransactionwithwallet(txF['hex'])
        assert_raises_rpc_error(-26, "mempool min fee not met", self.nodes[0].sendrawtransaction, txFS['hex'])
MempoolLimitTest().main()
| 46.797297 | 166 | 0.671961 |
ace5b4cb109e1542b1ee76413f3471c95c65f86f | 99,322 | py | Python | ibis/expr/api.py | jclay/ibis | a32dc3b58c485e4706e9e8493dc8c031fe14a865 | [
"Apache-2.0"
] | 1 | 2020-08-04T08:29:44.000Z | 2020-08-04T08:29:44.000Z | ibis/expr/api.py | jclay/ibis | a32dc3b58c485e4706e9e8493dc8c031fe14a865 | [
"Apache-2.0"
] | null | null | null | ibis/expr/api.py | jclay/ibis | a32dc3b58c485e4706e9e8493dc8c031fe14a865 | [
"Apache-2.0"
] | null | null | null | """Ibis expression API definitions."""
import collections
import datetime
import functools
import numbers
import operator
from typing import Union
import dateutil.parser
import pandas as pd
import toolz
import ibis
import ibis.common.exceptions as com
import ibis.expr.analysis as _L
import ibis.expr.analytics as _analytics
import ibis.expr.datatypes as dt
import ibis.expr.operations as ops
import ibis.expr.rules as rlz
import ibis.expr.schema as sch
import ibis.expr.types as ir
import ibis.util as util
from ibis.compat import to_date, to_time
from ibis.expr.analytics import bucket, histogram
from ibis.expr.groupby import GroupedTableExpr # noqa
from ibis.expr.random import random # noqa
from ibis.expr.schema import Schema
from ibis.expr.types import ( # noqa
ArrayColumn,
ArrayScalar,
ArrayValue,
BooleanColumn,
BooleanScalar,
BooleanValue,
CategoryScalar,
CategoryValue,
ColumnExpr,
DateColumn,
DateScalar,
DateValue,
DecimalColumn,
DecimalScalar,
DecimalValue,
Expr,
FloatingColumn,
FloatingScalar,
FloatingValue,
GeoSpatialColumn,
GeoSpatialScalar,
GeoSpatialValue,
IntegerColumn,
IntegerScalar,
IntegerValue,
IntervalColumn,
IntervalScalar,
IntervalValue,
LineStringColumn,
LineStringScalar,
LineStringValue,
MapColumn,
MapScalar,
MapValue,
MultiLineStringColumn,
MultiLineStringScalar,
MultiLineStringValue,
MultiPointColumn,
MultiPointScalar,
MultiPointValue,
MultiPolygonColumn,
MultiPolygonScalar,
MultiPolygonValue,
NullColumn,
NullScalar,
NullValue,
NumericColumn,
NumericScalar,
NumericValue,
PointColumn,
PointScalar,
PointValue,
PolygonColumn,
PolygonScalar,
PolygonValue,
ScalarExpr,
StringColumn,
StringScalar,
StringValue,
StructColumn,
StructScalar,
StructValue,
TableExpr,
TimeColumn,
TimeScalar,
TimestampColumn,
TimestampScalar,
TimestampValue,
TimeValue,
ValueExpr,
as_value_expr,
literal,
null,
param,
sequence,
)
from ibis.expr.window import (
cumulative_window,
range_window,
rows_with_max_lookback,
trailing_range_window,
trailing_window,
window,
)
__all__ = (
'aggregate',
'case',
'cast',
'coalesce',
'cross_join',
'cumulative_window',
'date',
'desc',
'Expr',
'expr_list',
'geo_area',
'geo_as_binary',
'geo_as_ewkb',
'geo_as_ewkt',
'geo_as_text',
'geo_azimuth',
'geo_buffer',
'geo_centroid',
'geo_contains',
'geo_contains_properly',
'geo_covers',
'geo_covered_by',
'geo_crosses',
'geo_d_fully_within',
'geo_disjoint',
'geo_difference',
'geo_d_within',
'geo_envelope',
'geo_equals',
'geo_geometry_n',
'geo_geometry_type',
'geo_intersection',
'geo_intersects',
'geo_is_valid',
'geo_line_locate_point',
'geo_line_merge',
'geo_line_substring',
'geo_ordering_equals',
'geo_overlaps',
'geo_touches',
'geo_distance',
'geo_end_point',
'geo_length',
'geo_max_distance',
'geo_n_points',
'geo_n_rings',
'geo_perimeter',
'geo_point',
'geo_point_n',
'geo_simplify',
'geo_srid',
'geo_start_point',
'geo_transform',
'geo_unary_union',
'geo_union',
'geo_within',
'geo_x',
'geo_x_max',
'geo_x_min',
'geo_y',
'geo_y_max',
'geo_y_min',
'greatest',
'ifelse',
'infer_dtype',
'infer_schema',
'interval',
'join',
'least',
'literal',
'NA',
'negate',
'now',
'null',
'param',
'pi',
'prevent_rewrite',
'random',
'range_window',
'row_number',
'rows_with_max_lookback',
'schema',
'Schema',
'sequence',
'table',
'time',
'timestamp',
'trailing_range_window',
'trailing_window',
'where',
'window',
)
# Shared type-mapping table, interpolated into several docstrings below
# (see ``schema.__doc__`` and ``cast.__doc__``).
_data_type_docs = """\
Ibis uses its own type aliases that map onto database types. See, for
example, the correspondence between Ibis type names and Impala type names:
Ibis type Impala Type
~~~~~~~~~ ~~~~~~~~~~~
int8 TINYINT
int16 SMALLINT
int32 INT
int64 BIGINT
float FLOAT
double DOUBLE
boolean BOOLEAN
string STRING
timestamp TIMESTAMP
decimal(p, s) DECIMAL(p,s)
interval(u) INTERVAL(u)"""

# Public aliases for the dtype/schema inference entry points.
infer_dtype = dt.infer
infer_schema = sch.infer

# Canonical NULL literal expression.
NA = null()
def schema(pairs=None, names=None, types=None):
    # Docstring is attached below via ``schema.__doc__ = ...``.
    if pairs is None:
        return Schema(names, types)
    return Schema.from_tuples(pairs)
def table(schema, name=None):
    """
    Create an unbound Ibis table for building expressions. It cannot be
    executed until it is bound to a physical table.

    Useful for testing

    Parameters
    ----------
    schema : ibis Schema
    name : string, default None
      Name for table

    Returns
    -------
    table : TableExpr
    """
    if not isinstance(schema, Schema):
        build = Schema.from_dict if isinstance(schema, dict) else Schema.from_tuples
        schema = build(schema)

    return ops.UnboundTable(schema, name=name).to_expr()
def desc(expr):
    """
    Create a descending sort key (for use in sort_by) from an array
    expression or a column name.

    Parameters
    ----------
    expr : array expression or string
      Can be a column name in the table being sorted

    Examples
    --------
    >>> import ibis
    >>> t = ibis.table([('g', 'string')])
    >>> result = t.group_by('g').size('count').sort_by(ibis.desc('count'))
    """
    if isinstance(expr, Expr):
        return ops.SortKey(expr, ascending=False).to_expr()
    # A bare name is resolved lazily against the table being sorted.
    return ops.DeferredSortKey(expr, ascending=False)
def timestamp(value, timezone=None):
    """
    Construct a timestamp literal if *value* is coercible to a timestamp.

    Parameters
    ----------
    value : timestamp value as string
    timezone: timezone as string
      defaults to None

    Returns
    --------
    result : TimestampScalar
    """
    if isinstance(value, str):
        try:
            value = pd.Timestamp(value, tz=timezone)
        except pd.errors.OutOfBoundsDatetime:
            # Out of pandas' nanosecond range: fall back to datetime parsing.
            value = dateutil.parser.parse(value)
    if isinstance(value, numbers.Integral):
        msg = (
            "Passing an integer to ibis.timestamp is not supported. Use "
            "ibis.literal({value:d}).to_timestamp() to create a timestamp "
            "expression from an integer."
        ).format(value=value)
        raise TypeError(msg)
    return literal(value, type=dt.Timestamp(timezone=timezone))
def date(value):
    """
    Construct a date literal if *value* is coercible to a date.

    Parameters
    ----------
    value : date value as string

    Returns
    --------
    result : DateScalar
    """
    parsed = to_date(value) if isinstance(value, str) else value
    return literal(parsed, type=dt.date)
def time(value):
    """
    Construct a time literal if *value* is coercible to a time.

    Parameters
    ----------
    value : time value as string

    Returns
    --------
    result : TimeScalar
    """
    parsed = to_time(value) if isinstance(value, str) else value
    return literal(parsed, type=dt.time)
def interval(
    value=None,
    unit='s',
    years=None,
    quarters=None,
    months=None,
    weeks=None,
    days=None,
    hours=None,
    minutes=None,
    seconds=None,
    milliseconds=None,
    microseconds=None,
    nanoseconds=None,
):
    """
    Returns an interval literal

    Parameters
    ----------
    value : int or datetime.timedelta, default None
    years : int, default None
    quarters : int, default None
    months : int, default None
    days : int, default None
    weeks : int, default None
    hours : int, default None
    minutes : int, default None
    seconds : int, default None
    milliseconds : int, default None
    microseconds : int, default None
    nanoseconds : int, default None

    Returns
    --------
    result : IntervalScalar
    """
    if value is not None:
        # Explicit-value path: a timedelta is normalized to whole seconds;
        # anything else must already be an integer count of ``unit``.
        if isinstance(value, datetime.timedelta):
            unit = 's'
            value = int(value.total_seconds())
        elif not isinstance(value, int):
            raise ValueError('Interval value must be an integer')
    else:
        # Keyword path: exactly one unit keyword must be given; it selects
        # both the unit code and the magnitude.
        kwds = [
            ('Y', years),
            ('Q', quarters),
            ('M', months),
            ('W', weeks),
            ('D', days),
            ('h', hours),
            ('m', minutes),
            ('s', seconds),
            ('ms', milliseconds),
            ('us', microseconds),
            ('ns', nanoseconds),
        ]
        defined_units = [(k, v) for k, v in kwds if v is not None]

        if len(defined_units) != 1:
            raise ValueError('Exactly one argument is required')

        unit, value = defined_units[0]

    value_type = literal(value).type()
    type = dt.Interval(unit, value_type)

    return literal(value, type=type).op().to_expr()
schema.__doc__ = """\
Validate and return an Ibis Schema object
{}
Parameters
----------
pairs : list of (name, type) tuples
Mutually exclusive with names/types
names : list of string
Field names
types : list of string
Field types
Examples
--------
>>> from ibis import schema
>>> sc = schema([('foo', 'string'),
... ('bar', 'int64'),
... ('baz', 'boolean')])
>>> sc2 = schema(names=['foo', 'bar', 'baz'],
... types=['string', 'int64', 'boolean'])
Returns
-------
schema : Schema
""".format(
_data_type_docs
)
def case():
    """
    Create a case builder that accepts self-contained boolean expressions
    (unlike the .case method on array expressions, whose branches are
    equality-compared against a fixed value expression).

    Use the .when method on the resulting object followed by .end to create a
    complete case.

    Examples
    --------
    >>> import ibis
    >>> cond1 = ibis.literal(1) == 1
    >>> cond2 = ibis.literal(2) == 1
    >>> result1 = 3
    >>> result2 = 4
    >>> expr = (ibis.case()
    ...         .when(cond1, result1)
    ...         .when(cond2, result2).end())

    Returns
    -------
    case : CaseBuilder
    """
    return ops.SearchedCaseBuilder()
def now():
    """
    Compute the current timestamp.

    Returns
    -------
    now : Timestamp scalar
    """
    return ops.TimestampNow().to_expr()
def row_number():
    """Analytic function for the current row number, starting at 0.

    No ORDER BY clause is required, but without one the ordering of the
    result is nondeterministic.

    Returns
    -------
    row_number : IntArray
    """
    return ops.RowNumber().to_expr()
# Mathematical constant expressions: Euler's number and pi.
e = ops.E().to_expr()
pi = ops.Pi().to_expr()
def _add_methods(klass, method_table):
for k, v in method_table.items():
setattr(klass, k, v)
def _unary_op(name, klass, doc=None):
def f(arg):
return klass(arg).to_expr()
f.__name__ = name
if doc is not None:
f.__doc__ = doc
else:
f.__doc__ = klass.__doc__
return f
def negate(arg):
    """
    Negate a numeric expression.

    Parameters
    ----------
    arg : numeric value expression

    Returns
    -------
    negated : type of caller
    """
    op = arg.op()
    # Some operations know how to produce their own negation (e.g. to fold
    # a double negative); otherwise wrap in a generic Negate node.
    if hasattr(op, 'negate'):
        return op.negate().to_expr()
    return ops.Negate(arg).to_expr()
def count(expr, where=None):
    """
    Compute cardinality / sequence size of expression. For array expressions,
    the count is excluding nulls. For tables, it's the size of the entire
    table.

    Returns
    -------
    counts : int64 type
    """
    op = expr.op()
    if isinstance(op, ops.DistinctColumn):
        # count over .distinct() lowers to COUNT(DISTINCT col).
        node = ops.CountDistinct(op.args[0], where)
    else:
        node = ops.Count(expr, where)
    return node.to_expr().name('count')
def group_concat(arg, sep=',', where=None):
    """
    Concatenate values using the indicated separator (comma by default) to
    produce a string.

    Parameters
    ----------
    arg : array expression
    sep : string, default ','
    where : bool, default None

    Returns
    -------
    concatenated : string scalar
    """
    return ops.GroupConcat(arg, sep, where).to_expr()
def arbitrary(arg, where=None, how=None):
    """
    Select the first / last non-null value in a column.

    Parameters
    ----------
    arg : array expression
    where: bool, default None
    how : {'first', 'last', 'heavy'}, default 'first'
      Heavy selects a frequently occurring value using the heavy hitters
      algorithm. Heavy is only supported by Clickhouse backend.

    Returns
    -------
    arbitrary element : scalar type of caller
    """
    return ops.Arbitrary(arg, how, where).to_expr()
def _binop_expr(name, klass):
    """Build a binary expression method named *name* backed by *klass*.

    Returns NotImplemented on coercion failure so Python can try the
    reflected operation on the other operand.
    """
    def f(self, other):
        try:
            rhs = as_value_expr(other)
            return klass(self, rhs).to_expr()
        except (com.IbisTypeError, NotImplementedError):
            return NotImplemented

    f.__name__ = name
    return f
def _rbinop_expr(name, klass):
    """Build a reflected binary-operation method (radd, rsub, ...): the
    coerced other operand becomes the left argument of *klass*."""
    def wrapper(self, other):
        lhs = as_value_expr(other)
        return klass(lhs, self).to_expr()
    wrapper.__name__ = name
    return wrapper
def _boolean_binary_op(name, klass):
    """Build a boolean binary method; rejects non-boolean right operands."""
    def wrapper(self, other):
        rhs = as_value_expr(other)
        if not isinstance(rhs, ir.BooleanValue):
            raise TypeError(rhs)
        return klass(self, rhs).to_expr()
    wrapper.__name__ = name
    return wrapper
def _boolean_unary_op(name, klass):
def f(self):
return klass(self).to_expr()
f.__name__ = name
return f
def _boolean_binary_rop(name, klass):
    """Build a reflected boolean binary method; rejects non-boolean operands
    and places the coerced operand on the left of *klass*."""
    def wrapper(self, other):
        lhs = as_value_expr(other)
        if not isinstance(lhs, ir.BooleanValue):
            raise TypeError(lhs)
        return klass(lhs, self).to_expr()
    wrapper.__name__ = name
    return wrapper
def _agg_function(name, klass, assign_default_name=True):
    """Build an aggregate method named *name* wrapping reduction *klass*.

    When *assign_default_name* is true the result expression is named after
    the aggregate itself.
    """
    def wrapper(self, where=None):
        result = klass(self, where).to_expr()
        if assign_default_name:
            result = result.name(name)
        return result
    wrapper.__name__ = name
    return wrapper
def _extract_field(name, klass):
    """Build a field-extraction method named *name*; the resulting
    expression is named after the extracted field."""
    def wrapper(self):
        return klass(self).to_expr().name(name)
    wrapper.__name__ = name
    return wrapper
# ---------------------------------------------------------------------
# Generic value API
def cast(arg, target_type):
    """Cast *arg* to *target_type*; returns *arg* unchanged when the cast is
    a no-op (docstring replaced below with the full data-type table)."""
    op = ops.Cast(arg, to=target_type)  # constructing the op validates the type
    if op.to.equals(arg.type()):
        # identical source and target type: nothing to do
        return arg
    if isinstance(op.to, (dt.Geography, dt.Geometry)):
        # casting between identical geo subtypes is also a no-op
        from_geotype = arg.type().geotype or 'geometry'
        if from_geotype == op.to.geotype:
            return arg
    result = op.to_expr()
    if arg.has_name():
        result = result.name('cast({}, {})'.format(arg.get_name(), op.to))
    return result
cast.__doc__ = """
Cast value(s) to indicated data type. Values that cannot be
successfully casted
Parameters
----------
target_type : data type name
Notes
-----
{0}
Returns
-------
cast_expr : ValueExpr
""".format(
_data_type_docs
)
def typeof(arg):
    """
    Return the data type of the argument according to the current backend.

    Returns
    -------
    typeof_arg : string
    """
    op = ops.TypeOf(arg)
    return op.to_expr()
def hash(arg, how='fnv'):
    """
    Compute an integer hash value for the indicated value expression.

    Parameters
    ----------
    arg : value expression
    how : {'fnv'}, default 'fnv'
        Hash algorithm to use

    Returns
    -------
    hash_value : int64 expression
    """
    op = ops.Hash(arg, how)
    return op.to_expr()
def fillna(arg, fill_value):
    """
    Replace any null values with the indicated fill value.

    Parameters
    ----------
    fill_value : scalar / array value or expression

    Examples
    --------
    >>> import ibis
    >>> table = ibis.table([('col', 'int64'), ('other_col', 'int64')])
    >>> result = table.col.fillna(5)
    >>> result2 = table.col.fillna(table.other_col * 3)

    Returns
    -------
    filled : type of caller
    """
    op = ops.IfNull(arg, fill_value)
    return op.to_expr()
def coalesce(*args):
    """
    Compute the first non-null value(s) from the passed arguments in
    left-to-right order. This is also known as "combine_first" in pandas.

    Parameters
    ----------
    *args : variable-length value list

    Examples
    --------
    >>> import ibis
    >>> expr1 = None
    >>> expr2 = 4
    >>> result = ibis.coalesce(expr1, expr2, 5)

    Returns
    -------
    coalesced : type of first provided argument
    """
    op = ops.Coalesce(args)
    return op.to_expr()
def greatest(*args):
    """
    Compute the largest value (row-wise, if any arrays are present) among
    the supplied arguments.

    Returns
    -------
    greatest : type depending on arguments
    """
    op = ops.Greatest(args)
    return op.to_expr()
def least(*args):
    """
    Compute the smallest value (row-wise, if any arrays are present) among
    the supplied arguments.

    Returns
    -------
    least : type depending on arguments
    """
    op = ops.Least(args)
    return op.to_expr()
def where(boolean_expr, true_expr, false_null_expr):
    """
    Equivalent to the ternary expression: if X then Y else Z.

    Parameters
    ----------
    boolean_expr : BooleanValue (array or scalar)
    true_expr : value
        Values for each True value
    false_null_expr : value
        Values for False or NULL values

    Returns
    -------
    result : arity depending on inputs
        Type of true_expr used to determine output type
    """
    return ops.Where(boolean_expr, true_expr, false_null_expr).to_expr()
def over(expr, window):
    """
    Turn an aggregation or full-sample analytic operation into a windowed
    operation. See ibis.window for more details on window configuration.

    Parameters
    ----------
    expr : value expression
    window : ibis.Window

    Returns
    -------
    expr : type of input
    """
    prior = expr.op()
    # Re-windowing an existing WindowOp combines the window specifications.
    if isinstance(prior, ops.WindowOp):
        windowed = prior.over(window)
    else:
        windowed = ops.WindowOp(expr, window)
    result = windowed.to_expr()
    # Preserve the input's name when it has one.
    try:
        name = expr.get_name()
    except com.ExpressionError:
        return result
    return result.name(name)
def value_counts(arg, metric_name='count'):
    """
    Compute a frequency table for this value expression.

    Returns
    -------
    counts : TableExpr
        Aggregated table
    """
    table = ir.find_base_table(arg)
    freq = table.count().name(metric_name)
    try:
        arg.get_name()
    except com.ExpressionError:
        # grouping requires a named expression
        arg = arg.name('unnamed')
    return table.group_by(arg).aggregate(freq)
def nullif(value, null_if_expr):
    """
    Set values to null if they match/equal a particular expression (scalar
    or array-valued).

    Common use to avoid divide-by-zero problems (get NULL instead of INF on
    divide-by-zero): 5 / expr.nullif(0)

    Parameters
    ----------
    value : value expression
        Value to modify
    null_if_expr : value expression (array or scalar)

    Returns
    -------
    null_if : type of caller
    """
    op = ops.NullIf(value, null_if_expr)
    return op.to_expr()
def between(arg, lower, upper):
    """
    Check if the input expr falls between the lower/upper bounds passed.
    Bounds are inclusive. All arguments must be comparable.

    Returns
    -------
    is_between : BooleanValue
    """
    lower_expr = as_value_expr(lower)
    upper_expr = as_value_expr(upper)
    return ops.Between(arg, lower_expr, upper_expr).to_expr()
def isin(arg, values):
    """
    Check whether the value expression is contained within the indicated
    list of values.

    Parameters
    ----------
    values : list, tuple, or array expression
        The values can be scalar or array-like. Each of them must be
        comparable with the calling expression, or None (NULL).

    Examples
    --------
    >>> import ibis
    >>> table = ibis.table([('string_col', 'string')])
    >>> table2 = ibis.table([('other_string_col', 'string')])
    >>> expr = table.string_col.isin(['foo', 'bar', 'baz'])
    >>> expr2 = table.string_col.isin(table2.other_string_col)

    Returns
    -------
    contains : BooleanValue
    """
    return ops.Contains(arg, values).to_expr()
def notin(arg, values):
    """
    Like isin, but checks whether this expression's value(s) are not
    contained in the passed values. See isin docs for full usage.
    """
    return ops.NotContains(arg, values).to_expr()
# Arithmetic dunder implementations shared by the numeric method tables
# below; the reflected (r*) variants swap operand order.
add = _binop_expr('__add__', ops.Add)
sub = _binop_expr('__sub__', ops.Subtract)
mul = _binop_expr('__mul__', ops.Multiply)
div = _binop_expr('__div__', ops.Divide)
floordiv = _binop_expr('__floordiv__', ops.FloorDivide)
pow = _binop_expr('__pow__', ops.Power)
mod = _binop_expr('__mod__', ops.Modulus)
radd = _rbinop_expr('__radd__', ops.Add)
rsub = _rbinop_expr('__rsub__', ops.Subtract)
rdiv = _rbinop_expr('__rdiv__', ops.Divide)
rfloordiv = _rbinop_expr('__rfloordiv__', ops.FloorDivide)
def substitute(arg, value, replacement=None, else_=None):
    """
    Substitute (replace) one or more values in a value expression.

    Parameters
    ----------
    value : expr-like or dict
    replacement : expr-like, optional
        If an expression is passed to value, this must be passed
    else_ : expr, optional

    Returns
    -------
    replaced : case statement (for now!)
    """
    builder = arg.case()
    if isinstance(value, dict):
        # sorted for a deterministic case ordering
        for search, result in sorted(value.items()):
            builder = builder.when(search, result)
    else:
        builder = builder.when(value, replacement)
    # unmatched values fall through to else_, or pass through unchanged
    builder = builder.else_(else_ if else_ is not None else arg)
    return builder.end()
def _case(arg):
    """Create a new SimpleCaseBuilder to chain multiple if-else statements. Add
    new search expressions with the .when method. These must be comparable with
    this array expression. Conclude by calling .end()

    Returns
    -------
    builder : CaseBuilder

    Examples
    --------
    >>> import ibis
    >>> t = ibis.table([('string_col', 'string')], name='t')
    >>> expr = t.string_col
    >>> case_expr = (expr.case()
    ...              .when('a', 'an a')
    ...              .when('b', 'a b')
    ...              .else_('null or (not a and not b)')
    ...              .end())
    >>> case_expr  # doctest: +NORMALIZE_WHITESPACE
    ref_0
    UnboundTable[table]
      name: t
      schema:
        string_col : string
    <BLANKLINE>
    SimpleCase[string*]
      base:
        string_col = Column[string*] 'string_col' from table
          ref_0
      cases:
        Literal[string]
          a
        Literal[string]
          b
      results:
        Literal[string]
          an a
        Literal[string]
          a b
      default:
        Literal[string]
          null or (not a and not b)
    """
    # Validation of each case happens in the builder's .when() calls.
    return ops.SimpleCaseBuilder(arg)
def cases(arg, case_result_pairs, default=None):
    """
    Create a case expression in one shot.

    Returns
    -------
    case_expr : SimpleCase
    """
    builder = arg.case()
    for case_expr, result in case_result_pairs:
        builder = builder.when(case_expr, result)
    if default is not None:
        builder = builder.else_(default)
    return builder.end()
# Method table installed on ir.ValueExpr (scalar and column values) below.
_generic_value_methods = dict(
    hash=hash,
    cast=cast,
    coalesce=coalesce,
    typeof=typeof,
    fillna=fillna,
    nullif=nullif,
    between=between,
    isin=isin,
    notin=notin,
    isnull=_unary_op('isnull', ops.IsNull),
    notnull=_unary_op('notnull', ops.NotNull),
    over=over,
    case=_case,
    cases=cases,
    substitute=substitute,
    __eq__=_binop_expr('__eq__', ops.Equals),
    __ne__=_binop_expr('__ne__', ops.NotEquals),
    __ge__=_binop_expr('__ge__', ops.GreaterEqual),
    __gt__=_binop_expr('__gt__', ops.Greater),
    __le__=_binop_expr('__le__', ops.LessEqual),
    __lt__=_binop_expr('__lt__', ops.Less),
    collect=_unary_op('collect', ops.ArrayCollect),
    identical_to=_binop_expr('identical_to', ops.IdenticalTo),
)
# Generic aggregate functions; each names its result after itself.
approx_nunique = _agg_function('approx_nunique', ops.HLLCardinality, True)
approx_median = _agg_function('approx_median', ops.CMSMedian, True)
max = _agg_function('max', ops.Max, True)
min = _agg_function('min', ops.Min, True)
nunique = _agg_function('nunique', ops.CountDistinct, True)
def lag(arg, offset=None, default=None):
    """Build a Lag window operation over *arg* with optional offset/default."""
    op = ops.Lag(arg, offset, default)
    return op.to_expr()
def lead(arg, offset=None, default=None):
    """Build a Lead window operation over *arg* with optional offset/default."""
    op = ops.Lead(arg, offset, default)
    return op.to_expr()
# Analytic / window unary operations.
first = _unary_op('first', ops.FirstValue)
last = _unary_op('last', ops.LastValue)
rank = _unary_op('rank', ops.MinRank)
dense_rank = _unary_op('dense_rank', ops.DenseRank)
percent_rank = _unary_op('percent_rank', ops.PercentRank)
cummin = _unary_op('cummin', ops.CumulativeMin)
cummax = _unary_op('cummax', ops.CumulativeMax)
def ntile(arg, buckets):
    """Build an NTile window operation over *arg* with *buckets* buckets."""
    op = ops.NTile(arg, buckets)
    return op.to_expr()
def nth(arg, k):
    """
    Analytic operation computing nth value from start of sequence.

    Parameters
    ----------
    arg : array expression
    k : int
        Desired rank value

    Returns
    -------
    nth : type of argument
    """
    op = ops.NthValue(arg, k)
    return op.to_expr()
def distinct(arg):
    """
    Compute set of unique values occurring in this array. Can not be used
    in conjunction with other array expressions from the same context
    (because it's a cardinality-modifying pseudo-reduction).
    """
    return ops.DistinctColumn(arg).to_expr()
def topk(arg, k, by=None):
    """
    Build a TopK filter expression selecting the top *k* values of *arg*,
    optionally ranked by the *by* metric.

    Returns
    -------
    topk : TopK filter expression
    """
    return ops.TopK(arg, k, by=by).to_expr()
def bottomk(arg, k, by=None):
    """Bottom-k selection; not implemented."""
    raise NotImplementedError
def _generic_summary(arg, exact_nunique=False, prefix=None):
    """
    Compute a set of summary metrics from the input value expression.

    Parameters
    ----------
    arg : value expression
    exact_nunique : boolean, default False
        Compute the exact number of distinct values (slower)
    prefix : string, default None
        String prefix for metric names

    Returns
    -------
    summary : (count, # nulls, nunique)
    """
    if exact_nunique:
        uniques = arg.nunique().name('uniques')
    else:
        uniques = arg.approx_nunique().name('uniques')
    metrics = [arg.count(), arg.isnull().sum().name('nulls'), uniques]
    return _wrap_summary_metrics(metrics, prefix)
def _numeric_summary(arg, exact_nunique=False, prefix=None):
    """
    Compute a set of summary metrics from the input numeric value expression.

    Parameters
    ----------
    arg : numeric value expression
    exact_nunique : boolean, default False
    prefix : string, default None
        String prefix for metric names

    Returns
    -------
    summary : (count, # nulls, min, max, sum, mean, nunique)
    """
    if exact_nunique:
        uniques = arg.nunique().name('nunique')
    else:
        uniques = arg.approx_nunique().name('approx_nunique')
    metrics = [
        arg.count(),
        arg.isnull().sum().name('nulls'),
        arg.min(),
        arg.max(),
        arg.sum(),
        arg.mean(),
        uniques,
    ]
    return _wrap_summary_metrics(metrics, prefix)
def _wrap_summary_metrics(metrics, prefix):
    """Bundle *metrics* into an expression list, optionally prefixing the
    metric names with *prefix*."""
    wrapped = expr_list(metrics)
    return wrapped.prefix(prefix) if prefix is not None else wrapped
def expr_list(exprs):
    """Build an ExpressionList from *exprs*; each expression must be named."""
    for expression in exprs:
        expression.get_name()  # raises if any expression is unnamed
    return ops.ExpressionList(exprs).to_expr()
# Method table installed on ir.ColumnExpr (column-only operations) below.
_generic_column_methods = dict(
    bottomk=bottomk,
    distinct=distinct,
    nunique=nunique,
    topk=topk,
    summary=_generic_summary,
    count=count,
    arbitrary=arbitrary,
    min=min,
    max=max,
    approx_median=approx_median,
    approx_nunique=approx_nunique,
    group_concat=group_concat,
    value_counts=value_counts,
    first=first,
    last=last,
    dense_rank=dense_rank,
    rank=rank,
    percent_rank=percent_rank,
    # nth=nth,
    ntile=ntile,
    lag=lag,
    lead=lead,
    cummin=cummin,
    cummax=cummax,
)
# TODO: should bound to AnyValue and AnyColumn instead, but that breaks
# doc builds, because it checks methods on ColumnExpr
_add_methods(ir.ValueExpr, _generic_value_methods)  # scalar + column values
_add_methods(ir.ColumnExpr, _generic_column_methods)  # column-only methods
# ---------------------------------------------------------------------
# Numeric API
def round(arg, digits=None):
    """
    Round values either to integer or indicated number of decimal places.

    Returns
    -------
    rounded : type depending on digits argument
      digits None or 0
        decimal types: decimal
        other numeric types: bigint
      digits nonzero
        decimal types: decimal
        other numeric types: double
    """
    return ops.Round(arg, digits).to_expr()
def log(arg, base=None):
    """
    Perform the logarithm using a specified base.

    Parameters
    ----------
    base : number, default None
        If None, base e is used

    Returns
    -------
    logarithm : double type
    """
    return ops.Log(arg, base).to_expr()
def clip(arg, lower=None, upper=None):
    """
    Trim values at input threshold(s).

    Parameters
    ----------
    lower : float
    upper : float

    Returns
    -------
    clipped : same as type of the input
    """
    if lower is None and upper is None:
        # clipping with no bounds is meaningless
        raise ValueError("at least one of lower and " "upper must be provided")
    return ops.Clip(arg, lower, upper).to_expr()
def quantile(arg, quantile, interpolation='linear'):
    """
    Return value at the given quantile, a la numpy.percentile.

    Parameters
    ----------
    quantile : float/int or array-like
        0 <= quantile <= 1, the quantile(s) to compute
    interpolation : {'linear', 'lower', 'higher', 'midpoint', 'nearest'}
        This optional parameter specifies the interpolation method to use,
        when the desired quantile lies between two data points `i` and `j`:

        * linear: `i + (j - i) * fraction`, where `fraction` is the
          fractional part of the index surrounded by `i` and `j`.
        * lower: `i`.
        * higher: `j`.
        * nearest: `i` or `j` whichever is nearest.
        * midpoint: (`i` + `j`) / 2.

    Returns
    -------
    quantile
        if scalar input, scalar type, same as input
        if array input, list of scalar type
    """
    # A sequence of quantiles lowers to a single MultiQuantile op.
    if isinstance(quantile, collections.abc.Sequence):
        return ops.MultiQuantile(arg, quantile, interpolation).to_expr()
    return ops.Quantile(arg, quantile, interpolation).to_expr()
def _integer_to_timestamp(arg, unit='s'):
    """
    Convert integer UNIX timestamp (at some resolution) to a timestamp type.

    Parameters
    ----------
    unit : {'s', 'ms', 'us'}
        Second (s), millisecond (ms), or microsecond (us) resolution

    Returns
    -------
    timestamp : timestamp value expression
    """
    return ops.TimestampFromUNIX(arg, unit).to_expr()
def _integer_to_interval(arg, unit='s'):
    """
    Convert integer to an interval with the same inner type.

    Parameters
    ----------
    unit : {'Y', 'M', 'W', 'D', 'h', 'm', 's', 'ms', 'us', 'ns'}

    Returns
    -------
    interval : interval value expression
    """
    # Fix: the unit list in the original docstring had a broken quote
    # around 's' ("'m', s', 'ms'").
    op = ops.IntervalFromInteger(arg, unit)
    return op.to_expr()
# Elementwise numeric unary operations.
abs = _unary_op('abs', ops.Abs)
ceil = _unary_op('ceil', ops.Ceil)
degrees = _unary_op('degrees', ops.Degrees)
exp = _unary_op('exp', ops.Exp)
floor = _unary_op('floor', ops.Floor)
log2 = _unary_op('log2', ops.Log2)
log10 = _unary_op('log10', ops.Log10)
ln = _unary_op('ln', ops.Ln)
radians = _unary_op('radians', ops.Radians)
sign = _unary_op('sign', ops.Sign)
sqrt = _unary_op('sqrt', ops.Sqrt)
# TRIGONOMETRIC OPERATIONS
# atan2 is binary (y, x); the rest are elementwise unary.
acos = _unary_op('acos', ops.Acos)
asin = _unary_op('asin', ops.Asin)
atan = _unary_op('atan', ops.Atan)
atan2 = _binop_expr('atan2', ops.Atan2)
cos = _unary_op('cos', ops.Cos)
cot = _unary_op('cot', ops.Cot)
sin = _unary_op('sin', ops.Sin)
tan = _unary_op('tan', ops.Tan)
# Method table installed on ir.NumericValue below; includes both named
# methods and the arithmetic dunders (plus their reflected forms).
_numeric_value_methods = dict(
    __neg__=negate,
    abs=abs,
    ceil=ceil,
    degrees=degrees,
    deg2rad=radians,
    floor=floor,
    radians=radians,
    rad2deg=degrees,
    sign=sign,
    exp=exp,
    sqrt=sqrt,
    log=log,
    ln=ln,
    log2=log2,
    log10=log10,
    round=round,
    nullifzero=_unary_op('nullifzero', ops.NullIfZero),
    zeroifnull=_unary_op('zeroifnull', ops.ZeroIfNull),
    clip=clip,
    __add__=add,
    add=add,
    __sub__=sub,
    sub=sub,
    __mul__=mul,
    mul=mul,
    __div__=div,
    __truediv__=div,
    __floordiv__=floordiv,
    div=div,
    floordiv=floordiv,
    __rdiv__=rdiv,
    __rtruediv__=rdiv,
    __rfloordiv__=rfloordiv,
    rdiv=rdiv,
    rfloordiv=rfloordiv,
    __pow__=pow,
    pow=pow,
    __radd__=add,
    radd=add,
    __rsub__=rsub,
    rsub=rsub,
    __rmul__=_rbinop_expr('__rmul__', ops.Multiply),
    __rpow__=_rbinop_expr('__rpow__', ops.Power),
    __mod__=mod,
    __rmod__=_rbinop_expr('__rmod__', ops.Modulus),
    # trigonometric operations
    acos=acos,
    asin=asin,
    atan=atan,
    atan2=atan2,
    cos=cos,
    cot=cot,
    sin=sin,
    tan=tan,
)
def convert_base(arg, from_base, to_base):
    """
    Convert number (as integer or string) from one base to another.

    Parameters
    ----------
    arg : string or integer
    from_base : integer
    to_base : integer

    Returns
    -------
    converted : string
    """
    op = ops.BaseConvert(arg, from_base, to_base)
    return op.to_expr()
# Method table installed on ir.IntegerValue below.
_integer_value_methods = dict(
    to_timestamp=_integer_to_timestamp,
    to_interval=_integer_to_interval,
    convert_base=convert_base,
)
# Numeric aggregate and cumulative operations.
mean = _agg_function('mean', ops.Mean, True)
cummean = _unary_op('cummean', ops.CumulativeMean)
sum = _agg_function('sum', ops.Sum, True)
cumsum = _unary_op('cumsum', ops.CumulativeSum)
def std(arg, where=None, how='sample'):
    """
    Compute standard deviation of numeric array.

    Parameters
    ----------
    how : {'sample', 'pop'}, default 'sample'

    Returns
    -------
    stdev : double scalar
    """
    return ops.StandardDev(arg, how, where).to_expr().name('std')
def variance(arg, where=None, how='sample'):
    """
    Compute variance of numeric array.

    Parameters
    ----------
    how : {'sample', 'pop'}, default 'sample'

    Returns
    -------
    var : double scalar
    """
    # Fix: docstring previously said "standard deviation" / "stdev",
    # copy-pasted from std(); this computes the variance.
    expr = ops.Variance(arg, how, where).to_expr()
    expr = expr.name('var')
    return expr
def correlation(left, right, where=None, how='sample'):
    """
    Compute correlation of two numeric arrays.

    Parameters
    ----------
    how : {'sample', 'pop'}, default 'sample'

    Returns
    -------
    corr : double scalar
    """
    return ops.Correlation(left, right, how, where).to_expr()
def covariance(left, right, where=None, how='sample'):
    """
    Compute covariance of two numeric arrays.

    Parameters
    ----------
    how : {'sample', 'pop'}, default 'sample'

    Returns
    -------
    cov : double scalar
    """
    return ops.Covariance(left, right, how, where).to_expr()
# Method table installed on ir.NumericColumn below.
_numeric_column_methods = dict(
    mean=mean,
    cummean=cummean,
    sum=sum,
    cumsum=cumsum,
    quantile=quantile,
    std=std,
    var=variance,
    corr=correlation,
    cov=covariance,
    bucket=bucket,
    histogram=histogram,
    summary=_numeric_summary,
)
# Method table installed on ir.FloatingValue below.
_floating_value_methods = dict(
    # Fix: was _unary_op('isnull', ops.IsNan), which gave the generated
    # isnan method a misleading __name__ of 'isnull'.
    isnan=_unary_op('isnan', ops.IsNan),
    isinf=_unary_op('isinf', ops.IsInf),
)
# Wire the numeric method tables onto the expression classes.
_add_methods(ir.NumericValue, _numeric_value_methods)
_add_methods(ir.IntegerValue, _integer_value_methods)
_add_methods(ir.FloatingValue, _floating_value_methods)
_add_methods(ir.NumericColumn, _numeric_column_methods)
# ----------------------------------------------------------------------
# GeoSpatial API
def geo_area(arg):
    """Compute the area of a geometry/geography value (double scalar)."""
    return ops.GeoArea(arg).to_expr()
def geo_as_binary(arg):
    """Get the geometry as well-known bytes (WKB), SRID excluded (binary)."""
    return ops.GeoAsBinary(arg).to_expr()
def geo_as_ewkt(arg):
    """Get the geometry as well-known text (WKT), SRID included (string)."""
    return ops.GeoAsEWKT(arg).to_expr()
def geo_as_text(arg):
    """Get the geometry as well-known text (WKT), SRID excluded (string)."""
    return ops.GeoAsText(arg).to_expr()
def geo_as_ewkb(arg):
    """Get the geometry as well-known bytes (WKB), SRID included (binary)."""
    return ops.GeoAsEWKB(arg).to_expr()
def geo_contains(left, right):
    """Check whether geometry *left* contains geometry *right* (boolean)."""
    return ops.GeoContains(left, right).to_expr()
def geo_contains_properly(left, right):
    """Check whether *left* contains *right* with no common border points
    (boolean)."""
    return ops.GeoContainsProperly(left, right).to_expr()
def geo_covers(left, right):
    """Check whether geometry *left* covers geometry *right* (boolean)."""
    return ops.GeoCovers(left, right).to_expr()
def geo_covered_by(left, right):
    """Check whether geometry *left* is covered by geometry *right*
    (boolean)."""
    return ops.GeoCoveredBy(left, right).to_expr()
def geo_crosses(left, right):
    """Check whether the geometries have some, but not all, interior points
    in common (boolean)."""
    return ops.GeoCrosses(left, right).to_expr()
def geo_d_fully_within(left, right, distance):
    """Check whether *left* lies entirely within *distance* (double) of
    *right* (boolean)."""
    return ops.GeoDFullyWithin(left, right, distance).to_expr()
def geo_disjoint(left, right):
    """Check whether the geometries have no points in common (boolean)."""
    return ops.GeoDisjoint(left, right).to_expr()
def geo_d_within(left, right, distance):
    """Check whether *left* lies within *distance* (double) of *right*
    (boolean)."""
    return ops.GeoDWithin(left, right, distance).to_expr()
def geo_equals(left, right):
    """Check whether the two geometries are the same (boolean)."""
    return ops.GeoEquals(left, right).to_expr()
def geo_geometry_n(arg, n):
    """Get the 1-based *n*-th geometry of a multi geometry (geometry)."""
    return ops.GeoGeometryN(arg, n).to_expr()
def geo_geometry_type(arg):
    """Get the type of a geometry as a string scalar."""
    return ops.GeoGeometryType(arg).to_expr()
def geo_intersects(left, right):
    """Check whether the geometries share any points (boolean)."""
    return ops.GeoIntersects(left, right).to_expr()
def geo_is_valid(arg):
    """Check whether the geometry is valid (boolean)."""
    return ops.GeoIsValid(arg).to_expr()
def geo_line_locate_point(left, right):
    """
    Locate the distance a point falls along the length of a line.

    Returns a float between zero and one: the location of the closest point
    on the linestring *left* to the point *right*, as a fraction of the
    total 2d line length.

    Parameters
    ----------
    left : linestring
    right : point

    Returns
    -------
    distance : float scalar
    """
    return ops.GeoLineLocatePoint(left, right).to_expr()
def geo_line_merge(arg):
    """
    Merge a MultiLineString into a LineString.

    Returns a (set of) LineString(s) formed by sewing together the
    constituent line work of a MultiLineString. If a geometry other than a
    LineString or MultiLineString is given, this will return an empty
    geometry collection.

    Parameters
    ----------
    arg : (multi)linestring

    Returns
    -------
    merged : geometry scalar
    """
    return ops.GeoLineMerge(arg).to_expr()
def geo_line_substring(arg, start, end):
    """
    Clip a substring from a LineString.

    Returns a linestring that is a substring of the input one, starting and
    ending at the given fractions (*start*, *end*, floats between zero and
    one) of the total 2d length. Only works with linestrings.

    Parameters
    ----------
    arg : linestring
    start : float
    end : float

    Returns
    -------
    substring : linestring scalar
    """
    return ops.GeoLineSubstring(arg, start, end).to_expr()
def geo_ordering_equals(left, right):
    """Check whether two geometries are equal AND their coordinates are in
    the same order (boolean)."""
    return ops.GeoOrderingEquals(left, right).to_expr()
def geo_overlaps(left, right):
    """Check whether the geometries share space and have the same dimension
    but are not completely contained by each other (boolean)."""
    return ops.GeoOverlaps(left, right).to_expr()
def geo_point(
    left: Union[NumericValue, int, float],
    right: Union[NumericValue, int, float],
) -> ops.GeoPoint:
    """
    Return a point constructed on the fly from the provided coordinate
    values. Constant coordinates result in construction of a POINT literal.

    Parameters
    ----------
    left : NumericValue, integer or float
    right : NumericValue, integer or float

    Returns
    -------
    point
    """
    return ops.GeoPoint(left, right).to_expr()
def geo_touches(left, right):
    """Check whether the geometries have at least one point in common but
    do not intersect (boolean)."""
    return ops.GeoTouches(left, right).to_expr()
def geo_distance(left, right):
    """Compute the distance between two geometry/geography values (double
    scalar)."""
    return ops.GeoDistance(left, right).to_expr()
def geo_length(arg):
    """Compute the length of a geometry/geography value (double scalar)."""
    return ops.GeoLength(arg).to_expr()
def geo_perimeter(arg):
    """Compute the perimeter of a geometry/geography value (double scalar)."""
    return ops.GeoPerimeter(arg).to_expr()
def geo_max_distance(left, right):
    """Return the 2-dimensional maximum distance between two geometries in
    projected units (double scalar). When both inputs are the same geometry,
    this is the distance between its two most distant vertices."""
    return ops.GeoMaxDistance(left, right).to_expr()
def geo_unary_union(arg):
    """
    Aggregate a set of geometries into a union.

    This corresponds to the aggregate version of the PostGIS ST_Union. We
    give it a different name (following the corresponding method in
    GeoPandas) to avoid name conflicts with the non-aggregate version.

    Parameters
    ----------
    arg : geometry column

    Returns
    -------
    union : geometry scalar
    """
    return ops.GeoUnaryUnion(arg).to_expr().name('union')
def geo_union(left, right):
    """Return the pointwise union of two geometries (geometry scalar); the
    non-aggregate counterpart of PostGIS ST_Union."""
    return ops.GeoUnion(left, right).to_expr()
def geo_x(arg):
    """Return the X coordinate of a point, or NULL if not available (double
    scalar). Input must be a point."""
    return ops.GeoX(arg).to_expr()
def geo_y(arg):
    """Return the Y coordinate of a point, or NULL if not available (double
    scalar). Input must be a point."""
    return ops.GeoY(arg).to_expr()
def geo_x_min(arg):
    """Returns X minima of a geometry

    Fix: the original docstring said "Y minima", copy-pasted from
    geo_y_min; this operation is the X minimum.

    Parameters
    ----------
    arg : geometry

    Returns
    -------
    XMin : double scalar
    """
    op = ops.GeoXMin(arg)
    return op.to_expr()
def geo_x_max(arg):
    """Return the X maximum of a geometry (double scalar)."""
    return ops.GeoXMax(arg).to_expr()
def geo_y_min(arg):
    """Return the Y minimum of a geometry (double scalar)."""
    return ops.GeoYMin(arg).to_expr()
def geo_y_max(arg):
    """Return the Y maximum of a geometry (double scalar)."""
    return ops.GeoYMax(arg).to_expr()
def geo_start_point(arg):
    """Return the first point of a LINESTRING geometry as a POINT, or NULL
    if the input is not a LINESTRING (geometry scalar)."""
    return ops.GeoStartPoint(arg).to_expr()
def geo_end_point(arg):
    """Return the last point of a LINESTRING geometry as a POINT, or NULL
    if the input is not a LINESTRING (geometry scalar)."""
    return ops.GeoEndPoint(arg).to_expr()
def geo_point_n(arg, n):
    """Return the *n*-th point of a single linestring in the geometry.
    Negative values count backwards from the end (-1 is the last point).
    Returns NULL if there is no linestring in the geometry."""
    return ops.GeoPointN(arg, n).to_expr()
def geo_n_points(arg):
    """Return the number of points in a geometry; works for all geometries
    (double scalar)."""
    return ops.GeoNPoints(arg).to_expr()
def geo_n_rings(arg):
    """For a polygon or multi-polygon, return the number of rings,
    counting the outer rings as well (double scalar)."""
    return ops.GeoNRings(arg).to_expr()
def geo_srid(arg):
    """Return the spatial reference identifier of the geometry (integer
    scalar)."""
    return ops.GeoSRID(arg).to_expr()
def geo_set_srid(arg, srid):
    """Set the spatial reference identifier of the geometry to *srid*
    (integer); returns a geometry."""
    return ops.GeoSetSRID(arg, srid).to_expr()
def geo_buffer(arg, radius):
    """Return a geometry covering all points within *radius* (double) of the
    input geometry, computed in its Spatial Reference System."""
    return ops.GeoBuffer(arg, radius).to_expr()
def geo_centroid(arg):
    """Return the centroid of the geometry.

    Parameters
    ----------
    arg : geometry

    Returns
    -------
    centroid : geometry scalar
    """
    return ops.GeoCentroid(arg).to_expr()
def geo_envelope(arg):
    """Return the bounding box of the geometry as a geometry.

    Parameters
    ----------
    arg : geometry

    Returns
    -------
    envelope : geometry scalar
    """
    return ops.GeoEnvelope(arg).to_expr()
def geo_within(left, right):
    """Test whether the first geometry lies completely inside the second.

    Parameters
    ----------
    left : geometry
    right : geometry

    Returns
    -------
    within : bool scalar
    """
    return ops.GeoWithin(left, right).to_expr()
def geo_azimuth(left, right):
    """
    Compute the azimuth of the segment from the first point to the second.

    NOTE(review): the previous docstring described a "touches" predicate
    and was incorrect for this operation.  The exact angle convention
    (e.g. radians clockwise from north, as in PostGIS ST_Azimuth) depends
    on the backend — confirm against the target backend.

    Parameters
    ----------
    left : point
    right : point

    Returns
    -------
    azimuth : float scalar
    """
    op = ops.GeoAzimuth(left, right)
    return op.to_expr()
def geo_intersection(left, right):
    """Return the geometric intersection of two geometries.

    Parameters
    ----------
    left : geometry
    right : geometry

    Returns
    -------
    intersection : geometry scalar
    """
    return ops.GeoIntersection(left, right).to_expr()
def geo_difference(left, right):
    """Return the geometric difference of two geometries.

    Parameters
    ----------
    left : geometry
    right : geometry

    Returns
    -------
    difference : geometry scalar
    """
    return ops.GeoDifference(left, right).to_expr()
def geo_simplify(arg, tolerance, preserve_collapsed):
    """
    Simplify a given geometry.

    Parameters
    ----------
    arg : geometry
    tolerance : float
    preserve_collapsed : boolean
        Whether to keep geometries that would otherwise collapse during
        simplification (previously misdocumented as 'preserved_collapsed').

    Returns
    -------
    simplified : geometry scalar
    """
    op = ops.GeoSimplify(arg, tolerance, preserve_collapsed)
    return op.to_expr()
def geo_transform(arg, srid):
    """Reproject a geometry into the spatial reference system ``srid``.

    Parameters
    ----------
    arg : geometry
    srid : integer

    Returns
    -------
    transformed : geometry scalar
    """
    return ops.GeoTransform(arg, srid).to_expr()
# Methods installed on every GeoSpatial value expression (scalar and
# column).  Keys are the public method names; values are the free
# functions defined above that build the corresponding `ops` node.
_geospatial_value_methods = dict(
    area=geo_area,
    as_binary=geo_as_binary,
    as_ewkb=geo_as_ewkb,
    as_ewkt=geo_as_ewkt,
    as_text=geo_as_text,
    azimuth=geo_azimuth,
    buffer=geo_buffer,
    centroid=geo_centroid,
    contains=geo_contains,
    contains_properly=geo_contains_properly,
    covers=geo_covers,
    covered_by=geo_covered_by,
    crosses=geo_crosses,
    d_fully_within=geo_d_fully_within,
    difference=geo_difference,
    disjoint=geo_disjoint,
    distance=geo_distance,
    d_within=geo_d_within,
    end_point=geo_end_point,
    envelope=geo_envelope,
    equals=geo_equals,
    geometry_n=geo_geometry_n,
    geometry_type=geo_geometry_type,
    intersection=geo_intersection,
    intersects=geo_intersects,
    is_valid=geo_is_valid,
    line_locate_point=geo_line_locate_point,
    line_merge=geo_line_merge,
    line_substring=geo_line_substring,
    length=geo_length,
    max_distance=geo_max_distance,
    n_points=geo_n_points,
    n_rings=geo_n_rings,
    ordering_equals=geo_ordering_equals,
    overlaps=geo_overlaps,
    perimeter=geo_perimeter,
    point_n=geo_point_n,
    set_srid=geo_set_srid,
    simplify=geo_simplify,
    srid=geo_srid,
    start_point=geo_start_point,
    touches=geo_touches,
    transform=geo_transform,
    union=geo_union,
    within=geo_within,
    x=geo_x,
    x_max=geo_x_max,
    x_min=geo_x_min,
    y=geo_y,
    y_max=geo_y_max,
    y_min=geo_y_min,
)
# unary_union is an aggregation, so it is only attached to columns.
_geospatial_column_methods = dict(unary_union=geo_unary_union)
_add_methods(ir.GeoSpatialValue, _geospatial_value_methods)
_add_methods(ir.GeoSpatialColumn, _geospatial_column_methods)
# ----------------------------------------------------------------------
# Boolean API
# TODO: logical binary operators for BooleanValue
def ifelse(arg, true_expr, false_expr):
    """Shorthand for a ternary expression on a boolean value.

    ``bool_expr.ifelse(0, 1)`` is equivalent to the SQL expression
    ``CASE WHEN bool_expr THEN 0 ELSE 1 END``.
    """
    # The result type is the promotion of the true/false expressions;
    # conflicting types are resolved exactly as in case expressions.
    builder = ops.SearchedCaseBuilder()
    return builder.when(arg, true_expr).else_(false_expr).end()
# Operators available on any boolean value (scalar or column).
_boolean_value_methods = dict(
    ifelse=ifelse,
    __and__=_boolean_binary_op('__and__', ops.And),
    __or__=_boolean_binary_op('__or__', ops.Or),
    __xor__=_boolean_binary_op('__xor__', ops.Xor),
    __rand__=_boolean_binary_rop('__rand__', ops.And),
    __ror__=_boolean_binary_rop('__ror__', ops.Or),
    __rxor__=_boolean_binary_rop('__rxor__', ops.Xor),
    __invert__=_boolean_unary_op('__invert__', ops.Not),
)
# Reductions and cumulative aggregations only make sense on columns.
_boolean_column_methods = dict(
    any=_unary_op('any', ops.Any),
    notany=_unary_op('notany', ops.NotAny),
    all=_unary_op('all', ops.All),
    # Fixed: the display name for the NotAll op previously said 'notany'.
    notall=_unary_op('notall', ops.NotAll),
    cumany=_unary_op('cumany', ops.CumulativeAny),
    cumall=_unary_op('cumall', ops.CumulativeAll),
)
_add_methods(ir.BooleanValue, _boolean_value_methods)
_add_methods(ir.BooleanColumn, _boolean_column_methods)
# ---------------------------------------------------------------------
# String API
def _string_substr(self, start, length=None):
    """Extract a substring of each string value.

    Parameters
    ----------
    start : int
        0-based index of the first character (like Python)
    length : int, optional
        Maximum number of characters to take; if omitted, runs to the
        end of the string

    Returns
    -------
    substrings : type of caller
    """
    return ops.Substring(self, start, length).to_expr()
def _string_left(self, nchars):
    """Return up to ``nchars`` characters from the start of each string.

    Convenience wrapper around ``substr``.

    Returns
    -------
    substrings : type of caller
    """
    return self.substr(0, length=nchars)
def _string_right(self, nchars):
    """Return up to ``nchars`` characters from the end of each string.

    Returns
    -------
    substrings : type of caller
    """
    op = ops.StrRight(self, nchars)
    return op.to_expr()
def repeat(self, n):
    """Return the string repeated ``n`` times.

    Parameters
    ----------
    n : int

    Returns
    -------
    result : string
    """
    op = ops.Repeat(self, n)
    return op.to_expr()
def _translate(self, from_str, to_str):
    """Replace characters using a character-by-character mapping.

    Every occurrence of ``from_str[i]`` is replaced by ``to_str[i]``.
    To avoid unexpected behavior ``from_str`` should not be longer than
    ``to_str``.

    Parameters
    ----------
    from_str : string
    to_str : string

    Examples
    --------
    >>> import ibis
    >>> table = ibis.table([('string_col', 'string')])
    >>> expr = table.string_col.translate('a', 'b')
    >>> expr = table.string_col.translate('a', 'bc')

    Returns
    -------
    translated : string
    """
    op = ops.Translate(self, from_str, to_str)
    return op.to_expr()
def _string_find(self, substr, start=None, end=None):
    """Return the 0-based position of the first occurrence of ``substr``.

    Parameters
    ----------
    substr : string
    start : int, default None
        Optional 0-based position to begin searching from
    end : int, default None
        Not currently implemented

    Returns
    -------
    position : int, 0 indexed

    Raises
    ------
    NotImplementedError
        If ``end`` is supplied.
    """
    if end is not None:
        raise NotImplementedError
    op = ops.StringFind(self, substr, start, end)
    return op.to_expr()
def _lpad(self, length, pad=' '):
    """Left-pad (or right-truncate) the string to exactly ``length``.

    Parameters
    ----------
    length : int
    pad : string, default is ' '

    Examples
    --------
    >>> import ibis
    >>> table = ibis.table([('strings', 'string')])
    >>> expr = table.strings.lpad(5, '-')
    >>> expr = ibis.literal('a').lpad(5, '-')  # 'a' becomes '----a'
    >>> expr = ibis.literal('abcdefg').lpad(5, '-')  # 'abcdefg' becomes 'abcde' # noqa: E501

    Returns
    -------
    padded : string
    """
    op = ops.LPad(self, length, pad)
    return op.to_expr()
def _rpad(self, length, pad=' '):
    """Right-pad (or right-truncate) the string to exactly ``length``.

    Parameters
    ----------
    length : int
    pad : string, default is ' '

    Examples
    --------
    >>> import ibis
    >>> table = ibis.table([('string_col', 'string')])
    >>> expr = table.string_col.rpad(5, '-')
    >>> expr = ibis.literal('a').rpad(5, '-')  # 'a' becomes 'a----'
    >>> expr = ibis.literal('abcdefg').rpad(5, '-')  # 'abcdefg' becomes 'abcde' # noqa: E501

    Returns
    -------
    padded : string
    """
    op = ops.RPad(self, length, pad)
    return op.to_expr()
def _find_in_set(self, str_list):
    """Return the 0-based position of the string within ``str_list``.

    Returns -1 when the search string is not found or when it contains a
    comma; no string in ``str_list`` may contain a comma.

    Parameters
    ----------
    str_list : list of strings

    Examples
    --------
    >>> import ibis
    >>> table = ibis.table([('strings', 'string')])
    >>> result = table.strings.find_in_set(['a', 'b'])

    Returns
    -------
    position : int
    """
    op = ops.FindInSet(self, str_list)
    return op.to_expr()
def _string_join(self, strings):
    """Join ``strings`` using the calling string as the separator.

    Parameters
    ----------
    strings : list of strings

    Examples
    --------
    >>> import ibis
    >>> sep = ibis.literal(',')
    >>> result = sep.join(['a', 'b', 'c'])

    Returns
    -------
    joined : string
    """
    op = ops.StringJoin(self, strings)
    return op.to_expr()
def _string_like(self, patterns):
    """SQL ``LIKE`` wildcard matching.

    ``%`` matches any run of characters; ``_`` matches one character.
    Use ``re_search`` or ``rlike`` for regex-based matching instead.

    Parameters
    ----------
    patterns : str or List[str]
        One or more patterns.  Given a list, a row is ``True`` when
        **any** pattern matches it.

    Returns
    -------
    matched : ir.BooleanColumn
    """
    exprs = [
        ops.StringSQLLike(self, pattern).to_expr()
        for pattern in util.promote_list(patterns)
    ]
    return functools.reduce(operator.or_, exprs)
def _string_ilike(self, patterns):
    """Case-insensitive SQL ``LIKE`` (``ILIKE``) wildcard matching.

    ``%`` matches any run of characters; ``_`` matches one character.
    Use ``re_search`` or ``rlike`` for regex-based matching instead.

    Parameters
    ----------
    patterns : str or List[str]
        One or more patterns.  Given a list, a row is ``True`` when
        **any** pattern matches it.

    Returns
    -------
    matched : ir.BooleanColumn
    """
    exprs = [
        ops.StringSQLILike(self, pattern).to_expr()
        for pattern in util.promote_list(patterns)
    ]
    return functools.reduce(operator.or_, exprs)
def re_search(arg, pattern):
    """Whether the regular expression matches each string value.

    Parameters
    ----------
    pattern : string (regular expression string)

    Returns
    -------
    searched : boolean value
    """
    op = ops.RegexSearch(arg, pattern)
    return op.to_expr()
def regex_extract(arg, pattern, index):
    """Extract the 0-indexed match group ``index`` of ``pattern``.

    Parameters
    ----------
    pattern : string (regular expression string)
    index : int, 0 indexed

    Returns
    -------
    extracted : string
    """
    op = ops.RegexExtract(arg, pattern, index)
    return op.to_expr()
def regex_replace(arg, pattern, replacement):
    """Replace every match of ``pattern`` with ``replacement``.

    The replacement string may itself contain regex backreferences.

    Parameters
    ----------
    pattern : string (regular expression string)
    replacement : string (can be regular expression string)

    Examples
    --------
    >>> import ibis
    >>> table = ibis.table([('strings', 'string')])
    >>> result = table.strings.replace('(b+)', r'<\1>')  # 'aaabbbaa' becomes 'aaa<bbb>aaa' # noqa: E501

    Returns
    -------
    modified : string
    """
    op = ops.RegexReplace(arg, pattern, replacement)
    return op.to_expr()
def _string_replace(arg, pattern, replacement):
    """Replace every exact occurrence of ``pattern`` with ``replacement``.

    Analogous to Python's built-in ``str.replace``.

    Parameters
    ----------
    pattern : string
    replacement : string

    Examples
    --------
    >>> import ibis
    >>> table = ibis.table([('strings', 'string')])
    >>> result = table.strings.replace('aaa', 'foo')  # 'aaabbbaaa' becomes 'foobbbfoo' # noqa: E501

    Returns
    -------
    replaced : string
    """
    op = ops.StringReplace(arg, pattern, replacement)
    return op.to_expr()
def to_timestamp(arg, format_str, timezone=None):
    """Parse each string into a timestamp using ``format_str``.

    Parameters
    ----------
    format_str : A format string potentially of the type '%Y-%m-%d'
    timezone : An optional string indicating the timezone,
        i.e. 'America/New_York'

    Examples
    --------
    >>> import ibis
    >>> date_as_str = ibis.literal('20170206')
    >>> result = date_as_str.to_timestamp('%Y%m%d')

    Returns
    -------
    parsed : TimestampValue
    """
    op = ops.StringToTimestamp(arg, format_str, timezone)
    return op.to_expr()
def parse_url(arg, extract, key=None):
    """Extract the URL component named by ``extract``.

    When ``extract`` is ``'QUERY'``, ``key`` optionally selects the
    value of a single query parameter.

    Parameters
    ----------
    extract : one of {'PROTOCOL', 'HOST', 'PATH', 'REF',
        'AUTHORITY', 'FILE', 'USERINFO', 'QUERY'}
    key : string (optional)

    Examples
    --------
    >>> url = "https://www.youtube.com/watch?v=kEuEcWfewf8&t=10"
    >>> parse_url(url, 'QUERY', 'v')  # doctest: +SKIP
    'kEuEcWfewf8'

    Returns
    -------
    extracted : string
    """
    op = ops.ParseURL(arg, extract, key)
    return op.to_expr()
def _string_contains(arg, substr):
    """Whether ``substr`` occurs verbatim inside the calling string.

    Implemented as ``find(substr) >= 0``.

    Parameters
    ----------
    substr : str or ibis.expr.types.StringValue

    Returns
    -------
    contains : ibis.expr.types.BooleanValue
    """
    return arg.find(substr) >= 0
def _string_split(arg, delimiter):
    """Split ``arg`` into an array of strings on ``delimiter``.

    Parameters
    ----------
    arg : str or ibis.expr.types.StringValue
    delimiter : str or ibis.expr.types.StringValue

    Returns
    -------
    splitsville : Array[String]
    """
    op = ops.StringSplit(arg, delimiter)
    return op.to_expr()
def _string_concat(*args):
    """Concatenate the given string expressions (implements ``+``)."""
    op = ops.StringConcat(args)
    return op.to_expr()
def _string_dunder_contains(arg, substr):
raise TypeError('Use val.contains(arg)')
def _string_getitem(self, key):
    """Index or slice a string expression.

    An integer ``key`` yields the single character at that position; a
    slice yields the corresponding substring.  Only a step of 1 and
    non-negative literal bounds are supported; expression-valued bounds
    are passed through without validation.
    """
    if isinstance(key, slice):
        start, stop, step = key.start, key.stop, key.step
        # Expression-valued steps cannot be checked here; literal steps
        # must be exactly 1.
        if step is not None and not isinstance(step, ir.Expr) and step != 1:
            raise ValueError('Step can only be 1')
        if not isinstance(start, ir.Expr):
            if start is not None and start < 0:
                raise ValueError(
                    'Negative slicing not yet supported, got start value of '
                    '{:d}'.format(start)
                )
            if start is None:
                start = 0
        if not isinstance(stop, ir.Expr):
            if stop is not None and stop < 0:
                raise ValueError(
                    'Negative slicing not yet supported, got stop value of '
                    '{:d}'.format(stop)
                )
            if stop is None:
                # An omitted stop means "to the end of the string".
                stop = self.length()
        return self.substr(start, stop - start)
    elif isinstance(key, int):
        return self.substr(key, 1)
    raise NotImplementedError(
        'string __getitem__[{}]'.format(type(key).__name__)
    )
# Methods installed on string value expressions.  Simple single-op
# wrappers are generated via _unary_op; the remainder are the free
# functions defined above.
_string_value_methods = dict(
    __getitem__=_string_getitem,
    length=_unary_op('length', ops.StringLength),
    lower=_unary_op('lower', ops.Lowercase),
    upper=_unary_op('upper', ops.Uppercase),
    reverse=_unary_op('reverse', ops.Reverse),
    ascii_str=_unary_op('ascii', ops.StringAscii),
    strip=_unary_op('strip', ops.Strip),
    lstrip=_unary_op('lstrip', ops.LStrip),
    rstrip=_unary_op('rstrip', ops.RStrip),
    capitalize=_unary_op('initcap', ops.Capitalize),
    convert_base=convert_base,
    __contains__=_string_dunder_contains,
    contains=_string_contains,
    like=_string_like,
    ilike=_string_ilike,
    rlike=re_search,
    replace=_string_replace,
    re_search=re_search,
    re_extract=regex_extract,
    re_replace=regex_replace,
    to_timestamp=to_timestamp,
    parse_url=parse_url,
    substr=_string_substr,
    left=_string_left,
    right=_string_right,
    repeat=repeat,
    find=_string_find,
    translate=_translate,
    find_in_set=_find_in_set,
    split=_string_split,
    join=_string_join,
    lpad=_lpad,
    rpad=_rpad,
    __add__=_string_concat,
    # Reflected add reverses the operand order before concatenating.
    __radd__=lambda *args: _string_concat(*args[::-1]),
    __mul__=mul,
    __rmul__=mul,
)
_add_methods(ir.StringValue, _string_value_methods)
# ---------------------------------------------------------------------
# Array API
def _array_slice(array, index):
    """Slice or index `array` at `index`.

    Parameters
    ----------
    index : int or ibis.expr.types.IntegerValue or slice

    Returns
    -------
    sliced_array : ibis.expr.types.ValueExpr
        For an ``int`` or :class:`~ibis.expr.types.IntegerValue` index,
        the element type of `array`; for a ``slice``, the same type as
        the input.
    """
    if not isinstance(index, slice):
        return ops.ArrayIndex(array, index).to_expr()
    start, stop, step = index.start, index.stop, index.step
    if (start is not None and start < 0) or (
        stop is not None and stop < 0
    ):
        raise ValueError('negative slicing not yet supported')
    if step is not None and step != 1:
        raise NotImplementedError('step can only be 1')
    node = ops.ArraySlice(array, 0 if start is None else start, stop)
    return node.to_expr()
# Methods installed on array value expressions; + concatenates and
# * repeats, mirroring Python list semantics.
_array_column_methods = dict(
    length=_unary_op('length', ops.ArrayLength),
    __getitem__=_array_slice,
    __add__=_binop_expr('__add__', ops.ArrayConcat),
    __radd__=toolz.flip(_binop_expr('__radd__', ops.ArrayConcat)),
    __mul__=_binop_expr('__mul__', ops.ArrayRepeat),
    __rmul__=_binop_expr('__rmul__', ops.ArrayRepeat),
)
_add_methods(ir.ArrayValue, _array_column_methods)
# ---------------------------------------------------------------------
# Map API
def get(expr, key, default=None):
    """Return the value mapped to ``key``, or ``default`` when absent.

    Parameters
    ----------
    key : any
    default : any
    """
    op = ops.MapValueOrDefaultForKey(expr, key, default)
    return op.to_expr()
# Methods installed on map value expressions; + merges two maps.
_map_column_methods = dict(
    get=get,
    length=_unary_op('length', ops.MapLength),
    __getitem__=_binop_expr('__getitem__', ops.MapValueForKey),
    keys=_unary_op('keys', ops.MapKeys),
    values=_unary_op('values', ops.MapValues),
    __add__=_binop_expr('__add__', ops.MapConcat),
    __radd__=toolz.flip(_binop_expr('__radd__', ops.MapConcat)),
)
_add_methods(ir.MapValue, _map_column_methods)
# ---------------------------------------------------------------------
# Struct API
def _struct_get_field(expr, field_name):
    """Access the ``field_name`` field of a ``Struct`` expression.

    Parameters
    ----------
    field_name : str
        Name of the field to read.  Must be a Python ``str``;
        programmatic struct field access is not yet supported.

    Returns
    -------
    value_expr : ibis.expr.types.ValueExpr
        An expression typed as the accessed field, named after it.
    """
    field = ops.StructField(expr, field_name).to_expr()
    return field.name(field_name)
# Struct fields are reachable both as attributes and via subscripting.
_struct_column_methods = dict(
    __getattr__=_struct_get_field, __getitem__=_struct_get_field
)
_add_methods(ir.StructValue, _struct_column_methods)
# ---------------------------------------------------------------------
# Timestamp API
def _timestamp_truncate(arg, unit):
    """Truncate the timestamp to ``unit``, zeroing out smaller units.

    Commonly used for time series resampling.

    Parameters
    ----------
    unit : string, one of below table
        'Y': year
        'Q': quarter
        'M': month
        'W': week
        'D': day
        'h': hour
        'm': minute
        's': second
        'ms': millisecond
        'us': microsecond
        'ns': nanosecond

    Returns
    -------
    truncated : timestamp
    """
    op = ops.TimestampTruncate(arg, unit)
    return op.to_expr()
def _timestamp_strftime(arg, format_str):
    """Format the timestamp with ``format_str``.

    The supported directives may depend on the backend, but ANSI
    strftime (e.g. Python's ``datetime.strftime``) is the target.

    Parameters
    ----------
    format_str : string

    Returns
    -------
    formatted : string
    """
    op = ops.Strftime(arg, format_str)
    return op.to_expr()
def _timestamp_time(arg):
    """Return a Time node for a Timestamp.

    Certain operations can act on this node without materializing the
    underlying structure (which is inefficient in pandas/numpy).

    Returns
    -------
    TimeValue
    """
    return ops.Time(arg).to_expr()
def _timestamp_date(arg):
    """Return the Date component of a Timestamp.

    Returns
    -------
    DateValue
    """
    return ops.Date(arg).to_expr()
def _timestamp_sub(left, right):
    """Subtract from a timestamp.

    ``timestamp - timestamp`` produces a difference; anything else is
    handed to TimestampSub, which validates the operand.
    """
    right = as_value_expr(right)
    if isinstance(right, ir.TimestampValue):
        node = ops.TimestampDiff(left, right)
    else:
        node = ops.TimestampSub(left, right)  # let the operation validate
    return node.to_expr()
_timestamp_add = _binop_expr('__add__', ops.TimestampAdd)
_timestamp_radd = _binop_expr('__radd__', ops.TimestampAdd)
_day_of_week = property(
lambda self: ops.DayOfWeekNode(self).to_expr(),
doc="""\
Namespace expression containing methods for extracting information about the
day of the week of a TimestampValue or DateValue expression.
Returns
-------
DayOfWeek
An namespace expression containing methods to use to extract information.
""",
)
_timestamp_value_methods = dict(
strftime=_timestamp_strftime,
year=_extract_field('year', ops.ExtractYear),
month=_extract_field('month', ops.ExtractMonth),
day=_extract_field('day', ops.ExtractDay),
day_of_week=_day_of_week,
day_of_year=_extract_field('day_of_year', ops.ExtractDayOfYear),
quarter=_extract_field('quarter', ops.ExtractQuarter),
hour=_extract_field('hour', ops.ExtractHour),
minute=_extract_field('minute', ops.ExtractMinute),
second=_extract_field('second', ops.ExtractSecond),
millisecond=_extract_field('millisecond', ops.ExtractMillisecond),
truncate=_timestamp_truncate,
time=_timestamp_time,
date=_timestamp_date,
__sub__=_timestamp_sub,
sub=_timestamp_sub,
__add__=_timestamp_add,
add=_timestamp_add,
__radd__=_timestamp_radd,
radd=_timestamp_radd,
__rsub__=_timestamp_sub,
rsub=_timestamp_sub,
)
_add_methods(ir.TimestampValue, _timestamp_value_methods)
# ---------------------------------------------------------------------
# Date API
def _date_truncate(arg, unit):
    """Truncate the date to ``unit``, zeroing out smaller units.

    Commonly used for time series resampling.

    Parameters
    ----------
    unit : string, one of below table
        'Y': year
        'Q': quarter
        'M': month
        'W': week
        'D': day

    Returns
    -------
    truncated : date
    """
    op = ops.DateTruncate(arg, unit)
    return op.to_expr()
def _date_sub(left, right):
    """Subtract from a date.

    ``date - date`` produces a difference; anything else is handed to
    DateSub, which validates the operand.
    """
    right = rlz.one_of([rlz.date, rlz.interval], right)
    if isinstance(right, ir.DateValue):
        node = ops.DateDiff(left, right)
    else:
        node = ops.DateSub(left, right)  # let the operation validate
    return node.to_expr()
_date_add = _binop_expr('__add__', ops.DateAdd)
# Methods installed on date value expressions.
_date_value_methods = dict(
    strftime=_timestamp_strftime,
    year=_extract_field('year', ops.ExtractYear),
    month=_extract_field('month', ops.ExtractMonth),
    day=_extract_field('day', ops.ExtractDay),
    day_of_week=_day_of_week,
    day_of_year=_extract_field('day_of_year', ops.ExtractDayOfYear),
    quarter=_extract_field('quarter', ops.ExtractQuarter),
    truncate=_date_truncate,
    __sub__=_date_sub,
    sub=_date_sub,
    # NOTE(review): __rsub__ reuses _date_sub, so `other - date_expr` is
    # evaluated with the date as the *left* operand — confirm that this
    # operand order is intended.
    __rsub__=_date_sub,
    rsub=_date_sub,
    __add__=_date_add,
    add=_date_add,
    __radd__=_date_add,
    radd=_date_add,
)
_add_methods(ir.DateValue, _date_value_methods)
def _to_unit(arg, target_unit):
    """Convert an interval expression to ``target_unit``.

    No-op when the expression is already in the target unit.  Note that
    the conversion mutates the unit of the expression's type in place.
    """
    if arg._dtype.unit != target_unit:
        arg = util.convert_unit(arg, arg._dtype.unit, target_unit)
        # In-place update of the dtype's unit on the converted expression.
        arg.type().unit = target_unit
    return arg
def _interval_property(target_unit, name):
    # Build a read-only property that converts an interval expression to
    # ``target_unit``; ``name`` is the human-readable unit used in the
    # generated property docstring.
    return property(
        functools.partial(_to_unit, target_unit=target_unit),
        doc="""Extract the number of {0}s from an IntervalValue expression.
    Returns
    -------
    IntegerValue
        The number of {0}s in the expression
    """.format(
            name
        ),
    )
_interval_add = _binop_expr('__add__', ops.IntervalAdd)
_interval_radd = _binop_expr('__radd__', ops.IntervalAdd)
_interval_sub = _binop_expr('__sub__', ops.IntervalSubtract)
_interval_mul = _binop_expr('__mul__', ops.IntervalMultiply)
_interval_rmul = _binop_expr('__rmul__', ops.IntervalMultiply)
_interval_floordiv = _binop_expr('__floordiv__', ops.IntervalFloorDivide)
# Methods installed on interval value expressions; the unit properties
# (years, months, ...) convert the interval via _interval_property.
_interval_value_methods = dict(
    to_unit=_to_unit,
    years=_interval_property('Y', 'year'),
    quarters=_interval_property('Q', 'quarter'),
    months=_interval_property('M', 'month'),
    weeks=_interval_property('W', 'week'),
    days=_interval_property('D', 'day'),
    hours=_interval_property('h', 'hour'),
    minutes=_interval_property('m', 'minute'),
    seconds=_interval_property('s', 'second'),
    milliseconds=_interval_property('ms', 'millisecond'),
    microseconds=_interval_property('us', 'microsecond'),
    nanoseconds=_interval_property('ns', 'nanosecond'),
    __add__=_interval_add,
    add=_interval_add,
    __sub__=_interval_sub,
    sub=_interval_sub,
    __radd__=_interval_radd,
    radd=_interval_radd,
    __mul__=_interval_mul,
    mul=_interval_mul,
    __rmul__=_interval_rmul,
    rmul=_interval_rmul,
    __floordiv__=_interval_floordiv,
    floordiv=_interval_floordiv,
    __neg__=negate,
    negate=negate,
)
_add_methods(ir.IntervalValue, _interval_value_methods)
# ---------------------------------------------------------------------
# Time API
def between_time(arg, lower, upper, timezone=None):
    """Check if the input expr falls between the lower/upper bounds passed.

    Bounds are inclusive.  All arguments must be comparable.

    Parameters
    ----------
    lower : str, datetime.time
    upper : str, datetime.time
    timezone : str, timezone, default None
        When given and the expression wraps a Time node, the underlying
        timestamp is cast to this timezone before comparing.

    Returns
    -------
    BooleanValue
    """
    op = arg.op()
    if isinstance(op, ops.Time):
        # Here we pull out the first argument to the underlying Time operation
        # which is by definition (in _timestamp_value_methods) a
        # TimestampValue. We do this so that we can potentially specialize the
        # "between time" operation for timestamp_value_expr.time().between().
        # A similar mechanism is triggered when creating expressions like
        # t.column.distinct().count(), which is turned into t.column.nunique().
        arg = op.arg
        if timezone is not None:
            arg = arg.cast(dt.Timestamp(timezone=timezone))
        op = ops.BetweenTime(arg, lower, upper)
    else:
        op = ops.Between(arg, lower, upper)
    return op.to_expr()
def _time_truncate(arg, unit):
    """Truncate the time to ``unit``, zeroing out smaller units.

    Commonly used for time series resampling.

    Parameters
    ----------
    unit : string, one of below table
        'h': hour
        'm': minute
        's': second
        'ms': millisecond
        'us': microsecond
        'ns': nanosecond

    Returns
    -------
    truncated : time
    """
    op = ops.TimeTruncate(arg, unit)
    return op.to_expr()
def _time_sub(left, right):
    """Subtract from a time value.

    ``time - time`` produces a difference; anything else is handed to
    TimeSub, which validates the operand.
    """
    right = as_value_expr(right)
    if isinstance(right, ir.TimeValue):
        node = ops.TimeDiff(left, right)
    else:
        node = ops.TimeSub(left, right)  # let the operation validate
    return node.to_expr()
_time_add = _binop_expr('__add__', ops.TimeAdd)
# Methods installed on time value expressions.
_time_value_methods = dict(
    between=between_time,
    truncate=_time_truncate,
    hour=_extract_field('hour', ops.ExtractHour),
    minute=_extract_field('minute', ops.ExtractMinute),
    second=_extract_field('second', ops.ExtractSecond),
    millisecond=_extract_field('millisecond', ops.ExtractMillisecond),
    __sub__=_time_sub,
    sub=_time_sub,
    # NOTE(review): __rsub__ reuses _time_sub, so `other - time_expr` is
    # evaluated with the time as the *left* operand — confirm that this
    # operand order is intended.
    __rsub__=_time_sub,
    rsub=_time_sub,
    __add__=_time_add,
    add=_time_add,
    __radd__=_time_add,
    radd=_time_add,
)
_add_methods(ir.TimeValue, _time_value_methods)
# ---------------------------------------------------------------------
# Decimal API
# Decimal-specific introspection helpers.
_decimal_value_methods = dict(
    precision=_unary_op('precision', ops.DecimalPrecision),
    scale=_unary_op('scale', ops.DecimalScale),
)
_add_methods(ir.DecimalValue, _decimal_value_methods)
# ----------------------------------------------------------------------
# Category API
# Category values only expose label translation.
_category_value_methods = dict(label=_analytics.category_label)
_add_methods(ir.CategoryValue, _category_value_methods)
# ---------------------------------------------------------------------
# Table API
# Lookup table mapping the user-facing `how` argument of `join` to the
# concrete join operation class; 'semi' is an alias for 'left_semi'.
_join_classes = {
    'inner': ops.InnerJoin,
    'left': ops.LeftJoin,
    'any_inner': ops.AnyInnerJoin,
    'any_left': ops.AnyLeftJoin,
    'outer': ops.OuterJoin,
    'right': ops.RightJoin,
    'left_semi': ops.LeftSemiJoin,
    'semi': ops.LeftSemiJoin,
    'anti': ops.LeftAntiJoin,
    'cross': ops.CrossJoin,
}
def join(left, right, predicates=(), how='inner'):
    """Perform a relational join between two tables.

    The resulting table schema is not resolved here.

    Parameters
    ----------
    left : TableExpr
    right : TableExpr
    predicates : join expression(s)
    how : string, default 'inner'
        - 'inner': inner join
        - 'left': left join
        - 'outer': full outer join
        - 'right': right outer join
        - 'semi' or 'left_semi': left semi join
        - 'anti': anti join

    Returns
    -------
    joined : TableExpr
        Note that the schema is not materialized yet
    """
    klass = _join_classes[how.lower()]
    if isinstance(predicates, Expr):
        predicates = _L.flatten_predicate(predicates)
    return klass(left, right, predicates).to_expr()
def asof_join(left, right, predicates=(), by=(), tolerance=None):
    """Perform an as-of join between two tables.

    Similar to a left join, except rows match on the *nearest* key
    rather than equal keys.  Keys may optionally be matched with ``by``
    before joining on the predicates.

    Parameters
    ----------
    left : TableExpr
    right : TableExpr
    predicates : join expression(s)
    by : string
        column to group by before joining
    tolerance : interval
        Amount of time to look behind when joining

    Returns
    -------
    joined : TableExpr
        Note that the schema is not materialized yet
    """
    op = ops.AsOfJoin(left, right, predicates, by, tolerance)
    return op.to_expr()
def cross_join(*tables, **kwargs):
    """
    Perform a cross join (cartesian product) amongst a list of tables, with
    optional set of prefixes to apply to overlapping column names

    Parameters
    ----------
    tables : ibis.expr.types.TableExpr

    Returns
    -------
    joined : TableExpr

    Examples
    --------
    >>> import ibis
    >>> schemas = [(name, 'int64') for name in 'abcde']
    >>> a, b, c, d, e = [
    ...     ibis.table([(name, type)], name=name) for name, type in schemas
    ... ]
    >>> joined1 = ibis.cross_join(a, b, c, d, e)
    >>> joined1  # doctest: +NORMALIZE_WHITESPACE
    ref_0
    UnboundTable[table]
      name: a
      schema:
        a : int64
    ref_1
    UnboundTable[table]
      name: b
      schema:
        b : int64
    ref_2
    UnboundTable[table]
      name: c
      schema:
        c : int64
    ref_3
    UnboundTable[table]
      name: d
      schema:
        d : int64
    ref_4
    UnboundTable[table]
      name: e
      schema:
        e : int64
    CrossJoin[table]
      left:
        Table: ref_0
      right:
        CrossJoin[table]
          left:
            CrossJoin[table]
              left:
                CrossJoin[table]
                  left:
                    Table: ref_1
                  right:
                    Table: ref_2
              right:
                Table: ref_3
          right:
            Table: ref_4
    """
    # TODO(phillipc): Implement prefix keyword argument
    # Delegate construction to the CrossJoin operation; kwargs are
    # reserved for the future prefix support noted above.
    op = ops.CrossJoin(*tables, **kwargs)
    return op.to_expr()
def _table_count(self):
    """Compute the number of rows in the table expression.

    Returns
    -------
    count : Int64Scalar
    """
    expr = ops.Count(self, None).to_expr()
    return expr.name('count')
def _table_info(self, buf=None):
    """
    Similar to pandas DataFrame.info. Show column names, types, and null
    counts. Output to stdout by default

    Parameters
    ----------
    buf : writable buffer, default None
        Destination of the report; ``None`` prints to stdout.
    """
    # Build a single aggregate query: total row count plus a non-null
    # count per column.
    metrics = [self.count().name('nrows')]
    for col in self.columns:
        metrics.append(self[col].count().name(col))
    # Executes the expression; `metrics` becomes the first result row.
    metrics = self.aggregate(metrics).execute().loc[0]
    names = ['Column', '------'] + self.columns
    types = ['Type', '----'] + [repr(x) for x in self.schema().types]
    counts = ['Non-null #', '----------'] + [str(x) for x in metrics[1:]]
    col_metrics = util.adjoin(2, names, types, counts)
    result = 'Table rows: {}\n\n{}'.format(metrics[0], col_metrics)
    print(result, file=buf)
def _table_set_column(table, name, expr):
    """Replace an existing column with a new expression.

    Parameters
    ----------
    name : string
        Column name to replace
    expr : value expression
        New data for column

    Returns
    -------
    set_table : TableExpr
        New table expression
    """
    expr = table._ensure_expr(expr)
    if expr._name != name:
        expr = expr.name(name)
    if name not in table:
        raise KeyError('{0} is not in the table'.format(name))
    # TODO: This assumes that projection is required; may be backend-dependent
    proj_exprs = [
        expr if key == name else table[key] for key in table.columns
    ]
    return table.projection(proj_exprs)
def _regular_join_method(name, how, doc=None):
def f(self, other, predicates=()):
return self.join(other, predicates, how=how)
if doc:
f.__doc__ = doc
else:
# XXX
f.__doc__ = join.__doc__
f.__name__ = name
return f
def filter(table, predicates):
    """Select rows from the table based on boolean expressions.

    Parameters
    ----------
    predicates : boolean array expressions, or list thereof

    Returns
    -------
    filtered_expr : TableExpr
    """
    return _L.apply_filter(table, _resolve_predicates(table, predicates))
def _resolve_predicates(table, predicates):
    """Normalize ``predicates`` into a list of bound boolean expressions.

    Single expressions are flattened, everything is promoted to a list,
    bound against ``table``, and analytic expressions are converted to
    filters.
    """
    if isinstance(predicates, Expr):
        predicates = _L.flatten_predicate(predicates)
    bound = [
        ir.bind_expr(table, x) for x in util.promote_list(predicates)
    ]
    return [
        p.to_filter() if isinstance(p, ir.AnalyticExpr) else p
        for p in bound
    ]
def aggregate(table, metrics=None, by=None, having=None, **kwds):
    """
    Aggregate a table with a given set of reductions, with grouping
    expressions, and post-aggregation filters.

    Parameters
    ----------
    table : table expression
    metrics : expression or expression list
    by : optional, default None
        Grouping expressions
    having : optional, default None
        Post-aggregation filters
    **kwds
        Additional named metrics; each ``name=expr`` adds ``expr`` named
        ``name``, in sorted key order.

    Returns
    -------
    agg_expr : TableExpr
    """
    # Normalize to a *fresh* list: previously a caller-supplied list was
    # mutated in place when keyword metrics were given, and a single
    # (non-list) metric expression crashed on .append.
    if metrics is None:
        metrics = []
    else:
        metrics = list(util.promote_list(metrics))
    for k, v in sorted(kwds.items()):
        v = table._ensure_expr(v)
        metrics.append(v.name(k))
    op = table.op().aggregate(table, metrics, by=by, having=having)
    return op.to_expr()
def _table_distinct(self):
    """
    Compute set of unique rows/tuples occurring in this table
    """
    return ops.Distinct(self).to_expr()
def _table_limit(table, n, offset=0):
    """
    Select the first n rows at beginning of table (may not be deterministic
    depending on implementation and presence of a sorting).

    Parameters
    ----------
    n : int
        Number of rows to include
    offset : int, default 0
        Number of rows to skip first

    Returns
    -------
    limited : TableExpr
    """
    return ops.Limit(table, n, offset=offset).to_expr()
def _head(table, n=5):
    """
    Select the first ``n`` rows at the beginning of a table (may not be
    deterministic depending on implementation and presence of a sorting).

    Parameters
    ----------
    n : int
        Number of rows to include, defaults to 5

    Returns
    -------
    limited : TableExpr

    See Also
    --------
    ibis.expr.types.TableExpr.limit
    """
    # Thin convenience wrapper around ``limit`` with a small default.
    return _table_limit(table, n=n)
def _table_sort_by(table, sort_exprs):
"""
Sort table by the indicated column expressions and sort orders
(ascending/descending)
Parameters
----------
sort_exprs : sorting expressions
Must be one of:
- Column name or expression
- Sort key, e.g. desc(col)
- (column name, True (ascending) / False (descending))
Examples
--------
>>> import ibis
>>> t = ibis.table([('a', 'int64'), ('b', 'string')])
>>> ab_sorted = t.sort_by([('a', True), ('b', False)])
Returns
-------
sorted : TableExpr
"""
result = table.op().sort_by(table, sort_exprs)
return result.to_expr()
def _table_union(left, right, distinct=False):
    """
    Form the table set union of two table expressions having identical
    schemas.

    Parameters
    ----------
    right : TableExpr
    distinct : boolean, default False
        Only union distinct rows not occurring in the calling table (this
        can be very expensive, be careful)

    Returns
    -------
    union : TableExpr
    """
    return ops.Union(left, right, distinct=distinct).to_expr()
def _table_to_array(self):
    """
    Single column tables can be viewed as arrays.
    """
    return ops.TableArrayView(self).to_expr()
def _table_materialize(table):
"""
Force schema resolution for a joined table, selecting all fields from
all tables.
"""
if table._is_materialized():
return table
op = ops.MaterializedJoin(table)
return op.to_expr()
def _safe_get_name(expr):
try:
return expr.get_name()
except com.ExpressionError:
return None
def mutate(table, exprs=None, **mutations):
    """
    Convenience function for table projections involving adding columns

    Parameters
    ----------
    exprs : list, default None
        List of named expressions to add as columns
    mutations : keywords for new columns

    Returns
    -------
    mutated : TableExpr

    Examples
    --------
    Using keywords arguments to name the new columns

    >>> import ibis
    >>> table = ibis.table([('foo', 'double'), ('bar', 'double')], name='t')
    >>> expr = table.mutate(qux=table.foo + table.bar, baz=5)
    >>> expr  # doctest: +NORMALIZE_WHITESPACE
    ref_0
    UnboundTable[table]
      name: t
      schema:
        foo : float64
        bar : float64
    <BLANKLINE>
    Selection[table]
      table:
        Table: ref_0
      selections:
        Table: ref_0
        baz = Literal[int8]
          5
        qux = Add[float64*]
          left:
            foo = Column[float64*] 'foo' from table
              ref_0
          right:
            bar = Column[float64*] 'bar' from table
              ref_0

    Using the :meth:`ibis.expr.types.Expr.name` method to name the new columns

    >>> new_columns = [ibis.literal(5).name('baz',),
    ...                (table.foo + table.bar).name('qux')]
    >>> expr2 = table.mutate(new_columns)
    >>> expr.equals(expr2)
    True
    """
    # Normalize the positional expressions to a (possibly empty) list.
    exprs = [] if exprs is None else util.promote_list(exprs)
    # Keyword mutations are appended in sorted-by-name order so that the
    # resulting column order is deterministic; a callable value is applied
    # to the table to obtain the expression.
    exprs.extend(
        (expr(table) if util.is_function(expr) else as_value_expr(expr)).name(
            name
        )
        for name, expr in sorted(mutations.items(), key=operator.itemgetter(0))
    )
    # Index the new expressions by name; when the same name appears more
    # than once the last occurrence wins (dict insertion semantics).
    by_name = collections.OrderedDict(
        (expr.get_name(), expr) for expr in exprs
    )
    columns = table.columns
    # Names colliding with existing columns replace those columns in place.
    used = by_name.keys() & columns
    if used:
        # Keep the original column order, substituting overridden columns,
        # then append the genuinely new columns at the end.
        proj_exprs = [
            by_name.get(column, table[column]) for column in columns
        ] + [expr for name, expr in by_name.items() if name not in used]
    else:
        # No collisions: project the whole table followed by the additions.
        proj_exprs = [table] + exprs
    return table.projection(proj_exprs)
def projection(table, exprs):
    """
    Compute new table expression with the indicated column expressions from
    this table.

    Parameters
    ----------
    exprs : column expression, or string, or list of column expressions and
      strings. If strings passed, must be columns in the table already

    Returns
    -------
    projection : TableExpr

    Notes
    -----
    Passing an aggregate function to this method will broadcast the aggregate's
    value over the number of rows in the table. See the examples section for
    more details.

    Examples
    --------
    Simple projection

    >>> import ibis
    >>> fields = [('a', 'int64'), ('b', 'double')]
    >>> t = ibis.table(fields, name='t')
    >>> proj = t.projection([t.a, (t.b + 1).name('b_plus_1')])
    >>> proj  # doctest: +NORMALIZE_WHITESPACE
    ref_0
    UnboundTable[table]
      name: t
      schema:
        a : int64
        b : float64
    <BLANKLINE>
    Selection[table]
      table:
        Table: ref_0
      selections:
        a = Column[int64*] 'a' from table
          ref_0
        b_plus_1 = Add[float64*]
          left:
            b = Column[float64*] 'b' from table
              ref_0
          right:
            Literal[int8]
              1
    >>> proj2 = t[t.a, (t.b + 1).name('b_plus_1')]
    >>> proj.equals(proj2)
    True

    Aggregate projection

    >>> agg_proj = t[t.a.sum().name('sum_a'), t.b.mean().name('mean_b')]
    >>> agg_proj  # doctest: +NORMALIZE_WHITESPACE, +ELLIPSIS
    ref_0
    UnboundTable[table]
      name: t
      schema:
        a : int64
        b : float64
    <BLANKLINE>
    Selection[table]
      table:
        Table: ref_0
      selections:
        sum_a = WindowOp[int64*]
          sum_a = Sum[int64]
            a = Column[int64*] 'a' from table
              ref_0
            where:
              None
          <ibis.expr.window.Window object at 0x...>
        mean_b = WindowOp[float64*]
          mean_b = Mean[float64]
            b = Column[float64*] 'b' from table
              ref_0
            where:
              None
          <ibis.expr.window.Window object at 0x...>

    Note the ``<ibis.expr.window.Window>`` objects here, their existence means
    that the result of the aggregation will be broadcast across the number of
    rows in the input column. The purpose of this expression rewrite is to make
    it easy to write column/scalar-aggregate operations like

    .. code-block:: python

       t[(t.a - t.a.mean()).name('demeaned_a')]
    """
    # Imported locally — presumably to avoid an import cycle at module load;
    # TODO(review): confirm.
    import ibis.expr.analysis as L
    # Accept a single expression or a single column name as well as a list.
    if isinstance(exprs, (Expr, str)):
        exprs = [exprs]
    # The Projector performs validation and the aggregate-broadcast rewrite
    # described in the Notes section above.
    projector = L.Projector(table, exprs)
    op = projector.get_result()
    return op.to_expr()
def _table_relabel(table, substitutions, replacements=None):
"""
Change table column names, otherwise leaving table unaltered
Parameters
----------
substitutions
Returns
-------
relabeled : TableExpr
"""
if replacements is not None:
raise NotImplementedError
observed = set()
exprs = []
for c in table.columns:
expr = table[c]
if c in substitutions:
expr = expr.name(substitutions[c])
observed.add(c)
exprs.append(expr)
for c in substitutions:
if c not in observed:
raise KeyError('{0!r} is not an existing column'.format(c))
return table.projection(exprs)
def _table_view(self):
    """
    Create a new table expression that is semantically equivalent to the
    current one, but is considered a distinct relation for evaluation
    purposes (e.g. in SQL).

    For doing any self-referencing operations, like a self-join, you will
    use this operation to create a reference to the current table
    expression.

    Returns
    -------
    expr : TableExpr
    """
    return ops.SelfReference(self).to_expr()
def _table_drop(self, fields):
if not fields:
# no-op if nothing to be dropped
return self
schema = self.schema()
field_set = frozenset(fields)
missing_fields = field_set.difference(schema)
if missing_fields:
raise KeyError('Fields not in table: {0!s}'.format(missing_fields))
return self[[field for field in schema if field not in field_set]]
# Mapping of public method name -> implementation function; these are
# installed onto ``ir.TableExpr`` by the ``_add_methods`` call below.
_table_methods = dict(
    aggregate=aggregate,
    count=_table_count,
    distinct=_table_distinct,
    drop=_table_drop,
    info=_table_info,
    limit=_table_limit,
    head=_head,
    set_column=_table_set_column,
    filter=filter,
    materialize=_table_materialize,
    mutate=mutate,
    projection=projection,
    # ``select`` is an alias for ``projection``.
    select=projection,
    relabel=_table_relabel,
    join=join,
    cross_join=cross_join,
    # Each specialized join method forwards to ``join`` with ``how`` fixed.
    inner_join=_regular_join_method('inner_join', 'inner'),
    left_join=_regular_join_method('left_join', 'left'),
    any_inner_join=_regular_join_method('any_inner_join', 'any_inner'),
    any_left_join=_regular_join_method('any_left_join', 'any_left'),
    outer_join=_regular_join_method('outer_join', 'outer'),
    semi_join=_regular_join_method('semi_join', 'semi'),
    anti_join=_regular_join_method('anti_join', 'anti'),
    asof_join=asof_join,
    sort_by=_table_sort_by,
    to_array=_table_to_array,
    union=_table_union,
    view=_table_view,
)
# Attach all of the above as methods of TableExpr.
_add_methods(ir.TableExpr, _table_methods)
def prevent_rewrite(expr, client=None):
    """Prevent optimization from happening below `expr`.

    Parameters
    ----------
    expr : ir.TableExpr
        Any table expression whose optimization you want to prevent
    client : ibis.client.Client, optional, default None
        A client to use to create the SQLQueryResult operation. This is useful
        if you're compiling an expression that derives from an
        :class:`~ibis.expr.operations.UnboundTable` operation.

    Returns
    -------
    sql_query_result : ir.TableExpr
    """
    if client is None:
        # Infer the backend from the expression; exactly one is expected.
        (client,) = ibis.client.find_backends(expr)
    compiled = client.compile(expr)
    return ops.SQLQueryResult(compiled, expr.schema(), client).to_expr()
| 23.012512 | 105 | 0.615433 |
ace5b7844a7c9ee27b1dd76a661d3b8d19d50f11 | 3,465 | py | Python | nnabla_rl/environment_explorers/gaussian_explorer.py | sony/nnabla-rl | 6a9a91ac5363b8611e0c9f736590729952a8d460 | [
"Apache-2.0"
] | 75 | 2021-06-14T02:35:19.000Z | 2022-03-23T04:30:24.000Z | nnabla_rl/environment_explorers/gaussian_explorer.py | sony/nnabla-rl | 6a9a91ac5363b8611e0c9f736590729952a8d460 | [
"Apache-2.0"
] | 2 | 2021-12-17T08:46:54.000Z | 2022-03-15T02:04:53.000Z | nnabla_rl/environment_explorers/gaussian_explorer.py | sony/nnabla-rl | 6a9a91ac5363b8611e0c9f736590729952a8d460 | [
"Apache-2.0"
] | 3 | 2021-06-15T13:32:57.000Z | 2022-03-25T16:53:14.000Z | # Copyright 2020,2021 Sony Corporation.
# Copyright 2021 Sony Group Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
from dataclasses import dataclass
from typing import Dict, Tuple
import numpy as np
from nnabla_rl.environment_explorer import EnvironmentExplorer, EnvironmentExplorerConfig
from nnabla_rl.environments.environment_info import EnvironmentInfo
from nnabla_rl.typing import ActionSelector
@dataclass
class GaussianExplorerConfig(EnvironmentExplorerConfig):
"""
List of configurations for gaussian explorer.
Args:
action_clip_low (float): Minimum noise value. Noise below this value will be clipped.
Defaults to sys.float_info.min.
action_clip_high (float): Maximum noise value. Noise above this value will be clipped.
Defaults to sys.float_info.max.
sigma (float): Standard deviation of gaussian noise. Must be positive. Defaults to 1.0.
"""
action_clip_low: float = sys.float_info.min
action_clip_high: float = sys.float_info.max
sigma: float = 1.0
def __post_init__(self):
self._assert_positive(self.sigma, 'sigma')
class GaussianExplorer(EnvironmentExplorer):
'''Gaussian explorer
Explore using policy's action without gaussian noise appended to it. Policy's action must be continuous action.
Args:
policy_action_selector (:py:class:`ActionSelector <nnabla_rl.typing.ActionSelector>`):
callable which computes current policy's action with respect to current state.
env_info (:py:class:`EnvironmentInfo <nnabla_rl.environments.environment_info.EnvironmentInfo>`):
environment info
config (:py:class:`LinearDecayEpsilonGreedyExplorerConfig\
<nnabla_rl.environment_explorers.LinearDecayEpsilonGreedyExplorerConfig>`): the config of this class.
'''
# type declarations to type check with mypy
# NOTE: declared variables are instance variable and NOT class variable, unless it is marked with ClassVar
# See https://mypy.readthedocs.io/en/stable/class_basics.html for details
_config: GaussianExplorerConfig
def __init__(self,
policy_action_selector: ActionSelector,
env_info: EnvironmentInfo,
config: GaussianExplorerConfig = GaussianExplorerConfig()):
super().__init__(env_info, config)
self._policy_action_selector = policy_action_selector
def action(self, step: int, state: np.ndarray, *, begin_of_episode: bool = False) -> Tuple[np.ndarray, Dict]:
(action, info) = self._policy_action_selector(state, begin_of_episode=begin_of_episode)
return self._append_noise(action, self._config.action_clip_low, self._config.action_clip_high), info
def _append_noise(self, action, low, high):
noise = np.random.normal(loc=0.0, scale=self._config.sigma, size=action.shape).astype(np.float32)
return np.clip(action + noise, low, high)
| 42.777778 | 115 | 0.737951 |
ace5b7dbdf842d4dbfda8b97f9c0a4f676a0148f | 3,859 | py | Python | StanCode_Projects/boggle_game_solver/boggle.py | jackychang16/sc-projects | c405e391b4f69dc7780820b2e246b400dcec5cf5 | [
"MIT"
] | 1 | 2021-10-05T04:21:33.000Z | 2021-10-05T04:21:33.000Z | StanCode_Projects/boggle_game_solver/boggle.py | jackychang16/sc-projects | c405e391b4f69dc7780820b2e246b400dcec5cf5 | [
"MIT"
] | null | null | null | StanCode_Projects/boggle_game_solver/boggle.py | jackychang16/sc-projects | c405e391b4f69dc7780820b2e246b400dcec5cf5 | [
"MIT"
] | null | null | null | """
File: boggle.py
Name:Jacky
----------------------------------------
This program lets user to input alphabets which arrange on square.
And this program will print words which combined on the neighbor alphabets.
"""
# This is the file name of the dictionary txt file
# we will be checking if a word exists by searching through it
FILE = 'dictionary.txt'
SIZE = 4
dictionary_list = {}
def main():
"""
boggle list let user inputs all alphabets and gives any alphabet a position.
"""
boggle = []
# if user input a wrong formats, the switch will keep on False and this program will print Illegal input
switch = False
for i in range(SIZE):
if switch is True:
break
boggle_line = input(f'{i+1} row of letters:')
boggle_line= boggle_line.split()
if len(boggle_line) != SIZE:
print('Illegal input')
switch = True
break
for alphabet in boggle_line:
if len(alphabet) != 1:
print('Illegal input')
switch = True
break
boggle.append(boggle_line)
# print(boggle)
# let the world in the FILE = 'dictionary.txt' to the dictionary_list
read_dictionary()
# let any alphabet have own position
map_dict = {}
# if the neighbor alphabet combined surpass 4 units and this world in the dictionary_list
correct_word_list = []
# define any position where can connect
if switch is False:
for x in range(SIZE):
for y in range(SIZE):
map_dict[(x, y)] = {}
map_dict[(x, y)]['alphabet'] = boggle[x][y]
if x==0 and y==0:
map_dict[(x,y)]['map'] = [(x+1,y),(x,y+1),(x+1,y+1)]
elif x==0 and y==SIZE-1:
map_dict[(x,y)]['map'] = [(0,y-1),(1,y-1),(1,y-1)]
elif x==SIZE-1 and y== 0 :
map_dict[(x,y)]['map'] = [(x-1,0),(x-1,1),(x,1)]
elif x==SIZE-1 and y==SIZE-1:
map_dict[(x,y)]['map'] = [(x-1,y),(x-1,y-1),(x,y-1)]
elif x==0:
map_dict[(x,y)]['map'] = [(x,y-1),(x+1,y-1),(x+1,y),(x+1,y+1),(x,y+1)]
elif x == SIZE-1:
map_dict[(x,y)]['map'] = [(x,y-1),(x-1,y-1),(x-1,y),(x-1,y+1),(x,y+1)]
elif y == 0:
map_dict[(x,y)]['map'] = [(x-1,y),(x-1,y+1),(x,y+1),(x+1,y+1),(x+1,y)]
elif y == SIZE-1:
map_dict[(x,y)]['map'] = [(x-1,y),(x-1,y-1),(x,y-1),(x+1,y-1),(x+1,y)]
else:
map_dict[(x,y)]['map'] = [(x-1,y-1),(x,y-1),(x+1,y-1),(x+1,y),(x+1,y+1),(x,y+1),(x-1,y+1),(x-1,y)]
for i in range(SIZE):
for j in range(SIZE):
permutation(map_dict,(i,j),correct_word_list)
print(f'There are {len(correct_word_list)} words in total')
def permutation(map_dict,position,correct_word_list, coordinate_list=[]):
coordinate_list.append(position)
maybe_word = (num_to_string(coordinate_list, map_dict))
# print(num_to_string(coordinate_list, map_dict))
if len(maybe_word) >=4 and maybe_word in dictionary_list and maybe_word not in correct_word_list:
correct_word_list.append(maybe_word)
print(f'Found "{maybe_word}"')
for next_position in map_dict[position]['map']:
if next_position not in coordinate_list:
if has_prefix(maybe_word):
permutation(map_dict,next_position,correct_word_list, coordinate_list)
coordinate_list.pop()
def read_dictionary():
"""
This function reads file "dictionary.txt" stored in FILE
and appends words in each line into a Python list
"""
global dictionary_list
with open(FILE, 'r') as f:
for line in f:
words = line.split()
for word in words:
dictionary_list[word] = word
return dictionary_list
def has_prefix(sub_s):
"""
:param sub_s: (str) A substring that is constructed by neighboring letters on a 4x4 square grid
:return: (bool) If there is any words with prefix stored in sub_s
"""
for dict_word in dictionary_list:
if dict_word.startswith(sub_s):
return True
return False
def num_to_string(coordinate_list, map_dict):
ans = ""
for figure in coordinate_list:
ans += map_dict[figure]['alphabet']
return ans
if __name__ == '__main__':
main()
| 31.120968 | 105 | 0.649132 |
ace5b8062c16d7a151ed5c8d8736b585909fd5ed | 252 | py | Python | boa3_test/test_sc/native_test/stdlib/MemorySearchStart.py | OnBlockIO/neo3-boa | cb317292a67532a52ed26f2b0f0f7d0b10ac5f5f | [
"Apache-2.0"
] | 25 | 2020-07-22T19:37:43.000Z | 2022-03-08T03:23:55.000Z | boa3_test/test_sc/native_test/stdlib/MemorySearchStart.py | OnBlockIO/neo3-boa | cb317292a67532a52ed26f2b0f0f7d0b10ac5f5f | [
"Apache-2.0"
] | 419 | 2020-04-23T17:48:14.000Z | 2022-03-31T13:17:45.000Z | boa3_test/test_sc/native_test/stdlib/MemorySearchStart.py | OnBlockIO/neo3-boa | cb317292a67532a52ed26f2b0f0f7d0b10ac5f5f | [
"Apache-2.0"
] | 15 | 2020-05-21T21:54:24.000Z | 2021-11-18T06:17:24.000Z | from typing import Union
from boa3.builtin import public
from boa3.builtin.nativecontract.stdlib import StdLib
@public
def main(mem: Union[bytes, str], value: Union[bytes, str], start: int) -> int:
return StdLib.memory_search(mem, value, start)
| 25.2 | 78 | 0.757937 |
ace5ba40c5f69965dd82e7929da19da880ea6c98 | 1,813 | py | Python | setup.py | iolucas/cv2wrap | 0cd749e484d79640e8f2a7dfe55531abb4fc2af5 | [
"MIT"
] | null | null | null | setup.py | iolucas/cv2wrap | 0cd749e484d79640e8f2a7dfe55531abb4fc2af5 | [
"MIT"
] | null | null | null | setup.py | iolucas/cv2wrap | 0cd749e484d79640e8f2a7dfe55531abb4fc2af5 | [
"MIT"
] | null | null | null | from distutils.core import setup
setup(
name = 'cv2wrap',
packages = ['cv2wrap'], # this must be the same as the name above
version = '1.0',
description = 'Wrapper for python opencv 2.4.12 32bit',
author = 'Lucas V. Oliveira',
author_email = 'lucas.o@live.com',
url = 'https://github.com/lucasolivier/cv2wrap', # use the URL to the github repo
download_url = 'https://github.com/lucasolivier/cv2wrap/tarball/1.0', # I'll explain this in a second
keywords = ['opencv', 'cv2', 'python'], # arbitrary keywords
classifiers = []
)
#setup(name='cv2',
#version='1.0',
#py_modules=['cv2'],
#)
#setup_args = {
# 'name': 'cv2wrap',
# 'packages': ['cv2wrap'], # this must be the same as the name above
# 'version': '1.0',
# 'description': 'Wrapper for python opencv 2.4.12 32bit',
# 'author': 'Lucas V. Oliveira',
# 'author_email': 'lucas.o@live.com',
# 'url': 'https://github.com/lucasolivier/cv2wrap', # use the URL to the github repo
# 'download_url': 'https://github.com/lucasolivier/cv2wrap/tarball/1.0', # I'll explain this in a second
# 'keywords': ['opencv', 'cv2', 'python'], # arbitrary keywords
# 'classifiers': []
#}
#if True:
#class my_build_ext():
#def build_extension(self, ext):
#''' Copies the already-compiled pyd
#'''
#import shutil
#import os.path
#try:
#os.makedirs(os.path.dirname(self.get_ext_fullpath(ext.name)))
#except WindowsError, e:
#if e.winerror != 183: # already exists
#raise
#shutil.copyfile(os.path.join(this_dir, r'..\..\bin\Python%d%d\my.pyd' % sys.version_info[0:2]), self.get_ext_fullpath(ext.name))
#setup_args['cmdclass'] = {'build_ext': my_build_ext }
#setup(**setup_args) | 34.207547 | 141 | 0.611693 |
ace5bacfcfc0b028c6836505d86ea6ba9b7f0a56 | 7,012 | py | Python | sdk/python/pulumi_azure_nextgen/desktopvirtualization/v20201102preview/workspace.py | pulumi/pulumi-azure-nextgen | 452736b0a1cf584c2d4c04666e017af6e9b2c15c | [
"Apache-2.0"
] | 31 | 2020-09-21T09:41:01.000Z | 2021-02-26T13:21:59.000Z | sdk/python/pulumi_azure_nextgen/desktopvirtualization/v20201102preview/workspace.py | pulumi/pulumi-azure-nextgen | 452736b0a1cf584c2d4c04666e017af6e9b2c15c | [
"Apache-2.0"
] | 231 | 2020-09-21T09:38:45.000Z | 2021-03-01T11:16:03.000Z | sdk/python/pulumi_azure_nextgen/desktopvirtualization/v20201102preview/workspace.py | pulumi/pulumi-azure-nextgen | 452736b0a1cf584c2d4c04666e017af6e9b2c15c | [
"Apache-2.0"
] | 4 | 2020-09-29T14:14:59.000Z | 2021-02-10T20:38:16.000Z | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from ... import _utilities, _tables
__all__ = ['Workspace']
class Workspace(pulumi.CustomResource):
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
application_group_references: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
description: Optional[pulumi.Input[str]] = None,
friendly_name: Optional[pulumi.Input[str]] = None,
location: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
workspace_name: Optional[pulumi.Input[str]] = None,
__props__=None,
__name__=None,
__opts__=None):
"""
Represents a Workspace definition.
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[Sequence[pulumi.Input[str]]] application_group_references: List of applicationGroup resource Ids.
:param pulumi.Input[str] description: Description of Workspace.
:param pulumi.Input[str] friendly_name: Friendly name of Workspace.
:param pulumi.Input[str] location: The geo-location where the resource lives
:param pulumi.Input[str] resource_group_name: The name of the resource group. The name is case insensitive.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: Resource tags.
:param pulumi.Input[str] workspace_name: The name of the workspace
"""
if __name__ is not None:
warnings.warn("explicit use of __name__ is deprecated", DeprecationWarning)
resource_name = __name__
if __opts__ is not None:
warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning)
opts = __opts__
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = dict()
__props__['application_group_references'] = application_group_references
__props__['description'] = description
__props__['friendly_name'] = friendly_name
__props__['location'] = location
if resource_group_name is None and not opts.urn:
raise TypeError("Missing required property 'resource_group_name'")
__props__['resource_group_name'] = resource_group_name
__props__['tags'] = tags
__props__['workspace_name'] = workspace_name
__props__['name'] = None
__props__['type'] = None
alias_opts = pulumi.ResourceOptions(aliases=[pulumi.Alias(type_="azure-nextgen:desktopvirtualization:Workspace"), pulumi.Alias(type_="azure-nextgen:desktopvirtualization/v20190123preview:Workspace"), pulumi.Alias(type_="azure-nextgen:desktopvirtualization/v20190924preview:Workspace"), pulumi.Alias(type_="azure-nextgen:desktopvirtualization/v20191210preview:Workspace"), pulumi.Alias(type_="azure-nextgen:desktopvirtualization/v20200921preview:Workspace"), pulumi.Alias(type_="azure-nextgen:desktopvirtualization/v20201019preview:Workspace"), pulumi.Alias(type_="azure-nextgen:desktopvirtualization/v20201110preview:Workspace"), pulumi.Alias(type_="azure-nextgen:desktopvirtualization/v20210114preview:Workspace"), pulumi.Alias(type_="azure-nextgen:desktopvirtualization/v20210201preview:Workspace")])
opts = pulumi.ResourceOptions.merge(opts, alias_opts)
super(Workspace, __self__).__init__(
'azure-nextgen:desktopvirtualization/v20201102preview:Workspace',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None) -> 'Workspace':
"""
Get an existing Workspace resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = dict()
return Workspace(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter(name="applicationGroupReferences")
def application_group_references(self) -> pulumi.Output[Optional[Sequence[str]]]:
"""
List of applicationGroup resource Ids.
"""
return pulumi.get(self, "application_group_references")
@property
@pulumi.getter
def description(self) -> pulumi.Output[Optional[str]]:
"""
Description of Workspace.
"""
return pulumi.get(self, "description")
@property
@pulumi.getter(name="friendlyName")
def friendly_name(self) -> pulumi.Output[Optional[str]]:
"""
Friendly name of Workspace.
"""
return pulumi.get(self, "friendly_name")
@property
@pulumi.getter
def location(self) -> pulumi.Output[str]:
"""
The geo-location where the resource lives
"""
return pulumi.get(self, "location")
@property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
"""
The name of the resource
"""
return pulumi.get(self, "name")
@property
@pulumi.getter
def tags(self) -> pulumi.Output[Optional[Mapping[str, str]]]:
"""
Resource tags.
"""
return pulumi.get(self, "tags")
@property
@pulumi.getter
def type(self) -> pulumi.Output[str]:
"""
The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or "Microsoft.Storage/storageAccounts"
"""
return pulumi.get(self, "type")
def translate_output_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
def translate_input_property(self, prop):
return _tables.SNAKE_TO_CAMEL_CASE_TABLE.get(prop) or prop
| 44.66242 | 810 | 0.662008 |
ace5bc7fd290f469ac9fcd76cf19987a53516504 | 852 | py | Python | onnx/backend/test/case/node/cosh.py | pchandrasekaran1595/onnx | 10da6f2e919c8515877e227a41cd44e86ae0bb2d | [
"Apache-2.0"
] | 12,820 | 2017-09-07T07:00:24.000Z | 2022-03-31T14:41:57.000Z | onnx/backend/test/case/node/cosh.py | pchandrasekaran1595/onnx | 10da6f2e919c8515877e227a41cd44e86ae0bb2d | [
"Apache-2.0"
] | 3,213 | 2017-09-07T17:48:17.000Z | 2022-03-31T19:44:57.000Z | onnx/backend/test/case/node/cosh.py | pchandrasekaran1595/onnx | 10da6f2e919c8515877e227a41cd44e86ae0bb2d | [
"Apache-2.0"
] | 2,922 | 2017-09-07T07:46:00.000Z | 2022-03-31T15:55:24.000Z | # SPDX-License-Identifier: Apache-2.0
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import numpy as np # type: ignore
import onnx
from ..base import Base
from . import expect
class Cosh(Base):
@staticmethod
def export(): # type: () -> None
node = onnx.helper.make_node(
'Cosh',
inputs=['x'],
outputs=['y'],
)
x = np.array([-1, 0, 1]).astype(np.float32)
y = np.cosh(x) # expected output [1.54308069, 1., 1.54308069]
expect(node, inputs=[x], outputs=[y],
name='test_cosh_example')
x = np.random.randn(3, 4, 5).astype(np.float32)
y = np.cosh(x)
expect(node, inputs=[x], outputs=[y],
name='test_cosh')
| 25.058824 | 72 | 0.593897 |
ace5bc9e04f2225518925e2c5c6e09e15c7b540d | 1,370 | py | Python | aea/helpers/ipfs/__init__.py | bryanchriswhite/agents-aea | d3f177a963eb855d9528555167255bf2b478f4ba | [
"Apache-2.0"
] | 126 | 2019-09-07T09:32:44.000Z | 2022-03-29T14:28:41.000Z | aea/helpers/ipfs/__init__.py | salman6049/agents-aea | d3f177a963eb855d9528555167255bf2b478f4ba | [
"Apache-2.0"
] | 1,814 | 2019-08-24T10:08:07.000Z | 2022-03-31T14:28:36.000Z | aea/helpers/ipfs/__init__.py | salman6049/agents-aea | d3f177a963eb855d9528555167255bf2b478f4ba | [
"Apache-2.0"
] | 46 | 2019-09-03T22:13:58.000Z | 2022-03-22T01:25:16.000Z | # -*- coding: utf-8 -*-
# ------------------------------------------------------------------------------
#
# Copyright 2018-2019 Fetch.AI Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# ------------------------------------------------------------------------------
"""This module contains helper methods and classes for the 'aea' package."""
from aea.helpers.ipfs.utils import _protobuf_python_implementation
# fix for ipfs hashes, preload protobuf classes with protobuf python implementation
with _protobuf_python_implementation():
from aea.helpers.ipfs.pb import ( # noqa: F401 # pylint: disable=import-outside-toplevel,unused-import
merkledag_pb2,
unixfs_pb2,
)
from aea.helpers.ipfs.pb.merkledag_pb2 import ( # noqa: F401 # pylint: disable=import-outside-toplevel,unused-import
PBNode,
)
| 42.8125 | 123 | 0.643066 |
ace5bca641b22bfc4ca7c64507391ab16dd54634 | 67,332 | py | Python | tensorflow/python/keras/engine/training_utils.py | miguelmorin/tensorflow | ffd4de40fb30cab04745c1f28e924c5e595ac390 | [
"Apache-2.0"
] | null | null | null | tensorflow/python/keras/engine/training_utils.py | miguelmorin/tensorflow | ffd4de40fb30cab04745c1f28e924c5e595ac390 | [
"Apache-2.0"
] | null | null | null | tensorflow/python/keras/engine/training_utils.py | miguelmorin/tensorflow | ffd4de40fb30cab04745c1f28e924c5e595ac390 | [
"Apache-2.0"
] | null | null | null | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Training-related utilities."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import abc
import atexit
import collections
import collections.abc
from collections import OrderedDict
import multiprocessing.pool
import threading
import time

import numpy as np
import six
from six.moves import zip  # pylint: disable=redefined-builtin

from tensorflow.python import tf2
from tensorflow.python.data.experimental.ops import cardinality
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.data.ops import iterator_ops
from tensorflow.python.data.ops import readers
from tensorflow.python.eager import context
from tensorflow.python.framework import composite_tensor_utils
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_spec
from tensorflow.python.framework import tensor_util
from tensorflow.python.keras import backend as K
from tensorflow.python.keras import callbacks as cbks
from tensorflow.python.keras import losses
from tensorflow.python.keras import metrics as metrics_module
from tensorflow.python.keras.utils import generic_utils
from tensorflow.python.keras.utils import losses_utils
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops.losses import util as tf_losses_utils
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.util import nest
@six.add_metaclass(abc.ABCMeta)
class Aggregator(object):
  """Abstract base class used to aggregate batch-level outputs of a loop.

  Concrete subclasses implement `create`, `aggregate` and `finalize`, which
  the training/eval/predict loops call on the first batch, on every batch,
  and once at the end of the run, respectively.

  Attributes:
    use_steps: Whether the loop is using `step` or `batch_size`.
    num_samples_or_steps: Either `batch_size*num_batches` or `steps`.
    results: What to return at the end of the aggregation loop.
  """

  def __init__(self, use_steps, num_samples_or_steps):
    self.use_steps = use_steps
    self.num_samples_or_steps = num_samples_or_steps
    # Populated by `create` from the first batch; read after `finalize`.
    self.results = []

  @abc.abstractmethod
  def create(self, batch_outs):
    """Creates the initial results from the first batch outputs.

    Arguments:
      batch_outs: A list of batch-level outputs.
    """
    raise NotImplementedError('Must be implemented in subclasses.')

  @abc.abstractmethod
  def aggregate(self, batch_outs, batch_start=None, batch_end=None):
    """Aggregates batch-level results into total results.

    Arguments:
      batch_outs: A list of batch-level outputs.
      batch_start: The start index of this batch. Always `None` if `use_steps`
        is `True`.
      batch_end: The end index of this batch. Always `None` if `use_steps` is
        `True`.
    """
    raise NotImplementedError('Must be implemented in subclasses.')

  @abc.abstractmethod
  def finalize(self):
    """Prepares the total results to be returned."""
    raise NotImplementedError('Must be implemented in subclasses.')
class MetricsAggregator(Aggregator):
  """Aggregates the scalar loss and metric values of each batch.

  Slot 0 of the batch outputs is the loss, which is accumulated across
  batches and averaged in `finalize`. The remaining slots are stateful
  metrics, so only their latest values are kept.
  """

  def create(self, batch_outs):
    # One accumulator slot per output: [loss, metric_1, ..., metric_n].
    self.results = [0.] * len(batch_outs)

  def aggregate(self, batch_outs, batch_start=None, batch_end=None):
    if self.use_steps:
      # Step-driven loop: every batch contributes equally.
      loss_delta = batch_outs[0]
    else:
      # Sample-driven loop: weight the batch loss by its sample count.
      loss_delta = batch_outs[0] * (batch_end - batch_start)
    self.results[0] += loss_delta
    # Metrics (always stateful, just grab current values.)
    self.results[1:] = batch_outs[1:]

  def finalize(self):
    if not self.results:
      raise ValueError('Empty training data.')
    # Convert the accumulated (weighted) loss sum into a mean.
    self.results[0] /= self.num_samples_or_steps
class ConcatAggregator(Aggregator):
  """Combine tensor-likes which cannot be merged on the fly.

  This class expects to aggregate a single tensor-like rather than a nested
  structure of tensor-likes.
  """

  def __init__(self):
    self.composite = None
    super(ConcatAggregator, self).__init__(
        use_steps=True, num_samples_or_steps=None)

  def create(self, batch_element):
    # Remember whether we are concatenating composite values or ndarrays.
    self.composite = composite_tensor_utils.is_composite_or_composite_value(
        batch_element)

  def aggregate(self, batch_element, batch_start=None, batch_end=None):
    # Batches are buffered; the actual concatenation happens in `finalize`.
    self.results.append(batch_element)

  def finalize(self):
    if len(self.results) == 1:
      # Special case of single batch inference which skips a copy.
      self.results = self.results[0]
    elif self.composite:
      # TODO(taylorrobie): efficiently concatenate.
      merged = self.results[0]
      for piece in self.results[1:]:
        merged = composite_tensor_utils.append_composite_tensor(merged, piece)
      self.results = merged
    else:
      self.results = np.concatenate(self.results, axis=0)

    if isinstance(self.results, ops.EagerTensor):
      self.results = self.results._cpu_nograd()._numpy()  # pylint: disable=protected-access
_COPY_THREADS = 4
_COPY_POOL = None


def get_copy_pool():
  """Shared threadpool for copying arrays.

  Pool instantiation takes ~ 2ms, so a singleton pool is used rather than
  creating a pool per SliceAggregator.

  Returns:
    The global copy threadpool.
  """
  global _COPY_POOL
  if _COPY_POOL is not None:
    return _COPY_POOL
  # Lazily create the singleton pool and make sure it is torn down at exit.
  _COPY_POOL = multiprocessing.pool.ThreadPool(_COPY_THREADS)
  atexit.register(_COPY_POOL.close)
  return _COPY_POOL
class SliceAggregator(Aggregator):
  """Combine arrays where the final size is known.

  This class expects to aggregate a single tensor-like rather than a nested
  structure of tensor-likes.

  NumPy copies are an operation that threads handle quite well because all of
  the heavy lifting is in c and does not need the GIL. Moreover, we can perform
  lock-free writes to the same buffer in multiple threads because the nature of
  result aggregation guarantees that either the indices are disjoint or the
  aggregator will throw an exception in finalize. Moreover, because aggregation
  is performed on the slowest varying dimension, assignments for a given batch
  will write to contiguous blocks of memory, further minimizing contention.

  There is, however, some scheduling and context switching overhead which will
  offset the gains from pipelining the slice assignment. Below a given threshold
  it is faster to simply assign in the main thread rather than enqueue the
  assignment in a side thread. The exact threshold will vary from system to
  system, but the time is not very sensitive to the exact transition so a value
  of 2 ** 14 was chosen which should be reasonable on most systems.
  """

  # Copies with fewer elements than this are done inline on the calling
  # thread; larger ones are handed to the shared copy pool.
  _BINARY_SIZE_THRESHOLD = 2 ** 14
  # Upper bound (seconds) spent in `finalize` waiting for async copies.
  _MAX_COPY_SECONDS = 300

  def __init__(self, num_samples_or_steps):
    # `threading.Event`s for in-flight copies, awaited in `finalize`.
    self._async_copies = []
    self._pool = get_copy_pool()
    # Exceptions raised by worker threads, re-raised on the main thread.
    self._errors = []
    super(SliceAggregator, self).__init__(
        use_steps=False, num_samples_or_steps=num_samples_or_steps)

  def create(self, batch_element):
    """Allocates the full-size result buffer from the first batch element."""
    # This step does not need to be pipelined because NumPy empty array
    # initialization is effectively instantaneous.
    shape = (self.num_samples_or_steps,) + batch_element.shape[1:]
    dtype = batch_element.dtype
    if isinstance(batch_element, ops.EagerTensor):
      dtype = dtype.as_numpy_dtype()
    self.results = np.empty(shape=shape, dtype=dtype)

  def aggregate(self, batch_element, batch_start, batch_end):
    """Copies `batch_element` into `results[batch_start:batch_end]`."""
    # Fail early.
    if self._errors:
      six.reraise(type(self._errors[0]), self._errors[0])
    # In the special case of single batch inference, no copy is needed.
    if batch_end - batch_start == self.num_samples_or_steps:
      self.results = batch_element
      return
    # This is an approximate threshold, so we don't need to consider the number
    # of bytes per element.
    num_elements = np.prod(batch_element.shape)
    if num_elements < self._BINARY_SIZE_THRESHOLD:
      self.results[batch_start:batch_end] = batch_element
    else:
      # Large copy: pipeline it on the shared threadpool and record the
      # completion event so `finalize` can wait for it.
      is_finished = threading.Event()
      self._pool.apply_async(
          self._slice_assign,
          args=(batch_element, batch_start, batch_end, is_finished))
      self._async_copies.append(is_finished)

  def _slice_assign(self, batch_element, batch_start, batch_end, is_finished):
    """Worker-thread body: copy one slice and signal completion."""
    try:
      self.results[batch_start:batch_end] = batch_element
    except Exception as e:  # pylint: disable=broad-except
      # `_slice_assign` should only be called in threads and exceptions raised
      # in threads do not carry over to the main thread. So instead we perform
      # a broad catch in the thread and then store the exception to be
      # re-raised in the main thread.
      self._errors.append(e)
    finally:
      is_finished.set()

  def finalize(self):
    """Blocks until all async copies finish, then surfaces any errors."""
    start_time = time.time()
    for is_finished in self._async_copies:
      # Share the overall deadline across all pending copies.
      timeout = max([0., self._MAX_COPY_SECONDS - (time.time() - start_time)])
      if not is_finished.wait(timeout):
        raise ValueError('Timed out waiting for copy to complete.')
    if self._errors:
      six.reraise(self._errors[0].__class__, self._errors[0])
class OutputsAggregator(Aggregator):
  """Aggregator that concatenates outputs."""

  # Shallow structure of the model outputs, captured from the first batch and
  # used to flatten/repack subsequent batches consistently.
  _structure = None

  def create(self, batch_outs):
    """Builds one sub-aggregator per flattened output of the first batch."""
    # SparseTensorValue is a named tuple which nest will flatten, so we need
    # to guard it to properly handle the structure.
    self._structure = nest.get_traverse_shallow_structure(
        lambda x: not composite_tensor_utils.is_composite_or_composite_value(x),
        batch_outs)
    batch_outs = nest.flatten_up_to(self._structure, batch_outs)
    for batch_element in batch_outs:
      if composite_tensor_utils.is_composite_or_composite_value(batch_element):
        # If the output is not a ndarray, it will be either a composite tensor
        # or a composite tensor's Value object. In either case, we can't
        # allocate an array to hold the object - we'll handle it later.
        self.results.append(ConcatAggregator())
      elif isinstance(batch_element, (np.ndarray, ops.EagerTensor)):
        # With a known total sample count, slices can be written into a
        # preallocated buffer; otherwise fall back to concatenation.
        self.results.append(ConcatAggregator() if self.use_steps else
                            SliceAggregator(self.num_samples_or_steps))
      else:
        # This is not a ndarray, a CompositeTensor, or a CompositeTensorValue.
        # Fail fast rather than trying to concatenate it.
        raise RuntimeError('Attempted to aggregate unsupported object {}.'
                           .format(batch_element))
      self.results[-1].create(batch_element)

  def aggregate(self, batch_outs, batch_start=None, batch_end=None):
    """Routes each flattened output to its per-output sub-aggregator."""
    batch_outs = nest.flatten_up_to(self._structure, batch_outs)
    for batch_element, result in zip(batch_outs, self.results):
      result.aggregate(batch_element, batch_start, batch_end)

  def finalize(self):
    """Finalizes sub-aggregators and repacks results into the original structure."""
    for result in self.results:
      result.finalize()
    self.results = [i.results for i in self.results]
    self.results = nest.pack_sequence_as(self._structure, self.results)
def get_progbar(model, count_mode):
  """Builds a `ProgbarLogger` callback configured for `model`."""
  # Every metric except the leading `loss` entry is treated as stateful.
  if hasattr(model, 'metrics_names'):
    stateful_metrics = model.metrics_names[1:]
  else:
    stateful_metrics = None
  return cbks.ProgbarLogger(count_mode, stateful_metrics=stateful_metrics)
def slice_arrays(arrays, indices, contiguous=True):
  """Slices batches out of provided arrays (workaround for eager tensors).

  Unfortunately eager tensors don't have the same slicing behavior as
  Numpy arrays (they follow the same slicing behavior as symbolic TF tensors),
  hence we cannot use `generic_utils.slice_arrays` directly
  and we have to implement this workaround based on `concat`. This has a
  performance cost.

  Arguments:
    arrays: Single array or list of arrays.
    indices: List of indices in the array that should be included in the output
      batch.
    contiguous: Boolean flag indicating whether the indices are contiguous.

  Returns:
    Slice of data (either single array or list of arrays).
  """
  single_input = not isinstance(arrays, list)
  if single_input:
    arrays = [arrays]
  if any(tensor_util.is_tensor(x) for x in arrays):
    if contiguous:
      # Contiguous indices can be taken as one range slice.
      start, stop = indices[0], indices[-1] + 1
      slices = [x[start:stop] for x in arrays]
    else:
      # Non-contiguous: gather single-element slices and concat them.
      per_array_pieces = [[x[i:i + 1] for i in indices] for x in arrays]
      slices = [
          array_ops.concat(pieces, axis=0) for pieces in per_array_pieces
      ]
  else:
    slices = generic_utils.slice_arrays(arrays, indices)
  if single_input:
    slices = slices[0]
  return slices
def check_num_samples(ins, batch_size=None, steps=None, steps_name='steps'):
  """Determine the number of samples provided for training and evaluation.

  The number of samples is not defined when running with `steps`,
  in which case the number of samples is set to `None`.

  Arguments:
    ins: List of tensors to be fed to the Keras function.
    batch_size: Integer batch size or `None` if not defined.
    steps: Total number of steps (batches of samples) before declaring
      `_predict_loop` finished. Ignored with the default value of `None`.
    steps_name: The public API's parameter name for `steps`.

  Raises:
    ValueError: when `steps` is `None` and the attribute `ins.shape`
      does not exist. Also raises ValueError when `steps` is not `None`
      and `batch_size` is not `None` because they are mutually
      exclusive.

  Returns:
    When steps is `None`, returns the number of samples to be
    processed based on the size of the first dimension of the
    first input numpy array. When steps is not `None` and
    `batch_size` is `None`, returns `None`.
  """
  if steps is not None and batch_size is not None:
    raise ValueError('If ' + steps_name +
                     ' is set, the `batch_size` must be None.')
  if check_steps_argument(ins, steps, steps_name):
    # Step-based execution: the sample count is undefined.
    return None

  first_input = ins[0]
  if hasattr(first_input, 'shape'):
    return int(first_input.shape[0])
  return None  # Edge case where ins == [static_learning_phase]
def standardize_single_array(x, expected_shape=None):
  """Expand data of shape (x,) to (x, 1), unless len(expected_shape)==1."""
  if x is None:
    return None
  if composite_tensor_utils.is_composite_or_composite_value(x):
    # Composite tensors are passed through untouched.
    return x

  expects_rank_one = expected_shape is not None and len(expected_shape) == 1
  if x.shape is not None and len(x.shape) == 1 and not expects_rank_one:
    # Promote the 1D vector to a column vector.
    if tensor_util.is_tensor(x):
      return array_ops.expand_dims(x, axis=1)
    return np.expand_dims(x, 1)
  return x
def standardize_input_data(data,
                           names,
                           shapes=None,
                           check_batch_axis=True,
                           exception_prefix=''):
  """Normalizes inputs and targets provided by users.

  Users may pass data as a list of arrays, dictionary of arrays,
  or as a single array. We normalize this to an ordered list of
  arrays (same order as `names`), while checking that the provided
  arrays have shapes that match the network's expectations.

  Arguments:
    data: User-provided input data (polymorphic).
    names: List of expected array names.
    shapes: Optional list of expected array shapes.
    check_batch_axis: Boolean; whether to check that the batch axis of the
      arrays matches the expected value found in `shapes`.
    exception_prefix: String prefix used for exception formatting.

  Returns:
    List of standardized input arrays (one array per model input).

  Raises:
    ValueError: in case of improperly formatted user-provided data.
  """
  # A model with no inputs expects no data.
  if not names:
    if (data is not None and hasattr(data, '__len__') and len(data) and
        not isinstance(data, dict)):
      raise ValueError(
          'Error when checking model ' + exception_prefix + ': '
          'expected no data, but got:', data)
    return []
  if data is None:
    return [None for _ in range(len(names))]
  if isinstance(data, dict):
    try:
      # Order the dict values by `names`; DataFrames are reduced to their
      # underlying NumPy values.
      data = [
          data[x].values
          if data[x].__class__.__name__ == 'DataFrame' else data[x]
          for x in names
      ]
    except KeyError as e:
      raise ValueError('No data provided for "' + e.args[0] + '". Need data '
                       'for each key in: ' + str(names))
  elif isinstance(data, (list, tuple)):
    if isinstance(data[0], (list, tuple)):
      # Nested lists/tuples are interpreted as one array per input.
      data = [np.asarray(d) for d in data]
    elif len(names) == 1 and isinstance(data[0], (float, int)):
      # A flat list of scalars for a single-input model is one array.
      data = [np.asarray(data)]
    else:
      data = [
          x.values if x.__class__.__name__ == 'DataFrame' else x for x in data
      ]
  else:
    # Single array (or DataFrame) provided for the whole model.
    data = data.values if data.__class__.__name__ == 'DataFrame' else data
    data = [data]
  # Promote rank-1 arrays to column vectors where the model does not
  # explicitly expect rank-1 input.
  if shapes is not None:
    data = [
        standardize_single_array(x, shape) for (x, shape) in zip(data, shapes)
    ]
  else:
    data = [standardize_single_array(x) for x in data]
  if len(data) != len(names):
    if data and hasattr(data[0], 'shape'):
      raise ValueError('Error when checking model ' + exception_prefix +
                       ': the list of Numpy arrays that you are passing to '
                       'your model is not the size the model expected. '
                       'Expected to see ' + str(len(names)) + ' array(s), '
                       'but instead got the following list of ' +
                       str(len(data)) + ' arrays: ' + str(data)[:200] + '...')
    elif len(names) > 1:
      raise ValueError('Error when checking model ' + exception_prefix +
                       ': you are passing a list as input to your model, '
                       'but the model expects a list of ' + str(len(names)) +
                       ' Numpy arrays instead. The list you passed was: ' +
                       str(data)[:200])
    elif len(data) == 1 and not hasattr(data[0], 'shape'):
      raise TypeError('Error when checking model ' + exception_prefix +
                      ': data should be a Numpy array, or list/dict of '
                      'Numpy arrays. Found: ' + str(data)[:200] + '...')
    elif len(names) == 1:
      data = [np.asarray(data)]
  # Check shapes compatibility.
  if shapes:
    for i in range(len(names)):
      if shapes[i] is not None:
        if tensor_util.is_tensor(data[i]):
          tensorshape = data[i].shape
          if not tensorshape:
            # Unknown static shape: nothing to validate.
            continue
          data_shape = tuple(tensorshape.as_list())
        elif composite_tensor_utils.is_composite_or_composite_value(data[i]):
          data_shape = composite_tensor_utils.get_shape(data[i])
        else:
          data_shape = data[i].shape
        shape = shapes[i]
        if len(data_shape) != len(shape):
          raise ValueError('Error when checking ' + exception_prefix +
                           ': expected ' + names[i] + ' to have ' +
                           str(len(shape)) + ' dimensions, but got array '
                           'with shape ' + str(data_shape))
        if not check_batch_axis:
          # Ignore the batch dimension when comparing shapes.
          data_shape = data_shape[1:]
          shape = shape[1:]
        for dim, ref_dim in zip(data_shape, shape):
          # `None` in either position means "any size is acceptable".
          if ref_dim != dim and ref_dim is not None and dim is not None:
            raise ValueError('Error when checking ' + exception_prefix +
                             ': expected ' + names[i] + ' to have shape ' +
                             str(shape) + ' but got array with shape ' +
                             str(data_shape))
  return data
def standardize_sample_or_class_weights(x_weight, output_names, weight_type):
  """Maps `sample_weight` or `class_weight` to model outputs.

  Arguments:
    x_weight: User-provided `sample_weight` or `class_weight` argument.
    output_names: List of output names (strings) in the model.
    weight_type: A string used purely for exception printing.

  Returns:
    A list of `sample_weight` or `class_weight` where there are exactly
        one element per model output.

  Raises:
    ValueError: In case of invalid user-provided argument.
  """
  if x_weight is None or (isinstance(x_weight, (list, tuple)) and
                          len(x_weight) == 0):  # pylint: disable=g-explicit-length-test
    return [None for _ in output_names]
  if len(output_names) == 1:
    if isinstance(x_weight, (list, tuple)) and len(x_weight) == 1:
      return x_weight
    if isinstance(x_weight, dict) and output_names[0] in x_weight:
      return [x_weight[output_names[0]]]
    else:
      # Anything else (array, scalar, ...) is the weight of the sole output.
      return [x_weight]
  if isinstance(x_weight, (list, tuple)):
    if len(x_weight) != len(output_names):
      # NOTE: a space was missing between the closing backtick and 'array'
      # in the original message; fixed here.
      raise ValueError('Provided `' + weight_type + '` was a list of ' +
                       str(len(x_weight)) + ' elements, but the model has ' +
                       str(len(output_names)) + ' outputs. '
                       'You should provide one `' + weight_type + '` '
                       'array per model output.')
    return x_weight
  # `collections.Mapping` was removed in Python 3.10; the ABC lives in
  # `collections.abc`.
  if isinstance(x_weight, collections.abc.Mapping):
    generic_utils.check_for_unexpected_keys(weight_type, x_weight, output_names)
    x_weights = []
    for name in output_names:
      x_weights.append(x_weight.get(name))
    return x_weights
  else:
    raise TypeError('The model has multiple outputs, so `' + weight_type + '` '
                    'should be either a list or a dict. '
                    'Provided `' + weight_type + '` type not understood: ' +
                    str(x_weight))
def standardize_class_weights(class_weight, output_names):
  """Maps `class_weight` to model outputs; see `standardize_sample_or_class_weights`."""
  return standardize_sample_or_class_weights(class_weight, output_names,
                                             'class_weight')
def standardize_sample_weights(sample_weight, output_names):
  """Maps `sample_weight` to model outputs; see `standardize_sample_or_class_weights`."""
  return standardize_sample_or_class_weights(sample_weight, output_names,
                                             'sample_weight')
def check_array_lengths(inputs, targets, weights=None):
  """Does user input validation for numpy arrays.

  Arguments:
    inputs: list of Numpy arrays of inputs.
    targets: list of Numpy arrays of targets.
    weights: list of Numpy arrays of sample weights.

  Raises:
    ValueError: in case of incorrectly formatted data.
  """

  def _batch_sizes(arrays):
    # Distinct first-dimension sizes, skipping `None` entries and symbolic
    # tensors (whose static batch size may be unknown).
    if arrays is None:
      return set()
    return {
        a.shape[0]
        for a in arrays
        if a is not None and not tensor_util.is_tensor(a)
    }

  set_x = _batch_sizes(inputs)
  set_y = _batch_sizes(targets)
  set_w = _batch_sizes(weights)

  if len(set_x) > 1:
    raise ValueError('All input arrays (x) should have '
                     'the same number of samples. Got array shapes: ' +
                     str([x.shape for x in inputs]))
  if len(set_y) > 1:
    raise ValueError('All target arrays (y) should have '
                     'the same number of samples. Got array shapes: ' +
                     str([y.shape for y in targets]))
  if set_x and set_y and list(set_x)[0] != list(set_y)[0]:
    raise ValueError('Input arrays should have '
                     'the same number of samples as target arrays. '
                     'Found ' + str(list(set_x)[0]) + ' input samples '
                     'and ' + str(list(set_y)[0]) + ' target samples.')
  if len(set_w) > 1:
    raise ValueError('All sample_weight arrays should have '
                     'the same number of samples. Got array shapes: ' +
                     str([w.shape for w in weights]))
  if set_y and set_w and list(set_y)[0] != list(set_w)[0]:
    raise ValueError('Sample_weight arrays should have '
                     'the same number of samples as target arrays. Got ' +
                     str(list(set_y)[0]) + ' input samples and ' +
                     str(list(set_w)[0]) + ' target samples.')
def check_loss_and_target_compatibility(targets, loss_fns, output_shapes):
  """Does validation on the compatibility of targets and loss functions.

  This helps prevent users from using loss functions incorrectly. This check
  is purely for UX purposes.

  Arguments:
    targets: list of Numpy arrays of targets.
    loss_fns: list of loss functions.
    output_shapes: list of shapes of model outputs.

  Raises:
    ValueError: if a loss function or target array
        is incompatible with an output.
  """
  # Losses for which a target/output shape mismatch is almost certainly a
  # user error rather than intentional.
  key_loss_fns = {
      losses.mean_squared_error, losses.binary_crossentropy,
      losses.categorical_crossentropy
  }
  key_loss_classes = (losses.MeanSquaredError, losses.BinaryCrossentropy,
                      losses.CategoricalCrossentropy)
  for y, loss, shape in zip(targets, loss_fns, output_shapes):
    # Skip outputs without a target/loss, and symbolic targets whose static
    # shape may be unknown.
    if y is None or loss is None or tensor_util.is_tensor(y):
      continue
    if losses.is_categorical_crossentropy(loss):
      # A trailing dimension of 1 usually means integer class labels, which
      # require the sparse variant of the loss.
      if y.shape[-1] == 1:
        raise ValueError('You are passing a target array of shape ' +
                         str(y.shape) +
                         ' while using as loss `categorical_crossentropy`. '
                         '`categorical_crossentropy` expects '
                         'targets to be binary matrices (1s and 0s) '
                         'of shape (samples, classes). '
                         'If your targets are integer classes, '
                         'you can convert them to the expected format via:\n'
                         '```\n'
                         'from keras.utils import to_categorical\n'
                         'y_binary = to_categorical(y_int)\n'
                         '```\n'
                         '\n'
                         'Alternatively, you can use the loss function '
                         '`sparse_categorical_crossentropy` instead, '
                         'which does expect integer targets.')
    is_loss_wrapper = isinstance(loss, losses.LossFunctionWrapper)
    if (isinstance(loss, key_loss_classes) or (is_loss_wrapper and
                                               (loss.fn in key_loss_fns))):
      # Compare every non-batch dimension of the target against the output.
      for target_dim, out_dim in zip(y.shape[1:], shape[1:]):
        if out_dim is not None and target_dim != out_dim:
          loss_name = loss.name
          if loss_name is None:
            loss_type = loss.fn if is_loss_wrapper else type(loss)
            loss_name = loss_type.__name__
          raise ValueError('A target array with shape ' + str(y.shape) +
                           ' was passed for an output of shape ' + str(shape) +
                           ' while using as loss `' + loss_name + '`. '
                           'This loss expects targets to have the same shape '
                           'as the output.')
def collect_per_output_metric_info(metrics,
                                   output_names,
                                   output_shapes,
                                   loss_fns,
                                   is_weighted=False):
  """Maps metric names and functions to model outputs.

  Arguments:
    metrics: a list or a list of lists or a dict of metric functions.
    output_names: a list of the names (strings) of model outputs.
    output_shapes: a list of the shapes (strings) of model outputs.
    loss_fns: a list of the loss functions corresponding to the model outputs.
    is_weighted: Boolean indicating whether the given metrics are weighted.

  Returns:
    A list (one entry per model output) of dicts.
    For instance, if the model has 2 outputs, and for the first output
    we want to compute "binary_accuracy" and "binary_crossentropy",
    and just "binary_accuracy" for the second output,
    the list would look like: `[{
        'acc': binary_accuracy(),
        'ce': binary_crossentropy(),
      }, {
        'acc': binary_accuracy(),
      }]`

  Raises:
    TypeError: if an incorrect type is passed for the `metrics` argument.
  """
  if not metrics:
    return [{} for _ in output_names]
  if isinstance(metrics, list):
    any_sub_list = any(isinstance(m, list) for m in metrics)
    if any_sub_list:
      if len(metrics) != len(output_names):
        raise ValueError('When passing a list of lists as `metrics`, '
                         'it should have one entry per model output. '
                         'The model has ' + str(len(output_names)) +
                         ' outputs, but you passed metrics=' + str(metrics))
      # User has provided a list of len = len(outputs).
      nested_metrics = [generic_utils.to_list(m) for m in metrics]
    else:
      # If it is a single list we then apply all metrics to all outputs.
      if len(output_names) > 1:
        nested_metrics = []
        for _ in output_names:
          # Clone so each output gets independent stateful metric objects.
          nested_metrics.append(
              [metrics_module.clone_metric(m) for m in metrics])
      else:
        nested_metrics = [metrics]
  # `collections.Mapping` was removed in Python 3.10; the ABC lives in
  # `collections.abc`.
  elif isinstance(metrics, collections.abc.Mapping):
    generic_utils.check_for_unexpected_keys('metrics', metrics, output_names)
    nested_metrics = []
    for name in output_names:
      output_metrics = generic_utils.to_list(metrics.get(name, []))
      nested_metrics.append(output_metrics)
  else:
    raise TypeError('Type of `metrics` argument not understood. '
                    'Expected a list or dictionary, found: ' + str(metrics))
  per_output_metrics = []
  # Use a distinct loop variable so the `metrics` argument is not shadowed.
  for i, output_metrics in enumerate(nested_metrics):
    metrics_dict = OrderedDict()
    for metric in output_metrics:
      metric_name = get_metric_name(metric, is_weighted)
      metric_fn = get_metric_function(
          metric, output_shape=output_shapes[i], loss_fn=loss_fns[i])
      # If the metric function is not stateful, we create a stateful version.
      if not isinstance(metric_fn, metrics_module.Metric):
        metric_fn = metrics_module.MeanMetricWrapper(
            metric_fn, name=metric_name)
      metrics_dict[metric_name] = metric_fn
    per_output_metrics.append(metrics_dict)
  return per_output_metrics
def batch_shuffle(index_array, batch_size):
  """Shuffles an array in a batch-wise fashion.

  Useful for shuffling HDF5 arrays
  (where one cannot access arbitrary indices).

  Arguments:
    index_array: array of indices to be shuffled.
    batch_size: integer.

  Returns:
    The `index_array` array, shuffled in a batch-wise fashion.
  """
  full_batch_count = len(index_array) // batch_size
  cutoff = full_batch_count * batch_size
  # Indices past the last full batch cannot be reshaped cleanly; hold them
  # out and re-append them (unshuffled) at the end.
  leftover = index_array[cutoff:]
  batches = index_array[:cutoff].reshape((full_batch_count, batch_size))
  # Shuffle whole batches (rows) while keeping each batch's contents intact.
  np.random.shuffle(batches)
  return np.append(batches.flatten(), leftover)
def standardize_weights(y,
                        sample_weight=None,
                        class_weight=None,
                        sample_weight_mode=None):
  """Performs sample weight validation and standardization.

  Everything gets normalized to a single sample-wise (or timestep-wise)
  weight array. If both `sample_weight` and `class_weight` are provided,
  the weights are multiplied.

  Arguments:
    y: Numpy array of model targets to be weighted.
    sample_weight: User-provided `sample_weight` argument.
    class_weight: User-provided `class_weight` argument.
    sample_weight_mode: One of `None` or `"temporal"`. `"temporal"` indicated
      that we expect 2D weight data that will be applied to the last 2
      dimensions of the targets (i.e. we are weighting timesteps, not
      samples).

  Returns:
    A numpy array of target weights, one entry per sample to weight.

  Raises:
    ValueError: In case of invalid user-provided arguments.
  """
  # Iterator may return sample_weight as 1-tuple
  if isinstance(sample_weight, tuple):
    sample_weight = sample_weight[0]
  if sample_weight_mode is not None and sample_weight_mode != 'samplewise':
    if sample_weight_mode != 'temporal':
      raise ValueError('"sample_weight_mode '
                       'should be None or "temporal". '
                       'Found: ' + str(sample_weight_mode))
    # Temporal mode: targets need a time dimension and weights must be 2D.
    if len(y.shape) < 3:
      raise ValueError('Found a sample_weight array for '
                       'an input with shape ' + str(y.shape) + '. '
                       'Timestep-wise sample weighting (use of '
                       'sample_weight_mode="temporal") is restricted to '
                       'outputs that are at least 3D, i.e. that have '
                       'a time dimension.')
    if sample_weight is not None and len(sample_weight.shape) != 2:
      raise ValueError('Found a sample_weight array with shape ' +
                       str(sample_weight.shape) + '. '
                       'In order to use timestep-wise sample weighting, '
                       'you should pass a 2D sample_weight array.')
  else:
    # Sample-wise mode: weights must be a 1D vector (one entry per sample).
    if sample_weight is not None and len(sample_weight.shape) != 1:
      raise ValueError('Found a sample_weight array with shape ' +
                       str(sample_weight.shape) + '. '
                       'In order to use timestep-wise sample weights, '
                       'you should specify '
                       'sample_weight_mode="temporal" '
                       'in compile(). If you just mean to use '
                       'sample-wise weights, make sure your '
                       'sample_weight array is 1D.')
  if sample_weight is not None:
    if len(sample_weight.shape) > len(y.shape):
      raise ValueError('Found a sample_weight with shape' +
                       str(sample_weight.shape) + '.'
                       'Expected sample_weight with rank '
                       'less than or equal to ' + str(len(y.shape)))
    # Leading dimensions of the weights must match the targets exactly.
    if (not tensor_util.is_tensor(sample_weight) and
        y.shape[:sample_weight.ndim] != sample_weight.shape):
      raise ValueError('Found a sample_weight array with shape ' +
                       str(sample_weight.shape) + ' for an input with shape ' +
                       str(y.shape) + '. '
                       'sample_weight cannot be broadcast.')
  # Class weights applied per-sample.
  class_sample_weight = None
  if isinstance(class_weight, dict):
    if len(y.shape) > 2:
      raise ValueError('`class_weight` not supported for '
                       '3+ dimensional targets.')
    if len(y.shape) == 2:
      if y.shape[1] > 1:
        # One-hot targets: recover integer class ids via argmax.
        y_classes = np.argmax(y, axis=1)
      elif y.shape[1] == 1:
        # Column vector of class ids: flatten to 1D.
        y_classes = np.reshape(y, y.shape[0])
    else:
      y_classes = y
    class_sample_weight = np.asarray(
        [class_weight[cls] for cls in y_classes if cls in class_weight])
    # A length mismatch means some classes were absent from `class_weight`.
    if len(class_sample_weight) != len(y_classes):
      # subtract the sets to pick all missing classes
      existing_classes = set(y_classes)
      existing_class_weight = set(class_weight.keys())
      raise ValueError(
          '`class_weight` must contain all classes in the data.'
          ' The classes %s exist in the data but not in '
          '`class_weight`.' % (existing_classes - existing_class_weight))
  if class_sample_weight is not None and sample_weight is not None:
    # Multiply weights if both are provided.
    return class_sample_weight * sample_weight
  if sample_weight is not None:
    return sample_weight
  if class_sample_weight is not None:
    return class_sample_weight
  return None
def has_symbolic_tensors(ls):
  """Returns True if `ls` contains symbolic tensors (graph mode only)."""
  # In eager mode there are no symbolic tensors by definition.
  return not context.executing_eagerly() and has_tensors(ls)
def has_tensors(ls):
  """Returns True if `ls` is, or contains, a TensorFlow tensor."""
  if isinstance(ls, (list, tuple)):
    return any(tensor_util.is_tensor(v) for v in ls)
  if isinstance(ls, dict):
    # Only the values can be tensors; keys are ignored.
    return any(tensor_util.is_tensor(v) for v in six.itervalues(ls))
  return tensor_util.is_tensor(ls)
def get_metric_name(metric, weighted=False):
  """Returns the name corresponding to the given metric input.

  Arguments:
    metric: Metric function name or reference.
    weighted: Boolean indicating if the given metric is weighted.

  Returns:
      The metric name.
  """
  if tf2.enabled():
    # We keep the string that the user has set in compile as the metric name.
    if isinstance(metric, six.string_types):
      return metric
    metric = metrics_module.get(metric)
    return metric.name if hasattr(metric, 'name') else metric.__name__

  # TF1 naming: shorthand strings map to fixed suffixes, optionally prefixed
  # with 'weighted_'.
  prefix = 'weighted_' if weighted else ''
  if metric in ('accuracy', 'acc'):
    suffix = 'acc'
  elif metric in ('crossentropy', 'ce'):
    suffix = 'ce'
  else:
    metric_fn = metrics_module.get(metric)
    # Get metric name as string
    if hasattr(metric_fn, 'name'):
      suffix = metric_fn.name
    else:
      suffix = metric_fn.__name__
  return prefix + suffix
def get_metric_function(metric, output_shape=None, loss_fn=None):
  """Returns the metric function corresponding to the given metric input.

  Arguments:
    metric: Metric function name or reference.
    output_shape: The shape of the output that this metric will be calculated
      for.
    loss_fn: The loss function used.

  Returns:
      The metric function.
  """
  if metric not in ['accuracy', 'acc', 'crossentropy', 'ce']:
    return metrics_module.get(metric)

  # The loss function disambiguates between sparse/binary/categorical.
  is_sparse_categorical = (
      isinstance(loss_fn, losses.SparseCategoricalCrossentropy) or
      (isinstance(loss_fn, losses.LossFunctionWrapper) and
       loss_fn.fn == losses.sparse_categorical_crossentropy))
  is_binary = (
      isinstance(loss_fn, losses.BinaryCrossentropy) or
      (isinstance(loss_fn, losses.LossFunctionWrapper) and
       loss_fn.fn == losses.binary_crossentropy))

  if metric in ['accuracy', 'acc']:
    if output_shape[-1] == 1 or is_binary:
      return metrics_module.binary_accuracy
    if is_sparse_categorical:
      return metrics_module.sparse_categorical_accuracy
    # If the output_shape[-1] is not 1, then we know output is `categorical`.
    # We assume it is sparse categorical only if loss is explicitly given
    # as sparse categorical crossentropy loss.
    return metrics_module.categorical_accuracy

  if output_shape[-1] == 1 or is_binary:
    return metrics_module.binary_crossentropy
  if is_sparse_categorical:
    return metrics_module.sparse_categorical_crossentropy
  return metrics_module.categorical_crossentropy
def call_metric_function(metric_fn,
                         y_true,
                         y_pred=None,
                         weights=None,
                         mask=None):
  """Invokes metric function and returns the metric result tensor."""
  if mask is not None:
    mask = math_ops.cast(mask, y_pred.dtype)
    if weights is None:
      # With no explicit weights, the mask itself acts as the sample weight.
      weights = mask
    else:
      # Broadcast the mask against the provided weights, then combine them.
      mask, _, weights = tf_losses_utils.squeeze_or_expand_dimensions(
          mask, sample_weight=weights)
      weights *= mask

  if y_pred is None:
    # `Mean` metric only takes a single value.
    return metric_fn(y_true, sample_weight=weights)
  return metric_fn(y_true, y_pred, sample_weight=weights)
def get_loss_function(loss):
  """Returns the loss corresponding to the loss input in `compile` API.

  Arguments:
    loss: String name, serialized config (dict), callable, or
      `tf.losses.Loss` instance.

  Returns:
    `None`, the given `Loss`/callable, or a `LossFunctionWrapper` around the
    resolved loss function.
  """
  # `collections.Mapping` was removed in Python 3.10; use the abc submodule.
  from collections import abc
  if loss is None or isinstance(loss, losses.Loss):
    return loss
  # Deserialize loss configuration, if needed.
  if isinstance(loss, abc.Mapping):
    loss = losses.get(loss)
  # Custom callable class.
  if callable(loss) and not hasattr(loss, '__name__'):
    return loss
  # Wrap loss function with signature `(y_true, y_pred, **kwargs)`
  # in `LossFunctionWrapper` class.
  loss_fn = losses.get(loss)
  # For losses which are given as strings/functions in the compile API,
  # we always set the loss reduction type to be `SUM_OVER_BATCH_SIZE`
  # (both in distribution strategy context and otherwise).
  return losses.LossFunctionWrapper(
      loss_fn,
      name=loss_fn.__name__,
      reduction=losses_utils.ReductionV2.SUM_OVER_BATCH_SIZE)
def validate_dataset_input(x, y, sample_weight, validation_split=None):
  """Validates user input arguments when a dataset iterator is passed.

  Arguments:
    x: Input data. A `tf.data` dataset or iterator.
    y: Target data. It could be either Numpy array(s) or TensorFlow tensor(s).
      Expected to be `None` when `x` is a dataset iterator.
    sample_weight: An optional sample-weight array passed by the user to weight
      the importance of each sample in `x`. Expected to be `None` when `x` is a
      dataset iterator.
    validation_split: Float between 0 and 1. Fraction of the training data to
      be used as validation data. Expected to be `None` when `x` is a dataset
      iterator.

  Raises:
    ValueError: if argument `y` or `sample_weight` or `validation_split` are
        provided by user.
  """
  if y is not None:
    raise ValueError('You passed a dataset or dataset iterator (%s) as '
                     'input `x` to your model. In that case, you should '
                     'not specify a target (`y`) argument, since the dataset '
                     'or dataset iterator generates both input data and '
                     'target data. '
                     'Received: %s' % (x, y))
  if sample_weight is not None:
    # Fixed: the original concatenated fragments without spaces ("youcan",
    # "yourdataset"), producing a garbled user-facing message.
    raise ValueError('`sample_weight` argument is not supported when input '
                     '`x` is a dataset or a dataset iterator. Instead, you '
                     'can provide sample_weight as the third element of your '
                     'dataset, i.e. (inputs, targets, sample_weight). '
                     'Received: x=%s, sample_weight=%s' % (x, sample_weight))
  if validation_split is not None and validation_split != 0.0:
    raise ValueError(
        '`validation_split` argument is not supported when '
        'input `x` is a dataset or a dataset iterator. '
        'Received: x=%s, validation_split=%f' % (x, validation_split))
def check_generator_arguments(y=None, sample_weight=None,
                              validation_split=None):
  """Validates arguments passed when using a generator.

  Raises:
    ValueError: if `y`, `sample_weight` or `validation_split` is provided —
      targets/weights must come from the generator itself.
  """
  # Fixed: the original concatenated fragments without a space ("data isa
  # generator"), producing a garbled user-facing message.
  if y is not None:
    raise ValueError('`y` argument is not supported when data is '
                     'a generator or Sequence instance. Instead pass targets'
                     ' as the second element of the generator.')
  if sample_weight is not None:
    raise ValueError('`sample_weight` argument is not supported when data is '
                     'a generator or Sequence instance. Instead pass sample'
                     ' weights as the third element of the generator.')
  if validation_split:
    raise ValueError('If your data is in the form of a Python generator, '
                     'you cannot use `validation_split`.')
def check_steps_argument(input_data, steps, steps_name):
  """Validates `steps` argument based on input data's type.

  `steps` must be provided when the input is an iterator, a symbolic tensor,
  or `None`/empty (the model was built on symbolic tensors).

  Arguments:
    input_data: Input data. Can be Numpy array(s) or TensorFlow tensor(s) or
      tf.data.Dataset iterator or `None`.
    steps: Integer or `None`. Total number of steps (batches of samples) to
      execute.
    steps_name: The public API's parameter name for `steps`.

  Returns:
    boolean, True if `steps` argument is required, else False.

  Raises:
    ValueError: if `steps` argument is required for given input data type
      but not provided.
  """
  # TODO(fchollet): allow datasets with steps=None if cardinality is known.
  is_x_iterator = isinstance(
      input_data, (iterator_ops.Iterator, iterator_ops.IteratorV2))
  steps_required = (
      input_data is None or is_x_iterator or
      has_symbolic_tensors(input_data) or
      (isinstance(input_data, list) and not input_data))
  if not steps_required:
    return False
  if steps is None:
    input_type_str = 'a Dataset iterator' if is_x_iterator else 'data tensors'
    raise ValueError('When using {input_type} as input to a model, you should'
                     ' specify the `{steps_name}` argument.'.format(
                         input_type=input_type_str, steps_name=steps_name))
  return True
def cast_single_tensor(x, dtype=None):
  """Convert `x` to a tensor, casting floating tensors to `dtype`.

  Non-floating tensors are returned unchanged; `dtype` defaults to
  `K.floatx()`.
  """
  tensor = ops.convert_to_tensor(x)
  if not tensor.dtype.is_floating:
    return tensor
  return math_ops.cast(tensor, dtype=dtype or K.floatx())
def cast_if_floating_dtype(x):
  """Casts the given data tensors to the default floating point type.

  Casts only if the input is already a floating point type.

  Args:
    x: tensor or list/tuple of tensors.

  Returns:
    Converted input.
  """
  # Applies `cast_single_tensor` element-wise over any nested structure.
  return nest.map_structure(cast_single_tensor, x)
def cast_if_floating_to_model_input_dtypes(x, model):
  """Casts the given data tensors to the dtypes of the model inputs.

  Casts only if the input is already a floating point type.

  Args:
    x: tensor or list/tuple of tensors.
    model: The model.

  Returns:
    Converted input. Each tensor is casted to the corresponding input in
    `model.inputs`.
  """
  # TODO(b/131372221): We should probably cast even if the input is not
  # floating-point.
  # Pair each leaf of `x` with the dtype of the model input at the same
  # structural position.
  input_dtypes = nest.map_structure(lambda t: t.dtype, model.inputs)
  return nest.map_structure(cast_single_tensor, x, input_dtypes)
def prepare_sample_weight_modes(training_endpoints, sample_weight_mode):
  """Prepares sample weight modes for the model.

  Sets `sample_weight_mode` on each endpoint from a dict (by output name),
  a list/tuple (positional), or a single value applied to all endpoints.

  Args:
    training_endpoints: List of model _TrainingEndpoints.
    sample_weight_mode: sample weight mode user input passed from compile API.

  Raises:
    ValueError: In case of invalid `sample_weight_mode` input.
  """
  # `collections.Mapping` was removed in Python 3.10; use the abc submodule.
  from collections import abc
  if isinstance(sample_weight_mode, abc.Mapping):
    generic_utils.check_for_unexpected_keys(
        'sample_weight_mode', sample_weight_mode,
        [e.output_name for e in training_endpoints])
    for end_point in training_endpoints:
      if not end_point.should_skip_target_weights():
        if end_point.output_name not in sample_weight_mode:
          # Fixed: the original message was missing a space before "missing".
          raise ValueError('Output ' + end_point.output_name +
                           ' missing from `_sample_weight_modes` dictionary')
        else:
          end_point.sample_weight_mode = sample_weight_mode.get(
              end_point.output_name)
  elif isinstance(sample_weight_mode, (list, tuple)):
    if len(sample_weight_mode) != len(training_endpoints):
      # Fixed: the original rendered "3_sample_weight_modes." without a space.
      raise ValueError('When passing a list as sample_weight_mode, '
                       'it should have one entry per model output. '
                       'The model has ' + str(len(training_endpoints)) +
                       ' outputs, but you passed ' +
                       str(len(sample_weight_mode)) + ' sample_weight_modes.')
    for mode, endpoint in zip(sample_weight_mode, training_endpoints):
      if not endpoint.should_skip_target_weights():
        endpoint.sample_weight_mode = mode
  else:
    # A single mode (string or None) is broadcast to all endpoints.
    for endpoint in training_endpoints:
      if not endpoint.should_skip_target_weights():
        endpoint.sample_weight_mode = sample_weight_mode
def prepare_loss_functions(loss, output_names):
  """Converts loss to a list of loss functions.

  Arguments:
    loss: String (name of objective function), objective function or
      `tf.losses.Loss` instance. See `tf.losses`. If the model has multiple
      outputs, you can use a different loss on each output by passing a
      dictionary or a list of losses. The loss value that will be minimized by
      the model will then be the sum of all individual losses.
    output_names: List of model output names.

  Returns:
    A list of loss objective functions.

  Raises:
    ValueError: If loss is a dict with keys not in model output names,
        or if loss is a list with len not equal to model outputs.
  """
  # `collections.Mapping` and `collections.Sequence` were removed in
  # Python 3.10; use the abc submodule instead.
  from collections import abc
  if isinstance(loss, abc.Mapping):
    generic_utils.check_for_unexpected_keys('loss', loss, output_names)
    loss_functions = []
    for name in output_names:
      if name not in loss:
        logging.warning(
            'Output {0} missing from loss dictionary. We assume '
            'this was done on purpose. The fit and evaluate APIs will not be '
            'expecting any data to be passed to {0}.'.format(name))
      loss_functions.append(get_loss_function(loss.get(name, None)))
  elif isinstance(loss, six.string_types):
    # Checked before the Sequence branch since strings are also Sequences.
    loss_functions = [get_loss_function(loss) for _ in output_names]
  elif isinstance(loss, abc.Sequence):
    if len(loss) != len(output_names):
      raise ValueError('When passing a list as loss, it should have one entry '
                       'per model outputs. The model has {} outputs, but you '
                       'passed loss={}'.format(len(output_names), loss))
    loss_functions = nest.map_structure(get_loss_function, loss)
  else:
    loss_functions = [get_loss_function(loss) for _ in range(len(output_names))]
  return loss_functions
def prepare_loss_weights(training_endpoints, loss_weights=None):
  """Converts loss weights to a list of loss weights.

  The result loss weights will be populated on the training endpoint.

  Arguments:
    training_endpoints: List of model training endpoints.
    loss_weights: Optional list or dictionary specifying scalar coefficients
      (Python floats) to weight the loss contributions of different model
      outputs. The loss value that will be minimized by the model will then be
      the *weighted sum* of all individual losses, weighted by the
      `loss_weights` coefficients. If a list, it is expected to have a 1:1
      mapping to the model's outputs. If a dict, it is expected to map
      output names (strings) to scalar coefficients.

  Raises:
    ValueError: If loss weight is a dict with key not in model output names,
        or if loss is a list with len not equal to model outputs.
  """
  # `collections.Mapping` was removed in Python 3.10; use the abc submodule.
  from collections import abc
  if loss_weights is None:
    # Default: weight every output equally.
    for e in training_endpoints:
      e.loss_weight = 1.
  elif isinstance(loss_weights, abc.Mapping):
    generic_utils.check_for_unexpected_keys(
        'loss_weights', loss_weights,
        [e.output_name for e in training_endpoints])
    for e in training_endpoints:
      e.loss_weight = loss_weights.get(e.output_name, 1.)
  elif isinstance(loss_weights, list):
    if len(loss_weights) != len(training_endpoints):
      raise ValueError('When passing a list as loss_weights, '
                       'it should have one entry per model output. '
                       'The model has ' + str(len(training_endpoints)) +
                       ' outputs, but you passed loss_weights=' +
                       str(loss_weights))
    for w, e in zip(loss_weights, training_endpoints):
      e.loss_weight = w
  else:
    raise TypeError('Could not interpret loss_weights argument: ' +
                    str(loss_weights) + ' - expected a list of dicts.')
# TODO(rohanj): This is a hack to get around not depending on feature_column and
# create a cyclical dependency. Figure out a cleaner solution
def is_feature_layer(layer):
  """Returns whether `layer` is a FeatureLayer or not.

  Layers opt in by setting the private `_is_feature_layer` attribute.
  """
  marker = getattr(layer, '_is_feature_layer', False)
  return marker
def is_eager_dataset_or_iterator(data):
  """True when eager execution is enabled and `data` is a tf.data object."""
  eager_data_types = (dataset_ops.DatasetV1, dataset_ops.DatasetV2,
                      iterator_ops.IteratorV2)
  return context.executing_eagerly() and isinstance(data, eager_data_types)
# pylint: disable=protected-access
def assert_not_batched(dataset):
  """Asserts that `dataset` is not batched.

  The algorithm used by this method is sound but not complete: failing to
  establish the assertion does not mean the dataset is actually batched.

  Example usage:
  ```python
  try:
    assert_not_batched(dataset)
    # safe to assume `dataset` is not batched here
  except ValueError:
    # make no assumptions about `dataset`
  ```

  Args:
    dataset: The dataset to analyze.

  Raises:
    ValueError: If the method cannot establish the assertion.
  """
  if isinstance(dataset, dataset_ops.DatasetV1Adapter):
    return assert_not_batched(dataset._dataset)
  # Transformations known not to introduce batching; anything else fails.
  allowed = (
      dataset_ops._OptionsDataset,
      dataset_ops.ConcatenateDataset,
      dataset_ops.CacheDataset,
      dataset_ops.FilterDataset,
      dataset_ops.MapDataset,
      dataset_ops.ParallelMapDataset,
      dataset_ops.PrefetchDataset,
      dataset_ops.RangeDataset,
      dataset_ops.RepeatDataset,
      dataset_ops.ShuffleDataset,
      dataset_ops.SkipDataset,
      dataset_ops.SparseTensorSliceDataset,
      dataset_ops.TakeDataset,
      dataset_ops.TensorDataset,
      dataset_ops.TensorSliceDataset,
      dataset_ops.ZipDataset,
      readers.FixedLengthRecordDatasetV2,
      readers.TextLineDatasetV2,
      readers.TFRecordDatasetV2,
  )
  if not isinstance(dataset, allowed):
    raise ValueError('Could not assert that dataset is not batched.')
  for input_dataset in dataset._inputs():
    assert_not_batched(input_dataset)
# pylint: disable=protected-access
def assert_not_shuffled(dataset):
  """Asserts that `dataset` is not shuffled.

  The algorithm used by this method is sound but not complete: failing to
  establish the assertion does not mean the dataset is actually shuffled.

  Example usage:
  ```python
  try:
    assert_not_shuffled(dataset)
    # safe to assume `dataset` is not shuffled here
  except ValueError:
    # make no assumptions about `dataset`
  ```

  Args:
    dataset: The dataset to analyze.

  Raises:
    ValueError: If the method cannot establish the assertion.
  """
  if isinstance(dataset, dataset_ops.DatasetV1Adapter):
    return assert_not_shuffled(dataset._dataset)
  # Transformations known not to shuffle element order; anything else fails.
  allowed = (
      dataset_ops._OptionsDataset,
      dataset_ops.BatchDataset,
      dataset_ops.ConcatenateDataset,
      dataset_ops.CacheDataset,
      dataset_ops.FilterDataset,
      dataset_ops.MapDataset,
      dataset_ops.PaddedBatchDataset,
      dataset_ops.ParallelMapDataset,
      dataset_ops.PrefetchDataset,
      dataset_ops.RangeDataset,
      dataset_ops.RepeatDataset,
      dataset_ops.SkipDataset,
      dataset_ops.SparseTensorSliceDataset,
      dataset_ops.TakeDataset,
      dataset_ops.TensorDataset,
      dataset_ops.TensorSliceDataset,
      dataset_ops.WindowDataset,
      dataset_ops.ZipDataset,
      readers.FixedLengthRecordDatasetV2,
      readers.TextLineDatasetV2,
      readers.TFRecordDatasetV2,
  )
  if not isinstance(dataset, allowed):
    raise ValueError('Could not assert that dataset is not shuffled.')
  for input_dataset in dataset._inputs():
    assert_not_shuffled(input_dataset)
def verify_dataset_shuffled(x):
  """Logs a warning when the input dataset is provably not shuffled.

  Args:
    x: Dataset passed as an input to the model.
  """
  assert isinstance(x, dataset_ops.DatasetV2)
  try:
    assert_not_shuffled(x)
  except ValueError:
    # The analysis could not rule out shuffling — give the user the benefit
    # of the doubt and stay silent.
    return
  logging.warning('Expected a shuffled dataset but input dataset `x` is '
                  'not shuffled. Please invoke `shuffle()` on input dataset.')
def is_dataset_or_iterator(data):
  """Whether `data` is a tf.data Dataset (v1/v2) or iterator instance."""
  dataset_types = (dataset_ops.DatasetV1, dataset_ops.DatasetV2,
                   iterator_ops.Iterator, iterator_ops.IteratorV2)
  return isinstance(data, dataset_types)
def get_iterator(dataset):
  """Create and initialize an iterator from a dataset.

  Eager mode uses a one-shot iterator; graph mode uses an initializable
  iterator whose init op is run immediately.
  """
  if not context.executing_eagerly():
    iterator = dataset_ops.make_initializable_iterator(dataset)
    initialize_iterator(iterator)
    return iterator
  return dataset_ops.make_one_shot_iterator(dataset)
def initialize_iterator(iterator):
  """Run the iterator's initializer op in graph mode (no-op when eager)."""
  if context.executing_eagerly():
    return
  init_op = iterator.initializer
  K.get_session((init_op,)).run(init_op)
def extract_tensors_from_dataset(dataset):
  """Extract a tuple of tensors `inputs, targets, sample_weight` from a dataset.

  Arguments:
    dataset: Dataset instance.

  Returns:
    Tuple of tensors `x, y, weights`. `y` and `weights` entry may be None.
  """
  return unpack_iterator_input(get_iterator(dataset))
def unpack_iterator_input(iterator):
  """Convert a dataset iterator to a tuple of tensors `x, y, sample_weights`.

  Arguments:
    iterator: Instance of a dataset iterator.

  Returns:
    Tuple of tensors `x, y, weights`. `y` and `weights` entry may be None.
  """
  try:
    next_element = iterator.get_next()
  except errors.OutOfRangeError:
    raise RuntimeError('Your dataset iterator ran out of data; '
                       'Make sure that your dataset can generate '
                       'required number of samples.')
  if not isinstance(next_element, (list, tuple)):
    # A bare element is treated as inputs only.
    return next_element, None, None
  if len(next_element) == 2:
    x, y = next_element
    return x, y, None
  if len(next_element) == 3:
    x, y, weights = next_element
    return x, y, weights
  raise ValueError(
      'Please provide model inputs as a list or tuple of 2 or 3 '
      'elements: (input, target) or (input, target, sample_weights) '
      'Received %s' % next_element)
def infer_steps_for_dataset(dataset, steps, epochs=1, steps_name='steps'):
  """Infers steps_per_epoch needed to loop through a dataset.

  Arguments:
    dataset: Input data of type tf.data.Dataset.
    steps: Number of steps to draw from the dataset (may be None if unknown).
    epochs: Number of times to iterate over the dataset.
    steps_name: The string name of the steps argument, either `steps`,
      `validation_steps`, or `steps_per_epoch`. Only used for error message
      formatting.

  Returns:
    Integer or `None`. Inferred number of steps to loop through the dataset.
    `None` is returned if the size of the dataset is unknown and `steps` was
    not specified.

  Raises:
    ValueError: In case of invalid argument values.
  """
  assert isinstance(dataset, dataset_ops.DatasetV2)
  size = K.get_value(cardinality.cardinality(dataset))
  if size == cardinality.INFINITE and steps is None:
    raise ValueError('When passing an infinitely repeating dataset, you '
                     'must specify the `%s` argument.' % (steps_name,))
  # With a known finite size, reject requests that would over-draw.
  if size >= 0 and steps is not None and steps * epochs > size:
    if epochs > 1:
      raise ValueError('The dataset you passed contains %s batches, but you '
                       'passed `epochs=%s` and `%s=%s`, which is a total of '
                       '%s steps. We cannot draw that many steps from this '
                       'dataset. We suggest to set `%s=%s`.' %
                       (size, epochs, steps_name, steps, steps * epochs,
                        steps_name, size // epochs))
    raise ValueError('The dataset you passed contains %s batches, but you '
                     'passed `%s=%s`. We cannot draw that many steps from '
                     'this dataset. We suggest to set `%s=%s`.' %
                     (size, steps_name, steps, steps_name, size))
  if steps is not None:
    return steps
  return size if size >= 0 else None
class ModelInputs(object):
  """Encapsulates model inputs.

  Allows for transforming model inputs while keeping the same structure.
  """

  def __init__(self, inputs):
    # `inputs` may be a single value, a list/tuple, or a dict. Dicts are
    # flattened in sorted-key order so naming is deterministic; everything
    # else is flattened with `nest` and given generated 'input_%d' names.
    self._inputs = inputs
    self._is_dict = isinstance(self._inputs, dict)
    self._is_single_input = not isinstance(self._inputs, (list, tuple, dict))
    self._flattened_inputs = []
    self._input_names = []
    if self._is_dict:
      for k in sorted(self._inputs.keys()):
        self._flattened_inputs.append(self._inputs[k])
        self._input_names.append(k)
    else:
      self._flattened_inputs = nest.flatten(self._inputs)
      self._input_names = [
          'input_%d' % (i + 1) for i in range(len(self._flattened_inputs))
      ]

  def get_input_names(self):
    """Returns keys to name inputs by.

    In case inputs provided were a list, tuple or single entry, we make up a
    key 'input_%d'. For dictionary case, we return a sorted list of keys.
    """
    return self._input_names

  def get_symbolic_inputs(self, return_single_as_list=False):
    """Returns inputs to be set as self.inputs for a model."""
    # TODO(karmel): There is a side-effect here where what you get
    # with as_list and as_dict depends on whether you have called this
    # method first, since it modifies in place.
    for i, (k, v) in enumerate(zip(self._input_names, self._flattened_inputs)):
      if isinstance(v, (list, float, int)):
        # Promote plain Python values to arrays with an explicit feature axis.
        v = np.asarray(v)
        if v.ndim == 1:
          v = np.expand_dims(v, 1)
      if isinstance(v, (np.ndarray, ops.EagerTensor)):
        # We fix the placeholder shape except the batch size.
        # This is suboptimal, but it is the best we can do with the info
        # we have. The user should call `model._set_inputs(placeholders)`
        # to specify custom placeholders if the need arises.
        shape = (None,) + tuple(v.shape[1:])
        dtype = dtypes.as_dtype(v.dtype)
        if dtype.is_floating:
          dtype = K.floatx()
        v = K.placeholder(shape=shape, name=k, dtype=dtype)
      elif isinstance(v, tensor_spec.TensorSpec):
        shape = (None,) + tuple(v.shape.as_list()[1:])
        v = K.placeholder(shape=shape, name=k, dtype=v.dtype)
      # NOTE: mutates the stored flattened inputs in place (see TODO above).
      self._flattened_inputs[i] = v
    if self._is_dict:
      return dict(zip(self._input_names, self._flattened_inputs))
    if self._is_single_input and not return_single_as_list:
      return self._flattened_inputs[0]
    return self._flattened_inputs

  def as_dict(self):
    """An iterable over a dictionary version of inputs."""
    for k, v in zip(self._input_names, self._flattened_inputs):
      yield k, v

  def as_list(self):
    """Returning the inputs as a list."""
    return self._flattened_inputs
# Allow use of methods not exposed to the user.
# pylint: disable=protected-access
def get_input_shape_and_dtype(layer):
  """Retrieves input shape and input dtype of layer if applicable.

  Args:
    layer: Layer (or model) instance.

  Returns:
    Tuple (input_shape, input_dtype). Both could be None if the layer
    does not have a defined input shape.

  Raises:
    ValueError: in case an empty Sequential or Functional model is passed.
  """

  def _is_graph_model(candidate):
    # Graph networks and Sequential models expose their layers; descend
    # into them to find the first concrete layer.
    is_graph_network = getattr(candidate, '_is_graph_network', False)
    return bool(is_graph_network) or candidate.__class__.__name__ == 'Sequential'

  # In case of nested models: recover the first layer of the deepest model
  # to infer input shape and dtype. Subclassed Models may not have been
  # built so can't be checked.
  while _is_graph_model(layer):
    if not layer.layers:
      raise ValueError('An empty Model cannot be used as a Layer.')
    layer = layer.layers[0]

  if hasattr(layer, '_batch_input_shape'):
    return layer._batch_input_shape, layer.dtype
  return None, None
# pylint: enable=protected-access
def get_static_batch_size(layer):
  """Gets the static batch size of a Layer.

  Arguments:
    layer: a `Layer` instance.

  Returns:
    The static batch size of a Layer, or None when no batch input shape is
    defined.
  """
  batch_input_shape, _ = get_input_shape_and_dtype(layer)
  if batch_input_shape is None:
    return None
  return tensor_shape.as_dimension(batch_input_shape[0]).value
def generic_output_names(outputs_list):
  """Generate placeholder names ('output_1', 'output_2', ...) for outputs."""
  return ['output_%d' % idx for idx in range(1, len(outputs_list) + 1)]
def convert_eager_tensors_to_numpy(structure):
  """Convert every EagerTensor in `structure` to NumPy.

  Arguments:
    structure: An arbitrary structure of elements to be converted to NumPy
      arrays.

  Returns:
    An identical structure with EagerTensors converted to NumPy arrays.
  """

  def _to_numpy(element):
    # Non-tensor leaves pass through unchanged.
    if isinstance(element, ops.EagerTensor):
      element = element.numpy()
    return element

  return nest.map_structure(_to_numpy, structure)
def should_run_validation(validation_freq, epoch):
  """Checks if validation should be run this epoch.

  Arguments:
    validation_freq: Integer or list. If an integer, specifies how many training
      epochs to run before a new validation run is performed. If a list,
      specifies the epochs on which to run validation.
    epoch: Integer, the number of the training epoch just completed.

  Returns:
    Bool, True if validation should be run.

  Raises:
    ValueError: if `validation_freq` is an Integer and less than 1, or if
      it is neither an Integer nor a Sequence.
  """
  # `collections.Container` was removed in Python 3.10; use the abc submodule.
  from collections import abc
  # `epoch` is 0-indexed internally but 1-indexed in the public API.
  one_indexed_epoch = epoch + 1
  if isinstance(validation_freq, int):
    if validation_freq < 1:
      raise ValueError('`validation_freq` can not be less than 1.')
    return one_indexed_epoch % validation_freq == 0
  if not isinstance(validation_freq, abc.Container):
    raise ValueError('`validation_freq` must be an Integer or '
                     '`collections.abc.Container` (e.g. list, tuple, etc.)')
  return one_indexed_epoch in validation_freq
class TrainingLoop(object):
  """TrainingLoop is a wrapper class around the training logic.

  This class is trying to encapsulate the different logic of fit/eval/predict
  with regard to different data input and model condition.

  Note that TrainingLoop is stateless, which means it doesn't contain any
  internal field and can be reused with different model and inputs.
  """

  # Abstract interface: concrete subclasses implement the three entry points.

  def fit(self,
          model,
          x=None,
          y=None,
          batch_size=None,
          epochs=1,
          verbose=1,
          callbacks=None,
          validation_split=0.,
          validation_data=None,
          shuffle=True,
          class_weight=None,
          sample_weight=None,
          initial_epoch=0,
          steps_per_epoch=None,
          validation_steps=None,
          validation_freq=1,
          **kwargs):
    """Train the model with the inputs and targets."""
    raise NotImplementedError()

  def evaluate(self,
               model,
               x=None,
               y=None,
               batch_size=None,
               verbose=1,
               sample_weight=None,
               steps=None,
               callbacks=None,
               **kwargs):
    """Returns the loss value & metrics values for the model in test mode."""
    raise NotImplementedError()

  def predict(self,
              model,
              x,
              batch_size=None,
              verbose=0,
              steps=None,
              callbacks=None,
              **kwargs):
    """Generates output predictions for the input samples."""
    raise NotImplementedError()
| 37.657718 | 92 | 0.666726 |
ace5bf1d69eaeaa6e1bda03a64e93a516f1dc561 | 817 | py | Python | neddit/neddit/urls.py | WeilonYing/infs2608-neddit | 13584359e3c96bbc64087c091d41e1f645523358 | [
"MIT"
] | null | null | null | neddit/neddit/urls.py | WeilonYing/infs2608-neddit | 13584359e3c96bbc64087c091d41e1f645523358 | [
"MIT"
] | 6 | 2018-05-09T03:08:21.000Z | 2018-05-17T15:00:37.000Z | neddit/neddit/urls.py | WeilonYing/infs2608-neddit | 13584359e3c96bbc64087c091d41e1f645523358 | [
"MIT"
] | null | null | null | """neddit URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.0/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include
# URL routing table: Django admin plus all remaining routes delegated to the
# `website` app.
urlpatterns = [
    path('admin/', admin.site.urls),  # built-in admin interface
    path('', include('website.urls'))  # root URLs handled by the website app
]
| 35.521739 | 78 | 0.684211 |
ace5bf4d7fcaaaf8f5bebd7cac880803d88f19ca | 1,709 | py | Python | src/compose/src/build/processors/cassandra/cassandracommand.py | RizkiMufrizal/Axway-API-Gateway-Docker | 2a3a39f89ea695c5d48dec0392d5d7c8e868cce1 | [
"Linux-OpenIB"
] | null | null | null | src/compose/src/build/processors/cassandra/cassandracommand.py | RizkiMufrizal/Axway-API-Gateway-Docker | 2a3a39f89ea695c5d48dec0392d5d7c8e868cce1 | [
"Linux-OpenIB"
] | null | null | null | src/compose/src/build/processors/cassandra/cassandracommand.py | RizkiMufrizal/Axway-API-Gateway-Docker | 2a3a39f89ea695c5d48dec0392d5d7c8e868cce1 | [
"Linux-OpenIB"
] | 1 | 2021-07-10T09:21:13.000Z | 2021-07-10T09:21:13.000Z | import uuid
import re
from core.docker import *
from core.util import *
from core.config import *
from ..util.command import *
class ConfigureCassandraPorts(Command):
    """Command that rewrites Cassandra's port/seed configuration for a container."""

    # Seed node host; resolved from the container config or its hostname.
    seed_host = None

    def __init__(self, container, config):
        super(ConfigureCassandraPorts, self).__init__(config)
        cassandra_config = container["cassandraConfig"]
        configured_seed = cassandra_config.get("seedHost", None)
        # Fall back to the container hostname when no explicit seed is set.
        self.seed_host = configured_seed if configured_seed else container["hostname"]
        script = [
            "/scripts/configure_cassandraports.py",
            "--cassandraDir", cassandra_config["cassandraDir"],
            "--seed_host", self.seed_host,
            "--cassandraPort", cassandra_config["cassandraPort"]]
        self.cmds.append((container["runtime"], script, None))
class ModifyCassandraJVMSettings(Command):
    """Command that adjusts Cassandra JVM heap settings inside a container."""

    def __init__(self, container, config):
        super(ModifyCassandraJVMSettings, self).__init__(config)
        cassandra_dir = container["cassandraConfig"]["cassandraDir"]
        script = [
            "/scripts/configure_cassandrajvmheap.xml",
            "--cassandraDir", cassandra_dir]
        self.cmds.append((container["runtime"], script, None))
class StartCassandra(Command):
    """Command that launches Cassandra inside the container runtime."""

    # Kept for interface parity with the other Cassandra commands.
    seed_host = None

    def __init__(self, container, config):
        super(StartCassandra, self).__init__(config)
        cassandra_config = container["cassandraConfig"]
        script = [
            "/scripts/manage_cassandra.py",
            "--cassandraDir", cassandra_config["cassandraDir"],
            "--cassandraPort", cassandra_config["cassandraPort"],
            "--start"]
        self.cmds.append((container["runtime"], script, None))
| 34.877551 | 85 | 0.634874 |
ace5bfc94ddea1c671aa180d29b33d677aaceca0 | 2,736 | py | Python | resources/save.py | achesak/rock-collector | af5bc42837013b97838ec33107518e597782715d | [
"MIT"
] | 1 | 2015-01-28T15:53:05.000Z | 2015-01-28T15:53:05.000Z | resources/save.py | achesak/rock-collector | af5bc42837013b97838ec33107518e597782715d | [
"MIT"
] | null | null | null | resources/save.py | achesak/rock-collector | af5bc42837013b97838ec33107518e597782715d | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# This file defines the functions for saving the application data.
# Import json for saving the data.
import json
def save_data(main_dir, rocks, minerals, fossils):
    """Saves the main data.

    Each collection is written as JSON to its own file under `main_dir`.
    Errors are reported to stdout but not raised, preserving the original
    best-effort behavior.
    """
    try:
        # `with` guarantees the files are closed even if json.dump raises
        # (the original left handles open on error).
        with open("%s/rocks.json" % main_dir, "w") as rocks_file:
            json.dump(rocks, rocks_file)
        with open("%s/minerals.json" % main_dir, "w") as minerals_file:
            json.dump(minerals, minerals_file)
        with open("%s/fossils.json" % main_dir, "w") as fossils_file:
            json.dump(fossils, fossils_file)
    except IOError:
        # Show the error message if something happened, but continue.
        # This one is shown if there was an error writing to the files.
        print("Error saving data files (IOError).")
    except (TypeError, ValueError):
        # Show the error message if something happened, but continue.
        # This one is shown if there was an error with the data types.
        print("Error saving data files (TypeError or ValueError).")
def save_counters(conf_dir, rocks_count, minerals_count, fossils_count):
    """Saves the counters.

    Each counter is written as a string to its own file under `conf_dir`.
    Errors are reported to stdout but not raised, preserving the original
    best-effort behavior.
    """
    try:
        # `with` guarantees the files are closed even if a write raises
        # (the original left handles open on error).
        with open("%s/rocks_counter" % conf_dir, "w") as rocks_count_file:
            rocks_count_file.write(str(rocks_count))
        with open("%s/minerals_counter" % conf_dir, "w") as minerals_count_file:
            minerals_count_file.write(str(minerals_count))
        with open("%s/fossils_counter" % conf_dir, "w") as fossils_count_file:
            fossils_count_file.write(str(fossils_count))
    except IOError:
        # Show the error message if something happened, but continue.
        # This one is shown if there was an error writing to the files.
        print("Error saving counter files (IOError).")
    except (TypeError, ValueError):
        # Show the error message if something happened, but continue.
        # This one is shown if there was an error with the data types.
        print("Error saving counter files (TypeError or ValueError).")
def save_window(conf_dir, width, height):
    """Saves the window size.

    Writes height then width, one per line, to `conf_dir`/window_size.
    Errors are reported to stdout but not raised.
    """
    try:
        # `with` guarantees the file is closed even if the write raises
        # (the original left the handle open on error).
        with open("%s/window_size" % conf_dir, "w") as wins_file:
            # Height is written first — readers rely on this order.
            wins_file.write("%d\n%d" % (height, width))
    except IOError:
        # Show the error message if something happened, but continue.
        # This one is shown if there was an error writing to the file.
        print("Error saving window size file (IOError).")
| 36 | 73 | 0.642909 |
ace5c046f2a5bbc312a4a7e05d5f70b92bd813fd | 234 | py | Python | AXF_Roit/App/views.py | thr1030/TC_1803_Flask | aa162347123433785485ddb78ed94780c32d9e02 | [
"Apache-2.0"
] | null | null | null | AXF_Roit/App/views.py | thr1030/TC_1803_Flask | aa162347123433785485ddb78ed94780c32d9e02 | [
"Apache-2.0"
] | null | null | null | AXF_Roit/App/views.py | thr1030/TC_1803_Flask | aa162347123433785485ddb78ed94780c32d9e02 | [
"Apache-2.0"
] | null | null | null | from flask import Blueprint
from App.models import Home
# Blueprint grouping this app's routes; registered onto the app in
# init_first_blue().
blue = Blueprint("first_blue",__name__)
def init_first_blue(app):
    # Attach the blueprint's routes to the given Flask application.
    app.register_blueprint(blueprint=blue)
@blue.route('/')
def hello():
    # Root endpoint; returns a plain greeting string.
    # NOTE(review): "Welcom" looks like a typo for "Welcome" — confirm with
    # the owners before changing this user-facing text.
    return "Welcom to use Flask!"
| 16.714286 | 42 | 0.735043 |
ace5c107f1e4227b3d44bfc93e1aa34905f57163 | 7,495 | py | Python | ThesisCode/TrainingMain/make_session.py | tsteternlieb/DrugDesignThesis | 2ab00826dbfd2567db5a9054731bd7d49ff12126 | [
"MIT"
] | 2 | 2021-12-29T13:14:29.000Z | 2022-02-07T20:03:09.000Z | ThesisCode/TrainingMain/make_session.py | tsteternlieb/DrugDesignThesis | 2ab00826dbfd2567db5a9054731bd7d49ff12126 | [
"MIT"
] | 4 | 2022-02-04T00:39:19.000Z | 2022-02-16T18:37:09.000Z | ThesisCode/TrainingMain/make_session.py | tsteternlieb/DrugDesignThesis | 2ab00826dbfd2567db5a9054731bd7d49ff12126 | [
"MIT"
] | null | null | null | import sys
sys.path.append('..')
import torch
from torch.utils.tensorboard import SummaryWriter
from TrainingUtils.PPO import PPOTrainer
from enviroment.ChemEnv import ChemEnv
# from Train.sv_utils import SupervisedTrainingWrapper
from TrainingUtils.SupervisedTraining.sv_utils import SupervisedTrainingWrapper
from training import Trainer
from Architectures.models import Actor, Critic
from Rewards.rewards import FinalRewardModule
from config_utils import generateRewardModule, generateMolFeaturizer, generateActor, generateCritic
import wandb
def make(config):
    """Build a training session from a parsed configuration dict.

    Args:
        config (dict): parsed configuration (e.g. previously loaded from a
            YAML file) containing the keys RUN_TITLE, ACTOR_RESTART_PATH,
            CRITIC_RESTART_PATH, ENVIROMENT_VARIABLES, MODEL_VARIABLES,
            PPO_VARIABLES, SUPERVISED_TRAINING_VARIABLES, ACTOR_VARIABLES,
            CRITIC_VARIABLES and FINAL_TRAINING_VARIABLES.

    Returns:
        (Trainer, dict): the assembled training session and the config dict.
    """
    # CUDA device handle. NOTE(review): currently unused here; the actor and
    # critic are moved to the GPU explicitly with .cuda() below.
    DEVICE = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

    # LOGGING
    RUN_TITLE = config['RUN_TITLE']
    WRITER = SummaryWriter(f'./logs/{RUN_TITLE}_logs/tb')

    # Optional warm-start checkpoints; the string 'None' means "start fresh".
    if config['ACTOR_RESTART_PATH'] != 'None':
        ACTOR_RESTART_PATH = config['ACTOR_RESTART_PATH']
        # Bug fix: the original printed config["ACTOR_RESTART_PATH"][0],
        # i.e. only the first character of the path string.
        print(f'Using saved actor at location: {ACTOR_RESTART_PATH}')
    else:
        ACTOR_RESTART_PATH = None
    if config['CRITIC_RESTART_PATH'] != 'None':
        CRITIC_RESTART_PATH = config['CRITIC_RESTART_PATH']
        # Bug fix: same first-character-only print as for the actor path.
        print(f'Using saved critic at location: {CRITIC_RESTART_PATH}')
    else:
        CRITIC_RESTART_PATH = None

    # ENVIROMENT VARIABLES
    NUM_NODE_FEATS = int(config['ENVIROMENT_VARIABLES']['NUM_NODE_FEATS'])
    MOL_FEATURIZER = generateMolFeaturizer(
        config['ENVIROMENT_VARIABLES']['MOL_FEATURIZER'])
    REWARD_MODULE = FinalRewardModule(WRITER, generateRewardModule(
        config['ENVIROMENT_VARIABLES']['REWARD_MODULES']))

    # ACTOR HYPER PARAMATERS
    # NOTE(review): these two are read (and validated as ints) but not used
    # below; the actor/critic kwargs come from ACTOR_VARIABLES/CRITIC_VARIABLES.
    HIDDEN_DIM = int(config['MODEL_VARIABLES']['HIDDEN_DIM'])
    NUM_ATOM_TYPES = int(config['MODEL_VARIABLES']['NUM_ATOM_TYPES'])

    # PPO VARIABLES
    PPO_BATCH_SIZE = int(config['PPO_VARIABLES']['PPO_BATCH_SIZE'])
    TIMESTEPS_PER_ITERATION = int(
        config['PPO_VARIABLES']['TIMESTEPS_PER_ITERATION'])
    CLIP = float(config['PPO_VARIABLES']['CLIP'])
    A_LR = float(config['PPO_VARIABLES']['A_LR'])
    C_LR = float(config['PPO_VARIABLES']['C_LR'])
    NUM_UPDATED_PER_ITERATION = int(
        config['PPO_VARIABLES']['NUM_UPDATED_PER_ITERATION'])
    MAX_TIMESTEPS_PER_EPISODE = int(
        config['PPO_VARIABLES']['MAX_TIMESTEPS_PER_EPISODE'])
    GAMMA = float(config['PPO_VARIABLES']['GAMMA'])

    # SUPERVISED TRAINING VARIABLES
    SUPERVISED_BATCH_SIZE = int(
        config['SUPERVISED_TRAINING_VARIABLES']['SUPERVISED_BATCH_SIZE'])
    SUPERVISED_LEARNING_RATE = float(
        config['SUPERVISED_TRAINING_VARIABLES']['LR'])
    SUPERVISED_LR_DECAY = float(
        config['SUPERVISED_TRAINING_VARIABLES']['DECAY'])
    # NOTE(review): read/validated but unused below.
    DATASET_SIZE = int(config['SUPERVISED_TRAINING_VARIABLES']['DATASET_SIZE'])
    PATH = config['SUPERVISED_TRAINING_VARIABLES']['PATH']

    # DEFININING MODULES
    ENV = ChemEnv(NUM_NODE_FEATS, REWARD_MODULE, MOL_FEATURIZER, WRITER)
    ACTOR = generateActor(config['ACTOR_VARIABLES'], ACTOR_RESTART_PATH).cuda()
    CRITIC = generateCritic(config['CRITIC_VARIABLES'], CRITIC_RESTART_PATH).cuda()
    PPO = PPOTrainer(ENV, PPO_BATCH_SIZE, TIMESTEPS_PER_ITERATION,
                     CLIP, A_LR, C_LR, NUM_UPDATED_PER_ITERATION,
                     MAX_TIMESTEPS_PER_EPISODE, GAMMA, ACTOR, CRITIC, WRITER, RUN_TITLE)
    SUPERVISED_TRAINER = SupervisedTrainingWrapper(
        ACTOR, SUPERVISED_BATCH_SIZE, SUPERVISED_LEARNING_RATE, SUPERVISED_LR_DECAY, WRITER, PATH)

    # DEFINING TRAINING
    SV_EPOCHS = int(config['FINAL_TRAINING_VARIABLES']['SV_EPOCHS'])
    PPO_STEPS = int(config['FINAL_TRAINING_VARIABLES']['PPO_STEPS'])
    TRAINING_SESSION = Trainer(
        RUN_TITLE, WRITER, SUPERVISED_TRAINER, PPO, SV_EPOCHS, PPO_STEPS)
    return TRAINING_SESSION, config
def make2(config):
    """Generates object to handle training for a wandb sweep run.

    Unlike :func:`make`, the configuration is an attribute-style wandb
    sweep config object, and only the training session is returned.

    Args:
        config: wandb sweep configuration object (attribute access).

    Returns:
        Trainer: the assembled training session.
    """
    # with open(path) as file:
    #     config = yaml.safe_load(file)

    # CUDA device handle (unused below; kept as in the sibling make()).
    DEVICE = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    print(wandb.config)

    # LOGGING: sweep runs all log under a fixed title.
    RUN_TITLE = "SWEEP"
    WRITER = SummaryWriter(f'./logs/{RUN_TITLE}_logs/tb')
    # subprocess.run(['mkdir', f'./logs/'])
    # subprocess.run(['cp', path, f'./logs/{RUN_TITLE}_logs/config.yaml'])

    # ENVIROMENT VARIABLES
    NUM_NODE_FEATS = int(config.NUM_NODE_FEATS)
    MOL_FEATURIZER = generateMolFeaturizer(
        config.MOL_FEATURIZER)
    REWARD_MODULE = FinalRewardModule(WRITER, generateRewardModule(
        config.REWARD_MODULES))

    # ACTOR HYPER PARAMATERS (suffix _A = actor-side sweep keys)
    actor_kwargs = {
        'in_dim': config.NUM_NODE_FEATS,
        'hidden_dim': int(config.HIDDEN_DIM_A),
        'out_dim': int(config.NUM_NODE_FEATS),
        'drop_out': config.DROPOUT_A,
        'graph_activation': config.GRAPH_ACTIVATION_A,
        'dense_activation': config.DENSE_ACTIVATION_A,
        'model': config.MODEL_TYPE_A,
        'graph_num_layers': config.GRAPH_NUM_LAYERS_A,
        'dense_num_layers': config.DENSE_NUM_LAYERS_A,
        'norm': config.DENSE_NORMALIZATION_A
    }

    # CRITIC HYPER PARAMATERS (suffix _C = critic-side sweep keys)
    critic_kwargs = {
        'in_dim': config.NUM_NODE_FEATS,
        'hidden_dim': config.HIDDEN_DIM_C,
        'model': config.MODEL_TYPE_C,
        'graph_num_layers': config.GRAPH_NUM_LAYERS_C,
        'dense_num_layers': config.DENSE_NUM_LAYERS_C,
        'graph_activation': config.GRAPH_ACTIVATION_C,
        'dense_activation': config.DENSE_ACTIVATION_C,
        'dropout': config.DROPOUT_C,
        'norm': config.DENSE_NORMALIZATION_C
    }

    # PPO VARIABLES
    PPO_BATCH_SIZE = config.PPO_BATCH_SIZE
    TIMESTEPS_PER_ITERATION = config.TIMESTEPS_PER_ITERATION
    CLIP = config.CLIP
    A_LR = config.A_LR
    C_LR = config.C_LR
    NUM_UPDATED_PER_ITERATION = config.NUM_UPDATED_PER_ITERATION
    MAX_TIMESTEPS_PER_EPISODE = config.MAX_TIMESTEPS_PER_EPISODE
    GAMMA = config.GAMMA

    # SUPERVISED TRAINING VARIABLES
    SUPERVISED_BATCH_SIZE = config.SUPERVISED_BATCH_SIZE
    # NOTE(review): "SUPERVSED_LR" looks like a typo for "SUPERVISED_LR",
    # but it must match the sweep config key exactly — confirm before renaming.
    SUPERVISED_LEARNING_RATE = config.SUPERVSED_LR
    SUPERVISED_LR_DECAY = config.SUPERVISED_LR_DECAY
    PATH = config.PATH

    # DEFININING MODULES
    ENV = ChemEnv(NUM_NODE_FEATS, REWARD_MODULE, MOL_FEATURIZER, WRITER)
    # MODEL = BaseLine(NUM_NODE_FEATS, HIDDEN_DIM, NUM_ATOM_TYPES).cuda()
    # NOTE(review): unlike make(), the models are not moved to CUDA here,
    # and PPOTrainer is called without the trailing RUN_TITLE argument.
    ACTOR = Actor(**actor_kwargs)
    CRITIC = Critic(**critic_kwargs)
    PPO = PPOTrainer(ENV, PPO_BATCH_SIZE, TIMESTEPS_PER_ITERATION,
                     CLIP, A_LR, C_LR, NUM_UPDATED_PER_ITERATION,
                     MAX_TIMESTEPS_PER_EPISODE, GAMMA, ACTOR, CRITIC, WRITER)
    SUPERVISED_TRAINER = SupervisedTrainingWrapper(
        ACTOR, SUPERVISED_BATCH_SIZE, SUPERVISED_LEARNING_RATE, SUPERVISED_LR_DECAY, WRITER, PATH)

    # DEFINING TRAINING
    SV_EPOCHS = int(config.SV_EPOCHS)
    PPO_STEPS = int(config.PPO_STEPS)
    TRAINING_SESSION = Trainer(
        RUN_TITLE, WRITER, SUPERVISED_TRAINER, PPO, SV_EPOCHS, PPO_STEPS)
    # TRAINING_SESSION.Train(SV_EPOCHS,PPO_STEPS)
    return TRAINING_SESSION
ace5c19086ad5e95f8964ff0991fd00fb048481b | 13,389 | py | Python | plugin/settings/settings_storage.py | mjendruk/EasyClangComplete | 51caeb384f2941b8fe0b51584d25bea692ecf517 | [
"MIT"
] | null | null | null | plugin/settings/settings_storage.py | mjendruk/EasyClangComplete | 51caeb384f2941b8fe0b51584d25bea692ecf517 | [
"MIT"
] | null | null | null | plugin/settings/settings_storage.py | mjendruk/EasyClangComplete | 51caeb384f2941b8fe0b51584d25bea692ecf517 | [
"MIT"
] | null | null | null | """Holds a class that encapsulates plugin settings.
Attributes:
log (logging.Logger): logger for this module
"""
import logging
import sublime
from os import path
from ..tools import Tools
log = logging.getLogger("ECC")
class Wildcards:
    """Namespace of wildcard names supported in settings values.

    Note: despite resembling one, this is a plain constant container,
    not an ``enum.Enum`` subclass.

    Attributes:
        CLANG_VERSION (str): a wildcard to be replaced with a clang version
        PROJECT_NAME (str): a wildcard to be replaced by the project name
        PROJECT_PATH (str): a wildcard to be replaced by the project path
        HOME_PATH (str): literal '~', expanded to the user's home directory
    """
    PROJECT_PATH = "project_base_path"
    PROJECT_NAME = "project_name"
    CLANG_VERSION = "clang_version"
    HOME_PATH = "~"
class SettingsStorage:
    """A class that stores all loaded settings.

    Attributes:
        max_cache_age (int): maximum cache age in seconds
        FLAG_SOURCES (str[]): possible flag sources
        NAMES_ENUM (str[]): all supported settings names
        PREFIXES (str[]): setting prefixes supported by this plugin
    """
    FLAG_SOURCES = ["CMakeLists.txt",
                    "Makefile",
                    "compile_commands.json",
                    "CppProperties.json",
                    "c_cpp_properties.json",
                    ".clang_complete"]
    # Keys inside a flags-source dict whose values are paths and therefore
    # need wildcard expansion.
    FLAG_SOURCES_ENTRIES_WITH_PATHS = ["search_in", "prefix_paths"]

    PREFIXES = ["ecc_", "easy_clang_complete_"]

    COLOR_SUBLIME_STYLE_TAG = "ColorSublime"
    MOON_STYLE_TAG = "Moon"
    NONE_STYLE_TAG = "None"
    PROGRESS_STYLES = [COLOR_SUBLIME_STYLE_TAG, MOON_STYLE_TAG, NONE_STYLE_TAG]

    GUTTER_COLOR_STYLE = "color"
    GUTTER_MONO_STYLE = "mono"
    NONE_STYLE = "none"
    GUTTER_STYLES = [GUTTER_COLOR_STYLE, GUTTER_MONO_STYLE, NONE_STYLE]

    # refer to Preferences.sublime-settings for usage explanation
    NAMES_ENUM = [
        "autocomplete_all",
        "clang_binary",
        "cmake_binary",
        "common_flags",
        "ignore_list",
        "expand_template_types",
        "flags_sources",
        "gutter_style",
        "header_to_source_mapping",
        "hide_default_completions",
        "include_file_folder",
        "include_file_parent_folder",
        "lang_flags",
        "libclang_path",
        "max_cache_age",
        "progress_style",
        "show_errors",
        "show_type_body",
        "show_type_info",
        "target_c_compiler",
        "target_cpp_compiler",
        "target_objective_c_compiler",
        "target_objective_cpp_compiler",
        "triggers",
        "use_libclang",
        "use_libclang_caching",
        "use_target_compiler_built_in_flags",
        "valid_lang_syntaxes",
        "verbose",
    ]

    def __init__(self, settings_handle):
        """Initialize settings storage with default settings handle.

        Args:
            settings_handle (sublime.Settings): handle to sublime settings
        """
        log.debug("creating new settings storage object")
        self.clang_version = ''
        self.libclang_path = ''
        self.clang_binary = ''
        self.cmake_binary = ''
        self.project_folder = ''
        self.project_name = ''
        self._wildcard_values = {}
        self.__load_vars_from_settings(settings_handle,
                                       project_specific=False)

    def update_from_view(self, view):
        """Update from view using view-specific settings.

        Reloads (possibly prefixed) project settings, refreshes wildcard
        values, and expands wildcards in all path-like settings.

        Args:
            view (sublime.View): current view
        """
        try:
            # Init current and parent folders.
            if not Tools.is_valid_view(view):
                log.error("no view to populate common flags from")
                return
            self.__load_vars_from_settings(view.settings(),
                                           project_specific=True)
            # Initialize wildcard values with view.
            self.__update_wildcard_values(view)
            # Replace wildcards in various paths.
            self.__populate_common_flags(view.file_name())
            self.__populate_flags_source_paths()
            self.__update_ignore_list()
            self.libclang_path = self.__replace_wildcard_if_needed(
                self.libclang_path)
            self.clang_binary = self.__replace_wildcard_if_needed(
                self.clang_binary)
            self.cmake_binary = self.__replace_wildcard_if_needed(
                self.cmake_binary)
        except AttributeError as e:
            # view (or its window) can become None mid-update; just abort.
            log.error("view became None. Do not continue.")
            log.error("original error: %s", e)

    def need_reparse(self):
        """Define a very hacky check that there was an incomplete load.

        This is needed because of something I believe is a bug in sublime text
        plugin handling. When we enable the plugin and load its settings with
        on_plugin_loaded() function not all settings are active.
        'progress_style' is one of the missing settings. The settings will
        just need to be loaded at a later time then.

        Returns:
            bool: True if needs reparsing, False otherwise
        """
        if 'progress_style' in self.__dict__:
            log.debug('settings complete')
            return False
        log.debug('settings incomplete and will be reloaded a bit later')
        return True

    def is_valid(self):
        """Check settings validity.

        If any of the settings is None the settings are not valid.

        Returns:
            (bool, str): validity of settings + error message.
        """
        error_msg = ""
        for key, value in self.__dict__.items():
            # NOTE(review): callable(key) is always False for string
            # attribute names — this was probably meant as callable(value).
            if key.startswith('__') or callable(key):
                continue
            if value is None:
                error_msg = "No value for setting '{}' found!".format(key)
                return False, error_msg
        if self.progress_style not in SettingsStorage.PROGRESS_STYLES:
            error_msg = "Progress style '{}' is not one of {}".format(
                self.progress_style, SettingsStorage.PROGRESS_STYLES)
            return False, error_msg
        if self.gutter_style not in SettingsStorage.GUTTER_STYLES:
            error_msg = "Gutter style '{}' is not one of {}".format(
                self.gutter_style, SettingsStorage.GUTTER_STYLES)
            return False, error_msg
        for source_dict in self.flags_sources:
            if "file" not in source_dict:
                error_msg = "No 'file' setting in a flags source '{}'".format(
                    source_dict)
                return False, error_msg
            if source_dict["file"] not in SettingsStorage.FLAG_SOURCES:
                error_msg = "flag source '{}' is not one of {}".format(
                    source_dict["file"], SettingsStorage.FLAG_SOURCES)
                return False, error_msg
        # Check if all languages are present in language-specific settings.
        for lang_tag in Tools.LANG_TAGS:
            if lang_tag not in self.lang_flags.keys():
                error_msg = "lang '{}' is not in {}".format(
                    lang_tag, self.lang_flags)
                return False, error_msg
            if lang_tag not in self.valid_lang_syntaxes:
                error_msg = "No '{}' in syntaxes '{}'".format(
                    lang_tag, self.valid_lang_syntaxes)
                return False, error_msg
        return True, ""

    @property
    def target_compilers(self):
        """Create a dictionary with the target compilers to use.

        Only the compilers actually configured (attribute present on self)
        appear as keys in the returned dict.
        """
        result = dict()
        if hasattr(self, "target_c_compiler"):
            result["c"] = self.target_c_compiler
        if hasattr(self, "target_cpp_compiler"):
            result["c++"] = self.target_cpp_compiler
        if hasattr(self, "target_objective_c_compiler"):
            result["objective-c"] = self.target_objective_c_compiler
        if hasattr(self, "target_objective_cpp_compiler"):
            result["objective-c++"] = self.target_objective_cpp_compiler
        return result

    def __load_vars_from_settings(self, settings, project_specific=False):
        """Load all settings and add them as attributes of self.

        Args:
            settings (dict): settings from sublime
            project_specific (bool, optional): defines if the settings are
                project-specific and should be read with appropriate prefixes
        """
        if project_specific:
            log.debug("Overriding settings by project ones if needed:")
            log.debug("Valid prefixes: %s", SettingsStorage.PREFIXES)
        log.debug("Reading settings...")
        # project settings are all prefixed to disambiguate them from others
        if project_specific:
            prefixes = SettingsStorage.PREFIXES
        else:
            prefixes = [""]
        for setting_name in SettingsStorage.NAMES_ENUM:
            for prefix in prefixes:
                val = settings.get(prefix + setting_name)
                if val is not None:
                    # we don't want to override existing setting
                    break
            if val is not None:
                # set this value to this object too
                setattr(self, setting_name, val)
                # tell the user what we have done
                log.debug("%-26s <-- '%s'", setting_name, val)
        log.debug("Settings sucessfully read...")
        # initialize max_cache_age if is it not yet, default to 30 minutes
        self.max_cache_age = getattr(self, "max_cache_age", "00:30:00")
        # get seconds from string if needed
        if isinstance(self.max_cache_age, str):
            self.max_cache_age = Tools.seconds_from_string(self.max_cache_age)

    def __populate_flags_source_paths(self):
        """Populate variables inside flags sources.

        Expands wildcards in every path-carrying entry of each flags-source
        dict (both single-string and list-of-strings values).
        """
        if not self.flags_sources:
            log.critical(" Cannot update paths of flag sources.")
            return
        for idx, source_dict in enumerate(self.flags_sources):
            for option in SettingsStorage.FLAG_SOURCES_ENTRIES_WITH_PATHS:
                if option not in source_dict:
                    continue
                if not source_dict[option]:
                    continue
                if isinstance(source_dict[option], str):
                    self.flags_sources[idx][option] =\
                        self.__replace_wildcard_if_needed(source_dict[option])
                elif isinstance(source_dict[option], list):
                    for i, entry in enumerate(source_dict[option]):
                        self.flags_sources[idx][option][i] =\
                            self.__replace_wildcard_if_needed(entry)

    def __populate_common_flags(self, current_file_name):
        """Populate the variables inside common_flags with real values.

        Args:
            current_file_name (str): current view file name
        """
        # populate variables to real values
        log.debug("populating common_flags with current variables.")
        for idx, flag in enumerate(self.common_flags):
            self.common_flags[idx] = self.__replace_wildcard_if_needed(flag)
        # Optionally add the file's own folder and its parent as -I includes.
        file_current_folder = path.dirname(current_file_name)
        if self.include_file_folder:
            self.common_flags.append("-I" + file_current_folder)
        file_parent_folder = path.dirname(file_current_folder)
        if self.include_file_parent_folder:
            self.common_flags.append("-I" + file_parent_folder)

    def __update_ignore_list(self):
        """Populate variables inside the ignore list (in place)."""
        if not self.ignore_list:
            log.critical(" Cannot update paths of ignore list.")
            return
        for idx, path_to_ignore in enumerate(self.ignore_list):
            self.ignore_list[idx] = self.__replace_wildcard_if_needed(
                path_to_ignore)

    def __replace_wildcard_if_needed(self, line):
        """Replace wildcards in a line if they are present there.

        Expands environment variables, sublime-style ${...} wildcards,
        and a literal '~' (expanded to the user's home directory).

        Args:
            line (str): line possibly with wildcards in it

        Returns:
            str: line with replaced wildcards
        """
        res = path.expandvars(line)
        res = sublime.expand_variables(res, self._wildcard_values)
        if Wildcards.HOME_PATH in res:
            # replace '~' by full home path. Leave everything else intact.
            prefix_idx = res.index(Wildcards.HOME_PATH)
            prefix = res[:prefix_idx]
            home_path = path.expanduser(res[prefix_idx:prefix_idx + 1])
            res = prefix + home_path + res[prefix_idx + 1:]
        if res != line:
            log.debug("populated '%s' to '%s'", line, res)
        return res

    def __update_wildcard_values(self, view):
        """Update values for wildcard variables from the view's window."""
        variables = view.window().extract_variables()
        self._wildcard_values.update(variables)
        # Escape backslashes so Windows paths survive later substitution.
        self._wildcard_values[Wildcards.PROJECT_PATH] = \
            variables.get("folder", "").replace("\\", "\\\\")
        self._wildcard_values[Wildcards.PROJECT_NAME] = \
            variables.get("project_base_name", "")
        # get clang version string
        version_str = Tools.get_clang_version_str(self.clang_binary)
        self._wildcard_values[Wildcards.CLANG_VERSION] = version_str
        # duplicate as fields
        self.project_folder = self._wildcard_values[Wildcards.PROJECT_PATH]
        self.project_name = self._wildcard_values[Wildcards.PROJECT_NAME]
        self.clang_version = self._wildcard_values[Wildcards.CLANG_VERSION]
| 39.379412 | 79 | 0.614908 |
ace5c25419b5de7f438909b2297e34b6cdc2d6a7 | 545 | py | Python | TimeSeriesAnalysisWithPython-master/SciPyTimeSeries/snippets/nearestmean.py | sunny2309/scipy_conf_notebooks | 30a85d5137db95e01461ad21519bc1bdf294044b | [
"MIT"
] | 2 | 2021-01-09T15:57:26.000Z | 2021-11-29T01:44:21.000Z | TimeSeriesAnalysisWithPython-master/SciPyTimeSeries/snippets/nearestmean.py | sunny2309/scipy_conf_notebooks | 30a85d5137db95e01461ad21519bc1bdf294044b | [
"MIT"
] | 5 | 2019-11-15T02:00:26.000Z | 2021-01-06T04:26:40.000Z | TimeSeriesAnalysisWithPython-master/SciPyTimeSeries/snippets/nearestmean.py | sunny2309/scipy_conf_notebooks | 30a85d5137db95e01461ad21519bc1bdf294044b | [
"MIT"
] | null | null | null |
from random import randint
def test_idea(words_df):
    """Compare one randomly chosen word series against each per-class mean.

    Args:
        words_df: DataFrame of word time series; rows with the same index
            value belong to the same word class.

    Returns:
        tuple: (label of the randomly chosen row, list of DTW distances
        from that row to every class-mean series, in class order).
    """
    distances = []
    # One mean series per class label (rows sharing the same index value).
    mean_words = words_df.groupby(words_df.index).mean()
    # Bug fix: random.randint is inclusive on BOTH ends, so the upper bound
    # must be shape[0] - 1; shape[0] itself would make .iloc raise IndexError.
    i = randint(0, words_df.shape[0] - 1)
    random_word = words_df.iloc[i]
    for mean_idx in range(mean_words.shape[0]):
        current_mean_word = mean_words.iloc[mean_idx]
        distances.append(DTWDistance(current_mean_word.values, random_word.values))
    return (random_word.name, distances)
word_type, distances = test_idea(words)
indexed_distances = enumerate(distances)
word_type
list(indexed_distances) | 32.058824 | 83 | 0.746789 |
ace5c421507720d430bab2cfad4fabb8a910178e | 101 | py | Python | api/tacticalrmm/core/admin.py | jeffreyvh/tacticalrmm | dcfb1732954c2c165e82e6b24686e27f9f909eb3 | [
"MIT"
] | 1 | 2021-01-19T20:39:02.000Z | 2021-01-19T20:39:02.000Z | api/tacticalrmm/core/admin.py | jeffreyvh/tacticalrmm | dcfb1732954c2c165e82e6b24686e27f9f909eb3 | [
"MIT"
] | 5 | 2021-04-08T19:44:31.000Z | 2021-09-22T19:34:33.000Z | api/tacticalrmm/core/admin.py | jeffreyvh/tacticalrmm | dcfb1732954c2c165e82e6b24686e27f9f909eb3 | [
"MIT"
] | null | null | null | from django.contrib import admin
from .models import CoreSettings
# Expose CoreSettings in the Django admin site with the default ModelAdmin.
admin.site.register(CoreSettings)
| 20.2 | 33 | 0.841584 |
ace5c56bc9e8af080c01bb99d2f2ea75d309bf96 | 10,129 | py | Python | udpecho.py | rolandbaer/udp-echo-server | b9676233a4ac815761fe5438b364c7bb5bab2252 | [
"MIT"
] | null | null | null | udpecho.py | rolandbaer/udp-echo-server | b9676233a4ac815761fe5438b364c7bb5bab2252 | [
"MIT"
] | 7 | 2021-04-12T09:08:26.000Z | 2021-11-01T18:14:53.000Z | udpecho.py | rolandbaer/udp-echo-server | b9676233a4ac815761fe5438b364c7bb5bab2252 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
"""
An UDP echo server and client that writes its own UDP and IPv4 headers
and allows to control udp and ip header fields.
"""
__version__ = "0.7.0"
import argparse
import ipaddress
import itertools
import logging
import platform
import socket
import struct
import sys
import time
from dataclasses import dataclass
from random import choice, randint
from string import ascii_uppercase
LOGGER = logging.getLogger(__name__)

# the buffer for receiving incoming messages
BUFFER_SIZE = 4096

# default port definitions (client reply port / server listen port)
CLIENT_PORT = 2010
SERVER_PORT = 2001

# dummy checksum, as the checksum is not mandatory (UDP) or calculated by the driver (IP)
DUMMY_CHECKSUM = 0

# Don't fragment flag set, other flags not set, fragment offset 0
DONT_FRAGMENT = 0x4000
FRAGMENTATION_ALLOWED = 0x0000

# Header lengths (IPv4 header is 5 32-bit words = 20 bytes; UDP header 8 bytes)
IP_HEADER_LENGTH_WORDS = 5
IP_HEADER_LENGTH = IP_HEADER_LENGTH_WORDS * 4
UDP_HEADER_LENGTH = 8

# IP Protocol version
IP_V4 = 4

# characters reserved for counter: #12345#Message
COUNTER_SIZE = 7
@dataclass
class Sockets:
    """Container of sending and listening sockets.

    Declared with real dataclass fields so ``@dataclass`` generates
    ``__init__``/``__repr__``/``__eq__`` from them. The original defined a
    manual ``__init__`` with zero dataclass fields, which made the generated
    ``__eq__`` compare empty tuples — i.e. all instances compared equal.
    The constructor signature ``Sockets(sender, listener)`` is unchanged.
    """
    sender: socket.socket
    listener: socket.socket
@dataclass
class ProtocolData:
    """Container for the IP/UDP header fields of an outgoing packet.

    Declared with real dataclass fields (the original's manual ``__init__``
    left the dataclass field-less, making all instances compare equal).
    The constructor signature is unchanged.
    """
    ip_id: int          # IPv4 identification field
    address: str        # source IPv4 address as dotted string
    port: int           # UDP source port
    dontfragment: bool  # whether to set the IPv4 DF flag
def is_windows():
    """Return True when running on a Windows platform."""
    system_name = platform.system().lower()
    return system_name.startswith('win')
def send_and_receive_one(sockets: Sockets, message: str, addr: tuple, protocol_data: ProtocolData) -> bool:
    """Sends the message over the sender socket and waits for the response
    on the listener socket.

    Returns:
        bool: True if an echo came back before the listener's timeout,
        False if the wait timed out.
    """
    send_udp_message(message, addr, sockets.sender, protocol_data)
    try:
        # Blocks up to the listener's configured timeout (set by the caller).
        input_data, addr = sockets.listener.recvfrom(BUFFER_SIZE)
        LOGGER.info("Received message back from %s: %s (%s bytes).",
                    addr, input_data.decode(), len(input_data))
        return True
    except socket.timeout:
        LOGGER.warning("Message never received back from %s: (%s).", addr, message)
        return False
def send_udp_message(message: str, addr: tuple, sender: socket.socket, protocol_data: ProtocolData) -> None:
    """Sends the message over the socket as a self-built udp/ip packet.

    On Windows the message goes out over a plain UDP socket (raw sockets
    are not used there); elsewhere the UDP and IPv4 headers are packed by
    hand and sent over a raw socket, so header fields like the IP id and
    the DF flag can be controlled.
    """
    message_encoded = message.encode()
    if is_windows():
        LOGGER.debug("windows (udp)")
        output_len = sender.sendto(message_encoded, addr)
    else:
        LOGGER.debug("other (raw)")
        # UDP header: source port, destination port, length, checksum (0 = none).
        udp_msg = struct.pack("!HHHH"+str(len(message_encoded))+"s",
                              protocol_data.port, addr[1], UDP_HEADER_LENGTH + len(message_encoded),
                              DUMMY_CHECKSUM, message_encoded)
        # IPv4 header: version+IHL, TOS, total length, id, flags+fragment
        # offset, TTL (255), protocol, checksum (filled in by the kernel for
        # raw sockets), source address, destination address.
        ip_header = struct.pack("!BBHHHBBHLL",
                                IP_V4*16+IP_HEADER_LENGTH_WORDS,
                                0,
                                IP_HEADER_LENGTH + len(udp_msg),
                                protocol_data.ip_id,
                                DONT_FRAGMENT if protocol_data.dontfragment else FRAGMENTATION_ALLOWED,
                                255,
                                socket.IPPROTO_UDP,
                                DUMMY_CHECKSUM,
                                int(ipaddress.IPv4Address(protocol_data.address)),
                                int(ipaddress.IPv4Address(addr[0])))
        data = ip_header + udp_msg
        output_len = sender.sendto(data, addr)
    LOGGER.info("Sent message to %s: %s (%s bytes, total %s bytes).", addr, message,
                len(message_encoded), output_len)
def receive_next(listener: socket.socket):
    """Block until a datagram arrives on *listener*, retrying on timeouts.

    Returns:
        tuple: (data bytes, sender address) as produced by ``recvfrom``.
    """
    LOGGER.debug("Waiting to receive data...")
    datagram = None
    while datagram is None:
        try:
            datagram = listener.recvfrom(BUFFER_SIZE)
        except socket.timeout:
            LOGGER.debug("No data received yet: retrying.")
    return datagram
def receive_and_send_one(sockets: Sockets, ip_id: int, port: int, dontfragment: bool) -> None:
    """Waits for a single datagram over the socket and echoes it back.

    The echo reply is sent with the listener's own bound address as the
    source and the given ip_id / DF flag in its hand-built headers.
    """
    input_data, addr = receive_next(sockets.listener)
    host_addr = sockets.listener.getsockname()
    message = input_data.decode()
    LOGGER.info("Received message from %s: %s (%s bytes).", addr, message, len(input_data))
    protocol_data = ProtocolData(ip_id, host_addr[0], port, dontfragment)
    send_udp_message(message, addr, sockets.sender, protocol_data)
def get_local_ip(target: str) -> str:
    """Gets the IP address of the interface used to connect to the target.

    "Connecting" a UDP socket sends no packet; it only makes the OS pick a
    route, after which the socket's own name reveals the local interface
    address. The ``with`` block guarantees the temporary socket is closed
    even if ``connect`` raises (the original leaked it on that path).
    """
    with socket.socket(socket.AF_INET, socket.SOCK_DGRAM) as temp_socket:
        temp_socket.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1)
        temp_socket.connect((target, 0))
        return temp_socket.getsockname()[0]
def start_client(arguments):
    """Starts sending messages to the server.

    Sends ``arguments.count`` random uppercase payloads of
    ``arguments.size`` bytes (7 of which are the ``#NNNNN#`` counter
    prefix), waits for each echo, and logs a ping-style loss summary.
    """
    # Random starting IPv4 identification; incremented per packet below.
    ip_id = randint(0, 65535)
    listener = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    if is_windows():
        # On Windows the same UDP socket both sends and receives.
        sender = listener
    else:
        sender = socket.socket(socket.AF_INET, socket.SOCK_RAW, socket.IPPROTO_RAW)
    listener.settimeout(1)  # seconds
    listener.bind((arguments.host, arguments.cport))
    if arguments.host == "0.0.0.0":
        # Listener bound to all interfaces: resolve which local address
        # routes to the target, for use as the raw packet's source address.
        host_address = get_local_ip(arguments.client)
        LOGGER.debug("Clients IP: %s", host_address)
    else:
        host_address = arguments.host
    addr = (arguments.client, arguments.port)
    message = ''.join(choice(ascii_uppercase) for i in range(arguments.size - COUNTER_SIZE))
    i = 1
    received = 0;
    try:
        while i <= arguments.count:
            # Counter wraps at 100000 to keep the 5-digit prefix fixed-width.
            message_with_counter = "#{:05d}#{}".format(i % 100000, message)
            sockets = Sockets(sender, listener)
            protocol_data = ProtocolData(ip_id, host_address, arguments.cport,
                                         arguments.dontfragment)
            if send_and_receive_one(sockets, message_with_counter, addr, protocol_data):
                received = received + 1
            ip_id = (ip_id + 1) % 65536
            i = i + 1
            # Sleep between packets, but not after the last one.
            if i <= arguments.count:
                time.sleep(arguments.interval)
    finally:
        if i > 1:
            LOGGER.info("%s packets transmitted, %s received, %s%% loss", i - 1, received, 100 * (i - 1 - received) / (i - 1))
        LOGGER.info("Shutting down.")
        # On Windows sender IS listener; double close of the same socket is harmless.
        sender.close()
        listener.close()
def start_server(arguments):
    """Runs the server: echo every received datagram back forever.

    Loops until interrupted with <CTRL>-C; both sockets are closed on exit.
    """
    # Random starting IPv4 identification; incremented per echoed packet.
    ip_id = randint(0, 65535)
    if is_windows():
        sender = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    else:
        sender = socket.socket(socket.AF_INET, socket.SOCK_RAW, socket.IPPROTO_RAW)
    listener = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    # Timeout makes recvfrom return periodically so KeyboardInterrupt is handled.
    listener.settimeout(5)  # seconds
    listener.bind((arguments.host, arguments.port))
    LOGGER.info("Listening on %s:%s.", arguments.host, arguments.port)
    try:
        for i in itertools.count(1):
            sockets = Sockets(sender, listener)
            receive_and_send_one(sockets, ip_id, arguments.port, arguments.dontfragment)
            ip_id = (ip_id + 1) % 65536
            # NOTE(review): this increment has no effect — itertools.count
            # rebinds i on the next iteration anyway.
            i = i + 1
    except KeyboardInterrupt:
        LOGGER.debug("<CTRL>-C received, ending listener")
    finally:
        LOGGER.info("Shutting down.")
        sender.close()
        listener.close()
if __name__ == "__main__":
    # Command-line interface: exactly one of -C/--client or -S/--server.
    PARSER = argparse.ArgumentParser(__doc__, formatter_class=argparse.RawDescriptionHelpFormatter)
    GROUP = PARSER.add_mutually_exclusive_group(required=True)
    GROUP.add_argument('-C', '--client', help='Run in client mode, connect to the given HOST.',
                       metavar="HOST")
    GROUP.add_argument('-S', '--server', help='Run in server mode.', action='store_true')
    GROUP_CLIENT_SERVER = PARSER.add_argument_group("For client and server")
    GROUP_CLIENT_SERVER.add_argument('-H', '--host',
                                     help='The host that the listener should listen on.',
                                     default="0.0.0.0")
    GROUP_CLIENT_SERVER.add_argument('-p', '--port', help='Server port to listen on/connect to.',
                                     type=int, default=SERVER_PORT)
    # NOTE(review): the adjacent literals 'don''t' concatenate to "dont",
    # so the help text reads "Sets the dont fragment flag" — likely meant
    # an escaped apostrophe ("don\'t").
    GROUP_CLIENT_SERVER.add_argument('-d', '--dontfragment',
                                     help='Sets the don''t fragment flag (default: not set).',
                                     action='store_true')
    GROUP_CLIENT = PARSER.add_argument_group("Only for client")
    GROUP_CLIENT.add_argument('--cport',
                              help='The port that the client will use to listen for the reply.',
                              type=int, default=CLIENT_PORT)
    GROUP_CLIENT.add_argument('-s', '--size',
                              help='Size of udp data to be sent in payload (default: 64).',
                              type=int, default=64)
    GROUP_CLIENT.add_argument('-c', '--count',
                              help='Number of udp packets to be sent. (default: 1)',
                              type=int, default=1)
    GROUP_CLIENT.add_argument('-i', '--interval',
                              help='Seconds between two sendings (default: 1 second).',
                              type=int, default=1)
    PARSER.add_argument('-v', '--verbose', help="Increases the logging verbosity level.",
                        action='count')
    PARSER.add_argument('-V', '--version', help="Show version information and quit.",
                        action='version', version='UDPecho version ' + __version__)
    # No arguments at all: show usage and fail, since a mode is required.
    if len(sys.argv) == 1:
        PARSER.print_help(sys.stderr)
        sys.exit(1)
    ARGS = PARSER.parse_args()
    logging.basicConfig(level=logging.DEBUG if ARGS.verbose else logging.INFO,
                        format='%(asctime)s %(levelname)s %(message)s')
    if is_windows():
        # DF flag requires hand-built raw headers, which Windows mode lacks.
        if ARGS.dontfragment:
            LOGGER.error("Argument 'dontfragment' is not supported on windows systems.")
            sys.exit(2)
    if ARGS.server:
        start_server(ARGS)
    else:
        start_client(ARGS)
ace5c668eeaf80647fd5b2d37fec9a584df91c94 | 147 | py | Python | wagtail_react_streamfield/__init__.py | hoganld/wagtail-react-streamfield | 27f5cd6236c870e1af96ad6d4c12eaae998fbd70 | [
"BSD-3-Clause"
] | null | null | null | wagtail_react_streamfield/__init__.py | hoganld/wagtail-react-streamfield | 27f5cd6236c870e1af96ad6d4c12eaae998fbd70 | [
"BSD-3-Clause"
] | null | null | null | wagtail_react_streamfield/__init__.py | hoganld/wagtail-react-streamfield | 27f5cd6236c870e1af96ad6d4c12eaae998fbd70 | [
"BSD-3-Clause"
] | null | null | null | VERSION = (0, 8, 4)
__version__ = '.'.join(map(str, VERSION))
default_app_config = 'wagtail_react_streamfield.apps.WagtailReactStreamFieldConfig'
| 29.4 | 83 | 0.77551 |
ace5c66bac66ba3a7e1534cf2bf7be2a0586b128 | 1,260 | py | Python | builtins/extractcode_7z-linux/setup.py | nexB/scancode-plugins-builtin | 3243913072a58dd62a72f52dce9b86273aa8d122 | [
"Apache-2.0"
] | null | null | null | builtins/extractcode_7z-linux/setup.py | nexB/scancode-plugins-builtin | 3243913072a58dd62a72f52dce9b86273aa8d122 | [
"Apache-2.0"
] | null | null | null | builtins/extractcode_7z-linux/setup.py | nexB/scancode-plugins-builtin | 3243913072a58dd62a72f52dce9b86273aa8d122 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
# -*- encoding: utf-8 -*-
from glob import glob
from os.path import basename
from os.path import join
from os.path import splitext
from setuptools import find_packages
from setuptools import setup
# Short description doubles as the long description on PyPI.
desc = '''A ScanCode path provider plugin to provide a prebuilt native sevenzip binary.'''

setup(
    name='extractcode_7z',
    version='16.5.210531',
    license='apache-2.0 AND lgpl-2.1 and unrar and brian-gladman-3-clause',
    description=desc,
    long_description=desc,
    author='nexB',
    author_email='info@aboutcode.org',
    url='https://github.com/nexB/scancode-plugins',
    # src/ layout: packages and top-level modules both live under src/.
    packages=find_packages('src'),
    package_dir={'': 'src'},
    py_modules=[splitext(basename(path))[0] for path in glob('src/*.py')],
    include_package_data=True,
    zip_safe=False,
    classifiers=[
        # complete classifier list: http://pypi.python.org/pypi?%3Aaction=list_classifiers
        'Development Status :: 5 - Production/Stable',
        'Intended Audience :: Developers',
        'Topic :: Utilities',
    ],
    keywords=[
        'open source', 'extractcode', 'libarchive'
    ],
    # Registers the SevenzipPaths provider under ScanCode's
    # location-provider plugin entry point group.
    entry_points={
        'scancode_location_provider': [
            'extractcode_7zip = extractcode_7z:SevenzipPaths',
        ],
    },
)
| 28.636364 | 90 | 0.665079 |
ace5c78b4aa6b438fbcac90116dd44d96bb82a1a | 13,047 | py | Python | odps/df/tests/test_dataframe.py | wjsi/aliyun-odps-python-sdk | 8b064340e4376def201b8d8fdc0c2fa021aae9be | [
"Apache-2.0"
] | 412 | 2015-11-01T09:27:52.000Z | 2022-03-26T05:04:03.000Z | odps/df/tests/test_dataframe.py | wjsi/aliyun-odps-python-sdk | 8b064340e4376def201b8d8fdc0c2fa021aae9be | [
"Apache-2.0"
] | 168 | 2015-11-16T09:46:39.000Z | 2022-03-17T06:35:26.000Z | odps/df/tests/test_dataframe.py | wjsi/aliyun-odps-python-sdk | 8b064340e4376def201b8d8fdc0c2fa021aae9be | [
"Apache-2.0"
] | 103 | 2015-12-01T08:10:09.000Z | 2022-02-21T12:46:35.000Z | # -*- coding: utf-8 -*-
# Copyright 1999-2017 Alibaba Group Holding Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from datetime import datetime
from decimal import Decimal
from odps.tests.core import TestBase, tn, pandas_case, global_locked
from odps.df.backends.context import context
from odps.compat import unittest
from odps.models import Schema
from odps.df import DataFrame, Delay
from odps.utils import to_text
from odps.errors import ODPSError, DependencyNotInstalledError
class Test(TestBase):
    """Integration tests for the PyODPS DataFrame API backed by ODPS tables.

    ``setup`` creates a shared two-column table with three rows that most of
    the tests below operate on; ``teardown`` drops it.  Several tests also
    create and drop their own temporary tables.
    """

    def setup(self):
        # Shared fixture: a (bigint id, string name) table with three rows.
        test_table_name = tn('pyodps_test_dataframe')
        schema = Schema.from_lists(['id', 'name'], ['bigint', 'string'])

        self.odps.delete_table(test_table_name, if_exists=True)
        self.table = self.odps.create_table(test_table_name, schema)

        with self.table.open_writer() as w:
            w.write([[1, 'name1'], [2, 'name2'], [3, 'name3']])

    def teardown(self):
        self.table.drop()

    def testDataFrame(self):
        # Basic count / filter / execute round-trips against the fixture table.
        df = DataFrame(self.table)

        self.assertEqual(3, df.count().execute())
        self.assertEqual(1, df[df.name == 'name1'].count().execute())

        res = df[df.name.contains('中文')].execute()
        self.assertGreaterEqual(len(res), 0)

    @pandas_case
    def testDataFrameFromPandas(self):
        """DataFrame construction from pandas with type inference overrides."""
        import pandas as pd

        # Columns of all-None values have unknown type and must be declared.
        pd_df = pd.DataFrame({'a': [1, 2, 3], 'b': [None, None, None]})

        self.assertRaises(TypeError, lambda: DataFrame(pd_df))

        df = DataFrame(pd_df, unknown_as_string=True)
        self.assertEqual(df.schema.get_type('b').name, 'string')

        df = DataFrame(pd_df[['a']], as_type={'a': 'string'})
        self.assertEqual(df.schema.get_type('a').name, 'string')

        df = DataFrame(pd_df, as_type={'b': 'int'})
        self.assertEqual(df.schema.get_type('b').name, 'int64')

        # List-valued columns also require an explicit element type.
        pd_df = pd.DataFrame({'a': [1, 2, 3], 'b': [[1, 2], [3, 4, 5], [6]]})

        self.assertRaises(TypeError, DataFrame, pd_df)

        df = DataFrame(pd_df, as_type={'b': 'list<int64>'})
        self.assertEqual(df.schema.get_type('b').name, 'list<int64>')

        df = DataFrame(pd_df, as_type={'b': 'list<string>'})
        self.assertEqual(df.schema.get_type('b').name, 'list<string>')

        # Dict-valued columns require explicit key/value types as well.
        pd_df = pd.DataFrame({'a': [1, 2, 3],
                              'b': [{1: 'a', 2: 'b'}, {3: 'c', 4: 'd', 5: None}, {6: 'f'}]})

        self.assertRaises(TypeError, DataFrame, pd_df)

        df = DataFrame(pd_df, as_type={'b': 'dict<int64, string>'})
        self.assertEqual(df.schema.get_type('b').name, 'dict<int64,string>')

        df = DataFrame(pd_df, as_type={'b': 'dict<string, string>'})
        self.assertEqual(df.schema.get_type('b').name, 'dict<string,string>')

    def testHeadAndTail(self):
        df = DataFrame(self.table)

        self.assertEqual(1, len(df.head(1)))
        self.assertEqual(2, len(df.head(2)))
        self.assertEqual([3, 'name3'], list(df.tail(1)[0]))

        r = df[df.name == 'name2'].head(1)
        self.assertEqual(1, len(r))
        self.assertEqual([2, 'name2'], list(r[0]))

    @global_locked('odps_instance_method_repr')
    def testInstanceMethodRepr(self):
        """repr() of bound methods must not trigger execution in interactive mode."""
        from odps import options

        # A DataFrame subclass whose execute() always raises: repr() below
        # would fail loudly if it accidentally executed the expression.
        class CannotExecuteDataFrame(DataFrame):
            def execute(self, **kwargs):
                raise RuntimeError('DataFrame cannot be executed')

        options.interactive = True

        try:
            df = CannotExecuteDataFrame(self.table)

            self.assertEqual(repr(df.count), '<bound method Collection.count>')
            self.assertEqual(repr(df.name.count), '<bound method Column._count>')
        finally:
            # Always restore the global flag, even if assertions fail.
            options.interactive = False

    @pandas_case
    def testToPandas(self):
        """to_pandas in sync/wrapped/async/delayed forms on mixed column types."""
        table_name = tn('pyodps_test_mixed_engine_to_pandas')
        self.odps.delete_table(table_name, if_exists=True)
        table2 = self.odps.create_table(name=table_name,
                                        schema=Schema.from_lists(['col%s' % i for i in range(7)],
                                                                 ['bigint', 'double', 'string', 'datetime',
                                                                  'boolean', 'decimal', 'datetime']))
        expr2 = DataFrame(table2)

        data2 = [
            [1234567, 3.14, 'test', datetime(2016, 6, 1), True, Decimal('3.14'), None]
        ]
        self.odps.write_table(table2, 0, data2)

        pd_df = expr2.to_pandas()
        self.assertSequenceEqual(data2[0], pd_df.iloc[0].tolist())

        wrapped_pd_df = expr2.to_pandas(wrap=True)
        self.assertSequenceEqual(data2[0], list(next(wrapped_pd_df.execute())))

        pd_df_col = expr2.col0.to_pandas()
        self.assertSequenceEqual([data2[0][0]], pd_df_col.tolist())

        wrapped_pd_df_col = expr2.col0.to_pandas(wrap=True)
        self.assertSequenceEqual([data2[0][0]], list(next(wrapped_pd_df_col.execute())))

        pd_df_future = expr2.to_pandas(async_=True)
        self.assertSequenceEqual(data2[0], pd_df_future.result().iloc[0].tolist())

        wrapped_pd_df_future = expr2.to_pandas(async_=True, wrap=True)
        self.assertSequenceEqual(data2[0], list(next(wrapped_pd_df_future.result().execute())))

        # Delayed execution: nothing runs until delay.execute() is called.
        delay = Delay()
        pd_df_future = expr2.to_pandas(delay=delay)
        delay.execute()
        self.assertSequenceEqual(data2[0], pd_df_future.result().iloc[0].tolist())

        # Errors raised during async execution surface from future.result().
        exc_future = (expr2.col0 / 0).to_pandas(async_=True)
        self.assertRaises(ODPSError, exc_future.result)

    @pandas_case
    def testUnicodePdDataFrame(self):
        # Non-ASCII column names and values must round-trip intact.
        import pandas as pd

        pd_df = pd.DataFrame([['中文'], [to_text('中文2')]], columns=[to_text('字段')])
        df = DataFrame(pd_df)

        r = df['字段'].execute()
        self.assertEqual(to_text('中文'), to_text(r[0][0]))
        self.assertEqual(to_text('中文2'), to_text(r[1][0]))

    @pandas_case
    def testPandasGroupbyFilter(self):
        """Filtering on a groupby aggregation, with and without result caching."""
        import pandas as pd

        data = [
            [2001, 1],
            [2002, 2],
            [2003, 3]
        ]
        df = DataFrame(pd.DataFrame(data, columns=['id', 'fid']))

        df2 = df.groupby('id').agg(df.fid.sum())
        df3 = df2[df2.id == 2003]

        expected = [
            [2003, 3]
        ]

        self.assertEqual(df3.execute().values.values.tolist(), expected)

        # Executing df2 first should cache it; downstream filters reuse the
        # cached result and repeated execution stays stable.
        df2 = df.groupby('id').agg(df.fid.sum())
        df2.execute()
        self.assertTrue(context.is_cached(df2))
        df3 = df2[df2.id == 2003]
        self.assertEqual(df3.execute().values.values.tolist(), expected)
        self.assertEqual(df3.execute().values.values.tolist(), expected)

        df4 = df.fid.sum()
        self.assertEqual(df4.execute(), 6)
        self.assertEqual(df4.execute(), 6)

    def testCreateDataFrameFromPartition(self):
        from odps.types import PartitionSpec
        test_table_name = tn('pyodps_test_dataframe_partition')
        schema = Schema.from_lists(['id', 'name'], ['bigint', 'string'], ['ds'], ['string'])

        self.odps.delete_table(test_table_name, if_exists=True)
        table = self.odps.create_table(test_table_name, schema)

        with table.open_writer('ds=today', create_partition=True) as w:
            w.write([[1, 'name1'], [2, 'name2'], [3, 'name3']])

        try:
            df = DataFrame(table.get_partition('ds=today'))
            self.assertEqual(df.count().execute(), 3)

            # to_df() on a partition should keep a reference to the source
            # table and partition spec.
            df = table.get_partition('ds=today').to_df()
            partition = df.data
            self.assertIs(partition.table, table)
            self.assertEqual(partition.partition_spec, PartitionSpec('ds=today'))
            self.assertEqual(df.count().execute(), 3)
        finally:
            table.drop()

    def testSetItem(self):
        """Column assignment, deletion, and conditional (predicate) assignment."""
        df = DataFrame(self.table)
        df['id2'] = df.id + 1
        self.assertEqual(len(df.execute()), 3)

        df['id3'] = df['id2'] * 2
        self.assertEqual(len(next(df.execute())), 4)

        del df['id2']
        res = df.execute()
        result = self._get_result(res)

        expected = [
            [1, 'name1', 4],
            [2, 'name2', 6],
            [3, 'name3', 8]
        ]
        self.assertEqual(expected, result)

        df = DataFrame(self.table)
        df['id2'] = df.id

        # to_pandas() requires the pandas backend; skip silently if missing.
        try:
            res = df.to_pandas()
            result = self._get_result(res)
            expected = [
                [1, 'name1', 1],
                [2, 'name2', 2],
                [3, 'name3', 3]
            ]
            self.assertEqual(expected, result)
        except (DependencyNotInstalledError, ImportError):
            pass

        # Conditional assignment: rows not matching the predicate get None.
        df = DataFrame(self.table)
        df[df.id <= 2, 'id2'] = df.id
        try:
            res = df.to_pandas()
            result = self._get_result(res)
            expected = [
                [1, 'name1', 1],
                [2, 'name2', 2],
                [3, 'name3', None]
            ]
            self.assertEqual(expected, result)
        except (DependencyNotInstalledError, ImportError):
            pass

        # A later conditional assignment overrides earlier ones where it matches.
        df = DataFrame(self.table)
        df[df.id <= 2, 'id2'] = df.id
        df[df.id > 1, 'id2'] = None
        try:
            res = df.to_pandas()
            result = self._get_result(res)
            expected = [
                [1, 'name1', 1],
                [2, 'name2', None],
                [3, 'name3', None]
            ]
            self.assertEqual(expected, result)
        except (DependencyNotInstalledError, ImportError):
            pass

        # Multiple predicates in one assignment are combined (AND semantics
        # presumably — verify against the DataFrame setitem implementation).
        df = DataFrame(self.table)
        df[df.id < 2, 'id2'] = df.id
        df[df.id > 2, df.name == 'name3', 'id2'] = df.id + 1
        try:
            res = df.to_pandas()
            result = self._get_result(res)
            expected = [
                [1, 'name1', 1],
                [2, 'name2', None],
                [3, 'name3', 4]
            ]
            self.assertEqual(expected, result)
        except (DependencyNotInstalledError, ImportError):
            pass

    def testRepeatSetItem(self):
        # Assigning the same derived column twice must not break execution.
        df = DataFrame(self.table)

        df['rank'] = df.groupby('name').sort('id').id.rank()
        df['rank'] = df.groupby('name').sort('id').id.rank()
        self.assertEqual(len(df.execute()), 3)

    def testDataFrameWithColHead(self):
        # A column literally named 'head' must not shadow the head() method.
        test_table_name2 = tn('pyodps_test_dataframe_with_head')
        schema = Schema.from_lists(['id', 'head'], ['bigint', 'string'])
        self.odps.delete_table(test_table_name2, if_exists=True)
        table = self.odps.create_table(test_table_name2, schema)

        with table.open_writer() as w:
            w.write([[1, 'name1'], [2, 'name2'], [3, 'name3']])

        df = DataFrame(table)
        df2 = DataFrame(self.table)
        df3 = df.join(df2, on=('head', 'name'))
        df3.head(10)

    def testFillna(self):
        test_table_name = tn('pyodps_test_dataframe_fillna')
        self.odps.delete_table(test_table_name, if_exists=True)
        table = self.odps.create_table(
            test_table_name, Schema.from_lists(['val1', 'val2', 'val3', 'val4'], ['bigint'] * 4,
                                               ['name'], ['string']))
        table.create_partition('name=a')

        df = DataFrame(table.get_partition('name=a'))
        columns = df.columns[:3]
        # fillna restricted to a subset of columns, then a row-wise apply.
        df2 = df[columns].fillna(0, subset=columns[:2])
        df2.head()

        def sum_val(row):
            return sum(row)

        df2['new_field'] = df2.apply(sum_val, axis=1, reduce=True, rtype='int')
        df2.head()

    def testJoinPartitionDataFrame(self):
        """Joins mixing partition-backed and table-backed DataFrames."""
        test_table_name = tn('pyodps_test_join_partition_dataframe')
        schema = Schema.from_lists(['id', 'name'], ['bigint', 'string'], ['ds'], ['string'])
        self.odps.delete_table(test_table_name, if_exists=True)
        table = self.odps.create_table(test_table_name, schema)
        table.create_partition('ds=today')

        test_table_name2 = tn('pyodps_test_join_partition_dataframe2')
        self.odps.delete_table(test_table_name2, if_exists=True)
        table2 = self.odps.create_table(test_table_name2, schema)
        table2.create_partition('ds=today')

        df = DataFrame(table.get_partition('ds=today'))
        df2 = DataFrame(table2.get_partition('ds=today'))
        df3 = DataFrame(self.table)

        df4 = df2.join(df, on=[df2.id.astype('string') == df.id.astype('string')])
        df5 = df3.join(df, on=[df3.id.astype('string') == df.id.astype('string')])
        # id_y columns come from the join suffixing of the right-hand side.
        df4.left_join(df5, on=[df4.id_y.astype('string') == df5.id_y.astype('string')]).head()
if __name__ == '__main__':
    # Allow running this test module directly as a script.
    unittest.main()
| 34.607427 | 107 | 0.578524 |
ace5c7998b4f8287b7a970d89d1ecc75ff00e3d3 | 1,744 | py | Python | DPS_Huijben2020/CIFAR10_MNIST/PresaveMNIST.py | IamHuijben/Deep-Probabilistic-Subsampling | 4cb348a7610619d4b62a6c66b944ee7fc5a10a32 | [
"MIT"
] | 12 | 2020-03-24T13:41:51.000Z | 2022-02-28T11:59:31.000Z | DPS_Huijben2020/CIFAR10_MNIST/PresaveMNIST.py | IamHuijben/Deep-Probabilistic-Subsampling | 4cb348a7610619d4b62a6c66b944ee7fc5a10a32 | [
"MIT"
] | null | null | null | DPS_Huijben2020/CIFAR10_MNIST/PresaveMNIST.py | IamHuijben/Deep-Probabilistic-Subsampling | 4cb348a7610619d4b62a6c66b944ee7fc5a10a32 | [
"MIT"
] | 2 | 2020-02-17T13:58:50.000Z | 2021-08-09T15:48:44.000Z | """
=============================================================================
Eindhoven University of Technology
==============================================================================
Source Name : presaveMNIST.py
File to load MNIST data and split into train,validate and test set Author : Iris Huijben
Date : 24/07/2019
Reference : Iris A.M. Huijben, Bastiaan S. Veeling, and Ruud J.G. van Sloun,
"Deep probabilistic subsampling for task-adaptive compressed sensing", 2019
==============================================================================
"""
# File to load MNIST data and split into train,validate and test set
from keras.datasets import mnist
from sklearn.model_selection import train_test_split
import keras
import numpy as np
import os
from pathsetupMNIST import in_dir
# Number of digit classes in MNIST.
num_classes = 10

# Load The data, split between train and test sets:
# The targets are the labels for classification
(x_train, y_train), (x_valANDtest, y_valANDtest) = mnist.load_data()

#%%
# Convert targets to one-hot vectors for classification.
y_train = keras.utils.to_categorical(y_train, num_classes)
y_valANDtest = keras.utils.to_categorical(y_valANDtest, num_classes)
#
## Split into separate validation and test set
# random_state is fixed so the val/test split is reproducible across runs.
x_val, x_test, y_val, y_test = train_test_split(x_valANDtest, y_valANDtest, test_size=0.5, random_state=1)

#%%
# Persist all six arrays as .npy files in the configured input directory.
savedir = in_dir
np.save(os.path.join(savedir,'x_train.npy'),x_train)
np.save(os.path.join(savedir,'x_val.npy'),x_val)
np.save(os.path.join(savedir,'x_test.npy'),x_test)
np.save(os.path.join(savedir,'y_train.npy'),y_train)
np.save(os.path.join(savedir,'y_val.npy'),y_val)
np.save(os.path.join(savedir,'y_test.npy'),y_test)
ace5c7c0474528c7bb49f148aa34b0738801def6 | 87,838 | py | Python | t5/data/preprocessors.py | Bharat123rox/text-to-text-transfer-transformer | a08f0d1c4a7caa6495aec90ce769a29787c3c87c | [
"Apache-2.0"
] | null | null | null | t5/data/preprocessors.py | Bharat123rox/text-to-text-transfer-transformer | a08f0d1c4a7caa6495aec90ce769a29787c3c87c | [
"Apache-2.0"
] | null | null | null | t5/data/preprocessors.py | Bharat123rox/text-to-text-transfer-transformer | a08f0d1c4a7caa6495aec90ce769a29787c3c87c | [
"Apache-2.0"
] | null | null | null | # Copyright 2020 The T5 Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Preprocess tensorflow.data.Dataset()."""
import collections
import math
import re
import uuid
import babel
import gin
import tensorflow.compat.v1 as tf
# pylint: disable=g-long-lambda
@gin.configurable
def num_parallel_calls(deterministic=False):
  """Return the parallelism for tf.data.Dataset.map of stateful functions.

  Gin-configure `deterministic` to True for evaluation so that the input
  pipeline runs sequentially and reproducibly; leave it False for training
  to allow a faster, parallel (nondeterministic) pipeline.

  Args:
    deterministic: a boolean; True disables parallelism.

  Returns:
    a value to be passed as num_parallel_calls to tf.data.Dataset.map:
    None when deterministic, otherwise tf.data.experimental.AUTOTUNE.
  """
  if deterministic:
    return None
  return tf.data.experimental.AUTOTUNE
def rekey(dataset, key_map=None):
  """Rename the features of each example according to `key_map`.

  For example, if the dataset returns examples of the format:
    {'foo': 'something', 'bar': 'something else'}
  and key_map = {'boo': 'foo', 'spar': 'bar'} then this function will return
  examples with the format
    {'boo': 'something', 'spar': 'something else'}

  If a new key maps to an empty key or None, the new value is set to the
  empty string.

  Args:
    dataset: a tf.data.Dataset to process.
    key_map: dictionary mapping new keys to original keys

  Returns:
    A preprocessed tf.data.Dataset with the renamed features.
  """
  def _remap(example):
    if not key_map:
      # No mapping supplied: pass the example through untouched.
      return example
    return {
        new_key: example[old_key] if old_key else ''
        for new_key, old_key in key_map.items()
    }

  return dataset.map(_remap, num_parallel_calls=tf.data.experimental.AUTOTUNE)
def translate(dataset, source_language, target_language):
  """Convert a translation dataset to a text2text pair.

  For example, say the dataset returns examples of this format:
    {'de': 'Das ist gut.', 'en': 'That is good.'}
  If source_language = 'de', target_language = 'en', then the outputs will have
  the format:
    {'inputs': 'translate German to English: Das ist gut.',
     'targets': 'That is good.'}

  Args:
    dataset: a tf.data.Dataset to process.
    source_language: source language code (e.g. 'en') to translate from.
    target_language: target language code (e.g. 'de') to translate to.

  Returns:
    A preprocessed tf.data.Dataset with the format listed above.
  """
  # Language codes like zh-cn are not supported; only the first 2 characters
  # are used to look up the English language name.
  for language in (source_language, target_language):
    if language != language[:2]:
      tf.logging.warn(
          'Extended language code {} not supported. Falling back on {}'.format(
              language, language[:2]
          )
      )

  # Build the instruction prefix once, outside the per-example map function.
  source_name = babel.Locale(source_language[:2]).english_name
  target_name = babel.Locale(target_language[:2]).english_name
  prefix = 'translate {} to {}: '.format(source_name, target_name)

  def _to_text2text(example):
    """Prepend the translation instruction and split source/target."""
    return {
        'inputs': tf.strings.join([prefix, example[source_language]]),
        'targets': example[target_language],
    }

  return dataset.map(
      _to_text2text, num_parallel_calls=tf.data.experimental.AUTOTUNE)
def summarize(dataset, article_key, summary_key):
  """Convert a summarization dataset to a text2text pair.

  For example, say the dataset returns examples of this format:
    {'article': <article>, 'highlights': <summary>}
  If article_key = 'article', summary_key = 'highlights', then the outputs will
  have the format:
    {'inputs': 'summarize': <article>, 'targets': <summary>}

  Args:
    dataset: a tf.data.Dataset to process.
    article_key: the feature key for the article to summarize.
    summary_key: the feature key for the target summary.

  Returns:
    A preprocessed tf.data.Dataset with the format listed above.
  """
  def _to_text2text(example):
    """Prefix the article with 'summarize:' and expose the summary as target."""
    return {
        'inputs': tf.strings.join(
            ['summarize:', example[article_key]], separator=' '),
        'targets': example[summary_key],
    }

  return dataset.map(
      _to_text2text, num_parallel_calls=tf.data.experimental.AUTOTUNE)
# Unicode ranges for characters in non-spaced languages.
# Used by pad_nonspaced_languages() to build a regex character class.
# https://en.wikipedia.org/wiki/Category:Writing_systems_without_word_boundaries
# https://en.wikipedia.org/wiki/Han_unification#Unicode_ranges
# https://linguistics.stackexchange.com/questions/6131
NON_SPACED_LANGUAGE_RANGES = (
    '\u1000-\u104f',  # Burmese
    '\u4e00-\u9fff',  # CJK Unified Ideographs
    '\u3400-\u4dbf',  # CJK Unified Ideographs Extension A
    '\uf900-\ufaff',  # CJK Compatibility Ideographs
    '\u2e80-\u2eff',  # CJK Radicals Supplement
    '\u31c0-\u31ef',  # CJK Strokes
    '\u3000-\u303f',  # CJK Symbols and Punctuation
    '\u3040-\u309f',  # Japanese Hiragana
    '\u30a0-\u30ff',  # Japanese Katakana
    '\ua980-\ua9df',  # Javanese
    '\u1780-\u17ff',  # Khmer
    '\u19e0-\u19ff',  # Khmer Symbols
    '\u0e80-\u0eff',  # Lao
    '\u1980-\u19df',  # Tai Lue
    '\u1a20-\u1aaf',  # Tai Tham
    '\u0e00-\u0e7f',  # Thai
    '\u0f00-\u0fff',  # Tibetan
)
def pad_nonspaced_languages(dataset, text_key='text'):
  """Surround each character of a non-spaced language with spaces.

  Args:
    dataset: a tf.data.Dataset to process.
    text_key: a string, the key for the text feature to preprocess in the
      dataset examples.

  Returns:
    a tf.data.Dataset with the modified examples.
  """
  # A single regex character class covering every non-spaced-language range.
  char_class = u'([{}])'.format(''.join(NON_SPACED_LANGUAGE_RANGES))

  def _pad(example):
    out = dict(example)
    # Add spaces around any character from a non-spaced language, then
    # collapse any resulting runs of whitespace into single spaces.
    padded = tf.strings.regex_replace(example[text_key], char_class, r' \1 ')
    out[text_key] = tf.strings.regex_replace(padded, r'\s+', ' ')
    return out

  return dataset.map(_pad, num_parallel_calls=tf.data.experimental.AUTOTUNE)
def _pad_punctuation(text):
  """Return `text` with spaces added around every punctuation character."""
  spaced = tf.strings.regex_replace(text, r'(\W)', r' \1 ')
  # Collapse the runs of whitespace introduced above into single spaces.
  return tf.strings.regex_replace(spaced, r'\s+', ' ')
def _string_join(lst):
  """Join string tensors on single spaces, collapsing repeated whitespace."""
  joined = tf.strings.join(lst, separator=' ')
  return tf.strings.regex_replace(joined, r'\s+', ' ')
def trivia_qa(dataset):
  """Convert a TriviaQA example to multiple flattened examples.

  TriviaQA produces examples with this form:
    {'entity_pages': {dict of wiki entities},
     'search_results': <dict of web search results>,
     'answer': {dict of all answers}, 'question': <question>,
     'question_id': <question_id>, 'question_source': <question_source>}
  This function will return flattened examples of the format:
    {'inputs': 'question: <question> context: <article>'
     'targets': 'answer: <sampled answer>'}

  Args:
    dataset: a tf.data.Dataset to process.

  Returns:
    A preprocessed tf.data.Dataset with the format listed above.
  """
  def triviaqa_question_answer_context(x):
    """Extracts matched contexts and answers.

    Returns all matched (question-context, answer) pairs.

    Args:
      x: A tfds sample.

    Returns:
      Flattened samples: (question-context, answer).
    """
    # Contexts come from both wiki pages and web search results, if present.
    contexts = []
    if 'entity_pages' in x:
      contexts.append(x['entity_pages']['wiki_context'])
    if 'search_results' in x:
      contexts.append(x['search_results']['search_context'])
    contexts = tf.concat(contexts, 0)

    q = _pad_punctuation(x['question'])
    answers = x['answer']['normalized_aliases']

    # The loop below iterates over the full (context, answer) cross product.
    combination_size = tf.size(answers)*tf.size(contexts)
    find_answers = tf.TensorArray(
        tf.bool, size=combination_size, dynamic_size=True)
    selected_answers = tf.TensorArray(
        tf.string, size=combination_size, dynamic_size=True)
    join_q_c = tf.TensorArray(
        tf.string, size=combination_size, dynamic_size=True)

    def cond_fn(i, find_answers, selected_answers, join_q_c):
      del find_answers, selected_answers, join_q_c  # Unused
      return tf.less(i, combination_size)

    def body_fn(i, find_answers, selected_answers, join_q_c):
      """Find answers from contexts and join."""
      # Decompose the flat loop index into (context_idx, answer_idx).
      context_idx = tf.math.floordiv(i, tf.size(answers))
      answer_idx = tf.math.mod(i, tf.size(answers))

      a = _pad_punctuation(answers[answer_idx])
      a_ = tf.strings.join(['.*', a, '.*'])
      c = _pad_punctuation(contexts[context_idx])
      # Case-insensitive check that the answer occurs inside the context.
      find_a = tf.strings.regex_full_match(
          tf.strings.lower(c),
          tf.strings.lower(a_))
      find_answers = find_answers.write(i, find_a)
      selected_answers = selected_answers.write(i, a)

      join_q_c_str = _string_join(['question:', q, 'context:', c])
      join_q_c = join_q_c.write(i, join_q_c_str)
      return (i + 1, find_answers, selected_answers, join_q_c)

    _, find_answers, selected_answers, join_q_c = tf.while_loop(
        cond_fn,
        body_fn,
        loop_vars=[
            tf.constant(0), find_answers, selected_answers,
            join_q_c
        ])
    find_answers = find_answers.stack()
    selected_answers = selected_answers.stack()
    join_q_c = join_q_c.stack()

    # Keep only the pairs whose answer was actually found in the context.
    selected_answers = tf.boolean_mask(selected_answers, find_answers)
    selected_join_q_c = tf.boolean_mask(join_q_c, find_answers)
    return selected_join_q_c, selected_answers

  def my_fn(x):
    """Create TriviaQA example."""
    join_q_c, a = triviaqa_question_answer_context(x)
    return {
        'inputs': join_q_c,
        'targets': a
    }

  # Each source example maps to a batch of examples; unbatch flattens them.
  dataset = dataset.map(my_fn, num_parallel_calls=tf.data.experimental.AUTOTUNE)
  return dataset.unbatch()
def squad(dataset, include_context=True):
  """Convert SQuAD examples to a text2text pair.

  SQuAD produces examples with this form:
    {'id': <id>, context': <article>, 'question': <question>,
     'answers': { 'text': [<n answers>] }}
  This function will return examples of the format:
    {'inputs': 'question: <question> context: <article>'
     'targets': '<answer_0>',
     'id': <id>, 'question': <question>, 'context': <context>,
     'answers': [<n answers>]},

  Args:
    dataset: a tf.data.Dataset to process.
    include_context: a boolean; when False the inputs omit the article and
      use a trivia-style prefix instead.

  Returns:
    A preprocessed tf.data.Dataset with the format listed above.
  """
  def _convert(x):
    """Create squad example."""
    answers = _pad_punctuation(x['answers']['text'])
    question = _pad_punctuation(x['question'])
    context = _pad_punctuation(x['context'])
    if include_context:
      inputs = _string_join(['question:', question, 'context:', context])
    else:
      inputs = _string_join(['squad trivia question:', question])
    # The first answer is the training target; all answers are kept for
    # evaluation.
    return {
        'inputs': inputs,
        'targets': answers[0],
        'id': x['id'],
        'context': context,
        'question': question,
        'answers': answers,
    }

  return dataset.map(_convert, num_parallel_calls=tf.data.experimental.AUTOTUNE)
def _span_answer(context, answer_text):
  """Finds start/end indices of answer_text in context after space tokenization.

  If answer_tokens is not a sublist of context_tokens, returns empty string.

  Args:
    context: 0-d string tensor
    answer_text: 0-d string

  Returns:
    A string tensor of the form 'start: <i> end: <j>', or '' if not found.
  """
  def space_tok(s):
    """Replace non-word chars with space then split on space."""
    s = tf.strings.regex_replace(s, r'\W', ' ')
    return tf.strings.split(input=[s], sep=' ').values

  def find_subseq(n, h):
    """Finds index of needle subsequence inside haystack.

    Args:
      n: 1-d tensor
      h: 1-d tensor same type as n

    Returns:
      Index of start of n if found; otherwise -1.
    """
    l_n = tf.size(n)
    l_h = tf.size(h)
    i = tf.constant(0)
    end = l_h - l_n
    # TODO(peterjliu): Replace with craffel@'s more efficient code
    # if necessary: cr/254848350.
    # Linear scan: advance i while the window h[i:i+l_n] differs from n.
    w = tf.while_loop(
        lambda i: tf.logical_and(tf.less(i, end),
                                 tf.reduce_any(tf.not_equal(h[i:i+l_n], n))),
        lambda i: i+1,
        [i])
    # If the loop ran to `end` without matching, report -1 (not found).
    return tf.cond(tf.equal(end, w), lambda: -1, lambda: w)

  answer_tokens = space_tok(answer_text)
  context_tokens = space_tok(context)
  start = find_subseq(answer_tokens, context_tokens)
  # `end` is inclusive: the index of the last answer token.
  end = start + tf.size(answer_tokens) - 1
  # Just take the first candidate that matches exactly.
  return tf.cond(tf.equal(start, -1),
                 lambda: tf.constant(''),
                 lambda: tf.strings.format('start: {} end: {}', [start, end]))
def squad_span_space_tokenized(dataset):
  """Convert SQuAD examples to a text2text pair with span output.

  SQuAD produces examples with this form:
    {'context': <article>, 'question': <question>,
     'answers': { 'text': [<all answers>] }}

  This function returns examples with the format
    {'inputs': 'context: <article> question: <question>',
     'targets': 'start: <start_index> end: <end_index>'}
  where <start_index> and <end_index> specify the space-tokenized span
  start/end indices. Both <start_index> and <end_index> are included in
  the answer. In the case where the tokenized answer is
  not found in the tokenized context, the example is skipped.

  Args:
    dataset: a tf.data.Dataset to process.

  Returns:
    A preprocessed tf.data.Dataset with the format listed above.
  """
  def _to_span_targets(x):
    """Replace the text target with a 'start: i end: j' span string."""
    out = dict(x)
    out['targets'] = _span_answer(x['context'], x['targets'])
    return out

  span_dataset = squad(dataset)
  span_dataset = span_dataset.map(
      _to_span_targets, num_parallel_calls=tf.data.experimental.AUTOTUNE)
  # Drop examples whose answer could not be located in the context.
  return span_dataset.filter(lambda x: tf.strings.length(x['targets']) > 0)
def random_split_text(dataset,
                      text_key='text',
                      min_words_per_segment=16,
                      max_words_per_segment=512,
                      max_words_total=8192):
  """Randomly split single-string examples into multiple examples each.

  Segment lengths are chosen according to a log-uniform distribution.
  Each incoming string is chopped into multiple equal-length examples
  with the last one possibly being shorter.

  If the input string is longer than max_words_total, then we use one random
  chunk and discard the rest. This may help with model stability.

  The intended use case is to break up long text examples for use in
  unsupervised transfer-learning.

  We don't really want to use this preprocessor for any dataset which has a
  well-defined evaluation procedure. If we apply this preprocessor e.g. in an
  MT component, then the evaluation job will randomly split text when
  evaluating and the BLEU will get funky.

  Args:
    dataset: a tf.data.Dataset with dictionaries containing the key text_key
    text_key: a string
    min_words_per_segment: an integer
    max_words_per_segment: an integer
    max_words_total: an integer

  Returns:
    a dataset
  """
  def random_chunk(x, chunk_size):
    """Pick a random chunk of a 1d Tensor.

    The tensor is divided into chunks of length chunk_size, with the last
    chunk being potentially smaller.  A random chunk is returned.

    Args:
      x: a 1d tf.Tensor
      chunk_size: an integer

    Returns:
      a 1d tf.Tensor with length <= chunk_size
    """
    size = tf.size(x)
    num_chunks = tf.maximum(1, (size - 1) // chunk_size + 1)
    chunk_num = tf.random.uniform(
        [], minval=0, maxval=num_chunks, dtype=tf.int32)
    return x[chunk_size * chunk_num:chunk_size * (chunk_num + 1)]

  def my_fn(x):
    """Split one string into multiple strings.

    Args:
      x: a feature dictionary

    Returns:
      a feature dictionary
    """
    text = x[text_key]
    words = tf.strings.split([text]).values
    if max_words_total:
      # Cap the total work by keeping a single random chunk of the document.
      words = random_chunk(words, max_words_total)
    n_words = tf.size(words)
    # first pick a length (number of words per segment), log-uniform in
    # [min_words_per_segment, max_words_per_segment]
    length = tf.cast(tf.exp(tf.random_uniform(
        [],
        minval=math.log(min_words_per_segment),
        maxval=math.log(max_words_per_segment))), tf.int32)
    # Pad to a multiple of length, then use tf.reshape to split up the words
    # into num_segments segments each of the given length.
    num_segments = tf.cast(
        tf.ceil(tf.cast(n_words, tf.float32) / tf.cast(length, tf.float32)),
        tf.int32)
    padding = num_segments * length - n_words
    words = tf.pad(words, [[0, padding]])
    words = tf.reshape(words, [-1, length])
    # Finally, join with spaces and strip.  The padding turns into a bunch of
    # spaces that get stripped out.
    words = tf.strings.reduce_join(words, axis=1, separator=' ')
    return {text_key: tf.strings.strip(words)}
  # num_parallel_calls() (not AUTOTUNE directly) so evaluation can be made
  # deterministic via gin.
  dataset = dataset.map(my_fn, num_parallel_calls=num_parallel_calls())
  return dataset.unbatch()
def _split_text_to_words(dataset, text_key='text', min_num_words=2):
  """Add a `words` feature (whitespace-split text), dropping short examples.

  Examples with fewer than `min_num_words` words are filtered out.
  """
  def _add_words(example):
    out = dict(example)
    out['words'] = tf.strings.split([example[text_key]]).values
    return out

  dataset = dataset.map(
      _add_words, num_parallel_calls=tf.data.experimental.AUTOTUNE)
  return dataset.filter(lambda x: tf.size(x['words']) >= min_num_words)
def fill_in_the_blank(dataset,
                      text_key='text',
                      label='fill: '):
  """Create a dataset consisting of fill-in-the-blank text examples.

  The input examples should have a key text_key associated with a tf.string
  value.

  The output examples have keys 'inputs' and 'targets'.

  The input string is split on whitespace to form a sequence of words.
  This sequence is chopped randomly into segments of one or more words.
  Alternate segments are included in the inputs and targets, with a special
  word 'X' marking a missing segment.

  The given label is prepended to the inputs. Each input string produces two
  examples - one the inverse of the other. Inputs with less than two words
  are dropped.

  EXAMPLE:

  input:
  {
    'text': 'The fat cat sat on the mat.'
  }
  outputs:
  {
    'inputs': 'fill: The fat X the X'
    'targets': 'X cat sat on X mat.'
  }
  {
    'inputs': 'fill: X cat sat on X mat.'
    'targets': 'The fat X the X'
  }

  Args:
    dataset: a tf.data.Dataset
    text_key: a string, the key for the text feature to preprocess in the
      dataset examples.
    label: a string, the label to prepend to the inputs.

  Returns:
    a tf.data.Dataset
  """
  def my_fn(x):
    """Generates two preprocessed examples that are roughly inverses.

    Args:
      x: an example dict with text pre-split in `words` feature.

    Returns:
      an example dict with two inputs and two targets, one for each resulting
      preprocessed example.
    """
    words = x['words']
    n_words = tf.size(words)

    # First select the break probability.  We pick this on a log-uniform
    # distribution between 1/(n_words + 1) and 1/2.  This means that some
    # sequences will be chopped roughly and others finely.
    min_log_p_break = -tf.math.log(tf.to_float(n_words) + 2.0)
    max_log_p_break = -tf.math.log(2.0)
    p_break = tf.exp(tf.random_uniform(
        [], minval=min_log_p_break, maxval=max_log_p_break))
    # craffel@ says that there may be bugs in random_uniform making it not
    # really uniform.  This doesn't seem horribly important here, but may
    # need another look.
    breaks = tf.less(tf.random_uniform([n_words - 1]), p_break)

    def one_random_break():
      # Fallback: if no break was sampled, force exactly one at a random
      # position so both halves are non-empty.
      pos = tf.random_uniform(
          [], minval=0, maxval=n_words - 1, dtype=tf.int32)
      return tf.one_hot(pos, n_words - 1,
                        dtype=tf.bool, on_value=True, off_value=False)

    breaks = tf.cond(
        tf.math.reduce_any(breaks), lambda: breaks, one_random_break)
    breaks = tf.concat([[True], breaks], axis=0)
    # Alternate segments get ids 0/1; each id becomes one side of the pair.
    word_to_seq_id = tf.mod(tf.math.cumsum(tf.to_int32(breaks)), 2)

    # separators:
    #   if in your segment: ' '
    #   if break to other segment: ' X'
    #   else: ''
    results = []
    for seq_id in [0, 1]:
      in_my_seq = tf.equal(word_to_seq_id, seq_id)
      separator_strings = tf.where(
          in_my_seq,
          tf.fill([n_words], ' '),
          tf.where(breaks, tf.fill([n_words], ' X'),
                   tf.fill([n_words], '')))
      word_strings = tf.where(in_my_seq, words, tf.fill([n_words], ''))
      all_strings = tf.stack([separator_strings, word_strings], axis=1)
      # substr(..., 1, max) drops the single leading space/separator.
      results.append(tf.strings.substr(
          tf.strings.reduce_join(all_strings), 1, tf.int32.max))

    inputs = tf.stack([tf.strings.join([label, results[0]]),
                       tf.strings.join([label, results[1]])])
    targets = tf.stack([results[1], results[0]])
    return {'inputs': inputs, 'targets': targets}

  dataset = _split_text_to_words(dataset, text_key, min_num_words=2)
  dataset = dataset.map(my_fn, num_parallel_calls=tf.data.experimental.AUTOTUNE)
  # Each source example yields two examples (the pair of inverses).
  return dataset.unbatch()
def fill_in_the_blank_sized(
    dataset,
    size_bins=(1, 2, 4, 8, 16, 32, 64, 128, 256, 512),
    text_key='text',
    label='fill: '):
  """Fill in the blank preprocessor that labels blank with a binned size.

  The actual blank size is sampled uniformly from the inclusive range of the min
  and max bin. The blank is then filled in with the closest bin size to the
  actual blank size.

  Args:
    dataset: a tf.data.Dataset, the dataset to preprocess.
    size_bins: a list, a list of blank sizes to select from when labelling the
      blank.
    text_key: a string, the key for the text feature to preprocess in the
      dataset examples.
    label: a string, the label to prepend to the inputs.

  Returns:
    a tf.data.Dataset
  """
  bins = sorted(size_bins)

  def my_fn(x):
    """Apply transformation."""
    words = x['words']
    n_words = tf.size(words)

    blank_size = tf.random.uniform(
        [], minval=bins[0], maxval=tf.math.minimum(n_words, bins[-1]),
        dtype=tf.dtypes.int32)
    # Label the blank with the bin size closest to the sampled size.
    bin_delta = tf.math.abs(bins - blank_size)
    bin_ = tf.gather(bins, tf.argmin(bin_delta))
    blank_start = tf.random.uniform(
        [], minval=0, maxval=tf.math.maximum(0, n_words-blank_size) + 1,
        dtype=tf.dtypes.int32)

    pre_blank = tf.strings.reduce_join(words[0:blank_start], separator=' ')
    post_blank = tf.strings.reduce_join(
        words[blank_start+blank_size:], separator=' ')
    # The blank marker embeds the binned size, e.g. '_8_'.
    blank = tf.strings.format('_{}_', bin_)
    # We strip to handle cases where blank is at beginning or end.
    input_ = tf.strings.strip(
        tf.strings.join([pre_blank, blank, post_blank], ' '))
    input_ = tf.strings.join([label, input_])
    target = tf.strings.reduce_join(
        words[blank_start:blank_start+blank_size], separator=' ')

    return {
        'inputs': tf.strings.strip(input_),
        'targets': tf.strings.strip(target)}

  dataset = _split_text_to_words(dataset, text_key, min_num_words=2)
  # Filter out examples with fewer words than the minimum (smallest bin).
  dataset = dataset.filter(lambda x: tf.size(x['words']) >= bins[0])
  dataset = dataset.map(my_fn, num_parallel_calls=tf.data.experimental.AUTOTUNE)
  return dataset
def prefix_lm(dataset,
              text_key='text',
              label='prefix: '):
  """Create a dataset of text-prefix examples for LM-style training.

  Each input example must carry a tf.string feature under text_key. The text
  is split on whitespace into words, a split point is drawn uniformly at
  random, and the words before the split (prepended with the given label)
  become 'inputs' while the remaining words become 'targets'. Inputs with
  fewer than two words are dropped.

  EXAMPLE:
    input:
    {
      'text': 'The fat cat sat on the mat.'
    }
    output:
    {
      'inputs': 'prefix: The fat'
      'targets': 'cat sat on the mat.'
    }

  Args:
    dataset: a tf.data.Dataset
    text_key: a string, the key for the text feature to preprocess in the
      dataset examples.
    label: a string, the label to prepend to the inputs.

  Returns:
    a tf.data.Dataset
  """
  def _to_prefix_and_continuation(x):
    """Cut one word sequence into a (prefix, continuation) text2text pair."""
    words = x['words']
    total = tf.size(words)
    # Pick a random cut point; everything before it is the prefix.
    cut = tf.random_uniform(
        [], minval=0, maxval=total - 1, dtype=tf.int32)
    prefix_words, suffix_words = tf.split(words, [cut, total - cut])
    prefix_text = tf.strings.reduce_join([prefix_words], separator=' ')
    suffix_text = tf.strings.reduce_join([suffix_words], separator=' ')
    return {
        'inputs': tf.strings.join([label, prefix_text]),
        'targets': suffix_text,
    }
  dataset = _split_text_to_words(dataset, text_key, min_num_words=2)
  return dataset.map(
      _to_prefix_and_continuation,
      num_parallel_calls=tf.data.experimental.AUTOTUNE)
def neighboring_pairs(dataset, text_key='text', reuse_sentences=True):
  """Create a dataset consisting of neighboring sentence pairs.

  The input examples should have a key text_key associated with a tf.string
  value.
  The output examples have keys 'first' and 'second'.

  We only take sentence pairs from within the same line since lines seem to
  represent paragraph-like structures in our text datasets. Empty lines and
  1-sentence lines will thus be ignored.

  The argument reuse_sentences determines whether a sentence can be used as both
  the first and last element in the pair. For example, the input with sentences
  A,B,C,D will return (A,B),(B,C),(C,D) if reuse_sentences is True and
  (A,B),(C,D) if reuse_sentences is False.

  Args:
    dataset: a tf.data.Dataset
    text_key: a string, the key for the text feature to preprocess in the
      dataset examples.
    reuse_sentences: a boolean

  Returns:
    a tf.data.Dataset
  """
  def split_by_lines(dataset):
    """Splits text in dataset by line, removing empty lines."""
    def my_fn(text):
      lines = tf.strings.split([text], sep='\n').values
      return tf.strings.strip(lines)
    dataset = dataset.map(
        my_fn, num_parallel_calls=tf.data.experimental.AUTOTUNE)
    dataset = dataset.unbatch()
    return dataset.filter(lambda x: tf.strings.length(x) > 0)
  def split_into_pairs(line):
    """Split a given text example into pairs of neighboring sentences."""
    # TODO(mmatena): Use better sentence segmentation.
    # A fresh UUID is used as the sentence delimiter because it is
    # vanishingly unlikely to occur in natural text; the regex below inserts
    # it after each run of sentence-ending punctuation.
    sep = str(uuid.uuid4())
    sentences = tf.strings.regex_replace(line, r'((?:\.|\!|\?)+)', r'\1' + sep)
    sentences = tf.strings.strip(tf.strings.split([sentences], sep).values)
    if reuse_sentences:
      # Overlapping pairs: (s0,s1), (s1,s2), ...
      firsts = sentences[:-1]
      seconds = sentences[1:]
    else:
      # Disjoint pairs: (s0,s1), (s2,s3), ...
      firsts = sentences[:-1:2]
      seconds = sentences[1::2]
    return {
        'first': firsts,
        'second': seconds,
    }
  def example_len(x):
    # Length of the shorter of the two sentences; 0 iff either is empty.
    return tf.math.minimum(
        tf.strings.length(x['first']), tf.strings.length(x['second']))
  # Split by lines.
  dataset = dataset.map(
      lambda x: x[text_key], num_parallel_calls=tf.data.experimental.AUTOTUNE)
  dataset = split_by_lines(dataset)
  # Get pairs of neighboring sentences.
  dataset = dataset.map(
      split_into_pairs, num_parallel_calls=tf.data.experimental.AUTOTUNE)
  dataset = dataset.unbatch()
  # Remove examples with empty strings.
  dataset = dataset.filter(lambda x: example_len(x) > 0)
  return dataset
def glue(
    dataset, benchmark_name, label_names, feature_names=None, id_key='idx'):
  """Convert a dataset from glue to text2text examples.

  This function uses the feature names from the dataset to unpack examples into
  a format amenable for a text2text problem. For example, consider the Quora
  Question Pairs (QQP) benchmark, which would suggest
  benchmark_name="qqp"
  label_names=['not_duplicate', 'duplicate']
  For QQP, a typical example might look like
  {
      "question1": "Why do I easily get bored of my friends?",
      "question2": "Why do I get bored of friends so quickly?",
      "label": 1,
      "idx": 10,
  }
  This example would be transformed to
  {
       "inputs": (
           "qqp question1: Why do I easily get bored of my friends? question2: "
           "Why do I get bored of my friends so quickly?"
       ),
       "targets": "duplicate",
       "idx": 10,
  }

  Args:
    dataset: a tf.data.Dataset to process.
    benchmark_name: the name of the GLUE benchmark for this dataset.
    label_names: a list of label names corresponding to class index.
    feature_names: an optional ordered list of feature names. If provided,
      features will be ordered in this way in the output. If not provided, all
      features (except 'idx' and 'label') will be used, sorted by name.
    id_key: str, key for id in the dataset. If not provided, 'idx' will be used.
      if None, no id will be added to the dataset.

  Returns:
    a tf.data.Dataset
  """
  def my_fn(x):
    """Collapse an example into a text2text pair."""
    # If an ordering is not provided, sort feature keys to ensure a consistent
    # order.
    feature_keys = (
        feature_names or sorted(set(x.keys()).difference(['label', 'idx'])))
    # Pack keys (formatted as " key: ") and corresponding text feature
    strs_to_join = []
    for key in feature_keys:
      strs_to_join.append('{}:'.format(key))
      strs_to_join.append(x[key])
    # Add benchmark name at the start
    strs_to_join.insert(0, benchmark_name)
    label_name = tf.cond(
        # When no label is provided (label == -1), use "<unk>"
        tf.equal(x['label'], -1),
        lambda: tf.constant('<unk>'),
        # Otherwise grab the label text from label_names
        lambda: tf.gather(label_names, x['label']),
    )
    joined = tf.strings.join(strs_to_join, separator=' ')
    ex = {}
    if benchmark_name == 'multirc':
      # Remove HTML markup.
      joined = tf.strings.regex_replace(joined, '<br>', ' ')
      joined = tf.strings.regex_replace(joined, '<(/)?b>', '')
      # Store the data index in the returned example (used by eval)
      # multirc has a nested index with three components.
      ex['idx/paragraph'] = x['idx']['paragraph']
      ex['idx/question'] = x['idx']['question']
      ex['idx/answer'] = x['idx']['answer']
    else:
      # Store the data index in the returned example (used by eval)
      if id_key:
        ex['idx'] = x[id_key]
    ex['inputs'] = joined
    ex['targets'] = label_name
    return ex
  return dataset.map(my_fn, num_parallel_calls=tf.data.experimental.AUTOTUNE)
def stsb(dataset):
  """Convert STSB examples to text2text format.

  STSB maps two sentences to a floating point similarity score between 1 and
  5. Since all tasks are treated as text-to-text, the score is first rounded
  to the nearest multiple of 0.2 (the set the vast majority of STSB labels
  come from) and then rendered as a string with one decimal place, e.g.
  "3.4". This turns STSB into roughly a 26-class classification dataset.

  For example, a typical example from STSB might look like
  {
      "sentence1": "Three more US soldiers killed in Afghanistan",
      "sentence2": "NATO Soldier Killed in Afghanistan",
      "label": 1.8,
  }
  This example would be transformed to
  {
       "inputs": (
           "stsb sentence1: Three more US soldiers killed in Afghanistan "
           "sentence2: NATO Soldier Killed in Afghanistan"
       ),
       "targets": "1.8",
  }

  Args:
    dataset: a tf.data.Dataset to process.

  Returns:
    a tf.data.Dataset
  """
  def _to_text2text(x):
    """Format one STSB example as an input/target string pair."""
    inputs = tf.strings.join(
        ['stsb sentence1:', x['sentence1'], 'sentence2:', x['sentence2']],
        separator=' ')
    # Round to the nearest 0.2 and print with a single decimal digit.
    targets = tf.as_string(tf.round(x['label']*5)/5, precision=1)
    return {'inputs': inputs, 'targets': targets, 'idx': x['idx']}
  return dataset.map(
      _to_text2text, num_parallel_calls=tf.data.experimental.AUTOTUNE)
def wsc(dataset):
  """Convert WSC examples to text2text format.

  WSC includes a sentence along with 2 'spans': the first denoting a noun and
  the other a pronoun. The 'label' specifies whether or not the pronoun is
  referencing the noun. This preprocessor puts ' * ' around the noun and ' # '
  around the pronoun.

  For example, a typical example from WSC might look like
  {
      'text': 'This is a test sentence .',
      'span1_text': 'test',
      'span1_index': 3,
      'span2_text': 'This',
      'span2_index': 0,
      'label': 0
  }
  This example would be transformed to
  {
      'inputs': 'wsc text: # This # is a * test * sentence .',
      'targets': 'False'
  }

  Args:
    dataset: a tf.data.Dataset to process.

  Returns:
    a tf.data.Dataset
  """
  def my_fn(x):
    """Collapse an example into a text2text pair."""
    def _mark_span(text, span_str, span_idx, mark):
      # Build a regex that skips span_idx whitespace-delimited words and then
      # captures span_str, so only the word at that index gets marked.
      pattern_tmpl = r'^((?:\S+\s){N})(W)'
      pattern = tf.strings.regex_replace(
          pattern_tmpl, 'N', tf.as_string(span_idx))
      pattern = tf.strings.regex_replace(pattern, 'W', span_str)
      return tf.strings.regex_replace(
          text, pattern, r'\1{0} \2 {0}'.format(mark))
    text = x['text']
    text = _mark_span(text, x['span1_text'], x['span1_index'], '*')
    # Compensate for 2 added "words" added in previous step.
    span2_index = x['span2_index'] + 2 * tf.cast(
        x['span1_index'] < x['span2_index'], tf.int32)
    text = _mark_span(text, x['span2_text'], span2_index, '#')
    # Add benchmark name at the start
    strs_to_join = ['wsc', 'text:', text]
    label_name = tf.cond(
        # When no label is provided (label == -1), use "<unk>"
        tf.equal(x['label'], -1),
        lambda: tf.constant('<unk>'),
        # Otherwise use False/True.
        lambda: tf.gather(['False', 'True'], x['label'])
    )
    joined = tf.strings.join(strs_to_join, separator=' ')
    return {'inputs': joined, 'targets': label_name, 'idx': x['idx']}
  return dataset.map(my_fn, num_parallel_calls=tf.data.experimental.AUTOTUNE)
@gin.configurable
def record(dataset):
  """Convert ReCoRD examples to text2text examples.

  ReCoRD contains a passage, query containing a '@placeholder' string, and a set
  of entities that are the possible values of the placeholder. Each train and
  validation example will have a list of answers, any of which would be
  considered correct.

  For example, a typical example from ReCoRD might look like
  {
      'passage': 'This is the passage.',
      'query': 'A @placeholder is a bird.',
      'entities': ['penguin', 'potato', 'pigeon'],
      'answers': ['penguin', 'pigeon'],
  }
  which this preprocessor would turn into the following two examples:
  {
      'inputs': 'record query: A @placeholder is a bird. entities: penguin, '
                'potato, pigeon passage: This is the passage.',
      'targets': 'penguin',
  }
  and
  {
      'inputs': 'record query: A @placeholder is a bird. entities: penguin, '
                'potato, pigeon passage: This is the passage.',
      'targets': 'potato',
  }

  Args:
    dataset: a tf.data.Dataset to process.

  Returns:
    a tf.data.Dataset
  """
  def process_answers(x):
    """Helper fn to get one example per answer."""
    ex = x.copy()
    num_answers = tf.size(ex['answers'])
    def duplicate_along_first_dim(t):
      # Tile each feature once per answer so the later unbatch() yields one
      # example per answer. Unanswerable examples still get one copy.
      n_duplicates = tf.math.maximum(num_answers, 1)
      return tf.broadcast_to(
          t, shape=tf.concat([[n_duplicates], tf.shape(t)], axis=0))
    for k, v in x.items():
      if k != 'idx':
        ex[k] = duplicate_along_first_dim(v)
    # When there are no answers (e.g. test set), use a single '<unk>' target.
    ex['targets'] = tf.cond(
        tf.greater(num_answers, 0), lambda: x['answers'],
        lambda: tf.constant(['<unk>']))
    ex['idx'] = {
        'passage': duplicate_along_first_dim(x['idx']['passage']),
        'query': duplicate_along_first_dim(x['idx']['query']),
    }
    return ex
  def my_fn(x):
    """Converts the processed example to text2text strings."""
    passage = x['passage']
    # Turn '@highlight' markers into sentence separators.
    passage = tf.strings.regex_replace(passage,
                                       r'(\.|\?|\!|\"|\')\n@highlight\n',
                                       r'\1 ')
    passage = tf.strings.regex_replace(passage, r'\n@highlight\n', '. ')
    strs_to_join = [
        'record query:', x['query'], 'entities:',
        tf.strings.reduce_join(x['entities'], separator=', '), 'passage:',
        passage
    ]
    joined = tf.strings.join(strs_to_join, separator=' ')
    ex = {}
    # Store the data index in the returned example (used by eval)
    ex['idx/passage'] = x['idx']['passage']
    ex['idx/query'] = x['idx']['query']
    ex['inputs'] = joined
    # Note that "answers" has been converted to a single string by the
    # process_answers function.
    ex['targets'] = x['targets']
    # Pass-through full list of answers for eval
    ex['answers'] = x['answers']
    return ex
  dataset = dataset.map(
      process_answers, num_parallel_calls=tf.data.experimental.AUTOTUNE)
  dataset = dataset.unbatch()
  return dataset.map(my_fn, num_parallel_calls=tf.data.experimental.AUTOTUNE)
def multi_translate(dataset, source_language, target_language):
  """Convert a multi-translate dataset to a text2text pair.

  For example, say the dataset returns examples which have a 'translations'
  feature key so that examples have the following format:
  {
  ...
  'translations': {
      'language': ['de', 'fr', 'en'],
      'translation': ['Das ist gut.', 'Ca c'est bon', 'That is good.']
      },
  ...
  }
  If source_language = 'de', target_language = 'en', then this function will
  return examples of the format:
  {'inputs': 'translate German to English: Das is gut.',
   'targets': 'That is good.'}
  Any other languages present in the dataset will be filtered out.

  Args:
    dataset: a tf.data.Dataset to process.
    source_language: source language code (e.g. 'en') to translate from.
    target_language: target language code (e.g. 'de') to translate to.

  Returns:
    A preprocessed tf.data.Dataset with the format listed above.
  """
  def filter_fn(x):
    langs = x['translations']['language']
    # Test whether both source/target_language appear in the language list
    source_in_langs = tf.reduce_any(tf.equal(source_language, langs))
    target_in_langs = tf.reduce_any(tf.equal(target_language, langs))
    return tf.logical_and(source_in_langs, target_in_langs)
  def map_fn(x):
    langs = x['translations']['language']
    # Retrieve the index in langs where source/target_language appears
    # NOTE(review): tf.squeeze assumes each language occurs exactly once per
    # example; a duplicate language would make the squeeze fail — confirm
    # against the dataset schema.
    src_idx = tf.squeeze(tf.where(tf.equal(langs, source_language)))
    tgt_idx = tf.squeeze(tf.where(tf.equal(langs, target_language)))
    return {
        source_language: x['translations']['translation'][src_idx],
        target_language: x['translations']['translation'][tgt_idx],
    }
  dataset = dataset.filter(filter_fn)
  dataset = dataset.map(
      map_fn, num_parallel_calls=tf.data.experimental.AUTOTUNE
  )
  # Delegate the final inputs/targets formatting to the standard translate
  # preprocessor.
  return translate(dataset, source_language, target_language)
def definite_pronoun_resolution_simple(dataset,
                                       label='wsc:'):
  """Converts DPR examples to a simple text to text format.

  A typical example from the definite pronoun resolution dataset might look like
  {
     'sentence': 'Bob asked Tom if he can lend some money.',
     'pronoun': 'he',
     'candidates': ['Bob', 'Tom'],
     'label': 1,
  }
  This will be transformed to
  {
     'inputs': 'wsc: Bob asked Tom if *he* can lend some money.'
     'targets': 'Tom',
  }

  Args:
    dataset: a tf.data.Dataset
    label: a string, the label to prepend to the inputs.

  Returns:
    a tf.data.Dataset
  """
  def my_fn(x):
    """Function to be called for every example."""
    # If there are multiple instances of the pronoun in the sentence, the first
    # one is the one that needs to be resolved.
    inputs = [
        label,
        # The pattern requires the pronoun to be a standalone word (preceded
        # by a space, followed by space/period/comma); replace_global=False
        # marks only the first occurrence with asterisks.
        tf.strings.regex_replace(
            x['sentence'],
            tf.strings.join([r' (', x['pronoun'], r')( |\.|,)']),
            r' *\1*\2',
            replace_global=False,
        ),
    ]
    return {
        'inputs': tf.strings.join(inputs, separator=' '),
        'targets': x['candidates'][x['label']],
    }
  return dataset.map(my_fn, num_parallel_calls=tf.data.experimental.AUTOTUNE)
def next_sentence_prediction(dataset,
                             text_key='text',
                             reuse_sentences=True,
                             label_sentences=False,
                             p_neighbors=0.5,
                             label='nsp: ',
                             buffer_size=50000):
  """Create a dataset containing a next sentence prediction objective.

  The input examples should have a key text_key associated with a tf.string
  value.
  The output examples have keys 'inputs' and 'targets'.

  EXAMPLE OUTPUTS:
  {
    input: "nsp: sentence1: The man went to the store. sentence2: Penguins are "
           "flightless birds.",
    target: "not_next"
  }
  The "sentence1:" and "sentence2:" labels will be omitted if label_sentences is
  False.

  Args:
    dataset: a tf.data.Dataset
    text_key: a string, the key for the text feature to preprocess in the
      dataset examples.
    reuse_sentences: a boolean, see docs for `neighboring_pairs` for more info.
    label_sentences: a boolean
    p_neighbors: a float between 0 and 1, the probability that a sentence pair
      will be neighbors.
    label: a string, the label to prepend to the inputs.
    buffer_size: an int, the size of the shuffle buffer used to get
      non-neighboring sentences.

  Returns:
    a tf.data.Dataset
  """
  sentence1_label, sentence2_label = '', ''
  if label_sentences:
    sentence1_label, sentence2_label = 'sentence1: ', 'sentence2: '
  empty = tf.constant('', dtype=tf.string, shape=[1])
  dataset = neighboring_pairs(
      dataset, text_key=text_key, reuse_sentences=reuse_sentences)
  # Batch neighboring pairs in twos: the "not_next" case is produced by
  # swapping the second sentences between the two pairs in a batch.
  dataset = dataset.shuffle(buffer_size).batch(2, drop_remainder=True)
  def some_are_empty(*tensors):
    """See if at least one tensor has shape [0]."""
    empty = [tf.equal(tf.size(t), 0) for t in tensors]
    return tf.reduce_any(empty)
  def my_fn(x):
    """Function to be applied to each example in dataset."""
    # Flip a coin: keep the true neighbor, or cross-swap the second sentences
    # of the two pairs to create non-neighboring examples.
    use_neighbors = tf.random.uniform(shape=[]) < p_neighbors
    firsts, seconds = tf.cond(
        use_neighbors,
        lambda: (x['first'], x['second']),
        lambda: (x['first'], tf.stack([x['second'][1], x['second'][0]])),
    )
    relation_label = tf.cond(
        use_neighbors,
        lambda: 'next',
        lambda: 'not_next',
    )
    inputs = []
    for i in range(2):
      first_inputs = firsts[i]
      second_inputs = seconds[i]
      # Default arguments bind the current loop values (late-binding fix).
      def create_examples(first_i=first_inputs, second_i=second_inputs):
        return tf.strings.join([
            label,
            sentence1_label,
            first_i,
            ' ',
            sentence2_label,
            second_i,
        ])
      # If either sentence is empty, emit an empty string; these examples are
      # filtered out below.
      inpt = tf.cond(
          some_are_empty(first_inputs, second_inputs),
          lambda: empty,
          create_examples,
      )
      inputs.append(tf.strings.strip(inpt))
    inputs = tf.reshape(inputs, [-1])
    targets = tf.reshape(2 * [relation_label], [-1])
    return {'inputs': inputs, 'targets': targets}
  dataset = dataset.map(my_fn, num_parallel_calls=num_parallel_calls())
  dataset = dataset.unbatch()
  def example_len(x):
    return tf.math.minimum(
        tf.strings.length(x['inputs']), tf.strings.length(x['targets']))
  # Remove examples with empty strings.
  return dataset.filter(lambda x: example_len(x) > 0)
def lm(dataset):
  """Basic language modeling objective for text - empty inputs.

  Given inputs with the format:
  {"text": "Here is some text."}
  This preprocess produces examples with the format
  {"inputs": "", "targets": "Here is some text."}

  Args:
    dataset: A tf.data.Dataset to process.

  Returns:
    A preprocessed tf.data.Dataset.
  """
  def _to_lm_example(x):
    # Empty inputs: the model must generate the full text unconditionally.
    return {'inputs': '', 'targets': x['text']}
  return dataset.map(
      _to_lm_example, num_parallel_calls=tf.data.experimental.AUTOTUNE)
def _wsc_inputs(x):
  """Given an example from SuperGLUE WSC, compute the 'inputs' value.

  The output will look like a fill in the blank with the pronoun blanked out.
  For example, the text
    'Mitchell asked Tom if he could lend some money.'
  would be transformed to
    'Mitchell asked Tom if X could lend some money.'

  Args:
    x: A dict that is an example from the WSC task of SuperGLUE.

  Returns:
    A scalar string tensor.
  """
  words = tf.strings.split([x['text']], sep=' ').values
  # We would need some special logic to handle the case where the pronoun is the
  # first or last word in the text. None of the examples in WSC seem to have
  # this, so we are ignoring these cases.
  with tf.control_dependencies([
      tf.assert_greater(x['span2_index'], 0),
      tf.assert_less(x['span2_index'], tf.size(words)),
  ]):
    pronoun_index = tf.identity(x['span2_index'])
  def create_input():
    # Sanity-check that span2_index really points at the pronoun text before
    # splicing 'X' in its place.
    with tf.control_dependencies(
        [tf.assert_equal(words[pronoun_index], x['span2_text'])]):
      return tf.strings.join(
          [
              tf.strings.reduce_join(words[:pronoun_index], separator=' '),
              'X',
              tf.strings.reduce_join(
                  words[pronoun_index + 1:], separator=' '),
          ],
          separator=' ',
      )
  # Handle some special cases.
  # These two dataset examples have span indices that do not line up with a
  # simple whitespace split, so their outputs are hard-coded. The literal
  # strings below must match the dataset text exactly — do not reformat.
  return tf.case(
      {
          # The issue here is that the pronoun is 'him,"' in the text.
          tf.equal(
              x['text'],
              'The boy continued to whip the pony , and eventually the pony threw him over. John laughed out quite loud. \"Good for him,\" he said. '
          ):
              lambda:
              'The boy continued to whip the pony , and eventually the pony threw him over. John laughed out quite loud. "Good for X ," he said.',
          # Using the span2_index, we get 'use' instead of 'it'.
          tf.equal(
              x['text'],
              'When they had eventually calmed down a bit , and had gotten home, Mr. Farley put the magic pebble in an iron safe . Some day they might want to use it , but really for now, what more could they wish for?'
          ):
              lambda:
              'When they had eventually calmed down a bit , and had gotten home, Mr. Farley put the magic pebble in an iron safe . Some day they might want to use X , but really for now, what more could they wish for?'
      },
      default=create_input,
      exclusive=True)
def wsc_simple(dataset,
               label='wsc:',
               correct_referent_only=False):
  """Converts SuperGLUE WSC examples to a simple text to text format.

  A typical example from SuperGLUE WSC might look like
  {
    'text': 'Mitchell asked Tom if he could lend some money.',
    'span1_text': 'Tom',
    'span2_text': 'he',
    'span2_index': 4,
  }
  This will be transformed to
  {
    'inputs': 'wsc: Mitchell asked Tom if *he* could lend some money.'
    'targets': 'Tom',
  }
  The targets will always be the text of the referent regardless of whether it
  is the correct referrent of the pronoun. Thus for training purposes, please
  set `correct_referent_only` to be True.

  Args:
    dataset: a tf.data.Dataset
    label: a string, the label to prepend to the inputs.
    correct_referent_only: a bool, whether to filter out examples for which the
      targets is not the correct referent of the pronoun.

  Returns:
    a tf.data.Dataset
  """
  def map_fn(x):
    """Function to be called for every example in dataset."""
    inputs = [
        label,
        # _wsc_inputs replaces the pronoun with ' X '; swap that back in as
        # the original pronoun wrapped in asterisks.
        tf.strings.regex_replace(
            _wsc_inputs(x), r' X ', ' *' + x['span2_text'] + '* '),
    ]
    referent = x['span1_text']
    return {
        'inputs': tf.strings.join(inputs, separator=' '),
        # The reshape is necessary as otherwise the tensor has unknown rank.
        'targets': tf.reshape(referent, shape=[]),
        'label': x.get('label', 0),
        'idx': x['idx'],
    }
  if correct_referent_only:
    dataset = dataset.filter(lambda x: tf.cast(x.get('label', False), tf.bool))
  return dataset.map(map_fn, num_parallel_calls=tf.data.experimental.AUTOTUNE)
def wnli_simple(dataset, label='wsc:'):
  """Converts GLUE WNLI examples to a simple text to text format.

  A typical example from WNLI might look like:
  {
    'sentence1': 'The fish ate the worm. It was tasty.',
    'sentence2': 'The worm was tasty.',
    'label': 1,
  }
  This will be transformed to:
  {
    'inputs': 'wsc: The fish ate the worm. *It* was tasty.',
    'targets': 'The worm',
    'premise': 'The fish ate the worm. It was tasty.',
    'hypothesis': 'The worm was tasty.',
    'label': 1,
  }

  This preprocessor has been manually verified to produce reasonable WSC
  examples for the dev and test sets. Tasks using this preprocessor should only
  be used eval and not train.

  Args:
    dataset: a tf.data.Dataset
    label: a string, the label to prepend to the inputs.

  Returns:
    a tf.data.Dataset
  """
  pronouns = ['he', 'she', 'they', 'it', 'her', 'his', 'their', 'them', 'him']
  PronounMatch = collections.namedtuple(  # pylint: disable=invalid-name
      'PronounMatch', ['score', 'index_in_premise', 'candidate'])
  def split_clean(s):
    """Returns array of words with punctuation and capitalization removed."""
    words = [
        re.sub(r'(\.|,|\?|\!)$', '', w) for w in s.strip().lower().split(' ')
    ]
    return [w for w in words if w]
  def get_all_pronoun_indices(s):
    return [i for i, w in enumerate(s) if w in pronouns]
  def get_post_match_size(hypothesis, words):
    """Returns len of largest prefix of words that is substr of hypothesis."""
    hypothesis = ' '.join(hypothesis)
    for i in range(len(words)):
      if ' '.join(words[:i + 1]) not in hypothesis:
        return i
    return len(words)
  def get_pre_match_size(hypothesis, words):
    """Returns len of largest suffix of words that is substr of hypothesis."""
    # Reversing both sequences turns a suffix match into a prefix match.
    return get_post_match_size(hypothesis[::-1], words[::-1])
  def get_pronoun_match(premise, hypothesis, index):
    """Return the PronounMatch for the pronoun at `index` in premise."""
    pre, post = premise[:index], premise[index + 1:]
    pre_match_size = get_pre_match_size(hypothesis, pre)
    post_match_size = get_post_match_size(hypothesis, post)
    # Score = how much premise context around the pronoun also appears in the
    # hypothesis; the candidate is whatever the hypothesis has in between.
    score = pre_match_size + post_match_size
    candidate = ''
    if score:
      pre_match = pre[-pre_match_size or len(pre):]
      post_match = post[:post_match_size]
      m = re.search(' '.join(pre_match + [r'(.+)'] + post_match),
                    ' '.join(hypothesis))
      if not m:
        # Handle cases where the candidate is at the start of the hypothesis.
        m = re.search(' '.join([r'^(.+)'] + post_match), ' '.join(hypothesis))
      if not m:
        # Handle cases where the candidate is at the end of the hypothesis.
        m = re.search(' '.join(pre_match + [r'(.+)$']), ' '.join(hypothesis))
      if m:
        candidate = m.group(1)
    return PronounMatch(
        score=score, index_in_premise=index, candidate=candidate)
  def get_best_pronoun_match(premise, hypothesis):
    """Returns the match for the pronoun in the premise to disambiguate."""
    pronoun_indices = get_all_pronoun_indices(premise)
    scoredpronouns = [
        get_pronoun_match(premise, hypothesis, index)
        for index in pronoun_indices
    ]
    return max(scoredpronouns, key=lambda x: x.score)
  def highlight(sentence, index):
    # Wrap the word at `index` in asterisks, keeping trailing punctuation
    # outside the asterisks.
    words = sentence.split(' ')
    word = words[index]
    if word[-1] in ['.', ',', '!', '?']:
      highlighted = '*{}* {}'.format(word[:-1], word[-1])
    else:
      highlighted = '*{}*'.format(word)
    return ' '.join(words[:index] + [highlighted] + words[index + 1:])
  def make_nonpossessive(word):
    # WSC simple targets will never be possessive, even when the pronoun is
    # possessive.
    if word.endswith("'"):
      return word[:-1]
    elif word.endswith("'s"):
      return word[:-2]
    else:
      return word
  def clean_up(candidate):
    words = candidate.split(' ')
    # Sometimes the candidate extraction messes up, and the candidate will start
    # with the start of the hypothesis and extend to the correct candidate. We
    # can try to clean up the candidate in some cases by removing everything up
    # to the last article in the sentence.
    article_index = max(
        [words.index(art) for art in {'a', 'an', 'the'} if art in words] or [0])
    return ' '.join(words[article_index:])
  def process_candidate(candidate, hypothesis):
    """Handles special cases and adds proper punctuation/capitalization."""
    candidate = clean_up(candidate)
    # Re-find the candidate in the original (cased, punctuated) hypothesis so
    # the returned target preserves the source capitalization.
    pattern = '({})'.format(' '.join([
        r'{}(?:\.|,|\?|\!)?'.format(re.escape(c)) for c in candidate.split(' ')
    ]))
    m = re.search(pattern, hypothesis, re.IGNORECASE)
    if not m:
      raise ValueError(
          'Unable to find candidate "{}" in hypothesis "{}".'.format(
              candidate, hypothesis))
    candidate = m.group(1)
    if candidate and candidate[-1] in ['.', ',', '!', '?']:
      candidate = candidate[:-1]
    return make_nonpossessive(candidate)
  def compute_inputs_and_targets(premise, hypothesis):
    """Compute inputs and targets for WNLI simple."""
    premise = tf.compat.as_text(premise.numpy())
    hypothesis = tf.compat.as_text(hypothesis.numpy())
    match = get_best_pronoun_match(
        split_clean(premise), split_clean(hypothesis))
    targets = process_candidate(match.candidate, hypothesis)
    inputs = '{} {}'.format(label, highlight(premise, match.index_in_premise))
    return inputs, targets
  def map_fn(x):
    # The heuristics above are plain Python, so run them via py_function.
    inputs, targets = tf.py_function(
        compute_inputs_and_targets,
        inp=[x['sentence1'], x['sentence2']],
        Tout=[tf.string, tf.string])
    return {
        # The reshape is necessary as otherwise the tensor has unknown rank.
        'inputs': tf.reshape(inputs, shape=[]),
        'targets': tf.reshape(targets, shape=[]),
        'premise': x['sentence1'],
        'hypothesis': x['sentence2'],
        'label': x.get('label', 0),
        'idx': x['idx'],
    }
  return dataset.map(map_fn, num_parallel_calls=tf.data.experimental.AUTOTUNE)
# ======================Token Preprocessors=====================================
@gin.configurable
def select_random_chunk(dataset,
                        max_length=gin.REQUIRED,
                        feature_key='targets',
                        **unused_kwargs):
  """Token-preprocessor to extract one span of at most `max_length` tokens.

  If the token sequence is longer than `max_length`, then we return a random
  subsequence.  Otherwise, we return the full sequence.

  This is generally followed by split_tokens.

  Args:
    dataset: a tf.data.Dataset with dictionaries containing the key feature_key.
    max_length: an integer
    feature_key: a string

  Returns:
    a dataset
  """
  def _my_fn(x):
    """Select a random chunk of tokens.

    Args:
      x: an example dict containing `feature_key` mapped to a 1d token Tensor

    Returns:
      an example dict with `feature_key` mapped to the selected 1d chunk
    """
    tokens = x[feature_key]
    n_tokens = tf.size(tokens)
    # Number of max_length-sized segments the sequence would split into.
    num_segments = tf.cast(
        tf.ceil(tf.cast(n_tokens, tf.float32)
                / tf.cast(max_length, tf.float32)),
        tf.int32)
    # Pick one segment uniformly at random; the final segment may be shorter.
    start = max_length * tf.random_uniform(
        [], maxval=num_segments, dtype=tf.int32)
    end = tf.minimum(start + max_length, n_tokens)
    return {feature_key: tokens[start:end]}
  # Filter empty examples.
  dataset = dataset.filter(lambda x: tf.not_equal(tf.size(x[feature_key]), 0))
  return dataset.map(_my_fn, num_parallel_calls=num_parallel_calls())
@gin.configurable
def reduce_concat_tokens(dataset,
                         feature_key='targets',
                         batch_size=128,
                         **unused_kwargs):
  """Token-preprocessor to concatenate multiple unrelated documents.

  If we want to generate examples of exactly the right length,
  (to avoid wasting space on padding), then we use this function, followed by
  split_tokens.

  Args:
    dataset: a tf.data.Dataset with dictionaries containing the key feature_key.
    feature_key: a string
    batch_size: an integer - how many documents to concatenate into one

  Returns:
    a dataset
  """
  # Drop all features except feature_key before batching.
  dataset = dataset.map(lambda x: {feature_key: x[feature_key]},
                        num_parallel_calls=tf.data.experimental.AUTOTUNE)
  dataset = dataset.padded_batch(batch_size, padded_shapes={feature_key: [-1]})
  def _my_fn(x):
    # Flatten the padded batch into one long sequence.
    tokens = tf.reshape(x[feature_key], [-1])
    # strip padding
    # NOTE(review): this drops every zero token, assuming 0 is only ever
    # padding — confirm the vocabulary never emits token id 0 mid-sequence.
    tokens = tf.boolean_mask(tokens, tf.cast(tokens, tf.bool))
    return {feature_key: tokens}
  return dataset.map(_my_fn, num_parallel_calls=tf.data.experimental.AUTOTUNE)
@gin.configurable
def split_tokens(dataset,
                 min_tokens_per_segment=None,
                 max_tokens_per_segment=gin.REQUIRED,
                 feature_key='targets',
                 **unused_kwargs):
  """Split examples into multiple examples each.

  The intended use case is to break up long examples for use in unsupervised
  transfer-learning.

  This function is generally preceded by select_random_chunk.

  If min_tokens_per_segment is provided, the segment length is chosen randomly
  per document from a log-uniform distribution.  If min_tokens_per_segment is
  None, then the segment length is max_tokens_per_segment (except for a possibly
  shorter last segment in each document).

  Args:
    dataset: a tf.data.Dataset with dictionaries containing the key feature_key.
    min_tokens_per_segment: an optional integer
    max_tokens_per_segment: an integer, the maximum number of tokens in each
      segment. Only the final segment may be shorter.
    feature_key: a string, the feature to split

  Returns:
    a dataset
  """
  def _split_tokens(x):
    """Split one token sequence into multiple segments."""
    tokens = x[feature_key]
    n_tokens = tf.size(tokens)
    if min_tokens_per_segment is None:
      length = max_tokens_per_segment
    else:
      # pick a length - log-uniformly distributed
      length = tf.cast(tf.exp(tf.random_uniform(
          [],
          minval=math.log(min_tokens_per_segment),
          maxval=math.log(max_tokens_per_segment))), tf.int32)
    # Pad to a multiple of length, then use tf.reshape to split up the tokens
    # into num_segments segments each of the given length.
    num_segments = tf.cast(
        tf.ceil(tf.cast(n_tokens, tf.float32) / tf.cast(length, tf.float32)),
        tf.int32)
    padding = num_segments * length - tf.size(tokens)
    tokens = tf.pad(tokens, [[0, padding]])
    return tf.reshape(tokens, [-1, length])
  def _strip_padding(x):
    # Remove the zero padding added by _split_tokens (zeros are assumed to be
    # padding only, matching reduce_concat_tokens).
    return {feature_key: tf.boolean_mask(x, tf.cast(x, tf.bool))}
  # Filter empty examples.
  dataset = dataset.filter(lambda x: tf.not_equal(tf.size(x[feature_key]), 0))
  dataset = dataset.map(_split_tokens, num_parallel_calls=num_parallel_calls())
  dataset = dataset.unbatch()
  return dataset.map(
      _strip_padding, num_parallel_calls=tf.data.experimental.AUTOTUNE)
@gin.configurable
def split_tokens_to_inputs_length(dataset, sequence_length, **unused_kwargs):
  """Split examples into segments no longer than the 'inputs' length."""
  return split_tokens(dataset,
                      max_tokens_per_segment=sequence_length['inputs'])
@gin.configurable
def split_tokens_to_targets_length(dataset, sequence_length, **unused_kwargs):
  """Split examples into segments no longer than the 'targets' length."""
  return split_tokens(dataset,
                      max_tokens_per_segment=sequence_length['targets'])
@gin.configurable
def split_tokens_to_random_length(dataset, sequence_length, **unused_kwargs):
  """Split examples into segments of random (log-uniform) length.

  Segment lengths are drawn per document from a log-uniform distribution
  between 8 tokens and the configured 'inputs' length.
  """
  return split_tokens(dataset,
                      min_tokens_per_segment=8,
                      max_tokens_per_segment=sequence_length['inputs'])
@gin.configurable()
def denoise(dataset,
            output_features,
            noise_density=gin.REQUIRED,
            noise_mask_fn=gin.REQUIRED,
            inputs_fn=gin.REQUIRED,
            targets_fn=None,
            **unused_kwargs):
  """Gin-configurable token preprocessor for self-supervised denoising tasks.

  This function takes a dataset containing "targets" sequences,
  and turns each sequence into a dictionary containing:
  {
     "inputs": noisy version of the original sequence
     "targets": the full original sequence or missing parts of original sequence
  }

  In particular, for each sequence, we choose a boolean noise_mask identifying
  which tokens in the sequence to corrupt, as defined by the given
  noise_mask_fn.

  Given the sequence and the noise mask, we generate the inputs and targets
  using the given inputs_fn and targets_fn respectively.

  The self-supervised tasks vary along these axes:
    - noise_density: What fraction of the tokens to select as noise
    - noise_mask_fn: What pattern should the noise mask follow
        (iid, regular segments, etc.)
    - inputs_fn: How to apply the noise
        (drop noise tokens, replace with sentinels, etc.)
    - targets_fn: How to represent the output
        (full sequence, only non-noise tokens, etc.)

  Note: Some functionality has been deleted, which we may or may not want to
  restore at a later date.  The code for this functionality can be found in
  the deleted code for this CL.  In particular:
    - mixture of masking and random replacement
    - task labels prepended to the inputs

  Args:
    dataset: A tf.data.Dataset to process.
    output_features: a dict mapping feature name to t5.data.Feature.
    noise_density: a float
    noise_mask_fn: a function from (length, noise_density) -> boolean mask
    inputs_fn: a function from (tokens, noise_mask, vocabulary) -> tokens
    targets_fn: a function from (tokens, noise_mask, vocabulary) -> tokens

  Returns:
    A preprocessed tf.data.Dataset.
  """
  def my_fn(features):
    """Map function: derive a noisy (inputs, targets) pair from 'targets'."""
    tokens = features['targets']
    vocabulary = output_features['targets'].vocabulary
    # The inputs are constructed from the tokenized targets, so both features
    # must be encoded with the same vocabulary for the result to make sense.
    if ('inputs' in output_features and
        vocabulary != output_features['inputs'].vocabulary):
      raise ValueError(
          'denoise creates inputs based on tokenized targets but was applied '
          'to a task that uses different vocabularies for inputs and targets.'
      )
    noise_mask = noise_mask_fn(tf.size(tokens), noise_density)
    inputs = inputs_fn(tokens, noise_mask, vocabulary)
    # Without a targets_fn, the targets are the full uncorrupted sequence.
    if targets_fn:
      targets = targets_fn(tokens, noise_mask, vocabulary)
    else:
      targets = tokens
    return {'inputs': inputs, 'targets': targets}
  return dataset.map(my_fn, num_parallel_calls=num_parallel_calls())
def trivia_qa_truncate_inputs(dataset, output_features, sequence_length):
  """Gin configurable token preprocessor for the trivia QA dataset.

  This function takes a dataset containing "targets" and "inputs". It searches
  for the "targets" in the "inputs" and truncates the "inputs" to
  `sequence_length` while ensuring that the "targets" are present in the
  "inputs". The function will randomly select a subset of "inputs".
  If "targets" are not found in the "inputs", then the example
  is dropped from the dataset.

  E.g.
  Input dataset
  {
    "inputs": [0, 3, 5, 7, 9, 11, 13, 15, 17, 18]
    "targets": [5, 7, 9]
  }

  Output dataset (assuming sequence_length['inputs'] = 4)
  {
    "inputs": [3, 5, 7, 9]
    "targets": [5, 7, 9]
  }
  or
  {
    "inputs": [5, 7, 9, 11]
    "targets": [5, 7, 9]
  }

  Args:
    dataset: a tf.data.Dataset with dictionaries containing the "inputs" and
      "targets".
    output_features: unused by this function.
    sequence_length: a dict, with keys as "inputs" and "targets" indicating the
      maximum number of tokens in each of the sequences.

  Returns:
    a dataset
  """
  del output_features
  def my_fn(features):
    """Function to map original dataset to the new dataset."""
    inputs = features['inputs']
    targets = features['targets']
    ans_len = tf.shape(targets)[0]
    max_input_tokens = sequence_length['inputs']
    def truncate_inputs():
      """Helper function to truncate the inputs."""
      def answer_in_context(context, answer):
        """Helper function that checks if the answer is present in the context.

        Args:
          context: Tensor, tokenized representation of the context
          answer: Tensor, tokenized representation of the answer

        Returns:
          result: boolean, indicates if the answer was present in the context.
          pos_mask: boolean mask, a mask for every possible start position of
            the answer in the context. Indicates whether the answer starts at
            the particular position.
        """
        conv_inp = tf.reshape(tf.cast(context, tf.float32), [1, -1, 1])
        ans_len = tf.shape(answer)[0]
        filters = tf.eye(ans_len, dtype=tf.float32)
        # Assume context len is N and answer len is M.
        # Use a convolution to create a matrix of (N-M) x M elements where
        # each row of the matrix is a sequence of len M. This matrix contains
        # all possible contiguous sequences of length M from the context.
        # Every row of this matrix is compared with the answer to check if the
        # answer exists in the context.
        strided = tf.nn.conv1d(conv_inp,
                               tf.reshape(filters, [ans_len, 1, ans_len]), 1,
                               'VALID')
        strided = tf.cast(strided[0], answer.dtype)
        pos_mask = tf.reduce_all(
            tf.equal(strided, tf.reshape(answer, [1, -1])), 1)
        result = tf.reduce_any(pos_mask)
        return result, pos_mask
      def slice_inputs(inputs, answer_len, pos_mask):
        """Helper function to slice inputs while keeping the answer."""
        # First start position where the answer matches.
        # NOTE: tf.to_int32 is TF1-style API (replaced by tf.cast in TF2).
        ans_start_pos = tf.to_int32(tf.where(pos_mask)[0][0])
        inputs_len = tf.shape(inputs)[0]
        # Valid window starts: late enough that the window can still contain
        # the whole answer, early enough that the window stays in bounds.
        start_range_min = tf.maximum(
            0, ans_start_pos - (max_input_tokens - answer_len))
        start_range_max = tf.minimum(ans_start_pos,
                                     inputs_len - max_input_tokens) + 1
        start_pos = tf.random.uniform([],
                                      minval=start_range_min,
                                      maxval=start_range_max,
                                      dtype=tf.int32)
        return inputs[start_pos:start_pos + max_input_tokens]
      result, pos_mask = answer_in_context(inputs, targets)
      # If the answer is absent, return an empty tensor so the example is
      # removed by the filter below.
      truncated_inputs = tf.cond(
          result, lambda: slice_inputs(inputs, ans_len, pos_mask),
          lambda: tf.constant([], dtype=inputs.dtype))
      return truncated_inputs
    # Only truncate when the inputs are actually too long.
    inputs = tf.cond(
        tf.shape(inputs)[0] > max_input_tokens, truncate_inputs, lambda: inputs)
    return {'inputs': inputs, 'targets': features['targets']}
  dataset = dataset.map(my_fn, num_parallel_calls=num_parallel_calls())
  # Drop examples whose answer was not found in the context.
  return dataset.filter(lambda x: tf.size(x['inputs']) > 0)
@gin.configurable()
def unsupervised(dataset, preprocessors=None, **kwargs):
  """Apply a gin-configured chain of unsupervised preprocessors.

  An extra level of indirection so that future unsupervised pretraining
  functions that do not fit the denoise() framework can be plugged in here.

  Args:
    dataset: A tf.data.Dataset to process.
    preprocessors: a list of token-preprocessor functions
    **kwargs: passthrough keyword arguments for token preprocessors

  Returns:
    A preprocessed tf.data.Dataset.
  """
  if preprocessors is None:
    tf.logging.warn(
        'unsupervised preprocessor got preprocessors=None; no preprocessing '
        'will be applied.'
    )
    return dataset

  processed = dataset
  for preprocessor in preprocessors:
    processed = preprocessor(processed, **kwargs)
  return processed
@gin.configurable()
def iid_noise_mask(length, noise_density):
  """Bernoulli noise mask: each position is noise with prob. noise_density.

  Args:
    length: an int32 scalar
    noise_density: a float - approximate density of output mask

  Returns:
    a boolean tensor with shape [length]
  """
  uniform_draws = tf.random.uniform([length])
  return uniform_draws < noise_density
@gin.configurable()
def regular_noise_mask(length,
                       noise_density,
                       min_span_length=1,
                       max_span_length=5):
  """Periodic noise mask: equally spaced noise spans of equal length.

  One span length and one phase offset are drawn at random per example.
  Spans at the very start or end of the sequence may be clipped short.

  For example, with noise_density=0.25 and a drawn span length of 2, the
  output might look like:
  [T F F F F F F T T F F F F F F T T F F F F F F T T F F]

  Args:
    length: an int32 scalar
    noise_density: a float - approximate density of output mask
    min_span_length: an integer
    max_span_length: an integer

  Returns:
    a boolean tensor with shape [length]
  """
  # Draw the span length uniformly from [min_span_length, max_span_length].
  span = tf.random.uniform([],
                           minval=min_span_length,
                           maxval=max_span_length + 1,
                           dtype=tf.int32)
  # Space the span starts so that span / period approximates noise_density.
  period = tf.cast(
      tf.round(tf.cast(span, tf.float32) / noise_density), tf.int32)
  # Random phase so the spans do not always start at position 0.
  phase = tf.random.uniform([], maxval=period, dtype=tf.int32)
  positions = tf.range(length, dtype=tf.int32)
  return (positions + phase) % period < span
@gin.configurable()
def random_spans_noise_mask(length,
                            noise_density,
                            mean_noise_span_length=3.0):
  """Noise mask consisting of random spans of noise tokens.

  The number of noise tokens and the number of noise spans and non-noise spans
  are determined deterministically as follows:
    num_noise_tokens = round(length * noise_density)
    num_nonnoise_spans = num_noise_spans = round(
       num_noise_tokens / mean_noise_span_length)
  Spans alternate between non-noise and noise, beginning with non-noise.
  Subject to the above restrictions, all masks are equally likely.

  Args:
    length: an int32 scalar (length of the incoming token sequence)
    noise_density: a float - approximate density of output mask
    mean_noise_span_length: a number

  Returns:
    a boolean tensor with shape [length]
  """
  orig_length = length
  # increase length to avoid degeneracy
  length = tf.maximum(length, 2)
  def to_int(x):
    return tf.cast(x, tf.int32)
  def to_float(x):
    return tf.cast(x, tf.float32)
  num_noise_tokens = to_int(tf.round(to_float(length) * noise_density))
  # avoid degeneracy by ensuring positive numbers of noise and nonnoise tokens.
  num_noise_tokens = tf.minimum(tf.maximum(num_noise_tokens, 1), length - 1)
  num_noise_spans = to_int(
      tf.round(to_float(num_noise_tokens) / mean_noise_span_length))
  # avoid degeneracy by ensuring positive number of noise spans
  num_noise_spans = tf.maximum(num_noise_spans, 1)
  num_nonnoise_tokens = length - num_noise_tokens
  # pick the lengths of the noise spans and the non-noise spans
  def _random_segmentation(num_items, num_segments):
    """Partition a sequence of items randomly into non-empty segments.

    Args:
      num_items: an integer scalar > 0
      num_segments: an integer scalar in [1, num_items]
    Returns:
      a Tensor with shape [num_segments] containing positive integers that add
      up to num_items
    """
    # Shuffle (num_segments - 1) ones among (num_items - 1) slots; each one
    # marks the first item of a new segment.
    # NOTE(review): the hard-coded seed=123 makes this shuffle deterministic;
    # confirm this is intended and not leftover debugging, since it fixes the
    # segmentation given the same (num_items, num_segments).
    first_in_segment = tf.pad(
        tf.random.shuffle(to_int(tf.range(num_items - 1) < num_segments - 1),
                          seed=123),
        [[1, 0]])
    segment_id = tf.cumsum(first_in_segment)
    segment_length = tf.segment_sum(tf.ones_like(segment_id), segment_id)
    return segment_length
  noise_span_lengths = _random_segmentation(num_noise_tokens, num_noise_spans)
  nonnoise_span_lengths = _random_segmentation(
      num_nonnoise_tokens, num_noise_spans)
  # Interleave as [nonnoise_0, noise_0, nonnoise_1, noise_1, ...].
  interleaved_span_lengths = tf.reshape(
      tf.stack([nonnoise_span_lengths, noise_span_lengths], axis=1),
      [num_noise_spans * 2])
  # Mark each span start, then number the spans with a cumulative sum;
  # odd-numbered spans are the noise spans.
  span_starts = tf.cumsum(interleaved_span_lengths)[:-1]
  span_start_indicator = tf.unsorted_segment_sum(
      tf.ones_like(span_starts), span_starts, length)
  span_num = tf.cumsum(span_start_indicator)
  is_noise = tf.equal(span_num % 2, 1)
  return is_noise[:orig_length]
@gin.configurable
def random_spans_helper(inputs_length=gin.REQUIRED,
                        noise_density=gin.REQUIRED,
                        mean_noise_span_length=gin.REQUIRED,
                        extra_tokens_per_span_inputs=gin.REQUIRED,
                        extra_tokens_per_span_targets=gin.REQUIRED):
  """Compute padding-free training lengths for random_spans_noise_mask.

  When training with random_spans_noise_mask we want the other training
  hyperparameters set so that no padding is needed. We assume each noise span
  in the inputs is replaced by extra_tokens_per_span_inputs sentinel tokens,
  and each non-noise span in the targets by extra_tokens_per_span_targets
  sentinel tokens. This tells us how many raw tokens each example should
  contain (for split_tokens()) and how long the encoded targets will be.

  Args:
    inputs_length: an integer - desired length of the tokenized inputs sequence
    noise_density: a float
    mean_noise_span_length: a float
    extra_tokens_per_span_inputs: an integer
    extra_tokens_per_span_targets: an integer

  Returns:
    tokens_length: length of original text in tokens
    targets_length: an integer - length in tokens of encoded targets sequence
  """
  def _encoded_lengths(raw_length):
    """Map a raw token count to (encoded inputs length, targets length)."""
    noise_tokens = int(round(raw_length * noise_density))
    nonnoise_tokens = raw_length - noise_tokens
    noise_spans = int(round(noise_tokens / mean_noise_span_length))
    # inputs: all non-noise tokens, one sentinel group per noise span, + EOS.
    encoded_inputs = (
        nonnoise_tokens + noise_spans * extra_tokens_per_span_inputs + 1)
    # targets: all noise tokens, one sentinel group per span, + EOS.
    encoded_targets = (
        noise_tokens + noise_spans * extra_tokens_per_span_targets + 1)
    return encoded_inputs, encoded_targets

  # Find the largest raw length whose encoded inputs still fit.
  tokens_length = inputs_length
  while True:
    next_inputs_length, _ = _encoded_lengths(tokens_length + 1)
    if next_inputs_length > inputs_length:
      break
    tokens_length += 1
  inputs_length, targets_length = _encoded_lengths(tokens_length)

  # minor hack to get the targets length to be equal to inputs length
  # which is more likely to have been set to a nice round number.
  if noise_density == 0.5 and targets_length > inputs_length:
    tokens_length -= 1
    targets_length -= 1
  tf.logging.info(
      'tokens_length=%s inputs_length=%s targets_length=%s '
      'noise_density=%s mean_noise_span_length=%s ' %
      (tokens_length, inputs_length, targets_length,
       noise_density, mean_noise_span_length))
  return tokens_length, targets_length
@gin.configurable
def random_spans_tokens_length():
  """Gin helper: raw-text token length for random_spans_noise_mask training."""
  tokens_length, _ = random_spans_helper()
  return tokens_length
@gin.configurable
def random_spans_targets_length():
  """Gin helper: encoded targets length for random_spans_noise_mask training."""
  _, targets_length = random_spans_helper()
  return targets_length
@gin.configurable()
def random_prefix_noise_mask(length, noise_density):
  """Mark a random-length prefix of the sequence as noise (for prefix_lm).

  The prefix length is drawn uniformly from [1, length).
  noise_density must be 0.5.
  TODO(noam): figure out some distribution to use if noise_density != 0.5

  Args:
    length: an int32 scalar
    noise_density: a float - must equal 0.5

  Returns:
    a boolean tensor with shape [length]
  """
  if noise_density != 0.5:
    raise NotImplementedError(
        'noise density must equal 0.5 for random_prefix_noise_mask')
  upper = length - 1
  # Guard against length <= 1.
  lower = tf.minimum(upper, 1)
  prefix_length = tf.random.uniform(
      [], minval=lower, maxval=upper + 1, dtype=tf.int32)
  positions = tf.range(length, dtype=tf.int32)
  return positions < prefix_length
@gin.configurable()
def sentinel_id(vocabulary, return_value=None):
  """Pick the token ID used as a sentinel.

  Defaults to the last ID in the vocabulary, unless a specific value is
  gin-configured via return_value.

  Args:
    vocabulary: a t5.data.vocabularies.Vocabulary
    return_value: an optional integer

  Returns:
    an integer
  """
  if return_value is None:
    return vocabulary.vocab_size - 1
  return return_value
@gin.configurable()
def noise_token_to_sentinel(tokens, noise_mask, vocabulary):
  """Overwrite every noise token with the (single) sentinel ID.

  Args:
    tokens: a 1d integer Tensor
    noise_mask: a boolean Tensor with the same shape as tokens
    vocabulary: a vocabulary.Vocabulary

  Returns:
    a Tensor with the same shape and dtype as tokens
  """
  sentinel = tf.cast(sentinel_id(vocabulary), tokens.dtype)
  return tf.where_v2(noise_mask, sentinel, tokens)
@gin.configurable()
def noise_span_to_sentinel(tokens, noise_mask, vocabulary):
  """Collapse each run of consecutive noise tokens into one sentinel.

  Args:
    tokens: a 1d integer Tensor
    noise_mask: a boolean Tensor with the same shape as tokens
    vocabulary: a vocabulary.Vocabulary

  Returns:
    a Tensor with the same shape and dtype as tokens
  """
  sentinel = tf.cast(sentinel_id(vocabulary), tokens.dtype)
  replaced = tf.where_v2(noise_mask, sentinel, tokens)
  # A noise token whose left neighbour is also noise continues a span; drop
  # it so only the first (sentinel) token of each span survives.
  left_is_noise = tf.pad(noise_mask[:-1], [[1, 0]])
  span_continuation = tf.logical_and(noise_mask, left_is_noise)
  return tf.boolean_mask(replaced, tf.logical_not(span_continuation))
@gin.configurable()
def nonnoise_span_to_sentinel(tokens, noise_mask, vocabulary):
  """Replace each run of consecutive non-noise tokens with a single sentinel.

  Complement of noise_span_to_sentinel: delegates with an inverted mask.

  Args:
    tokens: a 1d integer Tensor
    noise_mask: a boolean Tensor with the same shape as tokens
    vocabulary: a vocabulary.Vocabulary

  Returns:
    a Tensor with the same shape and dtype as tokens
  """
  return noise_span_to_sentinel(
      tokens, tf.logical_not(noise_mask), vocabulary)
@gin.configurable()
def noise_span_to_unique_sentinel(tokens, noise_mask, vocabulary):
  """Replace each run of consecutive noise tokens with a different sentinel.

  The idea here is to be able to align the dropped spans in the inputs
  with the markers in the targets.

  We want to generate training examples like
  "We hold X to be Y that" -> "X these truths Y self evident Z"

  Sentinels assigned in decreasing order within the sequence starting at
  vocabulary.size - 1.  That is, we appropriate the last tokens in the
  vocabulary for additional use as sentinels.

  TODO(noam): we may want to try enlarging the vocabulary and leaving room
  for the sentinels instead.  However, this requires enlarging the embedding
  tables in the model, so that is a bigger change.

  Args:
    tokens: a 1d integer Tensor
    noise_mask: a boolean Tensor with the same shape as tokens
    vocabulary: a vocabulary.Vocabulary

  Returns:
    a Tensor with the same shape and dtype as tokens
  """
  vocab_size = vocabulary.vocab_size
  # First token of each noise span gets a sentinel; later tokens are dropped.
  prev_token_is_noise = tf.pad(noise_mask[:-1], [[1, 0]])
  first_noise_tokens = tf.logical_and(
      noise_mask, tf.logical_not(prev_token_is_noise))
  subsequent_noise_tokens = tf.logical_and(noise_mask, prev_token_is_noise)
  # The cumulative count of spans seen so far gives each span a distinct
  # sentinel: vocab_size-1 for the first span, vocab_size-2 for the next, ...
  sentinel = vocab_size - tf.cumsum(tf.cast(first_noise_tokens, tokens.dtype))
  tokens = tf.where_v2(first_noise_tokens, sentinel, tokens)
  return tf.boolean_mask(tokens, tf.logical_not(subsequent_noise_tokens))
@gin.configurable()
def nonnoise_span_to_unique_sentinel(tokens, noise_mask, vocabulary):
  """Replace each run of consecutive non-noise tokens with a unique sentinel.

  Complement of noise_span_to_unique_sentinel: delegates with an inverted
  mask, so the retained tokens are the noise tokens.

  Args:
    tokens: a 1d integer Tensor
    noise_mask: a boolean Tensor with the same shape as tokens
    vocabulary: a vocabulary.Vocabulary

  Returns:
    a Tensor with the same shape and dtype as tokens
  """
  return noise_span_to_unique_sentinel(
      tokens, tf.logical_not(noise_mask), vocabulary)
@gin.configurable()
def drop_noise_tokens(tokens, noise_mask, unused_vocabulary):
  """Remove the noise tokens entirely, inserting no sentinel.

  Args:
    tokens: a 1d integer Tensor
    noise_mask: a boolean Tensor with the same shape as tokens
    unused_vocabulary: a vocabulary.Vocabulary

  Returns:
    a Tensor with the same shape and dtype as tokens
  """
  keep = tf.logical_not(noise_mask)
  return tf.boolean_mask(tokens, keep)
@gin.configurable()
def drop_nonnoise_tokens(tokens, noise_mask, unused_vocabulary):
  """Drop non-noise tokens without inserting a sentinel.

  Args:
    tokens: a 1d integer Tensor
    noise_mask: a boolean Tensor with the same shape as tokens
    unused_vocabulary: a vocabulary.Vocabulary

  Returns:
    a Tensor with the same shape and dtype as tokens
  """
  # Keep exactly the positions flagged as noise.
  return tf.boolean_mask(tokens, noise_mask)
@gin.configurable()
def permute_noise_tokens(tokens, noise_mask, unused_vocabulary):
  """Permute the noise tokens, keeping the non-noise tokens where they are.

  Args:
    tokens: a 1d integer Tensor
    noise_mask: a boolean Tensor with the same shape as tokens
    unused_vocabulary: a vocabulary.Vocabulary

  Returns:
    a Tensor with the same shape and dtype as tokens
  """
  # Shuffle only the noise tokens.
  masked_only = tf.boolean_mask(tokens, noise_mask)
  permuted = tf.random.shuffle(masked_only)
  # pad to avoid errors when it has size 0
  permuted = tf.pad(permuted, [[0, 1]])
  # Exclusive cumsum of the mask maps each noise position to its rank among
  # the noise tokens, i.e. its index into the shuffled sequence.
  indices = tf.cumsum(tf.cast(noise_mask, tf.int32), exclusive=True)
  return tf.where_v2(noise_mask,
                     tf.gather(permuted, indices),
                     tokens)
@gin.configurable()
def noise_token_to_gathered_token(tokens, noise_mask, unused_vocabulary):
  """Replace each noise token with a random token from the sequence.

  The replacement is drawn uniformly over all positions of the sequence
  (including noise positions).

  Args:
    tokens: a 1d integer Tensor
    noise_mask: a boolean Tensor with the same shape as tokens
    unused_vocabulary: a vocabulary.Vocabulary

  Returns:
    a Tensor with the same shape and dtype as tokens
  """
  # Use tf.random.uniform rather than the deprecated TF1 alias
  # tf.random_uniform, consistent with the rest of this file.
  indices = tf.random.uniform(
      shape=tf.shape(tokens), maxval=tf.size(tokens), dtype=tf.int32)
  return tf.where_v2(noise_mask,
                     tf.gather(tokens, indices),
                     tokens)
@gin.configurable()
def noise_token_to_random_token(tokens, noise_mask, vocabulary,
                                num_reserved_tokens=3):
  """Replace each noise token with a random token from the vocabulary.

  Args:
    tokens: a 1d integer Tensor
    noise_mask: a boolean Tensor with the same shape as tokens
    vocabulary: a vocabulary.Vocabulary
    num_reserved_tokens: an integer, random tokens are drawn from
      [num_reserved_tokens, vocab_size) so reserved IDs are never produced.

  Returns:
    a Tensor with the same shape and dtype as tokens
  """
  # Random candidates are drawn for every position but only used where
  # noise_mask is True; elsewhere the original token is kept.
  return tf.where_v2(noise_mask,
                     tf.random.uniform(
                         tf.shape(tokens),
                         minval=num_reserved_tokens,
                         maxval=vocabulary.vocab_size,
                         dtype=tokens.dtype),
                     tokens)
@gin.configurable()
def noise_token_to_random_token_or_sentinel(tokens, noise_mask, vocabulary,
                                            random_prob=0.1):
  """Replace each noise token with a random token or a sentinel.

  For each masked token, with probability random_prob, we replace it by a
  random token from the vocabulary. Otherwise, we replace it with a sentinel.

  Args:
    tokens: a 1d integer Tensor
    noise_mask: a boolean Tensor with the same shape as tokens
    vocabulary: a vocabulary.Vocabulary
    random_prob: a float

  Returns:
    a Tensor with the same shape and dtype as tokens
  """
  # Choose, per position, which of the two replacement strategies applies;
  # non-noise positions are unchanged by both branches.
  use_random = tf.random.uniform(tf.shape(tokens)) < random_prob
  return tf.where_v2(
      use_random,
      noise_token_to_random_token(tokens, noise_mask, vocabulary),
      noise_token_to_sentinel(tokens, noise_mask, vocabulary))
@gin.configurable
def take(dataset, num_examples=-1, **unused_kwargs):
  """Truncate the dataset to its first `num_examples` examples.

  Used to simulate a dataset smaller than it really is. The truncated result
  is cached via `tf.data.Dataset.cache` so that a stochastic `dataset` yields
  the same examples when `repeat()` is called after `take`. The resulting
  dataset must therefore fit in memory.

  Args:
    dataset: tf.data.Dataset, dataset to process.
    num_examples: int, the number of examples to keep; -1 keeps the whole
      dataset (no truncation).

  Returns:
    A tf.data.Dataset with at most `num_examples` examples.
  """
  if num_examples == -1:
    return dataset
  return dataset.take(num_examples).cache()
def parse_tsv(dataset,
              field_names,
              field_delim='\t'):
  """Convert delimited text lines into dict examples keyed by field name.

  Args:
    dataset: a `tf.data.Dataset` containing comma/tab-delimited strings.
    field_names: a list of strings, the ordered names of the TSV fields.
    field_delim: a string, the delimiter to split on e.g. ',' for csv.

  Returns:
    A `tf.data.Dataset` containing dict examples mapping field name to string
    value.
  """
  record_defaults = [''] * len(field_names)

  def _to_example(line):
    values = tf.io.decode_csv(
        line, record_defaults=record_defaults,
        field_delim=field_delim, use_quote_delim=False)
    return dict(zip(field_names, values))

  return dataset.map(
      _to_example, num_parallel_calls=tf.data.experimental.AUTOTUNE)
def preprocess_tsv(dataset,
                   field_delim='\t',
                   num_fields=2,
                   inputs_format='{0}',
                   targets_format='{1}'):
  r"""Parse tab-delimited strings into inputs and targets.

  This function takes a tf.data.Dataset of strings, each of which contains
  tab-delimited fields.  The function returns a tf.data.Dataset of feature
  dictionaries of the form {"inputs": string, "targets": string}.

  inputs_format contains a template string and field numbers used to produce
  the "inputs" string.
  targets_format contains a template string and field numbers used to produce
  the "targets" string.

  Example:
    The input dataset contains the lines:
    "6,7,42"
    "2,9,18"
    preprocess_tsv(dataset,
                   field_delim=',',
                   inputs_format='numerator: {2} denominator: {1}',
                   targets_format='quotient: {0}')
    would produce a dataset containing the dictionaries:
    {"inputs": "numerator: 42 denominator: 7", "targets": "quotient: 6"}
    {"inputs": "numerator: 18 denominator: 9", "targets": "quotient: 2"}

  Args:
    dataset: a tf.data.Dataset containing comma/tab-delimited strings.
    field_delim: a string, the delimiter to split on e.g. ',' for csv.
    num_fields: an integer
    inputs_format: a string, the desired output format with placeholders for
      field values.
    targets_format: a string, the desired output format with placeholders for
      field values.

  Returns:
    a tf.data.Dataset of feature dictionaries with 'inputs' and
    'targets' features.
  """
  def _format_part(part, field_values):
    # Placeholders are single-digit field references '{0}'..'{9}'; the regex
    # matches exactly one digit, so fields 10 and above cannot be referenced.
    found = re.findall(r'{(\d)}', part)
    if found:
      return field_values[int(found[0])]
    else:
      return part
  def _format(format_string, field_values):
    # Split on the placeholders (keeping them as separate pieces), substitute
    # each piece, then join everything back into one string tensor.
    parts = [_format_part(p, field_values)
             for p in re.split(r'({\d})', format_string)]
    return tf.strings.join(parts)
  def _parse_fn(line):
    """Function to process a line."""
    field_values = tf.io.decode_csv(
        line, record_defaults=[''] * num_fields,
        field_delim=field_delim, use_quote_delim=False)
    return {'inputs': _format(inputs_format, field_values),
            'targets': _format(targets_format, field_values)}
  return dataset.map(
      _parse_fn, num_parallel_calls=tf.data.experimental.AUTOTUNE)
| 35.121152 | 219 | 0.669744 |
ace5c8e7cd9692b8872a4ce5224a7f8673c8e913 | 45,890 | py | Python | SOSS/dms/soss_centroids.py | njcuk9999/jwst-mtl | 81d3e7ec6adc5dae180cd9d3bff8e4a2a7292596 | [
"MIT"
] | 1 | 2022-02-04T13:59:18.000Z | 2022-02-04T13:59:18.000Z | SOSS/dms/soss_centroids.py | njcuk9999/jwst-mtl | 81d3e7ec6adc5dae180cd9d3bff8e4a2a7292596 | [
"MIT"
] | 12 | 2020-09-17T20:14:03.000Z | 2022-03-21T21:16:43.000Z | SOSS/dms/soss_centroids.py | njcuk9999/jwst-mtl | 81d3e7ec6adc5dae180cd9d3bff8e4a2a7292596 | [
"MIT"
] | 1 | 2020-09-18T15:25:52.000Z | 2020-09-18T15:25:52.000Z | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
import warnings
import numpy as np
from astropy.io import fits
from SOSS.extract import soss_read_refs
from .soss_utils import zero_roll, robust_polyfit, get_image_dim
from matplotlib import colors
import matplotlib.pyplot as plt
def _plot_centroid(image, xtrace, ytrace):
    """Overlay extracted trace centroid positions on a detector image.

    :param image: A 2D image of the detector.
    :param xtrace: The x coordinates of the trace to overplot on the image.
    :param ytrace: The y coordinates of the trace to overplot on the image.

    :type image: array[float]
    :type xtrace: array[float]
    :type ytrace: array[float]
    """
    nrows, ncols = image.shape

    # Square (sub)arrays are displayed 1:1, otherwise stretch 2x vertically.
    if nrows == ncols:
        aspect = 1
        figsize = ncols/64, nrows/64
    else:
        aspect = 2
        figsize = ncols/64, nrows/32

    fig, ax = plt.subplots(figsize=figsize)
    ax.set_title('Trace Centroids')

    ax.imshow(image, origin='lower', cmap='inferno', norm=colors.LogNorm(), aspect=aspect)
    ax.plot(xtrace, ytrace, lw=2, ls='--', c='black', label='Centroids')

    ax.set_xlabel('Spectral Pixel', fontsize=14)
    ax.set_ylabel('Spatial Pixel', fontsize=14)
    ax.legend(fontsize=12)

    ax.set_xlim(-0.5, ncols - 0.5)
    ax.set_ylim(-0.5, nrows - 0.5)

    fig.tight_layout()
    plt.show()
    plt.close(fig)

    return
def _plot_centroids(image, centroids):
    """Visualize the trace extracted by get_soss_centroids().

    For each extracted order the centroid line is drawn along with the upper
    and lower trace edges at +/- half the trace width.

    :param image: A 2D image of the detector.
    :param centroids: A dictionary containing the trace, as returned by
        get_soss_centroids().

    :type image: array[float]
    :type centroids: dict
    """
    # Determine an appropriate figure size.
    nrows, ncols = image.shape

    if nrows == ncols:
        aspect = 1
        figsize = ncols/64, nrows/64
    else:
        aspect = 2
        figsize = ncols/64, nrows/32

    # Make a figure showing the trace for all 3 orders.
    plt.figure(figsize=figsize)
    plt.title('Trace Positions')

    plt.imshow(image, origin='lower', cmap='inferno', norm=colors.LogNorm(), aspect=aspect)

    # Plot each order present in the dictionary (deduplicates the previous
    # copy-pasted per-order branches; missing orders are skipped gracefully).
    for key, color, label in [('order 1', 'orange', 'Order 1'),
                              ('order 2', 'black', 'Order 2'),
                              ('order 3', 'red', 'Order 3')]:

        if key not in centroids:
            continue

        tmp = centroids[key]
        plt.plot(tmp['X centroid'], tmp['Y centroid'], color=color, label=label)
        plt.plot(tmp['X centroid'], tmp['Y centroid'] - tmp['trace widths'] / 2, color=color)
        plt.plot(tmp['X centroid'], tmp['Y centroid'] + tmp['trace widths'] / 2, color=color)

    plt.xlabel('Spectral Pixel', fontsize=14)
    plt.ylabel('Spatial Pixel', fontsize=14)
    plt.legend(fontsize=12)

    plt.xlim(-0.5, ncols - 0.5)
    plt.ylim(-0.5, nrows - 0.5)

    plt.tight_layout()
    plt.show()
    plt.close()

    return
def center_of_mass(column, ypos, halfwidth):
    """Compute a windowed center-of-mass along a column.

    :param column: The column on which to compute the windowed center of mass.
    :param ypos: The position along the column to center the window on.
    :param halfwidth: The half-size of the window in pixels.

    :type column: array[float]
    :type ypos: float
    :type halfwidth: int

    :returns: ycom - the center-of-mass of the pixels within the window.
    :rtype: float
    """
    # Get the column shape and create a corresponding array of positions.
    dimy, = column.shape
    ypix = np.arange(dimy)

    # Find the indices of the window, clipped to the column bounds.
    # Use the builtin int: np.int was deprecated in NumPy 1.20 and removed in
    # NumPy 1.24, where it raises AttributeError.
    miny = int(np.fmax(np.around(ypos - halfwidth), 0))
    maxy = int(np.fmin(np.around(ypos + halfwidth + 1), dimy))

    # Compute the center of mass on the window; NaNs in the column are
    # ignored, and an all-NaN or zero-flux window yields NaN.
    with np.errstate(invalid='ignore'):
        ycom = np.nansum(column[miny:maxy]*ypix[miny:maxy])/np.nansum(column[miny:maxy])

    return ycom
def get_centroids_com(image, header=None, mask=None, poly_order=11, verbose=False):
    """Determine the x, y coordinates of the trace using a center-of-mass analysis.

    Works for either order if there is no contamination, or for order 1 on a detector
    where the two orders are overlapping.

    :param image: A 2D image of the detector.
    :param header: The header from one of the SOSS reference files.
    :param mask: A boolean array of the same shape as image. Pixels corresponding to True values will be masked.
    :param poly_order: Order of the polynomial to fit to the extracted trace positions.
    :param verbose: If set True some diagnostic plots will be made.

    :type image: array[float]
    :type header: astropy.io.fits.Header
    :type mask: array[bool]
    :type poly_order: int
    :type verbose: bool

    :returns: xtrace, ytrace, param - The x, y coordinates of trace as computed from the best fit polynomial
        and the best-fit polynomial parameters.
    :rtype: Tuple(array[float], array[float], array[float])
    """
    # If no mask was given use all pixels.
    if mask is None:
        mask = np.zeros_like(image, dtype='bool')

    # Call the script that determines the dimensions of the stack.
    result = get_image_dim(image, header=header, verbose=verbose)
    dimx, dimy, xos, yos, xnative, ynative, padding, refpix_mask = result

    # Replace masked pixel values with NaNs.
    image_masked = np.where(mask | ~refpix_mask, np.nan, image)

    # Compute and subtract the background level of each column.
    col_bkg = np.nanpercentile(image_masked, 10, axis=0)
    image_masked_bkg = image_masked - col_bkg

    # Find centroid - first pass, use all pixels in the column.
    # Normalize each column
    with np.errstate(invalid='ignore'):
        image_norm = image_masked_bkg / np.nanmax(image_masked_bkg, axis=0)

    # Create 2D Array of pixel positions.
    xpix = np.arange(dimx)
    ypix = np.arange(dimy)
    _, ygrid = np.meshgrid(xpix, ypix)

    # CoM analysis to find initial positions using all rows.
    with np.errstate(invalid='ignore'):
        ytrace = np.nansum(image_norm*ygrid, axis=0)/np.nansum(image_norm, axis=0)

    # Second pass - use a windowed CoM at the previous position.
    halfwidth = 30 * yos
    for icol in range(dimx):

        ycom = center_of_mass(image_norm[:, icol], ytrace[icol], halfwidth)

        # If NaN was returned we are done.
        if not np.isfinite(ycom):
            ytrace[icol] = np.nan
            continue

        # If the pixel at the centroid is below the local mean we are likely mid-way between orders and
        # we should shift the window downward to get a reliable centroid for order 1.
        # NOTE: np.int was removed in NumPy 1.24; use the builtin int instead.
        irow = int(np.around(ycom))
        miny = int(np.fmax(np.around(ycom) - halfwidth, 0))
        maxy = int(np.fmin(np.around(ycom) + halfwidth + 1, dimy))
        if image_norm[irow, icol] < np.nanmean(image_norm[miny:maxy, icol]):
            ycom = center_of_mass(image_norm[:, icol], ycom - halfwidth, halfwidth)

        # If NaN was returned or the position is too close to the array edge, use NaN.
        if not np.isfinite(ycom) or (ycom <= 5 * yos) or (ycom >= (ynative - 6) * yos):
            ytrace[icol] = np.nan
            continue

        # Update the position if the above checks were successful.
        ytrace[icol] = ycom

    # Third pass - fine tuning using a smaller window.
    halfwidth = 16 * yos
    for icol in range(dimx):

        ytrace[icol] = center_of_mass(image_norm[:, icol], ytrace[icol], halfwidth)

    # Fit the y-positions with a polynomial and use the result as the true y-positions.
    xtrace = np.arange(dimx)
    mask = np.isfinite(ytrace)

    # For padded arrays ignore padding for consistency with real data
    if padding != 0:
        mask = mask & (xtrace >= xos*padding) & (xtrace < (dimx - xos*padding))

    # If no polynomial order was given return the raw measurements.
    if poly_order is None:
        param = []
    else:
        param = robust_polyfit(xtrace[mask], ytrace[mask], poly_order)
        ytrace = np.polyval(param, xtrace)

    # If verbose visualize the result.
    if verbose is True:
        _plot_centroid(image_masked, xtrace, ytrace)

    return xtrace, ytrace, param
def edge_trigger(image, halfwidth=5, yos=1, verbose=False):
    """Detect the edges and center of the trace based on the minima and maxima of the derivative
    of the columns, which is computed in a running window along the columns of the detector image.
    :param image: A 2D image of the detector.
    :param halfwidth: the size of the window used when computing the derivatives.
    :param yos: the oversampling factor of the image array along the y-direction.
    :param verbose: If set True some diagnostic plots will be made.
    :type image: array[float]
    :type halfwidth: int
    :type yos: int
    :type verbose: bool
    :returns: ytrace_max, ytrace_min, ytrace_best, widths_best - The row of maximum slope
        (rising edge), the row of minimum slope (falling edge), the trace center estimated
        from paired edges, and the corresponding best-fit trace width at each column.
    :rtype: Tuple(array[float], array[float], array[float], array[float])
    """
    dimy, dimx = image.shape
    halfwidth = halfwidth * yos
    # Create coordinate arrays.
    xpix = np.arange(dimx)
    ypix = np.arange(dimy)
    _, ygrid = np.meshgrid(xpix, ypix)
    # Compute windowed slopes over the columns.
    slopevals = np.zeros_like(image)
    for irow in range(halfwidth, dimy-halfwidth):
        # Compute the window indices.
        ymin = irow - halfwidth
        ymax = irow + halfwidth + 1
        # Get the x and y data to find the slope to.
        datay = image[ymin:ymax, :]
        mask = np.isfinite(datay)
        datax = np.where(mask, ygrid[ymin:ymax, :], np.nan)  # Need to set values NaN in y to NaN in x.
        # Compute the slope via an explicit least-squares formula, ignoring NaNs.
        with warnings.catch_warnings():
            warnings.simplefilter("ignore")
            xmean = np.nanmean(datax, axis=0, keepdims=True)
            ymean = np.nanmean(datay, axis=0, keepdims=True)
        with np.errstate(invalid='ignore'):
            num = np.nansum((datax - xmean) * (datay - ymean), axis=0)
            denom = np.nansum((datax - xmean) ** 2, axis=0)
        slope = num / denom
        # Set slopes computed from < 3 datapoints to zero so they are ignored downstream.
        slopevals[irow, :] = np.where(np.sum(mask, axis=0) >= 3, slope, 0.)
    # Find the rows of extreme slope in each column; a zero extremum means no edge was found,
    # in which case the position is set to NaN.
    args = np.nanargmax(slopevals, axis=0)
    vals = np.nanmax(slopevals, axis=0)
    ytrace_max = np.where(vals != 0, ypix[args], np.nan)
    args = np.nanargmin(slopevals, axis=0)
    vals = np.nanmin(slopevals, axis=0)
    ytrace_min = np.where(vals != 0, ypix[args], np.nan)
    # Scan through a range of trace widths, pairing a rising edge with the falling
    # edge `width` rows above it.
    slopes_best = np.zeros_like(xpix)
    ytrace_best = np.zeros_like(xpix)
    widths_best = np.zeros_like(xpix)
    for width in range(18*yos, 27*yos):
        # Add the slope and its offset negative.
        comb = slopevals - zero_roll(slopevals, -width)
        # Find the maximum resulting slope.
        args = np.nanargmax(comb, axis=0)
        vals = np.nanmax(comb, axis=0)
        # Update the best values.
        mask = (vals > slopes_best)
        slopes_best = np.where(mask, vals, slopes_best)
        ytrace_best = np.where(mask, ypix[args], ytrace_best)
        widths_best = np.where(mask, width, widths_best)
    # Set the y position to NaN if the best slope was zero; the center is the
    # lower-edge row plus half the best width.
    ytrace_best = np.where(slopes_best != 0, ytrace_best + widths_best/2., np.nan)
    widths_best = np.where(slopes_best != 0, widths_best, np.nan)
    if verbose:
        nrows, ncols = image.shape
        plt.figure(figsize=(ncols/128, nrows/128))
        plt.title('Edge-trigger Trace Positions')
        plt.imshow(image, origin='lower', cmap='inferno', norm=colors.LogNorm())
        plt.plot(ytrace_min, lw=2, ls='--', c='black', label='Edges')
        plt.plot(ytrace_max, lw=2, ls='--', c='black')
        plt.plot(ytrace_best, lw=2, c='black', label='Centroids')
        plt.xlabel('Spectral Pixel', fontsize=14)
        plt.ylabel('Spatial Pixel', fontsize=14)
        plt.legend(fontsize=12)
        plt.tight_layout()
        plt.show()
        plt.close()
    return ytrace_max, ytrace_min, ytrace_best, widths_best
def get_centroids_edgetrigger(image, header=None, mask=None, poly_order=11,
                              halfwidth=5, mode='combined', verbose=False):
    """Determine the x, y coordinates of the trace using the derivatives along the y-axis.
    Works for either order if there is no contamination.
    :param image: A 2D image of the detector.
    :param header: The header from one of the SOSS reference files.
    :param mask: A boolean array of the same shape as image. Pixels corresponding to True values will be masked.
    :param poly_order: Order of the polynomial to fit to the extracted trace positions.
        If None the raw per-column positions are returned unfitted and param is empty.
    :param halfwidth: the size of the window used when computing the derivatives.
    :param mode: Which trace values to use. Can be 'bottomedge', 'topedge', 'mean' or 'combined'.
    :param verbose: If set True some diagnostic plots will be made.
    :type image: array[float]
    :type header: astropy.io.fits.Header
    :type mask: array[bool]
    :type poly_order: int or None
    :type halfwidth: int
    :type mode: str
    :type verbose: bool
    :returns: xtrace, ytrace, tracewidth, param - The x, y coordinates of the trace as computed
        from the best fit polynomial, the trace widths, and the best-fit polynomial parameters.
    :rtype: Tuple(array[float], array[float], array[float], array[float])
    """
    # If no mask was given use all pixels.
    if mask is None:
        mask = np.zeros_like(image, dtype='bool')
    # Call the script that determines the dimensions of the image.
    result = get_image_dim(image, header=header, verbose=verbose)
    dimx, dimy, xos, yos, xnative, ynative, padding, refpix_mask = result
    # Replace masked pixel values with NaNs; reference pixels are always masked.
    image_masked = np.where(mask | ~refpix_mask, np.nan, image)
    # Use edge trigger to compute the edges and center of the trace.
    fkwargs = dict(halfwidth=halfwidth, yos=yos, verbose=verbose)
    ytrace_max, ytrace_min, ytrace_best, widths_best = edge_trigger(image_masked, **fkwargs)
    # Use different y-positions depending on the mode parameter.
    # Note: ytrace_max (maximum slope, rising edge) corresponds to the bottom edge,
    # ytrace_min (minimum slope, falling edge) to the top edge.
    if mode == 'bottomedge':
        ytrace = ytrace_max
    elif mode == 'topedge':
        ytrace = ytrace_min
    elif mode == 'mean':
        ytrace = (ytrace_min + ytrace_max)/2.
    elif mode == 'combined':
        ytrace = ytrace_best
    else:
        raise ValueError('Unknown mode: {}'.format(mode))
    # Fit the y-positions with a polynomial and use the result as the true y-positions.
    xtrace = np.arange(dimx)
    mask = np.isfinite(ytrace)
    # If no polynomial order was given return the raw measurements.
    if poly_order is None:
        param = []
    else:
        param = robust_polyfit(xtrace[mask], ytrace[mask], poly_order)
        ytrace = np.polyval(param, xtrace)
    # If verbose visualize the result.
    if verbose is True:
        _plot_centroid(image_masked, xtrace, ytrace)
    return xtrace, ytrace, widths_best, param
def build_mask_vertical(shape, xlims, mask_right=True, mask_between=True):
    """Mask along the vertical(s) given by xlims.

    If xlims contains 1 element, pixels are masked to the right or to the left
    of that column according to the value of mask_right (and mask_between is
    ignored). If xlims contains 2 elements, pixels are masked between or
    outside these values according to the value of mask_between (and
    mask_right is ignored).

    :param shape: tuple containing the intended shape of the mask array.
    :param xlims: the column indices to use as the limits of the masked area.
    :param mask_right: if True mask pixels to the right of xlims, otherwise mask to the left.
    :param mask_between: if True mask pixels between xlims, otherwise mask outside.

    :type shape: tuple[int]
    :type xlims: list[float]
    :type mask_right: bool
    :type mask_between: bool

    :returns: mask - A mask that removes a vertical region according to xlims.
    :rtype: array[bool]

    :raises ValueError: if xlims has more than 2 elements.
    """
    dimy, dimx = shape

    # Column coordinate of every pixel; broadcasting a single row to all rows
    # avoids materializing a full meshgrid.
    xgrid = np.broadcast_to(np.arange(dimx), (dimy, dimx))

    if np.size(xlims) == 1:
        # Mask to the right or to the left of a single column.
        if mask_right:
            mask = xgrid >= xlims[0]
        else:
            mask = xgrid < xlims[0]
    elif np.size(xlims) == 2:
        # Mask between or exterior to two columns.
        if mask_between:
            mask = (xgrid >= xlims[0]) & (xgrid < xlims[1])
        else:
            mask = (xgrid < xlims[0]) | (xgrid >= xlims[1])
    else:
        msg = 'xlims must be a list or array of up to 2 indices.'
        raise ValueError(msg)

    return mask
def build_mask_sloped(shape, point1, point2, mask_above=True, verbose=False):
    """Mask all pixels on one side of the line through point1 and point2.

    :param shape: tuple containing the intended shape of the mask array.
    :param point1: the first x, y pair defining the boundary line.
    :param point2: the second x, y pair defining the boundary line.
    :param mask_above: if True mask pixels above the boundary line, else mask below.
    :param verbose: if True be verbose.

    :type shape: tuple[int]
    :type point1: list[float]
    :type point2: list[float]
    :type mask_above: bool
    :type verbose: bool

    :returns: mask - A mask that removes a diagonal region along the slope defined
        by point1 and point2.
    :rtype: array[bool]
    """
    dimy, dimx = shape

    # Fit a first-order polynomial through the two anchor points.
    anchor_x = np.array([point1[0], point2[0]])
    anchor_y = np.array([point1[1], point2[1]])
    param = np.polyfit(anchor_x, anchor_y, 1)

    # Evaluate the boundary line at every column.
    yline = np.polyval(param, np.arange(dimx))

    if verbose:
        print('line fit param:', param)

    # Row coordinate of every pixel.
    ygrid = np.repeat(np.arange(dimy)[:, np.newaxis], dimx, axis=1)

    # Select the requested side of the boundary line.
    if mask_above:
        mask = (ygrid - yline) >= 0
    else:
        mask = (ygrid - yline) < 0

    return mask
def build_mask_256(subarray='SUBSTRIP256', apex_order1=None):
    """Restrict the analysis to a (N, 2048) section of the image, where N is 256 or less.

    Normally this only applies to the FULL subarray, masking everything but the
    SUBSTRIP256 region. When apex_order1 is given, rows from apex_order1 - 40 to
    apex_order1 + 216 are kept instead.

    :param subarray: the subarray for which to build a mask.
    :param apex_order1: The y-position of the order1 apex at 1.3 microns, in the given subarray.

    :type subarray: str
    :type apex_order1: float

    :returns: mask_256 - A mask that removes any area not related to the trace of the target.
    :rtype: array[bool]
    """
    dimx = 2048

    # Translate the subarray name into a number of rows.
    subarray_rows = {'FULL': 2048, 'SUBSTRIP96': 96, 'SUBSTRIP256': 256}
    if subarray not in subarray_rows:
        msg = 'Unknown subarray: {}'
        raise ValueError(msg.format(subarray))
    dimy = subarray_rows[subarray]

    if apex_order1 is None:
        # Default apex position, defined in SUBSTRIP256 coordinates and shifted
        # to the requested subarray.
        apex_order1 = 40
        if subarray == 'FULL':
            apex_order1 += 1792
        if subarray == 'SUBSTRIP96':
            apex_order1 += -10

    # Round the apex value down to an integer row index.
    apex_order1 = int(apex_order1)

    # Start fully masked, then open a 256-row window around the apex
    # (the apex sits at y ~ 40 in SUBSTRIP256 coordinates).
    mask_256 = np.ones((dimy, dimx), dtype='bool')
    rowmin = max(apex_order1 - 40, 0)
    rowmax = min(apex_order1 + 216, dimy)
    mask_256[rowmin:rowmax, :] = False

    return mask_256
def build_mask_trace(ytrace, subarray='SUBSTRIP256', halfwidth=30,
                     extend_below=False, extend_above=False):
    """Mask out the trace in a given subarray based on the y-positions provided.

    A band of pixels around the trace position of width = 2*halfwidth will be
    masked. Optionally extend_above and extend_below can be used to mask all
    pixels above or below the trace.

    :param ytrace: the trace y-position at each column, must have shape = (2048,).
    :param subarray: the subarray for which to build a mask.
    :param halfwidth: the size of the window to mask around the trace.
    :param extend_below: if True mask all pixels below the trace.
    :param extend_above: if True mask all pixels above the trace.

    :type ytrace: array[float]
    :type subarray: str
    :type halfwidth: float
    :type extend_below: bool
    :type extend_above: bool

    :returns: mask_trace - A mask that removes an area centered on the given trace positions.
    :rtype: array[bool]
    """
    dimx = 2048

    # The trace positions must cover every column.
    if np.shape(ytrace) != (dimx,):
        msg = 'ytrace must have shape (2048,)'
        raise ValueError(msg)

    # Translate the subarray name into a number of rows.
    subarray_rows = {'FULL': 2048, 'SUBSTRIP96': 96, 'SUBSTRIP256': 256}
    if subarray not in subarray_rows:
        msg = 'Unknown subarray: {}'
        raise ValueError(msg.format(subarray))
    dimy = subarray_rows[subarray]

    # Cannot both be True, that would mask everything.
    if extend_below and extend_above:
        msg = 'Only one of extend_below, extend_above should be used.'
        raise ValueError(msg)

    # Signed row distance of every pixel from the trace center
    # (NaN trace positions compare False everywhere and stay unmasked).
    ydist = np.arange(dimy)[:, np.newaxis] - ytrace

    # Mask the band of pixels within a halfwidth of the trace center.
    mask_trace = np.abs(ydist) < halfwidth

    # Optionally mask everything below the trace center.
    if extend_below:
        mask_trace = mask_trace | (ydist < 0)

    # Optionally mask everything above the trace center.
    if extend_above:
        mask_trace = mask_trace | (ydist >= 0)

    return mask_trace
def build_mask_order2_contaminated(ytrace_o1, ytrace_o3, subarray='SUBSTRIP256',
                                   halfwidth_o1=25, halfwidth_o3=15, xlim=150):
    """Build a mask that isolates the contaminated part of the order 2 trace.

    This is done by masking the order 1 trace and everything below, the order
    3 trace and everything above, and all pixels blue-ward (to the right) of xlim.

    :param ytrace_o1: y position of the order 1 trace at every column.
    :param ytrace_o3: y position of the order 3 trace at every column.
    :param subarray: the subarray for which to build a mask.
    :param halfwidth_o1: the size of the window to mask around the order 1 trace.
    :param halfwidth_o3: the size of the window to mask around the order 3 trace.
    :param xlim: the boundary for masking pixels blue-ward (to the right).

    :type ytrace_o1: array[float]
    :type ytrace_o3: array[float]
    :type subarray: str
    :type halfwidth_o1: float
    :type halfwidth_o3: float
    :type xlim: float

    :returns: mask - A mask that removes everything but the contaminated part of the
        order 2 trace.
    :rtype: array[bool]
    """
    dimx = 2048

    # Translate the subarray name into a number of rows.
    subarray_rows = {'FULL': 2048, 'SUBSTRIP96': 96, 'SUBSTRIP256': 256}
    if subarray not in subarray_rows:
        msg = 'Unknown subarray: {}'
        raise ValueError(msg.format(subarray))
    dimy = subarray_rows[subarray]

    # The order 1 trace and everything below it.
    mask_trace_o1 = build_mask_trace(ytrace_o1, subarray=subarray,
                                     halfwidth=halfwidth_o1,
                                     extend_below=True)

    # The order 3 trace and everything above it.
    mask_trace_o3 = build_mask_trace(ytrace_o3, subarray=subarray,
                                     halfwidth=halfwidth_o3,
                                     extend_above=True)

    # Everything blue-ward of xlim.
    mask_blue = build_mask_vertical((dimy, dimx), xlims=[xlim],
                                    mask_right=True)

    # The union of the three masks leaves only the contaminated order 2 region.
    return mask_trace_o1 | mask_trace_o3 | mask_blue
def build_mask_order2_uncontaminated(ytrace_o1, ytrace_o3, subarray='SUBSTRIP256',
                                     halfwidth_o1=25, halfwidth_o3=15,
                                     xlims=None, point1=None, point2=None,
                                     apex_order1=None):
    """Build a mask that isolates the uncontaminated part of the order 2 trace.

    This is done by masking the order 1 trace and everything below, the order
    3 trace and everything above, all pixels outside of the range defined by xlims
    and all pixels below the line defined by point1 and point2.

    :param ytrace_o1: y position of the order 1 trace at every column.
    :param ytrace_o3: y position of the order 3 trace at every column.
    :param subarray: the subarray for which to build a mask.
    :param halfwidth_o1: the size of the window to mask around the order 1 trace.
    :param halfwidth_o3: the size of the window to mask around the order 3 trace.
    :param xlims: the column range containing the uncontaminated trace; pixels
        outside this range are masked. Defaults to [700, 1800].
    :param point1: the first x, y pair defining the boundary line.
    :param point2: the second x, y pair defining the boundary line.
    :param apex_order1: The y-position of the order1 apex at 1.3 microns, in the given subarray.
        Only used to shift the default boundary line; ignored when point1 and point2 are given.

    :type ytrace_o1: array[float]
    :type ytrace_o3: array[float]
    :type subarray: str
    :type halfwidth_o1: float
    :type halfwidth_o3: float
    :type xlims: list[float]
    :type point1: list[float]
    :type point2: list[float]
    :type apex_order1: float

    :returns: mask - A mask that removes everything but the uncontaminated part of the
        order 2 trace.
    :rtype: array[bool]
    """
    dimx = 2048

    # Translate the subarray name into a number of rows.
    if subarray == 'FULL':
        dimy = 2048
    elif subarray == 'SUBSTRIP96':
        dimy = 96
    elif subarray == 'SUBSTRIP256':
        dimy = 256
    else:
        msg = 'Unknown subarray: {}'
        raise ValueError(msg.format(subarray))

    # Default column range of the uncontaminated region.
    if xlims is None:
        xlims = [700, 1800]

    if (point1 is None) ^ (point2 is None):
        msg = 'point1 and point2 must both be None or both be set.'
        raise ValueError(msg)
    elif (point1 is None) & (point2 is None):
        # If no points were given use default values.
        point1 = [1249, 31]  # Assuming SUBSTRIP256.
        point2 = [1911, 253]  # Assuming SUBSTRIP256.

        if subarray == 'FULL':
            point1[1] += 1792
            point2[1] += 1792

        if subarray == 'SUBSTRIP96':
            point1[1] += -10
            point2[1] += -10

        # If apex_order1 was given shift the points as needed.
        if apex_order1 is not None:
            apex_default = 40  # Assuming SUBSTRIP256.

            if subarray == 'FULL':
                apex_default += 1792

            if subarray == 'SUBSTRIP96':
                apex_default += -10

            # Shift points based on apex_order1.
            offset = apex_order1 - apex_default
            point1[1] += offset
            point2[1] += offset

    elif apex_order1 is not None:
        # BUGFIX: previously this message was printed whenever the user provided
        # point1 and point2, even when apex_order1 was not given and so nothing
        # was actually ignored. Only warn when apex_order1 is really discarded.
        msg = ('Using user-provided values for point1 and point2, '
               'apex_order1 will be ignored.')
        print(msg)

    # Mask the order 1 trace and everything below.
    mask_trace_o1 = build_mask_trace(ytrace_o1, subarray=subarray,
                                     halfwidth=halfwidth_o1,
                                     extend_below=True)

    # Mask the order 3 trace and everything above.
    mask_trace_o3 = build_mask_trace(ytrace_o3, subarray=subarray,
                                     halfwidth=halfwidth_o3,
                                     extend_above=True)

    # Mask what is on the left side where orders 1 and 2 are well blended
    mask_vertical = build_mask_vertical((dimy, dimx), xlims, mask_between=False)

    # Mask the corner below the order 2 trace to remove the wings of the order 1 trace.
    mask_sloped = build_mask_sloped((dimy, dimx), point1, point2, mask_above=False)

    # Combine the masks.
    mask = (mask_trace_o1 | mask_trace_o3 | mask_vertical | mask_sloped)

    return mask
def build_mask_order3(subarray='SUBSTRIP256', xlim=None, point1=None, point2=None, apex_order1=None):
    """Build a mask that isolates the order 3 trace.

    This is done by masking all pixels blue-ward (to the right) of xlim, where the
    order 3 transmission goes to zero, and all pixels below the line defined by
    point1 and point2 (the order 1 trace and order 2 trace).

    :param subarray: the subarray for which to build a mask.
    :param xlim: the boundary for masking pixels blue-ward (to the right).
    :param point1: the first x, y pair defining the boundary line.
    :param point2: the second x, y pair defining the boundary line.
    :param apex_order1: The y-position of the order1 apex at 1.3 microns, in the given subarray.
        Only used to shift the default boundary line; ignored when point1 and point2 are given.

    :type subarray: str
    :type xlim: float
    :type point1: list[float]
    :type point2: list[float]
    :type apex_order1: float

    :returns: mask - A mask that removes everything but the order 3 trace.
    :rtype: array[bool]
    """
    dimx = 2048

    # Translate the subarray name into a number of rows.
    if subarray == 'FULL':
        dimy = 2048
    elif subarray == 'SUBSTRIP96':
        dimy = 96
    elif subarray == 'SUBSTRIP256':
        dimy = 256
    else:
        msg = 'Unknown subarray: {}'
        raise ValueError(msg.format(subarray))

    if subarray == 'SUBSTRIP96':
        # Create an empty mask.
        mask = np.zeros((dimy, dimx), dtype='bool')
        # Nothing to be done because order 3 can not be present.
        print('Warning. No mask produced for order 3 when subarray=SUBSTRIP96')
        return mask

    # Default blue-ward cut-off column.
    if xlim is None:
        xlim = 700

    if (point1 is None) ^ (point2 is None):
        msg = 'point1 and point2 must both be None or both be set.'
        raise ValueError(msg)
    elif (point1 is None) & (point2 is None):
        # If no points were given use default values.
        point1 = [0, 132]  # Assuming SUBSTRIP256.
        point2 = [1000, 163]  # Assuming SUBSTRIP256.

        if subarray == 'FULL':
            point1[1] += 1792
            point2[1] += 1792

        if subarray == 'SUBSTRIP96':
            point1[1] += -10
            point2[1] += -10

        # If apex_order1 was given shift the points as needed.
        if apex_order1 is not None:
            apex_default = 40  # Assuming SUBSTRIP256.

            if subarray == 'FULL':
                apex_default += 1792

            if subarray == 'SUBSTRIP96':
                apex_default += -10

            # Shift points based on apex_order1.
            offset = apex_order1 - apex_default
            point1[1] += offset
            point2[1] += offset

    elif apex_order1 is not None:
        # BUGFIX: previously this message was printed whenever the user provided
        # point1 and point2, even when apex_order1 was not given and so nothing
        # was actually ignored. Only warn when apex_order1 is really discarded.
        msg = ('Using user-provided values for point1 and point2, '
               'apex_order1 will be ignored.')
        print(msg)

    # Check how close the boundary line is to the top of the subarray.
    if point1[1] > (dimy - 25 - 10):
        msg = ('Warning: masking for order 3 leaves too little of '
               'order 3 to fit position.')
        print(msg)

    # Mask everything beyond where the order 3 transmission approaches zero.
    mask_vertical = build_mask_vertical((dimy, dimx), [xlim], mask_right=True)

    # Mask everything below order 3.
    mask_sloped = build_mask_sloped((dimy, dimx), point1, point2, mask_above=False)

    # Combine the masks.
    mask = mask_vertical | mask_sloped

    return mask
def wavelength_calibration(xpos, order=1, subarray='SUBSTRIP256'):
    """Find the wavelengths corresponding to a set of x-positions using the
    trace table reference file.

    :param xpos: the array of x-positions to calibrate.
    :param order: the trace order the x-positions correspond to.
    :param subarray: the subarray the x-positions correspond to.

    :type xpos: array[float]
    :type order: int
    :type subarray: str

    :returns: wavelengths - an array of wavelengths corresponding to xpos.
    :rtype: array[float]
    """
    # Look up the wavelength vs x-position relation in the trace table reference file.
    trace_table = soss_read_refs.RefTraceTable()
    ref_wavelengths, ref_xpos = trace_table('X', subarray=subarray, order=order)

    # np.interp requires the sample positions in ascending order.
    sort_args = np.argsort(ref_xpos)
    ref_xpos = ref_xpos[sort_args]
    ref_wavelengths = ref_wavelengths[sort_args]

    # Interpolate the calibration onto the requested positions.
    return np.interp(xpos, ref_xpos, ref_wavelengths)
def calibrate_widths(width_o1, width_o2=None, width_o3=None, subarray='SUBSTRIP256', verbose=False):
    """Fit an exponential function to the wavelength-width relation, for use obtaining
    the contaminated order 2 trace positions.

    :param width_o1: The order 1 trace width at each column, must have shape = (2048,).
    :param width_o2: The order 2 trace width at each column, must have shape = (2048,).
    :param width_o3: The order 3 trace width at each column, must have shape = (2048,).
    :param subarray: The subarray for which to build a mask.
    :param verbose: If set True some diagnostic plots will be made.

    :type width_o1: array[float]
    :type width_o2: array[float]
    :type width_o3: array[float]
    :type subarray: str
    :type verbose: bool

    :returns: pars_width - a list containing the best-fit parameters for the
        wavelength-width relation.
    :rtype list[float]
    """
    dimx = 2048

    # The order 1 widths are mandatory and must cover every column.
    if np.shape(width_o1) != (dimx,):
        raise ValueError('width_o1 must have shape (2048,)')

    # Orders 2 and 3 are optional; substitute all-NaN arrays when absent.
    if width_o2 is None:
        width_o2 = np.full(dimx, fill_value=np.nan)
    elif np.shape(width_o2) != (dimx,):
        raise ValueError('width_o2_uncont must have shape (2048,)')

    if width_o3 is None:
        width_o3 = np.full(dimx, fill_value=np.nan)
    elif np.shape(width_o3) != (dimx,):
        raise ValueError('width_o3_uncont must have shape (2048,)')

    # Convert pixel positions to wavelengths for each order.
    pixels = np.arange(dimx)
    lba_o1 = wavelength_calibration(pixels, order=1, subarray=subarray)
    lba_o2 = wavelength_calibration(pixels, order=2, subarray=subarray)
    lba_o3 = wavelength_calibration(pixels, order=3, subarray=subarray)

    # Pool the three orders into single wavelength and width arrays.
    lba_all = np.concatenate((lba_o1, lba_o2, lba_o3), axis=None)
    width_all = np.concatenate((width_o1, width_o2, width_o3), axis=None)

    # Fit a power law, i.e. a straight line in log-log space, to the pooled data.
    good = np.isfinite(width_all) & np.isfinite(lba_all)
    pars_width = robust_polyfit(np.log(lba_all[good]), np.log(width_all[good]), 1)

    # Make a figure of the trace width versus the wavelength.
    if verbose:
        # Evaluate the best-fit model on a fine wavelength grid.
        lba_fit = np.linspace(np.nanmin(lba_all), np.nanmax(lba_all), 101)
        w0, m = np.exp(pars_width[1]), pars_width[0]  # w = w0 * lba^m
        width_fit = w0 * lba_fit ** m

        plt.figure(figsize=(8, 5))

        plt.scatter(lba_o1, width_o1, marker=',', s=1, color='red',
                    label='Order 1')

        if np.any(np.isfinite(width_o2)):
            plt.scatter(lba_o2, width_o2 + 0.05, marker=',', s=1,
                        color='orange', label='Order 2')

        if np.any(np.isfinite(width_o3)):
            plt.scatter(lba_o3, width_o3 + 0.10, marker=',', s=1, color='navy',
                        label='Order 3')

        plt.plot(lba_fit, width_fit, color='black', linewidth=5,
                 label='Joint Fit:\nwidth = {:6.2F} $\\lambda**({:6.4F})$'.format(w0, m))

        plt.xlabel('Wavelength (microns)', fontsize=12)
        plt.ylabel('Trace Width (pixels)', fontsize=12)
        plt.legend(fontsize=12)

        plt.tight_layout()

        plt.show()
        plt.close()

    return pars_width
def get_soss_centroids(image, mask=None, subarray='SUBSTRIP256', halfwidth=2,
                       poly_orders=None, apex_order1=None,
                       calibrate=True, verbose=False):
    """Determine the trace positions on a real image (native size) with as few
    assumptions as possible using the 'edge trigger' method.
    The algorithm assumes:
    1) The brightest order is order 1 and the target order 1 is the brightest
    of all order 1 traces present.
    2) Order 2 has a minimum in transmission between ~1.0 and ~1.2 microns.
    3) Order 2 widths are the same as order 1 width for the same wavelengths.
    :param image: A 2D image of the detector.
    :param mask: A boolean array of the same shape as image. Pixels corresponding to True values will be masked.
    :param subarray: the subarray for which to build a mask.
    :param halfwidth: the size of the window used when computing the derivatives of the 'edge trigger' method.
    :param poly_orders: Dictionary of polynomial orders to fit to the extracted trace positions for each spectral order.
    :param apex_order1: The y-position of the order1 apex at 1.3 microns, in the given subarray.
        A rough estimate is sufficient as it is only used to mask rows when subarray='FULL' to
        ensure that the target of interest is detected instead of a field target.
    :param calibrate: If True model the wavelength trace width relation, otherwise use the CV3 parameters.
        Default is True.
    :param verbose: If set True some diagnostic plots will be made. Default is False.
    :type image: array[float]
    :type mask: array[bool]
    :type subarray: str
    :type halfwidth: int
    :type poly_orders: dict
    :type apex_order1: float
    :type calibrate: bool
    :type verbose: bool
    :returns: trace_dict - A dictionary containing the trace x, y, width and polynomial fit parameters for each order.
    :rtype: dict
    """
    # Polynomial orders used for each trace; user-supplied values override these defaults.
    default_orders = {'order 1': 11,
                      'order 2': 5,
                      'order 3': 3}
    if poly_orders is not None:
        default_orders = {**default_orders, **poly_orders}
    # Initialize output dictionary.
    centroids = dict()
    # Build a mask that restricts the analysis to a SUBSTRIP256-like region centered on the target trace.
    mask_256 = build_mask_256(subarray=subarray, apex_order1=apex_order1)
    # Combine the subsection mask with the user specified mask.
    if mask is not None:
        mask_256 = mask_256 | mask
    # When verbose, dump the masked image to a FITS file for inspection.
    if verbose:
        hdu = fits.PrimaryHDU()
        hdu.data = np.where(mask_256, np.nan, image)
        hdu.writeto('mask_256.fits', overwrite=True)
    # Get the order 1 trace position.
    result = get_centroids_edgetrigger(image, mask=mask_256,
                                       poly_order=default_orders['order 1'],
                                       halfwidth=halfwidth, mode='combined',
                                       verbose=verbose)
    x_o1, y_o1, w_o1, par_o1 = result
    # Add parameters to output dictionary.
    o1_dict = dict()
    o1_dict['X centroid'] = x_o1
    o1_dict['Y centroid'] = y_o1
    o1_dict['trace widths'] = w_o1
    o1_dict['poly coefs'] = par_o1
    centroids['order 1'] = o1_dict
    # For SUBSTRIP96 only the order 1 can be measured.
    if subarray == 'SUBSTRIP96':
        if verbose:
            # Make a figure showing the order 1 trace.
            _plot_centroids(image, centroids)
        return centroids
    # Update the order1 apex based on the extracted trace.
    apex_order1 = np.nanmin(y_o1)
    # Make a mask to isolate the order 3 trace and combine it with the user-specified mask.
    mask_o3 = build_mask_order3(subarray=subarray, apex_order1=apex_order1)
    if mask is not None:
        mask_o3 = mask_o3 | mask
    if verbose:
        hdu = fits.PrimaryHDU()
        hdu.data = np.where(mask_o3, np.nan, image)
        hdu.writeto('mask_o3.fits', overwrite=True)
    # Get the order 3 trace position.
    result = get_centroids_edgetrigger(image, mask=mask_o3,
                                       poly_order=default_orders['order 3'],
                                       halfwidth=halfwidth, mode='combined',
                                       verbose=verbose)
    x_o3, y_o3, w_o3, par_o3 = result
    # Add parameters to output dictionary.
    o3_dict = dict()
    o3_dict['X centroid'] = x_o3
    o3_dict['Y centroid'] = y_o3
    o3_dict['trace widths'] = w_o3
    o3_dict['poly coefs'] = par_o3
    centroids['order 3'] = o3_dict
    # Make masks for the second order trace - split in two segments:
    # A) Uncontaminated region 700 < x < 1800 - fit both edges combined (default).
    # B) Contaminated region (x = 0-200) - fit only the top edge.
    # Make a mask to isolate the uncontaminated order 2 trace and combine it with the user-specified mask.
    mask_o2_uncont = build_mask_order2_uncontaminated(y_o1, y_o3,
                                                      subarray=subarray, apex_order1=apex_order1)
    if mask is not None:
        mask_o2_uncont = mask_o2_uncont | mask
    if verbose:
        hdu = fits.PrimaryHDU()
        hdu.data = np.where(mask_o2_uncont, np.nan, image)
        hdu.writeto('mask_o2_uncont.fits', overwrite=True)
    # Get the raw trace positions for the uncontaminated part of the order 2 trace.
    result = get_centroids_edgetrigger(image, mask=mask_o2_uncont,
                                       poly_order=None, halfwidth=halfwidth,
                                       mode='combined', verbose=verbose)
    x_o2_uncont, y_o2_uncont, w_o2_uncont, par_o2_uncont = result
    # Model the trace width as a power law in wavelength, or fall back on
    # parameters pre-computed from the CV3 deepstack.
    if calibrate:
        pars_width = calibrate_widths(w_o1, w_o2_uncont, subarray=subarray, verbose=verbose)
    else:
        # Use pre-computed parameters from the CV3 deepstack.
        pars_width = [-0.20711659, 3.16387517]
    w0, m = np.exp(pars_width[1]), pars_width[0]  # w = w0 * lba^m
    # Make a mask to isolate the contaminated order 2 trace and combine it with the user-specified mask.
    mask_o2_cont = build_mask_order2_contaminated(y_o1, y_o3,
                                                  subarray=subarray)
    if mask is not None:
        mask_o2_cont = mask_o2_cont | mask
    if verbose:
        hdu = fits.PrimaryHDU()
        hdu.data = np.where(mask_o2_cont, np.nan, image)
        hdu.writeto('mask_o2_cont.fits', overwrite=True)
    # Get the raw top-edge positions of the contaminated order 2 trace.
    result = get_centroids_edgetrigger(image, mask=mask_o2_cont,
                                       poly_order=None, halfwidth=halfwidth,
                                       mode='topedge', verbose=verbose)
    x_o2_top, y_o2_top, w_o2_top, par_o2_top = result
    # Convert pixel positions to wavelengths for order 2.
    lba_o2_top = wavelength_calibration(x_o2_top, order=2, subarray=subarray)
    # Use the wavelength width relation to obtain the order 2 trace width.
    w_o2_cont = np.where(np.isfinite(w_o2_top), w0 * lba_o2_top**m, np.nan)
    # Finally combine the top-edge positions and the width to get an estimate of the trace center.
    x_o2_cont = np.copy(x_o2_top)
    y_o2_cont = y_o2_top - w_o2_cont/2.
    # Combine the trace positions from the uncontaminated and contaminated sections,
    # preferring the uncontaminated measurement wherever it exists.
    mask_comb = np.isfinite(y_o2_uncont)
    x_o2 = np.where(mask_comb, x_o2_uncont, x_o2_cont)
    y_o2 = np.where(mask_comb, y_o2_uncont, y_o2_cont)
    w_o2 = np.where(mask_comb, w_o2_uncont, w_o2_cont)
    # Fit the combined order 2 trace position with a polynomial.
    mask_fit = np.isfinite(x_o2) & np.isfinite(y_o2)
    if default_orders['order 2'] is None:
        par_o2 = []
    else:
        par_o2 = robust_polyfit(x_o2[mask_fit], y_o2[mask_fit], default_orders['order 2'])
        y_o2 = np.polyval(par_o2, x_o2)
    if verbose:
        # Determine an appropriate figure size.
        nrows, ncols = image.shape
        if subarray == 'FULL':
            aspect = 1
            figsize = ncols/64, nrows/64
        else:
            aspect = 2
            figsize = ncols/64, nrows/32
        # Make a figure showing how the order 2 trace was built from segments A and B.
        plt.figure(figsize=figsize)
        plt.title('Order 2 Trace Positions')
        plt.imshow(image, origin='lower', cmap='inferno', norm=colors.LogNorm(), aspect=aspect)
        plt.plot(x_o2_cont, y_o2_cont, color='red', label='Contaminated')
        plt.plot(x_o2_uncont, y_o2_uncont, color='navy', label='Uncontaminated')
        plt.plot(x_o2, y_o2, color='black', label='Polynomial Fit')
        plt.xlabel('Spectral Pixel', fontsize=14)
        plt.ylabel('Spatial Pixel', fontsize=14)
        plt.legend(fontsize=12)
        plt.xlim(-0.5, ncols - 0.5)
        plt.ylim(-0.5, nrows - 0.5)
        plt.tight_layout()
        plt.show()
        plt.close()
    # Add parameters to output dictionary.
    o2_dict = dict()
    o2_dict['X centroid'] = x_o2
    o2_dict['Y centroid'] = y_o2
    o2_dict['trace widths'] = w_o2
    o2_dict['poly coefs'] = par_o2
    centroids['order 2'] = o2_dict
    if verbose:
        # Make a figure showing the trace for all orders.
        _plot_centroids(image, centroids)
    return centroids
def main():
    """Placeholder for potential multiprocessing."""
    # Intentionally a no-op; kept as the script entry point.
    return None
if __name__ == '__main__':
main()
| 35.327175 | 120 | 0.643125 |
ace5c901d09a597c6f6bdb8bebb71e65a9f6839c | 2,697 | py | Python | open_horadric_lib/proxy/protocol_adapter.py | got686/open_horadric_lib | 7ccaac17268e5e5174f49edb1d4a0a1fe0293cdd | [
"MIT"
] | null | null | null | open_horadric_lib/proxy/protocol_adapter.py | got686/open_horadric_lib | 7ccaac17268e5e5174f49edb1d4a0a1fe0293cdd | [
"MIT"
] | 1 | 2020-01-06T00:29:21.000Z | 2020-01-28T21:51:41.000Z | open_horadric_lib/proxy/protocol_adapter.py | got686/open_horadric_lib | 7ccaac17268e5e5174f49edb1d4a0a1fe0293cdd | [
"MIT"
] | 2 | 2019-10-30T13:37:58.000Z | 2020-01-05T20:42:11.000Z | from __future__ import annotations
import json
from typing import TypeVar
import msgpack
from flask.wrappers import Request as FlaskRequest
from flask.wrappers import Response
import yaml
from google.protobuf.json_format import MessageToDict
from google.protobuf.json_format import ParseDict
from google.protobuf.message import Message
from open_horadric_lib.base.context import Context
from open_horadric_lib.proxy.protocol_parser import ProtocolParser
from open_horadric_lib.proxy.protocol_parser import ProtocolType
MessageType = TypeVar("MessageType", bound=Message)
class ProtocolAdapter:
def __init__(self, protocol_parser: ProtocolParser = None):
if protocol_parser is None:
protocol_parser = ProtocolParser()
self.protocol_parser = protocol_parser
def get_request(self, request: FlaskRequest, context: Context) -> MessageType:
if not request.data:
return context.request_message_type()
protocol = self.protocol_parser.get_input_protocol_type()
if protocol == ProtocolType.MSGPACK:
content = msgpack.loads(request.data or b"\x80", raw=False)
message = ParseDict(content, context.request_message_type())
elif protocol == ProtocolType.JSON:
content = json.loads(request.data or "{}")
message = ParseDict(content, context.request_message_type())
elif protocol == ProtocolType.YAML:
content = yaml.safe_load(request.data or "{}")
message = ParseDict(content, context.request_message_type())
elif protocol == ProtocolType.PROTOBUF:
message = context.request_message_type.FromString(request.data)
else:
raise ValueError("Unexpected protocol type {}".format(protocol))
return message
def make_response(self, response: MessageType, context: Context):
protocol = self.protocol_parser.get_output_protocol_type()
if protocol == ProtocolType.MSGPACK:
content_type = "application/x-msgpack"
content = msgpack.dumps(MessageToDict(response))
elif protocol == ProtocolType.JSON:
content_type = "application/json"
content = json.dumps(MessageToDict(response))
elif protocol == ProtocolType.YAML:
content_type = "application/x-yaml"
content = yaml.dump(MessageToDict(response))
elif protocol == ProtocolType.PROTOBUF:
content_type = "application/x-protobuf"
content = response.SerializeToString()
else:
raise ValueError("Unexpected protocol type {}".format(protocol))
return Response(content, content_type=content_type)
| 40.253731 | 82 | 0.701891 |
ace5c997507e0758f5a1eb89c5aec4c4c4fa24cf | 82,946 | py | Python | surveyGrids.py | thommevans/TESS_ACWG | 43060b5457bf7c50dee9491c605a1d5255d5e98b | [
"MIT"
] | null | null | null | surveyGrids.py | thommevans/TESS_ACWG | 43060b5457bf7c50dee9491c605a1d5255d5e98b | [
"MIT"
] | null | null | null | surveyGrids.py | thommevans/TESS_ACWG | 43060b5457bf7c50dee9491c605a1d5255d5e98b | [
"MIT"
] | null | null | null | import pdb, sys, os
import csv
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
import pickle
from . import Utils, processTargetLists, downloadTargetLists
from . import surveySetup
#from astropy.io import ascii
from astropy.table import Table
try:
import pysynphot
pysynphotImport = True
except:
pysynphotImport = False
FIGDIR = os.path.join( os.getcwd(), 'Figures' )
def quickCycle1():
    """
    Generates quick-look Teq-vs-Rp scatter figures for the JWST Cycle 1
    transiting-planet programs listed in a local 'cycle1.csv' file.

    The CSV is read with columns: planet name, (unused), Rp [Earth radii],
    Teq [K], and an observation type flag (1=transit, 2=eclipse,
    3=phase curve).  Three figures are drawn (all observations, emission
    subset, transmission subset); note they are not saved to disk here.
    """
    def gridEdgesFunc( surveyName='ACWG' ):
        # Cell edges of the ACWG survey grid (Teq in K, Rp in Earth radii).
        # NOTE(review): TeqK/RpRE are only assigned when surveyName=='ACWG';
        # any other value would raise UnboundLocalError at the return.
        if surveyName=='ACWG':
            TeqK = np.array( [ 100, 350, 800, 1250, 1750, 2250, 3000 ] )
            RpRE = np.array( [ 0.3, 1.50, 2.75, 4.00, 10.00, 25 ] )
        return TeqK, RpRE
    d = np.loadtxt( 'cycle1.csv', delimiter=',', dtype=str )
    pl = d[:,0]
    RpRE = np.array(d[:,2],dtype=float)
    TeqK = np.array(d[:,3],dtype=float)
    obsType = np.array(d[:,4],dtype=int)
    n = len( RpRE )
    ixs0 = np.arange( n )
    ixs1 = ixs0[( obsType==1 )] # transits
    ixs2 = ixs0[( obsType==2 )] # eclipses
    ixs3 = ixs0[( obsType==3 )] # phase curves
    titleStr0 = 'JWST Cycle 1 targets that will be observed GTO + GO'
    titleStr0 += '\nNo distinction for different instrument modes '
    titleStr1 = '{0}\ntransits + eclipses + phase curves'.format( titleStr0 )
    titleStr2a = '{0}\ntransmission (i.e. transits + phase curves)'.format( titleStr0 )
    titleStr2b = '{0}\nemission (i.e. eclipses + phase curves)'.format( titleStr0 )
    # Marker colors, legend labels and sizes for each observation type:
    c1 = 'Orange'
    c2 = 'Cyan'
    c3 = 'Magenta'
    l1 = 'Transits'
    l2 = 'Eclipses'
    l3 = 'Phase curves'
    ms0 = 10
    fp = 1.5
    # Each entry pairs a figure title with the (indices, marker, color,
    # label, marker size) specs of the subsets to overplot:
    z = [ [ titleStr1,[ [ixs1,'o',c1,l1,ms0], [ixs2,'d',c2,l2,ms0], \
                        [ixs3,'*',c3,l3,fp*ms0] ] ], \
          [ titleStr2b,[ [ixs2,'d',c2,l2,ms0], [ixs3,'*',c3,l3,fp*ms0] ] ], \
          [ titleStr2a,[ [ixs1,'o',c1,l1,ms0], [ixs3,'*',c3,l3,fp*ms0] ] ] ]
    for i in z:
        fig, ax, ax2 = generateAxisScatter( wideFormat=True, titleStr='', showLegend=False )
        title_fs = 18
        toplineY = 0.98
        fig.text( 0.02, toplineY-0.02, i[0], fontsize=title_fs, weight='heavy', \
                  rotation=0, horizontalalignment='left', verticalalignment='top' )
        survey = { 'surveyName':'ACWG', 'gridEdges':gridEdgesFunc }
        Tgrid, Rgrid = drawGrid( ax, survey=survey )
        for j in i[1]:
            print( j[1] )
            # Filled markers first, then black outlines, then a single
            # off-axis point plotted purely to generate the legend entry:
            ax.plot( TeqK[j[0]], RpRE[j[0]], j[1], mfc=j[2], mec='none', \
                     alpha=0.8, ms=j[4], label='' )
            ax.plot( TeqK[j[0]], RpRE[j[0]], j[1], mfc='none', mec='Black', \
                     alpha=1, ms=j[4], label='' )
            ax.plot( [-TeqK[j[0]][0]], [-RpRE[j[0]][0]], j[1], mfc=j[2], mec='Black', \
                     alpha=1, ms=j[4], label=j[3] )
        ax.legend( ncol=len( i[1] ), loc='lower right', bbox_to_anchor=[0.8,1], fontsize=16 )
    # pdb.set_trace()
def Confirmed( ipath='confirmedProperties.pkl', survey={}, SMFlag='TSM', HeatMap=False ):
    """
    Generates the grid/scatter figures for confirmed planets and renames
    the saved files so they include the survey's observation sample label.
    Returns None.
    """
    # Fixed plotting options for the confirmed-planet figures:
    figPaths = transmissionGridConfirmed( ipath=ipath, wideFormat=True, \
                                          showNeptuneRadius=False, \
                                          showJupiterRadius=False, \
                                          survey=survey, addSignature=False, \
                                          SMFlag=SMFlag, HeatMap=HeatMap )
    # Insert the observation sample label into every output filename:
    tag = 'Confirmed_{0}_'.format( survey['obsSample'] )
    for pathDict in figPaths: # one dict of PDF paths, one of PNG paths
        for oldPath in list( pathDict.values() ):
            newPath = oldPath.replace( 'Confirmed_', tag )
            if os.path.isfile( newPath ):
                os.remove( newPath )
            os.rename( oldPath, newPath )
    plt.close( 'all' )
    return None
def TOIs( ipath='toiProperties.pkl', survey={}, RARanges='all', SMFlag='TSM', \
          onlyPCs=False, HeatMap=False ):
    """
    Generates the top-SM grid figures for TOIs over a set of RA and Dec
    restrictions, renaming each saved file to encode its restriction.

    Parameters:
      ipath: pickle file of TOI properties.
      survey: survey definition dictionary (see surveySetup).
      RARanges: 'all' for a single all-sky figure label, or 'completeSet'
        to additionally include the full 0-24h range.
      SMFlag: 'TSM' or 'ESM' ranking metric.
      onlyPCs: restrict to TOIs with 'PC' TFOPWG disposition.
      HeatMap: overlay the cell-count heat map.

    Returns a nested dictionary of output file paths keyed by Dec label
    then RA label.
    """
    wideFormat = True
    addSignature = False
    # Dec restrictions: [label, DecMin_deg, DecMax_deg] (None = unbounded):
    DecRestrictions = [ ['DecAll',None,None], ['DecNth',-20,None], ['DecSth',None,20] ]
    z = readTOIProperties( ipath=ipath, SMFlag=SMFlag)[0]
    n0 = len( z['planetName'] )
    # Only count TOIs with finite Teq, SM, and radius values:
    ixs = np.isfinite( z['TeqK'] )*np.isfinite( z['SM'] )*np.isfinite( z['RpValRE'] )
    print( '\nReading in {0:.0f} TOIs total.'.format( n0 ) )
    print( 'Returning {0:.0f} TOIs with radii, {1}, and Teq values.'\
           .format( ixs.sum(), SMFlag ) )
    print( '\nSaved:' )
    RARestrictions = Utils.getRARanges()
    if RARanges=='completeSet':
        RARestrictions += [ [ 0, 24 ] ]
    opaths = {}
    for i in DecRestrictions:
        opaths[i[0]] = {}
        for RA in RARestrictions:
            # Label used in the output filenames for this RA range:
            if RARanges=='all':
                r = 'RAall'
            else:
                r = 'RA{0:.0f}-{1:.0f}h'.format( RA[0], RA[1] )
            figPaths = transmissionGridTOIs( ipath=ipath, wideFormat=wideFormat, \
                                             addSignature=addSignature, survey=survey, \
                                             RAMin_hr=RA[0], RAMax_hr=RA[1], \
                                             DecMin_deg=i[1], DecMax_deg=i[2],
                                             SMFlag=SMFlag, onlyPCs=onlyPCs, \
                                             HeatMap=HeatMap )
            opaths[i[0]][r] = []
            # Rename each saved PDF/PNG to encode the Dec and RA labels:
            for f in figPaths: # PDFs and PNGs
                for k in list( f.keys() ):
                    opath = f[k]
                    if f[k].find( '.pdf' )>0:
                        fnew = f[k].replace( '.pdf', '_{0}_{1}.pdf'.format( i[0], r ) )
                    elif opath.find( '.png' )>0:
                        fnew = f[k].replace( '.png', '_{0}_{1}.png'.format( i[0], r ) )
                    if os.path.isfile( fnew ):
                        os.remove( fnew )
                    os.rename( f[k], fnew )
                    opaths[i[0]][r] += [ fnew ]
            plt.close( 'all' )
    return opaths
#############################################################################
# Transmission/Emission spectroscopy survey:
def transmissionGridTOIs( ipath='toiProperties.pkl', wideFormat=True, \
                          addSignature=False, survey={}, \
                          RAMin_hr=None, RAMax_hr=None, \
                          DecMin_deg=None, DecMax_deg=None, \
                          SMFlag='TSM', onlyPCs=False, ASCII=False, HeatMap=False ):
    """
    Produces the Teq-Rp grid figure listing top-ranked TOIs (i.e. planet
    candidates that have not been confirmed) in each cell, after applying
    the survey pre-cuts and optional RA/Dec restrictions.

    If ASCII is True, returns the ranked planet-name list instead of
    saving figures; otherwise saves PDF+PNG and returns two dictionaries
    of output paths (PDF, PNG).
    """
    showGrid = True
    z, dateStr = readTOIProperties( ipath=ipath, SMFlag=SMFlag )
    ostr = 'TOIs'
    if onlyPCs == True:
        ostr = 'TOIs_onlyPCs'
    n0 = len( z['planetName'] )
    # Survey-specific pre-cuts (returns surviving indices + labels):
    ixs0, cutStr, titleStr = survey['preCuts']( z )
    # Exclude targets outside the RA limits:
    RAStr, RAMin_hr, RAMax_hr = Utils.processRARestriction( RAMin_hr, RAMax_hr )
    ixsRA = ( z['RA_hr'][ixs0]>=RAMin_hr )*( z['RA_hr'][ixs0]<=RAMax_hr )
    # Exclude targets outside the Dec limits:
    DecStr, DecMin_deg, DecMax_deg = Utils.processDecRestriction( DecMin_deg, DecMax_deg )
    ixsDec = ( z['Dec_deg'][ixs0]>=DecMin_deg )*( z['Dec_deg'][ixs0]<=DecMax_deg )
    RADecStr = '{0}\n{1}\nNo bright limits have been applied\n'.format( RAStr, DecStr )
    if onlyPCs == True:
        # Keep only TOIs whose name ends with the '(PC)' disposition tag:
        ixsPCs = ( [i[-4:]=='(PC)' for i in z['planetName'][ixs0]] )
        ixs = np.arange( n0 )[ixs0][ixsRA*ixsDec*ixsPCs]
    else:
        ixs = np.arange( n0 )[ixs0][ixsRA*ixsDec]
    pl = z['planetName'][ixs]
    Teq = z['TeqK'][ixs]
    Ts = z['TstarK'][ixs]
    MpVal = z['MpValME'][ixs]
    RpVal = z['RpValRE'][ixs]
    SM = z['SM'][ixs]
    onames = {}
    # Radius-temperature grid plot listing the top-ranked planets in each cell:
    if ASCII:
        plList = plotTeqRpGrid( Teq, RpVal, Ts, (SMFlag, SM) , pl, \
                               titleStr=titleStr, dateStr=dateStr, \
                               survey=survey, RADecStr=RADecStr, ASCII=ASCII, \
                               HeatMap=HeatMap )
        return plList
    fig2, ax2 = plotTeqRpGrid( Teq, RpVal, Ts, (SMFlag, SM) , pl, \
                               titleStr=titleStr, dateStr=dateStr, \
                               survey=survey, RADecStr=RADecStr, HeatMap=HeatMap )
    onames['2'] = '{0}_gridTop{1}s.pdf'.format( ostr, SMFlag )
    # Explanatory note added below the figure title:
    toiNote = 'TOIs with "PC" TFOPWG Disposition shown in darker font\n'
    if onlyPCs == True:
        toiNote = 'Only TOIs with "PC" TFOPWG Disposition are displayed\n'
    toiNote += 'Masses estimated from empirical relation (adapted from Chen & Kipping 2017)'
    fig2.text( 0.08, 0.91-0.10, toiNote, \
               c='black', fontsize=14, horizontalalignment='left', \
               verticalalignment='bottom' )
    if addSignature==True:
        for ax in [ax2]:
            addSignatureToAxis( ax )
    figs = { '2':fig2 }
    # Output directory depends on figure format and ranking metric:
    if wideFormat==True:
        odirExt = 'survey{0}/wideFormat/TOIs/{1}'.format( survey['surveyName'], \
                                                          SMFlag)
        if onlyPCs:
            odirExt = odirExt+'/onlyPCs'
    else:
        odirExt = 'survey{0}/narrowFormat/TOIs/{1}'.format( survey['surveyName'], \
                                                            SMFlag)
    odir = os.path.join( FIGDIR, odirExt )
    if os.path.isdir( odir )==False:
        os.makedirs( odir )
    opathsPDF = {}
    opathsPNG = {}
    sourceStr = 'Source: NASA Exoplanet Archive ({0})'.format( dateStr )
    for k in ['2']:
        figs[k].text( 0.97, 0.01, sourceStr, fontsize=10, \
                      horizontalalignment='right', verticalalignment='bottom' )
        if addSignature==True:
            onames[k] = onames[k].replace( '.pdf', '_wSignature.pdf' )
        opathk = os.path.join( odir, onames[k] )
        figs[k].savefig( opathk )
        opathk_png = opathk.replace( '.pdf', '.png' )
        figs[k].savefig( opathk_png )
        opathsPDF[k] = opathk
        opathsPNG[k] = opathk_png
        print( '{0}\n{1}'.format( opathk, opathk_png ) )
    print( 'RADecStr = {0}'.format( RADecStr ) )
    return opathsPDF, opathsPNG
def transmissionGridTESS( publishedMasses=True, wideFormat=True, addSignature=False, SMFlag = 'TSM' ):
    """
    Grid figure for confirmed TESS planets, either with or without
    peer-reviewed published masses (publishedMasses flag).

    NOTE: Currently unused and possibly out of date per the original
    author; see the surveyName placeholder below.
    """
    # surveyName was referenced below but never defined; this empty string
    # is a placeholder so the directory-path formatting does not crash:
    surveyName = '' #This variable is used below but undefined, this is added as a placeholder definition
    showGrid = True
    z = readConfirmedTESSProperties( publishedMasses=publishedMasses, SMFlag=SMFlag )
    if publishedMasses==True:
        ostr = 'ConfirmedWithMassTESS'
        titleStr = 'Confirmed TESS planets with peer-reviewed published masses'
    else:
        ostr = 'ConfirmedNoMassTESS'
        titleStr = 'Confirmed TESS planets without peer-reviewed published masses'
    n0 = len( z['planetName'] )
    # Exclude very big and small stars:
    ixs0 = ( z['RsRS']>=0.05 )*( z['RsRS']<10 )
    # TODO = Add option to apply a bright limit?
    ixs = np.arange( n0 )[ixs0]#[ixs2][ixs3]
    print( '{0:.0f} planets have >5-sigma mass measurements.'.format( len( ixs ) ) )
    pl = z['planetName'][ixs]
    SM = z['SM'][ixs]
    Teq = z['TeqK'][ixs]
    Ts = z['TstarK'][ixs]
    MpVal = z['MpValME'][ixs]
    RpVal = z['RpValRE'][ixs]
    RpLsig = z['RpLsigRE'][ixs]
    RpUsig = z['RpUsigRE'][ixs]
    onames = {}
    # Radius-temperature grid plot listing the top-ranked planets in each cell:
    fig2, ax2 = plotTeqRpGrid( Teq, RpVal, Ts, (SMFlag, SM), pl, titleStr=titleStr )
    onames['2'] = '{0}_gridTop{1}s.pdf'.format( ostr, SMFlag )
    if publishedMasses==False:
        fig2.text( 0.10, 0.905-0.025, 'Masses estimated from empirical relation', \
                   c='black', fontsize=14, horizontalalignment='left', \
                   verticalalignment='bottom' )
    if addSignature==True:
        for ax in [ax2]:
            addSignatureToAxis( ax )
    figs = { '2':fig2 }
    # NOTE(review): with the placeholder surveyName='' these paths become
    # 'survey/wideFormat' etc. — confirm intended directory layout.
    if wideFormat==True:
        odirExt = 'survey{0}/wideFormat'.format( surveyName ) #surveyName is undefined
    else:
        odirExt = 'survey{0}/narrowFormat'.format( surveyName )
    odir = os.path.join( FIGDIR, odirExt )
    if os.path.isdir( odir )==False:
        os.makedirs( odir )
    for k in ['2']:
        if addSignature==True:
            onames[k] = onames[k].replace( '.pdf', '_wSignature.pdf' )
        opath = os.path.join( odir, onames[k] )
        figs[k].savefig( opath )
        print( opath )
    return None
def transmissionGridConfirmed( ipath='confirmedProperties.pkl', wideFormat=True, \
                               survey={}, addSignature=False, showGrid=True, \
                               showNeptuneRadius=False, showJupiterRadius=False, \
                               SMFlag='TSM', HeatMap=False, ASCII=False ):
    """
    Produces the full set of Teq-Rp figures for confirmed planets: two
    scatter plots (with/without SM cuts), the top-SM grid figure, and two
    TESS-highlighted scatter plots.  If ASCII is True, returns only the
    ranked planet-name list from the grid instead of saving figures;
    otherwise saves PDF+PNG files and returns two dictionaries of output
    paths (PDF, PNG).
    """
    z, dateStr = readConfirmedProperties( ipath=ipath, SMFlag = SMFlag )
    ostr = 'Confirmed'
    # NOTE(review): bare 'print' below is a no-op expression (leftover?):
    print
    # Not applying Dec restrictions to Confirmed planets for now:
    #DecStr, DecMin_deg, DecMax_deg = processDecRestriction( None, None )
    RADecStr = ''
    # Survey-specific pre-cuts (returns surviving indices + labels):
    ixs, cutStr, titleStr = survey['preCuts']( z, survey['obsSample'] )
    print( '{0:.0f} planets have mass measurements or estimates'.format( len( ixs ) ) )
    print( 'and orbit stars with radii 0.05-10 R_Sun' )
    pl = z['planetName'][ixs]
    RA = z['RA'][ixs]
    Dec = z['Dec'][ixs]
    RA_deg = z['RA_deg'][ixs]
    Dec_deg = z['Dec_deg'][ixs]
    SM = z['SM'][ixs]
    Teq = z['TeqK'][ixs]
    Ts = z['TstarK'][ixs]
    MpVal = z['MpValME'][ixs]
    MpLsig = z['MpLsigME'][ixs]
    MpUsig = z['MpUsigME'][ixs]
    RpVal = z['RpValRE'][ixs]
    RpLsig = z['RpLsigRE'][ixs]
    RpUsig = z['RpUsigRE'][ixs]
    TESS = np.array( z['TESS'][ixs], dtype=int )
    # Build a whitespace-stripped list of TESS-discovered planet names,
    # used later to bold those entries in the grid figure:
    plTess = pl[TESS>0]
    plTess=list(plTess)
    for i in range(len(plTess)):
        plTess[i] = plTess[i].replace(' ', '')
    onames = {}
    # Radius-temperature plot for all planets with well-measured mass:
    if not ASCII:
        fig1a, ax1a = plotTeqRpScatter( pl, Teq, RpVal, Ts, (SMFlag, SM), TESS,
                                        applySMcuts=False, \
                                        wideFormat=wideFormat, survey=survey, \
                                        showGrid=showGrid, titleStr=titleStr, \
                                        indicateTESS=False, dateStr=dateStr, \
                                        showNeptuneRadius=showNeptuneRadius, \
                                        showJupiterRadius=showJupiterRadius )
        onames['1a'] = '{0}_allPlanets.pdf'.format( ostr )
        # Radius-temperature plot for all planets with well-measured mass
        # and SM cuts applied:
        fig1b, ax1b = plotTeqRpScatter( pl, Teq, RpVal, Ts, (SMFlag, SM), TESS, \
                                        applySMcuts=True, \
                                        wideFormat=wideFormat, survey=survey, \
                                        showGrid=showGrid, titleStr=titleStr, \
                                        indicateTESS=False, dateStr=dateStr, \
                                        showNeptuneRadius=showNeptuneRadius, \
                                        showJupiterRadius=showJupiterRadius )
        onames['1b'] = '{0}_{1}cutsApplied.pdf'.format( ostr, SMFlag )
        # Radius-temperature grid plot listing the top-ranked planets in each cell:
        extraNotes = 'TESS discoveries shown in bold font'
        fig2, ax2 = plotTeqRpGrid( Teq, RpVal, Ts, (SMFlag, SM), pl, plTess, \
                                   titleStr=titleStr, extraNotes=extraNotes, \
                                   dateStr=dateStr, survey=survey, \
                                   RADecStr=RADecStr, HeatMap=HeatMap )
        fig2.text( 0.10, 0.995, cutStr, c='black', fontsize=12, \
                   horizontalalignment='left', verticalalignment='top' )
        #print( 'PLOT input to plotTeqRpGrid from transmissionGridConfirmed:' )
        #print( len( pl ) )
        #print( ipath )
        #pdb.set_trace()
    else:
        #print( 'ASCII input to plotTeqRpGrid from transmissionGridConfirmed:' )
        #print( len( pl ) )
        #print( ipath )
        #pdb.set_trace()
        plList = plotTeqRpGrid( Teq, RpVal, Ts, (SMFlag, SM), pl, plTess, \
                                titleStr=titleStr, \
                                dateStr=dateStr, survey=survey, ASCII=ASCII, \
                                RADecStr=RADecStr, HeatMap=HeatMap )
        return plList
    onames['2'] = '{0}_gridTop{1}s.pdf'.format( ostr, SMFlag )
    # Scatter plots without the grid:
    fig3a, ax3a = plotTeqRpScatter( pl, Teq, RpVal, Ts, (SMFlag, SM), TESS, \
                                    applySMcuts=False, \
                                    wideFormat=wideFormat, survey=survey, \
                                    showGrid=showGrid, titleStr=titleStr, \
                                    indicateTESS=True, dateStr=dateStr, \
                                    showNeptuneRadius=showNeptuneRadius, \
                                    showJupiterRadius=showJupiterRadius )
    onames['3a'] = '{0}_allPlanets_showsTESS.pdf'.format( ostr )
    fig3b, ax3b = plotTeqRpScatter( pl, Teq, RpVal, Ts, (SMFlag, SM), TESS, \
                                    applySMcuts=True, \
                                    wideFormat=wideFormat, survey=survey, \
                                    showGrid=showGrid, titleStr=titleStr,
                                    indicateTESS=True, dateStr=dateStr, \
                                    showNeptuneRadius=showNeptuneRadius, \
                                    showJupiterRadius=showJupiterRadius )
    onames['3b'] = '{0}_{1}cutsApplied_showsTESS.pdf'.format( ostr, SMFlag )
    if addSignature==True:
        for ax in [ax1a,ax1b,ax2,ax3a,ax3b]:
            addSignatureToAxis( ax )
    figs = { '1a':fig1a, '1b':fig1b, '2':fig2, '3a':fig3a, '3b':fig3b }
    print( '\nSaved:' )
    # Output directory depends on figure format and ranking metric:
    if wideFormat==True:
        odirExt = 'survey{0}/wideFormat/Confirmed/{1}'.format( survey['surveyName'], \
                                                               SMFlag )
    else:
        odirExt = 'survey{0}/narrowFormat/Confirmed/{1}'.format( survey['surveyName'], \
                                                                 SMFlag )
    odir = os.path.join( FIGDIR, odirExt )
    if os.path.isdir( odir )==False:
        os.makedirs( odir )
    opathsPDF = {}
    opathsPNG = {}
    sourceStr = 'Source: NASA Exoplanet Archive ({0})'.format( dateStr )
    for k in ['1a','1b','2','3a','3b']:
        figs[k].text( 0.97, 0.01, sourceStr, fontsize=10, \
                      horizontalalignment='right', verticalalignment='bottom' )
        if addSignature==True:
            onames[k] = onames[k].replace( '.pdf', '_wSignature.pdf' )
        opathk = os.path.join( odir, onames[k] )
        figs[k].savefig( opathk )
        opathk_png = opathk.replace( '.pdf', '.png' )
        figs[k].savefig( opathk_png )
        opathsPDF[k] = opathk
        opathsPNG[k] = opathk_png
        print( '{0}\n{1}'.format( opathk, opathk_png ) )
    return opathsPDF, opathsPNG
def transmissionPredictedTESS( showSolarSystem=True, wideFormat=False, \
                               surveyModule='ACWG', showGrid=True, \
                               showStellarTrack=True, showNeptuneRadius=True, \
                               showJupiterRadius=True, crossHair=None, \
                               addSignature=False, SMFlag='TSM' ):
    """
    Focusing on TESS predicted yield, not worrying about survey grid.
    Currently unused, out of date. In the middle of being repurposed
    to create plots of predicted planets.

    NOTE(review): several indexing bugs remain from the repurposing
    (flagged inline below); this routine will not run to completion as-is.
    """
    z = readPredictedProperties(SMFlag = SMFlag)
    ostr = 'predictedTESS'
    survey = { 'surveyName':'ACWG', 'obsSample':'PublishedMassesOnly', \
               'framework':'ACWG', 'gridEdges':surveySetup.gridEdges, \
               'thresholdTSM':surveySetup.thresholdTSM, \
               'thresholdESM':surveySetup.thresholdESM, \
               'preCuts':surveySetup.preCutsConfirmed }
    n0 = len( z['RsRS'] )
    #Exclude very big and small stars:
    ixs = ( z['RsRS']>=0.05 )*( z['RsRS']<10 )
    SM = z['SM'][ixs]
    Teq = z['TeqK'][ixs]
    Ts = z['TstarK'][ixs]
    MpVal = z['MpValME'][ixs]
    RpVal = z['RpValRE'][ixs]
    # NOTE(review): TESS and pl are Python lists of length n0 (len(ixs)
    # equals the full array length, not the mask count); indexing them
    # with the boolean array below (e.g. TESS[ixs]) raises TypeError.
    TESS = [0 for i in range(len(ixs))]
    pl = ['PredictedPlanet-{0}'.format(str(i)) for i in range(len(ixs))]
    dateStr = processTargetLists.getDateStr( 'predictedProperties_v2.pkl', \
                                             whichList='Predicted' )
    # Radius-temperature plot for all planets with well-measured mass:
    titleStr = 'Top ranked predicted planets'
    # NOTE(review): Teq/RpVal/Ts/SM were already masked above, so the
    # second [ixs] here double-masks them — likely an error.
    fig0a, ax0a = plotTeqRpScatter( pl, Teq[ixs], RpVal[ixs], Ts[ixs], \
                                    ('SMFlag', SM[ixs]), \
                                    TESS[ixs], applyTSMcuts=False, ms=6, alpha=1, \
                                    starColors=True, showSolarSystem=showSolarSystem, \
                                    showStellarTrack=showStellarTrack, \
                                    wideFormat=wideFormat, titleStr=titleStr, \
                                    surveyModule=surveyModule, showGrid=showGrid, \
                                    indicateTESS=False, dateStr=dateStr, \
                                    showNeptuneRadius=showNeptuneRadius, \
                                    showJupiterRadius=showJupiterRadius, survey=survey )
    if crossHair is not None:
        # NOTE(review): pl is a list, so (pl==crossHair) is a scalar False,
        # not an element-wise match — confirm intended behavior.
        ix = ( pl==crossHair )
        ax0a.axvline( Teq[ix], c='HotPink' )
        ax0a.axhline( RpVal[ix], c='HotPink' )
    # pdb.set_trace()
    if wideFormat==True:
        odirExt = 'survey{0}/wideFormat'.format( surveyModule )
    else:
        odirExt = 'survey{0}/narrowFormat'.format( surveyModule )
    odir = os.path.join( FIGDIR, odirExt )
    if os.path.isdir( odir )==False:
        os.makedirs( odir )
    onames = {}
    # Output filenames, with suffixes flagging the solar-system ('_wSS')
    # and stellar-track ('_wST') overlays:
    for k in ['0a','0b','1a','1b','2a','2b','3']:
        onames[k] = '{0}_{1}.pdf'.format( ostr, k )
    if showSolarSystem==True:
        for k in ['0a','0b','1a','1b','2a','2b','3']:
            onames[k] = onames[k].replace( '.pdf', '_wSS.pdf' )
    if showStellarTrack==True:
        for k in ['0a','0b','1a','1b','2a','2b','3']:
            onames[k] = onames[k].replace( '.pdf', '_wST.pdf' )
    opaths = {}
    for k in ['0a','0b','1a','1b','2a','2b','3']:
        opaths[k] = os.path.join( odir, onames[k] )
    fig0a.savefig( opaths['0a'] )
    fig0b, ax0b = plotTeqRpScatter( pl[ixs], Teq[ixs], RpVal[ixs], Ts[ixs], \
                                    ('SMFlag', SM[ixs]), \
                                    TESS[ixs], applyTSMcuts=False, ms=6, alpha=1, \
                                    starColors=True, showSolarSystem=showSolarSystem, \
                                    showStellarTrack=showStellarTrack, \
                                    wideFormat=wideFormat, titleStr=titleStr, \
                                    surveyModule=surveyModule, showGrid=showGrid, \
                                    indicateTESS=False, dateStr=dateStr, \
                                    showNeptuneRadius=showNeptuneRadius, \
                                    showJupiterRadius=showJupiterRadius, survey=survey )
    plotTeqRpTESS( ax0b, SMFlag = SMFlag )
    fig0b.savefig( opaths['0b'] )
    # Same as previous but with low alpha value for known planets:
    titleStr = 'Predicted TESS planets'
    fig1, ax1 = plotTeqRpScatter( pl[ixs], Teq[ixs], RpVal[ixs], Ts[ixs], \
                                  ('SMFlag', SM[ixs]), \
                                  TESS[ixs], applyTSMcuts=False, ms=6, alpha=0.3, \
                                  starColors=True, showSolarSystem=showSolarSystem, \
                                  showStellarTrack=showStellarTrack, \
                                  wideFormat=wideFormat, titleStr=titleStr, \
                                  surveyModule=surveyModule, showGrid=showGrid, \
                                  indicateTESS=False, dateStr=dateStr, \
                                  showNeptuneRadius=showNeptuneRadius, \
                                  showJupiterRadius=showJupiterRadius, survey=survey )
    fig1.savefig( opaths['1a'] )
    # Add the bright predicted TESS planets:
    titleStr = 'Predicted TESS planets'
    plotTeqRpTESS( ax1, showSolarSystem=False, showNeptuneRadius=False, \
                   showJupiterRadius=False, SMFlag = SMFlag, z=z )
    fig1.savefig( opaths['1b'] )
    # Radius-temperature plot for all planets with well-measured mass:
    fig2, ax2 = plotTeqRpScatter( pl[ixs], Teq[ixs], RpVal[ixs], Ts[ixs], \
                                  ('SMFlag', SM[ixs]), \
                                  TESS[ixs], applyTSMcuts=False, ms=3, alpha=1, \
                                  starColors=False, showSolarSystem=showSolarSystem, \
                                  showStellarTrack=showStellarTrack, \
                                  wideFormat=wideFormat, titleStr=titleStr, \
                                  surveyModule=surveyModule, showGrid=showGrid, \
                                  indicateTESS=False, dateStr=dateStr, \
                                  showNeptuneRadius=showNeptuneRadius, \
                                  showJupiterRadius=showJupiterRadius, survey=survey )
    if wideFormat==True:
        odirExt = 'survey{0}/wideFormat/{1}'.format( surveyModule, SMFlag )
    else:
        odirExt = 'survey{0}/narrowFormat/{1}'.format( surveyModule, SMFlag )
    odir = os.path.join( FIGDIR, odirExt )
    if os.path.isdir( odir )==False:
        os.makedirs( odir )
    fig2.savefig( opaths['2a'] )
    # Add the bright predicted TESS planets:
    plotTeqRpTESS( ax2, showSolarSystem=False, showNeptuneRadius=False, \
                   showJupiterRadius=False, SMFlag = SMFlag, z=z )
    fig2.savefig( opaths['2b'] )
    # Make a plot with the TESS planets only:
    # titleStr = 'TESS predicted planets (Barclay et al., 2018)'
    # fig3, ax3, ax3Legend = generateBasicAxis( wideFormat=wideFormat, titleStr=titleStr )
    # plotTeqRpTESS( ax3, titleStr=titleStr, showSolarSystem=showSolarSystem, \
    #                showNeptuneRadius=showNeptuneRadius, \
    #                showJupiterRadius=showJupiterRadius, SMFlag = SMFlag, z=z )
    # if showStellarTrack==True:
    #     ax3 = addStellarTrack( ax3 )
    #     #opath3 = os.path.join( FIGDIR, onames['3'] )
    #     fig3.savefig( opaths['3'] )
    print( '\nSaved:' )
    for k in ['0a','0b','1a','1b','2a','2b','3']:
        print( onames[k] )
    return None
#############################################################################
# Utility routines:
def printTopPredictedSubNeptunes( z, onlySubNeptunes=True ):
    """
    Prints the top-5 predicted planets, ranked by TSM, in 100K-wide
    equilibrium-temperature bins between 200-1000K.

    Parameters:
      z: dictionary of equal-length numpy arrays of predicted planet
         properties; must contain 'cad2min', 'RpValRE', 'Vmag', 'Jmag',
         'Kmag', 'TSM', 'MpValME', 'aRs', 'ideg', 'T14hr', 'Pday',
         'TeqK', 'RsRS', 'TstarK'.
      onlySubNeptunes: only changes the printed header text; the 1.5-4
         Earth-radius cut below is always applied.

    Returns None; output is written to stdout.
    """
    # Keep 2-min cadence targets with sub-Neptune radii:
    ixs = ( z['cad2min']==1 )*( z['RpValRE']>1.5 )*( z['RpValRE']<4 )
    # Exclude targets brighter than the bright limit in all of V, J, K:
    brightLim = 6
    ixs *= ( z['Vmag']>brightLim )*( z['Jmag']>brightLim )*( z['Kmag']>brightLim )
    TSM = z['TSM'][ixs]
    RpRE = z['RpValRE'][ixs]
    MpME = z['MpValME'][ixs]
    aRs = z['aRs'][ixs]
    ideg = z['ideg'][ixs]
    T14hr = z['T14hr'][ixs]
    Pday = z['Pday'][ixs]
    Teq = z['TeqK'][ixs]
    RsRS = z['RsRS'][ixs]
    Ts = z['TstarK'][ixs]
    V = z['Vmag'][ixs]
    J = z['Jmag'][ixs]
    K = z['Kmag'][ixs]
    # Temperature bin edges, 200-1000K in 100K steps:
    Tedges = np.arange( 200, 1100, 100 )
    nbins = len( Tedges )-1
    ntop = 5
    if onlySubNeptunes==True:
        print( '\nTop predicted **sub-Neptunes** cooler than 1000K in 100K bins:' )
    else:
        print( '\nTop predicted planets cooler than 1000K in 100K bins:' )
    for i in range( nbins ):
        Tl = Tedges[i]
        Tu = Tedges[i+1]
        ixs1 = ( Teq>=Tl )*( Teq<Tu )
        print( '\n>>>> Between {0:.0f}-{1:.0f}K:'.format( Tl, Tu ) )
        hstr = ' TSM Mp(ME) Rp(RE) Rs(RS) Ts(K) Tp(K) '
        hstr += 'P(d) aRs i(deg) T14(h) V J K'
        print( hstr )
        print( '{0}'.format( 110*'-' ) )
        m = int( ixs1.sum() )
        # Bin members ordered by decreasing TSM:
        ixs2 = np.arange( m )[np.argsort( TSM[ixs1] )][::-1]
        n = int( min( [ len( ixs2 ), ntop ] ) )
        # Bug fix: iterate only over the planets actually available in
        # this bin (previously range(ntop), which raised IndexError for
        # bins containing fewer than ntop planets):
        for j in range( n ):
            ostr = '{0:.0f}. '.format( j+1 ).rjust( 5 )
            ostr += '{0:.1f} '.format( TSM[ixs1][ixs2][j] ).rjust( 8 )
            ostr += '{0:.2f} '.format( MpME[ixs1][ixs2][j] ).rjust( 8 )
            ostr += '{0:.2f} '.format( RpRE[ixs1][ixs2][j] ).rjust( 8 )
            ostr += '{0:.2f} '.format( RsRS[ixs1][ixs2][j] ).rjust( 8 )
            ostr += '{0:.0f} '.format( Ts[ixs1][ixs2][j] ).rjust( 8 )
            ostr += '{0:.0f} '.format( Teq[ixs1][ixs2][j] ).rjust( 8 )
            ostr += '{0:.2f} '.format( Pday[ixs1][ixs2][j] ).rjust( 9 )
            ostr += '{0:.2f} '.format( aRs[ixs1][ixs2][j] ).rjust( 8 )
            ostr += '{0:.1f} '.format( ideg[ixs1][ixs2][j] ).rjust( 8 )
            ostr += '{0:.1f} '.format( T14hr[ixs1][ixs2][j] ).rjust( 8 )
            ostr += '{0:.1f} '.format( V[ixs1][ixs2][j] ).rjust( 8 )
            ostr += '{0:.1f} '.format( J[ixs1][ixs2][j] ).rjust( 8 )
            ostr += '{0:.1f} '.format( K[ixs1][ixs2][j] ).rjust( 8 )
            print( ostr )
    return None
def plotTeqRpTESS( ax, showSolarSystem=True, showNeptuneRadius=True, \
                   showJupiterRadius=True, SMFlag = 'TSM', z={}):
    """
    Overplots bright (V, J, or K < 10) 2-min cadence predicted TESS
    planets on the given Teq-Rp axis, colored by host-star temperature,
    then adds the solar-system reference markers.  Returns the axis.
    """
    # Bright limit (mag) applied to any of the V, J, K bands:
    m = 10
    ixs = ( z['cad2min']==1 )*( ( z['Vmag']<m )+( z['Jmag']<m )+( z['Kmag']<m ) )
    SM = z['SM'][ixs]
    Teq = z['TeqK'][ixs]
    RpVal = z['RpValRE'][ixs]
    printTopPredictedSubNeptunes( z )
    Ts = z['TstarK'][ixs]
    n = len( Teq )
    alpha = 1
    ms = 6
    z0 = 1000
    # SM cuts are currently hard-coded off; the elif/else branches below
    # are therefore never reached.
    applySMcuts = False
    for i in range( n ):
        c = Utils.getStarColor( Ts[i] )
        # NOTE(review): elsewhere (addTopSMs) these threshold functions
        # return a (value, label) tuple; if applySMcuts were enabled the
        # SM[i]>thresholdSM comparison below may need unpacking — verify.
        if SMFlag == 'TSM':
            thresholdSM = surveySetup.thresholdTSM( RpVal[i], framework='ACWG' )
        elif SMFlag == 'ESM':
            thresholdSM = surveySetup.thresholdESM(RpVal[i], framework='ACWG')
        if applySMcuts==False: # plotting everything regardless of TSM
            ax.plot( [Teq[i]], [RpVal[i]], 'o', ms=ms, alpha=alpha, \
                     mfc=c, mec=c, zorder=z0+i )
        elif SM[i]>thresholdSM: # if TSM cuts applied, this one is high enough
            ax.plot( [Teq[i]], [RpVal[i]], 'o', ms=ms, alpha=alpha, \
                     mfc=c, mec=c, zorder=z0+i )
        else: # otherwise plot as a smaller background point
            ax.plot( [Teq[i]], [RpVal[i]], 'o', ms=0.5*ms, alpha=alpha, \
                     mfc=c, mec=c, zorder=0 )
    ax = addSolarSystem( ax, showSolarSystem=showSolarSystem, \
                         showNeptuneRadius=showNeptuneRadius, \
                         showJupiterRadius=showJupiterRadius )
    return ax
def drawGrid( ax, cgrid=None, zorder=0, survey={} ):
    """
    Draws the survey grid lines on the given axis.

    The cell edges come from survey['gridEdges'], evaluated for
    survey['surveyName'].  Returns the (Tgrid, Rgrid) edge arrays.
    """
    lineColor = np.array( [ 201, 148, 199 ] )/256. if cgrid is None else cgrid
    Tgrid, Rgrid = survey['gridEdges']( survey['surveyName'] )
    Rlo, Rhi = Rgrid.min(), Rgrid.max()
    Tlo, Thi = Tgrid.min(), Tgrid.max()
    # Vertical line at each temperature edge:
    for T in Tgrid:
        ax.plot( [T,T], [Rlo,Rhi], '-', c=lineColor, zorder=zorder )
    # Horizontal line at each radius edge:
    for R in Rgrid:
        ax.plot( [Tlo,Thi], [R,R], '-', c=lineColor, zorder=zorder )
    return Tgrid, Rgrid
def addSignatureToAxis( ax ):
    """
    Stamps an attribution signature in the upper-left corner of the axis,
    in axis coordinates so it is independent of the data limits.
    """
    grey = 0.2*np.ones( 3 )
    ax.text( 0.02, 1, 'Figure by T. Mikal-Evans', fontsize=12, color=grey, \
             verticalalignment='top', transform=ax.transAxes, zorder=0 )
    return None
def plotTeqRpGrid( TeqK, RpRE, TstarK, SM, pl, plTess=None, cgrid=None, titleStr='', \
                   RADecStr='', dateStr='', wideFormat=True, survey={}, \
                   ASCII=False, HeatMap=True, extraNotes=None ):
    """
    Plots the survey grid of planets/TOIs by TeqK and RpRE, listing the
    top-ranked targets per cell via addTopSMs.

    Parameters:
      SM: tuple of ('TSM' or 'ESM', array of metric values).
      plTess: optional list of TESS-discovered names to show in bold.
      ASCII: if True, skips all plotting and returns (planet-name list,
        dateStr) instead of (fig, ax).
      HeatMap: overlay a per-cell count heat map with a colorbar.
    """
    if cgrid is None:
        cgrid = np.array( [ 201, 148, 199 ] )/256.
    if not ASCII:
        # The heat-map layout includes an extra colorbar axis:
        if HeatMap:
            fig, ax, ax2, axc = generateAxisGrid( wideFormat=wideFormat, \
                                                  titleStr=titleStr, \
                                                  RADecStr=RADecStr, HeatMap=HeatMap )
        else:
            fig, ax, ax2 = generateAxisGrid( wideFormat=wideFormat, titleStr=titleStr, \
                                             RADecStr=RADecStr, HeatMap=HeatMap )
    else:
        ax = None
    Tgrid, Rgrid = survey['gridEdges']( survey['surveyName'] )
    nT = len( Tgrid )
    nR = len( Rgrid )
    # Cells are plotted on a uniform integer lattice regardless of the
    # physical cell widths; labels below map back to the physical edges:
    xLines = np.arange( 0.5, nT+0.5 )
    yLines = np.arange( 0.5, nR+0.5 )
    if ASCII:
        plList = addTopSMs( ax, pl, SM, TeqK, RpRE, TstarK, Tgrid, Rgrid, \
                            xLines, yLines, survey=survey, ASCII=True )
        return plList, dateStr
    ax, SMstr = addTopSMs( ax, pl, SM, TeqK, RpRE, TstarK, Tgrid, Rgrid, \
                           xLines, yLines, plTess=plTess, survey=survey )
    for i in range( nT ):
        ax.plot( [xLines[i],xLines[i]], [yLines.min(),yLines.max()], '-', \
                 c=cgrid, zorder=1 )
    for i in range( nR ):
        ax.plot( [xLines.min(),xLines.max()], [yLines[i],yLines[i]], '-', \
                 c=cgrid, zorder=1 )
    if HeatMap:
        # Creates a new custom colormap for the per-cell counts:
        cdict = { 'red':[ [0, 1, 204/255],
                          [1/6, 204/255, 153/255],
                          [1/2, 153/255, 1],
                          [1, 1, 1]],
                  'green':[ [0, 1, 153/255],
                            [1/6, 153/255, 204/255],
                            [2/6, 204/255, 1],
                            [4/6, 1, 204/255],
                            [5/6, 204/255, 153/255],
                            [0.999, 153/255, 1],
                            [1, 1, 1]],
                  'blue':[ [0, 1, 1],
                           [2/6, 1, 153/255],
                           [0.999, 153/255, 1],
                           [1, 1, 1]] }
        cmap = matplotlib.colors.LinearSegmentedColormap( 'testCmap', \
                                                          segmentdata=cdict, N=256 )
        ax, val = addHeatMap(ax, xLines, yLines, TeqK, RpRE, Tgrid, Rgrid, cmap)
        addColorBar(axc, val, cmap)
    formatAxisTicks( ax )
    # Relabel the lattice ticks with the physical cell-edge values:
    ax.xaxis.set_ticks( xLines, minor=False )
    ax.yaxis.set_ticks( yLines, minor=False )
    ax.set_xticklabels( Tgrid )
    ax.set_yticklabels( Rgrid )
    if wideFormat==False:
        subtitleY = 0.94
        dySubTitle = 0.01
    else:
        subtitleY = 0.925
        dySubTitle = 0.015
    fig.text( 0.08, subtitleY, SMstr, c='green', fontsize=14, \
              horizontalalignment='left', verticalalignment='bottom' )
    otherNotes = '{0} values are listed in square brackets \n'.format( SM[0] )
    otherNotes += 'Asterisks indicate top-5 predicted (Barclay et al., 2018)'.format(SM[0])
    if extraNotes is not None:
        otherNotes += '\n{0}'.format( extraNotes )
    fig.text( 0.08, subtitleY-dySubTitle, otherNotes, c='black', \
              fontsize=14, horizontalalignment='left', verticalalignment='top' )
    # Pad the axis limits slightly beyond the outermost grid lines:
    dx = 0.02*( xLines.max()-xLines.min() )
    dy = 0.03*( yLines.max()-yLines.min() )
    ax.set_xlim( [ xLines.min()-dx, xLines.max()+dx ] )
    ax.set_ylim( [ yLines.min()-dy, yLines.max()+dy ] )
    return fig, ax
def formatAxisTicks( ax ):
    """
    Applies the standard axis styling: thickened bottom/left spines,
    hidden top/right spines, and enlarged tick labels and tick marks.
    Returns the axis.
    """
    labelSize = 14
    majorLen = 10
    minorLen = 5
    lineWidth = 2
    ax.spines['bottom'].set_linewidth( lineWidth )
    ax.spines['left'].set_linewidth( lineWidth )
    for hiddenSpine in [ 'top', 'right' ]:
        ax.spines[hiddenSpine].set_visible( False )
    ax.tick_params( labelsize=labelSize )
    for axisObj in [ ax.xaxis, ax.yaxis ]:
        axisObj.set_tick_params( length=majorLen, width=lineWidth, which='major' )
        axisObj.set_tick_params( length=minorLen, width=lineWidth, which='minor' )
    return ax
def addTopSMs( ax, pl, SM, TeqK, RpRE, TstarK, Tgrid, Rgrid, \
               xLines, yLines, plTess=None, survey={}, ASCII=False ):
    """
    Supplementary routine that writes the top-5 targets (ranked by SM and
    above the survey's per-cell threshold) into each grid cell.

    Parameters:
      ax: figure axes from other supplementary routines (ignored when
        ASCII is True).
      pl, TeqK, RpRE, TstarK: planet names and property arrays.
      SM: tuple of ('TSM' or 'ESM', array of metric values).
      Tgrid, Rgrid: physical TeqK and RpRE cell-edge values.
      xLines, yLines: lattice coordinates of the plotted grid lines.
      plTess: optional whitespace-stripped TESS-discovery names, shown in
        bold when plotting.
      survey: dictionary of survey values (framework + threshold funcs).
      ASCII: if True, returns the list of formatted name strings instead
        of drawing; otherwise returns (ax, threshold-label string).
    """
    plNames = []
    framework = survey['framework']
    nx = len( xLines )-1 # Number of lines on x axis
    ny = len( yLines )-1 # Number of lines on y axis
    n = len( pl ) # Number of planets
    ixs0 = np.arange( n )
    text_fs = 12
    nList = 5
    ms = 8
    for i in range( nx ): # loop over temperature columns
        ixsi = ( TeqK>=Tgrid[i] )*( TeqK<Tgrid[i+1] ) # Show if within the temperature range
        # x positions for the star-color symbol and the text label:
        xsymb = xLines[i] + 0.06*( xLines[i+1]-xLines[i] )
        xtxt = xLines[i] + 0.12*( xLines[i+1]-xLines[i] )
        for j in range( ny ): # loop over radius rows
            # Indices inside box above the TSM threshold:
            RpREj = 0.5*( Rgrid[j]+Rgrid[j+1] )
            # Find the threshold SM (value + label string) for the cell:
            if SM[0] == 'TSM':
                SMj, SMstr = survey['thresholdTSM']( RpREj, framework=framework )
            elif SM[0] == 'ESM':
                SMj, SMstr = survey['thresholdESM']( RpREj, framework=framework )
            ixsj = ( RpRE>=Rgrid[j] )*( RpRE<Rgrid[j+1] ) # Show if within the radius range
            # Show if in the cell and SM higher than threshold:
            ixsij = ixs0[ixsi*ixsj*( SM[1]>SMj )]
            nij = len( ixsij ) # Number in cell higher than threshold
            if nij>0:
                # Order by decreasing SM:
                ixso = np.argsort( SM[1][ixsij] )
                ixs = ixsij[ixso][::-1]
                nwrite = min( [ nij, nList ] ) # number above threshold in cell or 5
                # Vertical spacing of the per-cell target list:
                dy = ( yLines[j+1]-yLines[j] )/float(nList+0.5)
                y0 = yLines[j]+4.8*dy
                # SM of the 5th-best predicted planet in this cell, used
                # to flag targets that beat the predicted population:
                predSM = getFifthPredicted( SM[0], Rgrid[j+1], Rgrid[j], \
                                            Tgrid[i+1], Tgrid[i] )
                for k in range( nwrite ): #For each planet (max 5)
                    ytxt = y0-k*dy
                    plStr = pl[ixs][k].replace( ' ', '' )
                    plStr = '{0} [{1:.0f}]'.format( plStr, SM[1][ixs][k] ) #Planet Name [SM]
                    if SM[1][ixs][k] >= predSM:
                        plStr += '*'
                    plNames.append(plStr)
                    if not ASCII:
                        # Silver if APC or CP disposition, black otherwise:
                        if ( plStr.find( '(APC' )>0 )+( plStr.find( '(CP)' )>0 ):
                            c = 'Silver'
                            wt = 'normal'
                        else: # Black if PC
                            c = 'Black'
                            wt = 'normal'
                        # Bold the entry when it is a TESS discovery:
                        if plTess != None:
                            if plStr.split(' ')[0] in plTess:
                                wt = 'bold'
                        ax.text( xtxt, ytxt, plStr, fontsize=text_fs, weight=wt, color=c, \
                                 horizontalalignment='left', verticalalignment='center' )
                        ck = Utils.getStarColor( TstarK[ixs][k] )
                        ax.plot( [xsymb], [ytxt], 'o', ms=ms, mec=ck, mfc=ck )
    if ASCII:
        # test
        #print( 'addTopSMs ASCII', len( pl ), len( plNames ) )
        #pdb.set_trace()
        return plNames
    #else:
    #    # test
    #    #print( 'addTopSMs PLOT', len( pl ), len( plNames ) )
    #    #pdb.set_trace()
    return ax, SMstr
def generateAxisScatter( xlim=[0,3100], ylim=[0,26], wideFormat=False, \
                         whichType='RpTeq', titleStr='', DecStr='', \
                         showLegend=True ):
    """
    Build a figure/axis pair for a scatter plot and write the title.

    NOTE: xlim, ylim, and DecStr are accepted for interface compatibility
    but are not used inside this routine.
    """
    fig, ax, axLegend, axc = generateAxes( wideFormat=wideFormat,
                                           whichType=whichType,
                                           showLegend=showLegend )
    # Bold title in the top-left corner of the figure:
    titleFontSize = 18
    yTitle = 0.98 - 0.02
    fig.text( 0.02, yTitle, titleStr, fontsize=titleFontSize, weight='heavy',
              rotation=0, horizontalalignment='left',
              verticalalignment='bottom' )
    return fig, ax, axLegend
def generateAxisGrid( xlim=[0,3100], ylim=[0,26], wideFormat=False, whichType='RpTeq', \
                      RADecStr='', titleStr='', showLegend=True, HeatMap=False ):
    """
    Build a figure/axis pair for a grid plot, write the title, and add
    either a colorbar label (HeatMap) or an RA/Dec subtitle.

    Parameters:
      xlim, ylim: accepted for interface compatibility; not used here.
      wideFormat, whichType, showLegend, HeatMap: forwarded to generateAxes.
      RADecStr: subtitle shown top-right (non-HeatMap case only).
      titleStr: bold figure title, top-left.

    Returns:
      ( fig, ax, axLegend, axc ) when HeatMap is True,
      ( fig, ax, axLegend ) otherwise.
    """
    # The original if/else called generateAxes with identical arguments in
    # both branches, so a single call suffices:
    fig, ax, axLegend, axc = generateAxes( wideFormat=wideFormat, whichType=whichType, \
                                           showLegend=showLegend, HeatMap=HeatMap )
    title_fs = 18
    toplineY = 0.98
    fig.text( 0.02, toplineY-0.02, titleStr, fontsize=title_fs, weight='heavy', \
              rotation=0, horizontalalignment='left', verticalalignment='bottom' )
    subtitle_fs = 14
    if HeatMap:
        label_fs = 14
        cb_text = 'Fraction of TOIs vs Fraction of Predicted'
        # Creates the color bar label:
        axc.text( 0, 2, cb_text, fontsize=label_fs, \
                  horizontalalignment='left', verticalalignment='center', \
                  rotation=0, transform=axc.transAxes )
        return fig, ax, axLegend, axc
    else:
        fig.text( 0.98, toplineY, RADecStr, fontsize=subtitle_fs, weight='normal', \
                  rotation=0, horizontalalignment='right', verticalalignment='top' )
        return fig, ax, axLegend
def generateAxes( wideFormat=True, whichType='RpTeq', showLegend=True, HeatMap=False ):
    """
    Create the base figure, main axis, optional spectral-type legend axis,
    and optional heat-map colorbar axis used by the plotting routines.

    Parameters:
      wideFormat: False -> 11x9 in figure; True -> 16x9 in with different
                  margins and legend placement.
      whichType: 'RpTeq', 'RpInsol', or 'RpInsolLog'; controls axis labels
                  and tick formatting (via formatAxes).
      showLegend: add a small legend axis of stellar spectral-type colors.
      HeatMap: also create a thin colorbar axis above the legend.

    Returns:
      ( fig, ax, axLegend, axc ); axLegend is None when showLegend is
      False, axc is None when HeatMap is False.
    """
    if wideFormat==False:
        fig = plt.figure( figsize=[11,9] )
        xlow = 0.09
        ylow = 0.085
        axh = 0.8    # main axis height (figure fraction)
        axw = 0.90   # main axis width
        dxl = 0.06   # offset for the y-axis label
        xlow2 = xlow+0.5*axw     # legend axis x position
        ylow2 = ylow+axh+0.005   # legend axis y position
        axw2 = 0.5*axw
        # NOTE(review): this branch sets dyNewLine while the wide branch
        # sets dySubTitle; neither is used inside this routine.
        subtitleY = 0.94
        dyNewLine = 0.01
    else:
        fig = plt.figure( figsize=[16,9] )
        xlow = 0.064
        ylow = 0.085
        axh = 0.715
        axw = 0.93
        dxl = 0.044
        xlow2 = xlow+0.7*axw
        ylow2 = ylow+axh+0.02
        axw2 = 0.25*axw
        subtitleY = 0.925
        dySubTitle = 0.015
    ax = fig.add_axes( [ xlow, ylow, axw, axh ] )
    if showLegend==True:
        axLegend = fig.add_axes( [ xlow2, ylow2, axw2, 0.09*axh ] )
        addStellarSpectralTypeLegend( axLegend, ms=8, text_fs=10 )
    else:
        axLegend = None
    if HeatMap:
        axc = fig.add_axes([xlow2, ylow2+0.125, axw2, 0.015*axh]) # Colorbar axis
    ax = formatAxes( ax, whichType=whichType )
    label_fs = 16
    # Axis labels depend on whether the x axis is Teq or insolation:
    if whichType=='RpTeq':
        fig.text( xlow-dxl, ylow+0.5*axh, '$R_p$ ($R_E$)', fontsize=label_fs, \
                  rotation=90, horizontalalignment='right', verticalalignment='center' )
        fig.text( xlow+0.5*axw, 0.001, '$T_{\\rm{eq}}$ (K)', fontsize=label_fs, \
                  rotation=0, horizontalalignment='center', verticalalignment='bottom' )
    elif ( whichType=='RpInsolLog' )+( whichType=='RpInsol' ):
        fig.text( xlow-dxl, ylow+0.5*axh, '$R_p$ ($R_E$)', fontsize=label_fs, \
                  rotation=90, horizontalalignment='right', verticalalignment='center' )
        fig.text( xlow+0.5*axw, 0.001, 'Insolation relative to Earth', rotation=0,
                  fontsize=label_fs, horizontalalignment='center', \
                  verticalalignment='bottom' )
    if showLegend==True:
        subtitle_fs = 14
        subtitleStr = 'Circles indicate host star spectral type'
        if wideFormat==True:
            sptY = 0.87
        else:
            sptY = 0.90
        fig.text( xlow2+0.5*axw2, sptY, subtitleStr, fontsize=subtitle_fs, \
                  horizontalalignment='center', verticalalignment='bottom', \
                  weight='normal', rotation=0 )
    if HeatMap:
        return fig, ax, axLegend, axc
    else:
        return fig, ax, axLegend, None
def formatAxes( ax, whichType='RpTeq', xlim='default', ylim='default', \
                xticksMajor='default', yticksMajor='default', \
                xticksMinor='default', yticksMinor='default' ):
    """
    Apply axis limits, tick locations, and the module's standard styling.

    Parameters:
      ax: matplotlib axis to format.
      whichType: 'RpTeq', 'RpInsol', or 'RpInsolLog'; selects the default
                 limits/ticks and (for RpInsolLog) a log x scale. Any other
                 value drops into pdb.
      xlim, ylim, *ticksMajor, *ticksMinor: pass 'default' to use the
                 per-whichType defaults, or explicit values to override.

    Returns:
      The formatted axis.
    """
    tick_fs = 14
    tl = 10   # major tick length
    tlm = 5   # minor tick length
    tw = 2    # tick/spine line width
    # NOTE: the original compared with "is 'default'", which relies on
    # CPython string interning and raises a SyntaxWarning on Python 3.8+;
    # replaced throughout with the correct equality comparison.
    if whichType=='RpTeq':
        if xlim == 'default':
            xlim = [ 0, 3100 ]
        if ylim == 'default':
            ylim = [ 0, 26 ]
        if xticksMinor == 'default':
            xticksMinor = np.arange( 0, 4000, 100 )
        if xticksMajor == 'default':
            xticksMajor = np.arange( 0, 4000, 500 )
        if yticksMinor == 'default':
            yticksMinor = np.arange( 0, 30, 1 )
        if yticksMajor == 'default':
            yticksMajor = np.arange( 0, 30, 2 )
    elif whichType=='RpInsol':
        if xlim == 'default':
            xlim = [ 0, 11e3 ]
        if ylim == 'default':
            ylim = [ 0, 26 ]
        if xticksMinor == 'default':
            xticksMinor = np.arange( 0, 10500, 100 )
        if xticksMajor == 'default':
            xticksMajor = np.arange( 0, 10500, 1000 )
        if yticksMinor == 'default':
            yticksMinor = np.arange( 0, 30, 1 )
        if yticksMajor == 'default':
            yticksMajor = np.arange( 0, 30, 2 )
    elif whichType=='RpInsolLog':
        ax.set_xscale( 'log' )
        ax.xaxis.set_major_formatter( matplotlib.ticker.FuncFormatter( tickLogFormat ) )
        if xlim == 'default':
            xlim = [ 0.1, 11e3 ]
        if ylim == 'default':
            ylim = [ 0, 26 ]
        if xticksMinor == 'default':
            # Minor ticks at 1-2-3... within each decade from 0.1 to 1e4:
            xticksMinor = np.arange( 0.1, 1, 0.1 )
            xticksMinor = np.concatenate( [ xticksMinor, np.arange( 1, 10, 1 ) ] )
            xticksMinor = np.concatenate( [ xticksMinor, np.arange( 10, 100, 10 ) ] )
            xticksMinor = np.concatenate( [ xticksMinor, np.arange( 100, 1000, 100 ) ] )
            xticksMinor = np.concatenate( [ xticksMinor, np.arange( 1000, 10000, 1000 ) ] )
        if xticksMajor == 'default':
            xticksMajor = np.logspace( -1, 4, 6 )
        if yticksMinor == 'default':
            yticksMinor = np.arange( 0, 30, 1 )
        if yticksMajor == 'default':
            yticksMajor = np.arange( 0, 30, 2 )
    else:
        # Unrecognized plot type; drop into the debugger as elsewhere in
        # this module:
        pdb.set_trace()
    ax.xaxis.set_ticks( xticksMinor, minor=True )
    ax.xaxis.set_ticks( xticksMajor, minor=False )
    ax.yaxis.set_ticks( yticksMinor, minor=True )
    ax.yaxis.set_ticks( yticksMajor, minor=False )
    ax.tick_params( labelsize=tick_fs )
    ax.xaxis.set_tick_params( length=tl, width=tw, which='major' )
    ax.xaxis.set_tick_params( length=tlm, width=tw, which='minor' )
    ax.yaxis.set_tick_params( length=tl, width=tw, which='major' )
    ax.yaxis.set_tick_params( length=tlm, width=tw, which='minor' )
    ax.spines['bottom'].set_linewidth( tw )
    ax.spines['left'].set_linewidth( tw )
    ax.spines['top'].set_visible(False)
    ax.spines['right'].set_visible(False)
    ax.set_ylim( ylim )
    ax.set_xlim( xlim )
    return ax
def tickLogFormat( y, pos ):
    """
    Format a log-axis tick value with just enough decimal places.

    Values >= 1 get zero decimals ('10'); values below 1 get as many as
    needed ('0.1', '0.01'). The *pos* argument is required by matplotlib's
    FuncFormatter interface but is unused.
    """
    # Number of decimal places required; 0 for any value >= 1:
    nDecimals = int( np.ceil( np.maximum( -np.log10( y ), 0 ) ) )
    # Build and apply the format in one step:
    return '{0:.{1}f}'.format( y, nDecimals )
def plotTeqRpScatter( planetNames, Teq, RpVal, Ts, SM, TESS, ms=8, alpha=1, \
                      starColors=True, applySMcuts=False, survey={}, \
                      showGrid=True, indicateTESS=False, showSolarSystem=False, \
                      showStellarTrack=False, showNeptuneRadius=True, \
                      showJupiterRadius=True, titleStr='', wideFormat=False, \
                      dateStr='' ):
    """
    Creates a scatter plot of planets on a (Teq, Rp) graph

    Parameters:
      planetNames: list of planet names (str) (currently unused)
      Teq, RpVal, Ts, TESS: lists of planet values (float)
      SM: (TSM or ESM, list of values)
      survey: dictionary of survey properties

    Returns:
      ( fig, ax )
    """
    n = len( Teq )
    nTESS = np.sum( TESS )
    cTESS = np.array( [ 213, 128, 255 ] )/256.   # highlight color for TESS planets
    zTESS = 2*n   # zorder floor for TESS highlight circles
    z0 = 200      # zorder floor for ordinary points
    msTESS = 2*ms # TESS highlight circle is twice the marker size
    fig, ax, ax2 = generateAxisScatter( wideFormat=wideFormat, titleStr=titleStr )
    if showGrid==True:
        Tgrid, Rgrid = drawGrid( ax, survey=survey, zorder=1 )
    framework = survey['framework']
    c0 = 0.9*np.ones( 3 )  # light grey for below-threshold points
    for i in range( n ): # loop over each exoplanet
        if starColors==True:
            c = Utils.getStarColor( Ts[i] )
        else:
            c = c0
        # Threshold SM for this planet's radius:
        if SM[0] == 'TSM':
            SMi, SMstr = survey['thresholdTSM']( RpVal[i], framework=framework )
        elif SM[0] == 'ESM':
            SMi, SMstr = survey['thresholdESM']( RpVal[i], framework=framework )
        if applySMcuts==False: # plotting everything regardless of SM
            if ( indicateTESS==True )*( TESS[i]==1 ):
                ax.plot( [Teq[i]], [RpVal[i]], 'o', ms=ms, alpha=alpha, \
                         mfc=c, mec=c, zorder=zTESS+nTESS+i )
                ax.plot( [Teq[i]], [RpVal[i]], 'o', ms=msTESS, alpha=alpha, \
                         zorder=zTESS, mfc=cTESS, mec=cTESS )
            else:
                ax.plot( [Teq[i]], [RpVal[i]], 'o', ms=ms, alpha=alpha, \
                         mfc=c, mec=c, zorder=z0+i )
        elif SM[1][i]>SMi: # if SM cuts applied, this one is high enough
            if ( indicateTESS==True )*( TESS[i]==1 ):
                ax.plot( [Teq[i]], [RpVal[i]], 'o', ms=ms, alpha=alpha, \
                         mfc=c, mec=c, zorder=zTESS+nTESS+i )
                ax.plot( [Teq[i]], [RpVal[i]], 'o', ms=msTESS, alpha=alpha, \
                         zorder=zTESS, mfc=cTESS, mec=cTESS )
            else:
                ax.plot( [Teq[i]], [RpVal[i]], 'o', ms=ms, alpha=alpha, \
                         mfc=c, mec=c, zorder=z0+i )
        else: # otherwise plot as a smaller background point
            ax.plot( [Teq[i]], [RpVal[i]], 'o', ms=0.5*ms, alpha=alpha, \
                     mfc=c0, mec=c0, zorder=0 )
    if showStellarTrack==True:
        ax = addStellarTrack( ax )
    ax = addSolarSystem( ax, showSolarSystem=showSolarSystem, \
                         showNeptuneRadius=showNeptuneRadius, \
                         showJupiterRadius=showJupiterRadius )
    # BUGFIX: the narrow-format branch previously set dyNewLine instead of
    # dySubTitle, producing a NameError at the annotation below whenever
    # applySMcuts=True with wideFormat=False. Both branches now define
    # dySubTitle.
    if wideFormat==False:
        subtitleY = 0.94
        dySubTitle = 0.01
    else:
        subtitleY = 0.925
        dySubTitle = 0.015
    if applySMcuts==True:
        fig.text( 0.08, subtitleY, SMstr, c='green', fontsize=14, \
                  horizontalalignment='left', verticalalignment='bottom' )
        otherNotes = 'Grey points do not meet {0} thresholds'.format( SM[0] )
        fig.text( 0.08, subtitleY-dySubTitle, otherNotes, c='black', \
                  fontsize=14, horizontalalignment='left', verticalalignment='top' )
    return fig, ax
def addStellarTrack( ax ):
    """
    Overlay the stellar radius-vs-temperature track on the axis.

    The track from Utils.readStellarTrack() is interpolated onto a fine
    temperature grid and drawn as three colored segments split at
    2200 K and 1300 K. Returns the axis.
    """
    trackRs, trackTs = Utils.readStellarTrack()
    Tfine = np.linspace( trackTs.min(), trackTs.max(), 1000 )
    Rfine = np.interp( Tfine, trackTs, trackRs )
    lineWidth = 5
    # ( mask, segment color ) for each temperature regime:
    segments = [ ( Tfine>2200, np.array( [178,24,43] )/256. ),
                 ( ( Tfine<=2200 )*( Tfine>1300 ), np.array( [191,129,45] )/256. ),
                 ( Tfine<=1300, np.array( [140,81,10] )/256. ) ]
    for mask, segColor in segments:
        ax.plot( Tfine[mask], Rfine[mask], '-', c=segColor, lw=lineWidth, zorder=1e5 )
    return ax
def addSolarSystem( ax, showSolarSystem=True, showNeptuneRadius=True, \
                    showJupiterRadius=True ):
    """
    Optionally overlay Solar System planets and reference radius lines.

    Parameters:
      ax: axis to draw on.
      showSolarSystem: plot each Solar System planet as a green circle at
                       its ( TeqK, RpRE ) position.
      showNeptuneRadius, showJupiterRadius: draw dashed grey horizontal
                       lines at those radii.

    Returns the axis.
    """
    ss = Utils.solarSystem()
    if showSolarSystem==True:
        # (Removed a dead "ssc = 'HotPink'" assignment that was immediately
        # overwritten by the green color below.)
        ssc = np.array( [0,204,0] )/256.
        ssPlanets = list( ss['TeqK'].keys() )
        ssms = 8
        for k in ssPlanets:
            # Semi-transparent fill plus a solid outline for each planet:
            ax.plot( [ss['TeqK'][k]], [ss['RpRE'][k]], 'o', ms=ssms, \
                     zorder=1e6, mfc=ssc, mec=ssc, alpha=0.5 )
            ax.plot( [ss['TeqK'][k]], [ss['RpRE'][k]], 'o', ms=ssms, \
                     zorder=1e6, mfc='none', mec=ssc, mew=2, alpha=1 )
    cgrey = 0.6*np.ones( 3 )
    if showNeptuneRadius==True:
        ax.axhline( ss['RpRE']['Neptune'], ls='--', c=cgrey, lw=2, zorder=0 )
    if showJupiterRadius==True:
        ax.axhline( ss['RpRE']['Jupiter'], ls='--', c=cgrey, lw=2, zorder=0 )
    return ax
def addStellarSpectralTypeLegend( ax, ms=10, text_fs=12 ):
    """
    Fill a small axis with a colored marker and rotated label for each
    stellar spectral type, hiding all spines, ticks, and tick labels.
    """
    colorMap, SpTs = Utils.getAllStarColors()
    # Strip the axis down to a bare canvas:
    for side in ['top','bottom','right','left']:
        ax.spines[side].set_visible( False )
    plt.setp( ax.xaxis.get_ticklabels(), visible=False )
    plt.setp( ax.yaxis.get_ticklabels(), visible=False )
    # One marker + label per spectral type, spaced along x:
    for i, spt in enumerate( SpTs ):
        ax.plot( i+1, 0.5, 'o', ms=ms, mfc=colorMap[spt], mec=colorMap[spt] )
        ax.text( i+1, 0.4, spt, rotation=45, fontsize=text_fs, \
                 horizontalalignment='right', verticalalignment='top' )
    ax.set_ylim( [ 0, 1 ] )
    ax.tick_params( axis='x', which='both', bottom=False, top=False )
    ax.tick_params( axis='y', which='both', left=False, right=False )
    return None
def readConfirmedProperties( ipath='confirmedProperties.pkl', SMFlag='TSM' ):
    """
    Returns properties for all confirmed planets. For planets with
    available radius but no mass measurement, an empirical relation
    is used to estimate the mass. This has been done for the purpose
    of identifying targets for a UV host star survey, which is why
    it's OK if the planet masses haven't been published.

    Parameters:
      ipath: path to the confirmed-planet properties pickle.
      SMFlag: 'TSM' or 'ESM', selecting which scale metric is returned
              under the 'SM' key.

    Returns:
      ( outp, dateStr ) where outp is a dict of per-planet arrays filtered
      to finite TeqK, SM, and RpValRE, and dateStr is the archive access
      date recorded in the pickle.
    """
    ifile = open( ipath, 'rb' )
    z0 = pickle.load( ifile )
    ifile.close()
    z = z0['allVals']
    planetName = z['planetName']
    RA = z['RA']
    Dec = z['Dec']
    RA_deg = z['RA_deg']
    Dec_deg = z['Dec_deg']
    RsRS = z['RsRS']
    aAU = z['aAU']
    TeqK = z['TeqK']
    Insol = z['Insol']
    TstarK = z['TstarK']
    T14hr = z['T14hr']
    b = z['b']
    RpRs = z['RpRs']
    Vmag = z['Vmag']
    Jmag = z['Jmag']
    Hmag = z['Hmag']
    Kmag = z['Kmag']
    RpValRE = z['RpValRE']
    RpLsigRE = z['RpLowErrRE']
    RpUsigRE = z['RpUppErrRE']
    MpValME = z['MpValME']
    MpLsigME = z['MpLowErrME']
    MpUsigME = z['MpUppErrME']
    TESS = z['discoveredByTESS']
    if SMFlag == 'TSM':
        SM = z['TSM']
    elif SMFlag == 'ESM':
        SM = z['ESM']
    n0 = len( planetName )
    # Planets with a radius but no mass: fill the mass from the empirical
    # mass-radius relation and assign a huge placeholder uncertainty.
    ixs = np.arange( n0 )[np.isnan( MpValME )*np.isfinite( RpValRE )]
    n = len( ixs )
    for i in range( n ):
        MpValME[ixs[i]] = Utils.planetMassFromRadius( RpValRE[ixs[i]] )
        MpUncFill = 1e9 #min( [ 0.1, MpValME[ixs[i]]/5.01 ] )
        MpLsigME[ixs[i]] = MpUncFill
        MpUsigME[ixs[i]] = MpUncFill
    # Recompute the scale metric for the mass-filled planets:
    if SMFlag == 'TSM':
        SM[ixs] = Utils.computeTSM( RpValRE[ixs], MpValME[ixs], \
                                    RsRS[ixs], TeqK[ixs], Jmag[ixs] )
    elif SMFlag == 'ESM':
        # NOTE(review): this overwrites the catalog RpRs for ALL planets
        # with the ratio computed from RpValRE/RsRS, not just the
        # mass-filled subset — confirm this is intended.
        RpRs = RpValRE/RsRS
        SM[ixs] = Utils.computeESM( TeqK[ixs], RpRs[ixs], TstarK[ixs], Kmag[ixs] )
    # Keep only planets with finite temperature, scale metric, and radius:
    ixs = np.isfinite( TeqK )*np.isfinite( SM )*np.isfinite( RpValRE )
    print( '\nReading in {0:.0f} planets total.'.format( n0 ) )
    print( 'Returning {0:.0f} planets with radii, {1}, and Teq values.'\
           .format( ixs.sum(), SMFlag ) )
    outp = { 'planetName':planetName[ixs], 'TESS':TESS[ixs], \
             'RA':RA[ixs], 'Dec':Dec[ixs], 'RA_deg':RA_deg[ixs], 'Dec_deg':Dec_deg[ixs], \
             'SM':SM[ixs], 'T14hr':T14hr[ixs], 'Vmag':Vmag[ixs], \
             'Jmag':Jmag[ixs], 'Hmag':Hmag[ixs], 'Kmag':Kmag[ixs], \
             'b':b[ixs], 'RpRs':RpRs[ixs], 'TeqK':TeqK[ixs], 'Insol':Insol[ixs], \
             'TstarK':TstarK[ixs], 'RsRS':RsRS[ixs], 'aAU':aAU[ixs], \
             'RpValRE':RpValRE[ixs], 'RpLsigRE':RpLsigRE[ixs], 'RpUsigRE':RpUsigRE[ixs], \
             'MpValME':MpValME[ixs], 'MpLsigME':MpLsigME[ixs], 'MpUsigME':MpUsigME[ixs] }
    return outp, z0['dateStr']
def readTOIProperties( ipath='toiProperties.pkl', SMFlag = 'TSM' ):
    """
    Reads TOI properties from a pickle file.

    Parameters:
      ipath: path to the TOI properties pickle.
      SMFlag: 'TSM' or 'ESM', selecting which scale metric is returned
              under the 'SM' key.

    Returns:
      ( outp, dateStr ) where outp is a dict of per-TOI arrays filtered to
      finite TeqK, SM, and RpValRE, and dateStr is the download date
      recorded in the pickle.
    """
    # Context manager guarantees the file is closed (the original left the
    # handle open on a pickle error); also removed a duplicated
    # "Jmag = z['Jmag']" assignment.
    with open( ipath, 'rb' ) as ifile:
        z0 = pickle.load( ifile )
    z = z0['allVals']
    planetName = z['planetName']
    RA = z['RA_deg']
    RAhr = RA*(24/360.)  # convert degrees to hours of right ascension
    Dec = z['Dec_deg']
    RsRS = z['RsRS']
    TeqK = z['TeqK']
    Jmag = z['Jmag']
    TstarK = z['TstarK']
    RpValRE = z['RpValRE']
    MpValME = z['MpValME']
    RpRs = z['RpRs']
    TeqK_exofop = z['TeqK_exofop']
    if SMFlag == 'TSM':
        SM = z['TSM']
    elif SMFlag == 'ESM':
        SM = z['ESM']
    # Keep only TOIs with finite temperature, scale metric, and radius:
    ixs = np.isfinite( TeqK )*np.isfinite( SM )*np.isfinite( RpValRE )
    outp = { 'planetName':planetName[ixs], 'SM':SM[ixs], 'RpRs':RpRs[ixs], \
             'RA_deg':RA[ixs], 'RA_hr':RAhr[ixs], 'Dec_deg':Dec[ixs], \
             'TeqK':TeqK[ixs], 'TstarK':TstarK[ixs], 'RsRS':RsRS[ixs], 'Jmag':Jmag[ixs], \
             'RpValRE':RpValRE[ixs], 'MpValME':MpValME[ixs], 'TeqK_exofop': TeqK_exofop[ixs]}
    return outp, z0['dateStr']
def readNoMassTESSProperties():
    """
    Returns properties of confirmed TESS planets lacking published masses.

    Masses are estimated from the empirical mass-radius relation and used
    to compute TSM values. Returns a dict of per-planet arrays (no date
    string, unlike the other reader routines).
    """
    ifile = open( 'confirmedProperties.pkl', 'rb' )
    z0 = pickle.load( ifile )
    ifile.close()
    z = z0['allVals']
    planetName = z['planetName']
    RsRS = z['RsRS']
    TeqK = z['TeqK']
    Jmag = z['Jmag']
    TstarK = z['TstarK']
    TSM = z['TSM']
    RpValRE = z['RpValRE']
    RpLsigRE = z['RpLowErrRE']
    RpUsigRE = z['RpUppErrRE']
    MpValME = z['MpValME']
    MpLsigME = z['MpLowErrME']
    MpUsigME = z['MpUppErrME']
    TESS = z['discoveredByTESS']
    print( '\nReading confirmed TESS planets lacking peer-reviewed published masses:' )
    # TESS discoveries with finite Teq and radius but NaN (unpublished) mass:
    ixs = np.isfinite( TeqK )*np.isfinite( RpValRE )*np.isnan( MpValME )*( TESS==1 )
    n = ixs.sum()
    for i in range( n ):
        print( '{0:.0f}. {1}'.format( i+1, planetName[ixs][i] ) )
    print( 'Returning {0:.0f} planets with measured radii and Teq values.'\
           .format( ixs.sum() ) )
    # Add in the estimated masses and then use these to compute estimated TSM values
    # (ESM values should already be included as it doesn't require the mass):
    MpValME = np.zeros( n )
    for i in range( n ):
        MpValME[i] = Utils.planetMassFromRadius( RpValRE[ixs][i] )
    TSM = Utils.computeTSM( RpValRE[ixs], MpValME, RsRS[ixs], TeqK[ixs], Jmag[ixs] )
    print( 'Masses taken from empirical relation and used for TSM calculation.\n' )
    outp = { 'planetName':planetName[ixs], 'TESS':TESS[ixs], \
             'TeqK':TeqK[ixs], 'TstarK':TstarK[ixs], 'RsRS':RsRS[ixs], \
             'RpValRE':RpValRE[ixs], 'RpLsigRE':RpLsigRE[ixs], 'RpUsigRE':RpUsigRE[ixs], \
             'MpValME':MpValME, 'TSM':TSM }
    return outp
def readConfirmedTESSProperties( publishedMasses=True, SMFlag = 'TSM' ):
    """
    Returns properties of confirmed TESS planets.

    Parameters:
      publishedMasses: True -> planets with published (finite) masses;
                       False -> planets with NaN masses, which then get
                       masses from the empirical mass-radius relation and
                       recomputed scale metrics.
      SMFlag: 'TSM' or 'ESM', selecting which scale metric is returned
              under the 'SM' key.

    Returns a dict of per-planet arrays.
    """
    ifile = open( 'confirmedProperties.pkl', 'rb' )
    z0 = pickle.load( ifile )
    ifile.close()
    z = z0['allVals']
    planetName = z['planetName']
    RsRS = z['RsRS']
    TeqK = z['TeqK']
    Jmag = z['Jmag']
    Kmag = z['Kmag']
    TstarK = z['TstarK']
    RpValRE = z['RpValRE']
    RpLsigRE = z['RpLowErrRE']
    RpUsigRE = z['RpUppErrRE']
    MpValME = z['MpValME']
    MpLsigME = z['MpLowErrME']
    MpUsigME = z['MpUppErrME']
    TESS = z['discoveredByTESS']
    if SMFlag == 'TSM':
        SM = z['TSM']
    elif SMFlag == 'ESM':
        SM = z['ESM']
    if publishedMasses==True:
        print( '\nReading confirmed TESS planets with peer-reviewed published masses:' )
        ixs = np.isfinite( TeqK )*np.isfinite( RpValRE )*np.isfinite( MpValME )*( TESS==1 )
        n = ixs.sum()
        for i in range( n ):
            print( '{0:.0f}. {1}'.format( i+1, planetName[ixs][i] ) )
        print( 'Returning {0:.0f} planets with measured radii, {1}, and Teq values.'\
               .format( ixs.sum(), SMFlag ) )
        outp = { 'planetName':planetName[ixs], 'TESS':TESS[ixs], \
                 'TeqK':TeqK[ixs], 'TstarK':TstarK[ixs], 'RsRS':RsRS[ixs], \
                 'RpValRE':RpValRE[ixs], 'RpLsigRE':RpLsigRE[ixs], \
                 'RpUsigRE':RpUsigRE[ixs], 'MpValME':MpValME[ixs], 'SM':SM[ixs] }
    else:
        print( '\nReading confirmed TESS planets lacking peer-reviewed published masses:' )
        ixs = np.isfinite( TeqK )*np.isfinite( RpValRE )*np.isnan( MpValME )*( TESS==1 )
        n = ixs.sum()
        for i in range( n ):
            print( '{0:.0f}. {1}'.format( i+1, planetName[ixs][i] ) )
        print( 'Returning {0:.0f} planets with measured radii and Teq values.'\
               .format( ixs.sum() ) )
        # Estimate masses for the selected planets in both SM cases so the
        # returned 'MpValME' array always matches the other entries in length:
        MpValME = np.zeros( n )
        for i in range( n ):
            MpValME[i] = Utils.planetMassFromRadius( RpValRE[ixs][i] )
        if SMFlag == 'TSM':
            # Use the estimated masses to compute estimated TSM values:
            SM = Utils.computeTSM( RpValRE[ixs], MpValME, RsRS[ixs], \
                                   TeqK[ixs], Jmag[ixs] )
            print( 'Masses taken from empirical relation and used for TSM calculation.\n' )
        if SMFlag == 'ESM':
            RpRs = RpValRE/RsRS
            # BUGFIX: Kmag was previously passed unmasked (full-length array)
            # while the other arguments were masked by ixs:
            SM = Utils.computeESM( TeqK[ixs], RpRs[ixs], TstarK[ixs], Kmag[ixs] )
        # BUGFIX: SM is already the masked, recomputed array here; the
        # original indexed it again with the full-length ixs mask:
        outp = { 'planetName':planetName[ixs], 'TESS':TESS[ixs], \
                 'TeqK':TeqK[ixs], 'TstarK':TstarK[ixs], 'RsRS':RsRS[ixs], \
                 'RpValRE':RpValRE[ixs], 'RpLsigRE':RpLsigRE[ixs], \
                 'RpUsigRE':RpUsigRE[ixs], 'MpValME':MpValME, 'SM':SM }
    return outp
def readWithMassTESSProperties():
    """
    Returns properties of confirmed TESS planets WITH published masses.

    (The original docstring/print said "lacking published masses", but the
    selection below requires np.isfinite( MpValME ), i.e. planets that DO
    have masses — consistent with the routine's name.)
    ESM not implemented because no references
    """
    ifile = open( 'confirmedProperties.pkl', 'rb' )
    z0 = pickle.load( ifile )
    ifile.close()
    z = z0['allVals']
    planetName = z['planetName']
    RsRS = z['RsRS']
    TeqK = z['TeqK']
    Jmag = z['Jmag']
    TstarK = z['TstarK']
    TSM = z['TSM']
    RpValRE = z['RpValRE']
    RpLsigRE = z['RpLowErrRE']
    RpUsigRE = z['RpUppErrRE']
    MpValME = z['MpValME']
    MpLsigME = z['MpLowErrME']
    MpUsigME = z['MpUppErrME']
    TESS = z['discoveredByTESS']
    # BUGFIX: message matched the "lacking masses" routine even though the
    # mask selects finite (published) masses:
    print( '\nReading confirmed TESS planets with peer-reviewed published masses:' )
    ixs = np.isfinite( TeqK )*np.isfinite( RpValRE )*np.isfinite( MpValME )*( TESS==1 )
    n = ixs.sum()
    for i in range( n ):
        print( '{0:.0f}. {1}'.format( i+1, planetName[ixs][i] ) )
    print( 'Returning {0:.0f} planets with measured radii, TSM, and Teq values.'\
           .format( ixs.sum() ) )
    outp = { 'planetName':planetName[ixs], 'TESS':TESS[ixs], \
             'TeqK':TeqK[ixs], 'TstarK':TstarK[ixs], 'RsRS':RsRS[ixs], \
             'RpValRE':RpValRE[ixs], 'RpLsigRE':RpLsigRE[ixs], 'RpUsigRE':RpUsigRE[ixs], \
             'MpValME':MpValME[ixs], 'TSM':TSM[ixs] }
    return outp
def readPredictedProperties(SMFlag = 'TSM'):
    """
    Processes the predicted planet information from Barclay (uses version 2)

    Parameters:
      SMFlag: 'TSM' or 'ESM'; selects which scale metric is returned under
              the 'SM' key.

    Returns:
      Dict of per-planet arrays filtered to finite TeqK, SM, and RpValRE.
    """
    # Generate the pickle on first use:
    idir = os.path.dirname( __file__ )
    ipath = os.path.join( idir, 'predictedProperties_v2.pkl' )
    if os.path.isfile( ipath )==False:
        processTargetLists.predictedTESS()
    ifile = open( ipath, 'rb' )
    z = pickle.load( ifile )
    ifile.close()
    RsRS = z['RsRS']
    TeqK = z['TeqK']
    Insol = z['Insol']
    TstarK = z['TstarK']
    if SMFlag == 'TSM':
        SM = z['TSM']
    elif SMFlag == 'ESM':
        SM = z['ESM']
    aRs = z['aRs']
    b = z['b']
    # Orbital inclination from the impact parameter and scaled semi-major axis:
    ideg = np.rad2deg( np.arccos( b/aRs ) )
    T14hr = z['T14hr']
    Pday = z['Pday']
    MsMS = z['MsMS']
    RpValRE = z['RpValRE']
    MpValME = z['MpValME']
    cad2min = z['cad2min']
    Vmag = z['Vmag']
    Jmag = z['Jmag']
    Kmag = z['Kmag']
    ixs = np.isfinite( TeqK )*np.isfinite( SM )*np.isfinite( RpValRE )
    # NOTE(review): 'cad2min' is returned unmasked while every other entry
    # is filtered by ixs — confirm whether this is intended.
    outp = { 'SM': SM[ixs], 'cad2min': cad2min, 'TeqK':TeqK[ixs], \
             'aRs':aRs [ixs], 'Pday':Pday[ixs], 'Insol':Insol[ixs], \
             'MsValMS':MsMS[ixs], 'RsRS':RsRS[ixs], 'TstarK':TstarK[ixs], \
             'RpValRE':RpValRE[ixs], 'MpValME':MpValME[ixs], \
             'ideg':ideg[ixs], 'b':b[ixs], 'T14hr':T14hr[ixs], \
             'Vmag':Vmag[ixs], 'Jmag':Jmag[ixs], 'Kmag':Kmag[ixs] }
    return outp
def getFifthPredicted(SMFlag='TSM', RpMax = 0, RpMin = 0, TeqMax = 0, TeqMin = 0):
    """
    Finds the fifth-highest ESM or TSM value for the predicted planets in a given RpRE and Teq range

    Parameters:
      SMFlag: 'TSM' or 'ESM' (str)
      RpMax, RpMin, TeqMax, TeqMin: define the grid cell in question (float)

    Returns 0 when the cell contains fewer than five predicted planets.
    """
    z = readPredictedProperties( SMFlag=SMFlag )
    # Collect SM values for predicted planets strictly inside the cell:
    cellSMs = [ z['SM'][i] for i in range( len( z['RsRS'] ) )
                if ( RpMin < z['RpValRE'][i] < RpMax )
                and ( TeqMin < z['TeqK'][i] < TeqMax ) ]
    # Padding with five zeros makes the fifth-largest value 0 whenever the
    # cell holds fewer than five planets, matching the rolling-top-5 logic:
    ranked = sorted( cellSMs + [ 0, 0, 0, 0, 0 ], reverse=True )
    return ranked[4]
def SMRepeats( SMFlag = 'ESM', survey = {} ):
    """
    Find TIC IDs hosting multiple above-threshold TOIs.

    Returns a dict mapping TIC ID -> list of planet-name strings for TOIs
    that pass the SM threshold for their radius; names also present in the
    top-ranked grid list are suffixed with '*'. TICs with fewer than two
    qualifying names are dropped.
    """
    data = pickle.load(open('toiProperties.pkl','rb'))['allVals']
    tic = data['TICID']
    RpRE = data['RpValRE']
    plName = data['planetName']
    ESM = data['ESM']
    TSM = data['TSM']
    # NOTE(review): CreateASCII unpacks TWO values from
    # transmissionGridTOIs( ..., ASCII=True ), but only one is bound here —
    # confirm the return shape for this call signature.
    topRanked = transmissionGridTOIs( survey=survey, SMFlag=SMFlag,\
                                      ASCII=True )
    # Trim each top-ranked entry to just the name (text before first space):
    for i, j in enumerate( topRanked ):
        ix = j.find(' ')
        topRanked[i] = j[:ix]
    if SMFlag == 'ESM':
        SM = ESM
    else:
        SM = TSM
    # Map ( name, SM, radius ) -> TIC, then invert to TIC -> list of tuples:
    namesToTIC = {}
    for i,j in enumerate( plName ):
        namesToTIC[j, SM[i], RpRE[i]] = tic[i]
    TICtoNames = {}
    for key, value in namesToTIC.items():
        if value in TICtoNames:
            TICtoNames[value].append(key)
        else:
            TICtoNames[value]=[key]
    # Keep, per TIC, the names that pass the SM threshold for their radius:
    bestSMs = {}
    for key, value in TICtoNames.items():
        bestSMs[key] = []
        if len( value ) > 1:
            for l in value:
                # l = ( planetName, SM, RpRE ); skip non-positive radii:
                if l[2] > 0:
                    name = None
                    if SMFlag == 'ESM':
                        if l[1] > surveySetup.thresholdESM(l[2])[0]:
                            name = l[0]
                    else:
                        if l[1] > surveySetup.thresholdTSM(l[2])[0]:
                            name = l[0]
                    # Star the name if it also appears in the top-ranked list:
                    if name in topRanked:
                        name = f'{l[0]}*'
                    if name != None:
                        bestSMs[key].append(name)
        # Drop TICs with fewer than two qualifying TOIs (safe: bestSMs is
        # not the dict being iterated):
        if len( bestSMs[key] ) < 2:
            del( bestSMs[key] )
    return bestSMs
def readExoFOP( forceDownload=False ):
    """
    Reads in the TFOP priority and comments for each TOI.

    Parameters:
      forceDownload: forwarded to downloadTargetLists.ExoFOP to force a
                     fresh download of the CSV table.

    Returns:
      Dict mapping TOI identifier -> [ Master priority, Comments ].
    """
    exoFOPpath = downloadTargetLists.ExoFOP( forceDownload=forceDownload )
    # Context manager guarantees the CSV file is closed (the original
    # never closed the handle):
    with open( exoFOPpath ) as ifile:
        reader = csv.reader( ifile, quotechar='"' )
        d = list( reader )
    cols = np.array( d[0] )   # header row -> column names
    rows = d[1:]
    y = {}
    for row in rows:
        rowArr = np.array( row )
        TOI = rowArr[cols=='TOI'][0]
        Priority = rowArr[cols=='Master'][0]
        Comments = rowArr[cols=='Comments'][0]
        y[TOI] = [ Priority, Comments ]
    return y
def CreateASCII( ipath='toiProperties.pkl', survey={}, SMFlag = 'TSM', onlyPCs=False, \
                 topFivePredicted=False, multTIC=False, forceDownloadExoFOP=False ):
    """
    Write two ASCII tables of top-ranked TOIs: one sorted by declination
    and one ranked by the chosen scale metric.

    Parameters:
      ipath: path to the TOI properties pickle.
      survey: survey dictionary providing 'gridEdges' and 'surveyName'.
      SMFlag: 'TSM' or 'ESM'.
      onlyPCs: forwarded to transmissionGridTOIs.
      topFivePredicted: restrict output to TOIs starred as beating the
                        fifth-best predicted planet in their grid cell.
      multTIC: use the multi-TOI-per-TIC list from SMRepeats instead
               (overrides topFivePredicted).
      forceDownloadExoFOP: force a fresh ExoFOP download.

    Returns:
      ( opath1, opath2 ): paths of the Dec-sorted and SM-ranked files.
    """
    Tgrid, Rgrid = survey['gridEdges']( survey['surveyName'] )
    ifile = open( ipath, 'rb' )
    z0 = pickle.load( ifile )
    ifile.close()
    z = z0['allVals']
    # Columns written to the ASCII table, in order:
    props = ['planetName', 'TICID', 'RA', 'Dec', \
             'Vmag', 'Imag', 'Jmag', 'Hmag', 'Kmag',\
             SMFlag, 'Kamp', 'Pday', \
             'TstarK', 'loggstarCGS', 'RsRS', 'MsMS', \
             'MpValME', 'RpValRE', 'TeqK' ]
    topRanked, dateStr = transmissionGridTOIs( ipath=ipath, survey=survey, SMFlag=SMFlag, \
                                               onlyPCs=onlyPCs, ASCII=True )
    nAll = len( z['planetName'] )
    ixsAll = np.arange( nAll )
    nTop = len( topRanked )
    if multTIC:
        # Replace the top-ranked list with all qualifying TOIs on
        # multi-TOI TICs:
        topRanked = []
        for i in list( SMRepeats(SMFlag=SMFlag, survey=survey ).values()):
            for j in i:
                topRanked.append(j)
        nTop = len(topRanked)
        topFivePredicted = False
    # Match each top-ranked name string back to its catalog row; propagate
    # the '*' (beats-fifth-predicted) marker onto the catalog name:
    topRankedIxs = np.zeros( nTop, dtype=int )
    for i in range( nTop ):
        ixName = topRanked[i].rfind( ')' )
        ix = int( ixsAll[z['planetName']==topRanked[i][:ixName+1]] )
        topRankedIxs[i] = ix
        if topRanked[i][-1]=='*':
            z['planetName'][ix] = '{0}*'.format( z['planetName'][ix] )
    if topFivePredicted:
        # Keep only the starred entries:
        topToPrintIxs = []
        for i in range( nTop ):
            ix = topRankedIxs[i]
            if z['planetName'][ix][-1]=='*':
                topToPrintIxs += [ ix ]
    else:
        topToPrintIxs = topRankedIxs
    print(topToPrintIxs)
    n = len( topToPrintIxs )
    # Dictionary of properties for top-ranked to be written to ASCII output:
    ASCII = {}
    for p in props:
        ASCII[p] = z[p][topToPrintIxs]
    RA_deg = z['RA_deg'][topToPrintIxs]
    Dec_deg = z['Dec_deg'][topToPrintIxs]
    # Sort by declination coordinate:
    #ixs = np.argsort( ASCII['Dec_deg'] )
    ixs = np.argsort( Dec_deg )
    for p in props:
        ASCII[p] = np.array( ASCII[p] )[ixs]
    # Add exofop data to file:
    exoFOP = readExoFOP( forceDownload=forceDownloadExoFOP )
    priority = []
    comments = []
    # Reduce each planet name to the bare TOI number used as the ExoFOP key
    # (text between the first '-' and any '(' suffix):
    TOI = list(ASCII['planetName'])
    for i,j in enumerate(TOI):
        k = j.split('-')[1]
        TOI[i] = k.split('(')[0]
    for i in TOI:
        if i in exoFOP:
            priority.append(exoFOP[i][0])
            comments.append(exoFOP[i][1])
        else:
            priority.append(None)
            comments.append(None)
    ASCII['Priority'] = np.array(priority)
    ASCII['Comments'] = np.array(comments)
    for j in ['Priority', 'Comments']:
        props.append(j)
    # Correct missing Imags (probably most of them):
    if pysynphotImport==True:
        ixs = np.arange( n )[np.isnan( ASCII['Imag'] )]
        m = len( ixs )
        print( '\nEstimating {0:.0f} Imags...'.format( m ) )
        for i in range( m ):
            Jmag = ASCII['Jmag'][ixs[i]]
            TstarK = ASCII['TstarK'][ixs[i]]
            loggCGS = ASCII['loggstarCGS'][ixs[i]]
            if np.isfinite( Jmag )*( TstarK<31000 )*np.isfinite( loggCGS ):
                Imag = Utils.convertMag( Jmag, TstarK, loggCGS, \
                                         inputMag='J', outputMag='I' )
                ASCII['Imag'][ixs[i]] = Imag
    # Fixed-width column headers:
    col0 = 'Target'.rjust( 18 )
    col1 = 'TICID'.rjust( 16 )
    col2 = 'RA'.center( 16 )
    col3 = 'Dec'.center( 14 )
    col4a = 'Vmag'.rjust( 7 )
    col4b = 'Imag'.rjust( 7 )
    col4c = 'Jmag'.rjust( 7 )
    col4d = 'Hmag'.rjust( 7 )
    col4e = 'Kmag'.rjust( 7 )
    col5 = SMFlag.rjust( 10 )
    col6 = 'K(m/s)'.rjust( 8 )
    col7 = 'P(d)'.rjust( 10 )
    col8 = 'Teff(K)'.rjust( 10 )
    col9 = 'logg(CGS)'.rjust( 10 )
    col10 = 'Rs(RS)'.rjust( 10 )
    col11 = 'Ms(MS)'.rjust( 10 )
    col12 = 'Mp(ME)'.rjust( 10 )
    col13 = 'Rp(RE)'.rjust( 10 )
    col14 = 'Teq(K)'.rjust( 10 )
    col15 = 'Priority'.center( 12 )
    col16 = 'Comments'.ljust( 50 )
    hdr = '# TOIs accessed on date (YYYY-MM-DD): {0}\n# '.format( dateStr )
    hdr += '{0}{1}{2}{3}{4}{5}{6}{7}{8}{9}{10}{11}{12}{13}{14}{15}{16}{17}{18}{19}{20}'\
           .format( col0, col1, col2, col3, \
                    col4a, col4b, col4c, col4d, col4e, \
                    col5, col6, col7, col8, col9, col10, \
                    col11, col12, col13, col14, col15, col16 )
    ncol = [ 18, 15, 16, 15, 7, 7, 7, 7, 7, 10, 8, \
             10, 10, 10, 10, 10, 10, 10, 10, 12, 50 ] # column width
    ndps = [ 0, 0, 2, 2, 1, 1, 1, 1, 1, 1, 1, \
             3, 0, 1, 1, 1, 1, 1, 0, 0, 0 ] # decimal places
    hdr += '\n#{0}'.format( 323*'-' )
    m = len( props )
    def rowStr( i, zin ):
        # Format row i of zin as one fixed-width table line.
        rstr = '\n '
        for j in range( m ): # loop over each property
            k = props[j]
            if ( k!='planetName' )*( k!='TICID' )*( k!='RA' )*( k!='Dec' )*(k!='Comments')*(k!='Priority'):
                # numbers
                rstr += '{0:.{1}f}'.format( zin[k][i], ndps[j] ).rjust( ncol[j] )
            elif (k=='Priority'):
                rstr += '{0}'.format( zin[k][i] ).center( ncol[j] )
            elif (k=='Comments'):
                rstr += '{0}'.format( zin[k][i] ).ljust( ncol[j] )
            else: # strings
                rstr += '{0}'.format( zin[k][i] ).rjust( ncol[j] )
        return rstr
    ostr = '{0}'.format( hdr )
    for i in range( n ): # loop over each TOI
        rstr = rowStr( i, ASCII )
        ostr += rstr
    # print(ostr)
    # Write to file:
    oname = f'RVvaluesBy{SMFlag}.txt'
    if multTIC == True:
        oname = oname.replace('.txt', '_Multis.txt')
    else:
        if onlyPCs == True:
            oname = oname.replace( '.txt', '_onlyPCs.txt' )
        if topFivePredicted==True:
            oname = oname.replace( '.txt', '_topPredicted.txt' )
    odir = os.path.join( os.getcwd(), 'ASCII' )
    if os.path.isdir( odir )==False:
        os.makedirs( odir )
    opath1 = os.path.join( odir, oname )
    ofile = open( opath1, 'w' )
    ofile.write( ostr )
    ofile.close()
    # Now write out the same file but ranked by SM:
    ixs = np.arange( n )[np.argsort( ASCII[SMFlag] )[::-1]]
    for k in props:
        ASCII[k] = ASCII[k][ixs]
    ostr2 = '{0}'.format( hdr )
    for i in range( n ): # loop over each TOI
        rstr = rowStr( i, ASCII )
        ostr2 += rstr
    opath2 = opath1.replace( '.txt', '_ranked{0}.txt'.format( SMFlag ) )
    ofile = open( opath2, 'w' )
    ofile.write( ostr2 )
    ofile.close()
    print( '\nSaved:\n{0}\n{1}'.format( opath1, opath2 ) )
    return opath1, opath2
def CreateASCII_Confirmed( ipath='confirmedProperties.pkl', survey={}, SMFlag = 'TSM' ):
"""
Would be better to merge this in single routine for TOIs and Confirmed.
"""
Tgrid, Rgrid = survey['gridEdges']( survey['surveyName'] )
ifile = open( ipath, 'rb' )
z0 = pickle.load( ifile )
ifile.close()
z = z0['allVals']
# TODO = Need to add RA, Dec, loggstarCGS to confirmedProperties.pkl
props = [ 'planetName', 'RA', 'Dec', \
'Vmag', 'Jmag', 'Hmag', 'Kmag',\
SMFlag, 'Kamp', 'Pday', 'TstarK', 'RsRS', 'MsMS', \
'MpValME', 'MpLowErrME', 'MpUppErrME', 'RpValRE', 'TeqK' ]
topRanked, dateStr = transmissionGridConfirmed( ipath=ipath, survey=survey, SMFlag=SMFlag, \
ASCII=True )
#print( 'createASCII', survey, len( topRanked ) )
#pdb.set_trace()
nAll = len( z['planetName'] )
ixsAll = np.arange( nAll )
nTop = len( topRanked )
#if multTIC:
# topRanked = []
# for i in list( SMRepeats(SMFlag=SMFlag, survey=survey ).values()):
# for j in i:
# topRanked.append(j)
# nTop = len(topRanked)
# topFivePredicted = False
for i in range( nAll ):
z['planetName'][i] = z['planetName'][i].replace( ' ', '' )
topRankedIxs = np.zeros( nTop, dtype=int )
for i in range( nTop ):
ixName = topRanked[i].rfind( '[' ) # DIFFERENT TO TOIs!
ix = int( ixsAll[z['planetName']==topRanked[i][:ixName-1]] )
topRankedIxs[i] = ix
if topRanked[i][-1]=='*':
z['planetName'][ix] = '{0}*'.format( z['planetName'][ix] )
#if topFivePredicted:
# topToPrintIxs = []
# for i in range( nTop ):
# ix = topRankedIxs[i]
# if z['planetName'][ix][-1]=='*':
# topToPrintIxs += [ ix ]
#else:
# topToPrintIxs = topRankedIxs
topToPrintIxs = topRankedIxs
print(topToPrintIxs)
n = len( topToPrintIxs )
# Dictionary of properties for top-ranked to be written to ASCII output:
ASCII = {}
for p in props:
ASCII[p] = z[p][topToPrintIxs]
RA_deg = z['RA_deg'][topToPrintIxs]
Dec_deg = z['Dec_deg'][topToPrintIxs]
# TO ADD BACK IN ONCE DEC_DEG ADDED BACK IN...
# Sort by declination coordinate:
#ixs = np.argsort( ASCII['Dec_deg'] )
ixs = np.argsort( Dec_deg )
for p in props:
ASCII[p] = np.array( ASCII[p] )[ixs]
RA_deg = RA_deg[ixs]
Dec_deg = Dec_deg[ixs]
#print( Dec_deg )
#pdb.set_trace()
pl = list(ASCII['planetName'])
nn = len( pl )
#for i in range( nn ):
# print( '' )
# print( ASCII['planetName'][i], ASCII['RA'][i], ASCII['Dec'][i] )
# print( ASCII['planetName'][i], RA_deg[i], Dec_deg[i] )
#pdb.set_trace()
#Add exofop data to file:
#exoFOP = ReadExoFOPProperties()
#priority = []
#comments = []
for i,j in enumerate(pl):
try:
k = j.split('-')[1]
except:
continue
col0 = 'Target'.rjust( 18 )
col1 = 'RA'.center( 16 )
col2 = 'Dec'.center( 14 )
col3a = 'Vmag'.rjust( 7 )
col3b = 'Jmag'.rjust( 7 )
col3c = 'Hmag'.rjust( 7 )
col3d = 'Kmag'.rjust( 7 )
col4 = SMFlag.rjust( 10 )
col5 = 'K(m/s)'.rjust( 8 )
col6 = 'P(d)'.rjust( 10 )
col7 = 'Teff(K)'.rjust( 10 )
#col7 = 'logg(CGS)'.rjust( 10 )
col8 = 'Rs(RS)'.rjust( 10 )
col9 = 'Ms(MS)'.rjust( 10 )
col10a = 'MpVal(ME)'.rjust( 12 )
col10b = 'MpSigL(ME)'.rjust( 12 )
col10c = 'MpSigU(ME)'.rjust( 12 )
col11 = 'Rp(RE)'.rjust( 10 )
col12 = 'Teq(K)'.rjust( 10 )
ostr = '# Exoplanet Archive accessed on date (YYYY-MM-DD): {0}\n# '.format( dateStr )
ostr += '{0}{1}{2}{3}{4}{5}{6}{7}{8}{9}{10}{11}{12}{13}{14}{15}{16}{17}'\
.format( col0, col1, col2, \
col3a, col3b, col3c, col3d, col4, \
col5, col6, col7, col8, col9, \
col10a, col10b, col10c, \
col11, col12 )
ncol = [ 18, 16, 15, 7, 7, 7, 7, 10, 8, \
10, 10, 10, 10, 12, 12, 12, 10, 10 ] # column width
ndps = [ 0, 0, 0, 1, 1, 1, 1, 1, 1, \
3, 0, 1, 1, 3, 3, 3, 1, 0 ] # decimal places
ostr += '\n#{0}'.format( 192*'-' )
m = len( props )
def rowStr( i ):
rstr = '\n '
for j in range( m ): # loop over each property
k = props[j]
if ( k!='planetName' )*( k!='RA' )*( k!='Dec' ):
# numbers
rstr += '{0:.{1}f}'.format( ASCII[k][i], ndps[j] ).rjust( ncol[j] )
else: # strings
rstr += '{0}'.format( ASCII[k][i] ).rjust( ncol[j] )
return rstr
for i in range( n ): # loop over each TOI
rstr = rowStr( i )
ostr += rstr
# print(ostr)
# Write to file:
oname = 'confirmedPlanets_{0}.txt'.format( SMFlag )
#if multTIC == True:
# oname = oname.replace('.txt', '_Multis.txt')
#else:
# if onlyPCs == True:
# oname = oname.replace( '.txt', '_onlyPCs.txt' )
# if topFivePredicted==True:
# oname = oname.replace( '.txt', '_topPredicted.txt' )
odir = os.path.join( os.getcwd(), 'ASCII' )
if os.path.isdir( odir )==False:
os.makedirs( odir )
opath = os.path.join( odir, oname )
ofile = open( opath, 'w' )
ofile.write( ostr )
ofile.close()
print( '\nSaved:\n{0}'.format( opath ) )
return opath
def TeqK_ExoFOPvsKempton(Kempton, ExoFOP):
    """
    Plot equilibrium temperatures computed as in Kempton et al. (2018)
    against the values pulled from the Exoplanet Archive (ExoFOP).

    Parameters
    ----------
    Kempton : array-like
        TeqK values computed following Kempton et al. (2018).
    ExoFOP : array-like
        TeqK values pulled from the Exoplanet Archive.

    The figure (blue dots = data, red line = 1:1 reference) is saved to
    FIGDIR as 'TeqK_Kempton_vs_ExoFOP.pdf'.
    """
    fig = plt.figure()
    # Blue points: comparison data; red line: y = x reference.
    plt.plot(Kempton, ExoFOP, 'b.', Kempton, Kempton, 'r-')
    plt.xlabel('TeqK Kempton')
    plt.ylabel('TeqK ExoFOP')
    plt.title('Temperatures computed as in Kempton et al. (2018) \n vs those pulled from the Exoplanet Archive')
    oname = "TeqK_Kempton_vs_ExoFOP.pdf"
    odir = FIGDIR
    opathk = os.path.join(odir, oname)
    fig.savefig(opathk)
    # Close the figure so repeated calls do not accumulate open matplotlib
    # figures (the original leaked one figure per call).
    plt.close(fig)
    print('\n Saved: ', oname)
def addHeatMap(ax, xLines, yLines, TeqK, RpRE, Tgrid, Rgrid, cmap):
    """
    Fill a heat map on *ax*: each grid box is colored by the (log2 of the)
    fraction of observed planets/TOIs in the box relative to the number
    predicted by Barclay et al.
    """
    zPredicted = readPredictedProperties()
    predTeqK = zPredicted['TeqK']
    predRpVal = zPredicted['RpValRE']
    # Build one polygon + one observed/predicted ratio per grid box,
    # using tick values that run from 0.5 to 6.5.
    polygons = []
    ratios = []
    counter = 0
    for xl in xLines[:-1]:
        for yl in yLines[:-1]:
            tEdges = [Tgrid[int(xl - 0.5)], Tgrid[int(xl + 0.5)]]
            rEdges = [Rgrid[int(yl - 0.5)], Rgrid[int(yl + 0.5)]]
            ratio = Utils.HeatMapValues(tEdges, rEdges, TeqK, RpRE,
                                        predTeqK, predRpVal)
            polygons.append([[xl, xl, xl + 1, xl + 1],
                             [yl, yl + 1, yl + 1, yl], counter])
            ratios.append(ratio)
            counter += 1
    ratios = np.array(ratios)
    # log2-transform non-zero ratios; empty boxes become NaN.
    logRatios = [np.log2(v) if v != 0 else np.nan for v in ratios]
    # Normalize; ratios below/above 1 map into [0, 0.5] / [0.5, 1] separately.
    norm = Utils.Normalize(logRatios, True)
    for poly in polygons:
        ax.fill(poly[0], poly[1], color=cmap(norm[poly[2]]), zorder=0)
    return ax, np.log2(np.max(ratios))
def addColorBar(ax, val, cmap):
    """Draw a horizontal colorbar on *ax*, normalized symmetrically to [-val, val]."""
    ax.tick_params(labelsize=12)
    # NOTE(review): only the right half of the colorbar is properly
    # normalized relative to the figure; kept as in the original until all
    # data are normalized together.
    bar_norm = matplotlib.colors.Normalize(vmin=-val, vmax=val)
    bar = matplotlib.colorbar.ColorbarBase(ax, cmap=cmap, norm=bar_norm,
                                           orientation='horizontal')
    # Rasterize and merge facet edges to avoid banding artifacts in PDFs.
    bar.solids.set_rasterized(True)
    bar.solids.set_edgecolor('face')
| 39.877885 | 112 | 0.530659 |
ace5cb53259f90b3d563336f2da1574f988dcb16 | 17,535 | py | Python | tests/integration/cmor/_fixes/test_common.py | jvegreg/ESMValCore | 03eb1c942bf1dc3be98cb30c3592b42e82a94f16 | [
"Apache-2.0"
] | 26 | 2019-06-07T07:50:07.000Z | 2022-03-22T21:04:01.000Z | tests/integration/cmor/_fixes/test_common.py | jvegreg/ESMValCore | 03eb1c942bf1dc3be98cb30c3592b42e82a94f16 | [
"Apache-2.0"
] | 1,370 | 2019-06-06T09:03:07.000Z | 2022-03-31T04:37:20.000Z | tests/integration/cmor/_fixes/test_common.py | jvegreg/ESMValCore | 03eb1c942bf1dc3be98cb30c3592b42e82a94f16 | [
"Apache-2.0"
] | 26 | 2019-07-03T13:08:48.000Z | 2022-03-02T16:08:47.000Z | """Test for common fixes used for multiple datasets."""
import iris
import numpy as np
import pytest
from cf_units import Unit
from esmvalcore.cmor._fixes.common import (
ClFixHybridHeightCoord,
ClFixHybridPressureCoord,
OceanFixGrid,
SiconcFixScalarCoord,
)
from esmvalcore.cmor.table import get_var_info
from esmvalcore.iris_helpers import var_name_constraint
# Expected values of the derived 'air_pressure' coordinate (points and
# bounds) asserted by the hybrid-pressure fix-metadata tests below.
AIR_PRESSURE_POINTS = np.array([[[[1.0, 1.0, 1.0, 1.0],
                                  [1.0, 1.0, 1.0, 1.0],
                                  [1.0, 1.0, 1.0, 1.0]],
                                 [[2.0, 3.0, 4.0, 5.0],
                                  [6.0, 7.0, 8.0, 9.0],
                                  [10.0, 11.0, 12.0, 13.0]]]])
AIR_PRESSURE_BOUNDS = np.array([[[[[0.0, 1.5],
                                   [-1.0, 2.0],
                                   [-2.0, 2.5],
                                   [-3.0, 3.0]],
                                  [[-4.0, 3.5],
                                   [-5.0, 4.0],
                                   [-6.0, 4.5],
                                   [-7.0, 5.0]],
                                  [[-8.0, 5.5],
                                   [-9.0, 6.0],
                                   [-10.0, 6.5],
                                   [-11.0, 7.0]]],
                                 [[[1.5, 3.0],
                                   [2.0, 5.0],
                                   [2.5, 7.0],
                                   [3.0, 9.0]],
                                  [[3.5, 11.0],
                                   [4.0, 13.0],
                                   [4.5, 15.0],
                                   [5.0, 17.0]],
                                  [[5.5, 19.0],
                                   [6.0, 21.0],
                                   [6.5, 23.0],
                                   [7.0, 25.0]]]]])
def hybrid_pressure_coord_fix_metadata(nc_path, short_name, fix):
    """Test ``fix_metadata`` of file with hybrid pressure coord.

    Loads the raw cubes from *nc_path*, asserts the unfixed state (four
    cubes; ``air_pressure`` has points but no bounds), applies *fix*, and
    asserts that a single cube remains whose ``air_pressure`` coordinate
    now carries points and bounds matching the module-level expectations.
    Returns the raw cubes' var_names so callers can add extra assertions.
    """
    cubes = iris.load(str(nc_path))
    # Raw cubes
    assert len(cubes) == 4
    var_names = [cube.var_name for cube in cubes]
    assert short_name in var_names
    assert 'ps' in var_names
    assert 'b_bnds' in var_names
    # Raw cube
    cube = cubes.extract_cube(var_name_constraint(short_name))
    air_pressure_coord = cube.coord('air_pressure')
    assert air_pressure_coord.points is not None
    assert air_pressure_coord.bounds is None
    np.testing.assert_allclose(air_pressure_coord.points, AIR_PRESSURE_POINTS)
    # Raw ps cube
    ps_cube = cubes.extract_cube('surface_air_pressure')
    assert ps_cube.attributes == {'additional_attribute': 'xyz'}
    # Apply fix
    fixed_cubes = fix.fix_metadata(cubes)
    assert len(fixed_cubes) == 1
    fixed_cube = fixed_cubes.extract_cube(var_name_constraint(short_name))
    fixed_air_pressure_coord = fixed_cube.coord('air_pressure')
    assert fixed_air_pressure_coord.points is not None
    assert fixed_air_pressure_coord.bounds is not None
    np.testing.assert_allclose(fixed_air_pressure_coord.points,
                               AIR_PRESSURE_POINTS)
    np.testing.assert_allclose(fixed_air_pressure_coord.bounds,
                               AIR_PRESSURE_BOUNDS)
    # The fix should strip the extra attribute from the surface pressure
    # coordinate promoted onto the fixed cube.
    surface_pressure_coord = fixed_cube.coord(var_name='ps')
    assert surface_pressure_coord.attributes == {}
    return var_names
@pytest.mark.sequential
def test_cl_hybrid_pressure_coord_fix_metadata_with_a(test_data_path):
    """Test ``fix_metadata`` for ``cl``."""
    var_info = get_var_info('CMIP6', 'Amon', 'cl')
    fix = ClFixHybridPressureCoord(var_info)
    var_names = hybrid_pressure_coord_fix_metadata(
        test_data_path / 'common_cl_a.nc', 'cl', fix)
    assert 'a_bnds' in var_names
@pytest.mark.sequential
def test_cl_hybrid_pressure_coord_fix_metadata_with_ap(test_data_path):
    """Test ``fix_metadata`` for ``cl``."""
    var_info = get_var_info('CMIP6', 'Amon', 'cl')
    fix = ClFixHybridPressureCoord(var_info)
    var_names = hybrid_pressure_coord_fix_metadata(
        test_data_path / 'common_cl_ap.nc', 'cl', fix)
    assert 'ap_bnds' in var_names
# Expected altitude points, the raw ("WRONG") and fixed ("RIGHT") altitude
# bounds, and the air_pressure points/bounds derived by the hybrid-height
# fix, asserted by the tests below.
HEIGHT_POINTS = np.array([[[1.0, 1.0]],
                          [[2.0, 3.0]]])
HEIGHT_BOUNDS_WRONG = np.array([[[[0.5, 1.5],
                                  [0.5, 1.5]]],
                                [[[1.5, 3.0],
                                  [2.5, 4.0]]]])
HEIGHT_BOUNDS_RIGHT = np.array([[[[0.5, 1.5],
                                  [-0.5, 2.0]]],
                                [[[1.5, 3.0],
                                  [2.0, 5.0]]]])
PRESSURE_POINTS = np.array([[[101312.98512207, 101312.98512207]],
                            [[101300.97123885, 101288.95835383]]])
PRESSURE_BOUNDS = np.array([[[[101318.99243691, 101306.9780559],
                              [101331.00781103, 101300.97123885]]],
                            [[[101306.9780559, 101288.95835383],
                              [101300.97123885, 101264.93559234]]]])
def hybrid_height_coord_fix_metadata(nc_path, short_name, fix):
    """Test ``fix_metadata`` of file with hybrid height coord.

    Asserts that the raw altitude bounds are wrong, applies *fix*, then
    asserts the altitude bounds are corrected and that a derived
    ``air_pressure`` coordinate with the expected metadata is attached.
    """
    cubes = iris.load(str(nc_path))
    # Raw cubes
    assert len(cubes) == 3
    var_names = [cube.var_name for cube in cubes]
    assert short_name in var_names
    assert 'orog' in var_names
    assert 'b_bnds' in var_names
    # Raw cube
    cube = cubes.extract_cube(var_name_constraint(short_name))
    height_coord = cube.coord('altitude')
    assert height_coord.points is not None
    assert height_coord.bounds is not None
    np.testing.assert_allclose(height_coord.points, HEIGHT_POINTS)
    np.testing.assert_allclose(height_coord.bounds, HEIGHT_BOUNDS_WRONG)
    assert not np.allclose(height_coord.bounds, HEIGHT_BOUNDS_RIGHT)
    assert not cube.coords('air_pressure')
    # Apply fix
    fixed_cubes = fix.fix_metadata(cubes)
    assert len(fixed_cubes) == 1
    fixed_cube = fixed_cubes.extract_cube(var_name_constraint(short_name))
    fixed_height_coord = fixed_cube.coord('altitude')
    assert fixed_height_coord.points is not None
    assert fixed_height_coord.bounds is not None
    np.testing.assert_allclose(fixed_height_coord.points, HEIGHT_POINTS)
    np.testing.assert_allclose(fixed_height_coord.bounds, HEIGHT_BOUNDS_RIGHT)
    assert not np.allclose(fixed_height_coord.bounds, HEIGHT_BOUNDS_WRONG)
    # NOTE(review): this reads from the raw ``cube`` rather than
    # ``fixed_cube`` — presumably the fix modifies the cube in place;
    # confirm and consider using ``fixed_cube`` here.
    air_pressure_coord = cube.coord('air_pressure')
    np.testing.assert_allclose(air_pressure_coord.points, PRESSURE_POINTS)
    np.testing.assert_allclose(air_pressure_coord.bounds, PRESSURE_BOUNDS)
    assert air_pressure_coord.var_name == 'plev'
    assert air_pressure_coord.standard_name == 'air_pressure'
    assert air_pressure_coord.long_name == 'pressure'
    assert air_pressure_coord.units == 'Pa'
@pytest.mark.sequential
def test_cl_hybrid_height_coord_fix_metadata(test_data_path):
    """Test ``fix_metadata`` for ``cl``."""
    fix = ClFixHybridHeightCoord(get_var_info('CMIP6', 'Amon', 'cl'))
    hybrid_height_coord_fix_metadata(
        test_data_path / 'common_cl_hybrid_height.nc', 'cl', fix)
@pytest.fixture
def siconc_cubes():
    """Sample cube."""
    # Single-point (time, lat, lon) siconc cube wrapped in a CubeList.
    cube = iris.cube.Cube(
        [[[22.0]]],
        standard_name='sea_ice_area_fraction',
        var_name='siconc',
        units='%',
        dim_coords_and_dims=[
            (iris.coords.DimCoord([0.0], standard_name='time',
                                  var_name='time',
                                  units='days since 6543-2-1'), 0),
            (iris.coords.DimCoord([-30.0], standard_name='latitude',
                                  var_name='lat',
                                  units='degrees_north'), 1),
            (iris.coords.DimCoord([30.0], standard_name='longitude',
                                  var_name='lon',
                                  units='degrees_east'), 2),
        ],
    )
    return iris.cube.CubeList([cube])
def test_siconc_fix_metadata(siconc_cubes):
    """Test ``fix_metadata`` for ``siconc``."""
    assert len(siconc_cubes) == 1
    siconc_cube = siconc_cubes[0]
    assert siconc_cube.var_name == "siconc"
    # Extract siconc cube
    siconc_cube = siconc_cubes.extract_cube('sea_ice_area_fraction')
    assert not siconc_cube.coords('typesi')
    # Apply fix
    vardef = get_var_info('CMIP6', 'SImon', 'siconc')
    fix = SiconcFixScalarCoord(vardef)
    fixed_cubes = fix.fix_metadata(siconc_cubes)
    assert len(fixed_cubes) == 1
    fixed_siconc_cube = fixed_cubes.extract_cube(
        'sea_ice_area_fraction')
    # The fix should add a scalar 'area_type' coordinate with a single
    # 'sea_ice' point, no bounds, and no unit.
    fixed_typesi_coord = fixed_siconc_cube.coord('area_type')
    assert fixed_typesi_coord.points is not None
    assert fixed_typesi_coord.bounds is None
    np.testing.assert_equal(fixed_typesi_coord.points,
                            ['sea_ice'])
    np.testing.assert_equal(fixed_typesi_coord.units,
                            Unit('No unit'))
def get_tos_cubes(wrong_ij_names=False, ij_bounds=False):
    """Cubes containing tos variable.

    ``wrong_ij_names`` swaps the index coordinates' names to lat/lon;
    ``ij_bounds`` attaches bounds to the index coordinates.
    """
    if wrong_ij_names:
        j_name, j_long = 'lat', 'latitude'
        i_name, i_long = 'lon', 'longitude'
    else:
        j_name, j_long = 'j', 'cell index along second dimension'
        i_name, i_long = 'i', 'cell index along first dimension'
    j_bnds = [[10.0, 30.0], [30.0, 50.0]] if ij_bounds else None
    i_bnds = [[5.0, 15.0], [15.0, 25.0], [25.0, 35.0]] if ij_bounds else None
    j_coord = iris.coords.DimCoord([20.0, 40.0], bounds=j_bnds,
                                   var_name=j_name, long_name=j_long)
    i_coord = iris.coords.DimCoord([10.0, 20.0, 30.0], bounds=i_bnds,
                                   var_name=i_name, long_name=i_long)
    lat_coord = iris.coords.AuxCoord(
        [[-40.0, -20.0, 0.0], [-20.0, 0.0, 20.0]],
        var_name='lat', standard_name='latitude', units='degrees_north')
    lon_coord = iris.coords.AuxCoord(
        [[100.0, 140.0, 180.0], [80.0, 100.0, 120.0]],
        var_name='lon', standard_name='longitude', units='degrees_east')
    time_coord = iris.coords.DimCoord(
        1.0, bounds=[0.0, 2.0], var_name='time', standard_name='time',
        long_name='time', units='days since 1950-01-01')
    # tos variable cube plus an empty (dummy) cube.
    tos_cube = iris.cube.Cube(
        np.full((1, 2, 3), 300.0),
        var_name='tos',
        long_name='sea_surface_temperature',
        units='K',
        dim_coords_and_dims=[(time_coord, 0), (j_coord, 1), (i_coord, 2)],
        aux_coords_and_dims=[(lat_coord, (1, 2)), (lon_coord, (1, 2))],
    )
    return iris.cube.CubeList([tos_cube, iris.cube.Cube(0.0)])
@pytest.fixture
def tos_cubes_wrong_ij_names():
    """Cubes with wrong ij names."""
    return get_tos_cubes(ij_bounds=True, wrong_ij_names=True)
def test_ocean_fix_grid_wrong_ij_names(tos_cubes_wrong_ij_names):
    """Test ``fix_metadata`` with cubes with wrong ij names.

    The fix should rename the mis-named index coordinates to i/j, attach
    index bounds, and compute bounds for the 2D latitude/longitude
    auxiliary coordinates.
    """
    cube_in = tos_cubes_wrong_ij_names.extract_cube('sea_surface_temperature')
    assert len(cube_in.coords('latitude')) == 2
    assert len(cube_in.coords('longitude')) == 2
    assert cube_in.coord('latitude', dimensions=1).bounds is not None
    assert cube_in.coord('longitude', dimensions=2).bounds is not None
    assert cube_in.coord('latitude', dimensions=(1, 2)).bounds is None
    assert cube_in.coord('longitude', dimensions=(1, 2)).bounds is None
    # Apply fix
    vardef = get_var_info('CMIP6', 'Omon', 'tos')
    fix = OceanFixGrid(vardef)
    fixed_cubes = fix.fix_metadata(tos_cubes_wrong_ij_names)
    assert len(fixed_cubes) == 1
    fixed_cube = fixed_cubes.extract_cube('sea_surface_temperature')
    # The fix works in place on the extracted cube.
    assert fixed_cube is cube_in
    # Check ij names
    i_coord = fixed_cube.coord('cell index along first dimension')
    j_coord = fixed_cube.coord('cell index along second dimension')
    assert i_coord.var_name == 'i'
    assert i_coord.standard_name is None
    assert i_coord.long_name == 'cell index along first dimension'
    assert i_coord.units == '1'
    assert i_coord.circular is False
    assert j_coord.var_name == 'j'
    assert j_coord.standard_name is None
    assert j_coord.long_name == 'cell index along second dimension'
    assert j_coord.units == '1'
    # Check ij points and bounds
    np.testing.assert_allclose(i_coord.points, [0, 1, 2])
    np.testing.assert_allclose(i_coord.bounds,
                               [[-0.5, 0.5], [0.5, 1.5], [1.5, 2.5]])
    np.testing.assert_allclose(j_coord.points, [0, 1])
    np.testing.assert_allclose(j_coord.bounds, [[-0.5, 0.5], [0.5, 1.5]])
    # Check bounds of latitude and longitude
    assert len(fixed_cube.coords('latitude')) == 1
    assert len(fixed_cube.coords('longitude')) == 1
    assert fixed_cube.coord('latitude').bounds is not None
    assert fixed_cube.coord('longitude').bounds is not None
    latitude_bounds = np.array(
        [[[-43.48076211, -34.01923789, -22.00961894, -31.47114317],
          [-34.01923789, -10.0, 2.00961894, -22.00961894],
          [-10.0, -0.53847577, 11.47114317, 2.00961894]],
         [[-31.47114317, -22.00961894, -10.0, -19.46152423],
          [-22.00961894, 2.00961894, 14.01923789, -10.0],
          [2.00961894, 11.47114317, 23.48076211, 14.01923789]]]
    )
    np.testing.assert_allclose(fixed_cube.coord('latitude').bounds,
                               latitude_bounds)
    longitude_bounds = np.array([[[140.625, 99.375, 99.375, 140.625],
                                  [99.375, 140.625, 140.625, 99.375],
                                  [140.625, 99.375, 99.375, 140.625]],
                                 [[140.625, 99.375, 99.375, 140.625],
                                  [99.375, 140.625, 140.625, 99.375],
                                  [140.625, 99.375, 99.375, 140.625]]])
    np.testing.assert_allclose(fixed_cube.coord('longitude').bounds,
                               longitude_bounds)
@pytest.fixture
def tos_cubes_no_ij_bounds():
    """Cubes with no ij bounds."""
    return get_tos_cubes(ij_bounds=False, wrong_ij_names=False)
def test_ocean_fix_grid_no_ij_bounds(tos_cubes_no_ij_bounds):
    """Test ``fix_metadata`` with cubes with no ij bounds.

    The index coordinates are already correctly named here; the fix should
    add index bounds and compute bounds for the 2D latitude/longitude
    auxiliary coordinates.
    """
    cube_in = tos_cubes_no_ij_bounds.extract_cube('sea_surface_temperature')
    assert len(cube_in.coords('latitude')) == 1
    assert len(cube_in.coords('longitude')) == 1
    assert cube_in.coord('latitude').bounds is None
    assert cube_in.coord('longitude').bounds is None
    assert cube_in.coord('cell index along first dimension').var_name == 'i'
    assert cube_in.coord('cell index along second dimension').var_name == 'j'
    assert cube_in.coord('cell index along first dimension').bounds is None
    assert cube_in.coord('cell index along second dimension').bounds is None
    # Apply fix
    vardef = get_var_info('CMIP6', 'Omon', 'tos')
    fix = OceanFixGrid(vardef)
    fixed_cubes = fix.fix_metadata(tos_cubes_no_ij_bounds)
    assert len(fixed_cubes) == 1
    fixed_cube = fixed_cubes.extract_cube('sea_surface_temperature')
    # The fix works in place on the extracted cube.
    assert fixed_cube is cube_in
    # Check ij names
    i_coord = fixed_cube.coord('cell index along first dimension')
    j_coord = fixed_cube.coord('cell index along second dimension')
    assert i_coord.var_name == 'i'
    assert i_coord.standard_name is None
    assert i_coord.long_name == 'cell index along first dimension'
    assert i_coord.units == '1'
    assert i_coord.circular is False
    assert j_coord.var_name == 'j'
    assert j_coord.standard_name is None
    assert j_coord.long_name == 'cell index along second dimension'
    assert j_coord.units == '1'
    # Check ij points and bounds
    np.testing.assert_allclose(i_coord.points, [0, 1, 2])
    np.testing.assert_allclose(i_coord.bounds,
                               [[-0.5, 0.5], [0.5, 1.5], [1.5, 2.5]])
    np.testing.assert_allclose(j_coord.points, [0, 1])
    np.testing.assert_allclose(j_coord.bounds, [[-0.5, 0.5], [0.5, 1.5]])
    # Check bounds of latitude and longitude
    assert len(fixed_cube.coords('latitude')) == 1
    assert len(fixed_cube.coords('longitude')) == 1
    assert fixed_cube.coord('latitude').bounds is not None
    assert fixed_cube.coord('longitude').bounds is not None
    latitude_bounds = np.array(
        [[[-43.48076211, -34.01923789, -22.00961894, -31.47114317],
          [-34.01923789, -10.0, 2.00961894, -22.00961894],
          [-10.0, -0.53847577, 11.47114317, 2.00961894]],
         [[-31.47114317, -22.00961894, -10.0, -19.46152423],
          [-22.00961894, 2.00961894, 14.01923789, -10.0],
          [2.00961894, 11.47114317, 23.48076211, 14.01923789]]]
    )
    np.testing.assert_allclose(fixed_cube.coord('latitude').bounds,
                               latitude_bounds)
    longitude_bounds = np.array([[[140.625, 99.375, 99.375, 140.625],
                                  [99.375, 140.625, 140.625, 99.375],
                                  [140.625, 99.375, 99.375, 140.625]],
                                 [[140.625, 99.375, 99.375, 140.625],
                                  [99.375, 140.625, 140.625, 99.375],
                                  [140.625, 99.375, 99.375, 140.625]]])
    np.testing.assert_allclose(fixed_cube.coord('longitude').bounds,
                               longitude_bounds)
| 41.650831 | 78 | 0.597605 |
ace5ccf3b3958206a6f68ce70751b2a08c32cf99 | 2,565 | py | Python | lavalink/events.py | Massinez/Lavalink.py | df841c1e2a98d1de5e0ed4634e2a925577b603dc | [
"MIT"
] | null | null | null | lavalink/events.py | Massinez/Lavalink.py | df841c1e2a98d1de5e0ed4634e2a925577b603dc | [
"MIT"
] | null | null | null | lavalink/events.py | Massinez/Lavalink.py | df841c1e2a98d1de5e0ed4634e2a925577b603dc | [
"MIT"
] | null | null | null | class Event:
""" The base for all Lavalink events. """
class QueueEndEvent(Event):
    """ This event is dispatched when there are no more songs in the queue.

    Parameters
    ----------
    player: BasePlayer
        The player whose queue ran out of tracks.
    """
    def __init__(self, player):
        self.player = player
class TrackStuckEvent(Event):
    """ This event is dispatched when the currently playing song is stuck.

    Parameters
    ----------
    player: BasePlayer
        The player playing the stuck track.
    track:
        The track that is stuck.
    threshold:
        The threshold reported with the stuck notification.
    """
    def __init__(self, player, track, threshold):
        self.player = player
        self.track = track
        self.threshold = threshold
class TrackExceptionEvent(Event):
    """ This event is dispatched when an exception occurs while playing a track.

    Parameters
    ----------
    player: BasePlayer
        The player that was playing the track.
    track:
        The track being played when the exception occurred.
    exception:
        The reported cause of the failure.
    """
    def __init__(self, player, track, exception):
        self.exception = exception
        self.player = player
        self.track = track
class TrackEndEvent(Event):
    """ This event is dispatched when the player finished playing a track.

    Parameters
    ----------
    player: BasePlayer
        The player that finished playing.
    track:
        The track that ended.
    reason:
        The reported reason the track ended.
    """
    def __init__(self, player, track, reason):
        self.reason = reason
        self.player = player
        self.track = track
class TrackStartEvent(Event):
    """ This event is dispatched when the player starts to play a track.

    Parameters
    ----------
    player: BasePlayer
        The player that started playing.
    track:
        The track that started.
    """
    def __init__(self, player, track):
        self.player = player
        self.track = track
class PlayerUpdateEvent(Event):
    """ This event is dispatched when the player's progress changes

    Parameters
    ----------
    player: BasePlayer
        The player whose progress changed.
    position: int
        The playback position.
    timestamp: int
        The timestamp of the update.
    """
    def __init__(self, player, position: int, timestamp: int):
        self.player = player
        self.position = position
        self.timestamp = timestamp
class NodeDisconnectedEvent(Event):
    """ This event is dispatched when a node disconnects and becomes unavailable

    Parameters
    ----------
    node: Node
        The node that disconnected.
    code: int
        The close code reported for the disconnect.
    reason: str
        The reason reported for the disconnect.
    """
    def __init__(self, node, code: int, reason: str):
        self.node = node
        self.code = code
        self.reason = reason
class NodeConnectedEvent(Event):
    """ This event is dispatched when Lavalink.py successfully connects to a node

    Parameters
    ----------
    node: Node
        The node that was connected to.
    """
    def __init__(self, node):
        self.node = node
class NodeChangedEvent(Event):
    """
    This event is dispatched when a player changes to another node.
    Keep in mind this event can be dispatched multiple times if a node
    disconnects and the load balancer moves players to a new node.

    Parameters
    ----------
    player: BasePlayer
        The player whose node was changed.
    old_node: Node
        The node the player was moved from.
    new_node: Node
        The node the player was moved to.
    """
    def __init__(self, player, old_node, new_node):
        self.player = player
        self.old_node = old_node
        self.new_node = new_node
# TODO: The above needs their parameters documented.
| 29.825581 | 85 | 0.663158 |
ace5cd895db32cd2ce64bd1e88e593e6018b187e | 81 | py | Python | adventofcode/2015/1/which_floor/__init__.py | bneradt/toy | 982e80ec98f4e951f7275e5f22cb0197f8f86c08 | [
"Apache-2.0"
] | null | null | null | adventofcode/2015/1/which_floor/__init__.py | bneradt/toy | 982e80ec98f4e951f7275e5f22cb0197f8f86c08 | [
"Apache-2.0"
] | null | null | null | adventofcode/2015/1/which_floor/__init__.py | bneradt/toy | 982e80ec98f4e951f7275e5f22cb0197f8f86c08 | [
"Apache-2.0"
] | null | null | null | from .which_floor import description_to_final_floor, get_first_basement_position
| 40.5 | 80 | 0.91358 |
ace5cdb0661b75db3d05d595aa01d778f9935ad0 | 25,468 | py | Python | lib/galaxy_test/api/test_dataset_collections.py | NordicESMhub/galaxy | ec3ffb7d8ec5dac9b179f4c9d39b8060b69d5492 | [
"CC-BY-3.0"
] | 1 | 2018-12-04T10:39:31.000Z | 2018-12-04T10:39:31.000Z | lib/galaxy_test/api/test_dataset_collections.py | NordicESMhub/galaxy | ec3ffb7d8ec5dac9b179f4c9d39b8060b69d5492 | [
"CC-BY-3.0"
] | 78 | 2019-01-18T08:12:49.000Z | 2022-03-13T08:56:41.000Z | lib/galaxy_test/api/test_dataset_collections.py | NordicESMhub/galaxy | ec3ffb7d8ec5dac9b179f4c9d39b8060b69d5492 | [
"CC-BY-3.0"
] | 9 | 2019-01-18T07:49:12.000Z | 2021-06-26T22:21:09.000Z | import json
import zipfile
from io import BytesIO
from galaxy_test.base.api_asserts import assert_object_id_error
from galaxy_test.base.populators import DatasetCollectionPopulator, DatasetPopulator, skip_if_github_down
from ._framework import ApiTestCase
class DatasetCollectionApiTestCase(ApiTestCase):
def setUp(self):
super().setUp()
self.dataset_populator = DatasetPopulator(self.galaxy_interactor)
self.dataset_collection_populator = DatasetCollectionPopulator(self.galaxy_interactor)
self.history_id = self.dataset_populator.new_history()
def test_create_pair_from_history(self):
payload = self.dataset_collection_populator.create_pair_payload(
self.history_id,
instance_type="history",
)
create_response = self._post("dataset_collections", payload)
dataset_collection = self._check_create_response(create_response)
returned_datasets = dataset_collection["elements"]
assert len(returned_datasets) == 2, dataset_collection
def test_create_list_from_history(self):
element_identifiers = self.dataset_collection_populator.list_identifiers(self.history_id)
payload = dict(
instance_type="history",
history_id=self.history_id,
element_identifiers=json.dumps(element_identifiers),
collection_type="list",
)
create_response = self._post("dataset_collections", payload)
dataset_collection = self._check_create_response(create_response)
returned_datasets = dataset_collection["elements"]
assert len(returned_datasets) == 3, dataset_collection
def test_create_list_of_existing_pairs(self):
pair_payload = self.dataset_collection_populator.create_pair_payload(
self.history_id,
instance_type="history",
)
pair_create_response = self._post("dataset_collections", pair_payload)
dataset_collection = self._check_create_response(pair_create_response)
hdca_id = dataset_collection["id"]
element_identifiers = [
dict(name="test1", src="hdca", id=hdca_id)
]
payload = dict(
instance_type="history",
history_id=self.history_id,
element_identifiers=json.dumps(element_identifiers),
collection_type="list",
)
create_response = self._post("dataset_collections", payload)
dataset_collection = self._check_create_response(create_response)
returned_collections = dataset_collection["elements"]
assert len(returned_collections) == 1, dataset_collection
def test_create_list_of_new_pairs(self):
identifiers = self.dataset_collection_populator.nested_collection_identifiers(self.history_id, "list:paired")
payload = dict(
collection_type="list:paired",
instance_type="history",
history_id=self.history_id,
name="a nested collection",
element_identifiers=json.dumps(identifiers),
)
create_response = self._post("dataset_collections", payload)
dataset_collection = self._check_create_response(create_response)
assert dataset_collection["collection_type"] == "list:paired"
assert dataset_collection["name"] == "a nested collection"
returned_collections = dataset_collection["elements"]
assert len(returned_collections) == 1, dataset_collection
pair_1_element = returned_collections[0]
self._assert_has_keys(pair_1_element, "element_identifier", "element_index", "object")
assert pair_1_element["element_identifier"] == "test_level_1", pair_1_element
assert pair_1_element["element_index"] == 0, pair_1_element
pair_1_object = pair_1_element["object"]
self._assert_has_keys(pair_1_object, "collection_type", "elements", "element_count")
self.assertEqual(pair_1_object["collection_type"], "paired")
self.assertEqual(pair_1_object["populated"], True)
pair_elements = pair_1_object["elements"]
assert len(pair_elements) == 2
pair_1_element_1 = pair_elements[0]
assert pair_1_element_1["element_index"] == 0
def test_list_download(self):
fetch_response = self.dataset_collection_populator.create_list_in_history(self.history_id, direct_upload=True).json()
dataset_collection = self.dataset_collection_populator.wait_for_fetched_collection(fetch_response)
returned_dce = dataset_collection["elements"]
assert len(returned_dce) == 3, dataset_collection
create_response = self._download_dataset_collection(history_id=self.history_id, hdca_id=dataset_collection['id'])
self._assert_status_code_is(create_response, 200)
archive = zipfile.ZipFile(BytesIO(create_response.content))
namelist = archive.namelist()
assert len(namelist) == 3, f"Expected 3 elements in [{namelist}]"
collection_name = dataset_collection['name']
for element, zip_path in zip(returned_dce, namelist):
assert f"{collection_name}/{element['element_identifier']}.{element['object']['file_ext']}" == zip_path
def test_pair_download(self):
fetch_response = self.dataset_collection_populator.create_pair_in_history(self.history_id, direct_upload=True).json()
dataset_collection = self.dataset_collection_populator.wait_for_fetched_collection(fetch_response)
returned_dce = dataset_collection["elements"]
assert len(returned_dce) == 2, dataset_collection
hdca_id = dataset_collection['id']
create_response = self._download_dataset_collection(history_id=self.history_id, hdca_id=hdca_id)
self._assert_status_code_is(create_response, 200)
archive = zipfile.ZipFile(BytesIO(create_response.content))
namelist = archive.namelist()
assert len(namelist) == 2, f"Expected 2 elements in [{namelist}]"
collection_name = dataset_collection['name']
for element, zip_path in zip(returned_dce, namelist):
assert f"{collection_name}/{element['element_identifier']}.{element['object']['file_ext']}" == zip_path
def test_list_pair_download(self):
fetch_response = self.dataset_collection_populator.create_list_of_pairs_in_history(self.history_id).json()
dataset_collection = self.dataset_collection_populator.wait_for_fetched_collection(fetch_response)
returned_dce = dataset_collection["elements"]
assert len(returned_dce) == 1, dataset_collection
list_collection_name = dataset_collection['name']
pair = returned_dce[0]
create_response = self._download_dataset_collection(history_id=self.history_id, hdca_id=dataset_collection['id'])
self._assert_status_code_is(create_response, 200)
archive = zipfile.ZipFile(BytesIO(create_response.content))
namelist = archive.namelist()
assert len(namelist) == 2, f"Expected 2 elements in [{namelist}]"
pair_collection_name = pair['element_identifier']
for element, zip_path in zip(pair['object']['elements'], namelist):
assert f"{list_collection_name}/{pair_collection_name}/{element['element_identifier']}.{element['object']['file_ext']}" == zip_path
def test_list_list_download(self):
dataset_collection = self.dataset_collection_populator.create_list_of_list_in_history(self.history_id).json()
self.dataset_collection_populator.wait_for_dataset_collection(dataset_collection, assert_ok=True)
returned_dce = dataset_collection["elements"]
assert len(returned_dce) == 1, dataset_collection
create_response = self._download_dataset_collection(history_id=self.history_id, hdca_id=dataset_collection['id'])
self._assert_status_code_is(create_response, 200)
archive = zipfile.ZipFile(BytesIO(create_response.content))
namelist = archive.namelist()
assert len(namelist) == 3, f"Expected 3 elements in [{namelist}]"
def test_list_list_list_download(self):
dataset_collection = self.dataset_collection_populator.create_list_of_list_in_history(self.history_id, collection_type='list:list:list').json()
self.dataset_collection_populator.wait_for_dataset_collection(dataset_collection, assert_ok=True)
returned_dce = dataset_collection["elements"]
assert len(returned_dce) == 1, dataset_collection
create_response = self._download_dataset_collection(history_id=self.history_id, hdca_id=dataset_collection['id'])
self._assert_status_code_is(create_response, 200)
archive = zipfile.ZipFile(BytesIO(create_response.content))
namelist = archive.namelist()
assert len(namelist) == 3, f"Expected 3 elements in [{namelist}]"
def test_hda_security(self):
element_identifiers = self.dataset_collection_populator.pair_identifiers(self.history_id)
self.dataset_populator.make_private(self.history_id, element_identifiers[0]["id"])
with self._different_user():
history_id = self.dataset_populator.new_history()
payload = dict(
instance_type="history",
history_id=history_id,
element_identifiers=json.dumps(element_identifiers),
collection_type="paired",
)
create_response = self._post("dataset_collections", payload)
self._assert_status_code_is(create_response, 403)
def test_enforces_unique_names(self):
element_identifiers = self.dataset_collection_populator.list_identifiers(self.history_id)
element_identifiers[2]["name"] = element_identifiers[0]["name"]
payload = dict(
instance_type="history",
history_id=self.history_id,
element_identifiers=json.dumps(element_identifiers),
collection_type="list",
)
create_response = self._post("dataset_collections", payload)
self._assert_status_code_is(create_response, 400)
    def test_upload_collection(self):
        """Fetch-API upload into an HDCA preserves the collection name and both collection- and element-level tags."""
        elements = [{"src": "files", "dbkey": "hg19", "info": "my cool bed", "tags": ["name:data1", "group:condition:treated", "machine:illumina"]}]
        targets = [{
            "destination": {"type": "hdca"},
            "elements": elements,
            "collection_type": "list",
            "name": "Test upload",
            "tags": ["name:collection1"]
        }]
        payload = {
            "history_id": self.history_id,
            "targets": json.dumps(targets),
            "__files": {"files_0|file_data": open(self.test_data_resolver.get_filename("4.bed"))},
        }
        self.dataset_populator.fetch(payload)
        hdca = self._assert_one_collection_created_in_history()
        self.assertEqual(hdca["name"], "Test upload")
        hdca_tags = hdca["tags"]
        assert len(hdca_tags) == 1
        assert "name:collection1" in hdca_tags
        assert len(hdca["elements"]) == 1, hdca
        element0 = hdca["elements"][0]
        assert element0["element_identifier"] == "4.bed"
        dataset0 = element0["object"]
        # 61 is the expected byte size of the 4.bed test file.
        assert dataset0["file_size"] == 61
        dataset_tags = dataset0["tags"]
        assert len(dataset_tags) == 3, dataset0
    def test_upload_nested(self):
        """Fetch-API upload can build a nested list:list collection in one request."""
        elements = [{"name": "samp1", "elements": [{"src": "files", "dbkey": "hg19", "info": "my cool bed"}]}]
        targets = [{
            "destination": {"type": "hdca"},
            "elements": elements,
            "collection_type": "list:list",
            "name": "Test upload",
        }]
        payload = {
            "history_id": self.history_id,
            "targets": json.dumps(targets),
            "__files": {"files_0|file_data": open(self.test_data_resolver.get_filename("4.bed"))},
        }
        self.dataset_populator.fetch(payload)
        hdca = self._assert_one_collection_created_in_history()
        self.assertEqual(hdca["name"], "Test upload")
        assert len(hdca["elements"]) == 1, hdca
        element0 = hdca["elements"][0]
        # The outer element carries the nested sub-collection's name.
        assert element0["element_identifier"] == "samp1"
    @skip_if_github_down
    def test_upload_collection_from_url(self):
        """A collection element can be fetched from a remote URL (GitHub test-data)."""
        elements = [{"src": "url", "url": "https://raw.githubusercontent.com/galaxyproject/galaxy/dev/test-data/4.bed", "info": "my cool bed"}]
        targets = [{
            "destination": {"type": "hdca"},
            "elements": elements,
            "collection_type": "list",
        }]
        payload = {
            "history_id": self.history_id,
            "targets": json.dumps(targets),
            "__files": {"files_0|file_data": open(self.test_data_resolver.get_filename("4.bed"))},
        }
        self.dataset_populator.fetch(payload)
        hdca = self._assert_one_collection_created_in_history()
        assert len(hdca["elements"]) == 1, hdca
        element0 = hdca["elements"][0]
        assert element0["element_identifier"] == "4.bed"
        # 61 is the expected byte size of the 4.bed test file.
        assert element0["object"]["file_size"] == 61
    @skip_if_github_down
    def test_upload_collection_failed_expansion_url(self):
        """A failed bagit expansion leaves the HDCA unpopulated with an explanatory message."""
        targets = [{
            "destination": {"type": "hdca"},
            "elements_from": "bagit",
            "collection_type": "list",
            "src": "url",
            # Not a bagit archive, so expansion is expected to fail.
            "url": "https://raw.githubusercontent.com/galaxyproject/galaxy/dev/test-data/4.bed",
        }]
        payload = {
            "history_id": self.history_id,
            "targets": json.dumps(targets),
            "__files": {"files_0|file_data": open(self.test_data_resolver.get_filename("4.bed"))},
        }
        self.dataset_populator.fetch(payload, assert_ok=False, wait=True)
        hdca = self._assert_one_collection_created_in_history()
        assert hdca["populated"] is False
        assert "bagit.txt" in hdca["populated_state_message"], hdca
def _assert_one_collection_created_in_history(self):
contents_response = self._get(f"histories/{self.history_id}/contents/dataset_collections")
self._assert_status_code_is(contents_response, 200)
contents = contents_response.json()
assert len(contents) == 1
hdca = contents[0]
assert hdca["history_content_type"] == "dataset_collection"
hdca_id = hdca["id"]
collection_response = self._get(f"histories/{self.history_id}/contents/dataset_collections/{hdca_id}")
self._assert_status_code_is(collection_response, 200)
return collection_response.json()
def _check_create_response(self, create_response):
self._assert_status_code_is(create_response, 200)
dataset_collection = create_response.json()
self._assert_has_keys(dataset_collection, "elements", "url", "name", "collection_type", "element_count")
return dataset_collection
def _download_dataset_collection(self, history_id, hdca_id):
return self._get(f"histories/{history_id}/contents/dataset_collections/{hdca_id}/download")
def test_collection_contents_security(self):
# request contents on an hdca that doesn't belong to user
hdca, contents_url = self._create_collection_contents_pair()
with self._different_user():
contents_response = self._get(contents_url)
self._assert_status_code_is(contents_response, 403)
    def test_collection_contents_invalid_collection(self):
        """Requesting a bogus collection id on a valid HDCA yields an object-id error."""
        # request an invalid collection from a valid hdca, should get 404
        hdca, contents_url = self._create_collection_contents_pair()
        response = self._get(contents_url)
        self._assert_status_code_is(response, 200)
        # A well-formed but nonexistent encoded id.
        fake_collection_id = '5d7db0757a2eb7ef'
        fake_contents_url = f"/api/dataset_collections/{hdca['id']}/contents/{fake_collection_id}"
        error_response = self._get(fake_contents_url)
        assert_object_id_error(error_response)
    def test_show_dataset_collection(self):
        """GET /api/dataset_collections/{id} returns the collection with matching id and type."""
        fetch_response = self.dataset_collection_populator.create_list_in_history(self.history_id, direct_upload=True).json()
        dataset_collection = self.dataset_collection_populator.wait_for_fetched_collection(fetch_response)
        returned_dce = dataset_collection["elements"]
        assert len(returned_dce) == 3, dataset_collection
        hdca_id = dataset_collection['id']
        dataset_collection_url = f"/api/dataset_collections/{hdca_id}"
        dataset_collection = self._get(dataset_collection_url).json()
        assert dataset_collection['id'] == hdca_id
        assert dataset_collection['collection_type'] == 'list'
    def test_show_dataset_collection_contents(self):
        """Contents endpoint lists the top level of an HDCA and links to nested levels."""
        # Get contents_url from history contents, use it to show the first level
        # of collection contents in the created HDCA, then use it again to drill
        # down into the nested collection contents
        hdca = self.dataset_collection_populator.create_list_of_list_in_history(self.history_id).json()
        root_contents_url = self._get_contents_url_for_hdca(hdca)
        # check root contents for this collection
        root_contents = self._get(root_contents_url).json()
        assert len(root_contents) == len(hdca['elements'])
        self._compare_collection_contents_elements(root_contents, hdca['elements'])
        # drill down, retrieve nested collection contents
        assert 'object' in root_contents[0]
        assert 'contents_url' in root_contents[0]['object']
        drill_contents_url = root_contents[0]['object']['contents_url']
        drill_contents = self._get(drill_contents_url).json()
        assert len(drill_contents) == len(hdca['elements'][0]['object']['elements'])
        self._compare_collection_contents_elements(drill_contents, hdca['elements'][0]['object']['elements'])
    def test_collection_contents_limit_offset(self):
        """limit/offset query params page through collection contents by element_index."""
        # check limit/offset params for collection contents endpoint
        hdca, root_contents_url = self._create_collection_contents_pair()
        # check limit
        limited_contents = self._get(f"{root_contents_url}?limit=1").json()
        assert len(limited_contents) == 1
        assert limited_contents[0]['element_index'] == 0
        # check offset
        offset_contents = self._get(f"{root_contents_url}?offset=1").json()
        assert len(offset_contents) == 1
        assert offset_contents[0]['element_index'] == 1
    def test_get_suitable_converters_single_datatype(self):
        """All-bed list:paired collection advertises the full set of bed converters."""
        response = self.dataset_collection_populator.upload_collection(self.history_id, "list:paired", elements=[
            {
                "name": "test0",
                "elements": [
                    {"src": "pasted", "paste_content": "123\n", "name": "forward", "ext": "bed"},
                    {"src": "pasted", "paste_content": "456\n", "name": "reverse", "ext": "bed"},
                ]
            },
            {
                "name": "test1",
                "elements": [
                    {"src": "pasted", "paste_content": "789\n", "name": "forward", "ext": "bed"},
                    {"src": "pasted", "paste_content": "0ab\n", "name": "reverse", "ext": "bed"},
                ]
            }
        ])
        self._assert_status_code_is(response, 200)
        hdca_list_id = response.json()["outputs"][0]["id"]
        converters = self._get("dataset_collections/" + hdca_list_id + "/suitable_converters")
        expected = [
            'CONVERTER_bed_to_fli_0',
            'CONVERTER_interval_to_bed_0',
            'CONVERTER_bed_gff_or_vcf_to_bigwig_0',
            'CONVERTER_bed_to_gff_0',
            'CONVERTER_interval_to_bgzip_0',
            'tabular_to_csv',
            'CONVERTER_interval_to_bed6_0',
            'CONVERTER_interval_to_bedstrict_0',
            'CONVERTER_interval_to_tabix_0',
            'CONVERTER_interval_to_bed12_0']
        actual = []
        for converter in converters.json():
            actual.append(converter["tool_id"])
        # Order is not guaranteed, so compare as sorted lists.
        assert sorted(actual) == sorted(expected)
    def test_get_suitable_converters_different_datatypes_matches(self):
        """Mixed bed/tabular collection only advertises converters common to both types."""
        response = self.dataset_collection_populator.upload_collection(self.history_id, "list:paired", elements=[
            {
                "name": "test0",
                "elements": [
                    {"src": "pasted", "paste_content": "123\n", "name": "forward", "ext": "bed"},
                    {"src": "pasted", "paste_content": "456\n", "name": "reverse", "ext": "bed"},
                ]
            },
            {
                "name": "test1",
                "elements": [
                    {"src": "pasted", "paste_content": "789\n", "name": "forward", "ext": "tabular"},
                    {"src": "pasted", "paste_content": "0ab\n", "name": "reverse", "ext": "tabular"},
                ]
            }
        ])
        self._assert_status_code_is(response, 200)
        hdca_list_id = response.json()["outputs"][0]["id"]
        converters = self._get("dataset_collections/" + hdca_list_id + "/suitable_converters")
        # tabular_to_csv is the only converter shared by bed and tabular.
        expected = ['tabular_to_csv']
        actual = []
        for converter in converters.json():
            actual.append(converter["tool_id"])
        assert sorted(actual) == sorted(expected)
    def test_get_suitable_converters_different_datatypes_no_matches(self):
        """Mixed bed/fasta collection shares no converters, so none are advertised."""
        response = self.dataset_collection_populator.upload_collection(self.history_id, "list:paired", elements=[
            {
                "name": "test0",
                "elements": [
                    {"src": "pasted", "paste_content": "123\n", "name": "forward", "ext": "bed"},
                    {"src": "pasted", "paste_content": "456\n", "name": "reverse", "ext": "bed"},
                ]
            },
            {
                "name": "test1",
                "elements": [
                    {"src": "pasted", "paste_content": "789\n", "name": "forward", "ext": "fasta"},
                    {"src": "pasted", "paste_content": "0ab\n", "name": "reverse", "ext": "fasta"},
                ]
            }
        ])
        self._assert_status_code_is(response, 200)
        hdca_list_id = response.json()["outputs"][0]["id"]
        converters = self._get("dataset_collections/" + hdca_list_id + "/suitable_converters")
        expected = []
        actual = []
        for converter in converters.json():
            actual.append(converter["tool_id"])
        assert sorted(actual) == sorted(expected)
    def test_collection_tools_tag_propagation(self):
        """Collection tools copy collection-level tags to the output collection and
        element-level tags to the output datasets, without mixing the two."""
        elements = [{"src": "files", "tags": ["name:element_tag"]}]
        targets = [{
            "destination": {"type": "hdca"},
            "elements": elements,
            "collection_type": "list",
            "name": "Test collection",
            "tags": ["name:collection_tag"]
        }]
        payload = {
            "history_id": self.history_id,
            "targets": json.dumps(targets),
            "__files": {"files_0|file_data": open(self.test_data_resolver.get_filename("4.bed"))},
        }
        hdca_id = self.dataset_populator.fetch(payload).json()['output_collections'][0]['id']
        inputs = {
            "input": {"batch": False, "src": "hdca", "id": hdca_id},
        }
        payload = self.dataset_populator.run_tool_payload(
            tool_id='__FILTER_FAILED_DATASETS__',
            inputs=inputs,
            history_id=self.history_id,
            input_format='legacy',
        )
        response = self._post("tools", payload).json()
        self.dataset_populator.wait_for_history(self.history_id, assert_ok=False)
        output_collection = response["output_collections"][0]
        # collection should not inherit tags from input collection elements, only parent collection
        assert output_collection['tags'] == ["name:collection_tag"]
        element = output_collection['elements'][0]
        # new element hda should have tags copied from old hda
        assert element['object']['tags'] == ['name:element_tag']
def _compare_collection_contents_elements(self, contents_elements, hdca_elements):
# compare collection api results to existing hdca element contents
fields = ['element_identifier', 'element_index', 'element_type', 'id', 'model_class']
for (content_element, hdca_element) in zip(contents_elements, hdca_elements):
for f in fields:
assert content_element[f] == hdca_element[f]
def _create_collection_contents_pair(self):
# Create a simple collection, return hdca and contents_url
payload = self.dataset_collection_populator.create_pair_payload(self.history_id, instance_type="history")
create_response = self._post("dataset_collections", payload)
hdca = self._check_create_response(create_response)
root_contents_url = self._get_contents_url_for_hdca(hdca)
return hdca, root_contents_url
def _get_contents_url_for_hdca(self, hdca):
# look up the history contents using optional serialization key
history_contents_url = f"histories/{self.history_id}/contents?v=dev&view=summary&keys=contents_url"
json = self._get(history_contents_url).json()
# filter out the collection we just made id = hdca.id
# make sure the contents_url appears
def find_hdca(c):
return c['history_content_type'] == 'dataset_collection' and c['id'] == hdca['id']
matches = list(filter(find_hdca, json))
assert len(matches) == 1
assert 'contents_url' in matches[0]
return matches[0]['contents_url']
| 49.937255 | 151 | 0.657021 |
ace5ce5f6cb16095ddd160701934e7c582e303de | 7,198 | py | Python | plotbee/utils.py | jachansantiago/plotbee | fb8d984015640707e3641b0fe4434dbadd22ed1f | [
"MIT"
] | null | null | null | plotbee/utils.py | jachansantiago/plotbee | fb8d984015640707e3641b0fe4434dbadd22ed1f | [
"MIT"
] | null | null | null | plotbee/utils.py | jachansantiago/plotbee | fb8d984015640707e3641b0fe4434dbadd22ed1f | [
"MIT"
] | null | null | null | import json
import cv2
import numpy as np
import random
import os
from skimage import io
from tqdm import tqdm
import math
from collections import defaultdict
import matplotlib.pyplot as plt
import bisect
from skimage import io
import pandas as pd
from concurrent import futures
YELLOW = (255, 255, 0)
RED = (255, 0, 0)
BLUE = (0, 0, 255)
MAGENTA = (255, 0, 255)
GREEN = (0, 255, 0)
COLOR_BY_CONNECTION = {
(1, 3) : BLUE,
(3, 2) : RED,
(2, 4) : YELLOW,
(2, 5) : YELLOW,
(1, 2) : MAGENTA
}
COLOR_BY_PART = {
'0' : BLUE, #TAIL
'1' : RED, #HEAD
'2' : MAGENTA, #ABDOMEN
'3' : YELLOW, #ANTENA
'4' : YELLOW #ANTENA
}
def read_json(path):
    """Load and return the JSON content stored at ``path``."""
    with open(path, 'r') as fp:
        return json.load(fp)
def save_json(path, data):
    """Write ``data`` to ``path`` as indented (pretty-printed) JSON."""
    serialized = json.dumps(data, indent=4)
    with open(path, 'w') as outfile:
        outfile.write(serialized)
def hash1(i):
    """First-channel pseudo-hash: deterministically map an integer id into [0, 255]."""
    modular = (88 * i + 78) % 2029
    return modular % 256
def hash2(i):
    """Second-channel pseudo-hash: deterministically map an integer id into [0, 255]."""
    modular = (90 * i + 9) % 2683
    return modular % 256
def hash3(i):
    """Third-channel pseudo-hash: deterministically map an integer id into [0, 255]."""
    modular = (99 * i + 100) % 2719
    return modular % 256
def id2color(i):
    """Map an integer id to a bright, deterministic RGB color; -1 maps to black.

    Each channel hash is halved and offset by 128 so all channels land in
    [128, 255], keeping the colors bright.
    """
    if i == -1:
        return (0, 0, 0)
    red = (((88 * i + 78) % 2029) % 256) // 2 + 128
    green = (((90 * i + 9) % 2683) % 256) // 2 + 128
    blue = (((99 * i + 100) % 2719) % 256) // 2 + 128
    return red, green, blue
def angleBetweenPoints(p1, p2):
    """Return the angle in degrees, normalized to [0, 360), of the vector p2 -> p1.

    Note the atan2 argument order: the x-delta is passed first, then the
    y-delta (the reverse of the usual ``atan2(y, x)`` convention).
    """
    delta_x = p1[0] - p2[0]
    delta_y = p1[1] - p2[1]
    degrees = math.degrees(math.atan2(delta_x, delta_y))
    return degrees % 360
def rotate_around_point(xy, radians, origin=(0, 0)):
    """Rotate point ``xy`` by ``radians`` about ``origin``.

    The trig values are computed once and reused for both output coordinates.
    Results are truncated to ints.
    """
    px, py = xy
    ox, oy = origin
    dx = px - ox
    dy = py - oy
    c = math.cos(radians)
    s = math.sin(radians)
    rotated_x = ox + c * dx + s * dy
    rotated_y = oy - s * dx + c * dy
    return int(rotated_x), int(rotated_y)
def bound_box_points(p):
    """Return the 4 corners of a fixed-size oriented bounding box around point ``p``.

    ``p`` is a sequence whose first two entries are x, y and whose last entry
    is an angle in degrees (-1 means "no angle" and is treated as 0). The box
    is 200 wide and 400 tall (200 above and 200 below the center) before
    rotation. Corners are returned in order p1..p4 (top-left, top-right,
    bottom-right, bottom-left, pre-rotation).
    """
    W = 100   # half-width of the box
    HU = 200  # extent above the center
    HL = 200  # extent below the center
    x, y, *_ = p
    *_, angle = p
    if angle == -1:
        angle = 0
    # -90 degree offset; presumably aligns the box with the body axis
    # convention used elsewhere -- TODO confirm.
    angle = math.radians(angle - 90)
    x, y = int(x), int(y)
    p1 = x - W, y - HU
    p2 = x + W, y - HU
    p3 = x + W, y + HL
    p4 = x - W, y + HL
    p1 = rotate_around_point(p1, angle, (x,y))
    p2 = rotate_around_point(p2, angle, (x,y))
    p3 = rotate_around_point(p3, angle, (x,y))
    p4 = rotate_around_point(p4, angle, (x,y))
    return p1, p2, p3, p4
def rotatedBoundBoxPoints(p, angle, width, height):
    """Return the 4 corners of a width-by-height box centered at ``p``, rotated
    by ``angle`` degrees about the center (-1 means "no angle", treated as 0).

    Corners are returned in order p1..p4 (top-left, top-right, bottom-right,
    bottom-left, pre-rotation). Integer division halves the size, so odd
    widths/heights are truncated.
    """
    W = width//2
    H = height//2
    x, y = p
    if angle == -1:
        angle = 0
    angle = math.radians(angle)
    x, y = int(x), int(y)
    p1 = x - W, y - H
    p2 = x + W, y - H
    p3 = x + W, y + H
    p4 = x - W, y + H
    p1 = rotate_around_point(p1, angle, (x,y))
    p2 = rotate_around_point(p2, angle, (x,y))
    p3 = rotate_around_point(p3, angle, (x,y))
    p4 = rotate_around_point(p4, angle, (x,y))
    return p1, p2, p3, p4
def pointInRotatedBbox(p, center, angle, width, height):
    """Return True if point ``p`` lies inside the rotated box described by
    ``center``, ``angle`` (degrees), ``width`` and ``height``.

    Instead of rotating the box, the point is rotated by the inverse angle
    into the box's axis-aligned frame and tested against the plain bounds.
    """
    angle = math.radians(angle)
    rotated_p = rotate_around_point(p, -angle, center)
    W = width//2
    H = height//2
    x, y = center
    rx, ry = rotated_p
    # Inclusive bounds on all four edges.
    if rx >= x - W and rx <= x + W and ry >= y - H and ry <= y + H:
        return True
    return False
def plot_bounding_box(frame, p, color):
    """Draw the oriented bounding box of ``p`` onto ``frame`` and return the frame.

    ``p`` follows the ``bound_box_points`` convention (last entry is the angle
    in degrees). When the angle is -1 the box is not drawn and the frame is
    returned unchanged.
    """
    *_, angle = p
    if angle == -1:
        #color = (0,0,0)
        return frame
    p1, p2, p3, p4 = bound_box_points(p)
    # Draw the four edges of the quadrilateral.
    frame = cv2.line(frame, p1, p2, color=color, thickness=7)
    frame = cv2.line(frame, p2, p3, color=color, thickness=7)
    frame = cv2.line(frame, p3, p4, color=color, thickness=7)
    frame = cv2.line(frame, p4, p1, color=color, thickness=7)
    return frame
def getRotationMatrix(image_size,x,y,angle, w,h, cX=None, cY=None, scale=1.0):
    """Build a 2x3 affine matrix that rotates about (x, y) in the input image
    and maps that point to (cX, cY) in a w-by-h output (default: output center).

    Intended for use with ``cv2.warpAffine`` (see ``rotate_bound2``).
    """
    # grab the dimensions of the image and then determine the
    # center
    (h0, w0) = image_size
    (pX, pY) = (x, y) # Rect center in input
    if cX is None:
        cX = w / 2 # Rect center in output
    if cY is None:
        cY = h / 2 # Rect center in output
    # grab the rotation matrix (applying the negative of the
    # angle to rotate clockwise), then grab the sine and cosine
    # (i.e., the rotation components of the matrix)
    M = cv2.getRotationMatrix2D((cX, cY), angle, scale) # angle in degrees
    # NOTE(review): cos/sin are computed but unused below -- presumably
    # left over from an output-size adjustment; confirm before removing.
    cos = np.abs(M[0, 0])
    sin = np.abs(M[0, 1])
    # adjust the rotation matrix to take into account translation
    M[0, 2] += pX - cX
    M[1, 2] += pY - cY
    return M
def rotate_bound2(image,x,y,angle, w,h, cX=None, cY=None, scale=1.0):
    """Extract a w-by-h crop of ``image`` rotated by ``angle`` degrees around (x, y).

    Uses WARP_INVERSE_MAP with the matrix from ``getRotationMatrix`` and
    replicates border pixels for areas outside the source image.
    """
    image_size = image.shape[:2]
    M = getRotationMatrix(image_size,x,y,angle, w,h, cX, cY, scale)
    # perform the actual rotation and return the image
    return cv2.warpAffine(image, M, (w, h), flags=cv2.WARP_INVERSE_MAP,borderMode=cv2.BORDER_REPLICATE)
def get_fname(path):
    """Return the final path component (file name) of ``path``.

    Uses ``os.path.basename`` instead of a manual ``'/'`` split, so it also
    handles the local OS path separator. Behavior is unchanged for
    POSIX-style paths.
    """
    return os.path.basename(path)
def find_connections(body, detsByLimbs):
    """Grow ``body`` by following limb connections until no new part is added.

    Repeatedly scans the body's parts; whenever a part's point appears in a
    limb's detection list, the corresponding point of the connected part is
    added to the body. Iterates to a fixed point.

    NOTE(review): ``get_connections_with`` is not defined or imported in this
    module as visible here -- confirm where it comes from.
    """
    updated = True
    while updated:
        updated = False
        for part, points in body.get_parts():
            paths = get_connections_with(part)
            for limb, target in paths:
                for p in points:
                    if p in detsByLimbs[limb][part]:
                        # All occurrences of p, since the same point may
                        # participate in several detections.
                        indices = [i for i, x in enumerate(detsByLimbs[limb][part]) if x == p]
                        for indx in indices:
                            target_point = detsByLimbs[limb][target][indx]
                            if (target, target_point) not in body:
                                body.update(target, target_point)
                                updated = True
                    else:
                        continue
def trackevent2color(track):
    """Map a track's event/pollen state to a display color, or None for no event.

    Entering with pollen -> YELLOW, entering without -> GREEN, leaving -> RED.
    """
    if track.event == 'entering':
        return YELLOW if track.pollen else GREEN
    if track.event == 'leaving':
        return RED
    return None
def rescale_image(image, rescale_factor=4):
    """Downscale ``image`` by an integer factor (width and height divided by it)."""
    height, width, _ = image.shape
    target_size = (width // rescale_factor, height // rescale_factor)
    return cv2.resize(image, target_size)
# def divide_video(video, fname, N):
# frames = len(video)
# batch = frames//N
# fpath, ext = os.path.splitext(fname)
# filenames = list()
# for i in range(N):
# start = i * batch
# end = (i + 1) * batch
# if end > frames:
# end = frames
# v = video[start:end]
# path = fpath + "_" + str(i) + ext
# v.save(path)
# filenames.append(path)
# return filenames
# def merge_videos(video_names):
# v = Video.load(video_names[0])
# folder, file = os.path.split(video_names[0])
# pfname, ext = os.path.splitext(file)
# pfname = "_".join(pfname.split("_")[:-1]) + ext
# for pname in pollen_names[1:]:
# vi = Video.load(pname)
# v.append(vi)
# out_filename = os.path.join(folder, pfname)
# v.save(out_filename)
# return out_filename
| 24.993056 | 103 | 0.565435 |
ace5cf1cbabf6c538aa61c7e42edf3d4e3024b45 | 5,891 | py | Python | my_answers.py | AbdullahMu/Predicting-Bike-Sharing-Patterns | 06baae8302ce645639244627d5421bd900a700e4 | [
"MIT"
] | null | null | null | my_answers.py | AbdullahMu/Predicting-Bike-Sharing-Patterns | 06baae8302ce645639244627d5421bd900a700e4 | [
"MIT"
] | null | null | null | my_answers.py | AbdullahMu/Predicting-Bike-Sharing-Patterns | 06baae8302ce645639244627d5421bd900a700e4 | [
"MIT"
] | null | null | null | import numpy as np
class NeuralNetwork(object):
def __init__(self, input_nodes, hidden_nodes, output_nodes, learning_rate):
# Set number of nodes in input, hidden and output layers.
self.input_nodes = input_nodes
self.hidden_nodes = hidden_nodes
self.output_nodes = output_nodes
# Initialize weights
self.weights_input_to_hidden = np.random.normal(0.0, self.input_nodes**-0.5,
(self.input_nodes, self.hidden_nodes))
self.weights_hidden_to_output = np.random.normal(0.0, self.hidden_nodes**-0.5,
(self.hidden_nodes, self.output_nodes))
self.lr = learning_rate
# Sigmoid activation
self.activation_function = lambda x : 1 / (1 + np.exp(-x))
def train(self, features, targets):
''' Train the network on batch of features and targets.
Arguments
---------
features: 2D array, each row is one data record, each column is a feature
targets: 1D array of target values
'''
n_records = features.shape[0]
delta_weights_i_h = np.zeros(self.weights_input_to_hidden.shape)
delta_weights_h_o = np.zeros(self.weights_hidden_to_output.shape)
for X, y in zip(features, targets):
final_outputs, hidden_outputs = self.forward_pass_train(X) # Implement the forward pass function below
# Implement the backproagation function below
delta_weights_i_h, delta_weights_h_o = self.backpropagation(final_outputs, hidden_outputs, X, y,
delta_weights_i_h, delta_weights_h_o)
self.update_weights(delta_weights_i_h, delta_weights_h_o, n_records)
def forward_pass_train(self, X):
''' Implement forward pass here
Arguments
---------
X: features batch
'''
#### Implement the forward pass here ####
### Forward pass ###
hidden_inputs = np.dot(X, self.weights_input_to_hidden)
hidden_outputs = self.activation_function(hidden_inputs) # signals from hidden layer
# TODO: Output layer - Replace these values with your calculations.
final_inputs = np.dot(hidden_outputs, self.weights_hidden_to_output) # signals into final output layer
final_outputs = final_inputs # signals from final output layer (no activation because it is a regression problem)
return final_outputs, hidden_outputs
def backpropagation(self, final_outputs, hidden_outputs, X, y, delta_weights_i_h, delta_weights_h_o):
''' Implement backpropagation
Arguments
---------
final_outputs: output from forward pass
y: target (i.e. label) batch
delta_weights_i_h: change in weights from input to hidden layers
delta_weights_h_o: change in weights from hidden to output layers
'''
#### Implement the backward pass here ####
### Backward pass ###
error = y - final_outputs # Output layer error is the difference between desired target and actual output.
output_error_term = error #* final_outputs * (1 - final_outputs), no activation because it is a regression problem. f(h) = h ==> f'(h) = 1
# TODO: Calculate the hidden layer's contribution to the error
hidden_error = np.dot(self.weights_hidden_to_output, output_error_term)
# TODO: Calculate the hidden layer's contribution to the error
hidden_error_term = hidden_outputs * (1 - hidden_outputs) * hidden_error
# Weight step (input to hidden)
delta_weights_i_h += hidden_error_term * X[:, None]
# Weight step (hidden to output)
delta_weights_h_o += output_error_term * hidden_outputs[:, None]
return delta_weights_i_h, delta_weights_h_o
def update_weights(self, delta_weights_i_h, delta_weights_h_o, n_records):
''' Update weights on gradient descent step
Arguments
---------
delta_weights_i_h: change in weights from input to hidden layers
delta_weights_h_o: change in weights from hidden to output layers
n_records: number of records
'''
self.weights_hidden_to_output += delta_weights_h_o * self.lr / n_records # update hidden-to-output weights with gradient descent step
self.weights_input_to_hidden += delta_weights_i_h * self.lr / n_records # update input-to-hidden weights with gradient descent step
def run(self, features):
''' Run a forward pass through the network with input features
Arguments
---------
features: 1D array of feature values
'''
#### Implement the forward pass here ####
# TODO: Hidden layer - replace these values with the appropriate calculations.
hidden_inputs = np.dot(features, self.weights_input_to_hidden) # signals into hidden layer
hidden_outputs = self.activation_function(hidden_inputs) # signals from hidden layer
# TODO: Output layer - Replace these values with the appropriate calculations.
final_inputs = np.dot(hidden_outputs, self.weights_hidden_to_output) # signals into final output layer
final_outputs = final_inputs # signals from final output layer (no activation because it is a regression problem)
return final_outputs
#########################################################
# Set your hyperparameters here
##########################################################
# Number of gradient-descent iterations to train for.
iterations = 10000
# Step size for each weight update.
learning_rate = 0.5
# Width of the single hidden layer.
hidden_nodes = 20
# Single linear output node (regression).
output_nodes = 1
| 44.969466 | 146 | 0.623324 |
ace5d04b23ec4b44fe6f1bd5b16f4559a506a1c4 | 1,082 | py | Python | eNMS/models/services/configuration/napalm_rollback.py | afourmy/e-napalm | 622c531bf2251917b17461a895dc26f891588ddf | [
"MIT"
] | 25 | 2017-11-21T09:05:55.000Z | 2017-12-13T09:43:42.000Z | eNMS/models/services/configuration/napalm_rollback.py | afourmy/e-napalm | 622c531bf2251917b17461a895dc26f891588ddf | [
"MIT"
] | 11 | 2017-11-18T21:49:42.000Z | 2017-12-08T09:49:13.000Z | eNMS/models/services/configuration/napalm_rollback.py | afourmy/e-napalm | 622c531bf2251917b17461a895dc26f891588ddf | [
"MIT"
] | 2 | 2017-11-30T20:46:32.000Z | 2017-12-07T23:33:07.000Z | from sqlalchemy import Boolean, ForeignKey, Integer
from eNMS.database import db
from eNMS.forms import NapalmForm
from eNMS.fields import HiddenField
from eNMS.models.automation import ConnectionService
class NapalmRollbackService(ConnectionService):
    """Service that rolls back a device's configuration via NAPALM."""
    __tablename__ = "napalm_rollback_service"
    pretty_name = "NAPALM Rollback"
    parent_type = "connection_service"
    # Joined-table inheritance from connection_service.
    id = db.Column(Integer, ForeignKey("connection_service.id"), primary_key=True)
    # NAPALM driver name; when use_device_driver is True, the device's own
    # driver takes precedence.
    driver = db.Column(db.SmallString)
    use_device_driver = db.Column(Boolean, default=True)
    # Connection timeout in seconds.
    timeout = db.Column(Integer, default=60)
    # Extra keyword arguments forwarded to the NAPALM driver.
    optional_args = db.Column(db.Dict)
    __mapper_args__ = {"polymorphic_identity": "napalm_rollback_service"}
    def job(self, run, device):
        """Open a NAPALM connection to ``device`` and roll back its configuration."""
        napalm_connection = run.napalm_connection(device)
        run.log("info", "Configuration Rollback with NAPALM", device)
        napalm_connection.rollback()
        return {"success": True, "result": "Rollback successful"}
class NapalmRollbackForm(NapalmForm):
    """Web form for NapalmRollbackService; inherits all fields from NapalmForm."""
    # Hidden discriminator tying the form to its service type.
    form_type = HiddenField(default="napalm_rollback_service")
ace5d160d6d7e2013c40e22015f569cce03017aa | 5,066 | py | Python | src/crl/interactivesessions/shells/terminalclient.py | SampoPaukkonen/crl-interactivesessions | a4500552ef25b4764d2d2855da269e391752d6a3 | [
"BSD-3-Clause"
] | null | null | null | src/crl/interactivesessions/shells/terminalclient.py | SampoPaukkonen/crl-interactivesessions | a4500552ef25b4764d2d2855da269e391752d6a3 | [
"BSD-3-Clause"
] | null | null | null | src/crl/interactivesessions/shells/terminalclient.py | SampoPaukkonen/crl-interactivesessions | a4500552ef25b4764d2d2855da269e391752d6a3 | [
"BSD-3-Clause"
] | null | null | null | import logging
import itertools
from contextlib import contextmanager
import pexpect
from monotonic import monotonic
from crl.interactivesessions.shells.shell import TimeoutError
from .remotemodules.msgmanager import MsgManagerBase
from .remotemodules.chunkcomm import (
ChunkReaderBase,
ChunkWriterBase)
from .remotemodules.msgs import Ack
__copyright__ = 'Copyright (C) 2019, Nokia'
LOGGER = logging.getLogger(__name__)
class TerminalClientError(Exception):
    """Transient communication failure (e.g. timeout); callers may retry."""
    pass
class TerminalClientFatalError(TerminalClientError):
    """Raised when retries are exhausted and the connection is considered broken."""
    pass
class TimerTimeout(Exception):
    """Raised by Timer.remaining_timeouts when the deadline has passed."""
    pass
class Timer(object):
    """Tracks a deadline and yields the seconds left until it expires."""

    def __init__(self, timeout):
        self._timeout = timeout
        self._start_time = monotonic()

    def remaining_timeouts(self):
        """Yield the remaining time before the deadline; raise TimerTimeout
        once it has passed."""
        while True:
            remaining = self._timeout - (monotonic() - self._start_time)
            if remaining <= 0:
                raise TimerTimeout()
            yield remaining
class TerminalClient(MsgManagerBase):
    """Reliable request/reply messaging over a pexpect terminal.

    Messages get monotonically increasing uids; sends are retried until an
    Ack or a reply with a matching uid arrives, and every non-Ack message
    received is acknowledged back to the peer.
    """
    def __init__(self):
        super(TerminalClient, self).__init__()
        self._wrap_timeout_exception = None
        self._terminalcomm = None
        # Source of per-message uids.
        self._uid_iter = itertools.count()
    def set_wrap_timeout_exception(self, wrap_timeout_exception):
        # Context-manager factory used to translate terminal timeouts.
        self._wrap_timeout_exception = wrap_timeout_exception
    def set_terminal(self, terminal):
        self._terminalcomm = TerminalComm(terminal)
        self.set_comm_factory(lambda: self._terminalcomm)
    def send_and_receive(self, msg, timeout):
        """Send ``msg`` (with a fresh uid) and return the peer's reply.

        Retries sending per the retry policy; raises TerminalClientFatalError
        if no reply ever arrives.
        """
        msg.set_uid(next(self._uid_iter))
        for received_msg in self._received_msgs_for_msg(msg):
            if isinstance(received_msg, Ack):
                # Peer acknowledged receipt; now wait for the actual reply.
                return self._try_to_receive_until_reply(msg, timeout)
            return received_msg
        # Retries exhausted: one last wait before declaring the link broken.
        return self._final_try_to_receive_until_reply(msg, timeout)
    def _received_msgs_for_msg(self, msg):
        # Send-and-wait once per retry timeout; yields each Ack/reply received.
        for t in self._retry.timeouts():
            self.send(msg)
            try:
                yield self._receive_ack_or_reply(msg, t)
            except (TerminalClientError, TimerTimeout) as e:
                LOGGER.debug('No reply yet for message uid %s: %s', msg.uid, e)
    def _try_to_receive_until_reply(self, msg, timeout):
        # Timeout here is transient -> TerminalClientError.
        with self._raise_in_timertimeout(TerminalClientError('Timeout')):
            return self._receive_until_reply(msg, timeout)
    def _final_try_to_receive_until_reply(self, msg, timeout):
        # Timeout here is terminal -> TerminalClientFatalError.
        with self._raise_in_timertimeout(TerminalClientFatalError('Connection broken')):
            return self._receive_until_reply(msg, timeout)
    @staticmethod
    @contextmanager
    def _raise_in_timertimeout(exception):
        # Translate TimerTimeout into the given exception.
        try:
            yield
        except TimerTimeout:
            raise exception
    def _receive_until_reply(self, msg, timeout):
        return self._receive_until_condition(timeout,
                                             lambda r: self._is_reply(msg, r))
    def _receive_ack_or_reply(self, msg, timeout):
        return self._receive_until_condition(timeout,
                                             lambda r: self._is_reply_or_ack(msg, r))
    def _receive_until_condition(self, timeout, condition):
        # Keep receiving (acking as we go) until `condition` matches or the
        # timer raises TimerTimeout.
        timer = Timer(timeout)
        for remaining_timeout in timer.remaining_timeouts():
            try:
                r = self.receive(remaining_timeout)
            except TerminalClientError:
                continue
            self._send_ack_if_needed(r)
            if condition(r):
                return r
    def _is_reply(self, msg, reply):
        return (not isinstance(msg, Ack)) and self._is_reply_or_ack(msg, reply)
    @staticmethod
    def _is_reply_or_ack(msg, reply):
        # Replies and acks carry the uid of the message they answer.
        return msg.uid == reply.uid
    def _send_ack_if_needed(self, msg):
        # Acks themselves are never acknowledged.
        if not isinstance(msg, Ack):
            self._send_ack(msg)
    def _send_ack(self, msg):
        self.send(Ack.create_reply(msg))
    def send(self, msg):
        self._strcomm.write_str(self.serialize(msg))
    def receive(self, timeout):
        """Receive and deserialize one message within ``timeout`` seconds."""
        self._terminalcomm.set_timeout(timeout)
        with self._client_exception_wrap():
            return self.deserialize(self._strcomm.read_str())
    @contextmanager
    def _client_exception_wrap(self):
        # Map pexpect/shell timeouts to TerminalClientError; let everything
        # else propagate (after a debug log).
        with self._wrap_timeout_exception():
            try:
                yield None
            except (pexpect.TIMEOUT, TimeoutError) as e:
                raise TerminalClientError(e)
            except Exception as e:
                LOGGER.debug('Raised exception: %s: %s', e.__class__.__name__, e)
                raise
class TerminalComm(ChunkReaderBase, ChunkWriterBase):
    """Chunked reader/writer adapter over a pexpect terminal."""
    def __init__(self, terminal):
        ChunkReaderBase.__init__(self)
        self._terminal = terminal
        # Read timeout in seconds; -1 until set_timeout is called.
        self._timeout = -1
    def set_timeout(self, timeout):
        self._timeout = timeout
    def _read(self, n):
        # Non-blocking read of up to n bytes, bounded by the current timeout.
        return self._terminal.read_nonblocking(n, timeout=self._timeout)
    def _flush(self):
        # Terminal writes are unbuffered; nothing to flush.
        pass
    def _write(self, s):
        self._terminal.send(s)
| 29.625731 | 88 | 0.651204 |
ace5d1cbf36962787b8ce82eca6b2c3e99ea2ec8 | 32,106 | py | Python | third_party/xiuminglib/xiuminglib/blender/object.py | leehsiu/nerfactor | 87f7d3ffa56bdbca925958a4b89e249d35006c80 | [
"Apache-2.0"
] | 183 | 2021-06-04T01:22:57.000Z | 2022-03-31T06:18:20.000Z | third_party/xiuminglib/xiuminglib/blender/object.py | leehsiu/nerfactor | 87f7d3ffa56bdbca925958a4b89e249d35006c80 | [
"Apache-2.0"
] | 40 | 2019-05-05T17:04:10.000Z | 2021-09-06T18:11:19.000Z | third_party/xiuminglib/xiuminglib/blender/object.py | leehsiu/nerfactor | 87f7d3ffa56bdbca925958a4b89e249d35006c80 | [
"Apache-2.0"
] | 26 | 2021-06-04T18:28:11.000Z | 2022-03-22T13:44:19.000Z | import re
from os.path import basename, dirname
import numpy as np
from ..imprt import preset_import
from .. import log, os as xm_os
logger = log.get_logger()
def get_object(otype, any_ok=False):
    """Gets the handle of the only (or any) object of the given type.

    Args:
        otype (str): Object type: ``'MESH'``, ``'CAMERA'``, ``'LAMP'`` or any
            string ``a_bpy_obj.type`` may return.
        any_ok (bool, optional): Whether it's ok to grab any object when there
            exist multiple ones matching the given type. If ``False``, there
            must be exactly one object of the given type.

    Returns:
        bpy_types.Object.
    """
    bpy = preset_import('bpy', assert_success=True)
    matching = [obj for obj in bpy.data.objects if obj.type == otype]
    if not matching:
        raise RuntimeError("There's no object matching the given type")
    if len(matching) > 1 and not any_ok:
        raise RuntimeError((
            "When `any_ok` is `False`, there must be exactly "
            "one object matching the given type"))
    return matching[0]
def remove_objects(name_pattern, regex=False):
    """Removes object(s) from current scene.

    Args:
        name_pattern (str): Name or name pattern of object(s) to remove.
        regex (bool, optional): Whether to interpret ``name_pattern`` as a
            regex.
    """
    bpy = preset_import('bpy', assert_success=True)
    # Build a matching predicate so one loop handles both modes
    if regex:
        assert (name_pattern != '*'), \
            "Want to match everything? Correct regex for that is '.*'"
        compiled = re.compile(name_pattern)
        def is_match(obj_name):
            return compiled.match(obj_name) is not None
    else:
        def is_match(obj_name):
            return obj_name == name_pattern
    removed = []
    for obj in bpy.data.objects:
        hit = is_match(obj.name)
        # Selection state determines what bpy.ops.object.delete() removes
        obj.select_set(hit)
        if hit:
            removed.append(obj.name)
    # Delete everything currently selected
    bpy.ops.object.delete()
    # Scene update necessary, as matrix_world is updated lazily
    bpy.context.view_layer.update()
    logger.info("Removed from scene: %s", removed)
def import_object(
        model_path, axis_forward='-Z', axis_up='Y',
        rot_mat=((1, 0, 0), (0, 1, 0), (0, 0, 1)),
        trans_vec=(0, 0, 0), scale=1, merge=False, name=None):
    """Imports external object to current scene, the low-level way.

    Args:
        model_path (str): Path to object to add.
        axis_forward (str, optional): Which direction is forward.
        axis_up (str, optional): Which direction is upward.
        rot_mat (array_like, optional): 3-by-3 rotation matrix *preceding*
            translation.
        trans_vec (array_like, optional): 3D translation vector *following*
            rotation.
        scale (float, optional): Scale of the object.
        merge (bool, optional): Whether to merge objects into one.
        name (str, optional): Object name after import.

    Returns:
        bpy_types.Object or list(bpy_types.Object): Imported object(s).
    """
    bpy = preset_import('bpy', assert_success=True)
    Matrix = preset_import('Matrix', assert_success=True)
    # Deselect all
    for o in bpy.data.objects:
        o.select_set(False)
    # Import; the importer leaves the new object(s) selected
    if model_path.endswith('.obj'):
        bpy.ops.import_scene.obj(
            filepath=model_path, axis_forward=axis_forward, axis_up=axis_up)
    elif model_path.endswith('.ply'):
        bpy.ops.import_mesh.ply(filepath=model_path)
        logger.warning("axis_forward and axis_up ignored for .ply")
    else:
        raise NotImplementedError(".%s" % model_path.split('.')[-1])
    # Merge, if asked to
    if merge and len(bpy.context.selected_objects) > 1:
        objs_to_merge = bpy.context.selected_objects
        context = bpy.context.copy()
        context['active_object'] = objs_to_merge[0]
        context['selected_objects'] = objs_to_merge
        # NOTE(review): scene.object_bases is pre-2.8 API; this merge path
        # likely needs updating for Blender 2.8x -- confirm
        context['selected_editable_bases'] = \
            [bpy.context.scene.object_bases[o.name] for o in objs_to_merge]
        bpy.ops.object.join(context)
        objs_to_merge[0].name = 'merged' # change object name
        # objs_to_merge[0].data.name = 'merged' # change mesh name
    obj_list = []
    for i, obj in enumerate(bpy.context.selected_objects):
        # Rename
        if name is not None:
            if len(bpy.context.selected_objects) == 1:
                obj.name = name
            else:
                obj.name = name + '_' + str(i)
        # Compute world matrix
        trans_4x4 = Matrix.Translation(trans_vec)
        rot_4x4 = Matrix(rot_mat).to_4x4()
        scale_4x4 = Matrix(np.eye(4)) # don't scale here
        # BUGFIX: in Blender 2.8x mathutils, `*` is element-wise for matrices;
        # matrix multiplication uses `@` (the rest of this module already uses
        # the 2.8x API, e.g., select_set() and view_layer.update())
        obj.matrix_world = trans_4x4 @ rot_4x4 @ scale_4x4
        # Scale
        obj.scale = (scale, scale, scale)
        obj_list.append(obj)
    # Scene update necessary, as matrix_world is updated lazily
    bpy.context.view_layer.update()
    logger.info("Imported: %s", model_path)
    if len(obj_list) == 1:
        return obj_list[0]
    return obj_list
def export_object(obj_names, model_path, axis_forward=None, axis_up=None):
    """Exports Blender object(s) to a file.

    Args:
        obj_names (str or list(str)): Object name(s) to export. Must be a
            single string if output format is .ply.
        model_path (str): Output .obj or .ply path.
        axis_forward (str, optional): Which direction is forward. For .obj,
            the default is ``'-Z'``, and ``'Y'`` for .ply.
        axis_up (str, optional): Which direction is upward. For .obj, the
            default is ``'Y'``, and ``'Z'`` for .ply.

    Writes
        - Exported model file, possibly accompanied by a material file.
    """
    bpy = preset_import('bpy', assert_success=True)
    out_dir = dirname(model_path)
    xm_os.makedirs(out_dir)
    if isinstance(obj_names, str):
        obj_names = [obj_names]
    exported = []
    for o in [x for x in bpy.data.objects if x.type == 'MESH']:
        # BUGFIX: `Object.select` was removed in Blender 2.8x (this loop
        # already uses the 2.8x select_set()); test membership directly
        is_selected = o.name in obj_names
        o.select_set(is_selected)
        if is_selected:
            exported.append(o.name)
    if model_path.endswith('.ply'):
        assert len(obj_names) == 1, \
            ".ply holds a single object; use .obj for multiple objects"
        if axis_forward is None:
            axis_forward = 'Y'
        if axis_up is None:
            axis_up = 'Z'
        bpy.ops.export_mesh.ply(
            filepath=model_path, axis_forward=axis_forward, axis_up=axis_up)
    elif model_path.endswith('.obj'):
        if axis_forward is None:
            axis_forward = '-Z'
        if axis_up is None:
            axis_up = 'Y'
        bpy.ops.export_scene.obj(
            filepath=model_path, use_selection=True,
            axis_forward=axis_forward, axis_up=axis_up)
    else:
        raise NotImplementedError(".%s" % model_path.split('.')[-1])
    logger.info("%s Exported to %s", exported, model_path)
def add_cylinder_between(pt1, pt2, r=1e-3, name=None):
    """Adds a cylinder specified by two end points and radius.

    Super useful for visualizing rays in ray tracing while debugging.

    Args:
        pt1 (array_like): World coordinates of point 1.
        pt2 (array_like): World coordinates of point 2.
        r (float, optional): Cylinder radius.
        name (str, optional): Cylinder name.

    Returns:
        bpy_types.Object: Cylinder added.
    """
    bpy = preset_import('bpy', assert_success=True)
    pt1, pt2 = np.array(pt1), np.array(pt2)
    diff = pt2 - pt1
    # Place the cylinder at the segment midpoint, with depth equal to the
    # segment length
    seg_len = np.linalg.norm(diff)
    midpoint = tuple(pt1 + diff / 2)
    bpy.ops.mesh.primitive_cylinder_add(
        radius=r, depth=seg_len, location=midpoint)
    cylinder_obj = bpy.context.object
    if name is not None:
        cylinder_obj.name = name
    # Orient the cylinder along pt1 -> pt2 using spherical angles:
    # theta (polar, about y) then phi (azimuth, about z)
    cylinder_obj.rotation_euler[1] = np.arccos(diff[2] / seg_len)
    cylinder_obj.rotation_euler[2] = np.arctan2(diff[1], diff[0])
    # Scene update necessary, as matrix_world is updated lazily
    bpy.context.view_layer.update()
    return cylinder_obj
def add_rectangular_plane(
        center_loc=(0, 0, 0), point_to=(0, 0, 1), size=(2, 2), name=None):
    """Adds a rectangular plane specified by its center location, dimensions,
    and where its +z points to.

    Args:
        center_loc (array_like, optional): Plane center location in world
            coordinates.
        point_to (array_like, optional): Point in world coordinates to which
            plane's +z points.
        size (array_like, optional): Sizes in x and y directions (0 in z).
        name (str, optional): Plane name.

    Returns:
        bpy_types.Object: Plane added.
    """
    bpy = preset_import('bpy', assert_success=True)
    Vector = preset_import('Vector', assert_success=True)
    center_loc = np.array(center_loc)
    point_to = np.array(point_to)
    # Append a zero z dimension: a plane has no thickness
    size = np.append(np.array(size), 0)
    bpy.ops.mesh.primitive_plane_add(location=center_loc)
    plane_obj = bpy.context.object
    if name is not None:
        plane_obj.name = name
    plane_obj.dimensions = size
    # Point it to target
    direction = Vector(point_to) - plane_obj.location
    # Find quaternion that rotates plane's 'Z' so that it aligns with
    # `direction`. This rotation is not unique because the rotated plane can
    # still rotate about direction vector. Specifying 'Y' gives the rotation
    # quaternion with plane's 'Y' pointing up
    rot_quat = direction.to_track_quat('Z', 'Y')
    plane_obj.rotation_euler = rot_quat.to_euler()
    # Scene update necessary, as matrix_world is updated lazily
    bpy.context.view_layer.update()
    return plane_obj
def create_mesh(verts, faces, name='new-mesh'):
    """Creates a mesh from vertices and faces.

    Args:
        verts (array_like): Local coordinates of the vertices, of shape
            N-by-3.
        faces (list(tuple)): Faces specified by ordered vertex indices.
        name (str, optional): Mesh name.

    Returns:
        bpy_types.Mesh: Mesh data created.
    """
    bpy = preset_import('bpy', assert_success=True)
    mesh = bpy.data.meshes.new(name)
    # from_pydata takes (vertices, edges, faces); edges are derived from the
    # faces, so an empty edge list suffices
    mesh.from_pydata(np.array(verts), [], faces)
    mesh.update()
    logger.info("Mesh '%s' created", name)
    return mesh
def create_object_from_mesh(mesh_data, obj_name='new-obj',
                            location=(0, 0, 0), rotation_euler=(0, 0, 0),
                            scale=(1, 1, 1)):
    """Creates object from mesh data.

    Args:
        mesh_data (bpy_types.Mesh): Mesh data.
        obj_name (str, optional): Object name.
        location (tuple, optional): Object location in world coordinates.
        rotation_euler (tuple, optional): Object rotation in radians.
        scale (tuple, optional): Object scale.

    Returns:
        bpy_types.Object: Object created.
    """
    bpy = preset_import('bpy', assert_success=True)
    # Create
    obj = bpy.data.objects.new(obj_name, mesh_data)
    # Link to current scene
    # NOTE(review): scene.objects.link/.active is pre-2.8 API, while
    # select_set() and view_layer.update() below are 2.8x -- confirm the
    # target Blender version
    scene = bpy.context.scene
    scene.objects.link(obj)
    obj.select_set(True)
    scene.objects.active = obj # make the selection effective
    # Set attributes
    obj.location = location
    obj.rotation_euler = rotation_euler
    obj.scale = scale
    logger.info("Object '%s' created from mesh data and selected", obj_name)
    # Scene update necessary, as matrix_world is updated lazily
    bpy.context.view_layer.update()
    return obj
def _clear_nodetree_for_active_material(obj):
    """Internal helper function clears the node tree of active material.

    So that desired node tree can be cleanly set up. If no active material, one
    will be created.

    Args:
        obj (bpy_types.Object): Object whose active material is cleared.

    Returns:
        bpy.types.NodeTree: The material's (now empty) node tree.
    """
    bpy = preset_import('bpy', assert_success=True)
    # Create material if none
    if obj.active_material is None:
        mat = bpy.data.materials.new(name='new-mat-for-%s' % obj.name)
        if obj.data.materials:
            # Assign to first material slot
            obj.data.materials[0] = mat
        else:
            # No slots
            obj.data.materials.append(mat)
    active_mat = obj.active_material
    active_mat.use_nodes = True
    node_tree = active_mat.node_tree
    nodes = node_tree.nodes
    # Remove all nodes; iterate over a snapshot because removing elements
    # from the collection while iterating it directly can skip nodes
    for node in list(nodes):
        nodes.remove(node)
    return node_tree
def color_vertices(obj, vert_ind, colors):
    r"""Colors each vertex of interest with the given color.

    Colors are defined for vertex loops, in fact. This function uses the same
    color for all loops of a vertex. Useful for making a 3D heatmap.

    Args:
        obj (bpy_types.Object): Object.
        vert_ind (int or list(int)): Index/indices of vertex/vertices to
            color.
        colors (tuple or list(tuple)): RGB value(s) to paint on
            vertex/vertices. Values :math:`\in [0, 1]`. If one tuple,
            this color will be applied to all vertices. If list of tuples,
            must be of the same length as ``vert_ind``.
    """
    bpy = preset_import('bpy', assert_success=True)
    # Validate inputs
    if isinstance(vert_ind, int):
        vert_ind = [vert_ind]
    else:
        vert_ind = list(vert_ind)
    if isinstance(colors, tuple):
        colors = [colors] * len(vert_ind)
    assert (len(colors) == len(vert_ind)), \
        ("`colors` and `vert_ind` must be of the same length, "
         "or `colors` is a single tuple")
    # Normalize every color to an RGBA 4-tuple
    for i, c in enumerate(colors):
        c = tuple(c)
        if len(c) == 3:
            colors[i] = c + (1,)
        elif len(c) == 4: # In case some Blender version needs 4-tuples
            colors[i] = c
        else:
            raise ValueError("Wrong color length: %d" % len(c))
    if any(x > 1 for c in colors for x in c):
        logger.warning("Did you forget to normalize color values to [0, 1]?")
    scene = bpy.context.scene
    scene.objects.active = obj
    obj.select_set(True)
    bpy.ops.object.mode_set(mode='OBJECT')
    mesh = obj.data
    if mesh.vertex_colors:
        vcol_layer = mesh.vertex_colors.active
    else:
        vcol_layer = mesh.vertex_colors.new()
    # A vertex and one of its edges combined are called a loop, which has a
    # color. So if a vertex has four outgoing edges, it has four colors for
    # the four loops
    # PERF: O(1) dict lookup replaces list.index() inside the face/loop nest;
    # setdefault() keeps first-occurrence semantics for duplicated indices
    color_by_vert = {}
    for i, vi in enumerate(vert_ind):
        color_by_vert.setdefault(vi, colors[i])
    for poly in mesh.polygons:
        for loop_idx in poly.loop_indices:
            loop_vert_idx = mesh.loops[loop_idx].vertex_index
            color = color_by_vert.get(loop_vert_idx)
            if color is not None:
                try:
                    vcol_layer.data[loop_idx].color = color
                except ValueError:
                    # This Blender version requires 3-tuples
                    vcol_layer.data[loop_idx].color = color[:3]
    # Set up nodes for vertex colors
    node_tree = _clear_nodetree_for_active_material(obj)
    nodes = node_tree.nodes
    attr_node = nodes.new('ShaderNodeAttribute')
    diffuse_node = nodes.new('ShaderNodeBsdfDiffuse')
    output_node = nodes.new('ShaderNodeOutputMaterial')
    nodes['Attribute'].attribute_name = vcol_layer.name
    node_tree.links.new(attr_node.outputs[0], diffuse_node.inputs[0])
    node_tree.links.new(diffuse_node.outputs[0], output_node.inputs[0])
    # Scene update necessary, as matrix_world is updated lazily
    bpy.context.view_layer.update()
    logger.info("Vertex color(s) added to '%s'", obj.name)
    logger.warning("..., so node tree of '%s' has changed", obj.name)
def _assert_cycles(scene):
engine = scene.render.engine
if engine != 'CYCLES':
raise NotImplementedError(engine)
def _make_texture_node(obj, texture_str):
    """Creates an image texture node for the object's active material.

    ``texture_str`` is either ``'bundled'`` (use the material's active
    texture) or a path to an external image. The node is fed UV coordinates
    if the mesh has an active UV layer, else auto-generated coordinates.

    Returns:
        bpy.types.ShaderNodeTexImage: The created texture node.
    """
    bpy = preset_import('bpy', assert_success=True)
    mat = obj.active_material
    node_tree = mat.node_tree
    nodes = node_tree.nodes
    texture_node = nodes.new('ShaderNodeTexImage')
    if texture_str == 'bundled':
        texture = mat.active_texture
        assert texture is not None, "No bundled texture found"
        img = texture.image
    else:
        # Path given -- external texture map
        # check_existing avoids re-loading an image already in bpy.data
        bpy.data.images.load(texture_str, check_existing=True)
        img = bpy.data.images[basename(texture_str)]
    # Careless texture mapping
    texture_node.projection = 'FLAT'
    texcoord_node = nodes.new('ShaderNodeTexCoord')
    # Prefer real UVs when the mesh has them; fall back to generated coords
    if obj.data.uv_layers.active is None:
        texcoord = texcoord_node.outputs['Generated']
    else:
        texcoord = texcoord_node.outputs['UV']
    node_tree.links.new(texcoord, texture_node.inputs['Vector'])
    texture_node.image = img
    return texture_node
def setup_simple_nodetree(obj, texture, shader_type, roughness=0):
    r"""Sets up a simple (diffuse and/or glossy) node tree.

    Texture can be a bundled texture map, a path to an external texture map,
    or simply a pure color. If a path to an external image, and UV
    coordinates are given (e.g., in the geometry .obj file), then they will be
    used. If they are not given, texture mapping will be done carelessly,
    with automatically generated UV coordinates. See private function
    :func:`_make_texture_node` for how this is done.

    Args:
        obj (bpy_types.Object): Object, optionally bundled with texture map.
        texture (str or tuple): If string, must be ``'bundled'`` or path to
            the texture image. If tuple, must be of 4 floats
            :math:`\in [0, 1]` as RGBA values.
        shader_type (str): Either ``'diffuse'`` or ``'glossy'``.
        roughness (float, optional): If diffuse, the roughness in Oren-Nayar,
            0 gives Lambertian. If glossy, 0 means perfectly reflective.
    """
    bpy = preset_import('bpy', assert_success=True)
    scene = bpy.context.scene
    _assert_cycles(scene)
    node_tree = _clear_nodetree_for_active_material(obj)
    nodes = node_tree.nodes
    # Pick the BSDF node matching the requested shader type
    if shader_type == 'glossy':
        shader_node = nodes.new('ShaderNodeBsdfGlossy')
    elif shader_type == 'diffuse':
        shader_node = nodes.new('ShaderNodeBsdfDiffuse')
    else:
        raise ValueError(shader_type)
    if isinstance(texture, str):
        texture_node = _make_texture_node(obj, texture)
        node_tree.links.new(
            texture_node.outputs['Color'], shader_node.inputs['Color'])
    elif isinstance(texture, tuple):
        # Pure RGBA color fed directly into the shader
        shader_node.inputs['Color'].default_value = texture
    else:
        raise TypeError(texture)
    output_node = nodes.new('ShaderNodeOutputMaterial')
    node_tree.links.new(
        shader_node.outputs['BSDF'], output_node.inputs['Surface'])
    # Roughness
    shader_node.inputs['Roughness'].default_value = roughness
    logger.info(
        "%s node tree set up for '%s'", shader_type.capitalize(), obj.name)
def setup_emission_nodetree(obj, texture=(1, 1, 1, 1), strength=1, hide=False):
    r"""Sets up an emission node tree for the object.

    Args:
        obj (bpy_types.Object): Object (maybe bundled with texture map).
        texture (str or tuple, optional): If string, must be ``'bundled'`` or
            path to the texture image. If tuple, must be of 4 floats
            :math:`\in [0, 1]` as RGBA values.
        strength (float, optional): Emission strength.
        hide (bool, optional): Useful for hiding the emissive object (but
            keeping the light of course).
    """
    bpy = preset_import('bpy', assert_success=True)
    scene = bpy.context.scene
    _assert_cycles(scene)
    node_tree = _clear_nodetree_for_active_material(obj)
    nodes = node_tree.nodes
    # Emission node
    nodes.new('ShaderNodeEmission')
    if isinstance(texture, str):
        texture_node = _make_texture_node(obj, texture)
        node_tree.links.new(
            texture_node.outputs['Color'], nodes['Emission'].inputs['Color'])
    elif isinstance(texture, tuple):
        # Pure RGBA color fed directly into the emission shader
        nodes['Emission'].inputs['Color'].default_value = texture
    else:
        raise TypeError(texture)
    nodes['Emission'].inputs['Strength'].default_value = strength
    # Output node
    nodes.new('ShaderNodeOutputMaterial')
    node_tree.links.new(
        nodes['Emission'].outputs['Emission'],
        nodes['Material Output'].inputs['Surface'])
    # hide_render hides the object and the light, but this keeps the light
    obj.cycles_visibility.camera = not hide
    # Scene update necessary, as matrix_world is updated lazily
    bpy.context.view_layer.update()
    logger.info("Emission node tree set up for '%s'", obj.name)
def setup_holdout_nodetree(obj):
    """Sets up a holdout node tree for the object.

    Args:
        obj (bpy_types.Object): Object bundled with texture map.
    """
    bpy = preset_import('bpy', assert_success=True)
    _assert_cycles(bpy.context.scene)
    tree = _clear_nodetree_for_active_material(obj)
    # Wire a holdout shader straight into the material output
    holdout_node = tree.nodes.new('ShaderNodeHoldout')
    output_node = tree.nodes.new('ShaderNodeOutputMaterial')
    tree.links.new(holdout_node.outputs[0], output_node.inputs[0])
    # Scene update necessary, as matrix_world is updated lazily
    bpy.context.view_layer.update()
    logger.info("Holdout node tree set up for '%s'", obj.name)
def setup_retroreflective_nodetree(
        obj, texture, roughness=0, glossy_weight=0.1):
    r"""Sets up a retroreflective texture node tree.

    Bundled texture can be an external texture map (carelessly mapped) or a
    pure color. Mathematically, the BRDF model is a mixture of a diffuse BRDF
    and a glossy BRDF using incoming light directions as normals.

    Args:
        obj (bpy_types.Object): Object, optionally bundled with texture map.
        texture (str or tuple): If string, must be ``'bundled'`` or path to
            the texture image. If tuple, must be of 4 floats :math:`\in [0, 1]`
            as RGBA values.
        roughness (float, optional): Roughness for both the glossy and diffuse
            shaders.
        glossy_weight (float, optional): Mixture weight for the glossy shader.
    """
    bpy = preset_import('bpy', assert_success=True)
    scene = bpy.context.scene
    _assert_cycles(scene)
    node_tree = _clear_nodetree_for_active_material(obj)
    nodes = node_tree.nodes
    # Set color for diffuse and glossy nodes
    diffuse_node = nodes.new('ShaderNodeBsdfDiffuse')
    glossy_node = nodes.new('ShaderNodeBsdfGlossy')
    if isinstance(texture, str):
        texture_node = _make_texture_node(obj, texture)
        node_tree.links.new(
            texture_node.outputs['Color'], diffuse_node.inputs['Color'])
        node_tree.links.new(
            texture_node.outputs['Color'], glossy_node.inputs['Color'])
    elif isinstance(texture, tuple):
        diffuse_node.inputs['Color'].default_value = texture
        glossy_node.inputs['Color'].default_value = texture
    else:
        raise TypeError(texture)
    geometry_node = nodes.new('ShaderNodeNewGeometry')
    mix_node = nodes.new('ShaderNodeMixShader')
    output_node = nodes.new('ShaderNodeOutputMaterial')
    # Feeding the incoming direction as the glossy normal makes the surface
    # reflect back toward the light, i.e., retroreflection
    node_tree.links.new(
        geometry_node.outputs['Incoming'], glossy_node.inputs['Normal'])
    node_tree.links.new(diffuse_node.outputs['BSDF'], mix_node.inputs[1])
    node_tree.links.new(glossy_node.outputs['BSDF'], mix_node.inputs[2])
    node_tree.links.new(
        mix_node.outputs['Shader'], output_node.inputs['Surface'])
    # Roughness
    diffuse_node.inputs['Roughness'].default_value = roughness
    glossy_node.inputs['Roughness'].default_value = roughness
    mix_node.inputs['Fac'].default_value = glossy_weight
    # Scene update necessary, as matrix_world is updated lazily
    bpy.context.view_layer.update()
    logger.info("Retroreflective node tree set up for '%s'", obj.name)
def get_bmesh(obj):
    """Gets Blender mesh data from object.

    Args:
        obj (bpy_types.Object): Object.

    Returns:
        BMesh: Blender mesh data.
    """
    bpy = preset_import('bpy', assert_success=True)
    bmesh = preset_import('bmesh', assert_success=True)
    # Copy the object's mesh into a fresh, editable BMesh
    blender_mesh = bmesh.new()
    blender_mesh.from_mesh(obj.data)
    # Scene update necessary, as matrix_world is updated lazily
    bpy.context.view_layer.update()
    return blender_mesh
def subdivide_mesh(obj, n_subdiv=2):
    """Subdivides mesh of object.

    Args:
        obj (bpy_types.Object): Object whose mesh is to be subdivided.
        n_subdiv (int, optional): Number of subdivision levels.
    """
    bpy = preset_import('bpy', assert_success=True)
    scene = bpy.context.scene
    # All objects need to be in 'OBJECT' mode to apply modifiers -- maybe a
    # Blender bug?
    # NOTE(review): scene.objects.active is pre-2.8 API, while select_set()
    # and view_layer.update() are 2.8x -- confirm the target Blender version
    for o in bpy.data.objects:
        scene.objects.active = o
        bpy.ops.object.mode_set(mode='OBJECT')
        o.select_set(False)
    obj.select_set(True)
    scene.objects.active = obj
    bpy.ops.object.modifier_add(type='SUBSURF')
    obj.modifiers['Subdivision'].subdivision_type = 'CATMULL_CLARK'
    # Same level for viewport and rendering
    obj.modifiers['Subdivision'].levels = n_subdiv
    obj.modifiers['Subdivision'].render_levels = n_subdiv
    # Apply modifier
    bpy.ops.object.modifier_apply(modifier='Subdivision', apply_as='DATA')
    # Scene update necessary, as matrix_world is updated lazily
    bpy.context.view_layer.update()
    logger.info("Subdivided mesh of '%s'", obj.name)
def select_mesh_elements_by_vertices(obj, vert_ind, select_type):
    """Selects vertices or their associated edges/faces in edit mode.

    Args:
        obj (bpy_types.Object): Object.
        vert_ind (int or list(int)): Vertex index/indices.
        select_type (str): Type of mesh elements to select: ``'vertex'``,
            ``'edge'`` or ``'face'``.
    """
    bpy = preset_import('bpy', assert_success=True)
    bmesh = preset_import('bmesh', assert_success=True)
    if isinstance(vert_ind, int):
        vert_ind = [vert_ind]
    # Edit mode
    # NOTE(review): scene.objects.active is pre-2.8 API -- confirm the
    # target Blender version
    scene = bpy.context.scene
    scene.objects.active = obj
    bpy.ops.object.mode_set(mode='EDIT')
    # Deselect all, in each of the three selection modes
    bpy.ops.mesh.select_mode(type='FACE')
    bpy.ops.mesh.select_all(action='DESELECT')
    bpy.ops.mesh.select_mode(type='EDGE')
    bpy.ops.mesh.select_all(action='DESELECT')
    bpy.ops.mesh.select_mode(type='VERT')
    bpy.ops.mesh.select_all(action='DESELECT')
    bm = bmesh.from_edit_mesh(obj.data)
    bvs = bm.verts
    # Required before indexing bm.verts by integer
    bvs.ensure_lookup_table()
    for i in vert_ind:
        bv = bvs[i]
        if select_type == 'vertex':
            bv.select_set(True)
        # Select all edges with this vertex at an end
        elif select_type == 'edge':
            for be in bv.link_edges:
                be.select_set(True)
        # Select all faces with this vertex
        elif select_type == 'face':
            for bf in bv.link_faces:
                bf.select_set(True)
        else:
            raise ValueError("Wrong selection type")
    # Update viewport
    # Re-assigning the active object forces a viewport refresh
    scene.objects.active = scene.objects.active
    # Scene update necessary, as matrix_world is updated lazily
    bpy.context.view_layer.update()
    logger.info("Selected %s elements of '%s'", select_type, obj.name)
def add_sphere(
        location=(0, 0, 0), scale=1, n_subdiv=2, shade_smooth=False, name=None):
    """Adds a sphere.

    Args:
        location (array_like, optional): Location of the sphere center.
        scale (float, optional): Scale of the sphere.
        n_subdiv (int, optional): Control of how round the sphere is.
        shade_smooth (bool, optional): Whether to use smooth shading.
        name (str, optional): Name of the added sphere.

    Returns:
        bpy_types.Object: Sphere created.
    """
    bpy = preset_import('bpy', assert_success=True)
    bpy.ops.mesh.primitive_ico_sphere_add()
    sphere = bpy.context.active_object
    if name is not None:
        sphere.name = name
    sphere.location = location
    # Uniform scaling along all three axes
    sphere.scale = (scale, scale, scale)
    # Subdivide for smoother sphere
    bpy.ops.object.modifier_add(type='SUBSURF')
    sphere.modifiers['Subdivision'].subdivision_type = 'CATMULL_CLARK'
    sphere.modifiers['Subdivision'].levels = n_subdiv
    sphere.modifiers['Subdivision'].render_levels = n_subdiv
    # modifier_apply acts on the active object
    bpy.context.view_layer.objects.active = sphere
    bpy.ops.object.modifier_apply(modifier='Subdivision', apply_as='DATA')
    # Fake smoothness
    if shade_smooth:
        for f in sphere.data.polygons:
            f.use_smooth = True
    return sphere
def smart_uv_unwrap(obj, area_weight=0.0):
    """UV unwrapping using Blender's smart projection.

    A vertex may map to multiple UV locations, but each loop maps to exactly
    one UV location. If a face uses M vertices, then it has M loops, so a vertex
    may belong to multiple loops, each of which has one UV location.

    Note:
        If a vertex belongs to no face, it doesn't get a UV coordinate,
        so don't assume you can get a UV for any given vertex index.

    Args:
        obj (bpy_types.Object): Object to UV unwrap.
        area_weight (float, optional): Area weight.

    Returns:
        dict(numpy.ndarray): Dictionary with its keys being the face indices,
        and values being 2D arrays with four columns containing the
        corresponding face's loop indices, vertex indices, :math:`u`, and
        :math:`v`.

        UV coordinate convention:

        .. code-block:: none

            (0, 1)
                ^ v
                |
                |
                |
                |
                +-----------> (1, 0)
            (0, 0)           u
    """
    bpy = preset_import('bpy', assert_success=True)
    assert obj.type == 'MESH'
    # NOTE(review): scene.objects.active is pre-2.8 API -- confirm the
    # target Blender version
    bpy.ops.object.mode_set(mode='OBJECT')
    bpy.context.scene.objects.active = obj
    # Smart projection requires edit mode with everything selected
    bpy.ops.object.mode_set(mode='EDIT')
    bpy.ops.mesh.select_all(action='SELECT')
    bpy.ops.uv.smart_project(user_area_weight=area_weight)
    bpy.ops.object.mode_set(mode='OBJECT')
    # Since # faces is usually very large, using faces as dictionary
    # keys usually leads to speedups (compared with having them as
    # an array's column and then slicing the array)
    fi_li_vi_u_v = {}
    for f in obj.data.polygons:
        li_vi_u_v = []
        # f.vertices and f.loop_indices are parallel: one loop per vertex use
        for vi, li in zip(f.vertices, f.loop_indices):
            uv = obj.data.uv_layers.active.data[li].uv
            li_vi_u_v.append([li, vi, uv.x, uv.y])
        fi_li_vi_u_v[f.index] = np.array(li_vi_u_v)
    return fi_li_vi_u_v
def raycast(obj_bvhtree, ray_from_objspc, ray_to_objspc):
    """Casts a ray to an object.

    Args:
        obj_bvhtree (mathutils.bvhtree.BVHTree): Constructed BVH tree of the
            object.
        ray_from_objspc (mathutils.Vector): Ray origin, in object's local
            coordinates.
        ray_to_objspc (mathutils.Vector): Ray goes through this point, also
            specified in the object's local coordinates. Note that the ray
            doesn't stop at this point, and this is just for computing the
            ray direction.

    Returns:
        tuple:
            - **hit_loc** (*mathutils.Vector*) -- Hit location on the object,
              in the object's local coordinates. ``None`` means no
              intersection.
            - **hit_normal** (*mathutils.Vector*) -- Normal of the hit
              location, also in the object's local coordinates.
            - **hit_fi** (*int*) -- Index of the face where the hit happens.
            - **ray_dist** (*float*) -- Distance that the ray has traveled
              before hitting the object. If ``ray_to_objspc`` is a point on
              the object surface, then this return value is useful for
              checking for self occlusion.
    """
    # Unit direction from the origin through the given point
    ray_dir = (ray_to_objspc - ray_from_objspc).normalized()
    hit_loc, hit_normal, hit_fi, ray_dist = obj_bvhtree.ray_cast(
        ray_from_objspc, ray_dir)
    if hit_loc is None:
        # BVHTree reports a miss as all-None; keep that invariant explicit
        assert hit_normal is None and hit_fi is None and ray_dist is None
    return hit_loc, hit_normal, hit_fi, ray_dist
| 33.51357 | 80 | 0.644366 |
ace5d3596ba89feaa89a022ec390bace628609a8 | 761 | py | Python | cogs/image.py | haruyuki/CS-Pound | 5bd8ca24560994bbca96fa3a5822f961b823a02c | [
"MIT"
] | 4 | 2019-01-23T00:57:53.000Z | 2021-12-22T14:59:39.000Z | cogs/image.py | haruyuki/CS-Pound | 5bd8ca24560994bbca96fa3a5822f961b823a02c | [
"MIT"
] | 11 | 2018-10-03T09:12:03.000Z | 2022-01-15T01:44:12.000Z | cogs/image.py | haruyuki/CS-Pound | 5bd8ca24560994bbca96fa3a5822f961b823a02c | [
"MIT"
] | 4 | 2018-10-03T08:45:03.000Z | 2020-07-21T09:21:43.000Z | import discord
from discord.ext import commands
import chickensmoothie as cs
from constants import Strings
class PetImage(commands.Cog):
    """Cog exposing the ``image`` command for posting a pet's image."""

    def __init__(self, bot):
        self.bot = bot

    @commands.command(aliases=['img'])
    @commands.guild_only()
    async def image(self, ctx, link: str = ''):
        """Fetch the pet image behind ``link`` and send it to the channel."""
        pet_image = await cs.image(link)
        if pet_image is None:
            # Invalid link/data: report failure via an embed instead
            embed = discord.Embed(
                title='Pet', description=Strings.pet_unsuccessful,
                colour=0xff5252)
            await ctx.send(embed=embed)  # Send embed
            return
        # Upload the file to the channel where message came from
        await ctx.send(file=discord.File(fp=pet_image, filename='pet.png'))
def setup(bot):
    # discord.py extension entry point: registers the cog when loaded
    bot.add_cog(PetImage(bot))
| 30.44 | 137 | 0.671485 |
ace5d449c4c5f3ab4fd59fce425cd7ec92e63342 | 113 | py | Python | SchoolDesk.py | Arnabsaha6/Snakify | df0c1112ae8a56a275044f786bfd89f746e3ca85 | [
"MIT"
] | null | null | null | SchoolDesk.py | Arnabsaha6/Snakify | df0c1112ae8a56a275044f786bfd89f746e3ca85 | [
"MIT"
] | null | null | null | SchoolDesk.py | Arnabsaha6/Snakify | df0c1112ae8a56a275044f786bfd89f746e3ca85 | [
"MIT"
] | null | null | null | Code:
a = int(input())
b = int(input())
c = int(input())
print(a // 2 + b // 2 + c // 2 + a % 2 + b % 2 + c % 2) | 18.833333 | 55 | 0.424779 |
ace5d4a7022f9496ec600639b858bb1ee408dffd | 3,913 | py | Python | week8/5-learning_curve.py | MathAdventurer/Data_Mining | b0a06b5f7c13a3762a07eb84518aa4ee56896516 | [
"MIT"
] | 1 | 2021-02-27T18:35:39.000Z | 2021-02-27T18:35:39.000Z | week8/5-learning_curve.py | MathAdventurer/Data_Mining | b0a06b5f7c13a3762a07eb84518aa4ee56896516 | [
"MIT"
] | null | null | null | week8/5-learning_curve.py | MathAdventurer/Data_Mining | b0a06b5f7c13a3762a07eb84518aa4ee56896516 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Created on Sat Mar 21 19:31:51 2020
@author: Neal LONG
"""
import numpy as np
import matplotlib.pyplot as plt
from sklearn.model_selection import learning_curve
from sklearn.tree import DecisionTreeClassifier
from sklearn.datasets import load_iris
def plot_learning_curve(estimator, title, X, y, givenTrainSizes, scoring = 'accuracy', cv = None):
    """
    Generate a simple plot of the test and training learning curve.

    Parameters
    ----------
    estimator : object type that implements the "fit" and "predict" methods
        An object of that type which is cloned for each validation.

    title : string
        Title for the chart.

    X : array-like, shape (n_samples, n_features)
        Training vector, where n_samples is the number of samples and
        n_features is the number of features.

    y : array-like, shape (n_samples) or (n_samples, n_features), optional
        Target relative to X for classification or regression;
        None for unsupervised learning.

    scoring : string, callable or None, optional, default: 'accuracy'
        A string (see model evaluation documentation) or
        a scorer callable object / function with signature scorer(estimator, X, y).

    cv : int, cross-validation generator or an iterable, optional
        Determines the cross-validation splitting strategy.
        Possible inputs for cv are:
          - None, to use the default 5-fold Stratified cross-validation,
          - integer, to specify the number of folds for Stratified cross-validation
          - An object to be used as a cross-validation generator.
          - An iterable yielding train/test splits.

    givenTrainSizes : list, defines different percentages of whole train data used to
        evaluate learning capacity

        For integer/None inputs, if ``y`` is binary or multiclass,
        :class:`StratifiedKFold` used. If the estimator is not a classifier
        or if ``y`` is neither binary nor multiclass, :class:`KFold` is used.

        Refer :ref:`User Guide <cross_validation>` for the various
        cross-validators that can be used here.
    """
    fig = plt.figure(1, figsize=(6, 6))
    ax = fig.add_subplot(111)
    plt.title(title)
    plt.xlabel("Training examples")
    plt.ylabel("Accuracy")
    # read the help of learning_curve, and call learning_curve with proper paramters
    # random_state=0 makes the shuffled subsampling reproducible
    train_sizes, train_scores, test_scores = learning_curve(estimator,X,y,
                                                   scoring=scoring,
                                                   cv=cv,
                                                   train_sizes=givenTrainSizes,
                                                   random_state=0)
    # Average the per-fold scores for each training-set size
    train_scores_mean = np.mean(train_scores, axis=1)
    test_scores_mean = np.mean(test_scores, axis=1)
    plt.grid()
    plt.plot(train_sizes, train_scores_mean, 'o-', color="r",
             label="Training score")
    plt.plot(train_sizes, test_scores_mean, 'o-', color="g",
             label="Cross-validation score")
    # Annotate each CV point with its rounded mean score
    for xy in zip(train_sizes, test_scores_mean): # <--
        ax.annotate('%s' % round(xy[1],2), xy=xy, textcoords='data')
    plt.legend(loc="best")
    plt.show()
iris = load_iris()
# Feature matrix X (first two features only) and target array y
X = iris.data[:,:2]
y = iris.target
basic_clf = DecisionTreeClassifier(random_state=0)
title = "Learning Curves (DecisionTreeClassifier)"
givenTrainSizes = np.linspace(.1, 1.0, 5)
# 5-fold Stratified CV, training on the percentages of the training data in
# givenTrainSizes = [0.1, 0.325, 0.55, 0.775, 1.0]
plot_learning_curve(basic_clf, title, X, y, givenTrainSizes, scoring='accuracy', cv=5)
| 39.928571 | 99 | 0.618707 |
ace5d5fd7a6fd6cb35e955821236bdee4840e91d | 16,126 | py | Python | run_Natural_UAP.py | kztakemoto/Natural_UAP | b1d014996fdf2538947de04e08b2503ff9368c25 | [
"MIT"
] | null | null | null | run_Natural_UAP.py | kztakemoto/Natural_UAP | b1d014996fdf2538947de04e08b2503ff9368c25 | [
"MIT"
] | null | null | null | run_Natural_UAP.py | kztakemoto/Natural_UAP | b1d014996fdf2538947de04e08b2503ff9368c25 | [
"MIT"
] | null | null | null | #################################################################################
# argument parser
#################################################################################
# --X_train_path: str, path to training images (in npy format) '*/*.npy'
# --y_train_path: str, path to the labels of training images '*/*.npy'
# --X_test_path: str, path to test images '*/*.npy'
# --y_test_path: str, path to the labels of test images '*/*.npy'
# --X_materials_dir: str, path to the directory storing natural images
# --model_path: str, path to model weight '*/*.h5'
# --model_type: 'InceptionV3', 'VGG16', 'ResNet50'
# --norm_type: str, '2' or 'inf', norm type of UAPs
# --norm_rate: float, noise strength (zeta)
# --fgsm_eps: float, attack step size of FGSM
# --uap_iter: int, maximum number of iterations for computing UAP.
# --targeted: int, target class (negative value indicates non-targeted attacks)
# --save_path: str, path to output files
#################################################################################
import warnings
warnings.filterwarnings('ignore')
import os, sys, glob, argparse
sys.stdout = os.fdopen(sys.stdout.fileno(), "w", buffering=1)
import numpy as np
import keras
import tensorflow as tf
from keras import backend as K
from keras import utils
# for preventing tensorflow from allocating the totality of a GPU memory.
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
sess = tf.Session(config=config)
K.set_session(sess)
from keras.applications.inception_v3 import InceptionV3
from keras.applications.vgg16 import VGG16
from keras.applications.resnet50 import ResNet50
from keras.layers import Lambda, Input, Dense, GlobalAveragePooling2D
from keras.models import Model
from keras.optimizers import SGD
from art.classifiers import KerasClassifier
from art.attacks import UniversalPerturbation
from art.utils import random_sphere
from art.utils import projection
import matplotlib as mpl
mpl.use('Agg')
import matplotlib.pyplot as plt
# check the starting time for computing total processing time
import time
start_time = time.time()
### UAP class ###
# classifier: classifier
# X_train: ndarray, training images
# y_train: ndarray, the labels of the training images
# X_test: ndarray, test images
# y_test: ndarray, the labels of the test images
# X_materials_paths: array, path to the directory storing natural images
# norm_type: 2 or np.inf, norm type of UAPs
# norm_size: float, noise size (xi)
# fgsm_eps: float, Fattack step size of FGSM
# uap_iter: int, maximum number of iterations for computing UAP.
# targeted: int, target class (negative value indicates non-targeted attacks)
# save_path: str, path to output files
class my_UAP:
    """Craft a universal adversarial perturbation (UAP) from batches of natural
    images and log its attack success rates against train/test/material data.

    NOTE(review): indentation was lost in this source; statement grouping
    below was reconstructed from context — verify against the original repo.
    """

    def __init__(
        self,
        classifier,
        X_train, y_train,
        X_test, y_test,
        X_materials_paths,
        norm_type,
        norm_size,
        fgsm_eps,
        uap_iter,
        targeted,
        save_path
    ):
        # ART classifier wrapper under attack.
        self.classifier = classifier
        # Train/test data and one-hot labels; used only to measure attack
        # success rates, not to craft the UAP itself.
        self.X_train = X_train
        self.y_train = y_train
        self.X_test = X_test
        self.y_test = y_test
        # Paths to .npy batches of natural images used as UAP material.
        self.X_materials_paths = X_materials_paths
        # Lp norm type (2 or np.inf) and maximum perturbation size (xi).
        self.norm_type = norm_type
        self.norm_size = norm_size
        # FGSM attack step size and iteration cap for UniversalPerturbation.
        self.fgsm_eps = fgsm_eps
        self.uap_iter = uap_iter
        # Target class index; a negative value means a non-targeted attack.
        self.targeted = targeted
        # Path prefix for the output .npy files.
        self.save_path = save_path

    ### compute the attack success rate
    # images: ndarray, target image set
    # noise: ndarray, UAP
    def my_calc_fooling_ratio(self, images=0, noise=0):
        """Return the fraction of `images` for which the attack succeeds.

        Non-targeted: success means the prediction changed after adding noise.
        Targeted: success means the adversarial prediction equals the target.
        """
        adv_images = images + noise
        if self.targeted < 0:
            preds = np.argmax(self.classifier.predict(images), axis=1)
            preds_adv = np.argmax(self.classifier.predict(adv_images), axis=1)
            fooling_ratio = np.sum(preds_adv != preds) / images.shape[0]
            return fooling_ratio
        else:
            preds_adv = np.argmax(self.classifier.predict(adv_images), axis=1)
            fooling_ratio_targeted = np.sum(preds_adv == self.targeted) / adv_images.shape[0]
            return fooling_ratio_targeted

    ### generate the labels (in one-hot vector representation) for targeted attacks
    # length: int, number of target images
    def my_target_labels(self, length=0):
        """Return a (length, classes) one-hot array of the target class."""
        classes = self.y_train.shape[1]
        return utils.to_categorical([self.targeted] * length, classes)

    ### generate UAP
    def my_gen_UAP(self):
        """Iteratively craft the UAP over all material batches.

        Returns (noise, LOG) where LOG rows are:
        [images_used, L2_norm, Linf_norm, fr_train, fr_test, fr_materials,
         fr_train_random, fr_test_random, fr_materials_random].
        Also persists both arrays under `save_path`.
        """
        num_m = len(self.X_materials_paths)  # NOTE(review): currently unused
        imshape = self.X_train[0].shape
        if self.targeted >= 0:
            print(" *** targeted attack *** \n")
            adv_crafter = UniversalPerturbation(
                self.classifier,
                attacker='fgsm',
                delta=0.000001,
                attacker_params={"targeted":True, "eps":self.fgsm_eps},
                max_iter=self.uap_iter,
                eps=self.norm_size,
                norm=self.norm_type)
        else:
            print(" *** non-targeted attack *** \n")
            adv_crafter = UniversalPerturbation(
                self.classifier,
                attacker='fgsm',
                delta=0.000001,
                attacker_params={"eps":self.fgsm_eps},
                max_iter=self.uap_iter,
                eps=self.norm_size,
                norm=self.norm_type)
        # initialization
        LOG = []
        X_materials_cnt = 0
        noise = np.zeros(imshape)
        noise = noise.astype('float32')
        for i, path in enumerate(self.X_materials_paths):
            X_materials = np.load(path)
            X_materials_cnt += X_materials.shape[0]
            # normalization into roughly [-1, 1] (pixels assumed in [0, 256))
            X_materials -= 128.0
            X_materials /= 128.0
            # craft UAP, warm-started from the noise of the previous batch
            if self.targeted >= 0:
                # generate the labels for targeted attacks
                Y_materials_tar = self.my_target_labels(length=X_materials.shape[0])
                noise = adv_crafter.generate(X_materials, noise=noise, y=Y_materials_tar, targeted=True)
            else:
                noise = adv_crafter.generate(X_materials, noise=noise)
            # handling for no noise (the crafter may hold a plain int 0)
            if type(adv_crafter.noise[0,:]) == int:
                noise = np.zeros(imshape)
            else:
                noise = np.copy(adv_crafter.noise)
                noise = np.reshape(noise, imshape)
            # generate random UAP whose size equals to the size of the UAP
            noise_size = float(np.linalg.norm(noise.reshape(-1), ord=self.norm_type))
            noise_random = random_sphere(
                nb_points=1,
                nb_dims=np.prod(X_materials[0].shape),
                radius=noise_size,
                norm=self.norm_type
            ).reshape(imshape)
            # compute attack success rate of UAP
            # for X_train
            fr_train = self.my_calc_fooling_ratio(images=self.X_train, noise=noise)
            # for X_test
            fr_test = self.my_calc_fooling_ratio(images=self.X_test, noise=noise)
            # for X_materials
            fr_m = self.my_calc_fooling_ratio(images=X_materials, noise=noise)
            # compute attack success rate of random UAP (random control)
            # for X_train
            fr_train_r = self.my_calc_fooling_ratio(images=self.X_train, noise=noise_random)
            # for X_test
            fr_test_r = self.my_calc_fooling_ratio(images=self.X_test, noise=noise_random)
            # for X_materials
            fr_m_r = self.my_calc_fooling_ratio(images=X_materials, noise=noise_random)
            # compute UAP size
            norm_2 = np.linalg.norm(noise)
            norm_inf = abs(noise).max()
            LOG.append([X_materials_cnt, norm_2, norm_inf, fr_train, fr_test, fr_m, fr_train_r, fr_test_r, fr_m_r])
            print("LOG: {} {:.2f} {:.2f} {:.2f} {:.2f} {:.2f} {:.2f} {:.2f} {:.2f}".format(X_materials_cnt, norm_2, norm_inf, fr_train, fr_test, fr_m, fr_train_r, fr_test_r, fr_m_r))
            del(X_materials) # for saving memory
        # persist the final UAP and the per-batch log
        np.save(self.save_path+'_noise', noise)
        np.save(self.save_path+'_LOG', np.array(LOG))
        return noise, np.array(LOG)
### cofiguration of classifier
# model_type: 'InceptionV3', 'VGG16', 'ResNet50'
# model_path: str, path to model weight
# output_class: int, number of classes
# mono: int, monochrome images if mono = 1, RGB images otherwise
# silence: int, prevent to output model summary if silence = 1, not otherwise
class my_DNN:
    """Build an ImageNet-pretrained backbone with a fresh softmax head and
    wrap it in an ART KerasClassifier.

    NOTE(review): indentation was lost in this source; statement grouping
    below was reconstructed from context.
    """

    def __init__(
        self,
        model_type,
        model_path,
        output_class,
        mono,
        silence
    ):
        # model_type: 'InceptionV3', 'VGG16' or 'ResNet50'
        self.model_type = model_type
        # model_path: path to the trained weight file (*.h5)
        self.model_path = model_path
        # output_class: number of classes in the softmax head
        self.output_class = output_class
        # mono: 1 for single-channel (grayscale) input, RGB otherwise
        self.mono = mono
        # silence: 1 suppresses model.summary() output
        self.silence = silence

    def my_classifier(self):
        """Return an ART KerasClassifier wrapping the configured network."""
        if self.mono==1:
            # Grayscale input: drop the stock input layer and prepend a
            # Lambda that replicates the single channel to 3 RGB channels.
            if self.model_type == 'InceptionV3':
                print(" MODEL: InceptionV3")
                base_model = InceptionV3(weights='imagenet', include_top=False)
            elif self.model_type == 'VGG16':
                print(" MODEL: VGG16")
                base_model = VGG16(weights='imagenet', include_top=False)
            elif self.model_type == "ResNet50":
                print(" MODEL: ResNet50")
                base_model = ResNet50(weights='imagenet', include_top=False)
            else:
                # NOTE(review): base_model stays undefined for unknown types,
                # so the next statement raises NameError.
                print(" --- ERROR : UNKNOWN MODEL TYPE --- ")
            base_model.layers.pop(0)
            newInput = Input(batch_shape=(None, 299,299,1))
            x = Lambda(lambda image: tf.image.grayscale_to_rgb(image))(newInput)
            tmp_out = base_model(x)
            tmpModel = Model(newInput, tmp_out)
            x = tmpModel.output
            x = GlobalAveragePooling2D()(x)
            predictions = Dense(self.output_class, activation='softmax')(x)
            model = Model(tmpModel.input, predictions)
        else:
            # RGB input: stock 299x299x3 input shape.
            input_shape = (299, 299, 3)
            if self.model_type == 'InceptionV3':
                print(" MODEL: InceptionV3")
                base_model = InceptionV3(weights='imagenet', input_shape=input_shape, include_top=False)
            elif self.model_type == 'VGG16':
                print(" MODEL: VGG16")
                base_model = VGG16(weights='imagenet', input_shape=input_shape, include_top=False)
            elif self.model_type == "ResNet50":
                print(" MODEL: ResNet50")
                base_model = ResNet50(weights='imagenet', input_shape=input_shape, include_top=False)
            else:
                print(" --- ERROR: UNKNOWN MODEL TYPE --- ")
            x = base_model.output
            x = GlobalAveragePooling2D()(x)
            predictions = Dense(self.output_class, activation='softmax')(x)
            model = Model(inputs=base_model.input, outputs=predictions)
        # Make all layers trainable, compile, then restore trained weights.
        for layer in model.layers:
            layer.trainable = True
        sgd = SGD(lr=1e-3, decay=1e-6, momentum=0.9, nesterov=True)
        model.compile(optimizer=sgd, loss='categorical_crossentropy', metrics=['accuracy'])
        model.load_weights(self.model_path)
        if self.silence != 1:
            model.summary()
        classifier = KerasClassifier(model=model)
        return classifier
### Main ###
if __name__ == '__main__':
    # ---- command-line arguments (documented in the header at top of file) ----
    parser = argparse.ArgumentParser()
    parser.add_argument('--X_train_path', type=str)
    parser.add_argument('--y_train_path', type=str)
    parser.add_argument('--X_test_path', type=str)
    parser.add_argument('--y_test_path', type=str)
    parser.add_argument('--X_materials_dir', type=str)
    parser.add_argument('--model_path', type=str)
    parser.add_argument('--model_type', type=str)
    parser.add_argument('--norm_type', type=str)
    parser.add_argument('--norm_rate', type=float)
    parser.add_argument('--fgsm_eps', type=float)
    parser.add_argument('--uap_iter', type=int)
    parser.add_argument('--targeted', type=int)
    parser.add_argument('--save_path', type=str)
    args = parser.parse_args()
    # Map the CLI string onto a numeric norm order.
    # NOTE(review): if --norm_type is neither '2' nor 'inf', norm_type stays
    # undefined and the use below raises NameError.
    if args.norm_type == '2':
        norm_type = 2
    elif args.norm_type == 'inf':
        norm_type = np.inf
    norm_rate = args.norm_rate
    # load data
    X_train = np.load(args.X_train_path)
    y_train = np.load(args.y_train_path)
    X_test = np.load(args.X_test_path)
    y_test = np.load(args.y_test_path)
    # obtain the file names of X_materials
    X_materials_paths = glob.glob(args.X_materials_dir + '/*.npy')
    # check color type (mono or RGB) from the channel dimension
    if X_train.shape[-1] != 3:
        mono = 1
    else:
        mono = 0
    # compute the actual norm size from the ratio `norm_rate` of the Lp of the UAP to the average Lp norm of an image in the dataset (training images)
    if norm_type == np.inf:
        norm_mean = 0
        for img in X_train:
            norm_mean += abs(img).max()
        norm_mean = norm_mean/X_train.shape[0]
        # divided by 128 because images are rescaled to ~[-1, 1] below
        norm_size = float(norm_rate*norm_mean/128.0)
        print("\n ------------------------------------")
        print(" Linf norm: {:.2f} ".format(norm_size))
    else:
        norm_mean = 0
        for img in X_train:
            norm_mean += np.linalg.norm(img)
        norm_mean = norm_mean/X_train.shape[0]
        norm_size = float(norm_rate*norm_mean/128.0)
        print(" L2 norm: {:.2f} ".format(norm_size))
    # normalization
    X_train -= 128.0
    X_train /= 128.0
    X_test -= 128.0
    X_test /= 128.0
    # build the classifier and restore trained weights
    dnn = my_DNN(
        model_type=args.model_type,
        model_path=args.model_path,
        output_class=y_train.shape[1],
        mono=mono,
        silence=1
    )
    classifier = dnn.my_classifier()
    # compute the accuracies for clean images (sanity check before attacking)
    preds = np.argmax(classifier.predict(X_train), axis=1)
    acc = np.sum(preds == np.argmax(y_train, axis=1)) / y_train.shape[0]
    print(" Accuracy [train]: {:.2f}".format(acc))
    preds = np.argmax(classifier.predict(X_test), axis=1)
    acc = np.sum(preds == np.argmax(y_test, axis=1)) / y_test.shape[0]
    print(" Accuracy [test]: {:.2f}".format(acc))
    print(" ------------------------------------\n")
    # generate UAP
    uap = my_UAP(
        classifier=classifier,
        X_train=X_train, y_train=y_train,
        X_test=X_test, y_test=y_test,
        X_materials_paths=X_materials_paths,
        norm_type=norm_type,
        norm_size=norm_size,
        fgsm_eps=args.fgsm_eps,
        uap_iter=args.uap_iter,
        targeted=args.targeted,
        save_path=args.save_path,
    )
    noise, LOG = uap.my_gen_UAP()
    # output log; LOG columns are:
    # X_materials_cnt, norm_2, norm_inf, fr_train, fr_test, fr_m, fr_train_r, fr_test_r, fr_m_r
    plt.figure()
    # NOTE(review): this first call is probably intended to be plt.xlim —
    # as written it is immediately overridden by plt.ylim(0, 1) below.
    plt.ylim(0, LOG[:,0][-1])
    plt.ylim(0, 1)
    p1 = plt.plot(LOG[:,0], LOG[:,3], linewidth=3, color="darkred", linestyle="solid", label="fr_train")
    p2 = plt.plot(LOG[:,0], LOG[:,4], linewidth=3, color="darkblue", linestyle="solid", label="fr_test")
    p3 = plt.plot(LOG[:,0], LOG[:,5], linewidth=3, color="dimgray", linestyle="solid", label="fr_matel")
    p4 = plt.plot(LOG[:,0], LOG[:,6], linewidth=3, color="lightcoral", linestyle="dashed", label="fr_train_r")
    p5 = plt.plot(LOG[:,0], LOG[:,7], linewidth=3, color="lightblue", linestyle="dashed", label="fr_test_r")
    p6 = plt.plot(LOG[:,0], LOG[:,8], linewidth=3, color="lightgray", linestyle="dashed", label="fr_matel_r")
    plt.xlabel("# of iterations (natural images)")
    plt.ylabel("Attack success rate")
    plt.legend(loc='lower right')
    plt.grid(True)
    plt.savefig(args.save_path+'_fig.png')
    # output processing time
    processing_time = time.time() - start_time
    print("\n\t ------------------------------------")
    print("\t total processing time : {:.2f} h.".format(processing_time / 3600.0))
    print("\t ------------------------------------\n")
| 40.619647 | 182 | 0.595436 |
ace5d87fe70e90f4b9dca0bacc6a77514923f19e | 1,226 | py | Python | src/command_modules/azure-cli-acs/azure/cli/command_modules/acs/__init__.py | taliesins/azure-cli | a2451fe7148dfdd005f0f2ec797915eb479f6f6a | [
"MIT"
] | 1 | 2018-01-30T05:55:29.000Z | 2018-01-30T05:55:29.000Z | src/command_modules/azure-cli-acs/azure/cli/command_modules/acs/__init__.py | mickeymitic/azure-cli | 92af6b3cea52f99eee84df93f5d3e2003a273d04 | [
"MIT"
] | null | null | null | src/command_modules/azure-cli-acs/azure/cli/command_modules/acs/__init__.py | mickeymitic/azure-cli | 92af6b3cea52f99eee84df93f5d3e2003a273d04 | [
"MIT"
] | null | null | null | # --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
from azure.cli.core import AzCommandsLoader
import azure.cli.command_modules.acs._help # pylint: disable=unused-import
class ContainerServiceCommandsLoader(AzCommandsLoader):
    """Azure CLI command loader for the Container Service (acs) module."""

    def __init__(self, cli_ctx=None):
        from azure.cli.core.commands import CliCommandType
        custom_type = CliCommandType(
            operations_tmpl='azure.cli.command_modules.acs.custom#{}')
        super(ContainerServiceCommandsLoader, self).__init__(
            cli_ctx=cli_ctx, custom_command_type=custom_type)

    def load_command_table(self, args):
        """Populate and return the module's command table."""
        from azure.cli.command_modules.acs.commands import load_command_table
        load_command_table(self, args)
        return self.command_table

    def load_arguments(self, command):
        """Register argument definitions for the given command."""
        from azure.cli.command_modules.acs._params import load_arguments
        load_arguments(self, command)


COMMAND_LOADER_CLS = ContainerServiceCommandsLoader
| 40.866667 | 109 | 0.654976 |
ace5d8d265c4fbf046055fc17191738fcc01f2a8 | 1,026 | py | Python | Torneo/apps/equipo/forms.py | Estuardiaz2611/Djangotuto2 | bdb8665c7e2a95296ab85e1e986bbcdca1828fa7 | [
"MIT"
] | null | null | null | Torneo/apps/equipo/forms.py | Estuardiaz2611/Djangotuto2 | bdb8665c7e2a95296ab85e1e986bbcdca1828fa7 | [
"MIT"
] | null | null | null | Torneo/apps/equipo/forms.py | Estuardiaz2611/Djangotuto2 | bdb8665c7e2a95296ab85e1e986bbcdca1828fa7 | [
"MIT"
] | null | null | null | from django import forms
from apps.equipo.models import Equipo
class EquipoForm (forms.ModelForm):
    """ModelForm for creating and editing Equipo (team) records."""

    class Meta:
        model = Equipo
        fields = [
            'nombre_equipo',
            'siglas',
            'jugadores',
            'encargado',
            'telefono',
            'liga',
        ]
        # BUG FIX: Django's ModelForm Meta option is `labels` (plural).
        # The original `label` attribute was silently ignored by Django,
        # so none of these labels were ever applied.
        labels = {
            # NOTE(review): 'Nombre Liga' looks like a copy-paste from a
            # LigaForm; confirm whether it should read 'Nombre Equipo'.
            'nombre_equipo': 'Nombre Liga',
            'siglas': 'Siglas',
            'jugadores': 'Jugadores',
            'encargado': 'Encargado',
            'telefono': 'Telefono',
            'liga': 'Liga',
        }
        widgets = {
            'nombre_equipo': forms.TextInput(attrs={'class': 'form-control'}),
            'siglas': forms.TextInput(attrs={'class': 'form-control'}),
            'jugadores': forms.TextInput(attrs={'class': 'form-control'}),
            'encargado': forms.TextInput(attrs={'class': 'form-control'}),
            'telefono': forms.TextInput(attrs={'class': 'form-control'}),
            'liga': forms.Select(attrs={'class': 'form-control'}),
        }
ace5d981bfefd450b2fa5d1ed62a51f1f3361bb2 | 7,855 | py | Python | masakari-7.0.0/masakari/tests/unit/fakes.py | scottwedge/OpenStack-Stein | 7077d1f602031dace92916f14e36b124f474de15 | [
"Apache-2.0"
] | null | null | null | masakari-7.0.0/masakari/tests/unit/fakes.py | scottwedge/OpenStack-Stein | 7077d1f602031dace92916f14e36b124f474de15 | [
"Apache-2.0"
] | 5 | 2019-08-14T06:46:03.000Z | 2021-12-13T20:01:25.000Z | masakari-7.0.0/masakari/tests/unit/fakes.py | scottwedge/OpenStack-Stein | 7077d1f602031dace92916f14e36b124f474de15 | [
"Apache-2.0"
] | 2 | 2020-03-15T01:24:15.000Z | 2020-07-22T20:34:26.000Z | # Copyright 2016 NTT DATA
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_utils import timeutils
from oslo_utils import uuidutils
from masakari import objects
from masakari.tests import uuidsentinel
NOW = timeutils.utcnow().replace(microsecond=0)
class FakeNovaClient(object):
    """In-memory stand-in for a novaclient instance, used by unit tests.

    Exposes `servers`, `services` and `aggregates` managers that mimic the
    subset of the real API the tests exercise.
    NOTE(review): indentation was lost in this source; nesting below was
    reconstructed from context.
    """

    class Server(object):
        # Minimal fake of a nova Server resource; extended attributes are
        # exposed under their 'OS-EXT-*' names like real API objects.
        def __init__(self, id=None, uuid=None, host=None, vm_state=None,
                     task_state=None, power_state=1, ha_enabled=None,
                     locked=False):
            self.id = id
            self.uuid = uuid or uuidutils.generate_uuid()
            self.host = host
            setattr(self, 'OS-EXT-SRV-ATTR:hypervisor_hostname', host)
            setattr(self, 'OS-EXT-STS:vm_state', vm_state)
            setattr(self, 'OS-EXT-STS:task_state', task_state)
            setattr(self, 'OS-EXT-STS:power_state', power_state)
            self.metadata = {"HA_Enabled": ha_enabled}
            self.locked = locked

    class ServerManager(object):
        def __init__(self):
            self._servers = []
            # Call recorders so tests can assert on reset_state/stop usage.
            self.reset_state_calls = []
            self.stop_calls = []

        def create(self, id, uuid=None, host=None, vm_state='active',
                   task_state=None, power_state=1, ha_enabled=False):
            """Create, register and return a fake Server."""
            server = FakeNovaClient.Server(id=id, uuid=uuid, host=host,
                                           vm_state=vm_state,
                                           task_state=task_state,
                                           power_state=power_state,
                                           ha_enabled=ha_enabled)
            self._servers.append(server)
            return server

        def get(self, id):
            # Linear scan; returns None when no server matches.
            for s in self._servers:
                if s.id == id:
                    return s
            return None

        def list(self, detailed=True, search_opts=None):
            """Return servers matching every attribute filter in search_opts.

            NOTE(review): when 'all_tenants' is present, EVERY filter is
            skipped (not just that key) — likely unintended, kept as-is.
            """
            matching = list(self._servers)
            if search_opts:
                for opt, val in search_opts.items():
                    if 'all_tenants' in search_opts:
                        continue
                    matching = [m for m in matching
                                if getattr(m, opt, None) == val]
            return matching

        def reset_state(self, uuid, status):
            # Record (uuid, previous vm_state), then apply the new state.
            current_status = getattr(self.get(uuid), "OS-EXT-STS:vm_state")
            self.reset_state_calls.append((uuid, current_status))
            server = self.get(uuid)
            setattr(server, 'OS-EXT-STS:vm_state', status)

        def evacuate(self, uuid, host=None):
            """Simulate evacuation of the server onto `host`."""
            if not host:
                host = 'fake-host-1'
            server = self.get(uuid)
            setattr(server, 'OS-EXT-SRV-ATTR:hypervisor_hostname', host)
            # pretending that instance is evacuated successfully on given host
            if getattr(server, "OS-EXT-STS:vm_state") in ['active', 'error']:
                setattr(server, 'OS-EXT-STS:vm_state', 'active')
            else:
                setattr(server, 'OS-EXT-STS:vm_state', 'stopped')

        def stop(self, id):
            self.stop_calls.append(id)
            server = self.get(id)
            setattr(server, 'OS-EXT-STS:vm_state', 'stopped')

        def start(self, id):
            server = self.get(id)
            setattr(server, 'OS-EXT-STS:vm_state', 'active')

    class Aggregate(object):
        # Minimal fake of a nova host aggregate.
        def __init__(self, id=None, uuid=None, name=None, hosts=None):
            self.id = id
            self.uuid = uuid or uuidutils.generate_uuid()
            self.name = name
            self.hosts = hosts

    class AggregatesManager(object):
        def __init__(self):
            self.aggregates = []

        def create(self, id, uuid=None, name=None, hosts=None):
            """Create, register and return a fake Aggregate."""
            aggregate = FakeNovaClient.Aggregate(id=id, uuid=uuid, name=name,
                                                 hosts=hosts)
            self.aggregates.append(aggregate)
            return aggregate

        def list(self):
            return self.aggregates

        def add_host(self, aggregate_id, host_name):
            # Idempotent: a host is only added once per aggregate.
            aggregate = self.get(aggregate_id)
            if host_name not in aggregate.hosts:
                aggregate.hosts.append(host_name)

        def get(self, aggregate_id):
            # Returns None implicitly when no aggregate matches.
            for aggregate in self.aggregates:
                if aggregate.id == aggregate_id:
                    return aggregate

    class Service(object):
        # Minimal fake of a nova Service record.
        def __init__(self, id=None, host=None, binary=None, status='enabled'):
            self.id = id
            self.host = host
            self.binary = binary
            self.status = status

    class Services(object):
        def __init__(self):
            self._services = []

        def create(self, id, host=None, binary=None,
                   status=None):
            # NOTE(review): unlike Server/Aggregate, nothing is returned.
            self._services.append(FakeNovaClient.Service(id=id, host=host,
                                                         binary=binary,
                                                         status=status))

        def disable(self, service_id):
            # Mark every service with this id as disabled.
            for _service in self._services:
                if _service.id == service_id:
                    service = _service
                    service.status = 'disabled'

        def list(self, host=None, binary=None):
            # Exact match on both host and binary is required.
            services = []
            for service in self._services:
                if host == service.host and binary == service.binary:
                    services.append(service)
            return services

    def __init__(self):
        self.servers = FakeNovaClient.ServerManager()
        self.services = FakeNovaClient.Services()
        self.aggregates = FakeNovaClient.AggregatesManager()
def create_fake_notification(type="VM", id=1, payload=None,
                             source_host_uuid=uuidsentinel.fake_host,
                             generated_time=NOW, status="new",
                             notification_uuid=uuidsentinel.fake_notification):
    """Build a Notification object pre-filled with test defaults."""
    fields = {
        "type": type,
        "id": id,
        "payload": payload,
        "source_host_uuid": source_host_uuid,
        "generated_time": generated_time,
        "status": status,
        "notification_uuid": notification_uuid,
    }
    return objects.Notification(**fields)
def create_fake_host(name='fake_host', id=1, reserved=False,
                     on_maintenance=False, type='SSH',
                     control_attributes='fake',
                     uuid=uuidsentinel.fake_host,
                     failover_segment_id=uuidsentinel.fake_segment):
    """Build a Host object pre-filled with test defaults."""
    fields = {
        "name": name,
        "id": id,
        "reserved": reserved,
        "on_maintenance": on_maintenance,
        "type": type,
        "control_attributes": control_attributes,
        "uuid": uuid,
        "failover_segment_id": failover_segment_id,
    }
    return objects.Host(**fields)
def create_fake_failover_segment(name='fake_segment', id=1, description=None,
                                 service_type='COMPUTE',
                                 recovery_method="auto",
                                 uuid=uuidsentinel.fake_segment):
    """Build a FailoverSegment object pre-filled with test defaults."""
    fields = {
        "name": name,
        "id": id,
        "description": description,
        "service_type": service_type,
        "recovery_method": recovery_method,
        "uuid": uuid,
    }
    return objects.FailoverSegment(**fields)
def create_fake_notification_progress_details(
        name, uuid, progress, state, progress_details):
    """Build a NotificationProgressDetails object from the given fields."""
    fields = {
        "name": name,
        "uuid": uuid,
        "progress": progress,
        "state": state,
        "progress_details": progress_details,
    }
    return objects.NotificationProgressDetails(**fields)
| 39.472362 | 79 | 0.576194 |
ace5dae8b3e5dd4919292811fd6ec3018c9a261a | 104 | py | Python | modules/2.79/bpy/types/ParentActuator.py | cmbasnett/fake-bpy-module | acb8b0f102751a9563e5b5e5c7cd69a4e8aa2a55 | [
"MIT"
] | null | null | null | modules/2.79/bpy/types/ParentActuator.py | cmbasnett/fake-bpy-module | acb8b0f102751a9563e5b5e5c7cd69a4e8aa2a55 | [
"MIT"
] | null | null | null | modules/2.79/bpy/types/ParentActuator.py | cmbasnett/fake-bpy-module | acb8b0f102751a9563e5b5e5c7cd69a4e8aa2a55 | [
"MIT"
class ParentActuator:
    """Attribute-only stub of Blender's ParentActuator game-logic type.

    All attributes default to None; real values are provided by Blender's
    API at runtime (this module only exists for IDE completion).
    """

    use_ghost = None
    use_compound = None
    object = None  # name mandated by the Blender API; shadows builtin
    mode = None
| 11.555556 | 23 | 0.634615 |
ace5dbbbb7eb8a54eeef225f2cdac879c0886712 | 1,273 | py | Python | setup.py | ssriceboat/riceprint | da9579087bc5641220587f36986129891f62672e | [
"MIT"
] | 6 | 2019-07-16T02:48:47.000Z | 2021-02-05T03:38:47.000Z | setup.py | ssriceboat/riceprint | da9579087bc5641220587f36986129891f62672e | [
"MIT"
] | null | null | null | setup.py | ssriceboat/riceprint | da9579087bc5641220587f36986129891f62672e | [
"MIT"
] | 1 | 2019-11-01T18:23:20.000Z | 2019-11-01T18:23:20.000Z | import os
from setuptools import setup
# Directory containing this setup.py; used to locate README.rst reliably
# regardless of the current working directory.
cwd = os.path.abspath(os.path.dirname(__file__))
with open(os.path.join(cwd, 'README.rst'), encoding='utf-8') as f:
    long_description = f.read()
# Install colorama if using Windows (it enables ANSI color output there);
# other platforms support ANSI escapes natively.
if os.name == 'nt':
    dependencies = ['colorama']
else:
    dependencies = []
setup(
    name='riceprint',
    version='1.5.6',
    description='OS-agnostic colored & custom Python console print() functions.',
    long_description=long_description,
    author='Kevin Sacca',
    author_email='ssriceboat@gmail.com',
    url='https://github.com/ssriceboat/riceprint',
    project_urls={'Documentation': 'https://riceprint.readthedocs.io/en/latest/',
    },
    license='MIT',
    classifiers=[
        'Development Status :: 4 - Beta',
        'Intended Audience :: Developers',
        'Topic :: Software Development :: Build Tools',
        'License :: OSI Approved :: MIT License',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.5',
        'Programming Language :: Python :: 3.6',
        'Programming Language :: Python :: 3.7',
    ],
    packages=['riceprint'],
    # Package sources live under src/riceprint.
    package_dir={'riceprint': 'src/riceprint'},
    install_requires=dependencies,
    keywords='print console terminal shell python pprint color progress bar'
)
ace5dbe5374188ae8abe9d306a8713c74ea8df85 | 6,206 | py | Python | sqlalchemy_oso/partial.py | nitros12/oso-patch | 3a88f94f6ed520f778d9a8e913cd0ccc9fa29ccb | [
"Apache-2.0"
] | null | null | null | sqlalchemy_oso/partial.py | nitros12/oso-patch | 3a88f94f6ed520f778d9a8e913cd0ccc9fa29ccb | [
"Apache-2.0"
] | null | null | null | sqlalchemy_oso/partial.py | nitros12/oso-patch | 3a88f94f6ed520f778d9a8e913cd0ccc9fa29ccb | [
"Apache-2.0"
] | null | null | null | import functools
from typing import Any, Callable, Tuple
from sqlalchemy.orm.session import Session
from sqlalchemy import inspect
from sqlalchemy.orm import RelationshipProperty
from sqlalchemy.sql import expression as sql
from polar.partial import dot_path
from polar.expression import Expression
from polar.variable import Variable
from polar.exceptions import UnsupportedError
# TODO (dhatch) Better types here, first any is model, second any is a sqlalchemy expr.
EmitFunction = Callable[[Session, Any], Any]
def partial_to_filter(expression: Expression, session: Session, model, get_model):
    """Convert constraints in ``partial`` to a filter over ``model`` that should be applied to query."""
    # Thin public entry point; all work happens in the translate_* family.
    return translate_expr(expression, session, model, get_model)
def translate_expr(expression: Expression, session: Session, model, get_model):
    """Dispatch a polar Expression to the translator for its operator.

    Raises UnsupportedError for operators without a translator.
    """
    assert isinstance(expression, Expression)
    operator = expression.operator
    if operator in COMPARISONS:
        handler = translate_compare
    elif operator == "Isa":
        handler = translate_isa
    elif operator == "In":
        handler = translate_in
    elif operator == "And":
        handler = translate_and
    else:
        raise UnsupportedError(f"Unsupported {expression}")
    return handler(expression, session, model, get_model)
def translate_and(expression: Expression, session: Session, model, get_model):
    """Translate an And expression by AND-ing every translated conjunct."""
    assert expression.operator == "And"
    expr = sql.and_()  # empty AND acts as TRUE; used as the fold seed
    for expression in expression.args:  # NOTE: rebinds the parameter name
        translated = translate_expr(expression, session, model, get_model)
        expr = expr & translated
    return expr
def translate_isa(expression: Expression, session: Session, model, get_model):
    """Translate an Isa (type test) into a constant TRUE/FALSE clause.

    Walks the dot path on the left side through relationships to the model
    being tested, then decides the subclass check statically at translation
    time — no per-row SQL condition is needed.
    """
    assert expression.operator == "Isa"
    left, right = expression.args
    if dot_path(left) == ():
        assert left == Variable("_this")
    else:
        # Follow relationship hops (e.g. _this.a.b) to the target model.
        for field_name in dot_path(left):
            _, model, __ = get_relationship(model, field_name)
    assert not right.fields, "Unexpected fields in isa expression"
    constraint_type = get_model(right.tag)
    model_type = inspect(model, raiseerr=True).class_
    return sql.true() if issubclass(model_type, constraint_type) else sql.false()
def translate_compare(expression: Expression, session: Session, model, get_model):
    """Translate a comparison expression into a SQLAlchemy condition.

    A dotted left side (e.g. _this.field) becomes a relationship traversal
    ending in a column comparison; a bare `_this` means the model instance
    itself is compared, which is only supported for equality and is emitted
    as a primary-key match.
    """
    (left, right) = expression.args
    left_path = dot_path(left)
    if left_path:
        # Compare a (possibly nested) field: traverse all but the last hop,
        # then emit the comparison on the final field.
        path, field_name = left_path[:-1], left_path[-1]
        return translate_dot(
            path,
            session,
            model,
            functools.partial(emit_compare, field_name, right, expression.operator),
        )
    else:
        assert left == Variable("_this")
        # Comparing against an instance of a different model can never match.
        if not isinstance(right, model):
            return sql.false()
        if expression.operator not in ("Eq", "Unify"):
            raise UnsupportedError(
                f"Unsupported comparison: {expression}. Models can only be compared"
                " with `=` or `==`"
            )
        # Identity comparison: match on every primary-key column.
        primary_keys = [pk.name for pk in inspect(model).primary_key]
        pk_filter = sql.true()
        for key in primary_keys:
            pk_filter &= getattr(model, key) == getattr(right, key)
        return pk_filter
def translate_in(expression, session, model, get_model):
    """Translate an In expression into a relationship containment filter."""
    assert expression.operator == "In"
    left = expression.args[0]
    right = expression.args[1]
    # IN means at least something must be contained in the property.
    # There are two possible types of in operations. In both, the right hand side
    # should be a dot op.
    # Partial In: LHS is an expression
    if isinstance(left, Expression):
        path = dot_path(right)
        assert path
        # Apply the sub-expression to elements of the traversed relationship.
        return translate_dot(
            path, session, model, functools.partial(emit_subexpression, left, get_model)
        )
    else:
        # Contains: LHS is not an expression.
        # TODO (dhatch) Missing check, left type must match type of the target?
        path = dot_path(right)
        assert path
        # Traverse all but the final hop, then test membership on the last
        # (multi-valued) relationship field.
        path, field_name = path[:-1], path[-1]
        return translate_dot(
            path, session, model, functools.partial(emit_contains, field_name, left)
        )
def translate_dot(path: Tuple[str, ...], session: Session, model, func: EmitFunction):
    """Recursively descend `path` of relationship hops, applying `func` at the end.

    Each to-one hop is wrapped in RelationshipProperty.has(); each to-many
    hop in .any(). With an empty path, `func` is applied to `model` directly.
    """
    if len(path) == 0:
        return func(session, model)
    else:
        property, model, is_multi_valued = get_relationship(model, path[0])
        if not is_multi_valued:
            return property.has(translate_dot(path[1:], session, model, func))
        else:
            return property.any(translate_dot(path[1:], session, model, func))
def get_relationship(model, field_name: str):
    """Get the property object for field on model. field must be a relationship field.

    :param model: mapped SQLAlchemy class to look the field up on
    :param field_name: name of a relationship attribute of ``model``
    :returns: (property, model, is_multi_valued)
    """
    property = getattr(model, field_name)
    # Only relationship fields are valid here; column attributes would fail.
    assert isinstance(property.property, RelationshipProperty)
    relationship = property.property
    # The related (target) mapped class of the relationship.
    model = property.entity.class_
    return (property, model, relationship.uselist)
# Map polar comparison operator names onto binary predicates over
# (property, value). "Unify" and "Eq" are both plain equality.
COMPARISONS = {
    "Unify": lambda prop, val: prop == val,
    "Eq": lambda prop, val: prop == val,
    "Neq": lambda prop, val: prop != val,
    "Geq": lambda prop, val: prop >= val,
    "Gt": lambda prop, val: prop > val,
    "Leq": lambda prop, val: prop <= val,
    "Lt": lambda prop, val: prop < val,
}


def emit_compare(field_name, value, operator, session, model):
    """Emit a comparison of ``model.<field_name>`` against ``value`` using ``operator``."""
    prop = getattr(model, field_name)
    return COMPARISONS[operator](prop, value)
def emit_subexpression(sub_expression: Expression, get_model, session: Session, model):
    """Emit a sub-expression on ``model``."""
    # Recurses back into the top-level translator with the inner expression.
    return translate_expr(sub_expression, session, model, get_model)
def emit_contains(field_name, value, session, model):
    """Emit a contains operation, checking that multi-valued relationship field ``field_name`` contains ``value``."""
    # TODO (dhatch): Could this be valid for fields that are not relationship fields?
    property, model, is_multi_valued = get_relationship(model, field_name)
    # Contains is only meaningful on to-many (uselist) relationships.
    assert is_multi_valued
    return property.contains(value)
| 35.872832 | 117 | 0.677409 |
ace5dbf6c9f293ba338b617f32253e4680fdf600 | 1,320 | py | Python | src/fetch.py | LTurret/mltd-boarding-service | 269a3cc10ff6ffb0e4c43a77305bee5b2382dc7a | [
"MIT"
] | null | null | null | src/fetch.py | LTurret/mltd-boarding-service | 269a3cc10ff6ffb0e4c43a77305bee5b2382dc7a | [
"MIT"
] | 5 | 2022-01-27T13:38:30.000Z | 2022-03-04T02:22:55.000Z | src/fetch.py | LTurret/mltd-boarding-service | 269a3cc10ff6ffb0e4c43a77305bee5b2382dc7a | [
"MIT"
async def GetNewestEvent(session):
    """Return the latest MLTD event, or {} if the request/parse fails."""
    events_url = "https://api.matsurihi.me/mltd/v1/events"
    async with session.get(events_url) as response:
        try:
            events = await response.json()
            # The last element of the returned list is treated as the newest.
            return events[-1]
        except Exception as e:
            print(f"exception occur: {e}")
            return {}
async def SearchEvent(evtid, session):
    """Fetch the event record for ``evtid``; return {} on any failure."""
    event_url = f"https://api.matsurihi.me/mltd/v1/events/{evtid}"
    async with session.get(event_url) as response:
        try:
            payload = await response.json()
            return payload
        except Exception as e:
            print(f"exception occur: {e}")
            return {}
async def FetchBorder(evtid, session):
    """Fetch the ranking border points for event ``evtid``; {} on failure."""
    async with session.get(f"https://api.matsurihi.me/mltd/v1/events/{evtid}/rankings/borderPoints") as response:
        try:
            data = await response.json()
            return data
        except Exception as e:
            # Best-effort: log and fall back to an empty result.
            print(f"exception occur: {e}")
            return {}
async def FetchCover(session, evtid):
    """Download the event background image to ./<evtid, zero-padded>.png.

    Returns 0 on success and 1 on failure. NOTE(review): unlike the other
    helpers, the parameter order here is (session, evtid).
    """
    async with session.get(f"https://storage.matsurihi.me/mltd/event_bg/{evtid:0>4,d}.png") as response:
        try:
            # Same zero-padded name as on the remote server.
            with open(f"{evtid:0>4,d}.png", "wb") as file:
                file.write(await response.read())
            return 0
        except Exception as e:
            print(f"exception occur: {e}")
            return 1
| 35.675676 | 113 | 0.573485 |
ace5dcdc7eb8d47b6d74e96e5b0d7a8b2d26bba1 | 1,388 | py | Python | utils/lookup_table.py | cns-iu/ccf-research | e029c8985a249c1caec925e95f5286c505c706ea | [
"MIT"
] | 1 | 2020-09-09T13:45:44.000Z | 2020-09-09T13:45:44.000Z | utils/lookup_table.py | cns-iu/ccf-research | e029c8985a249c1caec925e95f5286c505c706ea | [
"MIT"
] | null | null | null | utils/lookup_table.py | cns-iu/ccf-research | e029c8985a249c1caec925e95f5286c505c706ea | [
"MIT"
] | 4 | 2020-08-14T19:31:56.000Z | 2021-09-07T04:11:45.000Z | from skimage import io
import numpy as np
from PIL import Image
# Allow arbitrarily large images (whole-slide scans) without PIL's
# decompression-bomb guard raising.
Image.MAX_IMAGE_PIXELS = None
# Script: map the 5 dominant grayscale intensity bands of a segmentation
# mask to fixed RGB colors and save the result as a PNG.
image_path = r"x:\bin_VAN008_combined.jpg"
image = io.imread(image_path)
# Intensities within +/- offset of a band center get that band's color
# (JPEG compression smears the original label values).
offset = 15 # 10 is enough, 15 is for 100% cover
# NOTE(review): `types` is unused below; the "5" in argpartition is
# hard-coded instead.
types = 4 + 1 # include background
# find the 5 most frequent intensity values: these are the band centers
values, counts = np.unique(image, return_counts=True)
ind = np.argpartition(-counts, kth=5)[:5]
# print(values[ind])
centers = list(values[ind])
centers.sort()
print(centers)
# Color per band, in ascending order of center intensity.
color_table = [[255, 255, 255], # 'white',
               [0, 0, 255], # 'blue',
               [255, 0, 0], # 'red',
               [255, 255, 0], # 'yellow',
               [0, 255, 0], ] # 'green'
# Lookup table: every intensity within +/- offset of a center -> its RGB.
# NOTE(review): an intensity farther than `offset` from every center will
# raise KeyError in the masking loop below — confirm offset covers the data.
color_dict = {}
for k in range(len(centers)):
    center = centers[k]
    for i in range(-offset, offset + 1):
        color_dict[center + i] = color_table[k]
new_image = np.zeros((image.shape[0], image.shape[1], 3), dtype='uint8')
# Original per-pixel conversion loop, kept for reference; the vectorized
# masking loop below replaces it.
# for i in range(len(image)):
# # quick background conversion
# if np.amax(image[i]) == 0:
# new_image[i] = 255
# continue
#
# for j in range(len(image[i])):
# for k in range(len(centers)):
# new_image[i][j] = color_dict[image[i][j]]
# if i % 100 == 0:
# print(i)
# Vectorized: one boolean mask per distinct intensity value.
for value in values:
    mask = (image == value)
    new_image[mask] = color_dict[value]
    print(value)
io.imsave(image_path.replace("jpg", "png"), new_image)
| 24.785714 | 72 | 0.590058 |
ace5dd2b5cc1fdd89a2fabab9d14aff915841dbc | 563 | py | Python | codeEval/medium/magic_numbers.py | gauravsingh58/algo | 397859a53429e7a585e5f6964ad24146c6261326 | [
"WTFPL"
] | 1 | 2020-09-30T19:53:08.000Z | 2020-09-30T19:53:08.000Z | codeEval/medium/magic_numbers.py | gauravsingh58/algo | 397859a53429e7a585e5f6964ad24146c6261326 | [
"WTFPL"
] | null | null | null | codeEval/medium/magic_numbers.py | gauravsingh58/algo | 397859a53429e7a585e5f6964ad24146c6261326 | [
"WTFPL"
] | 1 | 2020-10-15T09:10:57.000Z | 2020-10-15T09:10:57.000Z | from sys import argv
def is_magic(s):
l = len(s)
if len(set(s)) != l:
return False
visited = set()
a = b = s[0]
j = 0
while b not in visited:
visited.add(b)
j = (int(b) + j) % l
b = s[j]
return a == b and len(visited) == l
# Precompute magic-ness for every candidate in [1, 10000].
magic = {}
for i in range(1, 10001):
    magic[i] = is_magic(str(i))
# Each input line holds "a b"; print the magic numbers in [a, b]
# space-separated, or -1 when the range contains none.
# NOTE(review): b > 10000 would raise KeyError on the table above —
# confirm the input bounds.
with open(argv[1], 'r') as test_cases:
    for test in test_cases:
        a, b = map(int, test.split())
        r = " ".join(str(i) for i in range(a, b+1) if magic[i])
        print(-1 if r == "" else r)
| 20.107143 | 63 | 0.49556 |
ace5de2bc33db4c784bea1b7d880ccaba83fa110 | 3,129 | py | Python | Lecture_8_CSP/Exercise6/Exercise/CSP_Australia.py | aleksander-GD/AI-F20 | c5d086e317f657c1b7a2d2179eafcef0267755ed | [
"MIT"
] | null | null | null | Lecture_8_CSP/Exercise6/Exercise/CSP_Australia.py | aleksander-GD/AI-F20 | c5d086e317f657c1b7a2d2179eafcef0267755ed | [
"MIT"
] | null | null | null | Lecture_8_CSP/Exercise6/Exercise/CSP_Australia.py | aleksander-GD/AI-F20 | c5d086e317f657c1b7a2d2179eafcef0267755ed | [
"MIT"
] | null | null | null | from random import shuffle
class CSP:
    """A minimal constraint-satisfaction problem solved by chronological
    backtracking.

    Attributes:
        variables: list of variable names, searched in declaration order.
        domains: dict mapping each variable to its list of candidate values.
        neighbours: dict mapping each variable to the variables it is
            constrained against.
        constraints: dict mapping each variable to a binary constraint
            ``f(var_a, value_a, var_b, value_b) -> bool``.
    """

    def __init__(self, variables, domains, neighbours, constraints):
        self.variables = variables
        self.domains = domains
        self.neighbours = neighbours
        self.constraints = constraints

    def backtracking_search(self):
        """Solve the CSP; returns a complete assignment dict, or None."""
        return self.recursive_backtracking({})

    def recursive_backtracking(self, assignment):
        """Depth-first search over partial assignments.

        Returns a complete consistent assignment extending `assignment`,
        or None if no such extension exists.
        """
        if self.is_complete(assignment):
            return assignment
        var = self.select_unassigned_variable(assignment)
        for value in self.order_domain_values(var, assignment):
            if self.is_consistent(var, value, assignment):
                assignment[var] = value
                results = self.recursive_backtracking(assignment)
                if results is not None:
                    return results
                # BUG FIX: undo the tentative assignment by *removing* the
                # key. The old code set assignment[var] = None, which left
                # the variable looking "assigned" to is_complete() and
                # select_unassigned_variable(), so backtracking could
                # return bogus partial solutions containing None values.
                del assignment[var]
        return None

    def select_unassigned_variable(self, assignment):
        # First unassigned variable in declaration order.
        for variable in self.variables:
            if variable not in assignment:
                return variable

    def is_complete(self, assignment):
        for variable in self.variables:
            if variable not in assignment:
                return False
        return True

    def order_domain_values(self, variable, assignment):
        # Static value ordering: a copy of the domain as declared.
        all_values = self.domains[variable][:]
        # shuffle(all_values)
        return all_values

    def is_consistent(self, variable, value, assignment):
        """Check `variable = value` against every assigned neighbour."""
        if not assignment:
            return True

        for constraint in self.constraints.values():
            for neighbour in self.neighbours[variable]:
                if neighbour not in assignment:
                    continue

                neighbour_value = assignment[neighbour]
                if not constraint(variable, value, neighbour, neighbour_value):
                    return False
        return True
def create_australia_csp():
    """Build the classic map-colouring CSP for the states of Australia."""
    wa, q, t, v, sa, nt, nsw = 'WA', 'Q', 'T', 'V', 'SA', 'NT', 'NSW'
    values = ['Red', 'Green', 'Blue']
    variables = [wa, q, t, v, sa, nt, nsw]

    # Every region may take any of the three colours (fresh list each).
    domains = {region: values[:] for region in variables}

    # Adjacency on the map; Tasmania borders nothing.
    neighbours = {
        wa: [sa, nt],
        q: [sa, nt, nsw],
        t: [],
        v: [sa, nsw],
        sa: [wa, nt, q, nsw, v],
        nt: [sa, wa, q],
        nsw: [sa, q, v],
    }

    def constraint_function(first_variable, first_value, second_variable, second_value):
        # Adjacent regions must not share a colour.
        return first_value != second_value

    # The same inequality constraint applies to every region.
    constraints = {region: constraint_function for region in variables}

    return CSP(variables, domains, neighbours, constraints)
if __name__ == '__main__':
    # Solve the Australia map-colouring problem and print one
    # "REGION: Colour" line per region, sorted by region name.
    australia = create_australia_csp()
    result = australia.backtracking_search()
    for area, color in sorted(result.items()):
        print("{}: {}".format(area, color))
    # Check at https://mapchart.net/australia.html
| 29.8 | 88 | 0.584532 |
ace5de651b07aed59e12e465d216af896cb372bd | 579 | py | Python | examples/low-level/error_handler_example.py | homus32/vkbottle | 8247665ef74835abe0c2c5e5981826540d0ecdb5 | [
"MIT"
] | 698 | 2019-08-09T17:32:52.000Z | 2021-07-22T08:30:32.000Z | examples/low-level/error_handler_example.py | homus32/vkbottle | 8247665ef74835abe0c2c5e5981826540d0ecdb5 | [
"MIT"
] | 216 | 2019-08-18T19:22:50.000Z | 2021-07-30T12:15:17.000Z | examples/low-level/error_handler_example.py | homus32/vkbottle | 8247665ef74835abe0c2c5e5981826540d0ecdb5 | [
"MIT"
] | 268 | 2019-08-10T14:52:04.000Z | 2021-07-28T07:06:42.000Z | import asyncio
from vkbottle import ErrorHandler
# Demo: register a handler for RuntimeError, then raise one inside a
# wrapped coroutine and let the ErrorHandler dispatch it.
error_handler = ErrorHandler()
# You can set redirect_arguments to error_handler and they
# will be passed after exception to exception handler
# ---
# async def f(a, b): raise RuntimeError
# async def exc_h(exc: RuntimeError, a, b): ...
async def exc_handler(exc: RuntimeError):
    # Invoked by the error handler with the caught exception instance.
    print("Oops error:", exc)
# The decorator routes exceptions raised in main() to the registered
# handlers instead of letting them propagate.
@error_handler.wraps_error_handler()
async def main():
    raise RuntimeError("Oh my god i am an exception")
error_handler.register_error_handler(RuntimeError, exception_handler=exc_handler)
asyncio.run(main())
| 23.16 | 81 | 0.761658 |
ace5e0fb4da41d7330239e861b9eb25268006bcb | 66,631 | py | Python | samples/samples/snippets.py | Ahmah2009/python-spanner | 46c1de0b97b5dcd9089f46c78c3dd4eaa9550f12 | [
"Apache-2.0"
] | null | null | null | samples/samples/snippets.py | Ahmah2009/python-spanner | 46c1de0b97b5dcd9089f46c78c3dd4eaa9550f12 | [
"Apache-2.0"
] | null | null | null | samples/samples/snippets.py | Ahmah2009/python-spanner | 46c1de0b97b5dcd9089f46c78c3dd4eaa9550f12 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
# Copyright 2016 Google, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This application demonstrates how to do basic operations using Cloud
Spanner.
For more information, see the README.rst under /spanner.
"""
from __future__ import print_function
from __future__ import unicode_literals
from __future__ import division
from __future__ import absolute_import
from future import standard_library
standard_library.install_aliases()
import argparse
import base64
import datetime
import decimal
from google.cloud import spanner
from google.cloud.spanner_v1 import param_types
# [START spanner_create_instance]
def create_instance(instance_id):
    """Creates an instance."""
    client = spanner.Client()

    # Use the regional us-central1 instance configuration.
    config_name = "{}/instanceConfigs/regional-us-central1".format(
        client.project_name
    )

    instance = client.instance(
        instance_id,
        configuration_name=config_name,
        display_name="This is a display name.",
        node_count=1,
    )

    # instance.create() returns a long-running operation.
    operation = instance.create()
    print("Waiting for operation to complete...")
    operation.result(120)

    print("Created instance {}".format(instance_id))
# [END spanner_create_instance]
# [START spanner_create_database]
def create_database(instance_id, database_id):
    """Creates a database and tables for sample data."""
    spanner_client = spanner.Client()
    instance = spanner_client.instance(instance_id)
    # Albums is interleaved in Singers, so album rows are deleted with
    # their parent singer (ON DELETE CASCADE).
    database = instance.database(
        database_id,
        ddl_statements=[
            """CREATE TABLE Singers (
            SingerId INT64 NOT NULL,
            FirstName STRING(1024),
            LastName STRING(1024),
            SingerInfo BYTES(MAX)
        ) PRIMARY KEY (SingerId)""",
            """CREATE TABLE Albums (
            SingerId INT64 NOT NULL,
            AlbumId INT64 NOT NULL,
            AlbumTitle STRING(MAX)
        ) PRIMARY KEY (SingerId, AlbumId),
        INTERLEAVE IN PARENT Singers ON DELETE CASCADE""",
        ],
    )
    # Database creation is a long-running operation.
    operation = database.create()
    print("Waiting for operation to complete...")
    operation.result(120)
    print("Created database {} on instance {}".format(database_id, instance_id))
# [END spanner_create_database]
# [START spanner_insert_data]
def insert_data(instance_id, database_id):
    """Inserts sample data into the given database.

    The database and table must already exist and can be created using
    `create_database`.
    """
    singer_rows = [
        (1, u"Marc", u"Richards"),
        (2, u"Catalina", u"Smith"),
        (3, u"Alice", u"Trentor"),
        (4, u"Lea", u"Martin"),
        (5, u"David", u"Lomond"),
    ]
    album_rows = [
        (1, 1, u"Total Junk"),
        (1, 2, u"Go, Go, Go"),
        (2, 1, u"Green"),
        (2, 2, u"Forever Hold Your Peace"),
        (2, 3, u"Terrified"),
    ]

    database = spanner.Client().instance(instance_id).database(database_id)

    # Both inserts are sent in one batch commit.
    with database.batch() as batch:
        batch.insert(
            table="Singers",
            columns=("SingerId", "FirstName", "LastName"),
            values=singer_rows,
        )
        batch.insert(
            table="Albums",
            columns=("SingerId", "AlbumId", "AlbumTitle"),
            values=album_rows,
        )

    print("Inserted data.")
# [END spanner_insert_data]
# [START spanner_delete_data]
def delete_data(instance_id, database_id):
    """Deletes sample data from the given database.

    The database, table, and data must already exist and can be created
    using `create_database` and `insert_data`.
    """
    database = spanner.Client().instance(instance_id).database(database_id)

    # Individual Albums rows, addressed by their full primary key.
    album_keys = spanner.KeySet(keys=[[2, 1], [2, 3]])

    # Singers rows whose key is >= 3 and < 5.
    key_range = spanner.KeyRange(start_closed=[3], end_open=[5])
    ranged_singer_keys = spanner.KeySet(ranges=[key_range])

    # Everything left in Singers; the interleaved Albums rows go with it
    # because Albums was defined with ON DELETE CASCADE.
    all_singer_keys = spanner.KeySet(all_=True)

    with database.batch() as batch:
        batch.delete("Albums", album_keys)
        batch.delete("Singers", ranged_singer_keys)
        batch.delete("Singers", all_singer_keys)

    print("Deleted data.")
# [END spanner_delete_data]
# [START spanner_query_data]
def query_data(instance_id, database_id):
    """Queries sample data from the database using SQL."""
    database = spanner.Client().instance(instance_id).database(database_id)

    sql = "SELECT SingerId, AlbumId, AlbumTitle FROM Albums"
    with database.snapshot() as snapshot:
        for row in snapshot.execute_sql(sql):
            print(u"SingerId: {}, AlbumId: {}, AlbumTitle: {}".format(*row))
# [END spanner_query_data]
# [START spanner_read_data]
def read_data(instance_id, database_id):
    """Reads sample data from the database."""
    database = spanner.Client().instance(instance_id).database(database_id)

    with database.snapshot() as snapshot:
        # Read the whole Albums table via the key-based read API.
        rows = snapshot.read(
            table="Albums",
            columns=("SingerId", "AlbumId", "AlbumTitle"),
            keyset=spanner.KeySet(all_=True),
        )
        for row in rows:
            print(u"SingerId: {}, AlbumId: {}, AlbumTitle: {}".format(*row))
# [END spanner_read_data]
# [START spanner_read_stale_data]
def read_stale_data(instance_id, database_id):
    """Reads sample data from the database. The data is exactly 15 seconds
    stale."""
    # The redundant function-local `import datetime` was removed; the
    # module already imports datetime at the top of the file.
    spanner_client = spanner.Client()
    instance = spanner_client.instance(instance_id)
    database = instance.database(database_id)

    # Read from a snapshot pinned exactly 15 seconds in the past.
    staleness = datetime.timedelta(seconds=15)

    with database.snapshot(exact_staleness=staleness) as snapshot:
        keyset = spanner.KeySet(all_=True)
        results = snapshot.read(
            table="Albums",
            columns=("SingerId", "AlbumId", "MarketingBudget"),
            keyset=keyset,
        )
        for row in results:
            print(u"SingerId: {}, AlbumId: {}, MarketingBudget: {}".format(*row))
# [END spanner_read_stale_data]
# [START spanner_query_data_with_new_column]
def query_data_with_new_column(instance_id, database_id):
    """Queries sample data from the database using SQL.

    This sample uses the `MarketingBudget` column. You can add the column
    by running the `add_column` sample or by running this DDL statement
    against your database:

        ALTER TABLE Albums ADD COLUMN MarketingBudget INT64
    """
    database = spanner.Client().instance(instance_id).database(database_id)

    sql = "SELECT SingerId, AlbumId, MarketingBudget FROM Albums"
    with database.snapshot() as snapshot:
        for row in snapshot.execute_sql(sql):
            print(u"SingerId: {}, AlbumId: {}, MarketingBudget: {}".format(*row))
# [END spanner_query_data_with_new_column]
# [START spanner_create_index]
def add_index(instance_id, database_id):
    """Adds a simple index to the example database."""
    database = spanner.Client().instance(instance_id).database(database_id)

    # Schema changes run as a long-running operation.
    ddl = ["CREATE INDEX AlbumsByAlbumTitle ON Albums(AlbumTitle)"]
    operation = database.update_ddl(ddl)

    print("Waiting for operation to complete...")
    operation.result(120)

    print("Added the AlbumsByAlbumTitle index.")
# [END spanner_create_index]
# [START spanner_query_data_with_index]
def query_data_with_index(
    instance_id, database_id, start_title="Aardvark", end_title="Goo"
):
    """Queries sample data from the database using SQL and an index.

    The index must exist before running this sample. You can add the index
    by running the `add_index` sample or by running this DDL statement against
    your database:

        CREATE INDEX AlbumsByAlbumTitle ON Albums(AlbumTitle)

    This sample also uses the `MarketingBudget` column. You can add the column
    by running the `add_column` sample or by running this DDL statement against
    your database:

        ALTER TABLE Albums ADD COLUMN MarketingBudget INT64
    """
    spanner_client = spanner.Client()
    instance = spanner_client.instance(instance_id)
    database = instance.database(database_id)

    params = {"start_title": start_title, "end_title": end_title}
    # Named `query_param_types` (not `param_types`) so this local no longer
    # shadows the module-level `param_types` import.
    query_param_types = {
        "start_title": spanner.param_types.STRING,
        "end_title": spanner.param_types.STRING,
    }

    with database.snapshot() as snapshot:
        results = snapshot.execute_sql(
            "SELECT AlbumId, AlbumTitle, MarketingBudget "
            "FROM Albums@{FORCE_INDEX=AlbumsByAlbumTitle} "
            "WHERE AlbumTitle >= @start_title AND AlbumTitle < @end_title",
            params=params,
            param_types=query_param_types,
        )

        for row in results:
            print(u"AlbumId: {}, AlbumTitle: {}, " "MarketingBudget: {}".format(*row))
# [END spanner_query_data_with_index]
# [START spanner_read_data_with_index]
def read_data_with_index(instance_id, database_id):
    """Reads sample data from the database using an index.

    The index must exist before running this sample. You can add the index
    by running the `add_index` sample or by running this DDL statement against
    your database:

        CREATE INDEX AlbumsByAlbumTitle ON Albums(AlbumTitle)
    """
    database = spanner.Client().instance(instance_id).database(database_id)

    with database.snapshot() as snapshot:
        rows = snapshot.read(
            table="Albums",
            columns=("AlbumId", "AlbumTitle"),
            keyset=spanner.KeySet(all_=True),
            index="AlbumsByAlbumTitle",
        )
        for row in rows:
            print("AlbumId: {}, AlbumTitle: {}".format(*row))
# [END spanner_read_data_with_index]
# [START spanner_create_storing_index]
def add_storing_index(instance_id, database_id):
    """Adds an storing index to the example database."""
    database = spanner.Client().instance(instance_id).database(database_id)

    # Same statement text as before (the two adjacent literals in the
    # original concatenated with no space between ")" and "STORING").
    ddl = [
        "CREATE INDEX AlbumsByAlbumTitle2 ON Albums(AlbumTitle)"
        "STORING (MarketingBudget)"
    ]
    operation = database.update_ddl(ddl)

    print("Waiting for operation to complete...")
    operation.result(120)

    print("Added the AlbumsByAlbumTitle2 index.")
# [END spanner_create_storing_index]
# [START spanner_read_data_with_storing_index]
def read_data_with_storing_index(instance_id, database_id):
    """Reads sample data from the database using an index with a storing
    clause.

    The index must exist before running this sample. You can add the index
    by running the `add_storing_index` sample or by running this DDL statement
    against your database:

        CREATE INDEX AlbumsByAlbumTitle2 ON Albums(AlbumTitle)
        STORING (MarketingBudget)
    """
    spanner_client = spanner.Client()
    instance = spanner_client.instance(instance_id)
    database = instance.database(database_id)
    with database.snapshot() as snapshot:
        keyset = spanner.KeySet(all_=True)
        # MarketingBudget can be served from the index itself because the
        # index STORES that column.
        results = snapshot.read(
            table="Albums",
            columns=("AlbumId", "AlbumTitle", "MarketingBudget"),
            keyset=keyset,
            index="AlbumsByAlbumTitle2",
        )
        for row in results:
            print(u"AlbumId: {}, AlbumTitle: {}, " "MarketingBudget: {}".format(*row))
# [END spanner_read_data_with_storing_index]
# [START spanner_add_column]
def add_column(instance_id, database_id):
    """Adds a new column to the Albums table in the example database."""
    database = spanner.Client().instance(instance_id).database(database_id)

    ddl = ["ALTER TABLE Albums ADD COLUMN MarketingBudget INT64"]
    operation = database.update_ddl(ddl)

    print("Waiting for operation to complete...")
    operation.result(120)

    print("Added the MarketingBudget column.")
# [END spanner_add_column]
# [START spanner_update_data]
def update_data(instance_id, database_id):
    """Updates sample data in the database.

    This updates the `MarketingBudget` column which must be created before
    running this sample. You can add the column by running the `add_column`
    sample or by running this DDL statement against your database:

        ALTER TABLE Albums ADD COLUMN MarketingBudget INT64
    """
    budget_rows = [(1, 1, 100000), (2, 2, 500000)]

    database = spanner.Client().instance(instance_id).database(database_id)
    with database.batch() as batch:
        batch.update(
            table="Albums",
            columns=("SingerId", "AlbumId", "MarketingBudget"),
            values=budget_rows,
        )

    print("Updated data.")
# [END spanner_update_data]
# [START spanner_read_write_transaction]
def read_write_transaction(instance_id, database_id):
    """Performs a read-write transaction to update two sample records in the
    database.

    This will transfer 200,000 from the `MarketingBudget` field for the second
    Album to the first Album. If the `MarketingBudget` is too low, it will
    raise an exception.

    Before running this sample, you will need to run the `update_data` sample
    to populate the fields.
    """
    spanner_client = spanner.Client()
    instance = spanner_client.instance(instance_id)
    database = instance.database(database_id)

    # Invoked by run_in_transaction() below with an open read-write
    # transaction; NOTE(review): the client may re-invoke this callback on
    # transaction aborts, so it must stay idempotent — confirm against the
    # client library docs.
    def update_albums(transaction):
        # Read the second album budget.
        second_album_keyset = spanner.KeySet(keys=[(2, 2)])
        second_album_result = transaction.read(
            table="Albums",
            columns=("MarketingBudget",),
            keyset=second_album_keyset,
            limit=1,
        )
        second_album_row = list(second_album_result)[0]
        second_album_budget = second_album_row[0]

        transfer_amount = 200000

        if second_album_budget < transfer_amount:
            # Raising an exception will automatically roll back the
            # transaction.
            raise ValueError("The second album doesn't have enough funds to transfer")

        # Read the first album's budget.
        first_album_keyset = spanner.KeySet(keys=[(1, 1)])
        first_album_result = transaction.read(
            table="Albums",
            columns=("MarketingBudget",),
            keyset=first_album_keyset,
            limit=1,
        )
        first_album_row = list(first_album_result)[0]
        first_album_budget = first_album_row[0]

        # Update the budgets.
        second_album_budget -= transfer_amount
        first_album_budget += transfer_amount
        print(
            "Setting first album's budget to {} and the second album's "
            "budget to {}.".format(first_album_budget, second_album_budget)
        )

        # Update the rows.
        transaction.update(
            table="Albums",
            columns=("SingerId", "AlbumId", "MarketingBudget"),
            values=[(1, 1, first_album_budget), (2, 2, second_album_budget)],
        )

    database.run_in_transaction(update_albums)

    print("Transaction complete.")
# [END spanner_read_write_transaction]
# [START spanner_read_only_transaction]
def read_only_transaction(instance_id, database_id):
    """Reads data inside of a read-only transaction.

    Within the read-only transaction, or "snapshot", the application sees
    consistent view of the database at a particular timestamp.
    """
    spanner_client = spanner.Client()
    instance = spanner_client.instance(instance_id)
    database = instance.database(database_id)

    # multi_use=True lets the same snapshot serve both reads below.
    with database.snapshot(multi_use=True) as snapshot:
        # Read using SQL.
        results = snapshot.execute_sql(
            "SELECT SingerId, AlbumId, AlbumTitle FROM Albums"
        )

        print("Results from first read:")
        for row in results:
            print(u"SingerId: {}, AlbumId: {}, AlbumTitle: {}".format(*row))

        # Perform another read using the `read` method. Even if the data
        # is updated in-between the reads, the snapshot ensures that both
        # return the same data.
        keyset = spanner.KeySet(all_=True)
        results = snapshot.read(
            table="Albums", columns=("SingerId", "AlbumId", "AlbumTitle"), keyset=keyset
        )

        print("Results from second read:")
        for row in results:
            print(u"SingerId: {}, AlbumId: {}, AlbumTitle: {}".format(*row))
# [END spanner_read_only_transaction]
# [START spanner_create_table_with_timestamp_column]
def create_table_with_timestamp(instance_id, database_id):
    """Creates a table with a COMMIT_TIMESTAMP column."""
    spanner_client = spanner.Client()
    instance = spanner_client.instance(instance_id)
    database = instance.database(database_id)
    # allow_commit_timestamp=true lets writers store the commit timestamp
    # by inserting the COMMIT_TIMESTAMP placeholder into LastUpdateTime.
    operation = database.update_ddl(
        [
            """CREATE TABLE Performances (
            SingerId INT64 NOT NULL,
            VenueId INT64 NOT NULL,
            EventDate Date,
            Revenue INT64,
            LastUpdateTime TIMESTAMP NOT NULL
            OPTIONS(allow_commit_timestamp=true)
        ) PRIMARY KEY (SingerId, VenueId, EventDate),
          INTERLEAVE IN PARENT Singers ON DELETE CASCADE"""
        ]
    )
    print("Waiting for operation to complete...")
    operation.result(120)
    print(
        "Created Performances table on database {} on instance {}".format(
            database_id, instance_id
        )
    )
# [END spanner_create_table_with_timestamp_column]
# [START spanner_insert_data_with_timestamp_column]
def insert_data_with_timestamp(instance_id, database_id):
    """Inserts data with a COMMIT_TIMESTAMP field into a table. """
    # spanner.COMMIT_TIMESTAMP is replaced by the commit time server-side.
    performance_rows = [
        (1, 4, "2017-10-05", 11000, spanner.COMMIT_TIMESTAMP),
        (1, 19, "2017-11-02", 15000, spanner.COMMIT_TIMESTAMP),
        (2, 42, "2017-12-23", 7000, spanner.COMMIT_TIMESTAMP),
    ]

    database = spanner.Client().instance(instance_id).database(database_id)
    with database.batch() as batch:
        batch.insert(
            table="Performances",
            columns=("SingerId", "VenueId", "EventDate", "Revenue", "LastUpdateTime"),
            values=performance_rows,
        )

    print("Inserted data.")
# [END spanner_insert_data_with_timestamp_column]
# [START spanner_add_timestamp_column]
def add_timestamp_column(instance_id, database_id):
    """ Adds a new TIMESTAMP column to the Albums table in the example database.
    """
    database = spanner.Client().instance(instance_id).database(database_id)

    ddl = [
        "ALTER TABLE Albums ADD COLUMN LastUpdateTime TIMESTAMP "
        "OPTIONS(allow_commit_timestamp=true)"
    ]
    operation = database.update_ddl(ddl)

    print("Waiting for operation to complete...")
    operation.result(120)

    print(
        'Altered table "Albums" on database {} on instance {}.'.format(
            database_id, instance_id
        )
    )
# [END spanner_add_timestamp_column]
# [START spanner_update_data_with_timestamp_column]
def update_data_with_timestamp(instance_id, database_id):
    """Updates the Albums table in the database with the COMMIT_TIMESTAMP
    column.

    This updates the `MarketingBudget` column which must be created before
    running this sample. You can add the column by running the `add_column`
    sample or by running this DDL statement against your database:

        ALTER TABLE Albums ADD COLUMN MarketingBudget INT64

    In addition this update expects the LastUpdateTime column added by
    applying this DDL statement against your database:

        ALTER TABLE Albums ADD COLUMN LastUpdateTime TIMESTAMP
        OPTIONS(allow_commit_timestamp=true)
    """
    spanner_client = spanner.Client()
    instance = spanner_client.instance(instance_id)
    database = instance.database(database_id)
    with database.batch() as batch:
        # COMMIT_TIMESTAMP is replaced by the commit time server-side.
        batch.update(
            table="Albums",
            columns=("SingerId", "AlbumId", "MarketingBudget", "LastUpdateTime"),
            values=[
                (1, 1, 1000000, spanner.COMMIT_TIMESTAMP),
                (2, 2, 750000, spanner.COMMIT_TIMESTAMP),
            ],
        )
    print("Updated data.")
# [END spanner_update_data_with_timestamp_column]
# [START spanner_query_data_with_timestamp_column]
def query_data_with_timestamp(instance_id, database_id):
    """Queries sample data from the database using SQL, ordered by the
    `LastUpdateTime` commit-timestamp column.

    The `LastUpdateTime` column on Albums must be created before running
    this sample. You can add it by running the `add_timestamp_column`
    sample or by running this DDL statement against your database:

        ALTER TABLE Albums ADD COLUMN LastUpdateTime TIMESTAMP
        OPTIONS (allow_commit_timestamp=true)
    """
    spanner_client = spanner.Client()
    instance = spanner_client.instance(instance_id)
    database = instance.database(database_id)
    with database.snapshot() as snapshot:
        results = snapshot.execute_sql(
            "SELECT SingerId, AlbumId, MarketingBudget FROM Albums "
            "ORDER BY LastUpdateTime DESC"
        )
    # NOTE(review): iteration happens after the snapshot context exits —
    # confirm the result set is still consumable here (the sibling samples
    # iterate inside the `with` block).
    for row in results:
        print(u"SingerId: {}, AlbumId: {}, MarketingBudget: {}".format(*row))
# [END spanner_query_data_with_timestamp_column]
# [START spanner_add_numeric_column]
def add_numeric_column(instance_id, database_id):
    """ Adds a new NUMERIC column to the Venues table in the example database.
    """
    database = spanner.Client().instance(instance_id).database(database_id)

    ddl = ["ALTER TABLE Venues ADD COLUMN Revenue NUMERIC"]
    operation = database.update_ddl(ddl)

    print("Waiting for operation to complete...")
    operation.result(120)

    print(
        'Altered table "Venues" on database {} on instance {}.'.format(
            database_id, instance_id
        )
    )
# [END spanner_add_numeric_column]
# [START spanner_update_data_with_numeric_column]
def update_data_with_numeric(instance_id, database_id):
    """Updates Venues tables in the database with the NUMERIC
    column.

    This updates the `Revenue` column which must be created before
    running this sample. You can add the column by running the
    `add_numeric_column` sample or by running this DDL statement
    against your database:

        ALTER TABLE Venues ADD COLUMN Revenue NUMERIC
    """
    revenue_rows = [
        (4, decimal.Decimal("35000")),
        (19, decimal.Decimal("104500")),
        (42, decimal.Decimal("99999999999999999999999999999.99")),
    ]

    database = spanner.Client().instance(instance_id).database(database_id)
    with database.batch() as batch:
        batch.update(
            table="Venues",
            columns=("VenueId", "Revenue"),
            values=revenue_rows,
        )

    print("Updated data.")
# [END spanner_update_data_with_numeric_column]
# [START spanner_write_data_for_struct_queries]
def write_struct_data(instance_id, database_id):
    """Inserts sample data that can be used to test STRUCT parameters
    in queries.
    """
    singer_rows = [
        (6, u"Elena", u"Campbell"),
        (7, u"Gabriel", u"Wright"),
        (8, u"Benjamin", u"Martinez"),
        (9, u"Hannah", u"Harris"),
    ]

    database = spanner.Client().instance(instance_id).database(database_id)
    with database.batch() as batch:
        batch.insert(
            table="Singers",
            columns=("SingerId", "FirstName", "LastName"),
            values=singer_rows,
        )

    print("Inserted sample data for STRUCT queries")
# [END spanner_write_data_for_struct_queries]
def query_with_struct(instance_id, database_id):
    """Query a table using STRUCT parameters. """
    # [START spanner_create_struct_with_data]
    # STRUCT<FirstName STRING, LastName STRING> plus a matching value tuple.
    record_type = param_types.Struct(
        [
            param_types.StructField("FirstName", param_types.STRING),
            param_types.StructField("LastName", param_types.STRING),
        ]
    )
    record_value = ("Elena", "Campbell")
    # [END spanner_create_struct_with_data]
    # [START spanner_query_data_with_struct]
    spanner_client = spanner.Client()
    instance = spanner_client.instance(instance_id)
    database = instance.database(database_id)
    with database.snapshot() as snapshot:
        results = snapshot.execute_sql(
            "SELECT SingerId FROM Singers WHERE " "(FirstName, LastName) = @name",
            params={"name": record_value},
            param_types={"name": record_type},
        )
    # NOTE(review): iteration happens after the snapshot context exits —
    # confirm the result set is still consumable here (the sibling samples
    # iterate inside the `with` block).
    for row in results:
        print(u"SingerId: {}".format(*row))
    # [END spanner_query_data_with_struct]
# [END spanner_query_data_with_struct]
def query_with_array_of_struct(instance_id, database_id):
    """Query a table using an array of STRUCT parameters. """
    # [START spanner_create_user_defined_struct]
    # Element type for the array parameter below.
    name_type = param_types.Struct(
        [
            param_types.StructField("FirstName", param_types.STRING),
            param_types.StructField("LastName", param_types.STRING),
        ]
    )
    # [END spanner_create_user_defined_struct]

    # [START spanner_create_array_of_struct_with_data]
    band_members = [
        ("Elena", "Campbell"),
        ("Gabriel", "Wright"),
        ("Benjamin", "Martinez"),
    ]
    # [END spanner_create_array_of_struct_with_data]

    # [START spanner_query_data_with_array_of_struct]
    spanner_client = spanner.Client()
    instance = spanner_client.instance(instance_id)
    database = instance.database(database_id)
    with database.snapshot() as snapshot:
        # Matches singers whose (FirstName, LastName) appears in @names.
        results = snapshot.execute_sql(
            "SELECT SingerId FROM Singers WHERE "
            "STRUCT<FirstName STRING, LastName STRING>"
            "(FirstName, LastName) IN UNNEST(@names)",
            params={"names": band_members},
            param_types={"names": param_types.Array(name_type)},
        )
        for row in results:
            print(u"SingerId: {}".format(*row))
    # [END spanner_query_data_with_array_of_struct]
# [END spanner_query_data_with_array_of_struct]
# [START spanner_field_access_on_struct_parameters]
def query_struct_field(instance_id, database_id):
    """Query a table using field access on a STRUCT parameter. """
    database = spanner.Client().instance(instance_id).database(database_id)

    # STRUCT<FirstName STRING, LastName STRING>; the query reads only the
    # FirstName field of the bound value.
    struct_type = param_types.Struct(
        [
            param_types.StructField("FirstName", param_types.STRING),
            param_types.StructField("LastName", param_types.STRING),
        ]
    )

    with database.snapshot() as snapshot:
        rows = snapshot.execute_sql(
            "SELECT SingerId FROM Singers " "WHERE FirstName = @name.FirstName",
            params={"name": ("Elena", "Campbell")},
            param_types={"name": struct_type},
        )
        for row in rows:
            print(u"SingerId: {}".format(*row))
# [END spanner_field_access_on_struct_parameters]
# [START spanner_field_access_on_nested_struct_parameters]
def query_nested_struct_field(instance_id, database_id):
    """Query a table using nested field access on a STRUCT parameter. """
    spanner_client = spanner.Client()
    instance = spanner_client.instance(instance_id)
    database = instance.database(database_id)
    # STRUCT<SongName STRING,
    #        ArtistNames ARRAY<STRUCT<FirstName STRING, LastName STRING>>>
    song_info_type = param_types.Struct(
        [
            param_types.StructField("SongName", param_types.STRING),
            param_types.StructField(
                "ArtistNames",
                param_types.Array(
                    param_types.Struct(
                        [
                            param_types.StructField("FirstName", param_types.STRING),
                            param_types.StructField("LastName", param_types.STRING),
                        ]
                    )
                ),
            ),
        ]
    )
    song_info = ("Imagination", [("Elena", "Campbell"), ("Hannah", "Harris")])
    with database.snapshot() as snapshot:
        # Selects the nested @song_info.SongName field for every singer
        # whose name appears in the nested @song_info.ArtistNames array.
        results = snapshot.execute_sql(
            "SELECT SingerId, @song_info.SongName "
            "FROM Singers WHERE "
            "STRUCT<FirstName STRING, LastName STRING>"
            "(FirstName, LastName) "
            "IN UNNEST(@song_info.ArtistNames)",
            params={"song_info": song_info},
            param_types={"song_info": song_info_type},
        )
        for row in results:
            print(u"SingerId: {} SongName: {}".format(*row))
# [END spanner_field_access_on_nested_struct_parameters]
def insert_data_with_dml(instance_id, database_id):
    """Insert one sample Singers row using a standard DML INSERT."""
    # [START spanner_dml_standard_insert]
    # instance_id = "your-spanner-instance"
    # database_id = "your-spanner-db-id"
    client = spanner.Client()
    database = client.instance(instance_id).database(database_id)

    def do_insert(transaction):
        # execute_update returns the number of rows the DML touched.
        count = transaction.execute_update(
            "INSERT Singers (SingerId, FirstName, LastName) "
            " VALUES (10, 'Virginia', 'Watson')"
        )
        print("{} record(s) inserted.".format(count))

    database.run_in_transaction(do_insert)
    # [END spanner_dml_standard_insert]
def update_data_with_dml(instance_id, database_id):
    """Double one album's marketing budget using a standard DML UPDATE."""
    # [START spanner_dml_standard_update]
    # instance_id = "your-spanner-instance"
    # database_id = "your-spanner-db-id"
    client = spanner.Client()
    database = client.instance(instance_id).database(database_id)

    def do_update(transaction):
        count = transaction.execute_update(
            "UPDATE Albums "
            "SET MarketingBudget = MarketingBudget * 2 "
            "WHERE SingerId = 1 and AlbumId = 1"
        )
        print("{} record(s) updated.".format(count))

    database.run_in_transaction(do_update)
    # [END spanner_dml_standard_update]
def delete_data_with_dml(instance_id, database_id):
    """Delete sample Singers rows using a standard DML DELETE."""
    # [START spanner_dml_standard_delete]
    # instance_id = "your-spanner-instance"
    # database_id = "your-spanner-db-id"
    client = spanner.Client()
    database = client.instance(instance_id).database(database_id)

    def do_delete(transaction):
        count = transaction.execute_update(
            "DELETE FROM Singers WHERE FirstName = 'Alice'"
        )
        print("{} record(s) deleted.".format(count))

    database.run_in_transaction(do_delete)
    # [END spanner_dml_standard_delete]
def update_data_with_dml_timestamp(instance_id, database_id):
    """Stamp albums with the commit timestamp via a DML UPDATE."""
    # [START spanner_dml_standard_update_with_timestamp]
    # instance_id = "your-spanner-instance"
    # database_id = "your-spanner-db-id"
    client = spanner.Client()
    database = client.instance(instance_id).database(database_id)

    def do_update(transaction):
        # PENDING_COMMIT_TIMESTAMP() resolves to this transaction's
        # commit time on the server.
        count = transaction.execute_update(
            "UPDATE Albums "
            "SET LastUpdateTime = PENDING_COMMIT_TIMESTAMP() "
            "WHERE SingerId = 1"
        )
        print("{} record(s) updated.".format(count))

    database.run_in_transaction(do_update)
    # [END spanner_dml_standard_update_with_timestamp]
def dml_write_read_transaction(instance_id, database_id):
    """Insert a row with DML, then read it back within the same transaction."""
    # [START spanner_dml_write_then_read]
    # instance_id = "your-spanner-instance"
    # database_id = "your-spanner-db-id"
    client = spanner.Client()
    database = client.instance(instance_id).database(database_id)

    def write_then_read(transaction):
        # The uncommitted insert is visible to reads issued through the
        # same transaction object.
        count = transaction.execute_update(
            "INSERT Singers (SingerId, FirstName, LastName) "
            " VALUES (11, 'Timothy', 'Campbell')"
        )
        print("{} record(s) inserted.".format(count))

        rows = transaction.execute_sql(
            "SELECT FirstName, LastName FROM Singers WHERE SingerId = 11"
        )
        for row in rows:
            print("FirstName: {}, LastName: {}".format(*row))

    database.run_in_transaction(write_then_read)
    # [END spanner_dml_write_then_read]
def update_data_with_dml_struct(instance_id, database_id):
    """Update a row matched by a STRUCT-typed parameter using DML."""
    # [START spanner_dml_structs]
    # instance_id = "your-spanner-instance"
    # database_id = "your-spanner-db-id"
    client = spanner.Client()
    database = client.instance(instance_id).database(database_id)

    # Type and value bound to @name in the UPDATE below.
    name_type = param_types.Struct(
        [
            param_types.StructField("FirstName", param_types.STRING),
            param_types.StructField("LastName", param_types.STRING),
        ]
    )
    name_value = ("Timothy", "Campbell")

    def write_with_struct(transaction):
        count = transaction.execute_update(
            "UPDATE Singers SET LastName = 'Grant' "
            "WHERE STRUCT<FirstName STRING, LastName STRING>"
            "(FirstName, LastName) = @name",
            params={"name": name_value},
            param_types={"name": name_type},
        )
        print("{} record(s) updated.".format(count))

    database.run_in_transaction(write_with_struct)
    # [END spanner_dml_structs]
def insert_with_dml(instance_id, database_id):
    """Insert four Singers rows with a single multi-row DML INSERT."""
    # [START spanner_dml_getting_started_insert]
    # instance_id = "your-spanner-instance"
    # database_id = "your-spanner-db-id"
    client = spanner.Client()
    database = client.instance(instance_id).database(database_id)

    def do_insert(transaction):
        count = transaction.execute_update(
            "INSERT Singers (SingerId, FirstName, LastName) VALUES "
            "(12, 'Melissa', 'Garcia'), "
            "(13, 'Russell', 'Morales'), "
            "(14, 'Jacqueline', 'Long'), "
            "(15, 'Dylan', 'Shaw')"
        )
        print("{} record(s) inserted.".format(count))

    database.run_in_transaction(do_insert)
    # [END spanner_dml_getting_started_insert]
def query_data_with_parameter(instance_id, database_id):
    """Query Singers filtered by a bound STRING query parameter."""
    # [START spanner_query_with_parameter]
    # instance_id = "your-spanner-instance"
    # database_id = "your-spanner-db-id"
    client = spanner.Client()
    database = client.instance(instance_id).database(database_id)

    with database.snapshot() as snapshot:
        rows = snapshot.execute_sql(
            "SELECT SingerId, FirstName, LastName FROM Singers "
            "WHERE LastName = @lastName",
            params={"lastName": "Garcia"},
            param_types={"lastName": spanner.param_types.STRING},
        )

    for row in rows:
        print(u"SingerId: {}, FirstName: {}, LastName: {}".format(*row))
    # [END spanner_query_with_parameter]
def write_with_dml_transaction(instance_id, database_id):
    """Transfer part of a marketing budget from one album to another."""
    # [START spanner_dml_getting_started_update]
    # instance_id = "your-spanner-instance"
    # database_id = "your-spanner-db-id"
    client = spanner.Client()
    database = client.instance(instance_id).database(database_id)

    def transfer_budget(transaction):
        # Move budget between two albums inside a single transaction so
        # that the transfer is atomic.
        transfer_amount = 200000

        source_rows = transaction.execute_sql(
            "SELECT MarketingBudget from Albums WHERE SingerId = 2 and AlbumId = 2"
        )
        source_budget = list(source_rows)[0][0]

        # The commit only succeeds if this condition still holds at commit
        # time; otherwise the transaction aborts and the client library
        # reruns this callable.
        if source_budget >= transfer_amount:
            target_rows = transaction.execute_sql(
                "SELECT MarketingBudget from Albums "
                "WHERE SingerId = 1 and AlbumId = 1"
            )
            target_budget = list(target_rows)[0][0]

            source_budget -= transfer_amount
            target_budget += transfer_amount

            # Persist the receiving album's new budget.
            transaction.execute_update(
                "UPDATE Albums "
                "SET MarketingBudget = @AlbumBudget "
                "WHERE SingerId = 1 and AlbumId = 1",
                params={"AlbumBudget": target_budget},
                param_types={"AlbumBudget": spanner.param_types.INT64},
            )
            # Persist the sending album's new budget.
            transaction.execute_update(
                "UPDATE Albums "
                "SET MarketingBudget = @AlbumBudget "
                "WHERE SingerId = 2 and AlbumId = 2",
                params={"AlbumBudget": source_budget},
                param_types={"AlbumBudget": spanner.param_types.INT64},
            )

            print(
                "Transferred {} from Album2's budget to Album1's".format(
                    transfer_amount
                )
            )

    database.run_in_transaction(transfer_budget)
    # [END spanner_dml_getting_started_update]
def update_data_with_partitioned_dml(instance_id, database_id):
    """Bulk-update Albums rows with a partitioned DML statement."""
    # [START spanner_dml_partitioned_update]
    # instance_id = "your-spanner-instance"
    # database_id = "your-spanner-db-id"
    client = spanner.Client()
    database = client.instance(instance_id).database(database_id)

    count = database.execute_partitioned_dml(
        "UPDATE Albums SET MarketingBudget = 100000 WHERE SingerId > 1"
    )
    print("{} records updated.".format(count))
    # [END spanner_dml_partitioned_update]
def delete_data_with_partitioned_dml(instance_id, database_id):
    """Bulk-delete Singers rows with a partitioned DML statement."""
    # [START spanner_dml_partitioned_delete]
    # instance_id = "your-spanner-instance"
    # database_id = "your-spanner-db-id"
    client = spanner.Client()
    database = client.instance(instance_id).database(database_id)

    count = database.execute_partitioned_dml("DELETE FROM Singers WHERE SingerId > 10")
    print("{} record(s) deleted.".format(count))
    # [END spanner_dml_partitioned_delete]
def update_with_batch_dml(instance_id, database_id):
    """Run an INSERT and an UPDATE together via Batch DML."""
    # [START spanner_dml_batch_update]
    # instance_id = "your-spanner-instance"
    # database_id = "your-spanner-db-id"
    client = spanner.Client()
    database = client.instance(instance_id).database(database_id)

    # Both statements are sent to the server in a single round trip.
    statements = [
        "INSERT INTO Albums "
        "(SingerId, AlbumId, AlbumTitle, MarketingBudget) "
        "VALUES (1, 3, 'Test Album Title', 10000)",
        "UPDATE Albums "
        "SET MarketingBudget = MarketingBudget * 2 "
        "WHERE SingerId = 1 and AlbumId = 3",
    ]

    def update_albums(transaction):
        row_cts = transaction.batch_update(statements)
        print("Executed {} SQL statements using Batch DML.".format(len(row_cts)))

    database.run_in_transaction(update_albums)
    # [END spanner_dml_batch_update]
def create_table_with_datatypes(instance_id, database_id):
    """Create a Venues table exercising the supported column datatypes."""
    # [START spanner_create_table_with_datatypes]
    # instance_id = "your-spanner-instance"
    # database_id = "your-spanner-db-id"
    client = spanner.Client()
    database = client.instance(instance_id).database(database_id)

    ddl = """CREATE TABLE Venues (
            VenueId INT64 NOT NULL,
            VenueName STRING(100),
            VenueInfo BYTES(MAX),
            Capacity INT64,
            AvailableDates ARRAY<DATE>,
            LastContactDate DATE,
            OutdoorVenue BOOL,
            PopularityScore FLOAT64,
            LastUpdateTime TIMESTAMP NOT NULL
            OPTIONS(allow_commit_timestamp=true)
        ) PRIMARY KEY (VenueId)"""
    operation = database.update_ddl([ddl])

    # Schema changes are long-running; block (up to 120 s) until done.
    print("Waiting for operation to complete...")
    operation.result(120)

    print(
        "Created Venues table on database {} on instance {}".format(
            database_id, instance_id
        )
    )
    # [END spanner_create_table_with_datatypes]
def insert_datatypes_data(instance_id, database_id):
    """Insert Venues rows covering every supported column datatype."""
    # [START spanner_insert_datatypes_data]
    # instance_id = "your-spanner-instance"
    # database_id = "your-spanner-db-id"
    client = spanner.Client()
    database = client.instance(instance_id).database(database_id)

    # BYTES columns take base64-encoded payloads.
    info_1 = base64.b64encode(u"Hello World 1".encode())
    info_2 = base64.b64encode(u"Hello World 2".encode())
    info_3 = base64.b64encode(u"Hello World 3".encode())

    venue_rows = [
        (
            4,
            u"Venue 4",
            info_1,
            1800,
            ["2020-12-01", "2020-12-02", "2020-12-03"],
            "2018-09-02",
            False,
            0.85543,
            spanner.COMMIT_TIMESTAMP,
        ),
        (
            19,
            u"Venue 19",
            info_2,
            6300,
            ["2020-11-01", "2020-11-05", "2020-11-15"],
            "2019-01-15",
            True,
            0.98716,
            spanner.COMMIT_TIMESTAMP,
        ),
        (
            42,
            u"Venue 42",
            info_3,
            3000,
            ["2020-10-01", "2020-10-07"],
            "2018-10-01",
            False,
            0.72598,
            spanner.COMMIT_TIMESTAMP,
        ),
    ]

    with database.batch() as batch:
        batch.insert(
            table="Venues",
            columns=(
                "VenueId",
                "VenueName",
                "VenueInfo",
                "Capacity",
                "AvailableDates",
                "LastContactDate",
                "OutdoorVenue",
                "PopularityScore",
                "LastUpdateTime",
            ),
            values=venue_rows,
        )

    print("Inserted data.")
    # [END spanner_insert_datatypes_data]
def query_data_with_array(instance_id, database_id):
    """Query Venues against an ARRAY<DATE> query parameter."""
    # [START spanner_query_with_array_parameter]
    # instance_id = "your-spanner-instance"
    # database_id = "your-spanner-db-id"
    client = spanner.Client()
    database = client.instance(instance_id).database(database_id)

    sql_params = {"available_dates": ["2020-10-01", "2020-11-01"]}
    sql_param_types = {"available_dates": param_types.Array(param_types.DATE)}

    with database.snapshot() as snapshot:
        rows = snapshot.execute_sql(
            "SELECT VenueId, VenueName, AvailableDate FROM Venues v,"
            "UNNEST(v.AvailableDates) as AvailableDate "
            "WHERE AvailableDate in UNNEST(@available_dates)",
            params=sql_params,
            param_types=sql_param_types,
        )

    for row in rows:
        print(u"VenueId: {}, VenueName: {}, AvailableDate: {}".format(*row))
    # [END spanner_query_with_array_parameter]
def query_data_with_bool(instance_id, database_id):
    """Query Venues filtered by a bound BOOL query parameter."""
    # [START spanner_query_with_bool_parameter]
    # instance_id = "your-spanner-instance"
    # database_id = "your-spanner-db-id"
    client = spanner.Client()
    database = client.instance(instance_id).database(database_id)

    sql_params = {"outdoor_venue": True}
    sql_param_types = {"outdoor_venue": param_types.BOOL}

    with database.snapshot() as snapshot:
        rows = snapshot.execute_sql(
            "SELECT VenueId, VenueName, OutdoorVenue FROM Venues "
            "WHERE OutdoorVenue = @outdoor_venue",
            params=sql_params,
            param_types=sql_param_types,
        )

    for row in rows:
        print(u"VenueId: {}, VenueName: {}, OutdoorVenue: {}".format(*row))
    # [END spanner_query_with_bool_parameter]
def query_data_with_bytes(instance_id, database_id):
    """Query Venues filtered by a bound BYTES query parameter."""
    # [START spanner_query_with_bytes_parameter]
    # instance_id = "your-spanner-instance"
    # database_id = "your-spanner-db-id"
    client = spanner.Client()
    database = client.instance(instance_id).database(database_id)

    # BYTES parameters are passed base64-encoded.
    sql_params = {"venue_info": base64.b64encode(u"Hello World 1".encode())}
    sql_param_types = {"venue_info": param_types.BYTES}

    with database.snapshot() as snapshot:
        rows = snapshot.execute_sql(
            "SELECT VenueId, VenueName FROM Venues WHERE VenueInfo = @venue_info",
            params=sql_params,
            param_types=sql_param_types,
        )

    for row in rows:
        print(u"VenueId: {}, VenueName: {}".format(*row))
    # [END spanner_query_with_bytes_parameter]
def query_data_with_date(instance_id, database_id):
    """Query Venues filtered by a bound DATE query parameter."""
    # [START spanner_query_with_date_parameter]
    # instance_id = "your-spanner-instance"
    # database_id = "your-spanner-db-id"
    client = spanner.Client()
    database = client.instance(instance_id).database(database_id)

    sql_params = {"last_contact_date": "2019-01-01"}
    sql_param_types = {"last_contact_date": param_types.DATE}

    with database.snapshot() as snapshot:
        rows = snapshot.execute_sql(
            "SELECT VenueId, VenueName, LastContactDate FROM Venues "
            "WHERE LastContactDate < @last_contact_date",
            params=sql_params,
            param_types=sql_param_types,
        )

    for row in rows:
        print(u"VenueId: {}, VenueName: {}, LastContactDate: {}".format(*row))
    # [END spanner_query_with_date_parameter]
def query_data_with_float(instance_id, database_id):
    """Query Venues filtered by a bound FLOAT64 query parameter."""
    # [START spanner_query_with_float_parameter]
    # instance_id = "your-spanner-instance"
    # database_id = "your-spanner-db-id"
    client = spanner.Client()
    database = client.instance(instance_id).database(database_id)

    sql_params = {"popularity_score": 0.8}
    sql_param_types = {"popularity_score": param_types.FLOAT64}

    with database.snapshot() as snapshot:
        rows = snapshot.execute_sql(
            "SELECT VenueId, VenueName, PopularityScore FROM Venues "
            "WHERE PopularityScore > @popularity_score",
            params=sql_params,
            param_types=sql_param_types,
        )

    for row in rows:
        print(u"VenueId: {}, VenueName: {}, PopularityScore: {}".format(*row))
    # [END spanner_query_with_float_parameter]
def query_data_with_int(instance_id, database_id):
    """Query Venues filtered by a bound INT64 query parameter."""
    # [START spanner_query_with_int_parameter]
    # instance_id = "your-spanner-instance"
    # database_id = "your-spanner-db-id"
    client = spanner.Client()
    database = client.instance(instance_id).database(database_id)

    sql_params = {"capacity": 3000}
    sql_param_types = {"capacity": param_types.INT64}

    with database.snapshot() as snapshot:
        rows = snapshot.execute_sql(
            "SELECT VenueId, VenueName, Capacity FROM Venues "
            "WHERE Capacity >= @capacity",
            params=sql_params,
            param_types=sql_param_types,
        )

    for row in rows:
        print(u"VenueId: {}, VenueName: {}, Capacity: {}".format(*row))
    # [END spanner_query_with_int_parameter]
def query_data_with_string(instance_id, database_id):
    """Query Venues filtered by a bound STRING query parameter."""
    # [START spanner_query_with_string_parameter]
    # instance_id = "your-spanner-instance"
    # database_id = "your-spanner-db-id"
    client = spanner.Client()
    database = client.instance(instance_id).database(database_id)

    sql_params = {"venue_name": "Venue 42"}
    sql_param_types = {"venue_name": param_types.STRING}

    with database.snapshot() as snapshot:
        rows = snapshot.execute_sql(
            "SELECT VenueId, VenueName FROM Venues WHERE VenueName = @venue_name",
            params=sql_params,
            param_types=sql_param_types,
        )

    for row in rows:
        print(u"VenueId: {}, VenueName: {}".format(*row))
    # [END spanner_query_with_string_parameter]
def query_data_with_numeric_parameter(instance_id, database_id):
    """Query Venues filtered by a bound NUMERIC query parameter."""
    # [START spanner_query_with_numeric_parameter]
    # instance_id = "your-spanner-instance"
    # database_id = "your-spanner-db-id"
    client = spanner.Client()
    database = client.instance(instance_id).database(database_id)

    # NUMERIC parameters are passed as decimal.Decimal values.
    sql_params = {"revenue": decimal.Decimal("100000")}
    sql_param_types = {"revenue": param_types.NUMERIC}

    with database.snapshot() as snapshot:
        rows = snapshot.execute_sql(
            "SELECT VenueId, Revenue FROM Venues WHERE Revenue < @revenue",
            params=sql_params,
            param_types=sql_param_types,
        )

    for row in rows:
        print(u"VenueId: {}, Revenue: {}".format(*row))
    # [END spanner_query_with_numeric_parameter]
def query_data_with_timestamp_parameter(instance_id, database_id):
    """Queries sample data using SQL with a TIMESTAMP parameter. """
    # [START spanner_query_with_timestamp_parameter]
    # instance_id = "your-spanner-instance"
    # database_id = "your-spanner-db-id"
    spanner_client = spanner.Client()
    instance = spanner_client.instance(instance_id)
    database = instance.database(database_id)
    # This "now" value is what the published docs excerpt shows; the region
    # tags below exclude the override that follows.
    example_timestamp = datetime.datetime.utcnow().isoformat() + "Z"
    # [END spanner_query_with_timestamp_parameter]
    # Avoid time drift on the local machine.
    # https://github.com/GoogleCloudPlatform/python-docs-samples/issues/4197.
    # Override with a timestamp one day in the future so rows stamped by a
    # (possibly skewed) server clock still satisfy the < comparison in tests.
    example_timestamp = (
        datetime.datetime.utcnow() + datetime.timedelta(days=1)
    ).isoformat() + "Z"
    # [START spanner_query_with_timestamp_parameter]
    # TIMESTAMP parameters may be passed as RFC 3339 strings ("...Z").
    param = {"last_update_time": example_timestamp}
    param_type = {"last_update_time": param_types.TIMESTAMP}
    with database.snapshot() as snapshot:
        results = snapshot.execute_sql(
            "SELECT VenueId, VenueName, LastUpdateTime FROM Venues "
            "WHERE LastUpdateTime < @last_update_time",
            params=param,
            param_types=param_type,
        )

    for row in results:
        print(u"VenueId: {}, VenueName: {}, LastUpdateTime: {}".format(*row))
    # [END spanner_query_with_timestamp_parameter]
def query_data_with_query_options(instance_id, database_id):
    """Run a query pinned to optimizer version 1 via per-query options."""
    # [START spanner_query_with_query_options]
    # instance_id = "your-spanner-instance"
    # database_id = "your-spanner-db-id"
    client = spanner.Client()
    database = client.instance(instance_id).database(database_id)

    with database.snapshot() as snapshot:
        rows = snapshot.execute_sql(
            "SELECT VenueId, VenueName, LastUpdateTime FROM Venues",
            query_options={"optimizer_version": "1"},
        )

    for row in rows:
        print(u"VenueId: {}, VenueName: {}, LastUpdateTime: {}".format(*row))
    # [END spanner_query_with_query_options]
def create_client_with_query_options(instance_id, database_id):
    """Create a client whose query options apply to every query it runs."""
    # [START spanner_create_client_with_query_options]
    # instance_id = "your-spanner-instance"
    # database_id = "your-spanner-db-id"
    client = spanner.Client(query_options={"optimizer_version": "1"})
    database = client.instance(instance_id).database(database_id)

    with database.snapshot() as snapshot:
        rows = snapshot.execute_sql(
            "SELECT VenueId, VenueName, LastUpdateTime FROM Venues"
        )

    for row in rows:
        print(u"VenueId: {}, VenueName: {}, LastUpdateTime: {}".format(*row))
    # [END spanner_create_client_with_query_options]
if __name__ == "__main__":  # noqa: C901
    parser = argparse.ArgumentParser(
        description=__doc__, formatter_class=argparse.RawDescriptionHelpFormatter
    )
    parser.add_argument("instance_id", help="Your Cloud Spanner instance ID.")
    parser.add_argument(
        "--database-id", help="Your Cloud Spanner database ID.", default="example_db"
    )
    subparsers = parser.add_subparsers(dest="command")

    # create_instance is special: it takes only the instance ID.
    subparsers.add_parser("create_instance", help=create_instance.__doc__)

    # Every other sub-command is named after a module-level sample function
    # taking (instance_id, database_id). Registration order matches the
    # original help listing; the two lists are split only so that the
    # query_data_with_index parser (which takes extra flags) keeps its place.
    commands_before_index = [
        create_database,
        insert_data,
        delete_data,
        query_data,
        read_data,
        read_stale_data,
        add_column,
        update_data,
        query_data_with_new_column,
        read_write_transaction,
        read_only_transaction,
        add_index,
    ]
    for func in commands_before_index:
        subparsers.add_parser(func.__name__, help=func.__doc__)

    # query_data_with_index is special: it takes two extra optional flags.
    query_data_with_index_parser = subparsers.add_parser(
        "query_data_with_index", help=query_data_with_index.__doc__
    )
    query_data_with_index_parser.add_argument("--start_title", default="Aardvark")
    query_data_with_index_parser.add_argument("--end_title", default="Goo")

    commands_after_index = [
        # Bug fix: these two previously showed insert_data.__doc__ as help.
        read_data_with_index,
        add_storing_index,
        read_data_with_storing_index,
        create_table_with_timestamp,
        insert_data_with_timestamp,
        add_timestamp_column,
        update_data_with_timestamp,
        query_data_with_timestamp,
        write_struct_data,
        query_with_struct,
        query_with_array_of_struct,
        query_struct_field,
        query_nested_struct_field,
        insert_data_with_dml,
        update_data_with_dml,
        delete_data_with_dml,
        update_data_with_dml_timestamp,
        dml_write_read_transaction,
        update_data_with_dml_struct,
        insert_with_dml,
        query_data_with_parameter,
        write_with_dml_transaction,
        update_data_with_partitioned_dml,
        delete_data_with_partitioned_dml,
        update_with_batch_dml,
        create_table_with_datatypes,
        insert_datatypes_data,
        query_data_with_array,
        query_data_with_bool,
        query_data_with_bytes,
        query_data_with_date,
        query_data_with_float,
        query_data_with_int,
        query_data_with_string,
        # Bug fix: this sample previously had no CLI entry at all.
        query_data_with_numeric_parameter,
        query_data_with_timestamp_parameter,
        query_data_with_query_options,
        create_client_with_query_options,
    ]
    for func in commands_after_index:
        subparsers.add_parser(func.__name__, help=func.__doc__)

    args = parser.parse_args()

    if args.command == "create_instance":
        create_instance(args.instance_id)
    elif args.command == "query_data_with_index":
        query_data_with_index(
            args.instance_id, args.database_id, args.start_title, args.end_title
        )
    elif args.command:
        # Dispatch by name: each remaining sub-command maps 1:1 onto the
        # function it was registered from. No command given -> do nothing,
        # matching the original elif chain.
        dispatch = {
            f.__name__: f for f in commands_before_index + commands_after_index
        }
        dispatch[args.command](args.instance_id, args.database_id)
| 35.536533 | 88 | 0.67311 |
ace5e19d77d517bdedc8417716bd3dddfe01b79e | 13,823 | py | Python | src/werkzeug/_reloader.py | TrizlyBear/werkzeug | 4848b99a3863e84b4f1969abfa58864431bdf2d5 | [
"BSD-3-Clause"
] | 1 | 2021-12-22T01:20:21.000Z | 2021-12-22T01:20:21.000Z | src/werkzeug/_reloader.py | javad-jafari/werkzeug | 18166bfd5b38b1775dbed383a1067ae92db86782 | [
"BSD-3-Clause"
] | 30 | 2021-07-01T08:02:38.000Z | 2022-03-01T08:04:08.000Z | src/werkzeug/_reloader.py | mahak/werkzeug | 30ce9eebd2cfe5e16f45155a85a2124e889d25cc | [
"BSD-3-Clause"
] | 2 | 2021-07-04T12:18:26.000Z | 2021-12-11T13:26:29.000Z | import fnmatch
import os
import subprocess
import sys
import threading
import time
import typing as t
from itertools import chain
from pathlib import PurePath
from ._internal import _log
# The various system prefixes where imports are found. Base values are
# different when running in a virtualenv. The stat reloader won't scan
# these directories, it would be too inefficient.
prefix = {sys.prefix, sys.base_prefix, sys.exec_prefix, sys.base_exec_prefix}
if hasattr(sys, "real_prefix"):
# virtualenv < 20
prefix.add(sys.real_prefix) # type: ignore
_ignore_prefixes = tuple(prefix)
del prefix
def _iter_module_paths() -> t.Iterator[str]:
"""Find the filesystem paths associated with imported modules."""
# List is in case the value is modified by the app while updating.
for module in list(sys.modules.values()):
name = getattr(module, "__file__", None)
if name is None:
continue
while not os.path.isfile(name):
# Zip file, find the base file without the module path.
old = name
name = os.path.dirname(name)
if name == old: # skip if it was all directories somehow
break
else:
yield name
def _remove_by_pattern(paths: t.Set[str], exclude_patterns: t.Set[str]) -> None:
for pattern in exclude_patterns:
paths.difference_update(fnmatch.filter(paths, pattern))
def _find_stat_paths(
extra_files: t.Set[str], exclude_patterns: t.Set[str]
) -> t.Iterable[str]:
"""Find paths for the stat reloader to watch. Returns imported
module files, Python files under non-system paths. Extra files and
Python files under extra directories can also be scanned.
System paths have to be excluded for efficiency. Non-system paths,
such as a project root or ``sys.path.insert``, should be the paths
of interest to the user anyway.
"""
paths = set()
for path in chain(list(sys.path), extra_files):
path = os.path.abspath(path)
if os.path.isfile(path):
# zip file on sys.path, or extra file
paths.add(path)
for root, dirs, files in os.walk(path):
# Ignore system prefixes for efficience. Don't scan
# __pycache__, it will have a py or pyc module at the import
# path. As an optimization, ignore .git and .hg since
# nothing interesting will be there.
if root.startswith(_ignore_prefixes) or os.path.basename(root) in {
"__pycache__",
".git",
".hg",
}:
dirs.clear()
continue
for name in files:
if name.endswith((".py", ".pyc")):
paths.add(os.path.join(root, name))
paths.update(_iter_module_paths())
_remove_by_pattern(paths, exclude_patterns)
return paths
def _find_watchdog_paths(
extra_files: t.Set[str], exclude_patterns: t.Set[str]
) -> t.Iterable[str]:
"""Find paths for the stat reloader to watch. Looks at the same
sources as the stat reloader, but watches everything under
directories instead of individual files.
"""
dirs = set()
for name in chain(list(sys.path), extra_files):
name = os.path.abspath(name)
if os.path.isfile(name):
name = os.path.dirname(name)
dirs.add(name)
for name in _iter_module_paths():
dirs.add(os.path.dirname(name))
_remove_by_pattern(dirs, exclude_patterns)
return _find_common_roots(dirs)
def _find_common_roots(paths: t.Iterable[str]) -> t.Iterable[str]:
root: t.Dict[str, dict] = {}
for chunks in sorted((PurePath(x).parts for x in paths), key=len, reverse=True):
node = root
for chunk in chunks:
node = node.setdefault(chunk, {})
node.clear()
rv = set()
def _walk(node, path):
for prefix, child in node.items():
_walk(child, path + (prefix,))
if not node:
rv.add(os.path.join(*path))
_walk(root, ())
return rv
def _get_args_for_reloading() -> t.List[str]:
"""Determine how the script was executed, and return the args needed
to execute it again in a new process.
"""
rv = [sys.executable]
py_script = sys.argv[0]
args = sys.argv[1:]
# Need to look at main module to determine how it was executed.
__main__ = sys.modules["__main__"]
# The value of __package__ indicates how Python was called. It may
# not exist if a setuptools script is installed as an egg. It may be
# set incorrectly for entry points created with pip on Windows.
if getattr(__main__, "__package__", None) is None or (
os.name == "nt"
and __main__.__package__ == ""
and not os.path.exists(py_script)
and os.path.exists(f"{py_script}.exe")
):
# Executed a file, like "python app.py".
py_script = os.path.abspath(py_script)
if os.name == "nt":
# Windows entry points have ".exe" extension and should be
# called directly.
if not os.path.exists(py_script) and os.path.exists(f"{py_script}.exe"):
py_script += ".exe"
if (
os.path.splitext(sys.executable)[1] == ".exe"
and os.path.splitext(py_script)[1] == ".exe"
):
rv.pop(0)
rv.append(py_script)
else:
# Executed a module, like "python -m werkzeug.serving".
if sys.argv[0] == "-m":
# Flask works around previous behavior by putting
# "-m flask" in sys.argv.
# TODO remove this once Flask no longer misbehaves
args = sys.argv
else:
if os.path.isfile(py_script):
# Rewritten by Python from "-m script" to "/path/to/script.py".
py_module = t.cast(str, __main__.__package__)
name = os.path.splitext(os.path.basename(py_script))[0]
if name != "__main__":
py_module += f".{name}"
else:
# Incorrectly rewritten by pydevd debugger from "-m script" to "script".
py_module = py_script
rv.extend(("-m", py_module.lstrip(".")))
rv.extend(args)
return rv
class ReloaderLoop:
name = ""
def __init__(
self,
extra_files: t.Optional[t.Iterable[str]] = None,
exclude_patterns: t.Optional[t.Iterable[str]] = None,
interval: t.Union[int, float] = 1,
) -> None:
self.extra_files: t.Set[str] = {os.path.abspath(x) for x in extra_files or ()}
self.exclude_patterns: t.Set[str] = set(exclude_patterns or ())
self.interval = interval
def __enter__(self) -> "ReloaderLoop":
"""Do any setup, then run one step of the watch to populate the
initial filesystem state.
"""
self.run_step()
return self
def __exit__(self, exc_type, exc_val, exc_tb):
"""Clean up any resources associated with the reloader."""
pass
def run(self) -> None:
"""Continually run the watch step, sleeping for the configured
interval after each step.
"""
while True:
self.run_step()
time.sleep(self.interval)
def run_step(self) -> None:
"""Run one step for watching the filesystem. Called once to set
up initial state, then repeatedly to update it.
"""
pass
def restart_with_reloader(self) -> int:
"""Spawn a new Python interpreter with the same arguments as the
current one, but running the reloader thread.
"""
while True:
_log("info", f" * Restarting with {self.name}")
args = _get_args_for_reloading()
new_environ = os.environ.copy()
new_environ["WERKZEUG_RUN_MAIN"] = "true"
exit_code = subprocess.call(args, env=new_environ, close_fds=False)
if exit_code != 3:
return exit_code
def trigger_reload(self, filename: str) -> None:
self.log_reload(filename)
sys.exit(3)
def log_reload(self, filename: str) -> None:
filename = os.path.abspath(filename)
_log("info", f" * Detected change in {filename!r}, reloading")
class StatReloaderLoop(ReloaderLoop):
name = "stat"
def __enter__(self) -> ReloaderLoop:
self.mtimes: t.Dict[str, float] = {}
return super().__enter__()
def run_step(self) -> None:
for name in chain(_find_stat_paths(self.extra_files, self.exclude_patterns)):
try:
mtime = os.stat(name).st_mtime
except OSError:
continue
old_time = self.mtimes.get(name)
if old_time is None:
self.mtimes[name] = mtime
continue
if mtime > old_time:
self.trigger_reload(name)
class WatchdogReloaderLoop(ReloaderLoop):
def __init__(self, *args, **kwargs) -> None:
from watchdog.observers import Observer
from watchdog.events import PatternMatchingEventHandler
super().__init__(*args, **kwargs)
trigger_reload = self.trigger_reload
class EventHandler(PatternMatchingEventHandler): # type: ignore
def on_any_event(self, event):
trigger_reload(event.src_path)
reloader_name = Observer.__name__.lower()
if reloader_name.endswith("observer"):
reloader_name = reloader_name[:-8]
self.name = f"watchdog ({reloader_name})"
self.observer = Observer()
# Extra patterns can be non-Python files, match them in addition
# to all Python files in default and extra directories. Ignore
# __pycache__ since a change there will always have a change to
# the source file (or initial pyc file) as well. Ignore Git and
# Mercurial internal changes.
extra_patterns = [p for p in self.extra_files if not os.path.isdir(p)]
self.event_handler = EventHandler(
patterns=["*.py", "*.pyc", "*.zip", *extra_patterns],
ignore_patterns=[
"*/__pycache__/*",
"*/.git/*",
"*/.hg/*",
*self.exclude_patterns,
],
)
self.should_reload = False
def trigger_reload(self, filename: str) -> None:
# This is called inside an event handler, which means throwing
# SystemExit has no effect.
# https://github.com/gorakhargosh/watchdog/issues/294
self.should_reload = True
self.log_reload(filename)
def __enter__(self) -> ReloaderLoop:
self.watches: t.Dict[str, t.Any] = {}
self.observer.start()
return super().__enter__()
def __exit__(self, exc_type, exc_val, exc_tb):
self.observer.stop()
self.observer.join()
def run(self) -> None:
while not self.should_reload:
self.run_step()
time.sleep(self.interval)
sys.exit(3)
def run_step(self) -> None:
to_delete = set(self.watches)
for path in _find_watchdog_paths(self.extra_files, self.exclude_patterns):
if path not in self.watches:
try:
self.watches[path] = self.observer.schedule(
self.event_handler, path, recursive=True
)
except OSError:
# Clear this path from list of watches We don't want
# the same error message showing again in the next
# iteration.
self.watches[path] = None
to_delete.discard(path)
for path in to_delete:
watch = self.watches.pop(path, None)
if watch is not None:
self.observer.unschedule(watch)
reloader_loops: t.Dict[str, t.Type[ReloaderLoop]] = {
"stat": StatReloaderLoop,
"watchdog": WatchdogReloaderLoop,
}
try:
__import__("watchdog.observers")
except ImportError:
reloader_loops["auto"] = reloader_loops["stat"]
else:
reloader_loops["auto"] = reloader_loops["watchdog"]
def ensure_echo_on():
"""Ensure that echo mode is enabled. Some tools such as PDB disable
it which causes usability issues after a reload."""
# tcgetattr will fail if stdin isn't a tty
if sys.stdin is None or not sys.stdin.isatty():
return
try:
import termios
except ImportError:
return
attributes = termios.tcgetattr(sys.stdin)
if not attributes[3] & termios.ECHO:
attributes[3] |= termios.ECHO
termios.tcsetattr(sys.stdin, termios.TCSANOW, attributes)
def run_with_reloader(
main_func: t.Callable[[], None],
extra_files: t.Optional[t.Iterable[str]] = None,
exclude_patterns: t.Optional[t.Iterable[str]] = None,
interval: t.Union[int, float] = 1,
reloader_type: str = "auto",
):
"""Run the given function in an independent Python interpreter."""
import signal
signal.signal(signal.SIGTERM, lambda *args: sys.exit(0))
reloader = reloader_loops[reloader_type](
extra_files=extra_files, exclude_patterns=exclude_patterns, interval=interval
)
try:
if os.environ.get("WERKZEUG_RUN_MAIN") == "true":
ensure_echo_on()
t = threading.Thread(target=main_func, args=())
t.daemon = True
# Enter the reloader to set up initial state, then start
# the app thread and reloader update loop.
with reloader:
t.start()
reloader.run()
else:
sys.exit(reloader.restart_with_reloader())
except KeyboardInterrupt:
pass
| 32.071926 | 88 | 0.602257 |
ace5e295b3cd2b2402b4c3fd12fc696e6033ea15 | 3,564 | py | Python | libpycr/builtin/accounts/ssh-key.py | JcDelay/pycr | f729e003473b421b76bc49c5d55d06d7086d63cc | [
"Apache-2.0"
] | 1 | 2015-03-12T10:34:38.000Z | 2015-03-12T10:34:38.000Z | libpycr/builtin/accounts/ssh-key.py | JcDelay/pycr | f729e003473b421b76bc49c5d55d06d7086d63cc | [
"Apache-2.0"
] | null | null | null | libpycr/builtin/accounts/ssh-key.py | JcDelay/pycr | f729e003473b421b76bc49c5d55d06d7086d63cc | [
"Apache-2.0"
] | null | null | null | """Operate on SSH keys associated with one's account"""
# pylint: disable=invalid-name
import argparse
import logging
import os
import sys
from libpycr.exceptions import PyCRError
from libpycr.gerrit.client import Gerrit
from libpycr.meta import GerritAccountBuiltin
from libpycr.utils.commandline import expect_account_as_positional
from libpycr.utils.system import fail
class SshKey(GerritAccountBuiltin):
"""Implement the SSH-KEY command"""
# Logger for this command
log = logging.getLogger(__name__)
subcommands = ('get', 'add', 'remove')
@property
def name(self):
return 'ssh-key'
@property
def description(self):
return 'manage ssh keys'
@staticmethod
def get_usage():
"""Return the command line usage for the SSH-KEY command
:rtype: str
"""
prog = os.path.basename(sys.argv[0])
return os.linesep.join((
'usage: %s [-h] {get,add,remove} ...' % prog,
'',
'Fetch, edit, delete ssh-key(s) from a user account',
'',
'positional arguments:',
' {get,add,remove} available builtins',
' get display a ssh-key',
' add add a new ssh-key',
' remove delete an existing ssh-key',
'',
'optional arguments:',
' -h, --help show this help message and exit',
''
))
@staticmethod
def parse_command_line(arguments):
"""Parse the SSH-KEY command command-line arguments
:param arguments: a list of command-line arguments to parse
:type arguments: list[str]
:rtype: Namespace
"""
parser = argparse.ArgumentParser(description='Manage ssh key(s)')
expect_account_as_positional(parser)
actions = parser.add_subparsers(dest='cmd', help='available commands')
get_cmd = actions.add_parser('get', help='Display SSH key details')
get_cmd.add_argument('uuid', type=int, help='SSH key ID')
cmdline = parser.parse_args(arguments)
# fetch changes details
return cmdline
@staticmethod
def run_get(arguments, *args, **kwargs):
"""???"""
del args, kwargs
parser = argparse.ArgumentParser(description='Display ssh key')
parser.add_argument('account', type=str, help='account ID')
parser.add_argument('uuid', type=int, help='SSH key ID')
cmdline = parser.parse_args(arguments)
try:
key = Gerrit.get_ssh_key(cmdline.account, cmdline.uuid)
except PyCRError as why:
fail('cannot list account SSH keys', why)
print key.ssh_public_key.strip()
def run_add(self, arguments, *args, **kwargs):
"""???"""
pass
def run_remove(self, arguments, *args, **kwargs):
"""???"""
pass
def run(self, arguments, *args, **kwargs):
if not arguments:
fail('No command given.')
command = arguments[0]
arguments = arguments[1:]
if command in ('-h', '--help'):
sys.exit(self.get_usage())
if command not in self.subcommands:
fail('Unknown subcommand: {}'.format(command))
if command == 'get':
self.run_get(arguments, *args, **kwargs)
elif command == 'add':
self.run_add(arguments, *args, **kwargs)
elif command == 'remove':
self.run_remove(arguments, *args, **kwargs)
| 28.741935 | 78 | 0.582772 |
ace5e2bc43505029b39343e7c9f1a20c953cbaa1 | 73 | py | Python | code/__init__.py | mukulv/gameOfLife | 677b12995d154f4aa5fbce969c54c62a90af46b2 | [
"Apache-2.0"
] | null | null | null | code/__init__.py | mukulv/gameOfLife | 677b12995d154f4aa5fbce969c54c62a90af46b2 | [
"Apache-2.0"
] | null | null | null | code/__init__.py | mukulv/gameOfLife | 677b12995d154f4aa5fbce969c54c62a90af46b2 | [
"Apache-2.0"
] | 3 | 2020-08-23T19:21:42.000Z | 2020-10-27T03:38:32.000Z | def inc(x):
return x + 1
def test_answer():
assert inc(3) == 4
| 10.428571 | 22 | 0.547945 |
ace5e2c9750d8ba7ab700cf24cc0bbd303ea4a0d | 2,503 | py | Python | examples/tesla_tester.py | DavidWAbrahams/panda | d4c5a4e260fb87a499e5d93c3a24952b1204a5aa | [
"MIT"
] | null | null | null | examples/tesla_tester.py | DavidWAbrahams/panda | d4c5a4e260fb87a499e5d93c3a24952b1204a5aa | [
"MIT"
] | null | null | null | examples/tesla_tester.py | DavidWAbrahams/panda | d4c5a4e260fb87a499e5d93c3a24952b1204a5aa | [
"MIT"
] | null | null | null | #!/usr/bin/env python
import sys
import binascii
from panda import Panda
def tesla_tester():
try:
print("Trying to connect to Panda over USB...")
p = Panda()
except AssertionError:
print("USB connection failed. Trying WiFi...")
try:
p = Panda("WIFI")
except:
print("WiFi connection timed out. Please make sure your Panda is connected and try again.")
sys.exit(0)
body_bus_speed = 125 # Tesla Body busses (B, BF) are 125kbps, rest are 500kbps
body_bus_num = 1 # My TDC to OBD adapter has PT on bus0 BDY on bus1 and CH on bus2
p.set_can_speed_kbps(body_bus_num, body_bus_speed)
# Now set the panda from its default of SAFETY_NOOUTPUT (read only) to SAFETY_ALLOUTPUT
# Careful, as this will let us send any CAN messages we want (which could be very bad!)
print("Setting Panda to output mode...")
p.set_safety_mode(Panda.SAFETY_ALLOUTPUT)
# BDY 0x248 is the MCU_commands message, which includes folding mirrors, opening the trunk, frunk, setting the cars lock state and more. For our test, we will edit the 3rd byte, which is MCU_lockRequest. 0x01 will lock, 0x02 will unlock:
print("Unlocking Tesla...")
p.can_send(0x248, "\x00\x00\x02\x00\x00\x00\x00\x00", body_bus_num)
#Or, we can set the first byte, MCU_frontHoodCommand + MCU_liftgateSwitch, to 0x01 to pop the frunk, or 0x04 to open/close the trunk (0x05 should open both)
print("Opening Frunk...")
p.can_send(0x248, "\x01\x00\x00\x00\x00\x00\x00\x00", body_bus_num)
#Back to safety...
print("Disabling output on Panda...")
p.set_safety_mode(Panda.SAFETY_NOOUTPUT)
print("Reading VIN from 0x568. This is painfully slow and can take up to 3 minutes (1 minute per message; 3 messages needed for full VIN)...")
cnt = 0
vin = {}
while True:
#Read the VIN
can_recv = p.can_recv()
for address, _, dat, src in can_recv:
if src == body_bus_num:
if address == 1384: #0x568 is VIN
vin_index = int(binascii.hexlify(dat)[:2]) #first byte is the index, 00, 01, 02
vin_string = binascii.hexlify(dat)[2:] #rest of the string is the actual VIN data
vin[vin_index] = vin_string.decode("hex")
print("Got VIN index " + str(vin_index) + " data " + vin[vin_index])
cnt += 1
#if we have all 3 parts of the VIN, print it and break out of our while loop
if cnt == 3:
print("VIN: " + vin[0] + vin[1] + vin[2][:3])
break
if __name__ == "__main__":
tesla_tester()
| 39.109375 | 239 | 0.67479 |
ace5e2f089369af0861d5e7b35731da7420ef785 | 7,120 | py | Python | deeposlandia/datagen.py | shadofren/deeposlandia | 3dcb511482aff9c62bffd383e92055920c7a7e85 | [
"MIT"
] | null | null | null | deeposlandia/datagen.py | shadofren/deeposlandia | 3dcb511482aff9c62bffd383e92055920c7a7e85 | [
"MIT"
] | null | null | null | deeposlandia/datagen.py | shadofren/deeposlandia | 3dcb511482aff9c62bffd383e92055920c7a7e85 | [
"MIT"
] | null | null | null | """Main method to generate new datasets
Example of program calls:
* generate 64*64 pixel images from Shapes dataset, 10000 images in the training set, 100 in the
validation set, 1000 in the testing set::
python deeposlandia/datagen.py -D shapes -s 64 -t 10000 -v 100 -T 1000
* generate 224*224 pixel images from Mapillary dataset, with aggregated labels::
python deeposlandia/datagen.py -D mapillary -s 224 -a
"""
import argparse
import os
import sys
import daiquiri
import pandas as pd
from deeposlandia import utils
from deeposlandia.datasets import AVAILABLE_DATASETS
from deeposlandia.datasets.mapillary import MapillaryDataset
from deeposlandia.datasets.aerial import AerialDataset
from deeposlandia.datasets.shapes import ShapeDataset
from deeposlandia.datasets.tanzania import TanzaniaDataset
logger = daiquiri.getLogger(__name__)
def add_instance_arguments(parser):
"""Add instance-specific arguments from the command line
Parameters
----------
parser : argparse.ArgumentParser
Input parser
Returns
-------
argparse.ArgumentParser
Modified parser, with additional arguments
"""
parser.add_argument('-a', '--aggregate-label', action='store_true',
help="Aggregate labels with respect to their categories")
parser.add_argument('-D', '--dataset',
required=True, choices=AVAILABLE_DATASETS,
help=("Dataset type (to be chosen amongst available"
"datasets)"))
parser.add_argument('-p', '--datapath',
default="data",
help="Relative path towards data directory")
parser.add_argument('-s', '--image-size',
default=256,
type=int,
help=("Desired size of images (width = height)"))
parser.add_argument('-T', '--nb-testing-image',
type=int,
default=0,
help=("Number of testing images"))
parser.add_argument('-t', '--nb-training-image',
type=int,
default=0,
help=("Number of training images"))
parser.add_argument('-v', '--nb-validation-image',
type=int,
default=0,
help=("Number of validation images"))
return parser
if __name__=='__main__':
# Parse command-line arguments
parser = argparse.ArgumentParser(description=("Convolutional Neural Netw"
"ork on street-scene images"))
parser = add_instance_arguments(parser)
args = parser.parse_args()
# Data path and repository management
aggregate_value = "full" if not args.aggregate_label else "aggregated"
input_folder = utils.prepare_input_folder(args.datapath, args.dataset)
prepro_folder = utils.prepare_preprocessed_folder(args.datapath,
args.dataset,
args.image_size,
aggregate_value)
# Dataset creation
if args.dataset == "mapillary":
config_name = "config.json" if not args.aggregate_label else "config_aggregate.json"
config_path = os.path.join(input_folder, config_name)
train_dataset = MapillaryDataset(args.image_size, config_path)
validation_dataset = MapillaryDataset(args.image_size, config_path)
test_dataset = MapillaryDataset(args.image_size, config_path)
elif args.dataset == "shapes":
train_dataset = ShapeDataset(args.image_size)
validation_dataset = ShapeDataset(args.image_size)
test_dataset = ShapeDataset(args.image_size)
os.makedirs(os.path.join(prepro_folder["testing"], "labels"),
exist_ok=True)
elif args.dataset == "aerial":
train_dataset = AerialDataset(args.image_size)
validation_dataset = AerialDataset(args.image_size)
test_dataset = AerialDataset(args.image_size)
elif args.dataset == "tanzania":
train_dataset = TanzaniaDataset(args.image_size)
validation_dataset = TanzaniaDataset(args.image_size)
test_dataset = TanzaniaDataset(args.image_size)
else:
logger.error("Unsupported dataset type. Please choose amongst %s",
AVAILABLE_DATASETS)
sys.exit(1)
# Dataset populating/loading (depends on the existence of a specification file)
if args.nb_training_image > 0:
if os.path.isfile(prepro_folder["training_config"]):
train_dataset.load(prepro_folder["training_config"],
args.nb_training_image)
else:
logger.info(("No existing configuration file for this dataset. "
"Create %s.", prepro_folder['training_config']))
input_image_dir = os.path.join(input_folder, "training")
train_dataset.populate(prepro_folder["training"], input_image_dir,
nb_images=args.nb_training_image,
aggregate=args.aggregate_label)
train_dataset.save(prepro_folder["training_config"])
if args.nb_validation_image > 0:
if os.path.isfile(prepro_folder["validation_config"]):
validation_dataset.load(prepro_folder["validation_config"],
args.nb_validation_image)
else:
logger.info(("No existing configuration file for this dataset. "
"Create %s.", prepro_folder['validation_config']))
input_image_dir = os.path.join(input_folder, "validation")
validation_dataset.populate(prepro_folder["validation"],
input_image_dir,
nb_images=args.nb_validation_image,
aggregate=args.aggregate_label)
validation_dataset.save(prepro_folder["validation_config"])
if args.nb_testing_image > 0:
if os.path.isfile(prepro_folder["testing_config"]):
test_dataset.load(prepro_folder["testing_config"], args.nb_testing_image)
else:
logger.info(("No existing configuration file for this dataset. "
"Create %s.", prepro_folder['testing_config']))
input_image_dir = os.path.join(input_folder, "testing")
test_dataset.populate(prepro_folder["testing"],
input_image_dir,
nb_images=args.nb_testing_image,
aggregate=args.aggregate_label,
labelling=False)
test_dataset.save(prepro_folder["testing_config"])
glossary = pd.DataFrame(train_dataset.labels)
glossary["popularity"] = train_dataset.get_label_popularity()
logger.info("Data glossary:\n%s", glossary)
sys.exit(0)
| 43.680982 | 95 | 0.606039 |
ace5e358097b4677598ded7ac9c4b88bd1a30483 | 19,307 | py | Python | mriqc/viz/utils.py | effigies/mriqc | de60ff0f65e4fe0e315143fe3b75ecd940beb2b1 | [
"BSD-3-Clause"
] | null | null | null | mriqc/viz/utils.py | effigies/mriqc | de60ff0f65e4fe0e315143fe3b75ecd940beb2b1 | [
"BSD-3-Clause"
] | null | null | null | mriqc/viz/utils.py | effigies/mriqc | de60ff0f65e4fe0e315143fe3b75ecd940beb2b1 | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
#
# @Author: oesteban
# @Date: 2016-01-05 11:32:01
# @Email: code@oscaresteban.es
# @Last modified by: oesteban
""" Visualization utilities """
from __future__ import print_function, division, absolute_import, unicode_literals
import math
import os.path as op
import numpy as np
import nibabel as nb
import pandas as pd
from nilearn.plotting import plot_anat, plot_roi
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
from matplotlib.gridspec import GridSpec
from matplotlib.backends.backend_pdf import FigureCanvasPdf as FigureCanvas
import seaborn as sns
from builtins import zip, range, str, bytes # pylint: disable=W0622
from .svg import combine_svg, svg2str
# Default resolution, in dots per inch, used when rasterizing figures
DEFAULT_DPI = 300
# DIN A4 paper dimensions in inches (width, height), used as default figsize
DINA4_LANDSCAPE = (11.69, 8.27)
DINA4_PORTRAIT = (8.27, 11.69)
def plot_slice(dslice, spacing=None, cmap='Greys_r', label=None,
               ax=None, vmax=None, vmin=None, annotate=False):
    """Render a single 2D slice into a matplotlib axes.

    :param numpy.ndarray dslice: 2D array with the slice data.
    :param spacing: in-plane pixel sizes; isotropic 1.0 when omitted.
    :param cmap: colormap name or object used to render the slice.
    :param str label: text printed at the bottom-right corner.
    :param ax: target axes; the current axes are used when omitted.
    :param vmax: upper intensity bound; estimated from the data if ``None``.
    :param vmin: lower intensity bound; estimated from the data if ``None``.
    :param bool annotate: print 'R'/'L' orientation markers on the top corners.
    """
    from matplotlib.cm import get_cmap

    if isinstance(cmap, (str, bytes)):
        cmap = get_cmap(cmap)

    est_vmin, est_vmax = _get_limits(dslice)
    # BUG FIX: compare against None explicitly -- an explicit 0 is a valid
    # intensity bound and must not fall back to the estimated window.
    if vmin is None:
        vmin = est_vmin
    if vmax is None:
        vmax = est_vmax

    if ax is None:
        ax = plt.gca()

    if spacing is None:
        spacing = [1.0, 1.0]

    # Physical extent so that the rendered aspect ratio honors pixel sizes
    phys_sp = np.array(spacing) * dslice.shape
    ax.imshow(np.swapaxes(dslice, 0, 1), vmin=vmin, vmax=vmax, cmap=cmap,
              interpolation='nearest', origin='lower',
              extent=[0, phys_sp[0], 0, phys_sp[1]])
    ax.set_xticklabels([])
    ax.set_yticklabels([])
    ax.grid(False)
    ax.axis('off')

    # Annotations use the extremes of the colormap so they remain visible
    bgcolor = cmap(min(vmin, 0.0))
    fgcolor = cmap(vmax)

    if annotate:
        ax.text(.95, .95, 'R', color=fgcolor, transform=ax.transAxes,
                horizontalalignment='center', verticalalignment='top',
                size=18, bbox=dict(boxstyle="square,pad=0", ec=bgcolor, fc=bgcolor))
        ax.text(.05, .95, 'L', color=fgcolor, transform=ax.transAxes,
                horizontalalignment='center', verticalalignment='top',
                size=18, bbox=dict(boxstyle="square,pad=0", ec=bgcolor, fc=bgcolor))

    if label is not None:
        ax.text(.98, .01, label, color=fgcolor, transform=ax.transAxes,
                horizontalalignment='right', verticalalignment='bottom',
                size=18, bbox=dict(boxstyle="square,pad=0", ec=bgcolor, fc=bgcolor))
def plot_slice_tern(dslice, prev=None, post=None,
                    spacing=None, cmap='Greys_r', label=None, ax=None,
                    vmax=None, vmin=None):
    """Render a slice flanked by its previous and next timepoints.

    The three slices are stacked horizontally into one image so that a
    spike in the central timepoint can be compared against its neighbors.

    :param numpy.ndarray dslice: 2D array with the central slice.
    :param prev: slice at the previous timepoint; a blank (all-ones) pane
        is rendered when ``None``.
    :param post: slice at the next timepoint; blank pane when ``None``.
    :param spacing: in-plane pixel sizes; isotropic 1.0 when omitted.
    :param cmap: colormap name or object.
    :param str label: text printed at the bottom center.
    :param ax: target axes; the current axes are used when omitted.
    :param vmax: upper intensity bound; estimated from ``dslice`` if ``None``.
    :param vmin: lower intensity bound; estimated from ``dslice`` if ``None``.
    """
    from matplotlib.cm import get_cmap

    if isinstance(cmap, (str, bytes)):
        cmap = get_cmap(cmap)

    est_vmin, est_vmax = _get_limits(dslice)
    # BUG FIX: compare against None explicitly -- an explicit 0 is a valid
    # intensity bound and must not fall back to the estimated window.
    if vmin is None:
        vmin = est_vmin
    if vmax is None:
        vmax = est_vmax

    if ax is None:
        ax = plt.gca()

    if spacing is None:
        spacing = [1.0, 1.0]
    else:
        # Axes are swapped with respect to plot_slice
        spacing = [spacing[1], spacing[0]]
    phys_sp = np.array(spacing) * dslice.shape

    if prev is None:
        prev = np.ones_like(dslice)
    if post is None:
        post = np.ones_like(dslice)

    # Stack [prev | current | post] side by side as a single image
    combined = np.swapaxes(np.vstack((prev, dslice, post)), 0, 1)
    ax.imshow(combined, vmin=vmin, vmax=vmax, cmap=cmap,
              interpolation='nearest', origin='lower',
              extent=[0, phys_sp[1] * 3, 0, phys_sp[0]])
    ax.set_xticklabels([])
    ax.set_yticklabels([])
    ax.grid(False)

    if label is not None:
        ax.text(.5, .05, label,
                transform=ax.transAxes,
                horizontalalignment='center',
                verticalalignment='top',
                size=14,
                bbox=dict(boxstyle="square,pad=0", ec='k', fc='k'),
                color='w')
def plot_spikes(in_file, in_fft, spikes_list, cols=3,
                labelfmt='t={0:.3f}s (z={1:d})',
                out_file=None):
    """Write an SVG showing each detected spike and its spectrum.

    For every ``(timepoint, z-slice)`` pair in ``spikes_list``, two
    three-pane strips are stacked: the image data (top) and its FFT
    (bottom), each flanked by the previous/next timepoints.

    :param str in_file: path to the 4D image the spikes were detected in.
    :param str in_fft: path to the corresponding 4D FFT image.
    :param list spikes_list: ``(t, z)`` index pairs, one per spike.
    :param int cols: number of spike panels per figure row.
    :param str labelfmt: format string receiving ``(time [s], z index)``.
    :param str out_file: output path; derived from ``in_file`` if ``None``.
    :return: absolute path of the written SVG file.
    """
    from mpl_toolkits.axes_grid1 import make_axes_locatable

    nii = nb.as_closest_canonical(nb.load(in_file))
    fft = nb.load(in_fft).get_data()

    data = nii.get_data()
    zooms = nii.header.get_zooms()[:2]
    # Last zoom of a 4D image is the repetition time (seconds per volume)
    tstep = nii.header.get_zooms()[-1]

    ntpoints = data.shape[-1]

    # Widen the grid when there are many spikes to keep it from growing too tall
    if len(spikes_list) > cols * 7:
        cols += 1

    nspikes = len(spikes_list)
    rows = 1
    if nspikes > cols:
        rows = math.ceil(nspikes / cols)

    fig = plt.figure(figsize=(7 * cols, 5 * rows))

    for i, (t, z) in enumerate(spikes_list):
        # Neighboring timepoints; left as None at the sequence boundaries
        prev = None
        pvft = None
        if t > 0:
            prev = data[..., z, t - 1]
            pvft = fft[..., z, t - 1]

        post = None
        psft = None
        if t < (ntpoints - 1):
            post = data[..., z, t + 1]
            psft = fft[..., z, t + 1]

        # Split the panel vertically: data strip on top, FFT strip below
        ax1 = fig.add_subplot(rows, cols, i + 1)
        divider = make_axes_locatable(ax1)
        ax2 = divider.new_vertical(size="100%", pad=0.1)
        fig.add_axes(ax2)

        plot_slice_tern(data[..., z, t], prev=prev, post=post, spacing=zooms,
                        ax=ax2,
                        label=labelfmt.format(t * tstep, z))

        # NOTE(review): the fixed [-5, 5] window suggests the FFT image is
        # z-scored upstream -- verify against the spike-detection workflow.
        plot_slice_tern(fft[..., z, t], prev=pvft, post=psft, vmin=-5, vmax=5,
                        cmap=get_parula(), ax=ax1)

    plt.tight_layout()
    if out_file is None:
        fname, ext = op.splitext(op.basename(in_file))
        if ext == '.gz':
            fname, _ = op.splitext(fname)
        out_file = op.abspath('%s.svg' % fname)

    fig.savefig(out_file, format='svg', dpi=300, bbox_inches='tight')
    return out_file
def plot_mosaic(img, out_file=None, ncols=8, title=None, overlay_mask=None,
                bbox_mask_file=None, only_plot_noise=False, annotate=True,
                vmin=None, vmax=None, cmap='Greys_r', plot_sagittal=True,
                fig=None, zmax=128):
    """Write an SVG mosaic of axial slices (plus optional sagittal row).

    :param img: path to a NIfTI file, or a 3D array (isotropic spacing
        assumed in the latter case).
    :param str out_file: output path; a default is derived when ``None``.
    :param int ncols: number of slices per mosaic row.
    :param str title: optional figure title.
    :param str overlay_mask: path to a mask rendered semi-transparently in red.
    :param str bbox_mask_file: mask used to crop the image to its bounding box.
    :param bool only_plot_noise: window the colormap on the background noise.
    :param bool annotate: print 'R'/'L' markers on every slice.
    :param vmin, vmax: intensity window; estimated when ``None``.
    :param cmap: colormap name or object.
    :param bool plot_sagittal: append a bottom row of sagittal slices.
    :param fig: existing figure to draw into; a new one is created if ``None``.
    :param int zmax: maximum number of axial slices to display.
    :return: the path of the written SVG file.
    """
    if isinstance(img, (str, bytes)):
        nii = nb.as_closest_canonical(nb.load(img))
        img_data = nii.get_data()
        zooms = nii.header.get_zooms()
    else:
        img_data = img
        zooms = [1.0, 1.0, 1.0]
        # BUG FIX: only provide a fallback name; previously a
        # caller-supplied ``out_file`` was unconditionally clobbered here.
        if out_file is None:
            out_file = 'mosaic.svg'

    # Remove extra dimensions
    img_data = np.squeeze(img_data)

    if img_data.shape[2] > zmax and bbox_mask_file is None:
        # Crop to the foreground (above the 5th percentile) to drop empty slices
        lowthres = np.percentile(img_data, 5)
        mask_file = np.ones_like(img_data)
        mask_file[img_data <= lowthres] = 0
        img_data = _bbox(img_data, mask_file)

    if bbox_mask_file is not None:
        bbox_data = nb.as_closest_canonical(
            nb.load(bbox_mask_file)).get_data()
        img_data = _bbox(img_data, bbox_data)

    z_vals = np.array(list(range(0, img_data.shape[2])))

    # Reduce the number of slices shown
    if len(z_vals) > zmax:
        rem = 15
        # Crop inferior and posterior
        if not bbox_mask_file:
            z_vals = z_vals[rem:-rem]
        else:
            z_vals = z_vals[2 * rem:]

    while len(z_vals) > zmax:
        # Discard one every two slices
        z_vals = z_vals[::2]

    n_images = len(z_vals)
    nrows = math.ceil(n_images / ncols)
    if plot_sagittal:
        nrows += 1

    if overlay_mask:
        overlay_data = nb.as_closest_canonical(
            nb.load(overlay_mask)).get_data()

    # create figures
    if fig is None:
        fig = plt.figure(figsize=(22, nrows * 3))

    est_vmin, est_vmax = _get_limits(img_data,
                                     only_plot_noise=only_plot_noise)
    # BUG FIX: compare against None explicitly -- an explicit 0 is a valid
    # intensity bound and must not fall back to the estimated window.
    if vmin is None:
        vmin = est_vmin
    if vmax is None:
        vmax = est_vmax

    naxis = 1
    for z_val in z_vals:
        ax = fig.add_subplot(nrows, ncols, naxis)

        if overlay_mask:
            ax.set_rasterized(True)
        plot_slice(img_data[:, :, z_val], vmin=vmin, vmax=vmax,
                   cmap=cmap, ax=ax, spacing=zooms[:2],
                   label='%d' % z_val, annotate=annotate)

        if overlay_mask:
            from matplotlib import cm
            # Build a red colormap with a linear alpha ramp so the overlay
            # is semi-transparent over the anatomical image
            msk_cmap = cm.Reds  # @UndefinedVariable
            msk_cmap._init()
            alphas = np.linspace(0, 0.75, msk_cmap.N + 3)
            msk_cmap._lut[:, -1] = alphas
            plot_slice(overlay_data[:, :, z_val], vmin=0, vmax=1,
                       cmap=msk_cmap, ax=ax, spacing=zooms[:2])
        naxis += 1

    if plot_sagittal:
        # Fill the last row with evenly spaced sagittal slices
        naxis = ncols * (nrows - 1) + 1

        step = int(img_data.shape[0] / (ncols + 1))
        start = step
        stop = img_data.shape[0] - step

        if step == 0:
            step = 1

        for x_val in list(range(start, stop, step))[:ncols]:
            ax = fig.add_subplot(nrows, ncols, naxis)

            plot_slice(img_data[x_val, ...], vmin=vmin, vmax=vmax,
                       cmap=cmap, ax=ax, label='%d' % x_val,
                       spacing=[zooms[0], zooms[2]])
            naxis += 1

    fig.subplots_adjust(
        left=0.05, right=0.95, bottom=0.05, top=0.95, wspace=0.05,
        hspace=0.05)

    if title:
        # NOTE(review): fontsize is passed as a string; newer matplotlib
        # expects a float or a named size -- verify against the pinned version.
        fig.suptitle(title, fontsize='10')
    fig.subplots_adjust(wspace=0.002, hspace=0.002)

    if out_file is None:
        fname, ext = op.splitext(op.basename(img))
        if ext == ".gz":
            fname, _ = op.splitext(fname)
        out_file = op.abspath(fname + '_mosaic.svg')

    fig.savefig(out_file, format='svg', dpi=300, bbox_inches='tight')
    return out_file
def plot_fd(fd_file, fd_radius, mean_fd_dist=None, figsize=DINA4_LANDSCAPE):
    """Plot the framewise-displacement (FD) time series and its histogram.

    When ``mean_fd_dist`` is given, a second row shows the distribution of
    mean FD across subjects with this subject's mean marked on it.

    :param str fd_file: path to a motion-parameters file.
    :param float fd_radius: head radius [mm] used to convert rotations.
    :param mean_fd_dist: mean-FD values of the whole sample (optional).
    :param figsize: figure dimensions in inches.
    :return: the assembled matplotlib figure.
    """
    fd_power = _calc_fd(fd_file, fd_radius)

    fig = plt.Figure(figsize=figsize)
    FigureCanvas(fig)

    if mean_fd_dist:
        grid = GridSpec(2, 4)
    else:
        grid = GridSpec(1, 2, width_ratios=[3, 1])
        grid.update(hspace=1.0, right=0.95, left=0.1, bottom=0.2)

    # Left panel: FD time series
    series_ax = fig.add_subplot(grid[0, :-1])
    series_ax.plot(fd_power)
    series_ax.set_xlim((0, len(fd_power)))
    series_ax.set_ylabel("Frame Displacement [mm]")
    series_ax.set_xlabel("Frame number")

    # Right panel: FD histogram sharing the time-series y-range
    hist_ax = fig.add_subplot(grid[0, -1])
    sns.distplot(fd_power, vertical=True, ax=hist_ax)
    hist_ax.set_ylim(series_ax.get_ylim())

    if mean_fd_dist:
        # Bottom row: sample-wide distribution with this subject marked
        dist_ax = fig.add_subplot(grid[1, :])
        sns.distplot(mean_fd_dist, ax=dist_ax)
        dist_ax.set_xlabel("Mean Frame Displacement (over all subjects) [mm]")
        mean_fd = fd_power.mean()
        label = r'$\overline{{\text{{FD}}}}$ = {0:g}'.format(mean_fd)
        plot_vline(mean_fd, label, ax=dist_ax)

    return fig
def plot_dist(
        main_file, mask_file, xlabel, distribution=None, xlabel2=None,
        figsize=DINA4_LANDSCAPE):
    """Plot the histogram of voxel values of *main_file* inside a mask.

    The top panel is the within-mask histogram of this subject; the
    bottom panel (drawn only when *distribution* is provided) shows the
    reference distribution with the subject's median value marked.

    :param main_file: image whose masked values are histogrammed
    :param mask_file: binary mask image
    :param str xlabel: x-axis label for the top panel
    :param distribution: optional sample of values for the bottom panel
    :param str xlabel2: x-axis label for the bottom panel
    :param figsize: matplotlib figure size
    :return: the matplotlib Figure
    """
    data = _get_values_inside_a_mask(main_file, mask_file)

    fig = plt.Figure(figsize=figsize)
    FigureCanvas(fig)
    gsp = GridSpec(2, 1)

    ax = fig.add_subplot(gsp[0, 0])
    sns.distplot(data.astype(np.double), kde=False, bins=100, ax=ax)
    ax.set_xlabel(xlabel)

    # Only draw the reference panel when a distribution was supplied;
    # the previous version crashed on the default distribution=None
    # inside np.array(None).astype(...).
    if distribution is not None:
        ax = fig.add_subplot(gsp[1, 0])
        sns.distplot(np.array(distribution).astype(np.double), ax=ax)
        cur_val = np.median(data)
        # Fix: '!g' is not a valid str.format conversion flag (only !s,
        # !r, !a exist) and raised ValueError; ':g' is the intended
        # general-number format spec.
        label = "{0:g}".format(cur_val)
        plot_vline(cur_val, label, ax=ax)
        ax.set_xlabel(xlabel2)

    return fig
def plot_vline(cur_val, label, ax):
    """Draw a labelled vertical line at *cur_val* on the given axes.

    The label is rendered vertically, slightly to the left of the line
    and centred within the current y-range.
    """
    ax.axvline(cur_val)
    ymin, ymax = ax.get_ylim()
    xmin, xmax = ax.get_xlim()
    # Offset the text by 1% of the x-range sum so it does not overlap
    # the line itself.
    ax.text(cur_val - (xmin + xmax) / 100.0, (ymin + ymax) / 2.0, label,
            color="blue", rotation=90,
            verticalalignment='center', horizontalalignment='right')
def _calc_rows_columns(ratio, n_images):
rows = 2
for _ in range(100):
columns = math.floor(ratio * rows)
total = (rows - 1) * columns
if total > n_images:
rows = np.ceil(n_images / columns) + 1
break
rows += 1
return int(rows), int(columns)
def _calc_fd(fd_file, fd_radius):
from math import pi
lines = open(fd_file, 'r').readlines()
rows = [[float(x) for x in line.split()] for line in lines]
cols = np.array([list(col) for col in zip(*rows)])
translations = np.transpose(np.abs(np.diff(cols[0:3, :])))
rotations = np.transpose(np.abs(np.diff(cols[3:6, :])))
fd_power = np.sum(translations, axis=1) + \
(fd_radius * pi / 180) * np.sum(rotations, axis=1)
# FD is zero for the first time point
fd_power = np.insert(fd_power, 0, 0)
return fd_power
def _get_mean_fd_distribution(fd_files, fd_radius):
    """Return per-subject mean and maximum FD for a list of motion files.

    :param fd_files: iterable of motion-parameter file paths
    :param float fd_radius: head radius [mm] forwarded to ``_calc_fd``
    :return: tuple (mean_fds, max_fds), two parallel lists of floats
    """
    # Compute each FD trace once, then summarize it twice.
    powers = [_calc_fd(fname, fd_radius) for fname in fd_files]
    mean_fds = [power.mean() for power in powers]
    max_fds = [power.max() for power in powers]
    return mean_fds, max_fds
def _get_values_inside_a_mask(main_file, mask_file):
    """Return the voxel values of *main_file* that fall inside *mask_file*.

    Voxels are kept when they are non-NaN in the main image AND strictly
    positive in the mask image.

    :return: 1D numpy array of the selected voxel values
    """
    volume = nb.load(main_file).get_data()
    not_nan = np.logical_not(np.isnan(volume))
    in_mask = nb.load(mask_file).get_data() > 0
    return volume[np.logical_and(not_nan, in_mask)]
def plot_segmentation(anat_file, segmentation, out_file,
                      **kwargs):
    """Overlay contours of *segmentation* on *anat_file* and save the figure.

    Recognized keyword arguments: ``vmax``, ``vmin``, ``saturate``,
    ``display_mode``, ``cut_coords``, ``title``, ``levels``, ``colors``.

    :param anat_file: background anatomical image (path or image)
    :param segmentation: image whose contours are drawn on top
    :param out_file: where the figure is saved
    :return: *out_file*
    """
    import nibabel as nb
    import numpy as np
    from nilearn.plotting import plot_anat

    vmax = kwargs.get('vmax')
    vmin = kwargs.get('vmin')

    # Load the anatomical data at most once; the previous version loaded
    # and flattened the same file up to three times just for percentiles.
    if kwargs.get('saturate', False) or (vmax is None and vmin is None):
        anat_vals = nb.load(anat_file).get_data().reshape(-1)
        if kwargs.get('saturate', False):
            # Saturate the display at the 70th intensity percentile.
            vmax = np.percentile(anat_vals, 70)
        if vmax is None and vmin is None:
            vmin = np.percentile(anat_vals, 10)
            vmax = np.percentile(anat_vals, 99)

    disp = plot_anat(
        anat_file,
        display_mode=kwargs.get('display_mode', 'ortho'),
        cut_coords=kwargs.get('cut_coords', 8),
        title=kwargs.get('title'),
        vmax=vmax, vmin=vmin)
    disp.add_contours(
        segmentation,
        levels=kwargs.get('levels', [1]),
        colors=kwargs.get('colors', 'r'))
    disp.savefig(out_file)
    disp.close()
    disp = None
    return out_file
def plot_bg_dist(in_file):
    """Plot the air-mask noise histogram and the fitted chi distribution.

    Reads the JSON fit description produced upstream and renders it as an
    SVG in the current working directory. If the JSON cannot be parsed,
    an HTML placeholder is written to the same path so the report still
    has something to embed.

    :param str in_file: path to the JSON file with keys ``x``, ``y``,
        ``y_hat`` and ``x_cutoff``
    :return: absolute path of the written file
    """
    import os.path as op  # pylint: disable=W0621
    import numpy as np
    import json
    from io import open  # pylint: disable=W0622
    import matplotlib.pyplot as plt

    out_file = op.abspath('background_fit.svg')
    try:
        with open(in_file, 'r') as jsonf:
            data = json.load(jsonf)
    except ValueError:
        # Degrade gracefully when the fit file is unreadable.
        with open(out_file, 'w') as ofh:
            ofh.write('<p>Background noise fitting could not be plotted.</p>')
        return out_file

    fig = plt.figure()
    ax = fig.add_subplot(111)
    fig.suptitle('Noise distribution on the air mask, and fitted chi distribution')
    ax.set_xlabel('Intensity')
    ax.set_ylabel('Frequency')

    # Convert bin centers to left edges for the bar plot.
    bin_width = (data['x'][1] - data['x'][0])
    left_edges = [val - 0.5 * bin_width for val in data['x']]

    peak = np.max([np.array(data['y']).max(), np.array(data['y_hat']).max()])
    ax.set_ylim((0.0, 1.10 * peak))

    ax.bar(left_edges, data['y'], bin_width)
    ax.plot(left_edges, data['y_hat'], 'k--', linewidth=1.2)
    # Mark the fitted cutoff as a full-height dashed line.
    ax.plot((data['x_cutoff'], data['x_cutoff']), ax.get_ylim(), 'k--')

    fig.savefig(out_file, format='svg', dpi=300)
    plt.close()
    return out_file
def _get_limits(nifti_file, only_plot_noise=False):
from builtins import bytes, str # pylint: disable=W0622
if isinstance(nifti_file, (str, bytes)):
nii = nb.as_closest_canonical(nb.load(nifti_file))
data = nii.get_data()
else:
data = nifti_file
data_mask = np.logical_not(np.isnan(data))
if only_plot_noise:
data_mask = np.logical_and(data_mask, data != 0)
vmin = np.percentile(data[data_mask], 0)
vmax = np.percentile(data[data_mask], 61)
else:
vmin = np.percentile(data[data_mask], 0.5)
vmax = np.percentile(data[data_mask], 99.5)
return vmin, vmax
def _bbox(img_data, bbox_data):
B = np.argwhere(bbox_data)
(ystart, xstart, zstart), (ystop, xstop, zstop) = B.min(0), B.max(0) + 1
return img_data[ystart:ystop, xstart:xstop, zstart:zstop]
def get_parula():
    """Return a colormap approximating MATLAB's 'parula'.

    The 64 [R, G, B] anchor triples below are interpolated linearly into
    a continuous matplotlib colormap.
    """
    from matplotlib.colors import LinearSegmentedColormap
    # 64 evenly-spaced RGB samples of the parula colormap (dark blue
    # through green to yellow).
    cm_data = [
        [0.2081, 0.1663, 0.5292],
        [0.2116238095, 0.1897809524, 0.5776761905],
        [0.212252381, 0.2137714286, 0.6269714286],
        [0.2081, 0.2386, 0.6770857143],
        [0.1959047619, 0.2644571429, 0.7279],
        [0.1707285714, 0.2919380952, 0.779247619],
        [0.1252714286, 0.3242428571, 0.8302714286],
        [0.0591333333, 0.3598333333, 0.8683333333],
        [0.0116952381, 0.3875095238, 0.8819571429],
        [0.0059571429, 0.4086142857, 0.8828428571],
        [0.0165142857, 0.4266, 0.8786333333],
        [0.032852381, 0.4430428571, 0.8719571429],
        [0.0498142857, 0.4585714286, 0.8640571429],
        [0.0629333333, 0.4736904762, 0.8554380952],
        [0.0722666667, 0.4886666667, 0.8467],
        [0.0779428571, 0.5039857143, 0.8383714286],
        [0.079347619, 0.5200238095, 0.8311809524],
        [0.0749428571, 0.5375428571, 0.8262714286],
        [0.0640571429, 0.5569857143, 0.8239571429],
        [0.0487714286, 0.5772238095, 0.8228285714],
        [0.0343428571, 0.5965809524, 0.819852381],
        [0.0265, 0.6137, 0.8135],
        [0.0238904762, 0.6286619048, 0.8037619048],
        [0.0230904762, 0.6417857143, 0.7912666667],
        [0.0227714286, 0.6534857143, 0.7767571429],
        [0.0266619048, 0.6641952381, 0.7607190476],
        [0.0383714286, 0.6742714286, 0.743552381],
        [0.0589714286, 0.6837571429, 0.7253857143],
        [0.0843, 0.6928333333, 0.7061666667],
        [0.1132952381, 0.7015, 0.6858571429],
        [0.1452714286, 0.7097571429, 0.6646285714],
        [0.1801333333, 0.7176571429, 0.6424333333],
        [0.2178285714, 0.7250428571, 0.6192619048],
        [0.2586428571, 0.7317142857, 0.5954285714],
        [0.3021714286, 0.7376047619, 0.5711857143],
        [0.3481666667, 0.7424333333, 0.5472666667],
        [0.3952571429, 0.7459, 0.5244428571],
        [0.4420095238, 0.7480809524, 0.5033142857],
        [0.4871238095, 0.7490619048, 0.4839761905],
        [0.5300285714, 0.7491142857, 0.4661142857],
        [0.5708571429, 0.7485190476, 0.4493904762],
        [0.609852381, 0.7473142857, 0.4336857143],
        [0.6473, 0.7456, 0.4188],
        [0.6834190476, 0.7434761905, 0.4044333333],
        [0.7184095238, 0.7411333333, 0.3904761905],
        [0.7524857143, 0.7384, 0.3768142857],
        [0.7858428571, 0.7355666667, 0.3632714286],
        [0.8185047619, 0.7327333333, 0.3497904762],
        [0.8506571429, 0.7299, 0.3360285714],
        [0.8824333333, 0.7274333333, 0.3217],
        [0.9139333333, 0.7257857143, 0.3062761905],
        [0.9449571429, 0.7261142857, 0.2886428571],
        [0.9738952381, 0.7313952381, 0.266647619],
        [0.9937714286, 0.7454571429, 0.240347619],
        [0.9990428571, 0.7653142857, 0.2164142857],
        [0.9955333333, 0.7860571429, 0.196652381],
        [0.988, 0.8066, 0.1793666667],
        [0.9788571429, 0.8271428571, 0.1633142857],
        [0.9697, 0.8481380952, 0.147452381],
        [0.9625857143, 0.8705142857, 0.1309],
        [0.9588714286, 0.8949, 0.1132428571],
        [0.9598238095, 0.9218333333, 0.0948380952],
        [0.9661, 0.9514428571, 0.0755333333],
        [0.9763, 0.9831, 0.0538]]
    return LinearSegmentedColormap.from_list('parula', cm_data)
| 32.178333 | 84 | 0.602061 |
ace5e361541868c94955b4226ad4a3e41a3db34c | 638 | py | Python | month01/面向对象/类和对象/类和对象实例/exercise01 就餐人数.py | chaofan-zheng/python_leanring_code | 0af44ff39b9ded2c1d2cc96c6d356d21170ac04d | [
"Apache-2.0"
] | 4 | 2021-01-07T14:25:10.000Z | 2021-02-01T10:36:01.000Z | month01/面向对象/类和对象/类和对象实例/exercise01 就餐人数.py | chaofan-zheng/python_leanring_code | 0af44ff39b9ded2c1d2cc96c6d356d21170ac04d | [
"Apache-2.0"
] | null | null | null | month01/面向对象/类和对象/类和对象实例/exercise01 就餐人数.py | chaofan-zheng/python_leanring_code | 0af44ff39b9ded2c1d2cc96c6d356d21170ac04d | [
"Apache-2.0"
] | null | null | null | class Restaurant:
def __init__(self, name, type):
self.name = name
self.type = type
self.number_served = 0
def describe_restaurant(self):
print(f"餐厅的名称为{self.name},餐厅的类别的{self.type}")
def open_restaurant(self):
print("餐厅正在营业")
def print_number_served(self):
print(f"有{self.number_served}人在{self.name}就餐过")
def set_number_served(self, new_number):
if new_number > 0:
self.number_served = new_number
else:
print("就餐人数不能小于0")
# Demo: create a hotpot restaurant, record 10,000 diners, and print the count.
hidilao = Restaurant("海底捞", "hotpot")
hidilao.set_number_served(10000)
hidilao.print_number_served()
| 24.538462 | 55 | 0.641066 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.