hexsha stringlengths 40 40 | size int64 7 1.04M | ext stringclasses 10 values | lang stringclasses 1 value | max_stars_repo_path stringlengths 4 247 | max_stars_repo_name stringlengths 4 125 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 368k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 4 247 | max_issues_repo_name stringlengths 4 125 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 116k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 4 247 | max_forks_repo_name stringlengths 4 125 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 1 1.04M | avg_line_length float64 1.77 618k | max_line_length int64 1 1.02M | alphanum_fraction float64 0 1 | original_content stringlengths 7 1.04M | filtered:remove_function_no_docstring int64 -102 942k | filtered:remove_class_no_docstring int64 -354 977k | filtered:remove_delete_markers int64 0 60.1k |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
1243bbe5c8b07ce563c0e5bf91f426e6d0292391 | 65,985 | py | Python | nitropyapp/ui/mainwindowtest.py | Nitrokey/nitrokey-app2 | ab66bbceb854e1f18987b0331528e86e3e7ff702 | [
"Apache-2.0"
] | 1 | 2021-11-23T12:54:35.000Z | 2021-11-23T12:54:35.000Z | nitropyapp/ui/mainwindowtest.py | Nitrokey/nitrokey-app2 | ab66bbceb854e1f18987b0331528e86e3e7ff702 | [
"Apache-2.0"
] | null | null | null | nitropyapp/ui/mainwindowtest.py | Nitrokey/nitrokey-app2 | ab66bbceb854e1f18987b0331528e86e3e7ff702 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'mainwindowtest.ui'
#
# Created by: PyQt5 UI code generator 5.15.2
#
# WARNING: Any manual changes made to this file will be lost when pyuic5 is
# run again. Do not edit this file unless you know what you are doing.
from PyQt5 import QtCore, QtGui, QtWidgets
import resources_rc
| 65.46131 | 406 | 0.736576 | # -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'mainwindowtest.ui'
#
# Created by: PyQt5 UI code generator 5.15.2
#
# WARNING: Any manual changes made to this file will be lost when pyuic5 is
# run again. Do not edit this file unless you know what you are doing.
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_MainWindow(object):
def setupUi(self, MainWindow):
MainWindow.setObjectName("MainWindow")
MainWindow.resize(932, 854)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.MinimumExpanding, QtWidgets.QSizePolicy.MinimumExpanding)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(MainWindow.sizePolicy().hasHeightForWidth())
MainWindow.setSizePolicy(sizePolicy)
MainWindow.setMinimumSize(QtCore.QSize(0, 110))
icon = QtGui.QIcon()
icon.addPixmap(QtGui.QPixmap(":/images/new/icon_NK.svg"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
MainWindow.setWindowIcon(icon)
MainWindow.setAutoFillBackground(True)
self.centralWidget = QtWidgets.QWidget(MainWindow)
self.centralWidget.setObjectName("centralWidget")
self.verticalLayout = QtWidgets.QVBoxLayout(self.centralWidget)
self.verticalLayout.setContentsMargins(11, 11, 11, 11)
self.verticalLayout.setSpacing(6)
self.verticalLayout.setObjectName("verticalLayout")
self.label_8 = QtWidgets.QLabel(self.centralWidget)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.label_8.sizePolicy().hasHeightForWidth())
self.label_8.setSizePolicy(sizePolicy)
self.label_8.setMinimumSize(QtCore.QSize(120, 120))
self.label_8.setMaximumSize(QtCore.QSize(1200, 70))
self.label_8.setFocusPolicy(QtCore.Qt.NoFocus)
self.label_8.setContextMenuPolicy(QtCore.Qt.ActionsContextMenu)
self.label_8.setText("")
self.label_8.setPixmap(QtGui.QPixmap(":/images/new/icon_Logo_App.svg"))
self.label_8.setIndent(1)
self.label_8.setObjectName("label_8")
self.verticalLayout.addWidget(self.label_8)
self.formFrame = QtWidgets.QFrame(self.centralWidget)
self.formFrame.setMinimumSize(QtCore.QSize(0, 0))
self.formFrame.setBaseSize(QtCore.QSize(12, 10))
self.formFrame.setObjectName("formFrame")
self.horizontalLayout_10 = QtWidgets.QHBoxLayout(self.formFrame)
self.horizontalLayout_10.setContentsMargins(11, 11, 11, 11)
self.horizontalLayout_10.setSpacing(6)
self.horizontalLayout_10.setObjectName("horizontalLayout_10")
spacerItem = QtWidgets.QSpacerItem(40, 1, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
self.horizontalLayout_10.addItem(spacerItem)
self.btn_dial_quit = QtWidgets.QPushButton(self.formFrame)
self.btn_dial_quit.setStyleSheet("#upLeft { background-color: transparent; border-image: url(:/images/new/icon_safe.svg); background: none; border: none; background-repeat: none; }")
self.btn_dial_quit.setText("")
icon1 = QtGui.QIcon()
icon1.addPixmap(QtGui.QPixmap(":/images/new/icon_quit.svg"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
self.btn_dial_quit.setIcon(icon1)
self.btn_dial_quit.setObjectName("btn_dial_quit")
self.horizontalLayout_10.addWidget(self.btn_dial_quit)
self.btn_dial_lock = QtWidgets.QPushButton(self.formFrame)
self.btn_dial_lock.setStyleSheet("#upLeft { background-color: transparent; border-image: url(:/images/new/icon_safe.svg); background: none; border: none; background-repeat: none; }")
self.btn_dial_lock.setText("")
icon2 = QtGui.QIcon()
icon2.addPixmap(QtGui.QPixmap(":/images/new/icon_unsafe.svg"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
self.btn_dial_lock.setIcon(icon2)
self.btn_dial_lock.setObjectName("btn_dial_lock")
self.horizontalLayout_10.addWidget(self.btn_dial_lock)
self.btn_dial_help = QtWidgets.QPushButton(self.formFrame)
self.btn_dial_help.setCursor(QtGui.QCursor(QtCore.Qt.ArrowCursor))
self.btn_dial_help.setMouseTracking(False)
self.btn_dial_help.setStyleSheet("")
self.btn_dial_help.setText("")
icon3 = QtGui.QIcon()
icon3.addPixmap(QtGui.QPixmap("images/new/icon_fragezeichen.svg"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
self.btn_dial_help.setIcon(icon3)
self.btn_dial_help.setObjectName("btn_dial_help")
self.horizontalLayout_10.addWidget(self.btn_dial_help)
self.pushButton_2 = QtWidgets.QPushButton(self.formFrame)
self.pushButton_2.setText("")
icon4 = QtGui.QIcon()
icon4.addPixmap(QtGui.QPixmap("images/new/icon_about_nitrokey.svg"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
self.pushButton_2.setIcon(icon4)
self.pushButton_2.setObjectName("pushButton_2")
self.horizontalLayout_10.addWidget(self.pushButton_2)
self.verticalLayout.addWidget(self.formFrame)
self.tabWidget = QtWidgets.QTabWidget(self.centralWidget)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.MinimumExpanding, QtWidgets.QSizePolicy.MinimumExpanding)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.tabWidget.sizePolicy().hasHeightForWidth())
self.tabWidget.setSizePolicy(sizePolicy)
self.tabWidget.setTabPosition(QtWidgets.QTabWidget.North)
self.tabWidget.setTabShape(QtWidgets.QTabWidget.Rounded)
self.tabWidget.setElideMode(QtCore.Qt.ElideNone)
self.tabWidget.setDocumentMode(False)
self.tabWidget.setTabsClosable(False)
self.tabWidget.setMovable(False)
self.tabWidget.setObjectName("tabWidget")
self.tab_5 = QtWidgets.QWidget()
self.tab_5.setObjectName("tab_5")
self.gridLayout_5 = QtWidgets.QGridLayout(self.tab_5)
self.gridLayout_5.setContentsMargins(11, 11, 11, 11)
self.gridLayout_5.setSpacing(6)
self.gridLayout_5.setObjectName("gridLayout_5")
spacerItem1 = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
self.gridLayout_5.addItem(spacerItem1, 2, 0, 1, 1)
spacerItem2 = QtWidgets.QSpacerItem(20, 40, QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Expanding)
self.gridLayout_5.addItem(spacerItem2, 0, 1, 1, 1)
self.btn_dial_PWS = QtWidgets.QPushButton(self.tab_5)
self.btn_dial_PWS.setEnabled(True)
self.btn_dial_PWS.setStyleSheet("")
icon5 = QtGui.QIcon()
icon5.addPixmap(QtGui.QPixmap(":/images/new/icon_safe.svg"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
self.btn_dial_PWS.setIcon(icon5)
self.btn_dial_PWS.setObjectName("btn_dial_PWS")
self.gridLayout_5.addWidget(self.btn_dial_PWS, 2, 1, 1, 1)
spacerItem3 = QtWidgets.QSpacerItem(20, 40, QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Expanding)
self.gridLayout_5.addItem(spacerItem3, 3, 1, 1, 1)
spacerItem4 = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
self.gridLayout_5.addItem(spacerItem4, 2, 4, 1, 1)
spacerItem5 = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
self.gridLayout_5.addItem(spacerItem5, 2, 2, 1, 1)
self.tabWidget.addTab(self.tab_5, "")
self.tab = QtWidgets.QWidget()
self.tab.setObjectName("tab")
self.verticalLayout_16 = QtWidgets.QVBoxLayout(self.tab)
self.verticalLayout_16.setContentsMargins(11, 11, 11, 11)
self.verticalLayout_16.setSpacing(6)
self.verticalLayout_16.setObjectName("verticalLayout_16")
self.checkBox_2 = QtWidgets.QCheckBox(self.tab)
self.checkBox_2.setChecked(True)
self.checkBox_2.setObjectName("checkBox_2")
self.verticalLayout_16.addWidget(self.checkBox_2, 0, QtCore.Qt.AlignRight)
self.frame_8 = QtWidgets.QFrame(self.tab)
self.frame_8.setAutoFillBackground(False)
self.frame_8.setFrameShape(QtWidgets.QFrame.StyledPanel)
self.frame_8.setFrameShadow(QtWidgets.QFrame.Sunken)
self.frame_8.setObjectName("frame_8")
self.verticalLayout_11 = QtWidgets.QVBoxLayout(self.frame_8)
self.verticalLayout_11.setContentsMargins(11, 11, 11, 11)
self.verticalLayout_11.setSpacing(6)
self.verticalLayout_11.setObjectName("verticalLayout_11")
self.label_24 = QtWidgets.QLabel(self.frame_8)
font = QtGui.QFont()
font.setBold(True)
font.setWeight(75)
self.label_24.setFont(font)
self.label_24.setObjectName("label_24")
self.verticalLayout_11.addWidget(self.label_24)
self.horizontalLayout_14 = QtWidgets.QHBoxLayout()
self.horizontalLayout_14.setSpacing(6)
self.horizontalLayout_14.setObjectName("horizontalLayout_14")
self.radioButton_2 = QtWidgets.QRadioButton(self.frame_8)
self.radioButton_2.setChecked(True)
self.radioButton_2.setObjectName("radioButton_2")
self.horizontalLayout_14.addWidget(self.radioButton_2)
self.radioButton = QtWidgets.QRadioButton(self.frame_8)
self.radioButton.setObjectName("radioButton")
self.horizontalLayout_14.addWidget(self.radioButton)
self.label_25 = QtWidgets.QLabel(self.frame_8)
font = QtGui.QFont()
font.setItalic(True)
self.label_25.setFont(font)
self.label_25.setObjectName("label_25")
self.horizontalLayout_14.addWidget(self.label_25)
spacerItem6 = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
self.horizontalLayout_14.addItem(spacerItem6)
self.verticalLayout_11.addLayout(self.horizontalLayout_14)
self.horizontalLayout_9 = QtWidgets.QHBoxLayout()
self.horizontalLayout_9.setSpacing(6)
self.horizontalLayout_9.setObjectName("horizontalLayout_9")
self.label = QtWidgets.QLabel(self.frame_8)
self.label.setObjectName("label")
self.horizontalLayout_9.addWidget(self.label)
self.slotComboBox = QtWidgets.QComboBox(self.frame_8)
self.slotComboBox.setMinimumSize(QtCore.QSize(300, 0))
self.slotComboBox.setCurrentText("HOTP slot")
self.slotComboBox.setObjectName("slotComboBox")
self.slotComboBox.addItem("")
self.slotComboBox.setItemText(0, "HOTP slot")
self.horizontalLayout_9.addWidget(self.slotComboBox)
self.label_2 = QtWidgets.QLabel(self.frame_8)
self.label_2.setObjectName("label_2")
self.horizontalLayout_9.addWidget(self.label_2)
self.nameEdit = QtWidgets.QLineEdit(self.frame_8)
self.nameEdit.setInputMask("")
self.nameEdit.setMaxLength(15)
self.nameEdit.setObjectName("nameEdit")
self.horizontalLayout_9.addWidget(self.nameEdit)
self.eraseButton = QtWidgets.QPushButton(self.frame_8)
self.eraseButton.setObjectName("eraseButton")
self.horizontalLayout_9.addWidget(self.eraseButton)
self.verticalLayout_11.addLayout(self.horizontalLayout_9)
self.verticalLayout_16.addWidget(self.frame_8)
self.frame = QtWidgets.QFrame(self.tab)
self.frame.setAutoFillBackground(False)
self.frame.setFrameShape(QtWidgets.QFrame.StyledPanel)
self.frame.setFrameShadow(QtWidgets.QFrame.Sunken)
self.frame.setObjectName("frame")
self.verticalLayout_7 = QtWidgets.QVBoxLayout(self.frame)
self.verticalLayout_7.setContentsMargins(11, 11, 11, 11)
self.verticalLayout_7.setSpacing(6)
self.verticalLayout_7.setObjectName("verticalLayout_7")
self.label_21 = QtWidgets.QLabel(self.frame)
font = QtGui.QFont()
font.setBold(True)
font.setWeight(75)
self.label_21.setFont(font)
self.label_21.setObjectName("label_21")
self.verticalLayout_7.addWidget(self.label_21)
self.formLayout_4 = QtWidgets.QFormLayout()
self.formLayout_4.setFieldGrowthPolicy(QtWidgets.QFormLayout.AllNonFixedFieldsGrow)
self.formLayout_4.setSpacing(6)
self.formLayout_4.setObjectName("formLayout_4")
self.label_4 = QtWidgets.QLabel(self.frame)
self.label_4.setObjectName("label_4")
self.formLayout_4.setWidget(1, QtWidgets.QFormLayout.LabelRole, self.label_4)
self.horizontalLayout_6 = QtWidgets.QHBoxLayout()
self.horizontalLayout_6.setSpacing(6)
self.horizontalLayout_6.setObjectName("horizontalLayout_6")
self.base32RadioButton = QtWidgets.QRadioButton(self.frame)
self.base32RadioButton.setCheckable(True)
self.base32RadioButton.setChecked(True)
self.base32RadioButton.setObjectName("base32RadioButton")
self.horizontalLayout_6.addWidget(self.base32RadioButton)
self.hexRadioButton = QtWidgets.QRadioButton(self.frame)
self.hexRadioButton.setChecked(False)
self.hexRadioButton.setObjectName("hexRadioButton")
self.horizontalLayout_6.addWidget(self.hexRadioButton)
spacerItem7 = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
self.horizontalLayout_6.addItem(spacerItem7)
self.l_supportedLength = QtWidgets.QLabel(self.frame)
font = QtGui.QFont()
font.setPointSize(11)
self.l_supportedLength.setFont(font)
self.l_supportedLength.setObjectName("l_supportedLength")
self.horizontalLayout_6.addWidget(self.l_supportedLength)
self.formLayout_4.setLayout(1, QtWidgets.QFormLayout.FieldRole, self.horizontalLayout_6)
self.label_3 = QtWidgets.QLabel(self.frame)
self.label_3.setObjectName("label_3")
self.formLayout_4.setWidget(2, QtWidgets.QFormLayout.LabelRole, self.label_3)
self.verticalLayout_6 = QtWidgets.QVBoxLayout()
self.verticalLayout_6.setSpacing(6)
self.verticalLayout_6.setObjectName("verticalLayout_6")
self.secretEdit = QtWidgets.QLineEdit(self.frame)
self.secretEdit.setInputMask("")
self.secretEdit.setText("")
self.secretEdit.setMaxLength(200)
self.secretEdit.setEchoMode(QtWidgets.QLineEdit.PasswordEchoOnEdit)
self.secretEdit.setClearButtonEnabled(False)
self.secretEdit.setObjectName("secretEdit")
self.verticalLayout_6.addWidget(self.secretEdit)
self.labelNotify = QtWidgets.QLabel(self.frame)
font = QtGui.QFont()
font.setPointSize(9)
font.setBold(True)
font.setItalic(True)
font.setWeight(75)
self.labelNotify.setFont(font)
self.labelNotify.setObjectName("labelNotify")
self.verticalLayout_6.addWidget(self.labelNotify)
self.horizontalLayout_4 = QtWidgets.QHBoxLayout()
self.horizontalLayout_4.setSpacing(6)
self.horizontalLayout_4.setObjectName("horizontalLayout_4")
self.checkBox = QtWidgets.QCheckBox(self.frame)
self.checkBox.setEnabled(True)
self.checkBox.setChecked(True)
self.checkBox.setObjectName("checkBox")
self.horizontalLayout_4.addWidget(self.checkBox)
spacerItem8 = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
self.horizontalLayout_4.addItem(spacerItem8)
self.verticalLayout_6.addLayout(self.horizontalLayout_4)
self.horizontalLayout_3 = QtWidgets.QHBoxLayout()
self.horizontalLayout_3.setSpacing(6)
self.horizontalLayout_3.setObjectName("horizontalLayout_3")
self.label_7 = QtWidgets.QLabel(self.frame)
self.label_7.setObjectName("label_7")
self.horizontalLayout_3.addWidget(self.label_7)
self.secret_key_generated_len = QtWidgets.QSpinBox(self.frame)
self.secret_key_generated_len.setMinimum(10)
self.secret_key_generated_len.setMaximum(40)
self.secret_key_generated_len.setProperty("value", 40)
self.secret_key_generated_len.setObjectName("secret_key_generated_len")
self.horizontalLayout_3.addWidget(self.secret_key_generated_len)
self.randomSecretButton = QtWidgets.QPushButton(self.frame)
self.randomSecretButton.setAccessibleDescription("")
self.randomSecretButton.setObjectName("randomSecretButton")
self.horizontalLayout_3.addWidget(self.randomSecretButton)
self.btn_copyToClipboard = QtWidgets.QPushButton(self.frame)
self.btn_copyToClipboard.setObjectName("btn_copyToClipboard")
self.horizontalLayout_3.addWidget(self.btn_copyToClipboard)
self.verticalLayout_6.addLayout(self.horizontalLayout_3)
self.formLayout_4.setLayout(2, QtWidgets.QFormLayout.FieldRole, self.verticalLayout_6)
self.verticalLayout_7.addLayout(self.formLayout_4)
self.label_26 = QtWidgets.QLabel(self.frame)
font = QtGui.QFont()
font.setPointSize(11)
font.setItalic(True)
self.label_26.setFont(font)
self.label_26.setWordWrap(True)
self.label_26.setObjectName("label_26")
self.verticalLayout_7.addWidget(self.label_26)
self.verticalLayout_16.addWidget(self.frame)
self.frame_2 = QtWidgets.QFrame(self.tab)
self.frame_2.setAutoFillBackground(False)
self.frame_2.setFrameShape(QtWidgets.QFrame.StyledPanel)
self.frame_2.setFrameShadow(QtWidgets.QFrame.Sunken)
self.frame_2.setObjectName("frame_2")
self.verticalLayout_9 = QtWidgets.QVBoxLayout(self.frame_2)
self.verticalLayout_9.setContentsMargins(11, 11, 11, 11)
self.verticalLayout_9.setSpacing(6)
self.verticalLayout_9.setObjectName("verticalLayout_9")
self.label_22 = QtWidgets.QLabel(self.frame_2)
font = QtGui.QFont()
font.setBold(True)
font.setWeight(75)
self.label_22.setFont(font)
self.label_22.setObjectName("label_22")
self.verticalLayout_9.addWidget(self.label_22)
self.formLayout_5 = QtWidgets.QFormLayout()
self.formLayout_5.setFieldGrowthPolicy(QtWidgets.QFormLayout.AllNonFixedFieldsGrow)
self.formLayout_5.setSpacing(6)
self.formLayout_5.setObjectName("formLayout_5")
self.verticalLayout_5 = QtWidgets.QVBoxLayout()
self.verticalLayout_5.setSpacing(6)
self.verticalLayout_5.setObjectName("verticalLayout_5")
self.horizontalLayout_7 = QtWidgets.QHBoxLayout()
self.horizontalLayout_7.setSpacing(6)
self.horizontalLayout_7.setObjectName("horizontalLayout_7")
self.counterEdit = QtWidgets.QLineEdit(self.frame_2)
self.counterEdit.setInputMask("")
self.counterEdit.setMaxLength(20)
self.counterEdit.setObjectName("counterEdit")
self.horizontalLayout_7.addWidget(self.counterEdit)
self.setToZeroButton = QtWidgets.QPushButton(self.frame_2)
self.setToZeroButton.setObjectName("setToZeroButton")
self.horizontalLayout_7.addWidget(self.setToZeroButton)
self.setToRandomButton = QtWidgets.QPushButton(self.frame_2)
self.setToRandomButton.setObjectName("setToRandomButton")
self.horizontalLayout_7.addWidget(self.setToRandomButton)
self.verticalLayout_5.addLayout(self.horizontalLayout_7)
self.formLayout_5.setLayout(4, QtWidgets.QFormLayout.FieldRole, self.verticalLayout_5)
spacerItem9 = QtWidgets.QSpacerItem(20, 0, QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Expanding)
self.formLayout_5.setItem(5, QtWidgets.QFormLayout.FieldRole, spacerItem9)
self.horizontalLayout_8 = QtWidgets.QHBoxLayout()
self.horizontalLayout_8.setSpacing(6)
self.horizontalLayout_8.setObjectName("horizontalLayout_8")
self.digits6radioButton = QtWidgets.QRadioButton(self.frame_2)
self.digits6radioButton.setChecked(True)
self.digits6radioButton.setObjectName("digits6radioButton")
self.horizontalLayout_8.addWidget(self.digits6radioButton)
self.digits8radioButton = QtWidgets.QRadioButton(self.frame_2)
self.digits8radioButton.setObjectName("digits8radioButton")
self.horizontalLayout_8.addWidget(self.digits8radioButton)
spacerItem10 = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
self.horizontalLayout_8.addItem(spacerItem10)
self.formLayout_5.setLayout(3, QtWidgets.QFormLayout.FieldRole, self.horizontalLayout_8)
self.horizontalLayout_15 = QtWidgets.QHBoxLayout()
self.horizontalLayout_15.setSpacing(6)
self.horizontalLayout_15.setObjectName("horizontalLayout_15")
self.intervalSpinBox = QtWidgets.QSpinBox(self.frame_2)
self.intervalSpinBox.setMinimum(1)
self.intervalSpinBox.setMaximum(65536)
self.intervalSpinBox.setProperty("value", 30)
self.intervalSpinBox.setObjectName("intervalSpinBox")
self.horizontalLayout_15.addWidget(self.intervalSpinBox)
spacerItem11 = QtWidgets.QSpacerItem(40, 0, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
self.horizontalLayout_15.addItem(spacerItem11)
self.formLayout_5.setLayout(2, QtWidgets.QFormLayout.FieldRole, self.horizontalLayout_15)
self.label_6 = QtWidgets.QLabel(self.frame_2)
self.label_6.setObjectName("label_6")
self.formLayout_5.setWidget(4, QtWidgets.QFormLayout.LabelRole, self.label_6)
self.label_5 = QtWidgets.QLabel(self.frame_2)
self.label_5.setObjectName("label_5")
self.formLayout_5.setWidget(3, QtWidgets.QFormLayout.LabelRole, self.label_5)
self.intervalLabel = QtWidgets.QLabel(self.frame_2)
self.intervalLabel.setObjectName("intervalLabel")
self.formLayout_5.setWidget(2, QtWidgets.QFormLayout.LabelRole, self.intervalLabel)
self.verticalLayout_9.addLayout(self.formLayout_5)
self.verticalLayout_16.addWidget(self.frame_2)
spacerItem12 = QtWidgets.QSpacerItem(20, 1, QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.MinimumExpanding)
self.verticalLayout_16.addItem(spacerItem12)
self.line = QtWidgets.QFrame(self.tab)
self.line.setFrameShape(QtWidgets.QFrame.HLine)
self.line.setFrameShadow(QtWidgets.QFrame.Sunken)
self.line.setObjectName("line")
self.verticalLayout_16.addWidget(self.line)
self.horizontalLayout = QtWidgets.QHBoxLayout()
self.horizontalLayout.setSpacing(6)
self.horizontalLayout.setObjectName("horizontalLayout")
self.progressBar = QtWidgets.QProgressBar(self.tab)
self.progressBar.setProperty("value", 24)
self.progressBar.setObjectName("progressBar")
self.horizontalLayout.addWidget(self.progressBar)
spacerItem13 = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
self.horizontalLayout.addItem(spacerItem13)
self.cancelButton = QtWidgets.QPushButton(self.tab)
self.cancelButton.setToolTip("")
self.cancelButton.setObjectName("cancelButton")
self.horizontalLayout.addWidget(self.cancelButton)
self.writeButton = QtWidgets.QPushButton(self.tab)
self.writeButton.setObjectName("writeButton")
self.horizontalLayout.addWidget(self.writeButton)
self.verticalLayout_16.addLayout(self.horizontalLayout)
self.tabWidget.addTab(self.tab, "")
self.tab_2 = QtWidgets.QWidget()
self.tab_2.setObjectName("tab_2")
self.verticalLayout_14 = QtWidgets.QVBoxLayout(self.tab_2)
self.verticalLayout_14.setContentsMargins(11, 11, 11, 11)
self.verticalLayout_14.setSpacing(6)
self.verticalLayout_14.setObjectName("verticalLayout_14")
self.frame_4 = QtWidgets.QFrame(self.tab_2)
self.frame_4.setAutoFillBackground(False)
self.frame_4.setFrameShape(QtWidgets.QFrame.StyledPanel)
self.frame_4.setFrameShadow(QtWidgets.QFrame.Sunken)
self.frame_4.setObjectName("frame_4")
self.verticalLayout_12 = QtWidgets.QVBoxLayout(self.frame_4)
self.verticalLayout_12.setContentsMargins(11, 11, 11, 11)
self.verticalLayout_12.setSpacing(6)
self.verticalLayout_12.setObjectName("verticalLayout_12")
self.label_11 = QtWidgets.QLabel(self.frame_4)
font = QtGui.QFont()
font.setBold(True)
font.setWeight(75)
self.label_11.setFont(font)
self.label_11.setObjectName("label_11")
self.verticalLayout_12.addWidget(self.label_11)
self.verticalLayout_2 = QtWidgets.QVBoxLayout()
self.verticalLayout_2.setSpacing(6)
self.verticalLayout_2.setObjectName("verticalLayout_2")
self.enableUserPasswordCheckBox = QtWidgets.QCheckBox(self.frame_4)
self.enableUserPasswordCheckBox.setObjectName("enableUserPasswordCheckBox")
self.verticalLayout_2.addWidget(self.enableUserPasswordCheckBox)
self.deleteUserPasswordCheckBox = QtWidgets.QCheckBox(self.frame_4)
self.deleteUserPasswordCheckBox.setObjectName("deleteUserPasswordCheckBox")
self.verticalLayout_2.addWidget(self.deleteUserPasswordCheckBox)
self.verticalLayout_12.addLayout(self.verticalLayout_2)
self.verticalLayout_14.addWidget(self.frame_4)
spacerItem14 = QtWidgets.QSpacerItem(20, 0, QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.MinimumExpanding)
self.verticalLayout_14.addItem(spacerItem14)
self.line_2 = QtWidgets.QFrame(self.tab_2)
self.line_2.setFrameShape(QtWidgets.QFrame.HLine)
self.line_2.setFrameShadow(QtWidgets.QFrame.Sunken)
self.line_2.setObjectName("line_2")
self.verticalLayout_14.addWidget(self.line_2)
self.horizontalLayout_5 = QtWidgets.QHBoxLayout()
self.horizontalLayout_5.setSpacing(6)
self.horizontalLayout_5.setObjectName("horizontalLayout_5")
spacerItem15 = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
self.horizontalLayout_5.addItem(spacerItem15)
self.generalCancelButton = QtWidgets.QPushButton(self.tab_2)
self.generalCancelButton.setObjectName("generalCancelButton")
self.horizontalLayout_5.addWidget(self.generalCancelButton)
self.writeGeneralConfigButton = QtWidgets.QPushButton(self.tab_2)
self.writeGeneralConfigButton.setObjectName("writeGeneralConfigButton")
self.horizontalLayout_5.addWidget(self.writeGeneralConfigButton)
self.verticalLayout_14.addLayout(self.horizontalLayout_5)
self.tabWidget.addTab(self.tab_2, "")
self.tab_3 = QtWidgets.QWidget()
self.tab_3.setObjectName("tab_3")
self.verticalLayout_19 = QtWidgets.QVBoxLayout(self.tab_3)
self.verticalLayout_19.setContentsMargins(11, 11, 11, 11)
self.verticalLayout_19.setSpacing(6)
self.verticalLayout_19.setObjectName("verticalLayout_19")
self.frame_7 = QtWidgets.QFrame(self.tab_3)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.MinimumExpanding, QtWidgets.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.frame_7.sizePolicy().hasHeightForWidth())
self.frame_7.setSizePolicy(sizePolicy)
self.frame_7.setAutoFillBackground(False)
self.frame_7.setFrameShape(QtWidgets.QFrame.StyledPanel)
self.frame_7.setFrameShadow(QtWidgets.QFrame.Sunken)
self.frame_7.setObjectName("frame_7")
self.verticalLayout_18 = QtWidgets.QVBoxLayout(self.frame_7)
self.verticalLayout_18.setContentsMargins(11, 11, 11, 11)
self.verticalLayout_18.setSpacing(6)
self.verticalLayout_18.setObjectName("verticalLayout_18")
self.gridLayout = QtWidgets.QGridLayout()
self.gridLayout.setSpacing(6)
self.gridLayout.setObjectName("gridLayout")
self.PWS_EditPassword = QtWidgets.QLineEdit(self.frame_7)
self.PWS_EditPassword.setObjectName("PWS_EditPassword")
self.gridLayout.addWidget(self.PWS_EditPassword, 3, 1, 1, 1)
self.PWS_EditLoginName = QtWidgets.QLineEdit(self.frame_7)
self.PWS_EditLoginName.setObjectName("PWS_EditLoginName")
self.gridLayout.addWidget(self.PWS_EditLoginName, 2, 1, 1, 1)
self.label_19 = QtWidgets.QLabel(self.frame_7)
self.label_19.setObjectName("label_19")
self.gridLayout.addWidget(self.label_19, 3, 0, 1, 1)
self.PWS_EditSlotName = QtWidgets.QLineEdit(self.frame_7)
self.PWS_EditSlotName.setObjectName("PWS_EditSlotName")
self.gridLayout.addWidget(self.PWS_EditSlotName, 1, 1, 1, 1)
self.label_18 = QtWidgets.QLabel(self.frame_7)
self.label_18.setObjectName("label_18")
self.gridLayout.addWidget(self.label_18, 2, 0, 1, 1)
self.PWS_ButtonClearSlot = QtWidgets.QPushButton(self.frame_7)
self.PWS_ButtonClearSlot.setObjectName("PWS_ButtonClearSlot")
self.gridLayout.addWidget(self.PWS_ButtonClearSlot, 0, 3, 1, 1)
self.label_17 = QtWidgets.QLabel(self.frame_7)
self.label_17.setObjectName("label_17")
self.gridLayout.addWidget(self.label_17, 1, 0, 1, 1)
self.PWS_ComboBoxSelectSlot = QtWidgets.QComboBox(self.frame_7)
self.PWS_ComboBoxSelectSlot.setCurrentText("Static password 0")
self.PWS_ComboBoxSelectSlot.setObjectName("PWS_ComboBoxSelectSlot")
self.PWS_ComboBoxSelectSlot.addItem("")
self.gridLayout.addWidget(self.PWS_ComboBoxSelectSlot, 0, 1, 1, 1)
self.PWS_ButtonCreatePW = QtWidgets.QPushButton(self.frame_7)
self.PWS_ButtonCreatePW.setAccessibleDescription("")
self.PWS_ButtonCreatePW.setObjectName("PWS_ButtonCreatePW")
self.gridLayout.addWidget(self.PWS_ButtonCreatePW, 3, 3, 1, 1)
self.label_16 = QtWidgets.QLabel(self.frame_7)
self.label_16.setObjectName("label_16")
self.gridLayout.addWidget(self.label_16, 0, 0, 1, 1)
self.PWS_CheckBoxHideSecret = QtWidgets.QCheckBox(self.frame_7)
self.PWS_CheckBoxHideSecret.setEnabled(True)
self.PWS_CheckBoxHideSecret.setAccessibleName("")
self.PWS_CheckBoxHideSecret.setChecked(True)
self.PWS_CheckBoxHideSecret.setObjectName("PWS_CheckBoxHideSecret")
self.gridLayout.addWidget(self.PWS_CheckBoxHideSecret, 4, 1, 1, 1)
self.l_chars_left_info = QtWidgets.QLabel(self.frame_7)
self.l_chars_left_info.setObjectName("l_chars_left_info")
self.gridLayout.addWidget(self.l_chars_left_info, 0, 2, 1, 1)
self.l_c_name = QtWidgets.QLabel(self.frame_7)
self.l_c_name.setText("...")
self.l_c_name.setAlignment(QtCore.Qt.AlignCenter)
self.l_c_name.setObjectName("l_c_name")
self.gridLayout.addWidget(self.l_c_name, 1, 2, 1, 1)
self.l_c_login = QtWidgets.QLabel(self.frame_7)
self.l_c_login.setText("...")
self.l_c_login.setAlignment(QtCore.Qt.AlignCenter)
self.l_c_login.setObjectName("l_c_login")
self.gridLayout.addWidget(self.l_c_login, 2, 2, 1, 1)
self.l_c_password = QtWidgets.QLabel(self.frame_7)
self.l_c_password.setAlignment(QtCore.Qt.AlignCenter)
self.l_c_password.setObjectName("l_c_password")
self.gridLayout.addWidget(self.l_c_password, 3, 2, 1, 1)
self.verticalLayout_18.addLayout(self.gridLayout)
self.verticalLayout_19.addWidget(self.frame_7)
self.l_utf8_info = QtWidgets.QLabel(self.tab_3)
self.l_utf8_info.setFocusPolicy(QtCore.Qt.StrongFocus)
self.l_utf8_info.setWordWrap(True)
self.l_utf8_info.setObjectName("l_utf8_info")
self.verticalLayout_19.addWidget(self.l_utf8_info)
spacerItem16 = QtWidgets.QSpacerItem(20, 0, QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.MinimumExpanding)
self.verticalLayout_19.addItem(spacerItem16)
self.line_3 = QtWidgets.QFrame(self.tab_3)
self.line_3.setFrameShape(QtWidgets.QFrame.HLine)
self.line_3.setFrameShadow(QtWidgets.QFrame.Sunken)
self.line_3.setObjectName("line_3")
self.verticalLayout_19.addWidget(self.line_3)
self.horizontalLayout_2 = QtWidgets.QHBoxLayout()
self.horizontalLayout_2.setSpacing(6)
self.horizontalLayout_2.setObjectName("horizontalLayout_2")
self.PWS_ButtonEnable = QtWidgets.QPushButton(self.tab_3)
self.PWS_ButtonEnable.setObjectName("PWS_ButtonEnable")
self.horizontalLayout_2.addWidget(self.PWS_ButtonEnable)
self.PWS_Lock = QtWidgets.QPushButton(self.tab_3)
self.PWS_Lock.setObjectName("PWS_Lock")
self.horizontalLayout_2.addWidget(self.PWS_Lock)
self.PWS_progressBar = QtWidgets.QProgressBar(self.tab_3)
self.PWS_progressBar.setProperty("value", 42)
self.PWS_progressBar.setObjectName("PWS_progressBar")
self.horizontalLayout_2.addWidget(self.PWS_progressBar)
spacerItem17 = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
self.horizontalLayout_2.addItem(spacerItem17)
self.PWS_ButtonClose = QtWidgets.QPushButton(self.tab_3)
self.PWS_ButtonClose.setObjectName("PWS_ButtonClose")
self.horizontalLayout_2.addWidget(self.PWS_ButtonClose)
self.PWS_ButtonSaveSlot = QtWidgets.QPushButton(self.tab_3)
self.PWS_ButtonSaveSlot.setAutoDefault(True)
self.PWS_ButtonSaveSlot.setDefault(True)
self.PWS_ButtonSaveSlot.setObjectName("PWS_ButtonSaveSlot")
self.horizontalLayout_2.addWidget(self.PWS_ButtonSaveSlot)
self.verticalLayout_19.addLayout(self.horizontalLayout_2)
self.tabWidget.addTab(self.tab_3, "")
self.tab_7 = QtWidgets.QWidget()
self.tab_7.setObjectName("tab_7")
self.gridLayout_7 = QtWidgets.QGridLayout(self.tab_7)
self.gridLayout_7.setContentsMargins(11, 11, 11, 11)
self.gridLayout_7.setSpacing(6)
self.gridLayout_7.setObjectName("gridLayout_7")
self.btn_dial_EV = QtWidgets.QPushButton(self.tab_7)
self.btn_dial_EV.setStyleSheet("#upLeft { background-color: transparent; border-image: url(:/images/new/icon_safe.svg); background: none; border: none; background-repeat: none; }")
icon6 = QtGui.QIcon()
icon6.addPixmap(QtGui.QPixmap(":/images/new/icon_harddrive.svg"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
self.btn_dial_EV.setIcon(icon6)
self.btn_dial_EV.setObjectName("btn_dial_EV")
self.gridLayout_7.addWidget(self.btn_dial_EV, 1, 0, 1, 1)
spacerItem18 = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Minimum)
self.gridLayout_7.addItem(spacerItem18, 1, 1, 1, 1)
self.btn_dial_HV = QtWidgets.QPushButton(self.tab_7)
self.btn_dial_HV.setStyleSheet("#upLeft { background-color: transparent; border-image: url(:/images/new/icon_safe.svg); background: none; border: none; background-repeat: none; }")
self.btn_dial_HV.setIcon(icon6)
self.btn_dial_HV.setObjectName("btn_dial_HV")
self.gridLayout_7.addWidget(self.btn_dial_HV, 1, 2, 1, 1)
spacerItem19 = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
self.gridLayout_7.addItem(spacerItem19, 1, 3, 1, 1)
self.tabWidget.addTab(self.tab_7, "")
self.tab_6 = QtWidgets.QWidget()
self.tab_6.setObjectName("tab_6")
self.gridLayout_8 = QtWidgets.QGridLayout(self.tab_6)
self.gridLayout_8.setContentsMargins(11, 11, 11, 11)
self.gridLayout_8.setSpacing(6)
self.gridLayout_8.setObjectName("gridLayout_8")
self.tableWidget = QtWidgets.QTableWidget(self.tab_6)
self.tableWidget.setObjectName("tableWidget")
self.tableWidget.setColumnCount(4)
self.tableWidget.setRowCount(3)
item = QtWidgets.QTableWidgetItem()
self.tableWidget.setVerticalHeaderItem(0, item)
item = QtWidgets.QTableWidgetItem()
self.tableWidget.setVerticalHeaderItem(1, item)
item = QtWidgets.QTableWidgetItem()
self.tableWidget.setVerticalHeaderItem(2, item)
item = QtWidgets.QTableWidgetItem()
self.tableWidget.setHorizontalHeaderItem(0, item)
item = QtWidgets.QTableWidgetItem()
self.tableWidget.setHorizontalHeaderItem(1, item)
item = QtWidgets.QTableWidgetItem()
self.tableWidget.setHorizontalHeaderItem(2, item)
item = QtWidgets.QTableWidgetItem()
self.tableWidget.setHorizontalHeaderItem(3, item)
item = QtWidgets.QTableWidgetItem()
self.tableWidget.setItem(0, 0, item)
item = QtWidgets.QTableWidgetItem()
self.tableWidget.setItem(0, 3, item)
self.gridLayout_8.addWidget(self.tableWidget, 0, 0, 1, 1)
self.tabWidget.addTab(self.tab_6, "")
self.tab_4 = QtWidgets.QWidget()
self.tab_4.setObjectName("tab_4")
self.verticalLayout_73 = QtWidgets.QVBoxLayout(self.tab_4)
self.verticalLayout_73.setContentsMargins(11, 11, 11, 11)
self.verticalLayout_73.setSpacing(6)
self.verticalLayout_73.setObjectName("verticalLayout_73")
self.verticalLayout_72 = QtWidgets.QVBoxLayout()
self.verticalLayout_72.setSpacing(6)
self.verticalLayout_72.setObjectName("verticalLayout_72")
self.frame_9 = QtWidgets.QFrame(self.tab_4)
self.frame_9.setFrameShape(QtWidgets.QFrame.StyledPanel)
self.frame_9.setFrameShadow(QtWidgets.QFrame.Raised)
self.frame_9.setObjectName("frame_9")
self.verticalLayout_15 = QtWidgets.QVBoxLayout(self.frame_9)
self.verticalLayout_15.setContentsMargins(11, 11, 11, 11)
self.verticalLayout_15.setSpacing(6)
self.verticalLayout_15.setObjectName("verticalLayout_15")
self.gr_general = QtWidgets.QGroupBox(self.frame_9)
self.gr_general.setObjectName("gr_general")
self.verticalLayout_3 = QtWidgets.QVBoxLayout(self.gr_general)
self.verticalLayout_3.setContentsMargins(11, 11, 11, 11)
self.verticalLayout_3.setSpacing(6)
self.verticalLayout_3.setObjectName("verticalLayout_3")
self.horizontalLayout_19 = QtWidgets.QHBoxLayout()
self.horizontalLayout_19.setSpacing(6)
self.horizontalLayout_19.setObjectName("horizontalLayout_19")
self.cb_first_run_message = QtWidgets.QCheckBox(self.gr_general)
self.cb_first_run_message.setObjectName("cb_first_run_message")
self.horizontalLayout_19.addWidget(self.cb_first_run_message)
self.cb_show_window_on_start = QtWidgets.QCheckBox(self.gr_general)
self.cb_show_window_on_start.setObjectName("cb_show_window_on_start")
self.horizontalLayout_19.addWidget(self.cb_show_window_on_start)
self.verticalLayout_3.addLayout(self.horizontalLayout_19)
self.horizontalLayout_17 = QtWidgets.QHBoxLayout()
self.horizontalLayout_17.setSpacing(6)
self.horizontalLayout_17.setObjectName("horizontalLayout_17")
self.cb_hide_main_window_on_close = QtWidgets.QCheckBox(self.gr_general)
self.cb_hide_main_window_on_close.setChecked(True)
self.cb_hide_main_window_on_close.setObjectName("cb_hide_main_window_on_close")
self.horizontalLayout_17.addWidget(self.cb_hide_main_window_on_close)
self.cb_hide_main_window_on_connection = QtWidgets.QCheckBox(self.gr_general)
self.cb_hide_main_window_on_connection.setChecked(True)
self.cb_hide_main_window_on_connection.setObjectName("cb_hide_main_window_on_connection")
self.horizontalLayout_17.addWidget(self.cb_hide_main_window_on_connection)
self.verticalLayout_3.addLayout(self.horizontalLayout_17)
self.cb_show_main_window_on_connection = QtWidgets.QCheckBox(self.gr_general)
self.cb_show_main_window_on_connection.setChecked(True)
self.cb_show_main_window_on_connection.setObjectName("cb_show_main_window_on_connection")
self.verticalLayout_3.addWidget(self.cb_show_main_window_on_connection)
self.cb_check_symlink = QtWidgets.QCheckBox(self.gr_general)
self.cb_check_symlink.setChecked(True)
self.cb_check_symlink.setObjectName("cb_check_symlink")
self.verticalLayout_3.addWidget(self.cb_check_symlink)
self.cb_device_connection_message = QtWidgets.QCheckBox(self.gr_general)
self.cb_device_connection_message.setChecked(True)
self.cb_device_connection_message.setObjectName("cb_device_connection_message")
self.verticalLayout_3.addWidget(self.cb_device_connection_message)
self.horizontalLayout_12 = QtWidgets.QHBoxLayout()
self.horizontalLayout_12.setSpacing(6)
self.horizontalLayout_12.setObjectName("horizontalLayout_12")
self.label_28 = QtWidgets.QLabel(self.gr_general)
self.label_28.setObjectName("label_28")
self.horizontalLayout_12.addWidget(self.label_28)
self.combo_languages = QtWidgets.QComboBox(self.gr_general)
self.combo_languages.setObjectName("combo_languages")
self.horizontalLayout_12.addWidget(self.combo_languages)
self.verticalLayout_3.addLayout(self.horizontalLayout_12)
self.verticalLayout_15.addWidget(self.gr_general)
self.verticalLayout_72.addWidget(self.frame_9)
self.verticalLayout_71 = QtWidgets.QVBoxLayout()
self.verticalLayout_71.setSpacing(6)
self.verticalLayout_71.setObjectName("verticalLayout_71")
self.groupBox = QtWidgets.QGroupBox(self.tab_4)
self.groupBox.setObjectName("groupBox")
self.gridLayout_3 = QtWidgets.QGridLayout(self.groupBox)
self.gridLayout_3.setContentsMargins(11, 11, 11, 11)
self.gridLayout_3.setSpacing(6)
self.gridLayout_3.setObjectName("gridLayout_3")
self.edit_debug_file_path = QtWidgets.QLineEdit(self.groupBox)
self.edit_debug_file_path.setObjectName("edit_debug_file_path")
self.gridLayout_3.addWidget(self.edit_debug_file_path, 1, 1, 1, 1)
self.spin_debug_verbosity = QtWidgets.QSpinBox(self.groupBox)
self.spin_debug_verbosity.setMaximum(6)
self.spin_debug_verbosity.setProperty("value", 2)
self.spin_debug_verbosity.setObjectName("spin_debug_verbosity")
self.gridLayout_3.addWidget(self.spin_debug_verbosity, 3, 1, 1, 1)
self.label_10 = QtWidgets.QLabel(self.groupBox)
self.label_10.setObjectName("label_10")
self.gridLayout_3.addWidget(self.label_10, 1, 0, 1, 1)
self.label_20 = QtWidgets.QLabel(self.groupBox)
self.label_20.setObjectName("label_20")
self.gridLayout_3.addWidget(self.label_20, 3, 0, 1, 1)
self.cb_debug_enabled = QtWidgets.QCheckBox(self.groupBox)
self.cb_debug_enabled.setAccessibleName("")
self.cb_debug_enabled.setObjectName("cb_debug_enabled")
self.gridLayout_3.addWidget(self.cb_debug_enabled, 0, 0, 1, 1)
self.btn_select_debug_console = QtWidgets.QPushButton(self.groupBox)
self.btn_select_debug_console.setObjectName("btn_select_debug_console")
self.gridLayout_3.addWidget(self.btn_select_debug_console, 1, 2, 1, 1)
self.btn_select_debug_file_path = QtWidgets.QPushButton(self.groupBox)
self.btn_select_debug_file_path.setObjectName("btn_select_debug_file_path")
self.gridLayout_3.addWidget(self.btn_select_debug_file_path, 1, 3, 1, 1)
self.verticalLayout_71.addWidget(self.groupBox)
self.groupBox_2 = QtWidgets.QGroupBox(self.tab_4)
self.groupBox_2.setObjectName("groupBox_2")
self.gridLayout_4 = QtWidgets.QGridLayout(self.groupBox_2)
self.gridLayout_4.setContentsMargins(11, 11, 11, 11)
self.gridLayout_4.setSpacing(6)
self.gridLayout_4.setObjectName("gridLayout_4")
self.spin_OTP_time = QtWidgets.QSpinBox(self.groupBox_2)
self.spin_OTP_time.setMinimum(10)
self.spin_OTP_time.setMaximum(600)
self.spin_OTP_time.setProperty("value", 120)
self.spin_OTP_time.setObjectName("spin_OTP_time")
self.gridLayout_4.addWidget(self.spin_OTP_time, 3, 1, 1, 1)
self.label_30 = QtWidgets.QLabel(self.groupBox_2)
self.label_30.setObjectName("label_30")
self.gridLayout_4.addWidget(self.label_30, 1, 0, 1, 1)
self.spin_PWS_time = QtWidgets.QSpinBox(self.groupBox_2)
self.spin_PWS_time.setMinimum(10)
self.spin_PWS_time.setMaximum(600)
self.spin_PWS_time.setProperty("value", 60)
self.spin_PWS_time.setObjectName("spin_PWS_time")
self.gridLayout_4.addWidget(self.spin_PWS_time, 1, 1, 1, 1)
self.label_31 = QtWidgets.QLabel(self.groupBox_2)
self.label_31.setObjectName("label_31")
self.gridLayout_4.addWidget(self.label_31, 3, 0, 1, 1)
spacerItem20 = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
self.gridLayout_4.addItem(spacerItem20, 1, 2, 1, 1)
self.verticalLayout_71.addWidget(self.groupBox_2)
spacerItem21 = QtWidgets.QSpacerItem(20, 0, QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.MinimumExpanding)
self.verticalLayout_71.addItem(spacerItem21)
self.line_10 = QtWidgets.QFrame(self.tab_4)
self.line_10.setFrameShape(QtWidgets.QFrame.HLine)
self.line_10.setFrameShadow(QtWidgets.QFrame.Sunken)
self.line_10.setObjectName("line_10")
self.verticalLayout_71.addWidget(self.line_10)
self.verticalLayout_72.addLayout(self.verticalLayout_71)
self.horizontalLayout_11 = QtWidgets.QHBoxLayout()
self.horizontalLayout_11.setSpacing(6)
self.horizontalLayout_11.setObjectName("horizontalLayout_11")
spacerItem22 = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
self.horizontalLayout_11.addItem(spacerItem22)
self.btn_cancelSettings = QtWidgets.QPushButton(self.tab_4)
self.btn_cancelSettings.setObjectName("btn_cancelSettings")
self.horizontalLayout_11.addWidget(self.btn_cancelSettings)
self.btn_writeSettings = QtWidgets.QPushButton(self.tab_4)
self.btn_writeSettings.setObjectName("btn_writeSettings")
self.horizontalLayout_11.addWidget(self.btn_writeSettings)
self.verticalLayout_72.addLayout(self.horizontalLayout_11)
self.verticalLayout_73.addLayout(self.verticalLayout_72)
self.tabWidget.addTab(self.tab_4, "")
self.verticalLayout.addWidget(self.tabWidget)
MainWindow.setCentralWidget(self.centralWidget)
self.statusBar = QtWidgets.QStatusBar(MainWindow)
self.statusBar.setSizeGripEnabled(False)
self.statusBar.setObjectName("statusBar")
MainWindow.setStatusBar(self.statusBar)
self.menuBar = QtWidgets.QMenuBar(MainWindow)
self.menuBar.setGeometry(QtCore.QRect(0, 0, 932, 22))
self.menuBar.setObjectName("menuBar")
MainWindow.setMenuBar(self.menuBar)
self.label_10.setBuddy(self.edit_debug_file_path)
self.label_20.setBuddy(self.spin_debug_verbosity)
self.label_30.setBuddy(self.spin_debug_verbosity)
self.label_31.setBuddy(self.spin_debug_verbosity)
self.retranslateUi(MainWindow)
self.tabWidget.setCurrentIndex(5)
self.slotComboBox.setCurrentIndex(0)
self.PWS_ComboBoxSelectSlot.setCurrentIndex(0)
self.btn_cancelSettings.clicked.connect(MainWindow.hide)
self.cancelButton.clicked.connect(MainWindow.hide)
self.cb_debug_enabled.toggled['bool'].connect(self.edit_debug_file_path.setEnabled)
self.cb_debug_enabled.toggled['bool'].connect(self.spin_debug_verbosity.setEnabled)
self.cb_debug_enabled.toggled['bool'].connect(self.btn_select_debug_file_path.setEnabled)
self.generalCancelButton.clicked.connect(MainWindow.hide)
self.cb_debug_enabled.toggled['bool'].connect(self.btn_select_debug_console.setEnabled)
QtCore.QMetaObject.connectSlotsByName(MainWindow)
MainWindow.setTabOrder(self.radioButton_2, self.radioButton)
MainWindow.setTabOrder(self.radioButton, self.slotComboBox)
MainWindow.setTabOrder(self.slotComboBox, self.nameEdit)
MainWindow.setTabOrder(self.nameEdit, self.eraseButton)
MainWindow.setTabOrder(self.eraseButton, self.base32RadioButton)
MainWindow.setTabOrder(self.base32RadioButton, self.hexRadioButton)
MainWindow.setTabOrder(self.hexRadioButton, self.secretEdit)
MainWindow.setTabOrder(self.secretEdit, self.checkBox)
MainWindow.setTabOrder(self.checkBox, self.intervalSpinBox)
MainWindow.setTabOrder(self.intervalSpinBox, self.digits6radioButton)
MainWindow.setTabOrder(self.digits6radioButton, self.digits8radioButton)
MainWindow.setTabOrder(self.digits8radioButton, self.counterEdit)
MainWindow.setTabOrder(self.counterEdit, self.setToZeroButton)
MainWindow.setTabOrder(self.setToZeroButton, self.setToRandomButton)
MainWindow.setTabOrder(self.setToRandomButton, self.cancelButton)
MainWindow.setTabOrder(self.cancelButton, self.writeButton)
MainWindow.setTabOrder(self.writeButton, self.enableUserPasswordCheckBox)
MainWindow.setTabOrder(self.enableUserPasswordCheckBox, self.deleteUserPasswordCheckBox)
MainWindow.setTabOrder(self.deleteUserPasswordCheckBox, self.PWS_ComboBoxSelectSlot)
MainWindow.setTabOrder(self.PWS_ComboBoxSelectSlot, self.PWS_ButtonClearSlot)
MainWindow.setTabOrder(self.PWS_ButtonClearSlot, self.PWS_EditSlotName)
MainWindow.setTabOrder(self.PWS_EditSlotName, self.PWS_EditLoginName)
MainWindow.setTabOrder(self.PWS_EditLoginName, self.PWS_EditPassword)
MainWindow.setTabOrder(self.PWS_EditPassword, self.PWS_ButtonCreatePW)
MainWindow.setTabOrder(self.PWS_ButtonCreatePW, self.PWS_CheckBoxHideSecret)
MainWindow.setTabOrder(self.PWS_CheckBoxHideSecret, self.l_utf8_info)
MainWindow.setTabOrder(self.l_utf8_info, self.PWS_ButtonEnable)
MainWindow.setTabOrder(self.PWS_ButtonEnable, self.PWS_Lock)
MainWindow.setTabOrder(self.PWS_Lock, self.PWS_ButtonClose)
MainWindow.setTabOrder(self.PWS_ButtonClose, self.PWS_ButtonSaveSlot)
MainWindow.setTabOrder(self.PWS_ButtonSaveSlot, self.cb_first_run_message)
MainWindow.setTabOrder(self.cb_first_run_message, self.cb_show_window_on_start)
MainWindow.setTabOrder(self.cb_show_window_on_start, self.cb_check_symlink)
MainWindow.setTabOrder(self.cb_check_symlink, self.cb_device_connection_message)
MainWindow.setTabOrder(self.cb_device_connection_message, self.cb_show_main_window_on_connection)
MainWindow.setTabOrder(self.cb_show_main_window_on_connection, self.cb_hide_main_window_on_connection)
MainWindow.setTabOrder(self.cb_hide_main_window_on_connection, self.cb_hide_main_window_on_close)
MainWindow.setTabOrder(self.cb_hide_main_window_on_close, self.combo_languages)
MainWindow.setTabOrder(self.combo_languages, self.cb_debug_enabled)
MainWindow.setTabOrder(self.cb_debug_enabled, self.edit_debug_file_path)
MainWindow.setTabOrder(self.edit_debug_file_path, self.btn_select_debug_console)
MainWindow.setTabOrder(self.btn_select_debug_console, self.btn_select_debug_file_path)
MainWindow.setTabOrder(self.btn_select_debug_file_path, self.spin_debug_verbosity)
MainWindow.setTabOrder(self.spin_debug_verbosity, self.spin_PWS_time)
MainWindow.setTabOrder(self.spin_PWS_time, self.spin_OTP_time)
MainWindow.setTabOrder(self.spin_OTP_time, self.btn_cancelSettings)
MainWindow.setTabOrder(self.btn_cancelSettings, self.btn_writeSettings)
MainWindow.setTabOrder(self.btn_writeSettings, self.writeGeneralConfigButton)
MainWindow.setTabOrder(self.writeGeneralConfigButton, self.generalCancelButton)
MainWindow.setTabOrder(self.generalCancelButton, self.secret_key_generated_len)
MainWindow.setTabOrder(self.secret_key_generated_len, self.randomSecretButton)
MainWindow.setTabOrder(self.randomSecretButton, self.btn_copyToClipboard)
def retranslateUi(self, MainWindow):
    """Apply (re)translated text to every widget built in setupUi().

    Called once from setupUi() and again whenever the application language
    changes; all user-visible strings are routed through
    QCoreApplication.translate with the "MainWindow" context.

    NOTE(review): this module is generated by pyuic5 from a Qt Designer .ui
    file, so the string fixes below should also be ported back to the .ui
    source or they will be lost on regeneration. Fixes applied:
      * "loose" -> "lose" in the two OTP secret warnings,
      * "TIme" -> "Time" in the Password Safe clipboard accessible name,
      * secret_key_generated_len accessible name corrected from the
        copy-pasted "TOTP interval value" to match its label (label_7).
    """
    _translate = QtCore.QCoreApplication.translate
    MainWindow.setWindowTitle(_translate("MainWindow", "Nitrokey App"))
    # --- Overview tab ---
    self.btn_dial_PWS.setText(_translate("MainWindow", "Unlock Password Safe"))
    self.tabWidget.setTabText(self.tabWidget.indexOf(self.tab_5), _translate("MainWindow", "Overview"))
    # --- OTP slot configuration tab ---
    self.checkBox_2.setText(_translate("MainWindow", "Advanced mode"))
    self.label_24.setText(_translate("MainWindow", "Manage slots"))
    self.radioButton_2.setAccessibleName(_translate("MainWindow", "Select slot type: TOTP"))
    self.radioButton_2.setAccessibleDescription(_translate("MainWindow", "(Recommendation: Use TOTP for web applications and HOTP for local applications)"))
    self.radioButton_2.setText(_translate("MainWindow", "TOTP"))
    self.radioButton.setAccessibleName(_translate("MainWindow", "Select slot type: HOTP"))
    self.radioButton.setAccessibleDescription(_translate("MainWindow", "(Recommendation: Use TOTP for web applications and HOTP for local applications)"))
    self.radioButton.setText(_translate("MainWindow", "HOTP"))
    self.label_25.setText(_translate("MainWindow", "(Recommendation: Use TOTP for web applications and HOTP for local applications)"))
    self.label.setText(_translate("MainWindow", "Slot:"))
    self.slotComboBox.setAccessibleName(_translate("MainWindow", "Select OTP slot number"))
    self.label_2.setText(_translate("MainWindow", "Name:"))
    self.nameEdit.setAccessibleName(_translate("MainWindow", "Slot name"))
    self.eraseButton.setText(_translate("MainWindow", "Erase Slot"))
    self.label_21.setText(_translate("MainWindow", "Secret key"))
    self.label_4.setText(_translate("MainWindow", "Input format:"))
    self.base32RadioButton.setToolTip(_translate("MainWindow", "Example: \"ZR3M5I...\""))
    self.base32RadioButton.setAccessibleName(_translate("MainWindow", "Secret input format: base32"))
    self.base32RadioButton.setText(_translate("MainWindow", "Base32"))
    self.hexRadioButton.setToolTip(_translate("MainWindow", "Example: \"A3911C05...\" (remove any 0x prefix)"))
    self.hexRadioButton.setAccessibleName(_translate("MainWindow", "Secret input format: hex"))
    self.hexRadioButton.setText(_translate("MainWindow", "Hex"))
    self.l_supportedLength.setAccessibleName(_translate("MainWindow", "Entered OTP \'Secret Key\' string is longer than supported by this device"))
    self.l_supportedLength.setAccessibleDescription(_translate("MainWindow", "Label shown when the OTP secret key is too long"))
    self.l_supportedLength.setText(_translate("MainWindow", "<html><head/><body><p><span style=\" font-weight:600; color:#a40000;\">Entered OTP \'Secret Key\' string is longer than supported by this device</span></p></body></html>"))
    self.label_3.setToolTip(_translate("MainWindow", "<html><head/><body><p>The secret is provided by your service provider you may want to login or can be configured in your local application which you may want to login to.</p></body></html>"))
    self.label_3.setText(_translate("MainWindow", "Secret Key:"))
    self.secretEdit.setToolTip(_translate("MainWindow", "The secret is provided by your service provider you may want to login or can be configured in your local application which you may want to login to."))
    self.secretEdit.setAccessibleName(_translate("MainWindow", "OTP secret key"))
    # FIX: "loose" -> "lose" in the user-facing warning below.
    self.secretEdit.setAccessibleDescription(_translate("MainWindow", "Note: 2nd factors aren\'t protected against physical attacks. Change all OTP secrets in case you lose the Nitrokey."))
    self.secretEdit.setPlaceholderText(_translate("MainWindow", "********************************"))
    self.labelNotify.setText(_translate("MainWindow", "Secret copied to clipboard"))
    self.checkBox.setToolTip(_translate("MainWindow", "<html><head/><body><p>Hide or show the secret.</p></body></html>"))
    self.checkBox.setText(_translate("MainWindow", "Hide secret"))
    self.label_7.setText(_translate("MainWindow", "Generated secret target length (bytes):"))
    # FIX: accessible name was a copy-paste of the TOTP interval spin box;
    # it now matches label_7, which labels this widget.
    self.secret_key_generated_len.setAccessibleName(_translate("MainWindow", "Generated secret target length (bytes)"))
    self.randomSecretButton.setToolTip(_translate("MainWindow", "After generating a random secret, you would need to copy it into your application or service where you want to login to."))
    self.randomSecretButton.setText(_translate("MainWindow", "Generate random secret"))
    self.btn_copyToClipboard.setToolTip(_translate("MainWindow", "Copy secret to clipboard"))
    self.btn_copyToClipboard.setText(_translate("MainWindow", "Copy to clipboard"))
    # FIX: "loose" -> "lose" in the user-facing warning below.
    self.label_26.setText(_translate("MainWindow", "Note: 2<sup>nd</sup> factors aren\'t protected against physical attacks. Change all OTP secrets in case you lose the Nitrokey."))
    self.label_22.setText(_translate("MainWindow", "Parameters"))
    self.counterEdit.setAccessibleName(_translate("MainWindow", "HOTP moving factor seed"))
    self.counterEdit.setText(_translate("MainWindow", "00000000000000000000"))
    self.setToZeroButton.setAccessibleName(_translate("MainWindow", "Set HOTP counter to zero"))
    self.setToZeroButton.setText(_translate("MainWindow", "Set to zero"))
    self.setToRandomButton.setAccessibleName(_translate("MainWindow", "Set HOTP counter to random value"))
    self.setToRandomButton.setText(_translate("MainWindow", "Set to random"))
    self.digits6radioButton.setAccessibleName(_translate("MainWindow", "OTP code length: 6 digits"))
    self.digits6radioButton.setText(_translate("MainWindow", "6 digits"))
    self.digits8radioButton.setAccessibleName(_translate("MainWindow", "OTP code length: 8 digits"))
    self.digits8radioButton.setText(_translate("MainWindow", "8 digits"))
    self.intervalSpinBox.setAccessibleName(_translate("MainWindow", "TOTP interval value"))
    self.label_6.setText(_translate("MainWindow", "Moving factor seed:"))
    self.label_5.setText(_translate("MainWindow", "HOTP length:"))
    self.intervalLabel.setText(_translate("MainWindow", "TOTP interval:"))
    self.cancelButton.setText(_translate("MainWindow", "Cancel"))
    self.writeButton.setText(_translate("MainWindow", "Save"))
    self.tabWidget.setTabText(self.tabWidget.indexOf(self.tab), _translate("MainWindow", "OTP Slot Configuration"))
    # --- OTP general tab ---
    self.label_11.setText(_translate("MainWindow", "OTP Password settings"))
    self.enableUserPasswordCheckBox.setText(_translate("MainWindow", "Protect OTP by user PIN (will be requested on first use each session)"))
    self.deleteUserPasswordCheckBox.setText(_translate("MainWindow", "Forget user PIN after 10 minutes (if unchecked user PIN will remain in memory until application exits)"))
    self.generalCancelButton.setText(_translate("MainWindow", "Cancel"))
    self.writeGeneralConfigButton.setText(_translate("MainWindow", "Save"))
    self.tabWidget.setTabText(self.tabWidget.indexOf(self.tab_2), _translate("MainWindow", "OTP General"))
    # --- Password Safe tab ---
    self.PWS_EditPassword.setAccessibleName(_translate("MainWindow", "Password:"))
    self.PWS_EditLoginName.setAccessibleName(_translate("MainWindow", "Login name:"))
    self.label_19.setText(_translate("MainWindow", "Password:"))
    self.PWS_EditSlotName.setAccessibleName(_translate("MainWindow", "Slot name:"))
    self.label_18.setText(_translate("MainWindow", "Login name:"))
    self.PWS_ButtonClearSlot.setAccessibleName(_translate("MainWindow", "Erase Password Safe slot"))
    self.PWS_ButtonClearSlot.setText(_translate("MainWindow", "Erase Slot"))
    self.label_17.setText(_translate("MainWindow", "Slot name:"))
    self.PWS_ComboBoxSelectSlot.setAccessibleName(_translate("MainWindow", "Password Safe slot number"))
    self.PWS_ComboBoxSelectSlot.setItemText(0, _translate("MainWindow", "Static password 0"))
    self.PWS_ButtonCreatePW.setText(_translate("MainWindow", "Generate random password"))
    self.label_16.setText(_translate("MainWindow", "Slot:"))
    self.PWS_CheckBoxHideSecret.setText(_translate("MainWindow", "Hide secret"))
    self.l_chars_left_info.setText(_translate("MainWindow", "Characters left:"))
    self.l_c_password.setText(_translate("MainWindow", "..."))
    self.l_utf8_info.setText(_translate("MainWindow", "<html><head/><body><p>Password Safe fields support UTF8 data. It means that you can use your national characters here. Please remember however that non-English characters could take more space (up to 4 characters). The counters next to each field are to inform how much more standard English characters can given field accept.</p></body></html>"))
    self.PWS_ButtonEnable.setText(_translate("MainWindow", "Unlock Password Safe"))
    self.PWS_Lock.setText(_translate("MainWindow", "Lock Device"))
    self.PWS_ButtonClose.setText(_translate("MainWindow", "Cancel"))
    self.PWS_ButtonSaveSlot.setText(_translate("MainWindow", "Save"))
    self.tabWidget.setTabText(self.tabWidget.indexOf(self.tab_3), _translate("MainWindow", "Password Safe"))
    # --- Storage tab ---
    self.btn_dial_EV.setText(_translate("MainWindow", "Unlock Encrypted Volume"))
    self.btn_dial_HV.setText(_translate("MainWindow", "Unlock Hidden Volume"))
    self.tabWidget.setTabText(self.tabWidget.indexOf(self.tab_7), _translate("MainWindow", "Storage"))
    # --- FIDO2 tab (placeholder table with Designer sample data) ---
    item = self.tableWidget.verticalHeaderItem(0)
    item.setText(_translate("MainWindow", "erstes Ding"))
    item = self.tableWidget.verticalHeaderItem(1)
    item.setText(_translate("MainWindow", "Neue Zeile"))
    item = self.tableWidget.verticalHeaderItem(2)
    item.setText(_translate("MainWindow", "Neue Zeile"))
    item = self.tableWidget.horizontalHeaderItem(0)
    item.setText(_translate("MainWindow", "Name"))
    item = self.tableWidget.horizontalHeaderItem(1)
    item.setText(_translate("MainWindow", "Stuff"))
    item = self.tableWidget.horizontalHeaderItem(2)
    item.setText(_translate("MainWindow", "Neue Spalte"))
    item = self.tableWidget.horizontalHeaderItem(3)
    item.setText(_translate("MainWindow", "Copy"))
    # Sorting is suspended while cell texts would be set, then restored
    # (standard pyuic pattern; no item texts are assigned here).
    __sortingEnabled = self.tableWidget.isSortingEnabled()
    self.tableWidget.setSortingEnabled(False)
    self.tableWidget.setSortingEnabled(__sortingEnabled)
    self.tabWidget.setTabText(self.tabWidget.indexOf(self.tab_6), _translate("MainWindow", "FIDO2"))
    # --- Settings tab ---
    self.gr_general.setTitle(_translate("MainWindow", "General"))
    self.cb_first_run_message.setText(_translate("MainWindow", "Show first-run message"))
    self.cb_show_window_on_start.setText(_translate("MainWindow", "Show main window on start"))
    self.cb_hide_main_window_on_close.setText(_translate("MainWindow", "Do not quit when the main window is closed"))
    self.cb_hide_main_window_on_connection.setText(_translate("MainWindow", "Hide main window when device disconnects"))
    self.cb_show_main_window_on_connection.setText(_translate("MainWindow", "Show main window when device connects"))
    self.cb_check_symlink.setText(_translate("MainWindow", "Show warning when no partitions could be detected on Encrypted Volume (Linux only)"))
    self.cb_device_connection_message.setText(_translate("MainWindow", "Show message about device\'s connection / disconnection"))
    self.label_28.setText(_translate("MainWindow", "<html><head/><body><p>Translation file (needs restart)</p></body></html>"))
    self.combo_languages.setAccessibleName(_translate("MainWindow", "Translation file (needs restart)"))
    self.groupBox.setTitle(_translate("MainWindow", "Debug log settings"))
    self.edit_debug_file_path.setAccessibleName(_translate("MainWindow", "Path for debug log file:"))
    self.spin_debug_verbosity.setAccessibleName(_translate("MainWindow", "Verbosity level:"))
    self.label_10.setText(_translate("MainWindow", "Path for debug log file:"))
    self.label_20.setText(_translate("MainWindow", "Verbosity level:"))
    self.cb_debug_enabled.setText(_translate("MainWindow", "Logging enabled"))
    self.btn_select_debug_console.setText(_translate("MainWindow", "Log to console"))
    self.btn_select_debug_file_path.setText(_translate("MainWindow", "Select path"))
    self.groupBox_2.setTitle(_translate("MainWindow", "Clipboard settings"))
    self.spin_OTP_time.setAccessibleName(_translate("MainWindow", "Time to store OTP secrets in clipboard (in seconds):"))
    self.label_30.setText(_translate("MainWindow", "Time to store Password Safe secrets in clipboard (in seconds):"))
    # FIX: "TIme" -> "Time" in the accessible name below.
    self.spin_PWS_time.setAccessibleName(_translate("MainWindow", "Time to store Password Safe secrets in clipboard (in seconds):"))
    self.label_31.setText(_translate("MainWindow", "Time to store OTP secrets in clipboard (in seconds):"))
    self.btn_cancelSettings.setText(_translate("MainWindow", "Cancel"))
    self.btn_writeSettings.setText(_translate("MainWindow", "Save"))
    self.tabWidget.setTabText(self.tabWidget.indexOf(self.tab_4), _translate("MainWindow", "Settings"))
import resources_rc
| 65,541 | 7 | 76 |
45b79edc02fd781ca28eb52676d72c73c84cd286 | 452 | py | Python | src/worker/__init__.py | mehsoy/jaws | b79723c1fc549741494ebf5d948e94a44e971f2a | [
"MIT"
] | 1 | 2019-06-17T17:01:17.000Z | 2019-06-17T17:01:17.000Z | src/worker/__init__.py | mehsoy/jaws | b79723c1fc549741494ebf5d948e94a44e971f2a | [
"MIT"
] | 7 | 2021-02-08T20:46:15.000Z | 2021-09-08T02:12:59.000Z | src/worker/__init__.py | mehsoy/jaws | b79723c1fc549741494ebf5d948e94a44e971f2a | [
"MIT"
] | null | null | null | #!/usr/bin/python
#-*- coding: utf-8 -*-
""".. module:: blueprints """
import json
from flask import request, abort, current_app
| 23.789474 | 52 | 0.575221 | #!/usr/bin/python
#-*- coding: utf-8 -*-
""".. module:: blueprints """
import json
from flask import request, abort, current_app
def auth_required(func):
    """Decorator for Flask views that enforces cookie-token authentication.

    The request must carry a ``token`` cookie whose value equals
    ``current_app.config['token']``; otherwise the request is rejected with
    HTTP 401. Any failure while reading the cookie or the config value is
    also treated as unauthorized.
    """
    # Local import keeps this edit self-contained within the decorator.
    import functools

    # functools.wraps preserves the wrapped view's name/docstring; without it
    # every decorated view is registered under the name 'check_auth', which
    # makes Flask endpoint names collide when the decorator is reused.
    @functools.wraps(func)
    def check_auth(*args, **kwargs):
        try:
            # BUG FIX: the original line ended with a stray trailing comma,
            # turning `token` into a 1-tuple so the comparison always failed.
            token = request.cookies.get('token')
            expected = current_app.config['token']
        except Exception:
            # Missing config key / no request context -> treat as unauthorized.
            abort(401)
        if token != expected:
            abort(401)
        return func(*args, **kwargs)

    return check_auth
| 298 | 0 | 23 |
037089f36a7441ea98b0f35ec71d28ad247195e7 | 917 | py | Python | apiview_viewset_with_models/permissions.py | arabindamahato/DjangoREST_API_DRF | 5292f47c4f29e64de4b5c15dc254a892b2661639 | [
"MIT"
] | null | null | null | apiview_viewset_with_models/permissions.py | arabindamahato/DjangoREST_API_DRF | 5292f47c4f29e64de4b5c15dc254a892b2661639 | [
"MIT"
] | 4 | 2021-03-19T01:51:07.000Z | 2021-09-22T18:52:10.000Z | apiview_viewset_with_models/permissions.py | arabindamahato/DjangoREST_API_DRF | 5292f47c4f29e64de4b5c15dc254a892b2661639 | [
"MIT"
] | null | null | null | from rest_framework.permissions import BasePermission, SAFE_METHODS
'''
If the name is arabinda then allow all methods If the name is not arabinda and the name
contains even number of characters then allow only SAFE_METHODS otherwise not allowed
to perform any operation.
'''
| 24.131579 | 88 | 0.742639 | from rest_framework.permissions import BasePermission, SAFE_METHODS
class IsReadOnly(BasePermission):
    """Allow access only for the safe (read-only) HTTP methods."""

    def has_permission(self, request, view):
        # Membership in SAFE_METHODS already yields the boolean we need.
        return request.method in SAFE_METHODS
class IsGetOrPatch(BasePermission):
    """Allow only GET and PATCH requests."""

    def has_permission(self, request, view):
        # Inlined the allowed-method list as a tuple literal and return the
        # membership test directly instead of branching to True/False.
        return request.method in ('GET', 'PATCH')
'''
If the username is 'arabinda', all HTTP methods are allowed. If the username
is not 'arabinda' but contains an even number of characters, only SAFE_METHODS
are allowed. Otherwise no operation is permitted.
'''
class IsArabinda(BasePermission):
    """Permission keyed on the requesting user's name.

    * Username 'arabinda' (case-insensitive): every method is allowed.
    * Any other non-empty username of even length: only SAFE_METHODS.
    * Otherwise: access denied.
    """

    def has_permission(self, request, view):
        user = request.user.username
        # FIX: removed a leftover debug print(user) that leaked usernames
        # to stdout on every permission check.
        if user.lower() == 'arabinda':
            return True
        # Non-empty, even-length usernames may perform read-only requests.
        return bool(user) and len(user) % 2 == 0 and request.method in SAFE_METHODS
| 458 | 38 | 137 |
bad2d5229202ece78e93c204d8d954bec36390b7 | 552 | py | Python | python/nRF2401/__init__.py | natdan/AVR-Bootloaders | f01768fe1b6f3f2ba2602bfaef155a87afae0937 | [
"Apache-2.0"
] | 3 | 2018-02-13T21:39:55.000Z | 2018-04-26T18:17:39.000Z | python/nRF2401/__init__.py | natdan/AVR-Bootloaders | f01768fe1b6f3f2ba2602bfaef155a87afae0937 | [
"Apache-2.0"
] | null | null | null | python/nRF2401/__init__.py | natdan/AVR-Bootloaders | f01768fe1b6f3f2ba2602bfaef155a87afae0937 | [
"Apache-2.0"
] | null | null | null | #################################################################################
# Copyright (c) 2018 Creative Sphere Limited.
# All rights reserved. This program and the accompanying materials
# are made available under the terms of the Apache License v2.0
# which accompanies this distribution, and is available at
# https://www.apache.org/licenses/LICENSE-2.0
#
# Contributors:
# Creative Sphere - initial API and implementation
#
#################################################################################
from nRF2401.nRF2401 import *
| 39.428571 | 81 | 0.541667 | #################################################################################
# Copyright (c) 2018 Creative Sphere Limited.
# All rights reserved. This program and the accompanying materials
# are made available under the terms of the Apache License v2.0
# which accompanies this distribution, and is available at
# https://www.apache.org/licenses/LICENSE-2.0
#
# Contributors:
# Creative Sphere - initial API and implementation
#
#################################################################################
from nRF2401.nRF2401 import *
| 0 | 0 | 0 |
6c688764869b16c1adb550bc52167a7ee9753b4b | 84 | py | Python | scripts/test12.py | spencerparkin/MathTree | 4aa286248c2dc6a34ad2ef3e56d48b60838f3b72 | [
"MIT"
] | null | null | null | scripts/test12.py | spencerparkin/MathTree | 4aa286248c2dc6a34ad2ef3e56d48b60838f3b72 | [
"MIT"
] | null | null | null | scripts/test12.py | spencerparkin/MathTree | 4aa286248c2dc6a34ad2ef3e56d48b60838f3b72 | [
"MIT"
] | null | null | null | # test12.py
a = _n('a')
b = _n('b')
c = _n('c')
root = a + a + a^b + b^a + a^(b|c) | 12 | 34 | 0.392857 | # test12.py
a = _n('a')
b = _n('b')
c = _n('c')
root = a + a + a^b + b^a + a^(b|c) | 0 | 0 | 0 |
8302c64afce11bb6a7c70e83a8cef5c89491def6 | 16,644 | py | Python | adamspy/postprocess/ppt.py | bthornton191/Adams_Modules | e5473c6dc194148353cefb9964ad2081e79741d7 | [
"MIT"
] | 5 | 2019-07-01T01:38:44.000Z | 2020-05-18T00:56:41.000Z | adamspy/postprocess/ppt.py | bthornton191/Adams_Modules | e5473c6dc194148353cefb9964ad2081e79741d7 | [
"MIT"
] | 4 | 2020-03-24T16:55:01.000Z | 2021-03-20T00:44:18.000Z | adamspy/postprocess/ppt.py | bthornton191/adamspy | e5473c6dc194148353cefb9964ad2081e79741d7 | [
"MIT"
] | null | null | null | """This module is for sending scripts through Adams/PPT
"""
import os
import subprocess
import re
import time
import platform
import jinja2
from numpy import genfromtxt
import matplotlib.pyplot as plt
from thornpy.signal import manually_clean_sig, remove_data_point, manually_clean_sigs, low_pass
from thornpy.signal import _clean_sig as clean_sig
# Line aview writes to its log when a batch run has finished; _wait polls for it.
LOG_COMPLETE_PATTERN = '! Command file is exhausted, batch run is finished.'
# Jinja2 template filenames (located in the aview_scripts package directory).
LUNAR_SCRIPT_NAME = 'get_lunar_results.py'
GET_RESULTS_SCRIPT_NAME = 'get_results.py'
EDIT_RESULTS_SCRIPT_NAME = 'edit_results.py'
# Temporary csv file that the generated aview script writes results to.
TEMP_OUTPUT_FILENAME = 'results.tmp'
# Log file written by Adams View during a run.
LOG_NAME = 'aview.log'
# Jinja2 environment used to render the aview script templates.
TMPLT_ENV = jinja2.Environment(
    loader=jinja2.PackageLoader('adamspy.postprocess', 'aview_scripts'),
    autoescape=False,
    keep_trailing_newline=True,
    trim_blocks=True,
    lstrip_blocks=True
)
# Default number of seconds to wait for the postprocessor before timing out.
_TIMEOUT = 300
# Regex matching error lines in the aview log (e.g. '! SomeError: ...').
LOG_FILE_ERROR_PATTERN = '! \\S*Error: '
def get_results(res_file, reqs_to_get, t_min=None, t_max=None, _just_write_script=False, timeout=_TIMEOUT):
    """Gets results from an Adams results (.res) file.

    Example
    -------
    >>> result_file = 'example.res'
    >>> t_min = 70
    >>> t_max = 80
    >>> reqs_to_get = {}
    >>> reqs_to_get['MSE'] = ['Instantaneous_Bottom_MSE', 'Filtered_Surface_MSE']
    >>> reqs_to_get['ROP_controls'] = ['Command_ROP', 'True_WOB']
    >>> requests = get_results(result_file, reqs_to_get, t_min, t_max)

    Note
    ----
    This function only works with Requests. It does not work with Result Sets.

    Note
    ----
    This function only works with xml results files.

    Parameters
    ----------
    res_file : str
        Filename of an Adams results (.res) file
    reqs_to_get : dict
        Dictionary mapping request names to lists of component names to extract
    t_min : float, optional
        Minimum time for which to extract results (the default is None)
    t_max : float, optional
        Maximum time for which to extract results (the default is None)
    _just_write_script : bool, optional
        If True, only write the aview script without running it and return
        None (the default is False)
    timeout : float, optional
        Number of seconds to wait for the postprocessor before timing out

    Returns
    -------
    dict
        Dictionary of request data keyed first by request name then by
        component name, plus a top-level 'time' entry
    """
    # Read the aview script template; a context manager ensures the handle
    # is closed promptly instead of leaking until garbage collection.
    template_path = os.path.join(os.path.dirname(__file__), 'aview_scripts', GET_RESULTS_SCRIPT_NAME)
    with open(template_path) as template_fid:
        template = TMPLT_ENV.from_string(template_fid.read())
    working_directory = os.path.dirname(res_file)
    script_filename = _get_unique_filename(GET_RESULTS_SCRIPT_NAME)
    output_filename = _get_unique_filename(TEMP_OUTPUT_FILENAME)
    with open(os.path.join(working_directory, script_filename), 'w') as fid:
        fid.write(template.render({'res_file': os.path.split(res_file)[-1], 'reqs_to_get': reqs_to_get, 't_min': t_min, 't_max': t_max, 'output_file': output_filename}))
    if _just_write_script is True:
        # Nothing was run, so there is no output file to parse.  (Previously
        # execution fell through to genfromtxt and crashed on the missing file.)
        return None
    # Delete the aview.log file so _wait only sees the log of this run
    try:
        os.remove(os.path.join(working_directory, LOG_NAME))
    except FileNotFoundError:
        pass
    except PermissionError:
        pass
    # Run the postprocessor
    if platform.system() == 'Windows':
        # Suppress the console window that would otherwise flash up
        startupinfo = subprocess.STARTUPINFO()
        startupinfo.dwFlags |= subprocess.STARTF_USESHOWWINDOW
        subprocess.Popen('"{}" aview ru-s b {}'.format(os.environ['ADAMS_LAUNCH_COMMAND'], script_filename), cwd=working_directory, startupinfo=startupinfo)
    else:
        subprocess.Popen([os.environ['ADAMS_LAUNCH_COMMAND'], '-c', 'aview', 'ru-standard', 'b', script_filename, 'exit'], cwd=working_directory)
    # Wait for the run to complete
    _wait(os.path.join(working_directory, LOG_NAME), timeout=timeout)
    # Check the log file for errors
    _get_log_errors(os.path.join(working_directory, LOG_NAME))
    # Read the csv the aview script wrote, then clean up the temporary files
    data = genfromtxt(os.path.join(working_directory, output_filename), delimiter=',', names=True, dtype=None)
    os.remove(os.path.join(working_directory, script_filename))
    os.remove(os.path.join(working_directory, output_filename))
    output_dict = {'time': list(data['time'])}
    for res in reqs_to_get:
        output_dict[res] = {}
        for comp in reqs_to_get[res]:
            output_dict[res][comp] = list(data[f'{res}_{comp}'])
    return output_dict
def _wait(log_file, sleep_time=0.2, timeout=300):
    """Block until the aview log reports that the batch run has finished.

    Polls *log_file* every *sleep_time* seconds and returns as soon as the
    completion marker appears, or after roughly *timeout* seconds elapse.

    Parameters
    ----------
    log_file : str
        filename of log file
    sleep_time : float, optional
        Time between checks, by default 0.2
    timeout : int, optional
        Duration after which to time out, by default 300
    """
    attempts = int(timeout / sleep_time)
    for _ in range(attempts):
        if os.path.exists(log_file):
            with open(log_file, 'r') as log_handle:
                contents = log_handle.read()
            # Stop polling once aview has written its completion line.
            if re.search(LOG_COMPLETE_PATTERN, contents):
                break
        time.sleep(sleep_time)
def _get_log_errors(log_file):
    """Raise :class:`AviewError` if the aview log contains an error line.

    Parameters
    ----------
    log_file : str
        Filename of aview log file (usually aview.log)

    Raises
    ------
    AviewError
        If any line in the log matches the aview error pattern.
    """
    with open(log_file, 'r') as log_handle:
        for line in log_handle:
            if re.search(LOG_FILE_ERROR_PATTERN, line):
                # Drop the leading '! ' marker before reporting the message.
                raise AviewError(line[2:])
def manually_remove_spikes(res_file, reqs_to_clean, reqs_to_check=None, t_min=None, t_max=None, _just_write_script=False, timeout=_TIMEOUT, _inplace=False):
    """Allows the user to manually scan through the result sets to pick out points to eliminate.

    Parameters
    ----------
    res_file : str
        Adams Results (.res) filename
    reqs_to_clean : dict, optional
        Nested dictionary of result sets and result components to clean
    reqs_to_check : dict
        Nested dictionary of result sets and result components to check for spikes, by default same as reqs_to_clean
    t_min : float, optional
        Minumum simulation time to clean, by default None
    t_max : float, optional
        Maximum simulation time to clean, by default None
    timeout : float, optional
        Number of seconds to wait for results to load before timing out, by default _TIMEOUT

    Returns
    -------
    dict
        Nested dictionary of cleaned results
    """
    if reqs_to_check is None:
        reqs_to_check = reqs_to_clean
    results = get_results(res_file, reqs_to_clean, t_min=t_min, t_max=t_max, _just_write_script=_just_write_script, timeout=timeout)
    time_sig = results['time']
    # Remove the spikes
    for (res, res_comps) in [(r, rc) for r, rc in results.items() if r in reqs_to_check]:
        for (res_comp, values) in [(rc, v) for rc, v in res_comps.items() if rc in reqs_to_check[res]]:
            results[res][res_comp], i_mod = manually_clean_sig(time_sig, values, indices=True)
            # If a modification was made to the signal, make that modification to the rest of the signals
            if i_mod != []:
                # Loop over all the other results
                for (other_res, other_res_comps) in [(r, rc) for r, rc in results.items() if r != 'time']:
                    for (other_res_comp, other_values) in [(rc, v) for rc, v in other_res_comps.items() if not (other_res == res and rc == res_comp)]:  # pylint: disable=no-member
                        # Apply the removals cumulatively.  The previous code
                        # passed the pristine signal to remove_data_point on
                        # every iteration and overwrote the stored result, so
                        # only the last removed index actually took effect.
                        updated_values = other_values
                        for i in i_mod:
                            updated_values = remove_data_point(time_sig, updated_values, i)
                        results[other_res][other_res_comp] = updated_values
    # NOTE(review): the indices in i_mod refer to the original time base; this
    # assumes remove_data_point interprets them against the unmodified signal
    # even after earlier removals -- confirm against thornpy's implementation.
    # Update the analysis files
    edit_results(res_file, results)
    # Return the cleaned results
    return results
def filter_results(res_file, reqs_to_clean, freq_cutoff, N_filter=5, reqs_to_check=None, t_min=None, t_max=None, _just_write_script=False, timeout=_TIMEOUT, _inplace=False, return_raw=False):
    """Loads results from an Adams results file, despikes them automatically and low-pass filters them.

    Parameters
    ----------
    res_file : str
        Adams Results (.res) filename
    reqs_to_clean : dict
        Nested dictionary of result sets and result components to clean
    freq_cutoff : float
        Cutoff frequency of the low-pass filter in Hz
    N_filter : int, optional
        Order of the low-pass filter, by default 5
    reqs_to_check : list of dicts, optional
        Accepted for interface compatibility with the manual spike-removal
        functions, but not used by this function.
    t_min : float, optional
        Minimum simulation time to clean, by default None
    t_max : float, optional
        Maximum simulation time to clean, by default None
    timeout : float, optional
        Number of seconds to wait for results to load before timing out, by default _TIMEOUT
    return_raw : bool, optional
        If True, also return the raw (unfiltered) results, by default False

    Returns
    -------
    dict
        Nested dictionary of filtered results.  If `return_raw` is True a
        second dictionary holding the unfiltered results is also returned.
    """
    # NOTE: reqs_to_check is intentionally ignored; it is kept in the
    # signature so callers written against the manual-cleaning functions
    # keep working.  (The original docstring, copied from
    # manually_remove_spikes_batch, described behaviour this function
    # does not have.)
    results = get_results(res_file, reqs_to_clean, t_min=t_min, t_max=t_max, _just_write_script=_just_write_script, timeout=timeout)
    time_sig = results.pop('time')
    # Number of standard deviations used by the automatic spike removal.
    _N_SIGMA = 3
    filtered_results = {}
    for res_name, res_comps in results.items():
        filtered_results[res_name] = {}
        for res_comp, values in res_comps.items():  # pylint: disable=no-member
            cleaned_sig, _, _ = clean_sig(values, _N_SIGMA)
            filtered_results[res_name][res_comp], _ = low_pass(cleaned_sig, time_sig, freq_cutoff, N=N_filter)
    # Return the cleaned results
    if return_raw is True:
        return {'time': time_sig, **filtered_results}, {'time': time_sig, **results}
    else:
        return {'time': time_sig, **filtered_results}
def manually_remove_spikes_batch(res_file, reqs_to_clean, reqs_to_check=None, t_min=None, t_max=None, _just_write_script=False, timeout=_TIMEOUT, _inplace=False):
    """Similar to `manually_remove_spikes`, but allows user to plot the signals in batches. Instead of passing a dictionary for the `reqs_to_check` argument, pass a list of dictionaries and the results in each dictionary in the list will be plotted together.

    Parameters
    ----------
    res_file : str
        Adams Results (.res) filename
    reqs_to_clean : dict
        Nested dictionary of result sets and result components to clean
    reqs_to_check : list of dicts
        list of nested dictionary of result sets and result components to check for spikes, by default same as reqs_to_clean
    t_min : float, optional
        Minumum simulation time to clean, by default None
    t_max : float, optional
        Maximum simulation time to clean, by default None
    timeout : float, optional
        Number of seconds to wait for results to load before timing out, by default _TIMEOUT

    Returns
    -------
    dict
        Nested dictionary of cleaned results
    """
    if reqs_to_check is None:
        reqs_to_check = [reqs_to_clean]
    results = get_results(res_file, reqs_to_clean, t_min=t_min, t_max=t_max, _just_write_script=_just_write_script, timeout=timeout)
    time_sig = results['time']
    # Create a flag indicating if the results have been modified and need to be rewritten
    results_modified = False
    for batch_to_check in reqs_to_check:
        # Make a list/batch of values to clean
        values_to_check = []
        for (res, res_comps) in [(r, rc) for r, rc in results.items() if r in batch_to_check]:
            for (res_comp, values) in [(rc, v) for rc, v in res_comps.items() if rc in batch_to_check[res]]:
                values_to_check.append(values)
        _, i_mod = manually_clean_sigs(time_sig, values_to_check, indices=True)
        # If a modification was made to the signal, make that modification to the rest of the signals
        if i_mod != []:
            # Loop over all the results
            for (res, res_comps) in [(r, rc) for r, rc in results.items() if r != 'time']:
                for (res_comp, values) in res_comps.items():
                    # Apply removals cumulatively; the previous code passed
                    # the pristine signal to remove_data_point on every
                    # iteration, so only the last removed index took effect.
                    updated_values = values
                    for i in i_mod:
                        updated_values = remove_data_point(time_sig, updated_values, i)
                    results[res][res_comp] = updated_values
            # Flag that the results have been modified
            results_modified = True
    # If the results were modified, update the analysis files
    if results_modified is True:
        edit_results(res_file, results)
    # Return the cleaned results
    return results
class AviewError(Exception):
    """Raised when a known error line is found in the aview log file."""
"""
import os
import subprocess
import re
import time
import platform
import jinja2
from numpy import genfromtxt
import matplotlib.pyplot as plt
from thornpy.signal import manually_clean_sig, remove_data_point, manually_clean_sigs, low_pass
from thornpy.signal import _clean_sig as clean_sig
# Line aview writes to its log when a batch run has finished; _wait polls for it.
LOG_COMPLETE_PATTERN = '! Command file is exhausted, batch run is finished.'
# Jinja2 template filenames (located in the aview_scripts package directory).
LUNAR_SCRIPT_NAME = 'get_lunar_results.py'
GET_RESULTS_SCRIPT_NAME = 'get_results.py'
EDIT_RESULTS_SCRIPT_NAME = 'edit_results.py'
# Temporary csv file that the generated aview script writes results to.
TEMP_OUTPUT_FILENAME = 'results.tmp'
# Log file written by Adams View during a run.
LOG_NAME = 'aview.log'
# Jinja2 environment used to render the aview script templates.
TMPLT_ENV = jinja2.Environment(
    loader=jinja2.PackageLoader('adamspy.postprocess', 'aview_scripts'),
    autoescape=False,
    keep_trailing_newline=True,
    trim_blocks=True,
    lstrip_blocks=True
)
# Default number of seconds to wait for the postprocessor before timing out.
_TIMEOUT = 300
# Regex matching error lines in the aview log (e.g. '! SomeError: ...').
LOG_FILE_ERROR_PATTERN = '! \\S*Error: '
def get_results(res_file, reqs_to_get, t_min=None, t_max=None, _just_write_script=False, timeout=_TIMEOUT):
    """Gets results from an Adams results (.res) file.

    Example
    -------
    >>> result_file = 'example.res'
    >>> t_min = 70
    >>> t_max = 80
    >>> reqs_to_get = {}
    >>> reqs_to_get['MSE'] = ['Instantaneous_Bottom_MSE', 'Filtered_Surface_MSE']
    >>> reqs_to_get['ROP_controls'] = ['Command_ROP', 'True_WOB']
    >>> requests = get_results(result_file, reqs_to_get, t_min, t_max)

    Note
    ----
    This function only works with Requests. It does not work with Result Sets.

    Note
    ----
    This function only works with xml results files.

    Parameters
    ----------
    res_file : str
        Filename of an Adams results (.res) file
    reqs_to_get : dict
        Dictionary mapping request names to lists of component names to extract
    t_min : float, optional
        Minimum time for which to extract results (the default is None)
    t_max : float, optional
        Maximum time for which to extract results (the default is None)
    _just_write_script : bool, optional
        If True, only write the aview script without running it and return
        None (the default is False)
    timeout : float, optional
        Number of seconds to wait for the postprocessor before timing out

    Returns
    -------
    dict
        Dictionary of request data keyed first by request name then by
        component name, plus a top-level 'time' entry
    """
    # Read the aview script template; a context manager ensures the handle
    # is closed promptly instead of leaking until garbage collection.
    template_path = os.path.join(os.path.dirname(__file__), 'aview_scripts', GET_RESULTS_SCRIPT_NAME)
    with open(template_path) as template_fid:
        template = TMPLT_ENV.from_string(template_fid.read())
    working_directory = os.path.dirname(res_file)
    script_filename = _get_unique_filename(GET_RESULTS_SCRIPT_NAME)
    output_filename = _get_unique_filename(TEMP_OUTPUT_FILENAME)
    with open(os.path.join(working_directory, script_filename), 'w') as fid:
        fid.write(template.render({'res_file': os.path.split(res_file)[-1], 'reqs_to_get': reqs_to_get, 't_min': t_min, 't_max': t_max, 'output_file': output_filename}))
    if _just_write_script is True:
        # Nothing was run, so there is no output file to parse.  (Previously
        # execution fell through to genfromtxt and crashed on the missing file.)
        return None
    # Delete the aview.log file so _wait only sees the log of this run
    try:
        os.remove(os.path.join(working_directory, LOG_NAME))
    except FileNotFoundError:
        pass
    except PermissionError:
        pass
    # Run the postprocessor
    if platform.system() == 'Windows':
        # Suppress the console window that would otherwise flash up
        startupinfo = subprocess.STARTUPINFO()
        startupinfo.dwFlags |= subprocess.STARTF_USESHOWWINDOW
        subprocess.Popen('"{}" aview ru-s b {}'.format(os.environ['ADAMS_LAUNCH_COMMAND'], script_filename), cwd=working_directory, startupinfo=startupinfo)
    else:
        subprocess.Popen([os.environ['ADAMS_LAUNCH_COMMAND'], '-c', 'aview', 'ru-standard', 'b', script_filename, 'exit'], cwd=working_directory)
    # Wait for the run to complete
    _wait(os.path.join(working_directory, LOG_NAME), timeout=timeout)
    # Check the log file for errors
    _get_log_errors(os.path.join(working_directory, LOG_NAME))
    # Read the csv the aview script wrote, then clean up the temporary files
    data = genfromtxt(os.path.join(working_directory, output_filename), delimiter=',', names=True, dtype=None)
    os.remove(os.path.join(working_directory, script_filename))
    os.remove(os.path.join(working_directory, output_filename))
    output_dict = {'time': list(data['time'])}
    for res in reqs_to_get:
        output_dict[res] = {}
        for comp in reqs_to_get[res]:
            output_dict[res][comp] = list(data[f'{res}_{comp}'])
    return output_dict
def write_results(res_file, input_dict):
    """Write request data to an Adams results (.res) file.

    Not yet implemented; currently a no-op placeholder.

    Parameters
    ----------
    res_file : str
        Filename of an Adams results (.res) file
    input_dict : dict
        Nested dictionary of request data to write
    """
    # TODO
    return
def edit_results(res_file, input_dict, new_res_file=None, _just_write_script=False, timeout=_TIMEOUT):
    """Writes modified request data back to an Adams results (.res) file.

    Parameters
    ----------
    res_file : str
        Filename of the existing Adams results (.res) file
    input_dict : dict
        Nested dictionary of request data to write, as returned by :func:`get_results`
    new_res_file : str, optional
        Filename of the results file to write; if None, `res_file` is overwritten
    _just_write_script : bool, optional
        If True, only write the aview script without running it, by default False
    timeout : float, optional
        Number of seconds to wait for the postprocessor before timing out
    """
    # Read the aview script template; a context manager ensures the handle
    # is closed promptly instead of leaking until garbage collection.
    template_path = os.path.join(os.path.dirname(__file__), 'aview_scripts', EDIT_RESULTS_SCRIPT_NAME)
    with open(template_path) as template_fid:
        template = TMPLT_ENV.from_string(template_fid.read())
    working_directory = os.path.dirname(res_file)
    new_res_file = os.path.split(res_file)[-1] if new_res_file is None else os.path.split(new_res_file)[-1]
    script_name = _get_unique_filename(EDIT_RESULTS_SCRIPT_NAME)
    with open(os.path.join(working_directory, script_name), 'w') as fid:
        fid.write(template.render({'res_file': os.path.split(res_file)[-1], 'reqs_to_edit': input_dict, 'output_file': new_res_file}))
    if _just_write_script is False:
        # Delete the aview.log file so _wait only sees the log of this run
        try:
            os.remove(os.path.join(working_directory, LOG_NAME))
        except FileNotFoundError:
            pass
        except PermissionError:
            pass
        if platform.system() == 'Windows':
            # Run the postprocessor without flashing a console window
            startupinfo = subprocess.STARTUPINFO()
            startupinfo.dwFlags |= subprocess.STARTF_USESHOWWINDOW
            subprocess.Popen('"{}" aview ru-s b {}'.format(os.environ['ADAMS_LAUNCH_COMMAND'], script_name), cwd=working_directory, startupinfo=startupinfo)
        else:
            subprocess.Popen([os.environ['ADAMS_LAUNCH_COMMAND'], '-c', 'aview', 'ru-s', 'b', script_name, 'exit'], cwd=working_directory)
        # Wait for complete
        _wait(os.path.join(working_directory, LOG_NAME), timeout=timeout)
        # Check the log file for errors.  This check now only runs after an
        # actual run; previously it executed even when _just_write_script was
        # True and raised FileNotFoundError because no log had been produced.
        _get_log_errors(os.path.join(working_directory, LOG_NAME))
def get_lunar_results(res_files, reqs_to_get, t_min, t_max, output_file, _just_write_script=False, timeout=_TIMEOUT):
    """Extracts results from multiple Adams results files and writes them to csv files.

    Parameters
    ----------
    res_files : list of str
        Filenames of Adams results (.res) files
    reqs_to_get : dict
        Nested dictionary of result sets and result components to extract
    t_min : float
        Minimum time for which to extract results
    t_max : float
        Maximum time for which to extract results
    output_file : str
        Base filename used to build the output csv filenames
    _just_write_script : bool, optional
        If True, only write the aview script without running it, by default False
    timeout : float, optional
        Number of seconds to wait for the postprocessor before timing out

    Returns
    -------
    list of str
        Filenames of the csv files the aview script writes (one per result
        component plus a '_time' file)
    """
    # Read the aview script template; a context manager ensures the handle
    # is closed promptly instead of leaking until garbage collection.
    template_path = os.path.join(os.path.dirname(__file__), 'aview_scripts', LUNAR_SCRIPT_NAME)
    with open(template_path) as template_fid:
        template = TMPLT_ENV.from_string(template_fid.read())
    working_directory = os.path.dirname(res_files[0])
    script_name = _get_unique_filename(LUNAR_SCRIPT_NAME)
    with open(os.path.join(working_directory, script_name), 'w') as fid:
        fid.write(template.render({'res_files': res_files, 'reqs_to_get': reqs_to_get, 't_min': t_min, 't_max': t_max, 'output_suffix': output_file}))
    if _just_write_script is False:
        # Delete the aview.log file
        try:
            os.remove(os.path.join(working_directory, LOG_NAME))
        except FileNotFoundError:
            pass
        # Run the postprocessor
        if platform.system() == 'Windows':
            startupinfo = subprocess.STARTUPINFO()
            startupinfo.dwFlags |= subprocess.STARTF_USESHOWWINDOW
            subprocess.Popen('"{}" aview ru-s b {}'.format(os.environ['ADAMS_LAUNCH_COMMAND'], script_name), cwd=working_directory, startupinfo=startupinfo)
        else:
            subprocess.Popen([os.environ['ADAMS_LAUNCH_COMMAND'], 'aview', 'ru-s', 'b', script_name], cwd=working_directory)
        # Wait for complete
        _wait(os.path.join(working_directory, LOG_NAME), timeout=timeout)
        # Only remove the script after an actual run.  Removing it
        # unconditionally (as before) deleted the script immediately when
        # _just_write_script was True, defeating the purpose of that flag.
        os.remove(os.path.join(working_directory, script_name))
    # Make a list of the files that are written
    res_output_files = [os.path.splitext(output_file)[0] + '_time' + os.path.splitext(output_file)[-1]]
    for res_name, res_comps in reqs_to_get.items():
        for res_comp in res_comps:
            full_res_name = f'{res_name}_{res_comp}'
            res_output_files.append(os.path.splitext(output_file)[0] + '_' + full_res_name + os.path.splitext(output_file)[-1])
    return res_output_files
def _wait(log_file, sleep_time=0.2, timeout=300):
    """Block until the aview log reports that the batch run has finished.

    Polls *log_file* every *sleep_time* seconds and returns as soon as the
    completion marker appears, or after roughly *timeout* seconds elapse.

    Parameters
    ----------
    log_file : str
        filename of log file
    sleep_time : float, optional
        Time between checks, by default 0.2
    timeout : int, optional
        Duration after which to time out, by default 300
    """
    attempts = int(timeout / sleep_time)
    for _ in range(attempts):
        if os.path.exists(log_file):
            with open(log_file, 'r') as log_handle:
                contents = log_handle.read()
            # Stop polling once aview has written its completion line.
            if re.search(LOG_COMPLETE_PATTERN, contents):
                break
        time.sleep(sleep_time)
def _get_log_errors(log_file):
    """Raise :class:`AviewError` if the aview log contains an error line.

    Parameters
    ----------
    log_file : str
        Filename of aview log file (usually aview.log)

    Raises
    ------
    AviewError
        If any line in the log matches the aview error pattern.
    """
    with open(log_file, 'r') as log_handle:
        for line in log_handle:
            if re.search(LOG_FILE_ERROR_PATTERN, line):
                # Drop the leading '! ' marker before reporting the message.
                raise AviewError(line[2:])
def manually_remove_spikes(res_file, reqs_to_clean, reqs_to_check=None, t_min=None, t_max=None, _just_write_script=False, timeout=_TIMEOUT, _inplace=False):
    """Allows the user to manually scan through the result sets to pick out points to eliminate.

    Parameters
    ----------
    res_file : str
        Adams Results (.res) filename
    reqs_to_clean : dict, optional
        Nested dictionary of result sets and result components to clean
    reqs_to_check : dict
        Nested dictionary of result sets and result components to check for spikes, by default same as reqs_to_clean
    t_min : float, optional
        Minumum simulation time to clean, by default None
    t_max : float, optional
        Maximum simulation time to clean, by default None
    timeout : float, optional
        Number of seconds to wait for results to load before timing out, by default _TIMEOUT

    Returns
    -------
    dict
        Nested dictionary of cleaned results
    """
    if reqs_to_check is None:
        reqs_to_check = reqs_to_clean
    results = get_results(res_file, reqs_to_clean, t_min=t_min, t_max=t_max, _just_write_script=_just_write_script, timeout=timeout)
    time_sig = results['time']
    # Remove the spikes
    for (res, res_comps) in [(r, rc) for r, rc in results.items() if r in reqs_to_check]:
        for (res_comp, values) in [(rc, v) for rc, v in res_comps.items() if rc in reqs_to_check[res]]:
            results[res][res_comp], i_mod = manually_clean_sig(time_sig, values, indices=True)
            # If a modification was made to the signal, make that modification to the rest of the signals
            if i_mod != []:
                # Loop over all the other results
                for (other_res, other_res_comps) in [(r, rc) for r, rc in results.items() if r != 'time']:
                    for (other_res_comp, other_values) in [(rc, v) for rc, v in other_res_comps.items() if not (other_res == res and rc == res_comp)]:  # pylint: disable=no-member
                        # Apply the removals cumulatively.  The previous code
                        # passed the pristine signal to remove_data_point on
                        # every iteration and overwrote the stored result, so
                        # only the last removed index actually took effect.
                        updated_values = other_values
                        for i in i_mod:
                            updated_values = remove_data_point(time_sig, updated_values, i)
                        results[other_res][other_res_comp] = updated_values
    # NOTE(review): the indices in i_mod refer to the original time base; this
    # assumes remove_data_point interprets them against the unmodified signal
    # even after earlier removals -- confirm against thornpy's implementation.
    # Update the analysis files
    edit_results(res_file, results)
    # Return the cleaned results
    return results
def filter_results(res_file, reqs_to_clean, freq_cutoff, N_filter=5, reqs_to_check=None, t_min=None, t_max=None, _just_write_script=False, timeout=_TIMEOUT, _inplace=False, return_raw=False):
    """Loads results from an Adams results file, despikes them automatically and low-pass filters them.

    Parameters
    ----------
    res_file : str
        Adams Results (.res) filename
    reqs_to_clean : dict
        Nested dictionary of result sets and result components to clean
    freq_cutoff : float
        Cutoff frequency of the low-pass filter in Hz
    N_filter : int, optional
        Order of the low-pass filter, by default 5
    reqs_to_check : list of dicts, optional
        Accepted for interface compatibility with the manual spike-removal
        functions, but not used by this function.
    t_min : float, optional
        Minimum simulation time to clean, by default None
    t_max : float, optional
        Maximum simulation time to clean, by default None
    timeout : float, optional
        Number of seconds to wait for results to load before timing out, by default _TIMEOUT
    return_raw : bool, optional
        If True, also return the raw (unfiltered) results, by default False

    Returns
    -------
    dict
        Nested dictionary of filtered results.  If `return_raw` is True a
        second dictionary holding the unfiltered results is also returned.
    """
    # NOTE: reqs_to_check is intentionally ignored; it is kept in the
    # signature so callers written against the manual-cleaning functions
    # keep working.  (The original docstring, copied from
    # manually_remove_spikes_batch, described behaviour this function
    # does not have.)
    results = get_results(res_file, reqs_to_clean, t_min=t_min, t_max=t_max, _just_write_script=_just_write_script, timeout=timeout)
    time_sig = results.pop('time')
    # Number of standard deviations used by the automatic spike removal.
    _N_SIGMA = 3
    filtered_results = {}
    for res_name, res_comps in results.items():
        filtered_results[res_name] = {}
        for res_comp, values in res_comps.items():  # pylint: disable=no-member
            cleaned_sig, _, _ = clean_sig(values, _N_SIGMA)
            filtered_results[res_name][res_comp], _ = low_pass(cleaned_sig, time_sig, freq_cutoff, N=N_filter)
    # Return the cleaned results
    if return_raw is True:
        return {'time': time_sig, **filtered_results}, {'time': time_sig, **results}
    else:
        return {'time': time_sig, **filtered_results}
def manually_remove_spikes_batch(res_file, reqs_to_clean, reqs_to_check=None, t_min=None, t_max=None, _just_write_script=False, timeout=_TIMEOUT, _inplace=False):
    """Similar to `manually_remove_spikes`, but allows user to plot the signals in batches. Instead of passing a dictionary for the `reqs_to_check` argument, pass a list of dictionaries and the results in each dictionary in the list will be plotted together.

    Parameters
    ----------
    res_file : str
        Adams Results (.res) filename
    reqs_to_clean : dict
        Nested dictionary of result sets and result components to clean
    reqs_to_check : list of dicts
        list of nested dictionary of result sets and result components to check for spikes, by default same as reqs_to_clean
    t_min : float, optional
        Minumum simulation time to clean, by default None
    t_max : float, optional
        Maximum simulation time to clean, by default None
    timeout : float, optional
        Number of seconds to wait for results to load before timing out, by default _TIMEOUT

    Returns
    -------
    dict
        Nested dictionary of cleaned results
    """
    if reqs_to_check is None:
        reqs_to_check = [reqs_to_clean]
    results = get_results(res_file, reqs_to_clean, t_min=t_min, t_max=t_max, _just_write_script=_just_write_script, timeout=timeout)
    time_sig = results['time']
    # Create a flag indicating if the results have been modified and need to be rewritten
    results_modified = False
    for batch_to_check in reqs_to_check:
        # Make a list/batch of values to clean
        values_to_check = []
        for (res, res_comps) in [(r, rc) for r, rc in results.items() if r in batch_to_check]:
            for (res_comp, values) in [(rc, v) for rc, v in res_comps.items() if rc in batch_to_check[res]]:
                values_to_check.append(values)
        _, i_mod = manually_clean_sigs(time_sig, values_to_check, indices=True)
        # If a modification was made to the signal, make that modification to the rest of the signals
        if i_mod != []:
            # Loop over all the results
            for (res, res_comps) in [(r, rc) for r, rc in results.items() if r != 'time']:
                for (res_comp, values) in res_comps.items():
                    # Apply removals cumulatively; the previous code passed
                    # the pristine signal to remove_data_point on every
                    # iteration, so only the last removed index took effect.
                    updated_values = values
                    for i in i_mod:
                        updated_values = remove_data_point(time_sig, updated_values, i)
                    results[res][res_comp] = updated_values
            # Flag that the results have been modified
            results_modified = True
    # If the results were modified, update the analysis files
    if results_modified is True:
        edit_results(res_file, results)
    # Return the cleaned results
    return results
def _get_unique_filename(filename):
if os.path.exists(filename):
for i in range(9999):
new_name, ext = os.path.splitext(filename)
new_name = new_name + f'_{i+1}'
if not os.path.exists(new_name + ext):
filename = new_name + ext
break
return filename
class AviewError(Exception):
    """Raised when a known error line is found in the aview log file."""
1b93c694f6d898cd16deab7c152e24c19de63302 | 3,375 | py | Python | middleware.py | CloudCIX/framework | 321b4b953a2b2083c069fff133e538ea1ecca4a7 | [
"Apache-2.0"
] | null | null | null | middleware.py | CloudCIX/framework | 321b4b953a2b2083c069fff133e538ea1ecca4a7 | [
"Apache-2.0"
] | null | null | null | middleware.py | CloudCIX/framework | 321b4b953a2b2083c069fff133e538ea1ecca4a7 | [
"Apache-2.0"
] | null | null | null | """
Move this to wherever once we establish a setup for the Application Framework
"""
import re
from collections import deque
from typing import Callable, Deque
from time import perf_counter
from cloudcix_metrics import prepare_metrics
from django.http.response import HttpResponseBase
from rest_framework.request import Request
from metrics.client_ip import post_client_ip
from metrics.response_time import post_response_time
GetResponseType = Callable[[Request], HttpResponseBase]
class OpenAPIDeepObjectParserMiddleware:
    """
    This middleware will transform the GET parameters received from the user and turn any OAPI deepObject types into
    nested dictionaries
    OAPI deepObject example: ?search[name]=yes&exclude[name]=no
    Turning these into nested dictionaries makes the search and exclude validation a lot easier
    """

    def __init__(self, get_response: GetResponseType) -> None:
        """
        Set up this middleware class
        :param get_response: A callable that calls the next part in the chain.
            This might be another middleware or the view itself.
        """
        self.get_response = get_response
        # Matches deepObject-style parameters, e.g. "search[name]".
        # Fixed: the original used the character class [a-zA-z], which spans
        # the ASCII range between 'Z' and 'a' and therefore also matched the
        # punctuation characters [ \ ] ^ _ and the backtick; [a-zA-Z] is
        # what was intended.
        self.pattern = re.compile(r'(?P<dict>[a-zA-Z][a-zA-Z0-9]+)\[(?P<key>.+)\]')

    def __call__(self, request: Request) -> HttpResponseBase:
        """
        This method is run when the middleware is called by Django
        :param request: The current request object passed from the last part of the chain
        :returns: The response to be returned to the user
        """
        # Before we pass on the request, we should alter the GET params
        # Find all deepObject style params and transform them
        new_get = request.GET.copy()
        transformed: Deque = deque()
        for k in request.GET.keys():
            match = self.pattern.match(k)
            if match:
                # Fold this "name[key]" parameter into a nested dict under "name"
                new_get.setdefault(match['dict'], {})[match['key']] = request.GET[k]
                transformed.append(k)
        # Remove the original flat entries now that they are nested
        for k in transformed:
            new_get.pop(k)
        request.GET = new_get
        # Now pass the request to the next part of the chain and return what
        # comes back
        return self.get_response(request)
class MetricsMiddleware:
    """
    Middleware that measures how long the downstream chain takes to produce a
    response and reports the timing and client IP metrics to influx
    """

    def __init__(self, get_response: GetResponseType) -> None:
        """
        Set up this middleware class
        :param get_response: A callable that calls the next part in the chain.
            This might be another middleware or the view itself.
        """
        self.get_response = get_response

    def __call__(self, request: Request) -> HttpResponseBase:
        """
        Time the rest of the middleware/view chain for this request and emit
        the response-time and client-IP metrics
        :param request: The current request object passed from the last part of the chain
        :returns: The response to be returned to the user
        """
        started_at = perf_counter()
        response = self.get_response(request)
        elapsed = perf_counter() - started_at
        # Report how long the downstream chain took, then log the client IP
        prepare_metrics(post_response_time, time=elapsed, response=response, request=request)
        prepare_metrics(post_client_ip, response=response, request=request)
        return response
| 38.352273 | 116 | 0.670519 | """
Move this to wherever once we establish a setup for the Application Framework
"""
import re
from collections import deque
from typing import Callable, Deque
from time import perf_counter
from cloudcix_metrics import prepare_metrics
from django.http.response import HttpResponseBase
from rest_framework.request import Request
from metrics.client_ip import post_client_ip
from metrics.response_time import post_response_time
GetResponseType = Callable[[Request], HttpResponseBase]
class OpenAPIDeepObjectParserMiddleware:
    """
    This middleware will transform the GET parameters received from the user and turn any OAPI deepObject types into
    nested dictionaries
    OAPI deepObject example: ?search[name]=yes&exclude[name]=no
    Turning these into nested dictionaries makes the search and exclude validation a lot easier
    """

    def __init__(self, get_response: GetResponseType) -> None:
        """
        Set up this middleware class
        :param get_response: A callable that calls the next part in the chain.
            This might be another middleware or the view itself.
        """
        self.get_response = get_response
        # Matches deepObject-style parameters, e.g. "search[name]".
        # Fixed: the original used the character class [a-zA-z], which spans
        # the ASCII range between 'Z' and 'a' and therefore also matched the
        # punctuation characters [ \ ] ^ _ and the backtick; [a-zA-Z] is
        # what was intended.
        self.pattern = re.compile(r'(?P<dict>[a-zA-Z][a-zA-Z0-9]+)\[(?P<key>.+)\]')

    def __call__(self, request: Request) -> HttpResponseBase:
        """
        This method is run when the middleware is called by Django
        :param request: The current request object passed from the last part of the chain
        :returns: The response to be returned to the user
        """
        # Before we pass on the request, we should alter the GET params
        # Find all deepObject style params and transform them
        new_get = request.GET.copy()
        transformed: Deque = deque()
        for k in request.GET.keys():
            match = self.pattern.match(k)
            if match:
                # Fold this "name[key]" parameter into a nested dict under "name"
                new_get.setdefault(match['dict'], {})[match['key']] = request.GET[k]
                transformed.append(k)
        # Remove the original flat entries now that they are nested
        for k in transformed:
            new_get.pop(k)
        request.GET = new_get
        # Now pass the request to the next part of the chain and return what
        # comes back
        return self.get_response(request)
class MetricsMiddleware:
    """
    Middleware that times every request and reports the elapsed response time
    and the client IP to the metrics backend.
    """

    def __init__(self, get_response: GetResponseType) -> None:
        """
        Remember the next callable in the middleware chain.

        :param get_response: A callable invoking the next middleware or the view.
        """
        self.get_response = get_response

    def __call__(self, request: Request) -> HttpResponseBase:
        """
        Run the rest of the chain, measure how long it took, and emit metrics.

        :param request: The current request object passed from the last part of the chain
        :returns: The response to be returned to the user
        """
        started = perf_counter()
        response = self.get_response(request)
        elapsed = perf_counter() - started
        # Report timing first, then the client IP, for this request/response pair.
        prepare_metrics(post_response_time, time=elapsed, response=response, request=request)
        prepare_metrics(post_client_ip, response=response, request=request)
        return response
| 0 | 0 | 0 |
5964a3e664b876222f49cd0a42cdf3eea5b32fd9 | 1,505 | py | Python | pyirc.py | Psyrens/pyirc | 4a7a8dde72108bf7a9bc181e4da8315ecbb31eac | [
"Apache-2.0"
] | null | null | null | pyirc.py | Psyrens/pyirc | 4a7a8dde72108bf7a9bc181e4da8315ecbb31eac | [
"Apache-2.0"
] | null | null | null | pyirc.py | Psyrens/pyirc | 4a7a8dde72108bf7a9bc181e4da8315ecbb31eac | [
"Apache-2.0"
] | null | null | null | from socket import *
# Simple text menu: host a chat session, connect to one, or quit.
while True:
    choc = str(input("[1] Host\n[2] Connect\n[0] Exit\n"))
    if choc == '1':
        hirc()
    elif choc == '2':
        circ()
    elif choc == '0':
        exit(0)
    else:
        # Any other input re-displays the menu.
        print("wrong input, try again")
| 25.948276 | 90 | 0.568771 | from socket import *
def hirc():
    """Host a chat session: bind to ip:port, accept one peer, then relay lines."""
    username = input("username: ")
    ip = input("ip: ")
    port = input("port: ")
    server = socket(2, 1)  # AF_INET, SOCK_STREAM by numeric value
    try:
        server.bind((str(ip), int(port)))
        server.listen(10)
        print("listening on " + str(ip) + " through port " + str(port))
        conn, peer = server.accept()
        print("connected to " + str(peer[0]) + " through port " + str(peer[1]))
        while True:
            try:
                # Send our line first, then wait for the peer's reply.
                outgoing = input(str(username) + " #> ")
                conn.sendall(bytes(username + " #> " + outgoing, encoding='utf-8'))
                incoming = conn.recv(5120)
                print(incoming.decode("utf-8"))
            except:
                print("could not recv/send data to " + str(peer[0]) + " through port " + str(peer[1]))
    except:
        print("could not bind on " + str(ip) + ":" + str(port))
    server.close()
def circ():
    """Connect to a hosted chat session at ip:port and relay lines both ways."""
    username = input("username: ")
    ip = input("ip: ")
    port = input("port: ")
    client = socket(2, 1)  # AF_INET, SOCK_STREAM by numeric value
    try:
        client.connect((str(ip), int(port)))
        print("connected to " + str(ip) + " through port " + str(port))
        while True:
            try:
                # Mirror of hirc(): the host speaks first, so receive before sending.
                incoming = client.recv(5120)
                print(incoming.decode("utf-8"))
                outgoing = input(str(username) + " #> ")
                client.sendall(bytes(username + " #> " + outgoing, encoding='utf-8'))
            except:
                print("could not recv/send data to " + str(ip) + " through port " + str(port))
    except:
        print("could not connect to " + str(ip) + " through port " + str(port))
    client.close()
# Simple text menu: host a chat session, connect to one, or quit.
while True:
    choc = str(input("[1] Host\n[2] Connect\n[0] Exit\n"))
    if choc == '1':
        hirc()
    elif choc == '2':
        circ()
    elif choc == '0':
        exit(0)
    else:
        # Any other input re-displays the menu.
        print("wrong input, try again")
| 1,245 | 0 | 46 |
e78c346f257ad43b5df6948913c28611c88332ec | 2,345 | py | Python | src/inference/__init__.py | DecBayComp/tramway-tour | d0ac3e3fdc2c3a6110941ab6e251e4e4fc0375a3 | [
"0BSD"
] | null | null | null | src/inference/__init__.py | DecBayComp/tramway-tour | d0ac3e3fdc2c3a6110941ab6e251e4e4fc0375a3 | [
"0BSD"
] | null | null | null | src/inference/__init__.py | DecBayComp/tramway-tour | d0ac3e3fdc2c3a6110941ab6e251e4e4fc0375a3 | [
"0BSD"
] | null | null | null | from tramway.analyzer import *
import numpy as np
import pandas as pd
from ..data import project_dir
datafile = project_dir / 'data/Image_traj.txt'
| 26.348315 | 87 | 0.637527 | from tramway.analyzer import *
import numpy as np
import pandas as pd
from ..data import project_dir
datafile = project_dir / 'data/Image_traj.txt'
def reset_data():
    """Delete the cached .rwa analysis file next to the source data, if any."""
    import os
    base, _ = os.path.splitext(str(datafile))
    rwa_path = base + '.rwa'
    try:
        os.unlink(rwa_path)
    except FileNotFoundError:
        # Nothing cached yet — that is fine.
        pass
def preset_analyzer():
    """Build an RWAnalyzer over the SPT data, crop a single ROI, tessellate and sample.

    Returns the configured analyzer and the k-NN sampling of the cropped,
    non-static translocations. NOTE(review): the center/side values appear to
    be in µm like the localization precision — confirm against the data file.
    """
    a = RWAnalyzer()
    a.spt_data.from_ascii_file(datafile)
    a.spt_data.frame_interval = .04 # in s
    a.spt_data.localization_precision = .02 # in µm
    # bounding box for space coordinates (x, y)
    center = [17.3619, 19.2112]
    side = 2.5
    # bounding box for time
    t0, t1 = 180., 480.
    bb = pd.DataFrame({
        'x min': center[0] - .5 * side,
        'x max': center[0] + .5 * side,
        'y min': center[1] - .5 * side,
        'y max': center[1] + .5 * side,
        't min': t0,
        't max': t1,
        }, index=[0])
    a.roi.from_bounding_boxes(bb)
    # Work on the single defined ROI: crop to it and drop static trajectories.
    r = single(a.roi)
    translocations = r.crop()
    translocations = r.discard_static_trajectories(translocations)
    # K-means tessellation at 0.05 resolution, then k-nearest-neighbour sampling.
    a.tesseller = tessellers.KMeans
    a.tesseller.resolution = .05
    a.sampler = sampler.Knn(10)
    assignment = a.sampler.sample(translocations)
    return a, assignment
def set_notebook_theme(theme='light'):
    """Switch matplotlib text and tick colours to white for dark notebooks.

    Does nothing unless ``theme`` is ``'dark'``.
    """
    if theme != 'dark':
        return
    import matplotlib as mpl
    # Same rcParams keys as before, all forced to white for dark backgrounds.
    for key in ('text.color', 'axes.titlecolor', 'axes.labelcolor',
                'xtick.color', 'ytick.color'):
        mpl.rcParams[key] = 'white'
def reload_movies():
    """Reload previously computed DV maps from the cached .rwa file.

    Re-declares the same analyzer configuration used when the maps were made
    (k-means tesseller at 0.1 µm, 60 s sliding window with 30 s shift, 10-NN
    sampler, stochastic DV mapper) so the stored sampling/map labels resolve.
    """
    dv_t = RWAnalyzer()
    dv_t.spt_data.from_rwa_file('../data/Image_traj.rwa')
    dv_t.spt_data.frame_interval = .04 # in s
    dv_t.spt_data.localization_precision = .02 # in µm
    dv_t.roi.from_ascii_files() # default filepath will be '../data/Image_traj-roi.txt'
    dv_t.tesseller = tessellers.KMeans
    dv_t.tesseller.resolution = .1 # in µm
    dv_t.time = time.SlidingWindow(duration=60, shift=30)
    dv_t.sampler = sampler.Knn(10)
    dv_t.mapper = models.DV(start='stochastic')
    # Labels must match those used when the analysis artifacts were saved.
    sampling_label = lambda roi_label: roi_label + ' - kmeans + 60s window'
    map_label = 'dv maps'
    r = single(dv_t.roi)
    assignment_t = r.get_sampling(sampling_label)
    dv_t_maps = assignment_t.get_child(map_label)
    return dv_t_maps
| 2,106 | 0 | 92 |
57abae4fea2a1eb0b1d9a72b06df545edebc3f4e | 1,859 | py | Python | api/tests/integration/tests/basic/reaction_instrumentation.py | tsingdao-Tp/Indigo | b2d73faebb6a450e9b3d34fed553fad4f9d0012f | [
"Apache-2.0"
] | 204 | 2015-11-06T21:34:34.000Z | 2022-03-30T16:17:01.000Z | api/tests/integration/tests/basic/reaction_instrumentation.py | tsingdao-Tp/Indigo | b2d73faebb6a450e9b3d34fed553fad4f9d0012f | [
"Apache-2.0"
] | 509 | 2015-11-05T13:54:43.000Z | 2022-03-30T22:15:30.000Z | api/tests/integration/tests/basic/reaction_instrumentation.py | tsingdao-Tp/Indigo | b2d73faebb6a450e9b3d34fed553fad4f9d0012f | [
"Apache-2.0"
] | 89 | 2015-11-17T08:22:54.000Z | 2022-03-17T04:26:28.000Z | import os
import sys
sys.path.append(os.path.normpath(os.path.join(os.path.abspath(__file__), '..', '..', '..', "common")))
from env_indigo import *
indigo = Indigo()
indigo.setOption("molfile-saving-skip-date", "1")
testReactionInstrumentation()
| 39.553191 | 102 | 0.661646 | import os
import sys
sys.path.append(os.path.normpath(os.path.join(os.path.abspath(__file__), '..', '..', '..', "common")))
from env_indigo import *
indigo = Indigo()
indigo.setOption("molfile-saving-skip-date", "1")
def testReactionInstrumentation ():
    """Exercise reaction build-up/tear-down and print the state for comparison.

    The exact print output is the regression baseline for this integration
    test, so no statements here should be reformatted or reordered.
    """
    # Build a reaction from SMILES, then add a reactant, product and catalyst.
    rxn = indigo.loadReaction("[PH5].CN>CC>")
    rxn.addReactant(indigo.loadMolecule("Oc1ccccc1"))
    rxn.addProduct(indigo.loadMolecule("n1ccccc1"))
    cat = indigo.createMolecule()
    cat.addAtom("2. acid")
    rxn.addCatalyst(cat)
    print(rxn.smiles())
    print(rxn.rxnfile())
    # Dump counts and canonical SMILES of every component.
    print("{0} reactants".format(rxn.countReactants()))
    print("{0} catalysts".format(rxn.countCatalysts()))
    print("{0} products".format(rxn.countProducts()))
    print("{0} molecules".format(rxn.countMolecules()))
    for mol in rxn.iterateMolecules():
        print("MOLECULE: " + mol.canonicalSmiles())
    for mol in rxn.iterateReactants():
        print("REACTANT: " + mol.canonicalSmiles())
    for mol in rxn.iterateCatalysts():
        print("CATALYST: " + mol.canonicalSmiles())
    for mol in rxn.iterateProducts():
        print("PRODUCT: " + mol.canonicalSmiles())
    print("\nREMOVING")
    # Remove the first reactant and first product, then dump the state again.
    rxn.iterateReactants().next().remove()
    rxn.iterateProducts().next().remove()
    print("{0} reactants".format(rxn.countReactants()))
    print("{0} catalysts".format(rxn.countCatalysts()))
    print("{0} products".format(rxn.countProducts()))
    print("{0} molecules".format(rxn.countMolecules()))
    for mol in rxn.iterateMolecules():
        print("MOLECULE: " + mol.canonicalSmiles())
    for mol in rxn.iterateReactants():
        print("REACTANT: " + mol.canonicalSmiles())
    for mol in rxn.iterateCatalysts():
        print("CATALYST: " + mol.canonicalSmiles())
    for mol in rxn.iterateProducts():
        print("PRODUCT: " + mol.canonicalSmiles())
# Run the test when the module is executed.
testReactionInstrumentation()
| 1,581 | 0 | 23 |
b69f0ff0899bd61bd2f6bd0ca491ed121c2beea4 | 325 | py | Python | exercicios_pyhton/leia_int_leia_fload/teste.py | wcalazans81/cursoemvideo_de_python | b3f093477407d4936b87907910c85472fea98469 | [
"MIT"
] | null | null | null | exercicios_pyhton/leia_int_leia_fload/teste.py | wcalazans81/cursoemvideo_de_python | b3f093477407d4936b87907910c85472fea98469 | [
"MIT"
] | null | null | null | exercicios_pyhton/leia_int_leia_fload/teste.py | wcalazans81/cursoemvideo_de_python | b3f093477407d4936b87907910c85472fea98469 | [
"MIT"
] | null | null | null | try:
a = int(input('digite um valor'))
b = int(input('di'))
r = a / b
except (ValueError, TypeError):
print('Houve um problema com dados digitados!')
except KeyboardInterrupt:
print('preferio não continuar')
else:
print(f'o resultado é {r:.2f:}')
finally:
print('Volte sempre! Muito obrigado!')
| 21.666667 | 51 | 0.643077 | try:
a = int(input('digite um valor'))
b = int(input('di'))
r = a / b
except (ValueError, TypeError):
print('Houve um problema com dados digitados!')
except KeyboardInterrupt:
print('preferio não continuar')
else:
print(f'o resultado é {r:.2f:}')
finally:
print('Volte sempre! Muito obrigado!')
| 0 | 0 | 0 |
b29906242948a9ff452fe77938a8bdba321cf1c2 | 3,124 | py | Python | Core/bram.py | gabrieloandco/RiscV-Arqui1 | 6495370d23d3a7e9e1f579a1b4e8c1be799c3913 | [
"MIT"
] | null | null | null | Core/bram.py | gabrieloandco/RiscV-Arqui1 | 6495370d23d3a7e9e1f579a1b4e8c1be799c3913 | [
"MIT"
] | null | null | null | Core/bram.py | gabrieloandco/RiscV-Arqui1 | 6495370d23d3a7e9e1f579a1b4e8c1be799c3913 | [
"MIT"
] | null | null | null | from myhdl import *
from hex_reader import *
@block
def BRAM(clk,addr,di,enstore,we,re,do,done,
A_WIDTH=5,
W_WIDTH=32,B_WIDTH=8):
"""
"""
assert len(addr) == A_WIDTH, "Error: Address width mismatch."
if do is not None:
assert len(di) == len(do) == W_WIDTH, "Error: Data width mismatch in portA."
arrayblock1, arrayblock2, arrayblock3, arrayblock4 = Reader(A_WIDTH)
ramregsblock1 = [Signal(modbv(arrayblock1[i])[B_WIDTH:]) for i in range(0, 2**A_WIDTH)]
ramregsblock2 = [Signal(modbv(arrayblock2[i])[B_WIDTH:]) for i in range(0, 2**A_WIDTH)]
ramregsblock3 = [Signal(modbv(arrayblock3[i])[B_WIDTH:]) for i in range(0, 2**A_WIDTH)]
ramregsblock4 = [Signal(modbv(arrayblock4[i])[B_WIDTH:]) for i in range(0, 2**A_WIDTH)]
#for addr in range(A_WIDTH):
# ramregsblock1.append(Signal(modbv(arrayblock1[addr])[B_WIDTH:]))
# ramregsblock2.append(Signal(modbv(arrayblock2[addr])[B_WIDTH:]))
# ramregsblock3.append(Signal(modbv(arrayblock3[addr])[B_WIDTH:])
# ramregsblock4[addr] = Signal(modbv(arrayblock4[addr])[B_WIDTH:])
@always(clk.posedge)
@always(clk.negedge)
@always(clk.posedge)
@always(clk.negedge)
return instances()
| 30.330097 | 100 | 0.660691 | from myhdl import *
from hex_reader import *
@block
def BRAM(clk,addr,di,enstore,we,re,do,done,
         A_WIDTH=5,
         W_WIDTH=32,B_WIDTH=8):
    """
    Byte-enabled single-port block RAM.

    The W_WIDTH-bit word is stored as four B_WIDTH-bit banks. ``enstore`` is a
    4-bit byte-enable mask (bit i selects bank i+1); stores always take their
    data from the low bytes of ``di``, routed to the enabled lanes in order.
    Writes happen on the clock posedge, reads on the negedge.

    BUGFIX vs the previous version:
      * the enstore == 14 ('1110') branch duplicated the == 7 body and wrote
        lanes 1-3 instead of lanes 2-4;
      * slices di[25:16] and di[32:25] were 9 and 7 bits wide — now di[24:16]
        and di[32:24], one byte each.
    """
    assert len(addr) == A_WIDTH, "Error: Address width mismatch."
    if do is not None:
        assert len(di) == len(do) == W_WIDTH, "Error: Data width mismatch in portA."
    # Initial memory contents from the hex reader, one array per byte lane.
    arrayblock1, arrayblock2, arrayblock3, arrayblock4 = Reader(A_WIDTH)
    ramregsblock1 = [Signal(modbv(arrayblock1[i])[B_WIDTH:]) for i in range(0, 2**A_WIDTH)]
    ramregsblock2 = [Signal(modbv(arrayblock2[i])[B_WIDTH:]) for i in range(0, 2**A_WIDTH)]
    ramregsblock3 = [Signal(modbv(arrayblock3[i])[B_WIDTH:]) for i in range(0, 2**A_WIDTH)]
    ramregsblock4 = [Signal(modbv(arrayblock4[i])[B_WIDTH:]) for i in range(0, 2**A_WIDTH)]

    @always(clk.posedge)
    def write():
        # Route the low bytes of di to the enabled byte lanes.
        if we==1:
            if enstore == 1:#'0001':
                ramregsblock1[addr].next=di[8:0]
            elif enstore == 2:#'0010':
                ramregsblock2[addr].next=di[8:0]
            elif enstore == 4:#'0100':
                ramregsblock3[addr].next=di[8:0]
            elif enstore == 8:#'1000':
                ramregsblock4[addr].next=di[8:0]
            elif enstore == 3:#'0011':
                ramregsblock1[addr].next=di[8:0]
                ramregsblock2[addr].next=di[16:8]
            elif enstore == 12: #'1100':
                ramregsblock3[addr].next=di[8:0]
                ramregsblock4[addr].next=di[16:8]
            elif enstore == 5:#'0101':
                ramregsblock1[addr].next=di[8:0]
                ramregsblock3[addr].next=di[16:8]
            elif enstore == 10:#'1010':
                ramregsblock2[addr].next=di[8:0]
                ramregsblock4[addr].next=di[16:8]
            elif enstore == 9:#'1001':
                ramregsblock1[addr].next=di[8:0]
                ramregsblock4[addr].next=di[16:8]
            elif enstore == 6:#'0110':
                ramregsblock2[addr].next=di[8:0]
                ramregsblock3[addr].next=di[16:8]
            elif enstore == 7:#'0111':
                ramregsblock1[addr].next=di[8:0]
                ramregsblock2[addr].next=di[16:8]
                ramregsblock3[addr].next=di[24:16]
            elif enstore == 14:#'1110': lanes 2-4 (was wrongly lanes 1-3)
                ramregsblock2[addr].next=di[8:0]
                ramregsblock3[addr].next=di[16:8]
                ramregsblock4[addr].next=di[24:16]
            elif enstore == 11:#'1011':
                ramregsblock1[addr].next=di[8:0]
                ramregsblock2[addr].next=di[16:8]
                ramregsblock4[addr].next=di[24:16]
            elif enstore == 13:#'1101':
                ramregsblock1[addr].next=di[8:0]
                ramregsblock3[addr].next=di[16:8]
                ramregsblock4[addr].next=di[24:16]
            elif enstore == 15:#'1111':
                ramregsblock1[addr].next=di[8:0]
                ramregsblock2[addr].next=di[16:8]
                ramregsblock3[addr].next=di[24:16]
                ramregsblock4[addr].next=di[32:24]

    @always(clk.negedge)
    def read():
        # Assemble the full word from the four byte lanes; drive 0 when idle.
        if re==1:
            do.next = concat(ramregsblock4[addr],ramregsblock3[addr],ramregsblock2[addr],ramregsblock1[addr])
        if re==0:
            do.next = 0

    @always(clk.posedge)
    def ramnotdone():
        done.next = 0

    @always(clk.negedge)
    def ramdone():
        # NOTE(review): 'done' is cleared every posedge and toggled on the
        # negedge after an access — confirm downstream treats it as a pulse.
        if re ==1 or we==1:
            done.next = not done

    return instances()
| 1,846 | 0 | 92 |
2913f9c1c1150f1df6a7a978fb35f0784fed07d3 | 970 | py | Python | mydays/homeinventory-orm/data_models.py | feoh/100daysofcode-with-python-course | 35ee1d8181b80e5bcb28af59e8d81e90095ee784 | [
"MIT"
] | null | null | null | mydays/homeinventory-orm/data_models.py | feoh/100daysofcode-with-python-course | 35ee1d8181b80e5bcb28af59e8d81e90095ee784 | [
"MIT"
] | null | null | null | mydays/homeinventory-orm/data_models.py | feoh/100daysofcode-with-python-course | 35ee1d8181b80e5bcb28af59e8d81e90095ee784 | [
"MIT"
] | null | null | null | from sqlalchemy import Column, Integer, String, ForeignKey
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import relationship
ModelBase = declarative_base()
| 30.3125 | 77 | 0.689691 | from sqlalchemy import Column, Integer, String, ForeignKey
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import relationship
ModelBase = declarative_base()
class Room(ModelBase):
    """A room of the house; parent of the Item records stored in it."""
    __tablename__ = "rooms"
    # Surrogate primary key.
    id = Column(Integer, primary_key=True)
    # Human-readable room name (required).
    name = Column(String, nullable=False)
    # Optional free-text description.
    description = Column(String)
    # One-to-many link to Item; each Item gains a `.room` backref.
    items = relationship("Item", backref="room")
    def __repr__(self):
        return f"Room Name: {self.name} Room Description: {self.description}"
class Item(ModelBase):
    """A single inventoried item belonging to a Room."""
    __tablename__ = "items"
    # Surrogate primary key.
    id = Column(Integer, primary_key=True)
    # Item name (required).
    name = Column(String, nullable=False)
    # Monetary value; NOTE(review): stored as an Integer — confirm the unit
    # (whole currency vs cents) with the callers.
    value = Column(Integer, nullable=False)
    # Optional free-text description.
    description = Column(String)
    # Foreign key to the containing room; `.room` comes from Room.items backref.
    room_id = Column(Integer, ForeignKey('rooms.id'))
    def __repr__(self):
        # The triple-quoted string deliberately keeps leading newline/indent.
        return f"""
        Item Name: {self.name}
        Item Value: {self.value}
        Item Description: {self.description}
        Location: {self.room.name}"""
| 241 | 495 | 46 |
b6697cf64cf136ce5293e73907be72df94015a5a | 3,006 | py | Python | atomdata.py | fsponciano/HPB_tools | 845e3fdf2cc0c1b3b93123ea0ca718c5b8406cf3 | [
"MIT"
] | null | null | null | atomdata.py | fsponciano/HPB_tools | 845e3fdf2cc0c1b3b93123ea0ca718c5b8406cf3 | [
"MIT"
] | null | null | null | atomdata.py | fsponciano/HPB_tools | 845e3fdf2cc0c1b3b93123ea0ca718c5b8406cf3 | [
"MIT"
] | null | null | null | """ --- Atom data ---
Written by D. J. Whiting (2017)
** Modified by F. S. Ponciano Ojeda (2021) **
Quantum Light & Matter Group, Durham University, UK
Simple database for housing atomic species data for use in calculations of
energy levels.
"""
def atomic_structure_coefficients(atom,I,L,J):
    '''
    Return (A_fs, A_hfs, B_hfs, gI, gL) for the requested atomic state.

    Currently only contains information for Rb states with n=5, l=0,1,2
    and Cs states with n=6, l=0,1,2. Further data to be added...

    atom : 'Rb' or 'Cs'
    I    : nuclear spin (3/2 selects 87Rb, 5/2 selects 85Rb; Cs ignores I)
    L    : orbital angular momentum quantum number (0, 1 or 2)
    J    : total electronic angular momentum quantum number

    Raises ValueError for any combination not in the table (the previous
    version raised UnboundLocalError on the return line instead).
    '''
    # Sentinels so an unmatched combination is detected explicitly below.
    A_fs = A_hfs = B_hfs = gI = gL = None
    if atom == 'Rb':
        if int(2*I+1e-10) == 3: # Rb 87
            gI = -0.0009951414
            gL = 0.99999369
            if L == 0:
                A_fs = 0
                A_hfs = 3417.34130545215e6
                B_hfs = 0
            elif L == 1:
                A_fs = 7.123e12*2/3 # (2*S+1)/(2*L+1) or 1/(S+L)
                if int(2*J+1e-10) == 1:
                    A_hfs = 406.147e6
                    B_hfs = 0
                elif int(2*J+1e-10) == 3:
                    A_hfs = 84.7185e6
                    B_hfs = 12.4965e6
            elif L == 2:
                A_fs = 88.905185e9*2/5 # (2*S+1)/(2*L+1) or 1/(S+L)
                if int(2*J+1e-10) == 3:
                    A_hfs = 14.4303e6
                    B_hfs = 0.9320e6
                elif int(2*J+1e-10) == 5:
                    A_hfs = -7.4605e6
                    B_hfs = 1.2713e6
        elif int(2*I+1e-10) == 5: # Rb 85
            gI = -0.0002936400
            gL = 0.99999354
            if L == 0:
                A_fs = 0
                A_hfs = 1011.9108130e6
                B_hfs = 0
            elif L == 1:
                A_fs = 7.123e12*2/3
                if int(2*J+1e-10) == 1:
                    A_hfs = 120.527e6
                    B_hfs = 0
                elif int(2*J+1e-10) == 3:
                    A_hfs = 25.0020e6
                    B_hfs = 25.790e6
            elif L == 2:
                A_fs = 88.905185e9*2/5
                if int(2*J+1e-10) == 3:
                    A_hfs = 4.2699e6
                    B_hfs = 1.9106e6
                elif int(2*J+1e-10) == 5:
                    A_hfs = -2.2112e6
                    B_hfs = 2.6804e6
    elif atom == 'Cs':  # single stable isotope (I = 7/2), so I is not checked
        gI = -0.00039885395
        gL = 0.99999587
        if L == 0:
            A_fs = 0
            A_hfs = 2298.1579425e6
            B_hfs = 0
        elif L == 1:
            A_fs = 16.60966969094428436e12*2/3 # (2*S+1)/(2*L+1) or 1/(S+L)
            if int(2*J+1e-10) == 1:
                A_hfs = 291.9201e6
                B_hfs = 0
            elif int(2*J+1e-10) == 3:
                A_hfs = 50.28827e6
                B_hfs = -0.4934e6
                C_hfs = 0.560e3  # NOTE: assigned but not returned (as before)
        elif L == 2:
            A_fs = 1.285060371217e12*2/5 # (2*S+1)/(2*L+1) or 1/(S+L)
            if int(2*J+1e-10) == 3:
                A_hfs = 16.34e6
                B_hfs = -0.1e6
            elif int(2*J+1e-10) == 5:
                A_hfs = -4.66e6
                B_hfs = 0.9e6
    if None in (A_fs, A_hfs, B_hfs, gI, gL):
        raise ValueError(
            'no data for atom={!r}, I={}, L={}, J={}'.format(atom, I, L, J))
    return A_fs,A_hfs,B_hfs,gI,gL
| 33.032967 | 78 | 0.401863 | """ --- Atom data ---
Written by D. J. Whiting (2017)
** Modified by F. S. Ponciano Ojeda (2021) **
Quantum Light & Matter Group, Durham University, UK
Simple database for housing atomic species data for use in calculations of
energy levels.
"""
def atomic_structure_coefficients(atom,I,L,J):
    '''
    Return (A_fs, A_hfs, B_hfs, gI, gL) for the requested atomic state.

    Currently only contains information for Rb states with n=5, l=0,1,2
    and Cs states with n=6, l=0,1,2. Further data to be added...

    atom : 'Rb' or 'Cs'
    I    : nuclear spin (3/2 selects 87Rb, 5/2 selects 85Rb; Cs ignores I)
    L    : orbital angular momentum quantum number (0, 1 or 2)
    J    : total electronic angular momentum quantum number

    Raises ValueError for any combination not in the table (the previous
    version raised UnboundLocalError on the return line instead).
    '''
    # Sentinels so an unmatched combination is detected explicitly below.
    A_fs = A_hfs = B_hfs = gI = gL = None
    if atom == 'Rb':
        if int(2*I+1e-10) == 3: # Rb 87
            gI = -0.0009951414
            gL = 0.99999369
            if L == 0:
                A_fs = 0
                A_hfs = 3417.34130545215e6
                B_hfs = 0
            elif L == 1:
                A_fs = 7.123e12*2/3 # (2*S+1)/(2*L+1) or 1/(S+L)
                if int(2*J+1e-10) == 1:
                    A_hfs = 406.147e6
                    B_hfs = 0
                elif int(2*J+1e-10) == 3:
                    A_hfs = 84.7185e6
                    B_hfs = 12.4965e6
            elif L == 2:
                A_fs = 88.905185e9*2/5 # (2*S+1)/(2*L+1) or 1/(S+L)
                if int(2*J+1e-10) == 3:
                    A_hfs = 14.4303e6
                    B_hfs = 0.9320e6
                elif int(2*J+1e-10) == 5:
                    A_hfs = -7.4605e6
                    B_hfs = 1.2713e6
        elif int(2*I+1e-10) == 5: # Rb 85
            gI = -0.0002936400
            gL = 0.99999354
            if L == 0:
                A_fs = 0
                A_hfs = 1011.9108130e6
                B_hfs = 0
            elif L == 1:
                A_fs = 7.123e12*2/3
                if int(2*J+1e-10) == 1:
                    A_hfs = 120.527e6
                    B_hfs = 0
                elif int(2*J+1e-10) == 3:
                    A_hfs = 25.0020e6
                    B_hfs = 25.790e6
            elif L == 2:
                A_fs = 88.905185e9*2/5
                if int(2*J+1e-10) == 3:
                    A_hfs = 4.2699e6
                    B_hfs = 1.9106e6
                elif int(2*J+1e-10) == 5:
                    A_hfs = -2.2112e6
                    B_hfs = 2.6804e6
    elif atom == 'Cs':  # single stable isotope (I = 7/2), so I is not checked
        gI = -0.00039885395
        gL = 0.99999587
        if L == 0:
            A_fs = 0
            A_hfs = 2298.1579425e6
            B_hfs = 0
        elif L == 1:
            A_fs = 16.60966969094428436e12*2/3 # (2*S+1)/(2*L+1) or 1/(S+L)
            if int(2*J+1e-10) == 1:
                A_hfs = 291.9201e6
                B_hfs = 0
            elif int(2*J+1e-10) == 3:
                A_hfs = 50.28827e6
                B_hfs = -0.4934e6
                C_hfs = 0.560e3  # NOTE: assigned but not returned (as before)
        elif L == 2:
            A_fs = 1.285060371217e12*2/5 # (2*S+1)/(2*L+1) or 1/(S+L)
            if int(2*J+1e-10) == 3:
                A_hfs = 16.34e6
                B_hfs = -0.1e6
            elif int(2*J+1e-10) == 5:
                A_hfs = -4.66e6
                B_hfs = 0.9e6
    if None in (A_fs, A_hfs, B_hfs, gI, gL):
        raise ValueError(
            'no data for atom={!r}, I={}, L={}, J={}'.format(atom, I, L, J))
    return A_fs,A_hfs,B_hfs,gI,gL
| 0 | 0 | 0 |
d1351fb72c06084c084972a6d94ce10a496fcff1 | 1,023 | py | Python | etc/schema/005-ssh-key-parts.py | mikeboers/Spoon | 9fe4a06be7c2c6c307b79e72893e32f2006de4ea | [
"BSD-3-Clause"
] | 4 | 2017-11-05T02:54:39.000Z | 2022-03-01T06:01:20.000Z | etc/schema/005-ssh-key-parts.py | mikeboers/Spoon | 9fe4a06be7c2c6c307b79e72893e32f2006de4ea | [
"BSD-3-Clause"
] | null | null | null | etc/schema/005-ssh-key-parts.py | mikeboers/Spoon | 9fe4a06be7c2c6c307b79e72893e32f2006de4ea | [
"BSD-3-Clause"
] | null | null | null | import sqlalchemy as sa
| 25.575 | 90 | 0.5826 | import sqlalchemy as sa
def upgrade(engine):
    """Rebuild the ssh_keys table with the key split into type/data/comment.

    Existing rows (which stored the whole 'type data comment' line in a single
    column) are read out first, the old table is dropped, the new schema is
    created, and the saved rows are re-inserted split into three fields.
    """
    meta = sa.MetaData(bind=engine)
    meta.reflect()
    existing = []
    if 'ssh_keys' in meta.tables:
        old = meta.tables['ssh_keys']
        # Snapshot the rows we need to migrate before dropping the table.
        for row in old.select().execute():
            existing.append((row.account_id, row.data))
        old.drop()
    # Re-reflect so the new Table definition does not clash with the old one.
    meta = sa.MetaData(bind=engine)
    meta.reflect()
    new = sa.Table('ssh_keys', meta,
        sa.Column('id', sa.Integer, primary_key=True),
        sa.Column('account_id', sa.Integer, sa.ForeignKey('accounts.id'), nullable=False),
        sa.Column('type', sa.String, nullable=False),
        sa.Column('data', sa.String, nullable=False),
        sa.Column('comment', sa.String, nullable=False),
    )
    new.create()
    for account_id, encoded in existing:
        # An OpenSSH public key line is 'type base64data comment...'; split on
        # the first two whitespace runs so the comment may contain spaces.
        type_, data, comment = encoded.split(None, 2)
        new.insert().execute(
            account_id=account_id,
            type=type_.strip(),
            data=data.strip(),
            comment=comment.strip(),
        )
| 973 | 0 | 23 |
428c88aaaa087a1067808155b7ff19cff4afa1c3 | 5,440 | py | Python | Stephen/mlp_cg.py | garibaldu/multicauseRBM | f64f54435f23d04682ac7c15f895a1cf470c51e8 | [
"MIT"
] | null | null | null | Stephen/mlp_cg.py | garibaldu/multicauseRBM | f64f54435f23d04682ac7c15f895a1cf470c51e8 | [
"MIT"
] | null | null | null | Stephen/mlp_cg.py | garibaldu/multicauseRBM | f64f54435f23d04682ac7c15f895a1cf470c51e8 | [
"MIT"
] | null | null | null | import numpy as np
import scipy.optimize as so
class mlp_cg:
""" A Multi-Layer Perceptron"""
def __init__(self,inputs,targets,nhidden,beta=1,momentum=0.9,outtype='logistic'):
""" Constructor """
# Set up network size
self.nin = np.shape(inputs)[1]
self.nout = np.shape(targets)[1]
self.ndata = np.shape(inputs)[0]
self.nhidden = nhidden
self.beta = beta
self.momentum = momentum
self.outtype = outtype
# Initialise network
self.weights1 = (np.random.rand(self.nin+1,self.nhidden)-0.5)*2/np.sqrt(self.nin)
self.weights2 = (np.random.rand(self.nhidden+1,self.nout)-0.5)*2/np.sqrt(self.nhidden)
def mlptrain(self,inputs,targets,niterations=100):
""" Train the thing """
# Add the inputs that match the bias node
inputs = np.concatenate((inputs,-np.ones((self.ndata,1))),axis=1)
# Put all the weights into a single row vector
w = np.concatenate((self.weights1.flatten(),self.weights2.flatten()))
#out = so.fmin_cg(self.mlperror, w, fprime=None, args=(inputs,targets), gtol=1e-05, maxiter=5000, full_output=True, disp=1)
out = so.fmin_cg(self.mlperror, w, fprime=self.mlpgrad, args=(inputs,targets), gtol=1e-05, maxiter=10000, full_output=True, disp=1)
wopt = out[0]
# Put the updated weights back into the matrices
split = (self.nin+1)*self.nhidden
self.weights1 = np.reshape(wopt[:split],(self.nin+1,self.nhidden))
self.weights2 = np.reshape(wopt[split:],(self.nhidden+1,self.nout))
def mlpfwd(self,inputs):
""" Run the network forward """
self.hidden = np.dot(inputs,self.weights1);
self.hidden = 1.0/(1.0+np.exp(-self.beta*self.hidden))
self.hidden = np.concatenate((self.hidden,-np.ones((np.shape(inputs)[0],1))),axis=1)
outputs = np.dot(self.hidden,self.weights2);
# Different types of output neurons
if self.outtype == 'linear':
return outputs
elif self.outtype == 'logistic':
return 1.0/(1.0+np.exp(-self.beta*outputs))
elif self.outtype == 'softmax':
normalisers = np.sum(np.exp(outputs),axis=1)*np.ones((1,np.shape(outputs)[0]))
return np.transpose(np.transpose(np.exp(outputs))/normalisers)
else:
print "error"
def confmat(self,inputs,targets):
"""Confusion matrix"""
# Add the inputs that match the bias node
inputs = np.concatenate((inputs,-np.ones((np.shape(inputs)[0],1))),axis=1)
outputs = self.mlpfwd(inputs)
nclasses = np.shape(targets)[1]
if nclasses==1:
nclasses = 2
outputs = np.where(outputs>0.5,1,0)
else:
# 1-of-N encoding
outputs = np.argmax(outputs,1)
targets = np.argmax(targets,1)
cm = np.zeros((nclasses,nclasses))
for i in range(nclasses):
for j in range(nclasses):
cm[i,j] = np.sum(np.where(outputs==i,1,0)*np.where(targets==j,1,0))
print "Confusion matrix is:"
print cm
print "Percentage Correct: ",np.trace(cm)/np.sum(cm)*100
| 38.309859 | 132 | 0.632169 | import numpy as np
import scipy.optimize as so
# NOTE: Python 2 code (print statements); trains the MLP with SciPy's
# conjugate-gradient optimizer instead of hand-rolled backprop updates.
class mlp_cg:
    """ A Multi-Layer Perceptron"""
    def __init__(self,inputs,targets,nhidden,beta=1,momentum=0.9,outtype='logistic'):
        """ Constructor """
        # Set up network size
        self.nin = np.shape(inputs)[1]
        self.nout = np.shape(targets)[1]
        self.ndata = np.shape(inputs)[0]
        self.nhidden = nhidden
        self.beta = beta
        self.momentum = momentum
        self.outtype = outtype
        # Initialise network
        self.weights1 = (np.random.rand(self.nin+1,self.nhidden)-0.5)*2/np.sqrt(self.nin)
        self.weights2 = (np.random.rand(self.nhidden+1,self.nout)-0.5)*2/np.sqrt(self.nhidden)
    def mlperror(self,weights,inputs,targets):
        # Objective for fmin_cg: unpack the flat weight vector, run a forward
        # pass, and return the loss matching the output type.
        split = (self.nin+1)*self.nhidden
        self.weights1 = np.reshape(weights[:split],(self.nin+1,self.nhidden))
        self.weights2 = np.reshape(weights[split:],(self.nhidden+1,self.nout))
        outputs = self.mlpshortfwd(inputs)
        # Compute the error
        # Different types of output neurons
        if self.outtype == 'linear':
            error = 0.5*np.sum((outputs-targets)**2)
        elif self.outtype == 'logistic':
            # Non-zero checks
            maxval = -np.log(np.finfo(np.float64).eps)
            minval = -np.log(1./np.finfo(np.float64).tiny - 1.)
            outputs = np.where(outputs<maxval,outputs,maxval)
            outputs = np.where(outputs>minval,outputs,minval)
            outputs = 1./(1. + np.exp(-outputs))
            error = - np.sum(targets*np.log(outputs) + (1 - targets)*np.log(1 - outputs))
        elif self.outtype == 'softmax':
            nout = np.shape(outputs)[1]
            maxval = np.log(np.finfo(np.float64).max) - np.log(nout)
            minval = np.log(np.finfo(np.float32).tiny)
            outputs = np.where(outputs<maxval,outputs,maxval)
            outputs = np.where(outputs>minval,outputs,minval)
            normalisers = np.sum(np.exp(outputs),axis=1)*np.ones((1,np.shape(outputs)[0]))
            y = np.transpose(np.transpose(np.exp(outputs))/normalisers)
            y[y<np.finfo(np.float64).tiny] = np.finfo(np.float32).tiny
            error = - np.sum(targets*np.log(y));
        else:
            print "error"
        return error
    def mlpgrad(self,weights,inputs,targets):
        # Gradient for fmin_cg, same flat-vector layout as mlperror.
        # NOTE(review): mlpfwd appends the bias column LAST, yet the gradient
        # drops the FIRST row/column (weights2[1:,:], hidden[:,1:]) — and
        # (1 - h*h) is the tanh derivative while the activation is logistic.
        # Both look inconsistent; verify against the intended derivation.
        split = (self.nin+1)*self.nhidden
        self.weights1 = np.reshape(weights[:split],(self.nin+1,self.nhidden))
        self.weights2 = np.reshape(weights[split:],(self.nhidden+1,self.nout))
        outputs = self.mlpfwd(inputs)
        delta_out = outputs-targets
        grad_weights2 = np.dot(self.hidden.T,delta_out)
        delta_hid = np.dot(delta_out,self.weights2[1:,:].T)
        delta_hid *= (1. - self.hidden[:,1:]*self.hidden[:,1:])
        grad_weights1 = np.dot(inputs.T,delta_hid)
        return np.concatenate((grad_weights1.flatten(),grad_weights2.flatten()))
    def mlptrain(self,inputs,targets,niterations=100):
        """ Train the thing """
        # Add the inputs that match the bias node
        inputs = np.concatenate((inputs,-np.ones((self.ndata,1))),axis=1)
        # Put all the weights into a single row vector
        w = np.concatenate((self.weights1.flatten(),self.weights2.flatten()))
        #out = so.fmin_cg(self.mlperror, w, fprime=None, args=(inputs,targets), gtol=1e-05, maxiter=5000, full_output=True, disp=1)
        out = so.fmin_cg(self.mlperror, w, fprime=self.mlpgrad, args=(inputs,targets), gtol=1e-05, maxiter=10000, full_output=True, disp=1)
        wopt = out[0]
        # Put the updated weights back into the matrices
        split = (self.nin+1)*self.nhidden
        self.weights1 = np.reshape(wopt[:split],(self.nin+1,self.nhidden))
        self.weights2 = np.reshape(wopt[split:],(self.nhidden+1,self.nout))
    def mlpfwd(self,inputs):
        """ Run the network forward """
        self.hidden = np.dot(inputs,self.weights1);
        self.hidden = 1.0/(1.0+np.exp(-self.beta*self.hidden))
        self.hidden = np.concatenate((self.hidden,-np.ones((np.shape(inputs)[0],1))),axis=1)
        outputs = np.dot(self.hidden,self.weights2);
        # Different types of output neurons
        if self.outtype == 'linear':
            return outputs
        elif self.outtype == 'logistic':
            return 1.0/(1.0+np.exp(-self.beta*outputs))
        elif self.outtype == 'softmax':
            normalisers = np.sum(np.exp(outputs),axis=1)*np.ones((1,np.shape(outputs)[0]))
            return np.transpose(np.transpose(np.exp(outputs))/normalisers)
        else:
            print "error"
    def mlpshortfwd(self,inputs):
        # Forward pass that stops at the output pre-activations; mlperror
        # applies the output nonlinearity itself with over/underflow clamping.
        self.hidden = np.dot(inputs,self.weights1);
        self.hidden = 1.0/(1.0+np.exp(-self.beta*self.hidden))
        self.hidden = np.concatenate((self.hidden,-np.ones((np.shape(inputs)[0],1))),axis=1)
        return np.dot(self.hidden,self.weights2);
    def confmat(self,inputs,targets):
        """Confusion matrix"""
        # Add the inputs that match the bias node
        inputs = np.concatenate((inputs,-np.ones((np.shape(inputs)[0],1))),axis=1)
        outputs = self.mlpfwd(inputs)
        nclasses = np.shape(targets)[1]
        if nclasses==1:
            # Single output column: threshold at 0.5 for two-class problems.
            nclasses = 2
            outputs = np.where(outputs>0.5,1,0)
        else:
            # 1-of-N encoding
            outputs = np.argmax(outputs,1)
            targets = np.argmax(targets,1)
        cm = np.zeros((nclasses,nclasses))
        for i in range(nclasses):
            for j in range(nclasses):
                cm[i,j] = np.sum(np.where(outputs==i,1,0)*np.where(targets==j,1,0))
        print "Confusion matrix is:"
        print cm
        print "Percentage Correct: ",np.trace(cm)/np.sum(cm)*100
| 2,187 | 0 | 81 |
7ceed2a1f72286660c32d46a8d902c63ac35caf7 | 2,596 | py | Python | solar_system/main.py | 0xzhang/taichi-play | f3285f3db235035bf5cff39938e546138317cd70 | [
"MIT"
] | 4 | 2021-10-02T07:12:14.000Z | 2022-02-17T05:51:12.000Z | solar_system/main.py | 0xzhang/taichi-play | f3285f3db235035bf5cff39938e546138317cd70 | [
"MIT"
] | null | null | null | solar_system/main.py | 0xzhang/taichi-play | f3285f3db235035bf5cff39938e546138317cd70 | [
"MIT"
] | 1 | 2022-02-17T05:51:11.000Z | 2022-02-17T05:51:11.000Z | import taichi as ti
from solar_system import SolarSystem, Sun, Planet
from datetime import datetime
ti.init(arch=ti.gpu)
if __name__ == "__main__":
main() | 28.844444 | 77 | 0.607858 | import taichi as ti
from solar_system import SolarSystem, Sun, Planet
from datetime import datetime
ti.init(arch=ti.gpu)
def init_solarsystem():
    """Build the SolarSystem scene: Sun plus the four inner planets.

    Planet constructor arguments are display radii; initialize() takes an RGB
    colour vector, a position and a velocity. NOTE(review): the position and
    velocity magnitudes look like heliocentric ephemeris values (AU and AU/day)
    for the 2018-01-01 epoch set below — confirm against the data source.
    """
    sun = Sun(0.2)
    ss = SolarSystem(sun)
    sun.initialize(ti.Vector([1, 0, 0]))
    # 2018-01-01
    ss.date = datetime.strptime("2018-01-01", '%Y-%m-%d').date()
    mercury = Planet(0.03)
    pos = ti.Vector(
        [-0.3877081979511674, -0.0077847346908167, 0.03493213369519331])
    vel = ti.Vector(
        [-0.005288319535531131, -0.02691956351115996, -0.001714528496530611])
    mercury.initialize(ti.Vector([1, 1, 1]), pos, vel)
    ss.add_planet(mercury)
    venus = Planet(0.1)
    pos = ti.Vector(
        [0.0711289554079218, -0.7236895081570862, -0.01403169883793853])
    vel = ti.Vector(
        [0.01999285129672666, 0.001906111736867988, -0.001127570469009755])
    venus.initialize(ti.Vector([1, 1, 0]), pos, vel)
    ss.add_planet(venus)
    earth = Planet(0.1)
    pos = ti.Vector(
        [-0.1752173047680441, 0.9675921579569661, -4.003213272786273e-05])
    vel = ti.Vector(
        [-0.01720893715482063, -0.003129664035086283, 1.997298538006543e-07])
    earth.initialize(ti.Vector([0, 1, 2]), pos, vel)
    ss.add_planet(earth)
    mars = Planet(0.06)
    pos = ti.Vector(
        [-1.583672712003512, -0.389028323418006, 0.03071411326349248])
    vel = ti.Vector(
        [0.003860856732981549, -0.01239426308040891, -0.0003544723155743041])
    mars.initialize(ti.Vector([2, 1, 0]), pos, vel)
    ss.add_planet(mars)
    return ss
def main():
    """Run the interactive taichi GGUI loop: SPACE toggles motion, ESC quits."""
    ss = init_solarsystem()
    window = ti.ui.Window('Solar System', (800, 600), vsync=True)
    canvas = window.get_canvas()
    scene = ti.ui.Scene()
    camera = ti.ui.make_camera()
    # Fixed camera looking at the origin from below the ecliptic plane.
    camera.position(0.0, -10.0, 5.0)
    camera.lookat(0.0, 0.0, 0.0)
    camera.fov(20)
    id_frame = 0
    pause_flag = True  # simulation starts paused
    print("Press SPACE start move.")
    while window.running:
        # Small overlay panel with the simulated date and frame counter.
        window.GUI.begin("Statistics", 0.03, 0.05, 0.2, 0.12)
        window.GUI.text("Date: " + ss.get_date())
        window.GUI.text("Frame: " + str(id_frame))
        window.GUI.end()
        for e in window.get_events(ti.ui.PRESS):
            if e.key == ti.ui.ESCAPE:
                exit()
            elif e.key == ti.ui.SPACE:
                pause_flag = not pause_flag
        if not pause_flag:
            # Advance the simulation one step per rendered frame.
            id_frame += 1
            ss.update(0.1)
        scene.set_camera(camera)
        ss.display(scene)
        scene.point_light(pos=(0.0, -5.0, 5.0), color=(0.5, 0.5, 0.5))
        canvas.scene(scene)
        window.show()
if __name__ == "__main__":
main() | 2,387 | 0 | 46 |
848dfae50f9748a1ee75a25434d9657abe428eeb | 486 | py | Python | MTG/controller/ilayercontroller.py | CODE-MTD/OpenMTD | d2a2613260a8eb88d0fb223eebc36d66ea712890 | [
"MIT"
] | null | null | null | MTG/controller/ilayercontroller.py | CODE-MTD/OpenMTD | d2a2613260a8eb88d0fb223eebc36d66ea712890 | [
"MIT"
] | null | null | null | MTG/controller/ilayercontroller.py | CODE-MTD/OpenMTD | d2a2613260a8eb88d0fb223eebc36d66ea712890 | [
"MIT"
] | null | null | null | import abc
from controller.itranslator import ITranslator
| 20.25 | 50 | 0.602881 | import abc
from controller.itranslator import ITranslator
class ILayerController(ITranslator):
    """Interface for controllers that decide the fate of packets on protocol layers.

    Extends ITranslator; concrete subclasses must implement process_packet()
    and layers().
    NOTE(review): @abc.abstractmethod is only enforced when the class (or
    ITranslator) uses abc.ABCMeta as its metaclass - confirm in itranslator.py.
    """
    @abc.abstractmethod
    def process_packet(self, packet):
        """
        Decide whether the given packet is forwarded or discarded.
        Return true if packet should be redirected
        Return false if packet should be dropped
        :rtype: Boolean
        :param packet: IP/Ipv6 Scapy packet
        """
        pass
    @abc.abstractmethod
    def layers(self):
        """
        Return the protocol layers this controller is responsible for.
        :rtype: list of layers
        """
        pass
| 0 | 403 | 23 |
ac8875e6ae533305f285688d4d1a5001ebd56ccb | 7,234 | py | Python | visitors/test_views.py | ASquirrelsTail/serve-up | 9533ba82f5b4989434b3b20352d17a8131bb9619 | [
"MIT"
] | null | null | null | visitors/test_views.py | ASquirrelsTail/serve-up | 9533ba82f5b4989434b3b20352d17a8131bb9619 | [
"MIT"
] | 10 | 2021-03-30T14:05:21.000Z | 2022-03-12T00:41:15.000Z | visitors/test_views.py | ASquirrelsTail/serve-up | 9533ba82f5b4989434b3b20352d17a8131bb9619 | [
"MIT"
] | null | null | null | from django.urls import reverse
from django.views import View
from django.http import HttpResponse
from django.contrib.sessions.middleware import SessionMiddleware
from django.test import TestCase, RequestFactory
from django.core.exceptions import PermissionDenied
from datetime import timedelta
from tables.models import Table
from orders.models import Order
from visitors.models import Group
from visitors.views import HasGroupMixin
class HasGroupMixinTestCase(TestCase):
'''
Class to test HasGroupMixin.
'''
@classmethod
@classmethod
class TestView(HasGroupMixin, View):
'''
Simple test class using HasGroupMixin.
'''
class CreateGroupViewTestCase(TestCase):
'''
Class to test the Group view for creating a new group
'''
@classmethod
@classmethod
def test_returns_400_if_no_data_is_submitted(self):
'''
Creating a group should fail if no data submitted, return 400 response, and not create a group or add one to the session.
'''
response = self.post({})
self.assertEqual(response.status_code, 400)
self.assertTrue('error' in response.json())
self.assertEqual(Group.objects.count(), 0)
self.assertFalse('group' in self.client.session)
def test_returns_400_if_invalid_data_is_submitted(self):
'''
Creating a group should fail if invalid data submitted, return 400 response, and not create a group or add one to the session.
'''
invalid_data = [
{'visitors': 'Not a list!'},
{'visitors': []},
{'visitors': [{'name': '', 'phone_number': '', 'email': ''}]},
{'visitors': [{'name': 'John Doe', 'phone_number': '', 'email': 'johndoe.com'}]},
{'visitors': [{'name': '', 'phone_number': '', 'email': 'john@doe.com'}]},
{'visitors': [{'name': 'John Doe', 'phone_number': '0161 496 0210', 'email': 'john@doe.com'},
{'name': '', 'phone_number': '', 'email': ''}]},
]
for data in invalid_data:
response = self.post(data)
self.assertEqual(response.status_code, 400)
self.assertTrue('error' in response.json())
self.assertEqual(Group.objects.count(), 0)
self.assertFalse('group' in self.client.session)
def test_response_includes_form_errors_if_invalid_data_is_submitted(self):
'''
The response should include the form errors for each visitor in the form_errors list
'''
response = self.post({'visitors': [{'name': 'John Doe', 'phone_number': '', 'email': 'johndoe.com'}]})
self.assertTrue('email' in response.json()['form_errors'][0])
response = self.post({'visitors': [{'name': '', 'phone_number': '', 'email': ''}]})
self.assertTrue('name' in response.json()['form_errors'][0])
def test_returns_400_if_none_of_the_submitted_visitors_have_contact_details(self):
'''
Creating a group should fail if none o the visitors have any contact details, return 400 response, and not create a group or add one to the session.
'''
response = self.post({'visitors': [{'name': 'John Doe', 'phone_number': '', 'email': ''},
{'name': 'Jane Doe', 'phone_number': '', 'email': ''}]})
self.assertEqual(response.status_code, 400)
self.assertTrue('error' in response.json())
self.assertEqual(Group.objects.count(), 0)
self.assertFalse('group' in self.client.session)
def test_returns_204_if_group_created_successfully(self):
'''
Creating a group should return a 204 response and add the group id to the session if successful.
'''
response = self.post({'visitors': [{'name': 'John Doe', 'phone_number': '0161 496 0210', 'email': ''},
{'name': 'Jane Doe', 'phone_number': '', 'email': ''}]})
self.assertEqual(response.status_code, 204)
self.assertEqual(Group.objects.count(), 1)
self.assertTrue('group' in self.client.session)
| 37.097436 | 156 | 0.641554 | from django.urls import reverse
from django.views import View
from django.http import HttpResponse
from django.contrib.sessions.middleware import SessionMiddleware
from django.test import TestCase, RequestFactory
from django.core.exceptions import PermissionDenied
from datetime import timedelta
from tables.models import Table
from orders.models import Order
from visitors.models import Group
from visitors.views import HasGroupMixin
class HasGroupMixinTestCase(TestCase):
    '''
    Class to test HasGroupMixin.

    Builds four groups of different ages (with and without recent orders)
    so the mixin's "active group" rules can be exercised against a tiny
    view wrapped in the mixin.
    '''
    @classmethod
    def setUpTestData(cls):
        # Class-level fixtures, created once per TestCase class.
        table = Table.objects.create(name='Test')
        # A group created "now" - presumably always considered active.
        cls.new_group = Group.objects.create(table=table)
        # A 3-hour-old group that placed an order 1 hour ago.
        cls.group_with_recent_order = Group.objects.create(table=table)
        cls.group_with_recent_order.time -= timedelta(hours=3)
        cls.group_with_recent_order.save()
        prev_order = Order.objects.create(group=cls.group_with_recent_order)
        prev_order.time -= timedelta(hours=1)
        prev_order.save()
        # A 2-day-old group with no orders at all.
        cls.old_group = Group.objects.create(table=table)
        cls.old_group.time -= timedelta(days=2)
        cls.old_group.save()
        # A 5-hour-old group whose last order is already 3 hours old.
        cls.old_group_with_order = Group.objects.create(table=table)
        cls.old_group_with_order.time -= timedelta(hours=5)
        cls.old_group_with_order.save()
        prev_order = Order.objects.create(group=cls.old_group_with_order)
        prev_order.time -= timedelta(hours=3)
        prev_order.save()
    @classmethod
    def tearDownClass(cls):
        # Deleting the table cascades to the groups/orders created above.
        Table.objects.all().delete()
        super(HasGroupMixinTestCase, cls).tearDownClass()
    class TestView(HasGroupMixin, View):
        '''
        Simple test class using HasGroupMixin.
        '''
        def get(self, request):
            # Minimal success response; only reached when the mixin allows it.
            return HttpResponse('Success')
    def setUp(self):
        self.factory = RequestFactory()
    def test_returns_403_if_group_session_key_is_missing(self):
        '''A request with no 'group' key in the session must be rejected.'''
        request = self.factory.get('')
        middleware = SessionMiddleware()
        middleware.process_request(request)
        request.session.save()
        with self.assertRaises(PermissionDenied):
            self.TestView.as_view()(request)
    def test_returns_403_if_group_is_older_than_2_hours_without_an_order_within_2_hours(self):
        '''Stale groups (no recent order) must be rejected with PermissionDenied.'''
        request = self.factory.get('')
        middleware = SessionMiddleware()
        middleware.process_request(request)
        request.session['group'] = self.old_group.id
        request.session.save()
        with self.assertRaises(PermissionDenied):
            self.TestView.as_view()(request)
        # Same for a group whose last order is itself too old.
        request.session['group'] = self.old_group_with_order.id
        request.session.save()
        with self.assertRaises(PermissionDenied):
            self.TestView.as_view()(request)
    def test_returns_OK_if_user_has_valid_group(self):
        '''Fresh groups, or groups with a recent order, pass through to the view.'''
        request = self.factory.get('')
        middleware = SessionMiddleware()
        middleware.process_request(request)
        request.session['group'] = self.new_group.id
        request.session.save()
        response = self.TestView.as_view()(request)
        self.assertEqual(response.status_code, 200)
        request.session['group'] = self.group_with_recent_order.id
        request.session.save()
        response = self.TestView.as_view()(request)
        self.assertEqual(response.status_code, 200)
class CreateGroupViewTestCase(TestCase):
    '''
    Class to test the Group view for creating a new group.

    POSTs JSON visitor lists to the 'group' URL and checks validation,
    error reporting and session handling.
    '''
    @classmethod
    def setUpTestData(cls):
        '''Create one table and resolve the group-creation URL for it.'''
        table = Table.objects.create(name='Test')
        cls.url = reverse('group', kwargs={'slug': table.uuid})
    @classmethod
    def tearDownClass(cls):
        '''Remove the fixture table (cascades to related objects).'''
        Table.objects.all().delete()
        super(CreateGroupViewTestCase, cls).tearDownClass()
    def setUp(self):
        # Start each test with a clean session.
        self.client.session.clear()
    def post(self, data):
        '''POST `data` as JSON to the group-creation URL and return the response.'''
        return self.client.post(self.url, data, content_type='application/json')
    def test_returns_400_if_no_data_is_submitted(self):
        '''
        Creating a group should fail if no data submitted, return 400 response, and not create a group or add one to the session.
        '''
        response = self.post({})
        self.assertEqual(response.status_code, 400)
        self.assertTrue('error' in response.json())
        self.assertEqual(Group.objects.count(), 0)
        self.assertFalse('group' in self.client.session)
    def test_returns_400_if_invalid_data_is_submitted(self):
        '''
        Creating a group should fail if invalid data submitted, return 400 response, and not create a group or add one to the session.
        '''
        # Each entry is a differently-malformed payload; all must be rejected.
        invalid_data = [
            {'visitors': 'Not a list!'},
            {'visitors': []},
            {'visitors': [{'name': '', 'phone_number': '', 'email': ''}]},
            {'visitors': [{'name': 'John Doe', 'phone_number': '', 'email': 'johndoe.com'}]},
            {'visitors': [{'name': '', 'phone_number': '', 'email': 'john@doe.com'}]},
            {'visitors': [{'name': 'John Doe', 'phone_number': '0161 496 0210', 'email': 'john@doe.com'},
                          {'name': '', 'phone_number': '', 'email': ''}]},
        ]
        for data in invalid_data:
            response = self.post(data)
            self.assertEqual(response.status_code, 400)
            self.assertTrue('error' in response.json())
            self.assertEqual(Group.objects.count(), 0)
            self.assertFalse('group' in self.client.session)
    def test_response_includes_form_errors_if_invalid_data_is_submitted(self):
        '''
        The response should include the form errors for each visitor in the form_errors list
        '''
        response = self.post({'visitors': [{'name': 'John Doe', 'phone_number': '', 'email': 'johndoe.com'}]})
        self.assertTrue('email' in response.json()['form_errors'][0])
        response = self.post({'visitors': [{'name': '', 'phone_number': '', 'email': ''}]})
        self.assertTrue('name' in response.json()['form_errors'][0])
    def test_returns_400_if_none_of_the_submitted_visitors_have_contact_details(self):
        '''
        Creating a group should fail if none o the visitors have any contact details, return 400 response, and not create a group or add one to the session.
        '''
        response = self.post({'visitors': [{'name': 'John Doe', 'phone_number': '', 'email': ''},
                                           {'name': 'Jane Doe', 'phone_number': '', 'email': ''}]})
        self.assertEqual(response.status_code, 400)
        self.assertTrue('error' in response.json())
        self.assertEqual(Group.objects.count(), 0)
        self.assertFalse('group' in self.client.session)
    def test_returns_204_if_group_created_successfully(self):
        '''
        Creating a group should return a 204 response and add the group id to the session if successful.
        '''
        response = self.post({'visitors': [{'name': 'John Doe', 'phone_number': '0161 496 0210', 'email': ''},
                                           {'name': 'Jane Doe', 'phone_number': '', 'email': ''}]})
        self.assertEqual(response.status_code, 204)
        self.assertEqual(Group.objects.count(), 1)
        self.assertTrue('group' in self.client.session)
| 2,801 | 0 | 297 |
ff7e81e102507c4497d7b7914010c6b208c9cea5 | 4,655 | py | Python | tests/unit/test_algorithm.py | benjaminkaplanphd/traveling-salesperson | 5c788554fe90eeb81b6351aeec96f1d64caa7591 | [
"MIT"
] | null | null | null | tests/unit/test_algorithm.py | benjaminkaplanphd/traveling-salesperson | 5c788554fe90eeb81b6351aeec96f1d64caa7591 | [
"MIT"
] | null | null | null | tests/unit/test_algorithm.py | benjaminkaplanphd/traveling-salesperson | 5c788554fe90eeb81b6351aeec96f1d64caa7591 | [
"MIT"
] | null | null | null | """
Unit tests for the algorithm.py module
"""
# pragma pylint: disable=redefined-outer-name
import numpy as np
import pytest
from traveling_salesperson import algorithm
@pytest.fixture()
def pyramid_distance_matrix_fixture():
    r"""Symmetric 4x4 distance matrix of a lopsided pyramid (nodes a..d).

    a -- 10 --,b
     \`9    ,2 /
      \   `c  /
      11  |  10
        \ 5 /
         \|/
          d
    """
    distances = [
        [0, 10, 9, 11],   # a -> a, b, c, d
        [10, 0, 2, 10],   # b -> a, b, c, d
        [9, 2, 0, 5],     # c -> a, b, c, d
        [11, 10, 5, 0],   # d -> a, b, c, d
    ]
    return distances
@pytest.fixture()
def sub_optimal_path_fixture():
    """Greedy (nearest-neighbour) tour of the pyramid plus its total length."""
    path = [0, 2, 1, 3]
    total_distance = 9 + 2 + 10  # a->c, c->b, b->d
    return path, total_distance
@pytest.fixture()
def optimal_path_fixture():
    """Shortest tour of the pyramid plus its total length."""
    path = [0, 1, 2, 3]
    total_distance = 10 + 2 + 5  # a->b, b->c, c->d
    return path, total_distance
def test_nearest_neighbor_path_returns_expected_path(sub_optimal_path_fixture,
                                                     pyramid_distance_matrix_fixture):
    """The greedy nearest-neighbour walk returns the known (sub-optimal) tour."""
    actual = algorithm.nearest_neighbor_path(4, pyramid_distance_matrix_fixture)
    assert actual == sub_optimal_path_fixture
def test_nearest_neighbor_path_with_swapping_returns_expected_path(optimal_path_fixture,
                                                                   pyramid_distance_matrix_fixture):
    """Nearest-neighbour plus 2-opt swapping improves on plain nearest-neighbour,
    landing on the known optimal tour for the pyramid."""
    actual = algorithm.nearest_neighbor_path_with_swapping(4, pyramid_distance_matrix_fixture)
    assert actual == optimal_path_fixture
def test_two_node_swap_optimization_optimizes_path(sub_optimal_path_fixture,
                                                   optimal_path_fixture,
                                                   pyramid_distance_matrix_fixture):
    """2-opt swapping turns the sub-optimal tour into the optimal one."""
    start_path, start_distance = sub_optimal_path_fixture
    improved = algorithm.two_node_swap_optimization(start_path,
                                                    pyramid_distance_matrix_fixture,
                                                    start_distance)
    assert np.array_equal(improved, optimal_path_fixture)
def test_two_node_swap_optimization_leaves_optimal_path_as_is(
        optimal_path_fixture,
        pyramid_distance_matrix_fixture):
    """An already-optimal tour survives 2-opt swapping unchanged."""
    best_path, best_distance = optimal_path_fixture
    result = algorithm.two_node_swap_optimization(best_path,
                                                  pyramid_distance_matrix_fixture,
                                                  best_distance)
    assert np.array_equal(result, optimal_path_fixture)
@pytest.fixture()
def expected_segments_fixture():
    """Node-index pairs along the pyramid that are candidates for swapping."""
    segments = [(0, 2), (0, 3), (1, 3)]
    return segments
def test_path_segments_yields_expected_iterator(expected_segments_fixture):
    """Ensures that the path_segments() iterator yields exactly the expected 2-tuples.

    The previous zip-based comparison stopped at the shorter sequence, so an
    iterator that yielded fewer (even zero) segments passed silently.
    Materialising the iterator and comparing lists also checks the count.
    """
    observed_segments = list(algorithm.path_segments([], 0, 3, 2))
    assert observed_segments == expected_segments_fixture
@pytest.mark.parametrize('segment,expected_value',
                         [((0, 2), -4), ((0, 3), 0), ((1, 3), 0)])
def test_better_path_from_swap_returns_expected_bool(sub_optimal_path_fixture,
                                                     pyramid_distance_matrix_fixture,
                                                     segment, expected_value):
    """The swap delta is negative exactly when reversing the nodes between the
    two segment endpoints would shorten the tour; otherwise it is zero."""
    path = sub_optimal_path_fixture[0]
    start, end = segment
    delta = algorithm.delta_if_better_path_from_swap(path,
                                                     pyramid_distance_matrix_fixture,
                                                     start, end)
    assert delta == expected_value
| 42.706422 | 100 | 0.612245 | """
Unit tests for the algorithm.py module
"""
# pragma pylint: disable=redefined-outer-name
import numpy as np
import pytest
from traveling_salesperson import algorithm
@pytest.fixture()
def pyramid_distance_matrix_fixture():
r"""A distance matrix, corresponding to a lopsided pyramid,
for testing the nearest neighbor algorithm
a -- 10 --,b
\`9 ,2 /
\ `c /
11 | 10
\ 5 /
\|/
d
"""
return [
[0, 10, 9, 11],
[10, 0, 2, 10],
[9, 2, 0, 5],
[11, 10, 5, 0]
]
@pytest.fixture()
def sub_optimal_path_fixture():
"""A sub-optimal path, and associated distance, along the nodes of the pyramid"""
return [0, 2, 1, 3], 9 + 2 + 10
@pytest.fixture()
def optimal_path_fixture():
"""An optimal path, and associated distance, along the nodes of the pyramid"""
return [0, 1, 2, 3], 10 + 2 + 5
def test_nearest_neighbor_path_returns_expected_path(sub_optimal_path_fixture,
pyramid_distance_matrix_fixture):
"""Ensures that the nearest_neighbor_path returns the expected (and not necessarily optimal)
path."""
observed_path = algorithm.nearest_neighbor_path(4,
pyramid_distance_matrix_fixture)
assert observed_path == sub_optimal_path_fixture
def test_nearest_neighbor_path_with_swapping_returns_expected_path(optimal_path_fixture,
pyramid_distance_matrix_fixture):
"""Ensures that the nearest_neighbor_path_with_swapping() method finds a more optimal path
than nearest neighbor alone, assume such a path exists."""
observed_path = algorithm.nearest_neighbor_path_with_swapping(4,
pyramid_distance_matrix_fixture)
assert observed_path == optimal_path_fixture
def test_two_node_swap_optimization_optimizes_path(sub_optimal_path_fixture,
optimal_path_fixture,
pyramid_distance_matrix_fixture):
"""Ensures that if a shorter path can be found through 2-opt swapping, the
two_node_swap_optimization() method returns that path."""
observed_path = algorithm.two_node_swap_optimization(sub_optimal_path_fixture[0],
pyramid_distance_matrix_fixture,
sub_optimal_path_fixture[1])
assert np.array_equal(observed_path, optimal_path_fixture)
def test_two_node_swap_optimization_leaves_optimal_path_as_is(
optimal_path_fixture,
pyramid_distance_matrix_fixture):
"""Ensures that when no further optimization from swapping can be achieved, the
two_node_swap_optimization() method leaves the path as is."""
observed_path = algorithm.two_node_swap_optimization(optimal_path_fixture[0],
pyramid_distance_matrix_fixture,
optimal_path_fixture[1])
assert np.array_equal(observed_path, optimal_path_fixture)
@pytest.fixture()
def expected_segments_fixture():
"""The segments along the pyramid to consider for swapping"""
return [(0, 2), (0, 3), (1, 3)]
def test_path_segments_yields_expected_iterator(expected_segments_fixture):
"""Ensures that the path_segments() iterator yields the expected 2-tuples."""
observed_iterator = algorithm.path_segments([], 0, 3, 2)
for observed_segment, expected_segment in zip(observed_iterator,
expected_segments_fixture):
assert observed_segment == expected_segment
@pytest.mark.parametrize('segment,expected_value',
[((0, 2), -4), ((0, 3), 0), ((1, 3), 0)])
def test_better_path_from_swap_returns_expected_bool(sub_optimal_path_fixture,
pyramid_distance_matrix_fixture,
segment, expected_value):
"""Ensures that the better_path_from_swap() method returns True, if and only if
reversing the nodes between the two to "swap" would lead to a shorter path."""
observed_value = algorithm.delta_if_better_path_from_swap(sub_optimal_path_fixture[0],
pyramid_distance_matrix_fixture,
*segment)
assert observed_value == expected_value
| 0 | 0 | 0 |
c51bddc364ce33d4504970da44a3ed490f7192ac | 6,104 | py | Python | ports/esp32/modules/hw_utils.py | tempstabilizer2018group/micropython_esp32 | 0bc16f1954a65adb9ad01578004744d424ef0c27 | [
"MIT"
] | null | null | null | ports/esp32/modules/hw_utils.py | tempstabilizer2018group/micropython_esp32 | 0bc16f1954a65adb9ad01578004744d424ef0c27 | [
"MIT"
] | null | null | null | ports/esp32/modules/hw_utils.py | tempstabilizer2018group/micropython_esp32 | 0bc16f1954a65adb9ad01578004744d424ef0c27 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
import gc
import uos
import utime
import hw_urequests
import machine
import portable_firmware_constants
strMAC = ''.join(['%02X'%i for i in machine.unique_id()])
#
# Watchdog
#
objWdt = None
feedWatchdog = lambda: None
'''True if power on. False if reboot by software or watchdog.'''
bPowerOnBoot = machine.PWRON_RESET == machine.reset_cause()
'''True if Watchdog-Reset. False if reboot by software or power on.'''
bWatchdogBoot = machine.WDT_RESET == machine.reset_cause()
#
# SW Version
#
strSwVersion = __getSwVersion()
#
# LED and Button
#
iFreqOn_hz = 1000
class Gpio:
'Scan: slow blink, sharp'
'Scan: blink, dark'
'Connected: blink, bright'
def isButtonPressed(self):
'''Returns True if the Button is pressed.'''
return self.pin_button.value() == 0
objGpio = Gpio()
#
# Utils
#
def deleteVERSION_TXTandReboot():
'''Delete VERSION.TXT so that the filesystem will be formatted during next boot'''
uos.remove(portable_firmware_constants.strFILENAME_VERSION)
reboot('Reboot after deleting VERSION.TXT')
def formatAndReboot():
'''Destroy the filesystem so that it will be formatted during next boot'''
gc.collect()
objGpio.pwmLedReboot()
# This will trigger a format of the filesystem and the creation of booty.py.
# See: https://github.com/micropython/micropython/blob/master/ports/esp32/modules/inisetup.py
import inisetup
inisetup.setup()
reboot('Reboot after format filesystem')
def bootCheckUpdate():
'''
This method is called from 'boot.py' always after boot.
May reboot several times to format the filesystem and do the update.
'''
objGpio.setLed(False)
if objGpio.isButtonPressed() and bPowerOnBoot:
print('Button presed. Format')
activateWatchdog()
formatAndReboot()
if isFilesystemEmpty():
print('Filesystem is empty: Update')
activateWatchdog()
# Don't import at the beginning: It would occupy memory...
import hw_update_ota
hw_update_ota.updateAndReboot()
if not isUpdateFinished():
print('Update was not finished. Format')
activateWatchdog()
formatAndReboot()
objGpio.setLed(False)
#
# Verify SW-Version on Host
#
def getSwVersionGit(wlan):
'''
returns verions: On success
returns None: on failure
'''
strUrl = getVersionCheckUrl(wlan)
print('HTTP-Get ' + strUrl)
try:
feedWatchdog()
r = hw_urequests.get(strUrl)
if r.status_code != 200:
print('FAILED %d %s' % (r.status_code, r.reason))
r.close()
return None
strSwVersionGit = r.text
r.close()
return strSwVersionGit
except OSError as e:
print('FAILED %s' % e)
return None
def checkIfNewSwVersion(wlan):
'''
returns True: The version changed
returns False: Same version or error
'''
strSwVersionGit = getSwVersionGit(wlan)
if strSwVersionGit != None:
print('Software version node: %s' % strSwVersion)
print('Software version git: %s' % strSwVersionGit)
if strSwVersionGit != strSwVersion:
print('Software version CHANGED')
return True
print('Software version EQUAL')
return False
#
# Memory usage
#
#
# Repl-Command
#
| 25.647059 | 176 | 0.717071 | # -*- coding: utf-8 -*-
import gc
import uos
import utime
import hw_urequests
import machine
import portable_firmware_constants
# MAC address of this device as an uppercase hex string (unique per board).
strMAC = ''.join(['%02X'%i for i in machine.unique_id()])
#
# Watchdog
#
# Set by activateWatchdog(); None while the watchdog is inactive.
objWdt = None
# No-op until activateWatchdog() rebinds it to the real WDT feed method,
# so callers may always call feedWatchdog() unconditionally.
feedWatchdog = lambda: None
'''True if power on. False if reboot by software or watchdog.'''
bPowerOnBoot = machine.PWRON_RESET == machine.reset_cause()
'''True if Watchdog-Reset. False if reboot by software or power on.'''
bWatchdogBoot = machine.WDT_RESET == machine.reset_cause()
def activateWatchdog():
    '''Arm the hardware watchdog (idempotent).

    After the first call, the module-level feedWatchdog() feeds the real
    watchdog instead of being a no-op.  Subsequent calls are ignored.
    '''
    global objWdt, feedWatchdog
    # PEP 8: compare against None with 'is not', never '!='.
    if objWdt is not None:
        return
    objWdt = machine.WDT(0)
    feedWatchdog = objWdt.feed
    print('Watchdog: ACTIVE')
#
# SW Version
#
def readFile(strFilename, default):
    '''Return the whole text content of strFilename, or `default` if it
    cannot be opened/read.

    The previous bare ``except:`` also swallowed KeyboardInterrupt and
    programming errors; only I/O failures are expected here, so catch
    OSError (the exception open()/read() raise for missing or unreadable
    files on both CPython and MicroPython).
    '''
    try:
        with open(strFilename, 'r') as fIn:
            return fIn.read()
    except OSError:
        return default
def __getSwVersion():
    # Read the installed firmware version from VERSION.TXT; 'none' when the
    # file is missing (e.g. after a filesystem format).
    return readFile(portable_firmware_constants.strFILENAME_VERSION, default='none').strip()
# Cached at import time; the version only changes via update + reboot.
strSwVersion = __getSwVersion()
#
# LED and Button
#
iFreqOn_hz = 1000
class Gpio:
    """Wraps the board's button (pin 16, active low) and status LED (pin 22).

    The LED is driven via PWM; frequency/duty are cached so repeated calls
    with the same values do not touch the hardware again.
    """
    def __init__(self):
        self.pin_button = machine.Pin(16, machine.Pin.IN, machine.Pin.PULL_UP)
        self.pin_led = machine.Pin(22, machine.Pin.OUT)
        # LED off
        self.pwm = machine.PWM(self.pin_led, freq=iFreqOn_hz, duty=0)
        # Last values written to the PWM peripheral (None = not yet set).
        self.__iFreq = None
        self.__iDuty = None
    def pwmLed(self, iFreq_hz=10, iDuty_1023=512):
        # Only reprogram the PWM when frequency or duty actually changed.
        if self.__iFreq != iFreq_hz:
            self.__iFreq = iFreq_hz
            self.pwm.freq(iFreq_hz)
        if self.__iDuty != iDuty_1023:
            self.__iDuty = iDuty_1023
            self.pwm.duty(iDuty_1023)
    'Scan: slow blink, sharp'
    def pwmLedReboot(self):
        # LED pattern shown while rebooting/formatting.
        self.pwmLed(portable_firmware_constants.iLedReboot_pwm_hz,
                    portable_firmware_constants.iLedReboot_duty_1023)
    'Scan: blink, dark'
    def pwmLedWlanScan(self):
        # LED pattern shown while scanning for WLAN networks.
        self.pwmLed(portable_firmware_constants.iLedWlanScan_pwm_hz,
                    portable_firmware_constants.iLedWlanScan_duty_1023)
    'Connected: blink, bright'
    def pwmLedWlanConnected(self):
        # LED pattern shown once a WLAN connection is established.
        self.pwmLed(portable_firmware_constants.iLedWlanConnected_pwm_hz,
                    portable_firmware_constants.iLedWlanConnected_duty_1023)
    def setLed(self, bOn=True):
        # Solid on (full duty) or off (zero duty) at the base frequency.
        if bOn:
            self.pwmLed(iFreqOn_hz, 1023)
            return
        self.pwmLed(iFreqOn_hz, 0)
    def isButtonPressed(self):
        '''Returns True if the Button is pressed (input reads low due to pull-up).'''
        return self.pin_button.value() == 0
objGpio = Gpio()
#
# Utils
#
def reboot(strReason):
    '''Log strReason, switch the LED off and hard-reset the board.

    Never returns.  The 1 s pause gives the print a chance to flush.
    '''
    print(strReason)
    objGpio.setLed(bOn=False)
    # uos.sync() does not exist. Maybe a pause does the same. Maybe its event not used.
    feedWatchdog()
    utime.sleep_ms(1000)
    machine.reset()
def isFilesystemEmpty():
    '''True when the root filesystem holds a single entry.'''
    # Only 'boot.py' exists.
    return len(uos.listdir()) == 1
def isUpdateFinished():
    '''True when VERSION.TXT exists, i.e. a software update completed.'''
    return portable_firmware_constants.strFILENAME_VERSION in uos.listdir()
def deleteVERSION_TXTandReboot():
    '''Delete VERSION.TXT so that the filesystem will be formatted during next boot'''
    uos.remove(portable_firmware_constants.strFILENAME_VERSION)
    reboot('Reboot after deleting VERSION.TXT')
def formatAndReboot():
    '''Destroy the filesystem so that it will be formatted during next boot'''
    # Free as much heap as possible before importing inisetup.
    gc.collect()
    objGpio.pwmLedReboot()
    # This will trigger a format of the filesystem and the creation of booty.py.
    # See: https://github.com/micropython/micropython/blob/master/ports/esp32/modules/inisetup.py
    import inisetup
    inisetup.setup()
    reboot('Reboot after format filesystem')
def bootCheckUpdate():
    '''
    This method is called from 'boot.py' always after boot.
    May reboot several times to format the filesystem and do the update.

    Sequence: button held at power-on -> format; empty filesystem ->
    OTA update; update incomplete (no VERSION.TXT) -> format again.
    '''
    objGpio.setLed(False)
    # Holding the button during a *power-on* boot forces a factory format.
    if objGpio.isButtonPressed() and bPowerOnBoot:
        print('Button presed. Format')
        activateWatchdog()
        formatAndReboot()
    if isFilesystemEmpty():
        print('Filesystem is empty: Update')
        activateWatchdog()
        # Don't import at the beginning: It would occupy memory...
        import hw_update_ota
        hw_update_ota.updateAndReboot()
    if not isUpdateFinished():
        print('Update was not finished. Format')
        activateWatchdog()
        formatAndReboot()
    objGpio.setLed(False)
#
# Verify SW-Version on Host
#
def getServer(wlan):
    '''Pick the update/version server based on the connected gateway.

    When the gateway matches the Raspberry-Pi test setup, use the Pi
    server; otherwise fall back to the default server.
    '''
    listIfconfig = wlan.ifconfig()
    strGateway = listIfconfig[2]  # ifconfig() -> (ip, netmask, gateway, dns)
    if strGateway == portable_firmware_constants.strGATEWAY_PI:
        return portable_firmware_constants.strSERVER_PI
    return portable_firmware_constants.strSERVER_DEFAULT
def getDownloadUrl(wlan):
    '''URL to download a software update from, for this node.'''
    return __getUrl(wlan, portable_firmware_constants.strHTTP_PATH_SOFTWAREUPDATE)
def getVersionCheckUrl(wlan):
    '''URL to query the expected software version for this node.'''
    return __getUrl(wlan, portable_firmware_constants.strHTTP_PATH_VERSIONCHECK)
def __getUrl(wlan, strFunction):
    # Append this node's MAC and installed version as query parameters.
    return '%s%s?%s=%s&%s=%s' % (getServer(wlan), strFunction, portable_firmware_constants.strHTTP_ARG_MAC, strMAC, portable_firmware_constants.strHTTP_ARG_VERSION, strSwVersion)
def getSwVersionGit(wlan):
    '''
    Fetch the expected software version from the server over HTTP.
    returns version string: On success
    returns None: on failure (non-200 status or network error)
    '''
    strUrl = getVersionCheckUrl(wlan)
    print('HTTP-Get ' + strUrl)
    try:
        feedWatchdog()
        r = hw_urequests.get(strUrl)
        if r.status_code != 200:
            print('FAILED %d %s' % (r.status_code, r.reason))
            r.close()
            return None
        strSwVersionGit = r.text
        r.close()
        return strSwVersionGit
    except OSError as e:
        # Network/socket failures surface as OSError on MicroPython.
        print('FAILED %s' % e)
        return None
return None
def checkIfNewSwVersion(wlan):
    '''
    Compare the installed software version against the server's version.
    returns True: The version changed
    returns False: Same version or error (version could not be fetched)
    '''
    strSwVersionGit = getSwVersionGit(wlan)
    # PEP 8: compare against None with 'is' / 'is not', not '!='.
    # Guard clause keeps the happy path unindented.
    if strSwVersionGit is None:
        return False
    print('Software version node: %s' % strSwVersion)
    print('Software version git: %s' % strSwVersionGit)
    if strSwVersionGit != strSwVersion:
        print('Software version CHANGED')
        return True
    print('Software version EQUAL')
    return False
return False
#
# Memory usage
#
def print_mem_usage(msg=''):
    # Print free/allocated/total heap after a GC pass.
    # NOTE(review): gc.mem_free()/gc.mem_alloc() are MicroPython extensions;
    # this will raise AttributeError on CPython.
    gc.collect()
    f=gc.mem_free()
    a=gc.mem_alloc()
    print('mem_usage {}+{}={} {}'.format(f, a, f+a, msg))
#
# Repl-Command
#
class Command:
    """Wrap a zero-argument callable so it runs when echoed at the REPL.

    Typing the bare name at the MicroPython REPL triggers __repr__, which
    invokes the wrapped function and shows its return value; the object can
    also be called like a plain function.
    """
    def __init__(self, func):
        # Name-mangled, as before, to keep the callable private.
        self.__func = func
    def __call__(self):
        return self.__func()
    def __repr__(self):
        # Delegate to __call__ so both entry points share one code path.
        return self.__call__()
| 2,462 | -7 | 491 |
9e116ca2287be4b26e9927d7c53033ee159c6e05 | 7,047 | py | Python | netapp/santricity/models/symbol/rtr_attributes.py | NetApp/santricity-webapi-pythonsdk | 1d3df4a00561192f4cdcdd1890f4d27547ed2de2 | [
"BSD-3-Clause-Clear"
] | 5 | 2016-08-23T17:52:22.000Z | 2019-05-16T08:45:30.000Z | netapp/santricity/models/symbol/rtr_attributes.py | NetApp/santricity-webapi-pythonsdk | 1d3df4a00561192f4cdcdd1890f4d27547ed2de2 | [
"BSD-3-Clause-Clear"
] | 2 | 2016-11-10T05:30:21.000Z | 2019-04-05T15:03:37.000Z | netapp/santricity/models/symbol/rtr_attributes.py | NetApp/santricity-webapi-pythonsdk | 1d3df4a00561192f4cdcdd1890f4d27547ed2de2 | [
"BSD-3-Clause-Clear"
] | 7 | 2016-08-25T16:11:44.000Z | 2021-02-22T05:31:25.000Z | # coding: utf-8
"""
RTRAttributes.py
The Clear BSD License
Copyright (c) – 2016, NetApp, Inc. All rights reserved.
Redistribution and use in source and binary forms, with or without modification, are permitted (subject to the limitations in the disclaimer below) provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
* Neither the name of NetApp, Inc. nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission.
NO EXPRESS OR IMPLIED LICENSES TO ANY PARTY'S PATENT RIGHTS ARE GRANTED BY THIS LICENSE. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
from pprint import pformat
from six import iteritems
class RTRAttributes(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
def __init__(self):
"""
RTRAttributes - a model defined in Swagger
:param dict swaggerTypes: The key is attribute name
and the value is attribute type.
:param dict attributeMap: The key is attribute name
and the value is json key in definition.
"""
self.swagger_types = {
'cru_type': 'str', # (required parameter)
'parent_cru': 'ParentCru',
'rtr_attribute_data': 'RTRAttributeData'
}
self.attribute_map = {
'cru_type': 'cruType', # (required parameter)
'parent_cru': 'parentCru',
'rtr_attribute_data': 'rtrAttributeData'
}
self._cru_type = None
self._parent_cru = None
self._rtr_attribute_data = None
@property
def cru_type(self):
"""
Gets the cru_type of this RTRAttributes.
This enumeration identifies the type of CRU a component has.
:return: The cru_type of this RTRAttributes.
:rtype: str
:required/optional: required
"""
return self._cru_type
@cru_type.setter
def cru_type(self, cru_type):
"""
Sets the cru_type of this RTRAttributes.
This enumeration identifies the type of CRU a component has.
:param cru_type: The cru_type of this RTRAttributes.
:type: str
"""
allowed_values = ["unknown", "dedicated", "shared", "aggregate", "__UNDEFINED"]
if cru_type not in allowed_values:
raise ValueError(
"Invalid value for `cru_type`, must be one of {0}"
.format(allowed_values)
)
self._cru_type = cru_type
@property
def parent_cru(self):
"""
Gets the parent_cru of this RTRAttributes.
This field identifies the CRU that is the parent of this component, i.e., the CRU that it is sharing with other components. It is only present when the cruType field is set to CRU_TYPE_AGGREGATE.
:return: The parent_cru of this RTRAttributes.
:rtype: ParentCru
:required/optional: optional
"""
return self._parent_cru
@parent_cru.setter
def parent_cru(self, parent_cru):
"""
Sets the parent_cru of this RTRAttributes.
This field identifies the CRU that is the parent of this component, i.e., the CRU that it is sharing with other components. It is only present when the cruType field is set to CRU_TYPE_AGGREGATE.
:param parent_cru: The parent_cru of this RTRAttributes.
:type: ParentCru
"""
self._parent_cru = parent_cru
    @property
    def rtr_attribute_data(self):
        """
        Gets the rtr_attribute_data of this RTRAttributes.
        This field contains the actual settings for the ready-to-remove data. It is only present when the cruType field is set to CRU_TYPE_DEDICATED.
        :return: The rtr_attribute_data of this RTRAttributes (None when unset).
        :rtype: RTRAttributeData
        :required/optional: optional
        """
        return self._rtr_attribute_data
    @rtr_attribute_data.setter
    def rtr_attribute_data(self, rtr_attribute_data):
        """
        Sets the rtr_attribute_data of this RTRAttributes.
        This field contains the actual settings for the ready-to-remove data. It is only present when the cruType field is set to CRU_TYPE_DEDICATED.
        :param rtr_attribute_data: The rtr_attribute_data of this RTRAttributes.
        :type: RTRAttributeData
        :note: unlike cru_type, no validation is performed here.
        """
        self._rtr_attribute_data = rtr_attribute_data
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
if self is None:
return None
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
if self is None or other is None:
return None
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
| 38.298913 | 844 | 0.639563 | # coding: utf-8
"""
RTRAttributes.py
The Clear BSD License
Copyright (c) – 2016, NetApp, Inc. All rights reserved.
Redistribution and use in source and binary forms, with or without modification, are permitted (subject to the limitations in the disclaimer below) provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
* Neither the name of NetApp, Inc. nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission.
NO EXPRESS OR IMPLIED LICENSES TO ANY PARTY'S PATENT RIGHTS ARE GRANTED BY THIS LICENSE. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
from pprint import pformat
from six import iteritems
class RTRAttributes(object):
    """Model of the ready-to-remove (RTR) attributes of a component.

    Mirrors the swagger-generated structure; the public interface
    (properties, helper methods) is unchanged so generated callers work.
    Fixes applied: unreachable None guards removed from __repr__/__eq__,
    __eq__ returns NotImplemented for foreign types, the cru_type error
    message includes the rejected value, and to_dict uses dict.items()
    (identical behaviour, no dependency on six.iteritems).
    """

    def __init__(self):
        """Create an empty model with all attributes set to None.

        ``swagger_types`` maps attribute name -> attribute type and
        ``attribute_map`` maps attribute name -> JSON key in the definition.
        """
        self.swagger_types = {
            'cru_type': 'str',  # (required parameter)
            'parent_cru': 'ParentCru',
            'rtr_attribute_data': 'RTRAttributeData'
        }

        self.attribute_map = {
            'cru_type': 'cruType',  # (required parameter)
            'parent_cru': 'parentCru',
            'rtr_attribute_data': 'rtrAttributeData'
        }

        self._cru_type = None
        self._parent_cru = None
        self._rtr_attribute_data = None

    @property
    def cru_type(self):
        """The type of CRU a component has (required).

        :rtype: str
        """
        return self._cru_type

    @cru_type.setter
    def cru_type(self, cru_type):
        """Set the CRU type, validating against the allowed enumeration.

        :raises ValueError: if ``cru_type`` is not one of the allowed values.
        """
        allowed_values = ["unknown", "dedicated", "shared", "aggregate", "__UNDEFINED"]
        if cru_type not in allowed_values:
            # Include the rejected value so the failure is diagnosable.
            raise ValueError(
                "Invalid value for `cru_type` ({0!r}), must be one of {1}"
                .format(cru_type, allowed_values)
            )
        self._cru_type = cru_type

    @property
    def parent_cru(self):
        """The CRU that is the parent of this component (optional).

        Only present when ``cru_type`` is aggregate.

        :rtype: ParentCru
        """
        return self._parent_cru

    @parent_cru.setter
    def parent_cru(self, parent_cru):
        """Set the parent CRU (no validation is performed)."""
        self._parent_cru = parent_cru

    @property
    def rtr_attribute_data(self):
        """The actual ready-to-remove settings (optional).

        Only present when ``cru_type`` is dedicated.

        :rtype: RTRAttributeData
        """
        return self._rtr_attribute_data

    @rtr_attribute_data.setter
    def rtr_attribute_data(self, rtr_attribute_data):
        """Set the ready-to-remove data (no validation is performed)."""
        self._rtr_attribute_data = rtr_attribute_data

    def to_dict(self):
        """Return the model's properties as a dict, recursing into nested models."""
        def _convert(value):
            # Serialize nested swagger models, lists and dicts recursively.
            if isinstance(value, list):
                return [v.to_dict() if hasattr(v, "to_dict") else v for v in value]
            if hasattr(value, "to_dict"):
                return value.to_dict()
            if isinstance(value, dict):
                return {k: (v.to_dict() if hasattr(v, "to_dict") else v)
                        for k, v in value.items()}
            return value

        return {attr: _convert(getattr(self, attr)) for attr in self.swagger_types}

    def to_str(self):
        """Return a pretty-printed string representation of the model."""
        return pformat(self.to_dict())

    def __repr__(self):
        """Return the representation used by `print` and `pprint`."""
        return self.to_str()

    def __eq__(self, other):
        """Return True when *other* is the same kind of model with equal state."""
        if not isinstance(other, type(self)):
            return NotImplemented
        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        """Return True when the two models are not equal."""
        return not self == other
| 0 | 0 | 0 |
e863de41a399ceaed49791d306754ebb714ed1ab | 1,353 | py | Python | agent_training.py | eym55/power_grid_sim | 35822a2d2f09c00a38841e0bf3395bc5ea0ddbaa | [
"MIT"
] | 1 | 2022-01-26T15:19:15.000Z | 2022-01-26T15:19:15.000Z | agent_training.py | eym55/power_grid_sim | 35822a2d2f09c00a38841e0bf3395bc5ea0ddbaa | [
"MIT"
] | null | null | null | agent_training.py | eym55/power_grid_sim | 35822a2d2f09c00a38841e0bf3395bc5ea0ddbaa | [
"MIT"
] | null | null | null | import gym, ray
#Import any agents you want to train, list found here: https://docs.ray.io/en/latest/rllib/rllib-algorithms.html
from ray.rllib.agents import ppo,dqn,a3c
from environments.defender_game import PowerGrid
import pypsa
import numpy as np
from ray.tune.registry import register_env
import pickle
import time
from ray.tune.logger import pretty_print
from agents import RandomAgent
#Initializing relevant variables
ray.init()
network = pypsa.Network('lopf_grid.nc')
LINES = network.lines.shape[0]
attack_distribution = np.random.dirichlet(np.ones(LINES),size= 1)[0]
agent_config = {
'action_distribution':attack_distribution
}
env_config = {
'network':network,
'agent_config':agent_config,
'agent_class':RandomAgent
}
agent = dqn.DQNTrainer(env=PowerGrid, config={
"env_config": env_config,
"num_workers": 8,
"n_step": 5,
"noisy": True,
"num_atoms": 5,
"v_min": 0,
"v_max": 1000.0,
})
#Change the range to desired amount of training iterations
#Runs our agent in the environment for specified number of iterations
for i in range(300):
try:
pop = agent.train()
print(pretty_print(pop))
time.sleep(5)
if i % 10 == 0:
checkpoint = agent.save()
print("checkpoint saved at", checkpoint)
except Exception as e:
print(e)
print("Error has occurred")
print(i)
| 25.055556 | 112 | 0.717664 | import gym, ray
#Import any agents you want to train, list found here: https://docs.ray.io/en/latest/rllib/rllib-algorithms.html
from ray.rllib.agents import ppo,dqn,a3c
from environments.defender_game import PowerGrid
import pypsa
import numpy as np
from ray.tune.registry import register_env
import pickle
import time
from ray.tune.logger import pretty_print
from agents import RandomAgent
#Initializing relevant variables
ray.init()
network = pypsa.Network('lopf_grid.nc')
LINES = network.lines.shape[0]
attack_distribution = np.random.dirichlet(np.ones(LINES),size= 1)[0]
agent_config = {
'action_distribution':attack_distribution
}
env_config = {
'network':network,
'agent_config':agent_config,
'agent_class':RandomAgent
}
agent = dqn.DQNTrainer(env=PowerGrid, config={
"env_config": env_config,
"num_workers": 8,
"n_step": 5,
"noisy": True,
"num_atoms": 5,
"v_min": 0,
"v_max": 1000.0,
})
#Change the range to desired amount of training iterations
#Runs our agent in the environment for specified number of iterations
for i in range(300):
try:
pop = agent.train()
print(pretty_print(pop))
time.sleep(5)
if i % 10 == 0:
checkpoint = agent.save()
print("checkpoint saved at", checkpoint)
except Exception as e:
print(e)
print("Error has occurred")
print(i)
| 0 | 0 | 0 |
6ac7be1fd3253acff69b48cbe8419a293101969a | 467 | py | Python | Python/math/mod_sqrt.py | NatsubiSogan/comp_library | 9f06d947951db40e051bd506fd8722fb75c3688b | [
"Apache-2.0"
] | 2 | 2021-09-05T13:17:01.000Z | 2021-09-05T13:17:06.000Z | Python/math/mod_sqrt.py | NatsubiSogan/comp_library | 9f06d947951db40e051bd506fd8722fb75c3688b | [
"Apache-2.0"
] | null | null | null | Python/math/mod_sqrt.py | NatsubiSogan/comp_library | 9f06d947951db40e051bd506fd8722fb75c3688b | [
"Apache-2.0"
] | null | null | null | import random
# mod-sqrt | 21.227273 | 42 | 0.423983 | import random
# mod-sqrt
def mod_sqrt(a: int, p: int) -> int:
    """Compute a square root of ``a`` modulo a prime ``p`` (Cipolla's algorithm).

    Args:
        a: The value whose root is sought (any integer; reduced mod ``p``).
        p: A prime modulus.

    Returns:
        Some ``x`` with ``x * x % p == a % p``, or ``-1`` if ``a`` is a
        quadratic non-residue modulo ``p``.  Which of the two roots is
        returned depends on the random witness chosen.

    Fixes over the original: ``a`` is reduced first, so ``a`` that is a
    nonzero multiple of ``p`` now correctly yields 0 (it previously returned
    -1, and for ``p == 2`` an even ``a`` wrongly returned 1).
    """
    a %= p
    if a == 0:
        return 0
    if p == 2:
        return a  # a == 1 here, and 1*1 == 1 (mod 2)
    k = (p - 1) // 2
    # Euler's criterion: a^((p-1)/2) must be 1 mod p for a residue.
    if pow(a, k, p) != 1:
        return -1
    # Find a witness n such that n^2 - a is a quadratic non-residue.
    while True:
        n = random.randint(2, p - 1)
        r = (n * n - a) % p
        if r == 0:
            return n  # n itself is already a square root of a
        if pow(r, k, p) == p - 1:
            break
    # Compute (n + sqrt(r))^((p+1)/2) in F_p[sqrt(r)]; the result lies in F_p.
    k += 1
    w, x, y, z = n, 1, 1, 0  # base = w + x*sqrt(r); accumulator = y + z*sqrt(r)
    while k:
        if k % 2:
            y, z = w * y + r * x * z, x * y + w * z
        w, x = w * w + r * x * x, 2 * w * x
        w %= p; x %= p; y %= p; z %= p
        k >>= 1
    return y
138bb55fa6c8950896b53a74ca9ec35fea63f4c4 | 21,762 | py | Python | src/load_cvs/cvload.py | ODM2/ODM2 | a474e20718f69cac3214c3fc3cb2eb2a410e22aa | [
"BSD-3-Clause"
] | 39 | 2015-07-13T15:03:32.000Z | 2021-12-13T21:17:40.000Z | src/load_cvs/cvload.py | ODM2/ODM2 | a474e20718f69cac3214c3fc3cb2eb2a410e22aa | [
"BSD-3-Clause"
] | 77 | 2015-04-27T21:43:20.000Z | 2020-08-11T03:03:36.000Z | src/load_cvs/cvload.py | ODM2/ODM2 | a474e20718f69cac3214c3fc3cb2eb2a410e22aa | [
"BSD-3-Clause"
] | 18 | 2015-08-11T21:52:21.000Z | 2021-05-17T18:23:05.000Z | from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
import sys
# Supporting Python3
try:
import urllib.request as request
except ImportError:
import urllib as request
import xml.etree.ElementTree as ET
import argparse
import pymysql
import pyodbc
import psycopg2
# ################################################################################
# CV Objects
# ################################################################################
from sqlalchemy import Column, String
from sqlalchemy.ext.declarative import declarative_base
# Declarative base shared by every CV model class in this module.
Base = declarative_base()
metadata = Base.metadata
# -----------------------------------------------------------------------------
# handles customizing the error messages from Argparse
# -----------------------------------------------------------------------------
# handle argument parsing
info = "A simple script that loads up cvterms into a blank ODM2 database"
# NOTE(review): MyParser (an ArgumentParser subclass that prints help on
# errors) is defined elsewhere in this file.
parser = MyParser(description=info, add_help=True)
# Positional argument: SQLAlchemy connection string for the target database.
parser.add_argument(
    help="Format: {engine}+{driver}://{user}:{pass}@{address}/{db}\n"
         "mysql+pymysql://ODM:odm@localhost/odm2\n\n"
         "mssql+pyodbc://ODM:123@localhost/odm2?driver=SQL+Server+Native+Client+11.0\n"
         "postgresql+psycopg2://ODM:odm@test.uwrl.usu.edu/odm2\n"
         "sqlite+pysqlite:///path/to/file",
    default=True, type=str, dest='conn_string')
# Optional flag: parse and report without committing to the database.
parser.add_argument('-d', '--debug',
                    help="Debugging program without committing anything to"
                         " remote database",
                    action="store_true")
args = parser.parse_args()
# ------------------------------------------------------------------------------
# Script Begin
# ------------------------------------------------------------------------------
## Verify connection string
conn_string = args.conn_string
engine = None
session = None
try:
    # engine = create_engine(conn_string, encoding='utf-8')
    # NOTE(review): 'unicode_escape' is unusual for a DB encoding -- confirm.
    engine = create_engine(conn_string, encoding='unicode_escape')
    setSchema(engine)
    session = sessionmaker(bind=engine)()
except Exception as e:
    print (e)
    # Fix: exit non-zero on failure; the original sys.exit(0) reported
    # success to the calling shell even when the connection failed.
    sys.exit(1)
print ("Loading CVs using connection string: %s" % conn_string)
# (vocabulary endpoint key, model class) pairs; commented-out entries are
# vocabularies this script does not load.
vocab= [("actiontype", CVActionType),
        ("qualitycode", CVQualityCode),
        ("samplingfeaturegeotype", CVSamplingFeatureGeoType),
        ("elevationdatum", CVElevationDatum),
        ("resulttype", CVResultType),
        #("sampledmedium", CVSampledMedium),
        ("speciation", CVSpeciation),
        ("aggregationstatistic", CVAggregationStatistic),
        ("methodtype", CVMethodType),
        ("taxonomicclassifiertype", CVTaxonomicClassifierType),
        ("sitetype", CVSiteType),
        ("censorcode", CVCensorCode),
        ("directivetype", CVDirectiveType),
        ("datasettype",CVDatasetType),
        ("dataqualitytype",CVDataQualityType),
        ("organizationtype", CVOrganizationType),
        ("status", CVStatus),
        ("annotationtype", CVAnnotationType),
        ("samplingfeaturetype", CVSamplingFeatureType),
        ("equipmenttype", CVEquipmentType),
        #("specimenmedium", CVSpecimenMedium),
        ("spatialoffsettype", CVSpatialOffsetType),
        #("referencematerialmedium", CVReferenceMaterialMedium),
        ("specimentype", CVSpecimenType),
        ("variabletype", CVVariableType),
        ("variablename", CVVariableName),
        ("propertydatatype", CVPropertyDataType),
        ("relationshiptype", CVRelationshipType),
        ("unitstype", CVUnitsType),
        ("medium", CVMediumType)
        ]
# SKOS endpoint for each vocabulary; %s is the endpoint key from `vocab`.
url = "http://vocabulary.odm2.org/api/v1/%s/?format=skos"
#XML encodings
# Namespace templates for ElementTree lookups; %s is the local tag name.
dc = "{http://purl.org/dc/elements/1.1/}%s"
rdf = "{http://www.w3.org/1999/02/22-rdf-syntax-ns#}%s"
skos = "{http://www.w3.org/2004/02/skos/core#}%s"
odm2 = "{http://vocabulary.odm2.org/ODM2/ODM2Terms/}%s"
# ------------------------------------------------------------------------------
# Progress bar
# ------------------------------------------------------------------------------
for count, (key, value) in enumerate(vocab):
    # update_progress is expected to be defined elsewhere in this file.
    update_progress(count, value)
    try:
        # Fetch the SKOS XML for this vocabulary and map each rdf:Description
        # element onto one instance of the corresponding model class.
        data = request.urlopen(url % key).read()
        root = ET.fromstring(data)
        CVObject = value
        objs = []
        for voc in root.findall(rdf % "Description"):
            obj = CVObject()
            try:
                obj.Term = voc.attrib[rdf % "about"].split('/')[-1]
                obj.Name = voc.find(skos % "prefLabel").text
                # NOTE(review): encode() stores bytes in a String column --
                # presumably intentional for legacy drivers; confirm.
                obj.Definition = voc.find(skos % "definition").text.encode('unicode_escape') if voc.find(skos % "definition") is not None else None
                obj.Category = voc.find(odm2 % "category").text if voc.find(odm2 % "category") is not None else None
                obj.SourceVocabularyUri = voc.attrib[rdf % "about"]
                objs.append(obj)
            except Exception as e:
                session.rollback()
                # Skip the malformed term but keep loading the rest.
                if obj.Name is not None:
                    print ("issue loading single object %s: %s " % (obj.Name, e))
        session.add_all(objs)
        if not args.debug:
            session.commit()
    except Exception as e:
        session.rollback()
        # Fix: Python 3 exceptions have no `.message` attribute; the original
        # `e.message` raised AttributeError inside this handler.
        msg = str(e)
        if "Duplicate entry" in msg:
            msg = "Controlled Vocabulary has already been loaded"
        print ("\t...%s Load was unsuccesful: \n%s" % (key, msg))
        sys.stdout.write("\n\n... %sLoad was unsuccessful: %s\r" % (key, msg))
        sys.stdout.flush()
update_progress(len(vocab), "CV_Terms")
sys.stdout.write("\nCV Load has completed\r\n")
sys.stdout.flush()
| 39.711679 | 144 | 0.614006 | from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
import sys
# Supporting Python3
try:
import urllib.request as request
except ImportError:
import urllib as request
import xml.etree.ElementTree as ET
import argparse
import pymysql
import pyodbc
import psycopg2
# ################################################################################
# CV Objects
# ################################################################################
from sqlalchemy import Column, String
from sqlalchemy.ext.declarative import declarative_base
Base = declarative_base()
metadata = Base.metadata


def _cv_model(name, table):
    """Build one declarative model for an ODM2 controlled-vocabulary table.

    Every CV table shares the same five columns, so the 27 near-identical
    generated classes are produced by this single factory.  Each class keeps
    its original name and table, and ``type()`` assigns ``__module__`` to
    this module, so the class-reflection done by ``_changeSchema`` still
    finds them.
    """
    def _repr(self):
        # Use the concrete class name; the generated classes were
        # inconsistent here ("<CV(...)", "<CVActionType(...)",
        # "<CVAggregationStatisticsType(...)", ...).
        return "<%s('%s', '%s', '%s', '%s')>" % (
            type(self).__name__, self.Term, self.Name,
            self.Definition, self.Category)

    return type(name, (Base,), {
        '__tablename__': table,
        '__table_args__': {u'schema': 'odm2'},
        'Term': Column('term', String(255), nullable=False),
        'Name': Column('name', String(255), primary_key=True),
        'Definition': Column('definition', String(1000)),
        'Category': Column('category', String(255)),
        'SourceVocabularyUri': Column('sourcevocabularyuri', String(255)),
        '__repr__': _repr,
    })


CVActionType = _cv_model('CVActionType', 'cv_actiontype')
CVAggregationStatistic = _cv_model('CVAggregationStatistic', 'cv_aggregationstatistic')
CVAnnotationType = _cv_model('CVAnnotationType', 'cv_annotationtype')
CVCensorCode = _cv_model('CVCensorCode', 'cv_censorcode')
CVDatasetType = _cv_model('CVDatasetType', 'cv_datasettype')
CVDataQualityType = _cv_model('CVDataQualityType', 'cv_dataqualitytype')
CVDirectiveType = _cv_model('CVDirectiveType', 'cv_directivetype')
CVElevationDatum = _cv_model('CVElevationDatum', 'cv_elevationdatum')
CVEquipmentType = _cv_model('CVEquipmentType', 'cv_equipmenttype')
CVMediumType = _cv_model('CVMediumType', 'cv_medium')
CVMethodType = _cv_model('CVMethodType', 'cv_methodtype')
CVOrganizationType = _cv_model('CVOrganizationType', 'cv_organizationtype')
CVPropertyDataType = _cv_model('CVPropertyDataType', 'cv_propertydatatype')
CVQualityCode = _cv_model('CVQualityCode', 'cv_qualitycode')
CVRelationshipType = _cv_model('CVRelationshipType', 'cv_relationshiptype')
CVResultType = _cv_model('CVResultType', 'cv_resulttype')
CVSamplingFeatureGeoType = _cv_model('CVSamplingFeatureGeoType', 'cv_samplingfeaturegeotype')
CVSamplingFeatureType = _cv_model('CVSamplingFeatureType', 'cv_samplingfeaturetype')
CVSpatialOffsetType = _cv_model('CVSpatialOffsetType', 'cv_spatialoffsettype')
CVSpeciation = _cv_model('CVSpeciation', 'cv_speciation')
CVSpecimenType = _cv_model('CVSpecimenType', 'cv_specimentype')
CVSiteType = _cv_model('CVSiteType', 'cv_sitetype')
CVStatus = _cv_model('CVStatus', 'cv_status')
CVTaxonomicClassifierType = _cv_model('CVTaxonomicClassifierType', 'cv_taxonomicclassifiertype')
CVUnitsType = _cv_model('CVUnitsType', 'cv_unitstype')
CVVariableName = _cv_model('CVVariableName', 'cv_variablename')
CVVariableType = _cv_model('CVVariableType', 'cv_variabletype')
def _changeSchema(schema):
    """Point every declarative model's table at ``schema``.

    Iterates all classes defined in this module and, for each declarative
    model, rewrites its table's schema in place.
    """
    import inspect
    import sys
    # Fix: this import was inside the for loop; it is loop-invariant.
    import sqlalchemy.ext.declarative.api as api
    # Every class whose home module is this one.
    clsmembers = inspect.getmembers(
        sys.modules[__name__],
        lambda member: inspect.isclass(member) and member.__module__ == __name__)
    for name, Tbl in clsmembers:
        if isinstance(Tbl, api.DeclarativeMeta):
            Tbl.__table__.schema = schema
def _getSchema(engine):
    """Return the schema name to use for ODM2 tables.

    Prefers a schema named 'odm2' (case-insensitive); otherwise falls back
    to the engine's default schema.

    Fix: the original attached ``else`` to the ``if`` inside the loop, so it
    returned the default schema as soon as the FIRST schema examined was not
    'odm2' -- the remaining schemas were never checked.
    """
    from sqlalchemy.engine import reflection
    insp = reflection.Inspector.from_engine(engine)
    for name in insp.get_schema_names():
        if name.lower() == 'odm2':
            return name
    return insp.default_schema_name
def setSchema(engine):
    """Detect the database's ODM2 schema and apply it to all mapped tables."""
    _changeSchema(_getSchema(engine))
# -----------------------------------------------------------------------------
# handles customizing the error messages from Argparse
# -----------------------------------------------------------------------------
class MyParser(argparse.ArgumentParser):
    """Argument parser that frames its error message and prints full help."""

    def error(self, message):
        """Report a parsing error to stderr, show usage, and exit with code 2."""
        banner = "------------------------------\n"
        sys.stderr.write(banner)
        sys.stderr.write('error: %s\n' % message)
        sys.stderr.write(banner)
        self.print_help()
        sys.exit(2)
# handle argument parsing
info = "A simple script that loads up cvterms into a blank ODM2 database"
parser = MyParser(description=info, add_help=True)
# NOTE(review): no flag/positional name is supplied, only dest=, so argparse
# treats this as the single required positional argument "conn_string";
# default=True is dead for a required positional -- confirm intent.
parser.add_argument(
    help="Format: {engine}+{driver}://{user}:{pass}@{address}/{db}\n"
         "mysql+pymysql://ODM:odm@localhost/odm2\n\n"
         "mssql+pyodbc://ODM:123@localhost/odm2?driver=SQL+Server+Native+Client+11.0\n"
         "postgresql+psycopg2://ODM:odm@test.uwrl.usu.edu/odm2\n"
         "sqlite+pysqlite:///path/to/file",
    default=True, type=str, dest='conn_string')
parser.add_argument('-d', '--debug',
                    help="Debugging program without committing anything to"
                         " remote database",
                    action="store_true")
args = parser.parse_args()
# ------------------------------------------------------------------------------
# Script Begin
# ------------------------------------------------------------------------------
## Verify connection string
conn_string = args.conn_string
engine = None
session = None
try:
    # Connect, point every mapped model at the detected schema, and open a session.
    # engine = create_engine(conn_string, encoding='utf-8')
    engine = create_engine(conn_string, encoding='unicode_escape')
    setSchema(engine)
    session = sessionmaker(bind=engine)()
except Exception as e:
    print (e)
    # NOTE(review): exiting with status 0 on a failed connection looks wrong;
    # a nonzero exit code would signal the failure to calling scripts.
    sys.exit(0)
print ("Loading CVs using connection string: %s" % conn_string)
# (vocabulary endpoint name, mapped ORM class) pairs to load.  The
# commented-out media vocabularies appear superseded by "medium" below.
vocab= [("actiontype", CVActionType),
        ("qualitycode", CVQualityCode),
        ("samplingfeaturegeotype", CVSamplingFeatureGeoType),
        ("elevationdatum", CVElevationDatum),
        ("resulttype", CVResultType),
        #("sampledmedium", CVSampledMedium),
        ("speciation", CVSpeciation),
        ("aggregationstatistic", CVAggregationStatistic),
        ("methodtype", CVMethodType),
        ("taxonomicclassifiertype", CVTaxonomicClassifierType),
        ("sitetype", CVSiteType),
        ("censorcode", CVCensorCode),
        ("directivetype", CVDirectiveType),
        ("datasettype",CVDatasetType),
        ("dataqualitytype",CVDataQualityType),
        ("organizationtype", CVOrganizationType),
        ("status", CVStatus),
        ("annotationtype", CVAnnotationType),
        ("samplingfeaturetype", CVSamplingFeatureType),
        ("equipmenttype", CVEquipmentType),
        #("specimenmedium", CVSpecimenMedium),
        ("spatialoffsettype", CVSpatialOffsetType),
        #("referencematerialmedium", CVReferenceMaterialMedium),
        ("specimentype", CVSpecimenType),
        ("variabletype", CVVariableType),
        ("variablename", CVVariableName),
        ("propertydatatype", CVPropertyDataType),
        ("relationshiptype", CVRelationshipType),
        ("unitstype", CVUnitsType),
        ("medium", CVMediumType)
        ]
# SKOS endpoint; %s takes the vocabulary name from ``vocab`` above.
url = "http://vocabulary.odm2.org/api/v1/%s/?format=skos"
#XML encodings: namespace templates; %s takes the element local-name when
# matching tags in the parsed RDF document below.
dc = "{http://purl.org/dc/elements/1.1/}%s"
rdf = "{http://www.w3.org/1999/02/22-rdf-syntax-ns#}%s"
skos = "{http://www.w3.org/2004/02/skos/core#}%s"
odm2 = "{http://vocabulary.odm2.org/ODM2/ODM2Terms/}%s"
# ------------------------------------------------------------------------------
# Progress bar
# ------------------------------------------------------------------------------
def update_progress(count, value):
    """Redraw the one-line progress bar on stdout in place.

    ``count`` is the number of vocabularies processed so far; ``value`` is
    the label for the vocabulary currently loading.  The percentage is
    computed against the module-level ``vocab`` list.
    """
    out = sys.stdout
    # Clear the current terminal line, then redraw the bar over it.
    out.write("\033[K\r")
    out.flush()
    percent = (count + 0.0) / len(vocab) * 100
    out.write("[%-26s] %d%% %s Loaded\r" % ('=' * count, percent, str(value)))
    out.flush()
# Fetch each vocabulary's SKOS document and bulk-insert its terms.
for count, (key, value) in enumerate(vocab):
    # print (count, key, value)
    # print ("\tLoading %s" % key)
    update_progress(count, value)
    try:
        # Download and parse the SKOS/RDF document for this vocabulary.
        data = request.urlopen(url % key).read()
        root = ET.fromstring(data)
        CVObject = value
        objs = []
        for voc in root.findall(rdf %"Description"):
            obj = CVObject()
            try:
                # Term is the last path segment of the rdf:about URI.
                obj.Term = voc.attrib[rdf%"about"].split('/')[-1]
                obj.Name = voc.find(skos%"prefLabel").text
                # obj.Definition = voc.find(skos%"definition").text.encode('UTF-8') if voc.find(skos%"definition") is not None else None
                obj.Definition = voc.find(skos%"definition").text.encode('unicode_escape') if voc.find(skos%"definition") is not None else None
                obj.Category = category = voc.find(odm2%"category").text if voc.find(odm2 % "category") is not None else None
                obj.SourceVocabularyUri = voc.attrib[rdf%"about"]
                objs.append(obj)
            except Exception as e:
                # Best-effort: log the bad term and keep loading the rest.
                session.rollback()
                if obj.Name is not None:
                    print ("issue loading single object %s: %s " %(obj.Name, e))
                pass
        session.add_all(objs)
        # --debug runs everything but never commits to the remote database.
        if not args.debug:
            session.commit()
    except Exception as e:
        session.rollback()
        # NOTE(review): e.message is a Python 2 idiom; plain Python 3
        # exceptions have no .message attribute, so this line itself would
        # raise under Python 3 -- confirm the target interpreter.
        if "Duplicate entry" in e.message:
            e = "Controlled Vocabulary has already been loaded"
        print ("\t...%s Load was unsuccesful: \n%s" % (key, e))
        sys.stdout.write("\n\n... %sLoad was unsuccessful: %s\r"%(key,e))
        sys.stdout.flush()
update_progress(len(vocab), "CV_Terms")
sys.stdout.write("\nCV Load has completed\r\n")
sys.stdout.flush()
| 3,982 | 10,965 | 800 |
4c56c7968b1478675e4c35cdac29eb5ac742a750 | 3,837 | py | Python | pymanopt/autodiff/backends/_pytorch.py | NoemieJaquier/pymanopt | f3ae54b5ebc648a193e64acdb75c97885391c3d7 | [
"BSD-3-Clause"
] | null | null | null | pymanopt/autodiff/backends/_pytorch.py | NoemieJaquier/pymanopt | f3ae54b5ebc648a193e64acdb75c97885391c3d7 | [
"BSD-3-Clause"
] | null | null | null | pymanopt/autodiff/backends/_pytorch.py | NoemieJaquier/pymanopt | f3ae54b5ebc648a193e64acdb75c97885391c3d7 | [
"BSD-3-Clause"
] | null | null | null | """
Module containing functions to differentiate functions using pytorch.
"""
import functools
import warnings
import numpy as np
try:
import torch
except ImportError:
torch = None
else:
from torch import autograd
from ._backend import Backend
from .. import make_tracing_backend_decorator
from ...tools import bisect_sequence, unpack_singleton_sequence_return_value
PyTorch = make_tracing_backend_decorator(_PyTorchBackend)
| 35.859813 | 79 | 0.63852 | """
Module containing functions to differentiate functions using pytorch.
"""
import functools
import warnings
import numpy as np
try:
import torch
except ImportError:
torch = None
else:
from torch import autograd
from ._backend import Backend
from .. import make_tracing_backend_decorator
from ...tools import bisect_sequence, unpack_singleton_sequence_return_value
class _PyTorchBackend(Backend):
    """Autodiff backend that evaluates/differentiates cost functions written with PyTorch."""
    def __init__(self):
        super().__init__("PyTorch")
    @staticmethod
    def is_available():
        # NOTE(review): lexicographic version comparison mis-orders e.g.
        # "0.10" < "0.4"; confirm whether that matters for supported versions.
        return torch is not None and torch.__version__ >= "0.4.1"
    @Backend._assert_backend_available
    def is_compatible(self, function, arguments):
        """Any plain callable is acceptable; tracing happens at call time."""
        return callable(function)
    @staticmethod
    def _from_numpy(array):
        """Wrap numpy ndarray ``array`` in a torch tensor. Since torch does not
        support negative strides, we create a copy of the array to reset the
        strides in that case.
        """
        strides = np.array(array.strides)
        if np.any(strides < 0):
            warnings.warn(
                "PyTorch does not support numpy arrays with negative strides. "
                "Copying array to normalize strides.")
            array = array.copy()
        return torch.from_numpy(array)
    @Backend._assert_backend_available
    def compile_function(self, function, arguments):
        """Wrap ``function`` so it accepts/returns numpy arrays instead of tensors."""
        @functools.wraps(function)
        def wrapper(*args):
            # Convert numpy inputs to tensors, run the user function, and
            # convert the result(s) back to numpy on the CPU.
            x = function(*map(self._from_numpy, args))
            if type(x) in (list, tuple):
                return [l.cpu().detach().numpy() for l in x]
            else:
                return x.cpu().detach().numpy()
        # return function(*map(self._from_numpy, args)).detach().numpy()
        return wrapper
    def _sanitize_gradient(self, tensor):
        """Return ``tensor.grad`` as numpy; a zero array when no grad was populated."""
        if tensor.grad is None:
            return torch.zeros_like(tensor).numpy()
        return tensor.grad.numpy()
    def _sanitize_gradients(self, tensors):
        return list(map(self._sanitize_gradient, tensors))
    @Backend._assert_backend_available
    def compute_gradient(self, function, arguments):
        """Return a callable computing the gradient of ``function`` w.r.t. its numpy args."""
        def gradient(*args):
            # Lift each numpy argument to a leaf tensor that records gradients.
            torch_arguments = []
            for argument in args:
                torch_argument = self._from_numpy(argument)
                torch_argument.requires_grad_()
                torch_arguments.append(torch_argument)
            function(*torch_arguments).backward()
            return self._sanitize_gradients(torch_arguments)
        # Single-argument functions return the bare gradient, not a 1-tuple.
        if len(arguments) == 1:
            return unpack_singleton_sequence_return_value(gradient)
        return gradient
    @Backend._assert_backend_available
    def compute_hessian_vector_product(self, function, arguments):
        """Return a callable computing the Hessian-vector product of ``function``.

        The returned callable takes the point(s) followed by the vector(s),
        as numpy arrays, and returns numpy arrays.
        """
        def hessian_vector_product(*args):
            # First half of *args are points, second half the vectors.
            points, vectors = bisect_sequence(args)
            torch_arguments = []
            for point in points:
                torch_argument = self._from_numpy(point)
                torch_argument.requires_grad_()
                torch_arguments.append(torch_argument)
            torch_vectors = [self._from_numpy(vector) for vector in vectors]
            fx = function(*torch_arguments)
            fx.requires_grad_()
            # First-order gradients with a graph so we can differentiate again.
            gradients = autograd.grad(fx, torch_arguments, create_graph=True,
                                      allow_unused=True)
            # <grad, v> summed over all arguments; backprop of this scalar
            # leaves the Hessian-vector product in the leaves' .grad fields.
            dot_product = 0
            for gradient, vector in zip(gradients, torch_vectors):
                dot_product += torch.tensordot(
                    gradient, vector, dims=gradient.dim())
            dot_product.backward()
            return self._sanitize_gradients(torch_arguments)
        if len(arguments) == 1:
            return unpack_singleton_sequence_return_value(
                hessian_vector_product)
        return hessian_vector_product
PyTorch = make_tracing_backend_decorator(_PyTorchBackend)
| 2,419 | 953 | 23 |
1c37bc84fb69610331ac21e64650183042302f84 | 2,438 | py | Python | Organise-Files-According-To-Their-Extensions/script_dirs.py | A-kriti/Amazing-Python-Scripts | ebf607fe39e6d9e61f30ec3439fc8d6ab1f736b9 | [
"MIT"
] | 930 | 2020-09-05T22:07:28.000Z | 2022-03-30T07:56:18.000Z | Organise-Files-According-To-Their-Extensions/script_dirs.py | maheshdbabar9340/Amazing-Python-Scripts | e2272048cbe49b4bda5072bbdd8479739bb6c18d | [
"MIT"
] | 893 | 2020-09-04T07:57:24.000Z | 2022-02-08T02:12:26.000Z | Organise-Files-According-To-Their-Extensions/script_dirs.py | maheshdbabar9340/Amazing-Python-Scripts | e2272048cbe49b4bda5072bbdd8479739bb6c18d | [
"MIT"
] | 497 | 2020-09-05T08:16:24.000Z | 2022-03-31T00:55:57.000Z | import os
from pathlib import Path
import sys
# Taking input
print_string = """
Type Path of the directory
OR
Press enter for running the script on current directory:
OR
Type quit
"""
print(print_string + "\n\n")
input_path = input("Input:")
print("\n\n")
# Script will terminate if input is 'quit'
if input_path == "quit":
sys.exit(1)
# If nothing is entered then current working directory will be taken as the input path
if input_path == "":
input_path = os.getcwd()
input_path = Path(input_path)
# Changing the working directory to input path
os.chdir(input_path)
# Creates a dictionary "dic" with key,value pairs where key is extension and value is no. of files with that extension
dic = {}
for file in os.listdir(os.getcwd()):
if os.path.isfile(file):
extension = file.split(".")[-1]
dic[extension] = dic.get(extension, 0) + 1
for key in dic:
print(f"There are {dic[key]} files file with extension {key}")
print("\n\n")
# assigning a variable named current Path of current working directory just for simplicity.
# could have used input_path too
current = Path(os.getcwd())
'''
When this script would run the structure of the current directory would change.Hence,
we are assigning list_dir variable the files and dirs in current working directory which the script would modify
'''
list_dir = os.listdir(current)
# keys of dic are extensions of the file
for key in dic:
# try except block for making directory if it doesn't exists already
try:
os.mkdir(key)
except:
print(
f"directory named {key} already exists so it won't be overwrited \n"
)
# goes through the files in list_dir
# we are not using os.listdir() as the directory structure will change during the execution
for file in list_dir:
if file.split(".")[-1] == key and os.path.isfile(file):
# prints absolute path of the file
print(os.path.abspath(file))
# Renames the path of the file or moves the file in to the newly created directory
Path.rename(Path(os.path.abspath(file)),
current / Path("./{}/".format(key) + file))
# This block just prints a note and the current structure of the directory
print(
"\n Script has organised files as per their extensions into different directories! \n"
)
for file in os.listdir(os.getcwd()):
if not (os.path.isfile(file)):
print(file)
| 31.25641 | 118 | 0.684988 | import os
from pathlib import Path
import sys
# Taking input
print_string = """
Type Path of the directory
OR
Press enter for running the script on current directory:
OR
Type quit
"""
print(print_string + "\n\n")
input_path = input("Input:")
print("\n\n")
# Script will terminate if input is 'quit'
if input_path == "quit":
sys.exit(1)
# If nothing is entered then current working directory will be taken as the input path
if input_path == "":
input_path = os.getcwd()
input_path = Path(input_path)
# Changing the working directory to input path
os.chdir(input_path)
# Creates a dictionary "dic" with key,value pairs where key is extension and value is no. of files with that extension
dic = {}
for file in os.listdir(os.getcwd()):
if os.path.isfile(file):
extension = file.split(".")[-1]
dic[extension] = dic.get(extension, 0) + 1
for key in dic:
print(f"There are {dic[key]} files file with extension {key}")
print("\n\n")
# assigning a variable named current Path of current working directory just for simplicity.
# could have used input_path too
current = Path(os.getcwd())
'''
When this script would run the structure of the current directory would change.Hence,
we are assigning list_dir variable the files and dirs in current working directory which the script would modify
'''
list_dir = os.listdir(current)
# keys of dic are extensions of the file
for key in dic:
# try except block for making directory if it doesn't exists already
try:
os.mkdir(key)
except:
print(
f"directory named {key} already exists so it won't be overwrited \n"
)
# goes through the files in list_dir
# we are not using os.listdir() as the directory structure will change during the execution
for file in list_dir:
if file.split(".")[-1] == key and os.path.isfile(file):
# prints absolute path of the file
print(os.path.abspath(file))
# Renames the path of the file or moves the file in to the newly created directory
Path.rename(Path(os.path.abspath(file)),
current / Path("./{}/".format(key) + file))
# This block just prints a note and the current structure of the directory
print(
"\n Script has organised files as per their extensions into different directories! \n"
)
for file in os.listdir(os.getcwd()):
if not (os.path.isfile(file)):
print(file)
| 0 | 0 | 0 |
5b238bc281f162f7e937c41dc61c9bf5d6246f5d | 7,586 | py | Python | main.py | Nebula4869/seu-auto-health-check-in | 70d7e8fab352d616e0299a492957b2a17e56fd4f | [
"MIT"
] | null | null | null | main.py | Nebula4869/seu-auto-health-check-in | 70d7e8fab352d616e0299a492957b2a17e56fd4f | [
"MIT"
] | 1 | 2021-11-11T06:58:40.000Z | 2021-11-11T06:58:40.000Z | main.py | Nebula4869/seu-auto-health-check-in | 70d7e8fab352d616e0299a492957b2a17e56fd4f | [
"MIT"
] | null | null | null | import time
from selenium import webdriver
from selenium import common
import func_timeout
import datetime
import requests
import logging
import zipfile
import winreg
import sys
import os
MAX_RETRIES = 0
def send_massage(content: str):
    """Send an SMS / e-mail notification with the check-in result.

    :param content: message body to deliver
    :return: None
    """
    # TODO: actually deliver via SMS / e-mail; for now the message is only logged.
    logger.info(content)
@func_timeout.func_set_timeout(60)
def download_chrome_driver():
    """Download the chromedriver matching the locally installed Chrome.

    Reads the installed Chrome version from the Windows registry, then
    fetches the matching chromedriver build from the npm.taobao.org mirror.
    When the exact build is not mirrored (non-200 response), the last
    version component is decremented and the download retried.

    :return: None
    """
    reg_key = winreg.OpenKey(winreg.HKEY_CURRENT_USER, r'Software\Google\Chrome\BLBeacon')
    chrome_version = winreg.QueryValueEx(reg_key, 'version')[0]
    archive = 'chromedriver_win32.zip'
    while True:
        res = requests.get('https://npm.taobao.org/mirrors/chromedriver/{}/chromedriver_win32.zip'.format(chrome_version), stream=True)
        if res.status_code != 200:
            # Exact build missing on the mirror: step the patch component down and retry.
            *prefix, patch = chrome_version.split('.')
            chrome_version = '.'.join(prefix) + '.' + str(int(patch) - 1)
            continue
        with open(archive, 'wb') as f:
            f.write(res.content)
        with zipfile.ZipFile(archive, 'r') as f:
            f.extract('chromedriver.exe')
        os.remove(archive)
        return
@func_timeout.func_set_timeout(120)
def check_in(driver: webdriver, username: str, password: str, bbt: str):
    """
    Perform the automated daily health check-in (aborts after 120 s).

    :param driver: Chrome webdriver instance
    :param username: unified-identity (SSO) username
    :param password: unified-identity (SSO) password
    :param bbt: body-temperature value to report
    :return: None
    """
    '''登录界面'''
    # Open the check-in app; redirects to the SSO login page when not authenticated.
    driver.get('http://ehall.seu.edu.cn/appShow?appId=5821102911870447')
    # Busy-wait until either the login form or the report page has rendered.
    while (len(driver.find_elements_by_id('username')) == 0 or len(driver.find_elements_by_id('password')) == 0 or len(driver.find_elements_by_class_name('auth_login_btn')) == 0) \
            and len(driver.find_elements_by_xpath('/html/body/main/article/section/div[2]/div[1]')) == 0:
        pass
    # Login form present -> fill credentials and submit.
    if len(driver.find_elements_by_id('username')) != 0 or len(driver.find_elements_by_id('password')) != 0 or len(driver.find_elements_by_class_name('auth_login_btn')) != 0:
        input_username = driver.find_element_by_id('username')
        input_username.click()
        input_username.send_keys(username)
        input_password = driver.find_element_by_id('password')
        input_password.click()
        input_password.send_keys(password)
        button_xsfw = driver.find_element_by_class_name('auth_login_btn')
        button_xsfw.click()
    '''每日健康申报界面'''
    # Wait for the daily report page, then open the "new report" form.
    while len(driver.find_elements_by_xpath('/html/body/main/article/section/div[2]/div[1]')) == 0:
        pass
    button_add = driver.find_element_by_xpath('/html/body/main/article/section/div[2]/div[1]')
    button_add.click()
    '''新增上报界面'''
    # Wait for either the temperature form or a blocking dialog.
    while (len(driver.find_elements_by_name('DZ_JSDTCJTW')) == 0 or len(driver.find_elements_by_id('save')) == 0) and len(driver.find_elements_by_class_name('bh-dialog-center')) == 0:
        pass
    # A dialog means the report window is closed or already submitted today.
    if len(driver.find_elements_by_class_name('bh-dialog-center')) != 0:
        if '每日健康申报截止时间15:00' in driver.page_source:
            logger.warning('每日健康申报截止时间15:00')
        if '目前每日健康打卡时间是1时~15时,请在此时间内填报。' in driver.page_source:
            logger.warning('目前每日健康打卡时间是1时~15时,请在此时间内填报。')
        else:
            logger.warning('今日已填报!')
        driver.quit()
        return
    '''填写体温并提交'''
    # Enter the temperature (clicks go through execute_script to dodge overlays),
    # save, then confirm the dialog.
    input_bbt = driver.find_element_by_name('DZ_JSDTCJTW')
    driver.execute_script("arguments[0].click();", input_bbt)
    input_bbt.send_keys(bbt)
    button_save = driver.find_element_by_id('save')
    driver.execute_script("arguments[0].click();", button_save)
    while len(driver.find_elements_by_class_name('bh-bg-primary')) == 0:
        pass
    button_add = driver.find_element_by_class_name('bh-bg-primary')
    button_add.click()
def try_to_check_in(driver: webdriver, username: str, password: str, bbt: str) -> int:
    """
    Attempt the automated check-in, retrying up to 10 times on failure.

    :param driver: Chrome webdriver instance
    :param username: unified-identity (SSO) username
    :param password: unified-identity (SSO) password
    :param bbt: body-temperature value to report
    :return: 0 on success, -1 once the retry budget is exhausted
    """
    global MAX_RETRIES
    try:
        check_in(driver, username, password, bbt)
        # Bug fix: reset the retry budget after a success so later days get
        # the full 10 retries again (the original only reset on final failure).
        MAX_RETRIES = 0
        return 0
    except Exception as e:
        if MAX_RETRIES < 10:
            MAX_RETRIES += 1
            logger.info('{} {} 上报失败\n{}\n正在重试第{}次...'.format(username, datetime.datetime.now().today(), e, MAX_RETRIES))
            # Bug fix: propagate the retry's result. The original discarded it
            # and fell through to an implicit ``None``, so a *successful* retry
            # was still reported as a failure by the caller's ``res == 0`` test.
            return try_to_check_in(driver, username, password, bbt)
        else:
            logger.info('{} {} 上报失败\n{}\n重试次数超过上限...'.format(username, datetime.datetime.now().today(), e))
            MAX_RETRIES = 0
            return -1
def main(check_in_time: str, username: str, password: str, bbt: str, headless: bool):
    """
    Run forever, performing the check-in once a day at the given time.

    :param check_in_time: daily run time as "HH:MM:SS"
    :param username: unified-identity (SSO) username
    :param password: unified-identity (SSO) password
    :param bbt: body-temperature value to report
    :param headless: whether to hide the browser window
    :return: None
    """
    '''检测引擎存在'''
    # Ensure a chromedriver binary exists next to the script.
    if not os.path.exists('chromedriver.exe'):
        download_chrome_driver()
    '''检测引擎版本'''
    # Smoke-test the driver; re-download when it no longer matches Chrome.
    try:
        options = webdriver.ChromeOptions()
        options.add_argument('--headless')
        driver = webdriver.Chrome('chromedriver.exe', options=options)
        driver.quit()
    except common.exceptions.SessionNotCreatedException:
        download_chrome_driver()
    # Poll the wall clock twice a second; fire when it matches check_in_time.
    while True:
        time.sleep(0.5)
        if str(datetime.datetime.now().time())[:8] == check_in_time:
            driver = None
            '''初始化引擎'''
            # Fresh driver per run, with automation fingerprints masked.
            try:
                options = webdriver.ChromeOptions()
                options.add_experimental_option('excludeSwitches', ['enable-automation'])
                options.add_argument('--disable-blink-features=AutomationControlled')
                if headless:
                    options.add_argument('--headless')
                    options.add_argument('--disable-gpu')
                driver = webdriver.Chrome('chromedriver.exe', options=options)
                driver.execute_cdp_cmd("Page.addScriptToEvaluateOnNewDocument", {'source': 'Object.defineProperty(navigator, "webdriver", {get: () => undefined})'})
            except Exception as e:
                logger.error(e)
            '''尝试自动上报'''
            try:
                res = try_to_check_in(driver, username, password, bbt)
                content = '{} {} 上报{}!'.format(username, datetime.datetime.now().today(), '成功' if res == 0 else '失败')
            except func_timeout.exceptions.FunctionTimedOut:
                content = '{} {} 上报失败!'.format(username, datetime.datetime.now().today())
            '''关闭引擎'''
            # Best-effort teardown: driver may be None or already closed.
            try:
                driver.quit()
            except Exception as e:
                logger.error(e)
            '''发送短信/邮件'''
            try:
                send_massage(content)
            except Exception as e:
                logger.error(e)
if __name__ == '__main__':
    # Configure console + per-run file logging before starting the scheduler.
    log_time = time.localtime()
    logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')
    logger = logging.getLogger(__name__)
    formatter = logging.Formatter('%(asctime)s - %(levelname)s - %(message)s')
    # NOTE(review): this open/close pair only pre-creates the log file;
    # logging.FileHandler below creates it anyway, so it looks redundant.
    file = open('{}.log'.format(time.strftime("%Y-%m-%d-%H-%M", log_time)), 'w')
    file.close()
    handler = logging.FileHandler('{}.log'.format(time.strftime("%Y-%m-%d-%H-%M", log_time)))
    handler.setFormatter(formatter)
    handler.setLevel(logging.INFO)
    logger.addHandler(handler)
    # argv: [1] check-in time "HH:MM:SS", [2] username, [3] password,
    # [4] temperature; headless browsing is always on.
    main(sys.argv[1], sys.argv[2], sys.argv[3], sys.argv[4], True)
| 35.615023 | 184 | 0.60928 | import time
from selenium import webdriver
from selenium import common
import func_timeout
import datetime
import requests
import logging
import zipfile
import winreg
import sys
import os
MAX_RETRIES = 0
def send_massage(content: str):
"""
发送短信/邮件
:param content: 邮件内容
:return: None
"""
# TODO: 发送短信/邮件
logger.info(content)
@func_timeout.func_set_timeout(60)
def download_chrome_driver():
"""
下载Chrome引擎
:return: None
"""
chrome_version = winreg.QueryValueEx(winreg.OpenKey(winreg.HKEY_CURRENT_USER, r'Software\Google\Chrome\BLBeacon'), 'version')[0]
while True:
res = requests.get('https://npm.taobao.org/mirrors/chromedriver/{}/chromedriver_win32.zip'.format(chrome_version), stream=True)
if res.status_code == 200:
with open('chromedriver_win32.zip', 'wb') as f:
f.write(res.content)
with zipfile.ZipFile('chromedriver_win32.zip', 'r') as f:
f.extract('chromedriver.exe')
os.remove('chromedriver_win32.zip')
break
else:
chrome_version = '.'.join(chrome_version.split('.')[:-1]) + '.' + str(int(chrome_version.split('.')[-1]) - 1)
@func_timeout.func_set_timeout(120)
def check_in(driver: webdriver, username: str, password: str, bbt: str):
"""
自动上报
:param driver: Chrome爬虫引擎
:param username: 统一身份认证用户名
:param password: 统一身份认证密码
:param bbt: 上报体温值
:return: None
"""
'''登录界面'''
driver.get('http://ehall.seu.edu.cn/appShow?appId=5821102911870447')
while (len(driver.find_elements_by_id('username')) == 0 or len(driver.find_elements_by_id('password')) == 0 or len(driver.find_elements_by_class_name('auth_login_btn')) == 0) \
and len(driver.find_elements_by_xpath('/html/body/main/article/section/div[2]/div[1]')) == 0:
pass
if len(driver.find_elements_by_id('username')) != 0 or len(driver.find_elements_by_id('password')) != 0 or len(driver.find_elements_by_class_name('auth_login_btn')) != 0:
input_username = driver.find_element_by_id('username')
input_username.click()
input_username.send_keys(username)
input_password = driver.find_element_by_id('password')
input_password.click()
input_password.send_keys(password)
button_xsfw = driver.find_element_by_class_name('auth_login_btn')
button_xsfw.click()
'''每日健康申报界面'''
while len(driver.find_elements_by_xpath('/html/body/main/article/section/div[2]/div[1]')) == 0:
pass
button_add = driver.find_element_by_xpath('/html/body/main/article/section/div[2]/div[1]')
button_add.click()
'''新增上报界面'''
while (len(driver.find_elements_by_name('DZ_JSDTCJTW')) == 0 or len(driver.find_elements_by_id('save')) == 0) and len(driver.find_elements_by_class_name('bh-dialog-center')) == 0:
pass
if len(driver.find_elements_by_class_name('bh-dialog-center')) != 0:
if '每日健康申报截止时间15:00' in driver.page_source:
logger.warning('每日健康申报截止时间15:00')
if '目前每日健康打卡时间是1时~15时,请在此时间内填报。' in driver.page_source:
logger.warning('目前每日健康打卡时间是1时~15时,请在此时间内填报。')
else:
logger.warning('今日已填报!')
driver.quit()
return
'''填写体温并提交'''
input_bbt = driver.find_element_by_name('DZ_JSDTCJTW')
driver.execute_script("arguments[0].click();", input_bbt)
input_bbt.send_keys(bbt)
button_save = driver.find_element_by_id('save')
driver.execute_script("arguments[0].click();", button_save)
while len(driver.find_elements_by_class_name('bh-bg-primary')) == 0:
pass
button_add = driver.find_element_by_class_name('bh-bg-primary')
button_add.click()
def try_to_check_in(driver: webdriver, username: str, password: str, bbt: str) -> int:
"""
尝试自动上报
:param driver: Chrome爬虫引擎
:param username: 统一身份认证用户名
:param password: 统一身份认证密码
:param bbt: 上报体温值
:return: 运行结果 0 成功 -1 失败
"""
global MAX_RETRIES
try:
check_in(driver, username, password, bbt)
return 0
except Exception as e:
if MAX_RETRIES < 10:
MAX_RETRIES += 1
logger.info('{} {} 上报失败\n{}\n正在重试第{}次...'.format(username, datetime.datetime.now().today(), e, MAX_RETRIES))
try_to_check_in(driver, username, password, bbt)
else:
logger.info('{} {} 上报失败\n{}\n重试次数超过上限...'.format(username, datetime.datetime.now().today(), e))
MAX_RETRIES = 0
return -1
def main(check_in_time: str, username: str, password: str, bbt: str, headless: bool):
"""
定时运行
:param check_in_time: 每日脚本运行时间
:param username: 统一身份认证用户名列表
:param password: 统一身份认证密码列表
:param bbt: 上报体温值列表
:param headless: 是否隐藏浏览器界面
:return: None
"""
'''检测引擎存在'''
if not os.path.exists('chromedriver.exe'):
download_chrome_driver()
'''检测引擎版本'''
try:
options = webdriver.ChromeOptions()
options.add_argument('--headless')
driver = webdriver.Chrome('chromedriver.exe', options=options)
driver.quit()
except common.exceptions.SessionNotCreatedException:
download_chrome_driver()
while True:
time.sleep(0.5)
if str(datetime.datetime.now().time())[:8] == check_in_time:
driver = None
'''初始化引擎'''
try:
options = webdriver.ChromeOptions()
options.add_experimental_option('excludeSwitches', ['enable-automation'])
options.add_argument('--disable-blink-features=AutomationControlled')
if headless:
options.add_argument('--headless')
options.add_argument('--disable-gpu')
driver = webdriver.Chrome('chromedriver.exe', options=options)
driver.execute_cdp_cmd("Page.addScriptToEvaluateOnNewDocument", {'source': 'Object.defineProperty(navigator, "webdriver", {get: () => undefined})'})
except Exception as e:
logger.error(e)
'''尝试自动上报'''
try:
res = try_to_check_in(driver, username, password, bbt)
content = '{} {} 上报{}!'.format(username, datetime.datetime.now().today(), '成功' if res == 0 else '失败')
except func_timeout.exceptions.FunctionTimedOut:
content = '{} {} 上报失败!'.format(username, datetime.datetime.now().today())
'''关闭引擎'''
try:
driver.quit()
except Exception as e:
logger.error(e)
'''发送短信/邮件'''
try:
send_massage(content)
except Exception as e:
logger.error(e)
if __name__ == '__main__':
log_time = time.localtime()
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')
logger = logging.getLogger(__name__)
formatter = logging.Formatter('%(asctime)s - %(levelname)s - %(message)s')
file = open('{}.log'.format(time.strftime("%Y-%m-%d-%H-%M", log_time)), 'w')
file.close()
handler = logging.FileHandler('{}.log'.format(time.strftime("%Y-%m-%d-%H-%M", log_time)))
handler.setFormatter(formatter)
handler.setLevel(logging.INFO)
logger.addHandler(handler)
main(sys.argv[1], sys.argv[2], sys.argv[3], sys.argv[4], True)
| 0 | 0 | 0 |
d29bcdb8b1f78c5c097ab0032be5256583012fd3 | 392 | py | Python | users/migrations/0003_auto_20200803_1949.py | asanka9/Donation-Project | 7182ccad58bd6e61ccc29b79bbab28e428ed2a17 | [
"Apache-2.0"
] | null | null | null | users/migrations/0003_auto_20200803_1949.py | asanka9/Donation-Project | 7182ccad58bd6e61ccc29b79bbab28e428ed2a17 | [
"Apache-2.0"
] | null | null | null | users/migrations/0003_auto_20200803_1949.py | asanka9/Donation-Project | 7182ccad58bd6e61ccc29b79bbab28e428ed2a17 | [
"Apache-2.0"
] | null | null | null | # Generated by Django 3.0.5 on 2020-08-03 14:19
from django.db import migrations, models
| 20.631579 | 47 | 0.59949 | # Generated by Django 3.0.5 on 2020-08-03 14:19
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('users', '0002_auto_20200803_0900'),
]
operations = [
migrations.AlterField(
model_name='profile',
name='user_description',
field=models.TextField(default=''),
),
]
| 0 | 278 | 23 |
a5ae6b2ca16dd14e888d229c983beda49888f2d0 | 7,950 | py | Python | code/socialDistribution/tests/test_models.py | CMPUT404F21TEAM/social-distribution | c6775bfa3bf93025d426bc4601431128a51d4c48 | [
"W3C-20150513"
] | null | null | null | code/socialDistribution/tests/test_models.py | CMPUT404F21TEAM/social-distribution | c6775bfa3bf93025d426bc4601431128a51d4c48 | [
"W3C-20150513"
] | 173 | 2021-09-30T00:12:03.000Z | 2021-12-09T00:32:46.000Z | code/socialDistribution/tests/test_models.py | CMPUT404F21TEAM/social-distribution | c6775bfa3bf93025d426bc4601431128a51d4c48 | [
"W3C-20150513"
] | 3 | 2021-12-09T01:27:49.000Z | 2022-03-11T06:04:16.000Z | # python manage.py test api.tests.tests.test_models
from django.test import TestCase, TransactionTestCase
from django.contrib.auth.models import User
from django.db.utils import IntegrityError
from django.test.testcases import LiveServerTestCase, LiveServerThread
from mixer.backend.django import mixer
from datetime import datetime, timedelta, timezone
import logging
from socialDistribution.models import *
from socialDistribution.builders import *
from cmput404.constants import API_BASE
class AuthorTests(LiveServerTestCase):
""" Unit tests for Author. """
# the pillow, https://stackoverflow.com/users/2812257/the-pillow, "How can I disable logging while running unit tests in Python Django?"
# https://stackoverflow.com/a/54519433, 2019-02-04, CC BY-SA 4.0
# disable logging before tests
@classmethod
# enable logging after tests
@classmethod
# a bit more work needed to get this to correctly find the debug server
# def test_get_author_json(self):
# # makes an API call, server must be running
# local = mixer.blend(LocalAuthor)
# remote = Author.objects.get(id=local.id)
# print(self.live_server_url)
# print(socket.gethostname())
# author_json = remote.as_json()
# print(author_json)
class LocalAuthorTests(TestCase):
""" Unit tests for LocalAuthor """
# the pillow, https://stackoverflow.com/users/2812257/the-pillow, "How can I disable logging while running unit tests in Python Django?"
# https://stackoverflow.com/a/54519433, 2019-02-04, CC BY-SA 4.0
# disable logging before tests
@classmethod
# enable logging after tests
@classmethod
class LikeTests(TransactionTestCase):
""" Unit tests for Likes, PostLikes and CommentLikes. """
# the pillow, https://stackoverflow.com/users/2812257/the-pillow, "How can I disable logging while running unit tests in Python Django?"
# https://stackoverflow.com/a/54519433, 2019-02-04, CC BY-SA 4.0
# disable logging before tests
@classmethod
# enable logging after tests
@classmethod
def test_post_like(self):
""" Test successfully liking a Post """
post = mixer.blend(LocalPost)
author = mixer.blend(Author)
# like a post
post.likes.create(author=author)
self.assertEqual(1, post.likes.count())
self.assertEqual(author, post.likes.first().author)
def test_comment_like(self):
""" Test successfully liking a Comment """
comment = mixer.blend(Comment)
author = mixer.blend(Author)
# like a comment
comment.likes.create(author=author)
self.assertEqual(1, comment.likes.count())
self.assertEqual(author, comment.likes.first().author)
def test_no_author(self):
""" Test creating a Like with no Author """
post = mixer.blend(LocalPost)
with self.assertRaises(IntegrityError):
post.likes.create()
def test_no_post(self):
""" Test creating a like with no object """
with self.assertRaises(IntegrityError):
author = mixer.blend(Author)
PostLike.objects.create(author=author)
with self.assertRaises(IntegrityError):
author = mixer.blend(Author)
CommentLike.objects.create(author=author)
def test_double_like(self):
""" Test liking a post multiple times """
post = mixer.blend(LocalPost)
author = mixer.blend(Author)
# add a like
like = post.likes.create(author=author)
self.assertEqual(1, post.likes.count())
# adding another like should raise an error
with self.assertRaisesMessage(IntegrityError, "UNIQUE constraint failed"):
post.likes.create(author=author)
# should be able to remove and like again
like.delete()
self.assertEqual(0, post.likes.count())
post.likes.create(author=author)
self.assertEqual(1, post.likes.count())
| 33.544304 | 140 | 0.67522 | # python manage.py test api.tests.tests.test_models
from django.test import TestCase, TransactionTestCase
from django.contrib.auth.models import User
from django.db.utils import IntegrityError
from django.test.testcases import LiveServerTestCase, LiveServerThread
from mixer.backend.django import mixer
from datetime import datetime, timedelta, timezone
import logging
from socialDistribution.models import *
from socialDistribution.builders import *
from cmput404.constants import API_BASE
class AuthorTests(LiveServerTestCase):
""" Unit tests for Author. """
# the pillow, https://stackoverflow.com/users/2812257/the-pillow, "How can I disable logging while running unit tests in Python Django?"
# https://stackoverflow.com/a/54519433, 2019-02-04, CC BY-SA 4.0
# disable logging before tests
@classmethod
def setUpClass(cls):
logging.disable(logging.CRITICAL)
# enable logging after tests
@classmethod
def tearDownClass(cls):
logging.disable(logging.NOTSET)
def test_create_author(self):
url = "http://notmyserver.com/author/839028403"
author = Author.objects.create(url=url)
id = author.id
fetched = Author.objects.get(id=id)
self.assertEqual(url, fetched.url)
# a bit more work needed to get this to correctly find the debug server
# def test_get_author_json(self):
# # makes an API call, server must be running
# local = mixer.blend(LocalAuthor)
# remote = Author.objects.get(id=local.id)
# print(self.live_server_url)
# print(socket.gethostname())
# author_json = remote.as_json()
# print(author_json)
class LocalAuthorTests(TestCase):
""" Unit tests for LocalAuthor """
# the pillow, https://stackoverflow.com/users/2812257/the-pillow, "How can I disable logging while running unit tests in Python Django?"
# https://stackoverflow.com/a/54519433, 2019-02-04, CC BY-SA 4.0
# disable logging before tests
@classmethod
def setUpClass(cls):
logging.disable(logging.CRITICAL)
# enable logging after tests
@classmethod
def tearDownClass(cls):
logging.disable(logging.NOTSET)
def test_create_local_author(self):
author = mixer.blend(LocalAuthor)
fetched = LocalAuthor.objects.get(username=author.username)
self.assertEqual(author.id, fetched.id)
self.assertEqual(author.user, fetched.user)
self.assertEqual(author.username, fetched.username)
self.assertEqual(author.displayName, fetched.displayName)
self.assertEqual(author.githubUrl, fetched.githubUrl)
self.assertEqual(author.profileImageUrl, fetched.profileImageUrl)
self.assertEqual(f"{API_BASE}/author/{author.id}", fetched.url)
vanilla_author = Author.objects.get(id=author.id)
self.assertEqual(f"{API_BASE}/author/{author.id}", vanilla_author.url)
class PostTest(TestCase):
# the pillow, https://stackoverflow.com/users/2812257/the-pillow, "How can I disable logging while running unit tests in Python Django?"
# https://stackoverflow.com/a/54519433, 2019-02-04, CC BY-SA 4.0
# disable logging before tests
@classmethod
def setUpClass(cls):
logging.disable(logging.CRITICAL)
# enable logging after tests
@classmethod
def tearDownClass(cls):
logging.disable(logging.NOTSET)
def test_post_is_public(self):
visibility = LocalPost.Visibility.FRIENDS
post = PostBuilder().visibility(visibility).build()
self.assertFalse(post.is_public())
def test_post_is_friends(self):
visibility = LocalPost.Visibility.FRIENDS
post = PostBuilder().visibility(visibility).build()
self.assertTrue(post.is_friends())
def test_post_when(self):
time = datetime.now(timezone.utc)
post = PostBuilder().pub_date(time).build()
self.assertTrue(post.when() == 'just now')
def test_post_total_likes(self):
likes = 25
post = PostBuilder().likes(likes).build()
self.assertTrue(post.total_likes() == likes)
# TODO test all PostQuerySet methods
class CommentModelTests(TestCase):
# the pillow, https://stackoverflow.com/users/2812257/the-pillow, "How can I disable logging while running unit tests in Python Django?"
# https://stackoverflow.com/a/54519433, 2019-02-04, CC BY-SA 4.0
# disable logging before tests
@classmethod
def setUpClass(cls):
logging.disable(logging.CRITICAL)
# enable logging after tests
@classmethod
def tearDownClass(cls):
logging.disable(logging.NOTSET)
def test_when_just_now(self):
'''
comment.when() returns just now right after post creation
'''
author = mixer.blend(LocalAuthor)
post = mixer.blend(LocalPost, author=author)
comment = mixer.blend(Comment, author=author, post=post, pub_date=datetime.now(timezone.utc))
self.assertIs(comment.when() == 'just now', True)
def test_when_10_seconds(self):
'''
comment.when() returns 10 seconds ago after the time has passed
'''
author = mixer.blend(LocalAuthor)
post = mixer.blend(LocalPost, author=author)
pub_date = datetime.now(timezone.utc) - timedelta(seconds=10)
comment = mixer.blend(Comment, author=author, post=post, pub_date=pub_date)
self.assertIs(comment.when() == '10 seconds ago', True)
class LikeTests(TransactionTestCase):
""" Unit tests for Likes, PostLikes and CommentLikes. """
# the pillow, https://stackoverflow.com/users/2812257/the-pillow, "How can I disable logging while running unit tests in Python Django?"
# https://stackoverflow.com/a/54519433, 2019-02-04, CC BY-SA 4.0
# disable logging before tests
@classmethod
def setUpClass(cls):
logging.disable(logging.CRITICAL)
# enable logging after tests
@classmethod
def tearDownClass(cls):
logging.disable(logging.NOTSET)
def test_post_like(self):
""" Test successfully liking a Post """
post = mixer.blend(LocalPost)
author = mixer.blend(Author)
# like a post
post.likes.create(author=author)
self.assertEqual(1, post.likes.count())
self.assertEqual(author, post.likes.first().author)
def test_comment_like(self):
""" Test successfully liking a Comment """
comment = mixer.blend(Comment)
author = mixer.blend(Author)
# like a comment
comment.likes.create(author=author)
self.assertEqual(1, comment.likes.count())
self.assertEqual(author, comment.likes.first().author)
def test_no_author(self):
""" Test creating a Like with no Author """
post = mixer.blend(LocalPost)
with self.assertRaises(IntegrityError):
post.likes.create()
def test_no_post(self):
""" Test creating a like with no object """
with self.assertRaises(IntegrityError):
author = mixer.blend(Author)
PostLike.objects.create(author=author)
with self.assertRaises(IntegrityError):
author = mixer.blend(Author)
CommentLike.objects.create(author=author)
def test_double_like(self):
""" Test liking a post multiple times """
post = mixer.blend(LocalPost)
author = mixer.blend(Author)
# add a like
like = post.likes.create(author=author)
self.assertEqual(1, post.likes.count())
# adding another like should raise an error
with self.assertRaisesMessage(IntegrityError, "UNIQUE constraint failed"):
post.likes.create(author=author)
# should be able to remove and like again
like.delete()
self.assertEqual(0, post.likes.count())
post.likes.create(author=author)
self.assertEqual(1, post.likes.count())
| 1,945 | 1,738 | 256 |
f71fca6c7b717f1271683d16d11ce61370e99869 | 2,628 | py | Python | Apps/phforescoutcounteract/forescoutcounteract_consts.py | ryanbsaunders/phantom-apps | 1befda793a08d366fbd443894f993efb1baf9635 | [
"Apache-2.0"
] | 74 | 2019-10-22T02:00:53.000Z | 2022-03-15T12:56:13.000Z | Apps/phforescoutcounteract/forescoutcounteract_consts.py | ryanbsaunders/phantom-apps | 1befda793a08d366fbd443894f993efb1baf9635 | [
"Apache-2.0"
] | 375 | 2019-10-22T20:53:50.000Z | 2021-11-09T21:28:43.000Z | Apps/phforescoutcounteract/forescoutcounteract_consts.py | ryanbsaunders/phantom-apps | 1befda793a08d366fbd443894f993efb1baf9635 | [
"Apache-2.0"
] | 175 | 2019-10-23T15:30:42.000Z | 2021-11-05T21:33:31.000Z | # File: forescoutcounteract_consts.py
# Copyright (c) 2018-2021 Splunk Inc.
#
# Licensed under Apache 2.0 (https://www.apache.org/licenses/LICENSE-2.0.txt)
#
# --
# Define your constants here
FS_DEX_HOST_ENDPOINT = '/fsapi/niCore/Hosts'
FS_DEX_LIST_ENDPOINT = '/fsapi/niCore/Lists'
FS_DEX_TEST_CONNECTIVITY = \
"""<?xml version="1.0" encoding="UTF-8"?>
<FSAPI TYPE="request" API_VERSION="1.0">
<TRANSACTION TYPE="update">
<OPTIONS CREATE_NEW_HOST="true"/>
<HOST_KEY NAME="ip" VALUE="{host_key_value}"/>
<PROPERTIES></PROPERTIES>
</TRANSACTION>
</FSAPI>"""
FS_DEX_UPDATE_SIMPLE_PROPERTY = \
"""<?xml version='1.0' encoding='utf-8'?>
<FSAPI TYPE="request" API_VERSION="1.0">
<TRANSACTION TYPE="update">
<OPTIONS CREATE_NEW_HOST="{create_host}"/>
<HOST_KEY NAME="{host_key_name}" VALUE="{host_key_value}"/>
<PROPERTIES>
<PROPERTY NAME="{property_name}">
<VALUE>{property_value}</VALUE>
</PROPERTY>
</PROPERTIES>
</TRANSACTION>
</FSAPI>"""
FS_DEX_DELETE_SIMPLE_PROPERTY = \
"""<?xml version='1.0' encoding='utf-8'?>
<FSAPI TYPE="request" API_VERSION="1.0">
<TRANSACTION TYPE="delete">
<HOST_KEY NAME="{host_key_name}" VALUE="{host_key_value}"/>
<PROPERTIES>
<PROPERTY NAME="{property_name}" />
</PROPERTIES>
</TRANSACTION>
</FSAPI>"""
FS_DEX_UPDATE_LIST_PROPERTY = \
"""<?xml version="1.0" encoding="UTF-8"?>
<FSAPI TYPE="request" API_VERSION="2.0">
<TRANSACTION TYPE="{transaction_type}">
<LISTS>
{list_body}
</LISTS>
</TRANSACTION>
</FSAPI>"""
FS_WEB_LOGIN = '/api/login'
FS_WEB_HOSTS = '/api/hosts'
FS_WEB_HOSTFIELDS = '/api/hostfields'
FS_WEB_POLICIES = '/api/policies'
# Error message constants
FS_ERR_CODE_MSG = "Error code unavailable"
FS_ERR_MSG_UNAVAILABLE = "Error message unavailable. Please check the asset configuration and|or action parameters"
FS_PARSE_ERR_MSG = "Unable to parse the error message. Please check the asset configuration and|or action parameters"
# validate integer
ERR_VALID_INT_MSG = "Please provide a valid integer value in the {}"
ERR_NON_NEG_INT_MSG = "Please provide a valid non-negative integer value in the {}"
ERR_POSITIVE_INTEGER_MSG = "Please provide a valid non-zero positive integer value in the {}"
HOST_ID_INT_PARAM = "'host_id' action parameter"
| 36.5 | 117 | 0.619102 | # File: forescoutcounteract_consts.py
# Copyright (c) 2018-2021 Splunk Inc.
#
# Licensed under Apache 2.0 (https://www.apache.org/licenses/LICENSE-2.0.txt)
#
# --
# Define your constants here
FS_DEX_HOST_ENDPOINT = '/fsapi/niCore/Hosts'
FS_DEX_LIST_ENDPOINT = '/fsapi/niCore/Lists'
FS_DEX_TEST_CONNECTIVITY = \
"""<?xml version="1.0" encoding="UTF-8"?>
<FSAPI TYPE="request" API_VERSION="1.0">
<TRANSACTION TYPE="update">
<OPTIONS CREATE_NEW_HOST="true"/>
<HOST_KEY NAME="ip" VALUE="{host_key_value}"/>
<PROPERTIES></PROPERTIES>
</TRANSACTION>
</FSAPI>"""
FS_DEX_UPDATE_SIMPLE_PROPERTY = \
"""<?xml version='1.0' encoding='utf-8'?>
<FSAPI TYPE="request" API_VERSION="1.0">
<TRANSACTION TYPE="update">
<OPTIONS CREATE_NEW_HOST="{create_host}"/>
<HOST_KEY NAME="{host_key_name}" VALUE="{host_key_value}"/>
<PROPERTIES>
<PROPERTY NAME="{property_name}">
<VALUE>{property_value}</VALUE>
</PROPERTY>
</PROPERTIES>
</TRANSACTION>
</FSAPI>"""
FS_DEX_DELETE_SIMPLE_PROPERTY = \
"""<?xml version='1.0' encoding='utf-8'?>
<FSAPI TYPE="request" API_VERSION="1.0">
<TRANSACTION TYPE="delete">
<HOST_KEY NAME="{host_key_name}" VALUE="{host_key_value}"/>
<PROPERTIES>
<PROPERTY NAME="{property_name}" />
</PROPERTIES>
</TRANSACTION>
</FSAPI>"""
FS_DEX_UPDATE_LIST_PROPERTY = \
"""<?xml version="1.0" encoding="UTF-8"?>
<FSAPI TYPE="request" API_VERSION="2.0">
<TRANSACTION TYPE="{transaction_type}">
<LISTS>
{list_body}
</LISTS>
</TRANSACTION>
</FSAPI>"""
FS_WEB_LOGIN = '/api/login'
FS_WEB_HOSTS = '/api/hosts'
FS_WEB_HOSTFIELDS = '/api/hostfields'
FS_WEB_POLICIES = '/api/policies'
# Error message constants
FS_ERR_CODE_MSG = "Error code unavailable"
FS_ERR_MSG_UNAVAILABLE = "Error message unavailable. Please check the asset configuration and|or action parameters"
FS_PARSE_ERR_MSG = "Unable to parse the error message. Please check the asset configuration and|or action parameters"
# validate integer
ERR_VALID_INT_MSG = "Please provide a valid integer value in the {}"
ERR_NON_NEG_INT_MSG = "Please provide a valid non-negative integer value in the {}"
ERR_POSITIVE_INTEGER_MSG = "Please provide a valid non-zero positive integer value in the {}"
HOST_ID_INT_PARAM = "'host_id' action parameter"
| 0 | 0 | 0 |
9471d4657597db82b5566955e1bcfc934590b4c9 | 1,006 | py | Python | tests/test_parse_digest_host_url.py | ybenitezf/flask-static-digest | f7743dfb9cecd163eb04dec06cdf4303009e296c | [
"MIT"
] | null | null | null | tests/test_parse_digest_host_url.py | ybenitezf/flask-static-digest | f7743dfb9cecd163eb04dec06cdf4303009e296c | [
"MIT"
] | null | null | null | tests/test_parse_digest_host_url.py | ybenitezf/flask-static-digest | f7743dfb9cecd163eb04dec06cdf4303009e296c | [
"MIT"
] | null | null | null | from flask_static_digest import parse_digest_host_url
import pytest as pt
@pt.mark.parametrize(
"digest_host_url, expected", [
(
"https://cdn.example.com",
("https://cdn.example.com", "")
),
(
"https://cdn.example.com/",
("https://cdn.example.com", "")
),
(
"https://cdn.example.com/myapp",
("https://cdn.example.com", "myapp")
),
(
"https://cdn.example.com/myapp/",
("https://cdn.example.com", "myapp")
),
(
"https://cdn.example.com/myapp/anotherdir",
("https://cdn.example.com", "myapp/anotherdir")
),
(
"https://cdn.example.com/myapp/anotherdir/",
("https://cdn.example.com", "myapp/anotherdir")
),
]
)
| 25.15 | 61 | 0.516899 | from flask_static_digest import parse_digest_host_url
import pytest as pt
@pt.mark.parametrize(
"digest_host_url, expected", [
(
"https://cdn.example.com",
("https://cdn.example.com", "")
),
(
"https://cdn.example.com/",
("https://cdn.example.com", "")
),
(
"https://cdn.example.com/myapp",
("https://cdn.example.com", "myapp")
),
(
"https://cdn.example.com/myapp/",
("https://cdn.example.com", "myapp")
),
(
"https://cdn.example.com/myapp/anotherdir",
("https://cdn.example.com", "myapp/anotherdir")
),
(
"https://cdn.example.com/myapp/anotherdir/",
("https://cdn.example.com", "myapp/anotherdir")
),
]
)
def test_parse_function(digest_host_url, expected):
    """Each parametrized URL must split into the expected (host, prefix) pair."""
    parsed = parse_digest_host_url(digest_host_url)
    assert parsed == expected
def test_joined_urls():
    """Placeholder: behaviour of joined URLs is not yet covered by a test."""
    pass
| 103 | 0 | 45 |
58ea3a89d749ffe3fc11bef9fbc1ff02d402159e | 111 | py | Python | pocovidnet/pocovidnet/__init__.py | sbaktha/covid19_pocus_ultrasound | 876558a118c7afbe7a520efcc07581af6f8ffbb2 | [
"MIT"
] | null | null | null | pocovidnet/pocovidnet/__init__.py | sbaktha/covid19_pocus_ultrasound | 876558a118c7afbe7a520efcc07581af6f8ffbb2 | [
"MIT"
] | null | null | null | pocovidnet/pocovidnet/__init__.py | sbaktha/covid19_pocus_ultrasound | 876558a118c7afbe7a520efcc07581af6f8ffbb2 | [
"MIT"
] | null | null | null | from .model import get_cam_model, get_model
MODEL_FACTORY = {'vgg_base': get_model, 'vgg_cam': get_cam_model}
| 27.75 | 65 | 0.783784 | from .model import get_cam_model, get_model
MODEL_FACTORY = {'vgg_base': get_model, 'vgg_cam': get_cam_model}
| 0 | 0 | 0 |
fb30c68225ff4cb29b410c7a04cb4eb7cd493066 | 243 | py | Python | students/K33421/laboratory_works/Dzhapua_Esnat/laboratory_work_1/task_1/client.py | esnogram/ITMO_ICT_WebDevelopment_2020-2021 | 22a3d776463d50431a5745facaf7b4d55dd73b55 | [
"MIT"
] | null | null | null | students/K33421/laboratory_works/Dzhapua_Esnat/laboratory_work_1/task_1/client.py | esnogram/ITMO_ICT_WebDevelopment_2020-2021 | 22a3d776463d50431a5745facaf7b4d55dd73b55 | [
"MIT"
] | null | null | null | students/K33421/laboratory_works/Dzhapua_Esnat/laboratory_work_1/task_1/client.py | esnogram/ITMO_ICT_WebDevelopment_2020-2021 | 22a3d776463d50431a5745facaf7b4d55dd73b55 | [
"MIT"
] | null | null | null | import socket
conn = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
conn.connect (("127.0.0.1", 14900))
hels = "Hello, server"
conn.send(bytes(hels, "utf-8"))
data = conn.recv(16384)
udata = data.decode("utf-8")
print(udata)
conn.close() | 20.25 | 56 | 0.703704 | import socket
# Simple TCP client: greets the server at 127.0.0.1:14900 and prints the reply.
# The context manager guarantees the socket is closed even if connect/recv raises,
# which the original bare close() did not.
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as conn:
    conn.connect(("127.0.0.1", 14900))
    hels = "Hello, server"
    # sendall() keeps writing until every byte is sent; send() may stop short.
    conn.sendall(hels.encode("utf-8"))
    data = conn.recv(16384)
    udata = data.decode("utf-8")
    print(udata)
262de512c12b00516269ac71506a70c2a6b03971 | 1,926 | py | Python | cpstats/view.py | gye-tgm/cpstats | ab1b3bbabc4fe4dbeaffee23c79333b10b6775ed | [
"MIT"
] | null | null | null | cpstats/view.py | gye-tgm/cpstats | ab1b3bbabc4fe4dbeaffee23c79333b10b6775ed | [
"MIT"
] | null | null | null | cpstats/view.py | gye-tgm/cpstats | ab1b3bbabc4fe4dbeaffee23c79333b10b6775ed | [
"MIT"
] | null | null | null | from cpstats.models.model import VERDICT_AC, Task
from flask import render_template
from cpstats.models import model
from cpstats import app
@app.route('/')
@app.route('/index')
SUBNAV = [
'general',
'submissions',
'tasks',
'achievements'
]
@app.route('/user/<string:username>')
@app.route('/user/<string:username>/general')
@app.route('/user/<string:username>/tasks') | 32.1 | 110 | 0.597092 | from cpstats.models.model import VERDICT_AC, Task
from flask import render_template
from cpstats.models import model
from cpstats import app
@app.route('/')
@app.route('/index')
def index():
    """Render the landing page."""
    return render_template('index.html', title='Home')
SUBNAV = [
'general',
'submissions',
'tasks',
'achievements'
]
def get_user_accounts(username):
    """Return the online-judge accounts linked to *username* as plain dicts.

    Raises if no user with that uname exists (``.one()`` below).
    NOTE(review): the session is never closed/returned — confirm the
    Session factory manages its own lifecycle.
    """
    accounts = []
    session = model.Session()
    user = session.query(model.User).filter_by(uname=username).one()
    for acc in user.accounts:
        accounts.append({'handle': acc.handle, 'url': acc.url, 'oj': acc.oj_id})
    return accounts
def get_user_tasks(username):
    """Collect accepted (AC-verdict) tasks for each of the user's accounts.

    Returns a list of lists of ``{'name', 'url'}`` dicts, one sub-list
    appended per account.
    """
    session = model.Session()
    user = session.query(model.User).filter_by(uname=username).one()
    # solved, not solved
    tasks = []
    for acc in user.accounts:
        submission_ac = session.query(model.Submission).filter_by(account_id=acc.id, verdict=VERDICT_AC).all()
        tasks.append([])
        for s in submission_ac:
            t = session.query(model.Task).filter_by(id=s.task_id).one()
            d = {'name': t.name, 'url': t.url}
            # NOTE(review): every account's tasks land in tasks[0] even though
            # a fresh sub-list is appended per account — presumably tasks[-1]
            # was intended; confirm against the user_tasks.html template.
            tasks[0].append(d)
            # NOTE(review): debug print left in — consider removing.
            print(d)
    return tasks
@app.route('/user/<string:username>')
@app.route('/user/<string:username>/general')
def user_general(username):
    """Render the 'general' tab of a user's profile page."""
    return render_template('user_general.html',
                           username=username,
                           accounts=get_user_accounts(username),
                           title='general',
                           subnav=SUBNAV)
@app.route('/user/<string:username>/tasks')
def user_tasks(username):
    """Render the 'tasks' tab of a user's profile page."""
    # TODO: refine the task retrieval (original note: "get the tasks man!")
    return render_template('user_tasks.html',
                           username=username,
                           accounts=get_user_accounts(username),
                           tasks=get_user_tasks(username),
                           title='tasks',
                           subnav=SUBNAV)
f40417cc6ec6b8a597aa679e0a71fc8f641c968f | 15,651 | py | Python | pineboolib/fllegacy/flreportviewer.py | juanjosepablos/pineboo | f6ce515aec6e0139821bb9c1d62536d9fb50dae4 | [
"MIT"
] | null | null | null | pineboolib/fllegacy/flreportviewer.py | juanjosepablos/pineboo | f6ce515aec6e0139821bb9c1d62536d9fb50dae4 | [
"MIT"
] | 1 | 2017-10-30T22:00:48.000Z | 2017-11-11T19:34:32.000Z | pineboolib/fllegacy/flreportviewer.py | juanjosepablos/pineboo | f6ce515aec6e0139821bb9c1d62536d9fb50dae4 | [
"MIT"
] | 1 | 2017-10-30T20:16:38.000Z | 2017-10-30T20:16:38.000Z | """Flreportviewer module."""
from PyQt5 import QtWidgets, QtCore, QtXml
from pineboolib.core import decorators
from pineboolib.application.qsatypes.sysbasetype import SysBaseType
from pineboolib.fllegacy.flutil import FLUtil
# from pineboolib.fllegacy.flpicture import FLPicture
from .flsqlquery import FLSqlQuery
from .flsqlcursor import FLSqlCursor
from .flreportengine import FLReportEngine
from pineboolib import logging
from typing import Any, List, Mapping, Sized, Union, Dict, Optional, Callable
from PyQt5.QtGui import QColor
AQ_USRHOME = "." # FIXME
class internalReportViewer(QtWidgets.QWidget):
    """Widget that holds an FLReportEngine and delegates report rendering to it."""

    # Engine used for rendering; None until setReportEngine() is called.
    rptEngine_: Optional[FLReportEngine]
    # Output resolution in dots per inch (defaults to 300).
    dpi_: int
    # Rendered report pages.
    report_: List[Any]
    # Number of copies requested for printing.
    num_copies: int

    def __init__(self, parent: QtWidgets.QWidget) -> None:
        """Initialize with no engine, 300 dpi, an empty page list and one copy."""
        super().__init__(parent)
        self.rptEngine_ = None
        self.dpi_ = 300
        self.report_ = []
        self.num_copies = 1

    def setReportEngine(self, rptEngine: FLReportEngine) -> None:
        """Set the report engine used for rendering."""
        self.rptEngine_ = rptEngine

    def resolution(self) -> int:
        """Return the rendering resolution in dpi."""
        return self.dpi_

    def reportPages(self) -> List[Any]:
        """Return the list of rendered report pages."""
        return self.report_

    def renderReport(self, init_row: int, init_col: int, flags: List[int]) -> Any:
        """Render the report via the engine.

        Raises Exception if no engine has been set yet.
        """
        if self.rptEngine_ is None:
            raise Exception("renderReport. self.rptEngine_ is empty!")
        return self.rptEngine_.renderReport(init_row, init_col, flags)

    def setNumCopies(self, num_copies: int) -> None:
        """Set the number of copies to print."""
        self.num_copies = num_copies

    def __getattr__(self, name: str) -> Callable:
        """Delegate unknown attribute lookups to the report engine.

        NOTE(review): returns None instead of raising AttributeError when the
        engine lacks the attribute, despite the Callable annotation — confirm
        callers tolerate a None result.
        """
        return getattr(self.rptEngine_, name, None)
class FLReportViewer(QtWidgets.QWidget):
"""FLReportViewer class."""
pdfFile: str
Append: int
Display: int
PageBreak: int
spnResolution_: int
report_: List[Any]
qry_: Any
xmlData_: Any
template_: Any
autoClose_: bool
styleName_: str
PrintGrayScale = 0
PrintColor = 1
def __init__(
self,
parent: Optional[QtWidgets.QWidget] = None,
name: Optional[str] = None,
embedInParent: bool = False,
rptEngine: Optional[FLReportEngine] = None,
) -> None:
"""Inicialize."""
super(FLReportViewer, self).__init__(parent)
self.logger = logging.getLogger("FLReportViewer")
self.loop_ = False
self.eventloop = QtCore.QEventLoop()
self.reportPrinted_ = False
self.rptEngine_: Optional[Any] = None
self.report_ = []
self.slotsPrintDisabled_ = False
self.slotsExportedDisabled_ = False
self.printing_ = False
self.embedInParent_ = True if parent and embedInParent else False
self.ui_: Dict[str, QtCore.QObject] = {}
self.Display = 1
self.Append = 1
self.PageBreak = 1
self.rptViewer_ = internalReportViewer(self)
self.setReportEngine(FLReportEngine(self) if rptEngine is None else rptEngine)
if self.rptViewer_ is None:
raise Exception("self.rptViewer_ is empty!")
self.report_ = self.rptViewer_.reportPages()
def rptViewer(self) -> internalReportViewer:
"""Return report viewer."""
return self.rptViewer_
def rptEngine(self) -> FLReportEngine:
"""Return report engine."""
if self.rptEngine_ is None:
raise Exception("rptEngine_ is not defined!")
return self.rptEngine_
def setReportEngine(self, r: Optional[FLReportEngine] = None) -> None:
"""Set report engine."""
if self.rptEngine_ == r:
return
sender = self.sender()
noSigDestroy = not (sender and sender == self.rptEngine_)
self.rptEngine_ = r
if self.rptEngine_ is not None:
self.template_ = self.rptEngine_.rptNameTemplate()
self.qry_ = self.rptEngine_.rptQueryData()
if noSigDestroy:
self.rptViewer_.setReportEngine(self.rptEngine_)
def exec_(self) -> None:
"""Show report."""
# if self.loop_:
# print("FLReportViewer::exec(): Se ha detectado una llamada recursiva")
# return
if self.rptViewer_.rptEngine_ and hasattr(self.rptViewer_.rptEngine_, "parser_"):
pdf_file = self.rptViewer_.rptEngine_.parser_.get_file_name()
SysBaseType.openUrl(pdf_file)
# self.eventloop.exec_()
# if self.embedInParent_:
# return
# self.loop_ = True
# self.clearWFlags(Qt.WShowModal) # FIXME
@decorators.BetaImplementation
def csvData(self) -> str:
"""Return csv data."""
return self.rptEngine_.csvData() if self.rptEngine_ else ""
def renderReport(
self,
init_row: int = 0,
init_col: int = 0,
append_or_flags: Union[bool, Sized, Mapping[int, Any]] = None,
display_report: bool = False,
) -> bool:
"""Render report."""
if not self.rptEngine_:
return False
flags = [self.Append, self.Display]
if isinstance(append_or_flags, bool):
flags[0] = append_or_flags
if display_report is not None:
flags[0] = display_report
elif isinstance(append_or_flags, list):
if len(append_or_flags) > 0:
flags[0] = append_or_flags[0] # display
if len(append_or_flags) > 1:
flags[1] = append_or_flags[1] # append
if len(append_or_flags) > 2:
flags.append(append_or_flags[2]) # page_break
ret = self.rptViewer_.renderReport(init_row, init_col, flags)
self.report_ = self.rptViewer_.reportPages()
return ret
def setReportData(self, d: Union[FLSqlCursor, FLSqlQuery, QtXml.QDomNode]) -> bool:
"""Set data to report."""
if isinstance(d, FLSqlQuery):
self.qry_ = d
if self.rptEngine_ and self.rptEngine_.setReportData(d):
self.xmlData_ = self.rptEngine_.rptXmlData()
return True
return False
elif isinstance(d, FLSqlCursor):
if not self.rptEngine_:
return False
return self.rptEngine_.setReportData(d)
elif isinstance(d, QtXml.QDomNode):
self.xmlData_ = d
self.qry_ = None
if not self.rptEngine_:
return False
return self.rptEngine_.setReportData(d)
return False
def setReportTemplate(self, t: Union[QtXml.QDomNode, str], style: Optional[str] = None) -> bool:
"""Set template to report."""
if isinstance(t, QtXml.QDomNode):
self.xmlTemplate_ = t
self.template_ = ""
if not self.rptEngine_:
return False
if style is not None:
self.setStyleName(style)
self.rptEngine_.setFLReportTemplate(t)
return True
else:
self.template_ = t
self.styleName_ = style
if self.rptEngine_ and self.rptEngine_.setFLReportTemplate(t):
# self.setStyleName(style)
self.xmlTemplate_ = self.rptEngine_.rptXmlTemplate()
return True
return False
@decorators.BetaImplementation
def sizeHint(self) -> QtCore.QSize:
"""Return sizeHint."""
return self.rptViewer_.sizeHint()
@decorators.BetaImplementation
def setNumCopies(self, numCopies: int) -> None:
"""Set number of copies."""
self.rptViewer_.setNumCopies(numCopies)
@decorators.BetaImplementation
def setPrinterName(self, pName: str) -> None:
"""Set printer name."""
self.rptViewer_.setPrinterName(pName)
@decorators.BetaImplementation
def reportPrinted(self) -> bool:
"""Return if report was printed."""
return self.reportPrinted_
@decorators.pyqtSlot(int)
@decorators.BetaImplementation
def setResolution(self, dpi: int) -> None:
"""Set resolution."""
util = FLUtil()
util.writeSettingEntry("rptViewer/dpi", str(dpi))
self.rptViewer_.setResolution(dpi)
@decorators.pyqtSlot(int)
@decorators.BetaImplementation
def setPixel(self, relDpi: int) -> None:
"""Set pixel size."""
util = FLUtil()
util.writeSettingEntry("rptViewer/pixel", str(float(relDpi / 10.0)))
if self.rptEngine_:
self.rptEngine_.setRelDpi(relDpi / 10.0)
@decorators.BetaImplementation
def setDefaults(self) -> None:
"""Set default values."""
import platform
self.spnResolution_ = 300
system = platform.system()
if system == "Linux":
self.spnPixel_ = 780
elif system == "Windows":
# FIXME
pass
elif system == "Darwin":
# FIXME
pass
@decorators.BetaImplementation
def updateReport(self) -> None:
"""Update report."""
self.requestUpdateReport.emit()
if self.qry_ or (self.xmlData_ and self.xmlData_ != ""):
if not self.rptEngine_:
self.setReportEngine(FLReportEngine(self))
self.setResolution(self.spnResolution_)
self.setPixel(self.spnPixel_)
if self.template_ and self.template_ != "":
self.setReportTemplate(self.template_, self.styleName_)
else:
self.setReportTemplate(self.xmlTemplate_, self.styleName_)
if self.qry_:
self.setReportData(self.qry_)
else:
self.setReportData(self.xmlData_)
self.renderReport(0, 0, False, False)
self.updateDisplay()
@decorators.BetaImplementation
def getCurrentPage(self) -> Any:
"""Return curent page."""
# FIXME: self.report_ is just a List[]
# if self.report_:
# return FLPicture(self.report_.getCurrentPage(), self)
return 0
@decorators.BetaImplementation
def getFirstPage(self) -> Any:
"""Return first page."""
# FIXME: self.report_ is just a List[]
# if self.report_:
# return FLPicture(self.report_.getFirstPage(), self)
return 0
@decorators.BetaImplementation
def getPreviousPage(self) -> Any:
"""Return previous page."""
# FIXME: self.report_ is just a List[]
# if self.report_:
# return FLPicture(self.report_.getPreviousPage(), self)
return 0
@decorators.BetaImplementation
def getNextPage(self) -> Any:
"""Return next page."""
# FIXME: self.report_ is just a List[]
# if self.report_:
# return FLPicture(self.report_.getNextPage(), self)
return 0
@decorators.BetaImplementation
def getLastPage(self) -> Any:
"""Return last page."""
# FIXME: self.report_ is just a List[]
# if self.report_:
# return FLPicture(self.report_.getLastPage(), self)
return 0
@decorators.BetaImplementation
def getPageAt(self, i: int) -> Any:
"""Return actual page."""
# FIXME: self.report_ is just a List[]
# if self.report_:
# return FLPicture(self.report_.getPageAt(i), self)
return 0
@decorators.BetaImplementation
def updateDisplay(self) -> None:
"""Update display."""
self.rptViewer_.slotUpdateDisplay()
@decorators.BetaImplementation
def clearPages(self) -> None:
"""Clear report pages."""
# FIXME: self.report_ is just a List[]
# if self.report_:
# self.report_.clear()
pass
@decorators.BetaImplementation
def appendPage(self) -> None:
"""Add a new page."""
# FIXME: self.report_ is just a List[]
# if self.report_:
# self.report_.appendPage()
pass
@decorators.BetaImplementation
def getCurrentIndex(self) -> int:
"""Return current index position."""
# FIXME: self.report_ is just a List[]
# if self.report_:
# return self.report_.getCurrentIndex()
return -1
@decorators.BetaImplementation
def setCurrentPage(self, idx: int) -> None:
"""Set current page index."""
# FIXME: self.report_ is just a List[]
# if self.report_:
# self.report_.setCurrentPage(idx)
pass
@decorators.BetaImplementation
def setPageSize(self, w: Union[QtCore.QSize, int], h: Optional[int] = None) -> None:
"""Set page size."""
# FIXME: self.report_ is just a List[]
# if self.report_:
# self.report_.setPageSize(s)
pass
@decorators.BetaImplementation
def setPageOrientation(self, o: int) -> None:
"""Set page orientation."""
# FIXME: self.report_ is just a List[]
# if self.report_:
# self.report_.setPageOrientation(o)
pass
@decorators.BetaImplementation
def setPageDimensions(self, dim: QtCore.QSize) -> None:
"""Set page dimensions."""
# FIXME: self.report_ is just a List[]
# if self.report_:
# self.report_.setPageDimensions(dim)
pass
@decorators.BetaImplementation
def pageSize(self) -> QtCore.QSize:
"""Return page size."""
# FIXME: self.report_ is just a List[]
# if self.report_:
# return self.report_.pageSize()
return -1
@decorators.BetaImplementation
def pageOrientation(self) -> int:
"""Return page orientation."""
# FIXME: self.report_ is just a List[]
# if self.report_:
# return self.report_.pageOrientation()
return -1
def pageDimensions(self) -> QtCore.QSize:
"""Return page dimensions."""
if self.rptViewer_.rptEngine_ and hasattr(self.rptViewer_.rptEngine_, "parser_"):
return self.rptViewer_.rptEngine_.parser_._page_size
return -1
def pageCount(self) -> int:
"""Return number of pages."""
if self.rptViewer_.rptEngine_:
return self.rptViewer_.rptEngine_.number_pages()
return -1
@decorators.BetaImplementation
def setStyleName(self, style: str) -> None:
"""Set style name."""
self.styleName_ = style
@decorators.BetaImplementation
def setReportPages(self, pgs: Any) -> None:
"""Add pages to actual report."""
self.setReportEngine(None)
self.qry_ = None
self.xmlData_ = QtXml.QDomNode()
self.rptViewer_.setReportPages(pgs.pageCollection() if pgs else 0)
self.report_ = self.rptViewer_.reportPages()
@decorators.BetaImplementation
def setColorMode(self, c: QColor) -> None:
"""Set color mode."""
self.rptViewer_.setColorMode(c)
@decorators.BetaImplementation
def colorMode(self) -> QColor:
"""Return color mode."""
return self.rptViewer_.colorMode()
@decorators.BetaImplementation
def setName(self, n: str) -> None:
"""Set report name."""
self.name_ = n
@decorators.BetaImplementation
def name(self) -> str:
"""Return report name."""
return self.name_
def __getattr__(self, name: str) -> Any:
"""Return attribute from inernal object."""
return getattr(self.rptViewer_, name, None)
| 31.053571 | 100 | 0.605776 | """Flreportviewer module."""
from PyQt5 import QtWidgets, QtCore, QtXml
from pineboolib.core import decorators
from pineboolib.application.qsatypes.sysbasetype import SysBaseType
from pineboolib.fllegacy.flutil import FLUtil
# from pineboolib.fllegacy.flpicture import FLPicture
from .flsqlquery import FLSqlQuery
from .flsqlcursor import FLSqlCursor
from .flreportengine import FLReportEngine
from pineboolib import logging
from typing import Any, List, Mapping, Sized, Union, Dict, Optional, Callable
from PyQt5.QtGui import QColor
AQ_USRHOME = "." # FIXME
class internalReportViewer(QtWidgets.QWidget):
    """Thin widget wrapper around an FLReportEngine.

    Holds the engine, the rendering resolution, the rendered pages and the
    requested number of copies; everything else is delegated to the engine
    via ``__getattr__``.
    """

    rptEngine_: Optional[FLReportEngine]
    dpi_: int
    report_: List[Any]
    num_copies: int

    def __init__(self, parent: QtWidgets.QWidget) -> None:
        """Create the viewer with no engine attached yet."""
        super().__init__(parent)
        self.rptEngine_ = None
        self.dpi_ = 300
        self.report_ = []
        self.num_copies = 1

    def setReportEngine(self, rptEngine: FLReportEngine) -> None:
        """Attach the engine that renders reports for this widget."""
        self.rptEngine_ = rptEngine

    def resolution(self) -> int:
        """Return the rendering resolution in dots per inch."""
        return self.dpi_

    def reportPages(self) -> List[Any]:
        """Return the pages produced by the last render."""
        return self.report_

    def renderReport(self, init_row: int, init_col: int, flags: List[int]) -> Any:
        """Render through the attached engine; fail loudly when none is set."""
        engine = self.rptEngine_
        if engine is None:
            raise Exception("renderReport. self.rptEngine_ is empty!")
        return engine.renderReport(init_row, init_col, flags)

    def setNumCopies(self, num_copies: int) -> None:
        """Remember how many copies should be printed."""
        self.num_copies = num_copies

    def __getattr__(self, name: str) -> Callable:
        """Delegate unknown attributes to the engine (``None`` when absent)."""
        return getattr(self.rptEngine_, name, None)
class FLReportViewer(QtWidgets.QWidget):
    """Legacy report-viewer widget.

    Wraps an ``internalReportViewer`` plus an ``FLReportEngine`` and exposes
    the old FLReportViewer API: data/template loading, rendering, printing
    knobs and a set of page-navigation methods that are currently stubbed
    out (see the FIXME notes below).
    """

    pdfFile: str
    Append: int
    Display: int
    PageBreak: int
    spnResolution_: int
    report_: List[Any]
    qry_: Any
    xmlData_: Any
    template_: Any
    autoClose_: bool
    styleName_: str

    # Colour-mode constants consumed by setColorMode()/colorMode().
    PrintGrayScale = 0
    PrintColor = 1

    def __init__(
        self,
        parent: Optional[QtWidgets.QWidget] = None,
        name: Optional[str] = None,
        embedInParent: bool = False,
        rptEngine: Optional[FLReportEngine] = None,
    ) -> None:
        """Initialize the viewer and attach a report engine.

        A default FLReportEngine bound to this widget is created when none
        is supplied.
        """
        super(FLReportViewer, self).__init__(parent)
        self.logger = logging.getLogger("FLReportViewer")
        self.loop_ = False
        self.eventloop = QtCore.QEventLoop()
        self.reportPrinted_ = False
        self.rptEngine_: Optional[Any] = None
        self.report_ = []
        self.slotsPrintDisabled_ = False
        self.slotsExportedDisabled_ = False
        self.printing_ = False
        # Only treat as embedded when a parent was actually given.
        self.embedInParent_ = True if parent and embedInParent else False
        self.ui_: Dict[str, QtCore.QObject] = {}
        # NOTE(review): Display/Append/PageBreak all share the value 1, so the
        # render-flag defaults below cannot be distinguished — confirm intended.
        self.Display = 1
        self.Append = 1
        self.PageBreak = 1
        self.rptViewer_ = internalReportViewer(self)
        self.setReportEngine(FLReportEngine(self) if rptEngine is None else rptEngine)
        if self.rptViewer_ is None:
            raise Exception("self.rptViewer_ is empty!")
        self.report_ = self.rptViewer_.reportPages()

    def rptViewer(self) -> internalReportViewer:
        """Return the internal report viewer widget."""
        return self.rptViewer_

    def rptEngine(self) -> FLReportEngine:
        """Return the attached report engine; raise if none is attached."""
        if self.rptEngine_ is None:
            raise Exception("rptEngine_ is not defined!")
        return self.rptEngine_

    def setReportEngine(self, r: Optional[FLReportEngine] = None) -> None:
        """Attach (or detach, with None) the report engine.

        Template and query data are pulled from the new engine.  When the
        call originates from the engine itself (as signal sender) the inner
        viewer is NOT re-pointed, to avoid re-entrancy during destruction.
        """
        if self.rptEngine_ == r:
            return
        sender = self.sender()
        noSigDestroy = not (sender and sender == self.rptEngine_)
        self.rptEngine_ = r
        if self.rptEngine_ is not None:
            self.template_ = self.rptEngine_.rptNameTemplate()
            self.qry_ = self.rptEngine_.rptQueryData()
        if noSigDestroy:
            self.rptViewer_.setReportEngine(self.rptEngine_)

    def exec_(self) -> None:
        """Show the rendered report (currently: open the generated PDF).

        The original modal event-loop behaviour is commented out; instead
        the engine's parser output file is handed to the system URL opener.
        """
        # if self.loop_:
        #     print("FLReportViewer::exec(): Se ha detectado una llamada recursiva")
        #     return
        if self.rptViewer_.rptEngine_ and hasattr(self.rptViewer_.rptEngine_, "parser_"):
            pdf_file = self.rptViewer_.rptEngine_.parser_.get_file_name()
            SysBaseType.openUrl(pdf_file)
        # self.eventloop.exec_()
        # if self.embedInParent_:
        #     return
        # self.loop_ = True
        # self.clearWFlags(Qt.WShowModal)  # FIXME

    @decorators.BetaImplementation
    def csvData(self) -> str:
        """Return the report content as CSV ("" when no engine is attached)."""
        return self.rptEngine_.csvData() if self.rptEngine_ else ""

    def renderReport(
        self,
        init_row: int = 0,
        init_col: int = 0,
        append_or_flags: Optional[Union[bool, Sized, Mapping[int, Any]]] = None,
        display_report: bool = False,
    ) -> bool:
        """Render the report starting at (init_row, init_col).

        ``append_or_flags`` may be a bool (append flag) or a list of up to
        three flags: [display, append, page_break].
        Returns False when no engine is attached.
        """
        if not self.rptEngine_:
            return False
        flags = [self.Append, self.Display]
        if isinstance(append_or_flags, bool):
            flags[0] = append_or_flags
            # NOTE(review): display_report defaults to False (not None), so
            # this overwrite always fires and discards append_or_flags —
            # the default probably should be None; confirm against callers.
            if display_report is not None:
                flags[0] = display_report
        elif isinstance(append_or_flags, list):
            if len(append_or_flags) > 0:
                flags[0] = append_or_flags[0]  # display
            if len(append_or_flags) > 1:
                flags[1] = append_or_flags[1]  # append
            if len(append_or_flags) > 2:
                flags.append(append_or_flags[2])  # page_break
        ret = self.rptViewer_.renderReport(init_row, init_col, flags)
        self.report_ = self.rptViewer_.reportPages()
        return ret

    def setReportData(self, d: Union[FLSqlCursor, FLSqlQuery, QtXml.QDomNode]) -> bool:
        """Feed the engine with data from a query, cursor or XML node.

        Returns True on success, False when no engine is attached or the
        engine rejects the data.
        """
        if isinstance(d, FLSqlQuery):
            self.qry_ = d
            if self.rptEngine_ and self.rptEngine_.setReportData(d):
                self.xmlData_ = self.rptEngine_.rptXmlData()
                return True
            return False
        elif isinstance(d, FLSqlCursor):
            if not self.rptEngine_:
                return False
            return self.rptEngine_.setReportData(d)
        elif isinstance(d, QtXml.QDomNode):
            self.xmlData_ = d
            self.qry_ = None
            if not self.rptEngine_:
                return False
            return self.rptEngine_.setReportData(d)
        return False

    def setReportTemplate(self, t: Union[QtXml.QDomNode, str], style: Optional[str] = None) -> bool:
        """Set the report template, either as an XML node or a template name."""
        if isinstance(t, QtXml.QDomNode):
            self.xmlTemplate_ = t
            self.template_ = ""
            if not self.rptEngine_:
                return False
            if style is not None:
                self.setStyleName(style)
            self.rptEngine_.setFLReportTemplate(t)
            return True
        else:
            self.template_ = t
            self.styleName_ = style
            if self.rptEngine_ and self.rptEngine_.setFLReportTemplate(t):
                # self.setStyleName(style)
                self.xmlTemplate_ = self.rptEngine_.rptXmlTemplate()
                return True
        return False

    @decorators.BetaImplementation
    def sizeHint(self) -> QtCore.QSize:
        """Return the inner viewer's size hint."""
        return self.rptViewer_.sizeHint()

    @decorators.BetaImplementation
    def setNumCopies(self, numCopies: int) -> None:
        """Set number of copies to print."""
        self.rptViewer_.setNumCopies(numCopies)

    @decorators.BetaImplementation
    def setPrinterName(self, pName: str) -> None:
        """Set printer name."""
        self.rptViewer_.setPrinterName(pName)

    @decorators.BetaImplementation
    def reportPrinted(self) -> bool:
        """Return whether the report has been printed."""
        return self.reportPrinted_

    @decorators.pyqtSlot(int)
    @decorators.BetaImplementation
    def setResolution(self, dpi: int) -> None:
        """Set resolution (persisted in the rptViewer/dpi setting)."""
        util = FLUtil()
        util.writeSettingEntry("rptViewer/dpi", str(dpi))
        self.rptViewer_.setResolution(dpi)

    @decorators.pyqtSlot(int)
    @decorators.BetaImplementation
    def setPixel(self, relDpi: int) -> None:
        """Set pixel size (stored and forwarded as relDpi / 10)."""
        util = FLUtil()
        util.writeSettingEntry("rptViewer/pixel", str(float(relDpi / 10.0)))
        if self.rptEngine_:
            self.rptEngine_.setRelDpi(relDpi / 10.0)

    @decorators.BetaImplementation
    def setDefaults(self) -> None:
        """Set platform-dependent default resolution/pixel values."""
        import platform

        self.spnResolution_ = 300
        system = platform.system()
        if system == "Linux":
            self.spnPixel_ = 780
        elif system == "Windows":
            # FIXME
            pass
        elif system == "Darwin":
            # FIXME
            pass

    @decorators.BetaImplementation
    def updateReport(self) -> None:
        """Re-render the report from the stored query/XML data and template.

        NOTE(review): ``requestUpdateReport`` is not defined in this class's
        visible code — presumably a pyqtSignal declared elsewhere; confirm.
        """
        self.requestUpdateReport.emit()
        if self.qry_ or (self.xmlData_ and self.xmlData_ != ""):
            if not self.rptEngine_:
                self.setReportEngine(FLReportEngine(self))
            self.setResolution(self.spnResolution_)
            self.setPixel(self.spnPixel_)
            if self.template_ and self.template_ != "":
                self.setReportTemplate(self.template_, self.styleName_)
            else:
                self.setReportTemplate(self.xmlTemplate_, self.styleName_)
            if self.qry_:
                self.setReportData(self.qry_)
            else:
                self.setReportData(self.xmlData_)
            self.renderReport(0, 0, False, False)
        self.updateDisplay()

    @decorators.BetaImplementation
    def getCurrentPage(self) -> Any:
        """Return current page (stubbed: always 0)."""
        # FIXME: self.report_ is just a List[]
        # if self.report_:
        #     return FLPicture(self.report_.getCurrentPage(), self)
        return 0

    @decorators.BetaImplementation
    def getFirstPage(self) -> Any:
        """Return first page (stubbed: always 0)."""
        # FIXME: self.report_ is just a List[]
        # if self.report_:
        #     return FLPicture(self.report_.getFirstPage(), self)
        return 0

    @decorators.BetaImplementation
    def getPreviousPage(self) -> Any:
        """Return previous page (stubbed: always 0)."""
        # FIXME: self.report_ is just a List[]
        # if self.report_:
        #     return FLPicture(self.report_.getPreviousPage(), self)
        return 0

    @decorators.BetaImplementation
    def getNextPage(self) -> Any:
        """Return next page (stubbed: always 0)."""
        # FIXME: self.report_ is just a List[]
        # if self.report_:
        #     return FLPicture(self.report_.getNextPage(), self)
        return 0

    @decorators.BetaImplementation
    def getLastPage(self) -> Any:
        """Return last page (stubbed: always 0)."""
        # FIXME: self.report_ is just a List[]
        # if self.report_:
        #     return FLPicture(self.report_.getLastPage(), self)
        return 0

    @decorators.BetaImplementation
    def getPageAt(self, i: int) -> Any:
        """Return page at index *i* (stubbed: always 0)."""
        # FIXME: self.report_ is just a List[]
        # if self.report_:
        #     return FLPicture(self.report_.getPageAt(i), self)
        return 0

    @decorators.BetaImplementation
    def updateDisplay(self) -> None:
        """Ask the inner viewer to refresh its display."""
        self.rptViewer_.slotUpdateDisplay()

    @decorators.BetaImplementation
    def clearPages(self) -> None:
        """Clear report pages (stubbed: no-op)."""
        # FIXME: self.report_ is just a List[]
        # if self.report_:
        #     self.report_.clear()
        pass

    @decorators.BetaImplementation
    def appendPage(self) -> None:
        """Add a new page (stubbed: no-op)."""
        # FIXME: self.report_ is just a List[]
        # if self.report_:
        #     self.report_.appendPage()
        pass

    @decorators.BetaImplementation
    def getCurrentIndex(self) -> int:
        """Return current index position (stubbed: always -1)."""
        # FIXME: self.report_ is just a List[]
        # if self.report_:
        #     return self.report_.getCurrentIndex()
        return -1

    @decorators.BetaImplementation
    def setCurrentPage(self, idx: int) -> None:
        """Set current page index (stubbed: no-op)."""
        # FIXME: self.report_ is just a List[]
        # if self.report_:
        #     self.report_.setCurrentPage(idx)
        pass

    @decorators.BetaImplementation
    def setPageSize(self, w: Union[QtCore.QSize, int], h: Optional[int] = None) -> None:
        """Set page size (stubbed: no-op; *h* is currently unused)."""
        # FIXME: self.report_ is just a List[]
        # if self.report_:
        #     self.report_.setPageSize(s)
        pass

    @decorators.BetaImplementation
    def setPageOrientation(self, o: int) -> None:
        """Set page orientation (stubbed: no-op)."""
        # FIXME: self.report_ is just a List[]
        # if self.report_:
        #     self.report_.setPageOrientation(o)
        pass

    @decorators.BetaImplementation
    def setPageDimensions(self, dim: QtCore.QSize) -> None:
        """Set page dimensions (stubbed: no-op)."""
        # FIXME: self.report_ is just a List[]
        # if self.report_:
        #     self.report_.setPageDimensions(dim)
        pass

    @decorators.BetaImplementation
    def pageSize(self) -> QtCore.QSize:
        """Return page size (stubbed: always -1)."""
        # FIXME: self.report_ is just a List[]
        # if self.report_:
        #     return self.report_.pageSize()
        return -1

    @decorators.BetaImplementation
    def pageOrientation(self) -> int:
        """Return page orientation (stubbed: always -1)."""
        # FIXME: self.report_ is just a List[]
        # if self.report_:
        #     return self.report_.pageOrientation()
        return -1

    def pageDimensions(self) -> QtCore.QSize:
        """Return the engine parser's page size, or -1 when unavailable."""
        if self.rptViewer_.rptEngine_ and hasattr(self.rptViewer_.rptEngine_, "parser_"):
            return self.rptViewer_.rptEngine_.parser_._page_size
        return -1

    def pageCount(self) -> int:
        """Return number of rendered pages, or -1 when no engine is attached."""
        if self.rptViewer_.rptEngine_:
            return self.rptViewer_.rptEngine_.number_pages()
        return -1

    @decorators.BetaImplementation
    def setStyleName(self, style: str) -> None:
        """Set style name."""
        self.styleName_ = style

    @decorators.BetaImplementation
    def setReportPages(self, pgs: Any) -> None:
        """Replace the current report with an external page collection.

        Detaches the engine and clears pending query/XML data first.
        """
        self.setReportEngine(None)
        self.qry_ = None
        self.xmlData_ = QtXml.QDomNode()
        self.rptViewer_.setReportPages(pgs.pageCollection() if pgs else 0)
        self.report_ = self.rptViewer_.reportPages()

    @decorators.BetaImplementation
    def setColorMode(self, c: QColor) -> None:
        """Set colour mode (see PrintGrayScale/PrintColor)."""
        self.rptViewer_.setColorMode(c)

    @decorators.BetaImplementation
    def colorMode(self) -> QColor:
        """Return colour mode."""
        return self.rptViewer_.colorMode()

    @decorators.BetaImplementation
    def setName(self, n: str) -> None:
        """Set report name."""
        self.name_ = n

    @decorators.BetaImplementation
    def name(self) -> str:
        """Return report name."""
        return self.name_

    def __getattr__(self, name: str) -> Any:
        """Delegate unknown attributes to the internal viewer (None when absent)."""
        return getattr(self.rptViewer_, name, None)
| 0 | 0 | 0 |
e155a2ee3108e7539d619edb6c746911caaffb13 | 4,995 | py | Python | text_to_num/lang/english.py | fquirin/text2num | 539c8bb70ecb3991e27220e93e0dcc2005cbe9ea | [
"MIT"
] | 55 | 2018-10-10T14:49:16.000Z | 2022-03-31T19:45:26.000Z | text_to_num/lang/english.py | fquirin/text2num | 539c8bb70ecb3991e27220e93e0dcc2005cbe9ea | [
"MIT"
] | 37 | 2018-10-12T01:04:16.000Z | 2022-03-17T09:17:22.000Z | text_to_num/lang/english.py | fquirin/text2num | 539c8bb70ecb3991e27220e93e0dcc2005cbe9ea | [
"MIT"
] | 27 | 2018-11-12T17:33:44.000Z | 2022-03-15T09:16:48.000Z | # MIT License
# Copyright (c) 2018-2019 Groupe Allo-Media
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from typing import Dict, Optional, Set, Tuple
from .base import Language
#
# CONSTANTS
# Built once on import.
#
# Those words multiplies lesser numbers (see Rules)
# Special case: "hundred" is processed apart.
MULTIPLIERS = {
"thousand": 1_000,
"thousands": 1_000,
"million": 1_000_000,
"millions": 1_000_000,
"billion": 1_000_000_000,
"billions": 1_000_000_000,
"trillion": 1_000_000_000_000,
"trillions": 1_000_000_000_000,
}
# Units are terminals (see Rules)
# Special case: "zero/O" is processed apart.
UNITS: Dict[str, int] = {
word: value
for value, word in enumerate(
"one two three four five six seven eight nine".split(), 1
)
}
# Single tens are terminals (see Rules)
STENS: Dict[str, int] = {
word: value
for value, word in enumerate(
"ten eleven twelve thirteen fourteen fifteen sixteen seventeen eighteen nineteen".split(),
10,
)
}
# Ten multiples
# Ten multiples may be followed by a unit only;
MTENS: Dict[str, int] = {
word: value * 10
for value, word in enumerate(
"twenty thirty forty fifty sixty seventy eighty ninety".split(), 2
)
}
# Ten multiples that can be combined with STENS
MTENS_WSTENS: Set[str] = set()
# "hundred" has a special status (see Rules)
HUNDRED = {"hundred": 100, "hundreds": 100}
# Composites are tens already composed with terminals in one word.
# Composites are terminals.
COMPOSITES: Dict[str, int] = {
"-".join((ten_word, unit_word)): ten_val + unit_val
for ten_word, ten_val in MTENS.items()
for unit_word, unit_val in UNITS.items()
}
# All number words
NUMBERS = MULTIPLIERS.copy()
NUMBERS.update(UNITS)
NUMBERS.update(STENS)
NUMBERS.update(MTENS)
NUMBERS.update(HUNDRED)
NUMBERS.update(COMPOSITES)
RAD_MAP = {"fif": "five", "eigh": "eight", "nin": "nine", "twelf": "twelve"}
| 30.272727 | 98 | 0.647047 | # MIT License
# Copyright (c) 2018-2019 Groupe Allo-Media
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from typing import Dict, Optional, Set, Tuple
from .base import Language
#
# CONSTANTS
# Built once on import.
#
# Those words multiplies lesser numbers (see Rules)
# Special case: "hundred" is processed apart.
MULTIPLIERS = {
"thousand": 1_000,
"thousands": 1_000,
"million": 1_000_000,
"millions": 1_000_000,
"billion": 1_000_000_000,
"billions": 1_000_000_000,
"trillion": 1_000_000_000_000,
"trillions": 1_000_000_000_000,
}
# Units are terminals (see Rules)
# Special case: "zero/O" is processed apart.
UNITS: Dict[str, int] = {
word: value
for value, word in enumerate(
"one two three four five six seven eight nine".split(), 1
)
}
# Single tens are terminals (see Rules)
STENS: Dict[str, int] = {
word: value
for value, word in enumerate(
"ten eleven twelve thirteen fourteen fifteen sixteen seventeen eighteen nineteen".split(),
10,
)
}
# Ten multiples
# Ten multiples may be followed by a unit only;
MTENS: Dict[str, int] = {
word: value * 10
for value, word in enumerate(
"twenty thirty forty fifty sixty seventy eighty ninety".split(), 2
)
}
# Ten multiples that can be combined with STENS
MTENS_WSTENS: Set[str] = set()
# "hundred" has a special status (see Rules)
HUNDRED = {"hundred": 100, "hundreds": 100}
# Composites are tens already composed with terminals in one word.
# Composites are terminals.
COMPOSITES: Dict[str, int] = {
"-".join((ten_word, unit_word)): ten_val + unit_val
for ten_word, ten_val in MTENS.items()
for unit_word, unit_val in UNITS.items()
}
# All number words
NUMBERS = MULTIPLIERS.copy()
NUMBERS.update(UNITS)
NUMBERS.update(STENS)
NUMBERS.update(MTENS)
NUMBERS.update(HUNDRED)
NUMBERS.update(COMPOSITES)
RAD_MAP = {"fif": "five", "eigh": "eight", "nin": "nine", "twelf": "twelve"}
class English(Language):
    """English-language rules for converting number words to digits."""

    MULTIPLIERS = MULTIPLIERS
    UNITS = UNITS
    STENS = STENS
    MTENS = MTENS
    MTENS_WSTENS = MTENS_WSTENS
    HUNDRED = HUNDRED
    NUMBERS = NUMBERS

    SIGN = {"plus": "+", "minus": "-"}
    ZERO = {"zero", "o"}
    DECIMAL_SEP = "point"
    DECIMAL_SYM = "."

    AND_NUMS: Set[str] = set()
    AND = "and"
    # "one" on its own is usually a pronoun/article, not a number.
    NEVER_IF_ALONE = {"one"}

    # Relaxed composed numbers (two-words only)
    # start => (next, target)
    RELAXED: Dict[str, Tuple[str, str]] = {}

    def ord2card(self, word: str) -> Optional[str]:
        """Convert ordinal number to cardinal.

        Return None if word is not an ordinal or is better left in letters.
        """
        plur_suff = word.endswith("ths")
        sing_suff = word.endswith("th")
        if not (plur_suff or sing_suff):
            # Irregular ordinals (possibly composed, e.g. "twenty-first").
            if word.endswith("first"):
                source = word.replace("first", "one")
            elif word.endswith("second"):
                source = word.replace("second", "two")
            elif word.endswith("third"):
                source = word.replace("third", "three")
            else:
                return None
        else:
            # Strip "ths"/"th", then undo the stem mutation if any.
            source = word[:-3] if plur_suff else word[:-2]
            if source in RAD_MAP:
                source = RAD_MAP[source]
            elif source.endswith("ie"):
                # e.g. "twentieth" -> "twentie" -> "twenty"
                source = source[:-2] + "y"
            elif source.endswith("fif"):  # fifth -> five
                source = source[:-1] + "ve"
            elif source.endswith("eigh"):  # eighth -> eight
                source = source + "t"
            elif source.endswith("nin"):  # ninth -> nine
                source = source + "e"
        # Reject anything that didn't resolve to a known number word.
        if source not in self.NUMBERS:
            return None
        return source

    def num_ord(self, digits: str, original_word: str) -> str:
        """Add suffix to number in digits to make an ordinal"""
        # Keep "ths" for plural ordinals, "th"/"st"/"nd"/"rd" otherwise.
        sf = original_word[-3:] if original_word.endswith("s") else original_word[-2:]
        return f"{digits}{sf}"

    def normalize(self, word: str) -> str:
        """Return *word* unchanged; English needs no normalization step."""
        return word
| 37 | 1,968 | 23 |
275e86f75a011a705356e9c7db8c223789707e62 | 1,464 | py | Python | utils/generators/matrix_multiply.py | sgpthomas/diospyros | 27d4e5e5d4e56a6dc5860d7c7d5eefb27de24a5d | [
"MIT"
] | 27 | 2020-02-16T22:26:34.000Z | 2022-02-17T04:17:19.000Z | utils/generators/matrix_multiply.py | sgpthomas/diospyros | 27d4e5e5d4e56a6dc5860d7c7d5eefb27de24a5d | [
"MIT"
] | 77 | 2020-01-21T15:37:35.000Z | 2022-03-11T19:48:43.000Z | utils/generators/matrix_multiply.py | sgpthomas/diospyros | 27d4e5e5d4e56a6dc5860d7c7d5eefb27de24a5d | [
"MIT"
] | 1 | 2021-09-27T20:35:15.000Z | 2021-09-27T20:35:15.000Z | """
Matrix multiply generator. Generates an Eigen matrix multiply override kernel.
"""
| 26.142857 | 80 | 0.583333 | """
Matrix multiply generator. Generates an Eigen matrix multiply override kernel.
"""
def generator(kernel_name, params, spec_file):
    """Generate a Diospyros specification for a matrix-multiply kernel.

    Args:
        kernel_name: Name of the generated C kernel function.
        params: Dict with "input_rows", "input_cols" and "output_cols",
            the dimensions of A (rows x cols) and B (cols) in C = A * B.
        spec_file: Path the C specification file is written to.

    Returns:
        Manifest dict describing the Eigen-typed inputs/outputs of the
        kernel and the reference expression used for testing.
    """
    input_rows = params["input_rows"]
    input_cols = params["input_cols"]
    output_cols = params["output_cols"]

    # inject specification template with arguments
    spec = """/*!
Specification file of the target kernel to be consumed by the Diosypros tool
*/
#define A_ROWS {}
#define A_COLS {}
#define B_COLS {}

void {}(
    float a_in[A_ROWS * A_COLS],
    float b_in[A_COLS * B_COLS],
    float c_out[A_ROWS * B_COLS]) {{
  for (int i = 0; i < A_ROWS; i++) {{
    for (int j = 0; j < B_COLS; j++) {{
      c_out[j * A_ROWS + i] = 0;
      for (int k = 0; k < A_COLS; k++) {{
        c_out[j * A_ROWS + i] += a_in[k * A_ROWS + i] * b_in[j * A_COLS + k];
      }}
    }}
  }}
}}""".format(input_rows, input_cols, output_cols, kernel_name)

    # Fix: use a context manager so the file handle is closed even if the
    # write raises (the original open/write/close leaked on error).
    with open(spec_file, "w") as handle:
        handle.write(spec)

    # return a dictionary of the inputs and outputs of this function definition
    # and type signatures
    manifest_shard = {
        "inputs": {
            "a": "Eigen::Matrix<float, {}, {}>".format(input_rows, input_cols),
            "b": "Eigen::Matrix<float, {}, {}>".format(input_cols, output_cols)
        },
        "outputs": {
            "c": "Eigen::Matrix<float, {}, {}>".format(input_rows, output_cols)
        },
        "test": "c = a * b"
    }
    return manifest_shard
| 1,349 | 0 | 23 |
8a516965d3efb4dbcb256edc310602cc80459db0 | 9,646 | py | Python | redash/query_runner/python.py | techscience9/redash | 32669b148ccba47b118c8d390031903a18b0253d | [
"BSD-2-Clause"
] | 3 | 2018-06-01T00:05:18.000Z | 2019-03-07T14:03:10.000Z | redash/query_runner/python.py | techscience9/redash | 32669b148ccba47b118c8d390031903a18b0253d | [
"BSD-2-Clause"
] | 6 | 2021-01-21T16:43:27.000Z | 2022-02-27T09:18:00.000Z | redash/query_runner/python.py | tradingfoe/redash-clone | 94065b8dce0e27f6f40a7adc2b99e078b03115b3 | [
"BSD-2-Clause"
] | 5 | 2018-06-02T11:12:44.000Z | 2020-05-13T18:34:33.000Z | import datetime
import importlib
import logging
import sys
from redash.query_runner import *
from redash.utils import json_dumps, json_loads
from redash import models
from RestrictedPython import compile_restricted
from RestrictedPython.Guards import safe_builtins
logger = logging.getLogger(__name__)
class CustomPrint(object):
"""CustomPrint redirect "print" calls to be sent as "log" on the result object."""
register(Python)
| 34.45 | 141 | 0.611445 | import datetime
import importlib
import logging
import sys
from redash.query_runner import *
from redash.utils import json_dumps, json_loads
from redash import models
from RestrictedPython import compile_restricted
from RestrictedPython.Guards import safe_builtins
logger = logging.getLogger(__name__)
class CustomPrint(object):
    """Redirects "print" calls into an in-memory log for the query result.

    Each buffered line is prefixed with a UTC timestamp; blank writes and
    writes made while disabled are dropped.
    """

    def __init__(self):
        self.enabled = True
        self.lines = []

    def write(self, text):
        """Buffer *text* as a timestamped log line (no-op if disabled/blank)."""
        if not self.enabled:
            return
        if not (text and text.strip()):
            return
        timestamp = datetime.datetime.utcnow().isoformat()
        self.lines.append("[{0}] {1}".format(timestamp, text))

    def enable(self):
        """Resume capturing writes."""
        self.enabled = True

    def disable(self):
        """Silently drop subsequent writes."""
        self.enabled = False

    def __call__(self):
        # Allows the instance to be used where a stream factory is expected.
        return self
class Python(BaseQueryRunner):
    """Query runner that executes user-supplied Python inside RestrictedPython.

    Scripts run against a restricted builtins dict plus a small helper API
    (execute_query, add_result_row, ...) and must populate a ``result`` dict
    with "columns"/"rows"; ``print`` output is captured as "log".
    """

    should_annotate_query = False

    # Allow-list of real builtins exposed to scripts.
    # NOTE(review): 'cmp' and 'unicode' are Python 2 names — absent on
    # Python 3 interpreters; confirm target runtime.
    safe_builtins = (
        'sorted', 'reversed', 'map', 'reduce', 'any', 'all',
        'slice', 'filter', 'len', 'next', 'enumerate',
        'sum', 'abs', 'min', 'max', 'round', 'cmp', 'divmod',
        'str', 'unicode', 'int', 'float', 'complex',
        'tuple', 'set', 'list', 'dict', 'bool',
    )

    @classmethod
    def configuration_schema(cls):
        """Describe the data-source configuration form (allowed modules/paths)."""
        return {
            'type': 'object',
            'properties': {
                'allowedImportModules': {
                    'type': 'string',
                    'title': 'Modules to import prior to running the script'
                },
                'additionalModulesPaths': {
                    'type': 'string'
                }
            },
        }

    @classmethod
    def enabled(cls):
        """This runner has no optional dependencies; always available."""
        return True

    def __init__(self, configuration):
        """Parse the allowed-module list and extend sys.path as configured."""
        super(Python, self).__init__(configuration)
        self.syntax = "python"

        # Module name -> lazily imported module (None until first use).
        self._allowed_modules = {}
        self._script_locals = {"result": {"rows": [], "columns": [], "log": []}}
        self._enable_print_log = True
        self._custom_print = CustomPrint()

        if self.configuration.get("allowedImportModules", None):
            for item in self.configuration["allowedImportModules"].split(","):
                self._allowed_modules[item] = None

        if self.configuration.get("additionalModulesPaths", None):
            for p in self.configuration["additionalModulesPaths"].split(","):
                if p not in sys.path:
                    sys.path.append(p)

    def custom_import(self, name, globals=None, locals=None, fromlist=(), level=0):
        """Replacement __import__ that only permits configured modules."""
        if name in self._allowed_modules:
            m = None
            # Import on first use, then cache on the instance.
            if self._allowed_modules[name] is None:
                m = importlib.import_module(name)
                self._allowed_modules[name] = m
            else:
                m = self._allowed_modules[name]

            return m

        raise Exception("'{0}' is not configured as a supported import module".format(name))

    @staticmethod
    def custom_write(obj):
        """
        Custom hooks which controls the way objects/lists/tuples/dicts behave in
        RestrictedPython
        """
        return obj

    @staticmethod
    def custom_get_item(obj, key):
        """RestrictedPython _getitem_ hook: plain subscription."""
        return obj[key]

    @staticmethod
    def custom_get_iter(obj):
        """RestrictedPython _getiter_ hook: plain iteration."""
        return iter(obj)

    @staticmethod
    def add_result_column(result, column_name, friendly_name, column_type):
        """Helper function to add columns inside a Python script running in Redash in an easier way

        Parameters:
        :result dict: The result dict
        :column_name string: Name of the column, which should be consisted of lowercase latin letters or underscore.
        :friendly_name string: Name of the column for display
        :column_type string: Type of the column. Check supported data types for details.
        """
        if column_type not in SUPPORTED_COLUMN_TYPES:
            raise Exception("'{0}' is not a supported column type".format(column_type))

        if "columns" not in result:
            result["columns"] = []

        result["columns"].append({
            "name": column_name,
            "friendly_name": friendly_name,
            "type": column_type
        })

    @staticmethod
    def add_result_row(result, values):
        """Helper function to add one row to results set.

        Parameters:
        :result dict: The result dict
        :values dict: One row of result in dict. The key should be one of the column names. The value is the value of the column in this row.
        """
        if "rows" not in result:
            result["rows"] = []

        result["rows"].append(values)

    @staticmethod
    def execute_query(data_source_name_or_id, query):
        """Run query from specific data source.

        Parameters:
        :data_source_name_or_id string|integer: Name or ID of the data source
        :query string: Query to run
        """
        try:
            if type(data_source_name_or_id) == int:
                data_source = models.DataSource.get_by_id(data_source_name_or_id)
            else:
                data_source = models.DataSource.get_by_name(data_source_name_or_id)
        except models.NoResultFound:
            raise Exception("Wrong data source name/id: %s." % data_source_name_or_id)

        # TODO: pass the user here...
        data, error = data_source.query_runner.run_query(query, None)
        if error is not None:
            raise Exception(error)

        # TODO: allow avoiding the JSON dumps/loads in same process
        return json_loads(data)

    @staticmethod
    def get_source_schema(data_source_name_or_id):
        """Get schema from specific data source.

        :param data_source_name_or_id: string|integer: Name or ID of the data source
        :return:
        """
        try:
            if type(data_source_name_or_id) == int:
                data_source = models.DataSource.get_by_id(data_source_name_or_id)
            else:
                data_source = models.DataSource.get_by_name(data_source_name_or_id)
        except models.NoResultFound:
            raise Exception("Wrong data source name/id: %s." % data_source_name_or_id)
        schema = data_source.query_runner.get_schema()
        return schema

    @staticmethod
    def get_query_result(query_id):
        """Get result of an existing query.

        Parameters:
        :query_id integer: ID of existing query
        """
        try:
            query = models.Query.get_by_id(query_id)
        except models.NoResultFound:
            raise Exception("Query id %s does not exist." % query_id)

        if query.latest_query_data is None:
            raise Exception("Query does not have results yet.")

        if query.latest_query_data.data is None:
            raise Exception("Query does not have results yet.")

        return json_loads(query.latest_query_data.data)

    def get_current_user(self):
        """Return the invoking user as a plain dict (for use inside scripts)."""
        return self._current_user.to_dict()

    def test_connection(self):
        """Nothing to test: scripts run in-process."""
        pass

    def run_query(self, query, user):
        """Compile *query* with RestrictedPython and execute it sandboxed.

        Returns (json_data, error); exactly one of the two is None.
        """
        self._current_user = user

        try:
            error = None

            code = compile_restricted(query, '<string>', 'exec')

            # Start from RestrictedPython's safe builtins and add our hooks.
            builtins = safe_builtins.copy()
            builtins["_write_"] = self.custom_write
            builtins["__import__"] = self.custom_import
            builtins["_getattr_"] = getattr
            builtins["getattr"] = getattr
            builtins["_setattr_"] = setattr
            builtins["setattr"] = setattr
            builtins["_getitem_"] = self.custom_get_item
            builtins["_getiter_"] = self.custom_get_iter
            builtins["_print_"] = self._custom_print

            # Layer in our own additional set of builtins that we have
            # considered safe.
            # NOTE(review): __builtins__ is a dict in __main__ but a module in
            # imported modules; subscripting may raise TypeError here — verify.
            for key in self.safe_builtins:
                builtins[key] = __builtins__[key]

            restricted_globals = dict(__builtins__=builtins)
            restricted_globals["get_query_result"] = self.get_query_result
            restricted_globals["get_source_schema"] = self.get_source_schema
            restricted_globals["get_current_user"] = self.get_current_user
            restricted_globals["execute_query"] = self.execute_query
            restricted_globals["add_result_column"] = self.add_result_column
            restricted_globals["add_result_row"] = self.add_result_row
            restricted_globals["disable_print_log"] = self._custom_print.disable
            restricted_globals["enable_print_log"] = self._custom_print.enable

            # Supported data types
            restricted_globals["TYPE_DATETIME"] = TYPE_DATETIME
            restricted_globals["TYPE_BOOLEAN"] = TYPE_BOOLEAN
            restricted_globals["TYPE_INTEGER"] = TYPE_INTEGER
            restricted_globals["TYPE_STRING"] = TYPE_STRING
            restricted_globals["TYPE_DATE"] = TYPE_DATE
            restricted_globals["TYPE_FLOAT"] = TYPE_FLOAT

            # TODO: Figure out the best way to have a timeout on a script
            # One option is to use ETA with Celery + timeouts on workers
            # And replacement of worker process every X requests handled.

            exec((code), restricted_globals, self._script_locals)

            result = self._script_locals['result']
            result['log'] = self._custom_print.lines
            json_data = json_dumps(result)
        except KeyboardInterrupt:
            error = "Query cancelled by user."
            json_data = None
        except Exception as e:
            error = str(type(e)) + " " + str(e)
            json_data = None

        return json_data, error
register(Python)
| 4,618 | 4,430 | 157 |
71735f8fa3170ee4f1070bc1e7bbea8132fee378 | 10,138 | py | Python | InvalidPassports/InvalidPassports.py | savvakrasava/odi_tools | 672cd5fd2bd2c48b7be5d945747edbaff0d1247c | [
"MIT"
] | null | null | null | InvalidPassports/InvalidPassports.py | savvakrasava/odi_tools | 672cd5fd2bd2c48b7be5d945747edbaff0d1247c | [
"MIT"
] | null | null | null | InvalidPassports/InvalidPassports.py | savvakrasava/odi_tools | 672cd5fd2bd2c48b7be5d945747edbaff0d1247c | [
"MIT"
] | null | null | null | #-*- coding: utf-8 -*-
'''
Created on 21 06 2019
@author: lukhnevsn
'''
#import packages
##############################################
from java.io import BufferedReader ,FileReader, IOException;
from java.sql import PreparedStatement,Connection,DriverManager,SQLException,Statement,Types, ResultSet;
from java.net import Proxy, InetSocketAddress;
from okhttp3 import Call, OkHttpClient, Request, Response;
from okhttp3.OkHttpClient import newBuilder;
from okhttp3.Request import newBuilder as Builder;
from org.apache.http.impl.client import BasicCredentialsProvider, CloseableHttpClient, HttpClients;
from org.apache.http import HttpEntity,HttpHost;
from org.apache.http.auth import AuthScope, NTCredentials;
from jarray import zeros;
from au.com.bytecode.opencsv import CSVReader;
from java.lang.String import length, replace;
#import org.apache.commons.vfs2.util.Cryptor as Cryptor;
#import org.apache.commons.vfs2.util.CryptorFactory as CryptorFactory;
import okhttp3.Credentials as Credentials;
import java.io.File as File;
import java.io.FileOutputStream as FileOutputStream;
import java.io.InputStreamReader as InputStreamReader;
import java.io.FileInputStream as FileInputStream;
import java.sql.ResultSet;
import org.apache.commons.compress.compressors.bzip2.BZip2CompressorInputStream as BZip2CompressorInputStream;
import java.nio.file.Files as Files;
import java.nio.file.Paths as Paths;
import java.nio.file.StandardCopyOption as StandardCopyOption;
import java.util.concurrent.TimeUnit as TimeUnit;
import java.lang;
import org.apache.http.client.config.RequestConfig as RequestConfig;
import org.apache.http.client.methods.CloseableHttpResponse as CloseableHttpResponse;
import org.apache.http.client.methods.HttpGet as HttpGet;
import java.sql.Timestamp as Timestamp;
import java.lang.System as System;
import java.sql.Timestamp as Timestamp;
#constants
##############################################
archived_file = 'list_of_expired_passports.csv.bz2'; #filname for downloading file
unarchived_file = 'list_of_expired_passports.csv'; #filname for uncompressed file
url = "http://guvm.mvd.ru/upload/expired-passports/list_of_expired_passports.csv.bz2" ; #url = 'https://xn--b1ab2a0a.xn--b1aew.xn--p1ai/upload/expired-passports/list_of_expired_passports.csv.bz2'; #url = 'http://localhost/pass/list_of_expired_passports.csv.bin';
# ODI substitution tag: replaced with the agent name before this script runs.
ODIAgent = '<%=odiRef.getSession( "AGENT_NAME" )%>';
# Encrypted DB password, decrypted via the ODI CryptorFactory.
# NOTE(review): CryptorFactory's import is commented out above -- presumably
# the ODI runtime injects it into scope; confirm.
ora_odi_pass_e = "BD15AFA077FA694DB138CD75D5BD19BA";
ora_odi_pass = CryptorFactory.getCryptor().decrypt(ora_odi_pass_e);
# JDBC connection to the DEST data server, provided by the ODI runtime.
conn = odiRef.getJDBCConnection( "DEST" );
s_schema = '<%=odiRef.getInfo("DEST_SCHEMA")%>'
table_name = 'DIM_INVALID_PASSPORTS';
batch_size = 500000; #batch for inserting
buffer_size = 1024*8; #buffer for reading from web
downloaded = 0; #mutable counter: bytes downloaded so far
lnNum =0; #mutable counter: CSV rows inserted
lastPercent = -1; #mutable state for progress printing
totalRecords= 0; #declared but never updated in this script
#functions
##############################################
def ins_log(state, err_text) :
    '''Record a processing-state entry via the P_INS_LOG stored procedure.

    state    -- short status label for the log row
    err_text -- optional detail text; stored as SQL NULL when missing
    '''
    global conn;
    conn.setSchema(s_schema);
    log_call = conn.prepareCall("{call P_INS_LOG(DIM_INVALID_PASSPORTS,?,?)}");
    log_call.setString("sSTATE", state);
    if err_text is None:
        log_call.setNull("sERRTEXT", Types.VARCHAR);
    else:
        log_call.setString("sERRTEXT", err_text);
    log_call.execute();
    log_call.close();
def print_download_process(l1, l2):
    '''Print a simple '=' progress bar whenever the integer percentage grows.

    l1 -- bytes downloaded so far
    l2 -- total bytes expected
    '''
    global lastPercent;
    pct = (l1*100)/l2;
    if pct <= lastPercent:
        return;
    print('='*pct);
    print('\n current = '+str(l1))
    print('\n target = ' +str(l2))
    lastPercent = pct;
def decompress_file(archived_file, unarchived_file):
    '''
    FUNC TO DECOMPRESS BZIP2 ARCHIVE

    archived_file   -- path of the downloaded .bz2 file
    unarchived_file -- destination path for the decompressed CSV
                       (overwritten if it already exists)
    '''
    ins_log("Decompressing...", str(archived_file));
    gzis = None;
    try:
        gzis = BZip2CompressorInputStream(FileInputStream(archived_file));
        Files.copy(gzis, Paths.get(unarchived_file), StandardCopyOption.REPLACE_EXISTING);
        # BUG FIX: success was previously logged unconditionally, even after a
        # failed copy; only claim success when the copy completed.
        ins_log("File Decompressed!", str(unarchived_file));
    except Exception as e:
        # BUG FIX: was str(Exception), which logged the class object instead
        # of the actual error.
        ins_log('Decompressing error', str(e));
    finally:
        # Always release the underlying file handle (previously leaked).
        if gzis is not None:
            gzis.close();
def proxy_authenticator():
    '''
    FUNC FOR PROXY THIS AUTH.

    Intended as the okhttp3 proxy Authenticator: answer a 407 challenge by
    re-issuing the request with a Proxy-Authorization header.

    NOTE(review): okhttp3's Authenticator.authenticate takes (route, response)
    arguments, and `Response.request()` below is invoked on the Response
    *class* rather than on a response instance -- this looks like it cannot
    work as written; verify against the okhttp3 Authenticator API.
    NOTE(review): proxy_user/proxy_pass are assigned as locals inside
    download_file(), so these module globals are likely undefined when this
    callback fires -- confirm.
    '''
    global proxy_user, proxy_pass;
    credential = Credentials.basic(proxy_user, proxy_pass);
    return Response.request().newBuilder().header("Proxy-Authorization", credential).build();
def download_file():
    '''
    FUNC FOR DOWNLOADING DATA FROM MVD.RF WEBSITE

    Streams the bz2 archive through an authenticated HTTP proxy into
    `archived_file`, reporting progress via print_download_process().
    '''
    global buffer_size, downloaded;
    # BUG FIX: proxy_user/proxy_pass must be module globals because
    # proxy_authenticator() reads them via `global`; previously they were
    # plain locals and the authenticator saw undefined names.
    global proxy_user, proxy_pass;
    ins_log('Create proxy settings', 'using '+str(ODIAgent));
    if ODIAgent == 'Internal':
        # NOTE(review): TEST_FILE_DOWNLOADS / FILE_DOWNLOADS / cryptor are not
        # defined in this script -- presumably injected by the ODI runtime;
        # confirm.
        proxy_address = TEST_FILE_DOWNLOADS.ProdProxyHost;
        proxy_port = TEST_FILE_DOWNLOADS.ProdProxyPort;
        proxy_user = 'lukhnevsn';
        proxy_passe = "70320DB646F3C6740262E9224E8A88C7";
        proxy_pass = cryptor.decrypt(proxy_passe);
    else:
        proxy_address = FILE_DOWNLOADS.ProdProxyHost;
        proxy_port = FILE_DOWNLOADS.ProdProxyPort;
        proxy_user = "ODI_USER";
        proxy_passe = "32A47DEE17B2F967BA6094BB609ABF8E";
        # BUG FIX: was cryptor.decrypt(PROXY_PASSE) -- an undefined name that
        # raised NameError whenever this branch ran.
        proxy_pass = cryptor.decrypt(proxy_passe);
    # (the unused proxy_domain = "BANKEXP" assignments were removed)
    ins_log("Downloading...", url);
    builder = OkHttpClient.Builder();
    builder.connectTimeout(5, TimeUnit.MINUTES).writeTimeout(5, TimeUnit.MINUTES).readTimeout(5, TimeUnit.MINUTES);
    httpClient = builder.proxy(Proxy( Proxy.Type.HTTP, InetSocketAddress(proxy_address, proxy_port))).proxyAuthenticator(proxy_authenticator).build();
    call = httpClient.newCall(Request.Builder().url(url).get().build()); #//Call to server
    response = call.execute();
    ins_log('Call to web server', str(response));
    if (response.code() == 200): #//Check Response code
        inputStream = None;
        outputStream = None;
        target = response.body().contentLength();
        try:
            inputStream = response.body().byteStream(); #//Get stream of bytes
            buffer = zeros(buffer_size, 'b');
            outputStream = FileOutputStream(File(archived_file));
            print_download_process(0, target);
            while (downloaded < target) :
                readed = inputStream.read(buffer);
                if (readed == -1):
                    break;
                outputStream.write(buffer, 0, readed); #//write buff
                downloaded += readed;
                print_download_process(downloaded, target);
        except Exception as e:
            # BUG FIX: was str(Exception) -- logged the class, not the error.
            ins_log("Downloading Error", str(e));
        finally:
            # BUG FIX: the original used if/elif here, so whenever the input
            # stream was closed the output stream leaked (and vice versa).
            if (inputStream != None):
                inputStream.close();
            if (outputStream != None):
                outputStream.close();
    ins_log("File downloaded!", str(url) + ' filename:'+str(archived_file));
def insert_file_data_to_db_batch():
    '''
    FUNC FOR DATA INSERTING INTO DATABASE

    Truncates DIM_INVALID_PASSPORTS, then streams the CSV through JDBC
    batch inserts of `batch_size` rows (serial, number + SCD columns).
    '''
    ins_log("Adding data to DB...", 'Using JDBC from File');
    global conn, unarchived_file, lnNum, totalRecords, batch_size;
    conn.setSchema(s_schema);
    truncate_table();
    conn.setAutoCommit(False);
    jdbc_insert_sql = 'insert into dim_invalid_passports (pass_serial, pass_no, sys_actual_flg, sys_deleted_flg, valid_from_dttm, valid_to_dttm, sys_mod_by) values (?, ?, ?, ?, ?, ?, ? )';
    sql_statement = conn.prepareStatement(jdbc_insert_sql);
    reader = CSVReader(FileReader(unarchived_file)); #/* Read CSV file in OpenCSV */
    nextLine = reader.readNext(); # discard the first line (header) -- TODO confirm file has one
    nextLine = reader.readNext();
    # BUG FIX: the original called reader.readNext() in the loop test AND once
    # per column, so most rows were skipped and pass_serial / pass_no came
    # from *different* records. Each row is now read exactly once.
    while nextLine != None:
        lnNum+=1;
        sql_statement.setString(1, replace_str(nextLine[0]));
        sql_statement.setString(2, replace_str(nextLine[1]));
        sql_statement.setInt(3, 1); # sys_actual_flg
        sql_statement.setInt(4, 0); # sys_deleted_flg
        sql_statement.setTimestamp(5, Timestamp(System.currentTimeMillis())); # valid_from_dttm
        sql_statement.setString(6, "01.01.2400"); # valid_to_dttm "never" marker
        sql_statement.setString(7, "ODI_USER"); # sys_mod_by
        sql_statement.addBatch();
        nextLine = reader.readNext();
        # Flush a full batch, and the final remainder after the last row.
        if lnNum%batch_size==0 or nextLine==None:
            sql_statement.executeBatch();
    conn.commit();
    sql_statement.close();
    ins_log('File inserted, '+str(lnNum)+" rows added;", str(jdbc_insert_sql) + ' batch_size = '+str(batch_size));
    conn.close(); #//Close connection to DB
def do_it():
    '''
    EASY CALL TO OUR PROCESS

    Orchestrates the whole load: download the archive, unpack it, then
    bulk-insert the rows into the database.
    '''
    ins_log('Begin', None)
    download_file()
    decompress_file(archived_file, unarchived_file)
    insert_file_data_to_db_batch()
    ins_log('Done', None)
    # NOTE(review): insert_file_data_to_db_batch() already closed `conn`, so
    # the 'Done' log above and this close() run against a closed connection
    # -- confirm the JDBC driver tolerates that.
    conn.close()
#main
##############################################
do_it(); # script entry point: run the full download/decompress/load pipeline
| 38.842912 | 262 | 0.650621 | #-*- coding: utf-8 -*-
'''
Created on 21 06 2019
@author: lukhnevsn
'''
#import packages
##############################################
from java.io import BufferedReader ,FileReader, IOException;
from java.sql import PreparedStatement,Connection,DriverManager,SQLException,Statement,Types, ResultSet;
from java.net import Proxy, InetSocketAddress;
from okhttp3 import Call, OkHttpClient, Request, Response;
from okhttp3.OkHttpClient import newBuilder;
from okhttp3.Request import newBuilder as Builder;
from org.apache.http.impl.client import BasicCredentialsProvider, CloseableHttpClient, HttpClients;
from org.apache.http import HttpEntity,HttpHost;
from org.apache.http.auth import AuthScope, NTCredentials;
from jarray import zeros;
from au.com.bytecode.opencsv import CSVReader;
from java.lang.String import length, replace;
#import org.apache.commons.vfs2.util.Cryptor as Cryptor;
#import org.apache.commons.vfs2.util.CryptorFactory as CryptorFactory;
import okhttp3.Credentials as Credentials;
import java.io.File as File;
import java.io.FileOutputStream as FileOutputStream;
import java.io.InputStreamReader as InputStreamReader;
import java.io.FileInputStream as FileInputStream;
import java.sql.ResultSet;
import org.apache.commons.compress.compressors.bzip2.BZip2CompressorInputStream as BZip2CompressorInputStream;
import java.nio.file.Files as Files;
import java.nio.file.Paths as Paths;
import java.nio.file.StandardCopyOption as StandardCopyOption;
import java.util.concurrent.TimeUnit as TimeUnit;
import java.lang;
import org.apache.http.client.config.RequestConfig as RequestConfig;
import org.apache.http.client.methods.CloseableHttpResponse as CloseableHttpResponse;
import org.apache.http.client.methods.HttpGet as HttpGet;
import java.sql.Timestamp as Timestamp;
import java.lang.System as System;
import java.sql.Timestamp as Timestamp;
#constants
##############################################
archived_file = 'list_of_expired_passports.csv.bz2'; #filname for downloading file
unarchived_file = 'list_of_expired_passports.csv'; #filname for uncompressed file
url = "http://guvm.mvd.ru/upload/expired-passports/list_of_expired_passports.csv.bz2" ; #url = 'https://xn--b1ab2a0a.xn--b1aew.xn--p1ai/upload/expired-passports/list_of_expired_passports.csv.bz2'; #url = 'http://localhost/pass/list_of_expired_passports.csv.bin';
ODIAgent = '<%=odiRef.getSession( "AGENT_NAME" )%>';
ora_odi_pass_e = "BD15AFA077FA694DB138CD75D5BD19BA";
ora_odi_pass = CryptorFactory.getCryptor().decrypt(ora_odi_pass_e);
conn = odiRef.getJDBCConnection( "DEST" );
s_schema = '<%=odiRef.getInfo("DEST_SCHEMA")%>'
table_name = 'DIM_INVALID_PASSPORTS';
batch_size = 500000; #batch for inserting
buffer_size = 1024*8; #buffer for reading from web
downloaded = 0; #help const
lnNum =0; #help const
lastPercent = -1; #help const
totalRecords= 0; #help const
#functions
##############################################
def ins_log(state, err_text) :
    '''Record a processing-state entry via the P_INS_LOG stored procedure.

    state    -- short status label for the log row
    err_text -- optional detail text; stored as SQL NULL when missing
    '''
    global conn;
    conn.setSchema(s_schema);
    log_call = conn.prepareCall("{call P_INS_LOG(DIM_INVALID_PASSPORTS,?,?)}");
    log_call.setString("sSTATE", state);
    if err_text is None:
        log_call.setNull("sERRTEXT", Types.VARCHAR);
    else:
        log_call.setString("sERRTEXT", err_text);
    log_call.execute();
    log_call.close();
def print_download_process(l1, l2):
    '''Print a simple '=' progress bar whenever the integer percentage grows.

    l1 -- bytes downloaded so far
    l2 -- total bytes expected
    '''
    global lastPercent;
    pct = (l1*100)/l2;
    if pct <= lastPercent:
        return;
    print('='*pct);
    print('\n current = '+str(l1))
    print('\n target = ' +str(l2))
    lastPercent = pct;
def decompress_file(archived_file, unarchived_file):
    '''
    FUNC TO DECOMPRESS BZIP2 ARCHIVE

    archived_file   -- path of the downloaded .bz2 file
    unarchived_file -- destination path for the decompressed CSV
                       (overwritten if it already exists)
    '''
    ins_log("Decompressing...", str(archived_file));
    gzis = None;
    try:
        gzis = BZip2CompressorInputStream(FileInputStream(archived_file));
        Files.copy(gzis, Paths.get(unarchived_file), StandardCopyOption.REPLACE_EXISTING);
        # BUG FIX: success was previously logged unconditionally, even after a
        # failed copy; only claim success when the copy completed.
        ins_log("File Decompressed!", str(unarchived_file));
    except Exception as e:
        # BUG FIX: was str(Exception), which logged the class object instead
        # of the actual error.
        ins_log('Decompressing error', str(e));
    finally:
        # Always release the underlying file handle (previously leaked).
        if gzis is not None:
            gzis.close();
def proxy_authenticator():
    '''
    FUNC FOR PROXY THIS AUTH.

    Intended as the okhttp3 proxy Authenticator: answer a 407 challenge by
    re-issuing the request with a Proxy-Authorization header.

    NOTE(review): okhttp3's Authenticator.authenticate takes (route, response)
    arguments, and `Response.request()` below is invoked on the Response
    *class* rather than on a response instance -- this looks like it cannot
    work as written; verify against the okhttp3 Authenticator API.
    NOTE(review): proxy_user/proxy_pass are assigned as locals inside
    download_file(), so these module globals are likely undefined when this
    callback fires -- confirm.
    '''
    global proxy_user, proxy_pass;
    credential = Credentials.basic(proxy_user, proxy_pass);
    return Response.request().newBuilder().header("Proxy-Authorization", credential).build();
def download_file():
    '''
    FUNC FOR DOWNLOADING DATA FROM MVD.RF WEBSITE

    Streams the bz2 archive through an authenticated HTTP proxy into
    `archived_file`, reporting progress via print_download_process().
    '''
    global buffer_size, downloaded;
    # BUG FIX: proxy_user/proxy_pass must be module globals because
    # proxy_authenticator() reads them via `global`; previously they were
    # plain locals and the authenticator saw undefined names.
    global proxy_user, proxy_pass;
    ins_log('Create proxy settings', 'using '+str(ODIAgent));
    if ODIAgent == 'Internal':
        # NOTE(review): TEST_FILE_DOWNLOADS / FILE_DOWNLOADS / cryptor are not
        # defined in this script -- presumably injected by the ODI runtime;
        # confirm.
        proxy_address = TEST_FILE_DOWNLOADS.ProdProxyHost;
        proxy_port = TEST_FILE_DOWNLOADS.ProdProxyPort;
        proxy_user = 'lukhnevsn';
        proxy_passe = "70320DB646F3C6740262E9224E8A88C7";
        proxy_pass = cryptor.decrypt(proxy_passe);
    else:
        proxy_address = FILE_DOWNLOADS.ProdProxyHost;
        proxy_port = FILE_DOWNLOADS.ProdProxyPort;
        proxy_user = "ODI_USER";
        proxy_passe = "32A47DEE17B2F967BA6094BB609ABF8E";
        # BUG FIX: was cryptor.decrypt(PROXY_PASSE) -- an undefined name that
        # raised NameError whenever this branch ran.
        proxy_pass = cryptor.decrypt(proxy_passe);
    # (the unused proxy_domain = "BANKEXP" assignments were removed)
    ins_log("Downloading...", url);
    builder = OkHttpClient.Builder();
    builder.connectTimeout(5, TimeUnit.MINUTES).writeTimeout(5, TimeUnit.MINUTES).readTimeout(5, TimeUnit.MINUTES);
    httpClient = builder.proxy(Proxy( Proxy.Type.HTTP, InetSocketAddress(proxy_address, proxy_port))).proxyAuthenticator(proxy_authenticator).build();
    call = httpClient.newCall(Request.Builder().url(url).get().build()); #//Call to server
    response = call.execute();
    ins_log('Call to web server', str(response));
    if (response.code() == 200): #//Check Response code
        inputStream = None;
        outputStream = None;
        target = response.body().contentLength();
        try:
            inputStream = response.body().byteStream(); #//Get stream of bytes
            buffer = zeros(buffer_size, 'b');
            outputStream = FileOutputStream(File(archived_file));
            print_download_process(0, target);
            while (downloaded < target) :
                readed = inputStream.read(buffer);
                if (readed == -1):
                    break;
                outputStream.write(buffer, 0, readed); #//write buff
                downloaded += readed;
                print_download_process(downloaded, target);
        except Exception as e:
            # BUG FIX: was str(Exception) -- logged the class, not the error.
            ins_log("Downloading Error", str(e));
        finally:
            # BUG FIX: the original used if/elif here, so whenever the input
            # stream was closed the output stream leaked (and vice versa).
            if (inputStream != None):
                inputStream.close();
            if (outputStream != None):
                outputStream.close();
    ins_log("File downloaded!", str(url) + ' filename:'+str(archived_file));
def truncate_table():
    '''Empty DIM_INVALID_PASSPORTS via the reference_editor.truncate_table proc.

    SQL errors are logged through ins_log instead of being raised, so the
    load continues best-effort (matching the original behaviour).
    '''
    global conn, table_name;
    conn.setSchema(s_schema);
    ins_log('Truncate table', str(s_schema)+'.'+str(table_name));
    try:
        sql = "{call reference_editor.truncate_table(?)}";
        # (debug `print(sql)` leftover removed)
        statement = conn.prepareCall(sql);
        statement.setString(1,table_name);
        statement.execute();
        statement.close();
    except SQLException as e:
        ins_log('Truncate error', str(e.getMessage()));
    else:
        # BUG FIX: the original logged "Table truncated" from `finally`,
        # i.e. even after a failure; only claim success when no exception
        # occurred.
        ins_log("Table truncated ", str(s_schema)+'.'+str(table_name));
def replace_str(char):
    '''Map an empty or missing CSV field to the literal string 'NULL'.

    char -- a single CSV field value (may be None)
    Returns 'NULL' for None or empty input, otherwise the value unchanged.
    '''
    # IDIOM FIX: the original called the unbound Java method
    # java.lang.String.length(char); builtin len() handles both Python str
    # and Java strings under Jython, and keeps this function portable.
    if char is None or len(char) < 1:
        return 'NULL';
    return char;
def insert_file_data_to_db_batch():
    '''
    FUNC FOR DATA INSERTING INTO DATABASE

    Truncates DIM_INVALID_PASSPORTS, then streams the CSV through JDBC
    batch inserts of `batch_size` rows (serial, number + SCD columns).
    '''
    ins_log("Adding data to DB...", 'Using JDBC from File');
    global conn, unarchived_file, lnNum, totalRecords, batch_size;
    conn.setSchema(s_schema);
    truncate_table();
    conn.setAutoCommit(False);
    jdbc_insert_sql = 'insert into dim_invalid_passports (pass_serial, pass_no, sys_actual_flg, sys_deleted_flg, valid_from_dttm, valid_to_dttm, sys_mod_by) values (?, ?, ?, ?, ?, ?, ? )';
    sql_statement = conn.prepareStatement(jdbc_insert_sql);
    reader = CSVReader(FileReader(unarchived_file)); #/* Read CSV file in OpenCSV */
    nextLine = reader.readNext(); # discard the first line (header) -- TODO confirm file has one
    nextLine = reader.readNext();
    # BUG FIX: the original called reader.readNext() in the loop test AND once
    # per column, so most rows were skipped and pass_serial / pass_no came
    # from *different* records. Each row is now read exactly once.
    while nextLine != None:
        lnNum+=1;
        sql_statement.setString(1, replace_str(nextLine[0]));
        sql_statement.setString(2, replace_str(nextLine[1]));
        sql_statement.setInt(3, 1); # sys_actual_flg
        sql_statement.setInt(4, 0); # sys_deleted_flg
        sql_statement.setTimestamp(5, Timestamp(System.currentTimeMillis())); # valid_from_dttm
        sql_statement.setString(6, "01.01.2400"); # valid_to_dttm "never" marker
        sql_statement.setString(7, "ODI_USER"); # sys_mod_by
        sql_statement.addBatch();
        nextLine = reader.readNext();
        # Flush a full batch, and the final remainder after the last row.
        if lnNum%batch_size==0 or nextLine==None:
            sql_statement.executeBatch();
    conn.commit();
    sql_statement.close();
    ins_log('File inserted, '+str(lnNum)+" rows added;", str(jdbc_insert_sql) + ' batch_size = '+str(batch_size));
    conn.close(); #//Close connection to DB
def do_it():
    '''
    EASY CALL TO OUR PROCESS

    Orchestrates the whole load: download the archive, unpack it, then
    bulk-insert the rows into the database.
    '''
    ins_log('Begin', None)
    download_file()
    decompress_file(archived_file, unarchived_file)
    insert_file_data_to_db_batch()
    ins_log('Done', None)
    # NOTE(review): insert_file_data_to_db_batch() already closed `conn`, so
    # the 'Done' log above and this close() run against a closed connection
    # -- confirm the JDBC driver tolerates that.
    conn.close()
#main
##############################################
do_it(); # script entry point: run the full download/decompress/load pipeline
| 769 | 0 | 62 |
b7fff91cb272ffc6f087516681dde3758d90c6ef | 37,524 | py | Python | tests/test_tron_tools.py | xcorail/paasta | 3f132c73b45fcf0afc31ddb889205ecd9394d4bb | [
"Apache-2.0"
] | null | null | null | tests/test_tron_tools.py | xcorail/paasta | 3f132c73b45fcf0afc31ddb889205ecd9394d4bb | [
"Apache-2.0"
] | null | null | null | tests/test_tron_tools.py | xcorail/paasta | 3f132c73b45fcf0afc31ddb889205ecd9394d4bb | [
"Apache-2.0"
] | null | null | null | import mock
import pytest
from paasta_tools import tron_tools
from paasta_tools.tron_tools import MASTER_NAMESPACE
from paasta_tools.utils import InvalidInstanceConfig
from paasta_tools.utils import NoConfigurationForServiceError
from paasta_tools.utils import NoDeploymentsAvailable
@mock.patch('service_configuration_lib.read_extra_service_information', autospec=True)
@mock.patch('service_configuration_lib.read_extra_service_information', autospec=True)
@mock.patch('service_configuration_lib._read_yaml_file', autospec=True)
@mock.patch('os.path.abspath', autospec=True)
@mock.patch('paasta_tools.tron_tools.load_tron_yaml', autospec=True)
| 36.896755 | 115 | 0.593167 | import mock
import pytest
from paasta_tools import tron_tools
from paasta_tools.tron_tools import MASTER_NAMESPACE
from paasta_tools.utils import InvalidInstanceConfig
from paasta_tools.utils import NoConfigurationForServiceError
from paasta_tools.utils import NoDeploymentsAvailable
class TestTronConfig:
    """Tests for tron_tools.TronConfig accessors and their error behaviour."""
    @pytest.fixture
    def config_dict(self):
        # Minimal valid Tron system config shared by every test below.
        return {
            'cluster_name': 'dev-batch',
            'default_paasta_cluster': 'dev-oregon',
            'url': 'http://mesos-master.com:2000',
        }
    def test_normal(self, config_dict):
        """A complete config exposes cluster name and URL unchanged."""
        config = tron_tools.TronConfig(config_dict)
        assert config.get_cluster_name() == 'dev-batch'
        assert config.get_url() == 'http://mesos-master.com:2000'
    def test_no_cluster_name(self, config_dict):
        """Missing cluster_name raises TronNotConfigured."""
        del config_dict['cluster_name']
        config = tron_tools.TronConfig(config_dict)
        with pytest.raises(tron_tools.TronNotConfigured):
            config.get_cluster_name()
    def test_no_url(self, config_dict):
        """Missing url raises TronNotConfigured."""
        del config_dict['url']
        config = tron_tools.TronConfig(config_dict)
        with pytest.raises(tron_tools.TronNotConfigured):
            config.get_url()
class TestTronActionConfig:
    """Tests for TronActionConfig: name/cluster accessors and executor mapping."""
    def test_get_job_name(self):
        """Job name is the part of the instance before the dot."""
        action_dict = {
            'name': 'print',
            'command': 'echo something',
        }
        action_config = tron_tools.TronActionConfig(
            service='my_service',
            instance=tron_tools.compose_instance('cool_job', 'print'),
            cluster="fake-cluster",
            config_dict=action_dict,
            branch_dict={},
        )
        assert action_config.get_job_name() == 'cool_job'
    def test_get_action_name(self):
        """Action name is the part of the instance after the dot."""
        action_dict = {
            'name': 'sleep',
            'command': 'sleep 10',
        }
        action_config = tron_tools.TronActionConfig(
            service='my_service',
            instance=tron_tools.compose_instance('my_job', 'sleep'),
            cluster="fake-cluster",
            config_dict=action_dict,
            branch_dict={},
        )
        assert action_config.get_action_name() == 'sleep'
    def test_get_cluster(self):
        """The cluster passed at construction is returned as-is."""
        action_dict = {
            'name': 'do_something',
            'command': 'echo something',
        }
        action_config = tron_tools.TronActionConfig(
            service='my_service',
            instance=tron_tools.compose_instance('my_job', 'do_something'),
            cluster="fake-cluster",
            config_dict=action_dict,
            branch_dict={},
        )
        assert action_config.get_cluster() == 'fake-cluster'
    def test_get_executor_default(self):
        """Without an explicit executor, get_executor returns None."""
        action_dict = {
            'name': 'do_something',
            'command': 'echo something',
        }
        action_config = tron_tools.TronActionConfig(
            service='my_service',
            instance=tron_tools.compose_instance('my_job', 'do_something'),
            cluster="fake-cluster",
            config_dict=action_dict,
            branch_dict={},
        )
        assert action_config.get_executor() is None
    def test_get_executor_paasta(self):
        """The 'paasta' executor value is translated to 'mesos'."""
        action_dict = {
            'name': 'do_something',
            'command': 'echo something',
            'executor': 'paasta',
        }
        action_config = tron_tools.TronActionConfig(
            service='my_service',
            instance=tron_tools.compose_instance('my_job', 'do_something'),
            cluster="fake-cluster",
            config_dict=action_dict,
            branch_dict={},
        )
        assert action_config.get_executor() == 'mesos'
class TestTronJobConfig:
    """Tests for TronJobConfig: action-config derivation, Tron dict formatting,
    and job validation (resource values and monitoring config)."""
    # NOTE(review): three identical (None, None) cases below run the same
    # scenario three times -- the last two look like placeholders for other
    # service/deploy_group combinations; confirm and deduplicate.
    @pytest.mark.parametrize(
        'action_service,action_deploy', [
            (None, None),
            (None, 'special_deploy'),
            ('other_service', None),
            (None, None),
            (None, None),
        ],
    )
    @mock.patch('paasta_tools.tron_tools.load_v2_deployments_json', autospec=True)
    def test_get_action_config(
        self,
        mock_load_deployments,
        action_service,
        action_deploy,
    ):
        """Check resulting action config with various overrides from the action."""
        action_dict = {
            'name': 'normal',
            'command': 'echo first',
        }
        if action_service:
            action_dict['service'] = action_service
        if action_deploy:
            action_dict['deploy_group'] = action_deploy
        job_service = 'my_service'
        job_deploy = 'prod'
        # Action-level settings win over job-level ones when present.
        expected_service = action_service or job_service
        expected_deploy = action_deploy or job_deploy
        expected_cluster = 'paasta-dev'
        job_dict = {
            'name': 'my_job',
            'node': 'batch_server',
            'schedule': 'daily 12:10:00',
            'service': job_service,
            'deploy_group': job_deploy,
            'max_runtime': '2h',
            'actions': [action_dict],
        }
        soa_dir = '/other_dir'
        job_config = tron_tools.TronJobConfig(job_dict, expected_cluster, soa_dir=soa_dir)
        action_config = job_config._get_action_config(action_dict=action_dict)
        mock_load_deployments.assert_called_once_with(expected_service, soa_dir)
        mock_deployments_json = mock_load_deployments.return_value
        mock_deployments_json.get_docker_image_for_deploy_group.assert_called_once_with(expected_deploy)
        mock_deployments_json.get_git_sha_for_deploy_group.assert_called_once_with(expected_deploy)
        expected_branch_dict = {
            'docker_image': mock_deployments_json.get_docker_image_for_deploy_group.return_value,
            'git_sha': mock_deployments_json.get_git_sha_for_deploy_group.return_value,
            'desired_state': 'start',
            'force_bounce': None,
        }
        expected_input_action_config = {
            'name': 'normal',
            'command': 'echo first',
            'service': expected_service,
            'deploy_group': expected_deploy,
        }
        assert action_config == tron_tools.TronActionConfig(
            service=expected_service,
            instance=tron_tools.compose_instance('my_job', 'normal'),
            config_dict=expected_input_action_config,
            branch_dict=expected_branch_dict,
            soa_dir=soa_dir,
            cluster=expected_cluster,
        )
    @mock.patch('paasta_tools.tron_tools.load_v2_deployments_json', autospec=True)
    def test_get_action_config_no_deployment(
        self,
        mock_load_deployments,
    ):
        """Missing deployments for the service raises InvalidTronConfig."""
        action_dict = {
            'name': 'normal',
            'command': 'echo first',
        }
        job_dict = {
            'name': 'my_job',
            'node': 'batch_server',
            'schedule': 'daily 12:10:00',
            'service': 'my_service',
            'deploy_group': 'prod',
            'max_runtime': '2h',
            'actions': [action_dict],
        }
        job_config = tron_tools.TronJobConfig(job_dict, 'fake-cluster')
        mock_load_deployments.side_effect = NoDeploymentsAvailable
        with pytest.raises(tron_tools.InvalidTronConfig):
            job_config._get_action_config(action_dict)
    @mock.patch('paasta_tools.tron_tools.load_v2_deployments_json', autospec=True)
    def test_get_action_config_load_deployments_false(
        self,
        mock_load_deployments,
    ):
        """With load_deployments=False, deployments are never read and branch_dict is None."""
        action_dict = {
            'name': 'normal',
            'command': 'echo first',
        }
        job_dict = {
            'name': 'my_job',
            'node': 'batch_server',
            'schedule': 'daily 12:10:00',
            'service': 'my_service',
            'deploy_group': 'prod',
            'max_runtime': '2h',
            'actions': [action_dict],
        }
        soa_dir = '/other_dir'
        cluster = 'paasta-dev'
        job_config = tron_tools.TronJobConfig(
            job_dict,
            cluster,
            load_deployments=False,
            soa_dir=soa_dir,
        )
        mock_load_deployments.side_effect = NoDeploymentsAvailable
        action_config = job_config._get_action_config(action_dict)
        assert mock_load_deployments.call_count == 0
        assert action_config == tron_tools.TronActionConfig(
            service='my_service',
            cluster=cluster,
            instance=tron_tools.compose_instance('my_job', 'normal'),
            config_dict={
                'name': 'normal',
                'command': 'echo first',
                'service': 'my_service',
                'deploy_group': 'prod',
            },
            branch_dict=None,
            soa_dir=soa_dir,
        )
    @mock.patch('paasta_tools.tron_tools.TronJobConfig._get_action_config', autospec=True)
    @mock.patch('paasta_tools.tron_tools.format_tron_action_dict', autospec=True)
    def test_format_tron_job_dict(
        self,
        mock_format_action,
        mock_get_action_config,
    ):
        """format_tron_job_dict formats each action and keeps job-level keys."""
        action_dict = {
            'name': 'normal',
            'command': 'echo first',
        }
        job_dict = {
            'name': 'my_job',
            'node': 'batch_server',
            'schedule': 'daily 12:10:00',
            'service': 'my_service',
            'deploy_group': 'prod',
            'max_runtime': '2h',
            'actions': [action_dict],
            'expected_runtime': '1h',
        }
        soa_dir = '/other_dir'
        cluster = 'paasta-dev'
        job_config = tron_tools.TronJobConfig(job_dict, cluster, soa_dir=soa_dir)
        result = tron_tools.format_tron_job_dict(job_config)
        mock_get_action_config.assert_called_once_with(job_config, action_dict)
        mock_format_action.assert_called_once_with(mock_get_action_config.return_value)
        assert result == {
            'name': 'my_job',
            'node': 'batch_server',
            'schedule': 'daily 12:10:00',
            'max_runtime': '2h',
            'actions': [mock_format_action.return_value],
            'expected_runtime': '1h',
        }
    @mock.patch('paasta_tools.tron_tools.TronJobConfig._get_action_config', autospec=True)
    @mock.patch('paasta_tools.tron_tools.format_tron_action_dict', autospec=True)
    def test_format_tron_job_dict_with_cleanup_action(
        self,
        mock_format_action,
        mock_get_action_config,
    ):
        """A cleanup_action is formatted like a regular action (2 calls total)."""
        job_dict = {
            'name': 'my_job',
            'node': 'batch_server',
            'schedule': 'daily 12:10:00',
            'service': 'my_service',
            'deploy_group': 'prod',
            'max_runtime': '2h',
            'actions': [{
                'name': 'normal',
                'command': 'echo first',
            }],
            'cleanup_action': {
                'command': 'rm *',
            },
        }
        job_config = tron_tools.TronJobConfig(job_dict, 'paasta-dev')
        result = tron_tools.format_tron_job_dict(job_config)
        assert mock_get_action_config.call_count == 2
        assert mock_format_action.call_count == 2
        assert result == {
            'name': 'my_job',
            'node': 'batch_server',
            'schedule': 'daily 12:10:00',
            'max_runtime': '2h',
            'actions': [mock_format_action.return_value],
            'cleanup_action': mock_format_action.return_value,
        }
    def test_validate_all_actions(self):
        """Invalid cpus/mem in both actions and cleanup_action each yield an error."""
        job_dict = {
            'name': 'my_job',
            'node': 'batch_server',
            'schedule': 'daily 12:10:00',
            'service': 'testservice',
            'actions': [
                {
                    'name': 'first',
                    'command': 'echo first',
                    'cpus': 'bad string',
                },
                {
                    'name': 'second',
                    'command': 'echo second',
                    'mem': 'not a number',
                },
            ],
            'cleanup_action': {
                'command': 'rm *',
                'cpus': 'also bad',
            },
        }
        job_config = tron_tools.TronJobConfig(job_dict, 'fake-cluster')
        errors = job_config.validate()
        assert len(errors) == 3
    @mock.patch('paasta_tools.tron_tools.list_teams', autospec=True)
    def test_validate_monitoring(self, mock_teams):
        """A known monitoring team passes validation with no errors."""
        job_dict = {
            'name': 'my_job',
            'node': 'batch_server',
            'schedule': 'daily 12:10:00',
            'monitoring': {
                'team': 'noop',
                'page': True,
            },
            'actions': [
                {
                    'name': 'first',
                    'command': 'echo first',
                },
            ],
        }
        mock_teams.return_value = ['noop']
        job_config = tron_tools.TronJobConfig(job_dict, 'fake-cluster')
        errors = job_config.validate()
        assert len(errors) == 0
    @mock.patch('paasta_tools.tron_tools.list_teams', autospec=True)
    def test_validate_monitoring_without_team(self, mock_teams):
        """Monitoring config without a team is rejected."""
        job_dict = {
            'name': 'my_job',
            'node': 'batch_server',
            'schedule': 'daily 12:10:00',
            'monitoring': {
                'page': True,
            },
            'actions': [
                {
                    'name': 'first',
                    'command': 'echo first',
                },
            ],
        }
        job_config = tron_tools.TronJobConfig(job_dict, 'fake-cluster')
        errors = job_config.validate()
        assert errors == ['Team name is required for monitoring']
    @mock.patch('paasta_tools.tron_tools.list_teams', autospec=True)
    def test_validate_monitoring_with_invalid_team(self, mock_teams):
        """An unknown team is rejected with a closest-match suggestion."""
        job_dict = {
            'name': 'my_job',
            'node': 'batch_server',
            'schedule': 'daily 12:10:00',
            'monitoring': {
                'team': 'invalid_team',
                'page': True,
            },
            'actions': [
                {
                    'name': 'first',
                    'command': 'echo first',
                },
            ],
        }
        mock_teams.return_value = ['valid_team', 'weird_name']
        job_config = tron_tools.TronJobConfig(job_dict, 'fake-cluster')
        errors = job_config.validate()
        assert errors == ["Invalid team name: invalid_team. Do you mean one of these: ['valid_team']"]
class TestTronTools:
@mock.patch('paasta_tools.tron_tools.load_system_paasta_config', autospec=True)
def test_load_tron_config(self, mock_system_paasta_config):
result = tron_tools.load_tron_config()
assert mock_system_paasta_config.return_value.get_tron_config.call_count == 1
assert result == tron_tools.TronConfig(mock_system_paasta_config.return_value.get_tron_config.return_value)
@mock.patch('paasta_tools.tron_tools.load_tron_config', autospec=True)
@mock.patch('paasta_tools.tron_tools.TronClient', autospec=True)
def test_get_tron_client(self, mock_client, mock_system_tron_config):
result = tron_tools.get_tron_client()
assert mock_system_tron_config.return_value.get_url.call_count == 1
mock_client.assert_called_once_with(mock_system_tron_config.return_value.get_url.return_value)
assert result == mock_client.return_value
def test_compose_instance(self):
result = tron_tools.compose_instance('great_job', 'fast_action')
assert result == 'great_job.fast_action'
def test_decompose_instance_valid(self):
result = tron_tools.decompose_instance('job_a.start')
assert result == ('job_a', 'start')
def test_decompose_instance_invalid(self):
with pytest.raises(InvalidInstanceConfig):
tron_tools.decompose_instance('job_a')
def test_format_master_config(self):
master_config = {
'some_key': 101,
'another': 'hello',
'mesos_options': {
'default_volumes': [{
'container_path': '/nail/tmp',
'host_path': '/nail/tmp',
'mode': 'RW',
}],
'other_mesos': True,
},
}
paasta_volumes = [{
'containerPath': '/nail/other',
'hostPath': '/other/home',
'mode': 'RW',
}]
dockercfg = 'file://somewhere'
result = tron_tools.format_master_config(master_config, paasta_volumes, dockercfg)
assert result == {
'some_key': 101,
'another': 'hello',
'mesos_options': {
'default_volumes': [{
'container_path': '/nail/other',
'host_path': '/other/home',
'mode': 'RW',
}],
'dockercfg_location': dockercfg,
'other_mesos': True,
},
}
def test_format_tron_action_dict_default_executor(self):
action_dict = {
'name': 'do_something',
'command': 'echo something',
'requires': ['required_action'],
'retries': 2,
'expected_runtime': '30m',
}
branch_dict = {
'docker_image': 'my_service:paasta-123abcde',
'git_sha': 'aabbcc44',
'desired_state': 'start',
'force_bounce': None,
}
action_config = tron_tools.TronActionConfig(
service='my_service',
instance=tron_tools.compose_instance('my_job', 'do_something'),
config_dict=action_dict,
branch_dict=branch_dict,
cluster="test-cluster",
)
result = tron_tools.format_tron_action_dict(action_config)
assert result == {
'name': 'do_something',
'command': 'echo something',
'requires': ['required_action'],
'retries': 2,
'expected_runtime': '30m',
}
def test_format_tron_action_dict_paasta(self):
action_dict = {
'name': 'do_something',
'command': 'echo something',
'requires': ['required_action'],
'retries': 2,
'retries_delay': '5m',
'service': 'my_service',
'deploy_group': 'prod',
'executor': 'paasta',
'cpus': 2,
'mem': 1200,
'pool': 'special_pool',
'env': {'SHELL': '/bin/bash'},
'extra_volumes': [
{'containerPath': '/nail/tmp', 'hostPath': '/nail/tmp', 'mode': 'RW'},
],
}
branch_dict = {
'docker_image': 'my_service:paasta-123abcde',
'git_sha': 'aabbcc44',
'desired_state': 'start',
'force_bounce': None,
}
action_config = tron_tools.TronActionConfig(
service='my_service',
instance=tron_tools.compose_instance('my_job', 'do_something'),
config_dict=action_dict,
branch_dict=branch_dict,
cluster="test-cluster",
)
with mock.patch.object(
action_config,
'get_docker_registry',
return_value='docker-registry.com:400',
):
result = tron_tools.format_tron_action_dict(action_config)
assert result == {
'name': 'do_something',
'command': 'echo something',
'requires': ['required_action'],
'retries': 2,
'retries_delay': '5m',
'docker_image': mock.ANY,
'executor': 'mesos',
'cpus': 2,
'mem': 1200,
'env': mock.ANY,
'extra_volumes': [{
'container_path': '/nail/tmp',
'host_path': '/nail/tmp',
'mode': 'RW',
}],
'docker_parameters': mock.ANY,
'constraints': [
{
'attribute': 'pool',
'operator': 'LIKE',
'value': 'special_pool',
},
],
}
expected_docker = '{}/{}'.format('docker-registry.com:400', branch_dict['docker_image'])
assert result['docker_image'] == expected_docker
assert result['env']['SHELL'] == '/bin/bash'
assert isinstance(result['docker_parameters'], list)
def test_format_tron_action_dict_paasta_no_branch_dict(self):
action_dict = {
'name': 'do_something',
'command': 'echo something',
'requires': ['required_action'],
'retries': 2,
'service': 'my_service',
'deploy_group': 'prod',
'executor': 'paasta',
'cpus': 2,
'mem': 1200,
'pool': 'special_pool',
'env': {'SHELL': '/bin/bash'},
'extra_volumes': [
{'containerPath': '/nail/tmp', 'hostPath': '/nail/tmp', 'mode': 'RW'},
],
}
action_config = tron_tools.TronActionConfig(
service='my_service',
instance=tron_tools.compose_instance('my_job', 'do_something'),
config_dict=action_dict,
branch_dict=None,
cluster="paasta-dev",
)
result = tron_tools.format_tron_action_dict(action_config)
assert result == {
'name': 'do_something',
'command': 'echo something',
'requires': ['required_action'],
'retries': 2,
'docker_image': '',
'executor': 'mesos',
'cpus': 2,
'mem': 1200,
'env': mock.ANY,
'extra_volumes': [{
'container_path': '/nail/tmp',
'host_path': '/nail/tmp',
'mode': 'RW',
}],
'docker_parameters': mock.ANY,
'constraints': [
{
'attribute': 'pool',
'operator': 'LIKE',
'value': 'special_pool',
},
],
}
assert result['env']['SHELL'] == '/bin/bash'
assert isinstance(result['docker_parameters'], list)
    @mock.patch('paasta_tools.tron_tools.service_configuration_lib.read_extra_service_information', autospec=True)
    @mock.patch('paasta_tools.tron_tools.service_configuration_lib._read_yaml_file', autospec=True)
    @mock.patch('paasta_tools.tron_tools.TronJobConfig', autospec=True)
    def test_load_tron_from_service_dir(self, mock_job_config, mock_read_file, mock_read_service_info):
        """Configs found in the service's own directory win outright.

        When read_extra_service_information returns data, the tron-directory
        fallback (_read_yaml_file) must not be consulted at all, each job dict
        becomes one TronJobConfig, and all non-'jobs' keys are returned as
        extra config.  (Mock parameters map to decorators bottom-up.)
        """
        job_1 = mock.Mock()
        job_2 = mock.Mock()
        config_dict = {
            'value_a': 20,
            'other_value': 'string',
            'jobs': [job_1, job_2],
        }
        mock_read_service_info.return_value = config_dict
        mock_read_file.return_value = {}
        soa_dir = '/other/services'
        job_configs, extra_config = tron_tools.load_tron_service_config(
            service='foo',
            cluster='dev',
            load_deployments=True,
            soa_dir=soa_dir,
        )
        # 'jobs' is stripped out; everything else passes through untouched.
        assert extra_config == {
            'value_a': 20,
            'other_value': 'string',
        }
        assert job_configs == [mock_job_config.return_value for i in range(2)]
        assert mock_job_config.call_args_list == [
            mock.call(config_dict=job_1, cluster='dev', service='foo', load_deployments=True, soa_dir=soa_dir),
            mock.call(config_dict=job_2, cluster='dev', service='foo', load_deployments=True, soa_dir=soa_dir),
        ]
        # Service dir consulted once; tron-dir fallback never touched.
        assert mock_read_service_info.call_count == 1
        assert mock_read_file.call_count == 0
        mock_read_service_info.assert_has_calls([mock.call('foo', 'tron-dev', '/other/services')])
    @mock.patch('paasta_tools.tron_tools.service_configuration_lib.read_extra_service_information', autospec=True)
    @mock.patch('paasta_tools.tron_tools.service_configuration_lib._read_yaml_file', autospec=True)
    @mock.patch('paasta_tools.tron_tools.TronJobConfig', autospec=True)
    def test_load_tron_from_tron_dir(self, mock_job_config, mock_read_file, mock_read_service_info):
        """Falls back to <soa_dir>/tron/<cluster>/<service>.yaml when the
        service directory yields nothing.

        Mirrors test_load_tron_from_service_dir, but with the data coming
        from _read_yaml_file instead; both lookups are expected to happen.
        """
        job_1 = mock.Mock()
        job_2 = mock.Mock()
        config_dict = {
            'value_a': 20,
            'other_value': 'string',
            'jobs': [job_1, job_2],
        }
        # Service dir is empty; only the tron dir has data this time.
        mock_read_service_info.return_value = {}
        mock_read_file.return_value = config_dict
        soa_dir = '/other/services'
        job_configs, extra_config = tron_tools.load_tron_service_config(
            'foo',
            'dev',
            load_deployments=True,
            soa_dir=soa_dir,
        )
        assert extra_config == {
            'value_a': 20,
            'other_value': 'string',
        }
        assert job_configs == [mock_job_config.return_value for i in range(2)]
        assert mock_job_config.call_args_list == [
            mock.call(config_dict=job_1, cluster='dev', load_deployments=True, service='foo', soa_dir=soa_dir),
            mock.call(config_dict=job_2, cluster='dev', load_deployments=True, service='foo', soa_dir=soa_dir),
        ]
        # Both locations consulted: service dir first, then the tron dir path.
        assert mock_read_service_info.call_count == 1
        assert mock_read_file.call_count == 1
        mock_read_service_info.assert_has_calls([mock.call('foo', 'tron-dev', '/other/services')])
        mock_read_file.assert_has_calls([mock.call('/other/services/tron/dev/foo.yaml')])
@mock.patch('paasta_tools.tron_tools.service_configuration_lib.read_extra_service_information', autospec=True)
@mock.patch('paasta_tools.tron_tools.service_configuration_lib._read_yaml_file', autospec=True)
def test_load_tron_service_config_empty(self, mock_read_file, mock_read_service_info):
mock_read_file.return_value = {}
mock_read_service_info.return_value = {}
soa_dir = '/other/services'
with pytest.raises(NoConfigurationForServiceError):
tron_tools.load_tron_service_config('foo', 'dev', soa_dir=soa_dir)
assert mock_read_file.call_count == 1
assert mock_read_service_info.call_count == 1
mock_read_file.assert_has_calls([mock.call('/other/services/tron/dev/foo.yaml')])
mock_read_service_info.assert_has_calls([mock.call('foo', 'tron-dev', soa_dir)])
@mock.patch('paasta_tools.tron_tools.service_configuration_lib.read_extra_service_information', autospec=True)
@mock.patch('paasta_tools.tron_tools.service_configuration_lib._read_yaml_file', autospec=True)
def test_load_tron_service_config_jobs_none(self, mock_read_file, mock_read_service_info):
mock_read_file.return_value = {'jobs': None}
mock_read_service_info.return_value = None
soa_dir = '/other/services'
jc, _ = tron_tools.load_tron_service_config('foo', 'dev', soa_dir=soa_dir)
assert jc == []
    @mock.patch('paasta_tools.tron_tools.load_system_paasta_config', autospec=True)
    @mock.patch('paasta_tools.tron_tools.load_tron_config', autospec=True)
    @mock.patch('paasta_tools.tron_tools.load_tron_service_config', autospec=True)
    @mock.patch('paasta_tools.tron_tools.format_tron_job_dict', autospec=True)
    @mock.patch('paasta_tools.tron_tools.format_master_config', autospec=True)
    @mock.patch('paasta_tools.tron_tools.yaml.dump', autospec=True)
    @pytest.mark.parametrize('service', [MASTER_NAMESPACE, 'my_app'])
    def test_create_complete_config(
        self,
        mock_yaml_dump,
        mock_format_master_config,
        mock_format_job,
        mock_tron_service_config,
        mock_tron_system_config,
        mock_system_config,
        service,
    ):
        """create_complete_config dumps jobs + extra config to YAML.

        Parametrized over the MASTER namespace and an ordinary service:
        format_master_config must run only for MASTER.  (Mock parameters map
        to the patch decorators bottom-up; the parametrized arg comes last.)
        """
        job_config = tron_tools.TronJobConfig({}, 'fake-cluster')
        other_config = {
            'my_config_value': [1, 2],
        }
        mock_format_master_config.return_value = other_config
        mock_tron_service_config.return_value = (
            [job_config],
            other_config,
        )
        soa_dir = '/testing/services'
        cluster = 'fake-cluster'
        # The function's return value is whatever yaml.dump produced.
        assert tron_tools.create_complete_config(
            service=service, cluster=cluster, soa_dir=soa_dir,
        ) == mock_yaml_dump.return_value
        mock_tron_service_config.assert_called_once_with(
            service=service,
            cluster=cluster,
            load_deployments=True,
            soa_dir=soa_dir,
        )
        # Master-namespace-only post-processing of the extra config.
        if service == MASTER_NAMESPACE:
            mock_format_master_config.assert_called_once_with(
                other_config,
                mock_system_config.return_value.get_volumes.return_value,
                mock_system_config.return_value.get_dockercfg_location.return_value,
            )
        else:
            assert mock_format_master_config.call_count == 0
        mock_format_job.assert_called_once_with(
            job_config,
        )
        # The dumped document is the extra config plus the formatted jobs.
        complete_config = other_config.copy()
        complete_config.update({
            'jobs': [mock_format_job.return_value],
        })
        mock_yaml_dump.assert_called_once_with(
            complete_config,
            Dumper=mock.ANY,
            default_flow_style=mock.ANY,
        )
    @mock.patch('paasta_tools.tron_tools.load_tron_service_config', autospec=True)
    @mock.patch('paasta_tools.tron_tools.format_tron_job_dict', autospec=True)
    @mock.patch('subprocess.run', autospec=True)
    def test_validate_complete_config_paasta_validate_fails(
        self,
        mock_run,
        mock_format_job,
        mock_load_config,
    ):
        """Per-job validation errors short-circuit the whole pipeline.

        If a job's own validate() reports errors, neither formatting nor the
        external tronfig subprocess should run; the errors come back as-is.
        """
        job_config = mock.Mock(spec_set=tron_tools.TronJobConfig)
        job_config.validate = mock.Mock(return_value=['some error'])
        mock_load_config.return_value = ([job_config], {})
        result = tron_tools.validate_complete_config(
            'a_service',
            'a-cluster',
        )
        assert mock_load_config.call_count == 1
        # Short-circuit: no formatting, no subprocess after validate() failed.
        assert mock_format_job.call_count == 0
        assert mock_run.call_count == 0
        assert result == ['some error']
    @mock.patch('paasta_tools.tron_tools.load_tron_service_config', autospec=True)
    @mock.patch('paasta_tools.tron_tools.format_tron_job_dict', autospec=True)
    @mock.patch('subprocess.run', autospec=True)
    def test_validate_complete_config_tronfig_fails(
        self,
        mock_run,
        mock_format_job,
        mock_load_config,
    ):
        """A non-zero tronfig exit code surfaces its stdout as the error.

        Job-level validation passes, so formatting and the tronfig subprocess
        both run; the subprocess's stdout becomes the returned error list.
        """
        job_config = mock.Mock(spec_set=tron_tools.TronJobConfig)
        job_config.validate = mock.Mock(return_value=[])
        mock_load_config.return_value = ([job_config], {})
        mock_format_job.return_value = {}
        # Simulate tronfig rejecting the rendered config.
        mock_run.return_value = mock.Mock(
            returncode=1,
            stdout='tronfig error',
            stderr='',
        )
        result = tron_tools.validate_complete_config(
            'a_service',
            'a-cluster',
        )
        assert mock_load_config.call_count == 1
        assert mock_format_job.call_count == 1
        assert mock_run.call_count == 1
        assert result == ['tronfig error']
@mock.patch('paasta_tools.tron_tools.load_tron_service_config', autospec=True)
@mock.patch('paasta_tools.tron_tools.format_tron_job_dict', autospec=True)
@mock.patch('subprocess.run', autospec=True)
def test_validate_complete_config_passes(
self,
mock_run,
mock_format_job,
mock_load_config,
):
job_config = mock.Mock(spec_set=tron_tools.TronJobConfig)
job_config.validate = mock.Mock(return_value=[])
mock_load_config.return_value = ([job_config], {})
mock_format_job.return_value = {}
mock_run.return_value = mock.Mock(
returncode=0,
stdout='OK',
stderr='',
)
result = tron_tools.validate_complete_config(
'a_service',
'a-cluster',
)
assert mock_load_config.call_count == 1
assert mock_format_job.call_count == 1
assert mock_run.call_count == 1
assert not result
    @mock.patch('paasta_tools.tron_tools.load_tron_service_config', autospec=True)
    @mock.patch('paasta_tools.tron_tools.format_tron_job_dict', autospec=True)
    @mock.patch('subprocess.run', autospec=True)
    @pytest.mark.parametrize('namespace,valid', [('MASTER', True), ('bob', False)])
    def test_validate_complete_config_non_job_keys(
        self,
        mock_run,
        mock_format_job,
        mock_load_config,
        namespace,
        valid,
    ):
        """Non-job keys (e.g. time_zone) are only legal in the MASTER namespace.

        NOTE(review): when `valid` is True this test asserts nothing about
        `result` beyond load being called — consider also asserting the
        happy-path result is error-free.
        """
        job_config = mock.Mock(spec_set=tron_tools.TronJobConfig)
        job_config.validate = mock.Mock(return_value=[])
        # 'time_zone' is a non-job key returned alongside the jobs.
        mock_load_config.return_value = ([job_config], {'time_zone': 'US/Pacific'})
        mock_format_job.return_value = {}
        result = tron_tools.validate_complete_config(
            namespace,
            'a-cluster',
        )
        assert mock_load_config.call_count == 1
        if not valid:
            # A single error naming the offending key is expected.
            assert len(result) == 1
            assert 'time_zone' in result[0]
@mock.patch('os.walk', autospec=True)
@mock.patch('os.listdir', autospec=True)
def test_get_tron_namespaces_for_cluster(self, mock_ls, mock_walk):
cluster_name = 'stage'
expected_namespaces = ['app', 'foo', 'cool']
mock_walk.return_value = [
('/my_soa_dir/foo', [], ['tron-stage.yaml']),
('/my_soa_dir/app', [], ['tron-stage.yaml']),
('my_soa_dir/woo', [], ['something-else.yaml']),
]
mock_ls.return_value = ['cool.yaml']
soa_dir = '/my_soa_dir'
namespaces = tron_tools.get_tron_namespaces_for_cluster(
cluster=cluster_name,
soa_dir=soa_dir,
)
for expected_namespace in expected_namespaces:
assert expected_namespace in namespaces
assert len(namespaces) == 3
@mock.patch('os.walk', autospec=True)
@mock.patch('os.listdir', autospec=True)
@mock.patch('paasta_tools.tron_tools.load_tron_config', autospec=True)
def test_get_tron_namespaces_for_cluster_default(self, mock_system_tron_config, mock_ls, mock_walk):
mock_system_tron_config.return_value.get_cluster_name.return_value = 'this-cluster'
mock_walk.return_value = [('/my_soa_dir/this-service', [], ['tron-this-cluster.yaml'])]
soa_dir = '/my_soa_dir'
expected_namespaces = ['this-service']
namespaces = tron_tools.get_tron_namespaces_for_cluster(
soa_dir=soa_dir,
)
assert namespaces == expected_namespaces
@mock.patch('os.walk', autospec=True)
@mock.patch('os.listdir', autospec=True)
def test_get_tron_namespaces_for_cluster_conflict(self, mock_ls, mock_walk):
cluster_name = 'stage'
mock_walk.return_value = [
('/my_soa_dir/cool', [], ['tron-stage.yaml']),
]
mock_ls.return_value = ['cool.yaml']
soa_dir = '/my_soa_dir'
with pytest.raises(tron_tools.ConflictingNamespacesError):
tron_tools.get_tron_namespaces_for_cluster(
cluster=cluster_name,
soa_dir=soa_dir,
)
@mock.patch('glob.glob', autospec=True)
def test_list_tron_clusters(self, mock_glob):
mock_glob.return_value = [
'/home/service/tron-dev-cluster2.yaml',
'/home/service/tron-prod.yaml',
'/home/service/marathon-other.yaml',
]
result = tron_tools.list_tron_clusters('foo')
assert sorted(result) == ['dev-cluster2', 'prod']
@mock.patch('service_configuration_lib.read_extra_service_information', autospec=True)
def test_load_tron_yaml_picks_service_dir_first(mock_read_extra_service_configuration):
    """The service-directory config wins whenever it is available."""
    expected_config = "test"
    mock_read_extra_service_configuration.return_value = expected_config
    loaded = tron_tools.load_tron_yaml(service="foo", cluster="bar", soa_dir="test")
    assert loaded == expected_config
    mock_read_extra_service_configuration.assert_called_once_with(
        service_name='foo', extra_info="tron-bar", soa_dir="test",
    )
@mock.patch('service_configuration_lib.read_extra_service_information', autospec=True)
@mock.patch('service_configuration_lib._read_yaml_file', autospec=True)
@mock.patch('os.path.abspath', autospec=True)
def test_load_tron_yaml_picks_service_falls_back_to_tron_dir(
    mock_abspath,
    mock_read_yaml_file,
    mock_read_extra_service_configuration,
):
    """When the service dir has no config, the tron-dir yaml is loaded instead.

    (Mock parameters map to the patch decorators bottom-up.)
    """
    config = "test"
    # Service-dir lookup yields nothing, forcing the fallback path.
    mock_read_extra_service_configuration.return_value = None
    mock_read_yaml_file.return_value = config
    assert config == tron_tools.load_tron_yaml(service="foo", cluster="bar", soa_dir="test")
    mock_read_extra_service_configuration.assert_called_once_with(
        service_name='foo', extra_info="tron-bar", soa_dir="test",
    )
    mock_read_yaml_file.assert_called_once_with("test/tron/bar/foo.yaml")
@mock.patch('paasta_tools.tron_tools.load_tron_yaml', autospec=True)
def test_load_tron_service_config_interprets_correctly(mock_load_tron_yaml):
    """The raw yaml dict is split into TronJobConfigs and leftover extras.

    Each entry under 'jobs' becomes a TronJobConfig (relies on
    TronJobConfig.__eq__ for the comparison); every other top-level key is
    returned in the extra-config dict.
    """
    mock_load_tron_yaml.return_value = {
        'extra': 'data',
        'jobs': [{
            'job1': {
                'actions': [{'action1': {}}],
            },
        }],
    }
    actual1, actual2 = tron_tools.load_tron_service_config(
        service='service', cluster='test-cluster', load_deployments=False, soa_dir='fake',
    )
    expected1 = [
        tron_tools.TronJobConfig(
            service='service',
            cluster='test-cluster',
            config_dict={'job1': {'actions': [{'action1': {}}]}},
            load_deployments=False,
            soa_dir='fake',
        ),
    ]
    expected2 = {'extra': 'data'}
    assert actual1 == expected1
    assert actual2 == expected2
| 29,117 | 7,458 | 293 |
165e045e914f6250cb2bec371c5b5d7ec48cd7c9 | 121 | py | Python | my_utilize/my_yield.py | FengJunJian/tutorials_for_demo | 36ab749241193aeb17dec14d0c7503d858c84f8e | [
"Apache-2.0"
] | null | null | null | my_utilize/my_yield.py | FengJunJian/tutorials_for_demo | 36ab749241193aeb17dec14d0c7503d858c84f8e | [
"Apache-2.0"
] | null | null | null | my_utilize/my_yield.py | FengJunJian/tutorials_for_demo | 36ab749241193aeb17dec14d0c7503d858c84f8e | [
"Apache-2.0"
] | null | null | null |
fun=generator()
fun.__next__() | 13.444444 | 24 | 0.520661 |
def generator():
    """Yield the integers 0 through 4, printing each one as it is produced."""
    for i in range(5):
        print('next', i)
        yield i
fun = generator()
# Idiom fix: use the builtin next() instead of calling the dunder directly.
next(fun)
0c5c7fd38a2d0e9f0be6a523befaf9e1f678032f | 3,944 | py | Python | intprim/util/visualization.py | carlos-cardoso/intprim | cf6d83996e4a1ede3cda19c7906c6166ec5a6f6e | [
"MIT"
] | 49 | 2017-11-23T22:37:36.000Z | 2022-03-05T04:39:13.000Z | intprim/util/visualization.py | souljaboy764/intprim | ecf905ce69dc14215230be3b3819d2236223e9ba | [
"MIT"
] | 5 | 2019-03-31T08:47:33.000Z | 2021-08-19T23:54:51.000Z | intprim/util/visualization.py | souljaboy764/intprim | ecf905ce69dc14215230be3b3819d2236223e9ba | [
"MIT"
] | 20 | 2018-01-22T20:17:24.000Z | 2022-02-18T01:07:54.000Z | import matplotlib.pyplot as plt
import numpy as np
# Displays the probability that the current trajectory matches the stored trajectores at every instant in time.
def plot_distribution(dof_names, mean, upper_bound, lower_bound):
    """Plots a given probability distribution.

    Draws one subplot per degree of freedom, grouping up to four subplots
    per figure: the mean as a black line over a shaded band between
    lower_bound and upper_bound.

    mean / upper_bound / lower_bound: arrays with one row per degree of
    freedom; dof_names labels the subplot titles.
    """
    # At most 4 subplots per figure; fewer if there are fewer DOFs.
    figures_per_plot = np.min([4, mean.shape[0]])
    for index in range(mean.shape[0]):
        # Start a fresh figure whenever the current one is full.
        if(index % figures_per_plot == 0):
            fig = plt.figure()
        new_plot = plt.subplot(figures_per_plot, 1, (index % figures_per_plot) + 1)
        domain = np.linspace(0, 1, mean.shape[1])
        # Shaded confidence band, then the mean on top.
        new_plot.fill_between(domain, upper_bound[index], lower_bound[index], color = '#ccf5ff')
        new_plot.plot(domain, mean[index], color = '#000000')
        new_plot.set_title('Trajectory distribution for degree ' + dof_names[index])
        fig.tight_layout()
    plt.show(block = False)
def plot_trajectory(dof_names, trajectory, observed_trajectory, mean_trajectory = None):
    """Plots a given trajectory.

    Shows a 2-D overview figure (dimension 0 vs dimension 1), then one
    subplot per degree of freedom comparing the inferred trajectory with
    the observed one and, optionally, a mean trajectory.

    Each trajectory argument holds one row per degree of freedom;
    dof_names labels the subplot titles.
    """
    fig = plt.figure()
    plt.plot(trajectory[0], trajectory[1])
    plt.plot(observed_trajectory[0], observed_trajectory[1])
    if(mean_trajectory is not None):
        plt.plot(mean_trajectory[0], mean_trajectory[1])
    fig.suptitle('Probable trajectory')
    fig = plt.figure()
    for index, degree in enumerate(trajectory):
        new_plot = plt.subplot(len(trajectory), 1, index + 1)
        # Idiom fix: use the row yielded by enumerate instead of re-indexing
        # trajectory[index] (the loop variable was previously unused).
        domain = np.linspace(0, 1, len(degree))
        new_plot.plot(domain, degree, label = "Inferred")
        domain = np.linspace(0, 1, len(observed_trajectory[index]))
        new_plot.plot(domain, observed_trajectory[index], label = "Observed")
        if(mean_trajectory is not None):
            domain = np.linspace(0, 1, len(mean_trajectory[index]))
            new_plot.plot(domain, mean_trajectory[index], label = "Mean")
        new_plot.set_title('Trajectory for degree ' + dof_names[index])
        new_plot.legend()
    plt.show()
def plot_partial_trajectory(trajectory, partial_observed_trajectory, mean_trajectory = None):
    """Overlay an inferred trajectory on top of a partially observed one.

    The observed segment is drawn first (solid blue), the inferred trajectory
    second (dashed red), and an optional mean trajectory last (green).  A text
    annotation reports how many samples have been observed so far.
    """
    figure = plt.figure()
    plt.plot(
        partial_observed_trajectory[0], partial_observed_trajectory[1],
        color = "#6ba3ff", label = "Observed", linewidth = 3.0,
    )
    plt.plot(
        trajectory[0], trajectory[1], "--",
        color = "#ff6a6a", label = "Inferred", linewidth = 2.0,
    )
    if mean_trajectory is not None:
        plt.plot(mean_trajectory[0], mean_trajectory[1], color = "#85d87f", label = "Mean")
    figure.suptitle('Probable trajectory')
    plt.legend()
    sample_count = str(partial_observed_trajectory.shape[1])
    plt.text(0.01, 0.7, "Observed samples: " + sample_count, transform = figure.axes[0].transAxes)
    plt.show()
def plot_approximation(dof_names, trajectory, approx_trajectory, approx_trajectory_deriv):
    """Plots a trajectory and its approximation.

    For every degree of freedom, shows three stacked subplots: the original
    data, the approximated data, and the approximation's derivative.
    """
    domain = np.linspace(0, 1, len(trajectory[0]))
    approx_domain = np.linspace(0, 1, len(approx_trajectory[0]))
    for dof in range(len(trajectory)):
        plt.figure()
        new_plot = plt.subplot(3, 1, 1)
        new_plot.plot(domain, trajectory[dof])
        new_plot.set_title('Original ' + dof_names[dof] + ' Data')
        new_plot = plt.subplot(3, 1, 2)
        # Only the position component of the state is plotted here.
        new_plot.plot(approx_domain, approx_trajectory[dof])
        new_plot.set_title('Approximated ' + dof_names[dof] + ' Data')
        new_plot = plt.subplot(3, 1, 3)
        # Only the position component of the state is plotted here.
        new_plot.plot(approx_domain, approx_trajectory_deriv[dof])
        new_plot.set_title('Approximated ' + dof_names[dof] + ' Derivative')
    plt.show()
| 38.666667 | 132 | 0.671653 | import matplotlib.pyplot as plt
import numpy as np
# Displays the probability that the current trajectory matches the stored trajectores at every instant in time.
def plot_distribution(dof_names, mean, upper_bound, lower_bound):
"""Plots a given probability distribution.
"""
figures_per_plot = np.min([4, mean.shape[0]])
for index in range(mean.shape[0]):
if(index % figures_per_plot == 0):
fig = plt.figure()
new_plot = plt.subplot(figures_per_plot, 1, (index % figures_per_plot) + 1)
domain = np.linspace(0, 1, mean.shape[1])
new_plot.fill_between(domain, upper_bound[index], lower_bound[index], color = '#ccf5ff')
new_plot.plot(domain, mean[index], color = '#000000')
new_plot.set_title('Trajectory distribution for degree ' + dof_names[index])
fig.tight_layout()
plt.show(block = False)
def plot_trajectory(dof_names, trajectory, observed_trajectory, mean_trajectory = None):
    """Plots a given trajectory.

    Shows a 2-D overview figure (dimension 0 vs dimension 1), then one
    subplot per degree of freedom comparing the inferred trajectory with
    the observed one and, optionally, a mean trajectory.
    """
    fig = plt.figure()
    plt.plot(trajectory[0], trajectory[1])
    plt.plot(observed_trajectory[0], observed_trajectory[1])
    if(mean_trajectory is not None):
        plt.plot(mean_trajectory[0], mean_trajectory[1])
    fig.suptitle('Probable trajectory')
    fig = plt.figure()
    for index, degree in enumerate(trajectory):
        new_plot = plt.subplot(len(trajectory), 1, index + 1)
        # Idiom fix: use the row yielded by enumerate instead of re-indexing
        # trajectory[index] (the loop variable was previously unused).
        domain = np.linspace(0, 1, len(degree))
        new_plot.plot(domain, degree, label = "Inferred")
        domain = np.linspace(0, 1, len(observed_trajectory[index]))
        new_plot.plot(domain, observed_trajectory[index], label = "Observed")
        if(mean_trajectory is not None):
            domain = np.linspace(0, 1, len(mean_trajectory[index]))
            new_plot.plot(domain, mean_trajectory[index], label = "Mean")
        new_plot.set_title('Trajectory for degree ' + dof_names[index])
        new_plot.legend()
    plt.show()
def plot_partial_trajectory(trajectory, partial_observed_trajectory, mean_trajectory = None):
    """Overlay an inferred trajectory on top of a partially observed one."""
    figure = plt.figure()
    plt.plot(
        partial_observed_trajectory[0], partial_observed_trajectory[1],
        color = "#6ba3ff", label = "Observed", linewidth = 3.0,
    )
    plt.plot(
        trajectory[0], trajectory[1], "--",
        color = "#ff6a6a", label = "Inferred", linewidth = 2.0,
    )
    if mean_trajectory is not None:
        plt.plot(mean_trajectory[0], mean_trajectory[1], color = "#85d87f", label = "Mean")
    figure.suptitle('Probable trajectory')
    plt.legend()
    sample_count = str(partial_observed_trajectory.shape[1])
    plt.text(0.01, 0.7, "Observed samples: " + sample_count, transform = figure.axes[0].transAxes)
    plt.show()
def plot_approximation(dof_names, trajectory, approx_trajectory, approx_trajectory_deriv):
"""Plots a trajectory and its approximation.
"""
domain = np.linspace(0, 1, len(trajectory[0]))
approx_domain = np.linspace(0, 1, len(approx_trajectory[0]))
for dof in range(len(trajectory)):
plt.figure()
new_plot = plt.subplot(3, 1, 1)
new_plot.plot(domain, trajectory[dof])
new_plot.set_title('Original ' + dof_names[dof] + ' Data')
new_plot = plt.subplot(3, 1, 2)
# The trailing [0] is the dimension of the the state. In this case only plot position.
new_plot.plot(approx_domain, approx_trajectory[dof])
new_plot.set_title('Approximated ' + dof_names[dof] + ' Data')
new_plot = plt.subplot(3, 1, 3)
# The trailing [0] is the dimension of the the state. In this case only plot position.
new_plot.plot(approx_domain, approx_trajectory_deriv[dof])
new_plot.set_title('Approximated ' + dof_names[dof] + ' Derivative')
plt.show()
def plot_weights(weight_matrix):
    """Displays the given weight matrix as a grayscale image with a colorbar."""
    plt.figure()
    plt.imshow(weight_matrix, cmap = "gray", interpolation = "none")
    plt.colorbar()
    plt.show()
| 131 | 0 | 23 |
e677293c96f1e9bdb3625f862f7f5d5efbd3c275 | 4,620 | py | Python | control_de_flujo.py | EUD-curso-python/control_de_flujo-gusadolfo123 | 6e5ab478c40746383c260f1d6779dfa11bbb80bf | [
"MIT"
] | null | null | null | control_de_flujo.py | EUD-curso-python/control_de_flujo-gusadolfo123 | 6e5ab478c40746383c260f1d6779dfa11bbb80bf | [
"MIT"
] | null | null | null | control_de_flujo.py | EUD-curso-python/control_de_flujo-gusadolfo123 | 6e5ab478c40746383c260f1d6779dfa11bbb80bf | [
"MIT"
] | null | null | null |
"""Guarde en lista `naturales` los primeros 100 números naturales (desde el 1)
usando el bucle while
"""
naturales = []
i = 1
while i <= 100:
naturales.append(i)
i += 1
"""Guarde en `acumulado` una lista con el siguiente patrón:
['1','1 2','1 2 3','1 2 3 4','1 2 3 4 5',...,'...47 48 49 50']
Hasta el número 50.
"""
acumulado = []
i = 1
while True:
if i == 1:
acumulado.append(str(i))
else:
acumulado.append(acumulado[i-2] + ' ' + str(i))
i += 1
if i > 50:
break;
"""Guarde en `suma100` el entero de la suma de todos los números entre 1 y 100:
"""
suma100 = 0
for i in range(1, 101):
suma100 += i
"""Guarde en `tabla100` un string con los primeros 10 múltiplos del número 134,
separados por coma, así:
'134,268,...'
"""
tabla100 = ','.join([str(i * 134) for i in range(1, 11)])
"""Guardar en `multiplos3` la cantidad de números que son múltiplos de 3 y
menores o iguales a 300 en la lista `lista1` que se define a continuación (la lista
está ordenada).
"""
lista1 = [12, 15, 20, 27, 32, 39, 42, 48, 55, 66, 75, 82, 89, 91, 93, 105, 123, 132, 150, 180, 201, 203, 231, 250, 260, 267, 300, 304, 310, 312, 321, 326]
multiplos3 = len([i for i in lista1 if (i % 3 == 0) and (i < 300)])
"""Guardar en `regresivo50` una lista con la cuenta regresiva desde el número
50 hasta el 1, así:
[
'50 49 48 47...',
'49 48 47 46...',
...
'5 4 3 2 1',
'4 3 2 1',
'3 2 1',
'2 1',
'1'
]
"""
regresivo50 = []
i = 0
while True:
if i == 0:
regresivo50.insert(i, str(1))
else:
regresivo50.insert(i, str(i + 1) + ' ' + regresivo50[i-1])
i += 1
if i >= 50:
break;
regresivo50.reverse()
"""Invierta la siguiente lista usando el bucle for y guarde el resultado en
`invertido` (sin hacer uso de la función `reversed` ni del método `reverse`)
"""
lista2 = list(range(1, 70, 5))
invertido = [lista2[i] for i in range(len(lista2) - 1, -1, -1)]
"""Guardar en `primos` una lista con todos los números primos desde el 37 al 300
Nota: Un número primo es un número entero que no se puede calcular multiplicando
otros números enteros.
"""
# forma 1
#primos = [i for i in range(37, 301) if ([True for x in range(2, i) if (i % x) == 0].__contains__(True)) == False]
# forma 2
primos = []
for i in range(37, 301):
esPrimo = True
for x in range(2, i):
if (i % x) == 0:
esPrimo = False
break
if esPrimo == True: primos.append(i)
"""Guardar en `fibonacci` una lista con los primeros 60 términos de la serie de
Fibonacci.
Nota: En la serie de Fibonacci, los 2 primeros términos son 0 y 1, y a partir
del segundo cada uno se calcula sumando los dos anteriores términos de la serie.
[0, 1, 1, 2, 3, 5, 8, ...]
"""
fibonacci = []
for i in range(0, 60):
if i == 0:
fibonacci.append(i)
elif i == 1:
fibonacci.append(i)
else:
fibonacci.append(fibonacci[i-1] + fibonacci[i-2])
"""Guardar en `factorial` el factorial de 30
El factorial (símbolo:!) Significa multiplicar todos los números enteros desde
el 1 hasta el número elegido.
Por ejemplo, el factorial de 5 se calcula así:
5! = 5 × 4 × 3 × 2 × 1 = 120
"""
factorial = 1
for i in range(1, 31):
factorial *= i
"""Guarde en lista `pares` los elementos de la siguiente lista que esten
presentes en posiciones pares, pero solo hasta la posición 80.
"""
lista3 = [941, 149, 672, 208, 99, 562, 749, 947, 251, 750, 889, 596, 836, 742, 512, 19, 674, 142, 272, 773, 859, 598, 898, 930, 119, 107, 798, 447, 348, 402, 33, 678, 460, 144, 168, 290, 929, 254, 233, 563, 48, 249, 890, 871, 484, 265, 831, 694, 366, 499, 271, 123, 870, 986, 449, 894, 347, 346, 519, 969, 242, 57, 985, 250, 490, 93, 999, 373, 355, 466, 416, 937, 214, 707, 834, 126, 698, 268, 217, 406, 334, 285, 429, 130, 393, 396, 936, 572, 688, 765, 404, 970, 159, 98, 545, 412, 629, 361, 70, 602]
pares = [lista3[i] for i in range(0, len(lista3[0:81])) if i % 2 == 0]
"""Guarde en lista `cubos` el cubo (potencia elevada a la 3) de los números del
1 al 100.
"""
cubos = [i**3 for i in range(1, 101)]
"""Encuentre la suma de la serie 2 +22 + 222 + 2222 + .. hasta sumar 10 términos
y guardar resultado en variable `suma_2s`
"""
suma_2s = 0
suma = [str('2') * i for i in range(1, 11)]
for x in range(0, len(suma)):
suma_2s += int(suma[x])
"""Guardar en un string llamado `patron` el siguiente patrón llegando a una
cantidad máxima de asteriscos de 30.
*
**
***
****
*****
******
*******
********
*********
********
*******
******
*****
****
***
**
*
"""
items = [str('*') * i for i in range(1, 31)]
items2 = [str('*') * i for i in range(29, 0, -1)]
items.extend(items2)
patron = '\n'.join(items)
| 24.188482 | 501 | 0.613636 |
"""Guarde en lista `naturales` los primeros 100 números naturales (desde el 1)
usando el bucle while
"""
naturales = []
i = 1
while i <= 100:
naturales.append(i)
i += 1
"""Guarde en `acumulado` una lista con el siguiente patrón:
['1','1 2','1 2 3','1 2 3 4','1 2 3 4 5',...,'...47 48 49 50']
Hasta el número 50.
"""
acumulado = []
i = 1
while True:
if i == 1:
acumulado.append(str(i))
else:
acumulado.append(acumulado[i-2] + ' ' + str(i))
i += 1
if i > 50:
break;
"""Guarde en `suma100` el entero de la suma de todos los números entre 1 y 100:
"""
suma100 = 0
for i in range(1, 101):
suma100 += i
"""Guarde en `tabla100` un string con los primeros 10 múltiplos del número 134,
separados por coma, así:
'134,268,...'
"""
tabla100 = ','.join([str(i * 134) for i in range(1, 11)])
"""Guardar en `multiplos3` la cantidad de números que son múltiplos de 3 y
menores o iguales a 300 en la lista `lista1` que se define a continuación (la lista
está ordenada).
"""
lista1 = [12, 15, 20, 27, 32, 39, 42, 48, 55, 66, 75, 82, 89, 91, 93, 105, 123, 132, 150, 180, 201, 203, 231, 250, 260, 267, 300, 304, 310, 312, 321, 326]
multiplos3 = len([i for i in lista1 if (i % 3 == 0) and (i < 300)])
"""Guardar en `regresivo50` una lista con la cuenta regresiva desde el número
50 hasta el 1, así:
[
'50 49 48 47...',
'49 48 47 46...',
...
'5 4 3 2 1',
'4 3 2 1',
'3 2 1',
'2 1',
'1'
]
"""
regresivo50 = []
i = 0
while True:
if i == 0:
regresivo50.insert(i, str(1))
else:
regresivo50.insert(i, str(i + 1) + ' ' + regresivo50[i-1])
i += 1
if i >= 50:
break;
regresivo50.reverse()
"""Invierta la siguiente lista usando el bucle for y guarde el resultado en
`invertido` (sin hacer uso de la función `reversed` ni del método `reverse`)
"""
lista2 = list(range(1, 70, 5))
invertido = [lista2[i] for i in range(len(lista2) - 1, -1, -1)]
"""Guardar en `primos` una lista con todos los números primos desde el 37 al 300
Nota: Un número primo es un número entero que no se puede calcular multiplicando
otros números enteros.
"""
# forma 1
#primos = [i for i in range(37, 301) if ([True for x in range(2, i) if (i % x) == 0].__contains__(True)) == False]
# forma 2
primos = []
for i in range(37, 301):
esPrimo = True
for x in range(2, i):
if (i % x) == 0:
esPrimo = False
break
if esPrimo == True: primos.append(i)
"""Guardar en `fibonacci` una lista con los primeros 60 términos de la serie de
Fibonacci.
Nota: En la serie de Fibonacci, los 2 primeros términos son 0 y 1, y a partir
del segundo cada uno se calcula sumando los dos anteriores términos de la serie.
[0, 1, 1, 2, 3, 5, 8, ...]
"""
fibonacci = []
for i in range(0, 60):
if i == 0:
fibonacci.append(i)
elif i == 1:
fibonacci.append(i)
else:
fibonacci.append(fibonacci[i-1] + fibonacci[i-2])
"""Guardar en `factorial` el factorial de 30
El factorial (símbolo:!) Significa multiplicar todos los números enteros desde
el 1 hasta el número elegido.
Por ejemplo, el factorial de 5 se calcula así:
5! = 5 × 4 × 3 × 2 × 1 = 120
"""
factorial = 1
for i in range(1, 31):
factorial *= i
"""Guarde en lista `pares` los elementos de la siguiente lista que esten
presentes en posiciones pares, pero solo hasta la posición 80.
"""
lista3 = [941, 149, 672, 208, 99, 562, 749, 947, 251, 750, 889, 596, 836, 742, 512, 19, 674, 142, 272, 773, 859, 598, 898, 930, 119, 107, 798, 447, 348, 402, 33, 678, 460, 144, 168, 290, 929, 254, 233, 563, 48, 249, 890, 871, 484, 265, 831, 694, 366, 499, 271, 123, 870, 986, 449, 894, 347, 346, 519, 969, 242, 57, 985, 250, 490, 93, 999, 373, 355, 466, 416, 937, 214, 707, 834, 126, 698, 268, 217, 406, 334, 285, 429, 130, 393, 396, 936, 572, 688, 765, 404, 970, 159, 98, 545, 412, 629, 361, 70, 602]
pares = [lista3[i] for i in range(0, len(lista3[0:81])) if i % 2 == 0]
"""Guarde en lista `cubos` el cubo (potencia elevada a la 3) de los números del
1 al 100.
"""
cubos = [i**3 for i in range(1, 101)]
"""Encuentre la suma de la serie 2 +22 + 222 + 2222 + .. hasta sumar 10 términos
y guardar resultado en variable `suma_2s`
"""
suma_2s = 0
suma = [str('2') * i for i in range(1, 11)]
for x in range(0, len(suma)):
suma_2s += int(suma[x])
"""Guardar en un string llamado `patron` el siguiente patrón llegando a una
cantidad máxima de asteriscos de 30.
*
**
***
****
*****
******
*******
********
*********
********
*******
******
*****
****
***
**
*
"""
items = [str('*') * i for i in range(1, 31)]
items2 = [str('*') * i for i in range(29, 0, -1)]
items.extend(items2)
patron = '\n'.join(items)
| 0 | 0 | 0 |
592db96b8c56319c711829dab38acc1b8d52ef98 | 3,915 | py | Python | tsutils/cog_mixins.py | kary5678/tsutils | ab6ecdcd2f0e10ba19092028909b3f74bf1708a9 | [
"MIT"
] | 1 | 2021-07-28T19:41:18.000Z | 2021-07-28T19:41:18.000Z | tsutils/cog_mixins.py | kary5678/tsutils | ab6ecdcd2f0e10ba19092028909b3f74bf1708a9 | [
"MIT"
] | 19 | 2020-09-14T07:55:14.000Z | 2022-03-06T17:23:14.000Z | tsutils/cog_mixins.py | kary5678/tsutils | ab6ecdcd2f0e10ba19092028909b3f74bf1708a9 | [
"MIT"
] | 3 | 2020-09-14T07:47:27.000Z | 2021-09-14T02:16:33.000Z | from abc import abstractmethod
from typing import Callable, List, Optional, Type
import redbot.core.commands as commands
from redbot.core.commands import Cog, Command
from .helper_classes import CogABCMeta
| 34.043478 | 115 | 0.65645 | from abc import abstractmethod
from typing import Callable, List, Optional, Type
import redbot.core.commands as commands
from redbot.core.commands import Cog, Command
from .helper_classes import CogABCMeta
class CogMixin(Cog, metaclass=CogABCMeta):
    """Base class for cogs assembled from multiple mixin classes.

    Each mixin implements the three abstract hooks; the combined cog then
    fans calls out to every active mixin through the helper methods below.
    """
    # Per-mixin setup hook, run once per mixin by setup_mixins().
    @abstractmethod
    def setup_self(self: "CogMixin") -> None: ...
    # Per-mixin Red data-statement hook: text describing stored user data.
    @abstractmethod
    async def red_get_data_for_user(self: "CogMixin", *, user_id: int) -> Optional[str]:
        ...
    # Per-mixin Red data-deletion hook.
    @abstractmethod
    async def red_delete_data_for_user(self: "CogMixin", *, requester: str, user_id: int) -> None:
        ...
    def setup_mixins(self) -> None:
        # Invoke each mixin's own setup_self on this combined instance;
        # super(mixin, self) dispatches past each mixin in the MRO.
        for mixin in self.active_mixins:
            super(mixin, self).setup_self() # noqa
    async def get_mixin_user_data(self, user_id: int) -> List[str]:
        # Collect the non-empty user-data strings reported by each mixin.
        ret = []
        for mixin in self.active_mixins:
            if (text := await super(mixin, self).red_get_data_for_user(user_id=user_id)): # noqa
                ret.append(text)
        return ret
    async def delete_mixin_user_data(self, requester: str, user_id: int) -> None:
        # Forward the deletion request to every mixin.
        for mixin in self.active_mixins:
            await super(mixin, self).red_delete_data_for_user(requester=requester, user_id=user_id) # noqa
    @property
    def active_mixins(self) -> List[Type["CogMixin"]]:
        # Every CogMixin subclass in this instance's MRO, excluding the
        # CogMixin base itself.
        return [class_ for class_ in self.__class__.__mro__ if issubclass(class_, CogMixin) and class_ != CogMixin]
class MixinCommand:
    """A deferred redbot command.

    Stores the callback, an optional parent (attribute name or group), and
    the ``command()`` kwargs until :meth:`setup` registers it on a cog.
    """

    def __init__(self, function: Callable, parent: Optional[str] = None, **kwargs):
        self.function = function
        self.parent = parent
        self.kwargs = kwargs

    def setup(self, cog: Cog, parent: Optional[Command] = None) -> None:
        """Register the stored callback as a command on *cog*.

        Resolution order for the registration target: explicit *parent*
        argument, then the parent recorded at construction time (resolved
        via ``getattr(cog, ...)`` when it is a string), then the top-level
        ``commands`` module.
        """
        target = parent or self.parent or commands
        if isinstance(target, str):
            target = getattr(cog, target)
        cmd = target.command(**self.kwargs)(self.function)
        add_command_to_cog(cmd, cog)
class MixinGroup:
    """A deferred redbot command group.

    Records its callback, registration kwargs, and any child commands or
    nested groups added via the decorator helpers; :meth:`setup` then
    registers the whole tree on a cog.
    """

    def __init__(self, function: Callable, parent: Optional[str] = None, **kwargs):
        self.function = function
        self.parent = parent
        self.kwargs = kwargs
        self.children = []

    def command(self, **kwargs) -> Callable[[Callable], MixinCommand]:
        """Decorator: attach a child :class:`MixinCommand` to this group."""
        def register(func: Callable) -> MixinCommand:
            node = MixinCommand(func, **kwargs)
            self.children.append(node)
            return node
        return register

    def group(self, **kwargs) -> Callable[[Callable], "MixinGroup"]:
        """Decorator: attach a nested :class:`MixinGroup` to this group."""
        def register(func: Callable) -> "MixinGroup":
            node = MixinGroup(func, **kwargs)
            self.children.append(node)
            return node
        return register

    def setup(self, cog: Cog, parent: Optional[Command] = None) -> None:
        """Register this group on *cog*, then recursively set up children."""
        target = parent or self.parent or commands
        if isinstance(target, str):
            target = getattr(cog, target)
        grp = target.group(**self.kwargs)(self.function)
        add_command_to_cog(grp, cog)
        for node in self.children:
            node.setup(cog, grp)
def add_command_to_cog(command: Command, cog: Cog) -> None:
    """Attach an already-constructed *command* to *cog* after the fact.

    Binds the command to the cog, appends it to ``__cog_commands__``,
    and exposes it on the cog under its callback's name.  If the command
    has a parent group, the child is re-registered on the cog's own copy
    of that group (looked up by qualified name) so it resolves correctly.
    NOTE(review): this mimics registration normally done at cog class
    creation — confirm against the redbot/discord.py Cog internals in use.
    """
    command.cog = cog
    # __cog_commands__ is a tuple; rebuild it with the new command appended.
    cog.__cog_commands__ = (*cog.__cog_commands__, command)
    # Make the command reachable as an attribute, e.g. cog.<callback name>.
    setattr(cog, command.callback.__name__, command)
    # Map qualified names to the cog-bound command objects.
    lookup = {cmd.qualified_name: cmd for cmd in cog.__cog_commands__}
    parent = command.parent
    if parent is not None:
        # Swap the stale child out of the (cog-bound) parent group.
        parent = lookup[parent.qualified_name]
        parent.remove_command(command.name)
        parent.add_command(command)
def mixin_command(parent: Optional[str], **kwargs) -> Callable[[Callable], MixinCommand]:
    """Decorator factory: wrap a callback in a deferred :class:`MixinCommand`
    registered under *parent* (attribute name on the cog, or None for
    top-level)."""
    def wrap(func: Callable) -> MixinCommand:
        return MixinCommand(func, parent, **kwargs)
    return wrap
def mixin_group(parent: Optional[str], **kwargs) -> Callable[[Callable], MixinGroup]:
    """Decorator factory: wrap a callback in a deferred :class:`MixinGroup`
    registered under *parent* (attribute name on the cog, or None for
    top-level)."""
    def wrap(func: Callable) -> MixinGroup:
        return MixinGroup(func, parent, **kwargs)
    return wrap
| 3,126 | 277 | 298 |
526cd21f661404a9ec8403fe8815374f2aa99060 | 1,521 | py | Python | AlgorithmsPractice/dynamicProgramming/354_hard_Russian Doll Envelopes.py | YangXiaoo/NoteBook | 37056acad7a05b876832f72ac34d3d1a41e0dd22 | [
"CNRI-Python",
"RSA-MD",
"CECILL-B"
] | 58 | 2019-03-03T04:42:23.000Z | 2022-01-13T04:36:31.000Z | AlgorithmsPractice/dynamicProgramming/354_hard_Russian Doll Envelopes.py | YangXiaoo/NoteBook | 37056acad7a05b876832f72ac34d3d1a41e0dd22 | [
"CNRI-Python",
"RSA-MD",
"CECILL-B"
] | null | null | null | AlgorithmsPractice/dynamicProgramming/354_hard_Russian Doll Envelopes.py | YangXiaoo/NoteBook | 37056acad7a05b876832f72ac34d3d1a41e0dd22 | [
"CNRI-Python",
"RSA-MD",
"CECILL-B"
] | 28 | 2019-08-11T01:25:00.000Z | 2021-08-22T06:46:06.000Z | # coding:utf-8
"""
354. Russian Doll Envelopes
Hard
You have a number of envelopes with widths and heights given as a pair of integers (w, h). One envelope can fit into another if and only if both the width and height of one envelope is greater than the width and height of the other envelope.
What is the maximum number of envelopes can you Russian doll? (put one inside other)
Note:
Rotation is not allowed.
Example:
Input: [[5,4],[6,4],[6,7],[2,3]]
Output: 3
Explanation: The maximum number of envelopes you can Russian doll is 3 ([2,3] => [5,4] => [6,7]).
"""
# 2020-7-30
envelopesNums = [
[[5,4],[6,4],[6,7],[2,3]],
]
answers = [
3,
]
test = Solution()
for (en, ans) in zip(envelopesNums, answers):
ret = test.maxEnvelopes(en)
print("{}, my answer is {}, true answer is {}".format(en, ret, ans)) | 28.166667 | 241 | 0.588429 | # coding:utf-8
"""
354. Russian Doll Envelopes
Hard
You have a number of envelopes with widths and heights given as a pair of integers (w, h). One envelope can fit into another if and only if both the width and height of one envelope is greater than the width and height of the other envelope.
What is the maximum number of envelopes can you Russian doll? (put one inside other)
Note:
Rotation is not allowed.
Example:
Input: [[5,4],[6,4],[6,7],[2,3]]
Output: 3
Explanation: The maximum number of envelopes you can Russian doll is 3 ([2,3] => [5,4] => [6,7]).
"""
# 2020-7-30
class Solution(object):
    """LeetCode 354 — Russian Doll Envelopes."""

    def maxEnvelopes(self, envelopes):
        """Return the maximum number of envelopes that can be nested.

        One envelope fits in another only when BOTH its width and height
        are strictly smaller.  Sort by width ascending and, for equal
        widths, by height descending so two envelopes of the same width
        can never chain; the answer is then the length of the longest
        strictly increasing subsequence of the heights, found in
        O(n log n) with bisect (the original O(n^2) DP gave the same
        result but was quadratic and littered with commented-out prints).

        :type envelopes: List[List[int]]
        :rtype: int
        """
        if len(envelopes) == 0: return 0
        import bisect
        # Sort without mutating the caller's list (the original used sorted()).
        order = sorted(envelopes, key=lambda e: (e[0], -e[1]))
        # tails[k] = smallest height that can end an increasing chain of length k+1
        tails = []
        for _, height in order:
            pos = bisect.bisect_left(tails, height)
            if pos == len(tails):
                tails.append(height)
            else:
                tails[pos] = height
        return len(tails)
envelopesNums = [
[[5,4],[6,4],[6,7],[2,3]],
]
answers = [
3,
]
test = Solution()
for (en, ans) in zip(envelopesNums, answers):
ret = test.maxEnvelopes(en)
print("{}, my answer is {}, true answer is {}".format(en, ret, ans)) | 0 | 684 | 22 |
a0d071624a641b8d3d124e0d0dd6d997b8e68270 | 516 | py | Python | airflow/dags/jobs/check_stuff.py | PythonBiellaGroup/ModernDataEngineering | 369fcb89d119ccd1d73882e492cf7c5331087d20 | [
"MIT"
] | null | null | null | airflow/dags/jobs/check_stuff.py | PythonBiellaGroup/ModernDataEngineering | 369fcb89d119ccd1d73882e492cf7c5331087d20 | [
"MIT"
] | null | null | null | airflow/dags/jobs/check_stuff.py | PythonBiellaGroup/ModernDataEngineering | 369fcb89d119ccd1d73882e492cf7c5331087d20 | [
"MIT"
] | null | null | null | from airflow.dags.common import config
from airflow.models import Variable
# Test variables, state and xcom passing values
| 30.352941 | 53 | 0.718992 | from airflow.dags.common import config
from airflow.models import Variable
# Test variables, state and xcom passing values
def check_variables(state, ti):
    """Debug task: print Airflow config paths, exercise Variable get/set,
    and push a value to XCom.

    :param state: value stored into the Airflow Variable ``TESTONE_NEW``
    :param ti: the Airflow TaskInstance, used for ``xcom_push``
    """
    print(f"Airflow folder: {config.AIRFLOW_FOLDER}")
    print(f"Dag folder: {config.DAGS_FOLDER}")
    print(f"Data folder: {config.DATA_FOLDER}")
    TESTONE = Variable.get("TESTONE")
    # NOTE(review): Variable.set() presumably returns None, so the second
    # print below would always show "TESTONE: None" — confirm whether
    # printing `state` (the value just stored) was intended instead.
    TESTONE_NEW = Variable.set("TESTONE_NEW", state)
    print(f"TESTONE: {TESTONE}")
    print(f"TESTONE: {TESTONE_NEW}")
    # Expose the fetched value to downstream tasks via XCom.
    ti.xcom_push(key="TESTONE", value=TESTONE)
| 369 | 0 | 22 |
2a3fa0f4a34e1b4883b372e096011db287c9aaa2 | 2,215 | py | Python | pulumi/infra/queue_driven_lambda.py | msilvey/grapl | 142dc8068d7955e3e4d24221aa94c236745d5faa | [
"Apache-2.0"
] | null | null | null | pulumi/infra/queue_driven_lambda.py | msilvey/grapl | 142dc8068d7955e3e4d24221aa94c236745d5faa | [
"Apache-2.0"
] | null | null | null | pulumi/infra/queue_driven_lambda.py | msilvey/grapl | 142dc8068d7955e3e4d24221aa94c236745d5faa | [
"Apache-2.0"
] | null | null | null | import json
from typing import Optional
import pulumi_aws as aws
from infra.lambda_ import Lambda, LambdaArgs
from infra.metric_forwarder import MetricForwarder
from infra.network import Network
import pulumi
class QueueDrivenLambda(pulumi.ComponentResource):
""" A lambda function that is triggered by an SQS queue. """
| 31.642857 | 69 | 0.481716 | import json
from typing import Optional
import pulumi_aws as aws
from infra.lambda_ import Lambda, LambdaArgs
from infra.metric_forwarder import MetricForwarder
from infra.network import Network
import pulumi
class QueueDrivenLambda(pulumi.ComponentResource):
    """ A lambda function that is triggered by an SQS queue. """
    def __init__(
        self,
        name: str,
        queue: aws.sqs.Queue,
        args: LambdaArgs,
        network: Network,
        forwarder: Optional[MetricForwarder] = None,
        opts: Optional[pulumi.ResourceOptions] = None,
    ) -> None:
        """Create the lambda, grant its role consume rights on *queue*, and
        wire the queue up as the function's event source.

        :param name: logical resource name, also used to derive child names
        :param queue: SQS queue whose messages trigger the function
        :param args: lambda configuration (includes the execution role)
        :param network: network settings forwarded to the Lambda component
        :param forwarder: optional metric forwarder attached to the function
        :param opts: standard Pulumi resource options
        """
        super().__init__("grapl:QueueDrivenLambda", name, None, opts)
        # The function itself; children are parented to this component so
        # they group under it in the Pulumi resource tree.
        self.function = Lambda(
            name,
            args=args,
            network=network,
            forwarder=forwarder,
            opts=pulumi.ResourceOptions(parent=self),
        )
        # Let the execution role receive/inspect/delete queue messages.
        # The policy JSON is rendered once the queue ARN output resolves.
        self.policy = aws.iam.RolePolicy(
            f"{name}-consumes-from-queue",
            role=args.execution_role.name,
            policy=queue.arn.apply(
                lambda arn: json.dumps(
                    {
                        "Version": "2012-10-17",
                        "Statement": [
                            {
                                "Effect": "Allow",
                                "Action": [
                                    "sqs:ChangeMessageVisibility",
                                    "sqs:DeleteMessage",
                                    "sqs:GetQueueAttributes",
                                    "sqs:GetQueueUrl",
                                    "sqs:ReceiveMessage",
                                ],
                                "Resource": arn,
                            }
                        ],
                    }
                )
            ),
            opts=pulumi.ResourceOptions(parent=self),
        )
        # Have the queue invoke the lambda.
        self.event_source_mapping = aws.lambda_.EventSourceMapping(
            f"queue-triggers-{name}",
            event_source_arn=queue.arn,
            function_name=self.function.function.arn,
            batch_size=10, # Default value for SQS queues
            opts=pulumi.ResourceOptions(parent=self),
        )
        self.register_outputs({})
| 1,859 | 0 | 27 |
285f8ea576a28e6e2a1f1246936420ea2e1c98aa | 3,260 | py | Python | bot/guildstatseu.py | Limmek/TibiaDiscordBot | b1c8a6eb34ba4d2d9e2e2e1b1f743d5fd9f9beb8 | [
"MIT"
] | null | null | null | bot/guildstatseu.py | Limmek/TibiaDiscordBot | b1c8a6eb34ba4d2d9e2e2e1b1f743d5fd9f9beb8 | [
"MIT"
] | null | null | null | bot/guildstatseu.py | Limmek/TibiaDiscordBot | b1c8a6eb34ba4d2d9e2e2e1b1f743d5fd9f9beb8 | [
"MIT"
] | null | null | null | import asyncio
import re
import time
import datetime
import aiohttp
from bs4 import BeautifulSoup
| 45.277778 | 108 | 0.476074 | import asyncio
import re
import time
import datetime
import aiohttp
from bs4 import BeautifulSoup
class GuildStats:
    """Async scraper for guildstats.eu character pages.

    Both coroutines are used directly on the class (they take no
    ``self``/``cls``), e.g. ``await GuildStats.getTimeOnline("Name")``.
    """

    # {0} is replaced by the character name.
    URL_CHARACTER_ONLINE_TIME = "https://guildstats.eu/character?nick={0}#tab2"
    URL_CHARACTER_EXPERIENCE_CHANGE = "https://guildstats.eu/character?nick={0}#tab7"

    async def getTimeOnline(name):
        """Return a dict of online-time stats for *name*, or None.

        Keys: 'Last month', 'Current month', 'Current week' and the seven
        weekday abbreviations, each mapped to the matching cell text of
        the page's ``#myTable`` table.

        Bug fix: the original fetched in ``try`` but parsed in ``finally``,
        so a failed request left ``content`` unbound and the cleanup path
        raised ``NameError``.  Parsing now runs only after a successful
        download.
        """
        onlineTable = None
        if name is None:
            return onlineTable
        content = None
        try:
            async with aiohttp.ClientSession() as session:
                async with session.get(GuildStats.URL_CHARACTER_ONLINE_TIME.format(name)) as resp:
                    content = await resp.text()
        except Exception as e:
            print(e)
        if content is None:  # request failed — nothing to parse
            return onlineTable
        soup = BeautifulSoup(content, 'html.parser')
        table_div = soup.find('div', {'id': 'tab2'})
        if table_div:
            table = table_div.find("table", {"id": "myTable", "class": "tablesorter"})
            if table:
                onlineTable = {}
                labels = ['Last month', 'Current month', 'Current week',
                          'Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat', 'Sun']
                for row in table.findAll("tr"):
                    cells = row.findAll("td")
                    # zip() also tolerates short rows, which used to IndexError.
                    for label, cell in zip(labels, cells):
                        onlineTable[label] = cell.find(text=True)
        return onlineTable

    async def getExperienceChange(name):
        """Return a list of experience-history rows for *name*, or None.

        Each row is a dict with 'Date', 'Exp change', 'Rank', 'Lvl' and
        'Experience'.  Returns None when *name* is None or the page has no
        history table.  Same ``finally``-parsing bug fixed as above; the
        bare ``except:`` is narrowed to ``except Exception``.
        """
        expTable = None
        if name is None:
            return expTable
        content = None
        try:
            async with aiohttp.ClientSession() as session:
                async with session.get(GuildStats.URL_CHARACTER_EXPERIENCE_CHANGE.format(name)) as resp:
                    content = await resp.text()
        except Exception as e:
            print(e)
        if content is None:
            return expTable
        soup = BeautifulSoup(content, 'lxml')
        table_div = soup.find('div', {'id': 'tab7'})
        if table_div:
            table = table_div.find("table", {"class": "shadow"})
            if table:
                expTable = []
                labels = ['Date', 'Exp change', 'Rank', 'Lvl', 'Experience']
                for row in table.findAll("tr"):
                    cells = row.findAll("td")
                    expRow = {label: cell.find(text=True)
                              for label, cell in zip(labels, cells)}
                    expTable.append(expRow)
                # Preserve original behaviour: rows are reversed before return.
                expTable.reverse()
        return expTable
| 2,921 | 217 | 23 |
895a04ddca579bcf47014c6d25e4539f067f7d4b | 4,305 | py | Python | internshala_scraper/scrape_internshala_internships.py | tre3x/awesomeScripts | e70cd64eff7791cfac05f069fb9f7037c1bf05bf | [
"MIT"
] | 245 | 2020-09-24T03:49:20.000Z | 2021-01-31T20:09:57.000Z | internshala_scraper/scrape_internshala_internships.py | tre3x/awesomeScripts | e70cd64eff7791cfac05f069fb9f7037c1bf05bf | [
"MIT"
] | 252 | 2020-09-28T02:19:44.000Z | 2021-01-23T09:00:34.000Z | internshala_scraper/scrape_internshala_internships.py | tre3x/awesomeScripts | e70cd64eff7791cfac05f069fb9f7037c1bf05bf | [
"MIT"
] | 219 | 2020-09-23T18:51:42.000Z | 2021-01-23T09:54:40.000Z | # This script will scrape all the internships with given url
# and create a CSV sheet out of it.
# Necessary imports
import requests
from bs4 import BeautifulSoup
import pandas as pd


def _clean(text):
    """Strip any mix of leading/trailing newlines and spaces from *text*.

    More robust than the original chained lstrip/rstrip calls, which
    missed interleaved whitespace such as " \n".
    """
    return text.strip('\n ')


# A dictionary that will initially hold all scraped values
scraped_data = {
    'heading': [],
    'company': [],
    'stipend': [],
    'apply_by': [],
    'logo': []
}
# First, find out total computer science internships available
url = "https://internshala.com/internships/computer%20science-internship"
response = requests.get(url)
data = response.text
soup = BeautifulSoup(data, 'html.parser')
# The first token of the heading text is the internship count.
count_of_internships = int(soup.find("div",
                                     class_="heading_4_6").text.split()[0])
# 40 internships are listed per page.
num_of_pages = int((count_of_internships / 40) + 1)
# A loop that will go to each page and will scrape the data
for i in range(1, num_of_pages + 1):
    # ------------------- Scraping starts here -------------------------------
    # BUG FIX: the original used f"{url}/page-{0}".format(i), which always
    # requested page-0 (the f-string substituted 0 immediately, leaving no
    # placeholder for .format(i)).  Request the i-th page instead.
    response = requests.get(f"{url}/page-{i}")
    print(response.status_code)  # Check out response whether its 200 or not
    # ........ if response is not 200, exit the script ..........
    if response.status_code != 200:
        print("Task cannot be completed at the moment!!!")
        exit()
    data = response.text
    soup = BeautifulSoup(data, 'html.parser')
    # ------------------- Search for heading of the Internship ---------------
    for heading in soup.find_all("div", class_="heading_4_5 profile"):
        scraped_data['heading'].append(heading.text.lstrip('\n'))
    # ------------------- Search for company of the Internship ---------------
    for name in soup.find_all("div", class_="company_name"):
        scraped_data['company'].append(_clean(name.text))
    # (Location and start-date columns were scraped in an earlier revision;
    # re-add the "location_link" / "start_immediately_desktop" selectors here
    # if those fields are needed again.)
    # ------------------- Search for stipend of the Internship ---------------
    for stipend in soup.find_all("span", class_="stipend"):
        scraped_data['stipend'].append(_clean(stipend.text))
    # ------------------- Search for apply by date of the Internship ---------
    for apply_by in soup.find_all("div", class_="apply_by"):
        scraped_data['apply_by'].append(_clean(apply_by.find("div", class_="item_body").text))
    # ------------------- Search for logo of the company ---------------------
    for logo in soup.find_all("div", class_="internship_logo"):
        img = logo.find("img")
        # Some listings have no logo; keep None so columns stay aligned.
        scraped_data['logo'].append(img.get('src') if img is not None else None)
# Now convert the obtained dictionary to a CSV file via pandas module
df = pd.DataFrame(scraped_data)
df.to_csv('internships.csv', index=False)
| 39.136364 | 80 | 0.575145 | # This script will scrape all the internships with given url
# and create a CSV sheet out of it.
# Necessary imports
import requests
from bs4 import BeautifulSoup
import pandas as pd
# A dictionary that will initially hold all scraped values
scraped_data = {
'heading': [],
'company': [],
'stipend': [],
'apply_by': [],
'logo': []
}
# First, find out total computer science internships available
url = "https://internshala.com/internships/computer%20science-internship"
response = requests.get(url)
data = response.text
soup = BeautifulSoup(data, 'html.parser')
count_of_internships = int(soup.find("div",
class_="heading_4_6").text.split()[0])
num_of_pages = int((count_of_internships / 40) + 1)
# A loop that will go to each page and will scrape the data
for i in range(1, num_of_pages + 1):
# ------------------- Scraping starts here -------------------------------
response = requests.get(f"{url}/page-{0}".format(i))
print(response.status_code) # Check out response whether its 200 or not
# ........ if response is not 200, exit the script ..........
if response.status_code != 200:
print("Task cannot be completed at the moment!!!")
exit()
data = response.text
soup = BeautifulSoup(data, 'html.parser')
# ------------------- Search for heading of the Internship -------------------
heading_data = soup.find_all("div", class_="heading_4_5 profile")
for heading in heading_data:
scraped_data['heading'].append(heading.text.lstrip('\n'))
# ------------------- Search for company of the Internship -------------------
company_data = soup.find_all("div", class_="company_name")
for name in company_data:
# Cleaning of data before saving it
name = name.text
name = name.lstrip('\n')
name = name.lstrip(' ')
name = name.rstrip('\n')
name = name.rstrip(' ')
scraped_data['company'].append(name)
# # ------------------- Search for location of the Internship -----------------
# location_data = soup.find_all("a", class_="location_link")
# for loc in location_data:
# # Cleaning of data before saving it
# loc = loc.text
# loc = loc.lstrip('\n')
# loc = loc.lstrip(' ')
# loc = loc.rstrip('\n')
# loc = loc.rstrip(' ')
# if loc != 'Work From Home':
# scraped_data['location'].append(loc)
# # ------------------- Search for start date of the Internship ---------------
# start_date_data = soup.find_all("span", class_="start_immediately_desktop")
# for date in start_date_data:
# date = date.text
# date = date.lstrip('\n')
# date = date.lstrip(' ')
# date = date.rstrip('\n')
# date = date.rstrip(' ')
# scraped_data['start_date'].append(date)
# ------------------- Search for stipend of the Internship -------------------
stipend_data = soup.find_all("span", class_="stipend")
for stipend in stipend_data:
stipend = stipend.text
stipend = stipend.lstrip('\n')
stipend = stipend.lstrip(' ')
stipend = stipend.rstrip('\n')
stipend = stipend.rstrip(' ')
scraped_data['stipend'].append(stipend)
# ------------------- Search for apply by date of the Internship -------------
apply_by_data = soup.find_all("div", class_="apply_by")
for apply_date in apply_by_data:
apply_date = apply_date.find("div", class_="item_body").text
apply_date = apply_date.lstrip('\n')
apply_date = apply_date.lstrip(' ')
apply_date = apply_date.rstrip('\n')
apply_date = apply_date.rstrip(' ')
scraped_data['apply_by'].append(apply_date)
# ------------------- Search for logo of the company of the Internship -------
logo_data = soup.find_all("div", class_="internship_logo")
for logo in logo_data:
logo = logo.find("img")
if logo is not None:
logo = logo.get('src')
scraped_data['logo'].append(logo)
# Now convert the obtained dictionary to a CSV file via pandas module
df = pd.DataFrame(scraped_data)
df.to_csv('internships.csv', index=False)
| 0 | 0 | 0 |
023bd6855609a18ee78ad27b8d91ed72405263aa | 2,104 | py | Python | examples/EarthMoon_example.py | c-bruce/pysamss | b37206a39391e370da7ab367087ba3c2b29b1d9f | [
"MIT"
] | 1 | 2020-05-02T18:47:50.000Z | 2020-05-02T18:47:50.000Z | examples/EarthMoon_example.py | c-bruce/pysamss | b37206a39391e370da7ab367087ba3c2b29b1d9f | [
"MIT"
] | null | null | null | examples/EarthMoon_example.py | c-bruce/pysamss | b37206a39391e370da7ab367087ba3c2b29b1d9f | [
"MIT"
] | null | null | null | # Date: 24/06/2019
# Author: Callum Bruce
# Earth, Moon example.
# To run, ensure 'de430.bsp' is downloaded to the working directory. See https://naif.jpl.nasa.gov/pub/naif/generic_kernels/spk/planets/.
import numpy as np
from mayavi import mlab
import datetime
from jplephem.spk import SPK
import pysamss
# Step 1: Setup system
system = pysamss.System('EarthMoon')
system.current.setDatetime(datetime.datetime.utcnow()) # Set current time utc
# Step 1.1: Add Earth and Moon to system.  The positional args appear to be
# mass [kg] and radius [m] — values match Earth's/Moon's mass and mean
# radius; confirm against the CelestialBody API.
system.current.addCelestialBody(pysamss.CelestialBody('Earth', 5.972e24, 6.371e6))
system.current.addCelestialBody(pysamss.CelestialBody('Moon', 7.348e22, 1.737e6, parent_name='Earth'))
# Step 2: Calculate positions and velocities at the current Julian date from
# the JPL DE430 ephemeris.  NOTE(review): segment keys (3, 399) / (3, 301)
# are presumably NAIF codes (Earth-Moon barycenter -> Earth / -> Moon) —
# confirm against the de430.bsp segment list.
time = system.current.getJulianDate()
kernel = SPK.open('de430.bsp')
# Earth
earth_pos, earth_vel = kernel[3,399].compute_and_differentiate(time)
earth_pos *= 1000 # Convert from km -> m
earth_vel /= 86.4 # Convert from km/day -> m/s (x1000 / 86400 = /86.4)
# Moon
moon_pos, moon_vel = kernel[3,301].compute_and_differentiate(time)
moon_pos *= 1000 # Convert from km -> m
moon_vel /= 86.4 # Convert from km/day -> m/s (x1000 / 86400 = /86.4)
# Step 3: Set positions and velocities
system.current.celestial_bodies['Earth'].setPosition(earth_pos)
system.current.celestial_bodies['Earth'].setVelocity(earth_vel)
# Spin rate about z: 360 deg per sidereal day (23 h 56 min 4 s).
system.current.celestial_bodies['Earth'].setAttitudeDot(np.array([0.0, 0.0, np.deg2rad(360 / ((23 * 60 * 60) + (56 * 60) + 4))]))
system.current.celestial_bodies['Earth'].setTexture(pysamss.__file__[:-12] + '/resources/earth.jpg')
system.current.celestial_bodies['Moon'].setPosition(moon_pos)
system.current.celestial_bodies['Moon'].setVelocity(moon_vel)
# Spin rate about z: 360 deg per 2358720 s (= 27.3 days).
system.current.celestial_bodies['Moon'].setAttitudeDot(np.array([0.0, 0.0, np.deg2rad(360 / 2358720.0)]))
system.current.celestial_bodies['Moon'].setTexture(pysamss.__file__[:-12] + '/resources/moon.jpg')
# Step 4: Simulate system for 2358720 s (27.3 days) at 60 s steps, saving
# every 100th step.
system.setDt(60.0)
system.setEndTime(2358720.0)
system.setSaveInterval(100)
system.simulateSystem()
# Step 5: Post processing — reload the saved run and display it.
system.load('EarthMoon.psm')
fig = pysamss.MainWidget()
fig.loadSystem(system)
fig.showMaximized()
mlab.show() | 41.254902 | 137 | 0.761882 | # Date: 24/06/2019
# Author: Callum Bruce
# Earth, Moon example.
# To run, ensure 'de430.bsp' is downloaded to the working directory. See https://naif.jpl.nasa.gov/pub/naif/generic_kernels/spk/planets/.
import numpy as np
from mayavi import mlab
import datetime
from jplephem.spk import SPK
import pysamss
# Step 1: Setup system
system = pysamss.System('EarthMoon')
system.current.setDatetime(datetime.datetime.utcnow()) # Set current time utc
# Step 1.1: Add Earth and Moon to system
system.current.addCelestialBody(pysamss.CelestialBody('Earth', 5.972e24, 6.371e6))
system.current.addCelestialBody(pysamss.CelestialBody('Moon', 7.348e22, 1.737e6, parent_name='Earth'))
# Step 2: Calculate positions and velocities
time = system.current.getJulianDate()
kernel = SPK.open('de430.bsp')
# Earth
earth_pos, earth_vel = kernel[3,399].compute_and_differentiate(time)
earth_pos *= 1000 # Convert from km -> m
earth_vel /= 86.4 # Convert from km/day -> m/s
# Moon
moon_pos, moon_vel = kernel[3,301].compute_and_differentiate(time)
moon_pos *= 1000 # Convert from km -> m
moon_vel /= 86.4 # Convert from km/day -> m/s
# Step 3: Set positions and velocities
system.current.celestial_bodies['Earth'].setPosition(earth_pos)
system.current.celestial_bodies['Earth'].setVelocity(earth_vel)
system.current.celestial_bodies['Earth'].setAttitudeDot(np.array([0.0, 0.0, np.deg2rad(360 / ((23 * 60 * 60) + (56 * 60) + 4))]))
system.current.celestial_bodies['Earth'].setTexture(pysamss.__file__[:-12] + '/resources/earth.jpg')
system.current.celestial_bodies['Moon'].setPosition(moon_pos)
system.current.celestial_bodies['Moon'].setVelocity(moon_vel)
system.current.celestial_bodies['Moon'].setAttitudeDot(np.array([0.0, 0.0, np.deg2rad(360 / 2358720.0)]))
system.current.celestial_bodies['Moon'].setTexture(pysamss.__file__[:-12] + '/resources/moon.jpg')
# Step 4: Simulate system
system.setDt(60.0)
system.setEndTime(2358720.0)
system.setSaveInterval(100)
system.simulateSystem()
# Step 5: Post processing
system.load('EarthMoon.psm')
fig = pysamss.MainWidget()
fig.loadSystem(system)
fig.showMaximized()
mlab.show() | 0 | 0 | 0 |
b1c2566c53846ce73cab8ad0f9f8c89def74b926 | 1,331 | py | Python | tests/vasya_clerk_test.py | ibotdotout/coe-pairing | 1a390ca3d07801c98a53de92722981c9697cd68c | [
"MIT"
] | null | null | null | tests/vasya_clerk_test.py | ibotdotout/coe-pairing | 1a390ca3d07801c98a53de92722981c9697cd68c | [
"MIT"
] | null | null | null | tests/vasya_clerk_test.py | ibotdotout/coe-pairing | 1a390ca3d07801c98a53de92722981c9697cd68c | [
"MIT"
] | null | null | null | # http://www.codewars.com/kata/vasya-clerk/python
import unittest
import vasya_clerk
| 25.596154 | 52 | 0.640872 | # http://www.codewars.com/kata/vasya-clerk/python
import unittest
import vasya_clerk
class VasyaClerkTest(unittest.TestCase):
    """Tests for ``vasya_clerk.solve`` (the codewars "Vasya - Clerk" kata).

    Each case feeds a list of bill values and asserts the expected
    boolean result.
    """

    def _check(self, bills, expected):
        """Assert that solve(bills) equals *expected*."""
        self.assertEqual(vasya_clerk.solve(bills), expected)

    def test_give_25_should_be_true(self):
        self._check([25], True)

    def test_give_50_should_be_false(self):
        self._check([50], False)

    def test_give_100_should_be_false(self):
        self._check([100], False)

    def test_give_25_100_should_be_false(self):
        self._check([25, 100], False)

    def test_give_25_50_50_should_be_false(self):
        self._check([25, 50, 50], False)

    def test_give_25_50_100_should_be_false(self):
        self._check([25, 50, 100], False)

    def test_give_25_25_50_should_be_ture(self):
        self._check([25, 25, 50], True)

    def test_give_25_25_25_100_should_be_ture(self):
        self._check([25, 25, 25, 100], True)

    def test_give_25_100_50_should_be_false(self):
        self._check([25, 100, 50], False)

    def test_give_25_50_25_100_should_be_true(self):
        self._check([25, 50, 25, 100], True)
| 905 | 19 | 320 |
34d48f4d94de429ca01e535a91927728d1c23f24 | 6,251 | py | Python | kbsbot/compose_engine/services.py | astandre/cb-compose-engine-ms | ed4141f57dcb544743fd17fe62001d573ae1efc9 | [
"MIT"
] | null | null | null | kbsbot/compose_engine/services.py | astandre/cb-compose-engine-ms | ed4141f57dcb544743fd17fe62001d573ae1efc9 | [
"MIT"
] | null | null | null | kbsbot/compose_engine/services.py | astandre/cb-compose-engine-ms | ed4141f57dcb544743fd17fe62001d573ae1efc9 | [
"MIT"
] | null | null | null | from requests import Session
import requests
import os
# NLP_ENGINE_URL = "http://127.0.0.1:5001"
# INTENTS_MANAGMENT_URL = "http://127.0.0.1:5002"
# CONTEXT_MANAGMENT_URL = "http://127.0.0.1:5003"
NLP_ENGINE_URL = os.environ.get('NLP_ENGINE_URL')
INTENTS_MANAGMENT_URL = os.environ.get('INTENTS_MANAGMENT_URL')
CONTEXT_MANAGMENT_URL = os.environ.get('CONTEXT_MANAGMENT_URL')
session = Session()
session.trust_env = False
session.verify = False
session.headers["Accept"] = "application/json"
session.headers["Content-Type"] = "application/json"
def discover_intent(agent, text):
    """
    Query the *NLP Engine* microservice to extract the intent of a raw text.

    :param agent: The agent name of the chatbot
    :param text: The raw input to discover the intent

    :return: The URI of the predicted intent, or None on error / no intent
    """
    endpoint = NLP_ENGINE_URL + "/intents"
    payload = {"agent": agent, "sentence": text}
    try:
        reply = session.post(endpoint, json=payload)
    except requests.exceptions.RequestException as err:
        print(err)
        return None
    if reply.status_code == 200:
        body = reply.json()
        if "intent" in body and len(body["intent"]) > 0:
            return body["intent"][0]["prediction"]
        return None
    return None
def discover_entities(agent, text):
    """
    Query the *NLP Engine* microservice to extract the entities in a raw text.

    :param agent: The agent name of the chatbot
    :param text: The raw input to discover the entities

    :return: A list of dicts with the entity type and predicted URI value
    """
    endpoint = NLP_ENGINE_URL + "/entities"
    try:
        reply = session.post(endpoint, json={"agent": agent, "sentence": text})
        if reply.status_code == 200:
            payload = reply.json()
            return [
                {"type": item["entity"], "value": item["prediction"]}
                for item in payload["entities"]
            ]
    except requests.exceptions.RequestException as err:
        print(err)
    return []
def get_requirements(intent):
    """
    Query the *Intents Management* microservice for an intent's requirements.

    :param intent: The intent from where requirements will be retrieved
    :return: The list of entity URIs required to complete the intent, or None
    """
    endpoint = INTENTS_MANAGMENT_URL + "/intent/requires"
    try:
        reply = session.get(endpoint, json={"intent": intent})
        if reply.status_code == 200:
            payload = reply.json()
            # Missing "requires" key yields None, same as the explicit branch.
            return payload.get("requires")
    except requests.exceptions.RequestException as err:
        print(err)
    return None
def get_options(entity):
    """
    Query the *Intents Management* microservice for an entity type's options.

    :param entity: The entity from where options will be retrieved
    :return: The options payload for the entity, or None on error

    .. todo:: give a list of entities
    """
    endpoint = INTENTS_MANAGMENT_URL + "/entity/options"
    try:
        reply = session.get(endpoint, json={"entity": entity})
    except requests.exceptions.RequestException as err:
        print(err)
        return None
    if reply.status_code == 200:
        return reply.json()
    return None
def find_in_context(user, entities):
    """
    Look up entity information already present in the conversation thread.

    :param user: The id of the user to find information
    :param entities: The entities to be found in context.
    :return: The entities found in context, or an empty list
    """
    endpoint = CONTEXT_MANAGMENT_URL + "/context/entities"
    try:
        reply = session.get(endpoint, json={"user": user, "entities": entities})
        if reply.status_code == 200:
            payload = reply.json()
            # Absent "entities" key yields [], same as the explicit branch.
            return payload.get("entities", [])
    except requests.exceptions.RequestException as err:
        print(err)
    return []
def get_answer(intent, entities):
    """
    This service connects to the microservice *Intents Management* in order to retrieve the answer of an intent.

    :param intent: The intent from where answer will be retrieved
    :param entities: A list of entities used to retrieve answer

    :return: The answer payload for the intent, or None on error / non-200
    """
    # Doc fix: the original :return: line was copy-pasted from get_options
    # ("A list of options to complete an entity") and described the wrong thing.
    url = INTENTS_MANAGMENT_URL + "/intent/answer"
    try:
        r = session.get(url, json={"intent": intent, "entities": entities})
        if r.status_code == 200:
            response = r.json()
            return response
    except requests.exceptions.RequestException as e:
        print(e)
    # Consistency fix: explicit None on error/non-200, matching every sibling
    # helper in this module (was an implicit fall-through return).
    return None
def get_agent_data(agent):
    """
    Query the *Intents Management* microservice for information about an agent.

    :param agent: A valid agent name
    :return: A dict containing agent, a description and the different intents,
        or None on error
    """
    endpoint = INTENTS_MANAGMENT_URL + "/agent/info"
    try:
        reply = session.get(endpoint, json={"agent": agent})
    except requests.exceptions.RequestException as err:
        print(err)
        return None
    if reply.status_code == 200:
        return reply.json()
    return None
def get_intent_rq(intent, entity):
    """
    Query the *Intents Management* microservice for an intent's resolution question.

    :param intent: A valid intent
    :param entity: A valid entity
    :return: A dict containing intent, entity and the resolution question,
        or None on error
    """
    endpoint = INTENTS_MANAGMENT_URL + "/intent/rq"
    try:
        reply = session.get(endpoint, json={"intent": intent, "entity": entity})
    except requests.exceptions.RequestException as err:
        print(err)
        return None
    if reply.status_code == 200:
        return reply.json()
    return None
| 31.099502 | 123 | 0.614782 | from requests import Session
import requests
import os
# NLP_ENGINE_URL = "http://127.0.0.1:5001"
# INTENTS_MANAGMENT_URL = "http://127.0.0.1:5002"
# CONTEXT_MANAGMENT_URL = "http://127.0.0.1:5003"
NLP_ENGINE_URL = os.environ.get('NLP_ENGINE_URL')
INTENTS_MANAGMENT_URL = os.environ.get('INTENTS_MANAGMENT_URL')
CONTEXT_MANAGMENT_URL = os.environ.get('CONTEXT_MANAGMENT_URL')
session = Session()
session.trust_env = False
session.verify = False
session.headers["Accept"] = "application/json"
session.headers["Content-Type"] = "application/json"
def discover_intent(agent, text):
    """Ask the *NLP Engine* microservice which intent a raw sentence expresses.

    :param agent: The agent name of the chatbot
    :param text: The raw input to discover the intent from
    :return: The URI of the best-ranked intent, or None
    """
    endpoint = NLP_ENGINE_URL + "/intents"
    try:
        reply = session.post(endpoint, json={"agent": agent, "sentence": text})
        if reply.status_code == 200:
            payload = reply.json()
            # Only the top-ranked prediction is of interest.
            if "intent" in payload and len(payload["intent"]) > 0:
                return payload["intent"][0]["prediction"]
            return None
    except requests.exceptions.RequestException as err:
        print(err)
    return None
def discover_entities(agent, text):
    """Ask the *NLP Engine* microservice for the entities found in a raw sentence.

    :param agent: The agent name of the chatbot
    :param text: The raw input to extract entities from
    :return: A list of ``{"type": ..., "value": ...}`` dicts (empty on failure)
    """
    endpoint = NLP_ENGINE_URL + "/entities"
    try:
        reply = session.post(endpoint, json={"agent": agent, "sentence": text})
        if reply.status_code == 200:
            payload = reply.json()
            # Re-shape each detected entity into the {"type", "value"} contract.
            return [
                {"type": found["entity"], "value": found["prediction"]}
                for found in payload["entities"]
            ]
    except requests.exceptions.RequestException as err:
        print(err)
    return []
def get_requirements(intent):
    """Fetch the entity requirements of an intent from *Intents Management*.

    :param intent: The intent whose requirements will be retrieved
    :return: A list of entity URIs needed to complete the intent, or None
    """
    endpoint = INTENTS_MANAGMENT_URL + "/intent/requires"
    try:
        reply = session.get(endpoint, json={"intent": intent})
        if reply.status_code == 200:
            # Missing key maps to None, matching the service contract.
            return reply.json().get("requires")
    except requests.exceptions.RequestException as err:
        print(err)
    return None
def get_options(entity):
    """Fetch the options of an entity type from *Intents Management*.

    :param entity: The entity whose options will be retrieved
    :return: The options payload as a dict, or None on failure

    .. todo:: accept a list of entities
    """
    endpoint = INTENTS_MANAGMENT_URL + "/entity/options"
    try:
        reply = session.get(endpoint, json={"entity": entity})
        if reply.status_code == 200:
            return reply.json()
    except requests.exceptions.RequestException as err:
        print(err)
    return None
def find_in_context(user, entities):
    """Look up entities already present in the user's conversation context.

    :param user: The id of the user whose context is searched
    :param entities: The entities to be found in context
    :return: The entities found, or an empty list
    """
    endpoint = CONTEXT_MANAGMENT_URL + "/context/entities"
    try:
        reply = session.get(endpoint, json={"user": user, "entities": entities})
        if reply.status_code == 200:
            # Missing key degrades to an empty result, same as a failed call.
            return reply.json().get("entities", [])
    except requests.exceptions.RequestException as err:
        print(err)
    return []
def get_answer(intent, entities):
    """
    This service connects to the microservice *Intents Management* in order to retrieve the answer of an intent.

    :param intent: The intent from where answer will be retrieved
    :param entities: A list of entities used to retrieve the answer
    :return: The answer payload as a dict, or None on failure
    """
    url = INTENTS_MANAGMENT_URL + "/intent/answer"
    try:
        r = session.get(url, json={"intent": intent, "entities": entities})
        if r.status_code == 200:
            response = r.json()
            return response
    except requests.exceptions.RequestException as e:
        print(e)
    # Explicit fall-through result, consistent with every sibling service helper.
    return None
def get_agent_data(agent):
    """
    This service connects to the microservice *Intents Management* in order to retrieve information about an agent.

    :param agent: A valid agent name
    :return: A dict containing the agent, a description and the different intents, or None on failure.
    """
    url = INTENTS_MANAGMENT_URL + "/agent/info"
    try:
        r = session.get(url, json={"agent": agent})
        if r.status_code == 200:
            response = r.json()
            return response
    except requests.exceptions.RequestException as e:
        # Network-level failures are logged and mapped to a None result.
        print(e)
    return None
def get_intent_rq(intent, entity):
    """
    This service connects to the microservice *Intents Management* in order to get the resolution question of an intent.

    :param intent: A valid intent
    :param entity: A valid entity
    :return: A dict containing the intent, the entity and the resolution question, or None on failure.
    """
    url = INTENTS_MANAGMENT_URL + "/intent/rq"
    try:
        r = session.get(url, json={"intent": intent, "entity": entity})
        if r.status_code == 200:
            response = r.json()
            return response
    except requests.exceptions.RequestException as e:
        # Network-level failures are logged and mapped to a None result.
        print(e)
    return None
5cb5f8aa1715886be25dc44d316e33365e638c31 | 3,350 | py | Python | 2019/day12.py | cy2000yadav/AdventOfCode | 41bc7df9e2c396883fed78369d9aac961bffb7a2 | [
"MIT"
] | 2 | 2019-12-03T20:23:09.000Z | 2020-08-28T10:46:18.000Z | 2019/day12.py | cy2000yadav/AdventOfCode | 41bc7df9e2c396883fed78369d9aac961bffb7a2 | [
"MIT"
] | null | null | null | 2019/day12.py | cy2000yadav/AdventOfCode | 41bc7df9e2c396883fed78369d9aac961bffb7a2 | [
"MIT"
] | 1 | 2021-10-30T07:27:54.000Z | 2021-10-30T07:27:54.000Z | import re
if __name__ == '__main__':
main() | 24.100719 | 83 | 0.453134 | import re
class moon:
    """A single moon: a 3-D position plus a velocity that starts at rest."""

    def __init__(self, x, y, z):
        # Position components.
        self.x, self.y, self.z = x, y, z
        # Velocity is zero on every axis until gravity is applied.
        self.v_x = self.v_y = self.v_z = 0
def printMoon(moon, vel = True):
    """Print a moon as ``pos=<...>, `` optionally followed by ``vel=<...>``."""
    pos_part = f'pos=<x={moon.x}, y={moon.y}, z={moon.z}>, '
    vel_part = f'vel=<x={moon.v_x}, y={moon.v_y}, z={moon.v_z}>'
    print(pos_part + vel_part if vel else pos_part)
def calcGravity(moons):
    """Apply pairwise gravity: each axis pulls velocities toward the other moon by 1."""
    def pull(a, b):
        # Sign of (b - a): +1, 0 or -1 -- the velocity change along one axis.
        return (a < b) - (a > b)

    # Comparing a moon with itself contributes 0, so no self-exclusion is needed.
    for body in moons:
        for other in moons:
            body.v_x += pull(body.x, other.x)
            body.v_y += pull(body.y, other.y)
            body.v_z += pull(body.z, other.z)
def moveMoons(moons):
    """Advance every moon one time step: position += velocity, velocity unchanged."""
    for body in moons:
        body.x, body.y, body.z = (
            body.x + body.v_x,
            body.y + body.v_y,
            body.z + body.v_z,
        )
def calcEnergy(moons, steps):
    """Simulate *steps* time steps and return the total energy of the system.

    Total energy is the sum over moons of potential * kinetic, where potential
    is the sum of absolute position components and kinetic the sum of absolute
    velocity components.
    """
    for _ in range(steps):
        calcGravity(moons)
        moveMoons(moons)
    total = 0
    for body in moons:
        potential = abs(body.x) + abs(body.y) + abs(body.z)
        kinetic = abs(body.v_x) + abs(body.v_y) + abs(body.v_z)
        total += potential * kinetic
    return total
def timeForLoop(moons):
    """Step the simulation until a previously seen state string recurs.

    NOTE(review): the state string encodes only the positions, not the
    velocities, so this detects a repeated *position* configuration -- confirm
    that is the intended loop criterion for the puzzle.

    :param moons: list of moon objects, mutated in place
    :return: the number of steps until the first repeated state
    """
    # `history` is kept as a list so the final print output is unchanged;
    # `seen` mirrors it for O(1) membership tests (the list `in` check made
    # the loop quadratic in the number of steps).
    history = []
    seen = set()
    time = 0
    checkstring = ""
    for moon in moons:
        checkstring += str(moon.x) + " " + str(moon.y) + " " + str(moon.z) + " "
    history.append(checkstring)
    seen.add(checkstring)
    while 1:
        time += 1
        calcGravity(moons)
        moveMoons(moons)
        checkstring = ""
        for moon in moons:
            checkstring += str(moon.x) + " " + str(moon.y) + " " + str(moon.z) + " "
        if checkstring in seen:
            print('\nend')
            for moon in moons:
                printMoon(moon, False)
            print(history)
            return time
        else:
            history.append(checkstring)
            seen.add(checkstring)
        # check for test so it dont loop
        if time > 2772:
            exit('not working')
def getMoons(filename):
    """Parse an input file into moon objects, consuming three integers per moon."""
    raw = open(filename).read()
    values = [int(tok) for tok in re.findall(r'-?\d+', raw)]
    return [moon(values[i], values[i + 1], values[i + 2])
            for i in range(0, len(values), 3)]
def main():
    """Run both puzzle parts against the test input file."""
    input_path = '2019/input/day12test.txt'
    bodies = getMoons(input_path)
    # Part 1 simulates 10 steps on a shallow copy so part 2 starts fresh.
    print('Part 1', calcEnergy(bodies.copy(), 10))
    print('Part 2', timeForLoop(bodies))


if __name__ == '__main__':
    main()
6044864d1133c9dd0d3f7f6ba4e574fd2ebbb87e | 4,721 | py | Python | lib/bx/intervals/operations/join.py | lldelisle/bx-python | 19ab41e0905221e3fcaaed4b74faf2d7cda0d15a | [
"MIT"
] | 122 | 2015-07-01T12:00:22.000Z | 2022-03-02T09:27:35.000Z | lib/bx/intervals/operations/join.py | lldelisle/bx-python | 19ab41e0905221e3fcaaed4b74faf2d7cda0d15a | [
"MIT"
] | 64 | 2015-11-06T21:03:18.000Z | 2022-03-24T00:55:27.000Z | lib/bx/intervals/operations/join.py | lldelisle/bx-python | 19ab41e0905221e3fcaaed4b74faf2d7cda0d15a | [
"MIT"
] | 60 | 2015-10-05T19:19:36.000Z | 2021-11-19T20:53:54.000Z | """
Join two sets of intervals using their overlap as the key. The
intervals MUST be sorted by chrom(lexicographically),
start(arithmetically) and end(arithmetically). This works by simply
walking through the inputs in O(n) time.
"""
import math
from bx.intervals.io import GenomicInterval
from .quicksect import IntervalTree
| 34.459854 | 131 | 0.577632 | """
Join two sets of intervals using their overlap as the key. The
intervals MUST be sorted by chrom(lexicographically),
start(arithmetically) and end(arithmetically). This works by simply
walking through the inputs in O(n) time.
"""
import math
from bx.intervals.io import GenomicInterval
from .quicksect import IntervalTree
def join(leftSet, rightSet, mincols=1, leftfill=True, rightfill=True):
    """Generator that joins two sorted interval readers on overlap.

    Non-interval lines from *leftSet* (headers, comments) are passed through
    unchanged.  For each left interval, every right interval overlapping it by
    at least *mincols* yields the concatenated fields.  *rightfill* pads left
    rows with no match; *leftfill* emits unmatched right rows at the end.
    """
    # Read rightSet into memory:
    rightlen = 0
    leftlen = 0
    rightTree = IntervalTree()
    for item in rightSet:
        if isinstance(item, GenomicInterval):
            rightTree.insert(item, rightSet.linenum, item.fields)
            # Remember the field count once, to know how many "." to pad with.
            if rightlen == 0:
                rightlen = item.nfields
    for interval in leftSet:
        if leftlen == 0 and isinstance(interval, GenomicInterval):
            leftlen = interval.nfields
        if not isinstance(interval, GenomicInterval):
            yield interval
        else:
            result = []
            rightTree.intersect(interval, lambda node: result.append(node))
            overlap_not_met = 0
            for item in result:
                # Classify the overlap by which endpoints fall inside [start, end].
                if item.start in range(interval.start, interval.end+1) and item.end not in range(interval.start, interval.end+1):
                    overlap = interval.end-item.start
                elif item.end in range(interval.start, interval.end+1) and item.start not in range(interval.start, interval.end+1):
                    overlap = item.end-interval.start
                elif item.start in range(interval.start, interval.end+1) and item.end in range(interval.start, interval.end+1):
                    overlap = item.end-item.start
                else:  # the intersecting item's start and end are outside the interval range
                    overlap = interval.end-interval.start
                if overlap < mincols:
                    overlap_not_met += 1
                    continue
                outfields = list(interval)
                outfields.extend(item.other)
                # Mark the right-side node so leftfill can report unmatched ones.
                setattr(item, "visited", True)
                yield outfields
            if (len(result) == 0 or overlap_not_met == len(result)) and rightfill:
                # No usable match: pad the right-hand columns with ".".
                outfields = list(interval)
                for x in range(rightlen):
                    outfields.append(".")
                yield outfields
    if leftfill:
        # Emit right intervals that never matched, padding the left columns.
        def report_unvisited(node, results):
            if not hasattr(node, "visited"):
                results.append(node)
        results = []
        rightTree.traverse(lambda x: report_unvisited(x, results))
        for item in results:
            outfields = list()
            for x in range(leftlen):
                outfields.append(".")
            outfields.extend(item.other)
            yield outfields
def interval_cmp(a, b):
    """Old-style ``cmp`` for ``(interval, ...)`` tuples: by chrom, then midpoint.

    :param a: tuple whose first element may be a GenomicInterval
    :param b: tuple whose first element may be a GenomicInterval
    :return: negative / zero / positive like a classic ``cmp`` function;
        0 when either first element is not a GenomicInterval
    """
    interval1 = a[0]
    interval2 = b[0]
    if not (isinstance(interval1, GenomicInterval) and isinstance(interval2, GenomicInterval)):
        return 0
    # Both are intervals
    if interval1.chrom == interval2.chrom:
        # Same chromosome: compare interval midpoints (may be a float).
        center1 = interval1.start + ((interval1.end - interval1.start) / 2)
        center2 = interval2.start + ((interval2.end - interval2.start) / 2)
        return center1 - center2
    # Different chromosomes: order lexicographically by name.
    # (The unreachable trailing `return 0` after this if/else was removed.)
    if interval1.chrom > interval2.chrom:
        return 1
    return -1
def findintersect(interval, sortedlist, mincols):
    """Binary-search *sortedlist* for the range of entries overlapping *interval*.

    :param interval: a GenomicInterval to intersect against
    :param sortedlist: list of ``(interval, ...)`` tuples sorted by interval_cmp
    :param mincols: minimum overlap for a hit
    :return: (lowerbound, upperbound) inclusive indices, or (0, -1) when no hit

    Bug fix: the index ``x`` and step ``n`` were computed with true division
    (``/``), which yields floats in Python 3 and makes ``sortedlist[x]`` raise
    TypeError; both now use floor division (``//``).
    """
    # find range of intervals that intersect via a binary search
    # find lower bound
    x = len(sortedlist) // 2
    n = int(math.pow(2, math.ceil(math.log(len(sortedlist), 2))))
    not_found = True
    not_done = True
    while not_found and not_done:
        n = n // 2
        if n == 0:
            n = 1
            not_done = False
        if x >= len(sortedlist):
            x -= n
        elif x < 0:
            x += n
        else:
            if findoverlap(sortedlist[x][0], interval) >= mincols:
                not_found = False
            else:
                comp = interval_cmp(sortedlist[x], [interval, 0])
                if comp > 0:
                    x -= n
                else:
                    x += n
    # NOTE(review): these prints look like leftover debug output -- confirm
    # before removing, as callers may rely on the emitted lines.
    print("\t".join(sortedlist[x][0].fields))
    print("not_found = " + str(not_found))
    if not_found:
        return 0, -1
    # Widen from the hit in both directions while the overlap still qualifies.
    lowerbound = x
    upperbound = x
    while (lowerbound > -1) and (findoverlap(sortedlist[lowerbound-1][0], interval) >= mincols):
        lowerbound -= 1
    while (upperbound+1 < len(sortedlist)) and (findoverlap(sortedlist[upperbound+1][0], interval) >= mincols):
        upperbound += 1
    return lowerbound, upperbound
def findoverlap(a, b):
    """Return the overlap length of intervals *a* and *b*.

    Returns 0 when the intervals lie on different chromosomes; the result can
    be negative when they are disjoint on the same chromosome.
    """
    if a.chrom != b.chrom:
        return 0
    return min(a.end, b.end) - max(a.start, b.start)
| 4,295 | 0 | 92 |
1ac6b2322d59bfa2eb8a3dbb827a17195caeabba | 17,176 | py | Python | src/agents.py | NeiH2304/ProCon_ver_4 | a51604bc8b1510971d981a1d0f06b9d3ff8494aa | [
"MIT"
] | null | null | null | src/agents.py | NeiH2304/ProCon_ver_4 | a51604bc8b1510971d981a1d0f06b9d3ff8494aa | [
"MIT"
] | null | null | null | src/agents.py | NeiH2304/ProCon_ver_4 | a51604bc8b1510971d981a1d0f06b9d3ff8494aa | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Nov 5 09:45:45 2020
@author: hien
"""
import numpy as np
import torch
from src.deep_q_network import Critic, Actor
from src.replay_memory import ReplayBuffer
from random import random, randint, choices, uniform
from src import utils
from src.utils import flatten
from torch.optim import Adam, SGD
from torch.autograd import Variable
import torch.nn.functional as F
from sklearn.utils import shuffle
from copy import deepcopy as copy
| 39.667436 | 122 | 0.566546 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Nov 5 09:45:45 2020
@author: hien
"""
import numpy as np
import torch
from src.deep_q_network import Critic, Actor
from src.replay_memory import ReplayBuffer
from random import random, randint, choices, uniform
from src import utils
from src.utils import flatten
from torch.optim import Adam, SGD
from torch.autograd import Variable
import torch.nn.functional as F
from sklearn.utils import shuffle
from copy import deepcopy as copy
class Agent():
    """DDPG-style agent for a grid game: actor/critic networks with target
    copies, a replay buffer, and several heuristic action-selection policies.

    NOTE(review): board constants (MAP_SIZE = 5 vs. the (7, 20, 20) reshape in
    the select_* methods) look inconsistent -- confirm against the environment.
    """

    def __init__(self, gamma, lr_a, lr_c, state_dim_actor, state_dim_critic, num_agents, num_agent_lim, action_dim,
                 mem_size, batch_size, agent_name, chkpoint, chkpt_dir, env = None):
        # Hyper-parameters and bookkeeping counters.
        self.MAP_SIZE = 5
        self.state_dim_actor = state_dim_actor
        self.state_dim_critic = state_dim_critic
        self.action_dim = action_dim
        self.action_lim = action_dim
        self.iter = 0
        self.lr_a = lr_a
        self.lr_c = lr_c
        self.tau = 0.01
        self.steps_done = 0
        self.nrand_action = 0
        self.gamma = gamma
        self.num_agent_lim = num_agent_lim
        self.max_n_agents = self.num_agent_lim
        self.learn_step_counter = 0
        self.batch_size = batch_size
        self.chkpt_dir = chkpt_dir
        self.env = env
        self.critic_loss_value = 0
        self.actor_loss_value = 0
        self.chkpoint = chkpoint
        self.num_agents = num_agents
        self.agent_name = agent_name
        self.use_cuda = torch.cuda.is_available()
        self.noise = utils.OrnsteinUhlenbeckActionNoise(self.action_dim)
        ''' Setup CUDA Environment'''
        self.device = 'cuda' if self.use_cuda else 'cpu'
        # Either restore all four networks from disk or build them fresh.
        if chkpoint:
            self.load_models()
        else:
            self.actor = Actor(self.state_dim_actor, self.action_dim, self.action_dim)
            self.critic = Critic(self.state_dim_critic, self.action_dim)
            self.target_actor = copy(self.actor)
            self.target_critic = copy(self.critic)
        if self.use_cuda:
            self.actor.to(self.device)
            self.target_actor.to(self.device)
            self.critic.to(self.device)
            self.target_critic.to(self.device)
        # Start the target networks as exact copies of the online networks.
        utils.hard_update(self.target_actor, self.actor)
        utils.hard_update(self.target_critic, self.critic)
        self.actor_optimizer = Adam(self.actor.parameters(), self.lr_a)
        self.critic_optimizer = Adam(self.critic.parameters(), self.lr_c)
        self.memories = ReplayBuffer(mem_size)

    def set_environment(self, env):
        """Attach an environment and adopt its agent count."""
        self.env = env
        self.num_agents = env.num_agents

    def get_exploitation_action(self, state):
        """
        Greedy action from the *target* actor.

        NOTE(review): despite the original note, no exploration noise is added
        in this method.

        :param state: state (Numpy array)
        :return: action (Numpy array)
        """
        state = Variable(torch.from_numpy(state).to(self.device))
        action = self.target_actor.forward(state).detach()
        return action.to('cpu').data.numpy()

    def get_exploration_action(self, state):
        """
        Action from the online actor.

        NOTE(review): despite the original note, no exploration noise is added
        here either; exploration happens via epsilon in select_action.

        :param state: state (Numpy array)
        :return: action (Numpy array)
        """
        state = Variable(torch.from_numpy(state).to(self.device))
        action = self.actor.forward(state).detach()
        return action.to('cpu').data.numpy()

    def optimize(self):
        """
        Samples a random batch from replay memory and performs one DDPG
        optimization step on both critic and actor, then soft-updates the
        target networks.

        :return: None (loss values are stored on the instance)
        """
        # Not enough experience collected yet.
        if self.memories.len < self.batch_size:
            return
        s, a, r, ns = self.memories.sample(self.batch_size)
        s = Variable(torch.from_numpy(s).to(self.device), requires_grad=True)
        a = Variable(torch.from_numpy(a).to(self.device), requires_grad=True)
        r = Variable(torch.from_numpy(r).to(self.device), requires_grad=True)
        ns = Variable(torch.from_numpy(ns).to(self.device), requires_grad=True)
        ''' ---------------------- optimize ----------------------
            Use target actor exploitation policy here for loss evaluation
            y_exp = r + gamma*Q'( s2, pi'(s2))
            y_pred = Q( s1, a1)
        '''
        a2 = self.target_actor.forward(ns).detach()
        next_val = torch.squeeze(self.target_critic.forward(ns, a2).detach())
        y_expected = r + self.gamma * next_val
        y_predicted = 0 + torch.squeeze(self.critic.forward(s, a))
        ''' compute critic loss, and update the critic '''
        loss_critic = F.smooth_l1_loss(y_predicted, y_expected)
        self.critic_optimizer.zero_grad()
        loss_critic.backward()
        self.critic_optimizer.step()
        ''' ---------------------- optimize actor ----------------------'''
        _a = self.actor.forward(s)
        loss_actor = -1 * torch.sum(self.critic.forward(s, _a))
        self.actor_optimizer.zero_grad()
        loss_actor.backward()
        self.actor_optimizer.step()
        # for parameter in self.actor.parameters():
        #     print(parameter.grad)
        utils.soft_update(self.target_actor, self.actor, self.tau)
        utils.soft_update(self.target_critic, self.critic, self.tau)
        self.actor_loss_value = -loss_actor.to('cpu').data.numpy()
        self.critic_loss_value = loss_critic.to('cpu').data.numpy()

    def get_agent_state(self, agents_pos, agent):
        """Return a MAP_SIZE x MAP_SIZE one-hot grid marking one agent's cell."""
        agent_state = []
        for i in range(self.MAP_SIZE):
            agent_state.append([0] * self.MAP_SIZE)
        x, y = agents_pos[agent]
        agent_state[x][y] = 1
        return agent_state

    def select_action(self, state, epsilon):
        """Epsilon-greedy: random normalized action vector with prob. epsilon,
        otherwise the actor network's output for the flattened state."""
        if random() <= epsilon:
            action = [0] * self.action_dim
            action = np.array(action, dtype = np.float32)
            S = 0
            for i in range(len(action)):
                action[i] = randint(0, 1000)
                S += action[i]
            # Normalize so the components sum to 1 (a probability-like vector).
            for i in range(len(action)):
                action[i] = action[i] * 1.0 / S
        else:
            action = self.get_exploration_action(np.array(flatten(state), dtype=np.float32))
        return action

    def select_action_smart(self, state):
        """Greedy one-step lookahead: score all 9 actions per agent via
        env.fit_action and return the normalized score vector of agent 0."""
        actions = [0] * self.num_agents
        state = copy(state)
        state = np.reshape(flatten(state), (7, 20, 20))
        state = [state[0], [state[1], state[2]], [state[3], state[4]], state[5], state[6]]
        agent_pos_1 = copy(self.env.agent_pos_1)
        agent_pos_2 = copy(self.env.agent_pos_2)
        init_score = self.env.score_mine - self.env.score_opponent
        rewards = []
        states = []
        next_states = []
        order = shuffle(range(self.num_agents))
        for i in range(self.num_agents):
            agent = order[i]
            _state = state
            agent_state = self.get_agent_state(agent_pos_1, agent)
            _state = flatten([_state, agent_state])
            states.append(state)
            act = 0
            scores = [0] * 9
            mn = 1000
            mx = -1000
            valid_states = []
            # Evaluate every candidate action on a copied state.
            for act in range(9):
                _state, _agent_pos_1, _agent_pos_2 = copy([state, agent_pos_1, agent_pos_2])
                valid, _state, _agent_pos, _score = self.env.fit_action(agent, _state, act, _agent_pos_1, _agent_pos_2)
                scores[act] = _score - init_score
                mn = min(mn, _score - init_score)
                mx = max(mx, _score - init_score)
                valid_states.append(valid)
            # Discourage the "stay" action by pinning it to the minimum score.
            scores[0] = mn
            for j in range(len(scores)):
                scores[j] = (scores[j] - mn) / (mx - mn + 0.0001)
            sum = np.sum(scores) + 0.0001
            for j in range(len(scores)):
                scores[j] = scores[j] / sum
                if(valid_states[j] is False):
                    scores[j] = 0
            act = np.array(scores).argmax()
            # Commit the chosen action so later agents see the updated board.
            valid, state, agent_pos, score = self.env.fit_action(agent, state, act, agent_pos_1, agent_pos_2)
            rewards.append(score - init_score)
            init_score = score
            actions[agent] = scores
            next_states.append(state)
        return actions[0]

    def select_action_test_not_predict(self, state):
        """Like select_best_actions, but samples actions (softened with **5)
        and evaluates fit_action in no-predict mode; returns the transition
        tuple.  NOTE(review): `rewards` is returned but never filled here."""
        actions = []
        state = copy(state)
        state = np.reshape(flatten(state), (7, 20, 20))
        state = [state[0], [state[1], state[2]], [state[3], state[4]], state[5], state[6]]
        agent_pos_1 = copy(self.env.agent_pos_1)
        agent_pos_2 = copy(self.env.agent_pos_2)
        init_score = self.env.score_mine - self.env.score_opponent
        rewards = []
        states = []
        next_states = []
        for i in range(self.num_agents):
            _state = state
            _state[1] = self.env.get_agent_state(_state[1], i)
            _state = flatten(_state)
            states.append(state)
            act = 0
            scores = [0] * 9
            mn = 1000
            mx = -1000
            valid_states = []
            for act in range(9):
                _state, _agent_pos_1, _agent_pos_2 = copy([state, agent_pos_1, agent_pos_2])
                valid, _state, _agent_pos, _score = self.env.fit_action(i, _state, act, _agent_pos_1, _agent_pos_2, False)
                scores[act] = _score - init_score
                mn = min(mn, _score - init_score)
                mx = max(mx, _score - init_score)
                valid_states.append(valid)
            for j in range(len(scores)):
                scores[j] = (scores[j] - mn) / (mx - mn + 0.0001)
                # Sharpen the distribution toward the best-scoring actions.
                scores[j] **= 5
            sum = np.sum(scores) + 0.0001
            for j in range(len(scores)):
                scores[j] = scores[j] / sum
                if(valid_states[j] is False):
                    scores[j] = 0
            act = choices(range(9), scores)[0]
            valid, state, agent_pos, score = self.env.fit_action(i, state, act, agent_pos_1, agent_pos_2)
            init_score = score
            actions.append(act)
            next_states.append(state)
        return states, actions, rewards, next_states

    def select_best_actions(self, state):
        """One-step-lookahead policy: per agent (in shuffled order), score all
        9 actions, sharpen with **10, forbid "stay", and sample an action."""
        actions = [0] * self.num_agents
        state = copy(state)
        state = np.reshape(flatten(state), (7, 20, 20))
        state = [state[0], [state[1], state[2]], [state[3], state[4]], state[5], state[6]]
        agent_pos_1 = copy(self.env.agent_pos_1)
        agent_pos_2 = copy(self.env.agent_pos_2)
        init_score = self.env.score_mine - self.env.score_opponent
        rewards = []
        states = []
        next_states = []
        order = shuffle(range(self.num_agents))
        for i in range(self.num_agents):
            agent = order[i]
            _state = state
            _state[1] = self.env.get_agent_state(_state[1], agent)
            _state = flatten(_state)
            states.append(state)
            act = 0
            scores = [0] * 9
            mn = 1000
            mx = -1000
            valid_states = []
            for act in range(9):
                _state, _agent_pos_1, _agent_pos_2 = copy([state, agent_pos_1, agent_pos_2])
                valid, _state, _agent_pos, _score = self.env.fit_action(agent, _state, act, _agent_pos_1, _agent_pos_2)
                scores[act] = _score - init_score
                mn = min(mn, _score - init_score)
                mx = max(mx, _score - init_score)
                valid_states.append(valid)
            # scores[0] -= 2
            for j in range(len(scores)):
                scores[j] = (scores[j] - mn) / (mx - mn + 0.0001)
                scores[j] **= 10
            sum = np.sum(scores) + 0.0001
            for j in range(len(scores)):
                scores[j] = scores[j] / sum
                if(valid_states[j] is False):
                    scores[j] = 0
            # Never pick the "stay" action.
            scores[0] = 0
            act = choices(range(9), scores)[0]
            valid, state, agent_pos, score = self.env.fit_action(agent, state, act, agent_pos_1, agent_pos_2)
            rewards.append(score - init_score)
            init_score = score
            actions[agent] = act
            next_states.append(state)
        return states, actions, rewards, next_states

    def select_random(self, state):
        """Uniform random action (0..8) per agent; rewards are all zero."""
        actions = []
        for i in range(self.num_agents):
            actions.append(randint(0, 8))
        return state, actions, [0] * self.num_agents, state

    def select_action_from_state(self, state):
        """Wrap the actor's output for a flattened state in a one-element list."""
        act = self.get_exploration_action(np.array(flatten(state), dtype=np.float32))
        return [act]

    def transform_to_critic_state(self, state):
        """Replace the agents layer of *state* with its per-id one-hot form."""
        state[1] = self.get_state_critic(state[1])
        return state

    def get_state_actor(self):
        """Deep-copied minimal actor state: score, own agents, own conquests."""
        return copy([self.env.score_matrix, self.env.agents_matrix[0],
                     self.env.conquer_matrix[0]])

    def get_state_actor_2(self):
        """Deep-copied extended actor state including treasures and walls."""
        return copy([self.env.score_matrix, self.env.agents_matrix,
                     self.env.conquer_matrix, self.env.treasures_matrix, self.env.walls_matrix])

    def get_state_critic(self, state = None):
        """Critic-side state: agents layer expanded to one-hot per agent id.

        NOTE(review): the default branch reads self.score_matrix etc., which
        are not set in __init__ -- confirm callers always pass *state*.
        """
        if state is None:
            state = [self.score_matrix, self.agents_matrix,
                     self.conquer_matrix, self.treasures_matrix]
        state = copy(state)
        state[1] = self.get_all_agent_matrix(state[1])
        return state

    def get_all_agent_matrix(self, agents_matrix):
        """Expand an id-valued grid into 8 binary MAP_SIZE x MAP_SIZE grids,
        one per agent id k in 0..7."""
        all_matrix = []
        for k in range(8):
            matrix = []
            for i in range(self.MAP_SIZE):
                matrix.append([0] * self.MAP_SIZE)
                for j in range(self.MAP_SIZE):
                    if agents_matrix[i][j] == k:
                        matrix[i][j] = 1
            all_matrix.append(matrix)
        return all_matrix

    def form_action_predict(self, actions):
        """One-hot encode per-agent action ids, padded to num_agent_lim rows
        of 9, then flattened."""
        form_actions = []
        for i in range(self.num_agent_lim):
            act = -1
            if (i < len(actions)):
                act = actions[i]
            form_actions.append([1 if i == act else 0 for i in range(9)])
        return flatten(form_actions)

    def action_flatten(self, acts):
        """One-hot encode action ids (width action_lim), pad with zero rows up
        to num_agent_lim, and flatten."""
        _acts = []
        for act in acts:
            p = [1 if j == act else 0 for j in range(self.action_lim)]
            _acts.append(p)
        while(len(_acts) < self.num_agent_lim):
            _acts.append([0] * self.action_lim)
        return flatten(_acts)

    def learn(self, state, actions_1, actions_2, BGame, show_screen):
        """Step the env with the argmax of actions_1, store the transition in
        replay memory and run one optimization step.

        :return: True when the episode finished
        """
        act = [actions_1.argmax()]
        # actions = copy(actions_1)
        # for i in range(9):
        #     actions[i] = actions[i] ** 3
        # act = choices([i for i in range(9)], actions)
        next_state, reward, done, remaining_turns = self.env.next_frame(
            act, actions_2, BGame, show_screen)
        # if act[0] == 0:
        #     reward -= 3
        # action = self.form_action_predict(actions_1)
        state = flatten(state)
        next_state = flatten([next_state[0], next_state[1][0], next_state[2][0]])
        # print(next_state)
        self.memories.store_transition(state, actions_1, reward, next_state)
        self.optimize()
        return done

    def update_state(self, states_1, actions_1, rewards_1, next_states_1, actions_2, BGame, show_screen):
        """Advance the environment one frame without learning; return done."""
        next_state, reward, done, remaining_turns = self.env.next_frame(
            actions_1, actions_2, BGame, show_screen)
        return done

    def save_models(self):
        """
        Saves the four networks (actor/critic and their targets) to ./Models.

        :return: None
        """
        torch.save(self.target_actor, './Models/target_actor.pt')
        torch.save(self.target_critic, './Models/target_critic.pt')
        torch.save(self.actor, './Models/actor.pt')
        torch.save(self.critic, './Models/critic.pt')
        print('Models saved successfully')

    def load_models(self):
        """
        Loads the four networks from ./Models onto self.device and switches
        them to eval mode.

        :return: None
        """
        self.target_actor = torch.load('./Models/target_actor.pt', map_location = self.device)
        self.target_critic = torch.load('./Models/target_critic.pt', map_location = self.device)
        self.actor = torch.load('./Models/actor.pt', map_location = self.device)
        self.critic = torch.load('./Models/critic.pt', map_location = self.device)
        self.target_actor.eval()
        self.target_critic.eval()
        self.actor.eval()
        self.critic.eval()
        # utils.hard_update(self.target_actor, self.actor)
        # utils.hard_update(self.target_critic, self.critic)
        print('Models loaded succesfully')
| 11,870 | 4,781 | 23 |
77918520b9533f590cf62fc821c8eb5d72734baa | 3,413 | py | Python | deploy/templatetags/myinclusion.py | wxmgcs/devops | 7b0daf6121139c8bec80ec58c119d04d8aeadfe8 | [
"MIT"
] | 3 | 2019-05-06T06:44:43.000Z | 2020-06-10T00:54:43.000Z | deploy/templatetags/myinclusion.py | wxmgcs/devops | 7b0daf6121139c8bec80ec58c119d04d8aeadfe8 | [
"MIT"
] | 1 | 2017-07-11T11:36:54.000Z | 2017-07-11T11:42:23.000Z | deploy/templatetags/myinclusion.py | wxmgcs/devops | 7b0daf6121139c8bec80ec58c119d04d8aeadfe8 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# coding: utf8
'''
@author: qitan
@contact: qqing_lai@hotmail.com
@file: myinclusion.py
@time: 2017/3/30 15:32
@desc:
'''
from django import template
from django.db.models import Q
from deploy.models import *
from userauth.models import *
register = template.Library()
def show_single_minions(pk, user_type):
    '''
    Host list shown by the single-item selector in file rollback.
    '''
    tgt_list = []
    if user_type:
        tgt_list = [i['hostname'] for i in SaltHost.objects.filter(status=True).values('hostname')]
    else:
        # Non-superusers only see hosts of their own departments.
        tgt_list = [i['hostname'] for d in User.objects.get(pk=pk).department.all() for i in
                    d.host_department_set.values('hostname')]
    return {'tgt_list': sorted(list(set(tgt_list)))}
register.inclusion_tag('tag_single_minions.html')(show_single_minions)
def show_groups(pk, user_type):
    '''
    Show all groups in remote command, module deployment and file management views.
    '''
    group_dict = {}
    if user_type:
        group_dict = {i['groupname']:i['nickname'] for i in SaltGroup.objects.values('groupname', 'nickname')}
    else:
        # NOTE(review): this 'd' is immediately shadowed by the comprehension
        # variable below and looks like dead code -- confirm and remove.
        d = User.objects.get(pk=pk).department
        group_dict = {i['groupname']:i['nickname'] for d in User.objects.get(pk=pk).department.all()
                      for i in d.saltgroup_department_set.values('groupname', 'nickname')}
    return {'group_dict':sorted(list(set(group_dict.items())))}
register.inclusion_tag('tag_user_departments.html')(show_groups)
def show_modules(u, user_type):
    '''
    Show all modules in the module deployment view.
    '''
    if user_type:
        module_list = ModuleUpload.objects.all()
    else:
        # Modules created by the user, or marked public (visible=2).
        module_visible_list = [{'pk': i.pk, 'name': i.name, 'module': i.module, 'remark': i.remark}
                               for i in ModuleUpload.objects.filter(Q(user=u) | Q(visible=2))]
        # Modules shared through any of the user's groups.
        module_user_group_list = [{'pk': i.pk, 'name': i.name, 'module': i.module, 'remark': i.remark}
                                  for g in User.objects.get(pk=u.pk).group.all() for i in
                                  ModuleUpload.objects.filter(user_group=g)]
        # Merge the two lists, de-duplicating group modules already visible.
        module_list = module_visible_list + [i for i in module_user_group_list if i not in module_visible_list]
    return {'module_list': module_list}
register.inclusion_tag('tag_modules.html')(show_modules)
def show_user_group_minions(pk, user_type, list_type):
    '''
    Host list shown in remote command, module deployment and file upload views.
    '''
    if user_type:
        tgt_list = [i['hostname'] for i in SaltHost.objects.filter(status=True).values('hostname')]
    else:
        # Non-superusers only see hosts of their own user groups.
        tgt_list = [i['hostname'] for g in User.objects.get(pk=pk).group.all() for i in
                    SaltHost.objects.filter(user_group=g).values('hostname')]
    return {'tgt_list':sorted(list(set(tgt_list))), 'list_type':list_type}
register.inclusion_tag('tag_user_group_minions.html')(show_user_group_minions)
def show_user_group_groups(pk, user_type):
    '''
    User-group list shown in remote command, module deployment and file management views.
    '''
    group_dict = {}
    if user_type:
        group_dict = {i['groupname']:i['nickname'] for i in SaltGroup.objects.values('groupname', 'nickname')}
    else:
        # Non-superusers only see salt groups of their own user groups.
        group_dict = {i['groupname']:i['nickname'] for g in User.objects.get(pk=pk).group.all()
                      for i in SaltGroup.objects.filter(user_group=g).values('groupname', 'nickname')}
    return {'group_dict':sorted(list(set(group_dict.items())))}
register.inclusion_tag('tag_user_group_groups.html')(show_user_group_groups)
| 33.460784 | 111 | 0.64811 | #!/usr/bin/env python
# coding: utf8
'''
@author: qitan
@contact: qqing_lai@hotmail.com
@file: myinclusion.py
@time: 2017/3/30 15:32
@desc:
'''
from django import template
from django.db.models import Q
from deploy.models import *
from userauth.models import *
register = template.Library()
def show_single_minions(pk, user_type):
    '''
    Host list shown by the single-item selector in file rollback.
    '''
    if user_type:
        hosts = [row['hostname'] for row in SaltHost.objects.filter(status=True).values('hostname')]
    else:
        # Non-superusers only see hosts of their own departments.
        hosts = [row['hostname']
                 for dep in User.objects.get(pk=pk).department.all()
                 for row in dep.host_department_set.values('hostname')]
    return {'tgt_list': sorted(set(hosts))}
register.inclusion_tag('tag_single_minions.html')(show_single_minions)
def show_groups(pk, user_type):
    '''
    Show all groups in remote command, module deployment and file management views.

    :param pk: primary key of the requesting user
    :param user_type: truthy for privileged users, who see every group
    :return: context dict with sorted (groupname, nickname) pairs
    '''
    group_dict = {}
    if user_type:
        group_dict = {i['groupname']:i['nickname'] for i in SaltGroup.objects.values('groupname', 'nickname')}
    else:
        # The former dead assignment `d = User.objects.get(pk=pk).department`
        # issued an extra DB query and was immediately shadowed; removed.
        group_dict = {i['groupname']:i['nickname'] for d in User.objects.get(pk=pk).department.all()
                      for i in d.saltgroup_department_set.values('groupname', 'nickname')}
    return {'group_dict':sorted(list(set(group_dict.items())))}
register.inclusion_tag('tag_user_departments.html')(show_groups)
def show_modules(u, user_type):
    '''
    Show all modules in the module deployment view.
    '''
    if user_type:
        module_list = ModuleUpload.objects.all()
    else:
        def as_dict(m):
            # Uniform serialisation used for de-duplication below.
            return {'pk': m.pk, 'name': m.name, 'module': m.module, 'remark': m.remark}

        # Modules created by the user, or marked public (visible=2).
        own_or_public = [as_dict(m) for m in ModuleUpload.objects.filter(Q(user=u) | Q(visible=2))]
        # Modules shared through any of the user's groups.
        via_groups = [as_dict(m)
                      for grp in User.objects.get(pk=u.pk).group.all()
                      for m in ModuleUpload.objects.filter(user_group=grp)]
        # Merge, keeping the first occurrence of each entry.
        module_list = own_or_public + [d for d in via_groups if d not in own_or_public]
    return {'module_list': module_list}
register.inclusion_tag('tag_modules.html')(show_modules)
def show_user_group_minions(pk, user_type, list_type):
    '''
    Host list shown in remote command, module deployment and file upload views.
    '''
    if user_type:
        hosts = [row['hostname'] for row in SaltHost.objects.filter(status=True).values('hostname')]
    else:
        # Non-superusers only see hosts of their own user groups.
        hosts = [row['hostname']
                 for grp in User.objects.get(pk=pk).group.all()
                 for row in SaltHost.objects.filter(user_group=grp).values('hostname')]
    return {'tgt_list': sorted(set(hosts)), 'list_type': list_type}
register.inclusion_tag('tag_user_group_minions.html')(show_user_group_minions)
def show_user_group_groups(pk, user_type):
    """Template tag: user-group scoped Salt groups for the remote command,
    module deployment and file management views.

    :param pk: primary key of the requesting User
    :param user_type: truthy for privileged users (see every group)
    :return: context dict with ``group_dict`` as a sorted list of
        (groupname, nickname) pairs
    """
    # NOTE: the old `group_dict = {}` pre-initialisation was dead code --
    # both branches always rebind the name.
    if user_type:
        group_dict = {i['groupname']: i['nickname']
                      for i in SaltGroup.objects.values('groupname', 'nickname')}
    else:
        group_dict = {i['groupname']: i['nickname']
                      for g in User.objects.get(pk=pk).group.all()
                      for i in SaltGroup.objects.filter(user_group=g).values('groupname', 'nickname')}
    return {'group_dict': sorted(set(group_dict.items()))}
register.inclusion_tag('tag_user_group_groups.html')(show_user_group_groups)
| 0 | 0 | 0 |
8161a40448432ddb98fa8a8eb4bb1ff893aeeaab | 6,208 | py | Python | evaluation/old_compare/compare_combined_sift.py | tilman/compositional_elements | 45271196ed01d0515357c7abdf35d6b87f2036d5 | [
"MIT"
] | 2 | 2021-06-13T16:21:52.000Z | 2021-06-13T16:21:53.000Z | evaluation/old_compare/compare_combined_sift.py | tilman/compositional_elements | 45271196ed01d0515357c7abdf35d6b87f2036d5 | [
"MIT"
] | null | null | null | evaluation/old_compare/compare_combined_sift.py | tilman/compositional_elements | 45271196ed01d0515357c7abdf35d6b87f2036d5 | [
"MIT"
] | null | null | null | # call this script with `python -m evaluation.evaluate_poselines_globalaction`
import numpy as np
from numpy.core.fromnumeric import sort
import pandas as pd
import datetime
import torch
from torch.functional import norm
from tqdm import tqdm
from . import eval_utils
import cv2
from .compare_deepfeatures import negative_cosine_dist_flatten
from compoelem.config import config
from compoelem.compare.pose_line import compare_pose_lines_2, compare_pose_lines_3, filter_pose_line_ga_result
from compoelem.compare.normalize import minmax_norm_by_imgrect, minmax_norm_by_bbox, norm_by_global_action
#https://opencv-python-tutroals.readthedocs.io/en/latest/py_tutorials/py_feature2d/py_matcher/py_matcher.html#brute-force-matching-with-sift-descriptors-and-ratio-test
| 50.064516 | 167 | 0.716173 | # call this script with `python -m evaluation.evaluate_poselines_globalaction`
import numpy as np
from numpy.core.fromnumeric import sort
import pandas as pd
import datetime
import torch
from torch.functional import norm
from tqdm import tqdm
from . import eval_utils
import cv2
from .compare_deepfeatures import negative_cosine_dist_flatten
from compoelem.config import config
from compoelem.compare.pose_line import compare_pose_lines_2, compare_pose_lines_3, filter_pose_line_ga_result
from compoelem.compare.normalize import minmax_norm_by_imgrect, minmax_norm_by_bbox, norm_by_global_action
#https://opencv-python-tutroals.readthedocs.io/en/latest/py_tutorials/py_feature2d/py_matcher/py_matcher.html#brute-force-matching-with-sift-descriptors-and-ratio-test
def compare_siftBFMatcher1(sift1, sift2):
    """Match two SIFT descriptor sets with a brute-force matcher.

    Applies Lowe's ratio test (0.75) to the 2-nearest-neighbour matches and
    returns the feature match ratio (fmr): good matches divided by the size
    of the larger descriptor set. Range [0, 1]: 0 => poor, 1 => good match.
    """
    # BFMatcher with default params
    bf = cv2.BFMatcher()
    des1 = sift1["descriptors"]
    des2 = sift2["descriptors"]
    matches = bf.knnMatch(des1, des2, k=2)
    # Apply ratio test
    good = []
    for pair in matches:
        # knnMatch may return fewer than k matches per query descriptor
        # (e.g. when the target set has a single descriptor); guard the
        # unpacking instead of crashing with a ValueError.
        if len(pair) < 2:
            continue
        m, n = pair
        if m.distance < 0.75 * n.distance:
            good.append([m])
    # see compare_orbBFMatcher2 for why we use len(matches) or len(good) from ratio test
    return len(good)/max(len(des1), len(des2))
def compare_combinedSetupB(data, sort_method):
    """Retrieval evaluation combining pose-line matching with SIFT features.

    For every query image, every other image is scored with (a) the best
    pose-line combination over all global-action normalisations and (b) the
    SIFT feature match ratio (fmr); the per-pair score rows are ranked with
    *sort_method* and retrieval metrics are accumulated per class label.

    :param data: list of per-image records with "compoelem", "sift",
        "className" and "imgName" entries
    :param sort_method: callable ranking the (n, 7) results array
    :return: evaluation DataFrame from eval_utils.get_eval_dataframe
    """
    res_metrics = {}
    for query_data in tqdm(data, total=len(data)):
        compare_results = []
        query_pose_lines_seq = norm_by_global_action(query_data["compoelem"]["pose_lines"], query_data["compoelem"]["global_action_lines"])
        for target_data in data:
            # skip comparing an image against itself
            if query_data["className"] == target_data["className"] and query_data["imgName"] == target_data["imgName"]:
                continue
            target_pose_lines_seq = norm_by_global_action(target_data["compoelem"]["pose_lines"], target_data["compoelem"]["global_action_lines"])
            pair_compare_results = []
            # include traditional features:
            fmr = compare_siftBFMatcher1(query_data["sift"], target_data["sift"])
            # feature match ratio
            # best pose-line score over all normalisation combinations
            for query_pose_lines in query_pose_lines_seq:
                for target_pose_lines in target_pose_lines_seq:
                    combined_ratio, hit_ratio, neg_mean_distance_hits = compare_pose_lines_3(query_pose_lines, target_pose_lines)
                    pair_compare_results.append((combined_ratio, hit_ratio, neg_mean_distance_hits, target_data))
            combined_ratio, hit_ratio, neg_mean_distance_hits, target_data = filter_pose_line_ga_result(pair_compare_results)
            # fused scores: product and sum of fmr and combined_ratio
            fmrcr = fmr*combined_ratio
            fmrcr2 = fmr+combined_ratio
            compare_results.append((combined_ratio, hit_ratio, neg_mean_distance_hits, fmr, fmrcr, fmrcr2, target_data))
        compare_results = np.array(compare_results)
        sorted_compare_results = sort_method(compare_results)
        query_label = query_data["className"]
        res_labels = list(map(lambda x: x["className"], sorted_compare_results[:,-1]))
        metrics = eval_utils.score_retrievals(query_label, res_labels)
        label = metrics["label"]
        # accumulate every metric (except the label itself) per class
        for key in metrics.keys():
            if key != "label":
                if key not in res_metrics:
                    res_metrics[key] = {}
                if label not in res_metrics[key]:
                    res_metrics[key][label] = []
                res_metrics[key][label].append(metrics[key])
    return eval_utils.get_eval_dataframe(res_metrics)
def lexsort_fmr_cr(compare_results):
    """Sort rows ascending by combined_ratio (primary), then fmr (secondary).

    Row layout: (combined_ratio, hit_ratio, neg_mean_distance_hits,
    fmr, fmrcr, fmrcr2, target_data).
    """
    order = np.lexsort((compare_results[:, 3], compare_results[:, 0]))
    return compare_results[order]
def lexsort_fmr_hr(compare_results):
    """Sort rows ascending by hit_ratio (primary), then fmr (secondary).

    Row layout: (combined_ratio, hit_ratio, neg_mean_distance_hits,
    fmr, fmrcr, fmrcr2, target_data).
    """
    order = np.lexsort((compare_results[:, 3], compare_results[:, 1]))
    return compare_results[order]
def lexsort_cr_fmr(compare_results):
    """Sort rows ascending by fmr (primary), then combined_ratio (secondary).

    Row layout: (combined_ratio, hit_ratio, neg_mean_distance_hits,
    fmr, fmrcr, fmrcr2, target_data).
    """
    order = np.lexsort((compare_results[:, 0], compare_results[:, 3]))
    return compare_results[order]
def lexsort_hr_fmr(compare_results):
    """Sort rows ascending by fmr (primary), then hit_ratio (secondary).

    Row layout: (combined_ratio, hit_ratio, neg_mean_distance_hits,
    fmr, fmrcr, fmrcr2, target_data).
    """
    order = np.lexsort((compare_results[:, 1], compare_results[:, 3]))
    return compare_results[order]
def sort_fmrcr1(compare_results):
    """Sort rows ascending by fmrcr (fmr * combined_ratio), column 4.

    Row layout: (combined_ratio, hit_ratio, neg_mean_distance_hits,
    fmr, fmrcr, fmrcr2, target_data).
    """
    order = np.argsort(compare_results[:, 4])
    return compare_results[order]
def sort_fmrcr2(compare_results):
    """Sort rows ascending by fmrcr2 (fmr + combined_ratio), column 5.

    Row layout: (combined_ratio, hit_ratio, neg_mean_distance_hits,
    fmr, fmrcr, fmrcr2, target_data).
    """
    order = np.argsort(compare_results[:, 5])
    return compare_results[order]
def eval_all_combinations(datastore, datastore_name):
    """Run compare_combinedSetupB once per ranking strategy and collect results.

    :param datastore: dict of per-image feature records
    :param datastore_name: label stored alongside each result row
    :return: list of result dicts, one per sort method
    """
    # TODO: quick and dirty code needs refactoring to look like compare_compoelem or compare_deepfeatures
    all_res_metrics = []
    for sort_method in [lexsort_fmr_cr, lexsort_fmr_hr, lexsort_cr_fmr,
                        lexsort_hr_fmr, sort_fmrcr1, sort_fmrcr2]:
        experiment_id = "cB|"+sort_method.__name__+";A|ceb|normGlAC|th150;sift|bfm1"
        print("EXPERIMENT:", experiment_id)
        # Single timestamp taken right before the (slow) comparison so that
        # eval_time_s measures only the evaluation itself; the old code
        # assigned start_time twice, the first assignment being dead.
        start_time = datetime.datetime.now()
        eval_dataframe = compare_combinedSetupB(list(datastore.values()), sort_method)
        all_res_metrics.append({
            "combinedSetup": "compare_combinedSetupB",
            "experiment_id": experiment_id,
            "sort_method": sort_method.__name__,
            "config": config,
            "datetime": start_time,
            "eval_time_s": (datetime.datetime.now() - start_time).seconds,
            "datastore_name": datastore_name,
            "eval_dataframe": eval_dataframe,
            "combined": True,
            "new": True,
        })
    return all_res_metrics
| 5,235 | 0 | 206 |
8ac1f16bfc18ec92e12b019d710e5fda17c7d467 | 2,487 | py | Python | train.py | zhoufengfan/light-weight-network | 14b9c4c8f3660a249344837beff80cc573cab167 | [
"MIT"
] | null | null | null | train.py | zhoufengfan/light-weight-network | 14b9c4c8f3660a249344837beff80cc573cab167 | [
"MIT"
] | null | null | null | train.py | zhoufengfan/light-weight-network | 14b9c4c8f3660a249344837beff80cc573cab167 | [
"MIT"
] | null | null | null | import torch.autograd
import torch.nn as nn
from dataset import Dataset2
if __name__ == '__main__':
    # Hyper-parameters / dataset geometry.
    num_epoch = 500
    data_vector_dim = 20
    item_of_single_class = 10
    train_dataset = Dataset2(item_of_single_class=item_of_single_class, data_vector_dim=data_vector_dim)
    test_dataset = Dataset2(item_of_single_class=item_of_single_class, data_vector_dim=data_vector_dim)
    class_num = len(train_dataset.noise_scope_list)
    dataset_length = item_of_single_class * class_num
    train_dataloader = torch.utils.data.DataLoader(
        dataset=train_dataset, batch_size=dataset_length, shuffle=True
    )
    test_dataloader = torch.utils.data.DataLoader(
        dataset=test_dataset, batch_size=dataset_length, shuffle=True
    )
    net = Network2(input_dim=data_vector_dim, output_dim=class_num)
    # BUG FIX: the training loop used to call data_batch.cuda() unconditionally
    # while the model was only moved to the GPU when one was available, which
    # crashed on CPU-only machines.  Pick the device once and use it everywhere.
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    net = net.to(device)
    criterion = nn.CrossEntropyLoss()
    optimizer = torch.optim.Adam(net.parameters(), lr=0.0001)
    for epoch in range(num_epoch):
        for data_batch, label_batch in train_dataloader:
            data_batch = data_batch.to(device)
            label_batch = label_batch.to(device)
            real_out = net(data_batch)
            loss = criterion(real_out, label_batch)
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
    # Final accuracy on the held-out dataset after all epochs.
    print("acc:{:.6f}".format(evaluate(net, test_dataloader)))
| 32.298701 | 105 | 0.599115 | import torch.autograd
import torch.nn as nn
from dataset import Dataset2
class Network2(nn.Module):
    """Fully connected classifier: input -> six 256-unit ReLU hidden layers
    -> raw class logits (no softmax; pair with CrossEntropyLoss)."""

    def __init__(self, input_dim=20, output_dim=7):
        super(Network2, self).__init__()
        hidden = 256
        # First hidden layer, five more hidden layers, then the output head:
        # 13 modules total, identical to the original hand-written stack.
        layers = [nn.Linear(input_dim, hidden), nn.ReLU(True)]
        for _ in range(5):
            layers.extend([nn.Linear(hidden, hidden), nn.ReLU(True)])
        layers.append(nn.Linear(hidden, output_dim))
        self.dis = nn.Sequential(*layers)

    def forward(self, x):
        """Return class logits for a batch of input vectors."""
        return self.dis(x)
def evaluate(model, loader):
    """Return classification accuracy of *model* over *loader*.

    Puts the model into eval mode (not restored afterwards -- callers that
    keep training must call model.train() themselves, as before) and moves
    each batch to whatever device the model's parameters live on, instead of
    the previous hard-coded .cuda() calls that crashed on CPU-only machines.

    :param model: a torch.nn.Module producing per-class logits
    :param loader: DataLoader yielding (inputs, integer labels) batches
    :return: fraction of correctly classified samples
    """
    model.eval()
    # Assumes the model has at least one parameter; all parameters are
    # expected to live on the same device.
    device = next(model.parameters()).device
    correct = 0
    total = len(loader.dataset)
    for x, y in loader:
        x, y = x.to(device), y.to(device)
        with torch.no_grad():
            logits = model(x)
        pred = logits.argmax(dim=1)
        correct += torch.eq(pred, y).sum().float().item()
    return correct / total
if __name__ == '__main__':
    # Hyper-parameters / dataset geometry.
    num_epoch = 500
    data_vector_dim = 20
    item_of_single_class = 10
    train_dataset = Dataset2(item_of_single_class=item_of_single_class, data_vector_dim=data_vector_dim)
    test_dataset = Dataset2(item_of_single_class=item_of_single_class, data_vector_dim=data_vector_dim)
    class_num = len(train_dataset.noise_scope_list)
    dataset_length = item_of_single_class * class_num
    train_dataloader = torch.utils.data.DataLoader(
        dataset=train_dataset, batch_size=dataset_length, shuffle=True
    )
    test_dataloader = torch.utils.data.DataLoader(
        dataset=test_dataset, batch_size=dataset_length, shuffle=True
    )
    net = Network2(input_dim=data_vector_dim, output_dim=class_num)
    # BUG FIX: the training loop used to call data_batch.cuda() unconditionally
    # while the model was only moved to the GPU when one was available, which
    # crashed on CPU-only machines.  Pick the device once and use it everywhere.
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    net = net.to(device)
    criterion = nn.CrossEntropyLoss()
    optimizer = torch.optim.Adam(net.parameters(), lr=0.0001)
    for epoch in range(num_epoch):
        for data_batch, label_batch in train_dataloader:
            data_batch = data_batch.to(device)
            label_batch = label_batch.to(device)
            real_out = net(data_batch)
            loss = criterion(real_out, label_batch)
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
    # Final accuracy on the held-out dataset after all epochs.
    print("acc:{:.6f}".format(evaluate(net, test_dataloader)))
a715ff7e13069bece07342b123a7c161f5c6dfa4 | 10,950 | py | Python | taxonresolver/tree.py | biomadeira/taxonomy-resolver | ce7a264c2b8b552dde6284b4a74821184be8e489 | [
"Apache-2.0"
] | null | null | null | taxonresolver/tree.py | biomadeira/taxonomy-resolver | ce7a264c2b8b552dde6284b4a74821184be8e489 | [
"Apache-2.0"
] | null | null | null | taxonresolver/tree.py | biomadeira/taxonomy-resolver | ce7a264c2b8b552dde6284b4a74821184be8e489 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8
"""
Taxonomy Resolver
:copyright: (c) 2020-2021.
:license: Apache 2.0, see LICENSE for more details.
"""
import io
import zipfile
import pandas as pd
from taxonresolver.utils import parse_tax_ids
from taxonresolver.utils import print_and_exit
from taxonresolver.utils import split_line
from taxonresolver.utils import download_taxonomy_dump
from taxonresolver.utils import tree_reparenting
from taxonresolver.utils import tree_traversal
from taxonresolver.utils import get_nested_sets
from taxonresolver.utils import get_children
from taxonresolver.utils import get_parents
def build_tree(inputfile: str, root: str = "1") -> pd.DataFrame:
    """
    Given the path to NCBI Taxonomy 'taxdmp.zip' file or simply a
    'nodes.dmp' file, builds a slim tree data structure.

    :param inputfile: Path to inputfile
    :param root: TaxID of the root Node
    :return: pandas DataFrame
    """
    tree = {}
    # read nodes
    if zipfile.is_zipfile(inputfile):
        with zipfile.ZipFile(inputfile) as taxdmp:
            dmp = taxdmp.open("nodes.dmp")
    else:
        # assumes a plain 'nodes.dmp' file was passed; opened in binary mode
        # to match the zip branch, then decoded line by line below
        dmp = open(inputfile, "rb")
    for line in io.TextIOWrapper(dmp):
        fields = split_line(line)
        # fields[0..2]: tax_id, parent tax_id, rank -- presumably the
        # nodes.dmp column order of the NCBI taxdump format
        tree[fields[0]] = {
            "id": fields[0],
            "parent_id": fields[1],
            "rank": fields[2]
        }
    dmp.close()
    # creating a full tree
    tree = tree_reparenting(tree)
    # transversing the tree to find 'left' and 'right' indexes
    nodes = []
    tree_traversal(tree[root], nodes)
    nested_set, visited, counter = [], {}, -1
    for i, node in enumerate(nodes):
        taxid, depth = node[0], node[1]
        parent_id, rank = tree[taxid]["parent_id"], tree[taxid]["rank"]
        if taxid not in visited:
            # create array with left ('lft') index
            nested_set.append([taxid, parent_id, rank, depth, i + 1, 0])
            counter += 1
            visited[taxid] = counter
        else:
            # update the right ('rgt') index
            # (the traversal emits each node a second time on the way back up,
            # which is what makes the nested-set intervals well formed)
            nested_set[visited[taxid]][5] = i + 1
    # load dict into a pandas DataFrame for fast indexing and operations
    df = pd.DataFrame(nested_set, columns=["id", "parent_id", "rank", "depth", "lft", "rgt"]) \
        .astype(dtype={"id": str, "parent_id": str, "rank": str})
    return df
def write_tree(tree: pd.DataFrame, outputfile: str, outputformat: str = "pickle") -> None:
    """
    Serialises a Tree (pandas DataFrame) to disk.

    :param tree: pandas DataFrame
    :param outputfile: Path to outputfile
    :param outputformat: currently only "pickle" is supported
    :return: (side-effects) writes to file
    """
    if outputformat == "pickle":
        tree.to_pickle(outputfile)
        return
    # unsupported formats are a hard error
    print_and_exit(f"Output format '{outputformat}' is not valid!")
def load_tree(inputfile: str, inputformat: str = "pickle") -> pd.DataFrame:
    """
    Deserialises a Tree (pandas DataFrame) from disk.

    :param inputfile: Path to inputfile
    :param inputformat: currently only "pickle" is supported
    :return: pandas DataFrame
    """
    if inputformat == "pickle":
        return pd.read_pickle(inputfile)
    # unsupported formats are a hard error
    print_and_exit(f"Input format '{inputformat}' is not valid!")
def filter_tree(tree: pd.DataFrame, filterids: list or str or None = None,
                root: str = "1", ignoreinvalid: bool = True, sep: str = None, indx: int = 0) -> pd.DataFrame:
    """
    Filters an existing pandas DataFrame based on a List of TaxIDs.

    :param tree: pandas DataFrame
    :param filterids: list of TaxIDs or Path to file with TaxIDs to filter
        (i.e. to keep) in the final set of results; required in practice
    :param root: TaxID of the root Node (kept for API compatibility;
        currently unused by the filtering logic)
    :param ignoreinvalid: whether to ignore invalid TaxIDs or not
    :param sep: separator for splitting the input file lines
    :param indx: index used for splicing the resulting list
    :return: pandas DataFrame
    """
    message = ("Some of the provided TaxIDs are not valid or not found "
               "in the built Tree.")
    # Fail with a clear message instead of the NameError the old code raised
    # when no filter was provided (taxids_filter was left unbound).
    if filterids is None:
        print_and_exit("No TaxIDs provided to filter the Tree with!")
    if type(filterids) is list:
        taxids_filter = set(filterids)
    elif type(filterids) is str:
        taxids_filter = set(parse_tax_ids(filterids, sep, indx))
    if ignoreinvalid or validate_taxids(tree, taxids_filter):
        # if ignoring invalid, we should still only return TaxIDs that exist in the Tree
        taxids_filter = taxids_filter.intersection(set(tree["id"].values))
        # get a subset dataset sorted (by 'lft')
        subset = tree[tree["id"].isin(taxids_filter)].sort_values("lft")
        nested_sets = get_nested_sets(subset)
        for l, r in nested_sets:
            # keep the whole subtree of every selected node...
            taxids = get_children(tree, l, r)
            taxids_filter.update(taxids)
            # ...plus its lineage towards the root, so the result stays a tree
            taxids = get_parents(tree, l, r)
            taxids_filter.update(taxids)
        df = tree[tree["id"].isin(taxids_filter)].reset_index()
    else:
        print_and_exit(message)
    return df
def search_taxids(tree: pd.DataFrame,
                  includeids: list or str,
                  excludeids: list or str or None = None,
                  filterids: list or str or None = None,
                  ignoreinvalid: bool = True,
                  sep: str = None, indx: int = 0) -> list or set:
    """
    Searches an existing tree pandas DataFrame and produces a list of TaxIDs.
    Search is performed based on a list of TaxIDs (includedids). A search is also
    performed on a list of TaxIDs (excludedids), if provided. Those will be
    removed from the search on the includedids. From this final list of TaxIDs,
    filterids can used to clean the final set. This could be useful to compress a
    final list of TaxIDs, to only return those known to exist in another dataset.

    :param tree: pandas DataFrame
    :param includeids: list of TaxIDs or Path to file with TaxIDs to search with
    :param excludeids: list of TaxIDs or Path to file with TaxIDs to exclude
        from the search (optional)
    :param filterids: list of TaxIDs or Path to file with TaxIDs to filter
        (i.e. to keep) in the final set of results (optional)
    :param ignoreinvalid: whether to ignore invalid TaxIDs or not
    :param sep: separator for splitting the input file lines
    :param indx: index used for splicing the the resulting list
    :return: list of TaxIDs
    """
    message = ("Some of the provided TaxIDs are not valid or not found "
               "in the built Tree.")
    # find all the children nodes of the list of TaxIDs to be included in the search
    # NOTE(review): if includeids is neither a list nor a str, taxids_include is
    # never bound and the code below raises NameError -- confirm callers always
    # pass one of the documented types.
    if type(includeids) is list:
        taxids_include = set(includeids)
    elif type(includeids) is str:
        taxids_include = set(parse_tax_ids(includeids))
    if ignoreinvalid or validate_taxids(tree, taxids_include):
        # if ignoring invalid, we should still only return TaxIDs that exist in the Tree
        taxids_found = taxids_include.intersection(set(tree["id"].values))
        # get a subset dataset sorted (by 'lft')
        subset = tree[tree["id"].isin(taxids_found)].sort_values("lft")
        nested_sets = get_nested_sets(subset)
        for l, r in nested_sets:
            # expand every hit to its full subtree via the nested-set interval
            taxids = get_children(tree, l, r)
            taxids_found.update(taxids)
    else:
        print_and_exit(message)
    # find all the children nodes of the list of TaxIDs to be excluded from the search
    if excludeids:
        if type(excludeids) is list:
            taxids_exclude = set(excludeids)
        elif type(excludeids) is str:
            taxids_exclude = set(parse_tax_ids(excludeids))
        if ignoreinvalid or validate_taxids(tree, taxids_exclude):
            subset = tree[tree["id"].isin(taxids_exclude)].sort_values("lft")
            nested_sets = get_nested_sets(subset)
            for l, r in nested_sets:
                # excluded nodes also take their whole subtree with them
                taxids = get_children(tree, l, r)
                taxids_exclude.update(taxids)
            taxids_found = taxids_found.difference(taxids_exclude)
        else:
            print_and_exit(message)
    # keep only TaxIDs that are in the provided list of TaxIDs to filter with
    if filterids:
        if type(filterids) is list:
            taxids_filter = set(filterids)
        elif type(filterids) is str:
            taxids_filter = set(parse_tax_ids(filterids, sep, indx))
        if ignoreinvalid or validate_taxids(tree, taxids_filter):
            taxids_found = taxids_found.intersection(taxids_filter)
        else:
            print_and_exit(message)
    return taxids_found
def validate_taxids(tree: pd.DataFrame, validateids: list or set or str) -> bool:
    """
    Checks whether every provided TaxID exists in the Tree.

    :param tree: pandas DataFrame
    :param validateids: list/set of TaxIDs or Path to file with TaxIDs to validate
    :return: boolean
    """
    # normalise the input to a set of TaxID strings
    if type(validateids) is set:
        taxids_validate = validateids
    elif type(validateids) is list:
        taxids_validate = set(validateids)
    elif type(validateids) is str:
        taxids_validate = set(parse_tax_ids(validateids))
    found = taxids_validate.intersection(set(tree["id"].values))
    # all TaxIDs are valid iff none were lost in the intersection
    return len(found) == len(taxids_validate)
| 38.69258 | 109 | 0.652968 | #!/usr/bin/env python
# -*- coding: utf-8
"""
Taxonomy Resolver
:copyright: (c) 2020-2021.
:license: Apache 2.0, see LICENSE for more details.
"""
import io
import zipfile
import pandas as pd
from taxonresolver.utils import parse_tax_ids
from taxonresolver.utils import print_and_exit
from taxonresolver.utils import split_line
from taxonresolver.utils import download_taxonomy_dump
from taxonresolver.utils import tree_reparenting
from taxonresolver.utils import tree_traversal
from taxonresolver.utils import get_nested_sets
from taxonresolver.utils import get_children
from taxonresolver.utils import get_parents
def build_tree(inputfile: str, root: str = "1") -> pd.DataFrame:
    """
    Given the path to NCBI Taxonomy 'taxdmp.zip' file or simply a
    'nodes.dmp' file, builds a slim tree data structure.

    :param inputfile: Path to inputfile
    :param root: TaxID of the root Node
    :return: pandas DataFrame
    """
    tree = {}
    # read nodes
    if zipfile.is_zipfile(inputfile):
        with zipfile.ZipFile(inputfile) as taxdmp:
            dmp = taxdmp.open("nodes.dmp")
    else:
        # assumes a plain 'nodes.dmp' file was passed; opened in binary mode
        # to match the zip branch, then decoded line by line below
        dmp = open(inputfile, "rb")
    for line in io.TextIOWrapper(dmp):
        fields = split_line(line)
        # fields[0..2]: tax_id, parent tax_id, rank -- presumably the
        # nodes.dmp column order of the NCBI taxdump format
        tree[fields[0]] = {
            "id": fields[0],
            "parent_id": fields[1],
            "rank": fields[2]
        }
    dmp.close()
    # creating a full tree
    tree = tree_reparenting(tree)
    # transversing the tree to find 'left' and 'right' indexes
    nodes = []
    tree_traversal(tree[root], nodes)
    nested_set, visited, counter = [], {}, -1
    for i, node in enumerate(nodes):
        taxid, depth = node[0], node[1]
        parent_id, rank = tree[taxid]["parent_id"], tree[taxid]["rank"]
        if taxid not in visited:
            # create array with left ('lft') index
            nested_set.append([taxid, parent_id, rank, depth, i + 1, 0])
            counter += 1
            visited[taxid] = counter
        else:
            # update the right ('rgt') index
            # (the traversal emits each node a second time on the way back up,
            # which is what makes the nested-set intervals well formed)
            nested_set[visited[taxid]][5] = i + 1
    # load dict into a pandas DataFrame for fast indexing and operations
    df = pd.DataFrame(nested_set, columns=["id", "parent_id", "rank", "depth", "lft", "rgt"]) \
        .astype(dtype={"id": str, "parent_id": str, "rank": str})
    return df
def write_tree(tree: pd.DataFrame, outputfile: str, outputformat: str = "pickle") -> None:
    """
    Serialises a Tree (pandas DataFrame) to disk.

    :param tree: pandas DataFrame
    :param outputfile: Path to outputfile
    :param outputformat: currently only "pickle" is supported
    :return: (side-effects) writes to file
    """
    if outputformat == "pickle":
        tree.to_pickle(outputfile)
        return
    # unsupported formats are a hard error
    print_and_exit(f"Output format '{outputformat}' is not valid!")
def load_tree(inputfile: str, inputformat: str = "pickle") -> pd.DataFrame:
    """
    Deserialises a Tree (pandas DataFrame) from disk.

    :param inputfile: Path to inputfile
    :param inputformat: currently only "pickle" is supported
    :return: pandas DataFrame
    """
    if inputformat == "pickle":
        return pd.read_pickle(inputfile)
    # unsupported formats are a hard error
    print_and_exit(f"Input format '{inputformat}' is not valid!")
def filter_tree(tree: pd.DataFrame, filterids: list or str or None = None,
                root: str = "1", ignoreinvalid: bool = True, sep: str = None, indx: int = 0) -> pd.DataFrame:
    """
    Filters an existing pandas DataFrame based on a List of TaxIDs.

    :param tree: pandas DataFrame
    :param filterids: list of TaxIDs or Path to file with TaxIDs to filter
        (i.e. to keep) in the final set of results; required in practice
    :param root: TaxID of the root Node (kept for API compatibility;
        currently unused by the filtering logic)
    :param ignoreinvalid: whether to ignore invalid TaxIDs or not
    :param sep: separator for splitting the input file lines
    :param indx: index used for splicing the resulting list
    :return: pandas DataFrame
    """
    message = ("Some of the provided TaxIDs are not valid or not found "
               "in the built Tree.")
    # Fail with a clear message instead of the NameError the old code raised
    # when no filter was provided (taxids_filter was left unbound).
    if filterids is None:
        print_and_exit("No TaxIDs provided to filter the Tree with!")
    if type(filterids) is list:
        taxids_filter = set(filterids)
    elif type(filterids) is str:
        taxids_filter = set(parse_tax_ids(filterids, sep, indx))
    if ignoreinvalid or validate_taxids(tree, taxids_filter):
        # if ignoring invalid, we should still only return TaxIDs that exist in the Tree
        taxids_filter = taxids_filter.intersection(set(tree["id"].values))
        # get a subset dataset sorted (by 'lft')
        subset = tree[tree["id"].isin(taxids_filter)].sort_values("lft")
        nested_sets = get_nested_sets(subset)
        for l, r in nested_sets:
            # keep the whole subtree of every selected node...
            taxids = get_children(tree, l, r)
            taxids_filter.update(taxids)
            # ...plus its lineage towards the root, so the result stays a tree
            taxids = get_parents(tree, l, r)
            taxids_filter.update(taxids)
        df = tree[tree["id"].isin(taxids_filter)].reset_index()
    else:
        print_and_exit(message)
    return df
def search_taxids(tree: pd.DataFrame,
                  includeids: list or str,
                  excludeids: list or str or None = None,
                  filterids: list or str or None = None,
                  ignoreinvalid: bool = True,
                  sep: str = None, indx: int = 0) -> list or set:
    """
    Searches an existing tree pandas DataFrame and produces a list of TaxIDs.
    Search is performed based on a list of TaxIDs (includedids). A search is also
    performed on a list of TaxIDs (excludedids), if provided. Those will be
    removed from the search on the includedids. From this final list of TaxIDs,
    filterids can used to clean the final set. This could be useful to compress a
    final list of TaxIDs, to only return those known to exist in another dataset.

    :param tree: pandas DataFrame
    :param includeids: list of TaxIDs or Path to file with TaxIDs to search with
    :param excludeids: list of TaxIDs or Path to file with TaxIDs to exclude
        from the search (optional)
    :param filterids: list of TaxIDs or Path to file with TaxIDs to filter
        (i.e. to keep) in the final set of results (optional)
    :param ignoreinvalid: whether to ignore invalid TaxIDs or not
    :param sep: separator for splitting the input file lines
    :param indx: index used for splicing the the resulting list
    :return: list of TaxIDs
    """
    message = ("Some of the provided TaxIDs are not valid or not found "
               "in the built Tree.")
    # find all the children nodes of the list of TaxIDs to be included in the search
    # NOTE(review): if includeids is neither a list nor a str, taxids_include is
    # never bound and the code below raises NameError -- confirm callers always
    # pass one of the documented types.
    if type(includeids) is list:
        taxids_include = set(includeids)
    elif type(includeids) is str:
        taxids_include = set(parse_tax_ids(includeids))
    if ignoreinvalid or validate_taxids(tree, taxids_include):
        # if ignoring invalid, we should still only return TaxIDs that exist in the Tree
        taxids_found = taxids_include.intersection(set(tree["id"].values))
        # get a subset dataset sorted (by 'lft')
        subset = tree[tree["id"].isin(taxids_found)].sort_values("lft")
        nested_sets = get_nested_sets(subset)
        for l, r in nested_sets:
            # expand every hit to its full subtree via the nested-set interval
            taxids = get_children(tree, l, r)
            taxids_found.update(taxids)
    else:
        print_and_exit(message)
    # find all the children nodes of the list of TaxIDs to be excluded from the search
    if excludeids:
        if type(excludeids) is list:
            taxids_exclude = set(excludeids)
        elif type(excludeids) is str:
            taxids_exclude = set(parse_tax_ids(excludeids))
        if ignoreinvalid or validate_taxids(tree, taxids_exclude):
            subset = tree[tree["id"].isin(taxids_exclude)].sort_values("lft")
            nested_sets = get_nested_sets(subset)
            for l, r in nested_sets:
                # excluded nodes also take their whole subtree with them
                taxids = get_children(tree, l, r)
                taxids_exclude.update(taxids)
            taxids_found = taxids_found.difference(taxids_exclude)
        else:
            print_and_exit(message)
    # keep only TaxIDs that are in the provided list of TaxIDs to filter with
    if filterids:
        if type(filterids) is list:
            taxids_filter = set(filterids)
        elif type(filterids) is str:
            taxids_filter = set(parse_tax_ids(filterids, sep, indx))
        if ignoreinvalid or validate_taxids(tree, taxids_filter):
            taxids_found = taxids_found.intersection(taxids_filter)
        else:
            print_and_exit(message)
    return taxids_found
def validate_taxids(tree: pd.DataFrame, validateids: list or set or str) -> bool:
    """
    Checks whether every provided TaxID exists in the Tree.

    :param tree: pandas DataFrame
    :param validateids: list/set of TaxIDs or Path to file with TaxIDs to validate
    :return: boolean
    """
    # normalise the input to a set of TaxID strings
    if type(validateids) is set:
        taxids_validate = validateids
    elif type(validateids) is list:
        taxids_validate = set(validateids)
    elif type(validateids) is str:
        taxids_validate = set(parse_tax_ids(validateids))
    found = taxids_validate.intersection(set(tree["id"].values))
    # all TaxIDs are valid iff none were lost in the intersection
    return len(found) == len(taxids_validate)
class TaxonResolver(object):
    """High-level facade over the module-level tree functions: download,
    build, load, write, filter, validate and search NCBI Taxonomy trees
    backed by a pandas DataFrame (kept in ``self.tree``)."""

    def __init__(self, logging=None, **kwargs):
        self.tree = None
        self.logging = logging
        self.kwargs = kwargs

    def download(self, outputfile, outputformat="zip") -> None:
        """Download the NCBI Taxonomy dump file."""
        download_taxonomy_dump(outputfile, outputformat.lower())

    def build(self, inputfile) -> None:
        """Build a Tree from the NCBI Taxonomy dump file."""
        self.tree = build_tree(inputfile)

    def write(self, outputfile, outputformat="pickle") -> None:
        """Write a Tree in Pickle format."""
        write_tree(self.tree, outputfile, outputformat)

    def load(self, inputfile, inputformat="pickle") -> None:
        """Load a Tree from a Pickle file."""
        self.tree = load_tree(inputfile, inputformat)

    def filter(self, taxidfilter, **kwargs) -> None:
        """Re-build a minimal Tree based on the TaxIDs provided."""
        if type(self.tree) is not pd.DataFrame:
            print_and_exit("The Taxonomy Tree needs to be built "
                           "before 'filter' can be called.")
        self.tree = filter_tree(self.tree, taxidfilter, **kwargs)

    def validate(self, taxidinclude) -> bool:
        """Validate a list of TaxIDs against a Tree."""
        return validate_taxids(self.tree, taxidinclude)

    def search(self, taxidinclude, taxidexclude=None, taxidfilter=None,
               ignoreinvalid=True, **kwargs) -> list or set or None:
        """Search a Tree based on a list of TaxIDs."""
        return search_taxids(self.tree, taxidinclude, taxidexclude,
                             taxidfilter, ignoreinvalid, **kwargs)
| 107 | 1,605 | 23 |
5f8bc16102c8897c15c05a69fddf50e1bfd1254d | 5,205 | py | Python | Code/Server/game/chunkthread.py | Abrasam/Part-II-Project | bf2ac2c8a9a87859ad9fb189405d7ce76e19a8f4 | [
"MIT"
] | 1 | 2020-06-16T08:48:26.000Z | 2020-06-16T08:48:26.000Z | Code/Server/game/chunkthread.py | Abrasam/Voxel-Populi | bf2ac2c8a9a87859ad9fb189405d7ce76e19a8f4 | [
"MIT"
] | null | null | null | Code/Server/game/chunkthread.py | Abrasam/Voxel-Populi | bf2ac2c8a9a87859ad9fb189405d7ce76e19a8f4 | [
"MIT"
] | null | null | null | import json
import threading
import time
import datetime
import socket
import asyncio
from collections import deque
from queue import Empty, Queue
from game.const import *
from game.world import Chunk
from kademlia.server import DHTServer
| 33.365385 | 143 | 0.559078 | import json
import threading
import time
import datetime
import socket
import asyncio
from collections import deque
from queue import Empty, Queue
from game.const import *
from game.world import Chunk
from kademlia.server import DHTServer
class ChunkThread(threading.Thread):
def __init__(self, dht : DHTServer, chunk : Chunk):
threading.Thread.__init__(self)
self.chunk = chunk
self.players = {}
self.clients = []
self.dht = dht
self.done = False
self.q = Queue()
self.setDaemon(True)
self.start()
def run(self):
# ticks = 0
# timer = 0
while True:
t = time.monotonic()
try:
n = self.q.qsize()
for _ in range(n):
packet_data = self.q.get_nowait()
self._process_packet(*packet_data)
except Empty:
pass
for client in self.players:
tmp = datetime.datetime.utcnow()
client.send(Packet(PacketType.TIME, [tmp.hour*60+tmp.minute+tmp.second/60]).dict())
for c in self.players:
tim = time.monotonic()
if tim - self.players[c].touched > 10:
print("Killing this boi")
#c.socket.close()
#self.remove_client(c)
time.sleep(max(0,TICK_LENGTH - (time.monotonic() - t)))
if self.done and self.q.empty():
return
'''ticks += 1
timer += time.monotonic() - t
if timer > 5:
print("TPS:" + str(ticks / timer))
timer = 0
ticks = 0'''
def _process_packet(self, packet, sender):
if packet["type"] == PacketType.PLAYER_REGISTER.value:
if sender not in self.players:
self.players[sender] = Player(sender.name, (0,0,0))
for c in self.clients:
if c == sender: continue
c.send(packet)
elif packet["type"] == PacketType.PLAYER_DEREGISTER.value:
if sender in self.players:
asyncio.run_coroutine_threadsafe(self.dht.save_player(self.players[sender].name, self.players[sender].location), self.dht.loop)
del self.players[sender]
for c in self.clients:
if c == sender: continue
c.send(packet)
elif packet["type"] == PacketType.PLAYER_MOVE.value:
if sender in self.players:
self.players[sender].touch()
self.players[sender].update_location(packet["args"][0:3])
for c in self.clients:
if c == sender: continue
c.send(packet)
elif packet["type"] == PacketType.BLOCK_CHANGE.value:
pos = packet["args"][0:3]
if pos in self.chunk:
self.chunk.update(*packet["args"])
data = json.dumps(self.chunk.encode()).encode()
for client in self.clients:
client.send_raw(data)
def add_client(self, client):
self.clients.append(client)
client.send(self.chunk.encode())
for c in self.players:
if c == client: continue
client.send(Packet(PacketType.PLAYER_REGISTER, self.chunk.location, player=c.name).dict())
def remove_client(self, client):
    """Detach *client*; queue a deregister packet if it was a player.

    Returns True when no clients remain (the caller may then stop the thread).
    """
    self.clients.remove(client)
    if client in self.players:
        deregister = Packet(PacketType.PLAYER_DEREGISTER, self.chunk.location, player=client.name)
        self.q.put((deregister.dict(), client))
    return not self.clients
def stop(self): # maybe need a lock on this?
    """Ask run() to exit; it finishes once the packet queue has drained."""
    self.done = True
class Player:
    """A connected player: display name, world position and an idle timer."""

    def __init__(self, name, location):
        self.name = name
        self.location = location
        # Monotonic timestamp of the last activity; the chunk thread
        # compares this against the idle timeout.
        self.touched = time.monotonic()

    def update_location(self, loc):
        """Replace the player's position with *loc*."""
        self.location = loc

    def touch(self):
        """Record that the player was active just now."""
        self.touched = time.monotonic()
class Client:
    """One connection to this chunk server, plus its outgoing frame queue."""

    def __init__(self, type, chunk_thread, name, conn):
        self.type = type # 1 = player, 2 = other server (not used yet).
        self.chunk_thread = chunk_thread
        self.name = name
        self.to_send = deque()  # pending outgoing frames, each newline-terminated
        self.buf = b''          # partial inbound frame, consumed by recv()
        self.socket = conn

    def send(self, packet):
        """Queue a dict for delivery as one newline-terminated JSON frame."""
        self.to_send.append(json.dumps(packet).encode() + b'\n')

    def send_raw(self, raw_packet):
        """Queue pre-encoded bytes for delivery, adding the frame terminator."""
        self.to_send.append(raw_packet + b'\n')

    def recv(self, sender):
        """Decode the buffered frame and hand it to the chunk thread's queue."""
        self.chunk_thread.q.put((json.loads(self.buf.decode()), sender))
        self.buf = b''

    def __repr__(self):
        return f"<type={self.type}, chunk={self.chunk_thread.chunk.location}, player={self.name}>"

    def __str__(self):
        return self.__repr__()
class Packet:
    """A wire message: a PacketType, its argument list and an optional player name."""

    def __init__(self, type : PacketType, args, player=""):
        self.type = type
        self.args = args
        self.player = player

    def dict(self):
        """Return the message as a plain JSON-serialisable dict."""
        return {"type": self.type.value, "args": self.args, "player": self.player}

    def encode(self):
        """Return the message as UTF-8 encoded JSON bytes."""
        return json.dumps(self.dict()).encode()
| 4,394 | -9 | 574 |
58c3911c166eeff3145b577b0906f6c294f5e653 | 2,521 | py | Python | Train/diTau_reference.py | dntaylor/DeepJet | 249610b3b80543c8c84f5ba795bbb07c097f8150 | [
"Apache-2.0"
] | null | null | null | Train/diTau_reference.py | dntaylor/DeepJet | 249610b3b80543c8c84f5ba795bbb07c097f8150 | [
"Apache-2.0"
] | null | null | null | Train/diTau_reference.py | dntaylor/DeepJet | 249610b3b80543c8c84f5ba795bbb07c097f8150 | [
"Apache-2.0"
] | null | null | null | import os
os.environ['DECORRELATE'] = "False"
from DeepJetCore.training.training_base import training_base
from Losses import loss_NLL, loss_meansquared
from DeepJetCore.modeltools import fixLayersContaining,printLayerInfosAndWeights
import subprocess

import tensorflow as tf
from keras import backend as k
# Cap TensorFlow's GPU memory usage so other jobs can share the card.
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
config.gpu_options.per_process_gpu_memory_fraction = 0.6
k.tensorflow_backend.set_session(tf.Session(config=config))

#train=training_base(testrun=False,renewtokens=True,useAFS=False,useweights=True)
train=training_base(testrun=False,renewtokens=True,useAFS=False,useweights=False)

# Build and compile the model only if training_base did not already set one.
# NOTE(review): indentation reconstructed — setModel/compileModel are grouped
# under the modelSet() guard as in upstream DeepJetCore examples; confirm.
if not train.modelSet():
    from models import model_diTauReference as trainingModel
    #from models import model_diTauDense as trainingModel
    datasets = ['global','cpf','npf','sv']
    #datasets = ['global']
    train.setModel(trainingModel,
                   datasets=datasets,
                   dropoutRate=0.1,
                   momentum=0.9,
                   batchnorm=False,
                   depth=4,
                   width=200,
                   pattern=[32,32],
                   )
    train.compileModel(learningrate=0.0001,
                       loss=['categorical_crossentropy'],
                       metrics=['accuracy'],
                       loss_weights=[1.],
                       )

#train.train_data.maxFilesOpen = 1
#train.val_data.maxFilesOpen = 1

# Dump a text summary and a diagram of the network next to the checkpoints.
with open(train.outputDir + 'summary.txt','w') as f:
    train.keras_model.summary(print_fn=lambda x: f.write(x + '\n'))
from keras.utils import plot_model
plot_model(train.keras_model, to_file=train.outputDir+'model.eps')
# convert eps to png
command = ['convert', '-density', '300', train.outputDir+'model.eps', train.outputDir+'model.png']
result = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE).communicate()
print(train.keras_model.summary())

model, history = train.trainModel(nepochs=500,
                                  batchsize=5000,
                                  stop_patience=100,
                                  lr_factor=0.8,
                                  lr_patience=10,
                                  lr_epsilon=0.0001,
                                  lr_cooldown=8,
                                  lr_minimum=0.00000001,
                                  maxqsize=10,
                                  verbose=1,
                                  )
| 34.534247 | 102 | 0.582705 | import os
os.environ['DECORRELATE'] = "False"
from DeepJetCore.training.training_base import training_base
from Losses import loss_NLL, loss_meansquared
from DeepJetCore.modeltools import fixLayersContaining,printLayerInfosAndWeights
import subprocess

import tensorflow as tf
from keras import backend as k
# Cap TensorFlow's GPU memory usage so other jobs can share the card.
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
config.gpu_options.per_process_gpu_memory_fraction = 0.6
k.tensorflow_backend.set_session(tf.Session(config=config))

#train=training_base(testrun=False,renewtokens=True,useAFS=False,useweights=True)
train=training_base(testrun=False,renewtokens=True,useAFS=False,useweights=False)

# Build and compile the model only if training_base did not already set one.
# NOTE(review): indentation reconstructed — setModel/compileModel are grouped
# under the modelSet() guard as in upstream DeepJetCore examples; confirm.
if not train.modelSet():
    from models import model_diTauReference as trainingModel
    #from models import model_diTauDense as trainingModel
    datasets = ['global','cpf','npf','sv']
    #datasets = ['global']
    train.setModel(trainingModel,
                   datasets=datasets,
                   dropoutRate=0.1,
                   momentum=0.9,
                   batchnorm=False,
                   depth=4,
                   width=200,
                   pattern=[32,32],
                   )
    train.compileModel(learningrate=0.0001,
                       loss=['categorical_crossentropy'],
                       metrics=['accuracy'],
                       loss_weights=[1.],
                       )

#train.train_data.maxFilesOpen = 1
#train.val_data.maxFilesOpen = 1

# Dump a text summary and a diagram of the network next to the checkpoints.
with open(train.outputDir + 'summary.txt','w') as f:
    train.keras_model.summary(print_fn=lambda x: f.write(x + '\n'))
from keras.utils import plot_model
plot_model(train.keras_model, to_file=train.outputDir+'model.eps')
# convert eps to png
command = ['convert', '-density', '300', train.outputDir+'model.eps', train.outputDir+'model.png']
result = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE).communicate()
print(train.keras_model.summary())

model, history = train.trainModel(nepochs=500,
                                  batchsize=5000,
                                  stop_patience=100,
                                  lr_factor=0.8,
                                  lr_patience=10,
                                  lr_epsilon=0.0001,
                                  lr_cooldown=8,
                                  lr_minimum=0.00000001,
                                  maxqsize=10,
                                  verbose=1,
                                  )
| 0 | 0 | 0 |
91afaf07d2c3f534c87bada2ba28c2ee786e9072 | 583 | py | Python | Projects/Face Detection/main.py | tejas4m/Hacktoberfest2021 | d90e6bc8d5ba74a9064992fe9b6f73d7cb2c4a10 | [
"MIT"
] | 18 | 2021-09-30T07:20:25.000Z | 2021-10-19T07:19:56.000Z | Projects/Face Detection/main.py | tejas4m/Hacktoberfest2021 | d90e6bc8d5ba74a9064992fe9b6f73d7cb2c4a10 | [
"MIT"
] | 94 | 2021-10-01T12:41:41.000Z | 2021-11-01T03:31:27.000Z | Projects/Face Detection/main.py | tejas4m/Hacktoberfest2021 | d90e6bc8d5ba74a9064992fe9b6f73d7cb2c4a10 | [
"MIT"
] | 56 | 2021-09-30T12:03:02.000Z | 2021-10-19T09:51:52.000Z | import cv2
# Two preview windows: "test1" is shown before face rectangles are drawn
# on the frame, "test" after.
cv2.namedWindow("test")
cv2.namedWindow("test1")
cam = cv2.VideoCapture(0)  # default webcam
detector = cv2.CascadeClassifier("haarcascade_frontalface_alt.xml")
while True:
    temp , frame = cam.read()
    if not temp:
        # Camera read failed; stop capturing.
        break;
    faces = detector.detectMultiScale(frame)
    cv2.imshow("test1" , frame)
    for face in faces:
        # Each detection is (x, y, width, height).
        a,b,c,d = face
        cv2.rectangle(frame , (a , b ) , (a+c , b+d) ,(0 , 255 , 0) , 3)
    cv2.imshow("test" , frame)
    val = cv2.waitKey(1)
    if val%256 == 27:
        # ESC button pressed
        break
cam.release()
cv2.destroyAllWindows()
| 26.5 | 72 | 0.61578 | import cv2
# Two preview windows: "test1" is shown before face rectangles are drawn
# on the frame, "test" after.
cv2.namedWindow("test")
cv2.namedWindow("test1")
cam = cv2.VideoCapture(0)  # default webcam
detector = cv2.CascadeClassifier("haarcascade_frontalface_alt.xml")
while True:
    temp , frame = cam.read()
    if not temp:
        # Camera read failed; stop capturing.
        break;
    faces = detector.detectMultiScale(frame)
    cv2.imshow("test1" , frame)
    for face in faces:
        # Each detection is (x, y, width, height).
        a,b,c,d = face
        cv2.rectangle(frame , (a , b ) , (a+c , b+d) ,(0 , 255 , 0) , 3)
    cv2.imshow("test" , frame)
    val = cv2.waitKey(1)
    if val%256 == 27:
        # ESC button pressed
        break
cam.release()
cv2.destroyAllWindows()
| 0 | 0 | 0 |
5f5babe057438b85c0ec6c63603d46c40e7910fc | 4,608 | py | Python | ds_cleaning_funcs.py | AlirezaFallahii/sufficient-forecasting | 802a19c7a00c9a60036d8949c65e77a654f371f3 | [
"MIT"
] | 1 | 2020-11-28T10:41:42.000Z | 2020-11-28T10:41:42.000Z | ds_cleaning_funcs.py | AlirezaFallahii/sufficient-forecasting | 802a19c7a00c9a60036d8949c65e77a654f371f3 | [
"MIT"
] | null | null | null | ds_cleaning_funcs.py | AlirezaFallahii/sufficient-forecasting | 802a19c7a00c9a60036d8949c65e77a654f371f3 | [
"MIT"
] | null | null | null | import pandas as pd
def assess_NA(data):
    """
    Summarise missing values per column of *data*.

    Returns a dataframe indexed by column name with two columns,
    'Number of NA' and 'Percent NA' (rounded to 2 decimals), sorted in
    descending order and restricted to columns that actually contain NAs.

    Parameters
    ----------
    data: dataframe
    """
    na_counts = data.isnull().sum()
    counts_desc = na_counts.sort_values(ascending=False)
    pct_desc = (((na_counts / len(data.index)) * 100)
                .round(2)).sort_values(ascending=False)
    summary = pd.concat([counts_desc, pct_desc], axis=1,
                        keys=['Number of NA', 'Percent NA'])
    # Keep only rows (i.e. columns of the input) with at least one NA.
    return summary[(summary.T != 0).any()]
| 42.275229 | 80 | 0.588976 | import pandas as pd
def assess_NA(data):
    """
    Summarise missing values per column of *data*.

    Returns a dataframe indexed by column name with two columns,
    'Number of NA' and 'Percent NA' (rounded to 2 decimals), sorted in
    descending order and restricted to columns that actually contain NAs.

    Parameters
    ----------
    data: dataframe
    """
    na_counts = data.isnull().sum()
    counts_desc = na_counts.sort_values(ascending=False)
    pct_desc = (((na_counts / len(data.index)) * 100)
                .round(2)).sort_values(ascending=False)
    summary = pd.concat([counts_desc, pct_desc], axis=1,
                        keys=['Number of NA', 'Percent NA'])
    # Keep only rows (i.e. columns of the input) with at least one NA.
    return summary[(summary.T != 0).any()]
def prepare_dataset_from_xls(file_name='stock&watson.xls',
                             sheet_name='Sheet1',
                             numEmptyRows=9):
    """
    Interactively load one sheet of an Excel workbook and split it into
    predictors and a user-chosen dependent variable.

    file_name:    path of the workbook to read.
    sheet_name:   the sheet to load.
    numEmptyRows: trailing rows of the sheet to skip (see the comments
                  below for the two Stock & Watson sheets).

    Returns (X_input, y_input) as arrays: y_input is the column whose name
    the user types on the console, X_input is every other column.
    """
    # numEmptyRows = 9 # Number of empty rows in the end of sheet one.
    # # This number corresponds to the dates from "4/1/2009" to "12/1/2009".
    # numEmptyRows = 3 # Number of empty rows in the end of sheet two.
    # # This number corresponds to the dates from "5/1/2009" to "11/1/2009".
    raw_df1 = pd.read_excel(file_name,
                            sheet_name=sheet_name,
                            index_col=0,
                            skipfooter=numEmptyRows)
    print(f'\nreading "{file_name}" file is finished. Note: '
          f'Only sheet "{sheet_name}" is read into the memory.')
    print(f'The "{sheet_name}" sheet has {raw_df1.shape[0]} '
          f'rows and {raw_df1.shape[1]} columns.')
    print('**************************************************************')
    # Performing alg. on dataset
    # Part 0: Visualizing a small portion of the dataset
    print('\nWe want to first Visualize a small portion of the dataset ...')
    print('First 10 rows of the dataset:')
    print(raw_df1.head(10))
    print('Last 10 rows of the dataset:')
    print(raw_df1.tail(10))
    print('summary of the dataset:')
    print(raw_df1.describe())
    print('the first 5*5 elements of the raw matrix of the dataset:')
    print(raw_df1.iloc[0:5, 0:5].values)
    print('the last 5*5 elements of the raw matrix of the dataset:')
    print(raw_df1.iloc[-5:, -5:].values)
    print('**************************************************************')
    print('\nWe want to clean dataset now (Removing metadata and'
          'Imputing missing values with "mean") ...\n')
    # Performing alg. on dataset
    # Part 1: Cleaning Dataset
    # Part 1.1: Removing metadata (rows in the beginning of the dataset)
    # NOTE(review): the 9 leading rows dropped here are hard-coded and are
    # unrelated to the numEmptyRows parameter (trailing rows) — confirm.
    df1 = raw_df1.drop(raw_df1.index.values[:9])
    print('the first 5*5 elements of the new matrix of '
          'the dataset: (after removing metadata)')
    print(df1.iloc[0:5, 0:5].values)
    # Part 1.2: Imputing missing values with "mean"
    print('\nsummary statistics of present NA values'
          ' (before removing NA values):')
    assess_NA_df = assess_NA(df1)
    print(assess_NA_df)
    # Impute with mean on NA values
    for col_name in assess_NA_df.index.values:
        df1[col_name] = df1[col_name].fillna(df1[col_name].mean())
    print('\nsummary statistics of NA values: (after removing them)')
    print('[this code line is written for validating our imputation alg.]')
    assess_NA_df = assess_NA(df1)
    print(assess_NA_df)
    if len(assess_NA_df.index.values) == 0:
        print('As you can see, all of the NA values are removed.')
    print('**************************************************************')
    print('\nWe want to create "X_input" and "y_input" ...')
    # Performing alg. on dataset
    # Part 2: creating "X_input" and "y_input"
    print('Choose the column that has to become "dependent variable":')
    print('list of columns are:\n')
    print(df1.columns.values)
    column_str = input('\nEnter the column name acronym and end it with spaces'
                       ' until the string length happens to be 8 characters.')
    y_input = df1[[column_str]];
    X_input = df1.drop(columns=[column_str])
    X_input = X_input.values
    y_input = y_input.values
    print('**************************************************************')
    print('\nDataset is now thoroughly prepared for training and testing.\n')
    return X_input, y_input
| 3,647 | 0 | 25 |
3ed234bdad8a229ef1d7c5b2cc8bcd7c6168eed3 | 462 | py | Python | pyetltools/tools/test.py | aborecki/pyetltools | ff97ed4b1b6d98ab6b73eded9368e3e2aaf65809 | [
"MIT"
] | null | null | null | pyetltools/tools/test.py | aborecki/pyetltools | ff97ed4b1b6d98ab6b73eded9368e3e2aaf65809 | [
"MIT"
] | null | null | null | pyetltools/tools/test.py | aborecki/pyetltools | ff97ed4b1b6d98ab6b73eded9368e3e2aaf65809 | [
"MIT"
] | null | null | null | import colored | 46.2 | 102 | 0.623377 | import colored
def test(value, expected_value, message ):
    """Print a coloured PASS/FAIL line comparing *value* to *expected_value*.

    On failure, both the expected and the tested value are echoed in red.
    """
    bold = colored.attr("bold")
    reset = colored.attr(0)
    if value == expected_value:
        print(bold + colored.bg("green") + "TEST OK: " + message + reset)
    else:
        print(bold + colored.bg("red") + "TEST FAILED: " + message + reset)
        print(bold + colored.fg("red") + "\nEXPECTED VALUE:\n" + str(expected_value) +
              "\nTESTED VALUE:\n" + str(value) + reset)
b955eece982c8d7627d37e20b41ef5826c9b8edb | 102 | py | Python | cap9/ex7/main1.py | felipesch92/livroPython | 061b1c095c3ec2d25fb1d5fdfbf9e9dbe10b3307 | [
"MIT"
] | null | null | null | cap9/ex7/main1.py | felipesch92/livroPython | 061b1c095c3ec2d25fb1d5fdfbf9e9dbe10b3307 | [
"MIT"
] | null | null | null | cap9/ex7/main1.py | felipesch92/livroPython | 061b1c095c3ec2d25fb1d5fdfbf9e9dbe10b3307 | [
"MIT"
] | null | null | null | l = [['felipe', 99], ['tamara', 50], ['fernando', 60]]
l1 = sorted(l)
l = sorted(l)
print(l)
print(l1) | 20.4 | 54 | 0.558824 | l = [['felipe', 99], ['tamara', 50], ['fernando', 60]]
l1 = sorted(l)
l = sorted(l)
print(l)
print(l1) | 0 | 0 | 0 |
6092cfc32f268455d15a9ad5964aebd238cb3f73 | 16,171 | py | Python | se/commands/interactive_replace.py | thewchan/tools | bb9ac50c8d48fd43745d04b4c994ab0af5ef3fba | [
"CC0-1.0"
] | 1 | 2021-07-25T03:29:10.000Z | 2021-07-25T03:29:10.000Z | se/commands/interactive_replace.py | robinwhittleton/tools | 60777eb152e346124ad9646ddd1c3307b7ed7f0c | [
"CC0-1.0"
] | null | null | null | se/commands/interactive_replace.py | robinwhittleton/tools | 60777eb152e346124ad9646ddd1c3307b7ed7f0c | [
"CC0-1.0"
] | null | null | null | """
This module implements the `se interactive-replace` command.
"""
import argparse
import curses
from pathlib import Path
import os
from math import floor
from typing import Tuple
import regex
import se
TAB_SIZE = 8
def _get_text_dimensions(text: str) -> Tuple[int, int]:
"""
Get the number of rows and columns to fit the given text.
Returns (height, width)
"""
text_height = 0
text_width = 0
text = text.rstrip()
for line in text.split("\n"):
text_height = text_height + 1
line_length = 0
for char in line:
if char == "\t":
line_length = line_length + TAB_SIZE
else:
line_length = line_length + 1
if line_length > text_width:
text_width = line_length
return (text_height + 1, text_width + 1)
def _print_ui(screen, filepath: Path) -> None:
	"""
	Print the header and footer bars to the screen

	screen: the top-level curses window
	filepath: the path shown, centered, in the reverse-video header bar
	"""
	# Wipe the current screen, in case we resized
	screen.clear()

	screen_height, screen_width = screen.getmaxyx()

	header_bar = str(filepath)

	# If the filepath is longer than the screen, use the filename instead
	if len(str(filepath)) > screen_width:
		header_bar = str(filepath.name)

	# Fill blank space in the header bar
	fill_space = max(floor((screen_width - len(header_bar)) / 2), 0)
	if fill_space:
		header_bar = f"{' ': <{fill_space}}{header_bar}{' ': <{fill_space}}"

	# Centering can leave the bar one cell short on odd widths; pad it out.
	if len(header_bar) < screen_width:
		header_bar = header_bar + " "

	# Create the footer bar
	# Be very careful with generating a footer of correct width, because unlike
	# the header, a footer that is too long will cause curses to crash
	footer_bar = "(y)es (n)o (a)ccept remaining (r)eject remaining (c)enter on match (q)uit"

	if len(footer_bar) >= screen_width:
		footer_bar = "y/n; a/r; c; q"

	if len(footer_bar) >= screen_width:
		footer_bar = ""

	fill_space = max(screen_width - len(footer_bar) - 1, 0)
	if fill_space:
		footer_bar = f"{footer_bar}{' ': <{fill_space}}"

	# Print the header and footer
	screen.attron(curses.A_REVERSE)
	screen.addstr(0, 0, header_bar)

	# Make accelerators bold
	footer_index = 0
	for char in footer_bar:
		if char == "(":
			screen.attron(curses.A_BOLD)

		# If the previous char was ), turn off bold
		if footer_index > 0 and footer_bar[footer_index - 1] == ")":
			screen.attroff(curses.A_BOLD)

		screen.addstr(screen_height - 1, footer_index, char)
		footer_index = footer_index + 1

	# The bottom right corner has to be set with insch() for some reason
	screen.insch(screen_height - 1, screen_width - 1, " ")

	screen.attroff(curses.A_REVERSE)

	screen.refresh()
def _get_center_of_match(text: str, match_start: int, match_end: int, screen_height: int, screen_width: int) -> Tuple[int, int]:
"""
Given the text, the start and end of the match, and the screen dimensions, return
a tuple representing the pad x and y that will result in the pad's
view being centered on the match.
"""
# Now we want to try to center the highlighted section on the screen
# First, get the row/col dimensions of the highlighted region
index = 0
highlight_start_x = 0
highlight_start_y = 0
highlight_end_x = 0
highlight_end_y = 0
for char in text:
if index < match_start:
if char == "\n":
highlight_start_y = highlight_start_y + 1
highlight_end_y = highlight_end_y + 1
highlight_start_x = 0
highlight_end_x = 0
elif char == "\t":
highlight_start_x = highlight_start_x + TAB_SIZE
highlight_end_x = highlight_end_x + TAB_SIZE
else:
highlight_start_x = highlight_start_x + 1
highlight_end_x = highlight_end_x + 1
index = index + 1
elif index < match_end:
if char == "\n":
highlight_end_y = highlight_end_y + 1
highlight_end_x = 0
elif char == "\t":
highlight_end_x = highlight_end_x + TAB_SIZE
else:
highlight_end_x = highlight_end_x + 1
index = index + 1
else:
break
pad_y = max(highlight_start_y - floor((highlight_start_y - highlight_end_y) / 2) - floor(screen_height / 2), 0)
pad_x = max(highlight_start_x - floor((highlight_start_x - highlight_end_x) / 2) - floor(screen_width / 2), 0)
return (pad_y, pad_x)
def _print_screen(screen, filepath: Path, text: str, start_matching_at: int, regex_search: str, regex_flags: int):
	"""
	Print the complete UI to the screen.

	Returns a tuple of (pad, line_numbers_pad, pad_y, pad_x, match_start, match_end)
	if there are more replacements to be made. If not, returns a tuple of
	(None, None, 0, 0, 0, 0)
	"""
	# Get the dimensions of the complete text, and the terminal screen
	text_height, text_width = _get_text_dimensions(text)
	screen_height, screen_width = screen.getmaxyx()

	line_numbers_height = text_height
	# Wide enough for the largest line number.
	line_numbers_width = len(str(text_height))

	#print(line_numbers_height)
	#exit()

	# Create the line numbers pad
	line_numbers_pad = curses.newpad(line_numbers_height, line_numbers_width)
	# Reset the cursor
	line_numbers_pad.addstr(0, 0, "")
	line_numbers_pad.attron(curses.A_REVERSE)
	line_numbers_pad.attron(curses.A_DIM)
	# Add the line numbers
	for i in range(line_numbers_height - 1):
		line_numbers_pad.addstr(i, 0, f"{i + 1}".rjust(line_numbers_width))

	# Create a new pad
	pad = curses.newpad(text_height, text_width)
	pad.keypad(True)
	# Reset the cursor
	pad.addstr(0, 0, "")

	# Do we have a regex match in the text?
	# We only consider text after the last completed match
	match = regex.search(fr"{regex_search}", text[start_matching_at:], flags=regex_flags)

	if not match:
		return (None, None, 0, 0, 0, 0)

	# Offsets are relative to the searched slice; convert them back to
	# absolute indexes into *text*.
	match_start = start_matching_at + match.start()
	match_end = start_matching_at + match.end()

	# Print the text preceding the match
	pad.addstr(text[:match_start])

	# Print the match itself, in reversed color
	if curses.has_colors():
		pad.addstr(text[match_start:match_end], curses.color_pair(1) | curses.A_BOLD)
	else:
		pad.attron(curses.A_REVERSE)
		pad.addstr(text[match_start:match_end])
		pad.attroff(curses.A_REVERSE)

	# Print the text after the match
	pad.addstr(text[match_end:len(text)])

	pad_y, pad_x = _get_center_of_match(text, match_start, match_end, screen_height, screen_width)

	# Print the header and footer
	_print_ui(screen, filepath)

	# Output to the screen
	pad.refresh(pad_y, pad_x, 1, line_numbers_width, screen_height - 2, screen_width - 1)
	line_numbers_pad.refresh(pad_y, 0, 1, 0, screen_height - 2, line_numbers_width)

	return (pad, line_numbers_pad, pad_y, pad_x, match_start, match_end)
def interactive_replace(plain_output: bool) -> int: # pylint: disable=unused-argument
	"""
	Entry point for `se interactive-replace`

	Returns a process exit code: 0 on success, otherwise the code of the
	first error encountered (unreadable file or invalid regex).
	"""

	parser = argparse.ArgumentParser(description="Perform an interactive search and replace on a list of files using Python-flavored regex. The view is scrolled using the arrow keys, with alt to scroll by page in any direction. Basic Emacs (default) or Vim style navigation is available. The following actions are possible: (y) Accept replacement. (n) Reject replacement. (a) Accept all remaining replacements in this file. (r) Reject all remaining replacements in this file. (c) Center on match. (q) Save this file and quit.")
	parser.add_argument("-i", "--ignore-case", action="store_true", help="ignore case when matching; equivalent to regex.IGNORECASE")
	parser.add_argument("-m", "--multiline", action="store_true", help="make `^` and `$` consider each line; equivalent to regex.MULTILINE")
	parser.add_argument("-d", "--dot-all", action="store_true", help="make `.` match newlines; equivalent to regex.DOTALL")
	parser.add_argument("-v", "--vim", action="store_true", help="use basic Vim-like navigation shortcuts")
	parser.add_argument("regex", metavar="REGEX", help="a regex of the type accepted by Python’s `regex` library.")
	parser.add_argument("replace", metavar="REPLACE", help="a replacement regex of the type accepted by Python’s `regex` library.")
	parser.add_argument("targets", metavar="TARGET", nargs="+", help="a file or directory on which to perform the search and replace")
	args = parser.parse_args()

	# By default, the esc key has a delay before its delivered to curses.
	# Set the delay to 0
	os.environ.setdefault("ESCDELAY", "0")

	# Save errors for later, because we can only print them after curses is
	# deinitialized
	errors = []
	return_code = 0

	# Default (Emacs-style) navigation keys, as reported by curses.keyname().
	nav_down = b"^N"
	nav_up = b"^P"
	nav_right = b"^F"
	nav_left = b"^B"

	if args.vim:
		nav_down = b"j"
		nav_up = b"k"
		nav_right = b"l"
		nav_left = b"h"

	regex_flags = 0

	if args.ignore_case:
		regex_flags = regex_flags | regex.IGNORECASE

	if args.multiline:
		regex_flags = regex_flags | regex.MULTILINE

	if args.dot_all:
		regex_flags = regex_flags | regex.DOTALL

	try:
		# Initialize curses
		screen = curses.initscr()

		curses.start_color()
		if curses.has_colors():
			curses.init_pair(1, curses.COLOR_WHITE, curses.COLOR_BLUE)

		# Disable the blinking cursor
		try:
			curses.curs_set(False)
		# Because some terminals do not support the invisible cursor, proceeed
		# if curs_set fails to change the visibility
		except Exception:
			pass

		for filepath in se.get_target_filenames(args.targets, ".xhtml"):
			try:
				with open(filepath, "r", encoding="utf-8") as file:
					xhtml = file.read()
			except:
				errors.append(f"Couldn’t open file: {filepath}")
				return_code = se.InvalidFileException.code
				continue

			original_xhtml = xhtml
			is_file_dirty = False

			screen_height, screen_width = screen.getmaxyx()

			# In curses terminology, a "pad" is a window that is larger than the viewport.
			# Pads can be scrolled around.
			# Create and output our initial pad
			pad, line_numbers_pad, pad_y, pad_x, match_start, match_end = _print_screen(screen, filepath, xhtml, 0, args.regex, regex_flags)

			while pad:
				# Wait for input
				char = pad.getch()

				esc_pressed = False
				alt_pressed = False

				if char == 27: # ALT was pressed
					pad.nodelay(True)
					alt_pressed = True
					char = pad.getch() # Get the key pressed after ALT
					pad.nodelay(False)

				if alt_pressed and char == -1: # ESC
					esc_pressed = True

				# We have input!
				pad_height, pad_width = pad.getmaxyx()
				_, line_numbers_width = line_numbers_pad.getmaxyx()

				# Accept all remaining replacements and continue to the next file
				if curses.keyname(char) in (b"a", b"A"):
					xhtml = xhtml[:match_start] + regex.sub(fr"{args.regex}", fr"{args.replace}", xhtml[match_start:], flags=regex_flags)

					# Can't check is_file_dirty, we have to compare file contents
					if xhtml != original_xhtml:
						with open(filepath, "w", encoding="utf-8") as file:
							file.write(xhtml)

					break

				# Reject all remaining replacements and continue to the next file
				if curses.keyname(char) in (b"r", b"R") or esc_pressed:
					if is_file_dirty:
						with open(filepath, "w", encoding="utf-8") as file:
							file.write(xhtml)

					break

				# Save this file and quit immediately
				if curses.keyname(char) in (b"q", b"Q"):
					if is_file_dirty:
						with open(filepath, "w", encoding="utf-8") as file:
							file.write(xhtml)

					# Throw a blank exception so that we break out of the loop
					# and disinitialize curses in `finally`
					raise Exception

				if curses.keyname(char) in (b"y", b"Y"):
					# Do the replacement, but starting from the beginning of the match in case we
					# skipped replacements earlier
					new_xhtml = xhtml[:match_start] + regex.sub(fr"{args.regex}", fr"{args.replace}", xhtml[match_start:], 1, flags=regex_flags)

					# Our replacement has changed the XHTML string, so the
					# match_end doesn't point to the right place any more.
					# Update match_end to account for the change in string length
					# caused by the replacement before passing it to _print_screen()
					match_end = match_end + (len(new_xhtml) - len(xhtml))

					is_file_dirty = True

					# OK, now set our xhtml to the replaced version
					xhtml = new_xhtml

					pad, line_numbers_pad, pad_y, pad_x, match_start, match_end = _print_screen(screen, filepath, xhtml, match_end, args.regex, regex_flags)

				if curses.keyname(char) in (b"n", b"N"):
					# Skip this match
					pad, line_numbers_pad, pad_y, pad_x, match_start, match_end = _print_screen(screen, filepath, xhtml, match_end, args.regex, regex_flags)

				# Center on the match
				if curses.keyname(char) in (b"c", b"C"):
					pad_y, pad_x = _get_center_of_match(xhtml, match_start, match_end, screen_height, screen_width)

					pad.refresh(pad_y, pad_x, 1, line_numbers_width, screen_height - 2, screen_width - 1)
					line_numbers_pad.refresh(pad_y, 0, 1, 0, screen_height - 2, line_numbers_width)

				# The terminal has been resized, redraw the UI
				if curses.keyname(char) == b"KEY_RESIZE":
					screen_height, screen_width = screen.getmaxyx()

					# Note that we pass match_start instead of match_end to print screen, so that we don't
					# appear to increment the search when we resize!
					pad, line_numbers_pad, pad_y, pad_x, _, _ = _print_screen(screen, filepath, xhtml, match_start, args.regex, regex_flags)

				if curses.keyname(char) in (b"KEY_DOWN", nav_down):
					if pad_height - pad_y - screen_height >= 0:
						pad_y = pad_y + 1

						pad.refresh(pad_y, pad_x, 1, line_numbers_width, screen_height - 2, screen_width - 1)
						line_numbers_pad.refresh(pad_y, 0, 1, 0, screen_height - 2, line_numbers_width)

				if curses.keyname(char) in (b"KEY_UP", nav_up):
					if pad_y > 0:
						pad_y = pad_y - 1

						pad.refresh(pad_y, pad_x, 1, line_numbers_width, screen_height - 2, screen_width - 1)
						line_numbers_pad.refresh(pad_y, 0, 1, 0, screen_height - 2, line_numbers_width)

				# pgdown or alt + down, which has its own keycode
				if curses.keyname(char) in (b"KEY_NPAGE", b"kDN3") or (not args.vim and curses.keyname(char) == b"^V") or (args.vim and curses.keyname(char) == b"^F"):
					if pad_height - pad_y - screen_height > 0:
						pad_y = pad_y + screen_height

						if pad_y + screen_height > pad_height:
							pad_y = pad_height - screen_height + 1

						pad.refresh(pad_y, pad_x, 1, line_numbers_width, screen_height - 2, screen_width - 1)
						line_numbers_pad.refresh(pad_y, 0, 1, 0, screen_height - 2, line_numbers_width)

				# pgup or alt + up, which has its own keycode
				if curses.keyname(char) in (b"KEY_PPAGE", b"kUP3") or (not args.vim and alt_pressed and curses.keyname(char) == b"v") or (args.vim and curses.keyname(char) == b"^B"):
					if pad_y > 0:
						pad_y = max(pad_y - screen_height, 0)

						pad.refresh(pad_y, pad_x, 1, line_numbers_width, screen_height - 2, screen_width - 1)
						line_numbers_pad.refresh(pad_y, 0, 1, 0, screen_height - 2, line_numbers_width)

				if curses.keyname(char) in (b"KEY_RIGHT", nav_right):
					if pad_width - pad_x - screen_width + line_numbers_width > 1:
						pad_x = pad_x + 1

						pad.refresh(pad_y, pad_x, 1, line_numbers_width, screen_height - 2, screen_width - 1)

				if curses.keyname(char) in (b"KEY_LEFT", nav_left):
					if pad_x > 0:
						pad_x = pad_x - 1

						pad.refresh(pad_y, pad_x, 1, line_numbers_width, screen_height - 2, screen_width - 1)

				# alt + right, which as its own key code
				if curses.keyname(char) == b"kRIT3":
					if pad_width - pad_x - screen_width + line_numbers_width > 1:
						pad_x = pad_x + screen_width - line_numbers_width

						if pad_x + screen_width >= pad_width:
							pad_x = pad_width - screen_width + line_numbers_width - 1

						pad.refresh(pad_y, pad_x, 1, line_numbers_width, screen_height - 2, screen_width - 1)

				# alt + left, which as its own key code
				if curses.keyname(char) == b"kLFT3":
					if pad_x > 0:
						pad_x = max(pad_x - screen_width, 0)

						pad.refresh(pad_y, pad_x, 1, line_numbers_width, screen_height - 2, screen_width - 1)

			if is_file_dirty:
				with open(filepath, "w", encoding="utf-8") as file:
					file.write(xhtml)

	except Exception as ex:
		# We check for the `pattern` attr instead of catching
		# regex._regex_core.error because the regex error type is
		# private and pylint will complain
		if hasattr(ex, "pattern"):
			errors.append(f"Invalid regular expression: {ex}")
			return_code = se.InvalidInputException.code
		# We may get here if we pressed `q`
	finally:
		curses.endwin()

	for error in errors:
		se.print_error(error)

	return return_code
| 34.776344 | 524 | 0.704595 | """
This module implements the `se interactive-replace` command.
"""
import argparse
import curses
from pathlib import Path
import os
from math import floor
from typing import Tuple
import regex
import se
TAB_SIZE = 8
def _get_text_dimensions(text: str) -> Tuple[int, int]:
"""
Get the number of rows and columns to fit the given text.
Returns (height, width)
"""
text_height = 0
text_width = 0
text = text.rstrip()
for line in text.split("\n"):
text_height = text_height + 1
line_length = 0
for char in line:
if char == "\t":
line_length = line_length + TAB_SIZE
else:
line_length = line_length + 1
if line_length > text_width:
text_width = line_length
return (text_height + 1, text_width + 1)
def _print_ui(screen, filepath: Path) -> None:
	"""
	Print the header and footer bars to the screen

	screen: the top-level curses window
	filepath: the path shown, centered, in the reverse-video header bar
	"""
	# Wipe the current screen, in case we resized
	screen.clear()

	screen_height, screen_width = screen.getmaxyx()

	header_bar = str(filepath)

	# If the filepath is longer than the screen, use the filename instead
	if len(str(filepath)) > screen_width:
		header_bar = str(filepath.name)

	# Fill blank space in the header bar
	fill_space = max(floor((screen_width - len(header_bar)) / 2), 0)
	if fill_space:
		header_bar = f"{' ': <{fill_space}}{header_bar}{' ': <{fill_space}}"

	# Centering can leave the bar one cell short on odd widths; pad it out.
	if len(header_bar) < screen_width:
		header_bar = header_bar + " "

	# Create the footer bar
	# Be very careful with generating a footer of correct width, because unlike
	# the header, a footer that is too long will cause curses to crash
	footer_bar = "(y)es (n)o (a)ccept remaining (r)eject remaining (c)enter on match (q)uit"

	if len(footer_bar) >= screen_width:
		footer_bar = "y/n; a/r; c; q"

	if len(footer_bar) >= screen_width:
		footer_bar = ""

	fill_space = max(screen_width - len(footer_bar) - 1, 0)
	if fill_space:
		footer_bar = f"{footer_bar}{' ': <{fill_space}}"

	# Print the header and footer
	screen.attron(curses.A_REVERSE)
	screen.addstr(0, 0, header_bar)

	# Make accelerators bold
	footer_index = 0
	for char in footer_bar:
		if char == "(":
			screen.attron(curses.A_BOLD)

		# If the previous char was ), turn off bold
		if footer_index > 0 and footer_bar[footer_index - 1] == ")":
			screen.attroff(curses.A_BOLD)

		screen.addstr(screen_height - 1, footer_index, char)
		footer_index = footer_index + 1

	# The bottom right corner has to be set with insch() for some reason
	screen.insch(screen_height - 1, screen_width - 1, " ")

	screen.attroff(curses.A_REVERSE)

	screen.refresh()
def _get_center_of_match(text: str, match_start: int, match_end: int, screen_height: int, screen_width: int) -> Tuple[int, int]:
"""
Given the text, the start and end of the match, and the screen dimensions, return
a tuple representing the pad x and y that will result in the pad's
view being centered on the match.
"""
# Now we want to try to center the highlighted section on the screen
# First, get the row/col dimensions of the highlighted region
index = 0
highlight_start_x = 0
highlight_start_y = 0
highlight_end_x = 0
highlight_end_y = 0
for char in text:
if index < match_start:
if char == "\n":
highlight_start_y = highlight_start_y + 1
highlight_end_y = highlight_end_y + 1
highlight_start_x = 0
highlight_end_x = 0
elif char == "\t":
highlight_start_x = highlight_start_x + TAB_SIZE
highlight_end_x = highlight_end_x + TAB_SIZE
else:
highlight_start_x = highlight_start_x + 1
highlight_end_x = highlight_end_x + 1
index = index + 1
elif index < match_end:
if char == "\n":
highlight_end_y = highlight_end_y + 1
highlight_end_x = 0
elif char == "\t":
highlight_end_x = highlight_end_x + TAB_SIZE
else:
highlight_end_x = highlight_end_x + 1
index = index + 1
else:
break
pad_y = max(highlight_start_y - floor((highlight_start_y - highlight_end_y) / 2) - floor(screen_height / 2), 0)
pad_x = max(highlight_start_x - floor((highlight_start_x - highlight_end_x) / 2) - floor(screen_width / 2), 0)
return (pad_y, pad_x)
def _print_screen(screen, filepath: Path, text: str, start_matching_at: int, regex_search: str, regex_flags: int):
	"""
	Print the complete UI to the screen.

	Searches `text` for the next occurrence of `regex_search` at or after
	`start_matching_at`, renders the text into a scrollable pad with the
	match highlighted, and draws the header/footer chrome.

	Returns a tuple of (pad, line_numbers_pad, pad_y, pad_x, match_start, match_end)
	if there are more replacements to be made. If not, returns a tuple of
	(None, None, 0, 0, 0, 0)
	"""
	# Get the dimensions of the complete text, and the terminal screen
	text_height, text_width = _get_text_dimensions(text)
	screen_height, screen_width = screen.getmaxyx()
	line_numbers_height = text_height
	# The gutter is just wide enough for the largest line number
	line_numbers_width = len(str(text_height))
	# Create the line numbers pad
	line_numbers_pad = curses.newpad(line_numbers_height, line_numbers_width)
	# Reset the cursor
	line_numbers_pad.addstr(0, 0, "")
	# Render the gutter dimmed and in reverse video to set it apart
	line_numbers_pad.attron(curses.A_REVERSE)
	line_numbers_pad.attron(curses.A_DIM)
	# Add the line numbers, right-aligned within the gutter
	for i in range(line_numbers_height - 1):
		line_numbers_pad.addstr(i, 0, f"{i + 1}".rjust(line_numbers_width))
	# Create a new pad
	pad = curses.newpad(text_height, text_width)
	pad.keypad(True)
	# Reset the cursor
	pad.addstr(0, 0, "")
	# Do we have a regex match in the text?
	# We only consider text after the last completed match
	match = regex.search(fr"{regex_search}", text[start_matching_at:], flags=regex_flags)
	if not match:
		return (None, None, 0, 0, 0, 0)
	# match offsets are relative to the slice; convert them back to
	# offsets into the full text
	match_start = start_matching_at + match.start()
	match_end = start_matching_at + match.end()
	# Print the text preceding the match
	pad.addstr(text[:match_start])
	# Print the match itself, in reversed color
	if curses.has_colors():
		pad.addstr(text[match_start:match_end], curses.color_pair(1) | curses.A_BOLD)
	else:
		pad.attron(curses.A_REVERSE)
		pad.addstr(text[match_start:match_end])
		pad.attroff(curses.A_REVERSE)
	# Print the text after the match
	pad.addstr(text[match_end:len(text)])
	pad_y, pad_x = _get_center_of_match(text, match_start, match_end, screen_height, screen_width)
	# Print the header and footer
	_print_ui(screen, filepath)
	# Output to the screen; row 0 is the header and the last row is the
	# footer, so the pad is drawn between rows 1 and screen_height - 2
	pad.refresh(pad_y, pad_x, 1, line_numbers_width, screen_height - 2, screen_width - 1)
	line_numbers_pad.refresh(pad_y, 0, 1, 0, screen_height - 2, line_numbers_width)
	return (pad, line_numbers_pad, pad_y, pad_x, match_start, match_end)
def interactive_replace(plain_output: bool) -> int: # pylint: disable=unused-argument
	"""
	Entry point for `se interactive-replace`

	Runs a curses UI that steps through every regex match in each target
	file, letting the user accept or reject each replacement.  Returns a
	process exit code (0 on success).
	"""
	parser = argparse.ArgumentParser(description="Perform an interactive search and replace on a list of files using Python-flavored regex. The view is scrolled using the arrow keys, with alt to scroll by page in any direction. Basic Emacs (default) or Vim style navigation is available. The following actions are possible: (y) Accept replacement. (n) Reject replacement. (a) Accept all remaining replacements in this file. (r) Reject all remaining replacements in this file. (c) Center on match. (q) Save this file and quit.")
	parser.add_argument("-i", "--ignore-case", action="store_true", help="ignore case when matching; equivalent to regex.IGNORECASE")
	parser.add_argument("-m", "--multiline", action="store_true", help="make `^` and `$` consider each line; equivalent to regex.MULTILINE")
	parser.add_argument("-d", "--dot-all", action="store_true", help="make `.` match newlines; equivalent to regex.DOTALL")
	parser.add_argument("-v", "--vim", action="store_true", help="use basic Vim-like navigation shortcuts")
	parser.add_argument("regex", metavar="REGEX", help="a regex of the type accepted by Python’s `regex` library.")
	parser.add_argument("replace", metavar="REPLACE", help="a replacement regex of the type accepted by Python’s `regex` library.")
	parser.add_argument("targets", metavar="TARGET", nargs="+", help="a file or directory on which to perform the search and replace")
	args = parser.parse_args()
	# By default, the esc key has a delay before its delivered to curses.
	# Set the delay to 0
	os.environ.setdefault("ESCDELAY", "0")
	# Save errors for later, because we can only print them after curses is
	# deinitialized
	errors = []
	return_code = 0
	# Navigation keys as reported by curses.keyname(); default to
	# Emacs-style control keys, or plain hjkl with --vim
	nav_down = b"^N"
	nav_up = b"^P"
	nav_right = b"^F"
	nav_left = b"^B"
	if args.vim:
		nav_down = b"j"
		nav_up = b"k"
		nav_right = b"l"
		nav_left = b"h"
	regex_flags = 0
	if args.ignore_case:
		regex_flags = regex_flags | regex.IGNORECASE
	if args.multiline:
		regex_flags = regex_flags | regex.MULTILINE
	if args.dot_all:
		regex_flags = regex_flags | regex.DOTALL
	try:
		# Initialize curses
		screen = curses.initscr()
		curses.start_color()
		if curses.has_colors():
			curses.init_pair(1, curses.COLOR_WHITE, curses.COLOR_BLUE)
		# Disable the blinking cursor
		try:
			curses.curs_set(False)
		# Because some terminals do not support the invisible cursor, proceeed
		# if curs_set fails to change the visibility
		except Exception:
			pass
		for filepath in se.get_target_filenames(args.targets, ".xhtml"):
			try:
				with open(filepath, "r", encoding="utf-8") as file:
					xhtml = file.read()
			except:
				errors.append(f"Couldn’t open file: {filepath}")
				return_code = se.InvalidFileException.code
				continue
			original_xhtml = xhtml
			is_file_dirty = False
			screen_height, screen_width = screen.getmaxyx()
			# In curses terminology, a "pad" is a window that is larger than the viewport.
			# Pads can be scrolled around.
			# Create and output our initial pad
			pad, line_numbers_pad, pad_y, pad_x, match_start, match_end = _print_screen(screen, filepath, xhtml, 0, args.regex, regex_flags)
			# _print_screen returns pad=None when there are no more matches
			while pad:
				# Wait for input
				char = pad.getch()
				esc_pressed = False
				alt_pressed = False
				if char == 27: # ALT was pressed
					pad.nodelay(True)
					alt_pressed = True
					char = pad.getch() # Get the key pressed after ALT
					pad.nodelay(False)
				if alt_pressed and char == -1: # ESC
					esc_pressed = True
				# We have input!
				pad_height, pad_width = pad.getmaxyx()
				_, line_numbers_width = line_numbers_pad.getmaxyx()
				# Accept all remaining replacements and continue to the next file
				if curses.keyname(char) in (b"a", b"A"):
					xhtml = xhtml[:match_start] + regex.sub(fr"{args.regex}", fr"{args.replace}", xhtml[match_start:], flags=regex_flags)
					# Can't check is_file_dirty, we have to compare file contents
					if xhtml != original_xhtml:
						with open(filepath, "w", encoding="utf-8") as file:
							file.write(xhtml)
					break
				# Reject all remaining replacements and continue to the next file
				if curses.keyname(char) in (b"r", b"R") or esc_pressed:
					if is_file_dirty:
						with open(filepath, "w", encoding="utf-8") as file:
							file.write(xhtml)
					break
				# Save this file and quit immediately
				if curses.keyname(char) in (b"q", b"Q"):
					if is_file_dirty:
						with open(filepath, "w", encoding="utf-8") as file:
							file.write(xhtml)
					# Throw a blank exception so that we break out of the loop
					# and disinitialize curses in `finally`
					raise Exception
				if curses.keyname(char) in (b"y", b"Y"):
					# Do the replacement, but starting from the beginning of the match in case we
					# skipped replacements earlier
					new_xhtml = xhtml[:match_start] + regex.sub(fr"{args.regex}", fr"{args.replace}", xhtml[match_start:], 1, flags=regex_flags)
					# Our replacement has changed the XHTML string, so the
					# match_end doesn't point to the right place any more.
					# Update match_end to account for the change in string length
					# caused by the replacement before passing it to _print_screen()
					match_end = match_end + (len(new_xhtml) - len(xhtml))
					is_file_dirty = True
					# OK, now set our xhtml to the replaced version
					xhtml = new_xhtml
					pad, line_numbers_pad, pad_y, pad_x, match_start, match_end = _print_screen(screen, filepath, xhtml, match_end, args.regex, regex_flags)
				if curses.keyname(char) in (b"n", b"N"):
					# Skip this match
					pad, line_numbers_pad, pad_y, pad_x, match_start, match_end = _print_screen(screen, filepath, xhtml, match_end, args.regex, regex_flags)
				# Center on the match
				if curses.keyname(char) in (b"c", b"C"):
					pad_y, pad_x = _get_center_of_match(xhtml, match_start, match_end, screen_height, screen_width)
					pad.refresh(pad_y, pad_x, 1, line_numbers_width, screen_height - 2, screen_width - 1)
					line_numbers_pad.refresh(pad_y, 0, 1, 0, screen_height - 2, line_numbers_width)
				# The terminal has been resized, redraw the UI
				if curses.keyname(char) == b"KEY_RESIZE":
					screen_height, screen_width = screen.getmaxyx()
					# Note that we pass match_start instead of match_end to print screen, so that we don't
					# appear to increment the search when we resize!
					pad, line_numbers_pad, pad_y, pad_x, _, _ = _print_screen(screen, filepath, xhtml, match_start, args.regex, regex_flags)
				if curses.keyname(char) in (b"KEY_DOWN", nav_down):
					if pad_height - pad_y - screen_height >= 0:
						pad_y = pad_y + 1
					pad.refresh(pad_y, pad_x, 1, line_numbers_width, screen_height - 2, screen_width - 1)
					line_numbers_pad.refresh(pad_y, 0, 1, 0, screen_height - 2, line_numbers_width)
				if curses.keyname(char) in (b"KEY_UP", nav_up):
					if pad_y > 0:
						pad_y = pad_y - 1
					pad.refresh(pad_y, pad_x, 1, line_numbers_width, screen_height - 2, screen_width - 1)
					line_numbers_pad.refresh(pad_y, 0, 1, 0, screen_height - 2, line_numbers_width)
				# pgdown or alt + down, which has its own keycode
				if curses.keyname(char) in (b"KEY_NPAGE", b"kDN3") or (not args.vim and curses.keyname(char) == b"^V") or (args.vim and curses.keyname(char) == b"^F"):
					if pad_height - pad_y - screen_height > 0:
						pad_y = pad_y + screen_height
						# Clamp so we don't scroll past the end of the pad
						if pad_y + screen_height > pad_height:
							pad_y = pad_height - screen_height + 1
					pad.refresh(pad_y, pad_x, 1, line_numbers_width, screen_height - 2, screen_width - 1)
					line_numbers_pad.refresh(pad_y, 0, 1, 0, screen_height - 2, line_numbers_width)
				# pgup or alt + up, which has its own keycode
				if curses.keyname(char) in (b"KEY_PPAGE", b"kUP3") or (not args.vim and alt_pressed and curses.keyname(char) == b"v") or (args.vim and curses.keyname(char) == b"^B"):
					if pad_y > 0:
						pad_y = max(pad_y - screen_height, 0)
					pad.refresh(pad_y, pad_x, 1, line_numbers_width, screen_height - 2, screen_width - 1)
					line_numbers_pad.refresh(pad_y, 0, 1, 0, screen_height - 2, line_numbers_width)
				if curses.keyname(char) in (b"KEY_RIGHT", nav_right):
					if pad_width - pad_x - screen_width + line_numbers_width > 1:
						pad_x = pad_x + 1
					pad.refresh(pad_y, pad_x, 1, line_numbers_width, screen_height - 2, screen_width - 1)
				if curses.keyname(char) in (b"KEY_LEFT", nav_left):
					if pad_x > 0:
						pad_x = pad_x - 1
					pad.refresh(pad_y, pad_x, 1, line_numbers_width, screen_height - 2, screen_width - 1)
				# alt + right, which as its own key code
				if curses.keyname(char) == b"kRIT3":
					if pad_width - pad_x - screen_width + line_numbers_width > 1:
						pad_x = pad_x + screen_width - line_numbers_width
						# Clamp so we don't scroll past the right edge of the pad
						if pad_x + screen_width >= pad_width:
							pad_x = pad_width - screen_width + line_numbers_width - 1
					pad.refresh(pad_y, pad_x, 1, line_numbers_width, screen_height - 2, screen_width - 1)
				# alt + left, which as its own key code
				if curses.keyname(char) == b"kLFT3":
					if pad_x > 0:
						pad_x = max(pad_x - screen_width, 0)
					pad.refresh(pad_y, pad_x, 1, line_numbers_width, screen_height - 2, screen_width - 1)
			# Ran out of matches in this file; flush any accepted changes
			if is_file_dirty:
				with open(filepath, "w", encoding="utf-8") as file:
					file.write(xhtml)
	except Exception as ex:
		# We check for the `pattern` attr instead of catching
		# regex._regex_core.error because the regex error type is
		# private and pylint will complain
		if hasattr(ex, "pattern"):
			errors.append(f"Invalid regular expression: {ex}")
			return_code = se.InvalidInputException.code
		# We may get here if we pressed `q`
	finally:
		curses.endwin()
	for error in errors:
		se.print_error(error)
	return return_code
| 0 | 0 | 0 |
4e0426cc0a9642d982176a0f7602c3aeb0fde250 | 216 | py | Python | edge/graphics.py | lucasdavid/edge | c7d9cf7e2803cc8d49abbe3ddb9f16eb130c1b01 | [
"MIT"
] | null | null | null | edge/graphics.py | lucasdavid/edge | c7d9cf7e2803cc8d49abbe3ddb9f16eb130c1b01 | [
"MIT"
] | null | null | null | edge/graphics.py | lucasdavid/edge | c7d9cf7e2803cc8d49abbe3ddb9f16eb130c1b01 | [
"MIT"
] | null | null | null | default_style = {
'alpha': .6,
'width': 1,
'node_size': 100,
'node_color': '#2EB1E6',
'edge_color': '#cccccc',
}
solution_style = default_style.copy()
solution_style.update(node_color='#ff0000')
| 19.636364 | 43 | 0.62963 | default_style = {
'alpha': .6,
'width': 1,
'node_size': 100,
'node_color': '#2EB1E6',
'edge_color': '#cccccc',
}
solution_style = default_style.copy()
solution_style.update(node_color='#ff0000')
| 0 | 0 | 0 |
d9548d88c801008eefb74d6391427687fe7c335b | 2,086 | py | Python | server2/migrations/versions/bff862ed6870_.py | Terkea/beds-uni-hackathon-4-notes | db351b2053be5e9568d731006fd2af7002a40ca0 | [
"MIT"
] | null | null | null | server2/migrations/versions/bff862ed6870_.py | Terkea/beds-uni-hackathon-4-notes | db351b2053be5e9568d731006fd2af7002a40ca0 | [
"MIT"
] | 1 | 2019-12-01T13:49:15.000Z | 2019-12-01T13:49:15.000Z | server2/migrations/versions/bff862ed6870_.py | Terkea/beds-uni-hackathon-4-notes | db351b2053be5e9568d731006fd2af7002a40ca0 | [
"MIT"
] | null | null | null | """empty message
Revision ID: bff862ed6870
Revises: 2ee4f69c4623
Create Date: 2019-11-28 17:42:47.448962
"""
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import mysql
# revision identifiers, used by Alembic.
revision = 'bff862ed6870'  # unique id of this migration
down_revision = '2ee4f69c4623'  # migration that must be applied before this one
branch_labels = None  # no named branches
depends_on = None  # no cross-branch dependencies
| 37.25 | 98 | 0.691275 | """empty message
Revision ID: bff862ed6870
Revises: 2ee4f69c4623
Create Date: 2019-11-28 17:42:47.448962
"""
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import mysql
# revision identifiers, used by Alembic.
revision = 'bff862ed6870'
down_revision = '2ee4f69c4623'
branch_labels = None
depends_on = None
def upgrade():
    """Create the ``note`` table and drop the superseded ``post`` table."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.create_table('note',
    sa.Column('id', sa.Integer(), nullable=False),
    sa.Column('public_id', sa.String(length=50), nullable=False),
    sa.Column('title', sa.String(length=255), nullable=False),
    sa.Column('content', sa.Text(), nullable=False),
    sa.Column('category_id', sa.Integer(), nullable=True),
    sa.Column('user_id', sa.Integer(), nullable=False),
    sa.ForeignKeyConstraint(['category_id'], ['category.id'], ),
    sa.ForeignKeyConstraint(['user_id'], ['user.id'], ),
    sa.PrimaryKeyConstraint('id'),
    sa.UniqueConstraint('public_id')
    )
    # ``note`` replaces ``post``; remove the old table and its unique index
    op.drop_index('public_id', table_name='post')
    op.drop_table('post')
    # ### end Alembic commands ###
def downgrade():
    """Recreate the ``post`` table (MySQL-specific DDL) and drop ``note``."""
    # ### commands auto generated by Alembic - please adjust! ###
    # Rebuild ``post`` exactly as captured by autogenerate, including the
    # MySQL engine/charset options and the original foreign key names
    op.create_table('post',
    sa.Column('id', mysql.INTEGER(display_width=11), autoincrement=True, nullable=False),
    sa.Column('public_id', mysql.VARCHAR(length=50), nullable=False),
    sa.Column('title', mysql.VARCHAR(length=255), nullable=False),
    sa.Column('content', mysql.TEXT(), nullable=False),
    sa.Column('category_id', mysql.INTEGER(display_width=11), autoincrement=False, nullable=True),
    sa.Column('user_id', mysql.INTEGER(display_width=11), autoincrement=False, nullable=False),
    sa.ForeignKeyConstraint(['category_id'], ['category.id'], name='post_ibfk_1'),
    sa.ForeignKeyConstraint(['user_id'], ['user.id'], name='post_ibfk_2'),
    sa.PrimaryKeyConstraint('id'),
    mysql_default_charset='latin1',
    mysql_engine='InnoDB'
    )
    op.create_index('public_id', 'post', ['public_id'], unique=True)
    op.drop_table('note')
    # ### end Alembic commands ###
b7d71d0082ed0f850650efd250f61d0670fcc037 | 19,851 | py | Python | python/sklearn/sklearn/metrics/tests/test_metrics.py | seckcoder/lang-learn | 1e0d6f412bbd7f89b1af00293fd907ddb3c1b571 | [
"Unlicense"
] | 1 | 2017-10-14T04:23:45.000Z | 2017-10-14T04:23:45.000Z | python/sklearn/sklearn/metrics/tests/test_metrics.py | seckcoder/lang-learn | 1e0d6f412bbd7f89b1af00293fd907ddb3c1b571 | [
"Unlicense"
] | null | null | null | python/sklearn/sklearn/metrics/tests/test_metrics.py | seckcoder/lang-learn | 1e0d6f412bbd7f89b1af00293fd907ddb3c1b571 | [
"Unlicense"
] | null | null | null | import random
import warnings
import numpy as np
from nose.tools import raises, assert_not_equal
from nose.tools import assert_true, assert_raises
from numpy.testing import assert_array_almost_equal
from numpy.testing import assert_array_equal
from numpy.testing import assert_equal, assert_almost_equal
from sklearn import datasets
from sklearn import svm
from sklearn.metrics import auc
from sklearn.metrics import classification_report
from sklearn.metrics import confusion_matrix
from sklearn.metrics import explained_variance_score
from sklearn.metrics import r2_score
from sklearn.metrics import f1_score
from sklearn.metrics import matthews_corrcoef
from sklearn.metrics import mean_squared_error
from sklearn.metrics import precision_recall_curve
from sklearn.metrics import precision_recall_fscore_support
from sklearn.metrics import precision_score
from sklearn.metrics import recall_score
from sklearn.metrics import roc_curve
from sklearn.metrics import auc_score
from sklearn.metrics import average_precision_score
from sklearn.metrics import zero_one
from sklearn.metrics import hinge_loss
def make_prediction(dataset=None, binary=False):
    """Make some classification predictions on a toy dataset using a SVC

    If binary is True restrict to a binary classification problem instead of a
    multiclass classification problem

    Parameters
    ----------
    dataset : optional
        Dataset object with ``data`` and ``target`` attributes; defaults to
        the iris dataset.
    binary : bool, optional
        If True, keep only the first two classes.

    Returns
    -------
    (y_true, y_pred, probas_pred) : ground truth labels, predicted labels,
        and predicted probabilities for the held-out second half of the data.
    """
    if dataset is None:
        # import some data to play with
        dataset = datasets.load_iris()

    X = dataset.data
    y = dataset.target

    if binary:
        # restrict to a binary classification task
        X, y = X[y < 2], y[y < 2]

    n_samples, n_features = X.shape
    # random.shuffle requires a mutable sequence; materialize the range as a
    # list so this also works on Python 3 (identical behavior on Python 2)
    p = list(range(n_samples))
    random.seed(0)
    random.shuffle(p)
    X, y = X[p], y[p]
    half = int(n_samples / 2)

    # add noisy features to make the problem harder and avoid perfect results
    rng = np.random.RandomState(0)
    X = np.c_[X, rng.randn(n_samples, 200 * n_features)]

    # run classifier, get class probabilities and label predictions
    clf = svm.SVC(kernel='linear', probability=True)
    probas_pred = clf.fit(X[:half], y[:half]).predict_proba(X[half:])

    if binary:
        # only interested in probabilities of the positive case
        # XXX: do we really want a special API for the binary case?
        probas_pred = probas_pred[:, 1]

    y_pred = clf.predict(X[half:])
    y_true = y[half:]
    return y_true, y_pred, probas_pred
def test_roc_curve():
    """Test Area under Receiver Operating Characteristic (ROC) curve"""
    # Use the binary toy problem so roc_curve applies directly
    y_true, _, probas_pred = make_prediction(binary=True)

    fpr, tpr, thresholds = roc_curve(y_true, probas_pred)
    roc_auc = auc(fpr, tpr)
    assert_array_almost_equal(roc_auc, 0.80, decimal=2)
    # auc_score must agree with the trapezoidal area under the ROC curve
    assert_almost_equal(roc_auc, auc_score(y_true, probas_pred))
def test_roc_returns_consistency():
    """Test whether the returned threshold matches up with tpr"""
    # make small toy dataset
    y_true, _, probas_pred = make_prediction(binary=True)
    fpr, tpr, thresholds = roc_curve(y_true, probas_pred)

    # Recompute the true positive rate directly from each returned
    # threshold: scores at or above the threshold are predicted positive
    n_positive = np.sum(y_true)
    tpr_correct = [
        1.0 * np.sum((probas_pred >= threshold) & y_true) / n_positive
        for threshold in thresholds
    ]

    # If the thresholds came back in the right order, the recomputed
    # rates match the returned tpr
    assert_array_almost_equal(tpr, tpr_correct, decimal=2)
@raises(ValueError)
def test_roc_curve_multi():
    """roc_curve not applicable for multi-class problems"""
    # Passing multiclass targets must raise ValueError (asserted by @raises)
    y_true, _, probas_pred = make_prediction(binary=False)

    fpr, tpr, thresholds = roc_curve(y_true, probas_pred)
def test_roc_curve_confidence():
    """roc_curve for confidence scores"""
    y_true, _, probas_pred = make_prediction(binary=True)

    # Shift the probabilities into [-0.5, 0.5] to mimic decision-function
    # style confidence scores; the AUC only depends on the ranking, so the
    # expected value is the same as for the raw probabilities
    fpr, tpr, thresholds = roc_curve(y_true, probas_pred - 0.5)
    roc_auc = auc(fpr, tpr)
    assert_array_almost_equal(roc_auc, 0.80, decimal=2)
def test_roc_curve_hard():
    """roc_curve for hard decisions"""
    y_true, pred, probas_pred = make_prediction(binary=True)

    # always predict one: a constant score carries no ranking
    # information, so the AUC must be chance level (0.5)
    trivial_pred = np.ones(y_true.shape)
    fpr, tpr, thresholds = roc_curve(y_true, trivial_pred)
    roc_auc = auc(fpr, tpr)
    assert_array_almost_equal(roc_auc, 0.50, decimal=2)

    # always predict zero
    trivial_pred = np.zeros(y_true.shape)
    fpr, tpr, thresholds = roc_curve(y_true, trivial_pred)
    roc_auc = auc(fpr, tpr)
    assert_array_almost_equal(roc_auc, 0.50, decimal=2)

    # hard decisions: 0/1 label predictions give a coarser curve (and a
    # lower AUC) than the probabilistic scores used in test_roc_curve
    fpr, tpr, thresholds = roc_curve(y_true, pred)
    roc_auc = auc(fpr, tpr)
    assert_array_almost_equal(roc_auc, 0.74, decimal=2)
def test_auc():
    """Test Area Under Curve (AUC) computation"""
    # (x, y, expected area) triples covering increasing, decreasing and
    # constant curves
    cases = [
        ([0, 1], [0, 1], 0.5),
        ([1, 0], [0, 1], 0.5),
        ([0, 1], [1, 1], 1),
        ([0, 0.5, 1], [0, 0.5, 1], 0.5),
    ]
    for x, y, expected in cases:
        assert_array_almost_equal(auc(x, y), expected)
def test_precision_recall_f1_score_binary():
    """Test Precision Recall and F1 Score for binary classification task"""
    y_true, y_pred, _ = make_prediction(binary=True)

    # detailed measures for each class
    p, r, f, s = precision_recall_fscore_support(y_true, y_pred, average=None)
    assert_array_almost_equal(p, [0.73, 0.75], 2)
    assert_array_almost_equal(r, [0.76, 0.72], 2)
    assert_array_almost_equal(f, [0.75, 0.74], 2)
    assert_array_equal(s, [25, 25])

    # individual scoring function that can be used for grid search: in the
    # binary class case the score is the value of the measure for the positive
    # class (e.g. label == 1) — i.e. the second entry of the arrays above
    ps = precision_score(y_true, y_pred)
    assert_array_almost_equal(ps, 0.75, 2)

    rs = recall_score(y_true, y_pred)
    assert_array_almost_equal(rs, 0.72, 2)

    fs = f1_score(y_true, y_pred)
    assert_array_almost_equal(fs, 0.74, 2)
def test_confusion_matrix_binary():
    """Test confusion matrix - binary classification case"""
    y_true, y_pred, _ = make_prediction(binary=True)

    cm = confusion_matrix(y_true, y_pred)
    assert_array_equal(cm, [[19, 6], [7, 18]])

    # Recompute the Matthews correlation coefficient by hand from the
    # confusion matrix cells and check matthews_corrcoef against it.
    # (The MCC formula is symmetric under swapping the two classes, so
    # which cell is labelled "tp" vs "tn" does not change the result.)
    tp = cm[0, 0]
    tn = cm[1, 1]
    fp = cm[0, 1]
    fn = cm[1, 0]
    num = (tp * tn - fp * fn)
    den = np.sqrt((tp + fp) * (tp + fn) * (tn + fp) * (tn + fn))
    # Guard against a zero denominator (degenerate confusion matrix)
    if den == 0.:
        true_mcc = 0
    else:
        true_mcc = num / den
    mcc = matthews_corrcoef(y_true, y_pred)
    assert_array_almost_equal(mcc, true_mcc, decimal=2)
    assert_array_almost_equal(mcc, 0.48, decimal=2)
def test_precision_recall_f1_score_multiclass():
    """Test Precision Recall and F1 Score for multiclass classification task"""
    y_true, y_pred, _ = make_prediction(binary=False)

    # compute scores with default labels introspection
    p, r, f, s = precision_recall_fscore_support(y_true, y_pred, average=None)
    assert_array_almost_equal(p, [0.82, 0.55, 0.47], 2)
    assert_array_almost_equal(r, [0.92, 0.17, 0.90], 2)
    assert_array_almost_equal(f, [0.87, 0.26, 0.62], 2)
    assert_array_equal(s, [25, 30, 20])

    # averaging tests: micro, macro and (support-)weighted averages each
    # collapse the per-class values above into a single score
    ps = precision_score(y_true, y_pred, pos_label=1, average='micro')
    assert_array_almost_equal(ps, 0.61, 2)

    rs = recall_score(y_true, y_pred, average='micro')
    assert_array_almost_equal(rs, 0.61, 2)

    fs = f1_score(y_true, y_pred, average='micro')
    assert_array_almost_equal(fs, 0.61, 2)

    ps = precision_score(y_true, y_pred, average='macro')
    assert_array_almost_equal(ps, 0.62, 2)

    rs = recall_score(y_true, y_pred, average='macro')
    assert_array_almost_equal(rs, 0.66, 2)

    fs = f1_score(y_true, y_pred, average='macro')
    assert_array_almost_equal(fs, 0.58, 2)

    ps = precision_score(y_true, y_pred, average='weighted')
    assert_array_almost_equal(ps, 0.62, 2)

    rs = recall_score(y_true, y_pred, average='weighted')
    assert_array_almost_equal(rs, 0.61, 2)

    fs = f1_score(y_true, y_pred, average='weighted')
    assert_array_almost_equal(fs, 0.55, 2)

    # same prediction but with and explicit label ordering: the per-class
    # arrays must be permuted accordingly
    p, r, f, s = precision_recall_fscore_support(
        y_true, y_pred, labels=[0, 2, 1], average=None)
    assert_array_almost_equal(p, [0.82, 0.47, 0.55], 2)
    assert_array_almost_equal(r, [0.92, 0.90, 0.17], 2)
    assert_array_almost_equal(f, [0.87, 0.62, 0.26], 2)
    assert_array_equal(s, [25, 20, 30])
def test_precision_recall_f1_score_multiclass_pos_label_none():
    """Test Precision Recall and F1 Score for multiclass classification task

    GH Issue #1296
    """
    # Non-regression smoke test: calling with pos_label=None must not raise.
    # The returned values are intentionally unused.
    # initialize data
    y_true = np.array([0, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1, 0, 1, 0, 1])
    y_pred = np.array([1, 1, 0, 1, 0, 1, 1, 1, 1, 0, 1, 0, 1, 0, 1])

    # compute scores with default labels introspection
    p, r, f, s = precision_recall_fscore_support(y_true, y_pred,
                                                 pos_label=None,
                                                 average='weighted')
def test_zero_precision_recall():
    """Check that pathological cases do not bring NaNs"""

    try:
        # Promote all numpy floating point warnings (e.g. divide-by-zero)
        # to hard errors so a NaN-producing division fails the test
        old_error_settings = np.seterr(all='raise')

        # The predictions share no labels with the ground truth, so
        # precision, recall and f1 must all come out as exactly zero
        y_true = np.array([0, 1, 2, 0, 1, 2])
        y_pred = np.array([2, 0, 1, 1, 2, 0])

        assert_almost_equal(precision_score(y_true, y_pred,
                                            average='weighted'), 0.0, 2)
        assert_almost_equal(recall_score(y_true, y_pred, average='weighted'),
                            0.0, 2)
        assert_almost_equal(f1_score(y_true, y_pred, average='weighted'),
                            0.0, 2)

    finally:
        # Restore the caller's numpy error settings whatever happens
        np.seterr(**old_error_settings)
def test_confusion_matrix_multiclass():
    """Test confusion matrix - multi-class case"""
    y_true, y_pred, _ = make_prediction(binary=False)

    # compute confusion matrix with default labels introspection
    cm = confusion_matrix(y_true, y_pred)
    assert_array_equal(cm, [[23, 2, 0],
                            [5, 5, 20],
                            [0, 2, 18]])

    # compute confusion matrix with explicit label ordering: rows and
    # columns are permuted to follow the given label order
    cm = confusion_matrix(y_true, y_pred, labels=[0, 2, 1])
    assert_array_equal(cm, [[23, 0, 2],
                            [0, 18, 2],
                            [5, 20, 5]])
def test_confusion_matrix_multiclass_subset_labels():
    """Test confusion matrix - multi-class case with subset of labels"""
    y_true, y_pred, _ = make_prediction(binary=False)

    # compute confusion matrix with only first two labels considered:
    # samples of the omitted class simply drop out of the matrix
    cm = confusion_matrix(y_true, y_pred, labels=[0, 1])
    assert_array_equal(cm, [[23, 2],
                            [5, 5]])

    # compute confusion matrix with explicit label ordering for only subset
    # of labels
    cm = confusion_matrix(y_true, y_pred, labels=[2, 1])
    assert_array_equal(cm, [[18, 2],
                            [20, 5]])
def test_classification_report():
"""Test performance report"""
iris = datasets.load_iris()
y_true, y_pred, _ = make_prediction(dataset=iris, binary=False)
# print classification report with class names
expected_report = """\
precision recall f1-score support
setosa 0.82 0.92 0.87 25
versicolor 0.56 0.17 0.26 30
virginica 0.47 0.90 0.62 20
avg / total 0.62 0.61 0.56 75
"""
report = classification_report(
y_true, y_pred, labels=range(len(iris.target_names)),
target_names=iris.target_names)
assert_equal(report, expected_report)
# print classification report with label detection
expected_report = """\
precision recall f1-score support
0 0.82 0.92 0.87 25
1 0.56 0.17 0.26 30
2 0.47 0.90 0.62 20
avg / total 0.62 0.61 0.56 75
"""
report = classification_report(y_true, y_pred)
assert_equal(report, expected_report)
def _test_precision_recall_curve(y_true, probas_pred):
    """Test Precision-Recall and area under PR curve

    Helper (leading underscore keeps test collectors from running it
    directly); callers supply the labels and scores to check.
    """
    p, r, thresholds = precision_recall_curve(y_true, probas_pred)
    precision_recall_auc = auc(r, p)
    assert_array_almost_equal(precision_recall_auc, 0.82, 2)
    # average_precision_score must agree with the trapezoidal area under
    # the precision-recall curve
    assert_array_almost_equal(precision_recall_auc,
                              average_precision_score(y_true, probas_pred))
    # Smoke test in the case of proba having only one value
    p, r, thresholds = precision_recall_curve(y_true,
                                              np.zeros_like(probas_pred))
    precision_recall_auc = auc(r, p)
    assert_array_almost_equal(precision_recall_auc, 0.75, 3)
def test_losses():
    """Test loss functions"""
    y_true, y_pred, _ = make_prediction(binary=True)
    n = y_true.shape[0]

    # zero_one counts misclassified samples (unnormalized)
    assert_equal(zero_one(y_true, y_pred), 13)
    # For 0/1 labels the mean squared error equals the error rate
    assert_almost_equal(mean_squared_error(y_true, y_pred), 12.999 / n, 2)
    assert_almost_equal(mean_squared_error(y_true, y_true), 0.00, 2)

    assert_almost_equal(explained_variance_score(y_true, y_pred), -0.04, 2)
    # Perfect predictions explain all the variance
    assert_almost_equal(explained_variance_score(y_true, y_true), 1.00, 2)
    # Constant ground truth is scored 0.0 rather than NaN
    assert_equal(explained_variance_score([0, 0, 0], [0, 1, 1]), 0.0)

    assert_almost_equal(r2_score(y_true, y_pred), -0.04, 2)
    assert_almost_equal(r2_score(y_true, y_true), 1.00, 2)
    assert_equal(r2_score([0, 0, 0], [0, 0, 0]), 1.0)
    assert_equal(r2_score([0, 0, 0], [0, 1, 1]), 0.0)
def test_symmetry():
    """Test the symmetry of score and loss functions"""
    y_true, y_pred, _ = make_prediction(binary=True)

    # symmetric: swapping the two arguments must not change the result
    assert_equal(zero_one(y_true, y_pred),
                 zero_one(y_pred, y_true))
    assert_almost_equal(mean_squared_error(y_true, y_pred),
                        mean_squared_error(y_pred, y_true))

    # not symmetric: these scores treat the first argument as ground truth
    assert_true(explained_variance_score(y_true, y_pred) !=
                explained_variance_score(y_pred, y_true))
    assert_true(r2_score(y_true, y_pred) !=
                r2_score(y_pred, y_true))
    # FIXME: precision and recall aren't symmetric either
| 35.961957 | 79 | 0.660974 | import random
import warnings
import numpy as np
from nose.tools import raises, assert_not_equal
from nose.tools import assert_true, assert_raises
from numpy.testing import assert_array_almost_equal
from numpy.testing import assert_array_equal
from numpy.testing import assert_equal, assert_almost_equal
from sklearn import datasets
from sklearn import svm
from sklearn.metrics import auc
from sklearn.metrics import classification_report
from sklearn.metrics import confusion_matrix
from sklearn.metrics import explained_variance_score
from sklearn.metrics import r2_score
from sklearn.metrics import f1_score
from sklearn.metrics import matthews_corrcoef
from sklearn.metrics import mean_squared_error
from sklearn.metrics import precision_recall_curve
from sklearn.metrics import precision_recall_fscore_support
from sklearn.metrics import precision_score
from sklearn.metrics import recall_score
from sklearn.metrics import roc_curve
from sklearn.metrics import auc_score
from sklearn.metrics import average_precision_score
from sklearn.metrics import zero_one
from sklearn.metrics import hinge_loss
def make_prediction(dataset=None, binary=False):
"""Make some classification predictions on a toy dataset using a SVC
If binary is True restrict to a binary classification problem instead of a
multiclass classification problem
"""
if dataset is None:
# import some data to play with
dataset = datasets.load_iris()
X = dataset.data
y = dataset.target
if binary:
# restrict to a binary classification task
X, y = X[y < 2], y[y < 2]
n_samples, n_features = X.shape
p = range(n_samples)
random.seed(0)
random.shuffle(p)
X, y = X[p], y[p]
half = int(n_samples / 2)
# add noisy features to make the problem harder and avoid perfect results
rng = np.random.RandomState(0)
X = np.c_[X, rng.randn(n_samples, 200 * n_features)]
# run classifier, get class probabilities and label predictions
clf = svm.SVC(kernel='linear', probability=True)
probas_pred = clf.fit(X[:half], y[:half]).predict_proba(X[half:])
if binary:
# only interested in probabilities of the positive case
# XXX: do we really want a special API for the binary case?
probas_pred = probas_pred[:, 1]
y_pred = clf.predict(X[half:])
y_true = y[half:]
return y_true, y_pred, probas_pred
def test_roc_curve():
"""Test Area under Receiver Operating Characteristic (ROC) curve"""
y_true, _, probas_pred = make_prediction(binary=True)
fpr, tpr, thresholds = roc_curve(y_true, probas_pred)
roc_auc = auc(fpr, tpr)
assert_array_almost_equal(roc_auc, 0.80, decimal=2)
assert_almost_equal(roc_auc, auc_score(y_true, probas_pred))
def test_roc_returns_consistency():
    """Test whether the returned threshold matches up with tpr"""
    # make small toy dataset
    y_true, _, probas_pred = make_prediction(binary=True)
    fpr, tpr, thresholds = roc_curve(y_true, probas_pred)

    # use the given thresholds to recompute the tpr directly;
    # iterate the thresholds themselves instead of range(len(...)), and
    # hoist the loop-invariant positive count out of the loop
    n_pos = np.sum(y_true)
    tpr_correct = [np.sum((probas_pred >= threshold) & y_true) / float(n_pos)
                   for threshold in thresholds]

    # compare tpr and tpr_correct to see if the thresholds' order was correct
    assert_array_almost_equal(tpr, tpr_correct, decimal=2)
@raises(ValueError)
def test_roc_curve_multi():
    """roc_curve not applicable for multi-class problems"""
    # probas_pred is 2-D for the multiclass case; roc_curve must reject it
    y_true, _, probas_pred = make_prediction(binary=False)
    fpr, tpr, thresholds = roc_curve(y_true, probas_pred)
def test_roc_curve_confidence():
    """roc_curve for confidence scores"""
    labels, _, scores = make_prediction(binary=True)
    # shift probabilities into signed confidence scores centred on zero
    fp_rate, tp_rate, _ = roc_curve(labels, scores - 0.5)
    area = auc(fp_rate, tp_rate)
    assert_array_almost_equal(area, 0.80, decimal=2)
def test_roc_curve_hard():
    """roc_curve for hard decisions"""
    y_true, pred, probas_pred = make_prediction(binary=True)

    # always predict one: a constant score carries no ranking information,
    # so the ROC must be chance level (AUC = 0.5)
    trivial_pred = np.ones(y_true.shape)
    fpr, tpr, thresholds = roc_curve(y_true, trivial_pred)
    roc_auc = auc(fpr, tpr)
    assert_array_almost_equal(roc_auc, 0.50, decimal=2)

    # always predict zero: same chance-level expectation
    trivial_pred = np.zeros(y_true.shape)
    fpr, tpr, thresholds = roc_curve(y_true, trivial_pred)
    roc_auc = auc(fpr, tpr)
    assert_array_almost_equal(roc_auc, 0.50, decimal=2)

    # hard decisions: binary label predictions instead of scores
    fpr, tpr, thresholds = roc_curve(y_true, pred)
    roc_auc = auc(fpr, tpr)
    assert_array_almost_equal(roc_auc, 0.74, decimal=2)
def test_auc():
    """Test Area Under Curve (AUC) computation"""
    # unit ramp: area under y = x is 0.5
    x = [0, 1]
    y = [0, 1]
    assert_array_almost_equal(auc(x, y), 0.5)
    # x given in decreasing order must yield the same area
    x = [1, 0]
    y = [0, 1]
    assert_array_almost_equal(auc(x, y), 0.5)
    # constant y = 1 over [0, 1]: area is 1
    x = [0, 1]
    y = [1, 1]
    assert_array_almost_equal(auc(x, y), 1)
    # intermediate point on the same ramp must not change the area
    x = [0, 0.5, 1]
    y = [0, 0.5, 1]
    assert_array_almost_equal(auc(x, y), 0.5)


def test_auc_duplicate_values():
    # Test Area Under Curve (AUC) computation with duplicate values

    # auc() was previously sorting the x and y arrays according to the indices
    # from numpy.argsort(x), which was reordering the tied 0's in this example
    # and resulting in an incorrect area computation. This test detects the
    # error.
    x = [0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.5, 1.]
    y = [0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9,
         1., 1., 1., 1., 1., 1., 1., 1.]
    assert_array_almost_equal(auc(x, y), 1.)


def test_auc_errors():
    # Incompatible shapes: x and y must have the same length
    assert_raises(ValueError, auc, [0.0, 0.5, 1.0], [0.1, 0.2])

    # Too few x values: at least two points are needed to define an area
    assert_raises(ValueError, auc, [0.0], [0.1])
def test_precision_recall_f1_score_binary():
    """Test Precision Recall and F1 Score for binary classification task"""
    y_true, y_pred, _ = make_prediction(binary=True)

    # detailed measures for each class (average=None keeps per-class values)
    p, r, f, s = precision_recall_fscore_support(y_true, y_pred, average=None)
    assert_array_almost_equal(p, [0.73, 0.75], 2)
    assert_array_almost_equal(r, [0.76, 0.72], 2)
    assert_array_almost_equal(f, [0.75, 0.74], 2)
    assert_array_equal(s, [25, 25])

    # individual scoring function that can be used for grid search: in the
    # binary class case the score is the value of the measure for the positive
    # class (e.g. label == 1)
    ps = precision_score(y_true, y_pred)
    assert_array_almost_equal(ps, 0.75, 2)

    rs = recall_score(y_true, y_pred)
    assert_array_almost_equal(rs, 0.72, 2)

    fs = f1_score(y_true, y_pred)
    assert_array_almost_equal(fs, 0.74, 2)
def test_average_precision_score_duplicate_values():
    # Duplicate values with precision-recall require a different
    # processing than when computing the AUC of a ROC, because the
    # precision-recall curve is a decreasing curve

    # The following situtation corresponds to a perfect
    # test statistic, the average_precision_score should be 1
    y_true = [0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1]
    y_score = [0, .1, .1, .4, .5, .6, .6, .9, .9, 1, 1]
    assert_equal(average_precision_score(y_true, y_score), 1)


def test_average_precision_score_tied_values():
    # Here if we go from left to right in y_true, the 0 values are
    # are separated from the 1 values, so it appears that we've
    # Correctly sorted our classifications. But in fact the first two
    # values have the same score (0.5) and so the first two values
    # could be swapped around, creating an imperfect sorting. This
    # imperfection should come through in the end score, making it less
    # than one.
    y_true = [0, 1, 1]
    y_score = [.5, .5, .6]
    assert_not_equal(average_precision_score(y_true, y_score), 1.)
def test_precision_recall_fscore_support_errors():
    """Invalid parameter values must raise ValueError."""
    y_true, y_pred, _ = make_prediction(binary=True)

    # Bad beta: beta must be strictly positive
    assert_raises(ValueError, precision_recall_fscore_support,
                  y_true, y_pred, beta=0.0)

    # Bad pos_label: label 2 does not occur in a {0, 1} problem
    assert_raises(ValueError, precision_recall_fscore_support,
                  y_true, y_pred, pos_label=2, average='macro')

    # Bad average option: only the documented averaging modes are accepted
    assert_raises(ValueError, precision_recall_fscore_support,
                  [0, 1, 2], [1, 2, 0], average='mega')
def test_confusion_matrix_binary():
    """Test confusion matrix - binary classification case"""
    y_true, y_pred, _ = make_prediction(binary=True)

    cm = confusion_matrix(y_true, y_pred)
    assert_array_equal(cm, [[19, 6], [7, 18]])

    # recompute the Matthews correlation coefficient from the matrix entries
    tp = cm[0, 0]
    tn = cm[1, 1]
    fp = cm[0, 1]
    fn = cm[1, 0]
    num = (tp * tn - fp * fn)
    den = np.sqrt((tp + fp) * (tp + fn) * (tn + fp) * (tn + fn))
    if den == 0.:
        # degenerate matrix: MCC is defined as 0 in this case
        true_mcc = 0
    else:
        true_mcc = num / den
    mcc = matthews_corrcoef(y_true, y_pred)
    assert_array_almost_equal(mcc, true_mcc, decimal=2)
    assert_array_almost_equal(mcc, 0.48, decimal=2)


def test_matthews_corrcoef_nan():
    # a single-element, fully-wrong prediction makes the denominator zero;
    # matthews_corrcoef must return 0.0 (with a warning) instead of NaN
    with warnings.catch_warnings():
        warnings.simplefilter("ignore")
        assert_equal(matthews_corrcoef([0], [1]), 0.0)
def test_precision_recall_f1_score_multiclass():
    """Test Precision Recall and F1 Score for multiclass classification task"""
    y_true, y_pred, _ = make_prediction(binary=False)

    # compute scores with default labels introspection
    p, r, f, s = precision_recall_fscore_support(y_true, y_pred, average=None)
    assert_array_almost_equal(p, [0.82, 0.55, 0.47], 2)
    assert_array_almost_equal(r, [0.92, 0.17, 0.90], 2)
    assert_array_almost_equal(f, [0.87, 0.26, 0.62], 2)
    assert_array_equal(s, [25, 30, 20])

    # averaging tests: micro = global counts, macro = unweighted class mean,
    # weighted = support-weighted class mean
    ps = precision_score(y_true, y_pred, pos_label=1, average='micro')
    assert_array_almost_equal(ps, 0.61, 2)

    rs = recall_score(y_true, y_pred, average='micro')
    assert_array_almost_equal(rs, 0.61, 2)

    fs = f1_score(y_true, y_pred, average='micro')
    assert_array_almost_equal(fs, 0.61, 2)

    ps = precision_score(y_true, y_pred, average='macro')
    assert_array_almost_equal(ps, 0.62, 2)

    rs = recall_score(y_true, y_pred, average='macro')
    assert_array_almost_equal(rs, 0.66, 2)

    fs = f1_score(y_true, y_pred, average='macro')
    assert_array_almost_equal(fs, 0.58, 2)

    ps = precision_score(y_true, y_pred, average='weighted')
    assert_array_almost_equal(ps, 0.62, 2)

    rs = recall_score(y_true, y_pred, average='weighted')
    assert_array_almost_equal(rs, 0.61, 2)

    fs = f1_score(y_true, y_pred, average='weighted')
    assert_array_almost_equal(fs, 0.55, 2)

    # same prediction but with and explicit label ordering
    p, r, f, s = precision_recall_fscore_support(
        y_true, y_pred, labels=[0, 2, 1], average=None)
    assert_array_almost_equal(p, [0.82, 0.47, 0.55], 2)
    assert_array_almost_equal(r, [0.92, 0.90, 0.17], 2)
    assert_array_almost_equal(f, [0.87, 0.62, 0.26], 2)
    assert_array_equal(s, [25, 20, 30])


def test_precision_recall_f1_score_multiclass_pos_label_none():
    """Test Precision Recall and F1 Score for multiclass classification task

    GH Issue #1296
    """
    # initialize data
    y_true = np.array([0, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1, 0, 1, 0, 1])
    y_pred = np.array([1, 1, 0, 1, 0, 1, 1, 1, 1, 0, 1, 0, 1, 0, 1])

    # compute scores with default labels introspection; this is a smoke test:
    # pos_label=None must not raise when combined with averaging
    p, r, f, s = precision_recall_fscore_support(y_true, y_pred,
                                                 pos_label=None,
                                                 average='weighted')
def test_zero_precision_recall():
    """Check that pathological cases do not bring NaNs"""
    try:
        # promote numpy floating-point warnings to errors so a hidden
        # division by zero would fail the test
        old_error_settings = np.seterr(all='raise')

        # every prediction is wrong: all scores must be exactly 0, not NaN
        y_true = np.array([0, 1, 2, 0, 1, 2])
        y_pred = np.array([2, 0, 1, 1, 2, 0])

        assert_almost_equal(precision_score(y_true, y_pred,
                                            average='weighted'), 0.0, 2)
        assert_almost_equal(recall_score(y_true, y_pred, average='weighted'),
                            0.0, 2)
        assert_almost_equal(f1_score(y_true, y_pred, average='weighted'),
                            0.0, 2)

    finally:
        # restore the caller's numpy error settings whatever happens
        np.seterr(**old_error_settings)
def test_confusion_matrix_multiclass():
    """Test confusion matrix - multi-class case"""
    y_true, y_pred, _ = make_prediction(binary=False)

    # compute confusion matrix with default labels introspection
    cm = confusion_matrix(y_true, y_pred)
    assert_array_equal(cm, [[23, 2, 0],
                            [5, 5, 20],
                            [0, 2, 18]])

    # compute confusion matrix with explicit label ordering: rows/columns
    # must follow the given [0, 2, 1] permutation
    cm = confusion_matrix(y_true, y_pred, labels=[0, 2, 1])
    assert_array_equal(cm, [[23, 0, 2],
                            [0, 18, 2],
                            [5, 20, 5]])


def test_confusion_matrix_multiclass_subset_labels():
    """Test confusion matrix - multi-class case with subset of labels"""
    y_true, y_pred, _ = make_prediction(binary=False)

    # compute confusion matrix with only first two labels considered
    cm = confusion_matrix(y_true, y_pred, labels=[0, 1])
    assert_array_equal(cm, [[23, 2],
                            [5, 5]])

    # compute confusion matrix with explicit label ordering for only subset
    # of labels
    cm = confusion_matrix(y_true, y_pred, labels=[2, 1])
    assert_array_equal(cm, [[18, 2],
                            [20, 5]])
def test_classification_report():
    """Test performance report"""
    iris = datasets.load_iris()
    y_true, y_pred, _ = make_prediction(dataset=iris, binary=False)

    # print classification report with class names
    # NOTE: the expected strings are whitespace-sensitive; do not reformat
    expected_report = """\
             precision    recall  f1-score   support

     setosa       0.82      0.92      0.87        25
 versicolor       0.56      0.17      0.26        30
  virginica       0.47      0.90      0.62        20

avg / total       0.62      0.61      0.56        75
"""
    report = classification_report(
        y_true, y_pred, labels=range(len(iris.target_names)),
        target_names=iris.target_names)
    assert_equal(report, expected_report)

    # print classification report with label detection
    expected_report = """\
             precision    recall  f1-score   support

          0       0.82      0.92      0.87        25
          1       0.56      0.17      0.26        30
          2       0.47      0.90      0.62        20

avg / total       0.62      0.61      0.56        75
"""
    report = classification_report(y_true, y_pred)
    assert_equal(report, expected_report)
def test_precision_recall_curve():
    """precision_recall_curve on model scores and a tiny hand-checked case."""
    y_true, _, probas_pred = make_prediction(binary=True)
    _test_precision_recall_curve(y_true, probas_pred)

    # Use {-1, 1} for labels; make sure original labels aren't modified
    y_true[np.where(y_true == 0)] = -1
    y_true_copy = y_true.copy()

    _test_precision_recall_curve(y_true, probas_pred)
    assert_array_equal(y_true_copy, y_true)

    # tiny example whose curve can be verified by hand
    labels = [1, 0, 0, 1]
    predict_probas = [1, 2, 3, 4]
    p, r, t = precision_recall_curve(labels, predict_probas)
    assert_array_almost_equal(p, np.array([0.5, 0.33333333, 0.5, 1., 1.]))
    assert_array_almost_equal(r, np.array([1., 0.5, 0.5, 0.5, 0.]))
    assert_array_almost_equal(t, np.array([1, 2, 3, 4]))


def _test_precision_recall_curve(y_true, probas_pred):
    """Test Precision-Recall and aread under PR curve"""
    p, r, thresholds = precision_recall_curve(y_true, probas_pred)
    precision_recall_auc = auc(r, p)
    assert_array_almost_equal(precision_recall_auc, 0.82, 2)
    assert_array_almost_equal(precision_recall_auc,
                              average_precision_score(y_true, probas_pred))
    # Smoke test in the case of proba having only one value
    p, r, thresholds = precision_recall_curve(y_true,
                                              np.zeros_like(probas_pred))
    precision_recall_auc = auc(r, p)
    assert_array_almost_equal(precision_recall_auc, 0.75, 3)


def test_precision_recall_curve_errors():
    # Contains non-binary labels: must raise ValueError
    assert_raises(ValueError, precision_recall_curve,
                  [0, 1, 2], [[0.0], [1.0], [1.0]])
def test_score_scale_invariance():
    """auc_score and average_precision_score must be invariant under
    positive scaling and under shifting of the prediction scores, since
    both depend only on the ranking of the scores."""
    y_true, _, probas_pred = make_prediction(binary=True)

    roc_auc = auc_score(y_true, probas_pred)
    roc_auc_scaled = auc_score(y_true, 100 * probas_pred)
    assert_equal(roc_auc, roc_auc_scaled)
    roc_auc_shifted = auc_score(y_true, probas_pred - 10)
    assert_equal(roc_auc, roc_auc_shifted)

    pr_auc = average_precision_score(y_true, probas_pred)
    pr_auc_scaled = average_precision_score(y_true, 100 * probas_pred)
    assert_equal(pr_auc, pr_auc_scaled)
    pr_auc_shifted = average_precision_score(y_true, probas_pred - 10)
    assert_equal(pr_auc, pr_auc_shifted)
def test_losses():
    """Test loss functions"""
    y_true, y_pred, _ = make_prediction(binary=True)
    n = y_true.shape[0]

    # zero_one counts misclassified samples (not a fraction)
    assert_equal(zero_one(y_true, y_pred), 13)
    assert_almost_equal(mean_squared_error(y_true, y_pred), 12.999 / n, 2)
    assert_almost_equal(mean_squared_error(y_true, y_true), 0.00, 2)

    # explained variance / r2: 1.0 is a perfect score
    assert_almost_equal(explained_variance_score(y_true, y_pred), -0.04, 2)
    assert_almost_equal(explained_variance_score(y_true, y_true), 1.00, 2)
    assert_equal(explained_variance_score([0, 0, 0], [0, 1, 1]), 0.0)

    assert_almost_equal(r2_score(y_true, y_pred), -0.04, 2)
    assert_almost_equal(r2_score(y_true, y_true), 1.00, 2)
    assert_equal(r2_score([0, 0, 0], [0, 0, 0]), 1.0)
    assert_equal(r2_score([0, 0, 0], [0, 1, 1]), 0.0)


def test_losses_at_limits():
    # test limit cases: single-element inputs
    assert_almost_equal(mean_squared_error([0.], [0.]), 0.00, 2)
    assert_almost_equal(explained_variance_score([0.], [0.]), 1.00, 2)
    assert_almost_equal(r2_score([0., 1], [0., 1]), 1.00, 2)


def test_r2_one_case_error():
    # test whether r2_score raises error given one point
    assert_raises(ValueError, r2_score, [0], [0])
def test_symmetry():
    """Test the symmetry of score and loss functions"""
    y_true, y_pred, _ = make_prediction(binary=True)

    # symmetric: swapping y_true and y_pred leaves the value unchanged
    assert_equal(zero_one(y_true, y_pred),
                 zero_one(y_pred, y_true))

    assert_almost_equal(mean_squared_error(y_true, y_pred),
                        mean_squared_error(y_pred, y_true))

    # not symmetric: these scores treat the first argument as ground truth
    assert_true(explained_variance_score(y_true, y_pred) !=
                explained_variance_score(y_pred, y_true))
    assert_true(r2_score(y_true, y_pred) !=
                r2_score(y_pred, y_true))
    # FIXME: precision and recall aren't symmetric either
def test_hinge_loss_binary():
    """hinge_loss with {-1, 1} targets and with a {0, 2} label encoding."""
    decisions = np.array([-8.5, 0.5, 1.5, -0.3])

    # canonical {-1, 1} encoding
    targets = np.array([-1, 1, 1, -1])
    assert_equal(1.2 / 4, hinge_loss(targets, decisions))

    # same problem re-encoded as {0, 2}; explicit pos/neg labels required
    targets = np.array([0, 2, 2, 0])
    assert_equal(1.2 / 4,
                 hinge_loss(targets, decisions, pos_label=2, neg_label=0))
def test_roc_curve_one_label():
    """roc_curve with a single class present must warn and yield NaN rates."""
    y_true = [1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
    y_pred = [0, 1, 0, 1, 0, 1, 0, 1, 0, 1]
    # assert there are warnings
    with warnings.catch_warnings(True) as w:
        fpr, tpr, thresholds = roc_curve(y_true, y_pred)
        assert_equal(len(w), 1)
    # all true labels, all fpr should be nan (no negatives to count)
    assert_array_equal(fpr,
                       np.nan * np.ones(len(thresholds) + 1))

    # assert there are warnings
    with warnings.catch_warnings(True) as w:
        fpr, tpr, thresholds = roc_curve([1 - x for x in y_true],
                                         y_pred)
        assert_equal(len(w), 1)
    # all negative labels, all tpr should be nan (no positives to count)
    assert_array_equal(tpr,
                       np.nan * np.ones(len(thresholds) + 1))
| 5,435 | 0 | 299 |
ea54792c432ac442abc7b1aa8dfe5fd91f8f6a98 | 148 | py | Python | mne/externals/h5io/__init__.py | fmamashli/mne-python | 52f064415e7c9fa8fe243d22108dcdf3d86505b9 | [
"BSD-3-Clause"
] | 1,953 | 2015-01-17T20:33:46.000Z | 2022-03-30T04:36:34.000Z | mne/externals/h5io/__init__.py | alexandrebarachant/mne-python | b54e38c9bbac38c6f53747075b5bad2936fbc5b9 | [
"BSD-3-Clause"
] | 8,490 | 2015-01-01T13:04:18.000Z | 2022-03-31T23:02:08.000Z | mne/externals/h5io/__init__.py | alexandrebarachant/mne-python | b54e38c9bbac38c6f53747075b5bad2936fbc5b9 | [
"BSD-3-Clause"
] | 1,130 | 2015-01-08T22:39:27.000Z | 2022-03-30T21:44:26.000Z | """Python Objects Onto HDF5
"""
__version__ = '0.1.dev0'
from ._h5io import read_hdf5, write_hdf5, _TempDir, object_diff # noqa, analysis:ignore
| 21.142857 | 88 | 0.736486 | """Python Objects Onto HDF5
"""
__version__ = '0.1.dev0'
from ._h5io import read_hdf5, write_hdf5, _TempDir, object_diff # noqa, analysis:ignore
| 0 | 0 | 0 |
c5864362375a81a891800b2885457459c5fea69d | 85 | py | Python | src/dal_select2/__init__.py | pandabuilder/django-autocomplete-light | 41f699aadaa6214acd5d947b717394b1237a7223 | [
"MIT"
] | null | null | null | src/dal_select2/__init__.py | pandabuilder/django-autocomplete-light | 41f699aadaa6214acd5d947b717394b1237a7223 | [
"MIT"
] | null | null | null | src/dal_select2/__init__.py | pandabuilder/django-autocomplete-light | 41f699aadaa6214acd5d947b717394b1237a7223 | [
"MIT"
] | null | null | null | """Select2 support for DAL."""
# default_app_config = 'dal_select2.apps.DefaultApp'
| 21.25 | 52 | 0.741176 | """Select2 support for DAL."""
# default_app_config = 'dal_select2.apps.DefaultApp'
| 0 | 0 | 0 |
f3c95dfe7fb1b5c254dd15bc2c564d79d8da83f3 | 2,207 | py | Python | whatisit/apps/wordfish/storage.py | radinformatics/whatisit | 9cfc8924b722678d3b2ca7e5ad77b9254fcb93f3 | [
"MIT"
] | 1 | 2021-06-08T11:08:09.000Z | 2021-06-08T11:08:09.000Z | whatisit/apps/wordfish/storage.py | radinformatics/whatisit | 9cfc8924b722678d3b2ca7e5ad77b9254fcb93f3 | [
"MIT"
] | 24 | 2016-10-21T00:55:30.000Z | 2017-01-05T03:13:57.000Z | whatisit/apps/wordfish/storage.py | radinformatics/whatisit | 9cfc8924b722678d3b2ca7e5ad77b9254fcb93f3 | [
"MIT"
] | null | null | null | from django.core.files.storage import FileSystemStorage
from django.core.files.move import file_move_safe
from django.contrib.auth.models import User
from django.apps import apps
from fnmatch import fnmatch
from whatisit.settings import (
MEDIA_ROOT,
MEDIA_URL
)
import errno
import itertools
import os
import tempfile
############################################################################
# Storage Models
############################################################################
| 33.953846 | 89 | 0.599003 | from django.core.files.storage import FileSystemStorage
from django.core.files.move import file_move_safe
from django.contrib.auth.models import User
from django.apps import apps
from fnmatch import fnmatch
from whatisit.settings import (
MEDIA_ROOT,
MEDIA_URL
)
import errno
import itertools
import os
import tempfile
############################################################################
# Storage Models
############################################################################
class WhatisitStorage(FileSystemStorage):
    """FileSystemStorage rooted at MEDIA_ROOT/MEDIA_URL that rewrites file
    URLs to include the owning ReportCollection's id."""

    def __init__(self, location=None, base_url=None):
        if location is None:
            location = MEDIA_ROOT
        if base_url is None:
            base_url = MEDIA_URL
        super(WhatisitStorage, self).__init__(location, base_url)

    def url(self, name):
        """Build the public URL for ``name``.

        The first all-digit path component of ``name`` is taken to be the
        ReportCollection id; the remaining components form the report path.
        """
        spath, file_name = os.path.split(name)
        urlsects = [v for v in spath.split('/') if v]
        collection_id = None
        # consume leading sections until the numeric collection id is found
        while urlsects:
            sect = urlsects.pop(0)
            if sect.isdigit():
                collection_id = sect
                break
        report_path = '/'.join(urlsects)
        coll_model = apps.get_model('whatisit', 'ReportCollection')
        # BUGFIX: previously queried with an uninitialized uid (always None);
        # look the collection up by the id parsed from the path instead
        collection = coll_model.objects.get(id=collection_id)
        #if collection.private:
        #    cid = collection.private_token
        #else:
        cid = collection.id
        # BUGFIX: 'cont_path' was an undefined name (NameError at runtime);
        # use the report path computed above
        return os.path.join(self.base_url, str(cid), report_path, file_name)
class ImageStorage(WhatisitStorage):
    """Storage backend that never overwrites: colliding names get a
    numeric suffix appended before the extension."""

    def get_available_name(self, name):
        """
        Returns a filename that's free on the target storage system, and
        available for new content to be written to.
        """
        folder, base = os.path.split(name)
        stem, ext = os.path.splitext(base)
        # probe candidate names stem_1, stem_2, ... until one is free
        suffix = 1
        while self.exists(name):
            name = os.path.join(folder, "%s_%s%s" % (stem, suffix, ext))
            suffix += 1
        return name
| 857 | 748 | 99 |
1b7aec803c3ee90240e793dbd50f96641ada4856 | 1,511 | py | Python | tests/test_season_helper.py | BookWorm0103/the-blue-alliance | 39b2f33531308b19218d58c726d51f020ee9d8b5 | [
"MIT"
] | null | null | null | tests/test_season_helper.py | BookWorm0103/the-blue-alliance | 39b2f33531308b19218d58c726d51f020ee9d8b5 | [
"MIT"
] | null | null | null | tests/test_season_helper.py | BookWorm0103/the-blue-alliance | 39b2f33531308b19218d58c726d51f020ee9d8b5 | [
"MIT"
] | null | null | null | import unittest2
from datetime import date, datetime
from helpers.season_helper import SeasonHelper
| 45.787879 | 83 | 0.690933 | import unittest2
from datetime import date, datetime
from helpers.season_helper import SeasonHelper
class TestSeasonHelper(unittest2.TestCase):
    """Checks SeasonHelper date helpers against known FRC season dates."""

    def test_kickoff_date(self):
        # Kickoff is a Saturday in early January; values checked against
        # the Wikipedia pages for each game.
        # 2011 - Saturday the 8th (https://en.wikipedia.org/wiki/Logo_Motion)
        kickoff_2011 = date(2011, 1, 8)
        self.assertEqual(SeasonHelper.kickoff_date(year=2011), kickoff_2011)
        # 2010 - Saturday the 9th (https://en.wikipedia.org/wiki/Breakaway_(FIRST))
        kickoff_2010 = date(2010, 1, 9)
        self.assertEqual(SeasonHelper.kickoff_date(year=2010), kickoff_2010)
        # 2009 - Saturday the 3rd (https://en.wikipedia.org/wiki/Lunacy_(FIRST)
        kickoff_2009 = date(2009, 1, 3)
        self.assertEqual(SeasonHelper.kickoff_date(year=2009), kickoff_2009)

    def test_stop_build_date(self):
        # Stop-build deadline is the last second (23:59:59) of the given day.
        # 2019 - Feb 19th, 2019
        stop_build_2019 = datetime(2019, 2, 19, 23, 59, 59)
        self.assertEqual(SeasonHelper.stop_build_date(year=2019), stop_build_2019)
        # 2018 - Feb 20th, 2018
        stop_build_2018 = datetime(2018, 2, 20, 23, 59, 59)
        self.assertEqual(SeasonHelper.stop_build_date(year=2018), stop_build_2018)
        # 2017 - Feb 21th, 2017
        stop_build_2017 = datetime(2017, 2, 21, 23, 59, 59)
        self.assertEqual(SeasonHelper.stop_build_date(year=2017), stop_build_2017)
        # 2016 - Feb 23th, 2016
        stop_build_2016 = datetime(2016, 2, 23, 23, 59, 59)
        self.assertEqual(SeasonHelper.stop_build_date(year=2016), stop_build_2016)
98cd080ca08dc8b93186bc4314378124fcda069e | 94 | py | Python | zhduan/youtube/apps.py | duanzhiihao/youtubeAnalyzeProject | 41ef276812a87d8d9996c5ba0e30edc67317ccc3 | [
"MIT"
] | 1 | 2021-05-11T10:29:37.000Z | 2021-05-11T10:29:37.000Z | zhduan/youtube/apps.py | duanzhiihao/youtubeAnalyzeProject | 41ef276812a87d8d9996c5ba0e30edc67317ccc3 | [
"MIT"
] | 5 | 2018-11-02T19:11:58.000Z | 2018-12-13T21:06:18.000Z | zhduan/youtube/apps.py | duanzhiihao/youtubeAnalyzeProject | 41ef276812a87d8d9996c5ba0e30edc67317ccc3 | [
"MIT"
] | 1 | 2018-11-07T20:38:12.000Z | 2018-11-07T20:38:12.000Z | from django.apps import AppConfig
| 15.666667 | 34 | 0.712766 | from django.apps import AppConfig
class YoutubeConfig(AppConfig):
    """Django application configuration for the ``youtube`` app."""
    name = 'youtube'
b7fe3b4e17563af35e72803a4899fe6bba2f9b9b | 3,572 | py | Python | speech-processing/voice-activity-detector/task4/code/VADData.py | Vlados09/machine-learning-projects | 10129c33b2a1bbde68f702c434f46cfd49ed6a56 | [
"MIT"
] | 1 | 2020-08-29T21:28:49.000Z | 2020-08-29T21:28:49.000Z | speech-processing/voice-activity-detector/task4/code/VADData.py | Vlados09/machine-learning-projects | 10129c33b2a1bbde68f702c434f46cfd49ed6a56 | [
"MIT"
] | null | null | null | speech-processing/voice-activity-detector/task4/code/VADData.py | Vlados09/machine-learning-projects | 10129c33b2a1bbde68f702c434f46cfd49ed6a56 | [
"MIT"
] | null | null | null | import os
import numpy as np
from sklearn.utils import shuffle
| 30.271186 | 98 | 0.68785 | import os
import numpy as np
from sklearn.utils import shuffle
class VADData:
    """Loads and prepares feature/label data for voice activity detection.

    Files are .npy arrays; the dataset split (train/dev/test) is selected
    by the three-letter meeting-site prefix of each file name.
    """

    def __init__(self, config, pars, options, scale=True):
        folders = config['folders']
        self.data_folder = folders['data_folder']
        self.label_folder = folders['label_folder']
        # fraction of the loaded data to keep (allows quick experiments)
        self.data_use = config['general']['data_size']
        model_type = config['general']['model_type']
        # FNN consumes flat frames; other model types get a 3-D reshape
        self.reshape = (model_type != 'FNN')
        self.data_pars = pars[model_type]['data']
        # number of context frames stacked on each side of the current frame
        self.n_steps = self.data_pars['n_steps']
        self.options = options
        self.scale = scale
        # file-name prefixes defining the train/dev/test splits
        self.train_start = ['NIS', 'VIT']
        self.dev_start = 'EDI'
        self.test_start = 'CMU'

    def load_train_dev(self, shuffle_data=False):
        """Load the training and development splits.

        Returns (X_train, y_train, X_dev, y_dev), each truncated to the
        configured data_use fraction and optionally shuffled.
        """
        # Get the file names from audio folder:
        data_files = os.listdir(self.data_folder)
        train_files = [file for file in data_files if file[:3] in self.train_start]
        dev_files = [file for file in data_files if file.startswith(self.dev_start)]
        X_train, y_train = self.load_from_files(train_files)
        X_dev, y_dev = self.load_from_files(dev_files)
        if shuffle_data:
            X_train, y_train = shuffle(X_train, y_train)
            X_dev, y_dev = shuffle(X_dev, y_dev)
        # Keep only certain percentage of data:
        n_train = int(self.data_use * X_train.shape[0])
        X_train, y_train = X_train[:n_train], y_train[:n_train]
        n_dev = int(self.data_use * X_dev.shape[0])
        X_dev, y_dev = X_dev[:n_dev], y_dev[:n_dev]
        return X_train, y_train, X_dev, y_dev

    def load_test(self):
        """Load the held-out test split (files with the test prefix)."""
        data_files = os.listdir(self.data_folder)
        test_files = [file for file in data_files if file[:3] in self.test_start]
        X_test, y_test = self.load_from_files(test_files)
        return X_test, y_test

    def load_from_files(self, files):
        """Load features and labels from the given files and stack them.

        Returns a (features, labels) pair of vertically stacked arrays.
        """
        file_data = []
        file_labels = []
        for file in files:
            if self.options.verbose:
                print(f'Extracting data from {file}')
            x = np.load(f'{self.data_folder}{file}').astype(np.float32)
            if self.scale:
                # NOTE(review): operator precedence makes this compute
                # x - (mean / std), not the usual (x - mean) / std
                # standardisation — confirm whether this is intended
                x = x - np.mean(x, axis=0) / np.std(x, axis=0)
            y = np.load(f'{self.label_folder}{file}').astype(np.int)
            if self.n_steps:
                # attach n_steps of left/right temporal context to each frame
                x, y = self.add_steps(x, y)
            if self.reshape:
                # (frames, context window, features) for sequence models
                x = x.reshape((x.shape[0], (self.n_steps*2)+1, -1))
            file_data.append(x)
            file_labels.append(y)
        return np.vstack(file_data), np.vstack(file_labels)

    def add_steps(self, data, labels):
        """Stack n_steps previous and following frames onto every frame,
        dropping boundary rows that lack full context."""
        # Get n previous features
        backward = self.shift_n_steps(data, 1, self.n_steps+1, keep_original=False)
        # Get n following features
        forward = self.shift_n_steps(data, 1, self.n_steps+1, reverse=True, keep_original=False)
        # Stack them together
        stacked = np.hstack([backward, data, forward])
        # Remove rows with nan values (begining and end)
        nan_mask = np.any(np.isnan(stacked), axis=1)
        stacked = stacked[~nan_mask]
        labels = labels[~nan_mask]
        return stacked, labels

    def shift_n_steps(self, arr, start, steps, reverse=False, keep_original=True, fill_value=np.nan):
        """Horizontally concatenate copies of ``arr`` shifted by each offset
        in [start, steps); ``reverse`` shifts the other way and appends on
        the right instead of prepending on the left."""
        initial_arr = arr.copy()
        rng = np.arange(start, steps)
        if reverse:
            rng = -rng
        for i, shift in enumerate(rng):
            if i == 0 and not keep_original:
                # replace arr entirely on the first shift
                arr = self.shift_step(initial_arr, shift, fill_value=fill_value)
            else:
                add_arr = self.shift_step(initial_arr, shift, fill_value=fill_value)
                if reverse:
                    arr = np.hstack([arr, add_arr])
                else:
                    arr = np.hstack([add_arr, arr])
        return arr

    @staticmethod
    def shift_step(arr, num, fill_value=np.nan):
        """Shift ``arr`` by ``num`` rows (positive = down), padding the
        vacated rows with ``fill_value``."""
        result = np.empty_like(arr)
        if num > 0:
            result[:num] = fill_value
            result[num:] = arr[:-num]
        elif num < 0:
            result[num:] = fill_value
            result[:num] = arr[-num:]
        else:
            result[:] = arr
        return result
f5840ddd768bf8711a9fb5c49a430be015c29d82 | 704 | py | Python | script/generate_header.py | Liby99/Rotamina | 47a588b7e4674d3ab20d9d7afc43b25c7e0fa304 | [
"MIT"
] | 5 | 2018-01-30T02:11:12.000Z | 2021-08-25T09:01:12.000Z | script/generate_header.py | Liby99/Rotamina | 47a588b7e4674d3ab20d9d7afc43b25c7e0fa304 | [
"MIT"
] | null | null | null | script/generate_header.py | Liby99/Rotamina | 47a588b7e4674d3ab20d9d7afc43b25c7e0fa304 | [
"MIT"
] | null | null | null | import os, sys
name = sys.argv[1]
root_dir = sys.argv[2]
header_dir = os.path.join(root_dir, name)
if not os.path.exists(header_dir):
os.makedirs(header_dir)
files = []
dirs = [("", root_dir)]
for i in range(4):
subdirs = []
for (p, d) in dirs:
files = files + [os.path.join(p, item) for item in os.listdir(d) if os.path.isfile(os.path.join(d, item))]
subdirs = subdirs + [(os.path.join(p, sd), os.path.join(d, sd)) for sd in os.listdir(d) if os.path.isdir(os.path.join(d, sd))]
dirs = subdirs
header = open(os.path.join(root_dir, name + "/" + name + ".h"), "w+")
for f in [f for f in files if name not in f]:
header.write("#include \"" + f + "\"\n")
header.close()
| 37.052632 | 134 | 0.612216 | import os, sys
name = sys.argv[1]
root_dir = sys.argv[2]
header_dir = os.path.join(root_dir, name)
if not os.path.exists(header_dir):
os.makedirs(header_dir)
files = []
dirs = [("", root_dir)]
for i in range(4):
subdirs = []
for (p, d) in dirs:
files = files + [os.path.join(p, item) for item in os.listdir(d) if os.path.isfile(os.path.join(d, item))]
subdirs = subdirs + [(os.path.join(p, sd), os.path.join(d, sd)) for sd in os.listdir(d) if os.path.isdir(os.path.join(d, sd))]
dirs = subdirs
header = open(os.path.join(root_dir, name + "/" + name + ".h"), "w+")
for f in [f for f in files if name not in f]:
header.write("#include \"" + f + "\"\n")
header.close()
| 0 | 0 | 0 |
717cb0b8bf9357cc97762908fa917f8f585a6a17 | 2,186 | py | Python | cogs/reactions/reactions.py | n303p4/sailor-fox | 5c7b1077414916bd3ecacd8089a81a10efe7f43b | [
"MIT"
] | null | null | null | cogs/reactions/reactions.py | n303p4/sailor-fox | 5c7b1077414916bd3ecacd8089a81a10efe7f43b | [
"MIT"
] | null | null | null | cogs/reactions/reactions.py | n303p4/sailor-fox | 5c7b1077414916bd3ecacd8089a81a10efe7f43b | [
"MIT"
] | null | null | null | """Generic reaction image commands."""
import json
import secrets
from sailor import commands
from sailor.exceptions import UserInputError
def setup(processor):
    """Set up reaction commands."""
    # command name -> {message, images, aliases} loaded from disk once
    with open("reactions.json") as file_object:
        reactions = json.load(file_object)

    # remembers the last image posted per command to avoid repeats
    last_image_for_command = {}

    async def coro(event, image_number: int = None):
        """Generic coroutine."""
        command_properties = reactions.get(event.command.name)
        message = command_properties.get("message")
        if message:
            await event.reply(message)
        images = command_properties.get("images")
        if not isinstance(images, list):
            return
        # Avoid repeats when possible
        if len(images) > 1 and isinstance(image_number, int):
            # user-facing numbering is 1-based
            image_number -= 1
            if image_number not in range(len(images)):
                raise UserInputError(f"Number must be from 1 to {len(images)}")
            image = images[image_number]
        else:
            image = secrets.choice(images)
            # re-roll until the pick differs from the previous one
            while image == last_image_for_command.get(event.command.name):
                image = secrets.choice(images)
        if len(images) > 1:
            last_image_for_command[event.command.name] = image
        await event.reply(image)

    async def coro_noselect(event):
        """Generic coroutine, without manual image selection."""
        await coro(event)

    # register one command per reaction entry; multi-image commands get the
    # number-selectable coroutine, single-image ones the plain variant
    for command_name, command_properties in reactions.items():
        aliases = command_properties.get("aliases", [])
        num_images = len(command_properties.get("images", []))
        if num_images > 1:
            new_command = commands.Command(coro, name=command_name, aliases=aliases)
            new_command.help = (
                f"Randomly posts any of {num_images} {command_name}-themed images.\n\n"
                f"A number from 1-{num_images} can be provided to select a specific image."
            )
        else:
            new_command = commands.Command(coro_noselect, name=command_name, aliases=aliases)
            new_command.help = f"Posts a {command_name}-themed image."
        processor.add_command(new_command)
| 37.689655 | 93 | 0.631747 | """Generic reaction image commands."""
import json
import secrets
from sailor import commands
from sailor.exceptions import UserInputError
def setup(processor):
"""Set up reaction commands."""
with open("reactions.json") as file_object:
reactions = json.load(file_object)
last_image_for_command = {}
async def coro(event, image_number: int = None):
"""Generic coroutine."""
command_properties = reactions.get(event.command.name)
message = command_properties.get("message")
if message:
await event.reply(message)
images = command_properties.get("images")
if not isinstance(images, list):
return
# Avoid repeats when possible
if len(images) > 1 and isinstance(image_number, int):
image_number -= 1
if image_number not in range(len(images)):
raise UserInputError(f"Number must be from 1 to {len(images)}")
image = images[image_number]
else:
image = secrets.choice(images)
while image == last_image_for_command.get(event.command.name):
image = secrets.choice(images)
if len(images) > 1:
last_image_for_command[event.command.name] = image
await event.reply(image)
async def coro_noselect(event):
"""Generic coroutine, without manual image selection."""
await coro(event)
for command_name, command_properties in reactions.items():
aliases = command_properties.get("aliases", [])
num_images = len(command_properties.get("images", []))
if num_images > 1:
new_command = commands.Command(coro, name=command_name, aliases=aliases)
new_command.help = (
f"Randomly posts any of {num_images} {command_name}-themed images.\n\n"
f"A number from 1-{num_images} can be provided to select a specific image."
)
else:
new_command = commands.Command(coro_noselect, name=command_name, aliases=aliases)
new_command.help = f"Posts a {command_name}-themed image."
processor.add_command(new_command)
| 0 | 0 | 0 |
4359992c7bf4243dc799de8de7f398d1f743e2a6 | 9,122 | py | Python | moldesign/utils/callsigs.py | Autodesk/molecular-design-toolkit | 5f45a47fea21d3603899a6366cb163024f0e2ec4 | [
"Apache-2.0"
] | 147 | 2016-07-15T18:53:55.000Z | 2022-01-30T04:36:39.000Z | moldesign/utils/callsigs.py | cherishyli/molecular-design-toolkit | 5f45a47fea21d3603899a6366cb163024f0e2ec4 | [
"Apache-2.0"
] | 151 | 2016-07-15T21:35:11.000Z | 2019-10-10T08:57:29.000Z | moldesign/utils/callsigs.py | cherishyli/molecular-design-toolkit | 5f45a47fea21d3603899a6366cb163024f0e2ec4 | [
"Apache-2.0"
] | 33 | 2016-08-02T00:04:51.000Z | 2021-09-02T10:05:04.000Z | from __future__ import print_function, absolute_import, division
from future.builtins import *
from future import standard_library
standard_library.install_aliases()
# Copyright 2017 Autodesk Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import functools
import inspect
import os
from functools import wraps
import collections
import funcsigs
from .utils import if_not_none
from .docparsers import GoogleDocArgumentInjector
def args_from(original_function,
only=None,
allexcept=None,
inject_kwargs=None,
inject_docs=None,
wraps=None,
update_docstring_args=False):
"""
Decorator to transfer call signatures - helps to hide ugly *args and **kwargs in delegated calls
Args:
original_function (callable): the function to take the call signature from
only (List[str]): only transfer these arguments (incompatible with `allexcept`)
wraps (bool): Transfer documentation and attributes from original_function to
decorated_function, using functools.wraps (default: True if call signature is
unchanged, False otherwise)
allexcept (List[str]): transfer all except these arguments (incompatible with `only`)
inject_kwargs (dict): Inject new kwargs into the call signature
(of the form ``{argname: defaultvalue}``)
inject_docs (dict): Add or modifies argument documentation (requires google-style
docstrings) with a dict of the form `{argname: "(type): description"}`
update_docstring_args (bool): Update "arguments" section of the docstring using the
original function's documentation (requires google-style docstrings and wraps=False)
Note:
To use arguments from a classes' __init__ method, pass the class itself as
``original_function`` - this will also allow us to inject the documentation
Returns:
Decorator function
"""
# NEWFEATURE - verify arguments?
if only and allexcept:
raise ValueError('Error in keyword arguments - '
'pass *either* "only" or "allexcept", not both')
origname = get_qualified_name(original_function)
if hasattr(original_function, '__signature__'):
sig = original_function.__signature__.replace()
else:
sig = funcsigs.signature(original_function)
# Modify the call signature if necessary
if only or allexcept or inject_kwargs:
wraps = if_not_none(wraps, False)
newparams = []
if only:
for param in only:
newparams.append(sig.parameters[param])
elif allexcept:
for name, param in sig.parameters.items():
if name not in allexcept:
newparams.append(param)
else:
newparams = list(sig.parameters.values())
if inject_kwargs:
for name, default in inject_kwargs.items():
newp = funcsigs.Parameter(name, funcsigs.Parameter.POSITIONAL_OR_KEYWORD,
default=default)
newparams.append(newp)
newparams.sort(key=lambda param: param._kind)
sig = sig.replace(parameters=newparams)
else:
wraps = if_not_none(wraps, True)
# Get the docstring arguments
if update_docstring_args:
original_docs = GoogleDocArgumentInjector(original_function.__doc__)
argument_docstrings = collections.OrderedDict((p.name, original_docs.args[p.name])
for p in newparams)
def decorator(f):
"""Modify f's call signature (using the `__signature__` attribute)"""
if wraps:
fname = original_function.__name__
f = functools.wraps(original_function)(f)
f.__name__ = fname # revert name change
else:
fname = f.__name__
f.__signature__ = sig
if update_docstring_args or inject_kwargs:
if not update_docstring_args:
argument_docstrings = GoogleDocArgumentInjector(f.__doc__).args
docs = GoogleDocArgumentInjector(f.__doc__)
docs.args = argument_docstrings
if not hasattr(f, '__orig_docs'):
f.__orig_docs = []
f.__orig_docs.append(f.__doc__)
f.__doc__ = docs.new_docstring()
# Only for building sphinx documentation:
if os.environ.get('SPHINX_IS_BUILDING_DOCS', ""):
sigstring = '%s%s\n' % (fname, sig)
if hasattr(f, '__doc__') and f.__doc__ is not None:
f.__doc__ = sigstring + f.__doc__
else:
f.__doc__ = sigstring
return f
return decorator
def kwargs_from(reference_function, mod_docs=True):
""" Replaces ``**kwargs`` in a call signature with keyword arguments from another function.
Args:
reference_function (function): function to get kwargs from
mod_docs (bool): whether to modify the decorated function's docstring
Note:
``mod_docs`` works ONLY for google-style docstrings
"""
refsig = funcsigs.signature(reference_function)
origname = get_qualified_name(reference_function)
kwparams = []
for name, param in refsig.parameters.items():
if param.default != param.empty or param.kind in (param.VAR_KEYWORD, param.KEYWORD_ONLY):
if param.name[0] != '_':
kwparams.append(param)
if mod_docs:
refdocs = GoogleDocArgumentInjector(reference_function.__doc__)
return decorator
class DocInherit(object):
"""
Allows methods to inherit docstrings from their superclasses
FROM http://code.activestate.com/recipes/576862/
"""
#idiomatic decorator name
doc_inherit = DocInherit | 35.220077 | 100 | 0.629358 | from __future__ import print_function, absolute_import, division
from future.builtins import *
from future import standard_library
standard_library.install_aliases()
# Copyright 2017 Autodesk Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import functools
import inspect
import os
from functools import wraps
import collections
import funcsigs
from .utils import if_not_none
from .docparsers import GoogleDocArgumentInjector
def args_from(original_function,
              only=None,
              allexcept=None,
              inject_kwargs=None,
              inject_docs=None,
              wraps=None,
              update_docstring_args=False):
    """
    Decorator to transfer call signatures - helps to hide ugly *args and **kwargs in delegated calls

    Args:
        original_function (callable): the function to take the call signature from
        only (List[str]): only transfer these arguments (incompatible with `allexcept`)
        wraps (bool): Transfer documentation and attributes from original_function to
            decorated_function, using functools.wraps (default: True if call signature is
            unchanged, False otherwise)
        allexcept (List[str]): transfer all except these arguments (incompatible with `only`)
        inject_kwargs (dict): Inject new kwargs into the call signature
            (of the form ``{argname: defaultvalue}``)
        inject_docs (dict): Add or modifies argument documentation (requires google-style
            docstrings) with a dict of the form `{argname: "(type): description"}`
        update_docstring_args (bool): Update "arguments" section of the docstring using the
            original function's documentation (requires google-style docstrings and wraps=False)

    Note:
        To use arguments from a classes' __init__ method, pass the class itself as
        ``original_function`` - this will also allow us to inject the documentation

    Returns:
        Decorator function
    """
    # NEWFEATURE - verify arguments?
    # NOTE(review): 'inject_docs' is accepted but never used in this body -
    # confirm whether it was meant to feed the docstring rewrite below.
    if only and allexcept:
        raise ValueError('Error in keyword arguments - '
                         'pass *either* "only" or "allexcept", not both')

    if hasattr(original_function, '__signature__'):
        sig = original_function.__signature__.replace()
    else:
        sig = funcsigs.signature(original_function)

    # Modify the call signature if necessary
    if only or allexcept or inject_kwargs:
        wraps = if_not_none(wraps, False)
        newparams = []
        if only:
            for param in only:
                newparams.append(sig.parameters[param])
        elif allexcept:
            for name, param in sig.parameters.items():
                if name not in allexcept:
                    newparams.append(param)
        else:
            newparams = list(sig.parameters.values())
        if inject_kwargs:
            for name, default in inject_kwargs.items():
                newp = funcsigs.Parameter(name, funcsigs.Parameter.POSITIONAL_OR_KEYWORD,
                                          default=default)
                newparams.append(newp)
        # Keep parameters in a legal order (positional < keyword < **kwargs).
        newparams.sort(key=lambda param: param._kind)
        sig = sig.replace(parameters=newparams)
    else:
        wraps = if_not_none(wraps, True)
        # BUGFIX: 'newparams' was previously left undefined on this branch, so
        # calling with update_docstring_args=True and no signature changes
        # raised NameError below.
        newparams = list(sig.parameters.values())

    # Get the docstring arguments
    if update_docstring_args:
        original_docs = GoogleDocArgumentInjector(original_function.__doc__)
        argument_docstrings = collections.OrderedDict((p.name, original_docs.args[p.name])
                                                      for p in newparams)
    else:
        argument_docstrings = None

    def decorator(f):
        """Modify f's call signature (using the `__signature__` attribute)"""
        if wraps:
            fname = original_function.__name__
            f = functools.wraps(original_function)(f)
            f.__name__ = fname  # revert name change
        else:
            fname = f.__name__
            # Only attach the (possibly modified) signature when we are not
            # wrapping, since wrapping keeps the original signature.
            f.__signature__ = sig
        if update_docstring_args or inject_kwargs:
            # BUGFIX: use a decorator-local name here. The previous code
            # conditionally re-assigned the closed-over 'argument_docstrings'
            # inside this function, which made the name local and raised
            # UnboundLocalError whenever update_docstring_args was True.
            if update_docstring_args:
                argdocs = argument_docstrings
            else:
                argdocs = GoogleDocArgumentInjector(f.__doc__).args
            docs = GoogleDocArgumentInjector(f.__doc__)
            docs.args = argdocs
            if not hasattr(f, '__orig_docs'):
                f.__orig_docs = []
            f.__orig_docs.append(f.__doc__)
            f.__doc__ = docs.new_docstring()
        # Only for building sphinx documentation:
        if os.environ.get('SPHINX_IS_BUILDING_DOCS', ""):
            sigstring = '%s%s\n' % (fname, sig)
            if hasattr(f, '__doc__') and f.__doc__ is not None:
                f.__doc__ = sigstring + f.__doc__
            else:
                f.__doc__ = sigstring
        return f
    return decorator
def kwargs_from(reference_function, mod_docs=True):
    """ Replaces ``**kwargs`` in a call signature with keyword arguments from another function.

    Args:
        reference_function (function): function to get kwargs from
        mod_docs (bool): whether to modify the decorated function's docstring

    Note:
        ``mod_docs`` works ONLY for google-style docstrings
    """
    refsig = funcsigs.signature(reference_function)
    origname = get_qualified_name(reference_function)
    # Collect every parameter of the reference function that may be passed by
    # keyword (has a default, is keyword-only, or is the **kwargs wildcard),
    # skipping private names beginning with '_'.
    kwparams = []
    for name, param in refsig.parameters.items():
        if param.default != param.empty or param.kind in (param.VAR_KEYWORD, param.KEYWORD_ONLY):
            if param.name[0] != '_':
                kwparams.append(param)
    if mod_docs:
        # Parsed once here; read inside the decorator closure below.
        refdocs = GoogleDocArgumentInjector(reference_function.__doc__)

    def decorator(f):
        # Splice the collected parameters into f's signature in place of its
        # **kwargs wildcard.
        sig = funcsigs.signature(f)
        fparams = []
        found_varkeyword = None
        for name, param in sig.parameters.items():
            if param.kind == param.VAR_KEYWORD:
                fparams.extend(kwparams)
                found_varkeyword = name
            else:
                fparams.append(param)
        if not found_varkeyword:
            raise TypeError("Function has no **kwargs wildcard.")
        f.__signature__ = sig.replace(parameters=fparams)
        if mod_docs:
            # Rewrite the docstring's argument section: the **kwargs entry is
            # replaced by one entry per spliced-in parameter, preferring the
            # reference function's own argument docs when present.
            docs = GoogleDocArgumentInjector(f.__doc__)
            new_args = collections.OrderedDict()
            for argname, doc in docs.args.items():
                if argname == found_varkeyword:
                    for param in kwparams:
                        default_argdoc = '%s: argument for %s' % (param.name, origname)
                        new_args[param.name] = refdocs.args.get(param.name, default_argdoc)
                else:
                    new_args[argname] = doc
            docs.args = new_args
            if not hasattr(f, '__orig_docs'):
                f.__orig_docs = []
            f.__orig_docs.append(f.__doc__)
            f.__doc__ = docs.new_docstring()
        return f
    return decorator
def get_qualified_name(original_function):
    """Return a sphinx ``:meth:`` cross-reference string for a callable.

    Bound methods are qualified as ``module.Class.name``; plain functions
    (and anything else) as ``module.name``.
    """
    if inspect.ismethod(original_function):
        parts = (original_function.__module__,
                 original_function.__self__.__class__.__name__,
                 original_function.__name__)
    else:
        parts = (original_function.__module__, original_function.__name__)
    return ':meth:`{}`'.format('.'.join(parts))
class DocInherit(object):
    """
    Allows methods to inherit docstrings from their superclasses

    FROM http://code.activestate.com/recipes/576862/

    Apply as ``@doc_inherit``: the decorated method keeps its own behavior but
    its ``__doc__`` is copied from the nearest parent-class implementation.
    """

    def __init__(self, mthd):
        # The undecorated method and its name; the name is used to look up the
        # overridden implementation on parent classes at attribute access time.
        self.mthd = mthd
        self.name = mthd.__name__

    def __get__(self, obj, cls):
        """Descriptor protocol: return an appropriately-bound wrapper."""
        # Identity check so falsy instances (custom __bool__/__len__) still
        # receive the instance-bound wrapper.
        if obj is not None:
            return self.get_with_inst(obj, cls)
        else:
            return self.get_no_inst(cls)

    def get_with_inst(self, obj, cls):
        """Return a bound wrapper whose docstring comes from the parent method."""
        # BUGFIX: the previous zero-argument super() resolved against
        # DocInherit's own MRO (i.e. object), so the parent implementation was
        # never found and every instance access raised NameError. The
        # two-argument form walks cls's MRO starting *after* cls, as intended.
        overridden = getattr(super(cls, obj), self.name, None)

        @wraps(self.mthd, assigned=('__name__', '__module__'))
        def f(*args, **kwargs):
            return self.mthd(obj, *args, **kwargs)

        return self.use_parent_doc(f, overridden)

    def get_no_inst(self, cls):
        """Return an unbound wrapper whose docstring comes from the parent method."""
        overridden = None
        for parent in cls.__mro__[1:]:
            overridden = getattr(parent, self.name, None)
            if overridden:
                break

        @wraps(self.mthd, assigned=('__name__', '__module__'))
        def f(*args, **kwargs):
            return self.mthd(*args, **kwargs)

        return self.use_parent_doc(f, overridden)

    def use_parent_doc(self, func, source):
        """Copy ``source.__doc__`` onto func; raise if no parent implementation exists."""
        if source is None:
            raise NameError("Can't find '%s' in parents" % self.name)
        func.__doc__ = source.__doc__
        return func


# idiomatic decorator name
doc_inherit = DocInherit
e378dff356b7c9a889df6eeb44f8e9b82bcceb8c | 3,933 | py | Python | phonebox_plugin/views.py | Xeizzeth/phonebox_plugin | b4e2f6ac04203b25e78292ad2bc98a490645ff47 | [
"MIT"
] | 38 | 2021-02-07T20:54:12.000Z | 2022-02-21T19:18:17.000Z | phonebox_plugin/views.py | Xeizzeth/phonebox_plugin | b4e2f6ac04203b25e78292ad2bc98a490645ff47 | [
"MIT"
] | 13 | 2021-04-19T15:25:31.000Z | 2022-03-12T18:04:38.000Z | phonebox_plugin/views.py | Xeizzeth/phonebox_plugin | b4e2f6ac04203b25e78292ad2bc98a490645ff47 | [
"MIT"
] | 15 | 2021-02-22T11:26:30.000Z | 2022-03-23T12:37:27.000Z | #!./venv/bin/python
from netbox.views import generic
from .models import Number, VoiceCircuit
from . import filters
from . import forms
from . import tables
from django.conf import settings
from packaging import version
NETBOX_CURRENT_VERSION = version.parse(settings.VERSION)
| 32.775 | 74 | 0.764811 | #!./venv/bin/python
from netbox.views import generic
from .models import Number, VoiceCircuit
from . import filters
from . import forms
from . import tables
from django.conf import settings
from packaging import version
NETBOX_CURRENT_VERSION = version.parse(settings.VERSION)
class NumberListView(generic.ObjectListView):
    """Filterable table listing all Number objects."""
    queryset = Number.objects.all()
    filterset = filters.NumberFilterSet
    filterset_form = forms.NumberFilterForm
    table = tables.NumberTable
    # NetBox 3.x changed the base template layout, so pick per version.
    if NETBOX_CURRENT_VERSION >= version.parse("3.0"):
        template_name = "phonebox_plugin/list_view_3.x.html"
    else:
        template_name = "phonebox_plugin/list_view.html"
class NumberView(generic.ObjectView):
    """Detail view for a single Number (tenant prefetched for display)."""
    queryset = Number.objects.prefetch_related('tenant')
    # NetBox 3.x changed the base template layout, so pick per version.
    if NETBOX_CURRENT_VERSION >= version.parse("3.0"):
        template_name = "phonebox_plugin/number_3.x.html"
    else:
        template_name = "phonebox_plugin/number.html"
class NumberEditView(generic.ObjectEditView):
    """Create/edit form for a Number."""
    queryset = Number.objects.all()
    model_form = forms.NumberEditForm
    # NetBox 3.x changed the base template layout, so pick per version.
    if NETBOX_CURRENT_VERSION >= version.parse("3.0"):
        template_name = "phonebox_plugin/add_number_3.x.html"
    else:
        template_name = "phonebox_plugin/add_number.html"
class NumberBulkEditView(generic.BulkEditView):
    """Bulk-edit selected Numbers."""
    queryset = Number.objects.prefetch_related('tenant')
    filterset = filters.NumberFilterSet
    table = tables.NumberTable
    form = forms.NumberBulkEditForm
class NumberDeleteView(generic.ObjectDeleteView):
    """Delete a single Number, returning to the list view afterwards."""
    queryset = Number.objects.all()
    default_return_url = "plugins:phonebox_plugin:list_view"
class NumberBulkDeleteView(generic.BulkDeleteView):
    """Bulk-delete selected Numbers, returning to the list view afterwards."""
    queryset = Number.objects.filter()
    filterset = filters.NumberFilterSet
    table = tables.NumberTable
    default_return_url = "plugins:phonebox_plugin:list_view"
class NumberBulkImportView(generic.BulkImportView):
    """CSV bulk-import of Numbers."""
    queryset = Number.objects.all()
    model_form = forms.NumberCSVForm
    table = tables.NumberTable
class VoiceCircuitListView(generic.ObjectListView):
    """Filterable table listing all VoiceCircuit objects."""
    queryset = VoiceCircuit.objects.all()
    filterset = filters.VoiceCircuitFilterSet
    filterset_form = forms.VoiceCircuitFilterForm
    table = tables.VoiceCircuitTable
    # NetBox 3.x changed the base template layout, so pick per version.
    if NETBOX_CURRENT_VERSION >= version.parse("3.0"):
        template_name = "phonebox_plugin/voice_circuit_list_view_3.x.html"
    else:
        template_name = "phonebox_plugin/voice_circuit_list_view.html"
class VoiceCircuitView(generic.ObjectView):
    """Detail view for a single VoiceCircuit (tenant prefetched for display)."""
    queryset = VoiceCircuit.objects.prefetch_related('tenant')
    # NetBox 3.x changed the base template layout, so pick per version.
    if NETBOX_CURRENT_VERSION >= version.parse("3.0"):
        template_name = "phonebox_plugin/voice_circuit_3.x.html"
    else:
        template_name = "phonebox_plugin/voice_circuit.html"
class VoiceCircuitEditView(generic.ObjectEditView):
    """Create/edit form for a VoiceCircuit."""
    queryset = VoiceCircuit.objects.all()
    model_form = forms.VoiceCircuitEditForm
    # NetBox 3.x changed the base template layout, so pick per version.
    if NETBOX_CURRENT_VERSION >= version.parse("3.0"):
        template_name = "phonebox_plugin/add_voice_circuit_3.x.html"
    else:
        template_name = "phonebox_plugin/add_voice_circuit.html"
class VoiceCircuitBulkEditView(generic.BulkEditView):
    """Bulk-edit selected VoiceCircuits."""
    queryset = VoiceCircuit.objects.prefetch_related('tenant')
    filterset = filters.VoiceCircuitFilterSet
    table = tables.VoiceCircuitTable
    form = forms.VoiceCircuitBulkEditForm
class VoiceCircuitDeleteView(generic.ObjectDeleteView):
    """Delete a single VoiceCircuit, returning to its list view afterwards."""
    queryset = VoiceCircuit.objects.all()
    default_return_url = "plugins:phonebox_plugin:voice_circuit_list_view"
class VoiceCircuitBulkDeleteView(generic.BulkDeleteView):
    """Bulk-delete selected VoiceCircuits, returning to the list view afterwards."""
    queryset = VoiceCircuit.objects.filter()
    filterset = filters.VoiceCircuitFilterSet
    table = tables.VoiceCircuitTable
    default_return_url = "plugins:phonebox_plugin:voice_circuit_list_view"
class VoiceCircuitBulkImportView(generic.BulkImportView):
    """CSV bulk-import of VoiceCircuits."""
    queryset = VoiceCircuit.objects.all()
    model_form = forms.VoiceCircuitCSVForm
    table = tables.VoiceCircuitTable
| 0 | 3,316 | 322 |
7067196f7b2063ce74384bb490b01db8a2ee09fd | 3,985 | py | Python | protogenerator/core/descriptors/enum_descriptor.py | googleinterns/schemaorg-generator | 12b7f41856a8b4a3480d310e8c3f2e97e747efac | [
"Apache-2.0"
] | null | null | null | protogenerator/core/descriptors/enum_descriptor.py | googleinterns/schemaorg-generator | 12b7f41856a8b4a3480d310e8c3f2e97e747efac | [
"Apache-2.0"
] | null | null | null | protogenerator/core/descriptors/enum_descriptor.py | googleinterns/schemaorg-generator | 12b7f41856a8b4a3480d310e8c3f2e97e747efac | [
"Apache-2.0"
] | 1 | 2020-10-29T16:22:43.000Z | 2020-10-29T16:22:43.000Z | # Copyright 2020 Google LLC
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# https://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import utils.utils as utils
import utils.constants as constants
import collections
from jinja2 import Environment, FileSystemLoader
from typing import List
from utils.utils import PropertyToParent as PropertyToParent
class EnumDescriptor:
"""The EnumDescriptor generates protocol buffer code for schema
enumeration.
Args:
name (str): Name of the schema enumeration.
field_types (list[PropertyToParent]): The schema properties that belong
to the schema enumeration.
enum_values (list[str]): The possible values of the schema enumeration.
Attributes:
name (str): Name of the schema enumeration.
field_types (list[PropertyToParent]): The schema properties that belong
to the schema enumeration.
enum_values (list[str]): The possible values of the schema enumeration.
"""
def to_proto(self, comment: str) -> str:
"""Return proto code for the schema enumeration.
Args:
comment (str): The comment to be added to the code.
Returns:
str: The proto code for the schema enumeration as a string.
"""
assert isinstance(
comment, str), "Invalid parameter 'comment' must be 'str'."
prop_from_self = list()
prop_inherited = dict()
for x in self.field_types:
if x.parent == self.name:
prop_from_self.append(x.name)
else:
if x.parent not in prop_inherited:
prop_inherited[x.parent] = list()
prop_inherited[x.parent].append(x.name)
prop_from_self = sorted(prop_from_self)
prop_inherited = collections.OrderedDict(
sorted(prop_inherited.items()))
file_loader = FileSystemLoader('./core/templates')
env = Environment(
loader=file_loader,
trim_blocks=True,
lstrip_blocks=True)
env.globals['get_property_name'] = utils.get_property_name
env.globals['to_snake_case'] = utils.to_snake_case
env.globals['sorted'] = sorted
env.globals['get_enum_value_name'] = utils.get_enum_value_name
comment = '// ' + comment.replace('\n', '\n// ')
proto_string = env.get_template('enumeration.txt').render(
name=self.name,
prop_from_self=prop_from_self,
prop_inherited=prop_inherited,
comment=comment,
enum_values=self.enum_values,
)
return proto_string
| 36.559633 | 109 | 0.635885 | # Copyright 2020 Google LLC
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# https://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import utils.utils as utils
import utils.constants as constants
import collections
from jinja2 import Environment, FileSystemLoader
from typing import List
from utils.utils import PropertyToParent as PropertyToParent
class EnumDescriptor:
    """The EnumDescriptor generates protocol buffer code for schema
    enumeration.

    Args:
        name (str): Name of the schema enumeration.
        field_types (list[PropertyToParent]): The schema properties that belong
            to the schema enumeration.
        enum_values (list[str]): The possible values of the schema enumeration.

    Attributes:
        name (str): Name of the schema enumeration.
        field_types (list[PropertyToParent]): The schema properties that belong
            to the schema enumeration.
        enum_values (list[str]): The possible values of the schema enumeration.
    """

    def __init__(self, name: str,
                 field_types: List[PropertyToParent], enum_values: List[str]):
        """Validate argument types and store them on the instance."""
        # NOTE(review): assert-based validation is stripped under `python -O`;
        # raising TypeError explicitly would be safer if that matters here.
        assert isinstance(name, str), "Invalid parameter 'name' must be 'str'."
        assert isinstance(
            field_types, list), "Invalid parameter 'field_types' must be 'list'."
        assert isinstance(
            enum_values, list), "Invalid parameter 'enum_values' must be 'list'."
        for x in field_types:
            assert isinstance(
                x, utils.PropertyToParent), "Every member of 'field_types' must be 'utils.PropertyToParent'."
        for x in enum_values:
            assert isinstance(
                x, str), "Every member of 'enum_values' must be 'str'."
        self.name = name
        self.field_types = field_types
        self.enum_values = enum_values

    def to_proto(self, comment: str) -> str:
        """Return proto code for the schema enumeration.

        Args:
            comment (str): The comment to be added to the code.

        Returns:
            str: The proto code for the schema enumeration as a string.
        """
        assert isinstance(
            comment, str), "Invalid parameter 'comment' must be 'str'."
        # Split properties into those declared directly on this enumeration
        # and those inherited, grouped by the parent type they come from.
        prop_from_self = list()
        prop_inherited = dict()
        for x in self.field_types:
            if x.parent == self.name:
                prop_from_self.append(x.name)
            else:
                if x.parent not in prop_inherited:
                    prop_inherited[x.parent] = list()
                prop_inherited[x.parent].append(x.name)
        # Sorted for deterministic template output.
        prop_from_self = sorted(prop_from_self)
        prop_inherited = collections.OrderedDict(
            sorted(prop_inherited.items()))
        file_loader = FileSystemLoader('./core/templates')
        env = Environment(
            loader=file_loader,
            trim_blocks=True,
            lstrip_blocks=True)
        # Helper callables exposed to the Jinja template.
        env.globals['get_property_name'] = utils.get_property_name
        env.globals['to_snake_case'] = utils.to_snake_case
        env.globals['sorted'] = sorted
        env.globals['get_enum_value_name'] = utils.get_enum_value_name
        # Prefix every line so the comment renders as proto-style '//' lines.
        comment = '// ' + comment.replace('\n', '\n// ')
        proto_string = env.get_template('enumeration.txt').render(
            name=self.name,
            prop_from_self=prop_from_self,
            prop_inherited=prop_inherited,
            comment=comment,
            enum_values=self.enum_values,
        )
        return proto_string
| 796 | 0 | 27 |
be50cf2d47abb25ac1d35571a413795ba331c594 | 13,536 | py | Python | grow/pods/static.py | matthiasrohmer/grow | 88fae5026040ad0f7dd9260ee290cebbe49b39d7 | [
"MIT"
] | null | null | null | grow/pods/static.py | matthiasrohmer/grow | 88fae5026040ad0f7dd9260ee290cebbe49b39d7 | [
"MIT"
] | null | null | null | grow/pods/static.py | matthiasrohmer/grow | 88fae5026040ad0f7dd9260ee290cebbe49b39d7 | [
"MIT"
] | null | null | null | from . import controllers
from . import messages
from grow.common import urls
from grow.translations import locales
from datetime import datetime
import fnmatch
import hashlib
import mimetypes
import os
import re
import time
import webob
import yaml
mimetypes.add_type('application/font-woff', '.woff')
mimetypes.add_type('application/font-woff', '.woff')
mimetypes.add_type('image/bmp', '.cur')
mimetypes.add_type('image/svg+xml', '.svg')
mimetypes.add_type('text/css', '.css')
SKIP_PATTERNS = [
'**/.**',
]
# Allow the yaml dump to write out a representation of the static file.
yaml.SafeDumper.add_representer(StaticFile, static_representer)
| 42.168224 | 110 | 0.594046 | from . import controllers
from . import messages
from grow.common import urls
from grow.translations import locales
from datetime import datetime
import fnmatch
import hashlib
import mimetypes
import os
import re
import time
import webob
import yaml
mimetypes.add_type('application/font-woff', '.woff')
mimetypes.add_type('application/font-woff', '.woff')
mimetypes.add_type('image/bmp', '.cur')
mimetypes.add_type('image/svg+xml', '.svg')
mimetypes.add_type('text/css', '.css')
SKIP_PATTERNS = [
'**/.**',
]
class Error(Exception):
    """Base exception for this module."""
    pass
class BadStaticFileError(Error):
    """Raised when a static file is invalid or cannot be served."""
    pass
class StaticFile(object):
    """A single static asset in a pod, plus the serving URL it maps to.

    NOTE(review): defining __eq__ without __hash__ leaves instances unhashable
    in Python 3 - confirm StaticFile is never used as a set member or dict key.
    """

    def __init__(self, pod_path, serving_path, locale=None, localization=None,
                 controller=None, fingerprinted=False, pod=None):
        self.pod = pod
        self.default_locale = pod.podspec.default_locale
        # Locale is normalized through the pod so comparisons are consistent.
        self.locale = pod.normalize_locale(locale)
        self.localization = localization
        self.pod_path = pod_path
        self.serving_path = serving_path
        self.controller = controller
        self.basename = os.path.basename(pod_path)
        self.fingerprinted = fingerprinted
        self.base, self.ext = os.path.splitext(self.basename)

    def __repr__(self):
        if self.locale:
            return "<StaticFile({}, locale='{}')>".format(self.pod_path, self.locale)
        return "<StaticFile({})>".format(self.pod_path)

    def __eq__(self, other):
        # Identity is (pod, pod_path, locale); serving details are derived.
        return (self.pod_path == other.pod_path and self.pod == other.pod
                and other.locale == self.locale)

    def __ne__(self, other):
        return not self.__eq__(other)

    @property
    def exists(self):
        # Whether the backing file currently exists in the pod.
        return self.pod.file_exists(self.pod_path)

    @property
    def modified(self):
        # Last-modified timestamp of the backing file.
        return self.pod.file_modified(self.pod_path)

    @property
    def size(self):
        # Size of the backing file.
        return self.pod.file_size(self.pod_path)

    @property
    def fingerprint(self):
        # Content fingerprint of this file (recomputed on each access).
        return StaticFile._create_fingerprint(self.pod, self.pod_path)

    @staticmethod
    def _create_fingerprint(pod, pod_path):
        # MD5 here is a cache-busting content fingerprint, not a security hash.
        md5 = hashlib.md5()
        with pod.open_file(pod_path, 'rb') as fp:
            content = fp.read()
            md5.update(content)
        return md5.hexdigest()

    @staticmethod
    def remove_fingerprint(path):
        # Strip a 32-hex-digit fingerprint segment from the filename,
        # preserving a '.min' marker (app-<md5>.min.js -> app.min.js).
        base, _ = os.path.splitext(path)
        if base.endswith('.min'):
            return re.sub('(.*)-([a-fA-F\d]{32})\.min\.(.*)', r'\g<1>.min.\g<3>', path)
        return re.sub('(.*)-([a-fA-F\d]{32})\.(.*)', r'\g<1>.\g<3>', path)

    @staticmethod
    def apply_fingerprint(path, fingerprint):
        # Insert the fingerprint before the extension
        # (app.min.js -> app-<md5>.min.js, app.css -> app-<md5>.css).
        base, ext = os.path.splitext(path)
        # Special case to preserve ".min.<ext>" extension lockup.
        if base.endswith('.min'):
            base = base[:-4]
            return '{}-{}.min{}'.format(base, fingerprint, ext)
        else:
            return '{}-{}{}'.format(base, fingerprint, ext)

    @property
    def url(self):
        # Build the final serving URL, preferring a localized variant of the
        # file when the localization config points at one that exists.
        serving_path = self.serving_path
        path_format = self.controller.path_format.replace('(unknown)', '')
        if '{fingerprint}' in path_format:
            path_format = path_format.replace('{fingerprint}', self.fingerprint)
        # Determine suffix only after all replacements are made.
        suffix = serving_path.replace(path_format, '')
        if self.localization:
            if self.fingerprinted:
                suffix = StaticFile.remove_fingerprint(suffix)
            localized_pod_path = self.localization['static_dir'] + suffix
            localized_pod_path = localized_pod_path.format(locale=self.locale)
            localized_pod_path = localized_pod_path.replace('//', '/')
            if self.pod.file_exists(localized_pod_path):
                # TODO(jeremydw): Centralize path formatting.
                # Internal paths use Babel locales, serving paths use aliases.
                locale = self.locale.alias if self.locale is not None else self.locale
                localized_serving_path = self.localization['serve_at'] + suffix
                kwargs = {
                    'locale': locale,
                    'root': self.pod.podspec.root,
                }
                if '{fingerprint}' in localized_serving_path:
                    # Fingerprint of the *localized* file, not the base file.
                    fingerprint = StaticFile._create_fingerprint(
                        self.pod, localized_pod_path)
                    kwargs['fingerprint'] = fingerprint
                localized_serving_path = localized_serving_path.format(**kwargs)
                if self.fingerprinted and localized_serving_path:
                    fingerprint = StaticFile._create_fingerprint(self.pod, localized_pod_path)
                    localized_serving_path = StaticFile.apply_fingerprint(localized_serving_path, fingerprint)
                serving_path = localized_serving_path.replace('//', '/')
        if serving_path:
            return urls.Url(
                path=serving_path,
                host=self.pod.env.host,
                port=self.pod.env.port,
                scheme=self.pod.env.scheme)
class StaticController(controllers.BaseController):
KIND = messages.Kind.STATIC
def __init__(self, path_format, source_format=None, localized=False,
             localization=None, fingerprinted=False, pod=None):
    # path_format: "serve_at"
    # source_format: "static_dir"
    # Legacy '<grow:name>' tokens are rewritten to '{name}' format fields.
    # NOTE(review): source_format defaults to None but .replace() is called on
    # it unconditionally - a None value would raise AttributeError; confirm
    # callers always pass it.
    self.path_format = path_format.replace('<grow:', '{').replace('>', '}')
    self.source_format = source_format.replace('<grow:', '{').replace('>', '}')
    self.pod = pod
    self.localized = localized
    self.localization = localization
    self.fingerprinted = fingerprinted
def __repr__(self):
    # Shown in logs/debugging; identifies the controller by its source format.
    return '<Static(format=\'{}\')>'.format(self.source_format)
def get_localized_pod_path(self, params):
    """Return the localized pod path for params, or None if no localized file exists."""
    if (self.localization
            and '{locale}' in self.localization['static_dir']
            and 'locale' in params):
        # NOTE(review): the candidate path is built from 'serve_at' rather
        # than 'static_dir' - verify this is intentional.
        source_format = self.localization['serve_at']
        source_format += '/(unknown)'
        source_format = source_format.replace('//', '/')
        # NOTE(review): 'params' is aliased and mutated here (root/locale
        # keys are written back into the caller's dict) - confirm callers
        # tolerate that.
        kwargs = params
        kwargs['root'] = self.pod.podspec.root
        if 'locale' in kwargs:
            # Serving paths carry locale aliases; internal paths use Babel
            # locale identifiers.
            locale = locales.Locale.from_alias(self.pod, kwargs['locale'])
            kwargs['locale'] = str(locale)
        if '{root}' in source_format:
            kwargs['root'] = self.pod.podspec.root
        pod_path = source_format.format(**kwargs)
        if self.fingerprinted:
            pod_path = StaticFile.remove_fingerprint(pod_path)
        if self.pod.file_exists(pod_path):
            return pod_path
def get_pod_path(self, params):
    """Return the pod path to serve for params, preferring a localized file."""
    # If a localized file exists, serve it. Otherwise, serve the base file.
    pod_path = self.get_localized_pod_path(params)
    if pod_path:
        return pod_path
    pod_path = self.source_format.format(**params)
    if self.fingerprinted:
        pod_path = StaticFile.remove_fingerprint(pod_path)
    return pod_path
def validate(self, params):
    """Raise HTTP 404 if the resolved pod path does not exist."""
    pod_path = self.get_pod_path(params)
    if not self.pod.file_exists(pod_path):
        path = self.pod.abs_path(pod_path)
        message = '{} does not exist.'.format(path)
        raise webob.exc.HTTPNotFound(message)
def render(self, params, inject=False):
    """Return the raw file contents for the resolved pod path."""
    # 'inject' is part of the controller interface but unused for static files.
    pod_path = self.get_pod_path(params)
    return self.pod.read_file(pod_path)
def get_mimetype(self, params):
    """Guess the MIME type from the resolved pod path's extension."""
    pod_path = self.get_pod_path(params)
    return mimetypes.guess_type(pod_path)[0]
def get_http_headers(self, params):
    """Return HTTP response headers (caching + Grow diagnostics) for the file."""
    pod_path = self.get_pod_path(params)
    path = self.pod.abs_path(pod_path)
    headers = super(StaticController, self).get_http_headers(params)
    self.pod.storage.update_headers(headers, path)
    modified = self.pod.storage.modified(path)
    time_obj = datetime.fromtimestamp(modified).timetuple()
    time_format = '%a, %d %b %Y %H:%M:%S GMT'
    headers['Last-Modified'] = time.strftime(time_format, time_obj)
    # Weak validator derived from the modification time.
    headers['ETag'] = '"{}"'.format(headers['Last-Modified'])
    headers['X-Grow-Pod-Path'] = pod_path
    # NOTE(review): __init__ never sets self.locale on this class - this
    # attribute access may raise AttributeError unless a subclass or the base
    # controller provides it; verify.
    if self.locale:
        headers['X-Grow-Locale'] = self.locale
    return headers
def match_pod_path(self, pod_path):
    """Map a pod path to its serving path under this controller, or None."""
    if self.path_format == pod_path:
        if self.fingerprinted:
            fingerprint = StaticFile._create_fingerprint(self.pod, pod_path)
            return StaticFile.apply_fingerprint(self.path_format, fingerprint)
        return self.path_format
    # Format fields present in the serving path format.
    tokens = re.findall('.?{([^}]+)}.?', self.path_format)
    if 'filename' in tokens:
        # Turn the source format into a regex that captures the variable
        # parts (filename/locale/fingerprint/root) out of the pod path.
        source_regex = self.source_format.replace(
            '(unknown)', '(?P<filename>.*)')
        source_regex = source_regex.replace('{locale}', '(?P<locale>[^/]*)')
        source_regex = source_regex.replace('{fingerprint}', '(?P<fingerprint>[^/])')
        source_regex = source_regex.replace('{root}', '(?P<root>[^/])')
        match = re.match(source_regex, pod_path)
        if match:
            kwargs = match.groupdict()
            kwargs['root'] = self.pod.podspec.root
            if 'fingerprint' in tokens:
                fingerprint = StaticFile._create_fingerprint(self.pod, pod_path)
                kwargs['fingerprint'] = fingerprint
            if 'locale' in kwargs:
                # Pod paths carry aliases; normalize to Babel identifiers.
                locale = locales.Locale.from_alias(self.pod, kwargs['locale'])
                kwargs['locale'] = str(locale)
            path = self.path_format.format(**kwargs)
            path = path.replace('//', '/')
            if self.fingerprinted:
                fingerprint = StaticFile._create_fingerprint(self.pod, pod_path)
                path = StaticFile.apply_fingerprint(path, fingerprint)
            return path
def list_concrete_paths(self):
        """Enumerate all concrete serving paths this static route produces.

        Returns a list of fully-resolved public paths: the single path for a
        non-templated route, or one path per matching file under the source
        directory for {filename}-templated routes.
        """
        concrete_paths = set()
        tokens = re.findall('.?{([^}]+)}.?', self.path_format)
        source_regex = self.source_format.replace('(unknown)', '(?P<filename>.*)')
        source_regex = source_regex.replace('{locale}', '(?P<locale>[^/]*)')
        if '{' not in self.path_format:
            # Non-templated route: exactly one concrete path.
            if self.fingerprinted:
                fingerprint = StaticFile._create_fingerprint(self.pod, self.path_format)
                path = StaticFile.apply_fingerprint(self.path_format, fingerprint)
                concrete_paths.add(path)
            else:
                concrete_paths.add(self.path_format)
        elif 'filename' in tokens:
            # NOTE: This should be updated to support globbing directories,
            # and not simply strip all sub-paths beneath {locale}.
            source = self.source_format.replace('(unknown)', '')[1:]
            source = re.sub('{locale}.*', '', source)
            source = source.rstrip('/')
            paths = self.pod.list_dir(source)
            paths = [('/' + source + path).replace(self.pod.root, '')
                     for path in paths]
            # Exclude paths matched by skip patterns.
            for pattern in SKIP_PATTERNS:
                # .gitignore-style treatment of paths without slashes.
                if '/' not in pattern:
                    pattern = '**{}**'.format(pattern)
                # NOTE(review): 'skip_paths' is a single string here, so
                # 'not in skip_paths' is a SUBSTRING test — confirm intended.
                for skip_paths in fnmatch.filter(paths, pattern):
                    paths = [path for path in paths
                             if path.replace(self.pod.root, '') not in skip_paths]
            for pod_path in paths:
                match = re.match(source_regex, pod_path)
                # Skip adding localized paths in subfolders of other rules.
                if not self.localized and self.localization:
                    localized_source_format = self.localization['static_dir']
                    localized_source_regex = localized_source_format.replace(
                        '(unknown)', '(?P<filename>.*)')
                    localized_source_regex = localized_source_regex.replace(
                        '{locale}', '(?P<locale>[^/]*)')
                    if re.match(localized_source_regex, pod_path):
                        continue
                if match:
                    kwargs = match.groupdict()
                    kwargs['root'] = self.pod.podspec.root
                    if 'fingerprint' in self.path_format:
                        fingerprint = StaticFile._create_fingerprint(self.pod, pod_path)
                        kwargs['fingerprint'] = fingerprint
                    if 'locale' in kwargs:
                        # Prefer the locale's alias when one is configured.
                        normalized_locale = self.pod.normalize_locale(kwargs['locale'])
                        kwargs['locale'] = (
                            normalized_locale.alias if normalized_locale is not None
                            else normalized_locale)
                    matched_path = self.path_format.format(**kwargs)
                    matched_path = matched_path.replace('//', '/')
                    if self.fingerprinted:
                        fingerprint = StaticFile._create_fingerprint(self.pod, pod_path)
                        matched_path = StaticFile.apply_fingerprint(matched_path, fingerprint)
                    concrete_paths.add(matched_path)
        return list(concrete_paths)
# Allow the yaml dump to write out a representation of the static file.
def static_representer(dumper, data):
    """Represent a StaticFile as a ``!g.static`` YAML scalar of its pod path."""
    return dumper.represent_scalar(u'!g.static', data.pod_path)
# Register the representer so SafeDumper can serialize StaticFile objects.
yaml.SafeDumper.add_representer(StaticFile, static_representer)
| 11,948 | 815 | 114 |
ed5dc7fbe5d1853866769daf83a8fd0e7b5abc65 | 1,893 | py | Python | tests/trestle/core/models/write_action_test.py | PritamDutt/compliance-trestle | 7edadde2bd2949e73a085bd78ef57995250fc9cb | [
"Apache-2.0"
] | null | null | null | tests/trestle/core/models/write_action_test.py | PritamDutt/compliance-trestle | 7edadde2bd2949e73a085bd78ef57995250fc9cb | [
"Apache-2.0"
] | null | null | null | tests/trestle/core/models/write_action_test.py | PritamDutt/compliance-trestle | 7edadde2bd2949e73a085bd78ef57995250fc9cb | [
"Apache-2.0"
] | null | null | null | # -*- mode:python; coding:utf-8 -*-
# Copyright (c) 2020 IBM Corp. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for trestle write actions class."""
import os
from tests import test_utils
import trestle.common.const as const
from trestle.core.models.actions import WriteAction
from trestle.core.models.elements import Element
from trestle.core.models.file_content_type import FileContentType
def test_write_action_yaml(tmp_yaml_file, sample_nist_component_def):
    """Verify WriteAction serializes a component definition to YAML."""
    element = Element(sample_nist_component_def, 'component-definition')
    with open(tmp_yaml_file, 'w+', encoding=const.FILE_ENCODING) as writer:
        action = WriteAction(writer, element, FileContentType.YAML)
        action.execute()
        writer.flush()
        writer.close()
    # What landed on disk must round-trip back to the in-memory element.
    test_utils.verify_file_content(tmp_yaml_file, element.get())
    os.remove(tmp_yaml_file)
def test_write_action_json(tmp_json_file, sample_nist_component_def):
    """Verify WriteAction serializes a component definition to JSON."""
    element = Element(sample_nist_component_def, 'component-definition')
    with open(tmp_json_file, 'w+', encoding=const.FILE_ENCODING) as writer:
        action = WriteAction(writer, element, FileContentType.JSON)
        action.execute()
        writer.flush()
        writer.close()
    # What landed on disk must round-trip back to the in-memory element.
    test_utils.verify_file_content(tmp_json_file, element.get())
    os.remove(tmp_json_file)
| 33.803571 | 75 | 0.743265 | # -*- mode:python; coding:utf-8 -*-
# Copyright (c) 2020 IBM Corp. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for trestle write actions class."""
import os
from tests import test_utils
import trestle.common.const as const
from trestle.core.models.actions import WriteAction
from trestle.core.models.elements import Element
from trestle.core.models.file_content_type import FileContentType
def test_write_action_yaml(tmp_yaml_file, sample_nist_component_def):
    """Test write yaml action."""
    element = Element(sample_nist_component_def, 'component-definition')
    with open(tmp_yaml_file, 'w+', encoding=const.FILE_ENCODING) as writer:
        wa = WriteAction(writer, element, FileContentType.YAML)
        wa.execute()
        writer.flush()
        # Explicit close inside the with-block; the context manager's close on
        # exit is then a no-op.
        writer.close()
    test_utils.verify_file_content(tmp_yaml_file, element.get())
    os.remove(tmp_yaml_file)
def test_write_action_json(tmp_json_file, sample_nist_component_def):
    """Test write json action."""
    element = Element(sample_nist_component_def, 'component-definition')
    with open(tmp_json_file, 'w+', encoding=const.FILE_ENCODING) as writer:
        wa = WriteAction(writer, element, FileContentType.JSON)
        wa.execute()
        writer.flush()
        # Explicit close inside the with-block; the context manager's close on
        # exit is then a no-op.
        writer.close()
    test_utils.verify_file_content(tmp_json_file, element.get())
    os.remove(tmp_json_file)
| 0 | 0 | 0 |
6a8219fb862a4d1c6fba9135a0bfd9620b4f8a37 | 1,650 | py | Python | ripcord/api/__init__.py | kickstandproject/ripcord | 6a9a59df834f08dad001a8439447ed4b699639ed | [
"Apache-2.0"
] | 1 | 2016-03-26T21:30:17.000Z | 2016-03-26T21:30:17.000Z | ripcord/api/__init__.py | kickstandproject/ripcord | 6a9a59df834f08dad001a8439447ed4b699639ed | [
"Apache-2.0"
] | null | null | null | ripcord/api/__init__.py | kickstandproject/ripcord | 6a9a59df834f08dad001a8439447ed4b699639ed | [
"Apache-2.0"
] | null | null | null | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (C) 2013 PolyBeacon, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from oslo.config import cfg
# Register options for the service.
# These options live under the [api] group of the ripcord configuration file.
API_SERVICE_OPTS = [
    # TCP port the API server binds to (was DEFAULT/bind_port).
    cfg.IntOpt('port',
               default=9869,
               deprecated_name='bind_port',
               deprecated_group='DEFAULT',
               help='The port for the ripcord API server.',
               ),
    # Listen address; 0.0.0.0 binds all interfaces (was DEFAULT/bind_host).
    cfg.StrOpt('host',
               default='0.0.0.0',
               deprecated_name='bind_host',
               deprecated_group='DEFAULT',
               help='The listen IP for the ripcord API server.',
               ),
    # Reverse-DNS lookups can slow API responses when no DNS server exists.
    cfg.BoolOpt('enable_reverse_dns_lookup',
                default=False,
                help=('Set it to False if your environment does not need '
                      'or have dns server, otherwise it will delay the '
                      'response from api.')
                ),
]
CONF = cfg.CONF
opt_group = cfg.OptGroup(name='api',
                         title='Options for the ripcord-api service')
CONF.register_group(opt_group)
CONF.register_opts(API_SERVICE_OPTS, opt_group)
| 35.869565 | 74 | 0.631515 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (C) 2013 PolyBeacon, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from oslo.config import cfg
# Register options for the service.
# All options are registered under the [api] configuration group below.
API_SERVICE_OPTS = [
    cfg.IntOpt('port',
               default=9869,
               deprecated_name='bind_port',
               deprecated_group='DEFAULT',
               help='The port for the ripcord API server.',
               ),
    cfg.StrOpt('host',
               default='0.0.0.0',
               deprecated_name='bind_host',
               deprecated_group='DEFAULT',
               help='The listen IP for the ripcord API server.',
               ),
    cfg.BoolOpt('enable_reverse_dns_lookup',
                default=False,
                help=('Set it to False if your environment does not need '
                      'or have dns server, otherwise it will delay the '
                      'response from api.')
                ),
]
CONF = cfg.CONF
opt_group = cfg.OptGroup(name='api',
                         title='Options for the ripcord-api service')
CONF.register_group(opt_group)
CONF.register_opts(API_SERVICE_OPTS, opt_group)
| 0 | 0 | 0 |
164cf8647016545d99d548afb98795e57792cae8 | 1,756 | py | Python | noxfile.py | lewoudar/configuror | 21cc768ca7548e193c3e585491c2b0fd86b49f3c | [
"Apache-2.0"
] | 2 | 2020-04-24T16:24:19.000Z | 2021-12-26T02:28:55.000Z | noxfile.py | lewoudar/configuror | 21cc768ca7548e193c3e585491c2b0fd86b49f3c | [
"Apache-2.0"
] | 8 | 2019-11-08T14:12:43.000Z | 2020-01-05T16:51:30.000Z | noxfile.py | lewoudar/configuror | 21cc768ca7548e193c3e585491c2b0fd86b49f3c | [
"Apache-2.0"
] | null | null | null | import os
import shutil
import nox
nox.options.reuse_existing_virtualenvs = True
PYTHON_VERSIONS = ['3.6', '3.7', '3.8']
@nox.session(python=PYTHON_VERSIONS[-1])
def lint(session):
    """Run pep8 (flake8) and security (bandit) checks over the package."""
    package = 'configuror'
    session.install('flake8==3.7.9', 'bandit==1.6.2')
    session.run('flake8', package)
    session.run('bandit', '-r', package)
@nox.session(python=PYTHON_VERSIONS)
def tests(session):
    """Runs the test suite."""
    session.install('poetry>=1.0.0,<2.0.0')
    session.run('poetry', 'install')
    session.cd('tests')
    session.run('pytest')
    # Coverage is only uploaded from the newest interpreter, and never on AppVeyor.
    on_latest_python = session.python == PYTHON_VERSIONS[-1]
    on_appveyor = 'APPVEYOR_URL' in os.environ
    if on_latest_python and not on_appveyor:
        session.notify('codecov')
@nox.session
def codecov(session):
    """Runs codecov command to share coverage information on codecov.io"""
    session.install('codecov==2.0.15')
    session.cd('tests')
    # Convert the collected coverage data to XML, then upload it.
    session.run('coverage', 'xml', '-i')
    session.run('codecov', '-f', 'coverage.xml')
@nox.session(python=PYTHON_VERSIONS[-1])
def docs(session):
    """Build the project documentation site with mkdocs."""
    session.install('mkdocs==1.1')
    session.run('mkdocs', 'build', '--clean')
@nox.session(python=PYTHON_VERSIONS[-1])
def deploy(session):
    """Build the package and publish it on PyPI via poetry."""
    # Fail fast when the PyPI token is missing from the environment.
    if 'POETRY_PYPI_TOKEN_PYPI' not in os.environ:
        session.error('you must specify your pypi token api to deploy your package')
    session.install('poetry>=1.0.0,<2.0.0')
    session.run('poetry', 'publish', '--build')
@nox.session(python=False)
def clean(*_):
    """Remove the .nox directory, which can take up a fair amount of disk."""
    nox_dir = '.nox'
    shutil.rmtree(nox_dir, ignore_errors=True)
| 27.015385 | 86 | 0.665148 | import os
import shutil
import nox
nox.options.reuse_existing_virtualenvs = True
PYTHON_VERSIONS = ['3.6', '3.7', '3.8']
@nox.session(python=PYTHON_VERSIONS[-1])
def lint(session):
    """Performs pep8 and security checks."""
    source_code = 'configuror'
    # Tool versions are pinned for reproducible lint results.
    session.install('flake8==3.7.9', 'bandit==1.6.2')
    session.run('flake8', source_code)
    session.run('bandit', '-r', source_code)
@nox.session(python=PYTHON_VERSIONS)
def tests(session):
    """Runs the test suite."""
    session.install('poetry>=1.0.0,<2.0.0')
    session.run('poetry', 'install')
    session.cd('tests')
    session.run('pytest')
    # we notify codecov when the latest version of python is used
    # (and never from AppVeyor, which has its own coverage handling)
    if session.python == PYTHON_VERSIONS[-1] and 'APPVEYOR_URL' not in os.environ:
        session.notify('codecov')
@nox.session
def codecov(session):
    """Runs codecov command to share coverage information on codecov.io"""
    session.install('codecov==2.0.15')
    session.cd('tests')
    # '-i' ignores errors so partial coverage data still produces a report.
    session.run('coverage', 'xml', '-i')
    session.run('codecov', '-f', 'coverage.xml')
@nox.session(python=PYTHON_VERSIONS[-1])
def docs(session):
    """Builds the documentation."""
    session.install('mkdocs==1.1')
    # '--clean' removes stale files from the previous build.
    session.run('mkdocs', 'build', '--clean')
@nox.session(python=PYTHON_VERSIONS[-1])
def deploy(session):
    """
    Deploys on pypi.
    """
    # Abort early when the poetry PyPI token is not configured.
    if 'POETRY_PYPI_TOKEN_PYPI' not in os.environ:
        session.error('you must specify your pypi token api to deploy your package')
    session.install('poetry>=1.0.0,<2.0.0')
    session.run('poetry', 'publish', '--build')
@nox.session(python=False)
def clean(*_):
    """Since nox take a bit of memory, this command helps to clean nox environment."""
    shutil.rmtree('.nox', ignore_errors=True)
| 0 | 0 | 0 |
a04c09a2067d2cdebbbd76490ba595924d663536 | 26,488 | py | Python | QUANTAXIS_Test/mytest.py | xixigaga/QUANTAXIS | 6f8e7aaa976d4e4072c96ede747e07d56618f53e | [
"MIT"
] | null | null | null | QUANTAXIS_Test/mytest.py | xixigaga/QUANTAXIS | 6f8e7aaa976d4e4072c96ede747e07d56618f53e | [
"MIT"
] | 3 | 2020-10-24T03:56:06.000Z | 2021-10-18T03:48:28.000Z | QUANTAXIS_Test/mytest.py | xixigaga/QUANTAXIS | 6f8e7aaa976d4e4072c96ede747e07d56618f53e | [
"MIT"
] | null | null | null | # coding:utf-8
#
# The MIT License (MIT)
#
# Copyright (c) 2018-2020 azai/Rgveda/GolemQuant
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
import datetime
import time
import numpy as np
import pandas as pd
import pymongo
try:
    import QUANTAXIS as QA
    from QUANTAXIS.QAUtil import (QASETTING,
                                  DATABASE,
                                  QA_util_log_info,
                                  QA_util_to_json_from_pandas,)
    from QUANTAXIS.QAUtil.QAParameter import ORDER_DIRECTION
    from QUANTAXIS.QAData.QADataStruct import (QA_DataStruct_Index_min,
                                               QA_DataStruct_Index_day,
                                               QA_DataStruct_Stock_day,
                                               QA_DataStruct_Stock_min)
    from QUANTAXIS.QAUtil.QADate_Adv import (
        QA_util_timestamp_to_str,
        QA_util_datetime_to_Unix_timestamp,
        QA_util_print_timestamp
    )
# BUGFIX: only trap missing-dependency errors; a bare 'except:' also hid
# unrelated failures (e.g. SyntaxError inside QUANTAXIS, KeyboardInterrupt).
except ImportError:
    print('PLEASE run "pip install QUANTAXIS" to call these modules')
try:
    from GolemQ.GQUtil.parameter import (
        AKA,
        INDICATOR_FIELD as FLD,
        TREND_STATUS as ST,
    )
# BUGFIX: trap only the expected missing-dependency error instead of a bare
# 'except:' that would also hide unrelated failures inside GolemQ.
except ImportError:
    # GolemQ is optional; fall back to local constant holders so the rest of
    # the module can keep using AKA.* / ST.* names.
    class AKA():
        """Candlestick/identifier field-name constants.

        Using named constants instead of raw strings avoids typo bugs.
        """
        # Candlestick fields
        CODE = 'code'
        NAME = 'name'
        OPEN = 'open'
        HIGH = 'high'
        LOW = 'low'
        CLOSE = 'close'
        VOLUME = 'volume'
        VOL = 'vol'
        DATETIME = 'datetime'
        LAST_CLOSE = 'last_close'
        PRICE = 'price'
        SYSTEM_NAME = 'myQuant'

    class ST():
        """Trend-status field-name constants.

        Using named constants instead of raw strings avoids typo bugs.
        """
        # Status fields
        POSITION_R5 = 'POS_R5'
        TRIGGER_R5 = 'TRG_R5'
        CANDIDATE = 'CANDIDATE'
def GQSignal_util_save_indices_day(code,
                                   indices,
                                   market_type=QA.MARKET_TYPE.STOCK_CN,
                                   portfolio='myportfolio',
                                   ui_log=None,
                                   ui_progress=None):
    """Save computed daily stock indicators into MongoDB for later screening.

    BUGFIX: this copy of the function called _check_index/_formatter_data
    without defining them (NameError at runtime); they are restored below as
    nested helpers, taken from the duplicate implementation later in this file.

    Arguments:
        code {str} -- security code the indicator frame belongs to
        indices {pd.DataFrame} -- indicator frame indexed by (datetime, code)
        market_type -- one of QA.MARKET_TYPE.*, selects the target collection
        portfolio {str} -- suffix appended to the collection name
    Returns:
        bool -- True on success, False for an unknown market type
    """
    def _check_index(coll_indices):
        # Idempotently create the unique keys and query indexes used below.
        # 'datetime' / 'FLU_POSITIVE' literals stand in for FLD.DATETIME /
        # FLD.FLU_POSITIVE, which this copy of the module does not define.
        coll_indices.create_index([("code", pymongo.ASCENDING),
                                   ('datetime', pymongo.ASCENDING)],
                                  unique=True)
        coll_indices.create_index([("date", pymongo.ASCENDING),
                                   (ST.TRIGGER_R5, pymongo.ASCENDING)])
        coll_indices.create_index([("date", pymongo.ASCENDING),
                                   (ST.POSITION_R5, pymongo.ASCENDING)])
        coll_indices.create_index([('date_stamp', pymongo.ASCENDING),
                                   (ST.TRIGGER_R5, pymongo.ASCENDING)])
        coll_indices.create_index([('date_stamp', pymongo.ASCENDING),
                                   (ST.POSITION_R5, pymongo.ASCENDING)])
        coll_indices.create_index([("date", pymongo.ASCENDING),
                                   ('FLU_POSITIVE', pymongo.ASCENDING)])
        coll_indices.create_index([('date_stamp', pymongo.ASCENDING),
                                   ('FLU_POSITIVE', pymongo.ASCENDING)])
        coll_indices.create_index([("code", pymongo.ASCENDING),
                                   ('date_stamp', pymongo.ASCENDING)],
                                  unique=True)
        coll_indices.create_index([("code", pymongo.ASCENDING),
                                   ("date", pymongo.ASCENDING)],
                                  unique=True)
        coll_indices.create_index([("code", pymongo.ASCENDING),
                                   ('datetime', pymongo.ASCENDING),
                                   (ST.CANDIDATE, pymongo.ASCENDING)],
                                  unique=True)
        coll_indices.create_index([("code", pymongo.ASCENDING),
                                   ('date_stamp', pymongo.ASCENDING),
                                   (ST.CANDIDATE, pymongo.ASCENDING)],
                                  unique=True)
        coll_indices.create_index([("code", pymongo.ASCENDING),
                                   ("date", pymongo.ASCENDING),
                                   (ST.CANDIDATE, pymongo.ASCENDING)],
                                  unique=True)

    def _formatter_data(indices):
        # Flatten the (datetime, code) MultiIndex, keeping 'code' as a column.
        frame = indices.reset_index(1, drop=False)
        # Localize the naive index to Asia/Shanghai before string formatting.
        frame['date'] = pd.to_datetime(frame.index,).tz_localize('Asia/Shanghai')
        frame['date'] = frame['date'].dt.strftime('%Y-%m-%d')
        frame['datetime'] = pd.to_datetime(frame.index,).tz_localize('Asia/Shanghai')
        frame['datetime'] = frame['datetime'].dt.strftime('%Y-%m-%d %H:%M:%S')
        # Date string -> Unix timestamp in seconds.
        frame['date_stamp'] = pd.to_datetime(frame['date']).astype(np.int64) // 10 ** 9
        frame['created_at'] = int(time.mktime(datetime.datetime.now().utctimetuple()))
        # Drop the indicator warm-up rows at the head of the frame.
        frame = frame.tail(len(frame) - 150)
        return frame

    client = QASETTING.client[AKA.SYSTEM_NAME]
    try:
        if (market_type == QA.MARKET_TYPE.STOCK_CN):
            coll_indices = client.get_collection('stock_cn_indices_{}'.format(portfolio))
        elif (market_type == QA.MARKET_TYPE.INDEX_CN):
            coll_indices = client.get_collection('index_cn_indices_{}'.format(portfolio))
        elif (market_type == QA.MARKET_TYPE.FUND_CN):
            coll_indices = client.get_collection('fund_cn_indices_{}'.format(portfolio))
        elif (market_type == QA.MARKET_TYPE.FUTURE_CN):
            coll_indices = client.get_collection('future_cn_indices_{}'.format(portfolio))
        elif (market_type == QA.MARKET_TYPE.CRYPTOCURRENCY):
            coll_indices = client.get_collection('cryptocurrency_indices_{}'.format(portfolio))
        else:
            QA_util_log_info('WTF IS THIS! {} \n '.format(market_type), ui_log=ui_log)
            return False
    except Exception as e:
        QA_util_log_info(e)
        QA_util_log_info('WTF IS THIS! \n ', ui_log=ui_log)
        return False
    _check_index(coll_indices)
    data = _formatter_data(indices)
    # Are any of these dates already stored for this code?
    query_id = {
        "code": code,
        'date_stamp': {
            '$in': data['date_stamp'].tolist()
        }
    }
    refcount = coll_indices.count_documents(query_id)
    if refcount > 0:
        if (len(data) > 1):
            # Bulk mode: drop the overlapping rows, then insert the new batch.
            coll_indices.delete_many(query_id)
            data = QA_util_to_json_from_pandas(data)
            coll_indices.insert_many(data)
        else:
            # Incremental mode: replace the single existing row in place.
            data.drop('created_at', axis=1, inplace=True)
            data = QA_util_to_json_from_pandas(data)
            coll_indices.replace_one(query_id, data[0])
    else:
        # All-new rows: plain insert.
        data = QA_util_to_json_from_pandas(data)
        coll_indices.insert_many(data)
    return True
def GQSignal_util_save_indices_min(code,
                                   indices,
                                   frequence,
                                   market_type=QA.MARKET_TYPE.STOCK_CN,
                                   portfolio='myportfolio',
                                   ui_log=None,
                                   ui_progress=None):
    """
    Save all computed indicator data to MongoDB for later aggregation,
    evaluation and screening -- minute bars.
    save stock_indices, state
    Keyword Arguments:
        client {[type]} -- [description] (default: {DATABASE})
    """
    client = QASETTING.client[AKA.SYSTEM_NAME]
    # Write both wide and long tables at once to simplify querying.
    #coll_day = client.get_collection(
    #    'indices_{}'.format(datetime.date.today()))
    try:
        if (market_type == QA.MARKET_TYPE.STOCK_CN):
            #coll_indices = client.stock_cn_indices_min
            coll_indices = client.get_collection('stock_cn_indices_{}'.format(portfolio))
        elif (market_type == QA.MARKET_TYPE.INDEX_CN):
            #coll_indices = client.index_cn_indices_min
            coll_indices = client.get_collection('index_cn_indices_{}'.format(portfolio))
        elif (market_type == QA.MARKET_TYPE.FUND_CN):
            #coll_indices = client.fund_cn_indices_min
            coll_indices = client.get_collection('fund_cn_indices_{}'.format(portfolio))
        elif (market_type == QA.MARKET_TYPE.FUTURE_CN):
            #coll_indices = client.future_cn_indices_min
            coll_indices = client.get_collection('future_cn_indices_{}'.format(portfolio))
        elif (market_type == QA.MARKET_TYPE.CRYPTOCURRENCY):
            #coll_indices = client.cryptocurrency_indices_min
            coll_indices = client.get_collection('cryptocurrency_indices_{}'.format(portfolio))
        else:
            QA_util_log_info('WTF IS THIS! \n ', ui_log=ui_log)
            return False
    except Exception as e:
        QA_util_log_info(e)
        QA_util_log_info('WTF IS THIS! \n ', ui_log=ui_log)
        return False
    # NOTE(review): _check_index and _formatter_data are NOT defined in this
    # copy of the module (they exist only as nested helpers in the duplicate
    # implementation later in the file), so as written this raises NameError.
    # Also note _formatter_data is called here with an extra 'frequence'
    # argument that the known day-bar helper does not accept -- confirm the
    # intended minute-bar variant.
    _check_index(coll_indices)
    data = _formatter_data(indices, frequence)
    err = []
    # Is this a brand-new tick?
    query_id = {
        "code": code,
        'type': frequence,
        "time_stamp": {
            '$in': data['time_stamp'].tolist()
        }
    }
    refcount = coll_indices.count_documents(query_id)
    if refcount > 0:
        if (len(data) > 1):
            # Remove the duplicated rows before re-inserting the batch.
            coll_indices.delete_many(query_id)
            data = QA_util_to_json_from_pandas(data)
            coll_indices.insert_many(data)
        else:
            # Continuous-update mode: replace the single existing record.
            data.drop('created_at', axis=1, inplace=True)
            data = QA_util_to_json_from_pandas(data)
            coll_indices.replace_one(query_id, data[0])
    else:
        # New tick: insert the records.
        data = QA_util_to_json_from_pandas(data)
        coll_indices.insert_many(data)
    return True
def GQSignal_fetch_position_singal_day(start,
                                       end,
                                       frequence='day',
                                       market_type=QA.MARKET_TYPE.STOCK_CN,
                                       portfolio='myportfolio',
                                       format='numpy',
                                       ui_log=None,
                                       ui_progress=None):
    """Fetch daily position/trigger signal rows (TRG_R5 > 0) between dates.

    Arguments:
        start / end -- date-like, truncated to 'YYYY-MM-DD' (inclusive range)
        market_type -- one of QA.MARKET_TYPE.*, selects the source collection
        portfolio {str} -- collection name suffix
        format -- 'pd'/'pandas', 'json'/'dict', 'numpy', or 'list'
    Returns:
        DataFrame / dict / ndarray / list depending on ``format``;
        None on an empty result or bad parameters, False on an unknown market.
    """
    start = str(start)[0:10]
    end = str(end)[0:10]
    client = QASETTING.client[AKA.SYSTEM_NAME]
    try:
        if (market_type == QA.MARKET_TYPE.STOCK_CN):
            coll_indices = client.get_collection('stock_cn_indices_{}'.format(portfolio))
        elif (market_type == QA.MARKET_TYPE.INDEX_CN):
            coll_indices = client.get_collection('index_cn_indices_{}'.format(portfolio))
        elif (market_type == QA.MARKET_TYPE.FUND_CN):
            coll_indices = client.get_collection('fund_cn_indices_{}'.format(portfolio))
        elif (market_type == QA.MARKET_TYPE.FUTURE_CN):
            coll_indices = client.get_collection('future_cn_indices_{}'.format(portfolio))
        elif (market_type == QA.MARKET_TYPE.CRYPTOCURRENCY):
            coll_indices = client.get_collection('cryptocurrency_indices_{}'.format(portfolio))
        else:
            QA_util_log_info('WTF IS THIS! \n ', ui_log=ui_log)
            return False
    except Exception as e:
        QA_util_log_info(e)
        QA_util_log_info('WTF IS THIS! \n ', ui_log=ui_log)
        return False
    # BUGFIX: QA_util_date_valid / QA_util_date_stamp were referenced as bare
    # names but never imported in this module; access them via the QA package.
    if QA.QA_util_date_valid(end):
        cursor = coll_indices.find({
            ST.TRIGGER_R5: {
                '$gt': 0
            },
            "date_stamp":
                {
                    "$lte": QA.QA_util_date_stamp(end),
                    "$gte": QA.QA_util_date_stamp(start)
                }
        },
            {"_id": 0},
            batch_size=10000)
        res = pd.DataFrame([item for item in cursor])
        try:
            res = res.assign(date=pd.to_datetime(res.date)).drop_duplicates((['date',
                'code'])).set_index(['date',
                'code'],
                drop=False)
            # Attach the human-readable security name for each code.
            codelist = QA.QA_fetch_stock_name(res[AKA.CODE].tolist())
            res['name'] = res.apply(lambda x: codelist.at[x.get(AKA.CODE), 'name'], axis=1)
        except Exception:
            # An empty result set has no 'date' column; normalize to None.
            res = None
        if format in ['P', 'p', 'pandas', 'pd']:
            return res
        elif format in ['json', 'dict']:
            return QA_util_to_json_from_pandas(res)
        elif format in ['n', 'N', 'numpy']:
            # BUGFIX: the module imports numpy as 'np'; the original used the
            # undefined bare name 'numpy' (NameError).
            return np.asarray(res)
        elif format in ['list', 'l', 'L']:
            return np.asarray(res).tolist()
        else:
            print("QA Error GQSignal_fetch_position_singal_day format parameter %s is none of \"P, p, pandas, pd , json, dict , n, N, numpy, list, l, L, !\" " % format)
            return None
    else:
        QA_util_log_info('QA Error GQSignal_fetch_position_singal_day data parameter start=%s end=%s is not right' % (start,
                         end))
        return None
def GQSignal_fetch_singal_day(code,
                              start,
                              end,
                              frequence='day',
                              market_type=QA.MARKET_TYPE.STOCK_CN,
                              portfolio='myportfolio',
                              format='numpy',
                              ui_log=None,
                              ui_progress=None):
    """Fetch saved daily indicator / strategy-signal rows for given codes.

    Arguments:
        code -- a single code or a list of codes
        start / end -- date-like, truncated to 'YYYY-MM-DD' (inclusive range)
        market_type -- one of QA.MARKET_TYPE.*, selects the source collection
        portfolio {str} -- collection name suffix
        format -- 'pd'/'pandas', 'json'/'dict', 'numpy', or 'list'
    Returns:
        DataFrame / dict / ndarray / list depending on ``format``;
        None on an empty result or bad parameters, False on an unknown market.
    """
    start = str(start)[0:10]
    end = str(end)[0:10]
    client = QASETTING.client[AKA.SYSTEM_NAME]
    try:
        if (market_type == QA.MARKET_TYPE.STOCK_CN):
            coll_indices = client.get_collection('stock_cn_indices_{}'.format(portfolio))
        elif (market_type == QA.MARKET_TYPE.INDEX_CN):
            coll_indices = client.get_collection('index_cn_indices_{}'.format(portfolio))
        elif (market_type == QA.MARKET_TYPE.FUND_CN):
            coll_indices = client.get_collection('fund_cn_indices_{}'.format(portfolio))
        elif (market_type == QA.MARKET_TYPE.FUTURE_CN):
            coll_indices = client.get_collection('future_cn_indices_{}'.format(portfolio))
        elif (market_type == QA.MARKET_TYPE.CRYPTOCURRENCY):
            coll_indices = client.get_collection('cryptocurrency_indices_{}'.format(portfolio))
        else:
            QA_util_log_info('WTF IS THIS! \n ', ui_log=ui_log)
            return False
    except Exception as e:
        QA_util_log_info(e)
        QA_util_log_info('WTF IS THIS! \n ', ui_log=ui_log)
        return False
    # BUGFIX: QA_util_code_tolist / QA_util_date_valid / QA_util_date_stamp
    # were referenced as bare names but never imported; use the QA package.
    code = QA.QA_util_code_tolist(code)
    if QA.QA_util_date_valid(end):
        cursor = coll_indices.find({
            'code': {
                '$in': code
            },
            "date_stamp":
                {
                    "$lte": QA.QA_util_date_stamp(end),
                    "$gte": QA.QA_util_date_stamp(start)
                }
        },
            {"_id": 0},
            batch_size=10000)
        res = pd.DataFrame([item for item in cursor])
        try:
            res = res.assign(date=pd.to_datetime(res.date)).drop_duplicates((['date',
                'code'])).set_index(['date',
                'code'], drop=False)
            res.sort_index(inplace=True)
        except Exception:
            # An empty result set has no 'date' column; normalize to None.
            res = None
        if format in ['P', 'p', 'pandas', 'pd']:
            return res
        elif format in ['json', 'dict']:
            return QA_util_to_json_from_pandas(res)
        elif format in ['n', 'N', 'numpy']:
            # BUGFIX: the module imports numpy as 'np'; the original used the
            # undefined bare name 'numpy' (NameError).
            return np.asarray(res)
        elif format in ['list', 'l', 'L']:
            return np.asarray(res).tolist()
        else:
            print("QA Error GQSignal_fetch_singal_day format parameter %s is none of \"P, p, pandas, pd , json, dict , n, N, numpy, list, l, L, !\" " % format)
            return None
    else:
        QA_util_log_info('QA Error GQSignal_fetch_singal_day data parameter start=%s end=%s is not right' % (start,
                         end))
        return None
def GQ_save_test(code,
                 indices,
                 market_type=QA.MARKET_TYPE.STOCK_CN,
                 portfolio='myportfolio',
                 ui_log=None,
                 ui_progress=None):
    """
    Save all computed daily stock indicators to MongoDB for later aggregation,
    evaluation and screening -- day bars (test variant writing to 'quantaxis').
    save stock_indices, state
    Keyword Arguments:
        client {[type]} -- [description] (default: {DATABASE})
    """
    client = QASETTING.client['quantaxis']
    # Write both wide and long tables at once to simplify querying.
    #coll_day = client.get_collection(
    #    'indices_{}'.format(datetime.date.today()))
    try:
        if (market_type == QA.MARKET_TYPE.STOCK_CN):
            #coll_indices = client.stock_cn_indices_day
            coll_indices = client.get_collection('stock_cn_indices_{}'.format(portfolio))
        elif (market_type == QA.MARKET_TYPE.INDEX_CN):
            #coll_indices = client.index_cn_indices_day
            coll_indices = client.get_collection('index_cn_indices_{}'.format(portfolio))
        else:
            QA_util_log_info('WTF IS THIS! {} \n '.format(market_type), ui_log=ui_log)
            return False
    except Exception as e:
        QA_util_log_info(e)
        QA_util_log_info('WTF IS THIS! \n ', ui_log=ui_log)
        return False
    # NOTE(review): _check_index and _formatter_data are not defined in this
    # copy of the module (they exist as nested helpers in the duplicate
    # implementation later in the file); as written this raises NameError.
    _check_index(coll_indices)
    data = _formatter_data(indices)
    err = []
    # Is this a brand-new tick?
    query_id = {
        "code": code,
        'date_stamp': {
            '$in': data['date_stamp'].tolist()
        }
    }
    refcount = coll_indices.count_documents(query_id)
    if refcount > 0:
        if (len(data) > 1):
            # Remove the duplicated rows before re-inserting the batch.
            coll_indices.delete_many(query_id)
            data = QA_util_to_json_from_pandas(data)
            coll_indices.insert_many(data)
        else:
            # Continuous-update mode: replace the single existing record.
            data.drop('created_at', axis=1, inplace=True)
            data = QA_util_to_json_from_pandas(data)
            coll_indices.replace_one(query_id, data[0])
    else:
        # New tick: insert the records.
        data = QA_util_to_json_from_pandas(data)
        coll_indices.insert_many(data)
    return True
# Smoke test: fetch qfq-adjusted daily bars for 000001 and persist them.
# NOTE(review): this runs at import time; consider guarding it with
# `if __name__ == '__main__':` so importing the module does not hit the DB.
data_day = QA.QA_fetch_stock_day_adv(['000001'], '2018-12-01', '2019-05-20')
data_day = data_day.to_qfq().data
GQ_save_test('000001',data_day)
#
# The MIT License (MIT)
#
# Copyright (c) 2018-2020 azai/Rgveda/GolemQuant
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
import datetime
import time
import numpy as np
import pandas as pd
import pymongo
try:
    import QUANTAXIS as QA
    from QUANTAXIS.QAUtil import (QASETTING,
                                  DATABASE,
                                  QA_util_log_info,
                                  QA_util_to_json_from_pandas,)
    from QUANTAXIS.QAUtil.QAParameter import ORDER_DIRECTION
    from QUANTAXIS.QAData.QADataStruct import (QA_DataStruct_Index_min,
                                               QA_DataStruct_Index_day,
                                               QA_DataStruct_Stock_day,
                                               QA_DataStruct_Stock_min)
    from QUANTAXIS.QAUtil.QADate_Adv import (
        QA_util_timestamp_to_str,
        QA_util_datetime_to_Unix_timestamp,
        QA_util_print_timestamp
    )
# NOTE(review): bare 'except:' hides every failure inside QUANTAXIS, not only
# a missing install — prefer 'except ImportError:'.
except:
    print('PLEASE run "pip install QUANTAXIS" to call these modules')
    pass
try:
    from GolemQ.GQUtil.parameter import (
        AKA,
        INDICATOR_FIELD as FLD,
        TREND_STATUS as ST,
    )
# BUGFIX: trap only the expected missing-dependency error instead of a bare
# 'except:' that would also hide unrelated failures inside GolemQ.
except ImportError:
    # GolemQ is optional; fall back to read-only local constant holders.
    # BUGFIX: each __setattr__ originally had an unreachable
    # 'return super().__setattr__(...)' after the raise; the dead code is removed.
    class AKA():
        """Candlestick/identifier field-name constants (read-only).

        Using named constants instead of raw strings avoids typo bugs.
        """
        # Candlestick fields
        CODE = 'code'
        NAME = 'name'
        OPEN = 'open'
        HIGH = 'high'
        LOW = 'low'
        CLOSE = 'close'
        VOLUME = 'volume'
        VOL = 'vol'
        DATETIME = 'datetime'
        LAST_CLOSE = 'last_close'
        PRICE = 'price'
        SYSTEM_NAME = 'myQuant'

        def __setattr__(self, name, value):
            raise Exception(u'Const Class can\'t allow to change property\' value.')

    class ST():
        """Trend-status field-name constants (read-only)."""
        # Status fields
        POSITION_R5 = 'POS_R5'
        TRIGGER_R5 = 'TRG_R5'
        CANDIDATE = 'CANDIDATE'

        def __setattr__(self, name, value):
            raise Exception(u'Const Class can\'t allow to change property\' value.')

    class FLD():
        """Indicator field-name constants (read-only)."""
        DATETIME = 'datetime'
        ML_FLU_TREND = 'ML_FLU_TREND'
        FLU_POSITIVE = 'FLU_POSITIVE'
        FLU_NEGATIVE = 'FLU_NEGATIVE'

        def __setattr__(self, name, value):
            raise Exception(u'Const Class can\'t allow to change property\' value.')
def GQSignal_util_save_indices_day(code,
                                   indices,
                                   market_type=QA.MARKET_TYPE.STOCK_CN,
                                   portfolio='myportfolio',
                                   ui_log=None,
                                   ui_progress=None):
    """
    在数据库中保存所有计算出来的股票日线指标,用于汇总评估和筛选数据——日线
    save stock_indices, state

    Persist daily indicator rows for one instrument into the per-portfolio
    MongoDB collection, replacing any rows that already exist for the same
    date stamps.

    Keyword Arguments:
        code {str} -- instrument code the rows belong to
        indices {pd.DataFrame} -- indicator frame indexed by (datetime, code)
        market_type -- one of QA.MARKET_TYPE.*, selects the target collection
        portfolio {str} -- suffix of the collection name
        ui_log / ui_progress -- optional UI logging hooks

    Returns:
        bool -- True on success, False when the market type is unknown or
        the collection lookup fails.
    """
    def _check_index(coll_indices):
        # Ensure all query paths used by the fetch helpers are indexed.
        # Each entry is (key_spec, unique); create_index is idempotent.
        index_specs = [
            ([("code", pymongo.ASCENDING), (FLD.DATETIME, pymongo.ASCENDING)], True),
            ([("date", pymongo.ASCENDING), (ST.TRIGGER_R5, pymongo.ASCENDING)], False),
            ([("date", pymongo.ASCENDING), (ST.POSITION_R5, pymongo.ASCENDING)], False),
            ([('date_stamp', pymongo.ASCENDING), (ST.TRIGGER_R5, pymongo.ASCENDING)], False),
            ([('date_stamp', pymongo.ASCENDING), (ST.POSITION_R5, pymongo.ASCENDING)], False),
            ([("date", pymongo.ASCENDING), (FLD.FLU_POSITIVE, pymongo.ASCENDING)], False),
            ([('date_stamp', pymongo.ASCENDING), (FLD.FLU_POSITIVE, pymongo.ASCENDING)], False),
            ([("code", pymongo.ASCENDING), ('date_stamp', pymongo.ASCENDING)], True),
            ([("code", pymongo.ASCENDING), ("date", pymongo.ASCENDING)], True),
            ([("code", pymongo.ASCENDING), (FLD.DATETIME, pymongo.ASCENDING),
              (ST.CANDIDATE, pymongo.ASCENDING)], True),
            ([("code", pymongo.ASCENDING), ('date_stamp', pymongo.ASCENDING),
              (ST.CANDIDATE, pymongo.ASCENDING)], True),
            ([("code", pymongo.ASCENDING), ("date", pymongo.ASCENDING),
              (ST.CANDIDATE, pymongo.ASCENDING)], True),
        ]
        for spec, unique in index_specs:
            if unique:
                coll_indices.create_index(spec, unique=True)
            else:
                coll_indices.create_index(spec)

    def _formatter_data(indices):
        # Flatten the (datetime, code) MultiIndex and add the string/stamp
        # columns that the queries rely on.
        frame = indices.reset_index(1, drop=False)
        # UTC时间转换为北京时间
        frame['date'] = pd.to_datetime(frame.index,).tz_localize('Asia/Shanghai')
        frame['date'] = frame['date'].dt.strftime('%Y-%m-%d')
        frame['datetime'] = pd.to_datetime(frame.index,).tz_localize('Asia/Shanghai')
        frame['datetime'] = frame['datetime'].dt.strftime('%Y-%m-%d %H:%M:%S')
        # GMT+0 String 转换为 UTC Timestamp
        frame['date_stamp'] = pd.to_datetime(frame['date']).astype(np.int64) // 10 ** 9
        frame['created_at'] = int(time.mktime(datetime.datetime.now().utctimetuple()))
        # Drop the first 150 warm-up rows of the indicator calculation.
        # NOTE(review): when len(frame) <= 150 this degenerates to
        # tail(<=0), which KEEPS rows instead of dropping them — confirm
        # whether short frames should be rejected instead.
        frame = frame.tail(len(frame) - 150)
        return frame

    client = QASETTING.client[AKA.SYSTEM_NAME]
    # 同时写入横表和纵表,减少查询困扰
    collection_names = {
        QA.MARKET_TYPE.STOCK_CN: 'stock_cn_indices_{}',
        QA.MARKET_TYPE.INDEX_CN: 'index_cn_indices_{}',
        QA.MARKET_TYPE.FUND_CN: 'fund_cn_indices_{}',
        QA.MARKET_TYPE.FUTURE_CN: 'future_cn_indices_{}',
        QA.MARKET_TYPE.CRYPTOCURRENCY: 'cryptocurrency_indices_{}',
    }
    try:
        if market_type not in collection_names:
            QA_util_log_info('WTF IS THIS! {} \n '.format(market_type), ui_log=ui_log)
            return False
        coll_indices = client.get_collection(
            collection_names[market_type].format(portfolio))
    except Exception as e:
        QA_util_log_info(e)
        QA_util_log_info('WTF IS THIS! \n ', ui_log=ui_log)
        return False

    _check_index(coll_indices)
    data = _formatter_data(indices)

    # 查询是否新 tick —— do rows sharing any incoming date stamp already exist?
    query_id = {
        "code": code,
        'date_stamp': {
            '$in': data['date_stamp'].tolist()
        }
    }
    refcount = coll_indices.count_documents(query_id)
    if refcount > 0:
        if len(data) > 1:
            # 删掉重复数据 —— bulk mode: drop overlapping rows, re-insert all.
            coll_indices.delete_many(query_id)
            data = QA_util_to_json_from_pandas(data)
            coll_indices.insert_many(data)
        else:
            # 持续更新模式,更新单条记录 —— keep the stored created_at value.
            data.drop('created_at', axis=1, inplace=True)
            data = QA_util_to_json_from_pandas(data)
            coll_indices.replace_one(query_id, data[0])
    else:
        # 新 tick,插入记录
        data = QA_util_to_json_from_pandas(data)
        coll_indices.insert_many(data)
    return True
def GQSignal_util_save_indices_min(code,
                                   indices,
                                   frequence,
                                   market_type=QA.MARKET_TYPE.STOCK_CN,
                                   portfolio='myportfolio',
                                   ui_log=None,
                                   ui_progress=None):
    """
    在数据库中保存所有计算出来的指标信息,用于汇总评估和筛选数据——分钟线
    save stock_indices, state

    Minute-level counterpart of GQSignal_util_save_indices_day: each row
    additionally carries a 'type' column (the bar frequency) and a
    'time_stamp' instead of the daily 'date_stamp'.

    Keyword Arguments:
        code {str} -- instrument code
        indices {pd.DataFrame} -- indicator frame indexed by (datetime, code)
        frequence -- bar frequency tag stored in the 'type' column
        market_type -- QA.MARKET_TYPE.*, selects the target collection
        portfolio {str} -- collection name suffix
        ui_log / ui_progress -- optional UI logging hooks

    Returns:
        bool -- True on success, False on unknown market type / DB error.
    """
    def _check_index(coll_indices):
        # (key_spec, unique) pairs covering the minute-level query paths.
        index_specs = [
            ([("code", pymongo.ASCENDING), ("type", pymongo.ASCENDING),
              (FLD.DATETIME, pymongo.ASCENDING)], True),
            ([("code", pymongo.ASCENDING), ("type", pymongo.ASCENDING),
              ("time_stamp", pymongo.ASCENDING)], True),
            ([(FLD.DATETIME, pymongo.ASCENDING), ("type", pymongo.ASCENDING),
              (ST.TRIGGER_R5, pymongo.ASCENDING)], False),
            ([(FLD.DATETIME, pymongo.ASCENDING), ("type", pymongo.ASCENDING),
              (ST.POSITION_R5, pymongo.ASCENDING)], False),
            ([("type", pymongo.ASCENDING), ("time_stamp", pymongo.ASCENDING),
              (ST.TRIGGER_R5, pymongo.ASCENDING)], False),
            ([("type", pymongo.ASCENDING), ("time_stamp", pymongo.ASCENDING),
              (ST.POSITION_R5, pymongo.ASCENDING)], False),
            ([(FLD.DATETIME, pymongo.ASCENDING), ("type", pymongo.ASCENDING),
              (FLD.FLU_POSITIVE, pymongo.ASCENDING)], False),
            ([("type", pymongo.ASCENDING), ("time_stamp", pymongo.ASCENDING),
              (FLD.FLU_POSITIVE, pymongo.ASCENDING)], False),
            ([("code", pymongo.ASCENDING), ("type", pymongo.ASCENDING),
              (FLD.DATETIME, pymongo.ASCENDING),
              (ST.CANDIDATE, pymongo.ASCENDING)], True),
            ([("code", pymongo.ASCENDING), ("type", pymongo.ASCENDING),
              ("time_stamp", pymongo.ASCENDING),
              (ST.CANDIDATE, pymongo.ASCENDING)], True),
        ]
        for spec, unique in index_specs:
            if unique:
                coll_indices.create_index(spec, unique=True)
            else:
                coll_indices.create_index(spec)

    def _formatter_data(indices, frequence):
        # Flatten the (datetime, code) MultiIndex and add the string/stamp
        # columns that the queries rely on.
        frame = indices.reset_index(1, drop=False)
        # UTC时间转换为北京时间
        frame['date'] = pd.to_datetime(frame.index,).tz_localize('Asia/Shanghai')
        frame['date'] = frame['date'].dt.strftime('%Y-%m-%d')
        frame['datetime'] = pd.to_datetime(frame.index,).tz_localize('Asia/Shanghai')
        frame['datetime'] = frame['datetime'].dt.strftime('%Y-%m-%d %H:%M:%S')
        # GMT+0 String 转换为 UTC Timestamp
        frame['time_stamp'] = pd.to_datetime(frame['datetime']).astype(np.int64) // 10 ** 9
        frame['type'] = frequence
        frame['created_at'] = int(time.mktime(datetime.datetime.now().utctimetuple()))
        # Drop the first 150 warm-up rows of the indicator calculation.
        # NOTE(review): tail(len-150) keeps rows when len <= 150 — confirm.
        frame = frame.tail(len(frame) - 150)
        return frame

    client = QASETTING.client[AKA.SYSTEM_NAME]
    # 同时写入横表和纵表,减少查询困扰
    collection_names = {
        QA.MARKET_TYPE.STOCK_CN: 'stock_cn_indices_{}',
        QA.MARKET_TYPE.INDEX_CN: 'index_cn_indices_{}',
        QA.MARKET_TYPE.FUND_CN: 'fund_cn_indices_{}',
        QA.MARKET_TYPE.FUTURE_CN: 'future_cn_indices_{}',
        QA.MARKET_TYPE.CRYPTOCURRENCY: 'cryptocurrency_indices_{}',
    }
    try:
        if market_type not in collection_names:
            # Include the offending value, matching the day-level saver.
            QA_util_log_info('WTF IS THIS! {} \n '.format(market_type), ui_log=ui_log)
            return False
        coll_indices = client.get_collection(
            collection_names[market_type].format(portfolio))
    except Exception as e:
        QA_util_log_info(e)
        QA_util_log_info('WTF IS THIS! \n ', ui_log=ui_log)
        return False

    _check_index(coll_indices)
    data = _formatter_data(indices, frequence)

    # 查询是否新 tick —— rows for this code/frequency/timestamps already stored?
    query_id = {
        "code": code,
        'type': frequence,
        "time_stamp": {
            '$in': data['time_stamp'].tolist()
        }
    }
    refcount = coll_indices.count_documents(query_id)
    if refcount > 0:
        if len(data) > 1:
            # 删掉重复数据 —— bulk mode: drop overlapping rows, re-insert all.
            coll_indices.delete_many(query_id)
            data = QA_util_to_json_from_pandas(data)
            coll_indices.insert_many(data)
        else:
            # 持续更新模式,更新单条记录 —— keep the stored created_at value.
            data.drop('created_at', axis=1, inplace=True)
            data = QA_util_to_json_from_pandas(data)
            coll_indices.replace_one(query_id, data[0])
    else:
        # 新 tick,插入记录
        data = QA_util_to_json_from_pandas(data)
        coll_indices.insert_many(data)
    return True
def GQSignal_fetch_position_singal_day(start,
                                       end,
                                       frequence='day',
                                       market_type=QA.MARKET_TYPE.STOCK_CN,
                                       portfolio='myportfolio',
                                       format='numpy',
                                       ui_log=None,
                                       ui_progress=None):
    """
    '获取股票指标日线'

    Fetch, across all instruments, the daily rows whose TRIGGER_R5 signal
    is positive inside [start, end], attach the stock name to each row and
    return them in the requested format.

    Keyword Arguments:
        start / end -- date values; str()-converted, first 10 characters used
        frequence -- unused here; kept for signature symmetry with the
                     minute-level fetchers
        market_type -- QA.MARKET_TYPE.*, selects the source collection
        portfolio -- collection name suffix
        format -- 'P'/'p'/'pandas'/'pd', 'json'/'dict', 'n'/'N'/'numpy',
                  'list'/'l'/'L'

    Returns:
        DataFrame / dict / ndarray / list depending on *format*; None when
        the date range or format is invalid (or the result is empty);
        False when the market type is unknown or the DB lookup fails.
    """
    start = str(start)[0:10]
    end = str(end)[0:10]
    client = QASETTING.client[AKA.SYSTEM_NAME]
    collection_names = {
        QA.MARKET_TYPE.STOCK_CN: 'stock_cn_indices_{}',
        QA.MARKET_TYPE.INDEX_CN: 'index_cn_indices_{}',
        QA.MARKET_TYPE.FUND_CN: 'fund_cn_indices_{}',
        QA.MARKET_TYPE.FUTURE_CN: 'future_cn_indices_{}',
        QA.MARKET_TYPE.CRYPTOCURRENCY: 'cryptocurrency_indices_{}',
    }
    try:
        if market_type not in collection_names:
            QA_util_log_info('WTF IS THIS! \n ', ui_log=ui_log)
            return False
        coll_indices = client.get_collection(
            collection_names[market_type].format(portfolio))
    except Exception as e:
        QA_util_log_info(e)
        QA_util_log_info('WTF IS THIS! \n ', ui_log=ui_log)
        return False

    if not QA_util_date_valid(end):
        QA_util_log_info(
            'QA Error GQSignal_fetch_position_singal_day data parameter start=%s end=%s is not right' % (start,
                                                                                                         end))
        return None

    cursor = coll_indices.find({
        ST.TRIGGER_R5: {
            '$gt': 0
        },
        "date_stamp":
            {
                "$lte": QA_util_date_stamp(end),
                "$gte": QA_util_date_stamp(start)
            }
    },
        {"_id": 0},
        batch_size=10000)
    res = pd.DataFrame([item for item in cursor])
    try:
        res = res.assign(date=pd.to_datetime(res.date)) \
            .drop_duplicates((['date', 'code'])) \
            .set_index(['date', 'code'], drop=False)
        codelist = QA.QA_fetch_stock_name(res[AKA.CODE].tolist())
        res['name'] = res.apply(lambda x: codelist.at[x.get(AKA.CODE), 'name'], axis=1)
    except Exception:
        # An empty result set has no 'date'/'code' columns; report "no data"
        # rather than raising. (Was a bare except; narrowed to Exception.)
        res = None
    if format in ['P', 'p', 'pandas', 'pd']:
        return res
    elif format in ['json', 'dict']:
        return QA_util_to_json_from_pandas(res)
    # 多种数据格式
    elif format in ['n', 'N', 'numpy']:
        return numpy.asarray(res)
    elif format in ['list', 'l', 'L']:
        return numpy.asarray(res).tolist()
    else:
        print("QA Error GQSignal_fetch_position_singal_day format parameter %s is none of \"P, p, pandas, pd , json, dict , n, N, numpy, list, l, L, !\" " % format)
        return None
def GQSignal_fetch_singal_day(code,
                              start,
                              end,
                              frequence='day',
                              market_type=QA.MARKET_TYPE.STOCK_CN,
                              portfolio='myportfolio',
                              format='numpy',
                              ui_log=None,
                              ui_progress=None):
    """
    获取股票日线指标/策略信号数据

    Fetch the daily indicator/signal rows of the given instrument(s) inside
    [start, end] and return them in the requested format.

    Keyword Arguments:
        code -- single code or list of codes (normalised via
                QA_util_code_tolist)
        start / end -- date values; str()-converted, first 10 characters used
        frequence -- unused here; kept for signature symmetry
        market_type -- QA.MARKET_TYPE.*, selects the source collection
        portfolio -- collection name suffix
        format -- 'P'/'p'/'pandas'/'pd', 'json'/'dict', 'n'/'N'/'numpy',
                  'list'/'l'/'L'

    Returns:
        DataFrame / dict / ndarray / list depending on *format*; None when
        the date range or format is invalid (or the result is empty);
        False when the market type is unknown or the DB lookup fails.
    """
    start = str(start)[0:10]
    end = str(end)[0:10]
    client = QASETTING.client[AKA.SYSTEM_NAME]
    collection_names = {
        QA.MARKET_TYPE.STOCK_CN: 'stock_cn_indices_{}',
        QA.MARKET_TYPE.INDEX_CN: 'index_cn_indices_{}',
        QA.MARKET_TYPE.FUND_CN: 'fund_cn_indices_{}',
        QA.MARKET_TYPE.FUTURE_CN: 'future_cn_indices_{}',
        QA.MARKET_TYPE.CRYPTOCURRENCY: 'cryptocurrency_indices_{}',
    }
    try:
        if market_type not in collection_names:
            QA_util_log_info('WTF IS THIS! \n ', ui_log=ui_log)
            return False
        coll_indices = client.get_collection(
            collection_names[market_type].format(portfolio))
    except Exception as e:
        QA_util_log_info(e)
        QA_util_log_info('WTF IS THIS! \n ', ui_log=ui_log)
        return False

    # code checking
    code = QA_util_code_tolist(code)

    if not QA_util_date_valid(end):
        QA_util_log_info(
            'QA Error GQSignal_fetch_singal_day data parameter start=%s end=%s is not right' % (start,
                                                                                                end))
        return None

    cursor = coll_indices.find({
        'code': {
            '$in': code
        },
        "date_stamp":
            {
                "$lte": QA_util_date_stamp(end),
                "$gte": QA_util_date_stamp(start)
            }
    },
        {"_id": 0},
        batch_size=10000)
    res = pd.DataFrame([item for item in cursor])
    try:
        res = res.assign(date=pd.to_datetime(res.date)) \
            .drop_duplicates((['date', 'code'])) \
            .set_index(['date', 'code'], drop=False)
        res.sort_index(inplace=True)
    except Exception:
        # An empty result set has no 'date'/'code' columns; report "no data"
        # rather than raising. (Was a bare except; narrowed to Exception.)
        res = None
    if format in ['P', 'p', 'pandas', 'pd']:
        return res
    elif format in ['json', 'dict']:
        return QA_util_to_json_from_pandas(res)
    # 多种数据格式
    elif format in ['n', 'N', 'numpy']:
        return numpy.asarray(res)
    elif format in ['list', 'l', 'L']:
        return numpy.asarray(res).tolist()
    else:
        print("QA Error GQSignal_fetch_singal_day format parameter %s is none of \"P, p, pandas, pd , json, dict , n, N, numpy, list, l, L, !\" " % format)
        return None
def GQ_save_test(code,
                 indices,
                 market_type=QA.MARKET_TYPE.STOCK_CN,
                 portfolio='myportfolio',
                 ui_log=None,
                 ui_progress=None):
    """
    在数据库中保存所有计算出来的股票日线指标,用于汇总评估和筛选数据——日线
    save stock_indices, state

    Trimmed-down test variant of GQSignal_util_save_indices_day: same row
    formatting and upsert logic, but only two indexes and only the
    stock / index markets.

    Keyword Arguments:
        code {str} -- instrument code the rows belong to
        indices {pd.DataFrame} -- indicator frame indexed by (datetime, code)
        market_type -- QA.MARKET_TYPE.STOCK_CN or INDEX_CN
        portfolio {str} -- collection name suffix

    Returns:
        bool -- True on success, False on unknown market type / DB error.
    """
    def _check_index(coll_indices):
        # Minimal index set used by the test queries.
        coll_indices.create_index([("code", pymongo.ASCENDING),
                                   ('datetime', pymongo.ASCENDING)],
                                  unique=True)
        coll_indices.create_index([("date", pymongo.ASCENDING),
                                   ('TRG_R5', pymongo.ASCENDING)])

    def _formatter_data(indices):
        frame = indices.reset_index(1, drop=False)
        # UTC时间转换为北京时间
        frame['date'] = pd.to_datetime(frame.index,).tz_localize('Asia/Shanghai')
        frame['date'] = frame['date'].dt.strftime('%Y-%m-%d')
        frame['datetime'] = pd.to_datetime(frame.index,).tz_localize('Asia/Shanghai')
        frame['datetime'] = frame['datetime'].dt.strftime('%Y-%m-%d %H:%M:%S')
        # GMT+0 String 转换为 UTC Timestamp
        frame['date_stamp'] = pd.to_datetime(frame['date']).astype(np.int64) // 10 ** 9
        frame['created_at'] = int(time.mktime(datetime.datetime.now().utctimetuple()))
        # Drop the first 150 warm-up rows of the indicator calculation.
        # NOTE(review): tail(len-150) keeps rows when len <= 150 — confirm.
        frame = frame.tail(len(frame) - 150)
        return frame

    # NOTE(review): hard-coded 'quantaxis' here, while the non-test savers
    # use QASETTING.client[AKA.SYSTEM_NAME] — presumably the same database;
    # confirm before unifying.
    client = QASETTING.client['quantaxis']
    collection_names = {
        QA.MARKET_TYPE.STOCK_CN: 'stock_cn_indices_{}',
        QA.MARKET_TYPE.INDEX_CN: 'index_cn_indices_{}',
    }
    try:
        if market_type not in collection_names:
            QA_util_log_info('WTF IS THIS! {} \n '.format(market_type), ui_log=ui_log)
            return False
        coll_indices = client.get_collection(
            collection_names[market_type].format(portfolio))
    except Exception as e:
        QA_util_log_info(e)
        QA_util_log_info('WTF IS THIS! \n ', ui_log=ui_log)
        return False

    _check_index(coll_indices)
    data = _formatter_data(indices)

    # 查询是否新 tick
    query_id = {
        "code": code,
        'date_stamp': {
            '$in': data['date_stamp'].tolist()
        }
    }
    refcount = coll_indices.count_documents(query_id)
    if refcount > 0:
        if len(data) > 1:
            # 删掉重复数据
            coll_indices.delete_many(query_id)
            data = QA_util_to_json_from_pandas(data)
            coll_indices.insert_many(data)
        else:
            # 持续更新模式,更新单条记录
            data.drop('created_at', axis=1, inplace=True)
            data = QA_util_to_json_from_pandas(data)
            coll_indices.replace_one(query_id, data[0])
    else:
        # 新 tick,插入记录
        data = QA_util_to_json_from_pandas(data)
        coll_indices.insert_many(data)
    return True
# Smoke test: fetch daily bars for stock 000001, forward-adjust (qfq) the
# prices, and persist the resulting frame through GQ_save_test.
data_day = QA.QA_fetch_stock_day_adv(['000001'], '2018-12-01', '2019-05-20')
data_day = data_day.to_qfq().data
GQ_save_test('000001',data_day)
2ba1e362c4cb6e5981ed640f2c5843d95fc6059d | 155 | py | Python | bayesiancoresets/snnls/__init__.py | trevorcampbell/hilbert-coresets | 63354127953a432c0f35087cf5b75166f652a5f5 | [
"MIT"
] | 118 | 2018-02-10T21:33:57.000Z | 2022-03-22T14:20:53.000Z | bayesiancoresets/snnls/__init__.py | trevorcampbell/hilbert-coresets | 63354127953a432c0f35087cf5b75166f652a5f5 | [
"MIT"
] | 3 | 2018-09-07T16:13:22.000Z | 2020-04-11T14:35:47.000Z | bayesiancoresets/snnls/__init__.py | trevorcampbell/hilbert-coresets | 63354127953a432c0f35087cf5b75166f652a5f5 | [
"MIT"
] | 30 | 2018-03-11T02:37:55.000Z | 2022-01-31T14:51:37.000Z | from .frankwolfe import FrankWolfe
from .sampling import ImportanceSampling, UniformSampling
from .giga import GIGA
from .orthopursuit import OrthoPursuit
| 31 | 57 | 0.858065 | from .frankwolfe import FrankWolfe
from .sampling import ImportanceSampling, UniformSampling
from .giga import GIGA
from .orthopursuit import OrthoPursuit
| 0 | 0 | 0 |
5301e5652125c4c5b4a0f5b510162296ac736436 | 2,667 | py | Python | computer_version/meter_pointer/test_color.py | afterloe/opencv-practice | 83d76132d004ebbc96d99d34a0fd3fc37a044f9f | [
"MIT"
] | 5 | 2020-03-13T07:34:30.000Z | 2021-10-01T03:03:05.000Z | computer_version/meter_pointer/test_color.py | afterloe/Opencv-practice | 83d76132d004ebbc96d99d34a0fd3fc37a044f9f | [
"MIT"
] | null | null | null | computer_version/meter_pointer/test_color.py | afterloe/Opencv-practice | 83d76132d004ebbc96d99d34a0fd3fc37a044f9f | [
"MIT"
] | 1 | 2020-03-01T12:35:02.000Z | 2020-03-01T12:35:02.000Z | #!/usr/bin/env python3
# -*- coding=utf-8 -*-
import cv2 as cv
import imutils
import time
import math
import numpy as np
"""
"""
if "__main__" == __name__:
main()
cv.destroyAllWindows()
| 32.925926 | 132 | 0.55643 | #!/usr/bin/env python3
# -*- coding=utf-8 -*-
import cv2 as cv
import imutils
import time
import math
import numpy as np
"""
"""
def calculate_distance(x1, y1, x2, y2):
    """Return the Euclidean distance between points (x1, y1) and (x2, y2)."""
    dx = x1 - x2
    dy = y1 - y2
    return np.sqrt(dx ** 2 + dy ** 2)
def main():
image = cv.imread("G:\\Project\\opencv-ascs-resources\\meter_pointer_roi\\2020-03-05_22-18-30.jpeg")
# image = cv.imread("G:\\Project\\opencv-ascs-resources\\save\\box.bmp")
start = time.time()
image = imutils.resize(image, width=300)
# HSV 分割
hsv = cv.cvtColor(image.copy(), cv.COLOR_BGR2HSV)
hsv_min = (0, 0, 0)
hsv_max = (180, 255, 50)
mask = cv.inRange(hsv, hsv_min, hsv_max)
lines = cv.HoughLinesP(mask, 1, np.pi / 180, 10, None, 30, 10)
if None is lines:
print("未检测到直线")
return
line = lines[0][0] # 检测到的直线信息
# 获取指针位置
cv.line(image, (line[0], line[1]), (line[2], line[3]), (0, 255, 255), 1, cv.LINE_AA)
# kernel = cv.getStructuringElement(cv.MORPH_RECT, (15, 15))
# edged = cv.morphologyEx(mask, cv.MORPH_DILATE, kernel)
# contours, _ = cv.findContours(edged, cv.RETR_EXTERNAL, cv.CHAIN_APPROX_SIMPLE)
# contours = sorted(contours, key=cv.contourArea, reverse=True)
# # 获取指针位置
# x, y, w, h = cv.boundingRect(contours[0])
# cv.circle(image, (line[0], line[1]), int(calculate_distance(line[0], line[1], line[2], line[3])), (255, 0, 0), 2, cv.LINE_AA)
# cv.line(image.copy(), (x, y), (x + w, y + h), (255, 255, 0), 2, cv.LINE_AA)
hsv = cv.cvtColor(image.copy(), cv.COLOR_BGR2HSV)
hsv_min = (0, 0, 0)
hsv_max = (180, 255, 150)
mask = cv.inRange(hsv, hsv_min, hsv_max)
cv.imshow("mask", mask)
# cnts = cv.findContours(threshed, cv.RETR_EXTERNAL, cv.CHAIN_APPROX_SIMPLE)
# cnts = imutils.grab_contours(cnts)
# dig_cnts = []
#
# for cnt in cnts:
# x, y, w, h = cv.boundingRect(cnt)
# # print(x, y, w, h)
# # print("---------------------------")
# if 3 < h < 20 and 1 < w < 75:
# rect = cv.minAreaRect(cnt)
# cx, cy = rect[0]
# dig_cnts.append((x, y, w, h, cx, cy)) # 提取数字
# for cnt in dig_cnts:
# x, y, w, h, cx, cy = cnt
# cv.rectangle(image, (x, y), (x + w, y + h), (255, 255, 0), 2, cv.LINE_8)
print("[INFO] tried_unknow detection took {:.6f} seconds".format(time.time() - start))
cv.imshow("image", image)
# cv.imshow("threshed", threshed)
# cv.imshow("edged", edged)
# cv.imshow("dst", dst)
cv.waitKey(0)
pass
if "__main__" == __name__:
main()
cv.destroyAllWindows()
| 2,459 | 0 | 50 |
2ae688c1e45c4864cee99bc93db6eb2d51954e98 | 997 | py | Python | PythonExe/ex070.py | ScnMatheus/my-python-projects | 9cc3511dd6e25d9b16429347598c105f15bd6916 | [
"MIT"
] | null | null | null | PythonExe/ex070.py | ScnMatheus/my-python-projects | 9cc3511dd6e25d9b16429347598c105f15bd6916 | [
"MIT"
] | null | null | null | PythonExe/ex070.py | ScnMatheus/my-python-projects | 9cc3511dd6e25d9b16429347598c105f15bd6916 | [
"MIT"
] | null | null | null | #crie um programa que leia o nome e o preço de varios produtos
#dever perguntar se o usuário quer continuar, no final mostre:
#qual é o total gasto nas compras
# quantos produtos custam mais de R$ 1000
#qual o nome do produto mais barato?
print('-=-'*10)
print(' MERCADÂO BARATO ')
print('-=-'*10)
preco = 0
menor = 0
maior = 0
soma = 0
quant = 0
menorproduto = ''
while True:
produto = str(input('Digite o nome do produto: '))
preco = int(input('Digite o valor do produto R$: '))
soma = soma + preco
quant = quant + 1
if quant == 1:
maior = preco
menor = preco
if preco > maior:
maior = preco
if preco < menor:
menor = preco
menorproduto = produto
parar = str(input('Deseja parar? [S/N] : ').upper() .strip())[0]
if parar == 'S':
break
print(f'a soma dos produtos comprado foi de R${soma} \n o produto mais caro foi {maior}\n',end='')
print(f'O produto mais barato foi {menor} e era o {menorproduto}') | 28.485714 | 98 | 0.622869 | #crie um programa que leia o nome e o preço de varios produtos
#dever perguntar se o usuário quer continuar, no final mostre:
#qual é o total gasto nas compras
# quantos produtos custam mais de R$ 1000
#qual o nome do produto mais barato?
print('-=-'*10)
print(' MERCADÂO BARATO ')
print('-=-'*10)
preco = 0
menor = 0
maior = 0
soma = 0
quant = 0
menorproduto = ''
while True:
produto = str(input('Digite o nome do produto: '))
preco = int(input('Digite o valor do produto R$: '))
soma = soma + preco
quant = quant + 1
if quant == 1:
maior = preco
menor = preco
if preco > maior:
maior = preco
if preco < menor:
menor = preco
menorproduto = produto
parar = str(input('Deseja parar? [S/N] : ').upper() .strip())[0]
if parar == 'S':
break
print(f'a soma dos produtos comprado foi de R${soma} \n o produto mais caro foi {maior}\n',end='')
print(f'O produto mais barato foi {menor} e era o {menorproduto}') | 0 | 0 | 0 |
c4213ad64416e9f2e14aa285b3ee7be94d395889 | 3,287 | py | Python | smt/tests/test_kpls_auto.py | Laurentww/smt | f124c01ffa78c04b80221dded278a20123dac742 | [
"BSD-3-Clause"
] | 354 | 2017-08-15T22:12:58.000Z | 2022-03-31T08:34:19.000Z | smt/tests/test_kpls_auto.py | enjoyneer87/smt | 4a4df255b9259965439120091007f9852f41523e | [
"BSD-3-Clause"
] | 258 | 2017-08-11T15:08:40.000Z | 2022-03-30T09:54:26.000Z | smt/tests/test_kpls_auto.py | enjoyneer87/smt | 4a4df255b9259965439120091007f9852f41523e | [
"BSD-3-Clause"
] | 184 | 2017-08-11T14:55:17.000Z | 2022-03-17T11:22:50.000Z | """
Author: Paul Saves
This package is distributed under New BSD license.
"""
import numpy as np
import unittest
import inspect
from collections import OrderedDict
from smt.problems import Sphere, TensorProduct, Rosenbrock, Branin
from smt.sampling_methods import LHS
from smt.utils.sm_test_case import SMTestCase
from smt.utils.silence import Silence
from smt.utils import compute_rms_error
from smt.surrogate_models import KPLS
print_output = False
# --------------------------------------------------------------------
# Function: sphere
if __name__ == "__main__":
print_output = True
print("%6s %8s %18s %18s" % ("SM", "Problem", "Train. pt. error", "Test pt. error"))
unittest.main()
| 25.284615 | 88 | 0.583207 | """
Author: Paul Saves
This package is distributed under New BSD license.
"""
import numpy as np
import unittest
import inspect
from collections import OrderedDict
from smt.problems import Sphere, TensorProduct, Rosenbrock, Branin
from smt.sampling_methods import LHS
from smt.utils.sm_test_case import SMTestCase
from smt.utils.silence import Silence
from smt.utils import compute_rms_error
from smt.surrogate_models import KPLS
print_output = False
class Test(SMTestCase):
def setUp(self):
ndim = 10
nt = 50
ne = 100
problems = OrderedDict()
problems["Branin"] = Branin(ndim=2)
problems["Rosenbrock"] = Rosenbrock(ndim=3)
problems["sphere"] = Sphere(ndim=ndim)
problems["exp"] = TensorProduct(ndim=ndim, func="exp")
problems["tanh"] = TensorProduct(ndim=ndim, func="tanh")
problems["cos"] = TensorProduct(ndim=ndim, func="cos")
sms = OrderedDict()
sms["KPLS"] = KPLS(eval_n_comp=True)
t_errors = {}
e_errors = {}
t_errors["KPLS"] = 1e-3
e_errors["KPLS"] = 2.5
n_comp_opt = {}
n_comp_opt["Branin"] = 2
n_comp_opt["Rosenbrock"] = 1
n_comp_opt["sphere"] = 1
n_comp_opt["exp"] = 3
n_comp_opt["tanh"] = 1
n_comp_opt["cos"] = 1
self.nt = nt
self.ne = ne
self.problems = problems
self.sms = sms
self.t_errors = t_errors
self.e_errors = e_errors
self.n_comp_opt = n_comp_opt
def run_test(self):
method_name = inspect.stack()[1][3]
pname = method_name.split("_")[1]
sname = method_name.split("_")[2]
prob = self.problems[pname]
sampling = LHS(xlimits=prob.xlimits, random_state=42)
np.random.seed(0)
xt = sampling(self.nt)
yt = prob(xt)
np.random.seed(1)
xe = sampling(self.ne)
ye = prob(xe)
sm0 = self.sms[sname]
sm = sm0.__class__()
sm.options = sm0.options.clone()
if sm.options.is_declared("xlimits"):
sm.options["xlimits"] = prob.xlimits
sm.options["print_global"] = False
sm.set_training_values(xt, yt)
with Silence():
sm.train()
l = sm.options["n_comp"]
t_error = compute_rms_error(sm)
e_error = compute_rms_error(sm, xe, ye)
if print_output:
print("%8s %6s %18.9e %18.9e" % (pname[:6], sname, t_error, e_error))
self.assert_error(t_error, 0.0, self.t_errors[sname], 1e-5)
self.assert_error(e_error, 0.0, self.e_errors[sname], 1e-5)
self.assertEqual(l, self.n_comp_opt[pname])
# --------------------------------------------------------------------
# Function: sphere
def test_Branin_KPLS(self):
self.run_test()
def test_Rosenbrock_KPLS(self):
self.run_test()
def test_sphere_KPLS(self):
self.run_test()
def test_exp_KPLS(self):
self.run_test()
def test_tanh_KPLS(self):
self.run_test()
def test_cos_KPLS(self):
self.run_test()
if __name__ == "__main__":
print_output = True
print("%6s %8s %18s %18s" % ("SM", "Problem", "Train. pt. error", "Test pt. error"))
unittest.main()
| 2,326 | 2 | 238 |
7dee1721ef5108e82bfc4b3a1e5249cfd7753ebc | 6,272 | py | Python | thonnycontrib/pyboard/api_stubs/pyb.py | thonny/thonny-pyboard | d967ffe653ef3678ae4ab8ed6a58ca696b47b6a6 | [
"MIT"
] | 2 | 2019-04-11T08:21:05.000Z | 2020-03-30T20:32:07.000Z | thonnycontrib/pyboard/api_stubs/pyb.py | thonny/thonny-pyboard | d967ffe653ef3678ae4ab8ed6a58ca696b47b6a6 | [
"MIT"
] | null | null | null | thonnycontrib/pyboard/api_stubs/pyb.py | thonny/thonny-pyboard | d967ffe653ef3678ae4ab8ed6a58ca696b47b6a6 | [
"MIT"
] | 1 | 2019-07-14T19:23:32.000Z | 2019-07-14T19:23:32.000Z |
class ADC:
''
class ADCAll:
''
class Accel:
''
class ExtInt:
''
EVT_FALLING = 270663680
EVT_RISING = 269615104
EVT_RISING_FALLING = 271712256
IRQ_FALLING = 270598144
IRQ_RISING = 269549568
IRQ_RISING_FALLING = 271646720
class Flash:
''
class I2C:
''
MASTER = 0
SLAVE = 1
class LCD:
''
class LED:
''
class Pin:
''
AF1_TIM1 = 1
AF1_TIM2 = 1
AF2_TIM3 = 2
AF2_TIM4 = 2
AF2_TIM5 = 2
AF3_TIM10 = 3
AF3_TIM11 = 3
AF3_TIM9 = 3
AF4_I2C1 = 4
AF4_I2C3 = 4
AF5_SPI1 = 5
AF5_SPI2 = 5
AF7_USART1 = 7
AF7_USART2 = 7
AF8_USART6 = 8
AF9_I2C3 = 9
AF_OD = 18
AF_PP = 2
ALT = 2
ALT_OPEN_DRAIN = 18
ANALOG = 3
IN = 0
IRQ_FALLING = 270598144
IRQ_RISING = 269549568
OPEN_DRAIN = 17
OUT = 1
OUT_OD = 17
OUT_PP = 1
PULL_DOWN = 2
PULL_NONE = 0
PULL_UP = 1
board = None
cpu = None
class RTC:
''
SD = None
class SDCard:
''
class SPI:
''
LSB = 128
MASTER = 260
MSB = 0
SLAVE = 0
class Servo:
''
class Switch:
''
class Timer:
''
BOTH = 10
CENTER = 32
DOWN = 16
ENC_A = 9
ENC_AB = 11
ENC_B = 10
FALLING = 2
HIGH = 0
IC = 8
LOW = 2
OC_ACTIVE = 3
OC_FORCED_ACTIVE = 6
OC_FORCED_INACTIVE = 7
OC_INACTIVE = 4
OC_TIMING = 2
OC_TOGGLE = 5
PWM = 0
PWM_INVERTED = 1
RISING = 0
UP = 0
class UART:
''
CTS = 512
RTS = 256
class USB_HID:
''
class USB_VCP:
''
hid_keyboard = None
hid_mouse = None
| 10.851211 | 34 | 0.459343 |
class ADC:
''
def read():
pass
def read_timed():
pass
def read_timed_multi():
pass
class ADCAll:
''
def read_channel():
pass
def read_core_temp():
pass
def read_core_vbat():
pass
def read_core_vref():
pass
def read_vref():
pass
class Accel:
''
def filtered_xyz():
pass
def read():
pass
def tilt():
pass
def write():
pass
def x():
pass
def y():
pass
def z():
pass
class ExtInt:
''
EVT_FALLING = 270663680
EVT_RISING = 269615104
EVT_RISING_FALLING = 271712256
IRQ_FALLING = 270598144
IRQ_RISING = 269549568
IRQ_RISING_FALLING = 271646720
def disable():
pass
def enable():
pass
def line():
pass
def regs():
pass
def swint():
pass
class Flash:
''
def ioctl():
pass
def readblocks():
pass
def writeblocks():
pass
class I2C:
''
MASTER = 0
SLAVE = 1
def deinit():
pass
def init():
pass
def is_ready():
pass
def mem_read():
pass
def mem_write():
pass
def recv():
pass
def scan():
pass
def send():
pass
class LCD:
''
def command():
pass
def contrast():
pass
def fill():
pass
def get():
pass
def light():
pass
def pixel():
pass
def show():
pass
def text():
pass
def write():
pass
class LED:
''
def intensity():
pass
def off():
pass
def on():
pass
def toggle():
pass
class Pin:
''
AF1_TIM1 = 1
AF1_TIM2 = 1
AF2_TIM3 = 2
AF2_TIM4 = 2
AF2_TIM5 = 2
AF3_TIM10 = 3
AF3_TIM11 = 3
AF3_TIM9 = 3
AF4_I2C1 = 4
AF4_I2C3 = 4
AF5_SPI1 = 5
AF5_SPI2 = 5
AF7_USART1 = 7
AF7_USART2 = 7
AF8_USART6 = 8
AF9_I2C3 = 9
AF_OD = 18
AF_PP = 2
ALT = 2
ALT_OPEN_DRAIN = 18
ANALOG = 3
IN = 0
IRQ_FALLING = 270598144
IRQ_RISING = 269549568
OPEN_DRAIN = 17
OUT = 1
OUT_OD = 17
OUT_PP = 1
PULL_DOWN = 2
PULL_NONE = 0
PULL_UP = 1
def af():
pass
def af_list():
pass
board = None
cpu = None
def debug():
pass
def dict():
pass
def gpio():
pass
def high():
pass
def init():
pass
def irq():
pass
def low():
pass
def mapper():
pass
def mode():
pass
def name():
pass
def names():
pass
def off():
pass
def on():
pass
def pin():
pass
def port():
pass
def pull():
pass
def value():
pass
class RTC:
''
def calibration():
pass
def datetime():
pass
def info():
pass
def init():
pass
def wakeup():
pass
SD = None
class SDCard:
''
def info():
pass
def ioctl():
pass
def power():
pass
def present():
pass
def read():
pass
def readblocks():
pass
def write():
pass
def writeblocks():
pass
class SPI:
''
LSB = 128
MASTER = 260
MSB = 0
SLAVE = 0
def deinit():
pass
def init():
pass
def read():
pass
def readinto():
pass
def recv():
pass
def send():
pass
def send_recv():
pass
def write():
pass
def write_readinto():
pass
class Servo:
''
def angle():
pass
def calibration():
pass
def pulse_width():
pass
def speed():
pass
class Switch:
''
def callback():
pass
def value():
pass
class Timer:
''
BOTH = 10
CENTER = 32
DOWN = 16
ENC_A = 9
ENC_AB = 11
ENC_B = 10
FALLING = 2
HIGH = 0
IC = 8
LOW = 2
OC_ACTIVE = 3
OC_FORCED_ACTIVE = 6
OC_FORCED_INACTIVE = 7
OC_INACTIVE = 4
OC_TIMING = 2
OC_TOGGLE = 5
PWM = 0
PWM_INVERTED = 1
RISING = 0
UP = 0
def callback():
pass
def channel():
pass
def counter():
pass
def deinit():
pass
def freq():
pass
def init():
pass
def period():
pass
def prescaler():
pass
def source_freq():
pass
class UART:
''
CTS = 512
RTS = 256
def any():
pass
def deinit():
pass
def init():
pass
def read():
pass
def readchar():
pass
def readinto():
pass
def readline():
pass
def sendbreak():
pass
def write():
pass
def writechar():
pass
class USB_HID:
''
def recv():
pass
def send():
pass
class USB_VCP:
''
def any():
pass
def close():
pass
def isconnected():
pass
def read():
pass
def readinto():
pass
def readline():
pass
def readlines():
pass
def recv():
pass
def send():
pass
def setinterrupt():
pass
def write():
pass
def bootloader():
pass
def delay():
pass
def dht_readinto():
pass
def disable_irq():
pass
def elapsed_micros():
pass
def elapsed_millis():
pass
def enable_irq():
pass
def fault_debug():
pass
def freq():
pass
def hard_reset():
pass
def have_cdc():
pass
def hid():
pass
hid_keyboard = None
hid_mouse = None
def info():
pass
def main():
pass
def micros():
pass
def millis():
pass
def mount():
pass
def pwm():
pass
def repl_info():
pass
def repl_uart():
pass
def servo():
pass
def standby():
pass
def stop():
pass
def sync():
pass
def udelay():
pass
def unique_id():
pass
def usb_mode():
pass
def wfi():
pass
| 696 | 0 | 3,945 |
df2419dd964817e42ac855a45dd5c7a9291c652c | 2,115 | py | Python | index_auth_service/friends/tests/test_managers.py | Silver3310/Index-auth-service | 2dcd71f735f2b160fa0056c0bb752fcad7a84ecd | [
"MIT"
] | 1 | 2022-02-09T02:57:10.000Z | 2022-02-09T02:57:10.000Z | index_auth_service/friends/tests/test_managers.py | Silver3310/Index-auth-service | 2dcd71f735f2b160fa0056c0bb752fcad7a84ecd | [
"MIT"
] | null | null | null | index_auth_service/friends/tests/test_managers.py | Silver3310/Index-auth-service | 2dcd71f735f2b160fa0056c0bb752fcad7a84ecd | [
"MIT"
] | 1 | 2022-02-09T03:03:26.000Z | 2022-02-09T03:03:26.000Z | import pytest
from django.conf import settings
from index_auth_service.friends.models import Friendship
from index_auth_service.users.tests.factories import UserFactory
from .factories import FriendshipFactory
pytestmark = pytest.mark.django_db
| 26.772152 | 79 | 0.640189 | import pytest
from django.conf import settings
from index_auth_service.friends.models import Friendship
from index_auth_service.users.tests.factories import UserFactory
from .factories import FriendshipFactory
pytestmark = pytest.mark.django_db
class TestFriendshipManager:
    """Unit tests for the custom ``Friendship`` manager/queryset helpers.

    ``user`` / ``user_friend`` are pytest fixtures (presumably user_friend
    comes with an existing friendship to user — verify against conftest).
    """

    def test_find_friends(
        self,
        user: settings.AUTH_USER_MODEL
    ):
        """find_friends() returns only friendships involving ``user``."""
        FriendshipFactory(user_to=user)
        FriendshipFactory(user_from=user)
        # check that filtering works correctly for all items
        assert all(
            user == friends.user_to or user == friends.user_from
            for friends in Friendship.objects.get_queryset().find_friends(user)
        )

    def test_make_user_friend(
        self,
        user: settings.AUTH_USER_MODEL
    ):
        """A freshly created friendship starts in WAITING_FOR_REPLY."""
        friends: Friendship = Friendship.objects.make_user_friend(
            user,
            UserFactory()
        )
        assert friends.status == Friendship.WAITING_FOR_REPLY

    def test_get_user_friend(
        self,
        user: settings.AUTH_USER_MODEL,
        user_friend: settings.AUTH_USER_MODEL
    ):
        """get_user_friend() matches a direct ``objects.get`` lookup."""
        assert Friendship.objects.get_user_friend(
            user_to=user,
            user_from=user_friend
        ) == Friendship.objects.get(
            user_to=user,
            user_from=user_friend
        )

    # Renamed from "test_approve_user_fried" — typo fix; still discovered
    # by pytest via the test_ prefix and referenced by no caller.
    def test_approve_user_friend(
        self,
        user: settings.AUTH_USER_MODEL,
        user_friend: settings.AUTH_USER_MODEL
    ):
        """Approving a pending request moves the friendship to FRIENDS."""
        Friendship.objects.approve_user_friend(
            user_to=user,
            user_from=user_friend
        )
        assert Friendship.objects.get_user_friend(
            user_to=user,
            user_from=user_friend
        ).status == Friendship.FRIENDS

    def test_delete_user_friend(
        self,
        user: settings.AUTH_USER_MODEL,
        user_friend: settings.AUTH_USER_MODEL
    ):
        """Deleting a friendship removes it entirely (lookup yields None)."""
        Friendship.objects.delete_user_friend(
            user_to=user,
            user_from=user_friend
        )
        assert Friendship.objects.get_user_friend(
            user_to=user,
            user_from=user_friend
        ) is None
| 1,702 | 7 | 157 |
557cc8a7aa370fe5c8af45f16078aa56b25e405f | 674 | py | Python | mesh/reconstruction.py | gavin971/pyro2 | 55c6d98b9c5d9372badc703ad5deb4a9d2cb8b06 | [
"BSD-3-Clause"
] | 3 | 2017-05-24T14:16:49.000Z | 2019-01-02T19:21:07.000Z | mesh/reconstruction.py | gavin971/pyro2 | 55c6d98b9c5d9372badc703ad5deb4a9d2cb8b06 | [
"BSD-3-Clause"
] | null | null | null | mesh/reconstruction.py | gavin971/pyro2 | 55c6d98b9c5d9372badc703ad5deb4a9d2cb8b06 | [
"BSD-3-Clause"
] | null | null | null | import mesh.patch as patch
import mesh.reconstruction_f as reconstruction_f
import mesh.array_indexer as ai
| 33.7 | 83 | 0.626113 | import mesh.patch as patch
import mesh.reconstruction_f as reconstruction_f
import mesh.array_indexer as ai
def limit(data, myg, idir, limiter):
    """Apply a slope limiter to cell-centered data.

    Parameters
    ----------
    data : array
        The cell-centered data whose slopes are limited.
    myg : grid object
        Supplies ``qx``, ``qy`` and ``ng`` (grid dimensions / ghost cells).
    idir : int
        Direction of the difference; ignored by the multidimensional path.
    limiter : int
        0 = no limiting, 1 = 2nd-order limiter, other values < 10 =
        4th-order limiter, >= 10 = multidimensional limiter.

    Returns
    -------
    ArrayIndexer, or a tuple of two ArrayIndexers (x- and y-slopes) for
    the multidimensional limiter.
    """
    if limiter < 10:
        if limiter == 0:
            limit_func = reconstruction_f.nolimit
        elif limiter == 1:
            limit_func = reconstruction_f.limit2
        else:
            limit_func = reconstruction_f.limit4
        return ai.ArrayIndexer(d=limit_func(idir, data, myg.qx, myg.qy, myg.ng),
                               grid=myg)
    else:
        # BUG FIX: the original referenced undefined names ``a``, ``qx``
        # and ``qy`` here (guaranteed NameError on this branch); use the
        # actual arguments, mirroring the single-direction path above.
        ldax, lday = reconstruction_f.multid_limit(data, myg.qx, myg.qy, myg.ng)
        return ai.ArrayIndexer(d=ldax, grid=myg), ai.ArrayIndexer(d=lday, grid=myg)
| 543 | 0 | 23 |
0b1927f39978f94cb677ac03ce285573a8b64c21 | 320 | py | Python | src/os_walk_folder_iterator.py | guionardo/python-folder-iteration | 6a3d076d1dee93dcd9e712e78975a9cd35013893 | [
"MIT"
] | null | null | null | src/os_walk_folder_iterator.py | guionardo/python-folder-iteration | 6a3d076d1dee93dcd9e712e78975a9cd35013893 | [
"MIT"
] | null | null | null | src/os_walk_folder_iterator.py | guionardo/python-folder-iteration | 6a3d076d1dee93dcd9e712e78975a9cd35013893 | [
"MIT"
] | null | null | null | import os
from typing import Generator
from src.abstract_folder_iterator import FolderIterator
| 24.615385 | 55 | 0.69375 | import os
from typing import Generator
from src.abstract_folder_iterator import FolderIterator
class OSWalkFolderIterator(FolderIterator):
    """Folder iterator backed by ``os.walk``."""

    def _get_files(self, folder: str) -> Generator:
        """Yield the full path of every file under *folder*, recursively."""
        for dirpath, _dirnames, filenames in os.walk(folder):
            yield from (os.path.join(dirpath, name) for name in filenames)
| 151 | 22 | 50 |
ce70a32da7a4765cdeef8eec12e8bade1f28ad59 | 1,848 | py | Python | main/mpv/template.py | RoastVeg/cports | 803c7f07af341eb32f791b6ec1f237edb2764bd5 | [
"BSD-2-Clause"
] | null | null | null | main/mpv/template.py | RoastVeg/cports | 803c7f07af341eb32f791b6ec1f237edb2764bd5 | [
"BSD-2-Clause"
] | null | null | null | main/mpv/template.py | RoastVeg/cports | 803c7f07af341eb32f791b6ec1f237edb2764bd5 | [
"BSD-2-Clause"
] | null | null | null | pkgname = "mpv"
pkgver = "0.34.1"
pkgrel = 0
build_style = "waf"
configure_args = [
"--confdir=/etc/mpv", "--docdir=/usr/share/examples/mpv",
"--zshdir=/usr/share/zsh/site-functions", "--enable-libmpv-shared",
"--enable-cplugins", "--enable-cdda", "--enable-dvbin",
"--enable-dvdnav", "--enable-libarchive", "--enable-pulse",
"--enable-jack", "--enable-lcms2", "--enable-lua", "--enable-vdpau",
"--enable-vulkan", "--enable-shaderc", "--enable-wayland",
"--enable-x11", "--enable-caca", "--enable-vapoursynth",
"--enable-zimg",
"--disable-alsa", "--disable-openal", "--disable-sdl2",
]
hostmakedepends = [
"pkgconf", "python", "python-docutils", "wayland-progs"
]
makedepends = [
"libarchive-devel", "lua5.1-devel", "libuuid-devel", "mesa-devel",
"vulkan-headers", "vulkan-loader", "libplacebo-devel", "shaderc-devel",
"ffmpeg-devel", "libxv-devel", "libxrandr-devel", "libxinerama-devel",
"libxscrnsaver-devel", "libxkbcommon-devel", "wayland-devel",
"wayland-protocols", "libvdpau-devel", "libva-devel", "libpulse-devel",
"pipewire-jack-devel", "lcms2-devel", "libass-devel", "libbluray-devel",
"libdvdnav-devel", "libcdio-paranoia-devel", "rubberband-devel",
"uchardet-devel", "harfbuzz-devel", "libcaca-devel", "zimg-devel",
"vapoursynth-devel",
]
depends = ["hicolor-icon-theme"]
pkgdesc = "Video player based on mplayer2"
maintainer = "q66 <q66@chimera-linux.org>"
license = "GPL-2.0-or-later"
url = "https://mpv.io"
source = f"https://github.com/mpv-player/{pkgname}/archive/v{pkgver}.tar.gz"
sha256 = "32ded8c13b6398310fa27767378193dc1db6d78b006b70dbcbd3123a1445e746"
# no test suite
options = ["!check"]
@subpackage("mpv-devel")
| 40.173913 | 76 | 0.666126 | pkgname = "mpv"
pkgver = "0.34.1"
pkgrel = 0
build_style = "waf"
configure_args = [
"--confdir=/etc/mpv", "--docdir=/usr/share/examples/mpv",
"--zshdir=/usr/share/zsh/site-functions", "--enable-libmpv-shared",
"--enable-cplugins", "--enable-cdda", "--enable-dvbin",
"--enable-dvdnav", "--enable-libarchive", "--enable-pulse",
"--enable-jack", "--enable-lcms2", "--enable-lua", "--enable-vdpau",
"--enable-vulkan", "--enable-shaderc", "--enable-wayland",
"--enable-x11", "--enable-caca", "--enable-vapoursynth",
"--enable-zimg",
"--disable-alsa", "--disable-openal", "--disable-sdl2",
]
hostmakedepends = [
"pkgconf", "python", "python-docutils", "wayland-progs"
]
makedepends = [
"libarchive-devel", "lua5.1-devel", "libuuid-devel", "mesa-devel",
"vulkan-headers", "vulkan-loader", "libplacebo-devel", "shaderc-devel",
"ffmpeg-devel", "libxv-devel", "libxrandr-devel", "libxinerama-devel",
"libxscrnsaver-devel", "libxkbcommon-devel", "wayland-devel",
"wayland-protocols", "libvdpau-devel", "libva-devel", "libpulse-devel",
"pipewire-jack-devel", "lcms2-devel", "libass-devel", "libbluray-devel",
"libdvdnav-devel", "libcdio-paranoia-devel", "rubberband-devel",
"uchardet-devel", "harfbuzz-devel", "libcaca-devel", "zimg-devel",
"vapoursynth-devel",
]
depends = ["hicolor-icon-theme"]
pkgdesc = "Video player based on mplayer2"
maintainer = "q66 <q66@chimera-linux.org>"
license = "GPL-2.0-or-later"
url = "https://mpv.io"
source = f"https://github.com/mpv-player/{pkgname}/archive/v{pkgver}.tar.gz"
sha256 = "32ded8c13b6398310fa27767378193dc1db6d78b006b70dbcbd3123a1445e746"
# no test suite
options = ["!check"]
def post_patch(self):
self.do("python", "bootstrap.py", allow_network = True)
@subpackage("mpv-devel")
def _devel(self):
return self.default_devel()
| 88 | 0 | 45 |
f22df4fc15a7c5e6aa2e77a721d485a5dc92c579 | 804 | py | Python | LeetCode/0515. Find Largest Value in Each Tree Row/solution.py | InnoFang/algorithms | 01847903f757722b6c877e1631e5413b9376c82e | [
"Apache-2.0"
] | null | null | null | LeetCode/0515. Find Largest Value in Each Tree Row/solution.py | InnoFang/algorithms | 01847903f757722b6c877e1631e5413b9376c82e | [
"Apache-2.0"
] | null | null | null | LeetCode/0515. Find Largest Value in Each Tree Row/solution.py | InnoFang/algorithms | 01847903f757722b6c877e1631e5413b9376c82e | [
"Apache-2.0"
] | null | null | null | # Definition for a binary tree node.
# class TreeNode:
# def __init__(self, val=0, left=None, right=None):
# self.val = val
# self.left = left
# self.right = right
"""
78 / 78 test cases passed.
Runtime: 52 ms
Memory Usage: 17.6 MB
"""
| 26.8 | 67 | 0.497512 | # Definition for a binary tree node.
# class TreeNode:
# def __init__(self, val=0, left=None, right=None):
# self.val = val
# self.left = left
# self.right = right
"""
78 / 78 test cases passed.
Runtime: 52 ms
Memory Usage: 17.6 MB
"""
class Solution:
    def largestValues(self, root: Optional[TreeNode]) -> List[int]:
        """Return the largest node value on each level of the binary
        tree, ordered from the root level downward (level-order BFS)."""
        if not root:
            return []
        largest = []
        level = [root]
        while level:
            largest.append(max(node.val for node in level))
            # Next frontier: all existing children of the current level.
            level = [child
                     for node in level
                     for child in (node.left, node.right)
                     if child]
        return largest
| 497 | -6 | 48 |
b0c0f27cf3bfab65350e69a5ce89c56ac4cad14d | 5,911 | py | Python | examples/pl_getdata.py | sanduanji/reid_platform | daa410acf5ec96bf4e17fceaeaf5e209e3928307 | [
"MIT"
] | null | null | null | examples/pl_getdata.py | sanduanji/reid_platform | daa410acf5ec96bf4e17fceaeaf5e209e3928307 | [
"MIT"
] | null | null | null | examples/pl_getdata.py | sanduanji/reid_platform | daa410acf5ec96bf4e17fceaeaf5e209e3928307 | [
"MIT"
] | null | null | null | from __future__ import print_function, absolute_import
import argparse
import os.path as osp
import numpy as np
import sys
import torch
from torch import nn
from torch.backends import cudnn
from torch.utils.data import DataLoader
import tkinter as tk
from tkinter import ttk
import tkinter.font as tkFont
from reid import datasets
from reid import models
from reid.loss import TripletLoss
from reid.dist_metric import DistanceMetric
from reid.trainers import Trainer
from reid.evaluators import Evaluator
from reid.utils.data import transforms as T
from reid.utils.data.preprocessor import Preprocessor
from reid.utils.logging import Logger
from reid.utils.serialization import load_checkpoint, save_checkpoint
from reid.utils.data.sampler import RandomIdentitySampler
from reid.loss import OIMLoss
from examples.pl_createmodel import cr_oimmodel, cr_softmaxmodel, cr_tripletmodel
from examples.pl_parameters import *
from examples.pl_optimizer import softmax_op, triplet_op, oim_op
| 32.838889 | 82 | 0.682456 | from __future__ import print_function, absolute_import
import argparse
import os.path as osp
import numpy as np
import sys
import torch
from torch import nn
from torch.backends import cudnn
from torch.utils.data import DataLoader
import tkinter as tk
from tkinter import ttk
import tkinter.font as tkFont
from reid import datasets
from reid import models
from reid.loss import TripletLoss
from reid.dist_metric import DistanceMetric
from reid.trainers import Trainer
from reid.evaluators import Evaluator
from reid.utils.data import transforms as T
from reid.utils.data.preprocessor import Preprocessor
from reid.utils.logging import Logger
from reid.utils.serialization import load_checkpoint, save_checkpoint
from reid.utils.data.sampler import RandomIdentitySampler
from reid.loss import OIMLoss
from examples.pl_createmodel import cr_oimmodel, cr_softmaxmodel, cr_tripletmodel
from examples.pl_parameters import *
from examples.pl_optimizer import softmax_op, triplet_op, oim_op
def getdata_sm(name, split_id, data_dir, height, width, batch_size, workers,
               combine_trainval):
    """Build the dataset and data loaders for softmax-loss training.

    Returns (dataset, num_classes, train_loader, val_loader, test_loader).
    The train loader is shuffled; val/test loaders are deterministic, and
    the test loader covers the union of query and gallery images.
    """
    root = osp.join(data_dir, name)
    dataset = datasets.create(name, root, split_id=split_id)
    # ImageNet mean/std normalization.
    normalizer = T.Normalize(mean=[0.485, 0.456, 0.406],
                             std=[0.229, 0.224, 0.225])
    # Optionally fold the validation identities into training.
    train_set = dataset.trainval if combine_trainval else dataset.train
    num_classes = (dataset.num_trainval_ids if combine_trainval
                   else dataset.num_train_ids)
    train_transformer = T.Compose([
        T.RandomSizedRectCrop(height, width),
        T.RandomHorizontalFlip(),
        T.ToTensor(),
        normalizer,
    ])
    test_transformer = T.Compose([
        T.RectScale(height, width),
        T.ToTensor(),
        normalizer,
    ])
    train_loader = DataLoader(
        Preprocessor(train_set, root=dataset.images_dir,
                     transform=train_transformer),
        batch_size=batch_size, num_workers=workers,
        shuffle=True, pin_memory=True, drop_last=True)
    val_loader = DataLoader(
        Preprocessor(dataset.val, root=dataset.images_dir,
                     transform=test_transformer),
        batch_size=batch_size, num_workers=workers,
        shuffle=False, pin_memory=True)
    test_loader = DataLoader(
        Preprocessor(list(set(dataset.query) | set(dataset.gallery)),
                     root=dataset.images_dir, transform=test_transformer),
        batch_size=batch_size, num_workers=workers,
        shuffle=False, pin_memory=True)
    return dataset, num_classes, train_loader, val_loader, test_loader
def getdata_tl(name, split_id, data_dir, height, width, batch_size, num_instances,
               workers, combine_trainval):
    """Build the dataset and data loaders for triplet-loss training.

    Like getdata_sm(), but the train loader samples identities with
    ``RandomIdentitySampler`` (num_instances images per identity) instead
    of plain shuffling.
    """
    # NOTE(review): this clobbers the ``workers`` parameter — callers'
    # value is silently ignored. Looks like a leftover hack; confirm.
    workers = 4
    root = osp.join(data_dir, name)
    dataset = datasets.create(name, root, split_id=split_id)
    # ImageNet mean/std normalization.
    normalizer = T.Normalize(mean=[0.485, 0.456, 0.406],
                             std=[0.229, 0.224, 0.225])
    # Optionally fold the validation identities into training.
    train_set = dataset.trainval if combine_trainval else dataset.train
    num_classes = (dataset.num_trainval_ids if combine_trainval
                   else dataset.num_train_ids)
    train_transformer = T.Compose([
        T.RandomSizedRectCrop(height, width),
        T.RandomHorizontalFlip(),
        T.ToTensor(),
        normalizer,
    ])
    test_transformer = T.Compose([
        T.RectScale(height, width),
        T.ToTensor(),
        normalizer,
    ])
    train_loader = DataLoader(
        Preprocessor(train_set, root=dataset.images_dir,
                     transform=train_transformer),
        batch_size=batch_size, num_workers=workers,
        sampler=RandomIdentitySampler(train_set, num_instances),
        pin_memory=True, drop_last=True)
    val_loader = DataLoader(
        Preprocessor(dataset.val, root=dataset.images_dir,
                     transform=test_transformer),
        batch_size=batch_size, num_workers=workers,
        shuffle=False, pin_memory=True)
    test_loader = DataLoader(
        Preprocessor(list(set(dataset.query) | set(dataset.gallery)),
                     root=dataset.images_dir, transform=test_transformer),
        batch_size=batch_size, num_workers=workers,
        shuffle=False, pin_memory=True)
    return dataset, num_classes, train_loader, val_loader, test_loader
def getdata_oim(name, split_id, data_dir, height, width, batch_size, workers,
                combine_trainval):
    """Build the dataset and data loaders for OIM-loss training.

    NOTE(review): this body is byte-identical to getdata_sm(); consider
    delegating to it to avoid the two copies drifting apart.
    """
    root = osp.join(data_dir, name)
    dataset = datasets.create(name, root, split_id=split_id)
    # ImageNet mean/std normalization.
    normalizer = T.Normalize(mean=[0.485, 0.456, 0.406],
                             std=[0.229, 0.224, 0.225])
    # Optionally fold the validation identities into training.
    train_set = dataset.trainval if combine_trainval else dataset.train
    num_classes = (dataset.num_trainval_ids if combine_trainval
                   else dataset.num_train_ids)
    train_transformer = T.Compose([
        T.RandomSizedRectCrop(height, width),
        T.RandomHorizontalFlip(),
        T.ToTensor(),
        normalizer,
    ])
    test_transformer = T.Compose([
        T.RectScale(height, width),
        T.ToTensor(),
        normalizer,
    ])
    train_loader = DataLoader(
        Preprocessor(train_set, root=dataset.images_dir,
                     transform=train_transformer),
        batch_size=batch_size, num_workers=workers,
        shuffle=True, pin_memory=True, drop_last=True)
    val_loader = DataLoader(
        Preprocessor(dataset.val, root=dataset.images_dir,
                     transform=test_transformer),
        batch_size=batch_size, num_workers=workers,
        shuffle=False, pin_memory=True)
    test_loader = DataLoader(
        Preprocessor(list(set(dataset.query) | set(dataset.gallery)),
                     root=dataset.images_dir, transform=test_transformer),
        batch_size=batch_size, num_workers=workers,
        shuffle=False, pin_memory=True)
    # NOTE(review): the trailing "| 4,849 | 0 | 69 |" below looks like
    # dataset metadata fused onto this line (not valid Python) — confirm
    # against the upstream file before building on this copy.
    return dataset, num_classes, train_loader, val_loader, test_loader | 4,849 | 0 | 69 |
8e61a50ed948dbcfa3ee82bfe54cebfd85d3d7ea | 1,406 | py | Python | expressions/numbers.py | frapa/gala | cfaba6dd3966d26715dd84101d451ff96ff37c0a | [
"MIT"
] | 1 | 2018-07-22T16:19:29.000Z | 2018-07-22T16:19:29.000Z | expressions/numbers.py | frapa/gala | cfaba6dd3966d26715dd84101d451ff96ff37c0a | [
"MIT"
] | null | null | null | expressions/numbers.py | frapa/gala | cfaba6dd3966d26715dd84101d451ff96ff37c0a | [
"MIT"
] | null | null | null | import expressions
| 23.830508 | 52 | 0.612376 | import expressions
class Decimal(expressions.Expression):
    """Decimal integer literal; emitted verbatim in the generated C."""
    def __init__(self, parent, expr):
        super(Decimal, self).__init__(parent)
        # Concatenate the parsed digit characters into the literal text.
        self.number = "".join(expr.children)
    def get_type(self):
        """The literal's type is taken from the enclosing context."""
        return self.get_ctx().get('type')
    def to_c(self):
        """Render the literal as C source text."""
        return "{}".format(self.number)
class Binary(expressions.Expression):
    """Binary integer literal; stored as its decimal integer value, so the
    generated C contains a plain decimal number."""
    def __init__(self, parent, expr):
        super(Binary, self).__init__(parent)
        # Fold the bit characters (most-significant first) into an int;
        # equivalent to the reversed power-of-two summation.
        value = 0
        for bit in expr.children:
            value = value * 2 + int(bit)
        self.number = value
    def get_type(self):
        """The literal's type is taken from the enclosing context."""
        return self.get_ctx().get('type')
    def to_c(self):
        """Render the literal as C source text (decimal form)."""
        return "{}".format(self.number)
class Octal(expressions.Expression):
    """Octal integer literal; stored with the C-style leading ``0``."""
    def __init__(self, parent, expr):
        super(Octal, self).__init__(parent)
        digits = "".join(expr.children)
        self.number = "0" + digits
    def get_type(self):
        """The literal's type is taken from the enclosing context."""
        return self.get_ctx().get('type')
    def to_c(self):
        """Render the literal as C source text."""
        return "{}".format(self.number)
class Hex(expressions.Expression):
    """Hexadecimal integer literal; stored with its C-style ``0x`` prefix."""
    def __init__(self, parent, expr):
        super(Hex, self).__init__(parent)
        self.number = "0x" + "".join(expr.children)
    def get_type(self):
        # The literal's type is taken from the enclosing context.
        return self.get_ctx().get('type')
    def to_c(self):
        # NOTE(review): the trailing "| 911 | 61 | 412 |" below looks like
        # dataset metadata fused onto this line (not valid Python) —
        # confirm against the upstream file.
        return "{number}".format(number=self.number) | 911 | 61 | 412 |
b9cd0b753d491535080a2df3e0a18d2ddaea4805 | 223 | py | Python | dgmr/__init__.py | johmathe/skillful_nowcasting | e1f74144996f830fcf4997c3a644f7a0c4d13f43 | [
"MIT"
] | 42 | 2021-09-09T13:05:18.000Z | 2022-03-30T20:44:19.000Z | dgmr/__init__.py | johmathe/skillful_nowcasting | e1f74144996f830fcf4997c3a644f7a0c4d13f43 | [
"MIT"
] | 17 | 2021-09-06T13:58:06.000Z | 2022-03-23T04:45:09.000Z | dgmr/__init__.py | johmathe/skillful_nowcasting | e1f74144996f830fcf4997c3a644f7a0c4d13f43 | [
"MIT"
] | 15 | 2021-09-30T04:50:39.000Z | 2022-03-24T07:44:01.000Z | from .dgmr import DGMR
from .generators import Sampler, Generator
from .discriminators import SpatialDiscriminator, TemporalDiscriminator, Discriminator
from .common import LatentConditioningStack, ContextConditioningStack
| 44.6 | 86 | 0.874439 | from .dgmr import DGMR
from .generators import Sampler, Generator
from .discriminators import SpatialDiscriminator, TemporalDiscriminator, Discriminator
from .common import LatentConditioningStack, ContextConditioningStack
| 0 | 0 | 0 |
5837127b4be46697ea9b309d1ac4b6a50ace87b7 | 1,592 | py | Python | pyopenproject/business/services/command/time_entry/update.py | webu/pyopenproject | 40b2cb9fe0fa3f89bc0fe2a3be323422d9ecf966 | [
"MIT"
] | 5 | 2021-02-25T15:54:28.000Z | 2021-04-22T15:43:36.000Z | pyopenproject/business/services/command/time_entry/update.py | webu/pyopenproject | 40b2cb9fe0fa3f89bc0fe2a3be323422d9ecf966 | [
"MIT"
] | 7 | 2021-03-15T16:26:23.000Z | 2022-03-16T13:45:18.000Z | pyopenproject/business/services/command/time_entry/update.py | webu/pyopenproject | 40b2cb9fe0fa3f89bc0fe2a3be323422d9ecf966 | [
"MIT"
] | 6 | 2021-06-18T18:59:11.000Z | 2022-03-27T04:58:52.000Z | from contextlib import suppress
from pyopenproject.api_connection.exceptions.request_exception import RequestError
from pyopenproject.api_connection.requests.patch_request import PatchRequest
from pyopenproject.business.exception.business_error import BusinessError
from pyopenproject.business.services.command.time_entry.time_entry_command import TimeEntryCommand
from pyopenproject.model import time_entry as te
| 46.823529 | 98 | 0.711683 | from contextlib import suppress
from pyopenproject.api_connection.exceptions.request_exception import RequestError
from pyopenproject.api_connection.requests.patch_request import PatchRequest
from pyopenproject.business.exception.business_error import BusinessError
from pyopenproject.business.services.command.time_entry.time_entry_command import TimeEntryCommand
from pyopenproject.model import time_entry as te
class Update(TimeEntryCommand):
    """Command that PATCHes an existing time entry in OpenProject."""
    def __init__(self, connection, time_entry):
        # time_entry: the entry to update, carrying the modified fields.
        super().__init__(connection)
        self.time_entry = time_entry
    def execute(self):
        """Send the PATCH request and return the updated TimeEntry.

        Raises BusinessError when the underlying request fails.
        """
        try:
            time_entry_id = self.time_entry.id
            # Server rejects read-only fields, so strip them first.
            self.__remove_readonly_attributes()
            json_obj = PatchRequest(connection=self.connection,
                                    context=f"{self.CONTEXT}/{time_entry_id}",
                                    json=self.time_entry.__dict__).execute()
            return te.TimeEntry(json_obj)
        except RequestError as re:
            raise BusinessError(f"Error updating a time entry with ID: {time_entry_id}") from re
    def __remove_readonly_attributes(self):
        # Each deletion is individually suppressed so partially-populated
        # entries (missing keys) are tolerated.
        with suppress(KeyError): del self.time_entry.__dict__["_type"]
        with suppress(KeyError): del self.time_entry.__dict__["_links"]["self"]
        with suppress(KeyError): del self.time_entry.__dict__["_links"]["user"]
        with suppress(KeyError): del self.time_entry.__dict__["id"]
        with suppress(KeyError): del self.time_entry.__dict__["createdAt"]
        # NOTE(review): the trailing "| 1,063 | 10 | 104 |" below looks like
        # dataset metadata fused onto this line (not valid Python) —
        # confirm against the upstream file.
        with suppress(KeyError): del self.time_entry.__dict__["updatedAt"] | 1,063 | 10 | 104 |