| hexsha (stringlengths 40..40) | size (int64 5..2.06M) | ext (stringclasses, 10 values) | lang (stringclasses, 1 value) | max_stars_repo_path (stringlengths 3..248) | max_stars_repo_name (stringlengths 5..125) | max_stars_repo_head_hexsha (stringlengths 40..78) | max_stars_repo_licenses (listlengths 1..10) | max_stars_count (int64 1..191k ⌀) | max_stars_repo_stars_event_min_datetime (stringlengths 24..24 ⌀) | max_stars_repo_stars_event_max_datetime (stringlengths 24..24 ⌀) | max_issues_repo_path (stringlengths 3..248) | max_issues_repo_name (stringlengths 5..125) | max_issues_repo_head_hexsha (stringlengths 40..78) | max_issues_repo_licenses (listlengths 1..10) | max_issues_count (int64 1..67k ⌀) | max_issues_repo_issues_event_min_datetime (stringlengths 24..24 ⌀) | max_issues_repo_issues_event_max_datetime (stringlengths 24..24 ⌀) | max_forks_repo_path (stringlengths 3..248) | max_forks_repo_name (stringlengths 5..125) | max_forks_repo_head_hexsha (stringlengths 40..78) | max_forks_repo_licenses (listlengths 1..10) | max_forks_count (int64 1..105k ⌀) | max_forks_repo_forks_event_min_datetime (stringlengths 24..24 ⌀) | max_forks_repo_forks_event_max_datetime (stringlengths 24..24 ⌀) | content (stringlengths 5..2.06M) | avg_line_length (float64 1..1.02M) | max_line_length (int64 3..1.03M) | alphanum_fraction (float64 0..1) | count_classes (int64 0..1.6M) | score_classes (float64 0..1) | count_generators (int64 0..651k) | score_generators (float64 0..1) | count_decorators (int64 0..990k) | score_decorators (float64 0..1) | count_async_functions (int64 0..235k) | score_async_functions (float64 0..1) | count_documentation (int64 0..1.04M) | score_documentation (float64 0..1) |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
3d2ce2c966a31e97ee5b7a66b2aeabb6f1778574 | 35 | py | Python | arcpyext/mapping/_cim/__init__.py | PeterReyne/arcpyext | 9307115da8f0b6a30e2ca741fb6a7d09e54fd0f3 | ["BSD-3-Clause"] | 11 | 2015-05-01T04:08:30.000Z | 2019-09-21T05:00:58.000Z | arcpyext/mapping/_cim/__init__.py | PeterReyne/arcpyext | 9307115da8f0b6a30e2ca741fb6a7d09e54fd0f3 | ["BSD-3-Clause"] | 14 | 2015-06-23T02:46:44.000Z | 2019-10-11T00:46:11.000Z | arcpyext/mapping/_cim/__init__.py | PeterReyne/arcpyext | 9307115da8f0b6a30e2ca741fb6a7d09e54fd0f3 | ["BSD-3-Clause"] | 9 | 2015-02-27T05:25:42.000Z | 2020-01-19T05:43:14.000Z |
from .pro_project import ProProject
| 35 | 35 | 0.885714 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
3d2d9019566fcc96f253a9e2a983330775a08ac2 | 3,474 | py | Python | o3/operators/filter_logs_to_percentage_operator.py | carlba/o3 | 999ff1b06ef9c7a5bf220a3e840c4a42dc81956a | ["Unlicense"] | null | null | null | o3/operators/filter_logs_to_percentage_operator.py | carlba/o3 | 999ff1b06ef9c7a5bf220a3e840c4a42dc81956a | ["Unlicense"] | 1 | 2019-01-27T11:04:56.000Z | 2019-01-27T11:04:56.000Z | o3/operators/filter_logs_to_percentage_operator.py | carlba/o3 | 999ff1b06ef9c7a5bf220a3e840c4a42dc81956a | ["Unlicense"] | null | null | null |
# -*- coding: utf-8 -*-
"""Custom operator for filtering out a percentage of input log files."""
import os
import glob
from airflow.exceptions import AirflowException, AirflowSkipException
from airflow.models import BaseOperator
from airflow.utils.decorators import apply_defaults
from o3.utils import filter_to_percentage
class FilterLogsToPercentageOperator(BaseOperator):
"""Filters input files to a percentage, from src dir glob match to dest dir.
:param float percentage: Percentage of input to keep, e.g. `3.0` for 3 %.
:param str glob_pattern: Glob pattern, e.g. '*.log'.
:param str src_dir: Directory path to find input in.
:param str dest_dir: Directory to write filtered output to.
:param str src_fs_type: Source file system, only 'local' supported.
:param str dest_fs_type: Destination file system, only 'local' supported.
:param int max_files: Maximum number of files to filter.
:param bool remove_src: Remove input file after filtering.
"""
ui_color = '#ffefeb'
@apply_defaults
def __init__(self, percentage: float,
glob_pattern: str, src_dir: str, dest_dir: str,
src_fs_type: str = 'local', dest_fs_type: str = 'local',
max_files: int = None, remove_src: bool = True,
*args, **kwargs):
super(FilterLogsToPercentageOperator, self).__init__(*args, **kwargs)
if 0.0 <= percentage <= 100.0:
self.percentage = percentage
else:
raise AirflowException(f'Out-of-range percentage {percentage!r}.')
self.glob_pattern = glob_pattern
self.src_dir = src_dir.rstrip('/')
self.dest_dir = dest_dir.rstrip('/')
if src_fs_type != 'local':
raise AirflowException(f'Unsupported src_fs_type {src_fs_type!r}.')
else:
self.src_fs_type = src_fs_type
if dest_fs_type != 'local':
raise AirflowException(
f'Unsupported dest_fs_type {dest_fs_type!r}.')
else:
self.dest_fs_type = dest_fs_type
self.max_files = max_files
self.remove_src = remove_src
def execute(self, **_) -> list:
src_dir_glob = os.path.join(self.src_dir, self.glob_pattern)
dest_paths = []
def _get_dest_path(src: str) -> str:
if src.lower().endswith('.bz2') or src.lower().endswith('.gz'):
basename = os.path.splitext(os.path.basename(src))[0]
else:
basename = os.path.basename(src)
return os.path.join(self.dest_dir,
f'{basename}__'
f'{round(self.percentage, 1)}pct_filtered')
if self.src_fs_type == 'local' and self.dest_fs_type == 'local':
for src_path in glob.glob(src_dir_glob):
dest_path = _get_dest_path(src_path)
self.log.info(f'Filtering local {src_path} to {dest_path}...')
filter_to_percentage(src_path, self.percentage, dest_path)
dest_paths.append(dest_path)
if self.remove_src:
self.log.info(f'Removing {src_path}.')
os.remove(src_path)
if self.max_files and len(dest_paths) >= self.max_files:
break
if not dest_paths:
self.log.info('No files found, skipping.')
raise AirflowSkipException()
return dest_paths
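A minimal sketch of how this operator might be wired into a DAG; the DAG id, schedule and directory paths are hypothetical, and the import path follows the repository layout shown above:
from datetime import datetime
from airflow import DAG
from o3.operators.filter_logs_to_percentage_operator import FilterLogsToPercentageOperator

# Hypothetical DAG: keep 3% of the *.log files found in /data/incoming.
with DAG(dag_id='filter_logs_example',
         start_date=datetime(2019, 1, 1),
         schedule_interval=None) as dag:
    filter_logs = FilterLogsToPercentageOperator(
        task_id='filter_logs_to_3_percent',
        percentage=3.0,
        glob_pattern='*.log',
        src_dir='/data/incoming',
        dest_dir='/data/filtered',
        max_files=10,
        remove_src=False,
    )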
| 39.033708 | 80 | 0.61399 | 3,145 | 0.905296 | 0 | 0 | 1,119 | 0.322107 | 0 | 0 | 1,062 | 0.305699 |
3d2f723ddb0882b15b4375b0ad2b7ffa05e4cedb | 17,541 | py | Python | alibExp.py | wicknec/WalArt | b23488b4e421699155976d5e726d1c7a906c3243 | ["MIT"] | 2 | 2016-02-02T11:33:27.000Z | 2020-07-28T13:28:25.000Z | alibExp.py | wicknec/WalArt | b23488b4e421699155976d5e726d1c7a906c3243 | ["MIT"] | null | null | null | alibExp.py | wicknec/WalArt | b23488b4e421699155976d5e726d1c7a906c3243 | ["MIT"] | null | null | null |
#! /usr/bin/env python3
"""
alibExp
=======================
Qt4 interface for alib explorer
To browse alib in a more user-friendly way than simple text
Item.data(1,-1) stores its data, i.e. a str or another alib
"""
# NOTE: the actual command documentation is collected from docstrings of the
# commands and is appended to __doc__ after the class has been defined.
"""
Revisions
=================
151125 completed reading functionality
151209 wordless gui, remove node
151210 edit text, add icon to tree, btAdd function
151214 added btRoot, explore root
151219 added *GetCurrent*, modified *RemoveDataSync* to suited with alib.Pop
151229 added *GetSelectedText*
160112 change data display to waText.Brief
160113 change non-editing to read only to allow scroll
160309 fixed save failure by explore root after lock.
171204 updated alibExp.GetSelectedText to return the path of selected node
       fixed bug in treeWidget.ItemToPath
180102 migrate to be compatible with PyQt5
"""
try:
    from PyQt4 import QtCore, QtGui
    from PyQt4.QtCore import QTimer
    from PyQt4.QtGui import QApplication, QWidget
except (ImportError, ModuleNotFoundError):
    print('PyQt4 module not found, try using PyQt5')
    from PyQt5 import QtCore
    from PyQt5.QtWidgets import QApplication, QWidget
    from PyQt5.QtCore import QTimer
    from WalArt.gui.QtGui4or5 import QtGuiFinder
    QtGui = QtGuiFinder()
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'alibExp.ui'
#
# Created by: PyQt4 UI code generator 4.11.4
#
# WARNING! All changes made in this file will be lost!
try:
_fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
def _fromUtf8(s):
return s
try:
_encoding = QtGui.QApplication.UnicodeUTF8
def _translate(context, text, disambig):
return QtGui.QApplication.translate(context, text, disambig, _encoding)
except AttributeError:
def _translate(context, text, disambig):
return QtGui.QApplication.translate(context, text, disambig)
from WalArt import waFile,waText
iconPath=waFile.GetFolderName(waFile.Find('add.png'))
class alibTree(QtGui.QTreeWidget):
dropped = QtCore.pyqtSignal(list)
def __init__(self,parent=None):
super(alibTree,self).__init__(parent)
self.setAcceptDrops(True)
self.data=None
self.imgList=QtGui.QIcon(waFile.Join(iconPath,'list.png'))
self.imgData=QtGui.QIcon(waFile.Join(iconPath,'data.png'))
self.imgBlank=QtGui.QIcon(waFile.Join(iconPath,'blank.png'))
def dragEnterEvent(self, event):
if event.mimeData().hasUrls:
event.accept()
else:
event.ignore()
def dragMoveEvent(self, event):
if event.mimeData().hasUrls:
event.setDropAction(QtCore.Qt.CopyAction)
event.accept()
else:
event.ignore()
def dropEvent(self, event):
if event.mimeData().hasUrls:
event.setDropAction(QtCore.Qt.CopyAction)
event.accept()
filePaths = [
str(url.toLocalFile())
for url in event.mimeData().urls()
]
self.dropped.emit(filePaths)
else:
event.ignore()
def Load(self,a):
'''load the alib into the treeWidget
'''
self.clear()
self.setHeaderLabels(['Key','Value'])
self.data=a
for t in a:
if isinstance(a[t],alib):
ti=QtGui.QTreeWidgetItem([str(t)])
self.addTopLevelItem(ti)
self.LoadToNode(ti,a[t])
ti.setIcon(0,self.imgList)
ti.setExpanded(True)
else:
ti=QtGui.QTreeWidgetItem([str(t)])
if a[t]!='':
ti.setIcon(0,self.imgData)
else:
ti.setIcon(0,self.imgBlank)
self.addTopLevelItem(ti)
ti.setData(1,0,waText.Brief(a[t],20))
ti.setData(1,-1,a[t])
#help(ti)
def LoadToNode(self,node,a):
'''load the alib to node recursively
'''
#print(a)
for t in a:
if isinstance(a[t],alib):
ti=QtGui.QTreeWidgetItem([str(t)])
node.addChild(ti)
self.LoadToNode(ti,a[t])
ti.setIcon(0,self.imgList)
ti.setExpanded(True)
else:
ti=QtGui.QTreeWidgetItem([str(t)])
if a[t]!='':
ti.setIcon(0,self.imgData)
else:
ti.setIcon(0,self.imgBlank)
i=node.addChild(ti)
ti.setData(1,0,waText.Brief(a[t],20))
ti.setData(1,-1,a[t])
def ItemFromPath(self,path):
'''Get item from a path string of keys like: a|b|c
path can also be a list of strings
'''
if isinstance(path,str):
path=path.split('|')
item=None
for i in range(len(self.data.keys())):
#print(self.topLevelItem(i).text(0))
if self.topLevelItem(i).text(0)==path[0]:
item=self.topLevelItem(i)
break
if item!=None:
k=1
while k<len(path):
found=False
for i in range(item.childCount()):
#print(item.child(i).text(0))
if item.child(i).text(0)==path[k]:
item=item.child(i)
found=True
break
if found==False:
return None
else:
k+=1
if k<len(path):
return None
return item
def ItemToPath(self,item):
fl=[item.text(0)]
p=item.parent()
while p!=None:
fl.append(p.text(0))
p=p.parent()
fl.reverse()
return fl
def RemoveNodeSync(self,item):
'''Remove the node from both the view and the alib
'''
p=item.parent()
if p==None:
self.data.Pop(waText.atoi(item.text(0)))
else:
p.data(1,-1).Pop(waText.atoi(item.text(0)))
self.Load(self.data)
from WalArt import alib
def New(d):
'''Make a new alib explorer in the dialog, and return the object
'''
a=alibExp()
a.setupUi(d)
return a
class alibExp(object):
def setupUi(self, Dialog):
Dialog.setObjectName(_fromUtf8("Form"))
Dialog.resize(474, 414)
Dialog.setWindowIcon(QtGui.QIcon(waFile.Join(iconPath,'settings.png')))
self.verticalLayout = QtGui.QVBoxLayout(Dialog)
self.verticalLayout.setObjectName(_fromUtf8("verticalLayout"))
self.groupBox = QtGui.QGroupBox(Dialog)
self.groupBox.setMinimumSize(QtCore.QSize(0, 60))
self.groupBox.setMaximumSize(QtCore.QSize(16777215, 60))
self.groupBox.setObjectName(_fromUtf8("groupBox"))
self.verticalLayout.addWidget(self.groupBox)
self.horizontalLayout = QtGui.QHBoxLayout(self.groupBox)
self.horizontalLayout.setObjectName(_fromUtf8("horizontalLayout"))
self.btRoot = QtGui.QToolButton(self.groupBox)
self.btRoot.setMinimumSize(QtCore.QSize(30, 30))
self.btRoot.setMaximumSize(QtCore.QSize(30, 30))
self.btRoot.setObjectName(_fromUtf8("btRoot"))
self.btRoot.setIcon(QtGui.QIcon(waFile.Join(iconPath,'circle.png')))
self.btRoot.setToolTip('Explore the root node')
self.horizontalLayout.addWidget(self.btRoot)
self.lineEdit = QtGui.QLineEdit(self.groupBox)
self.lineEdit.setMinimumSize(QtCore.QSize(0, 30))
self.lineEdit.setMaximumSize(QtCore.QSize(16777215, 30))
self.lineEdit.setObjectName(_fromUtf8("lineEdit"))
self.horizontalLayout.addWidget(self.lineEdit)
self.btAdd = QtGui.QToolButton(self.groupBox)
self.btAdd.setMinimumSize(QtCore.QSize(30, 30))
self.btAdd.setMaximumSize(QtCore.QSize(30, 30))
self.btAdd.setObjectName(_fromUtf8("btAdd"))
self.btAdd.setIcon(QtGui.QIcon(waFile.Join(iconPath,'add.png')))
self.btAdd.setToolTip('Add this path to the tree')
self.horizontalLayout.addWidget(self.btAdd)
self.btMinus = QtGui.QPushButton(self.groupBox)
self.btMinus.setMinimumSize(QtCore.QSize(30, 30))
self.btMinus.setMaximumSize(QtCore.QSize(30, 30))
self.btMinus.setObjectName(_fromUtf8("btMinus"))
self.btMinus.setIcon(QtGui.QIcon(waFile.Join(iconPath,'minus.png')))
self.btMinus.setToolTip('Delete this node')
self.horizontalLayout.addWidget(self.btMinus)
import WalArt.gui.buttons
self.btLock = WalArt.gui.buttons.btLock(self.groupBox)
self.btLock.setMinimumSize(QtCore.QSize(30, 30))
self.btLock.setMaximumSize(QtCore.QSize(30, 30))
self.btLock.setObjectName(_fromUtf8("btLock"))
self.btLock.setToolTip('Unlock to start editing node content')
self.horizontalLayout.addWidget(self.btLock)
self.splitter = QtGui.QSplitter(Dialog)
self.splitter.setOrientation(QtCore.Qt.Horizontal)
self.splitter.setObjectName(_fromUtf8("splitter"))
self.verticalLayout.addWidget(self.splitter)
self.treeWidget = alibTree(self.splitter)
self.treeWidget.setObjectName(_fromUtf8("treeWidget"))
self.treeWidget.headerItem().setText(0, _fromUtf8("1"))
self.plainTextEdit = QtGui.QPlainTextEdit(self.splitter)
self.plainTextEdit.setObjectName(_fromUtf8("plainTextEdit"))
self.messenger = QtGui.QLabel(Dialog)
self.messenger.setMinimumSize(QtCore.QSize(0, 30))
self.messenger.setMaximumSize(QtCore.QSize(16777215, 30))
self.messenger.setObjectName(_fromUtf8("messenger"))
self.verticalLayout.addWidget(self.messenger)
self.retranslateUi(Dialog)
#QtCore.QObject.connect(self.treeWidget, QtCore.SIGNAL(_fromUtf8("clicked(QModelIndex)")),
# self.itemSelected)
self.treeWidget.clicked.connect(self.itemSelected)
self.plainTextEdit.setAcceptDrops(True)
self.treeWidget.setDragDropMode(QtGui.QAbstractItemView.InternalMove)
self.treeWidget.setFrameShadow(QtGui.QFrame.Plain)
self.treeWidget.setFrameShape(QtGui.QFrame.Box)
#QtCore.QObject.connect(self.btLock, QtCore.SIGNAL(_fromUtf8("clicked()")),self.btLockClicked)
self.btLock.clicked.connect(self.btLockClicked)
#QtCore.QObject.connect(self.btMinus, QtCore.SIGNAL(_fromUtf8("clicked()")),self.btMinusClicked)
self.btMinus.clicked.connect(self.btMinusClicked)
#QtCore.QObject.connect(self.btAdd, QtCore.SIGNAL(_fromUtf8("clicked()")),self.btAddClicked)
self.btAdd.clicked.connect(self.btAddClicked)
#QtCore.QObject.connect(self.btRoot, QtCore.SIGNAL(_fromUtf8("clicked()")),self.btRootClicked)
self.btRoot.clicked.connect(self.btRootClicked)
self.treeWidget.dropEvent=self.itemDropped
QtCore.QMetaObject.connectSlotsByName(Dialog)
self.setEditing(False)
def Message(self,text):
self.messenger.setText(text)
def retranslateUi(self, Dialog):
Dialog.setWindowTitle(_translate("Dialog", "Dialog", None))
self.groupBox.setTitle(_translate("Dialog", "alibExplorer", None))
self.btAdd.setText(_translate("Dialog", "+", None))
self.btMinus.setText(_translate("Dialog", "", None))
self.btLock.setText(_translate("Dialog", "...", None))
        self.messenger.setText(_translate("Dialog", "Message", None))
def itemSelected(self, index):
item=self.treeWidget.itemFromIndex(index)
#help(item)
#d=item.data(1,0)
d=item.data(1,-1)
#self.plainTextEdit.setEnabled(not self.btLock.getState())
if isinstance(d,alib):
self.plainTextEdit.setPlainText(d.ToString(''))
else:
self.plainTextEdit.setPlainText(str(d))
self.setEditing(False)
fl=[item.text(0)]
p=item.parent()
while p!=None:
fl.append(p.text(0))
p=p.parent()
fl.reverse()
self.lineEdit.setText('|'.join(fl))
self.Message('')
def setEditing(self,b):
'''b==True for editing mode, else no editing mode
'''
self.plainTextEdit.setReadOnly(not b)
self.lineEdit.setReadOnly(b)
self.btLock.setState(not b)
if b == True:
self.Message('Modify value and hit lock to save.')
def dragEnterEvent(self,e):
        e.acceptProposedAction()
def btLockClicked(self):
editing=not self.btLock.getState()
si=self.treeWidget.selectedItems()
if editing==True:
if len(si)==0:
#self.Message('Begin editing whole alib')
self.treeWidget.data.FromString(self.plainTextEdit.toPlainText())
self.treeWidget.Load(self.treeWidget.data)
self.Message('Change saved')
self.setEditing(not editing)
return
else:
si=si[0]
#help(self.plainTextEdit)
if str(si.data(1,-1))==self.plainTextEdit.toPlainText():
self.Message('Nothing changed')
else:
#record data
v=alib.Parse(self.plainTextEdit.toPlainText())
k=si.text(0)
if si.parent()==None:
self.treeWidget.data[k]=v
else:
si.parent().data(1,-1)[k]=v
self.treeWidget.Load(self.treeWidget.data)
self.Message('Change saved')
self.btRootClicked()
else:
if len(si)==0:
self.Message('Begin editing whole alib')
self.setEditing(not editing)
def itemDropped(self,event):
#print('called')
if event.mimeData().hasUrls:
event.setDropAction(QtCore.Qt.CopyAction)
event.accept()
filePaths = [
str(url.toLocalFile())
for url in event.mimeData().urls()
]
#print(str(event))
self.messenger.setText(filePaths[0])
self.lineEdit.setText(filePaths[0])
#self.dropped.emit(filePaths)
from WalArt import alib
self.treeWidget.Load(alib().Load(filePaths[0]))
else:
event.ignore()
def Load(self,a):
self.treeWidget.Load(a)
self.btRootClicked()
def btMinusClicked(self):
path=self.lineEdit.text()
item=self.treeWidget.ItemFromPath(path)
if item==None:
self.Message('Warning: the node does not exist')
else:
self.treeWidget.RemoveNodeSync(item)
self.Message('node{%s} deleted'%path)
def btAddClicked(self):
path=self.lineEdit.text()
item=self.treeWidget.ItemFromPath(path)
if item==None:
self.treeWidget.data.setValue(path,'')
self.treeWidget.Load(self.treeWidget.data)
self.Message('Node{%s} added'%path)
else:
try:
#insert a number and shift other number forward,
#for convenience of auto numbering
i=int(item.text(0))
if item.parent()==None:
a=self.treeWidget.data
else:
a=item.parent().data(1,-1)
boundary=i
while str(boundary) in a:
boundary+=1
while boundary!=i:
a[str(boundary)]=a[str(boundary-1)]
boundary-=1
a[str(i)]=''
self.treeWidget.Load(self.treeWidget.data)
self.Message('Node{%s} added with shifts'%path)
except ValueError:
self.Message('Node{%s} already exists, nothing added'%path)
def btRootClicked(self):
d=self.treeWidget.data
si=self.treeWidget.selectedItems()
#self.treeWidget.deselectAll()
for i in si:
i.setSelected(False)
#self.plainTextEdit.setEnabled(not self.btLock.getState())
if isinstance(d,alib):
self.plainTextEdit.setPlainText(d.ToString(''))
else:
self.plainTextEdit.setPlainText(str(d))
self.setEditing(False)
self.lineEdit.setText('')
self.Message('Root node explored')
def GetCurrent(self):
'''returns the object that is currently exploring
it is the data of selected treenode, or the whole alib if nothing is selected
'''
si=self.treeWidget.selectedItems()
if len(si)>0:
return si[0].data(1,-1)
else:
return self.treeWidget.data
def GetSelectedText(self):
'''Similar as GetCurrent, but returns the path of selected node'''
si=self.treeWidget.selectedItems()
if len(si)>0:
return '|'.join(self.treeWidget.ItemToPath(si[0]))
else:
return ''
import sys
import time
if __name__ == '__main__':
app = QtGui.QApplication(sys.argv)
Form = QtGui.QWidget()
ui = alibExp()
ui.setupUi(Form)
Form.show()
sys.exit(app.exec_())
time.sleep(5)
| 35.580122 | 104 | 0.595462 | 15,048 | 0.857876 | 0 | 0 | 0 | 0 | 0 | 0 | 3,466 | 0.197594 |
3d2f8979ac8231da6f04ccba44cc761dc5cb64c8 | 2,445 | py | Python | test/test_batch.py | ASemakov/ob-pipelines | ea475cd2c34ae2eccbf59563fe7caea06266c450 | ["Apache-2.0"] | 11 | 2017-01-22T22:08:45.000Z | 2020-03-10T20:17:14.000Z | test/test_batch.py | BeKitzur/ob-pipelines | 8ee4ebd5803d72d0babce25b13399c9cdd0f686e | ["Apache-2.0"] | null | null | null | test/test_batch.py | BeKitzur/ob-pipelines | 8ee4ebd5803d72d0babce25b13399c9cdd0f686e | ["Apache-2.0"] | 6 | 2017-01-23T01:24:33.000Z | 2018-07-18T13:30:06.000Z |
"""
Integration test for the Luigi wrapper of AWS Batch
Requires:
- boto3 package
- Amazon AWS credentials discoverable by boto3 (e.g., by using ``aws configure``
from awscli_)
- An enabled AWS Batch job queue configured to run on a compute environment.
Written and maintained by Jake Feala (@jfeala) for Outlier Bio (@outlierbio)
"""
import unittest
try:
from ob_pipelines.batch import BatchTask, BatchJobException, client, _get_job_status
except ImportError:
raise unittest.SkipTest('boto3 is not installed. BatchTasks require boto3')
TEST_JOB_DEF = {
'jobDefinitionName': 'hello-world',
'type': 'container',
'parameters': {
'message': 'hll wrld'
},
'containerProperties': {
'image': 'centos',
'command': ['/bin/echo', 'Ref::message'],
'vcpus': 2,
'memory': 4,
}
}
class BatchTaskNoOutput(BatchTask):
def complete(self):
if self.batch_job_id:
return _get_job_status(self.batch_job_id) == 'SUCCEEDED'
return False
class BatchTaskOverrideCommand(BatchTaskNoOutput):
@property
def command(self):
return ['/bin/sleep', '10']
class BatchTaskOverrideFailingCommand(BatchTaskNoOutput):
@property
def command(self):
return ['not', 'a', 'command']
class BatchTaskNonzeroExitCommand(BatchTaskNoOutput):
@property
def command(self):
return ['exit', '1']
class TestBatchTask(unittest.TestCase):
def setUp(self):
# Register the test task definition
response = client.register_job_definition(**TEST_JOB_DEF)
self.arn = response['jobDefinitionArn']
def test_unregistered_task(self):
t = BatchTaskNoOutput(job_def=TEST_JOB_DEF, job_name='test_unregistered')
t.run()
def test_registered_task(self):
t = BatchTaskNoOutput(job_def_arn=self.arn, job_name='test_registered')
t.run()
def test_override_command(self):
t = BatchTaskOverrideCommand(job_def_arn=self.arn, job_name='test_override')
t.run()
def test_failing_command(self):
t = BatchTaskOverrideFailingCommand(job_def_arn=self.arn, job_name='test_failure')
with self.assertRaises(BatchJobException):
t.run()
def test_nonzero_exit(self):
t = BatchTaskNonzeroExitCommand(job_def_arn=self.arn, job_name='test_nonzero_exit')
with self.assertRaises(BatchJobException):
t.run()
| 25.46875 | 91 | 0.67771 | 1,582 | 0.647035 | 0 | 0 | 200 | 0.0818 | 0 | 0 | 743 | 0.303885 |
3d30c11f1ede17efd698bce52b1da5e9569d559a | 456 | py | Python | reinforcement_learning/rl_deepracer_robomaker_coach_gazebo/src/markov/tests/conftest.py | jpmarques19/tensorflwo-test | 0ff8b06e0415075c7269820d080284a42595bb2e | ["Apache-2.0"] | 5 | 2019-01-19T23:53:35.000Z | 2022-01-29T14:04:31.000Z | reinforcement_learning/rl_deepracer_robomaker_coach_gazebo/src/markov/tests/conftest.py | jpmarques19/tensorflwo-test | 0ff8b06e0415075c7269820d080284a42595bb2e | ["Apache-2.0"] | 4 | 2020-09-26T01:30:01.000Z | 2022-02-10T02:20:35.000Z | reinforcement_learning/rl_deepracer_robomaker_coach_gazebo/src/markov/tests/conftest.py | jpmarques19/tensorflwo-test | 0ff8b06e0415075c7269820d080284a42595bb2e | ["Apache-2.0"] | 7 | 2020-03-04T22:23:51.000Z | 2021-07-13T14:05:46.000Z |
import pytest
from markov.tests import test_constant
@pytest.fixture
def aws_region():
return test_constant.AWS_REGION
@pytest.fixture
def model_metadata_s3_key():
return test_constant.MODEL_METADATA_S3_KEY
@pytest.fixture
def reward_function_s3_source():
return test_constant.REWARD_FUNCTION_S3_SOURCE
@pytest.fixture
def s3_bucket():
return test_constant.S3_BUCKET
@pytest.fixture
def s3_prefix():
return test_constant.S3_PREFIX
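A minimal sketch of how a test in the same package might consume these fixtures; pytest injects them by matching argument names against fixture names, and the test function below is hypothetical:
from markov.tests import test_constant

# Hypothetical test: pytest resolves each argument from the fixtures defined above.
def test_s3_location(s3_bucket, s3_prefix, aws_region):
    assert s3_bucket == test_constant.S3_BUCKET
    assert s3_prefix == test_constant.S3_PREFIX
    assert aws_region == test_constant.AWS_REGION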
| 19.826087 | 50 | 0.809211 | 0 | 0 | 0 | 0 | 393 | 0.861842 | 0 | 0 | 0 | 0 |
3d319597951dce7996b3f7f4aeae76d89320c801 | 2,716 | py | Python | ROS/my_initials.py | Vishwajeetiitb/Autumn-of-Automation | bd8c78662734f867b6aa6fd9179a12913387a01c | ["MIT"] | null | null | null | ROS/my_initials.py | Vishwajeetiitb/Autumn-of-Automation | bd8c78662734f867b6aa6fd9179a12913387a01c | ["MIT"] | null | null | null | ROS/my_initials.py | Vishwajeetiitb/Autumn-of-Automation | bd8c78662734f867b6aa6fd9179a12913387a01c | ["MIT"] | null | null | null |
#!/usr/bin/env python
import rospy
from geometry_msgs.msg import Twist
import math
import os
from turtlesim.msg import Pose
import time
os.system("rosrun")
def callback(msg):
global current_angle
current_angle = msg.theta
# print(msg)
def move():
# Starts a new node
rospy.init_node('robot_cleaner', anonymous=True)
velocity_publisher = rospy.Publisher('/turtle1/cmd_vel', Twist, queue_size=10)
sub = rospy.Subscriber("turtle1/pose",Pose,callback)
time.sleep(1)
vel_msg = Twist()
speed = 2
distance = 4
angle = math.pi/3
angular_ve1 = 1
vel_msg.angular.z = 0
current_distance = 0
t0 = rospy.Time.now().to_sec()
# t0 = rospy.Time.now().to_sec()
vel_msg.linear.x = 0
vel_msg.angular.z = angular_ve1
while current_angle < angle:
velocity_publisher.publish(vel_msg)
        # Take the current time for the velocity calculation
t1=rospy.Time.now().to_sec()
print(current_angle)
vel_msg.linear.x = 0
vel_msg.angular.z = 0
velocity_publisher.publish(vel_msg)
t0 = rospy.Time.now().to_sec()
vel_msg.linear.x = speed
vel_msg.angular.z =0
while current_distance < distance:
velocity_publisher.publish(vel_msg)
t1=rospy.Time.now().to_sec()
current_distance = speed*(t1-t0)
vel_msg.linear.x = 0
vel_msg.angular.z = 0
velocity_publisher.publish(vel_msg)
t0 = rospy.Time.now().to_sec()
vel_msg.linear.x = -speed
vel_msg.angular.z =0
current_distance = 0
while current_distance < distance:
velocity_publisher.publish(vel_msg)
t1=rospy.Time.now().to_sec()
current_distance = speed*(t1-t0)
vel_msg.linear.x = 0
vel_msg.angular.z = 0
velocity_publisher.publish(vel_msg)
t0 = rospy.Time.now().to_sec()
vel_msg.linear.x = 0
vel_msg.angular.z = angular_ve1
while current_angle < 2*angle:
velocity_publisher.publish(vel_msg)
        # Take the current time for the velocity calculation
t1=rospy.Time.now().to_sec()
print(current_angle)
vel_msg.linear.x = 0
vel_msg.angular.z = 0
velocity_publisher.publish(vel_msg)
t0 = rospy.Time.now().to_sec()
vel_msg.linear.x = speed
vel_msg.angular.z =0
current_distance = 0
while current_distance < distance:
velocity_publisher.publish(vel_msg)
t1=rospy.Time.now().to_sec()
current_distance = speed*(t1-t0)
vel_msg.linear.x = 0
vel_msg.angular.z = 0
velocity_publisher.publish(vel_msg)
if __name__ == '__main__':
try:
#Testing our function
move()
except rospy.ROSInterruptException: pass
| 28 | 82 | 0.645066 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 248 | 0.091311 |
3d321cb4dea8943fb087339fe2547eeaba4b5805 | 2,144 | py | Python | Assignment-04/Question-03/mpi_ping_pong.py | gnu-user/mcsc-6030-assignments | 42825cdbc4532d9da6ebdba549b65fb1e36456a0 | ["MIT"] | null | null | null | Assignment-04/Question-03/mpi_ping_pong.py | gnu-user/mcsc-6030-assignments | 42825cdbc4532d9da6ebdba549b65fb1e36456a0 | ["MIT"] | null | null | null | Assignment-04/Question-03/mpi_ping_pong.py | gnu-user/mcsc-6030-assignments | 42825cdbc4532d9da6ebdba549b65fb1e36456a0 | ["MIT"] | null | null | null |
#!/usr/bin/env python2
###############################################################################
#
# Assignment 4, Question 3 solution for MPI ping-pong timings to calculate
# alpha and beta, implemented in Python using MPI.
#
# Copyright (C) 2015, Jonathan Gillett (100437638)
# All rights reserved.
#
###############################################################################
import numpy as np
import sys
from mpi4py import MPI
from time import sleep
from random import random
# Define process 0 as PING, process 1 as PONG
PING = 0
PONG = 1
# Number of trials for getting the average time
TRIALS = 100
if __name__ == '__main__':
if len(sys.argv) < 2:
print "ERROR: You must provide the number of bytes to send!."
sys.exit()
N = int(sys.argv[1]) # The number of bytes to generate
comm = MPI.COMM_WORLD
proc_id = comm.Get_rank()
n_proc = comm.Get_size()
status = MPI.Status()
# Error checking only 2 processes can be used
if n_proc > 2:
if proc_id == PING:
print "ERROR: Only two proceses (ping and pong)."
MPI.Finalize()
sys.exit()
if N < 1:
if proc_id == PING:
print "ERROR: You must specify the data size in bytes."
MPI.Finalize()
sys.exit()
# The data to send back and forth, in bytes
A = np.empty(N, dtype=np.int8)
comm.Barrier()
# Send the data back and forth 100 times to get the average time
timings = []
    for i in range(0, TRIALS):
if proc_id == PING:
local_time = -MPI.Wtime()
comm.Send(A, PONG, tag=PING)
comm.Recv(A, source=MPI.ANY_SOURCE, tag=PONG, status=status)
timings.append(local_time + MPI.Wtime())
# Simulate random sleeps to account for different scheduling
sleep(random() / 100)
else:
comm.Recv(A, source=MPI.ANY_SOURCE, tag=PING, status=status)
comm.Send(A, PING, tag=PONG)
if proc_id == PING:
print "N bytes sent: %d, trials: %d, average time: %0.8f seconds" \
% (N, TRIALS, sum(timings) / float(len(timings)) / 2.0)
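Under the usual linear cost model T(N) = alpha + beta*N, where alpha is the per-message latency and beta the per-byte transfer time, the two constants can be recovered from the averaged one-way times reported for two different message sizes. A minimal sketch, assuming two hypothetical measurements:
# Hypothetical averaged one-way times from two runs of the script above.
n1, t1 = 1000, 2.0e-5       # 1 kB message
n2, t2 = 1000000, 1.2e-3    # 1 MB message

beta = (t2 - t1) / (n2 - n1)   # seconds per byte
alpha = t1 - beta * n1         # latency in seconds
print('alpha = %.2e s, beta = %.2e s/byte' % (alpha, beta))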
| 32 | 79 | 0.567631 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 933 | 0.435168 |
3d33211ca1584c7787f1e93ba17778c1a7d518eb | 2,286 | py | Python | app/monitoring/logging_config.py | robmarkcole/python-fastapi-aws-lambda-container | 56a676f4c0bccce10fd2533daba3ace0201a1bb3 | ["Apache-2.0"] | 15 | 2020-12-29T23:14:33.000Z | 2022-03-24T03:56:34.000Z | app/monitoring/logging_config.py | robmarkcole/python-fastapi-aws-lambda-container | 56a676f4c0bccce10fd2533daba3ace0201a1bb3 | ["Apache-2.0"] | 3 | 2021-09-11T00:41:55.000Z | 2022-03-24T05:51:17.000Z | app/monitoring/logging_config.py | robmarkcole/python-fastapi-aws-lambda-container | 56a676f4c0bccce10fd2533daba3ace0201a1bb3 | ["Apache-2.0"] | 5 | 2021-09-10T23:53:41.000Z | 2022-03-25T11:31:24.000Z |
import os
import uuid
import logging
import json
from json import JSONEncoder
from pythonjsonlogger import jsonlogger
from datetime import datetime
from logging.config import dictConfig
# Custom JSON encoder which enforce standard ISO 8601 format, UUID format
class ModelJsonEncoder(JSONEncoder):
def default(self, o):
        if isinstance(o, uuid.UUID):
return str(o)
if isinstance(o, datetime):
return o.isoformat()
return json.JSONEncoder.default(self, o)
class LogFilter(logging.Filter):
def __init__(self, service=None, instance=None):
self.service = service
self.instance = instance
def filter(self, record):
record.service = self.service
record.instance = self.instance
return True
class JsonLogFormatter(jsonlogger.JsonFormatter):
def add_fields(self, log_record, record, message_dict):
super().add_fields(log_record, record, message_dict)
# Add timestamp field with default : now
if not log_record.get('timestamp'):
now = datetime.utcnow().isoformat()
log_record['timestamp'] = now
# Add level field
if log_record.get('level'):
log_record['level'] = log_record['level'].upper()
else:
log_record['level'] = record.levelname
# Add type field for internal logs
if not log_record.get('type'):
log_record['type'] = 'internal'
# Configure Logging
def configure_logging(level='DEBUG', service=None, instance=None):
dictConfig({
'version': 1,
'formatters': {'default': {
'()': JsonLogFormatter,
'format': '%(timestamp)s %(level)s %(service)s %(instance)s %(type)s %(message)s',
'json_encoder': ModelJsonEncoder
}},
'filters': {'default': {
'()': LogFilter,
'service': service,
'instance': instance
}},
'handlers': {'default_handler': {
'class': 'logging.StreamHandler',
'stream': 'ext://sys.stdout',
'filters': ['default'],
'formatter': 'default'
}},
'root': {
'level': level,
'handlers': ['default_handler']
}
})
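A minimal sketch of how this configuration might be applied at application startup; the service and instance names are hypothetical:
import logging

configure_logging(level='INFO', service='demo-api', instance='local-1')

# Each record is emitted as one JSON object on stdout, with the timestamp,
# level, service, instance and type fields added by the formatter and filter.
logging.getLogger(__name__).info('application started')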
| 30.078947 | 94 | 0.587927 | 1,210 | 0.529309 | 0 | 0 | 0 | 0 | 0 | 0 | 591 | 0.25853 |
3d332e20398ab4a054c4523a1136617bf5854f9a | 1,459 | py | Python | FPAIT/lib/logger/utils.py | D-X-Y/MSPLD-2018 | 71a6a75830ac84c7a861e63367ad3ace991fae77 | ["MIT"] | 63 | 2018-07-12T10:36:25.000Z | 2019-04-26T11:30:09.000Z | FPAIT/lib/logger/utils.py | D-X-Y/MSPLD-2018 | 71a6a75830ac84c7a861e63367ad3ace991fae77 | ["MIT"] | null | null | null | FPAIT/lib/logger/utils.py | D-X-Y/MSPLD-2018 | 71a6a75830ac84c7a861e63367ad3ace991fae77 | ["MIT"] | 8 | 2018-07-14T02:47:12.000Z | 2019-06-03T07:39:13.000Z |
import time, sys
import numpy as np
def time_for_file():
ISOTIMEFORMAT='%d-%h-at-%H-%M-%S'
return '{}'.format(time.strftime( ISOTIMEFORMAT, time.gmtime(time.time()) ))
def time_string():
ISOTIMEFORMAT='%Y-%m-%d %X'
string = '[{}]'.format(time.strftime( ISOTIMEFORMAT, time.gmtime(time.time()) ))
return string
def time_string_short():
ISOTIMEFORMAT='%Y%m%d'
string = '{}'.format(time.strftime( ISOTIMEFORMAT, time.gmtime(time.time()) ))
return string
def print_log(print_string, log):
print("{}".format(print_string))
if log is not None:
log.write('{}\n'.format(print_string))
log.flush()
def convert_secs2time(epoch_time, return_string=False):
need_hour = int(epoch_time / 3600)
need_mins = int((epoch_time - 3600*need_hour) / 60)
need_secs = int(epoch_time - 3600*need_hour - 60*need_mins)
if return_string:
return '{:02d}:{:02d}:{:02d}'.format(need_hour, need_mins, need_secs)
else:
return need_hour, need_mins, need_secs
class AverageMeter(object):
"""Computes and stores the average and current value"""
def __init__(self):
self.reset()
def reset(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1):
self.val = val
self.sum += val * n
self.count += n
self.avg = self.sum / self.count
def __repr__(self):
return ('{name}(val={val}, avg={avg}, count={count})'.format(name=self.__class__.__name__, **self.__dict__))
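A minimal sketch of how these helpers are typically used in a training loop; the loss values and batch sizes are hypothetical:
# Hypothetical loop: track a running average of per-batch losses.
losses = AverageMeter()
for batch_loss, batch_size in [(0.92, 32), (0.81, 32), (0.64, 16)]:
    losses.update(batch_loss, n=batch_size)
print_log('avg loss {:.4f} at {}'.format(losses.avg, time_string()), log=None)
print(convert_secs2time(4000, return_string=True))  # '01:06:40'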
| 28.057692 | 112 | 0.666895 | 481 | 0.329678 | 0 | 0 | 0 | 0 | 0 | 0 | 186 | 0.127485 |
3d34dd340fc3d7607de14667552ba62b48a6ce54 | 1,888 | py | Python | hoomd/hpmc/test-py/test_ghost_layer.py | PetersResearchGroup/PCND | 584768cc683a6df0152ead69b567d05b781aab2b | ["BSD-3-Clause"] | null | null | null | hoomd/hpmc/test-py/test_ghost_layer.py | PetersResearchGroup/PCND | 584768cc683a6df0152ead69b567d05b781aab2b | ["BSD-3-Clause"] | null | null | null | hoomd/hpmc/test-py/test_ghost_layer.py | PetersResearchGroup/PCND | 584768cc683a6df0152ead69b567d05b781aab2b | ["BSD-3-Clause"] | null | null | null |
from __future__ import print_function
from __future__ import division
from hoomd import *
from hoomd import hpmc
import math
import unittest
context.initialize()
class test_ghost_layer(unittest.TestCase):
def test_implicit(self):
# setup the MC integration
system = init.read_snapshot(data.make_snapshot(N=2,box=data.boxdim(Lx=100,Ly=50,Lz=50),particle_types=['A','B']))
mc = hpmc.integrate.convex_polyhedron(seed=123,implicit=True)
mc.set_params(d=0,a=0)
mc.set_params(nR=0,depletant_type='B')
cube_verts=[(-1, -1, -1), (-1, -1, 1), (-1, 1, -1), (-1, 1, 1), (1, -1, -1), (1, -1, 1), (1, 1, -1), (1, 1, 1)]
mc.shape_param.set('A', vertices=cube_verts)
mc.shape_param.set('B', vertices=cube_verts)
system.particles[0].position = (-.2,0,0)
system.particles[1].position = (1.2,2,0)
# currently we need this to communicate properly
run(1)
self.assertTrue(mc.count_overlaps())
def test_base(self):
# setup the MC integration
system = init.read_snapshot(data.make_snapshot(N=2,box=data.boxdim(Lx=100,Ly=50,Lz=50),particle_types=['A']))
mc = hpmc.integrate.convex_polyhedron(seed=123)
mc.set_params(d=0,a=0)
cube_verts=[(-1, -1, -1), (-1, -1, 1), (-1, 1, -1), (-1, 1, 1), (1, -1, -1), (1, -1, 1), (1, 1, -1), (1, 1, 1)]
mc.shape_param.set('A', vertices=cube_verts)
self.assertRaises(RuntimeError, mc.shape_param.set, types='B', vertices=cube_verts) #This is an error now
system.particles[0].position = (-.2,0,0)
system.particles[1].position = (1.2,2,0)
# currently we need this to communicate properly
run(1)
self.assertTrue(mc.count_overlaps())
def tearDown(self):
context.initialize();
if __name__ == '__main__':
unittest.main(argv = ['test.py', '-v'])
| 33.714286 | 121 | 0.613877 | 1,650 | 0.873941 | 0 | 0 | 0 | 0 | 0 | 0 | 216 | 0.114407 |
3d34e6acbf5b6084146e881a817272a730156e45 | 525 | py | Python | performanceplatform/collector/ga/plugins/load_plugin.py | alphagov/performanceplatform-collector | de68ab4aa500c31e436e050fa1268fa928c522a5 | ["MIT"] | 3 | 2015-05-01T14:57:28.000Z | 2016-04-08T12:53:59.000Z | performanceplatform/collector/ga/plugins/load_plugin.py | alphagov/performanceplatform-collector | de68ab4aa500c31e436e050fa1268fa928c522a5 | ["MIT"] | 15 | 2015-02-11T11:43:02.000Z | 2021-03-24T10:54:35.000Z | performanceplatform/collector/ga/plugins/load_plugin.py | alphagov/performanceplatform-collector | de68ab4aa500c31e436e050fa1268fa928c522a5 | ["MIT"] | 7 | 2015-05-04T16:56:02.000Z | 2021-04-10T19:42:35.000Z |
"""
load_plugin.py
--------------
Responsible for taking plugin strings and returning plugin callables.
"""
# For the linter
import __builtin__
import performanceplatform.collector.ga.plugins
def load_plugins(plugin_names):
return [load_plugin(plugin_name) for plugin_name in plugin_names]
def load_plugin(plugin_name):
expr = compile(plugin_name, "performanceplatform.collector plugin", "eval")
return eval(expr, __builtin__.__dict__,
performanceplatform.collector.ga.plugins.__dict__)
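Because the plugin string is evaluated against Python builtins plus the performanceplatform.collector.ga.plugins namespace, any expression resolvable in those two scopes is accepted. A minimal sketch using only a builtin, since the real plugin class names are not listed here:
# 'sorted' resolves from the builtins dictionary, so this returns the built-in callable.
fn = load_plugin("sorted")
assert fn([3, 1, 2]) == [1, 2, 3]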
| 21 | 79 | 0.744762 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 169 | 0.321905 |
3d36068bd29dc63f314be2d8a4d427fb6770b25d | 26,243 | py | Python | src/Python/Visualization/FrogReconstruction.py | ajpmaclean/vtk-examples | 1a55fc8c6af67a3c07791807c7d1ec0ab97607a2 | ["Apache-2.0"] | 81 | 2020-08-10T01:44:30.000Z | 2022-03-23T06:46:36.000Z | src/Python/Visualization/FrogReconstruction.py | ajpmaclean/vtk-examples | 1a55fc8c6af67a3c07791807c7d1ec0ab97607a2 | ["Apache-2.0"] | 2 | 2020-09-12T17:33:52.000Z | 2021-04-15T17:33:09.000Z | src/Python/Visualization/FrogReconstruction.py | ajpmaclean/vtk-examples | 1a55fc8c6af67a3c07791807c7d1ec0ab97607a2 | ["Apache-2.0"] | 27 | 2020-08-17T07:09:30.000Z | 2022-02-15T03:44:58.000Z |
#!/usr/bin/env python
import collections
from pathlib import Path
# noinspection PyUnresolvedReferences
import vtkmodules.vtkInteractionStyle
# noinspection PyUnresolvedReferences
import vtkmodules.vtkRenderingOpenGL2
from vtkmodules.vtkCommonColor import vtkNamedColors
from vtkmodules.vtkCommonCore import vtkLookupTable
from vtkmodules.vtkCommonMath import vtkMatrix4x4
from vtkmodules.vtkCommonTransforms import vtkTransform
from vtkmodules.vtkFiltersCore import (
vtkContourFilter,
vtkDecimatePro,
vtkExecutionTimer,
vtkFlyingEdges3D,
vtkMarchingCubes,
vtkPolyDataNormals,
vtkStripper,
vtkWindowedSincPolyDataFilter
)
from vtkmodules.vtkFiltersGeneral import vtkTransformPolyDataFilter
from vtkmodules.vtkIOImage import vtkMetaImageReader
from vtkmodules.vtkImagingCore import (
vtkImageShrink3D,
vtkImageThreshold
)
from vtkmodules.vtkImagingGeneral import vtkImageGaussianSmooth
from vtkmodules.vtkImagingMorphological import vtkImageIslandRemoval2D
from vtkmodules.vtkInteractionWidgets import vtkOrientationMarkerWidget
from vtkmodules.vtkRenderingAnnotation import vtkAxesActor
from vtkmodules.vtkRenderingCore import (
vtkActor,
vtkPolyDataMapper,
vtkRenderWindow,
vtkRenderWindowInteractor,
vtkRenderer
)
def get_program_parameters(argv):
import argparse
description = 'Display all frog parts and translucent skin.'
epilogue = '''
To specify all the tissues at once:
blood brain duodenum eye_retna eye_white heart ileum kidney l_intestine liver lung nerve skeleton spleen stomach skin
You can leave out brainbin, it is the brain with no gaussian smoothing.
Here are the parameters used to get the views in the VTK Textbook:
Fig12-9a:
blood brain duodenum eye_retna eye_white heart ileum kidney l_intestine liver lung nerve skeleton spleen stomach skin -a
Fig12-9b:
blood brain duodenum eye_retna eye_white heart ileum kidney l_intestine liver lung nerve skeleton spleen stomach -a
Fig12-9c:
brain duodenum eye_retna eye_white heart ileum kidney l_intestine liver lung nerve spleen stomach -c
    Fig12-9d:
brain duodenum eye_retna eye_white heart ileum kidney l_intestine liver lung nerve spleen stomach -d
'''
parser = argparse.ArgumentParser(description=description, epilog=epilogue,
formatter_class=argparse.RawDescriptionHelpFormatter)
group = parser.add_mutually_exclusive_group()
group.add_argument('-a', action='store_const', dest='view', const='a',
help='The view corresponds to Figs 12-9a and 12-9b in the VTK Textbook')
group.add_argument('-c', action='store_const', dest='view', const='c',
help='The view corresponds to Figs 12-9c in the VTK Textbook')
group.add_argument('-d', action='store_const', dest='view', const='d',
help='The view corresponds to Figs 12-9d in the VTK Textbook')
parser.set_defaults(type=None)
parser.add_argument('-m', action='store_false', dest='flying_edges',
help='Use flying edges by default, marching cubes if set.')
parser.add_argument('-t', action='store_true', dest='decimation',
help='Decimate if set.')
parser.add_argument('data_folder', help='The path to the files: frog.mhd and frogtissue.mhd.')
parser.add_argument('tissues', nargs='+', help='List of one or more tissues.')
args = parser.parse_args()
return args.data_folder, args.tissues, args.view, args.flying_edges, args.decimation
def main(data_folder, tissues, view, flying_edges, decimate):
colors = vtkNamedColors()
path = Path(data_folder)
if path.is_dir():
s = ''
frog_fn = path.joinpath('frog').with_suffix('.mhd')
if not frog_fn.is_file():
s += 'The file: {:s} does not exist.\n'.format(str(frog_fn))
print(s)
frog_tissue_fn = path.joinpath('frogtissue').with_suffix('.mhd')
if not frog_tissue_fn.is_file():
s += 'The file: {:s} does not exist.'.format(str(frog_tissue_fn))
if s:
print(s)
return
else:
        print('Expected a path to frog.mhd and frogtissue.mhd')
return
# Tissue parameters
available_tissues = tissue_parameters()
selected_tissues = {key: available_tissues[key] for key in tissues}
if not selected_tissues:
print('No tissues!')
return
missing_parameters = False
for k, v in selected_tissues.items():
res = check_for_required_parameters(k, v)
if res:
print(res)
missing_parameters = True
if missing_parameters:
print('Some required parameters are missing!')
return
# Setup render window, renderer, and interactor.
renderer = vtkRenderer()
render_window = vtkRenderWindow()
render_window.AddRenderer(renderer)
render_window_interactor = vtkRenderWindowInteractor()
render_window_interactor.SetRenderWindow(render_window)
lut = create_frog_lut(colors)
# Time some filters
ict = collections.defaultdict(dict)
for name, tissue in selected_tissues.items():
print('Tissue: {:>9s}, label: {:2d}'.format(name, tissue['TISSUE']))
t, actor = create_frog_actor(frog_fn, frog_tissue_fn, tissue, flying_edges, decimate, lut)
ict[name] = t
renderer.AddActor(actor)
# Initial view (looking down on the dorsal surface).
renderer.GetActiveCamera().Roll(-90)
renderer.ResetCamera()
# Final view
if view:
if view == 'a':
# Figs 12-9a and 12-9b in the VTK Textbook
camera = renderer.GetActiveCamera()
camera.SetPosition(-850.073854, 834.142692, 266.017598)
camera.SetFocalPoint(-72.387897, 109.637349, -306.343185)
camera.SetViewUp(0.284585, -0.387303, 0.876931)
camera.SetDistance(1207.186939)
camera.SetClippingRange(247.737449, 1758.922849)
elif view == 'c':
# Figs 12-9c in the VTK Textbook
camera = renderer.GetActiveCamera()
camera.SetPosition(-438.993734, 404.715262, 105.797836)
camera.SetFocalPoint(-254.193794, 245.672169, -95.535892)
camera.SetViewUp(0.256893, -0.629643, 0.733182)
camera.SetDistance(316.197712)
camera.SetClippingRange(0.789810, 789.809963)
elif view == 'd':
# Fig 12-9d in the VTK Textbook
camera = renderer.GetActiveCamera()
camera.SetPosition(-262.252604, 229.863144, 562.084505)
camera.SetFocalPoint(-288.693092, 228.870041, -91.185421)
camera.SetViewUp(0.729526, -0.683360, -0.028488)
camera.SetDistance(653.805539)
camera.SetClippingRange(452.459105, 905.003135)
print('Timings:')
print('\n'.join(format_timings(ict)))
renderer.SetBackground(colors.GetColor3d('LightSteelBlue'))
render_window.SetSize(640, 640)
render_window.SetWindowName('FrogReconstruction')
render_window.Render()
axes = vtkAxesActor()
widget = vtkOrientationMarkerWidget()
rgba = [0.0, 0.0, 0.0, 0.0]
colors.GetColor("Carrot", rgba)
widget.SetOutlineColor(rgba[0], rgba[1], rgba[2])
widget.SetOrientationMarker(axes)
widget.SetInteractor(render_window_interactor)
widget.SetViewport(0.0, 0.0, 0.2, 0.2)
widget.SetEnabled(1)
widget.InteractiveOn()
render_window.Render()
render_window_interactor.Start()
def create_frog_actor(frog_fn, frog_tissue_fn, tissue, flying_edges, decimate, lut):
# Get the tissue parameters
pixel_size = tissue['PIXEL_SIZE']
columns = tissue['COLUMNS']
rows = tissue['ROWS']
voi = tissue['VOI']
spacing = float(tissue['SPACING'])
start_slice = float(tissue['START_SLICE'])
data_spacing = [pixel_size, pixel_size, spacing]
data_origin = [-(columns / 2.0) * pixel_size, -(rows / 2.0) * pixel_size, start_slice * spacing]
#
# adjust y bounds for PNM coordinate system
#
tmp = voi[2]
voi[2] = rows - voi[3] - 1
voi[3] = rows - tmp - 1
if tissue['NAME'] == 'skin':
fn = frog_fn
else:
fn = frog_tissue_fn
reader = vtkMetaImageReader()
reader.SetFileName(str(fn))
reader.SetDataSpacing(data_spacing)
reader.SetDataOrigin(data_origin)
reader.SetDataExtent(voi)
reader.Update()
last_connection = reader
if not tissue['NAME'] == 'skin':
if tissue['ISLAND_REPLACE'] >= 0:
island_remover = vtkImageIslandRemoval2D()
island_remover.SetAreaThreshold(tissue['ISLAND_AREA'])
island_remover.SetIslandValue(tissue['ISLAND_REPLACE'])
island_remover.SetReplaceValue(tissue['TISSUE'])
            island_remover.SetInputConnection(last_connection.GetOutputPort())
island_remover.Update()
last_connection = island_remover
select_tissue = vtkImageThreshold()
select_tissue.ThresholdBetween(tissue['TISSUE'], tissue['TISSUE'])
select_tissue.SetInValue(255)
select_tissue.SetOutValue(0)
select_tissue.SetInputConnection(last_connection.GetOutputPort())
last_connection = select_tissue
shrinker = vtkImageShrink3D()
shrinker.SetInputConnection(last_connection.GetOutputPort())
shrinker.SetShrinkFactors(tissue['SAMPLE_RATE'])
shrinker.AveragingOn()
last_connection = shrinker
if not all(v == 0 for v in tissue['GAUSSIAN_STANDARD_DEVIATION']):
gaussian = vtkImageGaussianSmooth()
gaussian.SetStandardDeviation(*tissue['GAUSSIAN_STANDARD_DEVIATION'])
gaussian.SetRadiusFactors(*tissue['GAUSSIAN_RADIUS_FACTORS'])
gaussian.SetInputConnection(shrinker.GetOutputPort())
last_connection = gaussian
# Time the isocontouring.
ict = collections.defaultdict()
iso_value = tissue['VALUE']
if flying_edges:
iso_surface = vtkFlyingEdges3D()
iso_surface.SetInputConnection(last_connection.GetOutputPort())
iso_surface.ComputeScalarsOff()
iso_surface.ComputeGradientsOff()
iso_surface.ComputeNormalsOff()
iso_surface.SetValue(0, iso_value)
timer = vtkExecutionTimer()
timer.SetFilter(iso_surface)
iso_surface.Update()
ict['Flying Edges'] = timer.GetElapsedWallClockTime()
else:
iso_surface = vtkMarchingCubes()
iso_surface.SetInputConnection(last_connection.GetOutputPort())
iso_surface.ComputeScalarsOff()
iso_surface.ComputeGradientsOff()
iso_surface.ComputeNormalsOff()
iso_surface.SetValue(0, iso_value)
timer = vtkExecutionTimer()
timer.SetFilter(iso_surface)
iso_surface.Update()
ict['Marching Cubes'] = timer.GetElapsedWallClockTime()
so = SliceOrder()
# transform = so.get(tissue['SLICE_ORDER'])
# Match Frog.py
transform = so.get('hfap')
transform.Scale(1, -1, 1)
tf = vtkTransformPolyDataFilter()
tf.SetTransform(transform)
tf.SetInputConnection(iso_surface.GetOutputPort())
last_connection = tf
if decimate:
decimator = vtkDecimatePro()
decimator.SetInputConnection(last_connection.GetOutputPort())
decimator.SetFeatureAngle(tissue['DECIMATE_ANGLE'])
decimator.MaximumIterations = tissue['DECIMATE_ITERATIONS']
decimator.PreserveTopologyOn()
decimator.SetErrorIsAbsolute(1)
decimator.SetAbsoluteError(tissue['DECIMATE_ERROR'])
decimator.SetTargetReduction(tissue['DECIMATE_REDUCTION'])
last_connection = decimator
smoother = vtkWindowedSincPolyDataFilter()
smoother.SetInputConnection(last_connection.GetOutputPort())
smoother.SetNumberOfIterations(tissue['SMOOTH_ITERATIONS'])
smoother.BoundarySmoothingOff()
smoother.FeatureEdgeSmoothingOff()
smoother.SetFeatureAngle(tissue['SMOOTH_ANGLE'])
smoother.SetPassBand(tissue['SMOOTH_FACTOR'])
smoother.NonManifoldSmoothingOn()
smoother.NormalizeCoordinatesOff()
smoother.Update()
normals = vtkPolyDataNormals()
normals.SetInputConnection(smoother.GetOutputPort())
normals.SetFeatureAngle(tissue['FEATURE_ANGLE'])
stripper = vtkStripper()
stripper.SetInputConnection(normals.GetOutputPort())
mapper = vtkPolyDataMapper()
mapper.SetInputConnection(stripper.GetOutputPort())
# Create iso-surface
contour = vtkContourFilter()
contour.SetInputConnection(reader.GetOutputPort())
contour.SetValue(0, iso_value)
actor = vtkActor()
actor.SetMapper(mapper)
actor.GetProperty().SetOpacity(tissue['OPACITY'])
actor.GetProperty().SetDiffuseColor(lut.GetTableValue(tissue['TISSUE'])[:3])
actor.GetProperty().SetSpecular(0.5)
actor.GetProperty().SetSpecularPower(10)
return ict, actor
class SliceOrder:
"""
These transformations permute image and other geometric data to maintain proper
orientation regardless of the acquisition order. After applying these transforms with
vtkTransformFilter, a view up of 0,-1,0 will result in the body part
facing the viewer.
NOTE: some transformations have a -1 scale factor for one of the components.
To ensure proper polygon orientation and normal direction, you must
apply the vtkPolyDataNormals filter.
Naming (the nomenclature is medical):
si - superior to inferior (top to bottom)
is - inferior to superior (bottom to top)
ap - anterior to posterior (front to back)
pa - posterior to anterior (back to front)
lr - left to right
rl - right to left
"""
def __init__(self):
self.si_mat = vtkMatrix4x4()
self.si_mat.Zero()
self.si_mat.SetElement(0, 0, 1)
self.si_mat.SetElement(1, 2, 1)
self.si_mat.SetElement(2, 1, -1)
self.si_mat.SetElement(3, 3, 1)
self.is_mat = vtkMatrix4x4()
self.is_mat.Zero()
self.is_mat.SetElement(0, 0, 1)
self.is_mat.SetElement(1, 2, -1)
self.is_mat.SetElement(2, 1, -1)
self.is_mat.SetElement(3, 3, 1)
self.lr_mat = vtkMatrix4x4()
self.lr_mat.Zero()
self.lr_mat.SetElement(0, 2, -1)
self.lr_mat.SetElement(1, 1, -1)
self.lr_mat.SetElement(2, 0, 1)
self.lr_mat.SetElement(3, 3, 1)
self.rl_mat = vtkMatrix4x4()
self.rl_mat.Zero()
self.rl_mat.SetElement(0, 2, 1)
self.rl_mat.SetElement(1, 1, -1)
self.rl_mat.SetElement(2, 0, 1)
self.rl_mat.SetElement(3, 3, 1)
"""
The previous transforms assume radiological views of the slices (viewed from the feet). other
modalities such as physical sectioning may view from the head. These transforms modify the original
with a 180° rotation about y
"""
self.hf_mat = vtkMatrix4x4()
self.hf_mat.Zero()
self.hf_mat.SetElement(0, 0, -1)
self.hf_mat.SetElement(1, 1, 1)
self.hf_mat.SetElement(2, 2, -1)
self.hf_mat.SetElement(3, 3, 1)
def s_i(self):
t = vtkTransform()
t.SetMatrix(self.si_mat)
return t
def i_s(self):
t = vtkTransform()
t.SetMatrix(self.is_mat)
return t
@staticmethod
def a_p():
t = vtkTransform()
return t.Scale(1, -1, 1)
@staticmethod
def p_a():
t = vtkTransform()
return t.Scale(1, -1, -1)
def l_r(self):
t = vtkTransform()
t.SetMatrix(self.lr_mat)
t.Update()
return t
def r_l(self):
t = vtkTransform()
t.SetMatrix(self.lr_mat)
return t
def h_f(self):
t = vtkTransform()
t.SetMatrix(self.hf_mat)
return t
def hf_si(self):
t = vtkTransform()
t.Concatenate(self.hf_mat)
t.Concatenate(self.si_mat)
return t
def hf_is(self):
t = vtkTransform()
t.Concatenate(self.hf_mat)
t.Concatenate(self.is_mat)
return t
def hf_ap(self):
t = vtkTransform()
t.Concatenate(self.hf_mat)
t.Scale(1, -1, 1)
return t
def hf_pa(self):
t = vtkTransform()
t.Concatenate(self.hf_mat)
t.Scale(1, -1, -1)
return t
def hf_lr(self):
t = vtkTransform()
t.Concatenate(self.hf_mat)
t.Concatenate(self.lr_mat)
return t
def hf_rl(self):
t = vtkTransform()
t.Concatenate(self.hf_mat)
t.Concatenate(self.rl_mat)
return t
def get(self, order):
"""
Returns the vtkTransform corresponding to the slice order.
:param order: The slice order
:return: The vtkTransform to use
"""
if order == 'si':
return self.s_i()
elif order == 'is':
return self.i_s()
elif order == 'ap':
return self.a_p()
elif order == 'pa':
return self.p_a()
elif order == 'lr':
return self.l_r()
elif order == 'rl':
return self.r_l()
elif order == 'hf':
return self.h_f()
elif order == 'hfsi':
return self.hf_si()
elif order == 'hfis':
return self.hf_is()
elif order == 'hfap':
return self.hf_ap()
elif order == 'hfpa':
return self.hf_pa()
elif order == 'hflr':
return self.hf_lr()
elif order == 'hfrl':
return self.hf_rl()
else:
s = 'No such transform "{:s}" exists.'.format(order)
raise Exception(s)
def default_parameters():
p = dict()
p['NAME'] = ''
p['TISSUE'] = '1'
p['START_SLICE'] = '0'
p['END_SLICE'] = '255'
p['STUDY'] = 'frogtissue'
p['VALUE'] = 127.5
p['ROWS'] = 470
p['COLUMNS'] = 500
p['HEADER_SIZE'] = 0
p['PIXEL_SIZE'] = 1
p['SPACING'] = 1.5
p['START_SLICE'] = 1
p['END_SLICE'] = 138
p['REDUCTION'] = 1
p['FEATURE_ANGLE'] = 60
p['DECIMATE_ANGLE'] = 60
p['SMOOTH_ANGLE'] = 60
p['SMOOTH_ITERATIONS'] = 10
p['SMOOTH_FACTOR'] = 0.1
p['DECIMATE_ITERATIONS'] = 1
p['DECIMATE_REDUCTION'] = 1
p['DECIMATE_ERROR'] = 0.0002
p['DECIMATE_ERROR_INCREMENT'] = 0.0002
p['ISLAND_AREA'] = 4
p['ISLAND_REPLACE'] = -1
p['GAUSSIAN_STANDARD_DEVIATION'] = [2, 2, 2]
p['GAUSSIAN_RADIUS_FACTORS'] = [2, 2, 2]
p['VOI'] = [0, p['COLUMNS'] - 1, 0, p['ROWS'] - 1, 0, p['END_SLICE']]
p['SAMPLE_RATE'] = [1, 1, 1]
p['OPACITY'] = 1.0
return p
def blood():
p = frog()
p['NAME'] = 'blood'
p['TISSUE'] = 1
p['START_SLICE'] = 14
p['END_SLICE'] = 131
p['VALUE'] = 4
p['VOI'] = [33, 406, 62, 425, p['START_SLICE'], p['END_SLICE']]
return p
def brain():
p = frog()
p['NAME'] = 'brain'
p['TISSUE'] = 2
p['START_SLICE'] = 1
p['END_SLICE'] = 33
p['VOI'] = [349, 436, 211, 252, p['START_SLICE'], p['END_SLICE']]
return p
def brainbin():
p = frog()
p['NAME'] = 'brainbin'
p['TISSUE'] = 2
p['START_SLICE'] = 1
p['END_SLICE'] = 33
p['VOI'] = [349, 436, 211, 252, p['END_SLICE'], p['START_SLICE']]
p['GAUSSIAN_STANDARD_DEVIATION'] = [0, 0, 0]
p['DECIMATE_ITERATIONS'] = 0
return p
def duodenum():
p = frog()
p['NAME'] = 'duodenum'
p['TISSUE'] = 3
p['START_SLICE'] = 35
p['END_SLICE'] = 105
p['VOI'] = [189, 248, 191, 284, p['START_SLICE'], p['END_SLICE']]
return p
def eye_retna():
p = frog()
p['NAME'] = 'eye_retna'
p['TISSUE'] = 4
p['START_SLICE'] = 1
p['END_SLICE'] = 41
p['VOI'] = [342, 438, 180, 285, p['START_SLICE'], p['END_SLICE']]
return p
def eye_white():
p = frog()
p['NAME'] = 'eye_white'
p['TISSUE'] = 5
p['START_SLICE'] = 1
p['END_SLICE'] = 37
p['VOI'] = [389, 433, 183, 282, p['START_SLICE'], p['END_SLICE']]
return p
def frog():
p = default_parameters()
p['ROWS'] = 470
p['COLUMNS'] = 500
p['STUDY'] = 'frogtissue'
p['SLICE_ORDER'] = 'si'
p['PIXEL_SIZE'] = 1
p['SPACING'] = 1.5
p['VALUE'] = 127.5
p['SAMPLE_RATE'] = [1, 1, 1]
p['GAUSSIAN_STANDARD_DEVIATION'] = [2, 2, 2]
p['DECIMATE_REDUCTION'] = 0.95
p['DECIMATE_ITERATIONS'] = 5
p['DECIMATE_ERROR'] = 0.0002
p['DECIMATE_ERROR_INCREMENT'] = 0.0002
p['SMOOTH_FACTOR'] = 0.1
return p
def heart():
p = frog()
p['NAME'] = 'heart'
p['TISSUE'] = 6
p['START_SLICE'] = 49
p['END_SLICE'] = 93
p['VOI'] = [217, 299, 186, 266, p['START_SLICE'], p['END_SLICE']]
return p
def ileum():
p = frog()
p['NAME'] = 'ileum'
p['TISSUE'] = 7
p['START_SLICE'] = 25
p['END_SLICE'] = 93
p['VOI'] = [172, 243, 201, 290, p['START_SLICE'], p['END_SLICE']]
return p
def kidney():
p = frog()
p['NAME'] = 'kidney'
p['TISSUE'] = 8
p['START_SLICE'] = 24
p['END_SLICE'] = 78
p['VOI'] = [116, 238, 193, 263, p['START_SLICE'], p['END_SLICE']]
return p
def l_intestine():
p = frog()
p['NAME'] = 'l_intestine'
p['TISSUE'] = 9
p['START_SLICE'] = 56
p['END_SLICE'] = 106
p['VOI'] = [115, 224, 209, 284, p['START_SLICE'], p['END_SLICE']]
return p
def liver():
p = frog()
p['NAME'] = 'liver'
p['TISSUE'] = 10
p['START_SLICE'] = 25
p['END_SLICE'] = 126
p['VOI'] = [167, 297, 154, 304, p['START_SLICE'], p['END_SLICE']]
return p
def lung():
p = frog()
p['NAME'] = 'lung'
p['TISSUE'] = 11
p['START_SLICE'] = 24
p['END_SLICE'] = 59
p['VOI'] = [222, 324, 157, 291, p['START_SLICE'], p['END_SLICE']]
return p
def nerve():
p = frog()
p['NAME'] = 'nerve'
p['TISSUE'] = 12
p['START_SLICE'] = 7
p['END_SLICE'] = 113
p['VOI'] = [79, 403, 63, 394, p['START_SLICE'], p['END_SLICE']]
return p
def skin():
p = default_parameters()
p['NAME'] = 'skin'
p['TISSUE'] = 0
p['ROWS'] = 470
p['COLUMNS'] = 500
p['STUDY'] = 'frog'
p['SLICE_ORDER'] = 'si'
p['PIXEL_SIZE'] = 1
p['SPACING'] = 1.5
p['START_SLICE'] = 1
p['END_SLICE'] = 138
p['VOI'] = [0, 499, 0, 469, p['START_SLICE'], p['END_SLICE']]
p['VALUE'] = 10.5
p['SAMPLE_RATE'] = [2, 2, 1]
p['DECIMATE_REDUCTION'] = 0.95
p['DECIMATE_ITERATIONS'] = 10
p['DECIMATE_ERROR'] = 0.0002
p['DECIMATE_ERROR_INCREMENT'] = 0.0002
p['FEATURE_ANGLE'] = 60
p['OPACITY'] = 0.4
return p
def skeleton():
p = frog()
p['STUDY'] = 'frogtissue'
p['NAME'] = 'skeleton'
p['TISSUE'] = 13
p['VALUE'] = 64.5
p['START_SLICE'] = 1
p['END_SLICE'] = 136
p['VOI'] = [23, 479, 8, 469, p['START_SLICE'], p['END_SLICE']]
p['GAUSSIAN_STANDARD_DEVIATION'] = [1.5, 1.5, 1]
return p
def spleen():
p = frog()
p['NAME'] = 'spleen'
p['TISSUE'] = 14
p['START_SLICE'] = 45
p['END_SLICE'] = 68
p['VOI'] = [166, 219, 195, 231, p['START_SLICE'], p['END_SLICE']]
return p
def stomach():
p = frog()
p['NAME'] = 'stomach'
p['TISSUE'] = 15
p['START_SLICE'] = 26
p['END_SLICE'] = 119
p['VOI'] = [143, 365, 158, 297, p['START_SLICE'], p['END_SLICE']]
return p
def tissue_parameters():
t = dict()
t['blood'] = blood()
t['brain'] = brain()
t['brainbin'] = brainbin()
t['duodenum'] = duodenum()
t['eye_retna'] = eye_retna()
t['eye_white'] = eye_white()
t['frog'] = frog()
t['heart'] = heart()
t['ileum'] = ileum()
t['kidney'] = kidney()
t['l_intestine'] = l_intestine()
t['liver'] = liver()
t['lung'] = lung()
t['nerve'] = nerve()
t['skin'] = skin()
t['skeleton'] = skeleton()
t['spleen'] = spleen()
t['stomach'] = stomach()
return t
def create_frog_lut(colors):
lut = vtkLookupTable()
lut.SetNumberOfColors(16)
lut.SetTableRange(0, 15)
lut.Build()
lut.SetTableValue(0, colors.GetColor4d('LimeGreen')) # skin
lut.SetTableValue(1, colors.GetColor4d('salmon')) # blood
lut.SetTableValue(2, colors.GetColor4d('beige')) # brain
lut.SetTableValue(3, colors.GetColor4d('orange')) # duodenum
lut.SetTableValue(4, colors.GetColor4d('misty_rose')) # eye_retina
lut.SetTableValue(5, colors.GetColor4d('white')) # eye_white
lut.SetTableValue(6, colors.GetColor4d('tomato')) # heart
lut.SetTableValue(7, colors.GetColor4d('raspberry')) # ileum
lut.SetTableValue(8, colors.GetColor4d('banana')) # kidney
lut.SetTableValue(9, colors.GetColor4d('peru')) # l_intestine
lut.SetTableValue(10, colors.GetColor4d('pink')) # liver
lut.SetTableValue(11, colors.GetColor4d('powder_blue')) # lung
lut.SetTableValue(12, colors.GetColor4d('carrot')) # nerve
lut.SetTableValue(13, colors.GetColor4d('wheat')) # skeleton
lut.SetTableValue(14, colors.GetColor4d('violet')) # spleen
lut.SetTableValue(15, colors.GetColor4d('plum')) # stomach
return lut
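# --- Hedged usage sketch (not part of the original example) ---
# create_frog_lut() only needs an object exposing GetColor4d(), i.e. a vtkNamedColors
# instance; the import path below assumes the VTK >= 9 module layout and the helper
# name is hypothetical.
def _sketch_build_frog_lut():
    from vtkmodules.vtkCommonColor import vtkNamedColors
    # maps tissue values 0..15 to the colors listed above
    return create_frog_lut(vtkNamedColors())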
def check_for_required_parameters(tissue, parameters):
required = {'NAME', 'END_SLICE', 'TISSUE', 'STUDY', 'ROWS',
'COLUMNS', 'VALUE', 'SPACING',
'GAUSSIAN_STANDARD_DEVIATION', 'VOI',
'DECIMATE_ITERATIONS'}
k = set(parameters.keys())
s = None
if len(k) == 0:
s = 'Missing parameters for {:11s}: {:s}'.format(tissue, ', '.join(map(str, required)))
else:
d = required.difference(k)
if d:
s = 'Missing parameters for {:11s}: {:s}'.format(tissue, ', '.join(map(str, d)))
return s
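# --- Hedged usage sketch (not part of the original example) ---
# tissue_parameters() and check_for_required_parameters() are both defined above; this
# illustrative helper (hypothetical name) runs the completeness check over every tissue
# dict and collects any complaints.
def _sketch_validate_all_tissues():
    problems = []
    for tissue_name, params in tissue_parameters().items():
        msg = check_for_required_parameters(tissue_name, params)
        if msg is not None:
            problems.append(msg)
    return problems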
def format_timings(ict):
res = list()
total = 0
sk = sorted(ict.keys())
for k in sk:
sigma = 0
res.append('{:11s}'.format(k))
skk = sorted(ict[k].keys())
for kk in skk:
sigma += ict[k][kk]
res.append('{:11s}{:13s} {:5.2f}s'.format(' ', kk, ict[k][kk]))
total += sigma
res.append('Subtotal: {:5.2f}s'.format(sigma))
res.append(' Total: {:5.2f}s'.format(total))
return res
if __name__ == '__main__':
import sys
data_folder, tissue, view, flying_edges, decimate = get_program_parameters(sys.argv)
main(data_folder, tissue, view, flying_edges, decimate)
| 31.093602
| 121
| 0.622185
| 4,791
| 0.182556
| 0
| 0
| 177
| 0.006744
| 0
| 0
| 6,854
| 0.261164
|
3d3611984ad47f38b9bcaf5c70b8693991e55438
| 3,202
|
py
|
Python
|
mfr/extensions/tabular/libs/stdlib_tools.py
|
yacchin1205/RDM-modular-file-renderer
|
5bd18175a681d21e7be7fe0238132335a1cd8ded
|
[
"Apache-2.0"
] | 36
|
2015-08-31T20:24:22.000Z
|
2021-12-17T17:02:44.000Z
|
mfr/extensions/tabular/libs/stdlib_tools.py
|
yacchin1205/RDM-modular-file-renderer
|
5bd18175a681d21e7be7fe0238132335a1cd8ded
|
[
"Apache-2.0"
] | 190
|
2015-01-02T06:22:01.000Z
|
2022-01-19T11:27:03.000Z
|
mfr/extensions/tabular/libs/stdlib_tools.py
|
yacchin1205/RDM-modular-file-renderer
|
5bd18175a681d21e7be7fe0238132335a1cd8ded
|
[
"Apache-2.0"
] | 47
|
2015-01-27T15:45:22.000Z
|
2021-01-27T22:43:03.000Z
|
import re
import csv
from mfr.extensions.tabular.exceptions import EmptyTableError, TabularRendererError
from mfr.extensions.tabular import utilities
def csv_stdlib(fp):
"""Read and convert a csv file to JSON format using the python standard library
:param fp: File pointer object
:return: tuple of table headers and data
"""
data = fp.read(2048)
fp.seek(0)
try:
dialect = csv.Sniffer().sniff(data)
except csv.Error:
dialect = csv.excel
else:
_set_dialect_quote_attrs(dialect, data)
del data
reader = csv.DictReader(fp, dialect=dialect)
columns = []
# update the reader field names to avoid duplicate column names when performing row extraction
for idx, fieldname in enumerate(reader.fieldnames or []):
column_count = sum(1 for column in columns if fieldname == column['name'])
if column_count:
unique_fieldname = '{}-{}'.format(fieldname, column_count + 1)
reader.fieldnames[idx] = unique_fieldname
else:
unique_fieldname = fieldname
columns.append({
'id': unique_fieldname,
'field': unique_fieldname,
'name': fieldname,
'sortable': True,
})
try:
rows = [row for row in reader]
except csv.Error as e:
if any("field larger than field limit" in errorMsg for errorMsg in e.args):
raise TabularRendererError(
'This file contains a field too large to render. '
'Please download and view it locally.',
code=400,
extension='csv',
) from e
else:
raise TabularRendererError('csv.Error: {}'.format(e), extension='csv') from e
if not columns and not rows:
raise EmptyTableError('Table empty or corrupt.', extension='csv')
del reader
return {'Sheet 1': (columns, rows)}
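# --- Hedged usage sketch (not part of the original module) ---
# csv_stdlib() expects an already-open text-mode file pointer and returns
# {'Sheet 1': (columns, rows)}; the path and helper name below are purely illustrative.
def _sketch_read_csv(path='example.csv'):
    with open(path, newline='') as fp:
        columns, rows = csv_stdlib(fp)['Sheet 1']
    return [c['name'] for c in columns], len(rows)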
def sav_stdlib(fp):
"""Read and convert a .sav file to .csv with pspp, then convert that to JSON format using
the python standard library
:param fp: File pointer object to a .sav file
:return: tuple of table headers and data
"""
csv_file = utilities.sav_to_csv(fp)
with open(csv_file.name, 'r') as file:
csv_file.close()
return csv_stdlib(file)
def _set_dialect_quote_attrs(dialect, data):
"""Set quote-related dialect attributes based on up to 2kb of csv data.
The regular expressions search for things that look like the beginning of
a list, wrapped in a quotation mark that is not dialect.quotechar, with
list items wrapped in dialect.quotechar and seperated by commas.
Example matches include:
"['1', '2', '3' for quotechar == '
'{"a", "b", "c" for quotechar == "
"""
if dialect.quotechar == '"':
if re.search('\'[[({]".+",', data):
dialect.quotechar = "'"
if re.search("'''[[({]\".+\",", data):
dialect.doublequote = True
elif dialect.quotechar == "'":
if re.search("\"[[({]'.+',", data):
dialect.quotechar = '"'
if re.search('"""[[({]\'.+\',', data):
dialect.doublequote = True
| 33.705263
| 98
| 0.605559
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,221
| 0.381324
|
3d36845f210b13d26d7504e09092d4846041c87f
| 4,191
|
py
|
Python
|
code/master_web/app/template_support.py
|
glenn-edgar/lacima_ranch_cloud
|
0827bdd497295c931cf1a06e97880009773e77be
|
[
"MIT"
] | null | null | null |
code/master_web/app/template_support.py
|
glenn-edgar/lacima_ranch_cloud
|
0827bdd497295c931cf1a06e97880009773e77be
|
[
"MIT"
] | null | null | null |
code/master_web/app/template_support.py
|
glenn-edgar/lacima_ranch_cloud
|
0827bdd497295c931cf1a06e97880009773e77be
|
[
"MIT"
] | null | null | null |
#
#
# This is Support for Drawing Bullet Charts
#
#
#
#
#
#
#
'''
This is the return json value to the javascript front end
{ "canvasName":"canvas1","featuredColor":"Green", "featuredMeasure":14.5,
"qualScale1":14.5, "qualScale1Color":"Black","titleText":"Step 1" },
{ "canvasName":"canvas2","featuredColor":"Blue", "featuredMeasure":14.5,
"qualScale1":14.5, "qualScale1Color":"Black","titleText":"Step 2" },
{ "canvasName":"canvas3","featuredColor":"Red", "featuredMeasure":14.5,
"qualScale1":14.5, "qualScale1Color":"Black","titleText":"Step 3" },
'''
class template_support():
def __init__(self , redis_handle, statistics_module):
self.redis_handle = redis_handle
self.statistics_module = statistics_module
def generate_current_canvas_list( self, schedule_name, *args, **kwargs ):
return_value = []
self.schedule_name = schedule_name
data = self.statistics_module.schedule_data[ schedule_name ]
current_data = self.statistics_module.get_current_data( data["step_number"],schedule_name )
limit_values = self.statistics_module.get_current_limit_values( data["step_number"],schedule_name )
for i in range(0,data["step_number"]):
temp = {}
temp["canvasName"] = "canvas1" +str(i+1)
temp["titleText"] = "Step " +str(i+1)
temp["qualScale1Color"] = "Black"
temp["featuredColor"] = "Red"
temp["qualScale1"] = limit_values[i]['limit_avg']
temp["featuredMeasure"] = current_data[i]
temp["limit"] = limit_values[i]['limit_std']
temp["step"] = i
return_value.append(temp)
return return_value
def generate_canvas_list(self, schedule_name, flow_id , *args,**kwargs):
return_value = []
self.schedule_name = schedule_name
data = self.statistics_module.schedule_data[ schedule_name ]
flow_sensors = self.statistics_module.sensor_names
flow_sensor_name = flow_sensors[flow_id]
conversion_rate = self.statistics_module.conversion_rate[flow_id]
flow_data = self.statistics_module.get_average_flow_data( data["step_number"], flow_sensor_name, schedule_name )
limit_values = self.statistics_module.get_flow_limit_values( data["step_number"], flow_sensor_name, schedule_name )
for i in limit_values:
try:
i['limit_avg'] = float(i['limit_avg'])*conversion_rate
i['limit_std'] = float(i['limit_std'])*conversion_rate
except:
pass
corrected_flow = []
for i in flow_data:
temp1 = []
for j in i:
temp1.append( j *conversion_rate)
corrected_flow.append(temp1)
for i in range(0,data["step_number"]):
temp = {}
temp["canvasName"] = "canvas1" +str(i+1)
temp["titleText"] = "Step " +str(i+1)
temp["qualScale1Color"] = "Black"
temp["featuredColor"] = "Red"
try:
temp["qualScale1"] = limit_values[i]['limit_avg']
except:
temp["qualScale1"] = 0
try:
temp["featuredMeasure"] = corrected_flow[i]
except:
temp["featuredMeasure"] = 0
try:
temp["limit"] = limit_values[i]['limit_std']
except:
temp["limit"] = 0
return_value.append(temp)
return return_value
| 37.756757
| 124
| 0.504653
| 3,394
| 0.809831
| 0
| 0
| 0
| 0
| 0
| 0
| 1,226
| 0.292532
|
3d372653470996da64ee3dcad6250a21dbd2e6ea
| 52
|
py
|
Python
|
zucchini/graders/exceptions.py
|
dbecker1/zucchini
|
47eb9a40b47bb1b131dcfd0073596ccf8816562c
|
[
"Apache-2.0"
] | 3
|
2018-03-27T18:09:54.000Z
|
2021-04-08T03:03:55.000Z
|
zucchini/graders/exceptions.py
|
dbecker1/zucchini
|
47eb9a40b47bb1b131dcfd0073596ccf8816562c
|
[
"Apache-2.0"
] | 337
|
2017-12-17T13:22:26.000Z
|
2022-03-28T02:05:09.000Z
|
zucchini/graders/exceptions.py
|
dbecker1/zucchini
|
47eb9a40b47bb1b131dcfd0073596ccf8816562c
|
[
"Apache-2.0"
] | 7
|
2018-01-10T18:46:26.000Z
|
2020-10-17T17:47:07.000Z
|
class InvalidGraderConfigError(Exception):
pass
| 17.333333
| 42
| 0.807692
| 51
| 0.980769
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
3d373999e9b389d4982c3184efb41a30e1a5425d
| 1,108
|
py
|
Python
|
datapack/data/scripts/custom/8871_gve/__init__.py
|
DigitalCoin1/L2SPERO
|
f9ec069804d7bf13f9c4bfb508db2eb6ce37ab94
|
[
"Unlicense"
] | null | null | null |
datapack/data/scripts/custom/8871_gve/__init__.py
|
DigitalCoin1/L2SPERO
|
f9ec069804d7bf13f9c4bfb508db2eb6ce37ab94
|
[
"Unlicense"
] | null | null | null |
datapack/data/scripts/custom/8871_gve/__init__.py
|
DigitalCoin1/L2SPERO
|
f9ec069804d7bf13f9c4bfb508db2eb6ce37ab94
|
[
"Unlicense"
] | null | null | null |
# Author ProGramMoS, Scoria Dev
# Version 0.2b
import sys
from com.l2jfrozen.gameserver.model.actor.instance import L2PcInstance
from com.l2jfrozen.util.database import L2DatabaseFactory
from com.l2jfrozen.gameserver.model.quest import State
from com.l2jfrozen.gameserver.model.quest import QuestState
from com.l2jfrozen.gameserver.model.quest.jython import QuestJython as JQuest
qn = "8871_gve"
class Quest (JQuest) :
def __init__(self,id,name,descr): JQuest.__init__(self,id,name,descr)
def onEvent(self,event,st):
        st.getPlayer().setTarget(st.getPlayer())
        if event == "1": #good
            st.getPlayer().setGood(True)
            st.setState(COMPLETED)
        if event == "2": #evil
            st.getPlayer().setEvil(True)
            st.setState(COMPLETED)
        if event == "3": #unfact good
            st.getPlayer().setGood(False)
            st.setState(COMPLETED)
        if event == "4": #unfact evil
            st.getPlayer().setEvil(False)
            st.setState(COMPLETED)
        return
QUEST = Quest(8871,qn,"custom")
CREATED = State('Start',QUEST)
STARTED = State('Started',QUEST)
COMPLETED = State('Completed',QUEST)
QUEST.setInitialState(CREATED)
| 25.767442
| 77
| 0.731949
| 531
| 0.479242
| 0
| 0
| 0
| 0
| 0
| 0
| 136
| 0.122744
|
3d37fed769b12cfb4e9da6c616fc01c8b6b51490
| 3,740
|
py
|
Python
|
src/hypergol/base_data.py
|
hypergol/hypergol
|
0beee71c8f72d517ef376030baff9c840a2f7eeb
|
[
"MIT"
] | 49
|
2020-07-09T10:22:25.000Z
|
2022-02-21T16:55:34.000Z
|
src/hypergol/base_data.py
|
hypergol/hypergol
|
0beee71c8f72d517ef376030baff9c840a2f7eeb
|
[
"MIT"
] | 16
|
2020-08-18T17:06:05.000Z
|
2022-02-19T16:30:04.000Z
|
src/hypergol/base_data.py
|
hypergol/hypergol
|
0beee71c8f72d517ef376030baff9c840a2f7eeb
|
[
"MIT"
] | 3
|
2020-07-16T08:42:09.000Z
|
2021-03-06T15:09:13.000Z
|
import json
import base64
import pickle
from hypergol.repr import Repr
class NoIdException(Exception):
pass
class BaseData(Repr):
"""
Base class for all domain objects.
Extends the Repr convenience base class that provides printing facilities.
Provides to_data and from_data serialisation interfaces.
Provides get_id, get_hash_id interfaces.
Provides test capabilities for all of the above so changes to the derived classes can be checked quickly.
"""
def __eq__(self, other):
        if not isinstance(self, type(other)):
            return False
        try:
            for k in set(self.__dict__.keys()) | set(other.__dict__.keys()):
                if self.__dict__[k] != other.__dict__[k]:
                    return False
            return True
        except KeyError:
            return False
def get_id(self):
"""Returns the class's id if exists"""
raise NoIdException(f"{self.__class__.__name__} doesn't have an id")
def get_hash_id(self):
"""Returns the class's hash id if exists, defaults to get_id()"""
return self.get_id()
def to_data(self):
"""Converts class to a dictionary, usually overridden"""
return self.__dict__.copy()
@staticmethod
def to_string(data):
return base64.b64encode(pickle.dumps(data)).decode('utf-8')
@staticmethod
def from_string(data):
return pickle.loads(base64.b64decode(data.encode('utf-8')))
@classmethod
def from_data(cls, data):
"""Creates a class from data, usually overridden
Parameters
----------
data : dictionary form of data representing the object
Usually the result of a previous to_data() call.
"""
return cls(**data)
def test_get_hash_id(self):
"""Tests if the derived class correctly returns a tuple for an id"""
try:
classId = self.get_hash_id() # pylint: disable=assignment-from-no-return
except NoIdException:
return True
if not isinstance(classId, tuple):
raise ValueError(f'Return of get_id must be a tuple instead of {type(classId)}')
return True
def test_to_data(self):
"""Tests if the output of the derived class's to_data() function can be converted to a string by ``json.dumps()``"""
originalData = self.__dict__.copy()
data = self.to_data()
for k, v in self.__dict__.items():
if v != originalData[k]:
raise AssertionError(f'{self.__class__.__name__}.to_data() changes the instance itself: {k}: {v} != {originalData[k]}')
try:
_ = json.dumps(data)
except TypeError as ex:
raise TypeError(f'{self.__class__.__name__} JSON serde test failed: {ex}')
return True
def test_from_data(self):
"""Tests if a roundtrip of ``self.from_data(self.to_data())`` modifies the class"""
selfCopy = self.from_data(self.to_data())
if not isinstance(self, type(selfCopy)):
raise AssertionError(f'{self.__class__.__name__}.from_data() does not return the correct type: {self.__class__.__name__} vs {selfCopy.__class__.__name__}, from_data() return value should be "cls(**data)"')
for k, v in selfCopy.__dict__.items():
if v != self.__dict__[k]:
if str(k) == str(v):
raise AssertionError(f'{self.__class__.__name__}.from_data() returns keys as values: {k}: {v} != {self.__dict__[k]}, from_data() return value should be "cls(**data)"')
raise AssertionError(f'{self.__class__.__name__}.from_data() does not deserialise: {k}: {v} != {self.__dict__[k]}')
return True
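# --- Hedged usage sketch (not part of the original module) ---
# A minimal domain class derived from BaseData; the class and field names are illustrative.
# It follows the pattern described above: implement get_id() returning a tuple and rely on
# to_data()/from_data() plus the built-in self-tests.
class _SketchDocument(BaseData):
    def __init__(self, documentId: int, text: str):
        self.documentId = documentId
        self.text = text
    def get_id(self):
        return (self.documentId, )
def _sketch_round_trip():
    doc = _SketchDocument(documentId=1, text='hello')
    assert doc.test_get_hash_id() and doc.test_to_data() and doc.test_from_data()
    return _SketchDocument.from_data(doc.to_data()) == doc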
| 37.029703
| 217
| 0.625936
| 3,662
| 0.979144
| 0
| 0
| 515
| 0.137701
| 0
| 0
| 1,739
| 0.464973
|
3d392bdfd33f424fff8045fe8d11d2926903d55e
| 829
|
py
|
Python
|
examples/spark-function.py
|
Hedingber/mlrun
|
e2269718fcc7caa7e1aa379ac28495830b45f9da
|
[
"Apache-2.0"
] | 1
|
2021-02-17T08:12:33.000Z
|
2021-02-17T08:12:33.000Z
|
examples/spark-function.py
|
Hedingber/mlrun
|
e2269718fcc7caa7e1aa379ac28495830b45f9da
|
[
"Apache-2.0"
] | 1
|
2020-12-31T14:36:29.000Z
|
2020-12-31T14:36:29.000Z
|
examples/spark-function.py
|
Hedingber/mlrun
|
e2269718fcc7caa7e1aa379ac28495830b45f9da
|
[
"Apache-2.0"
] | 1
|
2021-08-30T21:43:38.000Z
|
2021-08-30T21:43:38.000Z
|
# Pyspark example called by mlrun_spark_k8s.ipynb
from pyspark.sql import SparkSession
from mlrun import get_or_create_ctx
# Acquire MLRun context
mlctx = get_or_create_ctx("spark-function")
# Get MLRun parameters
mlctx.logger.info("!@!@!@!@!@ Getting env variables")
READ_OPTIONS = mlctx.get_param("data_sources")
QUERY = mlctx.get_param("query")
WRITE_OPTIONS = mlctx.get_param("write_options")
# Create spark session
spark = SparkSession.builder.appName("Spark function").getOrCreate()
# Loading data from a JDBC source
for data_source in READ_OPTIONS:
spark.read.load(**READ_OPTIONS[data_source]).createOrReplaceTempView(data_source)
# Transform the data using SQL query
spark.sql(QUERY).write.save(**WRITE_OPTIONS)
# write the result dataframe to destination
mlctx.logger.info("!@!@!@!@!@ Saved")
spark.stop()
| 26.741935
| 85
| 0.772014
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 348
| 0.419783
|
3d39a4a34099b547fd394be7429e0efce238f402
| 3,654
|
py
|
Python
|
scripts/create_dataset.py
|
maxrousseau/dl-anesthesia
|
e5de2ecfc9d9e954f3ee36eedb13332589dfc27e
|
[
"MIT"
] | null | null | null |
scripts/create_dataset.py
|
maxrousseau/dl-anesthesia
|
e5de2ecfc9d9e954f3ee36eedb13332589dfc27e
|
[
"MIT"
] | null | null | null |
scripts/create_dataset.py
|
maxrousseau/dl-anesthesia
|
e5de2ecfc9d9e954f3ee36eedb13332589dfc27e
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python3
import os, glob
import datetime
import xmltodict as xd
import numpy as np
import pandas as pd
import h5py
import matplotlib
import matplotlib.pyplot as plt
from sklearn import preprocessing
# lets make a little data set for fun...
mh_dir = os.path.abspath('./db/mh_data/')
mh_cases = glob.glob(os.path.join(mh_dir, '*'))
# sample = os.path.abspath('./db/asac_data/case10.xml') >> TODO: we will need
# to make modifications for this dataset
def xml_parser(xml_path):
with open(xml_path) as fd:
doc = xd.parse(fd.read())
fd.close()
raw_db = doc['anaesthetic']['data']['var']
print("FILE READ")
for i in raw_db[3:]:
name = i['vaname']
times = str(i['vatimes']).replace('None','000000').split(',')
values = str(i['vavalues']).replace('NA','nan').split(',')
times = np.asarray(times)
values = np.asarray(values).astype('float')
var_df = pd.DataFrame(data = {'time' : times, name : values})
if 'full_df' in locals():
full_df = full_df.join(var_df.set_index('time'), on='time')
else:
full_df = var_df
print("XML PARSED")
return full_df
class Input: # an input struct
pass
db = [] # list of all input structs
def delta_spo2(spo2_arr):
# compute the difference between the maximum value
max_val = max(spo2_arr)
min_val = min(spo2_arr)
d = max_val - min_val
return d
# a sample will be 6 entries (=60 seconds) of every datapoint to determine if
# there will be a change in spo2 in the next 60 seconds
# spo2, hr,
def data_generator(patient_df):
# slice the df into array of 6 element dfs
interval_df = []
for i in range(patient_df.shape[0]):
if (i+1) % 6 == 0:
# split every 6 timestamp (60 seconds)
a = i - 5
interval_df.append(patient_df[a:i+1])
else:
continue
# compute spo2 delta
for i in range(len(interval_df)):
sample = Input()
sample.x = np.asarray(interval_df[i].unstack()) # vector of input data from
try:
sample.d = delta_spo2(interval_df[i+1]['spo2.SpO2'])
except:
print("end of dataset")
break
# label
if sample.d > 0.011:
sample.y = 1
else:
sample.y = 0
db.append(sample)
return db
# parse every xml file and save each to a separate h5 file for future use
# spo2.SpO2, co2.et, ecg.hr, nibp.sys, nibp.dia
def mk_npy():
for i in mh_cases:
print(i)
df = xml_parser(i)
# for all features simply use df
# spo2.SpO2, co2.et, ecg.hr, nibp.sys, nibp.dia
df2 = pd.DataFrame(df,
columns=['ecg.hr',
'co2.et', 'nibp.sys',
'nibp.dia', 'spo2.SpO2']
)
df2 = df2[np.abs(df2-df2.mean()) <= (3*df2.std())]
df2 = df2.dropna()
# scale the values between 1-0 the data by patient....
x = df2.values
min_max_scaler = preprocessing.MinMaxScaler()
x_scaled = min_max_scaler.fit_transform(x)
df2 = pd.DataFrame(x_scaled, columns=df2.columns)
data_generator(df2)
X = []
Y = []
for i in db:
X.append(i.x)
Y.append(i.y)
X = np.asarray(X).astype('float')
Y = np.asarray(Y).astype('int')
print("stable: " + str(np.sum(Y == 0)))
print("unstable: " + str(np.sum(Y == 1)))
np.save("x3.npy", X)
np.save("y3.npy", Y)
mk_npy()
# boom load it...
#X = np.load("x.npy", X) # (3740, 306)
#Y = np.load("y.npy", Y) # (3740,)
| 24.36
| 84
| 0.570881
| 39
| 0.010673
| 0
| 0
| 0
| 0
| 0
| 0
| 1,170
| 0.320197
|
3d39d78b8b90f5a0e60b1cd9c3435a778082fd09
| 636
|
py
|
Python
|
ossdbtoolsservice/admin/contracts/__init__.py
|
DaeunYim/pgtoolsservice
|
b7e548718d797883027b2caee2d4722810b33c0f
|
[
"MIT"
] | 33
|
2019-05-27T13:04:35.000Z
|
2022-03-17T13:33:05.000Z
|
ossdbtoolsservice/admin/contracts/__init__.py
|
DaeunYim/pgtoolsservice
|
b7e548718d797883027b2caee2d4722810b33c0f
|
[
"MIT"
] | 31
|
2019-06-10T01:55:47.000Z
|
2022-03-09T07:27:49.000Z
|
ossdbtoolsservice/admin/contracts/__init__.py
|
DaeunYim/pgtoolsservice
|
b7e548718d797883027b2caee2d4722810b33c0f
|
[
"MIT"
] | 25
|
2019-05-13T18:39:24.000Z
|
2021-11-16T03:07:33.000Z
|
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
from ossdbtoolsservice.admin.contracts.get_database_info_request import (
DatabaseInfo, GetDatabaseInfoParameters, GetDatabaseInfoResponse, GET_DATABASE_INFO_REQUEST)
__all__ = [
'DatabaseInfo', 'GetDatabaseInfoParameters', 'GetDatabaseInfoResponse', 'GET_DATABASE_INFO_REQUEST'
]
| 53
| 103
| 0.575472
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 434
| 0.68239
|
3d3a5919d0773f6fa55679eeb76000e332ce88f7
| 38,534
|
py
|
Python
|
whacc/image_tools.py
|
hireslab/whacc
|
e0ccfe4ee784609cacd4cf62a17192687a5dff51
|
[
"MIT"
] | 1
|
2021-05-27T00:34:46.000Z
|
2021-05-27T00:34:46.000Z
|
whacc/image_tools.py
|
hireslab/whacc
|
e0ccfe4ee784609cacd4cf62a17192687a5dff51
|
[
"MIT"
] | null | null | null |
whacc/image_tools.py
|
hireslab/whacc
|
e0ccfe4ee784609cacd4cf62a17192687a5dff51
|
[
"MIT"
] | null | null | null |
import tensorflow as tf
from tensorflow import keras
import matplotlib.pyplot as plt
import numpy as np
import h5py
import copy
import time
import os
from whacc import utils
def isnotebook():
try:
c = str(get_ipython().__class__)
shell = get_ipython().__class__.__name__
if 'colab' in c:
return True
elif shell == 'ZMQInteractiveShell':
return True # Jupyter notebook or qtconsole
elif shell == 'TerminalInteractiveShell':
return False # Terminal running IPython
else:
return False # Other type (?)
except NameError:
return False # Probably standard Python interpreter
if isnotebook():
from tqdm.notebook import tqdm
else:
from tqdm import tqdm
def stack_imgs_lag(imgs, frames_1=None, buffer=2, shift_to_the_right_by=0):
if frames_1 is None:
frames_1 = [imgs.shape[0]]
array_group = []
for k1, k2 in utils.loop_segments(frames_1):
x = (np.random.random(imgs[0].shape) * 255).astype(np.uint8)
tile_axes = [1] * len(x.shape) + [buffer]
x = np.tile(x[:, :, None], tile_axes)
tmp1 = x.copy()
for ii, stack_i in enumerate(range(k1, k2)):
x = np.concatenate((x, imgs[stack_i][:, :, None]), axis=2)
x = np.concatenate((x, tmp1), axis=2)
for k3 in range(k2 - k1):
array_group.append(x[:, :, k3 + shift_to_the_right_by: k3 + 1 + buffer + shift_to_the_right_by])
return np.asarray(array_group)
def get_h5_key_and_concatenate(h5_list, key_name='labels'):
"""
    simply extract and concatenate all of one key "key_name" from many H5 files. I use it to balance the data between
    touch and no-touch frames when training a model with a list of different H5 files
Parameters
----------
h5_list : list
list of full paths to H5 file(s).
key_name : str
default 'labels', the key to get the data from the H5 file
"""
h5_list = utils.make_list(h5_list, suppress_warning=True)
for i, k in enumerate(h5_list):
with h5py.File(k, 'r') as h:
if i == 0:
out = np.asarray(h[key_name][:])
else:
out = np.concatenate((out, h[key_name][:]))
return out
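# --- Hedged usage sketch (not part of the original module) ---
# The docstring above mentions balancing touch / no-touch frames; with binary labels the
# concatenated label vector is enough to derive simple class weights. The helper name is
# hypothetical and h5_list is whatever list of H5 paths you pass in.
def _sketch_class_weights(h5_list):
    labels = get_h5_key_and_concatenate(h5_list, key_name='labels')
    touch_fraction = float(np.mean(labels == 1))
    # weight each class inversely to its frequency, guarding against an empty class
    return {0: 0.5 / max(1e-8, 1.0 - touch_fraction),
            1: 0.5 / max(1e-8, touch_fraction)}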
def get_h5_key_and_dont_concatenate(h5_list, key_name='labels'):
"""
    simply extract one key "key_name" from many H5 files without concatenating, returning one list per file. I use it to
    balance the data between touch and no-touch frames when training a model with a list of different H5 files
Parameters
----------
h5_list : list
list of full paths to H5 file(s).
key_name : str
default 'labels', the key to get the data from the H5 file
"""
out = []
for i, k in enumerate(h5_list):
with h5py.File(k, 'r') as h:
out.append(list(h[key_name][:]))
return out
def clone_h5_basic_info(H5_list, fold_name=None, file_end='_QUICK_SAVE.h5'):
"""
    copies all the info from one H5 into another H5 file NOT INCLUDING the labels or images, so it has all the file info,
    like names and pole locations and pole match max value stack. anything with 'images', 'MODEL__' or 'labels' is
    not copied over to the new file.
Parameters
----------
H5_list : list
list of H5 files to clone
fold_name : str
default None, where to place the cloned H5 files. if left blank it will place in the same folder as the original file
file_end : str
default '_QUICK_SAVE.h5', how to change the name of the H5 file to be cloned to differentiate it from the original
Returns
-------
all_new_h5s: list
list of new H5 full file names
"""
if fold_name is not None:
try:
os.mkdir(fold_name)
except:
pass
all_new_h5s = []
for h5 in H5_list:
if fold_name is not None:
new_fn = fold_name + os.path.sep + os.path.basename(h5)[:-3] + file_end
else: #
new_fn = os.path.dirname(h5) + os.path.sep + os.path.basename(h5)[:-3] + file_end
all_new_h5s.append(new_fn)
try:
os.remove(new_fn)
except:
pass
with h5py.File(new_fn, 'w') as f1:
with h5py.File(h5, 'r') as f2:
for i, k in enumerate(f2.keys()):
if 'images' != k and 'MODEL__' not in k and 'labels' not in k:
f1.create_dataset(k, data=f2[k][:])
f2.close()
f1.close()
return all_new_h5s
def del_h5_with_term(h5_list, str_2_cmp):
"""
Parameters
----------
h5_list : list
list of H5 strings (full path)
str_2_cmp : str
will delete keys with this in their title ... e.g. '__RETRAIN'
"""
for k2 in h5_list:
with h5py.File(k2, 'a') as h5_source:
for k in h5_source.keys():
if str_2_cmp in k:
print('del--> ' + k)
del h5_source[k]
print('_______')
def split_h5_loop_segments(h5_to_split_list, split_percentages, temp_base_name, chunk_size=10000,
add_numbers_to_name=True,
disable_TQDM=False, set_seed=None, color_channel=True):
"""Randomly splits images from a list of H5 file(s) into len(split_percentages) different H5 files.
Parameters
----------
h5_to_split_list : list
list of strings with full file names to the H5 file(s) to be split
split_percentages : list
list of numbers, can be ints [20, 1, 1] and or floats [.8, .2], it simply takes the sum and creates a percentage
temp_base_name : str or list
full path to new h5 file e.g "'/Users/phil/tempH5_" and the program will add the number and the ".h5"
in this case tempH5_0.h5, tempH5_1.h5, tempH5_2.h5 etc. or if it is a list it must be equal in length to
'split_percentages' and each file will be named based on that list
chunk_size = int
default 10000, max amount of frames to hold in memory at a time before storing in H5 file. Should almost never
be an issue but just in case you can set to a lower value if you experience memory issues.
add_numbers_to_name = bool
default true, just in case you don't want the numbers on the end of your h5 file.
Returns
Examples
--------
from whacc import image_tools, utils
h5_to_split_list = "/Users/phil/Downloads/untitled folder 2/AH0000x000000_small_tester.h5"
h5_to_split_list = [h5_to_split_list]
utils.print_h5_keys(h5_to_split_list[0])
bd = '/Users/phil/Downloads/untitled folder 2/'
image_tools.split_h5_loop_segments(h5_to_split_list, [1, 3], [bd+'TRASH', bd+'TRASH2'], chunk_size=10000, add_numbers_to_name=False,
disable_TQDM=False, set_seed = None)
-------
"""
if isinstance(temp_base_name, str):
temp_base_name = [temp_base_name] * len(split_percentages)
else:
assert len(temp_base_name) == len(
split_percentages), """if 'temp_base_name' is a list of strings, it must be equal in length to 'split_percentages'"""
for i, k in enumerate(temp_base_name):
if k[-3:] == '.h5':
temp_base_name[i] = temp_base_name[i][:-3]
frame_num_array_list = get_h5_key_and_dont_concatenate(h5_to_split_list, 'frame_nums')
total_frames = len(get_h5_key_and_concatenate(h5_to_split_list, key_name='labels'))
cnt1 = 0
h5_creators = dict()
split_percentages = split_percentages / np.sum(split_percentages)
# assert(sum(split_percentages)==1)
final_names = []
for iii, h5_to_split in enumerate(h5_to_split_list):
with h5py.File(h5_to_split, 'r') as h:
tmp_frame_list = frame_num_array_list[iii]
L = len(tmp_frame_list)
if set_seed is not None:
np.random.seed(set_seed)
mixed_inds = np.random.choice(L, L, replace=False)
random_segment_inds = np.split(mixed_inds, np.ceil(L * np.cumsum(split_percentages[:-1])).astype('int'))
random_segment_inds = [sorted(tmpk) for tmpk in random_segment_inds]
random_frame_inds = [[None]] * len(random_segment_inds)
list_of_new_frame_nums = [[None]] * len(random_segment_inds)
loop_seg_list = list(utils.loop_segments(tmp_frame_list))
for pi, p in enumerate(random_segment_inds):
tmp1 = []
tmp2 = []
for pp in p:
x = list(loop_seg_list[pp])
tmp1 += list(range(x[0], x[1]))
tmp2.append(tmp_frame_list[pp])
random_frame_inds[pi] = tmp1
list_of_new_frame_nums[pi] = tmp2
for i, k in enumerate(split_percentages): # for each new h5 created
if iii == 0: # create the H5 creators
if add_numbers_to_name:
final_names.append(temp_base_name[i] + '_' + str(i) + '.h5')
else:
final_names.append(temp_base_name[i] + '.h5')
h5_creators[i] = h5_iterative_creator(final_names[-1],
overwrite_if_file_exists=True,
close_and_open_on_each_iteration=True,
color_channel=color_channel)
ims = []
labels = []
for ii in tqdm(sorted(random_frame_inds[i]), disable=disable_TQDM, total=total_frames, initial=cnt1):
cnt1 += 1
ims.append(h['images'][ii])
labels.append(h['labels'][ii])
if ii > 0 and ii % chunk_size == 0:
h5_creators[i].add_to_h5(np.asarray(ims), np.asarray(labels))
ims = []
labels = []
h5_creators[i].add_to_h5(np.asarray(ims), np.asarray(labels))
with h5py.File(h5_creators[i].h5_full_file_name,
'r+') as h2: # wanted to do this to allow NONE as input and still have frame nums, but I need to have an append after creating and its a pain
frame_nums = np.asarray(list_of_new_frame_nums[i])
if 'frame_nums' not in h2.keys():
h2.create_dataset('frame_nums', shape=np.shape(frame_nums), maxshape=(None,), chunks=True,
data=frame_nums)
else:
h2['frame_nums'].resize(h2['frame_nums'].shape[0] + frame_nums.shape[0], axis=0)
h2['frame_nums'][-frame_nums.shape[0]:] = frame_nums
# # add the frame info to each
# for i, frame_nums in enumerate(list_of_new_frame_nums):
# with h5py.File(h5_creators[i].h5_full_file_name, 'r+') as h:
# h.create_dataset('frame_nums', shape=np.shape(frame_nums), data=frame_nums)
return final_names
def make_sure_frame_nums_exist(h5file):
with h5py.File(h5file, 'r+') as h:
key_list = list(h.keys())
if 'frame_nums' in key_list:
print("""'frame_nums' already in the key list""")
return None
if 'trial_nums_and_frame_nums' not in key_list:
print(
"""key 'trial_nums_and_frame_nums' must be in the provided h5 this is the only reason program exists""")
return None
frame_nums = h['trial_nums_and_frame_nums'][1, :]
h.create_dataset('frame_nums', shape=np.shape(frame_nums), data=frame_nums)
def split_h5(h5_to_split_list, split_percentages, temp_base_name, chunk_size=10000, add_numbers_to_name=True,
disable_TQDM=False, skip_if_label_is_neg_1=False, set_seed=None, color_channel=True):
"""Randomly splits images from a list of H5 file(s) into len(split_percentages) different H5 files.
Parameters
----------
h5_to_split_list : list
list of strings with full file names to the H5 file(s) to be split
split_percentages : list
list of numbers, can be ints [20, 1, 1] and or floats [.8, .2], it simply takes the sum and creates a percentage
temp_base_name : str or list
full path to new h5 file e.g "'/Users/phil/tempH5_" and the program will add the number and the ".h5"
in this case tempH5_0.h5, tempH5_1.h5, tempH5_2.h5 etc. or if it is a list it must be equal in length to
'split_percentages' and each file will be named based on that list
chunk_size = int
default 10000, max amount of frames to hold in memory at a time before storing in H5 file. Should almost never
be an issue but just in case you can set to a lower value if you experience memory issues.
add_numbers_to_name = bool
default true, just in case you don't want the numbers on the end of your h5 file.
Returns
-------
"""
if isinstance(temp_base_name, str):
temp_base_name = [temp_base_name] * len(split_percentages)
else:
assert len(temp_base_name) == len(
split_percentages), """if 'temp_base_name' is a list of strings, it must be equal in length to 'split_percentages'"""
total_frames = len(get_h5_key_and_concatenate(h5_to_split_list, key_name='labels'))
cnt1 = 0
h5_creators = dict()
split_percentages = split_percentages / np.sum(split_percentages)
# assert(sum(split_percentages)==1)
final_names = []
for iii, h5_to_split in enumerate(h5_to_split_list):
with h5py.File(h5_to_split, 'r') as h:
L = len(h['labels'][:])
if set_seed is not None:
np.random.seed(set_seed)
mixed_inds = np.random.choice(L, L, replace=False)
if skip_if_label_is_neg_1: # remove -1s
mixed_inds = mixed_inds[mixed_inds != -1]
random_frame_inds = np.split(mixed_inds, np.ceil(L * np.cumsum(split_percentages[:-1])).astype('int'))
for i, k in enumerate(split_percentages):
if iii == 0: # create the H5 creators
if add_numbers_to_name:
final_names.append(temp_base_name[i] + '_' + str(i) + '.h5')
else:
final_names.append(temp_base_name[i] + '.h5')
h5_creators[i] = h5_iterative_creator(final_names[-1],
overwrite_if_file_exists=True,
close_and_open_on_each_iteration=True,
color_channel=color_channel)
ims = []
labels = []
# print('starting ' + str(iii*i + 1) + ' of ' + str(len(split_percentages)*len(h5_to_split_list)))
for ii in tqdm(sorted(random_frame_inds[i]), disable=disable_TQDM, total=total_frames, initial=cnt1):
cnt1 += 1
ims.append(h['images'][ii])
labels.append(h['labels'][ii])
if ii > 0 and ii % chunk_size == 0:
h5_creators[i].add_to_h5(np.asarray(ims), np.asarray(labels))
ims = []
labels = []
h5_creators[i].add_to_h5(np.asarray(ims), np.asarray(labels))
return final_names
class h5_iterative_creator():
"""Create an H5 file using a for loop easily. used to create the augmented H5 file for training
Attributes:
Parameters
----------
h5_new_full_file_name : string
full path name to your H5 file to be created
overwrite_if_file_exists : bool
overwrites the h5 file if it already exists
max_img_height : int
default 61, only the max size, can be larger in case you are going to have larger images
max_img_width : int
default 61, only the max size, can be larger in case you are going to have larger images
close_and_open_on_each_iteration : bool
default True, this prevents the user from forgetting to close H5 which
can lead to corruption.
Example
_______
h5creator = h5_iterative_creator(new_H5_file)
h5creator.add_to_h5(img_stack1, labels_stack1)
h5creator.add_to_h5(img_stack2, labels_stack2)
h5creator.add_to_h5(img_stack3, labels_stack3)
"""
def __init__(self, h5_new_full_file_name,
overwrite_if_file_exists=False,
max_img_height=61,
max_img_width=61,
close_and_open_on_each_iteration=True,
color_channel=True,
add_to_existing_H5=False):
if not close_and_open_on_each_iteration:
print('**remember to CLOSE the H5 file when you are done!!!**')
if overwrite_if_file_exists and os.path.isfile(h5_new_full_file_name):
os.remove(h5_new_full_file_name)
self.h5_full_file_name = h5_new_full_file_name
if add_to_existing_H5:
self.hf_file = h5py.File(h5_new_full_file_name, "r+")
else:
self.hf_file = h5py.File(h5_new_full_file_name, "w")
self.color_channel = color_channel
self.max_img_height = max_img_height
self.max_img_width = max_img_width
self._went_through_create_h5 = False
self.close_it = close_and_open_on_each_iteration
if self.close_it:
self.hf_file.close()
def add_to_h5(self, images, labels):
"""
Parameters
----------
images : numpy tensor
chunk of images
labels : numpy array
            array of labels
"""
if self.close_it:
self.open_or_close_h5('r+')
if self._went_through_create_h5: # already initialized with the correct size
self._add_next_chunk_to_h5(images, labels)
else:
self._create_h5(images, labels)
if self.close_it:
self.open_or_close_h5('close')
def _create_h5(self, images, labels):
"""
Parameters
----------
images :
labels :
"""
# if set_multiplier:
self.hf_file.create_dataset("multiplier", [1], h5py.h5t.STD_I32LE, data=images.shape[0])
if self.color_channel:
self.hf_file.create_dataset('images',
np.shape(images),
h5py.h5t.STD_U8BE,
maxshape=(None, self.max_img_height, self.max_img_width, 3),
chunks=True,
data=images)
else:
self.hf_file.create_dataset('images',
np.shape(images),
h5py.h5t.STD_U8BE,
maxshape=(None, self.max_img_height, self.max_img_width),
chunks=True,
data=images)
self.hf_file.create_dataset('labels',
np.shape(labels),
h5py.h5t.STD_I32LE,
maxshape=(None,),
chunks=True,
data=labels)
self._went_through_create_h5 = True
def _add_next_chunk_to_h5(self, images, labels):
"""
Parameters
----------
images :
labels :
Returns
-------
"""
self.hf_file['images'].resize(self.hf_file['images'].shape[0] + images.shape[0], axis=0)
self.hf_file['labels'].resize(self.hf_file['labels'].shape[0] + labels.shape[0], axis=0)
self.hf_file['images'][-images.shape[0]:] = images
self.hf_file['labels'][-labels.shape[0]:] = labels
def read_h5(self):
""" """
self.open_or_close_h5('r')
print('''**remember to CLOSE the H5 file when you are done!!!** with ".close_h5()" method''')
def close_h5(self):
""" """
self.open_or_close_h5('close')
print('H5 file was closed')
def open_or_close_h5(self, mode_='r'):
"""
Parameters
----------
mode_ : str
mode can be H5py modes 'r', 'r+' 'w' (w overwrites file!) etc OR 'close' to
# ensure it is closed. separate function to prevent a bunch of try statements (Default value = 'r')
Returns
-------
"""
try:
self.hf_file.close()
finally:
if mode_.lower() != 'close':
self.hf_file = h5py.File(self.h5_full_file_name, mode_)
#
def augment_helper(keras_datagen, num_aug_ims, num_reg_ims, in_img, in_label):
"""
Parameters
----------
keras_datagen : keras_datagen: keras_datagen: keras.preprocessing.image.ImageDataGenerator
from keras.preprocessing.image import ImageDataGenerator-- keras_datagen = ImageDataGenerator(...)
num_aug_ims : int
number of augmented images to generate from single input image
    num_reg_ims : int
        number of copies of in_img to produce. will be stacked at the beginning of the all_augment variable.
        Useful to see the augmentation when testing, and useful if splitting into many H5s when you want an original in each.
in_img : numpy array
numpy array either 3D with color channel for the last dim ot 2D
in_label : int
the label associate with in_img. simply repeats it creating 'out_labels' the be size of 'all_augment'
Returns
-------
"""
if len(in_img.shape) == 2: # or not np.any(np.asarray(in_img.shape)==3)
in_img = np.repeat(in_img[..., np.newaxis], 3, -1) # for 2D arrays without color channels
set_zoom = keras_datagen.zoom_range
in_img = np.expand_dims(in_img, 0)
it = keras_datagen.flow(in_img, batch_size=1)
all_augment = np.tile(in_img, [num_reg_ims, 1, 1, 1])
for i in range(num_aug_ims): ##
if set_zoom != [0, 0]: # if zoom is being used...
# keras 'zoom' is annoying. it zooms x and y differently randomly
# in order to get an equal zoom I use the following workaround.
z_val = np.random.uniform(low=set_zoom[0], high=set_zoom[1])
keras_datagen.zoom_range = [z_val, z_val]
it = keras_datagen.flow(in_img, batch_size=1)
batch = it.next()
image = batch[0].astype('uint8')
all_augment = np.append(all_augment, np.expand_dims(image, 0), 0)
out_labels = np.repeat(in_label, sum([num_aug_ims, num_reg_ims]))
keras_datagen.zoom_range = set_zoom
return all_augment, out_labels
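# --- Hedged usage sketch (not part of the original module) ---
# augment_helper() wraps a keras ImageDataGenerator; the augmentation settings below are
# illustrative, not the values used elsewhere in WhACC, and the helper name is hypothetical.
def _sketch_augment_one_image():
    from tensorflow.keras.preprocessing.image import ImageDataGenerator
    datagen = ImageDataGenerator(rotation_range=20, zoom_range=[0.9, 1.1], horizontal_flip=True)
    img = (np.random.rand(61, 61) * 255).astype(np.uint8)
    stack, labels = augment_helper(datagen, num_aug_ims=5, num_reg_ims=1, in_img=img, in_label=1)
    return stack.shape, labels.shape  # ((6, 61, 61, 3), (6,))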
def img_unstacker(img_array, num_frames_wide=8, color_channel=True):
"""unstacks image stack and combines them into one large image for easy display. reads left to right and then top to bottom.
Parameters
----------
img_array : numpy array
stacked image array
num_frames_wide : int
width of destacked image. if = 8 with input 20 images it will be 8 wide 3 long and 4 blank images (Default value = 8)
Returns
-------
"""
im_stack = None
for i, k in enumerate(img_array):
if i % num_frames_wide == 0:
if i != 0: # stack it
if im_stack is None:
im_stack = im_stack_tmp
else:
im_stack = np.vstack((im_stack, im_stack_tmp))
im_stack_tmp = k # must be at the end
else:
im_stack_tmp = np.hstack((im_stack_tmp, k))
x = num_frames_wide - len(img_array) % num_frames_wide
if x != 0:
if x != num_frames_wide:
for i in range(x):
im_stack_tmp = np.hstack((im_stack_tmp, np.ones_like(k)))
if im_stack is None:
return im_stack_tmp
else:
im_stack = np.vstack((im_stack, im_stack_tmp))
return im_stack
def original_image(x):
"""This is used to transform batch generated images [-1 1] to the original image [0,255] for plotting
Parameters
----------
x :
Returns
-------
"""
image = tf.cast((x + 1) * 127.5, tf.uint8)
return image
def predict_multiple_H5_files(H5_file_list, model_2_load, append_model_and_labels_to_name_string=False,
batch_size=1000, model_2_load_is_model=False, save_on=False,
label_save_name=None, disable_TQDM=False,
save_labels_to_this_h5_file_instead=None) -> object:
"""
Parameters
----------
    H5_file_list : list
        list of string(s) of H5 file full paths
    model_2_load : str or loaded keras model
        either the full path to a model folder ending with ".ckpt" OR the loaded model itself. if the latter,
        the user MUST set "model_2_load_is_model" to True and "label_save_name" must be explicitly defined (when using a model
        path we use the model name to name the labels).
    append_model_and_labels_to_name_string : bool
        if True, label_save_name = 'MODEL__' + label_save_name + '__labels'; it is a simple way to keep track of labels
        from many models in a single H5 file. it also makes it easier to find those labels for later processing. (Default value = False)
    batch_size : int
        number of images to process per batch -- slower prediction speed << ideal prediction speed <<
        memory issues and crashes -- 1000 is normally pretty good on Google CoLab (Default value = 1000)
model_2_load_is_model : bool
lets the program know if you are directly inserting a model (instead of a path to model folder) (Default value = False)
save_on : bool
saves to H5 file. either the original H5 (image source) or new H5 if a path to "save_labels_to_this_h5_file_instead"
is given (Default value = False)
label_save_name : string
h5 file key used to save the labels to, default is 'MODEL__' + **model_name** + '__labels'
disable_TQDM : bool
if True, turns off loading progress bar. (Default value = False)
save_labels_to_this_h5_file_instead : string
full path to H5 file to insert labels into instead of the H5 used as the image source (Default value = None)
Returns
-------
"""
for i, H5_file in enumerate(H5_file_list):
# save_what_is_left_of_your_h5_file(H5_file, do_del_and_rename = 1) # only matters if file is corrupt otherwise doesnt touch it
gen = ImageBatchGenerator(batch_size, [H5_file])
if model_2_load_is_model:
if label_save_name is None and save_on == True:
assert 1 == 0, 'label_save_name must be assigned if you are loading a model in directly and saveon == True.'
model = model_2_load
else:
if label_save_name is None:
label_save_name = model_2_load.split(os.path.sep)[-1].split('.')[0]
label_save_name = 'MODEL__' + label_save_name + '__labels'
append_model_and_labels_to_name_string = False # turn off because defaults to this naming scheme if user doesnt put in name
model = tf.keras.models.load_model(model_2_load)
if append_model_and_labels_to_name_string:
label_save_name = 'MODEL__' + label_save_name + '__labels'
start = time.time()
labels_2_save = np.asarray([])
for k in tqdm(range(gen.__len__()), disable=disable_TQDM):
TMP_X, tmp_y = gen.getXandY(k)
outY = model.predict(TMP_X)
labels_2_save = np.append(labels_2_save, outY)
total_seconds = time.time() - start
time_per_mil = np.round(1000000 * total_seconds / len(labels_2_save))
print(str(time_per_mil) + ' seconds per 1 million images predicted')
if save_on:
if save_labels_to_this_h5_file_instead is not None: # add to differnt H5 file
H5_file = save_labels_to_this_h5_file_instead # otherwise it will add to the current H5 file
# based on the loop through "H5_file_list" above
try:
hf.close()
except:
pass
with h5py.File(H5_file, 'r+') as hf:
try:
del hf[label_save_name]
time.sleep(10) # give time to process the deleted file... maybe???
hf.create_dataset(label_save_name, data=np.float64(labels_2_save))
except:
hf.create_dataset(label_save_name, data=np.float64(labels_2_save))
hf.close()
return labels_2_save
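# --- Hedged usage sketch (not part of the original module) ---
# Both paths below are illustrative. With save_on=True and a model path, the predictions
# are written back into the source H5 under the key 'MODEL__<model name>__labels'.
#
#   preds = predict_multiple_H5_files(['/path/to/session.h5'],
#                                     '/path/to/whacc_model.ckpt',
#                                     batch_size=1000,
#                                     save_on=True)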
def get_total_frame_count(h5_file_list):
"""
Parameters
----------
h5_file_list :
Returns
-------
"""
total_frame_count = []
for H5_file in h5_file_list:
H5 = h5py.File(H5_file, 'r')
images = H5['images']
total_frame_count.append(images.shape[0])
return total_frame_count
def batch_size_file_ind_selector(num_in_each, batch_size):
"""batch_size_file_ind_selector - needed for ImageBatchGenerator to know which H5 file index
to use depending on the iteration number used in __getitem__ in the generator.
this all depends on the variable batch size.
Example: the output of the following...
batch_size_file_ind_selector([4000, 4001, 3999], [2000])
would be [0, 0, 1, 1, 1, 2, 2] which means that there are 2 chunks in the first
H5 file, 3 in the second and 2 in the third based on chunk size of 2000
Parameters
----------
num_in_each :
param batch_size:
batch_size :
Returns
-------
"""
break_into = np.ceil(np.array(num_in_each) / batch_size)
extract_inds = np.array([])
for k, elem in enumerate(break_into):
tmp1 = np.array(np.ones(np.int(elem)) * k)
extract_inds = np.concatenate((extract_inds, tmp1), axis=0)
return extract_inds
# file_inds_for_H5_extraction is the same as extract_inds output from the above function
def reset_to_first_frame_for_each_file_ind(file_inds_for_H5_extraction):
"""reset_to_first_frame_for_each_file_ind - uses the output of batch_size_file_ind_selector
to determine when to reset the index for each individual H5 file. using the above example
the out put would be [0, 0, 2, 2, 2, 5, 5], each would be subtracted from the indexing to
set the position of the index to 0 for each new H5 file.
Parameters
----------
file_inds_for_H5_extraction :
Returns
-------
"""
subtract_for_index = []
for k, elem in enumerate(file_inds_for_H5_extraction):
tmp1 = np.diff(file_inds_for_H5_extraction)
tmp1 = np.where(tmp1 != 0)
tmp1 = np.append(-1, tmp1[0]) + 1
subtract_for_index.append(tmp1[np.int(file_inds_for_H5_extraction[k])])
return subtract_for_index
class ImageBatchGenerator(keras.utils.Sequence):
""" """
def __init__(self, batch_size, h5_file_list, label_key = 'labels'):
h5_file_list = utils.make_list(h5_file_list, suppress_warning=True)
num_frames_in_all_H5_files = get_total_frame_count(h5_file_list)
file_inds_for_H5_extraction = batch_size_file_ind_selector(
num_frames_in_all_H5_files, batch_size)
subtract_for_index = reset_to_first_frame_for_each_file_ind(
file_inds_for_H5_extraction)
# self.to_fit = to_fit #set to True to return XY and False to return X
self.label_key = label_key
self.batch_size = batch_size
self.H5_file_list = h5_file_list
self.num_frames_in_all_H5_files = num_frames_in_all_H5_files
self.file_inds_for_H5_extraction = file_inds_for_H5_extraction
self.subtract_for_index = subtract_for_index
self.IMG_SIZE = 96
def __len__(self):
return len(self.file_inds_for_H5_extraction)
def __getitem__(self, num_2_extract):
b = self.batch_size
h = self.H5_file_list
i = self.file_inds_for_H5_extraction
H5_file = h[np.int(i[num_2_extract])]
with h5py.File(H5_file, 'r') as H5:
# H5 = h5py.File(H5_file, 'r')
images = H5['images']
num_2_extract_mod = num_2_extract - self.subtract_for_index[num_2_extract]
raw_X = images[b * num_2_extract_mod:b * (num_2_extract_mod + 1)]
rgb_tensor = self.image_transform(raw_X)
labels_tmp = H5[self.label_key]
raw_Y = labels_tmp[b * num_2_extract_mod:b * (num_2_extract_mod + 1)]
H5.close()
return rgb_tensor, raw_Y
# def __getitem__(self, num_2_extract):
# b = self.batch_size
# h = self.H5_file_list
# i = self.file_inds_for_H5_extraction
# H5_file = h[np.int(i[num_2_extract])]
# H5 = h5py.File(H5_file, 'r')
# # list(H5.keys())
#
# images = H5['images']
# num_2_extract_mod = num_2_extract - self.subtract_for_index[num_2_extract]
# raw_X = images[b * num_2_extract_mod:b * (num_2_extract_mod + 1)]
# rgb_tensor = self.image_transform(raw_X)
#
# # if self.to_fit:
# # labels_tmp = H5[self.label_key]
# # raw_Y = labels_tmp[b*num_2_extract_mod:b*(num_2_extract_mod+1)]
# # return rgb_tensor, raw_Y
# # else:
# return rgb_tensor
def getXandY(self, num_2_extract):
"""
Parameters
----------
num_2_extract :
Returns
-------
"""
b = self.batch_size
h = self.H5_file_list
i = self.file_inds_for_H5_extraction
H5_file = h[np.int(i[num_2_extract])]
H5 = h5py.File(H5_file, 'r')
# list(H5.keys())
images = H5['images']
num_2_extract_mod = num_2_extract - self.subtract_for_index[num_2_extract]
raw_X = images[b * num_2_extract_mod:b * (num_2_extract_mod + 1)]
rgb_tensor = self.image_transform(raw_X)
labels_tmp = H5[self.label_key]
raw_Y = labels_tmp[b * num_2_extract_mod:b * (num_2_extract_mod + 1)]
return rgb_tensor, raw_Y
def image_transform(self, raw_X):
"""input num_of_images x H x W, image input must be grayscale
MobileNetV2 requires certain image dimensions
        We use N x 61 x 61 formatted images
self.IMG_SIZE is a single number to change the images into, images must be square
Parameters
----------
raw_X :
Returns
-------
"""
# rgb_batch = np.repeat(raw_X[..., np.newaxis], 3, -1)
# rgb_tensor = tf.cast(rgb_batch, tf.float32) # convert to tf tensor with float32 dtypes
# rgb_tensor = (rgb_tensor / 127.5) - 1 # /127.5 = 0:2, -1 = -1:1 requirement for mobilenetV2
# rgb_tensor = tf.image.resize(rgb_tensor, (self.IMG_SIZE, self.IMG_SIZE)) # resizing
# self.IMG_SHAPE = (self.IMG_SIZE, self.IMG_SIZE, 3)
# return rgb_tensor
if len(raw_X.shape) == 4 and raw_X.shape[3] == 3:
rgb_batch = copy.deepcopy(raw_X)
else:
rgb_batch = np.repeat(raw_X[..., np.newaxis], 3, -1)
rgb_tensor = tf.cast(rgb_batch, tf.float32) # convert to tf tensor with float32 dtypes
rgb_tensor = (rgb_tensor / 127.5) - 1 # /127.5 = 0:2, -1 = -1:1 requirement for mobilenetV2
rgb_tensor = tf.image.resize(rgb_tensor, (self.IMG_SIZE, self.IMG_SIZE)) # resizing
self.IMG_SHAPE = (self.IMG_SIZE, self.IMG_SIZE, 3)
return rgb_tensor
def plot_batch_distribution(self):
""" """
# randomly select a batch and generate images and labels
batch_num = np.random.choice(np.arange(0, self.__len__()))
samp_x, samp_y = self.getXandY(batch_num)
# look at the distribution of classes
plt.pie([1 - np.mean(samp_y), np.mean(samp_y)],
labels=['non-touch frames', 'touch frames'], autopct='%1.1f%%', )
plt.title('class distribution from batch ' + str(batch_num))
plt.show()
# generate indices for positive and negative classes
images_to_sample = 20
neg_class = [i for i, val in enumerate(samp_y) if val == 0]
pos_class = [i for i, val in enumerate(samp_y) if val == 1]
neg_index = np.random.choice(neg_class, images_to_sample)
pos_index = np.random.choice(pos_class, images_to_sample)
# plot sample positive and negative class images
plt.figure(figsize=(10, 10))
samp_x = (samp_x + 1) / 2
for i in range(images_to_sample):
plt.subplot(5, 10, i + 1)
plt.xticks([])
plt.yticks([])
plt.grid(False)
_ = plt.imshow(samp_x[neg_index[i]])
plt.xlabel('0')
plt.subplot(5, 10, images_to_sample + i + 1)
plt.xticks([])
plt.yticks([])
plt.grid(False)
plt.imshow(samp_x[pos_index[i]])
plt.xlabel('1')
plt.suptitle('sample images from batch ' + str(batch_num))
plt.show()
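# --- Hedged usage sketch (not part of the original module) ---
# ImageBatchGenerator subclasses keras.utils.Sequence, so it can be handed straight to
# model.fit(); the H5 path and the model are illustrative only.
#
#   gen = ImageBatchGenerator(batch_size=500, h5_file_list=['/path/to/data.h5'])
#   x_batch, y_batch = gen[0]   # (N, 96, 96, 3) float tensor scaled to [-1, 1], plus labels
#   # model.fit(gen, epochs=5)  # assuming a compiled tf.keras model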
def image_transform_(IMG_SIZE, raw_X):
"""
input num_of_images x H x W, image input must be grayscale
MobileNetV2 requires certain image dimensions
    We use N x 61 x 61 formatted images
    IMG_SIZE is a single number to change the images into, images must be square
Parameters
----------
raw_X :
Returns
-------
"""
if len(raw_X.shape) == 4 and raw_X.shape[3] == 3:
rgb_batch = copy.deepcopy(raw_X)
else:
rgb_batch = np.repeat(raw_X[..., np.newaxis], 3, -1)
rgb_tensor = tf.cast(rgb_batch, tf.float32) # convert to tf tensor with float32 dtypes
rgb_tensor = (rgb_tensor / 127.5) - 1 # /127.5 = 0:2, -1 = -1:1 requirement for mobilenetV2
rgb_tensor = tf.image.resize(rgb_tensor, (IMG_SIZE, IMG_SIZE)) # resizing
IMG_SHAPE = (IMG_SIZE, IMG_SIZE, 3)
return rgb_tensor
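# --- Hedged usage sketch (not part of the original module) ---
# image_transform_() accepts a grayscale (N, H, W) or RGB (N, H, W, 3) batch and returns a
# float32 tensor resized to (N, IMG_SIZE, IMG_SIZE, 3) scaled to [-1, 1]; the dummy batch
# and helper name below are illustrative.
def _sketch_transform_dummy_batch():
    dummy = (np.random.rand(4, 61, 61) * 255).astype(np.uint8)
    out = image_transform_(96, dummy)
    return out.shape  # -> (4, 96, 96, 3)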
| 40.223382
| 173
| 0.602896
| 11,626
| 0.301708
| 0
| 0
| 0
| 0
| 0
| 0
| 16,060
| 0.416775
|
3d3c48e30dea59b0f2566984a39668435562eafb
| 10,007
|
py
|
Python
|
tests/texts/declerations.py
|
Intsights/flake8-intsights
|
b3785a3be855e05090641696e0648486107dba72
|
[
"MIT"
] | 12
|
2020-02-18T17:47:57.000Z
|
2021-07-13T10:23:40.000Z
|
tests/texts/declerations.py
|
Intsights/flake8-intsights
|
b3785a3be855e05090641696e0648486107dba72
|
[
"MIT"
] | 7
|
2020-02-25T12:14:11.000Z
|
2020-12-01T08:14:58.000Z
|
tests/texts/declerations.py
|
Intsights/flake8-intsights
|
b3785a3be855e05090641696e0648486107dba72
|
[
"MIT"
] | 1
|
2020-07-01T15:49:28.000Z
|
2020-07-01T15:49:28.000Z
|
declerations_test_text_001 = '''
list1 = [
1,
]
'''
declerations_test_text_002 = '''
list1 = [
1,
2,
]
'''
declerations_test_text_003 = '''
tuple1 = (
1,
)
'''
declerations_test_text_004 = '''
tuple1 = (
1,
2,
)
'''
declerations_test_text_005 = '''
set1 = {
1,
}
'''
declerations_test_text_006 = '''
set1 = {
1,
2,
}
'''
declerations_test_text_007 = '''
dict1 = {
'key': 1,
}
'''
declerations_test_text_008 = '''
dict1 = {
'key1': 1,
'key2': 2,
}
'''
declerations_test_text_009 = '''
return [
1,
]
'''
declerations_test_text_010 = '''
return [
1,
2,
]
'''
declerations_test_text_011 = '''
return (
1,
)
'''
declerations_test_text_012 = '''
return (
1,
2,
)
'''
declerations_test_text_013 = '''
return {
1,
}
'''
declerations_test_text_014 = '''
return {
1,
2,
}
'''
declerations_test_text_015 = '''
return {
'key': 1,
}
'''
declerations_test_text_016 = '''
return {
'key1': 1,
'key2': 2,
}
'''
declerations_test_text_017 = '''
yield [
1,
]
'''
declerations_test_text_018 = '''
yield [
1,
2,
]
'''
declerations_test_text_019 = '''
yield (
1,
)
'''
declerations_test_text_020 = '''
yield (
1,
2,
)
'''
declerations_test_text_021 = '''
yield {
1,
}
'''
declerations_test_text_022 = '''
yield {
1,
2,
}
'''
declerations_test_text_023 = '''
yield {
'key': 1,
}
'''
declerations_test_text_024 = '''
yield {
'key1': 1,
'key2': 2,
}
'''
declerations_test_text_025 = '''
list1 = [
[
1,
],
]
'''
declerations_test_text_026 = '''
list1 = [
[
1,
2,
],
]
'''
declerations_test_text_027 = '''
tuple1 = (
(
1,
),
)
'''
declerations_test_text_028 = '''
tuple1 = (
(
1,
2,
),
)
'''
declerations_test_text_029 = '''
set1 = {
{
1,
},
}
'''
declerations_test_text_030 = '''
set1 = {
{
1,
2,
},
}
'''
declerations_test_text_031 = '''
dict1 = {
'key': {
'key': 1,
},
}
'''
declerations_test_text_032 = '''
dict1 = {
'key1': {
'key1': 1,
'key2': 2,
},
'key2': {
'key1': 1,
'key2': 2,
},
}
'''
declerations_test_text_033 = '''
return [
[
1,
],
]
'''
declerations_test_text_034 = '''
return [
[
1,
2,
],
]
'''
declerations_test_text_035 = '''
return (
(
1,
),
)
'''
declerations_test_text_036 = '''
return (
(
1,
2,
),
)
'''
declerations_test_text_037 = '''
return {
{
1,
},
}
'''
declerations_test_text_038 = '''
return {
{
1,
2,
},
}
'''
declerations_test_text_039 = '''
return {
'key': {
'key': 1,
},
}
'''
declerations_test_text_040 = '''
return {
'key1': {
'key1': 1,
'key2': 2,
},
'key2': {
'key1': 1,
'key2': 2,
},
}
'''
declerations_test_text_041 = '''
yield [
[
1,
],
]
'''
declerations_test_text_042 = '''
yield [
[
1,
2,
],
]
'''
declerations_test_text_043 = '''
yield (
(
1,
),
)
'''
declerations_test_text_044 = '''
yield (
(
1,
2,
),
)
'''
declerations_test_text_045 = '''
yield {
{
1,
},
}
'''
declerations_test_text_046 = '''
yield {
{
1,
2,
},
}
'''
declerations_test_text_047 = '''
yield {
'key': {
'key': 1,
},
}
'''
declerations_test_text_048 = '''
yield {
'key1': {
'key1': 1,
'key2': 2,
},
'key2': {
'key1': 1,
'key2': 2,
},
}
'''
declerations_test_text_049 = '''
list1 = [
[
2,
],
]
'''
declerations_test_text_050 = '''
list_1 = [
[
[
2,
],
],
]
'''
declerations_test_text_051 = '''
list_1 = [
(
2,
),
]
'''
declerations_test_text_052 = '''
list_1 = [
{
'key1': 'value1',
},
]
'''
declerations_test_text_053 = '''
list_1 = [
call(
param1,
),
]
'''
declerations_test_text_054 = '''
entry_1, entry_2 = call()
'''
declerations_test_text_055 = '''
(
entry_1,
entry_2,
) = call()
'''
declerations_test_text_056 = '''
[
1
for a, b in call()
]
'''
declerations_test_text_057 = '''
{
'key': [
'entry_1',
'entry_2',
]
}
'''
declerations_test_text_058 = '''
list_1 = [instance.attribute]
'''
declerations_test_text_059 = '''
list_1 = [1]
'''
declerations_test_text_060 = '''
list_1 = [test]
'''
declerations_test_text_061 = '''
dict_1 = {}
'''
declerations_test_text_062 = '''
list_1 = [term[1]]
'''
declerations_test_text_063 = '''
test = {
'list_of_lists': [
[],
],
}
'''
declerations_test_text_064 = '''
class ClassName:
pass
'''
declerations_test_text_065 = '''
class ClassName(
Class1,
Class2,
):
pass
'''
declerations_test_text_066 = '''
class ClassName():
pass
'''
declerations_test_text_067 = '''
class ClassName(Class1, Class2):
pass
'''
declerations_test_text_068 = '''
class ClassName(
Class1,
Class2
):
pass
'''
declerations_test_text_069 = '''
def function_name():
pass
'''
declerations_test_text_070 = '''
def function_name( ):
pass
'''
declerations_test_text_071 = '''
def function_name(
):
pass
'''
declerations_test_text_072 = '''
def function_name(
):
pass
'''
declerations_test_text_073 = '''
def function_name(
arg1,
arg2,
):
pass
'''
declerations_test_text_074 = '''
def function_name(
arg1,
arg2
):
pass
'''
declerations_test_text_075 = '''
def function_name(arg1):
pass
'''
declerations_test_text_076 = '''
def function_name(
arg1, arg2,
):
pass
'''
declerations_test_text_077 = '''
def function_name(
arg1,
arg2,
):
pass
'''
declerations_test_text_078 = '''
def function_name(
arg1,
**kwargs
):
pass
'''
declerations_test_text_079 = '''
class Class:
def function_name_two(
self,
arg1,
arg2,
):
pass
'''
declerations_test_text_080 = '''
class Class:
@property
def function_name_one(
self,
):
pass
'''
declerations_test_text_081 = '''
def function_name(
*args,
**kwargs
):
pass
'''
declerations_test_text_082 = '''
class A:
def b():
class B:
pass
'''
declerations_test_text_083 = '''
@decorator(
param=1,
)
def function_name(
param_one,
param_two,
):
pass
'''
declerations_test_text_084 = '''
class ClassA:
def function_a():
pass
class TestServerHandler(
http.server.BaseHTTPRequestHandler,
):
pass
'''
declerations_test_text_085 = '''
def function(
param_a,
param_b=[
'test',
],
):
pass
'''
declerations_test_text_086 = '''
@decorator
class DecoratedClass(
ClassBase,
):
pass
'''
declerations_test_text_087 = '''
class ClassName(
object,
):
pass
'''
declerations_test_text_088 = '''
pixel[x,y] = 10
'''
declerations_test_text_089 = '''
@decorator.one
@decorator.two()
class DecoratedClass:
pass
'''
declerations_test_text_090 = '''
@staticmethod
def static_method():
pass
'''
declerations_test_text_091 = '''
@decorator1
@decorator2
def static_method(
param1,
param2,
):
pass
'''
declerations_test_text_092 = '''
@decorator1(
param=1,
)
def method():
pass
'''
declerations_test_text_093 = '''
try:
pass
except Exception:
pass
'''
declerations_test_text_094 = '''
try:
pass
except (
Exception1,
Exception2,
):
pass
'''
declerations_test_text_095 = '''
try:
pass
except Exception as exception:
pass
'''
declerations_test_text_096 = '''
try:
pass
except (
Exception1,
Exception2,
) as exception:
pass
'''
declerations_test_text_097 = '''
try:
pass
except Exception as e:
pass
'''
declerations_test_text_098 = '''
try:
pass
except (
Exception1,
Exception2,
) as e:
pass
'''
declerations_test_text_099 = '''
dict1 = {
'key_one': 1, 'key_two': 2,
}
'''
declerations_test_text_100 = '''
dict1 = {
'key_one': 1,
'key_two': 2,
}
'''
declerations_test_text_101 = '''
dict1 = {
'key_one': 1,
'key_two': 2,
}
'''
declerations_test_text_102 = '''
dict1 = {
'key_one':
1,
}
'''
declerations_test_text_103 = '''
dict_one = {
'list_comp': [
{
'key_one': 'value',
}
for i in range(5)
],
'dict_comp': {
'key_one': i
for i in range(5)
},
'set_comp': {
i
for i in range(5)
},
'generator_comp': (
i
for i in range(5)
),
}
'''
declerations_test_text_104 = '''
dict_one = {
'text_key': 'value',
f'formatted_text_key': 'value',
name_key: 'value',
1: 'value',
dictionary['name']: 'value',
object.attribute: 'value',
}
dict_two = {
'key_text_multiline': \'\'\'
text
\'\'\',
1: 'text',
function(
param=1,
): 'text',
'text'.format(
param=1,
): 'text',
'long_text': (
'first line'
'second line'
),
**other_dict,
}
'''
declerations_test_text_105 = '''
async def function(
param1,
):
pass
'''
declerations_test_text_106 = '''
def no_args_function():
pass
def no_args_function() :
pass
def no_args_function ():
pass
def no_args_function( ):
pass
def no_args_function():
pass
def no_args_function() -> None:
pass
def no_args_function() -> None :
pass
def no_args_function () -> None:
pass
def no_args_function( ) -> None:
pass
def no_args_function() -> None:
pass
'''
declerations_test_text_107 = '''
class Class:
@decorator(
param=1,
)
async def function():
pass
'''
declerations_test_text_108 = '''
list_a = [
\'\'\'
multiline
string
\'\'\',
\'\'\'
multiline
string
\'\'\',
]
'''
declerations_test_text_109 = '''
list_with_empty_tuple = [
(),
]
'''
| 13.098168
| 47
| 0.540122
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 6,737
| 0.673229
|
3d3d066b8c43e8060d3eeba6ff779ba80c45bf11
| 1,437
|
py
|
Python
|
data/preprocess_original.py
|
Nstats/pytorch_senti_analysis_ch
|
bb01cc508c37638670b26259a6ee35c4e857f2b6
|
[
"Apache-2.0"
] | 1
|
2019-09-29T02:26:14.000Z
|
2019-09-29T02:26:14.000Z
|
data/preprocess_original.py
|
Nstats/pytorch_senti_analysis_ch
|
bb01cc508c37638670b26259a6ee35c4e857f2b6
|
[
"Apache-2.0"
] | 1
|
2021-06-02T00:24:55.000Z
|
2021-06-02T00:24:55.000Z
|
data/preprocess_original.py
|
Nstats/pytorch_senti_analysis_ch
|
bb01cc508c37638670b26259a6ee35c4e857f2b6
|
[
"Apache-2.0"
] | null | null | null |
import pandas as pd
import os
import random
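# Overview: merge the training labels into the training set, drop rows without a
# label, fill missing titles/contents with '无', then split the training data into
# 5 folds and write train/dev/test CSVs for each fold under ./data/data_{i}.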
train_df = pd.read_csv("./data/Train_DataSet.csv")
train_label_df = pd.read_csv("./data/Train_DataSet_Label.csv")
test_df = pd.read_csv("./data/Test_DataSet.csv")
train_df = train_df.merge(train_label_df, on='id', how='left')
train_df['label'] = train_df['label'].fillna(-1)
train_df = train_df[train_df['label'] != -1]
train_df['label'] = train_df['label'].astype(int)
test_df['label'] = 0
test_df['content'] = test_df['content'].fillna('无')
train_df['content'] = train_df['content'].fillna('无')
test_df['title'] = test_df['title'].fillna('无')
train_df['title'] = train_df['title'].fillna('无')
index = set(range(train_df.shape[0]))
K_fold = []
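# Build 5 roughly equal folds by repeatedly sampling 1/5 of the remaining row
# indices; the last fold takes whatever is left. Note that `random.sample` over a
# set is deprecated and raises on Python 3.11+, so `list(index)` may be needed there.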
for i in range(5):
if i == 4:
tmp = index
else:
tmp = random.sample(index, int(1.0 / 5 * train_df.shape[0]))
index = index - set(tmp)
print("Number:", len(tmp))
K_fold.append(tmp)
for i in range(5):
print("Fold", i)
if os.path.exists('./data/data_{}'.format(i)):
os.system("rm -rf ./data/data_{}".format(i))
os.system("mkdir ./data/data_{}".format(i))
dev_index = list(K_fold[i])
train_index = []
for j in range(5):
if j != i:
train_index += K_fold[j]
train_df.iloc[train_index].to_csv("./data/data_{}/train.csv".format(i))
train_df.iloc[dev_index].to_csv("./data/data_{}/dev.csv".format(i))
test_df.to_csv("./data/data_{}/test.csv".format(i))
| 33.418605
| 75
| 0.636047
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 370
| 0.256055
|
3d3d56ea2024a56958685b39631e50240545177c
| 304
|
py
|
Python
|
tools/load_save.py
|
zs-liu/Pytorch-AS
|
4e41f96522cce7a35f6625bdbe3863c0b74ee0ca
|
[
"MIT"
] | null | null | null |
tools/load_save.py
|
zs-liu/Pytorch-AS
|
4e41f96522cce7a35f6625bdbe3863c0b74ee0ca
|
[
"MIT"
] | null | null | null |
tools/load_save.py
|
zs-liu/Pytorch-AS
|
4e41f96522cce7a35f6625bdbe3863c0b74ee0ca
|
[
"MIT"
] | null | null | null |
import torch
def save_checkpoint(save_dir, model, optimizer):
torch.save({'model_state_dict': model.state_dict(),
'optimizer_state_dict': optimizer.state_dict()}, save_dir)
return True
def load_checkpoint(load_dir):
checkpoint = torch.load(load_dir)
return checkpoint
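# Example usage (a minimal sketch; `model`, `optimizer` and the path are assumed
# to be defined elsewhere):
#   save_checkpoint('checkpoint.pt', model, optimizer)
#   state = load_checkpoint('checkpoint.pt')
#   model.load_state_dict(state['model_state_dict'])
#   optimizer.load_state_dict(state['optimizer_state_dict'])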
| 23.384615
| 74
| 0.713816
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 40
| 0.131579
|
3d3ee67b67a8537dbe3c66ff4a5cb8e8c72ee707
| 706
|
py
|
Python
|
support/send_broadcast_message.py
|
ICT4H/dcs-web
|
fb0f53fad4401cfac1c1789ff28b9d5bda40c975
|
[
"Apache-2.0"
] | 1
|
2015-11-02T09:11:12.000Z
|
2015-11-02T09:11:12.000Z
|
support/send_broadcast_message.py
|
ICT4H/dcs-web
|
fb0f53fad4401cfac1c1789ff28b9d5bda40c975
|
[
"Apache-2.0"
] | null | null | null |
support/send_broadcast_message.py
|
ICT4H/dcs-web
|
fb0f53fad4401cfac1c1789ff28b9d5bda40c975
|
[
"Apache-2.0"
] | null | null | null |
from xlrd import open_workbook
from scheduler.smsclient import SMSClient
filename = "/Users/twer/Downloads/SchoolsSMSGhana.xlsx"
workbook = open_workbook(filename)
organization_number = "1902"
area_code = "233"
sheets_ = workbook.sheets()[0]
sms_client = SMSClient()
print 'Start'
for row_num in range(1, sheets_.nrows):
row = sheets_.row_values(row_num)
_, _, data_sender_phone_number, message = tuple(row)
phone_number = area_code + str(int(data_sender_phone_number))[1:]
print ("Sending broadcast message to %s from %s.") % (phone_number, organization_number)
sms_sent = sms_client.send_sms(organization_number, phone_number, message)
print 'Response:', sms_sent
print 'End'
| 32.090909
| 92
| 0.756374
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 120
| 0.169972
|
3d41aeb36fe4c0327c92ba2fb851e5ac557d9a0b
| 960
|
py
|
Python
|
typhon/oem/error.py
|
jmollard/typhon
|
68d5ae999c340b60aa69e095b336d438632ad55c
|
[
"MIT"
] | null | null | null |
typhon/oem/error.py
|
jmollard/typhon
|
68d5ae999c340b60aa69e095b336d438632ad55c
|
[
"MIT"
] | null | null | null |
typhon/oem/error.py
|
jmollard/typhon
|
68d5ae999c340b60aa69e095b336d438632ad55c
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""Functions to estimate the different sources of retrieval error.
"""
from typhon.oem import common
__all__ = [
'smoothing_error',
'retrieval_noise',
]
def smoothing_error(x, x_a, A):
"""Return the smoothing error through the averaging kernel.
Parameters:
        x (ndarray): Atmospheric profile.
x_a (ndarray): A priori profile.
A (ndarray): Averaging kernel matrix.
Returns:
ndarray: Smoothing error due to correlation between layers.
"""
return A @ (x - x_a)
def retrieval_noise(K, S_a, S_y, e_y):
"""Return the retrieval noise.
Parameters:
K (np.array): Simulated Jacobians.
S_a (np.array): A priori error covariance matrix.
S_y (np.array): Measurement covariance matrix.
e_y (ndarray): Total measurement error.
Returns:
ndarray: Retrieval noise.
"""
return common.retrieval_gain_matrix(K, S_a, S_y) @ e_y
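# Sketch of how these fit together (hypothetical arrays; shapes must be consistent):
#   s_err = smoothing_error(x, x_a, A)          # error due to correlation between layers
#   noise = retrieval_noise(K, S_a, S_y, e_y)   # measurement error propagated by the gain matrix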
| 23.414634
| 67
| 0.644792
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 730
| 0.760417
|
3d41b25f4537cebd266bfc51daa90f8c3d503433
| 16,155
|
py
|
Python
|
nicos/core/spm.py
|
ebadkamil/nicos
|
0355a970d627aae170c93292f08f95759c97f3b5
|
[
"CC-BY-3.0",
"Apache-2.0",
"CC-BY-4.0"
] | 12
|
2019-11-06T15:40:36.000Z
|
2022-01-01T16:23:00.000Z
|
nicos/core/spm.py
|
ebadkamil/nicos
|
0355a970d627aae170c93292f08f95759c97f3b5
|
[
"CC-BY-3.0",
"Apache-2.0",
"CC-BY-4.0"
] | 4
|
2019-11-08T10:18:16.000Z
|
2021-01-13T13:07:29.000Z
|
nicos/core/spm.py
|
ISISComputingGroup/nicos
|
94cb4d172815919481f8c6ee686f21ebb76f2068
|
[
"CC-BY-3.0",
"Apache-2.0",
"CC-BY-4.0"
] | 6
|
2020-01-11T10:52:30.000Z
|
2022-02-25T12:35:23.000Z
|
# -*- coding: utf-8 -*-
# *****************************************************************************
# NICOS, the Networked Instrument Control System of the MLZ
# Copyright (c) 2009-2021 by the NICOS contributors (see AUTHORS)
#
# This program is free software; you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free Software
# Foundation; either version 2 of the License, or (at your option) any later
# version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# Module authors:
# Georg Brandl <georg.brandl@frm2.tum.de>
#
# *****************************************************************************
"""
SPM (Simple Parameter Mode) is an alternate command input mode for NICOS where
entering Python code is not required.
The syntax is very simple and allows no variables, loops or conditionals: a
command line consists of a command and optional arguments, separated by spaces.
Arguments can be numbers, device names, strings and symbols (words that signify
a command option). Strings can be quoted or unquoted as long as they start
with a nondigit character.
Examples::
read
move a1 180
scan sth 10.4 0.4 25 t 2
"""
# XXX SPM todos:
# * figure out how to convert code examples in docstrings
# * add a way to make commands unavailable (e.g. manualscan)
import re
from itertools import chain, cycle, islice
from nicos.core.device import Device
from nicos.core.errors import SPMError
id_re = re.compile('[a-zA-Z_][a-zA-Z0-9_]*$')
string1_re = re.compile(r"'(\\\\|\\'|[^'])*'")
string2_re = re.compile(r'"(\\\\|\\"|[^"])*"')
spaces_re = re.compile(r'\s+')
nospace_re = re.compile(r'[^ \t;]+')
def spmsyntax(*arguments, **options):
"""Decorator to give a function specific SPM syntax advice, for parameter
checking and completion.
"""
def deco(func):
func.spmsyntax = arguments, options
return func
return deco
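# Illustrative use on a hypothetical user command (not part of this module):
#
#     @spmsyntax(AnyDev, Num)
#     def move(dev, pos):
#         ...
#
# The attached syntax advice lets the SPM parser check and complete
# `move <device> <number>` on the command line.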
class bare(str):
"""String that repr()s as itself without quotes."""
def __repr__(self):
return str(self)
class NoParse(Exception):
def __init__(self, expected, token):
Exception.__init__(self, expected, token)
self.token = token
self.expected = expected
class Token:
desc = 'token'
def handle(self, arg, session):
raise NoParse('strange token', arg)
def complete(self, text, session, argsofar):
return []
class String(Token):
desc = 'string'
def handle(self, arg, session):
if string1_re.match(arg) or string2_re.match(arg):
return bare(arg)
return arg
String = String()
class Bare(Token):
desc = 'value'
def handle(self, arg, session):
if id_re.match(arg):
if arg not in session.namespace:
return arg
return bare('(' + arg + ')')
Bare = Bare()
class Num(Token):
desc = 'number'
def handle(self, arg, session):
try:
return float(arg)
except ValueError:
raise NoParse('number', arg) from None
Num = Num()
class Int(Token):
desc = 'integer'
def handle(self, arg, session):
try:
return int(arg)
except ValueError:
raise NoParse('integer', arg) from None
Int = Int()
class Oneof(Token):
def __init__(self, *choices):
self.choices = choices
@property
def desc(self):
return 'one of ' + ', '.join(self.choices)
def handle(self, arg, session):
if arg.lower() not in self.choices:
raise NoParse(self.desc, arg)
return arg.lower()
def complete(self, text, session, argsofar):
return [c for c in self.choices if c.startswith(text)]
class Bool(Token):
desc = 'boolean'
def handle(self, arg, session):
if arg.lower() not in ['true', 'false']:
raise NoParse('true or false', arg)
return bare(arg.capitalize())
def complete(self, text, session, argsofar):
return [c for c in ['true', 'false'] if c.startswith(text)]
Bool = Bool()
class Dev(Token):
desc = 'device name'
def __init__(self, devtype=Device):
self.devtype = devtype
def clsrep(self, cls):
if isinstance(cls, tuple):
return ' or '.join(self.clsrep(c) for c in cls)
return cls.__name__
def handle(self, arg, session):
if arg not in session.explicit_devices:
raise NoParse('device name', arg)
if not isinstance(session.devices[arg], self.devtype):
raise NoParse('%s device' % self.clsrep(self.devtype), arg)
return bare(arg)
def complete(self, text, session, argsofar):
return [dev for dev in session.explicit_devices if dev.startswith(text)
and isinstance(session.devices[dev], self.devtype)]
AnyDev = Dev()
class DevParam(Token):
desc = 'parameter name'
def handle(self, arg, session):
return arg
def complete(self, text, session, argsofar):
try:
dev = session.getDevice(argsofar[-2])
return [p for p in dev.parameters if p.startswith(text)]
except Exception:
return []
DevParam = DevParam()
class SetupName(Token):
desc = 'setup name'
def __init__(self, what):
self.what = what
def handle(self, arg, session):
if arg not in session._setup_info:
raise NoParse('setup name', arg)
return arg
def complete(self, text, session, argsofar):
all_setups = [name for (name, info) in session._setup_info.items()
if info and info['group'] in ('basic', 'optional',
'plugplay', '')]
if self.what == 'all':
candidates = all_setups
elif self.what == 'unloaded':
candidates = [setup for setup in all_setups
if setup not in session.explicit_setups]
elif self.what == 'loaded':
candidates = session.explicit_setups
return [c for c in candidates if c.startswith(text)]
class DeviceName(Token):
desc = 'device name'
def handle(self, arg, session):
if arg not in session.configured_devices:
raise NoParse('device name', arg)
return arg
def complete(self, text, session, argsofar):
return [c for c in session.configured_devices
if c.startswith(text) and
c not in session.devices and not
session.configured_devices[c][1].get('lowlevel')]
DeviceName = DeviceName()
class Multi:
def __init__(self, *types):
self.types = types
class SPMHandler:
"""The main handler for SPM commands."""
def __init__(self, session):
self.session = session
def error(self, msg):
raise SPMError(msg)
def complete(self, command, word):
def select(candidates, word):
return [c for c in candidates if c.startswith(word)]
try:
# XXX could complete "?" too
if command.startswith(('!', '?')) or command.endswith('?'):
return []
if command.startswith(':'):
return self.complete(command[1:].strip(), word)
commands = self.tokenize(command, partial=True)
tokens = commands[-1] # only last command is interesting
if not word:
tokens.append('')
command = tokens[0]
if len(tokens) == 1:
# complete command
return select([n for (n, o) in self.session.namespace.items()
if hasattr(o, 'is_usercommand') or
isinstance(o, Device)], word)
cmdobj = self.session.namespace.get(command)
if isinstance(cmdobj, Device):
return []
if not hasattr(cmdobj, 'is_usercommand'):
return []
return self.complete_command(cmdobj, tokens[1:], word)
except Exception as err:
self.session.log.debug('error during completion: %s', err)
return []
def complete_command(self, command, args, word):
syntax = getattr(command, 'spmsyntax', None)
if syntax is None:
return []
arguments, options = syntax
posargs = len(arguments)
multargs = 0
if arguments and isinstance(arguments[-1], Multi):
multargs = len(arguments[-1].types)
posargs -= 1
arguments = chain(arguments[:-1], cycle(arguments[-1].types))
# assume we're completing the last word on the command line
if multargs or len(args) <= posargs:
# is it a positional argument
el = next(islice(arguments, len(args) - 1, len(args)))
return el.complete(word, self.session, args)
else:
# must be an option
which = (len(args) - posargs) % 2
if which == 1:
# option name
return [n for n in options if n.startswith(word)]
else:
# option value
optname = args[-2]
if optname in options:
return options[optname].complete(word, self.session, args)
return []
def handle_script(self, code, fn):
lines = []
for lineno, command in enumerate(code.splitlines()):
try:
lines.append(self.handle_line(command))
except SPMError as err:
err.args = ('in %s, line %d: ' % (fn or 'unnamed',
lineno + 1) + err.args[0],)
raise
return '\n'.join(lines)
def handle_line(self, command):
if command.startswith('#'):
# Comments (only in script files)
return 'pass'
if command.startswith('!'):
# Python escape
return command[1:].strip()
if command.startswith('?') or command.endswith('?'):
# Help escape
return 'help(%s)' % command.strip('?')
if command.startswith(':'):
# Simulation escape
code = self.handle_line(command[1:])
return 'sim(%r)' % code
try:
commands = self.tokenize(command)
except NoParse as err:
return self.error('could not parse starting at %r, expected %s' %
(err.token, err.expected))
code = []
for tokens in commands:
if not tokens:
code.append('pass')
continue
command = tokens[0]
cmdobj = self.session.namespace.get(command)
if hasattr(cmdobj, 'is_usercommand'):
code.append(self.handle_command(cmdobj, tokens[1:]))
elif isinstance(cmdobj, Device):
code.append(self.handle_device(cmdobj, tokens[1:]))
else:
return self.error('no such command or device: %r' % command)
return '; '.join(code)
def tokenize(self, command, partial=False):
rest = command
commands = [[]]
tokens = commands[0]
while rest:
if rest.startswith("'"):
m = string1_re.match(rest)
if not m:
if partial:
tokens.append(rest)
return tokens
raise NoParse('single-quoted string', rest)
tokens.append(m.group())
rest = rest[m.end():]
elif rest.startswith('"'):
m = string2_re.match(rest)
if not m:
if partial:
tokens.append(rest)
return tokens
raise NoParse('double-quoted string', rest)
tokens.append(m.group())
rest = rest[m.end():]
elif rest.startswith('('):
i = 1
while i < len(rest):
if rest[i] == ')':
break
i += 1
else:
if partial:
tokens.append(rest)
return tokens
raise NoParse('closing parenthesis', rest)
tokens.append(rest[:i + 1])
rest = rest[i + 1:]
elif rest.startswith('['):
i = 1
while i < len(rest):
if rest[i] == ']':
break
i += 1
else:
if partial:
tokens.append(rest)
return tokens
raise NoParse('closing bracket', rest)
tokens.append(rest[:i + 1])
rest = rest[i + 1:]
elif rest[0].isspace():
m = spaces_re.match(rest)
rest = rest[m.end():]
elif rest.startswith(';'):
# serial command execution
commands.append([])
tokens = commands[-1]
rest = rest[1:]
else:
m = nospace_re.match(rest)
tokens.append(m.group())
rest = rest[m.end():]
return commands
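    # For example, tokenize('move a1 180; read') yields
    # [['move', 'a1', '180'], ['read']]: whitespace separates tokens and ';'
    # starts a new command.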
def handle_device(self, device, args):
if not args:
return 'read(%s)' % device
elif len(args) == 1:
return 'maw(%s, %s)' % (device, args[0])
return self.error('too many arguments for simple device command')
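    # For example, the SPM line `sth` becomes read(sth), while `sth 10.4`
    # becomes maw(sth, 10.4).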
def handle_command(self, command, args):
syntax = getattr(command, 'spmsyntax', None)
if syntax is None:
syntax = ((Bare,) * len(args), {})
arguments, options = syntax
posargs = len(arguments)
multargs = 1
if arguments and isinstance(arguments[-1], Multi):
multargs = len(arguments[-1].types)
posargs -= 1
arguments = chain(arguments[:-1], cycle(arguments[-1].types))
# first, parse positional arguments (all must be given)
cmdargs = []
nargs = 0
for element in arguments:
if not args:
if nargs < posargs or (nargs - posargs) % multargs != 0:
return self.error('premature end of command, expected %s'
% element.desc)
break
try:
parg = element.handle(args[0], self.session)
except NoParse as err:
return self.error('invalid argument at %r, expected %s' %
(err.token, err.expected))
cmdargs.append(parg)
args = args[1:]
nargs += 1
# now come options
cmdopts = {}
if len(args) % 2:
return self.error('too many arguments at %r, expected end of '
'command' % args[-1])
while args:
opt, val = args[:2]
args = args[2:]
if not id_re.match(opt):
return self.error('invalid syntax at %r, expected option name'
% opt)
if opt in options:
try:
val = options[opt].handle(val, self.session)
except NoParse as err:
return self.error('invalid argument at %r, expected %s' %
(err.token, err.expected))
else:
val = bare(val)
cmdopts[opt] = val
# now nothing should be left
return command.__name__ + '(*%s, **%s)' % (cmdargs, cmdopts)
| 32.50503
| 79
| 0.532281
| 13,618
| 0.842959
| 0
| 0
| 80
| 0.004952
| 0
| 0
| 3,413
| 0.211266
|
3d42299242b673c35a88a568c3b956825f9d2deb
| 514
|
py
|
Python
|
2_Regression/ARX_Regression/empirical_id.py
|
abe-mart/arduino
|
1bbd88b6bcc3bb9092c259a071c8f3237c391c6a
|
[
"Apache-2.0"
] | 1
|
2020-06-23T16:28:34.000Z
|
2020-06-23T16:28:34.000Z
|
2_Regression/ARX_Regression/empirical_id.py
|
abe-mart/arduino
|
1bbd88b6bcc3bb9092c259a071c8f3237c391c6a
|
[
"Apache-2.0"
] | null | null | null |
2_Regression/ARX_Regression/empirical_id.py
|
abe-mart/arduino
|
1bbd88b6bcc3bb9092c259a071c8f3237c391c6a
|
[
"Apache-2.0"
] | 1
|
2020-07-22T17:43:30.000Z
|
2020-07-22T17:43:30.000Z
|
import numpy as np
import apm_id as arx
######################################################
# Configuration
######################################################
# number of terms
ny = 2 # output coefficients
nu = 1 # input coefficients
# number of inputs
ni = 1
# number of outputs
no = 1
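# Background (general ARX form, not specific to this script): an ARX model relates
# the output to past outputs and inputs,
#   y(k) = a1*y(k-1) + ... + a_ny*y(k-ny) + b1*u(k-1) + ... + b_nu*u(k-nu)
# so ny and nu above set how many past output and input terms are identified.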
# load data and parse into columns
data = np.loadtxt('data_step_test.csv',delimiter=',')
######################################################
# generate time-series model
arx.apm_id(data,ni,nu,ny)
| 25.7
| 55
| 0.470817
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 368
| 0.715953
|
3d42e0a9f4a4977092186d96df6c6ef12958272d
| 75,635
|
py
|
Python
|
setup.py
|
Alexhuszagh/toolchains
|
6428c889dd0def79ddf8498f9af7a9d3ddc0423e
|
[
"Unlicense"
] | 22
|
2021-06-16T08:33:22.000Z
|
2022-01-31T05:17:54.000Z
|
setup.py
|
Alexhuszagh/toolchains
|
6428c889dd0def79ddf8498f9af7a9d3ddc0423e
|
[
"Unlicense"
] | 1
|
2022-03-21T16:09:20.000Z
|
2022-03-21T16:09:20.000Z
|
setup.py
|
Alexhuszagh/xcross
|
6428c889dd0def79ddf8498f9af7a9d3ddc0423e
|
[
"Unlicense"
] | null | null | null |
#!/usr/bin/env python
'''
setup
=====
This is a relatively complicated setup script, since
it does a few things to simplify version control
and configuration files.
There's a simple command that overrides the `build_py`
command to ensure the proper version information is set
for the library.
There's also a more complex `configure` command
that configures all images from template files,
and also configures the `cmake` wrapper and the
shell version information.
'''
# IMPORTS
# -------
import ast
import enum
import glob
import itertools
import json
import re
import os
import setuptools
import shutil
import stat
import subprocess
import sys
import textwrap
try:
from setuptools import setup, Command
from setuptools.command.build_py import build_py
from setuptools.command.install import install
has_setuptools = True
except ImportError:
from distutils.core import setup, Command
from distutils.command.build_py import build_py
from distutils.command.install import install
has_setuptools = False
try:
import py2exe
except ImportError:
if len(sys.argv) >= 2 and sys.argv[1] == 'py2exe':
print('Cannot import py2exe', file=sys.stderr)
exit(1)
# CONFIG
# ------
def load_json(path):
'''Load JSON files with C++-style comments.'''
# Note: we need comments for maintainability, so we
# can annotate what works and the rationale, but
# we don't want to prevent code from working without
# a complex parser, so we do something very simple:
# only remove lines starting with '//'.
with open(path) as file:
lines = file.read().splitlines()
lines = [i for i in lines if not i.strip().startswith('//')]
return json.loads('\n'.join(lines))
HOME = os.path.dirname(os.path.realpath(__file__))
config = load_json(f'{HOME}/config/config.json')
# A lot of logic depends on being on the proper directory:
# this allows us to do out-of-source builds.
os.chdir(HOME)
def get_version(key):
'''Get the version data from the JSON config.'''
data = config[key]['version']
major = data['major']
minor = data['minor']
patch = data.get('patch', '')
release = data.get('release', '')
number = data.get('number', '')
build = data.get('build', '')
return (major, minor, patch, release, number, build)
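# For example, a version entry of {"major": "1", "minor": "2", "patch": "3"}
# yields ('1', '2', '3', '', '', ''); fields that are absent default to ''.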
# Read the xcross version information.
major, minor, patch, release, number, build = get_version('xcross')
version = f'{major}.{minor}'
if patch != '0':
version = f'{version}.{patch}'
release_type = {'alpha': 'a', 'beta': 'b', 'candidate': 'rc', 'post': '.post'}
if release and not number:
raise ValueError('Must provide a release number with a non-final build.')
elif release:
version = f'{version}{release_type[release]}{number}'
# py2exe version is valid one of the following:
# [0-255].[0-255].[0-65535]
# [0-255].[0-255].[0-255].[0-255]
# Therefore, we can never provide release candidate
# values or omit the patch field.
py2exe_version = f'{major}.{minor}.{patch}'
docker_major, docker_minor, docker_patch, docker_build, *_ = get_version('docker')
docker_version = f'{docker_major}.{docker_minor}'
if docker_patch != '0':
docker_version = f'{docker_version}.{docker_patch}'
# Read the dependency version information.
# This is the GCC and other utilities version from crosstool-NG.
ubuntu_major, ubuntu_minor, *_ = get_version('ubuntu')
ubuntu_version = f'{ubuntu_major}.{ubuntu_minor}'
emsdk_major, emsdk_minor, emsdk_patch, *_ = get_version('emsdk')
emsdk_version = f'{emsdk_major}.{emsdk_minor}.{emsdk_patch}'
gcc_major, gcc_minor, gcc_patch, *_ = get_version('gcc')
gcc_version = f'{gcc_major}.{gcc_minor}.{gcc_patch}'
binutils_major, binutils_minor, *_ = get_version('binutils')
binutils_version = f'{binutils_major}.{binutils_minor}'
mingw_major, mingw_minor, mingw_patch, *_ = get_version('mingw')
mingw_version = f'{mingw_major}.{mingw_minor}.{mingw_patch}'
glibc_major, glibc_minor, *_ = get_version('glibc')
glibc_version = f'{glibc_major}.{glibc_minor}'
musl_major, musl_minor, musl_patch, *_ = get_version('musl')
musl_version = f'{musl_major}.{musl_minor}.{musl_patch}'
musl_cross_major, musl_cross_minor, musl_cross_patch, *_ = get_version('musl-cross')
musl_cross_version = f'{musl_cross_major}.{musl_cross_minor}.{musl_cross_patch}'
avr_major, avr_minor, avr_patch, *_ = get_version('avr')
avr_version = f'{avr_major}.{avr_minor}.{avr_patch}'
uclibc_major, uclibc_minor, uclibc_patch, *_ = get_version('uclibc')
uclibc_version = f'{uclibc_major}.{uclibc_minor}.{uclibc_patch}'
expat_major, expat_minor, expat_patch, *_ = get_version('expat')
expat_version = f'{expat_major}.{expat_minor}.{expat_patch}'
isl_major, isl_minor, *_ = get_version('isl')
isl_version = f'{isl_major}.{isl_minor}'
linux_major, linux_minor, linux_patch, *_ = get_version('linux')
linux_version = f'{linux_major}.{linux_minor}.{linux_patch}'
linux_headers_major, linux_headers_minor, linux_headers_patch, *_ = get_version('linux-headers')
linux_headers_version = f'{linux_headers_major}.{linux_headers_minor}.{linux_headers_patch}'
gmp_major, gmp_minor, gmp_patch, *_ = get_version('gmp')
gmp_version = f'{gmp_major}.{gmp_minor}.{gmp_patch}'
mpc_major, mpc_minor, mpc_patch, *_ = get_version('mpc')
mpc_version = f'{mpc_major}.{mpc_minor}.{mpc_patch}'
mpfr_major, mpfr_minor, mpfr_patch, *_ = get_version('mpfr')
mpfr_version = f'{mpfr_major}.{mpfr_minor}.{mpfr_patch}'
buildroot_major, buildroot_minor, buildroot_patch, *_ = get_version('buildroot')
buildroot_version = f'{buildroot_major}.{buildroot_minor}.{buildroot_patch}'
ct_major, ct_minor, ct_patch, *_ = get_version('crosstool-ng')
ct_version = f'{ct_major}.{ct_minor}.{ct_patch}'
qemu_major, qemu_minor, qemu_patch, *_ = get_version('qemu')
qemu_version = f'{qemu_major}.{qemu_minor}.{qemu_patch}'
riscv_toolchain_version = config['riscv-gnu-toolchain']['riscv-version']
riscv_binutils_version = config['riscv-gnu-toolchain']['binutils-version']
riscv_gdb_version = config['riscv-gnu-toolchain']['gdb-version']
riscv_glibc_version = config['riscv-gnu-toolchain']['glibc-version']
riscv_newlib_version = config['riscv-gnu-toolchain']['newlib-version']
# Other config options.
bin_directory = f'{config["options"]["sysroot"]}/bin/'
# Read the long description.
description = 'Zero-setup cross compilation.'
with open(f'{HOME}/README.md') as file:
long_description = file.read()
# COMMANDS
# --------
# Literal boolean type for command arguments.
bool_type = (type(None), bool, int)
def parse_literal(inst, key, default, valid_types=None):
'''Parse literal user options.'''
value = getattr(inst, key)
if value != default:
value = ast.literal_eval(value)
if valid_types is not None:
assert isinstance(value, valid_types)
setattr(inst, key, value)
def check_call(code):
'''Wrap `subprocess.call` to exit on failure.'''
if code != 0:
sys.exit(code)
def has_module(module):
'''Check if the given module is installed.'''
devnull = subprocess.DEVNULL
code = subprocess.call(
[sys.executable, '-m', module, '--version'],
stdout=devnull,
stderr=devnull,
)
return code == 0
def semver():
'''Create a list of semantic versions for images.'''
versions = [
f'{docker_major}.{docker_minor}',
f'{docker_major}.{docker_minor}.{docker_patch}'
]
if docker_major != '0':
versions.append(docker_major)
return versions
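# For example, a docker version of 1.2.3 yields ['1.2', '1.2.3', '1'];
# for a 0.x major version the bare major tag is omitted.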
def image_from_target(target, with_pkg=False):
'''Get the full image name from the target.'''
username = config['metadata']['username']
repository = config['metadata']['repository']
if with_pkg:
repository = f'pkg{repository}'
return f'{username}/{repository}:{target}'
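# For example, with username 'user' and repository 'repo' in the config,
# image_from_target('avr') is 'user/repo:avr' and the package-manager variant
# is 'user/pkgrepo:avr'.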
def sorted_image_targets():
'''Get a sorted list of image targets.'''
# Need to write the total image list.
os_images = []
metal_images = []
other_images = []
for image in images:
if image.os.is_os():
os_images.append(image.target)
elif image.os.is_baremetal():
metal_images.append(image.target)
else:
other_images.append(image.target)
os_images.sort()
metal_images.sort()
other_images.sort()
return os_images + metal_images + other_images
def subslice_targets(start=None, stop=None):
'''Extract a subslice of all targets.'''
targets = sorted_image_targets()
if start is not None:
targets = targets[targets.index(start):]
if stop is not None:
targets = targets[:targets.index(stop) + 1]
return targets
def build_image(docker, target, with_pkg=False):
'''Call Docker to build a single target.'''
image = image_from_target(target, with_pkg)
image_dir = 'images'
if with_pkg:
image_dir = f'pkg{image_dir}'
path = f'{HOME}/docker/{image_dir}/Dockerfile.{target}'
return subprocess.call([docker, 'build', '-t', image, HOME, '--file', path])
class CleanDistCommand(Command):
'''A custom command to clean Python dist artifacts.'''
description = 'clean artifacts from previous python builds'
user_options = []
def initialize_options(self):
pass
def finalize_options(self):
pass
def run(self):
'''Clean build data.'''
shutil.rmtree(f'{HOME}/build', ignore_errors=True)
shutil.rmtree(f'{HOME}/dist', ignore_errors=True)
shutil.rmtree(f'{HOME}/xcross.egg-info', ignore_errors=True)
# Clean py2exe files
dlls = glob.glob(f'{HOME}/*.dll')
exes = glob.glob(f'{HOME}/*.exe')
sos = glob.glob(f'{HOME}/*.so')
for file in dlls + exes + sos:
os.remove(file)
class CleanCommand(Command):
'''A custom command to clean any previous builds.'''
description = 'clean all previous builds'
user_options = []
def initialize_options(self):
pass
def finalize_options(self):
pass
def run(self):
'''Clean build data.'''
self.run_command('clean_dist')
shutil.rmtree(f'{HOME}/cmake/toolchain', ignore_errors=True)
shutil.rmtree(f'{HOME}/docker/images', ignore_errors=True)
shutil.rmtree(f'{HOME}/docker/pkgimages', ignore_errors=True)
shutil.rmtree(f'{HOME}/musl/config', ignore_errors=True)
shutil.rmtree(f'{HOME}/symlink/toolchain', ignore_errors=True)
class VersionCommand(Command):
'''A custom command to configure the library version.'''
description = 'set library version'
user_options = []
def initialize_options(self):
pass
def finalize_options(self):
pass
def replace(self, string, replacements):
'''Replace template variable with value.'''
for variable, value in replacements:
string = string.replace(f'^{variable}^', value)
return string
def chmod(self, file):
'''Make a file executable.'''
flags = stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH
st = os.stat(file)
os.chmod(file, st.st_mode | flags)
def write_file(self, path, contents, chmod):
'''Check if we need to write a file.'''
try:
with open(path, 'r') as file:
old_contents = file.read()
should_update = old_contents != contents
except FileNotFoundError:
should_update = True
if should_update:
with open(path, 'w') as file:
file.write(contents)
if chmod:
self.chmod(path)
def configure(self, template, outfile, chmod, replacements):
'''Configure a template file.'''
with open(template, 'r') as file:
contents = file.read()
contents = self.replace(contents, replacements)
self.write_file(outfile, contents, chmod)
def run(self):
'''Modify the library version.'''
version_info = f"""
version_info(
major='{major}',
minor='{minor}',
patch='{patch}',
release='{release}',
number='{number}',
build='{build}'
)"""
xcross = f'{HOME}/xcross/__init__.py'
self.configure(f'{xcross}.in', xcross, True, [
('BIN', f'"{bin_directory}"'),
('REPOSITORY', config['metadata']['repository']),
('USERNAME', config['metadata']['username']),
('VERSION_MAJOR', f"'{major}'"),
('VERSION_MINOR', f"'{minor}'"),
('VERSION_PATCH', f"'{patch}'"),
('VERSION_RELEASE', f"'{release}'"),
('VERSION_NUMBER', f"'{number}'"),
('VERSION_BUILD', f"'{build}'"),
('VERSION_INFO', textwrap.dedent(version_info)[1:]),
('VERSION', f"'{version}'"),
])
class TagCommand(Command):
'''Scripts to automatically tag new versions.'''
description = 'tag version for release'
user_options = []
def initialize_options(self):
pass
def finalize_options(self):
pass
def run(self):
'''Tag version for git release.'''
# Get our config.
git = shutil.which('git')
if not git:
raise FileNotFoundError('Unable to find program git.')
tag = f'v{version}'
# Delete any existing, conflicting tags.
devnull = subprocess.DEVNULL
env = os.environ.copy()
env['GIT_DIR'] = f'{HOME}/.git'
code = subprocess.call(
['git', 'rev-parse', tag],
stdout=devnull,
stderr=devnull,
env=env,
)
if code == 0:
check_call(subprocess.call(
['git', 'tag', '-d', tag],
stdout=devnull,
stderr=devnull,
))
# Tag the release.
check_call(subprocess.call(
['git', 'tag', tag],
stdout=devnull,
stderr=devnull,
))
class BuildImageCommand(Command):
'''Build a single Docker image.'''
description = 'build a single docker image'
user_options = [
('target=', None, 'Target name'),
('with-package-managers=', None, 'Build an image with package managers.'),
]
def initialize_options(self):
self.target = None
self.with_package_managers = None
def finalize_options(self):
assert self.target is not None
parse_literal(self, 'with_package_managers', None, bool_type)
def build_image(self, docker):
'''Build a Docker image.'''
if build_image(docker, self.target, self.with_package_managers) != 0:
print(f'Error: failed to build target {self.target}', file=sys.stderr)
sys.exit(1)
def run(self):
'''Build single Docker image.'''
docker = shutil.which('docker')
if not docker:
raise FileNotFoundError('Unable to find command docker.')
self.build_image(docker)
class BuildImagesCommand(Command):
'''Build all Docker images.'''
description = 'build all docker images'
user_options = [
('start=', None, 'Start point for images to build.'),
('stop=', None, 'Stop point for images to build.'),
('with-package-managers=', None, 'Build package manager images.'),
]
def initialize_options(self):
self.start = None
self.stop = None
self.with_package_managers = None
def finalize_options(self):
parse_literal(self, 'with_package_managers', None, bool_type)
def build_image(self, docker, target, with_package_managers=False):
'''Build a Docker image.'''
if build_image(docker, target, with_package_managers) != 0:
self.failures.append(target)
return False
return True
def tag_image(self, docker, target, tag_name, with_package_managers=False):
'''Tag an image.'''
image = image_from_target(target, with_package_managers)
tag = image_from_target(tag_name, with_package_managers)
check_call(subprocess.call([docker, 'tag', image, tag]))
def build_versions(self, docker, target, with_pkg=False):
'''Build all versions of a given target.'''
if not self.build_image(docker, target, with_pkg):
return
for version in semver():
self.tag_image(docker, target, f'{target}-{version}', with_pkg)
if target.endswith('-unknown-linux-gnu'):
self.tag_versions(docker, target, target[:-len('-unknown-linux-gnu')], with_pkg)
def tag_versions(self, docker, target, tag_name, with_pkg=False):
'''Build all versions of a given target.'''
self.tag_image(docker, target, tag_name, with_pkg)
for version in semver():
self.tag_image(docker, target, f'{tag_name}-{version}', with_pkg)
def run(self):
'''Build all Docker images.'''
docker = shutil.which('docker')
if not docker:
raise FileNotFoundError('Unable to find command docker.')
# Need to build our base vcpkg for package files.
if self.with_package_managers:
if build_image(docker, 'vcpkg', True) != 0:
print('Error: failed to build target vcpkg', file=sys.stderr)
sys.exit(1)
# Build all our Docker images.
self.failures = []
for target in subslice_targets(self.start, self.stop):
self.build_versions(docker, target)
# Only build if the previous image succeeded, and if
# the image with a package manager exists.
if self.failures and self.failures[-1] == target:
continue
elif not self.with_package_managers:
continue
if os.path.exists(f'{HOME}/docker/pkgimages/Dockerfile.{target}'):
self.build_versions(docker, target, with_pkg=True)
# Print any failures.
if self.failures:
print('Error: Failures occurred.', file=sys.stderr)
print('-------------------------', file=sys.stderr)
for failure in self.failures:
print(failure, file=sys.stderr)
sys.exit(1)
class BuildAllCommand(BuildImagesCommand):
'''Build Docker images and the Python library for dist.'''
description = 'build all docker images and wheels for release'
user_options = []
def initialize_options(self):
pass
def finalize_options(self):
pass
def run(self):
'''Build all images and package for release.'''
BuildImagesCommand.run(self)
self.run_command('clean_dist')
self.run_command('configure')
self.run_command('build')
self.run_command('sdist')
self.run_command('bdist_wheel')
class BuildCommand(build_py):
'''Override build command to configure builds.'''
def run(self):
self.run_command('version')
build_py.run(self)
class InstallCommand(install):
'''Override install command to configure builds.'''
def run(self):
# Note: this should already be run, and this is redundant.
# However, if `skip_build` is provided, this needs to be run.
self.run_command('version')
install.run(self)
class PushCommand(Command):
'''Push all Docker images to Docker hub.'''
description = 'push all docker images to docker hub'
user_options = [
('start=', None, 'Start point for images to push.'),
('stop=', None, 'Stop point for images to push.'),
('with-package-managers=', None, 'Build package manager images.'),
]
def initialize_options(self):
self.start = None
self.stop = None
self.with_package_managers = None
def finalize_options(self):
parse_literal(self, 'with_package_managers', None, bool_type)
def push_image(self, docker, target, with_package_managers=False):
'''Push an image to Docker Hub.'''
image = image_from_target(target, with_package_managers)
check_call(subprocess.call([docker, 'push', image]))
def push_versions(self, docker, target, with_package_managers=False):
'''Push all versions of a given target.'''
self.push_image(docker, target, with_package_managers)
for version in semver():
self.push_image(docker, f'{target}-{version}', with_package_managers)
def push_target(self, docker, target, with_package_managers=False):
'''Push all images for a given target.'''
self.push_versions(docker, target, with_package_managers)
if target.endswith('-unknown-linux-gnu'):
base = target[:-len('-unknown-linux-gnu')]
self.push_versions(docker, base, with_package_managers)
def run(self):
'''Push all Docker images to Docker hub.'''
docker = shutil.which('docker')
if not docker:
raise FileNotFoundError('Unable to find command docker.')
# Push all our Docker images.
for target in subslice_targets(self.start, self.stop):
self.push_target(docker, target)
if not self.with_package_managers:
continue
if os.path.exists(f'{HOME}/docker/pkgimages/Dockerfile.{target}'):
self.push_target(docker, target, with_package_managers=True)
class PublishCommand(Command):
'''Publish a Python version.'''
description = 'publish python version to PyPi'
user_options = [
('test=', None, 'Upload to the test repository.'),
]
def initialize_options(self):
self.test = None
def finalize_options(self):
parse_literal(self, 'test', None, bool_type)
def run(self):
'''Run the unittest suite.'''
if not has_module('twine'):
raise FileNotFoundError('Unable to find module twine.')
self.run_command('clean_dist')
self.run_command('configure')
self.run_command('build')
self.run_command('sdist')
self.run_command('bdist_wheel')
files = glob.glob(f'{HOME}/dist/*')
command = [sys.executable, '-m', 'twine', 'upload']
if self.test:
command += ['--repository', 'testpypi']
command += files
check_call(subprocess.call(command))
class TestCommand(Command):
'''Run the unittest suite.'''
description = 'run unittest suite'
user_options = []
def initialize_options(self):
pass
def finalize_options(self):
pass
def run(self):
'''Run the unittest suite.'''
if not has_module('tox'):
raise FileNotFoundError('Unable to find module tox.')
check_call(subprocess.call(['tox', HOME]))
class LintCommand(Command):
'''Lint python code.'''
description = 'lint python code'
user_options = []
def initialize_options(self):
pass
def finalize_options(self):
pass
def run(self):
'''Run the unittest suite.'''
if not has_module('flake8'):
raise FileNotFoundError('Unable to find module flake8.')
self.run_command('configure')
check_call(subprocess.call(['flake8', HOME]))
class TestImagesCommand(Command):
'''Run the Docker test suite.'''
description = 'run docker test suite'
user_options = [
('start=', None, 'Start point for test suite.'),
('stop=', None, 'Stop point for test suite.'),
('os=', None, 'Do operating system tests tests.'),
('metal=', None, 'Do bare-metal tests.'),
]
metal_tests = [
'arm',
'arm64',
'avr',
'ppc',
'mips',
'mipsel',
'riscv32',
'riscv64',
('i686', 'x86'),
]
def initialize_options(self):
self.start = None
self.stop = None
self.os = True
self.metal = None
def finalize_options(self):
parse_literal(self, 'os', None, bool_type)
parse_literal(self, 'metal', None, bool_type)
def git_clone(self, git, repository):
'''Clone a given repository.'''
check_call(subprocess.call([git, 'clone', repository, f'{HOME}/buildtests']))
def run_test(
self,
docker,
target,
os_type,
script=None,
cpu=None,
**envvars
):
'''Run test for a single target.'''
# Get our command.
if script is None:
script = 'image-test'
command = f'/test/{script}.sh'
if cpu is not None:
command = f'export CPU={cpu}; {command}'
# Check for additional flags.
if self.nostartfiles(target):
flags = envvars.get('FLAGS')
if flags:
flags = f'{flags} -nostartfiles'
else:
flags = '-nostartfiles'
envvars['FLAGS'] = flags
# Build and call our docker command.
docker_command = [
docker,
'run',
'--name', f'xcross-test-{target}',
'-v', f'{HOME}/test:/test',
'--env', f'IMAGE={target}',
'--env', f'TYPE={os_type}',
]
for key, value in envvars.items():
docker_command += ['--env', f'{key}={value}']
docker_command.append(image_from_target(target))
docker_command += ['/bin/bash', '-c', command]
subprocess.check_call(docker_command)
        # Clean up our stopped container.
subprocess.check_call([docker, 'rm', f'xcross-test-{target}'])
def nostartfiles(self, target):
'''Check if an image does not have startfiles.'''
        # i[3-7]86 does not provide start files, a known bug with newlib.
# moxie cannot find `__bss_start__` and `__bss_end__`.
# sparc cannot find `__stack`.
# there is no crt0 for x86_64
regex = re.compile(r'''^(?:
(?:i[3-7]86-unknown-elf)|
(?:moxie.*-none-elf)|
(?:sparc-unknown-elf)|
(?:x86_64-unknown-elf)
)$''', re.X)
return regex.match(target)
def skip(self, target):
'''Check if we should skip a given target.'''
# Check if we should skip a test.
# PPCLE is linked to the proper library, which contains the
# proper symbols, but still fails with an error:
# undefined reference to `_savegpr_29`.
return target == 'ppcle-unknown-elf'
def run_wasm(self, docker, **envvars):
'''Run a web-assembly target.'''
self.run_test(
docker,
'wasm',
'script',
**envvars,
NO_PERIPHERALS='1',
TOOLCHAIN1='jsonly',
TOOLCHAIN2='wasm',
TOOLCHAIN1_FLAGS='-s WASM=0',
TOOLCHAIN2_FLAGS='-s WASM=1',
)
def run_os(self, docker):
'''Run the tests with an operating system.'''
# Configure our test runner.
has_started = True
has_stopped = False
if self.start is not None:
has_started = False
metal_images = sorted([i.target for i in images if i.os.is_baremetal()])
os_images = sorted([i.target for i in images if i.os.is_os()])
# Run OS images.
testdir = f'{HOME}/test/buildtests'
shutil.copytree(f'{HOME}/test/cpp-helloworld', testdir, dirs_exist_ok=True)
try:
for target in os_images:
if has_started or self.start == target:
has_started = True
if not self.skip(target):
self.run_test(docker, target, 'os')
if self.stop == target:
has_stopped = True
break
# Run the special images.
if has_started and not has_stopped:
self.run_wasm(docker)
self.run_wasm(docker, CMAKE_FLAGS='-DJS_ONLY=1')
self.run_test(docker, os_images[0], 'os', CMAKE_FLAGS='-GNinja')
self.run_wasm(docker, CMAKE_FLAGS='-GNinja')
self.run_test(docker, 'ppc-unknown-linux-gnu', 'os', cpu='e500mc', NORUN2='1')
self.run_test(docker, 'ppc64-unknown-linux-gnu', 'os', cpu='power9')
self.run_test(docker, 'mips-unknown-linux-gnu', 'os', cpu='24Kf')
finally:
shutil.rmtree(testdir, ignore_errors=True)
if has_stopped:
return
# Run metal images.
shutil.copytree(f'{HOME}/test/cpp-atoi', testdir, dirs_exist_ok=True)
try:
for target in metal_images:
if has_started or self.start == target:
has_started = True
if not self.skip(target):
self.run_test(docker, target, 'metal')
if self.stop == target:
has_stopped = True
break
finally:
shutil.rmtree(testdir, ignore_errors=True)
if has_stopped:
return
def run_metal(self, docker):
'''Run the bare-metal tests.'''
for arch in self.metal_tests:
if isinstance(arch, tuple):
image = f'{arch[0]}-unknown-elf'
script = f'{arch[1]}-hw'
else:
                image = f'{arch}-unknown-elf'
script = f'{arch}-hw'
self.run_test(docker, image, 'metal', script=script)
def run(self):
'''Run the docker test suite.'''
# Find our necessary commands.
docker = shutil.which('docker')
if not docker:
raise FileNotFoundError('Unable to find command docker.')
if self.os:
self.run_os(docker)
if self.metal:
self.run_metal(docker)
class TestAllCommand(TestImagesCommand):
'''Run the Python and Docker test suites.'''
def run(self):
'''Run the docker test suite.'''
self.run_command('test')
TestImagesCommand.run(self)
# IMAGES
# ------
# There are two types of images:
# 1). Images with an OS layer.
# 2). Bare-metal machines.
# Bare-metal machines don't use newlib's nano malloc, so these do not
# support system allocators.
class OperatingSystem(enum.Enum):
'''Enumerations for known operating systems.'''
Android = enum.auto()
BareMetal = enum.auto()
Linux = enum.auto()
Emscripten = enum.auto()
Windows = enum.auto()
Unknown = enum.auto()
def is_baremetal(self):
return self == OperatingSystem.BareMetal
def is_emscripten(self):
return self == OperatingSystem.Emscripten
def is_os(self):
return (
self == OperatingSystem.Android
or self == OperatingSystem.Linux
or self == OperatingSystem.Windows
)
def to_cmake(self):
'''Get the identifier for the CMake system name.'''
return cmake_string[self]
def to_conan(self):
'''Get the identifier for the Conan system name.'''
return conan_string[self]
def to_meson(self):
'''Get the identifier for the Meson system name.'''
return meson_string[self]
def to_triple(self):
'''Get the identifier as a triple string.'''
return triple_string[self]
def to_vcpkg(self):
'''Get the identifier for the vcpkg system name.'''
return vcpkg_string[self]
@staticmethod
def from_triple(string):
'''Get the operating system from a triple string.'''
return triple_os[string]
cmake_string = {
OperatingSystem.Android: 'Android',
OperatingSystem.BareMetal: 'Generic',
# This gets ignored anyway.
OperatingSystem.Emscripten: 'Emscripten',
OperatingSystem.Linux: 'Linux',
OperatingSystem.Windows: 'Windows',
OperatingSystem.Unknown: 'Generic',
}
conan_string = {
# Conan uses CMake's feature detection for Android,
# which is famously broken. We have our custom toolchains
# to pass the proper build arguments. Just say Linux,
# and run with it.
OperatingSystem.Android: 'Linux',
OperatingSystem.Linux: 'Linux',
OperatingSystem.Windows: 'Windows',
}
meson_string = {
# The default use is just to use 'linux' for Android.
OperatingSystem.Android: 'linux',
OperatingSystem.BareMetal: 'bare metal',
OperatingSystem.Linux: 'linux',
OperatingSystem.Windows: 'windows',
}
triple_string = {
OperatingSystem.Android: 'linux',
OperatingSystem.BareMetal: None,
OperatingSystem.Emscripten: 'emscripten',
OperatingSystem.Linux: 'linux',
OperatingSystem.Windows: 'w64',
}
vcpkg_string = {
**cmake_string,
    # Uses MinGW to differentiate from legacy Windows apps and the
    # Universal Windows Platform. Since we only support MinGW, use it.
OperatingSystem.Windows: 'MinGW',
}
triple_os = {v: k for k, v in triple_string.items()}
oses = {
'linux': OperatingSystem.Linux,
'none': OperatingSystem.BareMetal,
}
def extract_triple(triple):
'''Extract components from the LLVM triple.'''
# Due to how we designed this, we can only
# 1. Omit the vendor, os and system.
# 2. Omit the vendor.
# 3. Omit the os.
# 4. Have all 4 components.
split = triple.split('-')
arch = split[0]
if len(split) == 1:
# ('arch',)
vendor = None
os = OperatingSystem.BareMetal
system = None
elif len(split) == 2 and split[1] in oses:
# ('arch', 'os')
vendor = None
os = oses[split[1]]
system = None
elif len(split) == 3 and split[2] == 'mingw32':
# ('arch', 'vendor', 'system')
vendor = None
os = OperatingSystem.Windows
system = split[2]
elif len(split) == 3:
# ('arch', 'vendor', 'system')
vendor = split[1]
os = OperatingSystem.BareMetal
system = split[2]
elif len(split) == 4:
# ('arch', 'vendor', 'os', 'system')
vendor = split[1]
os = OperatingSystem.from_triple(split[2])
system = split[3]
else:
raise ValueError(f'Invalid LLVM triple, got {triple}')
return (arch, vendor, os, system)
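# For example:
#   extract_triple('arm')                      -> ('arm', None, OperatingSystem.BareMetal, None)
#   extract_triple('i686-w64-mingw32')         -> ('i686', None, OperatingSystem.Windows, 'mingw32')
#   extract_triple('x86_64-unknown-linux-gnu') -> ('x86_64', 'unknown', OperatingSystem.Linux, 'gnu')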
class Image:
'''
Parameters (and defaults) for custom images.
* `target` - Image name of the target (resembling an LLVM triple).
* `triple` - LLVM triple of the target. (arch, vendor, os, system)
'''
def __init__(self, target, triple=None, **kwds):
self.target = target
self.triple = triple or target
self.arch, self.vendor, self.os, self.system = extract_triple(self.triple)
for key, value in kwds.items():
setattr(self, key, value)
@classmethod
def from_dict(cls, data):
return cls(**data)
@staticmethod
def from_json(data):
image_type = data.pop('type')
return image_types[image_type].from_dict(data)
@property
def config(self):
return getattr(self, '_config', self.target)
@config.setter
def config(self, value):
self._config = value
@property
def hardcoded_cpulist(self):
cpus = getattr(self, 'cpulist', '')
if cpus:
return f'export HARDCODED="{cpus}"\n'
return ''
@property
def ld_library_path(self):
path = getattr(self, 'library_path', '')
if path:
return f'export LD_LIBRARY_PATH="{path}"\n'
return ''
@property
def ld_preload(self):
path = getattr(self, 'preload', '')
if path:
return f'export LD_PRELOAD="{path}"\n'
return ''
@property
def cc_cpu_list(self):
cpulist = getattr(self, 'cc_cpulist', '')
if cpulist:
return f'export CC_CPU_LIST="{cpulist}"\n'
return ''
@property
def run_cpu_list(self):
cpulist = getattr(self, 'run_cpulist', '')
if cpulist:
return f'export RUN_CPU_LIST="{cpulist}"\n'
return ''
@property
def flags(self):
return getattr(self, '_flags', '')
@flags.setter
def flags(self, value):
self._flags = value
@property
def optional_flags(self):
return getattr(self, '_optional_flags', '')
@optional_flags.setter
def optional_flags(self, value):
self._optional_flags = value
@property
def cflags(self):
flags = self.flags
if flags:
return f'CFLAGS="{flags}" '
return ''
@property
def optional_cflags(self):
flags = self.optional_flags
if flags:
return f'OPTIONAL_CFLAGS="{flags}" '
return ''
@property
def os(self):
return self._os
@os.setter
def os(self, value):
if isinstance(value, str):
value = OperatingSystem.from_triple(value)
self._os = value
@property
def processor(self):
return getattr(self, '_processor', self.arch)
@processor.setter
def processor(self, value):
self._processor = value
@property
def family(self):
return getattr(self, '_family', self.processor)
@family.setter
def family(self, value):
self._family = value
@property
def qemu(self):
return getattr(self, '_qemu', False)
@qemu.setter
def qemu(self, value):
self._qemu = value
@property
def linkage(self):
return getattr(self, '_linkage', 'static')
@linkage.setter
def linkage(self, value):
self._linkage = value
class AndroidImage(Image):
'''Specialized properties for Android images.'''
@property
def os(self):
return OperatingSystem.Android
@os.setter
def os(self, _):
pass
@property
def abi(self):
return getattr(self, '_abi', self.arch)
@abi.setter
def abi(self, value):
self._abi = value
@property
def prefix(self):
return getattr(self, '_prefix', self.arch)
@prefix.setter
def prefix(self, value):
self._prefix = value
@property
def toolchain(self):
return f'{self.arch}-linux-{self.system}'
@property
def qemu(self):
return False
class BuildRootImage(Image):
'''Specialized properties for buildroot images.'''
@property
def use_32(self):
return getattr(self, '_use_32', False)
@use_32.setter
def use_32(self, value):
self._use_32 = value
@property
def symlink_sysroot(self):
return getattr(self, '_symlink_sysroot', False)
@symlink_sysroot.setter
def symlink_sysroot(self, value):
self._symlink_sysroot = value
class CrosstoolImage(Image):
'''Specialized properties for crosstool-NG images.'''
@property
def patches(self):
return getattr(self, '_patches', [])
@patches.setter
def patches(self, value):
self._patches = value
class DebianImage(Image):
'''Specialized properties for Debian images.'''
@property
def cxx(self):
default = f'g++-{{version}}-{self.processor}-{self.os.to_triple()}-{self.system}'
return getattr(self, '_cxx', default).format(version=gcc_major)
@cxx.setter
def cxx(self, value):
self._cxx = value
@property
def libc(self):
default = f'libc6-{self.arch}-cross'
return getattr(self, '_libc', default)
@libc.setter
def libc(self, value):
self._libc = value
@property
def prefix(self):
return getattr(self, '_prefix', self.processor)
@prefix.setter
def prefix(self, value):
self._prefix = value
@property
def qemu(self):
return True
class MuslCrossImage(Image):
'''Specialized properties for musl-cross images.'''
@property
def gcc_config(self):
config = getattr(self, '_gcc_config', '')
if config:
return f'{config} '
return ''
@gcc_config.setter
def gcc_config(self, value):
self._gcc_config = value
class RiscvImage(Image):
'''Specialized properties for RISC-V images.'''
@property
def processor(self):
return self.target.split('-')[0]
@property
def bits(self):
return int(re.match(r'^riscv(\d+)$', self.processor).group(1))
@property
def optional_flags(self):
march = f'rv{self.bits}{self.extensions}'
flags = f'-march={march} -mabi={self.abi}'
        if Image.optional_flags.fget(self):
            flags = f'{Image.optional_flags.fget(self)} {flags}'
return flags
class OtherImage(Image):
'''Specialized properties for miscellaneous images.'''
@property
def dockerfile(self):
return getattr(self, '_dockerfile', {})
@dockerfile.setter
def dockerfile(self, value):
self._dockerfile = value
image_types = {
'android': AndroidImage,
'buildroot': BuildRootImage,
'crosstool': CrosstoolImage,
'debian': DebianImage,
'musl-cross': MuslCrossImage,
'riscv': RiscvImage,
'other': OtherImage,
}
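# For example (hypothetical data), Image.from_json({'type': 'debian',
# 'target': 'aarch64-unknown-linux-gnu'}) pops 'type', dispatches to
# DebianImage via the table above, and the remaining keys become
# constructor arguments.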
# Get all images.
images = [Image.from_json(i) for i in load_json(f'{HOME}/config/images.json')]
# Add extensions
def add_android_extensions():
'''Add Android extensions (null-op).'''
def add_buildroot_extensions():
'''Add buildroot extensions (null-op).'''
def add_crosstool_extensions():
'''Add crosstool-NG toolchain extensions (null-op).'''
def add_debian_extensions():
'''Add Debian toolchain extensions (null-op).'''
def add_musl_cross_extensions():
'''Add musl-cross toolchain extensions (null-op).'''
# Add our RISC-V images with extensions.
def create_riscv_image(os, bits, arch, abi):
'''Create a RISC-V image.'''
prefix = f'riscv{bits}-{arch}-{abi}'
if os == OperatingSystem.Linux:
target = f'{prefix}-multilib-linux-gnu'
triple = 'riscv64-unknown-linux-gnu'
qemu = True
elif os == OperatingSystem.BareMetal:
target = f'{prefix}-unknown-elf'
triple = 'riscv64-unknown-elf'
qemu = False
else:
raise ValueError(f'Unknown operating system {os.to_triple()}')
return RiscvImage.from_dict({
'target': target,
'triple': triple,
'qemu': qemu,
'extensions': arch,
'abi': abi
})
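# Illustrative call with made-up arguments: create_riscv_image(
# OperatingSystem.Linux, 64, 'imafdc', 'lp64d') should yield a RiscvImage
# with target 'riscv64-imafdc-lp64d-multilib-linux-gnu', the generic
# 'riscv64-unknown-linux-gnu' triple, and qemu enabled.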
def add_riscv_extensions():
'''Add RISC-V extensions.'''
riscv = config['riscv-gnu-toolchain']
bits = riscv['bits']
extensions = riscv['extensions']
for key in extensions:
os = OperatingSystem.from_triple(extensions[key]['type'])
required_ext = extensions[key]['required']
all_ext = extensions[key]['all']
diff = ''.join([i for i in all_ext if i not in required_ext])
for bits in riscv['bits']:
abi = riscv['abi'][bits]
for count in range(len(diff) + 1):
for combo in itertools.combinations(diff, count):
arch = f'{required_ext}{"".join(combo)}'
images.append(create_riscv_image(os, bits, arch, abi))
if 'd' in arch:
images.append(create_riscv_image(os, bits, arch, f'{abi}d'))
def add_extensions():
'''Add extensions for supported operating systems.'''
add_android_extensions()
add_buildroot_extensions()
add_crosstool_extensions()
add_debian_extensions()
add_musl_cross_extensions()
add_riscv_extensions()
add_extensions()
# Filter images by types.
android_images = [i for i in images if isinstance(i, AndroidImage)]
buildroot_images = [i for i in images if isinstance(i, BuildRootImage)]
crosstool_images = [i for i in images if isinstance(i, CrosstoolImage)]
debian_images = [i for i in images if isinstance(i, DebianImage)]
musl_cross_images = [i for i in images if isinstance(i, MuslCrossImage)]
riscv_images = [i for i in images if isinstance(i, RiscvImage)]
other_images = [i for i in images if isinstance(i, OtherImage)]
def create_array(values):
'''Create a bash array from a list of values.'''
start = "(\n \""
joiner = "\"\n \""
end = "\"\n)"
return start + joiner.join(values) + end
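# Illustrative output: create_array(['a', 'b']) expands to
#   (
#       "a"
#       "b"
#   )
# which the configure steps below substitute into shell templates as a
# bash array literal.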
class ConfigureCommand(VersionCommand):
'''Modify all configuration files.'''
description = 'configure template files'
def configure_scripts(self):
'''Configure the build scripts.'''
android = f'{HOME}/docker/android.sh'
bashrc = f'{HOME}/docker/bash.bashrc'
buildroot = f'{HOME}/docker/buildroot.sh'
buildroot32 = f'{HOME}/docker/buildroot32.sh'
cmake = f'{HOME}/docker/cmake.sh'
conan = f'{HOME}/docker/conan.sh'
entrypoint = f'{HOME}/docker/entrypoint.sh'
gcc = f'{HOME}/docker/gcc.sh'
gcc_patch = f'{HOME}/docker/gcc-patch.sh'
meson = f'{HOME}/docker/meson.sh'
musl = f'{HOME}/docker/musl.sh'
qemu = f'{HOME}/docker/qemu.sh'
qemu_apt = f'{HOME}/docker/qemu-apt.sh'
riscv_gcc = f'{HOME}/docker/riscv-gcc.sh'
shortcut = f'{HOME}/symlink/shortcut.sh'
target_features = f'{HOME}/spec/target_features.py'
vcpkg = f'{HOME}/docker/vcpkg.sh'
vcpkg_triplet = f'{HOME}/docker/vcpkg-triplet.sh'
self.configure(f'{android}.in', android, True, [
('CLANG_VERSION', config['android']['clang_version']),
('NDK_DIRECTORY', config['android']['ndk_directory']),
('NDK_VERSION', config['android']['ndk_version']),
('PREFIXES', create_array([i.prefix for i in android_images])),
('TOOLCHAINS', create_array([i.toolchain for i in android_images]))
])
self.configure(f'{bashrc}.in', bashrc, False, [
('BIN', f'"{bin_directory}"'),
])
self.configure(f'{cmake}.in', cmake, True, [
('UBUNTU_NAME', config['ubuntu']['version']['name']),
])
self.configure(f'{conan}.in', conan, True, [
('BIN', f'"{bin_directory}"'),
('CONAN', "'/usr/local/bin/conan'"),
('USERNAME', config["options"]["username"]),
])
self.configure(f'{buildroot}.in', buildroot, True, [
('BUILDROOT_VERSION', buildroot_version),
('JOBS', config["options"]["build_jobs"]),
('USERNAME', config["options"]["username"]),
])
self.configure(f'{buildroot32}.in', buildroot32, True, [
('BUILDROOT_VERSION', buildroot_version),
('JOBS', config["options"]["build_jobs"]),
('USERNAME', config["options"]["username"]),
])
self.configure(f'{entrypoint}.in', entrypoint, True, [
('BIN', f'"{bin_directory}"'),
])
self.configure(f'{gcc}.in', gcc, True, [
('CROSSTOOL_VERSION', f'"{ct_version}"'),
('JOBS', config["options"]["build_jobs"]),
('SLEEP', config["options"]["sleep"]),
('TIMEOUT', config["options"]["timeout"]),
('USERNAME', config["options"]["username"]),
])
self.configure(f'{gcc_patch}.in', gcc_patch, True, [
('CROSSTOOL_VERSION', f'"{ct_version}"'),
('JOBS', config["options"]["build_jobs"]),
('SLEEP', config["options"]["sleep"]),
('TIMEOUT', config["options"]["timeout"]),
('USERNAME', config["options"]["username"]),
])
self.configure(f'{meson}.in', meson, True, [
('BIN', f'"{bin_directory}"'),
('MESON', "'/usr/local/bin/meson'"),
])
self.configure(f'{musl}.in', musl, True, [
('BINUTILS_VERSION', binutils_version),
('BINUTILS_XZ_SHA1', config['binutils']['version']['xz_sha1']),
('GCC_VERSION', gcc_version),
('GCC_XZ_SHA1', config['gcc']['version']['xz_sha1']),
('GMP_VERSION', gmp_version),
('GMP_BZ2_SHA1', config['gmp']['version']['bz2_sha1']),
('ISL_VERSION', isl_version),
('ISL_BZ2_SHA1', config['isl']['version']['bz2_sha1']),
('MPC_VERSION', mpc_version),
('MPC_GZ_SHA1', config['mpc']['version']['gz_sha1']),
('MPFR_VERSION', mpfr_version),
('MPFR_BZ2_SHA1', config['mpfr']['version']['bz2_sha1']),
('LINUX_HEADERS_VERSION', linux_headers_version),
('LINUX_HEADERS_XZ_SHA1', config['linux-headers']['version']['xz_sha1']),
('LINUX_VERSION', linux_version),
('LINUX_XZ_SHA1', config['linux']['version']['xz_sha1']),
('MUSL_CROSS_VERSION', musl_cross_version),
('MUSL_VERSION', musl_version),
('MUSL_GZ_SHA1', config['musl']['version']['gz_sha1']),
('JOBS', config["options"]["build_jobs"]),
('USERNAME', config["options"]["username"]),
])
self.configure(f'{qemu}.in', qemu, True, [
('JOBS', config["options"]["build_jobs"]),
('QEMU_VERSION', qemu_version),
('SYSROOT', f'"{config["options"]["sysroot"]}"'),
])
self.configure(f'{qemu_apt}.in', qemu_apt, True, [
('BIN', f'"{bin_directory}"'),
])
self.configure(f'{riscv_gcc}.in', riscv_gcc, True, [
('BINUTILS_VERSION', riscv_binutils_version),
('GCC_VERSION', gcc_version),
('GDB_VERSION', riscv_gdb_version),
('GLIBC_VERSION', riscv_glibc_version),
('JOBS', config["options"]["build_jobs"]),
('NEWLIB_VERSION', riscv_newlib_version),
('TOOLCHAIN_VERSION', riscv_toolchain_version),
])
self.configure(f'{shortcut}.in', shortcut, True, [
('BIN', f'"{bin_directory}"'),
])
self.configure(f'{target_features}.in', target_features, True, [
('BIN', f'"{bin_directory}"'),
])
self.configure(f'{vcpkg}.in', vcpkg, True, [
('SYSROOT', f'"{config["options"]["sysroot"]}"'),
])
self.configure(f'{vcpkg_triplet}.in', vcpkg_triplet, True, [
('BIN', f'"{bin_directory}"'),
('SYSROOT', f'"{config["options"]["sysroot"]}"'),
])
def configure_ctng_config(self):
'''Configure the scripts for crosstool-NG.'''
patch = f'{HOME}/ct-ng/patch.sh'
replacements = []
# Patch the GCC version.
old_gcc_major = '8'
old_gcc_version = '8.3.0'
replacements.append(('GCC_V_OLD', f'CT_GCC_V_{old_gcc_major}=y'))
ct_gcc = [f'CT_GCC_V_{gcc_major}=y']
for gcc_v in reversed(range(int(old_gcc_major), int(gcc_major))):
ct_gcc.append(f'# CT_GCC_V_{gcc_v} is not set')
replacements.append(('GCC_V_NEW', '\\n'.join(ct_gcc)))
replacements.append(('GCC_OLD', old_gcc_version))
replacements.append(('GCC_NEW', gcc_version))
# Patch the MinGW version.
old_mingw_major = '6'
old_mingw_version = '6.0.0'
replacements.append(('MINGW_V_OLD', f'CT_MINGW_V_{old_mingw_major}=y'))
ct_mingw = [f'CT_MINGW_V_{mingw_major}=y']
for mingw_v in reversed(range(int(old_mingw_major), int(mingw_major))):
ct_mingw.append(f'# CT_MINGW_V_{mingw_v} is not set')
replacements.append(('MINGW_V_NEW', '\\n'.join(ct_mingw)))
replacements.append(('MINGW_OLD', old_mingw_version))
replacements.append(('MINGW_NEW', mingw_version))
# Configure the glibc version.
old_glibc_major = '2'
old_glibc_minor = '29'
old_glibc_version = '2.29'
replacements.append(('GLIBC_V_OLD', f'CT_GLIBC_V_{old_glibc_major}_{old_glibc_minor}=y'))
ct_glibc = [f'CT_GLIBC_V_{glibc_major}_{glibc_minor}=y']
if old_glibc_major == glibc_major:
for glibc_v in reversed(range(int(old_glibc_minor), int(glibc_minor))):
ct_glibc.append(f'# CT_GLIBC_V_{glibc_major}_{glibc_v} is not set')
else:
ct_glibc.append(f'# CT_GLIBC_V_{old_glibc_major}_{old_glibc_minor} is not set')
for glibc_v in reversed(range(int(old_glibc_major) + 1, int(glibc_major))):
                ct_glibc.append(f'# CT_GLIBC_V_{glibc_v}_0 is not set')
replacements.append(('GLIBC_V_NEW', '\\n'.join(ct_glibc)))
replacements.append(('GLIBC_OLD', old_glibc_version))
replacements.append(('GLIBC_NEW', glibc_version))
# Configure the musl version.
old_musl_major = '1'
old_musl_minor = '1'
old_musl_patch = '21'
old_musl_version = '1.1.21'
replacements.append((
'MUSL_V_OLD',
f'CT_MUSL_V_{old_musl_major}_{old_musl_minor}_{old_musl_patch}=y'
))
ct_musl = [
f'CT_MUSL_V_{musl_major}_{musl_minor}_{musl_patch}=y',
f'# CT_MUSL_V_{old_musl_major}_{old_musl_minor}_{old_musl_patch} is not set'
]
replacements.append(('MUSL_V_NEW', '\\n'.join(ct_musl)))
replacements.append(('MUSL_OLD', old_musl_version))
replacements.append(('MUSL_NEW', musl_version))
# Configure the expat version.
old_expat_major = '2'
old_expat_minor = '2'
old_expat_version = '2.2.6'
replacements.append(('EXPAT_V_OLD', f'CT_EXPAT_V_{old_expat_major}_{old_expat_minor}=y'))
ct_expat = [
f'CT_EXPAT_V_{expat_major}_{expat_minor}=y',
f'# CT_EXPAT_V_{old_expat_major}_{old_expat_minor} is not set'
]
replacements.append(('EXPAT_V_NEW', '\\n'.join(ct_expat)))
replacements.append(('EXPAT_OLD', old_expat_version))
replacements.append(('EXPAT_NEW', expat_version))
self.configure(f'{patch}.in', patch, True, replacements)
def configure_musl_config(self):
'''Configure the MUSL libc config files.'''
template = f'{HOME}/musl/config.mak.in'
for image in musl_cross_images:
outfile = f'{HOME}/musl/config/{image.target}.mak'
self.configure(template, outfile, False, [
('BINUTILS_VERSION', binutils_version),
('GCC_CONFIG', image.gcc_config),
('GCC_VERSION', gcc_version),
('GMP_VERSION', gmp_version),
('ISL_VERSION', isl_version),
('LINUX_HEADERS_VERSION', linux_headers_version),
('LINUX_VERSION', linux_version),
('MPC_VERSION', mpc_version),
('MPFR_VERSION', mpfr_version),
('MUSL_VERSION', musl_version),
('TARGET', image.config),
('USERNAME', config['options']['username']),
])
def configure_dockerfile(
self,
image,
template=None,
replacements=None,
base='ubuntu',
spec='spec',
symlink='symlink',
toolchain='toolchain',
wrapper='wrapper',
linker='',
cc='',
cxx='',
):
'''Configure a Dockerfile from template.'''
        # These files are read in the order in which they're likely to
        # change, and in order of compile time.
        # Template files may require long compilations, and will
        # change rarely. Qemu is an apt package, and unlikely to change.
        # Symlinks, toolchains, and entrypoints change often, but are
        # cheap and easy to fix.
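        # Roughly, the assembled Dockerfile is:
        #   base -> adduser -> build-essential -> directory -> [template]
        #   -> [qemu] -> [wrapper] -> [symlink] -> [spec] -> [toolchain]
        #   -> entrypoint -> metadata
        # where the bracketed pieces are optional and controlled by the
        # arguments to this method.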
contents = []
        # Mandatory Docker templates: the base image.
        # These will **never** change.
with open(f'{HOME}/docker/Dockerfile.{base}.in', 'r') as file:
contents.append(file.read())
with open(f'{HOME}/docker/Dockerfile.adduser.in', 'r') as file:
contents.append(file.read())
with open(f'{HOME}/docker/Dockerfile.build-essential.in', 'r') as file:
contents.append(file.read())
with open(f'{HOME}/docker/Dockerfile.directory.in', 'r') as file:
contents.append(file.read())
        # Optional docker templates, in order of compile time.
        # These will change, but it's important that later templates
        # build faster than earlier ones. If done incorrectly,
        # a full rebuild can take well over a week.
if template is not None:
with open(template, 'r') as file:
contents.append(file.read())
if image.qemu:
with open(f'{HOME}/docker/Dockerfile.qemu.in', 'r') as file:
contents.append(file.read())
if wrapper is not None:
with open(f'{HOME}/docker/Dockerfile.{wrapper}.in', 'r') as file:
contents.append(file.read())
if symlink is not None:
with open(f'{HOME}/docker/Dockerfile.{symlink}.in', 'r') as file:
contents.append(file.read())
if spec is not None:
with open(f'{HOME}/docker/Dockerfile.{spec}.in', 'r') as file:
contents.append(file.read())
if toolchain is not None:
with open(f'{HOME}/docker/Dockerfile.{toolchain}.in', 'r') as file:
contents.append(file.read())
# Add the mandatory entrypoint.
with open(f'{HOME}/docker/Dockerfile.entrypoint.in', 'r') as file:
contents.append(file.read())
# Add image labels and metadata.
with open(f'{HOME}/docker/Dockerfile.metadata.in', 'r') as file:
contents.append(file.read())
contents = '\n'.join(contents)
# Add to the replacements all the shared values.
if replacements is None:
replacements = []
replacements = replacements + [
('AUTHORS', config['metadata']['authors']),
('EMSDK_VERSION', emsdk_version),
('BIN', f'"{bin_directory}"'),
('CC', f'"{cc}"'),
('CXX', f'"{cxx}"'),
('ENTRYPOINT', f'"{bin_directory}/entrypoint.sh"'),
('FLAGS', f'"{image.flags}"'),
('LINKER', f'"{linker}"'),
('MAINTAINER', config['metadata']['maintainer']),
('OPTIONAL_FLAGS', f'"{image.optional_flags}"'),
('OS', image.os.to_triple() or 'unknown'),
('TARGET', image.target),
('UBUNTU_VERSION', ubuntu_version),
('URL', config['metadata']['url']),
('USERNAME', config['options']['username']),
('VCS_URL', config['metadata']['vcs-url']),
]
# Replace the contents and write the output to file.
outfile = f'{HOME}/docker/images/Dockerfile.{image.target}'
contents = self.replace(contents, replacements)
self.write_file(outfile, contents, False)
def configure_vcpkg_dockerfile(self, base='ubuntu'):
        '''Configure only the vcpkg Dockerfile.'''
# This is a base image shared by multiple builds.
contents = []
with open(f'{HOME}/docker/Dockerfile.{base}.in', 'r') as file:
contents.append(file.read())
with open(f'{HOME}/docker/Dockerfile.vcpkg.in', 'r') as file:
contents.append(file.read())
contents = '\n'.join(contents)
# Replace the contents and write the output to file.
replacements = [
('UBUNTU_VERSION', ubuntu_version),
]
outfile = f'{HOME}/docker/pkgimages/Dockerfile.vcpkg'
contents = self.replace(contents, replacements)
self.write_file(outfile, contents, False)
def configure_package_dockerfile(
self,
image,
compiler=None,
compiler_version=None,
conan_system=None,
meson_system=None,
vcpkg_system=None,
):
'''Configure a Dockerfile with package managers enabled.'''
if compiler is None:
compiler = 'gcc'
if compiler_version is None:
compiler_version = gcc_major
if conan_system is None:
conan_system = image.os.to_conan()
if meson_system is None:
meson_system = image.os.to_meson()
if vcpkg_system is None:
vcpkg_system = image.os.to_vcpkg()
template = f'{HOME}/docker/Dockerfile.package.in'
outfile = f'{HOME}/docker/pkgimages/Dockerfile.{image.target}'
self.configure(template, outfile, False, [
('COMPILER', compiler),
('COMPILER_VERSION', f'"{compiler_version}"'),
('CONAN_SYSTEM', conan_system),
('CPU_FAMILY', image.family),
('IMAGE_USER', config['options']['username']),
('LINKAGE', image.linkage),
('MESON_SYSTEM', meson_system),
('PROCESSOR', image.processor),
('REPOSITORY', config['metadata']['repository']),
('SYSROOT', f'"{config["options"]["sysroot"]}"'),
('TARGET', image.target),
('TRIPLE', image.triple),
('USERNAME', config['metadata']['username']),
('VCPKG_SYSTEM', vcpkg_system),
])
def configure_cmake(self, image, template, replacements):
'''Configure a CMake template.'''
replacements = replacements + [
('PROCESSOR', image.processor),
('OS', image.os.to_cmake()),
('USERNAME', config["options"]["username"]),
]
contents = []
with open(template, 'r') as file:
contents.append(file.read())
with open(f'{HOME}/cmake/toolchain-include.cmake.in', 'r') as file:
contents.append(file.read())
contents = '\n'.join(contents)
# Replace the contents and write the output to file.
cmake = f'{HOME}/cmake/toolchain/{image.target}.cmake'
contents = self.replace(contents, replacements)
self.write_file(cmake, contents, False)
def configure_symlinks(self, image, template, replacements):
'''Configure a symlink template.'''
replacements = replacements + [
('CC_CPU_LIST', image.cc_cpu_list),
('FLAGS', image.cflags),
('HARDCODED', image.hardcoded_cpulist),
('LD_LIBRARY_PATH', image.ld_library_path),
('LD_PRELOAD', image.ld_preload),
('OPTIONAL_FLAGS', image.optional_cflags),
('RUN_CPU_LIST', image.run_cpu_list),
('TRIPLE', image.triple),
('USERNAME', config["options"]["username"]),
]
symlink = f'{HOME}/symlink/toolchain/{image.target}.sh'
self.configure(template, symlink, True, replacements)
def configure_android(self, image):
'''Configure an Android-SDK image.'''
# Configure the dockerfile.
template = f'{HOME}/docker/Dockerfile.android.in'
self.configure_dockerfile(image, template, [
('ARCH', image.arch),
('TOOLCHAIN', image.toolchain),
])
# Configure the CMake toolchain.
cmake_template = f'{HOME}/cmake/android.cmake.in'
self.configure_cmake(image, cmake_template, [
('ABI', image.abi),
('NDK_DIRECTORY', config['android']['ndk_directory']),
('SDK_VERSION', config['android']['sdk_version']),
])
# Configure the symlinks.
symlink_template = f'{HOME}/symlink/android.sh.in'
self.configure_symlinks(image, symlink_template, [
('NDK_DIRECTORY', config['android']['ndk_directory']),
('PREFIX', f'{image.prefix}-linux-{image.system}'),
('SDK_VERSION', config['android']['sdk_version']),
('TOOLCHAIN', image.toolchain),
])
# Build derived images with package managers enabled.
        # Only keep the major version; Conan fails otherwise.
compiler_version = config['android']['clang_version']
major_version = re.match(r'^(\d+).*$', compiler_version).group(1)
self.configure_package_dockerfile(image, 'clang', major_version)
def configure_buildroot(self, image):
'''Configure a buildroot image.'''
# Get the proper dependent parameters for our image.
if image.symlink_sysroot:
cmake_template = f'{HOME}/cmake/buildroot-qemu.cmake.in'
symlink_template = f'{HOME}/symlink/buildroot-qemu-sysroot.sh.in'
elif image.qemu:
cmake_template = f'{HOME}/cmake/buildroot-qemu.cmake.in'
symlink_template = f'{HOME}/symlink/buildroot-qemu.sh.in'
else:
cmake_template = f'{HOME}/cmake/buildroot.cmake.in'
symlink_template = f'{HOME}/symlink/buildroot.sh.in'
if image.use_32:
template = f'{HOME}/docker/Dockerfile.buildroot32.in'
else:
template = f'{HOME}/docker/Dockerfile.buildroot.in'
self.configure_dockerfile(image, template, [
('ARCH', image.processor),
('CONFIG', image.config),
])
# Configure the CMake toolchain.
self.configure_cmake(image, cmake_template, [
('TRIPLE', image.config),
])
# Configure the symlinks.
self.configure_symlinks(image, symlink_template, [
('ARCH', image.processor),
('TRIPLE', image.triple),
])
# Build derived images with package managers enabled.
if image.os == OperatingSystem.Linux or image.os == OperatingSystem.Windows:
self.configure_package_dockerfile(image)
def configure_crosstool(self, image):
'''Configure a crosstool-NG image.'''
# Configure the dockerfile.
if image.patches:
template = f'{HOME}/docker/Dockerfile.crosstool-patch.in'
files = []
for patch in image.patches:
files += glob.glob(f'diff/{patch}.*')
patches = [f'COPY ["{i}", "/src/diff/"]' for i in files]
patches = '\n'.join(patches)
else:
template = f'{HOME}/docker/Dockerfile.crosstool.in'
patches = ''
self.configure_dockerfile(image, template, [
('ARCH', image.processor),
('CONFIG', image.config),
('PATCH', patches),
])
# Get the proper dependent parameters for our image.
if image.os == OperatingSystem.BareMetal:
cmake_template = f'{HOME}/cmake/crosstool-elf.cmake.in'
symlink_template = f'{HOME}/symlink/crosstool.sh.in'
elif image.qemu:
cmake_template = f'{HOME}/cmake/crosstool-os-qemu.cmake.in'
symlink_template = f'{HOME}/symlink/crosstool-qemu.sh.in'
else:
cmake_template = f'{HOME}/cmake/crosstool-os.cmake.in'
symlink_template = f'{HOME}/symlink/crosstool.sh.in'
# Configure the CMake toolchain.
self.configure_cmake(image, cmake_template, [
('TRIPLE', image.triple),
])
# Configure the symlinks.
self.configure_symlinks(image, symlink_template, [
('ARCH', image.processor),
('TRIPLE', image.triple),
])
# Build derived images with package managers enabled.
if image.os == OperatingSystem.Linux or image.os == OperatingSystem.Windows:
self.configure_package_dockerfile(image)
def configure_debian(self, image):
        '''Configure a Debian-based Dockerfile.'''
# Configure the dockerfile.
template = f'{HOME}/docker/Dockerfile.debian.in'
self.configure_dockerfile(image, template, [
('ARCH', image.processor),
('G++', image.cxx),
('LIBC', image.libc),
])
# Get the proper dependent parameters for our image.
if image.os != OperatingSystem.Linux:
raise NotImplementedError
if image.target == 'x86_64-unknown-linux-gnu':
cmake_template = f'{HOME}/cmake/native.cmake.in'
symlink_template = f'{HOME}/symlink/native.sh.in'
else:
cmake_template = f'{HOME}/cmake/debian.cmake.in'
symlink_template = f'{HOME}/symlink/debian.sh.in'
# Configure the CMake toolchain.
self.configure_cmake(image, cmake_template, [])
# Configure the symlinks.
self.configure_symlinks(image, symlink_template, [
('GCC_MAJOR', gcc_major),
('PREFIX', image.prefix),
('PROCESSOR', image.processor),
('OS', image.os.to_triple()),
('SYSTEM', image.system),
])
# Build derived images with package managers enabled.
self.configure_package_dockerfile(image)
def configure_musl(self, image):
'''Configure a musl-cross-based image.'''
# Get the proper dependent parameters for our image.
if image.qemu:
cmake_template = f'{HOME}/cmake/musl-qemu.cmake.in'
symlink_template = f'{HOME}/symlink/musl-qemu.sh.in'
else:
cmake_template = f'{HOME}/cmake/musl.cmake.in'
symlink_template = f'{HOME}/symlink/musl.sh.in'
# Configure the dockerfile.
template = f'{HOME}/docker/Dockerfile.musl.in'
self.configure_dockerfile(image, template, [
('ARCH', image.processor),
('TRIPLE', image.config),
])
# Configure the CMake toolchain.
self.configure_cmake(image, cmake_template, [
('TRIPLE', image.config),
])
# Configure the symlinks.
self.configure_symlinks(image, symlink_template, [
('ARCH', image.processor),
('TRIPLE', image.config),
])
# Build derived images with package managers enabled.
self.configure_package_dockerfile(image)
def configure_riscv(self, image):
'''Configure a RISC-V-based image.'''
# Get the proper dependent parameters for our image.
if image.os == OperatingSystem.Linux:
cmake_template = f'{HOME}/cmake/riscv-linux.cmake.in'
elif image.os == OperatingSystem.BareMetal:
cmake_template = f'{HOME}/cmake/riscv-elf.cmake.in'
else:
raise NotImplementedError
if image.qemu:
symlink_template = f'{HOME}/symlink/riscv-qemu.sh.in'
else:
symlink_template = f'{HOME}/symlink/riscv.sh.in'
# Configure the dockerfile.
template = f'{HOME}/docker/Dockerfile.riscv.in'
self.configure_dockerfile(image, template, [
('ARCH', image.processor),
('TRIPLE', image.triple),
])
# Configure the CMake toolchain.
self.configure_cmake(image, cmake_template, [])
# Configure the symlinks.
self.configure_symlinks(image, symlink_template, [
('ARCH', image.processor),
('TRIPLE', image.triple),
])
# Build derived images with package managers enabled.
if image.os == OperatingSystem.Linux:
self.configure_package_dockerfile(image)
def configure_other(self, image):
'''Configure a miscellaneous image.'''
# Configure the dockerfile.
template = f'{HOME}/docker/Dockerfile.{image.target}.in'
if not os.path.exists(template):
template = None
self.configure_dockerfile(image, template, [
('ARCH', image.target),
('BINDIR', bin_directory),
], **image.dockerfile)
# Configure the CMake toolchain.
cmake_template = f'{HOME}/cmake/{image.target}.cmake.in'
self.configure_cmake(image, cmake_template, [])
# Configure the symlinks.
symlink_template = f'{HOME}/symlink/{image.target}.sh.in'
self.configure_symlinks(image, symlink_template, [])
# Build derived images with package managers enabled.
if hasattr(image, 'package_dockerfile'):
self.configure_package_dockerfile(image, **image.package_dockerfile)
def run(self):
'''Modify configuration files.'''
VersionCommand.run(self)
# Make the required subdirectories.
os.makedirs(f'{HOME}/cmake/toolchain', exist_ok=True)
os.makedirs(f'{HOME}/docker/images', exist_ok=True)
os.makedirs(f'{HOME}/docker/pkgimages', exist_ok=True)
os.makedirs(f'{HOME}/musl/config', exist_ok=True)
os.makedirs(f'{HOME}/symlink/toolchain', exist_ok=True)
# Configure base version info.
cmake = f'{HOME}/cmake/cmake'
emmake = f'{HOME}/symlink/emmake'
make = f'{HOME}/symlink/make.in'
self.configure(f'{cmake}.in', cmake, True, [
('CMAKE', "'/usr/bin/cmake'"),
('WRAPPER', ''),
('SYSROOT', f'"{config["options"]["sysroot"]}"'),
])
self.configure(make, emmake, True, [
('MAKE', "'/usr/bin/make'"),
('WRAPPER', 'emmake '),
])
# Configure our build scripts, and other configurations.
self.configure_scripts()
self.configure_ctng_config()
self.configure_musl_config()
# Configure images.
self.configure_vcpkg_dockerfile()
for image in android_images:
self.configure_android(image)
for image in buildroot_images:
self.configure_buildroot(image)
for image in crosstool_images:
self.configure_crosstool(image)
for image in debian_images:
self.configure_debian(image)
for image in musl_cross_images:
self.configure_musl(image)
for image in riscv_images:
self.configure_riscv(image)
for image in other_images:
self.configure_other(image)
script = f'{HOME}/bin/xcross'
if len(sys.argv) >= 2 and sys.argv[1] == 'py2exe':
params = {
'console': [{
'script': f'{HOME}/xcross/__main__.py',
'dest_base': 'xcross',
'description': description,
'comments': long_description,
'product_name': 'xcross',
}],
'options': {
'py2exe': {
'bundle_files': 1,
'compressed': 1,
'optimize': 2,
'dist_dir': f'{HOME}',
'dll_excludes': [],
}
},
'zipfile': None
}
elif has_setuptools:
params = {
'entry_points': {
'console_scripts': ['xcross = xcross:main']
}
}
else:
params = {
'scripts': [f'{HOME}/bin/xcross']
}
setuptools.setup(
name="xcross",
author="Alex Huszagh",
author_email="ahuszagh@gmail.com",
version=version,
packages=['xcross'],
**params,
description=description,
long_description=long_description,
long_description_content_type='text/markdown',
python_requires='>3.6.0',
license='Unlicense',
keywords='compilers cross-compilation embedded',
url='https://github.com/Alexhuszagh/xcross',
classifiers=[
'Development Status :: 4 - Beta',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: 3.9',
'License :: OSI Approved :: The Unlicense (Unlicense)',
'Topic :: Software Development :: Compilers',
'Topic :: Software Development :: Embedded Systems',
],
cmdclass={
'build_all': BuildAllCommand,
'build_image': BuildImageCommand,
'build_images': BuildImagesCommand,
'build_py': BuildCommand,
'clean': CleanCommand,
'clean_dist': CleanDistCommand,
'configure': ConfigureCommand,
'install': InstallCommand,
'lint': LintCommand,
'publish': PublishCommand,
'push': PushCommand,
'tag': TagCommand,
'test_images': TestImagesCommand,
'test': TestCommand,
'test_all': TestAllCommand,
'version': VersionCommand,
},
)
| 33.96273
| 97
| 0.598202
| 58,096
| 0.76811
| 0
| 0
| 5,340
| 0.070602
| 0
| 0
| 26,063
| 0.344589
|
3d4379916a421e4f16400672da640d246b4981ac
| 27,082
|
py
|
Python
|
src/sgfsdriver/plugins/ftp/ftp_client.py
|
syndicate-storage/syndicate-fs-driver-plugins
|
8e455d6bb4838c2313bb6cd72ed5fa6bbbc871d2
|
[
"Apache-2.0"
] | null | null | null |
src/sgfsdriver/plugins/ftp/ftp_client.py
|
syndicate-storage/syndicate-fs-driver-plugins
|
8e455d6bb4838c2313bb6cd72ed5fa6bbbc871d2
|
[
"Apache-2.0"
] | 3
|
2016-11-18T21:31:00.000Z
|
2017-08-16T15:35:52.000Z
|
src/sgfsdriver/plugins/ftp/ftp_client.py
|
syndicate-storage/syndicate-fs-driver-plugins
|
8e455d6bb4838c2313bb6cd72ed5fa6bbbc871d2
|
[
"Apache-2.0"
] | 2
|
2016-03-31T18:55:58.000Z
|
2017-08-02T19:57:12.000Z
|
#!/usr/bin/env python
"""
Copyright 2016 The Trustees of University of Arizona
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import traceback
import os
import logging
import time
import ftplib
import threading
from datetime import datetime
from expiringdict import ExpiringDict
from io import BytesIO
logger = logging.getLogger('ftp_client')
logger.setLevel(logging.DEBUG)
# create file handler which logs even debug messages
fh = logging.FileHandler('ftp_client.log')
fh.setLevel(logging.DEBUG)
# create formatter and add it to the handlers
formatter = logging.Formatter(
'%(asctime)s - %(name)s - %(levelname)s - %(message)s')
fh.setFormatter(formatter)
# add the handlers to the logger
logger.addHandler(fh)
METADATA_CACHE_SIZE = 10000
METADATA_CACHE_TTL = 60 * 60 # 1 hour
FTP_TIMEOUT = 5 * 60 # 5 min
FTP_OPERATION_TIMEOUT = 30 # 30 sec
BYTES_MAX_SKIP = 1024 * 1024 * 2 # 2MB
CONNECTIONS_MAX_NUM = 5
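# Rough intent of the limits above (see ftp_session._get_connection below):
# an open RETR connection is reused for a nearby read by skipping at most
# BYTES_MAX_SKIP bytes forward, otherwise a new connection is opened, and no
# more than CONNECTIONS_MAX_NUM download connections are kept (oldest evicted).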
"""
Interface class to FTP
"""
class MLSD_NOT_SUPPORTED(Exception):
pass
class ftp_status(object):
def __init__(self,
directory=False,
symlink=False,
path=None,
name=None,
size=0,
create_time=0,
modify_time=0):
self.directory = directory
self.symlink = symlink
self.path = path
self.name = name
self.size = size
self.create_time = create_time
self.modify_time = modify_time
def __eq__(self, other):
return self.__dict__ == other.__dict__
def __repr__(self):
rep_d = "F"
if self.directory:
rep_d = "D"
rep_s = "-"
if self.symlink:
rep_s = "S"
return "<ftp_status %s%s %s %d>" % \
(rep_d, rep_s, self.name, self.size)
class downloader_connection(object):
def __init__(self,
path,
connection,
offset=0,
last_comm=0):
self.path = path
self.offset = offset
self.connection = connection
self.last_comm = last_comm
self._lock = threading.RLock()
def __repr__(self):
return "<downloader_connection path(%s) off(%d) last_comm(%s)>" % \
(self.path, self.offset, self.last_comm)
def lock(self):
self._lock.acquire()
def unlock(self):
self._lock.release()
class ftp_session(object):
def __init__(self,
host,
port=21,
user="anonymous",
password="anonymous@email.com"):
self.host = host
self.port = port
self.user = user
self.password = password
self.session = None
self.last_comm = None
self.connections = {}
self._lock = threading.RLock()
def connect(self):
logger.info("connect session")
self.lock()
try:
ftp_session = ftplib.FTP()
ftp_session.connect(self.host, self.port, FTP_OPERATION_TIMEOUT)
ftp_session.login(self.user, self.password)
self.session = ftp_session
logger.info("new ftp session to %s:%d - %d" % (self.host, self.port, id(self.session)))
self.last_comm = datetime.now()
finally:
self.unlock()
def close(self):
logger.info("close session")
self.lock()
for connection in self.connections.values():
logger.info("_close_connection : %s" % connection.path)
connection.lock()
try:
conn = connection.connection
conn.close()
self.session.voidresp()
except ftplib.error_temp:
                # aborting the transfer causes this type of error
pass
except EOFError:
pass
del self.connections[connection.path]
connection.unlock()
try:
logger.info("close ftp session to %s:%d - %d" % (self.host, self.port, id(self.session)))
self.session.close()
finally:
self.session = None
self.last_comm = None
self.unlock()
def reconnect(self):
logger.info("reconnect session")
self.lock()
self.close()
self.connect()
self.unlock()
def reconnect_if_needed(self):
expired = True
reconnected = False
if self.last_comm:
delta = datetime.now() - self.last_comm
if delta.total_seconds() < FTP_TIMEOUT:
expired = False
if expired:
            # perform a short command, then reconnect on failure
logger.info("reconnect_if_needed: expired - check live")
self.lock()
try:
self.pwd()
reconnected = False
except:
self.reconnect()
reconnected = True
finally:
self.unlock()
else:
reconnected = False
return reconnected
def retrlines(self, op, callback):
logger.info("retrlines")
self.lock()
try:
self.session.retrlines(op, callback)
self.last_comm = datetime.now()
finally:
self.unlock()
def pwd(self):
logger.info("pwd")
self.lock()
try:
self.session.pwd()
self.last_comm = datetime.now()
finally:
self.unlock()
def cwd(self, path):
logger.info("cwd - %s" % path)
self.lock()
try:
self.session.cwd(path)
self.last_comm = datetime.now()
finally:
self.unlock()
def mkd(self, path):
logger.info("mkd - %s" % path)
self.lock()
try:
self.session.mkd(path)
self.last_comm = datetime.now()
finally:
self.unlock()
def storbinary(self, op, path, buf, rest):
logger.info("storbinary - %s" % path)
self.lock()
try:
self.session.storbinary("%s %s" % (op, path), buf, rest=rest)
self.last_comm = datetime.now()
finally:
self.unlock()
def delete(self, path):
logger.info("delete - %s" % path)
self.lock()
try:
self.session.delete(path)
self.last_comm = datetime.now()
finally:
self.unlock()
def rename(self, path1, path2):
logger.info("rename - %s => %s" % (path1, path2))
self.lock()
try:
self.session.rename(path1, path2)
self.last_comm = datetime.now()
finally:
self.unlock()
def __repr__(self):
return "<ftp_session host(%s) port(%d)>" % \
(self.host, self.port)
def lock(self):
self._lock.acquire()
def unlock(self):
self._lock.release()
def _new_connection(self, path, offset=0):
logger.info("_new_connection : %s, off(%d)" % (path, offset))
self.lock()
try:
self.session.voidcmd("TYPE I")
conn = self.session.transfercmd("RETR %s" % path, offset)
connection = downloader_connection(path, conn, offset, datetime.now())
self.connections[path] = connection
self.last_comm = datetime.now()
finally:
self.unlock()
return connection
def _close_connection(self, connection):
path = connection.path
logger.info("_close_connection : %s" % path)
self.lock()
connection.lock()
try:
conn = connection.connection
conn.close()
self.session.voidresp()
except ftplib.error_temp:
            # aborting the transfer causes this type of error
pass
del self.connections[path]
connection.unlock()
self.unlock()
def _get_connection(self, path, offset):
connection = None
logger.info("_get_connection : %s, off(%d)" % (path, offset))
self.lock()
if path in self.connections:
connection = self.connections[path]
if connection:
reusable = False
connection.lock()
coffset = connection.offset
if coffset <= offset and coffset + BYTES_MAX_SKIP >= offset:
reusable = True
time_delta = datetime.now() - connection.last_comm
if time_delta.total_seconds() < FTP_TIMEOUT:
reusable = True
connection.unlock()
if not reusable:
self._close_connection(connection)
connection = None
if len(self.connections) >= CONNECTIONS_MAX_NUM:
# remove oldest
oldest = None
for live_connection in self.connections.values():
if not oldest:
oldest = live_connection
else:
if oldest.last_comm > live_connection.last_comm:
oldest = live_connection
if oldest:
self._close_connection(oldest)
if connection:
connection.lock()
# may need to move offset
skip_bytes = offset - connection.offset
total_read = 0
EOF = False
while total_read < skip_bytes:
data = connection.connection.recv(skip_bytes - total_read)
if data:
data_len = len(data)
total_read += data_len
connection.offset += data_len
else:
#EOF
EOF = True
break
if total_read > 0:
connection.last_comm = datetime.now()
#connection.unlock()
else:
connection = self._new_connection(path, offset)
connection.lock()
self.unlock()
return connection
def read_data(self, path, offset, size):
logger.info("read_data : %s, off(%d), size(%d)" % (path, offset, size))
        # _get_connection returns the connection already locked
self.lock()
connection = self._get_connection(path, offset)
buf = BytesIO()
total_read = 0
EOF = False
if connection.offset != offset:
connection.unlock()
self._close_connection(connection)
connection = self._new_connection(path, offset)
connection.lock()
#raise Exception("Connection does not have right offset %d - %d" % (connection.offset, offset))
while total_read < size:
data = connection.connection.recv(size - total_read)
if data:
buf.write(data)
data_len = len(data)
total_read += data_len
connection.offset += data_len
else:
#EOF
EOF = True
break
if total_read > 0:
connection.last_comm = datetime.now()
connection.unlock()
if EOF:
self._close_connection(connection)
self.unlock()
return buf.getvalue()
class prefetch_task(threading.Thread):
def __init__(self,
group=None,
target=None,
name=None,
args=(),
kwargs=None,
verbose=None):
threading.Thread.__init__(self, group=group, target=target, name=name, verbose=verbose)
self.args = args
self.kwargs = kwargs
self.host = kwargs["host"]
self.port = int(kwargs["port"])
self.path = kwargs["path"]
self.offset = kwargs["offset"]
self.size = kwargs["size"]
self.ftp_client = kwargs["ftp_client"]
self.complete = False
self.data = None
def run(self):
logger.info("prefetch_task : %s:%d - %s, off(%d), size(%d)" % (self.host, self.port, self.path, self.offset, self.size))
session = self.ftp_client.get_download_session()
session.lock()
buf = None
try:
session.reconnect_if_needed()
buf = session.read_data(self.path, self.offset, self.size)
logger.info("prefetch_task: read done")
except Exception, e:
logger.error("prefetch_task: " + traceback.format_exc())
finally:
session.unlock()
self.data = buf
self.complete = True
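# Sketch of how this is used (see ftp_client.read below): a read first waits on
# the matching prefetch task, or starts one synchronously if none matches, then
# schedules the next window of the same size in the background when more of the
# file remains.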
class ftp_client(object):
def __init__(self,
host=None,
port=21,
user='anonymous',
password='anonymous@email.com'):
self.host = host
if port > 0:
self.port = int(port)
else:
self.port = 21
if user:
self.user = user
else:
self.user = "anonymous"
if password:
self.password = password
else:
self.password = "anonymous@email.com"
self.session = ftp_session(self.host, self.port, self.user, self.password)
self.download_session = ftp_session(self.host, self.port, self.user, self.password)
self.mlsd_supported = True
self.prefetch_thread = None
# init cache
self.meta_cache = ExpiringDict(
max_len=METADATA_CACHE_SIZE,
max_age_seconds=METADATA_CACHE_TTL
)
def connect(self):
logger.info("connect: connecting to FTP server (%s)" % self.host)
self.session.connect()
self.download_session.connect()
def close(self):
try:
logger.info("close: closing a connectinn to FTP server (%s)" % self.host)
self.session.close()
self.download_session.close()
except:
pass
def reconnect(self):
self.close()
self.connect()
def __enter__(self):
self.connect()
return self
def __exit__(self, exc_type, exc_value, traceback):
self.close()
def get_download_session(self):
return self.download_session
def _parse_MLSD(self, parent, line):
# modify=20170623195719;perm=adfr;size=454060;type=file;unique=13U670966;UNIX.group=570;UNIX.mode=0444;UNIX.owner=14; gbrel.txt
fields = line.split(";")
stat_dict = {}
for field in fields:
if "=" in field:
# key-value field
fv = field.strip()
fv_idx = fv.index("=")
key = fv[:fv_idx].lower()
value = fv[fv_idx+1:]
else:
key = "name"
value = field.strip()
stat_dict[key] = value
full_path = parent.rstrip("/") + "/" + stat_dict["name"]
directory = False
symlink = False
if "type" in stat_dict:
t = stat_dict["type"]
if t in ["cdir", "pdir"]:
return None
if t == "dir":
directory = True
elif t == "OS.unix=symlink":
symlink = True
elif t.startswith("OS.unix=slink:"):
symlink = True
if t not in ["dir", "file", "OS.unix=symlink"]:
raise IOError("Unknown type : %s" % t)
size = 0
if "size" in stat_dict:
size = long(stat_dict["size"])
modify_time = None
if "modify" in stat_dict:
modify_time_obj = datetime.strptime(stat_dict["modify"], "%Y%m%d%H%M%S")
modify_time = time.mktime(modify_time_obj.timetuple())
create_time = None
if "create" in stat_dict:
create_time_obj = datetime.strptime(stat_dict["create"], "%Y%m%d%H%M%S")
create_time = time.mktime(create_time_obj.timetuple())
if "name" in stat_dict:
return ftp_status(
directory=directory,
symlink=symlink,
path=full_path,
name=stat_dict["name"],
size=size,
create_time=create_time,
modify_time=modify_time
)
else:
return None
def _parse_LIST(self, parent, line):
# drwxr-xr-x 8 20002 2006 4096 Dec 08 2015 NANOGrav_9y
fields = line.split()
stat_dict = {}
stat_dict["perm"] = fields[0]
stat_dict["owner"] = fields[2]
stat_dict["group"] = fields[3]
stat_dict["size"] = fields[4]
stat_dict["month"] = fields[5]
stat_dict["day"] = fields[6]
stat_dict["d3"] = fields[7]
stat_dict["name"] = fields[8]
full_path = parent.rstrip("/") + "/" + stat_dict["name"]
directory = False
symlink = False
if stat_dict["perm"].startswith("d"):
directory = True
elif stat_dict["perm"].startswith("-"):
directory = False
elif stat_dict["perm"].startswith("l"):
directory = True
symlink = True
else:
raise IOError("Unknown type : %s" % stat_dict["perm"])
size = 0
if "size" in stat_dict:
size = long(stat_dict["size"])
now = datetime.now()
year = now.year
hour = 0
minute = 0
if stat_dict["d3"].isdigit():
year = int(stat_dict["d3"])
else:
hm = stat_dict["d3"].split(":")
hour = int(hm[0])
minute = int(hm[1])
d = "%d %s %s %d %d" % (
year, stat_dict["month"], stat_dict["day"], hour, minute
)
modify_time = None
if "modify" in stat_dict:
modify_time_obj = datetime.strptime(d, "%Y %b %d %H %M")
modify_time = time.mktime(modify_time_obj.timetuple())
create_time = None
if "create" in stat_dict:
create_time_obj = datetime.strptime(d, "%Y %b %d %H %M")
create_time = time.mktime(create_time_obj.timetuple())
if "name" in stat_dict:
return ftp_status(
directory=directory,
symlink=symlink,
path=full_path,
name=stat_dict["name"],
size=size,
create_time=create_time,
modify_time=modify_time
)
else:
return None
def _list_dir_and_stat_MLSD(self, path):
logger.info("_list_dir_and_stat_MLSD: retrlines with MLSD - %s" % path)
stats = []
try:
entries = []
try:
self.session.retrlines("MLSD", entries.append)
except ftplib.error_perm, e:
msg = str(e)
if "500" in msg or "unknown command" in msg.lower():
raise MLSD_NOT_SUPPORTED("MLSD is not supported")
for ent in entries:
st = self._parse_MLSD(path, ent)
if st:
stats.append(st)
except ftplib.error_perm:
logger.error("_list_dir_and_stat_MLSD: " + traceback.format_exc())
return stats
def _list_dir_and_stat_LIST(self, path):
logger.info("_list_dir_and_stat_LIST: retrlines with LIST - %s" % path)
stats = []
try:
entries = []
self.session.retrlines("LIST", entries.append)
for ent in entries:
st = self._parse_LIST(path, ent)
if st:
stats.append(st)
except ftplib.error_perm:
logger.error("_list_dir_and_stat_LIST: " + traceback.format_exc())
return stats
def _ensureDirEntryStatLoaded(self, path):
# reuse cache
if path in self.meta_cache:
return self.meta_cache[path]
logger.info("_ensureDirEntryStatLoaded: loading - %s" % path)
self.session.lock()
self.session.reconnect_if_needed()
self.session.cwd(path)
stats = []
if self.mlsd_supported:
try:
stats = self._list_dir_and_stat_MLSD(path)
except MLSD_NOT_SUPPORTED:
self.mlsd_supported = False
stats = self._list_dir_and_stat_LIST(path)
else:
stats = self._list_dir_and_stat_LIST(path)
self.session.unlock()
self.meta_cache[path] = stats
return stats
def _invoke_prefetch(self, path, offset, size):
logger.info("_invoke_prefetch : %s, off(%d), size(%d)" % (path, offset, size))
self.prefetch_thread = prefetch_task(name="prefetch_task_thread", kwargs={'host':self.session.host, 'port':self.session.port, 'path':path, 'offset':offset, 'size':size, 'ftp_client':self})
self.prefetch_thread.start()
def _get_prefetch_data(self, path, offset, size):
logger.info("_get_prefetch_data : %s, off(%d), size(%d)" % (path, offset, size))
invoke_new_thread = False
if self.prefetch_thread:
if not self.prefetch_thread.complete:
self.prefetch_thread.join()
if self.prefetch_thread.path != path or self.prefetch_thread.offset != offset or self.prefetch_thread.size != size:
invoke_new_thread = True
else:
invoke_new_thread = True
if invoke_new_thread:
self._invoke_prefetch(path, offset, size)
self.prefetch_thread.join()
data = self.prefetch_thread.data
self.prefetch_thread = None
return data
"""
Returns ftp_status
"""
def stat(self, path):
logger.info("stat: %s" % path)
try:
# try bulk loading of stats
parent = os.path.dirname(path)
stats = self._ensureDirEntryStatLoaded(parent)
if stats:
for sb in stats:
if sb.path == path:
return sb
return None
except Exception:
            # fail if the parent dir cannot be accessed
return None
"""
Returns directory entries in string
"""
def list_dir(self, path):
logger.info("list_dir: %s" % path)
stats = self._ensureDirEntryStatLoaded(path)
entries = []
if stats:
for sb in stats:
entries.append(sb.name)
return entries
def is_dir(self, path):
sb = self.stat(path)
if sb:
return sb.directory
return False
def make_dirs(self, path):
logger.info("make_dirs: %s" % path)
if not self.exists(path):
self.session.lock()
# make parent dir first
self.reconnect_if_needed()
self.make_dirs(os.path.dirname(path))
self.session.mkd(path)
self.session.unlock()
# invalidate stat cache
self.clear_stat_cache(os.path.dirname(path))
def exists(self, path):
logger.info("exists: %s" % path)
try:
sb = self.stat(path)
if sb:
return True
return False
except Exception:
return False
def clear_stat_cache(self, path=None):
logger.info("clear_stat_cache: %s" % path)
if(path):
if path in self.meta_cache:
# directory
del self.meta_cache[path]
else:
# file
parent = os.path.dirname(path)
if parent in self.meta_cache:
del self.meta_cache[parent]
else:
self.meta_cache.clear()
def read(self, path, offset, size):
logger.info("read : %s, off(%d), size(%d)" % (path, offset, size))
buf = None
try:
sb = self.stat(path)
if offset >= sb.size:
# EOF
buf = BytesIO()
return buf.getvalue()
time1 = datetime.now()
buf = self._get_prefetch_data(path, offset, size)
#buf = self.session.read_data(path, offset, size)
read_len = len(buf)
if read_len + offset < sb.size:
self._invoke_prefetch(path, offset + read_len, size)
time2 = datetime.now()
delta = time2 - time1
logger.info("read: took - %s" % delta)
logger.info("read: read done")
except Exception, e:
logger.error("read: " + traceback.format_exc())
raise e
return buf
def write(self, path, offset, buf):
logger.info("write : %s, off(%d), size(%d)" % (path, offset, len(buf)))
try:
logger.info("write: writing buffer %d" % len(buf))
bio = BytesIO()
bio.write(buf)
bio.seek(0)
self.session.lock()
self.session.reconnect_if_needed()
self.session.storbinary("STOR", path, bio, offset)
self.session.unlock()
logger.info("write: writing done")
except Exception, e:
logger.error("write: " + traceback.format_exc())
raise e
# invalidate stat cache
self.clear_stat_cache(path)
def truncate(self, path, size):
logger.info("truncate : %s" % path)
raise IOError("truncate is not supported")
def unlink(self, path):
logger.info("unlink : %s" % path)
try:
logger.info("unlink: deleting a file - %s" % path)
self.session.lock()
self.session.reconnect_if_needed()
self.session.delete(path)
self.session.unlock()
logger.info("unlink: deleting done")
except Exception, e:
logger.error("unlink: " + traceback.format_exc())
raise e
# invalidate stat cache
self.clear_stat_cache(path)
def rename(self, path1, path2):
logger.info("rename : %s -> %s" % (path1, path2))
try:
logger.info("rename: renaming a file - %s to %s" % (path1, path2))
self.session.lock()
self.session.reconnect_if_needed()
self.session.rename(path1, path2)
self.session.unlock()
logger.info("rename: renaming done")
except Exception, e:
logger.error("rename: " + traceback.format_exc())
raise e
# invalidate stat cache
self.clear_stat_cache(path1)
self.clear_stat_cache(path2)
| 30.259218
| 196
| 0.535633
| 25,595
| 0.945093
| 0
| 0
| 0
| 0
| 0
| 0
| 3,960
| 0.146223
|
3d440ce993f7a5cda0551a5a0f0c5294985fb68c
| 2,338
|
py
|
Python
|
py/ops/ops/mob/keys.py
|
clchiou/garage
|
446ff34f86cdbd114b09b643da44988cf5d027a3
|
[
"MIT"
] | 3
|
2016-01-04T06:28:52.000Z
|
2020-09-20T13:18:40.000Z
|
py/ops/ops/mob/keys.py
|
clchiou/garage
|
446ff34f86cdbd114b09b643da44988cf5d027a3
|
[
"MIT"
] | null | null | null |
py/ops/ops/mob/keys.py
|
clchiou/garage
|
446ff34f86cdbd114b09b643da44988cf5d027a3
|
[
"MIT"
] | null | null | null |
__all__ = [
'keys',
]
from pathlib import Path
import logging
from garage import apps
from garage import scripts
LOG = logging.getLogger(__name__)
HOST_KEYS = [
('dsa', 1024),
('ecdsa', 521),
('ed25519', None),
('rsa', 4096),
]
# ECDSA requires fewer bits than RSA at the same level of strength and
# thus seems to be the best choice
USER_KEY_ALGORITHM = 'ecdsa'
USER_KEY_SIZE = 521
def ssh_host_key_filename(algorithm):
return 'ssh_host_%s_key' % algorithm
def ssh_user_key_filename(algorithm):
return 'id_' + algorithm
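# For instance, ssh_host_key_filename('ed25519') yields 'ssh_host_ed25519_key'
# and ssh_user_key_filename('ecdsa') yields 'id_ecdsa', matching the usual
# OpenSSH file naming.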
@apps.with_prog('gen-host-key')
@apps.with_help('generate host keys')
@apps.with_argument('output_dir', type=Path, help='set output directory')
def generate_host_key(args):
"""Generate SSH host keys with ssh-keygen."""
key_paths = [
args.output_dir / ssh_host_key_filename(algorithm)
for algorithm, _ in HOST_KEYS
]
okay = True
for key_path in key_paths:
if key_path.exists():
LOG.error('attempt to overwrite %s', key_path)
okay = False
if not okay:
return 1
scripts.mkdir(args.output_dir)
for (algorithm, key_size), key_path in zip(HOST_KEYS, key_paths):
cmd = [
'ssh-keygen',
'-t', algorithm,
'-N', '', # No password
'-C', 'root@localhost',
'-f', key_path,
]
if key_size:
cmd.extend(['-b', key_size])
scripts.execute(cmd)
return 0
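# With the defaults above, the ed25519 entry would invoke roughly:
#   ssh-keygen -t ed25519 -N '' -C root@localhost -f <output_dir>/ssh_host_ed25519_key
# with no -b flag, since its key size is None; the exact quoting depends on
# scripts.execute.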
@apps.with_prog('gen-user-key')
@apps.with_help('generate user key pair')
@apps.with_argument('output_dir', type=Path, help='set output directory')
def generate_user_key(args):
"""Generate SSH key pair with ssh-keygen."""
key_path = args.output_dir / ssh_user_key_filename(USER_KEY_ALGORITHM)
if key_path.exists():
LOG.error('attempt to overwrite %s', key_path)
return 1
scripts.mkdir(args.output_dir)
scripts.execute([
'ssh-keygen',
'-t', USER_KEY_ALGORITHM,
'-b', USER_KEY_SIZE,
'-C', 'plumber@localhost',
'-f', key_path,
])
return 0
@apps.with_help('manage security keys')
@apps.with_apps(
'operation', 'operation on keys',
generate_host_key,
generate_user_key,
)
def keys(args):
"""Manage security keys."""
return args.operation(args)
| 23.38
| 74
| 0.630881
| 0
| 0
| 0
| 0
| 1,773
| 0.75834
| 0
| 0
| 628
| 0.268606
|
3d4433d949aa6f4076c88076dfa660972581d142
| 28,882
|
py
|
Python
|
reconcile/test/test_saasherder.py
|
bhushanthakur93/qontract-reconcile
|
fd8eea9f92d353224113955d08e3592864e37df8
|
[
"Apache-2.0"
] | null | null | null |
reconcile/test/test_saasherder.py
|
bhushanthakur93/qontract-reconcile
|
fd8eea9f92d353224113955d08e3592864e37df8
|
[
"Apache-2.0"
] | null | null | null |
reconcile/test/test_saasherder.py
|
bhushanthakur93/qontract-reconcile
|
fd8eea9f92d353224113955d08e3592864e37df8
|
[
"Apache-2.0"
] | null | null | null |
from typing import Any
from unittest import TestCase
from unittest.mock import patch, MagicMock
import yaml
from github import GithubException
from reconcile.utils.openshift_resource import ResourceInventory
from reconcile.utils.saasherder import SaasHerder
from reconcile.utils.jjb_client import JJB
from reconcile.utils.saasherder import TARGET_CONFIG_HASH
from .fixtures import Fixtures
class MockJJB:
def __init__(self, data):
self.jobs = data
def get_all_jobs(self, job_types):
return self.jobs
@staticmethod
def get_repo_url(job):
return JJB.get_repo_url(job)
@staticmethod
def get_ref(job):
return JJB.get_ref(job)
class TestSaasFileValid(TestCase):
def setUp(self):
self.saas_files = [
{
"path": "path1",
"name": "a1",
"managedResourceTypes": [],
"resourceTemplates": [
{
"name": "rt",
"url": "url",
"targets": [
{
"namespace": {
"name": "ns",
"environment": {"name": "env1", "parameters": "{}"},
"cluster": {"name": "cluster"},
},
"ref": "main",
"upstream": {"instance": {"name": "ci"}, "name": "job"},
"parameters": {},
},
{
"namespace": {
"name": "ns",
"environment": {"name": "env2", "parameters": "{}"},
"cluster": {"name": "cluster"},
},
"ref": "master",
"upstream": {"instance": {"name": "ci"}, "name": "job"},
"parameters": {},
},
],
}
],
"roles": [{"users": [{"org_username": "myname"}]}],
}
]
jjb_mock_data = {
"ci": [
{
"name": "job",
"properties": [{"github": {"url": "url"}}],
"scm": [{"git": {"branches": ["main"]}}],
},
{
"name": "job",
"properties": [{"github": {"url": "url"}}],
"scm": [{"git": {"branches": ["master"]}}],
},
]
}
self.jjb = MockJJB(jjb_mock_data)
def test_check_saas_file_env_combo_unique(self):
saasherder = SaasHerder(
self.saas_files,
thread_pool_size=1,
gitlab=None,
integration="",
integration_version="",
settings={},
validate=True,
)
self.assertTrue(saasherder.valid)
def test_check_saas_file_env_combo_not_unique(self):
self.saas_files[0][
"name"
] = "long-name-which-is-too-long-to-produce-unique-combo"
saasherder = SaasHerder(
self.saas_files,
thread_pool_size=1,
gitlab=None,
integration="",
integration_version="",
settings={},
validate=True,
)
self.assertFalse(saasherder.valid)
def test_check_saas_file_upstream_not_used_with_commit_sha(self):
saasherder = SaasHerder(
self.saas_files,
thread_pool_size=1,
gitlab=None,
integration="",
integration_version="",
settings={},
validate=True,
)
self.assertTrue(saasherder.valid)
def test_check_saas_file_upstream_used_with_commit_sha(self):
self.saas_files[0]["resourceTemplates"][0]["targets"][0][
"ref"
] = "2637b6c41bda7731b1bcaaf18b4a50d7c5e63e30"
saasherder = SaasHerder(
self.saas_files,
thread_pool_size=1,
gitlab=None,
integration="",
integration_version="",
settings={},
validate=True,
)
self.assertFalse(saasherder.valid)
def test_validate_image_tag_not_equals_ref_valid(self):
self.saas_files[0]["resourceTemplates"][0]["targets"][0][
"parameters"
] = '{"IMAGE_TAG": "2637b6c"}'
saasherder = SaasHerder(
self.saas_files,
thread_pool_size=1,
gitlab=None,
integration="",
integration_version="",
settings={},
validate=True,
)
self.assertTrue(saasherder.valid)
def test_validate_image_tag_not_equals_ref_invalid(self):
self.saas_files[0]["resourceTemplates"][0]["targets"][0][
"ref"
] = "2637b6c41bda7731b1bcaaf18b4a50d7c5e63e30"
self.saas_files[0]["resourceTemplates"][0]["targets"][0][
"parameters"
] = '{"IMAGE_TAG": "2637b6c"}'
saasherder = SaasHerder(
self.saas_files,
thread_pool_size=1,
gitlab=None,
integration="",
integration_version="",
settings={},
validate=True,
)
self.assertFalse(saasherder.valid)
def test_validate_upstream_jobs_valid(self):
saasherder = SaasHerder(
self.saas_files,
thread_pool_size=1,
gitlab=None,
integration="",
integration_version="",
settings={},
validate=True,
)
saasherder.validate_upstream_jobs(self.jjb)
self.assertTrue(saasherder.valid)
def test_validate_upstream_jobs_invalid(self):
saasherder = SaasHerder(
self.saas_files,
thread_pool_size=1,
gitlab=None,
integration="",
integration_version="",
settings={},
validate=True,
)
jjb = MockJJB({"ci": []})
saasherder.validate_upstream_jobs(jjb)
self.assertFalse(saasherder.valid)
def test_check_saas_file_promotion_same_source(self):
rts = [
{
"name": "rt_publisher",
"url": "repo_publisher",
"targets": [
{
"namespace": {
"name": "ns",
"environment": {"name": "env1"},
"cluster": {"name": "cluster"},
},
"parameters": {},
"ref": "0000000000000",
"promotion": {
"publish": ["channel-1"],
},
}
],
},
{
"name": "rt_subscriber",
"url": "this-repo-will-not-match-the-publisher",
"targets": [
{
"namespace": {
"name": "ns2",
"environment": {"name": "env1"},
"cluster": {"name": "cluster"},
},
"parameters": {},
"ref": "0000000000000",
"promotion": {
"auto": "true",
"subscribe": ["channel-1"],
},
}
],
},
]
self.saas_files[0]["resourceTemplates"] = rts
saasherder = SaasHerder(
self.saas_files,
thread_pool_size=1,
gitlab=None,
integration="",
integration_version="",
settings={},
validate=True,
)
self.assertFalse(saasherder.valid)
class TestGetMovingCommitsDiffSaasFile(TestCase):
def setUp(self):
self.saas_files = [
{
"path": "path1",
"name": "a1",
"managedResourceTypes": [],
"resourceTemplates": [
{
"name": "rt",
"url": "http://github.com/user/repo",
"targets": [
{
"namespace": {
"name": "ns",
"environment": {"name": "env1"},
"cluster": {"name": "cluster1"},
},
"parameters": {},
"ref": "main",
},
{
"namespace": {
"name": "ns",
"environment": {"name": "env2"},
"cluster": {"name": "cluster2"},
},
"parameters": {},
"ref": "secondary",
},
],
}
],
"roles": [{"users": [{"org_username": "myname"}]}],
}
]
self.initiate_gh_patcher = patch.object(
SaasHerder, "_initiate_github", autospec=True
)
self.get_pipelines_provider_patcher = patch.object(
SaasHerder, "_get_pipelines_provider"
)
self.get_commit_sha_patcher = patch.object(
SaasHerder, "_get_commit_sha", autospec=True
)
self.initiate_gh = self.initiate_gh_patcher.start()
self.get_pipelines_provider = self.get_pipelines_provider_patcher.start()
self.get_commit_sha = self.get_commit_sha_patcher.start()
self.maxDiff = None
def tearDown(self):
for p in (
self.initiate_gh_patcher,
self.get_pipelines_provider_patcher,
self.get_commit_sha_patcher,
):
p.stop()
def test_get_moving_commits_diff_saas_file_all_fine(self):
saasherder = SaasHerder(
self.saas_files,
thread_pool_size=1,
gitlab=None,
integration="",
integration_version="",
settings={},
validate=False,
)
saasherder.state = MagicMock()
saasherder.state.get.return_value = "asha"
self.get_commit_sha.side_effect = ("abcd4242", "4242efg")
self.get_pipelines_provider.return_value = "apipelineprovider"
expected = [
{
"saas_file_name": self.saas_files[0]["name"],
"env_name": "env1",
"timeout": None,
"ref": "main",
"commit_sha": "abcd4242",
"cluster_name": "cluster1",
"pipelines_provider": "apipelineprovider",
"namespace_name": "ns",
"rt_name": "rt",
},
{
"saas_file_name": self.saas_files[0]["name"],
"env_name": "env2",
"timeout": None,
"ref": "secondary",
"commit_sha": "4242efg",
"cluster_name": "cluster2",
"pipelines_provider": "apipelineprovider",
"namespace_name": "ns",
"rt_name": "rt",
},
]
self.assertEqual(
saasherder.get_moving_commits_diff_saas_file(self.saas_files[0], True),
expected,
)
def test_get_moving_commits_diff_saas_file_bad_sha1(self):
saasherder = SaasHerder(
self.saas_files,
thread_pool_size=1,
gitlab=None,
integration="",
integration_version="",
settings={},
validate=False,
)
saasherder.state = MagicMock()
saasherder.state.get.return_value = "asha"
self.get_pipelines_provider.return_value = "apipelineprovider"
self.get_commit_sha.side_effect = GithubException(
401, "somedata", {"aheader": "avalue"}
)
# At least we don't crash!
self.assertEqual(
saasherder.get_moving_commits_diff_saas_file(self.saas_files[0], True), []
)
class TestPopulateDesiredState(TestCase):
def setUp(self):
saas_files = []
self.fxts = Fixtures("saasherder_populate_desired")
for file in [self.fxts.get("saas_remote_openshift_template.yaml")]:
saas_files.append(yaml.safe_load(file))
self.assertEqual(1, len(saas_files))
self.saasherder = SaasHerder(
saas_files,
thread_pool_size=1,
gitlab=None,
integration="",
integration_version="",
settings={"hashLength": 7},
)
# Mock GitHub interactions.
self.initiate_gh_patcher = patch.object(
SaasHerder,
"_initiate_github",
autospec=True,
return_value=None,
)
self.get_file_contents_patcher = patch.object(
SaasHerder,
"_get_file_contents",
wraps=self.fake_get_file_contents,
)
self.initiate_gh_patcher.start()
self.get_file_contents_patcher.start()
# Mock image checking.
self.get_check_images_patcher = patch.object(
SaasHerder,
"_check_images",
autospec=True,
return_value=None,
)
self.get_check_images_patcher.start()
def fake_get_file_contents(self, options):
self.assertEqual("https://github.com/rhobs/configuration", options["url"])
content = self.fxts.get(options["ref"] + (options["path"].replace("/", "_")))
return yaml.safe_load(content), "yolo", options["ref"]
def tearDown(self):
for p in (
self.initiate_gh_patcher,
self.get_file_contents_patcher,
self.get_check_images_patcher,
):
p.stop()
def test_populate_desired_state_saas_file_delete(self):
spec = {"delete": True}
desired_state = self.saasherder.populate_desired_state_saas_file(spec, None)
self.assertIsNone(desired_state)
def test_populate_desired_state_cases(self):
ri = ResourceInventory()
for resource_type in (
"Deployment",
"Service",
"ConfigMap",
):
ri.initialize_resource_type("stage-1", "yolo-stage", resource_type)
ri.initialize_resource_type("prod-1", "yolo", resource_type)
self.saasherder.populate_desired_state(ri)
cnt = 0
for (cluster, namespace, resource_type, data) in ri:
for _, d_item in data["desired"].items():
expected = yaml.safe_load(
self.fxts.get(
f"expected_{cluster}_{namespace}_{resource_type}.json",
)
)
self.assertEqual(expected, d_item.body)
cnt += 1
self.assertEqual(5, cnt, "expected 5 resources, found less")
class TestCollectRepoUrls(TestCase):
def test_collect_repo_urls(self):
repo_url = "git-repo"
saas_files = [
{
"path": "path1",
"name": "name1",
"managedResourceTypes": [],
"resourceTemplates": [{"name": "name", "url": repo_url, "targets": []}],
}
]
saasherder = SaasHerder(
saas_files,
thread_pool_size=1,
gitlab=None,
integration="",
integration_version="",
settings={},
)
self.assertEqual({repo_url}, saasherder.repo_urls)
class TestGetSaasFileAttribute(TestCase):
def test_attribute_none(self):
saas_files = [
{
"path": "path1",
"name": "name1",
"managedResourceTypes": [],
"resourceTemplates": [],
}
]
saasherder = SaasHerder(
saas_files,
thread_pool_size=1,
gitlab=None,
integration="",
integration_version="",
settings={},
)
att = saasherder._get_saas_file_feature_enabled("no_such_attribute")
self.assertEqual(att, None)
def test_attribute_not_none(self):
saas_files = [
{
"path": "path1",
"name": "name1",
"managedResourceTypes": [],
"resourceTemplates": [],
"attrib": True,
}
]
saasherder = SaasHerder(
saas_files,
thread_pool_size=1,
gitlab=None,
integration="",
integration_version="",
settings={},
)
att = saasherder._get_saas_file_feature_enabled("attrib")
self.assertEqual(att, True)
def test_attribute_none_with_default(self):
saas_files = [
{
"path": "path1",
"name": "name1",
"managedResourceTypes": [],
"resourceTemplates": [],
}
]
saasherder = SaasHerder(
saas_files,
thread_pool_size=1,
gitlab=None,
integration="",
integration_version="",
settings={},
)
att = saasherder._get_saas_file_feature_enabled("no_such_att", default=True)
self.assertEqual(att, True)
def test_attribute_not_none_with_default(self):
saas_files = [
{
"path": "path1",
"name": "name1",
"managedResourceTypes": [],
"resourceTemplates": [],
"attrib": True,
}
]
saasherder = SaasHerder(
saas_files,
thread_pool_size=1,
gitlab=None,
integration="",
integration_version="",
settings={},
)
att = saasherder._get_saas_file_feature_enabled("attrib", default=False)
self.assertEqual(att, True)
def test_attribute_multiple_saas_files_return_false(self):
saas_files = [
{
"path": "path1",
"name": "name1",
"managedResourceTypes": [],
"resourceTemplates": [],
"attrib": True,
},
{
"path": "path2",
"name": "name2",
"managedResourceTypes": [],
"resourceTemplates": [],
},
]
saasherder = SaasHerder(
saas_files,
thread_pool_size=1,
gitlab=None,
integration="",
integration_version="",
settings={},
)
self.assertFalse(saasherder._get_saas_file_feature_enabled("attrib"))
def test_attribute_multiple_saas_files_with_default_return_false(self):
saas_files = [
{
"path": "path1",
"name": "name1",
"managedResourceTypes": [],
"resourceTemplates": [],
"attrib": True,
},
{
"path": "path2",
"name": "name2",
"managedResourceTypes": [],
"resourceTemplates": [],
"attrib": True,
},
]
saasherder = SaasHerder(
saas_files,
thread_pool_size=1,
gitlab=None,
integration="",
integration_version="",
settings={},
)
att = saasherder._get_saas_file_feature_enabled("attrib", default=True)
self.assertFalse(att)
class TestConfigHashPromotionsValidation(TestCase):
"""TestCase to test SaasHerder promotions validation. SaasHerder is
initialized with ResourceInventory population. Like is done in
openshift-saas-deploy"""
cluster: str
namespace: str
fxt: Any
template: Any
@classmethod
def setUpClass(cls):
cls.fxt = Fixtures("saasherder")
cls.cluster = "test-cluster"
cls.template = cls.fxt.get_anymarkup("template_1.yml")
def setUp(self) -> None:
self.all_saas_files = [self.fxt.get_anymarkup("saas.gql.yml")]
self.state_patcher = patch("reconcile.utils.saasherder.State", autospec=True)
self.state_mock = self.state_patcher.start().return_value
self.ig_patcher = patch.object(SaasHerder, "_initiate_github", autospec=True)
self.ig_patcher.start()
self.image_auth_patcher = patch.object(SaasHerder, "_initiate_image_auth")
self.image_auth_patcher.start()
self.gfc_patcher = patch.object(SaasHerder, "_get_file_contents", autospec=True)
gfc_mock = self.gfc_patcher.start()
self.saas_file = self.fxt.get_anymarkup("saas.gql.yml")
# ApiVersion is set in the saas gql query method in queries module
self.saas_file["apiVersion"] = "v2"
gfc_mock.return_value = (self.template, "url", "ahash")
self.deploy_current_state_fxt = self.fxt.get_anymarkup("saas_deploy.state.json")
self.post_deploy_current_state_fxt = self.fxt.get_anymarkup(
"saas_post_deploy.state.json"
)
self.saasherder = SaasHerder(
[self.saas_file],
thread_pool_size=1,
gitlab=None,
integration="",
integration_version="",
accounts={"name": "test-account"}, # Initiates State in SaasHerder
settings={"hashLength": 24},
)
        # IMPORTANT: populating desired state modifies self.saas_files within
        # the saasherder object.
self.ri = ResourceInventory()
for ns in ["test-ns-publisher", "test-ns-subscriber"]:
for kind in ["Service", "Deployment"]:
self.ri.initialize_resource_type(self.cluster, ns, kind)
self.saasherder.populate_desired_state(self.ri)
if self.ri.has_error_registered():
raise Exception("Errors registered in Resourceinventory")
def tearDown(self):
self.state_patcher.stop()
self.ig_patcher.stop()
self.gfc_patcher.stop()
def test_config_hash_is_filled(self):
"""Ensures the get_config_diff_saas_file fills the promotion data
on the publisher target. This data is used in publish_promotions
method to add the hash to subscribed targets.
IMPORTANT: This is not the promotion_data within promotion. This
fields are set by _process_template method in saasherder
"""
job_spec = self.saasherder.get_configs_diff_saas_file(self.saas_file)[0]
promotion = job_spec["target_config"]["promotion"]
self.assertIsNotNone(promotion[TARGET_CONFIG_HASH])
def test_promotion_state_config_hash_match_validates(self):
"""A promotion is valid if the parent target config_hash set in
the state is equal to the one set in the subscriber target
promotion data. This is the happy path.
"""
publisher_state = {
"success": True,
"saas_file": self.saas_file["name"],
TARGET_CONFIG_HASH: "ed2af38cf21f268c",
}
self.state_mock.get.return_value = publisher_state
result = self.saasherder.validate_promotions()
self.assertTrue(result)
def test_promotion_state_config_hash_not_match_no_validates(self):
"""Promotion is not valid if the parent target config hash set in
the state does not match with the one set in the subsriber target
promotion_data. This could happen if the parent target has run again
with the same ref before before the subscriber target promotion MR is
merged.
"""
publisher_state = {
"success": True,
"saas_file": self.saas_file["name"],
TARGET_CONFIG_HASH: "will_not_match",
}
self.state_mock.get.return_value = publisher_state
result = self.saasherder.validate_promotions()
self.assertFalse(result)
def test_promotion_without_state_config_hash_validates(self):
"""Existent states won't have promotion data. If there is an ongoing
promotion, this ensures it will happen.
"""
publisher_state = {
"success": True,
}
self.state_mock.get.return_value = publisher_state
result = self.saasherder.validate_promotions()
self.assertTrue(result)
def test_promotion_without_promotion_data_validates(self):
"""A manual promotion might be required, subsribed targets without
promotion_data should validate if the parent target job has succed
with the same ref.
"""
publisher_state = {
"success": True,
"saas_file": self.saas_file["name"],
TARGET_CONFIG_HASH: "whatever",
}
# Remove promotion_data on the promoted target
self.saasherder.promotions[1]["promotion_data"] = None
self.state_mock.get.return_value = publisher_state
result = self.saasherder.validate_promotions()
self.assertTrue(result)
class TestConfigHashTrigger(TestCase):
"""TestCase to test Openshift SAAS deploy configs trigger. SaasHerder is
initialized WITHOUT ResourceInventory population. Like is done in the
config changes trigger"""
cluster: str
namespace: str
fxt: Any
template: Any
@classmethod
def setUpClass(cls):
cls.fxt = Fixtures("saasherder")
cls.cluster = "test-cluster"
def setUp(self) -> None:
self.all_saas_files = [self.fxt.get_anymarkup("saas.gql.yml")]
self.state_patcher = patch("reconcile.utils.saasherder.State", autospec=True)
self.state_mock = self.state_patcher.start().return_value
self.saas_file = self.fxt.get_anymarkup("saas.gql.yml")
# ApiVersion is set in the saas gql query method in queries module
self.saas_file["apiVersion"] = "v2"
self.deploy_current_state_fxt = self.fxt.get_anymarkup("saas_deploy.state.json")
self.post_deploy_current_state_fxt = self.fxt.get_anymarkup(
"saas_post_deploy.state.json"
)
self.state_mock.get.side_effect = [
self.deploy_current_state_fxt,
self.post_deploy_current_state_fxt,
]
self.saasherder = SaasHerder(
[self.saas_file],
thread_pool_size=1,
gitlab=None,
integration="",
integration_version="",
accounts={"name": "test-account"}, # Initiates State in SaasHerder
settings={"hashLength": 24},
)
def tearDown(self):
self.state_patcher.stop()
def test_same_configs_do_not_trigger(self):
"""Ensures that if the same config is found, no job is triggered
current Config is fetched from the state
"""
job_specs = self.saasherder.get_configs_diff_saas_file(self.saas_file)
self.assertListEqual(job_specs, [])
def test_config_hash_change_do_trigger(self):
"""Ensures a new job is triggered if the parent config hash changes"""
configs = self.saasherder.get_saas_targets_config(self.saas_file)
desired_tc = list(configs.values())[1]
desired_promo_data = desired_tc["promotion"]["promotion_data"]
desired_promo_data[0]["data"][0][TARGET_CONFIG_HASH] = "Changed"
job_specs = self.saasherder.get_configs_diff_saas_file(self.saas_file)
self.assertEqual(len(job_specs), 1)
def test_non_existent_config_triggers(self):
self.state_mock.get.side_effect = [self.deploy_current_state_fxt, None]
job_specs = self.saasherder.get_configs_diff_saas_file(self.saas_file)
self.assertEqual(len(job_specs), 1)
class TestRemoveNoneAttributes(TestCase):
def testSimpleDict(self):
input = {"a": 1, "b": {}, "d": None, "e": {"aa": "aa", "bb": None}}
expected = {"a": 1, "b": {}, "e": {"aa": "aa"}}
res = SaasHerder.remove_none_values(input)
self.assertEqual(res, expected)
def testNoneValue(self):
input = None
expected = {}
res = SaasHerder.remove_none_values(input)
self.assertEqual(res, expected)
| 33.544715
| 88
| 0.522263
| 28,462
| 0.985458
| 0
| 0
| 437
| 0.015131
| 0
| 0
| 6,322
| 0.218891
|
3d452a7b2a000511d4c3041100856759bae15e44
| 8,235
|
py
|
Python
|
configs/example/garnet_synth_traffic.py
|
georgia-tech-synergy-lab/gem5_astra
|
41695878a2b60c5a28fa104465558cd1acb8a695
|
[
"BSD-3-Clause"
] | 5
|
2020-11-15T12:27:28.000Z
|
2021-09-20T03:50:54.000Z
|
configs/example/garnet_synth_traffic.py
|
georgia-tech-synergy-lab/gem5_astra
|
41695878a2b60c5a28fa104465558cd1acb8a695
|
[
"BSD-3-Clause"
] | null | null | null |
configs/example/garnet_synth_traffic.py
|
georgia-tech-synergy-lab/gem5_astra
|
41695878a2b60c5a28fa104465558cd1acb8a695
|
[
"BSD-3-Clause"
] | 2
|
2020-10-27T01:15:41.000Z
|
2020-11-16T02:30:32.000Z
|
# Copyright (c) 2016 Georgia Institute of Technology
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Author: Tushar Krishna
from __future__ import print_function
import m5
from m5.objects import *
from m5.defines import buildEnv
from m5.util import addToPath
import os, optparse, sys
addToPath('../')
from common import Options
from ruby import Ruby
def parse_network_input_options(options,var, val):
if var=="num-npus:":
options.num_cpus = int(val)
options.num_dirs = int(val)
print("num cpus is: "+str(val))
elif var=="num-packages:":
options.num_packages = int(val)
elif var=="package-rows:":
options.package_rows= int(val)
elif var=="topology:":
options.topology= str(val)
elif var=="links-per-tile:":
options.links_per_tile=int(val)
elif var=="local-rings:":
options.local_rings=int(val)
elif var=="vertical-rings:":
options.vertical_rings=int(val)
elif var=="horizontal-rings:":
options.horizontal_rings=int(val)
if var=="flit-width:":
options.ni_flit_size = int(val)
options.flit_width = int(val)
elif var=="local-packet-size:":
options.local_packet_size=int(val)
elif var=="package-packet-size:":
options.package_packet_size=int(val)
elif var=="tile-link-width:":
options.tile_link_width=int(val)
elif var=="package-link-width:":
options.package_link_width=int(val)
elif var=="vcs-per-vnet:":
options.vcs_per_vnet = int(val)
elif var=="routing-algorithm:":
options.routing_algorithm=str(val)
elif var=="router-latency:":
options.router_latency=int(val)
elif var=="local-link-latency:":
options.local_link_latency=int(val)
elif var=="package-link-latency:":
options.package_link_latency=int(val)
elif var=="buffers-per-vc:":
options.buffers_per_ctrl_vc = int(val)
options.buffers_per_data_vc = int(val)
elif var=="local-link-efficiency:":
options.local_link_efficiency = float(val)
elif var=="package-link-efficiency:":
options.package_link_efficiency = float(val)
# Get paths we might need. It's expected this file is in m5/configs/example.
config_path = os.path.dirname(os.path.abspath(__file__))
config_root = os.path.dirname(config_path)
m5_root = os.path.dirname(config_root)
parser = optparse.OptionParser()
Options.addNoISAOptions(parser)
parser.add_option("--synthetic", type="choice", default="uniform_random",
choices=['uniform_random', 'tornado', 'bit_complement', \
'bit_reverse', 'bit_rotation', 'neighbor', \
'shuffle', 'transpose','training'])
parser.add_option("-i", "--injectionrate", type="float", default=0.1,
metavar="I",
help="Injection rate in packets per cycle per node. \
Takes decimal value between 0 to 1 (eg. 0.225). \
Number of digits after 0 depends upon --precision.")
parser.add_option("--precision", type="int", default=3,
help="Number of digits of precision after decimal point\
for injection rate")
parser.add_option("--sim-cycles", type="int", default=1000,
help="Number of simulation cycles")
parser.add_option("--num-packets-max", type="int", default=-1,
help="Stop injecting after --num-packets-max.\
Set to -1 to disable.")
parser.add_option("--single-sender-id", type="int", default=-1,
help="Only inject from this sender.\
Set to -1 to disable.")
parser.add_option("--single-dest-id", type="int", default=-1,
help="Only send to this destination.\
Set to -1 to disable.")
parser.add_option("--inj-vnet", type="int", default=-1,
help="Only inject in this vnet (0, 1 or 2).\
0 and 1 are 1-flit, 2 is 5-flit.\
Set to -1 to inject randomly in all vnets.")
#
# Add the ruby specific and protocol specific options
#
Ruby.define_options(parser)
execfile(os.path.join(config_root, "common", "Options.py"))
(options, args) = parser.parse_args()
if args:
print("Error: script doesn't take any positional arguments")
sys.exit(1)
if options.inj_vnet > 2:
print("Error: Injection vnet %d should be 0 (1-flit), 1 (1-flit) "
"or 2 (5-flit) or -1 (random)" % (options.inj_vnet))
sys.exit(1)
try:
netInput = open("network_inputs/"+options.net+".txt", "r")
print("Success in opening net file!")
index=0
inps=["",""]
with netInput as f:
for line in f:
for word in line.split():
inps[index%2]=word
index+=1
if index%2==0:
parse_network_input_options(options,inps[0],inps[1])
except IOError:
print("Could not open net file!")
cpus = [ GarnetSyntheticTraffic(
num_packets_max=options.num_packets_max,
single_sender=options.single_sender_id,
single_dest=options.single_dest_id,
sim_cycles=options.sim_cycles,
traffic_type=options.synthetic,
inj_rate=options.injectionrate,
inj_vnet=options.inj_vnet,
precision=options.precision,
burst_length=options.local_burst_length,
burst_interval=options.burst_interval,
num_packages=options.num_packages,
num_dest=options.num_dirs) \
for i in xrange(options.num_cpus) ]
# create the desired simulated system
system = System(cpu = cpus, mem_ranges = [AddrRange(options.mem_size)])
# Create a top-level voltage domain and clock domain
system.voltage_domain = VoltageDomain(voltage = options.sys_voltage)
system.clk_domain = SrcClockDomain(clock = options.sys_clock,
voltage_domain = system.voltage_domain)
Ruby.create_system(options, False, system)
# Create a separate clock domain for Ruby
system.ruby.clk_domain = SrcClockDomain(clock = options.ruby_clock,
voltage_domain = system.voltage_domain)
i = 0
for ruby_port in system.ruby._cpu_ports:
#
# Tie the cpu test ports to the ruby cpu port
#
cpus[i].test = ruby_port.slave
i += 1
# -----------------------
# run simulation
# -----------------------
root = Root(full_system = False, system = system)
root.system.mem_mode = 'timing'
# Not much point in this being higher than the L1 latency
m5.ticks.setGlobalFrequency('1ns')
# instantiate configuration
m5.instantiate()
# simulate until program terminates
exit_event = m5.simulate(options.abs_max_tick)
print('Exiting @ tick', m5.curTick(), 'because', exit_event.getCause())
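# Usage sketch (hedged: the flags below come from the shared Options/Ruby modules
# and from upstream gem5's documented garnet_synth_traffic example, so treat them
# as assumptions for this fork): a typical standalone Garnet run looks like
#   ./build/NULL/gem5.opt configs/example/garnet_synth_traffic.py \
#       --network=garnet2.0 --num-cpus=16 --num-dirs=16 \
#       --topology=Mesh_XY --mesh-rows=4 \
#       --sim-cycles=1000 --synthetic=uniform_random --injectionrate=0.01
# This fork additionally reads options.net, the name of a file under
# network_inputs/ that is parsed by the block above.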
| 37.262443
| 79
| 0.664602
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 3,748
| 0.455131
|
3d45fc30ab899b62ab8e13a78f05b881621256c2
| 9,329
|
py
|
Python
|
tests/unit/service/test_messaging.py
|
davetobin/ignition
|
eb183dca3fb2041d3f6249467a3265e7eb1d8905
|
[
"Apache-2.0"
] | 1
|
2019-09-02T15:23:08.000Z
|
2019-09-02T15:23:08.000Z
|
tests/unit/service/test_messaging.py
|
davetobin/ignition
|
eb183dca3fb2041d3f6249467a3265e7eb1d8905
|
[
"Apache-2.0"
] | 62
|
2019-09-16T14:51:32.000Z
|
2020-07-08T13:28:50.000Z
|
tests/unit/service/test_messaging.py
|
accanto-systems/ignition
|
87087b81dfa7f8f69525f4dd9c74db715e336eca
|
[
"Apache-2.0"
] | 4
|
2021-08-17T14:38:54.000Z
|
2022-02-09T14:33:57.000Z
|
import unittest
import time
import copy
from unittest.mock import patch, MagicMock, call
from ignition.service.messaging import PostalService, KafkaDeliveryService, KafkaInboxService, Envelope, Message, MessagingProperties
from kafka import KafkaProducer
class TestPostalService(unittest.TestCase):
def setUp(self):
self.mock_delivery_service = MagicMock()
def test_init_without_delivery_service_throws_error(self):
with self.assertRaises(ValueError) as context:
PostalService()
self.assertEqual(str(context.exception), 'delivery_service argument not provided')
def test_post_sends_envelope_to_delivery_service(self):
postal_service = PostalService(delivery_service=self.mock_delivery_service)
test_envelope = Envelope('test', Message('test message'))
postal_service.post(test_envelope)
self.mock_delivery_service.deliver.assert_called_once_with(test_envelope)
def test_post_throws_error_when_envelope_is_none(self):
postal_service = PostalService(delivery_service=self.mock_delivery_service)
with self.assertRaises(ValueError) as context:
postal_service.post(None)
self.assertEqual(str(context.exception), 'An envelope must be passed to post a message')
class TestKafkaDeliveryService(unittest.TestCase):
def setUp(self):
self.messaging_properties = MessagingProperties()
self.messaging_properties.connection_address='test:9092'
self.messaging_properties.config={'api_version_auto_timeout_ms': 5000}
def test_init_without_messaging_config_throws_error(self):
with self.assertRaises(ValueError) as context:
KafkaDeliveryService()
self.assertEqual(str(context.exception), 'messaging_properties argument not provided')
def test_init_without_bootstrap_servers_throws_error(self):
messaging_properties = MessagingProperties()
messaging_properties.connection_address=None
with self.assertRaises(ValueError) as context:
KafkaDeliveryService(messaging_properties=messaging_properties)
self.assertEqual(str(context.exception), 'connection_address not set on messaging_properties')
@patch('ignition.service.messaging.KafkaProducer')
def test_deliver(self, mock_kafka_producer_init):
# need to set this explicitly because we've patched KafkaProducer
mock_kafka_producer_init.DEFAULT_CONFIG = KafkaProducer.DEFAULT_CONFIG
delivery_service = KafkaDeliveryService(messaging_properties=self.messaging_properties)
test_envelope = Envelope('test_topic', Message('test message'))
delivery_service.deliver(test_envelope)
mock_kafka_producer_init.assert_called_once_with(bootstrap_servers='test:9092', api_version_auto_timeout_ms=5000, client_id='ignition')
self.assertEqual(delivery_service.producer, mock_kafka_producer_init.return_value)
mock_kafka_producer = mock_kafka_producer_init.return_value
mock_kafka_producer.send.assert_called_once_with('test_topic', b'test message')
@patch('ignition.service.messaging.KafkaProducer')
def test_deliver_throws_error_when_envelope_is_none(self, mock_kafka_producer_init):
delivery_service = KafkaDeliveryService(messaging_properties=self.messaging_properties)
with self.assertRaises(ValueError) as context:
delivery_service.deliver(None)
self.assertEqual(str(context.exception), 'An envelope must be passed to deliver a message')
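# Putting the pieces together (a sketch inferred from the tests above, not an
# authoritative ignition example): a PostalService wraps a delivery service and
# posts Envelopes to it, e.g.
#   props = MessagingProperties()
#   props.connection_address = 'kafka:9092'
#   delivery = KafkaDeliveryService(messaging_properties=props)
#   postal = PostalService(delivery_service=delivery)
#   postal.post(Envelope('my_topic', Message('hello')))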
class TestKafkaInboxService(unittest.TestCase):
def setUp(self):
self.messaging_properties = MessagingProperties()
self.messaging_properties.connection_address='test:9092'
self.messaging_properties.config={'api_version_auto_timeout_ms':5000}
def test_init_without_messaging_config_throws_error(self):
with self.assertRaises(ValueError) as context:
KafkaInboxService()
self.assertEqual(str(context.exception), 'messaging_properties argument not provided')
def test_init_without_bootstrap_servers_throws_error(self):
messaging_properties = MessagingProperties()
messaging_properties.connection_address=None
with self.assertRaises(ValueError) as context:
KafkaInboxService(messaging_properties=messaging_properties)
self.assertEqual(str(context.exception), 'connection_address not set on messaging_properties')
@patch('ignition.service.messaging.KafkaInboxThread')
def test_watch_inbox_starts_thread(self, mock_kafka_inbox_thread_init):
inbox_service = KafkaInboxService(messaging_properties=self.messaging_properties)
mock_read_inbox_func = MagicMock()
inbox_service.watch_inbox('test_group', 'test_topic', mock_read_inbox_func)
mock_kafka_inbox_thread_init.assert_called_once_with('test:9092', 'test_group', 'test_topic', mock_read_inbox_func, inbox_service._KafkaInboxService__thread_exit_func, self.messaging_properties.config)
mock_kafka_inbox_thread_init.return_value.start.assert_called_once()
@patch('ignition.service.messaging.KafkaConsumer')
def test_watch_inbox_thread_inits_consumer(self, mock_kafka_consumer_init):
inbox_service = KafkaInboxService(messaging_properties=self.messaging_properties)
mock_read_inbox_func = MagicMock()
inbox_service.watch_inbox('test_group', 'test_topic', mock_read_inbox_func)
mock_kafka_consumer_init.assert_called_once_with('test_topic', bootstrap_servers='test:9092', group_id='test_group', enable_auto_commit=False)
@patch('ignition.service.messaging.KafkaConsumer')
    def test_watch_inbox_thread_reads_messages(self, mock_kafka_consumer_init):
mock_kafka_consumer = mock_kafka_consumer_init.return_value
mock_record_1 = MagicMock()
mock_record_2 = MagicMock()
infinite_iter_stop = False
infinite_iter_has_stopped = False
ready_for_second_message = False
second_message_sent = False
def build_iter():
def iter():
yield mock_record_1
while not infinite_iter_stop:
if ready_for_second_message:
yield mock_record_2
break
while not infinite_iter_stop:
time.sleep(0.001)
infinite_iter_has_stopped = True
return iter
mock_kafka_consumer.__iter__.side_effect = build_iter()
inbox_service = KafkaInboxService(messaging_properties=self.messaging_properties)
mock_read_inbox_func = MagicMock()
inbox_service.watch_inbox('test_group', 'test_topic', mock_read_inbox_func)
time.sleep(0.01)
try:
self.assertEqual(len(inbox_service.active_threads), 1)
expected_config = copy.copy(self.messaging_properties.config)
expected_config = {
'bootstrap_servers': 'test:9092',
'group_id': 'test_group',
'enable_auto_commit': False,
'client_id': 'ignition'
}
mock_kafka_consumer_init.assert_called_once_with('test_topic', **expected_config)
mock_kafka_consumer.__iter__.assert_called_once()
mock_record_1.value.decode.assert_called_once_with('utf-8')
mock_record_2.value.decode.assert_not_called()
mock_read_inbox_func.assert_called_once_with(mock_record_1.value.decode.return_value)
mock_kafka_consumer.commit.assert_called_once()
ready_for_second_message = True
time.sleep(1)
mock_record_2.value.decode.assert_called_once_with('utf-8')
mock_read_inbox_func.assert_called_with(mock_record_2.value.decode.return_value)
mock_kafka_consumer.commit.assert_has_calls([call(), call()])
finally:
infinite_iter_stop = True
time.sleep(1)
mock_kafka_consumer.close.assert_called_once()
self.assertEqual(len(inbox_service.active_threads), 0)
@patch('ignition.service.messaging._thread')
@patch('ignition.service.messaging.KafkaConsumer')
def test_watch_inbox_thread_calls_exit_func_on_error(self, mock_kafka_consumer_init, mock_thread):
mock_kafka_consumer = mock_kafka_consumer_init.return_value
mock_record_1 = MagicMock()
infinite_iter_stop = False
ready_for_message = True
def build_iter():
def iter():
while not infinite_iter_stop:
if ready_for_message:
yield mock_record_1
break
return iter
mock_kafka_consumer.__iter__.side_effect = build_iter()
inbox_service = KafkaInboxService(test_mode=True, messaging_properties=self.messaging_properties)
mock_read_inbox_func = MagicMock()
mock_read_inbox_func.side_effect = ValueError('Test error')
self.assertFalse(inbox_service.exited)
inbox_service.watch_inbox('test_group', 'test_topic', mock_read_inbox_func)
ready_for_message = True
time.sleep(0.03)
## Indicates the exit func on inbox_service was called when in "test_mode"
self.assertTrue(inbox_service.exited)
mock_kafka_consumer.commit.assert_not_called()
| 51.541436
| 209
| 0.731054
| 9,066
| 0.971808
| 3,574
| 0.383106
| 6,161
| 0.660414
| 0
| 0
| 1,228
| 0.131633
|
3d470989d588fa1b7b09836531c89bcfed89beee
| 1,011
|
py
|
Python
|
app/core/management/commands/wait_for_db.py
|
denis240997/recipe-app-api
|
c03c079b8df9d2b527c6d32a7c213be2b1478c6b
|
[
"MIT"
] | null | null | null |
app/core/management/commands/wait_for_db.py
|
denis240997/recipe-app-api
|
c03c079b8df9d2b527c6d32a7c213be2b1478c6b
|
[
"MIT"
] | null | null | null |
app/core/management/commands/wait_for_db.py
|
denis240997/recipe-app-api
|
c03c079b8df9d2b527c6d32a7c213be2b1478c6b
|
[
"MIT"
] | null | null | null |
import time
from django.db import connections
from django.db.utils import OperationalError
from django.core.management.base import BaseCommand
# NOTE: merely looking up connections['default'] does not open a database connection,
# so a naive check can report success while postgres is still starting up (it may
# close and re-open connections during startup) and the application container then
# crashes. Forcing a real round trip with ensure_connection() avoids that race.
class Command(BaseCommand):
    '''Django command to pause execution until the database is available'''
    def handle(self, *args, **options):
        self.stdout.write('Waiting for database...')
        db_connection = None
        while not db_connection:
            try:
                # Actually open a connection instead of just fetching the wrapper.
                connections['default'].ensure_connection()
                db_connection = connections['default']
            except OperationalError:
                self.stdout.write('Database unavailable, waiting 1 second...')
                time.sleep(1)
        self.stdout.write(self.style.SUCCESS('Database is up!'))
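# Usage sketch (an assumption about the surrounding project, not part of this file):
# the command is typically run before the app starts, e.g. as part of a container
# command or entrypoint:
#   python manage.py wait_for_db && python manage.py migrate && \
#       python manage.py runserver 0.0.0.0:8000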
| 38.884615
| 98
| 0.680514
| 633
| 0.626113
| 0
| 0
| 0
| 0
| 0
| 0
| 449
| 0.444115
|
3d4788f3f357f54449458d8a9feead4ef160065f
| 835
|
py
|
Python
|
clusters/actions.py
|
bhaugen/localecon
|
ee3134f701e6a786767cf7eeb165ee03f077e9da
|
[
"MIT"
] | 10
|
2015-02-14T14:22:31.000Z
|
2022-02-22T17:40:34.000Z
|
clusters/actions.py
|
bhaugen/localecon
|
ee3134f701e6a786767cf7eeb165ee03f077e9da
|
[
"MIT"
] | 3
|
2017-02-01T16:44:04.000Z
|
2018-04-02T13:48:03.000Z
|
clusters/actions.py
|
bhaugen/localecon
|
ee3134f701e6a786767cf7eeb165ee03f077e9da
|
[
"MIT"
] | null | null | null |
import csv
from django.core.exceptions import PermissionDenied
from django.http import HttpResponse
def export_as_csv(modeladmin, request, queryset):
"""
Generic csv export admin action.
"""
if not request.user.is_staff:
raise PermissionDenied
opts = modeladmin.model._meta
response = HttpResponse(mimetype='text/csv')
response['Content-Disposition'] = 'attachment; filename=%s.csv' % unicode(opts).replace('.', '_')
writer = csv.writer(response)
field_names = [field.name for field in opts.fields]
# Write a first row with header information
writer.writerow(field_names)
# Write data rows
for obj in queryset:
writer.writerow([getattr(obj, field) for field in field_names])
return response
export_as_csv.short_description = "Export selected objects as csv file"
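# Usage sketch (hypothetical registration, not part of this module): the action
# appears in the admin changelist "Action" dropdown once added to a ModelAdmin:
#   from django.contrib import admin
#   from clusters.actions import export_as_csv
#   class MyModelAdmin(admin.ModelAdmin):  # MyModel/MyModelAdmin are placeholder names
#       actions = [export_as_csv]
#   admin.site.register(MyModel, MyModelAdmin)
# Note: HttpResponse(mimetype=...) and unicode() above are Python 2 / pre-Django-1.7
# idioms; on current Django the equivalents are content_type='text/csv' and str().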
| 37.954545
| 101
| 0.720958
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 211
| 0.252695
|
3d479358107ba6396633f05381cdd46111709044
| 37,605
|
py
|
Python
|
rbac/common/protobuf/task_transaction_pb2.py
|
knagware9/sawtooth-next-directory
|
be80852e08d2b27e105d964c727509f2a974002d
|
[
"Apache-2.0"
] | 1
|
2019-04-14T20:16:59.000Z
|
2019-04-14T20:16:59.000Z
|
rbac/common/protobuf/task_transaction_pb2.py
|
crazyrex/sawtooth-next-directory
|
210b581c8c92c307fab2f6d2b9a55526b56b790a
|
[
"Apache-2.0"
] | null | null | null |
rbac/common/protobuf/task_transaction_pb2.py
|
crazyrex/sawtooth-next-directory
|
210b581c8c92c307fab2f6d2b9a55526b56b790a
|
[
"Apache-2.0"
] | 1
|
2018-12-07T10:55:08.000Z
|
2018-12-07T10:55:08.000Z
|
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: task_transaction.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='task_transaction.proto',
package='',
syntax='proto3',
serialized_options=None,
serialized_pb=_b('\n\x16task_transaction.proto\"n\n\x13ProposeAddTaskOwner\x12\x13\n\x0bproposal_id\x18\x01 \x01(\t\x12\x0f\n\x07task_id\x18\x02 \x01(\t\x12\x0f\n\x07user_id\x18\x03 \x01(\t\x12\x0e\n\x06reason\x18\x04 \x01(\t\x12\x10\n\x08metadata\x18\x05 \x01(\t\"q\n\x16ProposeRemoveTaskOwner\x12\x13\n\x0bproposal_id\x18\x01 \x01(\t\x12\x0f\n\x07task_id\x18\x02 \x01(\t\x12\x0f\n\x07user_id\x18\x03 \x01(\t\x12\x0e\n\x06reason\x18\x04 \x01(\t\x12\x10\n\x08metadata\x18\x05 \x01(\t\"n\n\x13ProposeAddTaskAdmin\x12\x13\n\x0bproposal_id\x18\x01 \x01(\t\x12\x0f\n\x07task_id\x18\x02 \x01(\t\x12\x0f\n\x07user_id\x18\x03 \x01(\t\x12\x0e\n\x06reason\x18\x04 \x01(\t\x12\x10\n\x08metadata\x18\x05 \x01(\t\"q\n\x16ProposeRemoveTaskAdmin\x12\x13\n\x0bproposal_id\x18\x01 \x01(\t\x12\x0f\n\x07task_id\x18\x02 \x01(\t\x12\x0f\n\x07user_id\x18\x03 \x01(\t\x12\x0e\n\x06reason\x18\x04 \x01(\t\x12\x10\n\x08metadata\x18\x05 \x01(\t\"\\\n\x13\x43onfirmAddTaskOwner\x12\x13\n\x0bproposal_id\x18\x01 \x01(\t\x12\x0f\n\x07task_id\x18\x02 \x01(\t\x12\x0f\n\x07user_id\x18\x03 \x01(\t\x12\x0e\n\x06reason\x18\x04 \x01(\t\"_\n\x16\x43onfirmRemoveTaskOwner\x12\x13\n\x0bproposal_id\x18\x01 \x01(\t\x12\x0f\n\x07task_id\x18\x02 \x01(\t\x12\x0f\n\x07user_id\x18\x03 \x01(\t\x12\x0e\n\x06reason\x18\x04 \x01(\t\"\\\n\x13\x43onfirmAddTaskAdmin\x12\x13\n\x0bproposal_id\x18\x01 \x01(\t\x12\x0f\n\x07task_id\x18\x02 \x01(\t\x12\x0f\n\x07user_id\x18\x03 \x01(\t\x12\x0e\n\x06reason\x18\x04 \x01(\t\"_\n\x16\x43onfirmRemoveTaskAdmin\x12\x13\n\x0bproposal_id\x18\x01 \x01(\t\x12\x0f\n\x07task_id\x18\x02 \x01(\t\x12\x0f\n\x07user_id\x18\x03 \x01(\t\x12\x0e\n\x06reason\x18\x04 \x01(\t\"[\n\x12RejectAddTaskOwner\x12\x13\n\x0bproposal_id\x18\x01 \x01(\t\x12\x0f\n\x07task_id\x18\x02 \x01(\t\x12\x0f\n\x07user_id\x18\x03 \x01(\t\x12\x0e\n\x06reason\x18\x04 \x01(\t\"^\n\x15RejectRemoveTaskOwner\x12\x13\n\x0bproposal_id\x18\x01 \x01(\t\x12\x0f\n\x07task_id\x18\x02 \x01(\t\x12\x0f\n\x07user_id\x18\x03 \x01(\t\x12\x0e\n\x06reason\x18\x04 \x01(\t\"[\n\x12RejectAddTaskAdmin\x12\x13\n\x0bproposal_id\x18\x01 \x01(\t\x12\x0f\n\x07task_id\x18\x02 \x01(\t\x12\x0f\n\x07user_id\x18\x03 \x01(\t\x12\x0e\n\x06reason\x18\x04 \x01(\t\"^\n\x15RejectRemoveTaskAdmin\x12\x13\n\x0bproposal_id\x18\x01 \x01(\t\x12\x0f\n\x07task_id\x18\x02 \x01(\t\x12\x0f\n\x07user_id\x18\x03 \x01(\t\x12\x0e\n\x06reason\x18\x04 \x01(\t\"]\n\nCreateTask\x12\x0f\n\x07task_id\x18\x01 \x01(\t\x12\x0c\n\x04name\x18\x02 \x01(\t\x12\x0e\n\x06\x61\x64mins\x18\x03 \x03(\t\x12\x0e\n\x06owners\x18\x04 \x03(\t\x12\x10\n\x08metadata\x18\x05 \x01(\t\"b\n\nUpdateTask\x12\x0f\n\x07task_id\x18\x01 \x01(\t\x12\x10\n\x08new_name\x18\x02 \x01(\t\x12\x1b\n\x13old_metadata_sha512\x18\x03 \x01(\t\x12\x14\n\x0cnew_metadata\x18\x04 \x01(\tb\x06proto3')
)
_PROPOSEADDTASKOWNER = _descriptor.Descriptor(
name='ProposeAddTaskOwner',
full_name='ProposeAddTaskOwner',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='proposal_id', full_name='ProposeAddTaskOwner.proposal_id', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='task_id', full_name='ProposeAddTaskOwner.task_id', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='user_id', full_name='ProposeAddTaskOwner.user_id', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='reason', full_name='ProposeAddTaskOwner.reason', index=3,
number=4, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='metadata', full_name='ProposeAddTaskOwner.metadata', index=4,
number=5, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=26,
serialized_end=136,
)
_PROPOSEREMOVETASKOWNER = _descriptor.Descriptor(
name='ProposeRemoveTaskOwner',
full_name='ProposeRemoveTaskOwner',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='proposal_id', full_name='ProposeRemoveTaskOwner.proposal_id', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='task_id', full_name='ProposeRemoveTaskOwner.task_id', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='user_id', full_name='ProposeRemoveTaskOwner.user_id', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='reason', full_name='ProposeRemoveTaskOwner.reason', index=3,
number=4, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='metadata', full_name='ProposeRemoveTaskOwner.metadata', index=4,
number=5, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=138,
serialized_end=251,
)
_PROPOSEADDTASKADMIN = _descriptor.Descriptor(
name='ProposeAddTaskAdmin',
full_name='ProposeAddTaskAdmin',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='proposal_id', full_name='ProposeAddTaskAdmin.proposal_id', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='task_id', full_name='ProposeAddTaskAdmin.task_id', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='user_id', full_name='ProposeAddTaskAdmin.user_id', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='reason', full_name='ProposeAddTaskAdmin.reason', index=3,
number=4, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='metadata', full_name='ProposeAddTaskAdmin.metadata', index=4,
number=5, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=253,
serialized_end=363,
)
_PROPOSEREMOVETASKADMIN = _descriptor.Descriptor(
name='ProposeRemoveTaskAdmin',
full_name='ProposeRemoveTaskAdmin',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='proposal_id', full_name='ProposeRemoveTaskAdmin.proposal_id', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='task_id', full_name='ProposeRemoveTaskAdmin.task_id', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='user_id', full_name='ProposeRemoveTaskAdmin.user_id', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='reason', full_name='ProposeRemoveTaskAdmin.reason', index=3,
number=4, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='metadata', full_name='ProposeRemoveTaskAdmin.metadata', index=4,
number=5, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=365,
serialized_end=478,
)
_CONFIRMADDTASKOWNER = _descriptor.Descriptor(
name='ConfirmAddTaskOwner',
full_name='ConfirmAddTaskOwner',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='proposal_id', full_name='ConfirmAddTaskOwner.proposal_id', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='task_id', full_name='ConfirmAddTaskOwner.task_id', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='user_id', full_name='ConfirmAddTaskOwner.user_id', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='reason', full_name='ConfirmAddTaskOwner.reason', index=3,
number=4, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=480,
serialized_end=572,
)
_CONFIRMREMOVETASKOWNER = _descriptor.Descriptor(
name='ConfirmRemoveTaskOwner',
full_name='ConfirmRemoveTaskOwner',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='proposal_id', full_name='ConfirmRemoveTaskOwner.proposal_id', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='task_id', full_name='ConfirmRemoveTaskOwner.task_id', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='user_id', full_name='ConfirmRemoveTaskOwner.user_id', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='reason', full_name='ConfirmRemoveTaskOwner.reason', index=3,
number=4, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=574,
serialized_end=669,
)
_CONFIRMADDTASKADMIN = _descriptor.Descriptor(
name='ConfirmAddTaskAdmin',
full_name='ConfirmAddTaskAdmin',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='proposal_id', full_name='ConfirmAddTaskAdmin.proposal_id', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='task_id', full_name='ConfirmAddTaskAdmin.task_id', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='user_id', full_name='ConfirmAddTaskAdmin.user_id', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='reason', full_name='ConfirmAddTaskAdmin.reason', index=3,
number=4, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=671,
serialized_end=763,
)
_CONFIRMREMOVETASKADMIN = _descriptor.Descriptor(
name='ConfirmRemoveTaskAdmin',
full_name='ConfirmRemoveTaskAdmin',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='proposal_id', full_name='ConfirmRemoveTaskAdmin.proposal_id', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='task_id', full_name='ConfirmRemoveTaskAdmin.task_id', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='user_id', full_name='ConfirmRemoveTaskAdmin.user_id', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='reason', full_name='ConfirmRemoveTaskAdmin.reason', index=3,
number=4, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=765,
serialized_end=860,
)
_REJECTADDTASKOWNER = _descriptor.Descriptor(
name='RejectAddTaskOwner',
full_name='RejectAddTaskOwner',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='proposal_id', full_name='RejectAddTaskOwner.proposal_id', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='task_id', full_name='RejectAddTaskOwner.task_id', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='user_id', full_name='RejectAddTaskOwner.user_id', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='reason', full_name='RejectAddTaskOwner.reason', index=3,
number=4, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=862,
serialized_end=953,
)
_REJECTREMOVETASKOWNER = _descriptor.Descriptor(
name='RejectRemoveTaskOwner',
full_name='RejectRemoveTaskOwner',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='proposal_id', full_name='RejectRemoveTaskOwner.proposal_id', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='task_id', full_name='RejectRemoveTaskOwner.task_id', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='user_id', full_name='RejectRemoveTaskOwner.user_id', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='reason', full_name='RejectRemoveTaskOwner.reason', index=3,
number=4, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=955,
serialized_end=1049,
)
_REJECTADDTASKADMIN = _descriptor.Descriptor(
name='RejectAddTaskAdmin',
full_name='RejectAddTaskAdmin',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='proposal_id', full_name='RejectAddTaskAdmin.proposal_id', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='task_id', full_name='RejectAddTaskAdmin.task_id', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='user_id', full_name='RejectAddTaskAdmin.user_id', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='reason', full_name='RejectAddTaskAdmin.reason', index=3,
number=4, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1051,
serialized_end=1142,
)
_REJECTREMOVETASKADMIN = _descriptor.Descriptor(
name='RejectRemoveTaskAdmin',
full_name='RejectRemoveTaskAdmin',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='proposal_id', full_name='RejectRemoveTaskAdmin.proposal_id', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='task_id', full_name='RejectRemoveTaskAdmin.task_id', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='user_id', full_name='RejectRemoveTaskAdmin.user_id', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='reason', full_name='RejectRemoveTaskAdmin.reason', index=3,
number=4, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1144,
serialized_end=1238,
)
_CREATETASK = _descriptor.Descriptor(
name='CreateTask',
full_name='CreateTask',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='task_id', full_name='CreateTask.task_id', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='name', full_name='CreateTask.name', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='admins', full_name='CreateTask.admins', index=2,
number=3, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='owners', full_name='CreateTask.owners', index=3,
number=4, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='metadata', full_name='CreateTask.metadata', index=4,
number=5, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1240,
serialized_end=1333,
)
_UPDATETASK = _descriptor.Descriptor(
name='UpdateTask',
full_name='UpdateTask',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='task_id', full_name='UpdateTask.task_id', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='new_name', full_name='UpdateTask.new_name', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='old_metadata_sha512', full_name='UpdateTask.old_metadata_sha512', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='new_metadata', full_name='UpdateTask.new_metadata', index=3,
number=4, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1335,
serialized_end=1433,
)
DESCRIPTOR.message_types_by_name['ProposeAddTaskOwner'] = _PROPOSEADDTASKOWNER
DESCRIPTOR.message_types_by_name['ProposeRemoveTaskOwner'] = _PROPOSEREMOVETASKOWNER
DESCRIPTOR.message_types_by_name['ProposeAddTaskAdmin'] = _PROPOSEADDTASKADMIN
DESCRIPTOR.message_types_by_name['ProposeRemoveTaskAdmin'] = _PROPOSEREMOVETASKADMIN
DESCRIPTOR.message_types_by_name['ConfirmAddTaskOwner'] = _CONFIRMADDTASKOWNER
DESCRIPTOR.message_types_by_name['ConfirmRemoveTaskOwner'] = _CONFIRMREMOVETASKOWNER
DESCRIPTOR.message_types_by_name['ConfirmAddTaskAdmin'] = _CONFIRMADDTASKADMIN
DESCRIPTOR.message_types_by_name['ConfirmRemoveTaskAdmin'] = _CONFIRMREMOVETASKADMIN
DESCRIPTOR.message_types_by_name['RejectAddTaskOwner'] = _REJECTADDTASKOWNER
DESCRIPTOR.message_types_by_name['RejectRemoveTaskOwner'] = _REJECTREMOVETASKOWNER
DESCRIPTOR.message_types_by_name['RejectAddTaskAdmin'] = _REJECTADDTASKADMIN
DESCRIPTOR.message_types_by_name['RejectRemoveTaskAdmin'] = _REJECTREMOVETASKADMIN
DESCRIPTOR.message_types_by_name['CreateTask'] = _CREATETASK
DESCRIPTOR.message_types_by_name['UpdateTask'] = _UPDATETASK
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
ProposeAddTaskOwner = _reflection.GeneratedProtocolMessageType('ProposeAddTaskOwner', (_message.Message,), dict(
DESCRIPTOR = _PROPOSEADDTASKOWNER,
__module__ = 'task_transaction_pb2'
# @@protoc_insertion_point(class_scope:ProposeAddTaskOwner)
))
_sym_db.RegisterMessage(ProposeAddTaskOwner)
ProposeRemoveTaskOwner = _reflection.GeneratedProtocolMessageType('ProposeRemoveTaskOwner', (_message.Message,), dict(
DESCRIPTOR = _PROPOSEREMOVETASKOWNER,
__module__ = 'task_transaction_pb2'
# @@protoc_insertion_point(class_scope:ProposeRemoveTaskOwner)
))
_sym_db.RegisterMessage(ProposeRemoveTaskOwner)
ProposeAddTaskAdmin = _reflection.GeneratedProtocolMessageType('ProposeAddTaskAdmin', (_message.Message,), dict(
DESCRIPTOR = _PROPOSEADDTASKADMIN,
__module__ = 'task_transaction_pb2'
# @@protoc_insertion_point(class_scope:ProposeAddTaskAdmin)
))
_sym_db.RegisterMessage(ProposeAddTaskAdmin)
ProposeRemoveTaskAdmin = _reflection.GeneratedProtocolMessageType('ProposeRemoveTaskAdmin', (_message.Message,), dict(
DESCRIPTOR = _PROPOSEREMOVETASKADMIN,
__module__ = 'task_transaction_pb2'
# @@protoc_insertion_point(class_scope:ProposeRemoveTaskAdmin)
))
_sym_db.RegisterMessage(ProposeRemoveTaskAdmin)
ConfirmAddTaskOwner = _reflection.GeneratedProtocolMessageType('ConfirmAddTaskOwner', (_message.Message,), dict(
DESCRIPTOR = _CONFIRMADDTASKOWNER,
__module__ = 'task_transaction_pb2'
# @@protoc_insertion_point(class_scope:ConfirmAddTaskOwner)
))
_sym_db.RegisterMessage(ConfirmAddTaskOwner)
ConfirmRemoveTaskOwner = _reflection.GeneratedProtocolMessageType('ConfirmRemoveTaskOwner', (_message.Message,), dict(
DESCRIPTOR = _CONFIRMREMOVETASKOWNER,
__module__ = 'task_transaction_pb2'
# @@protoc_insertion_point(class_scope:ConfirmRemoveTaskOwner)
))
_sym_db.RegisterMessage(ConfirmRemoveTaskOwner)
ConfirmAddTaskAdmin = _reflection.GeneratedProtocolMessageType('ConfirmAddTaskAdmin', (_message.Message,), dict(
DESCRIPTOR = _CONFIRMADDTASKADMIN,
__module__ = 'task_transaction_pb2'
# @@protoc_insertion_point(class_scope:ConfirmAddTaskAdmin)
))
_sym_db.RegisterMessage(ConfirmAddTaskAdmin)
ConfirmRemoveTaskAdmin = _reflection.GeneratedProtocolMessageType('ConfirmRemoveTaskAdmin', (_message.Message,), dict(
DESCRIPTOR = _CONFIRMREMOVETASKADMIN,
__module__ = 'task_transaction_pb2'
# @@protoc_insertion_point(class_scope:ConfirmRemoveTaskAdmin)
))
_sym_db.RegisterMessage(ConfirmRemoveTaskAdmin)
RejectAddTaskOwner = _reflection.GeneratedProtocolMessageType('RejectAddTaskOwner', (_message.Message,), dict(
DESCRIPTOR = _REJECTADDTASKOWNER,
__module__ = 'task_transaction_pb2'
# @@protoc_insertion_point(class_scope:RejectAddTaskOwner)
))
_sym_db.RegisterMessage(RejectAddTaskOwner)
RejectRemoveTaskOwner = _reflection.GeneratedProtocolMessageType('RejectRemoveTaskOwner', (_message.Message,), dict(
DESCRIPTOR = _REJECTREMOVETASKOWNER,
__module__ = 'task_transaction_pb2'
# @@protoc_insertion_point(class_scope:RejectRemoveTaskOwner)
))
_sym_db.RegisterMessage(RejectRemoveTaskOwner)
RejectAddTaskAdmin = _reflection.GeneratedProtocolMessageType('RejectAddTaskAdmin', (_message.Message,), dict(
DESCRIPTOR = _REJECTADDTASKADMIN,
__module__ = 'task_transaction_pb2'
# @@protoc_insertion_point(class_scope:RejectAddTaskAdmin)
))
_sym_db.RegisterMessage(RejectAddTaskAdmin)
RejectRemoveTaskAdmin = _reflection.GeneratedProtocolMessageType('RejectRemoveTaskAdmin', (_message.Message,), dict(
DESCRIPTOR = _REJECTREMOVETASKADMIN,
__module__ = 'task_transaction_pb2'
# @@protoc_insertion_point(class_scope:RejectRemoveTaskAdmin)
))
_sym_db.RegisterMessage(RejectRemoveTaskAdmin)
CreateTask = _reflection.GeneratedProtocolMessageType('CreateTask', (_message.Message,), dict(
DESCRIPTOR = _CREATETASK,
__module__ = 'task_transaction_pb2'
# @@protoc_insertion_point(class_scope:CreateTask)
))
_sym_db.RegisterMessage(CreateTask)
UpdateTask = _reflection.GeneratedProtocolMessageType('UpdateTask', (_message.Message,), dict(
DESCRIPTOR = _UPDATETASK,
__module__ = 'task_transaction_pb2'
# @@protoc_insertion_point(class_scope:UpdateTask)
))
_sym_db.RegisterMessage(UpdateTask)
# @@protoc_insertion_point(module_scope)
| 41.506623
| 2,776
| 0.737216
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 8,303
| 0.220795
|
3d487f498a05799cec579339e7396f36837a8077
| 14,560
|
py
|
Python
|
courses/machine_learning/asl/open_project/ASL_youtube8m_models/video_using_datasets/trainer/model.py
|
Glairly/introduction_to_tensorflow
|
aa0a44d9c428a6eb86d1f79d73f54c0861b6358d
|
[
"Apache-2.0"
] | 2
|
2022-01-06T11:52:57.000Z
|
2022-01-09T01:53:56.000Z
|
courses/machine_learning/asl/open_project/ASL_youtube8m_models/video_using_datasets/trainer/model.py
|
Glairly/introduction_to_tensorflow
|
aa0a44d9c428a6eb86d1f79d73f54c0861b6358d
|
[
"Apache-2.0"
] | null | null | null |
courses/machine_learning/asl/open_project/ASL_youtube8m_models/video_using_datasets/trainer/model.py
|
Glairly/introduction_to_tensorflow
|
aa0a44d9c428a6eb86d1f79d73f54c0861b6358d
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
# Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Import libraries and modules
import tensorflow as tf
# Set logging verbosity to INFO for richer output
tf.logging.set_verbosity(tf.logging.INFO)
# The number of video classes
NUM_CLASSES = 4716
# Create an input function to read our training and validation data
# Then provide the results to the Estimator API
def read_dataset_video(file_pattern, mode, batch_size):
def _input_fn():
print("\nread_dataset_video: _input_fn: file_pattern = {}".format(file_pattern))
print("read_dataset_video: _input_fn: mode = {}".format(mode))
print("read_dataset_video: _input_fn: batch_size = {}".format(batch_size))
# This function will decode video-level examples from the video-level TF Records
def decode_example(serialized_examples):
# Create feature map
feature_map = {
'video_id': tf.FixedLenFeature(shape = [], dtype = tf.string),
'labels': tf.VarLenFeature(dtype = tf.int64),
'mean_rgb': tf.FixedLenFeature(shape = [1024], dtype = tf.float32),
'mean_audio': tf.FixedLenFeature(shape = [128], dtype = tf.float32)
}
# Parse TF Records into our features
features = tf.parse_single_example(serialized = serialized_examples, features = feature_map)
print("\nread_dataset_video: _input_fn: decode_example: features = {}".format(features)) # shape = video_id = (), mean_rgb = (1024,), mean_audio = (128,), labels = SparseTensor object
# Extract and format labels
sparse_labels = features.pop("labels") # SparseTensor object
print("read_dataset_video: _input_fn: decode_example: sparse_labels = {}\n".format(sparse_labels))
labels = tf.cast(x = tf.sparse_to_dense(sparse_indices = sparse_labels.values, output_shape = (NUM_CLASSES,), sparse_values = 1, validate_indices = False), dtype = tf.float32)
print("read_dataset_video: _input_fn: decode_example: labels = {}\n".format(labels)) # shape = (NUM_CLASSES,)
return features, labels
# Create list of files from file pattern
file_list = tf.gfile.Glob(filename = file_pattern)
#print("read_dataset_video: _input_fn: file_list = {}".format(file_list))
# Create dataset from file list
dataset = tf.data.TFRecordDataset(filenames = file_list)
print("read_dataset_video: _input_fn: dataset.TFRecordDataset = {}".format(dataset))
# Decode TF Record dataset examples
dataset = dataset.map(map_func = lambda x: decode_example(serialized_examples = x))
print("read_dataset_video: _input_fn: dataset.map = {}".format(dataset))
# Determine the number of times to repeat the files and whether we should shuffle, based on whether we are training or evaluating
if mode == tf.estimator.ModeKeys.TRAIN:
num_epochs = None # read files forever
# Shuffle the dataset within a buffer
dataset = dataset.shuffle(buffer_size = batch_size * 10, seed = None)
print("read_dataset_video: _input_fn: dataset.shuffle = {}".format(dataset))
else:
num_epochs = 1 # read files only once
# Repeat files num_epoch times
dataset = dataset.repeat(count = num_epochs)
print("read_dataset_video: _input_fn: dataset.repeat = {}".format(dataset))
# Group the data into batches
dataset = dataset.batch(batch_size = batch_size)
print("read_dataset_video: _input_fn: dataset.batch = {}".format(dataset))
# Create an iterator and then pull the next batch of features and labels from the example queue
batch_features, batch_labels = dataset.make_one_shot_iterator().get_next()
print("read_dataset_video: _input_fn: batch_features = {}".format(batch_features))
print("read_dataset_video: _input_fn: batch_labels = {}\n".format(batch_labels))
return batch_features, batch_labels
return _input_fn
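# Illustrative sketch (not part of the original file): the factory above returns an
# input_fn that can be handed to a TF 1.x Estimator; the file pattern, batch size and
# step count below are hypothetical placeholders.
# train_input_fn = read_dataset_video(
#     file_pattern = "gs://my-bucket/video_level/train*.tfrecord",
#     mode = tf.estimator.ModeKeys.TRAIN,
#     batch_size = 64)
# estimator.train(input_fn = train_input_fn, max_steps = 1000)  # 'estimator' built from video_level_model below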
# Create our model function to be used in our custom estimator
def video_level_model(features, labels, mode, params):
print("\nvideo_level_model: features = {}".format(features))
print("video_level_model: labels = {}".format(labels))
print("video_level_model: mode = {}".format(mode))
# 0. Configure network
# Get dynamic batch size
current_batch_size = tf.shape(features['mean_rgb'])[0]
print("video_level_model: current_batch_size = {}".format(current_batch_size))
# Stack all of the features into a 3-D tensor
combined_features = tf.concat(values = [features['mean_rgb'], features['mean_audio']], axis = 1) # shape = (current_batch_size, 1024 + 128)
print("video_level_model: combined_features = {}".format(combined_features))
# 1. Create the DNN structure now
# Create the input layer to our frame DNN
network = combined_features # shape = (current_batch_size, 1024 + 128)
print("video_level_model: network = combined_features = {}".format(network))
# Add hidden layers with the given number of units/neurons per layer
for units in params['hidden_units']:
network = tf.layers.dense(inputs = network, units = units, activation = tf.nn.relu) # shape = (current_batch_size, units)
print("video_level_model: network = {}, units = {}".format(network, units))
# Connect the final hidden layer to a dense layer with no activation to get the logits
logits = tf.layers.dense(inputs = network, units = NUM_CLASSES, activation = None) # shape = (current_batch_size, NUM_CLASSES)
print("video_level_model: logits = {}".format(logits))
# Select the top k logits in descending order
top_k_logits = tf.nn.top_k(input = logits, k = params['top_k'], sorted = True) # shape = (current_batch_size, top_k)
print("video_level_model: top_k_logits = {}".format(top_k_logits))
# Since this is a multi-class, multi-label problem we will apply a sigmoid, not a softmax, to each logit to get its own probability
probabilities = tf.sigmoid(logits) # shape = (current_batch_size, NUM_CLASSES)
print("video_level_model: probabilities = {}".format(probabilities))
# Select the top k probabilities in descending order
top_k_probabilities = tf.sigmoid(top_k_logits.values) # shape = (current_batch_size, top_k)
print("video_level_model: top_k_probabilities = {}".format(top_k_probabilities))
# Select the top k classes in descending order of likelihood
top_k_classes = top_k_logits.indices # shape = (current_batch_size, top_k)
print("video_level_model: top_k_classes = {}".format(top_k_classes))
# The 0/1 predictions based on a threshold; in this case the threshold is whether the probability is greater than random chance
predictions = tf.where(
condition = probabilities > 1.0 / NUM_CLASSES, # shape = (current_batch_size, NUM_CLASSES)
x = tf.ones_like(tensor = probabilities),
y = tf.zeros_like(tensor = probabilities))
print("video_level_model: predictions = {}".format(predictions))
top_k_predictions = tf.where(
condition = top_k_probabilities > 1.0 / NUM_CLASSES, # shape = (current_batch_size, top_k)
x = tf.ones_like(tensor = top_k_probabilities),
y = tf.zeros_like(tensor = top_k_probabilities))
print("video_level_model: top_k_predictions = {}\n".format(top_k_predictions))
# 2. Loss function, training/eval ops
if mode == tf.estimator.ModeKeys.TRAIN or mode == tf.estimator.ModeKeys.EVAL:
# Since this is a multi-class, multi-label problem, we will use sigmoid activation and cross entropy loss
loss = tf.losses.sigmoid_cross_entropy(multi_class_labels = labels, logits = logits)
train_op = tf.contrib.layers.optimize_loss(
loss = loss,
global_step = tf.train.get_global_step(),
learning_rate = 0.01,
optimizer = "Adam")
eval_metric_ops = {
"accuracy": tf.metrics.mean_per_class_accuracy(labels = labels, predictions = predictions, num_classes = NUM_CLASSES)
}
else:
loss = None
train_op = None
eval_metric_ops = None
# 3. Create predictions
predictions_dict = {"logits": top_k_logits.values,
"probabilities": top_k_probabilities,
"predictions": top_k_predictions,
"classes": top_k_classes}
# 4. Create export outputs
export_outputs = {"predict_export_outputs": tf.estimator.export.PredictOutput(outputs = predictions_dict)}
# 5. Return EstimatorSpec
return tf.estimator.EstimatorSpec(
mode = mode,
predictions = predictions_dict,
loss = loss,
train_op = train_op,
eval_metric_ops = eval_metric_ops,
export_outputs = export_outputs)
# Create our serving input function to accept the data at serving and send it in the right format to our custom estimator
def serving_input_fn():
# This function fixes the shape and type of our input strings
def fix_shape_and_type_for_serving(placeholder):
# String split each string in the batch and output the values from the resulting SparseTensors
split_string = tf.map_fn(
fn = lambda x: tf.string_split(source = [placeholder[x]], delimiter=',').values,
elems = tf.range(start = 0, limit = tf.shape(input = placeholder)[0]),
dtype = tf.string) # shape = (batch_size, input_sequence_length)
print("serving_input_fn: fix_shape_and_type_for_serving: split_string = {}".format(split_string))
# Convert each string in the split tensor to float
feature_tensor = tf.string_to_number(string_tensor = split_string, out_type = tf.float32) # shape = (batch_size, input_sequence_length)
print("serving_input_fn: fix_shape_and_type_for_serving: feature_tensor = {}".format(feature_tensor))
return feature_tensor
# This function fixes dynamic shape ambiguity of the last dimension so that we will be able to use it in our DNN (since tf.layers.dense requires the last dimension to be known)
def get_shape_and_set_modified_shape_2D(tensor, additional_dimension_sizes):
# Get static shape for tensor and convert it to list
shape = tensor.get_shape().as_list()
# Set the last dimension to additional_dimension_sizes[0] since we know that this is the correct size
shape[1] = additional_dimension_sizes[0]
# Set the shape of tensor to our modified shape
tensor.set_shape(shape = shape) # shape = (batch_size, additional_dimension_sizes[0])
print("serving_input_fn: get_shape_and_set_modified_shape_2D: tensor = {}, additional_dimension_sizes = {}".format(tensor, additional_dimension_sizes))
return tensor
# Create placeholders to accept the data sent to the model at serving time
feature_placeholders = { # all features come in as a batch of strings, shape = (batch_size,); this is because the arrays are passed as comma-separated strings to online ML Engine prediction
'video_id': tf.placeholder(dtype = tf.string, shape = [None]),
'mean_rgb': tf.placeholder(dtype = tf.string, shape = [None]),
'mean_audio': tf.placeholder(dtype = tf.string, shape = [None])
}
print("\nserving_input_fn: feature_placeholders = {}".format(feature_placeholders))
# Create feature tensors
features = {
"video_id": feature_placeholders["video_id"],
"mean_rgb": fix_shape_and_type_for_serving(placeholder = feature_placeholders["mean_rgb"]),
"mean_audio": fix_shape_and_type_for_serving(placeholder = feature_placeholders["mean_audio"])
}
print("serving_input_fn: features = {}".format(features))
# Fix dynamic shape ambiguity of feature tensors for our DNN
features["mean_rgb"] = get_shape_and_set_modified_shape_2D(tensor = features["mean_rgb"], additional_dimension_sizes = [1024])
features["mean_audio"] = get_shape_and_set_modified_shape_2D(tensor = features["mean_audio"], additional_dimension_sizes = [128])
print("serving_input_fn: features = {}\n".format(features))
return tf.estimator.export.ServingInputReceiver(features = features, receiver_tensors = feature_placeholders)
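# Illustrative sketch (not part of the original file): the receiver above expects each
# feature as one comma-separated string per instance, so a (hypothetical, truncated)
# prediction instance would look like:
# instance = {
#     "video_id": "abc123",
#     "mean_rgb": "0.12,0.34,0.56",    # in practice 1024 comma-separated floats
#     "mean_audio": "0.01,0.02,0.03"}  # in practice 128 comma-separated floats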
# Create custom estimator's train and evaluate function
def train_and_evaluate(args):
# Create the custom estimator from our model function and hyperparameters
estimator = tf.estimator.Estimator(
model_fn = video_level_model,
model_dir = args['output_dir'],
params = {'hidden_units': args['hidden_units'], 'top_k': args['top_k']})
# Create train spec to read in our training data
train_spec = tf.estimator.TrainSpec(
input_fn = read_dataset_video(
file_pattern = args['train_file_pattern'],
mode = tf.estimator.ModeKeys.TRAIN,
batch_size = args['batch_size']),
max_steps = args['train_steps'])
# Create exporter to save out the complete model to disk
exporter = tf.estimator.LatestExporter(name = 'exporter', serving_input_receiver_fn = serving_input_fn)
# Create eval spec to read in our validation data and export our model
eval_spec = tf.estimator.EvalSpec(
input_fn = read_dataset_video(
file_pattern = args['eval_file_pattern'],
mode = tf.estimator.ModeKeys.EVAL,
batch_size = args['batch_size']),
steps = None,
exporters = exporter,
start_delay_secs = args['start_delay_secs'],
throttle_secs = args['throttle_secs'])
# Create train and evaluate loop to train and evaluate our estimator
tf.estimator.train_and_evaluate(estimator, train_spec, eval_spec)
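# Illustrative sketch (not part of the original file): train_and_evaluate expects an
# args dict with the keys referenced above; every value below is a hypothetical placeholder.
# train_and_evaluate({
#     'output_dir': 'trained_model',
#     'hidden_units': [1024, 256],
#     'top_k': 20,
#     'train_file_pattern': 'data/train*.tfrecord',
#     'eval_file_pattern': 'data/eval*.tfrecord',
#     'batch_size': 64,
#     'train_steps': 1000,
#     'start_delay_secs': 60,
#     'throttle_secs': 120})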
| 54.943396
| 196
| 0.682212
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 6,742
| 0.463049
|
3d48f55b2e9c4409d2a293fd05fd3f37f16ba6df
| 22,394
|
py
|
Python
|
allennlp/tests/semparse/worlds/wikitables_world_test.py
|
kyleclo/allennlp
|
0205c26f3db7ef44d7ee70fa9ebdf5a7f6b43baf
|
[
"Apache-2.0"
] | 24
|
2019-09-16T00:10:54.000Z
|
2021-09-08T19:31:51.000Z
|
allennlp/tests/semparse/worlds/wikitables_world_test.py
|
TalSchuster/allennlp-MultiLang
|
dbb28b939652491d2f633326edccca2cd0e528c8
|
[
"Apache-2.0"
] | 2
|
2019-01-12T00:19:06.000Z
|
2019-02-27T05:29:31.000Z
|
allennlp/tests/semparse/worlds/wikitables_world_test.py
|
TalSchuster/allennlp-MultiLang
|
dbb28b939652491d2f633326edccca2cd0e528c8
|
[
"Apache-2.0"
] | 10
|
2019-12-06T11:32:37.000Z
|
2022-01-06T15:39:09.000Z
|
# pylint: disable=no-self-use,invalid-name
from typing import List
import pytest
from allennlp.common.testing import AllenNlpTestCase
from allennlp.data.tokenizers import Token
from allennlp.semparse import ParsingError
from allennlp.semparse.contexts import TableQuestionKnowledgeGraph
from allennlp.semparse.worlds import WikiTablesWorld
from allennlp.semparse.type_declarations import wikitables_lambda_dcs as types
def check_productions_match(actual_rules: List[str], expected_right_sides: List[str]):
actual_right_sides = [rule.split(' -> ')[1] for rule in actual_rules]
assert set(actual_right_sides) == set(expected_right_sides)
class TestWikiTablesWorld(AllenNlpTestCase):
def setUp(self):
super().setUp()
question_tokens = [Token(x) for x in ['what', 'was', 'the', 'last', 'year', '2000', '?']]
self.table_file = self.FIXTURES_ROOT / 'data' / 'wikitables' / 'sample_table.tsv'
self.table_kg = TableQuestionKnowledgeGraph.read_from_file(self.table_file, question_tokens)
self.world = WikiTablesWorld(self.table_kg)
def test_get_valid_actions_returns_correct_set(self):
# This test is long, but worth it. These are all of the valid actions in the grammar, and
# we want to be sure they are what we expect.
# This test checks that our valid actions for each type match PNP's, except for the
# terminal productions for type 'p'.
valid_actions = self.world.get_valid_actions()
assert set(valid_actions.keys()) == {
'<#1,#1>',
'<#1,<#1,#1>>',
'<#1,n>',
'<<#1,#2>,<#2,#1>>',
'<c,d>',
'<c,n>',
'<c,p>',
'<c,r>',
'<d,c>',
'<d,d>',
'<d,n>',
'<d,r>',
'<n,<n,<#1,<<#2,#1>,#1>>>>',
'<n,<n,<n,d>>>',
'<n,<n,n>>',
'<n,c>',
'<n,d>',
'<n,n>',
'<n,p>',
'<n,r>',
'<nd,nd>',
'<p,c>',
'<p,n>',
'<r,c>',
'<r,d>',
'<r,n>',
'<r,p>',
'<r,r>',
'@start@',
'c',
'd',
'n',
'p',
'r',
}
check_productions_match(valid_actions['<#1,#1>'],
['!='])
check_productions_match(valid_actions['<#1,<#1,#1>>'],
['and', 'or'])
check_productions_match(valid_actions['<#1,n>'],
['count'])
check_productions_match(valid_actions['<<#1,#2>,<#2,#1>>'],
['reverse'])
check_productions_match(valid_actions['<c,d>'],
["['lambda x', d]", '[<<#1,#2>,<#2,#1>>, <d,c>]'])
check_productions_match(valid_actions['<c,n>'],
["['lambda x', n]", '[<<#1,#2>,<#2,#1>>, <n,c>]'])
check_productions_match(valid_actions['<c,p>'],
['[<<#1,#2>,<#2,#1>>, <p,c>]'])
# Most of these are instance-specific production rules. These are the columns in the
# table. Remember that SEMPRE did things backwards: fb:row.row.division takes a cell ID
# and returns the row that has that cell in its row.division column. This is why we have
# to reverse all of these functions to go from a row to the cell in a particular column.
check_productions_match(valid_actions['<c,r>'],
['fb:row.row.null', # This one is global, representing an empty set.
'fb:row.row.year',
'fb:row.row.league',
'fb:row.row.avg_attendance',
'fb:row.row.division',
'fb:row.row.regular_season',
'fb:row.row.playoffs',
'fb:row.row.open_cup'])
# These might look backwards, but that's because SEMPRE chose to make them backwards.
# fb:a.b is a function that takes b and returns a. So fb:cell.cell.date takes cell.date
# and returns cell and fb:row.row.index takes row.index and returns row.
check_productions_match(valid_actions['<d,c>'],
['fb:cell.cell.date',
'[<<#1,#2>,<#2,#1>>, <c,d>]'])
check_productions_match(valid_actions['<d,d>'],
["['lambda x', d]", '[<<#1,#2>,<#2,#1>>, <d,d>]'])
check_productions_match(valid_actions['<d,n>'],
["['lambda x', n]", '[<<#1,#2>,<#2,#1>>, <n,d>]'])
check_productions_match(valid_actions['<d,r>'],
['[<<#1,#2>,<#2,#1>>, <r,d>]'])
check_productions_match(valid_actions['<n,<n,<#1,<<#2,#1>,#1>>>>'],
['argmax', 'argmin'])
# "date" is a function that takes three numbers: (date 2018 01 06).
check_productions_match(valid_actions['<n,<n,<n,d>>>'],
['date'])
check_productions_match(valid_actions['<n,<n,n>>'],
['-'])
check_productions_match(valid_actions['<n,c>'],
['fb:cell.cell.num2', 'fb:cell.cell.number',
'[<<#1,#2>,<#2,#1>>, <c,n>]'])
check_productions_match(valid_actions['<n,d>'],
["['lambda x', d]", '[<<#1,#2>,<#2,#1>>, <d,n>]'])
check_productions_match(valid_actions['<n,n>'],
['avg', 'sum', 'number',
"['lambda x', n]", '[<<#1,#2>,<#2,#1>>, <n,n>]'])
check_productions_match(valid_actions['<n,p>'],
['[<<#1,#2>,<#2,#1>>, <p,n>]'])
check_productions_match(valid_actions['<n,r>'],
['fb:row.row.index', '[<<#1,#2>,<#2,#1>>, <r,n>]'])
check_productions_match(valid_actions['<nd,nd>'],
['<', '<=', '>', '>=', 'min', 'max'])
# PART_TYPE rules. A cell part is for when a cell has text that can be split into multiple
# parts.
check_productions_match(valid_actions['<p,c>'],
['fb:cell.cell.part'])
check_productions_match(valid_actions['<p,n>'],
["['lambda x', n]"])
check_productions_match(valid_actions['<r,c>'],
['[<<#1,#2>,<#2,#1>>, <c,r>]'])
check_productions_match(valid_actions['<r,d>'],
["['lambda x', d]"])
check_productions_match(valid_actions['<r,n>'],
["['lambda x', n]", '[<<#1,#2>,<#2,#1>>, <n,r>]'])
check_productions_match(valid_actions['<r,p>'],
["['lambda x', p]", '[<<#1,#2>,<#2,#1>>, <p,r>]'])
check_productions_match(valid_actions['<r,r>'],
['fb:row.row.next', 'fb:type.object.type', '[<<#1,#2>,<#2,#1>>, <r,r>]'])
check_productions_match(valid_actions['@start@'],
['d', 'c', 'p', 'r', 'n'])
check_productions_match(valid_actions['c'],
['[<#1,#1>, c]',
'[<#1,<#1,#1>>, c, c]',
'[<n,<n,<#1,<<#2,#1>,#1>>>>, n, n, c, <n,c>]',
'[<n,<n,<#1,<<#2,#1>,#1>>>>, n, n, c, <d,c>]',
'[<d,c>, d]',
'[<n,c>, n]',
'[<p,c>, p]',
'[<r,c>, r]',
'fb:cell.null',
'fb:cell.2',
'fb:cell.2001',
'fb:cell.2005',
'fb:cell.4th_round',
'fb:cell.4th_western',
'fb:cell.5th',
'fb:cell.6_028',
'fb:cell.7_169',
'fb:cell.did_not_qualify',
'fb:cell.quarterfinals',
'fb:cell.usl_a_league',
'fb:cell.usl_first_division'])
check_productions_match(valid_actions['d'],
['[<n,<n,<n,d>>>, n, n, n]',
'[<#1,#1>, d]',
'[<#1,<#1,#1>>, d, d]',
'[<n,<n,<#1,<<#2,#1>,#1>>>>, n, n, d, <d,d>]',
'[<n,<n,<#1,<<#2,#1>,#1>>>>, n, n, d, <n,d>]',
'[<c,d>, c]',
'[<nd,nd>, d]'])
check_productions_match(valid_actions['n'],
['-1',
'0',
'1',
'2000',
'[<#1,#1>, n]',
'[<#1,<#1,#1>>, n, n]',
'[<#1,n>, c]',
'[<#1,n>, d]',
'[<#1,n>, n]',
'[<#1,n>, p]',
'[<#1,n>, r]',
'[<c,n>, c]',
'[<n,<n,<#1,<<#2,#1>,#1>>>>, n, n, n, <d,n>]',
'[<n,<n,<#1,<<#2,#1>,#1>>>>, n, n, n, <n,n>]',
'[<n,<n,n>>, n, n]',
'[<n,n>, n]',
'[<nd,nd>, n]',
'[<r,n>, r]'])
check_productions_match(valid_actions['p'],
['[<n,<n,<#1,<<#2,#1>,#1>>>>, n, n, p, <n,p>]',
'[<#1,#1>, p]',
'[<c,p>, c]',
'[<#1,<#1,#1>>, p, p]',
'fb:part.4th',
'fb:part.5th',
'fb:part.western'])
check_productions_match(valid_actions['r'],
['fb:type.row',
'[<#1,#1>, r]',
'[<#1,<#1,#1>>, r, r]',
'[<n,<n,<#1,<<#2,#1>,#1>>>>, n, n, r, <d,r>]',
'[<n,<n,<#1,<<#2,#1>,#1>>>>, n, n, r, <n,r>]',
'[<n,r>, n]',
'[<c,r>, c]',
'[<r,r>, r]'])
def test_world_processes_sempre_forms_correctly(self):
sempre_form = "((reverse fb:row.row.year) (fb:row.row.league fb:cell.usl_a_league))"
expression = self.world.parse_logical_form(sempre_form)
# We add columns to the name mapping in sorted order, so "league" and "year" end up as C2
# and C6.
f = types.name_mapper.get_alias
assert str(expression) == f"{f('reverse')}(C6,C2(cell:usl_a_league))"
def test_world_parses_logical_forms_with_dates(self):
sempre_form = "((reverse fb:row.row.league) (fb:row.row.year (fb:cell.cell.date (date 2000 -1 -1))))"
expression = self.world.parse_logical_form(sempre_form)
f = types.name_mapper.get_alias
assert str(expression) == \
f"{f('reverse')}(C2,C6({f('fb:cell.cell.date')}({f('date')}(num:2000,num:~1,num:~1))))"
def test_world_parses_logical_forms_with_decimals(self):
question_tokens = [Token(x) for x in ['0.2']]
table_kg = TableQuestionKnowledgeGraph.read_from_file(
self.FIXTURES_ROOT / "data" / "wikitables" / "sample_table.tsv", question_tokens)
world = WikiTablesWorld(table_kg)
sempre_form = "(fb:cell.cell.number (number 0.200))"
expression = world.parse_logical_form(sempre_form)
f = types.name_mapper.get_alias
assert str(expression) == f"{f('fb:cell.cell.number')}({f('number')}(num:0_200))"
def test_get_action_sequence_removes_currying_for_all_wikitables_functions(self):
# minus
logical_form = "(- (number 0) (number 1))"
parsed_logical_form = self.world.parse_logical_form(logical_form)
action_sequence = self.world.get_action_sequence(parsed_logical_form)
assert 'n -> [<n,<n,n>>, n, n]' in action_sequence
# date
logical_form = "(count (fb:cell.cell.date (date 2000 -1 -1)))"
parsed_logical_form = self.world.parse_logical_form(logical_form)
action_sequence = self.world.get_action_sequence(parsed_logical_form)
assert 'd -> [<n,<n,<n,d>>>, n, n, n]' in action_sequence
# argmax
logical_form = ("(argmax (number 1) (number 1) (fb:row.row.division fb:cell.2) "
"(reverse (lambda x ((reverse fb:row.row.index) (var x)))))")
parsed_logical_form = self.world.parse_logical_form(logical_form)
action_sequence = self.world.get_action_sequence(parsed_logical_form)
assert 'r -> [<n,<n,<#1,<<#2,#1>,#1>>>>, n, n, r, <n,r>]' in action_sequence
# and
logical_form = "(and (number 1) (number 1))"
parsed_logical_form = self.world.parse_logical_form(logical_form)
action_sequence = self.world.get_action_sequence(parsed_logical_form)
assert 'n -> [<#1,<#1,#1>>, n, n]' in action_sequence
def test_parsing_logical_forms_fails_with_unmapped_names(self):
with pytest.raises(ParsingError):
_ = self.world.parse_logical_form("(number 20)")
def test_world_has_only_basic_numbers(self):
valid_actions = self.world.get_valid_actions()
assert 'n -> -1' in valid_actions['n']
assert 'n -> 0' in valid_actions['n']
assert 'n -> 1' in valid_actions['n']
assert 'n -> 17' not in valid_actions['n']
assert 'n -> 231' not in valid_actions['n']
assert 'n -> 2007' not in valid_actions['n']
assert 'n -> 2107' not in valid_actions['n']
assert 'n -> 1800' not in valid_actions['n']
def test_world_adds_numbers_from_question(self):
question_tokens = [Token(x) for x in ['what', '2007', '2,107', '0.2', '1800s', '1950s', '?']]
table_kg = TableQuestionKnowledgeGraph.read_from_file(
self.FIXTURES_ROOT / "data" / "wikitables" / "sample_table.tsv", question_tokens)
world = WikiTablesWorld(table_kg)
valid_actions = world.get_valid_actions()
assert 'n -> 2007' in valid_actions['n']
assert 'n -> 2107' in valid_actions['n']
# It appears that sempre normalizes floating point numbers.
assert 'n -> 0.200' in valid_actions['n']
# We want to add the end-points to things like "1800s": 1800 and 1900.
assert 'n -> 1800' in valid_actions['n']
assert 'n -> 1900' in valid_actions['n']
assert 'n -> 1950' in valid_actions['n']
assert 'n -> 1960' in valid_actions['n']
def test_world_returns_correct_actions_with_reverse(self):
sempre_form = "((reverse fb:row.row.year) (fb:row.row.league fb:cell.usl_a_league))"
expression = self.world.parse_logical_form(sempre_form)
actions = self.world.get_action_sequence(expression)
target_action_sequence = ['@start@ -> c', 'c -> [<r,c>, r]', '<r,c> -> [<<#1,#2>,<#2,#1>>, <c,r>]',
'<<#1,#2>,<#2,#1>> -> reverse', '<c,r> -> fb:row.row.year',
'r -> [<c,r>, c]', '<c,r> -> fb:row.row.league', 'c -> fb:cell.usl_a_league']
assert actions == target_action_sequence
def test_world_returns_correct_actions_with_two_reverses(self):
sempre_form = ("(max ((reverse fb:cell.cell.date) ((reverse fb:row.row.year) "
"(fb:row.row.league fb:cell.usl_a_league))))")
expression = self.world.parse_logical_form(sempre_form)
actions = self.world.get_action_sequence(expression)
target_action_sequence = ['@start@ -> d', 'd -> [<nd,nd>, d]', '<nd,nd> -> max', 'd -> [<c,d>, c]',
'<c,d> -> [<<#1,#2>,<#2,#1>>, <d,c>]', '<<#1,#2>,<#2,#1>> -> reverse',
'<d,c> -> fb:cell.cell.date', 'c -> [<r,c>, r]',
'<r,c> -> [<<#1,#2>,<#2,#1>>, <c,r>]', '<<#1,#2>,<#2,#1>> -> reverse',
'<c,r> -> fb:row.row.year', 'r -> [<c,r>, c]',
'<c,r> -> fb:row.row.league', 'c -> fb:cell.usl_a_league']
assert actions == target_action_sequence
def test_world_returns_correct_actions_with_lambda_with_var(self):
sempre_form = ("((reverse fb:cell.cell.date) ((reverse fb:row.row.year) (argmax (number 1) "
"(number 1) (fb:row.row.league fb:cell.usl_a_league) (reverse (lambda x "
"((reverse fb:row.row.index) (var x)))))))")
expression = self.world.parse_logical_form(sempre_form, remove_var_function=False)
actions_with_var = self.world.get_action_sequence(expression)
assert '<#1,#1> -> var' in actions_with_var
assert 'r -> x' in actions_with_var
def test_world_returns_correct_actions_with_lambda_without_var(self):
sempre_form = ("((reverse fb:cell.cell.date) ((reverse fb:row.row.year) (argmax (number 1) "
"(number 1) (fb:row.row.league fb:cell.usl_a_league) (reverse (lambda x "
"((reverse fb:row.row.index) (var x)))))))")
expression = self.world.parse_logical_form(sempre_form)
actions_without_var = self.world.get_action_sequence(expression)
assert '<#1,#1> -> var' not in actions_without_var
assert 'r -> x' in actions_without_var
@pytest.mark.skip(reason="fibonacci recursion currently going on here")
def test_with_deeply_nested_logical_form(self):
question_tokens = [Token(x) for x in ['what', 'was', 'the', 'district', '?']]
table_filename = self.FIXTURES_ROOT / 'data' / 'wikitables' / 'table' / '109.tsv'
table_kg = TableQuestionKnowledgeGraph.read_from_file(table_filename, question_tokens)
world = WikiTablesWorld(table_kg)
logical_form = ("(count ((reverse fb:cell.cell.number) (or (or (or (or (or (or (or (or "
"(or (or (or (or (or (or (or (or (or (or (or (or (or fb:cell.virginia_1 "
"fb:cell.virginia_10) fb:cell.virginia_11) fb:cell.virginia_12) "
"fb:cell.virginia_13) fb:cell.virginia_14) fb:cell.virginia_15) "
"fb:cell.virginia_16) fb:cell.virginia_17) fb:cell.virginia_18) "
"fb:cell.virginia_19) fb:cell.virginia_2) fb:cell.virginia_20) "
"fb:cell.virginia_21) fb:cell.virginia_22) fb:cell.virginia_3) "
"fb:cell.virginia_4) fb:cell.virginia_5) fb:cell.virginia_6) "
"fb:cell.virginia_7) fb:cell.virginia_8) fb:cell.virginia_9)))")
print("Parsing...")
world.parse_logical_form(logical_form)
def _get_world_with_question_tokens(self, tokens: List[Token]) -> WikiTablesWorld:
table_kg = TableQuestionKnowledgeGraph.read_from_file(self.table_file, tokens)
world = WikiTablesWorld(table_kg)
return world
def test_get_agenda(self):
tokens = [Token(x) for x in ['what', 'was', 'the', 'last', 'year', '2000', '?']]
world = self._get_world_with_question_tokens(tokens)
assert set(world.get_agenda()) == {'n -> 2000',
'<c,r> -> fb:row.row.year',
'<n,<n,<#1,<<#2,#1>,#1>>>> -> argmax'}
tokens = [Token(x) for x in ['what', 'was', 'the', 'difference', 'in', 'attendance',
'between', 'years', '2001', 'and', '2005', '?']]
world = self._get_world_with_question_tokens(tokens)
# The agenda contains cells here instead of numbers because 2001 and 2005 actually link to
# entities in the table whereas 2000 (in the previous case) does not.
assert set(world.get_agenda()) == {'c -> fb:cell.2001',
'c -> fb:cell.2005',
'<c,r> -> fb:row.row.year',
'<n,<n,n>> -> -'}
tokens = [Token(x) for x in ['what', 'was', 'the', 'total', 'avg.', 'attendance', 'in',
'years', '2001', 'and', '2005', '?']]
world = self._get_world_with_question_tokens(tokens)
# The agenda contains cells here instead of numbers because 2001 and 2005 actually link to
# entities in the table whereas 2000 (in the previous case) does not.
assert set(world.get_agenda()) == {'c -> fb:cell.2001',
'c -> fb:cell.2005',
'<c,r> -> fb:row.row.year',
'<c,r> -> fb:row.row.avg_attendance',
'<n,n> -> sum'}
tokens = [Token(x) for x in ['when', 'was', 'the', 'least', 'avg.', 'attendance', '?']]
world = self._get_world_with_question_tokens(tokens)
assert set(world.get_agenda()) == {'<c,r> -> fb:row.row.avg_attendance',
'<n,<n,<#1,<<#2,#1>,#1>>>> -> argmin'
}
tokens = [Token(x) for x in ['what', 'is', 'the', 'least', 'avg.', 'attendance', '?']]
world = self._get_world_with_question_tokens(tokens)
assert set(world.get_agenda()) == {'<c,r> -> fb:row.row.avg_attendance',
'<nd,nd> -> min'
}
| 51.958237
| 111
| 0.466732
| 21,743
| 0.97093
| 0
| 0
| 1,330
| 0.059391
| 0
| 0
| 8,117
| 0.362463
|
3d4903f05506c73039c6cca6466ba4b87575d105
| 395
|
py
|
Python
|
FishCDailyQuestion/ex001-010/Python3_008/008_05.py
|
YorkFish/git_study
|
6e023244daaa22e12b24e632e76a13e5066f2947
|
[
"MIT"
] | null | null | null |
FishCDailyQuestion/ex001-010/Python3_008/008_05.py
|
YorkFish/git_study
|
6e023244daaa22e12b24e632e76a13e5066f2947
|
[
"MIT"
] | null | null | null |
FishCDailyQuestion/ex001-010/Python3_008/008_05.py
|
YorkFish/git_study
|
6e023244daaa22e12b24e632e76a13e5066f2947
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# coding:utf-8
from math import sqrt
def is_prime_num(num):
for i in range(2, int(sqrt(num))+1):
if num % i == 0:
return False # not a prime, return False
return True # is a prime, return True
count = 0
for i in range(100, 201):
if is_prime_num(i):
print(i, end=' ')
count += 1
print("\n\nThere are {} prime numbers in total.".format(count))
| 20.789474
| 63
| 0.582278
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 132
| 0.315036
|
3d490f3f5ae32168776078a1279b5239c7a6960d
| 4,324
|
py
|
Python
|
models/015_bolasso.py
|
cmougan/Novartis2021
|
72a6f088929a5a4546760f4a453ec4a77faf5856
|
[
"MIT"
] | null | null | null |
models/015_bolasso.py
|
cmougan/Novartis2021
|
72a6f088929a5a4546760f4a453ec4a77faf5856
|
[
"MIT"
] | null | null | null |
models/015_bolasso.py
|
cmougan/Novartis2021
|
72a6f088929a5a4546760f4a453ec4a77faf5856
|
[
"MIT"
] | null | null | null |
# %% Imports
from numpy.lib import select
import pandas as pd
import sys
import numpy as np
import random
from functools import partial
from sklearn.pipeline import Pipeline
from sklearn.impute import SimpleImputer
from sktools import IsEmptyExtractor
from lightgbm import LGBMRegressor
from category_encoders import TargetEncoder
from sklearn.linear_model import QuantileRegressor
from sklego.preprocessing import ColumnSelector
from sklearn.preprocessing import StandardScaler
from memo import memlist, memfile, grid, time_taken, Runner
sys.path.append("../")
from metrics.metric_participants import (ComputeMetrics, print_metrics)
from eda.checker import check_train_test
random.seed(0)
sales_train = pd.read_csv("../data/data_raw/sales_train.csv")
df_full = pd.read_csv("../data/split.csv")
df_region = pd.read_csv("../data/data_raw/regions.csv")
regions_hcps = pd.read_csv("../data/data_raw/regions_hcps.csv")
activity_features = pd.read_csv("../data/features/activity_features.csv")
brands_3_12 = pd.read_csv("../data/features/brand_3_12_market_features_lagged.csv")
rte_basic = pd.read_csv("../data/features/rte_features_v2.csv").drop(
columns=["sales", "validation"]
)
market_size = pd.read_csv("../data/market_size.csv")
# For reproducibility
random.seed(0)
VAL_SIZE = 38
SUBMISSION_NAME = "linear_model_simple"
# %% Training weights
market_size = (
market_size
.assign(weight=lambda x: 1 / x['sales'])
)
# %% Add region data
df_feats = df_full.merge(df_region, on="region", how="left")
df_feats = pd.merge(left=df_feats, right=regions_hcps, how="left", on="region")
df_feats = df_feats.merge(
activity_features, on=["month", "region", "brand"], how="left"
)
df_feats = df_feats.merge(rte_basic, on=["month", "region", "brand"], how="left")
df_feats = df_feats.merge(brands_3_12, on=["month", "region"], how="left")
df_feats["whichBrand"] = np.where(df_feats.brand == "brand_1", 1, 0)
df_feats['month_brand'] = df_feats.month + '_' + df_feats.brand
# drop sum variables
cols_to_drop = ["region", "sales", "validation"]
# %% Split train val test
X_train = df_feats.query("validation == 0").drop(columns=cols_to_drop)
y_train = df_feats.query("validation == 0").sales
X_val = df_feats.query("validation == 1").drop(columns=cols_to_drop)
y_val = df_feats.query("validation == 1").sales
X_test = df_feats.query("validation.isnull()", engine="python").drop(
columns=cols_to_drop
)
y_test = df_feats.query("validation.isnull()", engine="python").sales
check_train_test(X_train, X_val)
check_train_test(X_train, X_test, threshold=0.3)
check_train_test(X_val, X_test)
# %%
for quantile in [0.5, 0.1, 0.9]:
selected = {}
for iter in range(100):
print("Quantile: ", quantile, "iter: ", iter)
df_train = df_feats.query("validation == 0")
sample = df_train.sample(replace=True, frac=1)
X_train = sample.drop(columns=cols_to_drop)
y_train = sample.sales
models = {}
pipes = {}
train_preds = {}
val_preds = {}
models[quantile] = QuantileRegressor(
quantile=quantile,
alpha=0.05,
solver="highs-ds"
)
pipes[quantile] = Pipeline(
[
("te", TargetEncoder(cols=["month_brand", "month", "brand"])),
("imputer", SimpleImputer(strategy="median")),
("scale", StandardScaler()),
("lgb", models[quantile])
]
)
# Fit cv model
pipes[quantile].fit(X_train, y_train)
train_preds[quantile] = pipes[quantile].predict(X_train)
coefs = models[quantile].coef_
cols_pipe = pipes[quantile][:1].fit_transform(X_train.head(), y_train.head()).columns
coefs_dict = dict(zip(cols_pipe, coefs))
selected_features = list({k: v for k, v in coefs_dict.items() if v != 0}.keys())
selected[iter] = selected_features
all_selected = {}
for k, v in selected.items():
for feature in v:
all_selected[feature] = all_selected.get(feature, 0) + 1
all_selected_df = pd.DataFrame(all_selected.items(), columns=["feature", "count"]).sort_values("count", ascending=False)
all_selected_df.to_csv(f"../data/features/bolasso_features_0{int(quantile * 10)}.csv", index=False)
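# Illustrative sketch (not part of the original script): with 100 bootstrap fits per
# quantile, a BoLasso-style stable set could be read back by keeping features selected
# in most resamples; the threshold of 90 is a hypothetical choice.
# stable_df = pd.read_csv("../data/features/bolasso_features_05.csv")
# print(stable_df[stable_df["count"] >= 90].feature.tolist())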
| 32.268657
| 124
| 0.679695
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 953
| 0.220398
|
3d49492a4f368cab1e5d3dbd044945f99690e2f6
| 40,274
|
py
|
Python
|
docx.py
|
highcat/python-docx
|
05627c6330970f91771174c9e5d849ce28703b3e
|
[
"MIT"
] | null | null | null |
docx.py
|
highcat/python-docx
|
05627c6330970f91771174c9e5d849ce28703b3e
|
[
"MIT"
] | null | null | null |
docx.py
|
highcat/python-docx
|
05627c6330970f91771174c9e5d849ce28703b3e
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
Open and modify Microsoft Word 2007 docx files (called 'OpenXML' and 'Office OpenXML' by Microsoft)
Part of Python's docx module - http://github.com/mikemaccana/python-docx
See LICENSE for licensing information.
'''
from copy import deepcopy
import logging
from lxml import etree
try:
from PIL import Image
except ImportError:
import Image
import zipfile
import shutil
import distutils.dir_util
import re
import time
import os
from os.path import join
log = logging.getLogger(__name__)
# Record template directory's location which is just 'template' for a docx
# developer or 'site-packages/docx-template' if you have installed docx
TEMPLATE_DIR = join(os.path.dirname(__file__), 'docx-template') # installed
if not os.path.isdir(TEMPLATE_DIR):
TEMPLATE_DIR = join(os.path.dirname(__file__), 'template') # dev
_DOCX_DIR_NAME = 'docx-template'
# All Word prefixes / namespace matches used in document.xml & core.xml.
# LXML doesn't actually use prefixes (just the real namespace), but these
# make it easier to copy Word output.
nsprefixes = {
# Text Content
'mv':'urn:schemas-microsoft-com:mac:vml',
'mo':'http://schemas.microsoft.com/office/mac/office/2008/main',
've':'http://schemas.openxmlformats.org/markup-compatibility/2006',
'o':'urn:schemas-microsoft-com:office:office',
'r':'http://schemas.openxmlformats.org/officeDocument/2006/relationships',
'm':'http://schemas.openxmlformats.org/officeDocument/2006/math',
'v':'urn:schemas-microsoft-com:vml',
'w':'http://schemas.openxmlformats.org/wordprocessingml/2006/main',
'w10':'urn:schemas-microsoft-com:office:word',
'wne':'http://schemas.microsoft.com/office/word/2006/wordml',
# Drawing
'wp':'http://schemas.openxmlformats.org/drawingml/2006/wordprocessingDrawing',
'a':'http://schemas.openxmlformats.org/drawingml/2006/main',
'pic':'http://schemas.openxmlformats.org/drawingml/2006/picture',
# Properties (core and extended)
'cp':"http://schemas.openxmlformats.org/package/2006/metadata/core-properties",
'dc':"http://purl.org/dc/elements/1.1/",
'dcterms':"http://purl.org/dc/terms/",
'dcmitype':"http://purl.org/dc/dcmitype/",
'xsi':"http://www.w3.org/2001/XMLSchema-instance",
'ep':'http://schemas.openxmlformats.org/officeDocument/2006/extended-properties',
# Content Types (we're just making up our own namespaces here to save time)
'ct':'http://schemas.openxmlformats.org/package/2006/content-types',
# Package Relationships (we're just making up our own namespaces here to save time)
'pr':'http://schemas.openxmlformats.org/package/2006/relationships'
}
def opendocx(file):
'''Open a docx file, return a document XML tree'''
mydoc = zipfile.ZipFile(file)
xmlcontent = mydoc.read('word/document.xml')
document = etree.fromstring(xmlcontent)
return document
def newdocument():
document = makeelement('document')
document.append(makeelement('body'))
return document
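# Illustrative sketch (not part of the original file): a document tree can be loaded
# from an existing file or created from scratch; 'example.docx' is a hypothetical path.
# document = opendocx('example.docx')   # parse word/document.xml out of a docx package
# document = newdocument()              # or start a fresh tree
# body = document[0]                    # the body element appended by newdocument()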
def makeelement(tagname,tagtext=None,nsprefix='w',attributes=None,attrnsprefix=None):
'''Create an element & return it'''
# Deal with list of nsprefix by making namespacemap
namespacemap = None
if isinstance(nsprefix, list):
namespacemap = {}
for prefix in nsprefix:
namespacemap[prefix] = nsprefixes[prefix]
nsprefix = nsprefix[0] # FIXME: rest of code below expects a single prefix
if nsprefix:
namespace = '{'+nsprefixes[nsprefix]+'}'
else:
# For when namespace = None
namespace = ''
newelement = etree.Element(namespace+tagname, nsmap=namespacemap)
# Add attributes with namespaces
if attributes:
# If they haven't bothered setting attribute namespace, use an empty string
# (equivalent of no namespace)
if not attrnsprefix:
# Quick hack: it seems every element that has a 'w' nsprefix for its tag uses the same prefix for its attributes
if nsprefix == 'w':
attributenamespace = namespace
else:
attributenamespace = ''
else:
attributenamespace = '{'+nsprefixes[attrnsprefix]+'}'
for tagattribute in attributes:
newelement.set(attributenamespace+tagattribute, attributes[tagattribute])
if tagtext:
newelement.text = tagtext
return newelement
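# Illustrative sketch (not part of the original file): makeelement builds a namespaced
# lxml element; a hypothetical text run could be assembled like this.
# run = makeelement('r')
# run.append(makeelement('t', tagtext='Hello, world'))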
def pagebreak(type='page', orient='portrait'):
'''Insert a break, default 'page'.
See http://openxmldeveloper.org/forums/thread/4075.aspx
Return our page break element.'''
# Need to enumerate different types of page breaks.
validtypes = ['page', 'section']
if type not in validtypes:
raise ValueError('Page break style "%s" not implemented. Valid styles: %s.' % (type, validtypes))
pagebreak = makeelement('p')
if type == 'page':
run = makeelement('r')
br = makeelement('br',attributes={'type':type})
run.append(br)
pagebreak.append(run)
elif type == 'section':
pPr = makeelement('pPr')
sectPr = makeelement('sectPr')
if orient == 'portrait':
pgSz = makeelement('pgSz',attributes={'w':'12240','h':'15840'})
elif orient == 'landscape':
pgSz = makeelement('pgSz',attributes={'h':'12240','w':'15840', 'orient':'landscape'})
sectPr.append(pgSz)
pPr.append(sectPr)
pagebreak.append(pPr)
return pagebreak
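# Illustrative sketch (not part of the original file): a break element is appended to
# the document body like any other paragraph; 'body' is a hypothetical body element.
# body.append(pagebreak(type='page'))
# body.append(pagebreak(type='section', orient='landscape'))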
def paragraph(paratext, style='BodyText', breakbefore=False, jc='left'):
'''Make a new paragraph element, containing a run, and some text.
Return the paragraph element.
@param string jc: Paragraph alignment, possible values:
left, center, right, both (justified), ...
see http://www.schemacentral.com/sc/ooxml/t-w_ST_Jc.html
for a full list
If paratext is a list, spawn multiple run/text elements.
Support text styles (paratext must then be a list of lists in the form
<text> / <style>). Style is a string containing a combination of 'bui' chars
example
paratext = [
('some bold text', 'b'),
('some normal text', ''),
('some italic underlined text', 'iu'),
]
'''
# Make our elements
paragraph = makeelement('p')
if isinstance(paratext, list):
text = []
for pt in paratext:
if isinstance(pt, (list,tuple)):
text.append([makeelement('t',tagtext=pt[0]), pt[1]])
else:
text.append([makeelement('t',tagtext=pt), ''])
else:
text = [[makeelement('t',tagtext=paratext),''],]
pPr = makeelement('pPr')
pStyle = makeelement('pStyle',attributes={'val':style})
pJc = makeelement('jc',attributes={'val':jc})
pPr.append(pStyle)
pPr.append(pJc)
# Add the text the run, and the run to the paragraph
paragraph.append(pPr)
for t in text:
run = makeelement('r')
rPr = makeelement('rPr')
if isinstance(t[1], list):
for prop in t[1]: # custom properties
rPr.append(prop)
else:
# Apply styles
if t[1].find('b') > -1:
b = makeelement('b')
rPr.append(b)
if t[1].find('u') > -1:
u = makeelement('u',attributes={'val':'single'})
rPr.append(u)
if t[1].find('i') > -1:
i = makeelement('i')
rPr.append(i)
run.append(rPr)
# Insert lastRenderedPageBreak for assistive technologies like
# document narrators to know when a page break occurred.
if breakbefore:
lastRenderedPageBreak = makeelement('lastRenderedPageBreak')
run.append(lastRenderedPageBreak)
run.append(t[0])
paragraph.append(run)
# Return the combined paragraph
return paragraph
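# Illustrative sketch of the styled-run form documented above; `body` is an
# assumed document body element supplied by the caller.
def _example_paragraph(body):
    body.append(paragraph('Plain body text'))
    body.append(paragraph([
        ('some bold text', 'b'),
        (' and underlined italics', 'iu'),
    ], style='BodyText', jc='both'))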
def contenttypes():
# FIXME - doesn't quite work...read from string as temp hack...
#types = makeelement('Types',nsprefix='ct')
types = etree.fromstring('''<Types xmlns="http://schemas.openxmlformats.org/package/2006/content-types"></Types>''')
parts = {
'/word/theme/theme1.xml':'application/vnd.openxmlformats-officedocument.theme+xml',
'/word/fontTable.xml':'application/vnd.openxmlformats-officedocument.wordprocessingml.fontTable+xml',
'/docProps/core.xml':'application/vnd.openxmlformats-package.core-properties+xml',
'/docProps/app.xml':'application/vnd.openxmlformats-officedocument.extended-properties+xml',
'/word/document.xml':'application/vnd.openxmlformats-officedocument.wordprocessingml.document.main+xml',
'/word/settings.xml':'application/vnd.openxmlformats-officedocument.wordprocessingml.settings+xml',
'/word/numbering.xml':'application/vnd.openxmlformats-officedocument.wordprocessingml.numbering+xml',
'/word/styles.xml':'application/vnd.openxmlformats-officedocument.wordprocessingml.styles+xml',
'/word/webSettings.xml':'application/vnd.openxmlformats-officedocument.wordprocessingml.webSettings+xml'
}
for part in parts:
types.append(makeelement('Override',nsprefix=None,attributes={'PartName':part,'ContentType':parts[part]}))
# Add support for filetypes
filetypes = {'rels':'application/vnd.openxmlformats-package.relationships+xml','xml':'application/xml','jpeg':'image/jpeg','gif':'image/gif','png':'image/png'}
for extension in filetypes:
types.append(makeelement('Default',nsprefix=None,attributes={'Extension':extension,'ContentType':filetypes[extension]}))
return types
def heading(headingtext,headinglevel,lang='en'):
'''Make a new heading, return the heading element'''
lmap = {
'en': 'Heading',
'it': 'Titolo',
}
# Make our elements
paragraph = makeelement('p')
pr = makeelement('pPr')
pStyle = makeelement('pStyle',attributes={'val':lmap[lang]+str(headinglevel)})
run = makeelement('r')
text = makeelement('t',tagtext=headingtext)
# Add the text the run, and the run to the paragraph
pr.append(pStyle)
run.append(text)
paragraph.append(pr)
paragraph.append(run)
# Return the combined paragraph
return paragraph
def table(contents, heading=True, colw=None, cwunit='dxa', tblw=0, twunit='auto', borders={}, celstyle=None, rowstyle=None, table_props=None):
'''Get a list of lists, return a table
@param list contents: A list of lists describing contents
Every item in the list can be a string or a valid
XML element itself. It can also be a list. In that case
all the listed elements will be merged into the cell.
@param bool heading: Tells whether the first line should be treated as a heading
or not
@param list colw: A list of integers. The list must have the same element
count as the content lines. Specifies column widths in
cwunit units
@param string cwunit: Unit used for column width:
'pct': fiftieths of a percent
'dxa': twentieths of a point
'nil': no width
'auto': automagically determined
@param int tblw: Table width
@param int twunit: Unit used for table width. Same as cwunit
@param dict borders: Dictionary defining table border. Supported keys are:
'top', 'left', 'bottom', 'right', 'insideH', 'insideV', 'all'
When specified, the 'all' key has precedence over others.
Each key must define a dict of border attributes:
color: The color of the border, in hex or 'auto'
space: The space, measured in points
sz: The size of the border, in eighths of a point
val: The style of the border, see http://www.schemacentral.com/sc/ooxml/t-w_ST_Border.htm
@param list celstyle: Specify the style for each column, list of dicts.
supported keys:
'align': specify the alignment, see paragraph documentation,
@return lxml.etree: Generated XML etree element
'''
table = makeelement('tbl')
columns = len(contents[0])
# Table properties
tableprops = makeelement('tblPr')
tablestyle = makeelement('tblStyle',attributes={'val':''})
tableprops.append(tablestyle)
if not table_props:
table_props = {}
for k, attr in table_props.items():
if isinstance(attr, etree._Element):
tableprops.append(attr)
else:
prop = makeelement(k, attributes=attr)
tableprops.append(prop)
tablewidth = makeelement('tblW',attributes={'w':str(tblw),'type':str(twunit)})
tableprops.append(tablewidth)
if len(borders.keys()):
tableborders = makeelement('tblBorders')
for b in ['top', 'left', 'bottom', 'right', 'insideH', 'insideV']:
if b in borders.keys() or 'all' in borders.keys():
k = 'all' if 'all' in borders.keys() else b
attrs = {}
for a in borders[k].keys():
attrs[a] = str(borders[k][a])
borderelem = makeelement(b,attributes=attrs)
tableborders.append(borderelem)
tableprops.append(tableborders)
tablelook = makeelement('tblLook',attributes={'val':'0400'})
tableprops.append(tablelook)
table.append(tableprops)
# Table Grid
tablegrid = makeelement('tblGrid')
for i in range(columns):
tablegrid.append(makeelement('gridCol',attributes={'w':str(colw[i]) if colw else '2390'}))
table.append(tablegrid)
# Heading Row
row = makeelement('tr')
rowprops = makeelement('trPr')
cnfStyle = makeelement('cnfStyle',attributes={'val':'000000100000'})
rowprops.append(cnfStyle)
row.append(rowprops)
if heading:
i = 0
for heading in contents[0]:
cell = makeelement('tc')
# Cell properties
cellprops = makeelement('tcPr')
if colw:
wattr = {'w':str(colw[i]),'type':cwunit}
else:
wattr = {'w':'0','type':'auto'}
cellwidth = makeelement('tcW',attributes=wattr)
cellstyle = makeelement('shd',attributes={'val':'clear','color':'auto','fill':'FFFFFF','themeFill':'text2','themeFillTint':'99'})
cellprops.append(cellwidth)
cellprops.append(cellstyle)
cell.append(cellprops)
# Paragraph (Content)
if not isinstance(heading, (list, tuple)):
heading = [heading,]
for h in heading:
if isinstance(h, etree._Element):
cell.append(h)
else:
cell.append(paragraph(h,jc='center'))
row.append(cell)
i += 1
table.append(row)
# Contents Rows
for contentrow in contents[1 if heading else 0:]:
row = makeelement('tr')
if rowstyle:
rowprops = makeelement('trPr')
if 'height' in rowstyle:
rowHeight = makeelement('trHeight', attributes={'val': str(rowstyle['height']),
'hRule': 'exact'})
rowprops.append(rowHeight)
row.append(rowprops)
i = 0
for content_cell in contentrow:
cell = makeelement('tc')
# Properties
cellprops = makeelement('tcPr')
if colw:
wattr = {'w':str(colw[i]),'type':cwunit}
else:
wattr = {'w':'0','type':'auto'}
cellwidth = makeelement('tcW', attributes=wattr)
cellprops.append(cellwidth)
align = 'left'
cell_spec_style = {}
if celstyle:
cell_spec_style = deepcopy(celstyle[i])
if isinstance(content_cell, dict):
cell_spec_style.update(content_cell['style'])
content_cell = content_cell['content']
# spec. align property
SPEC_PROPS = ['align',]
if 'align' in cell_spec_style:
align = cell_spec_style['align']
# any property for cell, by OOXML specification
for cs, attrs in cell_spec_style.items():
if cs in SPEC_PROPS:
continue
cell_prop = makeelement(cs, attributes=attrs)
cellprops.append(cell_prop)
cell.append(cellprops)
# Paragraph (Content)
if not isinstance(content_cell, (list, tuple)):
content_cell = [content_cell,]
for c in content_cell:
# cell.append(cellprops)
if isinstance(c, etree._Element):
cell.append(c)
else:
cell.append(paragraph(c, jc=align))
row.append(cell)
i += 1
table.append(row)
return table
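# Hedged usage sketch for table(); the column widths, border attributes and
# cell contents below are illustrative values, not taken from the original code.
def _example_table(body):
    body.append(table(
        [['Item', 'Qty'],
         ['Apples', '3'],
         ['Pears', '7']],
        heading=True,
        colw=[4000, 1000], cwunit='dxa',
        borders={'all': {'color': 'auto', 'space': 0, 'sz': 4, 'val': 'single'}},
        celstyle=[{'align': 'left'}, {'align': 'right'}],
    ))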
def picture(relationshiplist, picname, picdescription, pixelwidth=None,
pixelheight=None, nochangeaspect=True, nochangearrowheads=True,
temp_dir=None):
'''Take a relationshiplist, picture file name, and return a paragraph containing the image
and an updated relationshiplist'''
# http://openxmldeveloper.org/articles/462.aspx
# Create an image. Size may be specified, otherwise it will be based on the
# pixel size of the image. Return a paragraph containing the picture.
# Copy the file into the media dir
assert temp_dir
media_dir = join(temp_dir, _DOCX_DIR_NAME, 'word', 'media')
if not os.path.isdir(media_dir):
os.makedirs(media_dir)
shutil.copyfile(picname, join(media_dir,picname))
# Check if the user has specified a size
if not pixelwidth or not pixelheight:
# If not, get info from the picture itself
pixelwidth,pixelheight = Image.open(picname).size[0:2]
# OpenXML measures on-screen objects in English Metric Units
# 1cm = 36000 EMUs
emuperpixel = 12667
width = str(pixelwidth * emuperpixel)
height = str(pixelheight * emuperpixel)
# Set relationship ID to the first available
picid = '2'
picrelid = 'rId'+str(len(relationshiplist)+1)
relationshiplist.append([
'http://schemas.openxmlformats.org/officeDocument/2006/relationships/image',
'media/'+picname])
# There are 3 main elements inside a picture
# 1. The Blipfill - specifies how the image fills the picture area (stretch, tile, etc.)
blipfill = makeelement('blipFill',nsprefix='pic')
blipfill.append(makeelement('blip',nsprefix='a',attrnsprefix='r',attributes={'embed':picrelid}))
stretch = makeelement('stretch',nsprefix='a')
stretch.append(makeelement('fillRect',nsprefix='a'))
blipfill.append(makeelement('srcRect',nsprefix='a'))
blipfill.append(stretch)
# 2. The non visual picture properties
nvpicpr = makeelement('nvPicPr',nsprefix='pic')
cnvpr = makeelement('cNvPr',nsprefix='pic',
attributes={'id':'0','name':'Picture 1','descr':picname})
nvpicpr.append(cnvpr)
cnvpicpr = makeelement('cNvPicPr',nsprefix='pic')
cnvpicpr.append(makeelement('picLocks', nsprefix='a',
attributes={'noChangeAspect':str(int(nochangeaspect)),
'noChangeArrowheads':str(int(nochangearrowheads))}))
nvpicpr.append(cnvpicpr)
# 3. The Shape properties
sppr = makeelement('spPr',nsprefix='pic',attributes={'bwMode':'auto'})
xfrm = makeelement('xfrm',nsprefix='a')
xfrm.append(makeelement('off',nsprefix='a',attributes={'x':'0','y':'0'}))
xfrm.append(makeelement('ext',nsprefix='a',attributes={'cx':width,'cy':height}))
prstgeom = makeelement('prstGeom',nsprefix='a',attributes={'prst':'rect'})
prstgeom.append(makeelement('avLst',nsprefix='a'))
sppr.append(xfrm)
sppr.append(prstgeom)
# Add our 3 parts to the picture element
pic = makeelement('pic',nsprefix='pic')
pic.append(nvpicpr)
pic.append(blipfill)
pic.append(sppr)
# Now make the supporting elements
# The following sequence is just: make element, then add its children
graphicdata = makeelement('graphicData',nsprefix='a',
attributes={'uri':'http://schemas.openxmlformats.org/drawingml/2006/picture'})
graphicdata.append(pic)
graphic = makeelement('graphic',nsprefix='a')
graphic.append(graphicdata)
framelocks = makeelement('graphicFrameLocks',nsprefix='a',attributes={'noChangeAspect':'1'})
framepr = makeelement('cNvGraphicFramePr',nsprefix='wp')
framepr.append(framelocks)
docpr = makeelement('docPr',nsprefix='wp',
attributes={'id':picid,'name':'Picture 1','descr':picdescription})
effectextent = makeelement('effectExtent',nsprefix='wp',
attributes={'l':'25400','t':'0','r':'0','b':'0'})
extent = makeelement('extent',nsprefix='wp',attributes={'cx':width,'cy':height})
inline = makeelement('inline',
attributes={'distT':"0",'distB':"0",'distL':"0",'distR':"0"},nsprefix='wp')
inline.append(extent)
inline.append(effectextent)
inline.append(docpr)
inline.append(framepr)
inline.append(graphic)
drawing = makeelement('drawing')
drawing.append(inline)
run = makeelement('r')
run.append(drawing)
paragraph = makeelement('p')
paragraph.append(run)
return relationshiplist,paragraph
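# Sketch of the picture() call flow: it returns the updated relationship list
# together with the paragraph, so both must be captured. The file name is a
# placeholder and `temp_dir` is assumed to be the same scratch directory later
# passed to savedocx().
def _example_picture(body, relationships, temp_dir):
    relationships, pic_para = picture(
        relationships, 'image1.png', 'A descriptive caption', temp_dir=temp_dir)
    body.append(pic_para)
    return relationships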
def search(document,search):
'''Search a document for a regex, return success / fail result'''
result = False
searchre = re.compile(search)
for element in document.iter():
if element.tag == '{%s}t' % nsprefixes['w']: # t (text) elements
if element.text:
if searchre.search(element.text):
result = True
return result
def replace(document,search,replace):
'''Replace all occurrences of string with a different string, return updated document'''
newdocument = document
searchre = re.compile(search)
for element in newdocument.iter():
if element.tag == '{%s}t' % nsprefixes['w']: # t (text) elements
if element.text:
if searchre.search(element.text):
element.text = re.sub(search,replace,element.text)
return newdocument
def clean(document):
""" Perform misc cleaning operations on documents.
Returns cleaned document.
"""
newdocument = document
# Clean empty text and r tags
for t in ('t', 'r'):
rmlist = []
for element in newdocument.iter():
if element.tag == '{%s}%s' % (nsprefixes['w'], t):
if not element.text and not len(element):
rmlist.append(element)
for element in rmlist:
element.getparent().remove(element)
return newdocument
def findTypeParent(element, tag):
""" Finds fist parent of element of the given type
@param object element: etree element
@param string the tag parent to search for
@return object element: the found parent or None when not found
"""
p = element
while p is not None:
p = p.getparent()
if p is not None and p.tag == tag:
return p
# Not found
return None
def AdvSearch(document, search, bs=3):
'''Return set of all regex matches
This is an advanced version of python-docx.search() that takes into
account blocks of <bs> elements at a time.
What it does:
It searches the entire document body for text blocks.
Since the text to search could be spanned across multiple text blocks,
we need to adopt some sort of algorithm to handle this situation.
The smaller matching group of blocks (up to bs) is then adopted.
If the matching group has more than one block, blocks other than first
are cleared and all the replacement text is put on first block.
Examples:
original text blocks : [ 'Hel', 'lo,', ' world!' ]
search : 'Hello,'
output blocks : [ 'Hello,' ]
original text blocks : [ 'Hel', 'lo', ' __', 'name', '__!' ]
search : '(__[a-z]+__)'
output blocks : [ '__name__' ]
@param instance document: The original document
@param str search: The text to search for (regexp)
@param int bs: See above
@return set All occurrences of the search string
'''
# Compile the search regexp
searchre = re.compile(search)
matches = []
# Will match against searchels. Searchels is a list that contains last
# n text elements found in the document. 1 < n < bs
searchels = []
for element in document.iter():
if element.tag == '{%s}t' % nsprefixes['w']: # t (text) elements
if element.text:
# Add this element to searchels
searchels.append(element)
if len(searchels) > bs:
# If searchels is too long, remove the first element
searchels.pop(0)
# Search all combinations of searchels, starting from
# smaller up to bigger ones
# l = search length
# s = search start
# e = element IDs to merge
found = False
for l in range(1,len(searchels)+1):
if found:
break
for s in range(len(searchels)):
if found:
break
if s+l <= len(searchels):
e = range(s,s+l)
txtsearch = ''
for k in e:
txtsearch += searchels[k].text
# Search for the text in the whole txtsearch
match = searchre.search(txtsearch)
if match:
matches.append(match.group())
found = True
return set(matches)
def advReplace(document,search,replace,bs=3):
'''Replace all occurrences of string with a different string, return updated document
This is a modified version of python-docx.replace() that takes into
account blocks of <bs> elements at a time. The replace element can also
be a string or an xml etree element.
What it does:
It searches the entire document body for text blocks.
Then scans those text blocks for the search pattern.
Since the text to search could be spanned across multiple text blocks,
we need to adopt some sort of algorithm to handle this situation.
The smaller matching group of blocks (up to bs) is then adopted.
If the matching group has more than one block, blocks other than first
are cleared and all the replacement text is put on first block.
Examples:
original text blocks : [ 'Hel', 'lo,', ' world!' ]
search / replace: 'Hello,' / 'Hi!'
output blocks : [ 'Hi!', '', ' world!' ]
original text blocks : [ 'Hel', 'lo,', ' world!' ]
search / replace: 'Hello, world' / 'Hi!'
output blocks : [ 'Hi!!', '', '' ]
original text blocks : [ 'Hel', 'lo,', ' world!' ]
search / replace: 'Hel' / 'Hal'
output blocks : [ 'Hal', 'lo,', ' world!' ]
@param instance document: The original document
@param str search: The text to search for (regexp)
@param mixed replace: The replacement text or lxml.etree element to
append, or a list of etree elements
@param int bs: See above
@return instance The document with replacement applied
'''
# Enables debug output
DEBUG = False
newdocument = document
# Compile the search regexp
searchre = re.compile(search)
# Will match against searchels. Searchels is a list that contains last
# n text elements found in the document. 1 < n < bs
searchels = []
for element in newdocument.iter():
if element.tag == '{%s}t' % nsprefixes['w']: # t (text) elements
if element.text:
# Add this element to searchels
searchels.append(element)
if len(searchels) > bs:
# If searchels is too long, remove the first element
searchels.pop(0)
# Search all combinations of searchels, starting from
# smaller up to bigger ones
# l = search length
# s = search start
# e = element IDs to merge
found = False
for l in range(1,len(searchels)+1):
if found:
break
#print "slen:", l
for s in range(len(searchels)):
if found:
break
if s+l <= len(searchels):
e = range(s,s+l)
#print "elems:", e
txtsearch = ''
for k in e:
txtsearch += searchels[k].text
# Search for the text in the whole txtsearch
match = searchre.search(txtsearch)
if match:
found = True
# I've found something :)
if DEBUG:
log.debug("Found element!")
log.debug("Search regexp: %s", searchre.pattern)
log.debug("Requested replacement: %s", replace)
log.debug("Matched text: %s", txtsearch)
log.debug( "Matched text (splitted): %s", map(lambda i:i.text,searchels))
log.debug("Matched at position: %s", match.start())
log.debug( "matched in elements: %s", e)
if isinstance(replace, etree._Element):
log.debug("Will replace with XML CODE")
elif isinstance(replace, (list, tuple)):
log.debug("Will replace with LIST OF ELEMENTS")
else:
log.debug("Will replace with: %s", re.sub(search, replace, txtsearch))
curlen = 0
replaced = False
for i in e:
curlen += len(searchels[i].text)
if curlen > match.start() and not replaced:
# The match occurred in THIS element. Put in the
# whole replacement text
if isinstance(replace, etree._Element):
# Convert to a list and process it later
replace = [ replace, ]
if isinstance(replace, (list,tuple)):
# I'm replacing with a list of etree elements
# clear the text in the tag and append the element after the
# parent paragraph
# (because t elements cannot have children)
p = findTypeParent(searchels[i], '{%s}p' % nsprefixes['w'])
searchels[i].text = re.sub(search,'',txtsearch)
insindex = p.getparent().index(p) + 1
for r in replace:
p.getparent().insert(insindex, r)
insindex += 1
else:
# Replacing with pure text
searchels[i].text = re.sub(search,replace,txtsearch)
replaced = True
log.debug("Replacing in element #: %s", i)
else:
# Clears the other text elements
searchels[i].text = ''
return newdocument
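# Hedged example of advReplace(): substituting a template placeholder that may
# be split across up to three adjacent text runs; the placeholder name is made up.
def _example_advreplace(document):
    return advReplace(document, '__CUSTOMER__', 'ACME Corp', bs=3)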
def getdocumenttext(document):
'''Return the raw text of a document, as a list of paragraphs.'''
paratextlist=[]
# Compile a list of all paragraph (p) elements
paralist = []
for element in document.iter():
# Find p (paragraph) elements
if element.tag == '{'+nsprefixes['w']+'}p':
paralist.append(element)
# Since a single sentence might be spread over multiple text elements, iterate through each
# paragraph, appending all text (t) children to that paragraphs text.
for para in paralist:
paratext=u''
# Loop through each paragraph
for element in para.iter():
# Find t (text) elements
if element.tag == '{'+nsprefixes['w']+'}t':
if element.text:
paratext = paratext+element.text
elif element.tag == '{'+nsprefixes['w']+'}tab':
paratext = paratext + '\t'
# Add our completed paragraph text to the list of paragraph text
if not len(paratext) == 0:
paratextlist.append(paratext)
return paratextlist
def coreproperties(title,subject,creator,keywords,lastmodifiedby=None):
'''Create core properties (common document properties referred to in the 'Dublin Core' specification).
See appproperties() for other stuff.'''
coreprops = makeelement('coreProperties',nsprefix='cp')
coreprops.append(makeelement('title',tagtext=title,nsprefix='dc'))
coreprops.append(makeelement('subject',tagtext=subject,nsprefix='dc'))
coreprops.append(makeelement('creator',tagtext=creator,nsprefix='dc'))
coreprops.append(makeelement('keywords',tagtext=','.join(keywords),nsprefix='cp'))
if not lastmodifiedby:
lastmodifiedby = creator
coreprops.append(makeelement('lastModifiedBy',tagtext=lastmodifiedby,nsprefix='cp'))
coreprops.append(makeelement('revision',tagtext='1',nsprefix='cp'))
coreprops.append(makeelement('category',tagtext='Examples',nsprefix='cp'))
coreprops.append(makeelement('description',tagtext='Examples',nsprefix='dc'))
currenttime = time.strftime('%Y-%m-%dT%H:%M:%SZ')
# Document creation and modify times
# Problem here: we have an attribute whose name uses one namespace, and that
# attribute's value uses another namespace.
# We're creating the element from a string as a workaround...
for doctime in ['created','modified']:
coreprops.append(etree.fromstring('''<dcterms:'''+doctime+''' xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xmlns:dcterms="http://purl.org/dc/terms/" xsi:type="dcterms:W3CDTF">'''+currenttime+'''</dcterms:'''+doctime+'''>'''))
return coreprops
def appproperties():
'''Create app-specific properties. See coreproperties() for more common document properties.'''
appprops = makeelement('Properties',nsprefix='ep')
appprops = etree.fromstring(
b'''<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
<Properties xmlns="http://schemas.openxmlformats.org/officeDocument/2006/extended-properties" xmlns:vt="http://schemas.openxmlformats.org/officeDocument/2006/docPropsVTypes"></Properties>''')
props = {
'Template':'Normal.dotm',
'TotalTime':'6',
'Pages':'1',
'Words':'83',
'Characters':'475',
'Application':'Microsoft Word 12.0.0',
'DocSecurity':'0',
'Lines':'12',
'Paragraphs':'8',
'ScaleCrop':'false',
'LinksUpToDate':'false',
'CharactersWithSpaces':'583',
'SharedDoc':'false',
'HyperlinksChanged':'false',
'AppVersion':'12.0000',
}
for prop in props:
appprops.append(makeelement(prop,tagtext=props[prop],nsprefix=None))
return appprops
def websettings():
'''Generate websettings'''
web = makeelement('webSettings')
web.append(makeelement('allowPNG'))
web.append(makeelement('doNotSaveAsSingleFile'))
return web
def relationshiplist():
relationshiplist = [
['http://schemas.openxmlformats.org/officeDocument/2006/relationships/numbering','numbering.xml'],
['http://schemas.openxmlformats.org/officeDocument/2006/relationships/styles','styles.xml'],
['http://schemas.openxmlformats.org/officeDocument/2006/relationships/settings','settings.xml'],
['http://schemas.openxmlformats.org/officeDocument/2006/relationships/webSettings','webSettings.xml'],
['http://schemas.openxmlformats.org/officeDocument/2006/relationships/fontTable','fontTable.xml'],
['http://schemas.openxmlformats.org/officeDocument/2006/relationships/theme','theme/theme1.xml'],
]
return relationshiplist
def wordrelationships(relationshiplist):
'''Generate a Word relationships file'''
# Default list of relationships
# FIXME: using string hack instead of making element
#relationships = makeelement('Relationships',nsprefix='pr')
relationships = etree.fromstring(
'''<Relationships xmlns="http://schemas.openxmlformats.org/package/2006/relationships">
</Relationships>'''
)
count = 0
for relationship in relationshiplist:
# Relationship IDs (rId) start at 1.
relationships.append(makeelement('Relationship',attributes={'Id':'rId'+str(count+1),
'Type':relationship[0],'Target':relationship[1]},nsprefix=None))
count += 1
return relationships
def savedocx(document, coreprops, appprops, contenttypes, websettings, wordrelationships, output,
temp_dir=None):
'''Save a modified document'''
assert temp_dir
assert os.path.isdir(temp_dir)
docx_dir = join(temp_dir, _DOCX_DIR_NAME)
# Copy whole template to temporary directory
distutils.dir_util.copy_tree(TEMPLATE_DIR, docx_dir) # directory can already exist
docxfile = zipfile.ZipFile(output,mode='w',compression=zipfile.ZIP_DEFLATED)
# Move to the template data path
prev_dir = os.path.abspath('.') # save previous working dir
os.chdir(docx_dir)
# Serialize our trees into out zip file
treesandfiles = {document:'word/document.xml',
coreprops:'docProps/core.xml',
appprops:'docProps/app.xml',
contenttypes:'[Content_Types].xml',
websettings:'word/webSettings.xml',
wordrelationships:'word/_rels/document.xml.rels'}
for tree in treesandfiles:
log.info('Saving: '+treesandfiles[tree] )
treestring = etree.tostring(tree, pretty_print=True)
docxfile.writestr(treesandfiles[tree],treestring)
# Add & compress support files
files_to_ignore = ['.DS_Store'] # nuisance from some os's
for dirpath,dirnames,filenames in os.walk('.'):
for filename in filenames:
if filename in files_to_ignore:
continue
templatefile = join(dirpath, filename)
archivename = templatefile[2:]
log.info('Saving: %s', archivename)
docxfile.write(templatefile, archivename)
log.info('Saved new file to: %r', output)
docxfile.close()
os.chdir(prev_dir) # restore previous working dir
return
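# End-to-end sketch of assembling and saving a file with the helpers above. It
# assumes the caller already built the main `document` tree earlier in this
# module; titles, keywords and paths are placeholder values.
def _example_save(document, output_path, temp_dir):
    relationships = relationshiplist()
    core = coreproperties(title='Example', subject='Demo', creator='Author',
                          keywords=['python', 'docx'])
    savedocx(document, core, appproperties(), contenttypes(), websettings(),
             wordrelationships(relationships), output_path, temp_dir=temp_dir)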
| 43.305376
| 242
| 0.590133
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 18,062
| 0.448478
|
3d49f7eaf598f54df886dcfb77904d84e8c9f173
| 108
|
py
|
Python
|
nylas/util/__init__.py
|
nylas/nylas-production-python
|
a0979cd104a43f80750b2361aa580516b8dbfcfc
|
[
"Apache-2.0",
"MIT"
] | 19
|
2015-11-20T12:38:34.000Z
|
2022-01-13T15:40:25.000Z
|
nylas/api/__init__.py
|
nylas/nylas-production-python
|
a0979cd104a43f80750b2361aa580516b8dbfcfc
|
[
"Apache-2.0",
"MIT"
] | null | null | null |
nylas/api/__init__.py
|
nylas/nylas-production-python
|
a0979cd104a43f80750b2361aa580516b8dbfcfc
|
[
"Apache-2.0",
"MIT"
] | 10
|
2016-03-12T00:38:54.000Z
|
2018-12-13T05:58:13.000Z
|
from pkgutil import extend_path
# Allow out-of-tree submodules.
__path__ = extend_path(__path__, __name__)
| 21.6
| 42
| 0.805556
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 31
| 0.287037
|
3d4abb2320ad6d11a7ab8694b9e07545a91044dd
| 885
|
py
|
Python
|
project/migrations/0002_auto_20180801_1907.py
|
mcdale/django-material
|
3bd5725cc4a4b6f2fb1439333e9033d0cd2b6a9c
|
[
"MIT"
] | null | null | null |
project/migrations/0002_auto_20180801_1907.py
|
mcdale/django-material
|
3bd5725cc4a4b6f2fb1439333e9033d0cd2b6a9c
|
[
"MIT"
] | 2
|
2020-07-21T12:52:29.000Z
|
2021-06-17T20:23:36.000Z
|
project/migrations/0002_auto_20180801_1907.py
|
mcdale/django-material
|
3bd5725cc4a4b6f2fb1439333e9033d0cd2b6a9c
|
[
"MIT"
] | null | null | null |
# Generated by Django 2.0.8 on 2018-08-01 19:07
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('project', '0001_initial'),
]
operations = [
migrations.AlterModelOptions(
name='knowledgearea',
options={'verbose_name': 'Knowledge Area', 'verbose_name_plural': 'Knowledge Areas'},
),
migrations.AddField(
model_name='knowledgeareaindexpage',
name='knowledge_area',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to='project.KnowledgeArea'),
),
migrations.AddField(
model_name='knowledgeareapage',
name='abstract',
field=models.TextField(blank=True, help_text='Text to describe the article'),
),
]
| 30.517241
| 121
| 0.628249
| 759
| 0.857627
| 0
| 0
| 0
| 0
| 0
| 0
| 275
| 0.310734
|
3d4dc9ef0428e142bdd3d4e674dd5dce9410a4ab
| 8,925
|
py
|
Python
|
src/core/src/tortuga/objects/softwareProfile.py
|
sutasu/tortuga
|
48d7cde4fa652346600b217043b4a734fa2ba455
|
[
"Apache-2.0"
] | 33
|
2018-03-02T17:07:39.000Z
|
2021-05-21T18:02:51.000Z
|
src/core/src/tortuga/objects/softwareProfile.py
|
sutasu/tortuga
|
48d7cde4fa652346600b217043b4a734fa2ba455
|
[
"Apache-2.0"
] | 201
|
2018-03-05T14:28:24.000Z
|
2020-11-23T19:58:27.000Z
|
src/core/src/tortuga/objects/softwareProfile.py
|
sutasu/tortuga
|
48d7cde4fa652346600b217043b4a734fa2ba455
|
[
"Apache-2.0"
] | 23
|
2018-03-02T17:21:59.000Z
|
2020-11-18T14:52:38.000Z
|
# Copyright 2008-2018 Univa Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# pylint: disable=no-member
from functools import cmp_to_key
from typing import Dict, Iterable, Optional
import tortuga.objects.admin
import tortuga.objects.component
import tortuga.objects.hardwareProfile
import tortuga.objects.kitSource
import tortuga.objects.nic
import tortuga.objects.node
import tortuga.objects.osInfo
import tortuga.objects.partition
from tortuga.objects.tortugaObject import TortugaObject, TortugaObjectList
from tortuga.utility.helper import str2bool
from .validators import RegexValidator
class SoftwareProfile(TortugaObject): \
# pylint: disable=too-many-public-methods
ROOT_TAG = 'softwareprofile'
validators = {
'name': RegexValidator(pattern='[a-zA-Z0-9-_]+')
}
def __init__(self, name=None):
TortugaObject.__init__(
self, {
'name': name,
'admins': TortugaObjectList(),
'partitions': TortugaObjectList(),
'components': TortugaObjectList(),
'nodes': TortugaObjectList(),
'kitsources': TortugaObjectList(),
}, ['name', 'id'], SoftwareProfile.ROOT_TAG)
def __repr__(self):
return self.getName()
def setId(self, id_):
""" Set software profile id."""
self['id'] = id_
def getId(self):
""" Return software profile id. """
return self.get('id')
def setName(self, name):
""" Set software profile name."""
self['name'] = name
def getName(self):
""" Return software profile name. """
return self.get('name')
def setDescription(self, description):
""" Set description."""
self['description'] = description
def getDescription(self):
""" Return description. """
return self.get('description')
def setKernel(self, kernel):
""" Set kernel."""
self['kernel'] = kernel
def getKernel(self):
""" Return kernel. """
return self.get('kernel')
def setKernelParams(self, kernelParams):
""" Set kernel params."""
self['kernelParams'] = kernelParams
def getKernelParams(self):
""" Return kernel params. """
return self.get('kernelParams')
def setInitrd(self, initrd):
""" Set initrd."""
self['initrd'] = initrd
def getInitrd(self):
""" Return initird. """
return self.get('initrd')
def setOsId(self, osId):
""" Set OS id."""
self['osId'] = osId
def getOsId(self):
""" Return OS id. """
return self.get('osId')
def setType(self, type_):
""" Set type."""
self['type'] = type_
def getType(self):
""" Return type. """
return self.get('type')
def setMinNodes(self, val):
self['minNodes'] = val
def getMinNodes(self):
return self.get('minNodes')
def setMaxNodes(self, value):
self['maxNodes'] = value
def getMaxNodes(self):
return self.get('maxNodes')
def setLockedState(self, val):
self['lockedState'] = val
def getLockedState(self):
return self.get('lockedState')
def setOsInfo(self, osInfo):
""" Set OS info. """
self['os'] = osInfo
def getOsInfo(self):
""" Get OS info. """
return self.get('os')
def setComponents(self, comp):
""" Set components. """
self['components'] = comp
def getComponents(self):
""" Get Components """
return self.get('components')
def setAdmins(self, admins):
""" set Admins """
self['admins'] = admins
def getAdmins(self):
""" Get Admins """
return self.get('admins')
def setPartitions(self, val):
self['partitions'] = val
def getPartitions(self):
""" We want to always return the partitions sorted by
device and partition number """
partitions = self.get('partitions')
if partitions:
partitions.sort(key=cmp_to_key(_partition_compare))
return partitions
def setNodes(self, val):
self['nodes'] = val
def getNodes(self):
return self.get('nodes')
def setUsableHardwareProfiles(self, val):
self['hardwareprofiles'] = val
def getUsableHardwareProfiles(self):
return self.get('hardwareprofiles')
def getKitSources(self):
return self.get('kitsources')
def setKitSources(self, kitsources):
self['kitsources'] = kitsources
def getTags(self) -> Dict[str, str]:
"""
Gets all the tags for this software profile.
:return Dict[str, str]: the tags
"""
return self.get('tags')
def setTags(self, tags: Dict[str, str]):
"""
Sets the tags for this software profile.
:param Dict[str, str] tags: the tags to set for this software profile
"""
self['tags'] = tags
def getMetadata(self):
return self.get('metadata')
def setMetadata(self, value):
self['metadata'] = value
def getDataRoot(self):
return self.get('dataRoot')
def setDataRoot(self, value):
self['dataRoot'] = value
def getDataRsync(self):
return self.get('dataRsync')
def setDataRsync(self, value):
self['dataRsync'] = value
@staticmethod
def getKeys():
return [
'id',
'name',
'osId',
'description',
'kernel',
'initrd',
'kernelParams',
'type',
'minNodes',
'maxNodes',
'lockedState',
'isIdle',
'metadata',
'tags',
'dataRoot',
'dataRsync',
]
@classmethod
def getFromDict(cls, _dict, ignore: Optional[Iterable[str]] = None):
""" Get software profile from _dict. """
softwareProfile = super(SoftwareProfile, cls).getFromDict(_dict)
softwareProfile.setAdmins(
tortuga.objects.admin.Admin.getListFromDict(_dict))
softwareProfile.setComponents(
tortuga.objects.component.Component.getListFromDict(_dict))
softwareProfile.setNodes(
tortuga.objects.node.Node.getListFromDict(_dict))
osDict = _dict.get(tortuga.objects.osInfo.OsInfo.ROOT_TAG)
if osDict:
softwareProfile.setOsInfo(
tortuga.objects.osInfo.OsInfo.getFromDict(osDict))
softwareProfile.setPartitions(
tortuga.objects.partition.Partition.getListFromDict(_dict))
softwareProfile.\
setUsableHardwareProfiles(
tortuga.objects.hardwareProfile.HardwareProfile.
getListFromDict(_dict))
# kitsources
softwareProfile.setKitSources(
tortuga.objects.kitSource.KitSource.getListFromDict(_dict))
return softwareProfile
@classmethod
def getFromDbDict(cls, _dict, ignore: Optional[Iterable[str]] = None):
softwareProfile = super(SoftwareProfile, cls).getFromDict(
_dict, ignore=ignore)
softwareProfile.setAdmins(
tortuga.objects.admin.Admin.getListFromDbDict(_dict))
softwareProfile.setComponents(
tortuga.objects.component.Component.getListFromDbDict(_dict))
if not ignore or 'nodes' not in ignore:
softwareProfile.setNodes(
tortuga.objects.node.Node.getListFromDbDict(_dict))
osDict = _dict.get(tortuga.objects.osInfo.OsInfo.ROOT_TAG)
if osDict:
softwareProfile.setOsInfo(
tortuga.objects.osInfo.OsInfo.getFromDbDict(
osDict.__dict__))
softwareProfile.setPartitions(
tortuga.objects.partition.Partition.getListFromDbDict(_dict))
softwareProfile.setUsableHardwareProfiles(
tortuga.objects.hardwareProfile.HardwareProfile.
getListFromDbDict(_dict))
tags = {tag.name: tag.value for tag in _dict.get('tags', [])}
softwareProfile.setTags(tags)
return softwareProfile
def _partition_compare(x, y):
deviceDiff = x.getDeviceTuple()[0] - y.getDeviceTuple()[0]
if deviceDiff == 0:
deviceDiff = x.getDeviceTuple()[1] - y.getDeviceTuple()[1]
return deviceDiff
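# Minimal illustrative sketch: building a profile object directly and setting a
# few fields via the accessors defined above; the values are placeholders.
def _example_software_profile():
    profile = SoftwareProfile(name='compute')
    profile.setDescription('Default compute software profile')
    profile.setTags({'env': 'dev'})
    return profile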
| 27.631579
| 77
| 0.607731
| 7,608
| 0.852437
| 0
| 0
| 2,773
| 0.3107
| 0
| 0
| 2,239
| 0.250868
|
3d4e4b8f64fdbc0b44c87b38d3ece2354dc7dd2f
| 579
|
py
|
Python
|
src/utils/workspace.py
|
sidcmsft/ResponsibleAI
|
a8c691574690a8316e054c21ec9e6d0e0ca4e494
|
[
"MIT"
] | 2
|
2020-09-03T16:13:56.000Z
|
2021-02-18T15:58:41.000Z
|
src/utils/workspace.py
|
sidcmsft/ResponsibleAI
|
a8c691574690a8316e054c21ec9e6d0e0ca4e494
|
[
"MIT"
] | null | null | null |
src/utils/workspace.py
|
sidcmsft/ResponsibleAI
|
a8c691574690a8316e054c21ec9e6d0e0ca4e494
|
[
"MIT"
] | 4
|
2020-09-03T16:14:19.000Z
|
2021-05-05T05:59:59.000Z
|
import sys
from azureml.core import Workspace
from azureml.core.authentication import ServicePrincipalAuthentication
def get_workspace(
workspace_name: str,
resource_group: str,
subscription_id: str):
try:
aml_workspace = Workspace.get(
name=workspace_name,
subscription_id=subscription_id,
resource_group=resource_group)
return aml_workspace
except Exception as caught_exception:
print("Error while retrieving Workspace...")
print(str(caught_exception))
sys.exit(1)
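# Minimal usage sketch; the workspace, resource group and subscription values
# below are placeholders.
if __name__ == '__main__':
    ws = get_workspace(
        workspace_name='my-aml-workspace',
        resource_group='my-resource-group',
        subscription_id='00000000-0000-0000-0000-000000000000')
    print(ws.name)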
| 26.318182
| 70
| 0.680484
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 37
| 0.063903
|
3d4f711206b2fd9dbd8a3177d589e3c33373c8b1
| 822
|
py
|
Python
|
tools/test_tmp.py
|
Z-XQ/mmdetection
|
9f3756889969c0c21e6d84e0d993f302e7f07460
|
[
"Apache-2.0"
] | null | null | null |
tools/test_tmp.py
|
Z-XQ/mmdetection
|
9f3756889969c0c21e6d84e0d993f302e7f07460
|
[
"Apache-2.0"
] | null | null | null |
tools/test_tmp.py
|
Z-XQ/mmdetection
|
9f3756889969c0c21e6d84e0d993f302e7f07460
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
# @Time    : 2020/9/28 9:49 PM
# @Author : zxq
# @File : test_tmp.py
# @Software: PyCharm
import mmcv
import torch
from mmdet.datasets import build_dataset
from mmdet.models import build_detector
from mmdet.apis import train_detector, inference_detector, show_result_pyplot
from tools.train_tmp import CustomerTrain
customer_train = CustomerTrain()
cfg = customer_train.cfg
# Build dataset
datasets = [build_dataset(cfg.data.train)]
# Build the detector
model = build_detector(
cfg.model, train_cfg=cfg.train_cfg, test_cfg=cfg.test_cfg)
# Add an attribute for visualization convenience
model.CLASSES = datasets[0].CLASSES
img = mmcv.imread('../data/kitti_tiny/training/image_2/000068.jpeg')
model.cfg = cfg
result = inference_detector(model, img)
show_result_pyplot(model, img, result)
| 27.4
| 77
| 0.770073
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 248
| 0.300242
|
3d4fb10a65167e4ffb44c4897ed5483e2f0d23c0
| 2,439
|
py
|
Python
|
users/models.py
|
Mansi3546/CareerCradle
|
e040e763b1058aef937deb9eac4e1f9b2421ae25
|
[
"MIT"
] | null | null | null |
users/models.py
|
Mansi3546/CareerCradle
|
e040e763b1058aef937deb9eac4e1f9b2421ae25
|
[
"MIT"
] | 1
|
2021-04-14T12:24:41.000Z
|
2021-04-18T07:33:11.000Z
|
users/models.py
|
Mansi3546/CareerCradle
|
e040e763b1058aef937deb9eac4e1f9b2421ae25
|
[
"MIT"
] | 3
|
2021-04-06T13:54:44.000Z
|
2021-05-03T17:28:59.000Z
|
from django.contrib.auth.models import AbstractBaseUser, BaseUserManager, PermissionsMixin
from django.db import models
from django.utils import timezone
from django.db.models import BooleanField
class UserManager(BaseUserManager):
def _create_user(self, email, password, usertype, is_staff, is_superuser, **extra_fields):
if not email:
raise ValueError('Users must have an email address')
now = timezone.now()
email = self.normalize_email(email)
user = self.model(
email=email,
usertype=usertype,
is_staff=is_staff,
is_active=True,
is_superuser=is_superuser,
last_login=now,
date_joined=now,
**extra_fields
)
user.set_password(password)
user.save(using=self._db)
return user
def create_candidate(self, email, password, **extra_fields):
return self._create_user(email, password, 1, False, False, **extra_fields)
def create_recruiter(self, email, password, **extra_fields):
return self._create_user(email, password, 0, False, False, **extra_fields)
def create_staff(self, email, password, **extra_fields):
return self._create_user(email, password, None, True, False, **extra_fields)
def create_superuser(self, email, password, **extra_fields):
user=self._create_user(email, password, None, True, True, **extra_fields)
user.save(using=self._db)
return user
class User(AbstractBaseUser, PermissionsMixin):
'''
Usertype can have 3 values:
Candidate - 1
Recruiter - 0
Staff - None
'''
USERTYPES = [
(1, 'Candidates'),
(0, 'Recruiters'),
(None, 'Staff'),
]
email = models.EmailField(max_length=254, unique=True)
name = models.CharField(max_length=254, null=True, blank=True)
usertype = models.PositiveSmallIntegerField(choices=USERTYPES, null=True)
is_staff = models.BooleanField(default=False)
is_superuser = models.BooleanField(default=False)
is_active = models.BooleanField(default=True)
last_login = models.DateTimeField(null=True, blank=True)
date_joined = models.DateTimeField(auto_now_add=True)
USERNAME_FIELD = 'email'
EMAIL_FIELD = 'email'
REQUIRED_FIELDS = []
objects = UserManager()
def get_absolute_url(self):
return "/users/%i/" % (self.pk)
| 33.410959
| 94
| 0.660107
| 2,235
| 0.916359
| 0
| 0
| 0
| 0
| 0
| 0
| 199
| 0.081591
|
3d4fe154eecbbf658beca88c248a4a382f051e30
| 2,656
|
py
|
Python
|
scripts/dump_ubd.py
|
sbreuers/BiternionNets-ROS
|
954d6a2fbd97a01231f3411b366f3a3cccae5cf9
|
[
"MIT"
] | 1
|
2018-08-29T07:11:22.000Z
|
2018-08-29T07:11:22.000Z
|
scripts/dump_ubd.py
|
sbreuers/BiternionNets-ROS
|
954d6a2fbd97a01231f3411b366f3a3cccae5cf9
|
[
"MIT"
] | null | null | null |
scripts/dump_ubd.py
|
sbreuers/BiternionNets-ROS
|
954d6a2fbd97a01231f3411b366f3a3cccae5cf9
|
[
"MIT"
] | 1
|
2018-10-20T12:09:58.000Z
|
2018-10-20T12:09:58.000Z
|
#!/usr/bin/env python
# encoding: utf-8
from os.path import abspath, expanduser, join as pjoin
import os
from sys import stderr
import cv2
import rospy
from cv_bridge import CvBridge
import message_filters
from sensor_msgs.msg import Image as ROSImage
# Distinguish between STRANDS and SPENCER.
try:
from rwth_perception_people_msgs.msg import UpperBodyDetector
except ImportError:
from upper_body_detector.msg import UpperBodyDetector
def cutout(img, detrect, hfact=3):
x, y, w, h = detrect
# Need to be careful for negative indices in conjunction with
# numpy's (and thus OpenCV's) wrap-around.
y2, x2 = y+hfact*h, x+w
y1, x1 = max(y, 0), max(x, 0)
return img[y1:y2, x1:x2]
class Dumper(object):
def __init__(self):
rospy.loginfo("Initializing UBD dumper")
self.counter = 0
self.dirname = abspath(expanduser(rospy.get_param("~dir", ".")))
self.full = rospy.get_param("~fullbody", "False") in ("True", "true", "yes", "1", 1, True)
self.hfact = 3 if self.full else 1
rospy.loginfo("Dumping {} into {}".format("full-bodies" if self.full else "upper-bodies", self.dirname))
# Create target directory, or warn if non-empty.
try:
if len(os.listdir(self.dirname)) > 0:
rospy.logwarn("CAREFUL, files may be overwritten since directory is not empty: {}".format(self.dirname))
except OSError:
os.makedirs(self.dirname)
subs = [
message_filters.Subscriber(rospy.get_param("~ubd", "/upper_body_detector/detections"), UpperBodyDetector),
message_filters.Subscriber(rospy.get_param("~rgb", "/head_xtion/rgb/image_rect_color"), ROSImage),
message_filters.Subscriber(rospy.get_param("~d", "/head_xtion/depth/image_rect_meters"), ROSImage),
]
ts = message_filters.ApproximateTimeSynchronizer(subs, queue_size=50, slop=0.1)
ts.registerCallback(self.cb)
def cb(self, ubd, rgb, d):
b = CvBridge()
rgb = b.imgmsg_to_cv2(rgb)[:,:,::-1] # Need to do BGR-RGB conversion manually.
d = b.imgmsg_to_cv2(d)
for i, detrect in enumerate(zip(ubd.pos_x, ubd.pos_y, ubd.width, ubd.height)):
cv2.imwrite(pjoin(self.dirname, "{:06d}.png".format(self.counter)), cutout(rgb, detrect, self.hfact))
det_d = cutout(d, detrect, self.hfact)
stderr.write("\r{}".format(self.counter)) ; stderr.flush()
self.counter += 1
if __name__ == "__main__":
rospy.init_node("dump_ubd")
d = Dumper()
rospy.spin()
rospy.loginfo("Dumped a total of {} UBDs.".format(d.counter))
| 34.947368
| 120
| 0.651355
| 1,778
| 0.669428
| 0
| 0
| 0
| 0
| 0
| 0
| 645
| 0.242846
|
3d5041bc56fbfaccca116aec98a24987eddba5f7
| 2,046
|
py
|
Python
|
site_scons/site_tools/findPkgPath.py
|
fermi-lat/SConsFiles
|
54124ec1031142b4fee76b12fdcfe839845e9fda
|
[
"BSD-3-Clause"
] | null | null | null |
site_scons/site_tools/findPkgPath.py
|
fermi-lat/SConsFiles
|
54124ec1031142b4fee76b12fdcfe839845e9fda
|
[
"BSD-3-Clause"
] | null | null | null |
site_scons/site_tools/findPkgPath.py
|
fermi-lat/SConsFiles
|
54124ec1031142b4fee76b12fdcfe839845e9fda
|
[
"BSD-3-Clause"
] | null | null | null |
import os,platform,os.path
# Usual case: find where package is; add to env include path
# If 'subdir' argument, instead set construction env variable
# to point to it
def generate(env, **kw):
pkgName = kw.get('package', '')
if pkgName == '':
print('findPkgPath called with no arg')
return None
path = None
subDir = kw.get('subDir', '')
usualCase = (subDir == '')
# paths all start with .\ so strip it off
#if forStatic.first == True:
# print 'findPkgPath called with argument ', pkgName
## First look in env['packageNameList']. If we *have* supersede,
## this is the supersede list. Otherwise it's the base list
for p in env['packageNameList']:
bname = os.path.basename(str(p[2:]))
if pkgName == bname:
if env.GetOption('supersede') == '.':
path = '#' + str(p[2:])
else:
path = os.path.join(str(env['absSuperPath']), pkgName)
if pkgName + '-' == bname[:len(pkgName)+1]:
if env.GetOption('supersede') == '.':
path = '#' + str(p[2:])
else: path = os.path.join(str(env['absSuperPath']), str(p[2:]))
# If not found up to this point, we must be in case
# -have supersede dir
# -package we have been called for is not in supersede area
if path == None and env.GetOption('supersede') != '.': # look in base
for p in env['basePackageNameList']:
bname = os.path.basename(str(p[2:]))
##if pkgName == bname: path = '#' + str(p[2:])
if pkgName == bname:
path = os.path.join(env['absBasePath'], str(p[2:]))
if pkgName + '-' == bname[:len(pkgName)+1]:
path = os.path.join(env['absBasePath'], str(p[2:]))
if path != None:
if usualCase:
env.AppendUnique(CPPPATH = [path])
else:
conVarName = pkgName + '_' + subDir
env[conVarName] = os.path.join(path, subDir)
def exists(env):
return 1
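# Hedged usage sketch from an SConscript: SCons forwards extra keyword
# arguments from env.Tool() to generate(); the package and subDir names below
# are made up.
#
#   env.Tool('findPkgPath', package='facilities')             # appends to CPPPATH
#   env.Tool('findPkgPath', package='xmlBase', subDir='src')  # sets env['xmlBase_src']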
| 40.117647
| 75
| 0.544966
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 796
| 0.389052
|
3d506074ec9756c4fb5eb16d2309de5778a6c989
| 1,380
|
py
|
Python
|
examples/model_zoo/test_binaries.py
|
Embracing/unrealcv
|
19305da8554c3a0e683a5e27a1e487cc2cf42776
|
[
"MIT"
] | 1,617
|
2016-09-10T04:41:33.000Z
|
2022-03-31T20:03:28.000Z
|
examples/model_zoo/test_binaries.py
|
Embracing/unrealcv
|
19305da8554c3a0e683a5e27a1e487cc2cf42776
|
[
"MIT"
] | 199
|
2016-09-13T09:40:59.000Z
|
2022-03-16T02:37:23.000Z
|
examples/model_zoo/test_binaries.py
|
Embracing/unrealcv
|
19305da8554c3a0e683a5e27a1e487cc2cf42776
|
[
"MIT"
] | 431
|
2016-09-10T03:20:35.000Z
|
2022-03-19T13:44:21.000Z
|
import subprocess, os
win_binary_path = 'UE4Binaries/{project_name}/WindowsNoEditor/{project_name}.exe'
linux_binary_path = './UE4Binaries/{project_name}/LinuxNoEditor/{project_name}/Binaries/Linux/{project_name}'
mac_binary_path = './UE4Binaries/{project_name}/MacNoEditor/{project_name}.app'
project_names = [
'RealisticRendering', 'ArchinteriorsVol2Scene1', 'ArchinteriorsVol2Scene2',
'ArchinteriorsVol2Scene3', 'UrbanCity', 'Matinee', 'PhotorealisticCharacter'
]
binaries = []
binaries += [linux_binary_path.format(project_name = v) for v in project_names]
binaries += [win_binary_path.format(project_name = v) for v in project_names]
binaries += [mac_binary_path.format(project_name = v) for v in project_names]
if __name__ == '__main__':
if not os.path.isdir('output'):
os.mkdir('output')
for binary_path in binaries:
project_name = os.path.basename(binary_path).split('.')[0]
output_folder = os.path.join('output', project_name)
if not os.path.isfile(binary_path) and not os.path.isdir(binary_path):
print('Can not find binary "%s", skip' % binary_path)
continue
print('Testing %s ..., output will be saved to "%s"' % (binary_path, output_folder))
subprocess.call([
'python', 'examples/commands_demo.py',
binary_path, '--output', output_folder
])
| 41.818182
| 109
| 0.698551
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 513
| 0.371739
|
3d50aca1b7a9e65ec91502519f8c8985d2d96649
| 4,629
|
py
|
Python
|
pyslam/feature_tracker_configs.py
|
velvetThunder25/Feature-based-Monocular-Visual-Odometry
|
e6b108e8ce71ec0ec535932e2fc1023fc6fcaf92
|
[
"MIT"
] | 7
|
2022-01-12T22:46:06.000Z
|
2022-03-16T13:57:52.000Z
|
pyslam/feature_tracker_configs.py
|
velvetThunder25/Feature-based-Monocular-Visual-Odometry
|
e6b108e8ce71ec0ec535932e2fc1023fc6fcaf92
|
[
"MIT"
] | null | null | null |
pyslam/feature_tracker_configs.py
|
velvetThunder25/Feature-based-Monocular-Visual-Odometry
|
e6b108e8ce71ec0ec535932e2fc1023fc6fcaf92
|
[
"MIT"
] | 1
|
2022-01-12T22:52:29.000Z
|
2022-01-12T22:52:29.000Z
|
"""
* This file is part of PYSLAM
*
* Copyright (C) 2016-present Luigi Freda <luigi dot freda at gmail dot com>
*
* PYSLAM is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* PYSLAM is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with PYSLAM. If not, see <http://www.gnu.org/licenses/>.
"""
from feature_tracker import feature_tracker_factory, FeatureTrackerTypes
from feature_manager import feature_manager_factory
from feature_types import FeatureDetectorTypes, FeatureDescriptorTypes, FeatureInfo
from feature_matcher import feature_matcher_factory, FeatureMatcherTypes
from parameters import Parameters
# some default parameters
kNumFeatures=Parameters.kNumFeatures
kRatioTest=Parameters.kFeatureMatchRatioTest
kTrackerType = FeatureTrackerTypes.DES_BF # default descriptor-based, brute force matching with knn
#kTrackerType = FeatureTrackerTypes.DES_FLANN # default descriptor-based, FLANN-based matching
"""
A collection of ready-to-used feature tracker configurations
"""
class FeatureTrackerConfigs(object):
# Test/Template configuration: you can use this to quickly test
# - your custom parameters and
# - favourite descriptor and detector (check the file feature_types.py)
TEST = dict(num_features=kNumFeatures,
num_levels = 8, # N.B: some detectors/descriptors do not allow to set num_levels or they set it on their own
scale_factor = 1.2, # N.B: some detectors/descriptors do not allow to set scale_factor or they set it on their own
detector_type = FeatureDetectorTypes.ORB2,
descriptor_type = FeatureDescriptorTypes.ORB2,
match_ratio_test = kRatioTest,
tracker_type = kTrackerType)
######################################################
#################### TEST CONFIG COMBINATION #########
######################################################
# Format: T[XX]_[YY]_[ZZ];
# XX is test ID number
# YY is detector name
# ZZ is descriptor name
# there are 42 combinations + some deep learning
test_configs = {
'T1_SIFT':dict(num_features=kNumFeatures,
detector_type = FeatureDetectorTypes.SIFT,
descriptor_type = FeatureDescriptorTypes.SIFT,
match_ratio_test = kRatioTest,
tracker_type = kTrackerType), # done
'T2_SURF':dict(num_features=kNumFeatures,
num_levels = 8,
detector_type = FeatureDetectorTypes.SURF,
descriptor_type = FeatureDescriptorTypes.SURF,
match_ratio_test = kRatioTest,
tracker_type = kTrackerType), # done
'T3_BRISK':dict(num_features=kNumFeatures,
num_levels = 4,
scale_factor = 1.2,
detector_type = FeatureDetectorTypes.BRISK,
descriptor_type = FeatureDescriptorTypes.BRISK,
match_ratio_test = kRatioTest,
tracker_type = kTrackerType), # done
'T4_FAST':dict(num_features=kNumFeatures,
num_levels = 8,
scale_factor = 1.2,
detector_type = FeatureDetectorTypes.FAST,
descriptor_type = FeatureDescriptorTypes.NONE,
tracker_type = FeatureTrackerTypes.LK), # done
'T5_ORB':dict(num_features=kNumFeatures,
num_levels = 8,
scale_factor = 1.2,
detector_type = FeatureDetectorTypes.ORB,
descriptor_type = FeatureDescriptorTypes.ORB,
match_ratio_test = kRatioTest,
tracker_type = kTrackerType),
}
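# Hedged sketch: the factory imported above is assumed to accept these config
# keys as keyword arguments (as the TEST template above suggests).
def _example_make_tracker():
    return feature_tracker_factory(**FeatureTrackerConfigs.TEST)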
| 44.509615
| 159
| 0.579175
| 3,163
| 0.683301
| 0
| 0
| 0
| 0
| 0
| 0
| 1,705
| 0.36833
|
3d524c3bd35810437426c4644ee0f769511b58ea
| 152
|
py
|
Python
|
bindings/python/examples/05b_get_output.py
|
GoldenPedro/iota.rs
|
71464f96b8e29d9fbed34a6ff77e757a112fedd4
|
[
"Apache-2.0"
] | 256
|
2017-06-27T02:37:21.000Z
|
2022-03-28T07:51:48.000Z
|
bindings/python/examples/05b_get_output.py
|
GoldenPedro/iota.rs
|
71464f96b8e29d9fbed34a6ff77e757a112fedd4
|
[
"Apache-2.0"
] | 379
|
2017-06-25T05:49:14.000Z
|
2022-03-29T18:57:11.000Z
|
bindings/python/examples/05b_get_output.py
|
GoldenPedro/iota.rs
|
71464f96b8e29d9fbed34a6ff77e757a112fedd4
|
[
"Apache-2.0"
] | 113
|
2017-06-25T14:07:05.000Z
|
2022-03-30T09:10:12.000Z
|
import iota_client
client = iota_client.Client()
print(
client.get_output("a22cba0667c922cbb1f8bdcaf970b2a881ccd6e88e2fcce50374de2aac7c37720000")
)
| 25.333333
| 93
| 0.848684
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 70
| 0.460526
|
3d5394f2af4816cbcec8e499c06b15d66ed6fb8e
| 920
|
py
|
Python
|
simple_ml/__init__.py
|
Yangruipis/simple_ml
|
09657f6b017b973a5201aa611774d6ac8f0fc0a2
|
[
"MIT"
] | 25
|
2018-04-17T04:38:51.000Z
|
2021-10-09T04:07:53.000Z
|
simple_ml/__init__.py
|
Yangruipis/simple_ml
|
09657f6b017b973a5201aa611774d6ac8f0fc0a2
|
[
"MIT"
] | null | null | null |
simple_ml/__init__.py
|
Yangruipis/simple_ml
|
09657f6b017b973a5201aa611774d6ac8f0fc0a2
|
[
"MIT"
] | 5
|
2018-04-17T05:27:00.000Z
|
2020-12-01T02:55:15.000Z
|
# -*- coding:utf-8 -*-
"""
==================================
Simple Machine Learning
A simple implementation of machine learning algorithms
==================================
"""
from simple_ml.bayes import *
from simple_ml.classify_data import *
from simple_ml.auto import *
from simple_ml.classify_data import *
from simple_ml.ensemble import *
from simple_ml.evaluation import *
from simple_ml.feature_select import *
from simple_ml.knn import *
from simple_ml.logistic import *
from simple_ml.neural_network import *
from simple_ml.pca import *
from simple_ml.regression import *
from simple_ml.support_vector import *
# from simple_ml.svm import *
from simple_ml.tree import *
__all__ = [
'bayes',
'auto',
'classify_data',
'cluster',
'data_handle',
'ensemble',
'evaluation',
'feature_select',
'knn',
'svm',
'logistic',
'neural_network',
'pca',
'regression',
'support_vector',
'tree',
]
| 20
| 38
| 0.644565
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 358
| 0.378436
|
3d53c39285c2bdec8b3434c91e5427cdb7617eb5
| 5,470
|
py
|
Python
|
Modelos/Game.py
|
joaofanti/TrabRedesIIFinal
|
3cae5db7ef88e20d9426043e926260ccedc79d10
|
[
"MIT"
] | 1
|
2017-07-05T01:24:20.000Z
|
2017-07-05T01:24:20.000Z
|
Modelos/Game.py
|
joaofanti/TrabRedesIIFinal
|
3cae5db7ef88e20d9426043e926260ccedc79d10
|
[
"MIT"
] | null | null | null |
Modelos/Game.py
|
joaofanti/TrabRedesIIFinal
|
3cae5db7ef88e20d9426043e926260ccedc79d10
|
[
"MIT"
] | null | null | null |
import sys
sys.path.insert(0, "Modelos/Mapa")
from Map import *
from Item import Item
"""
Defines the class that handles the game logic.
"""
class Game:
"""
Defines a game player.
"""
class Player:
"""
Creates a new player instance
"""
def __init__(self, name, addr, map):
self.Name = name
self.Addr = addr #IP
self.Room = 1 # The player always starts in room 1
self.Inventario = []
self.Inventario.append(Item("Mapa", map))
"""
Creates a new game instance.
"""
def __init__(self, map):
self.Map = map
self.Players = []
"""
Creates a new player. Returns "FAIL" if the player already exists and "OK" if the player was created.
"""
def CriaJogador(self, playerId, addr):
if (self.getPlayer(playerId) != None):
return "FAIL"
self.Players.append(self.Player(playerId, addr, self.Map.showMap()))
return "OK"
"""
Examines the room the player is currently in.
"""
def Examina(self, playerId):
player = self.getPlayer(playerId)
if(player == None):
return "Player nao encontrado"
room = self.Map.getRoom(player.Room)
return room.ToString()
"""
Moves the player to another room.
"""
def Move(self, playerId, direction):
player = self.getPlayer(playerId)
if(player == None):
return "Player nao encontrado"
room = self.Map.getRoom(player.Room)
roomInDirection = room.GetRoomInDirection(direction)
if (roomInDirection != None):
if (room.CanMoveTo(direction)):
player.Room = roomInDirection
for item in player.Inventario:
if item.Name == "Mapa":
item.Description = self.Map.showMap(roomInDirection)
return "O jogador se moveu para a sala " + str(roomInDirection) + "."
else:
return "A porta esta fechada."
else:
return "Nao ha sala nesta direcao."
def Inventario(self, playerId):
player = self.getPlayer(playerId)
if(player == None):
return "Player nao encontrado"
result = ""
ln = len(player.Inventario)
for i in range(0, ln):
result += player.Inventario[i].Name
if (i + 1 != ln):
result += " ; "
return result
def UsaItem(self, playerId, itemName, target = None):
player = self.getPlayer(playerId)
abriuPorta = False
if(player == None):
return "Player nao encontrado"
salaAtual = self.Map.getRoom(player.Room)
for item in player.Inventario:
if item.Name == itemName:
if "Nota" in str(item.Name):
return item.Description
elif item.Name == "Mapa":
return item.Description
elif item.Name == "ObjetoFinal":
if salaAtual.ID == 1:
return "Fim"
else:
return "Voce precisa estar na sala inicial para utilizar este objeto"
elif ("Chave" in str(item.Name)):
if target == None:
return "Escolha uma porta para abrir"
else:
for x in range(0, len(salaAtual.Doors)):
if str(x) == target:
abriuPorta = True
self.Map.getRoom(player.Room).Doors[x].OpenDoor()
if(abriuPorta == True):
return "Porta "+target+" foi aberta"
else:
return "Nao foi possivel abrir a porta "+target
return "Portas da sala "+str(salaAtual.ID)+" foram abertas"
else:
return "Item nao existente no inventario"
"""
The player picks up an object from the current room.
"""
def Pegar(self, playerId, objeto):
player = self.getPlayer(playerId)
if(player == None):
return "Player nao encontrado"
salaAtual = self.Map.getRoom(player.Room)
if(salaAtual == None):
return "Sala nao encontrada"
objetoAdicionado = False
lenObjetos = len(salaAtual.Objects)
for x in range(0, lenObjetos):
objetoEncontrado = salaAtual.Objects[x]
if(str(objeto) == str(objetoEncontrado.Name)):
objetoAdicionado = True
del salaAtual.Objects[x]
player.Inventario.append(Item(objetoEncontrado.Name, objetoEncontrado.Description))
break
if(objetoAdicionado == True):
return "Objeto " + objeto + " adicionado ao inventario"
else:
return "Objeto " + objeto + " nao foi encontrado nesta sala"
"""
Drops an object from the inventory into the current room.
"""
def Largar(self, playerId, objeto):
player = self.getPlayer(playerId)
if(player == None):
return "Player nao encontrado"
salaAtual = self.Map.getRoom(player.Room)
objetoDeletado = False
for x in range(0, len(player.Inventario)):
itemPlayer = player.Inventario[x]
if(itemPlayer.Name == str(objeto)):
objetoDeletado = True
del player.Inventario[x]
salaAtual.Objects.append(Item(itemPlayer.Name, itemPlayer.Description))
if(objetoDeletado == True):
return "Objeto " + objeto + " adicionado a sala"
else:
return "Objeto " + objeto + " nao foi encontrado no inventario"
"""
Sends a text message to a specific player.
"""
def Cochichar(self, playerSource, text, playerTarget):
player = self.getPlayer(playerSource)
for x in range(0, len(self.Players)):
if(self.Players[x].Name == str(playerTarget)):
return (self.Players[x].Addr, text)
"""
Returns the players present in the room given as a parameter.
"""
def getPlayersInRoom(self, room):
sala = self.Map.getRoom(room)
if(sala == None):
return "Sala nao encontrada"
playersNaSala = []
for x in range(0, len(self.Players)):
if(self.Players[x].Room == room):
playersNaSala.append(self.Players[x].Addr)
return playersNaSala
"""
Looks up a player in the list of players connected to the game.
"""
def getPlayer(self, playerName):
for player in self.Players:
if player.Name == playerName:
return player
return None
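A minimal usage sketch for the Game class above. The real Map/Room classes live in Modelos/Mapa and are not part of this file, so the snippet below uses a tiny stand-in exposing only the methods Game actually calls (showMap, getRoom); it is illustrative only and not part of the original module.
# Illustrative stand-in for the real Map/Room classes from Modelos/Mapa (not shown here).
class _FakeRoom:
    def ToString(self):
        return "Sala 1 (stub)"
    def GetRoomInDirection(self, direction):
        return None  # the stub has no neighbouring rooms

class _FakeMap:
    def showMap(self, room_id=1):
        return "mapa (stub)"
    def getRoom(self, room_id):
        return _FakeRoom()

game = Game(_FakeMap())
print(game.CriaJogador("alice", ("127.0.0.1", 5000)))  # -> "OK"
print(game.Examina("alice"))                           # -> description of the current room
print(game.Move("alice", "norte"))                     # -> "Nao ha sala nesta direcao."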
| 28.051282
| 101
| 0.67404
| 5,325
| 0.973492
| 0
| 0
| 0
| 0
| 0
| 0
| 1,430
| 0.261426
|
3d550a112ff51ab3601284d3bb247c868ab1d733
| 2,062
|
py
|
Python
|
test/sample_data/get_observation_histogram_week.py
|
eduramirezh/pyinaturalist
|
e5da7ced7fae31f27310868bdb2d349bdff8e0d4
|
[
"MIT"
] | 47
|
2019-07-23T08:18:02.000Z
|
2022-03-17T16:32:17.000Z
|
test/sample_data/get_observation_histogram_week.py
|
eduramirezh/pyinaturalist
|
e5da7ced7fae31f27310868bdb2d349bdff8e0d4
|
[
"MIT"
] | 219
|
2019-08-22T14:45:20.000Z
|
2022-03-30T02:39:35.000Z
|
test/sample_data/get_observation_histogram_week.py
|
eduramirezh/pyinaturalist
|
e5da7ced7fae31f27310868bdb2d349bdff8e0d4
|
[
"MIT"
] | 9
|
2020-02-28T04:29:13.000Z
|
2022-02-23T03:02:32.000Z
|
from datetime import datetime
{
datetime(2019, 12, 30, 0, 0): 35,
datetime(2020, 1, 6, 0, 0): 27,
datetime(2020, 1, 13, 0, 0): 39,
datetime(2020, 1, 20, 0, 0): 120,
datetime(2020, 1, 27, 0, 0): 73,
datetime(2020, 2, 3, 0, 0): 48,
datetime(2020, 2, 10, 0, 0): 35,
datetime(2020, 2, 17, 0, 0): 89,
datetime(2020, 2, 24, 0, 0): 81,
datetime(2020, 3, 2, 0, 0): 116,
datetime(2020, 3, 9, 0, 0): 90,
datetime(2020, 3, 16, 0, 0): 195,
datetime(2020, 3, 23, 0, 0): 406,
datetime(2020, 3, 30, 0, 0): 642,
datetime(2020, 4, 6, 0, 0): 652,
datetime(2020, 4, 13, 0, 0): 684,
datetime(2020, 4, 20, 0, 0): 1393,
datetime(2020, 4, 27, 0, 0): 1755,
datetime(2020, 5, 4, 0, 0): 1251,
datetime(2020, 5, 11, 0, 0): 1566,
datetime(2020, 5, 18, 0, 0): 1986,
datetime(2020, 5, 25, 0, 0): 2141,
datetime(2020, 6, 1, 0, 0): 1581,
datetime(2020, 6, 8, 0, 0): 1640,
datetime(2020, 6, 15, 0, 0): 1406,
datetime(2020, 6, 22, 0, 0): 1902,
datetime(2020, 6, 29, 0, 0): 2078,
datetime(2020, 7, 6, 0, 0): 1821,
datetime(2020, 7, 13, 0, 0): 1854,
datetime(2020, 7, 20, 0, 0): 2308,
datetime(2020, 7, 27, 0, 0): 2637,
datetime(2020, 8, 3, 0, 0): 2275,
datetime(2020, 8, 10, 0, 0): 1717,
datetime(2020, 8, 17, 0, 0): 1474,
datetime(2020, 8, 24, 0, 0): 2234,
datetime(2020, 8, 31, 0, 0): 2275,
datetime(2020, 9, 7, 0, 0): 2180,
datetime(2020, 9, 14, 0, 0): 1824,
datetime(2020, 9, 21, 0, 0): 1609,
datetime(2020, 9, 28, 0, 0): 1714,
datetime(2020, 10, 5, 0, 0): 2849,
datetime(2020, 10, 12, 0, 0): 1425,
datetime(2020, 10, 19, 0, 0): 569,
datetime(2020, 10, 26, 0, 0): 210,
datetime(2020, 11, 2, 0, 0): 331,
datetime(2020, 11, 9, 0, 0): 229,
datetime(2020, 11, 16, 0, 0): 162,
datetime(2020, 11, 23, 0, 0): 164,
datetime(2020, 11, 30, 0, 0): 102,
datetime(2020, 12, 7, 0, 0): 75,
datetime(2020, 12, 14, 0, 0): 55,
datetime(2020, 12, 21, 0, 0): 150,
datetime(2020, 12, 28, 0, 0): 11,
}
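The fixture above is a bare dict literal of weekly observation counts keyed by datetime. Purely as an illustration (not part of the fixture), a consumer that had bound it to a name could summarise it like this:
from datetime import datetime

# Hypothetical: two entries copied from the fixture above, bound to a name for the demo.
histogram = {datetime(2020, 4, 20): 1393, datetime(2020, 10, 5): 2849}

# Find the busiest week and the overall total.
peak_week, peak_count = max(histogram.items(), key=lambda item: item[1])
print(peak_week.date(), peak_count, sum(histogram.values()))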
| 35.551724
| 39
| 0.532978
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
3d553fd4a5642d493db1017f36467ff8b535228c
| 65
|
py
|
Python
|
wave_1d_fwi_tf/__init__.py
|
ar4/wave_1d_fwi_tf
|
0a543149dc3bd5ca6ec0e5bfe34add4796e0b879
|
[
"MIT"
] | 2
|
2017-08-07T13:35:50.000Z
|
2019-02-28T08:26:49.000Z
|
wave_1d_fwi_tf/__init__.py
|
ar4/wave_1d_fwi_tf
|
0a543149dc3bd5ca6ec0e5bfe34add4796e0b879
|
[
"MIT"
] | null | null | null |
wave_1d_fwi_tf/__init__.py
|
ar4/wave_1d_fwi_tf
|
0a543149dc3bd5ca6ec0e5bfe34add4796e0b879
|
[
"MIT"
] | 5
|
2018-06-26T20:43:44.000Z
|
2021-12-11T20:00:03.000Z
|
"""1D FWI implemented using TensorFlow
"""
__version__ = '0.0.1'
| 16.25
| 38
| 0.692308
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 49
| 0.753846
|
3d555476cff1bc071aa2e2a1ea0c596baf77825f
| 1,586
|
py
|
Python
|
scripts/space_heating_demand/ecofys_space_heating_demand.py
|
quintel/etmoses
|
e1e682d0ef68928e5a015c44d916ec151917b1ff
|
[
"MIT"
] | 16
|
2015-09-22T11:33:52.000Z
|
2019-09-09T13:37:14.000Z
|
scripts/space_heating_demand/ecofys_space_heating_demand.py
|
quintel/etmoses
|
e1e682d0ef68928e5a015c44d916ec151917b1ff
|
[
"MIT"
] | 1,445
|
2015-05-20T22:42:50.000Z
|
2022-02-26T19:16:02.000Z
|
scripts/space_heating_demand/ecofys_space_heating_demand.py
|
quintel/etloader
|
e1e682d0ef68928e5a015c44d916ec151917b1ff
|
[
"MIT"
] | 3
|
2015-11-03T10:41:26.000Z
|
2017-02-11T07:39:52.000Z
|
import numpy as np
from numpy import genfromtxt
import matplotlib.pyplot as plt
import os
time_steps = 8760
file_name = "../input_data/Ecofys_ECN_heating_profiles.csv"
data = zip(*genfromtxt(file_name, delimiter=','))
names = ["tussenwoning_laag", "tussenwoning_midden", "tussenwoning_hoog",
"hoekwoning_laag", "hoekwoning_midden", "hoekwoning_hoog",
"twee_onder_een_kapwoning_laag", "twee_onder_een_kapwoning_midden", "twee_onder_een_kapwoning_hoog",
"appartement_laag", "appartement_midden", "appartement_hoog",
"vrijstaande_woning_laag", "vrijstaande_woning_midden", "vrijstaande_woning_hoog"]
profiles = []
totals = []
counter = 0
for profile in data:
if len(profile) == time_steps:
profiles.append(profile)
totals.append(np.sum(profile))
print("Writing: " + names[counter] + ".csv")
out_file = open("../output_data/"+names[counter]+".csv","w")
for item in profile:
for i in range(4):
out_file.write(str(item) + "\n")
out_file.close()
else:
print("Error! profile #" + str(counter) + " has " + str(len(profile)) + " lines")
counter += 1
print(totals)
plt.close()
plt.figure(figsize=(19, 7))
mini = 0
maxi = 24 * 7
for name,profile in zip(names,profiles):
#if "appartement" in name:
#plt.plot(profile[mini:maxi]/np.sum(profile),linewidth=1.0, label=name)
plt.plot(profile[mini:maxi],linewidth=1.0, label=name)
plt.xlabel('time (hours)')
plt.ylabel('kW')
plt.legend()
plt.show()
| 26.433333
| 109
| 0.645019
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 587
| 0.370113
|
3d55a052fc466e9d762d5638ce7970aab1dc7f8b
| 1,362
|
py
|
Python
|
parsers/lyrics_az.py
|
taynaron/lyrics2mp3
|
339f4dfd94c88896278a7be4143ea586ada8194f
|
[
"MIT"
] | null | null | null |
parsers/lyrics_az.py
|
taynaron/lyrics2mp3
|
339f4dfd94c88896278a7be4143ea586ada8194f
|
[
"MIT"
] | null | null | null |
parsers/lyrics_az.py
|
taynaron/lyrics2mp3
|
339f4dfd94c88896278a7be4143ea586ada8194f
|
[
"MIT"
] | null | null | null |
from .lyrics import Lyrics
class LyricsAZ(Lyrics):
def __init__(self, **kwargs):
super().__init__("azlyrics", **kwargs)
def parse_single_song(self, href):
soup = self.parse_html(href)
parsed_lyrics = (
soup.find("div", class_="col-xs-12 col-lg-8 text-center")
.contents[16]
.get_text()
)
return parsed_lyrics
def parse(self, soup, title, artist=None):
found_td = soup.find("td", class_="text-left")
self.validate_lyrics_found(found_td, title)
try:
found_artist = found_td.find_all("b")[1].text.lower()
found_title = found_td.find("b").text.lower()
except IndexError as e:
if self.verbose > 1:
print("HTML format for site has changed: ", e)
return None
if artist:
self.validate_artist(artist, found_artist)
self.validate_title(title, found_title)
href = found_td.find("a")["href"]
parsed_lyrics = self.validate_parse_song(href, title)
return parsed_lyrics
def request(self, title, artist=None):
search_url = "http://search.azlyrics.com/search.php?q="
artist_q = f"{artist} " if artist else ""
url = f"{search_url}{artist_q}{title}"
return super().request(url, title, artist=artist)
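A hedged usage sketch for the class above. The Lyrics base class is defined elsewhere in the package; the verbose keyword is assumed from its use in parse(), so the exact constructor arguments may differ:
# Illustrative only; the constructor kwarg and the call pattern are assumptions,
# and network access to azlyrics.com is required.
fetcher = LyricsAZ(verbose=2)
lyrics = fetcher.request("yesterday", artist="the beatles")
if lyrics:
    print(lyrics.splitlines()[0])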
| 31.674419
| 69
| 0.592511
| 1,332
| 0.977974
| 0
| 0
| 0
| 0
| 0
| 0
| 201
| 0.147577
|
3d56210042ea856581699506b54c8a673f17ffaa
| 1,414
|
py
|
Python
|
senorge/listfiles.py
|
kojitominaga/scratch
|
5eaf4de30c89ff1e855a6be493105d1201f07f74
|
[
"FSFAP"
] | null | null | null |
senorge/listfiles.py
|
kojitominaga/scratch
|
5eaf4de30c89ff1e855a6be493105d1201f07f74
|
[
"FSFAP"
] | null | null | null |
senorge/listfiles.py
|
kojitominaga/scratch
|
5eaf4de30c89ff1e855a6be493105d1201f07f74
|
[
"FSFAP"
] | null | null | null |
import os
d = '/Volumes/Seagate Expansion Drive/SeNorge'
vars = ['bn', 'eva', 'frd', 'gwt', 'is', 'os', 'q', 'rr',
'sd', 'smd', 'swe', 'tm']
# Glacier mass balance (mm/day) gwb_bn_2014_06_15.asc
# Evaporation (mm/day) gwb_eva_2014_06_15.asc
# Frost depth (mm/day) gwb_frd_2014_06_15.asc
# Groundwater storage (mm) gwb_gwt_2014_06_15.asc
# Infiltration into the root zone (mm/day) gwb_is_2014_06_15.asc
# Percolation from the root zone to the groundwater zone (mm/day) gwb_os_2014_06_15.asc
# Runoff (mm/day) gwb_q_2014_06_15.asc
# Precipitation (mm/day) gwb_rr_2014_06_15.asc
# Snow depth (mm) gwb_sd_2014_06_15.asc
# Soil moisture deficit (mm) gwb_smd_2014_06_15.asc
# Snow water equivalent (mm) gwb_swe_2014_06_15.asc
# Temperature (°C) gwb_tm_2014_06_15.asc
counts = {}
for year in range(1957, 2015):
fns = os.listdir(os.path.join(d, 'gwb_ascii_%s' % year))
counts[year] = [len([f for f in fns if v in f]) for v in vars]
out = ' '.join(['year'] + vars)
out += '\n'
out += '\n'.join([' '.join(map(str, [e] + counts[e])) for e in counts.keys()])
out += '\n'
counts2 = {}
for year in range(1957, 2015):
fns = os.listdir(os.path.join(d, 'gwb_ascii_%s' % year))
counts2[year] = [len([f for f in fns if v in f and '.gz' in f])
for v in vars]
out2 = ' '.join(['year'] + vars)
out2 += '\n'
out2 += '\n'.join([' '.join(map(str, [e] + counts2[e])) for e in counts2.keys()])
out2 += '\n'
| 33.666667
| 81
| 0.642857
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 767
| 0.538246
|
3d56d13a865c0fd22d417834c65ef6529f433ba4
| 104
|
py
|
Python
|
Python/jump-to-python/Exponential.py
|
leeheefull/blog-source
|
5f8370de5b0f62801fffc9e5f0f0bcb98dc2e6d1
|
[
"MIT"
] | null | null | null |
Python/jump-to-python/Exponential.py
|
leeheefull/blog-source
|
5f8370de5b0f62801fffc9e5f0f0bcb98dc2e6d1
|
[
"MIT"
] | null | null | null |
Python/jump-to-python/Exponential.py
|
leeheefull/blog-source
|
5f8370de5b0f62801fffc9e5f0f0bcb98dc2e6d1
|
[
"MIT"
] | null | null | null |
# Exponent (scientific) notation
a = 1e9
print(a) # 1000000000.0
a = 7.525e2
print(a) # 752.5
a = 3954e-3
print(a) # 3.954
| 10.4
| 24
| 0.576923
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 46
| 0.403509
|
3d56d5d2a7208245fa6af52b9cc12f9423e31653
| 11,289
|
py
|
Python
|
src/lib_yolo_detect.py
|
felixchenfy/ros_yolo_as_template_matching
|
0d5c0a52ba5540d2a644e0b426f9041a2a5e7858
|
[
"MIT"
] | 29
|
2019-12-02T01:54:18.000Z
|
2022-02-15T09:23:27.000Z
|
src/lib_yolo_detect.py
|
felixchenfy/ros_yolo_as_template_matching
|
0d5c0a52ba5540d2a644e0b426f9041a2a5e7858
|
[
"MIT"
] | 8
|
2019-12-24T13:13:44.000Z
|
2022-02-10T00:16:31.000Z
|
src/lib_yolo_detect.py
|
felixchenfy/ros_yolo_as_template_matching
|
0d5c0a52ba5540d2a644e0b426f9041a2a5e7858
|
[
"MIT"
] | 5
|
2020-01-31T00:31:37.000Z
|
2022-03-28T06:14:09.000Z
|
# -*- coding: future_fstrings -*-
from __future__ import division
if 1: # Set path
import sys, os
ROOT = os.path.dirname(os.path.abspath(__file__))+"/../" # root of the project
sys.path.append(ROOT)
import sys
from src.PyTorch_YOLOv3.models import Darknet
from src.PyTorch_YOLOv3.utils.utils import non_max_suppression, load_classes
from src.PyTorch_YOLOv3.utils.datasets import ImgfolderDataset
from utils.lib_yolo_datasets import ImgfolderDataset, UsbcamDataset, VideofileDataset
from utils.lib_yolo_plot import Yolo_Detection_Plotter_CV2
import utils.lib_common_funcs as cf
from config.config import read_all_args
import os
import sys
import time
import datetime
import argparse
import cv2
import numpy as np
from PIL import Image
import torch
from torch.utils.data import DataLoader
from torchvision import datasets
from torch.autograd import Variable
import torchvision.transforms as transforms
import torch.nn.functional as F
import matplotlib.pyplot as plt
import matplotlib.patches as patches
from matplotlib.ticker import NullLocator
def tensor_images_to_list_numpy_images(tensor_imgs):
'''
Arguments:
tensor_imgs {tensor, BxCxHxW}
Return:
list_of_imgs {list of numpy images}
'''
imgs = tensor_imgs.permute(0, 2, 3, 1).data.numpy() # convert to: RGB, float, (20, H, W, 3)
list_of_imgs = [img for img in imgs] # convert to: list of numpy images
return list_of_imgs
def rescale_boxes(boxes, current_dim, original_shape):
''' Rescales bounding boxes to the original shape
This is copied from src/PyTorch_YOLOv3/utils/utils.py
'''
orig_h, orig_w = original_shape
# The amount of padding that was added
pad_x = max(orig_h - orig_w, 0) * (current_dim / max(original_shape))
pad_y = max(orig_w - orig_h, 0) * (current_dim / max(original_shape))
# Image height and width after padding is removed
unpad_h = current_dim - pad_y
unpad_w = current_dim - pad_x
# Rescale bounding boxes to dimension of original image
boxes[:, 0] = ((boxes[:, 0] - pad_x // 2) / unpad_w) * orig_w
boxes[:, 1] = ((boxes[:, 1] - pad_y // 2) / unpad_h) * orig_h
boxes[:, 2] = ((boxes[:, 2] - pad_x // 2) / unpad_w) * orig_w
boxes[:, 3] = ((boxes[:, 3] - pad_y // 2) / unpad_h) * orig_h
return boxes
def resize(image, size):
''' Resize image to `size` '''
image = F.interpolate(image.unsqueeze(0), size=size, mode="nearest").squeeze(0)
return image
def pad_to_square(img, pad_value):
c, h, w = img.shape
dim_diff = np.abs(h - w)
# (upper / left) padding and (lower / right) padding
pad1, pad2 = dim_diff // 2, dim_diff - dim_diff // 2
# Determine padding
pad = (0, 0, pad1, pad2) if h <= w else (pad1, pad2, 0, 0)
# Add padding
img = F.pad(img, pad, "constant", value=pad_value)
return img, pad
def rgbimg_to_yoloimg(img, img_size):
'''
Input:
img: 3xHxW, tensor, rgb
img_size: int
Output:
(let Z = img_size)
img: 3xZxZ, tensor, rgb
'''
# img = np.moveaxis(img, -1, 0) # no need for this. torchvision.transforms does this for us.
# img = transforms.ToTensor()(img) # numpy, HxWx3 --> tensor, 3xHxW
# img = img[np.newaxis, ...] # no need for this. DataLoader itself will add the additional channel.
# Pad to square resolution
img, _ = pad_to_square(img, 0) # 3 x H(W) x H(W)
# Resize
img = resize(img, img_size) # 3 x img_size x img_size
return img
def rgbimgs_to_yoloimgs(imgs, img_size):
'''
Input:
imgs: Batch x (3xHxW), tensor, rgb, uint8
img_size: int
Output:
(let Z = img_size)
yoloimgs: Batch x (3xZxZ), tensor, rgb, float
'''
imgs = imgs.type(torch.float32)
imgs = imgs.permute(0, 3, 1, 2) # [B, W, H, 3] --> [B, 3, W, H]
imgs /= 255.0
yoloimgs = [rgbimg_to_yoloimg(img, img_size) for img in imgs]
yoloimgs = torch.stack((yoloimgs))
return yoloimgs
# ------------------ Main functions used for inference ------------------
def detetions_to_labels_and_pos(self, detections, classes):
'''
Input:
detections: the output of "detect_targets()"
'''
labels_and_pos = []
for x1, y1, x2, y2, conf, cls_conf, cls_idx in detections:
label = classes[int(cls_idx)]
pos = (int((x1+x2)/2), int((y1+y2)/2))
labels_and_pos.append((label, pos))
return labels_and_pos
def create_model(weights_path, f_yolo_config, img_size):
# Set up model
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model = Darknet(f_yolo_config, img_size=img_size).to(device)
# Load darknet weights
if weights_path.endswith(".weights"):
model.load_darknet_weights(weights_path)
else: # Load checkpoint weights
model.load_state_dict(torch.load(weights_path))
model.eval() # Set in evaluation mode
return model
def set_dataloader(src_data_type, image_data_path, img_size, batch_size, n_cpu):
print(f"Load data from: {src_data_type}; Data path: {image_data_path}")
if src_data_type == "folder":
dataloader = DataLoader(
ImgfolderDataset(image_data_path, img_size=img_size),
batch_size=batch_size,
shuffle=False,
num_workers=n_cpu,
)
elif src_data_type == "video":
dataloader = DataLoader(
VideofileDataset(image_data_path, img_size=img_size),
batch_size=batch_size,
shuffle=False,
)
elif src_data_type == "webcam":
dataloader = DataLoader(
UsbcamDataset(max_framerate=10, img_size=img_size),
batch_size=batch_size,
shuffle=False,
)
else:
raise ValueError("Wrong data source for yolo")
return dataloader
Tensor = torch.cuda.FloatTensor if torch.cuda.is_available() else torch.FloatTensor
def detect_targets(args_inference, model,
rgb_imgs, # Batch x (3xHxW), tensor, rgb, uint8
is_one_obj_per_class=False, # single instance for each class
):
'''
Output:
detections: [bbox, conf, cls_conf, cls_idx]
where: bbox = [x1, y1, x2, y2] is represented in the original image coordinate
'''
# -- Convert images to required type
Z = args_inference.img_size
yolo_imgs = rgbimgs_to_yoloimgs(rgb_imgs, Z) # [B, 3, W, H] --> [B, 3, Z, Z], uint8 --> float
imgs_on_gpu = Variable(yolo_imgs.type(Tensor))
# Get detections
with torch.no_grad():
imgs_detections = model(imgs_on_gpu)
N_elements = 7 # format of imgs_detections[jth_img]: x1, y1, x2, y2, conf, cls_conf, cls_idx
idx_conf = 5
imgs_detections = non_max_suppression(imgs_detections, args_inference.conf_thres, args_inference.nms_thres)
# convert to numpy array
imgs_detections = [d.numpy() if d is not None else None for d in imgs_detections]
# Sort detections based on confidence;
# Convert box to the current image coordinate;
# Convert detections to 2d list
for jth_img in range(len(imgs_detections)):
if imgs_detections[jth_img] is None: # no detected object
imgs_detections[jth_img] = []
continue
# sort
detections = sorted(imgs_detections[jth_img], key=lambda x: x[idx_conf])
detections = np.array(detections)
# change bbox pos to yoloimg
detections = rescale_boxes(detections, args_inference.img_size, rgb_imgs[jth_img].shape[:2])
# save result
imgs_detections[jth_img] = detections.tolist()
# Remove duplicated objects in the single-instance mode
if is_one_obj_per_class:
for jth_img, jth_detections in enumerate(imgs_detections):
if not imgs_detections[jth_img]:
continue
detected_objects = set()
jth_unique_detections = []
for kth_object in jth_detections:
x1, y1, x2, y2, conf, cls_conf, cls_idx = kth_object
if cls_idx not in detected_objects: # Add object if not detected before
detected_objects.add(cls_idx)
jth_unique_detections.append(kth_object)
imgs_detections[jth_img] = jth_unique_detections
return imgs_detections
class ObjDetector(object):
''' Yolo detector for single image '''
def __init__(self, config_path, weights_path):
args = read_all_args(config_path)
args_inference = cf.dict2class(args.yolo_inference)
self.model = create_model(weights_path, args.f_yolo_config, args_inference.img_size)
self.classes = load_classes(args.f_yolo_classes) # Extracts class labels from file
self.plotter = Yolo_Detection_Plotter_CV2(classes=self.classes, if_show=False)
self.args, self.args_inference = args, args_inference
def detect_cv2_img(self, cv2_img, is_one_obj_per_class=False):
'''
Argument:
cv2_img {a color image read from cv2.imread}
Return:
detections {2d list}: Each element is a 1D list indicating the detected object
[[x1, y1, x2, y2, conf, cls_conf, cls_idx], [...], ...],
where (x1, y1, x2, y2) are expressed in the original image coordinates
'''
# Change format to the required one: bgr to rgb, numpy to tensor, unsqueeze 0
imgs = self._cv2_to_torch_img(cv2_img)
# Detect
imgs_detections = detect_targets(
self.args_inference, self.model, imgs, is_one_obj_per_class)
# Return
detections = imgs_detections[0] # there is only 1 image here
return detections
def detect_cv2_imgs(self, cv2_imgs, is_one_obj_per_class=False):
imgs = self._cv2_to_torch_imgs(cv2_imgs)
imgs_detections = detect_targets(
self.args_inference, self.model, imgs, is_one_obj_per_class)
return imgs_detections
def detect_torch_imgs(self, torch_imgs, is_one_obj_per_class=False):
return detect_targets(
self.args_inference, self.model, torch_imgs, is_one_obj_per_class)
def draw_bboxes(self, img, detections):
'''
Arguments:
detections {Nx7 arrays}: Info of each obj: Bbox(4), conf, cls_conf, cls_idx.
This can be {2d list} or {np.ndarray} or {torch.Tensor}
'''
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
img_disp = self.plotter.plot(img, detections, if_print=False)
return img_disp
def _cv2_to_torch_img(self, img): # Output: [1, W, H, 3], tensor, rgb
''' Change image format to what the detector requires. '''
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
imgs = torch.from_numpy(img).unsqueeze(0)
return imgs
def _cv2_to_torch_imgs(self, imgs): # Output: [B, W, H, 3], tensor, rgb
for i in range(len(imgs)):
imgs[i] = cv2.cvtColor(imgs[i], cv2.COLOR_BGR2RGB)
torch_imgs = torch.from_numpy(np.array(imgs))
return torch_imgs
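A minimal usage sketch of ObjDetector on a single OpenCV frame. The config, weights and image paths below are placeholders (not taken from this repository), and a matching trained model is assumed to exist:
import cv2

# Placeholder paths -- substitute your own config file, trained weights and image.
detector = ObjDetector("config/config.yaml", "weights/yolo_weights.pth")
frame = cv2.imread("test_frame.jpg")
detections = detector.detect_cv2_img(frame, is_one_obj_per_class=True)
for x1, y1, x2, y2, conf, cls_conf, cls_idx in detections:
    print(detector.classes[int(cls_idx)], round(conf, 2))
cv2.imwrite("detections.jpg", detector.draw_bboxes(frame, detections))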
| 36.182692
| 111
| 0.639738
| 2,827
| 0.250421
| 0
| 0
| 0
| 0
| 0
| 0
| 3,369
| 0.298432
|
3d582b494cb98544a7b8b83f15184b7f8c7c6d2b
| 43
|
py
|
Python
|
python/parse_ddl/tests/ddl_examples/test_vs.py
|
jared-ong/data-projects
|
21ceccacb8e408ca45fe95c1c4d311f48e8f7708
|
[
"MIT"
] | null | null | null |
python/parse_ddl/tests/ddl_examples/test_vs.py
|
jared-ong/data-projects
|
21ceccacb8e408ca45fe95c1c4d311f48e8f7708
|
[
"MIT"
] | null | null | null |
python/parse_ddl/tests/ddl_examples/test_vs.py
|
jared-ong/data-projects
|
21ceccacb8e408ca45fe95c1c4d311f48e8f7708
|
[
"MIT"
] | null | null | null |
import json
import re
print("Hello world")
| 10.75
| 20
| 0.767442
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 13
| 0.302326
|
3d58e1aeb6209bbf0ac5b1e7058c942f20cd4768
| 733
|
py
|
Python
|
tests/test_missing_variable.py
|
specfault/GreenerPython
|
976260c3e78969cfd3e1e40639325f104325c703
|
[
"MIT"
] | null | null | null |
tests/test_missing_variable.py
|
specfault/GreenerPython
|
976260c3e78969cfd3e1e40639325f104325c703
|
[
"MIT"
] | null | null | null |
tests/test_missing_variable.py
|
specfault/GreenerPython
|
976260c3e78969cfd3e1e40639325f104325c703
|
[
"MIT"
] | null | null | null |
from tests.framework import AbstractFilePair
from tests.framework import in_test_function
from tests.framework import standard_test_spec
from tests.framework import SavingFixesSUT
from tests.framework import fixing_test
variable_names = ('x', 'y')
def missing_variable_in_source(variable_name):
test_code = in_test_function('bla = blubb.' + variable_name)
return AbstractFilePair('blubb', test=test_code)
@fixing_test
class TestSavingFixesMissingVariables(SavingFixesSUT):
tests = [
standard_test_spec( # add several variables to SUT
"""
bla = blubb.x
bla = blubb.y
bla = blubb.z""")
] + [missing_variable_in_source(name) for name in variable_names]
| 29.32
| 73
| 0.718963
| 300
| 0.409277
| 0
| 0
| 313
| 0.427012
| 0
| 0
| 141
| 0.19236
|
3d59c021cf7fb75f7a11d364d01cd243b711a413
| 3,186
|
py
|
Python
|
aiida/storage/psql_dos/migrations/versions/django_0040_data_migration_legacy_process_attributes.py
|
mkrack/aiida-core
|
bab1ad6cfc8e4ff041bce268f9270c613663cb35
|
[
"MIT",
"BSD-3-Clause"
] | 153
|
2016-12-23T20:59:03.000Z
|
2019-07-02T06:47:52.000Z
|
aiida/storage/psql_dos/migrations/versions/django_0040_data_migration_legacy_process_attributes.py
|
mkrack/aiida-core
|
bab1ad6cfc8e4ff041bce268f9270c613663cb35
|
[
"MIT",
"BSD-3-Clause"
] | 2,466
|
2016-12-24T01:03:52.000Z
|
2019-07-04T13:41:08.000Z
|
aiida/storage/psql_dos/migrations/versions/django_0040_data_migration_legacy_process_attributes.py
|
mkrack/aiida-core
|
bab1ad6cfc8e4ff041bce268f9270c613663cb35
|
[
"MIT",
"BSD-3-Clause"
] | 88
|
2016-12-23T16:28:00.000Z
|
2019-07-01T15:55:20.000Z
|
# -*- coding: utf-8 -*-
###########################################################################
# Copyright (c), The AiiDA team. All rights reserved. #
# This file is part of the AiiDA code. #
# #
# The code is hosted on GitHub at https://github.com/aiidateam/aiida-core #
# For further information on the license, see the LICENSE.txt file #
# For further information please visit http://www.aiida.net #
###########################################################################
# pylint: disable=invalid-name,no-member
"""Migrate some legacy process attributes.
Attribute keys that are renamed:
* `_sealed` -> `sealed`
Attribute keys that are removed entirely:
* `_finished`
* `_failed`
* `_aborted`
* `_do_abort`
Finally, after these first migrations, any remaining process nodes that still do not have a sealed attribute will have
it set to `True`. This excludes nodes that have a `process_state` attribute of one of the active states `created`,
`running` or `waiting`, because those are actual valid active processes that are not yet sealed.
This is identical to migration e734dd5e50d7
Revision ID: django_0040
Revises: django_0039
"""
from alembic import op
import sqlalchemy as sa
revision = 'django_0040'
down_revision = 'django_0039'
branch_labels = None
depends_on = None
def upgrade():
"""Migrations for the upgrade."""
conn = op.get_bind()
statement = sa.text(
"""
UPDATE db_dbnode
SET attributes = jsonb_set(attributes, '{"sealed"}', attributes->'_sealed')
WHERE attributes ? '_sealed' AND node_type LIKE 'process.%';
-- Copy `_sealed` -> `sealed`
UPDATE db_dbnode SET attributes = attributes - '_sealed'
WHERE attributes ? '_sealed' AND node_type LIKE 'process.%';
-- Delete `_sealed`
UPDATE db_dbnode SET attributes = attributes - '_finished'
WHERE attributes ? '_finished' AND node_type LIKE 'process.%';
-- Delete `_finished`
UPDATE db_dbnode SET attributes = attributes - '_failed'
WHERE attributes ? '_failed' AND node_type LIKE 'process.%';
-- Delete `_failed`
UPDATE db_dbnode SET attributes = attributes - '_aborted'
WHERE attributes ? '_aborted' AND node_type LIKE 'process.%';
-- Delete `_aborted`
UPDATE db_dbnode SET attributes = attributes - '_do_abort'
WHERE attributes ? '_do_abort' AND node_type LIKE 'process.%';
-- Delete `_do_abort`
UPDATE db_dbnode
SET attributes = jsonb_set(attributes, '{"sealed"}', to_jsonb(True))
WHERE
node_type LIKE 'process.%' AND
NOT (attributes ? 'sealed') AND
attributes->>'process_state' NOT IN ('created', 'running', 'waiting');
-- Set `sealed=True` for process nodes that do not yet have a `sealed` attribute AND are not in an active state
"""
)
conn.execute(statement)
def downgrade():
"""Migrations for the downgrade."""
raise NotImplementedError('Downgrade of django_0040.')
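To make the effect of the SQL above concrete, a single process node's attributes would change roughly as follows (illustrative values only, not part of the migration):
# Before: legacy flags as written by old AiiDA versions.
attributes_before = {'_sealed': True, '_finished': True, 'process_state': 'finished'}

# After: `_sealed` copied to `sealed`, all legacy `_*` flags removed.
attributes_after = {'sealed': True, 'process_state': 'finished'}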
| 35.797753
| 119
| 0.607031
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 2,887
| 0.906152
|
3d5a102883a7bb1dd52786e30fc8cbb5261af1f1
| 1,108
|
py
|
Python
|
hdvw/ops/matrix.py
|
shaoshitong/hdvw
|
fbb39da9ad8a765f74225eec7e9614978c740dde
|
[
"Apache-2.0"
] | 2
|
2022-03-26T09:08:43.000Z
|
2022-03-26T09:09:27.000Z
|
hdvw/ops/matrix.py
|
shaoshitong/hdvw
|
fbb39da9ad8a765f74225eec7e9614978c740dde
|
[
"Apache-2.0"
] | null | null | null |
hdvw/ops/matrix.py
|
shaoshitong/hdvw
|
fbb39da9ad8a765f74225eec7e9614978c740dde
|
[
"Apache-2.0"
] | null | null | null |
from sklearn.metrics import confusion_matrix
import torch
import seaborn as sns
import numpy as np
import pandas as pd
import matplotlib as mpl
import matplotlib.pyplot as plt
from tensorflow.keras.utils import to_categorical
def confusion_matrix_pyplot(y_true,y_pred,num_classes,name=""):
if isinstance(y_true,torch.Tensor):
y_true=y_true.clone().detach().cpu().numpy()
if isinstance(y_pred,torch.Tensor):
y_pred=y_pred.clone().detach().cpu().numpy()
if y_true.shape[-1]==num_classes:
y_true=np.argmax(y_true,1)
if y_pred.shape[-1]==num_classes:
y_pred=np.argmax(y_pred,1)
array=confusion_matrix(y_true,y_pred)
sns.set(font_scale=2.0)
df=pd.DataFrame(array,index=range(num_classes),columns=range(num_classes))
fig=plt.figure(figsize=(10,10))
ax=fig.add_subplot(111)
ax=sns.heatmap(df,square=True,annot=True,ax=ax,cmap="YlGnBu")
plt.title("---")
fig=ax.get_figure()
if name!="":
fig.savefig(name)
else:
fig.savefig("test.png")
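A brief usage sketch for the helper above; labels may be passed either as class indices or as one-hot arrays (both are handled by the function), and the name argument controls the output file:
import numpy as np

# Illustrative labels for a 3-class problem; writes a heatmap to cm_demo.png.
y_true = np.array([0, 1, 2, 2, 1, 0])
y_pred = np.array([0, 2, 2, 2, 1, 0])
confusion_matrix_pyplot(y_true, y_pred, num_classes=3, name="cm_demo.png")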
| 38.206897
| 81
| 0.652527
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 27
| 0.024368
|
3d5a3b5d8a7ee8e5a8d60b3408e8aa8d46c512c1
| 346
|
py
|
Python
|
{{cookiecutter.app_name}}/search_indexes.py
|
rickydunlop/cookiecutter-django-app-template-drf-haystack
|
8ea9034c371950628b3d312639964753899c8c5d
|
[
"MIT"
] | null | null | null |
{{cookiecutter.app_name}}/search_indexes.py
|
rickydunlop/cookiecutter-django-app-template-drf-haystack
|
8ea9034c371950628b3d312639964753899c8c5d
|
[
"MIT"
] | null | null | null |
{{cookiecutter.app_name}}/search_indexes.py
|
rickydunlop/cookiecutter-django-app-template-drf-haystack
|
8ea9034c371950628b3d312639964753899c8c5d
|
[
"MIT"
] | null | null | null |
from haystack import indexes
from .models import {{ cookiecutter.model_name }}
class {{ cookiecutter.model_name }}Index(indexes.SearchIndex, indexes.Indexable):
text = indexes.CharField(document=True, use_template=True)
name = indexes.CharField(model_attr='name')
def get_model(self):
return {{ cookiecutter.model_name }}
| 28.833333
| 81
| 0.736994
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 6
| 0.017341
|
3d5db5e05861ba4f7444a52667354a11e6f370f2
| 6,018
|
py
|
Python
|
utility_functions.py
|
andrewli2403/California-Basketball-Data-Processor
|
19582bef72d6a4f4281ddb61eceb4bee033b5ceb
|
[
"MIT"
] | null | null | null |
utility_functions.py
|
andrewli2403/California-Basketball-Data-Processor
|
19582bef72d6a4f4281ddb61eceb4bee033b5ceb
|
[
"MIT"
] | null | null | null |
utility_functions.py
|
andrewli2403/California-Basketball-Data-Processor
|
19582bef72d6a4f4281ddb61eceb4bee033b5ceb
|
[
"MIT"
] | null | null | null |
import requests
from bs4 import BeautifulSoup as bs
import re
import pandas as pd
#collect & process data based on GAME ID
def processor(game_id):
url = "https://www.espn.com/mens-college-basketball/matchup?gameId=" + str(game_id)
r = requests.get(url)
webpage = bs(r.content, features="html.parser")
#create dictionary for CAL & opponent with respective scores
team_name = [name.string for name in webpage.find_all("td", attrs={"class", "team-name"})]
score = [float(final_score.string) for final_score in webpage.find_all("td", attrs={"class", "final-score"})]
team_score = dict(zip(team_name, score))
#determine opponent
for team in team_name:
if team != "CAL":
opponent = team
#locate table for main data regarding game
table = webpage.select("table.mod-data")[0]
#create column_names and locate the rows of the table
column_names = (list(team_score.keys()))
#columns = table.find_all("td", string=re.compile("[A-Za-z%]+"))
#column_names = [c.get_text().strip() for c in columns]
table_rows = table.find("tbody").find_all("tr")
#accumulate all the table_rows into one list
row_names, l = [], []
for tr in table_rows:
td = tr.find_all("td")
#row: [STAT_NAME, NUM1, NUM2]
row = [tr.get_text().strip() for tr in td]
#.append([NUM1, NUM2])
l.append(row[1:])
#.append([STAT_NAME])
row_names.append(row[0])
#create data table with column_names and rows within table.mod-data
df = pd.DataFrame(l, columns=column_names)
#searches a string with - and seperates elements from either side into a list
def try_convert(val):
hyphen_index = val.index("-")
return [val[:hyphen_index], val[hyphen_index + 1:]]
#append new rows for FGA, FGM, 3PTA, 3PTM, FTA, FTM for later calculations in MAKES-ATTEMPT format
for index in df.index:
if re.search("-", df.loc[index, "CAL"]) and re.search("-", df.loc[index, opponent]):
#multiple assignment with try_convert to extract attempts and makes
makes_cal, att_cal = try_convert(df.loc[index, "CAL"])[0], try_convert(df.loc[index, "CAL"])[1]
makes_opp, att_opp = try_convert(df.loc[index, opponent])[0], try_convert(df.loc[index, opponent])[1]
#assigns current row with makes
df.loc[index, "CAL"], df.loc[index, opponent] = makes_cal, makes_opp
#append new row with attempts, increasing index to reorder later
df2 = pd.DataFrame({'CAL': att_cal, opponent: att_opp}, index=[index + .1])
df = df.append(df2, ignore_index = False)
#reorder rows with increasing index
df = df.sort_index().reset_index(drop=True)
#adds FGA, FGM, 3PTA, 3PTM, FTA, FTM row names
row = []
def add_makes_att(lst, new_lst):
for stat in lst:
if re.search("FG|3PT|FT", stat):
new_lst.append(stat + "M")
new_lst.append(stat +"A")
else:
new_lst.append(stat)
return new_lst
row_names = add_makes_att(row_names, row)
#set row indexes as STAT_NAME within row
df.index = row_names
#turns all stats into float, tranpose matrix
df["CAL"] = pd.to_numeric(df["CAL"], downcast="float")
df[opponent] = pd.to_numeric(df[opponent], downcast="float")
df = df.T
#calculates +/-
def net(cal, opp):
if cal - opp > 0:
return "+" + str(cal - opp)
elif opp - cal > 0:
return "-" + str(opp - cal)
else:
return 0
#create a list respresenting calculations for the game
poss = df.loc['CAL', 'FGA']-df.loc['CAL', 'Offensive Rebounds']+df.loc['CAL', 'Total Turnovers']+.475*df.loc['CAL', 'FTA']
calc = [opponent, poss, team_score[opponent]/poss, (df.loc[opponent, 'FGM']+.5*df.loc[opponent, '3PTM'])*100/df.loc[opponent, 'FGA'], df.loc['CAL', 'Defensive Rebounds']*100/(df.loc['CAL', 'Defensive Rebounds']+df.loc[opponent, 'Offensive Rebounds']), df.loc[opponent, 'Total Turnovers']*100/poss, df.loc[opponent, 'Field Goal %'], df.loc[opponent, 'Three Point %'],(df.loc[opponent, 'FTA']*100/df.loc[opponent, 'FGA'], df.loc[opponent, 'FTM']*100/df.loc[opponent, 'FTA']), team_score['CAL']/poss, (df.loc['CAL', 'FGM']+.5*df.loc['CAL', '3PTM'])*100/df.loc['CAL', 'FGA'], df.loc['CAL', 'Offensive Rebounds']*100/(df.loc[opponent, 'Defensive Rebounds']+df.loc['CAL', 'Offensive Rebounds']), df.loc['CAL', 'Total Turnovers']*100/poss, df.loc['CAL', 'Field Goal %'], df.loc['CAL', 'Three Point %'],(df.loc['CAL', 'FTA']*100/df.loc['CAL', 'FGA'], df.loc['CAL', 'FTM']*100/df.loc['CAL', 'FTA']), net(df.loc['CAL', 'Offensive Rebounds']+df.loc["CAL", 'Defensive Rebounds'], df.loc[opponent, 'Offensive Rebounds']+df.loc[opponent, 'Defensive Rebounds']), net(df.loc['CAL', '3PTM'], df.loc[opponent, '3PTM'])]
return calc
#find date of game
def get_date(game_id):
url = "https://www.espn.com/mens-college-basketball/game/_/gameId/" + str(game_id)
r = requests.get(url)
webpage = bs(r.content, features="html.parser")
date = re.findall('(?:January|February|March|April|May|June|July|August|September|October|November|December)\s[1-9][1-9]?,\s[0-9]{4}', webpage.find("title").get_text())[0]
return date
#rounds data based on stat parameters
def clean(series):
if series.name == "Game" or series.name == "Reb +/-" or series.name == "3pt +/-":
return series
elif series.name == "OER" or series.name == "DER":
return series.astype('float64').round(2)
elif series.name == "Def FT RATE, %" or series.name == "Off FT Rate, %":
#encounters tuple data structure of two valued stat
return series.apply(lambda stats: tuple(map(round, stats)))
else:
return series.astype('int32')
#converts datetime object into MM/DD/YYYY format
def date_convert(datetime_obj):
return f"{datetime_obj.month}/{datetime_obj.day}/{datetime_obj.year}"
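A hedged usage sketch; the game id below is a placeholder, and processor() performs a live HTTP request to ESPN, so the output depends on the page being available and keeping its current layout:
# Placeholder gameId -- substitute a real ESPN men's college basketball game id.
game_id = 123456789
row = processor(game_id)
print(get_date(game_id))         # e.g. "January 5, 2020"
print(row[0], round(row[1], 1))  # opponent name, estimated possessions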
| 47.015625
| 1,105
| 0.633599
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 2,353
| 0.390994
|
3d5eb8fb4fedfe6ddb55250652317a407099a204
| 3,787
|
py
|
Python
|
site_crawler/cleaner/cleaner.py
|
kwoshvick/NSE-Financial-News-Crawler-and-Predictor
|
8acee7c660c5487d18321dc7a169eba3043ef2b8
|
[
"MIT"
] | 11
|
2018-04-24T12:05:45.000Z
|
2021-07-12T05:30:41.000Z
|
site_crawler/cleaner/cleaner.py
|
kwoshvick/NSE-Financial-News-Crawler-and-Predictor
|
8acee7c660c5487d18321dc7a169eba3043ef2b8
|
[
"MIT"
] | null | null | null |
site_crawler/cleaner/cleaner.py
|
kwoshvick/NSE-Financial-News-Crawler-and-Predictor
|
8acee7c660c5487d18321dc7a169eba3043ef2b8
|
[
"MIT"
] | 5
|
2019-08-09T04:43:23.000Z
|
2021-08-28T18:05:56.000Z
|
import csv
import re
import string
import html
class Cleaner:
def __init__(self):
self.remove_punctuations = str.maketrans('', '', string.punctuation)
def read_csv(self,csv_name):
cleaned_text = []
with open('../data/twitter_data/raw_data/'+csv_name+'.csv', newline='', encoding='utf-8') as csvfile:
reader = csv.DictReader(csvfile)
for row in reader:
text = row['text']
clean_text = self.clean_tweets(text)
cleaned_text.append(clean_text)
self.save_cleaned_csv('cleaned_'+csv_name,cleaned_text)
def clean_tweets(self,tweet):
# harmonize the cases
lower_case_text = tweet.lower()
# remove urls
removed_url = re.sub(r'http\S+', '', lower_case_text)
# remove hashtags
removed_hash_tag = re.sub(r'#\w*', '', removed_url) # hastag
# remove usernames from tweets
removed_username = re.sub(r'@\w*\s?','',removed_hash_tag)
# removed retweets
removed_retweet = removed_username.replace("rt", "", True) # remove retweet marker
# removing punctuations
removed_punctuation = removed_retweet.translate(self.remove_punctuations)
# remove spaces
remove_g_t = removed_punctuation.replace(">", "", True)
remove_a_m_p = remove_g_t.replace("&", "", True)
final_text = remove_a_m_p
return final_text
def pre_cleaning(self,text):
html_escaped = html.unescape(text)
final_text = html_escaped.replace(';','')
return final_text
def pre_labeling(self,text):
lower_case_text = text.lower()
removed_url = re.sub(r'http\S+', '', lower_case_text)
return removed_url
def save_cleaned_csv(self,name,tweets_list):
with open('../data/twitter_data/cleaned_data/' + name + '.csv', 'w') as f:
writer = csv.writer(f)
writer.writerow(["text"])
for tweet in tweets_list:
writer.writerow([tweet,])
pass
def save_pre_labled_csv(self,csv_name):
cleaned_text = []
with open('../data/twitter_data/raw_data/' + csv_name + '.csv', newline='', encoding='utf-8') as csvfile:
reader = csv.DictReader(csvfile)
for row in reader:
text = row['text']
clean_text = self.pre_labeling(text)
cleaned_text.append(clean_text)
self.save_pre_labeled_csv('unlabeled_' + csv_name, cleaned_text)
def save_pre_labeled_csv(self,name,tweets_list):
with open('../data/twitter_data/pre_labeled/' + name + '.csv', 'w') as f:
writer = csv.writer(f)
writer.writerow(["text","label"])
for tweet in tweets_list:
writer.writerow([tweet,])
pass
if __name__ == "__main__":
c = Cleaner()
tweets_csvs = [
'Business_KE',
'MadeItInAfrica',
'IFCAfrica',
'africareview',
'AfDB_Group',
'_AfricanUnion',
'Taifa_Leo',
'BD_Africa',
'RadioCitizenFM',
'citizentvkenya',
'KTNKenya',
'K24Tv',
'StandardKenya',
'TheStarKenya',
'radiomaisha',
'KBCChannel1',
'CapitalFMKenya',
'African_Markets',
'Africafinancial',
'InvestInAfrica',
'AfricanInvestor',
'forbesafrica',
'cnbcafrica',
'BBCAfrica',
'CNNAfrica',
'allafrica',
'ReutersAfrica',
'VenturesAfrica',
'BBGAfrica',
'GhettoRadio895',
'kenyanwalstreet',
'SokoAnalyst',
'NSEKenya',
'wazua'
]
for tweets_csv in tweets_csvs:
c.save_pre_labled_csv(tweets_csv)
| 29.818898
| 113
| 0.578558
| 2,784
| 0.735147
| 0
| 0
| 0
| 0
| 0
| 0
| 939
| 0.247954
|
3d5f940e0e5788ca23c26f2a301fe14e51745333
| 1,161
|
py
|
Python
|
003.branch/if.py
|
cjp1016/python-samples
|
ca5a7284cf4cb9fe42fa1487d4944815a00487ec
|
[
"Apache-2.0"
] | null | null | null |
003.branch/if.py
|
cjp1016/python-samples
|
ca5a7284cf4cb9fe42fa1487d4944815a00487ec
|
[
"Apache-2.0"
] | null | null | null |
003.branch/if.py
|
cjp1016/python-samples
|
ca5a7284cf4cb9fe42fa1487d4944815a00487ec
|
[
"Apache-2.0"
] | null | null | null |
"""
User identity verification
Version: 0.1
Author: cjp
"""
username = input('请输入用户名: ')
password = input('请输入口令: ')
# Authentication succeeds if the username is admin and the password is 123456; otherwise it fails
if username == 'admin' and password == '123456':
print('身份验证成功!')
else:
print('身份验证失败!')
"""
Python does not use curly braces to build code blocks; indentation defines the code hierarchy.
If several statements must run when the if condition holds, simply give them the same indentation.
In other words, consecutive lines with the same indentation belong to the same block and run as one unit.
"""
# Of course, for more branches you can use the if...elif...else structure, e.g. the piecewise function evaluation below.
"""
Piecewise function evaluation
3x - 5 (x > 1)
f(x) = x + 2 (-1 <= x <= 1)
5x + 3 (x < -1)
Version: 0.1
Author: cjp
"""
x = float(input('x = '))
if x > 1:
y = 3 * x - 5
elif x >= -1:
y = x + 2
else:
y = 5 * x + 3
print('f(%.2f) = %.2f' % (x, y))
"""
Likewise, new branches can be built inside elif and else; this is called a nested branch structure.
"""
"""
Piecewise function evaluation
3x - 5 (x > 1)
f(x) = x + 2 (-1 <= x <= 1)
5x + 3 (x < -1)
Version: 0.1
Author: cjp
"""
x = float(input('x = '))
if x > 1:
y = 3 * x - 5
else:
if x >= -1:
y = x+2
else:
y = 5 * x + 3
print('f(%.2f) = %.2f' % (x, y))
"""
Try both versions and judge for yourself which one reads better.
The Zen of Python mentioned earlier contains the line "Flat is better than nested.":
"flat" code is encouraged because deeply nested structures seriously hurt readability,
so prefer a flat structure over nesting whenever you can.
"""
| 15.077922
| 53
| 0.564169
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,489
| 0.800107
|
3d602a949005e0184acfd82e6822740a19d36fb9
| 7,210
|
bzl
|
Python
|
bazel/antlr4_cc.bzl
|
kyle-winkelman/fhir
|
01038aa235189fd043fd2981ebf40f4dc1e826e0
|
[
"Apache-2.0"
] | null | null | null |
bazel/antlr4_cc.bzl
|
kyle-winkelman/fhir
|
01038aa235189fd043fd2981ebf40f4dc1e826e0
|
[
"Apache-2.0"
] | 2
|
2020-07-24T14:20:45.000Z
|
2020-07-24T19:43:52.000Z
|
bazel/antlr4_cc.bzl
|
kyle-winkelman/fhir
|
01038aa235189fd043fd2981ebf40f4dc1e826e0
|
[
"Apache-2.0"
] | 1
|
2020-07-10T15:03:45.000Z
|
2020-07-10T15:03:45.000Z
|
"""Build rules to create C++ code from an Antlr4 grammar."""
def antlr4_cc_lexer(name, src, namespaces = None, imports = None, deps = None, lib_import = None):
"""Generates the C++ source corresponding to an antlr4 lexer definition.
Args:
name: The name of the package to use for the cc_library.
src: The antlr4 g4 file containing the lexer rules.
namespaces: The namespace used by the generated files. Uses an array to
support nested namespaces. Defaults to [name].
imports: A list of antlr4 source imports to use when building the lexer.
deps: Dependencies for the generated code.
lib_import: Optional target for importing grammar and token files.
"""
namespaces = namespaces or [name]
imports = imports or []
deps = deps or []
if not src.endswith(".g4"):
fail("Grammar must end with .g4", "src")
if (any([not imp.endswith(".g4") for imp in imports])):
fail("Imported files must be Antlr4 grammar ending with .g4", "imports")
file_prefix = src[:-3]
base_file_prefix = _strip_end(file_prefix, "Lexer")
out_files = [
"%sLexer.h" % base_file_prefix,
"%sLexer.cpp" % base_file_prefix,
]
native.java_binary(
name = "antlr_tool",
jvm_flags = ["-Xmx256m"],
main_class = "org.antlr.v4.Tool",
runtime_deps = ["@maven//:org_antlr_antlr4_4_7_1"],
)
command = ";\n".join([
# Use the first namespace, we'll add the others afterwards.
_make_tool_invocation_command(namespaces[0], lib_import),
_make_namespace_adjustment_command(namespaces, out_files),
])
native.genrule(
name = name + "_source",
srcs = [src] + imports,
outs = out_files,
cmd = command,
heuristic_label_expansion = 0,
tools = ["antlr_tool"],
)
native.cc_library(
name = name,
srcs = [f for f in out_files if f.endswith(".cpp")],
hdrs = [f for f in out_files if f.endswith(".h")],
deps = ["@antlr_cc_runtime//:antlr4_runtime"] + deps,
copts = [
"-fexceptions",
],
features = ["-use_header_modules"], # Incompatible with -fexceptions.
)
def antlr4_cc_parser(
name,
src,
namespaces = None,
token_vocab = None,
imports = None,
listener = True,
visitor = False,
deps = None,
lib_import = None):
"""Generates the C++ source corresponding to an antlr4 parser definition.
Args:
name: The name of the package to use for the cc_library.
src: The antlr4 g4 file containing the parser rules.
namespaces: The namespace used by the generated files. Uses an array to
support nested namespaces. Defaults to [name].
token_vocab: The antlr g4 file containing the lexer tokens.
imports: A list of antlr4 source imports to use when building the parser.
listener: Whether or not to include listener generated files.
visitor: Whether or not to include visitor generated files.
deps: Dependencies for the generated code.
lib_import: Optional target for importing grammar and token files.
"""
suffixes = ()
if listener:
suffixes += (
"%sBaseListener.cpp",
"%sListener.cpp",
"%sBaseListener.h",
"%sListener.h",
)
if visitor:
suffixes += (
"%sBaseVisitor.cpp",
"%sVisitor.cpp",
"%sBaseVisitor.h",
"%sVisitor.h",
)
namespaces = namespaces or [name]
imports = imports or []
deps = deps or []
if not src.endswith(".g4"):
fail("Grammar must end with .g4", "src")
if token_vocab != None and not token_vocab.endswith(".g4"):
fail("Token Vocabulary must end with .g4", "token_vocab")
if (any([not imp.endswith(".g4") for imp in imports])):
fail("Imported files must be Antlr4 grammar ending with .g4", "imports")
file_prefix = src[:-3]
base_file_prefix = _strip_end(file_prefix, "Parser")
out_files = [
"%sParser.h" % base_file_prefix,
"%sParser.cpp" % base_file_prefix,
] + _make_outs(file_prefix, suffixes)
if token_vocab:
imports.append(token_vocab)
command = ";\n".join([
# Use the first namespace, we'll add the others afterwards.
_make_tool_invocation_command(namespaces[0], lib_import, listener, visitor),
_make_namespace_adjustment_command(namespaces, out_files),
])
native.genrule(
name = name + "_source",
srcs = [src] + imports,
outs = out_files,
cmd = command,
heuristic_label_expansion = 0,
tools = [
":antlr_tool",
],
)
native.cc_library(
name = name,
srcs = [f for f in out_files if f.endswith(".cpp")],
hdrs = [f for f in out_files if f.endswith(".h")],
deps = ["@antlr_cc_runtime//:antlr4_runtime"] + deps,
copts = [
"-fexceptions",
# FIXME: antlr generates broken C++ code that attempts to construct
# a std::string from nullptr. It's not clear whether the relevant
# constructs are reachable.
"-Wno-nonnull",
],
features = ["-use_header_modules"], # Incompatible with -fexceptions.
)
def _make_outs(file_prefix, suffixes):
return [file_suffix % file_prefix for file_suffix in suffixes]
def _strip_end(text, suffix):
if not text.endswith(suffix):
return text
return text[:len(text) - len(suffix)]
def _to_c_macro_name(filename):
# Convert the filenames to a format suitable for C preprocessor definitions.
char_list = [filename[i].upper() for i in range(len(filename))]
return "ANTLR4_GEN_" + "".join(
[a if (("A" <= a) and (a <= "Z")) else "_" for a in char_list],
)
def _make_tool_invocation_command(package, lib_import, listener = False, visitor = False):
return "$(location :antlr_tool) " + \
"$(SRCS)" + \
(" -visitor" if visitor else " -no-visitor") + \
(" -listener" if listener else " -no-listener") + \
(" -lib $$(dirname $(location " + lib_import + "))" if lib_import else "") + \
" -Dlanguage=Cpp" + \
" -package " + package + \
" -o $(@D)" + \
" -Xexact-output-dir"
def _make_namespace_adjustment_command(namespaces, out_files):
if len(namespaces) == 1:
return "true"
commands = []
extra_header_namespaces = "\\\n".join(["namespace %s {" % namespace for namespace in namespaces[1:]])
for filepath in out_files:
if filepath.endswith(".h"):
commands.append("sed -i '/namespace %s {/ a%s' $(@D)/%s" % (namespaces[0], extra_header_namespaces, filepath))
for namespace in namespaces[1:]:
commands.append("sed -i '/} \/\/ namespace %s/i} \/\/ namespace %s' $(@D)/%s" % (namespaces[0], namespace, filepath))
else:
commands.append("sed -i 's/using namespace %s;/using namespace %s;/' $(@D)/%s" % (namespaces[0], "::".join(namespaces), filepath))
return ";\n".join(commands)
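A hedged example of how these macros might be loaded and invoked from a BUILD file (Starlark); the load path, grammar file names, namespace and dependency edge are illustrative only:
load("//bazel:antlr4_cc.bzl", "antlr4_cc_lexer", "antlr4_cc_parser")

# Hypothetical grammars; ExampleLexer.g4 / ExampleParser.g4 are not part of this repo.
antlr4_cc_lexer(
    name = "example_lexer",
    src = "ExampleLexer.g4",
    namespaces = ["example"],
)

antlr4_cc_parser(
    name = "example_parser",
    src = "ExampleParser.g4",
    token_vocab = "ExampleLexer.g4",
    namespaces = ["example"],
    listener = True,
    deps = [":example_lexer"],
)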
| 38.55615
| 142
| 0.600971
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 2,960
| 0.410541
|
3d62c9779cfa7f3da2b542252bdcb812a8982541
| 234
|
py
|
Python
|
src/scenic/simulators/gta/map.py
|
cahartsell/Scenic
|
2e7979011aef426108687947668d9ba6f5439136
|
[
"BSD-3-Clause"
] | 141
|
2019-03-07T07:17:19.000Z
|
2022-03-19T16:15:48.000Z
|
src/scenic/simulators/gta/map.py
|
cahartsell/Scenic
|
2e7979011aef426108687947668d9ba6f5439136
|
[
"BSD-3-Clause"
] | 27
|
2019-06-18T23:04:29.000Z
|
2022-03-31T13:42:05.000Z
|
src/scenic/simulators/gta/map.py
|
cahartsell/Scenic
|
2e7979011aef426108687947668d9ba6f5439136
|
[
"BSD-3-Clause"
] | 59
|
2019-04-08T15:20:15.000Z
|
2022-03-29T07:23:26.000Z
|
# stub to allow changing the map without having to alter gta_model.sc
import os
mapPath = 'map.npz'
def setLocalMap(module, relpath):
global mapPath
base = os.path.dirname(module)
mapPath = os.path.join(base, relpath)
| 19.5
| 69
| 0.717949
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 78
| 0.333333
|
e9e2bdbc8442df5b9a587f4296d83d87e0d66ce8
| 6,982
|
py
|
Python
|
bot/messages.py
|
pyaf/tpobot
|
d96a3650de46f6d43ab346d61b922b170cd5fdb2
|
[
"MIT"
] | 4
|
2017-07-19T19:18:15.000Z
|
2017-11-24T16:15:51.000Z
|
bot/messages.py
|
rishabhiitbhu/tpobot
|
d96a3650de46f6d43ab346d61b922b170cd5fdb2
|
[
"MIT"
] | 5
|
2020-02-11T23:53:50.000Z
|
2021-12-13T19:45:22.000Z
|
bot/messages.py
|
pyaf/tpobot
|
d96a3650de46f6d43ab346d61b922b170cd5fdb2
|
[
"MIT"
] | 1
|
2017-08-27T20:40:50.000Z
|
2017-08-27T20:40:50.000Z
|
# -*- coding: utf-8 -*-
message_dict = {
'welcome': "Hi! TPO Baba is here to give you updates about TPO portal, set willingness reminders, ppt "\
"reminders, exam date reminders and lot more...:D \n\n"\
"To personalise your experience, I gotta register you. It's simple two step process.\n",
'greetings': "Hello pal :)",
'haalchaal': "hamaar to mauj ahaai guru 🙏, tohaar batawa kaa haal chaal bate?"\
" ;P",
'no_idea': "Oops, didn't get you, Baba is a simple AI bot not Jarvis, don't be so cryptic. 😅\n"\
"Baba has gotta master, Baba will learn this soon. B) \n\n"\
"Ask for help to know what options you have.",
'user_invalid': "You account is Invalid.\n"\
"Contact https://m.me/rishabh.ags/ for help",
'get_email': "Baba needs to know your official IIT email id, drop it as a text message.",
'email_set': "Baba has set your email to {0}",
'not_iit_email': "Oops!, seems like you didn't enter your official email id\n"\
"As I am running on a heroku server, which costs 7$ pm. Don't misuse this. "\
"I cannot afford offering services to others,.\nIf you ain't student of IIT (BHU), please"\
" don't register ,.. Bhawnao ko samjho yaaar 😅",
'get_course': "Baba needs to know your course, select your course among btech, idd or imd, "\
"then drop a text message.",
'course_set': "Baba has set your course to {0}",
'reg_error': "Oops!, you got me wrong, retry entering it correctly..\n\n"\
"And you gotta register first, we'll chat afterwards. :)\n"\
"if you're facing issues contact https://m.me/rishabh.ags",
'email_already_set': "Pal, you already got your email set to {0}",
'invalid_email': "Baba wants a valid email id.\nRetry please.",
'course_already_set': "Pal, you already got your email set to {0}",
'reg_success': "And congratulations! 🎉 you have successfully registered!, your email id "\
"will be verified soon. :) \n\nIf found misleading or wrong, I'll find you and I'll "\
"deregister you ;P \n\n"\
"Ask for features to know what I've got for you in my Jhola B) \n\n"\
"Ask for help to know what options you have. :)",
'features': "Baba is a messenger bot created by a high functioning sociopathic nerd of IIT (BHU) :D\n"\
"\nI have got a simple AI brain powered by Wit and has not been trained too much, "\
"so please don't use too off the track keywords 😅 \n\n",
'features1': "What I currently do:\n"\
"1. Text you whenever a new company opens for your course and department, "\
"you'll get all details of such companies.\n"\
"2. Text you whenever companies your course and department get any changes in their "\
"parameters like willingness deadlines, exam dates, ppt dates, etc.. \n\n",
'features2':"What I plan to do pretty soon:\n"\
"1. Remind you about deadlines of willingness application, ppt dates "\
"and exam dates etc.. B) \n" \
"2. Give replies to your queries about companies...\n\n"\
"P.S. To know why that nerd made me? you are free to ask me :P\n"\
"Ask for help to know what options you have.",
'help': "Baba has got you some help:\n\n"\
"1. You can ask me to unsubscribe/deactivate you from receiving updates .\n"\
"2. You can ask me subscribe/activate your account. from receiving updates.\n",
'deactivate': "Alright pal, It's been a good chat with you, deactivating your account.\n"\
"You can ask me to reactivate it if necessary.",
'activate': "Welcome back!, your account is reactivated",
'wit_error': "Ohho, I'm sick, my brain is not working, Please call my master! 😰 \n"\
"https:/m.me/rishabhags/",
'new_company': "Hola!\nNew Company Open for you! 🎉🎊🎁\n\n"\
"Company Name: {company_name}\n"\
"Open for: {course}\n"\
"Departments: {department}\n"\
"BTech CTC: {btech_ctc}\n"\
"IDD/IMD CTC: {idd_imd_ctc}\n"\
"X cutoff: {x}\n"\
"XII cutoff: {xii}\n"\
"CGPA cutoff: {cgpa}\n"\
"Status: {status}\n\n"\
"Will keep you updated with this company :D.\n"\
"Cya :)",
'updated_company': "Baba has updates to deliver!\n\n"\
"{0} got updated on the portal\n\n"\
"Updated fields are: \n\n"\
"{1}\n"\
"{2}"\
"\n\nThis is it for now.\nCya :)",
#{1} will store update message
'abuse': "You are so abusive, next time, I'll deactivate your account 😡😠😡",
'lol': "Lol, I was kidding,,. 😜😝😂",
'master': "My master made me because TPO developers ko to `सीनेमा` ne barbaad karke rakkha hai.. "\
"and he knows very well, that jab tak iss des me `सीनेमा` hai, tab tak log * "\
"bante rahege ;P \n\n"\
"P.S. This was a joke, it has nothing to do with anything, we respect TPO portal "\
"developers they have made a great portal. \n"\
"Ask for me for help, if you wanna know what you have got to do.",
'idd_imd_4th_year': "Ops!, you are from 4rth year IDD/IMD, I don't wanna disturb you with updates. \n"\
"I'll have to set your account Invalid.\n\n"\
"For further queries contact https://m.me/rishabh.ags/"
}
field_msg_dict = {
'company_profile': 'Company Profile',
'x': 'X',
'xii': 'XII',
'cgpa': 'CGPA',
'course': 'Course',
'purpose': 'Purpose',
'department': 'Department',
'a_backlog': 'Active backlogs allowed',
't_backlog': 'Total backlogs allowed',
'ppt_date': 'PPT date',
'exam_date': 'Exam date',
'status': 'Status',
'branch_issue_dead': 'Branch issue deadline',
'willingness_dead': 'Willingness deadline',
'btech_ctc': 'B.Tech CTC',
'idd_imd_ctc':'IDD/IMD CTC',
# 'jd': 'JD',
}
# "TPO developers ko to `सीनेमा` ne barbaad karke rakkha hai.. ;P\n"
# "So, hum denge aapko sare updates, about new companies listed in the portal,willingness opening "\
# "and closing reminders ppt reminders, exam date reminders aur bhi bahot kuchh..\n"\
# 'invalid_course': "Baba wants valid course name (btech or idd or imd).\n retry please.",
# "Active backlogs allowed: {8}\n"\
# "Total backlogs allowed: {9}\n"\
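As an illustration (not part of the original module), the templated entries above are plain str.format templates, mixing positional and named placeholders; all values below are made up for demonstration:
print(message_dict['email_set'].format('someone@example.com'))
print(message_dict['new_company'].format(
    company_name='Acme Corp', course='btech', department='CSE',
    btech_ctc='12 LPA', idd_imd_ctc='12 LPA',
    x='60', xii='60', cgpa='7.0', status='Open',
))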
| 48.151724
| 112
| 0.560011
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 5,366
| 0.759734
|
e9e2e74f010f4bd4956a3cbde97bcbf8f121ba63
| 5,208
|
py
|
Python
|
geomstats/geometry/matrices.py
|
PabloJ-1/geomstats
|
b53f62b745b21972b80bd7222df9af2549b66d64
|
[
"MIT"
] | null | null | null |
geomstats/geometry/matrices.py
|
PabloJ-1/geomstats
|
b53f62b745b21972b80bd7222df9af2549b66d64
|
[
"MIT"
] | null | null | null |
geomstats/geometry/matrices.py
|
PabloJ-1/geomstats
|
b53f62b745b21972b80bd7222df9af2549b66d64
|
[
"MIT"
] | null | null | null |
"""Module exposing the `Matrices` and `MatricesMetric` class."""
from functools import reduce
import geomstats.backend as gs
from geomstats.geometry.euclidean import Euclidean
from geomstats.geometry.riemannian_metric import RiemannianMetric
TOLERANCE = 1e-5
class Matrices(Euclidean):
"""Class for the space of matrices (m, n)."""
def __init__(self, m, n):
assert isinstance(m, int) and isinstance(n, int) and m > 0 and n > 0
super(Matrices, self).__init__(dimension=m * n)
self.m = m
self.n = n
self.default_point_type = 'matrix'
self.metric = MatricesMetric(m, n)
def belongs(self, point):
"""Check if point belongs to the Matrix space."""
point = gs.to_ndarray(point, to_ndim=3)
_, mat_dim_1, mat_dim_2 = point.shape
return (mat_dim_1 == self.m) and (mat_dim_2 == self.n)
@staticmethod
def equal(mat_a, mat_b, atol=TOLERANCE):
"""
Test if matrices a and b are close.
Parameters
----------
mat_a : array-like, shape=[n_samples, dim1, dim2]
mat_b : array-like, shape=[n_samples, dim1, dim2]
Returns
-------
eq : array-like boolean, shape=[n_samples]
"""
is_vectorized = \
(gs.ndim(gs.array(mat_a)) == 3) or (gs.ndim(gs.array(mat_b)) == 3)
axes = (1, 2) if is_vectorized else (0, 1)
return gs.all(gs.isclose(mat_a, mat_b, atol=atol), axes)
@staticmethod
def mul(*args):
"""
Return the product of matrices a1, ..., an.
Parameters
----------
a1 : array-like, shape=[n_samples, dim_1, dim_2]
a2 : array-like, shape=[n_samples, dim_2, dim_3]
...
an : array-like, shape=[n_samples, dim_n-1, dim_n]
Returns
-------
mul : array-like, shape=[n_samples, dim_1, dim_n]
"""
return reduce(gs.matmul, args)
@classmethod
def bracket(cls, mat_a, mat_b):
"""
Return the commutator of a and b, i.e. `[a, b] = ab - ba`.
Parameters
----------
mat_a : array-like, shape=[n_samples, dim, dim]
mat_b : array-like, shape=[n_samples, dim, dim]
Returns
-------
mat_c : array-like, shape=[n_samples, dim, dim]
"""
return cls.mul(mat_a, mat_b) - cls.mul(mat_b, mat_a)
@staticmethod
def transpose(mat):
"""Return the transpose of matrices.
Parameters
----------
mat : array-like, shape=[n_samples, dim, dim]
Returns
-------
transpose : array-like, shape=[n_samples, dim, dim]
"""
is_vectorized = (gs.ndim(gs.array(mat)) == 3)
axes = (0, 2, 1) if is_vectorized else (1, 0)
return gs.transpose(mat, axes)
@classmethod
def is_symmetric(cls, mat, atol=TOLERANCE):
"""
Check if a matrix is symmetric.
Parameters
----------
mat : array-like, shape=[n_samples, n, n]
atol : float, absolute tolerance. defaults to TOLERANCE
Returns
-------
is_sym : array-like boolean, shape=[n_samples]
"""
return cls.equal(mat, cls.transpose(mat), atol)
@classmethod
def make_symmetric(cls, mat):
"""
Make a matrix symmetric, by averaging with its transpose.
Parameters
----------
mat : array-like, shape=[n_samples, n, n]
Returns
-------
sym : array-like, shape=[n_samples, n, n]
"""
return 1 / 2 * (mat + cls.transpose(mat))
def random_uniform(self, n_samples=1):
"""Generate n samples from a uniform distribution."""
point = gs.random.rand(n_samples, self.m, self.n)
return point
@classmethod
def congruent(cls, mat_1, mat_2):
"""Compute the congruent action of mat_2 on mat_1.
This is :math: `mat_2 mat_1 mat_2^T`.
Parameters
----------
mat_1 : array-like, shape=[n_samples, n, n]
mat_2 : array-like, shape=[n_samples, n, n]
Returns
-------
cong : array-like, shape=[n_samples, n, n]
"""
return cls.mul(mat_2, mat_1, cls.transpose(mat_2))
class MatricesMetric(RiemannianMetric):
"""Euclidean metric on matrices given by Frobenius inner product."""
def __init__(self, m, n):
dimension = m * n
super(MatricesMetric, self).__init__(
dimension=dimension,
signature=(dimension, 0, 0))
def inner_product(self, tangent_vec_a, tangent_vec_b, base_point=None):
"""Compute Frobenius inner product of two tan vecs at `base_point`."""
tangent_vec_a = gs.to_ndarray(tangent_vec_a, to_ndim=3)
n_tangent_vecs_a, _, _ = tangent_vec_a.shape
tangent_vec_b = gs.to_ndarray(tangent_vec_b, to_ndim=3)
n_tangent_vecs_b, _, _ = tangent_vec_b.shape
assert n_tangent_vecs_a == n_tangent_vecs_b
inner_prod = gs.einsum("nij,nij->n", tangent_vec_a, tangent_vec_b)
inner_prod = gs.to_ndarray(inner_prod, to_ndim=1)
inner_prod = gs.to_ndarray(inner_prod, to_ndim=2, axis=1)
return inner_prod
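# Illustrative usage sketch (not part of the original module; shapes follow the docstrings above):
#   space = Matrices(2, 2)
#   points = space.random_uniform(n_samples=3)        # shape [3, 2, 2]
#   sym = Matrices.make_symmetric(points)              # average each matrix with its transpose
#   Matrices.is_symmetric(sym)                         # -> boolean array of shape [3]
#   space.metric.inner_product(points, points)         # Frobenius inner products, shape [3, 1]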
| 29.590909
| 78
| 0.576421
| 4,939
| 0.948349
| 0
| 0
| 3,149
| 0.604647
| 0
| 0
| 2,490
| 0.478111
|
e9e2f70538bbc55ae42d19558eee76ef0345309a
| 2,338
|
py
|
Python
|
gamutrf/mqtt_reporter.py
|
cglewis/gamutRF
|
d95b36f5893f165ff02701636c82662727d6e275
|
[
"Apache-2.0"
] | null | null | null |
gamutrf/mqtt_reporter.py
|
cglewis/gamutRF
|
d95b36f5893f165ff02701636c82662727d6e275
|
[
"Apache-2.0"
] | null | null | null |
gamutrf/mqtt_reporter.py
|
cglewis/gamutRF
|
d95b36f5893f165ff02701636c82662727d6e275
|
[
"Apache-2.0"
] | null | null | null |
import gpsd
import json
import logging
import socket
import httpx
import paho.mqtt.client as mqtt
class MQTTReporter:
def __init__(self, name, mqtt_server=None, gps_server=None, compass=False):
self.name = name
self.mqtt_server = mqtt_server
self.compass = compass
self.gps_server = gps_server
self.mqttc = None
self.bearing = 'no bearing'
def connect(self):
logging.info(f'connecting to {self.mqtt_server}')
self.mqttc = mqtt.Client()
self.mqttc.connect(self.mqtt_server)
self.mqttc.loop_start()
if self.gps_server:
gpsd.connect(host=self.gps_server, port=2947)
def get_bearing(self):
try:
self.bearing = str(float(httpx.get(f'http://{self.gps_server}:8000/v1/').text))
except Exception as err:
logging.error('could not update bearing: %s', err)
def add_gps(self, publish_args):
if not self.gps_server:
return publish_args
publish_args.update({
'position': [0, 0],
'altitude': None,
'gps_time': None,
'map_url': None,
'bearing': self.bearing,
'gps': 'no fix'})
try:
if self.compass:
self.get_bearing()
packet = gpsd.get_current()
publish_args.update({
'position': packet.position(),
'altitude': packet.altitude(),
'gps_time': packet.get_time().timestamp(),
'map_url': packet.map_url(),
'bearing': self.bearing,
'gps': 'fix'})
except (gpsd.NoFixError, AttributeError) as err:
logging.error('could not update with GPS: %s', err)
return publish_args
def publish(self, publish_path, publish_args):
if not self.mqtt_server:
return
try:
if self.mqttc is None:
self.connect()
publish_args = self.add_gps(publish_args)
publish_args['name'] = self.name
self.mqttc.publish(publish_path, json.dumps(publish_args))
except (socket.gaierror, ConnectionRefusedError, mqtt.WebsocketConnectionError, ValueError) as err:
logging.error(f'failed to publish to MQTT {self.mqtt_server}: {err}')
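# Illustrative usage sketch (broker and topic names are placeholders, not taken from this file):
#   reporter = MQTTReporter('scanner-1', mqtt_server='localhost')
#   reporter.publish('gamutrf/scan', {'center_freq': 100e6, 'power_dbm': -50})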
| 33.4
| 107
| 0.579983
| 2,236
| 0.956373
| 0
| 0
| 0
| 0
| 0
| 0
| 323
| 0.138152
|
e9e522181523a4e229d498e313189c98d24c3d87
| 7,377
|
py
|
Python
|
2onnx.py
|
Yifanfanfanfan/flops-counter.pytorch
|
5e7670106511f42f258083a01318b386605b61e7
|
[
"MIT"
] | null | null | null |
2onnx.py
|
Yifanfanfanfan/flops-counter.pytorch
|
5e7670106511f42f258083a01318b386605b61e7
|
[
"MIT"
] | null | null | null |
2onnx.py
|
Yifanfanfanfan/flops-counter.pytorch
|
5e7670106511f42f258083a01318b386605b61e7
|
[
"MIT"
] | null | null | null |
import os, sys, time, shutil, argparse
from functools import partial
import pickle
sys.path.append('../')
import torch
import torch.nn as nn
from torch.autograd import Variable
from torchvision import datasets, transforms
#import torchvision.models as models
import torch.optim as optim
import torch.nn.parallel
import torch.backends.cudnn as cudnn
import torch.distributed as dist
import torch.optim as optim
import torch.multiprocessing as mp
from collections import OrderedDict
import torch.utils.data
import torch.utils.data.distributed
import torch.onnx as torch_onnx
import onnx
import numpy as np
import matplotlib.pyplot as plt
from skimage.color import lab2rgb
from skimage import io
# import prune_util
# from prune_util import GradualWarmupScheduler
# from prune_util import CrossEntropyLossMaybeSmooth
# from prune_util import mixup_data, mixup_criterion
# from utils import save_checkpoint, AverageMeter, visualize_image, GrayscaleImageFolder
# from model import ColorNet
#from wdsr_b import *
#from args import *
import captioning.utils.opts as opts
import captioning.models as models
import captioning.utils.misc as utils
import onnxruntime
def to_numpy(tensor):
    # Helper assumed by check() below; it is not defined in the original file.
    # Converts a torch tensor to a NumPy array for comparison with ONNX Runtime outputs.
    return tensor.detach().cpu().numpy() if tensor.requires_grad else tensor.cpu().numpy()
def main():
use_gpu = torch.cuda.is_available()
# Create model
# models.resnet18(num_classes=365)
# model = ColorNet()
#args = get_args()
#model = MODEL(args)
# state_dict = torch.load("./checkpoint/checkpoint6/model_epoch133_step1.pth")
# new_state_dict = OrderedDict()
# for k, v in state_dict.items():
# k = k.replace('module.', '')
# new_state_dict[k] = v
# model = torch.nn.DataParallel(model)
# model.load_state_dict(new_state_dict)
parser = argparse.ArgumentParser()
# Input paths
parser.add_argument('--model', type=str, default='',
help='path to model to evaluate')
parser.add_argument('--cnn_model', type=str, default='resnet101',
help='resnet101, resnet152')
parser.add_argument('--infos_path', type=str, default='',
help='path to infos to evaluate')
parser.add_argument('--only_lang_eval', type=int, default=0,
help='lang eval on saved results')
parser.add_argument('--force', type=int, default=0,
help='force to evaluate no matter if there are results available')
opts.add_eval_options(parser)
opts.add_diversity_opts(parser)
opt = parser.parse_args()
opt.caption_model = 'newfc'
opt.infos_path = '/home/zzgyf/github_yifan/ImageCaptioning.pytorch/models/infos_fc_nsc-best.pkl'
with open(opt.infos_path, 'rb') as f:
infos = utils.pickle_load(f)
replace = ['input_fc_dir', 'input_att_dir', 'input_box_dir', 'input_label_h5', 'input_json', 'batch_size', 'id']
ignore = ['start_from']
for k in vars(infos['opt']).keys():
if k in replace:
setattr(opt, k, getattr(opt, k) or getattr(infos['opt'], k, ''))
elif k not in ignore:
if not k in vars(opt):
vars(opt).update({k: vars(infos['opt'])[k]}) # copy over options from model
vocab = infos['vocab'] # ix -> word mapping
opt.vocab = vocab
model = models.setup(opt)
checkpoint = torch.load("/home/zzgyf/github_yifan/ImageCaptioning.pytorch/models/model-best.pth")
model.load_state_dict(checkpoint)
# print(model)
#input_shape = (1, 256, 256)
cocotest_bu_fc_size = (10, 2048)
cocotest_bu_att_size = (10, 0, 0)
labels_size = (10, 5, 18)
masks_size = (10, 5, 18)
model_onnx_path = "./image_captioning.onnx"
model.train(False)
# Export the model to an ONNX file
# dummy_input = Variable(torch.randn(1, *input_shape))
# dummy_input = Variable(torch.randn(10, 2048), torch.randn(10, 0, 0), torch.randint(5200, (10, 5, 18)), torch.randint(1, (10, 5, 18)))
dummy_cocotest_bu_fc = Variable(torch.randn(10, 2048))
dummy_cocotest_bu_att = Variable(torch.randn(10, 0, 0))
dummy_labels = Variable(torch.randint(5200, (10, 5, 18)))
dummy_masks = Variable(torch.randint(1, (10, 5, 18)))
#output = torch_onnx.export(model, dummy_input, model_onnx_path, verbose=False)
output = torch_onnx.export(model, (dummy_cocotest_bu_fc, dummy_cocotest_bu_att, dummy_labels, dummy_masks), model_onnx_path, verbose=False)
print("Export of torch_model.onnx complete!")
def check():
parser = argparse.ArgumentParser()
# Input paths
parser.add_argument('--model', type=str, default='',
help='path to model to evaluate')
parser.add_argument('--cnn_model', type=str, default='resnet101',
help='resnet101, resnet152')
parser.add_argument('--infos_path', type=str, default='',
help='path to infos to evaluate')
parser.add_argument('--only_lang_eval', type=int, default=0,
help='lang eval on saved results')
parser.add_argument('--force', type=int, default=0,
help='force to evaluate no matter if there are results available')
opts.add_eval_options(parser)
opts.add_diversity_opts(parser)
opt = parser.parse_args()
opt.caption_model = 'newfc'
opt.infos_path = '/home/zzgyf/github_yifan/ImageCaptioning.pytorch/models/infos_fc_nsc-best.pkl'
with open(opt.infos_path, 'rb') as f:
infos = utils.pickle_load(f)
replace = ['input_fc_dir', 'input_att_dir', 'input_box_dir', 'input_label_h5', 'input_json', 'batch_size', 'id']
ignore = ['start_from']
for k in vars(infos['opt']).keys():
if k in replace:
setattr(opt, k, getattr(opt, k) or getattr(infos['opt'], k, ''))
elif k not in ignore:
if not k in vars(opt):
vars(opt).update({k: vars(infos['opt'])[k]}) # copy over options from model
vocab = infos['vocab'] # ix -> word mapping
opt.vocab = vocab
model = models.setup(opt)
checkpoint = torch.load("/home/zzgyf/github_yifan/ImageCaptioning.pytorch/models/model-best.pth")
model.load_state_dict(checkpoint)
# torch.nn.utils.remove_weight_norm(model.head[0])
# for i in range(2):
# for j in [0,2,3]:
# torch.nn.utils.remove_weight_norm(model.body[i].body[j])
# torch.nn.utils.remove_weight_norm(model.tail[0])
# torch.nn.utils.remove_weight_norm(model.skip[0])
model.eval()
ort_session = onnxruntime.InferenceSession("image_captioning.onnx")
dummy_cocotest_bu_fc = Variable(torch.randn(10, 2048))
dummy_cocotest_bu_att = Variable(torch.randn(10, 0, 0))
dummy_labels = Variable(torch.randint(5200, (10, 5, 18)))
dummy_masks = Variable(torch.randint(1, (10, 5, 18)))
    x = (dummy_cocotest_bu_fc, dummy_cocotest_bu_att, dummy_labels, dummy_masks)
    #x = torch.randn(1, 3, 392, 392, requires_grad=False)
    # Run the PyTorch model on the same dummy inputs so its output can be compared below.
    torch_out = model(*x)
# # Load the ONNX model
# model = onnx.load("wdsr_b.onnx")
# # Check that the IR is well formed
# onnx.checker.check_model(model)
# # Print a human readable representation of the graph
# onnx.helper.printable_graph(model.graph)
# compute ONNX Runtime output prediction
    # Map each dummy input tensor onto the corresponding ONNX graph input by position.
    ort_inputs = {inp.name: to_numpy(t) for inp, t in zip(ort_session.get_inputs(), x)}
ort_outs = ort_session.run(None, ort_inputs)
# compare ONNX Runtime and PyTorch results
np.testing.assert_allclose(to_numpy(torch_out), ort_outs[0], rtol=1e-03, atol=1e-05)
if __name__ == '__main__':
main()
check()
| 38.222798
| 143
| 0.682256
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 3,015
| 0.408703
|
e9e8878237d9fdf426e86b2606cac1e238054e1a
| 8,888
|
py
|
Python
|
arapheno/phenotypedb/migrations/0001_initial.py
|
svengato/AraPheno
|
d6918e2e69c497b7096d9291d904c69310e84d06
|
[
"MIT"
] | 5
|
2018-03-24T08:54:50.000Z
|
2021-01-19T03:19:42.000Z
|
arapheno/phenotypedb/migrations/0001_initial.py
|
svengato/AraPheno
|
d6918e2e69c497b7096d9291d904c69310e84d06
|
[
"MIT"
] | 38
|
2016-08-14T12:09:15.000Z
|
2020-10-30T06:02:24.000Z
|
arapheno/phenotypedb/migrations/0001_initial.py
|
svengato/AraPheno
|
d6918e2e69c497b7096d9291d904c69310e84d06
|
[
"MIT"
] | 8
|
2016-08-15T06:07:32.000Z
|
2020-11-06T06:43:56.000Z
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.7 on 2016-06-27 14:12
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Accession',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(blank=True, db_index=True, max_length=255, null=True)),
('country', models.CharField(blank=True, max_length=255, null=True)),
('sitename', models.TextField(blank=True, null=True)),
('collector', models.TextField(blank=True, null=True)),
('collection_date', models.DateTimeField(blank=True, null=True)),
('longitude', models.FloatField(blank=True, db_index=True, null=True)),
('latitude', models.FloatField(blank=True, db_index=True, null=True)),
('cs_number', models.CharField(blank=True, max_length=255, null=True)),
],
),
migrations.CreateModel(
name='Author',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('firstname', models.CharField(blank=True, max_length=100, null=True)),
('lastname', models.CharField(blank=True, db_index=True, max_length=200, null=True)),
],
),
migrations.CreateModel(
name='ObservationUnit',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('accession', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='phenotypedb.Accession')),
],
),
migrations.CreateModel(
name='OntologySource',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('acronym', models.CharField(max_length=50)),
('name', models.CharField(max_length=255)),
('url', models.URLField()),
],
),
migrations.CreateModel(
name='OntologyTerm',
fields=[
('id', models.CharField(max_length=50, primary_key=True, serialize=False)),
('name', models.CharField(max_length=255)),
('definition', models.TextField(blank=True, null=True)),
('comment', models.TextField(blank=True, null=True)),
('source', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='phenotypedb.OntologySource')),
],
),
migrations.CreateModel(
name='Phenotype',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('doi', models.CharField(blank=True, db_index=True, max_length=255, null=True)),
('name', models.CharField(db_index=True, max_length=255)),
('scoring', models.TextField(blank=True, null=True)),
('source', models.TextField(blank=True, null=True)),
('type', models.CharField(blank=True, max_length=255, null=True)),
('growth_conditions', models.TextField(blank=True, null=True)),
('shapiro_test_statistic', models.FloatField(blank=True, null=True)),
('shapiro_p_value', models.FloatField(blank=True, null=True)),
('number_replicates', models.IntegerField(default=0)),
('integration_date', models.DateTimeField(auto_now_add=True)),
],
),
migrations.CreateModel(
name='PhenotypeMetaDynamic',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('phenotype_meta_field', models.CharField(db_index=True, max_length=255)),
('phenotype_meta_value', models.TextField()),
('phenotype_public', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='phenotypedb.Phenotype')),
],
),
migrations.CreateModel(
name='PhenotypeValue',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('value', models.FloatField()),
('obs_unit', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='phenotypedb.ObservationUnit')),
('phenotype', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='phenotypedb.Phenotype')),
],
),
migrations.CreateModel(
name='Publication',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('author_order', models.TextField()),
('publication_tag', models.CharField(max_length=255)),
('pub_year', models.IntegerField(blank=True, null=True)),
('title', models.CharField(db_index=True, max_length=255)),
('journal', models.CharField(max_length=255)),
('volume', models.CharField(blank=True, max_length=255, null=True)),
('pages', models.CharField(blank=True, max_length=255, null=True)),
('doi', models.CharField(blank=True, db_index=True, max_length=255, null=True)),
('pubmed_id', models.CharField(blank=True, db_index=True, max_length=255, null=True)),
('authors', models.ManyToManyField(to='phenotypedb.Author')),
],
),
migrations.CreateModel(
name='Species',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('ncbi_id', models.IntegerField(blank=True, null=True)),
('genus', models.CharField(max_length=255)),
('species', models.CharField(max_length=255)),
('description', models.TextField(blank=True, null=True)),
],
),
migrations.CreateModel(
name='Study',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=255)),
('description', models.TextField(blank=True, null=True)),
('publications', models.ManyToManyField(blank=True, to='phenotypedb.Publication')),
('species', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='phenotypedb.Species')),
],
),
migrations.AddField(
model_name='phenotype',
name='dynamic_metainformations',
field=models.ManyToManyField(to='phenotypedb.PhenotypeMetaDynamic'),
),
migrations.AddField(
model_name='phenotype',
name='eo_term',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='eo_term', to='phenotypedb.OntologyTerm'),
),
migrations.AddField(
model_name='phenotype',
name='species',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='phenotypedb.Species'),
),
migrations.AddField(
model_name='phenotype',
name='study',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='phenotypedb.Study'),
),
migrations.AddField(
model_name='phenotype',
name='to_term',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='to_term', to='phenotypedb.OntologyTerm'),
),
migrations.AddField(
model_name='phenotype',
name='uo_term',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='uo_term', to='phenotypedb.OntologyTerm'),
),
migrations.AddField(
model_name='observationunit',
name='study',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='phenotypedb.Study'),
),
migrations.AddField(
model_name='accession',
name='species',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='phenotypedb.Species'),
),
]
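# Hypothetical invocation (standard Django workflow, not part of the generated migration file):
#   python manage.py migrate phenotypedb 0001_initial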
| 50.5
| 159
| 0.588884
| 8,698
| 0.978623
| 0
| 0
| 0
| 0
| 0
| 0
| 1,458
| 0.164041
|
e9e93ad17a56b7c2432a305bc659635d4fd17d0c
| 1,870
|
py
|
Python
|
prior_library_release.py
|
DReichLab/adna-workflow
|
07c6da8e64234decb7373fe7109e09395a45cb58
|
[
"BSD-3-Clause"
] | 9
|
2019-05-28T11:16:14.000Z
|
2022-02-24T01:22:47.000Z
|
prior_library_release.py
|
DReichLab/adna-workflow
|
07c6da8e64234decb7373fe7109e09395a45cb58
|
[
"BSD-3-Clause"
] | 3
|
2020-01-09T20:12:02.000Z
|
2020-11-17T14:50:28.000Z
|
prior_library_release.py
|
DReichLab/adna-workflow
|
07c6da8e64234decb7373fe7109e09395a45cb58
|
[
"BSD-3-Clause"
] | 1
|
2019-08-04T12:46:01.000Z
|
2019-08-04T12:46:01.000Z
|
from release_libraries import LibraryParameters
from bam_finder import getBamPath, library_default_dir, MT_default_dir, ShopVersion
import argparse
import re
from has_read_groups import read_group_checks
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Augment the bam list for a release with a prior existing version of the library")
parser.add_argument("bam_list", help="Each line contains the parameters to build a library bam for release. This includes the library ID, the individual ID, experiment, read group description (sequencing run name with experiment type and udg treatment), experiment, and (bam, sequencing run date) pairs ")
args = parser.parse_args()
with open(args.bam_list) as f:
library_parameters = [LibraryParameters(line) for line in f]
for x in library_parameters:
experiment = x.experiment
if '1240k' in experiment:
experiment = '1240k'
search_directory = MT_default_dir if x.reference == 'rsrs' else library_default_dir
existingBAM = getBamPath(x.library_id, experiment=experiment, reference=x.reference, version_policy='latest', shop_parent_directory=search_directory)
bam = str(existingBAM)
#print(bam)
if len(bam) > 0:
try: # this will match a new pipeline bam
match = re.search('v([0-9]+).bam', bam)
new_version = int(match.group(1)) + 1
has_read_groups, has_real_library_name, date_string = read_group_checks(bam)
except: # if the existing version is Shop's
new_version = 1
shop = ShopVersion(bam)
date_string = shop.date_string
#print('{}\t{}\t{:d}'.format(x.library_id, bam, new_version))
x.version = new_version
x.bam_filenames.append(str(existingBAM))
x.bam_date_strings.append(date_string) # the bam date string is used for generating read groups, which the existing bam does not need
#print('{}\t{}'.format(x.library_id, bam))
print(x)
| 49.210526
| 306
| 0.758289
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 690
| 0.368984
|
e9e95132c690c91397faab36e332edee82e1ac48
| 3,818
|
py
|
Python
|
scratch/msf/fast_sample_data.py
|
sasgc6/pysmurf
|
a370b515ab717c982781223da147bea3c8fb3a9c
|
[
"BSD-3-Clause-LBNL"
] | 3
|
2019-10-17T02:37:59.000Z
|
2022-03-09T16:42:34.000Z
|
scratch/msf/fast_sample_data.py
|
sasgc6/pysmurf
|
a370b515ab717c982781223da147bea3c8fb3a9c
|
[
"BSD-3-Clause-LBNL"
] | 446
|
2019-04-10T04:46:20.000Z
|
2022-03-15T20:27:57.000Z
|
scratch/msf/fast_sample_data.py
|
sasgc6/pysmurf
|
a370b515ab717c982781223da147bea3c8fb3a9c
|
[
"BSD-3-Clause-LBNL"
] | 13
|
2019-02-05T18:02:05.000Z
|
2021-03-02T18:41:49.000Z
|
import numpy as np
import matplotlib.pyplot as plt
import scipy.signal as signal
plt.ion()
bands = [2,3]
single_channel_readout = 2
nsamp = 2**25
new_chans = False
def etaPhaseModDegree(etaPhase):
return (etaPhase+180)%360-180
#For resonator I/Q high sampled data use eta_mag + eta_phase found in eta scans for Q and +/- 90 deg for I, for off resonance data to look at HEMT, etc set eta_mag = 1 and eta_phase = 0 & 90 or the eta_phase from the closest resonator for "Q" and that +/- 90 for "I"
#In single_channel_readout mode 2 you take data at 2.4MHz and don't need to worry about decimation & filter_alpha, for single_channel_reaout = 1 600 kHz data you do, see confluence page https://confluence.slac.stanford.edu/display/SMuRF/SMuRF+firmware#SMuRFfirmware-Datamodes
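# Sketch of how the loop below realizes the four probe quadratures relative to qEtaPhaseDegree:
#   'Q0' -> qEtaPhaseDegree, 'Q+' -> qEtaPhaseDegree + 180 deg,
#   'I+' -> qEtaPhaseDegree + 90 deg, 'I-' -> qEtaPhaseDegree - 90 deg
#   ('Q+', 'I+', 'I-' are wrapped to [-180, 180) by etaPhaseModDegree).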
if new_chans == True:
chans = {}
freqs = {}
sbs = {}
eta_mags_scaled = {}
eta_phases = {}
for band in bands:
chans[band] = S.which_on(band)
freqs[band] = []
sbs[band] = []
eta_mags_scaled[band] = []
eta_phases[band] = []
for chan in chans[band]:
freqs[band].append(S.channel_to_freq(band,chan))
sbs[band].append(S.freq_to_subband(band,S.channel_to_freq(band,chan))[0])
eta_mags_scaled[band].append(S.get_eta_mag_scaled_channel(band,chan))
eta_phases[band].append(S.get_eta_phase_degree_channel(band,chan))
S.channel_off(band,chan)
freqs[band] = np.asarray(freqs[band])
sbs[band] = np.asarray(sbs[band])
eta_mags_scaled[band] = np.asarray(eta_mags_scaled[band])
eta_phases[band] = np.asarray(eta_phases[band])
for band in bands:
for i,chan in enumerate(chans[band]):
plt.figure()
S.set_fixed_tone(freqs[band][i],12)
S.set_feedback_enable(band,0)
#S.run_serial_gradient_descent(band)
#S.run_serial_eta_scan(band)
S.flux_ramp_off()
#qEtaPhaseDegree = eta_phases[band][i]
qEtaPhaseDegree = 0
#EtaMag = eta_mags_scaled[band][i]
EtaMag = 1
channel = S.which_on(band)[0]
S.set_eta_mag_scaled_channel(band,channel,EtaMag)
alpha = 1.0
for IorQ in ['Q0','Q+','I+','I-']:
            if IorQ == 'Q0':
                S.set_eta_phase_degree_channel(band,channel,qEtaPhaseDegree)
            if IorQ == 'Q+':
                S.set_eta_phase_degree_channel(band,channel,etaPhaseModDegree(qEtaPhaseDegree+180))
            if IorQ == 'I+':
                S.set_eta_phase_degree_channel(band,channel,etaPhaseModDegree(qEtaPhaseDegree+90))
            if IorQ == 'I-':
                S.set_eta_phase_degree_channel(band,channel,etaPhaseModDegree(qEtaPhaseDegree-90))
ctime1=int(S.get_timestamp())
filename='%d.dat'%ctime1
# take ~56 sec of data (18750 Hz)^-1 * (2^20) ~ 55.9sec. Have to set kludge_sec=60.
f, df, sync = S.take_debug_data(band, channel=channel, IQstream=False, single_channel_readout=single_channel_readout, nsamp=nsamp,filename=str(ctime1));
f,Pxx = signal.welch(df,nperseg = 2**16,fs=2.4e6)
Pxx = np.sqrt(Pxx)
plt.loglog(f,Pxx,alpha=alpha,label = IorQ+': '+str(ctime1))
alpha = alpha*0.8
#dfs.append(df)
#data=fmt.format([str(ctime1),'%0.6f'%(S.channel_to_freq(band,channel)),filename,IorQ])
#of.write(data)
#of.flush()
plt.xlabel('Frequency [Hz]',fontsize = 16)
plt.ylabel('I/Q Noise',fontsize = 16)
plt.title('Resonator at '+str(np.round(freqs[band][i],1))+ 'MHz')
plt.legend()
plt.show()
plt.savefig(S.plot_dir+'/'+str(ctime1)+'_band_'+str(band)+'_chan_'+str(chan)+'.png')
plt.close()
S.channel_off(band,channel)
S.flux_ramp_on()
| 41.956044
| 275
| 0.628078
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,005
| 0.263227
|
e9e9975a7e35ce3210ca6631964e51dc707d8e9b
| 2,667
|
py
|
Python
|
kwiklib/utils/settings.py
|
fiath/test
|
b50898dafa90e93da48f573e0b3feb1bb6acd8de
|
[
"MIT",
"BSD-3-Clause"
] | 7
|
2015-01-20T13:55:51.000Z
|
2018-02-06T09:31:21.000Z
|
kwiklib/utils/settings.py
|
fiath/test
|
b50898dafa90e93da48f573e0b3feb1bb6acd8de
|
[
"MIT",
"BSD-3-Clause"
] | 6
|
2015-01-08T18:13:53.000Z
|
2016-06-22T09:53:53.000Z
|
kwiklib/utils/settings.py
|
fiath/test
|
b50898dafa90e93da48f573e0b3feb1bb6acd8de
|
[
"MIT",
"BSD-3-Clause"
] | 8
|
2015-01-22T22:57:19.000Z
|
2020-03-19T11:43:56.000Z
|
"""Internal persistent settings store with cPickle."""
# -----------------------------------------------------------------------------
# Imports
# -----------------------------------------------------------------------------
import cPickle
import os
from kwiklib.utils.globalpaths import ensure_folder_exists
# -----------------------------------------------------------------------------
# Utility functions
# -----------------------------------------------------------------------------
def load(filepath):
"""Load the settings from the file, and creates it if it does not exist."""
if not os.path.exists(filepath):
save(filepath)
with open(filepath, 'rb') as f:
settings = cPickle.load(f)
return settings
def save(filepath, settings={}):
"""Save the settings in the file."""
with open(filepath, 'wb') as f:
cPickle.dump(settings, f)
return settings
# -----------------------------------------------------------------------------
# Settings
# -----------------------------------------------------------------------------
class Settings(object):
"""Manage internal settings.
They are stored in a binary file in the user home folder.
    Settings are only loaded once from disk, as soon as a user preference field
    is explicitly requested.
"""
def __init__(self, appname=None, folder=None, filepath=None,
autosave=True):
"""The settings file is not loaded here, but only once when a field is
first accessed."""
self.appname = appname
self.folder = folder
self.filepath = filepath
        # Settings are loaded lazily; None means "not loaded from disk yet".
        self.settings = None
self.autosave = autosave
# I/O methods
# -----------
def _load_once(self):
"""Load or create the settings file, unless it has already been
loaded."""
if self.settings is None:
# Create the folder if it does not exist.
ensure_folder_exists(self.folder)
# Load or create the settings file.
self.settings = load(self.filepath)
def save(self):
save(self.filepath, self.settings)
# Getter and setter methods
# -------------------------
def set(self, key, value):
self._load_once()
self.settings[key] = value
if self.autosave:
self.save()
def get(self, key, default=None):
self._load_once()
return self.settings.get(key, default)
def __setitem__(self, key, value):
self.set(key, value)
def __getitem__(self, key):
return self.get(key)
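# Illustrative usage sketch (paths and keys are placeholders, not from the original module):
#   settings = Settings(appname='myapp', folder='/home/user/.myapp',
#                       filepath='/home/user/.myapp/settings')
#   settings.set('last_data_dir', '/data')   # persisted immediately because autosave=True
#   settings.get('last_data_dir')            # -> '/data'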
| 31.011628
| 79
| 0.490064
| 1,563
| 0.586052
| 0
| 0
| 0
| 0
| 0
| 0
| 1,243
| 0.466067
|
e9e9ed2bd4fb85cec280f41104f00f0f5fe284be
| 24,098
|
py
|
Python
|
cpu.py
|
philippechataignon/applepy
|
1b9d1709a4490f49fa06739bb44c0602bb07b730
|
[
"MIT"
] | null | null | null |
cpu.py
|
philippechataignon/applepy
|
1b9d1709a4490f49fa06739bb44c0602bb07b730
|
[
"MIT"
] | null | null | null |
cpu.py
|
philippechataignon/applepy
|
1b9d1709a4490f49fa06739bb44c0602bb07b730
|
[
"MIT"
] | null | null | null |
import sys
import pygame
from utils import signed
class CPU:
STACK_PAGE = 0x100
RESET_VECTOR = 0xFFFC
def __init__(self, options, memory):
self.options = options
self.memory = memory
self.accumulator = 0x00
self.x_index = 0x00
self.y_index = 0x00
self.carry_flag = 0
self.zero_flag = 0
self.interrupt_disable_flag = 0
self.decimal_mode_flag = 0
self.break_flag = 1
self.overflow_flag = 0
self.sign_flag = 0
self.stack_pointer = 0xFF
self.cycles = 0
self.setup_ops()
self.reset()
def setup_ops(self):
self.ops = [None] * 0x100
self.ops[0x00] = lambda: self.BRK()
self.ops[0x01] = lambda: self.ORA(self.indirect_x_mode())
self.ops[0x05] = lambda: self.ORA(self.zero_page_mode())
self.ops[0x06] = lambda: self.ASL(self.zero_page_mode())
self.ops[0x08] = lambda: self.PHP()
self.ops[0x09] = lambda: self.ORA(self.immediate_mode())
self.ops[0x0A] = lambda: self.ASL()
self.ops[0x0D] = lambda: self.ORA(self.absolute_mode())
self.ops[0x0E] = lambda: self.ASL(self.absolute_mode())
self.ops[0x10] = lambda: self.BPL(self.relative_mode())
self.ops[0x11] = lambda: self.ORA(self.indirect_y_mode())
self.ops[0x15] = lambda: self.ORA(self.zero_page_x_mode())
self.ops[0x16] = lambda: self.ASL(self.zero_page_x_mode())
self.ops[0x18] = lambda: self.CLC()
self.ops[0x19] = lambda: self.ORA(self.absolute_y_mode())
self.ops[0x1D] = lambda: self.ORA(self.absolute_x_mode())
self.ops[0x1E] = lambda: self.ASL(self.absolute_x_mode(rmw=True))
self.ops[0x20] = lambda: self.JSR(self.absolute_mode())
self.ops[0x21] = lambda: self.AND(self.indirect_x_mode())
self.ops[0x24] = lambda: self.BIT(self.zero_page_mode())
self.ops[0x25] = lambda: self.AND(self.zero_page_mode())
self.ops[0x26] = lambda: self.ROL(self.zero_page_mode())
self.ops[0x28] = lambda: self.PLP()
self.ops[0x29] = lambda: self.AND(self.immediate_mode())
self.ops[0x2A] = lambda: self.ROL()
self.ops[0x2C] = lambda: self.BIT(self.absolute_mode())
self.ops[0x2D] = lambda: self.AND(self.absolute_mode())
self.ops[0x2E] = lambda: self.ROL(self.absolute_mode())
self.ops[0x30] = lambda: self.BMI(self.relative_mode())
self.ops[0x31] = lambda: self.AND(self.indirect_y_mode())
self.ops[0x35] = lambda: self.AND(self.zero_page_x_mode())
self.ops[0x36] = lambda: self.ROL(self.zero_page_x_mode())
self.ops[0x38] = lambda: self.SEC()
self.ops[0x39] = lambda: self.AND(self.absolute_y_mode())
self.ops[0x3D] = lambda: self.AND(self.absolute_x_mode())
self.ops[0x3E] = lambda: self.ROL(self.absolute_x_mode(rmw=True))
self.ops[0x40] = lambda: self.RTI()
self.ops[0x41] = lambda: self.EOR(self.indirect_x_mode())
self.ops[0x45] = lambda: self.EOR(self.zero_page_mode())
self.ops[0x46] = lambda: self.LSR(self.zero_page_mode())
self.ops[0x48] = lambda: self.PHA()
self.ops[0x49] = lambda: self.EOR(self.immediate_mode())
self.ops[0x4A] = lambda: self.LSR()
self.ops[0x4C] = lambda: self.JMP(self.absolute_mode())
self.ops[0x4D] = lambda: self.EOR(self.absolute_mode())
self.ops[0x4E] = lambda: self.LSR(self.absolute_mode())
self.ops[0x50] = lambda: self.BVC(self.relative_mode())
self.ops[0x51] = lambda: self.EOR(self.indirect_y_mode())
self.ops[0x55] = lambda: self.EOR(self.zero_page_x_mode())
self.ops[0x56] = lambda: self.LSR(self.zero_page_x_mode())
self.ops[0x58] = lambda: self.CLI()
self.ops[0x59] = lambda: self.EOR(self.absolute_y_mode())
self.ops[0x5D] = lambda: self.EOR(self.absolute_x_mode())
self.ops[0x5E] = lambda: self.LSR(self.absolute_x_mode(rmw=True))
self.ops[0x60] = lambda: self.RTS()
self.ops[0x61] = lambda: self.ADC(self.indirect_x_mode())
self.ops[0x65] = lambda: self.ADC(self.zero_page_mode())
self.ops[0x66] = lambda: self.ROR(self.zero_page_mode())
self.ops[0x68] = lambda: self.PLA()
self.ops[0x69] = lambda: self.ADC(self.immediate_mode())
self.ops[0x6A] = lambda: self.ROR()
self.ops[0x6C] = lambda: self.JMP(self.indirect_mode())
self.ops[0x6D] = lambda: self.ADC(self.absolute_mode())
self.ops[0x6E] = lambda: self.ROR(self.absolute_mode())
self.ops[0x70] = lambda: self.BVS(self.relative_mode())
self.ops[0x71] = lambda: self.ADC(self.indirect_y_mode())
self.ops[0x75] = lambda: self.ADC(self.zero_page_x_mode())
self.ops[0x76] = lambda: self.ROR(self.zero_page_x_mode())
self.ops[0x78] = lambda: self.SEI()
self.ops[0x79] = lambda: self.ADC(self.absolute_y_mode())
self.ops[0x7D] = lambda: self.ADC(self.absolute_x_mode())
self.ops[0x7E] = lambda: self.ROR(self.absolute_x_mode(rmw=True))
self.ops[0x81] = lambda: self.STA(self.indirect_x_mode())
self.ops[0x84] = lambda: self.STY(self.zero_page_mode())
self.ops[0x85] = lambda: self.STA(self.zero_page_mode())
self.ops[0x86] = lambda: self.STX(self.zero_page_mode())
self.ops[0x88] = lambda: self.DEY()
self.ops[0x8A] = lambda: self.TXA()
self.ops[0x8C] = lambda: self.STY(self.absolute_mode())
self.ops[0x8D] = lambda: self.STA(self.absolute_mode())
self.ops[0x8E] = lambda: self.STX(self.absolute_mode())
self.ops[0x90] = lambda: self.BCC(self.relative_mode())
self.ops[0x91] = lambda: self.STA(self.indirect_y_mode(rmw=True))
self.ops[0x94] = lambda: self.STY(self.zero_page_x_mode())
self.ops[0x95] = lambda: self.STA(self.zero_page_x_mode())
self.ops[0x96] = lambda: self.STX(self.zero_page_y_mode())
self.ops[0x98] = lambda: self.TYA()
self.ops[0x99] = lambda: self.STA(self.absolute_y_mode(rmw=True))
self.ops[0x9A] = lambda: self.TXS()
self.ops[0x9D] = lambda: self.STA(self.absolute_x_mode(rmw=True))
self.ops[0xA0] = lambda: self.LDY(self.immediate_mode())
self.ops[0xA1] = lambda: self.LDA(self.indirect_x_mode())
self.ops[0xA2] = lambda: self.LDX(self.immediate_mode())
self.ops[0xA4] = lambda: self.LDY(self.zero_page_mode())
self.ops[0xA5] = lambda: self.LDA(self.zero_page_mode())
self.ops[0xA6] = lambda: self.LDX(self.zero_page_mode())
self.ops[0xA8] = lambda: self.TAY()
self.ops[0xA9] = lambda: self.LDA(self.immediate_mode())
self.ops[0xAA] = lambda: self.TAX()
self.ops[0xAC] = lambda: self.LDY(self.absolute_mode())
self.ops[0xAD] = lambda: self.LDA(self.absolute_mode())
self.ops[0xAE] = lambda: self.LDX(self.absolute_mode())
self.ops[0xB0] = lambda: self.BCS(self.relative_mode())
self.ops[0xB1] = lambda: self.LDA(self.indirect_y_mode())
self.ops[0xB4] = lambda: self.LDY(self.zero_page_x_mode())
self.ops[0xB5] = lambda: self.LDA(self.zero_page_x_mode())
self.ops[0xB6] = lambda: self.LDX(self.zero_page_y_mode())
self.ops[0xB8] = lambda: self.CLV()
self.ops[0xB9] = lambda: self.LDA(self.absolute_y_mode())
self.ops[0xBA] = lambda: self.TSX()
self.ops[0xBC] = lambda: self.LDY(self.absolute_x_mode())
self.ops[0xBD] = lambda: self.LDA(self.absolute_x_mode())
self.ops[0xBE] = lambda: self.LDX(self.absolute_y_mode())
self.ops[0xC0] = lambda: self.CPY(self.immediate_mode())
self.ops[0xC1] = lambda: self.CMP(self.indirect_x_mode())
self.ops[0xC4] = lambda: self.CPY(self.zero_page_mode())
self.ops[0xC5] = lambda: self.CMP(self.zero_page_mode())
self.ops[0xC6] = lambda: self.DEC(self.zero_page_mode())
self.ops[0xC8] = lambda: self.INY()
self.ops[0xC9] = lambda: self.CMP(self.immediate_mode())
self.ops[0xCA] = lambda: self.DEX()
self.ops[0xCC] = lambda: self.CPY(self.absolute_mode())
self.ops[0xCD] = lambda: self.CMP(self.absolute_mode())
self.ops[0xCE] = lambda: self.DEC(self.absolute_mode())
self.ops[0xD0] = lambda: self.BNE(self.relative_mode())
self.ops[0xD1] = lambda: self.CMP(self.indirect_y_mode())
self.ops[0xD5] = lambda: self.CMP(self.zero_page_x_mode())
self.ops[0xD6] = lambda: self.DEC(self.zero_page_x_mode())
self.ops[0xD8] = lambda: self.CLD()
self.ops[0xD9] = lambda: self.CMP(self.absolute_y_mode())
self.ops[0xDD] = lambda: self.CMP(self.absolute_x_mode())
self.ops[0xDE] = lambda: self.DEC(self.absolute_x_mode(rmw=True))
self.ops[0xE0] = lambda: self.CPX(self.immediate_mode())
self.ops[0xE1] = lambda: self.SBC(self.indirect_x_mode())
self.ops[0xE4] = lambda: self.CPX(self.zero_page_mode())
self.ops[0xE5] = lambda: self.SBC(self.zero_page_mode())
self.ops[0xE6] = lambda: self.INC(self.zero_page_mode())
self.ops[0xE8] = lambda: self.INX()
self.ops[0xE9] = lambda: self.SBC(self.immediate_mode())
self.ops[0xEA] = lambda: self.NOP()
self.ops[0xEC] = lambda: self.CPX(self.absolute_mode())
self.ops[0xED] = lambda: self.SBC(self.absolute_mode())
self.ops[0xEE] = lambda: self.INC(self.absolute_mode())
self.ops[0xF0] = lambda: self.BEQ(self.relative_mode())
self.ops[0xF1] = lambda: self.SBC(self.indirect_y_mode())
self.ops[0xF5] = lambda: self.SBC(self.zero_page_x_mode())
self.ops[0xF6] = lambda: self.INC(self.zero_page_x_mode())
self.ops[0xF8] = lambda: self.SED()
self.ops[0xF9] = lambda: self.SBC(self.absolute_y_mode())
self.ops[0xFD] = lambda: self.SBC(self.absolute_x_mode())
self.ops[0xFE] = lambda: self.INC(self.absolute_x_mode(rmw=True))
def reset(self):
self.program_counter = self.read_word(self.RESET_VECTOR)
def run(self):
update_cycle = 0
quit = False
while not quit:
pc = self.program_counter
if self.options.log:
if not ((0xfb78 <= pc < 0xfb97) or (0xfca8 <= pc < 0xfcb4) or (0xFD1B <= pc < 0xFD2F)):
print(f"PC={hex(self.program_counter)} A={hex(self.accumulator)} \
X={hex(self.x_index)} Y={hex(self.y_index)} P={hex(self.stack_pointer)} S={hex(self.status_as_byte())} ",
file=self.memory.logfile
)
self.cycles += 2 # all instructions take this as a minimum
op = self.read_pc_byte()
func = self.ops[op]
if func is None:
print("UNKNOWN OP",hex(self.program_counter - 1), hex(op), file=sys.stderr)
self.BRK()
else:
func()
for event in pygame.event.get():
if event.type == pygame.QUIT:
quit = True
if event.type == pygame.KEYDOWN:
key = ord(event.unicode) if event.unicode else 0
if event.key == pygame.K_LEFT:
key = 0x08
if event.key == pygame.K_RIGHT:
key = 0x15
if key:
if key == 0x7F:
key = 0x08
self.memory.softswitches.kbd = 0x80 + key
update_cycle += 1
if update_cycle >= 1024:
self.memory.display.flash()
pygame.display.flip()
update_cycle = 0
####
def get_pc(self, inc=1):
pc = self.program_counter
self.program_counter += inc
return pc
def read_byte(self, address):
return self.memory.read_byte(self.cycles, address)
def read_word(self, address):
return self.memory.read_word(self.cycles, address)
def read_word_bug(self, address):
return self.memory.read_word_bug(self.cycles, address)
def read_pc_byte(self):
return self.read_byte(self.get_pc())
def read_pc_word(self):
return self.read_word(self.get_pc(2))
####
def status_from_byte(self, status):
self.carry_flag = [0, 1][0 != status & 1]
self.zero_flag = [0, 1][0 != status & 2]
self.interrupt_disable_flag = [0, 1][0 != status & 4]
self.decimal_mode_flag = [0, 1][0 != status & 8]
self.break_flag = [0, 1][0 != status & 16]
self.overflow_flag = [0, 1][0 != status & 64]
self.sign_flag = [0, 1][0 != status & 128]
def status_as_byte(self):
return self.carry_flag | self.zero_flag << 1 | self.interrupt_disable_flag << 2 | self.decimal_mode_flag << 3 | self.break_flag << 4 | 1 << 5 | self.overflow_flag << 6 | self.sign_flag << 7
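        # Bit layout shared by status_from_byte/status_as_byte (bit 5 is always written as 1):
        #   bit 0 = carry, 1 = zero, 2 = interrupt disable, 3 = decimal mode,
        #   bit 4 = break, 5 = unused, 6 = overflow, 7 = sign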
####
def push_byte(self, byte):
self.memory.write_byte(self.STACK_PAGE + self.stack_pointer, byte)
self.stack_pointer = (self.stack_pointer - 1) % 0x100
def pull_byte(self):
self.stack_pointer = (self.stack_pointer + 1) % 0x100
return self.read_byte(self.STACK_PAGE + self.stack_pointer)
def push_word(self, word):
hi, lo = divmod(word, 0x100)
self.push_byte(hi)
self.push_byte(lo)
def pull_word(self):
s = self.STACK_PAGE + self.stack_pointer + 1
self.stack_pointer += 2
return self.read_word(s)
####
def immediate_mode(self):
return self.get_pc()
def absolute_mode(self):
self.cycles += 2
return self.read_pc_word()
def absolute_x_mode(self, rmw=False):
if rmw:
self.cycles += 1
return self.absolute_mode() + self.x_index
def absolute_y_mode(self, rmw=False):
if rmw:
self.cycles += 1
return self.absolute_mode() + self.y_index
def zero_page_mode(self):
self.cycles += 1
return self.read_pc_byte()
def zero_page_x_mode(self):
self.cycles += 1
return (self.zero_page_mode() + self.x_index) % 0x100
def zero_page_y_mode(self):
self.cycles += 1
return (self.zero_page_mode() + self.y_index) % 0x100
def indirect_mode(self):
self.cycles += 2
return self.read_word_bug(self.absolute_mode())
def indirect_x_mode(self):
self.cycles += 4
return self.read_word_bug((self.read_pc_byte() + self.x_index) % 0x100)
def indirect_y_mode(self, rmw=False):
if rmw:
self.cycles += 4
else:
self.cycles += 3
return self.read_word_bug(self.read_pc_byte()) + self.y_index
def relative_mode(self):
pc = self.get_pc()
return pc + 1 + signed(self.read_byte(pc))
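        # e.g. a branch instruction at $1000 with operand byte $FB (-5) resolves to $1000 + 2 - 5 = $0FFD.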
####
def update_nz(self, value):
value = value % 0x100
self.zero_flag = [0, 1][(value == 0)]
self.sign_flag = [0, 1][((value & 0x80) != 0)]
return value
def update_nzc(self, value):
self.carry_flag = [0, 1][(value > 0xFF)]
return self.update_nz(value)
####
# LOAD / STORE
def LDA(self, operand_address):
self.accumulator = self.update_nz(self.read_byte(operand_address))
def LDX(self, operand_address):
self.x_index = self.update_nz(self.read_byte(operand_address))
def LDY(self, operand_address):
self.y_index = self.update_nz(self.read_byte(operand_address))
def STA(self, operand_address):
self.memory.write_byte(operand_address, self.accumulator)
def STX(self, operand_address):
self.memory.write_byte(operand_address, self.x_index)
def STY(self, operand_address):
self.memory.write_byte(operand_address, self.y_index)
# TRANSFER
def TAX(self):
self.x_index = self.update_nz(self.accumulator)
def TXA(self):
self.accumulator = self.update_nz(self.x_index)
def TAY(self):
self.y_index = self.update_nz(self.accumulator)
def TYA(self):
self.accumulator = self.update_nz(self.y_index)
def TSX(self):
self.x_index = self.update_nz(self.stack_pointer)
def TXS(self):
self.stack_pointer = self.x_index
# SHIFTS / ROTATES
def ASL(self, operand_address=None):
if operand_address is None:
self.accumulator = self.update_nzc(self.accumulator << 1)
else:
self.cycles += 2
self.memory.write_byte(operand_address, self.update_nzc(self.read_byte(operand_address) << 1))
def ROL(self, operand_address=None):
if operand_address is None:
a = self.accumulator << 1
if self.carry_flag:
a = a | 0x01
self.accumulator = self.update_nzc(a)
else:
self.cycles += 2
m = self.read_byte(operand_address) << 1
if self.carry_flag:
m = m | 0x01
self.memory.write_byte(operand_address, self.update_nzc(m))
def ROR(self, operand_address=None):
if operand_address is None:
if self.carry_flag:
self.accumulator = self.accumulator | 0x100
self.carry_flag = self.accumulator % 2
self.accumulator = self.update_nz(self.accumulator >> 1)
else:
self.cycles += 2
m = self.read_byte(operand_address)
if self.carry_flag:
m = m | 0x100
self.carry_flag = m % 2
self.memory.write_byte(operand_address, self.update_nz(m >> 1))
def LSR(self, operand_address=None):
if operand_address is None:
self.carry_flag = self.accumulator % 2
self.accumulator = self.update_nz(self.accumulator >> 1)
else:
self.cycles += 2
self.carry_flag = self.read_byte(operand_address) % 2
self.memory.write_byte(operand_address, self.update_nz(self.read_byte(operand_address) >> 1))
# JUMPS / RETURNS
def JMP(self, operand_address):
self.cycles -= 1
self.program_counter = operand_address
def JSR(self, operand_address):
self.cycles += 2
self.push_word(self.program_counter - 1)
self.program_counter = operand_address
def RTS(self):
self.cycles += 4
self.program_counter = self.pull_word() + 1
# BRANCHES
def BCC(self, operand_address):
if not self.carry_flag:
self.cycles += 1
self.program_counter = operand_address
def BCS(self, operand_address):
if self.carry_flag:
self.cycles += 1
self.program_counter = operand_address
def BEQ(self, operand_address):
if self.zero_flag:
self.cycles += 1
self.program_counter = operand_address
def BNE(self, operand_address):
if not self.zero_flag:
self.cycles += 1
self.program_counter = operand_address
def BMI(self, operand_address):
if self.sign_flag:
self.cycles += 1
self.program_counter = operand_address
def BPL(self, operand_address):
if not self.sign_flag:
self.cycles += 1
self.program_counter = operand_address
def BVC(self, operand_address):
if not self.overflow_flag:
self.cycles += 1
self.program_counter = operand_address
def BVS(self, operand_address):
if self.overflow_flag:
self.cycles += 1
self.program_counter = operand_address
# SET / CLEAR FLAGS
def CLC(self):
self.carry_flag = 0
def CLD(self):
self.decimal_mode_flag = 0
def CLI(self):
self.interrupt_disable_flag = 0
def CLV(self):
self.overflow_flag = 0
def SEC(self):
self.carry_flag = 1
def SED(self):
self.decimal_mode_flag = 1
def SEI(self):
self.interrupt_disable_flag = 1
# INCREMENT / DECREMENT
def DEC(self, operand_address):
self.cycles += 2
self.memory.write_byte(operand_address, self.update_nz(self.read_byte(operand_address) - 1))
def DEX(self):
self.x_index = self.update_nz(self.x_index - 1)
def DEY(self):
self.y_index = self.update_nz(self.y_index - 1)
def INC(self, operand_address):
self.cycles += 2
self.memory.write_byte(operand_address, self.update_nz(self.read_byte(operand_address) + 1))
def INX(self):
self.x_index = self.update_nz(self.x_index + 1)
def INY(self):
self.y_index = self.update_nz(self.y_index + 1)
# PUSH / PULL
def PHA(self):
self.cycles += 1
self.push_byte(self.accumulator)
def PHP(self):
self.cycles += 1
self.push_byte(self.status_as_byte())
def PLA(self):
self.cycles += 2
self.accumulator = self.update_nz(self.pull_byte())
def PLP(self):
self.cycles += 2
self.status_from_byte(self.pull_byte())
# LOGIC
def AND(self, operand_address):
self.accumulator = self.update_nz(self.accumulator & self.read_byte(operand_address))
def ORA(self, operand_address):
self.accumulator = self.update_nz(self.accumulator | self.read_byte(operand_address))
def EOR(self, operand_address):
self.accumulator = self.update_nz(self.accumulator ^ self.read_byte(operand_address))
# ARITHMETIC
def ADC(self, operand_address):
# @@@ doesn't handle BCD yet
assert not self.decimal_mode_flag
a2 = self.accumulator
a1 = signed(a2)
m2 = self.read_byte(operand_address)
m1 = signed(m2)
# twos complement addition
result1 = a1 + m1 + self.carry_flag
# unsigned addition
result2 = a2 + m2 + self.carry_flag
self.accumulator = self.update_nzc(result2)
# perhaps this could be calculated from result2 but result1 is more intuitive
self.overflow_flag = [0, 1][(result1 > 127) | (result1 < -128)]
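        # Worked example: A=0x50 plus operand 0x50 with carry clear -> accumulator 0xA0;
        # sign flag set, overflow flag set (two positives gave a negative), carry flag clear.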
def SBC(self, operand_address):
# @@@ doesn't handle BCD yet
assert not self.decimal_mode_flag
a2 = self.accumulator
a1 = signed(a2)
m2 = self.read_byte(operand_address)
m1 = signed(m2)
# twos complement subtraction
result1 = a1 - m1 - [1, 0][self.carry_flag]
# unsigned subtraction
result2 = a2 - m2 - [1, 0][self.carry_flag]
self.accumulator = self.update_nz(result2)
self.carry_flag = [0, 1][(result2 >= 0)]
# perhaps this could be calculated from result2 but result1 is more intuitive
self.overflow_flag = [0, 1][(result1 > 127) | (result1 < -128)]
# BIT
def BIT(self, operand_address):
value = self.read_byte(operand_address)
self.sign_flag = ((value >> 7) % 2) # bit 7
self.overflow_flag = ((value >> 6) % 2) # bit 6
self.zero_flag = [0, 1][((self.accumulator & value) == 0)]
# COMPARISON
def CMP(self, operand_address):
result = self.accumulator - self.read_byte(operand_address)
self.carry_flag = [0, 1][(result >= 0)]
self.update_nz(result)
def CPX(self, operand_address):
result = self.x_index - self.read_byte(operand_address)
self.carry_flag = [0, 1][(result >= 0)]
self.update_nz(result)
def CPY(self, operand_address):
result = self.y_index - self.read_byte(operand_address)
self.carry_flag = [0, 1][(result >= 0)]
self.update_nz(result)
# SYSTEM
def NOP(self):
pass
def BRK(self):
self.cycles += 5
self.break_flag = 1
self.push_word(self.program_counter + 1)
self.push_byte(self.status_as_byte())
self.program_counter = self.read_word(0xFFFE)
def RTI(self):
self.cycles += 4
self.status_from_byte(self.pull_byte())
self.program_counter = self.pull_word()
# @@@ IRQ
# @@@ NMI
| 36.960123
| 197
| 0.606523
| 24,046
| 0.997842
| 0
| 0
| 0
| 0
| 0
| 0
| 776
| 0.032202
|
e9ead4efec2b488b003bd50670c0f814058b8f19
| 29
|
py
|
Python
|
router/tasks/__init__.py
|
smallwat3r/shopify-webhook-processor
|
4f16017cb9695ca00eb6d95e4381a8442b3dc0e3
|
[
"MIT"
] | 1
|
2021-08-30T14:01:03.000Z
|
2021-08-30T14:01:03.000Z
|
router/tasks/__init__.py
|
smallwat3r/shopify-webhook-processor
|
4f16017cb9695ca00eb6d95e4381a8442b3dc0e3
|
[
"MIT"
] | null | null | null |
router/tasks/__init__.py
|
smallwat3r/shopify-webhook-processor
|
4f16017cb9695ca00eb6d95e4381a8442b3dc0e3
|
[
"MIT"
] | 2
|
2021-08-30T14:01:04.000Z
|
2021-09-07T01:07:41.000Z
|
from .tasks import Processor
| 14.5
| 28
| 0.827586
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
e9ebfd8edc0153bf61129fe91fefdc9f0a9e4300
| 1,392
|
py
|
Python
|
dogs/dogs.py
|
RafaelBadaro-zz/dogtour-backend
|
30a83eac46dddaf29c3c643e2dc4dd71948484f0
|
[
"Unlicense"
] | null | null | null |
dogs/dogs.py
|
RafaelBadaro-zz/dogtour-backend
|
30a83eac46dddaf29c3c643e2dc4dd71948484f0
|
[
"Unlicense"
] | 2
|
2019-11-10T18:08:39.000Z
|
2020-07-11T21:22:42.000Z
|
dogs/dogs.py
|
RafaelBadaro-zz/dogtour-backend
|
30a83eac46dddaf29c3c643e2dc4dd71948484f0
|
[
"Unlicense"
] | 1
|
2022-02-12T12:14:40.000Z
|
2022-02-12T12:14:40.000Z
|
import uuid
from nameko.rpc import RpcProxy, rpc
from nameko_redis import Redis
class DogsService:
name = "dogs"
users_rpc = RpcProxy('users')
redis = Redis('development')
@rpc
def get(self, dog_id):
dog = self.redis.hgetall(dog_id)
return dog
@rpc
def create(self, dog_data):
key = 'dog:' + dog_data['name']
dog_id = uuid.uuid4().hex
dog = {
key: dog_id
}
user_email = self.redis.get(dog_data['user_id'])
self.redis.hmset(user_email, dog)
self.redis.hmset(dog_id, {
"name": dog_data['name'],
"sex": dog_data['sex'],
"size": dog_data['size'],
"temper": dog_data['temper']
})
response = {
"dog_id": dog_id,
"name": dog_data['name'],
"status": 200
}
return response
@rpc
def get_user_dogs(self, user_id):
response = {
"dogs": {},
"status": 200
}
user = self.users_rpc.get(user_id)
dogs = {}
keys = user.keys()
for key in keys:
if key.startswith('dog:'):
dogs[user[key]] = self.get(user[key])
if dogs:
response['dogs'] = dogs
response['status'] = 200
return response
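# Illustrative RPC calls (e.g. from `nameko shell`; the ids shown are placeholders):
#   n.rpc.dogs.create({'name': 'Rex', 'sex': 'M', 'size': 'large', 'temper': 'calm', 'user_id': 'some-user-id'})
#   n.rpc.dogs.get_user_dogs('some-user-id')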
| 19.068493
| 56
| 0.481322
| 1,306
| 0.938218
| 0
| 0
| 1,179
| 0.846983
| 0
| 0
| 159
| 0.114224
|
e9ec78a38e45c3ed801db04c7a18df698501ab39
| 1,531
|
py
|
Python
|
examples/demo_OT_2D_samples.py
|
agramfort/POT
|
8dbfd3edae649f5f3e87be4a3ce446c59729b2f7
|
[
"MIT"
] | null | null | null |
examples/demo_OT_2D_samples.py
|
agramfort/POT
|
8dbfd3edae649f5f3e87be4a3ce446c59729b2f7
|
[
"MIT"
] | null | null | null |
examples/demo_OT_2D_samples.py
|
agramfort/POT
|
8dbfd3edae649f5f3e87be4a3ce446c59729b2f7
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
Demo for 2D Optimal transport between empirical distributions
@author: rflamary
"""
import numpy as np
import matplotlib.pylab as pl
import ot
#%% parameters and data generation
n=20 # nb samples
mu_s=np.array([0,0])
cov_s=np.array([[1,0],[0,1]])
mu_t=np.array([4,4])
cov_t=np.array([[1,-.8],[-.8,1]])
xs=ot.datasets.get_2D_samples_gauss(n,mu_s,cov_s)
xt=ot.datasets.get_2D_samples_gauss(n,mu_t,cov_t)
a,b = ot.unif(n),ot.unif(n) # uniform distribution on samples
# loss matrix
M=ot.dist(xs,xt)
M/=M.max()
#%% plot samples
pl.figure(1)
pl.plot(xs[:,0],xs[:,1],'+b',label='Source samples')
pl.plot(xt[:,0],xt[:,1],'xr',label='Target samples')
pl.legend(loc=0)
pl.title('Source and target distributions')
pl.figure(2)
pl.imshow(M,interpolation='nearest')
pl.title('Cost matrix M')
#%% EMD
G0=ot.emd(a,b,M)
pl.figure(3)
pl.imshow(G0,interpolation='nearest')
pl.title('OT matrix G0')
pl.figure(4)
ot.plot.plot2D_samples_mat(xs,xt,G0,c=[.5,.5,1])
pl.plot(xs[:,0],xs[:,1],'+b',label='Source samples')
pl.plot(xt[:,0],xt[:,1],'xr',label='Target samples')
pl.legend(loc=0)
pl.title('OT matrix with samples')
#%% sinkhorn
# reg term
lambd=5e-3
Gs=ot.sinkhorn(a,b,M,lambd)
pl.figure(5)
pl.imshow(Gs,interpolation='nearest')
pl.title('OT matrix sinkhorn')
pl.figure(6)
ot.plot.plot2D_samples_mat(xs,xt,Gs,color=[.5,.5,1])
pl.plot(xs[:,0],xs[:,1],'+b',label='Source samples')
pl.plot(xt[:,0],xt[:,1],'xr',label='Target samples')
pl.legend(loc=0)
pl.title('OT matrix Sinkhorn with samples')
| 19.379747
| 61
| 0.677335
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 534
| 0.348792
|
e9ed96eda4a6de7f5ebf0c1ccffa4e86b1a28787
| 7,594
|
py
|
Python
|
src/morphometrics/utils/surface_utils.py
|
kevinyamauchi/morphometrics
|
f48cb4fa8c06b726f0b699940c32ac8df466f71c
|
[
"BSD-3-Clause"
] | 5
|
2022-03-17T18:14:18.000Z
|
2022-03-23T00:48:17.000Z
|
src/morphometrics/utils/surface_utils.py
|
kevinyamauchi/morphometrics
|
f48cb4fa8c06b726f0b699940c32ac8df466f71c
|
[
"BSD-3-Clause"
] | 11
|
2022-01-27T14:10:43.000Z
|
2022-03-20T18:22:30.000Z
|
src/morphometrics/utils/surface_utils.py
|
kevinyamauchi/morphometrics
|
f48cb4fa8c06b726f0b699940c32ac8df466f71c
|
[
"BSD-3-Clause"
] | 1
|
2022-03-17T18:17:21.000Z
|
2022-03-17T18:17:21.000Z
|
from typing import List, Tuple
import numpy as np
import pymeshfix
import trimesh.voxel.creation
from skimage.measure import marching_cubes
from trimesh import Trimesh
from trimesh.smoothing import filter_taubin
from ..types import BinaryImage, LabelImage
def _round_to_pitch(coordinate: np.ndarray, pitch: float) -> np.ndarray:
"""Round a point to the nearest point on a grid that starts at the origin
with a specified pitch.
Parameters
----------
coordinate : np.ndarray
The coordinate to round
pitch : float
The pitch of the grid. Assumed to the be same in all directions.
Returns
-------
rounded_point : np.ndarray
The point after rounding to the nearest grid point.
"""
return pitch * np.round(coordinate / pitch, decimals=0)
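    # Illustrative example: _round_to_pitch(np.array([1.2, 3.7, -0.4]), pitch=0.5) -> array([1., 3.5, -0.5])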
def repair_mesh(mesh: Trimesh) -> Trimesh:
"""Repair a mesh using pymeshfix.
Parameters
----------
mesh : Trimesh
The mesh to be repaired
"""
vertices = np.asarray(mesh.vertices)
faces = np.asarray(mesh.faces)
vertices_clean, faces_clean = pymeshfix.clean_from_arrays(vertices, faces)
# create the mesh object
repaired_mesh = Trimesh(vertices=vertices_clean, faces=faces_clean)
assert repaired_mesh.is_watertight, "Mesh was unable to be repaired"
return repaired_mesh
def binary_mask_to_surface(
object_mask: BinaryImage, n_mesh_smoothing_interations: int = 50
) -> Trimesh:
"""Convert surface of a 3D binary mask (segmented object) into a watertight mesh.
Parameters
----------
    object_mask : BinaryImage
A 3D binary image corresponding to the object you want to mesh.
n_mesh_smoothing_interations : int
        The number of iterations of smoothing to perform. Smoothing is
done by the trimesh taubin filter:
https://trimsh.org/trimesh.smoothing.html#trimesh.smoothing.filter_taubin
Default value is 50.
Returns
-------
mesh : trimesh.Trimesh
The resulting mesh as a trimesh.Trimesh object.
https://trimsh.org/trimesh.base.html#github-com-mikedh-trimesh
"""
vertices, faces, _, _ = marching_cubes(object_mask, 0)
vertices_clean, faces_clean = pymeshfix.clean_from_arrays(vertices, faces)
# create the mesh object
mesh = Trimesh(vertices=vertices_clean, faces=faces_clean)
# optionally clean up the mesh
if n_mesh_smoothing_interations > 0:
filter_taubin(mesh, iterations=n_mesh_smoothing_interations)
return mesh
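# Illustrative usage sketch (the synthetic sphere mask below is an assumption, not from this module):
#   mask = np.zeros((50, 50, 50), dtype=bool)
#   zz, yy, xx = np.ogrid[:50, :50, :50]
#   mask[(zz - 25) ** 2 + (yy - 25) ** 2 + (xx - 25) ** 2 < 15 ** 2] = True
#   mesh = binary_mask_to_surface(mask)
#   mesh.is_watertight, mesh.volume   # standard trimesh.Trimesh attributes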
def voxelize_closed_surface(
mesh: Trimesh, pitch: float, repair_mesh: bool = True
) -> Tuple[BinaryImage, np.ndarray]:
"""Voxelize a closed surface mesh.
Parameters
----------
mesh : Trimesh
The surface to voxelize
pitch : float
The voxel width in mesh units. Voxels have the
same width in each dimension (i.e., are cubes).
repair_mesh : bool
        Flag to attempt to repair the mesh if set to True.
Default value is True.
Returns
-------
image : BinaryImage
The binary mask created from the
image_origin : np.ndarray
The upper left hand corner of the voxelized image in mesh units
(i.e., minimun of the axis aligned bounding box)
"""
bounding_box = mesh.bounds
centroid = np.mean(bounding_box, axis=0)
# convert the centroid to the nearest integer multiple of the pitch
rounded_centroid = _round_to_pitch(coordinate=centroid, pitch=pitch)
    # find the minimum cube half-width that encompasses the full mesh
cube_half_width = np.max(bounding_box - rounded_centroid)
# get the number of voxels for the cube half-width
n_voxels_cube_half_width = int(np.ceil(cube_half_width / pitch))
# pad with one voxel on each side to make sure the full mesh is in range
n_voxels_cube_half_width += 1
# get the upper left hand (i.e., minimum) corner of the voxelized image in mesh coordinates
image_origin = rounded_centroid - (n_voxels_cube_half_width * pitch)
    # ``repair_mesh`` here is the boolean argument, which shadows the
    # module-level repair_mesh() function, so look the function up explicitly.
    if repair_mesh and (not mesh.is_watertight):
        mesh = globals()["repair_mesh"](mesh)
voxel_grid = trimesh.voxel.creation.local_voxelize(
mesh=mesh,
point=rounded_centroid,
pitch=pitch,
radius=n_voxels_cube_half_width,
fill=True,
)
return voxel_grid.matrix.astype(bool), image_origin
def closed_surfaces_to_label_image(
meshes: List[Trimesh],
pitch: float,
crop_around_mesh: bool = False,
repair_mesh: bool = False,
) -> Tuple[LabelImage, np.ndarray]:
"""Create a label image from a set of meshes with closed surfaces.
Notes:
        - meshes must be watertight for accurate voxelization.
- Labels are assigned in the order the meshes appear in the list.
- all meshes must be in the same coordinate system and scale.
Parameters
----------
meshes : List[Trimesh]
The meshes to convert to a label image.
pitch : float
The width of a voxel in mesh units. Voxels are assumed to be cubes.
crop_around_mesh : bool
When set to True, the image is cropped around the axis aligned bounding box
of the set of meshes with a one voxel pad in each direction.
The default value is False
repair_mesh : bool
When set to True, will attempt to repair meshes with PyMeshFix.
Default value is False.
Returns
-------
label_image : LabelImage
The label image generated from the meshes.
image_origin : np.ndarray
The coordinate of the upper left hand corner (i.e., minimum) of the
label_image in mesh coordinates.
"""
# get the bounding box around the meshes
bounding_boxes = [mesh.bounds for mesh in meshes]
# get the bounding box around all of them
all_corners = np.concatenate(bounding_boxes, axis=0)
min_corner = np.min(all_corners, axis=0)
max_corner = np.max(all_corners, axis=0)
# round the corners to the nearest voxel (in mesh coordinates)
min_corner_rounded = _round_to_pitch(coordinate=min_corner, pitch=pitch)
max_corner_rounded = _round_to_pitch(coordinate=max_corner, pitch=pitch)
# pad the bounding box to make sure everything is accounted for
min_corner_rounded -= pitch
max_corner_rounded += pitch
if crop_around_mesh is True:
image_origin = min_corner_rounded
else:
image_origin = np.array([0, 0, 0])
# determine the size of the image in pixels
image_shape_mesh_units = max_corner_rounded - image_origin
image_shape_voxels = np.round(image_shape_mesh_units / pitch, decimals=0).astype(
int
)
# create the blank label image
label_image = np.zeros(image_shape_voxels, dtype=np.uint16)
for i, mesh in enumerate(meshes):
voxelized, origin = voxelize_closed_surface(
mesh, pitch=pitch, repair_mesh=repair_mesh
)
# get the coordinates of the voxels inside of the mesh
filled_voxel_coordinates = np.argwhere(voxelized)
# get the offset between the label image indices and the voxelized mesh indices
mesh_offset = np.round((origin - image_origin) / pitch, decimals=0)
# offset the voxel coordinates
filled_voxel_indices = np.round(
filled_voxel_coordinates + mesh_offset, decimals=0
).astype(int)
# set the label value
label_value = i + 1
label_image[
filled_voxel_indices[:, 0],
filled_voxel_indices[:, 1],
filled_voxel_indices[:, 2],
] = label_value
return label_image, image_origin
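# A minimal usage sketch (not part of the original module): label two
# watertight trimesh primitives. The primitives, offset and pitch are
# arbitrary example choices.
def _example_closed_surfaces_to_label_image() -> Tuple[LabelImage, np.ndarray]:
    """Voxelize an icosphere and a box into one label image (usage sketch)."""
    import trimesh.creation

    sphere = trimesh.creation.icosphere(radius=5.0)
    box = trimesh.creation.box(extents=(4.0, 4.0, 4.0))
    box.apply_translation((12.0, 0.0, 0.0))  # keep the two objects apart
    return closed_surfaces_to_label_image(
        [sphere, box], pitch=0.5, crop_around_mesh=True
    )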
| 32.314894
| 95
| 0.684093
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 3,881
| 0.511061
|
e9eda2a3fc73ffe30b97e1cd86e60cd02bdf72a7
| 1,402
|
py
|
Python
|
bigflow_python/python/bigflow/pipeline/test/testdata/columns/columns/column_sum.py
|
advancedxy/bigflow_python
|
8a244b483404fde7afc42eee98bc964da8ae03e2
|
[
"Apache-2.0"
] | 1,236
|
2017-11-14T11:10:10.000Z
|
2022-03-08T11:54:41.000Z
|
bigflow_python/python/bigflow/pipeline/test/testdata/columns/columns/column_sum.py
|
advancedxy/bigflow_python
|
8a244b483404fde7afc42eee98bc964da8ae03e2
|
[
"Apache-2.0"
] | 38
|
2017-11-14T16:29:12.000Z
|
2020-01-23T08:32:04.000Z
|
bigflow_python/python/bigflow/pipeline/test/testdata/columns/columns/column_sum.py
|
advancedxy/bigflow_python
|
8a244b483404fde7afc42eee98bc964da8ae03e2
|
[
"Apache-2.0"
] | 184
|
2017-11-27T07:23:36.000Z
|
2022-03-14T02:54:16.000Z
|
#!/usr/bin/env python
# encoding: utf-8
########################################################################
#
# Copyright (c) 2016 Baidu, Inc. All Rights Reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
########################################################################
from bigflow import transforms
def column_sum(pcollection, columns):
"""
    For the input PCollection, compute the sum of all elements over the specified columns.
    Args:
        pcollection (PCollection): the input PCollection
        columns (list): the columns to sum over
    Returns:
        PObject: the aggregated result
>>> import columns
>>> _p = _pipeline.parallelize([(1, 1, 1), (1, 2, 2), (1, 3, 1)])
>>> columns.column_sum(_p, [0, 1]).get()
[3, 6]
"""
cols = columns
def _get_columns(record):
return [record[column] for column in cols]
return pcollection.map(_get_columns) \
.reduce(lambda x, y: [a + b for a, b in zip(x, y)])
| 28.04
| 74
| 0.597004
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,146
| 0.782787
|
e9ee58711825a498c9db3c3f37e476c5e56bb0a6
| 282
|
py
|
Python
|
auction/models/bidbasket.py
|
littlepea/django-auction
|
fe0219faabe17efbeca1be51869d750e82299941
|
[
"MIT"
] | 10
|
2015-01-13T02:51:35.000Z
|
2021-01-25T21:02:29.000Z
|
auction/models/bidbasket.py
|
JohnRomanski/django-auction
|
bc6982c8f34a9a6914badb203424eca7f3219685
|
[
"MIT"
] | 2
|
2016-08-05T09:24:30.000Z
|
2020-06-28T06:00:11.000Z
|
auction/models/bidbasket.py
|
JohnRomanski/django-auction
|
bc6982c8f34a9a6914badb203424eca7f3219685
|
[
"MIT"
] | 22
|
2015-03-12T10:41:52.000Z
|
2021-11-23T14:33:09.000Z
|
import importlib
from django.conf import settings
from auction.utils.loader import load_class
AUCTION_BIDBASKET_MODEL = getattr(settings, 'AUCTION_BIDBASKET_MODEL',
'auction.models.defaults.BidBasket')
BidBasket = load_class(AUCTION_BIDBASKET_MODEL, 'AUCTION_BIDBASKET_MODEL')
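# Usage note (a sketch, not part of the original module): a project can swap
# in its own bid basket class by pointing the setting at a custom dotted
# path in settings.py, e.g. (the path below is hypothetical):
#
#     AUCTION_BIDBASKET_MODEL = 'myapp.models.CustomBidBasket'
#
# load_class() then resolves that path instead of the default
# 'auction.models.defaults.BidBasket'.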
| 35.25
| 74
| 0.840426
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 85
| 0.301418
|
e9eed597103f69eb9973238f713e70a5ed271b2e
| 551
|
py
|
Python
|
stixpy/timeseries/tests/test_quicklook.py
|
nicHoch/stixpy
|
cdb86094995590da36f3ae5e01f4ca4b9aac819c
|
[
"BSD-3-Clause"
] | 4
|
2021-07-06T14:42:09.000Z
|
2022-02-24T10:19:18.000Z
|
stixpy/timeseries/tests/test_quicklook.py
|
nicHoch/stixpy
|
cdb86094995590da36f3ae5e01f4ca4b9aac819c
|
[
"BSD-3-Clause"
] | 30
|
2020-10-02T20:24:28.000Z
|
2022-03-31T18:29:07.000Z
|
stixpy/timeseries/tests/test_quicklook.py
|
nicHoch/stixpy
|
cdb86094995590da36f3ae5e01f4ca4b9aac819c
|
[
"BSD-3-Clause"
] | 8
|
2021-04-16T11:00:13.000Z
|
2022-03-31T10:09:29.000Z
|
from pathlib import Path
import pytest
from sunpy.timeseries import TimeSeries
from stixpy.data import test
from stixpy.timeseries.quicklook import *
def test_ql_lightcurve():
ql_lc = TimeSeries(test.STIX_QL_LIGHTCURVE_TIMESERIES)
assert isinstance(ql_lc, QLLightCurve)
def test_qlbackground():
ql_lc = TimeSeries(test.STIX_QL_BACKGROUND_TIMESERIES)
assert isinstance(ql_lc, QLBackground)
def test_qlvariance():
ql_lc = TimeSeries(test.STIX_QL_VARIANCE_TIMESERIES)
ql_lc.peek()
assert isinstance(ql_lc, QLVariance)
| 22.958333
| 58
| 0.787659
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
e9f001a0eb4f10eb622617d07d8ad3650ace4a3c
| 2,284
|
py
|
Python
|
roberta_ses/datasets/sst_dataset.py
|
sythello/Roberta_SES
|
289d575b9330cb6ae61190846448bd5368d73453
|
[
"Apache-2.0"
] | null | null | null |
roberta_ses/datasets/sst_dataset.py
|
sythello/Roberta_SES
|
289d575b9330cb6ae61190846448bd5368d73453
|
[
"Apache-2.0"
] | null | null | null |
roberta_ses/datasets/sst_dataset.py
|
sythello/Roberta_SES
|
289d575b9330cb6ae61190846448bd5368d73453
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
@file : sst_dataset.py
@author: zijun
@contact : zijun_sun@shannonai.com
@date : 2020/11/17 11:45
@version: 1.0
@desc : sst5 and imdb task use the same dataset
"""
import os
from functools import partial
import torch
from transformers import RobertaTokenizer
from torch.utils.data import Dataset, DataLoader
from roberta_ses.datasets.collate_functions import collate_to_max_length
class SSTDataset(Dataset):
def __init__(self, directory, prefix, bert_path, max_length: int = 512):
super().__init__()
self.max_length = max_length
with open(os.path.join(directory, prefix + '.txt'), 'r', encoding='utf8') as f:
lines = f.readlines()
self.lines = lines
self.tokenizer = RobertaTokenizer.from_pretrained(bert_path)
def __len__(self):
return len(self.lines)
def __getitem__(self, idx):
line = self.lines[idx]
label, sentence = line.split('\t', 1)
# delete .
sentence = sentence.strip()
if sentence.endswith("."):
sentence = sentence[:-1]
input_ids = self.tokenizer.encode(sentence, add_special_tokens=False)
if len(input_ids) > self.max_length - 2:
input_ids = input_ids[:self.max_length - 2]
# convert list to tensor
length = torch.LongTensor([len(input_ids) + 2])
input_ids = torch.LongTensor([0] + input_ids + [2])
label = torch.LongTensor([int(label)])
return input_ids, label, length
def unit_test():
root_path = "/data/nfsdata2/sunzijun/sstc/imdb_data"
bert_path = "/data/nfsdata2/sunzijun/loop/roberta-base"
prefix = "train"
dataset = SSTDataset(directory=root_path, prefix=prefix, bert_path=bert_path)
dataloader = DataLoader(
dataset=dataset,
batch_size=10,
num_workers=0,
shuffle=False,
collate_fn=partial(collate_to_max_length, fill_values=[1, 0, 0])
)
    # SSTDataset yields (input_ids, label, length), so the collated batches
    # contain three tensors as well.
    for input_ids, label, length in dataloader:
        print(input_ids.shape)
        print(length.shape)
        print(label.view(-1).shape)
        print()
if __name__ == '__main__':
unit_test()
| 30.453333
| 87
| 0.651926
| 1,097
| 0.480298
| 0
| 0
| 0
| 0
| 0
| 0
| 370
| 0.161996
|
e9f017283f2c9870d465de8537e58d7f7588313c
| 8,068
|
py
|
Python
|
tests/gem5/configs/boot_kvm_fork_run.py
|
darchr/gem5
|
0feb0a34db519523a8595f6d1543f7412259ba17
|
[
"BSD-3-Clause"
] | 19
|
2018-07-20T15:08:50.000Z
|
2022-03-26T16:15:59.000Z
|
tests/gem5/configs/boot_kvm_fork_run.py
|
darchr/gem5
|
0feb0a34db519523a8595f6d1543f7412259ba17
|
[
"BSD-3-Clause"
] | 148
|
2018-07-20T00:58:36.000Z
|
2021-11-16T01:52:33.000Z
|
tests/gem5/configs/boot_kvm_fork_run.py
|
darchr/gem5
|
0feb0a34db519523a8595f6d1543f7412259ba17
|
[
"BSD-3-Clause"
] | 10
|
2019-01-10T03:01:30.000Z
|
2022-01-21T18:36:18.000Z
|
# Copyright (c) 2021 The University of Texas at Austin
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Author: Austin Harris
#
"""
This script tests forking gem5 with the KVM cores and switching cores in the
child process. First, the test boots linux with KVM and tests fast-forwarding
with instruction exit events. Then the test forks the simulation, waits for the
child to simulate until completion, and then simulates to completion in the
parent process.
"""
import argparse
import os
import sys
from textwrap import dedent
import m5
from m5.objects import Root
from gem5.components.boards.x86_board import X86Board
from gem5.coherence_protocol import CoherenceProtocol
from gem5.isas import ISA
from gem5.components.memory.single_channel import SingleChannelDDR3_1600
from gem5.components.processors.cpu_types import CPUTypes
from gem5.components.processors.simple_switchable_processor import (
SimpleSwitchableProcessor,
)
from gem5.resources.resource import Resource
from gem5.runtime import (
get_runtime_coherence_protocol, get_runtime_isa
)
from gem5.utils.requires import requires
parser = argparse.ArgumentParser(
description="A script to test forking gem5 and switching cpus."
)
parser.add_argument(
"-m",
"--mem-system",
type=str,
choices=("classic", "mi_example", "mesi_two_level"),
required=True,
help="The memory system.",
)
parser.add_argument(
"-n",
"--num-cpus",
type=int,
choices=(1, 2, 4, 8),
default=4,
help="The number of CPUs.",
)
parser.add_argument(
"-c",
"--cpu",
type=str,
choices=("kvm", "atomic", "timing", "o3"),
required=True,
help="The CPU type.",
)
parser.add_argument(
"-r",
"--resource-directory",
type=str,
required=False,
help="The directory in which resources will be downloaded or exist.",
)
parser.add_argument(
"-o",
"--override-download",
action="store_true",
help="Override a local resource if the hashes do not match.",
)
parser.add_argument(
"-k",
"--kernel-args",
type=str,
default="init=/root/gem5_init.sh",
help="Additional kernel boot arguments.",
)
parser.add_argument(
"-f",
"--num-forks",
type=int,
default=4,
help="The number of times to fork gem5.",
)
args = parser.parse_args()
coherence_protocol_required = None
if args.mem_system == "mi_example":
coherence_protocol_required = CoherenceProtocol.MI_EXAMPLE
elif args.mem_system == "mesi_two_level":
coherence_protocol_required = CoherenceProtocol.MESI_TWO_LEVEL
requires(
isa_required=ISA.X86,
coherence_protocol_required=coherence_protocol_required,
kvm_required=(args.cpu == "kvm"),
)
cache_hierarchy = None
if args.mem_system == "mi_example":
from gem5.components.cachehierarchies.ruby.\
mi_example_cache_hierarchy import (
MIExampleCacheHierarchy,
)
cache_hierarchy = MIExampleCacheHierarchy(size="32kB", assoc=8)
elif args.mem_system == "mesi_two_level":
from gem5.components.cachehierarchies.ruby.\
mesi_two_level_cache_hierarchy import (
MESITwoLevelCacheHierarchy,
)
cache_hierarchy = MESITwoLevelCacheHierarchy(
l1d_size="16kB",
l1d_assoc=8,
l1i_size="16kB",
l1i_assoc=8,
l2_size="256kB",
l2_assoc=16,
num_l2_banks=1,
)
elif args.mem_system == "classic":
from gem5.components.cachehierarchies.classic.\
private_l1_cache_hierarchy import (
PrivateL1CacheHierarchy,
)
cache_hierarchy = PrivateL1CacheHierarchy(l1d_size="16kB", l1i_size="16kB")
else:
raise NotImplementedError(
"Memory system '{}' is not supported in the boot tests.".format(
args.mem_system
)
)
assert cache_hierarchy is not None
# Setup the system memory.
memory = SingleChannelDDR3_1600(size="3GB")
# Setup a Processor.
cpu_type = None
if args.cpu == "kvm":
cpu_type = CPUTypes.KVM
elif args.cpu == "atomic":
cpu_type = CPUTypes.ATOMIC
elif args.cpu == "timing":
cpu_type = CPUTypes.TIMING
elif args.cpu == "o3":
cpu_type = CPUTypes.O3
else:
raise NotImplementedError(
"CPU type '{}' is not supported in the boot tests.".format(args.cpu)
)
assert cpu_type is not None
processor = SimpleSwitchableProcessor(
starting_core_type=CPUTypes.KVM,
switch_core_type=cpu_type,
num_cores=args.num_cpus,
)
# Setup the motherboard.
motherboard = X86Board(
clk_freq="3GHz",
processor=processor,
memory=memory,
cache_hierarchy=cache_hierarchy,
exit_on_work_items=True,
)
motherboard.connect_things()
# Set the Full System workload.
motherboard.set_workload(
kernel=Resource(
"x86-linux-kernel-5.4.49",
override=args.override_download,
resource_directory=args.resource_directory,
),
disk_image=Resource(
"x86-ubuntu-img",
override=args.override_download,
resource_directory=args.resource_directory,
),
command=dedent(
"""
m5 exit # signal end of boot
m5 exit # exit in children and parent
"""
),
kernel_args=[args.kernel_args]
)
# Begin running of the simulation. This will exit once the Linux system boot
# is complete.
print("Running with ISA: " + get_runtime_isa().name)
print("Running with protocol: " + get_runtime_coherence_protocol().name)
print()
root = Root(full_system=True, system=motherboard)
# TODO: This is annoying. Is there a way to fix this to happen
# automatically when running KVM?
root.sim_quantum = int(1e9)
# Disable the gdb ports. Required for forking.
m5.disableAllListeners()
m5.instantiate()
# Simulate the initial boot with the starting KVM cpu
exit_event = m5.simulate()
print("Boot finished", exit_event.getCause())
print("Starting fork and switch processors test")
pids = []
for i in range(args.num_forks):
pid = m5.fork("%(parent)s/" + str(m5.curTick()))
if pid == 0: # in child
print(f"Switching processors in child {i}.")
processor.switch()
exit_event = m5.simulate()
if exit_event.getCause() != "m5_exit instruction encountered":
raise Exception(f"Expected m5 exit, got {exit_event.getCause()}")
print("Child finished, exiting: ", exit_event.getCause())
sys.exit(0)
else:
pids.append(pid)
print("Waiting for children...")
for pid in pids:
print (os.waitpid(pid, 0))
print("Children finished! Running to completion in parent.")
exit_event = m5.simulate()
if exit_event.getCause() != "m5_exit instruction encountered":
raise Exception(f"Expected m5 exit, got {exit_event.getCause()}")
| 29.661765
| 79
| 0.716534
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 3,601
| 0.446331
|
e9f050b89ff8d6e83255108084e3c376a0039fc7
| 1,203
|
py
|
Python
|
rioxarray/write.py
|
kadyb/raster-benchmark
|
78733ff75181713071cc0694e187a2ac83f76752
|
[
"MIT"
] | 11
|
2021-04-15T09:51:48.000Z
|
2022-02-08T13:01:28.000Z
|
rioxarray/write.py
|
kadyb/raster-benchmark
|
78733ff75181713071cc0694e187a2ac83f76752
|
[
"MIT"
] | 11
|
2021-02-16T12:43:07.000Z
|
2021-12-14T19:57:10.000Z
|
rioxarray/write.py
|
kadyb/raster-benchmark
|
78733ff75181713071cc0694e187a2ac83f76752
|
[
"MIT"
] | 2
|
2021-07-22T14:01:46.000Z
|
2021-07-25T05:24:51.000Z
|
# -*- coding: utf-8 -*-
import os
import timeit
import xarray
import rioxarray
import pandas as pd
wd = os.getcwd()
catalog = os.path.join('data', 'LC08_L1TP_190024_20200418_20200822_02_T1')
rasters = os.listdir(catalog)
rasters = [r for r in rasters if r.endswith(('.TIF'))]
rasters = [os.path.join(wd, catalog, r) for r in rasters]
### raster stack
band_names = ["B1", "B10", "B11", "B2", "B3", "B4", "B5", "B6", "B7", "B9"]
ras = []
for i, path in enumerate(rasters):
ras.append(rioxarray.open_rasterio(path, masked = True).squeeze())
ras = xarray.concat(ras, "band")
ras.coords["band"] = band_names
t_list = [None] * 10
stack_file = 'stack.TIF'
for i in range(10):
tic = timeit.default_timer()
ras.rio.to_raster(stack_file, dtype = "uint16", compress = "LZW")
toc = timeit.default_timer()
t_list[i] = round(toc - tic, 2)
os.remove(stack_file)
df = {'task': ['write'] * 10, 'package': ['rioxarray'] * 10, 'time': t_list}
df = pd.DataFrame.from_dict(df)
if not os.path.isdir('results'): os.mkdir('results')
savepath = os.path.join('results', 'write-rioxarray.csv')
df.to_csv(savepath, index = False, decimal = ',', sep = ';')
| 27.340909
| 77
| 0.633416
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 266
| 0.221114
|
e9f15a2385f1ea0dee9385406e24c070bd322820
| 14,534
|
py
|
Python
|
manifold/manifold.py
|
timotheosh/Manifest
|
d3917cb386aa351335c38f08e4c7d36136d8863f
|
[
"MIT"
] | 2
|
2021-08-13T12:38:24.000Z
|
2021-08-21T19:36:42.000Z
|
manifold/manifold.py
|
timotheosh/Manifold
|
d3917cb386aa351335c38f08e4c7d36136d8863f
|
[
"MIT"
] | null | null | null |
manifold/manifold.py
|
timotheosh/Manifold
|
d3917cb386aa351335c38f08e4c7d36136d8863f
|
[
"MIT"
] | null | null | null |
# encoding: utf-8
'''manifold
An SMF service manifest creation tool.
'''
__author__ = 'Chris Miles'
__copyright__ = '(c) Chris Miles 2008. All rights reserved.'
__license__ = 'GPL http://www.gnu.org/licenses/gpl.txt'
__id__ = '$Id: manifold.py 7 2009-03-24 09:10:48Z miles.chris $'
__url__ = '$URL: https://manifold.googlecode.com/svn/trunk/manifold/manifold.py $'
# ---- Imports ----
# - Python Modules -
import logging
import os
import optparse
import sys
# - Genshi Modules -
from genshi.template import MarkupTemplate
# - Project Modules -
from .release import version
# ---- Genshi Templates ----
MANIFEST_TEMPLATE = """<?xml version="1.0"?>
<!DOCTYPE service_bundle SYSTEM "/usr/share/lib/xml/dtd/service_bundle.dtd.1">
<!--
Created by Manifold
-->
<service_bundle type='manifest' name='${service_name}' xmlns:py='http://genshi.edgewall.org/'>
<service
name='${service_category}/${service_name}'
type='service'
version='${service_version}'>
<create_default_instance py:if="not multi_instance" enabled='${instance_enabled}' />
<single_instance py:if="not multi_instance" />
<dependency py:if="depends_on_network"
name='network'
grouping='require_all'
restart_on='error'
type='service'>
<service_fmri value='svc:/milestone/network:default'/>
</dependency>
<dependency py:if="depends_on_filesystem"
name='filesystem'
grouping='require_all'
restart_on='error'
type='service'>
<service_fmri value='svc:/system/filesystem/local'/>
</dependency>
<instance py:if="multi_instance" name='${instance_name}' enabled='${instance_enabled}'>
<!--! This part used for a multi instance service. -->
<method_context>
<method_credential py:if="method_credential_user and method_credential_group" user='${method_credential_user}' group='${method_credential_group}' />
</method_context>
<exec_method
type='method'
name='start'
exec='${exec_method_start}'
timeout_seconds='60' />
<exec_method
type='method'
name='stop'
exec='${exec_method_stop}'
timeout_seconds='60' />
<property_group name='startd' type='framework'>
<propval py:if="startd_model=='wait'" name='duration' type='astring' value='child' />
<propval py:if="startd_model=='transient'" name='duration' type='astring' value='transient' />
<propval py:if="startd_model=='contract'" name='duration' type='astring' value='contract' />
<propval name='ignore_error' type='astring' value='core,signal' />
</property_group>
<property_group name='application' type='application'>
<propval py:if="config_file" name='config_file' type='astring' value='${config_file}' />
</property_group>
</instance>
<a_single_instance py:if="not multi_instance" py:strip="True">
<!--! This part used for a single instance only service. -->
<method_context>
<method_credential py:if="method_credential_user and method_credential_group" user='${method_credential_user}' group='${method_credential_group}' />
</method_context>
<exec_method
type='method'
name='start'
exec='${exec_method_start}'
timeout_seconds='60' />
<exec_method
type='method'
name='stop'
exec='${exec_method_stop}'
timeout_seconds='60' />
<property_group name='startd' type='framework'>
<propval py:if="startd_model=='wait'" name='duration' type='astring' value='child' />
<propval py:if="startd_model=='transient'" name='duration' type='astring' value='transient' />
<propval py:if="startd_model=='contract'" name='duration' type='astring' value='contract' />
<propval name='ignore_error' type='astring' value='core,signal' />
</property_group>
<property_group name='application' type='application'>
<propval py:if="config_file" name='config_file' type='astring' value='${config_file}' />
</property_group>
</a_single_instance>
<stability value='Evolving' />
<template>
<common_name>
<loctext xml:lang='C'>
${common_name}
</loctext>
</common_name>
</template>
</service>
</service_bundle>
"""
# ---- Classes ----
class CONFIG_BASE(object):
def __init__(self, name, require_value=False, default=None, description=None, example=None, accepted_values=None):
self.name = name
self.require_value = require_value
self.default = default
self.description = description
self.example = example
self.accepted_values = accepted_values
def prompt(self):
        raise NotImplementedError()
def ask(self, config):
        raise NotImplementedError()
class CONFIG_STR(CONFIG_BASE):
def prompt(self):
if self.description:
s = self.description
else:
s = "Enter value for %s" %self.name
if self.example:
s += " (example: %s)" %self.example
if self.default:
s += " [%s] "% self.default
else:
s += " [] "
return s
def ask(self, config):
r = None
while r is None or (self.require_value and not r):
r = input(self.prompt()).strip()
if not r and self.default is not None:
r = self.default
elif self.accepted_values and r not in self.accepted_values:
print("Sorry, you must enter one of: " + ', '.join(['"%s"'%s for s in self.accepted_values]))
r = None
if not r:
r = None
return r
class CONFIG_BOOL(CONFIG_BASE):
def prompt(self):
if self.description:
s = self.description
else:
s = "%s" %self.name
s += " (yes/no)"
if self.default is not None:
if self.default:
default = "yes"
else:
default = "no"
s += " [%s]"% default
s += " ? "
return s
def ask(self, config):
answers = dict(
yes = True,
ye = True,
y = True,
no = False,
n = False
)
r = None
while not r in list(answers.keys()) and r != '':
r = input(self.prompt()).strip().lower()
if r:
r = answers[r]
elif self.default is not None:
r = self.default
if r:
r = 'true'
else:
r = 'false'
return r
class CONFIG_IF(CONFIG_BASE):
def __init__(self, *args, **kwargs):
self.questions = kwargs.get('questions', [])
del kwargs['questions']
super(CONFIG_IF, self).__init__(*args, **kwargs)
def prompt(self):
if self.description:
s = self.description
else:
s = "%s" %self.name
s += " (yes/no)"
if self.default is not None:
if self.default:
default = "yes"
else:
default = "no"
s += " [%s]"% default
s += " ? "
return s
def ask(self, config):
answers = dict(
yes = True,
ye = True,
y = True,
no = False,
n = False
)
r = None
while not r in list(answers.keys()) and r != '':
r = input(self.prompt()).strip().lower()
if r:
r = answers[r]
elif self.default is not None:
r = self.default
if r:
# if answer to this question is "yes" then ask user extra questions
config.update(ask_user(self.questions))
return r
# ---- Functions ----
def ask_user(service_questions):
response = {}
for q in service_questions:
print()
response[q.name] = q.ask(response)
return response
def generate_service_config():
service_questions = [
CONFIG_STR(
'service_category',
require_value=True,
default='site',
description='The service category',
example="'site' or '/application/database'"
),
CONFIG_STR(
'service_name',
require_value=True,
description="""The name of the service, which follows the service category
""",
example="'myapp'"
),
CONFIG_STR(
'service_version',
require_value=True,
description="The version of the service manifest",
default='1',
example="'1'"
),
CONFIG_STR(
'common_name',
require_value=False,
description="""The human readable name of the service
""",
example="'My service.'"
),
CONFIG_IF(
'multi_instance',
description="Can this service run multiple instances",
default=False,
questions=[
CONFIG_STR('instance_name', require_value=True, default='default', example="default")
]
),
CONFIG_STR(
'config_file',
require_value=False,
description="""Full path to a config file; leave blank if no config file
required""",
example="'/etc/myservice.conf'"
),
CONFIG_STR(
'exec_method_start',
require_value=True,
description="""The full command to start the service; may contain
'%{config_file}' to substitute the configuration file
""",
example="'/usr/bin/myservice %{config_file}'"
),
CONFIG_STR(
'exec_method_stop',
require_value=True,
default = ':kill',
description="""The full command to stop the service; may specify ':kill' to let
SMF kill the service processes automatically
""",
example="""'/usr/bin/myservice_ctl stop' or ':kill' to let SMF kill
the service processes automatically"""
),
CONFIG_STR(
'startd_model',
require_value=True,
default = 'wait',
description="""Choose a process management model:
'wait' : long-running process that runs in the foreground (default)
'contract' : long-running process that daemonizes or forks itself
(i.e. start command returns immediately)
'transient' : short-lived process, performs an action and ends quickly
""",
# example="",
accepted_values = ('wait', 'contract', 'transient'),
),
CONFIG_BOOL(
'depends_on_network',
description="Does this service depend on the network being ready",
default=True
),
CONFIG_BOOL(
'depends_on_filesystem',
description="Does this service depend on the local filesystems being ready",
default=True
),
CONFIG_BOOL(
'instance_enabled',
default=False,
description="Should the service be enabled by default"
),
CONFIG_STR(
'method_credential_user',
require_value=False,
description="""The user to change to when executing the
start/stop/refresh methods""",
example="'webservd'"
),
CONFIG_STR(
'method_credential_group',
require_value=False,
description="""The group to change to when executing the
start/stop/refresh methods""",
example="'webservd'"
),
]
service_config = ask_user(service_questions)
logging.debug(service_config)
return service_config
def create_manifest(outfp, service_config):
tmpl = MarkupTemplate(MANIFEST_TEMPLATE)
xml = tmpl.generate(**service_config).render('xml', strip_whitespace=False)
outfp.write(xml)
def main(argv=None):
if argv is None:
argv = sys.argv
# define usage and version messages
usageMsg = "usage: %s [options] output.xml" % sys.argv[0]
versionMsg = """%s %s""" % (os.path.basename(argv[0]), version)
description = """Create an SMF service manifest file. The resulting
XML file can be validated and imported into SMF using the 'svccfg' command.
For example, "svccfg validate myservice.xml", "svccfg -v import myservice.xml".
"""
# get a parser object and define our options
parser = optparse.OptionParser(usage=usageMsg, version=versionMsg, description=description)
# Switches
parser.add_option('-v', '--verbose', dest='verbose',
action='store_true', default=False,
help="verbose output")
parser.add_option('-d', '--debug', dest='debug',
action='store_true', default=False,
help="debugging output (very verbose)")
# Parse options & arguments
(options, args) = parser.parse_args()
if len(args) < 1:
parser.error("Output file must be specified.")
if len(args) > 1:
parser.error("Only one output file can be specified.")
if options.verbose:
loglevel = logging.INFO
elif options.debug:
loglevel = logging.DEBUG
else:
loglevel = logging.WARNING
logging.basicConfig(
level=loglevel,
# format='%(asctime)s %(levelname)s %(message)s',
format='%(message)s',
)
output_filename = args[0]
output = open(output_filename, 'w')
service_config = generate_service_config()
create_manifest(output, service_config)
output.close()
print("\nManifest written to %s" %output_filename)
print('You can validate the XML file with "svccfg validate %s"' %output_filename)
print('And create the SMF service with "svccfg import %s"' %output_filename)
return 0
if __name__ == "__main__":
sys.exit(main())
| 30.923404
| 164
| 0.555938
| 3,478
| 0.239301
| 0
| 0
| 0
| 0
| 0
| 0
| 7,461
| 0.513348
|
e9f182577a3561deeedd13bd4f63beb75d349a4d
| 7,183
|
py
|
Python
|
lemon_boy.py
|
hug58/Lemon-Boy-platformer
|
5ec5dd8974088fce5084e6249d13e7bb47621669
|
[
"MIT"
] | 4
|
2019-03-12T09:02:17.000Z
|
2019-05-06T20:31:18.000Z
|
lemon_boy.py
|
hug58/Lemon-Boy-platformer
|
5ec5dd8974088fce5084e6249d13e7bb47621669
|
[
"MIT"
] | null | null | null |
lemon_boy.py
|
hug58/Lemon-Boy-platformer
|
5ec5dd8974088fce5084e6249d13e7bb47621669
|
[
"MIT"
] | 2
|
2019-03-11T06:51:06.000Z
|
2020-09-01T16:17:06.000Z
|
from script import *
from script.menu import Menu
from script import image,sound,resolve_route
from script.player import Player
from script.enemy import Apple
from script.elementos import Trap,Door,Trampoline,Key,Lemon
from script.camera import Camera
from script.tilemap import TileMap
pg.display.init()
pg.joystick.init()
pg.font.init()
WIDTH = 620
HEIGHT = 480
WHITE2 = (252,252,238)
LEMON = (249,215,0)
GREEN = (140,196,51)
SCREEN = pg.display.set_mode((WIDTH,HEIGHT))
pg.display.set_caption("Project Hugo")
pg.display.set_icon(pg.image.load(resolve_route("lemon.ico") ))
class Plataform(pg.sprite.Sprite):
def __init__(self,x,y,w,h):
pg.sprite.Sprite.__init__(self)
self.rect = pg.Rect((x,y),(w,h))
self.rect.x = x
self.rect.y = y
self.vlx = 0
class Spikes(pg.sprite.Sprite):
def __init__(self,x,y,w,h,game):
pg.sprite.Sprite.__init__(self)
self.rect = pg.Rect((x,y),(w,h))
self.rect.x = x
self.rect.y = y
self.game = game
def update(self):
if self.rect.colliderect(self.game.player.rect):
self.game.player.dead = True
for enemy in self.game.enemies:
if self.rect.colliderect(enemy.rect):
enemy.kill()
class Game:
def __init__(self,maps):
self.maps = maps
self.sound = sound
self.map_cont = 0
self.map = TileMap(self.maps[self.map_cont])
self.Mapimage = self.map.make_map()
self.Maprect = self.Mapimage.get_rect()
self.surface_size = (WIDTH,HEIGHT)
self.camera = Camera(self.map.width,self.map.height,self.surface_size)
self.changes_maps = False
    #It is faster to create another Surface than to draw directly onto the screen
def load(self):
self.effect = pg.sprite.Group()
self.arrow = pg.sprite.Group()
self.plataform = pg.sprite.Group()
self.plataform_m = pg.sprite.Group()
self.enemies = pg.sprite.Group()
self.objs = pg.sprite.Group()
self.spike = pg.sprite.Group()
self.trap = pg.sprite.Group()
self.fire_cannon = pg.sprite.Group()
for sprite in self.map.tmxdata.objectgroups:
for tile_object in sprite:
if tile_object.name == "Player":
self.player = Player(tile_object.x,
tile_object.y,self)
for tile_object in self.map.tmxdata.objects:
if tile_object.name == "plataform":
self.plataform.add(
Plataform(tile_object.x,
tile_object.y,
tile_object.width,
tile_object.height)
)
elif tile_object.name == "Door":
self.objs.add(
Door(tile_object.x,
tile_object.y,
self,"YELLOW"))
elif tile_object.name == "Apple":
self.enemies.add(
Apple(tile_object
.x,tile_object.y,
self,tile_object.type))
elif tile_object.name == "Key":
self.objs.add(
Key(tile_object.x,
tile_object.y,
self))
elif tile_object.name == "Lemon":
self.objs.add(
Lemon(tile_object.x,
tile_object.y,
self))
elif tile_object.name == "Spike_trap":
self.trap.add(
Trap(tile_object.x,
tile_object.y,
self,tile_object.type))
elif tile_object.name == "plataform_m":
self.plataform_m.add(
plataform_m(
tile_object.x,
tile_object.y,
tile_object.type))
elif tile_object.name == "Spike":
self.spike.add(
Spikes(tile_object.x,
tile_object.y,
tile_object.width,
tile_object.height,
self))
elif tile_object.name == "Fire_cannon":
self.fire_cannon.add(
Fire_Cannon(
tile_object.x,
tile_object.y,
self,
tile_object.type))
elif tile_object.name == "jump": self.objs.add(
Trampoline(
tile_object.x,
tile_object.y,
self))
def update(self):
self.camera.update(self.player)
self.spike.update()
self.trap.update()
self.fire_cannon.update()
self.arrow.update()
self.enemies.update()
self.plataform_m.update()
self.effect.update()
for objs in self.objs:
objs.update()
try:
if objs.next == True:
if self.map_cont < len(self.maps) -1: self.map_cont +=1
else: self.map_cont = 0
self.map = TileMap(self.maps[self.map_cont])
self.Mapimage = self.map.make_map()
self.Maprect = self.Mapimage.get_rect()
self.camera = Camera(self.map.width,self.map.height)
self.load()
except Exception as e: pass
if self.changes_maps == True:
self.map = TileMap(self.maps[self.map_cont])
self.Mapimage = self.map.make_map()
self.Maprect = self.Mapimage.get_rect()
self.camera = Camera(self.map.width,self.map.height,self.surface_size)
self.load()
self.changes_maps = False
self.player.update()
def draw(self):
for arrow in self.arrow:
SCREEN.blit(arrow.image,self.camera.apply(arrow))
SCREEN.blit(self.Mapimage,self.camera.apply_rect(self.Maprect))
for plataform_m in self.plataform_m:
SCREEN.blit(plataform_m.image,self.camera.apply(plataform_m))
for cannon in self.fire_cannon:
for fireball in cannon.fireball:
SCREEN.blit(fireball.image,self.camera.apply(fireball))
for enemies in self.enemies:
SCREEN.blit(enemies.image,self.camera.apply(enemies))
for objs in self.objs:
SCREEN.blit(objs.image,self.camera.apply(objs))
SCREEN.blit(self.player.image,self.camera.apply(self.player))
for trap in self.trap:
SCREEN.blit(trap.image,self.camera.apply(trap))
for effect in self.effect:
SCREEN.blit(effect.image,self.camera.apply(effect))
def main():
exit = False
clock = pg.time.Clock()
maps= ["map/map1.tmx",
"map/map2.tmx",
"map/map3.tmx",
"map/map4.tmx",
"map/map5.tmx",
"map/map6.tmx",
"map/map7.tmx"]
menu = Menu(maps)
game = Game(menu.maps)
game.load()
    #Creating a joystick object and initializing it
joystick = pg.joystick.Joystick(0) if pg.joystick.get_count() > 0 else None
joystick.init() if joystick != None else None
background = pg.Surface((WIDTH,HEIGHT)).convert()
background.blit(pg.transform.scale(image["background"],(WIDTH,HEIGHT)),(0,0))
draw_background = lambda background: SCREEN.blit(background,(0,0))
while exit != True and menu.exit_game != True:
clock.tick(60)
for event in pg.event.get():
if event.type == pg.QUIT: exit = True
if event.type == pg.KEYDOWN:
if event.key == pg.K_x:
if game.player.cont_jump > 0:
game.player.diffx = 0
game.sound["jump"].stop()
game.sound["jump"].play()
game.player.vly = -8
game.player.cont_jump -=1
game.player.direcciony = -1
if event.key == pg.K_RETURN: menu.exit = False
if event.type == pg.KEYUP:
if event.key == pg.K_RIGHT or event.key == pg.K_LEFT: game.player.stop = True
if event.key == pg.K_c:
if game.player.cont_shot >= 13:
game.player.shot()
game.player.cont_shot = 0
else: game.player.cont_shot = 0
if menu.changes_maps == True:
game.map_cont = menu.position
game.changes_maps = True
menu.changes_maps = False
if menu.exit != True:
menu.update(SCREEN)
draw_background(background)
        #Close the game completely without going through drawing the current level (lvl1 by default)
if menu.exit_game != True:
game.draw()
game.update()
pg.display.flip()
if __name__ == "__main__":
main()
| 24.599315
| 93
| 0.667688
| 4,797
| 0.667641
| 0
| 0
| 0
| 0
| 0
| 0
| 467
| 0.064997
|
e9f1fbbda761ade5d0893da97c048863bb481369
| 4,915
|
py
|
Python
|
pliers/utils/base.py
|
jsmentch/pliers
|
ef13552793ab5789065249a89230baced407c472
|
[
"BSD-3-Clause"
] | null | null | null |
pliers/utils/base.py
|
jsmentch/pliers
|
ef13552793ab5789065249a89230baced407c472
|
[
"BSD-3-Clause"
] | null | null | null |
pliers/utils/base.py
|
jsmentch/pliers
|
ef13552793ab5789065249a89230baced407c472
|
[
"BSD-3-Clause"
] | null | null | null |
''' Miscellaneous internal utilities. '''
import collections.abc
import os
from abc import ABCMeta, abstractmethod, abstractproperty
from types import GeneratorType
from itertools import islice
from tqdm import tqdm
import pandas as pd
from pliers import config
from pliers.support.exceptions import MissingDependencyError
def listify(obj):
''' Wraps all non-list or tuple objects in a list; provides a simple way
to accept flexible arguments. '''
return obj if isinstance(obj, (list, tuple, type(None))) else [obj]
def flatten(l):
''' Flatten an iterable. '''
for el in l:
        if isinstance(el, collections.abc.Iterable) and not isinstance(el, str):
yield from flatten(el)
else:
yield el
def flatten_dict(d, parent_key='', sep='_'):
''' Flattens a multi-level dictionary into a single level by concatenating
nested keys with the char provided in the sep argument.
Solution from https://stackoverflow.com/questions/6027558/flatten-nested-python-dictionaries-compressing-keys'''
items = []
for k, v in d.items():
new_key = parent_key + sep + k if parent_key else k
        if isinstance(v, collections.abc.MutableMapping):
items.extend(flatten_dict(v, new_key, sep=sep).items())
else:
items.append((new_key, v))
return dict(items)
def batch_iterable(l, n):
''' Chunks iterable into n sized batches
Solution from: http://stackoverflow.com/questions/1915170/split-a-generator-iterable-every-n-items-in-python-splitevery'''
i = iter(l)
piece = list(islice(i, n))
while piece:
yield piece
piece = list(islice(i, n))
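# Illustrative examples (a sketch, not part of the original module):
#
#     >>> list(flatten([1, [2, [3, 4]], 'ab']))
#     [1, 2, 3, 4, 'ab']
#     >>> list(batch_iterable(range(7), 3))
#     [[0, 1, 2], [3, 4, 5], [6]]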
def set_iterable_type(obj):
''' Returns either a generator or a list depending on config-level
settings. Should be used to wrap almost every internal iterable return.
Also inspects elements recursively in the case of list returns, to
ensure that there are no nested generators. '''
if not isiterable(obj):
return obj
if config.get_option('use_generators'):
return obj if isgenerator(obj) else (i for i in obj)
else:
return [set_iterable_type(i) for i in obj]
class classproperty:
''' Implements a @classproperty decorator analogous to @classmethod.
Solution from: http://stackoverflow.com/questions/128573/using-property-on-classmethodss
'''
def __init__(self, fget):
self.fget = fget
def __get__(self, owner_self, owner_cls):
return self.fget(owner_cls)
def isiterable(obj):
''' Returns True if the object is one of allowable iterable types. '''
return isinstance(obj, (list, tuple, pd.Series, GeneratorType, tqdm))
def isgenerator(obj):
''' Returns True if object is a generator, or a generator wrapped by a
tqdm object. '''
return isinstance(obj, GeneratorType) or (hasattr(obj, 'iterable') and
isinstance(getattr(obj, 'iterable'), GeneratorType))
def progress_bar_wrapper(iterable, **kwargs):
''' Wrapper that applies tqdm progress bar conditional on config settings.
'''
return tqdm(iterable, **kwargs) if (config.get_option('progress_bar')
and not isinstance(iterable, tqdm)) else iterable
module_names = {}
Dependency = collections.namedtuple('Dependency', 'package value')
def attempt_to_import(dependency, name=None, fromlist=None):
if name is None:
name = dependency
try:
mod = __import__(dependency, fromlist=fromlist)
except ImportError:
mod = None
module_names[name] = Dependency(dependency, mod)
return mod
def verify_dependencies(dependencies):
missing = []
for dep in listify(dependencies):
if module_names[dep].value is None:
missing.append(module_names[dep].package)
if missing:
raise MissingDependencyError(missing)
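# Illustrative usage sketch (not part of the original module): optional
# dependencies are imported lazily and validated right before use. The
# 'librosa' package and the helper function below are only example placeholders.
#
#     librosa = attempt_to_import('librosa')
#
#     def onset_strength(y, sr):
#         verify_dependencies(['librosa'])  # raises MissingDependencyError if absent
#         return librosa.onset.onset_strength(y=y, sr=sr)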
class EnvironmentKeyMixin:
@classproperty
def _env_keys(cls):
pass
@classproperty
def env_keys(cls):
return listify(cls._env_keys)
@classproperty
def available(cls):
return all([k in os.environ for k in cls.env_keys])
class APIDependent(EnvironmentKeyMixin, metaclass=ABCMeta):
_rate_limit = 0
def __init__(self, rate_limit=None, **kwargs):
self.transformed_stim_count = 0
self.validated_keys = set()
self.rate_limit = rate_limit if rate_limit else self._rate_limit
self._last_request_time = 0
super().__init__(**kwargs)
@abstractproperty
def api_keys(self):
pass
def validate_keys(self):
if all(k in self.validated_keys for k in self.api_keys):
return True
else:
valid = self.check_valid_keys()
if valid:
for k in self.api_keys:
self.validated_keys.add(k)
return valid
@abstractmethod
def check_valid_keys(self):
pass
| 29.431138
| 126
| 0.673042
| 1,389
| 0.282604
| 528
| 0.107426
| 338
| 0.068769
| 0
| 0
| 1,358
| 0.276297
|
e9f2022d8957402e8b079abe1da08f467caf510b
| 2,431
|
py
|
Python
|
lmnet/lmnet/datasets/cifar100_distribute.py
|
toohsk/blueoil
|
596922caa939db9c5ecbac3286fbf6f703865ee6
|
[
"Apache-2.0"
] | null | null | null |
lmnet/lmnet/datasets/cifar100_distribute.py
|
toohsk/blueoil
|
596922caa939db9c5ecbac3286fbf6f703865ee6
|
[
"Apache-2.0"
] | 1
|
2018-11-21T07:06:17.000Z
|
2018-11-21T07:06:17.000Z
|
lmnet/lmnet/datasets/cifar100_distribute.py
|
toohsk/blueoil
|
596922caa939db9c5ecbac3286fbf6f703865ee6
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
# Copyright 2018 The Blueoil Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
import functools
import numpy as np
from lmnet.datasets.cifar100 import Cifar100
from lmnet.datasets.base import DistributionInterface
from lmnet.utils.random import shuffle
class Cifar100Distribution(Cifar100, DistributionInterface):
def __init__(
self,
subset="train",
batch_size=100,
*args,
**kwargs
):
super().__init__(
subset=subset,
batch_size=batch_size,
*args,
**kwargs,
)
self._init_images_and_labels()
@functools.lru_cache(maxsize=None)
def _images_and_labels(self):
if self.subset == "train":
files = ["train"]
else:
files = ["test"]
data = [self._load_data(filename) for filename in files]
images = [images for images, labels in data]
images = np.concatenate(images, axis=0)
labels = [labels for images, labels in data]
labels = np.concatenate(labels, axis=0)
return images, labels
def update_dataset(self, indices):
"""Update own dataset by indices."""
# Re Initialize dataset
self._init_images_and_labels()
# Update dataset by given indices
self.images = self.images[indices, :]
self.labels = self.labels[indices]
self.current_element_index = 0
def get_shuffle_index(self):
"""Return list of shuffled index."""
images, _ = self._images_and_labels()
random_indices = shuffle(range(len(images)), seed=self.seed)
print("Shuffle {} train dataset with random state {}.".format(self.__class__.__name__, self.seed))
self.seed += 1
return random_indices
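# Illustrative usage sketch (not part of the original module): shuffle the
# training set and keep a shard of it. Requires the CIFAR-100 data files to
# be available locally.
#
#     dataset = Cifar100Distribution(subset="train", batch_size=100)
#     indices = dataset.get_shuffle_index()
#     dataset.update_dataset(indices[::2])  # keep every second example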
| 31.571429
| 106
| 0.626903
| 1,543
| 0.634718
| 0
| 0
| 478
| 0.196627
| 0
| 0
| 897
| 0.368984
|
e9f22bb1ea67ab94d6fe17f1e1dc1a68f58ceef8
| 3,149
|
py
|
Python
|
gridengine/functional.py
|
MiqG/gridengine
|
457c34b16f2c43b9be985cd822f30305d68afd91
|
[
"BSD-3-Clause"
] | 20
|
2015-01-31T16:52:15.000Z
|
2019-03-22T20:09:50.000Z
|
gridengine/functional.py
|
MiqG/gridengine
|
457c34b16f2c43b9be985cd822f30305d68afd91
|
[
"BSD-3-Clause"
] | 1
|
2021-11-27T16:33:59.000Z
|
2021-11-27T16:33:59.000Z
|
gridengine/functional.py
|
MiqG/gridengine
|
457c34b16f2c43b9be985cd822f30305d68afd91
|
[
"BSD-3-Clause"
] | 7
|
2015-10-27T16:49:52.000Z
|
2021-09-22T10:16:25.000Z
|
import inspect
import functools
from gridengine import job, dispatch, schedulers
# ----------------------------------------------------------------------------
# Partial
# ----------------------------------------------------------------------------
def isexception(x):
"""Test whether the value is an Exception instance"""
return isinstance(x, Exception)
def isnumeric(x):
"""Test whether the value can be represented as a number"""
try:
float(x)
return True
    except (TypeError, ValueError):
return False
def partial(f, *args, **kwargs):
"""Return a callable partially closed over the input function and arguments
partial is functionally equivalent to functools.partial, however it also
applies a variant of functools.update_wrapper, with:
__doc__ = f.__doc__
__module__ = f.__module__
__name__ = f.__name__ + string_representation_of_closed_arguments
This is useful for running functions with different parameter sets, whilst
being able to identify the variants by name
"""
def name(var):
try:
return var.__name__
except AttributeError:
return str(var)[0:5] if isnumeric(var) else var.__class__.__name__
g = functools.partial(f, *args, **kwargs)
g.__doc__ = f.__doc__
g.__module__ = f.__module__
g.__name__ = '_'.join([f.__name__] + [name(arg) for arg in list(args)+list(kwargs.values())])
return g
# ----------------------------------------------------------------------------
# Map
# ----------------------------------------------------------------------------
def map(f, args, scheduler=schedulers.best_available, reraise=True):
"""Perform a functional-style map operation
Apply a function f to each argument in the iterable args. This is equivalent to
y = [f(x) for x in args]
or
y = map(f, args)
except that each argument in the iterable is assigned to a separate Job
and scheduled to run via the scheduler.
The default scheduler is a schedulers.ProcessScheduler instance. To run map
on a grid engine, simply pass a schedulers.GridEngineScheduler instance.
Args:
f (func): A picklable function
args (iterable): An iterable (list) of arguments to f
Keyword Args:
scheduler: A schedulers.Scheduler instance or class. By default, the
system tries to return the best_available() scheduler. Use this if you
want to set a scheduler specifically.
reraise (bool): Reraise exceptions that occur in any of the jobs. Set this
to False if you want to salvage any good results.
Returns:
List of return values equivalent to the builtin map function
Raises:
Any exception that would occur when applying [f(x) for x in args]
"""
# setup the dispatcher
dispatcher = dispatch.JobDispatcher(scheduler)
# allocate the jobs
jobs = [job.Job(target=f, args=(arg,)) for arg in args]
# run the jobs (guaranteed to return in the same order)
dispatcher.dispatch(jobs)
results = dispatcher.join()
# check for exceptions
if reraise:
for exception in filter(isexception, results):
# an error occurred during execution of one of the jobs, reraise it
raise exception
return results
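# Illustrative usage sketch (not part of the original module): square a list
# of numbers with a partially-applied power function, forcing the process
# scheduler so no grid engine is needed. The helper function is hypothetical.
#
#     from gridengine import functional, schedulers
#
#     def power(exponent, x):
#         return x ** exponent
#
#     squares = functional.map(functional.partial(power, 2), [1, 2, 3, 4],
#                              scheduler=schedulers.ProcessScheduler)
#     # -> [1, 4, 9, 16]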
| 32.463918
| 97
| 0.64719
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 2,155
| 0.684344
|
e9f24ec99f076ba98908603ffa1d50f5644d6aa7
| 31,441
|
py
|
Python
|
Bio/Prosite/__init__.py
|
nuin/biopython
|
045d57b08799ef52c64bd4fa807629b8a7e9715a
|
[
"PostgreSQL"
] | 2
|
2016-05-09T04:20:06.000Z
|
2017-03-07T10:25:53.000Z
|
Bio/Prosite/__init__.py
|
nuin/biopython
|
045d57b08799ef52c64bd4fa807629b8a7e9715a
|
[
"PostgreSQL"
] | null | null | null |
Bio/Prosite/__init__.py
|
nuin/biopython
|
045d57b08799ef52c64bd4fa807629b8a7e9715a
|
[
"PostgreSQL"
] | 1
|
2019-08-19T22:05:14.000Z
|
2019-08-19T22:05:14.000Z
|
# Copyright 1999 by Jeffrey Chang. All rights reserved.
# Copyright 2000 by Jeffrey Chang. All rights reserved.
# Revisions Copyright 2007 by Peter Cock. All rights reserved.
# This code is part of the Biopython distribution and governed by its
# license. Please see the LICENSE file that should have been included
# as part of this package.
"""
This module provides code to work with the prosite dat file from
Prosite.
http://www.expasy.ch/prosite/
Tested with:
Release 15.0, July 1998
Release 16.0, July 1999
Release 17.0, Dec 2001
Release 19.0, Mar 2006
Functions:
parse Iterates over entries in a Prosite file.
scan_sequence_expasy Scan a sequence for occurrences of Prosite patterns.
index_file Index a Prosite file for a Dictionary.
_extract_record Extract Prosite data from a web page.
_extract_pattern_hits Extract Prosite patterns from a web page.
Classes:
Record Holds Prosite data.
PatternHit Holds data from a hit against a Prosite pattern.
Dictionary Accesses a Prosite file using a dictionary interface.
RecordParser Parses a Prosite record into a Record object.
Iterator Iterates over entries in a Prosite file; DEPRECATED.
_Scanner Scans Prosite-formatted data.
_RecordConsumer Consumes Prosite data to a Record object.
"""
from types import *
import re
import sgmllib
from Bio import File
from Bio import Index
from Bio.ParserSupport import *
# There is probably a cleaner way to write the read/parse functions
# if we don't use the "parser = RecordParser(); parser.parse(handle)"
# approach. Leaving that for the next revision of Bio.Prosite.
def parse(handle):
import cStringIO
parser = RecordParser()
text = ""
for line in handle:
text += line
if line[:2]=='//':
handle = cStringIO.StringIO(text)
record = parser.parse(handle)
text = ""
if not record: # Then this was the copyright notice
continue
yield record
def read(handle):
parser = RecordParser()
try:
record = parser.parse(handle)
except ValueError, error:
if error.message=="There doesn't appear to be a record":
raise ValueError("No Prosite record found")
else:
raise error
# We should have reached the end of the record by now
remainder = handle.read()
if remainder:
raise ValueError("More than one Prosite record found")
return record
class Record:
"""Holds information from a Prosite record.
Members:
name ID of the record. e.g. ADH_ZINC
type Type of entry. e.g. PATTERN, MATRIX, or RULE
accession e.g. PS00387
created Date the entry was created. (MMM-YYYY)
data_update Date the 'primary' data was last updated.
info_update Date data other than 'primary' data was last updated.
pdoc ID of the PROSITE DOCumentation.
description Free-format description.
pattern The PROSITE pattern. See docs.
matrix List of strings that describes a matrix entry.
rules List of rule definitions (from RU lines). (strings)
prorules List of prorules (from PR lines). (strings)
NUMERICAL RESULTS
nr_sp_release SwissProt release.
nr_sp_seqs Number of seqs in that release of Swiss-Prot. (int)
nr_total Number of hits in Swiss-Prot. tuple of (hits, seqs)
nr_positive True positives. tuple of (hits, seqs)
nr_unknown Could be positives. tuple of (hits, seqs)
nr_false_pos False positives. tuple of (hits, seqs)
nr_false_neg False negatives. (int)
nr_partial False negatives, because they are fragments. (int)
COMMENTS
cc_taxo_range Taxonomic range. See docs for format
cc_max_repeat Maximum number of repetitions in a protein
cc_site Interesting site. list of tuples (pattern pos, desc.)
cc_skip_flag Can this entry be ignored?
cc_matrix_type
cc_scaling_db
cc_author
cc_ft_key
cc_ft_desc
cc_version version number (introduced in release 19.0)
DATA BANK REFERENCES - The following are all
lists of tuples (swiss-prot accession,
swiss-prot name)
dr_positive
dr_false_neg
dr_false_pos
dr_potential Potential hits, but fingerprint region not yet available.
dr_unknown Could possibly belong
pdb_structs List of PDB entries.
"""
def __init__(self):
self.name = ''
self.type = ''
self.accession = ''
self.created = ''
self.data_update = ''
self.info_update = ''
self.pdoc = ''
self.description = ''
self.pattern = ''
self.matrix = []
self.rules = []
self.prorules = []
self.postprocessing = []
self.nr_sp_release = ''
self.nr_sp_seqs = ''
self.nr_total = (None, None)
self.nr_positive = (None, None)
self.nr_unknown = (None, None)
self.nr_false_pos = (None, None)
self.nr_false_neg = None
self.nr_partial = None
self.cc_taxo_range = ''
self.cc_max_repeat = ''
self.cc_site = []
self.cc_skip_flag = ''
self.dr_positive = []
self.dr_false_neg = []
self.dr_false_pos = []
self.dr_potential = []
self.dr_unknown = []
self.pdb_structs = []
class PatternHit:
"""Holds information from a hit against a Prosite pattern.
Members:
name ID of the record. e.g. ADH_ZINC
accession e.g. PS00387
pdoc ID of the PROSITE DOCumentation.
description Free-format description.
matches List of tuples (start, end, sequence) where
start and end are indexes of the match, and sequence is
the sequence matched.
"""
def __init__(self):
self.name = None
self.accession = None
self.pdoc = None
self.description = None
self.matches = []
def __str__(self):
lines = []
lines.append("%s %s %s" % (self.accession, self.pdoc, self.name))
lines.append(self.description)
lines.append('')
if len(self.matches) > 1:
lines.append("Number of matches: %s" % len(self.matches))
for i in range(len(self.matches)):
start, end, seq = self.matches[i]
range_str = "%d-%d" % (start, end)
if len(self.matches) > 1:
lines.append("%7d %10s %s" % (i+1, range_str, seq))
else:
lines.append("%7s %10s %s" % (' ', range_str, seq))
return "\n".join(lines)
class Iterator:
"""Returns one record at a time from a Prosite file.
Methods:
next Return the next record from the stream, or None.
"""
def __init__(self, handle, parser=None):
"""__init__(self, handle, parser=None)
Create a new iterator. handle is a file-like object. parser
is an optional Parser object to change the results into another form.
If set to None, then the raw contents of the file will be returned.
"""
import warnings
        warnings.warn("Bio.Prosite.Iterator is deprecated; we recommend using the function Bio.Prosite.parse instead. Please contact the Biopython developers at biopython-dev@biopython.org if you cannot use Bio.Prosite.parse instead of Bio.Prosite.Iterator.",
DeprecationWarning)
if type(handle) is not FileType and type(handle) is not InstanceType:
raise ValueError("I expected a file handle or file-like object")
self._uhandle = File.UndoHandle(handle)
self._parser = parser
def next(self):
"""next(self) -> object
Return the next Prosite record from the file. If no more records,
return None.
"""
# Skip the copyright info, if it's the first record.
line = self._uhandle.peekline()
if line[:2] == 'CC':
while 1:
line = self._uhandle.readline()
if not line:
break
if line[:2] == '//':
break
if line[:2] != 'CC':
raise ValueError("Oops, where's the copyright?")
lines = []
while 1:
line = self._uhandle.readline()
if not line:
break
lines.append(line)
if line[:2] == '//':
break
if not lines:
return None
data = "".join(lines)
if self._parser is not None:
return self._parser.parse(File.StringHandle(data))
return data
def __iter__(self):
return iter(self.next, None)
class Dictionary:
"""Accesses a Prosite file using a dictionary interface.
"""
__filename_key = '__filename'
def __init__(self, indexname, parser=None):
"""__init__(self, indexname, parser=None)
Open a Prosite Dictionary. indexname is the name of the
index for the dictionary. The index should have been created
using the index_file function. parser is an optional Parser
object to change the results into another form. If set to None,
then the raw contents of the file will be returned.
"""
self._index = Index.Index(indexname)
self._handle = open(self._index[Dictionary.__filename_key])
self._parser = parser
def __len__(self):
return len(self._index)
def __getitem__(self, key):
start, len = self._index[key]
self._handle.seek(start)
data = self._handle.read(len)
if self._parser is not None:
return self._parser.parse(File.StringHandle(data))
return data
def __getattr__(self, name):
return getattr(self._index, name)
class ExPASyDictionary:
"""Access PROSITE at ExPASy using a read-only dictionary interface.
"""
def __init__(self, delay=5.0, parser=None):
"""__init__(self, delay=5.0, parser=None)
Create a new Dictionary to access PROSITE. parser is an optional
parser (e.g. Prosite.RecordParser) object to change the results
into another form. If set to None, then the raw contents of the
file will be returned. delay is the number of seconds to wait
between each query.
"""
import warnings
from Bio.WWW import RequestLimiter
warnings.warn("Bio.Prosite.ExPASyDictionary is deprecated. Please use the function Bio.ExPASy.get_prosite_raw instead.",
DeprecationWarning)
self.parser = parser
self.limiter = RequestLimiter(delay)
def __len__(self):
raise NotImplementedError("Prosite contains lots of entries")
def clear(self):
raise NotImplementedError("This is a read-only dictionary")
def __setitem__(self, key, item):
raise NotImplementedError("This is a read-only dictionary")
def update(self):
raise NotImplementedError("This is a read-only dictionary")
def copy(self):
raise NotImplementedError("You don't need to do this...")
def keys(self):
raise NotImplementedError("You don't really want to do this...")
def items(self):
raise NotImplementedError("You don't really want to do this...")
def values(self):
raise NotImplementedError("You don't really want to do this...")
def has_key(self, id):
"""has_key(self, id) -> bool"""
try:
self[id]
except KeyError:
return 0
return 1
def get(self, id, failobj=None):
try:
return self[id]
except KeyError:
return failobj
def __getitem__(self, id):
"""__getitem__(self, id) -> object
Return a Prosite entry. id is either the id or accession
for the entry. Raises a KeyError if there's an error.
"""
from Bio import ExPASy
# First, check to see if enough time has passed since my
# last query.
self.limiter.wait()
try:
handle = ExPASy.get_prosite_entry(id)
except IOError:
raise KeyError(id)
try:
handle = File.StringHandle(_extract_record(handle))
except ValueError:
raise KeyError(id)
if self.parser is not None:
return self.parser.parse(handle)
return handle.read()
class RecordParser(AbstractParser):
"""Parses Prosite data into a Record object.
"""
def __init__(self):
self._scanner = _Scanner()
self._consumer = _RecordConsumer()
def parse(self, handle):
self._scanner.feed(handle, self._consumer)
return self._consumer.data
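# Hedged usage sketch (not part of the original file): RecordParser.parse()
# returns a Record for the first entry found in the handle. The file name
# below is an assumption for illustration only.
#   >>> parser = RecordParser()
#   >>> record = parser.parse(open("prosite.dat"))
#   >>> print(record.name, record.accession)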
class _Scanner:
"""Scans Prosite-formatted data.
Tested with:
Release 15.0, July 1998
"""
def feed(self, handle, consumer):
"""feed(self, handle, consumer)
Feed in Prosite data for scanning. handle is a file-like
object that contains prosite data. consumer is a
Consumer object that will receive events as the report is scanned.
"""
if isinstance(handle, File.UndoHandle):
uhandle = handle
else:
uhandle = File.UndoHandle(handle)
consumer.finished = False
while not consumer.finished:
line = uhandle.peekline()
if not line:
break
elif is_blank_line(line):
# Skip blank lines between records
uhandle.readline()
continue
elif line[:2] == 'ID':
self._scan_record(uhandle, consumer)
elif line[:2] == 'CC':
self._scan_copyrights(uhandle, consumer)
else:
raise ValueError("There doesn't appear to be a record")
def _scan_copyrights(self, uhandle, consumer):
consumer.start_copyrights()
self._scan_line('CC', uhandle, consumer.copyright, any_number=1)
self._scan_terminator(uhandle, consumer)
consumer.end_copyrights()
def _scan_record(self, uhandle, consumer):
consumer.start_record()
for fn in self._scan_fns:
fn(self, uhandle, consumer)
# In Release 15.0, C_TYPE_LECTIN_1 has the DO line before
# the 3D lines, instead of the other way around.
# Thus, I'll give the 3D lines another chance after the DO lines
# are finished.
if fn is self._scan_do.im_func:
self._scan_3d(uhandle, consumer)
consumer.end_record()
def _scan_line(self, line_type, uhandle, event_fn,
exactly_one=None, one_or_more=None, any_number=None,
up_to_one=None):
# Callers must set exactly one of exactly_one, one_or_more, or
# any_number to a true value. I do not explicitly check to
# make sure this function is called correctly.
# This does not guarantee any parameter safety, but I
# like the readability. The other strategy I tried was have
# parameters min_lines, max_lines.
if exactly_one or one_or_more:
read_and_call(uhandle, event_fn, start=line_type)
if one_or_more or any_number:
while 1:
if not attempt_read_and_call(uhandle, event_fn,
start=line_type):
break
if up_to_one:
attempt_read_and_call(uhandle, event_fn, start=line_type)
def _scan_id(self, uhandle, consumer):
self._scan_line('ID', uhandle, consumer.identification, exactly_one=1)
def _scan_ac(self, uhandle, consumer):
self._scan_line('AC', uhandle, consumer.accession, exactly_one=1)
def _scan_dt(self, uhandle, consumer):
self._scan_line('DT', uhandle, consumer.date, exactly_one=1)
def _scan_de(self, uhandle, consumer):
self._scan_line('DE', uhandle, consumer.description, exactly_one=1)
def _scan_pa(self, uhandle, consumer):
self._scan_line('PA', uhandle, consumer.pattern, any_number=1)
def _scan_ma(self, uhandle, consumer):
self._scan_line('MA', uhandle, consumer.matrix, any_number=1)
## # ZN2_CY6_FUNGAL_2, DNAJ_2 in Release 15
## # contain a CC line buried within an 'MA' line. Need to check
## # for that.
## while 1:
## if not attempt_read_and_call(uhandle, consumer.matrix, start='MA'):
## line1 = uhandle.readline()
## line2 = uhandle.readline()
## uhandle.saveline(line2)
## uhandle.saveline(line1)
## if line1[:2] == 'CC' and line2[:2] == 'MA':
## read_and_call(uhandle, consumer.comment, start='CC')
## else:
## break
def _scan_pp(self, uhandle, consumer):
#New PP line, PostProcessing, just after the MA line
self._scan_line('PP', uhandle, consumer.postprocessing, any_number=1)
def _scan_ru(self, uhandle, consumer):
self._scan_line('RU', uhandle, consumer.rule, any_number=1)
def _scan_nr(self, uhandle, consumer):
self._scan_line('NR', uhandle, consumer.numerical_results,
any_number=1)
def _scan_cc(self, uhandle, consumer):
self._scan_line('CC', uhandle, consumer.comment, any_number=1)
def _scan_dr(self, uhandle, consumer):
self._scan_line('DR', uhandle, consumer.database_reference,
any_number=1)
def _scan_3d(self, uhandle, consumer):
self._scan_line('3D', uhandle, consumer.pdb_reference,
any_number=1)
def _scan_pr(self, uhandle, consumer):
#New PR line, ProRule, between 3D and DO lines
self._scan_line('PR', uhandle, consumer.prorule, any_number=1)
def _scan_do(self, uhandle, consumer):
self._scan_line('DO', uhandle, consumer.documentation, exactly_one=1)
def _scan_terminator(self, uhandle, consumer):
self._scan_line('//', uhandle, consumer.terminator, exactly_one=1)
#This is a list of scan functions in the order expected in the file.
#The function definitions define how many times each line type is expected
#(or if optional):
_scan_fns = [
_scan_id,
_scan_ac,
_scan_dt,
_scan_de,
_scan_pa,
_scan_ma,
_scan_pp,
_scan_ru,
_scan_nr,
_scan_cc,
# This is a really dirty hack, and should be fixed properly at
# some point. ZN2_CY6_FUNGAL_2, DNAJ_2 in Rel 15 and PS50309
# in Rel 17 have lines out of order. Thus, I have to rescan
# these, which decreases performance.
_scan_ma,
_scan_nr,
_scan_cc,
_scan_dr,
_scan_3d,
_scan_pr,
_scan_do,
_scan_terminator
]
class _RecordConsumer(AbstractConsumer):
"""Consumer that converts a Prosite record to a Record object.
Members:
data Record with Prosite data.
"""
def __init__(self):
self.data = None
def start_record(self):
self.data = Record()
def end_record(self):
self._clean_record(self.data)
def identification(self, line):
cols = line.split()
if len(cols) != 3:
raise ValueError("I don't understand identification line\n%s" \
% line)
self.data.name = self._chomp(cols[1]) # don't want ';'
self.data.type = self._chomp(cols[2]) # don't want '.'
def accession(self, line):
cols = line.split()
if len(cols) != 2:
raise ValueError("I don't understand accession line\n%s" % line)
self.data.accession = self._chomp(cols[1])
def date(self, line):
uprline = line.upper()
cols = uprline.split()
# Release 15.0 contains both 'INFO UPDATE' and 'INF UPDATE'
if cols[2] != '(CREATED);' or \
cols[4] != '(DATA' or cols[5] != 'UPDATE);' or \
cols[7][:4] != '(INF' or cols[8] != 'UPDATE).':
raise ValueError("I don't understand date line\n%s" % line)
self.data.created = cols[1]
self.data.data_update = cols[3]
self.data.info_update = cols[6]
def description(self, line):
self.data.description = self._clean(line)
def pattern(self, line):
self.data.pattern = self.data.pattern + self._clean(line)
def matrix(self, line):
self.data.matrix.append(self._clean(line))
def postprocessing(self, line):
postprocessing = self._clean(line).split(";")
self.data.postprocessing.extend(postprocessing)
def rule(self, line):
self.data.rules.append(self._clean(line))
def numerical_results(self, line):
cols = self._clean(line).split(";")
for col in cols:
if not col:
continue
qual, data = [word.lstrip() for word in col.split("=")]
if qual == '/RELEASE':
release, seqs = data.split(",")
self.data.nr_sp_release = release
self.data.nr_sp_seqs = int(seqs)
elif qual == '/FALSE_NEG':
self.data.nr_false_neg = int(data)
elif qual == '/PARTIAL':
self.data.nr_partial = int(data)
elif qual in ['/TOTAL', '/POSITIVE', '/UNKNOWN', '/FALSE_POS']:
m = re.match(r'(\d+)\((\d+)\)', data)
if not m:
raise Exception("Broken data %s in comment line\n%s" \
% (repr(data), line))
hits = tuple(map(int, m.groups()))
if(qual == "/TOTAL"):
self.data.nr_total = hits
elif(qual == "/POSITIVE"):
self.data.nr_positive = hits
elif(qual == "/UNKNOWN"):
self.data.nr_unknown = hits
elif(qual == "/FALSE_POS"):
self.data.nr_false_pos = hits
else:
raise ValueError("Unknown qual %s in comment line\n%s" \
% (repr(qual), line))
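# Illustrative NR line (hedged example; the format is inferred from the parsing
# code above and the numbers are made up):
#   NR   /RELEASE=40.7,103373; /TOTAL=65(65); /POSITIVE=64(64); /UNKNOWN=0(0); /FALSE_POS=1(1);
# would populate nr_sp_release='40.7', nr_sp_seqs=103373, nr_total=(65, 65),
# nr_positive=(64, 64), nr_unknown=(0, 0) and nr_false_pos=(1, 1).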
def comment(self, line):
#Expect CC lines like this:
#CC /TAXO-RANGE=??EPV; /MAX-REPEAT=2;
#Can (normally) split on ";" and then on "="
cols = self._clean(line).split(";")
for col in cols:
if not col or col[:17] == 'Automatic scaling':
# DNAJ_2 in Release 15 has a non-standard comment line:
# CC Automatic scaling using reversed database
# Throw it away. (Should I keep it?)
continue
if col.count("=") == 0 :
#Missing qualifier! Can we recover gracefully?
#For example, from Bug 2403, in PS50293 have:
#CC /AUTHOR=K_Hofmann; N_Hulo
continue
qual, data = [word.lstrip() for word in col.split("=")]
if qual == '/TAXO-RANGE':
self.data.cc_taxo_range = data
elif qual == '/MAX-REPEAT':
self.data.cc_max_repeat = data
elif qual == '/SITE':
pos, desc = data.split(",")
self.data.cc_site.append((int(pos), desc))
elif qual == '/SKIP-FLAG':
self.data.cc_skip_flag = data
elif qual == '/MATRIX_TYPE':
self.data.cc_matrix_type = data
elif qual == '/SCALING_DB':
self.data.cc_scaling_db = data
elif qual == '/AUTHOR':
self.data.cc_author = data
elif qual == '/FT_KEY':
self.data.cc_ft_key = data
elif qual == '/FT_DESC':
self.data.cc_ft_desc = data
elif qual == '/VERSION':
self.data.cc_version = data
else:
raise ValueError("Unknown qual %s in comment line\n%s" \
% (repr(qual), line))
def database_reference(self, line):
refs = self._clean(line).split(";")
for ref in refs:
if not ref:
continue
acc, name, type = [word.strip() for word in ref.split(",")]
if type == 'T':
self.data.dr_positive.append((acc, name))
elif type == 'F':
self.data.dr_false_pos.append((acc, name))
elif type == 'N':
self.data.dr_false_neg.append((acc, name))
elif type == 'P':
self.data.dr_potential.append((acc, name))
elif type == '?':
self.data.dr_unknown.append((acc, name))
else:
raise ValueError("I don't understand type flag %s" % type)
def pdb_reference(self, line):
cols = line.split()
for id in cols[1:]: # get all but the '3D' col
self.data.pdb_structs.append(self._chomp(id))
def prorule(self, line):
#Assume that each PR line can contain multiple ";" separated rules
rules = self._clean(line).split(";")
self.data.prorules.extend(rules)
def documentation(self, line):
self.data.pdoc = self._chomp(self._clean(line))
def terminator(self, line):
self.finished = True
def _chomp(self, word, to_chomp='.,;'):
# Remove the punctuation at the end of a word.
if word[-1] in to_chomp:
return word[:-1]
return word
def _clean(self, line, rstrip=1):
# Clean up a line.
if rstrip:
return line[5:].rstrip()
return line[5:]
def scan_sequence_expasy(seq=None, id=None, exclude_frequent=None):
"""scan_sequence_expasy(seq=None, id=None, exclude_frequent=None) ->
list of PatternHit's
Search a sequence for occurrences of Prosite patterns. You can
specify either a sequence in seq or a SwissProt/trEMBL ID or accession
in id. Only one of those should be given. If exclude_frequent
is true, then patterns with a high probability of occurring
will be excluded.
"""
from Bio import ExPASy
if (seq and id) or not (seq or id):
raise ValueError("Please specify either a sequence or an id")
handle = ExPASy.scanprosite1(seq, id, exclude_frequent)
return _extract_pattern_hits(handle)
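# Hedged usage sketch (not from the original file); the sequence below is a
# made-up fragment purely for illustration:
#   >>> hits = scan_sequence_expasy(seq="MKVLAAGIVPLLLLIGFN")
#   >>> for hit in hits:
#   ...     print(hit.accession, hit.name, len(hit.matches))
# Exactly one of seq or id must be given, otherwise a ValueError is raised.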
def _extract_pattern_hits(handle):
"""_extract_pattern_hits(handle) -> list of PatternHit's
Extract hits from a web page. Raises a ValueError if there
was an error in the query.
"""
class parser(sgmllib.SGMLParser):
def __init__(self):
sgmllib.SGMLParser.__init__(self)
self.hits = []
self.broken_message = 'Some error occurred'
self._in_pre = 0
self._current_hit = None
self._last_found = None # Save state of parsing
def handle_data(self, data):
if data.find('try again') >= 0:
self.broken_message = data
return
elif data == 'illegal':
self.broken_message = 'Sequence contains illegal characters'
return
if not self._in_pre:
return
elif not data.strip():
return
if self._last_found is None and data[:4] == 'PDOC':
self._current_hit.pdoc = data
self._last_found = 'pdoc'
elif self._last_found == 'pdoc':
if data[:2] != 'PS':
raise ValueError("Expected accession but got:\n%s" % data)
self._current_hit.accession = data
self._last_found = 'accession'
elif self._last_found == 'accession':
self._current_hit.name = data
self._last_found = 'name'
elif self._last_found == 'name':
self._current_hit.description = data
self._last_found = 'description'
elif self._last_found == 'description':
m = re.findall(r'(\d+)-(\d+) (\w+)', data)
for start, end, seq in m:
self._current_hit.matches.append(
(int(start), int(end), seq))
def do_hr(self, attrs):
# <HR> inside a <PRE> section means a new hit.
if self._in_pre:
self._current_hit = PatternHit()
self.hits.append(self._current_hit)
self._last_found = None
def start_pre(self, attrs):
self._in_pre = 1
self.broken_message = None # Probably not broken
def end_pre(self):
self._in_pre = 0
p = parser()
p.feed(handle.read())
if p.broken_message:
raise ValueError(p.broken_message)
return p.hits
def index_file(filename, indexname, rec2key=None):
"""index_file(filename, indexname, rec2key=None)
Index a Prosite file. filename is the name of the file.
indexname is the name of the dictionary. rec2key is an
optional callback that takes a Record and generates a unique key
(e.g. the accession number) for the record. If not specified,
the id name will be used.
"""
import os
if not os.path.exists(filename):
raise ValueError("%s does not exist" % filename)
index = Index.Index(indexname, truncate=1)
index[Dictionary._Dictionary__filename_key] = filename
handle = open(filename)
records = parse(handle)
end = 0L
for record in records:
start = end
end = long(handle.tell())
length = end - start
if rec2key is not None:
key = rec2key(record)
else:
key = record.name
if not key:
raise KeyError("empty key was produced")
elif key in index:
raise KeyError("duplicate key %s found" % key)
index[key] = start, length
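# Hedged usage sketch (not part of the original module; file names are assumed
# examples): build an index once, then use Dictionary for random access by
# record name (or by whatever key rec2key produces):
#   >>> index_file("prosite.dat", "prosite.idx")
#   >>> d = Dictionary("prosite.idx", parser=RecordParser())
#   >>> d["ADH_ZINC"].accession
#   'PS00387'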
# This function can be deprecated once Bio.Prosite.ExPASyDictionary
# is removed.
def _extract_record(handle):
"""_extract_record(handle) -> str
Extract PROSITE data from a web page. Raises a ValueError if no
data was found in the web page.
"""
# All the data appears between tags:
# <pre width = 80>ID NIR_SIR; PATTERN.
# </PRE>
class parser(sgmllib.SGMLParser):
def __init__(self):
sgmllib.SGMLParser.__init__(self)
self._in_pre = 0
self.data = []
def handle_data(self, data):
if self._in_pre:
self.data.append(data)
def do_br(self, attrs):
if self._in_pre:
self.data.append('\n')
def start_pre(self, attrs):
self._in_pre = 1
def end_pre(self):
self._in_pre = 0
p = parser()
p.feed(handle.read())
if not p.data:
raise ValueError("No data found in web page.")
return "".join(p.data)
| 35.326966
| 256
| 0.580675
| 26,177
| 0.832575
| 377
| 0.011991
| 0
| 0
| 0
| 0
| 11,972
| 0.380777
|
e9f29e0f95ccd2b1945aff6967594472289887d8
| 21,120
|
py
|
Python
|
build/lib/mrgaze/pupilometry.py
|
jmtyszka/mrgaze
|
29217eab9ea431686fd200f08bddd6615c45d0d3
|
[
"MIT"
] | 18
|
2016-01-22T02:47:45.000Z
|
2021-09-23T18:37:51.000Z
|
build/lib/mrgaze/pupilometry.py
|
jmtyszka/mrgaze
|
29217eab9ea431686fd200f08bddd6615c45d0d3
|
[
"MIT"
] | 7
|
2015-05-26T21:33:16.000Z
|
2020-05-26T11:47:54.000Z
|
build/lib/mrgaze/pupilometry.py
|
jmtyszka/mrgaze
|
29217eab9ea431686fd200f08bddd6615c45d0d3
|
[
"MIT"
] | 7
|
2016-02-06T00:17:52.000Z
|
2021-02-22T03:51:55.000Z
|
#!/usr/bin/env python
#
# Video pupilometry functions
# - takes calibration and gaze video filenames as input
# - controls calibration and gaze estimation workflow
#
# USAGE : mrgaze.py <Calibration Video> <Gaze Video>
#
# AUTHOR : Mike Tyszka
# PLACE : Caltech
# DATES : 2014-05-07 JMT From scratch
# 2016-02-22 JMT Update print for python3. Remove unused vars, imports
#
# This file is part of mrgaze.
#
# mrgaze is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# mrgaze is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with mrgaze. If not, see <http://www.gnu.org/licenses/>.
#
# Copyright 2014 California Institute of Technology.
import os
import time
import getpass
import cv2
from mrgaze import media, utils, config, calibrate, report, engine
def LivePupilometry(data_dir, live_eyetracking=False):
"""
Perform pupil boundary ellipse fitting on camera feed
Arguments
----
data_dir : string
Root data directory path.
cfg :
Analysis configuration parameters
Returns
----
pupils : boolean
Completion status (True = successful)
"""
# If user did not provide a root data directory, we use HOME/mrgaze
if data_dir == '':
data_dir = os.path.join(os.getenv("HOME"), 'mrgaze')
# Full video file paths
hostname = os.uname()[1]
username = getpass.getuser()
ss_dir = os.path.join(data_dir, "%s_%s_%s" % (hostname, username, int(time.time())))
else:
ss_dir = data_dir
# Load Configuration
cfg = config.LoadConfig(data_dir)
cfg_ts = time.time()
# Output flags
verbose = cfg.getboolean('OUTPUT', 'verbose')
overwrite = cfg.getboolean('OUTPUT', 'overwrite')
# Video information
# vin_ext = cfg.get('VIDEO', 'inputextension')
vout_ext = cfg.get('VIDEO', 'outputextension')
# vin_fps = cfg.getfloat('VIDEO', 'inputfps')
# Flag for freeze frame
freeze_frame = False
vid_dir = os.path.join(ss_dir, 'videos')
res_dir = os.path.join(ss_dir, 'results')
vout_path = os.path.join(vid_dir, 'gaze' + vout_ext)
cal_vout_path = os.path.join(vid_dir, 'cal' + vout_ext)
# If we are not doing live eye-tracking, read input from what would otherwise be the output of a live eye-tracking session
if not live_eyetracking:
vin_path = vout_path
cal_vin_path = cal_vout_path
else:
vin_path = 0
# Raw and filtered pupilometry CSV file paths
cal_pupils_csv = os.path.join(res_dir, 'cal_pupils.csv')
pupils_csv = os.path.join(res_dir, 'gaze_pupils.csv')
# Check that output directory exists
if not os.path.isdir(res_dir):
os.makedirs(res_dir)
print('* %s does not exist - creating' % res_dir)
if not os.path.isdir(vid_dir):
os.makedirs(vid_dir)
print('* %s does not exist - creating' % vid_dir)
# Set up the LBP cascade classifier
LBP_path = os.path.join(utils._package_root(), 'Cascade/cascade.xml')
print(' Loading LBP cascade')
cascade = cv2.CascadeClassifier(LBP_path)
if cascade.empty():
print('* LBP cascade is empty - mrgaze installation problem')
return False
# Check for output CSV existence and overwrite flag
if os.path.isfile(pupils_csv):
print('+ Pupilometry output already exists - checking overwrite flag')
if overwrite:
print('+ Overwrite allowed - continuing')
else:
print('+ Overwrite forbidden - skipping pupilometry')
return True
#
# Camera Input
#
print(' Opening camera stream')
try:
if not live_eyetracking:
vin_stream = cv2.VideoCapture(vin_path)
cal_vin_stream = cv2.VideoCapture(cal_vin_path)
else:
vin_stream = cv2.VideoCapture(vin_path)
cal_vin_stream = vin_stream
except:
print('* Problem opening input video stream - skipping pupilometry')
return False
while not vin_stream.isOpened():
print("Waiting for Camera.")
key = utils._waitKey(500)
if key == 'ESC':
print("User Abort.")
break
if not vin_stream.isOpened():
print('* Video input stream not opened - skipping pupilometry')
return False
if not cal_vin_stream.isOpened():
print('* Calibration video input stream not opened - skipping pupilometry')
return False
# Video FPS from metadata
# TODO: may not work with Quicktime videos
# fps = vin_stream.get(cv2.cv.CV_CAP_PROP_FPS)
# fps = cfg.getfloat('CAMERA', 'fps')
# Desired time between frames in milliseconds
# time_bw_frames = 1000.0 / fps
vin_stream.set(cv2.cv.CV_CAP_PROP_FRAME_WIDTH, 320)
vin_stream.set(cv2.cv.CV_CAP_PROP_FRAME_HEIGHT, 240)
vin_stream.set(cv2.cv.CV_CAP_PROP_FPS, 30)
# Total number of frames in video file
# nf = vin_stream.get(cv2.cv.CV_CAP_PROP_FRAME_COUNT)
# print(' Video has %d frames at %0.3f fps' % (nf, vin_fps))
# Read first preprocessed video frame from stream
keep_going, frame_orig = media.LoadVideoFrame(vin_stream, cfg)
if keep_going:
frame, art_power = media.Preproc(frame_orig, cfg)
else:
art_power = 0.0
# Get size of preprocessed frame for output video setup
nx, ny = frame.shape[1], frame.shape[0]
# By default we start in non-calibration mode
# switch between gaze/cal modes by pressing key "c"
do_cal = False
while keep_going:
if do_cal == False:
#
# Output video
#
if live_eyetracking:
print(' Opening output video stream')
# Output video codec (MP4V - poor quality compression)
# TODO : Find a better multiplatform codec
fourcc = cv2.cv.CV_FOURCC('m','p','4','v')
try:
vout_stream = cv2.VideoWriter(vout_path, fourcc, 30, (nx, ny), True)
except:
print('* Problem creating output video stream - skipping pupilometry')
return False
if not vout_stream.isOpened():
print('* Output video not opened - skipping pupilometry')
return False
# Open pupilometry CSV file to write
try:
pupils_stream = open(pupils_csv, 'w')
except:
print('* Problem opening pupilometry CSV file - skipping pupilometry')
return False
#
# Main Video Frame Loop
#
# Print verbose column headers
if verbose:
print('')
print(' %10s %10s %10s %10s %10s' % (
'Time (s)', 'Area', 'Blink', 'Artifact', 'FPS'))
# Init frame counter
fc = 0
# Init processing timer
t0 = time.time()
t = t0
while keep_going:
# check whether the config file has been updated; reload it if that is the case
if fc % 30 == 0:
cfg_mtime = os.path.getmtime(os.path.join(data_dir, 'mrgaze.cfg'))
if cfg_mtime > cfg_ts:
print("Updating Configuration")
cfg = config.LoadConfig(data_dir)
cfg_ts = time.time()
# Current video time in seconds
t = time.time()
# -------------------------------------
# Pass this frame to pupilometry engine
# -------------------------------------
# b4_engine = time.time()
pupil_ellipse, roi_rect, blink, glint, frame_rgb = engine.PupilometryEngine(frame, cascade, cfg)
# print "Enging took %s ms" % (time.time() - b4_engine)
# Derive pupilometry parameters
px, py, area = engine.PupilometryPars(pupil_ellipse, glint, cfg)
# Write data line to pupilometry CSV file
pupils_stream.write(
'%0.4f,%0.3f,%0.3f,%0.3f,%d,%0.3f,\n' %
(t, area, px, py, blink, art_power)
)
if live_eyetracking:
# Write output video frame
vout_stream.write(frame_orig)
# Read next frame, unless we want to figure out the correct settings for this frame
if not freeze_frame:
keep_going, frame_orig = media.LoadVideoFrame(vin_stream, cfg)
if keep_going:
frame, art_power = media.Preproc(frame_orig, cfg)
else:
art_power = 0.0
# Increment frame counter
fc = fc + 1
# Report processing FPS
if verbose:
if fc % 100 == 0:
pfps = fc / (time.time() - t0)
print(' %10.1f %10.1f %10d %10.3f %10.1f' % (
t, area, blink, art_power, pfps))
t0 = time.time()
fc = 0
# check whether the user pressed Esc to exit the experiment
key = utils._waitKey(1)
if key == 'ESC':
# Clean up
if live_eyetracking:
vout_stream.release()
pupils_stream.close()
keep_going = False
elif key == 'c':
# Clean up
if live_eyetracking:
vout_stream.release()
pupils_stream.close()
do_cal = True
print("Starting calibration.")
break
elif key == 'f':
freeze_frame = not freeze_frame
else: # do calibration
#
# Output video
#
if live_eyetracking:
print(' Opening output video stream')
# Output video codec (MP4V - poor quality compression)
# TODO : Find a better multiplatform codec
fourcc = cv2.cv.CV_FOURCC('m','p','4','v')
try:
cal_vout_stream = cv2.VideoWriter(cal_vout_path, fourcc, 30, (nx, ny), True)
except:
print('* Problem creating output video stream - skipping pupilometry')
return False
if not cal_vout_stream.isOpened():
print('* Output video not opened - skipping pupilometry')
return False
# Open pupilometry CSV file to write
try:
cal_pupils_stream = open(cal_pupils_csv, 'w')
except:
print('* Problem opening pupilometry CSV file - skipping pupilometry')
return False
#
# Main Video Frame Loop
#
# Print verbose column headers
if verbose:
print('')
print(' %10s %10s %10s %10s %10s' % (
'Time (s)', 'Area', 'Blink', 'Artifact', 'FPS'))
# Init frame counter
fc = 0
# Init processing timer
t0 = time.time()
t = t0
while keep_going:
# check whether the config file has been updated; reload it if that is the case
if fc % 30 == 0:
cfg_mtime = os.path.getmtime(os.path.join(data_dir, 'mrgaze.cfg'))
if cfg_mtime > cfg_ts:
print("Updating Configuration")
cfg = config.LoadConfig(data_dir)
cfg_ts = time.time()
# Current video time in seconds
t = time.time()
# -------------------------------------
# Pass this frame to pupilometry engine
# -------------------------------------
# b4_engine = time.time()
pupil_ellipse, roi_rect, blink, glint, frame_rgb = engine.PupilometryEngine(frame, cascade, cfg)
# print "Engine took %s ms" % (time.time() - b4_engine)
# Derive pupilometry parameters
px, py, area = engine.PupilometryPars(pupil_ellipse, glint, cfg)
# Write data line to pupilometry CSV file
cal_pupils_stream.write(
'%0.4f,%0.3f,%0.3f,%0.3f,%d,%0.3f,\n' %
(t, area, px, py, blink, art_power)
)
# Write output video frame
if live_eyetracking:
cal_vout_stream.write(frame_orig)
# Read next frame (if available)
# if verbose:
# b4_frame = time.time()
keep_going, frame_orig = media.LoadVideoFrame(vin_stream, cfg)
if keep_going:
frame, art_power = media.Preproc(frame_orig, cfg)
else:
art_power = 0.0
#if verbose:
# print "Time to load frame: %s" % (time.time() - b4_frame)
# Increment frame counter
fc = fc + 1
# Report processing FPS
if verbose:
if fc % 100 == 0:
pfps = fc / (time.time() - t0)
print(' %10.1f %10.1f %10d %10.3f %10.1f' % (
t, area, blink, art_power, pfps))
t0 = time.time()
fc = 0
# check whether the user pressed Esc to exit the experiment
key = utils._waitKey(1)
if key == 'ESC':
keep_going = False
# Clean up
if live_eyetracking:
cal_vout_stream.release()
cal_pupils_stream.close()
elif key == 'v' or not keep_going:
do_cal = False
print("Stopping calibration.")
# Clean up
if live_eyetracking:
cal_vout_stream.release()
cal_pupils_stream.close()
break
print(' Create calibration model')
C, central_fix = calibrate.AutoCalibrate(res_dir, cfg)
if not C.any():
print('* Empty calibration matrix detected - skipping')
try:
print(' Calibrate pupilometry')
calibrate.ApplyCalibration(ss_dir, C, central_fix, cfg)
except UnboundLocalError:
print(' No calibration data found')
cv2.destroyAllWindows()
vin_stream.release()
print('')
print(' Generate Report')
print(' ---------------')
report.WriteReport(ss_dir, cfg)
# Return pupilometry timeseries
return t, px, py, area, blink, art_power
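# Hedged usage note (not part of the original file): LivePupilometry is driven
# interactively; with live_eyetracking=False it replays the previously recorded
# gaze/cal videos found under <data_dir>/videos. Key bindings handled above are
# 'c' (start calibration), 'v' (stop calibration), 'f' (freeze frame) and ESC
# (quit). A minimal call might look like:
#   LivePupilometry('/path/to/mrgaze_data', live_eyetracking=False)
# where the path is an assumed example.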
def VideoPupilometry(data_dir, subj_sess, v_stub, cfg):
"""
Perform pupil boundary ellipse fitting on entire video
Arguments
----
data_dir : string
Root data directory path.
subj_sess : string
Subject/Session name used for subdirectory within data_dir
v_stub : string
Video filename stub, eg 'cal' or 'gaze'
cfg :
Analysis configuration parameters
Returns
----
pupils : boolean
Completion status (True = successful)
"""
# Output flags
verbose = cfg.getboolean('OUTPUT', 'verbose')
overwrite = cfg.getboolean('OUTPUT','overwrite')
# Video information
vin_ext = cfg.get('VIDEO', 'inputextension')
vout_ext = cfg.get('VIDEO', 'outputextension')
vin_fps = cfg.getfloat('VIDEO', 'inputfps')
# Full video file paths
ss_dir = os.path.join(data_dir, subj_sess)
vid_dir = os.path.join(ss_dir, 'videos')
res_dir = os.path.join(ss_dir, 'results')
vin_path = os.path.join(vid_dir, v_stub + vin_ext)
vout_path = os.path.join(res_dir, v_stub + '_pupils' + vout_ext)
# Raw and filtered pupilometry CSV file paths
pupils_csv = os.path.join(res_dir, v_stub + '_pupils.csv')
# Check that input video file exists
if not os.path.isfile(vin_path):
print('* %s does not exist - returning' % vin_path)
return False
# Set up the LBP cascade classifier
LBP_path = os.path.join(utils._package_root(), 'Cascade/cascade.xml')
print(' Loading LBP cascade')
cascade = cv2.CascadeClassifier(LBP_path)
if cascade.empty():
print('* LBP cascade is empty - mrgaze installation problem')
return False
# Check for output CSV existence and overwrite flag
if os.path.isfile(pupils_csv):
print('+ Pupilometry output already exists - checking overwrite flag')
if overwrite:
print('+ Overwrite allowed - continuing')
else:
print('+ Overwrite forbidden - skipping pupilometry')
return True
#
# Input video
#
print(' Opening input video stream')
try:
vin_stream = cv2.VideoCapture(vin_path)
except:
print('* Problem opening input video stream - skipping pupilometry')
return False
if not vin_stream.isOpened():
print('* Video input stream not opened - skipping pupilometry')
return False
# Video FPS from metadata
# TODO: may not work with Quicktime videos
# fps = vin_stream.get(cv2.cv.CV_CAP_PROP_FPS)
# Total number of frames in video file
nf = vin_stream.get(cv2.CAP_PROP_FRAME_COUNT)
print(' Video has %d frames at %0.3f fps' % (nf, vin_fps))
# Read first preprocessed video frame from stream
keep_going, frame_orig = media.LoadVideoFrame(vin_stream, cfg)
if keep_going:
frame, art_power = media.Preproc(frame_orig, cfg)
else:
art_power = 0.0
# Get size of preprocessed frame for output video setup
nx, ny = frame.shape[1], frame.shape[0]
#
# Output video
#
print(' Opening output video stream')
# Output video codec (MP4V - poor quality compression)
fourcc = cv2.VideoWriter_fourcc('m','p','4','v')
try:
vout_stream = cv2.VideoWriter(vout_path, fourcc, 30, (nx, ny), True)
except:
print('* Problem creating output video stream - skipping pupilometry')
return False
if not vout_stream.isOpened():
print('* Output video not opened - skipping pupilometry')
return False
# Open pupilometry CSV file to write
try:
pupils_stream = open(pupils_csv, 'w')
except:
print('* Problem opening pupilometry CSV file - skipping pupilometry')
return False
#
# Main Video Frame Loop
#
# Print verbose column headers
if verbose:
print('')
print(' %10s %10s %10s %10s %10s %10s' % (
'Time (s)', '% Done', 'Area', 'Blink', 'Artifact', 'FPS'))
# Init frame counter
fc = 0
# Init processing timer
t0 = time.time()
while keep_going:
# Current video time in seconds
t = fc / vin_fps
# -------------------------------------
# Pass this frame to pupilometry engine
# -------------------------------------
pupil_ellipse, roi_rect, blink, glint, frame_rgb = engine.PupilometryEngine(frame, cascade, cfg)
# Derive pupilometry parameters
px, py, area = engine.PupilometryPars(pupil_ellipse, glint, cfg)
# Write data line to pupilometry CSV file
pupils_stream.write(
'%0.3f,%0.3f,%0.3f,%0.3f,%d,%0.3f,\n' %
(t, area, px, py, blink, art_power)
)
# Write output video frame
vout_stream.write(frame_rgb)
# Read next frame (if available)
keep_going, frame_orig = media.LoadVideoFrame(vin_stream, cfg)
if keep_going:
frame, art_power = media.Preproc(frame_orig, cfg)
else:
art_power = 0.0
# Increment frame counter
fc = fc + 1
# Report processing FPS
if verbose:
if fc % 100 == 0:
perc_done = fc / float(nf) * 100.0
pfps = fc / (time.time() - t0)
print(' %10.1f %10.1f %10.1f %10d %10.3f %10.1f' % (
t, perc_done, area, blink, art_power, pfps))
# Clean up
cv2.destroyAllWindows()
vin_stream.release()
vout_stream.release()
pupils_stream.close()
# Return pupilometry timeseries
return t, px, py, area, blink, art_power
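# Hedged usage note (not part of the original file): the CSV written above has
# one row per frame with columns (time_s, area, px, py, blink, art_power), as
# produced by the pupils_stream.write() call. A typical offline call might be:
#   VideoPupilometry('/data/root', 'subj01_sess01', 'gaze', cfg)
# where the directory and subject/session names are assumed examples and cfg
# comes from mrgaze.config.LoadConfig().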
| 33.051643
| 112
| 0.550284
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 8,518
| 0.403314
|
e9f462dbb1b4b480ae079d20eb179ca06f53f704
| 1,927
|
py
|
Python
|
aws_glue/combine_csv_files/combine_csv_files.py
|
veben/aws_python_snippets
|
39fa3cda8290fb097a5b9e8168829b62ab1af41e
|
[
"MIT"
] | 1
|
2020-09-08T09:22:25.000Z
|
2020-09-08T09:22:25.000Z
|
aws_glue/combine_csv_files/combine_csv_files.py
|
veben/aws_python_snippets
|
39fa3cda8290fb097a5b9e8168829b62ab1af41e
|
[
"MIT"
] | null | null | null |
aws_glue/combine_csv_files/combine_csv_files.py
|
veben/aws_python_snippets
|
39fa3cda8290fb097a5b9e8168829b62ab1af41e
|
[
"MIT"
] | 1
|
2020-09-08T09:26:58.000Z
|
2020-09-08T09:26:58.000Z
|
from lib_combination.aws_client.aws_client import get_session_for_profile, run_job, get_job, create_job
from lib_combination.aws_client.aws_client import upload_file_to_s3_bucket
from lib_combination.conf_utils.conf_utils import get_job_name, get_profile_name, get_bucket_name, get_database_name
from lib_combination.file_utils.file_utils import get_local_script_folder_path
def main():
profile_name = get_profile_name()
aws_session = get_session_for_profile(profile_name)
job_name = get_job_name()
bucket_name = get_bucket_name()
base_name = get_database_name()
script_name = 'pokemon-v1-v2-combination.py'
local_script_sub_path = get_local_script_folder_path() + '/' + script_name
remote_script_sub_path = 'combination/scripts' + '/' + script_name
remote_script_location = "s3://" + bucket_name + "/" + remote_script_sub_path
remote_result_file_full_path = "s3://" + bucket_name + "/combination/csv_file_result_job"
remote_tmp_files_path = "s3://" + bucket_name + "/combination/tmp"
glue_client = aws_session.client('glue')
upload_file_to_s3_bucket(aws_session,
local_script_sub_path,
bucket_name,
remote_script_sub_path)
job: dict
try:
job = get_job(glue_client, job_name)
except glue_client.exceptions.EntityNotFoundException:
job = create_job(glue_client, job_name, remote_script_location, remote_result_file_full_path, base_name,
remote_tmp_files_path)
print(f"Create job <{job_name}>")
if job['ResponseMetadata']['HTTPStatusCode'] == 200:
print(f"Run job <{job_name}>"
f"with <{profile_name}> profile...")
run_job(glue_client, job_name, remote_result_file_full_path, base_name)
else:
print(f"Error to define <{job_name}> job")
if __name__ == "__main__":
main()
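# Hedged usage note (not part of the original script): running this module
# uploads the local Glue script to s3://<bucket>/combination/scripts/, creates
# the Glue job on first use, then starts a job run whose output lands under
# s3://<bucket>/combination/csv_file_result_job. Profile, job, bucket and
# database names come from the lib_combination.conf_utils helpers, e.g.
#   python combine_csv_files.py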
| 41.891304
| 116
| 0.70576
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 302
| 0.15672
|
e9f4dc1139fdd0b79eb9f6a5670984a538e5b297
| 1,062
|
py
|
Python
|
p850h/rectangle_area.py
|
l33tdaima/l33tdaima
|
0a7a9573dc6b79e22dcb54357493ebaaf5e0aa90
|
[
"MIT"
] | 1
|
2020-02-20T12:04:46.000Z
|
2020-02-20T12:04:46.000Z
|
p850h/rectangle_area.py
|
l33tdaima/l33tdaima
|
0a7a9573dc6b79e22dcb54357493ebaaf5e0aa90
|
[
"MIT"
] | null | null | null |
p850h/rectangle_area.py
|
l33tdaima/l33tdaima
|
0a7a9573dc6b79e22dcb54357493ebaaf5e0aa90
|
[
"MIT"
] | null | null | null |
from typing import List
class Solution:
def rectangleArea(self, rectangles: List[List[int]]) -> int:
xs = sorted(set([x for x1, y1, x2, y2 in rectangles for x in [x1, x2]]))
x_i = {v: i for i, v in enumerate(xs)}
count = [0] * len(x_i)
L = []
for x1, y1, x2, y2 in rectangles:
L.append([y1, x1, x2, 1])
L.append([y2, x1, x2, -1])
L.sort()
cur_y = cur_x_sum = area = 0
for y, x1, x2, sig in L:
area += (y - cur_y) * cur_x_sum
cur_y = y
for i in range(x_i[x1], x_i[x2]):
count[i] += sig
cur_x_sum = sum(x2 - x1 if c else 0 for x1, x2, c in zip(xs, xs[1:], count))
return area % (10 ** 9 + 7)
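# Brief explanation (added commentary, not in the original solution): this is a
# sweep line over y. Rectangle edges become events (y, x1, x2, +/-1); between
# two consecutive event heights the covered x-length is constant, so each step
# contributes (y - cur_y) * cur_x_sum to the area. The count[] array tracks how
# many rectangles currently cover each elementary x-interval between the
# sorted, de-duplicated x coordinates.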
# TESTS
for rectangles, expected in [
([[0, 0, 2, 2], [1, 0, 2, 3], [1, 0, 3, 1]], 6),
([[0, 0, 1000000000, 1000000000]], 49),
]:
sol = Solution()
actual = sol.rectangleArea(rectangles)
print("Total area covered by rectangles", rectangles, "->", actual)
assert actual == expected
| 32.181818
| 88
| 0.508475
| 729
| 0.686441
| 0
| 0
| 0
| 0
| 0
| 0
| 45
| 0.042373
|