'''
Heart rate monitor class which characterizes ECG signal using autocorrelation
and other processing techniques
'''
import numpy as np
import pandas as pd
from scipy import signal
import logging
import matplotlib as mpl
import os
import warnings
import json
if os.environ.get('DISPLAY', '') == '':
print('no display found. Using non-interactive Agg backend')
mpl.use('Agg')
import matplotlib.pyplot as plt
logging.basicConfig(filename='out.log', level=logging.DEBUG)
log = logging.getLogger(__name__)
class HeartRateMonitor(object):
'''Main heart rate monitor class to perform various characterizations of
ECG signal
'''
def __init__(self, data=None, filename=None, t_units='ms', v_units='mV'):
'''Initialize HeartRateMonitor object
:param data: 2D numpy array with time values in the first column and
ECG voltage values in the second column. Defaults to None.
:param filename: CSV file with time in first column and voltage in the
second column. Defaults to None.
        :param t_units: Time units, either 'ms', 's', or 'min'. Defaults to
            'ms'.
:param v_units: Voltage units, either 'mV' or 'V'
'''
        log.info('Initialize HeartRateMonitor')
self.t_units = t_units
self.v_units = v_units
self.__t_converter = None
self.__v_converter = None
(self.__t_converter, self.__v_converter) = self.__get_converters(
self.t_units, self.v_units)
        log.debug('T units/conversion {}/{}. V units/conversion {}/{}'.format(
            self.t_units, self.__t_converter, self.v_units,
            self.__v_converter))
if data is None and filename is None:
self.data = []
elif data is not None:
self.data = data
elif filename is not None:
self.filename = filename
self.import_data(filename)
else:
self.data = []
self.__clean_data()
log.debug('Converting data to ms and mV')
self.__convert_data()
self.mean_hr_bpm = None
self.voltage_extremes = None
self.duration = None
self.num_beats = None
self.beats = None
self.__filt_data = None
log.debug('Filtering data')
self.__filter_data()
@property
def filename(self):
'''Filename of imported data'''
return self.__filename
@filename.setter
def filename(self, filename):
'''Setter for filename
:param filename: Filename'''
self.__filename = filename
@property
def data(self):
'''Internal time-dependent ECG data property'''
return self.__data
@data.setter
def data(self, data):
'''Set data
:param data: ECG values to set
'''
self.__data = data
@property
def mean_hr_bpm(self):
'''Mean bpm over specified amount of time'''
return self.__mean_hr_bpm
@mean_hr_bpm.setter
def mean_hr_bpm(self, bpm):
'''Set mean_hr_bpm
:param bpm: Mean bpm
'''
self.__mean_hr_bpm = bpm
@property
def voltage_extremes(self):
'''Minimum and maximum lead voltages'''
return self.__voltage_extremes
@voltage_extremes.setter
def voltage_extremes(self, voltages):
'''Set voltage_extremes
:param voltages: Tuple of min and max voltages
'''
self.__voltage_extremes = voltages
@property
def duration(self):
'''Duration of ECG strip'''
return self.__duration
@duration.setter
def duration(self, duration):
'''Set duration
:param duration: Duration of ECG
'''
self.__duration = duration
@property
def num_beats(self):
'''Number of beats detected'''
return self.__num_beats
@num_beats.setter
def num_beats(self, num_beats):
'''Set num_beats
:param num_beats: Number of beats detected
'''
self.__num_beats = num_beats
@property
def beats(self):
        '''Numpy array of times beats occurred'''
return self.__beats
@beats.setter
def beats(self, beats):
'''Set beats
:param beats: Numpy array of beat times
'''
self.__beats = beats
def import_data(self, filename):
'''Import data from file
:param filename: csv file to import from
'''
df = pd.read_csv(filename, names=['Time', 'Voltage'])
        data = df.to_numpy()
self.data = data
log.info('Successfully imported {}'.format(filename))
def __convert_data(self):
self.data[:, 0] *= self.__t_converter
self.data[:, 1] *= self.__v_converter
def detect_bpm(self, time=None, units=None):
'''Detects BPM using autocorrelation.
:param time: Time over which to find mean BPM. Defaults to find mean
from beginning to end of given signal. If scalar given, mean is
found from t = 0 to t = time seconds. If two element list or
tuple of times is given, mean is found between the two times.
Begin and end sample points chosen to be as close to given
arguments as possible.
:param units: Time units of the time limits parameter
:returns: Beats per minute
:raise IndexError: Only one beat detected in time limits, cannot find
BPM
'''
if units is None:
units = self.t_units
data = self.data
t_lim = None
(lim_converter, v_con_temp) = self.__get_converters(
units, self.v_units)
t_raw = data[:, 0]
dt = t_raw[1] - t_raw[0]
log.info('dt found to be {}'.format(dt))
if time is None:
t_lim = np.array((0, max(t_raw)))
elif isinstance(time, (list, tuple)):
if (len(time) == 2):
time = np.array(time)
time *= lim_converter
t_lim = time
            else:
                log.error('Iterable time input must have two elements '
                          'for start and end times.')
                raise ValueError('Iterable time input must have two '
                                 'elements for start and end times')
        elif isinstance(time, (int, float)):
            time *= lim_converter
            t_lim = (0, time)
        else:
            log.error('Time argument takes scalar or two element iterable.')
            raise TypeError(
                'Time argument takes scalar or two element iterable')
(start, end) = self.find_nearest_limits(t_raw, t_lim)
log.info('''Closest start time: {}. Closest end time:
{}'''.format(t_raw[start], t_raw[end]))
v = self.__filt_data[start:end]
        # Normalized autocorrelation; keep only the non-negative lags
        corr1 = np.correlate(v, v, mode='full')
        corr1 = np.divide(corr1, max(corr1))
        corr1 = corr1[int(len(corr1) / 2):]
# Autocorrelation peak detection with scipy.
widths = np.arange(1, 400)
peaks = signal.find_peaks_cwt(
corr1,
widths,
noise_perc=10,
min_snr=20,
max_distances=np.divide(widths, 10))
# Calculate BPM
try:
period = peaks[1] - peaks[0]
except IndexError:
log.error('''Only one peak detected in time region specified.
Expand time region to detect BPM.''')
raise IndexError(
'''Only one peak detected in time region specified.
Unable to detect BPM''')
        bpm = 60 * 1000 / (dt * period)  # data is in ms after conversion
self.mean_hr_bpm = bpm
plt.plot(corr1)
plt.plot(peaks, np.zeros(len(peaks)), 'o')
plt.ion()
plt.show()
log.info('BPM found to be {}'.format(bpm))
return bpm
def find_nearest_limits(self, t, t_lim):
'''Find nearest t values to given limits
:param t: Array of sample times
:param t_lim: Two element tuple of start and end times
:return: Tuple of start and end indices of t
'''
begin = t_lim[0]
end = t_lim[1]
begin_i = np.argmin(np.abs(t - begin))
end_i = np.argmin(np.abs(t - end))
return (begin_i, end_i)
def detect_voltage_extremes(self, thresh=None, units=None):
'''Detect voltage extremes above positive and negative threshold.
Returns maximum and minimum voltages.
:param thresh: Positive threshold voltage for extreme values (Defaults
to +- 300mV)
:param units: Units of threshold. Defaults to class units
:return: Tuple (minimum voltage, maximum voltage)
'''
if units is None:
units = self.v_units
(t_converter, v_converter) = self.__get_converters(self.t_units, units)
if thresh is None:
thresh = 300 / v_converter
thresh_conv = thresh * v_converter
t_thresh = np.where(np.abs(self.data[:, 1]) >= thresh_conv)[0]
log.debug('V thresh set to {} mV'.format(thresh_conv))
log.debug('{} data points outside thresh'.format(len(t_thresh)))
if len(t_thresh) > 0:
for t in t_thresh:
warnings.warn('''Extreme voltage above {}{} of {}{} found at
{}{}'''.format(
thresh, units,
np.divide(self.data[t, 1],
self.__v_converter), self.v_units,
np.divide(t, self.__t_converter), self.t_units))
max_v = np.max(self.data[:, 1])
min_v = np.min(self.data[:, 1])
log.info('(min, max) voltage set to {}'.format((min_v, max_v)))
self.voltage_extremes = (min_v, max_v)
return (min_v, max_v)
def __get_converters(self, t_units, v_units):
if type(t_units) is not str:
log.error('Non-string time units')
raise TypeError('Please input string for time units')
if type(v_units) is not str:
log.error('Non-string voltage units')
raise TypeError('Please input string for voltage units')
if (t_units == 's'):
t_converter = 1000
elif (t_units == 'ms'):
t_converter = 1
elif (t_units == 'min'):
t_converter = 60000
else:
log.error('Unknown time units of {}'.format(t_units))
raise ValueError('Time units must be \'s\', \'ms\', or \'min\'.')
if (v_units == 'V'):
v_converter = 1000
elif (v_units == 'mV'):
v_converter = 1
else:
log.error('Unknown voltage units of {}'.format(v_units))
raise ValueError('Voltage units must be \'mV\' or \'V\'.')
return (t_converter, v_converter)
def __filter_data(self):
        '''Filter raw data with a 5-15 Hz passband according to the
        Pan-Tompkins algorithm, then rectify and square the result'''
dt = self.data[1, 0] - self.data[0, 0] # dt in ms
nyq = (1 / (dt / 1000)) * 0.5
log.info('Nyquist frequency found to be {} Hz'.format(nyq))
low = 5 / nyq
hi = 15 / nyq
        log.info('Passband set to 5-15 Hz ({} to {} of Nyquist)'.format(low, hi))
b, a = signal.butter(2, (low, hi), btype='bandpass')
filt = signal.lfilter(b, a, self.data[:, 1])
# Rectify
filt[filt < 0] = 0
# Square
filt = np.multiply(filt, filt)
self.__filt_data = filt
def get_peaks(self):
'''Detect peaks and return timing of beats array
:return beats: Beat times array in ms'''
widths = np.arange(1, 400)
log.info('Begin peak detection')
peaks = signal.find_peaks_cwt(
self.__filt_data,
widths,
noise_perc=10,
min_snr=20,
max_distances=np.divide(widths, 10))
dt = self.data[1, 0] - self.data[0, 0]
self.beats = np.multiply(peaks, dt)
self.num_beats = len(peaks)
log.info('{} beats found in signal'.format(len(peaks)))
return (peaks)
def get_duration(self):
'''Find signal duration
:return duration: Total duration'''
dur = max(self.data[:, 0]) - min(self.data[:, 0])
log.info('Duration of ECG found to be {} ms'.format(dur))
self.duration = dur
return dur
def __clean_data(self):
        '''Find NaN values in the input data and fix gaps by interpolation'''
log.debug('Begin cleaning data')
interp_t = 0
interp_v = 0
for i, t in enumerate(self.data[:, 0]):
if np.isnan(t):
if (i == 0):
interp_t = self.data[i + 1, 0]
elif i == len(self.data[:, 0]) - 1:
interp_t = self.data[i - 1, 0]
else:
interp_t = (self.data[i - 1, 0] + self.data[i + 1, 0]) / 2
warnings.warn('''Blank time value at index {} interpolating as
{}'''.format(i, interp_t))
log.info('''Blank time value at index {} interpolating as
{}'''.format(i, interp_t))
self.data[i, 0] = interp_t
for i, t in enumerate(self.data[:, 1]):
if np.isnan(t):
log.debug('{}{}'.format(t, np.isnan(t)))
if (i == 0):
interp_v = self.data[i + 1, 1]
elif i == len(self.data[:, 1]) - 1:
interp_v = self.data[i - 1, 1]
else:
interp_v = (self.data[i - 1, 1] + self.data[i + 1, 1]) / 2
warnings.warn('''Blank voltage value at index {} interpolating
as {}'''.format(i, interp_v))
log.info('''Blank voltage value at index {} interpolating
as {}'''.format(i, interp_v))
self.data[i, 1] = interp_v
def export_json(self, filename=None):
'''Export ECG characteristics as JSON file
:param filename: Filename to store as. Default is input filename as
.json
'''
data_dict = {
'BPM': self.mean_hr_bpm,
'Voltage Min': self.voltage_extremes[0],
'Voltage Max': self.voltage_extremes[1],
'Duration': self.duration,
'Number of Beats': self.num_beats,
'Beat Times': self.beats.tolist()
}
if filename is None:
if self.filename is not None:
csv_name = self.filename
filename = os.path.splitext(csv_name)[0] + '.json'
log.info('Filename is {}'.format(filename))
else:
raise ValueError('''No filename specified at object
initialization or at export_json call''')
log.info('Writing json to {}'.format(filename))
with open(filename, 'w') as output:
json.dump(data_dict, output)
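# Illustrative usage sketch (not part of the original module). The CSV name
# below is a placeholder; any file with time in the first column and voltage
# in the second should work.
if __name__ == '__main__':
    hrm = HeartRateMonitor(filename='ecg_data.csv', t_units='s', v_units='mV')
    print(hrm.detect_bpm())                # mean BPM over the whole strip
    print(hrm.detect_voltage_extremes())   # (min, max) lead voltages in mV
    hrm.get_peaks()                        # populates hrm.beats and hrm.num_beats
    print(hrm.get_duration())              # strip duration in ms
    hrm.export_json()                      # writes ecg_data.json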
|
import numpy as np
from sklearn.metrics import accuracy_score
from scipy.stats import hmean
def one_hot(y):
n_values = np.max(y) + 1
y_new = np.eye(n_values)[y[:]]
return y_new
def get_accuracy(y_pred: list, y: list):
"""
This method computes the accuracy for each level in the taxonomy.
:param y_pred: a 2d array where d1 is the taxonomy level, and d2 is the prediction for each example.
:type y_pred: list
:param y: a 2d array where d1 is the taxonomy level, and d2 is the ground truth for each example.
:type y: list
:return: accuracy for each level of the taxonomy.
:rtype: list
"""
if len(y) != len(y_pred):
raise Exception('Size of the inputs should be the same.')
accuracy = [accuracy_score(y_, y_pred_) for y_, y_pred_ in zip(y, y_pred)]
return accuracy
def get_h_accuracy(y_pred: list, y: list):
"""
    This method computes the harmonic mean of the accuracies across all levels of the taxonomy.
:param y_pred: a 2d array where d1 is the taxonomy level, and d2 is the prediction for each example.
:type y_pred: list
:param y: a 2d array where d1 is the taxonomy level, and d2 is the ground truth for each example.
:type y: list
    :return: harmonic mean of the accuracies across all levels of the taxonomy.
    :rtype: float
"""
if len(y) != len(y_pred):
raise Exception('Size of the inputs should be the same.')
accuracy = [accuracy_score(y_, y_pred_) for y_, y_pred_ in zip(y, y_pred)]
return hmean(accuracy)
def get_m_accuracy(y_pred: list, y: list):
"""
    This method computes the mean of the accuracies across all levels of the taxonomy.
:param y_pred: a 2d array where d1 is the taxonomy level, and d2 is the prediction for each example.
:type y_pred: list
:param y: a 2d array where d1 is the taxonomy level, and d2 is the ground truth for each example.
:type y: list
    :return: mean of the accuracies across all levels of the taxonomy.
    :rtype: float
"""
if len(y) != len(y_pred):
raise Exception('Size of the inputs should be the same.')
accuracy = [accuracy_score(y_, y_pred_) for y_, y_pred_ in zip(y, y_pred)]
return np.mean(accuracy)
def get_exact_match(y_pred: list, y: list):
"""
    This method computes the exact match score. Exact match is defined as the number of examples
    for which the predictions at all levels of the taxonomy are correct, divided by the total number of examples.
:param y_pred: a 2d array where d1 is the taxonomy level, and d2 is the prediction for each example.
:type y_pred: list
:param y: a 2d array where d1 is the taxonomy level, and d2 is the ground truth for each example.
:type y: list
:return: the exact match value
:rtype: float
"""
if len(y) != len(y_pred):
raise Exception('Shape of the inputs should be the same')
exact_match = []
for j in range(len(y[0])):
v = 1
for i in range(len(y)):
if y[i][j] != y_pred[i][j]:
v = 0
break
exact_match.append(v)
return np.mean(exact_match)
def get_consistency(y_pred: list, taxo: list):
"""
    This method estimates the consistency of the predictions with the taxonomy.
    :param y_pred: a 2d array where d1 is the taxonomy level, and d2 is the prediction for each example.
    :type y_pred: list
    :param taxo: a list of adjacency matrices, one per pair of consecutive taxonomy levels, where
        taxo[i][l][l_next] is 1 if label l_next at level i+1 is a valid child of label l at level i.
    :type taxo: list
:return: value of consistency.
:rtype: float
"""
if len(y_pred) - 1 != len(taxo):
raise Exception('The predictions do not match the taxonomy.')
consistency = []
for j in range(len(y_pred[0])):
v = 1
for i in range(len(y_pred) - 1):
l = int(y_pred[i][j])
l_next = int(y_pred[i + 1][j])
if taxo[i][l][l_next] == 0:
v = 0
break
consistency.append(v)
return np.mean(consistency)
if __name__ == '__main__':
y = [[1, 0, 1, 0, 0], [1, 2, 3, 4, 0], [3, 4, 5, 8, 0]]
y_pred = [[0, 1, 1, 0, 0], [1, 2, 1, 4, 0], [3, 1, 5, 8, 0]]
taxo = [[[1, 1, 0, 0, 0], [0, 0, 1, 1, 1]],
[[1, 1, 0, 0, 0, 0, 0, 0, 0], [0, 0, 1, 0, 0, 0, 0, 0, 0], [0, 0, 0, 1, 1, 1, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 1, 1, 0], [0, 0, 0, 0, 0, 0, 0, 0, 1]]
]
print(get_accuracy(y, y_pred))
print(get_exact_match(y, y_pred))
print(get_consistency(y_pred, taxo))
|
import numpy as np
import pandas as pd
import pickle
import itertools
from collections import defaultdict
from collections import Counter
import re
import json
import os
from os.path import join, exists, split
from gensim.models import word2vec
def train_word2vec(data, vocabulary, num_features=300, min_word_count=1, context=10):
"""
Trains, saves, loads Word2Vec model
Returns initial weights for embedding layer.
"""
num_workers = 2 # Number of threads to run in parallel
downsampling = 1e-3 # Downsample setting for frequent words
# Initialize and train the model
print('Training Word2Vec model...')
sentences = [i['tokens'] for i in data]
embedding_model = word2vec.Word2Vec(sentences, workers=num_workers,
size=num_features, min_count=min_word_count,
window=context, sample=downsampling)
# If we don't plan to train the model any further, calling
# init_sims will make the model much more memory-efficient.
embedding_model.init_sims(replace=True)
# add unknown words
embedding_weights = {word: np.array(embedding_model[word]) if word in embedding_model else
np.random.uniform(-0.25, 0.25, embedding_model.vector_size)
for word in vocabulary}
return embedding_weights
def build_data(data_train, train_ratio=0.9, clean_string=True):
input_dir = "data"
data_train_path = join(input_dir, data_train)
if exists(data_train_path):
json_file_annotation_all = open(data_train_path, 'r')
annotation_all = json.load(json_file_annotation_all)
tweets = []
vocab = defaultdict(float)
emo_labels = []
sentences = []
for item in annotation_all:
sentence = item['text']
tokens = item['tokens']
emo_label = item['emotion']
sentences.append(sentence)
if emo_label == 'joy':
emo_labels.append([1, 0, 0, 0, 0, 0])
y = 0
if emo_label == 'sad':
emo_labels.append([0, 1, 0, 0, 0, 0])
y = 1
if emo_label == 'dis':
emo_labels.append([0, 0, 1, 0, 0, 0])
y = 2
if emo_label == 'sup':
emo_labels.append([0, 0, 0, 1, 0, 0])
y = 3
if emo_label == 'ang':
emo_labels.append([0, 0, 0, 0, 1, 0])
y = 4
if emo_label == 'fea':
emo_labels.append([0, 0, 0, 0, 0, 1])
y = 5
if clean_string:
orig_tweet = clean_str(sentence)
else:
orig_tweet = sentence.lower()
words = set(tokens)
for word in words:
vocab[word] += 1
data = {'y': y,
'text': orig_tweet,
'num_words': len(tokens),
'split': int(np.random.rand() < train_ratio),
'tokens': tokens}
tweets.append(data)
return tweets, vocab
else:
print("File does not exist")
def get_W(word_vecs, k):
"""
Get word matrix. W[i] is the vector for word indexed by i
"""
vocab_size = len(word_vecs)
word_idx_map = dict()
W = np.zeros(shape=(vocab_size + 1, k), dtype=np.float32)
W[0] = np.zeros(k, dtype=np.float32)
i = 1
for word in word_vecs:
W[i] = word_vecs[word]
word_idx_map[word] = i
i += 1
return W, word_idx_map
def load_bin_vec(fname, vocab):
"""
Loads 300x1 word vecs from Google (Mikolov) word2vec
"""
word_vecs = {}
with open(fname, 'rb') as f:
header = f.readline()
vocab_size, layer1_size = map(int, header.split())
binary_len = np.dtype('float32').itemsize * layer1_size
for line in range(vocab_size):
word = []
while True:
ch = f.read(1)
if ch == b' ':
word = ''.join(word)
break
if ch != b'\n':
new_ch = str(ch).split('\'')[1]
word.append(new_ch)
if word in vocab:
                word_vecs[word] = np.frombuffer(f.read(binary_len), dtype='float32')
else:
f.read(binary_len)
return word_vecs
def load_glove_wordvecs(fname, vocab):
word_vecs = {}
with open(fname, 'r', encoding="utf8") as f:
for line in f:
splitLine = line.split()
word = splitLine[0]
embedding = np.array([float(val) for val in splitLine[1:]])
if word in vocab:
word_vecs[word] = embedding
return word_vecs
def append_vectors(fname, vocab, w2v):
Matrix_dir = 'Matrix'
matrix_path = join(Matrix_dir, fname)
w2v_dict_file_name = open(matrix_path, "rb")
w2v_dict = pickle.load(w2v_dict_file_name)
w2v_dict_file_name.close()
new_w2v = {}
    for word, y_v, z_v in zip(vocab, w2v, w2v_dict):
        new_vec = np.concatenate((w2v[word], w2v_dict[word]))
        new_w2v[word] = new_vec
return new_w2v
def add_unknown_words(word_vecs, vocab, k, min_df=1):
"""
For words that occur in at least min_df documents, create a separate word vector.
0.25 is chosen so the unknown vectors have (approximately) same variance as pre-trained ones
"""
i = 0
for word in vocab:
if word not in word_vecs and vocab[word] >= min_df:
i += 1
word_vecs[word] = np.random.uniform(-0.25, 0.25, k)
return i, word_vecs
def random_word_vectors(vocab, k, min_df=1):
"""
"""
word_vecs = {}
for word in vocab:
word_vecs[word] = np.random.uniform(-0.5, 0.5, k)
return word_vecs
def clean_str(string):
"""
Tokenization/string cleaning for the dataset.
Original taken from https://github.com/yoonkim/CNN_sentence/blob/master/process_data.py
"""
string = re.sub(r"[^A-Za-z0-9(),!?\'\`]", " ", string)
string = re.sub(r"\'s", " \'s", string)
string = re.sub(r"\'ve", " \'ve", string)
string = re.sub(r"n\'t", " n\'t", string)
string = re.sub(r"\'re", " \'re", string)
string = re.sub(r"\'d", " \'d", string)
string = re.sub(r"\'ll", " \'ll", string)
string = re.sub(r",", " , ", string)
string = re.sub(r"!", " ! ", string)
string = re.sub(r"\(", " \( ", string)
string = re.sub(r"\)", " \) ", string)
string = re.sub(r"\?", " \? ", string)
string = re.sub(r"\s{2,}", " ", string)
return string.strip().lower()
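# Illustrative pipeline sketch (not part of the original module), tying the
# helpers above together. The annotation filename 'annotations.json' and the
# embedding size are placeholders for the example only.
if __name__ == '__main__':
    tweets, vocab = build_data('annotations.json', train_ratio=0.9, clean_string=True)
    w2v = train_word2vec(tweets, vocab, num_features=300, min_word_count=1, context=10)
    W, word_idx_map = get_W(w2v, k=300)
    print('Vocabulary size:', len(vocab), '- embedding matrix shape:', W.shape)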
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Filename: access.py
# Project: routes
# Author: Brian Cherinka
# Created: Wednesday, 16th September 2020 6:16:40 pm
# License: BSD 3-clause "New" or "Revised" License
# Copyright (c) 2020 Brian Cherinka
# Last Modified: Wednesday, 16th September 2020 6:16:40 pm
# Modified By: Brian Cherinka
from __future__ import print_function, division, absolute_import
from sdss_access.path import Path
from fastapi import APIRouter, Request, Depends, HTTPException, Query, Path
from fastapi_utils.cbv import cbv
from pydantic import BaseModel, validator, PrivateAttr
from typing import Type
from enum import Enum
from pydantic import ValidationError
from valis.routes.base import Base, get_access, BaseBody
class PathPart(str, Enum):
""" A set of pre-defined choices for the `part` query param """
full = "full"
url = "url"
file = "file"
location = "location"
all = "all"
class PathModel(BaseModel):
""" A validator class for sdss_access path names and kwargs """
name: str
kwargs: dict = {}
template: str = None
full: str = None
url: str = None
file: str = None
location: str = None
exists: bool = None
needs_kwargs: bool = None
    _path: Path = PrivateAttr()  # private attr so the model has the correct sdss_access path
def __new__(cls, *args, **kwargs):
cls._path = kwargs.get('_path', None)
return super(PathModel, cls).__new__(cls)
@validator('name')
def is_name(cls, v, values):
if v not in cls._path.lookup_names():
release = 'WORK' if cls._path.release in ('sdss5', 'sdss4', 'sdsswork') else cls._path.release.upper()
raise ValueError(f'Validation error: path name {v} not a valid sdss_access name for release {release}')
return v
@validator('kwargs')
def good_kwargs(cls, v, values):
name = values.get('name')
keys = set(cls._path.lookup_keys(name))
# check for valid
valid = set(v) & set(keys)
if not valid:
return {}
# check for missing kwargs
missing = set(keys) - set(v)
if missing:
mstr = ', '.join(missing)
raise ValueError(f'Validation error: Missing kwargs {mstr} for name: {name}')
return v
@validator('needs_kwargs', always=True)
def check_kwargs(cls, v, values):
''' Check and assign the needs_kwargs attribute'''
return any(cls._path.lookup_keys(values.get('name')))
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.template = self._path.templates[self.name]
if self.kwargs or not self.needs_kwargs:
self.full = self._path.full(self.name, **self.kwargs)
self.url = self._path.url(self.name, **self.kwargs)
self.file = self._path.name(self.name, **self.kwargs)
self.location = self._path.location(self.name, **self.kwargs)
self.exists = self._path.exists(self.name, **self.kwargs)
class PathBody(BaseBody):
""" Body for SDSS access paths post requests """
kwargs: dict = {}
part: PathPart = 'full'
exists: bool = False
async def extract_path(request: Request, name: str = Path(..., example='apStar'), access: Path = Depends(get_access)) -> Type[PathModel]:
""" Dependency to extract and parse generic query parameters """
params = str(request.query_params)
kwargs = dict(map(lambda x: x.split('='), params.split('&'))) if params else {}
try:
path = PathModel(name=name, kwargs=kwargs, _path=access)
except ValidationError as ee:
raise HTTPException(status_code=422, detail=ee.errors()) from ee
else:
return path
router = APIRouter()
@cbv(router)
class Paths(Base):
@router.get("/", summary='Get a list of all sdss_access path names or templates')
async def get_paths(self, templates: bool = False):
""" Get a list of sdss_access path names """
if templates:
return self.path.templates
else:
return {'names': list(self.path.lookup_names())}
@router.get("/keywords/{name}", summary='Get a list of keyword variables for a sdss_acccess path name.')
async def get_path_kwargs(self, path: Type[PathModel] = Depends(extract_path)):
""" Get a list of input keyword arguments
Given an sdss_access path name, get the list of input keywords needed
to construct the full path.
Parameters
----------
name : str
a sdss_access path name
Returns
-------
A dict of path name and list of string keywords
"""
return {'name': path.name, 'kwargs': self.path.lookup_keys(path.name)}
@router.get("/{name}", summary='Get the template or resolved path for an sdss_access path name.')
async def get_path_name(self, path: Type[PathModel] = Depends(extract_path), part: PathPart = 'full',
exists: bool = False):
""" Construct an sdss_access path
Given a sdss_access path name, constructs the fully resolved path. sdss_access path
keyword arguments are passed in as url query parameters,
        e.g. `paths/mangacube?drpver=v2_4_3&wave=LOG&plate=8485&ifu=1901`. When no query
        parameters are specified, returns the sdss_access template.
Parameters
----------
name : str
a sdss_access path name
part : str
the part of the path to extract
exists : bool
If set, checks for local file existence and returns True/False
Returns
-------
A string path name
"""
return self.process_path(path, part, exists)
@router.post("/{name}", summary='Get the template or resolved path for an sdss_access path name.')
async def post_path_name(self, name: str, body: PathBody = None):
""" Construct an sdss_access path
Given an sdss_access path name and set of input keyword arguments,
construct the file path using sdss_access methods. Set `part` keyword to
indicate the part of the path to form, e.g. "full", "url". Set `exists` to
check whether the file exists on the server.
Parameters
----------
name : str
a sdss_access path name
kwargs: dict
a set of keyword arguments to construct the file path
part : str
the part of the path to extract. Default is "full".
exists : bool
If set, checks for local file existence and returns True/False
Returns
-------
A string path name
"""
# if no kwargs set to empty dict
kwargs = body.kwargs or {}
try:
path = PathModel(name=name, kwargs=kwargs, _path=self.path)
except ValidationError as ee:
raise HTTPException(status_code=422, detail=ee.errors()) from ee
else:
return self.process_path(path, body.part, body.exists)
def process_path(self, path: Type[PathModel], part: PathPart, exists: bool) -> dict:
if not path.kwargs and path.needs_kwargs:
out = path.dict(include={'template'})
out['warning'] = 'Warning: No kwargs specified to construct a path. Returning only template.'
return out
elif exists:
return path.dict(include={'exists'})
else:
return path.dict() if part == 'all' else path.dict(include={part})
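# Illustrative client sketch (not part of this module), showing how the GET
# endpoints above might be called once the router is mounted. The host, port,
# and the 'paths' prefix are assumptions for the example only; the 'mangacube'
# path name and its keywords come from the docstring example above.
if __name__ == '__main__':
    import requests
    base = 'http://localhost:8000/paths'
    print(requests.get(base + '/').json())                    # all path names
    print(requests.get(base + '/keywords/mangacube').json())  # required keywords
    print(requests.get(base + '/mangacube', params={
        'drpver': 'v2_4_3', 'wave': 'LOG', 'plate': 8485, 'ifu': 1901}).json())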
|
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
class GoogleTopLoc:
top_image = (By.ID, 'hplogo')
input_form = (By.NAME, 'q')
class GoogleTop:
def __init__(self, driver):
self.driver = driver
def open(self):
self.driver.get("http://www.google.com")
def search_field(self):
return WebDriverWait(self.driver, 10).until(EC.presence_of_element_located(GoogleTopLoc.input_form))
def google_logo(self):
return WebDriverWait(self.driver, 10).until(EC.presence_of_element_located(GoogleTopLoc.top_image))
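# Illustrative usage sketch (not part of the original page objects); assumes a
# locally available ChromeDriver, but any Selenium WebDriver instance works.
if __name__ == '__main__':
    from selenium import webdriver
    driver = webdriver.Chrome()
    page = GoogleTop(driver)
    page.open()
    page.search_field().send_keys('selenium page object pattern')
    assert page.google_logo().is_displayed()
    driver.quit()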
|
from db_helper import db_helper
class Module:
def __init__(self, code, studentID):
self._code = code
self.db_helper = db_helper(studentID)
self.assignments = []
@property
def code(self):
return self._code
@code.setter
def code(self, value):
self._code = value
def getAssignmentCount(self):
return self.db_helper.assignmentCount(self._code)
def getAssignments(self):
self.assignments = self.db_helper.getAllAssignments(self.code)
        print(len(self.assignments))
for assignment in self.assignments:
print(assignment.name)
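# Illustrative usage sketch (not part of the original class); the module code
# and student ID below are placeholder values.
if __name__ == '__main__':
    module = Module('CS101', 12345)
    print(module.getAssignmentCount())
    module.getAssignments()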
|
#!/usr/bin/env python
activate_this = "/vagrant/env/bin/activate_this.py"
execfile(activate_this, dict(__file__=activate_this))
print "Content-type: text/html\n"
print "ok"
|
from .views import TeamsViewSet
from rest_framework.routers import DefaultRouter
router = DefaultRouter()
router.register(r'teams', TeamsViewSet, basename='teams')
urlpatterns = router.urls
|
import keras
import pickle
import fire
from elapsedtimer import ElapsedTimer
#path = '/home/santanu/Downloads/Mobile_App/aclImdb/tokenizer.pickle'
#path_out = '/home/santanu/Downloads/Mobile_App/word_ind.txt'
def tokenize(path,path_out):
with open(path, 'rb') as handle:
tokenizer = pickle.load(handle)
dict_ = tokenizer.word_index
keys = list(dict_.keys())[:50000]
values = list(dict_.values())[:50000]
total_words = len(keys)
    with open(path_out, 'w') as f:
        for i in range(total_words):
            line = str(keys[i]) + ',' + str(values[i]) + '\n'
            f.write(line)
if __name__ == '__main__':
with ElapsedTimer('Tokenize'):
fire.Fire(tokenize)
|
from kontrol import visutils
from kontrol import fakeezca as ezca
import kontrol
import matplotlib.pyplot as plt
# import ezca # Alternatively import fakeezca as ezca for testing.
def test_actuator_diag():
"""Expected output at the end (numbers are ~ pi for the oringal matrix):
original EUL2COIL:
[[3.12307805 3.10118585 3.00159104]
[3.04553458 3.29171708 3.12165122]
[3.25032618 3.14461291 3.20363117]]
new EUL2COIL:
[[-16.47052991 0.23618048 19.3326145 ]
[ 85.14613189 -73.80129973 -8.02682917]
[-35.42087779 29.40472295 9.15054005]]
[[-16.47052991 0.23618048 19.3326145 ]
[ 85.14613189 -73.80129973 -8.02682917]
[-35.42087779 29.40472295 9.15054005]]
"""
BS = visutils.Vis('BS', ezca) # Define a visutils.Vis object with optic\
# name and the ezca module.
stage = 'IP'
dofs = ['L', 'T', 'Y'] # Make sure the order is the same as appeared\
# in the real-time system.
force =[1000, 2000, 3000] # Actuate 1000 counts in longitudinal,\
# 2000 counts in transverse and 3000 counts in yaw. Do specify\
# this or else the program will default actuations to 1000 counts.
no_of_coils = 3 # Optional. Determined by the EUL2COIL matrix if\
# not specified.
    t_ramp = 0.1 # For testing purposes, we put a small number.\
# In practice, this should be around 10 seconds or more.
t_avg = 1 # Again, this is for test only. Put a reasonable number
# when using this.
EUL2COIL_new = BS.actuator_diag(stage, dofs, act_block='TEST',
act_suffix='OFFSET', sense_block='DAMP', sense_suffix='INMON',
matrix='EUL2COIL', force=force, no_of_coils=no_of_coils,
update_matrix=False, t_ramp=t_ramp, t_avg=t_avg, dt=1/8)
print(EUL2COIL_new)
def test_find_sensor_correction_gain():
"""
"""
BS = visutils.Vis('BS', ezca)
rms_threshold = 0.01 # The adaptive loop terminates when the RMS of the
# adaptive gain is less than 0.01. This corresponds to 1% of
# inter calibration mismatch.
t_int = 10 # The integration time for calculating the RMS.
update_law = kontrol.unsorted.nlms_update # Normalized LMS algorithm
    # Normal LMS algorithm is also available in kontrol.unsorted, but is less
# robust.
reducing_lms_step = True # If True, then the step size of the LMS will
# be reduced by a factor of reduction ratio when the mean square error
# is higher or equal to previous iterations. This leads to better
# convergence of the sensor correction gain.
timeout = 20 # The loop will terminate regardless of the convergence when
# the algorithm has been running for 20 seconds. Set to, say 300 when
# using it in the real system.
ts, gains, inputs, errors = BS.find_sensor_correction_gain(
gain_channel='IP_SENSCORR_L_GAIN',
input_channel='IP_SENSCORR_L_INMON',
error_channel='IP_BLEND_ACCL_OUT16',
rms_threshold=rms_threshold, t_int=t_int, dt=1/8, update_law=update_law,
step_size=0.5, step_size_limits=(1e-3, 1),
reducing_lms_step=reducing_lms_step,
reduction_ratio=0.99, timeout=timeout)
plt.subplot(211)
plt.plot(ts, gains, label='Gain')
plt.legend(loc=0)
plt.subplot(212)
plt.plot(ts, errors, label='Error')
plt.legend(loc=0)
plt.show()
|
from alvi.client.scenes.create_graph import CreateGraph
class TraverseGraph(CreateGraph):
"""depth first graph traversing"""
def traverse(self, marker, graph, node):
if node in marker:
return
marker.append(node)
graph.stats.traversed_nodes += 1
graph.sync()
for child in node.children:
self.traverse(marker, graph, child)
def run(self, **kwargs):
graph = kwargs['container']
with graph.postpone_sync():
first_node = super().run(**kwargs)
marker = graph.create_multi_marker("Traversed")
graph.stats.traversed_nodes = 0
self.traverse(marker, graph, first_node)
if __name__ == "__main__":
TraverseGraph.start()
|
#!/usr/bin/env python3
import json
def main():
    with open('googlefonts.json') as src:
        fonts = json.load(src)
    forcards = {}
    for item in fonts['items']:
        forcards[item['family']] = item['files']
    with open('webfonts.json', 'w') as dst:
        json.dump(forcards, dst, indent=4)
if __name__ == '__main__':
main()
|
import logging
import time
from typing import Any
import requests
from jsonrpcclient import Error, Ok, parse, request_hex
from prometheus_client import Gauge, Info, start_http_server
from . import __version__
from .config import settings
logging.basicConfig(level=settings.get("LOG_LEVEL", default="INFO"))
class RouterMetrics:
# Setting up Prometheus metrics to collect
sinr = Gauge("sinr", "Signal to Noise Ratio (dB)")
rssi = Gauge("rssi", "Received Signal Strength Indicator (dB)")
rsrp = Gauge("rsrp", "Reference Symbol Received Power (dBm)")
rsrq = Gauge("rsrq", "Reference Signal Received Quality (dB)")
signal_strength = Gauge("signal_strength", "Mobile Signal Strength")
connected_devices = Gauge(
"connected_devices", "Number of connected devices on WiFi"
)
connection_time = Gauge("connection_time", "Current Connection Time (s)")
network_info = Info("network_info", "Network connection information")
total_upload_this_month = Gauge(
"total_upload_this_month", "Total uploaded data this month (bytes)"
)
total_download_this_month = Gauge(
"total_download_this_month", "Total downloaded data this month (bytes)"
)
total_transfer_this_month = Gauge(
"total_transfer_this_month",
"Total transferred data this month (bytes)",
)
linkhub_up = Gauge(
"linkhub_up",
"Marker whether or not the LinkHub scrape has worked (yes=1, no=0)",
)
def __init__(
self,
request_key: str,
box_addr: str = "192.168.1.1",
polling_interval_seconds: int = 5,
):
logging.info("Setting up exporter.")
self.request_key = request_key
self.box_addr = box_addr
self.url = f"http://{self.box_addr}/jrd/webapi"
self.polling_interval_seconds = polling_interval_seconds
self.timeout = self.polling_interval_seconds
self.headers = {
"_TclRequestVerificationKey": self.request_key,
"Referer": f"http://{self.box_addr}/index.html",
}
def run_metrics_loop(self) -> None:
"""Metrics fetching loop"""
while True:
logging.debug("Fetching metrics.")
self.fetch_metrics()
time.sleep(self.polling_interval_seconds)
def _box_api_request(self, method: str) -> dict[str, Any]:
response = requests.post(
self.url,
json=request_hex(method),
headers=self.headers,
timeout=self.timeout,
)
logging.debug("Method: %s; response: %s", method, response.json())
match parse(response.json()):
case Ok(result, _):
return result
case Error(_, message, _, _):
logging.error(
"API error: method: %s; message: %s", method, message
)
raise RuntimeError(message)
case _:
raise AssertionError("Impossible parsed response received.")
def _read_network_info(self) -> None:
"""Requesting, parsing, and updating network info metrics."""
results = self._box_api_request("GetNetworkInfo")
logging.debug("Network info: %s", results)
# Set Prometheus metrics
if value := results.get("SINR"):
self.sinr.set(value)
if value := results.get("RSSI"):
self.rssi.set(value)
if value := results.get("RSRP"):
self.rsrp.set(value)
if value := results.get("RSRQ"):
self.rsrq.set(value)
if value := results.get("SignalStrength"):
self.signal_strength.set(value)
if (network_name := results.get("NetworkName")) and (
cell_id := results.get("CellId")
):
self.network_info.info(
{
"network_name": network_name,
"cell_id": cell_id,
}
)
def _read_system_status(self) -> None:
"""Requesting, parsing, and updating system status metrics."""
results = self._box_api_request("GetSystemStatus")
logging.debug("System status: %s", results)
# Set Prometheus metrics
if value := results.get("TotalConnNum"):
self.connected_devices.set(value)
def _read_usage_record(self) -> None:
"""Requesting, parsing, and updating usage record metrics."""
results = self._box_api_request("GetUsageRecord")
logging.debug("Usage record: %s", results)
# Set Prometheus metrics
if value := results.get("CurrConnTimes"):
self.connection_time.set(value)
if value := results.get("HCurrUseUL"):
self.total_upload_this_month.set(value)
if value := results.get("HCurrUseDL"):
self.total_download_this_month.set(value)
if value := results.get("HUseData"):
self.total_transfer_this_month.set(value)
def fetch_metrics(self) -> None:
"""Fetch all relevant metrics."""
try:
self._read_network_info()
self._read_system_status()
self._read_usage_record()
self.linkhub_up.set(1)
except: # noqa: E722
# TODO: This is not really working here yet,
# since we are crashing right after
self.linkhub_up.set(0)
raise
def main() -> None:
"""Main entry point for the exporter"""
logging.info("Linkhub Prometheus Exporter, version %s", __version__)
# Add exporter metadata to what's exported
exporter_info = Info("exporter_info", "Exporter information")
exporter_info.info(
{
"version": __version__,
}
)
try:
router_metrics = RouterMetrics(
request_key=settings.REQUEST_KEY,
box_addr=settings.BOX_ADDRESS,
polling_interval_seconds=settings.POLLING_INTERVAL_SECONDS,
)
except AttributeError as exc:
# Every other setting besides REQUEST_KEY has defaults
logging.error("Missing REQUEST_KEY configuration.")
raise RuntimeError("Missing REQUEST_KEY configuration.") from exc
logging.info(
"Server starting on http://%s:%d",
settings.EXPORTER_ADDRESS,
settings.EXPORTER_PORT,
)
start_http_server(
port=settings.EXPORTER_PORT, addr=settings.EXPORTER_ADDRESS
)
router_metrics.run_metrics_loop()
if __name__ == "__main__":
main()
|
import os
import _pickle as cpickle
from argparse import ArgumentParser
import matplotlib
matplotlib.use("TkAgg")  # backend so that the figure can stay in the background
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
def run_display_server(file, refresh):
"""
Displays the data from a display_data.pkl file created by the SensorimotorPredictiveNetwork.track_progress() method.
The figure refreshes every 5s, stays in the background but stays interactive.
Argument:
file - path to the display_data.pkl file
"""
# interactive mode
plt.ion()
# wait for the file to be created
while True:
if os.path.exists(file):
break
plt.pause(refresh)
while True:
# load the data
try:
with open(file, 'rb') as f:
try:
data = cpickle.load(f)
except (IOError, EOFError):
plt.pause(refresh)
continue
except FileNotFoundError:
plt.pause(refresh)
continue
fig = display_data(data)
# save the figure
fig.savefig(os.path.dirname(file) + '/figure.png')
fig.savefig(os.path.dirname(file) + '/figure.svg')
# wait
plt.pause(refresh)
def display_data(data, fig_number=1, name=""):
"""
Displays the data from a display_data.pkl file created by the SensorimotorPredictiveNetwork.track_progress() method.
Argument:
data - data to display
    fig_number - index of the figure to plot in
name - text to add to the figure
"""
# get useful dimensions
dim_motor = data["motor"].shape[1]
dim_sensor = data["gt_sensation"].shape[1]
dim_encoding = data["encoded_motor"].shape[1]
# open the figure
if not plt.fignum_exists(fig_number):
fig = plt.figure(num=fig_number, figsize=(16, 5))
# create the axis for the motor space
ax1 = plt.subplot(141) if dim_motor in (1, 2) else plt.subplot(141, projection='3d')
# create the axis for the encoding space
        ax2 = plt.subplot(142) if dim_encoding in (1, 2) else plt.subplot(142, projection='3d')
# create the axis for the egocentric position
ax3 = plt.subplot(143)
# create the axis for the sensory space
        ax4 = plt.subplot(144) if dim_sensor in (1, 2) else plt.subplot(144, projection='3d')
else:
fig = plt.figure(num=fig_number)
ax1, ax2, ax3, ax4 = fig.axes
# display the updated title
plt.suptitle(name + " - epoch: " + str(data["epoch"]), fontsize=14)
# plot the motor configurations
ax1.cla()
ax1.set_title("motor space")
if dim_motor == 1:
ax1.plot(data["motor"][:, 0], 0 * data["motor"][:, 0], 'b.')
ax1.set_xlabel('$m_1$')
elif dim_motor == 2:
ax1.plot(data["motor"][:, 0], data["motor"][:, 1], 'b.')
ax1.set_xlabel('$m_1$')
ax1.set_ylabel('$m_2$')
elif dim_motor >= 3:
ax1.plot(data["motor"][:, 0], data["motor"][:, 1], data["motor"][:, 2], 'b.')
ax1.set_xlabel('$m_1$')
ax1.set_ylabel('$m_2$')
ax1.set_zlabel('$m_3$')
ax1.axis('equal')
# plot the encoded motor configurations
ax2.cla()
ax2.set_title("encoding space")
if dim_encoding == 1:
ax2.plot(data["encoded_motor"][:, 0], 0 * data["encoded_motor"][:, 0], 'r.')
ax2.set_xlabel('$h_1$')
ax2.text(0.05, 0.05, "topo_error_in_H={:.2e}".format(data["topo_error_in_H"]), transform=ax2.transAxes,
fontsize=9, verticalalignment="top", bbox=dict(boxstyle="round", facecolor="wheat", alpha=0.2))
elif dim_encoding == 2:
ax2.plot(data["encoded_motor"][:, 0], data["encoded_motor"][:, 1], 'r.')
ax2.set_xlabel('$h_1$')
ax2.set_ylabel('$h_2$')
ax2.text(0.05, 0.05, "topo_error_in_H={:.2e}".format(data["topo_error_in_H"]), transform=ax2.transAxes,
fontsize=9, verticalalignment="top", bbox=dict(boxstyle="round", facecolor="wheat", alpha=0.2))
elif dim_encoding >= 3:
ax2.plot(data["encoded_motor"][:, 0], data["encoded_motor"][:, 1], data["encoded_motor"][:, 2], 'r.')
ax2.set_xlabel('$h_1$')
ax2.set_ylabel('$h_2$')
ax2.set_zlabel('$h_3$')
ax2.text(0.05, 0.05, 0.05, "topo_error_in_H={:.2e}".format(data["topo_error_in_H"]), transform=ax2.transAxes,
fontsize=9, verticalalignment="top", bbox=dict(boxstyle="round", facecolor="wheat", alpha=0.2))
ax2.axis('equal')
# plot the sensor positions and the linear projection of the encoded motor configurations in the same space
ax3.cla()
ax3.set_title("sensor position")
#
if data["gt_pos"].shape[0] < 1000:
for k in range(data["gt_pos"].shape[0]):
ax3.plot((data["gt_pos"][k, 0], data["projected_encoding"][k, 0]),
(data["gt_pos"][k, 1], data["projected_encoding"][k, 1]), 'r-', lw=0.4)
#
ax3.plot(data["gt_pos"][:, 0], data["gt_pos"][:, 1], 'bo', mfc="none", ms=8)
ax3.plot(data["projected_encoding"][:, 0], data["projected_encoding"][:, 1], 'r.')
ax3.set_xlabel('$x$')
ax3.set_ylabel('$y$')
ax3.text(0.05, 0.95, "topo_error_in_P={:.2e}\nmetric error={:.2e}".format(data["topo_error_in_P"], data["metric_error"]), transform=ax3.transAxes,
fontsize=9, verticalalignment="top", bbox=dict(boxstyle="round", facecolor="wheat", alpha=0.2))
ax3.axis('equal')
# plot the ground-truth and predicted sensory configurations
ax4.cla()
ax4.set_title("sensory space")
if dim_sensor == 1:
ax4.plot(data["gt_sensation"][:, 0], 0 * data["gt_sensation"][:, 0], 'o', color=[0, 0.5, 0], ms=8, mfc="none")
ax4.plot(data["predicted_sensation"][:, 0], 0 * data["predicted_sensation"][:, 0], 'm.')
ax4.set_xlabel('$s_1$')
ax4.text(0.05, 0.05, "loss={:.2e}".format(data["loss"]), transform=ax4.transAxes,
fontsize=9, verticalalignment="top", bbox=dict(boxstyle="round", facecolor="wheat", alpha=0.2))
elif dim_sensor == 2:
ax4.plot(data["gt_sensation"][:, 0], data["gt_sensation"][:, 1], 'o', color=[0, 0.5, 0], ms=8, mfc="none")
ax4.plot(data["predicted_sensation"][:, 0], data["predicted_sensation"][:, 1], 'm.')
ax4.set_xlabel('$s_1$')
ax4.set_ylabel('$s_2$')
ax4.text(0.05, 0.05, "loss={:.2e}".format(data["loss"]), transform=ax4.transAxes,
fontsize=9, verticalalignment="top", bbox=dict(boxstyle="round", facecolor="wheat", alpha=0.2))
elif dim_sensor >= 3:
ax4.plot(data["gt_sensation"][:, 0], data["gt_sensation"][:, 1], data["gt_sensation"][:, 2], 'o', color=[0, 0.5, 0], ms=8, mfc="none")
ax4.plot(data["predicted_sensation"][:, 0], data["predicted_sensation"][:, 1], data["predicted_sensation"][:, 2], 'm.')
ax4.set_xlabel('$s_1$')
ax4.set_ylabel('$s_2$')
ax4.set_zlabel('$s_3$')
ax4.text(0.05, 0.05, 0.05, "loss={:.2e}".format(data["loss"]), transform=ax4.transAxes,
fontsize=9, verticalalignment="top", bbox=dict(boxstyle="round", facecolor="wheat", alpha=0.2))
ax4.axis('equal')
return fig
if __name__ == "__main__":
parser = ArgumentParser()
parser.add_argument("-f", "--filename", dest="filename", help="path to the file display_data.pkl")
args = parser.parse_args()
filename = args.filename
run_display_server(filename, refresh=5)
|
from __future__ import print_function
from os.path import dirname, join
from setuptools import setup, find_packages
from setuptools.command.test import test as TestCommand
def _get_version():
"""Return the project version from VERSION file."""
with open(join(dirname(__file__), 'marvin_teste_engine/VERSION'), 'rb') as f:
version = f.read().decode('ascii').strip()
return version
class Tox(TestCommand):
"""Run the test cases using TOX command."""
user_options = [('tox-args=', 'a', "Arguments to pass to tox")]
def initialize_options(self):
TestCommand.initialize_options(self)
self.tox_args = None
def finalize_options(self):
TestCommand.finalize_options(self)
self.test_args = []
self.test_suite = True
def run_tests(self):
# Import here, cause outside the eggs aren't loaded
import tox
import shlex
import sys
args = self.tox_args
if args:
args = shlex.split(self.tox_args)
else:
# Run all tests by default
args = ['-c', join(dirname(__file__), 'tox.ini'), 'tests']
errno = tox.cmdline(args=args)
sys.exit(errno)
setup(
name='marvin_teste_engine',
version=_get_version(),
url='',
description='teste',
long_description=open(join(dirname(__file__), 'README.md')).read(),
author='teste',
maintainer='teste',
maintainer_email='teste',
packages=find_packages(exclude=('tests', 'tests.*')),
include_package_data=True,
zip_safe=False,
classifiers=[
'Development Status :: 3 - Alpha',
'Intended Audience :: Developers',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Topic :: Software Development :: Libraries :: Python Modules',
],
install_requires=[
'scikit-learn==0.18.2',
'scipy==0.19.1',
'numpy==1.13.1',
'pandas==0.20.3',
'matplotlib==2.0.2',
'marvin-python-toolbox==0',
'Fabric==1.14.0',
],
dependency_links=['git+https://github.com/marvin-ai/marvin-python-toolbox.git/@v0.0.3#egg=marvin_python_toolbox-0'],
tests_require=[
'pytest>=2.6.4',
'pytest-cov>=1.8.1',
'mock>=2.0.0',
'virtualenv>=15.0.1',
'tox>=2.2.0',
],
cmdclass={
'test': Tox,
},
)
|
from telegram import InlineKeyboardButton
from telegram import InlineKeyboardMarkup
from core import database
from core.utils import chunks
from powers.games import Games
COVID = InlineKeyboardMarkup(
[
[
InlineKeyboardButton("🇷🇴", callback_data="local_quick_stats"),
InlineKeyboardButton("🏙", callback_data="local_counties"),
InlineKeyboardButton("👵", callback_data="local_age"),
InlineKeyboardButton("🌎", callback_data="local_global_stats"),
InlineKeyboardButton("🗞", callback_data="local_latest_article"),
InlineKeyboardButton("📊", callback_data="datelazi"),
InlineKeyboardButton("✅", callback_data="end"),
]
]
)
MORE = InlineKeyboardMarkup(
[
[
InlineKeyboardButton("⬅️", callback_data="back"),
InlineKeyboardButton("✅", callback_data="end"),
]
]
)
def get_game_markup(chat_id):
games = Games.get_list(chat_id=chat_id)
return InlineKeyboardMarkup(
[
[
InlineKeyboardButton(
game["name"], callback_data=f"games_{game['name']}"
)
for game in chunk
]
for chunk in chunks(list(games), 5)
]
+ [[InlineKeyboardButton("✅", callback_data="end")]]
)
|
class ConsulDetailsModel(object):
def __init__(self):
self.name = ''
self.service_id = ''
self.service_name = ''
self.service_adres = ''
self.service_port = 0
self.checks = []
class ConsulChecksModel(object):
def __init__(self):
self.check_id = ''
self.name = ''
self.status = ''
self.output = ''
self.service_id = ''
self.service_name = ''
self.statuscolor = [0, 1, 0.3, 0.2]
def status_color(self, status):
if status == 'warning':
self.statuscolor = [1, 0.6, 0, 0.2]
elif status != 'passing':
self.statuscolor = [1, 0, 0, 0.2]
|
# Third party modules
import setuptools
setuptools.setup(
install_requires=[
"numpy",
"scikit-learn",
"umap-learn",
"tensorflow",
"scipy",
"h5py >= 2.0.0",
"matplotlib",
"seaborn",
"pandas",
]
)
|
"""
Status: The algorithm works and an example of using the algorithm is finished,
so I am done working on this module.
A module that implements the Knuth-Plass text formatting algorithm in Python.
"""
from typing import List, Callable, Union, Dict, Generator
from collections import namedtuple
class JUSTIFY:
LEFT = "LEFT"
RIGHT = "RIGHT"
CENTER = "CENTER"
FULL = "FULL"
WHITESPACE_CHARS = ' \t\r\n\f\v'
WHITESPACE = set(ch for ch in WHITESPACE_CHARS)
Num = Union[int, float]
INF = 10000
GLUE, BOX, PENALTY = 1, 2, 3
# =============================================================================
# Specifications (Glue, Box, Penalty)
# -----------------------------------------------------------------------------
class Specification:
# Specify default values
t = default_t = None # t in the paper; the type of the Spec
width = default_width = 0.0 # w in the paper; the ideal width of the glue, the width of added typeset material for the penalty, or the static width of the box
stretch = default_stretch = 0.0 # y in the paper; the amount this glue can stretch/enlarge its width by
shrink = default_shrink = 0.0 # z in the paper; the amount this glue can shrink its width by
penalty = default_penalty = 0.0 # p in the paper; the amount to be penalized if use this penalty
flagged = default_flagged = 0 # f in the paper; used to say whether a hyphen will need to be put here. Is either 1 for True or 0 for False
def is_glue(self): return False
def is_box(self): return False
def is_penalty(self): return False
def is_forced_break(self): return False
class Glue(Specification):
"""
Glue refers to blank space that can vary its width in specified ways; it is
an elastic mortar used between boxes in a typeset line.
"""
__slots__ = ['width', 'stretch', 'shrink']
t = GLUE
def __init__(self, shrink:Num, width:Num, stretch:Num):
"""
Init for a Glue Object. You can think of shrink, width, and stretch as
shrink: the max you can lessen the width by
width: ideal width
stretch: the max amount of space you can add to the width
In other words: this glue has minimum width (`width` - `shrink`) and
maximum width (`width` + `stretch`)
NOTE: in the paper, a Glue is specified with order
"width, stretch, shrink". That makes absolutely no sense so I've
changed the parameters to be in order "shrink, width, stretch"
instead.
"""
self.shrink: Num = shrink
self.width: Num = width
self.stretch: Num = stretch
def r_width(self, r):
"""
Returns the width of this glue for the given ratio r.
"""
if r < 0:
# As r is negative, will be subtracting width
return self.width + (r * self.shrink)
else:
# r is positive, so will be adding width (or r is 0 and so adding nothing)
return self.width + (r * self.stretch)
def is_glue(self): return True
def copy(self):
return Glue(self.shrink, self.width, self.stretch)
def __eq__(self, o:object):
if isinstance(o, self.__class__):
return o.width == self.width and o.stretch == self.stretch and o.shrink == self.shrink
return False
def __repr__(self):
return f'<{self.__class__.__name__}(width={self.width}, stretch={self.stretch}, shrink={self.shrink})>'
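# Worked example (illustrative): for the inter-word glue used by make_paragraph
# below, Glue(shrink=1, width=2, stretch=1),
#   r_width(-1.0) == 1   (fully shrunk)
#   r_width( 0.0) == 2   (ideal width)
#   r_width( 1.0) == 3   (fully stretched)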
class Box(Specification):
"""
A box refers to something that is to be typeset: either a character from
some font of type, or a black rectangle such as a horizontal or
vertical rule, or something built up from several characters such as an
accented letter or a mathematical formula. The contents of a box may be
extremely complicated, or they may be extremely simple; the
line-breaking algorithm does not peek inside a box to see what it
contains, so we may consider the boxes to be sealed and locked.
"""
__slots__ = ['width', 'value']
t = BOX
def __init__(self, width:Num, value:Num):
self.width: Num = width # The fixed width of the box (so width of what is in the box)
self.value: Num = value # Value is something like a glyph/character. Algorithm does not use this, only width param so value can be whatever you want, as long as the width reflects its width.
def is_box(self): return True
def copy(self):
return Box(self.width, self.value)
def __eq__(self, o:object):
if isinstance(o, self.__class__):
return o.width == self.width and o.value == self.value
return False
def __repr__(self):
return f'<{self.__class__.__name__}(width={self.width}, value={self.value})>'
class Penalty(Specification):
"""
Penalty specifications refer to potential places to end one line of a
paragraph and begin another (AKA, a linebreak), with a certain
‘aesthetic cost’ indicating how desirable or undesirable such a
    breakpoint would be. The width of a penalty is how much typeset material
    needs to be added if you break here: 0 if nothing, or the width of a
    hyphen if you are breaking off a word and need to insert one.
"""
__slots__ = ['width', 'penalty', 'flagged']
t = PENALTY
def __init__(self, width:Num, penalty:Num, flagged:bool):
self.width: Num = width # Width of extra typeset material (width of the hyphen)
self.penalty: Num = penalty # The penalty to breaking here
self.flagged: Num = flagged # Whether there is a hyphen here
def is_penalty(self): return True
    def is_forced_break(self): return (self.penalty <= -INF)
def copy(self):
return Penalty(self.width, self.penalty, self.flagged)
def __eq__(self, o:object):
if isinstance(o, self.__class__):
return o.width == self.width and o.penalty == self.penalty and o.flagged == self.flagged
return False
def __repr__(self):
return f'<{self.__class__.__name__}(width={self.width}, penalty={self.penalty}, flagged={self.flagged})>'
Spec = Union[Glue, Box, Penalty]
# =============================================================================
# Parsing Text into List of Specs
# -----------------------------------------------------------------------------
def make_paragraph(text):
"""
An example function that takes in text and returns a paragraph from it that
can be used in the Knuth-Plass Algorithm.
"""
# Turn chunk of text into a paragraph
L = []
for ch in text:
if ch in ' \n':
# Add the space between words
# it's 2 units +/- 1 so can be 1, 2, or 3 units long
L.append(Glue(1, 2, 1))
elif ch == '@':
# Append forced break
L.append(Penalty(0, -INF, False))
elif ch == '~':
# Append unallowed break
L.append(Penalty(0, INF, False))
else:
# All characters are 1 unit wide
L.append(Box(1, ch))
# Append closing penalty and glue
L.extend(std_paragraph_end())
return L
def std_paragraph_end():
"""
Returns the standard closing penalty for a paragraph as a list of Penalty,
Glue, and Penalty Objects. Just extend your List[Spec] by it and it
should end properly.
"""
return [Penalty(0, INF, 0), # Forced non-break (must not break here, otherwise a Box coming before the Glue after this would allow a break to be here)
Glue( 0, 0, INF), # Glue that fills the rest of the last line (even if that fill is 0 width)
Penalty(0, -INF, 1)] # Forced break (Ends last line)
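# Illustrative example (not part of the original module): make_paragraph turns
# each character into a unit-width Box, each space into Glue(1, 2, 1), and
# appends std_paragraph_end(), so make_paragraph("to be") produces:
#   [Box(1, 't'), Box(1, 'o'), Glue(1, 2, 1), Box(1, 'b'), Box(1, 'e'),
#    Penalty(0, INF, 0), Glue(0, 0, INF), Penalty(0, -INF, 1)]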
# =============================================================================
# The Actual Knuth-Plass Algorithm
# -----------------------------------------------------------------------------
class Break:
"""
A class representing a break in the text as calculated by the Knuth-Plass
algorithm.
"""
__slots__ = ["position", "line", "fitness_class", "demerits", "previous"]
def __init__(self, position, line, fitness_class, demerits, previous=None):
self.position = position # Index in the Knuth-Plass paragraph this break occurs (excludes i in last line, includes i on this current line)
self.line = line # What line of the resulting paragraph this break causes
self.fitness_class = fitness_class # The fitness class of this break
self.demerits = demerits # How 'bad' this break is
self.previous = previous # The previous break that had to occur to get this one
def copy(self):
return Break(self.position, self.line, self.fitness_class, self.demerits, self.previous)
def __repr__(self):
return f"<{self.__class__.__name__}(pos={self.position}, line={self.line}, fitness_class={self.fitness_class}, total_width={self.total_width}, total_stretch={self.total_stretch}, total_shrink={self.total_shrink}, demerits={self.demerits}, previous={self.previous})>"
# -- Give the Algorithm Function Itself
BreakpointInfo = namedtuple('BreakpointInfo', ['break_point_obj', 'line_info'])
LineInfo = namedtuple('LineInfo', ["total_num_lines", "ratio", "line_num", "line_length", "line_contents"])
from tools import profile
@profile()
def knuth_plass_breaks(
paragraph:List[Spec],
line_lengths:Union[List[Num], Num, \
Generator[Num, None, None]], # l1, l2,... in the paper
looseness:int=0, # q in the paper
tolerance:int=1, # rho in the paper
fitness_demerit:Num=100, # gamma in the paper
flagged_demerit:Num=100, # alpha in the paper
ret_vals:bool=False
):
"""
Takes in a list of Glue, Box, and Penalty objects, runs the Knuth-Plass
algorithm, and yields the results.
paragraph : A list of Glue, Box, and Penalty items that you want the breaks
for.
line_lengths : a list of integers giving the lengths of each line. The
last element of the list is reused for subsequent lines after it.
looseness : An integer value. If it's positive, the paragraph will be set
to take that many lines more than the optimum value. If it's negative,
the paragraph is set as tightly as possible. Defaults to zero, meaning the
optimal length for the paragraph.
tolerance : the maximum adjustment ratio allowed for a line. Defaults to 1.
fitness_demerit : additional value added to the demerit score when two
consecutive lines are in different fitness classes.
flagged_demerit : additional value added to the demerit score when breaking
at the second of two flagged penalties.
ret_vals : If True, it will return the values, otherwise this
method returns the values as a generator. The generator implementation
is the default and saves a lot of memory, but means that the output can
only be iterated through once before you have to run this method again to
get another generator.
return : the return value is a generator/list that returns BreakpointInfo
namedtuples. These have the following format:
BreakpointInfo(
break_point_obj: the actual breakpoint object generated
line_info: namedtuple (contains info for each line) LineInfo(
total_num_lines: int, the total number of lines generated
ratio: float, for each Glue object on this line, give this
ratio to the Glue object's `r_width()` method to have the
method return what this Glue's width should be if you want
to JUSTIFY.FULL your text
line_num: int, the 1-indexed number of the line you are
currently on. So the first line yielded by the generator
is line 1
line_length: int, how long this line is supposed to be,
according to what was given to the generator
line_contents :
the list/generator that yields Glue, Box, and Penalty
objects that specify what is supposed to be on this line
)
)
"""
def is_feasible_breakpoint(i):
"""Return true if position 'i' is a feasible breakpoint."""
spec = paragraph[i]
if spec.t == PENALTY and spec.penalty < INF:
# Specified breakpoint
return 1
elif i > 0 and paragraph[i-1].t == BOX and spec.t == GLUE:
# Breakpoint when glue directly follows a box
return 1
else:
return 0
if isinstance(line_lengths, int) or isinstance(line_lengths, float):
line_lengths = [line_lengths]
m = len(paragraph)
if m == 0: return [] # No text, so no breaks
# Precompute the running sums of width, stretch, and shrink (W,Y,Z in the
# original paper). These make it easy to measure the width/stretch/shrink
# between two indexes; just compute sum_*[pos2] - sum_*[pos1]. Note that
# sum_*[i] is the total up to but not including the box at position i.
sum_width = [0] * m; sum_stretch = [0] * m; sum_shrink = [0] * m
width_sum = stretch_sum = shrink_sum = 0.0
for i, spec in enumerate(paragraph):
sum_width[i] = width_sum
sum_stretch[i] = stretch_sum
sum_shrink[i] = shrink_sum
width_sum += spec.width
stretch_sum += spec.stretch
shrink_sum += spec.shrink
def compute_adjustment_ratio(pos1, pos2, line, line_lengths):
"""
Compute adjustment ratio for the line between pos1 and pos2.
This is how much you would have to shrink (if r < 0) or
stretch (if r > 0) the line we are currently looking at in order to
make it fit exactly on the current line (i.e. have exactly the same
length as the current line).
"""
ideal_width = sum_width[pos2] - sum_width[pos1] # ideal width
if paragraph[pos2].t == PENALTY:
ideal_width += paragraph[pos2].width
# Get the length of the current line; if the line_lengths list
# is too short, the last value is always used for subsequent
# lines.
if line < len(line_lengths):
available_width = line_lengths[line]
else:
available_width = line_lengths[-1]
# Compute how much the contents of the line would have to be
# stretched or shrunk to fit into the available space.
if ideal_width < available_width:
# You would have to stretch this line if you want it to fit on the
# desired line
y = sum_stretch[pos2] - sum_stretch[pos1] # The total amount of stretch (in whatever units all the parts of the paragraph are measured in) you can stretch this line by
if y > 0:
# Since it is possible to stretch the line, find out how much
# you should stretch it by to take up the full width of the line
r = (available_width - ideal_width) / float(y)
else:
r = INF
elif ideal_width > available_width:
# Must shrink the line by removing space from glue if you want it
# to fit on the line
z = sum_shrink[pos2] - sum_shrink[pos1] # Total amount you could possibly shrink this line by to make it fit on the current desired line
if z > 0:
# Since it is possible to shrink the line, find how much you
# should shrink it to fit it perfectly (width matches desired
# width) on the line
r = (available_width - ideal_width) / float(z)
else:
r = INF
else:
# Exactly the right length!
r = 0
return r
A = Break(position=0, line=0, fitness_class=1, demerits=0)
active_nodes = [A]
def add_active_node(node):
"""
Add a node to the active node list.
The node is added so that the list of active nodes is always sorted by
line number, and so that the set of (position, line, fitness_class)
tuples has no repeated values.
"""
index = 0
length = len(active_nodes)
node_line = node.line
node_fitness_class = node.fitness_class
node_position = node.position
# Find the first index at which the active node's line number
# is equal to or greater than the line for 'node'. This gives
# us the insertion point.
while (index < length and active_nodes[index].line < node_line):
index += 1
insert_index = index
# Check if there's a node with the same line number and
# position and fitness. This lets us ensure that the list of
# active nodes always has unique (line, position, fitness)
# values.
while (index < length and active_nodes[index].line == node_line):
if (active_nodes[index].fitness_class == node_fitness_class and
active_nodes[index].position == node_position):
# A match, so just return without adding the node
return
index += 1
active_nodes.insert(insert_index, node)
# -- End Function
breaks_to_deactivate = [] # List of breaks that were feasible but no longer are
breaks_to_activate = [] # List of newly-found feasible breaks
for i, B in enumerate(paragraph):
# Determine if this box is a feasible breakpoint and
# perform the main loop if it is.
if is_feasible_breakpoint(i):
# Loop over the list of active nodes, and compute the fitness
# of the line formed by breaking at A and B. The resulting feasible
# breaks are collected and added to the list of active nodes.
for A in active_nodes:
r = compute_adjustment_ratio(A.position, i, A.line, line_lengths)
if (r < -1 or B.is_forced_break()):
# Deactivate node A
breaks_to_deactivate.append(A)
if -1 <= r <= tolerance:
# Compute demerits and fitness class
if B.penalty >= 0:
demerits = (1 + 100 * abs(r)**3 + B.penalty) ** 3
elif B.is_forced_break():
demerits = (1 + 100 * abs(r)**3) ** 2 - B.penalty**2
else:
demerits = (1 + 100 * abs(r)**3) ** 2
# two consecutive breaks with flagged penalties cause an
# additional demerit to be added (we don't want two consecutive
# lines with a hyphen at the end of them)
if B.flagged and paragraph[A.position].flagged:
demerits += flagged_demerit
# Figure out the fitness class of this line (tight, loose,
# very tight, or very loose).
if r < -.5: fitness_class = 0
elif r <= .5: fitness_class = 1
elif r <= 1: fitness_class = 2
else: fitness_class = 3
# If two consecutive lines are in very different fitness
# classes, add to the demerit score for this break.
if abs(fitness_class - A.fitness_class) > 1:
demerits += fitness_demerit
# Record a feasible break from A to B
brk = Break(
position = i,
line = A.line + 1,
fitness_class = fitness_class,
demerits = demerits,
previous = A
)
breaks_to_activate.append(brk)
# end for A in active_nodes
# Deactivate nodes that need to be deactivated
for node in breaks_to_deactivate:
if len(active_nodes) > 1:
active_nodes.remove(node)
else:
break
breaks_to_deactivate.clear()
# Activate the new nodes that need to be activated
for node in breaks_to_activate:
add_active_node(node)
breaks_to_activate.clear()
# end if self.feasible_breakpoint()
# end for i in range(m)
# For some reason, some of the active_nodes that reach this point do not
# represent a break at the very end of the paragraph so only consider
# ending breakpoints that actually include the ending line of the
# paragraph
for node in active_nodes[:]:
if node.position != len(paragraph) - 1:
active_nodes.remove(node)
assert len(active_nodes) > 0, \
'Could not find any set of breakpoints that both met the given criteria and ended at the end of the paragraph.'
# Find the active node with the lowest number of demerits.
A = min(active_nodes, key=lambda A: A.demerits)
if looseness != 0:
# The search for the appropriate active node is a bit more complicated;
# we look for a node with a paragraph length that's as close as
# possible to (A.line + looseness) with the minimum number of demerits.
best = 0
d = INF
for br in active_nodes:
delta = br.line - A.line
# The two branches of this 'if' statement are for handling values
# of looseness that are either positive or negative.
if ((looseness <= delta < best) or (best < delta < looseness)):
best = delta # remember the closest line-count difference found so far
d = br.demerits
b = br
elif delta == best and br.demerits < d:
# This break is of the same length, but has fewer demerits and
# hence is the one we should use.
d = br.demerits
b = br
A = b
# -- Generate the list of chosen break points
breaks = []
while A is not None:
breaks.append(A.position)
A = A.previous
breaks.reverse()
# -- Now Actually Yield/Return the Results
assert breaks[0] == 0
def line_length_gen():
i = 0
while True:
if i < len(line_lengths):
yield line_lengths[i]
else:
yield line_lengths[-1]
i += 1
total_num_lines = (len(breaks) - 1) # How many lines the text was broken into
def ret_vals_gen():
line_start = 0
line_num = 0
for break_point, line_length in zip(breaks[1:], line_length_gen()):
ratio = compute_adjustment_ratio(line_start, break_point, line_num, line_lengths)
def line_contents():
for i in range(line_start, break_point, 1):
yield paragraph[i]
# line_num + 1 because line_num is 0 indexed but line_num given should not be
yield BreakpointInfo(break_point, LineInfo(total_num_lines, ratio, line_num + 1, line_length, line_contents()))
line_num += 1
line_start = break_point + 1
if ret_vals:
# Return the values as lists rather than a generator
rets = []
for break_point, line_info in ret_vals_gen():
rets.append(BreakpointInfo(break_point, LineInfo(*line_info[:-1], tuple(spec.copy() for spec in line_info.line_contents))))
return rets
else:
# Return a generator that will yield the values without taking up more memory
return ret_vals_gen()
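# Illustrative usage sketch (not from the original file; the 25-unit line width is
# an assumption). Each yielded item unpacks into the namedtuples documented above:
#   para = make_paragraph("the quick brown fox jumps over the lazy dog")
#   for break_point, line_info in knuth_plass_breaks(para, 25, ret_vals=True):
#       print(line_info.line_num, line_info.line_length, line_info.ratio)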
def str_for_breaks(breaks, justify:str=JUSTIFY.LEFT, end_mark:str=''):
"""
Takes what is returned by the knuth_plass_breaks() function and turns it
into a string depending on the given justification.
Note: This method assumes that all boxes in the given breaks have
characters (strings) in them and not other things like a picture or
something.
"""
def insert_spaces(string, num_spaces):
"""
Inserts the given number of spaces into the given string, trying to put
them in between words, working from the left side to the right.
"""
from random import randint
while True:
out = ''
added_space = False
add_space = False # used to make sure that we only add whitespace to where there was already whitespace
for ch in string:
if num_spaces > 0 and add_space == True and ch in WHITESPACE:
if randint(0, 1): # 50% chance to add space here
out += ' '
num_spaces -= 1
added_space = True
add_space = False
else:
add_space = True
out += ch
# If there was no opportunity to add a space, this is probably the last
# line of a justified paragraph, so it is left-justified anyway. Just
# add a space to the end.
if not added_space and num_spaces > 0:
out += ' '
num_spaces -= 1
if num_spaces <= 0:
break
string = out
return out
justify = justify.upper() # Justify constants are all upper-case, so make sure this matches as long as same word used
out = ''
curr_line = ''
for break_point_obj, line_info in breaks:
total_num_lines = line_info.total_num_lines
line_num = line_info.line_num
ratio = line_info.ratio
line_length = line_info.line_length
line_contents = line_info.line_contents
# -- Build the current line
for spec in line_contents:
if spec.is_glue():
if justify == JUSTIFY.FULL and (not (line_num == total_num_lines)):
# Need to add space in between words to fully justify text
# on the left and right
width = int(spec.r_width(ratio))
else:
# Not Full justified, so no extra spaces between the words.
width = 1
curr_line += ' ' * width
elif spec.is_box():
curr_line += spec.value # This assumes that the value is a string character
# -- Justify The Built Line
if (justify == JUSTIFY.LEFT) or (justify == JUSTIFY.FULL and line_num == total_num_lines):
curr_line = curr_line.lstrip(WHITESPACE_CHARS)
out += curr_line + (' ' * (line_length - len(curr_line)))
elif justify == JUSTIFY.RIGHT:
curr_line = curr_line.rstrip(WHITESPACE_CHARS)
out += (' ' * (line_length - len(curr_line))) + curr_line
elif justify == JUSTIFY.CENTER:
curr_line = curr_line.strip(WHITESPACE_CHARS)
total_spaces_needed = line_length - len(curr_line)
# NOTE: this will skew the text of this line left by 1 space if
# this line's text is not perfectly centerable. If had floating
# point width spaces, then would be perfectly centered always, but
# can't because using str's instead
right_spaces = total_spaces_needed // 2
left_spaces = total_spaces_needed - right_spaces
out += (' ' * left_spaces) + curr_line + (' ' * right_spaces)
elif justify == JUSTIFY.FULL:
# NOTE: Because the algorithm assumes that glues can have decimal
# widths but strings need ints, we have cut off some space when we
# converted them to integer widths. That is why we have to use
# `insert_spaces` here: some space was probably cut off so we need
# to add some back.
curr_line = insert_spaces(curr_line, line_length - len(curr_line))
out += curr_line
else:
raise Exception(f"Gave unknown justification specification: {justify}")
curr_line = ''
out += end_mark + "\n"
return out
# =============================================================================
# Main
# -----------------------------------------------------------------------------
def main():
short_text = """Among other public buildings in a certain town, which for many reasons it will be prudent to refrain from mentioning, and to which I will assign no fictitious name, there is one anciently common to most towns, great or small: to wit, a workhouse; and in this workhouse was born; on a day and date which I need not trouble myself to repeat, inasmuch as it can be of no possible consequence to the reader, in this stage of the business at all events; the item of mortality whose name is prefixed to the head of this chapter."""
medium_text = """For the next eight or ten months, Oliver was the victim of a systematic course of treachery and deception. He was brought up by hand. The hungry and destitute situation of the infant orphan was duly reported by the workhouse authorities to the parish authorities. The parish authorities inquired with dignity of the workhouse authorities, whether there was no female then domiciled in “the house” who was in a situation to impart to Oliver Twist, the consolation and nourishment of which he stood in need. The workhouse authorities replied with humility, that there was not. Upon this, the parish authorities magnanimously and humanely resolved, that Oliver should be “farmed,” or, in other words, that he should be dispatched to a branch-workhouse some three miles off, where twenty or thirty other juvenile offenders against the poor-laws, rolled about the floor all day, without the inconvenience of too much food or too much clothing, under the parental superintendence of an elderly female, who received the culprits at and for the consideration of sevenpence-halfpenny per small head per week. Sevenpence-halfpenny’s worth per week is a good round diet for a child; a great deal may be got for sevenpence-halfpenny, quite enough to overload its stomach, and make it uncomfortable. The elderly female was a woman of wisdom and experience; she knew what was good for children; and she had a very accurate perception of what was good for herself. So, she appropriated the greater part of the weekly stipend to her own use, and consigned the rising parochial generation to even a shorter allowance than was originally provided for them. Thereby finding in the lowest depth a deeper still; and proving herself a very great experimental philosopher."""
def print_out(*breaks_args, **kwargs):
kwargs["ret_vals"] = True
breaks = knuth_plass_breaks(*breaks_args, **kwargs)
print()
print("JUSTIFIED LEFT")
print("==============")
print(str_for_breaks(breaks, JUSTIFY.LEFT, '|'))
print()
print("JUSTIFIED RIGHT")
print("===============")
print(str_for_breaks(breaks, JUSTIFY.RIGHT, '|'))
print()
print("JUSTIFIED CENTER")
print("================")
print(str_for_breaks(breaks, JUSTIFY.CENTER, '|'))
print()
print("JUSTIFIED FULL")
print("==============")
print(str_for_breaks(breaks, JUSTIFY.FULL, '|'))
print("----------------------------------------")
print_out(make_paragraph(short_text), range(120, 20, -10), tolerance=1)
#print_out(make_paragraph(medium_text), 100, tolerance=1)
#print_out(make_paragraph(medium_long_text), 100, tolerance=1)
medium_long_text = \
"""Whether I shall turn out to be the hero of my own life, or whether that
station will be held by anybody else, these pages must show. To begin my
life with the beginning of my life, I record that I was born (as I have
been informed and believe) on a Friday, at twelve o’clock at night.
It was remarked that the clock began to strike, and I began to cry,
simultaneously.
In consideration of the day and hour of my birth, it was declared by
the nurse, and by some sage women in the neighbourhood who had taken a
lively interest in me several months before there was any possibility
of our becoming personally acquainted, first, that I was destined to be
unlucky in life; and secondly, that I was privileged to see ghosts and
spirits; both these gifts inevitably attaching, as they believed, to
all unlucky infants of either gender, born towards the small hours on a
Friday night.
I need say nothing here, on the first head, because nothing can show
better than my history whether that prediction was verified or falsified
by the result. On the second branch of the question, I will only remark,
that unless I ran through that part of my inheritance while I was still
a baby, I have not come into it yet. But I do not at all complain of
having been kept out of this property; and if anybody else should be in
the present enjoyment of it, he is heartily welcome to keep it.
I was born with a caul, which was advertised for sale, in the
newspapers, at the low price of fifteen guineas. Whether sea-going
people were short of money about that time, or were short of faith and
preferred cork jackets, I don’t know; all I know is, that there was but
one solitary bidding, and that was from an attorney connected with the
bill-broking business, who offered two pounds in cash, and the balance
in sherry, but declined to be guaranteed from drowning on any higher
bargain. Consequently the advertisement was withdrawn at a dead
loss--for as to sherry, my poor dear mother’s own sherry was in the
market then--and ten years afterwards, the caul was put up in a raffle
down in our part of the country, to fifty members at half-a-crown a
head, the winner to spend five shillings. I was present myself, and I
remember to have felt quite uncomfortable and confused, at a part of
myself being disposed of in that way. The caul was won, I recollect, by
an old lady with a hand-basket, who, very reluctantly, produced from it
the stipulated five shillings, all in halfpence, and twopence halfpenny
short--as it took an immense time and a great waste of arithmetic, to
endeavour without any effect to prove to her. It is a fact which will
be long remembered as remarkable down there, that she was never drowned,
but died triumphantly in bed, at ninety-two. I have understood that it
was, to the last, her proudest boast, that she never had been on the
water in her life, except upon a bridge; and that over her tea (to which
she was extremely partial) she, to the last, expressed her indignation
at the impiety of mariners and others, who had the presumption to go
‘meandering’ about the world. It was in vain to represent to her
that some conveniences, tea perhaps included, resulted from this
objectionable practice. She always returned, with greater emphasis and
with an instinctive knowledge of the strength of her objection, ‘Let us
have no meandering.’"""
if __name__ == "__main__":
main()
|
import requests
def status(url):
headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:66.0) Gecko/20100101 Firefox/66.0'}
try:
if 'http' not in url:
newURL = 'http://' + url
status = requests.get(newURL, headers=headers)
else:
status = requests.get(url, headers=headers)
return status.status_code
except requests.exceptions.RequestException:
return 'Could not make connection'
def appendPrefix(url):
if 'http' not in url:
newURL = 'http://' + url
return newURL
else:
return url
|
def my_max(num: list) -> int:
# list.sort() sorts in place and returns None, so use max() to get the largest value
return max(num)
def main():
# input
num = list(map(int,input().split()))
# compute
# output
print(my_max(num))
if __name__ == '__main__':
main()
# unfinished
|
# -*- coding: utf-8 -*-
import json
from datetime import datetime
import pytz
from utilities.sql_queries import logging_insert
async def log(*args, **kwargs):
"""
Log request data to SQLite
:param args:
:param kwargs:
request - request to the route
result - dict with answer to the client
time - processing time
code - http response code
:return:
"""
request = kwargs.get("request", None)
if request is None:
return
result = kwargs.get("result", {})
processing_time = kwargs.get("time", 0)
code = kwargs.get("code", 200)
await request.app["engine"].execute(
logging_insert,
(
str(request.url),
json.dumps(result),
str(datetime.now(tz=pytz.UTC)),
str(processing_time),
int(code)
)
)
await request.app["engine"].commit()
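# Illustrative call sketch (hypothetical handler values, not from the original file):
#   await log(request=request, result={"status": "ok"}, time=0.012, code=200)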
|
from typing import Dict, List, Callable
import traceback
from PyQt5 import uic
from PyQt5.QtWidgets import *
import utils
class Gui(QMainWindow):
def __init__(self, widget_type_id_dict: Dict[str, List[str]], gui_file_path: str, *args, **kwargs) -> None:
super().__init__(*args, **kwargs)
self.gui_file_path: str = utils.get_path_in_bundle_dir(gui_file_path)
self.widget_type_id_dict: dict = utils.load_json(widget_type_id_dict)
uic.loadUi(self.gui_file_path, self)
# Stores all loaded widgets for easy access
self._widget_objects = self._load_widget_objects()
self.show()
def _load_widget_objects(self) -> Dict[str, QWidget]:
"""
Returns a dictionary of QWidgets mapped by their ids
"""
widget_objects: Dict[str, QWidget] = {}
for object_type, id_list in self.widget_type_id_dict.items():
for object_id in id_list:
# I use 'exec' here to avoid having to specify object type and
# instead only define it in ids.json
exec("widget_objects[object_id] = self.findChild(" + object_type + ", object_id)")
return widget_objects
def get_widget(self, widget_id) -> QWidget:
"""
Returns a QWidget that matches the given id
"""
try:
return self._widget_objects[widget_id]
except Exception as e:
traceback.print_exc()
def add_event_listener(self, widget_id: str, on_event: Callable) -> None:
self._widget_objects[widget_id].clicked.connect(on_event)
|
# \HEADER\-------------------------------------------------------------------------
#
# CONTENTS : 1D2 read matching
#
# DESCRIPTION : none
#
# RESTRICTIONS : none
#
# REQUIRES : none
#
# ---------------------------------------------------------------------------------
# Copyright (c) 2018-2021, Pay Giesselmann, Max Planck Institute for Molecular Genetics
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
# Written by Pay Giesselmann
# ---------------------------------------------------------------------------------
import sys, re
import argparse
import pandas as pd
from signal import signal, SIGPIPE, SIG_DFL
# helper
# decode cigar into list of edits
def decodeCigar(cigar):
ops = [(int(op[:-1]), op[-1]) for op in re.findall(r'(\d*\D)', cigar) if op[:-1]]
return ops
# return length of recognized operations in decoded cigar
def opsLength(ops, recOps='MIS=X'):
n = [op[0] for op in ops if op[1] in recOps]
return sum(n)
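# Illustrative examples (not part of the original script):
#   decodeCigar('10M2I3D') -> [(10, 'M'), (2, 'I'), (3, 'D')]
#   opsLength(decodeCigar('10M2I3D')) -> 12 (M/I/S/=/X count; the 3-base deletion does not)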
if __name__ == '__main__':
signal(SIGPIPE,SIG_DFL)
# cmd arguments
parser = argparse.ArgumentParser(description="Compute summary from alignments")
parser.add_argument("output", help="output file")
args = parser.parse_args()
def sam_parser(iterable):
while True:
try:
line = next(iterable)
except StopIteration:
return
fields = line.strip().split('\t')
opt_fields = [tuple(x.split(':')) for x in fields[11:]]
opt_nm = [f[2] for f in opt_fields if f[0] == 'NM']
cigar = fields[5]
length = opsLength(decodeCigar(cigar), recOps='MIS=X') if cigar != "*" else len(fields[9]) if fields[9] != "*" else 0
mapped_length = opsLength(decodeCigar(cigar), recOps='MI=X') if cigar != "*" else 0
if opt_nm:
nm = int(opt_nm[0])
blast_identity = ((mapped_length-nm)/float(mapped_length)) if mapped_length > 0 else 0.0
yield fields[0], int(fields[1]), length, mapped_length, blast_identity
else:
yield fields[0], int(fields[1]), length, mapped_length
record_iter = (line for line in sys.stdin if not line.startswith('@'))
stat_iter = (sam_parser(record_iter))
df = pd.DataFrame(stat_iter)
if df.shape[1] == 4: # no NM tag in the input, so no identity column was yielded
df.columns = ['ID', 'flag', 'length', 'mapped_length']
df = df.astype({'ID': 'object', 'flag': 'int32', 'length': 'int32', 'mapped_length': 'int32'})
else:
df.columns = ['ID', 'flag', 'length', 'mapped_length', 'identity']
df = df.astype({'ID': 'object', 'flag': 'int32', 'length': 'int32', 'mapped_length': 'int32', 'identity': 'float32'})
df.to_hdf(args.output, 'stats')
|
# Generated by Django 3.0.4 on 2020-03-31 21:15
from django.db import migrations
def remove_unpublished(apps, schema_editor):
# Remove every abstract that is set as a submitted work
Work = apps.get_model("abstracts", "Work")
Work.objects.filter(state="su").delete()
class Migration(migrations.Migration):
dependencies = [("abstracts", "0017_auto_20200331_1642")]
operations = [migrations.RunPython(remove_unpublished)]
|
cu, cd = map(int, input().split())
a = list(map(int, input().split(' ')))
eff = cu - cd # net gain per full climb-and-slide cycle
count = 0
total = 0
for i in a:
t = i // eff
if (i - cu) % eff == 0:
t = (i - cu) // eff
if (i % eff) > 0:
t += 1
total += t
print(total)
|
del_items(0x8013AE8C)
SetType(0x8013AE8C, "unsigned char map_buf[61440]")
del_items(0x80149E8C)
SetType(0x80149E8C, "unsigned short *imgbuf[21]")
del_items(0x80149EE0)
SetType(0x80149EE0, "struct POLY_FT4 br[10][2][2]")
del_items(0x8014A520)
SetType(0x8014A520, "struct POLY_FT4 tmdc_pol[10][2][2]")
del_items(0x8014AB60)
SetType(0x8014AB60, "struct RECT mdc_buf[2]")
del_items(0x8014AB70)
SetType(0x8014AB70, "struct SVECTOR tmdc_pol_offs[10][10][2]")
del_items(0x8014B1B0)
SetType(0x8014B1B0, "struct mdc_header *mdc_idx[10]")
del_items(0x8014B1D8)
SetType(0x8014B1D8, "struct _mdecanim mdec_queue[16]")
del_items(0x8014B318)
SetType(0x8014B318, "struct DR_ENV mdec_drenv[2]")
del_items(0x8014B398)
SetType(0x8014B398, "int (*stream_buf[504])[30]")
del_items(0x8014B410)
SetType(0x8014B410, "struct strheader *stream_bufh[30]")
|
"""
Errors like the error cases from Rackspace Monitoring.
"""
from __future__ import division, unicode_literals
import attr
from six import text_type
@attr.s
class ParentDoesNotExist(Exception):
"""
Error that occurs when a parent object does not exist.
For instance, trying to access or modify a Check under a
non-existing Entity will cause this error.
"""
object_type = attr.ib(validator=attr.validators.instance_of(text_type))
key = attr.ib(validator=attr.validators.instance_of(text_type))
code = attr.ib(validator=attr.validators.instance_of(int), default=404)
def to_json(self):
"""
Serializes this error to a JSON-encodable dict.
"""
return {'type': 'notFoundError',
'code': self.code,
'txnId': '.fake.mimic.transaction.id.c-1111111.ts-123444444.v-12344frf',
'message': 'Parent does not exist',
'details': 'Object "{0}" with key "{1}" does not exist'.format(
self.object_type, self.key)}
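# Illustrative usage sketch (assumed values, not from the original module):
#   ParentDoesNotExist(object_type=u'Entity', key=u'en123').to_json()['code'] -> 404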
@attr.s
class ObjectDoesNotExist(Exception):
"""
Error that occurs when a required object does not exist.
"""
object_type = attr.ib(validator=attr.validators.instance_of(text_type))
key = attr.ib(validator=attr.validators.instance_of(text_type))
code = attr.ib(validator=attr.validators.instance_of(int), default=404)
def to_json(self):
"""
Serializes this error to a JSON-encodable dict.
"""
return {'type': 'notFoundError',
'code': self.code,
'txnId': '.fake.mimic.transaction.id.c-1111111.ts-123444444.v-12344frf',
'message': 'Object does not exist',
'details': 'Object "{0}" with key "{1}" does not exist'.format(
self.object_type, self.key)}
|
# -*- coding: utf-8 -*-
import datetime
import filecmp
import logging
import os
import shutil
import time
import fileCleanerConfig as config
class FileCleaner(object):
u"""不要なファイルを一か所にまとめる"""
def __init__(self):
self.sep = os.sep
self.test_mode = False
now = datetime.datetime.now()
file_name = 'log%s%s.log' % (self.sep, now.strftime('%Y-%m-%d'))
logging.basicConfig(filename=file_name, level=logging.DEBUG)
self.save_dir = '%s%s' % (config.SAVE_DIR, now.strftime('%Y-%m-%d'))
self.ignore_files = self._load_files(config.IGNORE_FILE)
self._create_save_dir(self.save_dir)
def _create_save_dir(self, save_dir):
u"""保存ディレクトリを作成する
:param str save_dir: 保存ディレクトリの絶対パス
"""
try:
if os.path.exists(save_dir):
logging.info('save directory: %s' % save_dir)
return
os.mkdir(save_dir)
logging.info('create save directory: %s' % save_dir)
except:
raise
def execute(self):
try:
start_time = time.time()
now = datetime.datetime.now()
logging.info('%s ---start---' % now.strftime('%Y/%m/%d %H:%M:%S'))
ignore_files = self._load_files(config.IGNORE_FILE)
list_dirs = os.listdir(config.TARGET_DIR)
logging.info('target lists: %s' % len(list_dirs))
for list_dir in list_dirs:
file_path = os.path.join(config.TARGET_DIR, list_dir)
if os.path.isdir(file_path):
for file_name in os.listdir(file_path):
self._file_clean(list_dir , file_name)
else:
self._file_clean('', list_dir)
end_time = time.time()
logging.info('execute time: (%.2fsec)' % (end_time - start_time))
logging.info('%s ---end---' % now.strftime('%Y/%m/%d %H:%M:%S'))
except Exception as e:
logging.warning(e)
print(e)
raise
def _load_files(self, file_name):
u"""処理対象外ファイルを読み込む
:param str file_name: ファイル名
:return: 対象外ファイルリスト
:rtype : list
"""
try:
ignore_files = []
if not os.path.exists(file_name):
return ignore_files
file = open(file_name)
ignore_files = file.readlines()
file.close()
return ignore_files
except:
raise
def _is_ignore(self, chk_file, ignore_files):
u"""対象外ファイル判定
:param str chk_file : 判定対象ファイル
:param list ignore_files: 対象外ファイルリスト
"""
try:
# To Do
return False
except:
raise
def _file_clean(self, dir_name, file_name):
u"""ファイルを目的の場所に移動させる
:param str dir_name : ディレクトリ名
:param str file_name : ファイル名
"""
try:
copy_src_file = os.path.join(config.TARGET_DIR, dir_name, file_name)
# check ignore lists
if self._is_ignore(copy_src_file, self.ignore_files):
logging.info('ignore file: %s' % file_name)
return
# copy
copy_dest_file = os.path.join(self.save_dir, dir_name, file_name)
if dir_name:
copy_dest_dir = os.path.join(self.save_dir, dir_name)
if not os.path.exists(copy_dest_dir):
os.mkdir(copy_dest_dir)
shutil.copy2(copy_src_file, copy_dest_file)
# check
if not filecmp.cmp(copy_src_file, copy_dest_file):
logging.info('find diff file: %s' % copy_src_file)
return
if self.test_mode:
return
# remove
# ToDo
except:
raise
if __name__ == '__main__':
from optparse import OptionParser
option_parser = OptionParser()
option_parser.add_option('--test',
dest='test_mode',
type='string',
help='test mode',
default=False)
options, args = option_parser.parse_args()
print('testMode=%s' % options.test_mode)
file_cleaner = FileCleaner()
file_cleaner.test_mode = options.test_mode
file_cleaner.execute()
|
# conda install -c conda-forge pyyaml
import yaml
import numpy as np
voltages = np.random.randint(2, size=(1,10))
d = {'Experiment':{
'name': 'This is a test Experiment',
'list' : ['first', 'second','third'],}
}
print(d['Experiment'])
with open(r'examples\yaml_test\experiment.yml', 'w') as f:
f.write(yaml.dump(d, default_flow_style=False))
|
import os
from argparse import ArgumentParser
try:
import winreg as reg
except ImportError:
import _winreg as reg
DEFAULT_CHROME_PATH = r'C:\Program Files'
CHROME_KEY = r'SOFTWARE\Microsoft\Windows\CurrentVersion\App Paths\chrome.exe'
QUERY_AND_SET_RIGHTS = reg.KEY_QUERY_VALUE | reg.KEY_SET_VALUE
KEY_INFO = dict(
key=reg.HKEY_LOCAL_MACHINE,
sub_key=CHROME_KEY,
access=QUERY_AND_SET_RIGHTS,
)
def patch(custom_path):
with reg.OpenKeyEx(**KEY_INFO) as key:
default_value, value_type = reg.QueryValueEx(key, '')
if default_value.startswith(DEFAULT_CHROME_PATH):
reg.SetValueEx(key, '', 0, value_type, custom_path)
print('Custom Chrome App Path changed')
else:
print(default_value)
def is_valid(parser, file_path):
if os.path.exists(file_path):
return file_path
else:
parser.error(f'Invalid file path {file_path}')
def main():
parser = ArgumentParser(description='Chrome Custom App Path')
parser.add_argument(
'-p',
dest='custom_path',
help='custom filename path to binary wrapper',
required=True,
type=lambda path: is_valid(parser, path),
)
args = parser.parse_args()
try:
patch(args.custom_path)
except PermissionError:
parser.exit(status=1, message='Run as Administrator\n')
except FileNotFoundError:
parser.exit(status=1, message='Chrome App Path key not found\n')
if __name__ == '__main__':
exit(main())
|
import duckdb
try:
import pyarrow as pa
import pyarrow.parquet
import numpy as np
can_run = True
except ImportError:
can_run = False
def compare_results(query):
true_answer = duckdb.query(query).fetchall()
arrow_table = duckdb.query(query).arrow()
from_arrow = duckdb.from_arrow_table(arrow_table).fetchall()
assert true_answer == from_arrow
class TestArrowNested(object):
def test_lists_basic(self,duckdb_cursor):
if not can_run:
return
#Test Constant List
query = duckdb.query("SELECT a from (select list_value(3,5,10) as a) as t").arrow()['a'].to_numpy()
assert query[0][0] == 3
assert query[0][1] == 5
assert query[0][2] == 10
# Empty List
query = duckdb.query("SELECT a from (select list_value() as a) as t").arrow()['a'].to_numpy()
assert len(query[0]) == 0
#Test Constant List With Null
query = duckdb.query("SELECT a from (select list_value(3,NULL) as a) as t").arrow()['a'].to_numpy()
assert query[0][0] == 3
assert np.isnan(query[0][1])
def test_list_types(self,duckdb_cursor):
if not can_run:
return
#Large Lists
data = pyarrow.array([[1],None, [2]], type=pyarrow.large_list(pyarrow.int64()))
arrow_table = pa.Table.from_arrays([data],['a'])
rel = duckdb.from_arrow_table(arrow_table)
res = rel.execute().fetchall()
assert res == [([1],), (None,), ([2],)]
#Fixed Size Lists
data = pyarrow.array([[1],None, [2]], type=pyarrow.list_(pyarrow.int64(),1))
arrow_table = pa.Table.from_arrays([data],['a'])
rel = duckdb.from_arrow_table(arrow_table)
res = rel.execute().fetchall()
assert res == [([1],), (None,), ([2],)]
#Complex nested structures with different list types
data = [pyarrow.array([[1],None, [2]], type=pyarrow.list_(pyarrow.int64(),1)),pyarrow.array([[1],None, [2]], type=pyarrow.large_list(pyarrow.int64())),pyarrow.array([[1,2,3],None, [2,1]], type=pyarrow.list_(pyarrow.int64()))]
arrow_table = pa.Table.from_arrays([data[0],data[1],data[2]],['a','b','c'])
rel = duckdb.from_arrow_table(arrow_table)
res = rel.project('a').execute().fetchall()
assert res == [([1],), (None,), ([2],)]
res = rel.project('b').execute().fetchall()
assert res == [([1],), (None,), ([2],)]
res = rel.project('c').execute().fetchall()
assert res == [([1,2,3],), (None,), ([2,1],)]
def test_lists_roundtrip(self,duckdb_cursor):
if not can_run:
return
# Integers
compare_results("SELECT a from (select list_value(3,5,10) as a) as t")
compare_results("SELECT a from (select list_value(3,5,NULL) as a) as t")
compare_results("SELECT a from (select list_value(NULL,NULL,NULL) as a) as t")
compare_results("SELECT a from (select list_value() as a) as t")
#Strings
compare_results("SELECT a from (select list_value('test','test_one','test_two') as a) as t")
compare_results("SELECT a from (select list_value('test','test_one',NULL) as a) as t")
#Big Lists
compare_results("SELECT a from (SELECT LIST(i) as a FROM range(10000) tbl(i)) as t")
#Multiple Lists
compare_results("SELECT a from (SELECT LIST(i) as a FROM range(10000) tbl(i) group by i%10) as t")
#Unique Constants
compare_results("SELECT a from (SELECT list_value(1) as a FROM range(10) tbl(i)) as t")
#Nested Lists
compare_results("SELECT LIST(le) FROM (SELECT LIST(i) le from range(100) tbl(i) group by i%10) as t")
#LIST[LIST[LIST[LIST[LIST[INTEGER]]]]]]
compare_results("SELECT list (lllle) llllle from (SELECT list (llle) lllle from (SELECT list(lle) llle from (SELECT LIST(le) lle FROM (SELECT LIST(i) le from range(100) tbl(i) group by i%10) as t) as t1) as t2) as t3")
compare_results('''SELECT grp,lst,cs FROM (select grp, lst, case when grp>1 then lst else list_value(null) end as cs
from (SELECT a%4 as grp, list(a) as lst FROM range(7) tbl(a) group by grp) as lst_tbl) as T;''')
#Tests for converting multiple lists to/from Arrow with NULL values and/or strings
compare_results("SELECT list(st) from (select i, case when i%10 then NULL else i::VARCHAR end as st from range(1000) tbl(i)) as t group by i%5")
|
# WAP to take the number of students as input,
# then ask marks for five subjects as English,
# Physics, Chemistry, Maths and Computer. If
# the total marks for any student are less than
# 200, then print he failed or else print passed.
# Use the dictionary to store the student name
# as key and marks as value.
n = int(input("Enter number of students: "))
result = {}
for i in range(n):
name = input("Enter student name: ")
english = int(input("Please enter English Marks: "))
math = int(input("Please enter Math score: "))
computers = int(input("Please enter Computer Marks: "))
physics = int(input("Please enter Physics Marks: "))
chemistry = int(input("Please enter Chemistry Marks: "))
total = english + math + computers + physics + chemistry
result[name] = total
for keys, values in result.items():
if values >= 200:
print(f'{keys}: Passed')
else:
print(f'{keys}: Failed')
|
#!/usr/bin/python
#
# Ensomniac 2022 Ryan Martin, ryan@ensomniac.com
# Andrew Stet, stetandrew@gmail.com
import os
import sys
from os.path import expanduser
class _SyncUtils:
def __init__(self):
pass
def GetServerSyncPackages(self, quiet=False):
from json import loads
from requests import post
# Get all active packages for the logged-in user
dash_data_path = os.path.join(expanduser("~"), ".dash")
if not os.path.exists(dash_data_path):
sys.exit("\nNot Authenticated\n")
dash_data = loads(open(dash_data_path, "r").read())
token = dash_data["user"]["token"]
response = post(
"https://dash.guide/Packages",
data={"f": "get_sync_manifest", "token": token}
)
try:
response = loads(response.text)
except:
sys.exit(f"\n** SERVER ERROR **\n\n{response}\n{response.text}\n")
if response.get("error"):
sys.exit(f"\n**** SERVER ERROR ****\n\n{response['error']}")
sync_packages = []
for package_data in response["packages"]:
if not quiet:
self.PrintPackageDetails(package_data)
sync_packages.append(package_data)
return sync_packages
def PrintPackageDetails(self, package_data):
print_keys = [
"asset_path",
"domain",
"git_repo",
"srv_path_git_oapi",
"srv_path_local",
"srv_path_http_root",
"usr_path_git",
]
if not package_data.get("usr_path_git"):
msg = "Warning: " + package_data["display_name"] + " "
msg += "is missing a local path to sync to and will be ignored"
print(msg)
print("\tResolve this by adding a local path at https://dash.guide/")
return
usr_path_git = package_data.get("usr_path_git")
if not os.path.exists(usr_path_git):
msg = "Warning: " + package_data["display_name"] + " "
msg += "has a local sync path set, but it doesn't exist on this machine."
msg += "\n\tExpected: '" + usr_path_git + "'"
print(msg)
print("\tResolve this by correcting your Local GitHub Repo Path path at https://dash.guide/")
return
print(package_data["display_name"])
for key in print_keys:
print("\t" + key + ": " + str(package_data[key]))
print()
def CheckForRunningProcess(self, script_name):
# script_name = dashsync
from psutil import Process
pid_list = self.get_pids(script_name)
another_process_running = False
active_ids = [str(os.getpid()), str(os.getpid() - 1)]
for pid in pid_list:
if str(pid) in active_ids:
continue
process = Process(int(pid))
if script_name not in str(process.cmdline()):
continue
another_process_running = pid
break
if another_process_running:
from signal import SIGKILL
try:
os.killpg(int(another_process_running), SIGKILL)
except:
os.system(f"kill -9 {str(another_process_running)}")
print("\nFound existing '" + script_name + "' running...")
print("\tKilled.\n")
def get_pids(self, script_name):
from subprocess import check_output
pids = []
result = check_output(["ps -eaf"], shell=True).decode()
try:
for line in result.split("\n"):
if script_name not in line:
continue
if "terminated" in line.lower():
continue
pid = str(line.split()[1].strip())
pids.append(pid)
except:
pass
return pids
def FindServerClientRoots(self, package):
server_root, client_root = "", ""
if not package["usr_path_git"]:
sys.exit("\nERROR: You haven't set your 'usr_path_git' for package: " + package["asset_path"] + "\n")
print(package["usr_path_git"])
print(os.path.expanduser(package["usr_path_git"]))
all_files = [
os.path.join(dp, f)
for dp, dn, fn in os.walk(os.path.expanduser(package["usr_path_git"]))
for f in fn
]
# For all packages except pydash, we assume there is a cgi-bin/ folder
server_path_anchor = "/cgi-bin/"
if package["asset_path"] == "pydash":
server_path_anchor = "/pydash/Dash/"
for filename in all_files:
if "/.git/" in filename:
continue
if not client_root and filename.endswith("index.html"):
croot = "/".join(filename.split("/")[:-1]) + "/"
cbin_root = os.path.join(croot, "bin/")
cdash_root = os.path.join(croot, "dash/")
if os.path.exists(cbin_root) and os.path.exists(cdash_root):
client_root = croot
if not server_root and server_path_anchor in filename:
server_root = "/".join(filename.split(server_path_anchor)[:-1]) + "/"
if package["asset_path"] == "pydash":
# Slightly different naming convention since this is a py module
server_root = os.path.join(server_root, *server_path_anchor.split("/"))
if server_root and client_root:
break
# These are just tests to triple check the
# things we're expecting to exist actually do
if server_root and not os.path.exists(server_root):
sys.exit("\nError: Failed to find local system path - expected " + server_root + "\n")
if client_root and not os.path.exists(client_root):
sys.exit("\nError: Failed to find local system path - expected " + client_root + "\n")
return server_root, client_root
def FindDashClientPaths(self, packages):
pydash_package = None
for package in packages:
if package["asset_path"] != "pydash":
continue
pydash_package = package
break
if not pydash_package:
raise Exception("\nError: Did not find PyDash package locally, cannot monitor dash client\n")
# print("\nWarning: Did not find PyDash package locally, will not monitor dash client\n")
#
# return
dash_git_root = pydash_package["usr_path_git"]
client_path_full = os.path.join(dash_git_root, "client", "full/")
client_path_min = os.path.join(dash_git_root, "client", "min/")
if not os.path.exists(client_path_full):
raise Exception("\nWarning: Dash client code missing. Expected: '" + client_path_full + "'\n")
# print("\nWarning: Dash client code missing. Expected: '" + client_path_full + "'\n")
# if not os.path.exists(client_path_min):
# print("\nWarning: Did Dash client code missing. Expected: '" + client_path_min + "'\n")
return client_path_full, client_path_min, pydash_package
def GetLocalDashClientPaths(self, packages):
# return all valid local paths to any Dash
# client packages on this user's machine
distribution_packages = []
for package in packages:
usr_path_git = package.get("usr_path_git")
if not usr_path_git:
continue
client_root = os.path.join(usr_path_git, "client/")
if not os.path.exists(client_root):
print("\tWarning: Client path doesn't exist! Expected: " + client_root)
continue
package["client_root"] = client_root
sync_client_root = os.path.join(usr_path_git, "sync_client", "client/")
if os.path.exists(sync_client_root):
package["sync_client_root"] = sync_client_root
distribution_packages.append(package)
return distribution_packages
@property
def LocalDashPackageRoot(self):
dash_link_path = __file__.split("/Dash/DashSync/")[0] + "/Dash/"
pydash_root = os.path.realpath(dash_link_path)
dash_package_root = pydash_root.split("/pydash/")[0] + "/"
if not os.path.exists(dash_package_root):
sys.exit("Failed to locate dash root! Expected " + dash_package_root)
return dash_package_root
@property
def VersionInfoPath(self):
dash_package_root = self.LocalDashPackageRoot
version_path = os.path.join(dash_package_root, "local", "version_info.json")
if not os.path.exists(version_path):
sys.exit("Failed to locate version path! Expected " + version_path)
return version_path
SyncUtils = _SyncUtils()
|
#!/usr/bin/env python3
import json
import string
import random
import argparse
from bs4 import BeautifulSoup
draw_obj_keys = {'x': 1, 'y': 1,
'rotation': 1, 'id': 1,
'width': 1, 'height': 1,
'uid': 1, 'order': 1,
'lockAspectRatio': 1, 'lockShape': 1,
'constraints': 1, 'graphic': 1,
'linkMap': 1, 'hidden': 1,
'layerId': 1, 'flipHorizontal': 1,
'flipVertical': 1, 'children': 1}
# From stackoverflow with mods
def id_generator(size=20, chars=string.ascii_uppercase + string.digits + string.ascii_lowercase):
return ''.join(random.choice(chars) for _ in range(size))
class Gliffy(object):
def __init__(self, filename):
self.draw_objs = []
self.obj_keys = {}
self.draw_io_id = id_generator()
with open(filename, "r") as ifh:
self.gobj = json.load(ifh)
self.stage = self.gobj['stage']
self.raw_draw_objs = self.stage['objects']
# print(self.raw_draw_objs)
for raw_draw_obj in self.raw_draw_objs:
dobj = GliffyObj(raw_draw_obj, self.draw_io_id)
#print(dobj)
self.draw_objs.append(dobj)
def emit_drawio(self):
text_array = []
text_array.append('<?xml version="1.0" encoding="UTF-8"?>')
text_array.append('<mxfile host="Electron" modified="2021-06-09T19:48:40.870Z" agent="5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) draw.io/14.5.1 Chrome/89.0.4389.82 Electron/12.0.1 Safari/537.36" etag="43a6SbNFbkbtn3BeCMYf" version="14.5.1" type="device">')
text_array.append('<diagram id="Q0eEivNgb1EWn1eKrHUD" name="Page-1">')
text_array.append('<mxGraphModel dx="1426" dy="1025" grid="1" gridSize="10" guides="1" tooltips="1" connect="1" arrows="1" fold="1" page="1" pageScale="1" pageWidth="1169" pageHeight="827" math="0" shadow="0">')
text_array.append('<root>')
text_array.append('<mxCell id="0" />')
text_array.append('<mxCell id="1" parent="0" />')
placement = []
shape_count = 1
for i, draw_obj in enumerate(self.draw_objs):
#print(i)
#print(draw_obj.emit_drawio(i+1))
emitted_drawing_elem, shape_count = draw_obj.emit_drawio_elem(shape_count)
text_array.extend( emitted_drawing_elem )
if draw_obj.placement is not None:
placement.append(draw_obj.placement)
#print(placement)
text_array.append('</root>')
text_array.append('</mxGraphModel>')
text_array.append('</diagram>')
text_array.append('</mxfile>')
return '\n'.join(text_array)
if False:
keys = list(raw_draw_objs.keys())
if False:
for key in keys:
print()
print()
print("Key", key)
print(raw_draw_objs[key])
print()
stage = raw_draw_objs['stage']
vobj = stage
keys = list(vobj.keys())
if False:
for key in keys:
print()
print()
print("Key", key)
print(vobj[key])
print()
vobj = stage['objects'][67]
keys = list(vobj.keys())
if False:
for key in keys:
print()
print()
print("Key", key)
print(vobj[key])
if True:
for key in keys:
print(f" self.{key} = self.obj.get('{key}', None)")
class GliffyObj(object):
def __init__(self, raw_draw_obj, draw_io_id, is_child=False):
self.obj = raw_draw_obj
self.my_children = []
self.draw_io_id = draw_io_id
self.is_child = is_child
self._populate()
def _populate(self):
self.x = self.obj.get('x', None)
self.y = self.obj.get('y', None)
self.rotation = self.obj.get('rotation', None)
self.id = self.obj.get('id', None)
self.width = self.obj.get('width', None)
self.height = self.obj.get('height', None)
self.uid = self.obj.get('uid', None)
self.order = self.obj.get('order', None)
self.lockAspectRatio = self.obj.get('lockAspectRatio', None)
self.lockShape = self.obj.get('lockShape', None)
self.constraints = self.obj.get('constraints', None)
self.graphic = self.obj.get('graphic', None)
self.linkMap = self.obj.get('linkMap', None)
self.children = self.obj.get('children', None)
self.hidden = self.obj.get('hidden', None)
self.layerId = self.obj.get('layerId', None)
self.flipHorizontal = self.obj.get('flipHorizontal', None)
self.flipVertical = self.obj.get('flipVertical', None)
self.text = None
self._get_graphic()
# Check for new fields
def _shape_drawio(self,tid):
self.shape_lookup_drawio = {
"com.gliffy.stencil.ellipse.basic_v1": "ellipse;whiteSpace=wrap;html=1;aspect=fixed;fillColor=#ffe6cc;strokeColor=#d79b00;",
"com.gliffy.stencil.rhombus.basic_v1": "shape=parallelogram;perimeter=parallelogramPerimeter;whiteSpace=wrap;html=1;fixedSize=1;",
"com.gliffy.stencil.start_end.flowchart_v1": "strokeWidth=1;html=1;shape=mxgraph.flowchart.start_2;whiteSpace=wrap;",
"com.gliffy.stencil.diamond.basic_v1": "strokeWidth=1;html=1;shape=mxgraph.flowchart.decision;whiteSpace=wrap;",
"com.gliffy.stencil.document.flowchart_v1": "shape=note;whiteSpace=wrap;html=1;backgroundOutline=1;darkOpacity=0.05;size=8;",
"com.gliffy.stencil.rectangle.basic_v1": "rounded=0;whiteSpace=wrap;html=1;",
"com.gliffy.stencil.network.network_v4.business.user": "shape=actor;whiteSpace=wrap;html=1;labelPosition=center;verticalLabelPosition=bottom;align=center;verticalAlign=top;"
}
drawio_shape = self.shape_lookup_drawio.get(tid, None)
if drawio_shape is None:
print(f"WARNING: Giffy tid {tid} doesn't have a translation. Giving it an ORANGE RECTANGLE for now")
print(f" Adjust data structure self.shape_lookup_drawio to add new translation.\n")
return "rounded=0;whiteSpace=wrap;html=1;fillColor=#FF8000;"
return drawio_shape
def _get_text(self):
text = ""
if self.text is not None:
soup = BeautifulSoup(self.text, 'lxml')
text = soup.text
text = text.replace('&', '&amp;')
text = text.replace('"', '&quot;')
return text
def emit_drawio_shapes(self, number):
output = []
self.placement = None
if self.type == 'Shape':
self.my_shape_count = number
# <mxCell id="WpHb4AEowC1BbJerPiXC-2" value="Text for the&nbsp;<br>ages" style="rounded=0;whiteSpace=wrap;html=1;" vertex="1" parent="1">
# <mxGeometry x="200" y="420" width="120" height="60" as="geometry" />
# </mxCell>
text = ""
if len(self.my_children) > 0:
for child in self.my_children:
child._get_text()
text = text + child._get_text()
style = self._shape_drawio(self.tid)
x = self.x
y = self.y
width = self.width
height = self.height
self.placement = {f"{self.draw_io_id}-{number}": [x,y,width,height]}
mxCell_open = f'<mxCell id="{self.draw_io_id}-{number}" value="{text}" style="{style}" vertex="1" parent="1">'
mxGeometry = f'\t<mxGeometry x="{x}" y="{y}" width="{width}" height="{height}" as="geometry" />'
mxCell_close = '</mxCell>'
output = [mxCell_open, mxGeometry, mxCell_close]
number += 1
return output, number
def emit_drawio_lines(self, number):
"""Not Complete or even Called"""
output = []
self.placement = None
if self.type == 'Line':
self.my_shape_count = number
# <mxCell id="WpHb4AEowC1BbJerPiXC-5" style="edgeStyle=orthogonalEdgeStyle;rounded=0;orthogonalLoop=1;jettySize=auto;html=1;" edge="1" parent="1" source="WpHb4AEowC1BbJerPiXC-3" target="WpHb4AEowC1BbJerPiXC-2">
# <mxGeometry relative="1" as="geometry" />
# </mxCell>
text = ""
if len(self.my_children) > 0:
for child in self.my_children:
child._get_text()
text = text + child._get_text()
style = self._shape_drawio(self.tid)
x = self.x
y = self.y
width = self.width
height = self.height
self.placement = {f"{self.draw_io_id}-{number}": [x,y,width,height]}
mxCell_open = f'<mxCell id="{self.draw_io_id}-{number}" value="{text}" style="{style}" vertex="1" parent="1">'
mxGeometry = f'\t<mxGeometry x="{x}" y="{y}" width="{width}" height="{height}" as="geometry" />'
mxCell_close = '</mxCell>'
output = [mxCell_open, mxGeometry, mxCell_close]
number += 1
return output, number
def emit_drawio_text(self, number):
output = []
self.placement = None
if self.type == 'Text' and not self.is_child:
self.my_shape_count = number
# <mxCell id="MH50X9NqIrNpNXkGlf9i-19" value="Text" style="text;html=1;strokeColor=none;fillColor=none;align=center;verticalAlign=middle;whiteSpace=wrap;rounded=0;fontColor=#000066;" vertex="1" parent="1">
# <mxGeometry x="530" y="440" width="40" height="20" as="geometry" />
# </mxCell>
# TODO pull style from text of gliffy
style = "text;html=1;strokeColor=none;fillColor=none;align=center;verticalAlign=middle;whiteSpace=wrap;rounded=0;fontColor=#000066;"
text = self._get_text()
x = self.x
y = self.y
width = self.width
height = self.height
self.placement = {f"{self.draw_io_id}-{number}": [x,y,width,height]}
mxCell_open = f'<mxCell id="{self.draw_io_id}-{number}" value="{text}" style="{style}" vertex="1" parent="1">'
mxGeometry = f'\t<mxGeometry x="{x}" y="{y}" width="{width}" height="{height}" as="geometry" />'
mxCell_close = '</mxCell>'
output = [mxCell_open, mxGeometry, mxCell_close]
number += 1
return output, number
def emit_drawio_elem(self, shape_count):
output = []
if self.type == 'Shape':
output, shape_count = self.emit_drawio_shapes(shape_count)
elif self.type == 'Text':
output, shape_count = self.emit_drawio_text(shape_count)
return output, shape_count
def _get_graphic(self):
#print(self.graphic)
#print()
self.type = self.graphic.get('type', None)
#print(self.type)
if self.type == 'Shape':
self.shape = self.graphic['Shape']
self.tid = self.shape['tid']
# TODO: This may be problematic for a number of reasons; keep an eye on this
self.__dict__.update({k: v for k, v in self.shape.items()})
print(self.fillColor)
print(self.strokeColor)
#print(list(self.shape.keys()))
#print(self.tid)
if self.children is not None:
#print(self.children)
for child in self.children:
self.my_children.append(GliffyObj(child, self.draw_io_id, True))
#print()
elif self.type == 'Line':
self.line = self.graphic['Line']
print(self.line)
print()
#print(list(line.keys()))
elif self.type == 'Text':
self.text = self.graphic['Text']['html']
else:
print("WARNING: FOUND SOMETHING UNEXPECTED")
def give_me_keys(self):
key_list = list(self.obj.keys())
# {key: value for (key, value) in iterable}
key_dict = {key: 1 for key in key_list}
return(key_dict)
if __name__ == "__main__":
# Parse the CLI
parser = argparse.ArgumentParser(description='Convert (partially) Gliffy drawings to Draw.io/Diagram.net drawings ')
parser.add_argument('gliffy_drawing',
help='Name of the gliffy to convert')
parser.add_argument('drawio_drawing_xml',
help='Name of the output drawio aware xml document. Example: "ThanksGliffy.xml"')
args = parser.parse_args()
print("Attempting to convert:")
print("\tGliffy File", args.gliffy_drawing)
print("\tDraw.io XML File", args.drawio_drawing_xml)
glif = Gliffy(args.gliffy_drawing)
with open(args.drawio_drawing_xml, "w") as ofh:
ofh.write(glif.emit_drawio())
|
class Pessoa:
def __init__(self, nome, rg, cpf, telefone):
self.nome = nome
self.__rg = rg
self.__cpf = cpf
self.telefone = telefone
def get_cpf(self):
return self.__cpf
def set_cpf(self, cpf):
self.__cpf = cpf
return self.__cpf
def get_rg(self):
return self.__rg
def set_rg(self, rg):
self.__rg = rg
return self.__rg
def exibir_dados(self):
pass
class Medico(Pessoa):
def __init__(self, nome, rg, cpf, telefone, crm, salario, especialidade):
super().__init__(nome, rg, cpf, telefone)
self.crm = crm
self.salario = salario
self.especialidade = especialidade
def dados_medico(self):
super().exibir_dados()
print('Telefone: ', self.telefone)
print('CRM: ', self.crm)
print('Salário: ', self.salario)
print('Especialidade: ', self.especialidade)
class Paciente(Pessoa):
def __init__(self, nome, rg, cpf, telefone, endereco, nascimento):
super().__init__(nome, rg, cpf, telefone)
self.endereco = endereco
self.nascimento = nascimento
medico_01 = Medico('Fulano de Tal', 12345647, 99933366645, 1199553644, '1111/SP', 2500.50, 'Ortopedista')
medico_01.dados_medico()
#https://pt.stackoverflow.com/q/480274/101
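# Hypothetical usage sketch (illustrative values only) exercising the Paciente
# subclass and the Pessoa getters defined above.
paciente_01 = Paciente('Beltrano da Silva', 7654321, 11122233344, 11988887777, 'Rua Exemplo, 100', '01/01/1990')
print('Paciente:', paciente_01.nome)
print('CPF:', paciente_01.get_cpf())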
|
def load_Data_forCox():
""" Function that loads data for the three matrix needed:
X containing the clinical, images, Immunoscore DATA
Y the survival time
delta the censure status
open the file for precision about the clinical data available,drop the not-needed ones in the notebook
cleaning of data consists of deleting the clinical data having only one level when categorical
"""
import os
import pandas
import numpy
# Path to the folder containing all the data
SAVE_CLI_PATH=os.path.join('..')
#List of usable patients
data_ID=pandas.read_csv(os.path.join('..','List_Patient_HEGP.csv'),sep=",")
#### GENERATING THE X MATRIX ####
# The data theoretically available is described in the file: Immunoscore Clinical Data and Formating Requirements 10.2.2015.xlsx
# In practice not all the columns are filled, for different reasons; one is that the information does not apply to the cohort, so the whole column is empty.
# For ethical reasons, the column race is always ignored.
# Other ignored columns are the ones pertaining to:
# the dates of possible undergone surgeries
# the Survival data
# the additional markers (free text, not usable in a survival study)
# Some of the data is categorical, with 2 to 6 different categories (not counting missing information)
# Missing data is represented by an empty cell in the original file
# The Immunoscore
Data_IS_MDT=pandas.read_csv(os.path.join('../Data_output','HEGP_Immunoscore_mean.csv'),sep=',')
#Clinical data, only requested columns
data_cl=pandas.read_csv(os.path.join('../Data_output','data_HEGP_Clinical.txt'),sep=';')
col_names=['OfficialID','gender','age','t_stage_mod_6is4_7is1','n_stage','plnode','m_stage','UICC_TNM','preop_chemo','postop_chemo','postop_biotherapy','surg_pt_type','nlnode','plnode','colon','sidedness','cecum','ascending','splenflex','transverse','hepflex','descending','sigmoid','immuno_tx','differentiation','mucinous_colloide','occlusion','perforation','venous_emboli','lymphatic_invasion','perineural_invasion','msi_ihc','msi_gen','lynch_syndrome','fap','ibs','tumor_budding','cimp','p53_status','kras_status','apc_status','braf_status','pi3k_status','MSI_NBP','CD3_Tumor_Density','CD3_IM_Density','CD8_Tumor_Density','CD8_IM_Density','Immunoscore']
df_final=pandas.DataFrame(data=data_cl[['OfficialID','gender','age','t_stage_mod_6is4_7is1','n_stage','plnode','m_stage','UICC_TNM','preop_chemo','postop_chemo','postop_biotherapy',
'surg_pt_type','nlnode','plnode','colon','sidedness','cecum','ascending','splenflex','transverse','hepflex','descending',
'sigmoid','immuno_tx','differentiation','mucinous_colloide','occlusion','perforation','venous_emboli','lymphatic_invasion','perineural_invasion',
'msi_ihc','msi_gen','lynch_syndrome','fap','ibs','tumor_budding','cimp','p53_status','kras_status','apc_status','braf_status','pi3k_status','MSI_NBP']])
df_final.set_index('OfficialID',inplace=True,drop=False)
Data_IS_MDT.set_index('OfficialID',drop=False,inplace=True)
# Clinical data for the usable patients; the others are dropped
data_ID['OfficialID']
df_final=df_final.merge(data_ID,on='OfficialID')
df_final.set_index('OfficialID',inplace=True,drop=False)
# Get the density and Immunoscore of the usable patients in their own DataFrame, always keeping the OfficialID for safety
df_CD3_CT=pandas.DataFrame(columns=['OfficialID','CD3_Tumor_Density'])
df_CD3_IM=pandas.DataFrame(columns=['OfficialID','CD3_IM_Density'])
df_CD8_CT=pandas.DataFrame(columns=['OfficialID','CD8_Tumor_Density'])
df_CD8_IM=pandas.DataFrame(columns=['OfficialID','CD8_IM_Density'])
df_IS=pandas.DataFrame(columns=['OfficialID','Immunoscore'])
for patient_id in list(data_ID['OfficialID']):
CD3PATH=os.path.join(SAVE_CLI_PATH,'Data_output','cache_HEGP','cd3',patient_id)
CD8PATH=os.path.join(SAVE_CLI_PATH,'Data_output','cache_HEGP','cd8',patient_id)
df=pandas.DataFrame(data=[[patient_id,[pandas.read_csv(os.path.join(CD3PATH,'Ratio_Cells_per_Tumor'),sep=' ',header=None)]]],columns=['OfficialID','CD3_Tumor_Density'])
df_CD3_CT=pandas.concat([df_CD3_CT,df])
df=pandas.DataFrame(data=[[patient_id,[pandas.read_csv(os.path.join(CD3PATH,'Ratio_Cells_per_InvasiveFront'),sep=' ',header=None)]]],columns=['OfficialID','CD3_IM_Density'])
df_CD3_IM=pandas.concat([df_CD3_IM,df])
df=pandas.DataFrame(data=[[patient_id,[pandas.read_csv(os.path.join(CD8PATH,'Ratio_Cells_per_Tumor'),sep=' ',header=None)]]],columns=['OfficialID','CD8_Tumor_Density'])
df_CD8_CT=pandas.concat([df_CD8_CT,df])
df=pandas.DataFrame(data=[[patient_id,[pandas.read_csv(os.path.join(CD8PATH,'Ratio_Cells_per_InvasiveFront'),sep=' ',header=None)]]],columns=['OfficialID','CD8_IM_Density'])
df_CD8_IM=pandas.concat([df_CD8_IM,df])
df=pandas.DataFrame(data=[[patient_id,Data_IS_MDT.at[patient_id,'IS_MDT']]],columns=['OfficialID','Immunoscore'])
df_IS=pandas.concat([df_IS,df])
# Merging the dataFrame, based on OfficialID
df_final=df_final.merge(df_CD3_CT,on='OfficialID')
df_final=df_final.merge(df_CD3_IM,on='OfficialID')
df_final=df_final.merge(df_CD8_CT,on='OfficialID')
df_final=df_final.merge(df_CD8_IM,on='OfficialID')
df_final=df_final.merge(df_IS,on='OfficialID')
#Setting the index on the final DataFrame, patient id.
df_final.set_index('OfficialID',inplace=True,drop=True)
colTypes = pandas.read_csv('datatypes.csv',sep=',')
#Iterate through each row and assign variable type.
#Note: astype is used to assign types
for i, row in colTypes.iterrows(): #i: dataframe index; row: each row in series format
if row['type']=="categorical":
df_final[row['feature']]=df_final[row['feature']].astype(object)
elif row['type']=="continuous":
df_final[row['feature']]=df_final[row['feature']].astype(float)
#### GENERATING Y and Delta ####
#Survival data
data_Survival=pandas.read_csv(os.path.join('../Data_output','data_HEGP_Survival.txt'),sep=";")
# We are looking here at the TTR, Time To Relapse.
# The event is the relapse.
# In the file, rc_stat: 1 means a relapse has occurred
# time in TTR_2012_Days.
# output: time dataframe index on OfficialID time in TTR_2012_Days.
# output: censure dataframe index on Official ID
data_TTR=pandas.DataFrame(data=data_Survival[['OfficialID','TTR_2012_Days','rc_stat']])
data_TTR.set_index('OfficialID',inplace=True,drop=False)
#Selecting only usable patients
data_ID['OfficialID']
data_TTR=data_TTR.merge(data_ID,on='OfficialID')
data_TTR.set_index('OfficialID',inplace=True,drop=False)
data_time=pandas.DataFrame(data=data_TTR[['OfficialID','TTR_2012_Days']])
data_time.set_index('OfficialID',inplace=True,drop=True)
data_cens=pandas.DataFrame(data=data_TTR[['OfficialID','rc_stat']])
data_cens.set_index('OfficialID',inplace=True,drop=True)
return df_final,data_time,data_cens
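# Minimal usage sketch (assumes the CSV/cache layout described in the comments
# above exists on disk): X is the covariate matrix, Y the survival times and
# delta the censoring status, e.g. as inputs to a Cox model.
if __name__ == '__main__':
    X, Y, delta = load_Data_forCox()
    print(X.shape, Y.shape, delta.shape)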
|
class <warning descr="Python version 3.7 does not allow 'async' and 'await' as names">a<caret>wait</warning>(object):
pass
|
from datetime import datetime, date
from dateutil.relativedelta import relativedelta
import json
import math
from typing import Any, List, Optional, Dict
from fastapi import APIRouter, Depends, HTTPException, Query, Request, Response
from app import crud, schemas
from app.api import deps
router = APIRouter()
@router.post("/")
def create_operator_bot(
*,
cryptobot_config_in: schemas.OperatorBotCreate,
) -> Any:
"""
Create new operator bot.
"""
bot_name, operator_bot = crud.operator_bot.create_bot(obj_in=cryptobot_config_in)
# bot_name = f'{cryptobot_config_in.user_id}-{cryptobot_config_in.binance_config_base_currency}{cryptobot_config_in.binance_config_quote_currency}'
return {"message": f"Operator '{bot_name}' created."}
@router.put("/{name}")
def update_cryptobot(
*,
name: str,
cryptobot_config_in: schemas.OperatorBotUpdate,
) -> Any:
"""
Update an operator bot.
"""
crud.operator_bot.update_bot(bot_name=name, obj_in=cryptobot_config_in)
return {"message": f"Operator '{name}' updated."}
@router.get("/{name}", response_model=schemas.OperatorBot)
def read_cryptobot(
*,
name: str,
) -> Any:
"""
Get operator bot by name.
"""
operator_bot = crud.operator_bot.get_bot(bot_name=name)
return operator_bot
# @router.delete("/{name}", response_model=schemas.CryptobotDelete)
@router.delete("/{name}")
def delete_cryptobot(
*,
name: str,
) -> Any:
"""
Delete an operator bot.
"""
crud.operator_bot.delete_bot(bot_name=name)
return {"message": f"Operator '{name}' deleted."}
|
import os
import re
import shutil
import subprocess
import click
import yaml
@click.command()
@click.option("-c", "--commit")
@click.argument("file_to_upgrade")
def main(commit, file_to_upgrade):
file_upgraded = ".tmp".join(os.path.splitext(file_to_upgrade))
file_to_upgrade_final_name = ".old".join(os.path.splitext(file_to_upgrade))
ref_file = "en.yml"
ref_file_backup = ".bak".join(os.path.splitext(ref_file))
old_ref_file = ".old".join(os.path.splitext(ref_file))
if os.path.isfile(file_to_upgrade_final_name):
answer = input(
f"The file {file_to_upgrade_final_name} will be overwritten. Confirm? [y/N] "
)
if not answer.lower().replace(" ", "") in ("y", "yes"):
print("Exiting...")
return
shutil.copyfile(ref_file, ref_file_backup)
subprocess.run(["git", "checkout", "-q", commit, ref_file])
shutil.copyfile(ref_file, old_ref_file)
os.replace(ref_file_backup, ref_file)
def extract_translation_flat_keys(filename):
# Return a 1-dimensional dictionary with keys in the format a.b.c
with open(filename, "r") as f:
content = yaml.safe_load(f)
def gettrans(pre, d):
keys = {}
for k in d:
new_pre = k if pre == "" else pre + "." + k
if type(d[k]) == dict:
keys.update(gettrans(new_pre, d[k]))
else:
keys[new_pre] = d[k]
return keys
return gettrans("", content)
old_ref = extract_translation_flat_keys(old_ref_file)
new_ref = extract_translation_flat_keys(ref_file)
mapping = {}
for key_old, val_old in old_ref.items():
found = False
for key_new, val_new in new_ref.items():
if val_old == val_new:
mapping[key_old] = key_new
found = True
break
if not found:
print(f"Can't find mapping from {key_old}")
to_upgrade = extract_translation_flat_keys(file_to_upgrade)
upgraded = {}
for key, val in to_upgrade.items():
if not key in mapping:
print(f"Can't upgrade key {key} from file")
continue
upgraded[mapping[key]] = val
with open(ref_file, "r") as f:
ref_content = f.readlines()
path = []
new_file = ""
for raw_line in ref_content:
line = raw_line.replace("/n", "")
line = line.replace(" ", " ")
search_for_key = re.findall("^ *(([a-z]|_|-|\.)*):(.*)", line)
if len(search_for_key) == 0:
new_file += "\n"
continue
key = search_for_key[0][0]
val = search_for_key[0][2]
level = 0
for c in line:
if c == " ":
level += 1
else:
break
if level > len(path):
print("Badly formatted file. Halting.")
break
if level == len(path):
path.append(key)
else:
path = path[:level]
path.append(key)
new_val = ""
if val.replace(" ", "") != "":
flat_key = ".".join(path)
if not flat_key in upgraded:
print(f"Cannot find key {flat_key} in update")
continue
new_val = f' "{upgraded[flat_key]}"'
new_file += " " * level + key + ":" + new_val + "\n"
with open(file_upgraded, "w") as f:
f.write(new_file)
shutil.copyfile(file_to_upgrade, file_to_upgrade_final_name)
shutil.copyfile(file_upgraded, file_to_upgrade)
os.remove(file_upgraded)
os.remove(old_ref_file)
if __name__ == "__main__":
main()
|
"""
Delays all received messages, then dispatches them to another handler.
"""
import collections
import time
import skytools
from zmq.eventloop.ioloop import PeriodicCallback
from cc.handler import CCHandler
__all__ = ['Delay']
CC_HANDLER = 'Delay'
class Delay (CCHandler):
""" Delays all received messages, then dispatches them to another handler. """
CC_ROLES = ['local', 'remote']
log = skytools.getLogger ('h:Delay')
tick = 250 # ms
def __init__ (self, hname, hcf, ccscript):
super(Delay, self).__init__(hname, hcf, ccscript)
self.fwd_hname = self.cf.get ('forward-to')
self.delay = self.cf.getint ('delay', 0)
self.fwd_handler = ccscript.get_handler (self.fwd_hname)
self.queue = collections.deque()
self.timer = PeriodicCallback (self.process_queue, self.tick, self.ioloop)
self.timer.start()
def handle_msg (self, cmsg):
""" Got message from client -- queue it """
self.queue.append ((time.time() + self.delay, cmsg))
def process_queue (self):
now = time.time()
try:
while (self.queue[0][0] <= now):
at, cmsg = self.queue.popleft()
size = cmsg.get_size()
try:
self.fwd_handler.handle_msg (cmsg)
stat = 'ok'
except Exception:
self.log.exception ('crashed, dropping msg: %s', cmsg.get_dest())
stat = 'crashed'
self.stat_inc ('delay.count')
self.stat_inc ('delay.bytes', size)
self.stat_inc ('delay.count.%s' % stat)
self.stat_inc ('delay.bytes.%s' % stat, size)
except IndexError:
pass
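# Hedged configuration sketch (the section name and forwarded handler are
# assumptions; only the 'forward-to' and 'delay' keys are read by the handler
# above):
#
# [h:delay]
# handler = Delay
# forward-to = h:proxy
# delay = 30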
|
"""fixture_data.py
Installs some sample data. Here we have a handful of postal codes for a few US/
Canadian cities. Then, 50 Person records are installed, each with a
randomly selected postal code.
"""
from environment import Session, Base
from model import City, Country, PostalCode, Person, Address
import random
def install():
Base.metadata.create_all(Session().bind)
data = [
('Chicago', 'United States', ('60601', '60602', '60603', '60604')),
('Montreal', 'Canada', ('H2S 3K9', 'H2B 1V4', 'H7G 2T8')),
('Edmonton', 'Canada', ('T5J 1R9', 'T5J 1Z4', 'T5H 1P6')),
('New York', 'United States',
('10001', '10002', '10003', '10004', '10005', '10006')),
('San Francisco', 'United States',
('94102', '94103', '94104', '94105', '94107', '94108'))
]
countries = {}
all_post_codes = []
for city, country, postcodes in data:
try:
country = countries[country]
except KeyError:
countries[country] = country = Country(country)
city = City(city, country)
pc = [PostalCode(code, city) for code in postcodes]
Session.add_all(pc)
all_post_codes.extend(pc)
for i in range(1, 51):
person = Person(
"person %.2d" % i,
Address(
street="street %.2d" % i,
postal_code=all_post_codes[
random.randint(0, len(all_post_codes) - 1)]
)
)
Session.add(person)
Session.commit()
# start the demo fresh
Session.remove()
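# Minimal sketch (an assumption, not part of the original fixtures): running
# this module directly installs the sample data using the Session/Base
# configured in environment.py.
if __name__ == '__main__':
    install()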
|
import datetime
import errno
import os
import sys
import time
class Utils(object):
@staticmethod
def Log(message):
print("\t" + message)
@staticmethod
def TLog(message):
timestamp = time.time()
timestamp_str = datetime.datetime.fromtimestamp(timestamp).strftime('%d/%m/%y %H:%M:%S')
print(timestamp_str + " " + message)
@staticmethod
def RunCondorTool(cmd):
output = os.popen(cmd).read()
return output
@staticmethod
def MakedirsIgnoreExist(directory):
try:
os.makedirs(directory)
except:
exctype, oe = sys.exc_info()[:2]
if not issubclass(exctype, OSError): raise
if oe.errno != errno.EEXIST:
raise
@staticmethod
def RemoveIgnoreMissing(file):
try:
os.unlink(file)
except:
exctype, oe = sys.exc_info()[:2]
if not issubclass(exctype, OSError): raise
if oe.errno != errno.ENOENT:
raise
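# Hypothetical usage sketch of the helpers above (the paths below are
# illustrative only):
if __name__ == '__main__':
    Utils.TLog("starting maintenance pass")
    Utils.MakedirsIgnoreExist("/tmp/example_dir")
    Utils.RemoveIgnoreMissing("/tmp/example_dir/stale_file")
    Utils.Log("done")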
|
# Import local modules
from photoshop.api._core import Photoshop
from photoshop.api._document import Document
from photoshop.api.enumerations import BitsPerChannelType
from photoshop.api.enumerations import DocumentFill
from photoshop.api.enumerations import NewDocumentMode
from photoshop.api.errors import PhotoshopPythonAPIError
# pylint: disable=too-many-public-methods, too-many-arguments
class Documents(Photoshop):
"""The collection of open documents."""
def __init__(self, parent):
super().__init__(parent=parent)
def __len__(self) -> int:
return self.length
def add(
self,
width: int = 960,
height: int = 540,
resolution: float = 72.0,
name: str = None,
mode: int = NewDocumentMode.NewRGB,
initialFill: int = DocumentFill.White,
pixelAspectRatio: float = 1.0,
bitsPerChannel: int = BitsPerChannelType.Document8Bits,
colorProfileName: str = None,
) -> Document:
"""Creates a new document object and adds it to this collections.
Args:
width (int): The width of the document.
height (int): The height of the document.
resolution (float): The resolution of the document (in pixels per inch).
name (str): The name of the document.
mode (): The document mode.
initialFill : The initial fill of the document.
pixelAspectRatio: The initial pixel aspect ratio of the document.
Default is `1.0`, the range is `0.1-10.00`.
bitsPerChannel: The number of bits per channel.
colorProfileName: The name of color profile for document.
Returns:
.Document: Document instance.
"""
return Document(
self.app.add(
width,
height,
resolution,
name,
mode,
initialFill,
pixelAspectRatio,
bitsPerChannel,
colorProfileName,
)
)
def __iter__(self) -> Document:
for doc in self.app:
self.adobe.activeDocument = doc
yield Document(doc)
def __getitem__(self, item) -> Document:
try:
return Document(self.app[item])
except IndexError:
raise PhotoshopPythonAPIError("Currently Photoshop did not find Documents.")
@property
def length(self) -> int:
return len(list(self.app))
def getByName(self, document_name: str):
return Document(self.app.getByName(document_name))
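# Hedged usage sketch (requires a local Photoshop installation; the Application
# entry point is assumed from the surrounding package layout):
#
# from photoshop import api as ps
# app = ps.Application()
# doc = app.documents.add(width=1920, height=1080, name="example")
# print(len(app.documents))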
|
import math
from and_beyond.abstract_player import AbstractPlayer
from and_beyond.common import GRAVITY, TERMINAL_VELOCITY
from and_beyond.utils import autoslots
from and_beyond.world import BlockTypes
EPSILON = 0.001
@autoslots
class PlayerPhysics:
x_velocity: float
y_velocity: float
player: AbstractPlayer
dirty: bool
fix_dx: float
fix_dy: float
air_time: int
def __init__(self, player: AbstractPlayer) -> None:
self.x_velocity = 0
self.y_velocity = 0
self.player = player
self.dirty = True
self.sequential_fixes = 0
self.air_time = 0
def tick(self, delta: float) -> None:
old_x = self.player.x
old_y = self.player.y
if old_x == math.inf or old_y == math.inf:
return
self.y_velocity += GRAVITY * delta
self.x_velocity *= 0.75
# if self.x_velocity > 0.1 or self.x_velocity < -0.1:
# pass # self.x_velocity *= 0.7
# else:
# self.x_velocity = 0
self.player.x += self.x_velocity
if self.fix_collision_in_direction(self.x_velocity, 0):
self.x_velocity = 0
self.player.y += self.y_velocity
if self.fix_collision_in_direction(0, self.y_velocity):
self.y_velocity = 0
self.air_time = 0
else:
self.air_time += 1
if self.y_velocity < TERMINAL_VELOCITY:
self.y_velocity = TERMINAL_VELOCITY
self.dirty = self.player.y != old_y or self.player.x != old_x
# Thanks to Griffpatch and his amazing Tile Scrolling Platformer series for this code :)
def fix_collision_in_direction(self, dx: float, dy: float) -> bool:
self.fix_dx = dx
self.fix_dy = dy
return any((
self.fix_collision_at_point(self.player.x + 0.2, self.player.y - EPSILON),
self.fix_collision_at_point(self.player.x + 0.2, self.player.y + 0.5),
self.fix_collision_at_point(self.player.x + 0.2, self.player.y + 1),
self.fix_collision_at_point(self.player.x + 0.2, self.player.y + 1.5),
self.fix_collision_at_point(self.player.x + 0.8, self.player.y - EPSILON),
self.fix_collision_at_point(self.player.x + 0.8, self.player.y + 0.5),
self.fix_collision_at_point(self.player.x + 0.8, self.player.y + 1),
self.fix_collision_at_point(self.player.x + 0.8, self.player.y + 1.5),
))
def fix_collision_in_direction_reduced_hitbox(self, dx: float, dy: float) -> bool:
self.fix_dx = dx
self.fix_dy = dy
return any((
self.fix_collision_at_point(self.player.x + 0.4, self.player.y + 0.2),
self.fix_collision_at_point(self.player.x + 0.4, self.player.y + 1.3),
self.fix_collision_at_point(self.player.x + 0.6, self.player.y + 0.2),
self.fix_collision_at_point(self.player.x + 0.6, self.player.y + 1.3),
))
def fix_collision_at_point(self, x: float, y: float) -> bool:
ix = math.floor(x)
iy = math.floor(y)
cx = ix >> 4
cy = iy >> 4
bx = ix - (cx << 4)
by = iy - (cy << 4)
cpos = (cx, cy)
if cpos in self.player.loaded_chunks:
tile = self.player.loaded_chunks[cpos].get_tile_type(bx, by)
if tile == BlockTypes.AIR:
return False
mx = x - ix
my = y - iy
if self.fix_dy < 0:
self.player.y += 1 - my
if self.fix_dx < 0:
self.player.x += 1 - mx
if self.fix_dy > 0:
self.player.y -= EPSILON + my
if self.fix_dx > 0:
self.player.x -= EPSILON + mx
return True
def is_grounded(self) -> bool:
x = self.player.x
iy = math.floor(self.player.y - 2 * EPSILON)
return (
self.get_tile_type(math.floor(x + 0.2), iy) != BlockTypes.AIR
or self.get_tile_type(math.floor(x + 0.8), iy) != BlockTypes.AIR
)
def get_tile_type(self, x: int, y: int) -> BlockTypes:
cx = x >> 4
cy = y >> 4
bx = x - (cx << 4)
by = y - (cy << 4)
cpos = (cx, cy)
if cpos in self.player.loaded_chunks:
return self.player.loaded_chunks[cpos].get_tile_type(bx, by)
return BlockTypes.STONE # If we get in an unloaded chunk (assume solid)
|
#!/usr/bin/env python
try:
from cdecimal import Decimal
except ImportError: #pragma: no cover
from decimal import Decimal
try:
import unittest2 as unittest
except ImportError:
import unittest
from journalism import Table
from journalism.columns import TextType, NumberType
from journalism.exceptions import ColumnDoesNotExistError, UnsupportedOperationError
class TestTable(unittest.TestCase):
def setUp(self):
self.rows = (
(1, 4, 'a'),
(2, 3, 'b'),
(None, 2, 'c')
)
self.column_names = ('one', 'two', 'three')
self.number_type = NumberType()
self.text_type = TextType()
self.column_types = (self.number_type, self.number_type, self.text_type)
def test_create_table(self):
table = Table(self.rows, self.column_types, self.column_names)
self.assertEqual(len(table.rows), 3)
self.assertSequenceEqual(table.rows[0], (1, 4, 'a'))
self.assertSequenceEqual(table.rows[1], (2, 3, 'b'))
self.assertSequenceEqual(table.rows[2], (None, 2, 'c'))
def test_create_table_args(self):
with self.assertRaises(ValueError):
Table(self.rows, [self.number_type, self.number_type, self.text_type, self.text_type], self.column_names)
with self.assertRaises(ValueError):
Table(self.rows, self.column_types, ['one', 'two', 'three', 'four'])
with self.assertRaises(ValueError):
Table(self.rows, [self.number_type, self.number_type], ['one', 'two'])
def test_create_duplicate_column_names(self):
with self.assertRaises(ValueError):
Table(self.rows, self.column_types, ['one', 'two', 'one'])
def test_column_names_immutable(self):
column_names = ['one', 'two', 'three']
table = Table(self.rows, self.column_types, column_names)
column_names[0] = 'five'
self.assertEqual(table.get_column_names()[0], 'one')
def test_get_column_types(self):
table = Table(self.rows, self.column_types, self.column_names)
self.assertEqual(table.get_column_types(), self.column_types)
def test_get_column_names(self):
table = Table(self.rows, self.column_types, self.column_names)
self.assertSequenceEqual(table.get_column_names(), ('one', 'two', 'three'))
def test_select(self):
table = Table(self.rows, self.column_types, self.column_names)
new_table = table.select(('three',))
self.assertIsNot(new_table, table)
self.assertEqual(len(new_table.rows), 3)
self.assertSequenceEqual(new_table.rows[0], ('a',))
self.assertSequenceEqual(new_table.rows[1], ('b',))
self.assertSequenceEqual(new_table.rows[2], ('c',))
self.assertEqual(len(new_table.columns), 1)
self.assertSequenceEqual(new_table._column_types, (self.text_type,))
self.assertSequenceEqual(new_table._column_names, ('three',))
self.assertSequenceEqual(new_table.columns['three'], ('a', 'b', 'c'))
def test_where(self):
table = Table(self.rows, self.column_types, self.column_names)
new_table = table.where(lambda r: r['one'] in (2, None))
self.assertIsNot(new_table, table)
self.assertEqual(len(new_table.rows), 2)
self.assertSequenceEqual(new_table.rows[0], (2, 3, 'b'))
self.assertSequenceEqual(new_table.columns['one'], (2, None))
def test_find(self):
table = Table(self.rows, self.column_types, self.column_names)
row = table.find(lambda r: r['two'] - r['one'] == 1)
self.assertIs(row, table.rows[1])
def test_find_none(self):
table = Table(self.rows, self.column_types, self.column_names)
row = table.find(lambda r: r['one'] == 'FOO')
self.assertIs(row, None)
def test_stdev_outliers(self):
rows = [
(50, 4, 'a'),
] * 10
rows.append((200, 1, 'b'))
table = Table(rows, self.column_types, self.column_names)
new_table = table.stdev_outliers('one')
self.assertEqual(len(new_table.rows), 10)
self.assertNotIn(200, new_table.columns['one'])
def test_stdev_outliers_reject(self):
rows = [
(50, 4, 'a'),
] * 10
rows.append((200, 1, 'b'))
table = Table(rows, self.column_types, self.column_names)
new_table = table.stdev_outliers('one', reject=True)
self.assertEqual(len(new_table.rows), 1)
self.assertSequenceEqual(new_table.columns['one'], (200,))
def test_mad_outliers(self):
rows = [
(50, 4, 'a'),
] * 10
rows.append((200, 1, 'b'))
table = Table(rows, self.column_types, self.column_names)
new_table = table.mad_outliers('one')
self.assertEqual(len(new_table.rows), 10)
self.assertNotIn(200, new_table.columns['one'])
def test_mad_outliers_reject(self):
rows = [
(50, 4, 'a'),
] * 10
rows.append((200, 1, 'b'))
table = Table(rows, self.column_types, self.column_names)
new_table = table.mad_outliers('one', reject=True)
self.assertEqual(len(new_table.rows), 1)
self.assertSequenceEqual(new_table.columns['one'], (200,))
def test_order_by(self):
table = Table(self.rows, self.column_types, self.column_names)
new_table = table.order_by('two')
self.assertIsNot(new_table, table)
self.assertEqual(len(new_table.rows), 3)
self.assertSequenceEqual(new_table.rows[0], (None, 2, 'c'))
self.assertSequenceEqual(new_table.rows[1], (2, 3, 'b'))
self.assertSequenceEqual(new_table.rows[2], (1, 4, 'a'))
# Verify old table not changed
self.assertSequenceEqual(table.rows[0], (1, 4, 'a'))
self.assertSequenceEqual(table.rows[1], (2, 3, 'b'))
self.assertSequenceEqual(table.rows[2], (None, 2, 'c'))
def test_order_by_func(self):
rows = (
(1, 2, 'a'),
(2, 1, 'b'),
(1, 1, 'c')
)
table = Table(rows, self.column_types, self.column_names)
new_table = table.order_by(lambda r: (r['one'], r['two']))
self.assertIsNot(new_table, table)
self.assertEqual(len(new_table.rows), 3)
self.assertSequenceEqual(new_table.rows[0], (1, 1, 'c'))
self.assertSequenceEqual(new_table.rows[1], (1, 2, 'a'))
self.assertSequenceEqual(new_table.rows[2], (2, 1, 'b'))
def test_order_by_reverse(self):
table = Table(self.rows, self.column_types, self.column_names)
new_table = table.order_by(lambda r: r['two'], reverse=True)
self.assertEqual(len(new_table.rows), 3)
self.assertSequenceEqual(new_table.rows[0], (1, 4, 'a'))
self.assertSequenceEqual(new_table.rows[1], (2, 3, 'b'))
self.assertSequenceEqual(new_table.rows[2], (None, 2, 'c'))
def test_order_by_nulls(self):
rows = (
(1, 2, None),
(2, None, None),
(1, 1, 'c'),
(1, None, 'a')
)
table = Table(rows, self.column_types, self.column_names)
new_table = table.order_by('two')
self.assertSequenceEqual(new_table.columns['two'], (1, 2, None, None))
new_table = table.order_by('three')
self.assertSequenceEqual(new_table.columns['three'], ('a', 'c', None, None))
def test_limit(self):
table = Table(self.rows, self.column_types, self.column_names)
new_table = table.limit(2)
self.assertIsNot(new_table, table)
self.assertEqual(len(new_table.rows), 2)
self.assertSequenceEqual(new_table.rows[0], (1, 4, 'a'))
self.assertSequenceEqual(new_table.columns['one'], (1, 2))
def test_limit_slice(self):
table = Table(self.rows, self.column_types, self.column_names)
new_table = table.limit(0, 3, 2)
self.assertIsNot(new_table, table)
self.assertEqual(len(new_table.rows), 2)
self.assertSequenceEqual(new_table.rows[0], (1, 4, 'a'))
self.assertSequenceEqual(new_table.rows[1], (None, 2, 'c'))
self.assertSequenceEqual(new_table.columns['one'], (1, None))
def test_limit_slice_negative(self):
table = Table(self.rows, self.column_types, self.column_names)
new_table = table.limit(-2, step=-1)
self.assertIsNot(new_table, table)
self.assertEqual(len(new_table.rows), 2)
self.assertSequenceEqual(new_table.rows[0], (2, 3, 'b'))
self.assertSequenceEqual(new_table.rows[1], (1, 4, 'a'))
self.assertSequenceEqual(new_table.columns['one'], (2, 1))
def test_limit_step_only(self):
table = Table(self.rows, self.column_types, self.column_names)
new_table = table.limit(step=2)
self.assertIsNot(new_table, table)
self.assertEqual(len(new_table.rows), 2)
self.assertSequenceEqual(new_table.rows[0], (1, 4, 'a'))
self.assertSequenceEqual(new_table.rows[1], (None, 2, 'c'))
self.assertSequenceEqual(new_table.columns['one'], (1, None))
def test_distinct_column(self):
rows = (
(1, 2, 'a'),
(2, None, None),
(1, 1, 'c'),
(1, None, None)
)
table = Table(rows, self.column_types, self.column_names)
new_table = table.distinct('one')
self.assertIsNot(new_table, table)
self.assertEqual(len(new_table.rows), 2)
self.assertSequenceEqual(new_table.rows[0], (1, 2, 'a'))
self.assertSequenceEqual(new_table.rows[1], (2, None, None))
self.assertSequenceEqual(new_table.columns['one'], (1, 2))
def test_distinct_func(self):
rows = (
(1, 2, 'a'),
(2, None, None),
(1, 1, 'c'),
(1, None, None)
)
table = Table(rows, self.column_types, self.column_names)
new_table = table.distinct(lambda row: (row['two'], row['three']))
self.assertIsNot(new_table, table)
self.assertEqual(len(new_table.rows), 3)
self.assertSequenceEqual(new_table.rows[0], (1, 2, 'a'))
self.assertSequenceEqual(new_table.rows[1], (2, None, None))
self.assertSequenceEqual(new_table.rows[2], (1, 1, 'c'))
self.assertSequenceEqual(new_table.columns['one'], (1, 2, 1))
def test_distinct_none(self):
rows = (
(1, 2, 'a'),
(1, None, None),
(1, 1, 'c'),
(1, None, None)
)
table = Table(rows, self.column_types, self.column_names)
new_table = table.distinct()
self.assertIsNot(new_table, table)
self.assertEqual(len(new_table.rows), 3)
self.assertSequenceEqual(new_table.rows[0], (1, 2, 'a'))
self.assertSequenceEqual(new_table.rows[1], (1, None, None))
self.assertSequenceEqual(new_table.rows[2], (1, 1, 'c'))
self.assertSequenceEqual(new_table.columns['one'], (1, 1, 1))
def test_chain_select_where(self):
table = Table(self.rows, self.column_types, self.column_names)
new_table = table.select(('one', 'two')).where(lambda r: r['two'] == 3)
self.assertEqual(len(new_table.rows), 1)
self.assertSequenceEqual(new_table.rows[0], (2, 3))
self.assertEqual(len(new_table.columns), 2)
self.assertSequenceEqual(new_table._column_types, (self.number_type, self.number_type))
self.assertEqual(new_table._column_names, ('one', 'two'))
self.assertSequenceEqual(new_table.columns['one'], (2,))
class TestTableGrouping(unittest.TestCase):
def setUp(self):
self.rows = (
('a', 2, 3, 4),
(None, 3, 5, None),
('a', 2, 4, None),
('b', 3, 4, None)
)
self.number_type = NumberType()
self.text_type = TextType()
self.column_types = (self.text_type, self.number_type, self.number_type, self.number_type)
self.column_names = ('one', 'two', 'three', 'four')
def test_group_by(self):
table = Table(self.rows, self.column_types, self.column_names)
new_tables = table.group_by('one')
self.assertEqual(len(new_tables), 3)
self.assertIn('a', new_tables.keys())
self.assertIn('b', new_tables.keys())
self.assertIn(None, new_tables.keys())
self.assertSequenceEqual(new_tables['a'].columns['one'], ('a', 'a'))
self.assertSequenceEqual(new_tables['b'].columns['one'], ('b',))
self.assertSequenceEqual(new_tables[None].columns['one'], (None,))
def test_group_by_bad_column(self):
table = Table(self.rows, self.column_types, self.column_names)
with self.assertRaises(ColumnDoesNotExistError):
table.group_by('bad')
def test_aggregate_sum(self):
table = Table(self.rows, self.column_types, self.column_names)
new_table = table.aggregate('one', (('two', 'sum'), ))
self.assertIsNot(new_table, table)
self.assertEqual(len(new_table.rows), 3)
self.assertEqual(len(new_table.columns), 3)
self.assertSequenceEqual(new_table._column_names, ('one', 'one_count', 'two_sum'))
self.assertSequenceEqual(new_table.rows[0], ('a', 2, 4))
self.assertSequenceEqual(new_table.rows[1], (None, 1, 3))
self.assertSequenceEqual(new_table.rows[2], ('b', 1, 3))
def test_aggregate_sum_two_columns(self):
table = Table(self.rows, self.column_types, self.column_names)
new_table = table.aggregate('one', (('two', 'sum'), ('four', 'sum')))
self.assertIsNot(new_table, table)
self.assertEqual(len(new_table.rows), 3)
self.assertEqual(len(new_table.columns), 4)
self.assertSequenceEqual(new_table._column_names, ('one', 'one_count', 'two_sum', 'four_sum'))
self.assertSequenceEqual(new_table.rows[0], ('a', 2, 4, 4))
self.assertSequenceEqual(new_table.rows[1], (None, 1, 3, 0))
self.assertSequenceEqual(new_table.rows[2], ('b', 1, 3, 0))
def test_aggregate_two_ops(self):
table = Table(self.rows, self.column_types, self.column_names)
new_table = table.aggregate('one', (('two', 'sum'), ('two', 'mean')))
self.assertIsNot(new_table, table)
self.assertEqual(len(new_table.rows), 3)
self.assertEqual(len(new_table.columns), 4)
self.assertSequenceEqual(new_table._column_names, ('one', 'one_count', 'two_sum', 'two_mean'))
self.assertSequenceEqual(new_table.rows[0], ('a', 2, 4, 2))
self.assertSequenceEqual(new_table.rows[1], (None, 1, 3, 3))
self.assertSequenceEqual(new_table.rows[2], ('b', 1, 3, 3))
def test_aggregate_sum_invalid(self):
table = Table(self.rows, self.column_types, self.column_names)
with self.assertRaises(UnsupportedOperationError):
table.aggregate('two', (('one', 'sum'), ))
def test_aggregate_bad_column(self):
table = Table(self.rows, self.column_types, self.column_names)
with self.assertRaises(ColumnDoesNotExistError):
table.aggregate('bad', (('one', 'sum'), ))
with self.assertRaises(ColumnDoesNotExistError):
table.aggregate('two', (('bad', 'sum'), ))
class TestTableCompute(unittest.TestCase):
def setUp(self):
self.rows = (
('a', 2, 3, 4),
(None, 3, 5, None),
('a', 2, 4, None),
('b', 3, 4, None)
)
self.number_type = NumberType()
self.text_type = TextType()
self.column_types = (self.text_type, self.number_type, self.number_type, self.number_type)
self.column_names = ('one', 'two', 'three', 'four')
self.table = Table(self.rows, self.column_types, self.column_names)
def test_compute(self):
new_table = self.table.compute('test', self.number_type, lambda r: r['two'] + r['three'])
self.assertIsNot(new_table, self.table)
self.assertEqual(len(new_table.rows), 4)
self.assertEqual(len(new_table.columns), 5)
self.assertSequenceEqual(new_table.rows[0], ('a', 2, 3, 4, 5))
self.assertSequenceEqual(new_table.columns['test'], (5, 8, 6, 7))
def test_percent_change(self):
new_table = self.table.percent_change('two', 'three', 'test')
self.assertIsNot(new_table, self.table)
self.assertEqual(len(new_table.rows), 4)
self.assertEqual(len(new_table.columns), 5)
to_one_place = lambda d: d.quantize(Decimal('0.1'))
self.assertSequenceEqual(new_table.rows[0], ('a', Decimal('2'), Decimal('3'), Decimal('4'), Decimal('50.0')))
self.assertEqual(to_one_place(new_table.columns['test'][0]), Decimal('50.0'))
self.assertEqual(to_one_place(new_table.columns['test'][1]), Decimal('66.7'))
self.assertEqual(to_one_place(new_table.columns['test'][2]), Decimal('100.0'))
self.assertEqual(to_one_place(new_table.columns['test'][3]), Decimal('33.3'))
def test_rank(self):
new_table = self.table.rank(lambda r: r['two'], 'rank')
self.assertEqual(len(new_table.rows), 4)
self.assertEqual(len(new_table.columns), 5)
self.assertSequenceEqual(new_table.rows[0], ('a', 2, 3, 4, 1))
self.assertSequenceEqual(new_table.rows[1], (None, 3, 5, None, 3))
self.assertSequenceEqual(new_table.rows[2], ('a', 2, 4, None, 1))
self.assertSequenceEqual(new_table.rows[3], ('b', 3, 4, None, 3))
self.assertSequenceEqual(new_table.columns['rank'], (1, 3, 1, 3))
def test_rank2(self):
new_table = self.table.rank(lambda r: r['one'], 'rank')
self.assertEqual(len(new_table.rows), 4)
self.assertEqual(len(new_table.columns), 5)
self.assertSequenceEqual(new_table.rows[0], ('a', 2, 3, 4, 1))
self.assertSequenceEqual(new_table.rows[1], (None, 3, 5, None, 4))
self.assertSequenceEqual(new_table.rows[2], ('a', 2, 4, None, 1))
self.assertSequenceEqual(new_table.rows[3], ('b', 3, 4, None, 3))
self.assertSequenceEqual(new_table.columns['rank'], (1, 4, 1, 3))
def test_rank_column_name(self):
new_table = self.table.rank('two', 'rank')
self.assertEqual(len(new_table.rows), 4)
self.assertEqual(len(new_table.columns), 5)
self.assertSequenceEqual(new_table.rows[0], ('a', 2, 3, 4, 1))
self.assertSequenceEqual(new_table.rows[1], (None, 3, 5, None, 3))
self.assertSequenceEqual(new_table.rows[2], ('a', 2, 4, None, 1))
self.assertSequenceEqual(new_table.rows[3], ('b', 3, 4, None, 3))
self.assertSequenceEqual(new_table.columns['rank'], (1, 3, 1, 3))
class TestTableJoin(unittest.TestCase):
def setUp(self):
self.left_rows = (
(1, 4, 'a'),
(2, 3, 'b'),
(None, 2, 'c')
)
self.right_rows = (
(1, 4, 'a'),
(2, 3, 'b'),
(None, 2, 'c')
)
self.number_type = NumberType()
self.text_type = TextType()
self.column_types = (self.number_type, self.number_type, self.text_type)
self.left = Table(self.left_rows, self.column_types, ('one', 'two', 'three'))
self.right = Table(self.right_rows, self.column_types, ('four', 'five', 'six'))
def test_inner_join(self):
new_table = self.left.inner_join('one', self.right, 'four')
self.assertEqual(len(new_table.rows), 3)
self.assertEqual(len(new_table.columns), 6)
self.assertSequenceEqual(new_table.rows[0], (1, 4, 'a', 1, 4, 'a'))
self.assertSequenceEqual(new_table.rows[1], (2, 3, 'b', 2, 3, 'b'))
self.assertSequenceEqual(new_table.rows[2], (None, 2, 'c', None, 2, 'c'))
def test_inner_join2(self):
new_table = self.left.inner_join('one', self.right, 'five')
self.assertEqual(len(new_table.rows), 1)
self.assertEqual(len(new_table.columns), 6)
self.assertSequenceEqual(new_table.rows[0], (2, 3, 'b', None, 2, 'c'))
def test_inner_join_func(self):
new_table = self.left.inner_join(
lambda left: '%i%s' % (left['two'], left['three']),
self.right,
lambda right: '%i%s' % (right['five'], right['six'])
)
self.assertEqual(len(new_table.rows), 3)
self.assertEqual(len(new_table.columns), 6)
def test_left_outer_join(self):
new_table = self.left.left_outer_join('one', self.right, 'four')
self.assertEqual(len(new_table.rows), 3)
self.assertEqual(len(new_table.columns), 6)
self.assertSequenceEqual(new_table.rows[0], (1, 4, 'a', 1, 4, 'a'))
self.assertSequenceEqual(new_table.rows[1], (2, 3, 'b', 2, 3, 'b'))
self.assertSequenceEqual(new_table.rows[2], (None, 2, 'c', None, 2, 'c'))
def test_left_outer_join2(self):
new_table = self.left.left_outer_join('one', self.right, 'five')
self.assertEqual(len(new_table.rows), 3)
self.assertEqual(len(new_table.columns), 6)
self.assertSequenceEqual(new_table.rows[0], (1, 4, 'a', None, None, None))
self.assertSequenceEqual(new_table.rows[1], (2, 3, 'b', None, 2, 'c'))
self.assertSequenceEqual(new_table.rows[2], (None, 2, 'c', None, None, None))
def test_left_outer_func(self):
new_table = self.left.left_outer_join(
lambda left: '%i%s' % (left['two'], left['three']),
self.right,
lambda right: '%i%s' % (right['five'], right['six'])
)
self.assertEqual(len(new_table.rows), 3)
self.assertEqual(len(new_table.columns), 6)
self.assertSequenceEqual(new_table.rows[0], (1, 4, 'a', 1, 4, 'a'))
self.assertSequenceEqual(new_table.rows[1], (2, 3, 'b', 2, 3, 'b'))
self.assertSequenceEqual(new_table.rows[2], (None, 2, 'c', None, 2, 'c'))
class TestTableData(unittest.TestCase):
def setUp(self):
self.rows = (
(1, 4, 'a'),
(2, 3, 'b'),
(None, 2, 'c')
)
self.number_type = NumberType()
self.text_type = TextType()
self.column_names = ['one', 'two', 'three']
self.column_types = [self.number_type, self.number_type, self.text_type]
def test_data_immutable(self):
rows = [
[1, 4, 'a'],
[2, 3, 'b'],
[None, 2, 'c']
]
table = Table(rows, self.column_types, self.column_names)
rows[0] = [2, 2, 2]
self.assertSequenceEqual(table.rows[0], [1, 4, 'a'])
def test_fork_preserves_data(self):
table = Table(self.rows, self.column_types, self.column_names)
table2 = table._fork(table.rows)
self.assertIs(table.rows[0], table2._data[0])
self.assertIs(table.rows[1], table2._data[1])
self.assertIs(table.rows[2], table2._data[2])
self.assertIs(table.rows[0], table2.rows[0])
self.assertIs(table.rows[1], table2.rows[1])
self.assertIs(table.rows[2], table2.rows[2])
def test_where_preserves_rows(self):
table = Table(self.rows, self.column_types, self.column_names)
table2 = table.where(lambda r: r['one'] == 1)
table3 = table2.where(lambda r: r['one'] == 1)
self.assertIsNot(table._data[0], table2._data[0])
self.assertIs(table2._data[0], table3._data[0])
def test_order_by_preserves_rows(self):
table = Table(self.rows, self.column_types, self.column_names)
table2 = table.order_by(lambda r: r['one'])
table3 = table2.order_by(lambda r: r['one'])
self.assertIsNot(table._data[0], table2._data[0])
self.assertIs(table2._data[0], table3._data[0])
def test_limit_preserves_rows(self):
table = Table(self.rows, self.column_types, self.column_names)
table2 = table.limit(2)
table3 = table2.limit(2)
self.assertIsNot(table._data[0], table2._data[0])
self.assertIs(table2._data[0], table3._data[0])
def test_compute_creates_rows(self):
table = Table(self.rows, self.column_types, self.column_names)
table2 = table.compute('new2', self.number_type, lambda r: r['one'])
table3 = table2.compute('new3', self.number_type, lambda r: r['one'])
self.assertIsNot(table._data[0], table2._data[0])
self.assertNotEqual(table._data[0], table2._data[0])
self.assertIsNot(table2._data[0], table3._data[0])
self.assertNotEqual(table2._data[0], table3._data[0])
self.assertSequenceEqual(table._data[0], (1, 4, 'a'))
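# Conventional unittest entry point so the tests above can be run directly
# (e.g. `python test_table.py`; the filename is illustrative).
if __name__ == '__main__':
    unittest.main()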
|
import argparse
import torch
from tqdm import tqdm
import data_loader as module_dataloader
from module import losses as module_loss
from module import metrics as module_metric
from module import models as module_models
from module import predict_hook as module_predict_hook
import trainer as module_trainer
import numpy as np
from parse_config import ConfigParser
def main(config):
logger = config.get_logger('test')
# setup data_loader instances
data_loader = config.init_obj('data_loader', module_dataloader)
data_loader = data_loader.split_validation()
# build model architecture
model = config.init_obj('model', module_models)
logger.info(model)
# get function handles of loss and metrics
if isinstance(config['loss'],dict):
loss_fn = config.init_obj('loss',module_loss)
else:
loss_fn = getattr(module_loss, config['loss'])
metric_fns = [getattr(module_metric, met) for met in config['metrics']]
predict_hooks = [getattr(module_predict_hook, met) for met in config['predict_hook']]
best_model_path = config.save_dir / "model_best.pth"
if config.resume is None and best_model_path.exists():
print("found best model %s" % best_model_path)
config.resume = best_model_path
logger.info('Loading checkpoint: {} ...'.format(config.resume))
checkpoint = torch.load(config.resume)
state_dict = checkpoint['state_dict']
if config['n_gpu'] > 1:
model = torch.nn.DataParallel(model)
model.load_state_dict(state_dict)
# prepare model for testing
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
model = model.to(device)
model.eval()
total_loss = 0.0
total_metrics = torch.zeros(len(metric_fns))
preds = np.zeros((0, 2))
targets = np.zeros((0, 1))
with torch.no_grad():
for i, (data, target) in enumerate(tqdm(data_loader)):
data, target = data.to(device), target.to(device)
output = model(data)
preds = np.vstack([preds, output.cpu().detach().numpy()])
targets = np.vstack([targets, target.view((-1, 1)).cpu().detach().numpy()])
#
# save sample images, or do something with output here
#
# computing loss, metrics on test set
loss = loss_fn(output, target)
batch_size = data.shape[0]
total_loss += loss.item() * batch_size
for i, metric in enumerate(metric_fns):
total_metrics[i] += metric(output, target) * batch_size
n_samples = len(data_loader.sampler)
log = {'loss': total_loss / n_samples}
log.update({
met.__name__: total_metrics[i].item() / n_samples for i, met in enumerate(metric_fns)
})
logger.info(log)
targets = np.squeeze(targets)
for hook in predict_hooks:
hook(targets, preds, config.save_dir)
if __name__ == '__main__':
args = argparse.ArgumentParser(description='PyTorch Template')
args.add_argument('-c', '--config', default=None, type=str,
help='config file path (default: None)')
args.add_argument('-r', '--resume', default=None, type=str,
help='path to latest checkpoint (default: None)')
args.add_argument('-d', '--device', default=None, type=str,
help='indices of GPUs to enable (default: all)')
config = ConfigParser.from_args(args)
main(config)
|
from .plan import OpenLoopController
|
from pathlib import Path
import pandas as pd
for file_name in Path('./').glob('scores_*.csv'):
print(file_name.stem)
df = pd.read_csv(file_name, index_col=0)
c1 = df.columns[0]
c2 = df.columns[0].replace("_mean", "_std")
# https://stats.stackexchange.com/a/160481
my_col = df[c2] * 1.96 # 95% confidence interval
scores = pd.Series([f"{m:.3f} ± {s:.3f}" for m, s in zip(df[c1], my_col)], index=df.index)
# automatic to latex
# scores.to_latex(f"abc.tex")
print(scores)
|
import setuptools
setuptools.setup(
name="singer-conainer-utils",
version="0.0.1",
description="Utility classes to run Singer taps and targets in containers.",
url="http://github.com/immuta/singer-container-utils",
classifiers=["Programming Language :: Python :: 3 :: Only"],
py_modules=["singer_container_utils"],
author="Stephen Bailey",
install_requires=["pandas"],
author_email="sbailey@immuta.com",
license="MIT",
packages=setuptools.find_packages(),
)
|
# Licensed under a 3-clause BSD style license - see LICENSE.rst
# -*- coding: utf-8 -*-
from asdf import yamlutil
from asdf.versioning import AsdfSpec
from ..types import AstronomyDataModelType
from ..proposal import Proposal
class ProposalType(AstronomyDataModelType):
name = 'datamodel/proposal'
version = '1.0.0'
supported_versions = ['1.0.0']
types = ['astronomy_datamodels.proposal.Proposal']
requires = []
@classmethod
def to_tree(cls, node, ctx): # to ASDF representation
d = {}
d['proposal_id'] = node.id
if node.proposers is not None:
d['proposers'] = yamlutil.custom_tree_to_tagged_tree(node.proposers, ctx)
if node.title is not None:
d['proposal_title'] = node.title
if node.meta is not None:
d['meta'] = yamlutil.custom_tree_to_tagged_tree(node.meta, ctx)
return d
@classmethod
def from_tree(cls, node, ctx): # from ASDF to object representation
id = node['proposal_id']
prop = Proposal(id)
if 'proposers' in node:
prop.proposers = yamlutil.tagged_tree_to_custom_tree(node['proposers'], ctx)
if 'proposal_title' in node:
prop.title = node['proposal_title']
if 'meta' in node:
prop.meta = yamlutil.tagged_tree_to_custom_tree(node['meta'], ctx)
return prop
@classmethod
def assert_equal(cls, old, new):
pass
|
"""
VDF (de)serialization
Copyright (c) 2010-2013, Anthony Garcia <anthony@lagg.me>
Distributed under the ISC License (see LICENSE)
"""
STRING = '"'
NODE_OPEN = '{'
NODE_CLOSE = '}'
BR_OPEN = '['
BR_CLOSE = ']'
COMMENT = '/'
CR = '\r'
LF = '\n'
SPACE = ' '
TAB = '\t'
WHITESPACE = set(' \t\r\n')
try:
from collections import OrderedDict as odict
except ImportError:
odict = dict
def _symtostr(line, i, token=STRING):
opening = i + 1
closing = 0
ci = line.find(token, opening)
while ci != -1:
if line[ci - 1] != '\\':
closing = ci
break
ci = line.find(token, ci + 1)
finalstr = line[opening:closing]
return finalstr, i + len(finalstr) + 1
def _unquotedtostr(line, i):
ci = i
_len = len(line)
while ci < _len:
if line[ci] in WHITESPACE:
break
ci += 1
return line[i:ci], ci
def _parse(stream, ptr=0):
i = ptr
laststr = None
lasttok = None
lastbrk = None
next_is_value = False
deserialized = {}
while i < len(stream):
c = stream[i]
if c == NODE_OPEN:
next_is_value = False # Make sure next string is interpreted as a key.
if laststr in deserialized.keys():
# If this key already exists then we need to make it a list and append the current value.
if type(deserialized[laststr]) is not list:
# If the value already set is not a list, let's make it one.
deserialized[laststr] = [deserialized[laststr]]
# Append the current value to the list
_value, i = _parse(stream, i + 1)
deserialized[laststr].append(_value)
else:
# Key is brand new!
deserialized[laststr], i = _parse(stream, i + 1)
elif c == NODE_CLOSE:
return deserialized, i
elif c == BR_OPEN:
lastbrk, i = _symtostr(stream, i, BR_CLOSE)
elif c == COMMENT:
if (i + 1) < len(stream) and stream[i + 1] == '/':
i = stream.find('\n', i)
elif c == CR or c == LF:
ni = i + 1
if ni < len(stream) and stream[ni] == LF:
i = ni
if lasttok != LF:
c = LF
elif c != SPACE and c != TAB:
string, i = (
_symtostr if c == STRING else
_unquotedtostr)(stream, i)
if lasttok == STRING and next_is_value:
if laststr in deserialized and lastbrk is not None:
# ignore this entry if it's the second bracketed expression
lastbrk = None
else:
if laststr in deserialized.keys():
# If this key already exists then we're dealing with a list of items
if type(deserialized[laststr]) is not list:
# If the existing val is not a list, we need to cast it to one.
deserialized[laststr] = [deserialized[laststr]]
# Append current val to list
deserialized[laststr].append(string)
else:
# First occurrence of laststr in deserialized. Assign the value as normal
deserialized[laststr] = string
# force c = STRING so that lasttok will be set properly
c = STRING
laststr = string
next_is_value = not next_is_value
else:
c = lasttok
lasttok = c
i += 1
return deserialized, i
def _run_parse_encoded(string):
try:
encoded = bytearray(string, "utf-16")
except:
encoded = bytearray(string) # Already byte object?
try:
encoded = encoded.decode("ascii")
except UnicodeDecodeError:
try:
encoded = encoded.decode("utf-8")
except:
encoded = encoded.decode("utf-16")
except UnicodeEncodeError:
pass # Likely already decoded
res, ptr = _parse(encoded)
return res
def load(stream):
"""
Deserializes `stream` containing VDF document to Python object.
"""
return _run_parse_encoded(stream.read())
def loads(string):
"""
Deserializes `string` containing VDF document to Python object.
"""
return _run_parse_encoded(string)
indent = 0
mult = 2
def _i():
return u' ' * (indent * mult)
def _dump(obj):
nodefmt = u'\n' + _i() + '"{0}"\n' + _i() + '{{\n{1}' + _i() + '}}\n\n'
podfmt = _i() + '"{0}" "{1}"\n'
lstfmt = _i() + (' ' * mult) + '"{0}" "1"'
global indent
indent += 1
nodes = []
for k, v in obj.items():
if isinstance(v, dict):
nodes.append(nodefmt.format(k, _dump(v)))
else:
try:
try:
v.isdigit
nodes.append(podfmt.format(k, v))
except AttributeError:
lst = map(lstfmt.format, v)
nodes.append(nodefmt.format(k, u'\n'.join(lst) + '\n'))
except TypeError:
nodes.append(podfmt.format(k, v))
indent -= 1
return u''.join(nodes)
def _run_dump(obj):
res = _dump(obj)
return res.encode("utf-16")
def dump(obj, stream):
"""
Serializes `obj` as VDF formatted stream to `stream` object, encoded as
UTF-16 by default.
"""
stream.write(_run_dump(obj))
def dumps(obj):
"""
Serializes `obj` as VDF formatted string, encoded as UTF-16 by default.
"""
return _run_dump(obj)
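# Minimal round-trip sketch for the (de)serializer above; the sample document
# is illustrative only.
if __name__ == '__main__':
    sample = '"root"\n{\n    "name" "value"\n    "child"\n    {\n        "k" "v"\n    }\n}\n'
    parsed = loads(sample)
    print(parsed)         # e.g. {'root': {'name': 'value', 'child': {'k': 'v'}}}
    print(dumps(parsed))  # VDF text encoded as UTF-16 bytes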
|
"""
:copyright: Copyright since 2006 by Oliver Schoenborn, all rights reserved.
:license: BSD, see LICENSE_BSD_Simple.txt for details.
"""
from .topicmgr import (
TopicManager,
TreeConfig
)
from .. import py2and3
class PublisherBase:
"""
Represent the class that send messages to listeners of given
topics and that knows how to subscribe/unsubscribe listeners
from topics.
"""
def __init__(self, treeConfig = None):
"""If treeConfig is None, a default one is created from an
instance of TreeConfig."""
self.__treeConfig = treeConfig or TreeConfig()
self.__topicMgr = TopicManager(self.__treeConfig)
def getTopicMgr(self):
"""Get the topic manager created for this publisher."""
return self.__topicMgr
def getListenerExcHandler(self):
"""Get the listener exception handler that was registered
via setListenerExcHandler(), or None if none was registered."""
return self.__treeConfig.listenerExcHandler
def setListenerExcHandler(self, handler):
"""Set the function to call when a listener raises an exception
during a sendMessage(). The handler must adhere to the
IListenerExcHandler API. """
self.__treeConfig.listenerExcHandler = handler
def addNotificationHandler(self, handler):
"""Add a handler for tracing pubsub activity. The handler should be
a class that adheres to the API of INotificationHandler. """
self.__treeConfig.notificationMgr.addHandler(handler)
def clearNotificationHandlers(self):
"""Remove all notification handlers that were added via
self.addNotificationHandler(). """
self.__treeConfig.notificationMgr.clearHandlers()
def setNotificationFlags(self, **kwargs):
"""Set the notification flags on or off for each type of
pubsub activity. The kwargs keys can be any of the following:
- subscribe: if True, get notified whenever a listener subscribes to a topic;
- unsubscribe: if True, get notified whenever a listener unsubscribes from a topic;
- deadListener: if True, get notified whenever a subscribed listener has been garbage-collected;
- sendMessage: if True, get notified whenever sendMessage() is called;
- newTopic: if True, get notified whenever a new topic is created;
- delTopic: if True, get notified whenever a topic is "deleted" from topic tree;
- all: set all of the above to the given value (True or False).
The kwargs that are None are left at their current value. Those that are
False will cause corresponding notification to be silenced. The 'all'
is set first, then the others. E.g.
mgr.setFlagStates(all=True, delTopic=False)
will toggle all notifications on, but will turn off the 'delTopic'
notification.
"""
self.__treeConfig.notificationMgr.setFlagStates(**kwargs)
def getNotificationFlags(self):
"""Return a dictionary with the notification flag states."""
return self.__treeConfig.notificationMgr.getFlagStates()
def setTopicUnspecifiedFatal(self, newVal=True, checkExisting=True):
"""Changes the creation policy for topics.
By default, pubsub will accept topic names for topics that
don't have a message data specification (MDS). This default behavior
makes pubsub easier to use initially, but allows topic
names with typos to go uncaught in common operations such as
sendMessage() and subscribe(). In a large application, this
can lead to nasty bugs. Pubsub's default behavior is equivalent
to setTopicUnspecifiedFatal(false).
When called with newVal=True, any future pubsub operation that
requires a topic (such as subscribe and sendMessage) will require
an MDS; if none is available, pubsub will raise a TopicDefnError
exception.
If checkExisting is not given or True, all existing
topics are validated. A TopicDefnError exception is
raised if one is found to be incomplete (has hasMDS() false).
Returns previous value of newVal.
Note that this method can be used in several ways:
1. Only use it in your application when something is not working
as expected: just add a call at the beginning of your app when
you have a problem with topic messages not being received
(for instance), and remove it when you have fixed the problem.
2. Use it from the beginning of your app and never use newVal=False:
add a call at the beginning of your app and you leave it in
(forever), and use Topic Definition Providers to provide the
listener specifications. These are easy to use via the
pub.addTopicDefnProvider().
3. Use it as in #1 during app development, and once stable, use
#2. This is easiest to do in combination with
pub.exportTopicTreeSpec().
"""
oldVal = self.__treeConfig.raiseOnTopicUnspecified
self.__treeConfig.raiseOnTopicUnspecified = newVal
if newVal and checkExisting:
self.__topicMgr.checkAllTopicsHaveMDS()
return oldVal
def sendMessage(self, topicName, *args, **kwargs):
"""Send a message for topic name with given data (args and kwargs).
This will be overridden by derived classes that implement
message-sending for different messaging protocols; not all
parameters may be accepted."""
raise NotImplementedError
def subscribe(self, listener, topicName):
"""Subscribe listener to named topic. Raises ListenerMismatchError
if listener isn't compatible with the topic's MDS. Returns
(pubsub.core.Listener, success), where success is False if listener
was already subscribed. The pub.core.Listener wraps the callable
subscribed and provides introspection-based info about
the callable.
Note that if 'subscribe' notification is on, the handler's
'notifySubscribe' method is called after subscription."""
topicObj = self.__topicMgr.getOrCreateTopic(topicName)
subscribedListener, success = topicObj.subscribe(listener)
return subscribedListener, success
def unsubscribe(self, listener, topicName):
"""Unsubscribe from given topic. Returns the pubsub.core.Listener
instance that was used to wrap listener at subscription
        time. Raises a TopicNameError if topicName doesn't exist.
Note that if 'unsubscribe' notification is on, the handler's
notifyUnsubscribe() method will be called after unsubscribing. """
topicObj = self.__topicMgr.getTopic(topicName)
unsubdLisnr = topicObj.unsubscribe(listener)
return unsubdLisnr
def unsubAll(self, topicName = None,
listenerFilter = None, topicFilter = None):
"""By default (no args given), unsubscribe all listeners from all
topics. A listenerFilter can be given so that only the listeners
that satisfy listenerFilter(listener) == True will be unsubscribed
(with listener being a pub.Listener wrapper instance for each listener
subscribed). A topicFilter can also be given so that only topics
that satisfy topicFilter(topic name) == True will be affected.
        If only one topic should have its listeners unsubscribed, a topic
        name 'topicName' can be given *instead* of a topic filter.
        Returns the list of all listeners (instances of pub.Listener) that
        were unsubscribed from the topic tree.
        Note: this method will generate one 'unsubscribe' notification message
        (see pub.setNotificationFlags()) for each listener unsubscribed."""
unsubdListeners = []
if topicName is None:
# unsubscribe all listeners from all topics
topicsMap = self.__topicMgr._topicsMap
for topicName, topicObj in py2and3.iteritems(topicsMap):
if topicFilter is None or topicFilter(topicName):
tmp = topicObj.unsubscribeAllListeners(listenerFilter)
unsubdListeners.extend(tmp)
else:
topicObj = self.__topicMgr.getTopic(topicName)
unsubdListeners = topicObj.unsubscribeAllListeners(listenerFilter)
return unsubdListeners
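# --- Illustrative usage sketch (not part of the original module) ---
# A minimal sketch of how the API documented above is typically driven,
# assuming the module-level `pub` facade that PyPubSub exposes on top of a
# publisher such as this one; the listener and topic names are hypothetical.
if __name__ == "__main__":
    from pubsub import pub

    def on_order(item, qty=1):
        # The listener signature must be compatible with the topic's MDS.
        print("ordered", qty, "x", item)

    # Turn all notifications on except 'delTopic' (see setNotificationFlags).
    pub.setNotificationFlags(all=True, delTopic=False)
    pub.subscribe(on_order, 'shop.order')
    pub.sendMessage('shop.order', item='book', qty=2)
    # Unsubscribe everything when shutting down.
    pub.unsubAll()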
|
import os
import pathlib
import itertools
import numpy as np
from fusilib.config import DATA_ROOT
from fusilib.utils import band_pass_signal
def can_int(val):
    '''Return True if `val` can be interpreted as a whole number
    (e.g. a block-folder name such as '7').'''
    try:
        fval = float(val)
        return fval == int(val)
    except (TypeError, ValueError):
        return False
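# Quick sanity check (illustrative): folder names such as '3' pass, while
# '3.5', 'log' or None do not.
# >>> can_int('3'), can_int('3.5'), can_int('log'), can_int(None)
# (True, False, False, False)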
def datetuple2isoformat(date_tuple):
    '''Convert a (year, month, day) tuple into an ISO-format date string,
    e.g. (2020, 11, 1) -> '2020-11-01'.
    '''
year, month, day = date_tuple[:3]
return '{year}-{month}-{day}'.format(year=year,
month='%02i' % month,
day='%02i' % day)
def isoformat_filename2info(flname):
'''Extract subject, date and block number from file name
E.g. 2020-11-01_13_CR024_data.csv -> ('CR024', '2020-11-01', '13')
Parameters
----------
flname : str or pathlib.path
Returns
-------
subject : str
date : str
blocknum : str
'''
stem = pathlib.Path(flname).stem
date, blocknum, subject = stem.split('_')[:3]
return subject, date, blocknum
def isoformat_filename2fullpath(flname,
root=DATA_ROOT):
    '''Convert a cortex-lab formatted filename to a full path
E.g. 2020-11-01_13_CR024_data.csv gets translated to:
/root/subject/date/block_number/2020-11-01_13_CR024_data.csv
Parameters
----------
flname : str or pathlib.Path
Returns
-------
outpath : pathlib.Path
Examples
--------
>>> isoformat_filename2fullpath('2020-11-01_13_CR024_data.csv')
PosixPath('/DATA_ROOT/CR024/2020-11-01/13/2020-11-01_13_CR024_data.csv')
'''
stem = pathlib.Path(flname).stem
date, blocknum, subject = stem.split('_')[:3]
outpath = pathlib.Path(root).joinpath(subject, date, blocknum)
return outpath / pathlib.Path(flname).name
class DataStore(object):
def __init__(self, subject='CR01', expnum=1, date=(2017, 11, 17), root=DATA_ROOT):
self.subject = subject
self.date = date
self.root = root
self._set_file_paths(expnum)
self.isoformat_date = datetuple2isoformat(date)
year, month, day = date
path_pattern = '{root}/{subject}/{year}-{month}-{day}/{expnum}/'
path = path_pattern.format(root=root,
year=year,
month='%02i' % month,
day='%02i' % day,
expnum=expnum,
subject=subject)
self.__path__ = path
self.__pathobj__ = pathlib.Path(path)
def __repr__(self):
details = (self.subject, self.date, self.expnum)
year, month, day = self.date
description = '<experiment dataset: experiment #{sl}. {sub} ({yr}/{mo}/{day})>'
return description.format(sub=self.subject,
sl=self.expnum,
yr=year, mo=month, day=day)
def _set_file_paths(self, expnum):
        '''Build (and check the existence of) the expected MAT-file paths
        for the given experiment number.
        '''
self.expnum = expnum
# file template
template = '{root}/{subject}/{year}-{month}-{day}/{expnum}/{year}-{month}-{day}_{expnum}_{subject}_{dtype}.mat'
dtypes = ['Timeline']
year, month, day = self.date
files = {dt: template.format(root=self.root,
year=year,
month='%02i' % month,
day='%02i' % day,
subject=self.subject,
expnum=self.expnum,
dtype=dt)
for dt in dtypes}
# protocol_pattern = '{root}/{subject}/{year}-{month}-{day}/{expnum}/Protocol.mat'
# files['Protocol'] = protocol_pattern.format(root=self.root,
# year=year,
# month='%02i'%month,
# day='%02i'%day,
# expnum=self.expnum,
# subject=self.subject)
# store paths
for fll in files.values():
assert os.path.exists(fll)
self.files = files
self.dtypes = list(files.keys())
@property
def available_sessions(self):
'''
'''
return os.listdir(os.path.join(self.root, self.subject))
@property
def available_experiments(self):
        '''Numerically sorted experiment (block) numbers available for this
        session date.
        '''
items = os.listdir(os.path.join(self.root,
self.subject,
self.isoformat_date))
# str_items = sorted([item for item in items if not can_int(item)])
int_items = sorted(
[int(item) for item in items if can_int(item) and int(item) < 1000])
return int_items # + str_items
def set_experiment(self, expnum):
'''
'''
self._set_file_paths(expnum)
def load_data(self, dtype):
        '''Load the MAT file registered for `dtype` (e.g. 'Timeline') and
        return its top-level struct.
        '''
from scipy import io as sio
assert dtype in self.dtypes
dtype2keys = {'fus': 'doppler',
'Protocol': 'Protocol',
'Timeline': 'Timeline',
}
if dtype not in dtype2keys:
raise IOError("Give info on how to load '%s':\n%s" %
(dtype, self.files[dtype]))
key = dtype2keys[dtype]
try:
dat = sio.loadmat(self.files[dtype], struct_as_record=False,
verify_compressed_data_integrity=False)[key][0, 0]
except OSError:
# Assume I/O latency issues and try again
import time
nsec = np.random.rand()*5
time.sleep(nsec)
dat = sio.loadmat(self.files[dtype], struct_as_record=False,
verify_compressed_data_integrity=False)[key][0, 0]
return dat
class ExperimentData(DataStore):
def __init__(self, *args, **kwargs):
super(ExperimentData, self).__init__(*args, **kwargs)
self.load_timeline()
# self.load_photodiode_events()
# self.load_stimulus_events()
# self.load_protocol()
def load_timeline(self):
'''
'''
timeline = self.load_data('Timeline')
HWEVENTS = [t.name[0] for t in timeline.hw[0, 0].inputs[0]]
self.hwevents = HWEVENTS
self.timeline = timeline
def load_protocol(self):
'''
'''
protocol = self.load_data('Protocol')
self.protocol = protocol
nstim, nreps = protocol.seqnums.shape
self.nstimuli = nstim
self.nrepeats = nreps
self.stimulus_sequence = protocol.seqnums - 1 # change to 0-index
@property
def timeline_sample_ratehz(self):
sample_ratehz = float(1.0/self.timeline.hw[0, 0].samplingInterval)
return sample_ratehz
def get_timeline_data(self, event_name):
'''
'''
assert 'neuralFrames' in self.hwevents
channel_index = self.hwevents.index(event_name)
ttl = self.timeline.rawDAQData[:, channel_index]
times = self.timeline.rawDAQTimestamps[0]
# shift frame time to middle of acquisition
return times, ttl
    def get_fusi_acq_times(self, shift=0.5):
        '''fUSI frame acquisition times from the 'neuralFrames' TTL channel.

        `shift` is the fraction of one frame acquisition by which each
        timestamp is shifted (0.5 places it in the middle of the frame,
        0 keeps the raw TTL onset times).
        '''
        assert 'neuralFrames' in self.hwevents
        channel_index = self.hwevents.index('neuralFrames')
        ttl = self.timeline.rawDAQData[:, channel_index]
        nonzero = np.diff(ttl).nonzero()[0]
        frame_times = self.timeline.rawDAQTimestamps[0, nonzero]
        # shift frame times into the acquisition window
        if shift == 0:
            fusi_times = frame_times.squeeze()
        else:
            fusi_times = (frame_times
                          + self.nBFpframe*shift/self.bfrate).squeeze()
        return fusi_times
def load_doppler(self, trim_end=2):
'''
'''
doppler = self.load_data('fus')
frames = doppler.frames
# store sampling info
self.nBFpframe = 180.0 if not hasattr(
doppler, 'nBFPerFrame') else doppler.nBFPerFrame
self.bfrate = 1.0/doppler.dtBF
# get time stamps
acq_times = self.get_fusi_acq_times(shift=0.5)
# trim data
if trim_end:
acq_times = acq_times[:-trim_end]
nframes = len(acq_times)
nskipped_frames = doppler.softTimes.shape[0] - nframes
frames = frames[..., nskipped_frames:]
self.fusi_frames = frames
self.fusi_times = acq_times
self.xaxis = doppler.xAxis
self.zaxis = doppler.zAxis
self.aspect = doppler.zAxis.max() / doppler.xAxis.max()
def load_stimulus_events(self):
'''
'''
timeline = self.timeline
event_names = [t[0][0].split()[0]
for t in timeline.mpepUDPEvents[:int(timeline.mpepUDPCount)]]
# start and end events
start_idx = np.asarray(
[i for i, t in enumerate(event_names) if 'StimStart' == t])
end_idx = np.asarray(
[i for i, t in enumerate(event_names) if 'StimEnd' == t])
# start and end times
start_times = timeline.mpepUDPTimes[:int(
timeline.mpepUDPCount)][start_idx].squeeze()
end_times = timeline.mpepUDPTimes[:int(
timeline.mpepUDPCount)][end_idx].squeeze()
self.stimulus_start_times = np.atleast_1d(start_times)
self.stimulus_end_times = np.atleast_1d(end_times)
def get_timeline_object(self):
'''
'''
return self.timeline
def load_photodiode_events(self, **kwargs):
'''
kwargs are passed to bandpass filter
'''
assert 'photoDiode' in self.hwevents
timeline = self.timeline
channel_index = self.hwevents.index('photoDiode')
phd = timeline.rawDAQData[:, channel_index]
sample_rate = float(1.0/timeline.hw[0, 0].samplingInterval)
self.photod_data = phd
self.photod_samplerate = sample_rate
fphd = band_pass_signal(phd, sample_rate, **kwargs)
self.photod_fdata = fphd
def load_photod_stimuli(self, daqtimes=None):
'''
'''
if not hasattr(self, 'photod_data'):
self.load_photodiode_events()
if not hasattr(self, 'stimulus_start_times'):
self.load_stimulus_events()
if daqtimes is None:
daqtimes = self.timeline.rawDAQTimestamps[0]
threshold = 0.0
phd = self.photod_data
phdfilt = self.photod_fdata
timeline = self.timeline
start_times = self.stimulus_start_times
end_times = self.stimulus_end_times
phdabove = (phdfilt > threshold).astype(np.float32)
phdtransitions = np.r_[[0], np.diff(phdabove)]
print((phdtransitions == -1).sum())
all_ups = daqtimes[phdtransitions == 1]
all_downs = daqtimes[phdtransitions == -1]
raw_transitions = np.r_[[0], np.diff(
((phd - phd.mean()) > threshold).astype(np.float32))]
raw_ups = daqtimes[raw_transitions == 1]
raw_downs = daqtimes[raw_transitions == -1]
# align stimuli to photodiode
##############################
filter_offset = 0.016
phdtimes = []
for stimidx, (onset, offset) in enumerate(zip(start_times, end_times)):
first_up = raw_ups[raw_ups > onset][0] - filter_offset
last_down = raw_downs[raw_downs < offset][-1] + filter_offset
if 0:
print(stimidx, onset, first_up, last_down)
# use filtered signals
# find the first up event
up_first_idx = (all_ups > first_up).nonzero()[0][0]
# find the last down event
down_last_idx = (all_downs < last_down).nonzero()[0][-1]
# find the last up event
up_last_idx = (all_ups < all_downs[down_last_idx]).nonzero()[0][-1]
# find the first down event
down_first_idx = (
all_downs > all_ups[up_first_idx]).nonzero()[0][0]
print(all_ups[up_first_idx], all_ups[up_last_idx])
print(all_downs[down_first_idx], all_downs[down_last_idx])
print()
# store the onset off-set times per stimulus
up_times = all_ups[up_first_idx:up_last_idx]
down_times = all_downs[down_first_idx:down_last_idx]
phdtimes.append((up_times, down_times))
phdtimes = np.asarray(phdtimes)
# more robust to skipped frames
stimuli_start = np.asarray([t[0][0] for t in phdtimes])
stimuli_end = np.asarray([t[1][-1] for t in phdtimes])
# stimulus with frame vectors:
# [(onset1, offset1), (onset2, offset2), ..., (onsetN, offsetN)]
if phdtimes.ndim == 3:
stimulus_frame_times = np.sort(
phdtimes.reshape(phdtimes.shape[0], -1), axis=-1)
else:
stimulus_frame_times = []
for start_times, end_times in zip(phdtimes[:, 0], phdtimes[:, 1]):
vec = np.r_[start_times, end_times]
                vec = np.sort(vec)
stimulus_frame_times.append(vec)
stimulus_frame_times = np.asarray(stimulus_frame_times)
self.phd_frame_times = stimulus_frame_times[self.stimulus_sequence]
self.phd_stim_start = stimuli_start[self.stimulus_sequence]
self.phd_stim_end = stimuli_end[self.stimulus_sequence]
self.phd_raw = phdtimes[self.stimulus_sequence] # new
def set_frame_markers(self):
'''
'''
import itertools
frame_markers = np.zeros(
(self.nstimuli, self.nrepeats, self.fusi_times.shape[0]))
for idx, (sdx, rdx) in enumerate(itertools.product(range(self.nstimuli), range(self.nrepeats))):
ontime = self.phd_stim_start[sdx, rdx]
offtime = self.phd_stim_end[sdx, rdx]
marker = np.logical_and(self.fusi_times < offtime,
self.fusi_times > ontime)
print(np.asarray([idx, sdx, rdx, ontime,
offtime, offtime-ontime, marker.sum()]))
frame_markers[sdx, rdx, :] = marker
self.stimulus_markers = frame_markers
class StimulusInfo(DataStore):
def __init__(self, *args, **kwargs):
super(StimulusInfo, self).__init__(*args, **kwargs)
self.load_protocol()
def load_timeline(self):
'''
'''
timeline = self.load_data('Timeline')
HWEVENTS = [t.name[0] for t in timeline.hw[0, 0].inputs[0]]
self.hwevents = HWEVENTS
self.timeline = timeline
def load_protocol(self):
'''
'''
protocol = self.load_data('Protocol')
self.protocol = protocol
nstim, nreps = protocol.seqnums.shape
self.nframes_per_swipe = 499
self.monitor_fps = 60
self.nswipes = 6
self.nstimuli = nstim
self.nrepeats = nreps
self.stimulus_sequence = protocol.seqnums - 1 # change to 0-index
def load(self):
self.load_software_times()
self._load_photodiode_data()
self.load_hardware_times()
self.guess_swipe_stimulus_times()
def load_software_times(self):
'''Stimulus events from software
'''
self.load_timeline()
timeline = self.timeline
event_names = [t[0][0].split()[0]
for t in timeline.mpepUDPEvents[:int(timeline.mpepUDPCount)]]
# start and end events
start_idx = np.asarray(
[i for i, t in enumerate(event_names) if 'StimStart' == t])
end_idx = np.asarray(
[i for i, t in enumerate(event_names) if 'StimEnd' == t])
# start and end times
start_times = timeline.mpepUDPTimes[:int(
timeline.mpepUDPCount)][start_idx].squeeze()
end_times = timeline.mpepUDPTimes[:int(
timeline.mpepUDPCount)][end_idx].squeeze()
self.software_start_times = np.atleast_1d(start_times)
self.software_end_times = np.atleast_1d(end_times)
def _load_photodiode_data(self, **kwargs):
'''
kwargs are passed to bandpass filter
'''
assert 'photoDiode' in self.hwevents
timeline = self.timeline
channel_index = self.hwevents.index('photoDiode')
phd = timeline.rawDAQData[:, channel_index]
sample_rate = float(1.0/timeline.hw[0, 0].samplingInterval)
fphd = band_pass_signal(phd, sample_rate, **kwargs)
self.photod_data = phd
self.photod_samplerate = sample_rate
self.photod_fdata = fphd
def load_hardware_times(self):
'''
'''
# if not hasattr(self, 'photod_data'):
# self.load_photodiode_events()
# if not hasattr(self, 'stimulus_start_times'):
# self.load_stimulus_events()
threshold = 0.0
phd = self.photod_data
phdfilt = self.photod_fdata
timeline = self.timeline
start_times = self.software_start_times
end_times = self.software_end_times
phdabove = (phdfilt > threshold).astype(np.float32)
phdtransitions = np.r_[[0], np.diff(phdabove)]
print((phdtransitions == -1).sum())
all_ups = timeline.rawDAQTimestamps[0][phdtransitions == 1]
all_downs = timeline.rawDAQTimestamps[0][phdtransitions == -1]
raw_transitions = np.r_[[0], np.diff(
((phd - phd.mean()) > threshold).astype(np.float32))]
raw_ups = timeline.rawDAQTimestamps[0][raw_transitions == 1]
raw_downs = timeline.rawDAQTimestamps[0][raw_transitions == -1]
# align stimuli to photodiode
##############################
filter_offset = (1./60) # 0.016
phdtimes = []
for stimidx, (onset, offset) in enumerate(zip(start_times, end_times)):
first_up = raw_ups[raw_ups > onset][0] - filter_offset
last_down = raw_downs[raw_downs < offset][-1] + filter_offset
if 0:
print(stimidx, onset, first_up, last_down)
# use filtered signals
# find the first up event
up_first_idx = (all_ups > first_up).nonzero()[0][0]
# find the last down event
down_last_idx = (all_downs < last_down).nonzero()[0][-1]
# find the last up event
up_last_idx = (all_ups < all_downs[down_last_idx]).nonzero()[0][-1]
# find the first down event
down_first_idx = (
all_downs > all_ups[up_first_idx]).nonzero()[0][0]
print(all_ups[up_first_idx], all_ups[up_last_idx])
print(all_downs[down_first_idx], all_downs[down_last_idx])
print()
# store the onset off-set times per stimulus
up_times = all_ups[up_first_idx:up_last_idx]
down_times = all_downs[down_first_idx:down_last_idx]
phdtimes.append((up_times, down_times))
phdtimes = np.asarray(phdtimes)
# more robust to skipped frames
stimuli_start = np.asarray([t[0][0] for t in phdtimes])
stimuli_end = np.asarray([t[1][-1] for t in phdtimes])
# stimulus with frame vectors:
# [(onset1, offset1), (onset2, offset2), ..., (onsetN, offsetN)]
if phdtimes.ndim == 3:
stimulus_frame_times = np.sort(
phdtimes.reshape(phdtimes.shape[0], -1), axis=-1)
else:
stimulus_frame_times = []
for start_times, end_times in zip(phdtimes[:, 0], phdtimes[:, 1]):
vec = np.r_[start_times, end_times]
print(vec.shape, start_times.shape, end_times.shape)
vec = np.sort(vec)
stimulus_frame_times.append(vec)
stimulus_frame_times = np.asarray(stimulus_frame_times)
# re-order to keep repeat and stimulus identitites
self.phd_frame_times = stimulus_frame_times[self.stimulus_sequence]
self.phd_stim_start = stimuli_start[self.stimulus_sequence]
self.phd_stim_end = stimuli_end[self.stimulus_sequence]
self.phd_raw = phdtimes[self.stimulus_sequence]
# get swipe events
##############################
swipes = []
for idx, (sdx, rdx) in enumerate(itertools.product(range(self.nstimuli), range(self.nrepeats))):
times = self.phd_raw[sdx, rdx]
diff = times[1] - times[0]
stats = np.asarray([diff.min(), diff.mean(), diff.max()])
print(idx, sdx, rdx, times[0].shape,
times[1].shape, times[1][-1]-times[0][0], stats)
# nphotoevents = (self.frames_per_swipe*self.nswipes)/2
times = np.r_[times[0], times[1]]
times = np.sort(times)
times = np.array_split(times, self.nswipes)
# swipes
swipes.append(times)
swipes = np.asarray(swipes).reshape(self.nstimuli, self.nrepeats, -1)
self.phd_swipes = swipes
def guess_swipe_stimulus_times(self, stimid=0):
swipes = np.zeros((self.nstimuli, self.nrepeats,
self.nswipes, self.nframes_per_swipe))
for idx, (sdx, rdx) in enumerate(itertools.product(range(self.nstimuli), range(self.nrepeats))):
times = self.phd_raw[sdx, rdx]
diff = times[1] - times[0]
stats = np.asarray([diff.min(), diff.mean(), diff.max()])
print(idx, sdx, rdx, times[0].shape,
times[1].shape, times[1][-1]-times[0][0], stats)
# nphotoevents = (self.frames_per_swipe*self.nswipes)/2
times = np.arange(self.nframes_per_swipe*self.nswipes,
dtype=np.float32)/self.monitor_fps + times[0][0]
times = np.array_split(times, self.nswipes)
# swipes
swipes[sdx, rdx] = times
swipes = np.asarray(swipes)
self.hardcoded_times = swipes
if 0:
ds = DataStore('CR017', date=(2019, 11, 13))
ds = ExperimentData('CR017', date=(2019, 11, 13))
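# --- Illustrative usage sketch (not part of the original module) ---
# A minimal sketch of how these classes are meant to be driven, assuming a
# DATA_ROOT tree laid out as <root>/<subject>/<YYYY-MM-DD>/<expnum>/ with the
# Timeline MAT file present; subject, date and block number are made up.
if __name__ == "__main__":
    exp = ExperimentData('CR017', expnum=7, date=(2019, 11, 13))
    print(exp, 'available blocks:', exp.available_experiments)
    # Photodiode trace and mpep stimulus events parsed out of the Timeline file
    exp.load_photodiode_events()
    exp.load_stimulus_events()
    print('DAQ rate (Hz):', exp.timeline_sample_ratehz,
          'stimulus starts:', exp.stimulus_start_times)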
|
"""
Outputs
The value at position 12 of the sequence and the running sum of the data
"""
contador = 6
suma = 6
# Black box: walk the arithmetic sequence (step 5) eleven more times,
# accumulating the running sum as we go
for i in range(0, 11):
    contador = contador + 5
    suma = suma + contador
# Output
print(contador)
print(suma)
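# Closed-form check (illustrative): the loop above walks the arithmetic
# sequence 6, 11, 16, ... (step 5), so after 11 steps the value is
# 6 + 5*11 = 61 and the sum of the first 12 terms is 12*(6+61)/2 = 402.
posicion_12 = 6 + 5 * 11
suma_total = 12 * (6 + 61) // 2
print(posicion_12, suma_total)  # 61 402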
|
# -*- coding: utf-8 -*-
'''
Given a string that consists of only uppercase English letters, you can replace any letter in the string with another letter at most k times. Find the length of a longest substring containing all repeating letters you can get after performing the above operations.
Note:
Both the string's length and k will not exceed 10^4.
Example 1:
Input:
s = "ABAB", k = 2
Output:
4
Explanation:
Replace the two 'A's with two 'B's or vice versa.
Example 2:
Input:
s = "AABABBA", k = 1
Output:
4
Explanation:
Replace the one 'A' in the middle with 'B' and form "AABBBBA".
The substring "BBBB" has the longest repeating letters, which is 4.
'''
import collections
class Solution(object):
def characterReplacement(self, s, k):
"""
:type s: str
:type k: int
:rtype: int
"""
l = len(s)
cc = collections.defaultdict(int)
dk = k
mch = s[0]
i, j = 0, 0
ret = 0
for j, ch in enumerate(s):
lret = j-i+1
cc[ch] += 1
if ch != mch:
if cc[mch] < cc[ch]:
mch = ch
dk = k - (lret-cc[ch])
else:
dk -= 1
if dk >= 0:
#if lret > ret:
# print(i, j, lret)
ret = max(ret, lret)
while dk < 0:
# find most frequent char in s[i:j]
cc[s[i]] -= 1
i += 1
fc = 0
for ch, c in cc.items():
if c > fc:
fc = c
mch = ch
dk = k - (j-i+1-fc)
return ret
s = Solution()
print(s.characterReplacement("KRSCDCSONAJNHLBMDQGIFCPEKPOHQIHLTDIQGEKLRLCQNBOHNDQGHJPNDQPERNFSSSRDEQLFPCCCARFMDLHADJADAGNNSBNCJQOF", 4))
#print(s.characterReplacement("AAAB", 0))
#print(s.characterReplacement("AABABBA", 1))
#print(s.characterReplacement("ABAB", 2))
#print(s.characterReplacement("CUTQSTZSZBMBCVMQLHTHCEQCICXEKJPYEPKLUJRCUULAZRAVPVKNYQIMMYTTMRCZVSXWNFUWXEOVWQMMKMWVFRBKTVISHXUYFXYKIJUCFUMMJZMPAFDDHBALJZGWQMSDXSLYLJHDHSQXSVEEYKFQMMPRRIESRHVJBLAVLYWMTWUTMULMKRNGYBTXBLEGLTWCFEGGIQMJCXEWNKCKBLOVQOXAKBUJIWXYNVCNVGRJKPCSJPZVPUXKVVEJPUWEQPBQKGJBKRKEIQWNGHWDSRUXURWCTMRUFBOWDNPJWYKPKGUXECVQNVKNGDFRSCGJKYVCRAMHHUPTGITNPIDORQIARYILWKBKUVUBUOZFSKBUWLGOWCNMCILCMLHEFPQBMRYTMIZONXRBKXPUGAMMWXDMKDCAWOXRVQNUEAPXOFWFNSVTQCWMDFALZKOFIMKQDICYFWLDILBSPGXTELZROWSOBHNDZMOBJHFUTPUZLVYOUECDWKBZDJTJIPJHAHOFECLMPGHJSDFMENLRAJMWAQQOTTAGYQFRKNMPJVAFUPFRSZGKHCAGGBQCMCCFVOPYQYYCAQIOUBEKODXUAASRLGCHFVWURZHLSZZSISJGHSEEKKTJETSYSXTRHUAQGTLRYGQPVHCKKUUDNHNWRPOGWDSQFBRVAAPEUYFTBBDNHABKOVVLCGMTFIENQWFSWHIQDDTRBLJBYBRHYEQWETUGJWWQNJYWIBWBNWUSPFQKFFDNRZXPZALQNDXIOJWYUIKGWKUHYPMMZOIJSEBOJFOXYVQRZIDNXCZWEDVFAYYEXDYGUHCSCANKJRELKPWWHPMBBOZNJDDZAHMTDYZNVHMAYZVBRDSSIFYOTKUZXGFVZMURMPANIWSLNKYXSYCAKFFKAZPTGADMVSAPRCPMEJKLNWBUKAFKVFEMTCWIRAKSCMKNLTCFUIUTZDOYRIDXHOTQEZAERTFWXVNKSCTVGKAXRTWLKXYSMEWRRVYLHEGLWSWBRGBASLRRTYYHKXTMAKGZKPSZEOCMFWFNULIFJWXOUMXPPCVRRIYLEURKJTVQIKFYCGPLTDCFINDZSNIGFZQGOWDLRTCCYEHKXINCUYIPXQEPABLMGVNHZHOSWOCYPUJIJMVSWFYSOFRITVKSKDRHCNSTLCWZLRLYNDJVPANHXDWYKLGANHAVFDGMIZVARBDGMMESZZUIUTAYOMLKBMAIJDWVAVRURUEOHXWZGLJFWVUCPBNNDMDBGYJBQPPTHITPHWKHTUVDRJMCQDPRXXHYNQXDVYQRPSHRBZCYTUWYTFRCWIBDDOBDCNWGXUEPSEZCEVZLYHXAAOGUEILKSVWHAFLAAKXAWVBFHLJYNXNRCKHAJCLTZYPBJPFABUBQTBYXHGUZHWZJJSHWAXDOMIQZWUUXYYZQVGNIQPREBELTWDUQJJUEKIAJTUAZDXBDEHSRQYXHOXYEXGKYDMBJKJIMPEEFQRRARUDEPANMEROXGHTVKTOJFZDTGZMJLEPIXSKCWYEMERBWIDMIKFXRJUDVNRNSOCXEDAUFXFRZRTZHTKWCQHSNBRGBKHUSYIYLEAIOCIUMWRYLQXAFXRRJKLEUNQXQUOGGDQRCGKBWHSJHWYUYSWUGKIHCGPNBCOTHWIDCZOVVTBUURUSZTCJRAWTNFEQMLVBPVXSTUGZXCBKAZCCPLPGKSHUUGRGWQAAWURSUXPMZCZBPDJZVGVZPOCXOFYWMMFOCQUQGHBMHIZXEJFXXTNSNCJYHJXLLAEQFKCMQLQTIVESDRDOMJIPXXAIGXEULGRZZAMUNBHBVYJGKCLVBFTCAOBFUXOHOECQWBIGYVJGJISMPNXMKURJSMKOLUGTJJPLTZTRRVGSLWCBPOXFTJQXJJWSDMUSOZXIABESOWKYHHORVNIEOVGPDMHFSWWVGGVPSWGHDQHLDBRQNEKJXVNJKPUYAFGVJLBXADKSJWHJYEOPWDVDZWCUXBMMKAWKGJZZEMDODAWDLHHUOSDQCDCXCYGAOIDROWMMVSPNCMRBBWFMXGJEETLXIHJJSUURIRRMPGRPYINRIJGKSHKPWEZDALQEVJIGICMONLBXIPHQFJPMFMCYMWFADFKJNLZMMHCINCOGBBVKITFHENTZZQTDNWGDOPJUNHJZEJIOZXFBDPIPTDYFCUQDBCPSNUKKLVFIZTUZWPDBGXCHUOACWEBUQQNMYNCRNVKJUEDUJKYXOHWSFQWGBQHPTOWRITQALXROVQZWIOILGDXEMKZIMGRHDLGNFUMOWWQJVQIOKRRPRVULSSYGJVGTVWYNKCCYQJNAMXGVRMMCPULLSIDPQQMGMFSQWFBPTQFCSZXZBZQLZEEAQBYLFWQHKVWAWUWUYMFYQLVBWNBYFEXOFTUMOVDJYEWYORUWQWDFLNGMNHIPJLFTRFAGFRZHCOZTYHDFFRIKYRKCJOPWPUSDWLRUTKWTXTMECNBXQAVGQTXQHNYHUDMAUSYJUMAAPJUMDIJDAYBWYFHOKVWZQYWYYEQBOMARSFHEZZDYYMONCVIDGFIRDJJLTNCTPIAXJMKMBQSXNPYZPURGWYSHXWVZLOBKWNINJJYOHKWMGKNFQBQIPISJDIOJPQBXTVFEMIRSZAPEITGIFEJITKPSMFAEJFWRJCWOXMKDPVVCDPQWFTRCIOYBWALWRRWWGKXURBDRGVUCKIFTJODJQDKHCOHOFLSNLFAMWBXYZASGCBFZFGFRQVJGRDTKJKTABPPYPHWZKWRLCFKJDEABOSIJNVBBELNBMYJUKZSJBNHHXUCMAXGLYSJDBGJCHTYKJQCVPPJVNSCHRFRUEZOHGKVQLVYKENZHPIODXFTJUVRFCLQPPGAFNMZJCLGDTZCWRVNERBASFLEAJFEYGIRLFWAFSLSUXVQJUOSSYHEVVHZYMRQFDRVQVMGABPTFSFJDGGYZRGHWAQTJCYNRHDGOOCTVGHLUOZWVOJBLSQPDSQBBRGQADDQQKBOFBRUADIYPPOYPVDWUBRTUTPTMDLSFDUVWPEECKFHYXDUPVLNPPNKBSYYOFFQKEJKJMBUQYJSRUJHITJOQGMSBIHFXQOQIFQRLQDDCSOSPUOEZBLJSJZNEWNFOAOKHEEKXDCYITHYSXOYMEMNZOCAZBGEFZCWAPIZXYDKSUJHTQPGPHYPDOWEBNODTLLMYOGGUSNKKKFVMVAGDPZRJKJRRQABJZUQKOUSBIGBJAEYFYWZMPNAHQJPZVUNVUACTHACPECEBJMVVIRXHHDBUPQUWRXICTYHLSBVRCLFPMLQVLQJDDZPKZYNLQXZZIETGYBURDFXPDAABRWDCCOTYPVPFUWNRGAGLXIZLEEHSNPIVSNRSFVFRIGIKOMLUMQEVJKEEEQUAQTRSJDAXPFYMDVNOLSICQDTYKBYPQYODWREMAUZZAPHPOCBAJRTELEYGKCANGNTMKTOODJMDOHLLYDILSFMPJNCBKXSCOEQJREWSRZWIKJUIAEIIKWCDAWRDRFLXQVYMIZLKDKSSIJGJZYOKIZXWO
FNHKMVUTZALRZMBSKADAZTBSRRCWJEFPBDZMWRMNERMQPMUKPBJXQKAHJOBBVVMEHAOYHLZMQHOXVIGSRMDWCNBATUSVMBFIDIYWEBWKLKWOVJROPXNDRWKWKDDQENSAGTQKGQBJUWSUKBOIJNVBJGABTOSSMVGQFAPCRGVGUSBRCUVJEMCVUFPKMEXUDSVJBEDGUVNMNBJKEVWVFFSHWOVPNUBWLZYVREWRRVGVYFMXEUIRXPNEIDEEVHJLVHSXGNYWLOCRWQUUPRZNXQEECDSYCTKFEZYXCLUBVTDSWKCFRTHKDKNORMMQNPNSZVSKSCOQTJTSGCCSHAXFLRYSBWCLTZAHCOUEAJGOHRTBVIXSQFECEVPEOKRTXFPGCTVDKDQJIPMLOGFAWGXKWRSSGNLVQIAOKJFXQGVPEIMIKKGVZGKYQXYKAAZLTJDEJXYGVCXPLYVTQQIAZBABDPAKZODIASBJJEGVOJBMKEMQEVWFBKFLZXVNGWFSTGBQMHBRSFJFENSKZJUBQFLWEMBBUQLYFYRMQCMJXPZVULTZALBZHVLCTPRBKVHRGVJDKPRUAVOEHQJCIMWSYIXNTYYTGBAPZJFODZKDYERJSBUPAPPHBNSLYKKAFNQPNZSRHSXCOQPSNWYMXCGFPPGWAPNATEBEDSJFDTZYMKEXNTGJJOZKACFNFVSEECSIOBCMINUIWNLUDSZQRFADMJGWGAOZGWPYXARPYYHPWZCESKNROBWBCSXAMMHHYPWELCPPRPQIFCVNGVXDQHULUPRRQEOMSGESICSRYEHCWECEORJFNUQVJMAOTSJNBWKTMHMKDTAVWFHZYZZIOAIKRVPITZPYSUWDVRLAUPALXXQVYTJTVEFBETLBSZBWAUZVIYUMUVTIMUZQBLMPNVSFWVMTLIFYAHSRIVICTNDZZOZHAXFPSYGHBRYDXAUZEGFMEHNZEJGJDIBHXPVYENHJOHOHDWQVGVJJDGBSITVCZYPLKXZIBHBIQPZNDDUTBYSYKNFYVBWFENYUAAVJDWHMZHIQUBZQBYIZYFKYJSKKLUSFFLMDUIHWOBZQBAZGAIFHPMORCYKUINDJOIKKKCENXJSHZMYOEGLTVBHRJPXYGHUWJSXXMYJSETKWDEDARIQPLWHZXTUGEDSCHSNMOGMZASXNEODTQVSLTVSVQBJWXCSGTCUIAOLGEKSEEFBCFVOPIKOVJONZNRMGZZZAXFOPPLICTFTNSSEFWABMXQPRIGBBUBWSXWTNQMZRYRHROHECVLVBDTLDZWTOWJUCDKPUOZNHTFOYJIALSZDCCNXTUEKXEZRWVLWRJYWCYONNZVLTOGXKWMUYJTGEVRROPFLETSGLWRUWDLPYJAXOTWMKVBPDNESLBOYOSSSFVARDTEXNHCZTVBJRQWWZJDYZKPTZCBPAXSQYUDEKWZVCHDNHUPKSOLWSHDCHXYFVEBDTXISBUCFWPCKXDHUCLTOPFJOSCCOWHYJGRNDWASIFKYECWWBTILDQJTWXNRTKNBPISULXMOFDXVNVFVLIMVHTTSFXUQCPKJPZIJUASYITFIMKVQDZBTYJBFHMYHRHYRMZWUXRBDCLTLZEUGHGNAZHVFOTEOOOKMBSTIPNATUFDWMPLGQSGPEXOJUIFZKOMHCXADOOPCZAFTIDDMLEFSFDMPGFHKNITQNZDQDEUHPYQSBJTXJFIAOONGAQMTRQDTKGWPIXSESRVPFHRMHNHNSNOIIHOOHBYRLYVBHWHSDMYZVNLVKLEEDTGQTTXDYANXZKXJZRNSTUPOCDWJILNVVDDSCTJSZJKDCEUJRETFTZBBDKXQNNLSXIRVAVSCRBQBQWBVIWPEPTPBTZOYWJNDUUVNYAMZRCKOBAKVPEPDGSMRDQCZEZEHPOXTKGMBOBXDRYJLBGIMFFVNLCLRYWUEFTBHOKZXJANRFUOBNTYSNLTKQSFDTNDOAPLTKSZBSXFJWAUNVPXYIAXXBOCLVTALHYARTBPFSUPILRNNBXFTHFJCRNANYEWYYCZGITRPYSPBPYXSNMZBMXUGWIIBNBUCKMQCCHKLZVPRMPPKMXSBKQVQSJSDGHWVFSJHLCSMQAARRPOPITAHIPJPJITNRQSICWTBVKJCZSVBJWPQJPGELEMLNSHNQIEXIMRQAKYVVNIFSIEIXHCJUBZZPCOLPWGFTHFCUTWBSWDHDUSPHZCUQIMOEXICIOEBRRFPMODZLQNEKEKFBPJTTZHMOMVOAKVYOMBOGWOIVVPUCKHSOEFKDIRQLPRHWIKXRFWXJZWNIJZXPTJARTJQTGEEYSIDKIGXMNQNPPFIJNESULETTHMXJWDOEOBWVGNSXLLBHIGFWKXBZPLQOAZDMVGKEPGREVSNRAVJBXCBWFVPZLDTNXRFCOEMOBOJEEPSABSCOTEVXVFWZNQDVTFYFZCLDIFTJKYBUAFNQJEMPFBLTJGOEFOPQACKREKFCCOOEKRVDQZGPPROJOTJOONCIYKSUOJWZULHYWPMJDPCRXRIKCAAOWWKDARYGVPGZFZBXKZOCQVHUQQAQGKTTDKPAIEYKYETDDJAUQDIPPZKFOLGKNISJUVBMSATGVVZXOMYKFTPTWASRETFOBISTNJWSTFGZXURAUDOOORMQOUNXBFDFWFUDFYMOWUAGZPKZPWCMKBUKORLYZXAMTIRYATQXXXAESNGICMYMXVUWNSUSDGHHZAXJQPTCBVUBWSBUCIXUTJUJIRDRFETQKMMREECNIPGGZHUXWAZAUTFTDXZIKCUCFVXLFBZOOVRYWEFBGQUPJDSDLPLESIBXGZHZFZKXFFEKTJMAJAZSAJXAVBERMTPEFFNRFOCEVPPUVNRIXQQQTCSVQNUWCKHEZBGYJXKMYJCIUTNIXVCAIULWIOUDSHZXZUGJDGNUQBYQPEUEGZISLMPEJJBFBREEXJKZEPEVJSQGMWCJDEAXXPUJJEMMYUPPEKARWYDMFXJXNSZLBKHOCQPIZWVDLWQSLQFAHWSIQQSBJWOCEXDOJBYIXMTLIPWQUXREGJOCSBEOCJETKQSXLGINLDEHHWXKNMEGSDEWGVHRAINVVTYNCOPKHLYPDZWQQQCXGUQXISWMEZNUYTLDFWEHCSCMYODRMAPJAUJSBXUYHAGWIULXKSXYRUYTLEOAJNBPEWIMWPQXUFUGKBWFAKRFRSVXGTZBVZIBDWODYTLGKJMSXHSFKHELYFAJAOPYSNFBQAVELIRWUSGNGVVWYBUWKWRJUNCRCQZJQSJUTKQLLXMWUOHLURQRWKDUVGFMNCDTCJOTNFLEVQWSKKHIILKGUBAWCNJFGSLMTGQADDNZENJQBTYRTVXCJYTFMNXCNUGBTKWOZMLBAJEBCEHOZSSUSYWEGVLDJPHPNEHQDUPRRKIGWJLXBJALUYQQPVTEVREDIHLBXPWWPLZIIPWDBNAIUACHLTPFRUBERZGBRZKSNDFOMFDCHQXEUWRWVHYIRQKDSCCQWMZNEBQSBKHNNYHXSDWPPPSVYAFFSWIRDBEIGNBDILUWMTXAIAAQOUZSSQYUQEGQCDEXPNKD
WOSXBFRWQHOXLPNPPQMQFBQQNKGRSLHJJQOECCIRTEGTHIDNZOSEMSUHZCBQGUNUOVJSILDUUIRFONLSWUJDMSZYSPZITJTIDOXEXVHUCVULHDYEGSIZWQFLZVWDNCEXZRDZILLKBUQHUFXIVLAPYGYFTUNAVCUYEHWZWJHQXWWLRPERVMJIQADVNMASOYPQSXITDPCWMEFEJGZUKCCYMPIWHGXVUUQZCAZLXIYIHJUAVTLACTSYESSDFWHDYDHYAPETWKTZVQLOVFYMMCKPGQZGMRFSALGJXXUXLTMOQSYQBAKBJMLXVKTXZXHXYXTRAKRDIONDBZVLCDKBQMPBIVXLXXKNDPTCZFNLINXJIJZZKBNBTMTDNIULDLHSAQSPXZKXDTBKVVCYZKGFNKPPGCWVWLNPJRJKAJAPLJDCVPOHVRUCBMLWGIAJTWBUOZGDRBPCLUWFIWDGMNWONHDISLCUHYYYLGHGQEORYEFGYFRHQBKRKFPICJNMEWTEATTSTYOYLQHLNALECIZMWNBCQMVBWFSPSYCZPVZYXPDNYQWVFRVAWWHTYHWEPIKDHJCUCAREYZKERRCXQKVUKPWGFUJPYQAEFXYQLMCTAGCZHDOBJREYXQCXIBPRUMVJNPCJDBMPQQQUDCENPVLMYJPBBXZRLEJXECCIIWHONWQPDKYRWCKLFMVLPFXCSNERJLOEWHUGFEOGNZWBHAXRKDGGVTQBWGYEIFRJQBFNKGSQECQJVEZXOZRDJNTCYYEOFPSEYKJAMJTXQUFRARYDGZCNRYSIUNGBZXRAZOXFZVMEEEQJYLGWMYRIRVZDFOHZMPQPYRQNHLEPVLHXNUNWKAHSOWLATKJCDZDPEWUEWTKQMAZSJPVOQCUBJRERXRRJOGWHVZJUJXXJZMLCSLPEPMBGGULNYGCTPPDSERBNUAAZRKUYXRCIWLQZDREJEPKPPAVLOQDJNXCAIQWDAVCMOQRHOWTRPHONBVRMIQAZKLPEPDYCZAGCWETPIECCITHJGDWOHYCMJLJVXFBXJNWSOAOXBWBCAIOLGHAGNWOEEOMXHDVWVSFKFZTQISTBFXVCERUAOEUQZMGBJMODTZPTNHKGYYRPGRAMNDMACMLBKZDHMVNKKGCSJVMQOGDOLCWYYDKWWCBPPVRSVEKDUUDOIVWWFMBIAUDGDIWOGAMXVOOSQOOVAOVNUYNYSLNYRIVBBTBMPUXUCORMRMBYVDDGTPOWZRFYLVRJDWYMGDYFPTPEJSUFNITSPSBCQZHTCUSMZFDTSSNBGHPQHRWCRMATJIPJCRPVGIOCWFNVOCYYHEUQEADNUHRWNPDEKWPOHJGLLWNJKCWJJGHXROZSEWQGRYTIFANKGCQNTUJDKTIMDIPRYCVJEYHAHNLBXKXYMNBLUNUNPSXQCHWKFBBPSQCJNPZGHGEQJICGDMGQOMCXQXFGXACNPXCYBTJTVJYJEQAYCGXSHUONCNGIYWOFIBFEWWECBLEVYQMXZEALYMIFLSIZCBKOJAOSFKESVEEFWMMWMKCDZVGOLGGSUFLHEVFJBSEOQTORDWCUNKYMOUMFOKKVZTWLTAYCCFXEYYGXDIHFVIUKYJTBNZUYDOPJGEZMUPCPJXLMOSXTBGCVMMCUOUKCZQEKUNOQPCDLZJSLBOENWKDCYKFWYNOCTFQAGXRLCJJFFROJUGGLZKIBGXHIAMIYRBWVUYSSJCMFVBQSHJOMXOFCMTTDIUPYRMGDRACBPQZCHXKZOWUOQRWZTGDEBILBMNGEAJWRXMUDRUODXGSUCCNMUPYWABYZZBUZOPRGUXYFBZYHIQLOCTRANAQGCRFATDBADZMSYFJHCMZJEJZLXLIZVQTDLSJNIOXNTXNILOSEEFVQQZYGQUGZCUNHEEEPXYPHQJUHHMKUAMYHXWROSTAEYSHGXIJKAOXIESLOBICIRZMVAOKEFTJRXGOOHHGRFEWVXMSEHPTPUKUQZCOWFQFVAIOAITOVYQSZNRLTZSNOKCWIDNWMICTQUGJDJXMUJLNIGVBPCVAROSANVUPWWHCFVDSUVVFDBXQBVWBALEGBQOGIKKIHNWWUTDHBGEZUMEVHBTRSAKAYGYOSFEKPZRUBZQWATYHMSDBOCUZTMJEULVROFNNHXAEOGKHVFNEUISWISTUDLAOWEFXIIFEIXPWSJZAKXPSJFCJTBCDVSTAGGCAKECUQPIMCDXBGUAATNDTKDVVQPLMQQGOCWSEDNNACJCHLGDJOJZUANACMTURVPONNOHRIOSBGCKHTYNVCVBVNXLDOLOFFIGVKXQVUNHDBUGPNTBFODMHHNDUGPTRFSZPFRQUDMUHVRKXDTZNDHJLCMMDKOXROGQKQPWLSCVZMUHOLNOWFBZYAPBMCDXAZEMJNWLUFNJOZQHWHSEMZEUQZBEJTDGERQGTACWPWWIDKJMBFDZCYTCZLDDGVPVJAFAJODLFJQMKTNCHOZFEZJUFIQMDKUZKABARZTGYAJDUMKLCOCKOAGLIEHHRQSTITNCYTCXMNHZJWEFPJDROKIQULNMMMLTJNCOCHSTTOHORDAALCSDRNRWFSVAKHLBVJTTXVBDKRWYFMJNYQQKIBAMLITRKMHEZNFBIWDWVBOHSRODSLOBCFCGXHDCEGMOTAFOFWUTIOFIKJLDQKFBXDLNZWCMJTFEZONCKOURMZURYSQWYXWMSNXSXYBRYYKBHFUENSZGYEICSJNXSMBOOJAHWXRCKCRKOPRZWNXMUMEVBNJYKDAHPSTUUNCQCHRUYWZULAWNSZMGZCNZIGHPBXSOEGSALRIYXFPFFGBCIZUCMFZUEFGAYQKBQJRQHJJHCGKHFCBNBRAISTMUHSKQSXBDBLAFZRTVOSKLGLBKOLMYLZTEDRZOARTZVVSWRVXSVMZXGIAEDDDKPSPPENZGDIK", 100))
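# --- Notes and a quick sanity check (not part of the original snippet) ---
# The solution above is a sliding-window scan: `cc` counts the characters in
# the current window s[i:j+1], `mch`/`dk` track the dominant character and the
# replacements still available, and the left edge `i` is advanced whenever
# more than k replacements would be needed. The two examples from the problem
# statement should both give 4:
print(Solution().characterReplacement("ABAB", 2))     # expected 4
print(Solution().characterReplacement("AABABBA", 1))  # expected 4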
|
"""
Author: Abas Farah
This is code for graphs.py
This file has some prebuilt graphs using the:
--> Adjacency List
--> Adjacency Matrix
--> Adjacency List with edge weights
--> Object-oriented graph with edge weights
"""
import sys
import os
dirpath = os.path.dirname(os.path.abspath(__file__))
sys.path.append('.')
sys.path.append(dirpath + '/../../data_structures/linkedList/')
# An undirected graph with 8 nodes and edge weights;
# nodes that aren't connected are marked with a None value
adjecencyMatrix = [[None, 5, 7, 3, None, None, None, None],
[ 5, None, None, None, 2, 10, None, None],
[ 7, None, None, None, None, None, 1, None],
[ 3, None, None, None, None, None, None, 11],
[None, 2, None, None, None, None, None, 9],
[None, 10, None, None, None, None, None, 4],
[None, None, 1, None, None, None, None, 6],
[None, None, None, 11, 9, 4, 6, None]]
# generate an adjacency list that matches the adjacency matrix above
def genAdjecencyList():
adjecencyList = [None]*8
for i in range(0,8):
adjecencyList[i] = []
for i in range(0,8):
for j in range(0,8):
if adjecencyMatrix[i][j] != None:
adjecencyList[i].append(j)
return adjecencyList
adjecencyList = genAdjecencyList()
dag = {'A':['C'],
'B':['C','E'],
'C':['D'],
'E':['F'],
'D':['F'],
'F':['G'],
'G':[]}
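# --- Illustrative traversal sketch (not part of the original file) ---
# A small breadth-first search over the adjacency-list representation built
# above, and a dict-based depth-first search over `dag`; start nodes are
# arbitrary.
from collections import deque

def bfs(adj_list, start):
    """Return the nodes reachable from `start` in BFS visit order."""
    seen, order, queue = {start}, [], deque([start])
    while queue:
        node = queue.popleft()
        order.append(node)
        for neighbour in adj_list[node]:
            if neighbour not in seen:
                seen.add(neighbour)
                queue.append(neighbour)
    return order

def dfs(graph, start, seen=None):
    """Return the nodes reachable from `start` in DFS (preorder) visit order."""
    if seen is None:
        seen = set()
    seen.add(start)
    order = [start]
    for neighbour in graph[start]:
        if neighbour not in seen:
            order.extend(dfs(graph, neighbour, seen))
    return order

if __name__ == "__main__":
    print(bfs(adjecencyList, 0))   # [0, 1, 2, 3, 4, 5, 6, 7]
    print(dfs(dag, 'B'))           # ['B', 'C', 'D', 'F', 'G', 'E']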
|
# Copyright (c) 2017-2019 Uber Technologies, Inc.
# SPDX-License-Identifier: Apache-2.0
import torch
import pyro.distributions as dist
from pyro.ops.hessian import hessian
from tests.common import assert_equal
def test_hessian_mvn():
tmp = torch.randn(3, 10)
cov = torch.matmul(tmp, tmp.t())
mvn = dist.MultivariateNormal(cov.new_zeros(3), cov)
x = torch.randn(3, requires_grad=True)
y = mvn.log_prob(x)
assert_equal(hessian(y, x), -mvn.precision_matrix)
def test_hessian_multi_variables():
x = torch.randn(3, requires_grad=True)
z = torch.randn(3, requires_grad=True)
y = (x ** 2 * z + z ** 3).sum()
H = hessian(y, (x, z))
Hxx = (2 * z).diag()
Hxz = (2 * x).diag()
Hzz = (6 * z).diag()
target_H = torch.cat([torch.cat([Hxx, Hxz]), torch.cat([Hxz, Hzz])], dim=1)
assert_equal(H, target_H)
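# A further illustrative check (not from the original test file), assuming the
# same hessian(y, xs) interface exercised above: for y = sum(x**4) the Hessian
# should be the diagonal matrix diag(12 * x**2).
def test_hessian_quartic():
    x = torch.randn(4, requires_grad=True)
    y = (x ** 4).sum()
    assert_equal(hessian(y, x), (12 * x ** 2).diag())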
|
## SIMPLE SCRIPT FOR ALLOWING USER TO SPECIFY CONFIG ENTRIES INTERACTIVELY
import os
import sys
import yaml
print('Would you like to input setup values interactively (y/n)?')
x = input()
if x not in ("y", "yes"):
sys.exit(0)
with open('config.yaml') as f:
config = yaml.load(f, Loader=yaml.FullLoader)
print("Simply click enter if you do not have input for any of the statements.")
print('What directory are your input files located in?')
x = input()
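# Hypothetical continuation (the original script's remaining prompts are not
# shown here): one way such an answer could be stored back into the config
# and written out, assuming an 'input_dir' key and using yaml.safe_dump.
if x:
    config['input_dir'] = x
    with open('config.yaml', 'w') as f:
        yaml.safe_dump(config, f, default_flow_style=False)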
|
# Copyright (C) 2013-2016 DNAnexus, Inc.
#
# This file is part of dx-toolkit (DNAnexus platform client libraries).
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy
# of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
The :class:`DXObject` class is the abstract base class for all remote
object handlers, and its subclass :class:`DXDataObject` is the abstract
base class for all remote data object handlers.
"""
from __future__ import print_function, unicode_literals, division, absolute_import
import time, copy, re
import dxpy.api
from ..exceptions import (DXError, DXAPIError, DXFileError, DXGTableError, DXSearchError, DXAppletError,
DXJobFailureError, AppError, AppInternalError, DXCLIError)
from ..compat import basestring
def verify_string_dxid(dxid, expected_classes):
'''
:param dxid: Value to verify as a DNAnexus ID of class *expected_class*
:param expected_classes: Single string or list of strings of allowed classes of the ID, e.g. "file" or ["project", "container"]
:type expected_classes: string or list of strings
:raises: :exc:`~dxpy.exceptions.DXError` if *dxid* is not a string or is not a valid DNAnexus ID of the expected class
'''
if isinstance(expected_classes, basestring):
expected_classes = [expected_classes]
if not isinstance(expected_classes, list) or len(expected_classes) == 0:
raise DXError('verify_string_dxid: expected_classes should be a string or list of strings')
if not (isinstance(dxid, basestring) and
re.match('^(' + '|'.join(expected_classes) + ')-[0-9a-zA-Z]{24}$', dxid)):
if len(expected_classes) == 1:
str_expected_classes = expected_classes[0]
elif len(expected_classes) == 2:
str_expected_classes = ' or '.join(expected_classes)
else:
str_expected_classes = ', '.join(expected_classes[:-1]) + ', or ' + expected_classes[-1]
raise DXError('Invalid ID of class %s: %r' % (str_expected_classes, dxid))
class DXObject(object):
"""Abstract base class for all remote object handlers."""
def __init__(self, dxid=None, project=None):
# Initialize _dxid and _proj to None values, and have
# subclasses actually perform the setting of the values once
# they have been validated.
self._dxid, self._proj = None, None
self._desc = {}
def _repr(self, use_name=False):
dxid = self._dxid if self._dxid is not None else "no ID stored"
dxproj_id = self._proj if self._proj is not None else "no project ID stored"
if use_name:
if self._class not in ["container", "project", "app", "globalworkflow"]:
desc = "<dxpy.{classname}: {name} ({dxid} ({dxproj_id}))>"
else:
desc = "<dxpy.{classname}: {name} ({dxid})>"
else:
if self._class not in ["container", "project", "app", "globalworkflow"]:
desc = "<{module}.{classname} object at 0x{mem_loc:x}: {dxid} ({dxproj_id})>"
else:
desc = "<{module}.{classname} object at 0x{mem_loc:x}: {dxid}>"
desc = desc.format(module=self.__module__,
classname=self.__class__.__name__,
dxid=dxid,
dxproj_id = dxproj_id,
mem_loc=id(self),
name=self._desc.get('name'))
return desc
def __str__(self):
return self._repr(use_name=True)
def __repr__(self):
return self._repr()
def __getattr__(self, attr):
if not self._desc:
self.describe()
try:
return self._desc[attr]
        except KeyError:
            raise AttributeError(attr)
def describe(self, *args, **kwargs):
'''
Avoid infinite recursion in __getattr__ if describe is not defined.
'''
raise NotImplementedError()
def set_id(self, dxid):
'''
:param dxid: New ID to be associated with the handler
:type dxid: string
Discards the currently stored ID and associates the handler with *dxid*
'''
if dxid is not None:
verify_string_dxid(dxid, self._class)
self._dxid = dxid
def get_id(self):
'''
:returns: ID of the associated object
:rtype: string
Returns the ID that the handler is currently associated with.
'''
return self._dxid
class DXDataObject(DXObject):
"""Abstract base class for all remote data object handlers.
.. note:: The attribute values below are current as of the last time
:meth:`~dxpy.bindings.DXDataObject.describe` was run.
(Access to any of the below attributes causes
:meth:`~dxpy.bindings.DXDataObject.describe` to be called
if it has never been called before.)
.. py:attribute:: name
String giving the name of the object
.. py:attribute:: folder
String giving the full path to the folder containing the object
.. py:attribute:: types
List of strings indicating the types associated with the object
.. py:attribute:: state
A string containing one of the values "open", "closing", or "closed"
.. py:attribute:: hidden
Boolean indicating whether the object is hidden or not
.. py:attribute:: links
List of strings indicating object IDs that are pointed to by the
object
.. py:attribute:: sponsored
Boolean indicating whether the object is sponsored by DNAnexus
.. py:attribute:: tags
        List of strings indicating the tags that are associated with the
object
.. py:attribute:: created
Timestamp at which the object was created, in milliseconds since
January 1, 1970 at midnight (UTC).
.. py:attribute:: modified
Timestamp at which the object was last modified, in milliseconds
since January 1, 1970 at midnight (UTC).
.. py:attribute:: createdBy
dict containing the following keys and values:
* user: the string ID of the user who created the object or
launched the job that created it
* job (optional): the string ID of the job that created the
object, if a job created the object
* executable (optional): the string ID of the app or applet that
the job was running, if a job created the object
"""
def __init__(self, dxid=None, project=None):
if not hasattr(self, '_class'):
raise NotImplementedError(
"DXDataObject is an abstract class; a subclass should be initialized instead.")
DXObject.__init__(self)
self.set_ids(dxid, project)
@staticmethod
def _get_creation_params(kwargs):
common_creation_params = {"project", "name", "tags", "types", "hidden", "properties", "details", "folder", "parents"}
dx_hash = {p: kwargs[p] for p in kwargs if p in common_creation_params and kwargs[p] is not None}
remaining_kwargs = {p: kwargs[p] for p in kwargs if p not in common_creation_params}
if "project" not in dx_hash:
dx_hash["project"] = dxpy.WORKSPACE_ID
return dx_hash, remaining_kwargs
def new(self, **kwargs):
'''
:param project: Project ID in which to create the new remote object
:type project: string
:param name: Name for the object
:type name: string
:param tags: Tags to add for the object
:type tags: list of strings
:param types: Types to add to the object
:type types: list of strings
:param hidden: Whether the object is to be hidden
:type hidden: boolean
:param properties: Properties given as key-value pairs of strings
:type properties: dict
:param details: Details to set for the object
:type details: dict or list
:param folder: Full path to the destination folder
:type folder: string
:param parents: If True, recursively create all parent folders if they are missing
:type parents: boolean
:rtype: :class:`DXDataObject`
Creates a data object with the given fields. Only *project* is
required, and only if no default project or workspace is set;
the remaining arguments are optional and have default behavior
as specified in the API documentation for the ``/new`` method of
each data object class.
'''
if not hasattr(self, '_class'):
            raise NotImplementedError(
                "DXDataObject is an abstract class; a subclass should "
                "be initialized instead.")
dx_hash, remaining_kwargs = self._get_creation_params(kwargs)
self._new(dx_hash, **remaining_kwargs)
def set_id(self, dxid):
'''
:param dxid: Object ID or a DNAnexus link (a dict with key "$dnanexus_link"); if a project ID is provided in the DNAnexus link, it will also be used to set the project ID
:type dxid: string or dict
Equivalent to calling
:meth:`~dxpy.bindings.DXDataObject.set_ids` with the same
arguments.
'''
self.set_ids(dxid)
def set_ids(self, dxid, project=None):
'''
        :param dxid: Object ID or a DNAnexus link (a dict with key "$dnanexus_link"); if a project ID is provided in the DNAnexus link, it will be used as *project* unless *project* has been explicitly provided
:type dxid: string or dict
:param project: Project ID
:type project: string
Discards the currently stored ID and associates the handler with
*dxid*. Associates the handler with the copy of the object in
*project* (if no project is explicitly specified, the default
data container is used).
'''
if is_dxlink(dxid):
dxid, project_from_link = get_dxlink_ids(dxid)
if project is None:
project = project_from_link
if dxid is not None:
verify_string_dxid(dxid, self._class)
self._dxid = dxid
if project is None:
self._proj = dxpy.WORKSPACE_ID
elif project is not None:
verify_string_dxid(project, ['project', 'container'])
self._proj = project
def get_proj_id(self):
'''
:returns: Project ID of associated object
:rtype: string
Returns the project ID, if any, that the handler is currently
associated with.
'''
return self._proj
def describe(self, incl_properties=False, incl_details=False, fields=None, default_fields=None, **kwargs):
"""
:param fields: set of fields to include in the output, for
example ``{'name', 'modified'}``. The field ``id`` is always
implicitly included. If ``fields`` is specified, the default
fields are not included (that is, only the fields specified
here, and ``id``, are included) unless ``default_fields`` is
additionally set to True.
:type fields: set or sequence of str
:param default_fields: if True, include the default fields in
addition to fields requested in ``fields``, if any; if
False, only the fields specified in ``fields``, if any, are
returned (defaults to False if ``fields`` is specified, True
otherwise)
:type default_fields: bool
:param incl_properties: if true, includes the properties of the
object in the output (deprecated; use
``fields={'properties'}, default_fields=True`` instead)
:type incl_properties: bool
:param incl_details: if true, includes the details of the object
in the output (deprecated; use ``fields={'details'},
default_fields=True`` instead)
:type incl_details: bool
:returns: Description of the remote object
:rtype: dict
Return a dict with a description of the remote data object.
The result includes the key-value pairs as specified in the API
documentation for the ``/describe`` method of each data object
class. The API defines some default set of fields that will be
included (at a minimum, "id", "class", etc. should be available,
and there may be additional fields that vary based on the
class); the set of fields may be customized using ``fields`` and
``default_fields``.
Any project-specific metadata fields (name, properties, and
tags) are obtained from the copy of the object in the project
associated with the handler, if possible.
"""
if self._dxid is None:
raise DXError('This {handler} handler has not been initialized with a {_class} ID and cannot be described'.format(
handler=self.__class__.__name__,
_class=self._class)
)
if (incl_properties or incl_details) and (fields is not None or default_fields is not None):
raise ValueError('Cannot specify properties or details in conjunction with fields or default_fields')
if incl_properties or incl_details:
describe_input = dict(properties=incl_properties, details=incl_details)
else:
describe_input = {}
if default_fields is not None:
describe_input['defaultFields'] = default_fields
if fields is not None:
describe_input['fields'] = {field_name: True for field_name in fields}
if self._proj is not None:
describe_input["project"] = self._proj
self._desc = self._describe(self._dxid, describe_input, **kwargs)
return self._desc
def add_types(self, types, **kwargs):
"""
:param types: Types to add to the object
:type types: list of strings
:raises: :class:`~dxpy.exceptions.DXAPIError` if the object is not in the "open" state
Adds each of the specified types to the remote object. Takes no
action for types that are already listed for the object.
"""
self._add_types(self._dxid, {"types": types}, **kwargs)
def remove_types(self, types, **kwargs):
"""
:param types: Types to remove from the object
:type types: list of strings
:raises: :class:`~dxpy.exceptions.DXAPIError` if the object is not in the "open" state
        Removes each of the specified types from the remote object. Takes
no action for types that the object does not currently have.
"""
self._remove_types(self._dxid, {"types": types}, **kwargs)
def get_details(self, **kwargs):
"""
Returns the contents of the details of the object.
:rtype: list or dict
"""
return self._get_details(self._dxid, **kwargs)
def set_details(self, details, **kwargs):
"""
:param details: Details to set for the object
:type details: dict or list
:raises: :class:`~dxpy.exceptions.DXAPIError` if the object is not in the "open" state
Sets the details for the remote object with the specified value.
If the input contains the string ``"$dnanexus_link"`` as a key
in a hash, it must be the only key in the hash, and its value
must be a valid ID of an existing object.
"""
return self._set_details(self._dxid, details, **kwargs)
def hide(self, **kwargs):
"""
:raises: :class:`~dxpy.exceptions.DXAPIError` if the object is not in the "open" state
Hides the remote object.
"""
return self._set_visibility(self._dxid, {"hidden": True}, **kwargs)
def unhide(self, **kwargs):
"""
:raises: :class:`~dxpy.exceptions.DXAPIError` if the object is not in the "open" state
Makes the remote object visible.
"""
return self._set_visibility(self._dxid, {"hidden": False}, **kwargs)
def rename(self, name, **kwargs):
"""
:param name: New name for the object
:type name: string
Renames the remote object.
The name is changed on the copy of the object in the project
associated with the handler.
"""
return self._rename(self._dxid, {"project": self._proj,
"name": name}, **kwargs)
def get_properties(self, **kwargs):
"""
:returns: Properties given as key-value pairs of strings
:rtype: dict
Returns the properties of the object.
The properties are read from the copy of the object in the
project associated with the handler.
"""
return self.describe(incl_properties=True, **kwargs)["properties"]
def set_properties(self, properties, **kwargs):
"""
:param properties: Property names and values given as key-value pairs of strings
:type properties: dict
Given key-value pairs in *properties* for property names and
values, the properties are set on the object for the given
property names. Any property with a value of :const:`None`
indicates the property will be deleted.
.. note:: Any existing properties not mentioned in *properties*
are not modified by this method.
The properties are written to the copy of the object in the
project associated with the handler.
The following example sets the properties for "name" and
"project" for a remote file::
dxfile.set_properties({"name": "George", "project": "cancer"})
Subsequently, the following would delete the property "project"::
dxfile.set_properties({"project": None})
"""
self._set_properties(self._dxid, {"project": self._proj,
"properties": properties},
**kwargs)
def add_tags(self, tags, **kwargs):
"""
:param tags: Tags to add to the object
:type tags: list of strings
Adds each of the specified tags to the remote object. Takes no
action for tags that are already listed for the object.
The tags are added to the copy of the object in the project
associated with the handler.
"""
self._add_tags(self._dxid, {"project": self._proj, "tags": tags},
**kwargs)
def remove_tags(self, tags, **kwargs):
"""
:param tags: Tags to remove from the object
:type tags: list of strings
Removes each of the specified tags from the remote object. Takes
no action for tags that the object does not currently have.
The tags are removed from the copy of the object in the project
associated with the handler.
"""
self._remove_tags(self._dxid, {"project": self._proj, "tags": tags},
**kwargs)
def close(self, **kwargs):
"""
Closes the object for further modification to its types,
details, visibility, and contents.
"""
return self._close(self._dxid, **kwargs)
def list_projects(self, **kwargs):
"""
:rtype: list of strings
Returns a list of project IDs of the projects that contain this
object and are visible to the requesting user.
"""
return self._list_projects(self._dxid, **kwargs)
def remove(self, **kwargs):
'''
:raises: :exc:`~dxpy.exceptions.DXError` if no project is associated with the object
Permanently removes the associated remote object from the
associated project.
'''
if self._proj is None:
raise DXError("Remove called when a project ID was not associated with this object handler")
dxpy.api.project_remove_objects(self._proj, {"objects": [self._dxid]},
**kwargs)
# Reset internal state
self._dxid = None
self._proj = None
self._desc = {}
def move(self, folder, **kwargs):
'''
:param folder: Folder route to which to move the object
:type folder: string
:raises: :exc:`~dxpy.exceptions.DXError` if no project is associated with the object
Moves the associated remote object to *folder*.
'''
if self._proj is None:
raise DXError("Move called when a project ID was not associated with this object handler")
dxpy.api.project_move(self._proj, {"objects": [self._dxid],
"destination": folder},
**kwargs)
def clone(self, project, folder="/", **kwargs):
'''
:param project: Destination project ID
:type project: string
:param folder: Folder route to which to move the object
:type folder: string
:raises: :exc:`~dxpy.exceptions.DXError` if no project is associated with the object
:returns: An object handler for the new cloned object
:rtype: :class:`DXDataObject`
Clones the associated remote object to *folder* in *project* and
returns an object handler for the new object in the destination
project.
'''
if self._proj is None:
raise DXError("Clone called when a project ID was not associated with this object handler")
dxpy.api.project_clone(self._proj,
{"objects": [self._dxid],
"project": project,
"destination": folder},
**kwargs)
cloned_copy = copy.copy(self)
cloned_copy.set_ids(cloned_copy.get_id(), project)
return cloned_copy
def _get_state(self, **kwargs):
'''
:returns: State of the remote object
:rtype: string
Queries the API server for the object's state. Returns a string
in {"open", "closing", "closed"}.
Note that this function is shorthand for:
dxclass.describe()["state"]
'''
return self.describe(fields={'state'}, **kwargs)["state"]
def _wait_on_close(self, timeout=3600*24*1, **kwargs):
elapsed = 0
while True:
state = self._get_state(**kwargs)
if state == "closed":
break
if state != "closing":
raise DXError("Unexpected state: " + state)
if elapsed >= timeout or elapsed < 0:
raise DXError("Reached timeout while waiting for the remote object to close")
time.sleep(2)
elapsed += 2
from .dxfile import DXFile, DXFILE_HTTP_THREADS, DEFAULT_BUFFER_SIZE
from .download_all_inputs import download_all_inputs
from .dxfile_functions import open_dxfile, new_dxfile, download_dxfile, upload_local_file, upload_string, list_subfolders, download_folder
from .dxgtable import DXGTable, NULL, DXGTABLE_HTTP_THREADS
from .dxgtable_functions import open_dxgtable, new_dxgtable
from .dxrecord import DXRecord, new_dxrecord
from .dxproject import DXContainer, DXProject
from .dxjob import DXJob, new_dxjob
from .dxanalysis import DXAnalysis
from .dxapplet import DXExecutable, DXApplet
from .dxapp import DXApp
from .dxglobalworkflow import DXGlobalWorkflow
from .dxworkflow import DXWorkflow, new_dxworkflow
from .auth import user_info, whoami
from .dxdataobject_functions import dxlink, is_dxlink, get_dxlink_ids, get_handler, describe, get_details, remove
from .search import (find_data_objects, find_executions, find_jobs, find_analyses, find_projects, find_apps, find_global_workflows,
find_one_data_object, find_one_project, find_one_app, resolve_data_objects, find_orgs,
org_find_members, org_find_projects, org_find_apps)
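# --- Illustrative usage sketch (not part of the original module) ---
# A minimal sketch of a typical data-object workflow against the handler API
# defined above, using the DXRecord helper imported a few lines up; the
# project ID and metadata values are made up.
if __name__ == "__main__":
    record = new_dxrecord(name="example-record", project="project-xxxx")
    record.set_properties({"sample": "S1"})
    record.add_tags(["demo"])
    record.close()
    print(record.describe(fields={"name", "tags", "state"}, default_fields=True))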
|
""" Nutritional characterization of the ingredients of the OFF taxonomy """
|
from __future__ import print_function
from typing import List  # needed for the `List[str]` type comment below
def parsefile(filepath):
    # type: (str) -> List[str]
    """Read a newline-separated list of names from `filepath`."""
    peoplenames = []
    with open(filepath, 'r') as peoplefile:
        for line in peoplefile:
            peoplenames.append(line.strip('\n'))
    return peoplenames
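# Illustrative usage (not part of the original snippet): write a few names to
# a temporary file and read them back.
if __name__ == "__main__":
    import tempfile
    with tempfile.NamedTemporaryFile('w', suffix='.txt', delete=False) as tmp:
        tmp.write('Ada\nGrace\nAlan\n')
    print(parsefile(tmp.name))  # ['Ada', 'Grace', 'Alan']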
|
#!/usr/bin/env python3
# date: 2019.09.24
# https://stackoverflow.com/questions/58085439/opencv-extract-pixels-with-rbg/
# replace pixels where `R > G > B`
import cv2
import numpy as np
img = cv2.imread('/home/furas/Obrazy/images/image.png')
# img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
# cv2.imread returns BGR order, so channel 2 is R, channel 1 is G, channel 0 is B
img[ (img[:,:,2] > img[:,:,1]) & (img[:,:,1] > img[:,:,0]) ] = 0
cv2.imshow('image', img)
cv2.waitKey(0)
cv2.destroyAllWindows()
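# Self-contained variant (illustrative): the same boolean-mask assignment on a
# tiny synthetic BGR array, so the masking logic can be checked without an
# image file on disk.
demo = np.array([[[10, 20, 30],    # B<G<R  -> zeroed
                  [30, 20, 10]]],  # B>G>R  -> kept
                dtype=np.uint8)
demo[(demo[:, :, 2] > demo[:, :, 1]) & (demo[:, :, 1] > demo[:, :, 0])] = 0
print(demo)  # first pixel becomes [0 0 0], second stays [30 20 10]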
|
##############################################################################
#
# Copyright (c) 2001 Zope Corporation and Contributors. All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.0 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE
#
##############################################################################
__doc__='''Iterator class
Unlike the builtin iterators of Python 2.2+, these classes are
designed to maintain information about the state of an iteration.
The Iterator() function accepts either a sequence or a Python
iterator. The next() method fetches the next item, and returns
true if it succeeds.
'''
__docformat__ = 'restructuredtext'
import string
class Iterator:
'''Simple Iterator class'''
__allow_access_to_unprotected_subobjects__ = 1
nextIndex = 0
def __init__(self, seq):
self.seq = iter(seq) # force seq to be an iterator
self._inner = iterInner
self._prep_next = iterInner.prep_next
def __getattr__(self, name):
try:
inner = getattr(self._inner, 'it_' + name)
except AttributeError:
            raise AttributeError(name)
return inner(self)
def next(self):
if not (hasattr(self, '_next') or self._prep_next(self)):
return 0
self.index = i = self.nextIndex
self.nextIndex = i+1
self._advance(self)
return 1
def _advance(self, it):
self.item = self._next
del self._next
del self.end
self._advance = self._inner.advance
self.start = 1
def number(self): return self.nextIndex
def even(self): return not self.index % 2
def odd(self): return self.index % 2
def letter(self, base=ord('a'), radix=26):
index = self.index
s = ''
while 1:
index, off = divmod(index, radix)
s = chr(base + off) + s
if not index: return s
def Letter(self):
return self.letter(base=ord('A'))
def Roman(self, rnvalues=(
(1000,'M'),(900,'CM'),(500,'D'),(400,'CD'),
(100,'C'),(90,'XC'),(50,'L'),(40,'XL'),
(10,'X'),(9,'IX'),(5,'V'),(4,'IV'),(1,'I')) ):
n = self.index + 1
s = ''
for v, r in rnvalues:
rct, n = divmod(n, v)
s = s + r * rct
return s
def roman(self, lower=string.lower):
return lower(self.Roman())
def first(self, name=None):
if self.start: return 1
return not self.same_part(name, self._last, self.item)
def last(self, name=None):
if self.end: return 1
return not self.same_part(name, self.item, self._next)
def same_part(self, name, ob1, ob2):
if name is None:
return ob1 == ob2
no = []
return getattr(ob1, name, no) == getattr(ob2, name, no) is not no
def __iter__(self):
return IterIter(self)
class InnerBase:
'''Base Inner class for Iterators'''
# Prep sets up ._next and .end
def prep_next(self, it):
it.next = self.no_next
it.end = 1
return 0
# Advance knocks them down
def advance(self, it):
it._last = it.item
it.item = it._next
del it._next
del it.end
it.start = 0
def no_next(self, it):
return 0
def it_end(self, it):
if hasattr(it, '_next'):
return 0
return not self.prep_next(it)
class SeqInner(InnerBase):
'''Inner class for sequence Iterators'''
def _supports(self, ob):
try: ob[0]
except (TypeError, AttributeError): return 0
except: pass
return 1
def prep_next(self, it):
i = it.nextIndex
try:
it._next = it.seq[i]
except IndexError:
it._prep_next = self.no_next
it.end = 1
return 0
it.end = 0
return 1
def it_length(self, it):
it.length = l = len(it.seq)
return l
try:
StopIteration=StopIteration
except NameError:
StopIteration="StopIteration"
class IterInner(InnerBase):
'''Iterator inner class for Python iterators'''
def _supports(self, ob):
try:
if hasattr(ob, 'next') and (ob is iter(ob)):
return 1
except:
return 0
def prep_next(self, it):
try:
it._next = it.seq.next()
except StopIteration:
it._prep_next = self.no_next
it.end = 1
return 0
it.end = 0
return 1
class IterIter:
def __init__(self, it):
self.it = it
self.skip = it.nextIndex > 0 and not it.end
def next(self):
it = self.it
if self.skip:
self.skip = 0
return it.item
if it.next():
return it.item
raise StopIteration
seqInner = SeqInner()
iterInner = IterInner()
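# A minimal usage sketch (Python 2, hypothetical sequence; not part of the module):
#
#     it = Iterator(['a', 'b', 'c'])
#     while it.next():
#         print it.index, it.item, it.letter()   # 0 a a / 1 b b / 2 c c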
|
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
sys.path.append('../')
from auto_scan_test import AutoScanTest, IgnoreReasons
from program_config import TensorConfig, ProgramConfig, OpConfig, CxxConfig, TargetType, PrecisionType, DataLayoutType, Place
import unittest
from functools import partial
import numpy as np
import hypothesis
from hypothesis import given, settings, seed, example, assume
import hypothesis.strategies as st
import argparse
class TestScatterOp(AutoScanTest):
def __init__(self, *args, **kwargs):
AutoScanTest.__init__(self, *args, **kwargs)
self.enable_testing_on_place(
TargetType.ARM,
PrecisionType.FP32,
DataLayoutType.NCHW,
thread=[1, 4])
def is_program_valid(self,
program_config: ProgramConfig,
predictor_config: CxxConfig) -> bool:
return True
def sample_program_configs(self, draw):
in_shape = draw(
st.lists(
st.integers(
min_value=1, max_value=7), min_size=2, max_size=6))
update_shape = in_shape
assume(
len(update_shape) == len(in_shape) and
update_shape[1:] == in_shape[1:])
index_shape = draw(
st.lists(
st.integers(
min_value=1, max_value=len(update_shape)),
min_size=1,
max_size=1))
index_shape[0] = in_shape[0]
assume(
len(index_shape) == 1 or
(len(index_shape) == 2 and index_shape[1] == 1))
index_type = draw(st.sampled_from(["int32", "int64"]))
overwrite = draw(st.booleans())
def generate_data(*args, **kwargs):
low, high = -10, 10
dtype = "float32"
shape = kwargs["shape"]
if "low" in kwargs:
low = kwargs["low"]
if "high" in kwargs:
high = kwargs["high"]
if "dtype" in kwargs:
dtype = kwargs["dtype"]
if dtype == "int32":
if low == high:
return low * np.ones(shape).astype(np.int32)
else:
return np.random.randint(low, high, shape).astype(np.int32)
elif dtype == "int64":
if low == high:
return low * np.ones(shape).astype(np.int64)
else:
return np.random.randint(low, high, shape).astype(np.int64)
elif dtype == "float32":
return (high - low
) * np.random.random(shape).astype(np.float32) + low
def generate_index(*args, **kwargs):
index_np = np.ones(index_shape).astype(np.int64)
for i in range(index_shape[0]):
index_np[i] = i
if kwargs["dtype"] == "int32":
index_np = index_np.astype(np.int32)
return index_np
scatter_op = OpConfig(
type="scatter",
inputs={
"X": ["input_data"],
"Ids": ["index"],
"Updates": ["updates"]
},
outputs={"Out": ["output_data"]},
attrs={"overwrite": overwrite})
program_config = ProgramConfig(
ops=[scatter_op],
weights={},
inputs={
"input_data": TensorConfig(data_gen=partial(
generate_data, shape=in_shape)),
"index": TensorConfig(data_gen=partial(
generate_index, dtype=index_type)),
"updates": TensorConfig(data_gen=partial(
generate_data, shape=update_shape))
},
outputs=["output_data"])
return program_config
def sample_predictor_configs(self):
return self.get_predictor_configs(), ["scatter"], (1e-5, 1e-5)
def add_ignore_pass_case(self):
pass
def test(self, *args, **kwargs):
target_str = self.get_target()
max_examples = 25
if target_str == "ARM":
# Make sure to generate enough valid cases for ARM
max_examples = 100
self.run_and_statis(quant=False, max_examples=max_examples)
if __name__ == "__main__":
unittest.main(argv=[''])
|
import os
import json
from flask import Blueprint, request
import talib as ta
from app.utils.stock import Stock
bp = Blueprint("api", __name__, url_prefix="/api/v1")
stock = Stock()
def get_indicator_desc(indicator) -> str:
    with open(os.path.join(os.getcwd(), "app/utils/static/indicators.json"), 'r') as f:
        indicators = json.load(f)
    try:
        return indicators[indicator]
    except Exception:
        return None
@bp.route("/stockdetail", methods=["GET", "POST"])
def stock_detail():
# reading args
ticker = request.form["ticker"]
indicator = request.form["indicator"] if request.form["indicator"] != "" else None
# intraday_mode = request.form["intraday-mode"]
# defining response
response = {}
response["name"] = stock.get_info(ticker)
data = stock.get_data(ticker)
try:
if indicator:
indicator_function = getattr(ta, indicator)
result = indicator_function(data["Close"], timeperiod=5)
data["Indicator"] = result
response["indicator"] = get_indicator_desc(indicator)
except Exception as e:
pass
# graph data
graph_data = stock.parse_data(data)
response['graph_data'] = graph_data
return json.dumps(response)
|
import unittest
import warnings
from geopy import exc
from geopy.compat import u
from geopy.point import Point
from geopy.geocoders import ArcGIS
from test.geocoders.util import GeocoderTestBase, env
class ArcGISTestCaseUnitTest(GeocoderTestBase):
def test_user_agent_custom(self):
geocoder = ArcGIS(
user_agent='my_user_agent/1.0'
)
self.assertEqual(geocoder.headers['User-Agent'], 'my_user_agent/1.0')
class ArcGISTestCase(GeocoderTestBase):
@classmethod
def setUpClass(cls):
cls.geocoder = ArcGIS(timeout=3)
def test_config_error(self):
"""
ArcGIS.__init__ invalid authentication
"""
with self.assertRaises(exc.ConfigurationError):
ArcGIS(username='a')
def test_scheme_config_error(self):
"""
ArcGIS.__init__ invalid scheme
"""
with self.assertRaises(exc.ConfigurationError):
ArcGIS(
username='a',
password='b',
referer='http://www.example.com',
scheme='http'
)
def test_geocode(self):
"""
ArcGIS.geocode
"""
self.geocode_run(
{"query": "435 north michigan ave, chicago il 60611 usa"},
{"latitude": 41.890, "longitude": -87.624},
)
def test_unicode_name(self):
"""
ArcGIS.geocode unicode
"""
self.geocode_run(
{"query": u("\u6545\u5bab")},
{"latitude": 39.916, "longitude": 116.390},
)
def test_empty_response(self):
self.geocode_run(
{"query": "dksahdksahdjksahdoufydshf"},
{},
expect_failure=True
)
def test_geocode_with_out_fields_string(self):
"""
ArcGIS.geocode with outFields string
"""
result = self.geocode_run(
{"query": "Trafalgar Square, London",
"out_fields": "Country"},
{}
)
self.assertDictEqual(result.raw['attributes'],
{'Country': 'GBR'})
def test_geocode_with_out_fields_list(self):
"""
ArcGIS.geocode with outFields list
"""
result = self.geocode_run(
{"query": "Trafalgar Square, London",
"out_fields": ["City", "Type"]},
{}
)
self.assertDictEqual(result.raw['attributes'],
{'City': 'London', 'Type': 'Tourist Attraction'})
def test_reverse_point(self):
"""
ArcGIS.reverse using point
"""
location = self.reverse_run(
{"query": Point(40.753898, -73.985071)},
{"latitude": 40.75376406311989, "longitude": -73.98489005863667},
)
self.assertIn('New York', location.address)
def test_reverse_not_exactly_one(self):
self.reverse_run(
{"query": Point(40.753898, -73.985071), "exactly_one": False},
{"latitude": 40.75376406311989, "longitude": -73.98489005863667},
)
def test_reverse_no_result(self):
self.reverse_run(
# North Atlantic Ocean
{"query": (35.173809, -37.485351)},
{},
expect_failure=True
)
def test_custom_wkid(self):
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
# Custom wkid should be ignored and a warning should be issued.
location = self.reverse_run(
{"query": Point(40.753898, -73.985071), "wkid": 2000},
{"latitude": 40.75376406311989,
"longitude": -73.98489005863667},
)
self.assertIn('New York', location.address)
self.assertEqual(1, len(w))
@unittest.skipUnless(
(env.get('ARCGIS_USERNAME') is not None
or env.get('ARCGIS_PASSWORD') is not None
or env.get('ARCGIS_REFERER') is not None),
"No ARCGIS_USERNAME or ARCGIS_PASSWORD or ARCGIS_REFERER env variable set"
)
class ArcGISAuthenticatedTestCase(GeocoderTestBase):
@classmethod
def setUpClass(cls):
cls.geocoder = ArcGIS(
username=env['ARCGIS_USERNAME'],
password=env['ARCGIS_PASSWORD'],
referer=env['ARCGIS_REFERER'],
timeout=3
)
def test_basic_address(self):
"""
ArcGIS.geocode using authentication
"""
self.geocode_run(
{"query": "Potsdamer Platz, Berlin, Deutschland"},
{"latitude": 52.5094982, "longitude": 13.3765983},
)
|
from django.db.models import Model, CharField, ForeignKey
from tree.fields import PathField
from tree.models import TreeModelMixin
class Place(Model, TreeModelMixin):
name = CharField(max_length=50)
parent = ForeignKey('self', null=True, blank=True)
path = PathField()
class Meta:
ordering = ('path', 'name')
|
# import win32api
# import win32con
import time
class Game:
keymap = {
'up': 0x30,
'down': 0x31,
'left': 0x32,
'right': 0x33,
'a': 0x34,
'b': 0x35,
'start': 0x36,
'select': 0x37
}
def get_valid_buttons(self):
return [button for button in self.keymap.keys()]
def is_valid_button(self, button):
return button in self.keymap.keys()
def button_to_key(self, button):
return self.keymap[button]
def push_button(self, button):
print "Button pressed:", button, self.button_to_key(button)
# win32api.keybd_event(self.button_to_key(button), 0, 0, 0)
# time.sleep(.15)
# win32api.keybd_event(self.button_to_key(button), 0, win32con.KEYEVENTF_KEYUP, 0)
|
import math
import unittest
from spatial import Facet, Ray, Transform, Vector3, vector3
class TestRay(unittest.TestCase):
def setUp(self):
self.direction = Vector3(-1, 4, 5)
self.ray = Ray(Vector3(1, 2, 3), self.direction)
def test__init__normalized_the_direction(self):
self.assertAlmostEqual(self.ray.direction, vector3.normalize(self.direction))
def test__init__raises_if_direction_is_zero_vector(self):
with self.assertRaises(ValueError):
Ray(Vector3(), Vector3())
def test__str__contains_the_components_of_the_ray(self) -> None:
self.assertTrue(str(self.ray.origin) in str(self.ray))
normalized = vector3.normalize(self.ray.direction)
self.assertTrue(str(normalized) in str(self.ray))
def test_closest_intersection_returns_the_closest_intersection(self) -> None:
facets = [
Vector3(), # Ignored by the algorithm.
Facet([Vector3(), Vector3.X(), Vector3.Y()]),
Facet([Vector3(0, 0, 1), Vector3(1, 0, 1), Vector3(0, 1, 1)]),
]
r = Ray(Vector3(0, 0, 2), Vector3.Z())
actual = r.closest_intersection(facets)
self.assertIsNone(actual.t)
self.assertIsNone(actual.obj)
r = Ray(Vector3(0, 0, 2), -Vector3.Z())
actual = r.closest_intersection(facets)
self.assertEqual(actual.t, 1)
self.assertEqual(actual.obj, facets[2])
def test_evaluate_returns_the_location_along_the_ray(self) -> None:
self.assertEqual(self.ray.evaluate(0), self.ray.origin)
self.assertEqual(
self.ray.evaluate(2), self.ray.origin + 2 * vector3.normalize(self.ray.direction)
)
    def test_transform_transforms_the_rays_origin_and_direction(self) -> None:
r = Ray(Vector3(1, 1, 0), Vector3(1, 1, 0))
t = Transform.from_axis_angle_translation(
axis=Vector3.Z(), angle=math.radians(-45), translation=Vector3(-math.sqrt(2), 0, 0)
)
expected = Ray(Vector3(), Vector3.X())
actual = r.transform(t)
self.assertAlmostEqual(actual.origin, expected.origin)
self.assertAlmostEqual(actual.direction, expected.direction)
|
from django.urls import path
from .views import TestView
urlpatterns = [
path('', TestView.as_view()),
]
|
# encoding: utf-8
#my blog:http://www.lylinux.org
import urllib2
from BeautifulSoup import BeautifulSoup
import socket
import uuid
def user_agent(url):
req_header = {'User-Agent':'Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US; rv:1.9.1.6) Gecko/20091201 Firefox/3.5.6'}
req_timeout = 20
try:
req = urllib2.Request(url,None,req_header)
page = urllib2.urlopen(req,None,req_timeout)
    except urllib2.URLError as e:
        print e
        return None
    except socket.timeout:
        return user_agent(url)  # retry on timeout
return page
def page_loop(url, save_path):
page = user_agent(url)
soup = BeautifulSoup(page)
total_img = 0
img = soup.findAll(['img'])
for myimg in img:
link = myimg.get('src')
total_img += 1
# content2 = urllib2.urlopen(link).read()
content2 = user_agent(link).read()
with open(save_path + str(uuid.uuid1()),'wb') as code:
code.write(content2)
print total_img
return total_img
pictures_path = u'D:\\var\\data\\pictures\\mm\\'
def douban():
baseurl = "http://dbmeizi.com/"
page_start = 0
page_stop = 4
total = 0
for pageid in range(page_start,page_stop):
url = baseurl + '?p=%s' % pageid
        total += page_loop(url, pictures_path)
    print total # total number of images downloaded
def jiandan():
baseurl = "http://jandan.net/ooxx/"
page_start = 1000
page_stop = 1100
total = 0
for pageid in range(page_start,page_stop):
url = baseurl + '?p=%s' % pageid
        total += page_loop(url, pictures_path)
    print total # total number of images downloaded
jiandan()
|
#!/usr/bin/env python3
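# Read n, k and the n heights from stdin; print how many heights are at least k.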
_, k, *h = map(int, open(0).read().split())
print(sum(i >= k for i in h))
|
import sys
import threading
import unittest
import pydevconsole
from _pydev_comm.pydev_rpc import make_rpc_client, start_rpc_server_and_make_client, start_rpc_server
from _pydevd_bundle import pydevd_io
from pydev_console.protocol import PythonConsoleFrontendService, PythonConsoleBackendService
from pydevconsole import enable_thrift_logging, create_server_handler_factory
try:
raw_input
raw_input_name = 'raw_input'
except NameError:
raw_input_name = 'input'
#=======================================================================================================================
# Test
#=======================================================================================================================
class Test(unittest.TestCase):
def test_console_hello(self):
self.original_stdout = sys.stdout
sys.stdout = pydevd_io.IOBuf()
try:
sys.stdout.encoding = sys.stdin.encoding
except AttributeError:
# In Python 3 encoding is not writable (whereas in Python 2 it doesn't exist).
pass
try:
rpc_client = self.start_client_thread() #@UnusedVariable
import time
time.sleep(.3) #let's give it some time to start the threads
from _pydev_bundle import pydev_localhost
interpreter = pydevconsole.InterpreterInterface(threading.currentThread(), rpc_client=rpc_client)
(result,) = interpreter.hello("Hello pydevconsole")
self.assertEqual(result, "Hello eclipse")
finally:
sys.stdout = self.original_stdout
def test_console_requests(self):
self.original_stdout = sys.stdout
sys.stdout = pydevd_io.IOBuf()
try:
rpc_client = self.start_client_thread() #@UnusedVariable
import time
time.sleep(.3) #let's give it some time to start the threads
from _pydev_bundle import pydev_localhost
from _pydev_bundle.pydev_console_types import CodeFragment
interpreter = pydevconsole.InterpreterInterface(threading.currentThread(), rpc_client=rpc_client)
sys.stdout = pydevd_io.IOBuf()
interpreter.add_exec(CodeFragment('class Foo:\n CONSTANT=1\n'))
interpreter.add_exec(CodeFragment('foo=Foo()'))
interpreter.add_exec(CodeFragment('foo.__doc__=None'))
interpreter.add_exec(CodeFragment('val = %s()' % (raw_input_name,)))
interpreter.add_exec(CodeFragment('50'))
interpreter.add_exec(CodeFragment('print (val)'))
found = sys.stdout.getvalue().split()
try:
self.assertEqual(['50', 'input_request'], found)
except:
try:
self.assertEqual(['input_request'], found) #IPython
except:
self.assertEqual([u'50', u'input_request'], found[1:]) # IPython 5.1
self.assertTrue(found[0].startswith(u'Out'))
comps = interpreter.do_get_completions('foo.', 'foo.')
self.assertTrue(
('CONSTANT', '', '', '3') in comps or ('CONSTANT', '', '', '4') in comps, \
'Found: %s' % comps
)
comps = interpreter.do_get_completions('"".', '"".')
self.assertTrue(
('__add__', 'x.__add__(y) <==> x+y', '', '3') in comps or
('__add__', '', '', '4') in comps or
('__add__', 'x.__add__(y) <==> x+y\r\nx.__add__(y) <==> x+y', '()', '2') in comps or
            ('__add__', 'x.\n__add__(y) <==> x+yx.\n__add__(y) <==> x+y', '()', '2') in comps,
'Did not find __add__ in : %s' % (comps,)
)
completions = interpreter.do_get_completions('', '')
for c in completions:
if c[0] == 'AssertionError':
break
else:
self.fail('Could not find AssertionError')
completions = interpreter.do_get_completions('Assert', 'Assert')
for c in completions:
if c[0] == 'RuntimeError':
self.fail('Did not expect to find RuntimeError there')
self.assertTrue(('__doc__', None, '', '3') not in interpreter.do_get_completions('foo.CO', 'foo.'))
comps = interpreter.do_get_completions('va', 'va')
self.assertTrue(('val', '', '', '3') in comps or ('vars', '', '', '4') in comps)
interpreter.add_exec(CodeFragment('s = "mystring"'))
desc = interpreter.getDescription('val')
self.assertTrue(desc.find('str(object) -> string') >= 0 or
desc == "'input_request'" or
desc.find('str(string[, encoding[, errors]]) -> str') >= 0 or
desc.find('str(Char* value)') >= 0 or
desc.find('str(object=\'\') -> string') >= 0 or
desc.find('str(value: Char*)') >= 0 or
desc.find('str(object=\'\') -> str') >= 0 or
desc.find('unicode(object=\'\') -> unicode object') >= 0 or
desc.find('The most base type') >= 0 # Jython 2.7 is providing this :P
,
'Could not find what was needed in %s' % desc)
desc = interpreter.getDescription('val.join')
self.assertTrue(desc.find('S.join(sequence) -> string') >= 0 or
desc.find('S.join(sequence) -> str') >= 0 or
desc.find('S.join(iterable) -> string') >= 0 or
desc == "<builtin method 'join'>" or
desc == "<built-in method join of str object>" or
desc.find('str join(str self, list sequence)') >= 0 or
desc.find('S.join(iterable) -> str') >= 0 or
desc.find('S.join(iterable) -> unicode') >= 0 or
desc.find('join(self: str, sequence: list) -> str') >= 0 or
desc.find('Concatenate any number of strings.') >= 0,
"Could not recognize: %s" % (desc,))
finally:
sys.stdout = self.original_stdout
def create_frontend_handler(self):
class HandleRequestInput:
def __init__(self):
self.requested_input = False
self.notified_finished = 0
self.rpc_client = None
def requestInput(self, path):
self.requested_input = True
return 'input_request'
def notifyFinished(self, needs_more_input):
self.notified_finished += 1
def notifyAboutMagic(self, commands, is_auto_magic):
pass
return HandleRequestInput()
def start_client_thread(self):
from _pydev_bundle import pydev_localhost
enable_thrift_logging()
# here we start the test server
server_socket = start_rpc_server_and_make_client(pydev_localhost.get_localhost(), 0,
PythonConsoleFrontendService,
PythonConsoleBackendService,
create_server_handler_factory(self.create_frontend_handler()))
host, port = server_socket.getsockname()
import time
time.sleep(1)
rpc_client, _ = make_rpc_client(PythonConsoleFrontendService, host, port)
return rpc_client
def start_debugger_server_thread(self, debugger_port, socket_code):
class DebuggerServerThread(threading.Thread):
def __init__(self, debugger_port, socket_code):
threading.Thread.__init__(self)
self.debugger_port = debugger_port
self.socket_code = socket_code
def run(self):
import socket
s = socket.socket()
s.bind(('', debugger_port))
s.listen(1)
socket, unused_addr = s.accept()
socket_code(socket)
debugger_thread = DebuggerServerThread(debugger_port, socket_code)
debugger_thread.setDaemon(True)
debugger_thread.start()
return debugger_thread
def get_free_addresses(self):
from _pydev_bundle.pydev_localhost import get_socket_names
socket_names = get_socket_names(2, True)
port0 = socket_names[0][1]
port1 = socket_names[1][1]
assert port0 != port1
assert port0 > 0
assert port1 > 0
return port0, port1
def test_server(self):
self.original_stdout = sys.stdout
sys.stdout = pydevd_io.IOBuf()
try:
from _pydev_bundle.pydev_localhost import get_socket_name
host, port = get_socket_name(close=True)
class ServerThread(threading.Thread):
def __init__(self, backend_port):
threading.Thread.__init__(self)
self.backend_port = backend_port
def run(self):
from _pydev_bundle import pydev_localhost
pydevconsole.start_server(self.backend_port)
server_thread = ServerThread(port)
server_thread.setDaemon(True)
server_thread.start()
import time
time.sleep(1) #let's give it some time to start the threads
rpc_client, server_transport = make_rpc_client(PythonConsoleBackendService, host, port)
server_service = PythonConsoleFrontendService
server_handler = self.create_frontend_handler()
start_rpc_server(server_transport, server_service, server_handler)
rpc_client.execLine('class Foo:')
rpc_client.execLine(' pass')
rpc_client.execLine('')
rpc_client.execLine('foo = Foo()')
rpc_client.execLine('a = %s()' % (raw_input_name,))
rpc_client.execLine('print (a)')
initial = time.time()
while not server_handler.requested_input:
if time.time() - initial > 2:
raise AssertionError('Did not get the return asked before the timeout.')
time.sleep(.1)
found = sys.stdout.getvalue()
while ['input_request'] != found.split():
found += sys.stdout.getvalue()
if time.time() - initial > 2:
break
time.sleep(.1)
self.assertIn('input_request', found.split())
finally:
sys.stdout = self.original_stdout
|
"""
This function is DEPRECATED.
use chainer.functions.concat instead!
"""
from chainer import function, cuda
from chainer.utils import type_check
class Fusion(function.Function):
"""Fusion numpy/cupy element"""
def __init__(self, in_channel_list=None):
self.in_channel_list = in_channel_list
def check_type_forward(self, in_types):
type_check.expect(
in_types.size() == 2,
)
h1_type, h2_type = in_types
type_check.expect(
h1_type.shape[2] == h2_type.shape[2],
h1_type.shape[3] == h2_type.shape[3]
)
@property
def label(self):
return 'Fusion'
def forward(self, inputs):
xp = cuda.get_array_module(*inputs)
h1, h2 = inputs
return xp.concatenate((h1, h2), axis=1),
def backward(self, inputs, grad_outputs):
xp = cuda.get_array_module(*inputs)
gy = grad_outputs[0]
gh1, gh2 = xp.split(gy, [self.in_channel_list[0], ], axis=1)
return gh1, gh2
def fusion(h1, h2):
"""
:param h1:
:param h2:
:return:
"""
"""Flip (order in reverse) the element of an input variable without copy
:param x: (~chainer.Variable) Input variable
:param axes: (tuple of ints) By default, flip all axes,
otherwise flip only specified axes
:return: (~chainer.Variable) Variable whose element is flipped
"""
in_channel_h1 = h1.data.shape[1]
in_channel_h2 = h2.data.shape[1]
return Fusion((in_channel_h1, in_channel_h2))(h1, h2)
|
# -*- coding: utf-8 -*-
"""
Defines the common interface that must be implemented by all transformation
databases.
"""
__all__ = ('TransformationDatabase',)
from typing import Collection, Iterator
import abc
import typing
if typing.TYPE_CHECKING:
from ..base import Transformation
class TransformationDatabase(Collection['Transformation'], abc.ABC):
"""Stores the set of possible transformations for a given program."""
@abc.abstractmethod
def __contains__(self, transformation: object) -> bool:
"""Determines if a given transformation belongs to this database."""
...
@abc.abstractmethod
def __iter__(self) -> Iterator['Transformation']:
"""Returns an iterator over all transformations in this database."""
...
@abc.abstractmethod
def __len__(self) -> int:
"""Returns the number of transformations in this database."""
...
@abc.abstractmethod
def choice(self) -> 'Transformation':
"""Selects a single transformation from this database at random."""
...
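# A minimal in-memory sketch of the interface above (hypothetical; not part of
# the package). It wraps a plain list of transformations and samples uniformly:
#
#     import random
#
#     class InMemoryTransformationDatabase(TransformationDatabase):
#         def __init__(self, transformations):
#             self._transformations = list(transformations)
#
#         def __contains__(self, transformation):
#             return transformation in self._transformations
#
#         def __iter__(self):
#             return iter(self._transformations)
#
#         def __len__(self):
#             return len(self._transformations)
#
#         def choice(self):
#             return random.choice(self._transformations)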
|
import torch
from abc import ABC, abstractmethod
from .vector import FVector, PVector
class AbstractPushForward(ABC):
@abstractmethod
def __init__(self, generator):
        raise NotImplementedError
class PushForwardDense(AbstractPushForward):
def __init__(self, generator, data=None):
self.generator = generator
if data is not None:
self.data = data
else:
self.data = generator.get_jacobian()
def get_dense_tensor(self):
return self.data
def mv(self, v):
v_flat = torch.mv(self.data.view(-1, self.data.size(-1)),
v.get_flat_representation())
v_flat = v_flat.view(self.data.size(0), self.data.size(1))
return FVector(vector_repr=v_flat)
class PushForwardImplicit(AbstractPushForward):
def __init__(self, generator):
self.generator = generator
def mv(self, v):
return self.generator.implicit_Jv(v)
class PullBackAbstract(ABC):
@abstractmethod
def __init__(self, generator):
        raise NotImplementedError
class PullBackDense(PullBackAbstract):
def __init__(self, generator, data=None):
self.generator = generator
if data is not None:
self.data = data
else:
self.data = generator.get_jacobian()
def get_dense_tensor(self):
return self.data
def mv(self, v):
v_flat = torch.mv(self.data.view(-1, self.data.size(-1)).t(),
v.get_flat_representation().view(-1))
return PVector(self.generator.layer_collection, vector_repr=v_flat)
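# Note: PushForwardDense.mv applies the dense Jacobian J to a parameter-space
# vector (J v), while PullBackDense.mv applies its transpose (J^T v), mapping an
# output-space vector back to parameter space.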
|
#!/usr/bin/env python3
"""
Someone's Mattermost API v4 bindings.
Copyright (c) 2016-2021 by Someone <someone@somenet.org> (aka. Jan Vales <jan@jvales.net>)
published under MIT-License
post stdin to a channel.
"""
import os
import sys
from inspect import cleandoc
import mattermost
def main():
show_usage = False
mm_api = None
chan_id = None
if "MM_APIURL" in os.environ:
mm_api = mattermost.MMApi(os.environ["MM_APIURL"])
else:
show_usage = True
if ("MM_USER" in os.environ and "MM_PASS" in os.environ) or ("MM_BEARER" in os.environ):
if "MM_BEARER" in os.environ:
mm_api.login(bearer=os.environ["MM_BEARER"])
else:
mm_api.login(os.environ["MM_USER"], os.environ["MM_PASS"])
else:
show_usage = True
if "MM_CHANID" in os.environ:
chan_id = os.environ["MM_CHANID"]
else:
show_usage = True
if len(sys.argv) < 1 or len(sys.argv) > 3:
show_usage = True
if show_usage:
print(cleandoc("""required ENV-parameters: MM_APIURL, MM_USER+MM_PASS or MM_BEARER, MM_CHANID.
arguments: [PREFIX] [SUFFIX]
"""))
sys.exit(1)
prefix = suffix = ""
try:
prefix = sys.argv[1].replace("\\n", "\n")
suffix = sys.argv[2].replace("\\n", "\n")
except:
pass
print("Posting to channel:" +str(mm_api.create_post(chan_id, prefix+sys.stdin.read()+suffix, props={"from_webhook":"true"})))
mm_api.logout()
if __name__ == '__main__':
main()
|
# coding: utf-8
###
# @file gar_bench.py
# @author Arsany Guirguis <arsany.guirguis@epfl.ch>
#
# @section LICENSE
#
# Copyright (c) 2020 Arsany Guirguis.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
# @section DESCRIPTION
# Benchmarking GARs in the Garfield++ library
###
#!/usr/bin/env python
import os
import torch
from time import time
import sys
import aggregators
gars = aggregators.gars
ds = [pow(10,i) for i in range(5)]
ns = [pow(2,i) for i in range(10)]
fs = [pow(2,i) for i in range(10)]
dev='cuda'
def bench_n(d,f):
print("Benchmarking GARs with increasing n")
for k in gars:
for n in ns:
grads = [torch.rand(d).to(dev) for i in range(n)]
if n < 2*f+1 or (n < 2*f+3 and k.find('krum') != -1) or ((n < 4*f+3 or n > 23) and k.find('bulyan') != -1):
continue
print("** n={} f={} d={} GAR={} **".format(n,f,d,k))
gar = gars.get(k)
t = time()
gar(gradients=grads, f=f)
print("time: ", time()-t)
del grads
def bench_d(n,f):
print("Benchmarking GARs with increasing d")
for k in gars:
for d in ds:
grads = [torch.rand(d).to(dev) for i in range(n)]
if n < 2*f+1 or (n < 2*f+3 and k.find('krum') != -1) or (n < 4*f+3 and k.find('bulyan') != -1):
continue
print("** n={} f={} d={} GAR={} **".format(n,f,d,k))
gar = gars.get(k)
t = time()
gar(gradients=grads, f=f)
print("time: ", time()-t)
del grads
def bench_f(n,d):
print("Benchmarking GARs with increasing f")
grads = [torch.rand(d).to(dev) for i in range(n)]
for k in gars:
for f in fs:
if n < 2*f+1 or (n < 2*f+3 and k.find('krum') != -1) or (n < 4*f+3 and k.find('bulyan') != -1):
break
print("** n={} f={} d={} GAR={} **".format(n,f,d,k))
gar = gars.get(k)
t = time()
gar(gradients=grads, f=f)
print("time: ", time()-t)
bench_n(100000,1) #bench_n(d,f)
#bench_d(23,5) #bench_d(n,f)
#bench_f(23,100000) #bench_f(n,d)
|
import sys
from itertools import izip
'''
Code used to replace 0 and 1's with the original words from the source
'''
file1 = sys.argv[1] # Source file
file2 = sys.argv[2] # Target file with 0 and 1's
with open(file1) as textfile1, open(file2) as textfile2:
for x, y in izip(textfile1, textfile2):
src = x.strip()
tgt = y.strip()
src_words = src.split()
tgt_words = tgt.split()
#print(src_words, len(src_words))
#print(tgt_words, len(tgt_words))
if len(src_words) != len(tgt_words):
print('Error in files')
exit(0)
line = ''
for index, tgt_word in enumerate(tgt_words):
if tgt_word == '1':
line += src_words[index] + ' '
print(line.strip())
|
inputs = [] #Somewhere to store the current status (not really necessary)
pygame = None
buttonIDs = {0 : "1",
1 : "2",
2 : "3",
3 : "4",
4 : "LB",
5 : "RB",
6 : "LT",
7 : "RT",
8 : "Back",
9 : "Start",
10 : "L Joystick",
11 : "R Joystick",
}
"""
Axis Configuration
0 - Left Stick L/R (-1 to +1)
1 - Left Stick U/D (-1 to +1)
2 - Right Stick L/R (-1 to +1)
3 - Right Stick U/D (-1 to +1)
Hat Configuration
(X/Y (-1 to +1), U/D (-1 to +1))
"""
#Start up pygame if necessary
def startPygame():
global pygame
import pygame
#Initialize all libraries and the joystick
try:
pygame.init()
pygame.joystick.init()
except:
print "Error: PyGame failed to intialize!"
#Stop pygame if necessary
def stopPygame():
pygame.quit()
#Start the joystick
def startJoystick():
print "Initializing gamepad..."
global joystick
try:
joystick = pygame.joystick.Joystick(0)
except:
print "Error: Joystick not found!"
try:
joystick.init()
print "Gamepad ready!"
except:
print "Error: Joystick failed to initialize!"
#Duplicate of the Arduino map function (borrowed from here: https://mail.python.org/pipermail/tutor/2013-August/097291.html)
def map(x, in_min, in_max, out_min, out_max):
return (x - in_min) * (out_max - out_min) // (in_max - in_min) + out_min
#Convert joystick value to a hex value (-1 to 1 mapped to 0 to F)
def toHex(num):
mapped = int(map(float(num), -1.0, 1.0, 0, 15)) #Map value between 0 and 15
return ["0", "1","2","3","4","5","6","7","8","9","A","B","C","D","E","F"][mapped] #Convert to hex
#Get the state of all buttons/hats/sticks on the gamepad
def getState():
global inputs
z = pygame.event.get()
#axes = [toHex(joystick.get_axis(0)), toHex(joystick.get_axis(1)), toHex(joystick.get_axis(2)), toHex(joystick.get_axis(3)), toHex(joystick.get_hat(0)[0]), toHex(joystick.get_hat(0)[1])]
axes = [joystick.get_axis(0), joystick.get_axis(1), joystick.get_axis(2), joystick.get_axis(3)]
hat = joystick.get_hat(0)
buttons = [joystick.get_button(button) for button in range(12)]
return axes, hat, buttons
#Automatically start the necessary functions (so the user doesn't have to)
startPygame()
startJoystick()
"""
while True:
pygame.event.get()
print "".join([str(joystick.get_button(x)) for x in range(12)])
"""
|
from __future__ import annotations
import os
import shutil
import logging
from utils.run import run, RunError
from manage.components.base import Component, Task, Context
from manage.paths import TARGET_DIR
def clone(path: str, remote: str, branch: str = None, clean: bool = False) -> bool:
# FIXME: Pull if update available
if os.path.exists(path):
logging.info(f"Repo '{remote}' is cloned already")
return False
try:
os.makedirs(TARGET_DIR, exist_ok=True)
run(
["git", "clone", remote, os.path.basename(path)],
cwd=os.path.dirname(path),
add_env={"GIT_TERMINAL_PROMPT": "0"},
)
if branch:
run(["git", "checkout", branch], cwd=path)
run(["git", "submodule", "update", "--init", "--recursive"], cwd=path)
except RunError:
if os.path.exists(path):
shutil.rmtree(path)
raise
if clean:
shutil.rmtree(os.path.join(path, ".git"))
return True
class GitCloneTask(Task):
    def __init__(self, path: str, sources: list[tuple[str, str]]):
super().__init__()
self.path = path
self.sources = sources
def run(self, ctx: Context) -> bool:
last_error = None
for remote, branch in self.sources:
try:
return clone(self.path, remote, branch, clean=True)
except RunError as e:
last_error = e
continue
if last_error is not None:
raise last_error
    def artifacts(self) -> list[str]:
return [self.path]
class RepoList(Component):
    def __init__(self, name: str, sources: list[tuple[str, str]]):
super().__init__()
self.name = name
self.path = os.path.join(TARGET_DIR, name)
self.sources = sources
self.clone_task = GitCloneTask(self.path, self.sources)
def tasks(self) -> dict[str, Task]:
return {
"clone": self.clone_task,
}
class Repo(RepoList):
def __init__(self, name: str, remote: str, branch: str = None):
        super().__init__(name, [(remote, branch)])
|
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
print("This script takes data.csv as default data")
x_label=input("Enter x label: ")
y_label=input("Enter y label: ")
data=pd.read_csv("data.csv", header=None)
m=len(data)
x=np.hstack((data.iloc[:, 0])).reshape(m,1)
Y=np.hstack((data.iloc[:, 1])).reshape(m,1)
ones= np.ones((m,1))
X=np.hstack((ones,x))
def costfn(X, Y, theta):
temp=X.dot(theta)-Y
return np.sum(np.power(temp,2))/(2*m)
def normal(X, Y):
temp=X.transpose()
temp=temp.dot(X)
temp=np.linalg.inv(temp)
temp=temp.dot(X.transpose())
theta=temp.dot(Y)
return theta
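# Solve for theta with the closed-form normal equation: theta = (X^T X)^(-1) X^T Y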
theta=normal(X, Y)
cost_value=costfn(X, Y, theta)
print("Value of theta and minimum error")
print(theta)
print(cost_value)
plt.scatter(x, Y)
plt.plot(x, X.dot(theta), 'r')
plt.xlabel(x_label)
plt.ylabel(y_label)
plt.show()
min_theta=int(np.amin(theta)-5)
max_theta=int(np.amax(theta)+5)
theta0=np.arange(min_theta,max_theta,0.1)
theta1=np.arange(min_theta,max_theta,0.1)
j_vals= np.zeros((len(theta0),len(theta1)))
t=np.ones((2,1))
for i in range(len(theta0)):
for j in range(len(theta1)):
t[0][0]=theta0[i]
t[1][0]=theta1[j]
j_vals[i][j]=costfn(X, Y, t)
plt.contourf(theta0,theta1, j_vals)
plt.show()
|
import ast
import json
import numpy as np
from methods.utils import gaussPartial
def cuadraticSpline(x, y):
x = ast.literal_eval(x)
y = ast.literal_eval(y)
res = {}
x = np.asfarray(x)
y = np.asfarray(y)
n = len(x)
m = 3*(n-1)
A = np.zeros([m,m])
b = np.zeros([m,1])
S = []
if n != len(y):
res["source"] = "X and Y boundaries are different"
res["error"] = True
        return res
A[0][0:3] = [x[0]**2,x[0], 1]
b[0] = y[0]
for val in range(1,n):
A[val][ 3*val-3:3*val] = [x[val]**2, x[val], 1]
b[val] = y[val]
for val in range(1,n-1):
A[n-1+val][3*val-3:3*val+3] = [x[val]**2, x[val], 1,-x[val]**2, -x[val], -1]
for val in range(1,n-1):
A[2*n-3+val][3*val-3:3*val+3] = [x[val]*2, 1, 0,-x[val]*2, -1, 0]
    A[m-1,0] = 2
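    # Rows assembled above: interpolation conditions (each data point lies on a
    # segment), continuity of the spline value and of its first derivative at the
    # interior knots, and a last row forcing the quadratic coefficient of the
    # first segment to zero (linear first piece) as the boundary condition.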
values = gaussPartial(A,b)
if values["error"]:
res["error"] = True
res["source"] = values["source"]
return res
vals = sorted(values["values"], key = lambda x: x[0])
pols = []
for val in range(0,len(vals)-2,3):
vals[val][1] = round(vals[val][1],4)
vals[val + 1][1] = round( vals[val + 1][1],4)
vals[val + 2][1] = round(vals[val + 2][1],4)
pol = "{0}*x^2+{1}*x+{2}".format(vals[val][1], vals[val+1][1],vals[val+2][1])
pol = pol.replace(" ", "").replace("--", "+").replace("++", "+").replace("+-", "-").replace("-+", "-")
S.append([vals[val][1], vals[val+1][1], vals[val+2][1]])
pols.append(pol)
A = np.array(A).astype(float)
S = np.array(S).astype(float)
res["polynoms"] = pols
res["values"] = S.tolist()
res["error"] = False
res["matrix"] = json.dumps(A.tolist())
return res
|
import random, side, discord
#Variables
token = open('note_token_test', 'r').read()
client = discord.Client()
results = [('Water','Aether'),('Water','Fire'),('Air','Water'),('Air','Earth'),('Fire','Air'),
('Fire','Aether'),('Earth','Fire'),('Earth','Water'),('Aether','Earth'),('Aether','Air'),]
moves = [result[1] for result in results]
@client.event
async def on_message(message):
if message.author == client.user:
return
if client.user.mentioned_in(message):
await message.channel.send('Greetings gladiator! My prefix is B \n Stands for "Battle!" easy to remember!')
if message.content == 'Bhelp':
await message.channel.send('balblabla')
if message.content.startswith('Bcreature'):
creature_id = message.content[9:12]
        for data in side.get_data():
            pass  # placeholder: creature lookup by creature_id not implemented yet
# if message.content == 'Bsolo':
# bot_life = 10
# player_life = 10
# player = random.choice(get_data())
# bot = random.choice(get_data())
# if player == bot:
# while player == bot:
# player = random.choice(get_data())
# bot = random.choice(get_data())
# while bot_life > 0 and player_life > 0:
# bot_move = random.choice(moves)
# player_move =
if message.content == 'Bchallenge':
pass
if message.content == 'Bbo3':
pass
if message.content == 'Btournament':
pass
# if message.content == prefix+'sologame':
# while True:
    #         # how the player chooses
# bot = random.choice(moves)
# if choice == bot:
    #             print(f"bot chooses {bot}, It's a tie!")
    #         elif (choice, bot) in results:
    #             print(f'bot chooses {bot} You win!')
    #         elif (bot, choice) in results:
    #             print(f"bot chooses {bot}, you lose")
client.run(token)
|
import os
import collections
from nose.tools import istest
if os.environ.get("HAMCREST"):
from hamcrest import *
else:
from precisely.hamcrest import *
User = collections.namedtuple("User", ["username", "email_address"])
@istest
def test_anything():
assert_that(1, anything())
@istest
def test_equal_to():
assert_that(1, equal_to(2))
@istest
def test_has_property_wrong_value():
assert_that(User("bob", None), has_property("username", "bobbity"))
@istest
def test_has_property_missing():
assert_that("bob", has_property("username", "bobbity"))
@istest
def test_has_properties_wrong_value():
assert_that(User("bob", "bob@example.com"), has_properties(
username="bob",
email_address="bobbity@example.com",
))
@istest
def test_all_of():
assert_that(User("bob", "bob@example.com"), all_of(
has_property("username", "bob"),
has_property("email_address", "bobbity@example.com"),
))
@istest
def test_contains_inanyorder_missing_elements():
assert_that(
[
User("bob", "jim@example.com"),
User("jim", "bob@example.com"),
],
contains_inanyorder(
has_properties(username="bob", email_address="bob@example.com"),
has_properties(username="jim", email_address="jim@example.com"),
)
)
@istest
def test_contains_inanyorder_extra_elements():
assert_that(
["apple", "banana"],
contains_inanyorder("apple"),
)
@istest
def test_contains_missing_elements():
assert_that(
[
User("bob", "jim@example.com"),
User("jim", "bob@example.com"),
],
contains(
has_properties(username="bob", email_address="bob@example.com"),
has_properties(username="jim", email_address="jim@example.com"),
)
)
|
import numpy as np
from scipy import sparse
from sklearn.metrics.pairwise import cosine_similarity
RATINGS = 'anime_ratings.txt'
TARGET = 'Limpparipoju'
USERNAMES = 'usernames.txt'
def main():
ratings = np.loadtxt(RATINGS)
ratings = sparse.csr_matrix(ratings)
target = np.loadtxt(TARGET + '_ratings.txt')
similarities = cosine_similarity(ratings, target)
sorted_indices = similarities.argsort(axis=0)[::-1]
top_indices = list(sorted_indices[:5])
top_matches = [(None, None)]*5
with open(USERNAMES, 'r') as usernames:
for i, username in enumerate(usernames):
if i in top_indices:
top_matches[top_indices.index(i)] = (username, str(similarities[i][0]))
col_width = max(len(user) for user, _ in top_matches)
for user, similarity in top_matches:
print(user.strip().ljust(col_width) + '\t' + similarity)
if __name__ == '__main__':
main()
|
from queue import Queue
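# BFS over an n x n character grid read from stdin ('#' marks walls):
# start at cell (1, 1), search for (n-2, n-2) and print the step counter when
# the goal is reached, or "No solution!" if it is unreachable.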
BFS_Queue = Queue()
visited = set()
STOP = False
n = int(input())
end = (n-2, n-2)
map = [input() for _ in range(n)]
empty = BFS_Queue.empty
next_step = BFS_Queue.get
add_state = BFS_Queue.put
visit = visited.add
def BFS(step, x, y):
global STOP
temp = (x, y)
step += 1
if temp in visited or not (-1 < x < n and -1 < y < n) or map[x][y] == '#':
return
if temp == end:
print(step)
exit()
visit(temp)
add_state((step, x, y+1))
add_state((step, x+1, y))
add_state((step, x-1, y))
add_state((step, x, y-1))
add_state((0, 1, 1))
while True:
if empty():
print("No solution!")
break
BFS(*next_step())
|
import numpy as np
import torch
from torchvision.utils import make_grid
from base import BaseTrainer
from utils import inf_loop, MetricTracker
class Trainer(BaseTrainer):
"""
Trainer class
"""
def __init__(self, model, loss_fn_class, loss_fn_domain, metric_ftns, optimizer, config, device,
data_loader_source, valid_data_loader_source=None, data_loader_target=None,
valid_data_loader_target=None, lr_scheduler=None, len_epoch=None):
super().__init__(model, metric_ftns, optimizer, config)
self.config = config
self.device = device
self.loss_fn_class = loss_fn_class
self.loss_fn_domain = loss_fn_domain
self.data_loader_source = data_loader_source
self.valid_data_loader_source = valid_data_loader_source
self.data_loader_target = data_loader_target
self.valid_data_loader_target = valid_data_loader_target
self.model.to(self.device)
if len_epoch is None:
# epoch-based training
self.len_epoch = min(len(self.data_loader_source),
len(self.data_loader_target))
else:
# FIXME: implement source/target style training or remove this feature
# iteration-based training
self.data_loader = inf_loop(data_loader)
self.len_epoch = len_epoch
# FIXME: handle validation round
self.valid_data_loader = valid_data_loader_source
self.do_validation = self.valid_data_loader is not None
self.lr_scheduler = lr_scheduler
self.log_step = 64
self.train_metrics = MetricTracker(
'loss', 'class_loss', 'domain_loss', *[m.__name__ for m in self.metric_ftns], writer=self.writer)
self.valid_metrics = MetricTracker(
'loss', 'class_loss', 'domain_loss', *[m.__name__ for m in self.metric_ftns], writer=self.writer)
def _train_epoch(self, epoch):
"""
Training logic for an epoch
:param epoch: Integer, current training epoch.
:return: A log that contains average loss and metric in this epoch.
"""
# Setting model into train mode, required_grad
self.model.train()
# Reset all metric in metric dataframe
self.train_metrics.reset()
batch_idx = 0
for source, target in zip(self.data_loader_source, self.data_loader_target):
# source, target = source.to(self.device), target.to(self.device)
# Calculate training progress and GRL λ
p = float(batch_idx + (epoch-1) * self.len_epoch) / \
(self.epochs * self.len_epoch)
λ = 2. / (1. + np.exp(-10 * p)) - 1
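            # standard DANN schedule: λ ramps smoothly from 0 towards 1 as p goes from 0 to 1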
# === Train on source domain
X_source, y_source = source
X_source, y_source = X_source.to(
self.device), y_source.to(self.device)
# generate source domain labels: 0
y_s_domain = torch.zeros(X_source.shape[0], dtype=torch.float32)
y_s_domain = y_s_domain.to(self.device)
class_pred_source, domain_pred_source = self.model(X_source, λ)
# source classification loss
loss_s_label = self.loss_fn_class(
class_pred_source.squeeze(), y_source)
# Compress from tensor size batch*1*1*1 => batch
domain_pred_source = torch.squeeze(domain_pred_source)
loss_s_domain = self.loss_fn_domain(
domain_pred_source, y_s_domain) # source domain loss (via GRL)
# === Train on target domain
X_target, _ = target
            # generate target domain labels: 1
y_t_domain = torch.ones(X_target.shape[0], dtype=torch.float32)
X_target = X_target.to(self.device)
y_t_domain = y_t_domain.to(self.device)
_, domain_pred_target = self.model(X_target, λ)
domain_pred_target = torch.squeeze(domain_pred_target)
loss_t_domain = self.loss_fn_domain(
                domain_pred_target, y_t_domain)  # target domain loss (via GRL)
# === Optimizer ====
self.optimizer.zero_grad()
loss_s_label = torch.log(loss_s_label + 1e-9)
loss = loss_t_domain + loss_s_domain + loss_s_label
loss.backward()
self.optimizer.step()
self.writer.set_step((epoch-1) * self.len_epoch + batch_idx)
self.train_metrics.update('loss', loss.item())
self.train_metrics.update('class_loss', loss_s_label.item())
self.train_metrics.update('domain_loss', loss_s_domain.item())
for met in self.metric_ftns:
self.train_metrics.update(
met.__name__, met(class_pred_source, y_source))
if batch_idx % self.log_step == 0:
self.logger.debug(
f'Train Epoch: {epoch} {self._progress(batch_idx)} Loss: {loss.item():.4f} Source class loss: {loss_s_label.item():3f} Source domain loss {loss_s_domain.item():3f}')
self.writer.add_image('input', make_grid(
X_source.cpu(), nrow=4, normalize=True))
batch_idx += 1
if batch_idx == self.len_epoch:
break
# Average the accumulated result to log the result
log = self.train_metrics.result()
# update lambda value to metric tracker
log["lambda"] = λ
# Run validation after each epoch if validation dataloader is available.
if self.do_validation:
val_log = self._valid_epoch(epoch)
log.update(**{'val_'+k: v for k, v in val_log.items()})
if self.lr_scheduler is not None:
self.lr_scheduler.step()
return log
def _valid_epoch(self, epoch):
"""
Validate after training an epoch
:param epoch: Integer, current training epoch.
:return: A log that contains information about validation
"""
# Set model to evaluation mode, required_grad = False
# disables dropout and has batch norm use the entire population statistics
self.model.eval()
# Reset validation metrics in dataframe for a new validation round
self.valid_metrics.reset()
with torch.no_grad():
for batch_idx, (data, target) in enumerate(self.valid_data_loader):
data, target = data.to(self.device), target.to(self.device)
                # ignore lambda value (fixed to 1 at evaluation time)
output, _ = self.model(data, 1)
loss = self.loss_fn_class(output.squeeze(), target)
self.writer.set_step(
(epoch - 1) * len(self.valid_data_loader) + batch_idx, 'valid')
self.valid_metrics.update('loss', loss.item())
for met in self.metric_ftns:
self.valid_metrics.update(
met.__name__, met(output, target))
self.writer.add_image('input', make_grid(
data.cpu(), nrow=4, normalize=True))
# add histogram of model parameters to the tensorboard
for name, p in self.model.named_parameters():
self.writer.add_histogram(name, p, bins='auto')
return self.valid_metrics.result()
def _progress(self, batch_idx):
base = '[{}/{} ({:.0f}%)]'
if hasattr(self.data_loader_source, 'n_samples'):
current = batch_idx * self.data_loader_source.batch_size
total = self.data_loader_source.n_samples
else:
current = batch_idx
total = self.len_epoch
return base.format(current, total, 100.0 * current / total)
|
import numpy as np
import torch as T
import time
import ray
import gym
from gym.wrappers import RescaleAction
from actor_learner import Actor, Learner
import random
import os
import copy
from torch.utils.tensorboard import SummaryWriter
@ray.remote
class ReplayBuffer:
def __init__(self, args):
self.args = args
self.states = np.zeros([args['buffer_size'], args['n_states']], dtype=np.float32)
self.next_states = np.zeros([args['buffer_size'], args['n_states']], dtype=np.float32)
self.actions = np.zeros([args['buffer_size'], args['n_actions']], dtype=np.float32)
self.rewards = np.zeros([args['buffer_size']], dtype=np.float32)
self.masks = np.zeros([args['buffer_size']], dtype=np.float32)
self.max_size = args['buffer_size']
self.ptr, self.cur_len, = 0, 0
def store(self, state, action, reward, next_state, mask):
self.states[self.ptr] = state
self.actions[self.ptr] = action
self.rewards[self.ptr] = reward
self.next_states[self.ptr] = next_state
self.masks[self.ptr] = mask
self.ptr = (self.ptr + 1) % self.max_size
self.cur_len = min(self.cur_len + 1, self.max_size)
def sample_batch(self):
index = np.random.choice(self.cur_len, self.args['batch_size'], replace = False)
return dict(
state = self.states[index],
action = self.actions[index],
reward = self.rewards[index],
next_state = self.next_states[index],
mask = self.masks[index],
)
def __len__(self):
return self.cur_len
    def ready(self):
        return self.cur_len >= self.args['batch_size']
@ray.remote
class ParameterServer(object):
def __init__(self, weights, args, weights_save_dir):
self.args = args
self.weights_save_dir = weights_save_dir
if args['restore']:
self.weights = T.load(self.weights_save_dir)
else:
self.weights = copy.deepcopy(weights)
def push(self, weights):
self.weights = copy.deepcopy(weights)
def pull(self):
return copy.deepcopy(self.weights)
def save_weights(self):
T.save(self.weights, self.weights_save_dir)
@ray.remote(num_gpus=1, max_calls=1)
def worker_train(ps, replay_buffer, args):
writer = SummaryWriter('./logs/' + args['algorithm'])
T.manual_seed(args['seed'])
T.cuda.manual_seed(args['seed'])
T.cuda.manual_seed_all(args['seed'])
np.random.seed(args['seed'])
random.seed(args['seed'])
agent = Learner(args)
weights = ray.get(ps.pull.remote())
agent.set_weights(weights)
cnt = 1
while True:
agent.learn(replay_buffer, writer)
if cnt % 300 == 0:
print('Weights push to PS !!!')
weights = agent.get_weights()
ps.push.remote(weights)
cnt += 1
@ray.remote
def worker_rollout(ps, replay_buffer, args, worker_id):
env = gym.make(args['env_name'])
env = RescaleAction(env, -1, 1)
T.manual_seed(args['seed'] + worker_id * 1000)
np.random.seed(args['seed'] + worker_id * 1000)
random.seed(args['seed'] + worker_id * 1000)
env.seed(args['seed'] + worker_id * 1000)
env.action_space.np_random.seed(args['seed'] + worker_id * 1000)
agent = Actor(args)
weights = ray.get(ps.pull.remote())
agent.set_weights(weights)
max_ep_len = env.spec.max_episode_steps
state = env.reset()
done = False
ep_len = 0
while True:
if args['render']:
env.render()
agent.total_step += 1
action = agent.select_exploration_action(state)
next_state, reward, done, _ = env.step(action)
ep_len += 1
real_done = False if ep_len >= max_ep_len else done
mask = 0.0 if real_done else args['gamma']
replay_buffer.store.remote(state, action, reward, next_state, mask)
state = next_state
if real_done or (ep_len >= max_ep_len):
            if ray.get(replay_buffer.ready.remote()):
weights = ray.get(ps.pull.remote())
agent.set_weights(weights)
state = env.reset()
done = False
ep_len = 0
@ray.remote
def worker_test(ps, args):
writer = SummaryWriter('./logs/' + args['algorithm'])
env = gym.make(args['env_name'])
env = RescaleAction(env, -1, 1)
T.manual_seed(args['seed'] * 1000 + 99999)
np.random.seed(args['seed'] * 1000 + 99999)
random.seed(args['seed'] * 1000 + 99999)
env.seed(args['seed'] * 1000 + 99999)
env.action_space.np_random.seed(args['seed'] * 1000 + 99999)
best_score = env.reward_range[0]
agent = Actor(args)
weights = ray.get(ps.pull.remote())
agent.set_weights(weights)
scores = []
cnt = 0
while True:
cnt += 1
ave_ret = agent._evaluate_agent(env, agent, args)
scores.append(ave_ret)
writer.add_scalar('Reward/Test', ave_ret, cnt)
print("test_reward:", ave_ret)
if ave_ret > best_score:
ps.save_weights.remote()
print("****** weights saved! ******")
best_score = ave_ret
np.savetxt("./return.txt", scores, delimiter=",")
weights = ray.get(ps.pull.remote())
agent.set_weights(weights)
time.sleep(5)
if __name__ == '__main__':
from utils import Dict
from configparser import ConfigParser
from argparse import ArgumentParser
parser = ArgumentParser('sac parameters')
parser.add_argument("--algorithm", type=str, default = 'sac', help = 'algorithm to adjust (default : sac)')
args = parser.parse_args()
parser = ConfigParser()
parser.read('config.ini')
args = Dict(parser, args.algorithm)
env = gym.make(args['env_name'])
env = RescaleAction(env, -1, 1)
args['n_states'] = env.observation_space.shape[0]
args['n_actions'] = env.action_space.shape[0]
args['max_action'] = env.action_space.high[0]
args['low_action'] = env.action_space.low[0]
args['max_ep_len'] = env.spec.max_episode_steps
weights_save_dir = os.path.join(args['save_dir'] + '/' + args['algorithm'] +'/' + args['env_name'], 'sac_weights.pth')
# Storage location creation
if not os.path.exists(args['save_dir']):
os.mkdir(args['save_dir'])
model_path = args['save_dir'] + '/' + args['algorithm']
if not os.path.exists(model_path):
os.mkdir(model_path)
model_path = model_path + '/' + args['env_name']
if not os.path.exists(model_path):
os.mkdir(model_path)
ray.init()
if args['restore']:
ps = ParameterServer.remote([], args, weights_save_dir)
else:
net = Learner(args)
weights = net.get_weights()
ps = ParameterServer.remote(weights, args, weights_save_dir)
replay_buffer = ReplayBuffer.remote(args)
# Start some training tasks.
for i in range(args['num_workers']):
worker_rollout.remote(ps, replay_buffer, args, i)
time.sleep(20)
for _ in range(args['num_learners']):
worker_train.remote(ps, replay_buffer, args)
time.sleep(10)
task_test = worker_test.remote(ps, args)
ray.wait([task_test,])
|
from typing import Dict, Union
import torch
from catalyst_gan.registry import BATCH_TRANSFORMS, METRICS, DATASETS
# config parsing
def get_metric(params: Union[str, Dict[str, str]]):
if isinstance(params, str):
params = {"metric": params}
if not isinstance(params, dict):
raise NotImplementedError()
metric_fn = METRICS.get_from_params(**params)
return metric_fn
def get_batch_transform(params: Union[str, Dict[str, str]]):
if isinstance(params, str):
params = {"batch_transform": params}
if not isinstance(params, dict):
raise NotImplementedError()
transform_fn = BATCH_TRANSFORMS.get_from_params(**params)
return transform_fn
def get_dataset(**params):
if isinstance(params, str):
params = {"name": params}
if not isinstance(params, dict):
raise NotImplementedError()
dataset = DATASETS.get_from_params(**params)
return dataset
# torch models
def get_module(model, path):
if path == '':
return model
curr = model
for attrib_name in path.split('.'):
# prev = curr
curr = getattr(curr, attrib_name)
return curr
# preprocessing
def as_tensor(x):
if isinstance(x, torch.Tensor):
return x
if isinstance(x, (list, tuple)):
return torch.stack(x, dim=0)
raise NotImplementedError()
def change_num_image_channels(x: torch.Tensor,
channels_num_out: int,
channels_dim: int = 1):
assert x.ndim > channels_dim
channels_num_in = x.size(channels_dim)
if channels_num_out != channels_num_in:
if channels_num_in == 1:
x = torch.cat([x] * channels_num_out, dim=channels_dim)
elif channels_num_out == 1:
x = torch.mean(x, dim=channels_dim, keepdim=True)
else:
raise NotImplementedError()
return x
# interpolation
def slerp(x1, x2, t):
"""spherical interpolation"""
x1_norm = x1 / torch.norm(x1, dim=1, keepdim=True)
x2_norm = x2 / torch.norm(x2, dim=1, keepdim=True)
omega = torch.acos((x1_norm * x2_norm).sum(1))
sin_omega = torch.sin(omega)
return (
(torch.sin((1 - t) * omega) / sin_omega).unsqueeze(1) * x1
+ (torch.sin(t * omega) / sin_omega).unsqueeze(1) * x2
)
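# slerp(x1, x2, 0) returns x1 and slerp(x1, x2, 1) returns x2; intermediate t
# values follow the great-circle arc between the two directions, which is the
# usual way to interpolate GAN latent codes.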
|
import pymongo
from pymongo import MongoClient
from pymongo.errors import DuplicateKeyError, BulkWriteError
from infrastructure import log
logger = log.get_logger("mongo")
class Mongo:
SYMBOL_KEY = "symbol"
DATE_KEY = "date"
TRADING_DAYS_PER_YEAR = 252
STOCK_LIST = "stocklist"
STOCKS_CURRENT_DATA = "stocks_current_data"
HISTORICAL_DATA_SUFIX = "_historical_data"
def collection_exists(self, collection_name):
return collection_name in self.db.collection_names()
def create_regular_collection_if_not_exists(self, collection_name, index_key):
if not self.collection_exists(collection_name):
collection = self.db[collection_name]
collection.create_index([(index_key, pymongo.ASCENDING)], unique=True)
def create_historical_collection_if_not_exists(self, collection_name):
if not self.collection_exists(collection_name):
collection = self.db[collection_name]
collection.create_index([(self.SYMBOL_KEY, pymongo.ASCENDING), (self.DATE_KEY, pymongo.DESCENDING)], unique=True)
def __init__(self, dbHost, dbPort, dbName):
client = MongoClient(dbHost, dbPort)
self.db = client[dbName]
self.create_regular_collection_if_not_exists(self.STOCK_LIST, self.SYMBOL_KEY)
self.create_regular_collection_if_not_exists(self.STOCKS_CURRENT_DATA, self.SYMBOL_KEY)
# Do not use map and filter for side effects: http://stackoverflow.com/a/18433519/2420718
def save_stock_list(self, stocks):
if len(stocks) > 0:
try:
stocklist_collection = self.db[self.STOCK_LIST]
stocklist_collection.insert_many(stocks, ordered=False)
except (DuplicateKeyError, BulkWriteError) as err:
logger.error("save_stock_list: %i %s", len(stocks), err)
def read_stocks_from_stock_list(self):
stocks = []
stocklist_collection = self.db[self.STOCK_LIST]
cursor = stocklist_collection.find()
for stock in cursor:
stocks.append(stock)
return stocks
def stock_exists(self, quote):
        stocklist_collection = self.db[self.STOCK_LIST]
        cursor = stocklist_collection.find({ self.SYMBOL_KEY: quote }).limit(1)
return (cursor.count() > 0)
def get_stock_by_quote(self, quote):
stocklist_collection = self.db[self.STOCK_LIST]
stock = stocklist_collection.find_one({ self.SYMBOL_KEY: quote })
return stock
def save_stock_historical_data(self, quote, stock_historical_data_array):
if len(stock_historical_data_array) > 0:
try:
collection_name = quote + self.HISTORICAL_DATA_SUFIX
self.create_historical_collection_if_not_exists(collection_name)
stock_historical_data_collection = self.db[collection_name]
stock_historical_data_collection.insert_many(stock_historical_data_array, ordered=False)
except (DuplicateKeyError, BulkWriteError) as err:
logger.error("save_stock_historical_data: %s %i %s", quote, len(stock_historical_data_array), err)
def get_stock_historical_data(self, quote):
stock_historical_data_collection = self.db[quote + self.HISTORICAL_DATA_SUFIX]
cursor = stock_historical_data_collection.find({ self.SYMBOL_KEY: quote }).limit(self.TRADING_DAYS_PER_YEAR)
return list(cursor)
def upsert_stock_current_data(self, quote, stock_current_data):
if stock_current_data is not None:
try:
stock_current_data_collection = self.db[self.STOCKS_CURRENT_DATA]
query = { self.SYMBOL_KEY: quote }
stock_current_data_collection.replace_one(query, stock_current_data, upsert=True)
except DuplicateKeyError as err:
logger.error("upsert_stock_current_data: %s %s", quote, err)
def get_stock_current_data(self, quote):
        stock_current_data_collection = self.db[self.STOCKS_CURRENT_DATA]
        stock = stock_current_data_collection.find_one({ self.SYMBOL_KEY: quote })
return stock
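# Illustrative usage sketch (added for clarity; the host, port and database name below
# are hypothetical and the calls assume a reachable MongoDB instance):
if __name__ == "__main__":
    mongo = Mongo("localhost", 27017, "stocks")
    mongo.save_stock_list([{"symbol": "AAPL"}, {"symbol": "MSFT"}])
    for stock in mongo.read_stocks_from_stock_list():
        print(stock)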
|
#! /usr/bin/python
import os
import helpers
import copyright_notes
filename_help = "Output python name(s)"
argument_parser_help = "User argument parser."
postgres_help = "Include postgres in argument parser."
template = """#! /usr/bin/python
\"\"\" $FILENAME \"\"\"
__date__ = "$DATE"
__author__ = "$AUTHOR"
__email__ = "$AUTHOREMAIL"
import sys
$INCLUDES
$ARGPARSE
$INITLOGGING
def main( argv ):
$MAIN
if __name__ == "__main__" :
main( sys.argv )
"""
argparsetemplate = """def parseCmd( argv ):
parser = argparse.ArgumentParser( description = "Application description" )
parser.add_argument( "-l" , "--logfile" , help="Logile" , default="log.log" )
args = parser.parse_args( argv[1:] )
return args"""
argparsetemplatedb = """def parseCmd( argv ):
parser = argparse.ArgumentParser( description = "Application description" )
parser.add_argument( "-l" , "--logfile" , help="Logile" , default="log.log" )
parser.add_argument( "--dbname" , help="database name" , default="db" )
parser.add_argument( "--dbhost" , help="database host" , default="localhost" )
parser.add_argument( "--dbport" , help="database port" , default=5432 )
parser.add_argument( "--dbuser" , help="database user" , default="user" )
parser.add_argument( "--dbpw" , help="database user" , default="pw" )
args = parser.parse_args( argv[1:] )
return args"""
initlogging = """def initLogging( args ):
formatString = '[%(levelname)s][%(asctime)s] : %(message)s'
# formatString = '[%(levelname)s][%(name)s] : %(message)s'
logLevel = logging.INFO
logging.basicConfig( format=formatString , level=logLevel , datefmt='%Y-%m-%d %I:%M:%S')
ch = logging.FileHandler( args.logfile , "w" )
ch.setLevel( logLevel )
ch.setFormatter( logging.Formatter( formatString , datefmt='%Y-%m-%d %I:%M:%S') )
logging.getLogger().addHandler( ch )"""
maintemplateargsdb = """ args = parseCmd( argv )
initLogging( args )
logging.info( "Connect to db " + args.dbname )
db = psycopg2.connect( host=args.dbhost , port=args.dbport , database=args.dbname , user=args.dbuser , password=args.dbpw )"""
maintemplateargs = """ args = parseCmd( argv )
initLogging( args )"""
maintemplateempty = """ pass"""
class python_application_template():
def __init__( self , name , description , path = [] ):
self.name = name
self.description = description
self.path = path
def register_in_arg_parser( self , subparsers ):
parser = helpers.create_subparser( self , subparsers )
parser.add_argument( "filename" , nargs = "+" , help = filename_help )
parser.add_argument( "-a" , "--args" , action="store_true" , help = argument_parser_help )
parser.add_argument( "-p" , "--postgres" , action="store_true" , help = postgres_help )
def do_work( self , args , replacements ):
print "Creating " + self.name + " template(s) ..."
path = helpers.find_path( self.path )
if args.args and args.postgres :
replacements[ "INCLUDES" ] = "import argparse\nimport logging\nimport psycopg2"
replacements[ "ARGPARSE" ] = argparsetemplatedb
replacements[ "INITLOGGING" ] = initlogging
replacements[ "MAIN" ] = maintemplateargsdb
if args.args and not args.postgres :
replacements[ "INCLUDES" ] = "import argparse\nimport logging"
replacements[ "ARGPARSE" ] = argparsetemplate
replacements[ "INITLOGGING" ] = initlogging
replacements[ "MAIN" ] = maintemplateargs
if not args.args :
replacements[ "INCLUDES" ] = ""
replacements[ "ARGPARSE" ] = ""
replacements[ "INITLOGGING" ] = ""
replacements[ "MAIN" ] = " pass"
if hasattr( args , "filename" ) :
for filename in args.filename:
                p = list( path )  # copy, so appended filenames do not leak into later iterations
p.append( filename )
f = helpers.full_join( p )
helpers.add_filename_replacements( replacements , filename )
replacements[ "FILENAME" ] = f
helpers.default_processing( filename , replacements , template )
|
from __future__ import print_function
import mysql.connector
from mysql.connector import errorcode
import csv
config = {
'user': '',
'password': '',
'host': '',
'database': '',
'raise_on_warnings': True,
}
try:
cnx = mysql.connector.connect(**config)
cursor = cnx.cursor()
cursor.execute("show tables from one15yachtchartering;")
tablenames = cursor.fetchall()
print(tablenames)
with open('tables_dict.csv', 'w', newline='') as csvfile:
writer = csv.writer(csvfile)
fieldnames = ['Field', 'Type', 'Null', 'Key', 'Default', 'Extra']
for tbname in tablenames:
query = "describe %s;" % tbname
print(query)
cursor.execute(query)
tablerows = cursor.fetchall()
print(tablerows)
writer.writerow([' '])
tablename = 'Table %s' % tbname
writer.writerow([tablename.upper()])
writer.writerow(fieldnames)
for row in tablerows:
writer.writerow(row)
except mysql.connector.Error as err:
if err.errno == errorcode.ER_ACCESS_DENIED_ERROR:
print("Something is wrong with your user name or password")
elif err.errno == errorcode.ER_BAD_DB_ERROR:
print("Database does not exist")
else:
print(err)
else:
cnx.close()
|
from machine import Pin
p_out = Pin(("PB2", 16*1+2), Pin.OUT_PP)
p_out.value(1) # set io high
p_out.value(0) # set io low
p_out.init(Pin.IN, Pin.PULL_UP)
print(p_out.value()) # get value, 0 or 1
print(p_out.name())
|
import tensorflow as tf
import argparse
import tfextensions
parser = argparse.ArgumentParser(description="Loads a given tf model and plots it.")
parser.add_argument('model')
args = parser.parse_args()
model = tf.keras.models.load_model(args.model, tfextensions.functionMap)
tf.keras.utils.plot_model(model, args.model+".pdf", show_shapes=True, show_layer_names=False)
|
#!/usr/bin/env python3
import sys
import time
import threading
import itertools
print (50 * '#')
print (50 * '-')
print (" WELCOME TO MAGIC RAM INSTALLER")
print (50 * '-')
print (50 * '#')
time.sleep(1)
print ("MIT License, v. 1.4")
time.sleep(1)
print()
def main():
spinner = itertools.cycle(['-', '/', '|', '\\'])
t_end = time.time() + 15
ram = str(input("Enter the amount of RAM you want to install: "))
print()
print("Gathering System Information...")
print()
time.sleep(5.0)
print("Wow...your photos are...interesting...")
print()
time.sleep(3.0)
print("Installing RAM Now...")
print()
while time.time() < t_end:
sys.stdout.write(next(spinner)) # write the next character
sys.stdout.flush() # flush stdout buffer (actual character display)
time.sleep(1.0)
sys.stdout.write('\b')
else:
print("Installation Complete!")
print()
time.sleep(2.0)
print("You may want to hide those photos in the future")
time.sleep(2.0)
print()
print("PLEASE REBOOT TO MAKE CHANGES PERMANENT")
time.sleep(2.0)
print()
main()
|
"""
The very first version, kept here for posterity.
"""
import json
from dataclasses import dataclass, is_dataclass
import typing
def ex_dataclass(*args, **kwargs):
"""
    desc:
        An enhanced version of dataclass that keeps the native behaviour and adds:
        1. recursive parsing of nested class-typed fields, in both directions;
        2. parsing of lists, in both directions;
        3. simple nested lists, in both directions, e.g. {a: [[1, 2, 3]]};
        4. polymorphism for typing.Type annotations, matching the class whose fields overlap most with the input;
        5. redundant (unknown) fields are tolerated when parsing input.
    function:
        json_loads(value: str)  // load the object directly from JSON data
"""
def wrapper(check_class):
# passing class to investigate
check_class = dataclass(check_class, **kwargs)
if not hasattr(check_class, '__annotations__'):
raise Exception(f"type obejct {check_class.__name__} missing required attribute.")
o_init = check_class.__init__
def __get_typing_type_subclasses(type_: typing.Type) -> typing.List[typing.Type]:
subclasses = []
if hasattr(type_, '_name'):
if type_._name == "Type":
subclasses = type_.__dict__['__args__'][0].__subclasses__()
return subclasses
def __get_class_from_typing_type(type_: object) -> typing.ClassVar:
return type_.__dict__['__args__'][0]
def __get_cls_attr(cls: typing.Callable) -> typing.Dict:
return cls.__dict__['__annotations__']
def __get_high_compatibility_cls(subclass: typing.List[typing.Callable], value: typing.Dict) -> typing.Callable:
ret_cls: typing.Callable = None
max_cnt = 0
for cls in subclass:
tmp_cnt = 0
attr_dict = __get_cls_attr(cls)
for k, v in value.items():
v_type = attr_dict.get(k, None)
if v_type:
if isinstance(v, v_type):
tmp_cnt += 1
if tmp_cnt > max_cnt:
max_cnt = tmp_cnt
ret_cls = cls
return ret_cls
def __get_all_cls_typing_type(typing_type_ft: typing.ClassVar) -> typing.List[typing.Type]:
if typing_type_ft:
classes = __get_typing_type_subclasses(typing_type_ft)
if classes:
classes.append(__get_class_from_typing_type(typing_type_ft))
return classes
return []
def __handle_typing_list(field_type: typing.Callable, value: typing.List) -> typing.List:
tmp_list = []
if field_type.__dict__.get('_name', None) == 'List':
ft_tuple = field_type.__dict__.get('__args__', ())
if ft_tuple:
v = value
if value:
v = value[0] if isinstance(value[0], list) else value
return __handle_typing_list(ft_tuple[0], v)
return value
ft_cls = field_type
# print(f"sub_type: {s_type}")
all_classes = __get_all_cls_typing_type(ft_cls)
if all_classes:
for v in value:
# print(f"v.__class__: {v.__class__}")
if ft_cls == v.__class__:
tmp_list.append(v)
else:
ft_cls = __get_high_compatibility_cls(all_classes, v)
if ft_cls:
tmp_list.append(ft_cls(**v))
elif is_dataclass(ft_cls):
for v in value:
if ft_cls == v.__class__:
tmp_list.append(v)
else:
tmp_list.append(ft_cls(**v))
else:
tmp_list = value
return tmp_list
def __calculate_recursive_layer(value: typing.List, deal_with_value: typing.List) -> typing.List:
if isinstance(value, list):
if value:
if not isinstance(value[0], list):
return deal_with_value
return [__calculate_recursive_layer(value[0], deal_with_value)]
return []
def json_loads(cls, json_data: str) -> typing.Callable:
return cls(**json.loads(json_data))
def __init__(self, *args, **kwargs):
tmp_kwargs = {}
tmp_kwargs.update(kwargs)
for name, value in kwargs.items():
# print(name)
# getting field type
field_type = check_class.__annotations__.get(name, None)
if field_type is None:
for cls_ in check_class.__mro__:
if hasattr(cls_, "__annotations__"):
field_type = cls_.__annotations__.get(name, None)
if field_type:
break
else:
tmp_kwargs.pop(name)
                # support typing.Type annotations
all_maybe_cls = __get_all_cls_typing_type(field_type)
if all_maybe_cls:
field_type = __get_high_compatibility_cls(all_maybe_cls, value)
                # support typing.List & nested typing.List[typing.List[str]]
if field_type is not None and isinstance(value, list):
tmp_kwargs[name] = __calculate_recursive_layer(value, __handle_typing_list(field_type, value))
if is_dataclass(field_type) and isinstance(value, dict):
obj = field_type(**value)
tmp_kwargs[name] = obj
# print(f"tmp_kwargs: {tmp_kwargs}")
o_init(self, *args, **tmp_kwargs)
check_class.__init__ = __init__
        # attach json_loads as a classmethod
check_class.json_loads = classmethod(json_loads)
return check_class
return wrapper(args[0]) if args else wrapper
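# Illustrative usage sketch (added for clarity; the Address and User classes are made up).
# It shows nested dataclass parsing, tolerance of redundant fields, and the json_loads
# classmethod injected by the decorator.
if __name__ == "__main__":
    @ex_dataclass
    class Address:
        city: str = ""

    @ex_dataclass
    class User:
        name: str = ""
        address: Address = None

    user = User.json_loads('{"name": "alice", "address": {"city": "Paris"}, "extra": 1}')
    print(user.name, user.address.city)  # alice Paris; the unknown "extra" field is dropped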
|
# Instructions
# In this exercise, we’ll refactor a DAG with a single overloaded task into a DAG with several tasks with well-defined boundaries.
# 1 - Read through the DAG and identify points in the DAG that could be split apart
# 2 - Split the DAG into multiple PythonOperators
# 3 - Run the DAG
import datetime
import logging
from airflow import DAG
from airflow.hooks.postgres_hook import PostgresHook
from airflow.operators.postgres_operator import PostgresOperator
from airflow.operators.python_operator import PythonOperator
def log_oldest():
redshift_hook = PostgresHook("redshift")
records = redshift_hook.get_records("""
SELECT birthyear FROM older_riders ORDER BY birthyear ASC LIMIT 1
""")
if len(records) > 0 and len(records[0]) > 0:
logging.info(f"Oldest rider was born in {records[0][0]}")
def log_youngest():
redshift_hook = PostgresHook("redshift")
records = redshift_hook.get_records("""
SELECT birthyear FROM younger_riders ORDER BY birthyear DESC LIMIT 1
""")
if len(records) > 0 and len(records[0]) > 0:
logging.info(f"Youngest rider was born in {records[0][0]}")
dag = DAG(
"lesson3.exercise2",
start_date=datetime.datetime.utcnow()
)
create_oldest_task = PostgresOperator(
task_id="create_oldest",
dag=dag,
sql="""
BEGIN;
DROP TABLE IF EXISTS older_riders;
CREATE TABLE older_riders AS (
SELECT * FROM trips WHERE birthyear > 0 AND birthyear <= 1945
);
COMMIT;
""",
postgres_conn_id="redshift"
)
log_oldest_task = PythonOperator(
task_id="log_oldest",
dag=dag,
python_callable=log_oldest
)
create_youngest_task = PostgresOperator(
task_id="create_youngest",
dag=dag,
sql="""
BEGIN;
DROP TABLE IF EXISTS younger_riders;
CREATE TABLE younger_riders AS (
SELECT * FROM trips WHERE birthyear > 2000
);
COMMIT;
""",
postgres_conn_id="redshift"
)
log_youngest_task = PythonOperator(
task_id="log_youngest",
dag=dag,
python_callable=log_youngest
)
create_usage_task = PostgresOperator(
task_id="create_usage",
dag=dag,
sql="""
BEGIN;
DROP TABLE IF EXISTS lifetime_rides;
CREATE TABLE lifetime_rides AS (
SELECT bikeid, COUNT(bikeid)
FROM trips
GROUP BY bikeid
);
COMMIT;
""",
postgres_conn_id="redshift"
)
create_station_count_task = PostgresOperator(
task_id="create_station_count",
dag=dag,
sql="""
BEGIN;
DROP TABLE IF EXISTS city_station_counts;
CREATE TABLE city_station_counts AS(
SELECT city, COUNT(city)
FROM stations
GROUP BY city
);
COMMIT;
""",
postgres_conn_id="redshift"
)
create_oldest_task >> log_oldest_task
create_youngest_task >> log_youngest_task
create_station_count_task
create_usage_task
|
"""
Graph class
"""
import os
import numpy as np
import networkx as nx
from scipy.sparse import issparse, csr_matrix
from ..exrpc import rpclib
from ..exrpc.server import FrovedisServer
from ..matrix.crs import FrovedisCRSMatrix
# re-implementation of networkx.to_scipy_sparse_matrix()
# TODO: result confirmation
def to_scipy_sparse_matrix(nx_graph, format='csr'):
'''
# rows are assumed to be in sorted order 1, 2, ..., num_vertices
# hence extraction is not required
stime = time.time()
rows = nx_graph.adj.keys()
print("rows: {0}".format(time.time() - stime) + " [sec]")
'''
#stime = time.time()
edges = [list(i.keys()) for i in nx_graph.adj.values()]
#print("edges: {0}".format(time.time() - stime) + " [sec]")
#stime = time.time()
num_edges = sum([len(i) for i in edges])
num_vertices = len(nx_graph.adj)
data = np.ones(num_edges) # assumes, graph weight = 1.0
indices = np.zeros(num_edges)
indptr = np.zeros(num_vertices + 1)
#print("allocation: {0}".format(time.time() - stime) + " [sec]")
#stime = time.time()
ctr = 0
    for i, vec in enumerate(edges):
        for j, col_id in enumerate(vec):
            indices[ctr + j] = col_id - 1
        ctr = ctr + len(vec)
        indptr[i + 1] = ctr
#print("flatten indices: {0}".format(time.time() - stime) + " [sec]")
'''
# not required, assuming frovedis graph edges have weight = 1.0 always
stime = time.time()
d = [i.values() for i in nx_graph.adj.values()]
print("data: {0}".format(time.time() - stime) + " [sec]")
stime = time.time()
ctr = 0
for i in range(len(d)):
vec = d[i]
for j in range(len(vec)):
try:
data[ctr + j] = vec[j]['weight']
except KeyError:
data[ctr + j] = 1.0
ctr = ctr + len(vec)
print("flatten data: {0}".format(time.time() - stime) + " [sec]")
'''
return csr_matrix((data, indices, indptr), dtype=np.float64, \
shape=(num_vertices, num_vertices))
class Graph(object):
"""
Graph class for frovedis
"""
def __init__(self, nx_graph=None): #TODO: Update name of nx_graph
"""
DESC: Graph constructor
PARAM: nx_graph
"""
self.fdata = None
self.num_edges = None
self.num_vertices = None
if issparse(nx_graph): # any sparse matrix
mat = nx_graph.tocsr()
self.load_csr(mat)
elif isinstance(nx_graph, nx.classes.graph.Graph):
self.load(nx_graph)
elif nx_graph is not None:
raise ValueError("Graph: Supported types are networkx graph or scipy sparse matrices!")
def load(self, nx_graph):
"""
DESC: load a networkx graph to create a frovedis graph
PARAM: nx_graph
RETURN: self
"""
self.release()
self.num_edges = nx_graph.number_of_edges()
self.num_vertices = nx_graph.number_of_nodes()
#import time
#t1 = time.time()
#TODO: use reimplemented version after result correctness
order = sorted(list(nx_graph.nodes()))
nx_smat = nx.to_scipy_sparse_matrix(nx_graph, format='csr', nodelist=order)
#print("Graph.py -> nx.to_scipy_sparse_matrix: ", time.time() - t1)
# by default, edge data is loaded as float64
# and node data is loaded as int64
#TODO: support loading data as same dtype/itype in input nx-graph
smat = FrovedisCRSMatrix(mat=nx_smat, dtype=np.float64, itype=np.int64)
(host, port) = FrovedisServer.getServerInstance()
self.fdata = rpclib.set_graph_data(host, port, smat.get())
excpt = rpclib.check_server_exception()
if excpt["status"]:
raise RuntimeError(excpt["info"])
return self
def load_csr(self, smat):
"""
DESC: loads Frovedis graph from a scipy csr_matrix
PARAM: any sparse matrix
RETURN: self
"""
self.release()
self.num_edges = len(smat.data)
self.num_vertices = smat.shape[0]
# by default, edge data is loaded as float64
# and node data is loaded as int64
#TODO: support loading data as same dtype/itype in input matrix
fsmat = FrovedisCRSMatrix(mat=smat, dtype=np.float64, itype=np.int64)
(host, port) = FrovedisServer.getServerInstance()
self.fdata = rpclib.set_graph_data(host, port, fsmat.get())
excpt = rpclib.check_server_exception()
if excpt["status"]:
raise RuntimeError(excpt["info"])
return self
def debug_print(self):
"""
DESC: Print number of edges and vertices in graph
PARAM: None
RETURN: None
"""
        if self.fdata is not None:
print("Num of edges: ", self.num_edges)
print("Num of vertices: ", self.num_vertices)
(host, port) = FrovedisServer.getServerInstance()
rpclib.show_graph_py(host, port, self.get())
excpt = rpclib.check_server_exception()
if excpt["status"]:
raise RuntimeError(excpt["info"])
def release(self):
"""
DESC: Release the graph resources
PARAM: None
RETURN: None
"""
        if self.fdata is not None:
(host, port) = FrovedisServer.getServerInstance()
rpclib.release_graph_py(host, port, self.get())
excpt = rpclib.check_server_exception()
if excpt["status"]:
raise RuntimeError(excpt["info"])
self.fdata = None
self.num_edges = None
self.num_vertices = None
def clear(self):
"""
DESC: Wrapper for release
PARAM: None
RETURN: None
"""
self.release()
def number_of_edges(self):
"""
DESC: Returns number of edges
PARAM: None
RETURN: Long
"""
return self.num_edges
def number_of_nodes(self):
"""
DESC: Returns number of nodes
PARAM: None
RETURN: Long
"""
return self.num_vertices
def save(self, fname):
"""
DESC: Saves graph to persistent storage.
PARAM: string-> file path
RETURN: None
"""
        if self.fdata is not None:
if os.path.exists(fname):
raise ValueError(\
"another graph object with %s name already exists!" % fname)
(host, port) = FrovedisServer.getServerInstance()
rpclib.save_graph_py(host, port, self.get(), fname.encode('ascii'))
excpt = rpclib.check_server_exception()
if excpt["status"]:
raise RuntimeError(excpt["info"])
def load_text(self, fname):
"""
DESC: Loads graph from persistent storage.
PARAM: string-> file path
RETURN: None
"""
if not os.path.exists(fname):
raise ValueError(\
"the graph object with name %s does not exist!" % fname)
self.release()
(host, port) = FrovedisServer.getServerInstance()
dummy_graph = \
rpclib.load_graph_from_text_file(host, port, fname.encode('ascii'))
excpt = rpclib.check_server_exception()
if excpt["status"]:
raise RuntimeError(excpt["info"])
self.fdata = dummy_graph['dptr']
self.num_edges = dummy_graph['nEdges']
self.num_vertices = dummy_graph['nNodes']
return self
def to_networkx_graph(self):
"""
DESC: Convert from frovedis graph to networkx graph.
PARAM: None
Return: Networkx graph
"""
(host, port) = FrovedisServer.getServerInstance()
dmat = \
rpclib.get_graph_data(host, port, self.get())
excpt = rpclib.check_server_exception()
if excpt["status"]:
raise RuntimeError(excpt["info"])
# TODO: support other types
fmat = FrovedisCRSMatrix(dmat, dtype=np.float64, itype=np.int64)
smat = fmat.to_scipy_matrix()
return nx.from_scipy_sparse_matrix(smat)
def copy(self):
"""
DESC: Create a copy of graph data(used in pagerank)
PARAM: None
RETURN: Frovedis graph
"""
(host, port) = FrovedisServer.getServerInstance()
gptr = \
rpclib.copy_graph_py(host, port, self.get())
excpt = rpclib.check_server_exception()
if excpt["status"]:
raise RuntimeError(excpt["info"])
c_graph = Graph()
c_graph.fdata = gptr
c_graph.num_edges = self.num_edges
c_graph.num_vertices = self.num_vertices
return c_graph
def get(self):
"""
DESC: Fetches fdata
PARAM: None
Return: Long
"""
return self.fdata
def __del__(self):
if FrovedisServer.isUP():
self.release()
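# Illustrative usage sketch (added for clarity, not part of the original module).
# It assumes a Frovedis server has already been started for this process (e.g. via
# FrovedisServer.initialize(...)); without one the rpclib calls below will fail.
if __name__ == "__main__":
    nx_g = nx.path_graph(5)   # small networkx graph: 0-1-2-3-4
    fg = Graph(nx_g)          # copy it to the Frovedis server side
    print(fg.number_of_nodes(), fg.number_of_edges())
    fg.release()              # free the server-side graph data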
|