from .helper import dict_char_to_huoxing_lang, dict_char_to_phonetic, dict_char_to_shape_closed, list_junk_charaters
__all__ = [
'dict_char_to_huoxing_lang', 'dict_char_to_phonetic', 'dict_char_to_shape_closed', 'list_junk_charaters'
]
|
import os
import sys
import pandas as pd
import re
import numpy as np
import pickle
import codecs
import torch
import torch.nn as nn
import torch.optim as optim
import torch.utils.data as D
from torch.autograd import Variable
import torch.nn.functional as F
from models.BiLSTM_ATT import BiLSTM_ATT
from models.PCNN_ATT import PCNN_ATT
from sklearn.model_selection import train_test_split
import lightgbm as lgb
char2id = {'unknown':0}
relation2id = {"unknown":0,
"Test_Disease":1,
"Symptom_Disease":2,
"Treatment_Disease":3,
"Drug_Disease":4,
"Anatomy_Disease":5,
"Frequency_Drug":6,
"Duration_Drug":7,
"Amount_Drug":8,
"Method_Drug":9,
"SideEff_Drug":10}
id2relation = {0: "unknown",
1: "Test_Disease",
2: "Symptom_Disease",
3: "Treatment_Disease",
4: "Drug_Disease",
5: "Anatomy_Disease",
6: "Frequency_Drug",
7: "Duration_Drug",
8: "Amount_Drug",
9: "Method_Drug",
10: "SideEff_Drug"}
relations = {frozenset(["Test","Disease"]):"Test_Disease",
frozenset(["Symptom","Disease"]):"Symptom_Disease",
frozenset(["Treatment","Disease"]):"Treatment_Disease",
frozenset(["Drug","Disease"]):"Drug_Disease",
frozenset(["Anatomy","Disease"]):"Anatomy_Disease",
frozenset(["Frequency","Drug"]):"Frequency_Drug",
frozenset(["Duration","Drug"]):"Duration_Drug",
frozenset(["Amount","Drug"]):"Amount_Drug",
frozenset(["Method","Drug"]):"Method_Drug",
frozenset(["SideEff","Drug"]):"SideEff_Drug"}
def f1score(y_test, pred):
correct = [y_test[i] for i in range(len(y_test)) if y_test[i]==pred[i]]
tp = len(list(filter(lambda a: a != 0, correct)))
truePos = len(list(filter(lambda a: a != 0, y_test)))
predPos = len(list(filter(lambda a: a != 0, pred)))
try:
precision = tp/predPos
recall = tp/truePos
f1 = 2*precision*recall/(precision+recall)
print("Precision:",precision,"Recall:",recall)
print("F1 score: ",f1)
except ZeroDivisionError:
pass
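# Example (illustrative): f1score([1, 0, 2, 1], [1, 0, 0, 1]) treats label 0 ("unknown")
# as the negative class, giving precision 2/2 and recall 2/3 over the remaining labels.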
def feat_mega(file):
# This function is similar to, but different from, feat_nn
# Character Embedding and Position Embedding
# Read Text Files
with open(origin_data_dir+file+".txt", "rb") as f:
text = f.read().decode('utf-8').replace(" 。", " "*3).replace(" "*2, " 。")
# Read annotations and build dataframe
total_df = pd.read_csv(origin_data_dir+file+".ann", header=None, sep='\t')
ents_df = total_df[total_df[0].str.contains('T')]
ents_df = ents_df.rename(index=str, columns={0: "id", 1:"infs", 2:"name"})
ents_df['type'] = pd.Series([e[0] for e in ents_df["infs"].str.split()]).values
ents_df['start'] = pd.Series([int(e[1]) for e in ents_df["infs"].str.split()]).values
ents_df['end'] = pd.Series([int(e[-1]) for e in ents_df["infs"].str.split()]).values
ents_df = ents_df.drop(columns="infs").sort_values(by=['start']).reset_index(drop=True)
# Build relation annotations for query
rel_df = total_df[total_df[0].str.contains('R')]
rel_df = rel_df.drop(columns=2).reset_index(drop=True)
rel_df['pairs'] = pd.Series([set((r.split()[1].split(':')[1], r.split()[2].split(':')[1])) for r in rel_df[1]])
rel_df['relation'] = pd.Series([r.split()[0].replace('-','_') for r in rel_df[1]])
for i, row in ents_df.iterrows():
for ii, rrow in ents_df[min(i+1, ents_df.shape[0]): min(find_pair(i, ents_df), ents_df.shape[0])].iterrows():
if frozenset([row.type, rrow.type]) not in relations:
continue
# Extract entities
ent_1 = re.sub('\s', '', row['name'])
ent_2 = re.sub('\s', '', rrow['name'])
# Find sentence containing entities
ent_start = row.start
ent_end = rrow.end
sent_start = max(ent_start-50, 0)
sent_end = min(ent_end+50, len(text))
for s in [';','。','!','?']:
if text[:ent_start].rfind(s) != -1: sent_start = max(text[:ent_start].rfind(s)+1, sent_start)
if text.find(s, ent_end) != -1: sent_end = min(text.find(s, ent_end),sent_end)
sent_raw = text[sent_start: sent_end]
if len(sent_raw)>200: continue
sent = re.sub('\s', '', sent_raw)
# Write location of entities in each sentence sample
rs_space = len(text[sent_start: row.start]) - len(re.sub('\s', '', text[sent_start: row.start]))
re_space = len(text[sent_start: row.end]) - len(re.sub('\s', '', text[sent_start: row.end]))
rrs_space = len(text[sent_start: rrow.start]) - len(re.sub('\s', '', text[sent_start: rrow.start]))
rre_space = len(text[sent_start: rrow.end]) - len(re.sub('\s', '', text[sent_start: rrow.end]))
pos1 = (row.start-sent_start-rs_space,row.end-sent_start-re_space)
pos2 = (rrow.start-sent_start-rrs_space,rrow.end-sent_start-rre_space)
# Make some features
fakeResTT.append(relation2id[relations[frozenset([row.type, rrow.type])]])
nStripTT.append(len(sent_raw) - len(sent))
senLenTT.append(len(sent))
nSepTT.append(sent.count('。')+sent.count('!')+sent.count('?'))
if pos1[0] > pos2[0]:
disBwTT.append(pos1[0]-pos2[1])
else:
disBwTT.append(pos2[0]-pos1[1])
# Add sentence matrix
sentIdx = [char2id[c] if c in char2id else 0 for c in sent]
sentIdx.extend([0] * (maxLen - len(sentIdx)))
sent = np.array(sentIdx)
# Add position matrix
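# Each character's position is encoded relative to the entity span: negative before the span,
# zero inside it, positive after; values are then shifted by maxDis and clipped to [0, 2*maxDis].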
pos1List = [i-pos1[0] if i<pos1[0] else 0 if i in range(pos1[0],pos1[1]) else i-pos1[1] for i in range(len(sentIdx))]
pos1List = [i+maxDis if abs(i)<=maxDis else 2*maxDis if i>maxDis else 0 for i in pos1List]
pos2List = [i-pos2[0] if i<pos2[0] else 0 if i in range(pos2[0],pos2[1]) else i-pos2[1] for i in range(len(sentIdx))]
pos2List = [i+maxDis if abs(i)<=maxDis else 2*maxDis if i>maxDis else 0 for i in pos2List]
pos1 = np.array(pos1List)
pos2 = np.array(pos2List)
# Load features for pytorch
sent = Variable(torch.cat([torch.cuda.LongTensor(sent).unsqueeze(0)]*32))
pos1 = Variable(torch.cat([torch.cuda.LongTensor(pos1).unsqueeze(0)]*32))
pos2 = Variable(torch.cat([torch.cuda.LongTensor(pos2).unsqueeze(0)]*32))
# Get predictions from BiLSTM-Attention and PCNN-Attention model
y_lstm = lstm_model(sent,pos1,pos2)
y_pcnn = pcnn_model(sent,pos1,pos2)
prob_lstm.append(float(y_lstm[:,0].mean()))
prob_pcnn.append(float(y_pcnn[:,0].mean()))
# Add relationId if mode=="train"
query = set({row.id, rrow.id})
q_rel = rel_df.relation[rel_df['pairs']==query].to_string(index=False)
if q_rel != 'Series([], )':
relation = rel_df.relation[rel_df['pairs']==query].to_string(index=False)
else:
relation = 'unknown'
try:
relTT.append(relation2id[relation])
except KeyError:
relTT.append(relation2id[relation.split()[0]])
#print(nStripTT, senLenTT, disBwTT, nSepTT, prob_lstm, prob_pcnn, fakeResTT, rel)
def find_pair(i, ents_df):
# Scan forward from entity i and return the first index whose start is more than 100 characters away (used as an exclusive slice bound)
j = i + 1
d = 0
while j<ents_df.shape[0]:
d = ents_df.start.loc[j] - ents_df.start.loc[i]
if d > 100:
break
else:
j += 1
return j
origin_data_dir = 'datasets/train/'
# Load files
with open ('itmd_files/fs_lgb.pkl', 'rb') as fp:
lgb_files = pickle.load(fp)
# Load dictionary
with open ('itmd_files/dict.pkl', 'rb') as fp:
char2id = pickle.load(fp)
# Load trained BiLSTM-Attention and PCNN-Attention model
pcnn_model = torch.load('models/pcnn_att.pt')
lstm_model = torch.load('models/bilstm_att.pt')
# Generate features for Mega-model
nStripTT, senLenTT, disBwTT, nSepTT = [],[],[],[]
fakeResTT, relTT, prob_lstm, prob_pcnn = [],[],[],[]
maxLen = 200
maxDis = 60
for file in lgb_files[:11]:
feat_mega(file)
# Prepare data
y = np.array(relTT)
n_values = np.max(fakeResTT) + 1
fakeResTT = np.eye(n_values)[fakeResTT]
senLenTT = np.array(senLenTT)
nStripTT = np.array(nStripTT)
disBwTT = np.array(disBwTT)
nSepTT = np.array(nSepTT)
X = np.stack((senLenTT, nStripTT, disBwTT, nSepTT)).T
X = np.concatenate((X, fakeResTT), axis=1)
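# Feature layout of X: [sentence length, stripped-whitespace count, distance between entities,
# sentence-separator count] followed by a one-hot encoding of the type-based candidate relation.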
# Data split
X_train, X_val, y_train, y_val = train_test_split(X, y, test_size=0.06, random_state=41)
X_val, X_test, y_val, y_test = train_test_split(X_val, y_val, test_size=0.3, random_state=41)
# Train LightGBM as the mega-model
train_data = lgb.Dataset(X_train, label=y_train)
val_data = lgb.Dataset(X_val, label=y_val)
parameters = {
'objective': 'softmax',
'metric': 'softmax',
'is_unbalance': 'true',
'boosting': 'gbdt',
'num_class':11,
'num_leaves': 8,
'feature_fraction': 0.8,
'bagging_fraction': 0.8,
'bagging_freq': 20,
'learning_rate': 0.05
}
lgb_model = lgb.train(parameters,
train_data,
valid_sets=val_data,
num_boost_round=50000,
early_stopping_rounds=300,
verbose_eval=200)
lgb_smx = lgb_model.predict(X_test)
lgb_pred = np.argmax(lgb_smx, axis=1)
f1score(y_test, lgb_pred)
# save model to file
lgb_model.save_model('models/lgb_model.txt')
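# To reuse the saved booster later (illustrative):
# lgb_loaded = lgb.Booster(model_file='models/lgb_model.txt')
# lgb_loaded.predict(X_test)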
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#------------------------------------------------------
# @ File : f_score.py
# @ Description:
# @ Author : Alex Chung
# @ Contact : yonganzhong@outlook.com
# @ License : Copyright (c) 2017-2018
# @ Time : 2020/12/4 10:18 AM
# @ Software : PyCharm
#-------------------------------------------------------
import numpy as np
import warnings
from sklearn.metrics import precision_score, recall_score, f1_score, multilabel_confusion_matrix
def get_confusion_matrix(y_label, y_pred):
y_label = np.asarray(y_label, dtype=np.int32)
y_pred = np.array(y_pred, dtype=np.int32)
# get unique label
labels = np.asarray(sorted(set(y_label)))
# num classes
num_labels = labels.size
# label_to_ind = {y: x for x, y in enumerate(labels)}
# # # convert yt, yp into index
# y_pred = np.array([label_to_ind.get(x, num_labels + 1) for x in y_pred])
# y_label = np.array([label_to_ind.get(x, num_labels + 1) for x in y_label])
# -------------------------- get confusion matrix-----------------------------
cm = np.zeros((num_labels, num_labels), dtype=np.int32)
for l_index, p_index in zip(y_label, y_pred):
cm[l_index, p_index] += 1
return cm
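# Illustrative example (not part of the original module):
# get_confusion_matrix([0, 1, 1], [0, 1, 0]) ->
# [[1, 0],
#  [1, 1]]   # rows are true labels, columns are predictions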
def get_multi_confusion_matrix_old(y_label, y_pred):
"""
:param y_label:
:param y_pred:
:return:
"""
cm = get_confusion_matrix(y_label, y_pred)
# --------------------get multi confusion matrix----------------------------
tp = np.diagonal(cm)
fp = cm.sum(axis=0) - tp
fn = cm.sum(axis=1) - tp
tn = cm.sum() - tp - fp - fn
multi_cm = np.array([tn, fp, fn, tp]).T.reshape(-1, 2, 2)
return cm, multi_cm
def get_multi_confusion_matrix(y_label, y_pred):
y_label = np.array(y_label, dtype=np.int32)
y_pred = np.array(y_pred, dtype=np.int32)
# label_ind = np.where(y_label)
labels = np.asarray(sorted(set(y_label)))
# num classes
num_labels = labels.size
multi_cm = []
# compute tp, fp, fn, tn per class
for l in range(num_labels):
class_label = y_label == l
class_pred = y_pred == l
tp = np.sum(class_label * class_pred)
fp = np.sum((1 - class_label) * class_pred)
fn = np.sum(class_label * (1 - class_pred))
tn = np.sum((1 - class_label) * (1 - class_pred))
multi_cm.append([tn, fp, fn, tp])
multi_cm = np.array(multi_cm).reshape(-1, 2, 2)
return multi_cm
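# Each 2x2 slice of the result is laid out as [[tn, fp], [fn, tp]] for one class,
# matching sklearn's multilabel_confusion_matrix convention.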
def single_label_precision_recall_f(y_true, y_pred, beta=1.0, average='micro'):
# ----------------------get per-class confusion matrix------------------------------
multi_cm = get_multi_confusion_matrix(y_true, y_pred)
# ----------------------compute precision, recall and f-score-------------------------
tp = multi_cm[:, 1, 1]
fp = multi_cm[:, 0, 1]
fn = multi_cm[:, 1, 0]
tp_sum = tp
pred_sum = tp + fp
label_sum = tp + fn
if average == 'micro':
tp_sum = np.array([tp.sum()])
pred_sum = np.array([pred_sum.sum()])
label_sum = np.array([label_sum.sum()])
# suppress divide-by-zero RuntimeWarnings when a class has no predictions or no true labels
warnings.filterwarnings("ignore")
precision = tp_sum / pred_sum
recall = tp_sum / label_sum
f1_score = (1 + beta ** 2) * precision * recall / (beta ** 2 * precision + recall)
f1_score[np.isnan(f1_score)] = 0
precision = np.average(precision)
recall = np.average(recall)
f1_score = np.average(f1_score)
return precision, recall, f1_score
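# Worked example (illustrative): for y_true=[0, 1, 2, 1] and y_pred=[0, 2, 2, 1],
# tp_sum=3, pred_sum=4, label_sum=4, so micro precision = recall = F1 = 0.75;
# micro averaging over single-label data reduces to plain accuracy.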
def multi_label_precision_recall_f(y_label, y_pred, beta=1.0, average='micro'):
"""
:param y_label:
:param y_pred:
:param beta:
:return:
"""
y_label = np.asarray(y_label, dtype=np.int32)
y_pred = np.asarray(y_pred, dtype=np.int32)
assert y_label.shape == y_pred.shape
# ----------------------get per-class confusion matrix------------------------------
num_class = y_label.shape[1]
multi_cms = np.zeros((0, 2, 2))
for i in range(num_class):
multi_cm = get_multi_confusion_matrix(y_label[:, i], y_pred[:, i])
multi_cms = np.concatenate([multi_cms, multi_cm[1][np.newaxis, :]])
# ----------------------compute precision, recall and f-score-------------------------
tp = multi_cms[:, 1, 1]
fp = multi_cms[:, 0, 1]
fn = multi_cms[:, 1, 0]
tp_sum = tp
pred_sum = tp + fp
label_sum = tp + fn
if average == 'micro':
tp_sum = np.array([tp.sum()])
pred_sum = np.array([pred_sum.sum()])
label_sum = np.array([label_sum.sum()])
precision = tp_sum / pred_sum
# suppress divide-by-zero RuntimeWarnings when a class has no predictions or no true labels
warnings.filterwarnings('ignore')
recall = tp_sum / label_sum
f1_score = (1 + beta ** 2) * precision * recall / (beta ** 2 * precision + recall)
f1_score[np.isnan(f1_score)] = 0
precision = np.average(precision)
recall = np.average(recall)
f1_score = np.average(f1_score)
return precision, recall, f1_score
def main():
# binary class classify
binary_class_true = [0, 1, 1, 0, 1, 0, 0, 1, 0, 0]
binary_class_pred = [0, 1, 0, 0, 1, 1, 0, 1, 0, 1]
# multi class classify
multi_class_true = [0, 1, 2, 1, 1, 0, 2, 1, 0, 2]
multi_class_pred = [0, 1, 1, 2, 1, 0, 2, 0, 0, 2]
# multi label classify
multi_label_true = [[1, 1, 0, 0, 1],
[1, 0, 0, 1, 0],
[0, 1, 1, 0, 1]]
multi_label_pred = [[1, 0, 0, 1, 1],
[1, 0, 1, 1, 0],
[0, 1, 0, 0, 1]]
y_label = binary_class_true
y_pred = binary_class_pred
#
# sk_multi_cm = multilabel_confusion_matrix(y_label, y_pred)
cm, multi_cm = get_multi_confusion_matrix_old(y_label=y_label, y_pred=y_pred)
multi_cm = get_multi_confusion_matrix(y_label=y_label, y_pred=y_pred)
# micro precision recall f1_score
micro_precision = precision_score(y_label, y_pred, average='micro')
micro_recall = recall_score(y_label, y_pred, average='micro')
micro_f1 = f1_score(y_label, y_pred, average='micro')
micro_precision, micro_recall, micro_f1 = single_label_precision_recall_f(y_label, y_pred, average='micro')
print(micro_precision, micro_recall, micro_f1)
# macro precision recall f_score
# macro_precision = precision_score(y_label, y_pred, average='micro')
# macro_recall = recall_score(y_label, y_pred, average='micro')
# macro_f1 = f1_score(y_label, y_pred, average='micro')
macro_precision, macro_recall, macro_f1 = single_label_precision_recall_f(y_label, y_pred, average='macro')
print(macro_precision, macro_recall, macro_f1)
print('Done')
# micro_precision = precision_score(multi_label_true, multi_label_pred, average="micro") # 0.6
# macro_precision = recall_score(multi_label_true, multi_label_pred, average="micro")
# micro_f1 = f1_score(multi_label_true, multi_label_pred, average="micro")
micro_precision, micro_recall, micro_f1 = multi_label_precision_recall_f(multi_label_true, multi_label_pred)
print(micro_precision, micro_recall, micro_f1)
print('Done')
if __name__ == "__main__":
main() |
""" Features
The objective of this task is to explore the corpus, deals.txt.
The deals.txt file is a collection of deal descriptions, separated by a new line, from which
we want to glean the following insights:
1. What is the most popular term across all the deals?
2. What is the least popular term across all the deals?
3. How many types of guitars are mentioned across all the deals?
"""
### SOLUTION - TASK 1
# For the word counts, we remove stopwords since they are uninformative.
# The stopwords list is populated from the most common terms occurring in
# the deals (chosen without any particular metric) plus a list of common
# English words.
# Numbers are also filtered out when finding the most frequent and
# least frequent terms in the file.
import nltk
from nltk.collocations import BigramAssocMeasures, BigramCollocationFinder
from task2 import populateStopWords, ignoreWordsList
def wordCounts(raw_data):
"""Computing the word counts here to get the most and the
least frequent words"""
import sklearn.feature_extraction.text
stopwords = populateStopWords()
# Vectorizing the data
print "Vectorizing"
tfidfv = sklearn.feature_extraction.text.TfidfVectorizer(stop_words = stopwords)
tfidf_matrix = tfidfv.fit_transform(raw_data)
print "Post processing"
freqs = [(word, tfidf_matrix.getcol(idx).sum()) for word, idx in tfidfv.vocabulary_.items()]
sorted_freqs = sorted (freqs, key = lambda x: -x[1])
# Filtering to remove numbers that might be occurring
# Similar filter can be used to find the most common discount percentages
sorted_list = [word for word,score in sorted_freqs if word.isalpha()]
return sorted_list
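# Illustrative usage: wordCounts(["great deal on an acoustic guitar", "electric guitar sale"])
# returns the vocabulary sorted by summed TF-IDF weight, highest first, with numeric tokens dropped.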
def typesOfObject(deals, obj):
"""The method takes in raw text and determines the types of Guitars
found in the text"""
from nltk import pos_tag
# Filter deals only that contain the required object
guit_list = [sen for sen in deals if obj in sen.lower()]
join_guit_list = " & ".join(guit_list)
print "Computing Bigram association metrics"
guitar_bigrams = BigramMetrics(join_guit_list, obj)
types = set()
# Looking for adjective associations of guitars that describe the guitars
for word in guitar_bigrams:
(first,second) = word.lower().split()
first_tag, second_tag = pos_tag(word.lower().split())
word, tag = first_tag
if obj in second and tag in 'JJ':
types.add(word)
return (types, len(types))
def BigramMetrics(word_list, obj):
"""Considers the bigram associations in the sentences
that contain the object, and computes their scores based
on their occurrence"""
bigram_measures = nltk.collocations.BigramAssocMeasures()
finder = BigramCollocationFinder.from_words(nltk.word_tokenize(word_list))
# Removing the stop words
"""Test once with populatestopwords also"""
stopwords = nltk.corpus.stopwords.words('english')
finder.apply_word_filter(lambda w: w in stopwords)
finder.apply_freq_filter(2)
scored = finder.score_ngrams(bigram_measures.raw_freq)
sorted_scores = sorted(bigram for bigram, score in scored)
join_sorted_scores = [cur_tuple[0]+" "+cur_tuple[1] for cur_tuple in sorted_scores if obj in cur_tuple[1].lower()]
guitar_scores = filter(lambda word: obj in word.lower(), join_sorted_scores)
return guitar_scores
def main():
"""Retrieving the location of the file"""
filename = "deals.txt"
path = "../data/"+filename
"""path describes where the corpus is located"""
raw_data = open(path).readlines()
print "Computing Word Counts using TF-IDF"
tfidfWords = wordCounts(raw_data)
max_term = tfidfWords[0]
min_term = tfidfWords[-1]
obj = u"guitar" #Since all comparisons are done in lower, specify unicode lowercase string
print "Getting the types of the object ", obj
(guitar_types, num_guitar_types) = typesOfObject(raw_data, obj)
print "Most common term in the deals ", max_term
print "Least common term in the deals ", min_term
print guitar_types, num_guitar_types
if __name__ == '__main__':
main() |
import config
import argparse
import logging
import sys
import zeroconf
import pychromecast
from pychromecast.controllers.youtube import YouTubeController
import time
logging.basicConfig(level=logging.INFO)
formatter = logging.Formatter('%(asctime)s - %(levelname)s - %(message)s')
fh = logging.FileHandler('%s' % config.SERVER.LOGFILE)
fh.setFormatter(formatter)
falcon_logger = logging.getLogger('gunicorn.error')
def Play(bid):
falcon_logger.info("Arrancando Chromecast BID %s" %(bid))
chromecasts, browser = pychromecast.get_listed_chromecasts(friendly_names=[config.CHROMECAST.NAME])
if not chromecasts:
msg="No encuentro el chromecast con nombre %s" % config.CHROMECAST.NAME
falcon_logger.info(msg)
return 9,msg
browser.stop_discovery()
cast = chromecasts[0]
# print(cast)
cast.wait()
mc = cast.media_controller
falcon_logger.info("Arrancando Youtube Controller")
yt = YouTubeController()
cast.register_handler(yt)
retry=6
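# Retry up to 6 times: request playback, then check the media controller; if the
# player is not running yet, wait two seconds and try again.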
while (retry):
yt.play_video(bid)
if (mc.status.player_is_playing):
break
falcon_logger.info("No arranca, espero")
time.sleep(2)
retry-=1
falcon_logger.info("Chromecast Terminado")
def Media_Stop():
falcon_logger.info("Parando Cast")
chromecasts, browser = pychromecast.get_listed_chromecasts(friendly_names=[config.CHROMECAST.NAME])
if not chromecasts:
msg="No encuentro el chromecast con nombre %s" % config.CHROMECAST.NAME
falcon_logger.info(msg)
return 9,msg
cast = chromecasts[0]
cast.wait()
mc = cast.media_controller
falcon_logger.info("Arrancando Stadnby")
yt = YouTubeController()
cast.register_handler(yt)
retry=6
while (retry):
yt.play_video("9m6bfFWPIVE")
if (mc.status.player_is_playing):
break
falcon_logger.info("No arranca, espero")
time.sleep(2)
retry-=1
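# Illustrative usage (hypothetical video id), assuming config.CHROMECAST.NAME points at a reachable device:
# Play("dQw4w9WgXcQ")
# Media_Stop()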
|
# Copyright 2016 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Manager for key bindings."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import curses
import curses.ascii
import app.config
import app.curses_util
import app.log
#import app.window
class Controller:
"""A Controller is a keyboard mapping from keyboard/mouse events to editor
commands."""
def __init__(self, view, name):
if app.config.strict_debug:
assert issubclass(self.__class__, Controller)
assert issubclass(view.__class__, app.window.Window)
self.view = view
self.commandDefault = None
self.commandSet = None
self.textBuffer = None
self.name = name
def parentController(self):
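"""Walk up the view hierarchy and return the first ancestor view's controller, or None if no ancestor has one."""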
view = self.view.parent
while view is not None:
if view.controller is not None:
return view.controller
view = view.parent
def changeToConfirmClose(self):
self.findAndChangeTo('confirmClose')
def changeToConfirmOverwrite(self):
self.findAndChangeTo('confirmOverwrite')
def changeToFileManagerWindow(self, *args):
self.findAndChangeTo('fileManagerWindow')
def changeToConfirmQuit(self):
self.findAndChangeTo('interactiveQuit')
def changeToHostWindow(self, *args):
host = self.getNamedWindow('inputWindow')
if app.config.strict_debug:
assert issubclass(self.view.__class__, app.window.Window), self.view
assert issubclass(host.__class__, app.window.Window), host
self.view.changeFocusTo(host)
def changeToInputWindow(self, *args):
self.findAndChangeTo('inputWindow')
def changeToFind(self):
self.findAndChangeTo('interactiveFind')
def changeToFindPrior(self):
curses.ungetch(self.savedCh)
self.findAndChangeTo('interactiveFind')
def changeToGoto(self):
self.findAndChangeTo('interactiveGoto')
def changeToPaletteWindow(self):
self.findAndChangeTo('paletteWindow')
def changeToPopup(self):
self.findAndChangeTo('popupWindow')
def changeToPrediction(self):
self.findAndChangeTo('predictionWindow')
#self.findAndChangeTo('interactivePrediction')
def changeToPrompt(self):
self.findAndChangeTo('interactivePrompt')
def changeToQuit(self):
self.findAndChangeTo('interactiveQuit')
def changeToSaveAs(self):
view = self.getNamedWindow('fileManagerWindow')
view.setMode('saveAs')
view.bringToFront()
view.changeFocusTo(view)
def createNewTextBuffer(self):
bufferManager = self.view.program.bufferManager
self.view.setTextBuffer(bufferManager.newTextBuffer())
def doCommand(self, ch, meta):
# Check the commandSet for the input with both its string and integer
# representation.
self.savedCh = ch
cmd = (self.commandSet.get(ch) or
self.commandSet.get(app.curses_util.cursesKeyName(ch)))
if cmd:
cmd()
else:
self.commandDefault(ch, meta)
self.textBuffer.compoundChangePush()
def getNamedWindow(self, windowName):
view = self.view
while view is not None:
if hasattr(view, windowName):
return getattr(view, windowName)
view = view.parent
app.log.fatal(windowName + ' not found')
return None
def currentInputWindow(self):
return self.getNamedWindow('inputWindow')
def findAndChangeTo(self, windowName):
window = self.getNamedWindow(windowName)
window.bringToFront()
self.view.changeFocusTo(window)
def changeTo(self, window):
window.bringToFront()
self.view.changeFocusTo(window)
def focus(self):
app.log.info('base controller focus()')
def confirmationPromptFinish(self, *args):
window = self.getNamedWindow('inputWindow')
window.userIntent = 'edit'
window.bringToFront()
self.view.changeFocusTo(window)
def __closeHostFile(self, host):
"""Close the current file and switch to another or create an empty
file."""
bufferManager = host.program.bufferManager
bufferManager.closeTextBuffer(host.textBuffer)
host.userIntent = 'edit'
tb = bufferManager.getUnsavedBuffer()
if not tb:
tb = bufferManager.nextBuffer()
if not tb:
tb = bufferManager.newTextBuffer()
host.setTextBuffer(tb)
def closeFile(self):
app.log.info()
host = self.getNamedWindow('inputWindow')
self.__closeHostFile(host)
self.confirmationPromptFinish()
def closeOrConfirmClose(self):
"""If the file is clean, close it. If it is dirty, prompt the user
about whether to lose unsaved changes."""
host = self.getNamedWindow('inputWindow')
tb = host.textBuffer
if not tb.isDirty():
self.__closeHostFile(host)
return
if host.userIntent == 'edit':
host.userIntent = 'close'
self.changeToConfirmClose()
def initiateClose(self):
"""Called from input window controller."""
self.view.userIntent = 'close'
tb = self.view.textBuffer
if not tb.isDirty():
self.__closeHostFile(self.view)
return
self.view.changeFocusTo(self.view.confirmClose)
def initiateQuit(self):
"""Called from input window controller."""
self.view.userIntent = 'quit'
tb = self.view.textBuffer
if tb.isDirty():
self.view.changeFocusTo(self.view.interactiveQuit)
return
bufferManager = self.view.program.bufferManager
tb = bufferManager.getUnsavedBuffer()
if tb:
self.view.setTextBuffer(tb)
self.view.changeFocusTo(self.view.interactiveQuit)
return
bufferManager.debugLog()
self.view.quitNow()
def initiateSave(self):
"""Called from input window controller."""
self.view.userIntent = 'edit'
tb = self.view.textBuffer
if tb.fullPath:
if not tb.isSafeToWrite():
self.view.changeFocusTo(self.view.confirmOverwrite)
return
tb.fileWrite()
return
self.changeToSaveAs()
def overwriteHostFile(self):
"""Close the current file and switch to another or create an empty
file.
"""
host = self.getNamedWindow('inputWindow')
host.textBuffer.fileWrite()
if host.userIntent == 'quit':
self.quitOrSwitchToConfirmQuit()
return
if host.userIntent == 'close':
self.__closeHostFile(host)
self.changeToHostWindow()
def nextFocusableWindow(self):
window = self.view.parent.nextFocusableWindow(self.view)
if window is not None:
self.view.changeFocusTo(window)
return window is not None
def priorFocusableWindow(self):
window = self.view.parent.priorFocusableWindow(self.view)
if window is not None:
self.view.changeFocusTo(window)
return window is not None
def writeOrConfirmOverwrite(self):
"""Ask whether the file should be overwritten."""
app.log.debug()
host = self.getNamedWindow('inputWindow')
tb = host.textBuffer
if not tb.isSafeToWrite():
self.changeToConfirmOverwrite()
return
tb.fileWrite()
# TODO(dschuyler): Is there a deeper issue here that necessitates saving
# the message? Does this only need to wrap the changeToHostWindow()?
# Store the save message so it is not overwritten.
saveMessage = tb.message
if host.userIntent == 'quit':
self.quitOrSwitchToConfirmQuit()
return
if host.userIntent == 'close':
self.__closeHostFile(host)
self.changeToHostWindow()
tb.message = saveMessage # Restore the save message.
def quitOrSwitchToConfirmQuit(self):
app.log.debug(self, self.view)
host = self.getNamedWindow('inputWindow')
tb = host.textBuffer
host.userIntent = 'quit'
if tb.isDirty():
self.changeToConfirmQuit()
return
bufferManager = self.view.program.bufferManager
tb = bufferManager.getUnsavedBuffer()
if tb:
host.setTextBuffer(tb)
self.changeToConfirmQuit()
return
bufferManager.debugLog()
host.quitNow()
def saveOrChangeToSaveAs(self):
app.log.debug()
host = self.getNamedWindow('inputWindow')
if app.config.strict_debug:
assert issubclass(self.__class__, Controller), self
assert issubclass(self.view.__class__, app.window.Window), self
assert issubclass(host.__class__, app.window.Window), self
assert self.view.textBuffer is self.textBuffer
assert self.view.textBuffer is not host.textBuffer
if host.textBuffer.fullPath:
self.writeOrConfirmOverwrite()
return
self.changeToSaveAs()
def onChange(self):
pass
def saveEventChangeToHostWindow(self, *args):
curses.ungetch(self.savedCh)
host = self.getNamedWindow('inputWindow')
host.bringToFront()
self.view.changeFocusTo(host)
def setTextBuffer(self, textBuffer):
if app.config.strict_debug:
assert issubclass(textBuffer.__class__,
app.text_buffer.TextBuffer), textBuffer
assert self.view.textBuffer is textBuffer
self.textBuffer = textBuffer
def unfocus(self):
pass
class MainController:
"""The different keyboard mappings are different controllers. This class
manages a collection of keyboard mappings and allows the user to switch
between them."""
def __init__(self, view):
if app.config.strict_debug:
assert issubclass(view.__class__, app.window.Window)
self.view = view
self.commandDefault = None
self.commandSet = None
self.controllers = {}
self.controller = None
def add(self, controller):
self.controllers[controller.name] = controller
self.controller = controller
def doCommand(self, ch, meta):
self.controller.doCommand(ch, meta)
def focus(self):
app.log.info('MainController.focus')
self.controller.focus()
if 0:
self.commandDefault = self.controller.commandDefault
commandSet = self.controller.commandSet.copy()
commandSet.update({
app.curses_util.KEY_F2: self.nextController,
})
self.controller.commandSet = commandSet
def onChange(self):
self.controller.onChange()
def nextController(self):
app.log.info('nextController')
if 0:
if self.controller is self.controllers['cuaPlus']:
app.log.info('MainController.nextController cua')
self.controller = self.controllers['cua']
elif self.controller is self.controllers['cua']:
app.log.info('MainController.nextController emacs')
self.controller = self.controllers['emacs']
elif self.controller is self.controllers['emacs']:
app.log.info('MainController.nextController vi')
self.controller = self.controllers['vi']
else:
app.log.info('MainController.nextController cua')
self.controller = self.controllers['cua']
self.controller.setTextBuffer(self.textBuffer)
self.focus()
def setTextBuffer(self, textBuffer):
app.log.info('MainController.setTextBuffer', self.controller)
if app.config.strict_debug:
assert issubclass(textBuffer.__class__, app.text_buffer.TextBuffer)
self.textBuffer = textBuffer
self.controller.setTextBuffer(textBuffer)
def unfocus(self):
self.controller.unfocus()
|
from typing import Callable, Optional
from simphony.netlist import Subcircuit
from gdsfactory.simulation.simphony.components.mmi1x2 import mmi1x2
from gdsfactory.simulation.simphony.components.straight import (
straight as straight_function,
)
def mzi(
delta_length: float = 10.0,
length_y: float = 4.0,
length_x: float = 0.1,
splitter: Callable = mmi1x2,
combiner: Optional[Callable] = None,
straight_top: Callable = straight_function,
straight_bot: Callable = straight_function,
port_name_splitter_w0: str = "o1",
port_name_splitter_e1: str = "o2",
port_name_splitter_e0: str = "o3",
port_name_combiner_w0: str = "o1",
port_name_combiner_e1: str = "o2",
port_name_combiner_e0: str = "o3",
):
"""Mzi circuit model.
Args:
delta_length: bottom arm vertical extra length
length_y: vertical length for both arms
length_x: horizontal length
splitter: model function for the splitter
combiner: model function for the combiner
straight_top: model function for the top arm straight
straight_bot: model function for the bottom arm straight
Return: mzi circuit model
.. code::
__Lx__
| |
Ly Lyr
| |
splitter=| |==combiner
| |
Ly Lyr
| |
DL/2 DL/2
| |
|__Lx__|
.. plot::
:include-source:
import gdsfactory as gf
c = gf.components.mzi(delta_length=10)
c.plot()
.. plot::
:include-source:
import gdsfactory.simulation.simphony as gs
import gdsfactory.simulation.simphony.components as gc
c = gc.mzi()
gs.plot_circuit(c)
"""
combiner = combiner or splitter
splitter = splitter() if callable(splitter) else splitter
combiner = combiner() if callable(combiner) else combiner
wg_short = straight_top(length=2 * length_y + length_x)
wg_long = straight_bot(length=2 * length_y + delta_length + length_x)
# Create the circuit, add all individual instances
circuit = Subcircuit("mzi")
circuit.add(
[
(splitter, "splitter"),
(combiner, "recombiner"),
(wg_long, "wg_long"),
(wg_short, "wg_short"),
]
)
# Circuits can be connected using the elements' string names:
circuit.connect_many(
[
("splitter", port_name_splitter_e0, "wg_long", "o1"),
("splitter", port_name_splitter_e1, "wg_short", "o1"),
("recombiner", port_name_combiner_e0, "wg_long", "o2"),
("recombiner", port_name_combiner_e1, "wg_short", "o2"),
]
)
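# Expose the splitter's and recombiner's unconnected west ports as the
# subcircuit's external pins "o1" and "o2".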
circuit.elements["splitter"].pins[port_name_splitter_w0] = "o1"
circuit.elements["recombiner"].pins[port_name_combiner_w0] = "o2"
return circuit
if __name__ == "__main__":
import matplotlib.pyplot as plt
from gdsfactory.simulation.simphony.plot_circuit import plot_circuit
c = mzi()
plot_circuit(c)
plt.show()
|
# -*- coding: utf-8 -*-
"""
Created on Sun Jul 01 15:29:15 2018
@author: pp34747
"""
import random
import numpy as np
import math
import candidates
import graphs
import matplotlib.pyplot as plt
import os
import psutil
import time
from itertools import chain, izip
import graph_tool.all as gt
import dynamic_connectivity as dc
from scipy.stats.mstats import gmean
class Optimize():
def __init__(self, cost=None, nIterations=200):
self.T_start = 1.0
self.T_min = 0.000000001
self.n_runs = 1
self.cost = cost
self.nIterations = nIterations
def run(self, candInfo, conflictList, mergeList, adjacencyList, dummyConflictsList):
pid = os.getpid()
py = psutil.Process(pid)
print "Resolving root hairs ..."
nCandidates = len(conflictList)
# Defaults for the trivial/empty cases below; the annealing branch overwrites them
best_cost, ratio_complete, bestMetricsNorm = None, None, None
if nCandidates > 1: # If more than one candidate we need to find optimal set of roothairs
# Set initial solution and parameters
print " - Resolving..."
# Determine number of iterations
maxLevels = 5*self.nIterations
# Initialize state object
state = State( candInfo=candInfo,
mergeList=mergeList,
conflictList=conflictList,
adjacencyList=adjacencyList,
dummyConflictsList=dummyConflictsList)
# Create random state
shuffleState(state,nCandidates)
# Get values for cooling schedule
finalProb = 0.01/nCandidates # final acceptance probability for an upward move at the last temperature level (scaled by the number of candidates)
csMaker = CoolingScheduleMaker(state, costFunction=self.cost, initialProb=0.95, finalProb=finalProb)
csMaker.simulate(int(0.2*self.nIterations*nCandidates)) # 20% of iterations in actual optimization
normArray = csMaker.normalization() # calculates per-metric normalization factors (reciprocal of the median of each sub cost)
print("norm curvature: "+str(1./normArray[0])+", norm length: "+str(1./normArray[1])+", norm distance: "+str(1./normArray[2]))
self.cost.setNormValues(normArray) # set normalization value in cost function
csMaker.recalculateCost() # calculate costs with normalization
csMaker.calculateUpwardCosts()
initialTemp = csMaker.getInitialTemp()
alpha = csMaker.getAlpha(initialTemp, self.nIterations)
finalTemp = csMaker.getFinalTemp(initialTemp,alpha, self.nIterations)
averageCost = csMaker.initialCost()
print "averageCost:"+str(averageCost), "averageDeltaCost: "+str(csMaker.averageDeltaCost()), ", initialTemp: ", str(initialTemp), ", finalTemp: ", str(finalTemp), ", alpha: ", str(alpha)
# Initialize Simulated Annealing object
sa = SimulatedAnnealing(initialState=state, initalTemp=initialTemp, finalTemp=finalTemp, averageCost=averageCost, alpha=alpha, maxLevels=maxLevels, costFunction=self.cost)
memoryUse = py.memory_info()[0]/2.**30 # memory use in GB...I think
print ' - memory use: '+ str(round(memoryUse,4))
start_time = time.time()
# Run optimization n_run times
for _ in range(self.n_runs):
best_sol, best_cost, ratio_complete, bestMetricsNorm = sa.anneal()
memoryUse = py.memory_info()[0]/2.**30 # memory use in GB...I think
print ' - memory use: '+ str(round(memoryUse,4))
#summary[i_run+1] = {'n':n_candidates, 'T':T_arr, 'cost':cost_arr, 'best':best_arr}
# Final solution
sol_out = np.where(best_sol)[0]
elapsed_time = time.time() - start_time
print 'Time: ' + time.strftime("%H:%M:%S", time.gmtime(elapsed_time))
elif nCandidates == 1: # If only one candidate -> trivial solution
sol_out = [0]
else: # If no candidates -> no solution
sol_out = []
memoryUse = py.memory_info()[0]/2.**30 # memory use in GB...I think
print ' - memory use: '+ str(round(memoryUse,4))
# Create roothair connected components from overall best solution
components = dc.ConnectedComponents(mergeList)
for v in sol_out:
components.addVertex(v)
# Construct paths of connected components
roothair_paths = []
for cc in components.components.values():
g = graphs.Candidate_Graph([])
for candidate_id in cc:
g.merge(graphs.Candidate_Graph(candInfo.paths[candidate_id]))
roothair_paths.append(g.get_path())
memoryUse = py.memory_info()[0]/2.**30 # memory use in GB...I think
print ' - memory use: '+ str(round(memoryUse,4))
return roothair_paths, best_cost, ratio_complete, bestMetricsNorm
def shuffleState(state, n=100):
for _ in range(n):
# Get position (component id) to be changed
position = random.randint(0,n-1)
# Change current solution
isvalid = state.neighbor(position)
# If is invalid reverse and skip
if not isvalid:
state.reverseChanges()
continue
class SimulatedAnnealing:
def __init__(self, initialState, initalTemp, finalTemp, averageCost, alpha, maxLevels, costFunction):
self.state = initialState
self.currTemp = initalTemp
self.finalTemp = finalTemp
self.averageCost = averageCost
self.alpha = alpha
self.maxLevels = maxLevels
self.costFunction = costFunction
self.iterationsPerTemp = len(self.state.candInfo.paths) # Number of iterations per temperature level
self.R_max = 2 * self.iterationsPerTemp
self.R = 0
def anneal(self):
# Initialize cost
metrics = self.costFunction.calculateMetrics(self.state)
metricsNorm = self.costFunction.normalizeMetrics(metrics)
cost = self.costFunction.calculateCost(metricsNorm) # Initial best cost is initial cost
# Save first solution as best solution
best_sol = np.array(self.state.binaryList) # Initial best solution is initial solution
new_cost = cost
best_cost = cost
best_metrics = metrics
newMetricsNorm = metricsNorm
bestMetricsNorm = metricsNorm
# Uncomment for plotting images:
#solutions_arr = [np.where(self.state.binaryList)[0]] # List with all states
#cost_arr = [cost] # List with all costs
#metrics_arr = [bestMetricsNorm] # List with all metrics
n_iterations = 0
print " - " + str([n_iterations, "{0:.2E}".format(self.currTemp), round(cost,5), round(best_cost,5)]) \
+ str([round(c,3) for c in metricsNorm]) + str(round(float(self.R)/self.R_max,3))
while (self.currTemp > self.finalTemp or self.R < self.R_max) and n_iterations < self.maxLevels: # stop once T has fallen to finalTemp and R_max consecutive moves have been rejected, or after maxLevels temperature levels
for _ in range(self.iterationsPerTemp):
# Get position (component id) to be changed
position = random.randint(0,len(self.state.binaryList)-1)
# Change current solution
isvalid = self.state.neighbor(position)
# If is invalid reverse and skip
if not isvalid:
self.state.reverseChanges()
continue
# Calculate cost for current state
metrics = self.costFunction.calculateMetrics(self.state)
newMetricsNorm = self.costFunction.normalizeMetrics(metrics)
new_cost = self.costFunction.calculateCost(newMetricsNorm)
# Acceptance probability
ap = self.probability(self.averageCost, cost, new_cost, self.currTemp)
# If acceptance probability is larger than random value between 0. and 1.
if ap > random.random():
self.R = 0 # Reset number of rejected moves
cost = new_cost # Cost is updated
if new_cost < best_cost: # New cost is better than overall best cost
best_sol = np.array(self.state.binaryList) # Update best solution
best_cost = new_cost # Update best cost
best_metrics = metrics
bestMetricsNorm = newMetricsNorm # Update best metrics
# Uncomment for plotting all accepted states:
#solutions_arr.append(np.where(self.state.binaryList)[0])
#cost_arr.append(new_cost)
#metrics_arr.append(newMetricsNorm)
else:
self.state.reverseChanges()
self.R += 1 # Increase number of consecutive rejected moves
# Reduce temperature
self.currTemp = self.currTemp*self.alpha
# Increase number of iterations
n_iterations += 1
print " - " + str([n_iterations, "{0:.2E}".format(self.currTemp), round(cost,5), round(best_cost,5)]) \
+ str([round(c,3) for c in bestMetricsNorm]) + str(round(float(self.R)/self.R_max,3))
ratio_complete = 1.-best_metrics[1]
# Uncomment for plotting images:
#return best_sol, best_cost, ratio_complete, bestMetricsNorm, solutions_arr, cost_arr, metrics_arr
return best_sol, best_cost, ratio_complete, bestMetricsNorm
def probability(self,average_cost,prev_score,next_score,temperature):
if next_score < prev_score:
return 1.0
else:
return math.exp((prev_score-next_score)/average_cost/temperature)
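# Worked example (illustrative): with average_cost=1.0, a cost increase of 0.1 at
# temperature 0.5 is accepted with probability exp(-0.1/1.0/0.5) = exp(-0.2) ~ 0.82.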
class State:
def __init__(self, candInfo, mergeList, conflictList, adjacencyList, dummyConflictsList):
self.candInfo = candInfo
self.binaryList = np.zeros(len(candInfo.paths),dtype=int) #sol is a binary 1D numpy array (e.g. sol = array([0,1,0,1,1]))
self.components = dc.ConnectedComponents(mergeList)
self.conflictList = conflictList
self.adjacencyList = adjacencyList
self.dummyConflictsList = dummyConflictsList
# Create a graph from path of each candidate
self.candidate_graphs = []
for p in self.candInfo.paths:
self.candidate_graphs.append(graphs.Candidate_Graph(p))
# Initialize individual items of cost
# Add all dummies, because there are no candidate root hairs yet
self.cost_items = CostItems(sum_length_dummy=sum(self.candInfo.dummy_lengths),
sum_strain_dummy=sum(self.candInfo.dummy_strain),
sum_length_all=sum(self.candInfo.dummy_lengths) )
# No conflicts yet for dummies
# Dummies can have more conflicting candidates in solution e.g. at intersection
n_dummies = max([max(rh) for rh in dummyConflictsList if len(rh)>0])+1
self.n_dummy_conflicts = np.zeros(n_dummies, dtype=int)
# Items to track recent changes to candidates and connected components
self.addedTips = []
self.removedTips = []
self.removedCandidates = []
self.addedCandidates = []
self.removedComponents = []
self.addedComponents = []
self.addedDummies = []
self.removedDummies = []
# Track difference in cost items
self.cost_items_difference = CostItemDifference()
def neighbor(self, position):
# Reset previous tracked changes
self.addedTips = []
self.removedTips = []
self.removedCandidates = []
self.addedCandidates = []
self.removedComponents = []
self.addedComponents = []
self.addedDummies = []
self.removedDummies = []
self.cost_items_difference = CostItemDifference()
# Get neighbor of current solution
self.addedCandidates, self.removedCandidates = self.getChangesFromBinaryList(position)
self.updateBinaryList(self.addedCandidates, self.removedCandidates)
# Remove vertices from components graph
for c in self.removedCandidates:
comp_add, comp_remove = self.components.removeVertex(c)
for value in comp_remove.values():
self.removedComponents.append(value)
for value in comp_add.values():
self.addedComponents.append(value)
# Add new vertices to components graph
for c in self.addedCandidates:
comp_add, comp_remove = self.components.addVertex(c)
for value in comp_remove.values():
self.removedComponents.append(value)
for value in comp_add.values():
self.addedComponents.append(value)
# Fill with dummies
self.addedDummies, self.removedDummies, self.n_dummy_conflicts = self.updateDummies(self.n_dummy_conflicts, self.addedCandidates, self.removedCandidates)
self.addedTips = getComponentTips(self.addedComponents, self.candidate_graphs)
self.removedTips = getComponentTips(self.removedComponents, self.candidate_graphs)
# Calculate the difference in cost items
self.cost_items_difference.extract(self.candInfo,
self.candidate_graphs,
self.addedCandidates,
self.removedCandidates,
self.addedComponents,
self.removedComponents,
self.addedDummies,
self.removedDummies)
# Update to new cost items
self.cost_items = self.cost_items + self.cost_items_difference
if not self.isvalid():
return False
if not self.hasTwoTips():
return False
else:
return True
def isvalid(self):
"""
Tests if candidates in a new connected component overlap
"""
for c in self.addedComponents:
if self.selfintersect(c):
#print "Invalid component: "+str(c)
return False
return True
def hasTwoTips(self):
for item in self.addedTips:
if len(item) != 2:
return False
#for item in self.changes['removedTips']:
# if len(item) != 2:
# return False
return True
def selfintersect(self, componentPath):
"""
Tests if candidates overlap
"""
lenPath = len(componentPath)
for i in range(lenPath):
for j in range(i+2,lenPath):
if componentPath[j] > componentPath[i]:
if componentPath[j] in self.adjacencyList[componentPath[i]]:
return True
elif componentPath[i] in self.adjacencyList[componentPath[j]]:
return True
return False
def getChanges(self):
return self.addedTips, self.removedTips, self.addedCandidates, self.removedCandidates, self.addedComponents, self.removedComponents, self.addedDummies, self.removedDummies
def reverseChanges(self):
# Reverse changes
self.updateBinaryList(self.removedCandidates, self.addedCandidates)
for c in self.addedCandidates:
self.components.removeVertex(c)
for c in self.removedCandidates:
self.components.addVertex(c)
_, _, self.n_dummy_conflicts = self.updateDummies(self.n_dummy_conflicts, self.removedCandidates, self.addedCandidates)
self.cost_items = self.cost_items - self.cost_items_difference
# Reset tracked changes
self.addedTips = []
self.removedTips = []
self.removedCandidates = []
self.addedCandidates = []
self.removedComponents = []
self.addedComponents = []
self.addedDummies = []
self.removedDummies = []
self.cost_items_difference = CostItemDifference()
def getChangesFromBinaryList(self, pos):
if self.binaryList[pos] == 1: # If candidate is already in solution
cand_add = [] # Add none
cand_remove = [pos] # Remove
else:
cand_add = [pos]
c = self.conflictList[pos]
cand_remove = c[np.where(self.binaryList[c])]
return cand_add, cand_remove
def updateBinaryList(self, add, remove):
self.binaryList[add] = 1
self.binaryList[remove] = 0
def updateDummies(self, n_dummy_conflicts, cand_add, cand_remove):
# For each removed candidate, dummies have to be added
n_dummy_conflicts_copy = np.array(n_dummy_conflicts)
dum_add = []
for c in cand_remove: # For each removed candidate
dummy_ids = self.dummyConflictsList[c]
n_dummy_conflicts_copy[dummy_ids] -= 1 # Reduce number of conflicting candidates for this dummy
ids = np.where(n_dummy_conflicts_copy[dummy_ids]==0)[0] # If no more conflicts add a dummy
dum_add.append(dummy_ids[ids])
dum_add = list(chain(*dum_add))
# For each added candidate, dummies have to be removed
dum_remove = []
for c in cand_add:
dummy_ids = self.dummyConflictsList[c]
ids = np.where(n_dummy_conflicts_copy[dummy_ids]==0)[0]
dum_remove.append(dummy_ids[ids])
n_dummy_conflicts_copy[dummy_ids] += 1
dum_remove = list(chain(*dum_remove))
return dum_add, dum_remove, n_dummy_conflicts_copy
def getComponentTips(components, candidateGraphs):
tips = []
# print "get_component_tips:"
for candidates in components:
g = graphs.Candidate_Graph([])
for candidate_id in candidates:
g.merge(candidateGraphs[candidate_id])
tips.append(g.all_degree_one())
return tips
class CoolingScheduleMaker:
def __init__(self, initialState, costFunction, initialProb=0.95, finalProb=0.0001):
"""
Class to determine cooling schedule based on simulated solution
"""
self.initialProb = initialProb
self.finalProb = finalProb
self.state = initialState # State object
#self.temperatureLevels = temperatureLevels # Number of temperature levels
self.deltaCostArray = [] # Holds upward changed costs of simulation
self.costArray = [] # Holds all costs
self.subCostArray = []
self.costFunction = costFunction
def simulate(self, n):
"""
Creates neighbors n times start at given solution
"""
nPaths = len(self.state.binaryList)
for _ in range(n):
# Get position (component id) to be changed
position = random.randint(0,nPaths-1)
# Change current solution
isvalid = self.state.neighbor(position)
# If is invalid reverse and skip
if not isvalid:
self.state.reverseChanges()
continue
# Calculate cost for current state
metrics = self.costFunction.calculateMetrics(self.state)
cost = self.costFunction.calculateCost(metrics)
self.costArray.append(cost)
self.subCostArray.append(metrics)
def recalculateCost(self):
"""
Recalculates costs with normalized sub costs
"""
self.costArray = []
for metrics in self.subCostArray:
metricsNorm = self.costFunction.normalizeMetrics(metrics)
cost = self.costFunction.calculateCost(metricsNorm)
self.costArray.append(cost)
def calculateUpwardCosts(self):
self.deltaCostArray = []
previous_cost = self.costArray[0]
for current_cost in self.costArray:
if current_cost > previous_cost:
self.deltaCostArray.append(current_cost-previous_cost)
previous_cost = current_cost
def normalization(self):
"""
Determines normalization values as the reciprocal of the median of each sub cost.
Output: numpy array with 3 normalization values (float)
"""
normArray = 1. / np.median(self.subCostArray,0)
normArray[normArray == np.inf] = 0.0
return normArray
def averageDeltaCost(self):
"""
Calculates average increasing cost
"""
return np.mean(self.deltaCostArray)
def initialCost(self):
"""
Calculates average initial cost
"""
return np.mean(self.costArray)
def getInitialTemp(self):
"""
Calculates initial temperature
"""
t = - self.averageDeltaCost() / (self.initialCost() * np.log(self.initialProb))
return t
def getAlpha(self,initialTemp,nIterations):
"""
Calculate the cooling rate
"""
return (-self.averageDeltaCost() / (initialTemp * np.log(self.finalProb) * self.initialCost()))**(1.0/nIterations)
def getFinalTemp(self, initialTemp, alpha, nIterations):
"""
Calculates final temperature
"""
return initialTemp*(alpha**nIterations)
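# With these definitions the schedule is calibrated so that an average upward move
# is accepted with probability initialProb at the initial temperature and, after
# nIterations geometric cooling steps (T_{k+1} = alpha * T_k), with probability finalProb.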
def matrix_to_list(adjmat):
# converts an adjacency matrix to an adjacency list
graph = [[] for v in adjmat]
for i, v in enumerate(adjmat, 0):
for j, u in enumerate(v, 0):
if u != 0:
#edges.add(frozenset([i, j]))
graph[i].append(j)
return [np.array(v,dtype=int) for v in graph]
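# Illustrative example: matrix_to_list([[0, 1], [1, 0]]) -> [array([1]), array([0])]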
class CandidateInformation:
def __init__(self):
# Path of each candidate
self.paths = []
# Dummy values
self.dummy_strain = np.array([])
self.dummy_lengths = np.array([])
self.dummy_median_min_distance = 0.0
self.dummy_median_max_distance = 0.0
self.dummy_max_max_distance = 0.0
# Candidate values
self.strain = np.array([])
self.min_distance = np.array([])
self.max_distance = np.array([])
self.min_reference_strain = np.array([])
# Segment distance to root
self.minDistToEdge = {}
class CostItems:
def __init__( self,
sum_strain_roothair=0.0,\
sum_strain_dummy=0.0, \
sum_min_reference_strain=0.0, \
sum_length_dummy=0.0, \
sum_length_all=0.0, \
sum_min_distance_roothair=0.0, \
num_roothair=0, \
sum_max_distance_roothair=0.0):
self.sum_strain_roothair = sum_strain_roothair
self.sum_strain_dummy = sum_strain_dummy
self.sum_min_reference_strain = sum_min_reference_strain
# Total length of remaining dummies measure
self.sum_length_dummy = sum_length_dummy
self.sum_length_all = sum_length_all
# Min distance to root
self.sum_min_distance_roothair = sum_min_distance_roothair
self.num_roothair = num_roothair
# Max distance to root
self.sum_max_distance_roothair = sum_max_distance_roothair
def __add__(self, other):
sum_strain_roothair = self.sum_strain_roothair + other.sum_strain_roothair
sum_strain_dummy = self.sum_strain_dummy + other.sum_strain_dummy
sum_min_reference_strain = self.sum_min_reference_strain + other.sum_min_reference_strain
# Total length of remaining dummies measure
sum_length_dummy = self.sum_length_dummy + other.sum_length_dummy
sum_length_all = self.sum_length_all + other.sum_length_all
# Min distance to root
sum_min_distance_roothair = self.sum_min_distance_roothair + other.sum_min_distance_roothair
num_roothair = self.num_roothair + other.num_roothair
# Max distance to root
sum_max_distance_roothair = self.sum_max_distance_roothair + other.sum_max_distance_roothair
return CostItems(sum_strain_roothair, sum_strain_dummy, sum_min_reference_strain, \
sum_length_dummy, sum_length_all,
sum_min_distance_roothair, num_roothair, sum_max_distance_roothair)
def __sub__(self, other):
sum_strain_roothair = self.sum_strain_roothair - other.sum_strain_roothair
sum_strain_dummy = self.sum_strain_dummy - other.sum_strain_dummy
sum_min_reference_strain = self.sum_min_reference_strain - other.sum_min_reference_strain
# Total length of remaining dummies measure
sum_length_dummy = self.sum_length_dummy - other.sum_length_dummy
sum_length_all = self.sum_length_all - other.sum_length_all
# Min distance to root
sum_min_distance_roothair = self.sum_min_distance_roothair - other.sum_min_distance_roothair
num_roothair = self.num_roothair - other.num_roothair
# Max distance to root
sum_max_distance_roothair = self.sum_max_distance_roothair - other.sum_max_distance_roothair
return CostItems(sum_strain_roothair, sum_strain_dummy, sum_min_reference_strain, \
sum_length_dummy, sum_length_all,
sum_min_distance_roothair, num_roothair, sum_max_distance_roothair)
class CostItemDifference(CostItems):
def extract(self, candInfo, candidate_graphs, cand_add, cand_remove, comp_add, comp_remove, dum_add, dum_remove):
# Curvature measure
self.sum_strain_roothair = sum(candInfo.strain[cand_add]) \
- sum(candInfo.strain[cand_remove])
self.sum_min_reference_strain = sum(candInfo.min_reference_strain[cand_add]) \
- sum(candInfo.min_reference_strain[cand_remove])
# Total length of remaining dummies measure
self.sum_length_dummy = sum(candInfo.dummy_lengths[dum_add]) \
- sum(candInfo.dummy_lengths[dum_remove])
self.sum_strain_dummy = sum(candInfo.dummy_strain[dum_add]) \
- sum(candInfo.dummy_strain[dum_remove])
# Number of components
self.num_roothair = len(comp_add) - len(comp_remove)
# Get tips of component
tips_remove = getComponentTips(comp_remove, candidate_graphs)
tips_add = getComponentTips(comp_add, candidate_graphs)
# Sum min/max distances of tips
min_distance = 0.0
max_distance = 0.0
for tip_pair in tips_add:
distances = [candInfo.minDistToEdge[t] for t in tip_pair]
if len(distances) == 2:
min_distance += min(distances)
max_distance += max(distances)
else:
#print "for tip_pair in tips_add: len(distances) = ", len(distances)
return False
for tip_pair in tips_remove:
distances = [candInfo.minDistToEdge[t] for t in tip_pair]
if len(distances) == 2:
min_distance -= min(distances)
max_distance -= max(distances)
else:
#print "for tip_pair in tips_remove: len(distances) = ", len(distances)
return False
# Min distance to root
self.sum_min_distance_roothair = min_distance
# Max distance to root
self.sum_max_distance_roothair = max_distance
return True
class Cost:
def __init__(self, measure, cost_type, weights=[1., 1., 1.], normValues=np.array([1., 1., 1.])):
self.measure = measure
self.cost_type = cost_type
self.normValues = normValues
self.weights = np.array(weights)
sum_weights = np.sum(weights)
self.weights = np.float_(weights)/sum_weights
def setNormValues(self, newValues):
self.normValues = newValues
def calculateMetrics(self, state):
"""
Calculates metrics of state
"""
cost_items = state.cost_items
curvature_measure = (cost_items.sum_strain_roothair - cost_items.sum_min_reference_strain) #\
#/ (cost_items.sum_min_reference_strain)
tot_len_measure = cost_items.sum_length_dummy / cost_items.sum_length_all
if cost_items.num_roothair > 0:
min_dist_measure = cost_items.sum_min_distance_roothair / cost_items.num_roothair
else:
min_dist_measure = 0.0
return np.array([curvature_measure, tot_len_measure, min_dist_measure])
def normalizeMetrics(self, metrics):
"""
Returns metrics normalized with Cost.normValues
"""
return metrics * self.normValues
def calculateCost(self, metrics):
"""
Calculates cost from metrics using cost function settings
"""
if self.cost_type == 'exp':
return np.sum(self.weights * np.exp(metrics))
elif self.cost_type == 'mean':
return np.sum(self.weights * metrics)
elif self.cost_type == 'rms':
return weighted_root_mean_square(metrics, self.weights)
elif self.cost_type == 'pow3':
return np.mean(metrics**3.)**(1/3.0)
elif self.cost_type == 'pow4':
return np.mean(metrics**4.)**(1/4.0)
elif self.cost_type == 'geom':
return gmean(metrics)
else:
return None
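# Note: the weights are normalized to sum to 1 in __init__, so cost_type='mean' with
# equal weights is just the arithmetic mean of the normalized metrics.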
def weighted_root_mean_square(arr, weights):
'''
Get the root mean square value of the array values
'''
return np.sqrt(np.sum(weights * np.array(arr)**2.)) |
from PyQt5.QtWidgets import QApplication, QMainWindow, QSizePolicy
import xtd_ui
import rospy
from geometry_msgs.msg import Twist, PoseStamped, TwistStamped
from std_msgs.msg import String
from multiprocessing import Process,Queue
from PyQt5.QtCore import *
from receive import Ros2Gui
from PIL import Image
import random
from matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg as FigureCanvas
from matplotlib.figure import Figure
import matplotlib.pyplot as plt
from plotcanvas import PlotCanvas
class Gui2Ros(QMainWindow,xtd_ui.Ui_MainWindow):
def __init__(self):
super(Gui2Ros, self).__init__()
self.setupUi(self)
self.map = 'indoor1'
self.comboBox_maps.currentIndexChanged.connect(self.initplot)
self.button_run.clicked.connect(self.startrun)
self.close_flag = False
self.local_pose = PoseStamped()
self.local_vel = Twist()
self.m = PlotCanvas(self, self.map)
self.m.move(180, 0)
self.flag = 0
# rospy.init_node('multirotor_pyqt5_control')
def initplot(self):
self.map = self.comboBox_maps.currentText()
self.m.canvas_update(self.map)
def startrun(self):
        print('start run!')
self.init_controller()
self.pSend2ros = Process(target=self.run_process)
self.pSend2ros.start()
self.text_thread = Ros2Gui(self.multirotor_select, self.multirotor_num, self.multi_type)
self.text_thread.update_text.connect(self.display)
self.text_thread.plot_array.connect(self.plot)
self.text_thread.start()
# self.pSend2ros = Process(target=self.run_process)
# self.pSend2ros.start()
def init_controller(self):
self.text_show_info.setPlainText('data')
self.multi_num = 0
self.multi_type = []
counnnt = 0
        print(self.multirotor_select)
for j in self.multirotor_select:
self.multi_num = self.multi_num + self.multirotor_num[j]
for id_1 in range(self.multirotor_num[j]):
self.multi_type.append(self.multirotor_type[j])
counnnt+=1
self.color_plot = ['' for i in range(self.multi_num)]
for i in range(self.multi_num):
color_R = hex(random.randint(16,255))
color_G = hex(random.randint(16,255))
color_B = hex(random.randint(16,255))
self.color_plot[i] = '#'+str(color_R)+str(color_G)+str(color_B)
self.color_plot[i] = self.color_plot[i].replace('0x','')
#publish messages to ros nodes like a keyboard
def run_process(self):
rospy.init_node('multirotor_pyqt5_control')
counnnt = 0
if self.control_type == 'vel':
self.multi_cmd_vel_flu_pub = [None] * self.multi_num
self.multi_cmd_pub = [None] * self.multi_num
for i in self.multirotor_select:
for k in range(self.multirotor_num[i]):
if i == 7:
self.multi_cmd_vel_flu_pub[counnnt] = rospy.Publisher('/ugv_' + str(k) + '/cmd_vel', Twist, queue_size=1)
self.multi_cmd_pub[counnnt] = rospy.Publisher('/ugv_' + str(k) + '/cmd', String,queue_size=1)
else:
self.multi_cmd_vel_flu_pub[counnnt] = rospy.Publisher('/xtdrone/' + self.multi_type[counnnt] + '_' + str(k) + '/cmd_vel_flu', Twist, queue_size=1)
self.multi_cmd_pub[counnnt] = rospy.Publisher('/xtdrone/' + self.multi_type[counnnt] + '_' + str(k) + '/cmd', String,queue_size=1)
counnnt += 1
self.leader_cmd_vel_flu_pub = rospy.Publisher("/xtdrone/leader/cmd_vel_flu", Twist, queue_size=1)
self.leader_cmd_pub = rospy.Publisher("/xtdrone/leader/cmd", String, queue_size=1)
else:
self.multi_cmd_accel_flu_pub = [None] * self.multi_num
self.multi_cmd_pub = [None] * self.multi_num
for i in self.multirotor_select:
for k in range(self.multirotor_num[i]):
                    # Index by the running counter (matching the 'vel' branch above), not by the type index
                    self.multi_cmd_accel_flu_pub[counnnt] = rospy.Publisher(
                        '/xtdrone/' + self.multi_type[counnnt] + '_' + str(k) + '/cmd_accel_flu', Twist, queue_size=1)
                    self.multi_cmd_pub[counnnt] = rospy.Publisher(
                        '/xtdrone/' + self.multi_type[counnnt] + '_' + str(k) + '/cmd', String, queue_size=1)
                    counnnt += 1
self.leader_cmd_accel_flu_pub = rospy.Publisher("/xtdrone/leader/cmd_accel_flu", Twist, queue_size=1)
self.leader_cmd_pub = rospy.Publisher("/xtdrone/leader/cmd", String, queue_size=1)
self.twist = [Twist() for i in range (self.multi_num)]
self.cmd = ['' for i in range (self.multi_num)]
self.ctrl_leader = True
self.cmd_vel_mask = False
for j in range(self.multi_num):
self.twist[j].angular.x = 0.0
self.twist[j].angular.y = 0.0
last_forward = [0.0 for i in range(self.multi_num)]
last_upward = [0.0 for i in range(self.multi_num)]
last_leftward = [0.0 for i in range(self.multi_num)]
last_orientation = [0.0 for i in range(self.multi_num)]
last_ctrl_leader = False
last_cmd_vel_mask = False
last_multirotor_get_control = [0 for i in range(self.multi_num)]
last_forward_all = 0.0
last_upward_all = 0.0
last_leftward_all = 0.0
last_orientation_all = 0.0
num = 0
rate = rospy.Rate(30)
check_stop_flag = False
print('StartRun!')
start_flag = False
flag = False
time = 0
while True:
if not start_flag:
flag = self.q_start_control_flag.get()
if flag:
time += 1
start_flag = True
num += 1
if self.q_multirotor_get_control.empty():
multirotor_get_control = last_multirotor_get_control
else:
multirotor_get_control = self.q_multirotor_get_control.get()
last_multirotor_get_control = multirotor_get_control
if self.q_forward.empty():
for i in range(self.multi_num):
if multirotor_get_control[i]:
self.twist[i].linear.x = last_forward[i]
else:
forward = self.q_forward.get()
for i in range(self.multi_num):
if multirotor_get_control[i]:
self.twist[i].linear.x = forward
last_forward[i] = self.twist[i].linear.x
if self.q_upward.empty():
for i in range(self.multi_num):
if multirotor_get_control[i]:
self.twist[i].linear.z = last_upward[i]
else:
upward = self.q_upward.get()
for i in range(self.multi_num):
if multirotor_get_control[i]:
self.twist[i].linear.z = upward
last_upward[i] = self.twist[i].linear.z
if self.q_leftward.empty():
for i in range(self.multi_num):
if multirotor_get_control[i]:
self.twist[i].linear.y = last_leftward[i]
else:
leftward = self.q_leftward.get()
for i in range(self.multi_num):
if multirotor_get_control[i]:
self.twist[i].linear.y = leftward
last_leftward[i] = self.twist[i].linear.y
if self.q_orientation.empty():
for i in range(self.multi_num):
if multirotor_get_control[i]:
self.twist[i].angular.z = last_orientation[i]
else:
orientation = self.q_orientation.get()
for i in range(self.multi_num):
if multirotor_get_control[i]:
self.twist[i].angular.z = orientation
last_orientation[i] = self.twist[i].angular.z
if self.q_ctrl_leader.empty():
self.ctrl_leader = last_ctrl_leader
else:
self.ctrl_leader = self.q_ctrl_leader.get()
last_ctrl_leader = self.ctrl_leader
if self.q_cmd.empty():
for i in range(self.multi_num):
if multirotor_get_control[i]:
self.cmd[i] = ''
else:
cmd = self.q_cmd.get()
if self.ctrl_leader:
for i in range(self.multi_num):
if i == 1:
self.cmd[i] = cmd
else:
self.cmd[i] = ''
else:
for i in range(self.multi_num):
if multirotor_get_control[i]:
self.cmd[i] = cmd
print(self.cmd[i])
if self.q_cmd_vel_mask.empty():
self.cmd_vel_mask = last_cmd_vel_mask
else:
self.cmd_vel_mask = self.q_cmd_vel_mask.get()
last_cmd_vel_mask = self.cmd_vel_mask
if self.q_stop_flag.empty():
pass
else:
check_stop_flag = self.q_stop_flag.get()
if check_stop_flag:
for i in range(self.multi_num):
self.cmd[i] = 'AUTO.RTL'
if self.ctrl_leader:
if self.control_type == 'vel':
self.leader_cmd_vel_flu_pub.publish(self.twist[1])
else:
self.leader_cmd_accel_flu_pub.publish(self.twist[1])
self.leader_cmd_pub.publish(self.cmd[1])
                print(self.cmd[1])
else:
for i in range(self.multi_num):
if not self.cmd_vel_mask:
if self.control_type == 'vel':
self.multi_cmd_vel_flu_pub[i].publish(self.twist[i])
else:
self.multi_cmd_accel_flu_pub[i].publish(self.twist[i])
self.multi_cmd_pub[i].publish(self.cmd[i])
# print self.cmd[0]
else:
                print('shut down!')
rate.sleep()
if check_stop_flag:
self.q_stop_flag.put(True)
rospy.signal_shutdown('STOP!')
break
def display(self, data):
self.text_show_info.setPlainText(data)
def plot(self, data):
for i in range(self.multi_num):
self.m.ax.plot(data[i][0], data[i][1], color = self.color_plot[i])
# self.m.canvas_update(self.map)
self.m.draw()
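# A minimal launch sketch (illustrative; it assumes the generated xtd_ui module,
# a running ROS master and an X display are available). It follows the usual
# PyQt5 application pattern for showing this window.
if __name__ == '__main__':
    import sys
    app = QApplication(sys.argv)
    window = Gui2Ros()
    window.show()
    sys.exit(app.exec_())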
|
import discord
from discord.ext import commands
import time
import datetime
import asyncio
import requests
import json
import sys
import os
client = commands.Bot(command_prefix = '?')
pingEveryXMinutes = 6
live_status = 2 # Preset status
#Youtube data api payload
payload = {
'part' : 'snippet',
    'channelId' : 'YOUTUBE CHANNEL ID',
'type' : 'video',
'eventType' : 'live',
'key' : 'YOUTUBE DATA API KEY',
'maxResults': 50
}
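# A small helper sketch (not part of the original flow) showing the same
# liveness check that on_ready() performs below; it reuses the payload above,
# so the channel-id / API-key placeholders still need to be filled in.
def is_channel_live() -> bool:
    r = requests.get('https://www.googleapis.com/youtube/v3/search', params=payload).json()
    items = r.get('items', [])
    return bool(items) and items[0]['snippet']['liveBroadcastContent'] == 'live'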
@client.event
async def on_ready():
#------------------------------------------------------------------------------------
while True:
dateEvent = ((datetime.datetime.utcnow())+datetime.timedelta(hours=8))# utc+8 taiwan time zone
dateEventStr = dateEvent.strftime("%Y-%m-%d %H:%M:%S")
print('Logged on as:',client.user,'is online.')#use # Choose not to display
print('----------------------------------')#use # Choose not to display
#print bot name and command log
print('First Time run Program :',dateEventStr)#use # Choose not to display
print('Log')#use # Choose not to display
print('----------------------------------')#use # Choose not to display
dateNow = ((datetime.datetime.utcnow())+datetime.timedelta(hours=8))# utc+8 taiwan time zone
dateNowStr = dateNow.strftime("%Y-%m-%d %H:%M:%S")
        channel = client.get_channel(DISCORD_CHANNEL_ID)  # replace DISCORD_CHANNEL_ID with your channel's integer ID
#---------------------------------------------------------------------------
# countdown in cmd
#---------------------------------------------------------------------------
print(dateNowStr,'waittime')
waittime = pingEveryXMinutes * 60
while waittime > 0:
m, s = divmod(waittime, 60)
time_left = str(m).zfill(2) + ":" + str(s).zfill(2)
print (time_left + "\r",end="")
waittime -= 1
time.sleep(1)
print('\n')
print('waittime finish ') # use # Choose not to display
readlocal=open("locallive.txt", "r") #read local txt to import local_live value
reader_local=readlocal.read(14)
#print(readfile)
local_live =int(reader_local.split("=", 1)[1]) #string to int
print(local_live)
        # Initialization
print('Detecting~')#use # Choose not to display
#------------------------------------------------------------------------------
#Detect
#------------------------------------------------------------------------------
r = requests.get('https://www.googleapis.com/youtube/v3/search', params = payload).json()
try:
info = r["items"][0]["snippet"]["liveBroadcastContent"]
        except (KeyError, IndexError):
## no live
live_status = 0
print('NO STREAM NOW')#use # Choose not to display
print('---------------------------------------------------')#use # Choose not to display
else:
## live now
if info == "live":
live_status = 1
print('STREAMING NOW')#use # Choose not to display
print('---------------------------------------------------')#use # Choose not to display
#----------------------------------------------------------------------------
# local_live == live_status -> do nothing
#----------------------------------------------------------------------------
if local_live == live_status:
if live_status == 1:
print('local_live == live_status==1')# use # Choose not to display
print('---------------------------------------------------')# use # Choose not to display
elif live_status == 0:
print('local_live == live_status==0')# use # Choose not to display
print('---------------------------------------------------')# use # Choose not to display
#------------------------------------------------------------------------------
#local_live != live_status ->
#------------------------------------------------------------------------------
elif local_live != live_status:
#------------------------------------------------------------------------------
#send stream on message,local_live=1
#------------------------------------------------------------------------------
if live_status == 1:
#local_live = 1
file2=open("locallive.txt", "w")
str2="live_status=1"
file2.write(str2)
print("change locallive.txt :",str2)#use # Choose not to display
file2.close()
dateOn = ((datetime.datetime.utcnow())+datetime.timedelta(hours=8)) # utc+8 taiwan time zone
dateOnStr = dateOn.strftime("%Y-%m-%d %H:%M:%S")
url='https://www.youtube.com/watch?v='+ r["items"][0]["id"]["videoId"]
send_text_start = '@everyone \n```' + r["items"][0]["snippet"]["channelTitle"] + '\n' + r["items"][0]["snippet"]["title"] +'```\n'
plus_send_text_start = send_text_start + url
await channel.send(plus_send_text_start)
print(dateOnStr,'sending online message ')#use # Choose not to display
print('----------------------------------')#use # Choose not to display
#-------------------------------------------------------------------------------
#send stream off message,local_live=0
#------------------------------------------------------------------------------
elif live_status == 0: #send stop live message,local_live=0,clock_down
#local_live = 0
file3=open("locallive.txt", "w")
str3="live_status=0"
file3.write(str3)
print("change locallive.txt :",str3)#use # Choose not to display
file3.close()
#clock_down
dateEnd = ((datetime.datetime.utcnow())+datetime.timedelta(hours=8)) # utc+8 taiwan time zone
dateEndStr = dateEnd.strftime("%Y-%m-%d %H:%M:%S")
send_text_stop ='```Stream is off ,thank you for your watching ~~```\n'
await channel.send(send_text_stop)
print(dateEndStr,'sending offline message ')#use # Choose not to display
print('----------------------------------')#use # Choose not to display
#-------------------------------------------------------------------------------
        readlocal.close()
client.run('DISCORD BOT TOKEN')
|
import os
import shutil
from setuptools import setup, find_packages, Command
from certbuilder import version
class CleanCommand(Command):
user_options = [
('all', 'a', '(Compatibility with original clean command)')
]
def initialize_options(self):
self.all = False
def finalize_options(self):
pass
def run(self):
folder = os.path.dirname(os.path.abspath(__file__))
for sub_folder in ['build', 'dist', 'certbuilder.egg-info']:
full_path = os.path.join(folder, sub_folder)
if os.path.exists(full_path):
shutil.rmtree(full_path)
setup(
name='certbuilder',
version=version.__version__,
description='Creates and signs X.509 certificates',
long_description='Docs for this project are maintained at https://github.com/wbond/certbuilder#readme.',
url='https://github.com/wbond/certbuilder',
author='wbond',
author_email='will@wbond.net',
license='MIT',
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.2',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: Implementation :: PyPy',
'Topic :: Security :: Cryptography',
],
keywords='crypto pki x509 certificate rsa dsa ec',
install_requires=[
'asn1crypto>=0.22.0',
'oscrypto>=0.18.0'
],
packages=find_packages(exclude=['tests*', 'dev*']),
test_suite='tests.make_suite',
cmdclass={
'clean': CleanCommand,
}
)
|
"""Hook utils."""
import collections.abc
import logging
import os
import sys
from runway.util import load_object_from_string
from runway.variables import Variable, resolve_variables
from ..exceptions import FailedVariableLookup
LOGGER = logging.getLogger(__name__)
def full_path(path):
"""Return full path."""
return os.path.abspath(os.path.expanduser(path))
def handle_hooks(stage, hooks, provider, context):
"""Handle pre/post_build hooks.
These are pieces of code that we want to run before/after the builder
builds the stacks.
Args:
stage (str): The current stage (pre_run, post_run, etc).
hooks (List[:class:`runway.cfngin.config.Hook`]): Hooks to execute.
provider (:class:`runway.cfngin.providers.base.BaseProvider`): Provider
instance.
context (:class:`runway.cfngin.context.Context`): Context instance.
"""
if not hooks:
LOGGER.debug("No %s hooks defined.", stage)
return
hook_paths = []
for i, hook in enumerate(hooks):
try:
hook_paths.append(hook.path)
except KeyError:
raise ValueError("%s hook #%d missing path." % (stage, i))
LOGGER.info("Executing %s hooks: %s", stage, ", ".join(hook_paths))
for hook in hooks:
data_key = hook.data_key
required = hook.required
if not hook.enabled:
LOGGER.debug("hook with method %s is disabled, skipping",
hook.path)
continue
try:
method = load_object_from_string(hook.path)
except (AttributeError, ImportError):
LOGGER.exception("Unable to load method at %s:", hook.path)
if required:
raise
continue
if isinstance(hook.args, dict):
args = [Variable(k, v) for k, v in hook.args.items()]
try: # handling for output or similar being used in pre_build
resolve_variables(args, context, provider)
except FailedVariableLookup:
if 'pre' in stage:
LOGGER.error('Lookups that change the order of '
'execution, like "output", can only be '
'used in "post_*" hooks. Please '
'ensure that the hook being used does '
'not rely on a stack, hook_data, or '
'context that does not exist yet.')
raise
kwargs = {v.name: v.value for v in args}
else:
kwargs = hook.args or {}
try:
result = method(context=context, provider=provider, **kwargs)
except Exception: # pylint: disable=broad-except
LOGGER.exception("Method %s threw an exception:", hook.path)
if required:
raise
continue
if not result:
if required:
LOGGER.error("Required hook %s failed. Return value: %s",
hook.path, result)
sys.exit(1)
LOGGER.warning("Non-required hook %s failed. Return value: %s",
hook.path, result)
else:
            if isinstance(result, collections.abc.Mapping):
if data_key:
LOGGER.debug("Adding result for hook %s to context in "
"data_key %s.", hook.path, data_key)
context.set_hook_data(data_key, result)
else:
LOGGER.debug("Hook %s returned result data, but no data "
"key set, so ignoring.", hook.path)
|
# Resample TIFF image to a higher resolution
import arcpy
arcpy.env.parallelProcessingFactor = "100%"
arcpy.env.workspace = r"S:/Jorddjup"
print('Resampling Jorddjup')
arcpy.Resample_management("jorddjup_10x10.tif", "resample2m.tif", "2", "NEAREST")
|
from django.contrib.auth.forms import UserCreationForm
from core.BootstrapFormMixin import BootstrapFormMixin
class SignUpForm(UserCreationForm,
BootstrapFormMixin):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.setup_form()
|
# coding: UTF-8
import urllib.request as req
import discord
from discord.ext import tasks, commands
import datetime
import customFunc
class periodic(commands.Cog):
def __init__(self, bot):
self.bot = bot
self.minloop.start()
print('Inf: Class "periodic" is instantiated.')
@tasks.loop(seconds=60)
async def minloop(self):
if datetime.datetime.now().strftime('%H-%M') == '22-00':
forecast = customFunc.forecast(1)
for guild in self.bot.guilds:
send = False
for channel in guild.text_channels:
if '天気予報' in channel.name and channel.permissions_for(guild.me).send_messages:
await channel.send(embed=forecast)
send = True
break
if send == False:
for channel in guild.text_channels:
if channel.permissions_for(guild.me).send_messages:
await channel.send(embed=forecast)
break
def setup(bot):
bot.add_cog(periodic(bot))
|
import pytest
from trio_vis.registry import *
from trio_vis.trio_fake import FakeTrioNursery, FakeTrioTask
@pytest.fixture
def drawer() -> SerialNumberGen:
return SerialNumberGen()
@pytest.fixture
def registry() -> SCRegistry:
return SCRegistry()
def test_drawer(drawer: SerialNumberGen):
assert drawer.draw("a") == 0
assert drawer.draw("b") == 0
assert drawer.draw("a") == 1
assert drawer.draw("c") == 0
assert drawer.draw("c") == 1
def test_parser():
assert parse_obj_type(FakeTrioTask(name="t1")) == TYPE_TRIO_TASK
assert parse_obj_type(FakeTrioNursery(name="n1")) == TYPE_TRIO_NURSERY
def test_registry_add(registry: SCRegistry):
t0 = FakeTrioTask(name="func1")
t1 = FakeTrioTask(name="func1")
assert "func1-0" == registry.get_name(t0)
assert "func1-1" == registry.get_name(t1)
assert "func1-0" == registry.get_name(t0)
def test_registry_remove(registry: SCRegistry):
t0 = FakeTrioTask(name="func1")
t1 = FakeTrioTask(name="func1")
assert "func1-0" == registry.get_name(t0)
assert "func1-1" == registry.get_name(t1)
assert True == registry.remove(t0)
with pytest.raises(RuntimeError):
registry.remove(t0)
|
from re import fullmatch
from api import api_call
from config import SETTINGS
from helpers import create_embed, LetterboxdError
async def film_embed(keywords, with_mkdb=False):
input_year = __check_year(keywords)
lbxd_id = __check_if_fixed_search(keywords)
film_json = await __search_request(keywords, input_year, lbxd_id)
lbxd_id = film_json['id']
title = film_json['name']
year = film_json.get('releaseYear')
lbxd_url, tmdb_id, poster_path = __get_links(film_json)
description = await __create_description(lbxd_id, tmdb_id, title)
if with_mkdb:
description += await __get_mkdb_rating(lbxd_url)
description += await __get_stats(lbxd_id)
if year:
title += ' (' + str(year) + ')'
return create_embed(title, lbxd_url, description, poster_path)
async def film_details(keywords):
input_year = __check_year(keywords)
lbxd_id = __check_if_fixed_search(keywords)
film_json = await __search_request(keywords, input_year, lbxd_id)
lbxd_id = film_json['id']
title = film_json['name']
year = film_json.get('releaseYear')
lbxd_url, _, poster_path = __get_links(film_json)
return lbxd_id, title, year, poster_path, lbxd_url
def __check_year(keywords):
last_word = keywords.split()[-1]
if fullmatch(r'\(\d{4}\)', last_word):
return last_word.replace('(', '').replace(')', '')
return ''
def __check_if_fixed_search(keywords):
for title, lbxd_id in SETTINGS['fixed_film_search'].items():
if title.lower() == keywords.lower():
return lbxd_id
return ''
async def __search_request(keywords, input_year, lbxd_id):
found = False
if input_year:
keywords = ' '.join(keywords.split()[:-1])
if lbxd_id:
film_json = await api_call('film/{}'.format(lbxd_id))
return film_json
params = {'input': keywords, 'include': 'FilmSearchItem'}
response = await api_call('search', params)
if not response.get('items'):
raise LetterboxdError('No film was found with this search.')
results = response['items']
if input_year:
for result in results:
if not result['film'].get('releaseYear'):
continue
film_year = str(result['film']['releaseYear'])
if film_year == input_year:
film_json = result['film']
found = True
break
else:
film_json = results[0]['film']
if input_year and not found:
raise LetterboxdError('No film was found with this search.')
return film_json
def __get_links(film_json):
for link in film_json['links']:
if link['type'] == 'letterboxd':
lbxd_url = link['url']
elif link['type'] == 'tmdb':
tmdb_id = link['id']
poster_path = ''
if film_json.get('poster'):
for poster in film_json['poster']['sizes']:
if poster['height'] > 400:
poster_path = poster['url']
break
if not poster_path:
poster_path = film_json['poster']['sizes'][0]['url']
return lbxd_url, tmdb_id, poster_path
async def __create_description(lbxd_id, tmdb_id, title):
description = ''
film_json = await api_call('film/{}'.format(lbxd_id))
original_title = film_json.get('originalName')
if original_title:
description += '**Original Title:** ' + original_title + '\n'
director_str = ''
for contribution in film_json['contributions']:
if contribution['type'] == 'Director':
for dir_count, director in enumerate(contribution['contributors']):
director_str += director['name'] + ', '
break
if director_str:
if dir_count:
description += '**Directors:** '
else:
description += '**Director:** '
description += director_str[:-2] + '\n'
description += await __get_countries(tmdb_id, title)
runtime = film_json.get('runTime')
description += '**Length:** ' + str(runtime) + ' mins\n' if runtime else ''
genres_str = ''
for genres_count, genre in enumerate(film_json['genres']):
genres_str += genre['name'] + ', '
if genres_str:
if genres_count:
description += '**Genres:** '
else:
description += '**Genre:** '
description += genres_str[:-2] + '\n'
return description
async def __get_countries(tmdb_id, title):
api_url = 'https://api.themoviedb.org/3/movie/' + tmdb_id\
+ '?api_key=' + SETTINGS['tmdb']
country_text = ''
country_str = ''
response = await api_call(api_url, None, False)
if response and response['title'] == title:
for count, country in enumerate(response['production_countries']):
if country['name'] == 'United Kingdom':
country_str += 'UK, '
elif country['name'] == 'United States of America':
country_str += 'USA, '
else:
country_str += country['name'] + ', '
if country_str:
if count:
country_text += '**Countries:** '
else:
country_text += '**Country:** '
country_text += country_str[:-2] + '\n'
return country_text
async def __get_mkdb_rating(lbxd_url):
mkdb_url = lbxd_url.replace('letterboxd.com', 'eiga.me/api')
response = await api_call(mkdb_url + 'summary', None, False)
if not response or not response['total']:
return ''
mkdb_description = '**MKDb Average:** [' + str(response['mean'])
    mkdb_description += ' / ' + str(response['total']) + ' ratings]'
    mkdb_description += '(' + mkdb_url.replace('/api', '') + ')\n'
return mkdb_description
async def __get_stats(lbxd_id):
text = ''
stats_json = await api_call('film/{}/statistics'.format(lbxd_id))
views = stats_json['counts']['watches']
if views > 999999:
views = str(round(views / 1000000, 2)) + 'M'
elif views > 9999:
views = str(round(views / 1000)) + 'k'
elif views > 999:
views = str(round(views / 1000, 1)) + 'k'
if stats_json.get('rating'):
ratings_count = stats_json['counts']['ratings']
if ratings_count > 999:
ratings_count = str(round(ratings_count / 1000, 1)) + 'k'
text += '**Average Rating:** ' + str(round(stats_json['rating'], 2))
text += ' / ' + str(ratings_count) + ' ratings\n'
text += 'Watched by ' + str(views) + ' members'
return text
|
'''
DictAdapter
===========
.. versionadded:: 1.5
.. warning::
This code is still experimental, and its API is subject to change in a
future version.
A :class:`~kivy.adapters.dictadapter.DictAdapter` is an adapter around a
python dictionary of records. It extends the list-like capabilities of the
:class:`~kivy.adapters.listadapter.ListAdapter`.
If you wish to have a bare-bones list adapter, without selection, use the
:class:`~kivy.adapters.simplelistadapter.SimpleListAdapter`.
'''
__all__ = ('DictAdapter', )
from kivy.properties import ListProperty, DictProperty
from kivy.adapters.listadapter import ListAdapter
class DictAdapter(ListAdapter):
'''A :class:`~kivy.adapters.dictadapter.DictAdapter` is an adapter around a
python dictionary of records. It extends the list-like capabilities of
the :class:`~kivy.adapters.listadapter.ListAdapter`.
'''
sorted_keys = ListProperty([])
'''The sorted_keys list property contains a list of hashable objects (can
be strings) that will be used directly if no args_converter function is
provided. If there is an args_converter, the record received from a
lookup of the data, using keys from sorted_keys, will be passed
to it for instantiation of list item view class instances.
:attr:`sorted_keys` is a :class:`~kivy.properties.ListProperty` and
defaults to [].
'''
data = DictProperty(None)
'''A dict that indexes records by keys that are equivalent to the keys in
sorted_keys, or they are a superset of the keys in sorted_keys.
The values can be strings, class instances, dicts, etc.
:attr:`data` is a :class:`~kivy.properties.DictProperty` and defaults
to None.
'''
def __init__(self, **kwargs):
if 'sorted_keys' in kwargs:
if type(kwargs['sorted_keys']) not in (tuple, list):
msg = 'DictAdapter: sorted_keys must be tuple or list'
raise Exception(msg)
else:
self.sorted_keys = sorted(kwargs['data'].keys())
super(DictAdapter, self).__init__(**kwargs)
self.bind(sorted_keys=self.initialize_sorted_keys)
def bind_triggers_to_view(self, func):
self.bind(sorted_keys=func)
self.bind(data=func)
# self.data is paramount to self.sorted_keys. If sorted_keys is reset to
# mismatch data, force a reset of sorted_keys to data.keys(). So, in order
# to do a complete reset of data and sorted_keys, data must be reset
# first, followed by a reset of sorted_keys, if needed.
def initialize_sorted_keys(self, *args):
stale_sorted_keys = False
for key in self.sorted_keys:
if not key in self.data:
stale_sorted_keys = True
break
else:
if len(self.sorted_keys) != len(self.data):
stale_sorted_keys = True
if stale_sorted_keys:
self.sorted_keys = sorted(self.data.keys())
self.delete_cache()
self.initialize_selection()
# Override ListAdapter.update_for_new_data().
def update_for_new_data(self, *args):
self.initialize_sorted_keys()
# Note: this is not len(self.data).
def get_count(self):
return len(self.sorted_keys)
def get_data_item(self, index):
if index < 0 or index >= len(self.sorted_keys):
return None
return self.data[self.sorted_keys[index]]
# [TODO] Also make methods for scroll_to_sel_start, scroll_to_sel_end,
# scroll_to_sel_middle.
def trim_left_of_sel(self, *args):
'''Cut list items with indices in sorted_keys that are less than the
index of the first selected item, if there is a selection.
sorted_keys will be updated by update_for_new_data().
'''
if len(self.selection) > 0:
selected_keys = [sel.text for sel in self.selection]
first_sel_index = self.sorted_keys.index(selected_keys[0])
desired_keys = self.sorted_keys[first_sel_index:]
self.data = dict([(key, self.data[key]) for key in desired_keys])
def trim_right_of_sel(self, *args):
'''Cut list items with indices in sorted_keys that are greater than
the index of the last selected item, if there is a selection.
sorted_keys will be updated by update_for_new_data().
'''
if len(self.selection) > 0:
selected_keys = [sel.text for sel in self.selection]
last_sel_index = self.sorted_keys.index(selected_keys[-1])
desired_keys = self.sorted_keys[:last_sel_index + 1]
self.data = dict([(key, self.data[key]) for key in desired_keys])
def trim_to_sel(self, *args):
'''Cut list items with indices in sorted_keys that are les than or
greater than the index of the last selected item, if there is a
selection. This preserves intervening list items within the selected
range.
sorted_keys will be updated by update_for_new_data().
'''
if len(self.selection) > 0:
selected_keys = [sel.text for sel in self.selection]
first_sel_index = self.sorted_keys.index(selected_keys[0])
last_sel_index = self.sorted_keys.index(selected_keys[-1])
desired_keys = self.sorted_keys[first_sel_index:last_sel_index + 1]
self.data = dict([(key, self.data[key]) for key in desired_keys])
def cut_to_sel(self, *args):
'''Same as trim_to_sel, but intervening list items within the selected
range are also cut, leaving only list items that are selected.
sorted_keys will be updated by update_for_new_data().
'''
if len(self.selection) > 0:
selected_keys = [sel.text for sel in self.selection]
self.data = dict([(key, self.data[key]) for key in selected_keys])
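# A minimal construction sketch (illustrative; the data values and the
# args_converter are made up, and ListItemButton is only one possible item
# view class). It shows how sorted_keys, data and an args_converter fit together.
def _example_dict_adapter():
    from kivy.uix.listview import ListItemButton
    data = {'apple': {'text': 'Apple'}, 'banana': {'text': 'Banana'}}
    return DictAdapter(
        sorted_keys=sorted(data.keys()),
        data=data,
        args_converter=lambda index, rec: {'text': rec['text'],
                                           'size_hint_y': None,
                                           'height': 25},
        cls=ListItemButton)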
|
import ipdb
import yaml
import os
import sys
from shutil import rmtree
import argparse
from time import sleep
from subprocess import Popen, PIPE
from pathlib import Path
from typing import List
from e3sm_to_cmip import resources
DESC = '''test the output of the e3sm_to_cmip package from
two git branches and run a comparison check on the output. Returns 0 if
all checks run successfully, 1 otherwise. These checks
use the included CWL workflows to post-process the data and prepare it
to be ingested by e3sm_to_cmip.
At the moment it only tests atmospheric monthly variables, but more will be added in the future'''
def run_cmd(cmd: str, shell=False, show_output=True):
print(f"running: '{cmd}'")
if not shell:
proc = Popen(cmd.split(), stdout=PIPE, stderr=PIPE)
else:
proc = Popen(cmd, stdout=PIPE, stderr=PIPE, shell=True)
output = []
while proc.poll() is None:
out = proc.stdout.read()
if out:
o = out.decode('utf-8')
if show_output:
print(o)
output.append(o)
err = proc.stderr.read()
if err:
print(err.decode('utf-8'))
sleep(0.1)
return proc.returncode, ''.join(output)
def swap_branch(target_branch):
cmd = 'git status --porcelain'
retcode, output = run_cmd(cmd)
if retcode:
print('Error checking the git status')
return 1
if output:
if '??' in output or 'M' in output or 'nothing to commit, working tree clean' not in output:
print('git status is not clean, commit or stash your changes and try again')
return 1
del output
# check out the branch to compare this one against
cmd = f'git checkout {target_branch}'
retcode, output = run_cmd(cmd)
if retcode:
print(f'Error checking out {target_branch}')
return 1
del output
def install_and_run_test(branchname, vars, frequency, input_path, output_path, tables_path):
test_output_path = Path(output_path, branchname)
if test_output_path.exists():
print("removing previous testing source output")
rmtree(str(test_output_path))
print(f"Creating output directory {test_output_path}")
test_output_path.mkdir(parents=True)
# swap over to the comparison branch
# ipdb.set_trace()
if retcode := swap_branch(branchname):
print("Unable to swap to branch {branchname}, exiting")
return retcode
# install the comparison version of the package so CWL uses the right version
cmd = "find . -name '*.pyc' -delete; python setup.py install"
retcode, output = run_cmd(cmd, shell=True, show_output=False)
if retcode:
print(f'Error installing from comparison branch {branchname}')
return 1
del output
if retcode := run_test(vars, frequency, input_path, test_output_path, tables_path):
print(f"Error running {branchname} branch test")
return retcode
return 0
def run_test(vars, freq, input_path, out_path, tables_path):
# ipdb.set_trace()
resource_path, _ = os.path.split(os.path.abspath(resources.__file__))
default_metadata_path = Path(resource_path, 'default_metadata.json')
if not default_metadata_path.exists():
print(f"Error: cannot file default CMOR metadata file: {default_metadata_path}")
return 1
# actually run the package and make some output
cmd = f"e3sm_to_cmip --serial -v {', '.join(vars)} -i {input_path} -o {out_path} -t {tables_path} -u {default_metadata_path} -f {freq}"
retcode, output = run_cmd(cmd)
if output:
print(output)
return retcode
def get_input_vars(output_path, input_path, vars, tables_path, freq):
info_path = Path(output_path, 'var_info.yaml')
cmd = f"e3sm_to_cmip --info -v {vars} -t {tables_path} -f {freq} -i {input_path} --info-out {info_path}"
retcode, output = run_cmd(cmd, show_output=False)
if retcode:
print("Error getting available timeseries input files")
return retcode
    with open(info_path, 'r') as infile:
        info = yaml.load(infile, Loader=yaml.SafeLoader)
    # NOTE: this helper appears unfinished; assuming var_info.yaml maps variable
    # names to their available input files, return the names that were found.
    return list(info) if info else []
def compare_output(output_path, src_name, cmp_name):
# i like to have the heavy imports happen after the command line arguments get parsed
# so that startup is faster when running --help
import xarray as xr
import numpy as np
# compare the output between the two runs
issues = []
tables = Path(output_path, cmp_name, 'CMIP6/CMIP/E3SM-Project/E3SM-1-0/piControl/r1i1p1f1/').glob("*")
for table in tables:
src_table_path = Path(output_path, src_name, 'CMIP6/CMIP/E3SM-Project/E3SM-1-0/piControl/r1i1p1f1/', table.name)
for variable in table.glob('*'):
# check that the variable exists in both sets of output
source_var_path = src_table_path / variable.name
if not source_var_path.exists():
msg = f"{variable} exists in the comparison branch {cmp_name}, but not in the source branch {src_name}"
issues.append(msg)
continue
# get the paths to the highest number dataset for the source and comparison datasets
try:
cmp_var_data_path = sorted([x for x in Path(table, variable.name, 'gr').glob('v*')])[-1]
except IndexError:
msg = f"Empty variable list in comparison branch {cmp_name}"
issues.append(msg)
continue
try:
src_var_data_path = sorted([x for x in Path(src_table_path, variable.name, 'gr').glob('v*')])[-1]
except IndexError:
msg = f"Empty variable list in source branch {src_name}"
issues.append(msg)
continue
print(f"Running comparison for {variable.name}", end=" ... ")
# each dataset should only have a single file in it
with xr.open_dataset(cmp_var_data_path.glob('*.nc').__next__()) as cmp_ds, \
xr.open_dataset(src_var_data_path.glob('*.nc').__next__()) as src_ds:
if np.allclose(cmp_ds[str(variable.name)], src_ds[str(variable.name)]):
print(f"{variable.name} test pass")
else:
msg = f"{variable.name}: values do not match"
issues.append(msg)
return issues
def test(vars: List, cmp_branch: str, input_path: Path, output_path: Path, tables: str, freq: str, cleanup=False):
if not input_path.exists():
raise ValueError(f"Input directory {input_path} does not exist")
output_path.mkdir(exist_ok=True)
# store what the source branch name is
cmd = "git branch --show-current"
retcode, output = run_cmd(cmd)
if retcode:
print('Error getting the source git branch')
return 1
src_branch = output.strip()
if ret := install_and_run_test(cmp_branch, vars, freq, input_path=input_path, output_path=output_path, tables_path=tables):
print(f"Failure to run {cmp_branch}")
return ret
if ret := install_and_run_test(src_branch, vars, freq, input_path=input_path, output_path=output_path, tables_path=tables):
print(f"Failure to run {src_branch}")
return ret
if issues := compare_output(output_path, src_branch, cmp_branch):
for issue in issues:
print(issue)
return 1
else:
        if cleanup:
            # 'cleanup' is a flag, not a callable; remove the generated output on success
            rmtree(str(output_path), ignore_errors=True)
return 0
def main():
parser = argparse.ArgumentParser(
prog='e3sm_to_cmip',
description=DESC)
parser.add_argument(
'input',
help='directory of timeseries netcdf files to as input to the e3sm_to_cmip package. these should already be regridded/remapped as needed')
parser.add_argument(
'tables',
help='Path to the Tables directory in the cmip6-cmor-tables repository')
default_output_path = Path(os.environ['PWD'], 'testing_output')
parser.add_argument(
'-o', '--output',
default=default_output_path,
required=False,
help=f'path to where the output files from the test should be stored, default is {default_output_path}')
parser.add_argument(
'-v', '--var-list',
default=['all'],
nargs="*",
help='select which variables to include in the comparison, default is all')
parser.add_argument(
'-f', '--frequency',
default='mon',
help='temporal frequency of the input timeseries files, default is mon')
parser.add_argument(
'-c', '--compare',
default='master',
help='select which branch to run the comparison against, default is master')
parser.add_argument(
'--cleanup',
action='store_true',
help='remove the generated data if the test result is a success')
parsed_args = parser.parse_args()
try:
retval = test(
input_path=Path(parsed_args.input),
tables=parsed_args.tables,
vars=parsed_args.var_list,
cmp_branch=parsed_args.compare,
output_path=Path(parsed_args.output),
cleanup=parsed_args.cleanup,
freq=parsed_args.frequency)
except Exception as e:
print(e)
retval = 1
    # test success actions (cleanup, when requested, is handled inside test())
    if retval == 0:
        print('Testing successful')
else:
print('Testing error')
return retval
if __name__ == "__main__":
sys.exit(main())
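# Example invocation (illustrative; the paths and variable names are placeholders
# for your own regridded timeseries directory, cmip6-cmor-tables checkout and
# variables of interest):
#
#   python <this_script>.py /path/to/timeseries /path/to/cmip6-cmor-tables/Tables \
#       -v FSNS FLNS -f mon -c master --cleanup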
|
import configparser
from datetime import datetime
import os
from pyspark.sql import SparkSession
from pyspark.sql.functions import udf, col
from pyspark.sql.functions import year, month, dayofmonth, hour, weekofyear, date_format
from pyspark.sql.functions import monotonically_increasing_id
from pyspark.sql.types import StructType as R, StructField as Fld, DoubleType as Dbl, StringType as Str, IntegerType as Int, DateType as Dat, TimestampType
config = configparser.ConfigParser()
config.read('dl.cfg')
# configparser returns whole sections; assuming dl.cfg keeps the credentials in an [AWS] section
os.environ['AWS_ACCESS_KEY_ID'] = config['AWS']['AWS_ACCESS_KEY_ID']
os.environ['AWS_SECRET_ACCESS_KEY'] = config['AWS']['AWS_SECRET_ACCESS_KEY']
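# Assumed dl.cfg layout for the lookups above (illustrative; adjust the section
# and key names to match your actual config file):
#
#   [AWS]
#   AWS_ACCESS_KEY_ID=...
#   AWS_SECRET_ACCESS_KEY=...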
def create_spark_session():
"""
Create or retrieve a Spark Session
"""
spark = SparkSession \
.builder \
.config("spark.jars.packages", "org.apache.hadoop:hadoop-aws:2.7.0") \
.getOrCreate()
return spark
def process_song_data(spark, input_data, output_data):
"""
Description: This function loads song_data from S3 and processes it by extracting the songs and artist tables
and then again loaded back to S3
Parameters:
spark : Spark Session
input_data : location of song_data json files with the songs metadata
output_data : S3 bucket were dimensional tables in parquet format will be stored
"""
song_data = input_data + 'song_data/*/*/*/*.json'
songSchema = R([
Fld("artist_id",Str()),
Fld("artist_latitude",Dbl()),
Fld("artist_location",Str()),
Fld("artist_longitude",Dbl()),
Fld("artist_name",Str()),
Fld("duration",Dbl()),
Fld("num_songs",Int()),
Fld("title",Str()),
Fld("year",Int()),
])
df = spark.read.json(song_data, schema=songSchema)
song_fields = ["title", "artist_id","year", "duration"]
songs_table = df.select(song_fields).dropDuplicates().withColumn("song_id", monotonically_increasing_id())
songs_table.write.partitionBy("year", "artist_id").parquet(output_data + 'songs/')
artists_fields = ["artist_id", "artist_name as name", "artist_location as location", "artist_latitude as latitude", "artist_longitude as longitude"]
artists_table = df.selectExpr(artists_fields).dropDuplicates()
artists_table.write.parquet(output_data + 'artists/')
def process_log_data(spark, input_data, output_data):
"""
Description: This function loads log_data from S3 and processes it by extracting the songs and artist tables
and then again loaded back to S3. Also output from previous function is used in by spark.read.json command
Parameters:
spark : Spark Session
input_data : location of log_data json files with the events data
output_data : S3 bucket were dimensional tables in parquet format will be stored
"""
log_data = input_data + 'log_data/*/*/*.json'
df = spark.read.json(log_data)
df = df.filter(df.page == 'NextSong')
users_fields = ["userdId as user_id", "firstName as first_name", "lastName as last_name", "gender", "level"]
users_table = df.selectExpr(users_fields).dropDuplicates()
users_table.write.parquet(output_data + 'users/')
    # 'ts' holds the event time as a millisecond epoch, so convert it to a timestamp
    get_datetime = udf(lambda ts: datetime.fromtimestamp(ts / 1000), TimestampType())
df = df.withColumn("start_time", get_datetime('ts'))
    time_table = df.select("start_time").dropDuplicates() \
        .withColumn("hour", hour(col("start_time"))) \
        .withColumn("day", dayofmonth(col("start_time"))) \
        .withColumn("week", weekofyear(col("start_time"))) \
        .withColumn("month", month(col("start_time"))) \
        .withColumn("year", year(col("start_time"))) \
        .withColumn("weekday", date_format(col("start_time"), 'E'))
    time_table.write.partitionBy("year", "month").parquet(output_data + 'time/')
df_songs = spark.read.parquet(output_data + 'songs/*/*/*')
df_artists = spark.read.parquet(output_data + 'artists/*')
    songs_logs = df.join(df_songs, (df.song == df_songs.title))
artists_songs_logs = songs_logs.join(df_artists, (songs_logs.artist == df_artists.name))
songplays = artists_songs_logs.join(
time_table,
        artists_songs_logs.start_time == time_table.start_time, 'left'
).drop(artists_songs_logs.year)
songplays_table = songplays.select(
col('start_time').alias('start_time'),
col('userId').alias('user_id'),
col('level').alias('level'),
col('song_id').alias('song_id'),
col('artist_id').alias('artist_id'),
col('sessionId').alias('session_id'),
col('location').alias('location'),
col('userAgent').alias('user_agent'),
col('year').alias('year'),
col('month').alias('month'),
).repartition("year", "month")
songplays_table.write.partitionBy("year", "month").parquet(output_data + 'songplays/')
def main():
"""
Extract songs and events data from S3, Transform it into dimensional tables format, and Load it back to S3 in Parquet format
"""
spark = create_spark_session()
input_data = "s3a://udacity-dend/"
output_data = "s3a://sparkify-dend/"
process_song_data(spark, input_data, output_data)
process_log_data(spark, input_data, output_data)
if __name__ == "__main__":
main() |
import os
from os.path import basename, normcase, splitext
import click
from packaging.version import Version
SHELLS = ["bash", "zsh", "fish"]
@click.command("shell-completion")
@click.option(
"-s",
"--shell",
type=click.Choice(["auto"] + SHELLS),
default="auto",
show_default=True,
help="The shell for which to generate completion code; `auto` attempts autodetection",
)
def shell_completion(shell):
"""
Emit shell script for enabling command completion.
The output of this command should be "sourced" by bash or zsh to enable
command completion.
Example:
\b
$ source <(dandi shell-completion)
$ dandi --<PRESS TAB to display available option>
"""
if shell == "auto":
try:
shell = basename(os.environ["SHELL"])
except KeyError:
raise click.UsageError(
"Could not determine running shell: SHELL environment variable not set"
)
shell = normcase(shell)
stem, ext = splitext(shell)
if ext in (".com", ".exe", ".bat"):
shell = stem
if shell not in SHELLS:
raise click.UsageError(f"Unsupported/unrecognized shell {shell!r}")
if Version(click.__version__) < Version("8.0.0"):
varfmt = "source_{shell}"
else:
varfmt = "{shell}_source"
os.environ["_DANDI_COMPLETE"] = varfmt.format(shell=shell)
from .command import main
main.main(args=[])
|
import json
import logging
import os
import joblib
import pytest
from prediction_service.prediction import validate_input
import prediction_service
input_data = {
"incorrect_range":
{
"tenure" : 76,
"MonthlyCharges":5
},
"correct_range":
{
"tenure" : 66,
"MonthlyCharges":105
},
"incorrect_col":
{
"Tenure" : 25,
"montlycharges":90
}
}
TARGET_range = {
"min": 0.0,
"max": 118.0
}
def test_form_response_correct_range(data=input_data["correct_range"]):
res = validate_input(data)
assert TARGET_range["min"] <= res <= TARGET_range["max"]
#def test_api_response_correct_range(data=input_data["correct_range"]):
# res = api_response(data)
# assert TARGET_range["min"] <= res["response"] <= TARGET_range["max"]
def test_form_response_incorrect_range(data=input_data["incorrect_range"]):
with pytest.raises(prediction_service.prediction.NotinRange):
res = validate_input(data)
#def test_api_response_incorrect_range(data=input_data["incorrect_range"]):
# res = api_response(data)
# assert res["response"] == prediction_service.prediction.NotinRange().message
#def test_api_response_incorrect_col(data=input_data["incorrect_col"]):
# res = validate_input(data)
# assert res["response"] == prediction_service.prediction.NotinCols().message
|
import pytest
day09 = __import__("day-09")
process = day09.process_gen
def builder():
with open('day-19.txt', 'r') as f:
text = f.read().strip()
idata = [int(x) for x in text.split(',')]
def get_value(x, y):
        assert x >= 0 and y >= 0
inp = [y, x]
v = next(process(idata, inp))
return v == 1
return get_value
def build_map(x, y):
get_value = builder()
data = dict()
for j in range(y):
for i in range(x):
value = get_value(i, j)
ch = '#' if value else '.'
data[(i, j)] = ch
return data
def run1():
data = build_map(50, 50)
print_map(data)
ones = [k for k, v in data.items() if v == '#']
rv = len(ones)
print(rv)
return rv
def test_case1():
assert run1() == 220
def run2():
check = builder()
size = 100 - 1
x, y = 0, size
# (x, y-s) (x+s, y-s)
# (x, y) (x+s, y)
while True:
while not check(x, y):
x += 1
if check(x + size, y - size) and check(x, y-size) and check(x+size, y):
rv = x*10000 + (y - size)
print(rv)
return rv
y += 1
def test_case2():
assert run2() == 10010825
def print_map(data):
xmin = min(k[0] for k in data.keys())
xmax = max(k[0] for k in data.keys())
ymin = min(k[1] for k in data.keys())
ymax = max(k[1] for k in data.keys())
for y in range(ymin, ymax + 1):
for x in range(xmin, xmax + 1):
ch = data.get((x, y), 'x')
print(ch, end='')
print()
if __name__ == '__main__':
run2()
|
import time
import platform
import datetime
import argparse
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.firefox.firefox_binary import FirefoxBinary
from selenium.webdriver.firefox.service import Service
url = "https://web.whatsapp.com/"
browser_binary = "C:\\Users\\User\\Downloads\\PortableApps\\PortableApps\\FirefoxPortable\\App\\Firefox64\\firefox.exe" # Change this
geckodriver_path = "C:\\Users\\User\\Downloads\\geckodriver-v0.29.1-win64\\" # Change this
# colors
YELLOW = '\x1b[33m'
LIGHT_CYAN = '\x1b[96m'
RESET = '\x1b[39m'
RED = '\x1b[31m'
GREEN = '\x1b[32m'
LIGHT_GREEN = '\x1b[92m'
BLACK = '\x1b[30m'
WHITE = '\x1b[47m'
LIGHT_RED = '\x1b[91m'
LIGHT_BLUE = '\x1b[94m'
OS = platform.platform().split('-', 1)[0]
class CanNotGetUserName(Exception):
"""
Error class
"""
def _login():
global driver
binary = FirefoxBinary(browser_binary)
try:
if OS == "Linux":
driver = webdriver.Firefox(
service=Service(geckodriver_path), firefox_binary=binary)
if OS == "Windows":
driver = webdriver.Firefox(
geckodriver_path, firefox_binary=binary)
except Exception as e:
print(f"{YELLOW}[{RED}-{YELLOW}]{RED} An error occoured: {e}It looks like you didn't change firefox and geckodriver path. If both paths are right but you still get this error, please feel free to submit it here: {LIGHT_GREEN}https://github.com/TralseDev/WhatsPy")
exit(-1)
driver.get(url)
print("> Ready to scan QRCode... Type in `y` if finished scanning QRCode:")
time.sleep(0.1)
input(">> ")
def _get_user(username):
try:
# Searches for the user name and returns `True` if it was successful
search_box_element = driver.find_elements_by_class_name(
"selectable-text")[0]
search_box_element.send_keys(username)
search_box_element.send_keys(Keys.ENTER)
search_box_element.send_keys(
Keys.CONTROL + Keys.LEFT_SHIFT + Keys.BACKSPACE)
return True, ""
except Exception as e:
# Otherwise it returns `False`
return False, e
def _get_status(username) -> bool:
get_user = _get_user(username)
if not get_user[0]:
raise CanNotGetUserName(f"{get_user[1]}")
try:
return driver.find_element_by_class_name("_7yrSq").text == "online"
except Exception:
return False
def _logs(data: str, file: str):
with open(file, "a+") as f:
f.write(data+"\n")
def print_loop(char: str, loops: int) -> str:
return "".join(char for _ in range(loops))
def spy(usernames: list, seconds: int, logs: str, duration: int = 1, md=False):
global longest_length
length = 0
if md and logs.endswith(".txt"):
print(
f"{YELLOW}[{LIGHT_CYAN}i{YELLOW}]{RESET} Detected txt extension for md file, changing extension to 'md'...")
logs = logs.split(".txt")[:-1]+'.md'
for username in usernames:
if len(username)+65 > length:
length = len(username)+65
longest_length = len(username)
print_line = print_loop("-", 27+longest_length)
print_line_with_space = "|"+print_loop(" ", 27+longest_length-2)+"|"
print_line_with_space_logs = "|" + \
print_loop(" ", 27+longest_length-2+10)+"|"
print(
f"\n\n{RED}<{GREEN} Start of logs {RED}>")
print(print_line)
print(f"{LIGHT_CYAN}Scanning for {', '.join(usernames)}{RED}")
print(print_line)
print(f"{RED}{print_line_with_space}")
print(f"{print_line_with_space}")
print(f"{print_line_with_space}{RESET}")
if md:
_logs(
f"\n### Start of logs @ {datetime.datetime.now().strftime('%H:%M:%S, %d.%m.%Y')}", logs)
_logs(print_line+'---', logs)
_logs(f"Scanning for `{', '.join(usernames)}`:", logs)
_logs(print_line+'---', logs)
else:
_logs(
f"\n< Start of logs @ {datetime.datetime.now().strftime('%H:%M:%S, %d.%m.%Y')} >", logs)
_logs(print_line+'----------', logs)
_logs(f"Scanning for {', '.join(usernames)}:", logs)
_logs(print_line+'----------', logs)
_logs(print_line_with_space_logs, logs)
_logs(print_line_with_space_logs, logs)
_logs(print_line_with_space_logs, logs)
for _ in range(int((duration*60*60)/seconds)):
if len(usernames) > 1:
for username in usernames:
if _get_status(username):
msg = f"{RED}| >{YELLOW} [{LIGHT_CYAN}{username}{YELLOW}]{LIGHT_CYAN} online{RESET}: {LIGHT_CYAN}{datetime.datetime.now().strftime('%H:%M:%S')} {RED} "
msg_log = f"| > [{username}] online: {datetime.datetime.now().strftime('%H:%M:%S, %d.%m.%Y')} "
while len(msg) <= length:
msg += ' '
while len(msg_log) <= length-8*5+10:
msg_log += ' '
msg += '|'
msg_log += '|'
print(msg)
_logs(data=msg_log,
file=logs)
        elif _get_status(usernames[0]):
            msg = f"{RED}| >{YELLOW} [{LIGHT_CYAN}{usernames[0]}{YELLOW}]{LIGHT_GREEN} online{RESET}: {LIGHT_CYAN}{datetime.datetime.now().strftime('%H:%M:%S')} {RED} "
            msg_log = f"| > [{usernames[0]}] online: {datetime.datetime.now().strftime('%H:%M:%S, %d.%m.%Y')} "
while len(msg) <= length:
msg += ' '
while len(msg_log) <= length-8*5+10:
msg_log += ' '
msg += '|'
msg_log += '|'
print(msg)
_logs(data=msg_log,
file=logs)
time.sleep(seconds)
print(f"{RED}{print_line_with_space}")
print(f"{print_line_with_space}")
print(f"{print_line_with_space}")
print(f"{print_line}{RESET}")
print(f"{RED}<{GREEN} End of logs {RED}>\n\n")
print(f"{YELLOW}->{RESET} Good bye!")
_logs(print_line_with_space_logs, logs)
_logs(print_line_with_space_logs, logs)
_logs(print_line_with_space_logs, logs)
if md:
_logs(print_line+"---", logs)
_logs(
f"### End of logs @ {datetime.datetime.now().strftime('%H:%M:%S, %d.%m.%Y')}\n", logs)
_logs('### Good bye! Exiting program', logs)
else:
_logs(print_line+"----------", logs)
_logs(
f"< End of logs @ {datetime.datetime.now().strftime('%H:%M:%S, %d.%m.%Y')} >\n", logs)
_logs('-> Good bye! Exiting program', logs)
driver.close()
exit(0)
def console_management():
cmd = ""
print(f"{BLACK}{WHITE}Help: Type in:\n b -> break\n p {time} -> pause for that time\n s -> stop until starting\n c -> continue{RESET}")
while cmd != "b":
cmd = input(
f"{LIGHT_RED}>>> {LIGHT_CYAN}")
cmds = ["p", "s", "c"]
if cmd not in cmds:
print("Command not found. Please check the help menu!")
elif cmd.startswith("p"):
if len(cmd) > 2:
print("Isn't implemented yet.")
else:
print("Use: p {time}")
elif cmd == "s":
print("Isn't implemented yet.")
elif cmd == "c":
print("Isn't implemented yet.")
def main(usernames, seconds, logs, duration):
"""
username(s) should be in form: username1, username2, username3, ...
"""
_login()
if ',' in usernames:
usernames = [i.strip() for i in usernames.split(',')]
spy(usernames, seconds, logs, duration)
else:
spy([usernames], seconds, logs, duration)
if __name__ == '__main__':
welcome_msg = f"""
{LIGHT_BLUE}o.0 always spying
{LIGHT_GREEN} _ ____ __ ____
| | / / /_ ____ _/ /______/ __ \__ __
| | /| / / __ \/ __ `/ __/ ___/ /_/ / / / /
| |/ |/ / / / / /_/ / /_(__ ) ____/ /_/ /
|__/|__/_/ /_/\__,_/\__/____/_/ \__, /
/____/{LIGHT_BLUE}
"""
print(welcome_msg)
time.sleep(3)
try:
parser = argparse.ArgumentParser(
description='WhatsSpy is a tool for spying whatsapp users')
parser.add_argument('-u', '--users', metavar='', type=str,
help="Users (should be in format: 'username1, username2, username3, ...')")
parser.add_argument('-t', '--timeout', metavar='',
type=int, help="Time to sleep between checks")
parser.add_argument('-l', '--logfile', metavar='', type=str,
help="Log file, any extension is allowed.")
parser.add_argument('-d', '--duration', metavar='',
type=float, help='Duration (in hours) as float')
args = parser.parse_args()
main(args.users, args.timeout, args.logfile, duration=args.duration)
except KeyboardInterrupt:
print(f"\n{YELLOW}> Good bye!")
|
# Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Flags for gcloud ml language commands."""
from googlecloudsdk.api_lib.ml.language import util
def AddContentToRequest(unused_ref, args, request):
"""The Python hook for yaml commands to inject content into the request."""
source = util.GetContentSource(args.content, args.content_file)
source.UpdateContent(request.document)
return request
|
import shutil
import os
import cv2
import matplotlib.pyplot as plt
import errno
import pathlib
rootdir = "./af2019-cv-testA-20190318/"
patha = "./a/"
pathb = "./b/"
pathc = "./c/"
pathout = "./merged_test/"
def mkdir_p(path):
try:
os.makedirs(path)
    except OSError as exc:
        if exc.errno == errno.EEXIST and os.path.isdir(path):
pass
else:
raise
def move(path):
"""root dir"""
print("Start to move images")
for f in os.listdir(path):
if os.path.isdir(path+f):
for file in os.listdir(path+f):
name, ext = os.path.splitext(file)
if name.endswith("_a"):
#print(path+f+"/"+file)
shutil.copy(path+f+"/"+file, patha+file)
elif name.endswith("_b"):
#print(path+f+"/"+file)
shutil.copy(path+f+"/"+file, pathb+file)
elif name.endswith("_c"):
#print(path+f+"/"+file)
shutil.copy(path+f+"/"+file, pathc+file)
print("Move completed!!!")
def mergeImg(patha):
print("Start to merge three images into one image")
for f in os.listdir(patha):
#print(f)
name,ext = f.split('_')
#print(patha+name+"_a.jpg")
imga = cv2.imread(patha+name+"_a.jpg", -1)
imgb = cv2.imread(pathb+name+"_b.jpg", -1)
imgc = cv2.imread(pathc+name+"_c.jpg", -1)
img = cv2.merge([imgc,imgb,imga])
outname = name+".jpg"
cv2.imwrite(pathout+outname, img)
del imga,imgb,imgc,img
print("Merge completed!!")
def delete_folder(dest):
shutil.rmtree(dest, ignore_errors=True)
if __name__ == "__main__":
mkdir_p(patha)
mkdir_p(pathb)
mkdir_p(pathc)
mkdir_p(pathout)
move(rootdir)
mergeImg(patha)
print("Deleting a,b,c")
delete_folder(patha)
delete_folder(pathb)
delete_folder(pathc)
print("Delete completed!!!")
|
#!/usr/bin/env python3
# Copyright (c) 2019 The Unit-e developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""
Replaces CamelCase with snake_case in symbols like m_helloWorld -> m_hello_world.
One-liner:
for f in $(find src/esperanza -name '*cpp' -or -name '*h'); do contrib/devtools/camel_to_snake.py $f; done
"""
import sys
def kill_camels(line):
"""
Replaces words in the string from m_prefixHelloWorld style to m_prefix_hello_world one.
Applies for any words.
"""
result = ''
# finite state machine or alike
have_m = False
have_delimiter = True
have_full_prefix = False
for idx, c in enumerate(line):
if have_full_prefix and (c.isalpha() or c.isdigit() or c == '_'):
if c.isupper():
if result[-1:] != '_' and not line[idx-1].isupper():
result += '_'
result += c.lower()
else:
result += c
else:
have_full_prefix = False
if have_m and c == '_':
have_full_prefix = True
result += c
elif have_delimiter and c == 'm':
have_m = True
result += c
elif not(c.isalpha() or c.isdigit()):
have_delimiter = True
result += c
else:
result += c
have_delimiter = False
have_m = False
return result
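# Minimal sanity check for the state machine above (hypothetical sample line,
# not taken from the Unit-e sources): only identifiers carrying the m_ prefix
# are rewritten; plain camelCase names are left untouched.
assert kill_camels("int m_blockHeight = m_chainTip + localCount;") == \
    "int m_block_height = m_chain_tip + localCount;"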
def main(argv):
if len(argv) < 2:
print('Usage: {0} <filename>\n'
'\n'
'Replaces m_variableNameWhatever with m_variable_name_whatever '
'in the specified <filename>'.format(argv[0]))
sys.exit(1)
filename = argv[1]
result = []
with open(filename, 'r') as f:
for line in f:
result += [kill_camels(line)]
with open(filename, 'w') as f:
for line in result:
f.write(line)
if __name__ == '__main__':
main(sys.argv)
|
# Project Euler 3: largest prime factor of 600851475143
# Answer = 6857, runtime ~ 0.017 s
import time
import math
start = time.time()
def is_prime(num):
if num == 1 or num == 0:
return False
for x in range(2, int(math.sqrt(num)) + 1):
if num % x == 0:
return False
return True
num = 600851475143
limit = 10000
L = [0] * limit
maxprime = 0
x = 1
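# Trial division: walk x upward and repeatedly divide num by each prime factor
# found; the last factor divided out is the largest prime factor. L caches
# primality results so repeated divisions by the same x skip the is_prime() call.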
while x >= 1:
if x >= limit or x > num:
break
if L[x] == 0 and is_prime(x):
L[x] = 1
if L[x] == 1 and num % x == 0:
num = num // x
maxprime = x
else:
x += 1
print(maxprime)
end = time.time()
print(end - start) |
import re
from pddl import Utils, PDDLPart, fluenttree, Operator
class Domain(PDDLPart.PDDLPart):
def __init__(self, dom_string):
if dom_string is not None:
super().__init__(dom_string)
self.prelude = ""
self.actions = []
for child in self.children:
tokens = re.split(r'\s', child)
identifier = tokens[0]
if identifier == 'domain':
self.name = tokens[1]
elif identifier == ':requirements':
self.requirements = Utils.get_colon_sections(child)[1:]
elif identifier == ':types':
self.type_string = child.replace(":types", "")
elif identifier == ':predicates':
# self.predicates = PDDLPart.PDDLPart(child).children # TODO: Map these to their own objects eventually?
self.predicates = [fluenttree.AbstractPredicate(ch) for ch in PDDLPart.PDDLPart(child).children]
elif identifier == ':action':
self.actions.append(Operator.Operator(child))
self.summary = self.name + "\nPredicates: " + str(len(self.predicates)) + "\nActions: " + str(len(self.actions))
else: # Blank domain for writing into
super().__init__("")
self.actions = []
self.name = "EMPTY-DOMAIN"
self.requirements = []
self.type_string = ""
self.predicates = []
self.summary = self.name + "\nPredicates: " + str(len(self.predicates)) + "\nActions: " + str(len(self.actions))
self.prelude = ""
def to_pddl(self):
res = self.prelude + "\n"
nl = "\n"
tab = "\t"
res += f"""
(define (domain {self.name})
(:requirements {" ".join(self.requirements)})
(:types
{self.type_string}
)
(:predicates
{(nl+tab+tab).join(["(" + p.typed_string() + ")" for p in self.predicates])}
)
{(nl+tab).join([str(x) for x in self.actions if x.effect.identifier != ""])}
)
"""
return res
def print_actions(self):
for action in self.actions:
print("\n\n", str(action))
# print("Pre:\n",action.precondition.to_string())
# print("Eff:\n",action.effect.to_string())
# print("Params:\n",action.parameters)
|
# -*- coding: utf-8 -*-
"""
MATPOWER module in `psst`
Copyright (C) 2016 Dheepak Krishnamurthy
"""
from __future__ import print_function, absolute_import
import re
import logging
import os
import pandas as pd
from .reader import parse_file, find_attributes, find_name
from .utils import COLUMNS
|
from constants import dictionary_of_display_formats
def number_to_lcd_representation(number):
return [dictionary_of_display_formats.get(number_to_look_up) for number_to_look_up in map(int, str(number))]
def print_number_from_representation(list_representation_of_number):
for representation in zip(*list_representation_of_number):
print("".join(representation))
|
# Copyright 2014-2017 F5 Networks Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import copy
import pytest
from distutils.version import LooseVersion
from f5.bigip.tm.gtm.listener import Listener
from f5.sdk_exception import MissingRequiredCreationParameter
from f5.sdk_exception import MissingRequiredReadParameter
from f5.sdk_exception import UnsupportedOperation
from pytest import symbols
from requests.exceptions import HTTPError
from six import iteritems
pytestmark = pytest.mark.skipif(
symbols
and hasattr(symbols, 'modules')
and not symbols.modules['gtm'],
reason='The modules symbol for GTM is set to False.'
)
def delete_listener(mgmt_root, name, partition):
try:
foo = mgmt_root.tm.gtm.listeners.listener.load(name=name,
partition=partition)
except HTTPError as err:
if err.response.status_code != 404:
raise
return
foo.delete()
def setup_create_test(request, mgmt_root, name, partition):
def teardown():
delete_listener(mgmt_root, name, partition)
request.addfinalizer(teardown)
def setup_basic_test(request, mgmt_root, name, address, partition):
def teardown():
delete_listener(mgmt_root, name, partition)
reg1 = mgmt_root.tm.gtm.listeners.listener.create(name=name,
address=address,
partition=partition)
request.addfinalizer(teardown)
return reg1
class TestCreate(object):
def test_create_no_args(self, mgmt_root):
with pytest.raises(MissingRequiredCreationParameter):
mgmt_root.tm.gtm.listeners.listener.create()
def test_create(self, request, mgmt_root):
setup_create_test(request, mgmt_root, 'fake_listener', 'Common')
reg1 = mgmt_root.tm.gtm.listeners.listener.create(
name='fake_listener', partition='Common', address='10.10.10.10')
assert reg1.name == 'fake_listener'
assert reg1.partition == 'Common'
assert reg1.address == '10.10.10.10'
assert reg1.generation and isinstance(reg1.generation, int)
assert reg1.kind == 'tm:gtm:listener:listenerstate'
assert reg1.selfLink.startswith(
'https://localhost/mgmt/tm/gtm/listener/~Common~fake_listener')
def test_create_optional_args(self, request, mgmt_root):
setup_create_test(request, mgmt_root, 'fake_listener', 'Common')
reg1 = mgmt_root.tm.gtm.listeners.listener.create(
name='fake_listener', partition='Common', address='10.10.10.10',
description='NewListener')
assert hasattr(reg1, 'description')
assert reg1.description == 'NewListener'
def test_create_duplicate(self, request, mgmt_root):
setup_basic_test(request, mgmt_root, 'fake_listener',
'10.10.10.10', 'Common')
with pytest.raises(HTTPError) as err:
mgmt_root.tm.gtm.listeners.listener.create(
name='fake_listener', partition='Common',
address='10.10.10.10')
assert err.value.response.status_code == 409
class TestRefresh(object):
def test_refresh(self, request, mgmt_root):
setup_basic_test(request, mgmt_root, 'fake_listener',
'10.10.10.10', 'Common')
r1 = mgmt_root.tm.gtm.listeners.listener.load(
name='fake_listener', partition='Common')
r2 = mgmt_root.tm.gtm.listeners.listener.load(
name='fake_listener', partition='Common')
assert r1.name == 'fake_listener'
assert r2.name == 'fake_listener'
r2.update(description='NewListener')
assert hasattr(r2, 'description')
assert not hasattr(r1, 'description')
assert r2.description == 'NewListener'
r1.refresh()
assert hasattr(r1, 'description')
assert r1.description == 'NewListener'
class TestLoad(object):
def test_load_no_object(self, mgmt_root):
with pytest.raises(HTTPError) as err:
mgmt_root.tm.gtm.listeners.listener.load(
name='fake_listener', partition='Common')
if LooseVersion(pytest.config.getoption('--release')) >= \
LooseVersion('12.0.0'):
assert err.value.response.status_code == 400
else:
assert err.value.response.status_code == 500
def test_load(self, request, mgmt_root):
setup_basic_test(request, mgmt_root, 'fake_listener',
'10.10.10.10', 'Common')
r1 = mgmt_root.tm.gtm.listeners.listener.load(
name='fake_listener', partition='Common')
assert r1.name == 'fake_listener'
assert not hasattr(r1, 'description')
r1.update(description='NewListener')
assert hasattr(r1, 'description')
assert r1.description == 'NewListener'
r2 = mgmt_root.tm.gtm.listeners.listener.load(
name='fake_listener', partition='Common')
assert hasattr(r2, 'description')
assert r2.description == 'NewListener'
class TestExists(object):
def test_not_exists(self, request, mgmt_root):
result = mgmt_root.tm.gtm.listeners.listener.exists(
name='my_listener', partition='Common'
)
assert result is False
def test_exists(self, request, mgmt_root):
r1 = setup_basic_test(
request, mgmt_root, 'fake_listener', '10.10.10.10', 'Common'
)
result = mgmt_root.tm.gtm.listeners.listener.exists(
name='fake_listener', partition='Common'
)
assert r1.name == 'fake_listener'
assert result is True
class TestUpdate(object):
def test_update(self, request, mgmt_root):
r1 = setup_basic_test(request, mgmt_root, 'fake_listener',
'10.10.10.10', 'Common')
assert r1.name == 'fake_listener'
assert not hasattr(r1, 'description')
r1.update(description='NewListener')
assert hasattr(r1, 'description')
assert r1.description == 'NewListener'
class TestModify(object):
def test_modify(self, request, mgmt_root):
r1 = setup_basic_test(request, mgmt_root, 'fake_listener',
'10.10.10.10', 'Common')
original_dict = copy.copy(r1.__dict__)
value = 'description'
r1.modify(description='NewListener')
for k, v in iteritems(original_dict):
if k != value:
original_dict[k] = r1.__dict__[k]
elif k == value:
assert r1.__dict__[k] == 'NewListener'
class TestDelete(object):
def test_delete(self, request, mgmt_root):
r1 = mgmt_root.tm.gtm.listeners.listener.create(
name='fake_listener', address='10.10.10.10')
r1.delete()
with pytest.raises(HTTPError) as err:
mgmt_root.tm.gtm.listeners.listener.load(
name='fake_region', partition='Common')
if LooseVersion(pytest.config.getoption('--release')) >= \
LooseVersion('12.0.0'):
assert err.value.response.status_code == 400
else:
assert err.value.response.status_code == 500
class TestListenerCollection(object):
def test_listener_collection(self, request, mgmt_root):
reg1 = setup_basic_test(request, mgmt_root, 'fake_listener',
'10.10.10.10', 'Common')
assert reg1.name == 'fake_listener'
assert reg1.partition == 'Common'
assert reg1.address == '10.10.10.10'
assert reg1.generation and isinstance(reg1.generation, int)
assert reg1.kind == 'tm:gtm:listener:listenerstate'
assert reg1.selfLink.startswith(
'https://localhost/mgmt/tm/gtm/listener/~Common~fake_listener')
rc = mgmt_root.tm.gtm.listeners.get_collection()
assert isinstance(rc, list)
assert len(rc)
assert isinstance(rc[0], Listener)
class TestProfile(object):
def test_load_missing_args(self, request, mgmt_root):
reg1 = setup_basic_test(request, mgmt_root, 'fake_listener',
'10.10.10.10', 'Common')
profcol = reg1.profiles_s.get_collection()
prname = str(profcol[0].name)
with pytest.raises(MissingRequiredReadParameter):
reg1.profiles_s.profile.load(name=prname)
def test_load(self, request, mgmt_root):
reg1 = setup_basic_test(request, mgmt_root, 'fake_listener',
'10.10.10.10', 'Common')
profcol = reg1.profiles_s.get_collection()
prname = str(profcol[0].name)
prpart = str(profcol[0].partition)
pr1 = reg1.profiles_s.profile.load(name=prname, partition=prpart)
assert pr1.kind == 'tm:gtm:listener:profiles:profilesstate'
assert pr1.selfLink.startswith('https://localhost/mgmt/tm/gtm/listener'
'/~Common~fake_listener/profiles/'
'~Common~dns')
def test_refresh(self, request, mgmt_root):
reg1 = setup_basic_test(request, mgmt_root, 'fake_listener',
'10.10.10.10', 'Common')
profcol = reg1.profiles_s.get_collection()
prname = str(profcol[0].name)
prpart = str(profcol[0].partition)
pr1 = reg1.profiles_s.profile.load(name=prname, partition=prpart)
assert pr1.kind == 'tm:gtm:listener:profiles:profilesstate'
assert pr1.selfLink.startswith('https://localhost/mgmt/tm/gtm/listener'
'/~Common~fake_listener/profiles/'
'~Common~dns')
pr2 = reg1.profiles_s.profile.load(name=prname, partition=prpart)
pr1.refresh()
assert pr1.kind == pr2.kind
assert pr1.selfLink == pr2.selfLink
pr2.refresh()
assert pr2.kind == pr1.kind
assert pr2.selfLink == pr1.selfLink
def test_create_raises(self, request, mgmt_root):
reg1 = setup_basic_test(request, mgmt_root, 'fake_listener',
'10.10.10.10', 'Common')
with pytest.raises(UnsupportedOperation):
reg1.profiles_s.profile.create()
def test_modify_raises(self, request, mgmt_root):
reg1 = setup_basic_test(request, mgmt_root, 'fake_listener',
'10.10.10.10', 'Common')
with pytest.raises(UnsupportedOperation):
reg1.profiles_s.profile.modify()
def test_update_raises(self, request, mgmt_root):
reg1 = setup_basic_test(request, mgmt_root, 'fake_listener',
'10.10.10.10', 'Common')
with pytest.raises(UnsupportedOperation):
reg1.profiles_s.profile.update()
def test_delete_raises(self, request, mgmt_root):
reg1 = setup_basic_test(request, mgmt_root, 'fake_listener',
'10.10.10.10', 'Common')
with pytest.raises(UnsupportedOperation):
reg1.profiles_s.profile.delete()
|
# -*- coding: utf-8 -*-
"""The Custom lifeform allows you to easily pass any arbitrary array as a
:obj:`seagull.lifeforms.base.Lifeform` to the Board. However, it is important
that the array passes two conditions:
* It must be a 2-dimensional array. For lines such as Blinkers, we use an array of shape :code:`(1, 3)`.
* It must be a binary array where :code:`True` represents active cells and :code:`False` represents inactive cells. You can also use 0s and 1s as input.
If either of these conditions isn't fulfilled, then Seagull will raise a
:code:`ValueError`.
Here's an example in creating a custom lifeform:
.. code-block:: python
import seagull as sg
from seagull.lifeforms import Custom
board = sg.Board(size=(30,30))
board.add(Custom([[0,1,1,0], [0,0,1,1]]))
"""
# Import standard library
from typing import Union
# Import modules
import numpy as np
from loguru import logger
from .base import Lifeform
class Custom(Lifeform):
"""Create custom lifeforms"""
def __init__(self, X: Union[np.ndarray, list]):
"""Initialize the class
Parameters
----------
X : array_like
Custom binary array for the lifeform
"""
self.validate_input_values(np.array(X))
self.validate_input_shapes(np.array(X))
self.X = X
def validate_input_values(self, X: np.ndarray):
"""Check if all elements are binary"""
# numpy treats True == 1 and False == 0, so this check also accepts boolean arrays
if not ((X == 0) | (X == 1)).all():
msg = "Input array should only contain {0,1} or {True,False}"
logger.error(msg)
raise ValueError(msg)
def validate_input_shapes(self, X: np.ndarray):
"""Check if input array is of size 2"""
if X.ndim != 2:
msg = (
"Input array should have 2 dimensions: {} != 2. "
"For a 1-d lifeform, please add a new axis".format(X.ndim)
)
logger.error(msg)
raise ValueError(msg)
@property
def layout(self) -> np.ndarray:
return np.array(self.X)
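# Quick illustration of the validation rules above (a sketch, not part of the
# library's test suite):
#   Custom([[0, 1, 1, 0]])       # accepted: 2-d and binary
#   Custom([0, 1, 1, 0])         # raises ValueError: only 1 dimension
#   Custom([[0, 2], [1, 0]])     # raises ValueError: 2 is not a binary value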
|
import unittest
from weather import read_config_file, get_coordinates, get_current_weather
class TestWeather(unittest.TestCase):
CONFIG_FILE_PATH = 'config.ini'
def test_config_content(self):
config = read_config_file(cfg_path=self.CONFIG_FILE_PATH)
self.assertIn('GEOLOCALIZATION', config)
self.assertIn('website', config['GEOLOCALIZATION'])
self.assertIn('apikey', config['GEOLOCALIZATION'])
self.assertIn('WEATHER', config)
self.assertIn('website', config['WEATHER'])
self.assertIn('apikey', config['WEATHER'])
self.assertIn('realtime_endpoint', config['WEATHER'])
self.assertIn('forecast_daily_endpoint', config['WEATHER'])
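# config.ini layout implied by the assertions above (values are placeholders):
#   [GEOLOCALIZATION]
#   website = ...
#   apikey = ...
#   [WEATHER]
#   website = ...
#   apikey = ...
#   realtime_endpoint = ...
#   forecast_daily_endpoint = ...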
def test_get_coordinates_ret_value(self):
config = read_config_file()
geolocalization_data = config['GEOLOCALIZATION']
coordinates = get_coordinates(website=geolocalization_data['website'], apikey=geolocalization_data['apikey'],
location='gdansk')
self.assertIn('lat', coordinates)
self.assertIn('lng', coordinates)
def test_gdansk_coordinates(self):
config = read_config_file()
geolocalization_data = config['GEOLOCALIZATION']
coordinates = get_coordinates(website=geolocalization_data['website'], apikey=geolocalization_data['apikey'],
location='gdansk')
self.assertEqual(coordinates['lat'], 54.348226)
self.assertEqual(coordinates['lng'], 18.654289)
def test_get_current_weather_ret_value(self):
config = read_config_file()
geolocalization_data = config['GEOLOCALIZATION']
weather_data = config['WEATHER']
coordinates = get_coordinates(website=geolocalization_data['website'], apikey=geolocalization_data['apikey'],
location='gdansk')
d = get_current_weather(website=weather_data['website'], apikey=weather_data['apikey'],
endpoint=weather_data['realtime_endpoint'], longitude=coordinates["lng"],
latitude=coordinates["lat"])
self.assertIn('temp', d)
self.assertIn('feels_like', d)
self.assertIn('humidity', d)
self.assertIn('wind_speed', d)
self.assertIn('cloud_cover', d)
if __name__ == '__main__':
unittest.main()
|
from django.forms import ModelChoiceField
from django.contrib import admin
from django.forms import ModelForm, ModelChoiceField, ModelMultipleChoiceField
#from django.utils.translation import ugettext as _
from cms.admin.placeholderadmin import PlaceholderAdminMixin
#from cms.admin.placeholderadmin import FrontendEditableAdminMixin
#from djangocms_text_ckeditor.widgets import TextEditorWidget
#from src.apps.core.admin_actions import *
from .models.LearningObjModels import Learning_Level, Learning_Verb, Learning_Outcome, \
Learning_Objective
from src.apps.core.forms import (
add_LessonForm,
Edit_LessonForm,
#SectionForm,
ReadingSectionForm,
ActivitySectionForm,
)
from src.apps.core.models.ModuleModels import (
# Module,
# Topic,
Lesson,
Section,
Collaboration,
)
from src.apps.core.models.SectionTypeModels import (
ReadingSection,
ActivitySection,
QuizSection,
)
from src.apps.core.models.QuizQuestionModels import (
QuizQuestion,
# MultiChoice_question,
# MultiChoice_answer,
# MultiSelect_question,
# MultiSelect_answer
QuizAnswer,
)
from src.apps.core.models.ResourceModels import (
Resource,
)
from src.apps.core.models.HS_AppFrameModels import (
AppReference,
)
#from src.apps.core.QuerysetManagers import *
from adminsortable2.admin import SortableAdminMixin, SortableInlineAdminMixin
from polymorphic.admin import PolymorphicParentModelAdmin,PolymorphicChildModelAdmin,PolymorphicChildModelFilter,StackedPolymorphicInline, PolymorphicInlineSupportMixin
#from easy_select2 import select2_modelform
# form = select2_modelform( MODEL_NAME, attrs={'width': '250px'})
#from pprint import pprint
# can add list of fields that are editable within the admin from the list view
# can provide group actions for admin listing (makes calls to local methods)
# actions = ['publish']
# def publish(self, modeladmin, request, queryset):
# queryset.update(publication_status=Post.PUB_STATUS_PUBLISHED)
class CreationTrackingMixin(object):
'''
Mixin that automates updating a CreationTrackingModel's
created_by and changed_by fields for forms saved in the admin.
Stores a reference to the current user depending on whether the
submitted form is a new instance or a change to an existing instance.
'''
def save_model(self, request, obj, form, change):
#print('***************** IN CUSTOM PUBLICATION SAVE MODEL')
# if this is the first save of the model set 'created_by' to the current user
if not obj.pk:
#print("*** changing created by for '{obj.name}'")
obj.created_by = request.user
obj.changed_by = request.user
# if the object exists and there are changes
# store the current user as the most recent updator
if obj.pk and change:
#print("*** updating 'last updator' for '{obj.name}'")
obj.changed_by = request.user
obj.save()
# ============================================================
# Inline Admin interfaces
# ============================================================
#class ReadingInline(SortableTabularInline):
class ReadingInline(admin.TabularInline):
model = ReadingSection
fk_name = 'readingsection'
readonly_fields = ['section_ptr']
#class ActivityInline(SortableTabularInline):
class ActivityInline(admin.TabularInline):
model = ActivitySection
fk_name = 'activitysection'
readonly_fields = ['section_ptr']
#class QuizInline(SortableTabularInline):
class QuizInline(admin.TabularInline):
model = QuizSection
fk_name = 'quizsection'
readonly_fields = ['section_ptr']
# class QuizQuestionInline(SortableInlineAdminMixin, admin.TabularInline):
# model = QuizQuestion
# base_model = QuizQuestion
# #form = SectionForm
# extra = 0
# sortable_field_name = "position"
# show_change_link = True
#
# def has_add_permission(self, request):
# return False
class SectionInline(SortableInlineAdminMixin, admin.TabularInline):
model = Section
base_model = Section
#form = SectionForm
#fk_name = 'parent_lesson'
extra = 0
sortable_field_name = "position"
show_change_link = True
# fields = (
# 'position',
# 'name',
# 'short_name',
# 'duration',
# 'tags',
# )
# must overwrite this method as polymorphic models aren't handled appropriately
def has_add_permission(self, request):
return False
class LessonInline(SortableInlineAdminMixin, admin.TabularInline):
model = Lesson
fk_name = "parent_lesson"
verbose_name = "Sub-Lesson"
verbose_name_plural = "Sub-Lessons"
# form = Lesson_form
extra = 0 # number of extra empty fields to generate (makes things confusing, so ZERO)
show_change_link = True
sortable_field_name = "position"
fields = (
'position',
'name',
'short_name',
'tags',
)
# exclude = [
# 'depth',
# 'depth_label',
# 'created_by',
# 'changed_by',
# # 'position',
# ]
class CollaboratorInline(admin.TabularInline):
model = Collaboration
verbose_name = "Collaboration"
verbose_name_plural = "Collaborations"
extra = 0
fields = (
'collaborator',
'can_edit',
)
class AppRefInline(admin.TabularInline):
model = AppReference
verbose_name = "Application Reference"
verbose_name_plural = "Application References"
extra = 0
fields = (
'app_name',
'app_link',
)
class QuizQuestionInline(SortableInlineAdminMixin, admin.TabularInline):
model = QuizQuestion
fields = ['quiz', 'question_text', 'position']
verbose_name = "Question"
verbose_name_plural = "Questions"
extra = 0
class QuizAnswerInline(SortableInlineAdminMixin, admin.TabularInline):
model = QuizAnswer
fields = ['question', 'answer_text', 'is_correct', 'position']
verbose_name = "Answer"
verbose_name_plural = "Answers"
extra = 0
# ============================================================
# Regular Admin interfaces
# ============================================================
class SectionChildAdmin(CreationTrackingMixin, PolymorphicChildModelAdmin, SortableAdminMixin):
#model = Section
base_model = Section
show_in_index = False
#base_form = SectionForm
#sortable_field_name = "position"
exclude = ['position']
list_display = [
'name',
'lesson',
'creation_date',
'changed_date',
]
list_select_related = (
'lesson',
)
class ReadingSectionAdmin(PlaceholderAdminMixin, SectionChildAdmin):
model = ReadingSection
base_model = ReadingSection
#show_in_index = False
base_form = ReadingSectionForm
sortable_field_name = "position"
exclude = ['position', 'created_by', 'changed_by']
list_display = [
'lesson',
'name',
'creation_date',
'changed_date',
'tag_list',
]
list_select_related = (
'lesson',
)
list_display_links = ['name']
def tag_list(self, obj):
return u", ".join(o.name for o in obj.tags.all())
class ActivitySectionAdmin(PlaceholderAdminMixin, SectionChildAdmin):
model = ActivitySection
base_model = ActivitySection
#show_in_index = False
sortable_field_name = "position"
base_form = ActivitySectionForm
exclude = ['position', 'created_by', 'changed_by']
list_display = [
'lesson',
'name',
'creation_date',
'changed_date',
'tag_list',
]
list_select_related = (
'lesson',
)
list_display_links = ['name']
def tag_list(self, obj):
return u", ".join(o.name for o in obj.tags.all())
class QuizSectionAdmin(PlaceholderAdminMixin, SectionChildAdmin, SortableAdminMixin):
base_model = QuizSection
#show_in_index = False
sortable_field_name = "position"
exclude = ['position', 'created_by', 'changed_by']
list_display = [
'lesson',
'name',
'creation_date',
'changed_date',
]
list_select_related = (
'lesson',
)
list_display_links = ['name']
frontend_editable_fields = ("content",)
inlines = [QuizQuestionInline,]
class SectionParentAdmin(PlaceholderAdminMixin, PolymorphicParentModelAdmin):
""" The parent model admin """
base_model = Section
show_in_index = True
ordering = ('lesson','name',)
exclude = ['created_by', 'changed_by']
child_models = (
ReadingSection,
ActivitySection,
QuizSection
)
list_display = [
'lesson',
'name',
'creation_date',
'changed_date',
#'tag_list',
]
list_select_related = (
'lesson',
)
search_fields = ['name', 'short_name']
list_filter = (PolymorphicChildModelFilter, 'creation_date', 'tags')
list_display_links = ['name']
frontend_editable_fields = ("content",)
def get_queryset(self, request):
return super(SectionParentAdmin, self).get_queryset(request).prefetch_related('tags')
def tag_list(self, obj):
return u", ".join(o.name for o in obj.tags.all())
class LessonAdmin(PolymorphicInlineSupportMixin, CreationTrackingMixin, PlaceholderAdminMixin, admin.ModelAdmin):
model = Lesson
form = Edit_LessonForm
# ordering = ('topic', 'name',)
ordering = ('name',)
# sortable_field_name = "position"
list_display = [
'parent_lesson',
'name',
'creation_date',
'changed_date',
#'collaborators',
]
list_select_related = (
'parent_lesson',
)
list_display_links = ['name']
search_fields = ['name', 'short_name']
list_filter = ('creation_date',)
#filter_horizontal = ('collaborators',)
inlines = [AppRefInline, CollaboratorInline, SectionInline, LessonInline]
# override 'get_form' to use a separate form when adding a new lesson
# additionally, this could be extended to be contextually aware and
# provide different forms between 'Admin' and 'Wizard' views etc.
def get_form(self, request, obj=None, **kwargs):
# if this is a new lesson
if obj is None:
# check for a passed parent lesson id (currently unused)
curr_module_id = request.GET.get('parent_lesson', '')
# return the dedicated add-lesson form
return add_LessonForm
else:
return super(LessonAdmin, self).get_form(request, obj, **kwargs)
##############################################
# Quiz Question admin
##############################################
class QuizQuestionAdmin(SortableInlineAdminMixin, admin.ModelAdmin):
model = QuizQuestion
fields = ['quiz', 'question_text', 'position']
##############################################
# (activity) Resource admin
##############################################
class ResourceAdmin(admin.ModelAdmin):
model = Resource
##############################################
# Learning Objective admin forms
##############################################
class Learning_OutcomeAdmin(admin.ModelAdmin):
model = Learning_Outcome
sortable_field_name = "outcome"
list_display = ['outcome']
class Learning_LevelAdmin(admin.ModelAdmin):
model = Learning_Level
sortable_field_name = "label"
list_display = ['label']
class Learning_LevelChoiceField(ModelChoiceField):
def label_from_instance(self, obj):
return "%s" % (obj.label)
class Learning_VerbAdminForm(ModelForm):
level = Learning_LevelChoiceField(queryset=Learning_Level.objects.all())
class Meta:
model = Learning_Verb
fields = ['verb', 'level']
class Learning_VerbAdmin(admin.ModelAdmin):
form = Learning_VerbAdminForm
sortable_field_name = "verb"
list_display = ['verb']
class Learning_VerbChoiceField(ModelChoiceField):
def label_from_instance(self, obj):
return "%s" % (obj.verb)
class Learning_OutcomeChoiceField(ModelMultipleChoiceField):
def label_from_instance(self, obj):
return "%s" % (obj.outcome)
class Learning_ObjectiveAdminForm(ModelForm):
verb = Learning_VerbChoiceField(queryset=Learning_Verb.objects.all())
outcomes = Learning_OutcomeChoiceField(queryset=Learning_Outcome.objects.all())
class Meta:
model = Learning_Objective
fields = ['condition', 'task', 'degree', 'verb', 'outcomes']
class Learning_ObjectiveAdmin(PlaceholderAdminMixin, admin.ModelAdmin):
form = Learning_ObjectiveAdminForm
list_display = ['condition', 'task', 'degree']
# REGISTER THE ABOVE DEFINED ADMIN OBJECTS
admin.site.register(Lesson, LessonAdmin)
admin.site.register(Section, SectionParentAdmin)
admin.site.register(ReadingSection, ReadingSectionAdmin)
admin.site.register(ActivitySection, ActivitySectionAdmin)
admin.site.register(QuizSection, QuizSectionAdmin)
#admin.site.register(Collaboration)
#admin.site.register(LayerRef, LayerRefAdmin)
admin.site.register(QuizQuestion, QuizQuestionAdmin)
admin.site.register(QuizAnswer)
# admin.site.register(QuizQuestion, QuizQuestionParentAdmin)
# admin.site.register(MultiChoice_answer, MultiChoice_AnswerAdmin)
# admin.site.register(MultiSelect_answer, MultiSelect_AnswerAdmin)
admin.site.register(Resource, ResourceAdmin)
admin.site.register(Learning_Outcome, Learning_OutcomeAdmin)
admin.site.register(Learning_Level, Learning_LevelAdmin)
admin.site.register(Learning_Verb, Learning_VerbAdmin)
admin.site.register(Learning_Objective, Learning_ObjectiveAdmin)
# admin.site.register(QuizAnswer, QuizAnswerParentAdmin)
|
from app import app
app.run(debug=True, host='127.0.0.1', port=8080) |
#!/usr/bin/env python3
from mendeleev import element # For looking up atomic numbers, etc
import numpy as np
from base_pot import PotProvider
# Scaling factor for ZBL potential in our units
a_0 = 0.5292
# fundamental charge squared in our units
esqr = 14.398
def a_ij_func(Z_i, Z_j):
'ZBL Length function'
a_ij = 0.8853 * a_0 / (Z_i ** 0.23 + Z_j ** 0.23)
return a_ij
def phi(x):
'ZBL Screening Function'
phi = 0.1818 * np.exp(-3.2000 * x) + \
0.5099 * np.exp(-0.9423 * x) + \
0.2802 * np.exp(-0.4029 * x) + \
0.02817 * np.exp(-0.2016 * x)
return phi
def phiprime(x):
'Derivative of ZBL Screening Function'
phiprime = -0.58176 * np.exp(-3.2000 * x) - \
0.48048 * np.exp(-0.9423 * x) - \
0.112893 * np.exp(-0.4029 * x) - \
0.005679 * np.exp(-0.2016 * x)
return phiprime
def zbl(Z_i, Z_j, r):
a_ij = a_ij_func(Z_i, Z_j)
# Scale to the length
x = r / a_ij
# Compute phi from scaled length
phi_vals = phi(x)
zz = Z_i * Z_j
return zz * esqr * phi_vals / r
def d_zbl_dr(Z_i, Z_j, r):
a_ij = a_ij_func(Z_i, Z_j)
# Scale to the length
x = r / a_ij
# Compute phi from scaled length
phi_vals = phi(x)
phiprime_vals = phiprime(x)
zz = Z_i * Z_j
return -zz * esqr * (phiprime_vals * x - phi_vals) / (r*r)
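# Usage sketch (illustrative numbers only): the module-level helpers can be called
# directly, e.g. for a Si-Si pair (Z = 14) at r = 2.0 in the Angstrom/eV units
# implied by a_0 and esqr above:
#   zbl(14, 14, 2.0)       # screened Coulomb pair potential, in eV
#   d_zbl_dr(14, 14, 2.0)  # the matching derivative used by ZBL.dV_dr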
class ZBL(PotProvider):
def V_r(self, A, B, r):
Z_i = element(A).atomic_number
Z_j = element(B).atomic_number
return zbl(Z_i, Z_j, r)
def dV_dr(self, A, B, r):
Z_i = element(A).atomic_number
Z_j = element(B).atomic_number
return d_zbl_dr(Z_i, Z_j, r)
def name(self):
return "ZBL"
def abbrv(self):
return "ZBL" |
# Python3 code to rename multiple
# files in a directory or folder
# importing os module
import os
dirpath = "C:/Users/Shweta/Documents/MCA/SEM6/Zargun/Datasets/PET"
# Function to rename multiple files
def main():
i = 1
for filename in os.listdir(dirpath):
dst = "pet" + str(i) + ".jpg"
# join with the directory so rename() points at the actual files
src = os.path.join(dirpath, filename)
dst = os.path.join(dirpath, dst)
# rename() function will
# rename all the files
os.rename(src, dst)
i += 1
# Driver Code
if __name__ == '__main__':
# Calling main() function
main() |
L = [100, 200]
L.remove(200)
print(L) |
import nltk
output = nltk.TextTilingTokenizer().tokenize(nltk.corpus.brown.raw()[0:10000])
|
import random
from typing import Optional
from hathor.simulator import FakeConnection, MinerSimulator, RandomTransactionGenerator, Simulator
from tests import unittest
class HathorSimulatorTestCase(unittest.TestCase):
seed_config: Optional[int] = None
def setUp(self):
super().setUp()
self.clock = None
if self.seed_config is None:
self.seed_config = random.randint(0, 2**32 - 1)
self.simulator = Simulator()
self.simulator.set_seed(self.seed_config)
self.simulator.start()
print('-'*30)
print('Simulation seed config:', self.simulator.seed)
print('-'*30)
def tearDown(self):
self.simulator.stop()
super().tearDown()
def test_one_node(self):
manager1 = self.simulator.create_peer()
miner1 = MinerSimulator(manager1, hashpower=100e6)
miner1.start()
self.simulator.run(10)
gen_tx1 = RandomTransactionGenerator(manager1, rate=2 / 60., hashpower=1e6, ignore_no_funds=True)
gen_tx1.start()
self.simulator.run(60 * 60)
def test_two_nodes(self):
manager1 = self.simulator.create_peer()
manager2 = self.simulator.create_peer()
miner1 = MinerSimulator(manager1, hashpower=10e6)
miner1.start()
self.simulator.run(10)
gen_tx1 = RandomTransactionGenerator(manager1, rate=3 / 60., hashpower=1e6, ignore_no_funds=True)
gen_tx1.start()
self.simulator.run(60)
conn12 = FakeConnection(manager1, manager2, latency=0.150)
self.simulator.add_connection(conn12)
self.simulator.run(60)
miner2 = MinerSimulator(manager2, hashpower=100e6)
miner2.start()
self.simulator.run(120)
gen_tx2 = RandomTransactionGenerator(manager2, rate=10 / 60., hashpower=1e6, ignore_no_funds=True)
gen_tx2.start()
self.simulator.run(10 * 60)
miner1.stop()
miner2.stop()
gen_tx1.stop()
gen_tx2.stop()
self.simulator.run(5 * 60)
self.assertTrue(conn12.is_connected)
self.assertTipsEqual(manager1, manager2)
def test_many_miners_since_beginning(self):
nodes = []
miners = []
for hashpower in [10e6, 5e6, 1e6, 1e6, 1e6]:
manager = self.simulator.create_peer()
for node in nodes:
conn = FakeConnection(manager, node, latency=0.085)
self.simulator.add_connection(conn)
nodes.append(manager)
miner = MinerSimulator(manager, hashpower=hashpower)
miner.start()
miners.append(miner)
self.simulator.run(600)
for miner in miners:
miner.stop()
self.simulator.run(15)
for node in nodes[1:]:
self.assertTipsEqual(nodes[0], node)
def test_new_syncing_peer(self):
nodes = []
miners = []
tx_generators = []
manager = self.simulator.create_peer()
nodes.append(manager)
miner = MinerSimulator(manager, hashpower=10e6)
miner.start()
miners.append(miner)
self.simulator.run(600)
for hashpower in [10e6, 8e6, 5e6]:
manager = self.simulator.create_peer()
for node in nodes:
conn = FakeConnection(manager, node, latency=0.085)
self.simulator.add_connection(conn)
nodes.append(manager)
miner = MinerSimulator(manager, hashpower=hashpower)
miner.start()
miners.append(miner)
for i, rate in enumerate([5, 4, 3]):
tx_gen = RandomTransactionGenerator(nodes[i], rate=rate * 1 / 60., hashpower=1e6, ignore_no_funds=True)
tx_gen.start()
tx_generators.append(tx_gen)
self.simulator.run(600)
late_manager = self.simulator.create_peer()
for node in nodes:
conn = FakeConnection(late_manager, node, latency=0.300)
self.simulator.add_connection(conn)
self.simulator.run(600)
for tx_gen in tx_generators:
tx_gen.stop()
for miner in miners:
miner.stop()
self.simulator.run(600)
for idx, node in enumerate(nodes):
print('Checking node {}...'.format(idx))
self.assertTipsEqual(late_manager, node)
|
"""
-----------------------------------------------------------------------------------------------------------
Package: AequilibraE
Name: Traffic assignment
Purpose: Loads GUI to perform traffic assignment procedures
Original Author: Pedro Camargo (c@margo.co)
Contributors: Pedro Camargo
Last edited by: Pedro Camargo
Website: www.AequilibraE.com
Repository: https://github.com/AequilibraE/AequilibraE
Created: 2016-10-30
Updated: 2017-12-11
Copyright: (c) AequilibraE authors
Licence: See LICENSE.TXT
-----------------------------------------------------------------------------------------------------------
"""
from PyQt4.QtCore import *
from PyQt4.QtGui import *
from PyQt4 import QtGui, uic
from qgis.gui import QgsMapLayerProxyModel
# os and qgis are referenced below (os.path, qgis.gui, qgis.utils.iface)
import qgis.utils
import os
import sys
from functools import partial
import numpy as np
from collections import OrderedDict
from ..common_tools.global_parameters import *
from ..common_tools.auxiliary_functions import *
from ..matrix_procedures import LoadMatrixDialog
from ..common_tools import ReportDialog
from ..common_tools import GetOutputFolderName, GetOutputFileName
from ..aequilibrae.matrix import AequilibraeMatrix
from load_select_link_query_builder_dialog import LoadSelectLinkQueryBuilderDialog
no_binary = False
try:
from aequilibrae.paths import Graph, AssignmentResults, allOrNothing
except:
no_binary = True
sys.modules['qgsmaplayercombobox'] = qgis.gui
FORM_CLASS, _ = uic.loadUiType(os.path.join(os.path.dirname(__file__), 'forms/ui_traffic_assignment.ui'))
class TrafficAssignmentDialog(QDialog, FORM_CLASS):
def __init__(self, iface):
QDialog.__init__(self)
self.iface = iface
self.setupUi(self)
self.path = standard_path()
self.output_path = None
self.temp_path = None
self.error = None
self.report = None
self.method = {}
self.matrices = OrderedDict()
self.skims = []
self.matrix = None
self.graph = Graph()
self.results = AssignmentResults()
self.block_centroid_flows = None
self.worker_thread = None
# Signals for the matrix_procedures tab
self.but_load_new_matrix.clicked.connect(self.find_matrices)
# Signals from the Network tab
self.load_graph_from_file.clicked.connect(self.load_graph)
# Signals for the algorithm tab
self.progressbar0.setVisible(False)
self.progressbar0.setValue(0)
self.progress_label0.setVisible(False)
self.do_assignment.clicked.connect(self.run)
self.cancel_all.clicked.connect(self.exit_procedure)
self.select_output_folder.clicked.connect(self.choose_folder_for_outputs)
self.cb_choose_algorithm.addItem('All-Or-Nothing')
self.cb_choose_algorithm.currentIndexChanged.connect(self.changing_algorithm)
# slots for skim tab
self.but_build_query.clicked.connect(partial(self.build_query, 'select link'))
self.changing_algorithm()
# path file
self.path_file = OutputType()
# Queries
tables = [self.select_link_list, self.list_link_extraction]
for table in tables:
table.setColumnWidth(0, 280)
table.setColumnWidth(1, 40)
table.setColumnWidth(2, 150)
table.setColumnWidth(3, 40)
self.graph_properties_table.setColumnWidth(0, 190)
self.graph_properties_table.setColumnWidth(1, 240)
# critical link
self.but_build_query.clicked.connect(partial(self.build_query, 'select link'))
self.do_select_link.stateChanged.connect(self.set_behavior_special_analysis)
self.tot_crit_link_queries = 0
self.critical_output = OutputType()
# link flow extraction
self.but_build_query_extract.clicked.connect(partial(self.build_query, 'Link flow extraction'))
self.do_extract_link_flows.stateChanged.connect(self.set_behavior_special_analysis)
self.tot_link_flow_extract = 0
self.link_extract = OutputType()
# Disabling resources not yet implemented
self.do_select_link.setEnabled(False)
self.but_build_query.setEnabled(False)
self.select_link_list.setEnabled(False)
self.do_extract_link_flows.setEnabled(False)
self.but_build_query_extract.setEnabled(False)
self.list_link_extraction.setEnabled(False)
self.new_matrix_to_assign()
self.table_matrix_list.setColumnWidth(0, 135)
self.table_matrix_list.setColumnWidth(1, 135)
self.table_matrices_to_assign.setColumnWidth(0, 125)
self.table_matrices_to_assign.setColumnWidth(1, 125)
self.skim_list_table.setColumnWidth(0, 70)
self.skim_list_table.setColumnWidth(1, 490)
def choose_folder_for_outputs(self):
new_name = GetOutputFolderName(self.path, 'Output folder for traffic assignment')
if new_name:
self.output_path = new_name
self.lbl_output.setText(new_name)
else:
self.output_path = None
self.lbl_output.setText(new_name)
def load_graph(self):
self.lbl_graphfile.setText('')
file_types = ["AequilibraE graph(*.aeg)"]
default_type = '.aeg'
box_name = 'Traffic Assignment'
graph_file, _ = GetOutputFileName(self, box_name, file_types, default_type, self.path)
if graph_file is not None:
self.graph.load_from_disk(graph_file)
fields = list(set(self.graph.graph.dtype.names) - set(self.graph.required_default_fields))
self.minimizing_field.addItems(fields)
self.update_skim_list(fields)
self.lbl_graphfile.setText(graph_file)
cores = get_parameter_chain(['system', 'cpus'])
self.results.set_cores(cores)
# show graph properties
def centers_item(qt_item):
cell_widget = QWidget()
lay_out = QHBoxLayout(cell_widget)
lay_out.addWidget(qt_item)
lay_out.setAlignment(Qt.AlignCenter)
lay_out.setContentsMargins(0, 0, 0, 0)
cell_widget.setLayout(lay_out)
return cell_widget
items = [['Graph ID', self.graph.__id__],
['Number of links', self.graph.num_links],
['Number of nodes', self.graph.num_nodes],
['Number of centroids', self.graph.num_zones]]
self.graph_properties_table.clearContents()
self.graph_properties_table.setRowCount(5)
for i, item in enumerate(items):
self.graph_properties_table.setItem(i, 0, QTableWidgetItem(item[0]))
self.graph_properties_table.setItem(i, 1, QTableWidgetItem(str(item[1])))
self.graph_properties_table.setItem(4, 0, QTableWidgetItem('Block flows through centroids'))
self.block_centroid_flows = QCheckBox()
self.block_centroid_flows.setChecked(self.graph.block_centroid_flows)
self.graph_properties_table.setCellWidget(4, 1, centers_item(self.block_centroid_flows))
else:
self.graph = Graph()
self.set_behavior_special_analysis()
def changing_algorithm(self):
if self.cb_choose_algorithm.currentText() == 'All-Or-Nothing':
self.method['algorithm'] = 'AoN'
def run_thread(self):
QObject.connect(self.worker_thread, SIGNAL("assignment"), self.signal_handler)
self.worker_thread.start()
self.exec_()
def job_finished_from_thread(self):
self.report = self.worker_thread.report
self.produce_all_outputs()
self.exit_procedure()
def run(self):
if self.check_data():
self.set_output_names()
self.progress_label0.setVisible(True)
self.progressbar0.setVisible(True)
self.progressbar0.setRange(0, self.graph.num_zones)
try:
if self.method['algorithm'] == 'AoN':
self.worker_thread = allOrNothing(self.matrix, self.graph, self.results)
self.run_thread()
except ValueError as error:
qgis.utils.iface.messageBar().pushMessage("Input error", error.message, level=3)
else:
qgis.utils.iface.messageBar().pushMessage("Input error", self.error, level=3)
def set_output_names(self):
self.path_file.temp_file = os.path.join(self.temp_path, 'path_file.aed')
self.path_file.output_name = os.path.join(self.output_path, 'path_file')
self.path_file.extension = 'aed'
if self.do_path_file.isChecked():
self.results.setSavePathFile(save=True, path_result=self.path_file.temp_file)
self.link_extract.temp_file = os.path.join(self.temp_path, 'link_extract')
self.link_extract.output_name = os.path.join(self.output_path, 'link_extract')
self.link_extract.extension = 'aed'
self.critical_output.temp_file = os.path.join(self.temp_path, 'critical_output')
self.critical_output.output_name = os.path.join(self.output_path, 'critical_output')
self.critical_output.extension = 'aed'
def check_data(self):
self.error = None
self.change_graph_settings()
if not self.graph.num_links:
self.error = 'Graph was not loaded'
return False
self.matrix = None
self.prepare_assignable_matrices()
if self.matrix is None:
self.error = 'Demand matrix missing'
return False
if self.output_path is None:
self.error = 'Parameters for output missing'
return False
self.temp_path = os.path.join(self.output_path, 'temp')
if not os.path.exists(self.temp_path):
os.makedirs(self.temp_path)
self.results.prepare(self.graph, self.matrix)
return True
def load_assignment_queries(self):
# First we load the assignment queries
query_labels = []
query_elements = []
query_types = []
if self.tot_crit_link_queries:
for i in range(self.tot_crit_link_queries):
links = eval(self.select_link_list.item(i, 0).text())
query_type = self.select_link_list.item(i, 1).text()
query_name = self.select_link_list.item(i, 2).text()
for l in links:
d = directions_dictionary[l[1]]
lk = self.graph.ids[(self.graph.graph['link_id'] == int(l[0])) &
(self.graph.graph['direction'] == d)]
query_labels.append(query_name)
query_elements.append(lk)
query_types.append(query_type)
self.critical_queries = {'labels': query_labels,
'elements': query_elements,
'type': query_types}
def signal_handler(self, val):
if val[0] == 'zones finalized':
self.progressbar0.setValue(val[1])
elif val[0] == 'text AoN':
self.progress_label0.setText(val[1])
elif val[0] == 'finished_threaded_procedure':
self.job_finished_from_thread()
# TODO: Write code to export skims
def produce_all_outputs(self):
extension = 'aed'
if not self.do_output_to_aequilibrae.isChecked():
extension = 'csv'
if self.do_output_to_sqlite.isChecked():
extension = 'sqlite'
# Save link flows to disk
self.results.save_to_disk(os.path.join(self.output_path, 'link_flows.' + extension), output='loads')
# save Path file if that is the case
if self.do_path_file.isChecked():
if self.method['algorithm'] == 'AoN':
if self.do_output_to_sqlite.isChecked():
self.results.save_to_disk(file_name=os.path.join(self.output_path, 'path_file.' + extension),
output='path_file')
# if self.do_select_link.isChecked():
# if self.method['algorithm'] == 'AoN':
# del(self.results.critical_links['results'])
# self.results.critical_links = None
#
# shutil.move(self.critical_output.temp_file + '.aep', self.critical_output.output_name)
# shutil.move(self.critical_output.temp_file + '.aed', self.critical_output.output_name[:-3] + 'aed')
#
# if self.do_extract_link_flows.isChecked():
# if self.method['algorithm'] == 'AoN':
# del(self.results.link_extraction['results'])
# self.results.link_extraction = None
#
# shutil.move(self.link_extract.temp_file + '.aep', self.link_extract.output_name)
# shutil.move(self.link_extract.temp_file + '.aed', self.link_extract.output_name[:-3] + 'aed')
# Procedures related to critical analysis. Not yet fully implemented
def build_query(self, purpose):
if purpose == 'select link':
button = self.but_build_query
message = 'Select Link Analysis'
table = self.select_link_list
counter = self.tot_crit_link_queries
else:
button = self.but_build_query_extract
message = 'Link flow extraction'
table = self.list_link_extraction
counter = self.tot_link_flow_extract
button.setEnabled(False)
dlg2 = LoadSelectLinkQueryBuilderDialog(self.iface, self.graph.graph, message)
dlg2.exec_()
if dlg2.links is not None:
table.setRowCount(counter + 1)
text = ''
for i in dlg2.links:
text = text + ', (' + i[0].encode('utf-8') + ', "' + i[1].encode('utf-8') + '")'
text = text[2:]
table.setItem(counter, 0, QTableWidgetItem(text))
table.setItem(counter, 1, QTableWidgetItem(dlg2.query_type))
table.setItem(counter, 2, QTableWidgetItem(dlg2.query_name))
del_button = QPushButton('X')
del_button.clicked.connect(partial(self.click_button_inside_the_list, purpose))
table.setCellWidget(counter, 3, del_button)
counter += 1
if purpose == 'select link':
self.tot_crit_link_queries = counter
elif purpose == 'Link flow extraction':
self.tot_link_flow_extract = counter
button.setEnabled(True)
def click_button_inside_the_list(self, purpose):
if purpose == 'select link':
table = self.select_link_list
else:
table = self.list_link_extraction
button = self.sender()
index = self.select_link_list.indexAt(button.pos())
row = index.row()
table.removeRow(row)
if purpose == 'select link':
self.tot_crit_link_queries -= 1
elif purpose == 'Link flow extraction':
self.tot_link_flow_extract -= 1
def set_behavior_special_analysis(self):
if self.graph.num_links < 1:
behavior = False
else:
behavior = True
self.do_path_file.setEnabled(behavior)
# This line of code turns off the features of select link analysis and link flow extraction while these
# features are still being developed
behavior = False
self.do_select_link.setEnabled(behavior)
self.do_extract_link_flows.setEnabled(behavior)
self.but_build_query.setEnabled(behavior * self.do_select_link.isChecked())
self.select_link_list.setEnabled(behavior * self.do_select_link.isChecked())
self.list_link_extraction.setEnabled(behavior * self.do_extract_link_flows.isChecked())
self.but_build_query_extract.setEnabled(behavior * self.do_extract_link_flows.isChecked())
def update_skim_list(self, skims):
self.skim_list_table.clearContents()
self.skim_list_table.setRowCount(len(skims))
for i, skm in enumerate(skims):
self.skim_list_table.setItem(i, 1, QTableWidgetItem(skm))
chb = QCheckBox()
my_widget = QWidget()
lay_out = QHBoxLayout(my_widget)
lay_out.addWidget(chb)
lay_out.setAlignment(Qt.AlignCenter)
lay_out.setContentsMargins(0, 0, 0, 0)
my_widget.setLayout(lay_out)
self.skim_list_table.setCellWidget(i, 0, my_widget)
# All Matrix loading and assignables selection
def update_matrix_list(self):
self.table_matrix_list.clearContents()
self.table_matrix_list.setEditTriggers(QtGui.QAbstractItemView.NoEditTriggers)
self.table_matrix_list.setRowCount(len(self.matrices.keys()))
for i, data_name in enumerate(self.matrices.keys()):
self.table_matrix_list.setItem(i, 0, QTableWidgetItem(data_name))
cbox = QComboBox()
for idx in self.matrices[data_name].index_names:
cbox.addItem(str(idx))
self.table_matrix_list.setCellWidget(i, 1, cbox)
def find_matrices(self):
dlg2 = LoadMatrixDialog(self.iface)
dlg2.show()
dlg2.exec_()
if dlg2.matrix is not None:
matrix_name = dlg2.matrix.file_path
matrix_name = os.path.splitext(os.path.basename(matrix_name))[0]
matrix_name = self.find_non_conflicting_name(matrix_name, self.matrices)
self.matrices[matrix_name] = dlg2.matrix
self.update_matrix_list()
row_count = self.table_matrices_to_assign.rowCount()
new_matrix = list(self.matrices.keys())[-1]
for i in range(row_count):
cb = self.table_matrices_to_assign.cellWidget(i, 0)
cb.insertItem(-1, new_matrix)
def find_non_conflicting_name(self, data_name, dictio):
if data_name in dictio:
i = 1
new_data_name = data_name + '_' + str(i)
while new_data_name in dictio:
i += 1
new_data_name = data_name + '_' + str(i)
data_name = new_data_name
return data_name
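# e.g. if a matrix named 'demand' is already loaded, a second 'demand' becomes
# 'demand_1', a third becomes 'demand_2', and so on.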
def changed_assignable_matrix(self, mi):
chb = self.sender()
mat_name = chb.currentText()
table = self.table_matrices_to_assign
for row in range(table.rowCount()):
if table.cellWidget(row, 0) == chb:
break
if len(mat_name) == 0:
if row + 1 < table.rowCount():
self.table_matrices_to_assign.removeRow(row)
else:
mat_cores = self.matrices[mat_name].names
cbox2 = QComboBox()
cbox2.addItems(mat_cores)
self.table_matrices_to_assign.setCellWidget(row, 1, cbox2)
if row + 1 == table.rowCount():
self.new_matrix_to_assign()
def new_matrix_to_assign(self):
# We edit ALL the combo boxes to have the current list of matrices
row_count = self.table_matrices_to_assign.rowCount()
self.table_matrices_to_assign.setRowCount(row_count + 1)
cbox = QComboBox()
cbox.addItems(list(self.matrices.keys()))
cbox.addItem('')
cbox.setCurrentIndex(cbox.count()-1)
cbox.currentIndexChanged.connect(self.changed_assignable_matrix)
self.table_matrices_to_assign.setCellWidget(row_count, 0, cbox)
def prepare_assignable_matrices(self):
table = self.table_matrices_to_assign
idx = self.graph.centroids
mat_names = []
if table.rowCount() > 1:
for row in range(table.rowCount() - 1):
mat = table.cellWidget(row, 0).currentText()
core = table.cellWidget(row, 1).currentText()
if not np.array_equal(idx,self.matrices[mat].index):
self.error = 'Assignable matrix ' + mat + ' has indices that do not match the centroids'
if core in mat_names:
self.error = 'Assignable matrices cannot have same names'
mat_names.append(core.encode('utf-8'))
self.matrix = AequilibraeMatrix()
self.matrix.create_empty(file_name=self.matrix.random_name(),
zones=idx.shape[0],
matrix_names=mat_names)
self.matrix.index[:] = idx[:]
for row in range(table.rowCount() - 1):
mat = table.cellWidget(row, 0).currentText()
core = table.cellWidget(row, 1).currentText()
self.matrix.matrix[core][:, :] = self.matrices[mat].matrix[core][:, :]
self.matrix.computational_view()
else:
self.error = 'You need to have at least one matrix to assign'
# Run preparation procedures
def change_graph_settings(self):
skims = []
table = self.skim_list_table
for i in range(table.rowCount()):
for chb in table.cellWidget(i, 0).findChildren(QCheckBox):
if chb.isChecked():
skims.append(table.item(i, 1).text().encode('utf-8'))
if len(skims) == 0:
skims = False
self.graph.set_graph(cost_field=self.minimizing_field.currentText(),
skim_fields=skims,
block_centroid_flows=self.block_centroid_flows.isChecked())
def exit_procedure(self):
self.close()
if self.report:
dlg2 = ReportDialog(self.iface, self.report)
dlg2.show()
dlg2.exec_()
class OutputType():
def __init__(self):
self.temp_file = None
self.extension = None
self.output_name = None |
# -*- coding: utf-8 -*-
"""ThreatConnect Batch Import Module."""
import hashlib
import json
import math
import os
import re
import shelve
import time
import uuid
from .tcex_ti_indicator import (
custom_indicator_class_factory,
Indicator,
Address,
ASN,
CIDR,
EmailAddress,
File,
Host,
Mutex,
RegistryKey,
URL,
UserAgent,
)
from .tcex_ti_group import (
Group,
Adversary,
Campaign,
Document,
Email,
Event,
Incident,
IntrusionSet,
Report,
Signature,
Threat,
)
# import local modules for dynamic reference
module = __import__(__name__)
class TcExBatch(object):
"""ThreatConnect Batch Import Module"""
def __init__(
self,
tcex,
owner,
action=None,
attribute_write_type=None,
halt_on_error=True,
playbook_triggers_enabled=None,
):
"""Initialize Class Properties.
Args:
tcex (obj): An instance of TcEx object.
owner (str): The ThreatConnect owner for Batch action.
action (str, default:Create): Action for the batch job ['Create', 'Delete'].
attribute_write_type (str, default:Replace): Write type for Indicator attributes
['Append', 'Replace'].
halt_on_error (bool, default:True): If True any batch error will halt the batch job.
"""
self.tcex = tcex
self._action = action or 'Create'
self._attribute_write_type = attribute_write_type or 'Replace'
self._batch_max_chunk = 5000
self._halt_on_error = halt_on_error
self._hash_collision_mode = None
self._file_merge_mode = None
self._owner = owner
self._playbook_triggers_enabled = playbook_triggers_enabled
# shelf settings
self._group_shelf_fqfn = None
self._indicator_shelf_fqfn = None
# global overrides on batch/file errors
self._halt_on_batch_error = None
self._halt_on_file_error = None
self._halt_on_poll_error = None
# debug/saved flags
self._saved_xids = None
self._saved_groups = None # indicates groups shelf file was provided
self._saved_indicators = None # indicates indicators shelf file was provided
self.enable_saved_file = False
# default properties
self._batch_data_count = None
self._poll_interval = None
self._poll_interval_times = []
self._poll_timeout = 3600
# containers
self._files = {}
self._groups = None
self._groups_shelf = None
self._indicators = None
self._indicators_shelf = None
# build custom indicator classes
self._gen_indicator_class()
@property
def _critical_failures(self):
"""Return Batch critical failure messages."""
return [
'Encountered an unexpected Exception while processing batch job',
'would exceed the number of allowed indicators',
]
def _gen_indicator_class(self):
"""Generate Custom Indicator Classes."""
for entry in self.tcex.indicator_types_data.values():
name = entry.get('name')
class_name = name.replace(' ', '')
# temp fix for API issue where boolean are returned as strings
entry['custom'] = self.tcex.utils.to_bool(entry.get('custom'))
if class_name in globals():
# skip Indicator Type if a class already exists
continue
# Custom Indicator can have 3 values. Only add the value if it is set.
value_fields = []
if entry.get('value1Label'):
value_fields.append(entry['value1Label'])
if entry.get('value2Label'):
value_fields.append(entry['value2Label'])
if entry.get('value3Label'):
value_fields.append(entry['value3Label'])
value_count = len(value_fields)
class_data = {}
# Add Class for each Custom Indicator type to this module
custom_class = custom_indicator_class_factory(name, Indicator, class_data, value_fields)
setattr(module, class_name, custom_class)
# Add Custom Indicator Method
self._gen_indicator_method(name, custom_class, value_count)
def _gen_indicator_method(self, name, custom_class, value_count):
"""Dynamically generate custom Indicator methods.
Args:
name (str): The name of the method.
custom_class (object): The class to add.
value_count (int): The number of value parameters to support.
"""
method_name = name.replace(' ', '_').lower()
# Add Method for each Custom Indicator class
def method_1(value1, xid, **kwargs): # pylint: disable=W0641
"""Add Custom Indicator data to Batch object"""
indicator_obj = custom_class(value1, xid, **kwargs)
return self._indicator(indicator_obj)
def method_2(value1, value2, xid, **kwargs): # pylint: disable=W0641
"""Add Custom Indicator data to Batch object"""
indicator_obj = custom_class(value1, value2, xid, **kwargs)
return self._indicator(indicator_obj)
def method_3(value1, value2, value3, xid, **kwargs): # pylint: disable=W0641
"""Add Custom Indicator data to Batch object"""
indicator_obj = custom_class(value1, value2, value3, xid, **kwargs)
return self._indicator(indicator_obj)
method = locals()['method_{}'.format(value_count)]
setattr(self, method_name, method)
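# For example (hypothetical custom type, not from the ThreatConnect API): a custom
# indicator type named 'Mutex Pair' with two value labels would be exposed as
# self.mutex_pair(value1, value2, xid, **kwargs), mirroring the built-in helpers.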
def _group(self, group_data):
"""Return previously stored group or new group.
Args:
group_data (dict|obj): An Group dict or instance of Group object.
Returns:
dict|obj: The new Group dict/object or the previously stored dict/object.
"""
if isinstance(group_data, dict):
# get xid from dict
xid = group_data.get('xid')
else:
# get xid from object
xid = group_data.xid
if self.groups.get(xid) is not None:
# return existing group from memory
group_data = self.groups.get(xid)
elif self.groups_shelf.get(xid) is not None:
# return existing group from shelf
group_data = self.groups_shelf.get(xid)
else:
# store new group
self.groups[xid] = group_data
return group_data
def _indicator(self, indicator_data):
"""Return previously stored indicator or new indicator.
Args:
indicator_data (dict|obj): An Indicator dict or instance of Indicator object.
Returns:
dict|obj: The new Indicator dict/object or the previously stored dict/object.
"""
if isinstance(indicator_data, dict):
# get xid from dict
xid = indicator_data.get('xid')
else:
# get xid from object
xid = indicator_data.xid
if self.indicators.get(xid) is not None:
# return existing indicator from memory
indicator_data = self.indicators.get(xid)
elif self.indicators_shelf.get(xid) is not None:
# return existing indicator from shelf
indicator_data = self.indicators_shelf.get(xid)
else:
# store new indicators
self.indicators[xid] = indicator_data
return indicator_data
@staticmethod
def _indicator_values(indicator):
"""Process indicators expanding file hashes/custom indicators into multiple entries.
Args:
indicator (str): " : " delimited string
Returns:
list: The list of indicators split on " : ".
"""
indicator_list = [indicator]
if indicator.count(' : ') > 0:
# handle all multi-valued indicators types (file hashes and custom indicators)
indicator_list = []
# group 1 - lazy capture everything to first <space>:<space> or end of line
iregx_pattern = r'^(.*?(?=\s\:\s|$))?'
iregx_pattern += r'(?:\s\:\s)?' # remove <space>:<space>
# group 2 - look behind for <space>:<space>, lazy capture everything
# to look ahead (optional <space>):<space> or end of line
iregx_pattern += r'((?<=\s\:\s).*?(?=(?:\s)?\:\s|$))?'
iregx_pattern += r'(?:(?:\s)?\:\s)?' # remove (optional <space>):<space>
# group 3 - look behind for <space>:<space>, lazy capture everything
# to look ahead end of line
iregx_pattern += r'((?<=\s\:\s).*?(?=$))?$'
iregx = re.compile(iregx_pattern)
indicators = iregx.search(indicator)
if indicators is not None:
indicator_list = list(indicators.groups())
return indicator_list
@property
def action(self):
"""Return batch action."""
return self._action
@action.setter
def action(self, action):
"""Set batch action."""
self._action = action
def add_group(self, group_data):
"""Add a group to Batch Job.
.. code-block:: javascript
{
"name": "Example Incident",
"type": "Incident",
"attribute": [{
"type": "Description",
"displayed": false,
"value": "Example Description"
}],
"xid": "e336e2dd-5dfb-48cd-a33a-f8809e83e904",
"associatedGroupXid": [
"e336e2dd-5dfb-48cd-a33a-f8809e83e904:58",
],
"tag": [{
"name": "China"
}]
}
Args:
group_data (dict): The full Group data including attributes, labels, tags, and
associations.
"""
return self._group(group_data)
def add_indicator(self, indicator_data):
"""Add an indicator to Batch Job.
.. code-block:: javascript
{
"type": "File",
"rating": 5.00,
"confidence": 50,
"summary": "53c3609411c83f363e051d455ade78a7
: 57a49b478310e4313c54c0fee46e4d70a73dd580
: db31cb2a748b7e0046d8c97a32a7eb4efde32a0593e5dbd58e07a3b4ae6bf3d7",
"associatedGroups": [
{
"groupXid": "e336e2dd-5dfb-48cd-a33a-f8809e83e904"
}
],
"attribute": [{
"type": "Source",
"displayed": true,
"value": "Malware Analysis provided by external AMA."
}],
"fileOccurrence": [{
"fileName": "drop1.exe",
"date": "2017-03-03T18:00:00-06:00"
}],
"tag": [{
"name": "China"
}],
"xid": "e336e2dd-5dfb-48cd-a33a-f8809e83e904:170139"
}
Args:
indicator_data (dict): The Full Indicator data including attributes, labels, tags,
and associations.
"""
if indicator_data.get('type') not in ['Address', 'EmailAddress', 'File', 'Host', 'URL']:
# for custom indicator types the valueX fields are required.
# using the summary we can build the values
index = 1
for value in self._indicator_values(indicator_data.get('summary')):
indicator_data['value{}'.format(index)] = value
index += 1
if indicator_data.get('type') == 'File':
# convert custom field name to the appropriate value for batch v2
size = indicator_data.pop('size', None)
if size is not None:
indicator_data['intValue1'] = size
if indicator_data.get('type') == 'Host':
# convert custom field name to the appropriate value for batch v2
dns_active = indicator_data.pop('dnsActive', None)
if dns_active is not None:
indicator_data['flag1'] = dns_active
whois_active = indicator_data.pop('whoisActive', None)
if whois_active is not None:
indicator_data['flag2'] = whois_active
return self._indicator(indicator_data)
def address(self, ip, **kwargs):
"""Add Address data to Batch object.
Args:
ip (str): The value for this Indicator.
confidence (str, kwargs): The threat confidence for this Indicator.
date_added (str, kwargs): The date timestamp the Indicator was created.
last_modified (str, kwargs): The date timestamp the Indicator was last modified.
rating (str, kwargs): The threat rating for this Indicator.
xid (str, kwargs): The external id for this Indicator.
Returns:
obj: An instance of Address.
"""
indicator_obj = Address(ip, **kwargs)
return self._indicator(indicator_obj)
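    # Usage sketch (illustrative only): assuming this Batch instance was created from a
    # configured TcEx instance (e.g. ``batch = tcex.batch(owner)``), the typed helper
    # methods build an Indicator object and register it with the job in one call.
    #
    #     batch.address('4.3.2.1', rating='5.0', confidence='100')
    #     batch.url('https://example.com/malware', confidence='75')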
def adversary(self, name, **kwargs):
"""Add Adversary data to Batch object.
Args:
name (str): The name for this Group.
date_added (str, kwargs): The date timestamp the Indicator was created.
xid (str, kwargs): The external id for this Group.
Returns:
obj: An instance of Adversary.
"""
group_obj = Adversary(name, **kwargs)
return self._group(group_obj)
def asn(self, as_number, **kwargs):
"""Add ASN data to Batch object.
Args:
as_number (str): The value for this Indicator.
confidence (str, kwargs): The threat confidence for this Indicator.
date_added (str, kwargs): The date timestamp the Indicator was created.
last_modified (str, kwargs): The date timestamp the Indicator was last modified.
rating (str, kwargs): The threat rating for this Indicator.
xid (str, kwargs): The external id for this Indicator.
Returns:
obj: An instance of ASN.
"""
indicator_obj = ASN(as_number, **kwargs)
return self._indicator(indicator_obj)
@property
def attribute_write_type(self):
"""Return batch attribute write type."""
return self._attribute_write_type
@attribute_write_type.setter
def attribute_write_type(self, attribute_write_type):
"""Set batch attribute write type."""
self._attribute_write_type = attribute_write_type
def campaign(self, name, **kwargs):
"""Add Campaign data to Batch object.
Args:
name (str): The name for this Group.
date_added (str, kwargs): The date timestamp the Indicator was created.
first_seen (str, kwargs): The first seen datetime expression for this Group.
xid (str, kwargs): The external id for this Group.
Returns:
obj: An instance of Campaign.
"""
group_obj = Campaign(name, **kwargs)
return self._group(group_obj)
def cidr(self, block, **kwargs):
"""Add CIDR data to Batch object.
Args:
block (str): The value for this Indicator.
confidence (str, kwargs): The threat confidence for this Indicator.
date_added (str, kwargs): The date timestamp the Indicator was created.
last_modified (str, kwargs): The date timestamp the Indicator was last modified.
rating (str, kwargs): The threat rating for this Indicator.
xid (str, kwargs): The external id for this Indicator.
Returns:
obj: An instance of CIDR.
"""
indicator_obj = CIDR(block, **kwargs)
return self._indicator(indicator_obj)
def close(self):
"""Cleanup batch job."""
self.groups_shelf.close()
self.indicators_shelf.close()
if self.debug and self.enable_saved_file:
fqfn = os.path.join(self.tcex.args.tc_temp_path, 'xids-saved')
if os.path.isfile(fqfn):
os.remove(fqfn) # remove previous file to prevent duplicates
with open(fqfn, 'w') as fh:
for xid in self.saved_xids:
fh.write('{}\n'.format(xid))
else:
# delete saved files
if os.path.isfile(self.group_shelf_fqfn):
os.remove(self.group_shelf_fqfn)
            if os.path.isfile(self.indicator_shelf_fqfn):
os.remove(self.indicator_shelf_fqfn)
@property
def data(self):
"""Return the batch data to be sent to the ThreatConnect API.
**Processing Order:**
* Process groups in memory up to max batch size.
* Process groups in shelf to max batch size.
* Process indicators in memory up to max batch size.
* Process indicators in shelf up to max batch size.
This method will remove the group/indicator from memory and/or shelf.
"""
entity_count = 0
data = {'group': [], 'indicator': []}
# process group data
group_data, entity_count = self.data_groups(self.groups, entity_count)
data['group'].extend(group_data)
if entity_count >= self._batch_max_chunk:
return data
group_data, entity_count = self.data_groups(self.groups_shelf, entity_count)
data['group'].extend(group_data)
if entity_count >= self._batch_max_chunk:
return data
# process indicator data
indicator_data, entity_count = self.data_indicators(self.indicators, entity_count)
data['indicator'].extend(indicator_data)
if entity_count >= self._batch_max_chunk:
return data
indicator_data, entity_count = self.data_indicators(self.indicators_shelf, entity_count)
data['indicator'].extend(indicator_data)
if entity_count >= self._batch_max_chunk:
return data
return data
def data_group_association(self, xid):
"""Return group dict array following all associations.
Args:
xid (str): The xid of the group to retrieve associations.
Returns:
list: A list of group dicts.
"""
groups = []
group_data = None
# get group data from one of the arrays
if self.groups.get(xid) is not None:
group_data = self.groups.get(xid)
del self.groups[xid]
elif self.groups_shelf.get(xid) is not None:
group_data = self.groups_shelf.get(xid)
del self.groups_shelf[xid]
if group_data is not None:
# convert any obj into dict and process file data
group_data = self.data_group_type(group_data)
groups.append(group_data)
# recursively get associations
for assoc_xid in group_data.get('associatedGroupXid', []):
groups.extend(self.data_group_association(assoc_xid))
return groups
def data_group_type(self, group_data):
"""Return dict representation of group data.
Args:
group_data (dict|obj): The group data dict or object.
Returns:
dict: The group data in dict format.
"""
if isinstance(group_data, dict):
# process file content
file_content = group_data.pop('fileContent', None)
if file_content is not None:
self._files[group_data.get('xid')] = {
'fileContent': file_content,
'fileName': group_data.get('fileName'),
'type': group_data.get('type'),
}
else:
GROUPS_STRINGS_WITH_FILE_CONTENTS = ['Document', 'Report']
# process file content
if group_data.data.get('type') in GROUPS_STRINGS_WITH_FILE_CONTENTS:
self._files[group_data.data.get('xid')] = group_data.file_data
group_data = group_data.data
return group_data
def data_groups(self, groups, entity_count):
"""Process Group data.
Args:
groups (list): The list of groups to process.
Returns:
list: A list of groups including associations
"""
data = []
# process group objects
        for xid in list(groups.keys()):  # copy keys; associations delete entries during iteration
# get association from group data
assoc_group_data = self.data_group_association(xid)
data += assoc_group_data
entity_count += len(assoc_group_data)
if entity_count >= self._batch_max_chunk:
break
return data, entity_count
def data_indicators(self, indicators, entity_count):
"""Process Indicator data."""
data = []
# process indicator objects
        for xid, indicator_data in list(indicators.items()):  # copy; entries are deleted below
entity_count += 1
if isinstance(indicator_data, dict):
data.append(indicator_data)
else:
data.append(indicator_data.data)
del indicators[xid]
if entity_count >= self._batch_max_chunk:
break
return data, entity_count
@property
def debug(self):
"""Return debug setting"""
debug = False
if os.path.isfile(os.path.join(self.tcex.args.tc_temp_path, 'DEBUG')):
debug = True
return debug
def document(self, name, file_name, **kwargs):
"""Add Document data to Batch object.
Args:
name (str): The name for this Group.
file_name (str): The name for the attached file for this Group.
date_added (str, kwargs): The date timestamp the Indicator was created.
file_content (str;method, kwargs): The file contents or callback method to retrieve
file content.
malware (bool, kwargs): If true the file is considered malware.
            password (bool, kwargs): If malware is true a password for the zip archive is required.
xid (str, kwargs): The external id for this Group.
Returns:
obj: An instance of Document.
"""
group_obj = Document(name, file_name, **kwargs)
return self._group(group_obj)
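    # Usage sketch (illustrative only): ``file_content`` may be a callable that is invoked
    # at upload time with the group xid (see ``submit_files``). ``load_report_bytes`` is a
    # hypothetical user-supplied helper, not part of this module.
    #
    #     def load_report_bytes(xid):
    #         with open('reports/{}.pdf'.format(xid), 'rb') as fh:
    #             return fh.read()
    #
    #     batch.document('Malware Report', 'report.pdf', file_content=load_report_bytes)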
def email(self, name, subject, header, body, **kwargs):
"""Add Email data to Batch object.
Args:
name (str): The name for this Group.
subject (str): The subject for this Email.
header (str): The header for this Email.
body (str): The body for this Email.
date_added (str, kwargs): The date timestamp the Indicator was created.
from_addr (str, kwargs): The **from** address for this Email.
to_addr (str, kwargs): The **to** address for this Email.
xid (str, kwargs): The external id for this Group.
Returns:
obj: An instance of Email.
"""
group_obj = Email(name, subject, header, body, **kwargs)
return self._group(group_obj)
def email_address(self, address, **kwargs):
"""Add Email Address data to Batch object.
Args:
address (str): The value for this Indicator.
confidence (str, kwargs): The threat confidence for this Indicator.
date_added (str, kwargs): The date timestamp the Indicator was created.
last_modified (str, kwargs): The date timestamp the Indicator was last modified.
rating (str, kwargs): The threat rating for this Indicator.
xid (str, kwargs): The external id for this Indicator.
Returns:
obj: An instance of EmailAddress.
"""
indicator_obj = EmailAddress(address, **kwargs)
return self._indicator(indicator_obj)
@property
def error_codes(self):
"""Static list of Batch error codes and short description"""
error_codes = {}
error_codes['0x1001'] = 'General Error'
error_codes['0x1002'] = 'Permission Error'
error_codes['0x1003'] = 'JsonSyntax Error'
error_codes['0x1004'] = 'Internal Error'
error_codes['0x1005'] = 'Invalid Indicator Error'
error_codes['0x1006'] = 'Invalid Group Error'
error_codes['0x1007'] = 'Item Not Found Error'
error_codes['0x1008'] = 'Indicator Limit Error'
error_codes['0x1009'] = 'Association Error'
error_codes['0x100A'] = 'Duplicate Item Error'
error_codes['0x100B'] = 'File IO Error'
return error_codes
def errors(self, batch_id, halt_on_error=True):
"""Retrieve Batch errors to ThreatConnect API.
.. code-block:: javascript
[{
"errorReason": "Incident incident-001 has an invalid status.",
"errorSource": "incident-001 is not valid."
}, {
"errorReason": "Incident incident-002 has an invalid status.",
"errorSource":"incident-002 is not valid."
}]
Args:
batch_id (str): The ID returned from the ThreatConnect API for the current batch job.
halt_on_error (bool, default:True): If True any exception will raise an error.
"""
errors = []
try:
r = self.tcex.session.get('/v2/batch/{}/errors'.format(batch_id))
# if r.status_code == 404:
# time.sleep(5) # allow time for errors to be processed
# r = self.tcex.session.get('/v2/batch/{}/errors'.format(batch_id))
self.tcex.log.debug(
'Retrieve Errors for ID {}: status code {}, errors {}'.format(
batch_id, r.status_code, r.text
)
)
# self.tcex.log.debug('Retrieve Errors URL {}'.format(r.url))
# API does not return correct content type
if r.ok:
errors = json.loads(r.text)
# temporarily process errors to find "critical" errors.
# FR in core to return error codes.
for error in errors:
error_reason = error.get('errorReason')
for error_msg in self._critical_failures:
if re.findall(error_msg, error_reason):
self.tcex.handle_error(10500, [error_reason], halt_on_error)
return errors
except Exception as e:
self.tcex.handle_error(560, [e], halt_on_error)
def event(self, name, **kwargs):
"""Add Event data to Batch object.
Args:
name (str): The name for this Group.
date_added (str, kwargs): The date timestamp the Indicator was created.
event_date (str, kwargs): The event datetime expression for this Group.
status (str, kwargs): The status for this Group.
xid (str, kwargs): The external id for this Group.
Returns:
obj: An instance of Event.
"""
group_obj = Event(name, **kwargs)
return self._group(group_obj)
def file(self, md5=None, sha1=None, sha256=None, **kwargs):
"""Add File data to Batch object.
        .. note:: At least one file hash value must be specified.
Args:
md5 (str, optional): The md5 value for this Indicator.
sha1 (str, optional): The sha1 value for this Indicator.
sha256 (str, optional): The sha256 value for this Indicator.
confidence (str, kwargs): The threat confidence for this Indicator.
date_added (str, kwargs): The date timestamp the Indicator was created.
last_modified (str, kwargs): The date timestamp the Indicator was last modified.
rating (str, kwargs): The threat rating for this Indicator.
size (str, kwargs): The file size for this Indicator.
xid (str, kwargs): The external id for this Indicator.
Returns:
obj: An instance of File.
"""
indicator_obj = File(md5, sha1, sha256, **kwargs)
return self._indicator(indicator_obj)
def file_merge_mode(self, value):
"""Set the file merge mode for the entire batch job.
Args:
value (str): A value of Distribute or Merge.
"""
self._file_merge_mode = value
@property
def files(self):
"""Return dictionary containing all of the file content or callbacks."""
return self._files
@staticmethod
def generate_xid(identifier=None):
"""Generate xid from provided identifiers.
.. Important:: If no identifier is provided a unique xid will be returned, but it will
                       not be reproducible. If a list of identifiers is provided, they must be
in the same order to generate a reproducible xid.
Args:
identifier (list|str): Optional *string* value(s) to be used to make a unique and
reproducible xid.
"""
if identifier is None:
identifier = str(uuid.uuid4())
elif isinstance(identifier, list):
identifier = '-'.join([str(i) for i in identifier])
identifier = hashlib.sha256(identifier.encode('utf-8')).hexdigest()
return hashlib.sha256(identifier.encode('utf-8')).hexdigest()
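    # Illustrative behavior: the same ordered identifier list always produces the same xid,
    # while calling with no identifier yields a new, non-reproducible xid each time.
    #
    #     xid_a = batch.generate_xid(['Acme Corp', 'incident-001'])
    #     xid_b = batch.generate_xid(['Acme Corp', 'incident-001'])
    #     assert xid_a == xid_b
    #     assert batch.generate_xid() != batch.generate_xid()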
def group(self, group_type, name, **kwargs):
"""Add Group data to Batch object.
Args:
            group_type (str): The ThreatConnect-defined Group type.
name (str): The name for this Group.
date_added (str, kwargs): The date timestamp the Indicator was created.
xid (str, kwargs): The external id for this Group.
Returns:
obj: An instance of Group.
"""
group_obj = Group(group_type, name, **kwargs)
return self._group(group_obj)
@property
def group_shelf_fqfn(self):
"""Return groups shelf fully qualified filename.
For testing/debugging a previous shelf file can be copied into the tc_temp_path directory
instead of creating a new shelf file.
"""
if self._group_shelf_fqfn is None:
# new shelf file
self._group_shelf_fqfn = os.path.join(
self.tcex.args.tc_temp_path, 'groups-{}'.format(str(uuid.uuid4()))
)
# saved shelf file
if self.saved_groups:
self._group_shelf_fqfn = os.path.join(self.tcex.args.tc_temp_path, 'groups-saved')
return self._group_shelf_fqfn
@property
def groups(self):
"""Return dictionary of all Groups data."""
if self._groups is None:
# plain dict, but could be something else in future
self._groups = {}
return self._groups
@property
def groups_shelf(self):
"""Return dictionary of all Groups data."""
if self._groups_shelf is None:
self._groups_shelf = shelve.open(self.group_shelf_fqfn, writeback=False)
return self._groups_shelf
@property
def halt_on_error(self):
"""Return batch halt on error setting."""
return self._halt_on_error
@halt_on_error.setter
def halt_on_error(self, halt_on_error):
"""Set batch halt on error setting."""
self._halt_on_error = halt_on_error
@property
def halt_on_batch_error(self):
"""Return halt on batch error value."""
return self._halt_on_batch_error
@halt_on_batch_error.setter
def halt_on_batch_error(self, value):
"""Set batch halt on batch error value."""
if isinstance(value, bool):
self._halt_on_batch_error = value
@property
def halt_on_file_error(self):
"""Return halt on file post error value."""
return self._halt_on_file_error
@halt_on_file_error.setter
def halt_on_file_error(self, value):
"""Set halt on file post error value."""
if isinstance(value, bool):
self._halt_on_file_error = value
@property
def halt_on_poll_error(self):
"""Return halt on poll error value."""
return self._halt_on_poll_error
@halt_on_poll_error.setter
def halt_on_poll_error(self, value):
"""Set batch halt on poll error value."""
if isinstance(value, bool):
self._halt_on_poll_error = value
def hash_collision_mode(self, value):
"""Set the file hash collision mode for the entire batch job.
Args:
value (str): A value of Split, IgnoreIncoming, IgnoreExisting, FavorIncoming,
and FavorExisting.
"""
self._hash_collision_mode = value
def host(self, hostname, **kwargs):
"""Add Email Address data to Batch object.
Args:
hostname (str): The value for this Indicator.
confidence (str, kwargs): The threat confidence for this Indicator.
date_added (str, kwargs): The date timestamp the Indicator was created.
dns_active (bool, kwargs): If True DNS active is enabled for this indicator.
last_modified (str, kwargs): The date timestamp the Indicator was last modified.
rating (str, kwargs): The threat rating for this Indicator.
whois_active (bool, kwargs): If True WhoIs active is enabled for this indicator.
xid (str, kwargs): The external id for this Indicator.
Returns:
obj: An instance of Host.
"""
indicator_obj = Host(hostname, **kwargs)
return self._indicator(indicator_obj)
def incident(self, name, **kwargs):
"""Add Incident data to Batch object.
Args:
name (str): The name for this Group.
date_added (str, kwargs): The date timestamp the Indicator was created.
event_date (str, kwargs): The event datetime expression for this Group.
status (str, kwargs): The status for this Group.
xid (str, kwargs): The external id for this Group.
Returns:
obj: An instance of Incident.
"""
group_obj = Incident(name, **kwargs)
return self._group(group_obj)
def indicator(self, indicator_type, summary, **kwargs):
"""Add Indicator data to Batch object.
Args:
            indicator_type (str): The ThreatConnect-defined Indicator type.
summary (str): The value for this Indicator.
confidence (str, kwargs): The threat confidence for this Indicator.
date_added (str, kwargs): The date timestamp the Indicator was created.
last_modified (str, kwargs): The date timestamp the Indicator was last modified.
rating (str, kwargs): The threat rating for this Indicator.
xid (str, kwargs): The external id for this Indicator.
Returns:
obj: An instance of Indicator.
"""
indicator_obj = Indicator(indicator_type, summary, **kwargs)
return self._indicator(indicator_obj)
@property
def indicator_shelf_fqfn(self):
"""Return indicator shelf fully qualified filename.
For testing/debugging a previous shelf file can be copied into the tc_temp_path directory
instead of creating a new shelf file.
"""
if self._indicator_shelf_fqfn is None:
# new shelf file
self._indicator_shelf_fqfn = os.path.join(
self.tcex.args.tc_temp_path, 'indicators-{}'.format(str(uuid.uuid4()))
)
# saved shelf file
if self.saved_indicators:
self._indicator_shelf_fqfn = os.path.join(
self.tcex.args.tc_temp_path, 'indicators-saved'
)
return self._indicator_shelf_fqfn
@property
def indicators(self):
"""Return dictionary of all Indicator data."""
if self._indicators is None:
# plain dict, but could be something else in future
self._indicators = {}
return self._indicators
@property
def indicators_shelf(self):
"""Return dictionary of all Indicator data."""
if self._indicators_shelf is None:
self._indicators_shelf = shelve.open(self.indicator_shelf_fqfn, writeback=False)
return self._indicators_shelf
def intrusion_set(self, name, **kwargs):
"""Add Intrusion Set data to Batch object.
Args:
name (str): The name for this Group.
date_added (str, kwargs): The date timestamp the Indicator was created.
xid (str, kwargs): The external id for this Group.
Returns:
obj: An instance of IntrusionSet.
"""
group_obj = IntrusionSet(name, **kwargs)
return self._group(group_obj)
def mutex(self, mutex, **kwargs):
"""Add Mutex data to Batch object.
Args:
mutex (str): The value for this Indicator.
confidence (str, kwargs): The threat confidence for this Indicator.
date_added (str, kwargs): The date timestamp the Indicator was created.
last_modified (str, kwargs): The date timestamp the Indicator was last modified.
rating (str, kwargs): The threat rating for this Indicator.
xid (str, kwargs): The external id for this Indicator.
Returns:
obj: An instance of Mutex.
"""
indicator_obj = Mutex(mutex, **kwargs)
return self._indicator(indicator_obj)
def poll(self, batch_id, retry_seconds=None, back_off=None, timeout=None, halt_on_error=True):
"""Poll Batch status to ThreatConnect API.
.. code-block:: javascript
{
"status": "Success",
"data": {
"batchStatus": {
"id":3505,
"status":"Completed",
"errorCount":0,
"successCount":0,
"unprocessCount":0
}
}
}
Args:
batch_id (str): The ID returned from the ThreatConnect API for the current batch job.
retry_seconds (int): The base number of seconds used for retries when job is not
completed.
back_off (float): A multiplier to use for backing off on each poll attempt when job has
not completed.
timeout (int, optional): The number of seconds before the poll should timeout.
halt_on_error (bool, default:True): If True any exception will raise an error.
Returns:
dict: The batch status returned from the ThreatConnect API.
"""
# check global setting for override
if self.halt_on_poll_error is not None:
halt_on_error = self.halt_on_poll_error
# initial poll interval
if self._poll_interval is None and self._batch_data_count is not None:
            # calculate poll_interval based on the number of entries in the batch data
# with a minimum value of 5 seconds.
self._poll_interval = max(math.ceil(self._batch_data_count / 300), 5)
elif self._poll_interval is None:
# if not able to calculate poll_interval default to 15 seconds
self._poll_interval = 15
# poll retry back_off factor
if back_off is None:
poll_interval_back_off = 2.5
else:
poll_interval_back_off = float(back_off)
# poll retry seconds
if retry_seconds is None:
poll_retry_seconds = 5
else:
poll_retry_seconds = int(retry_seconds)
# poll timeout
if timeout is None:
timeout = self.poll_timeout
else:
timeout = int(timeout)
params = {'includeAdditional': 'true'}
poll_count = 0
poll_time_total = 0
data = {}
while True:
poll_count += 1
poll_time_total += self._poll_interval
time.sleep(self._poll_interval)
self.tcex.log.info('Batch poll time: {} seconds'.format(poll_time_total))
try:
# retrieve job status
r = self.tcex.session.get('/v2/batch/{}'.format(batch_id), params=params)
if not r.ok or 'application/json' not in r.headers.get('content-type', ''):
self.tcex.handle_error(545, [r.status_code, r.text], halt_on_error)
return data
data = r.json()
if data.get('status') != 'Success':
self.tcex.handle_error(545, [r.status_code, r.text], halt_on_error)
except Exception as e:
self.tcex.handle_error(540, [e], halt_on_error)
if data.get('data', {}).get('batchStatus', {}).get('status') == 'Completed':
# store last 5 poll times to use in calculating average poll time
modifier = poll_time_total * 0.7
self._poll_interval_times = self._poll_interval_times[-4:] + [modifier]
weights = [1]
poll_interval_time_weighted_sum = 0
for poll_interval_time in self._poll_interval_times:
poll_interval_time_weighted_sum += poll_interval_time * weights[-1]
# weights will be [1, 1.5, 2.25, 3.375, 5.0625] for all 5 poll times depending
# on how many poll times are available.
weights.append(weights[-1] * 1.5)
                # pop off the last weight so it's not added into the sum
weights.pop()
# calculate the weighted average of the last 5 poll times
self._poll_interval = math.floor(poll_interval_time_weighted_sum / sum(weights))
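                # e.g. with stored times [10, 20] the weights are [1, 1.5], giving a new
                # interval of floor((10*1 + 20*1.5) / (1 + 1.5)) = 16 seconds.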
if poll_count == 1:
# if completed on first poll, reduce poll interval.
self._poll_interval = self._poll_interval * 0.85
self.tcex.log.debug('Batch Status: {}'.format(data))
return data
# update poll_interval for retry with max poll time of 20 seconds
self._poll_interval = min(
poll_retry_seconds + int(poll_count * poll_interval_back_off), 20
)
# time out poll to prevent App running indefinitely
if poll_time_total >= timeout:
self.tcex.handle_error(550, [timeout], True)
@property
def poll_timeout(self):
"""Return current poll timeout value."""
return self._poll_timeout
@poll_timeout.setter
def poll_timeout(self, seconds):
"""Set the poll timeout value."""
self._poll_timeout = int(seconds)
def registry_key(self, key_name, value_name, value_type, **kwargs):
"""Add Registry Key data to Batch object.
Args:
key_name (str): The key_name value for this Indicator.
value_name (str): The value_name value for this Indicator.
value_type (str): The value_type value for this Indicator.
confidence (str, kwargs): The threat confidence for this Indicator.
date_added (str, kwargs): The date timestamp the Indicator was created.
last_modified (str, kwargs): The date timestamp the Indicator was last modified.
rating (str, kwargs): The threat rating for this Indicator.
xid (str, kwargs): The external id for this Indicator.
Returns:
obj: An instance of Registry Key.
"""
indicator_obj = RegistryKey(key_name, value_name, value_type, **kwargs)
return self._indicator(indicator_obj)
def report(self, name, **kwargs):
"""Add Report data to Batch object.
Args:
name (str): The name for this Group.
            file_name (str, kwargs): The name for the attached file for this Group.
date_added (str, kwargs): The date timestamp the Indicator was created.
file_content (str;method, kwargs): The file contents or callback method to retrieve
file content.
publish_date (str, kwargs): The publish datetime expression for this Group.
xid (str, kwargs): The external id for this Group.
Returns:
obj: An instance of Report.
"""
group_obj = Report(name, **kwargs)
return self._group(group_obj)
def save(self, resource):
"""Save group|indicator dict or object to shelve.
Best effort to save group/indicator data to disk. If for any reason the save fails
        the data will still be accessible from memory.
Args:
resource (dict|obj): The Group or Indicator dict or object.
"""
resource_type = None
xid = None
if isinstance(resource, dict):
resource_type = resource.get('type')
xid = resource.get('xid')
else:
resource_type = resource.type
xid = resource.xid
if resource_type is not None and xid is not None:
saved = True
if resource_type in self.tcex.group_types:
try:
# groups
self.groups_shelf[xid] = resource
except Exception:
saved = False
if saved:
try:
del self._groups[xid]
except KeyError:
                        # if the group was saved twice it would already be deleted
pass
elif resource_type in self.tcex.indicator_types_data.keys():
try:
# indicators
self.indicators_shelf[xid] = resource
except Exception:
saved = False
if saved:
try:
del self._indicators[xid]
except KeyError:
                        # if the indicator was saved twice it would already be deleted
pass
@property
def saved_groups(self):
"""Return True if saved group files exits, else False."""
if self._saved_groups is None:
self._saved_groups = False
fqfn_saved = os.path.join(self.tcex.args.tc_temp_path, 'groups-saved')
if (
self.enable_saved_file
and os.path.isfile(fqfn_saved)
and os.access(fqfn_saved, os.R_OK)
):
self._saved_groups = True
self.tcex.log.debug('groups-saved file found')
return self._saved_groups
@property
def saved_indicators(self):
"""Return True if saved indicators files exits, else False."""
if self._saved_indicators is None:
self._saved_indicators = False
fqfn_saved = os.path.join(self.tcex.args.tc_temp_path, 'indicators-saved')
if (
self.enable_saved_file
and os.path.isfile(fqfn_saved)
and os.access(fqfn_saved, os.R_OK)
):
self._saved_indicators = True
self.tcex.log.debug('indicators-saved file found')
return self._saved_indicators
@property
def saved_xids(self):
"""Return previously saved xids."""
if self._saved_xids is None:
self._saved_xids = []
if self.debug:
fpfn = os.path.join(self.tcex.args.tc_temp_path, 'xids-saved')
if os.path.isfile(fpfn) and os.access(fpfn, os.R_OK):
with open(fpfn) as fh:
self._saved_xids = fh.read().splitlines()
return self._saved_xids
@property
def settings(self):
"""Return batch job settings."""
_settings = {
'action': self._action,
# not supported in v2 batch
# 'attributeWriteType': self._attribute_write_type,
'attributeWriteType': 'Replace',
'haltOnError': str(self._halt_on_error).lower(),
'owner': self._owner,
'version': 'V2',
}
if self._playbook_triggers_enabled is not None:
_settings['playbookTriggersEnabled'] = str(self._playbook_triggers_enabled).lower()
if self._hash_collision_mode is not None:
_settings['hashCollisionMode'] = self._hash_collision_mode
if self._file_merge_mode is not None:
_settings['fileMergeMode'] = self._file_merge_mode
return _settings
def signature(self, name, file_name, file_type, file_text, **kwargs):
"""Add Signature data to Batch object.
Valid file_types:
+ Snort ®
+ Suricata
+ YARA
+ ClamAV ®
+ OpenIOC
+ CybOX ™
+ Bro
+ Regex
+ SPL - Splunk ® Search Processing Language
Args:
name (str): The name for this Group.
file_name (str): The name for the attached signature for this Group.
file_type (str): The signature type for this Group.
file_text (str): The signature content for this Group.
date_added (str, kwargs): The date timestamp the Indicator was created.
xid (str, kwargs): The external id for this Group.
Returns:
obj: An instance of Signature.
"""
group_obj = Signature(name, file_name, file_type, file_text, **kwargs)
return self._group(group_obj)
def submit(self, poll=True, errors=True, process_files=True, halt_on_error=True):
"""Submit Batch request to ThreatConnect API.
By default this method will submit the job request and data and if the size of the data
is below the value **synchronousBatchSaveLimit** set in System Setting it will process
the request synchronously and return the batch status. If the size of the batch is greater
than the value set the batch job will be queued.
        Errors are not retrieved automatically and need to be enabled.
If any of the submit, poll, or error methods fail the entire submit will halt at the point
of failure. The behavior can be changed by setting halt_on_error to False.
Each of these methods can also be called on their own for greater control of the submit
process.
Args:
poll (bool, default:True): Poll for status.
errors (bool, default:True): Retrieve any batch errors (only if poll is True).
process_files (bool, default:True): Send any document or report attachments to the API.
halt_on_error (bool, default:True): If True any exception will raise an error.
        Returns:
dict: The Batch Status from the ThreatConnect API.
"""
batch_data = (
self.submit_create_and_upload(halt_on_error).get('data', {}).get('batchStatus', {})
)
batch_id = batch_data.get('id')
if batch_id is not None:
self.tcex.log.info('Batch ID: {}'.format(batch_id))
# job hit queue
if poll:
# poll for status
batch_data = (
self.poll(batch_id, halt_on_error=halt_on_error)
.get('data', {})
.get('batchStatus')
)
if errors:
# retrieve errors
error_groups = batch_data.get('errorGroupCount', 0)
error_indicators = batch_data.get('errorIndicatorCount', 0)
if error_groups > 0 or error_indicators > 0:
self.tcex.log.debug('retrieving batch errors')
batch_data['errors'] = self.errors(batch_id)
else:
# can't process files if status is unknown (polling must be enabled)
process_files = False
if process_files:
# submit file data after batch job is complete
batch_data['uploadStatus'] = self.submit_files(halt_on_error)
return batch_data
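    # End-to-end sketch (illustrative only; assumes ``tcex`` is a configured TcEx instance
    # and entities were added with the helper methods above):
    #
    #     batch.incident('Suspicious Login', xid=batch.generate_xid(['Acme Corp', 'inc-1']))
    #     batch.address('4.3.2.1', confidence='100')
    #     batch_status = batch.submit()  # createAndUpload, poll, and error retrieval
    #     tcex.log.info('error count: {}'.format(batch_status.get('errorCount', 0)))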
def submit_all(self, poll=True, errors=True, process_files=True, halt_on_error=True):
"""Submit Batch request to ThreatConnect API.
By default this method will submit the job request and data and if the size of the data
is below the value **synchronousBatchSaveLimit** set in System Setting it will process
the request synchronously and return the batch status. If the size of the batch is greater
than the value set the batch job will be queued.
        Errors are not retrieved automatically and need to be enabled.
If any of the submit, poll, or error methods fail the entire submit will halt at the point
of failure. The behavior can be changed by setting halt_on_error to False.
Each of these methods can also be called on their own for greater control of the submit
process.
Args:
poll (bool, default:True): Poll for status.
errors (bool, default:True): Retrieve any batch errors (only if poll is True).
process_files (bool, default:True): Send any document or report attachments to the API.
halt_on_error (bool, default:True): If True any exception will raise an error.
        Returns:
dict: The Batch Status from the ThreatConnect API.
"""
batch_data_array = []
while True:
batch_data = {}
batch_id = None
if self.action.lower() == 'delete':
                # while waiting on an FR for delete support in createAndUpload, submit delete requests
# the old way (submit job + submit data), still using V2.
if len(self) > 0: # pylint: disable=C1801
batch_id = self.submit_job(halt_on_error)
if batch_id is not None:
batch_data = self.submit_data(batch_id, halt_on_error)
else:
batch_data = {}
else:
batch_data = (
self.submit_create_and_upload(halt_on_error)
.get('data', {})
.get('batchStatus', {})
)
batch_id = batch_data.get('id')
if not batch_data:
break
elif batch_id is not None:
self.tcex.log.info('Batch ID: {}'.format(batch_id))
# job hit queue
if poll:
# poll for status
batch_data = (
self.poll(batch_id, halt_on_error=halt_on_error)
.get('data', {})
.get('batchStatus')
)
if errors:
# retrieve errors
error_count = batch_data.get('errorCount', 0)
error_groups = batch_data.get('errorGroupCount', 0)
error_indicators = batch_data.get('errorIndicatorCount', 0)
if error_count > 0 or error_groups > 0 or error_indicators > 0:
self.tcex.log.debug('retrieving batch errors')
batch_data['errors'] = self.errors(batch_id)
else:
# can't process files if status is unknown (polling must be enabled)
process_files = False
if process_files:
# submit file data after batch job is complete
batch_data['uploadStatus'] = self.submit_files(halt_on_error)
batch_data_array.append(batch_data)
if self.debug:
self.write_error_json(batch_data.get('errors'))
return batch_data_array
def write_error_json(self, errors):
"""Writes the errors for debuging purposes"""
timestamp = str(time.time()).replace('.', '')
error_json_file = os.path.join(
self.tcex.args.tc_temp_path, 'errors-{}.json'.format(timestamp)
)
with open(error_json_file, 'w') as fh:
json.dump(errors, fh, indent=2)
def submit_create_and_upload(self, halt_on_error=True):
"""Submit Batch request to ThreatConnect API.
        Returns:
dict: The Batch Status from the ThreatConnect API.
"""
# check global setting for override
if self.halt_on_batch_error is not None:
halt_on_error = self.halt_on_batch_error
content = self.data
if self.debug:
# special code for debugging App using batchV2.
self.write_batch_json(content)
if content.get('group') or content.get('indicator'):
# store the length of the batch data to use for poll interval calculations
self.tcex.log.info('Batch Group Size: {:,}.'.format(len(content.get('group'))))
            self.tcex.log.info('Batch Indicator Size: {:,}.'.format(len(content.get('indicator'))))
try:
files = (('config', json.dumps(self.settings)), ('content', json.dumps(content)))
params = {'includeAdditional': 'true'}
r = self.tcex.session.post('/v2/batch/createAndUpload', files=files, params=params)
self.tcex.log.debug('Batch Status Code: {}'.format(r.status_code))
if not r.ok or 'application/json' not in r.headers.get('content-type', ''):
self.tcex.handle_error(10510, [r.status_code, r.text], halt_on_error)
return r.json()
except Exception as e:
self.tcex.handle_error(10505, [e], halt_on_error)
return {}
def submit_data(self, batch_id, halt_on_error=True):
"""Submit Batch request to ThreatConnect API.
Args:
batch_id (string): The batch id of the current job.
"""
# check global setting for override
if self.halt_on_batch_error is not None:
halt_on_error = self.halt_on_batch_error
content = self.data
# store the length of the batch data to use for poll interval calculations
self._batch_data_count = len(content.get('group')) + len(content.get('indicator'))
self.tcex.log.info('Batch Size: {:,}'.format(self._batch_data_count))
if content.get('group') or content.get('indicator'):
headers = {'Content-Type': 'application/octet-stream'}
try:
r = self.tcex.session.post(
'/v2/batch/{}'.format(batch_id), headers=headers, json=content
)
            except Exception as e:
                self.tcex.handle_error(10520, [e], halt_on_error)
                return {}  # the request failed; r is undefined, so bail out here
if not r.ok or 'application/json' not in r.headers.get('content-type', ''):
self.tcex.handle_error(10525, [r.status_code, r.text], halt_on_error)
return r.json()
return {}
def submit_files(self, halt_on_error=True):
"""Submit Files for Documents and Reports to ThreatConnect API.
Critical Errors
* There is insufficient document storage allocated to this account.
Args:
halt_on_error (bool, default:True): If True any exception will raise an error.
Returns:
dict: The upload status for each xid.
"""
# check global setting for override
if self.halt_on_file_error is not None:
halt_on_error = self.halt_on_file_error
upload_status = []
        for xid, content_data in list(self._files.items()):
            del self._files[xid]  # win or lose, remove the entry
status = True
# used for debug/testing to prevent upload of previously uploaded file
if self.debug and xid in self.saved_xids:
self.tcex.log.debug('skipping previously saved file {}.'.format(xid))
continue
# process the file content
content = content_data.get('fileContent')
if callable(content):
content = content_data.get('fileContent')(xid)
if content is None:
upload_status.append({'uploaded': False, 'xid': xid})
self.tcex.log.warning('File content was null for xid {}.'.format(xid))
continue
if content_data.get('type') == 'Document':
api_branch = 'documents'
elif content_data.get('type') == 'Report':
api_branch = 'reports'
if self.debug and content_data.get('fileName'):
# special code for debugging App using batchV2.
fqfn = os.path.join(
self.tcex.args.tc_temp_path,
'{}--{}--{}'.format(
api_branch, xid, content_data.get('fileName').replace('/', ':')
),
)
with open(fqfn, 'wb') as fh:
fh.write(content)
# Post File
url = '/v2/groups/{}/{}/upload'.format(api_branch, xid)
headers = {'Content-Type': 'application/octet-stream'}
params = {'owner': self._owner, 'updateIfExists': 'true'}
r = self.submit_file_content('POST', url, content, headers, params, halt_on_error)
if r.status_code == 401:
# use PUT method if file already exists
self.tcex.log.info('Received 401 status code using POST. Trying PUT to update.')
r = self.submit_file_content('PUT', url, content, headers, params, halt_on_error)
self.tcex.log.debug('{} Upload URL: {}.'.format(content_data.get('type'), r.url))
if not r.ok:
status = False
self.tcex.handle_error(585, [r.status_code, r.text], halt_on_error)
elif self.debug:
self.saved_xids.append(xid)
self.tcex.log.info('Status {} for file upload with xid {}.'.format(r.status_code, xid))
upload_status.append({'uploaded': status, 'xid': xid})
return upload_status
def submit_file_content(self, method, url, data, headers, params, halt_on_error=True):
"""Submit File Content for Documents and Reports to ThreatConnect API.
Args:
method (str): The HTTP method for the request (POST, PUT).
url (str): The URL for the request.
data (str;bytes;file): The body (data) for the request.
headers (dict): The headers for the request.
params (dict): The query string parameters for the request.
halt_on_error (bool, default:True): If True any exception will raise an error.
Returns:
requests.models.Response: The response from the request.
"""
r = None
try:
r = self.tcex.session.request(method, url, data=data, headers=headers, params=params)
except Exception as e:
self.tcex.handle_error(580, [e], halt_on_error)
return r
def submit_job(self, halt_on_error=True):
"""Submit Batch request to ThreatConnect API."""
# check global setting for override
if self.halt_on_batch_error is not None:
halt_on_error = self.halt_on_batch_error
try:
r = self.tcex.session.post('/v2/batch', json=self.settings)
        except Exception as e:
            self.tcex.handle_error(10505, [e], halt_on_error)
            return None  # the request failed; r is undefined, so bail out here
if not r.ok or 'application/json' not in r.headers.get('content-type', ''):
self.tcex.handle_error(10510, [r.status_code, r.text], halt_on_error)
data = r.json()
if data.get('status') != 'Success':
self.tcex.handle_error(10510, [r.status_code, r.text], halt_on_error)
self.tcex.log.debug('Batch Submit Data: {}'.format(data))
return data.get('data', {}).get('batchId')
def threat(self, name, **kwargs):
"""Add Threat data to Batch object
Args:
name (str): The name for this Group.
date_added (str, kwargs): The date timestamp the Indicator was created.
xid (str, kwargs): The external id for this Group.
Returns:
obj: An instance of Threat.
"""
group_obj = Threat(name, **kwargs)
return self._group(group_obj)
def user_agent(self, text, **kwargs):
"""Add User Agent data to Batch object
Args:
text (str): The value for this Indicator.
confidence (str, kwargs): The threat confidence for this Indicator.
date_added (str, kwargs): The date timestamp the Indicator was created.
last_modified (str, kwargs): The date timestamp the Indicator was last modified.
rating (str, kwargs): The threat rating for this Indicator.
xid (str, kwargs): The external id for this Indicator.
Returns:
obj: An instance of UserAgent.
"""
indicator_obj = UserAgent(text, **kwargs)
return self._indicator(indicator_obj)
def url(self, text, **kwargs):
"""Add URL Address data to Batch object.
Args:
text (str): The value for this Indicator.
confidence (str, kwargs): The threat confidence for this Indicator.
date_added (str, kwargs): The date timestamp the Indicator was created.
last_modified (str, kwargs): The date timestamp the Indicator was last modified.
rating (str, kwargs): The threat rating for this Indicator.
xid (str, kwargs): The external id for this Indicator.
Returns:
obj: An instance of URL.
"""
indicator_obj = URL(text, **kwargs)
return self._indicator(indicator_obj)
def write_batch_json(self, content):
"""Write batch json data to a file."""
timestamp = str(time.time()).replace('.', '')
batch_json_file = os.path.join(
self.tcex.args.tc_temp_path, 'batch-{}.json'.format(timestamp)
)
with open(batch_json_file, 'w') as fh:
json.dump(content, fh, indent=2)
@property
def file_len(self):
"""Return the number of current indicators."""
return len(self._files)
@property
def group_len(self):
"""Return the number of current groups."""
return len(self.groups) + len(self.groups_shelf)
@property
def indicator_len(self):
"""Return the number of current indicators."""
return len(self.indicators) + len(self.indicators_shelf)
def __len__(self):
"""Return the number of groups and indicators."""
return self.group_len + self.indicator_len
def __str__(self):
"""Return string represtentation of object."""
groups = []
for group_data in self.groups.values():
if isinstance(group_data, dict):
groups.append(group_data)
else:
groups.append(group_data.data)
for group_data in self.groups_shelf.values():
if isinstance(group_data, dict):
groups.append(group_data)
else:
groups.append(group_data.data)
indicators = []
for indicator_data in self.indicators.values():
if isinstance(indicator_data, dict):
indicators.append(indicator_data)
else:
indicators.append(indicator_data.data)
for indicator_data in self.indicators_shelf.values():
if isinstance(indicator_data, dict):
indicators.append(indicator_data)
else:
indicators.append(indicator_data.data)
data = {'group': groups, 'indicators': indicators}
return json.dumps(data, indent=4, sort_keys=True)
|
"""
High level interface to PyTables for reading and writing pandas data structures
to disk
"""
from __future__ import annotations
from contextlib import suppress
import copy
from datetime import (
date,
tzinfo,
)
import itertools
import os
import re
from textwrap import dedent
from typing import (
TYPE_CHECKING,
Any,
Callable,
Dict,
Hashable,
List,
Optional,
Sequence,
Tuple,
Type,
Union,
cast,
)
import warnings
import numpy as np
from pandas._config import (
config,
get_option,
)
from pandas._libs import (
lib,
writers as libwriters,
)
from pandas._libs.tslibs import timezones
from pandas._typing import (
ArrayLike,
DtypeArg,
FrameOrSeries,
FrameOrSeriesUnion,
Shape,
)
from pandas.compat._optional import import_optional_dependency
from pandas.compat.pickle_compat import patch_pickle
from pandas.errors import PerformanceWarning
from pandas.util._decorators import cache_readonly
from pandas.core.dtypes.common import (
ensure_object,
is_categorical_dtype,
is_complex_dtype,
is_datetime64_dtype,
is_datetime64tz_dtype,
is_extension_array_dtype,
is_list_like,
is_string_dtype,
is_timedelta64_dtype,
needs_i8_conversion,
)
from pandas.core.dtypes.missing import array_equivalent
from pandas import (
DataFrame,
DatetimeIndex,
Index,
Int64Index,
MultiIndex,
PeriodIndex,
Series,
TimedeltaIndex,
concat,
isna,
)
from pandas.core.arrays import (
Categorical,
DatetimeArray,
PeriodArray,
)
import pandas.core.common as com
from pandas.core.computation.pytables import (
PyTablesExpr,
maybe_expression,
)
from pandas.core.construction import extract_array
from pandas.core.indexes.api import ensure_index
from pandas.core.internals import BlockManager
from pandas.io.common import stringify_path
from pandas.io.formats.printing import (
adjoin,
pprint_thing,
)
if TYPE_CHECKING:
from tables import (
Col,
File,
Node,
)
from pandas.core.internals import Block
# versioning attribute
_version = "0.15.2"
# encoding
_default_encoding = "UTF-8"
def _ensure_decoded(s):
""" if we have bytes, decode them to unicode """
if isinstance(s, np.bytes_):
s = s.decode("UTF-8")
return s
def _ensure_encoding(encoding):
# set the encoding if we need
if encoding is None:
encoding = _default_encoding
return encoding
def _ensure_str(name):
"""
Ensure that an index / column name is a str (python 3); otherwise they
may be np.string dtype. Non-string dtypes are passed through unchanged.
https://github.com/pandas-dev/pandas/issues/13492
"""
if isinstance(name, str):
name = str(name)
return name
Term = PyTablesExpr
def _ensure_term(where, scope_level: int):
"""
Ensure that the where is a Term or a list of Term.
This makes sure that we are capturing the scope of variables that are
    passed; create the terms here with a frame_level=2 (we are 2 levels down)
"""
# only consider list/tuple here as an ndarray is automatically a coordinate
# list
level = scope_level + 1
if isinstance(where, (list, tuple)):
where = [
Term(term, scope_level=level + 1) if maybe_expression(term) else term
for term in where
if term is not None
]
elif maybe_expression(where):
where = Term(where, scope_level=level)
return where if where is None or len(where) else None
class PossibleDataLossError(Exception):
pass
class ClosedFileError(Exception):
pass
class IncompatibilityWarning(Warning):
pass
incompatibility_doc = """
where criteria is being ignored as this version [%s] is too old (or
not-defined), read the file in and write it out to a new file to upgrade (with
the copy_to method)
"""
class AttributeConflictWarning(Warning):
pass
attribute_conflict_doc = """
the [%s] attribute of the existing index is [%s] which conflicts with the new
[%s], resetting the attribute to None
"""
class DuplicateWarning(Warning):
pass
duplicate_doc = """
duplicate entries in table, taking most recently appended
"""
performance_doc = """
your performance may suffer as PyTables will pickle object types that it cannot
map directly to c-types [inferred_type->%s,key->%s] [items->%s]
"""
# formats
_FORMAT_MAP = {"f": "fixed", "fixed": "fixed", "t": "table", "table": "table"}
# axes map
_AXES_MAP = {DataFrame: [0]}
# register our configuration options
dropna_doc = """
: boolean
drop ALL nan rows when appending to a table
"""
format_doc = """
: format
default format writing format, if None, then
put will default to 'fixed' and append will default to 'table'
"""
with config.config_prefix("io.hdf"):
config.register_option("dropna_table", False, dropna_doc, validator=config.is_bool)
config.register_option(
"default_format",
None,
format_doc,
validator=config.is_one_of_factory(["fixed", "table", None]),
)
# oh the troubles to reduce import time
_table_mod = None
_table_file_open_policy_is_strict = False
def _tables():
global _table_mod
global _table_file_open_policy_is_strict
if _table_mod is None:
import tables
_table_mod = tables
# set the file open policy
# return the file open policy; this changes as of pytables 3.1
# depending on the HDF5 version
with suppress(AttributeError):
_table_file_open_policy_is_strict = (
tables.file._FILE_OPEN_POLICY == "strict"
)
return _table_mod
# interface to/from ###
def to_hdf(
path_or_buf,
key: str,
value: FrameOrSeries,
mode: str = "a",
complevel: Optional[int] = None,
complib: Optional[str] = None,
append: bool = False,
format: Optional[str] = None,
index: bool = True,
min_itemsize: Optional[Union[int, Dict[str, int]]] = None,
nan_rep=None,
dropna: Optional[bool] = None,
data_columns: Optional[Union[bool, List[str]]] = None,
errors: str = "strict",
encoding: str = "UTF-8",
):
""" store this object, close it if we opened it """
if append:
f = lambda store: store.append(
key,
value,
format=format,
index=index,
min_itemsize=min_itemsize,
nan_rep=nan_rep,
dropna=dropna,
data_columns=data_columns,
errors=errors,
encoding=encoding,
)
else:
# NB: dropna is not passed to `put`
f = lambda store: store.put(
key,
value,
format=format,
index=index,
min_itemsize=min_itemsize,
nan_rep=nan_rep,
data_columns=data_columns,
errors=errors,
encoding=encoding,
dropna=dropna,
)
path_or_buf = stringify_path(path_or_buf)
if isinstance(path_or_buf, str):
with HDFStore(
path_or_buf, mode=mode, complevel=complevel, complib=complib
) as store:
f(store)
else:
f(path_or_buf)
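# Usage sketch (illustrative): ``DataFrame.to_hdf`` dispatches to this helper, so the
# typical entry point looks like:
#
#     df = DataFrame({"a": [1, 2, 3]})
#     df.to_hdf("store.h5", key="data", mode="w", format="table")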
def read_hdf(
path_or_buf,
key=None,
mode: str = "r",
errors: str = "strict",
where=None,
start: Optional[int] = None,
stop: Optional[int] = None,
columns=None,
iterator=False,
chunksize: Optional[int] = None,
**kwargs,
):
"""
Read from the store, close it if we opened it.
Retrieve pandas object stored in file, optionally based on where
criteria.
.. warning::
Pandas uses PyTables for reading and writing HDF5 files, which allows
serializing object-dtype data with pickle when using the "fixed" format.
Loading pickled data received from untrusted sources can be unsafe.
See: https://docs.python.org/3/library/pickle.html for more.
Parameters
----------
path_or_buf : str, path object, pandas.HDFStore
Any valid string path is acceptable. Only supports the local file system,
remote URLs and file-like objects are not supported.
If you want to pass in a path object, pandas accepts any
``os.PathLike``.
Alternatively, pandas accepts an open :class:`pandas.HDFStore` object.
key : object, optional
The group identifier in the store. Can be omitted if the HDF file
contains a single pandas object.
mode : {'r', 'r+', 'a'}, default 'r'
Mode to use when opening the file. Ignored if path_or_buf is a
:class:`pandas.HDFStore`. Default is 'r'.
errors : str, default 'strict'
Specifies how encoding and decoding errors are to be handled.
See the errors argument for :func:`open` for a full list
of options.
where : list, optional
A list of Term (or convertible) objects.
start : int, optional
Row number to start selection.
stop : int, optional
Row number to stop selection.
columns : list, optional
A list of columns names to return.
iterator : bool, optional
Return an iterator object.
chunksize : int, optional
Number of rows to include in an iteration when using an iterator.
**kwargs
Additional keyword arguments passed to HDFStore.
Returns
-------
item : object
The selected object. Return type depends on the object stored.
See Also
--------
DataFrame.to_hdf : Write a HDF file from a DataFrame.
HDFStore : Low-level access to HDF files.
Examples
--------
>>> df = pd.DataFrame([[1, 1.0, 'a']], columns=['x', 'y', 'z'])
>>> df.to_hdf('./store.h5', 'data')
>>> reread = pd.read_hdf('./store.h5')
"""
if mode not in ["r", "r+", "a"]:
raise ValueError(
f"mode {mode} is not allowed while performing a read. "
f"Allowed modes are r, r+ and a."
)
# grab the scope
if where is not None:
where = _ensure_term(where, scope_level=1)
if isinstance(path_or_buf, HDFStore):
if not path_or_buf.is_open:
raise OSError("The HDFStore must be open for reading.")
store = path_or_buf
auto_close = False
else:
path_or_buf = stringify_path(path_or_buf)
if not isinstance(path_or_buf, str):
raise NotImplementedError(
"Support for generic buffers has not been implemented."
)
try:
exists = os.path.exists(path_or_buf)
# if filepath is too long
except (TypeError, ValueError):
exists = False
if not exists:
raise FileNotFoundError(f"File {path_or_buf} does not exist")
store = HDFStore(path_or_buf, mode=mode, errors=errors, **kwargs)
# can't auto open/close if we are using an iterator
# so delegate to the iterator
auto_close = True
try:
if key is None:
groups = store.groups()
if len(groups) == 0:
raise ValueError(
"Dataset(s) incompatible with Pandas data types, "
"not table, or no datasets found in HDF5 file."
)
candidate_only_group = groups[0]
# For the HDF file to have only one dataset, all other groups
# should then be metadata groups for that candidate group. (This
# assumes that the groups() method enumerates parent groups
# before their children.)
for group_to_check in groups[1:]:
if not _is_metadata_of(group_to_check, candidate_only_group):
raise ValueError(
"key must be provided when HDF5 "
"file contains multiple datasets."
)
key = candidate_only_group._v_pathname
return store.select(
key,
where=where,
start=start,
stop=stop,
columns=columns,
iterator=iterator,
chunksize=chunksize,
auto_close=auto_close,
)
except (ValueError, TypeError, KeyError):
if not isinstance(path_or_buf, HDFStore):
# if there is an error, close the store if we opened it.
with suppress(AttributeError):
store.close()
raise
def _is_metadata_of(group: Node, parent_group: Node) -> bool:
"""Check if a given group is a metadata group for a given parent_group."""
if group._v_depth <= parent_group._v_depth:
return False
current = group
while current._v_depth > 1:
parent = current._v_parent
if parent == parent_group and current._v_name == "meta":
return True
current = current._v_parent
return False
class HDFStore:
"""
Dict-like IO interface for storing pandas objects in PyTables.
Either Fixed or Table format.
.. warning::
Pandas uses PyTables for reading and writing HDF5 files, which allows
serializing object-dtype data with pickle when using the "fixed" format.
Loading pickled data received from untrusted sources can be unsafe.
See: https://docs.python.org/3/library/pickle.html for more.
Parameters
----------
path : str
File path to HDF5 file.
mode : {'a', 'w', 'r', 'r+'}, default 'a'
``'r'``
Read-only; no data can be modified.
``'w'``
Write; a new file is created (an existing file with the same
name would be deleted).
``'a'``
Append; an existing file is opened for reading and writing,
and if the file does not exist it is created.
``'r+'``
It is similar to ``'a'``, but the file must already exist.
complevel : int, 0-9, default None
Specifies a compression level for data.
A value of 0 or None disables compression.
complib : {'zlib', 'lzo', 'bzip2', 'blosc'}, default 'zlib'
Specifies the compression library to be used.
As of v0.20.2 these additional compressors for Blosc are supported
(default if no compressor specified: 'blosc:blosclz'):
{'blosc:blosclz', 'blosc:lz4', 'blosc:lz4hc', 'blosc:snappy',
'blosc:zlib', 'blosc:zstd'}.
Specifying a compression library which is not available issues
a ValueError.
fletcher32 : bool, default False
If applying compression use the fletcher32 checksum.
**kwargs
These parameters will be passed to the PyTables open_file method.
Examples
--------
>>> bar = pd.DataFrame(np.random.randn(10, 4))
>>> store = pd.HDFStore('test.h5')
>>> store['foo'] = bar # write to HDF5
>>> bar = store['foo'] # retrieve
>>> store.close()
**Create or load HDF5 file in-memory**
When passing the `driver` option to the PyTables open_file method through
**kwargs, the HDF5 file is loaded or created in-memory and will only be
written when closed:
>>> bar = pd.DataFrame(np.random.randn(10, 4))
>>> store = pd.HDFStore('test.h5', driver='H5FD_CORE')
>>> store['foo'] = bar
>>> store.close() # only now, data is written to disk
"""
_handle: Optional[File]
_mode: str
_complevel: int
_fletcher32: bool
def __init__(
self,
path,
mode: str = "a",
complevel: Optional[int] = None,
complib=None,
fletcher32: bool = False,
**kwargs,
):
if "format" in kwargs:
raise ValueError("format is not a defined argument for HDFStore")
tables = import_optional_dependency("tables")
if complib is not None and complib not in tables.filters.all_complibs:
raise ValueError(
f"complib only supports {tables.filters.all_complibs} compression."
)
if complib is None and complevel is not None:
complib = tables.filters.default_complib
self._path = stringify_path(path)
if mode is None:
mode = "a"
self._mode = mode
self._handle = None
self._complevel = complevel if complevel else 0
self._complib = complib
self._fletcher32 = fletcher32
self._filters = None
self.open(mode=mode, **kwargs)
def __fspath__(self):
return self._path
@property
def root(self):
""" return the root node """
self._check_if_open()
assert self._handle is not None # for mypy
return self._handle.root
@property
def filename(self):
return self._path
def __getitem__(self, key: str):
return self.get(key)
def __setitem__(self, key: str, value):
self.put(key, value)
def __delitem__(self, key: str):
return self.remove(key)
def __getattr__(self, name: str):
""" allow attribute access to get stores """
try:
return self.get(name)
except (KeyError, ClosedFileError):
pass
raise AttributeError(
f"'{type(self).__name__}' object has no attribute '{name}'"
)
def __contains__(self, key: str) -> bool:
"""
check for existence of this key
can match the exact pathname or the pathname w/o the leading '/'
"""
node = self.get_node(key)
if node is not None:
name = node._v_pathname
if name == key or name[1:] == key:
return True
return False
def __len__(self) -> int:
return len(self.groups())
def __repr__(self) -> str:
pstr = pprint_thing(self._path)
return f"{type(self)}\nFile path: {pstr}\n"
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, traceback):
self.close()
def keys(self, include: str = "pandas") -> List[str]:
"""
Return a list of keys corresponding to objects stored in HDFStore.
Parameters
----------
include : str, default 'pandas'
When include equals 'pandas' return pandas objects.
When include equals 'native' return native HDF5 Table objects.
.. versionadded:: 1.1.0
Returns
-------
list
List of ABSOLUTE path-names (e.g. have the leading '/').
Raises
------
raises ValueError if include has an illegal value
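Examples
--------
A minimal usage sketch; the file name ``store.h5`` and key ``'data'`` are
illustrative only:
>>> df = pd.DataFrame([[1, 2], [3, 4]], columns=['A', 'B'])
>>> store = pd.HDFStore('store.h5', 'w')
>>> store.put('data', df)
>>> store.keys()
['/data']
>>> store.close()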
"""
if include == "pandas":
return [n._v_pathname for n in self.groups()]
elif include == "native":
assert self._handle is not None # mypy
return [
n._v_pathname for n in self._handle.walk_nodes("/", classname="Table")
]
raise ValueError(
f"`include` should be either 'pandas' or 'native' but is '{include}'"
)
def __iter__(self):
return iter(self.keys())
def items(self):
"""
iterate on key->group
"""
for g in self.groups():
yield g._v_pathname, g
iteritems = items
def open(self, mode: str = "a", **kwargs):
"""
Open the file in the specified mode
Parameters
----------
mode : {'a', 'w', 'r', 'r+'}, default 'a'
See HDFStore docstring or tables.open_file for info about modes
**kwargs
These parameters will be passed to the PyTables open_file method.
"""
tables = _tables()
if self._mode != mode:
# if we are changing a write mode to read, ok
if self._mode in ["a", "w"] and mode in ["r", "r+"]:
pass
elif mode in ["w"]:
# this would truncate, raise here
if self.is_open:
raise PossibleDataLossError(
f"Re-opening the file [{self._path}] with mode [{self._mode}] "
"will delete the current file!"
)
self._mode = mode
# close and reopen the handle
if self.is_open:
self.close()
if self._complevel and self._complevel > 0:
self._filters = _tables().Filters(
self._complevel, self._complib, fletcher32=self._fletcher32
)
if _table_file_open_policy_is_strict and self.is_open:
msg = (
"Cannot open HDF5 file, which is already opened, "
"even in read-only mode."
)
raise ValueError(msg)
self._handle = tables.open_file(self._path, self._mode, **kwargs)
def close(self):
"""
Close the PyTables file handle
"""
if self._handle is not None:
self._handle.close()
self._handle = None
@property
def is_open(self) -> bool:
"""
return a boolean indicating whether the file is open
"""
if self._handle is None:
return False
return bool(self._handle.isopen)
def flush(self, fsync: bool = False):
"""
Force all buffered modifications to be written to disk.
Parameters
----------
fsync : bool (default False)
call ``os.fsync()`` on the file handle to force writing to disk.
Notes
-----
Without ``fsync=True``, flushing may not guarantee that the OS writes
to disk. With fsync, the operation will block until the OS claims the
file has been written; however, other caching layers may still
interfere.
"""
if self._handle is not None:
self._handle.flush()
if fsync:
with suppress(OSError):
os.fsync(self._handle.fileno())
def get(self, key: str):
"""
Retrieve pandas object stored in file.
Parameters
----------
key : str
Returns
-------
object
Same type as object stored in file.
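Examples
--------
A minimal usage sketch; the file name and key are illustrative:
>>> df = pd.DataFrame([[1, 2], [3, 4]], columns=['A', 'B'])
>>> store = pd.HDFStore('store.h5', 'w')
>>> store.put('data', df)
>>> store.get('data')  # equivalent to store['data']
   A  B
0  1  2
1  3  4
>>> store.close()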
"""
with patch_pickle():
# GH#31167 Without this patch, pickle doesn't know how to unpickle
# old DateOffset objects now that they are cdef classes.
group = self.get_node(key)
if group is None:
raise KeyError(f"No object named {key} in the file")
return self._read_group(group)
def select(
self,
key: str,
where=None,
start=None,
stop=None,
columns=None,
iterator=False,
chunksize=None,
auto_close: bool = False,
):
"""
Retrieve pandas object stored in file, optionally based on where criteria.
.. warning::
Pandas uses PyTables for reading and writing HDF5 files, which allows
serializing object-dtype data with pickle when using the "fixed" format.
Loading pickled data received from untrusted sources can be unsafe.
See: https://docs.python.org/3/library/pickle.html for more.
Parameters
----------
key : str
Object being retrieved from file.
where : list or None
List of Term (or convertible) objects, optional.
start : int or None
Row number to start selection.
stop : int, default None
Row number to stop selection.
columns : list or None
A list of columns that if not None, will limit the return columns.
iterator : bool or False
Returns an iterator.
chunksize : int or None
Number of rows to include in iteration, return an iterator.
auto_close : bool or False
Should automatically close the store when finished.
Returns
-------
object
Retrieved object from file.
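Examples
--------
A minimal usage sketch; the key and the where clause are illustrative:
>>> df = pd.DataFrame([[1, 2], [3, 4]], columns=['A', 'B'])
>>> store = pd.HDFStore('store.h5', 'w')
>>> store.put('data', df, format='table')
>>> store.select('data', where='index > 0')
   A  B
1  3  4
>>> store.close()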
"""
group = self.get_node(key)
if group is None:
raise KeyError(f"No object named {key} in the file")
# create the storer and axes
where = _ensure_term(where, scope_level=1)
s = self._create_storer(group)
s.infer_axes()
# function to call on iteration
def func(_start, _stop, _where):
return s.read(start=_start, stop=_stop, where=_where, columns=columns)
# create the iterator
it = TableIterator(
self,
s,
func,
where=where,
nrows=s.nrows,
start=start,
stop=stop,
iterator=iterator,
chunksize=chunksize,
auto_close=auto_close,
)
return it.get_result()
def select_as_coordinates(
self,
key: str,
where=None,
start: Optional[int] = None,
stop: Optional[int] = None,
):
"""
return the selection as an Index
.. warning::
Pandas uses PyTables for reading and writing HDF5 files, which allows
serializing object-dtype data with pickle when using the "fixed" format.
Loading pickled data received from untrusted sources can be unsafe.
See: https://docs.python.org/3/library/pickle.html for more.
Parameters
----------
key : str
where : list of Term (or convertible) objects, optional
start : integer (defaults to None), row number to start selection
stop : integer (defaults to None), row number to stop selection
"""
where = _ensure_term(where, scope_level=1)
tbl = self.get_storer(key)
if not isinstance(tbl, Table):
raise TypeError("can only read_coordinates with a table")
return tbl.read_coordinates(where=where, start=start, stop=stop)
def select_column(
self,
key: str,
column: str,
start: Optional[int] = None,
stop: Optional[int] = None,
):
"""
return a single column from the table. This is generally only useful to
select an indexable
.. warning::
Pandas uses PyTables for reading and writing HDF5 files, which allows
serializing object-dtype data with pickle when using the "fixed" format.
Loading pickled data received from untrusted sources can be unsafe.
See: https://docs.python.org/3/library/pickle.html for more.
Parameters
----------
key : str
column : str
The column of interest.
start : int or None, default None
stop : int or None, default None
Raises
------
raises KeyError if the column is not found (or key is not a valid
store)
raises ValueError if the column can not be extracted individually (it
is part of a data block)
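Examples
--------
A minimal usage sketch; the key and column name are illustrative:
>>> df = pd.DataFrame([[1, 2], [3, 4]], columns=['A', 'B'])
>>> store = pd.HDFStore('store.h5', 'w')
>>> store.append('data', df, data_columns=['A'])
>>> store.select_column('data', 'A')
0    1
1    3
Name: A, dtype: int64
>>> store.close()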
"""
tbl = self.get_storer(key)
if not isinstance(tbl, Table):
raise TypeError("can only read_column with a table")
return tbl.read_column(column=column, start=start, stop=stop)
def select_as_multiple(
self,
keys,
where=None,
selector=None,
columns=None,
start=None,
stop=None,
iterator=False,
chunksize=None,
auto_close: bool = False,
):
"""
Retrieve pandas objects from multiple tables.
.. warning::
Pandas uses PyTables for reading and writing HDF5 files, which allows
serializing object-dtype data with pickle when using the "fixed" format.
Loading pickled data received from untrusted sources can be unsafe.
See: https://docs.python.org/3/library/pickle.html for more.
Parameters
----------
keys : a list of the tables
selector : the table to apply the where criteria (defaults to keys[0]
if not supplied)
columns : the columns I want back
start : integer (defaults to None), row number to start selection
stop : integer (defaults to None), row number to stop selection
iterator : bool, return an iterator, default False
chunksize : nrows to include in iteration, return an iterator
auto_close : bool, default False
Should automatically close the store when finished.
Raises
------
raises KeyError if keys or selector is not found or keys is empty
raises TypeError if keys is not a list or tuple
raises ValueError if the tables are not ALL THE SAME DIMENSIONS
"""
# default to single select
where = _ensure_term(where, scope_level=1)
if isinstance(keys, (list, tuple)) and len(keys) == 1:
keys = keys[0]
if isinstance(keys, str):
return self.select(
key=keys,
where=where,
columns=columns,
start=start,
stop=stop,
iterator=iterator,
chunksize=chunksize,
auto_close=auto_close,
)
if not isinstance(keys, (list, tuple)):
raise TypeError("keys must be a list/tuple")
if not len(keys):
raise ValueError("keys must have a non-zero length")
if selector is None:
selector = keys[0]
# collect the tables
tbls = [self.get_storer(k) for k in keys]
s = self.get_storer(selector)
# validate rows
nrows = None
for t, k in itertools.chain([(s, selector)], zip(tbls, keys)):
if t is None:
raise KeyError(f"Invalid table [{k}]")
if not t.is_table:
raise TypeError(
f"object [{t.pathname}] is not a table, and cannot be used in all "
"select as multiple"
)
if nrows is None:
nrows = t.nrows
elif t.nrows != nrows:
raise ValueError("all tables must have exactly the same nrows!")
# The isinstance checks here are redundant with the check above,
# but necessary for mypy; see GH#29757
_tbls = [x for x in tbls if isinstance(x, Table)]
# axis is the concatenation axis
axis = list({t.non_index_axes[0][0] for t in _tbls})[0]
def func(_start, _stop, _where):
# retrieve the objs, _where is always passed as a set of
# coordinates here
objs = [
t.read(where=_where, columns=columns, start=_start, stop=_stop)
for t in tbls
]
# concat and return
return concat(objs, axis=axis, verify_integrity=False)._consolidate()
# create the iterator
it = TableIterator(
self,
s,
func,
where=where,
nrows=nrows,
start=start,
stop=stop,
iterator=iterator,
chunksize=chunksize,
auto_close=auto_close,
)
return it.get_result(coordinates=True)
def put(
self,
key: str,
value: FrameOrSeries,
format=None,
index=True,
append=False,
complib=None,
complevel: Optional[int] = None,
min_itemsize: Optional[Union[int, Dict[str, int]]] = None,
nan_rep=None,
data_columns: Optional[List[str]] = None,
encoding=None,
errors: str = "strict",
track_times: bool = True,
dropna: bool = False,
):
"""
Store object in HDFStore.
Parameters
----------
key : str
value : {Series, DataFrame}
format : 'fixed(f)|table(t)', default is 'fixed'
Format to use when storing object in HDFStore. Value can be one of:
``'fixed'``
Fixed format. Fast writing/reading. Not-appendable, nor searchable.
``'table'``
Table format. Write as a PyTables Table structure which may perform
worse but allow more flexible operations like searching / selecting
subsets of the data.
append : bool, default False
This will force Table format, append the input data to the existing.
data_columns : list, default None
List of columns to create as data columns, or True to use all columns.
See `here
<https://pandas.pydata.org/pandas-docs/stable/user_guide/io.html#query-via-data-columns>`__.
encoding : str, default None
Provide an encoding for strings.
track_times : bool, default True
Parameter is propagated to 'create_table' method of 'PyTables'.
If set to False it enables having the same h5 files (same hashes)
independent of the creation time.
.. versionadded:: 1.1.0
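Examples
--------
A minimal usage sketch; the file name, key and data columns are illustrative:
>>> df = pd.DataFrame([[1, 2], [3, 4]], columns=['A', 'B'])
>>> store = pd.HDFStore('store.h5', 'w')
>>> store.put('data', df, format='table', data_columns=['A'])
>>> store.select('data', where='A > 1')
   A  B
1  3  4
>>> store.close()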
"""
if format is None:
format = get_option("io.hdf.default_format") or "fixed"
format = self._validate_format(format)
self._write_to_group(
key,
value,
format=format,
index=index,
append=append,
complib=complib,
complevel=complevel,
min_itemsize=min_itemsize,
nan_rep=nan_rep,
data_columns=data_columns,
encoding=encoding,
errors=errors,
track_times=track_times,
dropna=dropna,
)
def remove(self, key: str, where=None, start=None, stop=None):
"""
Remove pandas object partially by specifying the where condition
Parameters
----------
key : str
Node to remove or delete rows from
where : list of Term (or convertible) objects, optional
start : integer (defaults to None), row number to start selection
stop : integer (defaults to None), row number to stop selection
Returns
-------
number of rows removed (or None if not a Table)
Raises
------
raises KeyError if key is not a valid store
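Examples
--------
A minimal usage sketch; the key and where clause are illustrative:
>>> df = pd.DataFrame([[1, 2], [3, 4]], columns=['A', 'B'])
>>> store = pd.HDFStore('store.h5', 'w')
>>> store.put('data', df, format='table')
>>> store.remove('data', where='index > 0')  # delete matching rows
1
>>> store.remove('data')  # delete the whole node
>>> store.close()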
"""
where = _ensure_term(where, scope_level=1)
try:
s = self.get_storer(key)
except KeyError:
# the key is not a valid store, re-raising KeyError
raise
except AssertionError:
# surface any assertion errors for e.g. debugging
raise
except Exception as err:
# In tests we get here with ClosedFileError, TypeError, and
# _table_mod.NoSuchNodeError. TODO: Catch only these?
if where is not None:
raise ValueError(
"trying to remove a node with a non-None where clause!"
) from err
# we are actually trying to remove a node (with children)
node = self.get_node(key)
if node is not None:
node._f_remove(recursive=True)
return None
# remove the node
if com.all_none(where, start, stop):
s.group._f_remove(recursive=True)
# delete from the table
else:
if not s.is_table:
raise ValueError(
"can only remove with where on objects written as tables"
)
return s.delete(where=where, start=start, stop=stop)
def append(
self,
key: str,
value: FrameOrSeries,
format=None,
axes=None,
index=True,
append=True,
complib=None,
complevel: Optional[int] = None,
columns=None,
min_itemsize: Optional[Union[int, Dict[str, int]]] = None,
nan_rep=None,
chunksize=None,
expectedrows=None,
dropna: Optional[bool] = None,
data_columns: Optional[List[str]] = None,
encoding=None,
errors: str = "strict",
):
"""
Append to Table in file. Node must already exist and be Table
format.
Parameters
----------
key : str
value : {Series, DataFrame}
format : 'table' is the default
Format to use when storing object in HDFStore. Value can be one of:
``'table'``
Table format. Write as a PyTables Table structure which may perform
worse but allow more flexible operations like searching / selecting
subsets of the data.
append : bool, default True
Append the input data to the existing.
data_columns : list of columns, or True, default None
List of columns to create as indexed data columns for on-disk
queries, or True to use all columns. By default only the axes
of the object are indexed. See `here
<https://pandas.pydata.org/pandas-docs/stable/user_guide/io.html#query-via-data-columns>`__.
min_itemsize : dict of columns that specify minimum str sizes
nan_rep : str to use as str nan representation
chunksize : size to chunk the writing
expectedrows : expected TOTAL row size of this table
encoding : default None, provide an encoding for str
dropna : bool, default False
Do not write an ALL nan row to the store, settable
by the option 'io.hdf.dropna_table'.
Notes
-----
Does *not* check if data being appended overlaps with existing
data in the table, so be careful
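Examples
--------
A minimal usage sketch; the key and frames are illustrative:
>>> df1 = pd.DataFrame([[1, 2], [3, 4]], columns=['A', 'B'])
>>> df2 = pd.DataFrame([[5, 6], [7, 8]], columns=['A', 'B'])
>>> store = pd.HDFStore('store.h5', 'w')
>>> store.put('data', df1, format='table')
>>> store.append('data', df2)
>>> store.select('data')
   A  B
0  1  2
1  3  4
0  5  6
1  7  8
>>> store.close()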
"""
if columns is not None:
raise TypeError(
"columns is not a supported keyword in append, try data_columns"
)
if dropna is None:
dropna = get_option("io.hdf.dropna_table")
if format is None:
format = get_option("io.hdf.default_format") or "table"
format = self._validate_format(format)
self._write_to_group(
key,
value,
format=format,
axes=axes,
index=index,
append=append,
complib=complib,
complevel=complevel,
min_itemsize=min_itemsize,
nan_rep=nan_rep,
chunksize=chunksize,
expectedrows=expectedrows,
dropna=dropna,
data_columns=data_columns,
encoding=encoding,
errors=errors,
)
def append_to_multiple(
self,
d: Dict,
value,
selector,
data_columns=None,
axes=None,
dropna=False,
**kwargs,
):
"""
Append to multiple tables
Parameters
----------
d : a dict of table_name to table_columns, None is acceptable as the
values of one node (this will get all the remaining columns)
value : a pandas object
selector : a string that designates the indexable table; all of its
columns will be designated as data_columns, unless data_columns is
passed, in which case these are used
data_columns : list of columns to create as data columns, or True to
use all columns
dropna : if evaluates to True, drop a row from all tables when that
row is all NaN in any one table's columns. Default False.
Notes
-----
axes parameter is currently not accepted
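Examples
--------
A minimal usage sketch; the table names and the column split are illustrative:
>>> df = pd.DataFrame({'A': [1, 2], 'B': [3, 4], 'C': [5, 6]})
>>> store = pd.HDFStore('store.h5', 'w')
>>> store.append_to_multiple({'t1': ['A'], 't2': None}, df, selector='t1')
>>> store.select_as_multiple(['t1', 't2'], where='A > 1', selector='t1')
   A  B  C
1  2  4  6
>>> store.close()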
"""
if axes is not None:
raise TypeError(
"axes is currently not accepted as a parameter to append_to_multiple; "
"you can create the tables independently instead"
)
if not isinstance(d, dict):
raise ValueError(
"append_to_multiple must have a dictionary specified as the "
"way to split the value"
)
if selector not in d:
raise ValueError(
"append_to_multiple requires a selector that is in passed dict"
)
# figure out the splitting axis (the non_index_axis)
axis = list(set(range(value.ndim)) - set(_AXES_MAP[type(value)]))[0]
# figure out how to split the value
remain_key = None
remain_values: List = []
for k, v in d.items():
if v is None:
if remain_key is not None:
raise ValueError(
"append_to_multiple can only have one value in d that is None"
)
remain_key = k
else:
remain_values.extend(v)
if remain_key is not None:
ordered = value.axes[axis]
ordd = ordered.difference(Index(remain_values))
ordd = sorted(ordered.get_indexer(ordd))
d[remain_key] = ordered.take(ordd)
# data_columns
if data_columns is None:
data_columns = d[selector]
# ensure rows are synchronized across the tables
if dropna:
idxs = (value[cols].dropna(how="all").index for cols in d.values())
valid_index = next(idxs)
for index in idxs:
valid_index = valid_index.intersection(index)
value = value.loc[valid_index]
min_itemsize = kwargs.pop("min_itemsize", None)
# append
for k, v in d.items():
dc = data_columns if k == selector else None
# compute the val
val = value.reindex(v, axis=axis)
filtered = (
{key: value for (key, value) in min_itemsize.items() if key in v}
if min_itemsize is not None
else None
)
self.append(k, val, data_columns=dc, min_itemsize=filtered, **kwargs)
def create_table_index(
self,
key: str,
columns=None,
optlevel: Optional[int] = None,
kind: Optional[str] = None,
):
"""
Create a pytables index on the table.
Parameters
----------
key : str
columns : None, bool, or listlike[str]
Indicate which columns to create an index on.
* False : Do not create any indexes.
* True : Create indexes on all columns.
* None : Create indexes on all columns.
* listlike : Create indexes on the given columns.
optlevel : int or None, default None
Optimization level, if None, pytables defaults to 6.
kind : str or None, default None
Kind of index, if None, pytables defaults to "medium".
Raises
------
TypeError: raises if the node is not a table
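Examples
--------
A minimal usage sketch; the key, column and index options are illustrative:
>>> df = pd.DataFrame([[1, 2], [3, 4]], columns=['A', 'B'])
>>> store = pd.HDFStore('store.h5', 'w')
>>> store.append('data', df, data_columns=['A'], index=False)
>>> store.create_table_index('data', columns=['A'], optlevel=9, kind='full')
>>> store.close()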
"""
# version requirements
_tables()
s = self.get_storer(key)
if s is None:
return
if not isinstance(s, Table):
raise TypeError("cannot create table index on a Fixed format store")
s.create_index(columns=columns, optlevel=optlevel, kind=kind)
def groups(self):
"""
Return a list of all the top-level nodes.
Each node returned is not a pandas storage object.
Returns
-------
list
List of objects.
"""
_tables()
self._check_if_open()
assert self._handle is not None # for mypy
assert _table_mod is not None # for mypy
return [
g
for g in self._handle.walk_groups()
if (
not isinstance(g, _table_mod.link.Link)
and (
getattr(g._v_attrs, "pandas_type", None)
or getattr(g, "table", None)
or (isinstance(g, _table_mod.table.Table) and g._v_name != "table")
)
)
]
def walk(self, where="/"):
"""
Walk the pytables group hierarchy for pandas objects.
This generator will yield the group path, subgroups and pandas object
names for each group.
Any non-pandas PyTables objects that are not a group will be ignored.
The `where` group itself is listed first (preorder), then each of its
child groups (following an alphanumerical order) is also traversed,
following the same procedure.
.. versionadded:: 0.24.0
Parameters
----------
where : str, default "/"
Group where to start walking.
Yields
------
path : str
Full path to a group (without trailing '/').
groups : list
Names (strings) of the groups contained in `path`.
leaves : list
Names (strings) of the pandas objects contained in `path`.
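Examples
--------
A minimal usage sketch; the group and key names are illustrative:
>>> df = pd.DataFrame([[1, 2], [3, 4]], columns=['A', 'B'])
>>> store = pd.HDFStore('store.h5', 'w')
>>> store.put('grp/data', df)
>>> [leaves for _, _, leaves in store.walk()]
[[], ['data']]
>>> store.close()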
"""
_tables()
self._check_if_open()
assert self._handle is not None # for mypy
assert _table_mod is not None # for mypy
for g in self._handle.walk_groups(where):
if getattr(g._v_attrs, "pandas_type", None) is not None:
continue
groups = []
leaves = []
for child in g._v_children.values():
pandas_type = getattr(child._v_attrs, "pandas_type", None)
if pandas_type is None:
if isinstance(child, _table_mod.group.Group):
groups.append(child._v_name)
else:
leaves.append(child._v_name)
yield (g._v_pathname.rstrip("/"), groups, leaves)
def get_node(self, key: str) -> Optional[Node]:
""" return the node with the key or None if it does not exist """
self._check_if_open()
if not key.startswith("/"):
key = "/" + key
assert self._handle is not None
assert _table_mod is not None # for mypy
try:
node = self._handle.get_node(self.root, key)
except _table_mod.exceptions.NoSuchNodeError:
return None
assert isinstance(node, _table_mod.Node), type(node)
return node
def get_storer(self, key: str) -> Union[GenericFixed, Table]:
""" return the storer object for a key, raise if not in the file """
group = self.get_node(key)
if group is None:
raise KeyError(f"No object named {key} in the file")
s = self._create_storer(group)
s.infer_axes()
return s
def copy(
self,
file,
mode="w",
propindexes: bool = True,
keys=None,
complib=None,
complevel: Optional[int] = None,
fletcher32: bool = False,
overwrite=True,
):
"""
Copy the existing store to a new file, updating in place.
Parameters
----------
propindexes : bool, default True
Restore indexes in copied file.
keys : list, optional
List of keys to include in the copy (defaults to all).
overwrite : bool, default True
Whether to overwrite (remove and replace) existing nodes in the new store.
mode, complib, complevel, fletcher32 same as in HDFStore.__init__
Returns
-------
open file handle of the new store
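Examples
--------
A minimal usage sketch; the file names are illustrative:
>>> df = pd.DataFrame([[1, 2], [3, 4]], columns=['A', 'B'])
>>> store = pd.HDFStore('store.h5', 'w')
>>> store.put('data', df, format='table')
>>> new_store = store.copy('copy.h5')
>>> new_store.keys()
['/data']
>>> new_store.close()
>>> store.close()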
"""
new_store = HDFStore(
file, mode=mode, complib=complib, complevel=complevel, fletcher32=fletcher32
)
if keys is None:
keys = list(self.keys())
if not isinstance(keys, (tuple, list)):
keys = [keys]
for k in keys:
s = self.get_storer(k)
if s is not None:
if k in new_store:
if overwrite:
new_store.remove(k)
data = self.select(k)
if isinstance(s, Table):
index: Union[bool, List[str]] = False
if propindexes:
index = [a.name for a in s.axes if a.is_indexed]
new_store.append(
k,
data,
index=index,
data_columns=getattr(s, "data_columns", None),
encoding=s.encoding,
)
else:
new_store.put(k, data, encoding=s.encoding)
return new_store
def info(self) -> str:
"""
Return detailed information on the store.
Returns
-------
str
"""
path = pprint_thing(self._path)
output = f"{type(self)}\nFile path: {path}\n"
if self.is_open:
lkeys = sorted(self.keys())
if len(lkeys):
keys = []
values = []
for k in lkeys:
try:
s = self.get_storer(k)
if s is not None:
keys.append(pprint_thing(s.pathname or k))
values.append(pprint_thing(s or "invalid_HDFStore node"))
except AssertionError:
# surface any assertion errors for e.g. debugging
raise
except Exception as detail:
keys.append(k)
dstr = pprint_thing(detail)
values.append(f"[invalid_HDFStore node: {dstr}]")
output += adjoin(12, keys, values)
else:
output += "Empty"
else:
output += "File is CLOSED"
return output
# ------------------------------------------------------------------------
# private methods
def _check_if_open(self):
if not self.is_open:
raise ClosedFileError(f"{self._path} file is not open!")
def _validate_format(self, format: str) -> str:
""" validate / deprecate formats """
# validate
try:
format = _FORMAT_MAP[format.lower()]
except KeyError as err:
raise TypeError(f"invalid HDFStore format specified [{format}]") from err
return format
def _create_storer(
self,
group,
format=None,
value: Optional[FrameOrSeries] = None,
encoding: str = "UTF-8",
errors: str = "strict",
) -> Union[GenericFixed, Table]:
""" return a suitable class to operate """
cls: Union[Type[GenericFixed], Type[Table]]
if value is not None and not isinstance(value, (Series, DataFrame)):
raise TypeError("value must be None, Series, or DataFrame")
def error(t):
# return instead of raising so mypy can tell where we are raising
return TypeError(
f"cannot properly create the storer for: [{t}] [group->"
f"{group},value->{type(value)},format->{format}"
)
pt = _ensure_decoded(getattr(group._v_attrs, "pandas_type", None))
tt = _ensure_decoded(getattr(group._v_attrs, "table_type", None))
# infer the pt from the passed value
if pt is None:
if value is None:
_tables()
assert _table_mod is not None # for mypy
if getattr(group, "table", None) or isinstance(
group, _table_mod.table.Table
):
pt = "frame_table"
tt = "generic_table"
else:
raise TypeError(
"cannot create a storer if the object is not existing "
"nor a value are passed"
)
else:
if isinstance(value, Series):
pt = "series"
else:
pt = "frame"
# we are actually a table
if format == "table":
pt += "_table"
# a storer node
if "table" not in pt:
_STORER_MAP = {"series": SeriesFixed, "frame": FrameFixed}
try:
cls = _STORER_MAP[pt]
except KeyError as err:
raise error("_STORER_MAP") from err
return cls(self, group, encoding=encoding, errors=errors)
# existing node (and must be a table)
if tt is None:
# if we are a writer, determine the tt
if value is not None:
if pt == "series_table":
index = getattr(value, "index", None)
if index is not None:
if index.nlevels == 1:
tt = "appendable_series"
elif index.nlevels > 1:
tt = "appendable_multiseries"
elif pt == "frame_table":
index = getattr(value, "index", None)
if index is not None:
if index.nlevels == 1:
tt = "appendable_frame"
elif index.nlevels > 1:
tt = "appendable_multiframe"
_TABLE_MAP = {
"generic_table": GenericTable,
"appendable_series": AppendableSeriesTable,
"appendable_multiseries": AppendableMultiSeriesTable,
"appendable_frame": AppendableFrameTable,
"appendable_multiframe": AppendableMultiFrameTable,
"worm": WORMTable,
}
try:
cls = _TABLE_MAP[tt]
except KeyError as err:
raise error("_TABLE_MAP") from err
return cls(self, group, encoding=encoding, errors=errors)
def _write_to_group(
self,
key: str,
value: FrameOrSeries,
format,
axes=None,
index=True,
append=False,
complib=None,
complevel: Optional[int] = None,
fletcher32=None,
min_itemsize: Optional[Union[int, Dict[str, int]]] = None,
chunksize=None,
expectedrows=None,
dropna=False,
nan_rep=None,
data_columns=None,
encoding=None,
errors: str = "strict",
track_times: bool = True,
):
# we don't want to store a table node at all if our object is 0-len
# as there are not dtypes
if getattr(value, "empty", None) and (format == "table" or append):
return
group = self._identify_group(key, append)
s = self._create_storer(group, format, value, encoding=encoding, errors=errors)
if append:
# raise if we are trying to append to a Fixed format,
# or a table that exists (and we are putting)
if not s.is_table or (s.is_table and format == "fixed" and s.is_exists):
raise ValueError("Can only append to Tables")
if not s.is_exists:
s.set_object_info()
else:
s.set_object_info()
if not s.is_table and complib:
raise ValueError("Compression not supported on Fixed format stores")
# write the object
s.write(
obj=value,
axes=axes,
append=append,
complib=complib,
complevel=complevel,
fletcher32=fletcher32,
min_itemsize=min_itemsize,
chunksize=chunksize,
expectedrows=expectedrows,
dropna=dropna,
nan_rep=nan_rep,
data_columns=data_columns,
track_times=track_times,
)
if isinstance(s, Table) and index:
s.create_index(columns=index)
def _read_group(self, group: Node):
s = self._create_storer(group)
s.infer_axes()
return s.read()
def _identify_group(self, key: str, append: bool) -> Node:
"""Identify HDF5 group based on key, delete/create group if needed."""
group = self.get_node(key)
# we make this assertion for mypy; the get_node call will already
# have raised if this is incorrect
assert self._handle is not None
# remove the node if we are not appending
if group is not None and not append:
self._handle.remove_node(group, recursive=True)
group = None
if group is None:
group = self._create_nodes_and_group(key)
return group
def _create_nodes_and_group(self, key: str) -> Node:
"""Create nodes from key and return group name."""
# assertion for mypy
assert self._handle is not None
paths = key.split("/")
# recursively create the groups
path = "/"
for p in paths:
if not len(p):
continue
new_path = path
if not path.endswith("/"):
new_path += "/"
new_path += p
group = self.get_node(new_path)
if group is None:
group = self._handle.create_group(path, p)
path = new_path
return group
class TableIterator:
"""
Define the iteration interface on a table
Parameters
----------
store : HDFStore
s : the referred storer
func : the function to execute the query
where : the where of the query
nrows : the rows to iterate on
start : the passed start value (default is None)
stop : the passed stop value (default is None)
iterator : bool, default False
Whether to use the default iterator.
chunksize : the passed chunking value (default is 100000)
auto_close : bool, default False
Whether to automatically close the store at the end of iteration.
"""
chunksize: Optional[int]
store: HDFStore
s: Union[GenericFixed, Table]
def __init__(
self,
store: HDFStore,
s: Union[GenericFixed, Table],
func,
where,
nrows,
start=None,
stop=None,
iterator: bool = False,
chunksize: Optional[int] = None,
auto_close: bool = False,
):
self.store = store
self.s = s
self.func = func
self.where = where
# set start/stop if they are not set if we are a table
if self.s.is_table:
if nrows is None:
nrows = 0
if start is None:
start = 0
if stop is None:
stop = nrows
stop = min(nrows, stop)
self.nrows = nrows
self.start = start
self.stop = stop
self.coordinates = None
if iterator or chunksize is not None:
if chunksize is None:
chunksize = 100000
self.chunksize = int(chunksize)
else:
self.chunksize = None
self.auto_close = auto_close
def __iter__(self):
# iterate
current = self.start
if self.coordinates is None:
raise ValueError("Cannot iterate until get_result is called.")
while current < self.stop:
stop = min(current + self.chunksize, self.stop)
value = self.func(None, None, self.coordinates[current:stop])
current = stop
if value is None or not len(value):
continue
yield value
self.close()
def close(self):
if self.auto_close:
self.store.close()
def get_result(self, coordinates: bool = False):
# return the actual iterator
if self.chunksize is not None:
if not isinstance(self.s, Table):
raise TypeError("can only use an iterator or chunksize on a table")
self.coordinates = self.s.read_coordinates(where=self.where)
return self
# if specified, read via coordinates (necessary for multiple selections)
if coordinates:
if not isinstance(self.s, Table):
raise TypeError("can only read_coordinates on a table")
where = self.s.read_coordinates(
where=self.where, start=self.start, stop=self.stop
)
else:
where = self.where
# directly return the result
results = self.func(self.start, self.stop, where)
self.close()
return results
class IndexCol:
"""
an index column description class
Parameters
----------
axis : axis which I reference
values : the ndarray like converted values
kind : a string description of this type
typ : the pytables type
pos : the position in the pytables
"""
is_an_indexable = True
is_data_indexable = True
_info_fields = ["freq", "tz", "index_name"]
name: str
cname: str
def __init__(
self,
name: str,
values=None,
kind=None,
typ=None,
cname: Optional[str] = None,
axis=None,
pos=None,
freq=None,
tz=None,
index_name=None,
ordered=None,
table=None,
meta=None,
metadata=None,
):
if not isinstance(name, str):
raise ValueError("`name` must be a str.")
self.values = values
self.kind = kind
self.typ = typ
self.name = name
self.cname = cname or name
self.axis = axis
self.pos = pos
self.freq = freq
self.tz = tz
self.index_name = index_name
self.ordered = ordered
self.table = table
self.meta = meta
self.metadata = metadata
if pos is not None:
self.set_pos(pos)
# These are ensured as long as the passed arguments match the
# constructor annotations.
assert isinstance(self.name, str)
assert isinstance(self.cname, str)
@property
def itemsize(self) -> int:
# Assumes self.typ has already been initialized
return self.typ.itemsize
@property
def kind_attr(self) -> str:
return f"{self.name}_kind"
def set_pos(self, pos: int):
""" set the position of this column in the Table """
self.pos = pos
if pos is not None and self.typ is not None:
self.typ._v_pos = pos
def __repr__(self) -> str:
temp = tuple(
map(pprint_thing, (self.name, self.cname, self.axis, self.pos, self.kind))
)
return ",".join(
(
f"{key}->{value}"
for key, value in zip(["name", "cname", "axis", "pos", "kind"], temp)
)
)
def __eq__(self, other: Any) -> bool:
""" compare 2 col items """
return all(
getattr(self, a, None) == getattr(other, a, None)
for a in ["name", "cname", "axis", "pos"]
)
def __ne__(self, other) -> bool:
return not self.__eq__(other)
@property
def is_indexed(self) -> bool:
""" return whether I am an indexed column """
if not hasattr(self.table, "cols"):
# e.g. if infer hasn't been called yet, self.table will be None.
return False
return getattr(self.table.cols, self.cname).is_indexed
def convert(self, values: np.ndarray, nan_rep, encoding: str, errors: str):
"""
Convert the data from this selection to the appropriate pandas type.
"""
assert isinstance(values, np.ndarray), type(values)
# values is a recarray
if values.dtype.fields is not None:
values = values[self.cname]
val_kind = _ensure_decoded(self.kind)
values = _maybe_convert(values, val_kind, encoding, errors)
kwargs = {}
kwargs["name"] = _ensure_decoded(self.index_name)
if self.freq is not None:
kwargs["freq"] = _ensure_decoded(self.freq)
factory: Union[Type[Index], Type[DatetimeIndex]] = Index
if is_datetime64_dtype(values.dtype) or is_datetime64tz_dtype(values.dtype):
factory = DatetimeIndex
# making an Index instance could throw a number of different errors
try:
new_pd_index = factory(values, **kwargs)
except ValueError:
# if the output freq is different than what we recorded,
# it should be None (see also 'doc example part 2')
if "freq" in kwargs:
kwargs["freq"] = None
new_pd_index = factory(values, **kwargs)
# error: Incompatible types in assignment (expression has type
# "Union[ndarray, DatetimeIndex]", variable has type "Index")
new_pd_index = _set_tz(new_pd_index, self.tz) # type: ignore[assignment]
return new_pd_index, new_pd_index
def take_data(self):
""" return the values"""
return self.values
@property
def attrs(self):
return self.table._v_attrs
@property
def description(self):
return self.table.description
@property
def col(self):
""" return my current col description """
return getattr(self.description, self.cname, None)
@property
def cvalues(self):
""" return my cython values """
return self.values
def __iter__(self):
return iter(self.values)
def maybe_set_size(self, min_itemsize=None):
"""
maybe set a string col itemsize:
min_itemsize can be an integer or a dict mapping this column's
name to an integer size
"""
if _ensure_decoded(self.kind) == "string":
if isinstance(min_itemsize, dict):
min_itemsize = min_itemsize.get(self.name)
if min_itemsize is not None and self.typ.itemsize < min_itemsize:
self.typ = _tables().StringCol(itemsize=min_itemsize, pos=self.pos)
def validate_names(self):
pass
def validate_and_set(self, handler: AppendableTable, append: bool):
self.table = handler.table
self.validate_col()
self.validate_attr(append)
self.validate_metadata(handler)
self.write_metadata(handler)
self.set_attr()
def validate_col(self, itemsize=None):
""" validate this column: return the compared against itemsize """
# validate this column for string truncation (or reset to the max size)
if _ensure_decoded(self.kind) == "string":
c = self.col
if c is not None:
if itemsize is None:
itemsize = self.itemsize
if c.itemsize < itemsize:
raise ValueError(
f"Trying to store a string with len [{itemsize}] in "
f"[{self.cname}] column but\nthis column has a limit of "
f"[{c.itemsize}]!\nConsider using min_itemsize to "
"preset the sizes on these columns"
)
return c.itemsize
return None
def validate_attr(self, append: bool):
# check for backwards incompatibility
if append:
existing_kind = getattr(self.attrs, self.kind_attr, None)
if existing_kind is not None and existing_kind != self.kind:
raise TypeError(
f"incompatible kind in col [{existing_kind} - {self.kind}]"
)
def update_info(self, info):
"""
set/update the info for this indexable with the key/value
if there is a conflict raise/warn as needed
"""
for key in self._info_fields:
value = getattr(self, key, None)
idx = info.setdefault(self.name, {})
existing_value = idx.get(key)
if key in idx and value is not None and existing_value != value:
# frequency/name just warn
if key in ["freq", "index_name"]:
ws = attribute_conflict_doc % (key, existing_value, value)
warnings.warn(ws, AttributeConflictWarning, stacklevel=6)
# reset
idx[key] = None
setattr(self, key, None)
else:
raise ValueError(
f"invalid info for [{self.name}] for [{key}], "
f"existing_value [{existing_value}] conflicts with "
f"new value [{value}]"
)
else:
if value is not None or existing_value is not None:
idx[key] = value
def set_info(self, info):
""" set my state from the passed info """
idx = info.get(self.name)
if idx is not None:
self.__dict__.update(idx)
def set_attr(self):
""" set the kind for this column """
setattr(self.attrs, self.kind_attr, self.kind)
def validate_metadata(self, handler: AppendableTable):
""" validate that kind=category does not change the categories """
if self.meta == "category":
new_metadata = self.metadata
cur_metadata = handler.read_metadata(self.cname)
if (
new_metadata is not None
and cur_metadata is not None
and not array_equivalent(new_metadata, cur_metadata)
):
raise ValueError(
"cannot append a categorical with "
"different categories to the existing"
)
def write_metadata(self, handler: AppendableTable):
""" set the meta data """
if self.metadata is not None:
handler.write_metadata(self.cname, self.metadata)
class GenericIndexCol(IndexCol):
""" an index which is not represented in the data of the table """
@property
def is_indexed(self) -> bool:
return False
def convert(self, values: np.ndarray, nan_rep, encoding: str, errors: str):
"""
Convert the data from this selection to the appropriate pandas type.
Parameters
----------
values : np.ndarray
nan_rep : str
encoding : str
errors : str
"""
assert isinstance(values, np.ndarray), type(values)
# error: Incompatible types in assignment (expression has type
# "Int64Index", variable has type "ndarray")
values = Int64Index(np.arange(len(values))) # type: ignore[assignment]
return values, values
def set_attr(self):
pass
class DataCol(IndexCol):
"""
a data holding column, by definition this is not indexable
Parameters
----------
data : the actual data
cname : the column name in the table to hold the data (typically
values)
meta : a string description of the metadata
metadata : the actual metadata
"""
is_an_indexable = False
is_data_indexable = False
_info_fields = ["tz", "ordered"]
def __init__(
self,
name: str,
values=None,
kind=None,
typ=None,
cname=None,
pos=None,
tz=None,
ordered=None,
table=None,
meta=None,
metadata=None,
dtype: Optional[DtypeArg] = None,
data=None,
):
super().__init__(
name=name,
values=values,
kind=kind,
typ=typ,
pos=pos,
cname=cname,
tz=tz,
ordered=ordered,
table=table,
meta=meta,
metadata=metadata,
)
self.dtype = dtype
self.data = data
@property
def dtype_attr(self) -> str:
return f"{self.name}_dtype"
@property
def meta_attr(self) -> str:
return f"{self.name}_meta"
def __repr__(self) -> str:
temp = tuple(
map(
pprint_thing, (self.name, self.cname, self.dtype, self.kind, self.shape)
)
)
return ",".join(
(
f"{key}->{value}"
for key, value in zip(["name", "cname", "dtype", "kind", "shape"], temp)
)
)
def __eq__(self, other: Any) -> bool:
""" compare 2 col items """
return all(
getattr(self, a, None) == getattr(other, a, None)
for a in ["name", "cname", "dtype", "pos"]
)
def set_data(self, data: ArrayLike):
assert data is not None
assert self.dtype is None
data, dtype_name = _get_data_and_dtype_name(data)
self.data = data
self.dtype = dtype_name
self.kind = _dtype_to_kind(dtype_name)
def take_data(self):
""" return the data """
return self.data
@classmethod
def _get_atom(cls, values: ArrayLike) -> Col:
"""
Get an appropriately typed and shaped pytables.Col object for values.
"""
dtype = values.dtype
# error: Item "ExtensionDtype" of "Union[ExtensionDtype, dtype[Any]]" has no
# attribute "itemsize"
itemsize = dtype.itemsize # type: ignore[union-attr]
shape = values.shape
if values.ndim == 1:
# EA, use block shape pretending it is 2D
# TODO(EA2D): not necessary with 2D EAs
shape = (1, values.size)
if isinstance(values, Categorical):
codes = values.codes
atom = cls.get_atom_data(shape, kind=codes.dtype.name)
elif is_datetime64_dtype(dtype) or is_datetime64tz_dtype(dtype):
atom = cls.get_atom_datetime64(shape)
elif is_timedelta64_dtype(dtype):
atom = cls.get_atom_timedelta64(shape)
elif is_complex_dtype(dtype):
atom = _tables().ComplexCol(itemsize=itemsize, shape=shape[0])
elif is_string_dtype(dtype):
atom = cls.get_atom_string(shape, itemsize)
else:
atom = cls.get_atom_data(shape, kind=dtype.name)
return atom
@classmethod
def get_atom_string(cls, shape, itemsize):
return _tables().StringCol(itemsize=itemsize, shape=shape[0])
@classmethod
def get_atom_coltype(cls, kind: str) -> Type[Col]:
""" return the PyTables column class for this column """
if kind.startswith("uint"):
k4 = kind[4:]
col_name = f"UInt{k4}Col"
elif kind.startswith("period"):
# we store as integer
col_name = "Int64Col"
else:
kcap = kind.capitalize()
col_name = f"{kcap}Col"
return getattr(_tables(), col_name)
@classmethod
def get_atom_data(cls, shape, kind: str) -> Col:
return cls.get_atom_coltype(kind=kind)(shape=shape[0])
@classmethod
def get_atom_datetime64(cls, shape):
return _tables().Int64Col(shape=shape[0])
@classmethod
def get_atom_timedelta64(cls, shape):
return _tables().Int64Col(shape=shape[0])
@property
def shape(self):
return getattr(self.data, "shape", None)
@property
def cvalues(self):
""" return my cython values """
return self.data
def validate_attr(self, append):
"""validate that we have the same order as the existing & same dtype"""
if append:
existing_fields = getattr(self.attrs, self.kind_attr, None)
if existing_fields is not None and existing_fields != list(self.values):
raise ValueError("appended items do not match existing items in table!")
existing_dtype = getattr(self.attrs, self.dtype_attr, None)
if existing_dtype is not None and existing_dtype != self.dtype:
raise ValueError(
"appended items dtype do not match existing items dtype in table!"
)
def convert(self, values: np.ndarray, nan_rep, encoding: str, errors: str):
"""
Convert the data from this selection to the appropriate pandas type.
Parameters
----------
values : np.ndarray
nan_rep :
encoding : str
errors : str
Returns
-------
index : listlike to become an Index
data : ndarraylike to become a column
"""
assert isinstance(values, np.ndarray), type(values)
# values is a recarray
if values.dtype.fields is not None:
values = values[self.cname]
assert self.typ is not None
if self.dtype is None:
# Note: in tests we never have timedelta64 or datetime64,
# so the _get_data_and_dtype_name may be unnecessary
converted, dtype_name = _get_data_and_dtype_name(values)
kind = _dtype_to_kind(dtype_name)
else:
converted = values
dtype_name = self.dtype
kind = self.kind
assert isinstance(converted, np.ndarray) # for mypy
# use the meta if needed
meta = _ensure_decoded(self.meta)
metadata = self.metadata
ordered = self.ordered
tz = self.tz
assert dtype_name is not None
# convert to the correct dtype
dtype = _ensure_decoded(dtype_name)
# reverse converts
if dtype == "datetime64":
# recreate with tz if indicated
converted = _set_tz(converted, tz, coerce=True)
elif dtype == "timedelta64":
converted = np.asarray(converted, dtype="m8[ns]")
elif dtype == "date":
try:
converted = np.asarray(
[date.fromordinal(v) for v in converted], dtype=object
)
except ValueError:
converted = np.asarray(
[date.fromtimestamp(v) for v in converted], dtype=object
)
elif meta == "category":
# we have a categorical
categories = metadata
codes = converted.ravel()
# if we have stored a NaN in the categories
# then strip it; in theory we could have BOTH
# -1s in the codes and nulls :<
if categories is None:
# Handle case of NaN-only categorical columns in which case
# the categories are an empty array; when this is stored,
# pytables cannot write a zero-len array, so on readback
# the categories would be None and `read_hdf()` would fail.
categories = Index([], dtype=np.float64)
else:
mask = isna(categories)
if mask.any():
categories = categories[~mask]
codes[codes != -1] -= mask.astype(int).cumsum()._values
converted = Categorical.from_codes(
codes, categories=categories, ordered=ordered
)
else:
try:
converted = converted.astype(dtype, copy=False)
except TypeError:
converted = converted.astype("O", copy=False)
# convert nans / decode
if _ensure_decoded(kind) == "string":
converted = _unconvert_string_array(
converted, nan_rep=nan_rep, encoding=encoding, errors=errors
)
return self.values, converted
def set_attr(self):
""" set the data for this column """
setattr(self.attrs, self.kind_attr, self.values)
setattr(self.attrs, self.meta_attr, self.meta)
assert self.dtype is not None
setattr(self.attrs, self.dtype_attr, self.dtype)
class DataIndexableCol(DataCol):
""" represent a data column that can be indexed """
is_data_indexable = True
def validate_names(self):
if not Index(self.values).is_object():
# TODO: should the message here be more specifically non-str?
raise ValueError("cannot have non-object label DataIndexableCol")
@classmethod
def get_atom_string(cls, shape, itemsize):
return _tables().StringCol(itemsize=itemsize)
@classmethod
def get_atom_data(cls, shape, kind: str) -> Col:
return cls.get_atom_coltype(kind=kind)()
@classmethod
def get_atom_datetime64(cls, shape):
return _tables().Int64Col()
@classmethod
def get_atom_timedelta64(cls, shape):
return _tables().Int64Col()
class GenericDataIndexableCol(DataIndexableCol):
""" represent a generic pytables data column """
pass
class Fixed:
"""
represent an object in my store
facilitate read/write of various types of objects
this is an abstract base class
Parameters
----------
parent : HDFStore
group : Node
The group node where the table resides.
"""
pandas_kind: str
format_type: str = "fixed" # GH#30962 needed by dask
obj_type: Type[FrameOrSeriesUnion]
ndim: int
encoding: str
parent: HDFStore
group: Node
errors: str
is_table = False
def __init__(
self,
parent: HDFStore,
group: Node,
encoding: str = "UTF-8",
errors: str = "strict",
):
assert isinstance(parent, HDFStore), type(parent)
assert _table_mod is not None # needed for mypy
assert isinstance(group, _table_mod.Node), type(group)
self.parent = parent
self.group = group
self.encoding = _ensure_encoding(encoding)
self.errors = errors
@property
def is_old_version(self) -> bool:
return self.version[0] <= 0 and self.version[1] <= 10 and self.version[2] < 1
@property
def version(self) -> Tuple[int, int, int]:
""" compute and set our version """
version = _ensure_decoded(getattr(self.group._v_attrs, "pandas_version", None))
try:
version = tuple(int(x) for x in version.split("."))
if len(version) == 2:
version = version + (0,)
except AttributeError:
version = (0, 0, 0)
return version
@property
def pandas_type(self):
return _ensure_decoded(getattr(self.group._v_attrs, "pandas_type", None))
def __repr__(self) -> str:
""" return a pretty representation of myself """
self.infer_axes()
s = self.shape
if s is not None:
if isinstance(s, (list, tuple)):
jshape = ",".join(pprint_thing(x) for x in s)
s = f"[{jshape}]"
return f"{self.pandas_type:12.12} (shape->{s})"
return self.pandas_type
def set_object_info(self):
""" set my pandas type & version """
self.attrs.pandas_type = str(self.pandas_kind)
self.attrs.pandas_version = str(_version)
def copy(self):
new_self = copy.copy(self)
return new_self
@property
def shape(self):
return self.nrows
@property
def pathname(self):
return self.group._v_pathname
@property
def _handle(self):
return self.parent._handle
@property
def _filters(self):
return self.parent._filters
@property
def _complevel(self) -> int:
return self.parent._complevel
@property
def _fletcher32(self) -> bool:
return self.parent._fletcher32
@property
def attrs(self):
return self.group._v_attrs
def set_attrs(self):
""" set our object attributes """
pass
def get_attrs(self):
""" get our object attributes """
pass
@property
def storable(self):
""" return my storable """
return self.group
@property
def is_exists(self) -> bool:
return False
@property
def nrows(self):
return getattr(self.storable, "nrows", None)
def validate(self, other):
""" validate against an existing storable """
if other is None:
return
return True
def validate_version(self, where=None):
""" are we trying to operate on an old version? """
return True
def infer_axes(self):
"""
infer the axes of my storer
return a boolean indicating if we have a valid storer or not
"""
s = self.storable
if s is None:
return False
self.get_attrs()
return True
def read(
self,
where=None,
columns=None,
start: Optional[int] = None,
stop: Optional[int] = None,
):
raise NotImplementedError(
"cannot read on an abstract storer: subclasses should implement"
)
def write(self, **kwargs):
raise NotImplementedError(
"cannot write on an abstract storer: subclasses should implement"
)
def delete(
self, where=None, start: Optional[int] = None, stop: Optional[int] = None
):
"""
support fully deleting the node in its entirety (only) - where
specification must be None
"""
if com.all_none(where, start, stop):
self._handle.remove_node(self.group, recursive=True)
return None
raise TypeError("cannot delete on an abstract storer")
class GenericFixed(Fixed):
""" a generified fixed version """
_index_type_map = {DatetimeIndex: "datetime", PeriodIndex: "period"}
_reverse_index_map = {v: k for k, v in _index_type_map.items()}
attributes: List[str] = []
# indexer helpers
def _class_to_alias(self, cls) -> str:
return self._index_type_map.get(cls, "")
def _alias_to_class(self, alias):
if isinstance(alias, type): # pragma: no cover
# compat: for a short period of time master stored types
return alias
return self._reverse_index_map.get(alias, Index)
def _get_index_factory(self, attrs):
index_class = self._alias_to_class(
_ensure_decoded(getattr(attrs, "index_class", ""))
)
factory: Callable
if index_class == DatetimeIndex:
def f(values, freq=None, tz=None):
# data are already in UTC, localize and convert if tz present
dta = DatetimeArray._simple_new(values.values, freq=freq)
result = DatetimeIndex._simple_new(dta, name=None)
if tz is not None:
result = result.tz_localize("UTC").tz_convert(tz)
return result
factory = f
elif index_class == PeriodIndex:
def f(values, freq=None, tz=None):
parr = PeriodArray._simple_new(values, freq=freq)
return PeriodIndex._simple_new(parr, name=None)
factory = f
else:
factory = index_class
kwargs = {}
if "freq" in attrs:
kwargs["freq"] = attrs["freq"]
if index_class is Index:
# DTI/PI would be gotten by _alias_to_class
factory = TimedeltaIndex
if "tz" in attrs:
if isinstance(attrs["tz"], bytes):
# created by python2
kwargs["tz"] = attrs["tz"].decode("utf-8")
else:
# created by python3
kwargs["tz"] = attrs["tz"]
assert index_class is DatetimeIndex # just checking
return factory, kwargs
def validate_read(self, columns, where):
"""
raise if any keywords are passed which are not-None
"""
if columns is not None:
raise TypeError(
"cannot pass a column specification when reading "
"a Fixed format store. this store must be selected in its entirety"
)
if where is not None:
raise TypeError(
"cannot pass a where specification when reading "
"from a Fixed format store. this store must be selected in its entirety"
)
@property
def is_exists(self) -> bool:
return True
def set_attrs(self):
""" set our object attributes """
self.attrs.encoding = self.encoding
self.attrs.errors = self.errors
def get_attrs(self):
""" retrieve our attributes """
self.encoding = _ensure_encoding(getattr(self.attrs, "encoding", None))
self.errors = _ensure_decoded(getattr(self.attrs, "errors", "strict"))
for n in self.attributes:
setattr(self, n, _ensure_decoded(getattr(self.attrs, n, None)))
def write(self, obj, **kwargs):
self.set_attrs()
def read_array(
self, key: str, start: Optional[int] = None, stop: Optional[int] = None
):
""" read an array for the specified node (off of group """
import tables
node = getattr(self.group, key)
attrs = node._v_attrs
transposed = getattr(attrs, "transposed", False)
if isinstance(node, tables.VLArray):
ret = node[0][start:stop]
else:
dtype = _ensure_decoded(getattr(attrs, "value_type", None))
shape = getattr(attrs, "shape", None)
if shape is not None:
# length 0 axis
ret = np.empty(shape, dtype=dtype)
else:
ret = node[start:stop]
if dtype == "datetime64":
# reconstruct a timezone if indicated
tz = getattr(attrs, "tz", None)
ret = _set_tz(ret, tz, coerce=True)
elif dtype == "timedelta64":
ret = np.asarray(ret, dtype="m8[ns]")
if transposed:
return ret.T
else:
return ret
def read_index(
self, key: str, start: Optional[int] = None, stop: Optional[int] = None
) -> Index:
variety = _ensure_decoded(getattr(self.attrs, f"{key}_variety"))
if variety == "multi":
return self.read_multi_index(key, start=start, stop=stop)
elif variety == "regular":
node = getattr(self.group, key)
index = self.read_index_node(node, start=start, stop=stop)
return index
else: # pragma: no cover
raise TypeError(f"unrecognized index variety: {variety}")
def write_index(self, key: str, index: Index):
if isinstance(index, MultiIndex):
setattr(self.attrs, f"{key}_variety", "multi")
self.write_multi_index(key, index)
else:
setattr(self.attrs, f"{key}_variety", "regular")
converted = _convert_index("index", index, self.encoding, self.errors)
self.write_array(key, converted.values)
node = getattr(self.group, key)
node._v_attrs.kind = converted.kind
node._v_attrs.name = index.name
if isinstance(index, (DatetimeIndex, PeriodIndex)):
node._v_attrs.index_class = self._class_to_alias(type(index))
if isinstance(index, (DatetimeIndex, PeriodIndex, TimedeltaIndex)):
node._v_attrs.freq = index.freq
if isinstance(index, DatetimeIndex) and index.tz is not None:
node._v_attrs.tz = _get_tz(index.tz)
def write_multi_index(self, key: str, index: MultiIndex):
setattr(self.attrs, f"{key}_nlevels", index.nlevels)
for i, (lev, level_codes, name) in enumerate(
zip(index.levels, index.codes, index.names)
):
# write the level
if is_extension_array_dtype(lev):
raise NotImplementedError(
"Saving a MultiIndex with an extension dtype is not supported."
)
level_key = f"{key}_level{i}"
conv_level = _convert_index(level_key, lev, self.encoding, self.errors)
self.write_array(level_key, conv_level.values)
node = getattr(self.group, level_key)
node._v_attrs.kind = conv_level.kind
node._v_attrs.name = name
# write the name
setattr(node._v_attrs, f"{key}_name{name}", name)
# write the labels
label_key = f"{key}_label{i}"
self.write_array(label_key, level_codes)
def read_multi_index(
self, key: str, start: Optional[int] = None, stop: Optional[int] = None
) -> MultiIndex:
nlevels = getattr(self.attrs, f"{key}_nlevels")
levels = []
codes = []
names: List[Hashable] = []
for i in range(nlevels):
level_key = f"{key}_level{i}"
node = getattr(self.group, level_key)
lev = self.read_index_node(node, start=start, stop=stop)
levels.append(lev)
names.append(lev.name)
label_key = f"{key}_label{i}"
level_codes = self.read_array(label_key, start=start, stop=stop)
codes.append(level_codes)
return MultiIndex(
levels=levels, codes=codes, names=names, verify_integrity=True
)
def read_index_node(
self, node: Node, start: Optional[int] = None, stop: Optional[int] = None
) -> Index:
data = node[start:stop]
# If the index was an empty array write_array_empty() will
# have written a sentinel. Here we replace it with the original.
if "shape" in node._v_attrs and np.prod(node._v_attrs.shape) == 0:
data = np.empty(node._v_attrs.shape, dtype=node._v_attrs.value_type)
kind = _ensure_decoded(node._v_attrs.kind)
name = None
if "name" in node._v_attrs:
name = _ensure_str(node._v_attrs.name)
name = _ensure_decoded(name)
attrs = node._v_attrs
factory, kwargs = self._get_index_factory(attrs)
if kind == "date":
index = factory(
_unconvert_index(
data, kind, encoding=self.encoding, errors=self.errors
),
dtype=object,
**kwargs,
)
else:
index = factory(
_unconvert_index(
data, kind, encoding=self.encoding, errors=self.errors
),
**kwargs,
)
index.name = name
return index
def write_array_empty(self, key: str, value: ArrayLike):
""" write a 0-len array """
# ugly hack for length 0 axes
arr = np.empty((1,) * value.ndim)
self._handle.create_array(self.group, key, arr)
node = getattr(self.group, key)
node._v_attrs.value_type = str(value.dtype)
node._v_attrs.shape = value.shape
def write_array(self, key: str, obj: FrameOrSeries, items: Optional[Index] = None):
# TODO: we only have a few tests that get here, the only EA
# that gets passed is DatetimeArray, and we never have
# both self._filters and EA
value = extract_array(obj, extract_numpy=True)
if key in self.group:
self._handle.remove_node(self.group, key)
# Transform needed to interface with pytables row/col notation
empty_array = value.size == 0
transposed = False
if is_categorical_dtype(value.dtype):
raise NotImplementedError(
"Cannot store a category dtype in a HDF5 dataset that uses format="
'"fixed". Use format="table".'
)
if not empty_array:
if hasattr(value, "T"):
# ExtensionArrays (1d) may not have transpose.
value = value.T
transposed = True
atom = None
if self._filters is not None:
with suppress(ValueError):
# get the atom for this datatype
atom = _tables().Atom.from_dtype(value.dtype)
if atom is not None:
# We only get here if self._filters is non-None and
# the Atom.from_dtype call succeeded
# create an empty chunked array and fill it from value
if not empty_array:
ca = self._handle.create_carray(
self.group, key, atom, value.shape, filters=self._filters
)
ca[:] = value
else:
self.write_array_empty(key, value)
elif value.dtype.type == np.object_:
# infer the type, warn if we have a non-string type here (for
# performance)
inferred_type = lib.infer_dtype(value, skipna=False)
if empty_array:
pass
elif inferred_type == "string":
pass
else:
ws = performance_doc % (inferred_type, key, items)
warnings.warn(ws, PerformanceWarning, stacklevel=7)
vlarr = self._handle.create_vlarray(self.group, key, _tables().ObjectAtom())
vlarr.append(value)
elif is_datetime64_dtype(value.dtype):
self._handle.create_array(self.group, key, value.view("i8"))
getattr(self.group, key)._v_attrs.value_type = "datetime64"
elif is_datetime64tz_dtype(value.dtype):
# store as UTC
# with a zone
# error: Item "ExtensionArray" of "Union[Any, ExtensionArray]" has no
# attribute "asi8"
self._handle.create_array(
self.group, key, value.asi8 # type: ignore[union-attr]
)
node = getattr(self.group, key)
# error: Item "ExtensionArray" of "Union[Any, ExtensionArray]" has no
# attribute "tz"
node._v_attrs.tz = _get_tz(value.tz) # type: ignore[union-attr]
node._v_attrs.value_type = "datetime64"
elif is_timedelta64_dtype(value.dtype):
self._handle.create_array(self.group, key, value.view("i8"))
getattr(self.group, key)._v_attrs.value_type = "timedelta64"
elif empty_array:
self.write_array_empty(key, value)
else:
self._handle.create_array(self.group, key, value)
getattr(self.group, key)._v_attrs.transposed = transposed
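# Summary of the dispatch above (descriptive comment only): object-dtype values go
# to a VLArray (with a PerformanceWarning for non-string objects), datetime64 and
# timedelta64 values are stored as their i8 view with a value_type marker (plus a
# tz attr for tz-aware data), empty arrays write a sentinel via write_array_empty,
# and everything else becomes a plain (optionally compressed) array.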
class SeriesFixed(GenericFixed):
pandas_kind = "series"
attributes = ["name"]
name: Hashable
@property
def shape(self):
try:
return (len(self.group.values),)
except (TypeError, AttributeError):
return None
def read(
self,
where=None,
columns=None,
start: Optional[int] = None,
stop: Optional[int] = None,
):
self.validate_read(columns, where)
index = self.read_index("index", start=start, stop=stop)
values = self.read_array("values", start=start, stop=stop)
return Series(values, index=index, name=self.name)
def write(self, obj, **kwargs):
super().write(obj, **kwargs)
self.write_index("index", obj.index)
self.write_array("values", obj)
self.attrs.name = obj.name
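# Illustrative usage (hypothetical file/key names, assuming `import pandas as pd`):
# fixed-format Series round-trip through this storer, e.g.
#   >>> with pd.HDFStore("data.h5") as store:
#   ...     store.put("s", pd.Series([1, 2, 3]), format="fixed")
#   ...     s = store.get("s")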
class BlockManagerFixed(GenericFixed):
attributes = ["ndim", "nblocks"]
nblocks: int
@property
def shape(self) -> Optional[Shape]:
try:
ndim = self.ndim
# items
items = 0
for i in range(self.nblocks):
node = getattr(self.group, f"block{i}_items")
shape = getattr(node, "shape", None)
if shape is not None:
items += shape[0]
# data shape
node = self.group.block0_values
shape = getattr(node, "shape", None)
if shape is not None:
shape = list(shape[0 : (ndim - 1)])
else:
shape = []
shape.append(items)
return shape
except AttributeError:
return None
def read(
self,
where=None,
columns=None,
start: Optional[int] = None,
stop: Optional[int] = None,
):
# start, stop applied to rows, so 0th axis only
self.validate_read(columns, where)
select_axis = self.obj_type()._get_block_manager_axis(0)
axes = []
for i in range(self.ndim):
_start, _stop = (start, stop) if i == select_axis else (None, None)
ax = self.read_index(f"axis{i}", start=_start, stop=_stop)
axes.append(ax)
items = axes[0]
dfs = []
for i in range(self.nblocks):
blk_items = self.read_index(f"block{i}_items")
values = self.read_array(f"block{i}_values", start=_start, stop=_stop)
columns = items[items.get_indexer(blk_items)]
df = DataFrame(values.T, columns=columns, index=axes[1])
dfs.append(df)
if len(dfs) > 0:
out = concat(dfs, axis=1)
out = out.reindex(columns=items, copy=False)
return out
return DataFrame(columns=axes[0], index=axes[1])
def write(self, obj, **kwargs):
super().write(obj, **kwargs)
data = obj._mgr
if not data.is_consolidated():
data = data.consolidate()
self.attrs.ndim = data.ndim
for i, ax in enumerate(data.axes):
if i == 0 and (not ax.is_unique):
raise ValueError("Columns index has to be unique for fixed format")
self.write_index(f"axis{i}", ax)
# Supporting mixed-type DataFrame objects...nontrivial
self.attrs.nblocks = len(data.blocks)
for i, blk in enumerate(data.blocks):
# I have no idea why, but writing values before items fixed #2299
blk_items = data.items.take(blk.mgr_locs)
self.write_array(f"block{i}_values", blk.values, items=blk_items)
self.write_index(f"block{i}_items", blk_items)
class FrameFixed(BlockManagerFixed):
pandas_kind = "frame"
obj_type = DataFrame
class Table(Fixed):
"""
represent a table:
facilitate read/write of various types of tables
Attrs in Table Node
-------------------
These are attributes that are stored in the main table node; they are
necessary to recreate these tables when read back in.
index_axes : a list of tuples of the (original indexing axis and
index column)
non_index_axes: a list of tuples of the (original index axis and
columns on a non-indexing axis)
values_axes : a list of the columns which comprise the data of this
table
data_columns : a list of the columns that we are allowing indexing
(these become single columns in values_axes), or True to force all
columns
nan_rep : the string to use for nan representations for string
objects
levels : the names of levels
metadata : the names of the metadata columns
"""
pandas_kind = "wide_table"
format_type: str = "table" # GH#30962 needed by dask
table_type: str
levels: Union[int, List[Hashable]] = 1
is_table = True
index_axes: List[IndexCol]
non_index_axes: List[Tuple[int, Any]]
values_axes: List[DataCol]
data_columns: List
metadata: List
info: Dict
def __init__(
self,
parent: HDFStore,
group: Node,
encoding=None,
errors: str = "strict",
index_axes=None,
non_index_axes=None,
values_axes=None,
data_columns=None,
info=None,
nan_rep=None,
):
super().__init__(parent, group, encoding=encoding, errors=errors)
self.index_axes = index_axes or []
self.non_index_axes = non_index_axes or []
self.values_axes = values_axes or []
self.data_columns = data_columns or []
self.info = info or {}
self.nan_rep = nan_rep
@property
def table_type_short(self) -> str:
return self.table_type.split("_")[0]
def __repr__(self) -> str:
""" return a pretty representation of myself """
self.infer_axes()
jdc = ",".join(self.data_columns) if len(self.data_columns) else ""
dc = f",dc->[{jdc}]"
ver = ""
if self.is_old_version:
jver = ".".join(str(x) for x in self.version)
ver = f"[{jver}]"
jindex_axes = ",".join(a.name for a in self.index_axes)
return (
f"{self.pandas_type:12.12}{ver} "
f"(typ->{self.table_type_short},nrows->{self.nrows},"
f"ncols->{self.ncols},indexers->[{jindex_axes}]{dc})"
)
def __getitem__(self, c: str):
""" return the axis for c """
for a in self.axes:
if c == a.name:
return a
return None
def validate(self, other):
""" validate against an existing table """
if other is None:
return
if other.table_type != self.table_type:
raise TypeError(
"incompatible table_type with existing "
f"[{other.table_type} - {self.table_type}]"
)
for c in ["index_axes", "non_index_axes", "values_axes"]:
sv = getattr(self, c, None)
ov = getattr(other, c, None)
if sv != ov:
# show the error for the specific axes
for i, sax in enumerate(sv):
oax = ov[i]
if sax != oax:
raise ValueError(
f"invalid combination of [{c}] on appending data "
f"[{sax}] vs current table [{oax}]"
)
# should never get here
raise Exception(
f"invalid combination of [{c}] on appending data [{sv}] vs "
f"current table [{ov}]"
)
@property
def is_multi_index(self) -> bool:
"""the levels attribute is 1 or a list in the case of a multi-index"""
return isinstance(self.levels, list)
def validate_multiindex(
self, obj: FrameOrSeriesUnion
) -> Tuple[DataFrame, List[Hashable]]:
"""
validate that we can store the multi-index; reset and return the
new object
"""
levels = [
l if l is not None else f"level_{i}" for i, l in enumerate(obj.index.names)
]
try:
reset_obj = obj.reset_index()
except ValueError as err:
raise ValueError(
"duplicate names/columns in the multi-index when storing as a table"
) from err
assert isinstance(reset_obj, DataFrame) # for mypy
return reset_obj, levels
@property
def nrows_expected(self) -> int:
""" based on our axes, compute the expected nrows """
# error: Incompatible return value type (got "number", expected "int")
return np.prod( # type: ignore[return-value]
[i.cvalues.shape[0] for i in self.index_axes]
)
@property
def is_exists(self) -> bool:
""" has this table been created """
return "table" in self.group
@property
def storable(self):
return getattr(self.group, "table", None)
@property
def table(self):
""" return the table group (this is my storable) """
return self.storable
@property
def dtype(self):
return self.table.dtype
@property
def description(self):
return self.table.description
@property
def axes(self):
return itertools.chain(self.index_axes, self.values_axes)
@property
def ncols(self) -> int:
""" the number of total columns in the values axes """
return sum(len(a.values) for a in self.values_axes)
@property
def is_transposed(self) -> bool:
return False
@property
def data_orientation(self):
"""return a tuple of my permutated axes, non_indexable at the front"""
return tuple(
itertools.chain(
[int(a[0]) for a in self.non_index_axes],
[int(a.axis) for a in self.index_axes],
)
)
def queryables(self) -> Dict[str, Any]:
""" return a dict of the kinds allowable columns for this object """
# mypy doesn't recognize DataFrame._AXIS_NAMES, so we re-write it here
axis_names = {0: "index", 1: "columns"}
# compute the values_axes queryables
d1 = [(a.cname, a) for a in self.index_axes]
d2 = [(axis_names[axis], None) for axis, values in self.non_index_axes]
d3 = [
(v.cname, v) for v in self.values_axes if v.name in set(self.data_columns)
]
# error: Unsupported operand types for + ("List[Tuple[str, IndexCol]]" and
# "List[Tuple[str, None]]")
return dict(d1 + d2 + d3) # type: ignore[operator]
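# Illustrative result (assumed example): for a DataFrame stored with
# data_columns=["A"], queryables() yields keys like
#   {"index": <IndexCol>, "columns": None, "A": <DataIndexableCol>}
# i.e. the names that may appear in a `where` expression.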
def index_cols(self):
""" return a list of my index cols """
# Note: each `i.cname` below is assured to be a str.
return [(i.axis, i.cname) for i in self.index_axes]
def values_cols(self) -> List[str]:
""" return a list of my values cols """
return [i.cname for i in self.values_axes]
def _get_metadata_path(self, key: str) -> str:
""" return the metadata pathname for this key """
group = self.group._v_pathname
return f"{group}/meta/{key}/meta"
def write_metadata(self, key: str, values: np.ndarray):
"""
Write out a metadata array to the key as a fixed-format Series.
Parameters
----------
key : str
values : ndarray
"""
# error: Incompatible types in assignment (expression has type
# "Series", variable has type "ndarray")
values = Series(values) # type: ignore[assignment]
# error: Value of type variable "FrameOrSeries" of "put" of "HDFStore"
# cannot be "ndarray"
self.parent.put( # type: ignore[type-var]
self._get_metadata_path(key),
values,
format="table",
encoding=self.encoding,
errors=self.errors,
nan_rep=self.nan_rep,
)
def read_metadata(self, key: str):
""" return the meta data array for this key """
if getattr(getattr(self.group, "meta", None), key, None) is not None:
return self.parent.select(self._get_metadata_path(key))
return None
def set_attrs(self):
""" set our table type & indexables """
self.attrs.table_type = str(self.table_type)
self.attrs.index_cols = self.index_cols()
self.attrs.values_cols = self.values_cols()
self.attrs.non_index_axes = self.non_index_axes
self.attrs.data_columns = self.data_columns
self.attrs.nan_rep = self.nan_rep
self.attrs.encoding = self.encoding
self.attrs.errors = self.errors
self.attrs.levels = self.levels
self.attrs.info = self.info
def get_attrs(self):
""" retrieve our attributes """
self.non_index_axes = getattr(self.attrs, "non_index_axes", None) or []
self.data_columns = getattr(self.attrs, "data_columns", None) or []
self.info = getattr(self.attrs, "info", None) or {}
self.nan_rep = getattr(self.attrs, "nan_rep", None)
self.encoding = _ensure_encoding(getattr(self.attrs, "encoding", None))
self.errors = _ensure_decoded(getattr(self.attrs, "errors", "strict"))
self.levels: List[Hashable] = getattr(self.attrs, "levels", None) or []
self.index_axes = [a for a in self.indexables if a.is_an_indexable]
self.values_axes = [a for a in self.indexables if not a.is_an_indexable]
def validate_version(self, where=None):
""" are we trying to operate on an old version? """
if where is not None:
if self.version[0] <= 0 and self.version[1] <= 10 and self.version[2] < 1:
ws = incompatibility_doc % ".".join([str(x) for x in self.version])
warnings.warn(ws, IncompatibilityWarning)
def validate_min_itemsize(self, min_itemsize):
"""
validate that min_itemsize doesn't contain items that are not in the
axes; this needs data_columns to be defined
"""
if min_itemsize is None:
return
if not isinstance(min_itemsize, dict):
return
q = self.queryables()
for k in min_itemsize:
# ok, apply generally
if k == "values":
continue
if k not in q:
raise ValueError(
f"min_itemsize has the key [{k}] which is not an axis or "
"data_column"
)
@cache_readonly
def indexables(self):
""" create/cache the indexables if they don't exist """
_indexables = []
desc = self.description
table_attrs = self.table.attrs
# Note: each of the `name` kwargs below are str, ensured
# by the definition in index_cols.
# index columns
for i, (axis, name) in enumerate(self.attrs.index_cols):
atom = getattr(desc, name)
md = self.read_metadata(name)
meta = "category" if md is not None else None
kind_attr = f"{name}_kind"
kind = getattr(table_attrs, kind_attr, None)
index_col = IndexCol(
name=name,
axis=axis,
pos=i,
kind=kind,
typ=atom,
table=self.table,
meta=meta,
metadata=md,
)
_indexables.append(index_col)
# values columns
dc = set(self.data_columns)
base_pos = len(_indexables)
def f(i, c):
assert isinstance(c, str)
klass = DataCol
if c in dc:
klass = DataIndexableCol
atom = getattr(desc, c)
adj_name = _maybe_adjust_name(c, self.version)
# TODO: why kind_attr here?
values = getattr(table_attrs, f"{adj_name}_kind", None)
dtype = getattr(table_attrs, f"{adj_name}_dtype", None)
kind = _dtype_to_kind(dtype)
md = self.read_metadata(c)
# TODO: figure out why these two versions of `meta` don't always match.
# meta = "category" if md is not None else None
meta = getattr(table_attrs, f"{adj_name}_meta", None)
obj = klass(
name=adj_name,
cname=c,
values=values,
kind=kind,
pos=base_pos + i,
typ=atom,
table=self.table,
meta=meta,
metadata=md,
dtype=dtype,
)
return obj
# Note: the definition of `values_cols` ensures that each
# `c` below is a str.
_indexables.extend([f(i, c) for i, c in enumerate(self.attrs.values_cols)])
return _indexables
def create_index(self, columns=None, optlevel=None, kind: Optional[str] = None):
"""
Create a pytables index on the specified columns.
Parameters
----------
columns : None, bool, or listlike[str]
Indicate which columns to create an index on.
* False : Do not create any indexes.
* True : Create indexes on all columns.
* None : Create indexes on all columns.
* listlike : Create indexes on the given columns.
optlevel : int or None, default None
Optimization level, if None, pytables defaults to 6.
kind : str or None, default None
Kind of index, if None, pytables defaults to "medium".
Raises
------
TypeError if trying to create an index on a complex-type column.
Notes
-----
Cannot index Time64Col or ComplexCol.
Pytables must be >= 3.0.
"""
if not self.infer_axes():
return
if columns is False:
return
# index all indexables and data_columns
if columns is None or columns is True:
columns = [a.cname for a in self.axes if a.is_data_indexable]
if not isinstance(columns, (tuple, list)):
columns = [columns]
kw = {}
if optlevel is not None:
kw["optlevel"] = optlevel
if kind is not None:
kw["kind"] = kind
table = self.table
for c in columns:
v = getattr(table.cols, c, None)
if v is not None:
# remove the index if the kind/optlevel have changed
if v.is_indexed:
index = v.index
cur_optlevel = index.optlevel
cur_kind = index.kind
if kind is not None and cur_kind != kind:
v.remove_index()
else:
kw["kind"] = cur_kind
if optlevel is not None and cur_optlevel != optlevel:
v.remove_index()
else:
kw["optlevel"] = cur_optlevel
# create the index
if not v.is_indexed:
if v.type.startswith("complex"):
raise TypeError(
"Columns containing complex values can be stored but "
"cannot be indexed when using table format. Either use "
"fixed format, set index=False, or do not include "
"the columns containing complex values to "
"data_columns when initializing the table."
)
v.create_index(**kw)
elif c in self.non_index_axes[0][1]:
# GH 28156
raise AttributeError(
f"column {c} is not a data_column.\n"
f"In order to read column {c} you must reload the dataframe \n"
f"into HDFStore and include {c} with the data_columns argument."
)
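# Illustrative usage (hypothetical key/column names): the public entry point is
# HDFStore.create_table_index, e.g.
#   >>> store.append("df", df, data_columns=["B"])
#   >>> store.create_table_index("df", columns=["B"], optlevel=9, kind="full")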
def _read_axes(
self, where, start: Optional[int] = None, stop: Optional[int] = None
) -> List[Tuple[ArrayLike, ArrayLike]]:
"""
Create the axes sniffed from the table.
Parameters
----------
where : ???
start : int or None, default None
stop : int or None, default None
Returns
-------
List[Tuple[index_values, column_values]]
"""
# create the selection
selection = Selection(self, where=where, start=start, stop=stop)
values = selection.select()
results = []
# convert the data
for a in self.axes:
a.set_info(self.info)
res = a.convert(
values,
nan_rep=self.nan_rep,
encoding=self.encoding,
errors=self.errors,
)
results.append(res)
return results
@classmethod
def get_object(cls, obj, transposed: bool):
""" return the data for this obj """
return obj
def validate_data_columns(self, data_columns, min_itemsize, non_index_axes):
"""
take the input data_columns and min_itemize and create a data
columns spec
"""
if not len(non_index_axes):
return []
axis, axis_labels = non_index_axes[0]
info = self.info.get(axis, {})
if info.get("type") == "MultiIndex" and data_columns:
raise ValueError(
f"cannot use a multi-index on axis [{axis}] with "
f"data_columns {data_columns}"
)
# evaluate the passed data_columns, True == use all columns
# take only valid axis labels
if data_columns is True:
data_columns = list(axis_labels)
elif data_columns is None:
data_columns = []
# if min_itemsize is a dict, add the keys (exclude 'values')
if isinstance(min_itemsize, dict):
existing_data_columns = set(data_columns)
data_columns = list(data_columns) # ensure we do not modify
data_columns.extend(
[
k
for k in min_itemsize.keys()
if k != "values" and k not in existing_data_columns
]
)
# return valid columns in the order of our axis
return [c for c in data_columns if c in axis_labels]
def _create_axes(
self,
axes,
obj: DataFrame,
validate: bool = True,
nan_rep=None,
data_columns=None,
min_itemsize=None,
):
"""
Create and return the axes.
Parameters
----------
axes: list or None
The names or numbers of the axes to create.
obj : DataFrame
The object to create axes on.
validate: bool, default True
Whether to validate the obj against an existing object already written.
nan_rep :
A value to use for string column nan_rep.
data_columns : List[str], True, or None, default None
Specify the columns that we want to create to allow indexing on.
* True : Use all available columns.
* None : Use no columns.
* List[str] : Use the specified columns.
min_itemsize: Dict[str, int] or None, default None
The min itemsize for a column in bytes.
"""
if not isinstance(obj, DataFrame):
group = self.group._v_name
raise TypeError(
f"cannot properly create the storer for: [group->{group},"
f"value->{type(obj)}]"
)
# set the default axes if needed
if axes is None:
axes = [0]
# map axes to numbers
axes = [obj._get_axis_number(a) for a in axes]
# do we have an existing table (if so, use its axes & data_columns)
if self.infer_axes():
table_exists = True
axes = [a.axis for a in self.index_axes]
data_columns = list(self.data_columns)
nan_rep = self.nan_rep
# TODO: do we always have validate=True here?
else:
table_exists = False
new_info = self.info
assert self.ndim == 2 # with next check, we must have len(axes) == 1
# currently only support ndim-1 axes
if len(axes) != self.ndim - 1:
raise ValueError(
"currently only support ndim-1 indexers in an AppendableTable"
)
# create according to the new data
new_non_index_axes: List = []
# nan_representation
if nan_rep is None:
nan_rep = "nan"
# We construct the non-index-axis first, since that alters new_info
idx = [x for x in [0, 1] if x not in axes][0]
a = obj.axes[idx]
# we might be able to change the axes on the appending data if necessary
append_axis = list(a)
if table_exists:
indexer = len(new_non_index_axes) # i.e. 0
exist_axis = self.non_index_axes[indexer][1]
if not array_equivalent(np.array(append_axis), np.array(exist_axis)):
# ahah! -> reindex
if array_equivalent(
np.array(sorted(append_axis)), np.array(sorted(exist_axis))
):
append_axis = exist_axis
# the non_index_axes info
info = new_info.setdefault(idx, {})
info["names"] = list(a.names)
info["type"] = type(a).__name__
new_non_index_axes.append((idx, append_axis))
# Now we can construct our new index axis
idx = axes[0]
a = obj.axes[idx]
axis_name = obj._get_axis_name(idx)
new_index = _convert_index(axis_name, a, self.encoding, self.errors)
new_index.axis = idx
# Because we are always 2D, there is only one new_index, so
# we know it will have pos=0
new_index.set_pos(0)
new_index.update_info(new_info)
new_index.maybe_set_size(min_itemsize) # check for column conflicts
new_index_axes = [new_index]
j = len(new_index_axes) # i.e. 1
assert j == 1
# reindex by our non_index_axes & compute data_columns
assert len(new_non_index_axes) == 1
for a in new_non_index_axes:
obj = _reindex_axis(obj, a[0], a[1])
transposed = new_index.axis == 1
# figure out data_columns and get out blocks
data_columns = self.validate_data_columns(
data_columns, min_itemsize, new_non_index_axes
)
frame = self.get_object(obj, transposed)._consolidate()
blocks, blk_items = self._get_blocks_and_items(
frame, table_exists, new_non_index_axes, self.values_axes, data_columns
)
# add my values
vaxes = []
for i, (blk, b_items) in enumerate(zip(blocks, blk_items)):
# shape of the data column are the indexable axes
klass = DataCol
name = None
# we have a data_column
if data_columns and len(b_items) == 1 and b_items[0] in data_columns:
klass = DataIndexableCol
name = b_items[0]
if not (name is None or isinstance(name, str)):
# TODO: should the message here be more specifically non-str?
raise ValueError("cannot have non-object label DataIndexableCol")
# make sure that we match up the existing columns
# if we have an existing table
existing_col: Optional[DataCol]
if table_exists and validate:
try:
existing_col = self.values_axes[i]
except (IndexError, KeyError) as err:
raise ValueError(
f"Incompatible appended table [{blocks}]"
f"with existing table [{self.values_axes}]"
) from err
else:
existing_col = None
new_name = name or f"values_block_{i}"
data_converted = _maybe_convert_for_string_atom(
new_name,
blk,
existing_col=existing_col,
min_itemsize=min_itemsize,
nan_rep=nan_rep,
encoding=self.encoding,
errors=self.errors,
columns=b_items,
)
adj_name = _maybe_adjust_name(new_name, self.version)
typ = klass._get_atom(data_converted)
kind = _dtype_to_kind(data_converted.dtype.name)
tz = None
if getattr(data_converted, "tz", None) is not None:
tz = _get_tz(data_converted.tz)
meta = metadata = ordered = None
if is_categorical_dtype(data_converted.dtype):
ordered = data_converted.ordered
meta = "category"
metadata = np.array(data_converted.categories, copy=False).ravel()
data, dtype_name = _get_data_and_dtype_name(data_converted)
col = klass(
name=adj_name,
cname=new_name,
values=list(b_items),
typ=typ,
pos=j,
kind=kind,
tz=tz,
ordered=ordered,
meta=meta,
metadata=metadata,
dtype=dtype_name,
data=data,
)
col.update_info(new_info)
vaxes.append(col)
j += 1
dcs = [col.name for col in vaxes if col.is_data_indexable]
new_table = type(self)(
parent=self.parent,
group=self.group,
encoding=self.encoding,
errors=self.errors,
index_axes=new_index_axes,
non_index_axes=new_non_index_axes,
values_axes=vaxes,
data_columns=dcs,
info=new_info,
nan_rep=nan_rep,
)
if hasattr(self, "levels"):
# TODO: get this into constructor, only for appropriate subclass
new_table.levels = self.levels
new_table.validate_min_itemsize(min_itemsize)
if validate and table_exists:
new_table.validate(self)
return new_table
@staticmethod
def _get_blocks_and_items(
frame: DataFrame,
table_exists: bool,
new_non_index_axes,
values_axes,
data_columns,
):
# Helper to clarify non-state-altering parts of _create_axes
def get_blk_items(mgr):
return [mgr.items.take(blk.mgr_locs) for blk in mgr.blocks]
mgr = frame._mgr
mgr = cast(BlockManager, mgr)
blocks: List[Block] = list(mgr.blocks)
blk_items: List[Index] = get_blk_items(mgr)
if len(data_columns):
axis, axis_labels = new_non_index_axes[0]
new_labels = Index(axis_labels).difference(Index(data_columns))
mgr = frame.reindex(new_labels, axis=axis)._mgr
# error: Item "ArrayManager" of "Union[ArrayManager, BlockManager]" has no
# attribute "blocks"
blocks = list(mgr.blocks) # type: ignore[union-attr]
blk_items = get_blk_items(mgr)
for c in data_columns:
mgr = frame.reindex([c], axis=axis)._mgr
# error: Item "ArrayManager" of "Union[ArrayManager, BlockManager]" has
# no attribute "blocks"
blocks.extend(mgr.blocks) # type: ignore[union-attr]
blk_items.extend(get_blk_items(mgr))
# reorder the blocks in the same order as the existing table if we can
if table_exists:
by_items = {
tuple(b_items.tolist()): (b, b_items)
for b, b_items in zip(blocks, blk_items)
}
new_blocks: List[Block] = []
new_blk_items = []
for ea in values_axes:
items = tuple(ea.values)
try:
b, b_items = by_items.pop(items)
new_blocks.append(b)
new_blk_items.append(b_items)
except (IndexError, KeyError) as err:
jitems = ",".join(pprint_thing(item) for item in items)
raise ValueError(
f"cannot match existing table structure for [{jitems}] "
"on appending data"
) from err
blocks = new_blocks
blk_items = new_blk_items
return blocks, blk_items
def process_axes(self, obj, selection: Selection, columns=None):
""" process axes filters """
# make a copy to avoid side effects
if columns is not None:
columns = list(columns)
# make sure to include levels if we have them
if columns is not None and self.is_multi_index:
assert isinstance(self.levels, list) # assured by is_multi_index
for n in self.levels:
if n not in columns:
columns.insert(0, n)
# reorder by any non_index_axes & limit to the select columns
for axis, labels in self.non_index_axes:
obj = _reindex_axis(obj, axis, labels, columns)
# apply the selection filters (but keep in the same order)
if selection.filter is not None:
for field, op, filt in selection.filter.format():
def process_filter(field, filt):
for axis_name in obj._AXIS_ORDERS:
axis_number = obj._get_axis_number(axis_name)
axis_values = obj._get_axis(axis_name)
assert axis_number is not None
# see if the field is the name of an axis
if field == axis_name:
# if we have a multi-index, then need to include
# the levels
if self.is_multi_index:
filt = filt.union(Index(self.levels))
takers = op(axis_values, filt)
return obj.loc(axis=axis_number)[takers]
# this might be the name of a field IN an axis
elif field in axis_values:
# we need to filter on this dimension
values = ensure_index(getattr(obj, field).values)
filt = ensure_index(filt)
# hack until we support reversed dim flags
if isinstance(obj, DataFrame):
axis_number = 1 - axis_number
takers = op(values, filt)
return obj.loc(axis=axis_number)[takers]
raise ValueError(f"cannot find the field [{field}] for filtering!")
obj = process_filter(field, filt)
return obj
def create_description(
self,
complib,
complevel: Optional[int],
fletcher32: bool,
expectedrows: Optional[int],
) -> Dict[str, Any]:
""" create the description of the table from the axes & values """
# use the provided expectedrows if it was passed
if expectedrows is None:
expectedrows = max(self.nrows_expected, 10000)
d = {"name": "table", "expectedrows": expectedrows}
# description from the axes & values
d["description"] = {a.cname: a.typ for a in self.axes}
if complib:
if complevel is None:
complevel = self._complevel or 9
filters = _tables().Filters(
complevel=complevel,
complib=complib,
fletcher32=fletcher32 or self._fletcher32,
)
d["filters"] = filters
elif self._filters is not None:
d["filters"] = self._filters
return d
def read_coordinates(
self, where=None, start: Optional[int] = None, stop: Optional[int] = None
):
"""
select coordinates (row numbers) from a table; return the
coordinates object
"""
# validate the version
self.validate_version(where)
# infer the data kind
if not self.infer_axes():
return False
# create the selection
selection = Selection(self, where=where, start=start, stop=stop)
coords = selection.select_coords()
if selection.filter is not None:
for field, op, filt in selection.filter.format():
data = self.read_column(
field, start=coords.min(), stop=coords.max() + 1
)
coords = coords[op(data.iloc[coords - coords.min()], filt).values]
return Index(coords)
def read_column(
self,
column: str,
where=None,
start: Optional[int] = None,
stop: Optional[int] = None,
):
"""
return a single column from the table, generally only indexables
are interesting
"""
# validate the version
self.validate_version()
# infer the data kind
if not self.infer_axes():
return False
if where is not None:
raise TypeError("read_column does not currently accept a where clause")
# find the axes
for a in self.axes:
if column == a.name:
if not a.is_data_indexable:
raise ValueError(
f"column [{column}] can not be extracted individually; "
"it is not data indexable"
)
# column must be an indexable or a data column
c = getattr(self.table.cols, column)
a.set_info(self.info)
col_values = a.convert(
c[start:stop],
nan_rep=self.nan_rep,
encoding=self.encoding,
errors=self.errors,
)
return Series(_set_tz(col_values[1], a.tz), name=column)
raise KeyError(f"column [{column}] not found in the table")
class WORMTable(Table):
"""
a write-once read-many table: this format DOES NOT ALLOW appending to a
table. Writing is a one-time operation; the data are stored in a format
that allows for searching the data on disk
"""
table_type = "worm"
def read(
self,
where=None,
columns=None,
start: Optional[int] = None,
stop: Optional[int] = None,
):
"""
read the indices and the indexing array, calculate offset rows and return
"""
raise NotImplementedError("WORMTable needs to implement read")
def write(self, **kwargs):
"""
write in a format that we can search later on (but cannot append
to): write out the indices and the values using _write_array
(e.g. a CArray) create an indexing table so that we can search
"""
raise NotImplementedError("WORMTable needs to implement write")
class AppendableTable(Table):
""" support the new appendable table formats """
table_type = "appendable"
def write(
self,
obj,
axes=None,
append=False,
complib=None,
complevel=None,
fletcher32=None,
min_itemsize=None,
chunksize=None,
expectedrows=None,
dropna=False,
nan_rep=None,
data_columns=None,
track_times=True,
):
if not append and self.is_exists:
self._handle.remove_node(self.group, "table")
# create the axes
table = self._create_axes(
axes=axes,
obj=obj,
validate=append,
min_itemsize=min_itemsize,
nan_rep=nan_rep,
data_columns=data_columns,
)
for a in table.axes:
a.validate_names()
if not table.is_exists:
# create the table
options = table.create_description(
complib=complib,
complevel=complevel,
fletcher32=fletcher32,
expectedrows=expectedrows,
)
# set the table attributes
table.set_attrs()
options["track_times"] = track_times
# create the table
table._handle.create_table(table.group, **options)
# update my info
table.attrs.info = table.info
# validate the axes and set the kinds
for a in table.axes:
a.validate_and_set(table, append)
# add the rows
table.write_data(chunksize, dropna=dropna)
def write_data(self, chunksize: Optional[int], dropna: bool = False):
"""
we form the data into a 2-d including indexes, values and mask, and write it out chunk-by-chunk
"""
names = self.dtype.names
nrows = self.nrows_expected
# if dropna==True, then drop ALL nan rows
masks = []
if dropna:
for a in self.values_axes:
# figure the mask: only do if we can successfully process this
# column, otherwise ignore the mask
mask = isna(a.data).all(axis=0)
if isinstance(mask, np.ndarray):
masks.append(mask.astype("u1", copy=False))
# consolidate masks
if len(masks):
mask = masks[0]
for m in masks[1:]:
mask = mask & m
mask = mask.ravel()
else:
mask = None
# broadcast the indexes if needed
indexes = [a.cvalues for a in self.index_axes]
nindexes = len(indexes)
assert nindexes == 1, nindexes  # ensures we don't need to broadcast
# transpose the values so first dimension is last
# reshape the values if needed
values = [a.take_data() for a in self.values_axes]
values = [v.transpose(np.roll(np.arange(v.ndim), v.ndim - 1)) for v in values]
bvalues = []
for i, v in enumerate(values):
new_shape = (nrows,) + self.dtype[names[nindexes + i]].shape
bvalues.append(values[i].reshape(new_shape))
# write the chunks
if chunksize is None:
chunksize = 100000
rows = np.empty(min(chunksize, nrows), dtype=self.dtype)
chunks = nrows // chunksize + 1
for i in range(chunks):
start_i = i * chunksize
end_i = min((i + 1) * chunksize, nrows)
if start_i >= end_i:
break
self.write_data_chunk(
rows,
indexes=[a[start_i:end_i] for a in indexes],
mask=mask[start_i:end_i] if mask is not None else None,
values=[v[start_i:end_i] for v in bvalues],
)
def write_data_chunk(
self,
rows: np.ndarray,
indexes: List[np.ndarray],
mask: Optional[np.ndarray],
values: List[np.ndarray],
):
"""
Parameters
----------
rows : an empty memory space where we are putting the chunk
indexes : an array of the indexes
mask : an array of the masks
values : an array of the values
"""
# 0 len
for v in values:
if not np.prod(v.shape):
return
nrows = indexes[0].shape[0]
if nrows != len(rows):
rows = np.empty(nrows, dtype=self.dtype)
names = self.dtype.names
nindexes = len(indexes)
# indexes
for i, idx in enumerate(indexes):
rows[names[i]] = idx
# values
for i, v in enumerate(values):
rows[names[i + nindexes]] = v
# mask
if mask is not None:
m = ~mask.ravel().astype(bool, copy=False)
if not m.all():
rows = rows[m]
if len(rows):
self.table.append(rows)
self.table.flush()
def delete(
self, where=None, start: Optional[int] = None, stop: Optional[int] = None
):
# delete all rows (and return the nrows)
if where is None or not len(where):
if start is None and stop is None:
nrows = self.nrows
self._handle.remove_node(self.group, recursive=True)
else:
# pytables<3.0 would remove a single row with stop=None
if stop is None:
stop = self.nrows
nrows = self.table.remove_rows(start=start, stop=stop)
self.table.flush()
return nrows
# infer the data kind
if not self.infer_axes():
return None
# create the selection
table = self.table
selection = Selection(self, where, start=start, stop=stop)
values = selection.select_coords()
# delete the rows in reverse order
sorted_series = Series(values).sort_values()
ln = len(sorted_series)
if ln:
# construct groups of consecutive rows
diff = sorted_series.diff()
groups = list(diff[diff > 1].index)
# 1 group
if not len(groups):
groups = [0]
# final element
if groups[-1] != ln:
groups.append(ln)
# initial element
if groups[0] != 0:
groups.insert(0, 0)
# we must remove in reverse order!
pg = groups.pop()
for g in reversed(groups):
rows = sorted_series.take(range(g, pg))
table.remove_rows(
start=rows[rows.index[0]], stop=rows[rows.index[-1]] + 1
)
pg = g
self.table.flush()
# return the number of rows removed
return ln
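# Illustrative usage (hypothetical key/condition, assuming "index" is queryable):
# reached via HDFStore.remove, which returns the number of rows removed for
# table-format stores, e.g.
#   >>> store.remove("df", where="index > 5")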
class AppendableFrameTable(AppendableTable):
""" support the new appendable table formats """
pandas_kind = "frame_table"
table_type = "appendable_frame"
ndim = 2
obj_type: Type[FrameOrSeriesUnion] = DataFrame
@property
def is_transposed(self) -> bool:
return self.index_axes[0].axis == 1
@classmethod
def get_object(cls, obj, transposed: bool):
""" these are written transposed """
if transposed:
obj = obj.T
return obj
def read(
self,
where=None,
columns=None,
start: Optional[int] = None,
stop: Optional[int] = None,
):
# validate the version
self.validate_version(where)
# infer the data kind
if not self.infer_axes():
return None
result = self._read_axes(where=where, start=start, stop=stop)
info = (
self.info.get(self.non_index_axes[0][0], {})
if len(self.non_index_axes)
else {}
)
inds = [i for i, ax in enumerate(self.axes) if ax is self.index_axes[0]]
assert len(inds) == 1
ind = inds[0]
index = result[ind][0]
frames = []
for i, a in enumerate(self.axes):
if a not in self.values_axes:
continue
index_vals, cvalues = result[i]
# we could have a multi-index constructor here
# ensure_index doesn't recognize our list-of-tuples here
if info.get("type") == "MultiIndex":
cols = MultiIndex.from_tuples(index_vals)
else:
cols = Index(index_vals)
names = info.get("names")
if names is not None:
cols.set_names(names, inplace=True)
if self.is_transposed:
values = cvalues
index_ = cols
cols_ = Index(index, name=getattr(index, "name", None))
else:
values = cvalues.T
index_ = Index(index, name=getattr(index, "name", None))
cols_ = cols
# if we have a DataIndexableCol, its shape will only be 1 dim
if values.ndim == 1 and isinstance(values, np.ndarray):
values = values.reshape((1, values.shape[0]))
if isinstance(values, np.ndarray):
df = DataFrame(values.T, columns=cols_, index=index_)
elif isinstance(values, Index):
df = DataFrame(values, columns=cols_, index=index_)
else:
# Categorical
df = DataFrame([values], columns=cols_, index=index_)
assert (df.dtypes == values.dtype).all(), (df.dtypes, values.dtype)
frames.append(df)
if len(frames) == 1:
df = frames[0]
else:
df = concat(frames, axis=1)
selection = Selection(self, where=where, start=start, stop=stop)
# apply the selection filters & axis orderings
df = self.process_axes(df, selection=selection, columns=columns)
return df
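# Illustrative usage (hypothetical key/columns, assuming "A" was stored as a
# data_column): this is the path taken by HDFStore.select on a table-format
# frame, e.g.
#   >>> store.select("df", where="A > 0", columns=["A", "B"], start=0, stop=100)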
class AppendableSeriesTable(AppendableFrameTable):
""" support the new appendable table formats """
pandas_kind = "series_table"
table_type = "appendable_series"
ndim = 2
obj_type = Series
@property
def is_transposed(self) -> bool:
return False
@classmethod
def get_object(cls, obj, transposed: bool):
return obj
def write(self, obj, data_columns=None, **kwargs):
""" we are going to write this as a frame table """
if not isinstance(obj, DataFrame):
name = obj.name or "values"
obj = obj.to_frame(name)
return super().write(obj=obj, data_columns=obj.columns.tolist(), **kwargs)
def read(
self,
where=None,
columns=None,
start: Optional[int] = None,
stop: Optional[int] = None,
) -> Series:
is_multi_index = self.is_multi_index
if columns is not None and is_multi_index:
assert isinstance(self.levels, list) # needed for mypy
for n in self.levels:
if n not in columns:
columns.insert(0, n)
s = super().read(where=where, columns=columns, start=start, stop=stop)
if is_multi_index:
s.set_index(self.levels, inplace=True)
s = s.iloc[:, 0]
# remove the default name
if s.name == "values":
s.name = None
return s
class AppendableMultiSeriesTable(AppendableSeriesTable):
""" support the new appendable table formats """
pandas_kind = "series_table"
table_type = "appendable_multiseries"
def write(self, obj, **kwargs):
""" we are going to write this as a frame table """
name = obj.name or "values"
newobj, self.levels = self.validate_multiindex(obj)
assert isinstance(self.levels, list) # for mypy
cols = list(self.levels)
cols.append(name)
newobj.columns = Index(cols)
return super().write(obj=newobj, **kwargs)
class GenericTable(AppendableFrameTable):
""" a table that read/writes the generic pytables table format """
pandas_kind = "frame_table"
table_type = "generic_table"
ndim = 2
obj_type = DataFrame
levels: List[Hashable]
@property
def pandas_type(self) -> str:
return self.pandas_kind
@property
def storable(self):
return getattr(self.group, "table", None) or self.group
def get_attrs(self):
""" retrieve our attributes """
self.non_index_axes = []
self.nan_rep = None
self.levels = []
self.index_axes = [a for a in self.indexables if a.is_an_indexable]
self.values_axes = [a for a in self.indexables if not a.is_an_indexable]
self.data_columns = [a.name for a in self.values_axes]
@cache_readonly
def indexables(self):
""" create the indexables from the table description """
d = self.description
# TODO: can we get a typ for this? AFAICT it is the only place
# where we aren't passing one
# the index columns is just a simple index
md = self.read_metadata("index")
meta = "category" if md is not None else None
index_col = GenericIndexCol(
name="index", axis=0, table=self.table, meta=meta, metadata=md
)
_indexables: List[Union[GenericIndexCol, GenericDataIndexableCol]] = [index_col]
for i, n in enumerate(d._v_names):
assert isinstance(n, str)
atom = getattr(d, n)
md = self.read_metadata(n)
meta = "category" if md is not None else None
dc = GenericDataIndexableCol(
name=n,
pos=i,
values=[n],
typ=atom,
table=self.table,
meta=meta,
metadata=md,
)
_indexables.append(dc)
return _indexables
def write(self, **kwargs):
raise NotImplementedError("cannot write on an generic table")
class AppendableMultiFrameTable(AppendableFrameTable):
""" a frame with a multi-index """
table_type = "appendable_multiframe"
obj_type = DataFrame
ndim = 2
_re_levels = re.compile(r"^level_\d+$")
@property
def table_type_short(self) -> str:
return "appendable_multi"
def write(self, obj, data_columns=None, **kwargs):
if data_columns is None:
data_columns = []
elif data_columns is True:
data_columns = obj.columns.tolist()
obj, self.levels = self.validate_multiindex(obj)
assert isinstance(self.levels, list) # for mypy
for n in self.levels:
if n not in data_columns:
data_columns.insert(0, n)
return super().write(obj=obj, data_columns=data_columns, **kwargs)
def read(
self,
where=None,
columns=None,
start: Optional[int] = None,
stop: Optional[int] = None,
):
df = super().read(where=where, columns=columns, start=start, stop=stop)
df = df.set_index(self.levels)
# remove names for 'level_%d'
df.index = df.index.set_names(
[None if self._re_levels.search(name) else name for name in df.index.names]
)
return df
def _reindex_axis(obj: DataFrame, axis: int, labels: Index, other=None) -> DataFrame:
ax = obj._get_axis(axis)
labels = ensure_index(labels)
# try not to reindex even if other is provided
# if it equals our current index
if other is not None:
other = ensure_index(other)
if (other is None or labels.equals(other)) and labels.equals(ax):
return obj
labels = ensure_index(labels.unique())
if other is not None:
labels = ensure_index(other.unique()).intersection(labels, sort=False)
if not labels.equals(ax):
slicer: List[Union[slice, Index]] = [slice(None, None)] * obj.ndim
slicer[axis] = labels
obj = obj.loc[tuple(slicer)]
return obj
# tz to/from coercion
def _get_tz(tz: tzinfo) -> Union[str, tzinfo]:
""" for a tz-aware type, return an encoded zone """
zone = timezones.get_timezone(tz)
return zone
def _set_tz(
values: Union[np.ndarray, Index],
tz: Optional[Union[str, tzinfo]],
coerce: bool = False,
) -> Union[np.ndarray, DatetimeIndex]:
"""
coerce the values to a DatetimeIndex if tz is set
preserve the input shape if possible
Parameters
----------
values : ndarray or Index
tz : str or tzinfo
coerce : if we do not have a passed timezone, coerce to M8[ns] ndarray
"""
if isinstance(values, DatetimeIndex):
# If values is tzaware, the tz gets dropped in the values.ravel()
# call below (which returns an ndarray). So we are only non-lossy
# if `tz` matches `values.tz`.
assert values.tz is None or values.tz == tz
if tz is not None:
if isinstance(values, DatetimeIndex):
name = values.name
values = values.asi8
else:
name = None
values = values.ravel()
tz = _ensure_decoded(tz)
values = DatetimeIndex(values, name=name)
values = values.tz_localize("UTC").tz_convert(tz)
elif coerce:
values = np.asarray(values, dtype="M8[ns]")
# error: Incompatible return value type (got "Union[ndarray, Index]",
# expected "Union[ndarray, DatetimeIndex]")
return values # type: ignore[return-value]
def _convert_index(name: str, index: Index, encoding: str, errors: str) -> IndexCol:
assert isinstance(name, str)
index_name = index.name
# error: Argument 1 to "_get_data_and_dtype_name" has incompatible type "Index";
# expected "Union[ExtensionArray, ndarray]"
converted, dtype_name = _get_data_and_dtype_name(index) # type: ignore[arg-type]
kind = _dtype_to_kind(dtype_name)
atom = DataIndexableCol._get_atom(converted)
if isinstance(index, Int64Index) or needs_i8_conversion(index.dtype):
# Includes Int64Index, RangeIndex, DatetimeIndex, TimedeltaIndex, PeriodIndex,
# in which case "kind" is "integer", "integer", "datetime64",
# "timedelta64", and "integer", respectively.
return IndexCol(
name,
values=converted,
kind=kind,
typ=atom,
freq=getattr(index, "freq", None),
tz=getattr(index, "tz", None),
index_name=index_name,
)
if isinstance(index, MultiIndex):
raise TypeError("MultiIndex not supported here!")
inferred_type = lib.infer_dtype(index, skipna=False)
# we won't get inferred_type of "datetime64" or "timedelta64" as these
# would go through the DatetimeIndex/TimedeltaIndex paths above
values = np.asarray(index)
if inferred_type == "date":
converted = np.asarray([v.toordinal() for v in values], dtype=np.int32)
return IndexCol(
name, converted, "date", _tables().Time32Col(), index_name=index_name
)
elif inferred_type == "string":
converted = _convert_string_array(values, encoding, errors)
itemsize = converted.dtype.itemsize
return IndexCol(
name,
converted,
"string",
_tables().StringCol(itemsize),
index_name=index_name,
)
elif inferred_type in ["integer", "floating"]:
return IndexCol(
name, values=converted, kind=kind, typ=atom, index_name=index_name
)
else:
assert isinstance(converted, np.ndarray) and converted.dtype == object
assert kind == "object", kind
atom = _tables().ObjectAtom()
return IndexCol(name, converted, kind, atom, index_name=index_name)
def _unconvert_index(
data, kind: str, encoding: str, errors: str
) -> Union[np.ndarray, Index]:
index: Union[Index, np.ndarray]
if kind == "datetime64":
index = DatetimeIndex(data)
elif kind == "timedelta64":
index = TimedeltaIndex(data)
elif kind == "date":
try:
index = np.asarray([date.fromordinal(v) for v in data], dtype=object)
except ValueError:
index = np.asarray([date.fromtimestamp(v) for v in data], dtype=object)
elif kind in ("integer", "float"):
index = np.asarray(data)
elif kind in ("string"):
index = _unconvert_string_array(
data, nan_rep=None, encoding=encoding, errors=errors
)
elif kind == "object":
index = np.asarray(data[0])
else: # pragma: no cover
raise ValueError(f"unrecognized index type {kind}")
return index
def _maybe_convert_for_string_atom(
name: str,
block: Block,
existing_col,
min_itemsize,
nan_rep,
encoding,
errors,
columns: List[str],
):
bvalues = block.values
if bvalues.dtype != object:
return bvalues
dtype_name = bvalues.dtype.name
inferred_type = lib.infer_dtype(bvalues, skipna=False)
if inferred_type == "date":
raise TypeError("[date] is not implemented as a table column")
elif inferred_type == "datetime":
# after GH#8260
# this only would be hit for a multi-timezone dtype which is an error
raise TypeError(
"too many timezones in this block, create separate data columns"
)
elif not (inferred_type == "string" or dtype_name == "object"):
return bvalues
blocks: List[Block] = block.fillna(nan_rep, downcast=False)
# Note: because block is always object dtype, fillna goes
# through a path such that the result is always a 1-element list
assert len(blocks) == 1
block = blocks[0]
data = block.values
# see if we have a valid string type
inferred_type = lib.infer_dtype(data, skipna=False)
if inferred_type != "string":
# we cannot serialize this data, so report an exception on a column
# by column basis
# expected behaviour:
# search block for a non-string object column by column
for i in range(data.shape[0]):
col = block.iget(i)
inferred_type = lib.infer_dtype(col, skipna=False)
if inferred_type != "string":
error_column_label = columns[i] if len(columns) > i else f"No.{i}"
raise TypeError(
f"Cannot serialize the column [{error_column_label}]\n"
f"because its data contents are not [string] but "
f"[{inferred_type}] object dtype"
)
# itemsize is the maximum length of a string (along any dimension)
# error: Argument 1 to "_convert_string_array" has incompatible type "Union[ndarray,
# ExtensionArray]"; expected "ndarray"
data_converted = _convert_string_array(
data, encoding, errors # type: ignore[arg-type]
).reshape(data.shape)
itemsize = data_converted.itemsize
# specified min_itemsize?
if isinstance(min_itemsize, dict):
min_itemsize = int(min_itemsize.get(name) or min_itemsize.get("values") or 0)
itemsize = max(min_itemsize or 0, itemsize)
# check for column in the values conflicts
if existing_col is not None:
eci = existing_col.validate_col(itemsize)
if eci > itemsize:
itemsize = eci
data_converted = data_converted.astype(f"|S{itemsize}", copy=False)
return data_converted
def _convert_string_array(data: np.ndarray, encoding: str, errors: str) -> np.ndarray:
"""
Take a string-like that is object dtype and coerce to a fixed size string type.
Parameters
----------
data : np.ndarray[object]
encoding : str
errors : str
Handler for encoding errors.
Returns
-------
np.ndarray[fixed-length-string]
"""
# encode if needed
if len(data):
data = (
Series(data.ravel())
.str.encode(encoding, errors)
._values.reshape(data.shape)
)
# create the sized dtype
ensured = ensure_object(data.ravel())
itemsize = max(1, libwriters.max_len_string_array(ensured))
data = np.asarray(data, dtype=f"S{itemsize}")
return data
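# Illustrative example (assumed inputs): object-dtype strings are encoded and
# sized to the longest element, e.g.
#   >>> _convert_string_array(np.array(["a", "bb"], dtype=object), "UTF-8", "strict")
#   array([b'a', b'bb'], dtype='|S2')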
def _unconvert_string_array(
data: np.ndarray, nan_rep, encoding: str, errors: str
) -> np.ndarray:
"""
Inverse of _convert_string_array.
Parameters
----------
data : np.ndarray[fixed-length-string]
nan_rep : the storage repr of NaN
encoding : str
errors : str
Handler for encoding errors.
Returns
-------
np.ndarray[object]
Decoded data.
"""
shape = data.shape
data = np.asarray(data.ravel(), dtype=object)
if len(data):
itemsize = libwriters.max_len_string_array(ensure_object(data))
dtype = f"U{itemsize}"
if isinstance(data[0], bytes):
data = Series(data).str.decode(encoding, errors=errors)._values
else:
data = data.astype(dtype, copy=False).astype(object, copy=False)
if nan_rep is None:
nan_rep = "nan"
data = libwriters.string_array_replace_from_nan_rep(data, nan_rep)
return data.reshape(shape)
def _maybe_convert(values: np.ndarray, val_kind: str, encoding: str, errors: str):
assert isinstance(val_kind, str), type(val_kind)
if _need_convert(val_kind):
conv = _get_converter(val_kind, encoding, errors)
values = conv(values)
return values
def _get_converter(kind: str, encoding: str, errors: str):
if kind == "datetime64":
return lambda x: np.asarray(x, dtype="M8[ns]")
elif kind == "string":
return lambda x: _unconvert_string_array(
x, nan_rep=None, encoding=encoding, errors=errors
)
else: # pragma: no cover
raise ValueError(f"invalid kind {kind}")
def _need_convert(kind: str) -> bool:
if kind in ("datetime64", "string"):
return True
return False
def _maybe_adjust_name(name: str, version: Sequence[int]) -> str:
"""
Prior to 0.10.1, we named values blocks like: values_block_0 and the
name values_0; adjust the given name if necessary.
Parameters
----------
name : str
version : Tuple[int, int, int]
Returns
-------
str
"""
if isinstance(version, str) or len(version) < 3:
raise ValueError("Version is incorrect, expected sequence of 3 integers.")
if version[0] == 0 and version[1] <= 10 and version[2] == 0:
m = re.search(r"values_block_(\d+)", name)
if m:
grp = m.groups()[0]
name = f"values_{grp}"
return name
def _dtype_to_kind(dtype_str: str) -> str:
"""
Find the "kind" string describing the given dtype name.
"""
dtype_str = _ensure_decoded(dtype_str)
if dtype_str.startswith("string") or dtype_str.startswith("bytes"):
kind = "string"
elif dtype_str.startswith("float"):
kind = "float"
elif dtype_str.startswith("complex"):
kind = "complex"
elif dtype_str.startswith("int") or dtype_str.startswith("uint"):
kind = "integer"
elif dtype_str.startswith("datetime64"):
kind = "datetime64"
elif dtype_str.startswith("timedelta"):
kind = "timedelta64"
elif dtype_str.startswith("bool"):
kind = "bool"
elif dtype_str.startswith("category"):
kind = "category"
elif dtype_str.startswith("period"):
# We store the `freq` attr so we can restore from integers
kind = "integer"
elif dtype_str == "object":
kind = "object"
else:
raise ValueError(f"cannot interpret dtype of [{dtype_str}]")
return kind
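# Example (hypothetical values):
#   _dtype_to_kind("int64")          -> "integer"
#   _dtype_to_kind("datetime64[ns]") -> "datetime64"
#   _dtype_to_kind("period[M]")      -> "integer"   (periods are stored as ordinals; freq kept separately)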
def _get_data_and_dtype_name(data: ArrayLike):
"""
Convert the passed data into a storable form and a dtype string.
"""
if isinstance(data, Categorical):
data = data.codes
# For datetime64tz we need to drop the TZ in tests TODO: why?
dtype_name = data.dtype.name.split("[")[0]
if data.dtype.kind in ["m", "M"]:
data = np.asarray(data.view("i8"))
# TODO: we used to reshape for the dt64tz case, but no longer
# doing that doesn't seem to break anything. why?
elif isinstance(data, PeriodIndex):
data = data.asi8
data = np.asarray(data)
return data, dtype_name
class Selection:
"""
Carries out a selection operation on a tables.Table object.
Parameters
----------
table : a Table object
where : list of Terms (or convertible to)
start, stop: indices to start and/or stop selection
"""
def __init__(
self,
table: Table,
where=None,
start: Optional[int] = None,
stop: Optional[int] = None,
):
self.table = table
self.where = where
self.start = start
self.stop = stop
self.condition = None
self.filter = None
self.terms = None
self.coordinates = None
if is_list_like(where):
# see if we have a passed coordinate like
with suppress(ValueError):
inferred = lib.infer_dtype(where, skipna=False)
if inferred == "integer" or inferred == "boolean":
where = np.asarray(where)
if where.dtype == np.bool_:
start, stop = self.start, self.stop
if start is None:
start = 0
if stop is None:
stop = self.table.nrows
self.coordinates = np.arange(start, stop)[where]
elif issubclass(where.dtype.type, np.integer):
if (self.start is not None and (where < self.start).any()) or (
self.stop is not None and (where >= self.stop).any()
):
raise ValueError(
"where must have index locations >= start and < stop"
)
self.coordinates = where
if self.coordinates is None:
self.terms = self.generate(where)
# create the numexpr & the filter
if self.terms is not None:
self.condition, self.filter = self.terms.evaluate()
def generate(self, where):
""" where can be a : dict,list,tuple,string """
if where is None:
return None
q = self.table.queryables()
try:
return PyTablesExpr(where, queryables=q, encoding=self.table.encoding)
except NameError as err:
# raise a nice message, suggesting that the user should use
# data_columns
qkeys = ",".join(q.keys())
msg = dedent(
f"""\
The passed where expression: {where}
contains an invalid variable reference
all of the variable references must be a reference to
an axis (e.g. 'index' or 'columns'), or a data_column
The currently defined references are: {qkeys}
"""
)
raise ValueError(msg) from err
def select(self):
"""
generate the selection
"""
if self.condition is not None:
return self.table.table.read_where(
self.condition.format(), start=self.start, stop=self.stop
)
elif self.coordinates is not None:
return self.table.table.read_coordinates(self.coordinates)
return self.table.table.read(start=self.start, stop=self.stop)
def select_coords(self):
"""
generate the selection
"""
start, stop = self.start, self.stop
nrows = self.table.nrows
if start is None:
start = 0
elif start < 0:
start += nrows
if stop is None:
stop = nrows
elif stop < 0:
stop += nrows
if self.condition is not None:
return self.table.table.get_where_list(
self.condition.format(), start=start, stop=stop, sort=True
)
elif self.coordinates is not None:
return self.coordinates
return np.arange(start, stop)
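# A minimal sketch (hypothetical table and condition) of how Selection is typically used:
#   sel = Selection(tbl, where="index > 5")
#   rows = sel.select()                                        # evaluates the condition via PyTables
#   coords = Selection(tbl, where=[0, 2, 4]).select_coords()   # explicit row locations
# Here "index" must be an axis name or a declared data_column of the table.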
|
class script(object):
START_MSG = """ <b>ʜᴇʟʟᴏ {}
ʏᴏᴜ ᴄᴀɴ ᴄᴀʟʟ ᴛʜɪꜱ ᴀꜱ ᴀɴ ᴀᴜᴛᴏ ꜰɪʟᴛᴇʀ ʙᴏᴛ ɪꜰ ʏᴏᴜ ʟɪᴋᴇ :ᴅ
ᴛʜɪꜱ ɪꜱ ᴠᴇʀꜱɪᴏɴ 2 ᴏꜰ ᴀᴜᴛᴏ ꜰɪʟᴛᴇʀ ʙᴏᴛ
ʙᴏᴛ ɢɪᴠᴇꜱ ʙᴜᴛᴛᴏɴ ʟɪɴᴋ ᴛᴏ ꜰɪʟᴇꜱ ɪɴ ᴄᴏɴɴᴇᴄᴛᴇᴅ ᴄʜᴀɴɴᴇʟꜱ ᴏɴ Qᴜᴇʀʏ !
ɴᴏ ɴᴇᴇᴅ ᴛᴏ ᴀᴅᴅ ꜰɪʟᴛᴇʀꜱ ꜰᴏʀ ʏᴏᴜʀ ꜰɪʟᴇꜱ ᴏʀ ᴍᴏᴠɪᴇꜱ ꜰʀᴏᴍ ɴᴏᴡ ᴏɴ!
ꜰᴏʀ ᴍᴏʀᴇ ᴄʟɪᴄᴋ <i>/help</i></b>"""
HELP_MSG = """<b>ʜᴏᴡ ᴛᴏ ᴜꜱᴇ ᴛʜᴇ ʙᴏᴛ??</b>
<i>
* ᴀᴅᴅ ʙᴏᴛ ᴛᴏ ʏᴏᴜʀ ɢʀᴏᴜᴘ ᴡɪᴛʜ ᴀᴅᴍɪɴ ʀɪɢʜᴛꜱ.
* ᴀᴅᴅ ʙᴏᴛ ᴛᴏ ᴄʜᴀɴɴᴇʟꜱ ᴡʜɪᴄʜ ʏᴏᴜ ᴡᴀɴᴛ ᴛᴏ ʟɪɴᴋ ᴡɪᴛʜ <b>ᴀʟʟ ᴀᴅᴍɪɴ ʀɪɢʜᴛꜱ</b>!
</i>
<b>ʙᴏᴛ ᴄᴏᴍᴍᴀɴᴅꜱ - ᴡᴏʀᴋꜱ ɪɴ ɢʀᴏᴜᴘ ᴏɴʟʏ</b>
(ʏᴏᴜ ɴᴇᴇᴅ ᴛᴏ ʙᴇ ᴀ ᴀᴜᴛʜ ᴜꜱᴇʀ ɪɴ ᴏʀᴅᴇʀ ᴛᴏ ᴜꜱᴇ ᴛʜᴇꜱᴇ ᴄᴏᴍᴍᴀɴᴅꜱ)
* <code>/ᴀᴅᴅ ᴄʜᴀɴɴᴇʟɪᴅ</code> - ʟɪɴᴋꜱ ᴄʜᴀɴɴᴇʟ ᴛᴏ ʏᴏᴜʀ ɢʀᴏᴜᴘ.
ᴏʀ
* <code>/ᴀᴅᴅ @ᴄʜᴀɴɴᴇʟᴜꜱᴇʀɴᴀᴍᴇ</code> - ʟɪɴᴋꜱ ᴄʜᴀɴɴᴇʟ ᴛᴏ ʏᴏᴜʀ ɢʀᴏᴜᴘ.
<i>ɴᴏᴛᴇ : ʏᴏᴜ ᴄᴀɴ ɢᴇᴛ ʏᴏᴜʀ ᴄʜᴀɴɴᴇʟ ɪᴅ ꜰʀᴏᴍ ᴍɪꜱꜱ ʀᴏꜱᴇ ʙᴏᴛ</i>
* <code>/ᴅᴇʟ ᴄʜᴀɴɴᴇʟɪᴅ</code> - ᴅᴇʟɪɴᴋꜱ ᴄʜᴀɴɴᴇʟ ꜰʀᴏᴍ ɢʀᴏᴜᴘ
ᴏʀ
* <code>/ᴅᴇʟ @ᴄʜᴀɴɴᴇʟᴜꜱᴇʀɴᴀᴍᴇ</code> - ᴅᴇʟɪɴᴋꜱ ᴄʜᴀɴɴᴇʟ ꜰʀᴏᴍ ɢʀᴏᴜᴘ
<i>ɴᴏᴛᴇ : ʏᴏᴜ ᴄᴀɴ ɢᴇᴛ ᴄᴏɴɴᴇᴄᴛᴇᴅ ᴄʜᴀɴɴᴇʟ ᴅᴇᴛᴀɪʟꜱ ʙʏ <code>/ꜰɪʟᴛᴇʀꜱᴛᴀᴛꜱ</code> </i>
* <code>/ᴅᴇʟᴀʟʟ</code> - ʀᴇᴍᴏᴠᴇꜱ ᴀʟʟ ᴄᴏɴɴᴇᴄᴛᴇᴅ ᴄʜᴀɴɴᴇʟꜱ ᴀɴᴅ ꜰɪʟᴛᴇʀꜱ ꜰʀᴏᴍ ɢʀᴏᴜᴘ!
<i>ɴᴏᴛᴇ : ᴅᴏɴᴛ ᴀᴅᴅ ᴄᴏᴍᴍᴀɴᴅ ᴅᴇʟᴇᴛᴇ ʙᴏᴛꜱ ɪɴ ɢʀᴏᴜᴘ! ᴏᴛʜᴇʀᴡɪꜱᴇ, ᴅᴇʟᴀʟʟ ᴄᴏᴍᴍᴀɴᴅ ᴡᴏɴᴛ ᴡᴏʀᴋ</i>
* <code>/ꜰɪʟᴛᴇʀꜱᴛᴀᴛꜱ</code> - ᴄʜᴇᴄᴋ ᴄᴏɴɴᴇᴄᴛᴇᴅ ᴄʜᴀɴɴᴇʟꜱ ᴀɴᴅ ɴᴜᴍʙᴇʀ ᴏꜰ ꜰɪʟᴛᴇʀꜱ.
ɴᴏ ɴᴇᴇᴅ ᴀᴅᴅ ᴇᴀᴄʜ ꜰɪʟᴛᴇʀ ᴀɢᴀɪɴ!
ʙᴏᴛ ᴡɪʟʟ ᴀᴜᴛᴏᴍᴀᴛɪᴄᴀʟʟʏ ꜱᴇᴀʀᴄʜ ꜰᴏʀ ʏᴏᴜʀ ꜰɪʟᴇꜱ ᴀɴᴅ ɢɪᴠᴇ ʟɪɴᴋꜱ ᴛᴏ ᴛʜᴀᴛ!
© @ᴘʀᴏʙ3ᴛᴏʀ"""
ABOUT_MSG = """💠<b>ᴍʏ ɴᴀᴍᴇ : ᴀᴜᴛᴏ ꜰɪʟᴛᴇʀ ʙᴏᴛ ᴠ2</b>
💠<b>ᴄʀᴇᴀᴛᴏʀ :</b> <a href='https://t.me/hcn_bots'>ʜᴄɴ ʙᴏᴛꜱ</a>
💠<b>ʟᴀɴɢᴜᴀɢᴇ :</b> <code>ᴘʏᴛʜᴏɴ3</code>
💠<b>ʟɪʙʀᴀʀʏ :</b> <a href='https://docs.pyrogram.org/'>ᴘʏʀᴏɢʀᴀᴍ 1.0.7</a>
"""
|
# Generated with InnerPipeLoading
#
from enum import Enum
from enum import auto
class InnerPipeLoading(Enum):
""""""
EXPOSED = auto()
SHELTERED_CLOSED = auto()
def label(self):
if self == InnerPipeLoading.EXPOSED:
return "Exposed"
if self == InnerPipeLoading.SHELTERED_CLOSED:
return "Sheltered Closed" |
import boto3
import yaml
import os
import argparse
cloudwatch = boto3.client("cloudwatch")
logs = boto3.client("logs")
# Arguments Engine
parser = argparse.ArgumentParser(description="OverWatch Rules Validator")
parser.add_argument(
"rules_folder_path",
metavar="path",
type=str,
nargs="?",
default="rules",
help='Path to rules folder | dirName of rules folder if autofind flag is set | Default: "rules"',
)
parser.add_argument(
"--directory",
type=str,
nargs="?",
default=".",
help='Path to OverWatch Core Application | Default "." or Current Directory',
)
parser.add_argument(
"--autofind",
dest="autofind",
action="store_true",
help="Enables autofinding of <rules_folder_path> directory within project",
)
class OverwatchDeployer:
def __init__(self, rules_dir_path, autofind):
self.rules_dir_path = (
str(self.find(rules_dir_path, os.path.abspath(os.curdir)))
if autofind
else rules_dir_path
)
self.rules_dir_path += "/" # to make it proper filepath
self.rules = []
# Taken from: https://stackoverflow.com/questions/1724693/find-a-file-in-python
# Finds rules folder depending on folder name provided
def find(self, name, path):
for root, dirs, files in os.walk(path):
if name in dirs:
return os.path.join(root, name)
def load_rules(self):
for filename in os.listdir(self.rules_dir_path):
if filename.endswith(".yaml"):
self.rules.append(filename)
print(f"Loaded {', '.join(self.rules)}")
def deploy_rules(self):
"""
Opens a rule file and generate a series of Alarms/MetricFilters.
If a field is not defined in the rule, it is populated by the corresponding default value defined by the default rule. If the value in the default rule is also empty then the field is removed from the request.
"""
for rule in self.rules:
with open(self.rules_dir_path + rule, "r") as stream, open(
DEFAULT_PATH, "r"
) as default:
try:
current_rules = yaml.safe_load(stream)
default_rule = yaml.safe_load(default)
except yaml.YAMLError as exc:
print(exc)
                except Exception as err:
                    print(err)
                    exit(1)
metric_fields_template = {
"filterName": "",
"filterPattern": "",
"logGroupName": "",
"metricTransformations": "",
}
alarm_fields_template = {
"ActionsEnabled": "",
"AlarmActions": "",
"AlarmDescription": "",
"AlarmName": "",
"ComparisonOperator": "",
"DatapointsToAlarm": "",
"Dimensions": "",
"EvaluateLowSampleCountPercentile": "",
"EvaluationPeriods": "",
"ExtendedStatistic": "",
"InsufficientDataActions": "",
"MetricName": "",
"Metrics": "",
"Namespace": "",
"OKActions": "",
"Period": "",
"Statistic": "",
"Tags": "",
"Threshold": "",
"ThresholdMetricId": "",
"TreatMissingData": "",
"Unit": "",
}
# print(default_rule['Metric'])
# create metric filters for all alarms
for current_rule in current_rules:
                    metric_fields = dict(metric_fields_template)  # copy so deletions below do not mutate the shared template
for field, value in list(metric_fields.items()):
if field in current_rule["Metric"]:
metric_fields[field] = current_rule["Metric"][field]
else:
metric_fields[field] = default_rule["Metric"][field]
if metric_fields[field] == "" or metric_fields[field] == []:
del metric_fields[field]
# print(metric_fields)
try:
# Creating metric filter
logs.put_metric_filter(**metric_fields)
except Exception as err:
print(err)
print("something went wrong creating metric filter")
exit(1)
# deploy alarms
for current_rule in current_rules:
                    alarm_fields = dict(alarm_fields_template)  # copy so deletions below do not mutate the shared template
for field, value in list(alarm_fields.items()):
if field in current_rule["Alarm"]:
alarm_fields[field] = current_rule["Alarm"][field]
else:
alarm_fields[field] = default_rule["Alarm"][field]
if alarm_fields["MetricName"] == "":
alarm_fields["MetricName"] = metric_fields[
"metricTransformations"
][0]["metricName"]
if alarm_fields["Namespace"] == "":
alarm_fields["Namespace"] = metric_fields[
"metricTransformations"
][0]["metricNamespace"]
elif alarm_fields[field] == "" or alarm_fields[field] == []:
del alarm_fields[field]
# print(alarm_fields)
try:
# Creating CloudWatch Alarm
cloudwatch.put_metric_alarm(**alarm_fields)
except Exception as err:
print(err)
print("something went wrong creating CloudWatch Alarm")
exit(1)
def deploy(self):
self.load_rules()
self.deploy_rules()
if __name__ == "__main__":
# parse the arguments
args = parser.parse_args()
# Path to Schema
SCHEMA_PATH = f"{args.directory}/ow-core/deployer/internal/schema.yaml"
# Path to Schema defaults
DEFAULT_PATH = f"{args.directory}/ow-core/deployer/internal/default.yaml"
# deployer class instance
deployer = OverwatchDeployer(args.rules_folder_path, args.autofind)
# if path given, attempt to deploy
deployer.deploy()
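# A minimal sketch of a hypothetical rule file consumed by deploy_rules(). Field names
# mirror boto3's logs.put_metric_filter / cloudwatch.put_metric_alarm keyword arguments;
# anything left empty falls back to default.yaml or is dropped before the API call.
#
#   - Metric:
#       filterName: app-error-filter            # hypothetical values throughout
#       filterPattern: "ERROR"
#       logGroupName: /my/app/logs
#       metricTransformations:
#         - metricName: ErrorCount
#           metricNamespace: MyApp
#           metricValue: "1"
#     Alarm:
#       AlarmName: app-error-alarm
#       ComparisonOperator: GreaterThanThreshold
#       EvaluationPeriods: 1
#       Threshold: 0
#       Statistic: Sum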
|
""" Code for prednet_run model : adapted from red (https://github.com/una-dinosauria/human-motion-prediction) """
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import random
import numpy as np
import tensorflow as tf
from tensorflow.python.ops import variable_scope as vs
from experiments import config
# from utils.evaluation.prednet_run.config import CKPT_DIR, LOAD
from . import modified_rnn_seq2seq
from . import rnn_cell_extensions # my extensions of the tf repos
class PredNet(object):
"""prednet_run model for human motion prediction"""
def __init__(self,
architecture,
source_seq_len,
target_seq_len,
rnn_size,
num_layers,
max_gradient_norm,
batch_size,
learning_rate,
learning_rate_decay_factor,
summaries_dir,
loss_to_use,
residual_velocities=False,
dtype=tf.float32):
"""
Args:
architecture: [basic, tied] whether to tie the encoder and decoder.
        source_seq_len: length of the input sequence.
        target_seq_len: length of the target sequence.
rnn_size: number of units in the rnn.
num_layers: number of rnns to stack.
max_gradient_norm: gradients will be clipped to maximally this norm.
batch_size: the size of the batches used during training
learning_rate: learning rate to start with.
learning_rate_decay_factor: decay learning rate by this much when needed.
summaries_dir: where to log progress for tensorboard.
loss_to_use: [supervised, sampling_based]. Whether to use ground truth in
each timestep to compute the loss after decoding, or to feed back the
prediction from the previous time-step.
residual_velocities: whether to use a residual connection that models velocities.
dtype: the data type to use to store internal variables.
"""
self.human_size = config.MOGAZE_SIZE if config.TRAIN_MOGAZE or config.TEST_MOGAZE else config.HUMAN_SIZE
self.goal_size = config.GOAL_SIZE
self.input_size = self.human_size if config.AVOID_GOAL else self.human_size + self.goal_size
print("Input size is %d" % self.input_size)
self.train_writer = tf.summary.FileWriter(os.path.normpath(os.path.join(summaries_dir, 'train')))
self.val_writer = tf.summary.FileWriter(os.path.normpath(os.path.join(summaries_dir, 'val')))
self.source_seq_len = source_seq_len
self.target_seq_len = target_seq_len
self.rnn_size = rnn_size
self.batch_size = batch_size
self.learning_rate = tf.Variable(float(learning_rate), trainable=False, dtype=dtype)
self.learning_rate_decay_op = self.learning_rate.assign(self.learning_rate * learning_rate_decay_factor)
self.global_step = tf.Variable(0, trainable=False)
print('rnn_size = {0}'.format(rnn_size))
cell = tf.contrib.rnn.GRUCell(self.rnn_size)
if num_layers > 1:
cell = tf.contrib.rnn.MultiRNNCell([tf.contrib.rnn.GRUCell(self.rnn_size) for _ in range(num_layers)])
# transform input in suitable format for tf
with tf.name_scope("inputs"):
enc_in = tf.placeholder(dtype, shape=[None, source_seq_len - 1, self.input_size], name="enc_in") # dim 37
dec_in = tf.placeholder(dtype, shape=[None, target_seq_len, self.input_size], name="dec_in") # dim 37
# dec_out is the variable to store the ground truth which will be later used to compute loss
dec_out = tf.placeholder(dtype, shape=[None, target_seq_len, self.human_size], name="dec_out") # dim 34
self.encoder_inputs = enc_in
self.decoder_inputs = dec_in
self.decoder_outputs = dec_out
enc_in = tf.transpose(enc_in, [1, 0, 2])
dec_in = tf.transpose(dec_in, [1, 0, 2])
dec_out = tf.transpose(dec_out, [1, 0, 2])
enc_in = tf.reshape(enc_in, [-1, self.input_size])
dec_in = tf.reshape(dec_in, [-1, self.input_size])
dec_out = tf.reshape(dec_out, [-1, self.human_size])
enc_in = tf.split(enc_in, source_seq_len - 1, axis=0)
dec_in = tf.split(dec_in, target_seq_len, axis=0)
dec_out = tf.split(dec_out, target_seq_len, axis=0)
# add linear decoder layer to GRU
cell = rnn_cell_extensions.LinearSpaceDecoderWrapper(cell, self.human_size)
# Finally, wrap everything in a residual layer if we want to model velocities
if residual_velocities:
print('residual_velocities = {}'.format(residual_velocities))
cell = rnn_cell_extensions.ResidualWrapper(cell)
# for sampling based loss, loop function generates next input as previous output
lf = None
if loss_to_use == "sampling_based":
def lf(prev, decoder_inputs, i):
# assumption: the goal position is fixed during prediction of 25 seq
if not config.AVOID_GOAL:
prev = tf.concat([prev, decoder_inputs[0][:, self.human_size:]], -1)
return prev
elif loss_to_use == "supervised":
pass
else:
raise (ValueError, "unknown loss: %s" % loss_to_use)
# Build the RNN
if architecture == "basic":
# Basic RNN does not have a loop function in its API, so copying here.
with vs.variable_scope("basic_rnn_seq2seq"):
_, enc_state = tf.contrib.rnn.static_rnn(cell, enc_in, dtype=tf.float32) # Encoder
outputs, self.states = tf.contrib.legacy_seq2seq.rnn_decoder(dec_in, enc_state, cell,
loop_function=lf) # Decoder
elif architecture == "tied":
outputs, self.states = modified_rnn_seq2seq.tied_rnn_seq2seq(enc_in, dec_in, cell, loop_function=lf)
else:
raise (ValueError, "Uknown architecture: %s" % architecture)
self.outputs = outputs # 25 tensors of shape (?,34)
# defining the weighted loss function specific to PredNet
with tf.name_scope("loss_angles"):
position_loss = tf.reduce_mean(tf.square(tf.subtract(
tf.convert_to_tensor(dec_out)[:, :, :3], tf.convert_to_tensor(outputs)[:, :, :3])))
orientation_loss = tf.reduce_mean(tf.square(tf.subtract(
tf.convert_to_tensor(dec_out)[:, :, 3:], tf.convert_to_tensor(outputs)[:, :, 3:])))
loss_angles = 0.2 * position_loss + 0.8 * orientation_loss
self.loss = loss_angles
self.loss_summary = tf.summary.scalar('loss/loss', self.loss)
# loss at each time step (1 time step = 40ms)
self.ms_loss = tf.reduce_mean(tf.square(tf.subtract(dec_out, outputs)), axis=[1, 2])
# Gradients and SGD update operation for training the model.
params = tf.trainable_variables()
opt = tf.train.GradientDescentOptimizer(self.learning_rate)
gradients = tf.gradients(self.loss, params)
clipped_gradients, norm = tf.clip_by_global_norm(gradients, max_gradient_norm)
self.gradient_norms = norm
# update the trainable parameters by SGD optimizer
self.updates = opt.apply_gradients(zip(clipped_gradients, params), global_step=self.global_step)
# Keep track of the learning rate
self.learning_rate_summary = tf.summary.scalar('learning_rate/learning_rate', self.learning_rate)
# save the model
self.saver = tf.train.Saver(tf.global_variables(), max_to_keep=100)
def step(self, session, encoder_inputs, decoder_inputs, decoder_outputs, forward_only=False, pred=False):
"""Run a step of the model feeding the given inputs.
Args
session: tensorflow session to use.
encoder_inputs: encoder input vector.
decoder_inputs: decoder inputs vector.
decoder_outputs: decoder outputs vector.
forward_only: whether to do the backward step (training) or only forward (validation or testing).
        pred: True for testing; only the forward pass is run and the raw model outputs are returned.
Returns
gradient norm, loss (mean squared error) and summaries for training.
loss, ms_loss and loss_summary for validation.
outputs for testing.
"""
input_feed = {self.encoder_inputs: encoder_inputs,
self.decoder_inputs: decoder_inputs,
self.decoder_outputs: decoder_outputs}
if not forward_only:
# Training step
output_feed = [self.updates, # Update Op that does SGD.
self.gradient_norms, # Gradient norm.
self.loss,
self.loss_summary,
self.learning_rate_summary]
outputs = session.run(output_feed, input_feed)
return outputs[1], outputs[2], outputs[3], outputs[4] # Gradient norm, loss, summaries
elif forward_only and not pred:
# validation step
output_feed = [self.loss,
self.ms_loss,
self.loss_summary]
outputs = session.run(output_feed, input_feed)
return outputs[0], outputs[1], outputs[2]
else:
# testing step
output_feed = [self.outputs]
outputs = session.run(output_feed, input_feed)
return outputs[0]
def get_batch(self, data, data_keys, batch):
"""Get a random batch of data
Args
data: input data
data_keys: keys of input data dictionary
batch: the batch number
Returns
data: the batch data
"""
batch_keys = data_keys[batch * self.batch_size: batch * self.batch_size + self.batch_size]
random.shuffle(batch_keys)
data = {batch_key: data[batch_key][5:, :] for batch_key in batch_keys} # avoiding first 5 time steps
return data
def get_sub_batch(self, data, sub_batch, data_keys=None):
"""Get a sub batch of data
Args
data: input batch data
sub_batch: the sub_batch number
Returns
tuple (encoder_inputs, decoder_inputs, decoder_outputs)
"""
batch_keys = list(data_keys) if config.TRAIN_MOGAZE or config.TEST_MOGAZE else list(data.keys())
# How many frames in total do we need?
total_frames = self.source_seq_len + self.target_seq_len
encoder_inputs = np.zeros((self.batch_size, self.source_seq_len - 1, self.input_size), dtype=float)
decoder_inputs = np.zeros((self.batch_size, self.target_seq_len, self.input_size), dtype=float)
decoder_outputs = np.zeros((self.batch_size, self.target_seq_len, self.input_size), dtype=float)
for i in range(self.batch_size):
the_key = batch_keys[i]
data_sel = data[the_key][sub_batch * self.target_seq_len: sub_batch * self.target_seq_len + total_frames, :]
encoder_inputs[i, :, 0:self.input_size] = data_sel[0:self.source_seq_len - 1, :self.input_size]
decoder_inputs[i, :, 0:self.input_size] = data_sel[
self.source_seq_len - 1:self.source_seq_len + self.target_seq_len - 1,
:self.input_size]
decoder_outputs[i, :, 0:self.input_size] = data_sel[self.source_seq_len:, 0:self.input_size]
return encoder_inputs, decoder_inputs, decoder_outputs
@staticmethod
def load_model(use_cpu=False, ckpt_dir=None, timestep=None, **_):
if ckpt_dir is None:
ckpt_dir = config.CKPT_DIR
if timestep is None:
timestep = config.TEST_LOAD
# TODO replace with pred_model = create_model(sess, config.TEST_LOAD)
"""Create model and initialize or load parameters in session."""
print("Creating rnn_GRU model %d layers of %d units." % (1, 1024))
# Note: define two separate tf graphs for RL model and DL motion pred model
# graph for DL model below
dl_graph = tf.Graph()
device_count = {"GPU": 0} if use_cpu else {"GPU": 1} # Use the CPU if asked to
tf_config = tf.ConfigProto(device_count=device_count)
tf_config.gpu_options.allow_growth = True
sess = tf.Session(config=tf_config, graph=dl_graph)
# add operations to tensorflow dl specific graph
with dl_graph.as_default():
model = PredNet(
architecture='tied',
source_seq_len=config.SEQ_LENGTH_IN,
target_seq_len=config.SEQ_LENGTH_OUT,
rnn_size=1024, # hidden layer size
num_layers=1,
max_gradient_norm=5,
batch_size=1,
learning_rate=0.001,
learning_rate_decay_factor=0.95,
summaries_dir=config.SUMMARIES_DIR,
loss_to_use="sampling_based",
residual_velocities=True,
dtype=tf.float32)
ckpt = tf.train.get_checkpoint_state(ckpt_dir, latest_filename="checkpoint")
print("ckpt_dir", ckpt_dir)
if ckpt and ckpt.model_checkpoint_path:
# Check if the specific checkpoint exists
if timestep > 0:
if os.path.isfile(os.path.join(ckpt_dir, "checkpoint-{0}.index".format(timestep))):
ckpt_name = os.path.normpath(
os.path.join(os.path.join(ckpt_dir, "checkpoint-{0}".format(timestep))))
else:
raise ValueError("Asked to load checkpoint {0}, but it does not seem to exist".format(timestep))
else:
ckpt_name = os.path.basename(ckpt.model_checkpoint_path)
print("Loading model {0}".format(ckpt_name))
model.saver.restore(sess, ckpt_name)
return model, dl_graph, sess
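# A minimal sketch (assuming the experiments.config values and a trained checkpoint exist)
# of how this class is typically driven:
#   model, dl_graph, sess = PredNet.load_model(use_cpu=True)
#   enc_in, dec_in, dec_out = model.get_sub_batch(batch_data, sub_batch=0, data_keys=keys)
#   predicted = model.step(sess, enc_in, dec_in, dec_out, forward_only=True, pred=True)
# where batch_data/keys are whatever the surrounding training or evaluation code provides.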
|
from digi.xbee.io import IOLine, IOMode
from pyhap.accessory import Accessory
from pyhap.const import CATEGORY_SWITCH
from PHAS.RFHandler import RFHandler
import time, threading
import asyncio
import concurrent.futures
import logging
POWER_ON_SAFETY_DELAY = 20.0 #Delay in s after a shutdown before powering back up
DEBOUNCING_DELAY = 0.2 #Delay to debounce the switch
logger = logging.getLogger(__name__)
class AmplifierAccessory(Accessory):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
#Setup the HomeKit Service and its attached Characteristic
serv_switch = self.add_preload_service('Switch')
self.amplifier = serv_switch.configure_char('On', setter_callback=self.setAmplifierPower)
#Setup the power up thread dealing with the delay
self.powerUpThread = threading.Thread(target = self.powerUpAsync)
self.validationEvent = threading.Event()
self.stoppingThreadEvent = threading.Event()
self.powerUpEvent = threading.Event()
self.powerUpThread.delay = 20
self.powerUpThread.start()
def stop(self):
logger.info("Stopping Amp Accessory")
super().stop()
self.rfHandler.removeRemoteAccessory(self.display_name)
self.stoppingThreadEvent.set()
self.powerUpThread.join()
def startAmpWithHandler(self, accRFHandler):
#Add the accessory to the RFHandler and set its IOSample callback
self.rfHandler = accRFHandler
self.rfHandler.addRemoteAccessory(self.display_name, "0013A2004183853B")
self.rfHandler.addAccessoryCallback(self.display_name, self.amplifierCallback)
#Get the current switch state on the amplifier
self.lastSwitchState = self.rfHandler.getInputStateOfAccessoryPin(self.display_name, IOLine.DIO2_AD2)
#Force shutdown the amp and notify it to HomeKit users
self.rfHandler.setDigitalConfigurationOfAccessoryPin(self.display_name, IOLine.DIO1_AD1, IOMode.DIGITAL_OUT_LOW)
self.amplifier.value = 0
self.amplifier.notify()
#Keep track of the shutdown time and last io sample time
self.lastPowerOffTime = time.time()
self.lastSendTime = 0
    #Callback invoked when an IOSample is received (i.e. when the physical switch is used)
def amplifierCallback(self, ioSample, sendTime):
print("Amplifier CallBack - lastSendTime = ", self.lastSendTime, "current sendTime = ", sendTime)
        if sendTime - self.lastSendTime >= DEBOUNCING_DELAY:
newSwitchState = ioSample.get_digital_value(IOLine.DIO2_AD2)
self.lastSendTime = sendTime
if newSwitchState != self.lastSwitchState:
self.toggleAmplifierPower()
self.lastSwitchState = newSwitchState
else:
logger.debug("DEBOUNCE")
def toggleAmplifierPower(self):
logger.info("Toggling amp from the switch")
if self.amplifier.value == 0:
self.amplifier.client_update_value(1)
else:
self.amplifier.client_update_value(0)
#Threading function dealing with the delay before powering up
def powerUpAsync(self):
        t = threading.current_thread()
while not self.stoppingThreadEvent.is_set():
#Check if a power up is asked and then wait for the delay
if self.powerUpEvent.is_set():
logger.info("Power up with delay : %f", getattr(t, "delay"))
#If this event is set, the powerUp is cancelled (the delay is not applied), else power the amp up
self.validationEvent.wait(getattr(t, "delay"))
if not self.validationEvent.is_set():
self.rfHandler.setDigitalConfigurationOfAccessoryPin(self.display_name, IOLine.DIO1_AD1, IOMode.DIGITAL_OUT_HIGH)
logger.info("Powering up")
self.powerUpEvent.clear()
else:
self.validationEvent.clear()
self.powerUpEvent.clear()
def setAmplifierPower(self, newState):
if newState == 1:
self.amplifier.value = 1
self.amplifier.notify()
if time.time() - self.lastPowerOffTime < POWER_ON_SAFETY_DELAY:
self.powerUpThread.delay = POWER_ON_SAFETY_DELAY - time.time() + self.lastPowerOffTime
self.powerUpEvent.set()
else:
logger.info("Powering up immediately")
self.rfHandler.setDigitalConfigurationOfAccessoryPin(self.display_name, IOLine.DIO1_AD1, IOMode.DIGITAL_OUT_HIGH)
else:
if self.powerUpEvent.is_set():
logger.info("Cancelling power up")
self.validationEvent.set()
else:
logger.info("Shutdown immediately")
self.rfHandler.setDigitalConfigurationOfAccessoryPin(self.display_name, IOLine.DIO1_AD1, IOMode.DIGITAL_OUT_LOW)
self.lastPowerOffTime = time.time()
self.amplifier.value = 0
self.amplifier.notify()
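# A minimal sketch (hypothetical names, assuming HAP-python's AccessoryDriver and a
# configured RFHandler) of how this accessory would typically be wired up:
#   from pyhap.accessory_driver import AccessoryDriver
#   driver = AccessoryDriver(port=51826)
#   amp = AmplifierAccessory(driver, 'Amplifier')
#   amp.startAmpWithHandler(rf_handler)      # rf_handler: a PHAS.RFHandler.RFHandler instance
#   driver.add_accessory(accessory=amp)
#   driver.start()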
|
import os
import re
import time
from pathlib import Path
from flask import current_app
import shutil
class SessionFileSystem:
def __init__(self, filesystem):
self.filesystem = filesystem
def get_data_path(self):
path = Path(current_app.root_path)
return os.path.join(str(path.parent), 'data')
def get_data_sessions_path(self):
return os.path.join(self.get_data_path(), 'sessions')
def get_user_data_path(self, user_id, session_id):
path = os.path.join(self.get_data_sessions_path(), str(user_id))
if session_id > 0:
path = os.path.join(path, str(session_id))
if not os.path.isdir(path):
os.makedirs(path)
return path
def get_hashfile_path(self, user_id, session_id):
return os.path.join(self.get_user_data_path(user_id, session_id), 'hashes.txt')
def get_custom_file_path(self, user_id, session_id, prefix='', random=False, extension='.dict'):
if len(prefix) == 0:
random = True
name = prefix
if random:
name = name + str(int(time.time()))
name = name + extension
return os.path.join(self.get_user_data_path(user_id, session_id), name)
def custom_file_exists(self, file_path):
return os.path.isfile(file_path)
def hashfile_exists(self, user_id, session_id):
path = self.get_hashfile_path(user_id, session_id)
return os.path.isfile(path)
def get_potfile_path(self, user_id, session_id):
return os.path.join(self.get_user_data_path(user_id, session_id), 'hashes.potfile')
def get_screenfile_path(self, user_id, session_id, name=None):
if name is None:
name = 'screen.log'
return os.path.join(self.get_user_data_path(user_id, session_id), name)
def get_crackedfile_path(self, user_id, session_id):
return os.path.join(self.get_user_data_path(user_id, session_id), 'hashes.cracked')
def count_non_empty_lines_in_file(self, file):
if not os.path.isfile(file):
return 0
size = os.path.getsize(file)
max_size = 100 * 1024 * 1024 # 100 MB.
if size > max_size:
# File is too large, don't count it.
return -1
try:
count = 0
with open(file, 'r') as f:
for line in f:
if line.strip():
count += 1
except UnicodeDecodeError:
count = 0
return count
def backup_screen_log_file(self, user_id, session_id):
path = self.get_screenfile_path(user_id, session_id)
if not os.path.isfile(path):
return True
new_path = path + '.' + str(int(time.time()))
os.rename(path, new_path)
return True
def __remove_escape_characters(self, data):
# https://stackoverflow.com/questions/14693701/how-can-i-remove-the-ansi-escape-sequences-from-a-string-in-python
ansi_escape_8bit = re.compile(br'(?:\x1B[@-_]|[\x80-\x9F])[0-?]*[ -/]*[@-~]')
return ansi_escape_8bit.sub(b'', data)
def __fix_line_termination(self, data):
return data.replace(b"\r\n", b"\n").replace(b"\r", b"\n")
def tail_file(self, file, length):
if not os.path.isfile(file):
return b''
# If we try to read more than the actual size of the file, it will throw an error.
filesize = os.path.getsize(file)
bytes_to_read = filesize if filesize < length else length
        # Read at most the last `length` bytes of the file.
with open(file, 'rb') as file:
file.seek(bytes_to_read * -1, os.SEEK_END)
stream = file.read()
# Replace \r\n with \n, and any rebel \r to \n. We only like \n in here!
# Clean the file from escape characters.
stream = self.__remove_escape_characters(stream)
stream = self.__fix_line_termination(stream)
return stream
def save_hashes(self, user_id, session_id, hashes):
save_as = self.get_hashfile_path(user_id, session_id)
return self.write_to_file(save_as, hashes)
def write_to_file(self, file, data):
with open(file, 'w') as f:
f.write(data)
return True
def find_latest_screenlog(self, user_id, session_id):
# Sometimes when a backup of screen.log is made, a new "screen.log" doesn't appear, making it look like it's a brand new session.
# This function will try and find any historical screen.log.TIMESTAMP files in the event of this edge case.
filepath = self.get_screenfile_path(user_id, session_id)
if os.path.isfile(filepath):
return filepath
filename = os.path.basename(filepath)
path = os.path.dirname(filepath)
files = self.filesystem.get_files(path)
screen_files = []
len_to_look_for = len(filename) + 1
for name, data in files.items():
if name[:len_to_look_for] == (filename + '.'):
screen_files.append(name)
if len(screen_files) == 0:
# Return the original path if no historic files exist.
return filepath
screen_files.sort(reverse=True)
return os.path.join(path, screen_files[0])
def delete_path(self, path):
return shutil.rmtree(path)
def read_file(self, file):
if not os.path.isfile(file):
return ''
with open(file, 'r') as f:
contents = f.read()
return contents
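# A minimal sketch (hypothetical user/session ids, assuming an injected `filesystem`
# helper exposing get_files(path)) of typical use from a Flask request handler:
#   sfs = SessionFileSystem(filesystem)
#   sfs.save_hashes(user_id=1, session_id=2, hashes="5f4dcc3b5aa765d61d8327deb882cf99\n")
#   tail = sfs.tail_file(sfs.find_latest_screenlog(1, 2), 4096)
#   cracked = sfs.count_non_empty_lines_in_file(sfs.get_crackedfile_path(1, 2))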
|
import logging
from django.contrib.auth.models import User
from unplugged import RelatedPluginField, Schema, fields
from wampyre.realm import realm_manager
from ...plugins import ConfigPlugin, NotifierPlugin
logger = logging.getLogger(__name__)
class WAMPSchema(Schema):
config = RelatedPluginField(plugin_type=ConfigPlugin)
class NotifyFilterSchema(Schema):
patterns = fields.List(fields.String, many=True, default=list)
class WAMPNotifierHandlerPlugin(NotifierPlugin):
plugin_name = "wamp"
config_schema = WAMPSchema
default_config = {"patterns": ["admin.*"]}
def __init__(self, config):
self.config = config
config_plugin = config.get("config")
if config_plugin:
namespace, key = self.get_config_values()
config_plugin.set_default_config(
"system", namespace, key, self.default_config
)
config_plugin.set_config_schema(namespace, key, NotifyFilterSchema)
def get_config_values(self):
return f"{self.plugin_type}:{self.name}", "notification_filters"
def notify(self, notification):
config_plugin = self.config.get("config")
if not config_plugin:
return
namespace, key = self.get_config_values()
for realm in realm_manager.get_realms(): # TODO: optimize !
if not realm.realm.startswith("user."):
continue
username = realm.realm[5:]
try:
user = User.objects.get(username=username)
except User.DoesNotExist:
continue
if not notification.is_allowed(user):
continue
config = config_plugin.get_user_config(user, namespace, key)
if not notification.match_patterns(config.get("patterns", [])):
continue
realm.publish(
{},
"notification",
args=(),
kwargs={
"topic": notification.topic,
"type": notification.type,
"title": notification.title,
"body": notification.body,
},
)
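# A minimal sketch (hypothetical pattern values) of the per-user config this notifier reads
# via config_plugin.get_user_config(user, "<plugin_type>:<name>", "notification_filters"):
#   {"patterns": ["admin.*", "torrents.finished"]}
# Notifications whose topic matches one of these patterns are published to the user's
# "user.<username>" WAMP realm under the "notification" topic.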
|
from django.conf.urls import url
from fundoonotes.views import FundooNotesListCreateAPIView, FundooNotesDetailAPIView
app_name = 'fundoonotes'
urlpatterns = [
url(r'^$', FundooNotesListCreateAPIView.as_view(), name="list"),
url(r'^(?P<pk>[0-9]+)/$', FundooNotesDetailAPIView.as_view(), name="detail"),
]
|
"""
Account related functions.
These account related functions generally align one-for-one with published API calls categorized in the account category
API v1 - https://t3n.zendesk.com/forums/21509857-Account
API v2 - https://t3n.zendesk.com/forums/21645944-Account
"""
import clc
class Account:
	account_status_itos = { 1: 'Active', 2: 'Disabled', 3: 'Deleted', 4: 'Demo' }
@staticmethod
def GetAlias():
"""Return specified alias or if none the alias associated with the provided credentials."""
if not clc.ALIAS: Account.GetAccounts()
return(clc.ALIAS)
@staticmethod
def GetLocation():
"""Return specified location or if none the default location associated with the provided credentials and alias."""
if not clc.LOCATION: Account.GetAccounts()
return(clc.LOCATION)
@staticmethod
def GetAccountDetails(alias=None):
"""Return account details dict associated with the provided alias."""
if not alias: alias = Account.GetAlias()
r = clc.v1.API.Call('post','Account/GetAccountDetails',{'AccountAlias': alias})
if r['Success'] != True:
if clc.args: clc.v1.output.Status('ERROR',3,'Error calling %s. Status code %s. %s' % ('Account/GetAccountDetails',r['StatusCode'],r['Message']))
raise Exception('Error calling %s. Status code %s. %s' % ('Account/GetAccountDetails',r['StatusCode'],r['Message']))
elif int(r['StatusCode']) == 0:
r['AccountDetails']['Status'] = Account.account_status_itos[r['AccountDetails']['Status']]
return(r['AccountDetails'])
@staticmethod
def GetLocations():
"""Return all cloud locations available to the calling alias."""
r = clc.v1.API.Call('post','Account/GetLocations',{})
if r['Success'] != True:
if clc.args: clc.v1.output.Status('ERROR',3,'Error calling %s. Status code %s. %s' % ('Account/GetLocations',r['StatusCode'],r['Message']))
raise Exception('Error calling %s. Status code %s. %s' % ('Account/GetLocations',r['StatusCode'],r['Message']))
elif int(r['StatusCode']) == 0:
clc.LOCATIONS = [x['Alias'] for x in r['Locations']]
return(r['Locations'])
@staticmethod
def GetAccounts(alias=None):
"""Return account inventory dict containing all subaccounts for the given alias. If None search from default alias."""
if alias is not None: payload = {'AccountAlias': alias}
else: payload = {}
r = clc.v1.API.Call('post','Account/GetAccounts',payload)
if int(r['StatusCode']) == 0:
# Assume first response is always the original account. Not sure if this is reliable
if not clc.ALIAS: clc.ALIAS = r['Accounts'][0]['AccountAlias']
if not clc.LOCATION: clc.LOCATION = r['Accounts'][0]['Location']
return(r['Accounts'])
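# A minimal sketch (assuming clc credentials have already been configured for the v1 API)
# of how these account helpers are typically called:
#   alias = Account.GetAlias()                  # default alias for the credentials
#   details = Account.GetAccountDetails(alias)  # dict with a human-readable 'Status'
#   locations = Account.GetLocations()          # also caches location aliases in clc.LOCATIONS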
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import json
import numpy as np
import time
import os
from six.moves import cPickle
import opts
import models
from dataloader import *
from dataloaderraw import *
import eval_utils
import argparse
import torch
from misc import utils
from graph_utils import utils as gutils
# Input arguments and options
parser = argparse.ArgumentParser()
# Input paths
parser.add_argument('--model', type=str, default='',
help='path to model to evaluate')
parser.add_argument('--infos_path', type=str, default='',
help='path to infos to evaluate')
parser.add_argument('--data_path', type=str, default='sampled_data/valdata.pt')
opts.add_eval_options(parser)
opts.add_diversity_opts(parser)
opt = parser.parse_args()
# Load infos
with open(opt.infos_path, 'rb') as f:
infos = utils.pickle_load(f)
# override and collect parameters
replace = ['input_fc_dir', 'input_att_dir', 'input_box_dir', 'input_label_h5', 'input_treelabel_h5', 'input_json', 'input_tree_json', 'batch_size', 'id']
ignore = ['start_from']
for k in vars(infos['opt']).keys():
if k in replace:
setattr(opt, k, getattr(opt, k) or getattr(infos['opt'], k, ''))
elif k not in ignore:
        if k not in vars(opt):
vars(opt).update({k: vars(infos['opt'])[k]}) # copy over options from model
vocab = infos['vocab']
word_to_ix = {v: k for k, v in vocab.items()}
# Setup the model
opt.vocab = vocab
model = models.setup(opt)
del opt.vocab
model.load_state_dict(torch.load(opt.model))
model.cuda()
model.eval()
data = torch.load('sampled_data/testdata.pt')
tmp = [data['fc_feats'][:1], data['att_feats'][:1], data['att_masks']]
tmp = [_ if _ is None else _.cuda() for _ in tmp]
fc_feats, att_feats, att_masks = tmp
time_start = time.time()
with torch.no_grad():
# for tpt in [0.5, 1.0, 2.0]:
# print("sampled by sample with temperature {}:".format(tpt))
# (seq, seq_idx, seqLen), seq_logprobs = model(fc_feats, att_feats, att_masks, opt={'sample_method': 'sample', 'temperature': tpt}, mode='sample')
# sent = gutils.decode_sequence(vocab, seq, seq_idx, seqLen)
# print('\n'.join(sent))
(seq, seq_idx, seqLen), seq_logprobs = model(fc_feats, att_feats, att_masks, opt={'sample_method': 'greedy', 'beam_size': 1}, mode='sample')
sent = gutils.decode_sequence(vocab, seq, seq_idx, seqLen)
for b in range(seq.shape[0]):
seq_len = seqLen[b]
seq_b = seq[b,:seq_len]
seq_logprobs_b = seq_logprobs[b,:seq_len]
p = seq_logprobs_b.gather(1, index=seq_b.unsqueeze(1)).sum()
print('{} (p={} | len={})'.format(sent[b], p, seq_len))
print('--' * 10)
print("sampled by beam search:")
(seq, seq_idx, seqLen), seq_logprobs = model(fc_feats, att_feats, att_masks, opt={'sample_method': 'greedy', 'beam_size': 2, 'length_penalty': 'avg_0', 'suppress_EOB_factor': 2.0}, mode='sample')
sent = gutils.decode_sequence(vocab, seq, seq_idx, seqLen)
print('\n'.join(sent))
for _s in model.done_beams:
for __s in _s:
# seq_len = __s['seq'].shape[0]
seq_len = __s['seqLen'].item()
print("seq_len: {}".format(seq_len))
__ss = gutils.decode_sequence(vocab, __s['seq'].unsqueeze(0), __s['seq_idx'].unsqueeze(0), [seq_len])
print('{} (p={} | len={})'.format(__ss[0], __s['p'], seq_len))
# print(__s['logps'].gather(1, index=__s['seq'].unsqueeze(1)).sum())
print('--' * 10)
|
"""Import required losses."""
from .regression_loss_functions import (
LogCoshLoss,
SPELoss,
)
__all__ = [
"LogCoshLoss",
"SPELoss",
] |
import sys
import cv2
import imutils
from itertools import combinations
import numpy as np
from numba import njit
import time
"""
Check whether two lines defined by their Hough angle are parallel.
"""
@njit
def are_parallel(θ1, θ2):
return np.abs(θ1-θ2) < np.pi/8 or np.abs(np.abs(θ1-θ2)-np.pi) < np.pi/8
"""
Check parallelity relations between four lines.
Lines must be parallel in pairs: two parallel lines and two other parallel lines.
"""
@njit
def check_parallelity(θ):
are_par_01 = are_parallel(θ[0], θ[1])
are_par_02 = are_parallel(θ[0], θ[2])
are_par_03 = are_parallel(θ[0], θ[3])
are_par_12 = are_parallel(θ[1], θ[2])
are_par_13 = are_parallel(θ[1], θ[3])
are_par_23 = are_parallel(θ[2], θ[3])
# Check that lines are parallel in pairs.
if (not (are_par_01 and are_par_23)
and not (are_par_02 and are_par_13)
and not (are_par_03 and are_par_12)
):
return True
# Check that lines of different pairs are not parallel.
    if ((are_par_01 and are_par_02)
        or (are_par_01 and are_par_03)
        or (are_par_02 and are_par_03)
        or (are_par_12 and are_par_13)
    ):
return True
return False
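# Example (hypothetical angles): θ = np.array([0.0, 0.0, np.pi/2, np.pi/2]) describes two
# vertical and two horizontal lines; check_parallelity(θ) returns False, so the combination
# is kept as a candidate quadrilateral.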
"""
Find intersection between two Hough lines.
"""
@njit
def line_intersection(ρ1, ρ2, θ1, θ2):
if np.abs(np.sin(θ1)) < 1e-3:
x = ρ1/np.cos(θ1)
y = (ρ2 - x*np.cos(θ2))/np.sin(θ2)
elif np.abs(np.sin(θ2)) < 1e-3:
x = ρ2/np.cos(θ2)
y = (ρ1 - x*np.cos(θ1))/np.sin(θ1)
else:
x = (ρ1/np.sin(θ1) - ρ2/np.sin(θ2))/(1/np.tan(θ1) - 1/np.tan(θ2))
y = (ρ1 - x*np.cos(θ1))/np.sin(θ1)
return [x, y]
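# Example (hypothetical lines): ρ1=100, θ1=0 is the vertical line x = 100 and
# ρ2=50, θ2=π/2 is the horizontal line y = 50, so
#   line_intersection(100, 50, 0, np.pi/2)  ->  [100.0, 50.0]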
"""
Decide whether a set of four Hough lines forms a valid set of document edges.
Find the corners of the four Hough lines, check their parallelity
relations, find the area inside them and, if the area is bigger than
any other set of edges we have, update doc_corners and doc_edges
accordingly.
"""
@njit
def find_doc_edges(edges, doc_corners, doc_edges, max_area):
ρ = edges[:, 0]
θ = edges[:, 1]
# Check that lines are parallel in pairs.
if check_parallelity(θ):
return max_area
# Find corners.
corners = np.zeros((4, 2))
if are_parallel(θ[0], θ[1]) and are_parallel(θ[2], θ[3]):
corners[0] = line_intersection(ρ[0], ρ[2], θ[0], θ[2])
corners[1] = line_intersection(ρ[0], ρ[3], θ[0], θ[3])
corners[2] = line_intersection(ρ[1], ρ[2], θ[1], θ[2])
corners[3] = line_intersection(ρ[1], ρ[3], θ[1], θ[3])
if are_parallel(θ[0], θ[2]) and are_parallel(θ[1], θ[3]):
corners[0] = line_intersection(ρ[0], ρ[1], θ[0], θ[1])
corners[1] = line_intersection(ρ[0], ρ[3], θ[0], θ[3])
corners[2] = line_intersection(ρ[2], ρ[1], θ[2], θ[1])
corners[3] = line_intersection(ρ[2], ρ[3], θ[2], θ[3])
if are_parallel(θ[0], θ[3]) and are_parallel(θ[1], θ[2]):
corners[0] = line_intersection(ρ[0], ρ[1], θ[0], θ[1])
corners[1] = line_intersection(ρ[0], ρ[2], θ[0], θ[2])
corners[2] = line_intersection(ρ[3], ρ[1], θ[3], θ[1])
corners[3] = line_intersection(ρ[3], ρ[2], θ[3], θ[2])
# Identify the position of the corners. We select each of the
# corners by projecting their coordinates on the lines y = x and
# y = -x.
ix_upleft_corner = np.argmin(corners[:, 0] + corners[:, 1])
ix_doright_corner = np.argmax(corners[:, 0] + corners[:, 1])
ix_upright_corner = np.argmax(corners[:, 0] - corners[:, 1])
ix_doleft_corner = np.argmin(corners[:, 0] - corners[:, 1])
# Rearrange corners in order.
cp_corners = np.copy(corners)
corners[0] = cp_corners[ix_upleft_corner]
corners[1] = cp_corners[ix_upright_corner]
corners[2] = cp_corners[ix_doleft_corner]
corners[3] = cp_corners[ix_doright_corner]
# Compute area inside quadrilateral.
x = corners[:, 0]
y = corners[:, 1]
area = (
np.abs((x[1]-x[0])*(y[2]-y[1]) - (x[1]-x[2])*(y[0]-y[1]))/2
+ np.abs((x[1]-x[3])*(y[2]-y[1]) - (x[1]-x[2])*(y[3]-y[1]))/2
)
if area > max_area:
doc_edges[:, 0] = ρ
doc_edges[:, 1] = θ
# For some reason we cannot change doc_corners by reference.
doc_corners[:, :] = corners
max_area = area
return max_area
def corner_detection(im):
orig_imsize = np.shape(im)[0:2]
# Resize and change color to greys.
im = imutils.resize(im, height=500)
img = cv2.cvtColor(im, cv2.COLOR_BGR2GRAY)
imsize = np.shape(im)
# Find Hough lines in the image.
img = cv2.GaussianBlur(img, (3, 3), 0)
edges = cv2.Canny(img, 100, 200)
lines = cv2.HoughLines(edges, 1, np.pi/180, 120)
lines = lines.reshape(np.shape(lines)[0], 2)
# Find the document's corners. We select as corners those defined as the intersection
# of four lines, which are parallel two by two, and have maximal area.
max_area = -1.0
# Iterate through every pair of lines.
doc_corners = np.zeros((4, 2))
doc_edges = np.zeros((4, 2))
for comb in combinations(range(np.shape(lines)[0]), 4):
# Find the four corners and area inside detected document and
# check if area is bigger than what we have.
max_area = find_doc_edges(lines[comb, :], doc_corners, doc_edges, max_area)
    # Check whether the algorithm has detected a document. doc_corners is preallocated
    # with four rows, so we rely on max_area staying negative when no valid set of edges
    # was found.
    if max_area > 0:
        has_detected_corners = True
    else:
        has_detected_corners = False
        return [], [], lines, has_detected_corners, 1.
# Approximate measurement of document's scale.
    # Mean of vertical (height) and horizontal (width) lengths.
µ_hl = (
np.linalg.norm(doc_corners[2] - doc_corners[0])
+ np.linalg.norm(doc_corners[3] - doc_corners[1])
)/2
µ_wl = (
np.linalg.norm(doc_corners[0] - doc_corners[1])
+ np.linalg.norm(doc_corners[2] - doc_corners[3])
)/2
doc_scale = µ_hl/µ_wl
# Rescale document's corners to original size.
doc_corners[:, 0] *= orig_imsize[0]/imsize[0]
doc_corners[:, 1] *= orig_imsize[1]/imsize[1]
return doc_corners, doc_edges, lines, has_detected_corners, doc_scale
def perspective_transformation(im, doc_corners, doc_scale):
imsize = np.shape(im)
pts1 = np.float32(doc_corners)
# Height and width of original image.
H, W = imsize[:2]
# Put into document's scale.
H = int(W*doc_scale)
pts2 = np.float32([[0, 0], [W, 0], [0, H], [W, H]])
M = cv2.getPerspectiveTransform(pts1, pts2)
imt = cv2.warpPerspective(im, M, (W, H))
return imt
def do_image_thresholding(im):
img = cv2.cvtColor(im, cv2.COLOR_BGR2GRAY)
img = cv2.adaptiveThreshold(img, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY, 51, 5)
return img
def insert_in_namepath(path, to_insert):
spath = path.split(".")
spath.insert(-1, to_insert)
spath[-1] = "." + spath[-1]
return "".join(spath)
def scanner_main(image_path, dest_name, debug=False):
im = cv2.imread(image_path)
orig = im.copy()
doc_corners, doc_edges, lines, has_detected_corners, doc_scale = corner_detection(im)
if has_detected_corners:
im = perspective_transformation(im, doc_corners, doc_scale)
im = do_image_thresholding(im)
im = imutils.resize(im, height=1000)
# Save final image.
cv2.imwrite(dest_name, im)
else:
print("The algorithm didn't detect the corners of the document. Please check with --debug enabled.")
# Debug: write an image of the middle process.
    if debug:
im_dbg = orig.copy()
# Overwrite detected document corners.
circ_radius = int(np.shape(im_dbg)[0]*0.01)
for corner in doc_corners:
cv2.circle(im_dbg, (int(corner[0]), int(corner[1])), circ_radius, (0, 0, 255), -1)
# Overwrite detected lines. If line is vertical return ad hoc values.
def xy_hough_lines(ρ, θ, x):
if np.abs(np.sin(θ)) > 1e-3:
y = (ρ - x*np.cos(θ))/np.sin(θ)
return int(y)
else:
return 0 if x == 0 else 500
for line in lines:
ρ, θ = line
ρ *= np.shape(im_dbg)[0]/500
x1 = 0
y1 = xy_hough_lines(ρ, θ, x1)
x2 = np.shape(im_dbg)[0]
y2 = xy_hough_lines(ρ, θ, x2)
cv2.line(im_dbg, (x1, y1), (x2, y2), (255, 0, 0), 5)
# Overwrite detected document edges.
for line in doc_edges:
ρ, θ = line
ρ *= np.shape(im_dbg)[0]/500
x1 = 0
y1 = xy_hough_lines(ρ, θ, x1)
x2 = np.shape(im_dbg)[0]
y2 = xy_hough_lines(ρ, θ, x2)
cv2.line(im_dbg, (x1, y1), (x2, y2), (0, 0, 255), 5)
cv2.imwrite(insert_in_namepath(dest_name, "_dbg"), im_dbg)
return
if __name__ == "__main__":
if len(sys.argv) >= 3:
if "--debug" in sys.argv:
sys.argv.remove("--debug")
scanner_main(sys.argv[1], sys.argv[2], True)
else:
scanner_main(sys.argv[1], sys.argv[2], False)
else:
print("Please, introduce the path of the original image and the path of the destination image.")
sys.exit() |
from rest_framework.authentication import SessionAuthentication
class CsrfExemptSessionAuthentication(SessionAuthentication):
def enforce_csrf(self, request):
return # disable all CSRF protections locally - DO NOT DO THIS IN PRODUCTION
|
"""
Unit tests of functions within :mod:`weaver.processes.convert`.
"""
# pylint: disable=R1729 # ignore non-generator representation employed for displaying test log results
from collections import OrderedDict
from copy import deepcopy
import pytest
from pywps.inout.formats import Format
from pywps.inout.inputs import LiteralInput
from pywps.inout.literaltypes import AllowedValue, AnyValue
from pywps.validator.mode import MODE
from weaver.exceptions import PackageTypeError
from weaver.formats import CONTENT_TYPE_APP_JSON, CONTENT_TYPE_APP_NETCDF, CONTENT_TYPE_APP_XML, CONTENT_TYPE_TEXT_PLAIN
from weaver.processes.constants import PROCESS_SCHEMA_OGC, PROCESS_SCHEMA_OLD, WPS_INPUT, WPS_LITERAL
from weaver.processes.convert import _are_different_and_set # noqa: W0212
from weaver.processes.convert import (
DEFAULT_FORMAT,
PACKAGE_ARRAY_MAX_SIZE,
cwl2json_input_values,
cwl2wps_io,
is_cwl_array_type,
is_cwl_enum_type,
is_cwl_file_type,
json2wps_allowed_values,
json2wps_datatype,
merge_io_formats,
normalize_ordered_io,
repr2json_input_values
)
from weaver.utils import null
class ObjectWithEqProperty(object):
"""
Dummy object for some test evaluations.
"""
_prop = "prop"
def __init__(self, prop="prop"):
self._prop = prop
@property
def some_property(self):
return self._prop
def __eq__(self, other):
return self.some_property == other.some_property
def test_are_different_and_set_both_set():
assert _are_different_and_set(1, 2) is True
assert _are_different_and_set(1, 1) is False
assert _are_different_and_set({"a": 1}, {"a": 2}) is True
assert _are_different_and_set({"a": 1}, {"a": 1}) is False
assert _are_different_and_set({"a": 1, "b": 2}, {"a": 1}) is True
assert _are_different_and_set(ObjectWithEqProperty(), ObjectWithEqProperty()) is False
assert _are_different_and_set(ObjectWithEqProperty("a"), ObjectWithEqProperty("b")) is True
def test_are_different_and_set_similar_str_formats():
assert _are_different_and_set(b"something", u"something") is False
assert _are_different_and_set(u"something", u"something") is False
assert _are_different_and_set(b"something", b"something") is False
assert _are_different_and_set(b"something", u"else") is True
assert _are_different_and_set(u"something", u"else") is True
assert _are_different_and_set(b"something", b"else") is True
def test_are_different_and_set_both_null():
assert _are_different_and_set(null, null) is False
def test_are_different_and_set_single_null():
"""
Tests that equality check is correctly handled when a single item amongst the two is ``null``.
    This was identified as problematic in the case where the checked and set item implements ``__eq__`` and expects a
    property to exist, which is not the case when the second item is ``null``.
"""
item = ObjectWithEqProperty()
assert _are_different_and_set(item, null) is False
assert _are_different_and_set(null, item) is False
def test_json2wps_datatype():
test_cases = [
("float", {"type": WPS_LITERAL, "data_type": "float"}), # noqa: E241
("integer", {"type": WPS_LITERAL, "data_type": "integer"}), # noqa: E241
("integer", {"type": WPS_LITERAL, "data_type": "int"}), # noqa: E241
("boolean", {"type": WPS_LITERAL, "data_type": "boolean"}), # noqa: E241
("boolean", {"type": WPS_LITERAL, "data_type": "bool"}), # noqa: E241
("string", {"type": WPS_LITERAL, "data_type": "string"}), # noqa: E241
("float", {"type": WPS_LITERAL, "default": 1.0}), # noqa: E241
("integer", {"type": WPS_LITERAL, "default": 1}), # noqa: E241
("boolean", {"type": WPS_LITERAL, "default": True}), # noqa: E241
("string", {"type": WPS_LITERAL, "default": "1"}), # noqa: E241
("float", {"type": WPS_LITERAL, "supported_values": [1.0, 2.0]}), # noqa: E241
("integer", {"type": WPS_LITERAL, "supported_values": [1, 2]}), # noqa: E241
("boolean", {"type": WPS_LITERAL, "supported_values": [True, False]}), # noqa: E241
("string", {"type": WPS_LITERAL, "supported_values": ["yes", "no"]}), # noqa: E241
("float", {"data_type": "float"}), # noqa: E241
("integer", {"data_type": "integer"}), # noqa: E241
("integer", {"data_type": "int"}), # noqa: E241
("boolean", {"data_type": "boolean"}), # noqa: E241
("boolean", {"data_type": "bool"}), # noqa: E241
("string", {"data_type": "string"}), # noqa: E241
]
for expect, test_io in test_cases:
copy_io = deepcopy(test_io) # can get modified by function
assert json2wps_datatype(test_io) == expect, "Failed for [{}]".format(copy_io)
def test_json2wps_allowed_values():
for i, (values, expect) in enumerate([
({"allowedvalues": [1, 2, 3]},
[AllowedValue(value=1), AllowedValue(value=2), AllowedValue(value=3)]),
({"allowedvalues": ["A", "B"]},
[AllowedValue(value="A"), AllowedValue(value="B")]),
({"allowedvalues": [{"closure": "open", "minimum": 1, "maximum": 5}]},
[AllowedValue(minval=1, maxval=5, range_closure="open")]),
({"allowedvalues": [{"closure": "open-closed", "minimum": 0, "maximum": 6, "spacing": 2}]},
[AllowedValue(minval=0, maxval=6, spacing=2, range_closure="open-closed")]),
({"literalDataDomains": [{"valueDefinition": [1, 2, 3]}]},
[AllowedValue(value=1), AllowedValue(value=2), AllowedValue(value=3)]),
({"literalDataDomains": [{"valueDefinition": ["A", "B"]}]},
[AllowedValue(value="A"), AllowedValue(value="B")]),
({"literalDataDomains": [{"valueDefinition": [{"closure": "open", "minimum": 1, "maximum": 5}]}]},
[AllowedValue(minval=1, maxval=5, range_closure="open")]),
({"literalDataDomains": [
{"valueDefinition": [{"closure": "open-closed", "minimum": 0, "maximum": 6, "spacing": 2}]}]},
[AllowedValue(minval=0, maxval=6, spacing=2, range_closure="open-closed")]),
]):
result = json2wps_allowed_values(values)
assert result == expect, "Failed test {}".format(i)
def test_cwl2wps_io_null_or_array_of_enums():
"""
    I/O `CWL` with ``["null", "<enum-type>", "<array-enum-type>"]`` must be parsed as `WPS` with parameters
``minOccurs=0``, ``maxOccurs>1`` and ``allowedValues`` as restricted set of values.
"""
allowed_values = ["A", "B", "C"]
io_info = {
"name": "test",
"type": [
"null", # minOccurs=0
{"type": "enum", "symbols": allowed_values}, # if maxOccurs=1, only this variant would be provided
{"type": "array", "items": {"type": "enum", "symbols": allowed_values}}, # but also this for maxOccurs>1
],
}
wps_io = cwl2wps_io(io_info, WPS_INPUT)
assert isinstance(wps_io, LiteralInput)
assert wps_io.min_occurs == 0
assert wps_io.max_occurs == PACKAGE_ARRAY_MAX_SIZE
assert wps_io.data_type == "string"
assert wps_io.allowed_values == [AllowedValue(value=val) for val in allowed_values]
def test_cwl2wps_io_raise_mixed_types():
io_type1 = ["string", "int"]
io_type2 = [
"int",
{"type": "array", "items": "string"}
]
io_type3 = [
{"type": "enum", "symbols": ["1", "2"]}, # symbols as literal strings != int literal
"null",
"int"
]
io_type4 = [
"null",
{"type": "enum", "symbols": ["1", "2"]}, # symbols as literal strings != int items
{"type": "array", "items": "int"}
]
for i, test_type in enumerate([io_type1, io_type2, io_type3, io_type4]):
io_info = {"name": "test-{}".format(i), "type": test_type}
with pytest.raises(PackageTypeError):
cwl2wps_io(io_info, WPS_INPUT)
def test_is_cwl_array_type_explicit_invalid_item():
io_info = {
"name": "test",
"type": {
"type": "array",
"items": "unknown-type-item"
}
}
with pytest.raises(PackageTypeError):
is_cwl_array_type(io_info)
def test_is_cwl_array_type_shorthand_invalid_item():
"""
In case of shorthand syntax, because type is only a string, it shouldn't raise.
Type is returned as is and value validation is left to later calls.
"""
io_info = {
"name": "test",
"type": "unknown[]"
}
try:
res = is_cwl_array_type(io_info)
assert res[0] is False
assert res[1] == "unknown[]"
assert res[2] == MODE.NONE
assert res[3] == AnyValue
except PackageTypeError:
pytest.fail("should not raise an error in this case")
def test_is_cwl_array_type_not_array():
io_info = {
"name": "test",
"type": "float",
}
res = is_cwl_array_type(io_info)
assert res[0] is False
assert res[1] == "float"
assert res[2] == MODE.NONE
assert res[3] == AnyValue
def test_is_cwl_array_type_simple_enum():
io_info = {
"name": "test",
"type": "enum",
"symbols": ["a", "b", "c"]
}
res = is_cwl_array_type(io_info)
assert res[0] is False
assert res[1] == "enum"
assert res[2] == MODE.NONE
assert res[3] == AnyValue
def test_is_cwl_array_type_explicit_base():
io_info = {
"name": "test",
"type": {
"type": "array",
"items": "string"
}
}
res = is_cwl_array_type(io_info)
assert res[0] is True
assert res[1] == "string"
assert res[2] == MODE.NONE
assert res[3] == AnyValue
def test_is_cwl_array_type_explicit_enum():
io_info = {
"name": "test",
"type": {
"type": "array",
"items": {
"type": "enum",
"symbols": ["a", "b", "c"]
}
}
}
res = is_cwl_array_type(io_info)
assert res[0] is True
assert res[1] == "string"
assert res[2] == MODE.SIMPLE
assert res[3] == ["a", "b", "c"]
def test_is_cwl_array_type_shorthand_base():
io_info = {
"name": "test",
"type": "string[]",
}
res = is_cwl_array_type(io_info)
assert res[0] is True
assert res[1] == "string"
assert res[2] == MODE.NONE
assert res[3] == AnyValue
def test_is_cwl_array_type_shorthand_enum():
io_info = {
"name": "test",
"type": "enum[]",
"symbols": ["a", "b", "c"]
}
res = is_cwl_array_type(io_info)
assert res[0] is True
assert res[1] == "string"
assert res[2] == MODE.SIMPLE
assert res[3] == ["a", "b", "c"]
def test_is_cwl_array_type_explicit_optional_not_array():
io_info = {
"name": "test",
"type": ["null", "float"],
}
res = is_cwl_array_type(io_info)
assert res[0] is False
assert res[1] == "float"
assert res[2] == MODE.NONE
assert res[3] == AnyValue
def test_is_cwl_array_type_explicit_optional_simple_enum():
io_info = {
"name": "test",
"type": ["null", "enum"],
"symbols": ["a", "b", "c"]
}
res = is_cwl_array_type(io_info)
assert res[0] is False
assert res[1] == "enum"
assert res[2] == MODE.NONE
assert res[3] == AnyValue
def test_is_cwl_array_type_explicit_optional_explicit_base():
io_info = {
"name": "test",
"type": [
"null",
{"type": "array", "items": "string"}
]
}
res = is_cwl_array_type(io_info)
assert res[0] is True
assert res[1] == "string"
assert res[2] == MODE.NONE
assert res[3] == AnyValue
def test_is_cwl_array_type_explicit_optional_explicit_enum():
io_info = {
"name": "test",
"type": [
"null",
{
"type": "array",
"items": {
"type": "enum",
"symbols": ["a", "b", "c"]
}
}
]
}
res = is_cwl_array_type(io_info)
assert res[0] is True
assert res[1] == "string"
assert res[2] == MODE.SIMPLE
assert res[3] == ["a", "b", "c"]
def test_is_cwl_array_type_explicit_optional_shorthand_base():
io_info = {
"name": "test",
"type": ["null", "string[]"]
}
res = is_cwl_array_type(io_info)
assert res[0] is True
assert res[1] == "string"
assert res[2] == MODE.NONE
assert res[3] == AnyValue
def test_is_cwl_array_type_explicit_optional_shorthand_enum():
io_info = {
"name": "test",
"type": ["null", "enum[]"],
"symbols": ["a", "b", "c"]
}
res = is_cwl_array_type(io_info)
assert res[0] is True
assert res[1] == "string"
assert res[2] == MODE.SIMPLE
assert res[3] == ["a", "b", "c"]
def test_is_cwl_enum_type_string():
io_info = {
"name": "test",
"type": {
"type": "enum",
"symbols": ["a", "b", "c"]
}
}
res = is_cwl_enum_type(io_info)
assert res[0] is True
assert res[1] == "string"
assert res[2] == MODE.SIMPLE
assert res[3] == ["a", "b", "c"]
def test_is_cwl_enum_type_float():
io_info = {
"name": "test",
"type": {
"type": "enum",
"symbols": [1.9, 2.8, 3.7]
}
}
res = is_cwl_enum_type(io_info)
assert res[0] is True
assert res[1] == "float"
assert res[2] == MODE.SIMPLE
assert res[3] == [1.9, 2.8, 3.7]
def test_is_cwl_enum_type_int():
io_info = {
"name": "test",
"type": {
"type": "enum",
"symbols": [1, 2, 3]
}
}
res = is_cwl_enum_type(io_info)
assert res[0] is True
assert res[1] == "int"
assert res[2] == MODE.SIMPLE
assert res[3] == [1, 2, 3]
def test_is_cwl_file_type_guaranteed_file():
io_info = {
"name": "test",
"type": "File"
}
assert is_cwl_file_type(io_info)
def test_is_cwl_file_type_potential_file():
io_info = {
"name": "test",
"type": ["null", "File"]
}
assert is_cwl_file_type(io_info)
def test_is_cwl_file_type_file_array():
io_info = {
"name": "test",
"type": {"type": "array", "items": "File"}
}
assert is_cwl_file_type(io_info)
def test_is_cwl_file_type_none_one_or_many_files():
io_info = {
"name": "test",
"type": [
"null",
"File",
{"type": "array", "items": "File"}
]
}
assert is_cwl_file_type(io_info)
def test_is_cwl_file_type_not_files():
test_types = [
"int",
"string",
"float",
["null", "string"],
{"type": "enum", "symbols": [1, 2]},
{"type": "enum", "symbols": ["A", "B"]},
{"type": "array", "items": "string"},
{"type": "array", "items": "int"},
["null", {"type": "array", "items": "string"}],
]
for i, io_type in enumerate(test_types):
io_info = {"name": "test-{}".format(i), "type": io_type}
assert not is_cwl_file_type(io_info), "Test [{}]: {}".format(i, io_info)
def assert_formats_equal_any_order(format_result, format_expect):
assert len(format_result) == len(format_expect), "Expected formats sizes mismatch"
for r_fmt in format_result:
for e_fmt in format_expect:
if r_fmt.json == e_fmt.json:
format_expect.remove(e_fmt)
break
assert not format_expect, "Not all expected formats matched {}".format([fmt.json for fmt in format_expect])
def test_merge_io_formats_no_wps():
wps_fmt = []
cwl_fmt = [DEFAULT_FORMAT]
res_fmt = merge_io_formats(wps_fmt, cwl_fmt)
assert isinstance(res_fmt, list)
assert len(res_fmt) == 1
assert res_fmt[0] is DEFAULT_FORMAT
def test_merge_io_formats_with_wps_and_default_cwl():
wps_fmt = [Format(CONTENT_TYPE_APP_NETCDF)]
cwl_fmt = [DEFAULT_FORMAT]
res_fmt = merge_io_formats(wps_fmt, cwl_fmt)
assert isinstance(res_fmt, list)
assert_formats_equal_any_order(res_fmt, [Format(CONTENT_TYPE_APP_NETCDF)])
def test_merge_io_formats_both_wps_and_cwl():
wps_fmt = [Format(CONTENT_TYPE_APP_NETCDF)]
cwl_fmt = [Format(CONTENT_TYPE_APP_JSON)]
res_fmt = merge_io_formats(wps_fmt, cwl_fmt)
assert isinstance(res_fmt, list)
assert_formats_equal_any_order(res_fmt, [Format(CONTENT_TYPE_APP_NETCDF), Format(CONTENT_TYPE_APP_JSON)])
def test_merge_io_formats_wps_complements_cwl():
wps_fmt = [Format(CONTENT_TYPE_APP_JSON, encoding="utf-8")]
cwl_fmt = [Format(CONTENT_TYPE_APP_JSON)]
res_fmt = merge_io_formats(wps_fmt, cwl_fmt)
assert isinstance(res_fmt, list)
assert_formats_equal_any_order(res_fmt, [Format(CONTENT_TYPE_APP_JSON, encoding="utf-8")])
def test_merge_io_formats_wps_overlaps_cwl():
wps_fmt = [
Format(CONTENT_TYPE_APP_JSON, encoding="utf-8"), # complements CWL details
Format(CONTENT_TYPE_APP_NETCDF), # duplicated in CWL (but different index)
Format(CONTENT_TYPE_TEXT_PLAIN) # extra (but not default)
]
cwl_fmt = [
Format(CONTENT_TYPE_APP_JSON), # overridden by WPS version
Format(CONTENT_TYPE_APP_XML), # extra preserved
Format(CONTENT_TYPE_APP_NETCDF), # duplicated with WPS, merged
]
res_fmt = merge_io_formats(wps_fmt, cwl_fmt)
assert isinstance(res_fmt, list)
assert_formats_equal_any_order(res_fmt, [
Format(CONTENT_TYPE_APP_JSON, encoding="utf-8"),
Format(CONTENT_TYPE_APP_NETCDF),
Format(CONTENT_TYPE_APP_XML),
Format(CONTENT_TYPE_TEXT_PLAIN),
])
def test_normalize_ordered_io_with_builtin_dict_and_hints():
"""
Validate that I/O are all still there in the results with their respective contents.
Literal types should be modified to a dictionary with ``type`` key.
All dictionary contents should then remain as is, except with added ``id``.
.. note::
Ordering is not mandatory, so we don't validate this.
Also actually hard to test since employed python version running the test changes the behaviour.
"""
test_inputs = {
"id-literal-type": "float",
"id-dict-details": {
"type": "string"
},
"id-array-type": {
"type": {
"type": "array",
"items": "float"
}
},
"id-literal-array": "string[]"
}
test_wps_hints = [
{"id": "id-literal-type"},
{"id": "id-array-type"},
{"id": "id-dict-with-more-stuff"},
{"id": "id-dict-details"},
]
expected_result = [
{"id": "id-literal-type", "type": "float"},
{"id": "id-dict-details", "type": "string"},
{"id": "id-array-type", "type": {"type": "array", "items": "float"}},
{"id": "id-literal-array", "type": "string[]"}
]
result = normalize_ordered_io(test_inputs, test_wps_hints)
assert isinstance(result, list) and len(result) == len(expected_result)
# *maybe* not same order, so validate values accordingly
for expect in expected_result:
validated = False
for res in result:
if res["id"] == expect["id"]:
assert res == expect
validated = True
if not validated:
raise AssertionError("expected '{}' was not validated against any result value".format(expect["id"]))
def test_normalize_ordered_io_with_ordered_dict():
test_inputs = OrderedDict([
("id-literal-type", "float"),
("id-dict-details", {"type": "string"}),
("id-array-type", {
"type": {
"type": "array",
"items": "float"
}
}),
("id-literal-array", "string[]"),
])
expected_result = [
{"id": "id-literal-type", "type": "float"},
{"id": "id-dict-details", "type": "string"},
{"id": "id-array-type", "type": {"type": "array", "items": "float"}},
{"id": "id-literal-array", "type": "string[]"}
]
result = normalize_ordered_io(test_inputs)
assert isinstance(result, list) and len(result) == len(expected_result)
assert result == expected_result
def test_normalize_ordered_io_with_list():
"""
Everything should remain the same as list variant is only allowed to have I/O objects.
(i.e.: not allowed to have both objects and literal string-type simultaneously as for dictionary variant).
"""
expected_result = [
{"id": "id-literal-type", "type": "float"},
{"id": "id-dict-details", "type": "string"},
{"id": "id-array-type", "type": {"type": "array", "items": "float"}},
{"id": "id-literal-array", "type": "string[]"}
]
result = normalize_ordered_io(deepcopy(expected_result))
assert isinstance(result, list) and len(result) == len(expected_result)
assert result == expected_result
def test_normalize_ordered_io_when_direct_type_string():
inputs_as_strings = {
"input-1": "File[]",
"input-2": "float"
}
result = normalize_ordered_io(inputs_as_strings)
assert isinstance(result, list)
assert len(result) == len(inputs_as_strings)
assert all([isinstance(res_i, dict) for res_i in result])
assert all([i in [res_i["id"] for res_i in result] for i in inputs_as_strings])
assert all(["type" in res_i and res_i["type"] == inputs_as_strings[res_i["id"]] for res_i in result])
def test_cwl2json_input_values_ogc_format():
values = {
"test1": "value",
"test2": 1,
"test3": 1.23,
"test4": {"class": "File", "path": "/tmp/random.txt"},
"test5": ["val1", "val2"],
"test6": [1, 2],
"test7": [1.23, 4.56],
"test8": [{"class": "File", "path": "/tmp/other.txt"}]
}
expect = {
"test1": {"value": "value"},
"test2": {"value": 1},
"test3": {"value": 1.23},
"test4": {"href": "/tmp/random.txt"},
"test5": [{"value": "val1"}, {"value": "val2"}],
"test6": [{"value": 1}, {"value": 2}],
"test7": [{"value": 1.23}, {"value": 4.56}],
"test8": [{"href": "/tmp/other.txt"}]
}
result = cwl2json_input_values(values, PROCESS_SCHEMA_OGC)
assert result == expect
def test_cwl2json_input_values_old_format():
values = {
"test1": "value",
"test2": 1,
"test3": 1.23,
"test4": {"class": "File", "path": "/tmp/random.txt"},
"test5": ["val1", "val2"],
"test6": [1, 2],
"test7": [1.23, 4.56],
"test8": [{"class": "File", "path": "/tmp/other.txt"}]
}
expect = [
{"id": "test1", "value": "value"},
{"id": "test2", "value": 1},
{"id": "test3", "value": 1.23},
{"id": "test4", "href": "/tmp/random.txt"},
{"id": "test5", "value": "val1"},
{"id": "test5", "value": "val2"},
{"id": "test6", "value": 1},
{"id": "test6", "value": 2},
{"id": "test7", "value": 1.23},
{"id": "test7", "value": 4.56},
{"id": "test8", "href": "/tmp/other.txt"}
]
result = cwl2json_input_values(values, PROCESS_SCHEMA_OLD)
assert result == expect
def test_repr2json_input_values():
values = [
"test1=value",
"test2:int=1",
"test3:float=1.23",
"test4:File=/tmp/random.txt",
"test5=val1;val2",
"test6:int=1;2",
"test7:float=1.23;4.56",
"test8:file=/tmp/other.txt",
"test9:str=short",
"test10:string=long",
]
expect = [
{"id": "test1", "value": "value"},
{"id": "test2", "value": 1},
{"id": "test3", "value": 1.23},
{"id": "test4", "href": "/tmp/random.txt"},
{"id": "test5", "value": ["val1", "val2"]},
{"id": "test6", "value": [1, 2]},
{"id": "test7", "value": [1.23, 4.56]},
{"id": "test8", "href": "/tmp/other.txt"},
{"id": "test9", "value": "short"},
{"id": "test10", "value": "long"}
]
result = repr2json_input_values(values)
assert result == expect
|
import tkinter as tk
import tkinter.font as font
from in_out import in_out
from motion import noise
from rect_noise import rect_noise
from record import record
from PIL import Image, ImageTk
window = tk.Tk()
window.title("Smart cctv")
window.iconphoto(False, tk.PhotoImage(file='mn.png'))
window.geometry('1080x760')
frame1 = tk.Frame(window)
label_title = tk.Label(frame1, text="Smart cctv Camera")
label_font = font.Font(size=35, weight='bold',family='Helvetica')
label_title['font'] = label_font
label_title.grid(pady=(10,10), column=2)
icon = Image.open('icons/spy.png')
icon = icon.resize((150,150), Image.LANCZOS)  # Image.ANTIALIAS was removed in Pillow 10; LANCZOS is the equivalent filter
icon = ImageTk.PhotoImage(icon)
label_icon = tk.Label(frame1, image=icon)
label_icon.grid(row=1, pady=(5,10), column=2)
btn1_image = Image.open('icons/lamp.png')
btn1_image = btn1_image.resize((50,50), Image.LANCZOS)
btn1_image = ImageTk.PhotoImage(btn1_image)
btn2_image = Image.open('icons/rectangle-of-cutted-line-geometrical-shape.png')
btn2_image = btn2_image.resize((50,50), Image.LANCZOS)
btn2_image = ImageTk.PhotoImage(btn2_image)
btn5_image = Image.open('icons/exit.png')
btn5_image = btn5_image.resize((50,50), Image.LANCZOS)
btn5_image = ImageTk.PhotoImage(btn5_image)
btn3_image = Image.open('icons/security-camera.png')
btn3_image = btn3_image.resize((50,50), Image.LANCZOS)
btn3_image = ImageTk.PhotoImage(btn3_image)
btn6_image = Image.open('icons/incognito.png')
btn6_image = btn6_image.resize((50,50), Image.LANCZOS)
btn6_image = ImageTk.PhotoImage(btn6_image)
btn4_image = Image.open('icons/recording.png')
btn4_image = btn4_image.resize((50,50), Image.LANCZOS)
btn4_image = ImageTk.PhotoImage(btn4_image)
# --------------- Button -------------------#
btn_font = font.Font(size=25)
btn1 = tk.Button(frame1, text='Monitor', height=90, width=180, fg='green', image=btn1_image, compound='left')
btn1['font'] = btn_font
btn1.grid(row=3, pady=(20,10))
btn2 = tk.Button(frame1, text='Rectangle', height=90, width=180, fg='orange', command=rect_noise, compound='left', image=btn2_image)
btn2['font'] = btn_font
btn2.grid(row=3, pady=(20,10), column=3, padx=(20,5))
btn_font = font.Font(size=25)
btn3 = tk.Button(frame1, text='Noise', height=90, width=180, fg='green', command=noise, image=btn3_image, compound='left')
btn3['font'] = btn_font
btn3.grid(row=5, pady=(20,10))
btn4 = tk.Button(frame1, text='Record', height=90, width=180, fg='orange', command=record, image=btn4_image, compound='left')
btn4['font'] = btn_font
btn4.grid(row=5, pady=(20,10), column=3)
btn6 = tk.Button(frame1, text='In Out', height=90, width=180, fg='green', command=in_out, image=btn6_image, compound='left')
btn6['font'] = btn_font
btn6.grid(row=4, pady=(20,10), column=2)
btn5 = tk.Button(frame1, height=90, width=180, fg='red', command=window.quit, image=btn5_image)
btn5['font'] = btn_font
btn5.grid(row=6, pady=(20,10), column=2)
frame1.pack()
window.mainloop()
|
from .enums import TextChoices, IntegerChoices
from .fields import Field
|
import os
os.system("python seg_rgbd_flexible.py -i ./pc5 -th 2700 -s 860 -e 1900 -o ./pc5/nxp")
os.system("python seg_rgbd_flexible.py -i ./pc6 -th 2700 -s 500 -e 1900 -o ./pc6/nxp")
os.system("python seg_rgbd_flexible.py -i ./zmq1_6 -th 2900 -s 514 -e 1900 -o ./zmq1_6/nxp")
os.system("python seg_rgbd_flexible.py -i ./zmq2_6 -th 2900 -s 543 -e 1900 -o ./zmq2_6/nxp")
os.system("python seg_rgbd_flexible.py -i ./pc7 -th 2700 -s 300 -e 1880 -o ./pc7/nxp")
os.system("python seg_rgbd_flexible.py -i ./zmq1_7 -th 3000 -s 300 -e 1900 -o ./zmq1_7/nxp")
os.system("python seg_rgbd_flexible.py -i ./zmq2_7 -th 3300 -s 300 -e 1900 -o ./zmq2_7/nxp")
|
#!/usr/bin/env python3
# Copyright (c) 2021 The MicroBitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""
This checks that all files in the repository have correct filenames and permissions
"""
import os
import re
import sys
from subprocess import check_output
from typing import Optional, NoReturn
CMD_ALL_FILES = "git ls-files -z --full-name"
CMD_SOURCE_FILES = 'git ls-files -z --full-name -- "*.[cC][pP][pP]" "*.[hH]" "*.[pP][yY]" "*.[sS][hH]"'
CMD_SHEBANG_FILES = "git grep --full-name --line-number -I '^#!'"
ALLOWED_FILENAME_REGEXP = "^[a-zA-Z0-9/_.@][a-zA-Z0-9/_.@-]*$"
ALLOWED_SOURCE_FILENAME_REGEXP = "^[a-z0-9_./-]+$"
ALLOWED_SOURCE_FILENAME_EXCEPTION_REGEXP = (
"^src/(secp256k1/|univalue/|test/fuzz/FuzzedDataProvider.h)"
)
ALLOWED_PERMISSION_NON_EXECUTABLES = 644
ALLOWED_PERMISSION_EXECUTABLES = 755
ALLOWED_EXECUTABLE_SHEBANG = {
"py": [b"#!/usr/bin/env python3"],
"sh": [b"#!/usr/bin/env bash", b"#!/bin/sh"],
}
class FileMeta(object):
def __init__(self, file_path: str):
self.file_path = file_path
@property
def extension(self) -> Optional[str]:
"""
Returns the file extension for a given filename string.
eg:
'ci/lint_run_all.sh' -> 'sh'
'ci/retry/retry' -> None
'contrib/devtools/split-debug.sh.in' -> 'in'
"""
        return os.path.splitext(self.file_path)[1].strip(".") or None
@property
def full_extension(self) -> Optional[str]:
"""
Returns the full file extension for a given filename string.
eg:
'ci/lint_run_all.sh' -> 'sh'
'ci/retry/retry' -> None
'contrib/devtools/split-debug.sh.in' -> 'sh.in'
"""
filename_parts = self.file_path.split(os.extsep, 1)
try:
return filename_parts[1]
except IndexError:
return None
@property
def permissions(self) -> int:
"""
Returns the octal file permission of the file
"""
return int(oct(os.stat(self.file_path).st_mode)[-3:])
def check_all_filenames() -> int:
"""
Checks every file in the repository against an allowed regexp to make sure only lowercase or uppercase
alphanumerics (a-zA-Z0-9), underscores (_), hyphens (-), at (@) and dots (.) are used in repository filenames.
"""
filenames = check_output(CMD_ALL_FILES, shell=True).decode("utf8").rstrip("\0").split("\0")
filename_regex = re.compile(ALLOWED_FILENAME_REGEXP)
failed_tests = 0
for filename in filenames:
if not filename_regex.match(filename):
print(
f"""File {repr(filename)} does not not match the allowed filename regexp ('{ALLOWED_FILENAME_REGEXP}')."""
)
failed_tests += 1
return failed_tests
def check_source_filenames() -> int:
"""
Checks only source files (*.cpp, *.h, *.py, *.sh) against a stricter allowed regexp to make sure only lowercase
alphanumerics (a-z0-9), underscores (_), hyphens (-) and dots (.) are used in source code filenames.
Additionally there is an exception regexp for directories or files which are excepted from matching this regexp.
"""
filenames = check_output(CMD_SOURCE_FILES, shell=True).decode("utf8").rstrip("\0").split("\0")
filename_regex = re.compile(ALLOWED_SOURCE_FILENAME_REGEXP)
filename_exception_regex = re.compile(ALLOWED_SOURCE_FILENAME_EXCEPTION_REGEXP)
failed_tests = 0
for filename in filenames:
if not filename_regex.match(filename) and not filename_exception_regex.match(filename):
print(
f"""File {repr(filename)} does not not match the allowed source filename regexp ('{ALLOWED_SOURCE_FILENAME_REGEXP}'), or the exception regexp ({ALLOWED_SOURCE_FILENAME_EXCEPTION_REGEXP})."""
)
failed_tests += 1
return failed_tests
def check_all_file_permissions() -> int:
"""
Checks all files in the repository match an allowed executable or non-executable file permission octal.
Additionally checks that for executable files, the file contains a shebang line
"""
filenames = check_output(CMD_ALL_FILES, shell=True).decode("utf8").rstrip("\0").split("\0")
failed_tests = 0
for filename in filenames:
file_meta = FileMeta(filename)
if file_meta.permissions == ALLOWED_PERMISSION_EXECUTABLES:
with open(filename, "rb") as f:
shebang = f.readline().rstrip(b"\n")
# For any file with executable permissions the first line must contain a shebang
if not shebang.startswith(b"#!"):
print(
f"""File "{filename}" has permission {ALLOWED_PERMISSION_EXECUTABLES} (executable) and is thus expected to contain a shebang '#!'. Add shebang or do "chmod {ALLOWED_PERMISSION_NON_EXECUTABLES} {filename}" to make it non-executable."""
)
failed_tests += 1
# For certain file extensions that have been defined, we also check that the shebang conforms to a specific
# allowable set of shebangs
if file_meta.extension in ALLOWED_EXECUTABLE_SHEBANG.keys():
if shebang not in ALLOWED_EXECUTABLE_SHEBANG[file_meta.extension]:
print(
f"""File "{filename}" is missing expected shebang """
+ " or ".join(
[
x.decode("utf-8")
for x in ALLOWED_EXECUTABLE_SHEBANG[file_meta.extension]
]
)
)
failed_tests += 1
elif file_meta.permissions == ALLOWED_PERMISSION_NON_EXECUTABLES:
continue
else:
print(
f"""File "{filename}" has unexpected permission {file_meta.permissions}. Do "chmod {ALLOWED_PERMISSION_NON_EXECUTABLES} {filename}" (if non-executable) or "chmod {ALLOWED_PERMISSION_EXECUTABLES} {filename}" (if executable)."""
)
failed_tests += 1
return failed_tests
def check_shebang_file_permissions() -> int:
"""
Checks every file that contains a shebang line to ensure it has an executable permission
"""
filenames = check_output(CMD_SHEBANG_FILES, shell=True).decode("utf8").strip().split("\n")
# The git grep command we use returns files which contain a shebang on any line within the file
# so we need to filter the list to only files with the shebang on the first line
filenames = [filename.split(":1:")[0] for filename in filenames if ":1:" in filename]
failed_tests = 0
for filename in filenames:
file_meta = FileMeta(filename)
if file_meta.permissions != ALLOWED_PERMISSION_EXECUTABLES:
# These file types are typically expected to be sourced and not executed directly
if file_meta.full_extension in ["bash", "init", "openrc", "sh.in"]:
continue
# *.py files which don't contain an `if __name__ == '__main__'` are not expected to be executed directly
if file_meta.extension == "py":
with open(filename, "r", encoding="utf8") as f:
file_data = f.read()
if not re.search("""if __name__ == ['"]__main__['"]:""", file_data):
continue
print(
f"""File "{filename}" contains a shebang line, but has the file permission {file_meta.permissions} instead of the expected executable permission {ALLOWED_PERMISSION_EXECUTABLES}. Do "chmod {ALLOWED_PERMISSION_EXECUTABLES} {filename}" (or remove the shebang line)."""
)
failed_tests += 1
return failed_tests
def main() -> NoReturn:
failed_tests = 0
failed_tests += check_all_filenames()
failed_tests += check_source_filenames()
failed_tests += check_all_file_permissions()
failed_tests += check_shebang_file_permissions()
if failed_tests:
print(
f"ERROR: There were {failed_tests} failed tests in the lint-files.py lint test. Please resolve the above errors."
)
sys.exit(1)
else:
sys.exit(0)
if __name__ == "__main__":
main()
|
#!/usr/bin/env python3
# Beerware Licence
# <louis@gatin.me> wrote this file. As long as you retain this notice you
# can do whatever you want with this stuff. If we meet some day, and you think
# this stuff is worth it, you can buy me a beer in return.
import socket
from argparse import ArgumentParser
from scapy.all import sniff
from pyModbusTCP.client import ModbusClient
from parser import parse_address
parser = ArgumentParser(description="Sniff ModbusTCP packets on network and raise alarms if intrusions are detected")
parser.add_argument('-i', '--interface', type=str, default='eth0', help='Interface')
parser.add_argument('-p', '--port', type=int, default=502, help='Port to listen to')
parser.add_argument('-a', '--alert', type=str, help='Modbus server to raise alert <address>:[<port=502>]/<register>')
parser.add_argument('authorized_hosts', nargs='*', help="list of authorized hosts")
args = parser.parse_args()
# Add own IP address to authorized hosts
args.authorized_hosts.append(socket.gethostbyname(socket.gethostname()))
# Open Modbus client if needed
if args.alert is not None:
adr = parse_address(args.alert)
args.__dict__.update({k: adr[k] for k in ["host", "port", "register"]})
c = ModbusClient(host=args.host, port=args.port, auto_open=True)
if not c.is_open():
if not c.open():
raise ConnectionError(f'cannot connect to Modbus server at {args.host}:{args.port}')
# Function called for every packet handled by the sniffer
def callback(p):
src = p['IP'].src
dst = p['IP'].dst
if src not in args.authorized_hosts:
if args.alert is not None:
c.write_single_register(args.register, 1)
print(f"Request from {src} to {dst} detected!")
# Launch the sniffer with the configured interface and port filter
try:
sniff(iface=args.interface, filter=f"tcp and port {args.port}", prn=callback)
except PermissionError as e:
print("This tool require root privileges or cap_net_raw capability.")
exit(1)
except KeyboardInterrupt:
exit(0)
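# Example invocation (a hedged sketch; the script name, interface, hosts and alert
# target below are hypothetical):
#   sudo ./modbus_ids.py -i eth0 -p 502 -a 192.168.1.10:502/0 10.0.0.5 10.0.0.6
# Any Modbus/TCP packet whose source IP is not in the authorized list is reported on
# the console and, when -a is given, writes 1 to the configured alert register.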
|
import codecs
import spacy
import sys
def main(argv):
#input_filename = argv[1]
input_filename = "data/Corpus.DEV.txt"
output_filename = "data/output"
relation_type = "Live_In"
entity_types = (["PERSON"], ["GPE", "LOC"])
threshold = 0.7
nlp = spacy.load('en')
output_file = open(output_filename, 'w')
for sent_id, sent_str in read_lines(input_filename):
sent = nlp(sent_str)
entitiesA = []
entitiesB = []
for ne in sent.ents:
if ne.root.ent_type_ in entity_types[0]:
entitiesA.append(ne)
if ne.root.ent_type_ in entity_types[1]:
entitiesB.append(ne)
for entityA in entitiesA:
for entityB in entitiesB:
if get_score(sent, entityA, entityB) > threshold:
output_file.write("{}\t{}\t{}\t{}\t( {})\n".format(sent_id, entityA.text, relation_type, entityB.text, sent_str))
output_file.close()
def get_score(sent, entityA, entityB):
# TODO: Dear Alon, this is an example of iterating through the text between the two entities
#for i in range(entityA.end, entityB.start):
#print(sent[i].text)
return 1
def read_lines(filename):
for line in codecs.open(filename, encoding="utf8"):
sent_id, sent = line.strip().split("\t")
sent = sent.replace("-LRB-", "(")
sent = sent.replace("-RRB-", ")")
yield sent_id, sent
if __name__ == "__main__":
main(sys.argv)
|
from codecs import*
def encriptar(texto,clave="1234"):
# aC=clave.encode('utf-8')
# bC=aC.hex()
# bC=int(bC,16)
paso1=texto.encode('utf-8')
paso2=paso1.hex()
paso3=int(paso2,16)
return paso3
def desencriptar(texto,clave="1234"):
# aC=clave.encode('utf-8')
# bC=aC.hex()
# bC=int(bC,16)
paso1=hex(int(texto)).replace("0x","")
paso2=decode(paso1,"hex")
paso3=paso2.decode('utf-8')
return paso3
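# Minimal round-trip sketch (assumption: the module is run directly; note that the
# "clave" argument is currently unused by both helpers):
if __name__ == "__main__":
    token = encriptar("hola mundo")             # UTF-8 bytes -> hex digits -> integer
    assert desencriptar(token) == "hola mundo"  # reverses the steps above
    print(token)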
|
import frappe
@frappe.whitelist()
def get_sales_order_items(doctype, txt, searchfield, start, page_len, filters):
return frappe.db.sql("""select si.item_code
from `tabSales Order Item` si, `tabSales Order` s
where s.name = si.parent and si.parenttype = 'Sales Order'
and s.docstatus = 1 and
si.item_code not in(select g_item from `tabBOM` where type = 'Project' and g_item = si.item_code and docstatus<2)
and si.parent = (select name from `tabSales Order` where project = %s and docstatus = 1)""",(filters.get('project')))
@frappe.whitelist()
def get_generic_details(g_bom):
bom_item_list = frappe.db.sql("""select bi.item_code,i.item_group,bi.activity_type,bi.qty,bi.uom,bi.rate,bi.amount
from `tabBOM Item` bi, `tabBOM` b, `tabItem` i
where b.name = bi.parent and bi.parenttype = 'BOM'
and bi.item_code = i.item_code and i.disabled = 0
and bi.docstatus = 1 and bi.parent = %s order by bi.idx asc""",(g_bom),as_dict=1)
return bom_item_list
@frappe.whitelist()
def get_generic_bom_activities(g_bom):
bom_activity_list = frappe.db.sql("""select ba.activity_type,ba.description,ba.hour_rate,ba.uom,ba.qty,
ba.per_minutes_rate,ba.minutes,ba.per_hour_rate,ba.hour,ba.per_day_rate,ba.days,ba.base_activity_cost
from `tabBOM Activities` ba, `tabBOM` b
where b.name = ba.parent and ba.parenttype = 'BOM'
and ba.docstatus = 1 and ba.parent = %s order by ba.idx asc """,(g_bom),as_dict=1)
return bom_activity_list
def on_BOM_after_submit(doc, handler=""):
bom_item_list = frappe.db.sql("""select item_code from `tabItem Price` where item_code= %s """,(doc.item),as_dict=1)
if not bom_item_list:
project = frappe.new_doc('Item Price')
project.Item_code = doc.item
project.uom = doc.uom
project.price_list = 'Standard Selling'
project.price_list_rate = doc.total_bom_cost
project.flags.ignore_permissions = True
project.update({
'item_code': project.Item_code,
'uom': project.uom,
'price_list': project.price_list,
'price_list_rate': project.price_list_rate
}).insert()
frappe.msgprint(msg = 'Item Price Created',
title = 'Notification',
indicator = 'green')
else:
frappe.db.sql("""update `tabItem Price` set price_list_rate = %s where item_code =%s""",(doc.total_bom_cost, doc.item))
frappe.msgprint(msg = 'Item Price Updated',
title = 'Notification',
indicator = 'green')
return
@frappe.whitelist()
def get_Activity_details(activity_type):
bom_activity_list = frappe.db.sql("""select rate,uom from `tabActivity Item Details` where parent=%s """,(activity_type),as_dict=1)
return bom_activity_list
|
#
# PySNMP MIB module EXPAND-NETWORKS-SMI (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/EXPAND-NETWORKS-SMI
# Produced by pysmi-0.3.4 at Mon Apr 29 18:52:49 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
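# NOTE: like every pysmi-generated MIB module, this file is executed by pysnmp's
# MibBuilder, which injects the `mibBuilder` object used below; it is not meant to
# be imported or run on its own.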
OctetString, ObjectIdentifier, Integer = mibBuilder.importSymbols("ASN1", "OctetString", "ObjectIdentifier", "Integer")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
ConstraintsIntersection, ValueSizeConstraint, ConstraintsUnion, SingleValueConstraint, ValueRangeConstraint = mibBuilder.importSymbols("ASN1-REFINEMENT", "ConstraintsIntersection", "ValueSizeConstraint", "ConstraintsUnion", "SingleValueConstraint", "ValueRangeConstraint")
ModuleCompliance, NotificationGroup = mibBuilder.importSymbols("SNMPv2-CONF", "ModuleCompliance", "NotificationGroup")
IpAddress, ModuleIdentity, Integer32, Bits, TimeTicks, Unsigned32, MibIdentifier, Counter64, ObjectIdentity, MibScalar, MibTable, MibTableRow, MibTableColumn, enterprises, Gauge32, iso, NotificationType, Counter32 = mibBuilder.importSymbols("SNMPv2-SMI", "IpAddress", "ModuleIdentity", "Integer32", "Bits", "TimeTicks", "Unsigned32", "MibIdentifier", "Counter64", "ObjectIdentity", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "enterprises", "Gauge32", "iso", "NotificationType", "Counter32")
DisplayString, TextualConvention = mibBuilder.importSymbols("SNMPv2-TC", "DisplayString", "TextualConvention")
expand_networks = MibIdentifier((1, 3, 6, 1, 4, 1, 3405)).setLabel("expand-networks")
expandSystemId = MibScalar((1, 3, 6, 1, 4, 1, 3405, 1), ObjectIdentifier()).setMaxAccess("readonly")
if mibBuilder.loadTexts: expandSystemId.setStatus('mandatory')
expandProducts = MibIdentifier((1, 3, 6, 1, 4, 1, 3405, 2))
acceleratorOs = MibIdentifier((1, 3, 6, 1, 4, 1, 3405, 3))
p2pAccelerator = MibIdentifier((1, 3, 6, 1, 4, 1, 3405, 4))
management = MibIdentifier((1, 3, 6, 1, 4, 1, 3405, 10))
mibBuilder.exportSymbols("EXPAND-NETWORKS-SMI", acceleratorOs=acceleratorOs, expand_networks=expand_networks, expandSystemId=expandSystemId, expandProducts=expandProducts, p2pAccelerator=p2pAccelerator, management=management)
|
import cv2
import numpy as np
def add_border(img, width,color):
h,w = img.shape
w += width * 2
h += width * 2
img_with_border = np.zeros((h, w), dtype=np.uint8)
img_with_border = img_with_border + color
img_with_border[width:h - width, width:w - width] = img.copy()
return img_with_border
#This function came from pyimagesearch.com (Thank You!):
def rotate_bound(image, angle):
# grab the dimensions of the image and then determine the
# center
(h, w) = image.shape[:2]
(cX, cY) = (w // 2, h // 2)
# grab the rotation matrix (applying the negative of the
# angle to rotate clockwise), then grab the sine and cosine
# (i.e., the rotation components of the matrix)
M = cv2.getRotationMatrix2D((cX, cY), -angle, 1.0)
cos = np.abs(M[0, 0])
sin = np.abs(M[0, 1])
# compute the new bounding dimensions of the image
nW = int((h * sin) + (w * cos))
nH = int((h * cos) + (w * sin))
# adjust the rotation matrix to take into account translation
M[0, 2] += (nW / 2) - cX
M[1, 2] += (nH / 2) - cY
# perform the actual rotation and return the image
return cv2.warpAffine(image, M, (nW, nH))
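# Minimal usage sketch (assumes a single-channel image at a hypothetical path, since
# add_border() expects a grayscale array):
if __name__ == "__main__":
    img = cv2.imread("sample.png", cv2.IMREAD_GRAYSCALE)
    framed = add_border(img, 10, 255)       # 10-pixel white border
    rotated = rotate_bound(framed, 30)      # rotate 30 degrees clockwise without cropping
    cv2.imwrite("sample_rotated.png", rotated)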
|
import braintree
from braintree.resource import Resource
class UnknownPaymentMethod(Resource):
def image_url(self):
return "https://assets.braintreegateway.com/payment_method_logo/unknown.png"
|
import folium
from folium.plugins import MarkerCluster
from glob import glob
from numpy import NaN
from pandas import to_numeric
from pandas import read_csv
import streamlit as st
from streamlit_folium import folium_static
stations = read_csv('inventario.csv', header=0, delimiter=',')
stations = stations.replace('#REF!', NaN)
stations = stations.replace('#N/D', NaN)
stations['LAT'] = to_numeric(stations['LAT'])
stations['LON'] = to_numeric(stations['LON'])
stations_without_nan = stations[stations['LAT'].notna()]
table = """
<!DOCTYPE html>
<html>
<head>
<style>
table {{
width:100%;
border-radius:10px;
}}
table, th, td {{
border-collapse: collapse;
}}
th, td {{
padding: 5px;
text-align: left;
}}
#t01 tr:nth-child(odd) {{
background-color:rgba(9, 0, 181, 0.2);
}}
#t01 tr:nth-child(even) {{
background-color:rgba(186, 190, 204, 0.4);
}}
#t01 th {{
background-color: #040054;
color: white;
}}
table tr:hover td {{
background-color: #ddd;
}}
<! -- Rounding borders of table -- >
table th:first-child {{
border-radius: 5px 0 0 0;
}}
table th:last-child {{
border-radius: 0 5px 0 0;
}}
table tr:last-child td:first-child {{
border-radius: 0 0 0 5px;
}}
table tr:last-child td:last-child {{
border-radius: 0 0 5px 0;
}}
</style>
</head>
<body>
<table id="t01">
<tr>
<th>{}</th>
<th>{}</th>
</tr>
<tr>
<td>{}</td>
<td>{}</td>
</tr>
<tr>
<td>{}</td>
<td>{}</td>
</tr>
<tr>
<td>{}</td>
<td>{}</td>
</tr>
<tr>
<td>{}</td>
<td>{}</td>
</tr>
<tr>
<td>{}</td>
<td>{}</td>
</tr>
</table>
</body>
</html>
""".format
#st.set_page_config(page_title='Estações Apac', layout="centered", initial_sidebar_state='expanded')
#Base map
map = folium.Map(location=[-6.5, -37], zoom_start = 6, min_zoom=6)
#options of tiles
folium.TileLayer('openstreetmap').add_to(map)
folium.TileLayer('Stamen Terrain').add_to(map)
folium.TileLayer('cartodbdark_matter').add_to(map)
marker_cluster = MarkerCluster(name='Estações').add_to(map)
#Plot markers of the stations
for _, local in stations_without_nan.iterrows():
iframe = folium.IFrame(table(
"ESTAÇÃO " , str(local['ESTAÇÃO']),
"ID " , str(local['Cód. IBGE/Apac']),
"LATITUDE " , str(local['LAT']),
"LONGITUDE " , str(local['LON']),
"TIPO PCD " , str(local['TIPO PCD']),
"TIPO COLETA " , str(local['TIPO COLETA'])
)
)
popup = folium.Popup(iframe,
min_width=500,
max_width=750)
pcd_icon = folium.features.CustomIcon('pcd_icon.jpg',
icon_size=(50, 50))
folium.Marker(location=[local['LAT'], local['LON']],
popup=popup,
icon=pcd_icon,
tooltip='Clique para ver os detalhes').add_to(marker_cluster)
#Add style to regions
style_function = lambda x: {'fillColor': 'black', 'opacity':0.2, 'color':'black', 'weight':5, 'dashArray':'5, 5', 'fillOpacity':0.2} #https://leafletjs.com/SlavaUkraini/reference.html#path-option
# add all regions to the map
files = glob('./mesoregioes_geojson/*.geojson')
for f in files:
region = f.replace("\\", "/") #correction. Glob returns \ instead of /
region_name = region.split('/')[-1].split('.')[0] #https://stackoverflow.com/questions/7336096/python-glob-without-the-whole-path-only-the-filename#answer-68415441
layer = folium.GeoJson(region, control='true', style_function=style_function, name=region_name)
folium.GeoJsonTooltip(fields=["Regiao"]).add_to(layer)
layer.add_to(map)
folium.LayerControl().add_to(map)
map  # a bare expression only displays the map in a notebook; folium_static() below renders it in Streamlit
#map.save('testepy.html')
folium_static(map)
|
import setuptools
with open("README.md", "r") as fh:
long_description = fh.read()
setuptools.setup(
name="EXCAT-Sync",
version="0.0.15",
author="Christian Marzahl",
author_email="christian.marzahl@gamil.com",
description="A package to download images and annotations from the EXACT Server https://github.com/ChristianMarzahl/Exact",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/ChristianMarzahl/EXACT-Sync",
packages=setuptools.find_packages(),
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
],
python_requires='>=3.6',
)
|
#!/usr/bin/env python
"""
This script demonstrates how to read in data from a sqlite3
database file that is output from a PyMC simulation.
The resulting object is a numpy array.
"""
from __future__ import division
import matplotlib
matplotlib.use("Agg")
import numpy as np
import sqlite3
from pylab import *
# Initialize connection to sqlite3 database file
conn = sqlite3.connect('../Figures/example_heat_flux_2.sqlite')
# Create a cursor object to execute commands on
cur = conn.cursor()
# Use SQL select command to query all data from the table called 'deviance'
cur.execute('select * from deviance')
# Fetch all rows from the output of the select query
deviance = np.array(cur.fetchall())
# Repeat the above two steps for the remaining tables
cur.execute('select * from sigma')
sigma = np.array(cur.fetchall())
cur.execute('select * from theta')
theta = np.array(cur.fetchall())
cur.execute('select * from y_mean')
y_mean = np.array(cur.fetchall())
# Read in the third column of each data array as numpy arrays
deviance = deviance[:,2]
sigma = sigma[:,2]
theta = theta[:,2]
y_mean = y_mean[:,2]
# Generate plot
fig = figure()
ax = fig.add_subplot(111)
plot(theta, 'b-', lw=1.5)
xlabel('Iteration Number', fontsize=20)
ylabel('theta', fontsize=20)
grid(True)
xticks(fontsize=16)
yticks(fontsize=16)
savefig('theta.pdf')
|
import os
import numpy as np
from urllib.request import urlopen
from PIL import Image, ImageDraw, ImageFont
from cv2 import resize
import sys
from vgg16_places_365 import VGG16_Places365
import logging
class SceneSettingDetector:
"""
Detect scenes setting (place detection using places365)
"""
logger = None
model = None
categories = None
NUMBER_OF_PREDICTIONS = 5
def __init__(self):
# started
self.logger = logging.getLogger('vsi_application.scenesetting.SceneSettingDetector')
self.logger.info('__init__(): started')
### init
self.model = VGG16_Places365(weights='places')
file_name = 'categories_places365.txt'
if not os.access(file_name, os.W_OK):
synset_url = 'https://raw.githubusercontent.com/csailvision/places365/master/categories_places365.txt'
#os.system('wget ' + synset_url)
classes = list()
with open(file_name) as class_file:
for line in class_file:
classes.append(line.strip().split(' ')[0][3:])
self.categories = tuple(classes)
# finished
self.logger.info('__init__(): finished')
def get_categories(self):
return self.categories
def detect(self, picture_filename):
self.logger.info('detect(): started')
image = Image.open(picture_filename)
image = np.array(image, dtype=np.uint8)
image = resize(image, (224, 224))
image = np.expand_dims(image, 0)
preds = self.model.predict(image)[0]
top_preds = np.argsort(preds)[::-1][0:self.NUMBER_OF_PREDICTIONS]
self.logger.info('--SCENE CATEGORIES:')
# output the prediction
        for i in range(self.NUMBER_OF_PREDICTIONS):
self.logger.info(self.categories[top_preds[i]])
# save into image
img = Image.open(picture_filename)
draw = ImageDraw.Draw(img)
# font = ImageFont.truetype(<font-file>, <font-size>)
font = ImageFont.truetype("arial.ttf", 16)
text_y = 20
        for i in range(self.NUMBER_OF_PREDICTIONS):
# draw.text((x, y),"Sample Text",(r,g,b))
draw.text((0, text_y), self.categories[top_preds[i]], (255,255,255), font=font)
text_y = text_y + 20
output_picture_filename = picture_filename + '-setting.jpg'
img.save(output_picture_filename)
self.logger.info('detect(): finished')
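# Minimal usage sketch (assumes categories_places365.txt is already available locally,
# since the download in __init__ is commented out, and that the image path is hypothetical):
if __name__ == "__main__":
    logging.basicConfig(level=logging.INFO)
    detector = SceneSettingDetector()
    detector.detect("example_frame.jpg")  # writes example_frame.jpg-setting.jpg with the top-5 labels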
|
# coding=utf-8
# Copyright 2020 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Model configurations for CNN benchmarks.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from cnn_quantization.tf_cnn_benchmarks.models import alexnet_model
from cnn_quantization.tf_cnn_benchmarks.models import densenet_model
from cnn_quantization.tf_cnn_benchmarks.models import googlenet_model
from cnn_quantization.tf_cnn_benchmarks.models import inception_model
from cnn_quantization.tf_cnn_benchmarks.models import lenet_model
from cnn_quantization.tf_cnn_benchmarks.models import overfeat_model
from cnn_quantization.tf_cnn_benchmarks.models import resnet_model
from cnn_quantization.tf_cnn_benchmarks.models import ssd_model
from cnn_quantization.tf_cnn_benchmarks.models import trivial_model
from cnn_quantization.tf_cnn_benchmarks.models import vgg_model
_model_name_to_imagenet_model = {
'vgg11': vgg_model.Vgg11Model,
'vgg16': vgg_model.Vgg16Model,
'vgg19': vgg_model.Vgg19Model,
'lenet': lenet_model.Lenet5Model,
'googlenet': googlenet_model.GooglenetModel,
'overfeat': overfeat_model.OverfeatModel,
'alexnet': alexnet_model.AlexnetModel,
'trivial': trivial_model.TrivialModel,
'inception3': inception_model.Inceptionv3Model,
'inception4': inception_model.Inceptionv4Model,
'resnet50': resnet_model.create_resnet50_model,
'resnet50_v1.5': resnet_model.create_resnet50_v1_5_model,
'resnet50_v2': resnet_model.create_resnet50_v2_model,
'resnet101': resnet_model.create_resnet101_model,
'resnet101_v2': resnet_model.create_resnet101_v2_model,
'resnet152': resnet_model.create_resnet152_model,
'resnet152_v2': resnet_model.create_resnet152_v2_model,
}
_model_name_to_cifar_model = {
'alexnet': alexnet_model.AlexnetCifar10Model,
'resnet20': resnet_model.create_resnet20_cifar_model,
'resnet20_v2': resnet_model.create_resnet20_v2_cifar_model,
'resnet32': resnet_model.create_resnet32_cifar_model,
'resnet32_v2': resnet_model.create_resnet32_v2_cifar_model,
'resnet44': resnet_model.create_resnet44_cifar_model,
'resnet44_v2': resnet_model.create_resnet44_v2_cifar_model,
'resnet56': resnet_model.create_resnet56_cifar_model,
'resnet56_v2': resnet_model.create_resnet56_v2_cifar_model,
'resnet110': resnet_model.create_resnet110_cifar_model,
'resnet110_v2': resnet_model.create_resnet110_v2_cifar_model,
'trivial': trivial_model.TrivialCifar10Model,
'densenet40_k12': densenet_model.create_densenet40_k12_model,
'densenet100_k12': densenet_model.create_densenet100_k12_model,
'densenet100_k24': densenet_model.create_densenet100_k24_model,
}
_model_name_to_object_detection_model = {
'ssd300': ssd_model.SSD300Model,
'trivial': trivial_model.TrivialSSD300Model,
}
def _get_model_map(dataset_name):
"""Get name to model map for specified dataset."""
if dataset_name == 'cifar10':
return _model_name_to_cifar_model
elif dataset_name in ('imagenet', 'synthetic'):
return _model_name_to_imagenet_model
elif dataset_name == 'coco':
return _model_name_to_object_detection_model
else:
raise ValueError('Invalid dataset name: %s' % dataset_name)
def get_model_config(model_name, dataset, params):
"""Map model name to model network configuration."""
model_map = _get_model_map(dataset.name)
if model_name not in model_map:
raise ValueError('Invalid model name \'%s\' for dataset \'%s\'' %
(model_name, dataset.name))
else:
return model_map[model_name](params=params)
def register_model(model_name, dataset_name, model_func):
"""Register a new model that can be obtained with `get_model_config`."""
model_map = _get_model_map(dataset_name)
if model_name in model_map:
raise ValueError('Model "%s" is already registered for dataset "%s"' %
(model_name, dataset_name))
model_map[model_name] = model_func
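# Hypothetical registration sketch (the alias below is illustrative, not part of this
# module): any callable that accepts `params` and returns a model object can be
# registered and later resolved through get_model_config().
#
#   register_model('my_resnet', 'imagenet',
#                  lambda params: resnet_model.create_resnet50_model(params=params))
#   model = get_model_config('my_resnet', dataset, params)  # where dataset.name == 'imagenet'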
|
import pandas as pd
from sklearn.preprocessing import LabelEncoder
import numpy as np
import torch
UCI_datafolder = "../data/UCI/"
def dummy_encode(df):
"""
Auto encodes any dataframe column of type category or object.
"""
columnsToEncode = list(df.select_dtypes(include=['category', 'object']))
le = LabelEncoder()
for feature in columnsToEncode:
try:
df[feature] = le.fit_transform(df[feature])
except:
print('Error encoding '+feature)
return df
def normalize_col(s):
std = s.std()
mean = s.mean()
if std > 0:
return (s - mean) / std
else:
return s - mean
def normalize_cols(df, columns=None):
if columns is None:
columns = df.columns
for col in columns:
df[col] = normalize_col(df[col])
return df
def reg_to_class(s):
return (s > s.mean()).astype(int)
def mul_to_bin(s, border=None):
if border is None:
border = s.median()
return (s > border).astype(int)
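# Worked example of the two binarisation helpers (values are illustrative):
#   reg_to_class(pd.Series([1, 2, 3, 4]))   -> 0, 0, 1, 1   (mean is 2.5)
#   mul_to_bin(pd.Series([1, 1, 2, 5]), 1)  -> 0, 0, 1, 1   (border is 1)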
def uci_data(data_mode):
if data_mode == 'bank':
df = pd.read_csv(f'{UCI_datafolder}/bank//bank-full.csv', sep=';')
df['balance'] = normalize_col(df['balance'])
df = dummy_encode(df)
df.rename(columns={'y': 'target'}, inplace=True)
elif data_mode == 'concrete':
df = pd.read_excel(f'{UCI_datafolder}/concrete//Concrete_Data.xls')
df = normalize_cols(df)
df.rename(columns={'Concrete compressive strength(MPa, megapascals) ': 'target'}, inplace=True)
df['target'] = reg_to_class(df['target'])
elif data_mode == 'housing':
df = pd.read_fwf(f'{UCI_datafolder}/housing//housing.data.txt', header=None)
df = normalize_cols(df)
df.rename(columns={13: 'target'}, inplace=True)
df['target'] = reg_to_class(df['target'])
elif data_mode == 'landsat':
df = pd.read_csv(f'{UCI_datafolder}/landsat//sat.trn.txt', header=None, sep=' ')
df = pd.concat([df, pd.read_csv(f'{UCI_datafolder}/landsat//sat.tst.txt', header=None, sep=' ')])
df = normalize_cols(df, columns=[x for x in range(36)])
df.rename(columns={36: 'target'}, inplace=True)
df['target'] = mul_to_bin(df['target'])
elif data_mode == 'mushroom':
df = pd.read_csv(f'{UCI_datafolder}/mushroom//agaricus-lepiota.data.txt', header=None)
df = dummy_encode(df)
df.rename(columns={0: 'target'}, inplace=True)
elif data_mode == 'pageblock':
df = pd.read_fwf(f'{UCI_datafolder}/pageblock//page-blocks.data', header=None)
df = normalize_cols(df, columns=[x for x in range(10)])
df.rename(columns={10: 'target'}, inplace=True)
df['target'] = mul_to_bin(df['target'], 1)
elif data_mode == 'shuttle':
df = pd.read_csv(f'{UCI_datafolder}/shuttle//shuttle.trn', header=None, sep=' ')
df = pd.concat([df, pd.read_csv(f'{UCI_datafolder}/shuttle//shuttle.tst.txt', header=None, sep=' ')])
df = normalize_cols(df, columns=[x for x in range(9)])
df.rename(columns={9: 'target'}, inplace=True)
df['target'] = mul_to_bin(df['target'], 1)
elif data_mode == 'spambase':
df = pd.read_csv(f'{UCI_datafolder}/spambase//spambase.data.txt', header=None, sep=',')
df = normalize_cols(df, columns=[x for x in range(57)])
df.rename(columns={57: 'target'}, inplace=True)
elif data_mode == 'wine':
df = pd.read_csv(f'{UCI_datafolder}/wine//winequality-red.csv', sep=';')
df_w = pd.read_csv(f'{UCI_datafolder}/wine//winequality-white.csv', sep=';')
df['target'] = 1
df_w['target'] = 0
df = pd.concat([df, df_w])
df = normalize_cols(df, [x for x in df.columns if x != 'target'])
df_neg = df[df['target'] == 0]
n_data = df_neg.drop(['target'], axis=1).values
n_shuffle = np.random.permutation(len(n_data))
n_data = n_data[n_shuffle]
df_pos = df[df['target'] == 1]
p_data = df_pos.drop(['target'], axis=1).values
p_shuffle = np.random.permutation(len(p_data))
p_data = p_data[p_shuffle]
return p_data, n_data
class UCI_data(torch.utils.data.Dataset):
def __init__(self, p_data, n_data, train=True):
if train:
self.p_data = p_data[ :len(p_data)*2//3].astype(np.float32)
self.n_data = n_data[ :len(n_data)*2//3].astype(np.float32)
else:
self.p_data = p_data[len(p_data)*2//3:].astype(np.float32)
self.n_data = n_data[len(n_data)*2//3:].astype(np.float32)
self.transform = None
self.target_transform = None
def __len__(self):
return len(self.n_data) + len(self.p_data)
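    def __getitem__(self, index):
        # Minimal __getitem__ sketch to complete the Dataset interface (assumption:
        # positives are labelled 1 and negatives 0, with negatives indexed after the
        # positives; the original class only defines __len__).
        if index < len(self.p_data):
            sample, target = self.p_data[index], 1
        else:
            sample, target = self.n_data[index - len(self.p_data)], 0
        if self.transform is not None:
            sample = self.transform(sample)
        if self.target_transform is not None:
            target = self.target_transform(target)
        return sample, target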
|
################################################################################
# Simple(st) example for sending a notification.
################################################################################
import aamnotifs
n = aamnotifs.Notifs("amqps://user:password@domain.tld:5673/%2F")
# The routing_name is the name of the "channel" you want to use
# it can be "mail", "chat", etc.
# This will make it easy to choose which channels your clients
# will receive
n.send("example_channel", "Notification title here", "Notification content here")
|
import json
import mimetypes
import os
import tempfile
import flask
import cauldron as cd
from cauldron import cli
from cauldron import writer
from cauldron.cli import sync
from cauldron.cli.commands import create as create_command
from cauldron.cli.commands.open import opener as project_opener
from cauldron.cli.server import arguments
from cauldron.cli.server import authorization
from cauldron.cli.server import run as server_runner
from cauldron.cli.server.routes.synchronize import status
from cauldron.environ.response import Response
sync_status = dict(
time=-1,
project=None
)
@server_runner.APPLICATION.route('/sync-touch', methods=['GET', 'POST'])
@authorization.gatekeeper
def touch_project():
"""
Touches the project to trigger refreshing its cauldron.json state.
"""
r = Response()
project = cd.project.get_internal_project()
if project:
project.refresh()
else:
r.fail(
code='NO_PROJECT',
message='No open project to refresh'
)
return r.update(
sync_time=sync_status.get('time', 0)
).flask_serialize()
@server_runner.APPLICATION.route('/sync-status', methods=['GET', 'POST'])
@authorization.gatekeeper
def fetch_synchronize_status():
"""
Returns the synchronization status information for the currently opened
project
"""
r = Response()
project = cd.project.get_internal_project()
if not project:
r.fail(
code='NO_PROJECT',
message='No open project on which to retrieve status'
)
else:
with open(project.source_path, 'r') as f:
definition = json.load(f)
result = status.of_project(project)
r.update(
sync_time=sync_status.get('time', 0),
source_directory=project.source_directory,
remote_source_directory=project.remote_source_directory,
status=result,
definition=definition
)
return r.flask_serialize()
@server_runner.APPLICATION.route('/sync-open', methods=['POST'])
@authorization.gatekeeper
def sync_open_project():
"""..."""
r = Response()
args = arguments.from_request()
definition = args.get('definition')
source_directory = args.get('source_directory')
if None in [definition, source_directory]:
return r.fail(
code='INVALID_ARGS',
message='Invalid arguments. Unable to open project'
).response.flask_serialize()
# Remove any shared library folders from the library list. These will be
# stored using the single shared library folder instead
definition['library_folders'] = [
lf
for lf in definition.get('library_folders', ['libs'])
if lf and not lf.startswith('..')
]
definition['library_folders'] += ['../__cauldron_shared_libs']
container_folder = tempfile.mkdtemp(prefix='cd-remote-project-')
os.makedirs(os.path.join(container_folder, '__cauldron_shared_libs'))
os.makedirs(os.path.join(container_folder, '__cauldron_downloads'))
project_folder = os.path.join(container_folder, definition['name'])
os.makedirs(project_folder)
definition_path = os.path.join(project_folder, 'cauldron.json')
writer.write_json_file(definition_path, definition)
sync_status.update({}, time=-1, project=None)
open_response = project_opener.open_project(project_folder, forget=True)
open_response.join()
project = cd.project.get_internal_project()
project.remote_source_directory = source_directory
sync_status.update({}, time=-1, project=project)
return r.consume(open_response).update(
source_directory=project.source_directory,
project=project.kernel_serialize()
).notify(
kind='OPENED',
code='PROJECT_OPENED',
message='Project opened'
).response.flask_serialize()
@server_runner.APPLICATION.route('/sync-file', methods=['POST'])
@authorization.gatekeeper
def sync_source_file():
"""..."""
r = Response()
args = arguments.from_request()
relative_path = args.get('relative_path')
chunk = args.get('chunk')
index = args.get('index', 0)
sync_time = args.get('sync_time', -1)
location = args.get('location', 'project')
offset = args.get('offset', 0)
if None in [relative_path, chunk]:
return r.fail(
code='INVALID_ARGS',
message='Missing or invalid arguments'
).response.flask_serialize()
project = cd.project.get_internal_project()
if not project:
return r.fail(
code='NO_OPEN_PROJECT',
message='No project is open. Unable to sync'
).response.flask_serialize()
parts = relative_path.replace('\\', '/').strip('/').split('/')
root_directory = project.source_directory
if location == 'shared':
root_directory = os.path.realpath(os.path.join(
root_directory,
'..',
'__cauldron_shared_libs'
))
file_path = os.path.join(root_directory, *parts)
parent_directory = os.path.dirname(file_path)
if not os.path.exists(parent_directory):
os.makedirs(parent_directory)
sync.io.write_file_chunk(
file_path=file_path,
packed_chunk=chunk,
append=index > 0,
offset=offset
)
sync_status.update({}, time=sync_time)
return r.notify(
kind='SYNCED',
code='SAVED_CHUNK',
message='File chunk {} {}'.format(offset, file_path)
).console().response.flask_serialize()
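# Client-side sketch of the /sync-file contract implied by the handler above (hedged:
# the real cauldron client packs chunks via cauldron.cli.sync, so the payload below
# only names the accepted fields rather than showing a valid packed chunk):
#   POST /sync-file
#   {
#       "relative_path": "notebooks/step_01.py",      # path under the project (or shared libs)
#       "chunk": "<packed chunk produced by sync.io>",
#       "index": 0,                                   # 0 overwrites the file, > 0 appends
#       "offset": 0,
#       "sync_time": 1234567890,
#       "location": "project"                         # or "shared" for __cauldron_shared_libs
#   }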
@server_runner.APPLICATION.route(
'/download/<filename>',
methods=['GET', 'POST']
)
@authorization.gatekeeper
def download_file(filename: str):
""" downloads the specified project file if it exists."""
project = cd.project.get_internal_project()
source_directory = project.source_directory if project else None
if not filename or not project or not source_directory:
return '', 204
path = os.path.realpath(os.path.join(
source_directory,
'..',
'__cauldron_downloads',
filename
))
if not os.path.exists(path):
return '', 204
return flask.send_file(path, mimetype=mimetypes.guess_type(path)[0])
@server_runner.APPLICATION.route(
'/project-download/<path:filename>',
methods=['GET', 'POST']
)
@authorization.gatekeeper
def download_project_file(filename: str):
""" downloads the specified project file if it exists."""
project = cd.project.get_internal_project()
source_directory = project.source_directory if project else None
if not filename or not project or not source_directory:
return '', 204
path = os.path.realpath(os.path.join(
source_directory,
filename
))
if not os.path.exists(path):
return '', 204
return flask.send_file(path, mimetype=mimetypes.guess_type(path)[0])
@server_runner.APPLICATION.route('/sync-create', methods=['POST'])
@authorization.gatekeeper
def sync_create_project():
"""..."""
r = Response()
args = arguments.from_request()
name = args.get('name')
remote_source_directory = args.get('source_directory')
optional_args = args.get('args', {})
if None in [name, remote_source_directory]:
return r.fail(
code='INVALID_ARGS',
message='Invalid arguments. Unable to create project'
).response.flask_serialize()
container_folder = tempfile.mkdtemp(prefix='cd-remote-project-')
os.makedirs(os.path.join(container_folder, '__cauldron_shared_libs'))
os.makedirs(os.path.join(container_folder, '__cauldron_downloads'))
r.consume(create_command.execute(
cli.make_command_context('create'),
project_name=name,
directory=container_folder,
forget=True,
**optional_args
))
if r.failed:
return r.flask_serialize()
sync_status.update({}, time=-1, project=None)
project = cd.project.get_internal_project()
project.remote_source_directory = remote_source_directory
with open(project.source_path, 'r') as f:
definition = json.load(f)
sync_status.update({}, time=-1, project=project)
return r.update(
source_directory=project.source_directory,
remote_source_directory=remote_source_directory,
definition=definition,
project=project.kernel_serialize()
).notify(
kind='SUCCESS',
code='PROJECT_CREATED',
message='Project created'
).response.flask_serialize()
|
from __future__ import unicode_literals
import logging
from allauth.account.adapter import DefaultAccountAdapter
from invite_only.models import InviteCode
logger = logging.getLogger(__name__)
class InviteOnlyAccountAdapter(DefaultAccountAdapter):
def is_open_for_signup(self, request):
if request.method == 'GET':
logger.info("is oepn for signup {} {}".format(
request.GET, request.POST))
invite_code = self._get_invite(request)
# If None, return False
return invite_code is not None
else:
return True
def stash_verified_email(self, request, email):
request.session['account_verified_email'] = email
def is_email_verified(self, request, email):
return True
def save_user(self, request, user, form, commit=True):
super(InviteOnlyAccountAdapter, self).save_user(
request, user, form, commit=True)
def _get_invite(self, request):
"""
Returns the InviteCode object or None if it doesn't exist
"""
logger.info('get {}, post: {}'.format(request.GET, request.POST))
invite_code = request.GET.get('invite_code')
logger.debug('Got invite_code {}'.format(invite_code))
try:
return InviteCode.objects.get(code=invite_code)
except (InviteCode.MultipleObjectsReturned, InviteCode.DoesNotExist):
logger.exception('Invalid invite code {}'.format(invite_code))
return None
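# Example signup link (hedged; the URL prefix depends on how allauth is mounted):
#   /accounts/signup/?invite_code=ABC123
# GET requests without a valid, unique invite_code are rejected by is_open_for_signup().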
|
import os
import os.path
from io import BytesIO
import archi
from .constants import sub_format_list, supportted_compression_extension
from .sys_global_var import prefix
from .utils import get_best_subtitle
def extract_subtitle(
v_name,
v_path,
archive_name,
sub_data_b,
v_info_d,
rename,
single,
both,
plex,
delete=True,
):
""" 接受下载好的字幕包字节数据, 猜测字幕并解压。 """
sub_buff = BytesIO(sub_data_b)
ar = archi.Archive(sub_buff)
files = {a.filename: a.read() for a in ar}
if not single:
sub_name = get_best_subtitle(files.keys(), v_info_d)
else:
print(prefix)
for i, single_subtitle in enumerate(files.keys()):
single_subtitle = single_subtitle.split("/")[-1]
try:
# zipfile: Historical ZIP filename encoding
# try cp437 encoding
single_subtitle = single_subtitle.encode("cp437").decode("gbk")
except:
pass
info = " %3s) %s" % (str(i + 1), single_subtitle)
print(prefix + info)
indexes = range(len(files.keys()))
choice = None
while not choice:
try:
print(prefix)
choice = int(input(prefix + " choose subtitle: "))
except ValueError:
print(prefix + " Error: only numbers accepted")
continue
if not choice - 1 in indexes:
print(prefix + " Error: numbers not within the range")
choice = None
sub_name = list(files.keys())[choice - 1]
    if not sub_name:  # no best guess found in automatic mode
return None
    os.chdir(v_path)  # switch to the folder containing the video
v_name_without_format = os.path.splitext(v_name)[0]
# video_name + sub_type
to_extract_types = []
sub_title, sub_type = os.path.splitext(sub_name)
to_extract_subs = [[sub_name, sub_type]]
if both:
another_sub_type = ".srt" if sub_type == ".ass" else ".ass"
another_sub = sub_name.replace(sub_type, another_sub_type)
if another_sub in files:
to_extract_subs.append([another_sub, another_sub_type])
else:
print(prefix + " no %s subtitles in this archive" % another_sub_type)
if delete:
        for one_sub_type in sub_format_list:  # delete any subtitles that already exist
if os.path.exists(v_name_without_format + one_sub_type):
os.remove(v_name_without_format + one_sub_type)
if os.path.exists(v_name_without_format + ".zh" + one_sub_type):
os.remove(v_name_without_format + ".zh" + one_sub_type)
for one_sub, one_sub_type in to_extract_subs:
if rename:
if plex:
sub_new_name = v_name_without_format + ".zh" + one_sub_type
else:
sub_new_name = v_name_without_format + one_sub_type
else:
sub_new_name = one_sub
with open(sub_new_name, "wb") as sub:  # save the subtitle
sub.write(files[one_sub])
return to_extract_subs
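# A hedged usage sketch (argument values are illustrative, not taken from this project):
#
#     extracted = extract_subtitle(
#         "Movie.2020.1080p.mkv",   # v_name: video file name
#         "/path/to/videos",        # v_path: directory that contains the video
#         "Movie.2020.zip",         # archive_name: name of the downloaded archive
#         archive_bytes,            # sub_data_b: raw bytes of the subtitle archive
#         video_info,               # v_info_d: parsed video info used to rank candidates
#         rename=True, single=False, both=False, plex=False,
#     )
#     # returns a list of [subtitle_name, extension] pairs, or None when nothing matches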
|
from javadoc import *
|
from django.utils.translation import gettext_lazy as _
class CartAddProhibited(Exception):
"""Raised when a `signals.satchmo_cart_add_verify` listener vetoes adding an item to the cart.
Params:
- product: item which was being added
- message: veto message
"""
def __init__(self, product, message):
self.product, self.message = product, message
class OutOfStockError(CartAddProhibited):
def __init__(self, product, have, need):
if have == 0:
msg = _("'%s' is out of stock.") % product.translated_name()
else:
msg = _("Only %(amount)i of '%(product)s' in stock.") % {
'amount': have,
'product': product.translated_name()
}
CartAddProhibited.__init__(self, product, msg)
self.have = have
self.need = need |
from django.shortcuts import render
from django.views.generic import ListView, DetailView
from carts.models import Cart
from .models import Product
class HomeListView(ListView):
model = Product
template_name = 'pages/index.html'
context_object_name = 'products'
# paginate_by = 8
# Laptop category
class LaptopListView(ListView):
model = Product
template_name = 'pages/laptop_list.html'
def get_context_data(self, *args, **kwargs):
context = super(LaptopListView, self).get_context_data(*args, **kwargs)
context["products"] = Product.objects.filter(category="laptop")
return context
# Phone category
class PhoneListView(ListView):
model = Product
template_name = 'pages/phone_list.html'
def get_context_data(self, *args, **kwargs):
context = super(PhoneListView, self).get_context_data(*args, **kwargs)
context["products"] = Product.objects.filter(category="phone")
return context
# Accessories category
class AccessoryListView(ListView):
model = Product
template_name = 'pages/accessories.html'
def get_context_data(self, *args, **kwargs):
context = super(AccessoryListView, self).get_context_data(*args, **kwargs)
context["products"] = Product.objects.filter(category="accesories")
return context
class ProductDetailView(DetailView):
model = Product
template_name = 'pages/product_detail.html'
def get_context_data(self, *args, **kwargs):
context = super(ProductDetailView,self).get_context_data(*args, **kwargs)
cart_obj, new_obj = Cart.objects.new_or_get(self.request)
context['cart'] = cart_obj
return context
|
# Licensed under a 3-clause BSD style license - see LICENSE.rst
from .dualbeam import * # noqa
from ._dualbeam_utils import estimate_dxdy, match_pairs # noqa
|
"""
flask-dependency performance test
Results:
normal: 0.210586
proxy: 2.427060
"""
from timeit import timeit
from flask_dependency import Dependency
class Service(object):
def __init__(self, name):
self.name = name
def get_name(self):
return self.name
normal = Service('normal')
proxy, init_proxy = Dependency(Service)
init_proxy('proxy')
print(normal.get_name())
print(proxy.get_name())
test = "service.get_name()"
print("normal: {:.6f}".format(timeit(test, globals={"service": normal})))
print("proxy: {:.6f}".format(timeit(test, globals={"service": proxy})))
|
import unittest
try:
from typing import Any
except ImportError:
pass # only needed to check type annotations
class Py23TestCase(unittest.TestCase):
def __init__(self, *args, **kwargs): # type: (Any, Any) -> None
super(Py23TestCase, self).__init__(*args, **kwargs)
try:
self.assertRaisesRegex
except AttributeError:
# assertRaisesRegexp is deprecated in Python3 but assertRaisesRegex doesn't exist in Python2
# This fix is used in order to avoid using the alias from the six library
self.assertRaisesRegex = self.assertRaisesRegexp # type: ignore
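# A minimal usage sketch: test classes that need assertRaisesRegex on both Python 2 and 3
# can simply inherit from Py23TestCase, e.g.
#
#     class ParsingTests(Py23TestCase):
#         def test_rejects_garbage(self):
#             with self.assertRaisesRegex(ValueError, "invalid literal"):
#                 int("not a number")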
|
# -*- coding: utf-8 -*-
"""
commah version
==============
Stores the version of the *commah* package.
"""
# %% VERSIONS
# Default/Latest/Current version
version = '0.3'
|
from django.urls import path
from . import views
from .views import CarList, CreateCar
urlpatterns = [
path('owner_info', views.info, name = 'info'),
path('owner/<int:ownership_id>', views.detail, name = 'detail'),
path('owners_list', views.owners_list),
path('car_list', CarList.as_view()),
path('', views.create_view),
path('car_model', CreateCar.as_view()),
]
|
import six
import json
import Utils
import Model
from GeoAnimation import GpsPoint, GeoTrack
from BatchPublishAttrs import batchPublishRaceAttr
class Template( object ):
templateAttributes = set.union( {
'distanceUnit',
'rule80MinLapCount',
'isTimeTrial',
'roadRaceFinishTimes',
'estimateLapsDownFinishTime',
'enableJChipIntegration',
'resetStartClockOnFirstTag',
'skipFirstTagRead',
'chipReaderType',
'chipReaderPort',
'chipReaderIpAddr',
'autocorrectLapsDefault',
'enableUSBCamera',
'photosAtRaceEndOnly',
'advancePhotoMilliseconds',
'finishKMH',
'ftpUploadDuringRace',
'ftpUploadPhotos',
'ftpHost',
'ftpUser',
'ftpPassword',
'ftpPath',
'groupByStartWave',
'winAndOut',
'city',
'stateProv',
'country',
'discipline',
'showCourseAnimationInHtml',
'licenseLinkTemplate',
'hideDetails',
'lapCounterForegrounds',
'lapCounterBackgrounds',
'secondsBeforeLeaderToFlipLapCounter',
'countdownTimer',
'lapCounterCycle',
'lapElapsedClock',
'setNoDataDNS',
'organizer',
'minutes',
'notes',
'minPossibleLapTime',
'showFullNamesInChart',
'allCategoriesFinishAfterFastestRidersLastLap',
'highPrecisionTimes',
'syncCategories',
'finishTop',
'reverseDirection',
'headerImage',
'email',
'postPublishCmd',
'course',
'teamRankByRiderTime',
'teamRankBySumTime',
'teamRankBySumPoints',
'teamRankBySumPercentTime',
'teamRankOption',
'nthTeamRider',
'topTeamResults',
'finishPointsStructure',
'showNumTeamParticipants',
},
set(batchPublishRaceAttr),
)
def __init__( self, race=None ):
self.template = {}
self.fileName = None
if race:
self.fromRace( race )
def write( self, fname ):
with open( fname, 'w' ) as fp:
json.dump( self.template, fp, indent=1, sort_keys=True )
self.fileName = fname
def read( self, fname ):
with open( fname, 'r' ) as fp:
self.template = json.load( fp )
self.fileName = fname
def fromRace( self, race ):
if not race:
self.template = {}
return
self.template = { attr:getattr(race, attr) for attr in self.templateAttributes if hasattr(race, attr) }
try:
firstLapDistance = (
max( c.firstLapDistance for c in race.getCategories( startWaveOnly=True ) if c.firstLapDistance )
* (1000.0 if race.distanceUnit == race.UnitKm else 1609.344) )
except ValueError:
firstLapDistance = None
try:
self.template['course'] = {
'isPointToPoint': race.geoTrack.isPointToPoint,
'geoTrackFName': race.geoTrackFName,
'points': [p._asdict() for p in race.geoTrack.gpsPoints],
'firstLapDistance': firstLapDistance,
}
except AttributeError:
pass
def toRace( self, race, updateCategoryDistances=True ):
if not race:
return
geoTrack = None
firstLapDistance = None
for attr, value in six.iteritems(self.template):
if attr not in self.templateAttributes:
continue
if attr == 'course':
course = self.template['course']
race.geoTrackFName = course.get('geoTrackFName', 'geoTrackFName')
race.geoTrack = geoTrack = GeoTrack()
geoTrack.setPoints( [GpsPoint(**p) for p in course.get('points',[])], course.get('isPointToPoint',False) )
if course['firstLapDistance']:
geoTrack.firstLapDistance = firstLapDistance = course['firstLapDistance']
else:
setattr( race, attr, value )
if updateCategoryDistances and geoTrack:
distance = geoTrack.lengthKm if race.distanceUnit == race.UnitKm else geoTrack.lengthMiles
firstLapDistance = firstLapDistance or None
if firstLapDistance:
firstLapDistance /= (1000.0 if race.distanceUnit == race.UnitKm else 1609.344)
for c in race.getCategories():
c.distance = distance
c.firstLapDistance = firstLapDistance
race.templateFileName = self.fileName
race.setChanged()
def __eq__(self, other):
return (isinstance(other, self.__class__) and self.template == other.template)
if __name__ == '__main__':
Model.setRace( Model.Race() )
Model.getRace()._populate()
Model.race.winAndOut = True
Model.race.organizer = u'\u2713\u2713\u2713\u2713\u2713\u2713'
p1 = Template( Model.race )
p1.write( 'TemplateTest1.template' )
p2 = Template()
p2.read( 'TemplateTest1.template' )
assert p1 == p2
Model.race.winAndOut = False
p2 = Template( Model.race )
p2.write( 'TemplateTest2.template' )
assert p1 != p2
|
#
# Copyright (c) 2020-2021 Project CHIP Authors
# All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#
# @file
# Python interface for ChipCommissionableNodeController
#
"""Chip Commissionable Node Controller interface
"""
from __future__ import absolute_import
from __future__ import print_function
from ctypes import *
from .ChipStack import *
from .exceptions import *
__all__ = ["ChipCommissionableNodeController"]
def _singleton(cls):
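"""Class decorator: instantiate the wrapped class once and return that same instance on every later call."""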
instance = [None]
def wrapper(*args, **kwargs):
if instance[0] is None:
instance[0] = cls(*args, **kwargs)
return instance[0]
return wrapper
@_singleton
class ChipCommissionableNodeController(object):
def __init__(self, startNetworkThread=True):
self.commissionableNodeCtrl = None
self._ChipStack = ChipStack()
self._dmLib = None
self._InitLib()
commissionableNodeCtrl = c_void_p(None)
res = self._dmLib.pychip_CommissionableNodeController_NewController(
pointer(commissionableNodeCtrl))
if res != 0:
raise self._ChipStack.ErrorToException(res)
self.commissionableNodeCtrl = commissionableNodeCtrl
self._ChipStack.commissionableNodeCtrl = commissionableNodeCtrl
def __del__(self):
if self.commissionableNodeCtrl is not None:
self._dmLib.pychip_CommissionableNodeController_DeleteController(
self.commissionableNodeCtrl)
self.commissionableNodeCtrl = None
def PrintDiscoveredCommissioners(self):
return self._ChipStack.Call(
lambda: self._dmLib.pychip_CommissionableNodeController_PrintDiscoveredCommissioners(
self.commissionableNodeCtrl)
)
def DiscoverCommissioners(self):
return self._ChipStack.Call(
lambda: self._dmLib.pychip_CommissionableNodeController_DiscoverCommissioners(
self.commissionableNodeCtrl)
)
# ----- Private Members -----
def _InitLib(self):
if self._dmLib is None:
self._dmLib = CDLL(self._ChipStack.LocateChipDLL())
self._dmLib.pychip_CommissionableNodeController_NewController.argtypes = [
POINTER(c_void_p)]
self._dmLib.pychip_CommissionableNodeController_NewController.restype = c_uint32
self._dmLib.pychip_CommissionableNodeController_DeleteController.argtypes = [
c_void_p]
self._dmLib.pychip_CommissionableNodeController_DeleteController.restype = c_uint32
self._dmLib.pychip_CommissionableNodeController_DiscoverCommissioners.argtypes = [
c_void_p]
self._dmLib.pychip_CommissionableNodeController_DiscoverCommissioners.restype = c_uint32
self._dmLib.pychip_CommissionableNodeController_PrintDiscoveredCommissioners.argtypes = [
c_void_p]
|
import numpy as np
def depth(vector):
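"""Placeholder depth map: returns an all-zero 168x168 array; the input vector is currently unused."""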
z=np.zeros([168,168])
return z |
#
# Copyright 2021 Digital.ai
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
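# Evaluate every configured condition expression, combine the outcomes with AND/OR,
# fail the gate when required, and print a two-column summary table of the results.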
results = [eval(conditions[condition]) for condition in conditions.keys()]
if conditionsType == "AND":
gatePassed = all(results)
else:
gatePassed = any(results)
if not useOutput and not gatePassed:
raise Exception("Gate failed with results list: {}".format(results))
table = u"""
|Condition|Result|
|---|---|
"""
for index, condition in enumerate(conditions.keys()):
result = u"\u2714" if results[index] else u"\u2717"
table += u"|{}|{}|\n".format(unicode(condition, "utf-8") + u" ", result)
print(table) |
"""Support for getting temperature from TEMPer devices."""
import logging
import voluptuous as vol
from homeassistant.components.sensor import PLATFORM_SCHEMA
from homeassistant.const import CONF_NAME, DEVICE_DEFAULT_NAME, TEMP_FAHRENHEIT
from homeassistant.helpers.entity import Entity
_LOGGER = logging.getLogger(__name__)
REQUIREMENTS = ['temperusb==1.5.3']
CONF_SCALE = 'scale'
CONF_OFFSET = 'offset'
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
vol.Optional(CONF_NAME, default=DEVICE_DEFAULT_NAME): vol.Coerce(str),
vol.Optional(CONF_SCALE, default=1): vol.Coerce(float),
vol.Optional(CONF_OFFSET, default=0): vol.Coerce(float)
})
TEMPER_SENSORS = []
def get_temper_devices():
"""Scan the Temper devices from temperusb."""
from temperusb.temper import TemperHandler
return TemperHandler().get_devices()
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up the Temper sensors."""
temp_unit = hass.config.units.temperature_unit
name = config.get(CONF_NAME)
scaling = {
'scale': config.get(CONF_SCALE),
'offset': config.get(CONF_OFFSET)
}
temper_devices = get_temper_devices()
for idx, dev in enumerate(temper_devices):
if idx != 0:
name = name + '_' + str(idx)
TEMPER_SENSORS.append(TemperSensor(dev, temp_unit, name, scaling))
add_entities(TEMPER_SENSORS)
def reset_devices():
"""
Re-scan for underlying Temper sensors and assign them to our devices.
This assumes the same sensor devices are present in the same order.
"""
temper_devices = get_temper_devices()
for sensor, device in zip(TEMPER_SENSORS, temper_devices):
sensor.set_temper_device(device)
class TemperSensor(Entity):
"""Representation of a Temper temperature sensor."""
def __init__(self, temper_device, temp_unit, name, scaling):
"""Initialize the sensor."""
self.temp_unit = temp_unit
self.scale = scaling['scale']
self.offset = scaling['offset']
self.current_value = None
self._name = name
self.set_temper_device(temper_device)
@property
def name(self):
"""Return the name of the temperature sensor."""
return self._name
@property
def state(self):
"""Return the state of the entity."""
return self.current_value
@property
def unit_of_measurement(self):
"""Return the unit of measurement of this entity, if any."""
return self.temp_unit
def set_temper_device(self, temper_device):
"""Assign the underlying device for this sensor."""
self.temper_device = temper_device
# set calibration data
self.temper_device.set_calibration_data(
scale=self.scale,
offset=self.offset
)
def update(self):
"""Retrieve latest state."""
try:
format_str = ('fahrenheit' if self.temp_unit == TEMP_FAHRENHEIT
else 'celsius')
sensor_value = self.temper_device.get_temperature(format_str)
self.current_value = round(sensor_value, 1)
except IOError:
_LOGGER.error("Failed to get temperature. The device address may"
"have changed. Attempting to reset device")
reset_devices()
|
# Copyright 2020 Saarland University, Spoken Language Systems LSV
# Author: Michael A. Hedderich, Lukas Lange, Dietrich Klakow
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# THIS CODE IS PROVIDED *AS IS*, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, EITHER EXPRESS OR IMPLIED, INCLUDING WITHOUT LIMITATION ANY IMPLIED
# WARRANTIES OR CONDITIONS OF TITLE, FITNESS FOR A PARTICULAR PURPOSE,
# MERCHANTABLITY OR NON-INFRINGEMENT.
#
# See the Apache 2 License for the specific language governing permissions and
# limitations under the License.
from autom_labeling_library.entity import EntityNameCollection
from autom_labeling_library.matching import MatchingAlgorithm, MatchConflictGreedySolvingAlgorithm
from autom_labeling_library.formats import LabelCreator, CoNLLFormatCreator
from .memory import Memory, DocumentType
from .status import Status
from .util import try_method_return_json, create_tokenizer
from flask import (
Blueprint, flash, g, redirect, render_template, request, session, url_for, jsonify
)
bp = Blueprint('autom_annotation', __name__, url_prefix='/autom_annotation')
@bp.route('/<document_type:document_type>', methods=('GET', 'POST'))
def index(document_type):
"""
If no text has been uploaded, redirect to text upload.
If text has been uploaded, show annotation result page.
The actual annotation is done via Ajax in the background on that
page as it might take a while (by calling the /autom_annotate_json
resource defined below).
"""
document = Memory.get_instance().get_document(document_type)
if not document:
flash("A {} document needs to be uploaded or inputed before automatic annotation.".format(
document_type), "warning")
return redirect(url_for('text_input.index', document_type=document_type))
at_least_one_entity_name = False
for extraction in Memory.get_instance().get_extractions(only_loaded=True):
if extraction.get_num_extracts() > 0:
at_least_one_entity_name = True
break
if not at_least_one_entity_name:
flash("Entity names need to be extracted for the automatic labeling process! Have you already extracted entitiy names? Have you loaded the necessary extractions?", "danger")
return redirect(url_for('knowledge_base.list_extracts'))
#if document.autom_labels is not None: # TODO Now that cache clearing for labels is deactivated, this does not make sense
# flash("This document has already been automatically annotated.", "warning")
return render_template("autom_annotation/autom_annotation.html", document_type=document_type)
@bp.route('/autom_annotate_json/<document_type:document_type>', methods=('GET', 'POST'))
def autom_annotate_json(document_type):
def lambda_function():
document = Memory.get_instance().get_document(document_type)
annotate_text(document)
return try_method_return_json(lambda_function, report_error_status=True)
def annotate_text(document):
Status.get_instance().set_state_processing()
settings = Memory.get_instance().get_settings()
if settings.use_language_specific_tokenizer_for_entity_names:
entity_name_tokenizer = create_tokenizer(settings.spacy_tokenizer_language_code)
else:
entity_name_tokenizer = create_tokenizer("whitespace")
Status.get_instance().set_message("Collecting entity names.")
entity_names = []
for extraction in Memory.get_instance().get_extractions(only_loaded=True):
entity_names.extend(extraction.get_extracts_for_matching(tokenizer=entity_name_tokenizer))
if len(entity_names) == 0:
raise Exception("No entity names found. Could not annotate. Maybe no extractions are active?")
Status.get_instance().set_message("Building entity name collection.")
entity_name_collection = EntityNameCollection(entity_names)
matching_algorithm = MatchingAlgorithm(entity_name_collection)
tokens = document.tokens
possible_matches = matching_algorithm.match_tokens(tokens, Status.get_instance())
Status.get_instance().set_message("Solving conflicts.")
conflict_solving_algorithm = MatchConflictGreedySolvingAlgorithm()
matches = list(possible_matches) # copy because conflict resolving algorithm removes matches to resolve conflicts
conflict_solving_algorithm.resolve_conflicts(matches)
label_creator = LabelCreator()
labels = label_creator.create(tokens, matches)
document.autom_labels = labels
document.matches = matches
document.possible_matches = possible_matches
Status.get_instance().set_state_idle()
|
from __future__ import print_function
from CGAL.CGAL_Kernel import Point_3
from CGAL.CGAL_Kernel import Segment_3
from CGAL.CGAL_Kernel import Triangle_3
from CGAL.CGAL_Polyhedron_3 import Polyhedron_3
from CGAL.CGAL_AABB_tree import AABB_tree_Polyhedron_3_Facet_handle
from CGAL.CGAL_AABB_tree import AABB_tree_Triangle_3_soup
from CGAL.CGAL_AABB_tree import AABB_tree_Segment_3_soup
def test_insert_from_array():
print("test_insert_from_array")
s1 = [1,2,3,6,4,5]
s2 = [1,2,3,16,41,51]
s3 = [1,2,3,65,45,5]
s4 = [1,2,3,64,44,5]
segments = [s1, s2, s3, s4]
tree_seg = AABB_tree_Segment_3_soup()
tree_seg.insert_from_array(segments)
assert(tree_seg.size()==4)
s=Segment_3(Point_3(1,2,3), Point_3(0,0,0))
assert(tree_seg.do_intersect(s))
t1 = [0,0,0,0,0,1,0,0,-1]
t2 = [0,0,0,0,0,1,0,1,-1]
t3 = [0,0,0,0,0,1,0,2,-1]
t4 = [0,0,0,0,0,1,0,3,-1]
triangles = [t1, t2, t3, t4]
tree_tri = AABB_tree_Triangle_3_soup()
tree_tri.insert_from_array(triangles)
assert(tree_tri.size()==4)
assert(tree_tri.do_intersect(s))
test_insert_from_array()
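# Demo: build a tetrahedron, index its facets in an AABB tree (rebuilt from a facet list),
# then build a second tree from a triangle soup and list the primitives a segment intersects.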
poly=Polyhedron_3()
poly.make_tetrahedron(Point_3(0,0,0),Point_3(1,0,0),Point_3(0,1,0),Point_3(0,0,1))
tree=AABB_tree_Polyhedron_3_Facet_handle()
lst=[]
for f in poly.facets():
lst.append(f)
tree.rebuild(lst)
tree=AABB_tree_Polyhedron_3_Facet_handle(lst)
print(tree.size())
lst=[]
for f in poly.facets():
p1=f.halfedge().vertex().point()
p2=f.halfedge().next().vertex().point()
p3=f.halfedge().next().next().vertex().point()
t=Triangle_3(p1,p2,p3)
lst.append(t)
tree2=AABB_tree_Triangle_3_soup(lst)
s=Segment_3(Point_3(-0.5,-0.5,0.5),Point_3(0.5,0.5,0.5))
l=[]
tree2.all_intersected_primitives(s,l)
for i in l:
print(i)
|
# Generated by Django 3.1.2 on 2020-10-22 16:27
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('bomberbot', '0003_auto_20201018_2352'),
]
operations = [
migrations.AddField(
model_name='teacher',
name='city',
field=models.CharField(blank=True, max_length=30),
),
]
|
#!/usr/bin/env python3
import os
import sys
import argparse
import matplotlib.pyplot as plt
import matplotlib.patches as mpatches
import matplotlib.lines as mlines
import matplotlib.colors as mcol
import matplotlib.collections as mc
import matplotlib.transforms as mtrans
import matplotlib.ticker as mtick
from itertools import chain
from collections import deque
import numpy as np
import scipy.stats as spsta
import math
import traceback
default_colors = plt.rcParams['axes.prop_cycle'].by_key()['color']
default_colors_hardcoded = ['#1f77b4', '#ff7f0e', '#2ca02c', '#d62728', '#9467bd', '#8c564b', '#e377c2', '#7f7f7f', '#bcbd22', '#17becf']
blue = np.array([0.12156863, 0.46666667, 0.70588235])
orange = np.array([1. , 0.49803922, 0.05490196])
green = np.array([0.17254902, 0.62745098, 0.17254902])
red = np.array(mcol.to_rgb(default_colors[3]))
blue = np.array([0x1f, 0x77, 0xb4], dtype=float) / 0xff
orange = np.array([0xff, 0x7f, 0x0e], dtype=float) / 0xff
green = np.array([0x2c, 0xa0, 0x2c], dtype=float) / 0xff
red = np.array([0xd6, 0x27, 0x28], dtype=float) / 0xff
purple = np.array([0x94, 0x67, 0xbd], dtype=float) / 0xff
brown = np.array([0x8c, 0x56, 0x4b], dtype=float) / 0xff
pink = np.array([0xe3, 0x77, 0xc2], dtype=float) / 0xff
grey = np.array([0x7f, 0x7f, 0x7f], dtype=float) / 0xff
ugly_green = np.array([0xbc, 0xbd, 0x22], dtype=float) / 0xff
teal = np.array([0x17, 0xbe, 0xcf], dtype=float) / 0xff
def generate_plot_of_postcorrection_probabilities(raw_data, interactive=True, savefig_basename=None):
histograms = {}
for fname in raw_data:
for n_weak_bits in raw_data[fname]:
if n_weak_bits not in histograms:
histograms[n_weak_bits] = []
for data_point in raw_data[fname][n_weak_bits]:
histograms[n_weak_bits] += [i for i in data_point['post_probabilities'] if i != 0.0]
for n_weak_bits in histograms:
print(n_weak_bits, len(histograms[n_weak_bits]))
fig, ax = plt.subplots(1, 1, figsize=(5, 2.25))
x = sorted(histograms.keys())
y = [j for i, j in sorted(histograms.items(), key=lambda entry: entry[0])]
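# Violin inputs: the pre-correction violins use a constant per-bit error probability of 0.5,
# while the post-correction violins use the per-bit probabilities parsed from the log files.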
y_raw = [[0.5] for _ in y]
y_log = [[d for d in row] for row in y]
labels = []
v1 = ax.violinplot(y_raw, np.array(x) * 1.5 + 0, points=100, bw_method=0.2, showmedians=True, widths=0.5)
v2 = ax.violinplot(y_log, np.array(x) * 1.5 + 0.5, points=100, bw_method=0.2, showmedians=True, widths=0.5)
labels.append(mpatches.Patch(color=v1["bodies"][0].get_facecolor().flatten(), label="Pre-Correction"))
labels.append(mpatches.Patch(color=v2["bodies"][0].get_facecolor().flatten(), label="Post-Correction"))
# Style the violin statistics marks: black medians, thicker min/max lines
for vp in [v1, v2]:
vp['cmedians'].set_color('black')
vp['cmedians'].set_linewidth(1)
vp['cmins'].set_linewidth(2)
vp['cmaxes'].set_linewidth(2)
ax.set_xticks(np.array(x) * 1.5 + 0.25)
ax.set_xticklabels(x)
ax.set_xlabel('Number of Pre-Correction Errors Per ECC Word')
ax.set_ylabel('Per-Bit Probability\nof Post-Correction Error', position=(0, 0.4))
ax.legend(handles=labels, fontsize=7, loc='upper left')
fig.tight_layout()
fig.canvas.manager.set_window_title("Figure 4: Distribution of error probabilities")
if interactive:
plt.show()
else:
fname = "fig_4_postcorrection_probabilities.pdf"
if savefig_basename is not None:
if os.path.isdir(savefig_basename):
savefig_basename = os.path.join(os.path.normpath(savefig_basename), '')
fname = savefig_basename + fname
print("Saving figure:", fname)
fig.savefig(fname)
def extract_bracketlist(spt, start_pos):
assert spt[start_pos] == '['
epos = start_pos + 1
while epos < len(spt) and spt[epos] != ']':
epos += 1
assert epos < len(spt), "mismatched brace list!"
return spt[start_pos + 1 : epos], epos
def parse_file(fname):
data = {}
with open(fname, 'r') as f:
line_num = 1
try:
for line in f:
if line.startswith("[INFO]"):
if line[7:].startswith("generating Hamming code"):
spt = line.strip().split(' ')
k = int(spt[6])
p = int(spt[8])
assert k == 64, "ERROR - only did this analysis for K=64"
elif line[7:].startswith("n_weak_bits"):
spt = line.strip().split(' ')
n_weak_bits = int(spt[2])
data[n_weak_bits] = []
elif line.startswith("[DATA] weak_bits"):
end_pos = line.find('post_probabilities:')
assert end_pos != -1, "malformed line: " + line
weak_bit_list = eval(line[18:end_pos])
post_probabilities = eval(line[end_pos + 19:])
data[n_weak_bits].append({
'weak_bits' : weak_bit_list
, 'post_probabilities' : post_probabilities
})
line_num += 1
except:
# print("[DEBUG] data:", data)
print("[ERROR] failed parsing at fname:", fname, "line:", line_num)
raise
# print("[DEBUG]", data)
print("[DEBUG] fname:", fname, "parsed")
return data
def parse_files(filenames):
print("[INFO] parsing", len(filenames), "input files")
all_data = {}
for fname in filenames:
try:
all_data[fname] = parse_file(fname)
except Exception as e:
print(e)
print(traceback.format_exc())
return all_data
def main():
parser = argparse.ArgumentParser()
parser.add_argument("-i", "--interactive", help="draw interactive plot with matplotlib (else save plot to file)", action='store_true')
parser.add_argument("-o", "--output-filename-base", help="custom filename base of plots to save - will be suffixed with -figN.pdf", type=str, default=None)
parser.add_argument("input_files_and_dirs", metavar="input-files-and-dirs", help="files (or directory containing files) to parse", nargs='+')
args = parser.parse_args()
# canonicalize input filenames
cleaned_fnames = []
for f_or_d in args.input_files_and_dirs:
if os.path.isfile(f_or_d):
cleaned_fnames.append(f_or_d)
elif os.path.isdir(f_or_d):
for root, dirs, files in os.walk(f_or_d):
cleaned_fnames += [os.path.join(root, f) for f in files]
else:
print("[ERROR] invalid file/dir:", f_or_d)
sys.exit(-1)
if not cleaned_fnames:
print("[WARN] no input files provided to parse!")
else:
all_data = parse_files(cleaned_fnames)
generate_plot_of_postcorrection_probabilities(all_data, args.interactive, args.output_filename_base)
if __name__ == "__main__":
main()
|
import torch
import torch.nn as nn
from tqdm import tqdm
import math
import os
import copy
import numpy as np
from .seg import get_coord
from mmdet.models.builder import SAR_MODULES
@SAR_MODULES.register_module()
class SpatialRelationModule(nn.Module):
def __init__(self, img_prefix, d_model, pretrainedPSPNet, batch_size):
super(SpatialRelationModule, self).__init__()
self.d_model = d_model
self.anatomical_dict = get_coord(img_prefix, pretrainedPSPNet, batch_size)
def compute_Mp_vector(self, roi, _anatomical_part, wl, hl):
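"""Build a 40-dim spatial vector for one RoI: for every anatomical part, the x/y offsets
between the RoI box corners and the part's box corners, normalized by the lung-region
width wl and height hl."""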
Mp = np.empty(40, float)
for idx, part in enumerate(_anatomical_part):
ana_part_coord = _anatomical_part[part]
Mp[idx*self.d_model+0] = (roi[0] - ana_part_coord[0]) / wl
Mp[idx*self.d_model+1] = (roi[1] - ana_part_coord[1]) / hl
Mp[idx*self.d_model+2] = (roi[0] - ana_part_coord[2]) / wl
Mp[idx*self.d_model+3] = (roi[1] - ana_part_coord[3]) / hl
Mp[idx*self.d_model+4] = (roi[2] - ana_part_coord[0]) / wl
Mp[idx*self.d_model+5] = (roi[3] - ana_part_coord[1]) / hl
Mp[idx*self.d_model+6] = (roi[2] - ana_part_coord[2]) / wl
Mp[idx*self.d_model+7] = (roi[3] - ana_part_coord[3]) / hl
return Mp
def get_spatial_vector(self, rois, anatomical_parts, image_sizes):
M = np.empty((0,40), float) # size = [n, 40]
for idx, anatomical_part in enumerate(anatomical_parts):
image_rois = rois[rois[:,0]==float(idx)]
image_width, image_height = image_sizes[idx][:2]
_anatomical_part = copy.deepcopy(anatomical_part)
for part in _anatomical_part:
_anatomical_part[part][0] *= image_width
_anatomical_part[part][1] *= image_height
_anatomical_part[part][2] *= image_width
_anatomical_part[part][3] *= image_height
wl = _anatomical_part["right_lung"][2] - _anatomical_part["left_lung"][0]
hl = _anatomical_part["right_lung"][3] - _anatomical_part["left_lung"][1]
image_rois = image_rois.detach().cpu().numpy()
Mp = np.apply_along_axis(
self.compute_Mp_vector, 1, image_rois[:,1:], _anatomical_part, wl, hl)
M = np.append(M, Mp, axis=0)
return M
def positional_encoding(self, M):
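"""Sinusoidal encoding of the spatial offsets: for each frequency index j, append
sin(M / 1000^(j/d_model)) and cos(M / 1000^(j/d_model)) along the feature axis."""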
fspa = np.empty((M.shape[0],0), float)
for j in range(self.d_model):
_M = M * 1/(np.power(1000, j/self.d_model))
fspa = np.concatenate((fspa, np.concatenate((np.sin(_M), np.cos(_M)), axis=1)), axis=1)
return fspa
def forward(self, rois, img_metas):
image_paths = [item["filename"] for item in img_metas]
image_sizes = [item["img_shape"] for item in img_metas]
res = [self.anatomical_dict[image_path] for image_path in image_paths]
M = self.get_spatial_vector(rois, res, image_sizes)
# M.shape = (512, 40) if batch_size = 1
fspa = self.positional_encoding(M)
return fspa |
# coding:utf-8
from django.db import models
# Create your models here.
class AmazonComment(models.Model):
isbn13 = models.CharField(max_length=200, default=None)
author = models.TextField(default=None)
content = models.TextField(default=None)
class AmazonInfo(models.Model):
"""
Basic Amazon information
"""
title = models.TextField(default=None)
isbn13 = models.CharField(max_length=200, default=None)
average = models.CharField(max_length=200,default=None)
erecommand = models.TextField(default=None)
frecommand = models.TextField(default=None)
mrecommand = models.TextField(default=None) |
"""
Copyright 2013 Rackspace
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from cafe.drivers.unittest.decorators import tags
from cloudcafe.common.tools.datagen import rand_name
from cloudcafe.images.common.types import ImageContainerFormat, ImageDiskFormat
from cloudroast.images.v1.fixtures import ImagesV1Fixture
class ImageMembersTests(ImagesV1Fixture):
@classmethod
def _create_image(cls):
image_id = cls.behaviors.create_standard_image(
rand_name,
ImageContainerFormat.BARE,
ImageDiskFormat.RAW, 1024)
cls.resources.add(image_id, cls.api_client.delete_image)
return image_id
@tags(type='positive', net='no')
def test_add_image_member(self):
image_id = self._create_image()
response = self.api_client.add_member_to_image(
image_id,
self.tenant_ids[0])
self.assertEqual(204, response.status_code)
response = self.api_client.list_image_membership(image_id)
self.assertEqual(200, response.status_code)
members = response.entity
member_ids = [x.member_id for x in members]
self.assertIn(self.tenant_ids[0], member_ids)
@tags(type='positive', net='no')
def test_get_shared_images(self):
image_id = self._create_image()
response = self.api_client.add_member_to_image(
image_id,
self.tenant_ids[0])
self.assertEqual(204, response.status_code)
shared_image = self._create_image()
response = self.api_client.add_member_to_image(
shared_image,
self.tenant_ids[0])
response = self.api_client.list_shared_images(self.tenant_ids[0])
self.assertEqual(200, response.status_code)
images = response.entity
image_ids = [x.id_ for x in images]
self.assertIn(image_id, image_ids)
self.assertIn(shared_image, image_ids)
@tags(type='positive', net='no')
def test_remove_member(self):
image_id = self._create_image()
response = self.api_client.add_member_to_image(image_id,
self.tenant_ids[0])
self.assertEqual(204, response.status_code)
response = self.api_client.list_image_membership(image_id)
self.assertEqual(200, response.status_code)
self.assertEqual(1, len(response.entity))
response = self.api_client.delete_member_from_image(image_id,
self.tenant_ids[0])
self.assertEqual(204, response.status_code)
response = self.api_client.list_image_membership(image_id)
self.assertEqual(200, response.status_code)
members = response.entity
self.assertEqual(0, len(members))
|
import sys
from setuptools import setup, find_packages
assert sys.version_info.major == 3 and sys.version_info.minor >= 6, \
"Bullet-Safety-Gym uses Python 3.6 and above. "
with open('README.md', 'r') as f:
# description from readme file
long_description = f.read()
setup(
name='bullet_safety_gym',
version='0.1',
author='Sven Gronauer',
author_email='sven.gronauer@tum.de',
description='A framework to benchmark safety in Reinforcement Learning.',
long_description=long_description,
long_description_content_type="text/markdown",
packages=find_packages(),
license='MIT license',
url='https://github.com/svengronauer/Bullet-Safety-Gym',
install_requires=[
'gym>=0.17.2',
'numpy',
'pybullet>=3.0.6'
],
python_requires='>=3.6',
platforms=['Linux Ubuntu', 'darwin'], # supports Linux and Mac OSX
classifiers=[
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
],
)
|