content stringlengths 5 1.05M |
|---|
import json
import pandas as pd
from sklearn.linear_model import LinearRegression
from sklearn.datasets import fetch_california_housing
from sklearn.model_selection import train_test_split
import numpy as np
import mlflow
import matplotlib.pyplot as plt
import os
# Load the California housing dataset with features/target as pandas objects.
cali_housing = fetch_california_housing(as_frame=True)

# Split the dataset into train and test partitions; 20% held out for
# evaluation, fixed random_state for reproducibility.
X_train, X_test, y_train, y_test = train_test_split(
    cali_housing.data, cali_housing.target, test_size=0.2, random_state=123
)

# Train an ordinary least-squares linear regression on the training split.
lin_reg = LinearRegression().fit(X_train, y_train)

# Build the evaluation dataframe for mlflow.evaluate: the test features plus
# a "target" column holding the ground-truth labels.
eval_data = X_test.copy()
eval_data["target"] = y_test
def metrics_only_fn(eval_df, builtin_metrics):
    """
    Custom metric function that produces metrics only, no artifacts.

    Metrics may be computed straight from the evaluation dataframe (which
    carries "prediction" and "target" columns) or derived from the builtin
    metrics mlflow has already computed.
    """
    shifted_abs_diff = np.abs(eval_df["prediction"] - eval_df["target"] + 1)
    return {
        "squared_diff_plus_one": np.sum(shifted_abs_diff ** 2),
        "sum_on_label_divided_by_two": builtin_metrics["sum_on_label"] / 2,
    }
def file_artifacts_fn(eval_df, builtin_metrics, artifacts_dir):
    """
    Custom metric function that returns artifacts as file paths.

    Saves several example artifacts into artifacts_dir and returns an empty
    metrics dict plus a mapping of artifact name -> file path. For the full
    list of supported file extensions refer to
    https://mlflow.org/docs/latest/python_api/mlflow.html#mlflow.evaluate
    """
    # numpy array artifact
    example_np_arr = np.array([1, 2, 3])
    np.save(os.path.join(artifacts_dir, "example.npy"), example_np_arr, allow_pickle=False)
    # dataframe artifacts, saved both as csv and parquet
    example_df = pd.DataFrame({"test": [2.2, 3.1], "test2": [3, 2]})
    example_df.to_csv(os.path.join(artifacts_dir, "example.csv"), index=False)
    example_df.to_parquet(os.path.join(artifacts_dir, "example.parquet"))
    # json artifact: a small dict merged with the builtin metrics
    example_json = {"hello": "there", "test_list": [0.1, 0.3, 4]}
    example_json.update(builtin_metrics)
    with open(os.path.join(artifacts_dir, "example.json"), "w") as f:
        json.dump(example_json, f)
    # scatter-plot artifacts: predictions on x, targets on y.
    # Bug fix: the axis labels were swapped relative to the scatter arguments
    # (x data is eval_df["prediction"] but was labelled "Targets").
    plt.scatter(eval_df["prediction"], eval_df["target"])
    plt.xlabel("Predictions")
    plt.ylabel("Targets")
    plt.title("Targets vs. Predictions")
    plt.savefig(os.path.join(artifacts_dir, "example.png"))
    plt.savefig(os.path.join(artifacts_dir, "example.jpeg"))
    # plain-text artifact
    with open(os.path.join(artifacts_dir, "example.txt"), "w") as f:
        f.write("hello world!")
    return {}, {
        "example_np_arr_from_npy_file": os.path.join(artifacts_dir, "example.npy"),
        "example_df_from_csv_file": os.path.join(artifacts_dir, "example.csv"),
        "example_df_from_parquet_file": os.path.join(artifacts_dir, "example.parquet"),
        "example_dict_from_json_file": os.path.join(artifacts_dir, "example.json"),
        "example_image_from_png_file": os.path.join(artifacts_dir, "example.png"),
        "example_image_from_jpeg_file": os.path.join(artifacts_dir, "example.jpeg"),
        "example_string_from_txt_file": os.path.join(artifacts_dir, "example.txt"),
    }
class ExampleClass:
    """Minimal custom type used to demonstrate pickle-serialized artifacts."""

    def __init__(self, x):
        # Single payload value carried by the instance.
        self.x = x
def object_artifacts_fn(eval_df, builtin_metrics):
    """
    Custom metric function that returns in-memory python objects as artifacts,
    letting mlflow serialize each one itself — no manual file saving needed.

    Returns (metrics, artifacts): an empty metrics dict and a mapping of
    artifact name -> python object.
    """
    example_np_arr = np.array([1, 2, 3])
    example_df = pd.DataFrame({"test": [2.2, 3.1], "test2": [3, 2]})
    # dict artifact, merged with the builtin metrics
    example_dict = {"hello": "there", "test_list": [0.1, 0.3, 4]}
    example_dict.update(builtin_metrics)
    # a JSON-formatted string is also accepted and stored as json
    example_dict_2 = '{"a": 3, "b": [1, 2, 3]}'
    # figure artifact: predictions on x, targets on y.
    # Bug fix: the axis labels were swapped relative to the scatter arguments.
    example_image = plt.figure()
    plt.scatter(eval_df["prediction"], eval_df["target"])
    plt.xlabel("Predictions")
    plt.ylabel("Targets")
    plt.title("Targets vs. Predictions")
    # arbitrary custom objects are pickled
    example_custom_class = ExampleClass(10)
    return {}, {
        "example_np_arr_from_obj_saved_as_npy": example_np_arr,
        "example_df_from_obj_saved_as_csv": example_df,
        "example_dict_from_obj_saved_as_json": example_dict,
        "example_image_from_obj_saved_as_png": example_image,
        "example_dict_from_json_str_saved_as_json": example_dict_2,
        "example_class_from_obj_saved_as_pickle": example_custom_class,
    }
def mixed_example_fn(eval_df, builtin_metrics, artifacts_dir):
    """
    Custom metric function mixing the different return styles: computed
    metrics, an object artifact, and a file-path artifact.
    """
    metrics = {
        "squared_diff_divided_two": np.sum(
            np.abs(eval_df["prediction"] - eval_df["target"]) ** 2 / 2
        ),
        "sum_on_label_multiplied_by_three": builtin_metrics["sum_on_label"] * 3,
    }
    example_dict = {"hello": "there", "test_list": [0.1, 0.3, 4]}
    example_dict.update(builtin_metrics)
    # scatter plot saved to disk: predictions on x, targets on y.
    # Bug fix: the axis labels were swapped relative to the scatter arguments.
    plt.scatter(eval_df["prediction"], eval_df["target"])
    plt.xlabel("Predictions")
    plt.ylabel("Targets")
    plt.title("Targets vs. Predictions")
    plt.savefig(os.path.join(artifacts_dir, "example2.png"))
    artifacts = {
        # NOTE(review): the name says "csv" but a dict is serialized as json;
        # kept as-is since downstream consumers may rely on the artifact name.
        "example_dict_2_from_obj_saved_as_csv": example_dict,
        "example_image_2_from_png_file": os.path.join(artifacts_dir, "example2.png"),
    }
    return metrics, artifacts
# Log the trained model, then run mlflow's evaluation suite over the held-out
# data with all four custom-metric callbacks attached.
# NOTE(review): model=/dataset_name=/custom_metrics= is the pre-2.0
# mlflow.evaluate signature — confirm against the pinned mlflow version.
with mlflow.start_run() as run:
    mlflow.sklearn.log_model(lin_reg, "model")
    model_uri = mlflow.get_artifact_uri("model")
    result = mlflow.evaluate(
        model=model_uri,
        data=eval_data,
        targets="target",
        model_type="regressor",
        dataset_name="cali_housing",
        evaluators=["default"],
        custom_metrics=[
            metrics_only_fn,
            file_artifacts_fn,
            object_artifacts_fn,
            mixed_example_fn,
        ],
    )
    # Show everything the evaluation produced, builtin and custom alike.
    print(f"metrics:\n{result.metrics}")
    print(f"artifacts:\n{result.artifacts}")
|
# parsetab.py
# This file is automatically generated. Do not edit.
_tabversion = '3.10'
_lr_method = 'LALR'
_lr_signature = 'rightASIGNARleftSUMARESTAleftMULTDIVrightUMINUSINCLUDE USING NAMESPACE STD COUT CIN GET CADENA RETURN VOID INT ENDL IDENTIFICADOR ENTERO ASIGNAR SUMA RESTA MULT DIV POTENCIA MODULO MINUSMINUS PLUSPLUS SI SINO MIENTRAS PARA AND OR NOT MENORQUE MENORIGUAL MAYORQUE MAYORIGUAL IGUAL DISTINTO NUMERAL PARIZQ PARDER CORIZQ CORDER LLAIZQ LLADER PUNTOCOMA COMA COMDOB MAYORDER MAYORIZQdeclaracion : IDENTIFICADOR ASIGNAR expresion PUNTOCOMAdeclaracion : expresion\n expresion : expresion SUMA expresion\n | expresion RESTA expresion\n | expresion MULT expresion\n | expresion DIV expresion\n | expresion POTENCIA expresion\n | expresion MODULO expresion\n\n expresion : RESTA expresion %prec UMINUS\n expresion : PARIZQ expresion PARDER\n | LLAIZQ expresion LLADER\n | CORIZQ expresion CORDER\n \n expresion : expresion MENORQUE expresion \n | expresion MAYORQUE expresion \n | expresion MENORIGUAL expresion \n | expresion MAYORIGUAL expresion \n | expresion IGUAL expresion \n | expresion DISTINTO expresion\n | PARIZQ expresion PARDER MENORQUE PARIZQ expresion PARDER\n | PARIZQ expresion PARDER MAYORQUE PARIZQ expresion PARDER\n | PARIZQ expresion PARDER MENORIGUAL PARIZQ expresion PARDER \n | PARIZQ expresion PARDER MAYORIGUAL PARIZQ expresion PARDER\n | PARIZQ expresion PARDER IGUAL PARIZQ expresion PARDER\n | PARIZQ expresion PARDER DISTINTO PARIZQ expresion PARDER\n \n expresion : expresion AND expresion \n | expresion OR expresion \n | expresion NOT expresion \n | PARIZQ expresion AND expresion PARDER\n | PARIZQ expresion OR expresion PARDER\n | PARIZQ expresion NOT expresion PARDER\n expresion : ENTEROexpresion : COMDOB expresion COMDOBexpresion : IDENTIFICADOR'
_lr_action_items = {'ENTERO':([0,1,2,3,6,8,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,31,32,34,35,66,67,68,69,70,71,],[4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,]),'MODULO':([4,5,9,10,11,12,13,29,30,33,36,37,38,39,40,41,42,43,44,45,46,47,48,49,50,51,52,53,54,55,62,63,65,72,73,74,75,76,77,78,79,80,81,82,83,84,85,],[-31,17,-33,-33,17,17,-9,17,17,-10,-12,17,-5,17,17,-4,17,-3,17,17,17,17,17,17,-6,17,-32,-11,17,17,17,17,-30,-29,-28,17,17,17,17,17,17,-24,-21,-20,-23,-19,-22,]),'MULT':([4,5,9,10,11,12,13,29,30,33,36,37,38,39,40,41,42,43,44,45,46,47,48,49,50,51,52,53,54,55,62,63,65,72,73,74,75,76,77,78,79,80,81,82,83,84,85,],[-31,15,-33,-33,15,15,-9,15,15,-10,-12,15,-5,15,15,15,15,15,15,15,15,15,15,15,-6,15,-32,-11,15,15,15,15,-30,-29,-28,15,15,15,15,15,15,-24,-21,-20,-23,-19,-22,]),'RESTA':([0,1,2,3,4,5,6,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,43,44,45,46,47,48,49,50,51,52,53,54,55,62,63,65,66,67,68,69,70,71,72,73,74,75,76,77,78,79,80,81,82,83,84,85,],[3,3,3,3,-31,18,3,3,-33,-33,18,18,-9,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,18,18,3,3,-10,3,3,-12,18,-5,18,18,-4,18,-3,18,18,18,18,18,18,-6,18,-32,-11,18,18,18,18,-30,3,3,3,3,3,3,-29,-28,18,18,18,18,18,18,-24,-21,-20,-23,-19,-22,]),'MAYORQUE':([4,5,9,10,11,12,13,29,30,33,36,37,38,39,40,41,42,43,44,45,46,47,48,49,50,51,52,53,54,55,62,63,65,72,73,74,75,76,77,78,79,80,81,82,83,84,85,],[-31,19,-33,-33,19,19,-9,19,19,58,-12,19,-5,19,19,-4,19,-3,19,19,19,19,19,19,-6,19,-32,-11,19,19,19,19,-30,-29,-28,19,19,19,19,19,19,-24,-21,-20,-23,-19,-22,]),'IGUAL':([4,5,9,10,11,12,13,29,30,33,36,37,38,39,40,41,42,43,44,45,46,47,48,49,50,51,52,53,54,55,62,63,65,72,73,74,75,76,77,78,79,80,81,82,83,84,85,],[-31,16,-33,-33,16,16,-9,16,16,59,-12,16,-5,16,16,-4,16,-3,16,16,16,16,16,16,-6,16,-32,-11,16,16,16,16,-30,-29,-28,16,16,16,16,16,16,-24,-21,-20,-23,-19,-22,]),'POTENCIA':([4,5,9,10,11,12,13,29,30,33,36,37,38,39,40,41,42,43,44,45,46,47,48,49,50,51,52,53,54,55,62,63,65,
72,73,74,75,76,77,78,79,80,81,82,83,84,85,],[-31,22,-33,-33,22,22,-9,22,22,-10,-12,22,-5,22,22,-4,22,-3,22,22,22,22,22,22,-6,22,-32,-11,22,22,22,22,-30,-29,-28,22,22,22,22,22,22,-24,-21,-20,-23,-19,-22,]),'PARIZQ':([0,1,2,3,6,8,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,31,32,34,35,56,57,58,59,60,61,66,67,68,69,70,71,],[1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,66,67,68,69,70,71,1,1,1,1,1,1,]),'MENORIGUAL':([4,5,9,10,11,12,13,29,30,33,36,37,38,39,40,41,42,43,44,45,46,47,48,49,50,51,52,53,54,55,62,63,65,72,73,74,75,76,77,78,79,80,81,82,83,84,85,],[-31,25,-33,-33,25,25,-9,25,25,57,-12,25,-5,25,25,-4,25,-3,25,25,25,25,25,25,-6,25,-32,-11,25,25,25,25,-30,-29,-28,25,25,25,25,25,25,-24,-21,-20,-23,-19,-22,]),'$end':([4,5,7,9,10,13,33,36,37,38,39,40,41,42,43,44,45,46,47,48,49,50,51,52,53,64,65,72,73,80,81,82,83,84,85,],[-31,-2,0,-33,-33,-9,-10,-12,-27,-5,-17,-8,-4,-14,-3,-13,-7,-26,-18,-15,-25,-6,-16,-32,-11,-1,-30,-29,-28,-24,-21,-20,-23,-19,-22,]),'COMDOB':([0,1,2,3,4,6,8,10,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,31,32,33,34,35,36,37,38,39,40,41,42,43,44,45,46,47,48,49,50,51,52,53,65,66,67,68,69,70,71,72,73,80,81,82,83,84,85,],[6,6,6,6,-31,6,6,-33,-9,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,52,6,6,-10,6,6,-12,-27,-5,-17,-8,-4,-14,-3,-13,-7,-26,-18,-15,-25,-6,-16,-32,-11,-30,6,6,6,6,6,6,-29,-28,-24,-21,-20,-23,-19,-22,]),'AND':([4,5,9,10,11,12,13,29,30,33,36,37,38,39,40,41,42,43,44,45,46,47,48,49,50,51,52,53,54,55,62,63,65,72,73,74,75,76,77,78,79,80,81,82,83,84,85,],[-31,26,-33,-33,35,26,-9,26,26,-10,-12,26,-5,26,26,-4,26,-3,26,26,26,26,26,26,-6,26,-32,-11,26,26,26,26,-30,-29,-28,26,26,26,26,26,26,-24,-21,-20,-23,-19,-22,]),'SUMA':([4,5,9,10,11,12,13,29,30,33,36,37,38,39,40,41,42,43,44,45,46,47,48,49,50,51,52,53,54,55,62,63,65,72,73,74,75,76,77,78,79,80,81,82,83,84,85,],[-31,20,-33,-33,20,20,-9,20,20,-10,-12,20,-5,20,20,-4,20,-3,20,20,20,20,20,20,-6,20,-32,-11,20,20,20,20,-30,-29,-28,20,20,20,20,20,20,-24,-21,-20,-23,-19,-22,]),'LLADER':([4,10,13,30,33,36,37,38
,39,40,41,42,43,44,45,46,47,48,49,50,51,52,53,65,72,73,80,81,82,83,84,85,],[-31,-33,-9,53,-10,-12,-27,-5,-17,-8,-4,-14,-3,-13,-7,-26,-18,-15,-25,-6,-16,-32,-11,-30,-29,-28,-24,-21,-20,-23,-19,-22,]),'CORIZQ':([0,1,2,3,6,8,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,31,32,34,35,66,67,68,69,70,71,],[2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,]),'DIV':([4,5,9,10,11,12,13,29,30,33,36,37,38,39,40,41,42,43,44,45,46,47,48,49,50,51,52,53,54,55,62,63,65,72,73,74,75,76,77,78,79,80,81,82,83,84,85,],[-31,27,-33,-33,27,27,-9,27,27,-10,-12,27,-5,27,27,27,27,27,27,27,27,27,27,27,-6,27,-32,-11,27,27,27,27,-30,-29,-28,27,27,27,27,27,27,-24,-21,-20,-23,-19,-22,]),'PARDER':([4,10,11,13,33,36,37,38,39,40,41,42,43,44,45,46,47,48,49,50,51,52,53,55,62,63,65,72,73,74,75,76,77,78,79,80,81,82,83,84,85,],[-31,-33,33,-9,-10,-12,-27,-5,-17,-8,-4,-14,-3,-13,-7,-26,-18,-15,-25,-6,-16,-32,-11,65,72,73,-30,-29,-28,80,81,82,83,84,85,-24,-21,-20,-23,-19,-22,]),'NOT':([4,5,9,10,11,12,13,29,30,33,36,37,38,39,40,41,42,43,44,45,46,47,48,49,50,51,52,53,54,55,62,63,65,72,73,74,75,76,77,78,79,80,81,82,83,84,85,],[-31,14,-33,-33,32,14,-9,14,14,-10,-12,14,-5,14,14,-4,14,-3,14,14,14,14,14,14,-6,14,-32,-11,14,14,14,14,-30,-29,-28,14,14,14,14,14,14,-24,-21,-20,-23,-19,-22,]),'MENORQUE':([4,5,9,10,11,12,13,29,30,33,36,37,38,39,40,41,42,43,44,45,46,47,48,49,50,51,52,53,54,55,62,63,65,72,73,74,75,76,77,78,79,80,81,82,83,84,85,],[-31,21,-33,-33,21,21,-9,21,21,60,-12,21,-5,21,21,-4,21,-3,21,21,21,21,21,21,-6,21,-32,-11,21,21,21,21,-30,-29,-28,21,21,21,21,21,21,-24,-21,-20,-23,-19,-22,]),'ASIGNAR':([9,],[31,]),'OR':([4,5,9,10,11,12,13,29,30,33,36,37,38,39,40,41,42,43,44,45,46,47,48,49,50,51,52,53,54,55,62,63,65,72,73,74,75,76,77,78,79,80,81,82,83,84,85,],[-31,23,-33,-33,34,23,-9,23,23,-10,-12,23,-5,23,23,-4,23,-3,23,23,23,23,23,23,-6,23,-32,-11,23,23,23,23,-30,-29,-28,23,23,23,23,23,23,-24,-21,-20,-23,-19,-22,]),'DISTINTO':([4,5,9,10,11,12,13,29,30,33,36,37,38,39,40,41,42,43,44,45,46,47,48,49,50,51
,52,53,54,55,62,63,65,72,73,74,75,76,77,78,79,80,81,82,83,84,85,],[-31,24,-33,-33,24,24,-9,24,24,56,-12,24,-5,24,24,-4,24,-3,24,24,24,24,24,24,-6,24,-32,-11,24,24,24,24,-30,-29,-28,24,24,24,24,24,24,-24,-21,-20,-23,-19,-22,]),'CORDER':([4,10,12,13,33,36,37,38,39,40,41,42,43,44,45,46,47,48,49,50,51,52,53,65,72,73,80,81,82,83,84,85,],[-31,-33,36,-9,-10,-12,-27,-5,-17,-8,-4,-14,-3,-13,-7,-26,-18,-15,-25,-6,-16,-32,-11,-30,-29,-28,-24,-21,-20,-23,-19,-22,]),'LLAIZQ':([0,1,2,3,6,8,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,31,32,34,35,66,67,68,69,70,71,],[8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,]),'IDENTIFICADOR':([0,1,2,3,6,8,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,31,32,34,35,66,67,68,69,70,71,],[9,10,10,10,10,10,10,10,10,10,10,10,10,10,10,10,10,10,10,10,10,10,10,10,10,10,10,10,10,10,10,]),'PUNTOCOMA':([4,10,13,33,36,37,38,39,40,41,42,43,44,45,46,47,48,49,50,51,52,53,54,65,72,73,80,81,82,83,84,85,],[-31,-33,-9,-10,-12,-27,-5,-17,-8,-4,-14,-3,-13,-7,-26,-18,-15,-25,-6,-16,-32,-11,64,-30,-29,-28,-24,-21,-20,-23,-19,-22,]),'MAYORIGUAL':([4,5,9,10,11,12,13,29,30,33,36,37,38,39,40,41,42,43,44,45,46,47,48,49,50,51,52,53,54,55,62,63,65,72,73,74,75,76,77,78,79,80,81,82,83,84,85,],[-31,28,-33,-33,28,28,-9,28,28,61,-12,28,-5,28,28,-4,28,-3,28,28,28,28,28,28,-6,28,-32,-11,28,28,28,28,-30,-29,-28,28,28,28,28,28,28,-24,-21,-20,-23,-19,-22,]),}
_lr_action = {}
for _k, _v in _lr_action_items.items():
for _x,_y in zip(_v[0],_v[1]):
if not _x in _lr_action: _lr_action[_x] = {}
_lr_action[_x][_k] = _y
del _lr_action_items
_lr_goto_items = {'expresion':([0,1,2,3,6,8,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,31,32,34,35,66,67,68,69,70,71,],[5,11,12,13,29,30,37,38,39,40,41,42,43,44,45,46,47,48,49,50,51,54,55,62,63,74,75,76,77,78,79,]),'declaracion':([0,],[7,]),}
_lr_goto = {}
for _k, _v in _lr_goto_items.items():
for _x, _y in zip(_v[0], _v[1]):
if not _x in _lr_goto: _lr_goto[_x] = {}
_lr_goto[_x][_k] = _y
del _lr_goto_items
_lr_productions = [
("S' -> declaracion","S'",1,None,None,None),
('declaracion -> IDENTIFICADOR ASIGNAR expresion PUNTOCOMA','declaracion',4,'p_declaracion_asignar','analizador_sintactico.py',17),
('declaracion -> expresion','declaracion',1,'p_declaracion_expr','analizador_sintactico.py',21),
('expresion -> expresion SUMA expresion','expresion',3,'p_expresion_operaciones','analizador_sintactico.py',27),
('expresion -> expresion RESTA expresion','expresion',3,'p_expresion_operaciones','analizador_sintactico.py',28),
('expresion -> expresion MULT expresion','expresion',3,'p_expresion_operaciones','analizador_sintactico.py',29),
('expresion -> expresion DIV expresion','expresion',3,'p_expresion_operaciones','analizador_sintactico.py',30),
('expresion -> expresion POTENCIA expresion','expresion',3,'p_expresion_operaciones','analizador_sintactico.py',31),
('expresion -> expresion MODULO expresion','expresion',3,'p_expresion_operaciones','analizador_sintactico.py',32),
('expresion -> RESTA expresion','expresion',2,'p_expresion_uminus','analizador_sintactico.py',53),
('expresion -> PARIZQ expresion PARDER','expresion',3,'p_expresion_grupo','analizador_sintactico.py',58),
('expresion -> LLAIZQ expresion LLADER','expresion',3,'p_expresion_grupo','analizador_sintactico.py',59),
('expresion -> CORIZQ expresion CORDER','expresion',3,'p_expresion_grupo','analizador_sintactico.py',60),
('expresion -> expresion MENORQUE expresion','expresion',3,'p_expresion_logicas','analizador_sintactico.py',66),
('expresion -> expresion MAYORQUE expresion','expresion',3,'p_expresion_logicas','analizador_sintactico.py',67),
('expresion -> expresion MENORIGUAL expresion','expresion',3,'p_expresion_logicas','analizador_sintactico.py',68),
('expresion -> expresion MAYORIGUAL expresion','expresion',3,'p_expresion_logicas','analizador_sintactico.py',69),
('expresion -> expresion IGUAL expresion','expresion',3,'p_expresion_logicas','analizador_sintactico.py',70),
('expresion -> expresion DISTINTO expresion','expresion',3,'p_expresion_logicas','analizador_sintactico.py',71),
('expresion -> PARIZQ expresion PARDER MENORQUE PARIZQ expresion PARDER','expresion',7,'p_expresion_logicas','analizador_sintactico.py',72),
('expresion -> PARIZQ expresion PARDER MAYORQUE PARIZQ expresion PARDER','expresion',7,'p_expresion_logicas','analizador_sintactico.py',73),
('expresion -> PARIZQ expresion PARDER MENORIGUAL PARIZQ expresion PARDER','expresion',7,'p_expresion_logicas','analizador_sintactico.py',74),
('expresion -> PARIZQ expresion PARDER MAYORIGUAL PARIZQ expresion PARDER','expresion',7,'p_expresion_logicas','analizador_sintactico.py',75),
('expresion -> PARIZQ expresion PARDER IGUAL PARIZQ expresion PARDER','expresion',7,'p_expresion_logicas','analizador_sintactico.py',76),
('expresion -> PARIZQ expresion PARDER DISTINTO PARIZQ expresion PARDER','expresion',7,'p_expresion_logicas','analizador_sintactico.py',77),
('expresion -> expresion AND expresion','expresion',3,'p_expresion_booleana','analizador_sintactico.py',103),
('expresion -> expresion OR expresion','expresion',3,'p_expresion_booleana','analizador_sintactico.py',104),
('expresion -> expresion NOT expresion','expresion',3,'p_expresion_booleana','analizador_sintactico.py',105),
('expresion -> PARIZQ expresion AND expresion PARDER','expresion',5,'p_expresion_booleana','analizador_sintactico.py',106),
('expresion -> PARIZQ expresion OR expresion PARDER','expresion',5,'p_expresion_booleana','analizador_sintactico.py',107),
('expresion -> PARIZQ expresion NOT expresion PARDER','expresion',5,'p_expresion_booleana','analizador_sintactico.py',108),
('expresion -> ENTERO','expresion',1,'p_expresion_numero','analizador_sintactico.py',126),
('expresion -> COMDOB expresion COMDOB','expresion',3,'p_expresion_cadena','analizador_sintactico.py',130),
('expresion -> IDENTIFICADOR','expresion',1,'p_expresion_nombre','analizador_sintactico.py',134),
]
|
import collections
import json
import os
import threading
import time
from hydrus.core import HydrusConstants as HC
from hydrus.core import HydrusExceptions
from hydrus.core import HydrusImageHandling
from hydrus.core import HydrusThreading
from hydrus.core import HydrusData
from hydrus.core import HydrusGlobals as HG
from hydrus.client import ClientConstants as CC
from hydrus.client import ClientFiles
from hydrus.client import ClientImageHandling
from hydrus.client import ClientParsing
from hydrus.client import ClientRendering
class DataCache(object):
    """A size-bounded, timeout-expiring key/data cache.

    Eviction is oldest-access-first, tracked via an OrderedDict of
    key -> last access time. Public methods take the internal lock;
    underscore-prefixed helpers assume it is already held.
    """

    def __init__(self, controller, cache_size, timeout=1200):
        self._controller = controller
        self._cache_size = cache_size
        self._timeout = timeout
        self._keys_to_data = {}
        self._keys_fifo = collections.OrderedDict()
        self._total_estimated_memory_footprint = 0
        self._lock = threading.Lock()
        # periodic expiry is driven by the controller's maintenance pulse
        self._controller.sub(self, 'MaintainCache', 'memory_maintenance_pulse')

    def _Delete(self, key):
        # unknown keys are tolerated silently
        if key not in self._keys_to_data:
            return
        del self._keys_to_data[key]
        self._RecalcMemoryUsage()

    def _DeleteItem(self):
        # drop the least recently accessed entry
        (oldest_key, last_access_time) = self._keys_fifo.popitem(last=False)
        self._Delete(oldest_key)

    def _RecalcMemoryUsage(self):
        footprints = (data.GetEstimatedMemoryFootprint() for data in self._keys_to_data.values())
        self._total_estimated_memory_footprint = sum(footprints)

    def _TouchKey(self, key):
        # remove-then-reinsert so the OrderedDict moves the key to the fresh end
        self._keys_fifo.pop(key, None)
        self._keys_fifo[key] = HydrusData.GetNow()

    def Clear(self):
        with self._lock:
            self._keys_to_data = {}
            self._keys_fifo = collections.OrderedDict()
            self._total_estimated_memory_footprint = 0

    def AddData(self, key, data):
        with self._lock:
            # re-adding an existing key is a no-op
            if key in self._keys_to_data:
                return
            # make room first; the new item itself may still push us over
            while self._total_estimated_memory_footprint > self._cache_size:
                self._DeleteItem()
            self._keys_to_data[key] = data
            self._TouchKey(key)
            self._RecalcMemoryUsage()

    def DeleteData(self, key):
        with self._lock:
            self._Delete(key)

    def GetData(self, key):
        with self._lock:
            if key not in self._keys_to_data:
                raise Exception('Cache error! Looking for {}, but it was missing.'.format(key))
            self._TouchKey(key)
            return self._keys_to_data[key]

    def GetIfHasData(self, key):
        with self._lock:
            if key not in self._keys_to_data:
                return None
            self._TouchKey(key)
            return self._keys_to_data[key]

    def HasData(self, key):
        with self._lock:
            return key in self._keys_to_data

    def MaintainCache(self):
        with self._lock:
            # pop expired entries from the old end until we hit a fresh one
            while len(self._keys_fifo) > 0:
                (key, last_access_time) = next(iter(self._keys_fifo.items()))
                if not HydrusData.TimeHasPassed(last_access_time + self._timeout):
                    break
                self._DeleteItem()
class LocalBooruCache(object):
    """Caches local booru share records and their media results.

    Share info is hydrated lazily on first access and flushed whenever the
    controller broadcasts a refresh signal.
    """

    def __init__(self, controller):
        self._controller = controller
        self._lock = threading.Lock()
        self._RefreshShares()
        self._controller.sub(self, 'RefreshShares', 'refresh_local_booru_shares')
        self._controller.sub(self, 'RefreshShares', 'restart_client_server_service')

    def _CheckDataUsage(self):
        # refuse service once the booru's monthly bandwidth is used up
        if not self._local_booru_service.BandwidthOK():
            raise HydrusExceptions.InsufficientCredentialsException('This booru has used all its monthly data. Please try again next month.')

    def _CheckFileAuthorised(self, share_key, hash):
        self._CheckShareAuthorised(share_key)
        if hash not in self._GetInfo(share_key)['hashes_set']:
            raise HydrusExceptions.NotFoundException('That file was not found in that share.')

    def _CheckShareAuthorised(self, share_key):
        self._CheckDataUsage()
        timeout = self._GetInfo(share_key)['timeout']
        if timeout is not None and HydrusData.TimeHasPassed(timeout):
            raise HydrusExceptions.NotFoundException('This share has expired.')

    def _GetInfo(self, share_key):
        try:
            info = self._keys_to_infos[share_key]
        except:
            raise HydrusExceptions.NotFoundException('Did not find that share on this booru.')
        if info is None:
            # lazily hydrate the share record on first access
            info = self._controller.Read('local_booru_share', share_key)
            hashes = info['hashes']
            info['hashes_set'] = set(hashes)
            media_results = self._controller.Read('media_results', hashes)
            info['media_results'] = media_results
            info['hashes_to_media_results'] = {media_result.GetHash(): media_result for media_result in media_results}
            self._keys_to_infos[share_key] = info
        return info

    def _RefreshShares(self):
        self._local_booru_service = self._controller.services_manager.GetService(CC.LOCAL_BOORU_SERVICE_KEY)
        # known share keys map to None until their info is lazily loaded
        self._keys_to_infos = {}
        for share_key in self._controller.Read('local_booru_share_keys'):
            self._keys_to_infos[share_key] = None

    def CheckShareAuthorised(self, share_key):
        with self._lock:
            self._CheckShareAuthorised(share_key)

    def CheckFileAuthorised(self, share_key, hash):
        with self._lock:
            self._CheckFileAuthorised(share_key, hash)

    def GetGalleryInfo(self, share_key):
        with self._lock:
            self._CheckShareAuthorised(share_key)
            info = self._GetInfo(share_key)
            return (info['name'], info['text'], info['timeout'], info['media_results'])

    def GetMediaResult(self, share_key, hash):
        with self._lock:
            return self._GetInfo(share_key)['hashes_to_media_results'][hash]

    def GetPageInfo(self, share_key, hash):
        with self._lock:
            self._CheckFileAuthorised(share_key, hash)
            info = self._GetInfo(share_key)
            return (info['name'], info['text'], info['timeout'], info['hashes_to_media_results'][hash])

    def RefreshShares(self, *args, **kwargs):
        with self._lock:
            self._RefreshShares()
class ParsingCache(object):
    """Short-lived caches of parsed HTML soups and JSON objects, keyed by the
    raw source text, so repeat parses of identical text are free."""

    def __init__(self):
        self._next_clean_cache_time = HydrusData.GetNow()
        self._html_to_soups = {}
        self._json_to_jsons = {}
        self._lock = threading.Lock()

    def _CleanCache(self):
        # throttled: does nothing until the scheduled time has passed
        if not HydrusData.TimeHasPassed(self._next_clean_cache_time):
            return
        for cache in (self._html_to_soups, self._json_to_jsons):
            # entries untouched for more than ten seconds are dropped
            dead_datas = [data for (data, (last_accessed, parsed_object)) in cache.items() if HydrusData.TimeHasPassed(last_accessed + 10)]
            for dead_data in dead_datas:
                del cache[dead_data]
        self._next_clean_cache_time = HydrusData.GetNow() + 5

    def CleanCache(self):
        with self._lock:
            self._CleanCache()

    def GetJSON(self, json_text):
        with self._lock:
            now = HydrusData.GetNow()
            if json_text not in self._json_to_jsons:
                self._json_to_jsons[json_text] = (now, json.loads(json_text))
            (last_accessed, json_object) = self._json_to_jsons[json_text]
            # refresh the access time on cache hits
            if last_accessed != now:
                self._json_to_jsons[json_text] = (now, json_object)
            if len(self._json_to_jsons) > 10:
                self._CleanCache()
            return json_object

    def GetSoup(self, html):
        with self._lock:
            now = HydrusData.GetNow()
            if html not in self._html_to_soups:
                self._html_to_soups[html] = (now, ClientParsing.GetSoup(html))
            (last_accessed, soup) = self._html_to_soups[html]
            # refresh the access time on cache hits
            if last_accessed != now:
                self._html_to_soups[html] = (now, soup)
            if len(self._html_to_soups) > 10:
                self._CleanCache()
            return soup
class RenderedImageCache(object):
    """Wraps a DataCache of ImageRenderer objects keyed by file hash."""

    def __init__(self, controller):
        self._controller = controller
        # cache limits come from the user's options
        cache_size = self._controller.options['fullscreen_cache_size']
        cache_timeout = self._controller.new_options.GetInteger('image_cache_timeout')
        self._data_cache = DataCache(self._controller, cache_size, timeout=cache_timeout)

    def Clear(self):
        self._data_cache.Clear()

    def GetImageRenderer(self, media):
        # the file hash is the cache key
        key = media.GetHash()
        image_renderer = self._data_cache.GetIfHasData(key)
        if image_renderer is None:
            # cache miss: build a renderer and remember it
            image_renderer = ClientRendering.ImageRenderer(media)
            self._data_cache.AddData(key, image_renderer)
        return image_renderer

    def HasImageRenderer(self, hash):
        return self._data_cache.HasData(hash)
class ThumbnailCache( object ):
    # Caches rendered thumbnail bitmaps and feeds a long-running worker
    # (self.MainLoop, defined later in the class) via waterfall/regeneration
    # queues.

    def __init__( self, controller ):
        self._controller = controller
        # cache size and expiry timeout come from user options
        cache_size = self._controller.options[ 'thumbnail_cache_size' ]
        cache_timeout = self._controller.new_options.GetInteger( 'thumbnail_cache_timeout' )
        self._data_cache = DataCache( self._controller, cache_size, timeout = cache_timeout )
        # mime -> ease-of-regeneration score; populated by _InitialiseMagicMimeScores
        self._magic_mime_thumbnail_ease_score_lookup = {}
        self._InitialiseMagicMimeScores()
        self._lock = threading.Lock()
        # only the first thumbnail error is shown prominently; later ones are logged
        self._thumbnail_error_occurred = False
        # pending thumbs to render, plus a set for quick membership checks
        self._waterfall_queue_quick = set()
        self._waterfall_queue = []
        self._waterfall_queue_empty_event = threading.Event()
        # media scheduled for thumbnail regeneration from the source file
        self._delayed_regeneration_queue_quick = set()
        self._delayed_regeneration_queue = []
        self._waterfall_event = threading.Event()
        # fallback thumbs (e.g. the 'hydrus' default), keyed by name
        self._special_thumbs = {}
        self.Clear()
        self._controller.CallToThreadLongRunning( self.MainLoop )
        self._controller.sub( self, 'Clear', 'reset_thumbnail_cache' )
        self._controller.sub( self, 'ClearThumbnails', 'clear_thumbnails' )
def _GetThumbnailHydrusBitmap( self, display_media ):
    # Load (regenerating/resizing as needed) the thumbnail for the given media
    # and return it as a hydrus bitmap. Any unrecoverable failure falls back to
    # the special 'hydrus' thumb.
    bounding_dimensions = self._controller.options[ 'thumbnail_dimensions' ]
    hash = display_media.GetHash()
    mime = display_media.GetMime()
    locations_manager = display_media.GetLocationsManager()
    try:
        path = self._controller.client_files_manager.GetThumbnailPath( display_media )
    except HydrusExceptions.FileMissingException as e:
        # only report the miss when we expected the file to exist locally
        if locations_manager.IsLocal():
            summary = 'Unable to get thumbnail for file {}.'.format( hash.hex() )
            self._HandleThumbnailException( e, summary )
        return self._special_thumbs[ 'hydrus' ]
    try:
        numpy_image = ClientImageHandling.GenerateNumPyImage( path, mime )
    except Exception as e:
        try:
            # file is malformed, let's force a regen
            self._controller.files_maintenance_manager.RunJobImmediately( [ display_media ], ClientFiles.REGENERATE_FILE_DATA_JOB_FORCE_THUMBNAIL, pub_job_key = False )
        except Exception as e:
            summary = 'The thumbnail for file {} was not loadable. An attempt to regenerate it failed.'.format( hash.hex() )
            self._HandleThumbnailException( e, summary )
            return self._special_thumbs[ 'hydrus' ]
        # regen succeeded; try loading once more
        try:
            numpy_image = ClientImageHandling.GenerateNumPyImage( path, mime )
        except Exception as e:
            summary = 'The thumbnail for file {} was not loadable. It was regenerated, but that file would not render either. Your image libraries or hard drive connection are unreliable. Please inform the hydrus developer what has happened.'.format( hash.hex() )
            self._HandleThumbnailException( e, summary )
            return self._special_thumbs[ 'hydrus' ]
    ( current_width, current_height ) = HydrusImageHandling.GetResolutionNumPy( numpy_image )
    ( media_width, media_height ) = display_media.GetResolution()
    ( expected_width, expected_height ) = HydrusImageHandling.GetThumbnailResolution( ( media_width, media_height ), bounding_dimensions )
    exactly_as_expected = current_width == expected_width and current_height == expected_height
    # swapped width/height is also accepted (presumably rotated thumbs -- TODO confirm)
    rotation_exception = current_width == expected_height and current_height == expected_width
    correct_size = exactly_as_expected or rotation_exception
    if not correct_size:
        it_is_definitely_too_big = current_width >= expected_width and current_height >= expected_height
        if it_is_definitely_too_big:
            if HG.file_report_mode:
                HydrusData.ShowText( 'Thumbnail {} too big.'.format( hash.hex() ) )
            # the thumb we have is larger than desired. we can use it to generate what we actually want without losing significant data
            # this is _resize_, not _thumbnail_, because we already know the dimensions we want
            # and in some edge cases, doing getthumbresolution on existing thumb dimensions results in float/int conversion imprecision and you get 90px/91px regen cycles that never get fixed
            numpy_image = HydrusImageHandling.ResizeNumPyImage( numpy_image, ( expected_width, expected_height ) )
            if locations_manager.IsLocal():
                # we have the master file, so it is safe to save our resized thumb back to disk since we can regen from source if needed
                if HG.file_report_mode:
                    HydrusData.ShowText( 'Thumbnail {} too big, saving back to disk.'.format( hash.hex() ) )
                try:
                    try:
                        thumbnail_bytes = HydrusImageHandling.GenerateThumbnailBytesNumPy( numpy_image, mime )
                    except HydrusExceptions.CantRenderWithCVException:
                        # cv could not do it; fall back to the static-image path
                        thumbnail_bytes = HydrusImageHandling.GenerateThumbnailBytesFromStaticImagePath( path, ( expected_width, expected_height ), mime )
                except:
                    # NOTE(review): 'e' below may be unbound on this path if no
                    # earlier 'except ... as e' clause ran -- verify before relying on it
                    summary = 'The thumbnail for file {} was too large, but an attempt to shrink it failed.'.format( hash.hex() )
                    self._HandleThumbnailException( e, summary )
                    return self._special_thumbs[ 'hydrus' ]
                try:
                    self._controller.client_files_manager.AddThumbnailFromBytes( hash, thumbnail_bytes, silent = True )
                    self._controller.files_maintenance_manager.ClearJobs( { hash }, ClientFiles.REGENERATE_FILE_DATA_JOB_REFIT_THUMBNAIL )
                except:
                    # NOTE(review): same potential unbound 'e' as above
                    summary = 'The thumbnail for file {} was too large, but an attempt to save back the shrunk file failed.'.format( hash.hex() )
                    self._HandleThumbnailException( e, summary )
                    return self._special_thumbs[ 'hydrus' ]
        else:
            # the thumb we have is either too small or completely messed up due to a previous ratio misparse
            media_is_same_size_as_current_thumb = current_width == media_width and current_height == media_height
            if media_is_same_size_as_current_thumb:
                # the thumb is smaller than expected, but this is a 32x32 pixilart image or whatever, so no need to scale
                if HG.file_report_mode:
                    HydrusData.ShowText( 'Thumbnail {} too small due to small source file.'.format( hash.hex() ) )
                pass
            else:
                numpy_image = HydrusImageHandling.ResizeNumPyImage( numpy_image, ( expected_width, expected_height ) )
                if locations_manager.IsLocal():
                    # we have the master file, so we should regen the thumb from source
                    if HG.file_report_mode:
                        HydrusData.ShowText( 'Thumbnail {} too small, scheduling regeneration from source.'.format( hash.hex() ) )
                    delayed_item = display_media.GetMediaResult()
                    with self._lock:
                        if delayed_item not in self._delayed_regeneration_queue_quick:
                            self._delayed_regeneration_queue_quick.add( delayed_item )
                            self._delayed_regeneration_queue.append( delayed_item )
                else:
                    # we do not have the master file, so we have to scale up from what we have
                    if HG.file_report_mode:
                        HydrusData.ShowText( 'Thumbnail {} was too small, only scaling up due to no local source.'.format( hash.hex() ) )
    hydrus_bitmap = ClientRendering.GenerateHydrusBitmapFromNumPyImage( numpy_image )
    return hydrus_bitmap
def _HandleThumbnailException( self, e, summary ):
    """Report a thumbnail load/generation error to the user.
    
    The first error in a session is surfaced as a full popup; every
    subsequent one is only printed to the log to avoid spamming.
    """
    if not self._thumbnail_error_occurred:
        self._thumbnail_error_occurred = True
        message_parts = [
            'A thumbnail error has occurred. The problem thumbnail will appear with the default \'hydrus\' symbol. You may need to take hard drive recovery actions, and if the error is not obviously fixable, you can contact hydrus dev for additional help. Specific information for this first error follows. Subsequent thumbnail errors in this session will be silently printed to the log.',
            str( e ),
            summary
        ]
        HydrusData.ShowText( ( os.linesep * 2 ).join( message_parts ) )
    else:
        HydrusData.Print( summary )
def _InitialiseMagicMimeScores( self ):
    """Populate the mime -> 'ease of regeneration' score lookup used to sort work queues."""
    # render thumbs in order of ease of regeneration, so the cheap stuff hits
    # the screen fast and the expensive ffmpeg video work comes last
    scores = self._magic_mime_thumbnail_ease_score_lookup
    for mime in HC.ALLOWED_MIMES:
        scores[ mime ] = 5
    # default filetype thumbs are easiest
    scores[ None ] = 0
    scores[ HC.APPLICATION_UNKNOWN ] = 0
    # note: assignment order matters here, since a mime may be in several groups
    for ( mime_group, score ) in ( ( HC.APPLICATIONS, 0 ), ( HC.AUDIO, 0 ), ( HC.IMAGES, 1 ) ):
        for mime in mime_group:
            scores[ mime ] = score
    # these two need a frame decode, so they cost a bit more than a static image
    scores[ HC.IMAGE_APNG ] = 2
    scores[ HC.IMAGE_GIF ] = 2
    # ffmpeg hellzone
    for ( mime_group, score ) in ( ( HC.VIDEO, 3 ), ( HC.ANIMATIONS, 3 ) ):
        for mime in mime_group:
            scores[ mime ] = score
def _RecalcQueues( self ):
    """Rebuild and re-sort the waterfall and delayed-regeneration work lists from their quick-lookup sets."""
    # sorting by hash is nicely random and plays well with a defragged disc,
    # while the magic mime score front-loads the cheap thumbnail types
    def waterfall_key( item ):
        ( page_key, media ) = item
        display_media = media.GetDisplayMedia()
        if display_media is None:
            return ( self._magic_mime_thumbnail_ease_score_lookup[ None ], '' )
        return ( self._magic_mime_thumbnail_ease_score_lookup[ display_media.GetMime() ], display_media.GetHash() )
    # work is popped off the end of the list, hence the descending sort
    self._waterfall_queue = sorted( self._waterfall_queue_quick, key = waterfall_key, reverse = True )
    if len( self._waterfall_queue ) == 0:
        self._waterfall_queue_empty_event.set()
    else:
        self._waterfall_queue_empty_event.clear()
    def regen_key( media_result ):
        return ( self._magic_mime_thumbnail_ease_score_lookup[ media_result.GetMime() ], media_result.GetHash() )
    # same deal: popped off the end, so reverse
    self._delayed_regeneration_queue = sorted( self._delayed_regeneration_queue_quick, key = regen_key, reverse = True )
def CancelWaterfall( self, page_key: bytes, medias: list ):
    """Remove the given medias from this page's waterfall queue.
    
    Any of them that were awaiting immediate thumbnail regeneration are
    handed off to the slower background maintenance system instead.
    """
    with self._lock:
        self._waterfall_queue_quick.difference_update( ( page_key, media ) for media in medias )
        display_medias = { media.GetDisplayMedia() for media in medias }
        display_medias.discard( None )
        media_results = { display_media.GetMediaResult() for display_media in display_medias }
        outstanding_delayed_hashes = { media_result.GetHash() for media_result in media_results if media_result in self._delayed_regeneration_queue_quick }
        if len( outstanding_delayed_hashes ) > 0:
            # do not drop the pending regen work--reschedule it as maintenance
            self._controller.files_maintenance_manager.ScheduleJob( outstanding_delayed_hashes, ClientFiles.REGENERATE_FILE_DATA_JOB_FORCE_THUMBNAIL )
        self._delayed_regeneration_queue_quick.difference_update( media_results )
        self._RecalcQueues()
def Clear( self ):
    """Drop every cached thumbnail, rebuild the special static thumbs, and empty all work queues."""
    with self._lock:
        self._data_cache.Clear()
        self._special_thumbs = {}
        # one static placeholder thumb per broad filetype category
        names = [ 'hydrus', 'pdf', 'psd', 'clip', 'audio', 'video', 'zip' ]
        bounding_dimensions = self._controller.options[ 'thumbnail_dimensions' ]
        for name in names:
            path = os.path.join( HC.STATIC_DIR, name + '.png' )
            numpy_image = ClientImageHandling.GenerateNumPyImage( path, HC.IMAGE_PNG )
            numpy_image_resolution = HydrusImageHandling.GetResolutionNumPy( numpy_image )
            # scale the static png to fit the user's configured thumbnail dimensions
            target_resolution = HydrusImageHandling.GetThumbnailResolution( numpy_image_resolution, bounding_dimensions )
            numpy_image = HydrusImageHandling.ResizeNumPyImage( numpy_image, target_resolution )
            hydrus_bitmap = ClientRendering.GenerateHydrusBitmapFromNumPyImage( numpy_image )
            self._special_thumbs[ name ] = hydrus_bitmap
        self._controller.pub( 'notify_complete_thumbnail_reset' )
        # wipe pending work; _RecalcQueues will also set the 'queue empty' event
        self._waterfall_queue_quick = set()
        self._delayed_regeneration_queue_quick = set()
        self._RecalcQueues()
def ClearThumbnails( self, hashes ):
    """Drop any cached thumbnail bitmaps for the given file hashes."""
    with self._lock:
        for file_hash in hashes:
            self._data_cache.DeleteData( file_hash )
def WaitUntilFree( self ):
    """Block until the waterfall queue is empty.
    
    Raises:
        HydrusExceptions.ShutdownException: if the client starts shutting down while waiting.
    """
    while True:
        if HG.view_shutdown:
            raise HydrusExceptions.ShutdownException( 'Application shutting down!' )
        # wait() returns True once the 'queue empty' event is set
        if self._waterfall_queue_empty_event.wait( 1 ):
            return
def GetThumbnail( self, media ):
    """Return a HydrusBitmap thumbnail for the given media, generating and caching it on a miss.
    
    Falls back to one of the special static thumbs for mimes that have no
    real thumbnail, and to the default 'hydrus' thumb when the media has no
    display media or generation fails.
    """
    display_media = media.GetDisplayMedia()
    if display_media is None:
        # sometimes media can get switched around during a collect event, and if this happens during waterfall, we have a problem here
        # just return for now, we'll see how it goes
        return self._special_thumbs[ 'hydrus' ]
    locations_manager = display_media.GetLocationsManager()
    if not locations_manager.ShouldIdeallyHaveThumbnail():
        return self._special_thumbs[ 'hydrus' ]
    mime = display_media.GetMime()
    if mime in HC.MIMES_WITH_THUMBNAILS:
        hash = display_media.GetHash()
        result = self._data_cache.GetIfHasData( hash )
        if result is None:
            try:
                hydrus_bitmap = self._GetThumbnailHydrusBitmap( display_media )
            except Exception:
                # was a bare 'except:', which also swallowed KeyboardInterrupt/SystemExit;
                # any real generation failure still falls back to the default thumb
                hydrus_bitmap = self._special_thumbs[ 'hydrus' ]
            self._data_cache.AddData( hash, hydrus_bitmap )
        else:
            hydrus_bitmap = result
        return hydrus_bitmap
    elif mime in HC.AUDIO: return self._special_thumbs[ 'audio' ]
    elif mime in HC.VIDEO: return self._special_thumbs[ 'video' ]
    elif mime == HC.APPLICATION_PDF: return self._special_thumbs[ 'pdf' ]
    elif mime == HC.APPLICATION_PSD: return self._special_thumbs[ 'psd' ]
    elif mime == HC.APPLICATION_CLIP: return self._special_thumbs[ 'clip' ]
    elif mime in HC.ARCHIVES: return self._special_thumbs[ 'zip' ]
    else: return self._special_thumbs[ 'hydrus' ]
def HasThumbnailCached( self, media ):
    """Return True if this media's thumbnail needs no further work before display."""
    display_media = media.GetDisplayMedia()
    if display_media is None:
        return True
    mime = display_media.GetMime()
    if mime not in HC.MIMES_WITH_THUMBNAILS:
        # the special static thumbs are always available
        return True
    return self._data_cache.HasData( display_media.GetHash() )
def Waterfall( self, page_key, medias ):
    """Queue the given medias for background thumbnail rendering on the given page and wake the worker."""
    with self._lock:
        new_work = ( ( page_key, media ) for media in medias )
        self._waterfall_queue_quick.update( new_work )
        self._RecalcQueues()
        self._waterfall_event.set()
def MainLoop( self ):
    """Worker loop: render waterfall thumbnails in small time-boxed batches, then do one delayed regen job when idle."""
    while not HydrusThreading.IsThreadShuttingDown():
        time.sleep( 0.00001 )
        with self._lock:
            do_wait = len( self._waterfall_queue ) == 0 and len( self._delayed_regeneration_queue ) == 0
        if do_wait:
            # nothing to do--sleep until Waterfall() signals new work (or 1s passes)
            self._waterfall_event.wait( 1 )
            self._waterfall_event.clear()
        start_time = HydrusData.GetNowPrecise()
        stop_time = start_time + 0.005 # a bit of a typical frame
        page_keys_to_rendered_medias = collections.defaultdict( list )
        num_done = 0
        max_at_once = 16
        # render until the frame budget or the per-batch cap is exhausted
        while not HydrusData.TimeHasPassedPrecise( stop_time ) and num_done <= max_at_once:
            with self._lock:
                if len( self._waterfall_queue ) == 0:
                    break
                result = self._waterfall_queue.pop()
                if len( self._waterfall_queue ) == 0:
                    self._waterfall_queue_empty_event.set()
                self._waterfall_queue_quick.discard( result )
            ( page_key, media ) = result
            if media.GetDisplayMedia() is not None:
                # this renders and caches the thumb as a side effect
                self.GetThumbnail( media )
                page_keys_to_rendered_medias[ page_key ].append( media )
                num_done += 1
        if len( page_keys_to_rendered_medias ) > 0:
            for ( page_key, rendered_medias ) in page_keys_to_rendered_medias.items():
                self._controller.pub( 'waterfall_thumbnails', page_key, rendered_medias )
            time.sleep( 0.00001 )
        # now we will do regen if appropriate
        with self._lock:
            # got more important work or no work to do
            if len( self._waterfall_queue ) > 0 or len( self._delayed_regeneration_queue ) == 0 or HG.client_controller.CurrentlyPubSubbing():
                continue
            media_result = self._delayed_regeneration_queue.pop()
            self._delayed_regeneration_queue_quick.discard( media_result )
        if HG.file_report_mode:
            hash = media_result.GetHash()
            HydrusData.ShowText( 'Thumbnail {} now regenerating from source.'.format( hash.hex() ) )
        try:
            # blocking call, deliberately outside the lock
            self._controller.files_maintenance_manager.RunJobImmediately( [ media_result ], ClientFiles.REGENERATE_FILE_DATA_JOB_FORCE_THUMBNAIL, pub_job_key = False )
        except HydrusExceptions.FileMissingException:
            # source file gone; nothing to regenerate from
            pass
        except Exception as e:
            hash = media_result.GetHash()
            summary = 'The thumbnail for file {} was incorrect, but a later attempt to regenerate it or load the new file back failed.'.format( hash.hex() )
            self._HandleThumbnailException( e, summary )
|
import pandas as pd
import numpy as np
import math
class DPcovariance:
    """Differentially private covariance release and covariance-based linear regression.

    Implementation is based off of https://github.com/privacytoolsproject/PSI-Library
    """

    def __init__(self, n, cols, rng, global_eps, epsilon_dist=None, alpha=0.05):
        # n: number of rows in the data; cols: column names to use;
        # rng: DataFrame of per-column [low, high] ranges; global_eps: total privacy budget;
        # epsilon_dist: optional per-calculation shares of the budget; alpha: significance level.
        # TODO finish adding functionality for intercept
        intercept = False
        # The following variables are for different ways of setting up the epsilon value for DP covariance calculation
        # There is infrastructure for them, but we're currently choosing not to expose them.
        epsilon = None
        accuracy = None
        impute_rng = None
        accuracy_vals = None
        self.num_rows = n
        self.columns = cols
        self.intercept = intercept
        self.alpha = alpha
        self.rng = check_range(rng)
        # per-covariance-entry L1 sensitivity, used to scale the Laplace noise
        self.sens = covariance_sensitivity(n, rng, intercept)
        if impute_rng is None:
            self.imputeRng = rng
        else:
            self.imputeRng = impute_rng
        if self.intercept:
            self.columns = ["intercept"] + self.columns
        else:
            self.columns = self.columns
        s = len(self.columns)
        # number of unique entries in the lower triangle of an s x s covariance matrix
        output_length = (np.zeros((s, s))[np.tril_indices(s)]).size
        # Distribute epsilon across all covariances that will be calculated
        # Option 1: explicit per-calculation epsilons (currently never exposed, epsilon is None)
        if epsilon is not None:
            self.epsilon = check_epsilon(epsilon, expected_length=output_length)
            self.globalEps = sum(self.epsilon)
        # Option 2: Enter global epsilon value and vector of percentages specifying how to split global
        # epsilon between covariance calculations.
        elif global_eps is not None and epsilon_dist is not None:
            self.globalEps = check_global_epsilon(global_eps)
            self.epsilonDist = check_epsilon_dist(epsilon_dist, output_length)
            self.epsilon = distribute_epsilon(self.globalEps, epsilon_dist=epsilon_dist)
            self.accuracyVals = laplace_get_accuracy(self.sens, self.epsilon, self.alpha)
        # Option 3: Only enter global epsilon, and have it be split evenly between covariance calculations.
        elif global_eps is not None:
            self.globalEps = check_global_epsilon(global_eps)
            self.epsilon = distribute_epsilon(self.globalEps, n_calcs=output_length)
            self.accuracyVals = laplace_get_accuracy(self.sens, self.epsilon, self.alpha)
        # Option 4: Enter an accuracy value instead of an epsilon, and calculate individual epsilons with this accuracy.
        elif accuracy is not None:
            self.accuracy = check_accuracy(accuracy)
            self.epsilon = laplace_get_epsilon(self.sens, self.accuracy, self.alpha)
            self.globalEps = sum(self.epsilon)
        # Option 5: Enter vector of accuracy values, and calculate ith epsilon value from ith accuracy value
        elif accuracy_vals is not None:
            self.accuracyVals = check_accuracy_vals(accuracy_vals, output_length)
            self.epsilon = laplace_get_epsilon(self.sens, self.accuracyVals, self.alpha)
            self.globalEps = sum(self.epsilon)

    def make_covar_symmetric(self, covar):
        """
        Converts unique private covariances into symmetric matrix

        Args:
            covar (list/array): differentially privately release of elements in lower triangle of covariance matrix

        Returns:
            A symmetric differentially private covariance matrix (pandas DataFrame
            labelled by self.columns on both axes)
        """
        n = len(self.columns)
        indices = np.triu_indices(n)
        m = np.zeros((n, n))
        m[indices] = covar
        m = m.T
        # mirror the filled lower triangle across the diagonal
        m = np.tril(m) + np.triu(m.T, 1)
        df = pd.DataFrame(m, columns=self.columns, index=self.columns)
        return df

    def release(self, data):
        """Return the Laplace-noised lower-triangle covariance entries for self.columns as a list."""
        # clamp to the declared ranges, then impute missing values uniformly within them
        new_data = censor_data(data[self.columns], self.rng)
        new_data = fill_missing(new_data, impute_rng=self.imputeRng)
        # NOTE(review): new_data is not used below -- the covariance is computed from the
        # original `data` (data[self.columns] is a copy, so the censor/impute work is
        # discarded); confirm whether covar(new_data.values.T, ...) was intended
        # TODO: add intercept functionality
        def covar(x, intercept=False):
            # x: variables-as-rows array, as np.cov expects by default
            if intercept:
                pass  # TODO: Find python equivalent for the following R code: `x < - cbind(1, x)`
            covariance = np.cov(x)
            return list(covariance[np.tril_indices(covariance.shape[0])])
        def q_lap_iter(p, mu=0, b=1):
            # apply the Laplace quantile function elementwise; b is a per-entry scale vector
            for i in range(len(p)):
                p[i] = q_lap(p[i], mu, b[i])
            return p
        def q_lap(elem, mu=0, b=1):
            # inverse CDF of the Laplace(mu, b) distribution
            if elem < 0.5:
                return mu + b * np.log(2 * elem)
            else:
                return mu - b * np.log(2 - 2 * elem)
        def dp_noise(n, noise_scale):
            # draw n Laplace samples via inverse-transform sampling
            u = np.random.uniform(size=n)
            return q_lap_iter(u, b=noise_scale)
        true_val = covar(data.values.T, self.intercept)
        scale = self.sens / self.epsilon
        val = np.array(true_val) + dp_noise(n=len(true_val), noise_scale=scale)
        return list(val)

    # TODO: this implementation only works for one dependent variable right now
    def get_linear_regression(self, data, x_names, y_name, intercept=False):
        """
        Takes in data, lists of feature names and target names, and whether or not
        we should calculate a y-intercept; and returns a DP linear regression model

        Args:
            data (Dataframe): the data that will be used to make the linear regression
            x_names (list): list of names of the features (i.e. independent variables) to use
            y_name (string): name of the target (i.e. dependent variable) to use
            intercept (boolean): true if the lin-reg equation should have a y-intercept, false if not

        Return:
            linear regression model (Dataframe) in the following format:
            Each independent variable gets its own row; there are two columns: 'Estimate' and 'Std. Error'.
            'Estimate' is the calculated coefficient for that row's corresponding independent variable,
            'Std. Error' is self evident.
            Here is an example return value given intercept=FALSE, independent variables 'Height' and 'Volume'
            and dependent variable 'Girth':
                        Estimate    Std. Error
                Height  -0.04548    0.02686
                Volume   0.19518    0.01041
        """
        covar_matrix = self.make_covar_symmetric(self.release(data))
        return cov_method_lin_reg(covar_matrix, self.num_rows, x_names, y_name, intercept)
def cov_method_lin_reg(release, num_rows, x_names, y_name, intercept=False):
    """
    Takes in a differentially privately released covariance matrix, the number of rows in the
    original data, whether or not a y-intercept should be calculated, a list of
    feature names, and a target name; and returns a DP linear regression model

    Args:
        release (Dataframe): differentially privately released covariance matrix that will be used to make the linear regression
        num_rows (int): the number of rows in the original data
        x_names (list): list of names of the features (i.e. independent variables) to use
        y_name (string): name of the target (i.e. dependent variable) to use
        intercept (boolean): true if the lin-reg equation should have a y-intercept, false if not

    Returns:
        linear regression model (Dataframe) with one row per independent variable and
        two columns, 'Estimate' (the coefficient) and 'Std. Error'.

    Raises:
        ValueError: if the matrix is singular / not positive definite, or if the
            requested names are not present in the release.
    """
    eigenvals, _ = np.linalg.eig(release.values)
    if not all(ev != 0 for ev in eigenvals):
        raise ValueError("Matrix is not invertible")
    if not all(ev > 0 for ev in eigenvals):
        raise ValueError("Matrix is not positive definite")
    # Find locations corresponding to the given x & y names
    loc_vec = [False] * release.shape[0]
    row_labels = release.index.values
    x_loc = []
    y_loc = None
    for index in range(len(row_labels)):
        if row_labels[index] in x_names:
            loc_vec[index] = True
            x_loc.append(index)
        if row_labels[index] == y_name:
            y_loc = index
    # BUG FIX: x_loc is always a list, so the old `x_loc is None` test could never
    # fire and an empty match slipped through to fail obscurely downstream.
    if len(x_loc) == 0 or y_loc is None:
        raise ValueError("Names aren't found in the release")
    # Use a sweep to find the coefficient of the independent variable in
    # the linear regression corresponding to the covariance matrix
    sweep = amsweep(release.values / num_rows, np.array(loc_vec))
    coef = sweep[y_loc, x_loc]
    # Calculate the standard error
    submatrix = release.values[x_loc, :][:, x_loc]
    se = list(map(np.sqrt, sweep[y_loc, y_loc] * np.diag(np.linalg.inv(submatrix))))
    new_x_names = [release.index.values[x_loc[i]] for i in range(len(x_loc))]

    def round_5(elem):
        return round(elem, 5)

    # Round both values to account for floating point error, put in Series
    estimates = pd.Series(map(round_5, coef), index=new_x_names, name="Estimate")
    std_error = pd.Series(map(round_5, se), index=new_x_names, name="Std. Error")
    return pd.DataFrame([estimates, std_error]).transpose()
def check_accuracy_vals(accuracy_vals, expected_length):
    """Validate a vector of accuracy values: correct length and all strictly positive.

    Returns the vector unchanged; raises ValueError otherwise.
    """
    # messages previously said "Epsilon"/"epsilon" -- this function validates accuracies
    if len(accuracy_vals) != expected_length:
        raise ValueError("Accuracy parameter has improper length")
    for accuracy in accuracy_vals:
        if accuracy <= 0:
            raise ValueError("Accuracy values must be greater than zero")
    return accuracy_vals
def laplace_get_epsilon(sens, accuracy, alpha=0.05):
    """Invert the Laplace accuracy formula: epsilon giving the target accuracy at confidence 1 - alpha."""
    scale = sens / accuracy
    return scale * np.log(1 / alpha)
def check_accuracy(accuracy):
    """Validate that a single accuracy value is strictly positive; return it unchanged.

    Raises:
        ValueError: if accuracy <= 0.
    """
    if accuracy <= 0:
        # message previously (and wrongly) referred to epsilon
        raise ValueError("Accuracy must be a value greater than zero")
    return accuracy
def laplace_get_accuracy(sens, epsilon, alpha=0.05):
    """Laplace-mechanism accuracy at confidence 1 - alpha for the given sensitivity and epsilon."""
    scale = sens / epsilon
    return scale * np.log(1 / alpha)
def distribute_epsilon(global_eps, n_calcs=None, epsilon_dist=None):
    """Split a global epsilon across covariance calculations.

    With epsilon_dist given, each calculation receives its proportional share;
    otherwise the budget is divided evenly over n_calcs.
    """
    if epsilon_dist is not None:
        return [share * global_eps for share in epsilon_dist]
    return [global_eps / n_calcs] * n_calcs
def check_epsilon_dist(epsilon_dist, expected_length):
    """Validate a vector of epsilon proportions: right length, all positive, summing to 1.

    Returns the vector unchanged; raises ValueError otherwise.
    """
    if len(epsilon_dist) != expected_length:
        raise ValueError("Epsilon parameter has improper length")
    for eps in epsilon_dist:
        if eps <= 0:
            raise ValueError("All values in epsilonDist must be a value greater than zero")
    # BUG FIX: exact float equality rejected valid inputs such as [0.1] * 10,
    # whose sum is 0.9999999999999999; compare with a tolerance instead.
    if not math.isclose(sum(epsilon_dist), 1.0, rel_tol=1e-9):
        raise ValueError("All values in epsilonDist must sum to 1")
    return epsilon_dist
def check_epsilon(epsilon, expected_length):
    """Validate a vector of per-calculation epsilons: right length, each in (0, 3).

    Returns the vector unchanged; raises ValueError otherwise.
    """
    if len(epsilon) != expected_length:
        raise ValueError("Epsilon parameter has improper length")
    for eps in epsilon:
        if eps <= 0:
            # stray '(' removed from the start of this message
            raise ValueError("Privacy parameter epsilon must be a value greater than zero")
        elif eps >= 3:
            raise ValueError("This is a higher global value than recommended for most cases")
    return epsilon
def check_global_epsilon(eps):
    """Validate that a global epsilon lies in (0, 3); return it unchanged.

    Raises:
        ValueError: if eps <= 0 or eps >= 3.
    """
    if eps <= 0:
        # stray '(' removed from the start of this message
        raise ValueError("Privacy parameter epsilon must be a value greater than zero")
    elif eps >= 3:
        raise ValueError("This is a higher global value than recommended for most cases")
    return eps
def covariance_sensitivity(n, rng, intercept):
    """Sensitivity of each lower-triangle covariance entry, as a flat numpy array.

    rng is a DataFrame of per-column [low, high] ranges (integer column labels,
    as produced by check_range); with intercept, a zero-width column is
    prepended. Entry (i, j) has sensitivity (2 / n) * span_i * span_j.
    """
    spans = [rng[col][1] - rng[col][0] for col in range(rng.shape[1])]
    if intercept:
        spans = [0] + spans
    const = 2 / n
    sensitivity = [
        const * spans[i] * spans[j]
        for i in range(len(spans))
        for j in range(i, len(spans))
    ]
    return np.array(sensitivity)
def check_range(rng):
    """Normalize a range DataFrame: integer column labels and each column ordered [low, high].

    Mutates and returns *rng*.
    """
    rng.columns = list(range(rng.shape[1]))
    for col in range(rng.shape[1]):
        # BUG FIX: `rng[col] = rng[col].sort_values()` was a no-op, because pandas
        # realigns the sorted Series back onto its original index on assignment;
        # assign the raw sorted values instead so each column really is [low, high].
        rng[col] = rng[col].sort_values().values
    return rng
def fill_missing_1D(x, low, high):
    """Return a copy of Series x with each NaN replaced by a uniform draw from [low, high)."""
    n_missing = x.isnull().sum()
    # pre-draw one uniform value per missing entry, scaled into [low, high)
    draws = [u * (high - low) + low for u in np.random.uniform(size=n_missing)]
    def substitute(v):
        return draws.pop() if math.isnan(v) else v
    return x.apply(substitute)
def fill_missing(data, impute_rng):
    """Impute NaNs in every column of data using that column's [low, high] range.

    Columns are addressed by integer label; mutates and returns data.
    """
    for col in range(data.shape[1]):
        low = impute_rng[col][0]
        high = impute_rng[col][1]
        data[col] = fill_missing_1D(data[col], low, high)
    return data
def censor(value, low, high):
    """Clamp value to the closed interval [low, high]."""
    return min(max(value, low), high)
def censor_data_1D(x, l, h):
    """Clamp every element of Series x into [l, h]."""
    return x.apply(lambda v: censor(v, l, h))
def censor_data(data, rng):
    """Clamp each column of data into its [low, high] range from rng.

    Renames data's columns to 0..k-1 and mutates *data* in place, returning it.
    (The previous `new_data = data` alias was not a copy and only obscured the
    in-place mutation, so it has been removed.)
    """
    data.columns = list(range(data.shape[1]))
    rng = check_range(rng)
    for col in range(data.shape[1]):
        data[col] = censor_data_1D(data[col], rng[col][0], rng[col][1])
    return data
def amsweep(g, m):
    """
    Sweeps a covariance matrix to extract regression coefficients.

    Args:
        g (Numpy array): a numeric, symmetric covariance matrix divided by the number of observations in the data
        m (Numpy array): a logical vector of length equal to the number of rows in g
            in which the True values correspond to the x values in the matrix
            and the False values correspond to the y values in the matrix

    Return:
        the swept matrix (numpy array); g itself is returned unchanged when m is all False
    """
    m = np.asarray(m, dtype=bool)
    # nothing to sweep: return g as-is
    if not m.any():
        return g
    p = g.shape[0]
    # sweeping every index reduces to the negated inverse
    if m.all():
        return np.negative(np.linalg.inv(g))
    k = np.where(m)[0]  # indices where m is True
    kcompl = np.where(~m)[0]  # indices where m is False
    # BUG FIX: the original used g[k, k] / g[k, kcompl], which performs *paired*
    # elementwise indexing (diagonal picks), not submatrix extraction, and broke
    # whenever more than one index was swept; np.ix_ selects the full submatrices.
    g11 = g[np.ix_(k, k)]
    g12 = g[np.ix_(k, kcompl)]
    g21 = g12.T
    g22 = g[np.ix_(kcompl, kcompl)]
    # fall back to the pseudo-inverse if g11 is singular
    try:
        h11a = np.linalg.inv(g11)
    except np.linalg.LinAlgError:
        h11a = np.linalg.pinv(g11)
    h11 = np.negative(h11a)
    # matrix multiplication to get the remaining sections of h
    h12 = np.matmul(h11a, g12)
    h21 = h12.T
    h22 = g22 - np.matmul(np.matmul(g21, h11a), g12)
    # combine sections and scatter them back into the original index order
    hwo = np.block([[h11, h12], [h21, h22]])
    xordering = np.concatenate((k, kcompl))
    h = np.zeros((p, p))
    h[np.ix_(xordering, xordering)] = hwo
    return h
|
import socket
import datetime
from dataclasses import dataclass, replace
from functools import partial
from typing import Any, Optional, Type, Callable, Dict, List
from xrpc.const import SERVER_SERDE_INST
from xrpc.dsl import rpc, regular, socketio, signal, DEFAULT_GROUP
from xrpc.generic import build_generic_context
from xrpc.serde.abstract import SerdeStruct, SerdeSet
from xrpc.serde.types import CallableArgsWrapper, CallableRetWrapper
from xrpc.transform import get_rpc, get_regular, get_socketio, get_signal
class Bindable(dict):
    """A dict of entry records whose `.fn` callables can be bound to an instance in one shot."""

    def bind(self, obj: Any):
        """Return a new mapping of the same class with every entry's fn partially applied to obj."""
        bound = {}
        for key, entry in self.items():
            bound[key] = replace(entry, fn=partial(entry.fn, obj))
        return self.__class__(bound)
# One RPC endpoint as discovered on a service class.
@dataclass
class RPCEntry:
    name: str  # endpoint name the RPC is registered under
    fn: Callable  # the underlying (possibly still unbound) implementation
    conf: rpc  # the @rpc decorator configuration
    req: Type[Any]  # request-side wrapper (a CallableArgsWrapper instance in from_cls)
    res: Type[Any]  # response-side wrapper (a CallableRetWrapper instance in from_cls)
class RPCEntrySet(Bindable, Dict[str, RPCEntry]):
    """All RPC endpoints of a service, keyed by endpoint name."""

    def groups(self):
        """Return the sorted list of distinct rpc groups present in this set."""
        return sorted({entry.conf.group for entry in self.values()})

    def by_group(self, name=DEFAULT_GROUP) -> 'RPCEntrySet':
        """Return the subset of entries belonging to the given group."""
        filtered = {key: entry for key, entry in self.items() if entry.conf.group == name}
        return RPCEntrySet(filtered)

    @property
    def serde(self) -> SerdeStruct:
        """Build a serde struct covering datetime, Optional[str] and every non-exc entry's req/res types."""
        walked = SerdeSet.walk(SERVER_SERDE_INST, datetime.datetime)
        walked = walked | SerdeSet.walk(SERVER_SERDE_INST, Optional[str])
        for entry in self.values():
            if entry.conf.exc:
                # exc endpoints are skipped here, matching from_cls's bool-return contract
                continue
            walked = walked | SerdeSet.walk(SERVER_SERDE_INST, entry.req) | SerdeSet.walk(SERVER_SERDE_INST, entry.res)
        return walked.struct(SERVER_SERDE_INST)

    @classmethod
    def from_cls(cls, type_):
        """Collect the @rpc-decorated methods of type_ into an RPCEntrySet."""
        type_, ctx = build_generic_context(type_)
        entries = RPCEntrySet()
        for rpc_name, rpc_def in get_rpc(type_).items():
            args_wrapper = CallableArgsWrapper.from_func_cls(type_, rpc_def.fn)
            ret_wrapper = CallableRetWrapper.from_func_cls(type_, rpc_def.fn)
            if rpc_def.conf.exc:
                # exc endpoints must be annotated to return bool
                retannot = ret_wrapper.spec.annotations.get('return')
                assert retannot == bool, (rpc_def.fn, retannot)
            entries[rpc_name] = RPCEntry(rpc_name, rpc_def.fn, rpc_def.conf, args_wrapper, ret_wrapper)
        return entries
# One periodic (@regular) callback as discovered on a service class.
@dataclass
class RegularEntry:
    name: str  # name of the decorated method
    fn: Callable  # the underlying (possibly still unbound) implementation
    conf: regular  # the @regular decorator configuration
class RegularEntrySet(Bindable, Dict[str, RegularEntry]):
    """All @regular periodic callbacks of a service, keyed by name."""

    @classmethod
    def from_cls(cls, type_):
        """Collect the @regular definitions of type_ into a RegularEntrySet."""
        entries = cls()
        for entry_name, defn in get_regular(type_).items():
            entries[entry_name] = RegularEntry(entry_name, defn.fn, defn.conf)
        return entries
# One socket provider (@socketio) as discovered on a service class.
@dataclass
class SocketIOEntry:
    name: str  # name of the decorated method
    fn: Callable[[Optional[List[bool]]], List[socket.socket]]  # returns the sockets to poll
    conf: socketio  # the @socketio decorator configuration
class SocketIOEntrySet(Bindable, Dict[str, SocketIOEntry]):
    """All @socketio socket providers of a service, keyed by name."""

    @classmethod
    def from_cls(cls, type_):
        """Collect the @socketio definitions of type_ into a SocketIOEntrySet."""
        entries = cls()
        for entry_name, (conf, fn) in get_socketio(type_).items():
            entries[entry_name] = SocketIOEntry(entry_name, fn, conf)
        return entries
# One OS-signal handler (@signal) as discovered on a service class.
@dataclass
class SignalEntry:
    name: str  # name of the decorated method
    fn: Callable  # the underlying (possibly still unbound) handler
    conf: signal  # the @signal decorator configuration (carries the signal codes)
class SignalEntrySet(Bindable, Dict[str, SignalEntry]):
    """All @signal OS-signal handlers of a service, keyed by name."""

    def to_signal_map(self) -> Dict[int, List[Callable]]:
        """Invert the entries into a signal-code -> list-of-handlers mapping."""
        code_map: Dict[int, List[Callable]] = {}
        for entry in self.values():
            for code in entry.conf.codes:
                code_map.setdefault(code, []).append(entry.fn)
        return code_map

    @classmethod
    def from_cls(cls, type_):
        """Collect the @signal definitions of type_ into a SignalEntrySet."""
        entries = cls()
        for entry_name, (conf, fn) in get_signal(type_).items():
            entries[entry_name] = SignalEntry(entry_name, fn, conf)
        return entries
# A fully-resolved service definition: its serde struct plus its RPC endpoints.
@dataclass
class ServiceDefn:
    serde: SerdeStruct  # serializer/deserializer for all endpoint payload types
    rpcs: RPCEntrySet  # the endpoints, keyed by name

    def __contains__(self, item):
        # membership delegates to the endpoint names
        return item in self.rpcs

    def keys(self):
        # endpoint names
        return self.rpcs.keys()

    def __getitem__(self, item) -> RPCEntry:
        # look an endpoint up by name
        return self.rpcs[item]

    def bind(self, obj) -> 'ServiceDefn':
        """Return a new ServiceDefn whose endpoint fns are bound to the given instance."""
        return ServiceDefn(self.serde, self.rpcs.bind(obj))

    @classmethod
    def from_cls(cls, type_):
        """Build a ServiceDefn (entries plus their serde struct) from a service class."""
        x = RPCEntrySet.from_cls(type_)
        return ServiceDefn(x.serde, x)
|
import planckStyle as s
from pylab import *
# Plot marginalised 1D posteriors of the neutrino mass sum (mnu) for several
# Planck likelihood combinations, with/without lensing and the A_L extension.
g = s.plotter
g.settings.line_labels = False
g.settings.setWithSubplotSize(4)
g.settings.tight_layout = True
g.settings.lab_fontsize = 32
g.settings.axes_fontsize = 24
g.settings.lw1 = 2
g.setAxes(lims=[0.0, 2.0, 0.0, 1.1])

roots = ['base_mnu_planck_lowl_lowLike_highL',
         'base_mnu_planck_lowl_lowLike_highL_lensing',
         'base_mnu_Alens_planck_lowl_lowLike_highL',
         'base_mnu_Alens_planck_lowl_lowLike_highL_post_lensing']

g.add_1d(roots[0], 'mnu')
g.add_1d(roots[1], 'mnu', color='b')
g.add_1d(roots[2], 'mnu', color='g' if False else 'r')  # default red for A_L chain
g.add_1d(roots[3], 'mnu', color='g')

# legend labels; BUG FIX: the A_L labels used non-raw strings, so '\m' was an
# invalid escape sequence (DeprecationWarning now, SyntaxError in the future).
# Raw-string prefixes keep the rendered text byte-identical.
text(0.9, 1.02, s.WPhighL, fontsize=18)
text(0.9, 0.94, s.WPhighLlensing, color='b', fontsize=18)
text(0.9, 0.85, s.WPhighL + r'($A_{\mathrm L}$)', color='r', fontsize=18)
text(0.9, 0.77, s.WPhighLlensing + r'($A_{\mathrm L}$)', color='g', fontsize=18)

xlabel(r'$\Sigma m_\nu\,[\mathrm{eV}]$', fontsize=32)
ylabel(r'$P/P_{\rm max}$', fontsize=32)

g.export('mnu')
|
from optparse import OptionParser
from classification_model import ClassificationModel
from regression_model import RegressionModel
def main():
    """Parse CLI options and train or evaluate the selected estimation model."""
    parser = OptionParser()
    parser.add_option('-m', '--model', type='string', help='Model name: {linear, dnn}')
    parser.add_option('-t', '--tab', type='string', help='Path to the tabular data file(CSV)')
    parser.add_option('-l', '--target', type='string', help='Target (performance metric) of the estimation model')
    parser.add_option('-p', '--path', type='string', help='Path to the model to be saved')
    parser.add_option('--train', action="store_true", dest="train", default=True)
    parser.add_option('--no-train', action="store_false", dest="train")
    (options, args) = parser.parse_args()
    options_dict = vars(options)
    model_names = ['linear', 'decision_tree', 'random_forest', 'dnn', 'hist_dnn', 'clf_decision_tree', 'clf_random_forest', 'rnk_random_forest']
    try:
        model_name = options_dict['model']
        if model_name not in model_names:
            print('Available model are {}'.format(', '.join(model_names)))
            return
        if model_name in ['linear', 'decision_tree', 'random_forest']:
            model = RegressionModel(model_name)
        elif model_name in ['clf_decision_tree', 'clf_random_forest']:
            model = ClassificationModel(model_name)
        else:
            # BUG FIX: 'dnn', 'hist_dnn' and 'rnk_random_forest' passed the name
            # check above but no branch constructed a model, so `model` was
            # unbound and crashed with UnboundLocalError later; fail fast instead.
            print('Model {} is not implemented'.format(model_name))
            return
        tabular_path = options_dict['tab']
        target = options_dict['target']
        model_path = options_dict['path']
        is_train = options_dict['train']
        if is_train:
            model.train(tabular_path, target, model_path)
        else:
            mae, mape, mse, msle = model.test(tabular_path, target, model_path)
            if model_name in ['clf_decision_tree', 'clf_random_forest']:
                # NOTE(review): exits before printing for classifier models --
                # presumably their regression metrics are meaningless; confirm
                exit(1)
            print('mae: {}\nmape: {}\nmse: {}\nmlse: {}'.format(mae, mape, mse, msle))
            print('{}\t{}\t{}\t{}'.format(mae, mape, mse, msle))
    except RuntimeError:
        print('Please check your arguments')


if __name__ == "__main__":
    main()
|
from lxml import etree
from lxml.builder import E
from odoo import api, fields, models
from odoo.tools.translate import _
from odoo.addons.base.res.res_users import name_boolean_group, name_selection_groups
class GroupsView(models.Model):
    """Extend res.groups so groups flagged as 'custom' are rendered in their own
    section at the top of the user form's Access Rights tab."""

    _inherit = "res.groups"

    # when set, the group is listed under 'Custom User Groups' in the user form
    is_custom_group = fields.Boolean(
        "Custom Group", help="show group at the top of Access Rights tab in user form"
    )

    @api.model
    def _update_user_groups_view(self):
        """Regenerate the auto-built base.user_groups_view arch, adding a
        'Custom User Groups' block ahead of the standard sections."""
        # call super to make module compatible with other modules (e.g. access_restricted)
        super(GroupsView, self)._update_user_groups_view()
        if self._context.get("install_mode"):
            # use installation/admin language for translatable names in the view
            user_context = self.env["res.users"].context_get()
            self = self.with_context(**user_context)
        # We have to try-catch this, because at first init the view does not
        # exist but we are already creating some basic groups.
        view = self.env.ref("base.user_groups_view", raise_if_not_found=False)
        if view and view.exists() and view._name == "ir.ui.view":
            group_no_one = view.env.ref("base.group_no_one")
            # xml1: selection-style applications; xml2: boolean checkboxes; xml3: custom groups
            xml1, xml2 = [], []
            xml1.append(E.separator(string=_("Application"), colspan="2"))
            xml3 = []
            xml3.append(E.separator(string=_("Custom User Groups"), colspan="4"))
            for app, kind, gs in self.get_groups_by_application():
                xml = None
                custom = False
                # NOTE(review): python precedence makes this
                # `(kind == "selection" and any(...)) or all(...)` -- an app whose
                # groups are ALL custom goes to xml3 regardless of kind; confirm intended
                if (
                    kind == "selection"
                    and any([g.is_custom_group for g in gs])
                    or all([g.is_custom_group for g in gs])
                ):
                    xml = xml3
                    custom = True
                # hide groups in category 'Hidden' (except to group_no_one)
                attrs = (
                    {"groups": "base.group_no_one"}
                    if app
                    and (
                        app.xml_id == "base.module_category_hidden"
                        or app.xml_id == "base.module_category_extra"
                    )
                    and not custom
                    else {}
                )
                if kind == "selection":
                    xml = xml or xml1
                    # application name with a selection field
                    field_name = name_selection_groups(map(int, gs))
                    xml.append(E.field(name=field_name, **attrs))
                    xml.append(E.newline())
                else:
                    xml = xml or xml2
                    # application separator with boolean fields
                    app_name = app and app.name or _("Other")
                    if not custom:
                        xml.append(E.separator(string=app_name, colspan="4", **attrs))
                    for g in gs:
                        field_name = name_boolean_group(g.id)
                        if g == group_no_one:
                            # make the group_no_one invisible in the form view
                            xml.append(E.field(name=field_name, invisible="1", **attrs))
                        else:
                            xml.append(E.field(name=field_name, **attrs))
            # a plain dict passed to lxml.builder's E.group(...) becomes element attributes
            xml2.append({"class": "o_label_nowrap"})
            # custom groups first, then boolean apps, then selection apps
            xml = E.field(
                E.group(*(xml3), col="2"),
                E.group(*(xml2), col="4"),
                E.group(*(xml1), col="2"),
                name="groups_id",
                position="replace",
            )
            xml.addprevious(etree.Comment("GENERATED AUTOMATICALLY BY GROUPS"))
            xml_content = etree.tostring(
                xml, pretty_print=True, xml_declaration=True, encoding="utf-8"
            )
            view.write({"arch": xml_content})
        return True
|
import numpy as np
from common import activations
from common import metrics
from math import ceil
from tqdm import tqdm
rng = np.random.default_rng(12345)
class NeuralNetworkModel:
def __init__(self, in_features=784, out_features=10, layers=3, channels=16, activation='relu', learning_rate=1e-4):
    """Build an MLP as a list of weight matrices of shape (fan_in + 1, fan_out), bias folded into the last row."""
    self.in_features = in_features
    self.out_features = out_features
    self.num_layers = layers
    self.channels = channels
    self.activation = activation
    self.learning_rate = learning_rate
    self.layers = []
    for idx in range(layers):
        fan_in = in_features if idx == 0 else channels
        fan_out = out_features if idx == layers - 1 else channels
        # uniform init in [-0.2, 0.2); the extra row holds the bias
        self.layers.append(rng.uniform(-0.2, 0.2, size=[fan_in + 1, fan_out]))
def forward_pass(self, inputs):
    """Run inputs through every layer; return the final activations and the per-layer activation list (inputs first)."""
    activation_fn = activations.activation_dict[self.activation][0]
    x = inputs
    layer_outputs = [x]
    for weights in self.layers:
        # last row of the weight matrix is the bias; activation applied to every layer
        x = activation_fn(np.matmul(x, weights[:-1]) + weights[-1])
        layer_outputs.append(x)
    return x, layer_outputs
def get_gradient(self, logits, labels):
pred = logits[-1]
true = labels
d_a = metrics.d_mse(true, pred)
gradient_vector = []
for i in range(-1, -len(self.layers) - 1, -1):
layer = self.layers[i]
pred_i = logits[i-1]
z = np.matmul(pred_i, layer[:-1]) + layer[-1]
d_z = activations.activation_dict[self.activation][1](z)
d_w = (d_a * d_z)[:, np.newaxis, :] * pred_i[:, :, np.newaxis]
d_b = d_a * d_z
gradient_vector.insert(0, np.concatenate([d_w, d_b[:, np.newaxis, :]], axis=-2))
d_a = np.matmul(layer[:-1], (d_a * d_z)[:, :, np.newaxis])[:, :, 0]
return gradient_vector
def optimizer_step(self, logits, labels):
gv = self.get_gradient(logits, labels)
for layer, gradient in zip(self.layers, gv):
layer -= gradient.mean(axis=0) * self.learning_rate
def train_step(self, inputs, labels):
y, logits = self.forward_pass(inputs)
self.optimizer_step(logits, labels)
return y
def test_step(self, inputs):
y, logits = self.forward_pass(inputs)
return y
def fit(self, inputs, labels, test_inputs, test_labels, epochs=1, batch_size=64, shuffle=True):
train_x = inputs
train_y = labels
test_x = test_inputs
test_y = test_labels
history = {'losses': [], 'metrics': [], 'val_losses': [], 'val_metrics': []}
for epoch in range(epochs):
print(f'Epoch {epoch}:')
if shuffle:
permutation = rng.permutation(len(labels))
train_x = train_x[permutation]
train_y = train_y[permutation]
loss = []
metric = []
for batch in tqdm(range(ceil(len(inputs) / batch_size))):
batch_x = train_x[batch*batch_size:(batch+1)*batch_size]
batch_y = train_y[batch*batch_size:(batch+1)*batch_size]
batch_y_pred = self.train_step(batch_x, batch_y)
loss.append(metrics.mse(batch_y, batch_y_pred))
metric.append(metrics.categorical_acc(batch_y, batch_y_pred))
loss = np.concatenate(loss).mean()
metric = np.array(metric).sum() / len(inputs)
print(f'Loss: {loss}, Metric: {metric}', end='; ')
val_loss = []
val_metric = []
for batch in range(ceil(len(test_inputs) / batch_size)):
batch_x = test_x[batch*batch_size:(batch+1)*batch_size]
batch_y = test_y[batch*batch_size:(batch+1)*batch_size]
batch_y_pred = self.test_step(batch_x)
val_loss.append(metrics.mse(batch_y, batch_y_pred))
val_metric.append(metrics.categorical_acc(batch_y, batch_y_pred))
val_loss = np.concatenate(val_loss).mean()
val_metric = np.array(val_metric).sum() / len(test_inputs)
print(f'Val Loss: {val_loss}, Metric: {val_metric}')
history['losses'].append(loss)
history['metrics'].append(metric)
history['val_losses'].append(val_loss)
history['val_metrics'].append(val_metric)
return history
|
# To use this code, make sure you
#
# import json
#
# and then, to convert JSON from a string, do
#
# result = users_response_from_dict(json.loads(json_string))
from dataclasses import dataclass
from typing import Optional, Any, List, TypeVar, Type, cast, Callable
T = TypeVar("T")
def from_str(x: Any) -> str:
    """Decoder: pass *x* through after asserting it is a ``str``."""
    assert isinstance(x, str)
    return x
def from_none(x: Any) -> Any:
    """Decoder: accept only ``None`` and return it."""
    assert x is None
    return x
def from_union(fs, x):
    """Try each decoder in *fs* on *x* and return the first success.

    Raises AssertionError when no decoder accepts *x*, matching the
    failure mode of the individual ``from_*`` decoders.
    """
    for f in fs:
        try:
            return f(x)
        # Fix: a bare ``except:`` also swallowed SystemExit and
        # KeyboardInterrupt; only ordinary decoder failures are retried.
        except Exception:
            pass
    assert False
def from_int(x: Any) -> int:
    """Decoder: accept ints, rejecting bools (bool is a subclass of int)."""
    assert isinstance(x, int) and not isinstance(x, bool)
    return x
def from_bool(x: Any) -> bool:
    """Decoder: accept only genuine booleans."""
    assert isinstance(x, bool)
    return x
def to_class(c: Type[T], x: Any) -> dict:
    """Serialize *x* to a dict via its own ``to_dict``, asserting its type."""
    assert isinstance(x, c)
    instance = cast(Any, x)
    return instance.to_dict()
def from_list(f: Callable[[Any], T], x: Any) -> List[T]:
    """Decode every element of the list *x* with the decoder *f*."""
    assert isinstance(x, list)
    return [f(element) for element in x]
@dataclass
class Errors:
    """SCIM error payload: a human-readable description plus numeric code."""
    description: Optional[str] = None
    code: Optional[int] = None
    @staticmethod
    def from_dict(obj: Any) -> 'Errors':
        assert isinstance(obj, dict)
        return Errors(
            description=from_union([from_str, from_none], obj.get("description")),
            code=from_union([from_int, from_none], obj.get("code")),
        )
    def to_dict(self) -> dict:
        return {
            "description": from_union([from_str, from_none], self.description),
            "code": from_union([from_int, from_none], self.code),
        }
@dataclass
class Address:
    """SCIM postal address record."""
    street_address: Optional[str] = None
    locality: Optional[str] = None
    region: Optional[str] = None
    postal_code: Optional[str] = None
    country: Optional[str] = None
    primary: Optional[bool] = None
    @staticmethod
    def from_dict(obj: Any) -> 'Address':
        assert isinstance(obj, dict)
        opt_str = [from_str, from_none]
        return Address(
            street_address=from_union(opt_str, obj.get("streetAddress")),
            locality=from_union(opt_str, obj.get("locality")),
            region=from_union(opt_str, obj.get("region")),
            postal_code=from_union(opt_str, obj.get("postalCode")),
            country=from_union(opt_str, obj.get("country")),
            primary=from_union([from_bool, from_none], obj.get("primary")),
        )
    def to_dict(self) -> dict:
        opt_str = [from_str, from_none]
        return {
            "streetAddress": from_union(opt_str, self.street_address),
            "locality": from_union(opt_str, self.locality),
            "region": from_union(opt_str, self.region),
            "postalCode": from_union(opt_str, self.postal_code),
            "country": from_union(opt_str, self.country),
            "primary": from_union([from_bool, from_none], self.primary),
        }
@dataclass
class Email:
    """SCIM multi-valued attribute (also reused for phoneNumbers and roles)."""
    value: Optional[str] = None
    primary: Optional[bool] = None
    type: Optional[str] = None
    @staticmethod
    def from_dict(obj: Any) -> 'Email':
        assert isinstance(obj, dict)
        return Email(
            value=from_union([from_str, from_none], obj.get("value")),
            primary=from_union([from_bool, from_none], obj.get("primary")),
            type=from_union([from_str, from_none], obj.get("type")),
        )
    def to_dict(self) -> dict:
        return {
            "value": from_union([from_str, from_none], self.value),
            "primary": from_union([from_bool, from_none], self.primary),
            "type": from_union([from_str, from_none], self.type),
        }
@dataclass
class Group:
    """SCIM group membership reference (id value + display name)."""
    value: Optional[str] = None
    display: Optional[str] = None
    @staticmethod
    def from_dict(obj: Any) -> 'Group':
        assert isinstance(obj, dict)
        return Group(
            value=from_union([from_str, from_none], obj.get("value")),
            display=from_union([from_str, from_none], obj.get("display")),
        )
    def to_dict(self) -> dict:
        return {
            "value": from_union([from_str, from_none], self.value),
            "display": from_union([from_str, from_none], self.display),
        }
@dataclass
class Meta:
    """SCIM resource metadata (creation timestamp and resource location)."""
    created: Optional[str] = None
    location: Optional[str] = None
    @staticmethod
    def from_dict(obj: Any) -> 'Meta':
        assert isinstance(obj, dict)
        return Meta(
            created=from_union([from_str, from_none], obj.get("created")),
            location=from_union([from_str, from_none], obj.get("location")),
        )
    def to_dict(self) -> dict:
        return {
            "created": from_union([from_str, from_none], self.created),
            "location": from_union([from_str, from_none], self.location),
        }
@dataclass
class Name:
    """SCIM user name components."""
    given_name: Optional[str] = None
    family_name: Optional[str] = None
    @staticmethod
    def from_dict(obj: Any) -> 'Name':
        assert isinstance(obj, dict)
        return Name(
            given_name=from_union([from_str, from_none], obj.get("givenName")),
            family_name=from_union([from_str, from_none], obj.get("familyName")),
        )
    def to_dict(self) -> dict:
        return {
            "givenName": from_union([from_str, from_none], self.given_name),
            "familyName": from_union([from_str, from_none], self.family_name),
        }
@dataclass
class Photo:
    """SCIM photo reference (URL value + type label)."""
    value: Optional[str] = None
    type: Optional[str] = None
    @staticmethod
    def from_dict(obj: Any) -> 'Photo':
        assert isinstance(obj, dict)
        return Photo(
            value=from_union([from_str, from_none], obj.get("value")),
            type=from_union([from_str, from_none], obj.get("type")),
        )
    def to_dict(self) -> dict:
        return {
            "value": from_union([from_str, from_none], self.value),
            "type": from_union([from_str, from_none], self.type),
        }
@dataclass
class Manager:
    """Empty placeholder for the SCIM enterprise ``manager`` sub-object."""
    @staticmethod
    def from_dict(obj: Any) -> 'Manager':
        assert isinstance(obj, dict)
        return Manager()
    def to_dict(self) -> dict:
        return {}
@dataclass
class UrnScimSchemasExtensionEnterprise10:
    """SCIM enterprise extension; carries only the ``manager`` sub-object."""
    manager: Optional[Manager] = None
    @staticmethod
    def from_dict(obj: Any) -> 'UrnScimSchemasExtensionEnterprise10':
        assert isinstance(obj, dict)
        return UrnScimSchemasExtensionEnterprise10(
            manager=from_union([Manager.from_dict, from_none], obj.get("manager")),
        )
    def to_dict(self) -> dict:
        return {
            "manager": from_union([lambda x: to_class(Manager, x), from_none], self.manager),
        }
@dataclass
class UrnScimSchemasExtensionSlackGuest10:
    """Slack guest-account extension: guest type plus expiration timestamp."""
    type: Optional[str] = None
    expiration: Optional[str] = None
    @staticmethod
    def from_dict(obj: Any) -> 'UrnScimSchemasExtensionSlackGuest10':
        assert isinstance(obj, dict)
        return UrnScimSchemasExtensionSlackGuest10(
            type=from_union([from_str, from_none], obj.get("type")),
            expiration=from_union([from_str, from_none], obj.get("expiration")),
        )
    def to_dict(self) -> dict:
        return {
            "type": from_union([from_str, from_none], self.type),
            "expiration": from_union([from_str, from_none], self.expiration),
        }
@dataclass
class Resource:
    """One SCIM user record as returned by the Slack SCIM users endpoint."""
    schemas: Optional[List[str]] = None
    id: Optional[str] = None
    external_id: Optional[str] = None
    meta: Optional[Meta] = None
    user_name: Optional[str] = None
    nick_name: Optional[str] = None
    name: Optional[Name] = None
    display_name: Optional[str] = None
    profile_url: Optional[str] = None
    title: Optional[str] = None
    timezone: Optional[str] = None
    active: Optional[bool] = None
    emails: Optional[List[Email]] = None
    photos: Optional[List[Photo]] = None
    groups: Optional[List[Group]] = None
    addresses: Optional[List[Address]] = None
    phone_numbers: Optional[List[Email]] = None
    roles: Optional[List[Email]] = None
    urn_scim_schemas_extension_enterprise_10: Optional[UrnScimSchemasExtensionEnterprise10] = None
    urn_scim_schemas_extension_slack_guest_10: Optional[UrnScimSchemasExtensionSlackGuest10] = None
    @staticmethod
    def from_dict(obj: Any) -> 'Resource':
        assert isinstance(obj, dict)
        opt_str = [from_str, from_none]
        return Resource(
            schemas=from_union([lambda x: from_list(from_str, x), from_none], obj.get("schemas")),
            id=from_union(opt_str, obj.get("id")),
            external_id=from_union(opt_str, obj.get("externalId")),
            meta=from_union([Meta.from_dict, from_none], obj.get("meta")),
            user_name=from_union(opt_str, obj.get("userName")),
            nick_name=from_union(opt_str, obj.get("nickName")),
            name=from_union([Name.from_dict, from_none], obj.get("name")),
            display_name=from_union(opt_str, obj.get("displayName")),
            profile_url=from_union(opt_str, obj.get("profileUrl")),
            title=from_union(opt_str, obj.get("title")),
            timezone=from_union(opt_str, obj.get("timezone")),
            active=from_union([from_bool, from_none], obj.get("active")),
            emails=from_union([lambda x: from_list(Email.from_dict, x), from_none], obj.get("emails")),
            photos=from_union([lambda x: from_list(Photo.from_dict, x), from_none], obj.get("photos")),
            groups=from_union([lambda x: from_list(Group.from_dict, x), from_none], obj.get("groups")),
            addresses=from_union([lambda x: from_list(Address.from_dict, x), from_none], obj.get("addresses")),
            phone_numbers=from_union([lambda x: from_list(Email.from_dict, x), from_none], obj.get("phoneNumbers")),
            roles=from_union([lambda x: from_list(Email.from_dict, x), from_none], obj.get("roles")),
            urn_scim_schemas_extension_enterprise_10=from_union([UrnScimSchemasExtensionEnterprise10.from_dict, from_none], obj.get("urn:scim:schemas:extension:enterprise:1.0")),
            urn_scim_schemas_extension_slack_guest_10=from_union([UrnScimSchemasExtensionSlackGuest10.from_dict, from_none], obj.get("urn:scim:schemas:extension:slack:guest:1.0")),
        )
    def to_dict(self) -> dict:
        opt_str = [from_str, from_none]
        return {
            "schemas": from_union([lambda x: from_list(from_str, x), from_none], self.schemas),
            "id": from_union(opt_str, self.id),
            "externalId": from_union(opt_str, self.external_id),
            "meta": from_union([lambda x: to_class(Meta, x), from_none], self.meta),
            "userName": from_union(opt_str, self.user_name),
            "nickName": from_union(opt_str, self.nick_name),
            "name": from_union([lambda x: to_class(Name, x), from_none], self.name),
            "displayName": from_union(opt_str, self.display_name),
            "profileUrl": from_union(opt_str, self.profile_url),
            "title": from_union(opt_str, self.title),
            "timezone": from_union(opt_str, self.timezone),
            "active": from_union([from_bool, from_none], self.active),
            "emails": from_union([lambda x: from_list(lambda x: to_class(Email, x), x), from_none], self.emails),
            "photos": from_union([lambda x: from_list(lambda x: to_class(Photo, x), x), from_none], self.photos),
            "groups": from_union([lambda x: from_list(lambda x: to_class(Group, x), x), from_none], self.groups),
            "addresses": from_union([lambda x: from_list(lambda x: to_class(Address, x), x), from_none], self.addresses),
            "phoneNumbers": from_union([lambda x: from_list(lambda x: to_class(Email, x), x), from_none], self.phone_numbers),
            "roles": from_union([lambda x: from_list(lambda x: to_class(Email, x), x), from_none], self.roles),
            "urn:scim:schemas:extension:enterprise:1.0": from_union([lambda x: to_class(UrnScimSchemasExtensionEnterprise10, x), from_none], self.urn_scim_schemas_extension_enterprise_10),
            "urn:scim:schemas:extension:slack:guest:1.0": from_union([lambda x: to_class(UrnScimSchemasExtensionSlackGuest10, x), from_none], self.urn_scim_schemas_extension_slack_guest_10),
        }
@dataclass
class UsersResponse:
    """Top-level SCIM list response: paging info plus resources or errors."""
    total_results: Optional[int] = None
    items_per_page: Optional[int] = None
    start_index: Optional[int] = None
    schemas: Optional[List[str]] = None
    resources: Optional[List[Resource]] = None
    errors: Optional[Errors] = None
    @staticmethod
    def from_dict(obj: Any) -> 'UsersResponse':
        assert isinstance(obj, dict)
        opt_int = [from_int, from_none]
        return UsersResponse(
            total_results=from_union(opt_int, obj.get("totalResults")),
            items_per_page=from_union(opt_int, obj.get("itemsPerPage")),
            start_index=from_union(opt_int, obj.get("startIndex")),
            schemas=from_union([lambda x: from_list(from_str, x), from_none], obj.get("schemas")),
            resources=from_union([lambda x: from_list(Resource.from_dict, x), from_none], obj.get("Resources")),
            errors=from_union([Errors.from_dict, from_none], obj.get("Errors")),
        )
    def to_dict(self) -> dict:
        opt_int = [from_int, from_none]
        return {
            "totalResults": from_union(opt_int, self.total_results),
            "itemsPerPage": from_union(opt_int, self.items_per_page),
            "startIndex": from_union(opt_int, self.start_index),
            "schemas": from_union([lambda x: from_list(from_str, x), from_none], self.schemas),
            "Resources": from_union([lambda x: from_list(lambda x: to_class(Resource, x), x), from_none], self.resources),
            "Errors": from_union([lambda x: to_class(Errors, x), from_none], self.errors),
        }
def users_response_from_dict(s: Any) -> UsersResponse:
    """Entry point: decode a parsed-JSON object into a UsersResponse."""
    return UsersResponse.from_dict(s)
def users_response_to_dict(x: UsersResponse) -> Any:
    """Inverse of users_response_from_dict: back to a JSON-ready dict."""
    return to_class(UsersResponse, x)
|
# coding: utf-8
import pprint
import six
from enum import Enum
class DatabaseTranslatedStringItem:
    """Swagger/OpenAPI-style generated model: one translated string.

    NOTE(review): this follows swagger-codegen conventions (swagger_types /
    attribute_map / property pairs); keep the structure intact so regenerated
    code diffs cleanly.
    """
    # Maps python attribute name -> OpenAPI type; to_dict() iterates this.
    swagger_types = {
        'language': 'str',
        'language_code': 'str',
        'translation': 'str',
    }
    # Maps python attribute name -> JSON property name.
    attribute_map = {
        'language': 'language','language_code': 'languageCode','translation': 'translation',
    }
    # Class-level defaults; each instance overwrites them via the setters below.
    _language = None
    _language_code = None
    _translation = None
    def __init__(self, **kwargs):
        # All fields are optional keyword arguments; unset fields stay None.
        self.discriminator = None
        self.language = kwargs.get('language', None)
        self.language_code = kwargs.get('language_code', None)
        self.translation = kwargs.get('translation', None)
    @property
    def language(self):
        """Gets the language of this DatabaseTranslatedStringItem.
        :return: The language of this DatabaseTranslatedStringItem.
        :rtype: str
        """
        return self._language
    @language.setter
    def language(self, language):
        """Sets the language of this DatabaseTranslatedStringItem.
        :param language: The language of this DatabaseTranslatedStringItem.
        :type: str
        """
        self._language = language
    @property
    def language_code(self):
        """Gets the language_code of this DatabaseTranslatedStringItem.
        :return: The language_code of this DatabaseTranslatedStringItem.
        :rtype: str
        """
        return self._language_code
    @language_code.setter
    def language_code(self, language_code):
        """Sets the language_code of this DatabaseTranslatedStringItem.
        :param language_code: The language_code of this DatabaseTranslatedStringItem.
        :type: str
        """
        self._language_code = language_code
    @property
    def translation(self):
        """Gets the translation of this DatabaseTranslatedStringItem.
        :return: The translation of this DatabaseTranslatedStringItem.
        :rtype: str
        """
        return self._translation
    @translation.setter
    def translation(self, translation):
        """Sets the translation of this DatabaseTranslatedStringItem.
        :param translation: The translation of this DatabaseTranslatedStringItem.
        :type: str
        :raises ValueError: if the value exceeds the schema's maxLength.
        """
        # 16777216 == 16 MiB: maxLength constraint from the schema.
        if translation is not None and len(translation) > 16777216:
            raise ValueError("Invalid value for `translation`, length must be less than or equal to `16777216`")
        self._translation = translation
    def to_dict(self):
        """Serialize the model to a plain dict, recursing into nested models."""
        result = {}
        for attr, _ in six.iteritems(self.swagger_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            elif isinstance(value, Enum):
                result[attr] = value.value
            else:
                result[attr] = value
        # Generated boilerplate: only relevant if the model subclassed dict,
        # which this class does not, so this branch never runs here.
        if issubclass(DatabaseTranslatedStringItem, dict):
            for key, value in self.items():
                result[key] = value
        return result
    def to_str(self):
        """Pretty-printed string form of to_dict()."""
        return pprint.pformat(self.to_dict())
    def __repr__(self):
        return self.to_str()
    def __eq__(self, other):
        # Value equality over all instance attributes.
        if not isinstance(other, DatabaseTranslatedStringItem):
            return False
        return self.__dict__ == other.__dict__
    def __ne__(self, other):
        return not self == other
|
#!/usr/bin/env python3
# coding: utf-8
import tldevicesync
# Connect to the attached Twinleaf sensors via the device-sync helper.
tio = tldevicesync.DeviceSync()
# Start synchronized streaming of the two VMR vector channels.
# NOTE(review): assumes exactly two devices enumerate as vmr0/vmr1 — confirm setup.
syncStreams = tio.syncStreamsStart([tio.vmr0.vector,tio.vmr1.vector])
# Read three samples from the synchronized streams and dump them.
# NOTE(review): presumably blocks until the samples arrive — verify semantics.
data = tio.syncStreamsRead(syncStreams, samples=3)
print(data)
from conans import ConanFile, CMake
import shutil, os
class ConsumerConan(ConanFile):
    """Conan recipe that vendors sources from the `source/0.1@user/testing`
    build-requirement into its own tree and builds everything with CMake.
    """
    settings = "os", "compiler", "build_type", "arch"
    options = {"shared": [True, False]}
    default_options = "shared=False"
    generators = "cmake"
    # The recipe ships its local src/ tree alongside itself.
    exports_sources = "src/*"
    # Build-only dependency whose packaged src/ is copied in during build().
    build_requires = "source/0.1@user/testing"
    def build(self):
        # Copy the dependency's packaged sources into src/external before
        # configuring, so the CMake project can reference them.
        src_folder = os.path.join(self.deps_cpp_info["source"].rootpath, "src")
        shutil.copytree(src_folder, "src/external")
        cmake = CMake(self)
        cmake.configure(source_folder="src")
        cmake.build()
    def package(self):
        # Package headers plus every produced library artifact, flattened
        # into the conventional include/lib/bin layout.
        self.copy("*.h", dst="include", src="src")
        self.copy("*.lib", dst="lib", keep_path=False)
        self.copy("*.dll", dst="bin", keep_path=False)
        self.copy("*.dylib*", dst="lib", keep_path=False)
        self.copy("*.so", dst="lib", keep_path=False)
        self.copy("*.a", dst="lib", keep_path=False)
    def package_info(self):
        # Consumers link against the single "greet" library.
        self.cpp_info.libs = ["greet"]
|
from figures import FuzzySet
def main():
    """
    Fuzzy-inference demo for the classic restaurant tipping problem.

    Builds membership functions for two inputs (service, food) and one
    output (tip), combines them into three linguistic rules, and prints
    the resulting fuzzy sets.

    The sketch of an interactive builder (asking the user for the number of
    inputs/outputs and the type/parameters of each membership function) that
    previously lived here as commented-out code has been removed as dead code.
    """
    # Upper bound of the universe of discourse for every variable.
    n = 30
    # Membership-function domain: [0, n] sampled in 0.1 steps, n * 10 + 1
    # points in total.  Dividing integers avoids the floating-point drift
    # that the old `i += 0.1` accumulation loop introduced.
    x = [step / 10 for step in range(n * 10 + 1)]
    # Input #1: service quality.
    service = [FuzzySet("poor").gaussmf(x, 1, 0.5),
               FuzzySet("good").gaussmf(x, 2, 5),
               FuzzySet("excellent").gaussmf(x, 1, 9.5)]
    # Input #2: food quality.
    food = [FuzzySet("rancid").trapmf(x, 0, 0, 1, 3),
            FuzzySet("delicious").trapmf(x, 7, 8, 10, 10)]
    # Output #1: tip size.
    tip = [FuzzySet("cheap").trimf(x, 0, 5, 10),
           FuzzySet("average").trimf(x, 10, 15, 20),
           FuzzySet("generous").trimf(x, 20, 25, 30)]
    # Linguistic rules (degree of fuzzy equality): union (|) of the
    # antecedent sets intersected (&) with the consequent set.
    rules = [
        (set(service[0]) | set(food[0])) & set(tip[0]),
        set(service[1]) & set(tip[1]),
        (set(service[2]) | set(food[1])) & set(tip[2]),
    ]
    # Dump each variable's membership functions, separated by blank lines.
    for fuzzy_sets in (service, food, tip):
        print(fuzzy_sets)
        for _ in range(4):
            print()
    print(rules)
if __name__ == "__main__":
    main()
|
# Simple SNMP GET example: fetch the system description (the 1.3.6.1.2.1.1
# "system" subtree) from a device and print both the raw response and the
# extracted value.  Fixes: the stray bare OID line that made the file a
# SyntaxError, and the Python 2 `print` statements.
from snmp_helper import snmp_get_oid, snmp_extract
COMMUNITY_STRING = 'galileo'
SNMP_PORT = 161
IP = " "  # TODO: fill in the target device's IP address
device_name = (IP, COMMUNITY_STRING, SNMP_PORT)
OID = '1.3.6.1.2.1.1.0'
snmp_data = snmp_get_oid(device_name, oid=OID)
print(snmp_data)
output = snmp_extract(snmp_data)
print(output)
# https://github.com/ssarfraz/SPL/blob/master/SPL_Loss/
import torch
import torch.nn as nn
import torch.nn.functional as F
import pdb
########################
# Losses
########################
## Gradient Profile (GP) loss
## The image gradients in each channel can easily be computed
## by simple 1-pixel shifted image differences from itself.
class GPLoss(nn.Module):
    """Gradient Profile (GP) loss: the Spatial Profile Loss applied to
    1-pixel-shifted image-gradient maps of input and reference."""
    def __init__(self, trace=False, spl_norm=False):
        super(GPLoss, self).__init__()
        self.spl_norm = spl_norm
        # trace=True selects the complete (slow) trace computation,
        # otherwise the efficient batchwise SPLoss is used.
        self.trace = SPL_ComputeWithTrace() if trace == True else SPLoss()
    def get_image_gradients(self, input):
        """Return (vertical, horizontal) gradients via shifted differences.

        Negative F.pad values crop one column/row so the shifted copies align.
        """
        crop_right = F.pad(input, (0, -1, 0, 0))
        crop_left = F.pad(input, (-1, 0, 0, 0))
        crop_bottom = F.pad(input, (0, 0, 0, -1))
        crop_top = F.pad(input, (0, 0, -1, 0))
        return crop_right - crop_left, crop_bottom - crop_top
    def __call__(self, input, reference):
        # spl_norm rescales [-1, 1] tensors into [0, 1] before the loss.
        if self.spl_norm == True:
            input = (input + 1.0) / 2.0
            reference = (reference + 1.0) / 2.0
        in_v, in_h = self.get_image_gradients(input)
        ref_v, ref_h = self.get_image_gradients(reference)
        return self.trace(in_v, ref_v) + self.trace(in_h, ref_h)
## Colour Profile (CP) loss
class CPLoss(nn.Module):
    """Colour Profile (CP) loss.

    Compares the colour statistics of input and reference with the Spatial
    Profile Loss, optionally in RGB space, in YUV space, and over the image
    gradients of the YUV channels.

    Args:
        rgb: include the RGB-space SPL term.
        yuv: include the YUV-space SPL term.
        yuvgrad: include SPL terms over YUV image gradients.
        trace: use the complete (slow) SPL_ComputeWithTrace implementation.
        spl_norm: rescale [-1, 1] inputs to [0, 1] inside __call__.
        yuv_norm: rescale [-1, 1] inputs to [0, 1] inside to_YUV only.
    """
    def __init__(self, rgb=True, yuv=True, yuvgrad=True, trace=False, spl_norm=False, yuv_norm=False):
        super(CPLoss, self).__init__()
        self.rgb = rgb
        self.yuv = yuv
        self.yuvgrad = yuvgrad
        self.spl_norm = spl_norm
        self.yuv_norm = yuv_norm
        if trace:  # Alternate behavior: complete calculation with SPL_ComputeWithTrace()
            self.trace = SPL_ComputeWithTrace()
            self.trace_YUV = SPL_ComputeWithTrace()
        else:  # Default behavior: the more efficient SPLoss()
            self.trace = SPLoss()
            self.trace_YUV = SPLoss()
    def get_image_gradients(self, input):
        """Return (vertical, horizontal) gradients via 1-pixel shifted
        differences; negative F.pad values crop one column/row to align."""
        f_v_1 = F.pad(input, (0, -1, 0, 0))
        f_v_2 = F.pad(input, (-1, 0, 0, 0))
        f_v = f_v_1 - f_v_2
        f_h_1 = F.pad(input, (0, 0, 0, -1))
        f_h_2 = F.pad(input, (0, 0, -1, 0))
        f_h = f_h_1 - f_h_2
        return f_v, f_h
    def to_YUV(self, input, consts='BT.601'):
        """Converts one or more images from RGB to YUV.

        Outputs a tensor of the same shape as `input`, containing the YUV
        value of the pixels.  Only well defined for values in [0, 1].

        Args:
            input: image tensor with channels (dim 1) of size 3 (R, G, B).
            consts: YUV constant set, 'BT.601' (SDTV, default) or 'BT.709'
                (HDTV).  https://en.wikipedia.org/wiki/YUV
        Returns:
            Tensor with the same shape as `input`.
        """
        # Rerange [-1, 1] inputs to [0, 1] before conversion when __call__
        # has not already rescaled them.
        if self.yuv_norm == True and self.spl_norm == False:
            input = (input + 1.0) / 2.0
        if consts == 'BT.709':  # HDTV
            Wr = 0.2126
            Wb = 0.0722
            Wg = 1 - Wr - Wb  # 0.7152
            Uc = 0.539
            Vc = 0.635
        else:  # Default to 'BT.601', SDTV (as the original code)
            Wr = 0.299
            Wb = 0.114
            Wg = 1 - Wr - Wb  # 0.587
            Uc = 0.493
            Vc = 0.877
        # Compute luma once instead of three times (was repeated inline).
        Y = Wr * input[:, 0, :, :].unsqueeze(1) + Wg * input[:, 1, :, :].unsqueeze(1) + Wb * input[:, 2, :, :].unsqueeze(1)
        U = Uc * (input[:, 2, :, :].unsqueeze(1) - Y)
        V = Vc * (input[:, 0, :, :].unsqueeze(1) - Y)
        return torch.cat((Y, U, V), dim=1)
    def __call__(self, input, reference):
        """Sum of the enabled SPL terms between input and reference."""
        # spl_norm rescales [-1, 1] tensors into [0, 1] before any term.
        if self.spl_norm:
            input = (input + 1.0) / 2.0
            reference = (reference + 1.0) / 2.0
        total_loss = 0
        if self.rgb:
            total_loss += self.trace(input, reference)
        if self.yuv or self.yuvgrad:
            # BUGFIX: these conversions previously ran only under `self.yuv`,
            # so yuvgrad=True with yuv=False raised NameError on input_yuv.
            input_yuv = self.to_YUV(input)  # to_YUV needs [0, 1] images
            reference_yuv = self.to_YUV(reference)
        if self.yuv:
            # Also use the previously-unused trace_YUV module for this term.
            total_loss += self.trace_YUV(input_yuv, reference_yuv)
        if self.yuvgrad:
            input_v, input_h = self.get_image_gradients(input_yuv)
            ref_v, ref_h = self.get_image_gradients(reference_yuv)
            total_loss += self.trace(input_v, ref_v)
            total_loss += self.trace(input_h, ref_h)
        return total_loss
## Spatial Profile Loss (SPL)
# Both loss versions equate to the cosine similarity of rows/columns.
# While in 'SPL_ComputeWithTrace()' this is achieved using the trace
# (sum over the diagonal) of matrix multiplication of L2-normalized
# input/target rows/columns, 'SPLoss()' L2-normalizes the rows/columns,
# performs piece-wise multiplication of the two tensors and then sums
# along the corresponding axes. The latter variant, however, needs less
# operations since it can be performed batchwise and, thus, is the
# preferred variant.
# Note: SPLoss() makes image result too bright, at least when using
# images in the [0,1] range and no activation as output of the Generator.
# SPL_ComputeWithTrace() does not have this problem, but at least initial
# results are very blurry. Testing with SPLoss() with images normalized
# in the [-1,1] range and with tanh activation in the Generator output.
# In the original implementation, they used tanh as generator output,
# rescaled the tensors to a [0,1] range from [-1,1] and also used [-1,1]
# ranged input images to be able to use the rgb-yuv conversion in the CP
# component. Not using any activation function or using ReLU might lead
# to bright images as nothing caps your outputs inside the [0,1]-range
# and your values might overflow when you transfer it back to opencv/Pillow
# for visualization.
## Spatial Profile Loss (SPL) with trace
class SPL_ComputeWithTrace(nn.Module):
    """Trace-based Spatial Profile Loss, exactly as stated in the paper.

    Rows and columns of each channel are treated as L2-normalized profile
    vectors; the loss is the negated mean cosine similarity between input
    and reference profiles, accumulated per sample and channel.  `weight`
    is a leftover per-channel weighting; equal weights work best.
    """
    def __init__(self, weight=[1., 1., 1.]):
        super(SPL_ComputeWithTrace, self).__init__()
        self.weight = weight
    def __call__(self, input, reference):
        row_sim = 0
        col_sim = 0
        batch = input.shape[0]
        for sample in range(batch):
            for channel in range(input.shape[1]):
                x = input[sample, channel, :, :]
                y = reference[sample, channel, :, :]
                # Trace of the product of normalized profiles = sum of
                # per-row (resp. per-column) cosine similarities.
                rows = torch.matmul(F.normalize(x, p=2, dim=1), torch.t(F.normalize(y, p=2, dim=1)))
                cols = torch.matmul(torch.t(F.normalize(x, p=2, dim=0)), F.normalize(y, p=2, dim=0))
                row_sim += torch.trace(rows) / input.shape[2] * self.weight[channel]
                col_sim += torch.trace(cols) / input.shape[3] * self.weight[channel]
        # Negate (loss decreases as similarity grows) and average over batch.
        row_sim = -torch.sum(row_sim) / batch
        col_sim = -torch.sum(col_sim) / batch
        return row_sim + col_sim
## Spatial Profile Loss (SPL) without trace, preferred
class SPLoss(nn.Module):
    """Batchwise Spatial Profile Loss (preferred over the trace variant).

    L2-normalizes rows (dim 2) and columns (dim 3), multiplies the two
    tensors element-wise, and sums — equivalent to the cosine similarity of
    row/column profiles, computed without per-sample loops.
    """
    def __init__(self):
        super(SPLoss, self).__init__()
    def __call__(self, input, reference):
        row_prod = F.normalize(input, p=2, dim=2) * F.normalize(reference, p=2, dim=2)
        col_prod = F.normalize(input, p=2, dim=3) * F.normalize(reference, p=2, dim=3)
        a = torch.sum(torch.sum(row_prod, dim=2, keepdim=True))
        b = torch.sum(torch.sum(col_prod, dim=3, keepdim=True))
        # Negate and scale by the height, matching the original normalization.
        return -(a + b) / input.size(2)
|
#!/usr/bin/env python3
#
# Copyright 2018 Google LLC
#
# Use of this source code is governed by an MIT-style
# license that can be found in the LICENSE file or at
# https://opensource.org/licenses/MIT.
import argparse
import errno
import random
import socket
import sys
import adiantum
def fail(msg):
    """Report an error on stderr and terminate with exit status 1."""
    print(f'Error: {msg}', file=sys.stderr)
    sys.exit(1)
class AdiantumKernelImpl():
    """Adiantum encryption/decryption via the kernel's AF_ALG socket API."""
    def __init__(self, kern_algname):
        # Raises OSError when AF_ALG is unsupported (EAFNOSUPPORT) or the
        # named skcipher algorithm is not available (ENOENT).
        self.alg_fd = socket.socket(socket.AF_ALG, socket.SOCK_SEQPACKET, 0)
        self.alg_fd.bind(('skcipher', kern_algname))
    def _crypt(self, message, key, tweak, op):
        """Run one AF_ALG request; `op` selects encrypt or decrypt."""
        self.alg_fd.setsockopt(socket.SOL_ALG, socket.ALG_SET_KEY, key)
        req, _ = self.alg_fd.accept()
        # The tweak travels in the IV field of the af_alg request.
        req.sendmsg_afalg([message], op=op, iv=tweak)
        (data, _, _, _) = req.recvmsg(len(message) + 256)
        # Adiantum is length-preserving; any other length is a kernel bug.
        if len(data) != len(message):
            fail("{} didn't preserve length!".format(
                "Encryption" if op == socket.ALG_OP_ENCRYPT else "Decryption"))
        return data
    def encrypt(self, plaintext, key, tweak):
        return self._crypt(plaintext, key, tweak, socket.ALG_OP_ENCRYPT)
    def decrypt(self, ciphertext, key, tweak):
        return self._crypt(ciphertext, key, tweak, socket.ALG_OP_DECRYPT)
def do_test_impl(args, kern_impl, ref_impl):
    """Encrypt/decrypt random messages with both implementations and compare.

    Message lengths are exponentially distributed around args.avg_msgsize,
    clamped to at least 16 bytes.
    """
    sizes = []
    for _ in range(args.num_msgs):
        size = max(16, int(random.expovariate(1 / args.avg_msgsize)))
        sizes.append(size)
        orig_msg = bytes(random.getrandbits(8) for _ in range(size))
        key = bytes(random.getrandbits(8) for _ in range(32))
        tweak = bytes(random.getrandbits(8) for _ in range(32))
        # Both implementations must produce identical ciphertext...
        ref_ctext = ref_impl.encrypt(orig_msg, key, tweak)
        kern_ctext = kern_impl.encrypt(orig_msg, key, tweak)
        if ref_ctext != kern_ctext:
            fail('Encryption results differed')
        # ...agree on decryption, and invert encryption exactly.
        ref_ptext = ref_impl.decrypt(ref_ctext, key, tweak)
        kern_ptext = kern_impl.decrypt(ref_ctext, key, tweak)
        if ref_ptext != kern_ptext:
            fail('Decryption results differed')
        if ref_ptext != orig_msg:
            fail("Decryption didn't invert encryption")
    # print(f'Tested sizes: {sizes}')
def test_impl(args, kern_algname, variant_selector, required=True):
    """Test one kernel algorithm instantiation against the reference impl.

    When the kernel lacks the algorithm and `required` is False, skip
    silently; otherwise print guidance and exit (or re-raise other errors).
    """
    try:
        kern_impl = AdiantumKernelImpl(kern_algname)
    except OSError as ex:
        # EAFNOSUPPORT: no AF_ALG at all; ENOENT: algorithm not built in.
        is_algnotfound = (ex.errno == errno.EAFNOSUPPORT or
                          ex.errno == errno.ENOENT)
        if is_algnotfound and not required:
            return
        sys.stderr.write('Unable to set up AF_ALG socket for Adiantum!\n')
        if is_algnotfound:
            sys.stderr.write('Try enabling CONFIG_CRYPTO_USER_API_SKCIPHER and CONFIG_CRYPTO_ADIANTUM.\n')
            sys.exit(1)
        raise ex
    print(f'Testing {kern_algname}...')
    ref_impl = adiantum.Adiantum()
    ref_impl.choose_variant(variant_selector)
    do_test_impl(args, kern_impl, ref_impl)
def is_Adiantum_XChaCha_AES(variant, chacha_nrounds):
    """Return True when `variant` describes Adiantum built on XChaCha with
    `chacha_nrounds` rounds, delegating to ChaCha, over AES-256."""
    chacha = {
        'cipher': 'ChaCha',
        'rounds': chacha_nrounds,
        'lengths': {'key': 32, 'nonce': 8},
    }
    xchacha = {
        'cipher': 'XChaCha',
        'rounds': chacha_nrounds,
        'delgatevariant': chacha,  # (sic) key name matches the reference impl
        'lengths': {'key': 32, 'nonce': 24},
    }
    aes = {'cipher': 'AES', 'lengths': {'block': 16, 'key': 32}}
    expected = {
        'cipher': 'Adiantum',
        'streamcipher': xchacha,
        'blockcipher': aes,
        'lengths': {'key': 32},
    }
    return variant == expected
# Kernel implementation-name suffixes to try for each primitive when
# --all-impls is given; which exist depends on the architecture and kernel
# config, so these are tested with required=False.
XCHACHA_IMPLS = ['generic', 'neon', 'simd']
AES_IMPLS = ['generic', 'arm', 'ce', 'aesni']
NHPOLY1305_IMPLS = ['generic', 'neon', 'sse2', 'avx2']
def main():
    """Parse arguments and test each Adiantum instantiation via AF_ALG."""
    parser = argparse.ArgumentParser(description="""Use AF_ALG to verify that
    the kernel implementation of Adiantum produces the same results as the
    reference implementation.""")
    parser.add_argument('--num-msgs', type=int, default=128,
                        help='number of messages to test per implementation')
    parser.add_argument('--avg-msgsize', type=int, default=1024,
                        help='typical message size in bytes')
    parser.add_argument('--all-impls', action='store_true',
                        help='test all available implementations, not just the default one')
    args = parser.parse_args()
    print('Arguments:')
    print(f'\tNumber of messages: {args.num_msgs}')
    print(f'\tTypical message size: {args.avg_msgsize}')
    print(f'\tTest non-default implementations: {args.all_impls}')
    print('')
    for chacha_nrounds in [12, 20]:
        # The lambda closes over chacha_nrounds late-bound, but it is only
        # used within this iteration, so that is safe here.
        variant_selector = lambda variant: \
            is_Adiantum_XChaCha_AES(variant, chacha_nrounds)
        # The generic instantiation must always be present.
        test_impl(args, f'adiantum(xchacha{chacha_nrounds},aes)',
                  variant_selector)
        if args.all_impls:
            # Arch-specific implementations are optional (required=False).
            for xchacha_impl in XCHACHA_IMPLS:
                for aes_impl in AES_IMPLS:
                    for nhpoly1305_impl in NHPOLY1305_IMPLS:
                        test_impl(args,
                                  f'adiantum(xchacha{chacha_nrounds}-{xchacha_impl},aes-{aes_impl},nhpoly1305-{nhpoly1305_impl})',
                                  variant_selector, required=False)
if __name__ == "__main__":
    main()
|
import socket
from datetime import datetime, timedelta
from functools import partial
from os import environ
from subprocess import check_output
from typing import List
import click
from click import Choice, ClickException
from plexapi.exceptions import NotFound, Unauthorized
from plexapi.myplex import MyPlexAccount, MyPlexResource, ResourceConnection
from plexapi.server import PlexServer
from plextraktsync.factory import factory
from plextraktsync.style import (comment, disabled, error, highlight, prompt,
success, title)
# Prompt/notice strings reused by the login flow below.
PROMPT_PLEX_PASSWORD = prompt("Please enter your Plex password")
PROMPT_PLEX_USERNAME = prompt("Please enter your Plex username or e-mail")
PROMPT_PLEX_RELOGIN = prompt("You already have Plex Access Token, do you want to log in again?")
SUCCESS_MESSAGE = success("Plex Media Server Authentication Token and base URL have been added to .env file")
NOTICE_2FA_PASSWORD = comment(
    "If you have 2 Factor Authentication enabled on Plex "
    "you can append the code to your password below (eg. passwordCODE)"
)
CONFIG = factory.config()
# NOTE(review): imported mid-file rather than at the top — presumably
# deliberate ordering after factory.config(); confirm before moving.
from InquirerPy import get_style, inquirer
# Shared InquirerPy colour theme for all select prompts in this module.
style = get_style({"questionmark": "hidden", "question": "ansiyellow", "pointer": "fg:ansiblack bg:ansiyellow", })
def myplex_login(username, password):
    """Prompt for Plex credentials until login succeeds; return the account.

    The previous answers are offered as defaults on each retry.
    """
    while True:
        username = click.prompt(PROMPT_PLEX_USERNAME, type=str, default=username)
        click.echo(NOTICE_2FA_PASSWORD)
        password = click.prompt(PROMPT_PLEX_PASSWORD, type=str, default=password, hide_input=True, show_default=False)
        try:
            account = MyPlexAccount(username, password)
        except Unauthorized as e:
            click.echo(error(f"Log in to Plex failed: {e}, Try again."))
            continue
        return account
def choose_managed_user(account: MyPlexAccount):
    """Ask which managed user to act as; None means use the account owner."""
    managed = [u.title for u in account.users()]
    if not managed:
        return None
    click.echo(success("Managed user(s) found:"))
    # The account owner is always listed first, followed by sorted users.
    choices = [account.username] + sorted(managed)
    user = inquirer.select(message="Select the user you would like to use:", choices=choices, default=None, style=style, qmark="", pointer=">",).execute()
    if user == account.username:
        return None
    # Sanity check, even the user can't input invalid user
    if account.user(user):
        return user
    return None
def prompt_server(servers: List[MyPlexResource]):
    """Print owned then unowned servers with their connections and prompt the
    user to pick one; returns the selected server name."""
    # Servers not seen within the last week are rendered with the disabled style.
    old_age = datetime.now() - timedelta(weeks=1)
    def fmt_server(s):
        # Pretty-print one server line plus its connection URIs.
        if s.lastSeenAt < old_age:
            decorator = disabled
        else:
            decorator = comment
        product = decorator(f"{s.product}/{s.productVersion}")
        platform = decorator(f"{s.device}: {s.platform}/{s.platformVersion}")
        click.echo(f"- {highlight(s.name)}: [Last seen: {decorator(str(s.lastSeenAt))}, Server: {product} on {platform}]")
        c: ResourceConnection
        for c in s.connections:
            click.echo(f"  {c.uri}")
    owned_servers = [s for s in servers if s.owned]
    unowned_servers = [s for s in servers if not s.owned]
    # Oldest-seen first within each group.
    sorter = partial(sorted, key=lambda s: s.lastSeenAt)
    server_names = []
    if owned_servers:
        click.echo(success(f"{len(owned_servers)} owned servers found:"))
        for s in sorter(owned_servers):
            fmt_server(s)
            server_names.append(s.name)
    if unowned_servers:
        click.echo(success(f"{len(unowned_servers)} unowned servers found:"))
        for s in sorter(unowned_servers):
            fmt_server(s)
            server_names.append(s.name)
    return inquirer.select(message="Select default server:", choices=sorted(server_names), default=None, style=style, qmark="", pointer=">",).execute()
def pick_server(account: MyPlexAccount):
    """Return the MyPlexResource to use, or None when none are available.

    A single server is returned without prompting; otherwise the user picks.
    """
    servers = account.resources()
    if not servers:
        return None
    if len(servers) == 1:
        return servers[0]
    chosen_name = prompt_server(servers)
    # Sanity check, even the user can't choose invalid resource
    return account.resource(chosen_name) or None
def choose_server(account: MyPlexAccount):
    """Let the user pick a server and connect to it; returns [server, plex].

    Loops until a connection succeeds; NotFound errors prompt a retry."""
    while True:
        try:
            server = pick_server(account)
            if not server:
                raise ClickException("Unable to find server from Plex account")
            # Connect to obtain baseUrl
            click.echo(title(f"Attempting to connect to {server.name}. This may take time and print some errors."))
            click.echo(title("Server connections:"))
            for c in server.connections:
                click.echo(f"  {c.uri}")
            plex = server.connect()
            # Validate connection again, the way we connect
            plex = PlexServer(token=server.accessToken, baseurl=plex._baseurl)
            return [server, plex]
        except NotFound as e:
            click.secho(f"{e}, Try another server, {type(e)}")
def has_plex_token():
    """Return True when a Plex access token is already stored in the config."""
    token = CONFIG["PLEX_TOKEN"]
    return token is not None
def plex_login_autoconfig():
    """Run the login flow with credentials taken from environment/config."""
    user = environ.get("PLEX_USERNAME", CONFIG["PLEX_USERNAME"])
    pwd = environ.get("PLEX_PASSWORD", None)
    login(user, pwd)
@click.command("plex-login")
@click.option("--username", help="Plex login", default=lambda: environ.get("PLEX_USERNAME", CONFIG["PLEX_USERNAME"]))
@click.option("--password", help="Plex password", default=lambda: environ.get("PLEX_PASSWORD", None))
def plex_login(username, password):
    """
    Log in to Plex Account to obtain Access Token. Optionally can use managed user on servers that you own.
    """
    # Option defaults are lazy lambdas so environment/CONFIG are consulted at
    # invocation time rather than at import time.
    login(username, password)
def login(username: str, password: str):
    """Interactive Plex login: obtain an access token and base URL, choose an
    optional managed user, persist everything to the config, and save it."""
    if has_plex_token():
        # Allow the user to bail out if a token is already configured.
        if not click.confirm(PROMPT_PLEX_RELOGIN, default=True):
            return
    account = myplex_login(username, password)
    click.echo(success("Login to MyPlex was successful!"))
    [server, plex] = choose_server(account)
    click.echo(success(f"Connection to {plex.friendlyName} established successfully!"))
    token = server.accessToken
    user = account.username
    if server.owned:
        # Owned servers may be used as a managed (home) user with its own token.
        managed_user = choose_managed_user(account)
        if managed_user:
            user = managed_user
            token = account.user(managed_user).get_token(plex.machineIdentifier)
    CONFIG["PLEX_USERNAME"] = user
    CONFIG["PLEX_TOKEN"] = token
    CONFIG["PLEX_BASEURL"] = plex._baseurl
    if environ.get("PTS_IN_DOCKER"):
        # Inside Docker, localhost is the container itself: resolve the host
        # address (Docker Desktop alias, then default gateway, then the
        # conventional bridge IP) for the fallback URL.
        try:
            host_ip = socket.gethostbyname("host.docker.internal")
        except socket.gaierror:
            try:
                host_ip = check_output("ip -4 route show default | awk '{ print $3 }'", shell=True).decode().rstrip()
            except Exception:
                host_ip = "172.17.0.1"
        CONFIG["PLEX_FALLBACKURL"] = f"http://{host_ip}:32400"
    else:
        CONFIG["PLEX_FALLBACKURL"] = "http://localhost:32400"
    CONFIG.save()
    click.echo(SUCCESS_MESSAGE)
|
# Read a value and report which str classification predicates it satisfies.
a = input('Digite algo:')
print('O tipo primitivo desse valor "{}" é: {}'.format(a, type(a)))
for label, result in [
    ('É um número', a.isnumeric()),
    ('É alfabético', a.isalpha()),
    ('É alfanúmerico', a.isalnum()),
    ('Só tem espaço', a.isspace()),
    ('Está em maiúscula', a.isupper()),
    ('Está em minuscúlo', a.islower()),
    ('Está capitalizado', a.istitle()),
]:
    print('"{}" {} ? "{}"'.format(a, label, result))
# Copyright 2019 1QBit
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Perform Meta-Löwdin localization.
The orbital localization of the canonical orbitals
using Meta-Löwdin localization is done here.
`pyscf.lo` is used.
For details, refer to:
Q. Sun et al., JCTC 10, 3784-3790 (2014).
"""
from pyscf.lo import orth
def meta_lowdin_localization(mol, mf):
    """Localize the orbitals using Meta-Löwdin localization.

    Args:
        mol (pyscf.gto.Mole): The molecule to simulate.
        mf (pyscf.scf.RHF): The mean field of the molecule.

    Returns:
        numpy.array: The localized orbitals (float64).
    """
    # `mf` is not used by Meta-Löwdin itself; it is accepted for interface
    # parity with the other localization routines.
    localized_orbitals = orth.orth_ao(mol, "meta_lowdin")
    return localized_orbitals
|
#!/afs/bx.psu.edu/project/pythons/linux-i686-ucs4/bin/python2.7
"""
Application to convert MAF file to AXT file, projecting to any two species.
Reads a MAF file from standard input and writes an AXT file to standard out;
some statistics are written to standard error. The user must specify the
two species of interest.
usage: %prog primary_species secondary_species < maf_file > axt_file
"""
__author__ = "Bob Harris (rsharris@bx.psu.edu)"
import sys
import copy
import bx.align.maf
import bx.align.axt
def usage(s=None):
    """Exit with the usage message; prefix it with error text `s` if given.

    sys.exit with a string prints it to stderr and exits with status 1.
    """
    message = """
maf_to_axt primary_species secondary_species < maf_file > axt_file
"""
    # `is None` (identity) instead of the original `== None` comparison.
    if s is None:
        sys.exit(message)
    else:
        sys.exit("%s\n%s" % (s, message))
def main():
    """Convert MAF on stdin to AXT on stdout, projected onto two species."""
    # parse the command line (positional args; name=value pairs are rejected)
    primary = None
    secondary = None
    args = sys.argv[1:]
    while (len(args) > 0):
        arg = args.pop(0)
        val = None
        fields = arg.split("=",1)
        if (len(fields) == 2):
            arg = fields[0]
            val = fields[1]
            if (val == ""):
                usage("missing a value in %s=" % arg)
        if (primary == None) and (val == None):
            primary = arg
        elif (secondary == None) and (val == None):
            secondary = arg
        else:
            usage("unknown argument: %s" % arg)
    if (primary == None):
        usage("missing primary species")
    if (secondary == None):
        usage("missing secondary species")
    # read the alignments and other info
    out = bx.align.axt.Writer(sys.stdout)
    axtsRead = 0
    mafsWritten = 0
    for mafBlock in bx.align.maf.Reader(sys.stdin):
        axtsRead += 1
        # skip blocks that lack a component for either species
        p = mafBlock.get_component_by_src_start(primary)
        if (p == None): continue
        s = mafBlock.get_component_by_src_start(secondary)
        if (s == None): continue
        # build a two-component alignment and drop columns that are gaps
        # in both species; empty results are not written
        axtBlock = bx.align.Alignment (mafBlock.score, mafBlock.attributes)
        axtBlock.add_component (clone_component(p))
        axtBlock.add_component (clone_component(s))
        remove_mutual_gaps (axtBlock)
        if (axtBlock.text_size == 0):
            continue
        out.write (axtBlock)
        mafsWritten += 1
    sys.stderr.write ("%d blocks read, %d written\n" % (axtsRead,mafsWritten))
def clone_component(c):
    """Return a copy of alignment component `c` with its text duplicated."""
    return bx.align.Component(
        c.src, c.start, c.size, c.strand, c.src_size, copy.copy(c.text))
def remove_mutual_gaps (block):
    """Remove alignment columns that are gaps in every component of `block`.

    Mutates each component's text and block.text_size in place.
    """
    if (len(block.components) == 0): return
    # Columns where at least one component has a non-gap character. A set
    # replaces the original "ix not in <list>" membership test, which made
    # this loop quadratic in the number of columns.
    keep = set()
    for c in block.components:
        for ix in range(0,block.text_size):
            if (c.text[ix] != "-"):
                keep.add(ix)
    kept_columns = sorted(keep)
    for c in block.components:
        c.text = "".join([c.text[ix] for ix in kept_columns])
    block.text_size = len(kept_columns)
# Script entry point.
if __name__ == "__main__": main()
|
from django.db import models
from django.core.validators import MinValueValidator
class Post(models.Model):
    # Symbolic values for the post-type choices below.
    news = 'news'
    article = 'article'
    # (value, label) choice pairs; NOTE(review): not referenced by any field
    # in this class — presumably consumed elsewhere; confirm before removing.
    Posts = [(news, 'news'), (article, 'article'), ('select', 'select')]
    choosing = models.BooleanField(default=False)
    time_in = models.DateTimeField(auto_now_add=True)  # set once at creation
    title = models.CharField(max_length=255, unique=True)
    text = models.CharField(max_length=255)
    rating = models.FloatField(default=0.0)
|
import pickle
import numpy as np
from flask import Flask, render_template, request
app = Flask(__name__)
# Load the pre-trained SVC model once at startup. Use a context manager so
# the file handle is closed (the original left it open).
# NOTE: pickle.load can execute arbitrary code — the model file must come
# from a trusted source.
with open('svc_trained_model.pkl', 'rb') as _model_file:
    model = pickle.load(_model_file)
@app.route('/')
def home():
    """Render the landing page with the prediction input form."""
    return render_template('index.html')
@app.route('/predict', methods=['POST'])
def predict():
    """Read integer features from the POSTed form, run the SVC model, and
    render the yes/no prediction back on the index page.

    Non-integer form values are reported to the user instead of raising an
    unhandled ValueError (HTTP 500).
    """
    try:
        features = [int(x) for x in request.form.values()]
    except ValueError:
        return render_template('index.html', prediction_text='Invalid input: all fields must be integers.')
    features = [np.array(features)]
    prediction = model.predict(features)
    output = prediction[0]
    return render_template('index.html', prediction_text = 'Patient have a Heart Disease?: {}'.format('No' if output==0 else 'Yes'))
if __name__ == '__main__':
    # debug=True enables the interactive debugger/reloader — development only.
    app.run(debug=True)
|
import gym
import numpy as np
import tensorflow as tf
from tensorflow.contrib.layers import fully_connected
def get_square_data(side_len):
    """Return a batch generator for a 2-D toy classification problem.

    The returned function draws `batch_size` points uniformly from the
    [0, side_len] x [0, side_len] square and labels a point 1 when
    x + y > side_len (above the anti-diagonal), else 0.
    """
    def sample(batch_size):
        points = np.random.rand(batch_size, 2) * side_len
        labels = (points.sum(axis=1) > side_len).astype(int)
        return points, labels
    return sample
class BinaryClassifier(gym.Env):
    """Gym environment whose agent supplies gradient updates for a small
    two-layer binary classifier (TF1 graph mode).

    Observations are the current flattened gradients, actions are the
    processed gradients to apply, and reward is the negative training loss.
    """
    def __init__(self,
                 n_grad_steps_example=1,
                 n_episodes_weight_reset=0,
                 batch_size=64,
                 done_every=10,
                 data_gen=get_square_data(5),
                 writer: tf.summary.FileWriter=None):
        """
        Create binary classifier
        Args:
            n_grad_steps_example: Number gradient steps to take before getting new data.
            n_episodes_weight_reset: Number times to reset before reinitializing model's parameters.
            batch_size: Batch size of data to use.
            done_every: Report done every `done_every` number of steps.
            data_gen: Function that produces data (takes no arguments).
            writer: FileWriter used to write out summary statistics for tensorboard.
        """
        super(BinaryClassifier, self).__init__()
        self.batch_size = batch_size
        self.done_every = done_every
        self.n_grad_steps = n_grad_steps_example
        self.n_episodes_weight_reset = n_episodes_weight_reset
        self.writer = writer
        self.n_episodes_since_weights_reset = 0
        self._setup_net()
        self.data = data_gen
        self.num_iters = 0
        self.count = 0
    def _setup_net(self):
        """Build the 2-4-2 classifier graph, cross-entropy loss, and grads."""
        with tf.variable_scope('model'):
            # TODO(jalex): Reduce size even more?
            self.inputs = tf.placeholder(tf.float32, name='inputs', shape=[None, 2])
            self.targets = tf.placeholder(tf.int32, name='targets', shape=[None])
            with tf.variable_scope('fc1'):
                self.fc1 = fully_connected(self.inputs, 4, activation_fn=tf.nn.leaky_relu)
            with tf.variable_scope('fc2'):
                self.fc2 = fully_connected(self.fc1, 2, activation_fn=tf.nn.leaky_relu)
            xent = tf.nn.sparse_softmax_cross_entropy_with_logits(
                labels=self.targets,
                logits=self.fc2,
                name='xent'
            )
            self.loss = tf.reduce_mean(xent, name='loss')
            if self.writer:
                tf.summary.scalar('loss', self.loss)
        self._setup_gradients()
    def _setup_gradients(self):
        """Size the action/observation spaces to the total parameter count and
        build an op that applies an externally supplied flat gradient vector."""
        parameters = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES)
        self.grads = tf.gradients(self.loss, parameters)
        num_W = [x.shape.num_elements() for x in parameters]
        total_num_W = sum(num_W)
        self.action_space = gym.spaces.Box(-np.inf, np.inf, shape=[total_num_W], dtype=np.float32)
        self.observation_space = gym.spaces.Box(-np.inf, np.inf, shape=[total_num_W], dtype=np.float32)
        self.processed_grads = tf.placeholder(tf.float32, name='processed_grads', shape=[total_num_W])
        apply_gradients_list = []
        start = 0
        # Slice the flat gradient vector back into per-parameter shapes.
        for param, n in zip(parameters, num_W):
            param_processed_grads = self.processed_grads[start: start + n]
            op = param.assign_sub(tf.reshape(param_processed_grads, param.shape))
            apply_gradients_list.append(op)
            start += n
        self.apply_gradients_op = tf.group(*apply_gradients_list, name='apply_gradients')
    def step(self, action):
        """
        Apply the gradients passed in.
        Args:
            action: Modified gradients to apply
        Returns:
            tuple of form (observations (gradients), reward, done boolean, info dict)
        """
        self.num_iters += 1
        done = self.num_iters >= self.done_every
        # NOTE(review): action is indexed as action[0][0] — presumably the
        # caller passes a nested batch; confirm against the agent interface.
        self.sess.run(self.apply_gradients_op, {self.processed_grads: action[0][0]})
        if self.num_iters % self.n_grad_steps == 0:
            self.X, self.y = self.data(self.batch_size)
        obs, rew = self._get_obs_rew()
        return obs, rew, done, {}
    def set_session(self, sess):
        # The TF session must be injected before step()/reset() are called.
        self.sess = sess
    def reset(self):
        """Draw a fresh batch and periodically reinitialize model weights."""
        self.X, self.y = self.data(self.batch_size)
        if self.n_episodes_weight_reset:
            if self.n_episodes_since_weights_reset % self.n_episodes_weight_reset == 0:
                model_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope='model')
                self.sess.run(tf.variables_initializer(model_vars))
                self.n_episodes_since_weights_reset = 0
            self.n_episodes_since_weights_reset += 1
        self.num_iters = 0
        obs, _ = self._get_obs_rew()
        return obs
    def _get_obs_rew(self):
        """Return (flattened gradients, -loss) for the current batch; also
        emit tensorboard summaries when a writer is attached."""
        feed = {self.inputs: self.X, self.targets: self.y}
        merged_summaries = tf.summary.merge_all(scope='model')
        if merged_summaries and self.writer:
            rew, grads, summaries = self.sess.run([-self.loss, self.grads, merged_summaries], feed)
            self.writer.add_summary(summaries, global_step=self.count)
            self.writer.flush()
        else:
            rew, grads = self.sess.run([-self.loss, self.grads], feed)
        self.count += 1
        obs = np.concatenate([x.flatten() for x in grads])
        return obs, rew
    def render(self, mode='human'):
        # No visualization for this environment.
        pass
|
from __future__ import absolute_import, division, print_function
import io
import numpy as np
from numpy.testing import assert_array_almost_equal, assert_array_equal
import pytest
from matplotlib.testing.decorators import image_comparison
import matplotlib.pyplot as plt
from matplotlib import patches, transforms
from matplotlib.path import Path
# NOTE: All of these tests assume that path.simplify is set to True
# (the default)
@image_comparison(baseline_images=['clipping'], remove_text=True)
def test_clipping():
    """Sine curve with y-limits that clip away most of the data."""
    xs = np.arange(0.0, 2.0, 0.01)
    ys = np.sin(2 * np.pi * xs)
    fig, ax = plt.subplots()
    ax.plot(xs, ys, linewidth=1.0)
    ax.set_ylim((-0.20, -0.28))
@image_comparison(baseline_images=['overflow'], remove_text=True)
def test_overflow():
    """A huge x value must not overflow path clipping/simplification."""
    xs = np.array([1.0, 2.0, 3.0, 2.0e5])
    fig, ax = plt.subplots()
    ax.plot(xs, np.arange(len(xs)))
    ax.set_xlim(xmin=2, xmax=6)
@image_comparison(baseline_images=['clipping_diamond'], remove_text=True)
def test_diamond():
    """Diamond outline clipped on all four sides of the view window."""
    xs = np.array([0.0, 1.0, 0.0, -1.0, 0.0])
    ys = np.array([1.0, 0.0, -1.0, 0.0, 1.0])
    fig, ax = plt.subplots()
    ax.plot(xs, ys)
    ax.set_xlim(xmin=-0.6, xmax=0.6)
    ax.set_ylim(ymin=-0.6, ymax=0.6)
def test_noise():
    """Simplifying a long noisy line yields the expected vertex count."""
    np.random.seed(0)
    x = np.random.uniform(size=(50000,)) * 50
    fig, ax = plt.subplots()
    p1 = ax.plot(x, solid_joinstyle='round', linewidth=2.0)
    path = p1[0].get_path()
    transform = p1[0].get_transform()
    path = transform.transform_path(path)
    simplified = path.cleaned(simplify=True)
    # The exact count is pinned to the current simplification implementation.
    assert simplified.vertices.size == 25512
def test_antiparallel_simplification():
    """Vertical back-and-forth (anti-parallel) segments collapse correctly.

    Each case plots x=0 segments that retrace themselves vertically and
    checks the simplified vertices (ignoring the trailing cleanup codes).
    """
    def _get_simplified(x, y):
        # Plot, simplify in display space, then map back to data space.
        fig, ax = plt.subplots()
        p1 = ax.plot(x, y)
        path = p1[0].get_path()
        transform = p1[0].get_transform()
        path = transform.transform_path(path)
        simplified = path.cleaned(simplify=True)
        simplified = transform.inverted().transform_path(simplified)
        return simplified
    # test ending on a maximum
    x = [0, 0, 0, 0, 0, 1]
    y = [.5, 1, -1, 1, 2, .5]
    simplified = _get_simplified(x, y)
    assert_array_almost_equal([[0., 0.5],
                               [0., -1.],
                               [0., 2.],
                               [1., 0.5]],
                              simplified.vertices[:-2, :])
    # test ending on a minimum
    x = [0, 0, 0, 0, 0, 1]
    y = [.5, 1, -1, 1, -2, .5]
    simplified = _get_simplified(x, y)
    assert_array_almost_equal([[0., 0.5],
                               [0., 1.],
                               [0., -2.],
                               [1., 0.5]],
                              simplified.vertices[:-2, :])
    # test ending in between
    x = [0, 0, 0, 0, 0, 1]
    y = [.5, 1, -1, 1, 0, .5]
    simplified = _get_simplified(x, y)
    assert_array_almost_equal([[0., 0.5],
                               [0., 1.],
                               [0., -1.],
                               [0., 0.],
                               [1., 0.5]],
                              simplified.vertices[:-2, :])
    # test no anti-parallel ending at max
    x = [0, 0, 0, 0, 0, 1]
    y = [.5, 1, 2, 1, 3, .5]
    simplified = _get_simplified(x, y)
    assert_array_almost_equal([[0., 0.5],
                               [0., 3.],
                               [1., 0.5]],
                              simplified.vertices[:-2, :])
    # test no anti-parallel ending in middle
    x = [0, 0, 0, 0, 0, 1]
    y = [.5, 1, 2, 1, 1, .5]
    simplified = _get_simplified(x, y)
    assert_array_almost_equal([[0., 0.5],
                               [0., 2.],
                               [0., 1.],
                               [1., 0.5]],
                              simplified.vertices[:-2, :])
# Only consider angles in 0 <= angle <= pi/2, otherwise
# using min/max will get the expected results out of order:
# min/max for simplification code depends on original vector,
# and if angle is outside above range then simplification
# min/max will be opposite from actual min/max.
@pytest.mark.parametrize('angle', [0, np.pi/4, np.pi/3, np.pi/2])
@pytest.mark.parametrize('offset', [0, .5])
def test_angled_antiparallel(angle, offset):
    """Anti-parallel simplification along a rotated line keeps the extreme
    points (and, with negative offsets, both extremes)."""
    scale = 5
    np.random.seed(19680801)
    # get 15 random offsets
    # TODO: guarantee offset > 0 results in some offsets < 0
    vert_offsets = (np.random.rand(15) - offset) * scale
    # always start at 0 so rotation makes sense
    vert_offsets[0] = 0
    # always take the first step the same direction
    vert_offsets[1] = 1
    # compute points along a diagonal line
    x = np.sin(angle) * vert_offsets
    y = np.cos(angle) * vert_offsets
    # will check these later
    x_max = x[1:].max()
    x_min = x[1:].min()
    y_max = y[1:].max()
    y_min = y[1:].min()
    if offset > 0:
        # offsets straddle zero: both extremes survive simplification
        p_expected = Path([[0, 0],
                           [x_max, y_max],
                           [x_min, y_min],
                           [x[-1], y[-1]],
                           [0, 0]],
                          codes=[1, 2, 2, 2, 0])
    else:
        # all offsets non-negative: only the maximum survives
        p_expected = Path([[0, 0],
                           [x_max, y_max],
                           [x[-1], y[-1]],
                           [0, 0]],
                          codes=[1, 2, 2, 0])
    p = Path(np.vstack([x, y]).T)
    p2 = p.cleaned(simplify=True)
    assert_array_almost_equal(p_expected.vertices,
                              p2.vertices)
    assert_array_equal(p_expected.codes, p2.codes)
def test_sine_plus_noise():
    """Simplifying a slightly noisy sine yields the expected vertex count."""
    np.random.seed(0)
    x = (np.sin(np.linspace(0, np.pi * 2.0, 50000)) +
         np.random.uniform(size=(50000,)) * 0.01)
    fig, ax = plt.subplots()
    p1 = ax.plot(x, solid_joinstyle='round', linewidth=2.0)
    path = p1[0].get_path()
    transform = p1[0].get_transform()
    path = transform.transform_path(path)
    simplified = path.cleaned(simplify=True)
    # The exact count is pinned to the current simplification implementation.
    assert simplified.vertices.size == 25240
@image_comparison(baseline_images=['simplify_curve'], remove_text=True)
def test_simplify_curve():
    """CURVE3 path containing a NaN vertex renders without errors."""
    pp1 = patches.PathPatch(
        Path([(0, 0), (1, 0), (1, 1), (np.nan, 1), (0, 0), (2, 0), (2, 2),
              (0, 0)],
             [Path.MOVETO, Path.CURVE3, Path.CURVE3, Path.CURVE3, Path.CURVE3,
              Path.CURVE3, Path.CURVE3, Path.CLOSEPOLY]),
        fc="none")
    fig, ax = plt.subplots()
    ax.add_patch(pp1)
    ax.set_xlim((0, 2))
    ax.set_ylim((0, 2))
@image_comparison(baseline_images=['hatch_simplify'], remove_text=True)
def test_hatch():
    """Hatching must survive when a rectangle is viewed through a tiny window."""
    fig, ax = plt.subplots()
    rect = plt.Rectangle((0, 0), 1, 1, fill=False, hatch="/")
    ax.add_patch(rect)
    ax.set_xlim((0.45, 0.55))
    ax.set_ylim((0.45, 0.55))
@image_comparison(baseline_images=['fft_peaks'], remove_text=True)
def test_fft_peaks():
    """Sharp FFT peaks survive simplification with the expected vertex count."""
    fig, ax = plt.subplots()
    t = np.arange(65536)
    p1 = ax.plot(abs(np.fft.fft(np.sin(2*np.pi*.01*t)*np.blackman(len(t)))))
    path = p1[0].get_path()
    transform = p1[0].get_transform()
    path = transform.transform_path(path)
    simplified = path.cleaned(simplify=True)
    # The exact count is pinned to the current simplification implementation.
    assert simplified.vertices.size == 36
def test_start_with_moveto():
    """A path entirely outside the clip box collapses to a single MOVETO."""
    # Should be entirely clipped away to a single MOVETO
    data = b"""
ZwAAAAku+v9UAQAA+Tj6/z8CAADpQ/r/KAMAANlO+v8QBAAAyVn6//UEAAC6ZPr/2gUAAKpv+v+8
BgAAm3r6/50HAACLhfr/ewgAAHyQ+v9ZCQAAbZv6/zQKAABepvr/DgsAAE+x+v/lCwAAQLz6/7wM
AAAxx/r/kA0AACPS+v9jDgAAFN36/zQPAAAF6Pr/AxAAAPfy+v/QEAAA6f36/5wRAADbCPv/ZhIA
AMwT+/8uEwAAvh77//UTAACwKfv/uRQAAKM0+/98FQAAlT/7/z0WAACHSvv//RYAAHlV+/+7FwAA
bGD7/3cYAABea/v/MRkAAFF2+//pGQAARIH7/6AaAAA3jPv/VRsAACmX+/8JHAAAHKL7/7ocAAAP
rfv/ah0AAAO4+/8YHgAA9sL7/8QeAADpzfv/bx8AANzY+/8YIAAA0OP7/78gAADD7vv/ZCEAALf5
+/8IIgAAqwT8/6kiAACeD/z/SiMAAJIa/P/oIwAAhiX8/4QkAAB6MPz/HyUAAG47/P+4JQAAYkb8
/1AmAABWUfz/5SYAAEpc/P95JwAAPmf8/wsoAAAzcvz/nCgAACd9/P8qKQAAHIj8/7cpAAAQk/z/
QyoAAAWe/P/MKgAA+aj8/1QrAADus/z/2isAAOO+/P9eLAAA2Mn8/+AsAADM1Pz/YS0AAMHf/P/g
LQAAtur8/10uAACr9fz/2C4AAKEA/f9SLwAAlgv9/8ovAACLFv3/QDAAAIAh/f+1MAAAdSz9/ycx
AABrN/3/mDEAAGBC/f8IMgAAVk39/3UyAABLWP3/4TIAAEFj/f9LMwAANm79/7MzAAAsef3/GjQA
ACKE/f9+NAAAF4/9/+E0AAANmv3/QzUAAAOl/f+iNQAA+a/9/wA2AADvuv3/XDYAAOXF/f+2NgAA
29D9/w83AADR2/3/ZjcAAMfm/f+7NwAAvfH9/w44AACz/P3/XzgAAKkH/v+vOAAAnxL+//04AACW
Hf7/SjkAAIwo/v+UOQAAgjP+/905AAB5Pv7/JDoAAG9J/v9pOgAAZVT+/606AABcX/7/7zoAAFJq
/v8vOwAASXX+/207AAA/gP7/qjsAADaL/v/lOwAALZb+/x48AAAjof7/VTwAABqs/v+LPAAAELf+
/788AAAHwv7/8TwAAP7M/v8hPQAA9df+/1A9AADr4v7/fT0AAOLt/v+oPQAA2fj+/9E9AADQA///
+T0AAMYO//8fPgAAvRn//0M+AAC0JP//ZT4AAKsv//+GPgAAojr//6U+AACZRf//wj4AAJBQ///d
PgAAh1v///c+AAB+Zv//Dz8AAHRx//8lPwAAa3z//zk/AABih///TD8AAFmS//9dPwAAUJ3//2w/
AABHqP//ej8AAD6z//+FPwAANb7//48/AAAsyf//lz8AACPU//+ePwAAGt///6M/AAAR6v//pj8A
AAj1//+nPwAA/////w=="""
    import base64
    if hasattr(base64, 'encodebytes'):
        # Python 3 case
        decodebytes = base64.decodebytes
    else:
        # Python 2 case
        decodebytes = base64.decodestring
    # np.frombuffer replaces the deprecated np.fromstring for binary input.
    verts = np.frombuffer(decodebytes(data), dtype='<i4')
    verts = verts.reshape((len(verts) // 2, 2))
    path = Path(verts)
    segs = path.iter_segments(transforms.IdentityTransform(),
                              clip=(0.0, 0.0, 100.0, 100.0))
    segs = list(segs)
    assert len(segs) == 1
    assert segs[0][1] == Path.MOVETO
def test_throw_rendering_complexity_exceeded():
    """With simplification off, an over-complex path raises OverflowError."""
    # NOTE(review): mutates global rcParams without restoring it afterwards.
    plt.rcParams['path.simplify'] = False
    xx = np.arange(200000)
    yy = np.random.rand(200000)
    # A NaN forces the path to be split, exercising the complexity check.
    yy[1000] = np.nan
    fig, ax = plt.subplots()
    ax.plot(xx, yy)
    with pytest.raises(OverflowError):
        fig.savefig(io.BytesIO())
@image_comparison(baseline_images=['clipper_edge'], remove_text=True)
def test_clipper():
    """Zig-zag line clipped at the axes edge renders correctly."""
    dat = (0, 1, 0, 2, 0, 3, 0, 4, 0, 5)
    fig = plt.figure(figsize=(2, 1))
    fig.subplots_adjust(left=0, bottom=0, wspace=0, hspace=0)
    ax = fig.add_axes((0, 0, 1.0, 1.0), ylim=(0, 5), autoscale_on=False)
    ax.plot(dat)
    ax.xaxis.set_major_locator(plt.MultipleLocator(1))
    ax.yaxis.set_major_locator(plt.MultipleLocator(1))
    ax.xaxis.set_ticks_position('bottom')
    ax.yaxis.set_ticks_position('left')
    # View only the tail of the data so clipping happens at the left edge.
    ax.set_xlim(5, 9)
@image_comparison(baseline_images=['para_equal_perp'], remove_text=True)
def test_para_equal_perp():
    """Parallel-then-perpendicular segments plus many repeated points."""
    xs = np.array([0, 1, 2, 1, 0, -1, 0, 1] + [1] * 128)
    ys = np.array([1, 1, 2, 1, 0, -1, 0, 0] + [0] * 128)
    fig, ax = plt.subplots()
    ax.plot(xs + 1, ys + 1)
    ax.plot(xs + 1, ys + 1, 'ro')
@image_comparison(baseline_images=['clipping_with_nans'])
def test_clipping_with_nans():
    """Clipping interacts correctly with NaN-broken line segments."""
    xs = np.linspace(0, 3.14 * 2, 3000)
    ys = np.sin(xs)
    xs[::100] = np.nan
    fig, ax = plt.subplots()
    ax.plot(xs, ys)
    ax.set_ylim(-0.25, 0.25)
def test_clipping_full():
    """iter_segments clipping: fully-outside paths vanish, inside ones stay."""
    # Every vertex far outside the clip box: nothing should remain.
    p = Path([[1e30, 1e30]] * 5)
    simplified = list(p.iter_segments(clip=[0, 0, 100, 100]))
    assert simplified == []
    # Fully inside the clip box: both segments survive unchanged.
    p = Path([[50, 40], [75, 65]], [1, 2])
    simplified = list(p.iter_segments(clip=[0, 0, 100, 100]))
    assert ([(list(x), y) for x, y in simplified] ==
            [([50, 40], 1), ([75, 65], 2)])
    # A single MOVETO inside the box also survives.
    p = Path([[50, 40]], [1])
    simplified = list(p.iter_segments(clip=[0, 0, 100, 100]))
    assert ([(list(x), y) for x, y in simplified] ==
            [([50, 40], 1)])
|
# Form implementation generated from reading ui file 'generator.ui'
#
# Created by: PyQt6 UI code generator 6.2.2
#
# WARNING: Any manual changes made to this file will be lost when pyuic6 is
# run again. Do not edit this file unless you know what you are doing.
from PyQt6 import QtCore, QtGui, QtWidgets
class Ui_MainWindow(object):
    """Auto-generated (pyuic6) UI: a number label, min/max line edits, and a
    "Generate" button. Regenerate from generator.ui instead of hand-editing."""
    def setupUi(self, MainWindow):
        # Build and place all widgets with fixed geometry.
        MainWindow.setObjectName("MainWindow")
        MainWindow.resize(153, 200)
        self.centralwidget = QtWidgets.QWidget(MainWindow)
        self.centralwidget.setObjectName("centralwidget")
        self.pushButton = QtWidgets.QPushButton(self.centralwidget)
        self.pushButton.setGeometry(QtCore.QRect(20, 150, 113, 32))
        self.pushButton.setObjectName("pushButton")
        self.label = QtWidgets.QLabel(self.centralwidget)
        self.label.setGeometry(QtCore.QRect(20, 20, 111, 61))
        font = QtGui.QFont()
        font.setPointSize(20)
        self.label.setFont(font)
        self.label.setAlignment(QtCore.Qt.AlignmentFlag.AlignCenter)
        self.label.setObjectName("label")
        self.minValue = QtWidgets.QLineEdit(self.centralwidget)
        self.minValue.setGeometry(QtCore.QRect(20, 90, 113, 21))
        self.minValue.setObjectName("minValue")
        self.maxValue = QtWidgets.QLineEdit(self.centralwidget)
        self.maxValue.setGeometry(QtCore.QRect(20, 120, 113, 21))
        self.maxValue.setObjectName("maxValue")
        MainWindow.setCentralWidget(self.centralwidget)
        self.retranslateUi(MainWindow)
        QtCore.QMetaObject.connectSlotsByName(MainWindow)
    def retranslateUi(self, MainWindow):
        # Install translatable texts, defaults, and placeholders.
        _translate = QtCore.QCoreApplication.translate
        MainWindow.setWindowTitle(_translate("MainWindow", "Random Number Generator"))
        self.pushButton.setText(_translate("MainWindow", "Generate"))
        self.label.setText(_translate("MainWindow", "50"))
        self.minValue.setText(_translate("MainWindow", "1"))
        self.minValue.setPlaceholderText(_translate("MainWindow", "min"))
        self.maxValue.setText(_translate("MainWindow", "100"))
        self.maxValue.setPlaceholderText(_translate("MainWindow", "max"))
|
# Integer-only check of whether sqrt(a) + sqrt(b) < sqrt(c):
# equivalent to c - a - b > 0 and 4*a*b < (c - a - b)^2.
a, b, c = map(int, input().split())
d = c - a - b
print('Yes' if d > 0 and 4 * a * b < d * d else 'No')
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2018-10-12 08:13
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated (Django 1.11): widens Project.description to an
    # unbounded TextField and caps Project.url at 100 characters.
    dependencies = [
        ('clone', '0001_initial'),
    ]
    operations = [
        migrations.AlterField(
            model_name='project',
            name='description',
            field=models.TextField(),
        ),
        migrations.AlterField(
            model_name='project',
            name='url',
            field=models.CharField(max_length=100),
        ),
    ]
|
""" Implementation of WaterFrame.max_diff(parameter1, parameter2) """
def max_diff(self, parameter1, parameter2):
    """
    It calculates the maximum difference between the values of two parameters.
    Parameters
    ----------
    parameter1: str
        Key name of column 1 to calculate the difference.
    parameter2: str
        Key name of column 2 to calculate the difference.
    Returns
    -------
    where: index
        The position (index) of WaterFrame.data.
    value: float
        Value of the maximum difference.
    """
    # Compute the absolute difference series once instead of twice;
    # idxmax/max on the same series are consistent by construction.
    diff = (self.data[parameter1] - self.data[parameter2]).abs()
    where = diff.idxmax()
    value = diff.max()
    return (where, value)
|
import ctypes
import ctypes.util
import re
import struct
from caw_memory_editors import MacOSX, CannotReadException
# World name to search for in the game process, and the replacement text;
# both are raw UTF-8 byte strings because the memory scan operates on bytes.
BASE_WORLD_NAME = "Von Haunt Estate".encode('utf-8')
REPLACE_WORLD_NAME = "Apple Fritters are tasty".encode('utf-8')
class ChangeWorldNames:
    """Experiment that locates a world-name string inside a running
    "The Sims 4" process and redirects in-memory references to a
    replacement string.

    All memory access goes through the injected ``process`` handle
    (a ``caw_memory_editors.MacOSX`` instance).
    """

    def __init__(self, process):
        # Memory-editor handle used for all allocate/read/write calls.
        self.process = process

    def run(self):
        """Allocate both strings in the target process and dump them.

        NOTE(review): scratch/debug code — it only demonstrates that
        allocate/write/read round-trips; ``find_lots`` stays commented out.
        """
        # self.find_lots()
        # +1 everywhere leaves room for the terminating NUL byte.
        a = self.process.allocate_bytes(len(BASE_WORLD_NAME)+1)
        b = self.process.allocate_bytes(len(REPLACE_WORLD_NAME)+1)
        self.process.write_bytes(a.value, ctypes.create_string_buffer(BASE_WORLD_NAME + b'\x00'), len(BASE_WORLD_NAME)+1)
        self.process.write_bytes(b.value, ctypes.create_string_buffer(REPLACE_WORLD_NAME + b'\x00'), len(REPLACE_WORLD_NAME)+1)
        print(a, a.value, self.process.read_bytes(a.value, bytes=8))
        print(b, b.value, self.process.read_bytes(b.value, bytes=8))
        # Repoint b at a's buffer; both reads below should now show the same bytes.
        b.value = a.value
        print(a, a.value, self.process.read_bytes(a.value, bytes=8))
        print(b, b.value, self.process.read_bytes(b.value, bytes=8))

    def find_lots(self):
        """Return memory locations referencing the base world name."""
        potential_lot_addrs = self.process.find_in_memory(BASE_WORLD_NAME)
        return self.filter_to_relevant_lots(potential_lot_addrs)

    def filter_to_relevant_lots(self, lot_addrs):
        """For each pointer to a candidate name address, overwrite the pointer
        so it references a freshly allocated copy of REPLACE_WORLD_NAME."""
        mem_regions = self.process.all_regions()
        replace_addr = self.process.allocate_bytes(len(REPLACE_WORLD_NAME)+1)
        self.process.write_bytes(replace_addr.value, ctypes.create_string_buffer(REPLACE_WORLD_NAME + b'\x00'), len(REPLACE_WORLD_NAME)+1)
        # Pack the replacement string's address in native pointer layout
        # ('L' = unsigned long).
        replace_addr_bytes = struct.pack('L', replace_addr.value)
        refs_to_name = []
        for addr in lot_addrs:
            print(addr)
            addr_bytes = struct.pack('L', addr)
            # Find every memory cell holding a pointer to this candidate.
            refs_to_name += self.process.find_in_memory(addr_bytes, mem_regions=mem_regions)
            print("HMMM: " + str(struct.pack('L', addr)) + " - " + str(len(struct.pack('L', addr))) + " - " + str(refs_to_name))
        print(refs_to_name)
        print(replace_addr_bytes)
        print(len(replace_addr_bytes))
        for ref_addr in refs_to_name:
            print("\n--1-----\n{}\n---1-----\n".format(self.process.read_bytes(ref_addr, bytes=len(replace_addr_bytes))))
            # Clobber the pointer so it now targets the replacement string.
            self.process.write_bytes(ref_addr, ctypes.create_string_buffer(replace_addr_bytes), len(replace_addr_bytes))
            print("\n---2----\n{}\n---2-----\n".format(self.process.read_bytes(ref_addr, bytes=len(replace_addr_bytes))))
# Attach to a running "The Sims 4" process and run the scratch experiment.
sims_process = MacOSX("The Sims 4")
change_names = ChangeWorldNames(sims_process)
change_names.run()
|
# Generated by Django 3.2.4 on 2021-09-06 22:09
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add a required ``degree`` choice field to ``members.Profile``."""

    dependencies = [
        ('members', '0004_alter_profile_picture'),
    ]

    operations = [
        migrations.AddField(
            model_name='profile',
            name='degree',
            # default='BSC' only seeds existing rows during the migration;
            # preserve_default=False removes the default afterwards.
            field=models.CharField(choices=[('BSC', 'Bachelor of Science'), ('MSC', 'Master of Science')], default='BSC', max_length=3),
            preserve_default=False,
        ),
    ]
|
import sys
def parse_url(url, proj, DRS_len):
    """Extract a dot-joined DRS identifier from an ESGF-style URL.

    Splits *url* on '/', finds the first path component equal to *proj*,
    and joins that component plus the following ``DRS_len - 1`` components
    with dots. If *proj* does not occur, the identifier starts at the
    first component (``match`` stays 0).
    """
    parts = url.split('/')  # BUG FIX: was `line.split('/')` — `line` was undefined
    match = 0
    for i, pp in enumerate(parts):
        if pp == proj:
            match = i
            break
    # BUG FIX: slice from `match`, not the leaked loop variable `i`
    # (which pointed at the last component when *proj* was absent).
    return '.'.join(parts[match:(match + DRS_len)])
def parse_openid(x):
    """Return ``(host, user)`` taken from slash components 2 and 5 of an
    openid URL such as ``https://<host>/esgf-idp/openid/<user>``."""
    segments = x.split('/')
    host, user = segments[2], segments[5]
    return host, user
|
import torch
import torch.nn as nn
import numpy as np
import cv2
from libs.backbone import BackboneVGG16, SSD
from libs.data import getDataLoader
from libs.utils import getAllName, seedReproducer
import os
# Pin all CUDA work in this script to GPU 0.
os.environ["CUDA_VISIBLE_DEVICES"] = '0'
def predict(test_loader, model, device):
    """Run the backbone over every batch of *test_loader* in eval mode,
    printing per-batch debug information; always returns 1."""
    model.eval()
    softmax = nn.Softmax(dim=1)
    with torch.no_grad():
        for data, img_name in test_loader:
            print(img_name)
            batch = data.to(device)
            logits = model(batch)
            print(logits[0][:10])
            scores = softmax(logits)
            print(scores[0][:10])
            # Move scores to host memory for numpy-side inspection.
            score_array = scores.data.cpu().numpy()
            print(score_array.shape)
            print(np.max(score_array[0]), np.argmax(score_array[0]))
    return 1
if __name__ == "__main__":
    # Fixed seed for reproducible runs.
    random_seed = 42
    seedReproducer(random_seed)
    device = torch.device("cuda")
    kwargs = {'num_workers': 1, 'pin_memory': True}
    # Number of object classes (VOC has 20).
    classes = 20
    # ImageNet-pretrained VGG16 weights used to initialise the SSD backbone.
    pretrained_path = "./data/models/vgg16-397923af.pth"
    model = SSD(classes, pretrained_path).to(device)
    print(model)
    voc_dir = "../data/VOC2007/trainval/"
    img_path = "./data/test"
    img_names = getAllName(img_path)
    # 300x300 inputs, batch size 1, "testBackbone" loader mode.
    test_loader = getDataLoader("testBackbone", voc_dir, img_names, 300, 1, kwargs)
    print("len test_loader: ", len(test_loader))
    predict(test_loader, model, device)
|
from typing import Iterable
def folder(
        api,
        url: str,
        http_method: str,
        cloud_path: str,
        limit=100,
        offset=0,
        sort=None) -> dict:
    """List the contents of the cloud folder at *cloud_path*.

    BUG FIX: the original used a mutable dict as the default for *sort*,
    which is shared across calls; ``None`` is used as the sentinel instead
    and replaced with the same default value per call.
    """
    if sort is None:
        sort = {"type": "name", "order": "asc"}
    data = {
        "home": cloud_path,
        "token": api.csrf_token,
        "limit": limit,
        "offset": offset,
        "sort": sort,
    }
    return api(url, http_method, params=data)
def folder_add(
        api,
        url: str,
        http_method: str,
        cloud_path: str) -> dict:
    """Create a folder at *cloud_path*; name conflicts are auto-renamed."""
    payload = {
        "home": cloud_path,
        "conflict": "rename",
        "token": api.csrf_token,
    }
    return api(url, http_method, data=payload)
def folder_find(
        api,
        url: str,
        http_method: str,
        finding: str,
        cloud_path: str,
        limit=10000) -> dict:
    """Search for *finding* under *cloud_path*, returning up to *limit* hits."""
    query = {
        "q": finding,
        "path": cloud_path,
        "limit": limit,
        "token": api.csrf_token,
    }
    return api(url, http_method, params=query)
def folder_viruscan(
        api,
        url: str,
        http_method: str,
        hash_list: Iterable[str]) -> dict:
    """Request a virus scan for the files identified by *hash_list*."""
    body = {
        "hash_list": hash_list,
        "token": api.csrf_token,
    }
    return api(url, http_method, json=body)
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Test fixtures and locators for the e-test.aps-market.com tender UI.
# Locators are XPath strings unless noted; date pickers are driven by
# JavaScript snippets executed in the browser.

# Tender-owner account credentials.
owner_users = {'email': 'qa845@meta.ua',
               'password': 'selenide'}
# Provider accounts (email -> password); spare accounts kept for reference.
provider_users = {'lexins@bigmir.net': 'fondriest'
                  # 'qatest55@meta.ua': 'selenide',
                  # 'test-supply@meta.ua': '0564305488',
                  # 'Rudakov.d@meta.ua': '0564305488'
                  }
broker = {'url': 'https://e-test.aps-market.com/tender'}
# login
login_button = '//*[@id="login_ribbon"]/a'
username_field = '//*[@id="LoginBox"]'
pass_field = '//*[@id="LoginPasswordBox"]'
submit_login_button = '//*[@id="ButtonLogin"]'
# create tender
create_tender_button = '//*[@id="ButtonTenderAdd"]/a'
below_threshold_button = '//*[@id="menuContainer"]/li[1]/a'
input_title = '//*[@id="edtTenderTitle"]'
input_description = '//*[@id="edtTenderDetail"]'
input_value_amount = '//*[@id="edtTenderBudget"]'
input_min_step = '//*[@id="edtMinStep"]'
# JS snippets: each sets a datetimepicker value relative to "now"
# (enquiry +3/+5 minutes, tender +7 minutes / +5 days).
input_start_enquiry = """ var date = new Date();
date.setHours(date.getHours(), date.getMinutes() + 3);
$("#date_enquiry_start").datetimepicker({ allowTimes: [], format: "d.m.Y H:i", value: date })"""
input_end_enquiry = """ var date = new Date();
date.setHours(date.getHours(), date.getMinutes() + 5);
$("#date_enquiry_end").datetimepicker({ allowTimes: [], format: "d.m.Y H:i", value: date })"""
input_start_tender = """ var date = new Date();
date.setHours(date.getHours(), date.getMinutes() + 7);
$("#date_tender_start").datetimepicker({ allowTimes: [], format: "d.m.Y H:i", value: date })"""
input_end_tender = """ var date = new Date();
date.setDate(date.getDate() + 5);
$("#date_tender_end").datetimepicker({ allowTimes: [], format: "d.m.Y H:i", value: date })"""
next_button = '//*[@id="CreateTender"]'
# tender item form
add_item = '//*[@id="AddPoss"]'
input_item_description = '//*[@id="itemDescription"]'
input_quantity = '//*[@id="editItemQuantity"]'
select_unit = '//*[@id="window_itemadd"]/div[2]/div/div[2]/div[2]/div/div[2]/div/button/span[1]'
input_unit = '//*[@id="input_MeasureItem"]'
select_unit_1 = '//*[@id="window_itemadd"]/div[2]/div/div[2]/div[2]/div/div[2]/div/div/ul/li[1]/a'
# CPV / DKPP classifier pickers
click_cpv_button = '//*[@id="button_add_cpv"]'
select_cpv_1item = '//*[@id="03000000-1_anchor"]'
confirm_cpv = '//*[@id="populate_cpv"]'
select_dkpp = '//*[@id="button_add_dkpp"]'
select_dkpp_1item = '//*[@id="000_NONE_anchor"]'
confirm_dkpp = '//*[@id="populate_dkpp"]'
# Delivery window: +10 .. +13 days from "now".
input_delivery_start = """ var date = new Date();
date.setDate(date.getDate() + 10);
$("#date_delivery_start").datetimepicker({ allowTimes: [], format: "d.m.Y H:i", value: date })"""
input_delivery_end = """ var date = new Date();
date.setDate(date.getDate() + 13);
$("#date_delivery_end").datetimepicker({ allowTimes: [], format: "d.m.Y H:i", value: date })"""
# delivery address
delivery_checkbox = '//*[@id="shiping"]/label'
click_dropdown_country = '//*[@id="div_combo_selectCountry"]/div/button'
input_country = '//*[@id="input_CountryItem"]'
select_country = '//*[@id="div_combo_selectCountry"]/div/div/ul/li[230]/a/span[1]'
click_dropdown_region = '//*[@id="HideShow_div"]/div[1]/div/div[2]/div/div/button'
input_region = '//*[@id="input_RegionsItem"]'
select_region = '//*[@id="HideShow_div"]/div[1]/div/div[2]/div/div/div/ul/li[1]/a'
input_postal_code = '//*[@id="post_code"]'
input_locality = '//*[@id="addr_locality"]'
input_delivery_address = '//*[@id="addr_street"]'
input_latitude = '//*[@id="latitude"]'
input_longitude = '//*[@id="longitude"]'
input_height = '//*[@id="elevation"]'
save_changes = '//*[@id="AddItemButton"]'
# tender document upload
add_tender_doc = '//*[@id="addFile"]'
select_type = '//*[@id="TypesFilesDropDown"]'
select_doc_type = '//*[@id="TypesFilesDropDown"]/option[5]'
file_input = '//*[@id="FileUpload"]'
submit_tender_doc_upload = '//*[@id="UploadFile"]'
delete_doc = '//*[@id="DelFileBtn_"]'
# publish tender
create_tender_draft = '//*[@id="sumbit"]'
submit_create_tender = '//*[@id="TenderPublishTop"]'
decline_electr_signature = '//*[@id="PublishConfirm"]/div[2]/div/div[2]/div[1]/div[2]/div[2]/label'
submit_popup = '//*[@id="PublishConfirm"]/div[2]/div/div[3]/button[1]'
# search for tender
tender_get_id_locator = '//*[@id="titleTenderUcode"]'
all_tenders = '//*[@id="selectjournal_name"]'
select_all_tenders = '//*[@id="selectTypeJournal"]/li[1]/a'
input_search_field = '//*[@id="search_text"]'
search_tender_button = '//*[@id="search_btn"]'
select_tender = '//p[contains(text(), "[ТЕСТУВАННЯ] LOAD_TEST_Below_Threshold")]'
# # make bid
all_bids = u'//a[contains(text(), "Пропозиції")]'
input_bid_amount = '//*[@id="editBid"]'
input_bid_doc = '//*[@id="FileUpload_bids"]'
submit_bid_doc = '//*[@id="UploadFileToBid"]'
submit_bid_button = '//*[@id="AddNoLotBid"]'
delete_bid_button = '(//*[@class="btn btn-yellow dt_button"])[2]'
|
from collections import defaultdict
from dataclasses import dataclass, field
from pathlib import Path
from typing import DefaultDict, Dict, Generator, List
from ward._errors import ParameterisationError
from ward._fixtures import FixtureCache
from ward.fixtures import TeardownResult
from ward.models import Scope
from ward.testing import Test, TestResult
@dataclass
class Suite:
    """A collection of tests plus the fixture cache used while running them."""

    # All tests discovered for this run.
    tests: List[Test]
    # Shared cache of resolved fixtures; torn down per-scope as runs progress.
    cache: FixtureCache = field(default_factory=FixtureCache)

    @property
    def num_tests(self) -> int:
        """
        Returns: The number of tests in the suite, *before* taking parameterisation into account.
        """
        return len(self.tests)

    @property
    def num_tests_with_parameterisation(self) -> int:
        """
        Returns: The number of tests in the suite, *after* taking parameterisation into account.
        """
        return sum(test.find_number_of_instances() for test in self.tests)

    def _test_counts_per_module(self) -> Dict[Path, int]:
        """
        Returns: A dictionary mapping a module Path to the number of tests that can be found within that module.
        """
        module_paths = [test.path for test in self.tests]
        counts: DefaultDict[Path, int] = defaultdict(int)
        for path in module_paths:
            counts[path] += 1
        return counts

    def generate_test_runs(
        self,
        dry_run: bool = False,
        capture_output: bool = True,
    ) -> Generator[TestResult, None, None]:
        """
        Run tests

        Returns a generator which yields test results
        """
        # Track how many tests remain per module so module-scoped fixtures
        # can be torn down as soon as the last test in a module has run.
        num_tests_per_module = self._test_counts_per_module()
        for test in self.tests:
            num_tests_per_module[test.path] -= 1
            try:
                generated_tests = test.get_parameterised_instances()
            except ParameterisationError as e:
                yield test.fail_with_error(e)
                continue
            for generated_test in generated_tests:
                result = generated_test.run(self.cache, dry_run=dry_run)
                # Test-scoped fixtures are torn down immediately after each
                # (parameterised) test instance completes.
                teardown_results: List[
                    TeardownResult
                ] = self.cache.teardown_fixtures_for_scope(
                    Scope.Test,
                    scope_key=generated_test.id,
                    capture_output=capture_output,
                )
                if teardown_results:
                    try:
                        # There could be exceptions in the teardown code of multiple fixtures
                        # injected into a single test. Take the first exception and associate
                        # that with the test.
                        first_teardown_error_result: TeardownResult = next(
                            r
                            for r in teardown_results
                            if r.captured_exception is not None
                        )
                        # Any exceptions that occur during the teardown of a test-scoped fixture
                        # are considered to be an error in any test that depends on said fixture
                        # NOTE(review): the error is attached via `test`, not
                        # `generated_test` — confirm this is intentional for
                        # parameterised tests.
                        result = test.fail_with_error(
                            first_teardown_error_result.captured_exception  # type: ignore[arg-type]
                        )
                    except StopIteration:
                        # There were no exceptions while tearing down the fixtures.
                        pass
                yield result
            if num_tests_per_module[test.path] == 0:
                # Last test of this module has run: release module-scoped fixtures.
                self.cache.teardown_fixtures_for_scope(
                    Scope.Module,
                    scope_key=test.path,
                    capture_output=capture_output,
                )
        # All tests have run: release globally-scoped fixtures.
        self.cache.teardown_global_fixtures(capture_output=capture_output)
|
import pandas as pd
import numpy as np
import scipy as s
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn import preprocessing
plt.style.use('fivethirtyeight')
pd.set_option('display.max_columns', 500)
from pylab import cm
import warnings
warnings.filterwarnings('ignore')
from pywaffle import Waffle
from copy import deepcopy
from sklearn import feature_selection
def prepare():
    """Load books.csv, normalise the column names, add a VarCount column with
    the number of genres each respondent reads, and return the frame together
    with the per-format respondent counts."""
    df = pd.read_csv(r"./books.csv")
    df.columns = ["Timestamp", "Format", "Beverage", "Music", "Target",
                  "Frequency", "Connect", "Variety"]
    # Count of variety of books read by each respondent.
    df["VarCount"] = df.Variety.apply(lambda answer: len(answer.split(",")))
    counts_by_format = df.groupby('Format')['Format'].count()
    return df, counts_by_format
def plot1(dataf, format_count):
    """Two-panel readership overview: reader counts per book format and a
    boxplot of genre variety per format, annotating each bar with its count."""
    fig, ax = plt.subplots(nrows=1, ncols=2, figsize=(22,8))
    sns.countplot(dataf.Format, palette="summer", ax=ax[0])
    ax[0].set_xlabel("Book format")
    ax[0].set_ylabel("Preferred choice of readers")
    sns.boxplot(dataf.Format, dataf.VarCount, palette='summer', ax=ax[1])
    ax[1].set_xlabel("Book format")
    ax[1].set_ylabel("No. of different genres read")
    plt.suptitle("Readership analysis", fontsize=20)
    # Count labels, nudged slightly left of each bar centre and one unit up.
    ax[0].text(-0.05,format_count["Paperback"]+1, s=format_count["Paperback"])
    ax[0].text(0.95,format_count["Hard cover"]+1, s=format_count["Hard cover"])
    ax[0].text(1.95,format_count["Ebook"]+1, s=format_count["Ebook"])
    ax[0].text(2.95,format_count["Audiobook"]+1, s=format_count["Audiobook"])
def postprocess(dataf):
    """Restrict to physical-book readers and build the aggregates used by the
    later plots.

    Returns (dataf, genres, pphc, d, bv): the filtered frame, genre counts,
    target-preference counts, connect-preference counts and beverage counts.
    NOTE(review): mutates *dataf* in place via the ``replace(inplace=True)``
    calls below.
    """
    # Keep only physical-format readers.
    dataf = dataf[(dataf.Format=="Paperback") | (dataf.Format=="Hard cover")]
    d = dataf[["Variety"]] #Only the variety column
    d.Variety = d.Variety.apply(lambda s:s.strip(' ').split(",")) #Picking up the varieties of genres
    genres = []
    for i in d.Variety:
        for j in i:
            genres.append(j.strip(' ')) #Storing all the genre varieties in genres
    genres = pd.DataFrame(genres).groupby(0)[0].count()
    genres.drop(['In fiction- fantasy fiction to be exact. Adventure books. Enid Blyton( a little kiddish ik). I also would like to read all the Vedas one by one.','None other than those required for my coursework','Something else'],axis=0,inplace=True) #Dropping single ultra specific entrie(s)
    genres=genres.sort_values(0) #Sorting the genres dataframe
    dataf.Connect.replace({'Yes! no. Well maybe....':'May connect','Yes':'Want to connect','No':'Do not want to connect'},inplace=True)
    d = dataf.groupby('Connect')['Connect'].count() #Grouping preferences of "Want to connect" attribute
    pphc = pd.DataFrame(dataf.Target) #Main column of interest
    pphc.Target.replace({"I wish I could read them without buying a lot of books":"Want to read without buying",
    "I want to build a library duh!":"Want to build a library"}, inplace=True)
    pphc = pphc.groupby('Target')['Target'].count() #Grouping by interest to read books without buying
    dataf.Beverage.replace({'No drink necessary':'I do not drink but I know things','None':'I do not drink but I know things','No drink necessary':'I do not drink but I know things','I drink but not with books':'I do not drink but I know things'},inplace=True)
    bv =pd.DataFrame(dataf.groupby('Beverage')['Beverage'].count()) #Grouping by interest of beverage consumption
    bv.drop(['There is no connection between books and beverage'],axis=0,inplace=True)
    bv.columns=['Count']
    return dataf, genres, pphc, d, bv
def plot2(dataf, genres):
    """Diversity panels: horizontal bar chart of genre popularity and a line
    plot of reading frequency versus genre variety."""
    fig,ax=plt.subplots(nrows=1, ncols=2,figsize=(20,8))
    genres.plot(kind='barh', color="dodgerblue", ax=ax[0])
    #sns.catplot(genres, ax=ax[0])
    plt.suptitle('Diversity among readers', fontsize=20)
    ax[0].set_ylabel('Genre')
    ax[0].set_xlabel('Genre read by different people')
    sns.lineplot(x="VarCount", y="Frequency",data=dataf, ax=ax[1],color='royalblue')
    ax[1].set_xlabel('No. of different genres read by a reader')
    ax[1].set_ylabel('No. of books read by the reader in the last six months')
    #ax[0].axhline(genres.mean(), color='red')
    #ax[0].text(40,genres.mean()+0.5,"Mean", fontsize=18)
def plot3(pphc,d):
    """Nested donut chart: outer ring shows book-cafe interest (*pphc*),
    inner ring shows willingness to connect (*d*)."""
    plt.figure(figsize=(20,8))
    plt.pie(pphc, autopct='%2.1f%%',colors=['lavender','royalblue'], explode=[0.02,0.02], pctdistance=1.05,labels=pphc.index, labeldistance=1.1)
    plt.title("Interest among readers about a book cafe", fontsize=22)
    #draw circle
    centre_circle = plt.Circle((0,0),0.70,fc='white')
    fig = plt.gcf()
    fig.gca().add_artist(centre_circle)
    # Equal aspect ratio ensures that pie is drawn as a circle
    plt.axis('equal')
    # Inner pie rendered at a smaller radius inside the white circle.
    plt.pie(d, radius=0.85, autopct='%1.1f%%', explode=[0.02,0.02,0.02], pctdistance=0.7, labels=d.index, labeldistance=0.55, colors=['mintcream','palegreen','mediumspringgreen'])
    plt.tight_layout()
    plt.show()
def plot4(bv):
    """Waffle chart (three rows) of the beverage preferences in *bv*."""
    plt.figure(
        FigureClass=Waffle,
        rows=3,
        values=bv.Count,
        labels=list(bv.index),
        figsize=(20, 8),
    )
    plt.title('Beverage preference of readers', fontsize=20)
def plot5(dataf):
    """Waffle chart of connect-interest among respondents who do not drink
    while reading."""
    da = dataf[dataf.Beverage=='I do not drink but I know things']
    x = pd.DataFrame(da.groupby('Connect')['Connect'].count())
    x.columns=['Count']
    fig = plt.figure(FigureClass = Waffle, rows = 1, values = x.Count, labels = list(x.index) , figsize=(20,4))
    plt.title('Interest to meet fellow book readers among people who do not drink while reading')
def plot6(dmod):
    """Collapse beverage/connect/music answers into coarse categories and
    return a sorted count table of the (Connect, Music, Beverage) groups.

    NOTE(review): mutates *dmod* in place via ``replace(inplace=True)``.
    """
    dmod.Beverage.replace({'I do not drink but I know things':'No drink required','None':'No drink required','No drink necessary':'No drink required','I drink but not with books':'No drink required','Depends upon mood and time of day':'No drink required','There is no connection between books and beverage':'No drink required'},inplace=True)
    dmod.Connect.replace({'Yes! no. Well maybe....':'Maybe'},inplace=True)
    dmod.Music.replace({'Yes, like my life':'Yes to music','Nope':'No to music'},inplace=True)
    dk = dmod[["Beverage","Connect","Music"]].groupby(['Connect','Music','Beverage'])[["Beverage"]].count()
    dk.columns=["Count"]
    dk.sort_values(["Connect","Music"], ascending=[False,True], inplace=True)
    # Keep two slices of the sorted table (rows 0-6 and 13-19).
    dk = pd.concat([dk.iloc[0:7,:],dk.iloc[13:20,:]])
    return dk
def modprocess(datax):
    """One-hot encode the categorical survey columns and binarise the target,
    returning a numeric frame for feature selection.

    NOTE(review): mutates *datax* in place (replace/drop with inplace=True).
    """
    datax = datax[datax.Beverage!='There is no connection between books and beverage']
    datax.Beverage.replace({'I do not drink but I know things':'No drink required','None':'No drink required','No drink necessary':'No drink required','I drink but not with books':'No drink required','Depends upon mood and time of day':'No drink required'},inplace=True)
    datax.Connect.replace({'Yes':'Yes to connect','No':'No to connect','Yes! no. Well maybe....':'Open to connect'}, inplace=True)
    datax.Music.replace({'Nope':'No to music','Yes, like my life':'Yes to music'},inplace=True)
    # get_dummies columns are assigned under sorted unique-value names so the
    # dummy column order matches the column labels.
    datax[list(pd.DataFrame(datax["Format"].unique())[0].sort_values())] = pd.get_dummies(datax.Format)
    datax[list(pd.DataFrame(datax["Beverage"].unique())[0].sort_values())] = pd.get_dummies(datax.Beverage)
    datax[list(pd.DataFrame(datax["Music"].unique())[0].sort_values())] = pd.get_dummies(datax.Music)
    datax.Target.replace({"I wish I could read them without buying a lot of books":1,"I want to build a library duh!":0}, inplace=True)
    datax[list(pd.DataFrame(datax["Connect"].unique())[0].sort_values())] = pd.get_dummies(datax.Connect)
    # Drop the original categorical columns plus reference dummy levels.
    datax.drop(['Format','Beverage','Music','Connect','Timestamp','Variety'], axis=1, inplace=True)
    datax.drop(['Alcohol','No to connect','Audiobook','Ebook'],axis=1, inplace=True)
    return datax
def plot7(dframe):
    """Rank features by ANOVA F-score against the Target column and plot the
    scores as a horizontal bar chart."""
    x = dframe.drop(['Target'],axis=1)
    y = dframe.Target
    from sklearn.feature_selection import SelectKBest, chi2, f_classif
    fs = SelectKBest(f_classif, k="all")
    fs.fit(x,y)
    # Pair each feature name with its F-score.
    sc = pd.concat([pd.DataFrame(x.columns),pd.DataFrame(fs.scores_)], axis=1)
    sc.columns = ['Feature','Score']
    sc.sort_values('Score', inplace=True, ascending=False)
    plt.figure(figsize=(20,10))
    sns.barplot(sc.Score, sc.Feature, color='seagreen')
    plt.title('Feature importance among people wanting to read books without buying')
def modeller(dmod):
    """Build the (X, y) design matrix for modelling: binary genre indicator
    columns from Variety, dummy-encoded categoricals, and a 0/1 target.

    NOTE(review): mutates *dmod* in place (new columns, Beverage replace).
    """
    # One indicator column per genre of interest.
    dmod['Fiction'] = dmod.Variety.apply(lambda x:'Fiction' in x).astype('int')
    dmod['Science fiction'] = dmod.Variety.apply(lambda x:'Science fiction' in x).astype('int')
    dmod['Humour'] = dmod.Variety.apply(lambda x:'Humour' in x).astype('int')
    dmod['Philosophy'] = dmod.Variety.apply(lambda x:'Philosophy' in x).astype('int')
    dmod['History'] = dmod.Variety.apply(lambda x:'History' in x).astype('int')
    dmod['Business'] = dmod.Variety.apply(lambda x:'Business' in x).astype('int')
    dmod['Literature'] = dmod.Variety.apply(lambda x:'Literature' in x).astype('int')
    dmod['Biography'] = dmod.Variety.apply(lambda x:'Biography' in x).astype('int')
    dmod['Travel'] = dmod.Variety.apply(lambda x:'Travel' in x).astype('int')
    dmod['Religion'] = dmod.Variety.apply(lambda x:'Religion' in x).astype('int')
    y = dmod.Target
    # Merge rare beverage categories before dummy-encoding.
    dmod.Beverage.replace({'Alcohol':'No drink required','Juice':'No drink required'}, inplace=True)
    dmod =pd.concat([dmod,pd.get_dummies(dmod.Format, drop_first=True),pd.get_dummies(dmod.Beverage, prefix='Beverage_', drop_first=True),
    pd.get_dummies(dmod.Music, drop_first=True), pd.get_dummies(dmod.Connect, prefix='Connect', drop_first=True)],axis=1)
    X=dmod.drop(['Timestamp','Variety','Format','Connect','Beverage','Music','Target'],axis=1)
    y=y.replace({'I wish I could read them without buying a lot of books':1 , 'I want to build a library duh!':0})
    return X,y
def get_palette(pal, n):
    """Return *n* hex colour strings sampled from the named colormap *pal*."""
    # BUG FIX: this module only does `from pylab import cm`, so the bare name
    # `matplotlib` used by the original (`matplotlib.colors.rgb2hex`) was
    # undefined and raised NameError. Import the converter explicitly.
    from matplotlib.colors import rgb2hex
    cmap = cm.get_cmap(pal, n)  # e.g. "PiYG", quantised to n colours
    # rgb2hex accepts rgb or rgba tuples.
    return [rgb2hex(cmap(i)) for i in range(cmap.N)]
|
import numpy as np
from collections import OrderedDict
import argparse
import torch
import torch.nn as nn
import torch.nn.functional as F
import math
import random
import sys
import os
import time
import argparse
import Data
import Model
from transformers import AdamW
# ---- Command-line configuration -------------------------------------------
parser = argparse.ArgumentParser(description='Train and evaluate an HRED')
parser.add_argument('--batch-size', type=int, default=1, metavar='N', help='input batch size for training (default: 10)')
parser.add_argument('--no-cuda', action='store_true', default=False, help='enables CUDA training (default: True)')
parser.add_argument('--vocab-size', type=int, default=2**13, metavar='V', help='Size of vocabulary (default: 20000)')
parser.add_argument('--maxlen', type=int, default=50, metavar='ML', help='Maximum Length of an utterance')
parser.add_argument('--word-size', type=int, default=128, metavar='W', help='Size of word embeddings')
parser.add_argument('--hidden-size', type=int, default=128, metavar='H', help='Size of hidden embeddings')
parser.add_argument('--goal-len', type=int, default=500, metavar='GL', help='Maximum Length of an utterance')
parser.add_argument('--resume', action='store_true', default=False, help='Resume an old model (default: False)')
parser.add_argument('--lr', type=float, default=.000001, metavar='LR', help='Learning Rate (default: .00015)')
parser.add_argument('--seed', type=int, default=1, metavar='S', help='random seed (default: 1)')
parser.add_argument('--val-step', type=int, default=3, metavar='ES', help='how many batches to wait before evaluating 1 test batch')
parser.add_argument('--log-step', type=int, default=100, metavar='LS', help='how many batches to wait before logging training status')
parser.add_argument('--save-step', type=int, default=100, metavar='SS', help='how many batches to wait before saving model')
parser.add_argument('--data', type=str, default=None, help='dataset')
parser.add_argument('--model', type=str, default=None, help='dataset')
parser.add_argument('--bert-model', type=str, default='bert-base-uncased', help='pretrained bert model for goal')
parser.add_argument('--base-dir', type=str, default='/dccstor/gpandey11/gaurav/', help='A directiory that contains a data and models folder')
parser.add_argument('--best', action='store_true', default=False, help='Load the best model so far')
parser.add_argument('--num-epochs', type=int, default=18, metavar='E', help='Number of epochs for training the model')
parser.add_argument('--save-name', type=str, default="", help='Name of model to be saved')
parser.add_argument('--load-name', type=str, default="", help='Name of model to be loaded')
parser.add_argument('--max-steps', type=int, default=200000, help='Max steps per epoch')
parser.add_argument("--max_norm", type=float, default=1.0, help="Clipping gradient norm")
args = parser.parse_args()

# If only one of save-name/load-name was given, mirror it into the other.
if args.save_name == "":
    args.save_name = args.load_name
if args.load_name == "":
    args.load_name = args.save_name
# Data and model directory names default to each other; at least one must
# be provided.
if args.data is None:
    if args.model is None:
        sys.exit("You need to specify atleast one of data-dir and model-dir!!!!!!")
    else:
        args.data = args.model
elif args.model is None:
    args.model = args.data

args.cuda = not args.no_cuda and torch.cuda.is_available()
args.data_dir = args.base_dir + "data/" + args.data.lower() + "/"
args.model_dir = args.base_dir + "models/" + args.model.lower() + "/"
if not os.path.exists(args.model_dir):
    os.mkdir(args.model_dir)
# Checkpoint file names encode the embedding/hidden/batch sizes so runs with
# different hyper-parameters do not clobber each other.
args.load_path = args.model_dir + "checkpoint_hred_user_" + args.load_name + "_" + str(args.word_size) + "_" + str(args.hidden_size) + "_" + str(args.batch_size) + '.pth.tar'
args.save_path = args.model_dir + "checkpoint_hred_user_" + args.save_name + "_" + str(args.word_size) + "_" + str(args.hidden_size) + "_" + str(args.batch_size) + '.pth.tar'
args.best_path = args.model_dir + "checkpoint_hred_user_" + args.save_name + "_best" + "_" + str(args.word_size) + "_" + str(args.hidden_size) + "_" + str(args.batch_size) + '.pth.tar'

print("DATA LOADING......")
data = Data.SubData(data_dir = args.data_dir, vocab_size=args.vocab_size)
print("DATA LOADING DONE!!!")
print(data.tokenizer.vocab_size)
# Use the tokenizer's actual vocabulary size from here on.
args.vocab_size = data.tokenizer.vocab_size
print("The vocab size is {}".format(args.vocab_size))
# device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
def init_model(args, data):
    """Build (or resume) the Siamese model and its AdamW optimizer.

    When ``args.resume`` is set, the checkpoint at ``args.load_path`` must
    contain the original construction args under the 'args' key; optimizer
    state is then restored by ``model.load``. Returns (data, model, optimizer).
    """
    print(args.resume)
    if args.resume:
        print(args.load_path)
        checkpoint = torch.load(args.load_path)
        if 'args' in checkpoint:
            print("model parameters detected in checkpoint")
            # Rebuild the model with the *saved* hyper-parameters, not the
            # current command line.
            load_args = checkpoint['args']
            model = Model.Siamese(load_args)
        else:
            sys.exit("Model parameters not detected in checkpoint!!! ")
        # Move to GPU before restoring optimizer state so tensors match devices.
        if args.cuda:
            model = model.cuda()
        optimizer = model.load(checkpoint, args)
    else:
        model = Model.Siamese(args)
        if args.cuda:
            model = model.cuda()
        optimizer = AdamW(params=model.parameters(), lr=args.lr, correct_bias=True)
    return data, model, optimizer
def train(epoch, start=-1):
    """Run one optimisation step on the batch starting at index *start*.

    Uses the module-level ``data``, ``model``, ``optimizer`` and ``args``.
    Returns (loss_value, accuracy_flag); the flag uses asymmetric thresholds
    (negatives correct below 0.2, positives above 0.7) and assumes the whole
    batch shares the label of its first element.
    """
    input_ids, token_type_ids, attention_mask, labels = data.get_batch(start=start)
    if args.cuda:
        input_ids, token_type_ids, attention_mask = input_ids.cuda(), token_type_ids.cuda(), attention_mask.cuda()
        labels = labels.cuda()
    model.train()
    output, ploss = model(input_ids, token_type_ids, attention_mask, labels)
    optimizer.zero_grad()
    ploss.backward()
    # torch.nn.utils.clip_grad_norm_(model.parameters(), args.max_norm)
    optimizer.step()
    if labels[0].item() == 0:
        accuracy = (output.item()<0.2)
    else:
        accuracy = (output.item()>0.7)
    return ploss.item(), accuracy
def validate(start):
    """Evaluate one validation batch starting at index *start*.

    Mirrors ``train`` without the optimisation step; the caller wraps this
    in ``torch.no_grad()``. Returns (loss_value, accuracy_flag).
    """
    input_ids, token_type_ids, attention_mask, labels = data.get_batch(start=start, train=False)
    if args.cuda:
        input_ids, token_type_ids, attention_mask = input_ids.cuda(), token_type_ids.cuda(), attention_mask.cuda()
        labels =labels.cuda()
    model.eval()
    output, ploss = model(input_ids, token_type_ids, attention_mask, labels)
    if labels[0].item() == 0:
        accuracy = (output.item()<0.2)
    else:
        accuracy = (output.item()>0.7)
    return ploss.item(), accuracy
# ---- Training loop ---------------------------------------------------------
data, model, optimizer = init_model(args, data)
# Best validation loss seen so far; initialised above any realistic value.
vloss_min = 200
t0 = time.time()
num_steps_per_epoch = int(len(data.train_input)/args.batch_size)
if num_steps_per_epoch > args.max_steps:
    num_steps_per_epoch = args.max_steps
for epoch in range(args.num_epochs):
    print("EPOCH : ", epoch)
    # Per-epoch accumulators for training/validation loss and accuracy.
    start = 0
    ploss = 0
    nloss = 0
    vploss = 0
    vnloss = 0
    niter = 0
    nviter = 0
    acc = 0
    vacc = 0
    for step in range(0, num_steps_per_epoch):
        #Train on training data
        p, a = train(epoch+1, start=start)
        ploss += p
        niter += 1
        start += args.batch_size
        acc += a
        #Validate on validation data (one batch every val_step train steps)
        if niter%args.val_step==0:
            with torch.no_grad():
                p, a = validate(nviter)
                vploss += p
                vacc += a
                nviter += 1
        if niter%args.log_step==0:
            t1 = time.time()
            print(epoch, start, "LOSS : {0:.3f}".format(ploss/niter), "{0:.3f}".format(vploss/nviter), "ACC : {0:.3f}".format(acc/niter), "{0:.3f}".format(vacc/nviter), "{0:.3f}".format(t1-t0))
            t0 = t1
            sys.stdout.flush()
        if niter%args.save_step==0:
            model.save(args.save_path, optimizer, args)
    print("="*50)
    # Always checkpoint at epoch end; additionally track the best model by
    # mean validation loss.
    model.save(args.save_path, optimizer, args)
    if vploss/nviter < vloss_min:
        print('SAVE', args.best_path, '\n', )
        vloss_min = vploss/nviter
        model.save(args.best_path, optimizer, args)
|
# Demo script: emit each status message on its own line.
for message in (
    'Omolewa is teaching a class',
    'Lanre is still making changes',
    'Omolewa has made a change too',
):
    print(message)
|
# coding: utf-8
# pylint: disable= invalid-name, unused-import
"""For compatibility and optional dependencies."""
import abc
import os
import sys
from pathlib import PurePath
# This compat module assumes Python 3 semantics throughout.
assert (sys.version_info[0] == 3), 'Python 2 is no longer supported.'
# pylint: disable=invalid-name, redefined-builtin
# Single string type on Python 3 (bytes are handled separately).
STRING_TYPES = (str,)
def py_str(x):
    """Convert a C string (UTF-8 bytes) back into a Python str."""
    return str(x, 'utf-8')
###############################################################################
# START NUMPY PATHLIB ATTRIBUTION
###############################################################################
# os.PathLike compatibility used in Numpy:
# https://github.com/numpy/numpy/tree/v1.17.0
# Attribution:
# https://github.com/numpy/numpy/blob/v1.17.0/numpy/compat/py3k.py#L188-L247
# Backport os.fs_path, os.PathLike, and PurePath.__fspath__
if sys.version_info[:2] >= (3, 6):
    # Native support: alias the stdlib names directly.
    os_fspath = os.fspath
    os_PathLike = os.PathLike
else:
    # Pre-3.6: emulate the os.PathLike protocol (backported from NumPy).
    def _PurePath__fspath__(self):
        return str(self)

    class os_PathLike(abc.ABC):
        """Abstract base class for implementing the file system path protocol."""

        @abc.abstractmethod
        def __fspath__(self):
            """Return the file system path representation of the object."""
            raise NotImplementedError

        @classmethod
        def __subclasshook__(cls, subclass):
            # PurePath predates __fspath__, so treat it as path-like explicitly.
            if issubclass(subclass, PurePath):
                return True
            return hasattr(subclass, '__fspath__')

    def os_fspath(path):
        """Return the path representation of a path-like object.

        If str or bytes is passed in, it is returned unchanged. Otherwise the
        os.PathLike interface is used to get the path representation. If the
        path representation is not str or bytes, TypeError is raised. If the
        provided path is not str, bytes, or os.PathLike, TypeError is raised.
        """
        if isinstance(path, (str, bytes)):
            return path
        # Work from the object's type to match method resolution of other magic
        # methods.
        path_type = type(path)
        try:
            path_repr = path_type.__fspath__(path)
        except AttributeError:
            if hasattr(path_type, '__fspath__'):
                raise
            if issubclass(path_type, PurePath):
                return _PurePath__fspath__(path)
            raise TypeError("expected str, bytes or os.PathLike object, "
                            "not " + path_type.__name__)
        if isinstance(path_repr, (str, bytes)):
            return path_repr
        raise TypeError("expected {}.__fspath__() to return str or bytes, "
                        "not {}".format(path_type.__name__,
                                        type(path_repr).__name__))
###############################################################################
# END NUMPY PATHLIB ATTRIBUTION
###############################################################################
# pandas (optional): fall back to bare `object`/None placeholders so that
# isinstance checks elsewhere degrade gracefully when pandas is missing.
try:
    from pandas import DataFrame, Series
    from pandas import MultiIndex
    from pandas import concat as pandas_concat

    PANDAS_INSTALLED = True
except ImportError:
    MultiIndex = object
    DataFrame = object
    Series = object
    pandas_concat = None
    PANDAS_INSTALLED = False
# dt
# Optional dependency: datatable. The Frame class name differs across
# datatable releases, so pick whichever attribute exists.
try:
    # Workaround for #4473, compatibility with dask
    if sys.__stdin__ is not None and sys.__stdin__.closed:
        sys.__stdin__ = None
    import datatable

    if hasattr(datatable, "Frame"):
        DataTable = datatable.Frame
    else:
        # Older datatable releases exposed the frame class as DataTable.
        DataTable = datatable.DataTable
    DT_INSTALLED = True
except ImportError:

    # pylint: disable=too-few-public-methods
    class DataTable(object):
        """ dummy for datatable.DataTable """

    DT_INSTALLED = False
# cudf
# Optional dependency: cuDF (GPU dataframes); placeholders mirror the
# pandas fallback above so isinstance() checks stay valid without cudf.
try:
    from cudf import DataFrame as CUDF_DataFrame
    from cudf import Series as CUDF_Series
    from cudf import MultiIndex as CUDF_MultiIndex
    from cudf import concat as CUDF_concat

    CUDF_INSTALLED = True
except ImportError:
    CUDF_DataFrame = object
    CUDF_Series = object
    CUDF_MultiIndex = object
    CUDF_INSTALLED = False
    CUDF_concat = None
# sklearn
# Optional dependency: scikit-learn. The XGB* aliases are the names the
# rest of the package uses, so it never imports sklearn directly.
try:
    from sklearn.base import BaseEstimator
    from sklearn.base import RegressorMixin, ClassifierMixin
    from sklearn.preprocessing import LabelEncoder

    try:
        from sklearn.model_selection import KFold, StratifiedKFold
    except ImportError:
        # Older scikit-learn (< 0.18) kept these in cross_validation.
        from sklearn.cross_validation import KFold, StratifiedKFold

    SKLEARN_INSTALLED = True

    XGBModelBase = BaseEstimator
    XGBRegressorBase = RegressorMixin
    XGBClassifierBase = ClassifierMixin

    XGBKFold = KFold
    XGBStratifiedKFold = StratifiedKFold
    XGBLabelEncoder = LabelEncoder
except ImportError:
    SKLEARN_INSTALLED = False

    # used for compatibility without sklearn
    XGBModelBase = object
    XGBClassifierBase = object
    XGBRegressorBase = object

    XGBKFold = None
    XGBStratifiedKFold = None
    XGBLabelEncoder = None
# dask
# Optional dependency: dask + distributed; all names default to None when
# the distributed stack is not installed.
try:
    import dask
    from dask import delayed
    from dask import dataframe as dd
    from dask import array as da
    from dask.distributed import Client, get_client
    from dask.distributed import comm as distributed_comm
    from dask.distributed import wait as distributed_wait
    from distributed import get_worker as distributed_get_worker

    DASK_INSTALLED = True
except ImportError:
    dd = None
    da = None
    Client = None
    delayed = None
    get_client = None
    distributed_comm = None
    distributed_wait = None
    distributed_get_worker = None
    dask = None

    DASK_INSTALLED = False
# scipy / sparse
# Optional dependency: pydata/sparse and scipy.sparse.
try:
    import sparse
    import scipy.sparse as scipy_sparse

    SCIPY_INSTALLED = True
except ImportError:
    # NOTE(review): the other optional-dependency fallbacks above bind None
    # for missing modules; these bind False. Both are falsy, but the
    # inconsistency could surprise callers doing `is None` checks — confirm
    # intent before changing.
    sparse = False
    scipy_sparse = False
    SCIPY_INSTALLED = False
|
# -*- coding: utf-8 -*-
"""
Pycharm
2019/3/28 10:25
zc-bnlp
RonWong
"""
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
# Export this package's modules as members:
from .algorithm_type_ns_service import *
from .dhcp_relay_profile import *
from .dhcp_relay_service import *
from .dhcp_server_ip_pool import *
from .dhcp_server_profile import *
from .ether_type_ns_service import *
from .firewall_section import *
from .get_certificate import *
from .get_edge_cluster import *
from .get_firewall_section import *
from .get_ip_pool import *
from .get_logical_tier0_router import *
from .get_logical_tier1_router import *
from .get_mac_pool import *
from .get_ns_group import *
from .get_ns_service import *
from .get_policy_certificate import *
from .get_policy_context_profile import *
from .get_policy_edge_cluster import *
from .get_policy_edge_node import *
from .get_policy_gateway_policy import *
from .get_policy_gateway_qos_profile import *
from .get_policy_group import *
from .get_policy_i_pv6_dad_profile import *
from .get_policy_i_pv6_ndra_profile import *
from .get_policy_ip_block import *
from .get_policy_ip_discovery_profile import *
from .get_policy_ip_pool import *
from .get_policy_lb_app_profile import *
from .get_policy_lb_client_ssl_profile import *
from .get_policy_lb_monitor import *
from .get_policy_lb_persistence_profile import *
from .get_policy_lb_server_ssl_profile import *
from .get_policy_mac_discovery_profile import *
from .get_policy_qos_profile import *
from .get_policy_realization_info import *
from .get_policy_security_policy import *
from .get_policy_segment_realization import *
from .get_policy_segment_security_profile import *
from .get_policy_service import *
from .get_policy_site import *
from .get_policy_spoofguard_profile import *
from .get_policy_tier0_gateway import *
from .get_policy_tier1_gateway import *
from .get_policy_transport_zone import *
from .get_policy_vm import *
from .get_policy_vni_pool import *
from .get_switching_profile import *
from .get_transport_zone import *
from .icmp_type_ns_service import *
from .igmp_type_ns_service import *
from .ip_block import *
from .ip_block_subnet import *
from .ip_discovery_switching_profile import *
from .ip_pool import *
from .ip_pool_allocation_ip_address import *
from .ip_protocol_ns_service import *
from .ip_set import *
from .l4_port_set_ns_service import *
from .lb_client_ssl_profile import *
from .lb_cookie_persistence_profile import *
from .lb_fast_tcp_application_profile import *
from .lb_fast_udp_application_profile import *
from .lb_passive_monitor import *
from .lb_pool import *
from .lb_server_ssl_profile import *
from .lb_service import *
from .lb_source_ip_persistence_profile import *
from .lbhttp_application_profile import *
from .lbhttp_forwarding_rule import *
from .lbhttp_monitor import *
from .lbhttp_request_rewrite_rule import *
from .lbhttp_response_rewrite_rule import *
from .lbhttp_virtual_server import *
from .lbhttps_monitor import *
from .lbicmp_monitor import *
from .lbtcp_monitor import *
from .lbtcp_virtual_server import *
from .lbudp_monitor import *
from .lbudp_virtual_server import *
from .logical_dhc_perver import *
from .logical_dhcp_port import *
from .logical_port import *
from .logical_router_centralized_service_port import *
from .logical_router_downlink_port import *
from .logical_router_link_port_on_tier0 import *
from .logical_router_link_port_on_tier1 import *
from .logical_switch import *
from .logical_tier0_router import *
from .logical_tier1_router import *
from .mac_management_switching_profile import *
from .n_sroup import *
from .nat_rule import *
from .ns_service_group import *
from .policy_bgp_config import *
from .policy_bgp_neighbor import *
from .policy_context_profile import *
from .policy_dhcp_relay import *
from .policy_dhcp_server import *
from .policy_gateway_policy import *
from .policy_gateway_prefix_list import *
from .policy_group import *
from .policy_ip_address_allocation import *
from .policy_ip_block import *
from .policy_ip_pool import *
from .policy_ip_pool_block_subnet import *
from .policy_ip_pool_static_subnet import *
from .policy_lb_pool import *
from .policy_lb_service import *
from .policy_lb_virtual_server import *
from .policy_nat_rule import *
from .policy_predefined_gateway_policy import *
from .policy_predefined_security_policy import *
from .policy_security_policy import *
from .policy_segment import *
from .policy_service import *
from .policy_static_route import *
from .policy_tier0_gateway import *
from .policy_tier0_gateway_havip_config import *
from .policy_tier0_gateway_interface import *
from .policy_tier1_gateway import *
from .policy_tier1_gateway_interface import *
from .policy_vlan_segment import *
from .policy_vm_tags import *
from .provider import *
from .qos_switching_profile import *
from .spoofguard_switching_profile import *
from .static_route import *
from .switch_security_switching_profile import *
from .vlan_logical_switch import *
from .vm_tags import *
|
import numpy as np
import sys
import warnings
warnings.filterwarnings('ignore')
import george
from george import kernels
from sklearn.gaussian_process import GaussianProcessRegressor
from sklearn.gaussian_process.kernels import RBF,WhiteKernel, ConstantKernel as C, DotProduct, RationalQuadratic, Matern
from scipy.optimize import minimize
from scipy.interpolate import PchipInterpolator, interp1d
import scipy.io as sio
from .priors import *
import pkg_resources
def get_file(folder, filename):
    """Open a data file shipped inside this package as a binary stream.

    *folder* and *filename* are joined with '/' because pkg_resources
    expects forward-slash resource paths on every OS (os.path.join would
    break on Windows).
    """
    resource_path = '/'.join((folder, filename))
    return pkg_resources.resource_stream(__name__, resource_path)
# Load the FSPS mass-loss curve shipped with the package (used by
# correct_for_mass_loss to convert formed mass to surviving mass).
fsps_mlc = sio.loadmat(get_file('train_data','fsps_mass_loss_curve.mat'))
#fsps_mlc = sio.loadmat('dense_basis/train_data/fsps_mass_loss_curve.mat')
fsps_time = fsps_mlc['timeax_fsps'].ravel()  # curve time axis
fsps_massloss = fsps_mlc['mass_loss_fsps'].ravel()  # fractional mass loss

# basic SFH tuples
# Layout matches what tuple_to_sfh consumes:
# (log mass, log SFR, Nparam, t25, t50, t75) — presumably; the tx entries
# are cumulative-mass quantile times as fractions of the age of the
# universe. TODO confirm against the package documentation.
rising_sfh = np.array([10.0,1.0,3,0.5,0.7,0.9])
regular_sfg_sfh = np.array([10.0,0.3,3,0.25,0.5,0.75])
young_quenched_sfh = np.array([10.0,-1.0,3,0.3,0.6,0.8])
old_quenched_sfh = np.array([10.0,-1.0,3,0.1,0.2,0.4])
old_very_quenched_sfh = np.array([10.0,-10.0,3,0.1,0.2,0.4])
double_peaked_SF_sfh = np.array([10.0,0.5,3,0.25,0.4,0.7])
double_peaked_Q_sfh = np.array([10.0,-1.0,3,0.2,0.4,0.8])
# functions:
def neg_ln_like(p, gp, y):
    """Negative log-likelihood of data *y* under GP *gp* at parameters *p*.

    Objective function for scipy.optimize.minimize.
    """
    gp.set_parameter_vector(p)
    log_like = gp.log_likelihood(y)
    return -log_like
def grad_neg_ln_like(p, gp, y):
    """Gradient of the negative log-likelihood of *y* under *gp* at *p*.

    Companion to neg_ln_like for gradient-based optimizers.
    """
    gp.set_parameter_vector(p)
    grad = gp.grad_log_likelihood(y)
    return -grad
def correct_for_mass_loss(sfh, time, mass_loss_curve_time, mass_loss_curve):
    """Scale an SFH by the mass-loss correction interpolated at each time.

    The correction curve (mass_loss_curve_time, mass_loss_curve) is linearly
    interpolated onto *time* and multiplied element-wise into *sfh*.
    """
    factors = np.interp(time, mass_loss_curve_time, mass_loss_curve)
    corrected_sfh = sfh * factors
    return corrected_sfh
def gp_interpolator(x,y,res = 1000, Nparam = 3):
    """Interpolate the quantile points (x, y) with a george Gaussian process.

    Returns (x_pred, y_pred): *res* evenly spaced x values spanning x's
    range and the GP mean prediction at each.
    """
    # Tiny artificial error bars on the Nparam tx entries (indices 2..2+Nparam)
    # let the GP pass near, rather than exactly through, those points.
    # The 0.001/0.1 magic values are tuning constants — presumably chosen
    # empirically; TODO confirm.
    yerr = np.zeros_like(y)
    yerr[2:(2+Nparam)] = 0.001/np.sqrt(Nparam)
    if len(yerr) > 26:
        # Looser error bars once many constraint points have been inserted.
        yerr[2:(2+Nparam)] = 0.1/np.sqrt(Nparam)
    #kernel = np.var(yax) * kernels.ExpSquaredKernel(np.median(yax)+np.std(yax))
    #k2 = np.var(yax) * kernels.LinearKernel(np.median(yax),order=1)
    #kernel = np.var(y) * kernels.Matern32Kernel(np.median(y)) #+ k2
    kernel = np.var(y) * (kernels.Matern32Kernel(np.median(y)) + kernels.LinearKernel(np.median(y), order=2))
    gp = george.GP(kernel)
    #print(xax.shape, yerr.shape)
    gp.compute(x.ravel(), yerr.ravel())
    x_pred = np.linspace(np.amin(x), np.amax(x), res)
    # Predictive variance is computed but discarded; only the mean is returned.
    y_pred, pred_var = gp.predict(y.ravel(), x_pred, return_var=True)
    return x_pred, y_pred
def gp_sklearn_interpolator(x, y, res=1000):
    """Interpolate the quantile points (x, y) with a scikit-learn GP.

    The GP is fit to the residual (y - x) so the prediction is anchored to
    the identity line, and the identity is added back afterwards.

    Returns (x_pred, y_pred): *res* evenly spaced points on [0, 1] and the
    GP mean prediction at each.
    """
    kernel = DotProduct(10.0, (1e-2, 1e2)) * RationalQuadratic(0.1)
    gp = GaussianProcessRegressor(kernel=kernel, n_restarts_optimizer=9)
    gp.fit(x.reshape(-1, 1), (y - x).reshape(-1, 1))

    # Bug fix: the prediction grid was hard-coded to 1000 points, silently
    # ignoring the `res` argument; use `res` (default 1000 preserves the
    # old behavior for existing callers).
    x_pred = np.linspace(0, 1, res)
    y_pred, sigma = gp.predict(x_pred[:, np.newaxis], return_std=True)
    y_pred = y_pred.ravel() + x_pred
    return x_pred, y_pred
def linear_interpolator(x, y, res=1000):
    """Resample (x, y) onto *res* evenly spaced points by linear interpolation."""
    grid = np.linspace(np.amin(x), np.amax(x), res)
    resampled = interp1d(x, y)(grid)
    return grid, resampled
def Pchip_interpolator(x, y, res=1000):
    """Resample (x, y) onto *res* evenly spaced points with a shape-preserving
    (monotone) PCHIP spline."""
    grid = np.linspace(np.amin(x), np.amax(x), res)
    resampled = PchipInterpolator(x, y)(grid)
    return grid, resampled
def tuple_to_sfh(sfh_tuple, zval, interpolator = 'gp_george', set_sfr_100Myr = False, vb = False):
    # generate an SFH from an input tuple (Mass, SFR, {tx}) at a specified redshift
    """Reconstruct a star formation history from its tuple representation.

    sfh_tuple layout: [log10 mass, log10 SFR, Nparam, tx_1 ... tx_Nparam],
    where the tx are cumulative-mass quantile times as fractions of the age
    of the universe at redshift *zval*.

    Returns (sfh, timeax): SFR values and the corresponding cosmic-time axis
    in Gyr. NOTE(review): relies on `cosmo` (and, when vb=True, `plt`) being
    provided by the star-import of .priors — confirm.
    """
    Nparam = int(sfh_tuple[2])
    mass_quantiles = np.linspace(0,1,Nparam+2)
    time_quantiles = np.zeros_like(mass_quantiles)
    time_quantiles[-1] = 1
    time_quantiles[1:-1] = sfh_tuple[3:]

    # now add SFR constraints

    # SFR smoothly increasing from 0 at the big bang
    mass_quantiles = np.insert(mass_quantiles,1,[0.00])
    time_quantiles = np.insert(time_quantiles,1,[0.01])

    # SFR constrained to SFR_inst at the time of observation
    # Extra anchor points near t=1 force the interpolated curve to end with
    # slope matching the instantaneous SFR from the tuple.
    SFH_constraint_percentiles = np.array([0.96,0.97,0.98,0.99])
    for const_vals in SFH_constraint_percentiles:
        delta_mstar = 10**(sfh_tuple[0]) *(1-const_vals)
        delta_t = 1 - delta_mstar/(10**sfh_tuple[1])/(cosmo.age(zval).value*1e9)
        if (delta_t > time_quantiles[-2]) & (delta_t > 0.9):
            # Anchor in time at the position implied by the SFR.
            mass_quantiles = np.insert(mass_quantiles, -1, [const_vals], )
            time_quantiles = np.insert(time_quantiles, -1, [delta_t],)
        else:
            # Otherwise anchor in mass at the fixed percentile time.
            delta_m = 1 - ((cosmo.age(zval).value*1e9)*(1-const_vals)*(10**sfh_tuple[1]))/(10**sfh_tuple[0])
            time_quantiles = np.insert(time_quantiles, -1, [const_vals])
            mass_quantiles= np.insert(mass_quantiles, -1, [delta_m])

    # Interpolate the (time, cumulative mass) quantiles onto a dense grid.
    if interpolator == 'gp_george':
        time_arr_interp, mass_arr_interp = gp_interpolator(time_quantiles, mass_quantiles, Nparam = int(Nparam))
    elif interpolator == 'gp_sklearn':
        time_arr_interp, mass_arr_interp = gp_sklearn_interpolator(time_quantiles, mass_quantiles)
    elif interpolator == 'linear':
        time_arr_interp, mass_arr_interp = linear_interpolator(time_quantiles, mass_quantiles)
    elif interpolator == 'pchip':
        time_arr_interp, mass_arr_interp = Pchip_interpolator(time_quantiles, mass_quantiles)
    else:
        raise Exception('specified interpolator does not exist: {}. \n use one of the following: gp_george, gp_sklearn, linear, and pchip '.format(interpolator))

    # Differentiate cumulative mass to get the SFR; the scale factor assumes
    # the default 1000-point grid (res/1000 of the age of the universe per bin).
    sfh_scale = 10**(sfh_tuple[0])/(cosmo.age(zval).value*1e9/1000)
    sfh = np.diff(mass_arr_interp)*sfh_scale
    sfh[sfh<0] = 0  # clip unphysical negative SFR from interpolation wiggles
    sfh = np.insert(sfh,0,[0])

    if set_sfr_100Myr == True:
        # Force the last 100 Myr of the SFH to the tuple's instantaneous SFR.
        time_100Myr = np.argmin(np.abs(time_arr_interp*cosmo.age(zval).value - 0.1))
        sfh[-time_100Myr:] = 10**sfh_tuple[1]

    timeax = time_arr_interp * cosmo.age(zval).value

    if vb == True:
        # Diagnostic plots of the quantiles and the reconstructed SFH.
        print('time and mass quantiles:')
        print(time_quantiles, mass_quantiles)
        plt.plot(time_quantiles, mass_quantiles,'--o')
        plt.plot(time_arr_interp, mass_arr_interp)
        plt.axis([0,1,0,1])
        #plt.axis([0.9,1.05,0.9,1.05])
        plt.show()
        print('instantaneous SFR: %.1f' %sfh[-1])
        plt.plot(np.amax(time_arr_interp) - time_arr_interp, sfh)
        #plt.xscale('log')
        plt.show()

    return sfh, timeax
def calctimes(timeax, sfh, nparams):
    """Compress an SFH into its tuple representation (inverse of tuple_to_sfh).

    Parameters
    ----------
    timeax : array of times (Gyr, cosmic time)
    sfh : star formation rate at each time
    nparams : number of cumulative-mass quantile times tx to compute

    Returns
    -------
    mass : log10 of the total mass formed (trapezoidal integral, time in yr)
    sfr : log10 of the SFR in the final time bin
    tx : the nparams times at which cumulative mass fractions
         1/(nparams+1) .. nparams/(nparams+1) are reached, as fractions of
         the maximum time.
    """
    massint = np.cumsum(sfh)
    massint_normed = massint / np.amax(massint)
    tx = np.zeros((nparams,))
    for i in range(nparams):
        # Time closest to cumulative mass fraction (i+1)/(nparams+1).
        tx[i] = timeax[np.argmin(np.abs(massint_normed - (i + 1) / (nparams + 1)))]
    # np.trapz was renamed np.trapezoid and removed in NumPy 2.0; support
    # both so this works across NumPy versions.
    trapezoid = getattr(np, 'trapezoid', None)
    if trapezoid is None:
        trapezoid = np.trapz
    #mass = np.log10(np.sum(sfh)*1e9)
    mass = np.log10(trapezoid(sfh, timeax * 1e9))
    sfr = np.log10(sfh[-1])
    return mass, sfr, tx / np.amax(timeax)
def scale_t50(t50_val=1.0, zval=1.0):
    """
    Change a t50 value from lookback time in Gyr at a given redshift
    to fraction of the age of the universe.
    inputs: t50 [Gyr, lookback time], redshift
    outputs: t50 [fraction of the age of the universe, cosmic time]
    """
    age_at_z = cosmo.age(zval).value
    return 1 - (t50_val / age_at_z)
|
from django import template
from RollMarkingApp.models import Attendance, Absence
register = template.Library()
@register.filter
def filter_cadet_attendance(cadet, term_date):
    """Template filter: attendance records for *cadet* at meeting *term_date*."""
    return Attendance.objects.filter(cadet=cadet, meeting=term_date)
@register.filter
def filter_cadet_uniform(cadet, term_date):
    """Template filter: attendance records where the cadet wore uniform."""
    return Attendance.objects.filter(cadet=cadet, meeting=term_date, uniform=True)
@register.filter
def filter_cadet_absence(cadet, term_date):
    """Template filter: absence records for *cadet* at meeting *term_date*."""
    return Absence.objects.filter(cadet=cadet, meeting=term_date)
@register.filter
def filter_cadet_absence_reason(cadet, term_date):
    """Template filter: the reason code of the cadet's absence at a meeting.

    NOTE(review): Absence.objects.get raises DoesNotExist / MultipleObjectsReturned
    when there is not exactly one matching record — callers presumably only use
    this when an absence exists; confirm.
    """
    return Absence.objects.get(cadet=cadet, meeting=term_date).reason_code
|
import pickle
from twisted.internet import defer
from jasmin.protocols.smpp.configs import SMPPClientConfig
from jasmin.protocols.cli.managers import Manager, Session
from jasmin.vendor.smpp.pdu.constants import (addr_npi_name_map, addr_ton_name_map,
replace_if_present_flap_name_map, priority_flag_name_map)
from jasmin.protocols.cli.protocol import str2num
# A config map between console-configuration keys and SMPPClientConfig keys.
# A config map between console-configuration keys and SMPPClientConfig keys.
SMPPClientConfigKeyMap = {'cid': 'id', 'host': 'host', 'port': 'port', 'username': 'username',
                          'password': 'password', 'systype': 'systemType', 'logfile': 'log_file', 'loglevel': 'log_level',
                          'bind_to': 'sessionInitTimerSecs', 'elink_interval': 'enquireLinkTimerSecs', 'trx_to': 'inactivityTimerSecs',
                          'res_to': 'responseTimerSecs', 'con_loss_retry': 'reconnectOnConnectionLoss', 'con_fail_retry': 'reconnectOnConnectionFailure',
                          'con_loss_delay': 'reconnectOnConnectionLossDelay', 'con_fail_delay': 'reconnectOnConnectionFailureDelay',
                          'pdu_red_to': 'pduReadTimerSecs', 'bind': 'bindOperation', 'bind_ton': 'bind_addr_ton', 'bind_npi': 'bind_addr_npi',
                          'src_ton': 'source_addr_ton', 'src_npi': 'source_addr_npi', 'dst_ton': 'dest_addr_ton', 'dst_npi': 'dest_addr_npi',
                          'addr_range': 'address_range', 'src_addr': 'source_addr', 'proto_id': 'protocol_id',
                          'priority': 'priority_flag', 'validity': 'validity_period', 'ripf': 'replace_if_present_flag',
                          'def_msg_id': 'sm_default_msg_id', 'coding': 'data_coding', 'requeue_delay': 'requeue_delay', 'submit_throughput': 'submit_sm_throughput',
                          'dlr_expiry': 'dlr_expiry'
                          }

# When updating a key from RequireRestartKeys, the connector need restart for update to take effect
# NOTE(review): JCliSMPPClientConfig.set() receives SMPPClientConfig-side keys,
# but 'logfile'/'loglevel' below are console-side spellings ('log_file'/'log_level'
# on the config side) — those two may never trigger PendingRestart; confirm.
RequireRestartKeys = ['host', 'port', 'username', 'password', 'systemType', 'logfile', 'loglevel']
def castToBuiltInType(key, value):
    'Will cast value to the correct type depending on the key'
    # Booleans are exposed as 1/0.
    if isinstance(value, bool):
        return int(value)

    npi_keys = ('bind_npi', 'dst_npi', 'src_npi')
    ton_keys = ('bind_ton', 'dst_ton', 'src_ton')
    if key in npi_keys:
        return addr_npi_name_map[str(value)]
    if key in ton_keys:
        return addr_ton_name_map[str(value)]
    if key == 'ripf':
        return replace_if_present_flap_name_map[str(value)]
    if key == 'priority':
        return priority_flag_name_map[str(value)]
    # Any other key passes through unchanged.
    return value
class JCliSMPPClientConfig(SMPPClientConfig):
    'Overload SMPPClientConfig with getters and setters for JCli'
    # Set to True when a key in RequireRestartKeys is updated; the connector
    # must then be restarted for the change to take effect.
    PendingRestart = False

    def set(self, key, value):
        """Set config attribute *key* to *value*, flagging a pending restart
        when the key requires one."""
        setattr(self, key, value)

        if key in RequireRestartKeys:
            self.PendingRestart = True

    def getAll(self):
        """Return a dict mapping every console key to its (cast) config value."""
        r = {}
        # .items() instead of the Python-2-only .iteritems() (removed in
        # Python 3); works identically on both interpreters here.
        for key, value in SMPPClientConfigKeyMap.items():
            r[key] = castToBuiltInType(key, getattr(self, value))
        return r
def SMPPClientConfigBuild(fCallback):
    'Parse args and try to build a JCliSMPPClientConfig instance to pass it to fCallback'
    def parse_args_and_call_with_instance(self, *args, **kwargs):
        """Session line handler: buffer key/value pairs until 'ok', then build
        a JCliSMPPClientConfig from the buffer and hand it to fCallback."""
        cmd = args[0]
        arg = args[1]

        # Empty line
        if cmd is None:
            return self.protocol.sendData()

        # Initiate JCliSMPPClientConfig with sessBuffer content
        if cmd == 'ok':
            if len(self.sessBuffer) == 0:
                return self.protocol.sendData('You must set at least connector id (cid) before saving !')

            connector = {}
            # .items() instead of Python-2-only .iteritems() (removed in py3).
            for key, value in self.sessBuffer.items():
                connector[key] = value
            try:
                SMPPClientConfigInstance = JCliSMPPClientConfig(**connector)
                # Hand the instance to fCallback
                return fCallback(self, SMPPClientConfigInstance)
            # 'except E as e' replaces the Python-2-only 'except E, e' syntax.
            except Exception as e:
                return self.protocol.sendData('Error: %s' % str(e))
        else:
            # Unknown key ('in' replaces dict.has_key, removed in Python 3)
            if cmd not in SMPPClientConfigKeyMap:
                return self.protocol.sendData('Unknown SMPPClientConfig key: %s' % cmd)

            # Cast to boolean
            if cmd in ['con_loss_retry', 'con_fail_retry']:
                if arg.lower() in ['yes', 'y', '1']:
                    arg = True
                elif arg.lower() in ['no', 'n', '0']:
                    arg = False

            # Buffer key for later SMPPClientConfig initiating
            SMPPClientConfigKey = SMPPClientConfigKeyMap[cmd]
            if isinstance(arg, str):
                self.sessBuffer[SMPPClientConfigKey] = str2num(arg)
            else:
                self.sessBuffer[SMPPClientConfigKey] = arg

            return self.protocol.sendData()
    return parse_args_and_call_with_instance
def SMPPClientConfigUpdate(fCallback):
    '''Get connector configuration and log update requests passing to fCallback
    The log will be handed to fCallback when 'ok' is received'''
    def log_update_requests_and_call(self, *args, **kwargs):
        """Session line handler: buffer updates until 'ok', then hand the
        buffered update log to fCallback."""
        cmd = args[0]
        arg = args[1]

        # Empty line
        if cmd is None:
            return self.protocol.sendData()

        # Pass sessBuffer as updateLog to fCallback
        if cmd == 'ok':
            if len(self.sessBuffer) == 0:
                return self.protocol.sendData('Nothing to save')

            return fCallback(self, self.sessBuffer)
        else:
            # Unknown key ('in' replaces dict.has_key, removed in Python 3)
            if cmd not in SMPPClientConfigKeyMap:
                return self.protocol.sendData('Unknown SMPPClientConfig key: %s' % cmd)
            # The connector id is the primary key; it cannot be updated.
            if cmd == 'cid':
                return self.protocol.sendData('Connector id can not be modified !')

            # Buffer key for later (when receiving 'ok')
            SMPPClientConfigKey = SMPPClientConfigKeyMap[cmd]
            self.sessBuffer[SMPPClientConfigKey] = str2num(arg)

            return self.protocol.sendData()
    return log_update_requests_and_call
class ConnectorExist:
    'Check if connector cid exist before passing it to fCallback'
    def __init__(self, cid_key):
        # Name of the opts attribute that carries the connector id.
        self.cid_key = cid_key

    def __call__(self, fCallback):
        cid_key = self.cid_key

        def check_cid_then_call(self, *args, **kwargs):
            opts = args[1]
            cid = getattr(opts, cid_key)
            # Guard clause: bail out early when the connector is unknown.
            if self.pb['smppcm'].getConnector(cid) is None:
                return self.protocol.sendData('Unknown connector: %s' % cid)
            return fCallback(self, *args, **kwargs)
        return check_cid_then_call
class SmppCCManager(Manager):
    """JCli manager for SMPP client connectors (smppcc): persist/load the
    configuration and list/add/update/remove/show/start/stop connectors
    through the 'smppcm' perspective broker."""
    managerName = 'smppcc'

    def persist(self, arg, opts):
        """Persist connectors configuration to the given profile."""
        if self.pb['smppcm'].perspective_persist(opts.profile):
            self.protocol.sendData('%s configuration persisted (profile:%s)' % (self.managerName, opts.profile), prompt=False)
        else:
            self.protocol.sendData('Failed to persist %s configuration (profile:%s)' % (self.managerName, opts.profile), prompt=False)

    @defer.inlineCallbacks
    def load(self, arg, opts):
        """Load connectors configuration from the given profile."""
        r = yield self.pb['smppcm'].perspective_load(opts.profile)

        if r:
            self.protocol.sendData('%s configuration loaded (profile:%s)' % (self.managerName, opts.profile), prompt=False)
        else:
            self.protocol.sendData('Failed to load %s configuration (profile:%s)' % (self.managerName, opts.profile), prompt=False)

    def list(self, arg, opts):
        """Print a table of all connectors with service/session state and counters."""
        connectors = self.pb['smppcm'].perspective_connector_list()
        counter = 0

        if (len(connectors)) > 0:
            self.protocol.sendData("#%s %s %s %s %s" % ('Connector id'.ljust(35),
                                                        'Service'.ljust(7),
                                                        'Session'.ljust(16),
                                                        'Starts'.ljust(6),
                                                        'Stops'.ljust(5),
                                                        ), prompt=False)
            for connector in connectors:
                counter += 1
                self.protocol.sendData("#%s %s %s %s %s" % (str(connector['id']).ljust(35),
                                                            str('started' if connector['service_status'] == 1 else 'stopped').ljust(7),
                                                            str(connector['session_state']).ljust(16),
                                                            str(connector['start_count']).ljust(6),
                                                            str(connector['stop_count']).ljust(5),
                                                            ), prompt=False)
            self.protocol.sendData(prompt=False)

        self.protocol.sendData('Total connectors: %s' % counter)

    @Session
    @SMPPClientConfigBuild
    @defer.inlineCallbacks
    def add_session(self, SMPPClientConfigInstance):
        """Session callback: add the connector built by SMPPClientConfigBuild."""
        st = yield self.pb['smppcm'].perspective_connector_add(pickle.dumps(SMPPClientConfigInstance, 2))

        if st:
            self.protocol.sendData('Successfully added connector [%s]' % SMPPClientConfigInstance.id, prompt=False)
            self.stopSession()
        else:
            self.protocol.sendData('Failed adding connector, check log for details')

    def add(self, arg, opts):
        # NOTE: 'annoucement'/'completitions' spellings match the
        # Manager.startSession keyword names; do not "fix" them here alone.
        return self.startSession(self.add_session,
                                 annoucement='Adding a new connector: (ok: save, ko: exit)',
                                 completitions=SMPPClientConfigKeyMap.keys())

    @Session
    @SMPPClientConfigUpdate
    @defer.inlineCallbacks
    def update_session(self, updateLog):
        """Session callback: apply buffered updates, restarting the connector
        when a changed key requires it."""
        connector = self.pb['smppcm'].getConnector(self.sessionContext['cid'])
        connectorDetails = self.pb['smppcm'].getConnectorDetails(self.sessionContext['cid'])
        # .items() instead of the Python-2-only .iteritems() (removed in py3).
        for key, value in updateLog.items():
            connector['config'].set(key, value)

        if connector['config'].PendingRestart and connectorDetails['service_status'] == 1:
            self.protocol.sendData('Restarting connector [%s] for updates to take effect ...' % self.sessionContext['cid'], prompt=False)
            st = yield self.pb['smppcm'].perspective_connector_stop(self.sessionContext['cid'])
            if not st:
                self.protocol.sendData('Failed stopping connector, check log for details', prompt=False)
            else:
                self.pb['smppcm'].perspective_connector_start(self.sessionContext['cid'])

        self.protocol.sendData('Successfully updated connector [%s]' % self.sessionContext['cid'], prompt=False)
        self.stopSession()

    @ConnectorExist(cid_key='update')
    def update(self, arg, opts):
        return self.startSession(self.update_session,
                                 annoucement='Updating connector id [%s]: (ok: save, ko: exit)' % opts.update,
                                 completitions=SMPPClientConfigKeyMap.keys(),
                                 sessionContext={'cid': opts.update})

    @ConnectorExist(cid_key='remove')
    @defer.inlineCallbacks
    def remove(self, arg, opts):
        st = yield self.pb['smppcm'].perspective_connector_remove(opts.remove)

        if st:
            self.protocol.sendData('Successfully removed connector id:%s' % opts.remove)
        else:
            self.protocol.sendData('Failed removing connector, check log for details')

    @ConnectorExist(cid_key='show')
    def show(self, arg, opts):
        """Dump every console key/value of the connector's configuration."""
        connector = self.pb['smppcm'].getConnector(opts.show)
        # .items() instead of the Python-2-only .iteritems() (removed in py3).
        for k, v in connector['config'].getAll().items():
            self.protocol.sendData('%s %s' % (k, v), prompt=False)
        self.protocol.sendData()

    @ConnectorExist(cid_key='stop')
    @defer.inlineCallbacks
    def stop(self, arg, opts):
        st = yield self.pb['smppcm'].perspective_connector_stop(opts.stop)

        if st:
            self.protocol.sendData('Successfully stopped connector id:%s' % opts.stop)
        else:
            self.protocol.sendData('Failed stopping connector, check log for details')

    @ConnectorExist(cid_key='start')
    def start(self, arg, opts):
        st = self.pb['smppcm'].perspective_connector_start(opts.start)

        if st:
            self.protocol.sendData('Successfully started connector id:%s' % opts.start)
        else:
            self.protocol.sendData('Failed starting connector, check log for details')
|
# Minimal pygame demo: opens a 400x300 window and repeatedly draws a
# polygon outline until the window is closed.
import pygame
from math import pi  # NOTE(review): pi is imported but unused in this snippet

# Initialize the game engine
pygame.init()

# Define the colors we will use in RGB format
BLACK = (0, 0, 0)
WHITE = (255, 255, 255)
BLUE = (0, 0, 255)
GREEN = (0, 255, 0)
RED = (255, 0, 0)

# Set the height and width of the screen
size = [400, 300]
screen = pygame.display.set_mode(size)

pygame.display.set_caption("Example code for the draw module")

# Loop until the user clicks the close button.
done = False
clock = pygame.time.Clock()

while not done:

    # This limits the while loop to a max of 10 times per second.
    # Leave this out and we will use all CPU we can.
    clock.tick(10)

    for event in pygame.event.get():  # User did something
        if event.type == pygame.QUIT:  # If user clicked close
            done = True  # Flag that we are done so we exit this loop

    # All drawing code happens after the for loop and but
    # inside the main while done==False loop.

    # Clear the screen and set the screen background
    screen.fill(WHITE)

    # This draws a triangle using the polygon command
    # (seven vertices, outline width 5 pixels)
    pygame.draw.polygon(screen, BLACK, [[100, 100],[200, 100], [200, 200], [250, 200], [150, 250], [50, 200], [100,200]], 5)

    # Go ahead and update the screen with what we've drawn.
    # This MUST happen after all the other drawing commands.
    pygame.display.flip()

# Be IDLE friendly
pygame.quit()
# Copyright (c) 2018 Cloudify Platform Ltd. All rights reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Standard imports
import unittest
# Third party imports
from mock import patch, MagicMock
from cloudify.exceptions import OperationRetry
# Local imports
from cloudify_aws.common._compat import reload_module
from cloudify_aws.ec2.resources import subnet
from cloudify_aws.common.tests.test_base import (
TestBase,
mock_decorator
)
from cloudify_aws.ec2.resources.subnet import (
EC2Subnet,
SUBNET,
CIDR_BLOCK,
SUBNET_ID,
VPC_ID,
VPC_TYPE
)
class TestEC2Subnet(TestBase):
    """Unit tests for cloudify_aws.ec2.resources.subnet (EC2Subnet and its
    module-level operations), with the boto3 client fully mocked."""

    def setUp(self):
        super(TestEC2Subnet, self).setUp()
        self.subnet = EC2Subnet("ctx_node", resource_id=True,
                                client=True, logger=None)
        # Replace the aws_resource / wait_for_status decorators with
        # pass-throughs, then reload the module so the undecorated
        # operations are what gets tested.
        mock1 = patch('cloudify_aws.common.decorators.aws_resource',
                      mock_decorator)
        mock2 = patch('cloudify_aws.common.decorators.wait_for_status',
                      mock_decorator)
        mock1.start()
        mock2.start()
        reload_module(subnet)

    def test_class_properties(self):
        # A client error while describing subnets yields None.
        effect = self.get_client_error_exception(name='EC2 Subnet')
        self.subnet.client = self.make_client_function('describe_subnets',
                                                       side_effect=effect)
        res = self.subnet.properties
        self.assertIsNone(res)

        # An empty response also yields None.
        value = {}
        self.subnet.client = self.make_client_function('describe_subnets',
                                                       return_value=value)
        res = self.subnet.properties
        self.assertIsNone(res)

        # A single subnet in the response is returned as-is.
        value = {'Subnets': [{SUBNET_ID: 'test_name'}]}
        self.subnet.client = self.make_client_function('describe_subnets',
                                                       return_value=value)
        res = self.subnet.properties
        self.assertEqual(res[SUBNET_ID], 'test_name')

    def test_class_status(self):
        # No subnets -> no status.
        value = {}
        self.subnet.client = self.make_client_function('describe_subnets',
                                                       return_value=value)
        res = self.subnet.status
        self.assertIsNone(res)

        # Status comes from the subnet's 'State' field.
        value = {'Subnets': [{SUBNET_ID: 'test_name', 'State': 'available'}]}
        self.subnet.client = self.make_client_function('describe_subnets',
                                                       return_value=value)
        res = self.subnet.status
        self.assertEqual(res, 'available')

    def test_class_create(self):
        # create() returns the raw create_subnet response.
        value = {SUBNET: 'test'}
        self.subnet.client = self.make_client_function('create_subnet',
                                                       return_value=value)
        res = self.subnet.create(value)
        self.assertEqual(res, value)

    def test_class_delete(self):
        params = {}
        self.subnet.client = self.make_client_function('delete_subnet')
        self.subnet.delete(params)
        self.assertTrue(self.subnet.client.delete_subnet.called)

        # delete() must not mutate the caller's params dict.
        params = {SUBNET: 'subnet', CIDR_BLOCK: 'cidr_block'}
        self.subnet.delete(params)
        self.assertEqual(params[CIDR_BLOCK], 'cidr_block')

    def test_prepare(self):
        # prepare() stores the resource_config in runtime properties.
        ctx = self.get_mock_ctx("Subnet")
        config = {SUBNET_ID: 'subnet', CIDR_BLOCK: 'cidr_block'}
        # iface = MagicMock()
        # iface.create = self.mock_return(config)
        subnet.prepare(ctx, config)
        self.assertEqual(ctx.instance.runtime_properties['resource_config'],
                         config)

    def test_create(self):
        ctx = self.get_mock_ctx("Subnet", {'client_config': {
            'region_name': 'aq-testzone-1'
        }})
        config = {SUBNET_ID: 'subnet', CIDR_BLOCK: 'cidr_block',
                  VPC_ID: 'vpc'}
        self.subnet.resource_id = config[SUBNET_ID]
        iface = MagicMock()
        iface.create = self.mock_return({SUBNET: config})
        subnet.create(ctx=ctx, iface=iface, resource_config=config)
        self.assertEqual(self.subnet.resource_id,
                         'subnet')

    def test_create_with_relationships(self):
        # Same as test_create, but the VPC id is resolved from a
        # relationship (VPC_TYPE in the type hierarchy) instead of config.
        ctx = self.get_mock_ctx("Subnet", test_properties={'client_config': {
            'region_name': 'aq-testzone-1'
        }}, type_hierarchy=[VPC_TYPE])
        config = {SUBNET_ID: 'subnet', CIDR_BLOCK: 'cidr_block'}
        self.subnet.resource_id = config[SUBNET_ID]
        iface = MagicMock()
        iface.create = self.mock_return({SUBNET: config})
        with patch('cloudify_aws.common.utils.find_rel_by_node_type'):
            subnet.create(ctx=ctx, iface=iface, resource_config=config)
            self.assertEqual(self.subnet.resource_id,
                             'subnet')

    def test_delete(self):
        ctx = self.get_mock_ctx("Subnet")
        iface = MagicMock()
        subnet.delete(ctx=ctx, iface=iface, resource_config={})
        self.assertTrue(iface.delete.called)

    def test_modify_subnet_attribute(self):
        ctx = self.get_mock_ctx("Subnet")
        iface = MagicMock()
        iface.status = 0
        self.subnet.resource_id = 'test_name'
        # The operation may raise OperationRetry while the subnet is not yet
        # available; the call itself must still have been attempted.
        try:
            subnet.modify_subnet_attribute(
                ctx, iface, {SUBNET_ID: self.subnet.resource_id})
        except OperationRetry:
            pass
        self.assertTrue(iface.modify_subnet_attribute.called)
# Allow running this test module directly (outside a test runner).
if __name__ == '__main__':
    unittest.main()
|
#!/usr/bin/env python3
"""Deploys documentation to GitHub pages.
If the environment variable :attr:`TRAVIS_BRANCH` is set, it overrides
the current git branch.
If the environment variable :attr:`GH_TOKEN` is set, it is used as the API
token.
"""
import os
import shutil
import subprocess as sub
import sys
def get_current_git_branch():
    """Returns the current git branch."""
    output = sub.check_output("git rev-parse --abbrev-ref HEAD", shell=True)
    first_line = output.splitlines()[0]
    return str(first_line, 'utf-8')
# Git identity used for the deploy commit (CI has no default identity).
GIT_CONFIG = ['user.email travis@travis-ci.com',
              'user.name "Travis CI"']

# Branch being deployed; TRAVIS_BRANCH overrides the local git branch.
GIT_BRANCH = os.environ.get('TRAVIS_BRANCH', get_current_git_branch())

# Docs source tree, relative to this script.
WORKING_DIR = os.path.realpath(os.path.join(os.path.dirname(__file__),
                                            '../docs'))

# Fresh clone of the GitHub Pages repository lives here during deployment.
DEPLOY_REPO_DIR = os.path.join(WORKING_DIR, 'deploy-repo')
# {token} is either empty or 'TOKEN@' for authenticated pushes.
DEPLOY_REPO_REMOTE = "https://{token}github.com/garstka/garstka.github.io.git"
DEPLOY_DOCS_PARENT_DIR = os.path.join(DEPLOY_REPO_DIR, 'idact')
# Docs are published per branch under idact/<branch>/html.
DEPLOY_DOCS_DIR = os.path.join(DEPLOY_DOCS_PARENT_DIR,
                               '{git_branch}/html'.format(
                                   git_branch=GIT_BRANCH))
SOURCE_DOCS_DIR = os.path.join(WORKING_DIR, '_build/html')

BUILD_NUMBER = os.environ.get('TRAVIS_BUILD_NUMBER', 'manual')
COMMIT_MESSAGE = ("Deploy docs for branch {git_branch},"
                  " build: {build_number}").format(git_branch=GIT_BRANCH,
                                                  build_number=BUILD_NUMBER)
def main():
    """Commit the generated docs into the pages repo and push them.

    Returns 0 on success and 1 on any failure (the error is printed).
    """
    def run(command):
        """Run *command* through the shell, raising on non-zero exit."""
        sub.check_call(command, shell=True)
    try:
        os.chdir(WORKING_DIR)
        print("Deploying docs...")
        # Start from a pristine clone directory every time.
        if os.path.isdir(DEPLOY_REPO_DIR):
            shutil.rmtree(DEPLOY_REPO_DIR)
        os.mkdir(DEPLOY_REPO_DIR)
        os.chdir(DEPLOY_REPO_DIR)
        run("git init")
        for config in GIT_CONFIG:
            run("git config {}".format(config))
        # Embed the API token in the remote URL when one is available.
        token = os.environ.get('GH_TOKEN', '')
        if token:
            token += '@'
        remote = DEPLOY_REPO_REMOTE.format(token=token)
        run("git remote add origin {}".format(remote))
        run("git fetch origin")
        run("git checkout master")
        # Replace this branch's published docs with the fresh build.
        if os.path.isdir(DEPLOY_DOCS_DIR):
            shutil.rmtree(DEPLOY_DOCS_DIR)
        shutil.copytree(SOURCE_DOCS_DIR, DEPLOY_DOCS_DIR)
        run("git add {}".format(DEPLOY_DOCS_DIR))
        run('git commit -m "{}"'.format(COMMIT_MESSAGE))
        run("git push")
        return 0
    except Exception as err:  # pylint: disable=broad-except
        print(err)
        return 1
# Script entry point: propagate main()'s status code to the shell.
if __name__ == '__main__':
    sys.exit(main())
|
#!/usr/bin/env python3
#
# ~~~
# This file is part of the dune-gdt project:
# https://github.com/dune-community/dune-gdt
# Copyright 2010-2018 dune-gdt developers and contributors. All rights reserved.
# License: Dual licensed as BSD 2-Clause License (http://opensource.org/licenses/BSD-2-Clause)
# or GPL-2.0+ (http://opensource.org/licenses/gpl-license)
# with "runtime exception" (http://www.dune-project.org/license.html)
# Authors:
# Felix Schindler (2016 - 2017)
# Rene Milk (2016 - 2018)
#
# ~~~
# Jinja2 template for the generated .travis.yml; each {%- for %} block below
# expands to one build-matrix entry per builder index (rendered further down).
tpl = '''# This file is part of the dune-gdt project:
# https://github.com/dune-community/dune-gdt
# Copyright 2010-2016 dune-gdt developers and contributors. All rights reserved.
# License: BSD 2-Clause License (http://opensource.org/licenses/BSD-2-Clause)
# Authors:
# Felix Schindler (2016 - 2017)
# Rene Milk (2016 - 2017)
# THIS FILE IS AUTOGENERATED -- DO NOT EDIT #
sudo: required
dist: trusty
language: generic
services: docker
before_script:
- export IMAGE="dunecommunity/${MY_MODULE}-testing_${DOCKER_TAG}:${TRAVIS_BRANCH}"
- ./.travis.add_swap.bash 2000 &
# get image with fallback to master branch of the super repo
- docker pull ${IMAGE} || export IMAGE="dunecommunity/${MY_MODULE}-testing_${DOCKER_TAG}:master" ; docker pull ${IMAGE}
# for add swap
- wait
- export ENV_FILE=${HOME}/env
- printenv | \grep TRAVIS > ${ENV_FILE} || echo This is not a failure
- printenv | \grep encrypt >> ${ENV_FILE} || echo This is not a failure
- printenv | \grep TEST >> ${ENV_FILE}
- printenv | \grep TOKEN >> ${ENV_FILE} || echo This is not a failure
- export DOCKER_RUN="docker run --env-file ${ENV_FILE} -v ${TRAVIS_BUILD_DIR}:/root/src/${MY_MODULE} ${IMAGE}"
script:
- ${DOCKER_RUN} /root/src/${MY_MODULE}/.travis.script.bash
# runs independent of 'script' failure/success
after_script:
- ${DOCKER_RUN} /root/src/${MY_MODULE}/.travis.after_script.bash
notifications:
email:
on_success: change
on_failure: change
on_start: never
webhooks:
urls:
- https://buildtimetrend.herokuapp.com/travis
- https://webhooks.gitter.im/e/2a38e80d2722df87f945
branches:
except:
- gh-pages
env:
global:
- MY_MODULE=dune-gdt
matrix:
include:
# gcc 5
{%- for c in builders %}
- env: DOCKER_TAG=gcc TESTS={{c}}
{%- endfor %}
# clang 3.9
{%- for c in builders %}
- env: DOCKER_TAG=clang TESTS={{c}}
{%- endfor %}
# THIS FILE IS AUTOGENERATED -- DO NOT EDIT #
'''
import os
import jinja2
import sys
import where
import subprocess
# Render the template with one matrix entry per builder and write .travis.yml
# next to this script, then lint the result when the travis CLI is available.
template = jinja2.Template(tpl)
n_builders = int(sys.argv[1])
yml_path = os.path.join(os.path.dirname(__file__), '.travis.yml')
with open(yml_path, 'wt') as handle:
    handle.write(template.render(builders=range(0, n_builders)))
travis = where.first('travis')
if not travis:
    print('Travis linter missing. Try:\ngem install travis')
else:
    try:
        subprocess.check_call([str(travis), 'lint', yml_path])
    except subprocess.CalledProcessError as err:
        print('Linting {} failed'.format(yml_path))
        print(err)
        sys.exit(-1)
|
'''
/**
* created by cicek on 13.04.2018 10:03
*/
'''
# 3*(x**2) - y**2 = 8
# 8*(x**3) - 24*x = 1 -- question: after how many iterations does the method solve this system?
from math import e
def f(x, y):
    """Residual of the first equation, 3*x**2 - y**2 = 8 (zero at a root)."""
    return 3 * (x ** 2) - y ** 2 - 8
def g(x, y):
    """Residual of the second equation, 8*x**3 - 24*x = 1 (y is unused)."""
    return 8 * (x ** 3) - 24 * x - 1
def fx(x, y):
    """Partial derivative of f with respect to x (y is unused)."""
    return 6 * x
def fy(x, y):
    """Partial derivative of f with respect to y (x is unused)."""
    return -2 * y
def gx(x, y):
    """x-partial used for the second equation.

    NOTE(review): this is the x-partial of 3*x*y**2 - x**3, not of g as
    defined above (d/dx of 8*x**3 - 24*x - 1 would be 24*x**2 - 24).
    Confirm which system was intended; behavior kept exactly as written.
    """
    return 3 * (y ** 2) - 3 * (x ** 2)
def gy(x, y):
    """y-partial used for the second equation (matches 3*x*y**2 - x**3;
    see the review note on gx about the mismatch with g)."""
    return 6 * x * y
# Read the initial guess for the two-variable Newton iteration.
xi = float(input("xi sayısını gir: "))
yi = float(input("yi sayısını gir: "))
# Runs exactly 100 Newton steps, printing each iterate; there is no
# convergence test. A zero fx or a singular Jacobian raises
# ZeroDivisionError.
for z in range(100):
    # Solve the 2x2 Newton system for the increments by elimination.
    deltayi = (fx(xi,yi)*g(xi,yi) - gx(xi,yi)*f(xi,yi)) / (fx(xi,yi)*gy(xi,yi) - fy(xi,yi)*gx(xi,yi))
    deltaxi = (f(xi,yi) - fy(xi,yi)*deltayi) / fx(xi,yi)
    xi += deltaxi
    yi += deltayi
    print(xi , yi)
from django.shortcuts import reverse
from wildlifelicensing.apps.main.tests import helpers as helpers
class RoutingViewTest(helpers.BasePermissionViewTestCase):
    """The home routing view is reachable by every user (redirects followed)."""
    view_url = reverse('home')

    @property
    def permissions(self):
        return {
            'get': dict(allowed=self.all_users,
                        forbidden=[],
                        kwargs=dict(follow=True)),
        }
class TreeViewTest(helpers.BasePermissionViewTestCase):
    """Only officers may GET the officer tree dashboard."""
    view_url = reverse('wl_dashboard:tree_officer')

    @property
    def permissions(self):
        return {
            'get': dict(allowed=[self.officer],
                        forbidden=[self.assessor, self.customer]),
        }
class TableCustomerViewTest(helpers.BasePermissionViewTestCase):
    """The customer tables page is open to every user."""
    view_url = reverse('wl_dashboard:tables_customer')

    @property
    def permissions(self):
        return {
            'get': dict(allowed=self.all_users, forbidden=[]),
        }
class TableAssessorViewTest(helpers.BasePermissionViewTestCase):
    """Only assessors may GET the assessor tables page."""
    view_url = reverse('wl_dashboard:tables_assessor')

    @property
    def permissions(self):
        return {
            'get': dict(allowed=[self.assessor],
                        forbidden=[self.officer, self.customer]),
        }
class TableApplicationOfficerViewTest(helpers.BasePermissionViewTestCase):
    """Only officers may GET the officer applications tables page."""
    view_url = reverse('wl_dashboard:tables_applications_officer')

    @property
    def permissions(self):
        return {
            'get': dict(allowed=[self.officer],
                        forbidden=[self.assessor, self.customer]),
        }
class DataTableApplicationOfficerViewTest(helpers.BasePermissionViewTestCase):
    """Officer-only application data endpoint; GET and POST alike."""
    view_url = reverse('wl_dashboard:data_application_officer')

    @property
    def permissions(self):
        return {
            'get': dict(allowed=[self.officer],
                        forbidden=[self.assessor, self.customer]),
            'post': dict(allowed=[self.officer],
                         forbidden=[self.assessor, self.customer]),
        }
class TableApplicationOfficerOnBehalfViewTest(helpers.BasePermissionViewTestCase):
    """Only officers may GET the on-behalf applications tables page."""
    view_url = reverse('wl_dashboard:tables_officer_onbehalf')

    @property
    def permissions(self):
        return {
            'get': dict(allowed=[self.officer],
                        forbidden=[self.assessor, self.customer]),
        }
class DataTableApplicationOfficerOnBehalfViewTest(helpers.BasePermissionViewTestCase):
    """Officer-only on-behalf application data endpoint; GET and POST alike."""
    view_url = reverse('wl_dashboard:data_application_officer_onbehalf')

    @property
    def permissions(self):
        return {
            'get': dict(allowed=[self.officer],
                        forbidden=[self.assessor, self.customer]),
            'post': dict(allowed=[self.officer],
                         forbidden=[self.assessor, self.customer]),
        }
class DataTableApplicationCustomerViewTest(helpers.BasePermissionViewTestCase):
    """Customer application data endpoint is open to every user."""
    view_url = reverse('wl_dashboard:data_application_customer')

    @property
    def permissions(self):
        return {
            'get': dict(allowed=self.all_users, forbidden=[]),
            'post': dict(allowed=self.all_users, forbidden=[]),
        }
class DataTableApplicationAssessorViewTest(helpers.BasePermissionViewTestCase):
    """Assessor application data endpoint: officers and assessors only."""
    view_url = reverse('wl_dashboard:data_application_assessor')

    @property
    def permissions(self):
        return {
            'get': dict(allowed=[self.officer, self.assessor],
                        forbidden=[self.customer]),
            'post': dict(allowed=[self.officer, self.assessor],
                         forbidden=[self.customer]),
        }
class TableLicenceOfficerViewTest(helpers.BasePermissionViewTestCase):
    """Only officers may GET the licences tables page."""
    view_url = reverse('wl_dashboard:tables_licences_officer')

    @property
    def permissions(self):
        return {
            'get': dict(allowed=[self.officer],
                        forbidden=[self.assessor, self.customer]),
        }
class DataTableLicenceOfficerViewTest(helpers.BasePermissionViewTestCase):
    """Officer-only licence data endpoint; GET and POST alike."""
    view_url = reverse('wl_dashboard:data_licences_officer')

    @property
    def permissions(self):
        return {
            'get': dict(allowed=[self.officer],
                        forbidden=[self.assessor, self.customer]),
            'post': dict(allowed=[self.officer],
                         forbidden=[self.assessor, self.customer]),
        }
class DataTableLicenceCustomerViewTest(helpers.BasePermissionViewTestCase):
    """Customer licence data endpoint is open to every user."""
    view_url = reverse('wl_dashboard:data_licences_customer')

    @property
    def permissions(self):
        return {
            'get': dict(allowed=self.all_users, forbidden=[]),
            'post': dict(allowed=self.all_users, forbidden=[]),
        }
class BulkLicenceRenewalCustomerViewTest(helpers.BasePermissionViewTestCase):
    """Bulk licence renewal PDF is officer-only for GET and POST.

    NOTE(review): the class name says Customer, but customers are forbidden
    here — confirm the intended name.
    """
    view_url = reverse('wl_dashboard:bulk_licence_renewal_pdf')

    @property
    def permissions(self):
        return {
            'get': dict(allowed=[self.officer],
                        forbidden=[self.assessor, self.customer]),
            'post': dict(allowed=[self.officer],
                         forbidden=[self.assessor, self.customer]),
        }
class TableReturnOfficerViewTest(helpers.BasePermissionViewTestCase):
    """Only officers may GET the returns tables page."""
    view_url = reverse('wl_dashboard:tables_returns_officer')

    @property
    def permissions(self):
        return {
            'get': dict(allowed=[self.officer],
                        forbidden=[self.assessor, self.customer]),
        }
class DataTableReturnOfficerViewTest(helpers.BasePermissionViewTestCase):
    """Officer-only returns data endpoint; GET and POST alike."""
    view_url = reverse('wl_dashboard:data_returns_officer')

    @property
    def permissions(self):
        return {
            'get': dict(allowed=[self.officer],
                        forbidden=[self.assessor, self.customer]),
            'post': dict(allowed=[self.officer],
                         forbidden=[self.assessor, self.customer]),
        }
class DataTableReturnOnBehalfViewTest(helpers.BasePermissionViewTestCase):
    """Officer-only on-behalf returns data endpoint; GET and POST alike."""
    view_url = reverse('wl_dashboard:data_returns_officer_onbehalf')

    @property
    def permissions(self):
        return {
            'get': dict(allowed=[self.officer],
                        forbidden=[self.assessor, self.customer]),
            'post': dict(allowed=[self.officer],
                         forbidden=[self.assessor, self.customer]),
        }
class DataTableCustomerCustomerViewTest(helpers.BasePermissionViewTestCase):
    """Customer returns data endpoint is open to every user.

    NOTE(review): the URL name is data_returns_customer while the class name
    repeats Customer — confirm the intended class name.
    """
    view_url = reverse('wl_dashboard:data_returns_customer')

    @property
    def permissions(self):
        return {
            'get': dict(allowed=self.all_users, forbidden=[]),
            'post': dict(allowed=self.all_users, forbidden=[]),
        }
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright: Tristan Le Guern <tleguern at bouledef.eu>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
# Standard Ansible module metadata: documented options for this module.
DOCUMENTATION = '''
---
module: proxmox_group_info
short_description: Retrieve information about one or more Proxmox VE groups
version_added: 1.3.0
description:
- Retrieve information about one or more Proxmox VE groups
options:
group:
description:
- Restrict results to a specific group.
aliases: ['groupid', 'name']
type: str
author: Tristan Le Guern (@tleguern)
extends_documentation_fragment: community.general.proxmox.documentation
'''
# Playbook usage examples shown by ansible-doc.
EXAMPLES = '''
- name: List existing groups
community.general.proxmox_group_info:
api_host: helldorado
api_user: root@pam
api_password: "{{ password | default(omit) }}"
api_token_id: "{{ token_id | default(omit) }}"
api_token_secret: "{{ token_secret | default(omit) }}"
register: proxmox_groups
- name: Retrieve information about the admin group
community.general.proxmox_group_info:
api_host: helldorado
api_user: root@pam
api_password: "{{ password | default(omit) }}"
api_token_id: "{{ token_id | default(omit) }}"
api_token_secret: "{{ token_secret | default(omit) }}"
group: admin
register: proxmox_group_admin
'''
# Schema of the values returned in the module result.
RETURN = '''
proxmox_groups:
description: List of groups.
returned: always, but can be empty
type: list
elements: dict
contains:
comment:
description: Short description of the group.
returned: on success, can be absent
type: str
groupid:
description: Group name.
returned: on success
type: str
users:
description: List of users in the group.
returned: on success
type: list
elements: str
'''
from ansible.module_utils.basic import AnsibleModule, missing_required_lib
from ansible_collections.community.general.plugins.module_utils.proxmox import (
proxmox_auth_argument_spec, ProxmoxAnsible, HAS_PROXMOXER, PROXMOXER_IMP_ERR)
class ProxmoxGroupInfoAnsible(ProxmoxAnsible):
    """Thin wrapper around the Proxmox access/groups API endpoints."""

    def get_group(self, groupid):
        """Fetch a single group by id, failing the module if it is absent."""
        try:
            data = self.proxmox_api.access.groups.get(groupid)
        except Exception:
            self.module.fail_json(msg="Group '%s' does not exist" % groupid)
        # The per-group endpoint omits the id, so re-attach it.
        data['groupid'] = groupid
        return ProxmoxGroup(data)

    def get_groups(self):
        """Fetch every group known to the cluster."""
        records = self.proxmox_api.access.groups.get()
        return [ProxmoxGroup(record) for record in records]
class ProxmoxGroup:
    """Normalized view of a raw Proxmox group record.

    Depending on the API endpoint, members arrive either as a
    comma-separated ``users`` string or as a ``members`` list; both are
    exposed uniformly as a ``users`` list in ``self.group``.
    """

    def __init__(self, group):
        self.group = {}
        for key, value in group.items():
            if key == 'users' and isinstance(value, str):
                self.group['users'] = value.split(',')
            elif key == 'members':
                self.group['users'] = value
            else:
                self.group[key] = value
def proxmox_group_info_argument_spec():
    """Argument-spec fragment declaring the optional ``group`` filter."""
    return {'group': {'type': 'str', 'aliases': ['groupid', 'name']}}
def main():
    """Module entry point: query the requested group(s) and exit with them."""
    argument_spec = proxmox_auth_argument_spec()
    argument_spec.update(proxmox_group_info_argument_spec())
    module = AnsibleModule(
        argument_spec=argument_spec,
        required_one_of=[('api_password', 'api_token_id')],
        required_together=[('api_token_id', 'api_token_secret')],
        supports_check_mode=True
    )
    result = dict(changed=False)
    if not HAS_PROXMOXER:
        module.fail_json(msg=missing_required_lib('proxmoxer'), exception=PROXMOXER_IMP_ERR)
    proxmox = ProxmoxGroupInfoAnsible(module)
    requested = module.params['group']
    # A specific group filter yields a single-element list.
    if requested:
        groups = [proxmox.get_group(groupid=requested)]
    else:
        groups = proxmox.get_groups()
    result['proxmox_groups'] = [item.group for item in groups]
    module.exit_json(**result)
# Module entry point when executed by Ansible.
if __name__ == '__main__':
    main()
|
# AUTOGENERATED! DO NOT EDIT! File to edit: 00_core.ipynb (unless otherwise specified).
__all__ = ['Clusterer']
# Cell
import numpy as np
import pandas as pd
import warnings
with warnings.catch_warnings():
warnings.simplefilter("ignore")
from tslearn.clustering import TimeSeriesKMeans
from netdata_pandas.data import get_data, get_chart_list
from am4894plots.plots import plot_lines_grid
# Cell
class Clusterer:
    """Cluster netdata metric time series with k-means.

    Pulls raw data for the given hosts/charts, optionally differences,
    smooths and min-max normalizes it, clusters the series with
    TimeSeriesKMeans, then derives per-cluster quality scores and metadata.
    """

    def __init__(self,
                 hosts: list, charts: list, after: int, before: int, diff: bool = False, norm: bool = True,
                 smooth_n: int = 5, smooth_func: str = 'mean', n_clusters: int = 10, min_n: int = 3,
                 max_n: int = 100, min_qs: float = 0.5):
        # Data selection window.
        self.hosts = hosts
        self.charts = charts
        self.after = after
        self.before = before
        # Preprocessing options.
        self.diff = diff
        self.norm = norm
        self.smooth_n = smooth_n
        self.smooth_func = smooth_func
        # Clustering options and per-cluster validity thresholds.
        self.n_clusters = n_clusters
        self.min_n = min_n    # minimum members for a cluster to be valid
        self.max_n = max_n    # maximum members for a cluster to be valid
        self.min_qs = min_qs  # minimum quality score for a cluster to be valid
        # cluster label -> mean absolute pairwise correlation of its members
        self.cluster_quality_dict = {}

    def get_data(self):
        """Fetch raw data into self.df and clean obvious gaps."""
        self.df = get_data(self.hosts, self.charts, after=self.after, before=self.before, user=None, pwd=None)
        # remove duplicate columns that we might get from get_data()
        self.df = self.df.loc[:, ~self.df.columns.duplicated()]
        # drop any empty columns
        self.df = self.df.dropna(axis=1, how='all')
        # forward fill and backward fill to try remove any N/A values
        self.df = self.df.ffill().bfill()

    def preprocess_data(self):
        """Difference, smooth and normalize self.df; set a datetime index."""
        if self.diff:
            self.df = self.df.diff()
        if self.smooth_n > 0:
            # Unknown smooth_func values fall back to a rolling mean.
            if self.smooth_func == 'mean':
                self.df = self.df.rolling(self.smooth_n).mean().dropna(how='all')
            elif self.smooth_func == 'max':
                self.df = self.df.rolling(self.smooth_n).max().dropna(how='all')
            elif self.smooth_func == 'min':
                self.df = self.df.rolling(self.smooth_n).min().dropna(how='all')
            elif self.smooth_func == 'sum':
                self.df = self.df.rolling(self.smooth_n).sum().dropna(how='all')
            elif self.smooth_func == 'median':
                self.df = self.df.rolling(self.smooth_n).median().dropna(how='all')
            else:
                self.df = self.df.rolling(self.smooth_n).mean().dropna(how='all')
        if self.norm:
            # Min-max scale each column to [0, 1]; constant columns become
            # all-NaN and are dropped.
            self.df = (self.df - self.df.min()) / (self.df.max() - self.df.min())
            self.df = self.df.dropna(axis=1, how='all')
        # Index arrives as epoch seconds — presumably UTC; convert to datetime.
        self.df = self.df.set_index(pd.to_datetime(self.df.index, unit='s'))

    def cluster_data(self):
        """Fit TimeSeriesKMeans and build cluster membership lookups."""
        self.model = TimeSeriesKMeans(
            n_clusters=self.n_clusters, metric="euclidean", max_iter=10, n_init=2
        ).fit(self.df.transpose().values)
        self.df_cluster = pd.DataFrame(list(zip(self.df.columns, self.model.labels_)), columns=['metric', 'cluster'])
        # cluster label -> list of member metric names
        self.cluster_metrics_dict = self.df_cluster.groupby(['cluster'])['metric'].apply(list).to_dict()
        # cluster label -> number of members
        self.cluster_len_dict = self.df_cluster['cluster'].value_counts().to_dict()

    def generate_quality_scores(self):
        """Score each cluster by the mean absolute pairwise correlation of its members."""
        # Iterate each unique label once: model.labels_ has one entry per
        # series, so iterating it directly would redundantly recompute each
        # cluster's score len(cluster) times.
        for cluster in set(self.model.labels_):
            self.x_corr = self.df[self.cluster_metrics_dict[cluster]].corr().abs().values
            # Mean of the strictly-upper-triangle (pairwise) correlations;
            # NaN for single-member clusters, handled by fillna(0) later.
            self.x_corr_mean = round(self.x_corr[np.triu_indices(self.x_corr.shape[0], 1)].mean(), 2)
            self.cluster_quality_dict[cluster] = self.x_corr_mean

    def generate_df_cluster_meta(self):
        """Build per-cluster metadata: size, quality score and validity flag."""
        self.df_cluster_meta = pd.DataFrame.from_dict(self.cluster_len_dict, orient='index', columns=['n'])
        self.df_cluster_meta.index.names = ['cluster']
        self.df_cluster_meta['quality_score'] = self.df_cluster_meta.index.map(self.cluster_quality_dict).fillna(0)
        self.df_cluster_meta = self.df_cluster_meta.sort_values('quality_score', ascending=False)
        # valid == 1 iff min_n <= n <= max_n and quality_score >= min_qs.
        self.df_cluster_meta['valid'] = np.where(self.df_cluster_meta['n'] < self.min_n, 0, 1)
        self.df_cluster_meta['valid'] = np.where(self.df_cluster_meta['n'] > self.max_n, 0, self.df_cluster_meta['valid'])
        self.df_cluster_meta['valid'] = np.where(self.df_cluster_meta['quality_score'] < self.min_qs, 0, self.df_cluster_meta['valid'])

    def generate_df_cluster_centers(self):
        """Expose the fitted cluster centers as a DataFrame on self.df's index."""
        self.df_cluster_centers = pd.DataFrame(
            data=self.model.cluster_centers_.reshape(
                self.model.cluster_centers_.shape[0],
                self.model.cluster_centers_.shape[1]
            )
        ).transpose()
        self.df_cluster_centers.index = self.df.index

    def run_all(self):
        """Run the full pipeline end to end."""
        self.get_data()
        self.preprocess_data()
        self.cluster_data()
        self.generate_quality_scores()
        self.generate_df_cluster_meta()
        self.generate_df_cluster_centers()
|
#!/usr/local/bin/python
# -*- coding: utf-8 -*-
import os
from posts import new_posts
def crear_carpeta(carpeta):
    """Create the directory *carpeta* (with parents) unless it already exists."""
    if os.path.exists(carpeta):
        return
    os.makedirs(carpeta)
def generar_datos_post(plantilla, post):
    """Fill the template markers (*t, *d, *e, *c) with the post's fields.

    Replacements are applied in the same order as before, so a marker
    inside an earlier substitution is still replaced by a later one.
    """
    for marca, campo in (("*t", "titulo"),
                         ("*d", "resumen"),
                         ("*e", "enlace"),
                         ("*c", "contenido")):
        plantilla = plantilla.replace(marca, post[campo])
    return plantilla
def grabar_fichero(carpeta, nombre, datos):
    """Write *datos* to carpeta/nombre, creating the folder first if needed."""
    if carpeta != "":
        crear_carpeta(carpeta)
    # The context manager closes the file; the explicit close() that was
    # here was redundant.
    with open(os.path.join(carpeta, nombre), "w") as g:
        g.write(datos)
def leer_archivo(carpeta, nombre):
    """Return the full contents of carpeta/nombre as a string."""
    # The context manager closes the file; the explicit close() that was
    # here was redundant (and unreachable after returning anyway).
    with open(os.path.join(carpeta, nombre), "r") as f:
        return f.read()
def lista(new_posts):
    """Render the posts as an HTML list of links, one <li> per post."""
    partes = ['<li><a href="' + p['enlace'] + '">' + p['titulo'] + '</a></li>\n\t'
              for p in new_posts]
    return "".join(partes)
def genera_entry_rss(post, fecha):
    """Render one feed entry for *post* dated *fecha* from the site template."""
    from plantillas import plantilla_item_rss
    entrada = plantilla_item_rss
    # Markers replaced in the original order.
    for marca, valor in (("*t", post['titulo']),
                         ("*d", post['resumen']),
                         ("*e", post['enlace']),
                         ("*f", fecha),
                         ("*c", post['contenido']),
                         ("*y", post['categoria'])):
        entrada = entrada.replace(marca, valor)
    return entrada
def actualiza_fecha_rss(fecha, plantilla_rss):
    """Replace the first <updated>…</updated> timestamp in the feed with *fecha*."""
    import re
    insercion = "<updated>" + fecha + "</updated>\n"
    # Raw string: "\S" in a normal literal triggers an invalid-escape
    # warning on modern Python even though the resulting regex is the same.
    expresion_regular = re.compile(r"<updated>\S*</updated>")
    return expresion_regular.sub(insercion, plantilla_rss, count=1)
def genera_new_posts(posts):
    """Write an index.html page for every post inside its enlace folder."""
    from plantillas import plantilla_posts
    for post in posts:
        contenido = generar_datos_post(plantilla_posts, post)
        grabar_fichero(post["enlace"], "index.html", contenido)
def genera_index(new_posts):
    """Insert links for the new posts at the <!--insert--> marker of index.html."""
    punto_insercion = "<!--insert-->"
    indice = leer_archivo("", 'index.html')
    # Keep the marker in place so future runs can keep inserting above
    # the previously generated links.
    nuevos = punto_insercion + "\n\t" + lista(new_posts)
    grabar_fichero("", "index.html", indice.replace(punto_insercion, nuevos))
def genera_rss(new_posts):
    """Prepend entries for the new posts to feed.xml and refresh its date."""
    from datetime import datetime
    punto_insercion = "<!--entries-->"
    # ISO-like timestamp (space replaced by 'T') shared by the feed date
    # and every new entry.
    fecha = str(datetime.today()).replace(" ", "T")
    entradas = punto_insercion + "\n"
    for post in new_posts:
        entradas += genera_entry_rss(post, fecha) + "\n"
    rss = actualiza_fecha_rss(fecha, leer_archivo("", "feed.xml"))
    grabar_fichero("", "feed.xml", rss.replace(punto_insercion, entradas))
# Site build entry point: generate each post's page, then refresh the index
# and the RSS feed with the newly added posts.
genera_new_posts(new_posts)
genera_index(new_posts)
genera_rss(new_posts)
|
# Build a self-referential list: its first two elements are the list itself.
root = []
root[:] = [root, root, None]
print(root)
print(id(root))
# Every [0] hop lands back on the very same list object, so an arbitrarily
# deep chain of root[0][0]...[0] is still just root — the printed id is
# identical to the one above.
print(id(root[0]))
# Y = [[1,2,3],
# [4,5,6]]
#
# # Unpacking Arguments *Y
# abc = zip(*Y)
#
# for x in abc:
# print(type(x))
#
# print( 1+2+3+4+5 + 6 )
class A:
    """Minimal empty class used to inspect the type/metaclass chain below."""
abc = A()
# An instance's type is its class, a class's type is `type`, and `type` is
# its own metaclass — which terminates the chain.
print(type(abc))
print(type(A))
print(type(type))
# a = 0
# id_a = id(a)
# variables = {**locals(), **globals()}
# for var in variables:
# exec('var_id=id(%s)'%var)
# if var_id == id_a:
# exec('the_variable=%s'%var)
# print(the_variable)
# print(id(the_variable))
print("==================================")
# `and` binds tighter than `or`: '' or (1 and 2). '' is falsy, 1 is truthy
# so `1 and 2` evaluates to 2, which `or` returns — this prints 2.
print('' or 1 and 2)
print("==================================")
import json
from functools import partial
import time
try:
from queue import Full, Empty
except ImportError:
from Queue import Full, Empty
from collections import namedtuple
from tornado.ioloop import IOLoop, PeriodicCallback
from tornado.httpclient import AsyncHTTPClient
from tornado.escape import json_decode
from .rest_proxy import request_for_batch, ERROR_CODES, RETRIABLE_ERROR_CODES
from .message import Message
from .events import FlushReason, DropReason
from .custom_logging import getLogger
logger = getLogger('kafka_rest.producer')
class AsyncProducer(object):
def __init__(self, client):
self.client = client
self.flush_timers = {}
self.retry_timer = None
self.inflight_requests = {}
self.http_client = AsyncHTTPClient(io_loop=self.client.io_loop,
max_clients=self.client.http_max_clients)
def _schedule_retry_periodically(self):
logger.trace('Scheduling retry queue processing every {0} seconds'.format(self.client.retry_period_seconds))
self.retry_timer = PeriodicCallback(self._start_retries,
self.client.retry_period_seconds * 1000)
self.retry_timer.start()
def _message_batches_from_queue(self, queue):
current_time = time.time()
current_batch = []
while not queue.empty():
try:
message = queue.get_nowait()
except Empty:
break
# If this is the retry queue, stop gathering if the first prioritized
# item in the queue isn't due for retry yet. If this is the first-send
# queue, this shouldn't ever trigger because retry_after_time is 0
if message.retry_after_time > current_time:
queue.put_nowait(message)
break
current_batch.append(message)
if len(current_batch) >= self.client.flush_max_batch_size:
yield current_batch
current_batch = []
if current_batch:
yield current_batch
def _start_retries(self):
"""Go through all the retry queues and schedule produce callbacks
for all messages that are due to be retried."""
if self.client.response_5xx_circuit_breaker.tripped:
logger.trace('Transport circuit breaker is tripped, skipping retry pass')
self.client.registrar.emit('circuit_breaker.retries')
return
logger.trace('Checking retry queues for events to retry')
for topic, retry_queue in self.client.retry_queues.items():
for batch in self._message_batches_from_queue(retry_queue):
logger.trace('Retrying batch of size {0} for topic {1}'.format(len(batch), topic))
self.client.registrar.emit('retry_batch', topic, batch)
IOLoop.current().add_callback(self._send_batch_produce_request, topic, batch)
def _reset_flush_timer(self, topic):
if topic in self.flush_timers:
logger.trace('Clearing flush timer for topic {0}'.format(topic))
IOLoop.current().remove_timeout(self.flush_timers[topic])
logger.trace('Scheduled new flush timer for topic {0} in {1} seconds'.format(topic,
self.client.flush_time_threshold_seconds))
handle = IOLoop.current().call_later(self.client.flush_time_threshold_seconds,
self._flush_topic, topic, FlushReason.TIME)
self.flush_timers[topic] = handle
def _send_batch_produce_request(self, topic, batch):
if self.client.in_shutdown:
connect_timeout = self.client.shutdown_timeout_seconds
request_timeout = self.client.shutdown_timeout_seconds
else:
connect_timeout = self.client.connect_timeout_seconds
request_timeout = self.client.request_timeout_seconds
request = request_for_batch(self.client.host, self.client.port,
connect_timeout, request_timeout,
self.client.schema_cache, topic, batch)
logger.info('Sending {0} events to topic {1}'.format(len(batch), topic))
self.client.registrar.emit('send_request', topic, batch)
self.inflight_requests[request._id] = request
self.http_client.fetch(request,
callback=partial(self._handle_produce_response, topic),
raise_error=False)
def _queue_message_for_retry(self, topic, message):
if message.can_retry(self.client):
new_message = message.for_retry(self.client)
try:
self.client.retry_queues[topic].put_nowait(new_message)
except Full:
logger.critical('Retry queue full for topic {0}, message {1} cannot be retried'.format(topic, message))
self.client.registrar.emit('drop_message', topic, message, DropReason.RETRY_QUEUE_FULL)
else:
logger.trace('Queued failed message {0} for retry in topic {1}'.format(new_message, topic))
self.client.registrar.emit('retry_message', topic, new_message)
else:
logger.critical('Dropping failed message {0} for topic {1}, has exceeded maximum retries'.format(message, topic))
self.client.registrar.emit('drop_message', topic, message, DropReason.MAX_RETRIES_EXCEEDED)
def _handle_produce_success(self, topic, response, response_body):
# Store schema IDs if we haven't already
if self.client.schema_cache[topic].get('value-id') is None:
logger.debug('Storing value schema ID of {0} for topic {1}'.format(response_body['value_schema_id'], topic))
self.client.schema_cache[topic]['value-id'] = response_body['value_schema_id']
if response_body.get('key_schema_id') and self.client.schema_cache[topic].get('key-id') is None:
logger.debug('Storing key schema ID of {0} for topic {1}'.format(response_body['key_schema_id'], topic))
self.client.schema_cache[topic]['key-id'] = response_body['key_schema_id']
# Individual requests could still have failed, need to check
# each response object's error code
succeeded, failed = [], []
for idx, offset in enumerate(response_body['offsets']):
message = response.request._batch[idx]
if offset.get('error_code') is None:
succeeded.append((message, offset))
elif offset.get('error_code') in RETRIABLE_ERROR_CODES:
failed.append((message, offset))
self._queue_message_for_retry(topic, message)
else:
failed.append((message, offset))
logger.critical('Got non-retriable error code ({0}: {1}) for message {2}'.format(offset.get('error_code'),
offset.get('message'),
response.request._batch[idx]))
self.client.registrar.emit('drop_message', topic, message, DropReason.NONRETRIABLE)
logger.info('Successful produce response for topic {0}. Succeeded: {1} Failed: {2}'.format(topic,
len(succeeded),
len(failed)))
logger.trace('Failed messages with offsets: {0}'.format(failed))
self.client.registrar.emit('produce_success', topic, succeeded, failed)
def _handle_produce_response(self, topic, response):
del self.inflight_requests[response.request._id]
if response.code != 599:
try:
response_body = json_decode(response.body)
except Exception:
# The proxy should always respond to us in JSON but it's possible
# something like a load balancer or reverse proxy could return
# a response to us we are not expecting.
logger.error('Got unexpected non-JSON body in response, will attempt to retry')
self.client.registrar.emit('response_malformed', topic, response)
self.client.response_5xx_circuit_breaker.record_failure()
for message in response.request._batch:
self._queue_message_for_retry(topic, message)
return
else:
error_code, error_message = response_body.get('error_code'), response_body.get('message')
if response.code not in (200, 599):
logger.error('Received {0} response ({1}: {2}) submitting batch to topic {3}'.format(response.code,
error_code,
error_message,
topic))
if response.code >= 500:
self.client.response_5xx_circuit_breaker.record_failure()
self.client.registrar.emit('response_5xx', topic, response)
else:
self.client.response_5xx_circuit_breaker.reset()
if response.code == 200:
self._handle_produce_success(topic, response, response_body)
else: # We failed somehow, more information in the error code
if response.code == 599 or error_code in RETRIABLE_ERROR_CODES:
for message in response.request._batch:
self._queue_message_for_retry(topic, message)
else: # Non-retriable failure of entire request
for message in response.request._batch:
self.client.registrar.emit('drop_message', topic, message, DropReason.NONRETRIABLE)
    def _flush_topic(self, topic, reason):
        """Drain the message queue for *topic* into batched produce requests.

        Skipped entirely while the 5xx circuit breaker is tripped; the
        flush timer is re-armed afterwards unless the client is shutting down.
        """
        if self.client.response_5xx_circuit_breaker.tripped:
            logger.trace('Transport circuit breaker is tripped, skipping flush topic')
            self.client.registrar.emit('circuit_breaker.flush_topic', topic, reason)
        else:
            logger.trace('Flushing topic {0} (reason: {1})'.format(topic, reason))
            self.client.registrar.emit('flush_topic', topic, reason)
            queue = self.client.message_queues[topic]
            # One produce request per batch, scheduled on the IOLoop so a
            # large backlog does not block the current callback.
            for batch in self._message_batches_from_queue(queue):
                IOLoop.current().add_callback(self._send_batch_produce_request, topic, batch)
        if not self.client.in_shutdown:
            self._reset_flush_timer(topic)
    def evaluate_queue(self, topic, queue):
        """Flush *topic* immediately if its queue has reached the length
        threshold; otherwise make sure a flush timer exists for it."""
        if queue.qsize() >= self.client.flush_length_threshold:
            self._flush_topic(topic, FlushReason.LENGTH)
        elif topic not in self.flush_timers:
            self._reset_flush_timer(topic)
    def start_shutdown(self):
        """Prevent the producer from firing off any additional requests
        as a result of timers, then schedule the remainder of the shutdown
        tasks to take place after giving in-flight requests some time
        to return."""
        # We need to take manual control of the event loop now, so
        # we stop the timers in order to not fight against them
        self.retry_timer.stop()
        for topic in self.flush_timers:
            logger.debug('Shutdown: removing flush timer for topic {0}'.format(topic))
            IOLoop.current().remove_timeout(self.flush_timers[topic])
        # Last-ditch send attempts on remaining messages. These will use
        # shorter shutdown timeouts on the request in order to finish
        # by the time we invoke _finish_shutdown
        self.client.response_5xx_circuit_breaker.reset()
        IOLoop.current().add_callback(self._start_retries)
        for topic, queue in self.client.message_queues.items():
            if not queue.empty():
                IOLoop.current().add_callback(self._flush_topic, topic, FlushReason.SHUTDOWN)
        logger.debug('Shutdown: waiting {0} seconds for in-flight requests to return'.format(self.client.shutdown_timeout_seconds))
        # We issue this step in a separate callback to get around a small timing issue
        # with sending out all these requests before shutdown. If you imagine that the
        # _flush_topic calls above take 0.1 seconds each to complete, if we simply
        # registered this call here before any of those calls did their 0.1 seconds
        # of work, we would actually invoke _finish_shutdown before the last request
        # made had the full length of time allotted to it to finish its request.
        # Additionally, we add a buffer second to the timeout to make sure the request
        # timeouts get into Tornado's IOLoop before the shutdown request.
        IOLoop.current().add_callback(lambda: IOLoop.current().call_later(self.client.shutdown_timeout_seconds + 1,
                                                                          self._finish_shutdown))
    def _finish_shutdown(self):
        """Emit the final 'shutdown' event with all undelivered state, then
        stop the IOLoop."""
        # Anything not sent at this point is not going to make it out. We
        # fire off a specialized event in this case to give the
        # application code a chance to do something with this data all
        # at once.
        self.client.registrar.emit('shutdown',
                                   self.client.message_queues,
                                   self.client.retry_queues,
                                   self.inflight_requests)
        IOLoop.current().stop()
        logger.debug('Shutdown: producer issued stop command to IOLoop')
|
from .evaluation import EvalSection
from django.db import models
class Polygraphic(EvalSection):
    """Evaluation section holding a one-to-one link to a drf_schemas Item."""

    # Nullable so the section can exist before an Item is attached;
    # deleting the linked Item cascades to this section.
    eval_data = models.OneToOneField(
        'drf_schemas.Item',
        null=True,
        on_delete=models.CASCADE,
        related_name='polygraphic_eval'
    )
|
#
# * This library is free software; you can redistribute it and/or
# * modify it under the terms of the GNU Lesser General Public
# * License as published by the Free Software Foundation; either
# * version 2.1 of the License, or (at your option) any later version.
# *
# * This library is distributed in the hope that it will be useful,
# * but WITHOUT ANY WARRANTY; without even the implied warranty of
# * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# * Lesser General Public License for more details.
#
#propka3.0, revision 182 2011-08-09
#-------------------------------------------------------------------------------------------------------
#-- --
#-- PROPKA: A PROTEIN PKA PREDICTOR --
#-- --
#-- VERSION 3.0, 01/01/2011, COPENHAGEN --
#-- BY MATS H.M. OLSSON AND CHRESTEN R. SONDERGARD --
#-- --
#-------------------------------------------------------------------------------------------------------
#
#
#-------------------------------------------------------------------------------------------------------
# References:
#
# Very Fast Empirical Prediction and Rationalization of Protein pKa Values
# Hui Li, Andrew D. Robertson and Jan H. Jensen
# PROTEINS: Structure, Function, and Bioinformatics 61:704-721 (2005)
#
# Very Fast Prediction and Rationalization of pKa Values for Protein-Ligand Complexes
# Delphine C. Bas, David M. Rogers and Jan H. Jensen
# PROTEINS: Structure, Function, and Bioinformatics 73:765-783 (2008)
#
# PROPKA3: Consistent Treatment of Internal and Surface Residues in Empirical pKa predictions
# Mats H.M. Olsson, Chresten R. Sondergard, Michal Rostkowski, and Jan H. Jensen
# Journal of Chemical Theory and Computation, 7, 525-537 (2011)
#-------------------------------------------------------------------------------------------------------
from lib import pka_print
def resName2Type(resName=None):
    """
    Map a residue name to its propka parameter group.

    A known residue name returns its group label; an unknown (or omitted)
    name returns the full name->group dictionary, matching the original
    fallback behaviour.
    """
    group_by_residue = {'C- ': "COO",
                        'ASP': "COO",
                        'GLU': "COO",
                        'HIS': "HIS",
                        'CYS': "CYS",
                        'TYR': "TYR",
                        'LYS': "LYS",
                        'ARG': "ARG",
                        'N+ ': "N+ ",
                        'SER': "ROH",
                        'THR': "ROH",
                        'ASN': "AMD",
                        'GLN': "AMD",
                        'TRP': "TRP"}
    # dict.get with the mapping itself as the default reproduces the
    # original if/else fallback in a single expression.
    return group_by_residue.get(resName, group_by_residue)
def getQs(resName=None):
    """
    Return the formal charge for a residue or group label.

    A known label returns its charge; an unknown (or omitted) label
    returns the full label->charge dictionary, matching the original
    fallback behaviour.
    """
    charge_by_label = {'COO': -1.0,
                       'ASP': -1.0,
                       'GLU': -1.0,
                       'C- ': -1.0,
                       'TYR': -1.0,
                       'CYS': -1.0,
                       'HIS': 1.0,
                       'LYS': 1.0,
                       'ARG': 1.0,
                       'N+ ': 1.0}
    # single-expression equivalent of the original if/in/else lookup
    return charge_by_label.get(resName, charge_by_label)
def pKa_mod(resName=None):
    """
    returns a dictionary with model/water pKa values

    Args:
        resName: three-letter residue label, or None.

    Returns:
        The model pKa for a known ionizable residue, the full
        name->pKa dictionary when resName is None, or 20.00 for
        residues without a model pKa (e.g. ASN, GLN).
    """
    pKa_mod = {'C- ': 3.20,
               'ASP': 3.80,
               'GLU': 4.50,
               'HIS': 6.50,
               'CYS': 9.00,
               'TYR': 10.00,
               'LYS': 10.50,
               'ARG': 12.50,
               'N+ ': 8.00}
    # Fixed: 'is None' is the correct identity test (was '== None',
    # which invokes __eq__ and is non-idiomatic).
    if resName is None:
        return pKa_mod
    elif resName in pKa_mod:
        return pKa_mod[resName]
    else:
        # generic value for 'uninteresting' residues, e.g. ASN, GLN
        return 20.00
def getInteraction():
    """
    matrix for propka interactions; Note that only the LOWER part of the matrix is used!
       'N' non-iterative interaction
       'I' iterative interaction
       '-' no interaction
    Columns are ordered COO, CYS, TYR, HIS, N+, LYS, ARG.
    """
    # each row is stored as a compact 7-character code string and
    # expanded to the list-of-characters form the callers expect
    compact_rows = [('COO', 'IINNNNN'),
                    ('CYS', 'IININNN'),
                    ('TYR', 'NNIIIIN'),
                    ('HIS', 'IIIINNN'),
                    ('N+ ', 'NNININN'),
                    ('LYS', 'NNINNIN'),
                    ('ARG', 'NNNNNNI'),
                    ('ROH', 'NNN----'),
                    ('AMD', 'NNNN---'),
                    ('TRP', 'NNN----')]
    side_chain = {group: list(codes) for group, codes in compact_rows}
    return side_chain
# ------- Coulomb parameters --------- #
def getCoulombParameters(label=None):
    """
    storage of Coulomb default parameters

    A known label returns that model's parameter dict; an unknown (or
    omitted) label returns the whole label->parameters dictionary.
    """
    coulomb_parameters = {
        'Linear': {'cutoff': [4.0, 7.0],
                   'max_dpka': 2.40,
                   'scaled': True},
        'Coulomb': {'cutoff': [4.0, 10.0],
                    'diel': 80.00,
                    'scaled': True},
    }
    return coulomb_parameters.get(label, coulomb_parameters)
# ------- Desolvation parameters --------- #
def getDesolvationParameters(label=None):
    """
    storage of desolvation default parameters

    A known label returns that model's parameter dict; an unknown (or
    omitted) label returns the whole label->parameters dictionary.
    """
    # getLocalRadii()/getVanDerWaalsVolumes() are called once per entry so
    # each model owns an independent dict, as in the original.
    desolvation_parameters = {
        'propka2': {'allowance': 400.00,
                    'prefactor': -0.01,
                    'local': -0.07,
                    'radii': getLocalRadii()},
        'ContactModel': {'allowance': 400.00,
                         'prefactor': -0.01,
                         'local': -0.07,
                         'radii': getLocalRadii()},
        'VolumeModel': {'allowance': 0.00,
                        'prefactor': -13.50,
                        'surface': 0.25,
                        'volume': getVanDerWaalsVolumes()},
        'ScaledVolumeModel': {'allowance': 0.00,
                              'prefactor': -13.50,
                              'surface': 0.00,
                              'volume': getVanDerWaalsVolumes()},
    }
    return desolvation_parameters.get(label, desolvation_parameters)
def getVanDerWaalsVolumes():
    """
    storing relative Van der Waals volumes for volume desolvation models

    Values are relative to oxygen (1.00); the absolute tabulated volume
    is noted beside each entry.
    """
    return {
        'C': 1.40,   # 20.58  all 'C' and 'CA' atoms
        'C4': 2.64,  # 38.79  hydrodphobic carbon atoms + unidentified atoms
        'N': 1.06,   # 15.60  all nitrogen atoms
        'O': 1.00,   # 14.71  all oxygen atoms
        'S': 1.66,   # 24.43  all sulphur atoms
    }
def getLocalRadii():
    """
    local radii used in the 'propka2' and 'contact' desolvation models
    """
    radius_pairs = [('ASP', 4.5),
                    ('GLU', 4.5),
                    ('HIS', 4.5),
                    ('CYS', 3.5),
                    ('TYR', 3.5),
                    ('LYS', 4.5),
                    ('ARG', 5.0),
                    ('C- ', 4.5),
                    ('N+ ', 4.5)]
    return dict(radius_pairs)
# ------- hydrogen-bond parameters --------- #
def getHydrogenBondParameters(type=None):
    """
    definitions of default back-bone or side-chain interaction parameters

    Each entry is [max_dpka, [cutoff_min, cutoff_max]].

    IMPORTANT: side-chain parameters assigned to 'None' are given by the reverse
    (e.g. CYS-COO is given by COO-CYS) generated at the end.

    Note: the parameter name 'type' shadows the builtin but is kept for
    interface compatibility.  Exits with status 9 for an unknown type.
    """
    if type == "back-bone":
        # --- old 'propka1' back-bone parameter set ---
        # parameters determining the interaction with back-bone NH or CO groups
        parameters = {"COO": [-1.20, [2.00, 3.50]],
                      "CYS": [-2.40, [3.50, 4.50]],
                      "TYR": [-1.20, [3.50, 4.50]],
                      "HIS": [ 1.20, [2.00, 3.50]],
                      "N+ ": [ 1.20, [2.00, 3.50]],
                      "LYS": [ 1.20, [2.00, 3.50]],
                      "ARG": [ 1.20, [2.00, 3.50]]}
    elif type == "side-chain":
        # --- old 'propka1' side-chain parameter set ---
        # parameters determining the interaction with side-chain NH or CO groups
        # IMPORTANT: parameters with assigned to 'None' are given by the reverse
        # (e.g. CYS-COO is given by COO-CYS) generated at the end.
        parameters = {}
        parameters["COO"] = {"COO": [-0.80, [ 2.50, 3.50]],
                             "CYS": [-0.80, [ 3.00, 4.00]],
                             "TYR": [-0.80, [ 3.00, 4.00]],
                             "HIS": [-0.80, [ 2.00, 3.00]],
                             "N+ ": [-1.20, [ 3.00, 4.50]],
                             "LYS": [-0.80, [ 3.00, 4.00]],
                             "ARG": [-0.80, [ 2.00, 4.00]],
                             "ROH": [-0.80, [ 3.00, 4.00]],
                             "AMD": [-0.80, [ 2.00, 3.00]],
                             "TRP": [-0.80, [ 2.00, 3.00]]}
        parameters["CYS"] = {"COO": None,
                             "CYS": [-1.60, [ 3.00, 5.00]],
                             "TYR": [-0.80, [ 3.50, 4.50]],
                             "HIS": [-1.60, [ 3.00, 4.00]],
                             "N+ ": [-2.40, [ 3.00, 4.50]],
                             "LYS": [-1.60, [ 3.00, 4.00]],
                             "ARG": [-1.60, [ 2.50, 4.00]],
                             "ROH": [-1.60, [ 3.50, 4.50]],
                             "AMD": [-1.60, [ 2.50, 3.50]],
                             "TRP": [-1.60, [ 2.50, 3.50]]}
        parameters["TYR"] = {"COO": None,
                             "CYS": None,
                             "TYR": [ 0.80, [ 3.50, 4.50]],
                             "HIS": [-0.80, [ 2.00, 3.00]],
                             "N+ ": [-1.20, [ 3.00, 4.50]],
                             "LYS": [-0.80, [ 3.00, 4.00]],
                             "ARG": [-0.80, [ 2.50, 4.00]],
                             "ROH": [-0.80, [ 3.50, 4.50]],
                             "AMD": [-0.80, [ 2.50, 3.50]],
                             "TRP": [-0.80, [ 2.50, 3.50]]}
        parameters["HIS"] = {"COO": None,
                             "CYS": None,
                             "TYR": None,
                             "HIS": [ 0.00, [ 0.00, 0.00]],
                             "N+ ": [ 0.00, [ 0.00, 0.00]],
                             "LYS": [ 0.00, [ 0.00, 0.00]],
                             "ARG": [ 0.00, [ 0.00, 0.00]],
                             "ROH": [ 0.00, [ 0.00, 0.00]],
                             "AMD": [ 0.80, [ 2.00, 3.00]],
                             "TRP": [ 0.00, [ 0.00, 0.00]]}
        parameters["N+ "] = {"COO": None,
                             "CYS": None,
                             "TYR": None,
                             "HIS": None,
                             "N+ ": [ 0.00, [ 0.00, 0.00]],
                             "LYS": [ 0.00, [ 0.00, 0.00]],
                             "ARG": [ 0.00, [ 0.00, 0.00]],
                             "ROH": [ 0.00, [ 0.00, 0.00]],
                             "AMD": [ 0.00, [ 0.00, 0.00]],
                             "TRP": [ 0.00, [ 0.00, 0.00]]}
        parameters["LYS"] = {"COO": None,
                             "CYS": None,
                             "TYR": None,
                             "HIS": None,
                             "N+ ": None,
                             "LYS": [ 0.00, [ 0.00, 0.00]],
                             "ARG": [ 0.00, [ 0.00, 0.00]],
                             "ROH": [ 0.00, [ 0.00, 0.00]],
                             "AMD": [ 0.00, [ 0.00, 0.00]],
                             "TRP": [ 0.00, [ 0.00, 0.00]]}
        parameters["ARG"] = {"COO": None,
                             "CYS": None,
                             "TYR": None,
                             "HIS": None,
                             "N+ ": None,
                             "LYS": None,
                             "ARG": [ 0.00, [ 0.00, 0.00]],
                             "ROH": [ 0.00, [ 0.00, 0.00]],
                             "AMD": [ 0.00, [ 0.00, 0.00]],
                             "TRP": [ 0.00, [ 0.00, 0.00]]}
        parameters["ROH"] = {"COO": None,
                             "CYS": None,
                             "TYR": None,
                             "HIS": None,
                             "N+ ": None,
                             "LYS": None,
                             "ARG": None,
                             "ROH": [ 0.00, [ 0.00, 0.00]],
                             "AMD": [ 0.00, [ 0.00, 0.00]],
                             "TRP": [ 0.00, [ 0.00, 0.00]]}
        parameters["AMD"] = {"COO": None,
                             "CYS": None,
                             "TYR": None,
                             "HIS": None,
                             "N+ ": None,
                             "LYS": None,
                             "ARG": None,
                             "ROH": None,
                             "AMD": [ 0.00, [ 0.00, 0.00]],
                             "TRP": [ 0.00, [ 0.00, 0.00]]}
        parameters["TRP"] = {"COO": None,
                             "CYS": None,
                             "TYR": None,
                             "HIS": None,
                             "N+ ": None,
                             "LYS": None,
                             "ARG": None,
                             "ROH": None,
                             "AMD": None,
                             "TRP": [ 0.00, [ 0.00, 0.00]]}
        # updating side-chain parameter matrix to full matrix
        # (list() avoids mutating while holding a live keys view)
        keys = list(parameters)
        for key1 in keys:
            for key2 in keys:
                if key2 not in parameters[key1]:
                    # Fixed: this line previously used '==' (a no-op
                    # comparison) instead of '=', so missing slots were
                    # never filled in.
                    parameters[key1][key2] = [ 0.00, [ 0.00, 0.00]]
                elif parameters[key1][key2] is None:
                    # mirror the transposed entry (lower triangle -> upper)
                    parameters[key1][key2] = parameters[key2][key1]
    else:
        import sys  # local import: sys is not imported at module level
        # Fixed: the message previously interpolated the undefined name
        # 'label'; the parameter is called 'type'.
        pka_print("cannot determine what type of hydrogen-bonding interactions you want type=\"%s\" ['back-bone', 'side-chain']" % (type))
        sys.exit(9)
    return parameters
|
# -*- coding: utf-8 -*-
import re
from cms.models import CMSPlugin
from cms.plugin_base import CMSPluginBase
from cms.plugin_pool import plugin_pool
from cms.utils.plugins import downcast_plugins
from tests.models import DummyLink, DummySpacer, DummyText
from djangocms_translations.utils import get_text_field_child_label
@plugin_pool.register_plugin
class DummyTextPlugin(CMSPluginBase):
    """Test plugin mimicking a text plugin that embeds child plugins via
    <cms-plugin id="N"></cms-plugin> placeholder tags."""
    render_plugin = False
    model = DummyText

    @staticmethod
    def get_translation_export_content(field, plugin_data):
        """Expand empty <cms-plugin> placeholders in *field*'s content with
        each existing child plugin's translatable text.

        Returns a (content, child_plugin_ids) tuple; placeholders whose
        plugin no longer exists are stripped from the content.
        """
        content = plugin_data[field]
        subplugins_within_this_content = []

        regex = re.compile(r'.*?<cms-plugin id="(?P<pk>\d+)"></cms-plugin>.*?')
        for subplugin in CMSPlugin.objects.filter(id__in=regex.findall(content)):
            subplugins_within_this_content.append(subplugin.id)
            # downcast to the concrete plugin instance so its text field is readable
            subplugin = list(downcast_plugins([subplugin]))[0]
            # NOTE(review): rebinding 'field' here shadows the parameter
            field = get_text_field_child_label(subplugin.plugin_type)
            if field:
                # inject the child's text into its placeholder tag
                to = r'<cms-plugin id="{}">{}</cms-plugin>'.format(subplugin.id, getattr(subplugin, field))
                content = re.sub(r'<cms-plugin id="{}"></cms-plugin>'.format(subplugin.id), to, content)

        # drop placeholders that reference plugins that no longer exist
        empty_plugin_ids = re.findall(r'<cms-plugin id="(\d+)"></cms-plugin>', content)
        for empty_plugin_id in empty_plugin_ids:
            if int(empty_plugin_id) not in subplugins_within_this_content:
                content = content.replace(r'<cms-plugin id="{}"></cms-plugin>'.format(empty_plugin_id), '')
        return (content, subplugins_within_this_content)

    @staticmethod
    def set_translation_import_content(content, plugin):
        """Map child plugin id -> translated inner content, keeping only
        ids that still exist in the database."""
        regex = re.compile(r'.*?<cms-plugin id="(?P<pk>\d+)">(?P<content>.*?)</cms-plugin>.*?')
        subplugin_data = regex.findall(content)
        return {
            int(subplugin_id): subplugin_content
            for subplugin_id, subplugin_content in subplugin_data
            if CMSPlugin.objects.filter(id=subplugin_id).exists()
        }
@plugin_pool.register_plugin
class DummyText2Plugin(CMSPluginBase):
    """Test plugin whose translation hooks return fixed canned values,
    exercising the custom-override code path."""
    render_plugin = False
    model = DummyText

    @staticmethod
    def get_translation_export_content(field, plugin_data):
        # deliberately ignores the real plugin data
        return ('super dummy overwritten content', [])

    @staticmethod
    def set_translation_import_content(content, plugin):
        # deliberately returns a hard-coded mapping
        return {42: 'because I want this to be id=42'}
@plugin_pool.register_plugin
class DummyText3Plugin(CMSPluginBase):
    """Text plugin without translation hooks (tests the default path)."""
    render_plugin = False
    model = DummyText
@plugin_pool.register_plugin
class DummyLinkPlugin(CMSPluginBase):
    """Minimal link plugin used as a non-text child in tests."""
    render_plugin = False
    model = DummyLink
@plugin_pool.register_plugin
class DummySpacerPlugin(CMSPluginBase):
    """Minimal spacer plugin with no translatable content."""
    render_plugin = False
    model = DummySpacer
|
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Convert Kiba and Davis datasets into npz file which can be trained directly.
Note that the dataset split is inherited from GraphDTA and DeepDTA
"""
import os
import sys
import json
import random
import pickle
import argparse
import numpy as np
from rdkit import Chem
from collections import OrderedDict
from pahelix.utils.compound_tools import smiles_to_graph_data
from pahelix.utils.protein_tools import ProteinTokenizer
from utils import save_data_list_to_npz
def main(args):
    """Convert the Davis and Kiba datasets into npz shards.

    For each dataset found under args.dataset_root this reads the
    GraphDTA/DeepDTA fold files, canonicalizes ligand SMILES, tokenizes
    protein sequences, and writes the train/test splits into
    <dataset>/processed/<split>/ as args.npz_files npz shards.
    """
    tokenizer = ProteinTokenizer()
    for dataset in ['davis', 'kiba']:
        data_dir = os.path.join(args.dataset_root, dataset)
        if not os.path.exists(data_dir):
            print('Cannot find {}'.format(data_dir))
            continue

        # fold files hold flat indices into the (drug, protein) affinity matrix
        train_fold = json.load(
            open(os.path.join(data_dir, 'folds', 'train_fold_setting1.txt')))
        train_fold = [ee for e in train_fold for ee in e]  # flatten
        test_fold = json.load(
            open(os.path.join(data_dir, 'folds', 'test_fold_setting1.txt')))
        ligands = json.load(
            open(os.path.join(data_dir, 'ligands_can.txt')),
            object_pairs_hook=OrderedDict)
        proteins = json.load(
            open(os.path.join(data_dir, 'proteins.txt')),
            object_pairs_hook=OrderedDict)
        # 'Y' was pickled under Python 2, hence the latin1 encoding
        affinity = pickle.load(
            open(os.path.join(data_dir, 'Y'), 'rb'), encoding='latin1')

        smiles_lst, protein_lst = [], []
        for k in ligands.keys():
            # round-trip through RDKit for canonical isomeric SMILES
            smiles = Chem.MolToSmiles(Chem.MolFromSmiles(ligands[k]),
                                      isomericSmiles=True)
            smiles_lst.append(smiles)
        for k in proteins.keys():
            protein_lst.append(proteins[k])

        if dataset == 'davis':
            # Kd data: convert nM values to -log10 of molar concentration
            affinity = [-np.log10(y / 1e9) for y in affinity]
        affinity = np.asarray(affinity)

        os.makedirs(os.path.join(data_dir, 'processed'), exist_ok=True)
        for split in ['train', 'test']:
            print('processing {} set of {}'.format(split, dataset))
            split_dir = os.path.join(data_dir, 'processed', split)
            os.makedirs(split_dir, exist_ok=True)
            fold = train_fold if split == 'train' else test_fold
            # only measured (non-NaN) drug/protein pairs are usable
            rows, cols = np.where(np.isnan(affinity) == False)
            rows, cols = rows[fold], cols[fold]

            data_lst = [[] for _ in range(args.npz_files)]
            for idx in range(len(rows)):
                mol_graph = smiles_to_graph_data(smiles_lst[rows[idx]])
                data = {k: v for k, v in mol_graph.items()}
                seqs = []
                # '\x01' separates chains in multi-chain protein entries
                for seq in protein_lst[cols[idx]].split('\x01'):
                    seqs.extend(tokenizer.gen_token_ids(seq))
                data['protein_token_ids'] = np.array(seqs)

                af = affinity[rows[idx], cols[idx]]
                if dataset == 'davis':
                    data['Log10_Kd'] = np.array([af])
                elif dataset == 'kiba':
                    data['KIBA'] = np.array([af])
                # round-robin assignment spreads examples across shards
                data_lst[idx % args.npz_files].append(data)

            random.shuffle(data_lst)
            for j, sub_data_lst in enumerate(data_lst):
                random.shuffle(sub_data_lst)
                npz = os.path.join(
                    split_dir, '{}_{}_{}.npz'.format(dataset, split, j))
                save_data_list_to_npz(sub_data_lst, npz)

        print('==============================')
        print('dataset:', dataset)
        print('train_fold:', len(train_fold))
        print('test_fold:', len(test_fold))
        print('unique drugs:', len(set(smiles_lst)))
        print('unique proteins:', len(set(protein_lst)))
# Script entry point: parse CLI flags and run the conversion.
if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('--dataset_root', type=str, default=None, required=True)
    parser.add_argument('--npz_files', type=int, default=1)  # set it > 1 for multi trainers
    args = parser.parse_args()
    main(args)
|
import os
from datetime import datetime
from collections import OrderedDict
#-------------------------------------------------------------------------------
class SpecfitParser:
    """Parse and modify parameter files for the SpecFit task"""

    def __init__(self, filename, ext=-1):
        """Read *filename* and parse extension *ext* (default: the last one).

        Raises ValueError if the component count declared in the file does
        not match the number of components actually parsed.
        """
        self.filename = filename
        self.n_components = 0
        self.components = OrderedDict()
        self.loadfile(ext)
        if len(self) != self.n_components:
            raise ValueError("{} components found, file says {}".format(len(self),
                                                                        self.n_components))

    def __str__(self):
        message = "Specfit database file {}\n".format(self.filename)
        # NOTE(review): iterating self.components yields the component
        # *names* (dict keys); kept as-is to preserve behaviour.
        message += '\n'.join(['-' + str(item) for item in self.components])
        return message

    def __iter__(self):
        # .values() replaces the Python-2-only .itervalues()
        for item in self.components.values():
            yield item

    def __getitem__(self, i):
        for num, item in enumerate(self.components.values()):
            if num == i:
                return item
        raise ValueError("No component {} found".format(i))

    def __len__(self):
        return len(self.components)

    def loadfile(self, ext):
        """Locate the requested 'begin' extension and parse its components."""
        # context manager closes the file (the original leaked the handle)
        with open(self.filename, 'r') as infile:
            data = infile.readlines()
        if not len(data):
            raise ValueError('No lines in input datafile')
        extensions = [i for i, line in enumerate(data) if line.startswith('begin')]
        n_ext = len(extensions)
        if not n_ext:
            raise ValueError("Not enough extensions")
        if ext >= n_ext:
            # Fixed off-by-one: 'ext > n_ext' let ext == n_ext through and
            # crashed with IndexError on the lookup below.
            raise ValueError("Not enough extensions")
        pos = extensions[ext]
        for i, line in enumerate(data):
            if i < pos:
                continue
            line = line.strip()
            if line.startswith('components'):
                self.n_components = int(line.split()[1])
                self.read_components(data, i + self.n_components + 1)
                return
        raise ValueError('No components found')

    def plot(self, plotfile):
        """Plot a specfit output file (not implemented)."""
        #data = ascii.read(plotfile,
        #                  header_start=None,
        #                  data_start=2)
        raise NotImplementedError("Nope, I can't do this yet")

    def fixall(self):
        """Fix all parameters"""
        for comp in self:
            for par in comp:
                par.fix()

    def find_num(self, i):
        """Get the numbered component"""
        for item in self.components.values():
            if int(item.number) == i:
                return item
        raise ValueError("No component {} found".format(i))

    def read_components(self, data, start):
        """Parse self.n_components component definitions beginning at *start*."""
        components_read = 0
        while components_read < self.n_components:
            comp_name, n_pars = data[start].strip().split()[0:2]
            n_pars = int(n_pars)
            if comp_name in self.components:
                raise KeyError('Component defined twice!')
            # a component owns its header line plus n_pars parameter lines
            self.components[comp_name] = SpecfitComponent(data[start:start + n_pars + 1])
            components_read += 1
            start += n_pars + 1
        if components_read < self.n_components:
            raise ValueError("Not enough parameters supplied")

    def write(self, outname, clobber=True, mode='w'):
        """Write the database to *outname*, prefixing 'sf' if absent."""
        if os.path.exists(outname) and not clobber:
            # NOTE(review): 'overrite' typo kept in case callers match the text
            raise IOError("Will not overrite {} while clobber={}".format(outname,
                                                                         clobber))
        out_path, out_filename = os.path.split(outname)
        if not out_filename.startswith('sf'):
            # print() function replaces the Python-2-only print statement
            print("adding 'sf' to db name")
            outname = os.path.join(out_path, 'sf'+out_filename)
        now = datetime.now()
        with open(outname, mode) as out_file:
            #-- Not sure if these are needed, keeping for now
            out_file.write('# {}\n'.format(now.ctime()))
            out_file.write('begin {}\n'.format(out_filename.lstrip('sf')))
            out_file.write(' task specfit\n')
            #-- These are needed
            out_file.write('components {}\n'.format(self.n_components))
            for item in self.components.values():
                out_file.write(' {}\n'.format(item.type))
            for item in self.components.values():
                out_file.write('{:>19} {} # {}\n'.format(item.name,
                                                         item.n_pars,
                                                         item.label))
                for par in item.parameters.values():
                    out_file.write('{:>33}{:>14}{:>14}{:>14}{:>10}{:>10d}\n'.format(par.value,
                                                                                    par.lower_lim,
                                                                                    par.upper_lim,
                                                                                    par.stepsize,
                                                                                    par.tolerance,
                                                                                    par.linkage))
#-------------------------------------------------------------------------------
class SpecfitComponent:
    """Parse and modify an individual component of a SpecFit parameter file"""

    # parameter names for each component type, in file order
    _par_names = {'linear': ['flux', 'slope'],
                  'powerlaw': ['flux', 'index'],
                  'bpl': ['flux', 'break', 'index_above', 'index_below'],
                  'blackbody': ['flux', 'temperature'],
                  'gaussian': ['flux', 'centroid', 'fwhm', 'skew'],
                  'logarith': ['flux', 'centroid', 'fwhm', 'slew'],
                  'labsorp': ['eq_width', 'centroid', 'fwhm'],
                  'tauabs': ['depth', 'centroid', 'fwhm'],
                  'eabsorp': ['depth', 'wavelength'],
                  'recomb': ['flux', 'wavelength', 'temperature', 'fwhm'],
                  'extcor': ['e(v-b)'],
                  'usercont': ['norm', 'shift', 'redshift', 'key'],
                  'userline': ['norm', 'shift', 'redshift', 'key'],
                  'userabs': ['norm', 'shift', 'redshift', 'key'],
                  'lorentz': ['flux', 'centroid', 'fwhm', 'alpha'],
                  'dampabs': ['density', 'centroid', 'lifetime'],
                  'logabs': ['depth', 'centroid', 'fwhm'],
                  'ffree': ['norm', 'temperature'],
                  'extdrude': ['e(b-v)', 'w0', 'gamma', 'c1', 'c2', 'c3', 'c4'],
                  'disk': ['flux', 'beta', 'temperature'],
                  'ccmext': ['e(b-v)', 'rv']}

    def __init__(self, lines):
        """Parse a component from its header line plus parameter lines."""
        self.name, self.n_pars = lines[0].strip().split()[0:2]
        self.n_pars = int(self.n_pars)
        self.label = ' '.join(lines[0].strip().split()[3:]) or ''
        #-- Strip out numbers to get type of model fit
        #-- ALL numbers will be stripped, but they should all be at the end
        self.type = ''.join([item for item in self.name if not item.isdigit()])
        # assumes the name carries at least one digit (e.g. 'gaussian1') — ValueError otherwise
        self.number = int(''.join([item for item in self.name if item.isdigit()]))
        if self.n_pars != len(lines[1:]):
            message = "Incorrect number of parameters specified for"
            message += "{}, got {} instead of {}".format(self.type,
                                                         len(lines[1:]),
                                                         self.n_pars)
            raise ValueError(message)
        self.parameters = OrderedDict()
        for key, line in zip(self._par_names[self.type], lines[1:]):
            self.parameters[key] = SpecfitParameter(line)

    def __len__(self):
        return len(self.parameters)

    def __str__(self):
        output = "component: {} #{}\n".format(self.type, self.number)
        # NOTE(review): iterating self.parameters yields the parameter
        # *names* (dict keys); kept as-is to preserve behaviour.
        output += '\n'.join(['--' + str(item) for item in self.parameters])
        return output

    def __iter__(self):
        # .values() replaces the Python-2-only .itervalues()
        for item in self.parameters.values():
            yield item

    def __getitem__(self, i):
        for num, item in enumerate(self.parameters.values()):
            if num == i:
                return item
        raise ValueError("No parameter {} found".format(i))
#-------------------------------------------------------------------------------
class SpecfitParameter:
    """Parse and modify a parameter of a component"""

    def __init__(self, line):
        """Parse one whitespace-separated six-value parameter line."""
        # Fixed for Python 3: map() returns a lazy iterator there, so the
        # original len()/indexing of the map object would fail.
        values = [float(item) for item in line.strip().split()]
        if len(values) != 6:
            raise ValueError('Parameter can only have 6 values!')
        self.value = values[0]
        self.lower_lim = values[1]
        self.upper_lim = values[2]
        self.stepsize = values[3]
        self.tolerance = values[4]
        # linkage: 0 free, -1 fixed, >0 linked to another parameter
        self.linkage = int(values[5])

    def __str__(self):
        return "parameter: {} {} {} {} {} {}".format(self.value,
                                                     self.lower_lim,
                                                     self.upper_lim,
                                                     self.stepsize,
                                                     self.tolerance,
                                                     self.linkage)

    def free(self):
        """Mark the parameter free (only if it is not linked)."""
        if self.linkage <= 0:
            self.linkage = 0

    def fix(self):
        """Mark the parameter fixed (only if it is not linked)."""
        if self.linkage <= 0:
            self.linkage = -1

    @property
    def hit_boundary(self):
        """True when the value sits at or beyond either limit."""
        if (self.value <= self.lower_lim) or (self.value >= self.upper_lim):
            return True
        else:
            return False
#-------------------------------------------------------------------------------
|
# Generated by Django 3.1.8 on 2021-04-13 07:09
from decimal import Decimal
import django.core.validators
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: constrain store money fields to
    price >= 0.00 and quantity fields to quantity >= 1 across
    DefectiveReturn, InventoryAdjustment, Purchase, RentRequest,
    RentReturn and Sale."""

    dependencies = [
        ("store", "0002_auto_20210413_0706"),
    ]

    operations = [
        migrations.AlterField(
            model_name="defectivereturn",
            name="price",
            field=models.DecimalField(
                decimal_places=2,
                default=Decimal("0.00"),
                max_digits=5,
                validators=[django.core.validators.MinValueValidator(Decimal("0.00"))],
            ),
        ),
        migrations.AlterField(
            model_name="defectivereturn",
            name="quantity",
            field=models.IntegerField(
                default=1, validators=[django.core.validators.MinValueValidator(1)]
            ),
        ),
        migrations.AlterField(
            model_name="inventoryadjustment",
            name="price",
            field=models.DecimalField(
                decimal_places=2,
                default=Decimal("0.00"),
                max_digits=5,
                validators=[django.core.validators.MinValueValidator(Decimal("0.00"))],
            ),
        ),
        migrations.AlterField(
            model_name="inventoryadjustment",
            name="quantity",
            field=models.IntegerField(
                default=1, validators=[django.core.validators.MinValueValidator(1)]
            ),
        ),
        migrations.AlterField(
            model_name="purchase",
            name="price",
            field=models.DecimalField(
                decimal_places=2,
                default=Decimal("0.00"),
                max_digits=5,
                validators=[django.core.validators.MinValueValidator(Decimal("0.00"))],
            ),
        ),
        migrations.AlterField(
            model_name="purchase",
            name="quantity",
            field=models.IntegerField(
                default=1, validators=[django.core.validators.MinValueValidator(1)]
            ),
        ),
        migrations.AlterField(
            model_name="rentrequest",
            name="penalty_fee",
            field=models.DecimalField(
                decimal_places=2,
                default=Decimal("0.00"),
                max_digits=5,
                validators=[django.core.validators.MinValueValidator(Decimal("0.00"))],
            ),
        ),
        migrations.AlterField(
            model_name="rentrequest",
            name="price",
            field=models.DecimalField(
                decimal_places=2,
                default=Decimal("0.00"),
                max_digits=5,
                validators=[django.core.validators.MinValueValidator(Decimal("0.00"))],
            ),
        ),
        migrations.AlterField(
            model_name="rentrequest",
            name="quantity",
            field=models.IntegerField(
                default=1, validators=[django.core.validators.MinValueValidator(1)]
            ),
        ),
        migrations.AlterField(
            model_name="rentreturn",
            name="price",
            field=models.DecimalField(
                decimal_places=2,
                default=Decimal("0.00"),
                max_digits=5,
                validators=[django.core.validators.MinValueValidator(Decimal("0.00"))],
            ),
        ),
        migrations.AlterField(
            model_name="rentreturn",
            name="quantity",
            field=models.IntegerField(
                default=1, validators=[django.core.validators.MinValueValidator(1)]
            ),
        ),
        migrations.AlterField(
            model_name="sale",
            name="price",
            field=models.DecimalField(
                decimal_places=2,
                default=Decimal("0.00"),
                max_digits=5,
                validators=[django.core.validators.MinValueValidator(Decimal("0.00"))],
            ),
        ),
        migrations.AlterField(
            model_name="sale",
            name="quantity",
            field=models.IntegerField(
                default=1, validators=[django.core.validators.MinValueValidator(1)]
            ),
        ),
    ]
|
from __future__ import absolute_import
from .exceptions import CartesianMotionError
from .analytical_inverse_kinematics import AnalyticalInverseKinematics
from .analytical_plan_cartesian_motion import AnalyticalPlanCartesianMotion
from .solvers import (
OffsetWristKinematics,
SphericalWristKinematics,
UR3Kinematics,
UR3eKinematics,
UR5Kinematics,
UR5eKinematics,
UR10Kinematics,
UR10eKinematics,
Staubli_TX260LKinematics,
ABB_IRB4600_40_255Kinematics,
)
# Public API of the analytical kinematics package.
__all__ = [
    # exceptions
    'CartesianMotionError',
    # clients / backend features
    'AnalyticalInverseKinematics',
    'AnalyticalPlanCartesianMotion',
    # solvers
    'OffsetWristKinematics',
    'SphericalWristKinematics',
    'UR3Kinematics',
    'UR3eKinematics',
    'UR5Kinematics',
    'UR5eKinematics',
    'UR10Kinematics',
    'UR10eKinematics',
    'Staubli_TX260LKinematics',
    'ABB_IRB4600_40_255Kinematics',
]
|
import pixellib
from pixellib.tune_bg import alter_bg
# Build a pixellib background-alteration model and recolor the sample
# image's background green (RGB 0, 128, 0), writing colored_bg.jpg.
change_bg = alter_bg()
# Pre-trained DeepLabV3+ (Xception backbone) Pascal VOC weights file,
# expected in the current working directory.
change_bg.load_pascalvoc_model(
    "deeplabv3_xception_tf_dim_ordering_tf_kernels.h5")
change_bg.color_bg(
    "img/sample.jpg", colors=(0, 128, 0), output_image_name="colored_bg.jpg"
)
|
"""
Protocol buffer writer in Python
author: Atsushi Sakai
"""
import addressbook_pb2
def main():
    """Build a two-person AddressBook message and serialize it to
    pbdata_py.dat as a binary protocol buffer."""
    print("start!!")
    address_book = addressbook_pb2.AddressBook()

    # first person with a home phone
    person1 = address_book.people.add()
    person1.id = 1234
    person1.name = "John Doe"
    person1.email = "jdoe@example.com"
    # person1.no_such_field = 1 # raises AttributeError
    # person1.id = "1234" # raises TypeError
    phone = person1.phones.add()
    phone.number = "555-4321"
    phone.type = addressbook_pb2.Person.HOME

    # second person with a work phone
    person2 = address_book.people.add()
    person2.id = 4321
    person2.name = "Tom Ranger"
    person2.email = "tranger@example.com"
    phone = person2.phones.add()
    phone.number = "555-4322"
    phone.type = addressbook_pb2.Person.WORK

    print(address_book)  # Human readable print

    # writing the data; the context manager guarantees the file handle is
    # closed even if SerializeToString/write raises (the original open/
    # write/close sequence leaked on error)
    with open("pbdata_py.dat", "wb") as f:
        f.write(address_book.SerializeToString())
    print("done!!")
# Script entry point.
if __name__ == '__main__':
    main()
|
import time
import unittest
from paprika import sleep_after, sleep_before, repeat
class UtilityTestCases(unittest.TestCase):
    """Tests for paprika's sleep_after, sleep_before and repeat decorators."""

    # We cannot be too sure that sleeping exceeds the time, so give it 10ms leeway
    SLEEP_TOLERANCE = 0.01

    def test_sleep_after(self):
        """The wrapped body runs first; the 2s sleep follows the call."""
        start = time.perf_counter()

        @sleep_after(duration=2)
        def f():
            # body executes before the sleep, so almost no time has elapsed yet
            self.assertLess(time.perf_counter() - start, 0.5)

        f()
        self.assertGreaterEqual(time.perf_counter() - start, 2.0 - self.SLEEP_TOLERANCE)

    def test_sleep_before(self):
        """The 2s sleep happens before the wrapped body runs."""
        start = time.perf_counter()

        @sleep_before(duration=2)
        def f():
            self.assertGreaterEqual(time.perf_counter() - start, 1.5)

        f()

    def test_repeat(self):
        """repeat(n=5) invokes the wrapped function exactly five times."""
        cnt = [0]

        @repeat(n=5)
        def f(counter):
            counter[0] += 1

        f(cnt)
        self.assertEqual(cnt, [5])
|
class Query:
    """Range Query representation

    A query is a collection of QueryCondition objects, one per
    constrained attribute.
    """

    def __init__(self, conditions):
        """
        Args:
            conditions (QueryCondition[]): list of QueryCondition
        """
        self.conditions = conditions

    def show(self):
        """Print query information.

        Examples:
            >>> Query([ QueryCondition(0, 2, 3), QueryCondition(1, 0, 0) ]).show()
            [(0, 2, 3), (1, 0, 0)]
        """
        triples = []
        for condition in self.conditions:
            triples.append((condition.attribute, condition.start, condition.end))
        print(triples)
class QueryCondition:
    """Range Query condition

    Attributes:
        attribute (int): dimension or attribute
        start (int): start of range at self.attribute
        end (int): end of range at self.attribute
    """

    def __init__(self, attribute, start, end):
        """Record a closed range [start, end] over one attribute."""
        self.attribute, self.start, self.end = attribute, start, end
from metagraph import translator, dtypes
from metagraph.plugins import has_pandas, has_scipy
import numpy as np
from .. import has_cudf
from metagraph.plugins.numpy.types import NumpyNodeSet, NumpyNodeMap, NumpyVector
from metagraph.plugins.python.types import PythonNodeSet, PythonNodeMap, dtype_casting
if has_cudf:
import cudf
import cupy
from .types import (
CuDFVector,
CuDFNodeSet,
CuDFNodeMap,
CuDFEdgeSet,
CuDFEdgeMap,
)
@translator
def cudf_nodemap_to_nodeset(x: CuDFNodeMap, **props) -> CuDFNodeSet:
return CuDFNodeSet(x.value.index.to_series())
@translator
def cudf_edgemap_to_edgeset(x: CuDFEdgeMap, **props) -> CuDFEdgeSet:
data = x.value[[x.src_label, x.dst_label]].copy()
return CuDFEdgeSet(data, x.src_label, x.dst_label, is_directed=x.is_directed)
@translator
def translate_nodes_cudfnodemap2pythonnodemap(
x: CuDFNodeMap, **props
) -> PythonNodeMap:
cast = dtype_casting[dtypes.dtypes_simplified[x.value[x.value_label].dtype]]
data = {
i.item(): cast(x.value.loc[i.item()].loc[x.value_label])
for i in x.value.index.values
}
return PythonNodeMap(data)
@translator
def translate_nodes_pythonnodemap2cudfnodemap(
x: PythonNodeMap, **props
) -> CuDFNodeMap:
keys, values = zip(*x.value.items())
# TODO consider special casing the situation when all the keys form a compact range
data = cudf.DataFrame({"value": values}, index=keys)
return CuDFNodeMap(data, "value")
@translator
def translate_nodes_cudfnodeset2pythonnodeset(
x: CuDFNodeSet, **props
) -> PythonNodeSet:
return PythonNodeSet(set(x.value.index.to_pandas()))
@translator
def translate_nodes_pythonnodeset2cudfnodeset(
x: PythonNodeSet, **props
) -> CuDFNodeSet:
return CuDFNodeSet(cudf.Series(x.value))
@translator
def translate_nodes_numpyvector2cudfvector(x: NumpyVector, **props) -> CuDFVector:
if x.mask is not None:
data = x.value[x.mask]
series = cudf.Series(data, index=np.flatnonzero(x.mask))
else:
data = x.value
series = cudf.Series(data)
return CuDFVector(series)
    @translator
    def translate_vector_cudfvector2numpyvector(x: CuDFVector, **props) -> NumpyVector:
        """Convert a CuDFVector back into a NumpyVector, rebuilding the
        validity mask for sparse (non-dense) vectors."""
        is_dense = CuDFVector.Type.compute_abstract_properties(x, {"is_dense"})[
            "is_dense"
        ]
        if is_dense:
            # Dense: every position is present; sorting by index restores order.
            np_vector = cupy.asnumpy(x.value.sort_index().values)
            mask = None
        else:
            # Sparse: scatter values into a full-length array (unfilled slots
            # stay uninitialized but are masked out) and mark which positions
            # were actually present.
            series = x.value.sort_index()
            positions = series.index.to_array()
            np_vector = np.empty(len(x), dtype=series.dtype)
            np_vector[positions] = cupy.asnumpy(series.values)
            mask = np.zeros(len(x), dtype=bool)
            mask[positions] = True
        return NumpyVector(np_vector, mask=mask)
@translator
def translate_nodes_numpynodemap2cudfnodemap(
x: NumpyNodeMap, **props
) -> CuDFNodeMap:
if x.mask is not None:
keys = np.flatnonzero(x.value)
np_values = x.value[mask]
# TODO make CuDFNodeMap store a Series instead of DataFrame to avoid making 2 copies here
df = cudf.Series(np_values, index=keys).to_frame("value")
elif x.pos2id is not None:
# TODO make CuDFNodeMap store a Series instead of DataFrame to avoid making 2 copies here
df = cudf.DataFrame({"value": x.value}, index=x.pos2id)
else:
df = cudf.DataFrame({"value": x.value})
return CuDFNodeMap(df, "value")
    @translator
    def translate_nodes_cudfnodemap2numpynodemap(
        x: CuDFNodeMap, **props
    ) -> NumpyNodeMap:
        """Convert a CuDFNodeMap into a NumpyNodeMap, choosing the layout
        (compact / masked dense / sparse) based on how compact the ids are."""
        if isinstance(x.value.index, cudf.core.index.RangeIndex):
            # RangeIndex: bounds are available without a GPU reduction.
            x_index_min = x.value.index.start
            x_index_max = x.value.index.stop - 1
        else:
            x_index_min = x.value.index.min()
            x_index_max = x.value.index.max()
        # Fraction of [0, max] occupied; 1.0 implies a compact 0..max range.
        x_density = (x_index_max + 1 - x_index_min) / (x_index_max + 1)
        if x_density == 1.0:
            # Fully compact: plain dense array where position == node id.
            data = np.empty(len(x.value), dtype=x.value[x.value_label].dtype)
            data[cupy.asnumpy(x.value.index.values)] = cupy.asnumpy(
                x.value[x.value_label].values
            )
            mask = None
            node_ids = None
        elif x_density > 0.5:  # TODO consider moving this threshold out to a global
            # Mostly dense: dense array plus a boolean validity mask.
            data = np.empty(x_index_max + 1, dtype=x.value[x.value_label].dtype)
            position_selector = cupy.asnumpy(x.value.index.values)
            data[position_selector] = cupy.asnumpy(x.value[x.value_label].values)
            mask = np.zeros(x_index_max + 1, dtype=bool)
            mask[position_selector] = True
            node_ids = None
        else:
            # Sparse: compact value array plus an id -> position dict.
            # O(n log n) sort, but n is small since not dense
            df_index_sorted = x.value.sort_index()
            data = cupy.asnumpy(df_index_sorted[x.value_label].values)
            node_ids = dict(map(reversed, enumerate(df_index_sorted.index.values_host)))
            mask = None
        return NumpyNodeMap(data, mask=mask, node_ids=node_ids)
@translator
def translate_nodes_numpynodeset2cudfnodeset(
x: NumpyNodeSet, **props
) -> CuDFNodeSet:
data = cudf.Series(x.nodes())
return CuDFNodeSet(data)
    @translator
    def translate_nodes_cudfnodeset2numpynodeset(
        x: CuDFNodeSet, **props
    ) -> NumpyNodeSet:
        """Convert a CuDFNodeSet into a NumpyNodeSet, using a boolean mask
        when the ids are fairly compact and an explicit id array otherwise."""
        if isinstance(x.value.index, cudf.core.index.RangeIndex):
            # RangeIndex: bounds are known without a GPU reduction.
            x_index_min = x.value.index.start
            x_index_max = x.value.index.stop - 1
        else:
            x_index_min = x.value.index.min()
            x_index_max = x.value.index.max()
        # Fraction of [0, max] actually occupied by node ids.
        x_density = (x_index_max + 1 - x_index_min) / (x_index_max + 1)
        node_positions = cupy.asnumpy(x.value.index.values)
        if x_density > 0.5:  # TODO consider moving this threshold out to a global
            # Dense enough: represent membership as a boolean mask.
            mask = np.zeros(x_index_max + 1, dtype=bool)
            mask[node_positions] = True
            node_ids = None
        else:
            # Sparse: store the node ids directly.
            node_ids = node_positions
            mask = None
        return NumpyNodeSet(node_ids=node_ids, mask=mask)
if has_cudf and has_pandas:
from metagraph.plugins.pandas.types import PandasEdgeSet, PandasEdgeMap
@translator
def translate_edgeset_pdedgeset2cudfedgeset(
x: PandasEdgeSet, **props
) -> CuDFEdgeSet:
df = cudf.from_pandas(x.value[[x.src_label, x.dst_label]])
return CuDFEdgeSet(
df, src_label=x.src_label, dst_label=x.dst_label, is_directed=x.is_directed
)
@translator
def translate_edgemap_pdedgemap2cudfedgemap(
x: PandasEdgeMap, **props
) -> CuDFEdgeMap:
df = cudf.from_pandas(x.value[[x.src_label, x.dst_label, x.weight_label]])
return CuDFEdgeMap(
df,
src_label=x.src_label,
dst_label=x.dst_label,
weight_label=x.weight_label,
is_directed=x.is_directed,
)
@translator
def translate_edgeset_cudfedgeset2pdedgeset(
x: CuDFEdgeSet, **props
) -> PandasEdgeSet:
pdf = x.value[[x.src_label, x.dst_label]].to_pandas()
return PandasEdgeSet(
pdf,
src_label=x.src_label,
dst_label=x.dst_label,
is_directed=x.is_directed,
)
@translator
def translate_edgemap_cudfedgemap2pdedgemap(
x: CuDFEdgeMap, **props
) -> PandasEdgeMap:
pdf = x.value[[x.src_label, x.dst_label, x.weight_label]].to_pandas()
return PandasEdgeMap(
pdf,
src_label=x.src_label,
dst_label=x.dst_label,
weight_label=x.weight_label,
is_directed=x.is_directed,
)
if has_cudf and has_scipy:
import scipy.sparse as ss
from metagraph.plugins.scipy.types import ScipyEdgeSet, ScipyEdgeMap, ScipyGraph
@translator
def translate_edgeset_scipyedgeset2cudfedgeset(
x: ScipyEdgeSet, **props
) -> CuDFEdgeSet:
is_directed = ScipyEdgeSet.Type.compute_abstract_properties(x, {"is_directed"})[
"is_directed"
]
coo_matrix = (
x.value.tocoo()
) # TODO consider handling CSR and COO cases separately
get_node_from_pos = lambda index: x.node_list[index]
row_ids = map(get_node_from_pos, coo_matrix.row)
column_ids = map(get_node_from_pos, coo_matrix.col)
rc_pairs = zip(row_ids, column_ids)
if not is_directed:
rc_pairs = filter(lambda pair: pair[0] < pair[1], rc_pairs)
rc_pairs = list(rc_pairs)
df = cudf.DataFrame(rc_pairs, columns=["source", "target"])
return CuDFEdgeSet(df, is_directed=is_directed)
@translator
def translate_edgemap_scipyedgemap2cudfedgemap(
x: ScipyEdgeMap, **props
) -> CuDFEdgeMap:
is_directed = ScipyEdgeMap.Type.compute_abstract_properties(x, {"is_directed"})[
"is_directed"
]
coo_matrix = (
x.value.tocoo()
) # TODO consider handling CSR and COO cases separately
get_node_from_pos = lambda index: x.node_list[index]
row_ids = map(get_node_from_pos, coo_matrix.row)
column_ids = map(get_node_from_pos, coo_matrix.col)
rcw_triples = zip(row_ids, column_ids, coo_matrix.data)
if not is_directed:
rcw_triples = filter(lambda triple: triple[0] < triple[1], rcw_triples)
rcw_triples = list(rcw_triples)
df = cudf.DataFrame(rcw_triples, columns=["source", "target", "weight"])
return CuDFEdgeMap(df, is_directed=is_directed)
    @translator
    def translate_edgeset_cudfedgeset2scipyedgeset(
        x: CuDFEdgeSet, **props
    ) -> ScipyEdgeSet:
        """Convert a cudf edge list into a scipy CSR adjacency matrix plus a
        sorted node list mapping matrix positions back to node ids."""
        is_directed = x.is_directed
        # Unique node ids across both endpoint columns ("K" ravels in memory order).
        node_list = np.unique(
            cupy.asnumpy(x.value[[x.src_label, x.dst_label]].values).ravel("K")
        )
        # NOTE(review): np.unique already returns a sorted array, so this sort
        # looks like a no-op — confirm before removing.
        node_list.sort()
        num_nodes = len(node_list)
        # id2pos: node id -> row/column position in the matrix.
        id2pos = dict(map(reversed, enumerate(node_list)))
        get_id_pos = lambda node_id: id2pos[node_id]
        source_positions = list(map(get_id_pos, x.value[x.src_label].values_host))
        target_positions = list(map(get_id_pos, x.value[x.dst_label].values_host))
        if not is_directed:
            # Mirror each edge so the adjacency matrix comes out symmetric.
            source_positions, target_positions = (
                source_positions + target_positions,
                target_positions + source_positions,
            )
        source_positions = np.array(source_positions)
        target_positions = np.array(target_positions)
        # Edge presence is encoded as a 1.0 entry at (src, dst).
        matrix = ss.coo_matrix(
            (np.ones(len(source_positions)), (source_positions, target_positions)),
            shape=(num_nodes, num_nodes),
        ).tocsr()
        return ScipyEdgeSet(matrix, node_list)
    @translator
    def translate_edgemap_cudfedgemap2scipyedgemap(
        x: CuDFEdgeMap, **props
    ) -> ScipyEdgeMap:
        """Convert a weighted cudf edge list into a scipy CSR matrix plus a
        sorted node list mapping matrix positions back to node ids."""
        is_directed = x.is_directed
        # Unique node ids across both endpoint columns ("K" ravels in memory order).
        node_list = np.unique(
            cupy.asnumpy(x.value[[x.src_label, x.dst_label]].values).ravel("K")
        )
        # NOTE(review): np.unique already returns a sorted array, so this sort
        # looks like a no-op — confirm before removing.
        node_list.sort()
        num_nodes = len(node_list)
        # id2pos: node id -> row/column position in the matrix.
        id2pos = dict(map(reversed, enumerate(node_list)))
        get_id_pos = lambda node_id: id2pos[node_id]
        source_positions = list(map(get_id_pos, x.value[x.src_label].values_host))
        target_positions = list(map(get_id_pos, x.value[x.dst_label].values_host))
        weights = cupy.asnumpy(x.value[x.weight_label].values)
        if not is_directed:
            # Mirror each edge (and its weight) so the matrix is symmetric.
            source_positions, target_positions = (
                source_positions + target_positions,
                target_positions + source_positions,
            )
            weights = np.concatenate([weights, weights])
        matrix = ss.coo_matrix(
            (weights, (source_positions, target_positions)),
            shape=(num_nodes, num_nodes),
        ).tocsr()
        return ScipyEdgeMap(matrix, node_list)
|
import contextlib
import os
import re
import secrets
import shutil
from pathlib import Path
from flask import Flask
from pygluu.kubernetes.gui.extensions import csrf, socketio, gluu_settings
from pygluu.kubernetes.gui.views.main import main_blueprint
from pygluu.kubernetes.gui.views.wizard import wizard_blueprint
from pygluu.kubernetes.gui.views.operation import operation_blueprint
def resolve_secret_key(path):
    """Return the secret key stored at ``path``, generating one if needed.

    A missing or empty file causes a fresh urlsafe token to be generated and
    persisted, so subsequent calls return the same key.
    """
    stored = ""
    with contextlib.suppress(FileNotFoundError):
        with open(path) as src:
            stored = src.read().strip()
    if stored:
        return stored
    fresh = secrets.token_urlsafe(32)
    with open(path, "w") as dst:
        dst.write(fresh)
    return fresh
def create_app(debug=False):
    """
    GUI installer app for gluu cloud native

    - set app config
    - initialize extensions
    - registering blueprints
    - generate urls for static files

    :param debug: enable Flask debug mode
    :return: configured Flask application
    """
    app = Flask(__name__)

    # set app config
    cfg = "pygluu.kubernetes.gui.config.ProductionConfig"
    app.config.from_object(cfg)
    app.config["DEBUG"] = debug

    # resolve persistent secret key for production
    secret_key_file = Path("./secret-key.txt").resolve()
    with contextlib.suppress(FileNotFoundError):
        # if running inside container, copy mounted file (if any)
        shutil.copy(
            Path("./installer-secret-key.txt").resolve(),
            secret_key_file,
        )
    app.config["SECRET_KEY"] = resolve_secret_key(secret_key_file)

    # init csrf
    csrf.init_app(app)
    socketio.init_app(app)
    gluu_settings.init_app(app)

    # register blueprint
    app.register_blueprint(main_blueprint)
    app.register_blueprint(wizard_blueprint)
    app.register_blueprint(operation_blueprint, url_prefix="/operations")

    @app.context_processor
    def hash_processor():
        def hashed_url(filepath):
            """Resolve a static filepath to its content-hashed variant, if any."""
            # Bug fix: rsplit without maxsplit unpacks into exactly two names
            # only when there is a single separator; nested directories
            # ("js/vendor/app.js") or dotted filenames ("app.min.js") raised
            # ValueError. Split once from the right instead.
            directory, filename = filepath.rsplit('/', 1)
            name, extension = filename.rsplit(".", 1)
            folder = os.path.join(os.path.sep,
                                  app.root_path, 'static', directory)
            files = os.listdir(folder)
            for f in files:
                # Match e.g. "app.<hash>.js" emitted by the asset pipeline.
                regex = name + r"\.[a-z0-9]+\." + extension
                if re.match(regex, f):
                    return os.path.join('/static', directory, f)
            return os.path.join('/static', filepath)
        return dict(hashed_url=hashed_url)

    return app
|
"""
Copyright (c) Nikita Moriakov and Jonas Teuwen
This source code is licensed under the MIT license found in the
LICENSE file in the root directory of this source tree.
"""
import pathlib
import SimpleITK as sitk
import numpy as np
# DICOM tag addresses (group|element) read from mammography headers.
DICOM_MODALITY_TAG = '0008|0060'
_DICOM_VOI_LUT_FUNCTION = '0028|1056'
_DICOM_WINDOW_CENTER_TAG = '0028|1050'
_DICOM_WINDOW_WIDTH_TAG = '0028|1051'
_DICOM_WINDOW_CENTER_WIDTH_EXPLANATION_TAG = '0028|1055'
_DICOM_FIELD_OF_VIEW_HORIZONTAL_FLIP = '0018|7034'
_DICOM_PATIENT_ORIENTATION = '0020|0020'
_DICOM_LATERALITY = '0020|0060'
_DICOM_IMAGE_LATERALITY = '0020|0062'
_DICOM_PHOTOMETRIC_INTERPRETATION = '0028|0004'
# Mapping from human-readable interpolator names to SimpleITK interpolator
# constants; used by resample_sitk_image().
_SITK_INTERPOLATOR_DICT = {
    'nearest': sitk.sitkNearestNeighbor,
    'linear': sitk.sitkLinear,
    'gaussian': sitk.sitkGaussian,
    'label_gaussian': sitk.sitkLabelGaussian,
    'bspline': sitk.sitkBSpline,
    'hamming_sinc': sitk.sitkHammingWindowedSinc,
    'cosine_windowed_sinc': sitk.sitkCosineWindowedSinc,
    'welch_windowed_sinc': sitk.sitkWelchWindowedSinc,
    'lanczos_windowed_sinc': sitk.sitkLanczosWindowedSinc
}
def read_image_as_sitk_image(filename):
    """
    Read file as a SimpleITK image trying to parse the error.

    Parameters
    ----------
    filename : Path or str

    Returns
    -------
    SimpleITK image.
    """
    try:
        return sitk.ReadImage(str(filename))
    except RuntimeError as error:
        message = str(error)
        if 'itk::ERROR' in message:
            # Keep only the text after the ITK error marker.
            message = message.split('itk::ERROR')[-1]
        raise RuntimeError(message)
def read_image(filename, dtype=None, no_metadata=False, **kwargs):
    """Read medical image

    Parameters
    ----------
    filename : Path, str
        Path to image, can be any SimpleITK supported filename
    dtype : dtype
        The requested dtype the output should be cast.
    no_metadata : bool
        Do not output metadata

    Other Parameters
    ----------------
    spacing : tuple
        Resample to this spacing (ignored when all entries are <= 0).
    dicom_keys : list
        DICOM tags to extract into metadata (``.dcm`` files only).
    interpolator : str
        Interpolator name forwarded to resample_sitk_image.

    Returns
    -------
    Image as ndarray and dictionary with metadata.
    """
    filename = pathlib.Path(filename)
    if not filename.exists():
        # Bug fix: the f-string had no placeholder, so the error never named
        # the missing file.
        raise FileNotFoundError(f'{filename} does not exist.')
    new_spacing = kwargs.get('spacing', False)
    if new_spacing and np.all(np.asarray(new_spacing) <= 0):
        new_spacing = False
    metadata = {}
    sitk_image = read_image_as_sitk_image(filename)
    # TODO: A more elaborate check for dicom can be needed, not necessarly all dicom files have .dcm as extension.
    if filename.suffix.lower() == '.dcm' and kwargs.get('dicom_keys', None):
        dicom_data = {}
        metadata_keys = sitk_image.GetMetaDataKeys()
        for v in kwargs['dicom_keys']:
            # Absent tags map to None so callers can tell them apart from empty values.
            dicom_data[v] = None if v not in metadata_keys else sitk_image.GetMetaData(v).strip()
        metadata['dicom_tags'] = dicom_data
    orig_shape = sitk.GetArrayFromImage(sitk_image).shape
    if new_spacing:
        sitk_image, orig_spacing = resample_sitk_image(
            sitk_image,
            spacing=new_spacing,
            interpolator=kwargs.get('interpolator', None),
            fill_value=0
        )
        metadata.update(
            {'orig_spacing': tuple(orig_spacing), 'orig_shape': orig_shape})
    image = sitk.GetArrayFromImage(sitk_image)
    metadata.update({
        'filename': filename.resolve(),
        'depth': sitk_image.GetDepth(),
        'spacing': sitk_image.GetSpacing(),
        'origin': sitk_image.GetOrigin(),
        'direction': sitk_image.GetDirection()
    })
    if dtype:
        image = image.astype(dtype)
    if no_metadata:
        return image
    return image, metadata
def read_mammogram(filename, dtype=int):
    """
    Read mammograms in dicom format. Dicom tags which:
    - flip images horizontally,
    - VOI Lut Function before displaying,
    are read and set appropriately.

    Parameters
    ----------
    filename : pathlib.Path or str
    dtype : dtype
        Output dtype. Default is the builtin ``int`` — behaviorally identical
        to the previous ``np.int``, which was an alias of ``int`` and has been
        removed from modern numpy.

    Returns
    -------
    np.ndarray, dict

    Raises
    ------
    ValueError
        If the file is not an MG-modality dicom or its depth is not one.
    NotImplementedError
        For unsupported VOI LUT functions or photometric interpretations.
    """
    extra_tags = [DICOM_MODALITY_TAG, _DICOM_VOI_LUT_FUNCTION,
                  _DICOM_LATERALITY, _DICOM_IMAGE_LATERALITY,
                  _DICOM_WINDOW_WIDTH_TAG, _DICOM_WINDOW_CENTER_TAG,
                  _DICOM_FIELD_OF_VIEW_HORIZONTAL_FLIP, _DICOM_PATIENT_ORIENTATION,
                  _DICOM_PHOTOMETRIC_INTERPRETATION]
    image, metadata = read_image(filename, dicom_keys=extra_tags, dtype=dtype)
    dicom_tags = metadata['dicom_tags']
    modality = dicom_tags[DICOM_MODALITY_TAG]
    if not modality == 'MG':
        # Bug fix: the f-string had no placeholder, so the offending file was
        # never named in the error.
        raise ValueError(f'{filename} is not a mammogram. Wrong Modality in DICOM header.')
    if not metadata['depth'] == 1:
        raise ValueError('First dimension of mammogram should be one.')

    # Remove the depth dimension
    image = image.reshape(list(image.shape)[1:])

    # Sometimes a function, the VOILUTFunction, needs to be applied before displaying the mammogram.
    voi_lut_function = dicom_tags[_DICOM_VOI_LUT_FUNCTION] if dicom_tags[_DICOM_VOI_LUT_FUNCTION] else 'LINEAR'
    if voi_lut_function == 'LINEAR':
        pass
    elif voi_lut_function == 'SIGMOID':
        # https://dicom.innolitics.com/ciods/nm-image/voi-lut/00281056
        image_min = image.min()
        image_max = image.max()
        window_center = float(dicom_tags[_DICOM_WINDOW_CENTER_TAG])
        window_width = float(dicom_tags[_DICOM_WINDOW_WIDTH_TAG])
        image = (image_max - image_min) / (1 + np.exp(-4 * (image - window_center) / window_width)) + image_min
        if dtype:
            image = image.astype(dtype)
    else:
        raise NotImplementedError(f'VOI LUT Function {voi_lut_function} is not implemented.')

    # Photometric Interpretation determines how to read the pixel values and if they should be inverted
    photometric_interpretation = dicom_tags[_DICOM_PHOTOMETRIC_INTERPRETATION]
    if photometric_interpretation == 'MONOCHROME2':
        pass
    else:
        raise NotImplementedError(f'Photometric Interpretation {photometric_interpretation} is not implemented.')

    laterality = dicom_tags[_DICOM_LATERALITY] or dicom_tags[_DICOM_IMAGE_LATERALITY]
    metadata['laterality'] = laterality

    # Sometimes a horizontal flip is required:
    # https://groups.google.com/forum/#!msg/comp.protocols.dicom/X4ddGYiQOzs/g04EDChOQBwJ
    needs_horizontal_flip = dicom_tags[_DICOM_FIELD_OF_VIEW_HORIZONTAL_FLIP] == 'YES'
    if laterality:
        # Check patient position
        orientation = dicom_tags[_DICOM_PATIENT_ORIENTATION].split('\\')[0]
        if (laterality == 'L' and orientation == 'P') or (laterality == 'R' and orientation == 'A'):
            needs_horizontal_flip = True
    if needs_horizontal_flip:
        image = np.ascontiguousarray(np.fliplr(image))

    # Drop tags that only make sense for the 3D sitk representation.
    del metadata['dicom_tags']
    del metadata['depth']
    del metadata['direction']
    del metadata['origin']
    metadata['spacing'] = metadata['spacing'][:-1]
    return image, metadata
def resample_sitk_image(sitk_image, spacing=None, interpolator=None,
                        fill_value=0):
    """Resamples an ITK image to a new grid. If no spacing is given,
    the resampling is done isotropically to the smallest value in the current
    spacing. This is usually the in-plane resolution. If not given, the
    interpolation is derived from the input data type. Binary input
    (e.g., masks) are resampled with nearest neighbors, otherwise linear
    interpolation is chosen.

    Parameters
    ----------
    sitk_image : SimpleITK image or str
        Either a SimpleITK image or a path to a SimpleITK readable file.
    spacing : tuple
        Tuple of integers
    interpolator : str
        Either `nearest`, `linear` or None.
    fill_value : int

    Returns
    -------
    SimpleITK image.
    """
    if isinstance(sitk_image, str):
        sitk_image = sitk.ReadImage(sitk_image)
    num_dim = sitk_image.GetDimension()
    if not interpolator:
        interpolator = 'linear'
        pixelid = sitk_image.GetPixelIDValue()
        if pixelid not in [1, 2, 4]:
            raise NotImplementedError(
                'Set `interpolator` manually, '
                'can only infer for 8-bit unsigned or 16, 32-bit signed integers')
        if pixelid == 1:  # 8-bit unsigned int
            interpolator = 'nearest'

    orig_pixelid = sitk_image.GetPixelIDValue()
    orig_origin = sitk_image.GetOrigin()
    orig_direction = sitk_image.GetDirection()
    orig_spacing = np.array(sitk_image.GetSpacing())
    # Bug fix: `np.int` was a deprecated alias of the builtin `int` and has
    # been removed from modern numpy; `int` is behaviorally identical.
    orig_size = np.array(sitk_image.GetSize(), dtype=int)

    if not spacing:
        min_spacing = orig_spacing.min()
        new_spacing = [min_spacing]*num_dim
    else:
        new_spacing = [float(s) if s else orig_spacing[idx] for idx, s in enumerate(spacing)]

    assert interpolator in _SITK_INTERPOLATOR_DICT.keys(),\
        '`interpolator` should be one of {}'.format(_SITK_INTERPOLATOR_DICT.keys())
    sitk_interpolator = _SITK_INTERPOLATOR_DICT[interpolator]

    new_size = orig_size*(orig_spacing/new_spacing)
    new_size = np.ceil(new_size).astype(int)  # Image dimensions are in integers
    # SimpleITK expects lists
    if spacing:
        # Axes with a falsy requested spacing keep their original extent.
        new_size = [int(s) if spacing[idx] else int(orig_size[idx]) for idx, s in enumerate(new_size)]
    else:
        # Bug fix: the original indexed `spacing[idx]` unconditionally, which
        # raised TypeError whenever `spacing` was None (the default, isotropic
        # path).
        new_size = [int(s) for s in new_size]

    resample_filter = sitk.ResampleImageFilter()
    resampled_sitk_image = resample_filter.Execute(
        sitk_image,
        new_size,
        sitk.Transform(),
        sitk_interpolator,
        orig_origin,
        new_spacing,
        orig_direction,
        fill_value,
        orig_pixelid
    )
    return resampled_sitk_image, orig_spacing
|
# Flask-WTF: enable CSRF protection on all forms.
WTF_CSRF_ENABLED = True
# Flask session-signing key (development placeholder value).
SECRET_KEY = 'Bleh'
|
def max_dot_product(a, b):
    """Return the maximum dot product over all pairings of a's elements with b's.

    By the rearrangement inequality, the maximum is achieved by pairing both
    sequences in the same sorted order.
    """
    ordered_a = sorted(a)
    ordered_b = sorted(b)
    return sum(x * y for x, y in zip(ordered_a, ordered_b))
# Read problem input from stdin: n, then two integer sequences of length n.
n = int(input())
a = list(map(int, input().split()))
b = list(map(int, input().split()))
print(max_dot_product(a, b))
|
#!/usr/bin/python
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: ce_evpn_bgp_rr
version_added: "2.4"
short_description: Manages RR for the VXLAN Network on HUAWEI CloudEngine switches.
description:
- Configure an RR in BGP-EVPN address family view on HUAWEI CloudEngine switches.
author: Zhijin Zhou (@CloudEngine-Ansible)
notes:
    - Ensure that the BGP view exists.
- The peer, peer_type, and reflect_client arguments must all exist or not exist.
options:
as_number:
description:
- Specifies the number of the AS, in integer format.
The value is an integer that ranges from 1 to 4294967295.
required: true
bgp_instance:
description:
- Specifies the name of a BGP instance.
The value of instance-name can be an integer 1 or a string of 1 to 31.
bgp_evpn_enable:
description:
- Enable or disable the BGP-EVPN address family.
choices: ['enable','disable']
default: 'enable'
peer_type:
description:
- Specify the peer type.
choices: ['group_name','ipv4_address']
peer:
description:
- Specifies the IPv4 address or the group name of a peer.
reflect_client:
description:
- Configure the local device as the route reflector and the peer or peer group as the client of the route reflector.
choices: ['enable','disable']
policy_vpn_target:
description:
- Enable or disable the VPN-Target filtering.
choices: ['enable','disable']
'''
EXAMPLES = '''
- name: BGP RR test
hosts: cloudengine
connection: local
gather_facts: no
vars:
cli:
host: "{{ inventory_hostname }}"
port: "{{ ansible_ssh_port }}"
username: "{{ username }}"
password: "{{ password }}"
transport: cli
tasks:
- name: "Configure BGP-EVPN address family view and ensure that BGP view has existed."
ce_evpn_bgp_rr:
as_number: 20
bgp_evpn_enable: enable
provider: "{{ cli }}"
- name: "Configure reflect client and ensure peer has existed."
ce_evpn_bgp_rr:
as_number: 20
peer_type: ipv4_address
peer: 192.8.3.3
reflect_client: enable
provider: "{{ cli }}"
- name: "Configure the VPN-Target filtering."
ce_evpn_bgp_rr:
as_number: 20
policy_vpn_target: enable
provider: "{{ cli }}"
- name: "Configure an RR in BGP-EVPN address family view."
ce_evpn_bgp_rr:
as_number: 20
bgp_evpn_enable: enable
peer_type: ipv4_address
peer: 192.8.3.3
reflect_client: enable
policy_vpn_target: disable
provider: "{{ cli }}"
'''
RETURN = '''
proposed:
description: k/v pairs of parameters passed into module
returned: always
type: dict
sample: {
"as_number": "20",
"bgp_evpn_enable": "enable",
"bgp_instance": null,
"peer": "192.8.3.3",
"peer_type": "ipv4_address",
"policy_vpn_target": "disable",
"reflect_client": "enable"
}
existing:
description: k/v pairs of existing attributes on the device
returned: always
type: dict
sample: {
"as_number": "20",
"bgp_evpn_enable": "disable",
"bgp_instance": null,
"peer": null,
"peer_type": null,
"policy_vpn_target": "disable",
"reflect_client": "disable"
}
end_state:
description: k/v pairs of end attributes on the device
returned: always
type: dict
sample: {
"as_number": "20",
"bgp_evpn_enable": "enable",
"bgp_instance": null,
"peer": "192.8.3.3",
"peer_type": "ipv4_address",
"policy_vpn_target": "disable",
"reflect_client": "enable"
}
updates:
description: command list sent to the device
returned: always
type: list
sample: [
"bgp 20",
" l2vpn-family evpn",
" peer 192.8.3.3 enable",
" peer 192.8.3.3 reflect-client",
" undo policy vpn-target"
]
changed:
description: check to see if a change was made on the device
returned: always
type: boolean
sample: true
'''
import re
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.network.cloudengine.ce import get_config, load_config, ce_argument_spec
def is_config_exist(cmp_cfg, test_cfg):
    """Return True when test_cfg occurs as a substring of cmp_cfg."""
    if cmp_cfg and test_cfg:
        return test_cfg in cmp_cfg
    return False
class EvpnBgpRr(object):
"""Manange RR in BGP-EVPN address family view"""
def __init__(self, argument_spec):
self.spec = argument_spec
self.module = None
self.__init_module__()
# RR configuration parameters
self.as_number = self.module.params['as_number']
self.bgp_instance = self.module.params['bgp_instance']
self.peer_type = self.module.params['peer_type']
self.peer = self.module.params['peer']
self.bgp_evpn_enable = self.module.params['bgp_evpn_enable']
self.reflect_client = self.module.params['reflect_client']
self.policy_vpn_target = self.module.params['policy_vpn_target']
self.commands = list()
self.config = None
self.bgp_evpn_config = ""
self.cur_config = dict()
self.conf_exist = False
# state
self.changed = False
self.updates_cmd = list()
self.results = dict()
self.proposed = dict()
self.existing = dict()
self.end_state = dict()
def __init_module__(self):
"""Init module"""
self.module = AnsibleModule(
argument_spec=self.spec, supports_check_mode=True)
def cli_load_config(self, commands):
"""Load config by cli"""
if not self.module.check_mode:
load_config(self.module, commands)
def is_bgp_view_exist(self):
"""Judge whether BGP view has existed"""
if self.bgp_instance:
view_cmd = "bgp %s instance %s" % (
self.as_number, self.bgp_instance)
else:
view_cmd = "bgp %s" % self.as_number
return is_config_exist(self.config, view_cmd)
def is_l2vpn_family_evpn_exist(self):
"""Judge whether BGP-EVPN address family view has existed"""
view_cmd = "l2vpn-family evpn"
return is_config_exist(self.config, view_cmd)
def is_reflect_client_exist(self):
"""Judge whether reflect client is configured"""
view_cmd = "peer %s reflect-client" % self.peer
return is_config_exist(self.bgp_evpn_config, view_cmd)
def is_policy_vpn_target_exist(self):
"""Judge whether the VPN-Target filtering is enabled"""
view_cmd = "undo policy vpn-target"
if is_config_exist(self.bgp_evpn_config, view_cmd):
return False
else:
return True
def get_config_in_bgp_view(self):
"""Get configuration in BGP view"""
flags = list()
exp = " | section include"
if self.as_number:
if self.bgp_instance:
exp += " bgp %s instance %s" % (self.as_number,
self.bgp_instance)
else:
exp += " bgp %s" % self.as_number
flags.append(exp)
config = get_config(self.module, flags)
return config
def get_config_in_bgp_evpn_view(self):
"""Get configuration in BGP_EVPN view"""
self.bgp_evpn_config = ""
if not self.config:
return ""
index = self.config.find("l2vpn-family evpn")
if index == -1:
return ""
return self.config[index:]
def get_current_config(self):
"""Get current configuration"""
if not self.as_number:
self.module.fail_json(msg='Error: The value of as-number cannot be empty.')
self.cur_config['bgp_exist'] = False
self.cur_config['bgp_evpn_enable'] = 'disable'
self.cur_config['reflect_client'] = 'disable'
self.cur_config['policy_vpn_target'] = 'disable'
self.cur_config['peer_type'] = None
self.cur_config['peer'] = None
self.config = self.get_config_in_bgp_view()
if not self.is_bgp_view_exist():
return
self.cur_config['bgp_exist'] = True
if not self.is_l2vpn_family_evpn_exist():
return
self.cur_config['bgp_evpn_enable'] = 'enable'
self.bgp_evpn_config = self.get_config_in_bgp_evpn_view()
if self.is_reflect_client_exist():
self.cur_config['reflect_client'] = 'enable'
self.cur_config['peer_type'] = self.peer_type
self.cur_config['peer'] = self.peer
if self.is_policy_vpn_target_exist():
self.cur_config['policy_vpn_target'] = 'enable'
def get_existing(self):
"""Get existing config"""
self.existing = dict(as_number=self.as_number,
bgp_instance=self.bgp_instance,
peer_type=self.cur_config['peer_type'],
peer=self.cur_config['peer'],
bgp_evpn_enable=self.cur_config[
'bgp_evpn_enable'],
reflect_client=self.cur_config['reflect_client'],
policy_vpn_target=self.cur_config[
'policy_vpn_target'])
def get_proposed(self):
"""Get proposed config"""
self.proposed = dict(as_number=self.as_number,
bgp_instance=self.bgp_instance,
peer_type=self.peer_type,
peer=self.peer,
bgp_evpn_enable=self.bgp_evpn_enable,
reflect_client=self.reflect_client,
policy_vpn_target=self.policy_vpn_target)
def get_end_state(self):
"""Get end config"""
self.get_current_config()
self.end_state = dict(as_number=self.as_number,
bgp_instance=self.bgp_instance,
peer_type=self.cur_config['peer_type'],
peer=self.cur_config['peer'],
bgp_evpn_enable=self.cur_config[
'bgp_evpn_enable'],
reflect_client=self.cur_config['reflect_client'],
policy_vpn_target=self.cur_config['policy_vpn_target'])
def show_result(self):
"""Show result"""
self.results['changed'] = self.changed
self.results['proposed'] = self.proposed
self.results['existing'] = self.existing
self.results['end_state'] = self.end_state
if self.changed:
self.results['updates'] = self.updates_cmd
else:
self.results['updates'] = list()
self.module.exit_json(**self.results)
def judge_if_config_exist(self):
"""Judge whether configuration has existed"""
if self.bgp_evpn_enable and self.bgp_evpn_enable != self.cur_config['bgp_evpn_enable']:
return False
if self.bgp_evpn_enable == 'disable' and self.cur_config['bgp_evpn_enable'] == 'disable':
return True
if self.reflect_client and self.reflect_client == 'enable':
if self.peer_type and self.peer_type != self.cur_config['peer_type']:
return False
if self.peer and self.peer != self.cur_config['peer']:
return False
if self.reflect_client and self.reflect_client != self.cur_config['reflect_client']:
return False
if self.policy_vpn_target and self.policy_vpn_target != self.cur_config['policy_vpn_target']:
return False
return True
def cli_add_command(self, command, undo=False):
"""Add command to self.update_cmd and self.commands"""
if undo and command.lower() not in ["quit", "return"]:
cmd = "undo " + command
else:
cmd = command
self.commands.append(cmd) # set to device
if command.lower() not in ["quit", "return"]:
self.updates_cmd.append(cmd) # show updates result
def config_rr(self):
"""Configure RR"""
if self.conf_exist:
return
if self.bgp_instance:
view_cmd = "bgp %s instance %s" % (
self.as_number, self.bgp_instance)
else:
view_cmd = "bgp %s" % self.as_number
self.cli_add_command(view_cmd)
if self.bgp_evpn_enable == 'disable':
self.cli_add_command(" undo l2vpn-family evpn")
else:
self.cli_add_command(" l2vpn-family evpn")
if self.reflect_client and self.reflect_client != self.cur_config['reflect_client']:
if self.reflect_client == 'enable':
self.cli_add_command(" peer %s enable" % self.peer)
self.cli_add_command(
" peer %s reflect-client" % self.peer)
else:
self.cli_add_command(
" undo peer %s reflect-client" % self.peer)
self.cli_add_command(" undo peer %s enable" % self.peer)
if self.cur_config['bgp_evpn_enable'] == 'enable':
if self.policy_vpn_target and self.policy_vpn_target != self.cur_config['policy_vpn_target']:
if self.policy_vpn_target == 'enable':
self.cli_add_command(" policy vpn-target")
else:
self.cli_add_command(" undo policy vpn-target")
else:
if self.policy_vpn_target and self.policy_vpn_target == 'disable':
self.cli_add_command(" undo policy vpn-target")
if self.commands:
self.cli_load_config(self.commands)
self.changed = True
def check_is_ipv4_addr(self):
"""Check ipaddress validate"""
rule1 = r'(25[0-5]|2[0-4][0-9]|1[0-9][0-9]|[1-9]?[0-9])\.'
rule2 = r'(25[0-5]|2[0-4][0-9]|1[0-9][0-9]|[1-9]?[0-9])'
ipv4_regex = '%s%s%s%s%s%s' % ('^', rule1, rule1, rule1, rule2, '$')
return bool(re.match(ipv4_regex, self.peer))
def check_params(self):
"""Check all input params"""
if self.cur_config['bgp_exist'] == 'false':
self.module.fail_json(msg="Error: BGP view doesnot exist.")
if self.bgp_instance:
if len(self.bgp_instance) < 1 or len(self.bgp_instance) > 31:
self.module.fail_json(
msg="Error: The length of BGP instance-name must be between 1 or a string of 1 to and 31.")
if self.as_number:
if len(self.as_number) > 11 or len(self.as_number) == 0:
self.module.fail_json(
msg='Error: The len of as_number %s is out of [1 - 11].' % self.as_number)
tmp_dict1 = dict(peer_type=self.peer_type,
peer=self.peer,
reflect_client=self.reflect_client)
tmp_dict2 = dict((k, v)
for k, v in tmp_dict1.items() if v is not None)
if len(tmp_dict2) != 0 and len(tmp_dict2) != 3:
self.module.fail_json(
msg='Error: The peer, peer_type, and reflect_client arguments must all exist or not exist.')
if self.peer_type:
if self.peer_type == 'ipv4_address' and not self.check_is_ipv4_addr():
self.module.fail_json(msg='Error: Illegal IPv4 address.')
elif self.peer_type == 'group_name' and self.check_is_ipv4_addr():
self.module.fail_json(
msg='Error: Ip address cannot be configured as group-name.')
    def work(self):
        """Execute the task: gather state, validate, configure, report."""
        self.get_current_config()
        self.check_params()
        self.get_existing()
        self.get_proposed()
        # Only emit CLI when the desired config is not already on the device.
        self.conf_exist = self.judge_if_config_exist()
        self.config_rr()
        self.get_end_state()
        self.show_result()
def main():
    """Module entry point: build the argument spec and run the task."""
    spec = {
        'as_number': dict(required=True, type='str'),
        'bgp_instance': dict(required=False, type='str'),
        'bgp_evpn_enable': dict(required=False, type='str',
                                default='enable',
                                choices=['enable', 'disable']),
        'peer_type': dict(required=False, type='str',
                          choices=['group_name', 'ipv4_address']),
        'peer': dict(required=False, type='str'),
        'reflect_client': dict(required=False, type='str',
                               choices=['enable', 'disable']),
        'policy_vpn_target': dict(required=False,
                                  choices=['enable', 'disable']),
    }
    # Merge in the common CloudEngine connection arguments.
    spec.update(ce_argument_spec)
    EvpnBgpRr(spec).work()
# Run only when executed directly (Ansible invokes this file as a module).
if __name__ == '__main__':
    main()
|
"""
Support for getting current pollen levels from Pollenkoll.se
Visit https://pollenkoll.se/pollenprognos/ to find available cities
Visit https://pollenkoll.se/pollenprognos-ostersund/ to find available allergens
Example configuration
sensor:
- platform: pollenniva
scan_interval: 4 (default, optional)
state_as_string: false (default, optional, show states as strings as per STATES below)
sensors:
- city: Stockholm
days_to_track: 3 (0-3, optional)
allergens:
- Gräs
- Hassel
- city: Östersund
allergens:
- Hassel
"""
import logging
import json
from collections import namedtuple
from datetime import timedelta
import voluptuous as vol
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.entity import Entity
from homeassistant.components.sensor import PLATFORM_SCHEMA
from homeassistant.components.rest.sensor import RestData
from homeassistant.const import (CONF_NAME)
from dateutil import parser
from datetime import datetime
_LOGGER = logging.getLogger(__name__)
# Pollenkoll "get_all" feed: a single JSON document covering every city.
_ENDPOINT = 'https://pollenkoll.se/wp-content/themes/pollenkoll/api/get_all.json'
# Map of the Swedish pollen-level strings used by the API to numeric states
# ("i.h." appears to mean no level reported — TODO confirm).
STATES = {
    "i.h.": 0,
    "L": 1,
    "L-M": 2,
    "M": 3,
    "M-H": 4,
    "H": 5,
    "H-H+": 6
}
DEFAULT_NAME = 'Pollennivå'
# Hours between polls of the feed.
DEFAULT_INTERVAL = 4
DEFAULT_STATE_AS_STRING = False
DEFAULT_VERIFY_SSL = True
CONF_SENSORS = 'sensors'
CONF_INTERVAL = 'scan_interval'
CONF_STATE_AS_STRING = 'state_as_string'
# Recognised keys of each sensor config entry (value is a Swedish label).
SENSOR_OPTIONS = {
    'city': ('Stad'),
    'allergens': ('Allergener'),
    'days_to_track': ('Antal dagar framåt (0-3)')
}
# Frontend icon per allergen; 'default' is the fallback.
SENSOR_ICONS = {
    'Al': 'mdi:leaf',
    'Alm': 'mdi:leaf',
    'Asp': 'mdi:leaf',
    'Björk': 'mdi:leaf',
    'Ek': 'mdi:leaf',
    'Gråbo': 'mdi:flower',
    'Gräs': 'mdi:flower',
    'Hassel': 'mdi:leaf',
    'Sälg': 'mdi:leaf',
    'default': 'mdi:leaf'
}
# Platform configuration schema.
# NOTE(review): the previous version used ``vol.Optional`` as the *validator*
# for the sensors list.  Optional is a dict-key marker, not a validator, so
# the sensor list was effectively unvalidated.  ``vol.All`` is the correct
# way to chain ensure_list with per-item validation.
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
    vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
    # NOTE(review): validated as a string though DEFAULT_INTERVAL is an int;
    # Home Assistant normally consumes scan_interval itself — confirm.
    vol.Optional(CONF_INTERVAL, default=DEFAULT_INTERVAL): cv.string,
    vol.Optional(CONF_STATE_AS_STRING, default=DEFAULT_STATE_AS_STRING): cv.boolean,
    # Each sensor entry is a dict such as {'city': ..., 'allergens': [...]};
    # only the container shape is validated here, setup_platform reads keys.
    vol.Required(CONF_SENSORS, default=[]): vol.All(cv.ensure_list, [dict]),
})
# Default polling interval for the platform.
SCAN_INTERVAL = timedelta(hours=DEFAULT_INTERVAL)
def setup_platform(hass, config, add_devices, discovery_info=None):
    """Set up the Pollen sensor platform from configuration."""
    name = config.get(CONF_NAME)
    sensors = config.get(CONF_SENSORS)
    state_as_string = config.get(CONF_STATE_AS_STRING)
    # One shared REST client fetches the whole country-wide feed once.
    rest = RestData('GET', _ENDPOINT, '', {}, '', DEFAULT_VERIFY_SSL)
    rest.update()
    if rest.data is None:
        _LOGGER.error("Unable to fetch data from Pollenkollen")
        return False
    devices = []
    for sensor in sensors:
        # One entity per (forecast day, allergen); without days_to_track
        # only today (day 0) is tracked.
        if 'days_to_track' in sensor:
            days = range(int(sensor['days_to_track']))
        else:
            days = [0]
        for day in days:
            for allergen in sensor['allergens']:
                devices.append(
                    PollenkollSensor(rest, name, sensor, allergen,
                                     state_as_string, day))
    add_devices(devices, True)
# pylint: disable=no-member
class PollenkollSensor(Entity):
    """Representation of a Pollen sensor."""

    def __init__(self, rest, name, sensor, allergen, state_as_string, day=0):
        """Initialize a Pollen sensor.

        rest -- shared RestData instance fetching the whole feed
        sensor -- sensor config dict; must contain a 'city' key
        allergen -- allergen name this entity reports (e.g. "Hassel")
        state_as_string -- if False, map level strings to ints via STATES
        day -- forecast day offset (0 = today)
        """
        self._state_as_string = state_as_string
        self._rest = rest
        self._item = sensor
        self._city = sensor['city']
        self._state = None
        self._day = day
        self._allergen = allergen
        self._name = "{} {} {} day {}".format(name, self._city, self._allergen, str(self._day))
        self._attributes = None
        self._result = None

    @property
    def name(self):
        """Return the name of the sensor."""
        return self._name

    @property
    def state(self):
        """Return the state of the device (None until first update)."""
        if self._state is not None:
            return self._state
        return None

    @property
    def device_state_attributes(self):
        """Return the state attributes of the monitored installation."""
        # Implicitly returns None before the first successful update.
        if self._attributes is not None:
            return self._attributes

    @property
    def unit_of_measurement(self):
        """Return the unit of measurement (levels are unitless)."""
        return ""

    @property
    def icon(self):
        """Return the icon for the frontend, keyed by allergen name."""
        if self._allergen in SENSOR_ICONS:
            return SENSOR_ICONS[self._allergen]
        return SENSOR_ICONS['default']

    def update(self):
        """Get the latest data from the API and update the state.

        Scans every city in the feed; when several entries match, the last
        one wins because the attributes are overwritten on each match.
        """
        try:
            pollen = {}
            self._rest.update()
            # rest.data may be None on fetch failure; json.loads(None)
            # raises TypeError, which is caught below.
            self._result = json.loads(self._rest.data)
            self._attributes = {}
            for cities in self._result:
                for city in cities['CitiesData']:
                    # NOTE(review): substring match — a feed city name that
                    # is contained in the configured name matches; confirm
                    # this is intended rather than equality.
                    if city['name'] in self._city:
                        self._attributes.update({"last_modified": city['date_mod']})
                        self._attributes.update({"city": city['name']})
                        pollen = city['pollen']
                        for allergen in pollen:
                            if allergen['type'] == self._allergen:
                                # Keys look like "day0_value", "day0_name", …
                                day_value = 'day' + str(self._day) + '_value'
                                if day_value in allergen:
                                    if self._state_as_string is False and allergen[day_value] in STATES:
                                        value = STATES[allergen[day_value]]
                                    else:
                                        value = allergen[day_value]
                                    self._state = value
                                    self._attributes.update({"allergen": allergen['type']})
                                    self._attributes.update({"level": allergen[day_value]})
                                    self._attributes.update({"relative_day": allergen['day' + str(self._day) + '_relative_date']})
                                    self._attributes.update({"day": allergen['day' + str(self._day) + '_name']})
                                    self._attributes.update({"date": allergen['day' + str(self._day) + '_date']})
                                    self._attributes.update({"description": allergen['day' + str(self._day) + '_desc']})
        except TypeError as e:
            self._result = None
            _LOGGER.error(
                "Unable to fetch data from Pollenkoll. " + str(e))
import os
import argparse
import torch
import torch.nn as nn
from torch.nn import functional as F
from torch.autograd import Variable
from torch.utils.data import DataLoader
from torch.optim.lr_scheduler import ReduceLROnPlateau
from Yolo_v2_pytorch.src.anotherMissOh_dataset import AnotherMissOh, Splits, SortFullRect, PersonCLS, PBeHavCLS
from Yolo_v2_pytorch.src.utils import *
from Yolo_v2_pytorch.src.loss import YoloLoss
import shutil
import cv2
import pickle
import numpy as np
from lib.logger import Logger
from lib.person_model import person_model
from lib.pytorch_misc import optimistic_restore, de_chunkize, clip_grad_norm, flatten
from lib.focal_loss import FocalLossWithOneHot, FocalLossWithOutOneHot, CELossWithOutOneHot
from lib.hyper_yolo import anchors
'''
----------------------------------------------
--------sgd learning on 4 gpus----------------
----------------------------------------------
01 epoch : 2.85 %
03 epoch : 4.73 %
09 epoch : 6.45 %
12 epoch : 5.49 %
20 epoch : 7.76 %
----------------------------------------------
'''
def get_args():
    """Parse and return the command-line arguments for training."""
    ap = argparse.ArgumentParser(
        "You Only Look Once:Unified, Real-Time Object Detection")
    # Input geometry and batching.
    ap.add_argument("--image_size", type=int, default=448,
                    help="The common width and height for all images")
    ap.add_argument("--batch_size", type=int, default=1,
                    help="The number of images per batch")
    # Optimiser hyper-parameters.
    ap.add_argument("--momentum", type=float, default=0.9)
    ap.add_argument("--decay", type=float, default=0.0005)
    ap.add_argument("--dropout", type=float, default=0.5)
    ap.add_argument("--num_epoches", type=int, default=200)
    ap.add_argument("--test_interval", type=int, default=1,
                    help="Number of epoches between testing phases")
    # YOLO loss weighting.
    ap.add_argument("--object_scale", type=float, default=1.0)
    ap.add_argument("--noobject_scale", type=float, default=0.5)
    ap.add_argument("--class_scale", type=float, default=1.0)
    ap.add_argument("--coord_scale", type=float, default=5.0)
    ap.add_argument("--reduction", type=int, default=32)
    # Early stopping.
    ap.add_argument("--es_min_delta", type=float, default=0.0,
                    help="Early stopping's parameter:minimum change loss to qualify as an improvement")
    ap.add_argument("--es_patience", type=int, default=0,
                    help="Early stopping's parameter:number of epochs with no improvement after which training will be stopped. Set to 0 to disable this technique.")
    # Checkpoint locations.
    ap.add_argument("--pre_trained_model_type", type=str,
                    choices=["model", "params"], default="model")
    ap.add_argument("--trained_model_path", type=str,
                    default="./checkpoint/detector")  # Pre-training path
    ap.add_argument("--saved_path", type=str,
                    default="./checkpoint/person")  # saved training path
    # Detection thresholds.
    ap.add_argument("--conf_threshold", type=float, default=0.35)
    ap.add_argument("--nms_threshold", type=float, default=0.5)
    # Dataset locations.
    ap.add_argument("--img_path", type=str,
                    default="./data/AnotherMissOh/AnotherMissOh_images_ver3.2/")
    ap.add_argument("--json_path", type=str,
                    default="./data/AnotherMissOh/AnotherMissOh_Visual_ver3.2/")
    # Single-dash options kept for backward compatibility.
    ap.add_argument("-model", dest='model', type=str, default="baseline")
    ap.add_argument("-lr", dest='lr', type=float, default=1e-5)
    ap.add_argument("-clip", dest='clip', type=float, default=10.0)
    ap.add_argument("-print_interval", dest='print_interval', type=int,
                    default=100)
    ap.add_argument("-b_loss", dest='b_loss', type=str, default='ce')
    ap.add_argument("-f_gamma", dest='f_gamma', type=float, default=1.0)
    ap.add_argument("-clip_grad", dest='clip_grad', action='store_true')
    return ap.parse_args()
# Parse command-line arguments once at import time.
opt = get_args()
print(opt)
# Split the 18 episodes into train, val, test partitions.
# NOTE(review): the name `train` is rebound to a function by `def train(opt)`
# below; the episode list is only used for train_set before that happens.
train, val, test = Splits(num_episodes=18)
# Load the datasets (val_set/test_set are built but unused in this script).
train_set = AnotherMissOh(train, opt.img_path, opt.json_path, False)
val_set = AnotherMissOh(val, opt.img_path, opt.json_path, False)
test_set = AnotherMissOh(test, opt.img_path, opt.json_path, False)
num_persons = len(PersonCLS)
# TensorBoard-style logger, one directory per model name.
logger_path = 'logs/{}'.format(opt.model)
if os.path.exists(logger_path):
    print('exist_{}'.format(logger_path))
else:
    os.makedirs(logger_path)
    print('mkdir_{}'.format(logger_path))
logger = Logger(logger_path)
def train(opt):
    """Train the person detector for opt.num_epoches and save checkpoints."""
    device = torch.device('cuda' if torch.cuda.is_available() else "cpu")
    torch.cuda.manual_seed(123)
    training_params = {"batch_size": opt.batch_size,
                       "shuffle": True,
                       "drop_last": True,
                       "collate_fn": custom_collate_fn,
                       "num_workers": 8}
    # NOTE(review): test_params is built but never used in this function.
    test_params = {"batch_size": opt.batch_size,
                   "shuffle": False,
                   "drop_last": False,
                   "collate_fn": custom_collate_fn}
    train_loader = DataLoader(train_set, **training_params)
    # define behavior-model
    model = person_model(num_persons, device)
    # Disabled branch: person_model already loads the VOC pre-trained params.
    if False:
        # cause in person_model, loaded the voc pre-trained params
        trained_persons = opt.trained_model_path + os.sep + "{}".format(
            'anotherMissOh_only_params_person.pth')
        ckpt = torch.load(trained_persons)
        if optimistic_restore(model.detector, ckpt):
            print("loaded pre-trained detector sucessfully.")
    # initialization
    model.to(device)
    # get optim
    num_gpus = torch.cuda.device_count()
    # yolo detector and person parameters (currently unused: p_params below
    # takes all parameters instead).
    fc_params = [p for n, p in model.named_parameters()
                 if n.startswith('person')
                 or n.startswith('detector')
                 and p.requires_grad]
    #p_params = [{'params': fc_params, 'lr': opt.lr * num_gpus}]
    p_params = model.parameters()
    # Adam branch disabled; SGD is the active optimiser.
    if False:
        p_optimizer = torch.optim.Adam(p_params, lr=opt.lr,
                                       weight_decay=opt.decay, amsgrad=True)
    else:
        p_optimizer = torch.optim.SGD(p_params, lr=opt.lr,
                                      momentum=opt.momentum, weight_decay=opt.decay)
    # NOTE(review): p_scheduler is only defined inside this disabled branch,
    # so the (also disabled) p_scheduler.step() below would NameError if
    # either flag were flipped independently.
    if False:
        p_scheduler = ReduceLROnPlateau(p_optimizer, 'min', patience=2,
                                        factor=0.1, verbose=True,
                                        threshold=0.0001, threshold_mode='abs',
                                        cooldown=1)
    # Manual step schedule keyed by epoch number (as a string).
    learning_rate_schedule = {"0": 1e-5, "5": 1e-4,
                              "80": 1e-5, "110": 1e-6}
    # multi-gpus
    if num_gpus > 1:
        model = torch.nn.DataParallel(model)
    model.to(device)
    model.train()
    # Re-initialize the last module's weights.
    # NOTE(review): assumes the last entry of model.modules() has a .weight
    # (e.g. a Linear/Conv layer) — confirm for the wrapped model.
    nn.init.normal_(list(model.modules())[-1].weight, 0, 0.01)
    criterion = YoloLoss(num_persons, anchors, device, opt.reduction)
    num_iter_per_epoch = len(train_loader)
    for epoch in range(opt.num_epoches):
        # Apply the manual learning-rate schedule at scheduled epochs.
        if str(epoch) in learning_rate_schedule.keys() and True:
            for param_group in p_optimizer.param_groups:
                param_group['lr'] = learning_rate_schedule[str(epoch)]
        p_loss_list = []
        # NOTE(review): `iter` shadows the builtin within this loop.
        for iter, batch in enumerate(train_loader):
            # behavior_lr and verbose are computed but not used below.
            behavior_lr = iter % (1) == 0
            verbose = iter % (opt.print_interval * 10) == 0
            image, info = batch
            # sort label info on fullrect
            image, label, behavior_label, obj_label, face_label, emo_label = SortFullRect(
                image, info, is_train=True)
            if np.array(label).size == 0:
                # NOTE(review): format string has one placeholder but two
                # args; the extra `label` argument is silently ignored.
                print("iter:{}_person bboxs are empty".format(
                    iter, label))
                continue
            # image [b, 3, 448, 448]
            if torch.cuda.is_available():
                image = torch.cat(image).to(device)
            else:
                image = torch.cat(image)
            p_optimizer.zero_grad()
            # logits [b, 125, 14, 14]
            logits, _ = model(image)
            # losses for person detection
            loss, loss_coord, loss_conf, loss_cls = criterion(
                logits, label, device)
            loss.backward()
            p_optimizer.step()
            print("Model:{}".format(opt.model))
            print("Epoch: {}/{}, Iteration: {}/{}, lr:{:.9f}".format(
                epoch + 1, opt.num_epoches, iter + 1,
                num_iter_per_epoch, p_optimizer.param_groups[0]['lr']))
            print("+loss:{:.2f}(coord:{:.2f},conf:{:.2f},cls:{:.2f})".format(
                loss, loss_coord, loss_conf, loss_cls))
            print()
            loss_dict = {
                'total': loss.item(),
                'coord': loss_coord.item(),
                'conf': loss_conf.item(),
                'cls': loss_cls.item(),
            }
            # Track the classification loss every 100 iterations (consumed
            # only by the disabled scheduler block below).
            if iter % 100 == 0:
                p_loss_list.append(loss_cls.item())
            # Log scalar values
            for tag, value in loss_dict.items():
                logger.scalar_summary(tag, value, num_iter_per_epoch * epoch + iter)
        # Save checkpoints once per epoch.
        print("SAVE MODEL")
        if not os.path.exists(opt.saved_path):
            os.makedirs(opt.saved_path + os.sep + "{}".format('person'))
            print('mkdir_{}'.format(opt.saved_path))
        # learning rate schedular (disabled; see p_scheduler note above)
        if False:
            p_loss_avg = np.stack(p_loss_list).mean()
            p_scheduler.step(p_loss_avg)
        torch.save(model.state_dict(),
                   opt.saved_path + os.sep + "anotherMissOh_only_params_{}.pth".format(opt.model))
        torch.save(model,
                   opt.saved_path + os.sep + "anotherMissOh_{}.pth".format(opt.model))


# Kick off training when run as a script.
if __name__ == "__main__":
    train(opt)
|
"""
Authorization Utilities
"""
from shared.models.user_entities import User
from shared.service.jwt_auth_wrapper import JWTAuthManager
# JWT auth manager backed by the OIDC secret stored in Vault; builds a User
# object from the validated token claims plus the role assumed by the caller.
manager = JWTAuthManager(oidc_vault_secret="oidc/rest",
                         object_creator=lambda claims, assumed_role, user_roles: User(
                             first_name=claims["given_name"],
                             last_name=claims["family_name"],
                             school=assumed_role,
                             email=claims['email']
                         ))
# FastAPI-style dependency that extracts the authenticated User from headers.
AUTH_USER = manager.auth_header()
|
import os
from mock import patch, Mock
from unittest import skipUnless
from unittest import skip
from testconfig import config
import logging
logger = logging.getLogger("test")
logger.setLevel(logging.DEBUG)
from tests.integration.core.chroma_integration_testcase import ChromaIntegrationTestCase
class TestHsmCoordinatorControl(ChromaIntegrationTestCase):
    """Integration tests for HSM coordinator control at filesystem creation."""

    def _create_with_params(self, enabled=False):
        """Create a 'testfs' filesystem whose MDT has hsm_control set.

        enabled -- when True the MDT conf param enables the HSM coordinator.
        """
        host_addresses = [h["address"] for h in config["lustre_servers"][:2]]
        self.hosts = self.add_hosts(host_addresses)
        self.configure_power_control(host_addresses)
        # Since the test code seems to rely on this ordering, we should
        # check for it right away and blow up if it's not as we expect.
        self.assertEqual(
            [h["address"] for h in self.hosts],
            [config["lustre_servers"][0]["address"], config["lustre_servers"][1]["address"]],
        )
        # One volume each for MGT/MDT/OST (fourth is spare).
        volumes = self.wait_for_shared_volumes(4, 2)
        mgt_volume = volumes[0]
        mdt_volume = volumes[1]
        ost_volume = volumes[2]
        host_ids = [h["id"] for h in self.hosts]
        self.set_volume_mounts(mgt_volume, *host_ids)
        self.set_volume_mounts(mdt_volume, *host_ids)
        self.set_volume_mounts(ost_volume, *host_ids)
        if enabled:
            mdt_params = {"mdt.hsm_control": "enabled"}
        else:
            mdt_params = {"mdt.hsm_control": "disabled"}
        self.filesystem_id = self.create_filesystem(
            self.hosts,
            {
                "name": "testfs",
                "mgt": {"volume_id": mgt_volume["id"]},
                "mdts": [{"volume_id": mdt_volume["id"], "conf_params": mdt_params}],
                "osts": [{"volume_id": ost_volume["id"], "conf_params": {}}],
                "conf_params": {"llite.max_cached_mb": "16"},
            },
        )

    def _test_params(self):
        """Wait until the MDT reports hsm_control=enabled via lctl."""
        mds = config["lustre_servers"][0]["address"]
        param = "mdt.testfs-MDT0000.hsm_control"
        self.wait_until_true(lambda: "{}=enabled".format(param) == self.remote_operations.lctl_get_param(mds, param))

    def test_hsm_coordinator_enabled_at_fs_creation(self):
        """Coordinator enabled via conf_params is live after creation."""
        self._create_with_params(enabled=True)
        self._test_params()
        self.graceful_teardown(self.chroma_manager)
class ManagedCopytoolTestCase(ChromaIntegrationTestCase):
    """Base class: sets up an HSM filesystem, a worker host and a copytool."""

    def _create_copytool(self):
        """POST a posix copytool definition and return the created record."""
        test_copytool = dict(
            filesystem=self.filesystem["resource_uri"],
            host=self.worker["resource_uri"],
            bin_path="/usr/sbin/lhsmtool_posix",
            archive=1,
            mountpoint="/mnt/testfs",
            hsm_arguments="-p /tmp",
        )
        response = self.chroma_manager.post("/api/copytool/", body=test_copytool)
        self.assertTrue(response.successful, response.text)
        return response.json["copytool"]

    def setUp(self):
        # Add a worker node on top of the standard server set.
        self.TEST_SERVERS.append(self.config_workers[0])
        super(ManagedCopytoolTestCase, self).setUp()
        filesystem_id = self.create_filesystem_standard(self.TEST_SERVERS, hsm=True)
        self.filesystem = self.get_json_by_uri("/api/filesystem/%s" % filesystem_id)
        self.worker = self.add_hosts([self.config_workers[0]["address"]])[0]
        self.copytool = self._create_copytool()
class TestHsmCopytoolWorker(ManagedCopytoolTestCase):
    """Removing a worker host tears down its running copytool cleanly."""

    def test_worker_remove(self):
        # Start the copytool (creates a client mount, etc.)
        action = self.wait_for_action(self.copytool, state="started")
        self.set_state(self.copytool["resource_uri"], action["state"])
        # Now remove the worker with everything started and make sure
        # it all gets torn down cleanly.
        action = self.wait_for_action(self.worker, state="removed")
        command = self.set_state(self.worker["resource_uri"], action["state"], verify_successful=False)
        self.assertFalse(command["errored"] or command["cancelled"], command)
class TestHsmCopytoolManagement(ManagedCopytoolTestCase):
    """Lifecycle tests: start/stop, remove and force-remove a copytool."""

    def test_copytool_start_stop(self):
        action = self.wait_for_action(self.copytool, state="started")
        self.set_state(self.copytool["resource_uri"], action["state"])
        action = self.wait_for_action(self.copytool, state="stopped")
        self.set_state(self.copytool["resource_uri"], action["state"])

    def test_copytool_remove(self):
        action = self.wait_for_action(self.copytool, state="removed")
        self.set_state(self.copytool["resource_uri"], action["state"], verify_successful=False)
        # Removal must leave no copytool records behind.
        self.wait_until_true(lambda: len(self.get_list("/api/copytool/")) == 0)

    def test_copytool_force_remove(self):
        action = self.wait_for_action(self.copytool, class_name="ForceRemoveCopytoolJob")
        self.run_command([action], "Test Force Remove (%s)" % self.copytool["label"])
        self.wait_until_true(lambda: len(self.get_list("/api/copytool/")) == 0)
# Neuter the simulated copytool's event pipe: replacing the fifo with a
# Mock swallows anything it tries to write into the monitor's fifo.
def patch_fifo(obj):
    """Replace obj.fifo with a Mock so written events go nowhere."""
    setattr(obj, "fifo", Mock())
@skip("Needs implementation for real hardware")
class TestHsmCopytoolEventRelay(ManagedCopytoolTestCase):
    """Drives the copytool event fifo by hand and checks operation tracking."""

    def _get_fifo_writer(self, copytool):
        """Open the copytool's event fifo for (line-buffered) writing."""
        fifo_path = os.path.join(
            self.COPYTOOL_TESTING_FIFO_ROOT, "%s-%s-events" % (copytool["host"]["address"], copytool["label"])
        )
        logger.info("Opening %s for write in test harness" % fifo_path)
        return open(fifo_path, "w", 1)

    def _get_active_operations(self):
        """Return the list of currently-active copytool operations."""
        return self.get_list("/api/copytool_operation/", {"active": True})

    def test_restore_operation(self, *mocks):
        action = self.wait_for_action(self.copytool, state="started")
        self.set_state(self.copytool["resource_uri"], action["state"])
        # Wait until everything is really started
        self.wait_for_action(self.copytool, state="stopped")
        # Get a handle on the intake side of the pipeline
        fifo = self._get_fifo_writer(self.copytool)
        # Assert that we're starting with a clean slate (no current ops)
        self.assertEqual(len(self._get_active_operations()), 0)
        # Write a start event and see if it makes it all the way through
        fifo.write(
            '{"event_time": "2014-01-31 02:58:19 -0500", "event_type": "RESTORE_START", "total_bytes": 0, "lustre_path": "boot/vmlinuz-2.6.32-431.3.1.el6.x86_64", "source_fid": "0x200000400:0x13:0x0", "data_fid": "0x200000400:0x13:0x0"}\n'
        )
        self.wait_until_true(lambda: len(self._get_active_operations()))
        operation = self._get_active_operations()[0]
        # Report some progress, make sure that the active operation reflects
        # the update.
        fifo.write(
            '{"event_time": "2014-01-31 02:58:19 -0500", "event_type": "RESTORE_RUNNING", "current_bytes": 0, "total_bytes": 4128688, "lustre_path": "boot/vmlinuz-2.6.32-431.3.1.el6.x86_64", "source_fid": "0x200000400:0x13:0x0", "data_fid": "0x200000401:0x1:0x0"}\n'
        )
        self.wait_until_true(lambda: self._get_active_operations()[0]["updated_at"] != operation["updated_at"])
        # Finally, make sure that a finish event zeroes out the list of
        # active operations.
        fifo.write(
            '{"event_time": "2014-01-31 02:58:19 -0500", "event_type": "RESTORE_FINISH", "source_fid": "0x200000401:0x1:0x0", "data_fid": "0x200000401:0x1:0x0"}\n'
        )
        self.wait_until_true(lambda: len(self._get_active_operations()) == 0)
|
# Portion Copyright (C) 2010 Nokia Corporation and/or its subsidiary(-ies).
# All rights reserved. Contact: Pasi Eronen <pasi.eronen@nokia.com>
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
#
# * Neither the name of the Nokia Corporation and/or its
# subsidiary(-ies) nor the names of its contributors may be used
# to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import os
import itertools
import re
from tempfile import mkstemp
import datetime
from collections import OrderedDict
import debug # pyflakes:ignore
from django import forms
from django.shortcuts import render, redirect, get_object_or_404
from django.template.loader import render_to_string
from django.http import HttpResponse, Http404, HttpResponseRedirect
from django.conf import settings
from django.urls import reverse as urlreverse
from django.views.decorators.cache import cache_page
from django.db.models import Q
from ietf.doc.models import State, DocAlias, RelatedDocument
from ietf.doc.utils import get_chartering_type
from ietf.doc.templatetags.ietf_filters import clean_whitespace
from ietf.doc.utils_search import prepare_document_table
from ietf.doc.utils_charter import charter_name_for_group
from ietf.group.models import Group, Role, ChangeStateGroupEvent
from ietf.name.models import GroupTypeName
from ietf.group.utils import (get_charter_text, can_manage_group_type,
milestone_reviewer_for_group_type, can_provide_status_update,
can_manage_materials, get_group_or_404,
construct_group_menu_context, get_group_materials)
from ietf.group.views_edit import roles_for_group_type
from ietf.community.utils import docs_tracked_by_community_list
from ietf.community.models import CommunityList, EmailSubscription
from ietf.utils.pipe import pipe
from ietf.utils.textupload import get_cleaned_text_file_content
from ietf.settings import MAILING_LIST_INFO_URL
from ietf.mailtrigger.utils import gather_relevant_expansions
from ietf.ietfauth.utils import has_role
from ietf.meeting.utils import group_sessions
from ietf.meeting.helpers import get_meeting
def roles(group, role_name):
    """Return the Role queryset for *group* restricted to *role_name*."""
    qs = Role.objects.filter(group=group, name=role_name)
    return qs.select_related("email", "person")
def fill_in_charter_info(group, include_drafts=False):
    """Annotate *group* with charter attributes used by the templates.

    Sets group.areadirector, group.personnel (sorted (slug, label, roles)
    tuples), group.milestones and group.charter_text.  *include_drafts* is
    kept for interface compatibility; it is not used here.
    """
    group.areadirector = getattr(group.ad_role(), 'email', None)
    personnel = {}
    for r in Role.objects.filter(group=group).order_by('person__name').select_related("email", "person", "name"):
        if r.name_id not in personnel:
            personnel[r.name_id] = []
        personnel[r.name_id].append(r)
    # A group under an area inherits its AD when no explicit "ad" role exists.
    if group.parent and group.parent.type_id == "area" and group.ad_role() and "ad" not in personnel:
        ad_roles = list(Role.objects.filter(group=group.parent, name="ad", person=group.ad_role().person))
        if ad_roles:
            personnel["ad"] = ad_roles
    group.personnel = []
    # .items() instead of the Python-2-only .iteritems(); the loop variable
    # is also renamed so it no longer shadows the module-level roles() helper.
    for role_name_slug, role_list in personnel.items():
        label = role_list[0].name.name
        if len(role_list) > 1:
            # Crude English pluralisation of the role label.
            if label.endswith("y"):
                label = label[:-1] + "ies"
            else:
                label += "s"
        group.personnel.append((role_name_slug, label, role_list))
    group.personnel.sort(key=lambda t: t[2][0].name.order)
    # Proposed groups show chartering milestones, others the active ones.
    milestone_state = "charter" if group.state_id == "proposed" else "active"
    group.milestones = group.groupmilestone_set.filter(state=milestone_state).order_by('due')
    if group.charter:
        group.charter_text = get_charter_text(group)
    else:
        group.charter_text = u"Not chartered yet."
def extract_last_name(role):
    """Sort key: the person's last name (index 3 of name_parts())."""
    parts = role.person.name_parts()
    return parts[3]
def wg_summary_area(request, group_type):
    """Plain-text summary of active WGs grouped by area (wg type only)."""
    if group_type != "wg":
        raise Http404
    areas = Group.objects.filter(type="area", state="active").order_by("name")
    for area in areas:
        area.ads = sorted(roles(area, "ad"), key=extract_last_name)
        area.groups = Group.objects.filter(parent=area, type="wg", state="active").order_by("acronym")
        for group in area.groups:
            group.chairs = sorted(roles(group, "chair"), key=extract_last_name)
    # Drop areas that currently have no active WGs.
    areas = [a for a in areas if a.groups]
    return render(request, 'group/1wg-summary.txt',
                  { 'areas': areas },
                  content_type='text/plain; charset=UTF-8')
def wg_summary_acronym(request, group_type):
    """Plain-text summary of active WGs ordered by acronym (wg type only)."""
    if group_type != "wg":
        raise Http404
    areas = Group.objects.filter(type="area", state="active").order_by("name")
    # Flat WG list; the template receives both areas and groups separately.
    groups = Group.objects.filter(type="wg", state="active").order_by("acronym").select_related("parent")
    for group in groups:
        group.chairs = sorted(roles(group, "chair"), key=extract_last_name)
    return render(request, 'group/1wg-summary-by-acronym.txt',
                  { 'areas': areas,
                    'groups': groups },
                  content_type='text/plain; charset=UTF-8')
def fill_in_wg_roles(group):
    """Expose group.personnel entries as flat role attributes on *group*.

    group.personnel is a list of (slug, label, roles) tuples with unique
    slugs (built by fill_in_charter_info from a dict keyed on slug).
    """
    by_slug = dict((slug, people) for slug, _label, people in group.personnel)
    group.chairs = by_slug.get("chair", [])
    ads = by_slug.get("ad", [])
    # First listed AD becomes the area director attribute.
    group.areadirector = ads[0] if ads else None
    group.techadvisors = by_slug.get("techadv", [])
    group.editors = by_slug.get("editor", [])
    group.secretaries = by_slug.get("secr", [])
def fill_in_wg_drafts(group):
    """Attach the group's draft and RFC aliases to *group*."""
    aliases = DocAlias.objects.filter(document__type="draft", document__group=group).select_related('document').order_by("name")
    group.drafts = []
    group.rfcs = []
    for a in aliases:
        if a.name.startswith("draft"):
            group.drafts.append(a)
        else:
            group.rfcs.append(a)
            # Obsoletes/updates relations, both directions, for RFC aliases.
            # NOTE(review): source is the Document but target is the alias —
            # presumably RelatedDocument.target is a DocAlias FK; confirm.
            a.rel = RelatedDocument.objects.filter(source=a.document,relationship_id__in=['obs','updates']).distinct()
            a.invrel = RelatedDocument.objects.filter(target=a,relationship_id__in=['obs','updates']).distinct()
# Cached for an hour: the charter dump is expensive and changes rarely.
@cache_page ( 60 * 60 )
def wg_charters(request, group_type):
    """Plain-text dump of all active WG charters, grouped by area."""
    if group_type != "wg":
        raise Http404
    areas = Group.objects.filter(type="area", state="active").order_by("name")
    for area in areas:
        area.ads = sorted(roles(area, "ad"), key=extract_last_name)
        area.groups = Group.objects.filter(parent=area, type="wg", state="active").order_by("name")
        for group in area.groups:
            fill_in_charter_info(group)
            fill_in_wg_roles(group)
            fill_in_wg_drafts(group)
            group.area = area
    return render(request, 'group/1wg-charters.txt',
                  { 'areas': areas },
                  content_type='text/plain; charset=UTF-8')
# Cached for an hour, like wg_charters.
@cache_page(60 * 60)
def wg_charters_by_acronym(request, group_type):
    """Plain-text dump of all active WG charters, ordered by acronym."""
    if group_type != "wg":
        raise Http404
    areas = dict((a.id, a) for a in Group.objects.filter(type="area", state="active").order_by("name"))
    # .values() instead of the Python-2-only .itervalues().
    for area in areas.values():
        area.ads = sorted(roles(area, "ad"), key=extract_last_name)
    groups = Group.objects.filter(type="wg", state="active").exclude(parent=None).order_by("acronym")
    for group in groups:
        fill_in_charter_info(group)
        fill_in_wg_roles(group)
        fill_in_wg_drafts(group)
        group.area = areas.get(group.parent_id)
    return render(request, 'group/1wg-charters-by-acronym.txt',
                  { 'groups': groups },
                  content_type='text/plain; charset=UTF-8')
def active_groups(request, group_type=None):
    """Dispatch to the per-type "active groups" listing view."""
    if not group_type:
        return active_group_types(request)
    handlers = {
        "wg": active_wgs,
        "rg": active_rgs,
        "ag": active_ags,
        "area": active_areas,
        "team": active_teams,
        "dir": active_dirs,
        "program": active_programs,
    }
    try:
        handler = handlers[group_type]
    except KeyError:
        # Unknown group types are not listable.
        raise Http404
    return handler(request)
def active_group_types(request):
    """List the group types for which an "active groups" page exists."""
    listed_slugs = ['wg', 'rg', 'ag', 'team', 'dir', 'area', 'program']
    grouptypes = GroupTypeName.objects.filter(slug__in=listed_slugs)
    return render(request, 'group/active_groups.html', {'grouptypes': grouptypes})
def active_dirs(request):
    """List active directorates with chairs, ADs and secretaries."""
    dirs = Group.objects.filter(type="dir", state="active").order_by("name")
    for group in dirs:
        group.chairs = sorted(roles(group, "chair"), key=extract_last_name)
        group.ads = sorted(roles(group, "ad"), key=extract_last_name)
        group.secretaries = sorted(roles(group, "secr"), key=extract_last_name)
    return render(request, 'group/active_dirs.html', {'dirs' : dirs })
def active_teams(request):
    """List active teams with their chairs, sorted by last name."""
    teams = Group.objects.filter(type="team", state="active").order_by("name")
    for team in teams:
        team.chairs = sorted(roles(team, "chair"), key=extract_last_name)
    return render(request, 'group/active_teams.html', {'teams': teams})
def active_programs(request):
    """List active programs with their leads, sorted by last name."""
    programs = Group.objects.filter(type="program", state="active").order_by("name")
    for program in programs:
        program.leads = sorted(roles(program, "lead"), key=extract_last_name)
    return render(request, 'group/active_programs.html', {'programs': programs})
def active_areas(request):
    """List the currently active areas."""
    active = Group.objects.filter(type="area", state="active").order_by("name")
    return render(request, 'group/active_areas.html', {'areas': active})
def active_wgs(request):
    """List all active WGs grouped by parent area.

    Decorates each area with its ADs (ADs first, then pre-ADs, each group
    sorted by last name), its active WGs and its URLs; decorates each WG
    with chairs, an out-of-area-AD flag and a mailing list subscription URL.
    """
    areas = Group.objects.filter(type="area", state="active").order_by("name")
    for area in areas:
        # dig out information for template
        area.ads = (list(sorted(roles(area, "ad"), key=extract_last_name))
                    + list(sorted(roles(area, "pre-ad"), key=extract_last_name)))
        area.groups = Group.objects.filter(parent=area, type="wg", state="active").order_by("acronym")
        area.urls = area.groupurl_set.all().order_by("name")
        for group in area.groups:
            group.chairs = sorted(roles(group, "chair"), key=extract_last_name)
            # flag WGs whose responsible AD is not among the area's own ADs
            group.ad_out_of_area = group.ad_role() and group.ad_role().person not in [role.person for role in area.ads]
            # get the url for mailing list subscription
            if group.list_subscribe.startswith('http'):
                group.list_subscribe_url = group.list_subscribe
            elif group.list_email.endswith('@ietf.org'):
                group.list_subscribe_url = MAILING_LIST_INFO_URL % {'list_addr':group.list_email.split('@')[0]}
            else:
                group.list_subscribe_url = "mailto:"+group.list_subscribe
    return render(request, 'group/active_wgs.html', { 'areas':areas })
def active_rgs(request):
    """List active RGs together with the IRTF chair."""
    irtf = Group.objects.get(acronym="irtf")
    irtf.chair = roles(irtf, "chair").first()
    rgs = Group.objects.filter(type="rg", state="active").order_by("acronym")
    for rg in rgs:
        rg.chairs = sorted(roles(rg, "chair"), key=extract_last_name)
    return render(request, 'group/active_rgs.html', {'irtf': irtf, 'groups': rgs})
def active_ags(request):
    """List active AGs with their chairs and ADs."""
    ags = Group.objects.filter(type="ag", state="active").order_by("acronym")
    for ag in ags:
        ag.chairs = sorted(roles(ag, "chair"), key=extract_last_name)
        ag.ads = sorted(roles(ag, "ad"), key=extract_last_name)
    return render(request, 'group/active_ags.html', {'groups': ags})
def bofs(request, group_type):
    """List groups of the given type that are in the BOF state."""
    bof_groups = Group.objects.filter(type=group_type, state="bof")
    return render(request, 'group/bofs.html', {'groups': bof_groups})
def chartering_groups(request):
    """List WGs and RGs whose charter is currently in the chartering pipeline.

    A group is "chartering" when its charter document is in any used charter
    state other than "approved" or "notrev".
    """
    charter_states = State.objects.filter(used=True, type="charter").exclude(slug__in=("approved", "notrev"))
    group_types = GroupTypeName.objects.filter(slug__in=("wg", "rg"))
    for t in group_types:
        t.chartering_groups = Group.objects.filter(type=t, charter__states__in=charter_states).select_related("state", "charter").order_by("acronym")
        # permission is checked against the first chartering group as a
        # representative of the whole type
        if t.chartering_groups.exists():
            t.can_manage = can_manage_group_type(request.user, t.chartering_groups.first())
        else:
            t.can_manage = False
        for g in t.chartering_groups:
            g.chartering_type = get_chartering_type(g.charter)
    return render(request, 'group/chartering_groups.html',
                  dict(charter_states=charter_states,
                       group_types=group_types))
def concluded_groups(request):
    """List concluded WGs, RGs and BOFs with start and conclusion dates."""
    sections = OrderedDict()
    sections['WGs'] = Group.objects.filter(type='wg', state="conclude").select_related("state", "charter").order_by("parent__name","acronym")
    sections['RGs'] = Group.objects.filter(type='rg', state="conclude").select_related("state", "charter").order_by("parent__name","acronym")
    sections['BOFs'] = Group.objects.filter(type='wg', state="bof-conc").select_related("state", "charter").order_by("parent__name","acronym")
    for name, groups in sections.items():
        # add start/conclusion date
        d = dict((g.pk, g) for g in groups)
        for g in groups:
            g.start_date = g.conclude_date = None
        # ordered so the LAST assignment wins: the earliest "active" event
        # becomes start_date (events are iterated newest-first) ...
        for e in ChangeStateGroupEvent.objects.filter(group__in=groups, state="active").order_by("-time"):
            d[e.group_id].start_date = e.time
        # ... and the latest "conclude" event becomes conclude_date
        for e in ChangeStateGroupEvent.objects.filter(group__in=groups, state="conclude").order_by("time"):
            d[e.group_id].conclude_date = e.time
    return render(request, 'group/concluded_groups.html',
                  dict(sections=sections))
def prepare_group_documents(request, group, clist):
    """Split the community list's tracked documents into the group's own
    documents and related documents.

    Returns (docs, meta, docs_related, meta_related) for the document tables.
    """
    found_docs, meta = prepare_document_table(request, docs_tracked_by_community_list(clist), request.GET)
    docs = []
    docs_related = []
    # split results
    for d in found_docs:
        # non-WG drafts and call for WG adoption are considered related
        if (d.group != group
                or (d.stream_id and d.get_state_slug("draft-stream-%s" % d.stream_id) in ("c-adopt", "wg-cand"))):
            d.search_heading = "Related Internet-Draft"
            docs_related.append(d)
        else:
            docs.append(d)
    meta_related = meta.copy()
    return docs, meta, docs_related, meta_related
def group_home(request, acronym, group_type=None):
    """Redirect to the group's default tab."""
    group = get_group_or_404(acronym, group_type)
    url_kwargs = {"acronym": group.acronym}
    if group_type:
        url_kwargs["group_type"] = group_type
    return HttpResponseRedirect(urlreverse(group.features.default_tab, kwargs=url_kwargs))
def group_documents(request, acronym, group_type=None):
    """Show the group's documents page (own and related documents)."""
    group = get_group_or_404(acronym, group_type)
    if not group.features.has_documents:
        raise Http404
    clist = get_object_or_404(CommunityList, group=group)
    docs, meta, docs_related, meta_related = prepare_group_documents(request, group, clist)
    # truthy when the logged-in user subscribes to this community list
    subscribed = request.user.is_authenticated and EmailSubscription.objects.filter(community_list=clist, email__person__user=request.user)
    context = construct_group_menu_context(request, group, "documents", group_type, {
        'docs': docs,
        'meta': meta,
        'docs_related': docs_related,
        'meta_related': meta_related,
        'subscribed': subscribed,
        'clist': clist,
    })
    return render(request, 'group/group_documents.html', context)
def group_documents_txt(request, acronym, group_type=None):
    """Return tabulator-separated rows with documents for group.

    Each row is "<state prefix>\t<rfc number or draft-name-rev>\t<title>".
    """
    group = get_group_or_404(acronym, group_type)
    if not group.features.has_documents:
        raise Http404
    clist = get_object_or_404(CommunityList, group=group)
    docs, meta, docs_related, meta_related = prepare_group_documents(request, group, clist)
    for d in docs:
        d.prefix = d.get_state().name
    for d in docs_related:
        d.prefix = u"Related %s" % d.get_state().name
    rows = []
    for d in itertools.chain(docs, docs_related):
        rfc_number = d.rfc_number()
        # published RFCs are listed by number, everything else by name-rev;
        # identity comparison ("is not None") instead of "!= None"
        if rfc_number is not None:
            name = rfc_number
        else:
            name = "%s-%s" % (d.name, d.rev)
        rows.append(u"\t".join((d.prefix, name, clean_whitespace(d.title))))
    return HttpResponse(u"\n".join(rows), content_type='text/plain; charset=UTF-8')
def group_about(request, acronym, group_type=None):
    """Show the group's about page: charter info, milestones in review,
    status update and management links."""
    group = get_group_or_404(acronym, group_type)
    fill_in_charter_info(group)
    e = group.latest_event(type__in=("changed_state", "requested_close",))
    # a close is pending when the most recent of those events is a
    # "requested_close" and the group is not already concluded
    requested_close = group.state_id != "conclude" and e and e.type == "requested_close"
    can_manage = can_manage_group_type(request.user, group)
    charter_submit_url = ""
    if group.features.has_chartering_process:
        charter_submit_url = urlreverse('ietf.doc.views_charter.submit', kwargs={ "name": charter_name_for_group(group) })
    can_provide_update = can_provide_status_update(request.user, group)
    status_update = group.latest_event(type="status_update")
    return render(request, 'group/group_about.html',
                  construct_group_menu_context(request, group, "about", group_type, {
                      "milestones_in_review": group.groupmilestone_set.filter(state="review"),
                      "milestone_reviewer": milestone_reviewer_for_group_type(group_type),
                      "requested_close": requested_close,
                      "can_manage": can_manage,
                      "can_provide_status_update": can_provide_update,
                      "status_update": status_update,
                      "charter_submit_url": charter_submit_url,
                      "editable_roles": roles_for_group_type(group_type),
                  }))
def all_status(request):
    """Show the latest status update for every active/BOF WG and active/proposed RG."""
    wgs = Group.objects.filter(type='wg', state__in=['active', 'bof'])
    rgs = Group.objects.filter(type='rg', state__in=['active', 'proposed'])
    wg_reports = []
    for wg in wgs:
        e = wg.latest_event(type='status_update')
        if e:
            wg_reports.append(e)
    # Order by area acronym, newest update first within each area.  The
    # original keyed the secondary sort on (now() - time), re-reading the
    # clock for every element; a stable two-pass sort orders identically
    # without depending on the clock.
    wg_reports.sort(key=lambda x: x.time, reverse=True)
    wg_reports.sort(key=lambda x: x.group.parent.acronym)
    rg_reports = []
    for rg in rgs:
        e = rg.latest_event(type='status_update')
        if e:
            rg_reports.append(e)
    return render(request, 'group/all_status.html',
                  { 'wg_reports': wg_reports,
                    'rg_reports': rg_reports,
                  }
                  )
def group_about_status(request, acronym, group_type=None):
    """Show the group's latest status update."""
    group = get_group_or_404(acronym, group_type)
    context = {
        'group': group,
        'status_update': group.latest_event(type='status_update'),
        'can_provide_status_update': can_provide_status_update(request.user, group),
    }
    return render(request, 'group/group_about_status.html', context)
def group_about_status_meeting(request, acronym, num, group_type=None):
    """Show the group's status update as of a specific meeting."""
    meeting = get_meeting(num)
    group = get_group_or_404(acronym, group_type)
    context = {
        'group': group,
        'status_update': group.status_for_meeting(meeting),
        'meeting': meeting,
    }
    return render(request, 'group/group_about_status_meeting.html', context)
class StatusUpdateForm(forms.Form):
    """Form for entering a group status update as text or a .txt upload."""
    # strip=False keeps leading/trailing whitespace in the typed update
    content = forms.CharField(widget=forms.Textarea, label='Status update', help_text = 'Edit the status update', required=False, strip=False)
    txt = forms.FileField(label='.txt format', help_text='Or upload a .txt file', required=False)
    def clean_content(self):
        # normalize CRLF line endings to LF
        return self.cleaned_data['content'].replace('\r','')
    def clean_txt(self):
        return get_cleaned_text_file_content(self.cleaned_data["txt"])
def group_about_status_edit(request, acronym, group_type=None):
    """Edit the group's status update.

    Only users passing can_provide_status_update() may edit; others get 404.
    On a valid submit a new 'status_update' group event is recorded and the
    user is redirected to the about page.
    """
    group = get_group_or_404(acronym, group_type)
    if not can_provide_status_update(request.user, group):
        raise Http404
    old_update = group.latest_event(type='status_update')
    login = request.user.person
    if request.method == 'POST':
        if 'submit_response' in request.POST:
            form = StatusUpdateForm(request.POST, request.FILES)
            if form.is_valid():
                # an uploaded file takes precedence over the textarea
                from_file = form.cleaned_data['txt']
                if from_file:
                    update_text = from_file
                else:
                    update_text = form.cleaned_data['content']
                group.groupevent_set.create(
                    by=login,
                    type='status_update',
                    desc=update_text,
                )
                return redirect('ietf.group.views.group_about',acronym=group.acronym)
        else:
            form = None
    else:
        form = None
    if not form:
        # initial GET (or non-submit POST): prefill with the current update
        form = StatusUpdateForm(initial={"content": old_update.desc if old_update else ""})
    return render(request, 'group/group_about_status_edit.html',
                  {
                      'form': form,
                      'group':group,
                  }
                  )
def check_group_email_aliases():
    """Sanity-check the group virtual alias file.

    Returns True as soon as the file looks healthy: more than 50 lines match
    the expected "expand-<acronym>-<type>@... <expansion>" form and matching
    lines make up more than a third of the lines seen so far.
    """
    # raw string: the original plain string contained the invalid escape
    # sequence "\w" (a DeprecationWarning/SyntaxWarning on modern Python)
    pattern = re.compile(r'expand-(.*?)(-\w+)@.*? +(.*)$')
    tot_count = 0
    good_count = 0
    with open(settings.GROUP_VIRTUAL_PATH, "r") as virtual_file:
        # iterate the file directly instead of materializing readlines()
        for line in virtual_file:
            m = pattern.match(line)
            tot_count += 1
            if m:
                good_count += 1
            if good_count > 50 and tot_count < 3 * good_count:
                return True
    return False
def get_group_email_aliases(acronym, group_type):
    """Parse the group virtual alias file into a list of alias dicts.

    Each entry has 'acronym', 'alias_type' (e.g. '-chairs') and 'expansion'.
    When acronym is given only that group's aliases are returned; otherwise
    all aliases, optionally restricted to groups of group_type.
    """
    # raw strings: the originals contained the invalid "\w" escape
    if acronym:
        pattern = re.compile(r'expand-(%s)(-\w+)@.*? +(.*)$' % acronym)
    else:
        pattern = re.compile(r'expand-(.*?)(-\w+)@.*? +(.*)$')
    aliases = []
    with open(settings.GROUP_VIRTUAL_PATH, "r") as virtual_file:
        for line in virtual_file:
            m = pattern.match(line)
            if m:
                # with no acronym and no type filter accept everything;
                # otherwise require a group of the requested type
                if acronym or not group_type or Group.objects.filter(acronym=m.group(1), type__slug=group_type):
                    aliases.append({'acronym': m.group(1), 'alias_type': m.group(2), 'expansion': m.group(3)})
    return aliases
def email(request, acronym, group_type=None):
    """Show the group's email aliases and relevant address expansions."""
    group = get_group_or_404(acronym, group_type)
    aliases = get_group_email_aliases(acronym, group_type)
    expansions = gather_relevant_expansions(group=group)
    context = construct_group_menu_context(request, group, "email expansions", group_type, {
        'expansions': expansions,
        'aliases': aliases,
        'group': group,
        'ietf_domain': settings.IETF_DOMAIN,
    })
    return render(request, 'group/email.html', context)
def history(request, acronym, group_type=None):
    """Show the group's event history, newest first."""
    group = get_group_or_404(acronym, group_type)
    event_list = group.groupevent_set.all().select_related('by').order_by('-time', '-id')
    context = construct_group_menu_context(request, group, "history", group_type, {
        "events": event_list,
    })
    return render(request, 'group/history.html', context)
def materials(request, acronym, group_type=None):
    """Show the group's materials (slides, minutes, ...) grouped by type."""
    group = get_group_or_404(acronym, group_type)
    if not group.features.has_materials:
        raise Http404
    docs = get_group_materials(group).order_by("type__order", "-time").select_related("type")
    # preserve first-seen type order while bucketing documents by type
    doc_types = OrderedDict()
    for d in docs:
        doc_types.setdefault(d.type, []).append(d)
    return render(request, 'group/materials.html',
                  construct_group_menu_context(request, group, "materials", group_type, {
                      "doc_types": doc_types.items(),
                      "can_manage_materials": can_manage_materials(request.user, group)
                  }))
def nodename(name):
    """Return *name* with dashes replaced by underscores (graphviz-safe id)."""
    return name.replace('-', '_')
class Edge(object):
    """Hashable wrapper around a RelatedDocument for the dependency graph.

    Hash/equality are derived from (source name, target name, relationship
    slug), so duplicate relations collapse when collected into a set.
    """
    def __init__(self,relateddocument):
        self.relateddocument=relateddocument
    def __hash__(self):
        # hash of "source|target|relationship" component hashes
        return hash("|".join([str(hash(nodename(self.relateddocument.source.name))),
                              str(hash(nodename(self.relateddocument.target.document.name))),
                              self.relateddocument.relationship.slug]))
    def __eq__(self,other):
        return self.__hash__() == other.__hash__()
    def sourcename(self):
        # graphviz-safe name of the referencing document
        return nodename(self.relateddocument.source.name)
    def targetname(self):
        # graphviz-safe name of the referenced document
        return nodename(self.relateddocument.target.document.name)
    def styles(self):
        # Note that the old style=dotted, color=red styling is never used
        if self.relateddocument.is_downref():
            return { 'color':'red','arrowhead':'normalnormal' }
        else:
            # per-relationship edge styling
            styles = { 'refnorm' : { 'color':'blue' },
                       'refinfo' : { 'color':'green' },
                       'refold' : { 'color':'orange' },
                       'refunk' : { 'style':'dashed' },
                       'replaces': { 'color':'pink', 'style':'dashed', 'arrowhead':'diamond' },
                     }
            return styles[self.relateddocument.relationship.slug]
def get_node_styles(node, group):
    """Return graphviz styling attributes (shape, style, color, label) for *node*.

    *group* is the group whose dependency graph is being drawn; nodes from
    other groups get a different color and their group acronym in the label.
    """
    styles = dict()
    # Shape and style (note that old diamond shape is never used
    styles['style'] = 'filled'
    if node.get_state('draft').slug == 'rfc':
        styles['shape'] = 'box'
    elif node.get_state('draft-iesg') and not node.get_state('draft-iesg').slug in ['watching','dead']:
        styles['shape'] = 'parallelogram'
    elif node.get_state('draft').slug == 'expired':
        styles['shape'] = 'house'
        styles['style'] = 'solid'
        styles['peripheries'] = 3
    elif node.get_state('draft').slug == 'repl':
        styles['shape'] = 'ellipse'
        styles['style'] = 'solid'
        styles['peripheries'] = 3
    else:
        pass # quieter form of styles['shape'] = 'ellipse'
    # Color (note that the old 'Flat out red' is never used
    if node.group.acronym == 'none':
        styles['color'] = '"#FF800D"' # orangeish
    elif node.group == group:
        styles['color'] = '"#0AFE47"' # greenish
    else:
        styles['color'] = '"#9999FF"' # blueish
    # Label: strip the common draft prefix, break after the first word
    label = node.name
    if label.startswith('draft-'):
        if label.startswith('draft-ietf-'):
            label = label[11:]
        else:
            label = label[6:]
        try:
            t = label.index('-')
            label = "%s\\n%s" % (label[:t], label[t+1:])
        except ValueError:
            # no '-' left after stripping the prefix; the original used a
            # bare "except:" here, which also swallowed unrelated errors
            pass
    if node.group.acronym != 'none' and node.group != group:
        label = "(%s) %s" % (node.group.acronym, label)
    if node.get_state('draft').slug == 'rfc':
        label = "%s\\n(%s)" % (label, node.canonical_name())
    styles['label'] = '"%s"' % label
    return styles
def make_dot(group):
    """Build the graphviz "dot" source for the group's draft dependency graph.

    Collects reference relations from the group's drafts, excluding
    RFC-to-RFC references, expired/replaced/removed sources and two
    high-fan-in "attractor" RFCs; "replaces" relations pointing at targets
    already in the graph are added too.
    """
    references = Q(source__group=group,source__type='draft',relationship__slug__startswith='ref')
    both_rfcs = Q(source__states__slug='rfc',target__document__states__slug='rfc')
    inactive = Q(source__states__slug__in=['expired','repl'])
    # two universally-referenced RFCs that would clutter the graph
    attractor = Q(target__name__in=['rfc5000','rfc5741'])
    removed = Q(source__states__slug__in=['auth-rm','ietf-rm'])
    relations = RelatedDocument.objects.filter(references).exclude(both_rfcs).exclude(inactive).exclude(attractor).exclude(removed)
    edges = set()
    for x in relations:
        target_state = x.target.document.get_state_slug('draft')
        # keep edges to drafts, and downrefs even when the target is an RFC
        if target_state!='rfc' or x.is_downref():
            edges.add(Edge(x))
    replacements = RelatedDocument.objects.filter(relationship__slug='replaces',target__document__in=[x.relateddocument.target.document for x in edges])
    for x in replacements:
        edges.add(Edge(x))
    nodes = set([x.relateddocument.source for x in edges]).union([x.relateddocument.target.document for x in edges])
    for node in nodes:
        node.nodename=nodename(node.name)
        node.styles = get_node_styles(node,group)
    return render_to_string('group/dot.txt',
                            dict( nodes=nodes, edges=edges )
                            )
@cache_page(60 * 60)
def dependencies(request, acronym, group_type=None, output_type="pdf"):
    """Render the group's document dependency graph as dot source, pdf or svg.

    Pipes the dot source through unflatten and dot; the response is cached
    for an hour.
    """
    group = get_group_or_404(acronym, group_type)
    if not group.features.has_documents or output_type not in ["dot", "pdf", "svg"]:
        raise Http404
    dothandle, dotname = mkstemp()
    os.close(dothandle)
    dotfile = open(dotname, "w")
    # NOTE(review): make_dot() is called a second time below for the "dot"
    # case, repeating the (expensive) graph construction
    dotfile.write(make_dot(group))
    dotfile.close()
    if (output_type == "dot"):
        return HttpResponse(make_dot(group),
                            content_type='text/plain; charset=UTF-8'
                            )
    # unflatten spreads the graph out before dot renders it
    unflathandle, unflatname = mkstemp()
    os.close(unflathandle)
    outhandle, outname = mkstemp()
    os.close(outhandle)
    pipe("%s -f -l 10 -o %s %s" % (settings.UNFLATTEN_BINARY, unflatname, dotname))
    pipe("%s -T%s -o %s %s" % (settings.DOT_BINARY, output_type, outname, unflatname))
    # NOTE(review): pdf/svg output is read back in text mode ("r"); on
    # Python 3, binary pdf output would need "rb" -- confirm target runtime
    outhandle = open(outname, "r")
    out = outhandle.read()
    outhandle.close()
    os.unlink(outname)
    os.unlink(unflatname)
    os.unlink(dotname)
    # map the requested format to its MIME type for the response
    if (output_type == "pdf"):
        output_type = "application/pdf"
    elif (output_type == "svg"):
        output_type = "image/svg+xml"
    return HttpResponse(out, content_type=output_type)
def email_aliases(request, acronym=None, group_type=None):
    """Show email aliases, either for one group or for all groups."""
    group = get_group_or_404(acronym, group_type) if acronym else None
    # require login for the overview page, but not for the group-specific
    # pages
    if not acronym and not request.user.is_authenticated:
        return redirect('%s?next=%s' % (settings.LOGIN_URL, request.path))
    aliases = get_group_email_aliases(acronym, group_type)
    return render(request, 'group/email_aliases.html',
                  {'aliases': aliases, 'ietf_domain': settings.IETF_DOMAIN, 'group': group})
def meetings(request, acronym=None, group_type=None):
    """Show the group's future, in-progress and past meeting sessions,
    limited to roughly the last four years."""
    group = get_group_or_404(acronym,group_type) if acronym else None
    four_years_ago = datetime.datetime.now()-datetime.timedelta(days=4*365)
    # NOTE(review): when acronym is None, group is None and the next line
    # raises AttributeError -- presumably this view is only routed with an
    # acronym; confirm against the URL configuration
    sessions = group.session_set.filter(status__in=['sched','schedw','appr','canceled'],
                                        meeting__date__gt=four_years_ago,
                                        type__in=['session','plenary','other'])
    future, in_progress, past = group_sessions(sessions)
    # secretariat/ADs, or the group's own chairs/secretaries, may edit
    can_edit = has_role(request.user,["Secretariat","Area Director"]) or group.has_role(request.user,["Chair","Secretary"])
    return render(request,'group/meetings.html',
                  construct_group_menu_context(request, group, "meetings", group_type, {
                      'group':group,
                      'future':future,
                      'in_progress':in_progress,
                      'past':past,
                      'can_edit':can_edit,
                  }))
def derived_archives(request, acronym=None, group_type=None):
    """Show the group's mail archive page, deriving the list name from the
    group's list_archive URL.

    Recognizes both mailarchive.ietf.org search URLs (?email_list=<name>)
    and browse URLs (/arch/browse/<name>/).
    """
    group = get_group_or_404(acronym, group_type) if acronym else None
    list_acronym = None
    # raw strings: the originals contained the invalid escapes "\w"/"\Z"
    m = re.search(r'mailarchive.ietf.org/arch/search/?\?email_list=([-\w]+)\Z', group.list_archive)
    if m:
        list_acronym = m.group(1)
    if not list_acronym:
        m = re.search(r'mailarchive.ietf.org/arch/browse/([-\w]+)/?\Z', group.list_archive)
        if m:
            list_acronym = m.group(1)
    return render(request, 'group/derived_archives.html',
                  construct_group_menu_context(request, group, "list archive", group_type, {
                      'group': group,
                      'list_acronym': list_acronym,
                  }))
def chair_photos(request, group_type=None):
    """Photo page for all chairs of active groups of the given type."""
    chair_roles = Role.objects.filter(group__type=group_type, group__state='active', name_id='chair')
    chair_roles = sorted(chair_roles,
                         key=lambda r: r.person.last_name() + r.person.name + r.group.acronym)
    for r in chair_roles:
        # used by the template to bucket photos alphabetically
        r.last_initial = r.person.last_name()[0]
    return render(request, 'group/all_photos.html',
                  {'group_type': group_type, 'role': 'Chair', 'roles': chair_roles})
def reorder_roles(roles, role_names):
    """Return *roles* reordered so that roles named in *role_names* come
    first, in that order, followed by the remaining roles in original order.
    """
    # the original named this accumulator "list", shadowing the builtin
    ordered = []
    for name in role_names:
        ordered += [r for r in roles if r.name_id == name]
    ordered += [r for r in roles if r not in ordered]
    return ordered
def group_photos(request, group_type=None, acronym=None):
    """Photo page for all role holders of one group, most significant roles
    first (ordering depends on the group type)."""
    group = get_object_or_404(Group, acronym=acronym)
    roles = sorted(Role.objects.filter(group__acronym=acronym),key=lambda x: x.name.name+x.person.last_name())
    if group.type_id in ['wg', 'rg', ]:
        roles = reorder_roles(roles, ['chair', 'secr'])
    elif group.type_id in ['nomcom', ]:
        roles = reorder_roles(roles, ['chair', 'member', 'advisor', ])
    elif group.type_id in ['team', ]:
        roles = reorder_roles(roles, ['chair', 'member', 'matman', ])
    elif group.type_id in ['sdo', ]:
        roles = reorder_roles(roles, ['liaiman', ])
    else:
        pass
    for role in roles:
        # used by the template to bucket photos alphabetically
        role.last_initial = role.person.last_name()[0]
    return render(request, 'group/group_photos.html',
                  construct_group_menu_context(request, group, "photos", group_type, {
                      'group_type': group_type,
                      'roles': roles,
                      'group':group }))
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import (division, print_function, unicode_literals)
import warnings, sys, os, inspect, fileinput, re
from future.builtins import *
__all__ = ['main']
# show each warning once per module
warnings.simplefilter('module')
# sub-directory (under the optional PATH_PFX prefix) holding the data sets
setX = 'problems_perl'
def xform_args(args, path_pfx):
    '''Return the file names in *args*, each prefixed with
    "<path_pfx>/<setX>/" when a prefix is given, or unchanged when
    path_pfx is None.

    Returns a list (the original returned a lazy map object; a list is a
    drop-in replacement for the callers, which unpack it immediately).
    '''
    # identity check ("is None"), not the Yoda comparison "None == path_pfx"
    if path_pfx is None:
        return list(args)
    return ["{0}/{1}/{2}".format(path_pfx, setX, f) for f in args]
def _create_contents_generator(dirs):
'''Create contents generator'''
for dir1 in dirs:
for root, dirnames, filenames in os.walk(dir1):
for file1 in filenames:
yield os.path.join(root, file1)
def _create_files_dicts(dir1, dir2):
    '''Create dictionaries of all files(directories 1 & 2) for two
    directories.

    Each dict maps basename -> full path; a basename present in only one
    directory maps to None in the other dict.
    '''
    files_a = {}
    files_b = {}
    for path in _create_contents_generator([dir1]):
        base = os.path.basename(path)
        files_a[base] = path
        files_b[base] = None
    for path in _create_contents_generator([dir2]):
        base = os.path.basename(path)
        files_b[base] = path
        if base not in files_a:
            files_a[base] = None
    return (files_a, files_b)
def create_diff_generator(dirs, opt_differ=True, opt_same=True, opt_dir1=True,
        opt_dir2=True, path_pfx = os.environ.get('PATH_PFX')):
    '''Yield one status line per file name found in either directory.

        <<< name   only in dir1        >>> name   only in dir2
        < name >   in both, contents differ
        > name <   in both, contents identical

    NOTE: path_pfx's default is captured from the environment at import
    time, not per call (kept as-is for compatibility).
    '''
    import difflib
    (files_a, files_b) = _create_files_dicts(*xform_args(dirs, path_pfx))
    for name in sorted(files_a.keys()):
        in_a, in_b = files_a[name], files_b[name]
        if opt_dir1 and not in_b:
            yield '<<< {0}'.format(name)
        if opt_dir2 and not in_a:
            yield '>>> {0}'.format(name)
        if in_a and in_b:
            with open(in_a, 'rt') as file_f, open(in_b, 'rt') as file_t:
                # Materialize the diff exactly once.  The original called
                # list(diff) in both branch conditions on a *generator*,
                # relying on the second call seeing an already-exhausted
                # iterator -- correct only by accident of the branch order.
                delta = list(difflib.context_diff(file_f.readlines(),
                        file_t.readlines(), in_a, in_b))
            if opt_differ and delta:
                yield '< {0} >'.format(name)
            elif opt_same and not delta:
                yield '> {0} <'.format(name)
def main(argv = None):
    '''Main entry.

    Performs diff on similar named files in two cmdline arg directories and
    indicates status if file names and/or contents do/don't match.

    Uses the following symbols around file names to indicate status:
        unmatched file in dir1     : <<< file1
        unmatched file in dir2     : >>> file2
        similar name but different : < file >
        similar name and same      : > file <
    demo: $ script [-ds12] <path>/dataA <path>/dataB
    '''
    # the original crashed with a TypeError when invoked as main() because
    # filter(None) was applied to the default argv=None
    if argv is None:
        argv = sys.argv[1:]
    # split positional paths from "-..." option strings
    paths = [a for a in argv if not re.search('^-.*', str(a))]
    opts = [a for a in argv if re.search('^-.*', str(a))]
    # with no options all create_diff_generator defaults apply; with any
    # option given, each output category must be requested explicitly
    opts_dict = {} if not opts else {
        'opt_differ': any('d' in o for o in opts),
        'opt_same': any('s' in o for o in opts),
        'opt_dir1': any('1' in o for o in opts),
        'opt_dir2': any('2' in o for o in opts)}
    for line in create_diff_generator(list(paths), **opts_dict):
        print(line)
    return 0
# script entry point: exit with main's return code
if '__main__' == __name__:
    raise SystemExit(main(sys.argv[1:]))
|
import numpy as np
import pandas as pd
import tensorflow as tf
from tensorstream.finance.signals import MovingAverageConvergenceDivergenceSignal as MACDSignal
from tensorstream.tests import TestCase
class MACDSignalSpec(TestCase):
    """Tests MovingAverageConvergenceDivergenceSignal against reference
    values from a spreadsheet (uses the TensorFlow 1.x graph-mode API)."""
    def setUp(self):
        # expected inputs/outputs live in macd_signals.ods next to this test
        self.sheets = self.read_ods(
            self.from_test_res('macd_signals.ods', __file__))
    def test_single_dim(self):
        s = self.sheets['Sheet1']
        prices_ts = s['Close']
        expected_signals = s['Trade Signals']
        # NOTE(review): (26, 12, 9) looks like the standard MACD
        # slow/fast/signal periods -- confirm the argument order against
        # MACDSignal's signature
        signal = MACDSignal(26, 12, 9)
        prices = tf.placeholder(tf.float32)
        signals_ts, _, _ = signal(prices)
        with tf.Session() as sess:
            output_ts = sess.run(signals_ts, {
                prices: prices_ts
            })
        # spreadsheet values are rounded, hence the 3-decimal tolerance
        np.testing.assert_almost_equal(output_ts,
            expected_signals.values, decimal=3)
|
# Unit price per SKU letter; the keys double as the set of legal SKUs.
PRICE_TABLE = {
    'A': 50,
    'B': 30,
    'C': 20,
    'D': 15,
    'E': 40,
    'F': 10,
    'G': 20,
    'H': 10,
    'I': 35,
    'J': 60,
    'K': 70,
    'L': 90,
    'M': 15,
    'N': 40,
    'O': 10,
    'P': 50,
    'Q': 30,
    'R': 50,
    'S': 20,
    'T': 20,
    'U': 40,
    'V': 50,
    'W': 20,
    'X': 17,
    'Y': 20,
    'Z': 21
}
def illegal_input(skus):
    """
    Check if there are any illegal characters.
    Acceptable characters are the SKU letters 'A'-'Z' (the keys of
    PRICE_TABLE); any other character is illegal.  (The previous docstring
    incorrectly claimed only "ABCD" were acceptable.)
    param skus: a String
    @return: True if any character is not a known SKU, else False
    """
    return any(letter not in PRICE_TABLE for letter in skus)
# noinspection PyUnusedLocal
# skus = unicode string
def checkout(skus):
    """
    Return the total price for the checkout basket.
    param skus: a String containing the SKUs of all the
    products in the basket.
    @return: an Integer representing the total checkout
    value of the items, or -1 for illegal input.
    """
    if illegal_input(skus):
        return -1
    # sort `PRICE_TABLE` keys by value (descending order)
    items_ordered_by_price = sorted(PRICE_TABLE, key=PRICE_TABLE.__getitem__, reverse=True)
    # orders skus based on the list `items_ordered_by_price`,
    # example:
    # INPUT skus = 'AAOLKK'
    # items_ordered_by_price = ['L', 'K', 'J', 'A', 'P', 'O']
    # OUTPUT skus = 'LKKAAO'
    skus = sorted(skus, key=lambda s: items_ordered_by_price.index(s))
    # convert to string
    skus = ''.join(skus)
    total_cost = 0
    pre_cost = 0        # running price of an in-progress S/T/X/Y/Z promo group
    discount = 0        # total discount earned by completed promo groups
    free_b_count = 0    # free Bs earned via "2E get one B free"
    free_m_count = 0    # free Ms earned via "3N get one M free"
    free_q_count = 0    # free Qs earned via "3R get one Q free"
    free_u_count = 0    # free Us earned via "3U get one U free"
    promo_items = []    # prices of S/T/X/Y/Z items seen so far
    basket = {}         # item -> {'total_qnt', 'qnt' (since last offer), 'cost'}
    for item in skus:
        try:
            basket[item]
        except KeyError:
            basket[item] = {'total_qnt': 0, 'qnt': 0, 'cost': 0}
        basket[item]['total_qnt'] += 1
        basket[item]['qnt'] += 1
        basket[item]['cost'] += PRICE_TABLE[item]
        # freebies
        if item == 'B' and free_b_count:
            free_b_count -= 1
            basket[item]['qnt'] -= 1
            basket[item]['cost'] -= PRICE_TABLE[item]
        elif item == 'M' and free_m_count:
            free_m_count -= 1
            basket[item]['qnt'] -= 1
            basket[item]['cost'] -= PRICE_TABLE[item]
        elif item == 'Q' and free_q_count:
            free_q_count -= 1
            basket[item]['qnt'] -= 1
            basket[item]['cost'] -= PRICE_TABLE[item]
        elif item == 'U' and free_u_count:
            free_u_count -= 1
            basket[item]['qnt'] -= 1
            basket[item]['cost'] -= PRICE_TABLE[item]
        # 10H for 80
        elif (
            item in ['H'] and
            basket[item]['qnt'] != 0 and
            basket[item]['qnt'] % 10 == 0
        ):
            if item == 'H':
                # the 5H-for-45 branch below already took 5 off at the 5th H,
                # so another 15 brings ten Hs from 100 down to 80
                basket[item]['qnt'] = 0
                basket[item]['cost'] -= 15
        # 5A for 200
        # 5H for 45
        # 5P for 200
        elif (
            item in ['A', 'H', 'P'] and
            basket[item]['qnt'] != 0 and
            basket[item]['qnt'] % 5 == 0
        ):
            if item == 'A':
                # reset the number of items
                basket[item]['qnt'] = 0
                basket[item]['cost'] -= 30
            elif item == 'H':
                basket[item]['cost'] -= 5
            elif item == 'P':
                basket[item]['cost'] -= 50
        # 3A for 130
        # 2F get one F free (3F for 2F)
        # 3N get one M free
        # 3Q for 80
        # 3R get one Q free
        # 3U get one U free
        # 3V for 130
        elif (
            item in ['A', 'F', 'U', 'N', 'Q', 'V', 'R'] and
            basket[item]['qnt'] != 0 and
            basket[item]['qnt'] % 3 == 0
        ):
            if item == 'A':
                basket[item]['cost'] -= 20
            elif item == 'Q':
                basket[item]['cost'] -= 10
            elif item == 'V':
                # reset the number of items
                basket[item]['qnt'] = 0
                basket[item]['cost'] -= 10
            elif item == 'F':
                basket[item]['cost'] -= PRICE_TABLE[item]
            elif item == 'U':
                free_u_count += 1
            elif item == 'N':
                free_m_count += 1
            elif item == 'R':
                free_q_count += 1
        # 2B for 45
        # 2E get one B free
        # 2K for 120
        # 2V for 90
        elif (
            item in ['B', 'E', 'K', 'V'] and
            basket[item]['qnt'] != 0 and
            basket[item]['qnt'] % 2 == 0
        ):
            if item == 'B':
                basket[item]['cost'] -= 15
            elif item == 'K':
                basket[item]['cost'] -= 20
            elif item == 'V':
                basket[item]['cost'] -= 10
            elif item == 'E':
                free_b_count += 1
        # buy any 3 of (S,T,X,Y,Z) for 45
        elif item in ['S', 'T', 'X', 'Y', 'Z']:
            promo_items.append(PRICE_TABLE[item])
    for values in basket.values():
        total_cost += values['cost']
    if promo_items:
        # every completed group of three replaces its summed price with 45
        for n_index, item_value in enumerate(promo_items, start=1):
            pre_cost += item_value
            if n_index % 3 == 0:
                discount += pre_cost - 45
                pre_cost = 0
        total_cost -= discount
    return total_cost
|
import numpy as np
import random
'''
just a little structure to hold extra data
'''
class ExperienceInput(object):
    """Plain container for one batch of transitions to feed to Experience."""
    def __init__(self, niter, prev_fs, rs, actions, next_fs, terminal):
        self.niter = niter          # number of transitions in this batch
        self.prev_fs = prev_fs      # feature vectors before the actions
        self.rs = rs                # rewards
        self.actions = actions      # actions taken
        self.next_fs = next_fs      # feature vectors after the actions
        self.terminal = terminal    # per-transition terminal flags
'''
This class stores an experience memory for deep RL.
'''
class Experience(object):
    """Ring-buffer experience memory for deep RL.

    Stores up to `size` transitions (prev state, action, reward, next
    state, terminal flag); `sample` draws a minibatch and adds the
    discounted bootstrap value for non-terminal transitions.
    """
    def __init__(self, size, xdim, ydim, discount=0.9):
        self._prev_x = np.zeros((size, xdim))
        self._next_x = np.zeros((size, xdim))
        self._y = np.zeros((size, ydim))
        self._r = np.zeros((size, 1))
        self._terminal = np.zeros((size, 1), dtype=bool)
        self._idx = 0        # next write position in the ring buffer
        self._length = 0     # number of valid entries (<= size)
        self._size = size
        self._discount = discount
        # hooks used by sample() to evaluate max_a Q(s', a); expected to be
        # set by the training code before sampling non-terminal transitions
        self.model_output = None
        self.model_input = None
        self.max_output = None

    def addInput(self, data):
        """Append the transitions in `data` (an ExperienceInput), wrapping
        around and overwriting the oldest entries when full."""
        # `range`, not the Python-2-only `xrange` (sample() below already
        # used `range`, so this makes the class consistent and Python-3-safe)
        for i in range(data.niter):
            idx = (self._idx + i) % self._size
            self._prev_x[idx] = data.prev_fs[i]
            self._next_x[idx] = data.next_fs[i]
            self._r[idx] = data.rs[i]
            self._terminal[idx] = data.terminal[i]
            self._y[idx] = data.actions[i]
        self._idx += data.niter
        if self._length < self._size and self._idx > self._length:
            self._length = self._idx
        if self._length > self._size:
            self._length = self._size
        self._idx %= self._size

    def initFromArrays(self, x, y):
        """Load supervised training data.

        This is handled a little differently from our other data: every
        entry is marked terminal and gets the maximum reward.
        """
        length = x.shape[0]
        self._prev_x[:length, :] = x
        self._terminal[:length] = True
        self._y[:length, :] = y
        self._r[:length] = 1.  # max reward for supervised learning
        self._idx = length
        self._length = length

    def sample(self, num):
        """Withdraw a single minibatch of `num` transitions.

        Also computes the rewards for training: for non-terminal
        transitions, r += discount * max_output(next state).
        """
        idx = random.sample(range(self._length), num)
        x = self._prev_x[idx]
        y = self._y[idx]
        r = self._r[idx]
        non_terminal_idx = np.squeeze(self._terminal[idx] == False)
        x1 = self._next_x[idx, :]
        x1nt = x1[non_terminal_idx]
        num_non_terminal = len(x1nt)
        if num_non_terminal > 0:
            # bootstrap the reward with the discounted best next-state value
            best = self.max_output.eval(feed_dict={self.model_input: x1nt})
            r[non_terminal_idx] += (self._discount * best).reshape(num_non_terminal, 1)
        return x, y, r
|
import logging
def get_logger(filename):
    """Return a DEBUG-level logger named "logger" that also appends
    timestamped records to *filename*.

    NOTE(review): basicConfig() configures the ROOT logger and is a no-op
    after the first configuring call anywhere in the process, and the file
    handler is attached to the root logger (not the returned one) -- so
    calling this repeatedly stacks handlers and duplicates output lines.
    Confirm this single-call usage before reusing elsewhere.
    """
    logger = logging.getLogger("logger")
    logger.setLevel(logging.DEBUG)
    logging.basicConfig(format="%(message)s", level=logging.DEBUG)
    handler = logging.FileHandler(filename)
    handler.setLevel(logging.DEBUG)
    handler.setFormatter(logging.Formatter("%(asctime)s:%(levelname)s: %(message)s"))
    logging.getLogger().addHandler(handler)
    return logger
|
# Croatian National Bank exchange-rate API, queried for EUR, USD and GBP.
# NOTE(review): plain http -- confirm whether the API also serves https.
exchange_rate_url = 'http://api.hnb.hr/tecajn/v2?valuta=EUR&valuta=USD&valuta=GBP'
country_name_list = (
'Afghanistan',
'Albania',
'Algeria',
'Andorra',
'Angola',
'Antigua & Deps',
'Argentina',
'Armenia',
'Australia',
'Austria',
'Azerbaijan',
'Bahamas',
'Bahrain',
'Bangladesh',
'Barbados',
'Belarus',
'Belgium',
'Belize',
'Benin',
'Bhutan',
'Bolivia',
'Bosnia Herzegovina',
'Botswana',
'Brazil',
'Brunei',
'Bulgaria',
'Burkina',
'Burundi',
'Cambodia',
'Cameroon',
'Canada',
'Cape Verde',
'Central African Rep',
'Chad',
'Chile',
'China',
'Colombia',
'Comoros',
'Congo',
'Congo (Democratic Rep)',
'Costa Rica',
'Croatia',
'Cuba',
'Cyprus',
'Czech Republic',
'Denmark',
'Djibouti',
'Dominica',
'Dominican Republic',
'East Timor',
'Ecuador',
'Egypt',
'El Salvador',
'Equatorial Guinea',
'Eritrea',
'Estonia',
'Ethiopia',
'Fiji',
'Finland',
'France',
'Gabon',
'Gambia',
'Georgia',
'Germany',
'Ghana',
'Greece',
'Grenada',
'Guatemala',
'Guinea',
'Guinea-Bissau',
'Guyana',
'Haiti',
'Honduras',
'Hungary',
'Iceland',
'India',
'Indonesia',
'Iran',
'Iraq',
'Ireland (Republic)',
'Israel',
'Italy',
'Ivory Coast',
'Jamaica',
'Japan',
'Jordan',
'Kazakhstan',
'Kenya',
'Kiribati',
'Korea North',
'Korea South',
'Kosovo',
'Kuwait',
'Kyrgyzstan',
'Laos',
'Latvia',
'Lebanon',
'Lesotho',
'Liberia',
'Libya',
'Liechtenstein',
'Lithuania',
'Luxembourg',
'Macedonia',
'Madagascar',
'Malawi',
'Malaysia',
'Maldives',
'Mali',
'Malta',
'Marshall Islands',
'Mauritania',
'Mauritius',
'Mexico',
'Micronesia',
'Moldova',
'Monaco',
'Mongolia',
'Montenegro',
'Morocco',
'Mozambique',
'Myanmar, Burma',
'Namibia',
'Nauru',
'Nepal',
'Netherlands',
'New Zealand',
'Nicaragua',
'Niger',
'Nigeria',
'Norway',
'Oman',
'Pakistan',
'Palau',
'Panama',
'Papua New Guinea',
'Paraguay',
'Peru',
'Philippines',
'Poland',
'Portugal',
'Qatar',
'Romania',
'Russian Federation',
'Rwanda',
'St Kitts & Nevis',
'St Lucia',
'Saint Vincent & the Grenadines',
'Samoa',
'San Marino',
'Sao Tome & Principe',
'Saudi Arabia',
'Senegal',
'Serbia',
'Seychelles',
'Sierra Leone',
'Singapore',
'Slovakia',
'Slovenia',
'Solomon Islands',
'Somalia',
'South Africa',
'South Sudan',
'Spain',
'Sri Lanka',
'Sudan',
'Suriname',
'Swaziland',
'Sweden',
'Switzerland',
'Syria',
'Taiwan',
'Tajikistan',
'Tanzania',
'Thailand',
'Togo',
'Tonga',
'Trinidad & Tobago',
'Tunisia',
'Turkey',
'Turkmenistan',
'Tuvalu',
'Uganda',
'Ukraine',
'United Arab Emirates',
'United Kingdom',
'United States',
'Uruguay',
'Uzbekistan',
'Vanuatu',
'Vatican City',
'Venezuela',
'Vietnam',
'Yemen',
'Zambia',
'Zimbabwe',
)
# Field names that must be filled in for an order to validate — billing
# fields, plus the 'shipping_*' variants and the 'shipping_pp_*' set
# (presumably a personal-pickup / alternate shipping address — TODO confirm
# against the form validation code that consumes this tuple).
form_mandatory_fields = (
    'address',
    'city',
    'country',
    'email',
    'first_name',
    'last_name',
    'zip_code',
    'shipping_first_name',
    'shipping_last_name',
    'account_shipping_country',
    'shipping_city',
    'shipping_address',
    'shipping_zip_code',
    'shipping_state_county',
    'shipping_pp_country',
    'shipping_pp_first_name',
    'shipping_pp_last_name',
    'shipping_pp_address',
    'shipping_pp_city',
    'shipping_pp_zip_code',
    'shipping_pp_state_county',
)
# Cardholder field names used for the IPG (payment gateway) card form.
ipg_fields = (
    'cardholder_address',
    'cardholder_city',
    'cardholder_country',
    'cardholder_email',
    'cardholder_name',
    'cardholder_surname',
    'cardholder_zip_code',
)
# Optional form field names (not validated as mandatory).
# BUG FIX: the original tuple listed 'phone_number' twice (a copy/paste
# duplicate), which would make any consumer that iterates these fields
# process it twice; the duplicate entry has been removed.
form_extra_fields = (
    'phone_number',
    'state_county',
    'note',
    'company_name',
    'company_address',
    'company_uin',
    'register',
    'subscribe_to_newsletter',
    'agree_to_terms',
)
# Seed data for the credit-card brand logos shown in the shop. 'ordering'
# controls display order, 'external' marks links leaving the site, and
# 'css_class' selects the icon in the stylesheet.
initial_credit_card_logos = (
    {
        'name': 'American Express',
        'ordering': 6,
        'published': True,
        'external': True,
        'location': 'https://www.americanexpress.com/hr/network/',
        'css_class': 'american-express-logo-icon',
    },
    {
        'name': 'Discover',
        'ordering': 5,
        'published': True,
        'external': True,
        'location': 'https://www.discover.com/',
        'css_class': 'discover-logo-icon',
    },
    {
        'name': 'Diners Club',
        'ordering': 4,
        'published': False,
        'external': True,
        # BUG FIX: this URL was a triple-quoted string, which embedded the
        # surrounding newlines and indentation into the stored value and made
        # it unusable as a link; joined into a single clean URL instead.
        'location': (
            'https://www.diners.com.hr/Pogodnosti-i-usluge/'
            'MasterCard-SecureCode.html?Ym5cMzQsY2FyZFR5cGVcMSxwXDc3'
        ),
        'css_class': 'diners-club-logo-icon',
    },
    {
        'name': 'Mastercard',
        'ordering': 3,
        'published': True,
        'external': True,
        'location': 'https://www.mastercard.hr/hr-hr.html',
        'css_class': 'mastercard-logo-icon',
    },
    {
        'name': 'Maestro',
        'ordering': 2,
        'published': True,
        'external': True,
        'location': 'http://www.maestrocard.com/hr/',
        'css_class': 'maestro-logo-icon',
    },
    {
        'name': 'Visa',
        'ordering': 1,
        'published': True,
        'external': True,
        'location': 'https://www.visa.com.hr',
        'css_class': 'visa-logo-icon',
    },
)
# URL slugs for the static info pages linked from the footer.
CONTACT = 'contact'
PAYMENT_DELIVERY = 'payment-and-delivery'
COMPLAINTS = 'returns-and-complaints'
TOS = 'general-terms-and-conditions'
# Seed rows for the footer navigation links.
initial_footer_items = (
    {
        'name': 'Contact',
        'location': '/{}/'.format(CONTACT),
        'ordering': 4,
        'published': True,
        'css_class': '',
    },
    {
        'name': 'Payment and delivery',
        'location': '/{}/'.format(PAYMENT_DELIVERY),
        'ordering': 3,
        'published': True,
        'css_class': '',
    },
    {
        'name': 'Returns and complaints',
        'location': '/{}/'.format(COMPLAINTS),
        'ordering': 2,
        'published': True,
        'css_class': '',
    },
    {
        'name': 'General terms and conditions',
        'location': '/{}/'.format(TOS),
        'ordering': 1,
        'published': True,
        'css_class': '',
    },
)
# Seed rows for the social share icons rendered in the footer.
initial_footer_share_links = (
    {
        'name': 'Facebook',
        'ordering': 2,
        'location': 'https://www.facebook.com/TheBrushStash/',
        'external': True,
        'published': True,
        'css_class': 'facebook-icon',
    },
    {
        'name': 'Instagram',
        'ordering': 1,
        'location': 'https://www.instagram.com/thebrushstash',
        'external': True,
        'published': True,
        'css_class': 'instagram-icon',
    },
)
# URL slugs for the static content pages linked from the main navigation.
ABOUT = 'about-the-story'
BRUSH_CARE = 'brush-care'
# Seed rows for the main navigation menu.
initial_navigation_items = (
    {
        'name': 'Webshop',
        'location': '/',
        'ordering': 6,
        'published': True,
    },
    {
        'name': 'Brush Care',
        'location': '/{}/'.format(BRUSH_CARE),
        'ordering': 5,
        'published': True,
    },
    {
        'name': 'About / The Story',
        'location': '/{}/'.format(ABOUT),
        'ordering': 4,
        'published': True,
    },
    {
        'name': 'FAQ',
        'location': '/faq/',
        'ordering': 3,
        'published': True,
    },
)
# Seed rows for the product type taxonomy; currently only brushes.
initial_product_types = (
    {
        'name': 'Brush',
        'slug': 'brush',
    },
)
# Seed exchange rates per currency, with buying/middle/selling rates quoted
# against the shop's base currency (HRK, per DEFAULT_CURRENCY below) and an
# 'added_value' margin — presumably a percentage markup applied on top of the
# middle rate; TODO confirm against the rate-conversion code.
# NOTE(review): the name is misspelled ('inital'); kept as-is because renaming
# would break any existing imports of this constant.
inital_exchange_rates = (
    {
        'currency': 'GBP',
        'currency_code': '826',
        'state_iso': 'GBR',
        'buying_rate': '8.39528000',
        'middle_rate': '8.42054200',
        'selling_rate': '8.44580400',
        'added_value': '10',
    },
    {
        'currency': 'USD',
        'currency_code': '840',
        'state_iso': 'USA',
        'buying_rate': '6.71698600',
        'middle_rate': '6.73719800',
        'selling_rate': '6.75741000',
        'added_value': '10',
    },
    {
        'currency': 'EUR',
        'currency_code': '978',
        'state_iso': 'EMU',
        'buying_rate': '7.40211900',
        'middle_rate': '7.42439200',
        'selling_rate': '7.44666500',
        'added_value': '10',
    },
)
# Store-wide defaults used when the visitor's region is not known.
DEFAULT_COUNTRY = 'Croatia'
DEFAULT_CURRENCY = 'hrk'
DEFAULT_CURRENCY_CODE = 191  # ISO 4217 numeric code for HRK
DEFAULT_REGION = 'hr'
# Display symbol for each supported lowercase currency code.
currency_symbol_mapping = {
    'hrk': 'kn',
    'eur': '€',
    'gbp': '£',
    'usd': '$',
}
# Seed rows for the supported shop regions, each pairing a site language with
# the currency in which prices are displayed.
initial_region_data = (
    {
        'name': 'eu',
        'language': 'en',
        'currency': 'eur',
        'published': True,
        'ordering': 3,
    },
    {
        'name': DEFAULT_REGION,
        'language': 'hr',
        'currency': 'hrk',
        'published': True,
        'ordering': 2,
    },
    {
        'name': 'uk',
        'language': 'en',
        'currency': 'gbp',
        'published': True,
        'ordering': 1,
    },
    {
        'name': 'int',
        'language': 'en',
        'currency': 'usd',
        'published': True,
        'ordering': 0,
    },
)
# Fallback installment code — presumably the payment gateway's "no
# installments" code; TODO confirm against the IPG integration.
DEFAULT_INSTALLMENT_CODE = 'Y0000'
# Order-total price bands mapped to the maximum number of installments
# offered for that band (range_from/range_to are inclusive string bounds).
initial_installment_options = [
    {
        'range_from': '0',
        'range_to': '499.99',
        'installment_number': 2,
    },
    {
        'range_from': '500',
        'range_to': '999.99',
        'installment_number': 3,
    },
    {
        'range_from': '1000',
        'range_to': '1999.99',
        'installment_number': 4,
    },
    {
        'range_from': '2000',
        'range_to': '3999.99',
        'installment_number': 5,
    },
    {
        'range_from': '4000',
        'range_to': '999999999',
        'installment_number': 6,
    },
]
|
import pandas as pd
def mono_channels(file):
    """Identify the column name for each monotonic-test data channel.

    Reads only the header row of ``file`` (a path or file-like object accepted
    by :func:`pandas.read_csv`) and matches each header — lowercased with
    spaces removed — against known channel keywords.

    Stress and geometry columns are handled separately from the others because
    some data files report stress directly while others report the sample
    geometry (width and thickness) instead.

    Returns:
        tuple: ``(channels, stress_bool, geo_bool)`` where ``channels`` is
        ``[position, load, axial strain, transverse strain]`` followed by
        either the stress column or the width and thickness columns;
        ``stress_bool`` is True when a stress column was found and
        ``geo_bool`` is True when geometry columns were found.

    Raises:
        AttributeError: if the file contains both stress and geometry
            columns, if it contains neither, or if a required channel
            (position/load/axial strain/transverse strain) is missing.
    """
    # nrows=0 parses only the header row; the channel data itself is not
    # needed here, so skip loading it.
    channel_names = pd.read_csv(file, header=0, nrows=0).columns
    pos_col = load_col = ax_col = tr_col = None
    stress_col = width_col = thick_col = None
    for raw_name in channel_names:
        # Normalize the header to a keyword-matchable form.
        name = str(raw_name).replace(" ", "").lower()
        if "position" in name:
            pos_col = raw_name
        elif "load" in name:
            load_col = raw_name
        elif "axialstrain" in name:
            ax_col = raw_name
        elif "transversestrain" in name:
            tr_col = raw_name
        elif "stress" in name:
            stress_col = raw_name
        elif "width" in name:
            width_col = raw_name
        elif "thickness" in name:
            thick_col = raw_name
    if None in (pos_col, load_col, ax_col, tr_col):
        # The original code hit a bare NameError here; fail with a clear
        # message instead.
        raise AttributeError(
            "Data must contain position, load, axial strain and "
            "transverse strain channels"
        )
    stress_bool = stress_col is not None
    geo_found = width_col is not None and thick_col is not None
    # BUG FIX: in the original this both-present check came AFTER the
    # ``if stress_bool`` branch and was therefore unreachable; check it first.
    if stress_bool and geo_found:
        raise AttributeError(
            "Data must contain stress OR sample geometry, not both"
        )
    channels = [pos_col, load_col, ax_col, tr_col]
    geo_bool = False
    if stress_bool:
        channels.append(stress_col)
    elif geo_found:
        channels.append(width_col)
        channels.append(thick_col)
        geo_bool = True
    else:
        raise AttributeError(
            "Data must contain stress OR sample width AND thickness"
        )
    return channels, stress_bool, geo_bool
def fatigue_channels(file):
    """Identify the column name for each fatigue-test data channel.

    Reads only the header row of ``file`` (a path or file-like object accepted
    by :func:`pandas.read_csv`) and matches each header — lowercased with
    spaces removed — against known channel keywords.

    Stress and geometry columns are handled separately from the others because
    some data files report stress directly while others report the sample
    geometry (width and thickness) instead.

    Returns:
        tuple: ``(channels, stress_bool, geo_bool)`` where ``channels`` is
        ``[cycles, max load, min load, max axial strain, min axial strain]``
        followed by either the max/min stress columns or the width and
        thickness columns; ``stress_bool`` is True when stress columns were
        found and ``geo_bool`` is True when geometry columns were found.

    Raises:
        AttributeError: if the file contains both stress and geometry
            columns, if it contains neither, or if a required channel is
            missing.
    """
    # nrows=0 parses only the header row; the channel data is not needed.
    channel_names = pd.read_csv(file, header=0, nrows=0).columns
    cycles_col = max_load_col = min_load_col = None
    max_str_col = min_str_col = None
    max_stress_col = min_stress_col = width_col = thick_col = None
    for raw_name in channel_names:
        # Normalize the header to a keyword-matchable form.
        name = str(raw_name).replace(" ", "").lower()
        # "time" is excluded so an elapsed-cycle-time channel is not mistaken
        # for the cycle counter.
        if "cycle" in name and "time" not in name:
            cycles_col = raw_name
        elif "max" in name and "load" in name:
            max_load_col = raw_name
        elif "min" in name and "load" in name:
            min_load_col = raw_name
        elif "axialstrain" in name and "max" in name:
            max_str_col = raw_name
        elif "axialstrain" in name and "min" in name:
            min_str_col = raw_name
        elif "stress" in name and "max" in name:
            max_stress_col = raw_name
        elif "stress" in name and "min" in name:
            min_stress_col = raw_name
        elif "width" in name:
            width_col = raw_name
        elif "thickness" in name:
            thick_col = raw_name
    if None in (cycles_col, max_load_col, min_load_col, max_str_col, min_str_col):
        # The original code hit a bare NameError here; fail with a clear
        # message instead.
        raise AttributeError(
            "Data must contain cycle count, max/min load and "
            "max/min axial strain channels"
        )
    # BUG FIX: the original set stress_bool when EITHER stress column was
    # present and then appended both, raising NameError when one was missing;
    # require the pair.
    stress_bool = max_stress_col is not None and min_stress_col is not None
    geo_found = width_col is not None and thick_col is not None
    # BUG FIX: in the original this both-present check came AFTER the
    # ``if stress_bool`` branch and was therefore unreachable; check it first.
    if stress_bool and geo_found:
        raise AttributeError(
            "Data must contain stress OR sample geometry, not both"
        )
    channels = [
        cycles_col, max_load_col, min_load_col, max_str_col, min_str_col
    ]
    geo_bool = False
    if stress_bool:
        channels.append(max_stress_col)
        channels.append(min_stress_col)
    elif geo_found:
        channels.append(width_col)
        channels.append(thick_col)
        geo_bool = True
    else:
        raise AttributeError(
            "Data must contain stress OR sample width AND thickness"
        )
    return channels, stress_bool, geo_bool
|
'''IR Codes for Sony BluRay (remote RMT-VB310U)
These are derived from code set #6528 from the Global Cache IR code database ("Control Tower").
They seem to have been compiled from multiple sources, hence the slight variations in timing
from one code to another.
'''
# IR carrier frequency in Hz.
CARRIER_FREQUENCY = 40000
# Offset into the timing list at which repeated transmissions restart —
# presumably the Global Cache "repeat" field; TODO confirm against the
# Global Cache sendir format documentation.
REPEAT_OFFSET = 1
# Number of times each frame is transmitted per button press.
REPEAT_COUNT = 3
# Button name -> comma-separated mark/space durations. The units appear to be
# carrier periods (96 periods at 40 kHz ≈ 2.4 ms, matching a Sony SIRC start
# burst) — confirm against the Global Cache code-set documentation.
IR_CODES = {
    "AUDIO":"96,24,24,24,24,24,48,24,24,24,24,24,48,24,48,24,24,24,48,24,24,24,48,24,48,24,24,24,48,24,24,24,24,24,24,24,48,24,48,24,48,504",
    "BLUETOOTH":"96,24,48,24,48,24,24,24,48,24,48,24,24,24,48,24,24,24,48,24,24,24,48,24,48,24,24,24,48,24,24,24,24,24,24,24,48,24,48,24,48,456",
    "CLEAR":"96,24,48,24,48,24,48,24,48,24,24,24,24,24,24,24,24,24,48,24,24,24,48,24,48,24,24,24,48,24,24,24,24,24,24,24,48,24,48,24,48,480",
    "CURSOR_DOWN":"96,24,24,24,48,24,24,24,48,24,48,24,48,24,24,24,24,24,48,24,24,24,48,24,48,24,24,24,48,24,24,24,24,24,24,24,48,24,48,24,48,480",
    "CURSOR_ENTER":"96,24,48,24,24,24,48,24,48,24,48,24,48,24,24,24,24,24,48,24,24,24,48,24,48,24,24,24,48,24,24,24,24,24,24,24,48,24,48,24,48,456",
    "CURSOR_LEFT":"96,24,48,24,48,24,24,24,48,24,48,24,48,24,24,24,24,24,48,24,24,24,48,24,48,24,24,24,48,24,24,24,24,24,24,24,48,24,48,24,48,456",
    "CURSOR_RIGHT":"96,24,24,24,24,24,48,24,48,24,48,24,48,24,24,24,24,24,48,24,24,24,48,24,48,24,24,24,48,24,24,24,24,24,24,24,48,24,48,24,48,480",
    "CURSOR_UP":"96,24,48,24,24,24,24,24,48,24,48,24,48,24,24,24,24,24,48,24,24,24,48,24,48,24,24,24,48,24,24,24,24,24,24,24,48,24,48,24,48,480",
    "DIGIT_0":"96,24,24,24,24,24,24,24,24,24,24,24,24,24,24,24,24,24,48,24,24,24,48,24,48,24,24,24,48,24,24,24,24,24,24,24,48,24,48,24,48,576",
    "DIGIT_1":"96,24,48,24,24,24,24,24,24,24,24,24,24,24,24,24,24,24,48,24,24,24,48,24,48,24,24,24,48,24,24,24,24,24,24,24,48,24,48,24,48,552",
    "DIGIT_2":"96,24,24,24,48,24,24,24,24,24,24,24,24,24,24,24,24,24,48,24,24,24,48,24,48,24,24,24,48,24,24,24,24,24,24,24,48,24,48,24,48,552",
    "DIGIT_3":"96,24,48,24,48,24,24,24,24,24,24,24,24,24,24,24,24,24,48,24,24,24,48,24,48,24,24,24,48,24,24,24,24,24,24,24,48,24,48,24,48,528",
    "DIGIT_4":"96,24,24,24,24,24,48,24,24,24,24,24,24,24,24,24,24,24,48,24,24,24,48,24,48,24,24,24,48,24,24,24,24,24,24,24,48,24,48,24,48,552",
    "DIGIT_5":"96,24,48,24,24,24,48,24,24,24,24,24,24,24,24,24,24,24,48,24,24,24,48,24,48,24,24,24,48,24,24,24,24,24,24,24,48,24,48,24,48,528",
    "DIGIT_6":"96,24,24,24,48,24,48,24,24,24,24,24,24,24,24,24,24,24,48,24,24,24,48,24,48,24,24,24,48,24,24,24,24,24,24,24,48,24,48,24,48,528",
    "DIGIT_7":"96,24,48,24,48,24,48,24,24,24,24,24,24,24,24,24,24,24,48,24,24,24,48,24,48,24,24,24,48,24,24,24,24,24,24,24,48,24,48,24,48,504",
    "DIGIT_8":"96,24,24,24,24,24,24,24,48,24,24,24,24,24,24,24,24,24,48,24,24,24,48,24,48,24,24,24,48,24,24,24,24,24,24,24,48,24,48,24,48,552",
    "DIGIT_9":"96,24,48,24,24,24,24,24,48,24,24,24,24,24,24,24,24,24,48,24,24,24,48,24,48,24,24,24,48,24,24,24,24,24,24,24,48,24,48,24,48,528",
    "DISPLAY":"96,24,48,24,24,24,24,24,24,24,24,24,24,24,48,24,24,24,48,24,24,24,48,24,48,24,24,24,48,24,24,24,24,24,24,24,48,24,48,24,48,528",
    "FAVORITE":"96,24,24,24,48,24,48,24,48,24,48,24,24,24,48,24,24,24,48,24,24,24,48,24,48,24,24,24,48,24,24,24,24,24,24,24,48,24,48,24,48,456",
    "FORWARD":"96,24,24,24,24,24,48,24,48,24,48,24,24,24,24,24,24,24,48,24,24,24,48,24,48,24,24,24,48,24,24,24,24,24,24,24,48,24,48,24,48,504",
    "FUNCTION_BLUE":"96,24,48,24,24,24,48,24,24,24,24,24,48,24,48,24,24,24,48,24,24,24,48,24,48,24,24,24,48,24,24,24,24,24,24,24,48,24,48,24,48,480",
    "FUNCTION_GREEN":"96,24,24,24,24,24,24,24,48,24,24,24,48,24,48,24,24,24,48,24,24,24,48,24,48,24,24,24,48,24,24,24,24,24,24,24,48,24,48,24,48,504",
    "FUNCTION_RED":"96,24,48,24,48,24,48,24,24,24,24,24,48,24,48,24,24,24,48,24,24,24,48,24,48,24,24,24,48,24,24,24,24,24,24,24,48,24,48,24,48,456",
    "FUNCTION_YELLOW":"96,24,48,24,24,24,24,24,48,24,24,24,48,24,48,24,24,24,48,24,24,24,48,24,48,24,24,24,48,24,24,24,24,24,24,24,48,24,48,24,48,480",
    "MENU_HOME":"96,24,24,24,48,24,24,24,24,24,24,24,24,24,48,24,24,24,48,24,24,24,48,24,48,24,24,24,48,24,24,24,24,24,24,24,48,24,48,24,48,528",
    "MENU_POPUP":"96,24,48,24,24,24,24,24,48,24,24,24,48,24,24,24,24,24,48,24,24,24,48,24,48,24,24,24,48,24,24,24,24,24,24,24,48,24,48,24,48,504",
    "MENU_TOP":"96,24,24,24,24,24,48,24,48,24,24,24,48,24,24,24,24,24,48,24,24,24,48,24,48,24,24,24,48,24,24,24,24,24,24,24,48,24,48,24,48,504",
    "NET_SERVICES":"96,24,48,24,24,24,24,24,48,24,48,24,24,24,48,24,24,24,48,24,24,24,48,24,48,24,24,24,48,24,24,24,24,24,24,24,48,24,48,24,48,480",
    "NETFLIX":"96,24,48,24,48,24,24,24,48,24,24,24,24,24,48,24,24,24,48,24,24,24,48,24,48,24,24,24,48,24,24,24,24,24,24,24,48,24,48,24,48,480",
    "NEXT":"96,24,24,24,48,24,48,24,24,24,48,24,24,24,48,24,24,24,48,24,24,24,48,24,48,24,24,24,48,24,24,24,24,24,24,24,48,24,48,24,48,480",
    "OPEN_CLOSE":"96,24,24,24,48,24,48,24,24,24,48,24,24,24,24,24,24,24,48,24,24,24,48,24,48,24,24,24,48,24,24,24,24,24,24,24,48,24,48,24,48,504",
    "OPTIONS":"96,24,48,24,48,24,48,24,48,24,48,24,48,24,24,24,24,24,48,24,24,24,48,24,48,24,24,24,48,24,24,24,24,24,24,24,48,24,48,24,48,432",
    "PAUSE":"96,24,48,24,24,24,24,24,48,24,48,24,24,24,24,24,24,24,48,24,24,24,48,24,48,24,24,24,48,24,24,24,24,24,24,24,48,24,48,24,48,504",
    "PLAY":"96,24,24,24,48,24,24,24,48,24,48,24,24,24,24,24,24,24,48,24,24,24,48,24,48,24,24,24,48,24,24,24,24,24,24,24,48,24,48,24,48,504",
    "POWER_OFF":"96,24,48,24,48,24,48,24,48,24,24,24,48,24,24,24,24,24,48,24,24,24,48,24,48,24,24,24,48,24,24,24,24,24,24,24,48,24,48,24,48,456",
    "POWER_ON":"96,24,24,24,48,24,48,24,48,24,24,24,48,24,24,24,24,24,48,24,24,24,48,24,48,24,24,24,48,24,24,24,24,24,24,24,48,24,48,24,48,480",
    "POWER_TOGGLE":"96,24,48,24,24,24,48,24,24,24,48,24,24,24,24,24,24,24,48,24,24,24,48,24,48,24,24,24,48,24,24,24,24,24,24,24,48,24,48,24,48,504",
    "PREVIOUS":"96,24,48,24,48,24,48,24,24,24,48,24,24,24,48,24,24,24,48,24,24,24,48,24,48,24,24,24,48,24,24,24,24,24,24,24,48,24,48,24,48,456",
    "RETURN":"96,24,48,24,48,24,24,24,24,24,24,24,24,24,48,24,24,24,48,24,24,24,48,24,48,24,24,24,48,24,24,24,24,24,24,24,48,24,48,24,48,504",
    "REVERSE":"96,24,48,24,48,24,24,24,48,24,48,24,24,24,24,24,24,24,48,24,24,24,48,24,48,24,24,24,48,24,24,24,24,24,24,24,48,24,48,24,48,480",
    "SLOW_FORWARD":"96,24,24,24,24,24,48,24,24,24,48,24,24,24,48,24,24,24,48,24,24,24,48,24,48,24,24,24,48,24,24,24,24,24,24,24,48,24,48,24,48,504",
    "SLOW_REVERSE":"96,24,48,24,48,24,24,24,24,24,48,24,24,24,48,24,24,24,48,24,24,24,48,24,48,24,24,24,48,24,24,24,24,24,24,24,48,24,48,24,48,480",
    "STOP":"96,24,24,24,24,24,24,24,48,24,48,24,24,24,24,24,24,24,48,24,24,24,48,24,48,24,24,24,48,24,24,24,24,24,24,24,48,24,48,24,48,528",
    "SUBTITLE":"96,24,48,24,48,24,24,24,24,24,24,24,48,24,48,24,24,24,48,24,24,24,48,24,48,24,24,24,48,24,24,24,24,24,24,24,48,24,48,24,48,480"
}
|
# GENERATED BY KOMAND SDK - DO NOT EDIT
import insightconnect_plugin_runtime
import json
class Component:
    """Human-readable description of this trigger, shown in the plugin UI."""
    DESCRIPTION = "Return all new alerts"
class Input:
    """Constant names for the trigger's input parameters."""
    FREQUENCY = "frequency"
class Output:
    """Constant names for the trigger's output fields."""
    ALERT = "alert"
class GetAlertsInput(insightconnect_plugin_runtime.Input):
    """JSON-schema wrapper validating the trigger's input (poll frequency)."""
    schema = json.loads("""
   {
  "type": "object",
  "title": "Variables",
  "properties": {
    "frequency": {
      "type": "integer",
      "title": "Frequency",
      "description": "Poll frequency in seconds",
      "default": 10,
      "order": 1
    }
  }
}
    """)

    def __init__(self):
        super(self.__class__, self).__init__(self.schema)
class GetAlertsOutput(insightconnect_plugin_runtime.Output):
    """JSON-schema wrapper validating the trigger's output (a single alert).

    NOTE(review): the 'related_user_object' definition appears twice (nested
    under 'alert' and at the top level); this duplication comes from the
    generator and is left untouched.
    """
    schema = json.loads("""
   {
  "type": "object",
  "title": "Variables",
  "properties": {
    "alert": {
      "$ref": "#/definitions/alert",
      "title": "Alert",
      "description": "Alert",
      "order": 1
    }
  },
  "required": [
    "alert"
  ],
  "definitions": {
    "alert": {
      "type": "object",
      "title": "alert",
      "properties": {
        "aadTenantId": {
          "type": "string",
          "title": "AAD Tenant ID",
          "description": "AAD tenant ID",
          "order": 23
        },
        "alertCreationTime": {
          "type": "string",
          "title": "Alert Creation Time",
          "description": "Alert creation time",
          "order": 15
        },
        "assignedTo": {
          "type": "string",
          "title": "Assigned To",
          "description": "Assigned To",
          "order": 4
        },
        "category": {
          "type": "string",
          "title": "Category",
          "description": "Category",
          "order": 11
        },
        "classification": {
          "type": "string",
          "title": "Classification",
          "description": "Classification",
          "order": 7
        },
        "computerDnsName": {
          "type": "string",
          "title": "Computer DNS Name",
          "description": "Computer DNS name",
          "order": 21
        },
        "description": {
          "type": "string",
          "title": "Description",
          "description": "Description",
          "order": 14
        },
        "detectionSource": {
          "type": "string",
          "title": "Detection Source",
          "description": "Detection source",
          "order": 10
        },
        "determination": {
          "type": "string",
          "title": "Determination",
          "description": "Determination",
          "order": 8
        },
        "firstEventTime": {
          "type": "string",
          "title": "First Event Time",
          "description": "First event time",
          "order": 16
        },
        "id": {
          "type": "string",
          "title": "ID",
          "description": "ID",
          "order": 1
        },
        "incidentId": {
          "type": "integer",
          "title": "Incident ID",
          "description": "Incident ID",
          "order": 2
        },
        "investigationId": {
          "type": "integer",
          "title": "Investigation ID",
          "description": "Investigation ID",
          "order": 3
        },
        "investigationState": {
          "type": "string",
          "title": "Investigation State",
          "description": "Investigation state",
          "order": 9
        },
        "lastEventTime": {
          "type": "string",
          "title": "Last Event Time",
          "description": "Last event time",
          "order": 17
        },
        "lastUpdateTime": {
          "type": "string",
          "title": "Last Update Time",
          "description": "Last update time",
          "order": 18
        },
        "machineId": {
          "type": "string",
          "title": "Machine ID",
          "description": "Machine ID",
          "order": 20
        },
        "rbacGroupName": {
          "type": "string",
          "title": "RBAC Group Name",
          "description": "RBAC group name",
          "order": 22
        },
        "relatedUser": {
          "$ref": "#/definitions/related_user_object",
          "title": "Related User",
          "description": "Related user",
          "order": 24
        },
        "resolvedTime": {
          "type": "string",
          "title": "Resolved Time",
          "description": "Resolved time",
          "order": 19
        },
        "severity": {
          "type": "string",
          "title": "Severity",
          "description": "Severity",
          "order": 5
        },
        "status": {
          "type": "string",
          "title": "Status",
          "description": "Status",
          "order": 6
        },
        "threatFamilyName": {
          "type": "string",
          "title": "Threat Family Name",
          "description": "Threat family name",
          "order": 12
        },
        "title": {
          "type": "string",
          "title": "Title",
          "description": "Title",
          "order": 13
        }
      },
      "definitions": {
        "related_user_object": {
          "type": "object",
          "title": "related_user_object",
          "properties": {
            "domainName": {
              "type": "string",
              "title": "Domain Name",
              "description": "Domain name",
              "order": 1
            },
            "userName": {
              "type": "string",
              "title": "User Name",
              "description": "User name",
              "order": 2
            }
          }
        }
      }
    },
    "related_user_object": {
      "type": "object",
      "title": "related_user_object",
      "properties": {
        "domainName": {
          "type": "string",
          "title": "Domain Name",
          "description": "Domain name",
          "order": 1
        },
        "userName": {
          "type": "string",
          "title": "User Name",
          "description": "User name",
          "order": 2
        }
      }
    }
  }
}
    """)

    def __init__(self):
        super(self.__class__, self).__init__(self.schema)
|
import pdb
class irange:
    """An inclusive integer range.

    Behaves like the built-in :class:`range` except that ``stop`` itself is
    yielded when it is reachable from ``start`` in whole steps. A range with
    ``start == stop`` is empty (matching the original implementation).

    Examples:
        >>> list(irange(3))
        [0, 1, 2, 3]
        >>> list(irange(10, 0, -2))
        [10, 8, 6, 4, 2, 0]
    """

    def __init__(self, *args):
        """Accept (stop), (start, stop) or (start, stop, step), like range()."""
        if len(args) == 0:
            raise TypeError("irange expected 1 arguments, got 0")
        elif len(args) > 3:
            raise TypeError(
                "irange expected at most 3 arguments got {l}"
                .format(l=len(args))
            )
        for x in args:
            # Deliberately exact: bools and floats are rejected.
            if type(x) != int:
                raise TypeError(
                    "'{type_name}' object cannot be interpreted as an integer"
                    .format(type_name=type(x).__name__)
                )
        if len(args) == 3:
            self.start, self.stop, self.step = args
            if self.step == 0:
                raise ValueError("irange() arg 3 must not be zero")
        elif len(args) == 2:
            self.start, self.stop = args
            self.step = 1
        else:
            self.start = 0
            self.stop = args[0]
            self.step = 1

    def __iter__(self):
        """Yield values from start towards stop, including stop if reachable."""
        if not bool(self):
            return
        value = self.start
        if self.start > self.stop:
            while value > self.stop:
                yield value
                value += self.step
        elif self.start < self.stop:
            while value < self.stop:
                yield value
                value += self.step
        # The loops above stop short of ``stop``; emit it when it lies
        # exactly on the step grid.
        if self.stop in self:
            yield self.stop

    def __repr__(self):
        if self.step == 1:
            return (
                'irange({start}, {stop})'
                .format(start=self.start, stop=self.stop)
            )
        else:
            return (
                'irange({start}, {stop}, {step})'
                .format(start=self.start, stop=self.stop, step=self.step)
            )

    def __bool__(self):
        """True when the range is non-empty: start != stop and the step moves
        from start towards stop."""
        return not ((self.start == self.stop) or
                    (self.start > self.stop and self.step > 0) or
                    (self.start < self.stop and self.step < 0))

    def __len__(self):
        """Number of values produced by iteration.

        BUG FIX: the original special-cased negative endpoints and returned
        wrong counts for non-unit steps (e.g. len(irange(1, 6, 2)) was 2 for
        the three values [1, 3, 5], and len(irange(-6, 6, 2)) was 5 for
        seven values). For an inclusive range the count is simply
        ``abs(stop - start) // abs(step) + 1`` whenever the range is
        non-empty — this covers both the divisible and non-divisible cases.
        """
        if not bool(self):
            return 0
        return abs(self.stop - self.start) // abs(self.step) + 1

    def __contains__(self, value):
        """True when ``value`` lies between start and stop (inclusive) and is
        offset from start by a whole number of steps."""
        if not bool(self):
            return False
        in_step = abs(value - self.start) % abs(self.step) == 0
        in_between = (
            (value >= self.start and value <= self.stop) or
            (value <= self.start and value >= self.stop)
        )
        return in_step and in_between

    def __getitem__(self, index):
        """Return the value at ``index``; negative indices count from the end.

        BUG FIX: the original bounds check used ``index > len(self)`` and
        ``abs(index) > len(self) + 1``, silently accepting the out-of-range
        indices ``len(self)`` and ``-len(self) - 1`` (and returning a value
        for empty ranges). Every element is ``start + step * k`` for
        ``0 <= k < len(self)``, which also collapses the original's four
        direction-dependent formulas into one.
        """
        size = len(self)
        if index >= size or index < -size:
            raise IndexError("irange object index out of range")
        if index < 0:
            index += size
        return self.start + self.step * index

    def __reversed__(self):
        """Return an irange producing the same values in reverse order."""
        # The last value actually produced may fall short of ``stop`` when
        # ``stop`` is not on the step grid, so compute it from the length.
        last = self.start + self.step * (len(self) - 1)
        return irange(last, self.start, -self.step)

    def index(self, value):
        """Return the zero-based position of ``value``.

        Raises:
            ValueError: if ``value`` is not in the range.
        """
        if value not in self:
            raise ValueError("{v} is not in range".format(v=value))
        # Exact integer arithmetic; the original used true division plus
        # int(), which can lose precision for very large values.
        return (value - self.start) // self.step

    def count(self, value):
        """Return 1 if ``value`` is in the range else 0 (values are unique)."""
        return 1 if value in self else 0
|
import os
import shutil
import sys
import tempfile
from typing import List, Tuple
import numpy
from scipy.sparse.csr import csr_matrix
from labours.cors_web_server import web_server
# Target number of rows/columns per Swivel training shard; also the yardstick
# used by train_embeddings() when picking embedding size and epoch count.
IDEAL_SHARD_SIZE = 4096
def train_embeddings(
    index: List[str],
    matrix: csr_matrix,
    tmpdir: None,
    shard_size: int = IDEAL_SHARD_SIZE,
) -> Tuple[List[Tuple[str, numpy.int64]], List[numpy.ndarray]]:
    """Train Swivel embeddings over a square sparse co-occurrence matrix.

    Args:
        index: One name per matrix row/column; may be shorter than the matrix.
        matrix: Square sparse matrix; its diagonal entries are paired with the
            names in the returned meta index.
        tmpdir: Parent directory for the temporary Swivel working tree, or
            None/"" for the system default.
        shard_size: Target rows/columns per Swivel submatrix shard.

    Returns:
        ``(meta_index, embeddings)`` — each kept name paired with its diagonal
        matrix value, plus one vector per kept name (the mean of Swivel's row
        and column embeddings).
    """
    import tensorflow as tf

    from labours._vendor import swivel

    assert matrix.shape[0] == matrix.shape[1]
    assert len(index) <= matrix.shape[0]
    # Clip the heaviest 1% of values so outliers do not dominate training.
    outlier_threshold = numpy.percentile(matrix.data, 99)
    matrix.data[matrix.data > outlier_threshold] = outlier_threshold
    nshards = len(index) // shard_size
    if nshards * shard_size < len(index):
        nshards += 1
        shard_size = len(index) // nshards
        nshards = len(index) // shard_size
    remainder = len(index) - nshards * shard_size
    if remainder > 0:
        # Drop the rows with the fewest non-zeros so the matrix divides
        # evenly into nshards * shard_size.
        lengths = matrix.indptr[1:] - matrix.indptr[:-1]
        filtered = sorted(numpy.argsort(lengths)[remainder:])
    else:
        filtered = list(range(len(index)))
    if len(filtered) < matrix.shape[0]:
        print("Truncating the sparse matrix...")
        matrix = matrix[filtered, :][:, filtered]
    meta_index = []
    for i, j in enumerate(filtered):
        meta_index.append((index[j], matrix[i, i]))
    index = [mi[0] for mi in meta_index]
    with tempfile.TemporaryDirectory(
        prefix="hercules_labours_", dir=tmpdir or None
    ) as tmproot:
        print("Writing Swivel metadata...")
        vocabulary = "\n".join(index)
        with open(os.path.join(tmproot, "row_vocab.txt"), "w") as out:
            out.write(vocabulary)
        with open(os.path.join(tmproot, "col_vocab.txt"), "w") as out:
            out.write(vocabulary)
        del vocabulary
        bool_sums = matrix.indptr[1:] - matrix.indptr[:-1]
        bool_sums_str = "\n".join(map(str, bool_sums.tolist()))
        with open(os.path.join(tmproot, "row_sums.txt"), "w") as out:
            out.write(bool_sums_str)
        with open(os.path.join(tmproot, "col_sums.txt"), "w") as out:
            out.write(bool_sums_str)
        del bool_sums_str
        # Interleave rows by descending density so each shard receives a mix
        # of dense and sparse rows.
        reorder = numpy.argsort(-bool_sums)
        print("Writing Swivel shards...")
        for row in range(nshards):
            for col in range(nshards):

                def _int64s(xs):
                    return tf.train.Feature(
                        int64_list=tf.train.Int64List(value=list(xs))
                    )

                def _floats(xs):
                    return tf.train.Feature(
                        float_list=tf.train.FloatList(value=list(xs))
                    )

                indices_row = reorder[row::nshards]
                indices_col = reorder[col::nshards]
                shard = matrix[indices_row][:, indices_col].tocoo()
                example = tf.train.Example(
                    features=tf.train.Features(
                        feature={
                            "global_row": _int64s(indices_row),
                            "global_col": _int64s(indices_col),
                            "sparse_local_row": _int64s(shard.row),
                            "sparse_local_col": _int64s(shard.col),
                            "sparse_value": _floats(shard.data),
                        }
                    )
                )
                with open(
                    os.path.join(tmproot, "shard-%03d-%03d.pb" % (row, col)), "wb"
                ) as out:
                    out.write(example.SerializeToString())
        print("Training Swivel model...")
        swivel.FLAGS.submatrix_rows = shard_size
        swivel.FLAGS.submatrix_cols = shard_size
        # Smaller vocabularies get smaller embeddings and more epochs; the
        # breakpoints are expressed relative to the ideal shard size.
        if len(meta_index) <= IDEAL_SHARD_SIZE / 16:
            embedding_size = 50
            num_epochs = 100000
        elif len(meta_index) <= IDEAL_SHARD_SIZE:
            embedding_size = 50
            num_epochs = 50000
        elif len(meta_index) <= IDEAL_SHARD_SIZE * 2:
            embedding_size = 60
            num_epochs = 10000
        elif len(meta_index) <= IDEAL_SHARD_SIZE * 4:
            embedding_size = 70
            num_epochs = 8000
        elif len(meta_index) <= IDEAL_SHARD_SIZE * 10:
            embedding_size = 80
            num_epochs = 5000
        elif len(meta_index) <= IDEAL_SHARD_SIZE * 25:
            embedding_size = 100
            num_epochs = 1000
        elif len(meta_index) <= IDEAL_SHARD_SIZE * 100:
            embedding_size = 200
            num_epochs = 600
        else:
            embedding_size = 300
            num_epochs = 300
        if os.getenv("CI"):
            # Travis, AppVeyor etc. during the integration tests.
            # BUG FIX: was `/=`, which turned num_epochs into a float; the
            # epoch-count flag is integer-valued, so use floor division.
            num_epochs //= 10
        swivel.FLAGS.embedding_size = embedding_size
        swivel.FLAGS.input_base_path = tmproot
        swivel.FLAGS.output_base_path = tmproot
        swivel.FLAGS.loss_multiplier = 1.0 / shard_size
        swivel.FLAGS.num_epochs = num_epochs
        # Tensorflow 1.5 parses sys.argv unconditionally *applause* — hide our
        # own CLI arguments from it while swivel runs.
        argv_backup = sys.argv[1:]
        del sys.argv[1:]
        swivel.main(None)
        sys.argv.extend(argv_backup)
        print("Reading Swivel embeddings...")
        embeddings = []
        with open(os.path.join(tmproot, "row_embedding.tsv")) as frow:
            with open(os.path.join(tmproot, "col_embedding.tsv")) as fcol:
                for i, (lrow, lcol) in enumerate(zip(frow, fcol)):
                    prow, pcol = (l.split("\t", 1) for l in (lrow, lcol))
                    assert prow[0] == pcol[0]
                    # NOTE: numpy.fromstring (text mode) is deprecated but kept
                    # for byte-identical parsing behavior.
                    erow, ecol = (
                        numpy.fromstring(p[1], dtype=numpy.float32, sep="\t")
                        for p in (prow, pcol)
                    )
                    embeddings.append((erow + ecol) / 2)
    return meta_index, embeddings
def write_embeddings(
    name: str,
    output: str,
    run_server: bool,
    index: List[Tuple[str, numpy.int64]],
    embeddings: List[numpy.ndarray],
) -> None:
    """Dump embeddings in Tensorflow Projector format.

    Writes three files derived from ``output``: a metadata TSV (name and
    commit count per row), a data TSV (one tab-separated vector per row) and
    a Projector JSON config pointing at them, then optionally serves the
    files and opens the Projector UI in a browser.
    """
    print("Writing Tensorflow Projector files...")
    if not output:
        output = "couples"
    if output.endswith(".json"):
        output = os.path.join(output[:-5], "couples")
        run_server = False

    meta_path = "%s_%s_meta.tsv" % (output, name)
    data_path = "%s_%s_data.tsv" % (output, name)
    json_path = "%s_%s.json" % (output, name)

    with open(meta_path, "w") as handle:
        handle.write("name\tcommits\n")
        handle.writelines("%s\t%s\n" % pair for pair in index)
    print("Wrote", meta_path)

    with open(data_path, "w") as handle:
        for vector in embeddings:
            handle.write("\t".join(str(component) for component in vector))
            handle.write("\n")
    print("Wrote", data_path)

    with open(json_path, "w") as handle:
        handle.write(
            """{
  "embeddings": [
    {
      "tensorName": "%s %s coupling",
      "tensorShape": [%s, %s],
      "tensorPath": "http://0.0.0.0:8000/%s",
      "metadataPath": "http://0.0.0.0:8000/%s"
    }
  ]
}
"""
            % (output, name, len(embeddings), len(embeddings[0]), data_path, meta_path)
        )
    print("Wrote %s" % json_path)

    if run_server and not web_server.running:
        web_server.start()
    url = "http://projector.tensorflow.org/?config=http://0.0.0.0:8000/" + json_path
    print(url)
    if run_server:
        if shutil.which("xdg-open") is not None:
            os.system("xdg-open " + url)
        else:
            browser = os.getenv("BROWSER", "")
            if browser:
                os.system(browser + " " + url)
            else:
                print("\t" + url)
|
from docker.errors import APIError, ImageNotFound
import click
from gigantumcli.dockerinterface import DockerInterface
from gigantumcli.changelog import ChangeLog
from gigantumcli.utilities import is_running_as_admin
@click.command()
@click.option('--edge', '-e', type=bool, is_flag=True, default=False,
              help="Optional flag indicating if the edge version should be used. Note, you must have access "
                   "to this image and may need to log into DockerHub.")
def install(edge: bool):
    """Install the Gigantum Client Docker Image.

    This command will pull and configure the Gigantum Client Docker Image for the first time. The
    process can take a few minutes depending on your internet speed.
    \f
    Args:
        edge(bool): Flag indicating if the install is stable or edge
    """
    # Refuse to run with elevated privileges.
    if is_running_as_admin():
        raise click.UsageError("Do not run `gigantum install` as root.")

    docker_obj = DockerInterface()
    image_name = docker_obj.image_name(edge)
    try:
        try:
            # An existing local image means install was already performed.
            docker_obj.client.images.get(image_name)
        except ImageNotFound:
            # First install: pull the latest tagged release and alias it
            # as 'latest'.
            print("\nDownloading and installing the Gigantum Client Docker Image. Please wait...\n")
            tag = ChangeLog().latest_tag()
            image = docker_obj.client.images.pull(image_name, tag)
            docker_obj.client.api.tag('{}:{}'.format(image_name, tag), image_name, 'latest')
        else:
            raise click.UsageError("Gigantum Client image already installed. Run `gigantum update` instead.")
    except APIError:
        click.echo("Failed to pull image! Verify your internet connection and try again.", err=True)
        raise click.Abort()

    short_id = image.short_id.split(':')[1]
    print("\nSuccessfully pulled {}:{}\n".format(image_name, short_id))
|
# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!
"""Client and server classes corresponding to protobuf-defined services."""
import grpc
from google.ads.google_ads.v6.proto.resources import keyword_plan_ad_group_pb2 as google_dot_ads_dot_googleads__v6_dot_proto_dot_resources_dot_keyword__plan__ad__group__pb2
from google.ads.google_ads.v6.proto.services import keyword_plan_ad_group_service_pb2 as google_dot_ads_dot_googleads__v6_dot_proto_dot_services_dot_keyword__plan__ad__group__service__pb2
class KeywordPlanAdGroupServiceStub(object):
    """Proto file describing the keyword plan ad group service.

    Service to manage Keyword Plan ad groups.
    """

    def __init__(self, channel):
        """Constructor.

        Args:
            channel: A grpc.Channel.
        """
        # Unary-unary RPC: fetch a single KeywordPlanAdGroup resource.
        self.GetKeywordPlanAdGroup = channel.unary_unary(
                '/google.ads.googleads.v6.services.KeywordPlanAdGroupService/GetKeywordPlanAdGroup',
                request_serializer=google_dot_ads_dot_googleads__v6_dot_proto_dot_services_dot_keyword__plan__ad__group__service__pb2.GetKeywordPlanAdGroupRequest.SerializeToString,
                response_deserializer=google_dot_ads_dot_googleads__v6_dot_proto_dot_resources_dot_keyword__plan__ad__group__pb2.KeywordPlanAdGroup.FromString,
                )
        # Unary-unary RPC: batch create/update/remove of Keyword Plan ad groups.
        self.MutateKeywordPlanAdGroups = channel.unary_unary(
                '/google.ads.googleads.v6.services.KeywordPlanAdGroupService/MutateKeywordPlanAdGroups',
                request_serializer=google_dot_ads_dot_googleads__v6_dot_proto_dot_services_dot_keyword__plan__ad__group__service__pb2.MutateKeywordPlanAdGroupsRequest.SerializeToString,
                response_deserializer=google_dot_ads_dot_googleads__v6_dot_proto_dot_services_dot_keyword__plan__ad__group__service__pb2.MutateKeywordPlanAdGroupsResponse.FromString,
                )
# Abstract server-side base class: subclass and override the methods below,
# then register with add_KeywordPlanAdGroupServiceServicer_to_server().
class KeywordPlanAdGroupServiceServicer(object):
    """Proto file describing the keyword plan ad group service.
    Service to manage Keyword Plan ad groups.
    """

    def GetKeywordPlanAdGroup(self, request, context):
        """Returns the requested Keyword Plan ad group in full detail.
        """
        # Generated base answers UNIMPLEMENTED until overridden.
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')

    def MutateKeywordPlanAdGroups(self, request, context):
        """Creates, updates, or removes Keyword Plan ad groups. Operation statuses are
        returned.
        """
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')
def add_KeywordPlanAdGroupServiceServicer_to_server(servicer, server):
    """Register *servicer*'s RPC method handlers with the given grpc *server*."""
    rpc_method_handlers = {
            'GetKeywordPlanAdGroup': grpc.unary_unary_rpc_method_handler(
                    servicer.GetKeywordPlanAdGroup,
                    request_deserializer=google_dot_ads_dot_googleads__v6_dot_proto_dot_services_dot_keyword__plan__ad__group__service__pb2.GetKeywordPlanAdGroupRequest.FromString,
                    response_serializer=google_dot_ads_dot_googleads__v6_dot_proto_dot_resources_dot_keyword__plan__ad__group__pb2.KeywordPlanAdGroup.SerializeToString,
            ),
            'MutateKeywordPlanAdGroups': grpc.unary_unary_rpc_method_handler(
                    servicer.MutateKeywordPlanAdGroups,
                    request_deserializer=google_dot_ads_dot_googleads__v6_dot_proto_dot_services_dot_keyword__plan__ad__group__service__pb2.MutateKeywordPlanAdGroupsRequest.FromString,
                    response_serializer=google_dot_ads_dot_googleads__v6_dot_proto_dot_services_dot_keyword__plan__ad__group__service__pb2.MutateKeywordPlanAdGroupsResponse.SerializeToString,
            ),
    }
    # One generic handler routes both methods under the fully-qualified service name.
    generic_handler = grpc.method_handlers_generic_handler(
            'google.ads.googleads.v6.services.KeywordPlanAdGroupService', rpc_method_handlers)
    server.add_generic_rpc_handlers((generic_handler,))
 # This class is part of an EXPERIMENTAL API.
class KeywordPlanAdGroupService(object):
    """Proto file describing the keyword plan ad group service.
    Service to manage Keyword Plan ad groups.
    """

    # Channel-less convenience wrapper: each call builds a transient channel
    # to *target* via grpc.experimental.unary_unary.
    @staticmethod
    def GetKeywordPlanAdGroup(request,
            target,
            options=(),
            channel_credentials=None,
            call_credentials=None,
            compression=None,
            wait_for_ready=None,
            timeout=None,
            metadata=None):
        return grpc.experimental.unary_unary(request, target, '/google.ads.googleads.v6.services.KeywordPlanAdGroupService/GetKeywordPlanAdGroup',
            google_dot_ads_dot_googleads__v6_dot_proto_dot_services_dot_keyword__plan__ad__group__service__pb2.GetKeywordPlanAdGroupRequest.SerializeToString,
            google_dot_ads_dot_googleads__v6_dot_proto_dot_resources_dot_keyword__plan__ad__group__pb2.KeywordPlanAdGroup.FromString,
            options, channel_credentials,
            call_credentials, compression, wait_for_ready, timeout, metadata)

    @staticmethod
    def MutateKeywordPlanAdGroups(request,
            target,
            options=(),
            channel_credentials=None,
            call_credentials=None,
            compression=None,
            wait_for_ready=None,
            timeout=None,
            metadata=None):
        return grpc.experimental.unary_unary(request, target, '/google.ads.googleads.v6.services.KeywordPlanAdGroupService/MutateKeywordPlanAdGroups',
            google_dot_ads_dot_googleads__v6_dot_proto_dot_services_dot_keyword__plan__ad__group__service__pb2.MutateKeywordPlanAdGroupsRequest.SerializeToString,
            google_dot_ads_dot_googleads__v6_dot_proto_dot_services_dot_keyword__plan__ad__group__service__pb2.MutateKeywordPlanAdGroupsResponse.FromString,
            options, channel_credentials,
            call_credentials, compression, wait_for_ready, timeout, metadata)
|
import os
import pytest
from common import PORTS
@pytest.mark.parametrize("ports", PORTS)
def test_main(ports, mock_device, monkeypatch, capsys):
    """Run the CLI entry point against a mocked device and check its discovery report."""
    from numato_gpio.__main__ import main

    monkeypatch.setattr("serial.Serial.ports", ports)
    main()
    captured = capsys.readouterr()
    expected_prefix = f"Discovered devices: {os.linesep}dev: /dev/ttyACM0"
    assert captured.out.startswith(expected_prefix)
    assert not captured.err
# @pytest.mark.parametrize("ports", PORTS)
# def test_error_duplicate_device(ports, mock_device, monkeypatch):
# from numato_gpio.__main__ import main
# device2 = deepcopy(mock_device)
# monkeypatch.setattr("serial.Serial.ports", ports)
# main()
|
import json
import os
import re
import subprocess
import time
import zipfile
import config.config
# Matches completed capture files such as "capture-12.cap" / "capture-12.csv".
all_cap_csv = re.compile(r'capture-[0-9]+\.(cap|csv)$')
def save_data(to_save: dict, path: str) -> None:
    """Serialize *to_save* to *path* as pretty-printed, key-sorted JSON.

    Fix: the original used ``writelines(json.dumps(...))`` which, since a str
    is an iterable of characters, wrote the document one character at a time.
    ``json.dump`` streams directly into the file with the same output.
    """
    with open(path, 'w') as save_file:
        json.dump(to_save, save_file, sort_keys=True, indent=4)
def load_data(path: str) -> dict:
    """Read the JSON document stored at *path* and return the parsed object."""
    with open(path, 'r') as save_file:
        data = json.load(save_file)
    return data
def zip_and_delete(settings: config.config.Settings):
    """Endless worker loop: periodically zip finished capture files, then truncate them.

    Polls ``settings.capture_path`` every ``settings.zip_freq`` seconds and packs
    ``capture-N.cap``/``capture-N.csv`` files into numbered ``container-N.zip``
    archives under ``settings.zip_path``. The newest ``settings.zip_file_buffer``
    candidates are held back each round (presumably still being written --
    confirm). This function never returns.
    """
    os.makedirs(settings.zip_path, exist_ok=True)
    # find out the current container number
    # we don't want to overwrite anything in case of a restart
    zip_ctr = 0
    while True:
        filename = f'container-{zip_ctr}.zip'
        if not os.path.isfile(os.path.join(settings.zip_path, filename)):
            break
        zip_ctr += 1
    print(f"\tFirst Container: container-{zip_ctr}.zip")
    saved_files = set()
    while True:
        files = os.listdir(settings.capture_path)
        zip_list = list()
        candidates = list()
        # first we find all .cap and .csv file
        for f in files:
            if f not in saved_files:
                match = all_cap_csv.match(f)
                if match is not None:
                    candidates.append(match.group(0))
        candidates = sorted(candidates)
        # NOTE(review): if settings.zip_file_buffer == 0 this slice is
        # candidates[:-0] == [], so nothing would ever be zipped -- confirm
        # the buffer is always >= 1.
        zip_list = candidates[:-settings.zip_file_buffer]
        print(zip_list)
        # make sure we have enough files (at least MinGroupSize)
        if len(zip_list) >= int(settings.zip_group_size):
            filename = f'container-{zip_ctr}.zip'
            with zipfile.ZipFile(os.path.join(settings.zip_path, filename), 'w') as zipper:
                for f in zip_list:
                    zipper.write(os.path.join(settings.capture_path, f),
                                 f, compress_type=zipfile.ZIP_DEFLATED)
                    saved_files.add(f)
                    # because we have likely no permission to clear the file
                    # we need to sudo our way around it
                    subprocess.call("cat /dev/null | sudo tee {}".format(os.path.join(settings.capture_path, f)),
                                    shell=True, stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)
            zip_ctr += 1
        time.sleep(settings.zip_freq)
|
from django.utils import timezone
from django.db import connection, transaction
from validr import T
from rssant_common.detail import Detail
from rssant_api.monthly_story_count import MonthlyStoryCount
from .helper import Model, ContentHashMixin, models, optional, User
from .feed import Feed, UserFeed
MONTH_18 = timezone.timedelta(days=18 * 30)
ONE_MONTH = timezone.timedelta(days=30)
StoryDetailSchema = T.detail.fields("""
unique_id
image_url
sentence_count
dt_published
dt_updated
dt_created
dt_watched
dt_favorited
""").extra_fields("""
author
audio_url
iframe_url
dt_synced
summary
content
content_hash_base64
""").default(False)
STORY_DETAIL_FEILDS = Detail.from_schema(False, StoryDetailSchema).exclude_fields
USER_STORY_DETAIL_FEILDS = [f'story__{x}' for x in STORY_DETAIL_FEILDS]
class Story(Model, ContentHashMixin):
    """Story (故事): a single article/entry of a feed, addressed by (feed_id, offset)."""
    class Meta:
        unique_together = (
            ('feed', 'offset'),
            ('feed', 'unique_id'),
        )
        indexes = [
            models.Index(fields=["feed", "offset"]),
            models.Index(fields=["feed", "unique_id"]),
        ]
    class Admin:
        display_fields = ['feed_id', 'offset', 'title', 'link']
    feed = models.ForeignKey(Feed, on_delete=models.CASCADE)
    # position of the story within its feed (help_text: "Story在Feed中的位置")
    offset = models.IntegerField(help_text="Story在Feed中的位置")
    unique_id = models.CharField(max_length=200, help_text="Unique ID")
    # title / article link / author
    title = models.CharField(max_length=200, help_text="标题")
    link = models.TextField(help_text="文章链接")
    author = models.CharField(max_length=200, **optional, help_text='作者')
    # image link
    image_url = models.TextField(
        **optional, help_text="图片链接")
    # podcast audio link
    audio_url = models.TextField(
        **optional, help_text="播客音频链接")
    # video iframe link
    iframe_url = models.TextField(
        **optional, help_text="视频iframe链接")
    has_mathjax = models.BooleanField(
        **optional, default=False, help_text='has MathJax')
    is_user_marked = models.BooleanField(
        **optional, default=False, help_text='is user favorited or watched ever')
    # publish / update / create / last-sync timestamps
    dt_published = models.DateTimeField(help_text="发布时间")
    dt_updated = models.DateTimeField(**optional, help_text="更新时间")
    dt_created = models.DateTimeField(auto_now_add=True, help_text="创建时间")
    dt_synced = models.DateTimeField(**optional, help_text="最近一次同步时间")
    # summary (or short content) and full content
    summary = models.TextField(**optional, help_text="摘要或较短的内容")
    content = models.TextField(**optional, help_text="文章内容")
    sentence_count = models.IntegerField(**optional, help_text='sentence count')
    # Cached, lazily-populated list of this model's concrete DB column names.
    _STORY_FIELD_NAMES = None
    @classmethod
    def _story_field_names(cls):
        """Return (and cache) the sorted list of DB column names of this model."""
        if cls._STORY_FIELD_NAMES is None:
            names = set()
            for field in cls._meta.get_fields():
                column = getattr(field, 'column', None)
                if column:
                    names.add(column)
            cls._STORY_FIELD_NAMES = list(sorted(names))
        return cls._STORY_FIELD_NAMES
    @staticmethod
    def get_by_offset(feed_id, offset, detail=False) -> 'Story':
        """Fetch one story by (feed_id, offset); *detail* controls heavy-field loading."""
        q = Story.objects.filter(feed_id=feed_id, offset=offset)
        detail = Detail.from_schema(detail, StoryDetailSchema)
        q = q.defer(*detail.exclude_fields)
        return q.seal().get()
    @classmethod
    def batch_get_by_offset(cls, keys, detail=False):
        """Fetch many storys by a list of (feed_id, offset) pairs in one raw SQL query."""
        if not keys:
            return []
        detail = Detail.from_schema(detail, StoryDetailSchema)
        select_fields = set(cls._story_field_names()) - set(detail.exclude_fields)
        select_fields_quoted = ','.join(['"{}"'.format(x) for x in select_fields])
        # Note: below query can not use index, it's very slow
        # WHERE ("feed_id","offset")=Any(%s)
        # WHERE ("feed_id","offset")=Any(ARRAY[(XX, YY), ...])
        where_items = []
        for feed_id, offset in keys:
            # ensure integer, avoid sql inject attack
            feed_id, offset = int(feed_id), int(offset)
            where_items.append(f'("feed_id"={feed_id} AND "offset"={offset})')
        where_clause = ' OR '.join(where_items)
        sql = f"""
        SELECT {select_fields_quoted}
        FROM rssant_api_story
        WHERE {where_clause}
        """
        storys = list(Story.objects.seal().raw(sql))
        return storys
    @staticmethod
    def _dedup_sort_storys(storys):
        # Deduplicate by unique_id (keeping the newest dt_published), then sort by
        # (dt_published, unique_id) so that assigned offsets follow publish order.
        unique_storys = {}
        for story in storys:
            unique_id = story['unique_id']
            if unique_id in unique_storys:
                is_newer = story['dt_published'] > unique_storys[unique_id]['dt_published']
                if is_newer:
                    unique_storys[unique_id] = story
            else:
                unique_storys[unique_id] = story
        def key_func(x):
            return (x['dt_published'], x['unique_id'])
        storys = list(sorted(unique_storys.values(), key=key_func))
        return storys
    @staticmethod
    def bulk_save_by_feed(feed_id, storys, batch_size=100, is_refresh=False):
        """
        Deprecated since 1.5.0

        Create or update the given story dicts for one feed inside a single
        transaction; new storys get consecutive offsets appended after
        ``feed.total_storys``. Returns the list of modified pre-existing storys.
        """
        if not storys:
            return [] # modified_story_objects
        storys = Story._dedup_sort_storys(storys)
        with transaction.atomic():
            feed = Feed.objects\
                .only(
                    '_version',
                    'id',
                    'dryness',
                    'monthly_story_count_data',
                    'total_storys',
                    'dt_first_story_published',
                    'dt_latest_story_published',
                )\
                .get(pk=feed_id)
            offset = feed.total_storys
            unique_ids = [x['unique_id'] for x in storys]
            story_objects = {}
            q = Story.objects\
                .defer(
                    'content', 'summary', 'title', 'author',
                    'image_url', 'iframe_url', 'audio_url',
                )\
                .filter(feed_id=feed_id, unique_id__in=unique_ids)
            for story in q.all():
                story_objects[story.unique_id] = story
            new_story_objects = []
            modified_story_objects = []
            now = timezone.now()
            for data in storys:
                unique_id = data['unique_id']
                content_hash_base64 = data['content_hash_base64']
                is_story_exist = unique_id in story_objects
                if is_story_exist:
                    story = story_objects[unique_id]
                    # skip unchanged storys unless a forced refresh is requested
                    if (not is_refresh) and (not story.is_modified(content_hash_base64)):
                        continue
                else:
                    story = Story(feed_id=feed_id, unique_id=unique_id, offset=offset)
                    story_objects[unique_id] = story
                    new_story_objects.append(story)
                    offset += 1
                story.content_hash_base64 = content_hash_base64
                story.content = data['content']
                story.summary = data['summary']
                story.title = data["title"]
                story.link = data["link"]
                story.author = data["author"]
                story.image_url = data['image_url']
                story.iframe_url = data['iframe_url']
                story.audio_url = data['audio_url']
                story.has_mathjax = data['has_mathjax']
                # dt_published is only assigned on first save, never updated afterwards
                if not story.dt_published:
                    story.dt_published = data['dt_published']
                story.dt_updated = data['dt_updated']
                story.dt_synced = now
                if is_story_exist:
                    story.save()
                    modified_story_objects.append(story)
            if new_story_objects:
                Story.objects.bulk_create(new_story_objects, batch_size=batch_size)
            Story._update_feed_monthly_story_count(feed, new_story_objects)
            Story._update_feed_story_dt_published_total_storys(feed, total_storys=offset)
        return modified_story_objects
    @staticmethod
    def _update_feed_monthly_story_count(feed, new_story_objects):
        """Add the new storys into the feed's per-month counters and save the feed."""
        monthly_story_count = MonthlyStoryCount.load(feed.monthly_story_count_data)
        for story in new_story_objects:
            if not story.dt_published:
                continue
            year, month = story.dt_published.year, story.dt_published.month
            if not MonthlyStoryCount.is_valid_year_month(year, month):
                continue
            count = monthly_story_count.get(year, month)
            monthly_story_count.put(year, month, count + 1)
        feed.monthly_story_count = monthly_story_count
        feed.save()
    @staticmethod
    def refresh_feed_monthly_story_count(feed_id):
        """Recompute a feed's monthly story counters from scratch via raw SQL."""
        count_sql = """
        SELECT
            CAST(EXTRACT(YEAR FROM dt_published) AS INTEGER) AS year,
            CAST(EXTRACT(MONTH FROM dt_published) AS INTEGER) AS month,
            count(1) as count
        FROM rssant_api_story
        WHERE feed_id = %s AND dt_published IS NOT NULL
        GROUP BY
            CAST(EXTRACT(YEAR FROM dt_published) AS INTEGER),
            CAST(EXTRACT(MONTH FROM dt_published) AS INTEGER);
        """
        with connection.cursor() as cursor:
            cursor.execute(count_sql, [feed_id])
            rows = list(cursor.fetchall())
        items = []
        for row in rows:
            year, month, count = map(int, row)
            # drop bogus dates outside the representable year range
            if 1970 <= year <= 9999:
                items.append((year, month, count))
        monthly_story_count = MonthlyStoryCount(items)
        with transaction.atomic():
            feed = Feed.objects.filter(pk=feed_id).get()
            feed.monthly_story_count = monthly_story_count
            feed.save()
    @staticmethod
    def _update_feed_story_dt_published_total_storys(feed, total_storys):
        """Sync feed.total_storys and first/latest publish timestamps, then save."""
        if total_storys <= 0:
            return
        first_story = Story.objects\
            .only('id', 'offset', 'dt_published')\
            .filter(feed_id=feed.id, offset=0)\
            .first()
        latest_story = Story.objects\
            .only('id', 'offset', 'dt_published')\
            .filter(feed_id=feed.id, offset=total_storys - 1)\
            .first()
        feed.total_storys = total_storys
        if first_story:
            feed.dt_first_story_published = first_story.dt_published
        if latest_story:
            feed.dt_latest_story_published = latest_story.dt_published
        feed.save()
    @staticmethod
    def fix_feed_total_storys(feed_id):
        """Recount a feed's storys and repair feed.total_storys; True if it changed."""
        with transaction.atomic():
            feed = Feed.objects.only('_version', 'id', 'total_storys').get(pk=feed_id)
            total_storys = Story.objects.filter(feed_id=feed_id).count()
            if feed.total_storys != total_storys:
                feed.total_storys = total_storys
                feed.save()
                return True
        return False
    @staticmethod
    def query_feed_incorrect_total_storys():
        """Return (feed_id, stored_total, correct_total) rows where the counter drifted."""
        sql = """
        SELECT id, total_storys, correct_total_storys
        FROM rssant_api_feed AS current
        JOIN (
            SELECT feed_id, count(1) AS correct_total_storys
            FROM rssant_api_story
            GROUP BY feed_id
        ) AS correct
        ON current.id=correct.feed_id
        WHERE total_storys!=correct_total_storys
        """
        with connection.cursor() as cursor:
            cursor.execute(sql)
            return list(cursor.fetchall())
    @staticmethod
    def set_user_marked_by_id(story_id, is_user_marked=True):
        """Flag/unflag a story as having ever been favorited or watched by a user."""
        Story.objects.filter(pk=story_id)\
            .update(is_user_marked=is_user_marked)
    @staticmethod
    def delete_by_retention_offset(feed_id, retention_offset):
        """
        delete storys < retention_offset and not is_user_marked
        Returns the number of deleted rows.
        """
        n, __ = Story.objects\
            .filter(feed_id=feed_id, offset__lt=retention_offset)\
            .exclude(is_user_marked=True)\
            .delete()
        return n
    @staticmethod
    def delete_by_retention(feed_id, retention=5000, limit=5000):
        """
        Deprecated since 1.5.0
        Params:
            feed_id: feed ID
            retention: num storys to keep
            limit: delete at most limit rows
        Returns the number of deleted rows (0 when nothing was deleted).
        """
        with transaction.atomic():
            feed = Feed.get_by_pk(feed_id)
            offset = feed.retention_offset or 0
            # delete at most limit rows, avoid out of memory and timeout
            new_offset = min(offset + limit, feed.total_storys - retention)
            if new_offset > offset:
                n = Story.delete_by_retention_offset(feed_id, new_offset)
                feed.retention_offset = new_offset
                feed.save()
                return n
        return 0
class UserStory(Model):
    """Per-user state (watched / favorited flags and timestamps) for one Story."""
    class Meta:
        unique_together = [
            ('user', 'story'),
            ('user_feed', 'offset'),
            ('user', 'feed', 'offset'),
        ]
        indexes = [
            models.Index(fields=["user", "feed", "offset"]),
            models.Index(fields=["user", "feed", "story"]),
        ]
    class Admin:
        display_fields = ['user_id', 'feed_id', 'story_id', 'is_watched', 'is_favorited']
        search_fields = ['user_feed_id']
    user = models.ForeignKey(User, on_delete=models.CASCADE)
    story = models.ForeignKey(Story, on_delete=models.CASCADE)
    feed = models.ForeignKey(Feed, on_delete=models.CASCADE)
    user_feed = models.ForeignKey(UserFeed, on_delete=models.CASCADE)
    # story position within the feed (help_text: "Story在Feed中的位置")
    offset = models.IntegerField(help_text="Story在Feed中的位置")
    # creation time
    dt_created = models.DateTimeField(auto_now_add=True, help_text="创建时间")
    is_watched = models.BooleanField(default=False)
    # watched / favorited timestamps
    dt_watched = models.DateTimeField(**optional, help_text="关注时间")
    is_favorited = models.BooleanField(default=False)
    dt_favorited = models.DateTimeField(**optional, help_text="标星时间")
    @staticmethod
    def get_by_pk(pk, user_id=None, detail=False):
        """Fetch one UserStory by primary key, optionally restricted to *user_id*."""
        q = UserStory.objects.select_related('story')
        if not detail:
            q = q.defer(*USER_STORY_DETAIL_FEILDS)
        if user_id is not None:
            q = q.filter(user_id=user_id)
        user_story = q.get(pk=pk)
        return user_story
    @staticmethod
    def get_by_offset(user_id, feed_id, offset, detail=False):
        """Fetch one UserStory by its (user, feed, offset) natural key."""
        q = UserStory.objects.select_related('story')
        q = q.filter(user_id=user_id, feed_id=feed_id, offset=offset)
        if not detail:
            q = q.defer(*USER_STORY_DETAIL_FEILDS)
        user_story = q.get()
        return user_story
|
"""Benchmark YOLACT-Edge inference FPS on one COCO validation image."""
import sys
import time
import urllib.request  # fix: `import urllib` alone does not expose urllib.request

import cv2
import numpy as np

from yolact_edge.inference import YOLACTEdgeInference

# Weights file; must correspond to the chosen model config below.
weights = "yolact_edge_resnet50_54_800000.pth"
# All available model configs, depends on which weights
# you use. More info could be found in data/config.py.
model_configs = [
    'yolact_edge_mobilenetv2_config',
    'yolact_edge_vid_config',
    'yolact_edge_vid_minimal_config',
    'yolact_edge_vid_trainflow_config',
    'yolact_edge_youtubevis_config',
    'yolact_resnet50_config',
    'yolact_resnet152_config',
    'yolact_edge_resnet50_config',
    'yolact_edge_vid_resnet50_config',
    'yolact_edge_vid_trainflow_resnet50_config',
    'yolact_edge_youtubevis_resnet50_config',
]
config = model_configs[5]
# All available model datasets, depends on which weights
# you use. More info could be found in data/config.py.
datasets = [
    'coco2014_dataset',
    'coco2017_dataset',
    'coco2017_testdev_dataset',
    'flying_chairs_dataset',
    'youtube_vis_dataset',
]
dataset = datasets[1]
# Used for tensorrt calibration
calib_images = "./data/calib_images"
# Override some default configuration
config_ovr = {
    'use_fast_nms': True,  # Does not work with regular nms
    'mask_proto_debug': False,
}
model_inference = YOLACTEdgeInference(
    weights, config, dataset, calib_images, config_ovr)

img = None
try:
    with urllib.request.urlopen("http://images.cocodataset.org/val2017/000000439715.jpg") as f:
        img = np.asarray(bytearray(f.read()), dtype="uint8")
        img = cv2.imdecode(img, cv2.IMREAD_COLOR)
except Exception:
    # Best-effort download; the explicit None check below reports failure.
    pass
if img is None:
    print("Couldn't retrieve image for benchmark...")
    sys.exit(1)

print("Benchmarking performance...")
start = time.time()
samples = 200
for _ in range(samples):
    model_inference.predict(img, False)
# samples / elapsed == 1 / (elapsed / samples): same FPS formula as before.
print(f"Average {samples / (time.time() - start)} FPS")
|
from django.urls import path
from . import views
# URL routes for the employee-leave app.
urlpatterns = [
    path('', views.index, name='index'),
    path('register', views.register, name='register'),
    path('logout', views.logout, name='logout'),
    # create form (no id) and update form (with id) share the employee_form view
    path('accounts/leave/', views.employee_form,name='employee_insert'),
    path('accounts/leave/<int:id>/', views.employee_form,name='leave_update'),
    path('accounts/leave/delete/<int:id>/', views.employee_delete, name='leave_delete'),
    # NOTE(review): trailing-slash usage is inconsistent ('register' vs
    # 'accounts/list/') -- confirm this is intentional before normalizing.
    path('accounts/list/', views.employee_list, name='employee_list')
]
import pandas as pd
import os
import datetime
import fsspec
import xarray as xr
def add_time_info(df, verbose=False):
    '''add start, stop and nt for all zstores in df

    Opens each zstore and records its time span (start/stop as YYYY-MM-DD),
    number of time steps (nt), calendar, time units and time value type.
    Returns a copy of *df* with the new columns; stores with no 'time'
    coordinate get 'NA' placeholders.
    '''
    starts = []; stops = []; nts = []; calendars = []; units = []; ttypes = []
    dz = df.copy()
    for index, row in df.iterrows():
        zstore = row.zstore
        ds = xr.open_zarr(fsspec.get_mapper(zstore), consolidated=True)
        # Bug fix: the original set `start = 'NA'` twice and never initialized
        # `stop`, `calendar`, `unit` or `ttype`, so a store without a time
        # coordinate raised NameError (first row) or silently reused stale
        # values from the previous row.
        start = 'NA'
        stop = 'NA'
        nt = '1'
        calendar = 'NA'
        unit = 'NA'
        ttype = 'NA'
        if 'time' in ds.coords:
            ttype = str(type(ds.time.values[0]))
            dstime = ds.time.values.astype('str')
            start = dstime[0][:10]
            stop = dstime[-1][:10]
            calendar = ds.time.encoding['calendar']
            unit = ds.time.encoding['units']
            nt = len(dstime)
        if verbose:
            print(zstore, start, stop, nt)
        starts += [start]
        stops += [stop]
        nts += [nt]
        calendars += [calendar]
        units += [unit]
        ttypes += [ttype]
    dz['start'] = starts
    dz['stop'] = stops
    dz['nt'] = nts
    dz['calendar'] = calendars
    dz['time_units'] = units
    dz['time_type'] = ttypes
    return dz
# define a simple search on keywords
def search_df(df, verbose=False, **search):
    '''search by keywords - if list, then match exactly, otherwise match as substring'''
    keys = ['activity_id', 'institution_id', 'source_id', 'experiment_id',
            'member_id', 'table_id', 'variable_id', 'grid_label']
    result = df
    for skey, svalue in search.items():
        if isinstance(svalue, str):
            # single string: substring match on the column
            result = result[result[skey].str.contains(svalue)]
        else:
            # list of strings: keep rows matching any entry exactly
            exact_matches = [result[result[skey] == item] for item in svalue]
            result = pd.concat(exact_matches)
        keys.remove(skey)
    if verbose:
        # show remaining (unsearched) facets and their value sets
        for key in keys:
            print(key, ' = ', list(result[key].unique()))
    return result
from functools import partial
def getFolderSize(p):
    """Return the total size in bytes of all regular files under *p*, recursively."""
    total = 0
    for entry in os.listdir(p):
        full_path = os.path.join(p, entry)
        if os.path.isfile(full_path):
            total += os.path.getsize(full_path)
        else:
            total += getFolderSize(full_path)
    return total
def get_zid(gsurl):
    ''' given a GC zarr location, return the dataset_id components as a list'''
    # bucket prefix must be exactly 'gs://cmip6'
    assert gsurl[:10] == 'gs://cmip6'
    # drop the 'gs://cmip6/' prefix (11 chars) and the trailing '/'
    inner = gsurl[11:-1]
    return inner.split('/')
def get_zdict(gsurl):
    ''' given a GC zarr location, return a dictionary of keywords'''
    facet_names = ['activity_id', 'institution_id', 'source_id', 'experiment_id',
                   'member_id', 'table_id', 'variable_id', 'grid_label']
    # zip pairs each facet name with the corresponding path component
    return dict(zip(facet_names, get_zid(gsurl)))
def remove_from_GC(gsurl,execute=False):
    '''gsurl is a GC zstore, use execute=False to test, execute=True to remove

    Removes the store from the GC bucket and then from the GC listing CSV.
    '''
    remove_from_GC_bucket(gsurl,execute=execute)
    remove_from_GC_listing(gsurl,execute=execute)
    return
def remove_from_local(gsurl,execute=False):
    '''gsurl is a GC zstore, use execute=False to test, execute=True to remove

    Removes local drive copies; if the store was found on a shelf listing,
    also removes it from the concatenated local listing.
    '''
    remove_from_drives(gsurl,execute=execute)
    ret = remove_from_shelf(gsurl,execute=execute)
    if ret==1:
        remove_from_local_listings(gsurl,execute=execute)
    else:
        print('zstore is not in any shelf listings')
    return
def remove_from_catalogs(gsurl, execute=False):
    '''gsurl is a GC zstore, use execute=False to test, execute=True to remove

    Bug fix: the original ignored *execute* and always rewrote the catalog,
    contradicting its own docstring. Now execute=False is a true dry run,
    and execute=True backs up each catalog (dated copy) before dropping the
    matching zstore row.
    '''
    date = str(datetime.datetime.now().strftime("%Y%m%d"))
    cat_files = ['csv/pangeo-cmip6-noQC']
    for cat_file in cat_files:
        if execute:
            # keep a dated backup before modifying the catalog in place
            os.system(f'cp {cat_file}.csv {cat_file}-{date}.csv')
            df = pd.read_csv(f'{cat_file}.csv', dtype='unicode')
            df = df[df['zstore'] != gsurl]
            df.to_csv(f'{cat_file}.csv', mode='w+', index=False)
        else:
            print(f'would remove {gsurl} from {cat_file}.csv (backup: {cat_file}-{date}.csv)')
    return
def remove_from_GC_bucket(gsurl, execute=False):
    '''delete old version in GC'''
    # strip the trailing '/' so gsutil removes the store directory itself
    target = gsurl[:-1]
    command = f'/usr/bin/gsutil -m rm -r {target}'
    if execute:
        os.system(command)
    else:
        # dry run: just show what would be executed
        print(command)
    return
def remove_from_GC_listing(gsurl, execute=False):
    '''delete entry in ncsv/GC_files_{activity_id}-{institution_id}.csv'''
    zdict = get_zdict(gsurl)
    activity_id = zdict['activity_id']
    institution_id = zdict['institution_id']
    file = f'ncsv/GC_files_{activity_id}-{institution_id}.csv'
    if not execute:
        # dry run: report which listing would be rewritten
        print('modifying ', file)
        return
    with open(file, "r") as f:
        lines = f.readlines()
    # keep every line except the one naming this store's .zmetadata entry
    kept = [line for line in lines if line.strip("\n") != gsurl + ".zmetadata"]
    with open(file, "w") as f:
        f.writelines(kept)
    return
from glob import glob
def remove_from_drives(gsurl, execute=False):
    '''delete old local copy(ies)'''
    # same store may exist on several mounted /h* drives
    pattern = '/h*/naomi/zarr-minimal' + gsurl[10:]
    gdirs = glob(pattern)
    if not gdirs:
        print('zstore is not on any mounted drives')
        return
    for gdir in gdirs:
        command = '/bin/rm -rf ' + gdir
        if execute:
            os.system(command)
        else:
            # dry run: show what would be removed
            print(command)
    return
def remove_from_shelf(gsurl,execute=False):
    '''delete entry(ies) in shelf-new/h*.csv

    Looks up *gsurl* in the concatenated listing shelf-new/local.csv; for each
    matching zstore path, rewrites the per-drive listing shelf-new/<drive>.csv
    without that row, temporarily making the file writable if needed.
    Returns 1 if anything matched, 0 otherwise.
    '''
    file = 'shelf-new/local.csv'
    df_local = pd.read_csv(file, dtype='unicode')
    zpaths = df_local[df_local.zstore.str.contains(gsurl[10:-1])].zstore.values
    if len(zpaths)==0:
        return 0
    for zpath in zpaths:
        #print(' ',zpath)
        # NOTE(review): assumes zpath starts with '/', so split('/')[1] is the
        # drive directory name -- confirm against the listing format.
        ldir = zpath.split('/')[1]
        file = 'shelf-new/' + ldir + '.csv'
        writeable = os.access(file, os.W_OK)
        # listing files may be read-only: chmod them writable just for the rewrite
        if not writeable:
            command = "chmod u+w " + file
            if execute:
                os.system(command)
        dfff = pd.read_csv(file, dtype='unicode')
        dff = dfff[dfff.zstore != zpath]
        if execute:
            dff.to_csv(file, mode='w+', index=False)
        else:
            print(zpath,f'dff.to_csv({file})')
        # restore the original read-only permission
        if not writeable:
            command = "chmod u-w " + file
            if execute:
                os.system(command)
    return 1
def remove_from_local_listings(gsurl, execute=False):
    '''remove from concatenated catalog

    Drops every row of shelf-new/local.csv whose zstore path matches *gsurl*.
    With execute=False only prints what would be written.

    Bug fix: the original re-filtered from the ORIGINAL frame on every loop
    iteration and rewrote the file each time, so when several rows matched
    only the last match actually ended up removed.
    '''
    file = 'shelf-new/local.csv'
    df_local = pd.read_csv(file, dtype='unicode')
    zpaths = df_local[df_local.zstore.str.contains(gsurl[10:-1])].zstore.values
    # filter cumulatively: drop ALL matching rows at once
    dff = df_local[~df_local.zstore.isin(zpaths)]
    if execute:
        if len(zpaths) > 0:
            dff.to_csv(file, mode='w+', index=False)
    else:
        for zpath in zpaths:
            print(f'dff.to_csv({file})')
    return
from kivy.app import App
from kivy.config import Config
from kivy.lang import Builder
from kivy.uix.screenmanager import ScreenManager, Screen, FadeTransition
from MainPanel import MainPanel
from ConfigureScreen import Configure
# Kivy graphics mode: 'auto' -> Fullscreen | '0' -> NormalMode
Config.set('graphics', 'fullscreen', 'auto')
class MyScreenManager(ScreenManager):
    # Empty subclass; presumably referenced by name in the kv definition
    # file loaded below -- confirm before removing.
    pass
# Build the widget tree from the kv definition file. Use a context manager so
# the file handle is closed promptly (the original left it open forever).
with open('window-image-conf', 'r') as myfile:
    root_widget = Builder.load_string(myfile.read())
class Panel(App):
    """Kivy application whose root widget is built from 'window-image-conf'."""
    def build(self):
        # Return the widget tree that was parsed at module import time.
        return root_widget

if __name__ == "__main__":
    Panel().run()
|
"""
lvsfunc, a collection of VapourSynth functions and wrappers written and/or modified by LightArrowsEXE.
If you spot any issues, please do not hesitate to send in a Pull Request
or reach out to me on Discord (LightArrowsEXE#0476)!
"""
# flake8: noqa
from . import (aa, comparison, dehalo, dehardsub, deinterlace, denoise,
kernels, mask, misc, recon, scale, types, util)
# Aliases: short names for commonly used public functions.
comp = comparison.compare
diff = comparison.diff
ef = misc.edgefixer
rfs = util.replace_ranges
scomp = comparison.stack_compare
sraa = aa.upscaled_sraa
src = misc.source
# Both names point to the same class; 'demangle' appears to be an older
# alias kept for backward compatibility -- confirm before removing either.
demangle = recon.ChromaReconstruct
crecon = recon.ChromaReconstruct
|
import os
import pytest
# Opt-in switch: set AWNAS_TEST_NASBENCH to run the slow NAS-Bench tests.
AWNAS_TEST_NASBENCH = os.environ.get("AWNAS_TEST_NASBENCH", None)
@pytest.mark.skipif(not AWNAS_TEST_NASBENCH, reason="do not test the nasbench301 BTC by default.")
@pytest.mark.parametrize("case", ["xgb_v0.9", "gnn_gin_v0.9"])
def test_nb301_query(case):
    """Smoke-test NB301 surrogate queries on random and mutated rollouts."""
    from aw_nas.btcs.nasbench_301 import NB301SearchSpace
    from aw_nas.btcs.nasbench_301 import NB301Evaluator

    search_space = NB301SearchSpace()
    model_dir = os.path.expanduser("~/awnas/data/nasbench-301/nb_models")
    rollouts = [search_space.random_sample() for _ in range(2)]
    # also exercise the mutation path on both sampled rollouts
    rollouts += [search_space.mutate(rollout) for rollout in rollouts[:2]]
    evaluator = NB301Evaluator(
        None, None, None,
        path=os.path.join(model_dir, case))
    print(evaluator.evaluate_rollouts(rollouts, False))
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.