content stringlengths 5 1.05M |
|---|
""" Class to control the lights """
from platform import system
from colour import Color
import random, copy
import k24.binding_winbond as binding_winbond
class Keylights:
"""
see: Keylights.setall() and Keylights.setkey() for practical uses
"""
vid=0
pid=0
def __init__(self):
self.adapter = binding_winbond.Usblinux()
self.vid=binding_winbond.vid
self.pid=binding_winbond.pid
return
def gencolorprofile(self, color):
isvalid_hex = False
try:
isvalid_hex = int(color, 16)
colorstr = color
except:
color = Color(color)
colorstr = color.hex_l[1:]
if isvalid_hex > 16777215:
print('the max hex value of ffffff has been exceeded....exiting')
exit()
return colorstr
def setall(self, key_color, ambient_color, brightness):
"""
Sets the color of all keys.
The color parameter is a colour.Color object
or alternatively a string interpretable by the colour.Color constructor
"""
self.adapter.setcolor(self.gencolorprofile(key_color), self.gencolorprofile(ambient_color), brightness)
|
"""
Not every sitemap needs to be as complex as
the post sitemap. Django anticipates that in
most cases, we will simply override the
items() method of the Sitemap subclass and
nothing else. To make this easy the sitemaps
app supplies the GenericSitemap class, which
can be passed a dictionary of items to
automatically generate a Sitemap subclass.
"""
from django.contrib.sitemaps import (
GenericSitemap, Sitemap
)
from .models import Tag, Startup
# GenericSitemap only needs a queryset to build a sitemap for Tag objects;
# no custom Sitemap subclass is required for them.
tag_sitemap_dict = {
    'queryset': Tag.objects.all(),
}
TagSitemap = GenericSitemap(tag_sitemap_dict)
class StartupSitemap(Sitemap):
    """Sitemap entries for Startup objects."""
    model = Startup

    def items(self):
        """Every Startup is listed in the sitemap."""
        return self.model.objects.all()

    def lastmod(self, startup):
        """
        Last-modified timestamp for *startup*: the publication date of its
        most recent news link if any exist, otherwise its founded date.
        """
        if not startup.newslink_set.exists():
            return startup.founded_date
        return startup.newslink_set.latest().pub_date
|
# Refer: https://arxiv.org/pdf/1308.4008.pdf (Momin, J. A. M. I. L., & Yang, X. S. (2013). A literature survey of benchmark functions for global optimization problems. Journal of Mathematical Modelling and Numerical Optimisation, 4(2), 150-194.)
import numpy as np
import matplotlib.pyplot as plt
import pso_solver
# Fixed seed so runs are reproducible.
pso_solver.setSeed(1)
# Cubic search box: same bounds on every axis.
lower = xlower = ylower = zlower = -50
upper = xupper = yupper = zupper = 50
# 20 particles in 3 dimensions, uniformly random inside the box.
particleList = pso_solver.createRandomParticleList(3, numParticles=20, lower=lower, upper=upper)
# Testing on Chung Reynolds function
f = lambda x, y, z: (x**2 + y**2 + z**2)**2
# Minimize (maxFlag=False) over 20 iterations, animating each step.
pso_solver.psoVisualizeTrivariate(particleList, f, xlower, xupper, ylower, yupper, zlower, zupper, c1=1, c2=1, W=0.5, numIters=20, maxFlag=False, sleepTime=0.5, accuracy=2, verbose=False)
|
import numpy
import os
numpy.seterr('ignore')
luz = int(300000000)
while True:
inicio = input("¿que deseas?(entrar, salir o limpiar): \n>")
print ("")
if inicio == "entrar" or inicio == "enter":
opcion = input("elige la magnitud(energia, fuerza o recorrido): \n>")
if opcion == "energia" or opcion == "energy" or opcion == "energi":
try:
print(" ")
masa_en_reposo = float(input("ingresa la masa (kg): \n>"))
print(" ")
aceleracion = float(input("ingresa la aceleracion (m/s): \n>"))
if aceleracion > (luz):
print ("las ecuaciones de einstein impiden velocidades superluminicas por lo que intentarlo conlleva numeros “rotos” aun así, si te interesa ver el resultado, elimina esta condicion del codigo y veras un “nan”.")
print("")
else:
relatividad = (masa_en_reposo/(numpy.sqrt(1-(aceleracion**2)/luz**2)))
energia = (0.5*(relatividad * (aceleracion**2)))
print ("la energia usada son:", energia,"J\n")
except:
print("")
print(" ")
elif opcion == "fuerza" or opcion == "momento lineal" or opcion == "momentum" or opcion == "impetú":
try:
print("")
masa_en_reposo = float(input("ingresa la masa (kg): \n>"))
print(" ")
aceleracion = float(input("ingresa la aceleracion (m/s): \n>"))
print(" ")
if (aceleracion) > (luz):
print ("las ecuaciones de einstein impiden velocidades superluminicas por lo que intentarlo conlleva numeros “rotos” aun así, si te interesa ver el resultado, elimina esta condicion del codigo y veras un “nan”.")
print("")
else:
relatividad = (masa_en_reposo/(numpy.sqrt(1-(aceleracion**2)/luz**2)))
fuerza = (relatividad * aceleracion)
peso = (fuerza/9.81)
print ("la fuerza (newtons) es:",fuerza,"N")
print (" ")
print("la fuerza (kilogramos) es:", peso, "kg/f\n")
print(" ")
except:
print("")
print(" ")
elif opcion == "recorrido":
try:
print(" ")
espacio = float(input("ingresa la distancia a recorrer(m):\n>"))
print(" ")
velocidad = float(input("ingresa la velocidad (m/s): \n>"))
print("")
if velocidad > (luz):
print ("las ecuaciones de einstein impiden velocidades superluminicas por lo que intentarlo conlleva numeros “rotos” aun así, si te interesa ver el resultado, elimina esta condicion del codigo y veras un “nan”.")
print("")
else:
relatividad = (espacio *(numpy.sqrt(1-(velocidad**2)/(luz**2))))
dilatacion = (1*(numpy.sqrt(1-(velocidad**2)/(luz**2))))
tiempo = (relatividad/velocidad)
total= (tiempo/dilatacion)
print ("recorrer la distancia de", espacio,"metros", "toma", tiempo, "segundos")
print(" ")
except:
print("")
else:
print("")
elif inicio == "salir" or inicio == "exit":
break
elif inicio == "limpiar" or inicio == "clean":
if os.name == "posix":
os.system ("clear")
elif os.name == "ce" or os.name == "nt" or os.name == "dos":
os.system ("cls")
else:
print("ingresa opcion valida")
print(" ")
|
import logging
import os

import jenga.compile
import jenga.crossreference
import jenga.interpret
import jenga.lex
import jenga.preprocess
import jenga.util
from jenga.types import Word
def gen_ir(fn: str, include_paths: list[str]) -> list[Word]:
    """Lex, preprocess and crossreference *fn* into the intermediate
    representation (a list of Words)."""
    logging.info("Lexing file")
    lexed = jenga.lex.lex_file(fn)
    logging.info("Preprocessing program")
    preprocessed = jenga.preprocess.preprocess_program(lexed, fn, include_paths)
    logging.info("Crossreferencing program")
    return jenga.crossreference.crossreference_program(preprocessed)
def simulate_file(fn: str, include_paths: list[str]):
    """Run a Jenga program in the interpreter instead of compiling it."""
    ir = gen_ir(fn, include_paths)
    logging.info("Interpreting file")
    jenga.interpret.interpret(ir)
def compile_file(fn: str, src_folder: str, base_file: str, outpath: str, run: bool, nocleanup: bool, include_paths: list[str]):
    """Compile a Jenga program to a native executable via nasm and ld.

    Writes <src_folder>/<base_file>.asm and .o as intermediates, removing
    them afterwards unless *nocleanup* is set.  If *run* is true, the
    resulting binary is executed.  Exits the process if nasm or ld fail.
    (Requires `import jenga.util` at the top of the file.)
    """
    words = gen_ir(fn, include_paths)
    logging.info("Compiling file")
    tmp_fn = os.path.join(src_folder, base_file)
    asm_path = tmp_fn + '.asm'
    o_path = tmp_fn + '.o'
    jenga.compile.compile(words, asm_path)
    if not jenga.util.is_cmd("nasm"):
        logging.error("'nasm' not found, use your own assembler")
        return

    def _run_checked(cmd):
        # Run a subcommand and abort compilation on a non-zero exit code.
        res = jenga.util.run_cmd(cmd)
        if res.returncode != 0:
            logging.critical(f"Subcommand failed with code {res.returncode}, exiting")
            exit(res.returncode)
        return res

    # NOTE(review): -fmacho64 and the SDK path below are macOS-specific.
    _run_checked(["nasm", "-fmacho64", "-o", o_path, asm_path])
    _run_checked(["ld", "-o", outpath, o_path, "-L/Library/Developer/CommandLineTools/SDKs/MacOSX.sdk/usr/lib", "-lSystem"])
    if run:
        res = jenga.util.run_cmd([outpath])
        logging.info(f"File ran with exit code {res.returncode}")
    if not nocleanup:
        os.remove(asm_path)
        os.remove(o_path)
|
"""Payment forwarder."""
import pytest
import datetime
from eth_tester.exceptions import TransactionFailed
from web3.contract import Contract
@pytest.fixture
def issuer_token(chain, team_multisig) -> Contract:
    """Create the token contract."""
    # Deploy args: owner, name, symbol, total supply, decimals, and the
    # release timestamp (2017-04-22 16:00 as seconds since the epoch).
    args = [team_multisig, "Foobar", "FOOB", 1000000, 0, int((datetime.datetime(2017, 4, 22, 16, 0) - datetime.datetime(1970, 1, 1)).total_seconds())]
    tx = {
        "from": team_multisig
    }
    contract, hash = chain.provider.deploy_contract('CentrallyIssuedToken', deploy_args=args, deploy_transaction=tx)
    # Release transfers immediately so the tests can move balances around.
    contract.functions.releaseTokenTransfer().transact({"from": team_multisig})
    return contract
@pytest.fixture
def issuer_id_1() -> int:
    """First distinct issuance id used by the tests."""
    return 1  # the int() wrapper on a literal was redundant
@pytest.fixture
def issuer_id_2() -> int:
    """Second distinct issuance id used by the tests."""
    return 2  # the int() wrapper on a literal was redundant
@pytest.fixture
def issue_script_owner(accounts):
    """Ethereum account that interacts with issuer contract."""
    # Test account at index 8 of the chain's account list.
    return accounts[8]
@pytest.fixture
def issuer_with_id(chain, team_multisig, issuer_token, issue_script_owner):
    """Deploy IssuerWithId, approved to spend 2000 of team_multisig's tokens."""
    # Deploy args: owner of the issuer, token source account, token address.
    args = [issue_script_owner, team_multisig, issuer_token.address]
    tx = {
        "from": team_multisig
    }
    contract, hash = chain.provider.deploy_contract('IssuerWithId', deploy_args=args, deploy_transaction=tx)
    # Set issuance allowance
    assert issuer_token.functions.balanceOf(team_multisig).call() > 2000
    issuer_token.functions.approve(contract.address, 2000).transact({"from": team_multisig})
    return contract
def test_issuer_with_id(web3, issuer_with_id, issue_script_owner, customer, issuer_token, team_multisig, issuer_id_1):
    """Issuing tokens moves them from team_multisig to the customer."""
    team_multisig_begin = issuer_token.functions.balanceOf(team_multisig).call()
    # Sanity: the fixture approved exactly 2000 and made the owner the issuer.
    assert issuer_token.functions.allowance(team_multisig, issuer_with_id.address).call() == 2000
    assert issuer_with_id.functions.owner().call() == issue_script_owner
    issuer_with_id.functions.issue(customer, 1000, issuer_id_1).transact({"from": issue_script_owner})
    # Issued tokens are counted and credited to the customer...
    assert issuer_with_id.functions.issuedCount().call() == 1000
    assert issuer_token.functions.balanceOf(customer).call() == 1000
    # ...and debited from team_multisig.
    team_multisig_end = issuer_token.functions.balanceOf(team_multisig).call()
    assert team_multisig_begin - team_multisig_end == 1000
def test_issuer_with_different_ids(web3, issuer_with_id, issue_script_owner, customer, issuer_id_1, issuer_id_2):
    """Two issuances with distinct ids are both accepted."""
    issuer_with_id.functions.issue(customer, 500, issuer_id_1).transact({"from": issue_script_owner})
    issuer_with_id.functions.issue(customer, 500, issuer_id_2).transact({"from": issue_script_owner})
def test_issuer_with_id_too_many(web3, issuer_with_id, issue_script_owner, customer, issuer_id_1):
    """Issue over allowance."""
    # 3000 exceeds the 2000-token allowance set in the fixture.
    with pytest.raises(TransactionFailed):
        issuer_with_id.functions.issue(customer, 3000, issuer_id_1).transact({"from": issue_script_owner})
def test_issuer_with_id_twice(web3, issuer_with_id, issue_script_owner, customer, issuer_id_1):
    """A second issuance reusing the same id is rejected."""
    issuer_with_id.functions.issue(customer, 500, issuer_id_1).transact({"from": issue_script_owner})
    with pytest.raises(TransactionFailed):
        issuer_with_id.functions.issue(customer, 500, issuer_id_1).transact({"from": issue_script_owner})
def test_issuer_with_id_not_an_owner(web3, issuer_with_id, customer, issuer_id_1):
    """Somebody tries to issue for themselves."""
    # Only the configured owner account may call issue().
    with pytest.raises(TransactionFailed):
        issuer_with_id.functions.issue(customer, 500, issuer_id_1).transact({"from": customer})
|
# -*- encoding: utf-8 -*-
'''
Filename :lib_circuit_base.py
Description :This document is used for fundamental class of quantum circuit
Time :2021/09/26 13:58:23
Author :Weiwen Jiang & Zhirui Hu
Version :1.0
'''
import sys
import numpy as np
import numpy as np
from qiskit.tools.monitor import job_monitor
from qiskit import QuantumRegister
from qiskit.extensions import UnitaryGate
from qiskit import Aer, execute,IBMQ,transpile
import math
from qiskit import BasicAer
import copy
import abc
class BaseCircuit(metaclass=abc.ABCMeta):
    """BaseCircuit is a class, which includes fundamental functions of a circuit module.

    Args:
        n_qubits: input qubits of each unit
        n_repeats: repeat times of each unit
    """

    def __init__(self, n_qubits, n_repeats):
        self.n_qubits = n_qubits
        self.n_repeats = n_repeats

    def add_qubits(self, circuit, name, number):
        """
        Function add_qubits is to add a group of qubits to a circuit. [Test at 09/29]
        Args:
            circuit: The circuit that you add the unit at the end
            name: The name of the group
            number: The number of qubits in the group.
        Returns:
            qubits: The register of qubits
        """
        qubits = QuantumRegister(number, name)
        circuit.add_register(qubits)
        return qubits

    def add_input_qubits(self, circuit, name):
        """
        Function add_input_qubits is to add a group of qubits as input qubit.
        Adds self.n_repeats registers of self.n_qubits qubits each.
        Args:
            circuit: The circuit that you add the unit at the end
            name: The name of the group (suffixed with the repeat index)
        Returns:
            inps: list of the created registers, one per repeat
        """
        inps = []
        for i in range(self.n_repeats):
            inp = QuantumRegister(self.n_qubits, name + str(i) + "_qbit")
            circuit.add_register(inp)
            inps.append(inp)
        return inps

    ############# Weiwen&Zhirui on 2021/09/26 ############
    # Function: forward
    # Note: Append this unit's gates to the given circuit;
    #       must be implemented by every subclass.
    # Parameters:
    #   circuit: The circuit that you add the unit at the end
    ######################################################
    # abc.abstractclassmethod is deprecated and would have made forward a
    # classmethod even though it takes self; abstractmethod is correct here.
    @abc.abstractmethod
    def forward(self, circuit):
        pass
|
import numpy as np
import os
from wafl_interface import WAflInterface
from util import fast_hash
import json
import shutil
from collections import Counter,defaultdict
COV_MAX_SIZE = 65536
# CONSTANTS to depict change in coverage
COV_NO_CHANGE = -1
COV_CHANGE = 0
COV_INCREASE = 1
COV_SOFT_INCREASE = 2
COV_SOFT_DECREASE = 3
COV_DECREASE = 4
class MultiStats():
    """Aggregates fuzzing statistics and dumps each one to a JSON file."""

    def __init__(self):
        # per-seed frequencies of coverage hashes seen during training
        self.seed_cov = defaultdict(Counter)
        # frequency of each COV_* change type
        self.cov_change = Counter()
        # how often each splicing partner was used
        self.spliced = Counter()

    def dump(self, fname):
        """Write every stats attribute as JSON to '<fname>_<attr>'."""
        for attr in vars(self):
            # json.dump writes str, so the file must be opened in text mode;
            # the original 'wb' raised TypeError on Python 3.
            with open("{}_{}".format(fname, attr), 'w') as f:
                json.dump(getattr(self, attr), f)

    def witness_training(self, seed, buf, cov, splicing_with):
        """Record one training event: coverage hash per seed + splice partner."""
        # TODO would be nice to get the exec_cksum from afl
        h = fast_hash(cov)
        self.seed_cov[seed][h] += 1
        self.spliced.update([splicing_with])

    def witness_cov_change(self, change_type):
        """Count an observed coverage-change classification (COV_* constant)."""
        self.cov_change[change_type] += 1
class WAflModel(WAflInterface):
    """Byte-weight learning model guiding AFL's mutation placement.

    Keeps one weight per byte for every known seed.  Weights of mutated
    bytes are rewarded when the mutation changed coverage and penalized
    when it did not; at the end of each seed the weights are normalized
    and handed back to AFL.
    """

    def __init__(self, save_incremental_dir=None, stats=None, alpha=0.5, beta=0.4, gamma=0.3, delta=0.2, epsilon=0.1, profile=None):
        """
        Seeds is a list of buffers, optional
        """
        super(WAflModel, self).__init__()
        self.seed_table = {}  # structure will be {seed_id: bytes}#
        self.weight_table = {}  # structure will be {seed_id: np.zeros(len(seed), dtype=np.float64)
        self.latest_cov = {}  # structure will be {seed_id: np.zeros(COV_MAX_SIZE), dtype=uint32}
        self.cov_counter = {}
        # Cache of coverage-change results keyed by (old_cksum, new_cksum).
        # BUGFIX: was a class-level mutable attribute, shared by every
        # WAflModel instance; now per-instance.
        self._cov_change_cache = {}
        # Save off params for rewarding/penalizing training
        self.alpha = alpha
        self.beta = beta
        self.gamma = gamma
        self.delta = delta
        self.epsilon = epsilon
        # other
        self.profile = profile
        self.stats = stats
        self.curr_cycle = None
        self.save_incremental_dir = save_incremental_dir
        if self.save_incremental_dir:
            try:
                os.mkdir(self.save_incremental_dir)
            except OSError:
                pass  # directory already exists

    def binarize_cov(self, cov):
        """Turn a raw coverage buffer into a 0/1 uint8 array (hit / not hit)."""
        cov = np.frombuffer(cov, dtype=np.uint8)
        return np.where(cov == 0, 0, 1).astype(np.uint8)

    def got_new_seed(self, seed_id, buf, cov):
        """
        Model received a new seed to store away.
        :param seed_id: int
        :param buf: str
        :param cov: str
        :return:
        """
        if seed_id in self.seed_table:
            print("updating existing seed (id=%d, len=%d)." % (seed_id, len(buf)))
        else:
            print("got new seed (id=%d, len=%d)." % (seed_id, len(buf)))
        self.seed_table[seed_id] = np.frombuffer(buf, dtype=np.uint8)
        # Weights start at zero and are adjusted by got_training().
        self.weight_table[seed_id] = np.zeros(len(buf), dtype=np.float64)
        self.latest_cov[seed_id] = self.binarize_cov(cov)
        # with np.printoptions(threshold=np.inf, suppress=True):
        #     print('cov nonzero at:')
        #     print(np.where(self.latest_cov[seed_id]))
        #     print('seed nonzero at:')
        #     print(np.where(self.seed_table[seed_id]))

    def get_weights(self, seed_id):
        """
        Given a x from AFL, calculate the weight vector of each byte.
        The weight vector is a set of w_1,w_2,...w_n where w_1 is the probability of that
        byte to be mutated. n is the length of x.
        If id is provided, use that seed's weight vector from weight_table.
        If None, calculate closest neighbor to x from weight_table.
        :param seed_id:
        :return: w, a numpy array of weights
        """
        if seed_id in self.weight_table:
            return self.weight_table[seed_id]
        else:
            raise KeyError("Cannot find the seed_id {} in the weight table. "
                           "It probably never got initialized.".format(seed_id))

    def calc_bytes_changed(self, seed_id, new_bytes):
        """
        (int, str) -> np.array
        Calculates the bytes that mutated from seed to new_bytes
        :param seed_id:
        :param new_bytes:
        :return: array of [0,0,1] where 1 is a byte change
        """
        seed_bytes = self.seed_table[seed_id]
        # return new_bytes ^ seed_bytes
        return (new_bytes != seed_bytes).astype(np.uint8)

    def calc_cov_change(self, seed_id, cov_new, old_cksum, new_cksum):
        """Cached wrapper around _calc_cov_change, keyed by checksum pair."""
        # check the cache first
        t = (old_cksum, new_cksum)
        result = self._cov_change_cache.get(t, None)
        if result is not None:
            # yay cache hit
            return result
        # boo we have to call the real function
        result = self._calc_cov_change(seed_id, cov_new)
        self._cov_change_cache[t] = result
        return result

    def _calc_cov_change(self, seed_id, cov_new):
        """
        (int, str) -> int
        Calculate the coverage change between latest stored coverage of seed_id and cov_new
        :param seed_id:
        :param cov_new:
        :return: one of the COV_* constants
        """
        # TODO, think about this: if the branches that differed were ones that we've hit a lot before (relative "a lot"), should we reward that?
        cov_old = self.latest_cov[seed_id]
        cov_new = self.binarize_cov(cov_new)
        # print('old != new:', np.where(cov_old != cov_new))
        # NO CHANGE
        if len(cov_new) == len(cov_old) and np.all(cov_new == cov_old):
            return COV_NO_CHANGE
        # Determine if new coverage has kept old coverage despite the difference
        diff_cov = cov_new ^ cov_old
        change = cov_new[np.where(diff_cov)] == 1  # if the changed bits in the new coverage is not zero, we gained
        cov_new_sum = np.sum(cov_new)
        cov_old_sum = np.sum(cov_old)
        cov_return = None
        # ** Strict increase
        # There was a change and overall, coverage increased
        if (cov_new_sum > cov_old_sum) and np.all(change):
            # Since it was a strict increase, store this new increase
            # TODO DGL - I don't think we should ever modify latest_cov this; just stick with the coverage of the actual seed
            # self.latest_cov[seed_id] = cov_new
            cov_return = COV_INCREASE
        # ** Soft increase
        # There was a change and overall, the coverage didn't increase
        elif (cov_new_sum > cov_old_sum) and np.any(change):
            cov_return = COV_SOFT_INCREASE
        # ** Strict decrease
        # There was a change and overall, coverage decreased
        elif (cov_new_sum < cov_old_sum) and np.all(~change):
            cov_return = COV_DECREASE
        # ** Soft decrease
        # The coverage decreased but the coverage increased in another spot
        elif (cov_new_sum < cov_old_sum) and np.any(change):
            cov_return = COV_SOFT_DECREASE
        # ** Some change happened, but none of the above ones
        else:
            cov_return = COV_CHANGE
        if self.stats is not None:
            self.stats.witness_cov_change(cov_return)
        return cov_return

    def got_training(self, seed_id, new_bytes, cov_new, mutation_seq, splicing_with, old_cksum, new_cksum):
        """
        Given a buffer and edge coverage from AFL, update the seed_id's weights
        :param seed_id: ancestor seed that new_bytes came from
        :param new_bytes: byte buffer of mutated buffer
        :param cov_new: edge coverage of new_bytes
        :return: the COV_* change constant, or None if no training happened
        """
        # Get seed bytes
        seed_bytes = self.seed_table[seed_id]
        cov_change = None
        # TODO: Only calculate change if one ancestor. Handle this if too many instances where >1 ancestor
        # TODO: cannot handle different length seed and new bytes
        if splicing_with is None and len(seed_bytes) == len(new_bytes):
            new_bytes = np.frombuffer(new_bytes, dtype=np.uint8)
            # calculate bytes_changed from new_bytes
            bytes_changed = self.calc_bytes_changed(seed_id, new_bytes)
            cov_change = self.calc_cov_change(seed_id, cov_new, old_cksum, new_cksum)
            # print('cov_change:', cov_change)
            # Reward/penalty schedule: full reward for strict coverage gains,
            # scaled rewards (alpha > beta > gamma) for weaker changes,
            # penalties (delta, epsilon) for losses and no-ops.
            if cov_change == COV_INCREASE:
                self.weight_table[seed_id] = self.weight_table[seed_id] + bytes_changed  # NUMBER 1 REWARD
            if cov_change == COV_SOFT_INCREASE:
                self.weight_table[seed_id] = self.weight_table[seed_id] + self.alpha * bytes_changed  # NUMBER 2 REWARD
            # if edge coverage changes, but didn't strictly increase
            if cov_change == COV_CHANGE:
                self.weight_table[seed_id] = self.weight_table[seed_id] + self.beta * bytes_changed  # NUMBER 3 REWARD
            # if edge coverage changes, increases in some spots, but decreases overall
            if cov_change == COV_SOFT_DECREASE:
                self.weight_table[seed_id] = self.weight_table[seed_id] + self.gamma * bytes_changed  # NUMBER 4 REWARD
            # if edge coverage changes, but decreases overall
            if cov_change == COV_DECREASE:
                self.weight_table[seed_id] = self.weight_table[seed_id] - self.delta * bytes_changed  # PENALTY
            # if edge coverage doesn't change, penalize
            if cov_change == COV_NO_CHANGE:
                self.weight_table[seed_id] = self.weight_table[seed_id] - self.epsilon * bytes_changed  # PENALTY
        # BUGFIX: an early 'return cov_change' inside the branch above made
        # this stats recording unreachable whenever training happened.
        if self.stats is not None:
            self.stats.witness_training(seed_id, new_bytes, cov_new, splicing_with)
            # self.stats.dump(os.path.join(self.save_incremental_dir, 'testsave.stats'))
        return cov_change

    def normalize_weights(self, weights):
        """Scale weights so they sum to 1; all-zero weights pass through."""
        weights_sum = np.sum(weights)
        if weights_sum == 0:
            norm = weights
        else:
            norm = weights / weights_sum
        if norm.dtype != np.float64:
            return np.array(norm, dtype=np.float64)
        else:
            return norm

    def got_cycle_start(self, num):
        """Remember the current fuzzing cycle number."""
        self.curr_cycle = num

    def got_cycle_end(self, num):
        """
        At the end of the cycle, write out the entire weight table
        :param num: int
        :return:
        """
        # Write out any stats and profile info
        if self.stats is not None and self.save_incremental_dir:
            self.stats.dump(os.path.join(self.save_incremental_dir, 'cycle%04d.stats' % num))
        if self.profile is not None and self.save_incremental_dir:
            self.profile.dump_stats(os.path.join(self.save_incremental_dir, 'cycle%04d.profile' % num))
            self.profile.enable()

    def got_seed_end(self, seed_id):
        """
        Once the seed is finished mutations, write out weights.
        :param seed_id: int
        :return:
        """
        weights = self.weight_table[seed_id]
        # normalize weights and write out to afl
        weights_norm = self.normalize_weights(weights)
        path = self.save_weights(seed_id, weights_norm)
        # save debug info
        self.save_incremental(path, weights_norm)

    def save_weights(self, seed_id, weights_norm):
        """Log and delegate to the base-class weight writer."""
        print('saving weights for %d (len=%d)' % (seed_id, len(weights_norm)))
        return super(WAflModel, self).save_weights(seed_id, weights_norm)

    # mostly for debugging
    def save_incremental(self, alias_fname, norm):
        """Copy the alias table and raw normalized weights into the cycle dir."""
        if self.save_incremental_dir:
            dest_dir = os.path.join(self.save_incremental_dir, 'cycle%04d' % self.curr_cycle)
            try:
                os.mkdir(dest_dir)
            except OSError:
                pass  # directory already exists
            # save the alias table
            shutil.copy(alias_fname, dest_dir)
            # save the normalized weights
            weights_fname = '%s/%s.weights' % (dest_dir, os.path.basename(alias_fname))
            with open(weights_fname, 'wb') as f:
                f.write(norm.tobytes())

    ###### FUTURE WORK #######
    def calc_nearest_seed(self, x):
        """
        Return the seed id with the closest distance to x
        :param x:
        :return:
        """
        # TODO
        return 0

    def smooth_weights(self, bytes, weights):
        """
        Use additive smoothing to smooth out weight vector
        This will become useful when we start to accept addition/deletion in bytes as mutations
        :param bytes:
        :param weights:
        :return:
        """
        # NOTE(review): returns None when len(bytes) <= len(weights);
        # confirm intended once this future-work path is used.
        if len(bytes) > len(weights):
            return np.concatenate([weights, np.ones(len(bytes) - len(weights))])
if __name__ == "__main__":
# import cProfile
# profile = cProfile.Profile()
# profile.enable()
import argparse
alpha = float(os.environ["WAFL_ALPHA"]) if "WAFL_ALPHA" in os.environ else 0.5
beta = float(os.environ["WAFL_BETA"]) if "WAFL_BETA" in os.environ else 0.4
gamma = float(os.environ["WAFL_GAMMA"]) if "WAFL_GAMMA" in os.environ else 0.3
delta = float(os.environ["WAFL_DELTA"]) if "WAFL_DELTA" in os.environ else 0.2
epsilon = float(os.environ["WAFL_EPSILON"]) if "WAFL_EPSILON" in os.environ else 0.1
# savedir = os.environ["SAVE_DIR"] if "SAVE_DIR" in os.environ else None
# print ("Outputing incremental save to {}".format(savedir))
# if savedir is not None:
# with open("{}/params.csv".format(savedir), 'w') as fd:
# fd.write("alpha,beta,gamma,delta,epsilon\n")
# fd.write("{},{},{},{},{}\n".format(alpha,beta,gamma,delta,epsilon))
wafl = WAflModel(
alpha = alpha,
beta = beta,
gamma = gamma,
delta = delta,
epsilon = epsilon,
# stats=MultiStats(),
# profile=profile,
# save_incremental_dir=savedir
)
|
# -*- coding: utf-8 -*-
"""
Created on Mon Jul 20 09:43:34 2020
@author: Nikki
"""
#referencing https://github.com/aqeelanwar/SocialDistancingAI
import cv2
import sys
import csv
import transform as tform
import numpy as np
import os.path
# Goal Input: csv file with ip address, length, width
# Output: csv file with ip + transform
def main():
    """Pick calibration points from a video frame and append the resulting
    pixel<->real-world transforms to a CSV file keyed by video path."""
    # video_path = 'C:/Users/Nikki/Documents/work/inputs-outputs/video/AOTsample1_1.mp4'  # (was dead code: immediately overwritten)
    video_path = 'C:/Users/Nikki/Documents/work/inputs-outputs/video/AOTsample2_1.mp4'
    # video_path = 'rtsp://root:worklab@192.168.86.246/axis-media/media.amp?framerate=30.0?streamprofile=vlc' #hall
    # video_path = 'rtsp://root:worklab@192.168.86.247/axis-media/media.amp?framerate=30.0?streamprofile=vlc' #living room
    # video_path = 'C:/Users/Nikki/Documents/work/inputs-outputs/video/vid_short.mp4'
    output_file = 'C:/Users/Nikki/Documents/work/inputs-outputs/transforms.csv'
    #whether to include surroundings, or just the space between the four selected points
    roi_only = True
    # #width and length of measured area, in feet aot 1
    # length = 125.37564416986191
    # wth = 91.52952303334027
    # #width and length of measured area, in feet aot 2
    # length = 72.6
    # wth = 43.4
    # living room
    length = 11.18
    wth = 15.64
    # #hall
    # length = 19.71
    # wth = 3.375
    #enter points top left, top right, bottom left bottom right, 2 * 6 ft apart - should be rectangle
    #furthest locations away and closest locations to camera that someone could be standing
    parallel, corners = find_all_pts(video_path, roi_only)
    #find transformation matrices
    pix_real, real_pix = find_transform(parallel, corners, wth, length, roi_only)
    # Store matrices as nested lists so csv can serialize them.
    pix_real = pix_real.tolist()
    real_pix = real_pix.tolist()
    # Append to the CSV if it exists, create it otherwise.  A context manager
    # fixes the original bug where the except-handler could hit a NameError
    # closing a csvfile that was never successfully opened; newline='' is now
    # used in both modes (it was missing for the create case).
    try:
        mode = 'a+' if os.path.isfile(output_file) else 'w+'
        with open(output_file, mode, newline='') as csvfile:
            writer = csv.writer(csvfile)
            writer.writerow([video_path, pix_real, real_pix])
    except OSError:
        print("Unexpected error:", sys.exc_info())
# --------------------------------------------------------------------------
# Given four points, and real world distance between them (wth and length),
# finds transformation matrix
def find_transform(parallel, corners, wth, length, roi_only = True, display = False):
    """Compute the pixel->real-world and real-world->pixel homographies.

    *parallel* holds the four clicked image points; *wth* and *length* are
    the measured real-world dimensions of the rectangle they span.
    Returns the pair (pix_real, real_pix).
    """
    #convert to numpy arrays, define overhead plane using measurements taken of the area
    image_pts = np.array(parallel)
    frame_corners = np.array(corners)
    # Overhead plane: rectangle (0,0)-(wth,length), in feet.
    plane_pts = np.array([[0, 0], [wth, 0], [wth, length], [0, length]])
    #get transformation arrays
    to_real = tform.get_best_transform(image_pts, plane_pts)
    to_pix = tform.get_best_transform(plane_pts, image_pts)
    if display:
        show_overhead(image_pts, frame_corners, to_real, length, wth, roi_only)
    return to_real, to_pix
# --------------------------------------------------------------------------
# Displays first frame of video and allows user to click on 4 or 8 pts, which
# are appended to a list
def find_all_pts(video_path, roi_only = True):
    """Show the first video frame and collect clicked calibration points.

    The user clicks 4 points (ROI only) or 8 points (ROI plus extremes).
    Returns (parallel, corners): the first four clicks and the remainder.
    Returns None if an error occurs.
    """
    video_capture = cv2.VideoCapture(video_path)
    try:
        # Clicks are accumulated into this global by the mouse callback.
        global allpts
        allpts = []
        _, frame = video_capture.read()
        # use = False
        # FIXME cv2.namedWindow("Calibration", cv2.WINDOW_NORMAL)
        # # Allow user to regrab frame if previous frame was not acceptable
        # while not use:
        #     try:
        #         key = input('Get new frame (y/n)?')
        #         if key == 'y':
        #             video_capture.grab()
        #             _, frame = video_capture.retrieve()
        #         elif key == 'n':
        #             use = True
        #         else:
        #             print('Invalid option')
        #         frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
        #         cv2.imshow("Calibration", frame)
        #         if cv2.waitKey(1) & 0xFF == ord('q'): break
        #     except:
        #         break
        frame_size = frame.shape[:2]
        # NOTE(review): this default is unconditionally overwritten by
        # allpts[4:] below -- confirm whether it was meant as a fallback.
        corners = [[0,0], [frame_size[1], 0], [frame_size[1],frame_size[0]], [0, frame_size[0]]]
        cv2.namedWindow("Calibration", cv2.WINDOW_NORMAL)
        cv2.setMouseCallback("Calibration", get_pix_coord, frame)
        cv2.imshow("Calibration", frame)
        # 4 clicks when only the ROI is needed, 8 when extremes are too.
        if roi_only == True:
            cutoff = 4
        else:
            cutoff = 8
        #enter points top left, top right, bottom left bottom right, 2 * 6 ft apar
        while len(allpts) < cutoff:
            cv2.waitKey(1)
            if cv2.waitKey(1) & 0xFF == ord('q'): break
            cv2.imshow("Calibration", frame)
        parallel = allpts[:4]
        corners = allpts[4:]
        video_capture.release()
        cv2.destroyAllWindows()
        return parallel, corners
    except:
        # NOTE(review): bare except also hides programming errors; prints the
        # error, releases the capture, and implicitly returns None.
        print("Unexpected error:", sys.exc_info())
        video_capture.release()
        cv2.destroyAllWindows()
# --------------------------------------------------------------------------
# Handler to append left-click location to list of points
def get_pix_coord(event, x, y, flags, frame):
    """Mouse callback: on left click, mark the point on *frame* and record it
    in the global ``allpts`` list."""
    global allpts
    if event != cv2.EVENT_LBUTTONDOWN:
        return
    cv2.circle(frame, (x, y), 10, (0, 255, 255), -1)
    if "allpts" not in globals():
        allpts = []
    allpts.append([x,y])
    print("Point selected: " + str(x) + ", " + str(y))
# --------------------------------------------------------------------------
# Given points, frame size, and transformation, diplays overhead view of points
def show_overhead(parallel, corners, pix_real, length, wth, roi_only):
    """Render an overhead (bird's-eye) view of the transformed points.

    Draws the four ROI points (yellow) and, when roi_only is False, the
    transformed frame corners (red) on a blank canvas sized to fit.
    """
    pts = tform.transform_pt_array(parallel, pix_real)
    #scale frame size
    if roi_only:
        # Measured dimensions are floats (feet); np.zeros needs int sizes.
        frame_hgt = int(length)
        frame_wdt = int(wth)
    else:
        #find locations of extremes
        extremes = tform.transform_pt_array(corners, pix_real)
        #find most negative values in both directions
        mins = np.ndarray.min(extremes, axis=0)
        mins[mins > 0] = 0
        mins = np.absolute(mins)
        #add to both directions until all values are positive
        shifted = extremes + mins
        #scale frame size
        maxs = np.ndarray.max(shifted, axis=0)
        pts = pts + mins
        frame_hgt = int(maxs[1])
        frame_wdt = int(maxs[0])
    #generate blank frame
    img = np.zeros((frame_hgt, frame_wdt, 3), np.uint8)
    #draw circles for all included points
    if not roi_only:
        # Frame-corner extremes in red.
        for pt in shifted:
            x = int(pt[0])
            y = int(pt[1])
            cv2.circle(img, (x, y), 5, (0, 0, 255), -1)
    # ROI points in yellow.
    for pt in pts:
        x = int(pt[0])
        y = int(pt[1])
        cv2.circle(img, (x, y), 5, (0, 255, 255), -1)
    #display image
    try:
        cv2.namedWindow("Result", cv2.WINDOW_NORMAL)
        cv2.imshow("Result", img)
        cv2.waitKey(0)
        cv2.destroyAllWindows()
    except:
        print("Unexpected error:", sys.exc_info())
        cv2.destroyAllWindows()
if __name__ == "__main__":
main() |
import os,fortnitepy,datetime,requests,json,asyncio,time,random,threading
from threading import Thread
from Fortnite import Variants,API,Extras,colored,apiwrapper
fnapi = apiwrapper.FortniteAPI()
async def Command(self, message):
    """Parse and execute a chat command sent to the bot.

    Commands are matched against the upper-cased message content
    (so matching is case-insensitive). Most commands are gated by a
    boolean flag in self.Settings; accounts listed in the
    "GiveFullAccessTo" setting bypass every flag.
    """
    HasFullAccess = False
    TimeInUTC = datetime.datetime.utcnow().strftime('%H:%M:%S')
    GiveFullAccess = self.Settings["GiveFullAccessTo"]
    # "GiveFullAccessTo" is either a single account id or a comma-separated list
    if "," in GiveFullAccess:
        if message.author.id in (GiveFullAccess.split(",")):
            HasFullAccess = True
    elif GiveFullAccess == message.author.id:
        HasFullAccess = True
    author = message.author
    msg = message.content.upper()
    args = msg.split(" ")
    def GetValue(fullLine, startWith, endWith):
        # Return the substring between startWith and the following endWith.
        startIndex = fullLine.index(startWith) + len(startWith)
        endIndex = fullLine[startIndex:].index(endWith) + startIndex
        return fullLine[startIndex:endIndex]
    def GetValues(fullLine):
        # Collect every "--CHANNEL=VALUE" option present in the line.
        Variants = []
        for Variant in range(0, fullLine.count("--")):
            try:
                startIndex = fullLine.index("--")
                ValueStartIndex = fullLine[startIndex:].index("=") + startIndex + 1
                try:
                    # no trailing "--": the value runs to the end of the line
                    endIndex = fullLine[ValueStartIndex:].index("--") + ValueStartIndex
                except ValueError:
                    endIndex = len(fullLine)
                Variants.append(fullLine[startIndex:endIndex])
                fullLine = fullLine.replace(fullLine[startIndex:endIndex], "")
            except ValueError:
                # malformed option string
                return None
        return Variants
    if args[0] == "!BANNER" and len(args) > 1:
        if self.Settings["ChangeBannerOnCommand"] or HasFullAccess:
            if "--LEVEL=" in msg:
                # trailing space guarantees GetValue finds a terminator
                msg = msg + " "
                Level = GetValue(msg, "--LEVEL=", " ")
                try:
                    Level = int(Level)
                except ValueError:
                    await message.reply("Sorry you can only use numbers as level")
                    return
                msg = msg.replace("--LEVEL=" + str(Level), "").strip()
                await self.user.party.me.set_banner(icon=msg[8:], season_level=Level)
                await message.reply("Banner and Level set")
            else:
                await self.user.party.me.set_banner(icon=msg[8:])
                await message.reply("Banner set")
        else:
            await message.reply("Can't change Banner. The Bot owner has disabled this command!")
    if msg == "!LOGOUT":
        if self.Settings["LogoutOnCommand"] or HasFullAccess:
            await message.reply("Logged out")
            await self.logout()
            os.system("cls")
            # was os.system(...): that executed the log text as a shell command
            print(colored.Colored(f"[BOT] [{TimeInUTC}] Logged out.", "red"))
        else:
            await message.reply("Can't Logout. The Bot owner has disabled this command!")
    if msg == "!RESTART":
        if self.Settings["RestartOnCommand"] or HasFullAccess:
            await message.reply("Restarting...")
            await self.logout()
            await self.start()
        else:
            await message.reply("Can't Restart. The Bot owner has disabled this command!")
    if "!BP" == args[0] and len(args) > 1:
        try:
            if self.Settings["ChangeBattlePassInfoOnCommand"] or HasFullAccess:
                # bool(args[1]) was always True for any non-empty string;
                # compare against the upper-cased literal instead
                await self.user.party.me.set_battlepass_info(has_purchased=(args[1] == "TRUE"), level=int(args[2]), self_boost_xp=int(args[3]), friend_boost_xp=int(args[4]))
                await message.reply("New Battle Pass Info set")
                print(colored.Colored(f"[BOT {self.user.display_name}] [{TimeInUTC}] New Battle Pass Info set by {message.author.display_name}", "green"))
            else:
                await message.reply("Can't set new Battle Pass Info. The Bot owner has disabled this command!")
        except (IndexError, ValueError):
            # missing or non-numeric arguments
            await message.reply("Command : !BP <True/False> <Level> <Self XP Boost> <Friend XP Boost>")
    if args[0] == "!STATUS" and len(args) > 1:
        if self.Settings["ChangeStatusOnCommand"] or HasFullAccess:
            await self.send_status(message.content[8:])
            await message.reply(f"Status set to : {message.content[8:]}")
            print(colored.Colored(f"[BOT {self.user.display_name}] [{TimeInUTC}] New status set by {message.author.display_name}", "green"))
        else:
            await message.reply("Can't set new status. The Bot owner has disabled this command!")
    if "!PLATFORM" == args[0] and len(args) > 1:
        if self.Settings["ChangePlatformOnCommand"] or HasFullAccess:
            if msg[10:] in fortnitepy.Platform.__members__:
                self.platform = fortnitepy.Platform[msg[10:]]
            else:
                await message.reply("Can't find the Platform!")
                return
            if self.Settings["TryToRejoinOldParty"] or HasFullAccess:
                # remember current members so we can rejoin after leaving
                Members = []
                for Member in self.user.party.members:
                    Members.append(Member)
                await self.user.party.me.leave()
                print(colored.Colored(f"[BOT {self.user.display_name}] [{TimeInUTC}] Changed Platform to {(str((self.platform))[9:]).lower().capitalize()}", "green"))
                await message.reply(f"Successfuly changed Platform to {(str((self.platform))[9:]).lower().capitalize()}")
                for Member in Members:
                    if Member != self.user.id:
                        UserName = (await self.fetch_profile(Member, cache=True, raw=False)).display_name
                        if self.get_friend(Member):
                            await self.get_friend(Member).join_party()
                            await message.reply(f"Tryied to join {UserName}")
                            return
    if args[0] == "!KICK" and len(args) > 1:
        if self.Settings["KickMembersOnCommand"] or HasFullAccess:
            UserToKick = await self.fetch_profile(msg[6:], cache=True, raw=False)
            if UserToKick.id == self.user.id:
                await message.reply('Can\'t kick myself. Use "!Leave Party" insteand')
                return
            if UserToKick.id in self.user.party.members:
                User = self.user.party.members.get(UserToKick.id)
                try:
                    await User.kick()
                    # was missing the f prefix, so literal braces were sent
                    await message.reply(f"Kicked {User.display_name}")
                    print(colored.Colored(f"[BOT {self.user.display_name}] [{TimeInUTC}] Kicked {User.display_name}", "red"))
                except fortnitepy.Forbidden:
                    await message.reply(f"Can't kick {User.display_name}.I am not the leader of the party.")
            else:
                await message.reply("User isn't in my party")
        else:
            await message.reply("Can't kick. The Bot owner has disabled this command!")
    if args[0] == "!PROMOTE":
        if self.Settings["PromoteMembersOnCommand"] or HasFullAccess:
            if msg == "!PROMOTE":
                # no argument: promote the command author
                UserToPromote = await self.fetch_profile(message.author.id, cache=True, raw=False)
            else:
                UserToPromote = await self.fetch_profile(msg[9:], cache=True, raw=False)
            if UserToPromote.id in self.user.party.members:
                User = self.user.party.members.get(UserToPromote.id)
                try:
                    await User.promote()
                    await message.reply(f"Promoted {User.display_name}")
                    print(colored.Colored(f"[BOT {self.user.display_name}] [{TimeInUTC}] Promoted {User.display_name}", "green"))
                except fortnitepy.Forbidden:
                    await message.reply(f"Can't Promote {User.display_name}, I am not the party leader")
            else:
                await message.reply("User isnt in my party")
        else:
            await message.reply("Can't promote. The Bot owner has disabled this command!")
    if args[0] == "!INVITE" and msg != "!INVITE ALL BOTS":
        if self.Settings["InviteUserOnCommand"] or HasFullAccess:
            if msg == "!INVITE":
                # no argument: invite the command author
                User = await self.fetch_profile(message.author.id, cache=True, raw=False)
            else:
                User = await self.fetch_profile(msg[8:], cache=True, raw=False)
            if User is None:
                await message.reply(f"Can't invite {message.content[8:]}, the user isn't my friend")
                return
            try:
                if User.id in self.user.party.members:
                    await message.reply(f"{User.display_name} is already member of the party")
                    return
                else:
                    Friend = self.get_friend(User.id)
                    await Friend.invite()
                    print(colored.Colored(f"[BOT {self.user.display_name}] [{TimeInUTC}] Invited {Friend.display_name}", "green"))
                    await message.reply(f"Invited {Friend.display_name}")
            except fortnitepy.errors.PartyError:
                await message.reply(f"Can't invite {User.display_name}, the party is full.")
    if msg == "!LEAVE PARTY":
        if self.Settings["LeavePartyOnCommand"] or HasFullAccess:
            # wave goodbye before leaving
            await self.user.party.me.set_emote('EID_Wave')
            await asyncio.sleep(2)
            await self.user.party.me.leave()
            await message.reply("Successfuly left Party.")
            print(colored.Colored(f"[BOT {self.user.display_name}] [{TimeInUTC}] Left party", "red"))
        else:
            await message.reply("Can't leave party. The Bot owner has disabled this command!")
    if msg == "!READY":
        if self.Settings["SetReadyOnCommand"] or HasFullAccess:
            await self.user.party.me.set_ready(True)
            await message.reply("Successfuly set my readiness to ready")
            print(colored.Colored(f"[BOT {self.user.display_name}] [{TimeInUTC}] Set readiness to ready", "green"))
        else:
            await message.reply("Can't set my readiness to ready. The Bot owner has disabled this command!")
    if msg == "!NOT READY":
        if self.Settings["SetNotReadyOnCommand"] or HasFullAccess:
            await self.user.party.me.set_ready(False)
            await message.reply("Successfuly set my readiness to not ready")
            # fixed attribute typo: display_namee -> display_name
            print(colored.Colored(f"[BOT {self.user.display_name}] [{TimeInUTC}] Set readiness to not ready", "green"))
        else:
            await message.reply("Can't set my readiness to not ready. The Bot owner has disabled this command!")
    if msg == "!STOP EMOTE":
        if self.Settings["LetOthersStopEmote"] or HasFullAccess:
            if self.user.party.me.emote is None:
                await message.reply("I am not dancing!")
            else:
                await self.user.party.me.clear_emote()
                if self.user.party.me.emote is None:
                    await message.reply("Stopped Dancing!")
                    print(colored.Colored(f"[BOT {self.user.display_name}] [{TimeInUTC}] Stopped dancing", "green"))
                else:
                    # fallback: setting an invalid emote also clears the current one
                    await self.user.party.me.set_emote("EID_InvaildEmoteToStopDancing")
                    if self.user.party.me.emote is None:
                        await message.reply("Stopped Dancing!")
                        print(colored.Colored(f"[BOT {self.user.display_name}] [{TimeInUTC}] Stopped dancing", "green"))
        else:
            await message.reply("Can't set stop dancing. The Bot owner has disabled this command!")
    if "!ADD" == args[0] and len(args) > 1:
        if self.Settings["SendFriendRequestsOnCommand"] or HasFullAccess:
            User = await self.fetch_profile(msg[5:], cache=False, raw=False)
            if User is None:
                await message.reply(f"Can't find user {message.content[5:]}")
                return
            try:
                await self.add_friend(User.id)
                await message.reply(f"Friend request send to {User.display_name}")
                print(colored.Colored(f"[BOT {self.user.display_name}] [{TimeInUTC}] Added {User.display_name}", "green"))
            except fortnitepy.errors.HTTPException as Error:
                # replace raw account ids in the error text with display names
                Error2Send = Error.message
                for message_var in Error.message_vars:
                    if self.is_id(message_var):
                        # was missing await; replace() result was discarded
                        UserName = (await self.fetch_profile(message_var, cache=False, raw=False)).display_name
                        Error2Send = Error2Send.replace(message_var, UserName)
                await message.reply(Error2Send)
        else:
            await message.reply(f"Can't add {message.content[5:]}. The Bot owner has disabled this command!")
    if "!REMOVE" == args[0] and len(args) > 1:
        if self.Settings["RemoveOthersOnCommand"] or HasFullAccess:
            # fetch once instead of twice
            User = await self.fetch_profile(msg[8:], cache=False, raw=False)
            if User is not None:
                if self.get_friend(User.id) is not None:
                    await self.remove_friend(User.id)
                    await message.reply(f"Removed {User.display_name} as my friend")
                    print(colored.Colored(f"Removed {User.display_name} as my friend", "red"))
                else:
                    await message.reply("Can't find user in my friend list")
            else:
                await message.reply("Can't find user")
        else:
            # was [5:], which cut into the user name ("!REMOVE " is 8 chars)
            await message.reply(f"Can't remove {message.content[8:]}. The Bot owner has disabled this command!")
    if msg == "!REMOVE":
        # bare "!REMOVE": the author asks to be removed; require confirmation
        await message.reply('Are you sure that I should delete you as my friend? Please write "Yes delete me"')
        def isYes(message):
            if (message.author.id == author.id) and (message.content.upper() == "YES DELETE ME"):
                return True
            else:
                return False
        try:
            DeleteMe = await self.wait_for('message', check=isYes, timeout=200)
            if DeleteMe:
                try:
                    await self.remove_friend(message.author.id)
                    await message.reply("Removed you as my friend")
                    print(colored.Colored(f"[BOT {self.user.display_name}] [{TimeInUTC}] Removed {message.author.display_name} as my friend", "red"))
                except fortnitepy.errors.HTTPException as Error:
                    Error2Send = Error.message
                    for message_var in Error.message_vars:
                        # was is_id(Error.message_vars): passed the whole list
                        if self.is_id(message_var):
                            # was missing await; replace() result was discarded
                            UserName = (await self.fetch_profile(message_var, cache=False, raw=False)).display_name
                            Error2Send = Error2Send.replace(message_var, UserName)
                    await message.reply(Error2Send)
        except asyncio.TimeoutError:
            await message.reply("You took too long, canceled removing you as a friend ♥")
    if msg == "?FRIENDS":
        if self.Settings["SendCurrentFriendCountOnCommand"] or HasFullAccess:
            Friend_count = len(self.friends.items())
            if Friend_count == 0:
                await message.reply("I dont have Friends")
            elif Friend_count == 1:
                await message.reply("I have one Friend")
            elif Friend_count > 1:
                await message.reply(f"I have {str(Friend_count)} friends")
        else:
            await message.reply("Can't send the count of my Friends. The Bot owner has disabled this command!")
    if msg == "?BLOCKED":
        if self.Settings["SendCurrentBlockedUserCountOnCommand"] or HasFullAccess:
            Blocked_count = len(await self.get_blocklist())
            if Blocked_count == 0:
                await message.reply("I dont have Blocked anyone")
            elif Blocked_count == 1:
                await message.reply("I have blocked one user")
            elif Blocked_count > 1:
                # was missing the f prefix, so the placeholder was sent verbatim
                await message.reply(f"I have blocked {str(Blocked_count)} users")
        else:
            await message.reply("Can't send the count of my Friends. The Bot owner has disabled this command!")
    if msg == "?SHOP PRICE":
        if self.Settings["SendShopPriceOnCommand"] or HasFullAccess:
            Price = 0
            # fetch the item shop once instead of twice
            Shop = await self.fetch_item_shop()
            for item in Shop.featured_items + Shop.daily_items:
                Price += item.price
            await message.reply(f"Price in VBucks : {Price}, Price in $ : {Extras.MtxCurrencyConverter(Price)}")
        else:
            await message.reply("Can't send the current Price. The Bot owner has disabled this command!")
    if msg == "?ASSISTED CHALLENGE":
        if self.Settings["SendAssistedChallengeOnCommand"] or HasFullAccess:
            if self.user.party.me.assisted_challenge is not None:
                await message.reply(f"Current assisted challenge : {self.user.party.me.assisted_challenge}")
            else:
                await message.reply("I haven't set an assited challange")
        else:
            await message.reply("Can't send my assisted challenge. The Bot owner has disabled this command!")
    if msg == "?BANNER":
        if self.Settings["SendCurrentBannerNameOnCommand"] or HasFullAccess:
            await message.reply(f"Current Banner Name : {self.user.party.me.banner[0]}")
        else:
            await message.reply("Can't send the banner name. The Bot owner has disabled this command!")
    if args[0] == "?ID":
        if self.Settings["SendIDOnCommand"] or HasFullAccess:
            if msg == "?ID":
                await message.reply(f"My ID is : {str(self.user.id)}")
            elif len(args) > 1:
                User = await self.fetch_profile(msg[4:], cache=False, raw=False)
                await message.reply(f"ID : {User.id}")
        else:
            await message.reply("Can't send the Account ID. The Bot owner has disabled this command!")
    if msg == "?PARTY LEADER":
        if self.Settings["SendCurrentPartyLeaderOnCommand"] or HasFullAccess:
            PartyLeaderName = str(self.user.party.leader.display_name)
            await message.reply(f"Current Party Leader : {PartyLeaderName}")
        else:
            await message.reply("Can't send the current Party Leader Name.The Bot owner has disabled this command!")
    if msg == "?JOINED":
        if self.Settings["SendTimeBotJoinedTheLobbyOnCommand"] or HasFullAccess:
            delta_time = datetime.datetime.utcnow() - self.user.party.me.joined_at
            # drop microseconds for a clean H:MM:SS display
            Time = datetime.timedelta(seconds=delta_time.seconds)
            await message.reply(f"Joined {Time} ago")
        else:
            await message.reply("Can't send join time. The Bot owner has disabled this command!")
    if msg == "?PARTY":
        if self.Settings["SendPartyInfosOnCommand"] or HasFullAccess:
            PartyLeader = str(self.user.party.leader.display_name)
            Members = str(self.user.party.member_count)
            PlayList = self.user.party.playlist_info[0]
            Privacy = str(self.user.party.privacy)[13:]
            Fill = str(self.user.party.squad_fill_enabled)
            await message.reply(f"Party leader : {PartyLeader} | Members : {Members} | Playlist : {PlayList} | Privacy : {Privacy} + | Fill : {Fill}")
        else:
            await message.reply("Can't send Party info. The Bot owner has disabled this command!")
    if msg == "!JOIN":
        # NOTE(review): gated by "AcceptIncomingFriendRequest" — looks like a
        # copy-paste of the wrong settings key; confirm the intended flag.
        if self.Settings["AcceptIncomingFriendRequest"] or HasFullAccess:
            if self.get_friend(author.id):
                try:
                    await self.get_friend(author.id).join_party()
                except Exception:
                    await message.reply("Can't join your Party")
            else:
                await message.reply("You aren't my friend")
        else:
            await message.reply("Can't join. The Bot owner has disabled this command!")
    if args[0] == "!EMOTE" and len(args) > 1:
        if self.Settings["SetEmoteOnCommand"] or HasFullAccess:
            Lang = "en"
            if "--LANG=" in msg:
                msg = msg + " "
                Lang = GetValue(msg, "--LANG=", " ")
                msg = msg.replace("--LANG=" + Lang, "").strip()
                Lang = Lang.lower()
            r = fnapi.GetEmote(NameorId=msg[7:], matchMethod="starts", searchLanguage=Lang, Language=Lang)
            if r.status != 200:
                await message.reply("Emote wasn't found")
                def isYes(msg):
                    if msg.author.id == message.author.id and msg.content.upper() == "USE EID":
                        return True
                    else:
                        return False
                if not msg[7:].startswith("EID_"):
                    await message.reply("If you are sure that emote exists please try again with the EID")
                else:
                    # the user supplied a raw EID: offer to set it unchecked
                    await message.reply('If you are sure this emote does exists write "Use EID"')
                    try:
                        UseEID = await self.wait_for('message', check=isYes, timeout=100)
                        if UseEID:
                            await self.user.party.me.set_emote(msg[7:])
                    except asyncio.TimeoutError:
                        return
            else:
                await self.user.party.me.set_emote(r.id)
                await message.reply(f'Emote set to {r.Names[Lang]}')
    if args[0] == "?SKIN" and len(args) > 1 and args[1] == "VARIANTS":
        # len guard added: args[1] raised IndexError for a bare "?SKIN"
        Lang = "en"
        if "--LANG=" in msg:
            msg = msg + " "
            Lang = GetValue(msg, "--LANG=", " ")
            msg = msg.replace("--LANG=" + Lang, "").strip()
            Lang = Lang.lower()
        r = fnapi.GetSkin(NameorId=msg[15:], matchMethod="starts", searchLanguage=Lang, Language=Lang)
        if r.status != 200:
            await message.reply("Skin wasn't found")
            return
        terax = requests.get(f"https://fnapi.terax235.com/api/v1.2/cosmetics/search?query={r.id}&type=skin").json()
        allvariants = ""
        if terax["statusCode"] != 200:
            await message.reply("Sorry the server for variants isn't updated")
            return
        else:
            if "variants" in terax["data"]:
                # build one channel-per-line listing of all variants
                for variant in terax["data"]["variants"]:
                    allvariants += f'{variant["channel"]}:\n'
                    for v in variant["tags"]:
                        allvariants += f'-{v["name"][Lang]}\n'
                await message.reply(allvariants)
            else:
                await message.reply("This skin doesn't have any variants or the server isn't updated")
    if args[0] == "!SKIN" and len(args) > 1:
        if self.Settings["SetSkinOnCommand"] or HasFullAccess:
            Lang = "en"
            if "--LANG=" in msg:
                msg = msg + " "
                Lang = GetValue(msg, "--LANG=", " ")
                msg = msg.replace("--LANG=" + Lang, "").strip()
                Lang = Lang.lower()
            try:
                if msg.count("--") != 0:
                    Skin = GetValue(msg, "!SKIN ", "--")
                else:
                    Skin = msg[6:]
            except ValueError:
                await message.reply("Command : !Skin <Skin Name> *--<Variant Channel Name>=<Variant Name>")
                # was missing: falling through left Skin undefined below
                return
            r = fnapi.GetSkin(NameorId=Skin.strip(), matchMethod="starts", searchLanguage=Lang, Language=Lang)
            if r.status != 200:
                await message.reply("Skin wasn't found")
                def isYes(msg):
                    if msg.author.id == message.author.id and msg.content.upper() == "USE CID":
                        return True
                    else:
                        return False
                # was msg[10:] (copied from !BACKPACK); "!SKIN " is 6 chars
                if not msg[6:].startswith("CID_"):
                    await message.reply("If you are sure that skin exists please try again with the CID")
                else:
                    await message.reply('If you are sure this skin does exists write "Use CID"')
                    try:
                        UseCID = await self.wait_for('message', check=isYes, timeout=100)
                        if UseCID:
                            await self.user.party.me.set_outfit(msg[6:])
                    except asyncio.TimeoutError:
                        return
            else:
                if msg.count("--") != 0:
                    # variant options requested: resolve channel/tag ids remotely
                    terax = requests.get(f"https://fnapi.terax235.com/api/v1.2/cosmetics/search?query={r.id}&type=skin").json()
                    if terax["statusCode"] != 200:
                        await message.reply("Sorry the server for variants isn't updated")
                        return
                    v = []
                    def create_variant(VariantChannelName, Variant, item="AthenaCharacter"):
                        # Build one variant payload dict for set_outfit().
                        return {
                            'item': item,
                            'channel': VariantChannelName,
                            'variant': Variant
                        }
                    if "variants" in terax["data"]:
                        for Variant in GetValues(msg):
                            VariantChannelName = (Variant.split("=")[0])[2:]
                            Variant = Variant.split("=")[1]
                            for variant in terax["data"]["variants"]:
                                if variant["channel"].upper() == VariantChannelName:
                                    for tag in variant["tags"]:
                                        if tag["name"][Lang].upper() == Variant:
                                            v.append(create_variant(variant["channel"], tag["tag"]))
                        await self.user.party.me.set_outfit(r.id, variants=v)
                        await message.reply(f'Outfit set to {r.Names[Lang]}')
                else:
                    await self.user.party.me.set_outfit(r.id)
                    await message.reply(f'Outfit set to {r.Names[Lang]}')
    if args[0] == "!BACKPACK" and len(args) > 1:
        if self.Settings["SetBackpackOnCommand"] or HasFullAccess:
            Lang = "en"
            if "--LANG=" in msg:
                msg = msg + " "
                Lang = GetValue(msg, "--LANG=", " ")
                msg = msg.replace("--LANG=" + Lang, "").strip()
                Lang = Lang.lower()
            r = fnapi.GetBackpack(NameorId=msg[10:], matchMethod="starts", searchLanguage=Lang, Language=Lang)
            if r.status != 200:
                await message.reply("Backpack wasn't found")
                def isYes(msg):
                    if msg.author.id == message.author.id and msg.content.upper() == "USE BID":
                        return True
                    else:
                        return False
                if not msg[10:].startswith("BID_"):
                    await message.reply("If you are sure that backpack exists please try again with the BID")
                else:
                    await message.reply('If you are sure this backpack does exists write "Use BID"')
                    try:
                        UseBID = await self.wait_for('message', check=isYes, timeout=100)
                        if UseBID:
                            await self.user.party.me.set_backpack(msg[10:])
                    except asyncio.TimeoutError:
                        return
            else:
                await self.user.party.me.set_backpack(r.id)
                await message.reply(f'Backpack set to {r.Names[Lang]}')
import numpy as np
# Lax-Friedrichs scheme for linear advection. It is only exact when a = 1;
# at other speeds the numerical dissipation is large.
nmax = 510   # number of spatial cells
tmax = 1005  # number of time steps
U = np.zeros((tmax,nmax))    # solution values at cell centers, U[t, i]
f = np.zeros((tmax,nmax+1))  # numerical fluxes at cell interfaces
F = np.zeros((tmax,nmax))    # physical flux F = a*U at cell centers
a = 0.5  # advection speed
b = np.zeros((2,nmax+1))  # NOTE(review): filled below but never read afterwards
dt = 1  # time step
dx = 1  # cell width
for i in range(0,nmax+1):
    b[0,i] = -i*i/2 + i*10
    b[1,i] = -i*i*i*i/6 + i*i*5
    j = i + 0.5  # cell-interface coordinate (used only by the commented line)
    #b[1,i] = -j*j*j/6 + j*j*5
# initial condition: two parabolic bumps with a flat plateau in between
for i in range(0,nmax):
    if i <= 8:
        U[0,i] = -i*i/16 + i
    elif i >= 16:
        U[0,i] = -(i-8)*(i-8)/16 + (i-8)
    else:
        U[0,i] = 4
#    U[0,i] = -i*i*i/16 + i  # the original, truly second-order test function
F[0,:] = a*U[0,:]
# time stepping: compute interface fluxes, then update cell averages
for t in range(0,tmax-1):
    for i in range(0,nmax+1):
        if(i == 0):
            # left boundary: one-sided flux
            f[t,i] = F[t,i]
        elif(i == nmax):
            # right boundary: one-sided flux
            f[t,i] = F[t,i-1]
        else:
            # Lax-Friedrichs interface flux: centered average plus dissipation
            f[t,i] = 0.5*(F[t,i] + F[t,i-1]) - 0.5*dx/dt*(U[t,i] - U[t,i-1])
    # conservative update of the cell averages
    for i in range(0,nmax):
        U[t+1,i] = U[t,i] - dt/dx*(f[t,i+1] - f[t,i])
    F[t+1,:] = a*U[t+1,:]
|
from invoke import task
CONTAINER_NAME = 'jupyter_datascience_pyspark'
IMAGE_NAME = f"tuteco/{CONTAINER_NAME}"
CONTAINER_INSTANCE = 'default'
@task
def build_local(context):
    """
    build an image from a Dockerfile with tag 'latest-dev'
    """
    command = f"docker build -t {IMAGE_NAME}:latest-dev . -f Dockerfile"
    context.run(command)
@task
def run(context):
    """
    run the local image with tag 'latest-dev'
    """
    container = f"{CONTAINER_NAME}-{CONTAINER_INSTANCE}"
    command = f"docker run --rm --name {container} -p 8888:8888 {IMAGE_NAME}:latest-dev"
    context.run(command)
@task(help={
    "images": "remove images used by service"
})
def docker_clean(context, images=False):
    """
    remove containers, networks, volumes and images(optional)
    """
    context.run("docker compose down -v")
    if images:
        # delete project image
        context.run(f"docker rmi {IMAGE_NAME}:latest-dev -f")
        # remove dangling images (was an f-string with no placeholders)
        context.run("docker image prune -f")
|
'''Kiosk main functions and classes'''
class Channels():
    """
    Holds dict of channels
    Channels have a uuid name
    Channels contain a list of urls
    """
    def __init__(self):
        # '_standby' is the fallback channel shown when nothing else is set
        self.channels = {
            '_standby': ['https://dutchsec.com']
        }
    def add(self, name, pages):
        """
        Add (or replace) a channel entry.
        name  = channel identifier (uuid string)
        pages = list of urls
        """
        # docstring previously documented a nonexistent 'time' parameter
        self.channels[name] = pages
    def delete(self, name):
        """
        Remove entry name from channels dict; no-op when absent.
        """
        self.channels.pop(name, None)
|
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'mezcal-widget.ui'
#
# Created by: PyQt5 UI code generator 5.8.2
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_MezcalPicker(object):
def setupUi(self, MezcalPicker):
MezcalPicker.setObjectName("MezcalPicker")
MezcalPicker.resize(438, 591)
MezcalPicker.setAutoFillBackground(False)
MezcalPicker.setStyleSheet("background-color: rgb(0, 0, 0);")
self.gridLayout_2 = QtWidgets.QGridLayout(MezcalPicker)
self.gridLayout_2.setObjectName("gridLayout_2")
self.verticalLayout = QtWidgets.QVBoxLayout()
self.verticalLayout.setObjectName("verticalLayout")
self.tetrad4 = QtWidgets.QFrame(MezcalPicker)
self.tetrad4.setStyleSheet("background-color: rgb(0, 204, 0);")
self.tetrad4.setFrameShape(QtWidgets.QFrame.StyledPanel)
self.tetrad4.setFrameShadow(QtWidgets.QFrame.Raised)
self.tetrad4.setObjectName("tetrad4")
self.gridLayout_6 = QtWidgets.QGridLayout(self.tetrad4)
self.gridLayout_6.setObjectName("gridLayout_6")
self.push_tetrad4 = QtWidgets.QPushButton(self.tetrad4)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Minimum)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.push_tetrad4.sizePolicy().hasHeightForWidth())
self.push_tetrad4.setSizePolicy(sizePolicy)
font = QtGui.QFont()
font.setFamily("Noto Emoji")
font.setPointSize(24)
self.push_tetrad4.setFont(font)
self.push_tetrad4.setContextMenuPolicy(QtCore.Qt.NoContextMenu)
self.push_tetrad4.setAutoFillBackground(False)
self.push_tetrad4.setDefault(False)
self.push_tetrad4.setFlat(True)
self.push_tetrad4.setObjectName("push_tetrad4")
self.gridLayout_6.addWidget(self.push_tetrad4, 0, 0, 1, 1)
self.verticalLayout.addWidget(self.tetrad4)
self.monochrome1 = QtWidgets.QFrame(MezcalPicker)
self.monochrome1.setStyleSheet("background-color: rgb(0, 102, 204);")
self.monochrome1.setFrameShape(QtWidgets.QFrame.StyledPanel)
self.monochrome1.setFrameShadow(QtWidgets.QFrame.Raised)
self.monochrome1.setObjectName("monochrome1")
self.gridLayout_10 = QtWidgets.QGridLayout(self.monochrome1)
self.gridLayout_10.setObjectName("gridLayout_10")
self.push_monochrome1 = QtWidgets.QPushButton(self.monochrome1)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.push_monochrome1.sizePolicy().hasHeightForWidth())
self.push_monochrome1.setSizePolicy(sizePolicy)
font = QtGui.QFont()
font.setFamily("Noto Emoji")
font.setPointSize(24)
self.push_monochrome1.setFont(font)
self.push_monochrome1.setContextMenuPolicy(QtCore.Qt.NoContextMenu)
self.push_monochrome1.setAutoFillBackground(False)
self.push_monochrome1.setDefault(False)
self.push_monochrome1.setFlat(True)
self.push_monochrome1.setObjectName("push_monochrome1")
self.gridLayout_10.addWidget(self.push_monochrome1, 0, 0, 1, 1)
self.verticalLayout.addWidget(self.monochrome1)
self.monochrome2 = QtWidgets.QFrame(MezcalPicker)
self.monochrome2.setStyleSheet("background-color: rgb(0, 59, 119);\n"
"")
self.monochrome2.setFrameShape(QtWidgets.QFrame.StyledPanel)
self.monochrome2.setFrameShadow(QtWidgets.QFrame.Raised)
self.monochrome2.setObjectName("monochrome2")
self.gridLayout_12 = QtWidgets.QGridLayout(self.monochrome2)
self.gridLayout_12.setObjectName("gridLayout_12")
self.push_monochrome2 = QtWidgets.QPushButton(self.monochrome2)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.push_monochrome2.sizePolicy().hasHeightForWidth())
self.push_monochrome2.setSizePolicy(sizePolicy)
font = QtGui.QFont()
font.setFamily("Noto Emoji")
font.setPointSize(24)
self.push_monochrome2.setFont(font)
self.push_monochrome2.setContextMenuPolicy(QtCore.Qt.NoContextMenu)
self.push_monochrome2.setAutoFillBackground(False)
self.push_monochrome2.setDefault(False)
self.push_monochrome2.setFlat(True)
self.push_monochrome2.setObjectName("push_monochrome2")
self.gridLayout_12.addWidget(self.push_monochrome2, 0, 0, 1, 1)
self.verticalLayout.addWidget(self.monochrome2)
self.monochrome3 = QtWidgets.QFrame(MezcalPicker)
self.monochrome3.setStyleSheet("background-color: rgb(0, 17, 34);")
self.monochrome3.setFrameShape(QtWidgets.QFrame.StyledPanel)
self.monochrome3.setFrameShadow(QtWidgets.QFrame.Raised)
self.monochrome3.setObjectName("monochrome3")
self.gridLayout_11 = QtWidgets.QGridLayout(self.monochrome3)
self.gridLayout_11.setObjectName("gridLayout_11")
self.push_monochrome3 = QtWidgets.QPushButton(self.monochrome3)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.push_monochrome3.sizePolicy().hasHeightForWidth())
self.push_monochrome3.setSizePolicy(sizePolicy)
font = QtGui.QFont()
font.setFamily("Noto Emoji")
font.setPointSize(24)
self.push_monochrome3.setFont(font)
self.push_monochrome3.setContextMenuPolicy(QtCore.Qt.NoContextMenu)
self.push_monochrome3.setAutoFillBackground(False)
self.push_monochrome3.setDefault(False)
self.push_monochrome3.setFlat(True)
self.push_monochrome3.setObjectName("push_monochrome3")
self.gridLayout_11.addWidget(self.push_monochrome3, 0, 0, 1, 1)
self.verticalLayout.addWidget(self.monochrome3)
self.gridLayout_2.addLayout(self.verticalLayout, 0, 0, 1, 1)
self.verticalLayout_3 = QtWidgets.QVBoxLayout()
self.verticalLayout_3.setObjectName("verticalLayout_3")
self.horizontalLayout = QtWidgets.QHBoxLayout()
self.horizontalLayout.setObjectName("horizontalLayout")
self.tetrad3 = QtWidgets.QFrame(MezcalPicker)
self.tetrad3.setStyleSheet("background-color: rgb(204, 102, 0);")
self.tetrad3.setFrameShape(QtWidgets.QFrame.StyledPanel)
self.tetrad3.setFrameShadow(QtWidgets.QFrame.Raised)
self.tetrad3.setObjectName("tetrad3")
self.gridLayout_5 = QtWidgets.QGridLayout(self.tetrad3)
self.gridLayout_5.setObjectName("gridLayout_5")
self.push_tetrad3 = QtWidgets.QPushButton(self.tetrad3)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Minimum)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.push_tetrad3.sizePolicy().hasHeightForWidth())
self.push_tetrad3.setSizePolicy(sizePolicy)
font = QtGui.QFont()
font.setFamily("Noto Emoji")
font.setPointSize(24)
self.push_tetrad3.setFont(font)
self.push_tetrad3.setContextMenuPolicy(QtCore.Qt.NoContextMenu)
self.push_tetrad3.setAutoFillBackground(False)
self.push_tetrad3.setDefault(False)
self.push_tetrad3.setFlat(True)
self.push_tetrad3.setObjectName("push_tetrad3")
self.gridLayout_5.addWidget(self.push_tetrad3, 0, 0, 1, 1)
self.horizontalLayout.addWidget(self.tetrad3)
self.tetrad2 = QtWidgets.QFrame(MezcalPicker)
self.tetrad2.setStyleSheet("background-color: rgb(204, 0, 204);")
self.tetrad2.setFrameShape(QtWidgets.QFrame.StyledPanel)
self.tetrad2.setFrameShadow(QtWidgets.QFrame.Raised)
self.tetrad2.setObjectName("tetrad2")
self.gridLayout_4 = QtWidgets.QGridLayout(self.tetrad2)
self.gridLayout_4.setObjectName("gridLayout_4")
self.push_tetrad2 = QtWidgets.QPushButton(self.tetrad2)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Minimum)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.push_tetrad2.sizePolicy().hasHeightForWidth())
self.push_tetrad2.setSizePolicy(sizePolicy)
font = QtGui.QFont()
font.setFamily("Noto Emoji")
font.setPointSize(24)
self.push_tetrad2.setFont(font)
self.push_tetrad2.setContextMenuPolicy(QtCore.Qt.NoContextMenu)
self.push_tetrad2.setAutoFillBackground(False)
self.push_tetrad2.setDefault(False)
self.push_tetrad2.setFlat(True)
self.push_tetrad2.setObjectName("push_tetrad2")
self.gridLayout_4.addWidget(self.push_tetrad2, 0, 0, 1, 1)
self.horizontalLayout.addWidget(self.tetrad2)
self.verticalLayout_3.addLayout(self.horizontalLayout)
self.rum_color = QtWidgets.QFrame(MezcalPicker)
self.rum_color.setStyleSheet("background-color: qlineargradient(spread:pad, x1:0, y0:0, x2:0, y2:1, stop:0 rgba(255, 0, 0, 255), stop:0.166 rgba(255, 255, 0, 255), stop:0.333 rgba(0, 255, 0, 255), stop:0.5 rgba(0, 255, 255, 255), stop:0.666 rgba(0, 0, 255, 255), stop:0.833 rgba(255, 0, 255, 255), stop:1 rgba(255, 0, 0, 255));")
self.rum_color.setFrameShape(QtWidgets.QFrame.StyledPanel)
self.rum_color.setFrameShadow(QtWidgets.QFrame.Raised)
self.rum_color.setObjectName("rum_color")
self.gridLayout_13 = QtWidgets.QGridLayout(self.rum_color)
self.gridLayout_13.setObjectName("gridLayout_13")
self.push_rum_color = QtWidgets.QPushButton(self.rum_color)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.MinimumExpanding, QtWidgets.QSizePolicy.MinimumExpanding)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.push_rum_color.sizePolicy().hasHeightForWidth())
self.push_rum_color.setSizePolicy(sizePolicy)
font = QtGui.QFont()
font.setFamily("Noto Emoji")
font.setPointSize(88)
self.push_rum_color.setFont(font)
self.push_rum_color.setContextMenuPolicy(QtCore.Qt.DefaultContextMenu)
self.push_rum_color.setAutoFillBackground(False)
self.push_rum_color.setStyleSheet("background-color: rgb(0, 102, 204);")
self.push_rum_color.setCheckable(False)
self.push_rum_color.setDefault(True)
self.push_rum_color.setFlat(False)
self.push_rum_color.setObjectName("push_rum_color")
self.gridLayout_13.addWidget(self.push_rum_color, 0, 0, 1, 1)
self.verticalLayout_3.addWidget(self.rum_color)
self.gridLayout_2.addLayout(self.verticalLayout_3, 0, 1, 1, 1)
self.verticalLayout_2 = QtWidgets.QVBoxLayout()
self.verticalLayout_2.setObjectName("verticalLayout_2")
self.tetrad1 = QtWidgets.QFrame(MezcalPicker)
self.tetrad1.setStyleSheet("background-color: rgb(0, 102, 204);")
self.tetrad1.setFrameShape(QtWidgets.QFrame.StyledPanel)
self.tetrad1.setFrameShadow(QtWidgets.QFrame.Raised)
self.tetrad1.setProperty("tetrad_bg", QtGui.QColor(0, 0, 255))
self.tetrad1.setObjectName("tetrad1")
self.gridLayout_3 = QtWidgets.QGridLayout(self.tetrad1)
self.gridLayout_3.setObjectName("gridLayout_3")
self.push_tetrad1 = QtWidgets.QPushButton(self.tetrad1)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Minimum)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.push_tetrad1.sizePolicy().hasHeightForWidth())
self.push_tetrad1.setSizePolicy(sizePolicy)
font = QtGui.QFont()
font.setFamily("Noto Emoji")
font.setPointSize(24)
self.push_tetrad1.setFont(font)
self.push_tetrad1.setContextMenuPolicy(QtCore.Qt.NoContextMenu)
self.push_tetrad1.setAutoFillBackground(False)
self.push_tetrad1.setDefault(False)
self.push_tetrad1.setFlat(True)
self.push_tetrad1.setObjectName("push_tetrad1")
self.gridLayout_3.addWidget(self.push_tetrad1, 0, 0, 1, 1)
self.verticalLayout_2.addWidget(self.tetrad1)
self.triad3 = QtWidgets.QFrame(MezcalPicker)
self.triad3.setStyleSheet("background-color: rgb(102, 204, 0);")
self.triad3.setFrameShape(QtWidgets.QFrame.StyledPanel)
self.triad3.setFrameShadow(QtWidgets.QFrame.Raised)
self.triad3.setObjectName("triad3")
self.gridLayout_9 = QtWidgets.QGridLayout(self.triad3)
self.gridLayout_9.setObjectName("gridLayout_9")
self.push_triad3 = QtWidgets.QPushButton(self.triad3)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.push_triad3.sizePolicy().hasHeightForWidth())
self.push_triad3.setSizePolicy(sizePolicy)
font = QtGui.QFont()
font.setFamily("Noto Emoji")
font.setPointSize(24)
self.push_triad3.setFont(font)
self.push_triad3.setContextMenuPolicy(QtCore.Qt.NoContextMenu)
self.push_triad3.setAutoFillBackground(False)
self.push_triad3.setDefault(False)
self.push_triad3.setFlat(True)
self.push_triad3.setObjectName("push_triad3")
self.gridLayout_9.addWidget(self.push_triad3, 0, 0, 1, 1)
self.verticalLayout_2.addWidget(self.triad3)
self.triad2 = QtWidgets.QFrame(MezcalPicker)
self.triad2.setStyleSheet("background-color: rgb(204, 0, 102);")
self.triad2.setFrameShape(QtWidgets.QFrame.StyledPanel)
self.triad2.setFrameShadow(QtWidgets.QFrame.Raised)
self.triad2.setObjectName("triad2")
self.gridLayout_8 = QtWidgets.QGridLayout(self.triad2)
self.gridLayout_8.setObjectName("gridLayout_8")
self.push_triad2 = QtWidgets.QPushButton(self.triad2)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.push_triad2.sizePolicy().hasHeightForWidth())
self.push_triad2.setSizePolicy(sizePolicy)
font = QtGui.QFont()
font.setFamily("Noto Emoji")
font.setPointSize(24)
self.push_triad2.setFont(font)
self.push_triad2.setContextMenuPolicy(QtCore.Qt.NoContextMenu)
self.push_triad2.setAutoFillBackground(False)
self.push_triad2.setDefault(False)
self.push_triad2.setFlat(True)
self.push_triad2.setObjectName("push_triad2")
self.gridLayout_8.addWidget(self.push_triad2, 0, 0, 1, 1)
self.verticalLayout_2.addWidget(self.triad2)
self.triad1 = QtWidgets.QFrame(MezcalPicker)
self.triad1.setStyleSheet("background-color: rgb(0, 102, 204);")
self.triad1.setFrameShape(QtWidgets.QFrame.StyledPanel)
self.triad1.setFrameShadow(QtWidgets.QFrame.Raised)
self.triad1.setObjectName("triad1")
self.gridLayout_7 = QtWidgets.QGridLayout(self.triad1)
self.gridLayout_7.setObjectName("gridLayout_7")
self.push_triad1 = QtWidgets.QPushButton(self.triad1)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.push_triad1.sizePolicy().hasHeightForWidth())
self.push_triad1.setSizePolicy(sizePolicy)
font = QtGui.QFont()
font.setFamily("Noto Emoji")
font.setPointSize(24)
self.push_triad1.setFont(font)
self.push_triad1.setContextMenuPolicy(QtCore.Qt.NoContextMenu)
self.push_triad1.setAutoFillBackground(False)
self.push_triad1.setDefault(False)
self.push_triad1.setFlat(True)
self.push_triad1.setObjectName("push_triad1")
self.gridLayout_7.addWidget(self.push_triad1, 0, 0, 1, 1)
self.verticalLayout_2.addWidget(self.triad1)
self.gridLayout_2.addLayout(self.verticalLayout_2, 0, 2, 1, 1)
self.gridLayout = QtWidgets.QGridLayout()
self.gridLayout.setObjectName("gridLayout")
self.analogous3 = QtWidgets.QFrame(MezcalPicker)
self.analogous3.setStyleSheet("background-color: rgb(102, 204, 0);")
self.analogous3.setFrameShape(QtWidgets.QFrame.StyledPanel)
self.analogous3.setFrameShadow(QtWidgets.QFrame.Raised)
self.analogous3.setObjectName("analogous3")
self.gridLayout_20 = QtWidgets.QGridLayout(self.analogous3)
self.gridLayout_20.setObjectName("gridLayout_20")
self.push_analogous3 = QtWidgets.QPushButton(self.analogous3)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.push_analogous3.sizePolicy().hasHeightForWidth())
self.push_analogous3.setSizePolicy(sizePolicy)
font = QtGui.QFont()
font.setFamily("Noto Emoji")
font.setPointSize(24)
self.push_analogous3.setFont(font)
self.push_analogous3.setContextMenuPolicy(QtCore.Qt.NoContextMenu)
self.push_analogous3.setAutoFillBackground(False)
self.push_analogous3.setDefault(False)
self.push_analogous3.setFlat(True)
self.push_analogous3.setObjectName("push_analogous3")
self.gridLayout_20.addWidget(self.push_analogous3, 0, 0, 1, 1)
self.gridLayout.addWidget(self.analogous3, 2, 2, 1, 1)
self.scompliment2 = QtWidgets.QFrame(MezcalPicker)
self.scompliment2.setStyleSheet("background-color: rgb(204, 20, 0);")
self.scompliment2.setFrameShape(QtWidgets.QFrame.StyledPanel)
self.scompliment2.setFrameShadow(QtWidgets.QFrame.Raised)
self.scompliment2.setObjectName("scompliment2")
self.gridLayout_18 = QtWidgets.QGridLayout(self.scompliment2)
self.gridLayout_18.setObjectName("gridLayout_18")
self.push_scompliment2 = QtWidgets.QPushButton(self.scompliment2)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.push_scompliment2.sizePolicy().hasHeightForWidth())
self.push_scompliment2.setSizePolicy(sizePolicy)
font = QtGui.QFont()
font.setFamily("Noto Emoji")
font.setPointSize(24)
self.push_scompliment2.setFont(font)
self.push_scompliment2.setContextMenuPolicy(QtCore.Qt.NoContextMenu)
self.push_scompliment2.setAutoFillBackground(False)
self.push_scompliment2.setDefault(False)
self.push_scompliment2.setFlat(True)
self.push_scompliment2.setObjectName("push_scompliment2")
self.gridLayout_18.addWidget(self.push_scompliment2, 0, 0, 1, 1)
self.gridLayout.addWidget(self.scompliment2, 0, 0, 1, 1)
self.scompliment1 = QtWidgets.QFrame(MezcalPicker)
self.scompliment1.setStyleSheet("background-color: rgb(0, 102, 204);")
self.scompliment1.setFrameShape(QtWidgets.QFrame.StyledPanel)
self.scompliment1.setFrameShadow(QtWidgets.QFrame.Raised)
self.scompliment1.setObjectName("scompliment1")
self.gridLayout_16 = QtWidgets.QGridLayout(self.scompliment1)
self.gridLayout_16.setObjectName("gridLayout_16")
self.push_scompliment1 = QtWidgets.QPushButton(self.scompliment1)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.push_scompliment1.sizePolicy().hasHeightForWidth())
self.push_scompliment1.setSizePolicy(sizePolicy)
font = QtGui.QFont()
font.setFamily("Noto Emoji")
font.setPointSize(24)
self.push_scompliment1.setFont(font)
self.push_scompliment1.setContextMenuPolicy(QtCore.Qt.NoContextMenu)
self.push_scompliment1.setAutoFillBackground(False)
self.push_scompliment1.setDefault(False)
self.push_scompliment1.setFlat(True)
self.push_scompliment1.setObjectName("push_scompliment1")
self.gridLayout_16.addWidget(self.push_scompliment1, 0, 0, 1, 1)
self.gridLayout.addWidget(self.scompliment1, 0, 1, 1, 1)
self.scompliment3 = QtWidgets.QFrame(MezcalPicker)
self.scompliment3.setStyleSheet("background-color: rgb(204, 184, 0);")
self.scompliment3.setFrameShape(QtWidgets.QFrame.StyledPanel)
self.scompliment3.setFrameShadow(QtWidgets.QFrame.Raised)
self.scompliment3.setObjectName("scompliment3")
self.gridLayout_21 = QtWidgets.QGridLayout(self.scompliment3)
self.gridLayout_21.setObjectName("gridLayout_21")
self.push_scompliment3 = QtWidgets.QPushButton(self.scompliment3)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.push_scompliment3.sizePolicy().hasHeightForWidth())
self.push_scompliment3.setSizePolicy(sizePolicy)
font = QtGui.QFont()
font.setFamily("Noto Emoji")
font.setPointSize(24)
self.push_scompliment3.setFont(font)
self.push_scompliment3.setContextMenuPolicy(QtCore.Qt.NoContextMenu)
self.push_scompliment3.setAutoFillBackground(False)
self.push_scompliment3.setDefault(False)
self.push_scompliment3.setFlat(True)
self.push_scompliment3.setObjectName("push_scompliment3")
self.gridLayout_21.addWidget(self.push_scompliment3, 0, 0, 1, 1)
self.gridLayout.addWidget(self.scompliment3, 0, 2, 1, 1)
spacerItem = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
self.gridLayout.addItem(spacerItem, 1, 0, 1, 1)
self.compliment2 = QtWidgets.QFrame(MezcalPicker)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.compliment2.sizePolicy().hasHeightForWidth())
self.compliment2.setSizePolicy(sizePolicy)
self.compliment2.setStyleSheet("background-color: rgb(204, 102, 0);")
self.compliment2.setFrameShape(QtWidgets.QFrame.StyledPanel)
self.compliment2.setFrameShadow(QtWidgets.QFrame.Raised)
self.compliment2.setObjectName("compliment2")
self.gridLayout_15 = QtWidgets.QGridLayout(self.compliment2)
self.gridLayout_15.setObjectName("gridLayout_15")
self.push_compliment2 = QtWidgets.QPushButton(self.compliment2)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.push_compliment2.sizePolicy().hasHeightForWidth())
self.push_compliment2.setSizePolicy(sizePolicy)
font = QtGui.QFont()
font.setFamily("Noto Emoji")
font.setPointSize(24)
self.push_compliment2.setFont(font)
self.push_compliment2.setContextMenuPolicy(QtCore.Qt.NoContextMenu)
self.push_compliment2.setAutoFillBackground(False)
self.push_compliment2.setDefault(False)
self.push_compliment2.setFlat(True)
self.push_compliment2.setObjectName("push_compliment2")
self.gridLayout_15.addWidget(self.push_compliment2, 0, 0, 1, 1)
self.gridLayout.addWidget(self.compliment2, 1, 1, 1, 1)
spacerItem1 = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
self.gridLayout.addItem(spacerItem1, 1, 2, 1, 1)
self.analogous2 = QtWidgets.QFrame(MezcalPicker)
self.analogous2.setStyleSheet("background-color: rgb(204, 0, 102);")
self.analogous2.setFrameShape(QtWidgets.QFrame.StyledPanel)
self.analogous2.setFrameShadow(QtWidgets.QFrame.Raised)
self.analogous2.setObjectName("analogous2")
self.gridLayout_19 = QtWidgets.QGridLayout(self.analogous2)
self.gridLayout_19.setObjectName("gridLayout_19")
self.push_analogous2 = QtWidgets.QPushButton(self.analogous2)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.push_analogous2.sizePolicy().hasHeightForWidth())
self.push_analogous2.setSizePolicy(sizePolicy)
font = QtGui.QFont()
font.setFamily("Noto Emoji")
font.setPointSize(24)
self.push_analogous2.setFont(font)
self.push_analogous2.setContextMenuPolicy(QtCore.Qt.NoContextMenu)
self.push_analogous2.setAutoFillBackground(False)
self.push_analogous2.setDefault(False)
self.push_analogous2.setFlat(True)
self.push_analogous2.setObjectName("push_analogous2")
self.gridLayout_19.addWidget(self.push_analogous2, 0, 0, 1, 1)
self.gridLayout.addWidget(self.analogous2, 2, 0, 1, 1)
self.analogous1 = QtWidgets.QFrame(MezcalPicker)
self.analogous1.setStyleSheet("background-color: rgb(0, 102, 204);")
self.analogous1.setFrameShape(QtWidgets.QFrame.StyledPanel)
self.analogous1.setFrameShadow(QtWidgets.QFrame.Raised)
self.analogous1.setObjectName("analogous1")
self.gridLayout_14 = QtWidgets.QGridLayout(self.analogous1)
self.gridLayout_14.setObjectName("gridLayout_14")
self.push_analogous1 = QtWidgets.QPushButton(self.analogous1)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.push_analogous1.sizePolicy().hasHeightForWidth())
self.push_analogous1.setSizePolicy(sizePolicy)
font = QtGui.QFont()
font.setFamily("Noto Emoji")
font.setPointSize(24)
self.push_analogous1.setFont(font)
self.push_analogous1.setContextMenuPolicy(QtCore.Qt.NoContextMenu)
self.push_analogous1.setAutoFillBackground(False)
self.push_analogous1.setDefault(False)
self.push_analogous1.setFlat(True)
self.push_analogous1.setObjectName("push_analogous1")
self.gridLayout_14.addWidget(self.push_analogous1, 0, 0, 1, 1)
self.gridLayout.addWidget(self.analogous1, 2, 1, 1, 1)
self.gridLayout_2.addLayout(self.gridLayout, 1, 0, 1, 3)
self.retranslateUi(MezcalPicker)
self.push_scompliment1.clicked.connect(MezcalPicker.set_scompliments1)
self.push_analogous3.clicked.connect(MezcalPicker.set_analogous3)
self.push_analogous1.clicked.connect(MezcalPicker.set_analogous1)
self.push_analogous2.clicked.connect(MezcalPicker.set_analogous2)
self.push_compliment2.clicked.connect(MezcalPicker.set_compliments2)
self.push_monochrome2.clicked.connect(MezcalPicker.set_monochrome2)
self.push_monochrome3.clicked.connect(MezcalPicker.set_monochrome3)
self.push_scompliment2.clicked.connect(MezcalPicker.set_scompliments2)
self.push_scompliment3.clicked.connect(MezcalPicker.set_scompliments3)
self.push_tetrad1.clicked.connect(MezcalPicker.set_tetrad1)
self.push_tetrad2.clicked.connect(MezcalPicker.set_tetrad2)
self.push_tetrad3.clicked.connect(MezcalPicker.set_tetrad3)
self.push_triad1.clicked.connect(MezcalPicker.set_triad1)
self.push_triad2.clicked.connect(MezcalPicker.set_triad2)
self.push_triad3.clicked.connect(MezcalPicker.set_tetrad3)
self.push_monochrome1.clicked.connect(MezcalPicker.set_monochrome1)
self.push_tetrad4.clicked.connect(MezcalPicker.set_tetrad4)
self.push_rum_color.clicked.connect(MezcalPicker.pick_theme)
MezcalPicker.theme_color['QColor'].connect(MezcalPicker.set_theme)
MezcalPicker.sig_analogous1['QString'].connect(self.analogous1.setStyleSheet)
MezcalPicker.sig_analogous2['QString'].connect(self.analogous2.setStyleSheet)
MezcalPicker.sig_analogous3['QString'].connect(self.analogous3.setStyleSheet)
MezcalPicker.sig_compliments1['QString'].connect(self.scompliment1.setStyleSheet)
MezcalPicker.sig_compliments2['QString'].connect(self.compliment2.setStyleSheet)
MezcalPicker.sig_mono1['QString'].connect(self.monochrome1.setStyleSheet)
MezcalPicker.sig_mono2['QString'].connect(self.monochrome2.setStyleSheet)
MezcalPicker.sig_mono3['QString'].connect(self.monochrome3.setStyleSheet)
MezcalPicker.sig_scompliments1['QString'].connect(self.scompliment1.setStyleSheet)
MezcalPicker.sig_scompliments2['QString'].connect(self.scompliment2.setStyleSheet)
MezcalPicker.sig_scompliments3['QString'].connect(self.scompliment3.setStyleSheet)
MezcalPicker.sig_tetrad1['QString'].connect(self.tetrad1.setStyleSheet)
MezcalPicker.sig_tetrad2['QString'].connect(self.tetrad2.setStyleSheet)
MezcalPicker.sig_tetrad3['QString'].connect(self.tetrad3.setStyleSheet)
MezcalPicker.sig_tetrad4['QString'].connect(self.tetrad4.setStyleSheet)
MezcalPicker.sig_triad1['QString'].connect(self.triad1.setStyleSheet)
MezcalPicker.sig_triad2['QString'].connect(self.triad2.setStyleSheet)
MezcalPicker.sig_triad3['QString'].connect(self.triad3.setStyleSheet)
QtCore.QMetaObject.connectSlotsByName(MezcalPicker)
MezcalPicker.setTabOrder(self.push_analogous1, self.push_analogous2)
MezcalPicker.setTabOrder(self.push_analogous2, self.push_analogous3)
MezcalPicker.setTabOrder(self.push_analogous3, self.push_scompliment1)
MezcalPicker.setTabOrder(self.push_scompliment1, self.push_compliment2)
MezcalPicker.setTabOrder(self.push_compliment2, self.push_scompliment2)
MezcalPicker.setTabOrder(self.push_scompliment2, self.push_scompliment3)
MezcalPicker.setTabOrder(self.push_scompliment3, self.push_triad1)
MezcalPicker.setTabOrder(self.push_triad1, self.push_triad2)
MezcalPicker.setTabOrder(self.push_triad2, self.push_triad3)
MezcalPicker.setTabOrder(self.push_triad3, self.push_tetrad1)
MezcalPicker.setTabOrder(self.push_tetrad1, self.push_tetrad2)
MezcalPicker.setTabOrder(self.push_tetrad2, self.push_tetrad3)
MezcalPicker.setTabOrder(self.push_tetrad3, self.push_tetrad4)
MezcalPicker.setTabOrder(self.push_tetrad4, self.push_monochrome1)
MezcalPicker.setTabOrder(self.push_monochrome1, self.push_monochrome2)
MezcalPicker.setTabOrder(self.push_monochrome2, self.push_monochrome3)
MezcalPicker.setTabOrder(self.push_monochrome3, self.push_rum_color)
def retranslateUi(self, MezcalPicker):
_translate = QtCore.QCoreApplication.translate
MezcalPicker.setWindowTitle(_translate("MezcalPicker", "Mezcal Color Scheme Picker"))
self.push_tetrad4.setText(_translate("MezcalPicker", "🐊"))
self.push_monochrome1.setText(_translate("MezcalPicker", "🐣"))
self.push_monochrome2.setText(_translate("MezcalPicker", "🐤"))
self.push_monochrome3.setText(_translate("MezcalPicker", "🐔"))
self.push_tetrad3.setText(_translate("MezcalPicker", "🐙"))
self.push_tetrad2.setText(_translate("MezcalPicker", "🐬"))
self.push_rum_color.setText(_translate("MezcalPicker", "🌈"))
self.tetrad1.setProperty("bg_style", _translate("MezcalPicker", "asdfsadfdsf"))
self.push_tetrad1.setText(_translate("MezcalPicker", "🐋"))
self.push_triad3.setText(_translate("MezcalPicker", "🐂"))
self.push_triad2.setText(_translate("MezcalPicker", "🐇"))
self.push_triad1.setText(_translate("MezcalPicker", "🐀"))
self.push_analogous3.setText(_translate("MezcalPicker", "🌱"))
self.push_scompliment2.setText(_translate("MezcalPicker", "🌛"))
self.push_scompliment1.setText(_translate("MezcalPicker", "🌚"))
self.push_scompliment3.setText(_translate("MezcalPicker", "🌜"))
self.push_compliment2.setText(_translate("MezcalPicker", "🌝"))
self.push_analogous2.setText(_translate("MezcalPicker", "⛈"))
self.push_analogous1.setText(_translate("MezcalPicker", "🌞"))
if __name__ == "__main__":
    # Standalone preview: show the picker widget on its own.
    import sys

    app = QtWidgets.QApplication(sys.argv)
    picker_widget = QtWidgets.QWidget()
    ui = Ui_MezcalPicker()
    ui.setupUi(picker_widget)
    picker_widget.show()
    sys.exit(app.exec_())
|
#!/usr/bin/env python
##
#
# hiroshima.py
# This is the main front-end that pulls all network attacks into one interface. Enjoy.
# Samuel Steele (cryptoc1)
#
##
import networks, sys
# Help text printed by prologue() for the `help` / `?` command.
# NOTE(review): the Twitter section below lists "Un-favorite", but
# twitter_attack() implements favorite / reply / retweet — reconcile.
_usage = "hiroshima help: \
\n\tAttackable Networks: \
\n\t\t+ Twitter \
\n\t\t+ Instagram \
\n\t\t+ AskFM \
\n\tAttack Types: \
\n\t\tTwitter: \
\n\t\t\t+ Favorite \
\n\t\t\t+ Reply \
\n\t\t\t+ Un-favorite \
\n\t\tInstagram: \
\n\t\t\t+ Like \
\n\t\t\t+ Un-like \
\n\t\tAskFM: \
\n\t\t\t+ Ask Question \
\n\tCommands: \
\n\t\tbegin :: enters the main-loop to attack \
\n\t\texit :: exits the program \
\n\t\thelp|? :: prints this help dialog"
def prologue():
print "For help, enter 'help', or '?'. To begin, enter 'begin', or 'exit' to exit."
cmd = raw_input("> ").lower()
if cmd == "help" or cmd == "?":
print _usage
prologue()
elif cmd == "begin":
main()
elif cmd == "exit":
sys.exit()
else:
prologue()
def main():
print "Enter a social network to attack"
network = raw_input("> ").lower()
if network == "instagram":
instagram_attack()
elif network == "twitter":
twitter_attack()
elif network == "askfm":
askfm_attack()
else:
main()
def instagram_attack():
insta = networks.Instagram()
if not insta.AUTH_IN_PREFS:
print "In order for any actions to be preformed, you need to authorize hiroshima to use your Instagram account. Proceed? (Y/n)"
prompt = raw_input("> ").lower()
else:
prompt = "y"
if prompt == "y":
if insta.login():
print "Enter the username of your victim"
username = raw_input("> ")
print "Searching..."
search = insta.search_users(username)
print "I found this: "
print insta.format_user_info(search)
print "Is this the intended victim? (Y/n)"
prompt = raw_input("> ").lower()
if prompt == "y":
if insta.set_victim(search):
print "Enter the attack type (" + insta.get_attack_types() + ")"
attack_type = raw_input("> ").lower()
if attack_type == "like":
print "Enter the number of photos to be liked (enter 'all' to like all)"
insta.like_attack(raw_input("> "))
print "Like attack complete."
prologue()
elif attack_type == "unlike":
print "Enter the number of photos to be liked (enter 'all' to unlike all)"
insta.unlike_attack(raw_input("> "))
print "Unlike attack complete."
prologue()
else:
print "Attack type not entered, starting attack over..."
instagram_attack()
else:
print "There was an error setting the victim."
prologue()
elif prompt == "n":
print "Please check to make sure you have the correct username. (The attack will now start over)."
instagram_attack()
else:
print "Unrecognized characters(s)"
prologue()
else:
print "There was an error logging in."
prologue()
elif prompt == "n":
print "Okay..."
prologue()
else:
print "Unrecognized character(s)."
prologue()
def twitter_attack():
    """Interactive Twitter attack flow: authorize, confirm a victim found
    via search, then run a favorite/reply/retweet attack.

    Every terminal branch either restarts this flow or returns to
    prologue().
    """
    twit = networks.Twitter()
    # Skip the authorization prompt when credentials are already stored.
    if not twit.AUTH_IN_PREFS:
        print "In order for any actions to be preformed, you need to authorize hiroshima to use your Twitter account. Proceed? (Y/n)"
        prompt = raw_input("> ").lower()
    else:
        prompt = "y"
    if prompt == "y":
        if twit.login():
            print "Enter the username of your victim."
            username = raw_input("> ")
            print "Searching..."
            search = twit.search_users(username)
            print "I found this: "
            print twit.format_user_info(search)
            print "Is this the intended victim? (Y/n)"
            # `prompt` is reused for the victim confirmation answer.
            prompt = raw_input("> ").lower()
            if prompt == "y":
                if twit.set_victim(search):
                    print "Enter attack type (" + twit.get_attack_types() + ")"
                    attack_type = raw_input("> ").lower()
                    if attack_type == "favorite":
                        print "Enter the number of tweets to favorite."
                        twit.fav_attack(raw_input("> "))
                        print "Favorite attack complete."
                        prologue()
                    elif attack_type == "reply":
                        print "Enter tweet text (remember the 140 character limit)."
                        text = raw_input("> ")
                        if len(text) > 140:
                            print "Text length excedes (140) character limit, starting over."
                            twitter_attack()
                        else:
                            print "Enter the number of tweets to reply to."
                            twit.reply_attack(text, raw_input("> "))
                            print "Reply attack complete."
                            prologue()
                    elif attack_type == "retweet":
                        print "Enter the number of tweets to be retweeted."
                        # NOTE(review): "rewtweet_attack" looks misspelled —
                        # confirm it matches the method name in networks.Twitter
                        # before renaming either side.
                        twit.rewtweet_attack(raw_input("> "))
                        print "Retweet attack complete."
                        prologue()
                    else:
                        print "Attack type not entered, starting attack over..."
                        twitter_attack()
                else:
                    print "There was an error setting the victim."
                    prologue()
            elif prompt == "n":
                print "Please check to make sure you have the correct username. (The attack will now start over)."
                twitter_attack()
            else:
                print "Unrecognized character(s)"
                prologue()
        else:
            print "There was an error logging in."
            prologue()
    elif prompt == "n":
        print "Okay..."
        prologue()
    else:
        print "Unrecognized character(s)"
        prologue()
def askfm_attack():
print "Enter the username"
username = raw_input("> ")
ask = networks.AskFM(username)
print "Enter the question to be asked"
query = raw_input("> ")
print "Enter the number of times the question should be asked"
count = raw_input("> ")
print "About to ask @" + username + " \"" + query + "\", " + count + " times. Proceed? (Y/n)"
prompt = raw_input("> ").lower()
if prompt == "y":
ask.ask_question(query, int(count))
elif prompt == "n":
print "Aborting attack."
else:
print "Unrecognized character(s), restarting attack."
askfm_attack()
print "ask.fm attack complete."
prologue()
# Script entry point: greet the user and start the interactive prompt loop.
if __name__ == "__main__":
    print "Hello, and welcome to Hiroshima: A Social Spammer."
    prologue()
|
"""
Helpers for accessing C++ STL containers in GDB.
"""
# @lint-avoid-python-3-compatibility-imports
import gdb
import re
from gdbutils import *
from hashes import hash_of
#------------------------------------------------------------------------------
# STL accessors.
#
# These are only designed to work for gcc-4.8.1.
def atomic_get(atomic):
    """Unwrap a libstdc++ std::atomic (gcc-4.8.1 layout, per the module
    header): `_M_b` is the base member, `_M_p` holds the payload."""
    base = atomic['_M_b']
    return base['_M_p']
def vector_at(vec, idx):
    """Return element `idx` of a libstdc++ std::vector, or None when the
    index is out of range (size is _M_finish - _M_start)."""
    impl = vec['_M_impl']
    size = impl['_M_finish'] - impl['_M_start']
    if idx >= size:
        return None
    return impl['_M_start'][idx]
def unordered_map_at(umap, idx):
    """Return umap[idx] for a libstdc++ std::unordered_map, or None when
    the key is absent (gcc-4.8.1 layout, per the module header)."""
    h = umap['_M_h']
    # Select the bucket for the key's hash.
    bucket = h['_M_buckets'][hash_of(idx) % h['_M_bucket_count']]
    if bucket == 0x0:
        return None

    node = bucket['_M_nxt']

    # Pointer type used to reinterpret the storage that follows each node.
    value_type = T(str(umap.type) + '::value_type').pointer()

    # Walk the singly-linked chain of this bucket.
    while node != 0x0:
        # Hashtable nodes contain only a pointer to the next node in the
        # bucket, but are always immediately followed by the value pair.
        value = (node + 1).cast(value_type)
        if idx == value['first']:
            return value['second']
        node = node['_M_nxt']

    return None
#------------------------------------------------------------------------------
# HHVM accessors.
def compact_ptr_get(csp):
    """Decode an HPHP compact pointer: the low 48 bits of `m_data` hold
    the address; the pointee type is the template argument of csp's type."""
    pointee_name = str(csp.type).split('<', 1)[1][:-1]
    pointer_type = T(pointee_name).pointer()
    return (csp['m_data'] & 0xffffffffffff).cast(pointer_type)
def fixed_vector_at(fv, idx):
    """Index into an HPHP::FixedVector via its compact storage pointer."""
    storage = compact_ptr_get(fv['m_sp'])
    return storage[idx]
def thm_at(thm, key):
    """Look up `key` in an HPHP::TreadHashMap — an open-addressing table
    probed linearly — returning the mapped value, or None if absent."""
    table = atomic_get(thm['m_table'])
    capac = table['capac']

    # `capac` is assumed to be a power of two, so `& (capac - 1)` is the
    # modulo — TODO confirm against the TreadHashMap implementation.
    idx = (hash_of(key) & (capac - 1)).cast(T('size_t'))

    while True:
        entry = table['entries'][idx]
        probe = atomic_get(entry['first'])

        if probe == key:
            return entry['second']
        if probe == 0:
            # An empty slot terminates the probe sequence: key is absent.
            return None

        idx += 1
        if idx == capac:
            idx = 0  # wrap around to the start of the table
#------------------------------------------------------------------------------
# Helpers.
def template_type(t):
    """Strip template parameters from a type name:
    'std::vector<int>' -> 'std::vector'."""
    return str(t).partition('<')[0]
#------------------------------------------------------------------------------
# `idx' command.
class IdxCommand(gdb.Command):
    """Index into an arbitrary container.

    Usage: idx <container> <index>

    GDB `print` is called on the address of the value, and then the value
    itself is printed.

    If `container' is of a recognized type (e.g., native arrays,
    std::vector), `idx' will index according to operator[]. Otherwise, it
    will attempt to treat `container' as an object with data member `index'.
    """

    def __init__(self):
        super(IdxCommand, self).__init__('idx', gdb.COMMAND_DATA)
        # Maps an un-templated type name to its accessor helper.
        self.accessors = {
            'std::vector': vector_at,
            'std::unordered_map': unordered_map_at,
            'HPHP::FixedVector': fixed_vector_at,
            'HPHP::TreadHashMap': thm_at,
        }

    def invoke(self, args, from_tty):
        """Entry point called by GDB with the raw argument string."""
        argv = parse_argv(args)

        if len(argv) != 2:
            print('Usage: idx <container> <index>')
            return

        container = argv[0]
        idx = argv[1]
        value = None

        # Try the declared type first, then the typedef-stripped type.
        # (Uses `container` consistently; the original re-read argv[0].)
        container_type = template_type(container.type)
        true_type = template_type(container.type.strip_typedefs())

        if container_type in self.accessors:
            value = self.accessors[container_type](container, idx)
        elif true_type in self.accessors:
            value = self.accessors[true_type](container, idx)
        else:
            try:
                value = container[idx]
            except Exception:
                # Narrowed from a bare `except:` so KeyboardInterrupt is not
                # swallowed; gdb errors derive from Exception.
                print('idx: Unrecognized container.')
                return

        if value is None:
            print('idx: Element not found.')
            return

        gdb.execute('print (%s)%s' % (
            str(value.type.pointer()), value.address))
        print(vstr(value))
# Instantiating registers the `idx` command with GDB as an import side effect.
IdxCommand()
|
#!/usr/bin/env python2.7
# -*- coding: utf-8 -*-
# Copyright (C) Canux CHENG <canuxcheng@gmail.com>
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
# OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE
# OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""Check that HADR is available."""
import logging
from monitoring.nagios.plugin import NagiosPluginSSH
import hadr
logger = logging.getLogger('plugin.hadr')
# define new args
class PluginHadr(NagiosPluginSSH):
    """Custom plugin definition."""

    def define_plugin_arguments(self):
        """Register the db2 user and optional warning/critical log-gap
        thresholds on top of the base SSH plugin arguments."""
        super(PluginHadr, self).define_plugin_arguments()

        # Account used below to run the db2 query via `sudo -u <user> -i`.
        self.required_args.add_argument('-d', '--db2user',
                                        dest="db2_user",
                                        help="Db2 user use, an string",
                                        required=True)
        # NOTE(review): -w/-c are added to the required_args group yet are
        # required=False — confirm this grouping is intentional.
        self.required_args.add_argument('-w', '--warn',
                                        type=int,
                                        dest='warning',
                                        default=0,
                                        help='Warning threshold for log_gap.',
                                        required=False)
        self.required_args.add_argument('-c', '--crit',
                                        type=int,
                                        dest='critical',
                                        default=0,
                                        help='Critical threshold for log_gap.',
                                        required=False)
# Init plugin
plugin = PluginHadr(version=hadr.__version__,
                    description="check HADR statut")
plugin.shortoutput = "HADR is Connected"

# Final status exit for the plugin (bound to plugin.ok / .warning / ... below)
status = None

# db2 SNAPHADR query run remotely as the db2 user; the first sed keeps only
# the data rows (line after each '--' header rule), the second squeezes runs
# of blanks so each row splits into exactly six fields.
cmd = """echo 'db2 \"SELECT HADR_ROLE, \
HADR_LOCAL_HOST, \
HADR_CONNECT_STATUS, \
HADR_REMOTE_HOST, \
HADR_LOG_GAP, \
HADR_STATE \
FROM SYSIBMADM.SNAPHADR\"' \
| sudo -u {0} -i \
| sed -n '/--/{{n; p;}}' \
| sed 's/[ ][ ]*/ /g'""".format(plugin.options.db2_user)
logger.debug("cmd : {0}".format(cmd))

try:
    command = plugin.ssh.execute(cmd)
except plugin.ssh.SSHCommandTimeout:
    # plugin.unknown() exits the process with the UNKNOWN status.
    plugin.unknown("Plugin execution timed out in {} secs !".format(
        plugin.options.timeout))

output = command.output
errors = command.errors
if errors:
    plugin.unknown("Errors found:\n{}".format("\n".join(errors)))

# Travel output cmd by line
cmpt = 0
if not any(output):
    plugin.unknown("Output is empty !")
for line in output:
    logger.debug(line)
    status = plugin.ok
    role, name, state, remote, log_gap, hadr_state = line.split()
    plugin.longoutput.append("Active Host: {0} "
                             "Role: {1}{2}"
                             "Remote Host: {3}{2}"
                             "State: {4}{2}"
                             "Log_Gap: {5}{2}"
                             "HADR_State: {6}".format(name,
                                                      role,
                                                      '\n',
                                                      remote,
                                                      state,
                                                      log_gap,
                                                      hadr_state))
    plugin.perfdata.append(
        'log_gap[{0}]={1}b;{2.warning};{2.critical};0;'.format(cmpt,
                                                               log_gap,
                                                               plugin.options))
    cmpt += 1
    # NOTE(review): indentation was lost in this copy; the per-row checks
    # below are assumed to run inside the loop (they use the row's fields) —
    # confirm against the upstream file.
    if state != "CONNECTED":
        plugin.shortoutput = "HADR is {}".format(state)
        plugin.critical(plugin.output())
    if hadr_state != "PEER":
        plugin.shortoutput = "Hadr state is {}".format(hadr_state)
        plugin.warning(plugin.output())
    # Log_gap
    if plugin.options.warning:
        if int(log_gap) >= plugin.options.warning:
            status = plugin.warning
            plugin.shortoutput = "Log_gap : {} " \
                                 "(threshold warn {})".format(
                                     log_gap, plugin.options.warning)
    if plugin.options.critical:
        if int(log_gap) >= plugin.options.critical:
            status = plugin.critical
            plugin.shortoutput = "Log_gap : {} " \
                                 "(threshold crit {})".format(
                                     log_gap, plugin.options.critical)

# Return status with message to Nagios
logger.debug("Return status and exit to Nagios.")
if status:
    status(plugin.output())
else:
    plugin.unknown('Unexpected error during plugin execution, please '
                   'investigate with debug mode on.')
|
from functools import lru_cache
from typing import Any, Dict, Iterable, List, Optional, Tuple, Union
from sanic_routing import BaseRouter # type: ignore
from sanic_routing.exceptions import NoMethod # type: ignore
from sanic_routing.exceptions import (
NotFound as RoutingNotFound, # type: ignore
)
from sanic_routing.route import Route # type: ignore
from sanic.constants import HTTP_METHODS
from sanic.exceptions import MethodNotSupported, NotFound, SanicException
from sanic.models.handler_types import RouteHandler
# Maximum number of (path, method, host) resolutions memoized by the
# lru_cache-decorated router methods below.
ROUTER_CACHE_SIZE = 1024
# Dunder-prefixed parameter labels that are still permitted in dynamic routes
# (all other "__"-prefixed labels are rejected in Router.finalize()).
ALLOWED_LABELS = ("__file_uri__",)
class Router(BaseRouter):
    """
    The router implementation responsible for routing a :class:`Request` object
    to the appropriate handler.
    """

    DEFAULT_METHOD = "GET"
    ALLOWED_METHODS = HTTP_METHODS

    def _get(
        self, path: str, method: str, host: Optional[str]
    ) -> Tuple[Route, RouteHandler, Dict[str, Any]]:
        """
        Resolve ``path``/``method``/``host`` via the underlying routing
        library, translating its exceptions into Sanic's public HTTP
        exceptions.

        :raises NotFound: no route matches ``path``
        :raises MethodNotSupported: the path exists, but not for ``method``
        """
        try:
            return self.resolve(
                path=path,
                method=method,
                extra={"host": host},
            )
        except RoutingNotFound as e:
            raise NotFound("Requested URL {} not found".format(e.path))
        except NoMethod as e:
            raise MethodNotSupported(
                "Method {} not allowed for URL {}".format(method, path),
                method=method,
                allowed_methods=e.allowed_methods,
            )

    @lru_cache(maxsize=ROUTER_CACHE_SIZE)
    def get(  # type: ignore
        self, path: str, method: str, host: Optional[str]
    ) -> Tuple[Route, RouteHandler, Dict[str, Any]]:
        """
        Retrieve a `Route` object containing the details about how to handle
        a response for a given request. Results are memoized per
        (path, method, host) triple via ``lru_cache``.

        :param path: the URL path of the incoming request
        :param method: the HTTP method of the incoming request
        :param host: the request's host, if any
        :return: details needed for handling the request and returning the
            correct response
        :rtype: Tuple[Route, RouteHandler, Dict[str, Any]]
        """
        return self._get(path, method, host)

    def add(  # type: ignore
        self,
        uri: str,
        methods: Iterable[str],
        handler: RouteHandler,
        host: Optional[Union[str, Iterable[str]]] = None,
        strict_slashes: bool = False,
        stream: bool = False,
        ignore_body: bool = False,
        version: Optional[Union[str, float, int]] = None,
        name: Optional[str] = None,
        unquote: bool = False,
        static: bool = False,
    ) -> Union[Route, List[Route]]:
        """
        Add a handler to the router

        :param uri: the path of the route
        :type uri: str
        :param methods: the types of HTTP methods that should be attached,
            example: ``["GET", "POST", "OPTIONS"]``
        :type methods: Iterable[str]
        :param handler: the sync or async function to be executed
        :type handler: RouteHandler
        :param host: host that the route should be on, defaults to None
        :type host: Optional[str], optional
        :param strict_slashes: whether to apply strict slashes, defaults
            to False
        :type strict_slashes: bool, optional
        :param stream: whether to stream the response, defaults to False
        :type stream: bool, optional
        :param ignore_body: whether the incoming request body should be read,
            defaults to False
        :type ignore_body: bool, optional
        :param version: a version modifier for the uri, defaults to None
        :type version: Union[str, float, int], optional
        :param name: an identifying name of the route, defaults to None
        :type name: Optional[str], optional
        :param unquote: whether to unquote matched path parameters
        :type unquote: bool, optional
        :param static: whether this is a static-file route
        :type static: bool, optional
        :return: the route object
        :rtype: Route
        """
        if version is not None:
            # Normalize "v1", "/1/", 1, 1.0 ... into a "/v<version>" prefix.
            version = str(version).strip("/").lstrip("v")
            uri = "/".join([f"/v{version}", uri.lstrip("/")])
        params = dict(
            path=uri,
            handler=handler,
            methods=methods,
            name=name,
            strict=strict_slashes,
            unquote=unquote,
        )
        if isinstance(host, str):
            hosts = [host]
        else:
            hosts = host or [None]  # type: ignore
        routes = []
        for host in hosts:
            if host:
                # NOTE(review): ``params`` is shared across loop iterations, so
                # once a truthy host sets "requirements" it persists for any
                # later falsy hosts — presumably hosts are all-or-none here;
                # confirm.
                params.update({"requirements": {"host": host}})
            route = super().add(**params)  # type: ignore
            # Stash Sanic-specific flags on the route's context object.
            route.ctx.ignore_body = ignore_body
            route.ctx.stream = stream
            route.ctx.hosts = hosts
            route.ctx.static = static
            routes.append(route)
        # Preserve the historical return shape: a single Route when only one
        # host was given, otherwise the full list.
        if len(routes) == 1:
            return routes[0]
        return routes

    @lru_cache(maxsize=ROUTER_CACHE_SIZE)
    def find_route_by_view_name(self, view_name, name=None):
        """
        Find a route in the router based on the specified view name.

        :param view_name: string of view name to search by
        :param name: additional params, usually for static files
        :return: the matching Route, or None when no route has that name
        """
        if not view_name:
            return None
        route = self.name_index.get(view_name)
        if not route:
            # Fall back to the app-qualified name (e.g. "appname.view").
            full_name = self.ctx.app._generate_name(view_name)
            route = self.name_index.get(full_name)
            if not route:
                return None
        return route

    @property
    def routes_all(self):
        """All registered routes, keyed as stored by the base router."""
        return self.routes

    @property
    def routes_static(self):
        """Routes with no path parameters."""
        return self.static_routes

    @property
    def routes_dynamic(self):
        """Routes containing path parameters."""
        return self.dynamic_routes

    @property
    def routes_regex(self):
        """Routes matched via regular expressions."""
        return self.regex_routes

    def finalize(self, *args, **kwargs):
        """Finalize the router and reject dunder-named path parameters."""
        super().finalize(*args, **kwargs)
        for route in self.dynamic_routes.values():
            # "__"-prefixed labels are reserved; only ALLOWED_LABELS may pass.
            if any(
                label.startswith("__") and label not in ALLOWED_LABELS
                for label in route.labels
            ):
                raise SanicException(
                    f"Invalid route: {route}. Parameter names cannot use '__'."
                )
|
from django.urls import path
from . import views
# URL routes for the pedido (order) app: item entry, checkout/detail views,
# PDF exports, and the picking ("separacao") workflow.
urlpatterns = [
    path('add/<str:codigo>/', views.pedido_add_item, name='pedido_add_item'),
    path('add/atacado/<str:codigo>/<int:quantidade>/', views.pedido_add_item_atacado, name='pedido_add_item_atacado'),
    path('aberto/', views.pedido_aberto, name='pedido_aberto'),
    path('checkout/<pk>/', views.pedido_checkout, name='pedido_checkout'),
    path('details/<pk>/', views.pedido_details, name='pedido_details'),
    path('export/pdf/<pk>/', views.pedido_export_pdf, name='pedido_export_pdf'),
    path('export/pdf/deliveryterm/<pk>/', views.pedido_delivery_term_pdf, name='pedido_export_delivery_term_pdf'),
    path('export/pdf/completo/<pk>/', views.pedido_delivery_term_with_order_pdf, name='pedido_export_complete_pdf'),
    path('list/', views.pedidos_list, name='pedidos_list'),
    path('list/separacao/', views.pedidos_list_separacao, name='pedidos_list_separacao'),
    path('list/separados/', views.pedidos_list_separados, name='pedidos_list_separados'),
    path('separacao/<pk>/', views.pedido_separacao, name='pedido_separacao'),
]
|
from typing import Any, Dict
from django.db.models import Count
from django.db.models.functions import TruncDay
from django.http import HttpRequest, HttpResponse
from django.shortcuts import redirect
from django.urls import reverse, reverse_lazy
from django.utils import timezone
from django.views import View
from django.views.generic.detail import DetailView
from django.views.generic.edit import CreateView, DeleteView
from analytics.models import ShortUrlVisit
from .models import ShortUrl
class ShortUrlCreateView(CreateView):
    """Form view for creating a ShortUrl from a redirect URL."""
    model = ShortUrl
    fields = ["redirect_url"]
    def get_success_url(self) -> str:
        # After a successful create, show the detail page of the new object.
        return reverse("short-url-detail", kwargs={"slug": self.object.slug})
class ShortUrlDeleteView(DeleteView):
    """Confirmation view for deleting a ShortUrl; returns to the create form."""
    model = ShortUrl
    success_url = reverse_lazy("short-url-create")
class ShortUrlDetailView(DetailView):
    """Display a single ShortUrl together with its per-day visit counts."""

    model = ShortUrl

    def get_context_data(self, **kwargs: Any) -> Dict[str, Any]:
        context = super().get_context_data(**kwargs)
        short_url = context["shorturl"]
        # Build the absolute short link: scheme + host + slug.
        scheme = self.request.scheme
        host = self.request.get_host()
        context["full_url"] = f"{scheme}://{host}/{short_url.slug}"
        # Aggregate visit counts by day
        # https://stackoverflow.com/a/41930880/1191545
        daily_visits = (
            short_url.visits.all()
            .annotate(date=TruncDay("occurred"))
            .values("date")
            .annotate(visits_count=Count("id"))
            .order_by("date")
        )
        # Materialize as a list so the template can serialize it to JSON.
        context["analytics"] = list(daily_visits)
        return context
class ShortUrlRedirectView(View):
    """Resolve a slug, record the visit, then redirect to the target URL."""

    def get(self, request: HttpRequest, slug: str) -> HttpResponse:
        # Look up the short URL addressed by this slug.
        short_url = ShortUrl.objects.get(slug=slug)
        # Log a visit with a timezone-aware timestamp before redirecting.
        ShortUrlVisit.objects.create(
            short_url=short_url,
            occurred=timezone.now(),
        )
        return redirect(short_url.redirect_url)
|
#!/usr/bin/env python
import sys
import argparse
import subprocess
# CLI: --noan skips the (slow) "make psql-analyze" step before measuring.
parser = argparse.ArgumentParser()
parser.add_argument('--noan', action='store_true', help='Not to run make psql-analyze')
# Total size (pretty-printed) of all tables in the public schema.
# Double quotes are escaped so the SQL can be embedded in a shell -c "..." arg.
TOTAL_SIZE_SQL = """SELECT
pg_size_pretty(sum(size)) AS size
FROM (
SELECT
relname as "Table",
pg_total_relation_size(relid) as "size"
FROM pg_catalog.pg_statio_user_tables
WHERE schemaname='public'
) a
;""".replace('\"', '\\\"')
# Per-table size in bytes plus the live row count from pg_stat_user_tables.
TABLE_SIZES_SQL = """SELECT
a.relname as "table",
pg_table_size(a.relid) as "size",
b.n_live_tup as "rows"
FROM pg_catalog.pg_statio_user_tables a
LEFT JOIN pg_stat_user_tables b ON (a.relid = b.relid)
WHERE
a.schemaname='public'
ORDER BY a.relname;
""".replace('\"', '\\\"')
# Names of all tables in the public schema.
TABLES_SQL = """SELECT
a.relname
FROM pg_catalog.pg_statio_user_tables a
WHERE
a.schemaname='public'
ORDER BY a.relname;
"""
# Column names of one table ({0}); covers ordinary tables, views and
# materialized views, skipping dropped columns.
COLUMN_NAMES_SQL = """SELECT a.attname
FROM pg_class As c
INNER JOIN pg_attribute As a ON c.oid = a.attrelid
LEFT JOIN pg_namespace n ON n.oid = c.relnamespace
LEFT JOIN pg_tablespace t ON t.oid = c.reltablespace
WHERE
c.relkind IN('r', 'v', 'm') AND
a.attnum > 0 AND
n.nspname = 'public' AND
c.relname = '{0}' AND
a.attisdropped = FALSE
ORDER BY a.attname;
"""
# Sum of pg_column_size() per column ({0} = column expressions, {1} = table).
COLUMNS_SQL = """select
sum(pg_column_size(t.*)) as "all",
{0}
from {1} t;
""".replace('\"', '\\\"')
def print_column_sizes(tables):
    """Print per-column byte sizes (CSV) for each table in ``tables``.

    For every table, two psql queries are run through docker-compose:
    one listing the table's column names, one summing pg_column_size()
    for each column.

    :param tables: iterable of table names in the public schema
    """
    for table in tables:
        print("Column sizes of table " + table)
        cmds = [
            'docker-compose run --rm import-osm',
            '/usr/src/app/psql.sh -t -A -F\",\" -P pager=off',
            '-c \"' + COLUMN_NAMES_SQL.format(table).replace('\n', ' ').replace('\r',
                                                                                '') + '\"'
        ]
        # universal_newlines=True decodes the output to str: on Python 3,
        # check_output returns bytes, which would break the '\n' split below.
        output = subprocess.check_output(
            " ".join(cmds), shell=True, universal_newlines=True)
        columns = filter(lambda c: len(c) > 0,
                         map(lambda l: l.strip(), output.split('\n')))
        # Build one sum(pg_column_size("col")) expression per column; quotes
        # are double-escaped to survive the shell -c "..." embedding.
        col_sql = ",\n".join(
            map(lambda c: "sum(pg_column_size(\\\"" + c + "\\\")) as \\\"" + c + "\\\"",
                columns))
        cmds = [
            'docker-compose run --rm import-osm',
            '/usr/src/app/psql.sh -F\",\" --no-align -P pager=off',
            '-c \"' + COLUMNS_SQL.format(col_sql, table).replace('\n', ' ').replace(
                '\r', '') + '\"'
        ]
        col_csv = subprocess.check_output(
            " ".join(cmds), shell=True, universal_newlines=True)
        print(col_csv)
if __name__ == "__main__":
    args = parser.parse_args()
    try:
        if not args.noan:
            # Refresh planner statistics so row counts/sizes are current.
            print("Running make psql-analyze")
            subprocess.check_output("make psql-analyze", shell=True)
        print("Total size of tables")
        cmds = [
            'docker-compose run --rm import-osm',
            '/usr/src/app/psql.sh -F\",\" --no-align -P pager=off',
            '-c \"' + TOTAL_SIZE_SQL.replace('\n', ' ').replace('\r', '') + '\"'
        ]
        # universal_newlines=True decodes output to str (Python 3 check_output
        # returns bytes, which would print as b'...' and break splitting).
        TOTAL_SIZE_CSV = subprocess.check_output(
            " ".join(cmds), shell=True, universal_newlines=True)
        print(TOTAL_SIZE_CSV)
        print("\n")
        print("Table sizes")
        cmds = [
            'docker-compose run --rm import-osm',
            '/usr/src/app/psql.sh -F\",\" --no-align -P pager=off',
            '-c \"' + TABLE_SIZES_SQL.replace('\n', ' ').replace('\r', '') + '\"'
        ]
        TABLE_SIZES_CSV = subprocess.check_output(
            " ".join(cmds), shell=True, universal_newlines=True)
        print(TABLE_SIZES_CSV)
        print("\n")
        print("Column sizes")
        cmds = [
            'docker-compose run --rm import-osm',
            '/usr/src/app/psql.sh -t -A -F\",\" -P pager=off',
            '-c \"' + TABLES_SQL.replace('\n', ' ').replace('\r', '') + '\"'
        ]
        output = subprocess.check_output(
            " ".join(cmds), shell=True, universal_newlines=True)
        tables = filter(lambda t: len(t) > 0,
                        map(lambda l: l.strip(), output.split('\n')))
        print_column_sizes(tables)
    except subprocess.CalledProcessError as e:
        print("Error:\n", e.output)
        # Exit non-zero so callers/CI can detect the failure (was exit(0)).
        sys.exit(1)
|
from django.contrib import admin
from django.contrib.auth.hashers import make_password
from ..models.authorModel import Author
# DJANGO ADMIN PANEL
# Allows you to view pending request to action on
def pendingRequest(ModelAdmin, request, result):
    """Admin action: turn each selected pending request into an Author account,
    then delete the processed requests.

    :param ModelAdmin: the admin class invoking the action (unused)
    :param request: the current HTTP request (unused)
    :param result: queryset of selected pending-request objects
    """
    # Fix: the loop variable used to shadow the ``request`` parameter, and the
    # created Author was bound to a local named ``admin``, shadowing the
    # imported django.contrib.admin module. Renamed both; behavior unchanged.
    for pending in result:
        author = Author(displayName=pending.displayName, username=pending.username, password=make_password(pending.password), host=pending.host, github=pending.github)
        author.url = (f'{pending.host}author/{author.uuid}')
        author.id = author.url
        author.profileImage = "https://180dc.org/wp-content/uploads/2016/08/default-profile.png"
        author.save()
    result.delete()
pendingRequest.short_description = "ACCEPT USER REQUEST"
# Admin pending request
class pendingRequestView(admin.ModelAdmin):
    """Admin list view for pending account requests; exposes the accept action."""
    list_display = ['username','displayName', 'github', 'host']
    ordering = ['username']
    actions = [pendingRequest]
|
# -*- coding: utf-8 -*-
# !/usr/bin/env python
# @Time : 2021/8/26 11:38
# @Author : NoWords
# @FileName: comment_view.py
from core.common.common_view import CommonAPIView
from ..serializers import CommentSerializer
from ..models import Comment
class CommentAPIView(CommonAPIView):
    """
    Comment management API (CRUD behavior provided by CommonAPIView).
    """
    model = Comment
    serializer = CommentSerializer
    # Presumably: stamp the creator on insert but do not stamp an updater on
    # update — confirm semantics in CommonAPIView.
    add_insert_creator = True
    update_insert_updater = False
    # Query-string filters: exact match on id, substring match on content.
    query = [
        {'filter_key': 'id', 'request_key': 'id'},
        {'filter_key': 'content__contains', 'request_key': 'content'},
    ]
|
# Copyright (c) 2017 Sony Corporation. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
from pathlib import Path
import sys
sys.path.append(str(Path().cwd().parents[1] / 'utils'))
import nnabla as nn
import numpy as np
from neu.comm import CommunicatorWrapper
from neu.tts.optimizer import Optimizer
from nnabla.ext_utils import get_extension_context
from nnabla.logger import logger
from nnabla.utils.data_iterator import data_iterator
from nnabla.utils.learning_rate_scheduler import ExponentialScheduler
from dataset import LJSpeechDataSource
from hparams import hparams as hp
from model.model import Discriminator, Generator
from train import HiFiGANTrainer
def run(args):
    """Train HiFi-GAN on LJSpeech.

    Sets up the nnabla context (optionally distributed), builds train/valid
    data iterators, the generator/discriminator pair and their AdamW
    optimizers, then delegates the training loop to HiFiGANTrainer.

    :param args: parsed CLI arguments; only ``args.context`` is read here
    """
    Path(hp.output_path).mkdir(parents=True, exist_ok=True)
    # setup nnabla context
    ctx = get_extension_context(args.context)
    nn.set_default_context(ctx)
    hp.comm = CommunicatorWrapper(ctx)
    if hp.comm.n_procs > 1 and hp.comm.rank == 0:
        # Only rank 0 logs, avoiding duplicate messages in distributed runs.
        n_procs = hp.comm.n_procs
        logger.info(f'Distributed training with {n_procs} processes.')
    rng = np.random.RandomState(hp.seed)
    # train data
    train_loader = data_iterator(
        LJSpeechDataSource('meta_train.csv', hp, shuffle=True, rng=rng),
        batch_size=hp.batch_size, with_memory_cache=False
    )
    # valid data
    valid_loader = data_iterator(
        LJSpeechDataSource('meta_test.csv', hp, shuffle=False, rng=rng),
        batch_size=hp.batch_size, with_memory_cache=False
    )
    dataloader = dict(train=train_loader, valid=valid_loader)
    # build model
    gen = Generator(hp)
    dis = Discriminator(hp)
    # setup optimizer
    # Number of optimizer steps between LR decays, passed to the scheduler.
    iter_interval = train_loader.size//hp.batch_size
    g_optim = Optimizer(
        lr_scheduler=ExponentialScheduler(
            hp.alpha, gamma=hp.lr_decay, iter_interval=iter_interval),
        name='AdamW', alpha=hp.alpha, beta1=hp.beta1, beta2=hp.beta2
    )
    d_optim = Optimizer(
        lr_scheduler=ExponentialScheduler(
            hp.alpha, gamma=hp.lr_decay, iter_interval=iter_interval),
        name='AdamW', alpha=hp.alpha, beta1=hp.beta1, beta2=hp.beta2
    )
    HiFiGANTrainer(gen, dis, g_optim, d_optim, dataloader, hp).run()
if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('--context', '-c', type=str, default='cudnn',
                        help="'cudnn' is highly recommended.")
    parser.add_argument("--device-id", "-d", type=str, default='-1',
                        help='A list of device ids to use.\
                        This is only valid if you specify `-c cudnn`. \
                        Defaults to use all available GPUs.')
    # Expose every hyper-parameter as a CLI flag so any value can be
    # overridden from the command line.
    for key, value in hp.__dict__.items():
        name = "--" + key
        # isinstance (not type() ==) is the correct type check; list-valued
        # hparams accept one or more values of the element's type.
        if isinstance(value, list):
            nargs, t = '+', type(value[0])
        else:
            nargs, t = None, type(value)
        parser.add_argument(name, type=t, nargs=nargs, default=value)
    args = parser.parse_args()
    # Write the parsed values back into the global hparams object.
    for k, v in vars(args).items():
        hp.__dict__[k] = v
    # setup context for nnabla
    if args.device_id != '-1':
        os.environ["CUDA_VISIBLE_DEVICES"] = args.device_id
    run(args)
|
#!/usr/bin/env python
# encoding: utf-8
"""
pilatus.py - connect to and control the pilatus100
NB: cam_server and EPICS GUI must be on for this to work.
If something looks wrong, debug by watching the output on the
cam_server and GUI windows
Created by Dave Williams on 2014-12-04
"""
import os
import time
import epics
from epics import pv
#TODO: Pass in basedir
# Directory (on the NAS) where detector images are written.
BASE_DIR = '/nas_data/2016DanielQR/'
# EPICS process-variable prefix for the Pilatus detector.
BASE_EPICS = "18ID-2:PILATUS1:"
def set_up_pilatus(fn, exp_time, base_dir=BASE_DIR):
    """Configure the pilatus for an acquisition.

    :param fn: output file name; '.tiff' is appended unless the existing
        extension already starts with 'tif'
    :param exp_time: exposure time, divided by 1e6 before sending — presumably
        given in microseconds (TODO confirm units)
    :param base_dir: directory the detector writes the image into
    :return: full path (base_dir + fn) of the image that will be written
    """
    # Validate the name
    if not fn.split('.')[-1].startswith('tif'):
        fn += '.tiff'
    # Set up pv connections
    expose_entry = pv.PV(BASE_EPICS+"AcquireTime")
    aquire_button = pv.PV(BASE_EPICS+"Acquire")
    filepath_entry = pv.PV(BASE_EPICS+"FilePath")
    filename_entry = pv.PV(BASE_EPICS+"FileName")
    template_entry = pv.PV(BASE_EPICS+"FileTemplate")
    trigger_entry = pv.PV(BASE_EPICS+"TriggerMode")
    # Set up messages to send
    exp_msg = (exp_time/1000000.0) # convert to sec
    fn_msg = fn  # unused
    # Send messages
    expose_entry.put(exp_msg)
    filepath_entry.put(base_dir)
    filename_entry.put(fn)
    # NOTE(review): Acquire is triggered before FileTemplate and TriggerMode
    # are written — confirm this ordering is intentional.
    aquire_button.put(1)
    template_entry.put("%s%s")
    trigger_entry.put(2)
    return base_dir+fn
import os
import glob
import sys
import time
import numpy as np
from collections import defaultdict
from tensorflow.keras.layers import Dense
from tensorflow.keras.layers import Dropout
from tensorflow.keras.layers import Input
from tensorflow.keras.layers import Add
from tensorflow.keras.layers import Dot
from tensorflow.keras.layers import Concatenate
from tensorflow.keras.layers import Flatten
from tensorflow.keras.layers import LSTM
from tensorflow.keras.models import Model
from tensorflow.keras import regularizers
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.callbacks import ModelCheckpoint
from sklearn.ensemble import RandomForestClassifier
from sklearn.utils import class_weight
CHECKPOINTER_PATH = '/tmp/model%d.bin'
class LateModelCheckpoint(ModelCheckpoint):
    """A ModelCheckpoint that stays inactive for the first ``min_epochs`` epochs."""

    def __init__(self, min_epochs, *args, **kwargs):
        self.min_epochs = min_epochs
        super().__init__(*args, **kwargs)

    def on_epoch_end(self, epoch, logs=None):
        # Delegate to the normal checkpointing logic only past the warm-up.
        if epoch > self.min_epochs:
            super().on_epoch_end(epoch, logs)
class LayoutClassifier:
    """Classifies table layouts (RELATION / OTHER / ENTITY / MATRIX).

    Combines LSTM models over per-column/per-row embedding feature sequences
    with an optional random forest over global structural (arff) features.
    Idiom fixes applied throughout: ``is (not) None`` instead of
    ``== None``/``!= None``; a no-op self-assignment removed.
    """

    def __init__(self, pattern_model, word_embedding_model,
                 web_table_embedding_model, arff_features):
        # Fixed length of the per-column/per-row feature sequences fed to the
        # LSTMs (shorter tables are zero-padded, longer ones truncated).
        self.MAX_SEQUENCE_SIZE = 10
        self.models = {
            'rf_model': None,
            'web_table_model': None,
            'pattern_model': None,
            'word_embedding_model': None
        }
        # each model has a set of feature generators that extract features
        self.feature_generators = {}
        if pattern_model:
            self.feature_generators['pattern_model'] = {
                'patterns': pattern_model
            }
        if web_table_embedding_model:
            self.feature_generators['web_table_model'] = {
                'web_table_embeddings': web_table_embedding_model
            }
        if word_embedding_model:
            self.feature_generators['word_embedding_model'] = {
                'word_embedding_model': word_embedding_model
            }
        # structural features extracted from the arff file
        self.global_features = arff_features

    def create_rf_model(self, label_index=None):
        """Creates random forest classifier (key: 'rf_model') for (global)
        structured features.

        :param label_index: if given, store a per-label binary model in a dict
            under this index; otherwise store a single multi-class model.
        """
        model = RandomForestClassifier(
            n_estimators=100, class_weight='balanced')
        if label_index is not None:
            if ('rf_model' not in self.models) or (self.models['rf_model'] is None):
                self.models['rf_model'] = dict()
            self.models['rf_model'][label_index] = model
        else:
            self.models['rf_model'] = model

    def create_lstm_model(self, model_name, input_dim,
                          global_features_input_dim, output_dim, label_index=None):
        """
        Create the LSTM network that uses only embedding features.

        Four parallel inputs (columns, reversed columns, rows, reversed rows)
        each pass through a shared attention weighting and an LSTM; the pooled
        outputs and final states are concatenated and classified.

        :param model_name: key under which the model is stored in self.models
        :param input_dim: per-step feature dimension of each input sequence
        :param global_features_input_dim: unused here (kept for interface parity)
        :param output_dim: number of output units
        :param label_index: if given, build a binary (sigmoid) model stored per
            label; otherwise a softmax multi-class model
        """
        print('input_dim', input_dim, 'output_dim', output_dim)
        last_activation = 'sigmoid' if label_index is not None else 'softmax'
        loss = 'binary_crossentropy' if label_index is not None else 'categorical_crossentropy'
        i1 = Input(shape=(10, input_dim))
        i2 = Input(shape=(10, input_dim))
        i3 = Input(shape=(10, input_dim))
        i4 = Input(shape=(10, input_dim))
        input1 = Dropout(0.3)(i1)
        input2 = Dropout(0.3)(i2)
        input3 = Dropout(0.3)(i3)
        input4 = Dropout(0.3)(i4)
        # Shared attention layer producing one weight per sequence step.
        weighting = Dense(10, activation='softmax',
                          kernel_regularizer=regularizers.l1_l2(l1=1e-3, l2=1e-2))
        f1 = Flatten()(input1)
        w1 = weighting(f1)
        x1, s1, _ = LSTM(8, dropout=0.5, recurrent_dropout=0.1,
                         return_sequences=True, return_state=True)(input1)
        p1 = Dot(axes=1)([x1, w1])
        f2 = Flatten()(input2)
        w2 = weighting(f2)
        x2, s2, _ = LSTM(8, dropout=0.5, recurrent_dropout=0.1,
                         return_sequences=True, return_state=True)(input2)
        p2 = Dot(axes=1)([x2, w2])
        f3 = Flatten()(input3)
        w3 = weighting(f3)
        x3, s3, _ = LSTM(8, dropout=0.5, recurrent_dropout=0.1,
                         return_sequences=True, return_state=True)(input3)
        p3 = Dot(axes=1)([x3, w3])
        f4 = Flatten()(input4)
        w4 = weighting(f4)
        x4, s4, _ = LSTM(8, dropout=0.5, recurrent_dropout=0.1,
                         return_sequences=True, return_state=True)(input4)
        p4 = Dot(axes=1)([x4, w4])
        c = Concatenate(axis=1)([p1, p2, p3, p4, s1, s2, s3, s4])
        d1 = Dense(100, activation='sigmoid',
                   kernel_regularizer=regularizers.l1_l2(l1=1e-5, l2=1e-4))(c)
        d1 = Dropout(0.3)(d1)
        last_d = d1
        # NOTE: range(0) means this residual stack is currently disabled;
        # kept so it can be re-enabled by changing the bound.
        for i in range(0):
            d_1 = Dense(50, activation='sigmoid', kernel_regularizer=regularizers.l1_l2(
                l1=1e-5, l2=1e-4))(last_d)
            d_2 = Dense(50, activation='sigmoid', kernel_regularizer=regularizers.l1_l2(
                l1=1e-5, l2=1e-4))(d_1)
            d_3 = Dense(50, activation='sigmoid', kernel_regularizer=regularizers.l1_l2(
                l1=1e-5, l2=1e-4))(d_2)
            d_4 = Dense(100, activation='sigmoid', kernel_regularizer=regularizers.l1_l2(
                l1=1e-5, l2=1e-4))(d_3)
            d_4 = Dropout(0.1)(d_4)
            dadd = Add()([last_d, d_4])
            last_d = dadd
        out = Dense(output_dim, activation=last_activation,
                    input_dim=100)(last_d)
        model = Model(
            inputs=[i1, i2, i3, i4], outputs=out)
        model.compile(loss=loss, optimizer=Adam(
            learning_rate=1e-3), metrics=['accuracy'])
        if label_index is None:
            self.models[model_name] = model
        else:
            if (model_name not in self.models) or (self.models[model_name] is None):
                self.models[model_name] = dict()
            self.models[model_name][label_index] = model

    def train_lstm_model_with_masking(self, model_name, feature_dict,
                                      label_dict, multi_model=False, epochs=20):
        """ Trains an LSTM network with feature sequences of a fixed length.
        To obtain feature sequences with equal length masking has to be enabled
        in `_get_table_feature_vector_for_lstm()`.

        :param model_name: key of the model in self.models
        :param feature_dict: dict with 'train' and 'valid' feature lists
        :param label_dict: dict with 'train' and 'valid' label arrays
        :param multi_model: if True, train one binary model per label index
        :param epochs: number of training epochs
        """
        features = feature_dict['train']
        labels = label_dict['train']
        # Transpose to one array per input branch; [:-1] drops the trailing
        # global-feature element, which the LSTM models do not consume.
        transformed_features = [np.array(a) for a in zip(*features)]
        valid_features = feature_dict['valid']
        valid_labels = label_dict['valid']
        valid_transformed_features = [
            np.array(a) for a in zip(*valid_features)]
        print('TODO Global Feature Shape', np.array(transformed_features[:-1]).shape)  # TODO check if this is right
        if multi_model:
            for i in self.models[model_name]:
                if self.models[model_name][i] is None:
                    print('ERROR: Currently, no model is created.', file=sys.stderr)
                class_weights = class_weight.compute_class_weight('balanced',
                                                                  np.unique(
                                                                      labels.T[i]),
                                                                  labels.T[i])
                class_weights = {i: class_weights[i]
                                 for i in range(len(class_weights))}
                print('Class weights:', class_weights)
                # Checkpoint the best (lowest-loss) weights after a warm-up of
                # 25 epochs, then restore them and clean up the temp files.
                model_save_path = CHECKPOINTER_PATH % (int(time.time()),)
                checkpointer = LateModelCheckpoint(
                    25, filepath=model_save_path, verbose=1, save_best_only=True, monitor='loss', save_weights_only=True)
                self.models[model_name][i].fit(transformed_features[:-1], labels.T[i], epochs=epochs,
                                               batch_size=32, validation_data=(
                                                   valid_transformed_features[:-1],
                                                   valid_labels.T[i]), class_weight=class_weights, callbacks=[checkpointer])
                self.models[model_name][i].load_weights(model_save_path)
                for f in glob.glob(model_save_path + '*'):
                    os.remove(f)
        else:
            if self.models[model_name] is None:
                print('ERROR: Currently, no model is created.', file=sys.stderr)
            class_weights = class_weight.compute_class_weight('balanced',
                                                              np.unique(
                                                                  [np.argmax(x) for x in labels]),
                                                              [np.argmax(x) for x in labels])
            sample_weights = np.array(
                [class_weights[np.argmax(x)] for x in labels])
            self.models[model_name].fit(transformed_features[:-1], labels, epochs=epochs,
                                        batch_size=32, validation_data=(
                                            valid_transformed_features[:-1],
                                            valid_labels), sample_weight=sample_weights)
        return

    def train_rf_model(self, feature_dict, label_dict, multi_model=False, use_test_set=False):
        """ Trains a random forest classifier (key: 'rf_model') by using only
        the (global) structured features, then prints validation accuracy.

        :param feature_dict: dict with 'train'/'valid' (and 'test') features;
            the last element of each sample is the global feature vector
        :param label_dict: dict with 'train'/'valid' one-hot label arrays
        :param multi_model: if True, fit one binary model per label index
        :param use_test_set: see note below; currently has no effect
        """
        transformed_features = [a for a in zip(*feature_dict['train'])][-1]
        if use_test_set:
            # NOTE(review): this expression has no effect — presumably it was
            # meant to extend transformed_features with the test samples, but
            # doing so would desync it from label_dict['train'] below; confirm
            # the intended behavior before "fixing".
            transformed_features + [a for a in zip(*feature_dict['test'])][-1]
        if multi_model:
            for i in self.models['rf_model']:
                self.models['rf_model'][i].fit(
                    transformed_features,
                    label_dict['train'].T[i])
        else:
            self.models['rf_model'].fit(
                transformed_features,
                [list(x).index(1) for x in label_dict['train']])
        # evaluate
        transformed_features = [a for a in zip(*feature_dict['valid'])][-1]
        y_pred = None
        if multi_model:
            all_preds = [None] * len(self.models['rf_model'])
            for i in self.models['rf_model']:
                all_preds[i] = self.models['rf_model'][i].predict(
                    transformed_features)
            y_pred = np.array(all_preds).T
        else:
            y_pred = self.models['rf_model'].predict_proba(
                transformed_features)
        right = 0
        wrong = 0
        for i in range(len(y_pred)):
            prediction = np.argmax(y_pred[i])
            if label_dict['valid'][i][prediction] == 1:
                right += 1
            else:
                wrong += 1
        print('Accuracy', right / (right + wrong))
        return

    def preprocessing(self, model_name, data, size=4):
        """
        Transforms features in a format which can be used by the ann model.

        Attributes:
            model_name (str): key of the model in self.models
            data (dict): dictonary of datasets (e.g. train, test and validataion
                data set) where each dataset contains a list of table_ids
                corresponding to the ids in the SQLite database, a list of
                tables (matrix of cells), and a corresponding list of labels
            size (int): number of first rows or columns for which features
                should be obtained
        """
        table_features = defaultdict(list)
        count = 0
        for key in data:
            table_ids = data[key]['table_ids']
            table_data = data[key]['table_data']
            # create feature vectors
            table_features[key] = []
            for id_index, table in enumerate(table_data):
                feature_vector = self._get_table_feature_vector_for_lstm(
                    model_name,
                    table_ids[id_index],
                    table, size, masking=True,
                    max_sequence_size=self.MAX_SEQUENCE_SIZE)
                table_features[key].append(feature_vector)
                count += 1
                if count % 100 == 0:
                    print('Preprocessing done for', count, 'tables')
        return table_features

    def label_preprocessing(self, data, label_set):
        """
        Transforms labels into one-hot encoding.

        Attributes:
            data (dict): dictonary of datasets (e.g. train, test and validataion
                data set) where each dataset contains a list of table_ids
                corresponding to the ids in the SQLite database, a list of
                tables (matrix of cells), and a corresponding list of labels
            label_set: set of all possible labels (currently unused; a fixed
                label order is used instead — see TODO below)
        """
        # label_list = list(label_set)
        # TODO replace this with comment above
        label_list = ['RELATION', 'OTHER', 'ENTITY', 'MATRIX']
        label_encodings = defaultdict(list)
        for key in data:
            labels = data[key]['labels']
            # crate one hot encoded label vectors
            for label in labels:
                vec = np.zeros(len(label_list))
                pos = label_list.index(label)
                vec[pos] = 1
                label_encodings[key].append(vec)
            label_encodings[key] = np.array(label_encodings[key])
        return label_encodings, label_list

    def _extract_feature_vectors(self, model_name, columns):
        """ Extracts feature vectors for a set of columns.

        Features:
            * Content Pattern: A pattern of length n (default: 10) that
              captures the types of the first n characters in every cell
            * Word Embeddings: Represent cells by a word embedding model
            * Web Table Embeddings: Represent cells by a web table embedding
              model

        Returns:
            * features
        """
        features = defaultdict(list)
        for i in range(len(columns)):
            col = columns[i]
            # NOTE(review): col_features is overwritten by each generator in
            # turn, so only the last generator's output is appended per name —
            # in practice each model has exactly one generator; confirm.
            col_features = dict()
            for (name, generator) in self.feature_generators[model_name].items():
                col_features = generator.get_features(col)
                features[name].append(col_features)
        return features

    def _get_table_feature_vector_for_lstm(self, model_name, id, table, size,
                                           masking=False,
                                           max_sequence_size=None):
        """
        Creates features for LSTM network using the feature generators to
        generate embedding features.

        Attributes:
            model_name (str): key of the model in self.models
            id (int): table id in SQLite database
            table (list): columns of one table
            size (int): maximal number of rows and columns for which features
                should be extracted
            masking (bool): if True this function creates feature sequences of
                equal size (`max_sequence_size`). If the table is too small
                missing feature vectors are filled with zero vectors.
            max_sequence_size (int): length of feature sequences used if
                `masking` is enabled.
        """
        feature_dict_column_wise = self._extract_feature_vectors(
            model_name, table)
        transposed_table_data = np.transpose(table)
        feature_dict_row_wise = self._extract_feature_vectors(model_name,
                                                              transposed_table_data)
        features_cols = []
        features_rows = []
        for name in feature_dict_column_wise:
            # Pad missing columns/rows with single zero vectors up to `size`.
            col_features = feature_dict_column_wise[name]
            col_features = [col_features[i] if i < len(col_features) else [np.zeros(
                col_features[0][0].shape)] for i in range(size)]
            row_features = feature_dict_row_wise[name]
            row_features = [row_features[i] if i < len(row_features) else [np.zeros(
                row_features[0][0].shape)] for i in range(size)]
            if masking:
                if max_sequence_size is not None:
                    # Truncate/zero-pad every sequence to max_sequence_size.
                    for i in range(len(col_features)):
                        col_features[i] = col_features[i][:max_sequence_size]
                        while len(col_features[i]) < max_sequence_size:
                            col_features[i].append(
                                np.zeros(col_features[i][0].shape[0]))
                    for i in range(len(row_features)):
                        row_features[i] = row_features[i][:max_sequence_size]
                        while len(row_features[i]) < max_sequence_size:
                            row_features[i].append(
                                np.zeros(row_features[i][0].shape[0]))
                else:
                    raise Exception(
                        'Masking requires max_sequence_size != None')
            col_features = np.concatenate(col_features, axis=1)
            row_features = np.concatenate(row_features, axis=1)
            features_cols.append(col_features)
            features_rows.append(row_features)
        # Four input branches: columns, reversed columns, rows, reversed rows;
        # each is augmented with its product with the mean non-zero vector.
        f1 = np.concatenate(features_cols, axis=1)
        f1 = np.array([np.concatenate(
            [f1[i], f1[i] * np.mean([x for x in f1 if np.linalg.norm(x) > 0.0001], axis=0)], axis=0) for i in range(len(f1))])
        f2 = np.flip(f1, 0)
        f3 = np.concatenate(features_rows, axis=1)
        f3 = np.array([np.concatenate(
            [f3[i], f3[i] * np.mean([x for x in f3 if np.linalg.norm(x) > 0.0001], axis=0)], axis=0) for i in range(len(f3))])
        f4 = np.flip(f3, 0)
        feature_vector = [f1, f2, f3, f4]
        # get global features (arff features)
        if self.global_features is not None:
            global_feature_vec = self.global_features.get_vector(id)
            feature_vector.append(global_feature_vec)
        return feature_vector
|
from PyQt5 import QtWidgets
from PyQt5 import uic
from PyQt5.QtGui import QIcon, QPixmap, QClipboard
from PyQt5.QtWidgets import QApplication, QWidget, QInputDialog, QLineEdit, QFileDialog, QPushButton, QHBoxLayout, QDialog, QBoxLayout, QMainWindow
from PyQt5.QtWebEngineWidgets import QWebEnginePage
from PyQt5.QtWebEngineWidgets import QWebEngineView
from PyQt5.QtCore import QUrl, QDir, QMimeData
from formpy.ui_mainwindow2 import Ui_mainWindow
import PyQt5.QtCore as QtCore
from formpy.ui_full_disp import Ui_Dialog
class Ui_MainWindow(object):
    """UI builder for the main window (Qt Designer-style generated class)."""

    def setupUi(self, MainWindow):
        """Create and attach the widgets to ``MainWindow``."""
        MainWindow.setObjectName("MainWindow")
        MainWindow.resize(200, 200)
        self.centralwidget = QtWidgets.QWidget(MainWindow)
        self.centralwidget.setObjectName("centralwidget")
        # Fix: attach the central widget to the window being configured, not to
        # ``self`` — this class is a plain ``object`` subclass, so calling
        # self.setCentralWidget would fail when used standalone. Behavior is
        # unchanged for the mixin usage below, where self IS MainWindow.
        MainWindow.setCentralWidget(self.centralwidget)
        self.pushButton = QtWidgets.QPushButton(self.centralwidget)
        self.pushButton.setGeometry(QtCore.QRect(50, 110, 75, 23))
        self.pushButton.setObjectName("pushButton")
        self.retranslateUi(MainWindow)
        QtCore.QMetaObject.connectSlotsByName(MainWindow)

    def retranslateUi(self, MainWindow):
        """Set all user-visible strings (translation hook)."""
        _translate = QtCore.QCoreApplication.translate
        MainWindow.setWindowTitle(_translate("MainWindow", "MainWindow"))
        self.pushButton.setText(_translate("MainWindow", "PushButton"))
class MainWindow(QMainWindow, Ui_MainWindow):
    """Main application window; its widgets are built by the Ui_MainWindow mixin."""
    def __init__(self, parent=None):
        QMainWindow.__init__(self, parent=parent)
        self.setupUi(self)
    def keyPressEvent(self, e):
        # F5 closes the window.
        if e.key() == QtCore.Qt.Key_F5:
            self.close()
if __name__ == "__main__":
    import sys
    app = QtWidgets.QApplication(sys.argv)
    w = MainWindow()
    w.show()
    # Run the Qt event loop and propagate its exit status to the shell.
    sys.exit(app.exec_())
from recviz.rec import recviz
|
from context import Meta
meta = Meta()
# Load a remote Meta snippet and print its inferred dynamic type signature.
# NOTE(review): requires network access to www.meta-lang.org.
merge_dictionaries = meta.load("http://www.meta-lang.org/snippets/56ee468ac0cb8f7470fbb338")
print(merge_dictionaries.get_dynamic_type_sig())
|
import sys
import os.path
import datetime
import re
from urlparse import urljoin
from urllib import urlopen
import nltk
from base_source import BaseSource
from shared import common
from shared.config import ConfigReader
class Ford(BaseSource):
    """Scraper source for Ford press releases (Python 2 codebase).

    Reads a locally saved copy of the link page and exposes tag predicates
    and extractors used by the shared scraping pipeline.
    """
    def __init__(self):
        # Matches dates like "04 Jan 2012". NOTE(review): the year 2012 is
        # hard-coded here and in get_date() — confirm this is intentional.
        self._ptn_date = re.compile('^(\d\d) (\w\w\w) 2012')
    def get_linkpages(self):
        """Return the link-page HTML, read from the saved ford.html file."""
        all_html = []
        cfr = ConfigReader()
        root = cfr.get('ROOT_ORIGINAL')
        path1 = cfr.get('PR_SOURCES')
        path2 = os.path.join(root, path1)
        path = os.path.join(path2, 'ford.html')
        print 'collecting links from source file'
        with open(path) as f:
            lines = f.readlines()
            all_html.append(''.join(lines))
        return all_html
    def is_link(self, tag):
        # True when the tag carries an href attribute.
        return tag.has_key('href')
    def is_title(self, tag):
        # On this source, title tags are simply the link tags.
        return self.is_link(tag)
    def is_date(self, tag):
        # A date is a <span class="feeds-date"> whose text matches the pattern.
        return tag.name == 'span' and tag.has_key('class') and \
            tag.get('class')[0] == 'feeds-date' and \
            self._ptn_date.match(tag.string)
    def get_link(self, link):
        """Return the absolute printable-view URL for a relative link."""
        link = '{0}?view=print'.format(link)
        return urljoin('http://corporate.ford.com/', link)
    def get_title(self, tag):
        """Concatenate the tag's stripped text fragments into one title."""
        sb = []
        for s in tag.stripped_strings:
            sb.append(s.strip())
        return ''.join(sb)
    def get_date(self, tag):
        """Parse the tag's text into a datetime.date (year fixed to 2012)."""
        raw = tag.string.strip()
        match = self._ptn_date.match(raw)
        if not match:
            raise Exception('date format did not match pattern')
        year = 2012
        month = common.get_month_by_name(match.group(2))
        day = int(match.group(1))
        return datetime.date(year, month, day)
    def get_encoding(self):
        """Character encoding of the source pages."""
        return "utf-8"
    def get_text(self, html):
        """Extract the article body between the hub-feature-body markers."""
        start = html.index('<div class="hub-feature-body">')
        end = html.index('<!--hub-feature-body -->', start)
        html = html[start:end]
        return self._filter_html(html)
|
from flask import Blueprint
# Blueprint collecting the topic views; routes registered on it are served
# under the /topics URL prefix.
bp = Blueprint('topic', __name__, url_prefix='/topics')
|
#coding: utf-8
import requests
from AdvancedHTMLParser import AdvancedHTMLParser
import collections
from unidecode import unidecode
import modules.utils
import json
import re
def pprint(txt="", e="\n"):
    """Debug print helper; output is currently disabled, so calls are no-ops."""
    debug_enabled = False
    if debug_enabled:
        print(txt, end=e)
"""
www_parser - module for parsing timetable from vulcan.
"""
class www_parser:
    """Parser for school timetables published as static HTML pages by the
    Vulcan ("Optivum") timetable software.

    After the caller sets ``base_url`` and runs the import_* methods, three
    structures are populated:

    - ``timetable``: day -> lesson hour -> unit (class) name -> list of lessons
    - ``teachers_timetable``: teacher -> day -> hour -> single lesson dict
    - ``new_teachers_timetable``: teacher -> day -> hour -> list of lessons

    Lesson dicts use the Vulcan CSS class letters as keys: "p" subject,
    "n" teacher, "s" classroom, "g" group, "k" unit name.
    """

    def __init__(self, teacher_recovery_filename=None):
        # NOTE(review): teacher_recovery_filename is accepted but unused;
        # callers load the mapping through load_teacher_recovery() instead.
        self.http = requests.Session()
        self.http.encoding = "UTF-8"
        self.base_url = None                 # must be set by the caller before importing
        self.classrooms = []                 # every classroom name seen while parsing
        self.units = {}                      # unit (class) name -> relative page url
        self.update_dates = []               # "last updated" date scraped per unit page
        self.timetable = collections.OrderedDict()
        self.teachers_timetable = collections.OrderedDict()
        self.new_teachers_timetable = collections.OrderedDict()
        self.teacher_recovery = {}           # pseudo teacher name -> real name

    def load_teacher_recovery(self, filename):
        """Load the optional pseudo-name -> real-name teacher mapping.

        A missing or malformed file is ignored on purpose (best effort);
        the mapping simply stays empty.
        """
        try:
            with open(filename, "r") as f:
                self.teacher_recovery = json.load(f)
        except (OSError, ValueError):
            # narrowed from a bare except: only swallow I/O and JSON errors
            pass

    def import_teachers(self):
        """Placeholder for importing the teacher list; not implemented yet."""
        return True

    def import_units(self):
        '''
        Gets list of units, that are described in base_url
        returns: bool (True/False)
        '''
        resp = self.http.get("{}/lista.html".format(self.base_url))
        resp.encoding = "UTF-8"
        if resp.status_code != 200:
            print("[E] Serwer zwrócił kod błędu {} przy próbie pobrania listy klas".format(resp.status_code))
            return False
        parser = AdvancedHTMLParser()
        parser.parseStr(resp.text)
        units = parser.getAllNodes().getElementsByTagName("a")
        for unit in units:
            self.units[unit.innerText.upper()] = unit.href
        return True

    def import_timesteps(self):
        '''
        Gets timesteps from first unit that was found
        '''
        unit_url = next(iter(self.units.values()))
        resp = self.http.get("{}/{}".format(self.base_url, unit_url))
        resp.encoding = "UTF-8"
        if resp.status_code != 200:
            print("[E] Serwer zwrócił kod błędu {} przy próbie pobrania timesteps".format(resp.status_code))
            return False
        parser = AdvancedHTMLParser()
        parser.parseStr(resp.text)
        units = parser.getAllNodes().getElementsByClassName("g").getElementsByTagName("td")
        for unit in units:
            start, stop = unit.innerText.split("-")
            # TODO: hold it in some variable
        return True

    def find_by_class(self, parser_object, class_name):
        """Return the elements of *parser_object* whose className matches."""
        return [obj for obj in parser_object if obj.className == class_name]

    def get_teacher(self, parser_object):
        """Extract the teacher name (upper-cased, ASCII-folded) for a lesson.

        Some timetables carry a duplicated subject span ("p") instead of a
        proper teacher span ("n"); in that case the second "p" span is used
        as a pseudo teacher name and mapped through teacher_recovery.
        """
        spans = parser_object.getElementsByTagName("span")
        p_spans = self.find_by_class(spans, "p")
        if len(p_spans) > 1:
            # workaround: a duplicate subject entry stands in for the teacher
            pseudo_teacher = unidecode(p_spans[1].innerText.upper())
            if pseudo_teacher in self.teacher_recovery:
                return self.teacher_recovery[pseudo_teacher]
            print("Nie znalazlem recovery dla {}".format(pseudo_teacher))
            return pseudo_teacher
        pseudo_teacher = unidecode(self.find_by_class(spans, "n")[0].innerText.upper())
        if pseudo_teacher in self.teacher_recovery:
            return self.teacher_recovery[pseudo_teacher]
        return pseudo_teacher

    def get_subject(self, parser_object):
        """Subject name: text of the first span with class "p"."""
        return self.find_by_class(parser_object.getElementsByTagName("span"), "p")[0].innerText

    def get_classroom(self, parser_object):
        """Classroom: text of the first span with class "s"."""
        return self.find_by_class(parser_object.getElementsByTagName("span"), "s")[0].innerText

    def get_group(self, subject):
        """Group id encoded after "-" in the subject name, or '-1' for none."""
        if len(subject.split("-")) > 1:
            return subject.split("-")[1]
        else:
            return '-1'

    def get_group2(self, parser_object):
        """Group id taken from the subject span or, failing that, from the raw
        HTML of its parent element; '-1' when no group is encoded."""
        a = self.find_by_class(parser_object.getElementsByTagName("span"), "p")[0].innerText
        b = self.find_by_class(parser_object.getElementsByTagName("span"), "p")[0].parentElement.innerHTML
        # strip tags, keep the first whitespace-separated token
        b = re.sub(r"<[^>]*>", "", b).split(" ")[0]
        a = a.strip()
        b = b.strip()
        if len(a.split("-")) > 1:
            return a.split("-")[1]
        elif len(b.split("-")) > 1:
            return b.split("-")[1]
        else:
            return '-1'

    def add_to_teacher_plan(self, p, n, s, unit_name, day, hour):
        """Record a lesson in teachers_timetable; the last lesson written for
        a given (teacher, day, hour) slot wins."""
        if n not in self.teachers_timetable:
            self.teachers_timetable[n] = dict()
        if day not in self.teachers_timetable[n]:
            self.teachers_timetable[n][day] = dict()
        if hour not in self.teachers_timetable[n][day]:
            self.teachers_timetable[n][day][hour] = dict()
        self.teachers_timetable[n][day][hour] = {
            "p": p,
            "n": n.upper(),
            "s": s,
            "k": unit_name
        }

    def add_to_new_teacher_plan(self, p, n, s, unit_name, day, hour):
        """Record a lesson in new_teachers_timetable, which keeps *all*
        lessons per (teacher, day, hour) slot instead of only the last one."""
        if n not in self.new_teachers_timetable:
            self.new_teachers_timetable[n] = dict()
        if day not in self.new_teachers_timetable[n]:
            self.new_teachers_timetable[n][day] = dict()
        if hour not in self.new_teachers_timetable[n][day]:
            self.new_teachers_timetable[n][day][hour] = []
        self.new_teachers_timetable[n][day][hour].append({
            "p": p,
            "n": n.upper(),
            "s": s,
            "k": unit_name
        })

    def get_units_list(self):
        '''Returns list of units, that are described in currently set timetable url'''
        resp = self.http.get("{}/lista.html".format(self.base_url))
        resp.encoding = "UTF-8"
        if resp.status_code != 200:
            print("[E] Serwer zwrócił kod błędu {} przy próbie pobrania listy klas".format(resp.status_code))
            exit(-1)
        parser = AdvancedHTMLParser()
        parser.parseStr(resp.text)
        units = parser.getAllNodes().getElementsByTagName("a")
        # Fix: iterate the freshly parsed anchor elements. The original looped
        # over self.units (a dict whose keys are strings), so unit.innerText
        # raised AttributeError and the mapping was never populated. Compare
        # the correct loop in import_units().
        for unit in units:
            # drop the leading path (7 chars) and trailing ".html" from href
            self.units[unit.innerText.upper()] = unit.href[7:-5]
        return self.units

    def import_timetable(self):
        """Import the timetable of every known unit; False on first failure."""
        print()
        for unit in self.units:
            if not self.import_timetable_for_unit(unit):
                return False
            else:
                modules.utils.step("Przetwarzam plan lekcji klasy {}".format(unit), state=" OK ")
        return True

    def import_timetable_for_unit(self, unit_name):
        """Download and parse the timetable page of one unit (class).

        Fills self.timetable and both teacher timetables; returns False when
        the page cannot be fetched.
        """
        modules.utils.step("Przetwarzam plan lekcji klasy {}".format(unit_name))
        unit_url = self.units[unit_name]
        resp = self.http.get("{}/{}".format(self.base_url, unit_url))
        resp.encoding = "UTF-8"
        if resp.status_code != 200:
            print("[E] Serwer zwrócił kod błędu {} przy próbie pobrania planu klasy {} (url {})".format(resp.status_code, unit_name, unit_url))
            return False
        parser = AdvancedHTMLParser()
        parser.parseStr(resp.text)
        # Get update date of *this* unit timetable
        self.update_dates.append(parser.getElementsByAttr("align", "right")[0][0][0][0].innerText.split('\r\n')[1].split(" ")[1])
        rows = parser.getAllNodes().getElementsByClassName("tabela")[0].getChildren()
        for hour, row in enumerate(rows):
            # TODO: settle on a single day-numbering convention
            day = 0  # count from 0, then pre-incremented per column below
            #day = 1 #count from 1, because backwards compatibility
            columns = row.getElementsByClassName("l")
            for column in columns:
                day += 1
                # Empty cell - no lesson for this unit at this hour, skip
                if column.innerText == " ":
                    continue
                modules.utils.debug("Dzien {} - {} godzina lekcyjna".format(day, hour), level=2)
                # TODO: extract the nested-dict initialisation into a helper
                # If this is the first iteration of this day, create dict
                if day not in self.timetable:
                    self.timetable[day] = {}
                # If this is the first iteration of this lesson in this day, create dict
                if hour not in self.timetable[day]:
                    self.timetable[day][hour] = {}
                # If this is the first iteration of this unit lesson in this day, create array
                if unit_name not in self.timetable[day][hour]:
                    self.timetable[day][hour][unit_name] = []
                # A cell can hold several lessons separated by <br />
                entries = column.innerHTML.split("<br />")
                for e in entries:
                    entry = AdvancedHTMLParser()
                    entry.parseStr(e)
                    if entry.getElementsByTagName("span")[0].className == "p":
                        # well-formed entry: subject span comes first
                        modules.utils.debug("Znaleziono kontener z pojedynczym przedmiotem")
                        subject = dict()
                        subject["p"] = self.get_subject(entry)
                        subject["n"] = self.get_teacher(entry)
                        subject["s"] = self.get_classroom(entry)
                        subject["g"] = self.get_group2(entry)
                        #subject["g"] = self.get_group(subject["p"])
                        modules.utils.debug("- Przedmiot: {}".format(subject["p"]))
                        modules.utils.debug("- Nauczyciel: {}".format(subject["n"]))
                        modules.utils.debug("- Sala: {}".format(subject["s"]))
                        modules.utils.debug("- Grupa: {}".format(subject["g"]))
                        if subject["s"] not in self.classrooms:
                            self.classrooms.append(subject["s"])
                        self.timetable[day][hour][unit_name].append(subject)
                        self.add_to_teacher_plan(subject["p"], subject["n"], subject["s"], unit_name, day, hour)
                        try:
                            self.add_to_new_teacher_plan(subject["p"], subject["n"], subject["s"], unit_name, day, hour)
                        except Exception:
                            # best effort: the legacy plan above already holds the lesson
                            pass
                    else:
                        # malformed entry: scan the spans manually
                        modules.utils.debug("Nie znaleziono kontenera, szukam ręcznie")
                        parents = entry.getElementsByTagName("span")
                        for parent in parents:
                            parent = parent.getChildren()
                            if len(parent) != 0:
                                subject = dict()
                                subject["p"] = parent.getElementsByClassName("p")[0].innerText
                                subject["n"] = parent.getElementsByClassName("n")[0].innerText.upper()
                                if subject["n"] in self.teacher_recovery:
                                    subject["n"] = self.teacher_recovery[subject["n"]]
                                subject["s"] = parent.getElementsByClassName("s")[0].innerText
                                #subject["g"] = self.get_group(subject["p"])
                                subject["g"] = self.get_group2(parent)
                                modules.utils.debug("Znaleziono:")
                                modules.utils.debug("- Przedmiot: {}".format(subject["p"]))
                                modules.utils.debug("- Nauczyciel: {}".format(subject["n"]))
                                modules.utils.debug("- Sala: {}".format(subject["s"]))
                                modules.utils.debug("- Grupa: {}".format(subject["g"]))
                                if subject["s"] not in self.classrooms:
                                    self.classrooms.append(subject["s"])
                                self.timetable[day][hour][unit_name].append(subject)
                                self.add_to_teacher_plan(subject["p"], subject["n"], subject["s"], unit_name, day, hour)
                                try:
                                    self.add_to_new_teacher_plan(subject["p"], subject["n"], subject["s"], unit_name, day, hour)
                                except Exception:
                                    pass
        return True

    def generate(self):
        """Finalise imported data for output.

        NOTE: self.units changes type here from a dict to a sorted list of
        unit names; sorted teacher timetables are exposed as self.teachers
        and self.new_teachers.
        """
        units = []
        for unit in self.units:
            units.append(unit)
        self.units = sorted(units)
        self.teachers = collections.OrderedDict(sorted(self.teachers_timetable.items()))
        self.new_teachers = self.new_teachers_timetable
        return True
import ontotextapi as onto
import utils
import json
from os.path import isfile, join, split
import joblib as jl
import cohortanalysis as cohort
from ann_post_rules import AnnRuleExecutor
import sys
import xml.etree.ElementTree as ET
import concept_mapping
import urllib3
import logging
class StudyConcept(object):
    """A named study concept: a set of query terms, their ontology concept
    mappings, and the transitive closure of sub-concepts used for matching.
    Terms prefixed with "~~" act as exclusions: their closures are removed
    from the concept set instead of added.
    """
    def __init__(self, name, terms, umls_instance=None):
        # terms: list of term strings; "~~"-prefixed terms are exclusions
        # umls_instance: optional UMLS client used for closure computation
        self.terms = terms
        self._name = name
        self._term_to_concept = None  # lazily populated term -> mapping info
        self._concept_closure = None  # lazily populated set of concept ids
        self._umls_instance = umls_instance
    def gen_concept_closure(self, term_concepts=None, concept_to_closure=None):
        """
        generate concept closures for all terms
        :param term_concepts: optional - expert verified mappings can be used
        :param concept_to_closure: precomputed concept to closure dictionary
        :return:
        """
        self._term_to_concept = {}
        self._concept_closure = set()
        if term_concepts is None:
            # no verified mappings supplied: query the ontology for each term
            term_concepts = {}
            for term in self.terms:
                concept_objs = onto.match_term_to_concept(term if not term.startswith("~~") else term[2:])
                if concept_objs is not None:
                    term_concepts[term] = [o['localName'] for o in concept_objs]
        for term in term_concepts:
            candidate_terms = []
            for concept in term_concepts[term]:
                if concept_to_closure is not None:
                    candidate_terms.append((concept, concept_to_closure[concept]))
                else:
                    candidate_terms.append((concept, onto.get_transitive_subconcepts(concept)))
            # pick the rich sub-concept mappings
            if len(candidate_terms) > 1:
                candidate_terms = sorted(candidate_terms, key=lambda x: -len(x[1]))
            if term.startswith('~~'):
                # exclusion term: subtract its mapped concept and its closure
                to_remove = set(candidate_terms[0][1])
                to_remove.add(candidate_terms[0][0])
                self._concept_closure -= to_remove
                print 'removed %s items' % len(to_remove)
            else:
                self._concept_closure.add(candidate_terms[0][0])
                self._concept_closure |= set(candidate_terms[0][1])
            self._term_to_concept[term] = {'mapped': candidate_terms[0][0], 'closure': len(candidate_terms[0][1])}
    @staticmethod
    def compute_all_concept_closure(all_concepts, umls_instance, skip_relations={}):
        """Compute concept -> closure for every concept using worker threads.
        NOTE(review): mutable default skip_relations={} is shared across
        calls; harmless while it is only read, but worth confirming."""
        concept_to_closure = {}
        print 'all concepts number %s' % len(all_concepts)
        computed = []
        results =[]
        # fan the per-concept closure computation out over 40 worker threads
        utils.multi_thread_tasking(all_concepts, 40, StudyConcept.do_compute_concept_closure,
                                   args=[umls_instance, computed, results, skip_relations])
        for r in results:
            concept_to_closure[r['concept']] = r['closure']
        return concept_to_closure
    @staticmethod
    def do_compute_concept_closure(concept, umls_instance, computed, results, skip_relations={}):
        # worker: compute the transitive narrower-concept set once per concept;
        # `computed` is the shared dedupe list, `results` collects the output
        if concept not in computed:
            closure = umls_instance.transitive_narrower(concept, skip_relations=skip_relations)
            computed.append(concept)
            results.append({'concept': concept, 'closure': closure})
            print 'concept: %s transitive children %s' % (concept, closure)
    @property
    def name(self):
        """Display name of this study concept."""
        return self._name
    @property
    def concept_closure(self):
        """Set of concept ids covered by this concept (computed lazily)."""
        if self._concept_closure is None:
            self.gen_concept_closure()
        return self._concept_closure
    @concept_closure.setter
    def concept_closure(self, value):
        self._concept_closure = value
    @property
    def term_to_concept(self):
        """term -> {'mapped': concept id, 'closure': size} (computed lazily)."""
        if self._concept_closure is None:
            self.gen_concept_closure()
        return self._term_to_concept
    @term_to_concept.setter
    def term_to_concept(self, value):
        self._term_to_concept = value
class StudyAnalyzer(object):
def __init__(self, name):
self._study_name = name
self._study_concepts = []
self._skip_terms = []
self._options = None
@property
def study_name(self):
return self._study_name
@study_name.setter
def study_name(self, value):
self._study_name = value
@property
def study_concepts(self):
return self._study_concepts
@study_concepts.setter
def study_concepts(self, value):
self._study_concepts = value
@property
def skip_terms(self):
return self._skip_terms
@skip_terms.setter
def skip_terms(self, value):
self._skip_terms = value
def add_concept(self, concept):
self.study_concepts.append(concept)
def generate_exclusive_concepts(self):
"""
it is important to have a set of disjoint concepts otherwise concept-document frequencies would
contain double-counted results
:return:
"""
# call the concept closure property to make sure
# that the closure has been generated before
# compute the disjoint
for sc in self.study_concepts:
cc = sc.concept_closure
intersections = {}
explain_inter = {}
for i in range(1, len(self.study_concepts)):
for j in xrange(i):
common = self.study_concepts[i].concept_closure & self.study_concepts[j].concept_closure
if len(common) > 0:
intersections[self.study_concepts[i].name + ' - ' + self.study_concepts[j].name] = common
self.study_concepts[j].concept_closure -= common
explain_inter[self.study_concepts[j].name] = \
['removed %s common (%s) concepts' % (len(common), self.study_concepts[i].name)] \
if self.study_concepts[j].name not in explain_inter \
else explain_inter[self.study_concepts[j].name] + \
['removed %s common (%s) concepts' % (len(common), self.study_concepts[i].name)]
# if len(intersections) > 0:
# print 'intersections [[\n%s\n]]' % json.dumps(explain_inter)
# for sc in self.study_concepts:
# print '%s %s' % (sc.name, len(sc.concept_closure))
def remove_study_concept_by_name(self, concept_name):
for sc in self.study_concepts:
if sc.name == concept_name:
self.study_concepts.remove(sc)
def retain_study_concepts(self, concept_names):
retained = []
for sc in self.study_concepts:
if sc.name in concept_names:
retained.append(sc)
self.study_concepts = retained
def export_mapping_in_json(self):
mapping = {}
for c in self._study_concepts:
mapping[c.name] = c.term_to_concept
def serialise(self, out_file):
print 'iterating concepts to populate the mappings'
for c in self._study_concepts:
tc = c.term_to_concept
print 'saving...'
jl.dump(self, out_file)
print 'serialised to %s' % out_file
@property
def study_options(self):
return self._options
@study_options.setter
def study_options(self, value):
self._options = value
@staticmethod
def deserialise(ser_file):
return jl.load(ser_file)
def gen_study_table(self, cohort_name, out_file):
cohort.populate_patient_study_table(cohort_name, self, out_file)
def gen_sample_docs(self, cohort_name, out_file):
cohort.random_extract_annotated_docs(cohort_name, self, out_file, 10)
def gen_study_table_with_rules(self, cohort_name, out_file, sample_out_file, ruler, ruled_out_file,
sql_config, db_conn_file, text_preprocessing=False):
sql_setting = get_sql_template(sql_config)
cohort.populate_patient_study_table_post_ruled(cohort_name, self, out_file, ruler, 20,
sample_out_file, ruled_out_file,
sql_setting['patients_sql'], sql_setting['term_doc_anns_sql'],
sql_setting['skip_term_sql'],
db_conn_file, text_preprocessing=text_preprocessing)
def gen_study_table_in_one_iteration(self, cohort_name, out_file, sample_out_file,
sql_config, db_conn_file):
sql_setting = get_one_iteration_sql_template(sql_config)
cohort.generate_result_in_one_iteration(cohort_name, self, out_file, 20, sample_out_file,
sql_setting['doc_to_brc_sql'],
sql_setting['brc_sql'],
sql_setting['anns_iter_sql'],
sql_setting['skip_term_sql'],
sql_setting['doc_content_sql'],
db_conn_file)
def gen_study_table_with_rules_es(self, cohort_name, out_file, sample_out_file, ruler, ruled_out_file,
sem_idx_setting_file, retained_patients_filter, filter_obj=None):
cohort.es_populate_patient_study_table_post_ruled(self, out_file, ruler, 20,
sample_out_file, ruled_out_file, sem_idx_setting_file,
retained_patients_filter=retained_patients_filter,
filter_obj=filter_obj)
def get_sql_template(config_file):
    """Read the three study SQL statements from an XML config file.

    :param config_file: path to an XML file whose root element contains
        term_doc_anns_sql, patients_sql and skip_term_sql children
    :return: dict mapping each of those element names to its text content
    """
    root = ET.parse(config_file).getroot()
    keys = ('term_doc_anns_sql', 'patients_sql', 'skip_term_sql')
    return {k: root.find(k).text for k in keys}
def get_one_iteration_sql_template(config_file):
    """Read the five SQL statements used by the one-iteration study run.

    :param config_file: path to an XML file whose root element contains
        doc_to_brc_sql, brc_sql, anns_iter_sql, doc_content_sql and
        skip_term_sql children
    :return: dict mapping each of those element names to its text content
    """
    root = ET.parse(config_file).getroot()
    keys = ('doc_to_brc_sql', 'brc_sql', 'anns_iter_sql',
            'doc_content_sql', 'skip_term_sql')
    return {k: root.find(k).text for k in keys}
def load_ruler(rule_setting_file):
    """Create an AnnRuleExecutor loaded with the given rule configuration,
    falling back to the default config file when none is supplied."""
    ruler = AnnRuleExecutor()
    config_path = rule_setting_file if rule_setting_file is not None \
        else './studies/rules/_default_rule_config.json'
    ruler.load_rule_config(config_path)
    return ruler
def load_study_settings(folder, umls_instance,
                        rule_setting_file=None,
                        concept_filter_file=None,
                        do_disjoint_computing=True,
                        export_study_concept_only=False):
    """Load or build the StudyAnalyzer for a study folder, plus its ruler.

    Study concepts are sourced from the first of these files found in the
    folder: label2concept.tsv, exact_concepts_mappings.json,
    manual_mapped_concepts.json, study_concepts.json. A previously pickled
    study_analyzer.pickle is reused when present.

    :return: {'study_analyzer': sa, 'ruler': ruler}, or None when
        export_study_concept_only is True (only sc2closure.json and
        all_concepts.txt are written in that case).
    """
    p, fn = split(folder)
    # reuse a previously serialised analyzer when available
    if isfile(join(folder, 'study_analyzer.pickle')):
        sa = StudyAnalyzer.deserialise(join(folder, 'study_analyzer.pickle'))
    else:
        sa = StudyAnalyzer(fn)
    if isfile(join(folder, 'label2concept.tsv')):
        # using tsv file if exists
        logging.info('loading study concepts from tsv file...')
        lines = utils.read_text_file(join(folder, 'label2concept.tsv'))
        scs = []
        for l in lines:
            arr = l.split('\t')
            if len(arr) != 2:
                logging.error('line [%s] not parsable' % l)
                continue
            t = arr[0]
            c = arr[1]
            # one StudyConcept per tsv row, with a singleton closure
            sc = StudyConcept(t, [t])
            sc.concept_closure = set([c])
            tc = {}
            tc[t] = {'closure': 1, 'mapped': c}
            sc.term_to_concept = tc
            scs.append(sc)
            logging.debug('study concept [%s]: %s, %s' % (sc.name, sc.term_to_concept, sc.concept_closure))
        sa.study_concepts = scs
        logging.info('study concepts loaded')
    elif isfile(join(folder, 'exact_concepts_mappings.json')):
        # expert-verified term -> concept mappings; closures computed lazily
        concept_mappings = utils.load_json_data(join(folder, 'exact_concepts_mappings.json'))
        concept_to_closure = None
        # concept_to_closure = \
        #     StudyConcept.compute_all_concept_closure([concept_mappings[t] for t in concept_mappings],
        #                                              umls_instance, skip_relations=skip_closure_relations)
        scs = []
        for t in concept_mappings:
            sc = StudyConcept(t, [t])
            t_c = {}
            t_c[t] = [concept_mappings[t]]
            sc.gen_concept_closure(term_concepts=t_c, concept_to_closure=concept_to_closure)
            scs.append(sc)
            logging.debug(sc.concept_closure)
        sa.study_concepts = scs
        sa.serialise(join(folder, 'study_analyzer.pickle'))
    elif isfile(join(folder, 'manual_mapped_concepts.json')):
        # fully manual mappings: both closure and term_to_concept provided
        mapped_scs = utils.load_json_data(join(folder, 'manual_mapped_concepts.json'))
        scs = []
        for t in mapped_scs:
            sc = StudyConcept(t, [t])
            sc.concept_closure = set(mapped_scs[t]['concepts'])
            tc = {}
            tc[t] = mapped_scs[t]['tc']
            sc.term_to_concept = tc
            scs.append(sc)
            logging.debug('study concept [%s]: %s, %s' % (sc.name, sc.term_to_concept, sc.concept_closure))
        sa.study_concepts = scs
    else:
        # fall back to name -> term-list definitions resolved via the ontology
        concepts = utils.load_json_data(join(folder, 'study_concepts.json'))
        if len(concepts) > 0:
            scs = []
            for name in concepts:
                scs.append(StudyConcept(name, concepts[name], umls_instance=umls_instance))
                logging.debug('%s, %s' % (name, concepts[name]))
            sa.study_concepts = scs
            sa.serialise(join(folder, 'study_analyzer.pickle'))
    # get filtered concepts only, if filter exists
    if concept_filter_file is not None:
        logging.debug('before removal, the concept length is: %s' % len(sa.study_concepts))
        concept_names = utils.load_json_data(concept_filter_file)
        sa.retain_study_concepts(concept_names)
        logging.debug('after removal: %s' % len(sa.study_concepts))
    # compute disjoint concepts
    if do_disjoint_computing:
        sa.generate_exclusive_concepts()
    if export_study_concept_only:
        # dump concept name -> closure list for external use
        sc2closure = {}
        for sc in sa.study_concepts:
            sc2closure[sc.name] = list(sc.concept_closure)
        utils.save_json_array(sc2closure, join(folder, 'sc2closure.json'))
        logging.debug('sc2closure.json generated in %s' % folder)
    if isfile(join(folder, 'study_options.json')):
        sa.study_options = utils.load_json_data(join(folder, 'study_options.json'))
    # flatten every concept closure into an id list and a merged mapping view
    merged_mappings = {}
    study_concept_list = []
    for c in sa.study_concepts:
        for t in c.term_to_concept:
            all_concepts = list(c.concept_closure)
            study_concept_list += all_concepts
            if len(all_concepts) > 1:
                idx = 0
                for cid in all_concepts:
                    merged_mappings['(%s) %s (%s)' % (c.name, t, idx)] = {'closure': len(all_concepts), 'mapped': cid}
                    idx += 1
            else:
                merged_mappings['(%s) %s' % (c.name, t)] = c.term_to_concept[t]
        # print c.name, c.term_to_concept, c.concept_closure
        # print json.dumps(list(c.concept_closure))
    # logging.debug('print merged mappings...')
    # print json.dumps(merged_mappings)
    # logging.debug(len(study_concept_list))
    utils.save_string('\n'.join(study_concept_list), join(folder, 'all_concepts.txt'))
    if export_study_concept_only:
        # NOTE(review): returns None here, unlike the dict below - callers
        # must not dereference the result in export-only mode
        return
    # sa.gen_study_table(cohort_name, join(folder, 'result.csv'))
    # sa.gen_sample_docs(cohort_name, join(folder, 'sample_docs.json'))
    ruler = load_ruler(rule_setting_file)
    if len(ruler.skip_terms) > 0:
        sa.skip_terms = ruler.skip_terms
    return {'study_analyzer': sa, 'ruler': ruler}
def study(folder, cohort_name, sql_config_file, db_conn_file, umls_instance,
          do_one_iter=False, do_preprocessing=False,
          rule_setting_file=None, sem_idx_setting_file=None,
          concept_filter_file=None,
          retained_patients_filter=None,
          filter_obj_setting=None,
          do_disjoint_computing=True,
          export_study_concept_only=False,
          skip_closure_relations={}):
    """Run one study end to end: load settings, then generate the result
    table via the SQL one-iteration, SQL ruled, or Elasticsearch backend.

    NOTE(review): skip_closure_relations is accepted (with a shared mutable
    default) but never used in this function body - confirm intent.
    """
    ret = load_study_settings(folder, umls_instance,
                              rule_setting_file=rule_setting_file,
                              concept_filter_file=concept_filter_file,
                              do_disjoint_computing=do_disjoint_computing,
                              export_study_concept_only=export_study_concept_only)
    sa = ret['study_analyzer']
    ruler = ret['ruler']
    if do_one_iter:
        # single-pass SQL backend
        sa.gen_study_table_in_one_iteration(cohort_name, join(folder, 'result.csv'), join(folder, 'sample_docs.json'),
                                            sql_config_file, db_conn_file)
    else:
        if sem_idx_setting_file is None:
            # rule-based SQL backend
            sa.gen_study_table_with_rules(cohort_name, join(folder, 'result.csv'), join(folder, 'sample_docs.js'), ruler,
                                          join(folder, 'ruled_anns.json'), sql_config_file, db_conn_file,
                                          text_preprocessing=do_preprocessing)
        else:
            # rule-based Elasticsearch backend with optional patient filtering
            filter_obj = None
            if filter_obj_setting is not None:
                filter_obj = utils.load_json_data(filter_obj_setting)
            sa.gen_study_table_with_rules_es(cohort_name, join(folder, 'result.csv'), join(folder, 'sample_docs.js'),
                                             ruler,
                                             join(folder, 'ruled_anns.json'),
                                             sem_idx_setting_file,
                                             retained_patients_filter,
                                             filter_obj=filter_obj)
    logging.info('done')
def run_study(folder_path, no_sql_filter=None):
    """Run the study configured in *folder_path*.

    Reads study.json (or study_no_filter.json when *no_sql_filter* is set)
    and forwards its settings to study(); logs an error when the config
    file is absent.
    """
    study_config = 'study.json' if no_sql_filter is None else 'study_no_filter.json'
    if isfile(join(folder_path, study_config)):
        r = utils.load_json_data(join(folder_path, study_config))
        # optional patient whitelist: first tab-separated column per line
        retained_patients = None
        if 'query_patients_file' in r:
            retained_patients = []
            lines = utils.read_text_file(r['query_patients_file'])
            for l in lines:
                arr = l.split('\t')
                retained_patients.append(arr[0])
        skip_closure_relations = {}
        if 'skip_closure_relations' in r:
            skip_closure_relations = utils.load_json_data(r['skip_closure_relations'])
        study(folder_path, r['cohort'], r['sql_config'], r['db_conn'],
              concept_mapping.get_umls_client_inst(r['umls_key']),
              do_preprocessing=r['do_preprocessing'],
              rule_setting_file=r['rule_setting_file'],
              do_one_iter=r['do_one_iter'],
              sem_idx_setting_file=None if 'sem_idx_setting_file' not in r else r['sem_idx_setting_file'],
              concept_filter_file=None if 'concept_filter_file' not in r else r['concept_filter_file'],
              retained_patients_filter=retained_patients,
              filter_obj_setting=None if 'filter_obj_setting' not in r else r['filter_obj_setting'],
              do_disjoint_computing=True if 'do_disjoint' not in r else r['do_disjoint'],
              export_study_concept_only=False if 'export_study_concept' not in r else r['export_study_concept'],
              skip_closure_relations=skip_closure_relations
              )
    else:
        logging.error('study.json not found in the folder')
if __name__ == "__main__":
reload(sys)
sys.setdefaultencoding('cp1252')
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
if 2 < len(sys.argv) > 3:
print 'the syntax is [python study_analyzer.py STUDY_DIR [-no-sql-filter]]'
else:
run_study(sys.argv[1], no_sql_filter=None if len(sys.argv) == 2 else 'yes')
|
# coding=utf-8
"""
© 2014 LinkedIn Corp. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
"""
from collections import defaultdict
import os
import sys
from luminol import utils, exceptions
from luminol.anomaly_detector import AnomalyDetector
from luminol.correlator import Correlator
from luminol.modules.correlation_result import CorrelationResult
from luminol.modules.time_series import TimeSeries
class RCA(object):
    """Root-cause analysis: detect anomalies in one metric and correlate
    each anomaly window against a set of related metrics."""
    def __init__(self, metrix, related_metrices):
        """
        Initializer
        :param metrix: a TimeSeries, a dictionary or a path to a csv file(str)
        :param list related_metrices: a list of time series to correlate against.
        """
        self.metrix = self._load(metrix)
        self.anomaly_detector = AnomalyDetector(metrix)
        self.related_metrices = related_metrices
        self.anomalies = self.anomaly_detector.get_anomalies()
        # analysis runs eagerly; results land in self.output / self.output_by_name
        self._analyze()
    def _load(self, metrix):
        """
        Load time series.
        :param metrix: a TimeSeries, a dictionary or a path to a csv file(str).
        :return TimeSeries: a TimeSeries object.
        """
        if isinstance(metrix, TimeSeries):
            return metrix
        if isinstance(metrix, dict):
            return TimeSeries(metrix)
        # otherwise treat it as a csv file path
        return TimeSeries(utils.read_csv(metrix))
    def _analyze(self):
        """
        Analyzes if a matrix has anomalies.
        If any anomaly is found, determine if the matrix correlates with any other matrixes.
        To be implemented.
        """
        # output: anomaly timestamp -> [(start, end, correlation dict, metric)]
        output = defaultdict(list)
        # output_by_name: metric -> [(start, end, correlation dict)]
        output_by_name = defaultdict(list)
        scores = self.anomaly_detector.get_all_scores()
        if self.anomalies:
            for anomaly in self.anomalies:
                metrix_scores = scores
                start_t, end_t = anomaly.get_time_window()
                t = anomaly.exact_timestamp
                # Compute extended start timestamp and extended end timestamp.
                room = (end_t - start_t) / 2
                if not room:
                    room = 30
                extended_start_t = start_t - room
                extended_end_t = end_t + room
                metrix_scores_cropped = metrix_scores.crop(extended_start_t, extended_end_t)
                # Adjust the two timestamps if not enough data points are included.
                # NOTE(review): loops until the crop holds >= 2 points; assumes
                # the series is long enough for the window to eventually cover
                # them - confirm there is no pathological non-terminating case.
                while len(metrix_scores_cropped) < 2:
                    extended_start_t = extended_start_t - room
                    extended_end_t = extended_end_t + room
                    metrix_scores_cropped = metrix_scores.crop(extended_start_t, extended_end_t)
                # Correlate with other metrics
                for entry in self.related_metrices:
                    try:
                        entry_correlation_result = Correlator(self.metrix, entry, time_period=(extended_start_t, extended_end_t), use_anomaly_score=True).get_correlation_result()
                        record = extended_start_t, extended_end_t, entry_correlation_result.__dict__, entry
                        record_by_name = extended_start_t, extended_end_t, entry_correlation_result.__dict__
                        output[t].append(record)
                        output_by_name[entry].append(record_by_name)
                    except exceptions.NotEnoughDataPoints:
                        # skip metrics whose window has too few points to correlate
                        pass
        self.output = output
        self.output_by_name = output_by_name
|
import tkinter as tk
import random
class Controller(object):
    """
    A class to control the movement of the snake in the game
    """

    # direction -> (opposite direction, axis, pixel delta). A move is ignored
    # when the snake is currently travelling in the opposite direction,
    # because the snake cannot reverse onto itself.
    _MOVES = {
        'left': ('right', 'x', -10),
        'right': ('left', 'x', 10),
        'up': ('down', 'y', -10),
        'down': ('up', 'y', 10),
    }

    def __init__(self, screen):
        """
        Binds the arrow keys to the game canvas.
        Parameters:
            screen (Canvas): The canvas for the Snake game.
        """
        self._screen = screen
        screen.bind("<Left>", self.left_move)
        screen.bind("<Right>", self.right_move)
        screen.bind("<Up>", self.up_move)
        screen.bind("<Down>", self.down_move)
        screen.focus_set()

    def _move(self, direction):
        """Redraw the snake head one step in *direction*, unless that would
        reverse the snake onto itself (then the key press is ignored)."""
        opposite, axis, delta = self._MOVES[direction]
        if self._screen.get_direction() == opposite:
            return
        # same order as the original handlers: set direction, erase the old
        # head, shift the coordinate, then redraw the head
        self._screen.set_direction(direction)
        self._screen.delete(self._screen.get_snake_id())
        if axis == 'x':
            self._screen.change_x_axis(delta)
        else:
            self._screen.change_y_axis(delta)
        self._screen.create_snake_head()

    def left_move(self, e):
        """Changes the direction of the snake head to 'left' when the left key is pressed."""
        self._move('left')

    def right_move(self, e):
        """Changes the direction of the snake head to 'right' when the right key is pressed."""
        self._move('right')

    def up_move(self, e):
        """Changes the direction of the snake head to 'up' when the up key is pressed."""
        self._move('up')

    def down_move(self, e):
        """Changes the direction of the snake head to 'down' when the down key is pressed."""
        self._move('down')
class Screen(tk.Canvas):
    """
    A canvas class that displays the game.

    Holds all game state: the snake head position, direction, tail
    segments, the snack, and the running/ended flag.
    """
    def __init__(self, master):
        """
        Construct the canvas of the game on the root window.
        Parameters:
            master (tk.Tk): The root window for the Snake game.
        """
        super().__init__(master)
        self._master = master
        self._width = 500
        self._height = 300
        self.config(bg='white', width=self._width, height=self._height)
        # _x/_y track the head in a doubled coordinate space: the head is
        # drawn at (_x/2, _y/2), so _x spans [0, 2*width] and _y spans
        # [0, 2*height] (see create_snake_head's wrap-around logic).
        self._x = self._width
        self._y = self._height
        self._game_status = True
        self._direction = 'right'
        # Placeholder value only; create_snake_head() immediately replaces
        # it with the canvas item id of the drawn head.
        self._snake = self.create_oval
        self.create_snake_head()
        # Same placeholder trick for the snack.
        self._snack = self.create_oval
        self.create_snack()
        self._tail_number = 0
        self._tail_list = []
        self._tail = self.create_line([(0, 0), (0, 0)])
    def get_snake_id(self):
        """Returns the id of the snake head.
        Returns:
            snake (int): The id of the snake head.
        """
        return self._snake
    def get_direction(self):
        """Returns the current direction of the snake head.
        Returns:
            direction (str): The direction of the snake head.
        """
        return self._direction
    def get_tail_number(self):
        """Returns the length of the tail of the snake.
        Returns:
            tail_number (int): The current length of the tail.
        """
        return self._tail_number
    def get_game_status(self):
        """Returns the current status of the game. True if the game is running,
        False otherwise.
        Returns:
            game_status (bool): The current status of the game
        """
        return self._game_status
    def set_direction(self, direction):
        """
        Changes the movement direction of the snake
        Parameter:
            direction (str): The new direction of the snake.
        """
        self._direction = direction
    def change_x_axis(self, change):
        """
        Changes the value of the x-axis.
        Parameter:
            change (int): Changes the x-axis by this value.
        """
        self._x += change
    def change_y_axis(self, change):
        """
        Changes the value of the y-axis.
        Parameter:
            change (int): Changes the y-axis by this value.
        """
        self._y += change
    def check_collision(self):
        """
        Checks for any collision between the snake head and its tail
        or with the snack in the game.

        Eating a snack grows the tail by 10 and spawns a new snack;
        touching the tail ends the game.
        """
        snake_coords = self.coords(self._snake)
        snack_coords = self.coords(self._snack)
        x1, y1, x2, y2 = snack_coords
        xx1, yy1, xx2, yy2 = snake_coords
        # Checks for collision between the snake head and the snack:
        # either corner of the snack's bounding box inside the head's box.
        if xx1 <= x1 <= xx2:
            if yy1 <= y1 <= yy2:
                self.delete(self._snack)
                self._tail_number += 10
                self.create_snack()
        elif xx1 <= x2 <= xx2:
            if yy1 <= y2 <= yy2:
                self.delete(self._snack)
                self._tail_number += 10
                self.create_snack()
        # Checks for collision between the snake head and the tail
        for tail in self._tail_list:
            tail_coords = self.coords(tail)
            x1, y1, x2, y2 = tail_coords
            if xx1 <= x1 <= xx2:
                if yy1 <= y1 <= yy2:
                    self._game_status = False
            elif xx1 <= x2 <= xx2:
                if yy1 <= y2 <= yy2:
                    self._game_status = False
    def create_snack(self):
        """
        Creates the snack in the game based on random coordinates.
        """
        # -5 keeps the 5x5 snack fully inside the canvas.
        random_x = random.randint(0, self._width-5)
        random_y = random.randint(0, self._height-5)
        self._snack = self.create_oval(random_x, random_y, random_x + 5, random_y + 5, fill='red', outline='red')
    def create_snake_head(self):
        """
        Creates the snake head in the game.
        """
        # Convert from the doubled coordinate space to canvas pixels.
        circle_size = (self._x / 2, self._y / 2)
        x, y = circle_size
        # Resets the x and y coordinates of the snake head if it makes contact
        # with the boundaries of the game (wrap-around to the opposite edge).
        if (self._width*2) < self._x+10:
            self._x = 0
        elif self._x < 0:
            self._x = (self._width*2)
        if (self._height*2) < self._y+10:
            self._y = 0
        elif self._y < 0:
            self._y = (self._height*2)
        self._snake = self.create_oval(x, y, x + 10, y+10, fill='black')
    def create_tail(self):
        """
        Creates and keeps track of the tail of the snake based on the current score
        as well as the movement direction.
        """
        snake_coords = self.coords(self._snake)
        x1, y1, x2, y2 = snake_coords
        # Centre of the head's bounding box.
        x = (x1+x2)/2
        y = (y1+y2)/2
        tail_size = 10
        self._tail_list += [self._tail, ]
        # Draw the new segment trailing behind the head, opposite to the
        # direction of travel.
        if self._direction == 'right':
            self._tail = self.create_line([(x-tail_size, y), (x, y)])
        elif self._direction == 'left':
            self._tail = self.create_line([(x+tail_size, y), (x, y)])
        elif self._direction == 'up':
            self._tail = self.create_line([(x, y+tail_size), (x, y)])
        else:
            self._tail = self.create_line([(x, y-tail_size), (x, y)])
        # Removes any tail-lines created after the length of the tail exceeds the score
        if len(self._tail_list) > self._tail_number:
            self.delete(self._tail_list.pop(0))
class SnakeGame(object):
    """
    A game of Snake Xenzia.

    Owns the root window, the Screen canvas, the Controller key bindings
    and the score label, and drives the animation loop via tk.after.
    """
    def __init__(self, master):
        """
        Construct the main game window.
        Parameters:
            master (tk.Tk): The root window for the Snake game.
        """
        self._master = master
        self._master.title("Snake Game")
        self._canvas = Screen(master)
        self._controls = Controller(self._canvas)
        self._canvas.pack(side=tk.BOTTOM)
        self._score = tk.Label(master, bg='black', fg='white')
        self._score.pack(fill='x')
        # Base delay (ms) between frames; the effective delay shrinks as
        # the score grows, speeding the game up.
        self._speed = 50
        self._master.after(self._speed, self.animation)
    def animation(self):
        """
        Animates and constructs the snake head and tail. Checks the
        score and game status at every cycle and updates accordingly.
        Stops rescheduling itself once the game has ended.
        """
        if self._canvas.get_direction() == 'right':
            self._controls.right_move('')
        elif self._canvas.get_direction() == 'left':
            self._controls.left_move('')
        elif self._canvas.get_direction() == 'up':
            self._controls.up_move('')
        else:
            self._controls.down_move('')
        self._canvas.check_collision()
        if not self._canvas.get_game_status():
            # BUG FIX: previously the loop kept drawing and rescheduling
            # after game over; return here so the animation stops.
            self.game_end()
            return
        self._canvas.create_tail()
        self.update_score()
        # Speed up with the score, but keep the delay at least 1 ms so
        # tk.after never receives a zero/negative delay at high scores.
        speed = max(1, self._speed - (self._canvas.get_tail_number() // 10))
        self._master.after(speed, self.animation)
    def update_score(self):
        """
        Updates the game score on the label widget of the main window.
        """
        self._score.config(text=f'Score: {self._canvas.get_tail_number()}')
    def game_end(self):
        """
        Hides the game canvas and increases the size of the score label.
        """
        self._canvas.pack_forget()
        self._score.config(font='Courier, 30')
        self._score.pack(ipadx=200, ipady=200)
if __name__ == '__main__':
    # Script entry point: build the root window, start the game and block
    # in Tk's event loop until the window is closed.
    root = tk.Tk()
    game = SnakeGame(root)
    root.resizable(False, False)
    root.mainloop()
|
"""Copyright © 2020-present, Swisscom (Schweiz) AG.
All rights reserved.
This class is used for training models and is the core of the framework.
With the help of this class, the user of the framework is able to train and
develop models. The framework gets all the relevant objects as an input, and
all the parameters from a YAML file or a dictionary with the parameters, it
instantiates all the relevant helper objects for training the model and does
the training.
"""
from copsolver.frank_wolfe_solver import FrankWolfeSolver
from copsolver.analytical_solver import AnalyticalSolver
from validator import Validator
from paretomanager.pareto_manager_class import ParetoManager
from commondescentvector.multi_objective_cdv import MultiObjectiveCDV
from loss.loss_class import Loss
import torch.nn as nn
from dataloader.mamo_data_handler import MamoDataHandler
import time
import numpy as np
import random
import os
import torch
import torch.optim as optim
from torch.autograd import Variable
import yaml
import logging
logger = logging.getLogger('main')
logger.setLevel(logging.INFO)
class Trainer():
    """The trainer class, the core of the framework, used for training models.

    All the needed objects for this class have to be given through the constructor.
    Additionally, the other parameters needed by this trainer have to be supplied
    in a YAML file named 'trainer_params.yaml' or a dictionary containing the parameters.
    For more details about the parameters supplied in this YAML file, please refer to
    'Attributes from the YAML file' section below.

    Attributes:
        data_handler: A MamoDataHandler object which feeds the data set to the trainer.
        model: A torch.nn.Module object which is the model that is being trained.
        losses: A list of Loss objects which represent the losses/objectives that the
            model is trained on.
        validation_metrics: A list of MetricAtK objects which are used to evaluate
            the model while the training and validation process.
        save_to_path: A path to a directory where the trained models from the Pareto
            front will be saved during training.
        device: A variable indicating whether the model is trained on the gpu or on
            the cpu.
        _train_dataloader: A dataloader object used for feeding the data to the trainer.
        pareto_manager: A ParetoManager which is responsible for maintaining a pareto
            front of models and saving these models on permanent storage.
        validator: A Validator object which is used to evaluate the models on multiple
            objective and multiple losses.
        max_empirical_losses: A list of losses (float) which is the approximation of the
            maximum empirical losses the model will have.
        common_descent_vector: A MultiObjectiveCDV, is responsible for combining the multiple
            gradients from the multiple losses into a single gradient.
        optimizer: A pytorch optimizer which is used to train the model.

    Attributes from the YAML file:
        seed: An integer, used to initialize the numpy and pytorch random seeds, default = 42.
        normalize_gradients: A boolean value, indicating whether to normalize the gradients
            while training the model or not, default = True.
        learning_rate: A float value, the learning rate that is given to the pytorch
            optimizer, if the optimizer is not given in the constructor, default = 1e-3.
        batch_size_training: An integer value, representing the batch sizes in which the data is
            fed to the trainer, default = 500.
        shuffle_training: A boolean value, indicating if the training data should be shuffled,
            default = True.
        drop_last_batch_training: A boolean value, indicating to drop the last incomplete batch,
            if the training dataset size is not divisible by the batch size, default = True.
        batch_size_validation: An integer value, representing the batch sizes in which the data is
            fed to the validator, default = the full validation set length.
        shuffle_validation: A boolean value, indicating if the validation data should be shuffled,
            default = True.
        drop_last_batch_validation: A boolean value, indicating to drop the last incomplete batch,
            if the validation dataset size is not divisible by the batch size, default = False.
        number_of_epochs: An integer value, indicating for how many epochs should the model
            be trained, default = 50.
        frank_wolfe_max_iter: An integer value, indicating the maximum number of iterations
            to be used by the frank wolfe algorithm in the commonDescentVector object,
            default = 100.
        anneal: A boolean value, indicating if annealing should be used while training the
            model, default = True.
        beta_start: If the anneal is used, this will be the first value of the beta,
            default = 0.
        beta_cap: If the anneal is used, this will be the maximum value of the beta,
            default = 0.3.
        beta_step: If the anneal is used, this is the amount by which to increase the beta
            every batch, default = 0.3/10000.
    """

    def __init__(self, data_handler, model, losses, validation_metrics,
                 save_to_path, params='yaml_files/trainer_params.yaml',
                 optimizer=None):
        """The constructor which initializes a trainer object.

        Arguments:
            data_handler: A MamoDataHandler object which feeds the data set to the trainer.
            model: A torch.nn.Module object which is the model that is being trained.
            losses: A list of Loss objects which represent the losses/objectives that the
                model is trained on.
            validation_metrics: A list of MetricAtK objects which are used to evaluate
                the model while the training and validation process.
            save_to_path: A path to a directory where the trained models from the Pareto
                front will be saved during training.
            params: Path to the yaml file with the trainer parameters, or a dictionary
                containing the parameters.
            optimizer: A pytorch optimizer which is used to train the model, if it is None,
                a default Adam optimizer is created.

        Raises:
            TypeError: If any of the arguments passed are not an instance of the expected
                class or are None, a TypeError will be raised.
            ValueError: If the directory which save_to_path references is not empty, a
                ValueError will be raised.
        """
        logger.info('Trainer: Started with initializing trainer...')
        self._check_input_(data_handler, model, losses,
                           validation_metrics, save_to_path, optimizer)
        self.data_handler = data_handler
        self._read_params(params)
        self.model = model
        self.losses = losses
        logger.info('Trainer: Losses: ')
        # BUG FIX: the prefix was used as the join *separator*, producing
        # "loss1 Trainer: loss2"; prepend it once instead.
        logger.info('Trainer: %s' % ', '.join(loss.name for loss in self.losses))
        self.validation_metrics = validation_metrics
        logger.info('Trainer: Validation metrics: ')
        logger.info('Trainer: %s' % ', '.join(m.get_name() for m in self.validation_metrics))
        self.save_to_path = save_to_path
        logger.info('Trainer: Saving models to: %s' % self.save_to_path)
        self.optimizer = optimizer
        # set cuda if available
        self.device = torch.device(
            'cuda' if torch.cuda.is_available() else 'cpu')
        logger.info('Trainer: Training on device: %s' % self.device)
        self._init_objects()
        logger.info('Trainer: Initialization done.')

    def _check_input_(self, data_handler, model, losses, validation_metrics, save_to_path, optimizer):
        """A helper function for the __init__ to check the input of the constructor.
        """
        if not isinstance(data_handler, MamoDataHandler):
            raise TypeError(
                'Please check you are using the right data handler object, or the right order of the attributes!')
        if not isinstance(model, nn.Module):
            raise TypeError(
                'Please check you are using the right model object, or the right order of the attributes!')
        # check if losses is None
        if losses is None:
            raise ValueError(
                'The losses are None, please make sure to give valid losses!')
        if not all([isinstance(x, Loss) for x in losses]):
            raise TypeError(
                'Please check you are using the right loss objects, or the right order of the attributes!')
        # check if there are at least two losses
        if len(losses) < 2:
            raise ValueError(
                'Please check you have defined at least two losses,'
                + ' for training with one loss use the Single Objective Loss class!')
        # check if length is at least 1
        if len(validation_metrics) == 0:
            raise ValueError(
                'Please check you have defined at least one validation metric!')
        if not os.path.exists(save_to_path):
            os.mkdir(save_to_path)
        # checking if the save_to_path directory is empty
        if os.listdir(save_to_path):
            raise ValueError(
                'Please make sure that the directory where you want to save the models is empty!')
        # if the optimizer is not None, than has to be pytorch optimizer object
        if optimizer is not None:
            if not isinstance(optimizer, optim.Optimizer):
                raise TypeError(
                    'Please make sure that the optimizer is a pytorch Optimizer object!')

    def _read_params(self, params):
        """A helper function for the __init__ to read the configuration yaml file.
        """
        logger.info('Trainer: Reading trainer parameters.')
        if type(params) is str:
            with open(params, 'r') as stream:
                params = yaml.safe_load(stream)
        # BUG FIX: the seed was overridden with random.randint(1, 1000),
        # contradicting the documented 'seed' parameter and making runs
        # non-reproducible; honour the configured seed again.
        self.seed = int(params.get('seed', 42))
        logger.info('Trainer: Random seed: %d' % self.seed)
        self.normalize_gradients = bool(
            params.get('normalize_gradients', True))
        logger.info('Trainer: Normalize gradients: %s' %
                    self.normalize_gradients)
        self.learning_rate = float(params.get('learning_rate', 1e-3))
        logger.info('Trainer: Learning rate: %f' % self.learning_rate)
        self.batch_size_training = int(params.get('batch_size_training', 500))
        logger.info('Trainer: Batch size training: %d' %
                    self.batch_size_training)
        self.shuffle_training = bool(params.get('shuffle_training', True))
        logger.info('Trainer: Shuffle training: %d' %
                    self.shuffle_training)
        self.drop_last_batch_training = bool(
            params.get('drop_last_batch_training', True))
        logger.info('Trainer: Drop last batch training: %d' %
                    self.drop_last_batch_training)
        self.batch_size_validation = int(
            params.get('batch_size_validation', self.data_handler.get_validationdata_len()))
        logger.info('Trainer: Batch size validation: %d' %
                    self.batch_size_validation)
        self.shuffle_validation = bool(params.get('shuffle_validation', True))
        logger.info('Trainer: Shuffle validation: %d' %
                    self.shuffle_validation)
        self.drop_last_batch_validation = bool(
            params.get('drop_last_batch_validation', False))
        logger.info('Trainer: Drop last batch validation: %d' %
                    self.drop_last_batch_validation)
        self.number_of_epochs = int(params.get('number_of_epochs', 50))
        logger.info('Trainer: Number of epochs: %d' % self.number_of_epochs)
        self.frank_wolfe_max_iter = int(
            params.get('frank_wolfe_max_iter', 100))
        logger.info('Trainer: Frank Wolfe max iterations: %d' %
                    self.frank_wolfe_max_iter)
        self.anneal = bool(params.get('anneal', True))
        logger.info('Trainer: Annealing: %s' % self.anneal)
        if self.anneal and ('beta_start' not in params or 'beta_cap' not in params or 'beta_step' not in params):
            raise ValueError(('Please make sure that if anneal is set to True, '
                              'the beta_start, beta_cap and beta_step are all '
                              'present in the parameters yaml file!'))
        if self.anneal:
            self.beta_start = float(params.get('beta_start', 0))
            logger.info('Trainer: Beta start: %f' % self.beta_start)
            self.beta_cap = float(params.get('beta_cap', 0.3))
            logger.info('Trainer: Beta cap: %f' % self.beta_cap)
            # beta_step may be an arithmetic expression string (e.g.
            # '0.3/10000') or a plain number; eval only strings so a
            # numeric YAML value no longer crashes.
            # NOTE(security): eval is acceptable only because the params
            # file is trusted, local configuration.
            raw_step = params.get('beta_step', '0.3/10000')
            self.beta_step = float(eval(raw_step)) if isinstance(raw_step, str) else float(raw_step)
            logger.info('Trainer: Beta step: %f' % self.beta_step)

    def _init_objects(self):
        """A helper function for the __init__ to initialize different objects.
        """
        logger.info('Trainer: Initializing helper trainer objects.')
        np.random.seed(self.seed)
        torch.manual_seed(self.seed)
        self.model.to(self.device)
        self._train_dataloader = self.data_handler.get_train_dataloader(batch_size=self.batch_size_training,
                                                                        shuffle=self.shuffle_training,
                                                                        drop_last=self.drop_last_batch_training)
        self.pareto_manager = ParetoManager(PATH=self.save_to_path)
        val_dataloader = self.data_handler.get_validation_dataloader(
            batch_size=self.batch_size_validation, shuffle=self.shuffle_validation,
            drop_last=self.drop_last_batch_validation)
        self.validator = Validator(
            self.model, val_dataloader, self.validation_metrics, self.losses)
        self.max_empirical_losses = None
        if self.normalize_gradients:
            self.max_empirical_losses = self._compute_max_empirical_losses()
            logger.info('Trainer: Max empirical losses: %s' %
                        self.max_empirical_losses)
        # Two losses have a closed-form solution; more need Frank-Wolfe.
        copsolver = None
        if len(self.losses) <= 2:
            copsolver = AnalyticalSolver()
        else:
            copsolver = FrankWolfeSolver(max_iter=self.frank_wolfe_max_iter)
        self.common_descent_vector = MultiObjectiveCDV(
            copsolver=copsolver, max_empirical_losses=self.max_empirical_losses,
            normalized=self.normalize_gradients)
        # create default optimizer
        if self.optimizer is None:
            self.optimizer = optim.Adam(
                self.model.parameters(), lr=self.learning_rate)

    def _compute_loss(self, loss, y_true, y_pred, x, model):
        """Dispatch to the loss, passing the model only when it needs it."""
        if(loss.needs_model):
            L = loss.compute_loss(y_true, y_pred, x, model)
        else:
            L = loss.compute_loss(y_true, y_pred)
        return(L)

    def _compute_max_empirical_losses(self):
        """A helper function for approximating the maximum empirical loss the model
        could have. It is called by _init_objects function.
        """
        # approximate the max loss empirically
        max_losses = [0] * len(self.losses)
        cnt = 0
        for x, y in self._train_dataloader:
            cnt += 1
            # forward pass
            model_output = self.model(x)
            for i, loss in enumerate(self.losses):
                # if annealing is done, the KL divergence is ignored when computing
                # the max empirical loss, therefore the anneal is set to 0
                L = self._compute_loss(loss, y, model_output, x, self.model)
                # compute the moving average term
                max_losses[i] = (cnt - 1) / cnt * \
                    max_losses[i] + 1 / cnt * L.item()
        return max_losses

    def _get_gradient_np(self):
        """A helper function for obtaining the gradients of the model in a numpy
        array.

        Before the first backward call, all grad attributes are None, in which
        case a zero vector of the parameter size is returned.  After the first
        backward pass, the concatenated gradient values are returned.
        """
        gradient = []
        try:
            for p in self.model.parameters():
                gradient.append(p.grad.cpu().detach().numpy().ravel())
            return np.concatenate(gradient)
        except AttributeError:
            # p.grad is None before the first backward pass, so .cpu()
            # raises AttributeError; return a matching zero gradient.
            size = 0
            for p in self.model.parameters():
                size += len(p.cpu().detach().numpy().ravel())
            return np.zeros(shape=size)

    def train(self):
        """The main method of this class. By calling this method, the training process
        starts.

        Returns:
            The validation loss of the final epoch.
        """
        # model training
        logger.info('Trainer: Started training...')
        if self.anneal:
            beta = self.beta_start
        for epoch in range(self.number_of_epochs):
            # start time for current epoch
            start_time = time.time()
            # statistics
            training_loss = 0
            average_alpha = [0] * len(self.losses)
            cnt = 0
            # set model in train mode
            self.model.train()
            # do training
            for x, y in self._train_dataloader:
                # anneal beta
                if self.anneal:
                    beta += self.beta_step
                    beta = beta if beta < self.beta_cap else self.beta_cap
                # calculate the gradients, one backward pass per loss
                gradients = []
                for i, loss in enumerate(self.losses):
                    # forward pass
                    model_output = self.model(x)
                    # calculate loss
                    L = self._compute_loss(loss, y, model_output, x, self.model)
                    # zero gradient
                    self.optimizer.zero_grad()
                    # backward pass
                    L.backward()
                    # get gradient for correctness objective
                    gradients.append(self._get_gradient_np())
                # calculate the losses
                losses_computed = []
                # forward pass
                model_output = self.model(x)
                for i, loss in enumerate(self.losses):
                    L = self._compute_loss(loss, y, model_output, x, self.model)
                    losses_computed.append(L)
                # get the final loss to compute the common descent vector
                final_loss, alphas = self.common_descent_vector.get_descent_vector(
                    losses_computed, gradients)
                # zero gradient
                self.optimizer.zero_grad()
                # backward pass
                final_loss.backward()
                # update parameters
                self.optimizer.step()
                # statistics....
                cnt += 1
                # moving average loss
                training_loss = (cnt - 1) / cnt * \
                    training_loss + 1 / cnt * final_loss.item()
                # moving average alpha
                for i, alpha in enumerate(alphas):
                    average_alpha[i] = (cnt - 1) / cnt * \
                        average_alpha[i] + 1 / cnt * alpha
                # time in milliseconds for current batch
                batch_time = (time.time() - start_time) / cnt * 1000
                # log progress
                if cnt % 20 == 0:
                    average_alpha_string = ', '.join(
                        ['%.4f']*len(average_alpha)) % tuple(average_alpha)
                    logger.info('Trainer: Batch: %d/%d, Batch time: %.2fms,' %
                                (cnt,
                                 int(np.round(self.data_handler.get_traindata_len() / self.batch_size_training)),
                                 batch_time)
                                + ' Training loss: %.3f, Alphas: [%s]' %
                                (training_loss, average_alpha_string))
            # do validation
            val_metrics, val_objectives = self.validator.evaluate(
                disable_anneal=self.anneal, verbose=False)
            val_loss = self.validator.combine_objectives(
                val_objectives, alphas=average_alpha, max_normalization=self.max_empirical_losses)
            # add the solution to the pareto manager
            self.pareto_manager.add_solution(val_metrics, self.model)
            # calculate epoch time
            epoch_time = time.time() - start_time
            val_metrics_string = ', '.join(
                ['%.4f']*len(val_metrics)) % tuple(val_metrics)
            val_objectives_string = ', '.join(
                ['%.4f']*len(val_objectives)) % tuple(val_objectives)
            logger.info('Trainer: Epoch: %d, Epoch time: %.2fs, Training loss: %.3f,' %
                        (epoch + 1, epoch_time, training_loss)
                        + ' Validation loss: %.3f, Validation metrics: [%s], Validation losses: [%s]' %
                        (val_loss, val_metrics_string, val_objectives_string))
        return val_loss
|
from src.flaskbasic.functions import Functions
import pytest
fun = Functions
def test_student_name():
    """Functions.readName should echo the supplied name back unchanged."""
    expected = {1: 'Lwando', 2: 'Zukisa', 3: 'ludwe'}
    for student_id, name in expected.items():
        assert fun.readName(name, student_id) == name
def delete(student_id):
    """Delete a student's results row by primary key and commit.

    NOTE(review): `Student` and `db` are not defined or imported anywhere
    in this module -- this looks like a Flask view function pasted into
    the test file by mistake; calling it here raises NameError.  Confirm
    and move it to the application module it belongs to.
    """
    student_results = Student.query.get_or_404(student_id)
    db.session.delete(student_results)
    db.session.commit()
def test_all_results():
    """Functions.readResults should return its inputs unchanged as a tuple."""
    cases = [
        (1, 'Lwando', 10, 60, 10),
        (2, 'Zukisa', 10, 60, 5),
        (3, 'ludwe', 32, 12, 22),
    ]
    for record in cases:
        assert fun.readResults(*record) == record
# def test_all_results():
# assert fun.readResults(1, 'Lwando',10,60,10) == (1, 'Lwando', 10, 60, 10)
# assert fun.readResults(2, 'Zukisa',10,60,5) == (2, 'Zukisa',10,60,5)
# assert fun.readResults(1, 'Zikia',10,60,10) == (1, 'Lwando', 10, 60, 10)
|
# -*- coding: utf-8 -*-
# vispy: gallery 30
# -----------------------------------------------------------------------------
# Copyright (c) Vispy Development Team. All Rights Reserved.
# Distributed under the (new) BSD License. See LICENSE.txt for more info.
# -----------------------------------------------------------------------------
# Author: John David Reaver
# Date: 04/29/2014
# -----------------------------------------------------------------------------
from vispy import app, gloo
# Shader source code
# -----------------------------------------------------------------------------
# Pass-through vertex shader: quad corners are already in clip space.
vertex = """
attribute vec2 position;
void main()
{
gl_Position = vec4(position, 0, 1.0);
}
"""
# Fragment shader: iterates z -> z^2 + c per pixel, then maps the smoothed
# escape count through the 'hot' colour ramp.  Uniforms (resolution,
# center, scale) define the viewed region of the complex plane.
fragment = """
uniform vec2 resolution;
uniform vec2 center;
uniform float scale;
vec3 hot(float t)
{
return vec3(smoothstep(0.00,0.33,t),
smoothstep(0.33,0.66,t),
smoothstep(0.66,1.00,t));
}
void main()
{
const int n = 300;
const float log_2 = 0.6931471805599453;
vec2 c;
// Recover coordinates from pixel coordinates
c.x = (gl_FragCoord.x / resolution.x - 0.5) * scale + center.x;
c.y = (gl_FragCoord.y / resolution.y - 0.5) * scale + center.y;
float x, y, d;
int i;
vec2 z = c;
for(i = 0; i < n; ++i)
{
x = (z.x*z.x - z.y*z.y) + c.x;
y = (z.y*z.x + z.x*z.y) + c.y;
d = x*x + y*y;
if (d > 4.0) break;
z = vec2(x,y);
}
if ( i < n ) {
float nu = log(log(sqrt(d))/log_2)/log_2;
float index = float(i) + 1.0 - nu;
float v = pow(index/float(n),0.5);
gl_FragColor = vec4(hot(v),1.0);
} else {
gl_FragColor = vec4(hot(0.0),1.0);
}
}
"""
# vispy Canvas
# -----------------------------------------------------------------------------
class Canvas(app.Canvas):
    """Interactive Mandelbrot viewer.

    Pan by dragging with the left mouse button, zoom with the wheel or the
    +/- keys.  All per-pixel work happens in the fragment shader; this
    class only maintains the view uniforms (center, scale, resolution).
    """
    def __init__(self, *args, **kwargs):
        app.Canvas.__init__(self, *args, **kwargs)
        self.program = gloo.Program(vertex, fragment)
        # Draw a rectangle that takes up the whole screen. All of the work is
        # done in the shader.
        self.program["position"] = [(-1, -1), (-1, 1), (1, 1),
                                    (-1, -1), (1, 1), (1, -1)]
        # View state mirrored into the shader uniforms.
        self.scale = self.program["scale"] = 3
        self.center = self.program["center"] = [-0.5, 0]
        self.apply_zoom()
        # Pan limits (both axes) and zoom clamps.
        self.bounds = [-2, 2]
        self.min_scale = 0.00005
        self.max_scale = 4
        gloo.set_clear_color(color='black')
        # Redraw continuously at the display rate.
        self._timer = app.Timer('auto', connect=self.update, start=True)
        self.show()
    def on_draw(self, event):
        # Render the full-screen quad; the shader does everything else.
        self.program.draw()
    def on_resize(self, event):
        self.apply_zoom()
    def apply_zoom(self):
        # Keep the GL viewport and the shader's resolution uniform in sync
        # with the physical window size.
        width, height = self.physical_size
        gloo.set_viewport(0, 0, width, height)
        self.program['resolution'] = [width, height]
    def on_mouse_move(self, event):
        """Pan the view based on the change in mouse position."""
        if event.is_dragging and event.buttons[0] == 1:
            x0, y0 = event.last_event.pos[0], event.last_event.pos[1]
            x1, y1 = event.pos[0], event.pos[1]
            X0, Y0 = self.pixel_to_coords(float(x0), float(y0))
            X1, Y1 = self.pixel_to_coords(float(x1), float(y1))
            self.translate_center(X1 - X0, Y1 - Y0)
    def translate_center(self, dx, dy):
        """Translates the center point, and keeps it in bounds."""
        center = self.center
        center[0] -= dx
        center[1] -= dy
        # Clamp both coordinates to [bounds[0], bounds[1]].
        center[0] = min(max(center[0], self.bounds[0]), self.bounds[1])
        center[1] = min(max(center[1], self.bounds[0]), self.bounds[1])
        self.program["center"] = self.center = center
    def pixel_to_coords(self, x, y):
        """Convert pixel coordinates to Mandelbrot set coordinates."""
        rx, ry = self.size
        # Pixel y grows downward but the imaginary axis grows upward,
        # hence the (ry - y) flip.
        nx = (x / rx - 0.5) * self.scale + self.center[0]
        ny = ((ry - y) / ry - 0.5) * self.scale + self.center[1]
        return [nx, ny]
    def on_mouse_wheel(self, event):
        """Use the mouse wheel to zoom."""
        delta = event.delta[1]
        if delta > 0:  # Zoom in
            factor = 0.9
        elif delta < 0:  # Zoom out
            factor = 1 / 0.9
        # When |delta| < 1 the range is empty, so `factor` is never read
        # while unbound.
        for _ in range(int(abs(delta))):
            self.zoom(factor, event.pos)
    def on_key_press(self, event):
        """Use + or - to zoom in and out.
        The mouse wheel can be used to zoom, but some people don't have mouse
        wheels :)
        """
        if event.text == '+' or event.text == '=':
            self.zoom(0.9)
        elif event.text == '-':
            self.zoom(1/0.9)
    def zoom(self, factor, mouse_coords=None):
        """Factors less than zero zoom in, and greater than zero zoom out.
        If mouse_coords is given, the point under the mouse stays stationary
        while zooming. mouse_coords should come from MouseEvent.pos.
        """
        if mouse_coords is not None:  # Record the position of the mouse
            x, y = float(mouse_coords[0]), float(mouse_coords[1])
            x0, y0 = self.pixel_to_coords(x, y)
        self.scale *= factor
        self.scale = max(min(self.scale, self.max_scale), self.min_scale)
        self.program["scale"] = self.scale
        # Translate so the mouse point is stationary
        if mouse_coords is not None:
            x1, y1 = self.pixel_to_coords(x, y)
            self.translate_center(x1 - x0, y1 - y0)
if __name__ == '__main__':
    # Create the canvas and enter the vispy event loop.
    canvas = Canvas(size=(800, 800), keys='interactive')
    app.run()
|
import logging
from mopidy import backend
logger = logging.getLogger(__name__)
# Maps the configured bitrate (kbps) to the quality tag passed to
# get_stream_url in GMusicPlaybackProvider.translate_uri.
BITRATES = {
    128: "low",
    160: "med",
    320: "hi",
}
class GMusicPlaybackProvider(backend.PlaybackProvider):
    """Playback provider that resolves gmusic track URIs to stream URLs."""

    def translate_uri(self, uri):
        """Return a streaming URL for the track identified by *uri*.

        The track id is the last colon-separated segment of the URI; the
        stream quality is derived from the configured bitrate.
        """
        quality = BITRATES[self.backend.config["gmusic"]["bitrate"]]
        track_id = uri.rsplit(":")[-1]
        stream_uri = self.backend.session.get_stream_url(
            track_id, quality=quality
        )
        logger.debug("Translated: %s -> %s", uri, stream_uri)
        return stream_uri
|
from discovery_imaging_utils.reports.qc import ind_functional_qc
from discovery_imaging_utils.reports.qc import ind_group_functional_qc
from discovery_imaging_utils.reports.qc import ind_group_structural_qc
from discovery_imaging_utils.reports.qc import ind_structural_qc
from discovery_imaging_utils.reports.qc import shared_tools
from discovery_imaging_utils.reports.qc import visualizations
from discovery_imaging_utils.reports.qc import alignment_metrics
|
from abc import abstractmethod, ABC
class AnsatzGenerator(ABC):
    """Abstract base class inherited by iPOPO bundles that build quantum
    circuit ansatze for quantum computation.

    Concrete subclasses must implement generate() and return an XACC
    Intermediate Representation (IR) Function instance expressing the
    composition of instructions to be executed.
    """

    @abstractmethod
    def generate(self, inputParams):
        """Build and return the ansatz for the given input parameters."""
        pass
|
"""Module for handling internationalisation in URL patterns."""
from django.urls import LocalePrefixPattern, URLResolver
from django.conf import settings
from django.utils.translation import activate, get_language
class ActiveLocalePrefixPattern(LocalePrefixPattern):
    """Patched LocalePrefixPattern that only honours activated languages."""

    @property
    def language_prefix(self):
        """Return the URL prefix for the current language.

        Falls back to settings.LANGUAGE_CODE when the current language is
        not listed in settings.ACTIVE_LANGUAGES, and returns an empty
        prefix for the default language when prefix_default_language is
        False.
        """
        code = get_language() or settings.LANGUAGE_CODE
        active_codes = {active for active, _ in settings.ACTIVE_LANGUAGES}
        if code not in active_codes:
            code = settings.LANGUAGE_CODE
        activate(code)
        if not self.prefix_default_language and code == settings.LANGUAGE_CODE:
            return ''
        return '%s/' % code
def i18n_patterns(*urls, prefix_default_language=True):
    """Wrap *urls* in a resolver that prefixes the active language code.

    May only be used in the root URLconf, not in an included URLconf.
    Returns the patterns unwrapped when USE_I18N is disabled.
    """
    if not settings.USE_I18N:
        return list(urls)
    pattern = ActiveLocalePrefixPattern(
        prefix_default_language=prefix_default_language)
    return [URLResolver(pattern, list(urls))]
|
import os
import re
import traceback
from abc import abstractmethod
from pydoc import locate
from config.Config import Config
from engine.component.TemplateModuleComponent import TemplateModuleComponent
from enums.Architectures import Arch
from enums.Language import Language
class ModuleNotCompatibleException(Exception):
    """Raised when a template module is not compatible with the request."""
class ModuleNotLoadableException(Exception):
    """Raised when a template module cannot be loaded."""
class TemplateModule:
    """Base class for template modules.

    Holds a module's components, libraries and build metadata; concrete
    subclasses implement generate() and build().
    """
    def __init__(self, name: str = None, arch=Arch.x64, libraries: list = None, components: list = None):
        self.components = components if components else []
        self.libraries = libraries if libraries else []
        self.name = name
        self.arch = arch
        # NOTE(review): never assigned here -- presumably an ordering hint
        # set by subclasses or the engine; confirm.
        self.order = None
        self.compile = False
        self.filter_string = ""
        # When False, from_name() refuses to return an instance.
        self.loadable = True
    def add_component(self, component):
        # Append a component (see TemplateModuleComponent) to this module.
        self.components.append(component)
    @abstractmethod
    def generate(self, **kwargs):
        pass
    @abstractmethod
    def build(self, **kwargs):
        pass
    @staticmethod
    def all_modules(init=False):
        """List the module names found in the configured MODULES directory.

        When init is True, returns instantiated modules (built with the
        fixed default kwargs below) instead of their snake_case names.
        """
        kwargs = {
            "language": Language.CSHARP,
            "seconds": 1,
            "dinvoke": True,
            "process": "",
            "pinject": True,
            "arch": Arch.x64
        }
        modules_path = str(Config().get_path("DIRECTORIES", "MODULES"))
        # Every file in the modules directory counts, except the
        # framework's own infrastructure modules listed below.
        all_files = [
            f for f in
            os.listdir(modules_path)
            if os.path.isfile(os.path.join(modules_path, f))
            and f not in [
                "TemplateModule.py",
                "__init__.py",
                "AdditionalSourceModule.py",
                "AssemblyInfoModule.py",
                "EncoderModule.py",
                "PowerShellModule.py",
            ]
        ]
        # "SomeThingModule.py" -> "some_thing": split CamelCase at the
        # capitals, join with underscores, lowercase.
        module_names = ["_".join(re.sub(r"([A-Z])", r" \1", f.replace("Module.py", "")).split()).lower() for f in
                        all_files]
        if not init:
            return module_names
        return [TemplateModule.from_name(m, kwargs=kwargs) for m in module_names]
    @staticmethod
    def from_name(name, **kwargs):
        """Locate and instantiate a module class from its snake_case name.

        Raises ModuleNotCompatibleException / ModuleNotLoadableException
        for those specific failures; any other error silently yields None
        (see the broad except below).
        """
        try:
            # "some_thing" -> "engine.modules.SomeThingModule.SomeThingModule"
            _module_name = "".join([n.capitalize() for n in str(name).split("_")])
            _class_string = f"engine.modules.{_module_name}Module.{_module_name}Module"
            # print(_class_string)
            _class = locate(_class_string)
            _instance = _class(kwargs=kwargs['kwargs'])
            if not _instance.loadable:
                raise ModuleNotLoadableException()
            return _instance
        except ModuleNotCompatibleException:
            raise ModuleNotCompatibleException()
        except TypeError as e:
            # A constructor that does not accept 'kwargs' is treated as
            # not loadable; other TypeErrors fall through and return None.
            if str(e).find("unexpected keyword argument 'kwargs'") > -1:
                raise ModuleNotLoadableException()
        except Exception as e:
            # NOTE(review): broad swallow -- any other failure (including
            # locate() returning None) silently yields None.  The
            # commented-out traceback below is kept for debugging.
            # traceback.print_exc()
            pass
|
import setuptools

# PyPI doesn't seem to like .rst
# with open("README.rst", "r") as file:
#     long_description = file.read()
long_description = (
    "Easily protect your serialized class objects with secure encryption. "
    "Kosher Pickles provides a Mixin you can use in any classes you wish to protect."
)

setuptools.setup(
    name="kosher",
    version="0.1",
    scripts=["mixin.py"],
    author="Forest Mars",
    author_email="themarsgroup+pypi@gmail.com",
    description="Encrypted Pickles",
    long_description=long_description,
    long_description_content_type="text/markdown",
    url="https://github.com/forestmars/kosher",
    packages=setuptools.find_packages(),
    classifiers=[
        "Programming Language :: Python :: 3",
        "Programming Language :: Python :: 3.6",
        "Programming Language :: Python :: 3.7",
        "Programming Language :: Python :: 3.8",
        "License :: OSI Approved :: Apache Software License",
        "Operating System :: OS Independent",
    ],
)
|
# Greet the user by name; known names get a personalised greeting.
nome = input("Olá, qual o seu nome?\n")
if nome in ("Mário", "José"):
    print("Oi, %s!" % nome)
elif nome == "Lucas":
    print("Olá, %s" % nome)
else:
    print("Olá, visitante!")
|
from enum import Enum
import basicTypes
class MainOp(Enum):
    """Primary MIPS opcode values (instruction bits 31-26).

    Opcodes 0 (SPECIAL), 1 (REGIMM) and 16-18 (coprocessors) are absent here;
    Instruction.__init__ decodes those further via RegOp / SpecialOp /
    CopOp / FloatOp.
    """
    J = 2
    JAL = 3
    BEQ = 4
    BNE = 5
    BLEZ = 6
    BGTZ = 7
    ADDI = 8
    ADDIU = 9
    SLTI = 10
    SLTIU = 11
    ANDI = 12
    ORI = 13
    XORI = 14
    LUI = 15
    BEQL = 20
    BNEL = 21
    BLEZL = 22
    BGTZL = 23
    LB = 32
    LH = 33
    LWL = 34
    LW = 35
    LBU = 36
    LHU = 37
    LWR = 38
    SB = 40
    SH = 41
    SWL = 42
    SW = 43
    SWR = 46
    CACHE = 47
    LL = 48
    LWC1 = 49
    LWC2 = 50
    PREF = 51
    LDC1 = 53
    LDC2 = 54
    SC = 56
    SWC1 = 57
    SWC2 = 58
    SDC1 = 61
    SDC2 = 62
class RegOp(Enum):
    """SPECIAL (main opcode 0) instructions, keyed by the function field
    (instruction bits 5-0)."""
    SLL = 0
    SRL = 2
    SRA = 3
    SLLV = 4
    SRLV = 6
    SRAV = 7
    JR = 8
    JALR = 9
    MOVZ = 10
    MOVN = 11
    SYSCALL = 12
    BREAK = 13
    SYNC = 15
    MFHI = 16
    MTHI = 17
    MFLO = 18
    MTLO = 19
    MULT = 24
    MULTU = 25
    DIV = 26
    DIVU = 27
    ADD = 32
    ADDU = 33
    SUB = 34
    SUBU = 35
    AND = 36
    OR = 37
    XOR = 38
    NOR = 39
    SLT = 42
    SLTU = 43
class CopOp(Enum):
    """Coprocessor operations.  MFC/CFC/MTC/CTC come straight from the rs
    field; the BC* branch variants are stored offset by +10 (see how
    Instruction.__init__ decodes rs == 8)."""
    MFC = 0
    CFC = 2
    MTC = 4
    CTC = 6
    BCF = 10
    BCT = 11
    BCFL = 12
    BCTL = 13
class FloatOp(Enum):
    """COP1 (FPU) operations, keyed by the function field (bits 5-0).
    The precision/format (S, D, W) is tracked separately in
    Instruction.fmt."""
    ADD = 0
    SUB = 1
    MUL = 2
    DIV = 3
    SQRT = 4
    ABS = 5
    MOV = 6
    NEG = 7
    ROUND_W = 12
    TRUNC_W = 13
    CEIL_W = 14
    FLOOR_W = 15
    CVT_S = 32
    CVT_D = 33
    CVT_W = 36
    C_F = 48
    C_UN = 49
    C_EQ = 50
    C_UEQ = 51
    C_OLT = 52
    C_ULT = 53
    C_OLE = 54
    C_ULE = 55
    C_SF = 56
    C_NGLE = 57
    C_SEQ = 58
    C_NGL = 59
    C_LT = 60
    C_NGE = 61
    C_LE = 62
    C_NGT = 63
class SpecialOp(Enum):
    """NOP plus the REGIMM (main opcode 1) branches.  REGIMM rt values are
    stored offset by +10 (Instruction.__init__ builds SpecialOp(rt+10))."""
    NOP = 0
    BLTZ = 10
    BGEZ = 11
    BLTZL = 12
    BGEZL = 13
class Register(Enum):
    """The 32 MIPS general-purpose registers by conventional ABI name;
    each member's value is its register number (0-31)."""
    R0, AT, V0, V1, A0, A1, A2, A3 = range(8)
    T0, T1, T2, T3, T4, T5, T6, T7 = range(8, 16)
    S0, S1, S2, S3, S4, S5, S6, S7 = range(16, 24)
    T8, T9, K0, K1, GP, SP, S8, RA = range(24, 32)
# FPU registers F0..F31 (value = register number).  Built with the Enum
# functional API instead of the previous exec() string hack; explicit
# (name, value) pairs keep F0 at value 0.  Works on Python 3.4+.
FloatRegister = Enum('FloatRegister', [('F%d' % i, i) for i in range(32)])
SpecialRegister = Enum('SpecialRegister', 'Compare MultLo MultHi')
class Instruction:
    """Decoded form of one 32-bit MIPS instruction word.

    ``__init__`` classifies the raw word into one of the opcode enums above
    and extracts the operand fields relevant to that opcode; ``__str__``
    renders a disassembly-style string chosen via the *_format sets below.
    """
    # Every opcode that is a (conditional) branch, across all enums.
    branchOPs = set([MainOp[x] for x in "BEQ BNE BLEZ BGTZ BEQL BNEL BLEZL BGTZL".split()] + [CopOp[x] for x in "BCF BCT BCFL BCTL".split()])
    # Operand-format groups; __str__ picks a rendering based on membership.
    J_format = set([MainOp.J,MainOp.JAL])
    I_format = set([CopOp.BCF,CopOp.BCT,CopOp.BCFL,CopOp.BCTL])
    D_format = set([RegOp.MFLO, RegOp.MFHI])
    R_format = set([RegOp.JALR,RegOp.JR,RegOp.MFHI,RegOp.MTHI,RegOp.MFLO,RegOp.MTLO])
    RI_format = set([MainOp.LUI, MainOp.BLEZL,MainOp.BGTZL])
    SI_format = set([MainOp.BLEZ, MainOp.BGTZ, SpecialOp.BLTZ,SpecialOp.BGEZ,SpecialOp.BLTZL,SpecialOp.BGEZL])
    RR_format = set([RegOp.MULT,RegOp.MULTU,RegOp.DIV,RegOp.DIVU])
    RRI_format = set([MainOp[x] for x in "BEQ BNE ADDI ADDIU SLTI SLTIU ANDI ORI XORI BEQL BNEL".split()])
    RRS_format = set([RegOp[x] for x in "SLL SRL SRA".split()])
    RIR_format = set([MainOp[x] for x in "LB LH LWL LW LBU LHU LWR SB SH SWL SW SWR".split()])
    RRR_format = set([RegOp[x] for x in "SLLV SRLV SRAV ADD ADDU SUB SUBU AND OR XOR NOR SLT SLTU".split()])
    FIR_format = set([MainOp[x] for x in "LWC1 LWC2 LDC1 LDC2 SWC1 SWC2 SDC1 SDC2".split()])
    FF_format = set([FloatOp[x] for x in "SQRT ABS MOV NEG ROUND_W TRUNC_W CEIL_W FLOOR_W CVT_S CVT_D CVT_W".split()])
    FsF_format = set([FloatOp[x] for x in "C_EQ C_LT C_LE".split()])
    FFF_format = set([FloatOp[x] for x in "ADD SUB MUL DIV".split()])
    RF_format = set([CopOp.MFC,CopOp.CFC,CopOp.MTC,CopOp.CTC])

    def __init__(self, word):
        self.raw = word
        # Bit fields of the 32-bit word:
        #________********________********
        op = word >> 26         #111111..........................
        rs = (word>>21) & 0x1f  #......11111.....................
        rt = (word>>16) & 0x1f  #...........11111................
        rd = (word>>11) & 0x1f  #................11111...........
        imm = word & 0xffff     #................1111111111111111
        spec = word & 0x3f      #..........................111111
        try:
            self.opcode = MainOp(op)
        except ValueError:  # not a MainOp value: needs further specification
            if op == 0:
                # SPECIAL: decoded by the function field; all-zero word is NOP.
                if word == 0:
                    self.opcode = SpecialOp.NOP
                    return
                else:
                    self.opcode = RegOp(spec)
            elif op == 1:
                # REGIMM branches: rt selects the op (stored offset by +10).
                self.opcode = SpecialOp(rt+10)
                self.sourceReg = Register(rs)
                self.immediate = imm
                return
            elif op in [16,17,18]:
                # Coprocessor 0/1/2 space.
                self.cop = op - 16
                if rs == 16:
                    # Single-precision format (only COP1 supported here).
                    if self.cop == 0:
                        raise Exception("cop 0 mostly unimplemented")
                    elif self.cop == 1:
                        self.fmt = basicTypes.single
                        self.opcode = FloatOp(spec)
                    else:
                        raise Exception("cop > 1 unimplemented")
                elif rs == 17 and self.cop == 1:
                    # Double-precision format.
                    self.fmt = basicTypes.double
                    self.opcode = FloatOp(spec)
                elif rs == 20 and spec == 32:
                    # Word format: only CVT_S (spec == 32) is handled.
                    self.fmt = basicTypes.word
                    self.opcode = FloatOp(spec)
                elif rs == 8:
                    # Coprocessor branches (BC*): offset +10 to match CopOp.
                    self.opcode = CopOp(((word>>16) & 0x3)+10)
                    self.target = imm
                else:
                    # Register moves to/from the coprocessor (MFC/CFC/MTC/CTC).
                    self.opcode = CopOp(rs)
                    self.targetReg = Register(rt)
                    self.fs = FloatRegister(rd)
            else:
                raise Exception("op " + str(op) + " unimplemented",hex(word))
        # Populate operand attributes for all opcodes that did not return above.
        if isinstance(self.opcode, FloatOp):
            self.ft = FloatRegister(rt)
            self.fs = FloatRegister(rd)
            self.fd = FloatRegister((word>>6) & 0x1f)
        elif self.opcode in [MainOp.J, MainOp.JAL]:
            # 26-bit instruction index scaled to a byte offset.
            self.target = 4*(word & 0x3ffffff)
        elif self.opcode in Instruction.FIR_format:
            self.sourceReg = Register(rs)
            self.targetReg = FloatRegister(rt)
            self.immediate = imm
        elif isinstance(self.opcode, MainOp):
            self.sourceReg = Register(rs)
            self.targetReg = Register(rt)
            self.immediate = imm
        elif self.opcode in [RegOp.SLL,RegOp.SRL,RegOp.SRA]:
            # Constant shifts carry the shift amount in bits 10-6.
            self.targetReg = Register(rt)
            self.destReg = Register(rd)
            self.shift = (word>>6) & 0x1f
        elif isinstance(self.opcode, RegOp) or isinstance(self.opcode, CopOp):
            self.sourceReg = Register(rs)
            self.targetReg = Register(rt)
            self.destReg = Register(rd)
        elif isinstance(self.opcode, SpecialOp):
            pass
        else:
            raise Exception(str(self.opcode) + " is uncategorized")

    def __repr__(self):
        return "Instruction(raw = %r, opcode = %r)" % (self.raw, self.opcode)

    def __str__(self):
        # Render according to the operand-format group of the opcode.
        if self.opcode in Instruction.J_format:
            return '%s %#X' % (self.opcode.name, self.target)
        if self.opcode in Instruction.D_format:
            return '%s %s' % (self.opcode.name, self.destReg.name)
        if self.opcode in Instruction.R_format:
            return '%s %s' % (self.opcode.name, self.sourceReg.name)
        if self.opcode in Instruction.I_format:
            return '%s%d %#X' % (self.opcode.name, self.cop, self.target)
        if self.opcode in Instruction.RI_format:
            return '%s %s, %#x' % (self.opcode.name, self.targetReg.name, self.immediate)
        if self.opcode in Instruction.SI_format:
            return '%s %s, %#x' % (self.opcode.name, self.sourceReg.name, self.immediate)
        if self.opcode in Instruction.RR_format:
            return '%s %s, %s' % (self.opcode.name, self.sourceReg.name, self.targetReg.name)
        if self.opcode in Instruction.RIR_format:
            return '%s %s, %#x (%s)' % (self.opcode.name, self.targetReg.name, self.immediate, self.sourceReg.name)
        if self.opcode in Instruction.RRI_format:
            return '%s %s, %s, %#x' % (self.opcode.name, self.targetReg.name, self.sourceReg.name, self.immediate)
        if self.opcode in Instruction.RRR_format:
            return '%s %s, %s, %s' % (self.opcode.name, self.destReg.name, self.sourceReg.name, self.targetReg.name)
        if self.opcode in Instruction.RRS_format:
            return '%s %s, %s, %#x' % (self.opcode.name, self.destReg.name, self.targetReg.name, self.shift)
        if self.opcode in Instruction.FIR_format:
            return '%s %s, %#x (%s)' % (self.opcode.name, self.targetReg.name, self.immediate, self.sourceReg.name)
        if self.opcode in Instruction.FF_format:
            return '%s_%s %s, %s' % (self.opcode.name, self.fmt.name[0].upper(), self.fd.name, self.ft.name)
        if self.opcode in Instruction.FsF_format:
            return '%s_%s %s, %s' % (self.opcode.name, self.fmt.name[0].upper(), self.fs.name, self.ft.name)
        if self.opcode in Instruction.FFF_format:
            return '%s_S %s, %s, %s' % (self.opcode.name, self.fd.name, self.fs.name, self.ft.name)
        if self.opcode in Instruction.RF_format:
            return '%s%d %s, %s' % (self.opcode.name, self.cop, self.targetReg.name, self.fs.name)
        return self.opcode.name
def isBranch(instr):
    """Return True when the raw 32-bit word *instr* encodes any branch.

    Covers the main-opcode branches (BEQ/BNE/... and their likely forms),
    the REGIMM branches (op == 1, rt in 0..3), and the coprocessor-1
    branches (op == 16, rs == 8).
    """
    op = instr >> 26
    try:
        if MainOp(op) in Instruction.branchOPs:
            return True
    except ValueError:
        # op is not a MainOp value; fall through to the special cases.
        # (Was a bare `except:`, which also swallowed KeyboardInterrupt etc.)
        pass
    if op == 1:
        # REGIMM: rt 0-3 are BLTZ/BGEZ/BLTZL/BGEZL.
        return ((instr >> 16) & 0x1f) in [0, 1, 2, 3]
    return op == 16 and (instr >> 21) & 0x1f == 8
|
import pathlib
import jsonlines as jl
import progressbar as pb
import typing as t
import utils as u
from typeguard import typechecked
@typechecked
def list_documents(jsonl_in: pathlib.Path) -> t.Iterator[dict]:
    """
    Yields every document stored in the `JSONL` file, one at a time

    Parameters
    ----------
    jsonl_in : pathlib.Path
        The JSONL file containing all the documents
    """
    encoding = u.guess_encoding(jsonl_in)
    with open(jsonl_in, 'r', encoding = encoding) as fp, jl.Reader(fp) as reader:
        yield from reader
@typechecked
def save_documents(jsonl_out: pathlib.Path, documents: t.Iterator[dict]) -> None:
    """
    Saves the documents to a `JSONL` file, showing progress on the console

    Parameters
    ----------
    jsonl_out : pathlib.Path
        The JSONL file to contain all the documents
    documents : Iterator[dict]
        The JSON documents to save
    """
    widgets = [
        'Saving JSONL # ', pb.Counter(), ' ', pb.Timer(), ' ',
        pb.BouncingBar(marker = '.', left = '[', right = ']'),
    ]
    written = 0
    with pb.ProgressBar(widgets = widgets) as bar:
        with open(jsonl_out, 'w', encoding = 'utf-8') as fp:
            with jl.Writer(fp, compact = True, sort_keys = True) as writer:
                for document in documents:
                    writer.write(document)
                    written += 1
                    bar.update(written)
|
# encoding: utf-8
# Copyright 2011 Tree.io Limited
# This file is part of Treeio.
# License www.tree.io/license
#-*- coding: utf-8 -*-
import handlers
from django.conf.urls.defaults import *
from treeio.core.api.auth import auth_engine
from treeio.core.api.doc import documentation_view
from treeio.core.api.resource import CsrfExemptResource
# Keyword arguments shared by every resource: wire in the API auth engine.
ad = { 'authentication': auth_engine }

# Knowledge resources: CSRF-exempt REST endpoints wrapping the handler classes.
folderResource = CsrfExemptResource(handler = handlers.KnowledgeFolderHandler, **ad)
categoryResource = CsrfExemptResource(handler = handlers.KnowledgeCategoryHandler, **ad)
itemResource = CsrfExemptResource(handler = handlers.KnowledgeItemHandler, **ad)

# URL routes for the knowledge API; the object_ptr capture selects one object.
urlpatterns = patterns('',
    # Knowledge
    url(r'^doc$', documentation_view, kwargs={'module': handlers}, name="api_knowledge_doc"),
    url(r'^folders$', folderResource, name = 'api_knowledge_folders'),
    url(r'^folder/(?P<object_ptr>\d+)', folderResource, name = 'api_knowledge_folders'),
    url(r'^categories$', categoryResource, name = 'api_knowledge_categories'),
    url(r'^category/(?P<object_ptr>\d+)', categoryResource, name = 'api_knowledge_categories'),
    url(r'^items$', itemResource, name = 'api_knowledge_items'),
    url(r'^item/(?P<object_ptr>\d+)', itemResource, name = 'api_knowledge_items'),
)
|
# Copyright 2019 The SQLNet Company GmbH
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
import datetime
import json
import random
import string
import time
import pandas as pd
import getml.communication as comm
import getml.engine as engine
import getml.loss_functions as loss_functions
from .modutils import _parse_placeholders
# ------------------------------------------------------------------------------
class RelboostModel(object):
"""
Generalization of the XGBoost algorithm to relational data.
RelboostModel automates feature engineering for relational data and time series.
It is based on a generalization of the XGBoost algorithm to relational data,
hence the name.
Args:
population (:class:`~getml.models.Placeholder`): Population table (the main table)
peripheral (List[:class:`~getml.models.Placeholder`]): Peripheral tables
name (str): Name of the model. Defaults to `None`.
feature_selector (:class:`~getml.predictors`): Predictor used for feature selection
predictor (:class:`~getml.predictors`): Predictor used for the prediction
units (dict): Mapping of column names to units. All columns containing
that column name will be assigned the unit. Columns containing the same
unit can be directly compared.
loss_function (:class:`~getml.loss_functions`): Loss function to be
used to optimize your features. We recommend
:class:`~getml.loss_functions.SquareLoss` for regression problems and
:class:`~loss_functions.CrossEntropyLoss` for classification
problems. Default: :class:`~getml.loss_functions.SquareLoss`.
        delta_t (float): Frequency with which lag variables will be explored in a time
            series setting. When set to 0.0, there will be no lag variables.
            Default: 0.0.
gamma (float): Minimum improvement required for a split. Default: 0.0.
include_categorical (bool): Whether you want to pass the categorical columns from the population table to the predictors.
Default: False.
max_depth (int): Maximum depth of the trees. Default: 3.
num_features (int): The number of features to be trained. Default: 100.
share_selected_features (float): The maximum share of features you would like to
select. Requires you to pass a *feature_selector*. Any features with a
feature importance of zero will be removed. Therefore,
the number of features actually selected can be smaller than implied by
*share_selected_features*. When set to 0.0,
no feature selection will be conducted. Default: 0.0.
num_subfeatures (int): The number of subfeatures you would like to
extract (for snowflake data model only). Default: 100.
reg_lambda (float): L2 regularization on the weights. This is probably one of the most important hyperparameters
in the *RelboostModel*. Default: 0.01.
seed (int): The seed used for initializing the random number generator. Default: 5843.
shrinkage (float): Learning rate to be used for the boosting algorithm. Default: 0.3.
silent (bool): Controls the logging during training. Default: False.
subsample (float): Subsample ratio during training. Set to 0.0 for no subsampling. Default: 1.0.
target_num (int): Signifies which of the targets to use, since RelboostModel does not
support multiple targets. Default: 0.
use_timestamps (bool): Whether you want to ignore all elements in the peripheral tables where the
time stamp is greater than the time stamp in the corresponding element of the population table.
            In other words, this determines whether you want to add the condition "t2.time_stamp <= t1.time_stamp" at
the very end of each feature.
We strongly recommend that you keep the default value - it is the golden rule of predictive analytics!
Default: True.
"""
# -------------------------------------------------------------------------
def __init__(self, name=None, **params):
self.name = name or \
datetime.datetime.now().isoformat().split(".")[0].replace(':', '-') + "-relboost"
self.feature_selector = None
self.predictor = None
self.thisptr = dict()
self.thisptr["type_"] = "RelboostModel"
self.thisptr["name_"] = self.name
self.params = {
'population': None,
'peripheral': None,
'units': dict(),
'send': False,
'delta_t': 0.0,
'feature_selector': None,
'gamma': 0.0,
'include_categorical': False,
'loss_function': "SquareLoss",
'max_depth': 3,
'min_num_samples': 200,
'num_features': 100,
'num_subfeatures': 100,
'session_name': "",
'share_selected_features': 0.0,
'num_threads': 0,
'predictor': None,
'reg_lambda': 0.01,
'sampling_factor': 1.0,
'shrinkage': 0.3,
'seed': 5843,
'silent': False,
'target_num': 0,
'use_timestamps': True
}
self.set_params(params)
if self.params['send']:
self.send()
# -------------------------------------------------------------------------
def __close(self, s):
    """Ask the engine to close this model's session on socket *s*."""
    cmd = {"type_": "RelboostModel.close", "name_": self.name}
    comm.send_string(s, json.dumps(cmd))
    # The engine acknowledges with a fixed success string.
    msg = comm.recv_string(s)
    if msg != "Success!":
        raise Exception(msg)
# -------------------------------------------------------------------------
def __fit(self, peripheral_data_frames, population_data_frame, s):
    """Send the complete fit command over *s* and wait for training to end."""
    cmd = {
        "type_": "RelboostModel.fit",
        "name_": self.name,
        "peripheral_names_": [df.name for df in peripheral_data_frames],
        "population_name_": population_data_frame.name,
    }
    comm.send_string(s, json.dumps(cmd))
    # Block until the engine reports the training result.
    begin = time.time()
    print("Loaded data. Features are now being trained...")
    msg = comm.recv_string(s)
    end = time.time()
    if "Trained" not in msg:
        raise Exception(msg)
    print(msg)
    self.__print_time_taken(begin, end, "Time taken: ")
# -------------------------------------------------------------------------
def __load_peripheral_tables(self, peripheral_tables, s):
    """Upload the peripheral tables to the engine over socket *s*.

    Tables that already are :class:`engine.DataFrame` objects are passed
    through untouched; anything else is wrapped in a temporary engine
    DataFrame whose schema comes from the matching peripheral placeholder
    and then sent to the engine.

    Returns:
        list: One engine DataFrame per input table, in order.
    """
    peripheral_data_frames = []
    placeholders = self.params["peripheral"]
    for i, peripheral_table in enumerate(peripheral_tables):
        if type(peripheral_table) == engine.DataFrame:
            peripheral_data_frames.append(peripheral_table)
        else:
            # Index the matching placeholder directly instead of rebuilding
            # full schema lists on every iteration (previously O(n^2), and
            # a never-used names list was built as well).
            per = placeholders[i].thisptr
            data_frame = engine.DataFrame(
                name=self.__make_random_name(),
                join_keys=per["join_keys_"],
                time_stamps=per["time_stamps_"],
                categorical=per["categorical_"],
                discrete=per["discrete_"],
                numerical=per["numerical_"],
                targets=[],
                units=self.params['units']
            )
            peripheral_data_frames.append(data_frame)
            data_frame.send(
                data_frame=peripheral_table,
                sock=s
            )
    return peripheral_data_frames
# -------------------------------------------------------------------------
def __load_population_table(self, population_table, targets, s):
    """Upload the population table to the engine (pass-through when it is
    already an engine DataFrame)."""
    if type(population_table) == engine.DataFrame:
        return population_table
    # Wrap the raw table using the schema of the population placeholder.
    pop = self.params["population"].thisptr
    population_data_frame = engine.DataFrame(
        name=self.__make_random_name(),
        join_keys=pop["join_keys_"],
        time_stamps=pop["time_stamps_"],
        categorical=pop["categorical_"],
        discrete=pop["discrete_"],
        numerical=pop["numerical_"],
        targets=targets,
        units=self.params["units"]
    )
    population_data_frame.send(
        data_frame=population_table,
        sock=s
    )
    return population_data_frame
# -------------------------------------------------------------------------
def __make_hyperparameters(self):
hyperparameters = dict()
for key, value in self.params.items():
if key == "population" or key == "peripheral":
continue
hyperparameters[key + "_"] = value
return hyperparameters
# -------------------------------------------------------------------------
def __make_random_name(self):
return "temp-" + ''.join(
random.choice(string.ascii_letters) for i in range(15)
)
# -------------------------------------------------------------------------
def __print_time_taken(self, begin, end, msg):
seconds = end - begin
hours = int(seconds / 3600)
seconds -= float(hours * 3600)
minutes = int(seconds / 60)
seconds -= float(minutes * 60)
seconds = round(seconds, 6)
print(
msg + str(hours) + "h:" +
str(minutes) + "m:" + str(seconds)
)
print("")
# -------------------------------------------------------------------------
def __repr__(self):
return self.get_params().__repr__()
# -------------------------------------------------------------------------
def __save(self):
    """
    Saves the model as a JSON file on the engine side.
    """
    comm.send({"type_": "RelboostModel.save", "name_": self.name})
# -------------------------------------------------------------------------
def __score(self, yhat, y):
    """
    Returns the score for a set of predictions.

    **yhat**: Predictions.

    **y**: Targets.
    """
    # Build the command and look the model up on the engine.
    cmd = dict()
    cmd["type_"] = "RelboostModel.score"
    cmd["name_"] = self.name
    # Establish connection with the getML engine and send command.
    s = comm.send_and_receive_socket(cmd)
    msg = comm.recv_string(s)
    if msg != "Found!":
        raise Exception(msg)
    # Ship predictions and targets; the order (yhat first) is part of the
    # wire protocol.
    comm.send_matrix(s, yhat)
    comm.send_matrix(s, y)
    msg = comm.recv_string(s)
    # Ensure success, then receive the scores as a JSON string.
    if msg != "Success!":
        raise Exception(msg)
    scores = comm.recv_string(s)
    s.close()
    return json.loads(scores)
# -------------------------------------------------------------------------
def __transform(
        self,
        peripheral_data_frames,
        population_data_frame,
        s,
        score=False,
        predict=False,
        table_name=""
):
    """Run the feature transformation on the engine via socket *s*.

    Returns the resulting matrix, or None when *table_name* is non-empty
    (the engine then writes the features into the data base instead).
    """
    cmd = {
        "type_": "RelboostModel.transform",
        "name_": self.name,
        "score_": score,
        "predict_": predict,
        "peripheral_names_": [df.name for df in peripheral_data_frames],
        "population_name_": population_data_frame.name,
        "table_name_": table_name,
    }
    comm.send_string(s, json.dumps(cmd))
    msg = comm.recv_string(s)
    if msg != "Success!":
        raise Exception(msg)
    # The engine only streams a matrix back when not writing to a table.
    return comm.recv_matrix(s) if table_name == "" else None
# -------------------------------------------------------------------------
def copy(self, other):
    """
    Copies the parameters and hyperparameters from another model.

    Args:
        other (:class:`getml.models.RelboostModel`): The other model.
    """
    comm.send({
        "type_": "RelboostModel.copy",
        "name_": self.name,
        "other_": other.name,
    })
    # Pull the copied state back into this Python handle.
    self.refresh()
# -------------------------------------------------------------------------
def delete(self, mem_only=False):
    """
    Deletes the model from the engine.

    Args:
        mem_only (bool): If True, the model is removed from memory
            only, but not from disk. Default: False.
    """
    comm.send({
        "type_": "RelboostModel.delete",
        "name_": self.name,
        "mem_only_": mem_only,
    })
# -------------------------------------------------------------------------
def fit(
        self,
        population_table,
        peripheral_tables
):
    """
    Fits the model.

    Args:
        population_table (:class:`pandas.DataFrame` or :class:`~getml.engine.DataFrame`):
            Population table containing the target.
        peripheral_tables (List[:class:`pandas.DataFrame` or :class:`~getml.engine.DataFrame`]):
            Peripheral tables.
            The peripheral tables have to be passed in the exact same order as their
            corresponding placeholders!

    Returns:
        The refreshed model (self).
    """
    # Look the model up on the engine; one socket is used for the whole
    # fit session.
    cmd = dict()
    cmd["type_"] = "RelboostModel.fit"
    cmd["name_"] = self.name
    s = comm.send_and_receive_socket(cmd)
    msg = comm.recv_string(s)
    if msg != "Found!":
        raise Exception(msg)
    # Falls back to tables registered earlier (presumably via
    # set_peripheral_tables) when none are passed -- TODO confirm the
    # 'peripheral_tables' key exists in params at that point.
    peripheral_tables = peripheral_tables or self.params['peripheral_tables']
    peripheral_data_frames = self.__load_peripheral_tables(
        peripheral_tables,
        s
    )
    # Upload the population table with all targets declared in the placeholder.
    targets = self.params['population'].thisptr["targets_"]
    population_data_frame = self.__load_population_table(
        population_table,
        targets,
        s
    )
    # The __fit(...) helper does the actual fitting.
    self.__fit(peripheral_data_frames, population_data_frame, s)
    # Close the engine-side session, persist, and re-sync this handle.
    self.__close(s)
    s.close()
    self.__save()
    return self.refresh()
# -------------------------------------------------------------------------
def get_params(self):
"""
Returns the hyperparameters of the model.
"""
return self.params
# -------------------------------------------------------------------------
def get_predictor(self):
"""
Returns the predictor of the model.
"""
return self.params["predictor"]
# -------------------------------------------------------------------------
def load(self):
    """
    Loads the model from a JSON file on the engine side.
    """
    comm.send({"type_": "RelboostModel.load", "name_": self.name})
    # Re-sync this Python handle with the freshly loaded engine state.
    return self.refresh()
# -------------------------------------------------------------------------
def predict(
        self,
        population_table,
        peripheral_tables=None,
        table_name=""
):
    """
    Returns the predictions generated by the model or writes them into a data base.
    Requires that you have passed a predictor.

    Args:
        population_table (:class:`pandas.DataFrame` or :class:`~getml.engine.DataFrame`):
            Population table. Targets will be ignored
        peripheral_tables (List[:class:`pandas.DataFrame` or :class:`~getml.engine.DataFrame`]):
            Peripheral tables.
            The peripheral tables have to be passed in the exact same order as their
            corresponding placeholders!
        table_name (str): If not an empty string, the resulting features
            will be written into the data base, instead of returning them.
    """
    # Look the model up on the engine ("transform" covers prediction too).
    cmd = dict()
    cmd["type_"] = "RelboostModel.transform"
    cmd["name_"] = self.name
    s = comm.send_and_receive_socket(cmd)
    msg = comm.recv_string(s)
    if msg != "Found!":
        raise Exception(msg)
    # Falls back to tables registered earlier (presumably via
    # set_peripheral_tables) when none are passed -- TODO confirm.
    peripheral_tables = peripheral_tables or self.params['peripheral_tables']
    peripheral_data_frames = self.__load_peripheral_tables(
        peripheral_tables,
        s
    )
    # Only keep targets that actually appear in the passed table; engine
    # DataFrames are assumed to already carry what they need.
    if type(population_table) == engine.DataFrame:
        targets = []
    else:
        targets = [
            elem for elem in self.params['population'].thisptr["targets_"]
            if elem in population_table.columns
        ]
    population_data_frame = self.__load_population_table(
        population_table,
        targets,
        s
    )
    # Get predictions as a numpy array (or None when writing to a table).
    yhat = self.__transform(
        peripheral_data_frames,
        population_data_frame,
        s,
        predict=True,
        score=False,
        table_name=table_name
    )
    # Close the engine-side session and the socket.
    self.__close(s)
    s.close()
    return yhat
# -------------------------------------------------------------------------
def refresh(self):
    """
    Refreshes the hyperparameters and placeholders in Python based on a
    model already loaded in the engine.
    """
    cmd = dict()
    cmd["type_"] = "RelboostModel.refresh"
    cmd["name_"] = self.name
    s = comm.send_and_receive_socket(cmd)
    # A JSON payload signals success; anything else is an error message.
    msg = comm.recv_string(s)
    if msg[0] != '{':
        raise Exception(msg)
    s.close()
    # Parse results: hyperparameters plus the placeholder definitions.
    json_obj = json.loads(msg)
    self.set_params(json_obj["hyperparameters_"])
    self.params = _parse_placeholders(
        json_obj,
        self.params
    )
    return self
# -------------------------------------------------------------------------
def score(
        self,
        population_table,
        peripheral_tables=None
):
    """
    Calculates scores for the model.

    Args:
        population_table (:class:`pandas.DataFrame` or :class:`~getml.engine.DataFrame`):
            Population table. Targets will be ignored
        peripheral_tables (List[:class:`pandas.DataFrame` or :class:`~getml.engine.DataFrame`]):
            Peripheral tables.
            The peripheral tables have to be passed in the exact same order as their
            corresponding placeholders!
    """
    # Look the model up on the engine ("transform" covers scoring too).
    cmd = dict()
    cmd["type_"] = "RelboostModel.transform"
    cmd["name_"] = self.name
    s = comm.send_and_receive_socket(cmd)
    msg = comm.recv_string(s)
    if msg != "Found!":
        raise Exception(msg)
    # Falls back to tables registered earlier (presumably via
    # set_peripheral_tables) when none are passed -- TODO confirm.
    peripheral_tables = peripheral_tables or self.params['peripheral_tables']
    peripheral_data_frames = self.__load_peripheral_tables(
        peripheral_tables,
        s
    )
    # Only keep targets that actually appear in the passed table.
    if type(population_table) == engine.DataFrame:
        targets = []
    else:
        targets = [
            elem for elem in self.params['population'].thisptr["targets_"]
            if elem in population_table.columns
        ]
    population_data_frame = self.__load_population_table(
        population_table,
        targets,
        s
    )
    # Get predictions as a numpy array (score=True keeps engine-side stats).
    yhat = self.__transform(
        peripheral_data_frames,
        population_data_frame,
        s,
        predict=True,
        score=True)
    # Fetch the targets for the configured target column.
    colname = population_data_frame.target_names[self.params["target_num"]]
    y = population_data_frame.target(colname).get(s).ravel()
    # Close the engine-side session and the socket.
    self.__close(s)
    s.close()
    # Do the actual scoring, then persist the updated model.
    scores = self.__score(yhat, y)
    self.__save()
    return scores
# -------------------------------------------------------------------------
def send(self):
    """
    Send this RelboostModel to the getml engine.

    Both the population and the peripheral placeholders must have been
    set beforehand.

    Raises:
        Exception: If the population or peripheral placeholders are None.

    Returns:
        The model itself, to allow call chaining.
    """
    # Guard clauses: refuse to send an under-specified model.
    if self.params["population"] is None:
        raise Exception("Population cannot be None!")
    if self.params["peripheral"] is None:
        raise Exception("Peripheral cannot be None!")
    # Assemble the JSON command for the engine in one literal.
    cmd = {
        "name_": self.name,
        "type_": "RelboostModel",
        "population_": self.params["population"].thisptr,
        "peripheral_": [
            placeholder.thisptr["name_"]
            for placeholder in self.params["peripheral"]
        ],
        "hyperparameters_": self.__make_hyperparameters(),
    }
    comm.send(cmd)
    return self
# -------------------------------------------------------------------------
def set_params(self, params=None, **kwargs):
    """
    Sets the hyperparameters of the model.

    Args:
        params (dict, optional): Hyperparameters that were returned by
            :func:`~getml.models.RelboostModel.get_params`. When given,
            any keyword arguments are ignored.
        **kwargs: Individual hyperparameters, used only when *params*
            is None.

    Raises:
        ValueError: If a key is not a valid hyperparameter name.

    Returns:
        The model itself, to allow call chaining.
    """
    # Keyword arguments are only considered when no dict is given.
    if params is not None:
        items = params.items()
    else:
        items = kwargs.items()
    valid_params = self.get_params()
    for key, value in items:
        # Keys returned by get_params() carry a trailing underscore.
        if key[-1] == "_":
            key = key[:-1]
        if key not in valid_params:
            raise ValueError(
                'Invalid parameter %s. ' %
                (key)
            )
        if key == "loss_function":
            # Accept either a loss-function object (with .thisptr) or a
            # plain type string. Narrowed from a bare `except:` so that
            # SystemExit/KeyboardInterrupt are no longer swallowed.
            try:
                self.params[key] = value.thisptr["type_"]
            except (AttributeError, KeyError, TypeError):
                self.params[key] = value
        elif key == "predictor":
            self.predictor = value
            # Store the serialized form; None when value is not a
            # predictor object (e.g. explicitly set to None).
            try:
                self.params[key] = value._getml_thisptr()
            except AttributeError:
                self.params[key] = None
        elif key == "feature_selector":
            self.feature_selector = value
            try:
                self.params[key] = value._getml_thisptr()
            except AttributeError:
                self.params[key] = None
        else:
            self.params[key] = value
    return self
# -------------------------------------------------------------------------
def set_peripheral_tables(self, peripheral_tables):
    """
    Sets the peripheral tables.

    This is very useful for establishing a pipeline: tables stored here
    are used as the fallback by other methods (e.g. ``transform``) when
    no explicit ``peripheral_tables`` argument is passed.

    Args:
        peripheral_tables (List[:class:`pandas.DataFrame` or :class:`~getml.engine.DataFrame`]):
            Peripheral tables.
            The peripheral tables have to be passed in the exact same order as their
            corresponding placeholders!
    """
    # Stored in params so sibling methods can read the default back.
    self.params['peripheral_tables'] = peripheral_tables
# -------------------------------------------------------------------------
def to_sql(self):
    """
    Extracts the SQL statements underlying the trained model.

    Returns:
        str: The SQL code received from the getML engine.

    Raises:
        Exception: If the model cannot be found on the engine.
    """
    # Build and send the JSON command.
    cmd = dict()
    cmd["type_"] = "RelboostModel.to_sql"
    cmd["name_"] = self.name
    s = comm.send_and_receive_socket(cmd)
    # FIX: previously the socket was only closed on the success path and
    # leaked whenever the model was not found on the engine.
    try:
        # Make sure the model exists on the getML engine.
        msg = comm.recv_string(s)
        if msg != "Found!":
            raise Exception(msg)
        # Receive the SQL code from the engine.
        sql = comm.recv_string(s)
    finally:
        s.close()
    return sql
# -------------------------------------------------------------------------
def transform(
        self,
        population_table,
        peripheral_tables=None,
        table_name=""
):
    """
    Returns the features learned by the model or writes them into a data base.

    Args:
        population_table (:class:`pandas.DataFrame` or :class:`~getml.engine.DataFrame`):
            Population table. Targets will be ignored.
        peripheral_tables (List[:class:`pandas.DataFrame` or :class:`~getml.engine.DataFrame`], optional):
            Peripheral tables.
            The peripheral tables have to be passed in the exact same order as their
            corresponding placeholders! When None, the tables previously
            registered via :meth:`set_peripheral_tables` are used.
        table_name (str): If not an empty string, the resulting features
            will be written into the data base, instead of returning them.

    Raises:
        Exception: If the model cannot be found on the getML engine.
    """
    # Prepare the command for the getML engine.
    cmd = dict()
    cmd["type_"] = "RelboostModel.transform"
    cmd["name_"] = self.name
    # Send the command and make sure the model has been found.
    s = comm.send_and_receive_socket(cmd)
    try:
        msg = comm.recv_string(s)
        if msg != "Found!":
            raise Exception(msg)
        # Load peripheral tables. Explicit None check: the previous
        # `peripheral_tables or ...` fallback silently replaced an
        # intentionally empty list with the stored default.
        if peripheral_tables is None:
            peripheral_tables = self.params['peripheral_tables']
        peripheral_data_frames = self.__load_peripheral_tables(
            peripheral_tables,
            s
        )
        # Load population table. isinstance() instead of a type()
        # comparison so subclasses of engine.DataFrame are handled too.
        if isinstance(population_table, engine.DataFrame):
            targets = []
        else:
            targets = [
                elem for elem in self.params['population'].thisptr["targets_"]
                if elem in population_table.columns
            ]
        population_data_frame = self.__load_population_table(
            population_table,
            targets,
            s
        )
        # Compute the features (or write them to the data base when
        # table_name is non-empty).
        y_hat = self.__transform(
            peripheral_data_frames,
            population_data_frame,
            s,
            table_name=table_name
        )
        self.__close(s)
    finally:
        # Always release the socket, even if the engine reports an error
        # (previously it leaked on every failure path).
        s.close()
    return y_hat
# ------------------------------------------------------------------------------
|
#coding=utf-8
# coding=utf-8
'''
Created on 2014-2-17
@author: ETHAN
'''
from doraemon.home.models import DicType,DicData
class DAL_DictValue(object):
    '''
    Data access layer for the dictionary tables (DicType / DicData).
    '''

    @staticmethod
    def getdatavaluebytype(datatypename):
        """Return the DicData queryset for the given type name, or None
        when no DicType with that name exists."""
        dicType = DicType.objects.filter(DicTypeName=datatypename).first()
        if dicType:
            return DicData.objects.filter(DicType_id=dicType.id)
        else:
            return None

    @staticmethod
    def getdatavaluebyid(dataid):
        """Return the DicData row with the given primary key.

        Raises DicData.DoesNotExist when the id is unknown (unchanged
        behavior of ``objects.get``)."""
        return DicData.objects.get(id=dataid)

    @staticmethod
    def getdatavaluebydataname(datatypename, dataname):
        """Return the last DicData row of the given type whose
        DicDataName equals *dataname*, or None when not found."""
        # FIX: `result` was previously unbound when the type existed but
        # no row matched, raising NameError instead of returning None.
        result = None
        dicType = DicType.objects.filter(DicTypeName=datatypename).first()
        if dicType:
            for dicdata in DicData.objects.filter(DicType_id=dicType.id):
                if dicdata.DicDataName == dataname:
                    result = dicdata
        return result

    @staticmethod
    def get_dataname_by_datavalue(datatypename, datavalue):
        """Return the last DicData row of the given type whose
        DicDataValue equals *datavalue*, or None when not found."""
        # Same unbound-`result` fix as getdatavaluebydataname above.
        result = None
        dicType = DicType.objects.filter(DicTypeName=datatypename).first()
        if dicType:
            for dicdata in DicData.objects.filter(DicType_id=dicType.id):
                if dicdata.DicDataValue == datavalue:
                    result = dicdata
        return result
|
# -*- coding: utf-8 -*-
from setuptools import setup, find_packages
# Short description reused as the PyPI long description.
LONG_DESCRIPTION = "Package to handle scientific experiments easily"
# NOTE(review): version is '1.0.0' but download_url still points at the
# v0.9.2 tag — presumably stale; confirm the tag exists before releasing.
setup(name='uib_experiments',
      version='1.0.0',
      description='Handle the experiments.',
      long_description=LONG_DESCRIPTION,
      url='https://github.com/miquelmn/uib_experiments',
      author='Miquel Miró Nicolau, Dr. Gabriel Moyà Alcover',
      author_email='miquel.miro@uib.cat, gabriel_moya@uib.es',
      download_url = "https://github.com/miquelmn/uib_experiments/archive/refs/tags/v0.9.2.tar.gz",
      packages=find_packages(),
      install_requires=[
          'telegram_send',
          'opencv-python',
          'matplotlib',
          'scipy',
          'numpy'
      ],
      zip_safe=False)
|
# apis_v1/documentation_source/friend_invitation_by_email_send_doc.py
# Brought to you by We Vote. Be good.
# -*- coding: UTF-8 -*-
def friend_invitation_by_email_send_doc_template_values(url_root):
    """
    Show documentation about friendInvitationByEmailSend
    """
    def _param(name, value, description):
        # Every query-parameter entry is documented by the same three fields.
        return {'name': name, 'value': value, 'description': description}

    # All values are of type string; booleans/integers would be noted here.
    required_query_parameter_list = [
        _param('voter_device_id', 'string',
               'An 88 character unique identifier linked to a voter record on the server'),
        _param('api_key', 'string (from post, cookie, or get (in that order))',
               'The unique key provided to any organization using the WeVoteServer APIs'),
        _param('email_address_array', 'string',
               'Array of Email address for friends to send the invitation to.'),
        _param('first_name_array', 'string',
               'Array of First Name for friends to send the invitation to.'),
        _param('last_name_array', 'string',
               'Array of Last Name for friends to send the invitation to.'),
        _param('email_addresses_raw', 'string',
               'Email addresses to send the invitation to.'),
    ]
    optional_query_parameter_list = [
        _param('invitation_message', 'string',
               'An optional message to send.'),
        _param('sender_email_address', 'string',
               'The email address to use if an email is not attached to voter account.'),
    ]
    potential_status_codes_list = [
        {'code': 'VALID_VOTER_DEVICE_ID_MISSING',
         'description': 'Cannot proceed. A valid voter_device_id parameter was not included.'},
        {'code': 'VALID_VOTER_ID_MISSING',
         'description': 'Cannot proceed. A valid voter_id was not found.'},
    ]
    try_now_link_variables_dict = {
        # 'organization_we_vote_id': 'wv85org1',
    }
    api_response = '{\n' \
                   ' "status": string,\n' \
                   ' "success": boolean,\n' \
                   ' "description_of_send_status": string,\n' \
                   '}'
    return {
        'api_name': 'friendInvitationByEmailSend',
        'api_slug': 'friendInvitationByEmailSend',
        'api_introduction': "Invite your friends via email.",
        'try_now_link': 'apis_v1:friendInvitationByEmailSendView',
        'try_now_link_variables_dict': try_now_link_variables_dict,
        'url_root': url_root,
        'get_or_post': 'GET',
        'required_query_parameter_list': required_query_parameter_list,
        'optional_query_parameter_list': optional_query_parameter_list,
        'api_response': api_response,
        'api_response_notes': "",
        'potential_status_codes_list': potential_status_codes_list,
    }
|
# Copyright 2014 - Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import functools
from barbicanclient import client as barbicanclient
from ceilometerclient.v2 import client as ceilometerclient
from cinderclient.v2 import client as cinderclient
from glanceclient.v2 import client as glanceclient
from heatclient.v1 import client as heatclient
from ironic_inspector_client import v1 as ironic_inspector_client
from ironicclient.v1 import client as ironicclient
from keystoneclient.auth import identity
from keystoneclient import httpclient
from keystoneclient.v3 import client as keystoneclient
from mistralclient.api.v2 import client as mistralclient
from neutronclient.v2_0 import client as neutronclient
from novaclient import client as novaclient
from oslo_config import cfg
from oslo_log import log
from swiftclient import client as swift_client
from troveclient import client as troveclient
from zaqarclient.queues.v2 import client as zaqarclient
from mistral.actions.openstack import base
from mistral import context
from mistral.utils import inspect_utils
from mistral.utils.openstack import keystone as keystone_utils
LOG = log.getLogger(__name__)
CONF = cfg.CONF
class NovaAction(base.OpenStackAction):
    """OpenStack action that talks to the Nova (compute) API."""

    def _get_client(self):
        """Build a Nova v2 client authenticated with the current context."""
        ctx = context.ctx()
        # Lazy %-args: message is only formatted when DEBUG is enabled
        # (was eager `% ctx` formatting on every call).
        LOG.debug("Nova action security context: %s", ctx)
        keystone_endpoint = keystone_utils.get_keystone_endpoint_v2()
        nova_endpoint = keystone_utils.get_endpoint_for_project('nova')
        client = novaclient.Client(
            2,
            username=None,
            api_key=None,
            endpoint_type='publicURL',
            service_type='compute',
            auth_token=ctx.auth_token,
            tenant_id=ctx.project_id,
            region_name=keystone_endpoint.region,
            auth_url=keystone_endpoint.url
        )
        # Point the client directly at the project-scoped Nova URL.
        client.client.management_url = keystone_utils.format_url(
            nova_endpoint.url,
            {'tenant_id': ctx.project_id}
        )
        return client

    @classmethod
    def _get_fake_client(cls):
        # Unauthenticated client; used only for API introspection.
        return novaclient.Client(2)
class GlanceAction(base.OpenStackAction):
    """OpenStack action that talks to the Glance (image) API."""
    _client_class = glanceclient.Client

    def _get_client(self):
        """Build a Glance client authenticated with the current context."""
        ctx = context.ctx()
        # Lazy %-args instead of eager string formatting.
        LOG.debug("Glance action security context: %s", ctx)
        glance_endpoint = keystone_utils.get_endpoint_for_project('glance')
        return self._client_class(
            glance_endpoint.url,
            region_name=glance_endpoint.region,
            token=ctx.auth_token
        )

    @classmethod
    def _get_fake_client(cls):
        # Endpoint-less client; used only for API introspection.
        return cls._client_class("")
class KeystoneAction(base.OpenStackAction):
    """OpenStack action that talks to the Keystone (identity) API."""
    _client_class = keystoneclient.Client

    def _get_client(self):
        """Build a Keystone v3 client authenticated with the current context."""
        ctx = context.ctx()
        # Lazy %-args instead of eager string formatting.
        LOG.debug("Keystone action security context: %s", ctx)
        kwargs = {
            'token': ctx.auth_token,
            'auth_url': CONF.keystone_authtoken.auth_uri,
            'project_id': ctx.project_id,
            'cacert': CONF.keystone_authtoken.cafile,
        }
        # In case of trust-scoped token explicitly pass endpoint parameter.
        if (ctx.is_trust_scoped
                or keystone_utils.is_token_trust_scoped(ctx.auth_token)):
            kwargs['endpoint'] = CONF.keystone_authtoken.auth_uri
        client = self._client_class(**kwargs)
        client.management_url = CONF.keystone_authtoken.auth_uri
        return client

    @classmethod
    def _get_fake_client(cls):
        # Here we need to replace httpclient authenticate method temporarily
        # so the constructor does not try to contact a real Keystone.
        authenticate = httpclient.HTTPClient.authenticate
        httpclient.HTTPClient.authenticate = lambda x: True
        fake_client = cls._client_class()
        # Once we get fake client, return back authenticate method
        httpclient.HTTPClient.authenticate = authenticate
        return fake_client
class CeilometerAction(base.OpenStackAction):
    """OpenStack action that talks to the Ceilometer (telemetry) API."""
    _client_class = ceilometerclient.Client

    def _get_client(self):
        """Build a Ceilometer client authenticated with the current context."""
        ctx = context.ctx()
        # Lazy %-args instead of eager string formatting.
        LOG.debug("Ceilometer action security context: %s", ctx)
        ceilometer_endpoint = keystone_utils.get_endpoint_for_project(
            'ceilometer'
        )
        # Substitute the project id into the endpoint URL template.
        endpoint_url = keystone_utils.format_url(
            ceilometer_endpoint.url,
            {'tenant_id': ctx.project_id}
        )
        return self._client_class(
            endpoint_url,
            region_name=ceilometer_endpoint.region,
            token=ctx.auth_token,
            username=ctx.user_name
        )

    @classmethod
    def _get_fake_client(cls):
        # Endpoint-less client; used only for API introspection.
        return cls._client_class("")
class HeatAction(base.OpenStackAction):
    """OpenStack action that talks to the Heat (orchestration) API."""
    _client_class = heatclient.Client

    def _get_client(self):
        """Build a Heat client authenticated with the current context."""
        ctx = context.ctx()
        # Lazy %-args instead of eager string formatting.
        LOG.debug("Heat action security context: %s", ctx)
        heat_endpoint = keystone_utils.get_endpoint_for_project('heat')
        # Substitute the project id into the endpoint URL template.
        endpoint_url = keystone_utils.format_url(
            heat_endpoint.url,
            {'tenant_id': ctx.project_id}
        )
        return self._client_class(
            endpoint_url,
            region_name=heat_endpoint.region,
            token=ctx.auth_token,
            username=ctx.user_name
        )

    @classmethod
    def _get_fake_client(cls):
        # Endpoint-less client; used only for API introspection.
        return cls._client_class("")
class NeutronAction(base.OpenStackAction):
    """OpenStack action that talks to the Neutron (networking) API."""
    _client_class = neutronclient.Client

    def _get_client(self):
        """Build a Neutron client authenticated with the current context."""
        ctx = context.ctx()
        # Lazy %-args instead of eager string formatting.
        LOG.debug("Neutron action security context: %s", ctx)
        neutron_endpoint = keystone_utils.get_endpoint_for_project('neutron')
        return self._client_class(
            endpoint_url=neutron_endpoint.url,
            region_name=neutron_endpoint.region,
            token=ctx.auth_token,
            auth_url=CONF.keystone_authtoken.auth_uri
        )
class CinderAction(base.OpenStackAction):
    """OpenStack action that talks to the Cinder (block storage v2) API."""
    _client_class = cinderclient.Client

    def _get_client(self):
        """Build a Cinder client authenticated with the current context."""
        ctx = context.ctx()
        # Lazy %-args instead of eager string formatting.
        LOG.debug("Cinder action security context: %s", ctx)
        cinder_endpoint = keystone_utils.get_endpoint_for_project(
            service_type='volumev2'
        )
        # Substitute the project id into the endpoint URL template.
        cinder_url = keystone_utils.format_url(
            cinder_endpoint.url,
            {'tenant_id': ctx.project_id}
        )
        client = self._client_class(
            ctx.user_name,
            ctx.auth_token,
            project_id=ctx.project_id,
            auth_url=cinder_url,
            region_name=cinder_endpoint.region
        )
        # Reuse the context token instead of re-authenticating.
        client.client.auth_token = ctx.auth_token
        client.client.management_url = cinder_url
        return client

    @classmethod
    def _get_fake_client(cls):
        # Argument-less client; used only for API introspection.
        return cls._client_class()
class MistralAction(base.OpenStackAction):
    """OpenStack action that talks to the Mistral (workflow) API itself."""
    _client_class = mistralclient.Client

    def _get_client(self):
        """Build a Mistral client authenticated with the current context."""
        ctx = context.ctx()
        # Lazy %-args instead of eager string formatting.
        LOG.debug("Mistral action security context: %s", ctx)
        # Check for trust scope token. This may occur if the action is
        # called from a workflow triggered by a Mistral cron trigger.
        if ctx.is_trust_scoped:
            auth_url = None
            mistral_endpoint = keystone_utils.get_endpoint_for_project(
                'mistral'
            )
            mistral_url = mistral_endpoint.url
        else:
            keystone_endpoint = keystone_utils.get_keystone_endpoint_v2()
            auth_url = keystone_endpoint.url
            mistral_url = None
        return self._client_class(
            mistral_url=mistral_url,
            auth_token=ctx.auth_token,
            project_id=ctx.project_id,
            user_id=ctx.user_id,
            auth_url=auth_url
        )

    @classmethod
    def _get_fake_client(cls):
        # Argument-less client; used only for API introspection.
        return cls._client_class()
class TroveAction(base.OpenStackAction):
    """OpenStack action that talks to the Trove (database) API."""
    _client_class = troveclient.Client

    def _get_client(self):
        """Build a Trove client authenticated with the current context."""
        ctx = context.ctx()
        # Lazy %-args instead of eager string formatting.
        LOG.debug("Trove action security context: %s", ctx)
        trove_endpoint = keystone_utils.get_endpoint_for_project(
            service_type='database'
        )
        # Substitute the project id into the endpoint URL template.
        trove_url = keystone_utils.format_url(
            trove_endpoint.url,
            {'tenant_id': ctx.project_id}
        )
        client = self._client_class(
            ctx.user_name,
            ctx.auth_token,
            project_id=ctx.project_id,
            auth_url=trove_url,
            region_name=trove_endpoint.region
        )
        # Reuse the context token instead of re-authenticating.
        client.client.auth_token = ctx.auth_token
        client.client.management_url = trove_url
        return client

    @classmethod
    def _get_fake_client(cls):
        # Argument-less client; used only for API introspection.
        return cls._client_class()
class IronicAction(base.OpenStackAction):
    """OpenStack action that talks to the Ironic (bare metal) API."""
    _client_class = ironicclient.Client

    def _get_client(self):
        """Build an Ironic client authenticated with the current context."""
        ctx = context.ctx()
        # Lazy %-args instead of eager string formatting.
        LOG.debug("Ironic action security context: %s", ctx)
        ironic_endpoint = keystone_utils.get_endpoint_for_project('ironic')
        return self._client_class(
            ironic_endpoint.url,
            token=ctx.auth_token,
            region_name=ironic_endpoint.region
        )

    @classmethod
    def _get_fake_client(cls):
        # Localhost endpoint; used only for API introspection.
        return cls._client_class("http://127.0.0.1:6385/")
class BaremetalIntrospectionAction(base.OpenStackAction):
    """OpenStack action that talks to the Ironic Inspector API."""
    _client_class = ironic_inspector_client.ClientV1

    def _get_client(self):
        """Build an Inspector v1 client authenticated with the current context."""
        ctx = context.ctx()
        # Lazy %-args instead of eager string formatting.
        LOG.debug("Baremetal introspection action security context: %s", ctx)
        inspector_endpoint = keystone_utils.get_endpoint_for_project(
            service_type='baremetal-introspection'
        )
        return self._client_class(
            api_version=1,
            inspector_url=inspector_endpoint.url,
            auth_token=ctx.auth_token,
        )
class SwiftAction(base.OpenStackAction):
    """OpenStack action that talks to the Swift (object storage) API."""
    _client_class = swift_client.Connection

    def _get_client(self):
        """Build a pre-authenticated Swift connection for the current context."""
        ctx = context.ctx()
        # Lazy %-args instead of eager string formatting.
        LOG.debug("Swift action security context: %s", ctx)
        swift_endpoint = keystone_utils.get_endpoint_for_project('swift')
        kwargs = {
            # The Swift endpoint URL is a %-template containing tenant_id.
            'preauthurl': swift_endpoint.url % {'tenant_id': ctx.project_id},
            'preauthtoken': ctx.auth_token
        }
        return self._client_class(**kwargs)
class ZaqarAction(base.OpenStackAction):
    """OpenStack action that talks to the Zaqar (messaging) API.

    The queue_* static methods below implement operations that the Zaqar
    client does not expose as plain client methods, so _get_client_method
    binds them to a concrete client instance.
    """
    _client_class = zaqarclient.Client

    def _get_client(self):
        """Build a Zaqar client authenticated with the current context."""
        ctx = context.ctx()
        # Lazy %-args instead of eager string formatting.
        LOG.debug("Zaqar action security context: %s", ctx)
        zaqar_endpoint = keystone_utils.get_endpoint_for_project(
            service_type='messaging')
        keystone_endpoint = keystone_utils.get_keystone_endpoint_v2()
        opts = {
            'os_auth_token': ctx.auth_token,
            'os_auth_url': keystone_endpoint.url,
            'os_project_id': ctx.project_id,
        }
        auth_opts = {'backend': 'keystone', 'options': opts}
        conf = {'auth_opts': auth_opts}
        return self._client_class(zaqar_endpoint.url, conf=conf)

    @classmethod
    def _get_fake_client(cls):
        # Endpoint-less client; used only for API introspection.
        return cls._client_class("")

    @classmethod
    def _get_client_method(cls, client):
        """Return the configured static method bound to *client*."""
        method = getattr(cls, cls.client_method_name)

        # We can't use partial as it's not supported by getargspec
        @functools.wraps(method)
        def wrap(*args, **kwargs):
            return method(client, *args, **kwargs)

        args = inspect_utils.get_arg_list_as_str(method)
        # Remove the leading "client" parameter from the advertised signature.
        wrap.__arguments__ = args.split(', ', 1)[1]
        return wrap

    @staticmethod
    def queue_messages(client, queue_name, **params):
        """Gets a list of messages from the queue.

        :param queue_name: Name of the target queue.
        :type queue_name: `six.string_type`
        :param params: Filters to use for getting messages.
        :type params: **kwargs dict
        :returns: List of messages.
        :rtype: `list`
        """
        queue = client.queue(queue_name)
        return queue.messages(**params)

    @staticmethod
    def queue_post(client, queue_name, messages):
        """Posts one or more messages to a queue.

        :param queue_name: Name of the target queue.
        :type queue_name: `six.string_type`
        :param messages: One or more messages to post.
        :type messages: `list` or `dict`
        :returns: A dict with the result of this operation.
        :rtype: `dict`
        """
        queue = client.queue(queue_name)
        return queue.post(messages)

    @staticmethod
    def queue_pop(client, queue_name, count=1):
        """Pop `count` messages from the queue.

        :param queue_name: Name of the target queue.
        :type queue_name: `six.string_type`
        :param count: Number of messages to pop.
        :type count: int
        :returns: List of messages.
        :rtype: `list`
        """
        queue = client.queue(queue_name)
        return queue.pop(count)
class BarbicanAction(base.OpenStackAction):
    """OpenStack action that talks to the Barbican (key manager) API."""
    _client_class = barbicanclient.Client

    def _get_client(self):
        """Build a Barbican client authenticated via a Keystone v2 token."""
        ctx = context.ctx()
        # Lazy %-args instead of eager string formatting.
        LOG.debug("Barbican action security context: %s", ctx)
        barbican_endpoint = keystone_utils.get_endpoint_for_project('barbican')
        keystone_endpoint = keystone_utils.get_keystone_endpoint_v2()
        auth = identity.v2.Token(
            auth_url=keystone_endpoint.url,
            tenant_name=ctx.user_name,
            token=ctx.auth_token,
            tenant_id=ctx.project_id
        )
        return self._client_class(
            project_id=ctx.project_id,
            endpoint=barbican_endpoint.url,
            auth=auth
        )

    @classmethod
    def _get_fake_client(cls):
        # Localhost endpoint; used only for API introspection.
        return cls._client_class(
            project_id="1",
            endpoint="http://127.0.0.1:9311"
        )

    @classmethod
    def _get_client_method(cls, client):
        """Return the configured client method, specially handling
        secrets_store, which is defined on this class rather than on
        the Barbican client itself."""
        if cls.client_method_name != "secrets_store":
            return super(BarbicanAction, cls)._get_client_method(client)
        method = getattr(cls, cls.client_method_name)

        @functools.wraps(method)
        def wrap(*args, **kwargs):
            return method(client, *args, **kwargs)

        args = inspect_utils.get_arg_list_as_str(method)
        # Remove the leading "client" parameter from the advertised signature.
        wrap.__arguments__ = args.split(', ', 1)[1]
        return wrap

    @staticmethod
    def secrets_store(client,
                      name=None,
                      payload=None,
                      algorithm=None,
                      bit_length=None,
                      secret_type=None,
                      mode=None, expiration=None):
        """Create and Store a secret in Barbican.

        :param name: A friendly name for the Secret
        :type name: string
        :param payload: The unencrypted secret data
        :type payload: string
        :param algorithm: The algorithm associated with this secret key
        :type algorithm: string
        :param bit_length: The bit length of this secret key
        :type bit_length: int
        :param secret_type: The secret type for this secret key
        :type secret_type: string
        :param mode: The algorithm mode used with this secret key
        :type mode: string
        :param expiration: The expiration time of the secret in ISO 8601 format
        :type expiration: string
        :returns: A new Secret object
        :rtype: :class:`barbicanclient.secrets.Secret`
        """
        entity = client.secrets.create(
            name,
            payload,
            algorithm,
            bit_length,
            secret_type,
            mode,
            expiration
        )
        entity.store()
        return entity._get_formatted_entity()
|
# Django settings for testsite project.
from os import path
import sys
import socket
# Hostname is used below to decide whether this is the production box.
hostname = socket.gethostname()
# Try and import pycairo or fallback to cairocffi and install as cairo
try:
    import cairo
except ImportError:
    import cairocffi
    cairocffi.install_as_pycairo()
from django.core.urlresolvers import reverse_lazy
PROJECT_ROOT = path.dirname(path.dirname(__file__))
LOGIN_REDIRECT_URL = '/'
EMAIL_HOST = 'localhost'
# debug on dev machines
# DEBUG is disabled only on the named production host; all other machines
# (developer boxes) run with DEBUG on.
if hostname == 'vennzaa1.miniserver.com':
    DEBUG = False
else:
    DEBUG = True
TEMPLATE_DEBUG = DEBUG
ADMINS = (
    ('Matt Venn', 'matt@mattvenn.net'),
)
MANAGERS = ADMINS
# NOTE(review): database credentials are hard-coded and committed here —
# presumably acceptable for this deployment; confirm before reuse.
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.mysql',
        'NAME': 'cursivedata',
        'USER': 'cursivedata',
        'HOST': 'localhost',
        'PASSWORD': 'cursivedata',
    },
    'sqllite': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': path.join(PROJECT_ROOT, 'db', 'www.sqlite'),
    }
}
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# In a Windows environment this must be set to your system time zone.
TIME_ZONE = 'Greenwich'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale.
USE_L10N = True
# If you set this to False, Django will not use timezone-aware datetimes.
USE_TZ = True
import warnings
# Turn naive-datetime warnings from the ORM into hard errors so they are
# caught during development rather than silently logged.
warnings.filterwarnings(
    'error', r"DateTimeField received a naive datetime",
    RuntimeWarning, r'django\.db\.models\.fields')
# Absolute filesystem path to the directory that will hold user-uploaded files.
# Example: "/home/media/media.lawrence.com/media/"
MEDIA_ROOT = ''
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash.
# Examples: "http://media.lawrence.com/media/", "http://example.com/media/"
MEDIA_URL = '/media/'
# Absolute path to the directory static files should be collected to.
# Don't put anything in this directory yourself; store your static files
# in apps' "static/" subdirectories and in STATICFILES_DIRS.
# Example: "/home/media/media.lawrence.com/static/"
STATIC_ROOT = '/home/polarsite/polargraphenergymonitor/testsite/media/admin/'
# URL prefix for static files.
# Example: "http://media.lawrence.com/static/"
STATIC_URL = '/media/static/'
# Additional locations of static files
STATICFILES_DIRS = (
    # Put strings here, like "/home/html/static" or "C:/www/django/static".
    # Always use forward slashes, even on Windows.
    # Don't forget to use absolute paths, not relative paths.
)
# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = (
    'django.contrib.staticfiles.finders.FileSystemFinder',
    'django.contrib.staticfiles.finders.AppDirectoriesFinder',
    # 'django.contrib.staticfiles.finders.DefaultStorageFinder',
)
# Make this unique, and don't share it with anybody.
# NOTE(review): SECRET_KEY is committed to source control — rotate it if
# this repository is public.
SECRET_KEY = 'i@))&55xb)_981^88xtxtd6bds+bn_be&3w)uttk*+w*fs+%7v'
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
    'django.template.loaders.filesystem.Loader',
    'django.template.loaders.app_directories.Loader',
    # 'django.template.loaders.eggs.Loader',
)
TEMPLATE_CONTEXT_PROCESSORS = (
    'django.core.context_processors.request',
    "django.contrib.auth.context_processors.auth",
    "django.core.context_processors.debug",
    "django.core.context_processors.i18n",
    "django.core.context_processors.media",
    "django.core.context_processors.static",
    "django.contrib.messages.context_processors.messages"
)
MIDDLEWARE_CLASSES = (
    'django.middleware.common.CommonMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    # Uncomment the next line for simple clickjacking protection:
    # 'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'www.urls'
# Python dotted path to the WSGI application used by Django's runserver.
WSGI_APPLICATION = 'www.wsgi.application'
TEMPLATE_DIRS = (
    # Put strings here, like "/home/html/django_templates" or "C:/www/django/templates".
    # Always use forward slashes, even on Windows.
    # Don't forget to use absolute paths, not relative paths.
    path.join(PROJECT_ROOT, 'www', 'templates'),
    "cursivedata/templates"
)
INSTALLED_APPS = (
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.sites',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    # Third party libraries
    'tastypie',
    'django_nose',
    'south',
    # Our apps
    'landing',
    'cursivedata',
)
TEST_RUNNER = 'django_nose.NoseTestSuiteRunner'
# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error when DEBUG=False.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
# Developer machines additionally log DEBUG output to the console;
# production only writes INFO and above to the log file.
if DEBUG:
    default_logger = {
        'handlers': ['console','file'],
        'level': 'DEBUG',
    }
else:
    default_logger = {
        'handlers': ['file'],
        'level': 'INFO',
    }
LOGGING = {
    'version': 1,
    'disable_existing_loggers': False,
    'formatters': {
        'verbose': {
            'format': '[%(asctime)s] [%(levelname)s] %(process)d %(module)s : %(message)s'
        },
    },
    'handlers': {
        'console': {
            'class': 'logging.StreamHandler',
            'stream': sys.stdout,
            'formatter': 'verbose',
        },
        'file': {
            'level': 'DEBUG',
            'class': 'logging.FileHandler',
            # Relative path: the process working directory must contain log/.
            'filename': 'log/info.log',
            'formatter': 'verbose',
        },
    },
    'loggers': {
        'endpoint': default_logger,
        'api': default_logger,
        'graphics': default_logger,
        'data': default_logger,
        'generator': default_logger,
        'views': default_logger,
        'pipeline': default_logger,
    },
}
LOGIN_URL = reverse_lazy('login')
LOGOUT_URL = reverse_lazy('logout')
|
from liquer import *
import pandas as pd
import io
from urllib.request import urlopen
from matplotlib.figure import Figure
from matplotlib import pyplot as plt
import numpy as np
@command
def MPL(state, command, *series):
    """Matplotlib chart.

    Interprets *series* as a small token language, consumed left to right:
      - "png" / "jpg" / "svg"  select the output format (default "png"),
      - "xy"   X Y             plot column Y against column X,
      - "xye"  X Y E           plot with symmetric error bars from column E,
      - "xyee" X Y E1 E2       plot with asymmetric error bars,
      - "cxy"  C X Y           plot with matplotlib format string C.
    The rendered image bytes are stored in state.data and the chosen
    format in state.extension; the state object is returned.
    """
    fig = plt.figure(figsize=(8, 6), dpi=300)
    try:
        axis = fig.add_subplot(1, 1, 1)
        # Reverse so tokens can be consumed left-to-right with pop().
        series = list(reversed(list(series)))
        extension = "png"
        df = state.df()
        while len(series):
            t = series.pop()
            if t in ["jpg", "png", "svg"]:
                extension = t
                continue
            elif t == "xy":
                xcol = state.expand_column(series.pop())
                ycol = state.expand_column(series.pop())
                state.log_info(f"Chart XY ({xcol} {ycol})")
                axis.plot(df[xcol], df[ycol], label=state.column_label(ycol))
                continue
            elif t == "xye":
                xcol = state.expand_column(series.pop())
                ycol = state.expand_column(series.pop())
                ecol = state.expand_column(series.pop())
                state.log_info(f"Chart XY ({xcol} {ycol}) Error:{ecol}")
                axis.errorbar(df[xcol], df[ycol], yerr=df[ecol], label=state.column_label(ycol))
                continue
            elif t == "xyee":
                xcol = state.expand_column(series.pop())
                ycol = state.expand_column(series.pop())
                e1col = state.expand_column(series.pop())
                e2col = state.expand_column(series.pop())
                state.log_info(f"Chart XY ({xcol} {ycol}) Error:({e1col},{e2col})")
                axis.errorbar(df[xcol], df[ycol], yerr=[df[e1col], df[e2col]], label=state.column_label(ycol))
                continue
            elif t == "cxy":
                c = series.pop()
                xcol = state.expand_column(series.pop())
                ycol = state.expand_column(series.pop())
                axis.plot(df[xcol], df[ycol], c, label=state.column_label(ycol))
                continue
            else:
                state.log_warning(f"Unrecognized MPL parameter {t}")
        fig.legend()
        output = io.BytesIO()
        fig.savefig(output, dpi=300, format=extension)
        state.data = output.getvalue()
        state.extension = extension
        return state
    finally:
        # FIX: pyplot keeps a global reference to every figure it creates;
        # without close() each call leaked a figure and memory grew unboundedly.
        plt.close(fig)
@df_source
def TestData():
    """Test data

    A 100-point sine wave with two scaled variants, columns x, y, y1, y2.
    """
    xs = np.linspace(-5, 5, 100)
    frame = pd.DataFrame({
        "x": xs,
        "y": np.sin(xs),
        "y1": 0.1 * np.sin(xs),
        "y2": 0.2 * np.sin(xs + 0.1),
    })
    return frame
|
#Author-Hyunyoung Kim
#Description-MorpheesPlug script for Fusion 360
#Basic unit: cm, rad
import adsk.core, adsk.fusion, adsk.cam, traceback, math
_app = adsk.core.Application.cast(None)
_ui = adsk.core.UserInterface.cast(None)
_handlers = []
_inputs = adsk.core.CommandInputs.cast(None)
def run(context):
    """Script entry point: registers and executes the MorpheesPlug command.

    Args:
        context: Execution context supplied by Fusion 360 (unused here).
    """
    try:
        global _app, _ui
        _app = adsk.core.Application.get()
        _ui = _app.userInterface
        # Create the command definition (reuse it if it already exists).
        cmdDef = _ui.commandDefinitions.itemById('morpheesPlug')
        if not cmdDef:
            cmdDef = _ui.commandDefinitions.addButtonDefinition('morpheesPlug','Morphees Plug','Create Morphees Plug shape-changing widgets','Resources')
        # Connect to the command created event.
        onCommandCreated = MyCommandCreatedHandler()
        cmdDef.commandCreated.add(onCommandCreated)
        _handlers.append(onCommandCreated)
        # # Add the button the ADD-INS panel.
        # addInsPanel = _ui.allToolbarPanels.itemById('SolidScriptsAddinsPanel')
        # addInsPanel.controls.addCommand(cmdDef)
        # Execute the command definition.
        cmdDef.execute()  # Remove for Add-in
        # Prevent this module from being terminated when the script returns, because we are waiting for event handlers to fire.
        adsk.autoTerminate(False)  # Remove for Add-in
    except:
        # FIX: was `if ui:` — `ui` is undefined here, so any failure raised
        # NameError instead of showing the error dialog.
        if _ui:
            _ui.messageBox('Failed:\n{}'.format(traceback.format_exc()))
# Event handler for the commandCreated event.
class MyCommandCreatedHandler(adsk.core.CommandCreatedEventHandler):
    """Wires up the secondary event handlers and builds the initial
    command-input UI when the MorpheesPlug command is created."""
    def __init__(self):
        super().__init__()
    def notify(self, args):
        """Called by Fusion 360 when the command is created."""
        try:
            global _inputs
            # Get the command that was created.
            cmd = adsk.core.Command.cast(args.command)
            # # Connect to the command destroyed event. # Remove for Add-in
            onDestroy = MyCommandDestroyHandler()
            cmd.destroy.add(onDestroy)
            _handlers.append(onDestroy)
            # Connect to the input changed event.
            onInputChanged = MyCommandInputChangedHandler()
            cmd.inputChanged.add(onInputChanged)
            _handlers.append(onInputChanged)
            # Connect to command execute handler.
            onExecute = MyExecuteHandler()
            cmd.execute.add(onExecute)
            _handlers.append(onExecute)
            # Connect to a preview handler.
            onExecutePreview = MyExecutePreviewHandler()
            cmd.executePreview.add(onExecutePreview)
            _handlers.append(onExecutePreview)
            # Get the CommandInputs collection associated with the command.
            # Kept in the module-level _inputs so other handlers can reach it.
            _inputs = cmd.commandInputs
            #inputs = cmd.commandInputs
            # Connect to command inputs.
            des = adsk.fusion.Design.cast(_app.activeProduct)
            um = des.unitsManager # TODO. change the unit.
            # Widget selector: the first entry ('Fold') is selected by default.
            dropDownInput = _inputs.addDropDownCommandInput('dropDown', 'Widget', adsk.core.DropDownStyles.LabeledIconDropDownStyle)
            dropDownInputItems = dropDownInput.listItems
            dropDownInputItems.add('Fold', True, 'Resources/Widget_Fold')
            dropDownInputItems.add('Spiral', False, 'Resources/Widget_Spiral')
            dropDownInputItems.add('Teeth', False, 'Resources/Widget_Teeth')
            dropDownInputItems.add('Bump', False, 'Resources/Widget_Bump')
            dropDownInputItems.add('Accordion', False, 'Resources/Widget_Accordion')
            dropDownInputItems.add('Auxetic', False, 'Resources/Widget_Auxetic')
            # Parameter group; populated per-widget by updateInputs().
            _inputs.addGroupCommandInput('groupInputs', 'Set Parameters')
            updateInputs(_inputs)
        except:
            _ui.messageBox('Failed:\n{}'.format(traceback.format_exc()))
def updateInputs(commandInputs):
    """Rebuild the 'Set Parameters' group for the currently selected widget.

    Deletes every input inside the group, then repopulates it with the image,
    description text, and dimension inputs belonging to the widget selected in
    the 'dropDown' input. Called once at dialog creation and again whenever
    the drop-down selection changes.

    :param commandInputs: the command's CommandInputs collection
    """
    inputs = adsk.core.CommandInputs.cast(commandInputs)
    group = inputs.itemById('groupInputs')
    groupInputs = group.children
    # Remove all previous command inputs in the group
    while groupInputs.count > 0:
        groupInputs.item(0).deleteMe()
    dropDownInput = inputs.itemById('dropDown')
    dropDownItem = dropDownInput.selectedItem
    if dropDownItem.name == 'Fold':
        groupInputs.addImageCommandInput('imgFold', '', 'Resources/Widget_Image/Fold.png')
        groupInputs.item(0).isFullWidth = True
        groupInputs.addTextBoxCommandInput('textDesc', '', 'Fold widget will elongate along the arrow direction when inflated.', 3, True)
        groupInputs.addIntegerSpinnerCommandInput('numFolds', '1. Number of Folds', 1, 10, 1, 1) # id, name, min, max, spinStep, initialValue
        groupInputs.addValueInput('height', '2. Height', 'cm', adsk.core.ValueInput.createByReal(2))
        groupInputs.addValueInput('length', '3. Length', 'cm', adsk.core.ValueInput.createByReal(4))
        groupInputs.addValueInput('width', '4. Thickness', 'cm', adsk.core.ValueInput.createByReal(0.5))
        groupInputs.addValueInput('gap', '5. Gap', 'cm', adsk.core.ValueInput.createByReal(0.5))
    elif dropDownItem.name == 'Spiral':
        groupInputs.addImageCommandInput('imgFold', '', 'Resources/Widget_Image/Spiral.png')
        groupInputs.item(0).isFullWidth = True
        groupInputs.addTextBoxCommandInput('textDesc', '', 'Spiral widget will unbend when inflated.', 3, True)
        groupInputs.addIntegerSpinnerCommandInput('numTurns', '1. Number of Turns', 1, 10, 1, 1) # id, name, min, max, spinStep, initialValue
        groupInputs.addValueInput('height', '2. Height', 'cm', adsk.core.ValueInput.createByReal(2))
        groupInputs.addValueInput('offset', '3. Central Offset', 'cm', adsk.core.ValueInput.createByReal(1))
        groupInputs.addValueInput('width', '4. Thickness', 'cm', adsk.core.ValueInput.createByReal(0.5))
        groupInputs.addValueInput('distBtwTurns', '5. Gap', 'cm', adsk.core.ValueInput.createByReal(0.5))
    elif dropDownItem.name == 'Teeth':
        groupInputs.addImageCommandInput('imgFold', '', 'Resources/Widget_Image/Teeth.png')
        groupInputs.item(0).isFullWidth = True
        groupInputs.addTextBoxCommandInput('textDesc', '', 'Teeth widget will bend to the direction of the arrow when inflated.', 3, True)
        groupInputs.addValueInput('width', '1. Length 1', 'cm', adsk.core.ValueInput.createByReal(10))
        groupInputs.addValueInput('width2', '2. Length 2', 'cm', adsk.core.ValueInput.createByReal(3))
        groupInputs.addValueInput('height', '3. Height', 'cm', adsk.core.ValueInput.createByReal(5))
        groupInputs.addValueInput('depth', '4. Thickness 1', 'cm', adsk.core.ValueInput.createByReal(0.5))
        groupInputs.addValueInput('thickness', '5. Thickness 2', 'cm', adsk.core.ValueInput.createByReal(0.5))
        groupInputs.addValueInput('gap', '6. Gap', 'cm', adsk.core.ValueInput.createByReal(0.5))
        groupInputs.addValueInput('angle', '7. Angle', 'deg', adsk.core.ValueInput.createByReal(0))
    elif dropDownItem.name == 'Bump':
        groupInputs.addImageCommandInput('imgFold', '', 'Resources/Widget_Image/Bump.png')
        groupInputs.item(0).isFullWidth = True
        groupInputs.addTextBoxCommandInput('textDesc', '', 'Bump widget will create bumps where the empty chambers are when inflated.', 3, True)
        groupInputs.addIntegerSpinnerCommandInput('numWidth', '1. Chambers on X Axis', 1, 30, 1, 2)
        groupInputs.addIntegerSpinnerCommandInput('numLength', '2. Chambers on Y Axis', 1, 30, 1, 2)
        groupInputs.addValueInput('width', '3. Length on X Axis', 'cm', adsk.core.ValueInput.createByReal(1))
        groupInputs.addValueInput('length', '4. Length on Y Axis', 'cm', adsk.core.ValueInput.createByReal(1))
    elif dropDownItem.name == 'Accordion':
        groupInputs.addImageCommandInput('imgFold', '', 'Resources/Widget_Image/Accordion.png')
        groupInputs.item(0).isFullWidth = True
        groupInputs.addTextBoxCommandInput('textDesc', '', 'Accordion widget will create a set of length-changing chambers. Each chamber can be inflated individually', 3, True)
        groupInputs.addIntegerSpinnerCommandInput('height', '1. Number of Layers', 1, 10, 1, 1) # id, name, min, max, spinStep, initialValue
        groupInputs.addIntegerSpinnerCommandInput('x_axis', '2. Chambers on X Axis', 1, 10, 1, 2) # id, name, min, max, spinStep, initialValue
        groupInputs.addIntegerSpinnerCommandInput('y_axis', '3. Chambers on Y Axis', 1, 10, 1, 2) # id, name, min, max, spinStep, initialValue
        groupInputs.addValueInput('width', '4. Length on X Axis', 'cm', adsk.core.ValueInput.createByReal(5))
        groupInputs.addValueInput('depth', '5. Length on Y Axis', 'cm', adsk.core.ValueInput.createByReal(5))
    elif dropDownItem.name == 'Auxetic':
        # NOTE(review): this branch reuses the Accordion image — presumably an
        # Auxetic.png asset was intended; confirm against the Resources folder.
        groupInputs.addImageCommandInput('imgFold', '', 'Resources/Widget_Image/Accordion.png')
        groupInputs.item(0).isFullWidth = True
        groupInputs.addTextBoxCommandInput('textDesc', '', 'Auxetic widget will change width when inflated.', 3, True)
        groupInputs.addValueInput('a', '1. Width', 'cm', adsk.core.ValueInput.createByReal(3.5))
        groupInputs.addValueInput('b', '2. Inner Gap', 'mm', adsk.core.ValueInput.createByReal(.3))
        groupInputs.addValueInput('c', '3. Outer Gap', 'mm', adsk.core.ValueInput.createByReal(.8))
        groupInputs.addValueInput('height', '4. Height', 'cm', adsk.core.ValueInput.createByReal(2.5))
# Event handler that reacts to any changes the user makes to any of the command inputs.
class MyCommandInputChangedHandler(adsk.core.InputChangedEventHandler):
    """Rebuilds the parameter group whenever the widget drop-down changes."""
    def __init__(self):
        super().__init__()
    def notify(self, args):
        """Handle an input-changed event.

        Only a change to the 'dropDown' input triggers work: switching widgets
        replaces the parameter group contents via updateInputs(). (The unused
        `command = args.firingEvent.sender` lookup from the original has been
        removed.)
        """
        try:
            cmdInput = args.input
            if cmdInput.id == 'dropDown':
                updateInputs(args.inputs)
        except:
            _ui.messageBox('Failed:\n{}'.format(traceback.format_exc()))
# Now I feel this handler is not necessary, because the preview handler does the job I need.
class MyExecuteHandler(adsk.core.CommandEventHandler):
    """Models the selected widget when the command is executed.

    NOTE(original author): the preview handler performs the same modeling, so
    this handler may be redundant; it is kept to mirror the original behavior.
    """
    def __init__(self):
        super().__init__()
    def notify(self, args):
        try:
            adsk.core.CommandEventArgs.cast(args)
            def val(inputId):
                # Current value of the command input with the given id.
                return _inputs.itemById(inputId).value
            selected = _inputs.itemById('dropDown').selectedItem.name
            #_ui.messageBox(selected)
            if selected == 'Fold':
                modelFold(val('numFolds'), val('width'), val('length'), val('height'), val('gap'))
            elif selected == 'Spiral':
                modelSpiral(val('numTurns'), val('distBtwTurns'), val('width'), val('height'), val('offset'))
            elif selected == 'Teeth':
                modelTeeth(val('width'), val('width2'), val('depth'), val('height'), val('angle'), val('thickness'), val('gap'))
            elif selected == 'Bump':
                modelBump(val('width'), val('length'), val('numWidth'), val('numLength'))
            elif selected == 'Accordion':
                modelAccordion(val('width'), val('depth'), val('height'), val('x_axis'), val('y_axis'))
            elif selected == 'Auxetic':
                modelAuxetic(val('a'), val('b'), val('c'), val('height'))
        except:
            _ui.messageBox('Failed:\n{}'.format(traceback.format_exc()))
class MyExecutePreviewHandler(adsk.core.CommandEventHandler):
    """Models the selected widget as the command's on-screen preview geometry."""
    def __init__(self):
        super().__init__()
    def notify(self, args):
        try:
            eventArgs = adsk.core.CommandEventArgs.cast(args)
            def val(inputId):
                # Current value of the command input with the given id.
                return _inputs.itemById(inputId).value
            selected = _inputs.itemById('dropDown').selectedItem.name
            #_ui.messageBox(selected)
            if selected == 'Fold':
                modelFold(val('numFolds'), val('width'), val('length'), val('height'), val('gap'))
            elif selected == 'Spiral':
                modelSpiral(val('numTurns'), val('distBtwTurns'), val('width'), val('height'), val('offset'))
            elif selected == 'Teeth':
                modelTeeth(val('width'), val('width2'), val('depth'), val('height'), val('angle'), val('thickness'), val('gap'))
            elif selected == 'Bump':
                modelBump(val('width'), val('length'), val('numWidth'), val('numLength'))
            elif selected == 'Accordion':
                modelAccordion(val('width'), val('depth'), val('height'), val('x_axis'), val('y_axis'))
            elif selected == 'Auxetic':
                modelAuxetic(val('a'), val('b'), val('c'), val('height'))
            # Tell Fusion the preview geometry is a valid final result so it
            # can be reused instead of re-running execute.
            eventArgs.isValidResult = True
        except:
            _ui.messageBox('Failed:\n{}'.format(traceback.format_exc()))
# Event handler that reacts to when the command is destroyed. This terminates the script.
# # Remove for Add-in
class MyCommandDestroyHandler(adsk.core.CommandEventHandler):
    """Terminates the script when the command dialog is destroyed (script mode only)."""
    def __init__(self):
        super().__init__()
    def notify(self, args):
        try:
            # When the command is done, terminate the script
            # This will release all globals which will remove all event handlers
            adsk.terminate()
        except:
            _ui.messageBox('Failed:\n{}'.format(traceback.format_exc()))
# Model a fold widget with the given parameters.
def modelFold(numFolds, width, length, height, gap):
    """Model a Fold widget by sweeping a rectangular profile along a zig-zag
    (square-wave) path sketched on the XY plane; the widget elongates along
    the path direction when inflated.

    :param numFolds: number of square-wave periods in the path
    :param width: channel thickness (cm); also the sweep profile width
    :param length: fold height of each vertical run in the path (cm)
    :param height: sweep profile height (cm)
    :param gap: spacing between neighbouring fold walls (cm)
    """
    try:
        design = adsk.fusion.Design.cast(_app.activeProduct)
        root = design.rootComponent
        sketches = root.sketches
        features = root.features
        ### 1. Draw sketch lines, path.
        sketchPath = sketches.add(root.xYConstructionPlane)
        lines = sketchPath.sketchCurves.sketchLines
        lineCollection = adsk.core.ObjectCollection.create()
        # (x1, y1) -> (x2, y2) is the segment currently being drawn; the pairs
        # are mutated cumulatively, so statement order below is significant.
        x1 = 0
        y1 = 0
        x2 = 0
        y2 = length
        # Short lead-in segment from half a channel-width below the origin.
        line = lines.addByTwoPoints(adsk.core.Point3D.create(0, -width/2, 0), adsk.core.Point3D.create(0, 0, 0))
        lineCollection.add(line)
        # Each iteration draws one square-wave period: up, right, down, right.
        for i in range(numFolds):
            x1 = (gap + width) * 2 * i
            y1 = 0
            x2 = (gap + width) * 2 * i
            y2 = length
            line = lines.addByTwoPoints(adsk.core.Point3D.create(x1, y1, 0), adsk.core.Point3D.create(x2, y2, 0))
            lineCollection.add(line)
            x1 = x2
            y1 = y2
            x2 = x2 + (gap + width)
            y2 = y2
            line = lines.addByTwoPoints(adsk.core.Point3D.create(x1, y1, 0), adsk.core.Point3D.create(x2, y2, 0))
            lineCollection.add(line)
            x1 = x2
            y1 = y2
            x2 = x2
            y2 = 0
            line = lines.addByTwoPoints(adsk.core.Point3D.create(x1, y1, 0), adsk.core.Point3D.create(x2, y2, 0))
            lineCollection.add(line)
            x1 = x2
            y1 = y2
            x2 = x2 + (gap + width)
            y2 = y2
            line = lines.addByTwoPoints(adsk.core.Point3D.create(x1, y1, 0), adsk.core.Point3D.create(x2, y2, 0))
            lineCollection.add(line)
        # Lead-out segment extending half a channel-width past the last fold.
        x1 = x2
        y1 = y2
        y2 = length + width/2
        line = lines.addByTwoPoints(adsk.core.Point3D.create(x1, y1, 0), adsk.core.Point3D.create(x2, y2, 0))
        lineCollection.add(line)
        chainedOption = adsk.fusion.ChainedCurveOptions.connectedChainedCurves
        path = adsk.fusion.Path.create(lineCollection, chainedOption) # Actually the value of chainedOption does not matter when input ObjectCollection.
        ### 2. Create a profile for sweep.
        # Create a sketch
        xzPlane = root.xZConstructionPlane
        sketchProfile = sketches.add(xzPlane)
        # Create two points to create a rectangle.
        center = xzPlane.geometry.origin
        center = sketchProfile.modelToSketchSpace(center)
        sketchPoints = sketchProfile.sketchPoints
        point = adsk.core.Point3D.create(width/2, height/2, 0)
        sketchPoint = sketchPoints.add(point)
        # Create a rectangular profile with the created two points.
        lines = sketchProfile.sketchCurves.sketchLines
        rect = lines.addCenterPointRectangle(center, sketchPoint)
        prof = sketchProfile.profiles.item(0)
        # 3. Sweep.
        sweeps = root.features.sweepFeatures
        sweepInput = sweeps.createInput(prof, path, adsk.fusion.FeatureOperations.NewBodyFeatureOperation)
        sweep = sweeps.add(sweepInput)
    except:
        _ui.messageBox('Failed:\n{}'.format(traceback.format_exc()))
# Model a spiral widget with the given parameters.
def modelSpiral(numTurns, distanceBetweenTurns, width, height, offset):
    """Model a Spiral widget by sweeping a rectangular profile along an
    Archimedean-spiral spline on the XY plane; the widget unbends when inflated.

    :param numTurns: number of spiral turns
    :param distanceBetweenTurns: gap between consecutive turns (cm)
    :param width: channel thickness (cm); also the sweep profile width
    :param height: sweep profile height (cm)
    :param offset: radius of the spiral's innermost point (cm)
    """
    try:
        #_ui.messageBox('Test 01')
        design = adsk.fusion.Design.cast(_app.activeProduct)
        root = design.rootComponent
        sketches = root.sketches
        features = root.features
        ### Create a spiral path.
        # Create a new sketch.
        sketchPath = sketches.add(root.xYConstructionPlane)
        # Create a series of points along the spiral using the spiral equation.
        # r = a + (beta * theta) --> r = offset + (distanceBetweenTurns * theta)
        points = adsk.core.ObjectCollection.create()
        pointsPerTurn = 20
        theta = 0
        # Shift the start radius by half the channel width so `offset` measures
        # to the channel's inner wall rather than its centerline.
        offset = offset + width/2
        for i in range(pointsPerTurn * numTurns + 1):
            # Radius grows by (distanceBetweenTurns + width) per full turn.
            r = offset + ((distanceBetweenTurns + width) * theta/(math.pi*2))
            x = r * math.cos(theta)
            y = r * math.sin(theta)
            points.add(adsk.core.Point3D.create(x,y,0))
            theta += (math.pi*2) / pointsPerTurn
        splines = sketchPath.sketchCurves.sketchFittedSplines.add(points)
        path = features.createPath(splines)
        ### Create a profile.
        # Create a plane normal to the splines.
        planes = root.constructionPlanes
        planeInput = planes.createInput()
        planeInput.setByDistanceOnPath(splines, adsk.core.ValueInput.createByReal(0))
        plane = planes.add(planeInput)
        # Create two points to create a rectangle.
        sketchProfile = sketches.add(plane)
        center = plane.geometry.origin
        center = sketchProfile.modelToSketchSpace(center)
        sketchPoints = sketchProfile.sketchPoints
        point = adsk.core.Point3D.create(width/2, height/2, 0)
        sketchPoint = sketchPoints.add(point)
        # Create a rectangular profile with the created two points.
        lines = sketchProfile.sketchCurves.sketchLines
        rect = lines.addCenterPointRectangle(center, sketchPoint)
        prof = sketchProfile.profiles.item(0) # Make a profile
        ### Create a spiral widget.
        # Sweep
        sweeps = root.features.sweepFeatures
        sweepInput = sweeps.createInput(prof, path, adsk.fusion.FeatureOperations.NewBodyFeatureOperation)
        sweep = sweeps.add(sweepInput)
    except:
        _ui.messageBox('Failed:\n{}'.format(traceback.format_exc()))
# Model a widget that has teeth (previously called wrinkles) on one side.
def modelTeeth(width, width2, depth, height, angle, thickness, gap):
    """Model a Teeth widget: a flat wall with a row of teeth (wrinkles) on one
    side; the widget bends toward the teeth when inflated.

    Teeth are vertical rectangles when ``angle`` is 0, otherwise tilted
    parallelograms clipped at the sketch's left and right edges.

    :param width: wall length (cm)
    :param width2: tooth length, i.e. tooth extrusion distance (cm)
    :param depth: wall thickness (cm)
    :param height: wall height (cm)
    :param angle: tooth tilt angle (radians internally; input is 'deg' unit)
    :param thickness: tooth thickness (cm)
    :param gap: gap between consecutive teeth (cm)
    """
    try:
        design = adsk.fusion.Design.cast(_app.activeProduct)
        root = design.rootComponent
        sketches = root.sketches
        features = root.features
        # Internal parameters.
        wrinkleWidth = thickness
        wrinkleGap = gap
        wrinkleLength = width2
        # ### 1. Make a wall to put wrinkles.
        # # Draw a box for wall and teeth
        # sketchWall = sketches.add(root.xYConstructionPlane)
        # lines = sketchWall.sketchCurves.sketchLines
        # _ = lines.addTwoPointRectangle(adsk.core.Point3D.create(0, -depth, 0), adsk.core.Point3D.create(width, wrinkleLength, 0))
        # prof = sketchWall.profiles.item(0)
        # # Extrude
        # extrudes = features.extrudeFeatures
        # distance = adsk.core.ValueInput.createByReal(height)
        # _ = extrudes.addSimple(prof, distance, adsk.fusion.FeatureOperations.NewBodyFeatureOperation)
        # ### 2. Make wrinkles.
        # # Create a new sketch
        # planes = root.constructionPlanes
        # planeInput = planes.createInput()
        # planeInput.setByAngle(root.yConstructionAxis, angle, prof)
        # teethPlane = planes.add(planeInput)
        # sketchTeeth = sketches.add(teethPlane)
        # lines = sketchTeeth.sketchCurves.sketchLines
        ### 1. Make a wall to put wrinkles.
        # Draw the wall
        sketchWall = sketches.add(root.xZConstructionPlane)
        lines = sketchWall.sketchCurves.sketchLines
        _ = lines.addTwoPointRectangle(adsk.core.Point3D.create(0, 0, 0), adsk.core.Point3D.create(width, height, 0))
        prof = sketchWall.profiles.item(0)
        # Extrude
        extrudes = features.extrudeFeatures
        distance = adsk.core.ValueInput.createByReal(-depth)
        _ = extrudes.addSimple(prof, distance, adsk.fusion.FeatureOperations.NewBodyFeatureOperation)
        ### 2. Make wrinkles.
        # Create a new sketch
        planes = root.constructionPlanes
        planeInput = planes.createInput()
        sketchTeeth = sketches.add(root.xZConstructionPlane)
        lines = sketchTeeth.sketchCurves.sketchLines
        if angle == 0:
            # Draw wrinkles
            # Straight (untilted) teeth: equally spaced vertical rectangles.
            startPointX = 0
            endPointX = wrinkleWidth
            while endPointX < width:
                rect = lines.addTwoPointRectangle(adsk.core.Point3D.create(startPointX, 0, 0), adsk.core.Point3D.create(endPointX, height, 0))
                startPointX = endPointX + wrinkleGap
                endPointX = startPointX + wrinkleWidth
        else:
            # Draw first triangle
            # xTiltedRect is the horizontal footprint of one tilted tooth.
            xTiltedRect = wrinkleWidth / math.cos(angle)
            #_ui.messageBox("width:"+str(wrinkleWidth)+" x:"+str(xTiltedRect))
            lines.addByTwoPoints(adsk.core.Point3D.create(0, height, 0), adsk.core.Point3D.create(xTiltedRect, height, 0))
            lines.addByTwoPoints(adsk.core.Point3D.create(xTiltedRect, height, 0), adsk.core.Point3D.create(0, height-wrinkleWidth/math.sin(angle), 0))
            lines.addByTwoPoints(adsk.core.Point3D.create(0, height-wrinkleWidth/math.sin(angle), 0), adsk.core.Point3D.create(0, height, 0))
            # Draw parallelograms
            # topX* track the tooth's top edge, bottomX* its bottom edge; the
            # three branches clip the parallelogram against the left edge x=0.
            topX1 = 2 * xTiltedRect
            topX2 = topX1 + xTiltedRect
            bottomX1 = topX1 - height / math.tan(math.pi/2 - angle)
            bottomX2 = bottomX1 + xTiltedRect
            while topX2 < width:
                if bottomX1 >= 0:
                    lines.addByTwoPoints(adsk.core.Point3D.create(topX1, height, 0), adsk.core.Point3D.create(topX2, height, 0))
                    lines.addByTwoPoints(adsk.core.Point3D.create(topX2, height, 0), adsk.core.Point3D.create(bottomX2, 0, 0))
                    lines.addByTwoPoints(adsk.core.Point3D.create(bottomX2, 0, 0), adsk.core.Point3D.create(bottomX1, 0, 0))
                    lines.addByTwoPoints(adsk.core.Point3D.create(bottomX1, 0, 0), adsk.core.Point3D.create(topX1, height, 0))
                elif bottomX1 < 0 and bottomX2 > 0:
                    lines.addByTwoPoints(adsk.core.Point3D.create(topX1, height, 0), adsk.core.Point3D.create(topX2, height, 0))
                    lines.addByTwoPoints(adsk.core.Point3D.create(topX2, height, 0), adsk.core.Point3D.create(bottomX2, 0, 0))
                    lines.addByTwoPoints(adsk.core.Point3D.create(bottomX2, 0, 0), adsk.core.Point3D.create(0, 0, 0))
                    lines.addByTwoPoints(adsk.core.Point3D.create(0, 0, 0), adsk.core.Point3D.create(0, height - topX1 / math.tan(angle), 0))
                    lines.addByTwoPoints(adsk.core.Point3D.create(0, height - topX1 / math.tan(angle), 0), adsk.core.Point3D.create(topX1, height, 0))
                else: # bottomX2 <= 0
                    lines.addByTwoPoints(adsk.core.Point3D.create(topX1, height, 0), adsk.core.Point3D.create(topX2, height, 0))
                    lines.addByTwoPoints(adsk.core.Point3D.create(topX2, height, 0), adsk.core.Point3D.create(0, height - topX2 / math.tan(angle), 0))
                    lines.addByTwoPoints(adsk.core.Point3D.create(0, height - topX2 / math.tan(angle), 0), adsk.core.Point3D.create(0, height - topX1 / math.tan(angle), 0))
                    lines.addByTwoPoints(adsk.core.Point3D.create(0, height - topX1 / math.tan(angle), 0), adsk.core.Point3D.create(topX1, height, 0))
                topX1 += 2 * xTiltedRect
                topX2 = topX1 + xTiltedRect
                bottomX1 += 2 * xTiltedRect
                bottomX2 = bottomX1 + xTiltedRect
            # Remaining teeth clipped against the right edge x=width.
            counter = 1
            while bottomX1 < width:
                if topX1 < width:
                    lines.addByTwoPoints(adsk.core.Point3D.create(topX1, height, 0), adsk.core.Point3D.create(width, height, 0))
                    lines.addByTwoPoints(adsk.core.Point3D.create(width, height, 0), adsk.core.Point3D.create(width, height - (topX2 - width) / math.tan(angle), 0))
                    lines.addByTwoPoints(adsk.core.Point3D.create(width, height - (topX2 - width) / math.tan(angle), 0), adsk.core.Point3D.create(bottomX2, 0, 0))
                    lines.addByTwoPoints(adsk.core.Point3D.create(bottomX2, 0, 0), adsk.core.Point3D.create(bottomX1, 0, 0))
                    lines.addByTwoPoints(adsk.core.Point3D.create(bottomX1, 0, 0), adsk.core.Point3D.create(topX1, height, 0))
                elif bottomX2 < width:
                    lines.addByTwoPoints(adsk.core.Point3D.create(width, height - (topX1 - width) / math.tan(angle), 0), adsk.core.Point3D.create(width, height - (topX2 - width) / math.tan(angle), 0))
                    lines.addByTwoPoints(adsk.core.Point3D.create(width, height - (topX2 - width) / math.tan(angle), 0), adsk.core.Point3D.create(bottomX2, 0, 0))
                    lines.addByTwoPoints(adsk.core.Point3D.create(bottomX2, 0, 0), adsk.core.Point3D.create(bottomX1, 0, 0))
                    lines.addByTwoPoints(adsk.core.Point3D.create(bottomX1, 0, 0), adsk.core.Point3D.create(width, height - (topX1 - width) / math.tan(angle), 0))
                else: #bottomX2 >= width
                    lines.addByTwoPoints(adsk.core.Point3D.create(width, height - (topX1 - width) / math.tan(angle), 0), adsk.core.Point3D.create(width, 0, 0))
                    lines.addByTwoPoints(adsk.core.Point3D.create(width, 0, 0), adsk.core.Point3D.create(bottomX1, 0, 0))
                    lines.addByTwoPoints(adsk.core.Point3D.create(bottomX1, 0, 0), adsk.core.Point3D.create(width, height - (topX1 - width) / math.tan(angle), 0))
                topX1 += 2 * xTiltedRect
                topX2 = topX1 + xTiltedRect
                bottomX1 += 2 * xTiltedRect
                bottomX2 = bottomX1 + xTiltedRect
                counter += 1
        # Make profiles.
        profileCollection = adsk.core.ObjectCollection.create()
        for i in range(sketchTeeth.profiles.count):
            profileCollection.add(sketchTeeth.profiles.item(i))
        # Extrude.
        distance = adsk.core.ValueInput.createByReal(wrinkleLength)
        _ = extrudes.addSimple(profileCollection, distance, adsk.fusion.FeatureOperations.JoinFeatureOperation)
    except:
        _ui.messageBox('Failed:\n{}'.format(traceback.format_exc()))
# Model a Bump widget with the given parameters.
def modelBump(width, length, numWidth, numLength):
    """Model a Bump widget: a three-layer slab (top skin, hollow chamber grid,
    bottom plate). The thin top skin bulges over each empty chamber when the
    widget is inflated.

    :param width: chamber length on the X axis (cm)
    :param length: chamber length on the Y axis (cm)
    :param numWidth: number of chambers along X
    :param numLength: number of chambers along Y
    """
    try:
        design = adsk.fusion.Design.cast(_app.activeProduct)
        root = design.rootComponent
        sketches = root.sketches
        features = root.features
        # # user parameters
        # width = 1
        # length = 1
        # numWidth = 2
        # numLength = 3
        # internal parameters
        gap = 0.2
        heightTop = 0.06
        heightBottom = 0.2
        heightChamber = 0.3
        totalWidth = (gap + width) * numWidth + gap
        totalLength = (gap + length) * numLength + gap
        ### 1. Extrude for the top layer and chambers.
        # Draw a rectangle for the top layer (top layer where bumps will go up will be printed at the bottom.)
        sketchTop = sketches.add(root.xYConstructionPlane)
        lines = sketchTop.sketchCurves.sketchLines
        rect = lines.addTwoPointRectangle(adsk.core.Point3D.create(0, 0, 0), adsk.core.Point3D.create(totalWidth, totalLength, 0))
        prof = sketchTop.profiles.item(0)
        # Extrude
        extrudes = features.extrudeFeatures
        distance = adsk.core.ValueInput.createByReal(heightTop + heightChamber)
        extrude1 = extrudes.addSimple(prof, distance, adsk.fusion.FeatureOperations.NewBodyFeatureOperation)
        ### 2. Draw and extrude chambers
        # Create a new sketch
        planes = root.constructionPlanes
        planeInput = planes.createInput()
        planeInput.setByOffset(root.xYConstructionPlane, distance)
        planeChamber = planes.add(planeInput)
        sketchChamber = sketches.add(planeChamber)
        lines = sketchChamber.sketchCurves.sketchLines
        # Draw chambers
        for i in range(numWidth):
            for j in range(numLength):
                xCorner = gap + (width + gap) * i
                yCorner = gap + (length + gap) * j
                lines.addTwoPointRectangle(adsk.core.Point3D.create(xCorner, yCorner, 0), adsk.core.Point3D.create(xCorner + width, yCorner + length, 0))
        # Extrude chambers
        # Cut each chamber rectangle downward into the slab, leaving heightTop
        # of material as the bump skin.
        profileCollection = adsk.core.ObjectCollection.create()
        for i in range(sketchChamber.profiles.count):
            profileCollection.add(sketchChamber.profiles.item(i))
        distance = adsk.core.ValueInput.createByReal(-heightChamber)
        extrude2 = extrudes.addSimple(profileCollection, distance, adsk.fusion.FeatureOperations.CutFeatureOperation)
        ### 3. Draw and extrude bottom layer.
        # Draw a rectangle
        sketchBottom = sketches.add(planeChamber)
        lines = sketchBottom.sketchCurves.sketchLines
        rect = lines.addTwoPointRectangle(adsk.core.Point3D.create(0, 0, 0), adsk.core.Point3D.create(totalWidth, totalLength, 0))
        prof = sketchBottom.profiles.item(0)
        # Extrude.
        distance = adsk.core.ValueInput.createByReal(heightBottom)
        extrude3 = extrudes.addSimple(prof, distance, adsk.fusion.FeatureOperations.JoinFeatureOperation)
    except:
        _ui.messageBox('Failed:\n{}'.format(traceback.format_exc()))
def modelAccordion(width, depth, height, x_axis, y_axis):
    """Model an Accordion widget: an x_axis-by-y_axis grid of chambers, each
    built from `height` stacked base+wrinkle layers, joined into one body and
    shelled hollow so every chamber can be inflated individually.

    :param width: chamber length on the X axis (cm)
    :param depth: chamber length on the Y axis (cm)
    :param height: number of stacked layers per chamber (spinner value)
    :param x_axis: number of chambers along X
    :param y_axis: number of chambers along Y
    """
    # Internal widget values:
    wrinkle_height = 0.5
    wrinkle_spacing = 1
    shell_thickness = .1
    try:
        design = adsk.fusion.Design.cast(_app.activeProduct)
        root = design.rootComponent
        sketches = root.sketches
        features = root.features
        planes = root.constructionPlanes
        for i in range(int(x_axis)):
            for j in range(int(y_axis)):
                bodies = adsk.core.ObjectCollection.create()
                target_body = None
                for k in range(int(height)):
                    # Construction plane at this layer's base height.
                    offsetValue = adsk.core.ValueInput.createByReal(k * wrinkle_height * 2)
                    planeInput = planes.createInput()
                    planeInput.setByOffset(root.xYConstructionPlane, offsetValue)
                    p = planes.add(planeInput)
                    # Sketch the full-size base rectangle of this layer.
                    s = sketches.add(p)
                    lines = s.sketchCurves.sketchLines
                    _ = lines.addTwoPointRectangle(adsk.core.Point3D.create(i * width, j * depth, 0), adsk.core.Point3D.create((i * width) + width, (j * depth) + depth, 0))
                    prof = s.profiles.item(0)
                    # Extrude the base slab.
                    extrudes = features.extrudeFeatures
                    distance = adsk.core.ValueInput.createByReal(wrinkle_height)
                    base = extrudes.addSimple(prof, distance, adsk.fusion.FeatureOperations.NewBodyFeatureOperation)
                    # The first slab is the combine target; the rest are tools.
                    if k == 0:
                        target_body = base.bodies.item(0)
                    else:
                        bodies.add(base.bodies.item(0))
                    # Sketch the inset wrinkle rectangle on the slab's top face.
                    faces = base.endFaces
                    s = sketches.add(faces.item(0))
                    lines = s.sketchCurves.sketchLines
                    _ = lines.addTwoPointRectangle(adsk.core.Point3D.create((i * width) + wrinkle_spacing, (j * depth) + wrinkle_spacing, 0), adsk.core.Point3D.create((i * width) + width - wrinkle_spacing, (j * depth) + depth - wrinkle_spacing, 0))
                    prof = s.profiles.item(1)
                    # Extrude the wrinkle between this slab and the next.
                    extrudes = features.extrudeFeatures
                    distance = adsk.core.ValueInput.createByReal(wrinkle_height)
                    wrinkle = extrudes.addSimple(prof, distance, adsk.fusion.FeatureOperations.NewBodyFeatureOperation)
                    bodies.add(wrinkle.bodies.item(0))
                # Join all of this chamber's layer bodies into one body.
                combineFeatures = features.combineFeatures
                combineFeatureInput = combineFeatures.createInput(target_body, bodies)
                combineFeatureInput.operation = 0  # 0 == JoinFeatureOperation
                combineFeatureInput.isKeepToolBodies = False
                combineFeatureInput.isNewComponent = False
                _ = combineFeatures.add(combineFeatureInput).bodies.item(0)
                # Hollow the chamber out.
                shellFeats = features.shellFeatures
                isTangentChain = True
                #e = adsk.core.ObjectCollection.create()
                #bodies.add(bodies.endFaces.item(0))
                shellFeatureInput = shellFeats.createInput(bodies, isTangentChain)
                # Fix: use the shell_thickness constant declared above instead
                # of repeating the magic number .1 (same value, single source).
                thickness = adsk.core.ValueInput.createByReal(shell_thickness)
                shellFeatureInput.insideThickness = thickness
                shellFeats.add(shellFeatureInput)
    except:
        _ui.messageBox('Failed:\n{}'.format(traceback.format_exc()))
def modelAuxetic(a, b, c, height):
    """Model one cell of an Auxetic widget: an outer polygon is extruded, then
    an inner polygon is cut out of it, leaving a re-entrant frame that changes
    width when inflated.

    Both polygons are drawn as chains of segments; each vertex is the previous
    vertex plus a step of a given length at a multiple of theta (30 deg), so
    the statement order below is significant.

    :param a: outer segment length / cell width (cm)
    :param b: inner gap segment length (mm input)
    :param c: outer gap; shortens selected segments to (a - c) (mm input)
    :param height: extrusion height (cm)
    """
    theta = 0.523599 # 30 degrees in radians
    try:
        design = adsk.fusion.Design.cast(_app.activeProduct)
        rootComp = design.rootComponent
        sketches = rootComp.sketches
        xyPlane = rootComp.xYConstructionPlane
        features = rootComp.features
        s = sketches.add(xyPlane)
        # # Sketching
        # Outer boundary: cumulative point chain starting at the origin.
        lines = s.sketchCurves.sketchLines
        p2x = a * math.cos(theta)
        p2y = a * math.sin(theta)
        lines.addByTwoPoints(adsk.core.Point3D.create(0, 0, 0), adsk.core.Point3D.create(p2x, p2y, 0))
        p3x = p2x + b * math.cos(theta + theta)
        p3y = p2y + b * math.sin(theta + theta)
        lines.addByTwoPoints(adsk.core.Point3D.create(p2x, p2y, 0), adsk.core.Point3D.create(p3x, p3y,0))
        p4x = p3x + a * math.cos(2 * theta + theta)
        p4y = p3y + a * math.sin(2 * theta + theta)
        lines.addByTwoPoints(adsk.core.Point3D.create(p3x, p3y,0), adsk.core.Point3D.create(p4x, p4y, 0))
        p5x = p4x + (a - c) * math.cos(3 * theta + 4 * theta)
        p5y = p4y + (a - c) * math.sin(3 * theta + 4 * theta)
        lines.addByTwoPoints(adsk.core.Point3D.create(p4x, p4y, 0), adsk.core.Point3D.create(p5x, p5y, 0))
        p6x = p5x + b * math.cos(7 * theta - 3 * theta)
        p6y = p5y + b * math.sin(7 * theta - 3 * theta)
        lines.addByTwoPoints(adsk.core.Point3D.create(p5x, p5y, 0), adsk.core.Point3D.create(p6x, p6y, 0))
        p7x = p6x + (a - c) * math.cos(4 * theta - 3 * theta)
        p7y = p6y + (a - c) * math.sin(4 * theta - 3 * theta)
        lines.addByTwoPoints(adsk.core.Point3D.create(p6x, p6y, 0), adsk.core.Point3D.create(p7x, p7y, 0))
        p8x = p7x + a * math.cos(theta + 4 * theta)
        p8y = p7y + a * math.sin(theta + 4 * theta)
        lines.addByTwoPoints(adsk.core.Point3D.create(p7x, p7y, 0), adsk.core.Point3D.create(p8x, p8y, 0))
        p9x = p8x + b * math.cos(5 * theta + theta)
        p9y = p8y + b * math.sin(5 * theta + theta)
        lines.addByTwoPoints(adsk.core.Point3D.create(p8x, p8y, 0), adsk.core.Point3D.create(p9x, p9y, 0))
        p10x = p9x + a * math.cos(6 * theta + theta)
        p10y = p9y + a * math.sin(6 * theta + theta)
        lines.addByTwoPoints(adsk.core.Point3D.create(p9x, p9y, 0), adsk.core.Point3D.create(p10x, p10y, 0))
        p11x = p10x + (a - c) * math.cos(7 * theta + 4 * theta)
        p11y = p10y + (a - c) * math.sin(7 * theta + 4 * theta)
        lines.addByTwoPoints(adsk.core.Point3D.create(p10x, p10y, 0), adsk.core.Point3D.create(p11x, p11y, 0))
        p12x = p11x + b * math.cos(11 * theta - 3 * theta)
        p12y = p11y + b * math.sin(11 * theta - 3 * theta)
        lines.addByTwoPoints(adsk.core.Point3D.create(p11x, p11y, 0), adsk.core.Point3D.create(p12x, p12y, 0))
        p13x = p12x + (a - c) * math.cos(8 * theta - 3 * theta)
        p13y = p12y + (a - c) * math.sin(8 * theta - 3 * theta)
        lines.addByTwoPoints(adsk.core.Point3D.create(p12x, p12y, 0), adsk.core.Point3D.create(p13x, p13y, 0))
        p14x = p13x + a * math.cos(5 * theta + 4 * theta)
        p14y = p13y + a * math.sin(5 * theta + 4 * theta)
        lines.addByTwoPoints(adsk.core.Point3D.create(p13x, p13y, 0), adsk.core.Point3D.create(p14x, p14y, 0))
        p15x = p14x + b * math.cos(9 * theta + theta)
        p15y = p14y + b * math.sin(9 * theta + theta)
        lines.addByTwoPoints(adsk.core.Point3D.create(p14x, p14y, 0), adsk.core.Point3D.create(p15x, p15y, 0))
        p16x = p15x + a * math.cos(10 * theta + theta)
        p16y = p15y + a * math.sin(10 * theta + theta)
        lines.addByTwoPoints(adsk.core.Point3D.create(p15x, p15y, 0), adsk.core.Point3D.create(p16x, p16y, 0))
        p17x = p16x + (a - c) * math.cos(11 * theta + 4 * theta)
        p17y = p16y + (a - c) * math.sin(11 * theta + 4 * theta)
        lines.addByTwoPoints(adsk.core.Point3D.create(p16x, p16y, 0), adsk.core.Point3D.create(p17x, p17y, 0))
        p18x = p17x + b * math.cos(15 * theta - 3 * theta)
        p18y = p17y + b * math.sin(15 * theta - 3 * theta)
        lines.addByTwoPoints(adsk.core.Point3D.create(p17x, p17y, 0), adsk.core.Point3D.create(p18x, p18y, 0))
        p19x = p18x + (a - c) * math.cos(12 * theta - 3 * theta)
        p19y = p18y + (a - c) * math.sin(12 * theta - 3 * theta)
        lines.addByTwoPoints(adsk.core.Point3D.create(p18x, p18y, 0), adsk.core.Point3D.create(p19x, p19y, 0))
        prof = s.profiles.item(0)
        # Extruding
        # Extrude the outer polygon as a new body.
        extrudes = features.extrudeFeatures
        distance = adsk.core.ValueInput.createByReal(height)
        _ = extrudes.addSimple(prof, distance, adsk.fusion.FeatureOperations.NewBodyFeatureOperation)
        # Interior design
        # Inner boundary: second point chain starting at (0, a); the p-variables
        # are reused for the new chain.
        p2x = 0 + (a - c) * math.cos(-theta)
        p2y = a + (a - c) * math.sin(-theta)
        lines.addByTwoPoints(adsk.core.Point3D.create(0, a, 0), adsk.core.Point3D.create(p2x, p2y, 0))
        p3x = p2x + b * math.cos(-theta + 3 * theta)
        p3y = p2y + b * math.sin(-theta + 3 * theta)
        lines.addByTwoPoints(adsk.core.Point3D.create(p2x, p2y, 0), adsk.core.Point3D.create(p3x, p3y, 0))
        p4x = p3x + (a - c) * math.cos(2 * theta + 3 * theta)
        p4y = p3y + (a - c) * math.sin(2 * theta + 3 * theta)
        lines.addByTwoPoints(adsk.core.Point3D.create(p3x, p3y, 0), adsk.core.Point3D.create(p4x, p4y, 0))
        p5x = p4x + b * math.cos(5 * theta - theta)
        p5y = p4y + b * math.sin(5 * theta - theta)
        lines.addByTwoPoints(adsk.core.Point3D.create(p4x, p4y, 0), adsk.core.Point3D.create(p5x, p5y, 0))
        p6x = p5x + (a - c) * math.cos(4 * theta - theta)
        p6y = p5y + (a - c) * math.sin(4 * theta - theta)
        lines.addByTwoPoints(adsk.core.Point3D.create(p5x, p5y, 0), adsk.core.Point3D.create(p6x, p6y, 0))
        p7x = p6x + b * math.cos(3 * theta + 3 * theta)
        p7y = p6y + b * math.sin(3 * theta + 3 * theta)
        lines.addByTwoPoints(adsk.core.Point3D.create(p6x, p6y, 0), adsk.core.Point3D.create(p7x, p7y, 0))
        p8x = p7x + (a - c) * math.cos(6 * theta + 3 * theta)
        p8y = p7y + (a - c) * math.sin(6 * theta + 3 * theta)
        lines.addByTwoPoints(adsk.core.Point3D.create(p7x, p7y, 0), adsk.core.Point3D.create(p8x, p8y, 0))
        p9x = p8x + b * math.cos(9 * theta - theta)
        p9y = p8y + b * math.sin(9 * theta - theta)
        lines.addByTwoPoints(adsk.core.Point3D.create(p8x, p8y, 0), adsk.core.Point3D.create(p9x, p9y, 0))
        p10x = p9x + (a - c) * math.cos(8 * theta - theta)
        p10y = p9y + (a - c) * math.sin(8 * theta - theta)
        lines.addByTwoPoints(adsk.core.Point3D.create(p9x, p9y, 0), adsk.core.Point3D.create(p10x, p10y, 0))
        p11x = p10x + b * math.cos(7 * theta + 3 * theta)
        p11y = p10y + b * math.sin(7 * theta + 3 * theta)
        lines.addByTwoPoints(adsk.core.Point3D.create(p10x, p10y, 0), adsk.core.Point3D.create(p11x, p11y, 0))
        p12x = p11x + (a - c) * math.cos(10 * theta + 3 * theta)
        p12y = p11y + (a - c) * math.sin(10 * theta + 3 * theta)
        lines.addByTwoPoints(adsk.core.Point3D.create(p11x, p11y, 0), adsk.core.Point3D.create(p12x, p12y, 0))
        lines.addByTwoPoints(adsk.core.Point3D.create(p12x, p12y, 0), adsk.core.Point3D.create(0, a, 0))
        prof = s.profiles.item(1)
        # Extruding
        # Cut the inner polygon out of the extruded body.
        extrudes = features.extrudeFeatures
        distance = adsk.core.ValueInput.createByReal(height)
        _ = extrudes.addSimple(prof, distance, adsk.fusion.FeatureOperations.CutFeatureOperation)
    except:
        _ui.messageBox('Failed:\n{}'.format(traceback.format_exc()))
# Transcribed from original Visual Basic scripts by Clayton Lewis and Lawrence Hipps
import pandas as pd
import scipy
import numpy as np
import dask as dd
#Public Module EC
import numba
# https://stackoverflow.com/questions/47594932/row-wise-interpolation-in-dataframe-using-interp1d
# https://krstn.eu/fast-linear-1D-interpolation-with-numba/
# https://scikit-learn.org/stable/modules/generated/sklearn.covariance.EmpiricalCovariance.html
# https://pythonawesome.com/maximum-covariance-analysis-in-python/
# https://pyxmca.readthedocs.io/en/latest/quickstart.html#maximum-covariance-analysis
# https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.DataFrame.cov.html
# https://pandas.pydata.org/pandas-docs/stable/user_guide/enhancingperf.html
# https://www.statsmodels.org/devel/generated/statsmodels.tsa.stattools.acovf.html
# https://www.statsmodels.org/devel/generated/statsmodels.tsa.stattools.ccovf.html
# https://python-advanced.quantecon.org/index_time_series_models.html
class CalcFluxWithKH20(object):
    """Determines H20 flux from input weather data, including a KH20 sensor, by the eddy covariance method.

    :param df: dataframe Weather Parameters for the Eddy Covariance Method; must be time-indexed and include Ux, Uy, Uz, Pr, Ea, and LnKH
    :return: Atmospheric Fluxes
    :notes:
    No High Pass Filtering or Trend Removal are Applied to the Data
    Time Series Data Are Moved Forward and Backward to Find Maximum Covariance Values
    Air Temperature and Sensible Heat Flux are Estimated From Sonic Temperature and Wind Data
    Other Corrections Include Transducer Shadowing, Traditional Coordinate Rotation, High Frequency Corrections, and WPL
    """

    def __init__(self, **kwargs):
        # Physical constants.
        self.Rv = 461.51  # Water Vapor Gas Constant, J/[kg*K]
        self.Ru = 8.314  # Universal Gas Constant, J/[mol*K]
        self.Cpd = 1005  # Specific Heat of Dry Air, J/[kg*K]
        self.Rd = 287.05  # Dry Air Gas Constant, J/[kg*K]
        self.Co = 0.21  # Molar Fraction of Oxygen in the Atmosphere
        self.Mo = 0.032  # Molar Mass of Oxygen (gO2/mole)
        # KH20 sensor geometry and calibration.
        self.XKH20 = 1.412  # Path Length of KH20, cm
        self.XKwC1 = -0.152214126  # First Order Coefficient in Vapor Density-KH20 Output Relationship, cm
        self.XKwC2 = -0.001667836  # Second Order Coefficient in Vapor Density-KH20 Output Relationship, cm
        self.directionKH20_U = 180
        self.UHeight = 3  # Height of Sonic Anemometer above Ground Surface, m
        self.PathKH20_U = 0.1  # Separation Distance Between Sonic Anemometer and KH20, m
        self.lag = 10  # number of lags to consider when maximizing covariance
        self.direction_bad_min = 0  # Clockwise Orientation from DirectionKH20_U
        self.direction_bad_max = 360  # Clockwise Orientation from DirectionKH20_U
        self.Kw = 1  # Extinction Coefficient of Water (m^3/[g*cm]) - instrument calibration
        self.Ko = -0.0045  # Extinction Coefficient of Oxygen (m^3/[g*cm]) - derived experimentally
        # Fields to run through the despiking filter (when present in df).
        self.despikefields = ['Ux', 'Uy', 'Uz', 'Ts', 'volt_KH20', 'Pr', 'Ta', 'Rh']
        # Allow any of the defaults above to be overridden by keyword arguments.
        # https://stackoverflow.com/questions/60418497/how-do-i-use-kwargs-in-python-3-class-init-function
        self.__dict__.update(kwargs)
        # Descriptions and units of the parameters this class reads/derives.
        self.parameters = {
            'Ea': ['Actual Vapor Pressure', 'kPa'],
            'LnKH': ['Natural Log of Krypton Hygrometer Output', 'mV'],
            'Pr': ['Air Pressure', 'Pa'],
            'Ta': ['Air Temperature', 'K'],
            'Ts': ['Sonic Temperature', 'K'],
            'Ux': ['X Component of Wind Speed', 'm/s'],
            'Uy': ['Y Component of Wind Speed', 'm/s'],
            'Uz': ['Z Component of Wind Speed', 'm/s'],
            'E': ['Vapor Pressure', 'kPa'],
            'Q': ['Specific Humidity', 'unitless'],
            'pV': ['Water Vapor Density', 'kg/m^3'],
            'Sd': ['Entropy of Dry Air', 'J/K'],
            'Tsa': ['Absolute Air Temperature Derived from Sonic Temperature', 'K'],
        }

    def runall(self, df):
        """Run the full eddy-covariance pipeline on a time-indexed DataFrame.

        Returns a pandas Series of the computed fluxes and diagnostics
        (see self.columns for the field names).
        """
        df = self.renamedf(df)
        if 'Ea' in df.columns:
            pass
        else:
            df['Ea'] = self.tetens(df['Ta'].to_numpy())
        if 'LnKH' in df.columns:
            pass
        else:
            df['LnKH'] = np.log(df['volt_KH20'].to_numpy())
        for col in self.despikefields:
            if col in df.columns:
                df[col] = self.despike(df[col].to_numpy(), nstd=4.5)
        df['Ts'] = self.convert_CtoK(df['Ts'].to_numpy())
        df['Ux'], df['Uy'], df['Uz'] = self.fix_csat(df['Ux'].to_numpy(),
                                                     df['Uy'].to_numpy(),
                                                     df['Uz'].to_numpy())
        # Calculate Sums and Means of Parameter Arrays
        df = self.calculated_parameters(df)
        # Calculate the Correct XKw Value for KH20
        XKw = self.XKwC1 + 2 * self.XKwC2 * (df['pV'].mean() * 1000.)
        self.Kw = XKw / self.XKH20
        # Calculate Covariances (Maximum Furthest From Zero With Sign in Lag Period)
        CovTs_Ts = df[['Ts', 'Ts']].cov().iloc[0, 0]  # location index needed because of same fields
        CovUx_Uy = df[['Ux', 'Uy']].cov().loc['Ux', 'Uy']  # CalcCovariance(IWP.Ux, IWP.Uy)
        CovUx_Uz = df[['Ux', 'Uz']].cov().loc['Ux', 'Uz']  # CalcCovariance(IWP.Ux, IWP.Uz)
        CovUy_Uz = df[['Uy', 'Uz']].cov().loc['Uy', 'Uz']  # CalcCovariance(IWP.Uy, IWP.Uz)
        CovTs_Q = self.calc_max_covariance(df, 'Ts', 'Q', self.lag)[0]
        CovUx_LnKH = self.calc_max_covariance(df, 'Ux', 'LnKH', self.lag)[0]
        CovUx_Q = self.calc_max_covariance(df, 'Ux', 'Q', self.lag)[0]
        CovUx_Sd = self.calc_max_covariance(df, 'Ux', 'Sd', self.lag)[0]
        CovUx_Ts = self.calc_max_covariance(df, 'Ux', 'Ts', self.lag)[0]
        CovUy_LnKH = self.calc_max_covariance(df, 'Uy', 'LnKH', self.lag)[0]
        CovUy_Q = self.calc_max_covariance(df, 'Uy', 'Q', self.lag)[0]
        CovUy_Sd = self.calc_max_covariance(df, 'Uy', 'Sd', self.lag)[0]
        CovUy_Ts = self.calc_max_covariance(df, 'Uy', 'Ts', self.lag)[0]
        CovUz_LnKH = self.calc_max_covariance(df, 'Uz', 'LnKH', self.lag)[0]
        CovUz_Q = self.calc_max_covariance(df, 'Uz', 'Q', self.lag)[0]
        CovUz_Sd = self.calc_max_covariance(df, 'Uz', 'Sd', self.lag)[0]
        CovUz_Ts = self.calc_max_covariance(df, 'Uz', 'Ts', self.lag)[0]
        # Traditional Coordinate Rotation
        cosν, sinν, sinTheta, cosTheta, Uxy, Uxyz = self.coord_rotation(df)
        # Find the Mean Squared Error of Velocity Components and Humidity
        UxMSE = self.calc_MSE(df['Ux'])
        UyMSE = self.calc_MSE(df['Uy'])
        UzMSE = self.calc_MSE(df['Uz'])
        QMSE = self.calc_MSE(df['Q'])
        # Correct Covariances for Coordinate Rotation
        Uz_Ts = CovUz_Ts * cosTheta - CovUx_Ts * sinTheta * cosν - CovUy_Ts * sinTheta * sinν
        if np.abs(Uz_Ts) >= np.abs(CovUz_Ts):
            CovUz_Ts = Uz_Ts
        Uz_LnKH = CovUz_LnKH * cosTheta - CovUx_LnKH * sinTheta * cosν - CovUy_LnKH * sinν * sinTheta
        if np.abs(Uz_LnKH) >= np.abs(CovUz_LnKH):
            CovUz_LnKH = Uz_LnKH
        CovUx_Q = CovUx_Q * cosTheta * cosν + CovUy_Q * cosTheta * sinν + CovUz_Q * sinTheta
        CovUy_Q = CovUy_Q * cosν - CovUx_Q * sinν
        CovUz_Q = CovUz_Q * cosTheta - CovUx_Q * sinTheta * cosν - CovUy_Q * sinν * sinTheta
        CovUx_Uz = CovUx_Uz * cosν * (cosTheta**2 - sinTheta**2) - 2 * CovUx_Uy * sinTheta * cosTheta * sinν * cosν + CovUy_Uz * sinν * (cosTheta**2 - sinTheta**2) - UxMSE * sinTheta * cosTheta * cosν**2 - UyMSE * sinTheta * cosTheta * sinν**2 + UzMSE * sinTheta * cosTheta
        CovUy_Uz = CovUy_Uz * cosTheta * cosν - CovUx_Uz * cosTheta * sinν - CovUx_Uy * sinTheta * (cosν**2 - sinν**2) + UxMSE * sinTheta * sinν * cosν - UyMSE * sinTheta * sinν * cosν
        CovUz_Sd = CovUz_Sd * cosTheta - CovUx_Sd * sinTheta * cosν - CovUy_Sd * sinν * sinTheta
        Uxy_Uz = np.sqrt(CovUx_Uz**2 + CovUy_Uz**2)
        Ustr = np.sqrt(Uxy_Uz)
        # Find Average Air Temperature From Average Sonic Temperature
        Tsa = self.calc_Tsa(df['Ts'].mean(), df['Pr'].mean(), df['pV'].mean())
        # Calculate the Latent Heat of Vaporization
        lamb = (2500800 - 2366.8 * (self.convert_KtoC(Tsa)))
        # Determine Vertical Wind and Water Vapor Density Covariance
        Uz_pV = (CovUz_LnKH / XKw) / 1000
        # Calculate the Correct Average Values of Some Key Parameters
        Cp = self.Cpd * (1 + 0.84 * df['Q'].mean())
        pD = (df['Pr'].mean() - df['E'].mean()) / (self.Rd * Tsa)
        p = pD + df['pV'].mean()
        # Calculate Variance of Air Temperature From Variance of Sonic Temperature
        StDevTa = np.sqrt(CovTs_Ts - 1.02 * df['Ts'].mean() * CovTs_Q - 0.2601 * QMSE * df['Ts'].mean()**2)
        Uz_Ta = CovUz_Ts - 0.07 * lamb * Uz_pV / (p * Cp)
        # Determine Saturation Vapor Pressure of the Air Using Highly Accurate Wexler's Equations Modified by Hardy
        Td = self.calc_Td(df['E'].mean())
        D = self.calc_Es(Tsa) - df['E'].mean()
        S = (self.calc_Q(df['Pr'].mean(), self.calc_Es(Tsa + 1)) - self.calc_Q(df['Pr'].mean(), self.calc_Es(Tsa - 1))) / 2
        # 'Determine Wind Direction
        WindDirection = np.arctan(df['Uy'].mean() / df['Ux'].mean()) * 180 / np.pi
        if df['Ux'].mean() < 0:
            WindDirection += 180 * np.sign(df['Uy'].mean())
        direction = self.directionKH20_U - WindDirection
        if direction < 0:
            direction += 360
        # 'Calculate the Lateral Separation Distance Projected Into the Mean Wind Direction
        pathlen = self.PathKH20_U * np.abs(np.sin((np.pi / 180) * direction))
        # 'Calculate the Average and Standard Deviations of the Rotated Velocity Components
        StDevUz = df['Uz'].std()
        UMean = df['Ux'].mean() * cosTheta * cosν + df['Uy'].mean() * cosTheta * sinν + df['Uz'].mean() * sinTheta
        # 'Frequency Response Corrections (Massman, 2000 & 2001)
        tauB = (3600) / 2.8
        tauEKH20 = np.sqrt((0.01 / (4 * UMean)) ** 2 + (pathlen / (1.1 * UMean)) ** 2)
        tauETs = np.sqrt((0.1 / (8.4 * UMean)) ** 2)
        tauEMomentum = np.sqrt((0.1 / (5.7 * UMean)) ** 2 + (0.1 / (2.8 * UMean)) ** 2)
        # 'Calculate ζ and Correct Values of Uᕽ and Uz_Ta
        L = self.calc_L(Ustr, Tsa, Uz_Ta)
        alpha, X = self.calc_AlphX(L)
        fX = X * UMean / self.UHeight
        B = 2 * np.pi * fX * tauB
        momentum = 2 * np.pi * fX * tauEMomentum
        _Ts = 2 * np.pi * fX * tauETs
        _KH20 = 2 * np.pi * fX * tauEKH20
        Ts = self.correct_spectral(B, alpha, _Ts)
        Uxy_Uz /= self.correct_spectral(B, alpha, momentum)
        Ustr = np.sqrt(Uxy_Uz)
        # 'Recalculate L With New Uᕽ and Uz_Ta, and Calculate High Frequency Corrections
        L = self.calc_L(Ustr, Tsa, Uz_Ta / Ts)
        alpha, X = self.calc_AlphX(L)
        Ts = self.correct_spectral(B, alpha, _Ts)
        KH20 = self.correct_spectral(B, alpha, _KH20)
        # 'Correct the Covariance Values
        Uz_Ta /= Ts
        Uz_pV /= KH20
        Uxy_Uz /= self.correct_spectral(B, alpha, momentum)
        Ustr = np.sqrt(Uxy_Uz)
        CovUz_Sd /= KH20
        exchange = ((p * Cp) / (S + Cp / lamb)) * CovUz_Sd
        # 'KH20 Oxygen Correction
        Uz_pV += self.correct_KH20(Uz_Ta, df['Pr'].mean(), Tsa)
        # 'Calculate New H and LE Values
        H = p * Cp * Uz_Ta
        lambdaE = lamb * Uz_pV
        # 'Webb, Pearman and Leuning Correction
        lambdaE = lamb * p * Cp * Tsa * (1.0 + (1.0 / 0.622) * (df['pV'].mean() / pD)) * (Uz_pV + (df['pV'].mean() / Tsa) * Uz_Ta) / (p * Cp * Tsa + lamb * (1.0 + (1 / 0.622) * (df['pV'].mean() / pD)) * df['pV'].mean() * 0.07)
        # 'Finish Output
        Tsa = self.convert_KtoC(Tsa)
        Td = self.convert_KtoC(Td)
        zeta = self.UHeight / L
        ET = lambdaE * self.get_Watts_to_H2O_conversion_factor(Tsa, (df.last_valid_index() - df.first_valid_index()) / pd.to_timedelta(1, unit='D'))
        # 'Out.Parameters = CWP
        self.columns = ['Ta', 'Td', 'D', 'Ustr', 'zeta', 'H', 'StDevUz', 'StDevTa', 'direction', 'exchange', 'lambdaE', 'ET', 'Uxy']
        self.out = [Tsa, Td, D, Ustr, zeta, H, StDevUz, StDevTa, direction, exchange, lambdaE, ET, Uxy]
        return pd.Series(data=self.out, index=self.columns)

    def calc_LnKh(self, mvolts):
        """Natural log of the KH20 output (mV series)."""
        return np.log(mvolts.to_numpy())

    def renamedf(self, df):
        """Map common logger column names onto the names this class expects.

        NOTE(review): both 'TA_1_1_1' and 't_hmp' map to 'Ta'; if a frame
        ever carries both, this produces duplicate 'Ta' columns — confirm
        inputs only ever have one of them.
        """
        return df.rename(columns={'T_SONIC': 'Ts',
                                  'TA_1_1_1': 'Ta',
                                  'amb_press': 'Pr',
                                  'RH_1_1_1': 'Rh',
                                  't_hmp': 'Ta',
                                  'e_hmp': 'Ea',
                                  'kh': 'volt_KH20'
                                  })

    def despike(self, arr, nstd=4.5):
        """Removes spikes from parameter within a specified deviation from the mean.

        Points beyond nstd standard deviations of the mean are replaced by
        linear interpolation from their neighbors.
        """
        stdd = np.nanstd(arr) * nstd
        avg = np.nanmean(arr)
        avgdiff = stdd - np.abs(arr - avg)
        # np.nan instead of the np.NaN alias, which was removed in NumPy 2.0.
        y = np.where(avgdiff >= 0, arr, np.nan)
        nans, x = np.isnan(y), lambda z: z.nonzero()[0]
        if len(x(~nans)) > 0:
            y[nans] = np.interp(x(nans), x(~nans), y[~nans])
        return y

    def calc_Td(self, E):
        """Dew point temperature (K) from vapor pressure via Hardy's rational fit."""
        c0 = 207.98233
        c1 = -20.156028
        c2 = 0.46778925
        c3 = -0.0000092288067
        d0 = 1
        d1 = -0.13319669
        d2 = 0.0056577518
        d3 = -0.000075172865
        lne = np.log(E)
        return (c0 + c1 * lne + c2 * lne ** 2 + c3 * lne ** 3) / (d0 + d1 * lne + d2 * lne ** 2 + d3 * lne ** 3)

    def calc_Q(self, P, E):
        """Specific humidity (unitless) from pressure P and vapor pressure E."""
        return (0.622 * E) / (P - 0.378 * E)

    def calc_E(self, pV, T):
        """Vapor pressure from water vapor density and temperature (ideal gas)."""
        return pV * self.Rv * T

    def calc_L(self, Ust, Tsa, Uz_Ta):
        """Monin-Obukhov length from friction velocity, temperature, and heat flux."""
        # removed negative sign
        return -1 * (Ust ** 3) * Tsa / (9.8 * 0.4 * Uz_Ta)

    def calc_Tsa(self, Ts, P, pV, Rv=461.51):
        """Absolute air temperature derived from sonic temperature.

        NOTE(review): E uses self.Rv while the denominator uses the Rv
        parameter; these only agree when Rv is left at its default —
        confirm the parameter is intentional.
        """
        E = pV * self.Rv * Ts
        return -0.01645278052 * (
            -500 * P - 189 * E + np.sqrt(250000 * P ** 2 + 128220 * E * P + 35721 * E ** 2)) / pV / Rv

    def calc_AlphX(self, L):
        """Stability-dependent (alpha, X) coefficients for spectral corrections."""
        if (self.UHeight / L) <= 0:
            alph = 0.925
            X = 0.085
        else:
            alph = 1
            X = 2 - 1.915 / (1 + 0.5 * self.UHeight / L)
        return alph, X

    def calc_Es(self, T):
        """Saturation vapor pressure via Wexler's equations modified by Hardy."""
        g0 = -2836.5744
        g1 = -6028.076559
        g2 = 19.54263612
        g3 = -0.02737830188
        g4 = 0.000016261698
        g5 = 0.00000000070229056
        g6 = -0.00000000000018680009
        g7 = 2.7150305
        return np.exp(
            g0 * T ** (-2) + g1 * T ** (-1) + g2 + g3 * T + g4 * T ** 2 + g5 * T ** 3 + g6 * T ** 4 + g7 * np.log(T))

    def calc_cov(self, p1, p2):
        """Sample covariance of two equal-length sequences (ddof=1)."""
        sumproduct = 0
        for i in range(len(p1)):
            sumproduct += p1[i] * p2[i]
        return (sumproduct - (np.sum(p1) * np.sum(p2)) / len(p1)) / (len(p1) - 1)

    def calc_MSE(self, y):
        """Mean squared deviation from the mean (population variance)."""
        return np.mean((y - np.mean(y)) ** 2)

    def convert_KtoC(self, T):
        """Kelvin to Celsius."""
        return T - 273.16

    def convert_CtoK(self, T):
        """Celsius to Kelvin."""
        return T + 273.16

    def correct_KH20(self, Uz_Ta, P, T):
        """Calculates an additive correction for the KH20 due to cross sensitivity between H20 and 02 molecules.

        Uz_Ta = Covariance of Vertical Wind Component and Air Temperature (m*K/s)
        P = Air Pressure (Pa)
        T = Air Temperature (K)
        Kw = Extinction Coefficient of Water (m^3/[g*cm]) -instrument calibration
        Ko = Extinction Coefficient of Oxygen (m^3/[g*cm]) -derived experimentally
        returns KH20 Oxygen Correction
        """
        return ((self.Co * self.Mo * P) / (self.Ru * T ** 2)) * (self.Ko / self.Kw) * Uz_Ta

    def correct_spectral(self, B, alpha, varib):
        """Massman-style spectral correction factor for a given time constant term."""
        B_alpha = B ** alpha
        V_alpha = varib ** alpha
        return (B_alpha / (B_alpha + 1)) * (B_alpha / (B_alpha + V_alpha)) * (1 / (V_alpha + 1))

    def get_Watts_to_H2O_conversion_factor(self, temperature, day_fraction):
        """Conversion factor from W/m^2 of latent heat to inches of water over day_fraction."""
        to_inches = 25.4
        return (self.calc_water_density(temperature) * 86.4 * day_fraction) / (
            self.calc_latent_heat_of_vaporization(temperature) * to_inches)

    def calc_water_density(self, temperature):
        """Density of liquid water (kg/m^3) as a function of temperature (C)."""
        d1 = -3.983035  # °C
        d2 = 301.797  # °C
        d3 = 522528.9  # °C2
        d4 = 69.34881  # °C
        d5 = 999.97495  # kg/m3
        return d5 * (1 - (temperature + d1) ** 2 * (temperature + d2) / (d3 * (temperature + d4)))  # 'kg/m^3

    def calc_latent_heat_of_vaporization(self, temperature):
        """Latent heat of vaporization of water (J/kg) from temperature (C)."""
        l0 = 2500800
        l1 = -2360
        l2 = 1.6
        l3 = -0.06
        return l0 + l1 * temperature + l2 * temperature ** 2 + l3 * temperature ** 3  # 'J/kg

    def fix_csat(self, Ux, Uy, Uz):
        """Apply the CSAT3 transducer-shadowing correction to wind components.

        Transforms into the instrument axes, rescales each axis by the
        shadowing factor, then transforms back. Returns corrected
        (Ux, Uy, Uz) lists.
        """
        CSAT3Inverse = [[-0.5, 0, 0.86602540378444],
                        [0.25, 0.4330127018922, 0.86602540378444],
                        [0.25, -0.4330127018922, 0.86602540378444]]
        CSAT3Transform = [[-1.3333333333333, 0.66666666666666, 0.66666666666666],
                          [0, 1.1547005383792, -1.1547005383792],
                          [0.3849001794597, 0.3849001794597, 0.3849001794597]]
        Ux_out = []
        Uy_out = []
        Uz_out = []
        for i in range(len(Ux)):
            u = {}
            u[0] = CSAT3Inverse[0][0] * Ux[i] + CSAT3Inverse[0][1] * Uy[i] + CSAT3Inverse[0][2] * Uz[i]
            u[1] = CSAT3Inverse[1][0] * Ux[i] + CSAT3Inverse[1][1] * Uy[i] + CSAT3Inverse[1][2] * Uz[i]
            u[2] = CSAT3Inverse[2][0] * Ux[i] + CSAT3Inverse[2][1] * Uy[i] + CSAT3Inverse[2][2] * Uz[i]
            scalar = (Ux[i] ** 2. + Uy[i] ** 2. + Uz[i] ** 2.) ** 0.5
            u[0] = u[0] / (0.68 + 0.32 * np.sin(np.arccos(u[0] / scalar)))
            u[1] = u[1] / (0.68 + 0.32 * np.sin(np.arccos(u[1] / scalar)))
            u[2] = u[2] / (0.68 + 0.32 * np.sin(np.arccos(u[2] / scalar)))
            Ux_out.append(CSAT3Transform[0][0] * u[0] + CSAT3Transform[0][1] * u[1] + CSAT3Transform[0][2] * u[2])
            Uy_out.append(CSAT3Transform[1][0] * u[0] + CSAT3Transform[1][1] * u[1] + CSAT3Transform[1][2] * u[2])
            Uz_out.append(CSAT3Transform[2][0] * u[0] + CSAT3Transform[2][1] * u[1] + CSAT3Transform[2][2] * u[2])
        return Ux_out, Uy_out, Uz_out

    def calculated_parameters(self, df):
        """Add derived columns (pV, Tsa, E, Q, Sd) to the DataFrame and return it."""
        df['pV'] = self.calc_pV(df['Ea'], df['Ts'])
        df['Tsa'] = self.calc_Tsa(df['Ts'], df['Pr'], df['pV'])
        df['E'] = self.calc_E(df['pV'], df['Tsa'])
        df['Q'] = self.calc_Q(df['Pr'], df['E'])
        df['Sd'] = self.calc_Q(df['Pr'], self.calc_Es(df['Tsa'])) - df['Q']
        return df

    def calc_pV(self, Ea, Ts):
        """Water vapor density (kg/m^3) from vapor pressure (kPa) and temperature (K)."""
        return (Ea * 1000.0) / (self.Rv * Ts)

    def calc_max_covariance(self, df, colx, coly, lags=10):
        """Covariance of df[colx] with df[coly] furthest from zero over shifts.

        df[coly] is shifted by each integer lag in [-lags, lags); returns
        (covariance, lag) for the shift with the largest absolute covariance.
        Unlike the previous implementation, the caller's DataFrame is not
        modified (no temporary shifted columns are left behind).
        """
        dfcov = []
        for i in np.arange(-1 * lags, lags):
            # Series.cov gives the same pairwise-complete covariance the old
            # DataFrame.cov().loc[...] lookup did, without temp columns.
            dfcov.append(df[colx].cov(df[coly].shift(i)))
        abscov = np.abs(dfcov)
        maxabscov = np.max(abscov)
        try:
            maxlagindex = np.where(abscov == maxabscov)[0][0]
            lagno = maxlagindex - lags
            maxcov = dfcov[maxlagindex]
        except IndexError:
            # All covariances were NaN (no overlapping valid data), so no
            # maximum exists; fall back to the zero-lag entry. The old code
            # hard-coded dfcov[10], which is wrong (or raises IndexError)
            # whenever lags != 10.
            lagno = 0
            maxcov = dfcov[lags]
        return maxcov, lagno

    def coord_rotation(self, df, Ux='Ux', Uy='Uy', Uz='Uz'):
        """Traditional Coordinate Rotation

        Returns (cos nu, sin nu, sin theta, cos theta, Uxy, Uxyz) computed
        from the mean wind components.
        """
        xmean = df[Ux].mean()
        ymean = df[Uy].mean()
        zmean = df[Uz].mean()
        Uxy = np.sqrt(xmean ** 2 + ymean ** 2)
        Uxyz = np.sqrt(xmean ** 2 + ymean ** 2 + zmean ** 2)
        cosν = xmean / Uxy
        sinν = ymean / Uxy
        sinTheta = zmean / Uxyz
        cosTheta = Uxy / Uxyz
        return cosν, sinν, sinTheta, cosTheta, Uxy, Uxyz

    def dayfrac(self, df):
        """Fraction of a day spanned by the DataFrame's time index."""
        return (df.last_valid_index() - df.first_valid_index()) / pd.to_timedelta(1, unit='D')

    def tetens(self, t, a=0.611, b=17.502, c=240.97):
        """Tetens formula for computing the
        saturation vapor pressure of water from temperature; eq. 3.8
        t = temperature (C)
        a = constant (kPa)
        b = constant (dimensionless)
        c = constant (C)
        returns saturation vapor pressure (kPa)
        """
        return a * np.exp((b * t) / (t + c))
|
import json
import os
import shutil
import tempfile
import logging
from debpackager.utils.pom import Pom
from sh import mkdir
import pytest
from debpackager.main import get_project_type, add_missing_params
logging.basicConfig(level=logging.WARNING)
@pytest.mark.unit
class TestMain(object):
    """Unit tests for debpackager.main helpers: get_project_type() and
    add_missing_params(), run against a throwaway project skeleton."""

    def setup_method(self, method):
        # Build a fresh temp project directory with a minimal project.json
        # before every test; torn down in teardown_method.
        self.tmp_dir = tempfile.mkdtemp()
        mkdir(self.tmp_dir + '/' + 'folderA').wait()
        mkdir(self.tmp_dir + '/' + 'folderB').wait()
        mkdir(self.tmp_dir + '/' + 'debian').wait()
        with open(self.tmp_dir + '/' + 'project.json', 'w') as proj_file:
            project_conf = '''{
"version": "0.1.0",
"type": "python",
"debians": [{"name":"test-proj", "install_path": "/opt/test-proj"}],
"deb_dependencies" : [],
"excludes" : []
}
'''
            proj_file.write(project_conf)

    def teardown_method(self, method):
        # Remove the per-test project directory.
        shutil.rmtree(self.tmp_dir)

    def test_get_project_type_python(self):
        """A requirements.txt file marks the project type as 'python'."""
        with open(self.tmp_dir + '/' + 'requirements.txt', 'w') as req_file:
            req_file.write('something')
        assert get_project_type(self.tmp_dir) == 'python'

    def test_get_project_type_general(self):
        """With no python markers present, the type falls back to 'general'."""
        assert get_project_type(self.tmp_dir) == 'general'

    def test_get_project_type_wrong_path(self):
        """A nonexistent project path raises with a descriptive message."""
        with pytest.raises(Exception) as excinfo:
            get_project_type('/bad/path')
        assert str(excinfo.value) == 'Could not find project path: /bad/path'

    def test_add_missing_params_with_project_name(self):
        """An explicit 'name' in project.json takes precedence."""
        # Rewrite project.json in place with a 'name' field added.
        with open(self.tmp_dir + '/' + 'project.json', 'r+') as proj_file:
            proj = json.loads(proj_file.read())
            proj['name'] = 'test_proj_name'
            proj_file.seek(0)
            proj_file.write(json.dumps(proj))
            proj_file.truncate()
        pom = Pom(project_path=self.tmp_dir)
        os.chdir(self.tmp_dir)
        args = DotDict()
        args['project_path'] = os.getcwd()
        args['project_type'] = None
        result = add_missing_params(args, pom)
        assert result.project_name == 'test_proj_name'

    def test_add_missing_params_no_project_name(self):
        """Without a 'name' field, the directory basename is used."""
        pom = Pom(project_path=self.tmp_dir)
        os.chdir(self.tmp_dir)
        args = DotDict()
        args['project_path'] = os.getcwd()
        args['project_type'] = None
        result = add_missing_params(args, pom)
        assert result.project_name == os.path.basename(self.tmp_dir)

    def test_add_missing_params_with_project_type(self):
        """Type resolution falls through to detection when args give None."""
        pom = Pom(project_path=self.tmp_dir)
        os.chdir(self.tmp_dir)
        args = DotDict()
        args['project_path'] = os.getcwd()
        args['project_type'] = None
        result = add_missing_params(args, pom)
        assert result.project_type == 'general'

    def test_add_missing_params_no_project_type_general(self):
        """With 'type' removed from project.json, detection yields 'general'."""
        with open(self.tmp_dir + '/' + 'project.json', 'r+') as proj_file:
            proj = json.loads(proj_file.read())
            del proj['type']
            proj_file.seek(0)
            proj_file.write(json.dumps(proj))
            proj_file.truncate()
        pom = Pom(project_path=self.tmp_dir)
        os.chdir(self.tmp_dir)
        args = DotDict()
        args['project_path'] = os.getcwd()
        args['project_type'] = None
        result = add_missing_params(args, pom)
        assert result.project_type == 'general'

    def test_add_missing_params_no_project_type_python(self):
        """With 'type' removed but requirements.txt present, detection yields 'python'."""
        with open(self.tmp_dir + '/' + 'project.json', 'r+') as proj_file:
            proj = json.loads(proj_file.read())
            del proj['type']
            proj_file.seek(0)
            proj_file.write(json.dumps(proj))
            proj_file.truncate()
        with open(self.tmp_dir + '/' + 'requirements.txt', 'w') as req_file:
            req_file.write('something')
        pom = Pom(project_path=self.tmp_dir)
        os.chdir(self.tmp_dir)
        args = DotDict()
        args['project_path'] = os.getcwd()
        args['project_type'] = None
        result = add_missing_params(args, pom)
        assert result.project_type == 'python'
class DotDict(dict):
    """Dict whose keys are also readable as attributes (d.key == d['key'])."""

    def __getattr__(self, name):
        # Translate a missing key into AttributeError so the attribute
        # protocol (hasattr, getattr-with-default) behaves correctly.
        # The original raised KeyError here, which broke hasattr().
        try:
            return self[name]
        except KeyError:
            raise AttributeError(name) from None
|
from util import loader
from wrappers.update import Update
from games.base_class import Game
class PathOfExile(Game):
    """Scanner that yields news Updates from the Path of Exile forums."""

    def __init__(self):
        self.alert_level = 3  # how many forums (by priority) to scan
        self.ignore_terms = 'sale, showcase, interview'  # comma-separated skip list
        super().__init__('Path of Exile', homepage='https://pathofexile.com')

    def scan(self):
        """Yield an Update for the newest non-sticky thread of each tracked forum."""
        forums = [
            'https://www.pathofexile.com/forum/view-forum/366/orderby/create-time',
            'https://www.pathofexile.com/forum/view-forum/419/orderby/create-time',
            'https://www.pathofexile.com/forum/view-forum/54/orderby/create-time'
        ]  # In order of importance. alert_level is a cutoff here.
        for i, forum in enumerate(forums, start=1):
            if i > self.alert_level:
                break  # !cover
            soup = loader.soup(forum)
            table = soup.find(attrs={"class": 'viewForumTable'})
            elems = table.find('tbody').find_all('tr')
            # Skip to first non-sticky thread.
            elem = None
            for e in elems:
                if not e.find(attrs={'class': 'sticky'}):
                    elem = e
                    break
            if elem is None:
                # Bug fix: previously fell through to elem.find(...) and
                # raised AttributeError when every listed thread was sticky.
                continue
            ttl = elem.find(attrs={'class': 'title'})
            _title = ttl.text
            link = ttl.find('a')
            _url = 'https://www.pathofexile.com' + link["href"]
            # Skip threads whose title contains any of the ignore terms.
            if any(s.lower().strip() in _title.lower().strip() for s in self.ignore_terms.split(',')):
                continue  # !cover
            page = loader.soup(_url)
            dsc = page.find(attrs={'class': 'newsPost'})
            if not dsc:
                dsc = page.find(attrs={"class": 'content-container'}).find(attrs={'class': 'content'})
            _desc = dsc.getText('\n')
            yield Update(game=self, update_name=_title, post_url=_url, desc=_desc, color="#af6025")
# Manual smoke test: scan the PoE forums and print every discovered update.
if __name__ == "__main__":
    lol = PathOfExile()
    for u in lol.scan():
        print(u)
|
from Modularity.predict import predict
from seq2seq.helpers import sequence_accuracy
from Modularity.nn import get_exact_match
import torch
import torch.nn as nn
from typing import Iterator
from typing import Dict
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
def evaluate(data_iterator: Iterator, model: nn.Module, max_decoding_steps=-1, max_examples_to_evaluate=None) -> Dict[str, float]:
    """Run the model over a data iterator and aggregate accuracy metrics.

    :param data_iterator: batch iterator handed through to predict()
    :param model: module exposing target_keys, target_keys_to_pad, and
        remove_start_of_sequence
    :param max_decoding_steps: decoding cap passed through to predict()
    :param max_examples_to_evaluate: optional cap passed through to predict()
    :return: dict mapping each plain target to its accuracy (%), and each
        padded sequence target to "<name>_accuracy" and "<name>_exact_match" (%)
    """
    # Element-level counters for the plain (non-sequence) targets.
    metrics = {target: {"target_correct": 0, "target_total": 0} for target in model.target_keys}
    # Running sums of per-batch means for the padded sequence targets.
    sequence_metrics = {target: {"accuracy": 0, "exact_match": 0, "total": 0} for target in model.target_keys_to_pad}
    for predictions_batch in predict(
            data_iterator=data_iterator, model=model, max_decoding_steps=max_decoding_steps,
            max_examples_to_evaluate=max_examples_to_evaluate):
        for target_name in model.target_keys:
            # Count exact element matches between predictions and targets.
            equal = torch.eq(predictions_batch["targets"]["%s_targets" % target_name].data,
                             predictions_batch["predictions"][target_name].data).long().sum().data.item()
            metrics[target_name]["target_correct"] += equal
            metrics[target_name]["target_total"] += len(predictions_batch["targets"]["%s_targets" % target_name])
        for target_name in model.target_keys_to_pad:
            sequence_preds = predictions_batch["predictions"][target_name + "_sequences"]
            sequence_pred_lengths = predictions_batch["predictions"][target_name + "_sequence_lengths"]
            target_lengths = torch.tensor(predictions_batch["targets"]["%s_lengths" % target_name], device=device,
                                          dtype=sequence_pred_lengths.dtype) - 1  # -1 because SOS gets removed
            sequence_targets = model.remove_start_of_sequence(predictions_batch["targets"]["%s_targets" % target_name])
            accuracy_per_sequence, exact_match_per_sequence = get_exact_match(sequence_preds, sequence_pred_lengths,
                                                                              sequence_targets, target_lengths)
            # NOTE(review): averaging per-batch means weights every batch
            # equally regardless of its size — confirm this is intended.
            sequence_metrics[target_name]["accuracy"] += accuracy_per_sequence.mean().item()
            sequence_metrics[target_name]["exact_match"] += exact_match_per_sequence.mean().item()
            sequence_metrics[target_name]["total"] += 1
    # Convert the accumulated counts/sums into percentages.
    final_metrics = {}
    for target_name in model.target_keys:
        final_metrics[target_name] = (metrics[target_name]["target_correct"]
                                      / metrics[target_name]["target_total"]) * 100.
    for target_name in model.target_keys_to_pad:
        final_metrics[target_name + "_accuracy"] = (sequence_metrics[target_name]["accuracy"]
                                                    / sequence_metrics[target_name]["total"]) * 100.
        final_metrics[target_name + "_exact_match"] = (sequence_metrics[target_name]["exact_match"]
                                                       / sequence_metrics[target_name]["total"]) * 100.
    return final_metrics
|
import glob
import os.path as path
import subprocess
import sys
import tempfile
from shlex import quote
import yaml
def main():
    """Validate every YAML file named on the command line against the
    HelmRepository definitions discovered in the working tree."""
    repo_map = _buildRepoMap()
    for file_name in sys.argv[1:]:
        _validateFile(file_name, repo_map)
def _buildRepoMap():
    """Scan every YAML file under the cwd for HelmRepository documents.

    Returns a dict mapping repository name -> repository URL. Files or
    documents that fail to parse are skipped silently (best-effort scan).
    """
    repo_urls = {}
    for yaml_path in glob.glob("./**/*.yaml", recursive=True):
        with open(yaml_path) as handle:
            try:
                for doc in yaml.load_all(handle, Loader=yaml.SafeLoader):
                    # Only non-empty documents of kind HelmRepository count.
                    if not doc or "kind" not in doc or doc["kind"] != "HelmRepository":
                        continue
                    repo_urls[doc["metadata"]["name"]] = doc["spec"]["url"]
            except Exception:
                continue
    return repo_urls
def _validateFile(fileToValidate, repos):
    """Lint every HelmRelease found in a YAML file against its pinned chart.

    For each HelmRelease that sources a HelmRepository, pulls the chart at
    the pinned version with `helm pull` and runs `helm lint` against the
    release's inline values. Exits the process with status 1 on any helm
    failure.

    :param fileToValidate: path of the YAML file to check
    :param repos: mapping of HelmRepository name -> URL (see _buildRepoMap)
    """
    with open(fileToValidate) as f:
        for definition in yaml.load_all(f, Loader=yaml.SafeLoader):
            # Skip empty documents and anything that is not a HelmRelease.
            if (
                not definition
                or "kind" not in definition
                or definition["kind"] != "HelmRelease"
            ):
                continue
            chartSpec = definition["spec"]["chart"]["spec"]
            # Only releases sourced from a HelmRepository can be validated here.
            if chartSpec["sourceRef"]["kind"] != "HelmRepository":
                continue
            chartName = chartSpec["chart"]
            chartVersion = chartSpec["version"]
            # NOTE(review): raises KeyError when the referenced repository was
            # not found by _buildRepoMap — confirm that failure mode is intended.
            chartUrl = repos[chartSpec["sourceRef"]["name"]]
            with tempfile.TemporaryDirectory() as tmpDir:
                # Write the release's inline values (if any) for `helm lint -f`.
                with open(path.join(tmpDir, "values.yaml"), "w") as valuesFile:
                    if "spec" in definition and "values" in definition["spec"]:
                        yaml.dump(definition["spec"]["values"], valuesFile)
                # Fetch the chart archive into the temp dir.
                res = subprocess.run(
                    f"helm pull --repo {quote(chartUrl)} --version {quote(chartVersion)} {quote(chartName)}",
                    shell=True,
                    cwd=tmpDir,
                    text=True,
                    stdout=subprocess.PIPE,
                    stderr=subprocess.STDOUT,
                )
                if res.returncode != 0:
                    print(res.stdout)
                    exit(1)
                # Lint the pulled chart archive against the release values.
                res = subprocess.run(
                    "helm lint -f values.yaml *.tgz",
                    shell=True,
                    cwd=tmpDir,
                    text=True,
                    stdout=subprocess.PIPE,
                    stderr=subprocess.STDOUT,
                )
                if res.returncode != 0:
                    print(res.stdout)
                    exit(1)
# Script entry point: file paths to validate come from the command line.
if __name__ == "__main__":
    main()
|
import os
import subprocess
import sys
_PYTHON_VERSIONS_ = ["3.6", "3.7", "3.8"]
_TAGS = [
("Dockerfile", "basecuda"),
("Dockerfile.cuda", "cuda"),
("Dockerfile.nightly", "rustnightly"),
("Dockerfile.stable", "rust"),
("Dockerfile.cuda.stable", "cudarust")
]
cwd = os.path.abspath(os.path.dirname(__file__))
for py in _PYTHON_VERSIONS_:
dirname = os.path.join(cwd, f"Python{py}")
for dockerimage, tag in _TAGS:
tag = "npapapietro/pythonbundles:py" + py.replace('.','') + tag
dockerfile = os.path.join(dirname, dockerimage)
if not os.path.isfile(dockerfile):
continue
cmd = [
"docker", "build",
"-t", tag,
"-f", dockerfile,
"."
]
p = subprocess.Popen(cmd, stderr=subprocess.PIPE, stdout=subprocess.PIPE)
for line in iter(p.stdout.readline, b''):
sys.stdout.write(line.decode('utf-8'))
p.wait()
cmd = [
"docker", "push", tag
]
p = subprocess.Popen(cmd, stderr=subprocess.PIPE, stdout=subprocess.PIPE)
for line in iter(p.stdout.readline, b''):
sys.stdout.write(line.decode('utf-8'))
p.wait()
print("\n")
print("++++++++++++++++++++++++++++++++++++++++++++++++++++++++")
print("Finished with", tag)
print("++++++++++++++++++++++++++++++++++++++++++++++++++++++++")
print("\n")
|
"""Tests for STRIPS operator learning."""
from gym.spaces import Box
import numpy as np
from predicators.src.nsrt_learning.strips_learning import segment_trajectory, \
learn_strips_operators
from predicators.src.structs import Type, Predicate, State, Action, \
ParameterizedOption, LowLevelTrajectory
from predicators.src import utils
def test_segment_trajectory():
    """Tests for segment_trajectory().

    Returns the known-option and unknown-option segment lists so that
    test_learn_strips_operators() can reuse them.
    """
    # One-feature cup type; all three predicates read feat1 > 0.5 of their
    # first argument.
    cup_type = Type("cup_type", ["feat1"])
    cup0 = cup_type("cup0")
    cup1 = cup_type("cup1")
    cup2 = cup_type("cup2")
    pred0 = Predicate("Pred0", [cup_type], lambda s, o: s[o[0]][0] > 0.5)
    pred1 = Predicate("Pred1", [cup_type, cup_type],
                      lambda s, o: s[o[0]][0] > 0.5)
    pred2 = Predicate("Pred2", [cup_type], lambda s, o: s[o[0]][0] > 0.5)
    preds = {pred0, pred1, pred2}
    state0 = State({cup0: [0.4], cup1: [0.7], cup2: [0.1]})
    atoms0 = utils.abstract(state0, preds)
    state1 = State({cup0: [0.8], cup1: [0.3], cup2: [1.0]})
    atoms1 = utils.abstract(state1, preds)
    # Tests with known options.
    param_option = ParameterizedOption("dummy", [cup_type], Box(0.1, 1, (1, )),
                                       lambda s, m, o, p: Action(p),
                                       utils.always_initiable,
                                       utils.onestep_terminal)
    option0 = param_option.ground([cup0], np.array([0.2]))
    assert option0.initiable(state0)
    action0 = option0.policy(state0)
    # Even though the option changes, the option spec stays the same, so we do
    # not want to segment. This is because we are segmenting based on symbolic
    # aspects only, because the strips operators can only depend on symbols.
    option1 = param_option.ground([cup0], np.array([0.1]))
    assert option1.initiable(state0)
    action1 = option1.policy(state0)
    option2 = param_option.ground([cup1], np.array([0.1]))
    assert option2.initiable(state0)
    action2 = option2.policy(state0)
    trajectory = (LowLevelTrajectory([state0.copy() for _ in range(5)],
                                     [action0, action1, action2, action0]),
                  [atoms0, atoms0, atoms0, atoms0, atoms0])
    known_option_segments = segment_trajectory(trajectory)
    # Grounding objects change cup0 -> cup1 -> cup0, giving three segments.
    assert len(known_option_segments) == 3
    assert len(known_option_segments[0].actions) == 2
    assert len(known_option_segments[1].actions) == 1
    assert len(known_option_segments[2].actions) == 1
    # Tests without known options.
    action0 = option0.policy(state0)
    action0.unset_option()
    action1 = option0.policy(state0)
    action1.unset_option()
    action2 = option1.policy(state0)
    action2.unset_option()
    trajectory = (LowLevelTrajectory([state0.copy() for _ in range(5)],
                                     [action0, action1, action2, action0]),
                  [atoms0, atoms0, atoms0, atoms0, atoms0])
    # With no options and no atom changes there is nothing to segment on.
    assert len(segment_trajectory(trajectory)) == 0
    trajectory = (LowLevelTrajectory(
        [state0.copy() for _ in range(5)] + [state1],
        [action0, action1, action2, action0, action1]),
                  [atoms0, atoms0, atoms0, atoms0, atoms0, atoms1])
    # A single atom change at the final step yields exactly one segment.
    unknown_option_segments = segment_trajectory(trajectory)
    assert len(unknown_option_segments) == 1
    assert len(unknown_option_segments[0].actions) == 5
    return known_option_segments, unknown_option_segments
def test_learn_strips_operators():
    """Tests for learn_strips_operators()."""
    utils.reset_config({"min_data_for_nsrt": 0})
    # Reuse the segment lists built (and validated) by the segmentation test.
    known_option_segments, unknown_option_segments = test_segment_trajectory()
    known_option_pnads = learn_strips_operators(known_option_segments)
    known_option_ops = [pnad.op for pnad in known_option_pnads]
    # With known options everything collapses to a single trivial operator.
    assert len(known_option_ops) == 1
    assert str((known_option_ops[0])) == """STRIPS-Op0:
Parameters: [?x0:cup_type]
Preconditions: []
Add Effects: []
Delete Effects: []
Side Predicates: []"""
    unknown_option_pnads = learn_strips_operators(unknown_option_segments)
    unknown_option_ops = [pnad.op for pnad in unknown_option_pnads]
    # The single unknown-option segment carries the full atom change.
    assert len(unknown_option_ops) == 1
    assert str(unknown_option_ops[0]) == """STRIPS-Op0:
Parameters: [?x0:cup_type, ?x1:cup_type, ?x2:cup_type]
Preconditions: [Pred0(?x1:cup_type), Pred1(?x1:cup_type, ?x0:cup_type), Pred1(?x1:cup_type, ?x1:cup_type), Pred1(?x1:cup_type, ?x2:cup_type), Pred2(?x1:cup_type)]
Add Effects: [Pred0(?x0:cup_type), Pred0(?x2:cup_type), Pred1(?x0:cup_type, ?x0:cup_type), Pred1(?x0:cup_type, ?x1:cup_type), Pred1(?x0:cup_type, ?x2:cup_type), Pred1(?x2:cup_type, ?x0:cup_type), Pred1(?x2:cup_type, ?x1:cup_type), Pred1(?x2:cup_type, ?x2:cup_type), Pred2(?x0:cup_type), Pred2(?x2:cup_type)]
Delete Effects: [Pred0(?x1:cup_type), Pred1(?x1:cup_type, ?x0:cup_type), Pred1(?x1:cup_type, ?x1:cup_type), Pred1(?x1:cup_type, ?x2:cup_type), Pred2(?x1:cup_type)]
Side Predicates: []"""  # pylint: disable=line-too-long
|
from axiom.test.historic import stubloader
from xmantissa.people import EmailAddress, Person
class EmailAddressTestCase(stubloader.StubbedTest):
    """Verify that an upgraded store keeps the EmailAddress linked to its Person."""

    def testUpgrade(self):
        address = self.store.findUnique(EmailAddress)
        owner = self.store.findUnique(Person)
        # After upgrade, the EmailAddress must still reference the same Person
        # item and retain its address attribute.
        self.assertIdentical(address.person, owner)
        self.assertEquals(address.address, 'bob@divmod.com')
|
from google.cloud.datastore_v1 import types
from typing import NamedTuple
class _StoredObject(NamedTuple):
    """A datastore entity paired with the version at which it was stored."""

    version: int  # version number associated with this stored snapshot
    entity: types.Entity  # the Cloud Datastore entity itself
|
#!/usr/bin/env python
# encoding: utf-8
"""
untitled.py
Created by Olivier Huin on 2010-03-22.
Copyright (c) 2010 Flarebyte.com Limited. All rights reserved.
"""
import sys
import os
import S3
import time
import csv, uuid
import simplejson as json
import mondriancss
import datautils, devutils, datasource
from collections import defaultdict
from export import MediaExporter
# Sentinel key under which each selector dict stores its CSS selector name
# (popped out before the remaining keys are serialized as properties).
NAME_ID=">>>[selector-name]"
class Stylesheet(MediaExporter):
    """Exports the mondrian CSS stylesheet to S3 via the MediaExporter machinery."""

    def __init__(self):
        # State/connection setup is inherited from MediaExporter.
        self.initialize()

    def upload(self):
        """Build the CSS from the mondrian stylesheet and upload it as "base".

        Returns:
            False when no S3 connection could be established, otherwise None
            after the upload completes.
        """
        # Idiom fix: compare to None with `is`, not `==`.
        if self.conn is None:
            self.connect()
        if self.conn is None:
            return False
        stylesheet = mondriancss.get_stylesheet()
        css = self.create_css(stylesheet)
        self.upload_css("base", css)

    def create_css(self, stylesheet):
        """Serialize *stylesheet* (an iterable of selector dicts) to CSS text.

        NOTE: pops the NAME_ID key from every selector dict, mutating the
        caller's data in place.
        """
        r = ""
        for selector in stylesheet:
            name = selector.pop(NAME_ID)
            r += "%s{" % name
            for k in selector:
                r += "%s:%s;" % (k, selector[k])
            r += "}\n"
        return r
# Script body: build the exporter and push the stylesheet immediately.
stylesheet=Stylesheet()
stylesheet.upload()
|
#!/usr/bin/env python3
# coding: utf-8
"""Plotting and analysis tools for the ARTIS 3D supernova radiative transfer code."""
import datetime
import sys
from pathlib import Path
from setuptools import find_packages, setup
from setuptools.command.test import test as TestCommand
sys.path.append('artistools/')
from commands import console_scripts
class PyTest(TestCommand):
    """Custom setuptools command that runs the py.test test suite."""

    def finalize_options(self):
        """Finish option setup, deferring test discovery entirely to pytest."""
        TestCommand.finalize_options(self)
        self.test_args = []
        self.test_suite = True

    def run_tests(self):
        """Invoke pytest and exit with its status code."""
        # Imported lazily: the required eggs are not on sys.path until
        # setuptools has finished preparing the command.
        import pytest
        raise SystemExit(pytest.main(self.test_args))
# Log the timestamp whenever setup.py runs.
print(datetime.datetime.now().isoformat())

setup(
    name="artistools",
    version="2021.04.24dev",
    # version=datetime.datetime.now().isoformat(),
    author="ARTIS Collaboration",
    author_email="luke.shingles@gmail.com",
    packages=find_packages(),
    url="https://www.github.com/artis-mcrt/artistools/",
    license="MIT",
    description="Plotting and analysis tools for the ARTIS 3D supernova radiative transfer code.",
    # read_text() closes the file handle, unlike the previous
    # `.open('rt').read()` which leaked it.
    long_description=(Path(__file__).absolute().parent / "README.md").read_text(),
    long_description_content_type='text/markdown',
    install_requires=(Path(__file__).absolute().parent / "requirements.txt").read_text().splitlines(),
    entry_points={
        'console_scripts': console_scripts
    },
    # Bug fix: '>==3.6' is not a valid PEP 440 version specifier (pip
    # rejects the metadata); '>=3.6' is what was intended.
    python_requires='>=3.6',
    # test_suite='tests',
    setup_requires=['pytest', 'pytest-runner', 'pytest-cov'],
    tests_require=['pytest', 'pytest-runner', 'pytest-cov'],
    include_package_data=True)
|
'''
This module contains a collection of decorators to manage the Maya selection.
The goal is to externalize all selection checks during procedure like:
def generic_method_creating_a_deformer_from_selection():
selection = cmds.ls(selection=True)
if len(selection) == 0:
return cmds.warning('Please select at least one node')
if cmds.nodeType(selection[0]) != 'mesh':
return cmds.warning('Please select a mesh first')
# all your process ...
# and finally reselect to get back your selection
cmds.select(selection)
return
These kind of lines pollute all procedures changing, checking selection
and it can be cleaner to externalize them like this:
@preserve_selection
@filter_node_type_in_selection(node_type=('transform, mesh'))
@selection_contains_at_least(2, 'transform')
@selection_required
def generic_method_creating_a_deformer_from_selection():
# all your process ...
return
Careful, the order in which the wrappers are applied is really important.
A badly ordered wrapper stack can create issues.
'''
__author__ = 'Lionel Brouyere'
# Bug fix: was `not 'DreamWall'`, which evaluates to the boolean False
# instead of the intended copyright string. (If the `not` was a deliberate
# joke, restore it knowingly.)
__copyright__ = 'DreamWall'
__license__ = 'MIT'
from functools import wraps
from contextlib import contextmanager
import maya.cmds as mc
def preserve_selection(func):
    '''
    Decorator that records the current Maya selection before calling the
    wrapped function, and restores that selection once it has returned.
    '''
    @wraps(func)
    def wrapper(*args, **kwargs):
        with preserve_selection_ctx():
            result = func(*args, **kwargs)
        return result
    return wrapper
@contextmanager
def preserve_selection_ctx():
    """Context manager restoring the Maya selection on exit (even on error)."""
    stored = mc.ls(selection=True)
    try:
        yield
    finally:
        if not stored:
            mc.select(clear=True)
        else:
            mc.select(stored, noExpand=True)
def selection_required(func):
    '''
    Decorator that checks whether any node is selected; emits a warning and
    aborts when the selection is empty, otherwise calls the wrapped function.
    '''
    @wraps(func)
    def wrapper(*args, **kwargs):
        if mc.ls(selection=True):
            return func(*args, **kwargs)
        return mc.warning('Select at least one node')
    return wrapper
def filter_selection(**ls_kwargs):
    '''
    Decorator factory: before the wrapped function runs, reduce the current
    selection to the nodes matching the given mc.ls keyword arguments
    (e.g. type='mesh').
    '''
    def decorator(func):
        @wraps(func)
        def wrapper(*args, **kwargs):
            filtered = mc.ls(selection=True, **ls_kwargs)
            mc.select(filtered)
            return func(*args, **kwargs)
        return wrapper
    return decorator
def filter_transforms_by_children_types(*nodetypes):
    '''
    Decorator factory: removes from the current selection every transform
    that does not own at least one non-intermediate shape of one of the
    given node types. Non-transform nodes in the selection are kept.

    :nodetypes one or more node type strings, e.g. 'mesh', 'nurbsCurve'
    '''
    def decorator(func):
        @wraps(func)
        def wrapper(*args, **kwargs):
            not_transforms_selected = [
                n for n in mc.ls(selection=True)
                if mc.nodeType(n) != 'transform']
            filtered_transforms = []
            for transform in mc.ls(selection=True, type='transform'):
                # Bug fix: listRelatives returns None when the transform
                # has no children; iterate an empty list instead of crashing.
                for node in mc.listRelatives(transform) or []:
                    if not mc.getAttr(node + '.intermediateObject'):
                        if mc.nodeType(node) in nodetypes:
                            filtered_transforms.append(transform)
                            # Bug fix: was `continue`, which could append the
                            # same transform once per matching shape.
                            break
            mc.select(not_transforms_selected + filtered_transforms)
            return func(*args, **kwargs)
        return wrapper
    return decorator
def select_shape_transforms(func):
    '''
    Decorator replacing every shape in the current selection by its parent
    transform before calling the wrapped function.
    '''
    @wraps(func)
    def wrapper(*args, **kwargs):
        transforms = []
        for node in mc.ls(sl=True):
            if mc.nodeType(node) == 'transform':
                transforms.append(node)
            else:
                transforms.append(mc.listRelatives(node, parent=True)[0])
        mc.select(transforms)
        return func(*args, **kwargs)
    return wrapper
def selection_contains_at_least(number, node_type):
    '''
    Decorator factory: warn and abort unless the current Maya selection
    contains at least `number` nodes of type `node_type`.

    :number int
    :node_type string
    '''
    assert isinstance(node_type, str)  # node_type argument must be a string
    assert isinstance(number, int)  # number argument must be an int
    def decorator(func):
        @wraps(func)
        def wrapper(*args, **kwargs):
            matches = mc.ls(selection=True, type=node_type)
            if len(matches) >= number:
                return func(*args, **kwargs)
            return mc.warning(
                'The selection must contains at least {} nodes {} '
                'and it contains {}'.format(
                    number, node_type, len(matches)))
        return wrapper
    return decorator
def selection_contains_exactly(number, node_type):
    '''
    Decorator factory: warn and abort unless the current Maya selection
    contains exactly `number` nodes of type `node_type`.

    :number int
    :node_type string
    '''
    assert isinstance(node_type, str)  # node_type argument must be a string
    assert isinstance(number, int)  # number argument must be an int
    def decorator(func):
        @wraps(func)
        def wrapper(*args, **kwargs):
            matches = mc.ls(selection=True, type=node_type)
            if len(matches) == number:
                return func(*args, **kwargs)
            return mc.warning(
                'The selection must contains exactly {} node(s) {} '
                'and it contains {}'.format(
                    number, node_type, len(matches)))
        return wrapper
    return decorator
|
'''
模拟处理机调度算法
create by Ian in 2017-11-9 19:28:46
'''
from PyQt5.QtWidgets import *
from PyQt5.QtCore import *
import sys
import time
class MultiInPutDialog(QDialog):
    '''New-job popup: a custom dialog asking for a job name and a job length.'''
    def __init__(self):
        super(MultiInPutDialog, self).__init__()
        self.initUI()
    def initUI(self):
        '''Initialize the UI.'''
        self.setWindowTitle(u"新建作业")
        self.resize(100, 80)
        grid = QGridLayout()
        grid.addWidget(QLabel(u"作业名:", parent=self), 0, 0, 1, 1)
        self.name = QLineEdit()
        grid.addWidget(self.name, 0, 1, 1, 1)
        grid.addWidget(QLabel(u"作业长度:", parent=self), 1, 0, 1, 1)
        self.lenth = QLineEdit()
        grid.addWidget(self.lenth, 1, 1, 1, 1)
        buttonBox = QDialogButtonBox()
        buttonBox.setOrientation(Qt.Horizontal)
        buttonBox.setStandardButtons(
            QDialogButtonBox.Cancel | QDialogButtonBox.Ok)
        buttonBox.accepted.connect(self.accept)  # OK button
        buttonBox.rejected.connect(self.reject)  # Cancel button
        layout = QVBoxLayout()
        layout.addLayout(grid)
        layout.addWidget(buttonBox)
        self.setLayout(layout)
    def accept(self):
        # Overrides QDialog.accept: dismisses the dialog and returns the
        # entered (name, length) text values to the caller.
        self.reject()  # clicking OK also closes the window
        return [self.name.text(), self.lenth.text()]
class TaskContant(list):
    '''Concrete job data; round-robin (time-slice) algorithm.'''
    def __init__(self):
        super(TaskContant, self).__init__()
        # Initialize the per-job bookkeeping lists (parallel lists, one
        # entry per job).
        self.taskName = []  # job names
        self.taskStatus = []  # job states
        self.lenth = []  # job lengths
        self.schedule = []  # progress values
        self.completion = []  # completion ratios
        self.priority = []  # priorities
        self.response = []  # response ratios
        self.task = []  # job list
        self.counter = 0
    def addTask(self, taskName, lenth):
        '''Add a job.'''
        self.counter += 1
        self.taskName.append(taskName)
        self.lenth.append(int(lenth))
        if (len(self.taskStatus) == 0):
            self.taskStatus.append(1)  # 0 = ready, 1 = running, -1 = blocked
        else:
            self.taskStatus.append(0)
        self.schedule.append(0)  # progress starts at 0
        self.completion.append(0)
    def showAll(self):
        '''All fields of every job, zipped per job.'''
        self.task = zip(self.taskName, self.taskStatus,
                        self.lenth, self.schedule, self.completion)
        return self.task
    def len(self):
        '''Total number of jobs.'''
        return self.counter
    def show(self):
        '''Show the most recently added job (i.e. the last one).'''
        i = self.counter - 1
        self.task = list(self.showAll())
        return self.task[i]
    def run(self):
        '''Execute one step of the scheduling algorithm.'''
        for i in range(self.counter):
            print(self.taskName[i], self.taskStatus[i],self.lenth[i], self.schedule[i], self.completion[i])
            if self.taskStatus[i] == 1:
                self.schedule[i] += 1  # advance progress by one unit
                self.completion[i] = "%d/%d" %(self.schedule[i],self.lenth[i])  # completion ratio
                self.taskStatus[i] = 0  # back to the ready state
                if i+1 < self.counter:
                    # Hand the CPU to the next job. NOTE(review): states live
                    # in a separate parallel list, which is a fragile design.
                    self.taskStatus[i+1] = 1
                else:
                    self.taskStatus[0] = 1
                break
        for i in range(self.counter):
            if i >= self.counter:  # counter shrinks below, so i may exceed it
                break
            if self.schedule[i] == self.lenth[i]:  # progress reached the maximum
                print("删除%d" %i)
                del self.taskName[i]
                del self.taskStatus[i]
                del self.lenth[i]
                del self.schedule[i]
                del self.completion[i]
                self.counter -= 1
class TaskPS(TaskContant):
    '''Job content for the dynamic-priority scheduling algorithm.'''
    def __init__(self):
        super().__init__()
    def addTask(self, taskName, lenth=100, priority=0):
        '''Add a job, also recording its priority.

        Bug fix: the parent addTask() requires taskName and lenth; they were
        previously not forwarded, which raised TypeError on every call.
        '''
        super().addTask(taskName, lenth)
        self.priority.append(priority)
class Window(QTabWidget):
    '''Main window: one tab per scheduling algorithm.'''
    def __init__(self):
        super(Window, self).__init__()
        self.initUI()
    def initUI(self):
        '''Initialize the UI.'''
        self.tab_RR = QWidget()  # round-robin (time slice)
        self.tab_PS = QWidget()  # dynamic priority scheduling
        self.tab_HRRF = QWidget()  # highest response ratio first
        self.addTab(self.tab_RR, u"时间片轮转")
        self.addTab(self.tab_PS, u"动态优先调度")
        self.addTab(self.tab_HRRF, u"高响应比优先调度")
        self.tab_RR.tabw = QTableWidget()
        self.resize(1200, 600)
        self.task = TaskContant()
        self.tab_RRUI()
        self.i = 0
    task = []  # temporary container, used for testing
    def tab_RRUI(self):
        '''Internals of the round-robin tab.'''
        # Initialize the widgets.
        hbox = QHBoxLayout()
        grid = QGridLayout()
        split = QSplitter(Qt.Horizontal)
        labl = QLabel()
        labl.setText("每个作业的长度都为50~100间的随机值")
        newTask = QPushButton("新建作业")
        hbox.addWidget(newTask)
        hbox.addWidget(labl)
        hbox.addStretch()  # elastic spacer: pushes the widgets to the side
        self.btn = QPushButton("开始作业", self)
        self.btn.clicked.connect(self.doAction)
        self.timer = QBasicTimer()
        hbox.addWidget(self.btn)
        self.tab_RR.tabw.setColumnCount(5)
        horizontalHeader = ["作业名", "作业状态", "作业长度", "进度", "完成率"]
        self.tab_RR.tabw.setHorizontalHeaderLabels(horizontalHeader)
        # self.tab_RR.tabw.resizeRowsToContents() # auto-fit the cell sizes
        split.addWidget(self.tab_RR.tabw)
        grid.addLayout(hbox, 1, 1)
        grid.addWidget(split, 2, 1)
        grid.setRowStretch(1, 10)
        grid.setRowStretch(2, 90)
        self.tab_RR.setLayout(grid)
        newTask.clicked.connect(self.addTask)
    def addTask(self):
        '''Add a new job.'''
        dialog = MultiInPutDialog()
        dialog.show()
        dialog.exec_()  # enter the dialog's event loop and wait for the input
        self.taskname, self.lenth = dialog.accept()
        if self.lenth == '':  # invalid input
            return
        self.task.addTask(self.taskname,self.lenth)  # register the new job
        task = self.task.show()
        j = 0
        self.tab_RR.tabw.insertRow(self.i)  # slot: insert a row at index i
        for item in task:
            if j == 3:
                self.qpr = QProgressBar()
                self.qpr.setValue(int(item))
                self.tab_RR.tabw.setCellWidget(self.i,j,self.qpr)
            else:
                newItem = QTableWidgetItem(str(item))  # wrap value in a QTableWidgetItem
                self.tab_RR.tabw.setItem(self.i, j, newItem)
            j += 1
        self.i += 1
    def updateUI(self):
        '''Refresh the table contents.'''
        self.tab_RR.tabw.clearContents()  # clear everything except the header
        task = self.task.showAll()
        self.i = 0
        for one in task:
            one = list(one)
            j = 0
            for item in one:
                if j == 3:
                    self.qpr = QProgressBar()
                    self.qpr.setValue(int(item/one[2]*100))
                    self.tab_RR.tabw.setCellWidget(self.i,j,self.qpr)
                else:
                    newItem = QTableWidgetItem(str(item))  # wrap value in a QTableWidgetItem
                    self.tab_RR.tabw.setItem(self.i, j, newItem)
                j += 1
            self.i += 1
    def doAction(self):
        '''Start/stop button handler.'''
        if self.timer.isActive():
            self.timer.stop()
            self.btn.setText("开始作业")
        else:
            self.timer.start(100, self)  # fire the timer every 100 ms
            self.btn.setText("停止作业")
    def timerEvent(self,e):
        '''Timer tick handler (the empty-job-list case is not handled yet).'''
        if self.task.counter == 0:
            self.btn.setText('作业已完成')
            return
        else:
            self.task.run()
            self.updateUI()
if __name__ == '__main__':
    app = QApplication(sys.argv)  # every PyQt5 app needs exactly one QApplication
    MainWindow = Window()
    MainWindow.show()
    sys.exit(app.exec_())
|
#!/usr/bin/python3 -OO
# Copyright 2007-2021 The SABnzbd-Team <team@sabnzbd.org>
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
"""
Testing SABnzbd deobfuscate module
"""
import random
import shutil
import zipfile
from sabnzbd.deobfuscate_filenames import *
from tests.testhelper import *
def create_big_file(filename):
    """Create a sparse 15 MB file at *filename* (comfortably above MIN_SIZE)."""
    with open(filename, "wb") as handle:
        handle.truncate(15 * 1024 * 1024)
def create_small_file(filename):
    """Create a sparse 1 KB file at *filename* (below the rename threshold)."""
    with open(filename, "wb") as handle:
        handle.truncate(1024)
class TestDeobfuscateFinalResult:
    """Tests for sabnzbd.deobfuscate_filenames.

    Covers obfuscation detection, deobfuscation of file lists (flat,
    sub-directory, accompanying files, same-extension collections),
    nasty edge cases, and par2-based filename recovery.
    """

    def test_is_probably_obfuscated(self):
        """is_probably_obfuscated() classifies example names correctly."""
        # Test the base function test_is_probably_obfuscated(), which gives a boolean as RC
        # obfuscated names
        assert is_probably_obfuscated("599c1c9e2bdfb5114044bf25152b7eaa.mkv")
        assert is_probably_obfuscated("/my/blabla/directory/stuff/599c1c9e2bdfb5114044bf25152b7eaa.mkv")
        assert is_probably_obfuscated(
            "/my/blabla/directory/A Directory Should Not Count 2020/599c1c9e2bdfb5114044bf25152b7eaa.mkv"
        )
        assert is_probably_obfuscated("/my/blabla/directory/stuff/afgm.avi")
        assert is_probably_obfuscated("/my/blabla/directory/stuff/afgm2020.avi")
        assert is_probably_obfuscated("MUGNjK3zi65TtN.mkv")
        assert is_probably_obfuscated("T306077.avi")
        assert is_probably_obfuscated("bar10nmbkkjjdfr.mkv")
        assert is_probably_obfuscated("4rFF-fdtd480p.bin")
        assert is_probably_obfuscated("e0nFmxBNTprpbQiVQ44WeEwSrBkLlJ7IgaSj3uzFu455FVYG3q.bin")
        assert is_probably_obfuscated("e0nFmxBNTprpbQiVQ44WeEwSrBkLlJ7IgaSj3uzFu455FVYG3q")  # no ext
        assert is_probably_obfuscated("greatdistro.iso")
        assert is_probably_obfuscated("my.download.2020")
        assert is_probably_obfuscated("abc.xyz.a4c567edbcbf27.BLA")  # by definition
        assert is_probably_obfuscated("abc.xyz.iso")  # lazy brother
        assert is_probably_obfuscated("0675e29e9abfd2.f7d069dab0b853283cc1b069a25f82.6547")
        # non-obfuscated names:
        assert not is_probably_obfuscated("/my/blabla/directory/stuff/My Favorite Distro S03E04.iso")
        assert not is_probably_obfuscated("/my/blabla/directory/stuff/Great Distro (2020).iso")
        assert not is_probably_obfuscated("ubuntu.2004.iso")
        assert not is_probably_obfuscated("/my/blabla/directory/stuff/GreatDistro2020.iso")
        assert not is_probably_obfuscated("Catullus.avi")
        assert not is_probably_obfuscated("Der.Mechaniker.HDRip.XviD-SG.avi")
        assert not is_probably_obfuscated("Bonjour.1969.FRENCH.BRRiP.XviD.AC3-HuSh.avi")
        assert not is_probably_obfuscated("Bonjour.1969.avi")
        assert not is_probably_obfuscated("This That S01E11")
        assert not is_probably_obfuscated("This_That_S01E11")
        assert not is_probably_obfuscated("this_that_S01E11")
        assert not is_probably_obfuscated("My.Download.2020")
        assert not is_probably_obfuscated("this_that_there_here.avi")
        assert not is_probably_obfuscated("Lorem Ipsum.avi")
        assert not is_probably_obfuscated("Lorem Ipsum")  # no ext

    def test_deobfuscate_filelist_lite(self):
        """Single obfuscated file gets renamed to the job name."""
        # ligthweight test of deobfuscating: with just one file
        # Create directory (with a random directory name)
        dirname = os.path.join(SAB_DATA_DIR, "testdir" + str(random.randint(10000, 99999)))
        os.mkdir(dirname)
        # Create a big enough file with a non-useful, obfuscated filename
        output_file1 = os.path.join(dirname, "111c1c9e2bdfb5114044bf25152b7eab.bin")
        create_big_file(output_file1)
        assert os.path.isfile(output_file1)
        # create the filelist, with just the above file
        myfilelist = [output_file1]
        # and now unleash the magic on that filelist, with a more useful jobname:
        jobname = "My Important Download 2020"
        deobfuscate_list(myfilelist, jobname)
        # Check original files:
        assert not os.path.isfile(output_file1)  # original filename should not be there anymore
        # Check the renaming
        assert os.path.isfile(os.path.join(dirname, jobname + ".bin"))  # ... it should be renamed to the jobname
        # Done. Remove (non-empty) directory
        shutil.rmtree(dirname)

    def test_deobfuscate_filelist_full(self):
        """Only files in the filelist are renamed; others are untouched."""
        # Full test, with a combinantion of files: Test that deobfuscate() works and renames correctly
        # ... but only the files that are in the filelist
        # Create directory (with a random directory name)
        dirname = os.path.join(SAB_DATA_DIR, "testdir" + str(random.randint(10000, 99999)))
        os.mkdir(dirname)
        # Create a big enough file with a non-useful filename
        output_file1 = os.path.join(dirname, "111c1c9e2bdfb5114044bf25152b7eaa.bin")
        create_big_file(output_file1)
        assert os.path.isfile(output_file1)
        # and another one
        output_file2 = os.path.join(dirname, "222c1c9e2bdfb5114044bf25152b7eaa.bin")
        create_big_file(output_file2)
        assert os.path.isfile(output_file2)
        # create the filelist, with just the above files
        myfilelist = [output_file1, output_file2]
        # Create some extra files ... that will not be in the list
        output_file3 = os.path.join(dirname, "333c1c9e2bdfb5114044bf25152b7eaa.bin")
        create_big_file(output_file3)
        assert os.path.isfile(output_file3)
        output_file4 = os.path.join(dirname, "This Great Download 2020.bin")
        create_big_file(output_file4)
        assert os.path.isfile(output_file4)
        # and now unleash the magic on that filelist, with a more useful jobname:
        jobname = "My Important Download 2020"
        deobfuscate_list(myfilelist, jobname)
        # Check original files:
        assert not os.path.isfile(output_file1)  # original filename should not be there anymore
        assert not os.path.isfile(output_file2)  # original filename should not be there anymore
        assert os.path.isfile(output_file3)  # but this one should still be there
        assert os.path.isfile(output_file4)  # and this one too
        # Check the renaming
        assert os.path.isfile(os.path.join(dirname, jobname + ".bin"))  # ... it should be renamed to the jobname
        assert os.path.isfile(os.path.join(dirname, jobname + ".1.bin"))  # should be there (2nd file renamed)
        # Done. Remove (non-empty) directory
        shutil.rmtree(dirname)

    def test_deobfuscate_filelist_subdir(self):
        """Renaming works for files nested in sub-sub-directories."""
        # test of deobfuscating with sub directories
        # Create directory with subdirs
        dirname = os.path.join(SAB_DATA_DIR, "testdir" + str(random.randint(10000, 99999)))
        os.mkdir(dirname)
        subdirname = os.path.join(dirname, "testdir" + str(random.randint(10000, 99999)))
        os.mkdir(subdirname)
        subsubdirname = os.path.join(subdirname, "testdir" + str(random.randint(10000, 99999)))
        os.mkdir(subsubdirname)
        # Create a big enough file with a non-useful, obfuscated filename
        output_file1 = os.path.join(subsubdirname, "111c1c9e2bdfb5114044bf25152b7eab.bin")
        create_big_file(output_file1)
        assert os.path.isfile(output_file1)
        # create the filelist, with just the above file
        myfilelist = [output_file1]
        # and now unleash the magic on that filelist, with a more useful jobname:
        jobname = "My Important Download 2020"
        deobfuscate_list(myfilelist, jobname)
        # Check original files:
        assert not os.path.isfile(output_file1)  # original filename should not be there anymore
        # Check the renaming
        assert os.path.isfile(os.path.join(subsubdirname, jobname + ".bin"))  # ... it should be renamed to the jobname
        # Done. Remove (non-empty) directory
        shutil.rmtree(dirname)

    def test_deobfuscate_big_file_small_accompanying_files(self):
        """Small files sharing the big file's basename are renamed along with it."""
        # input: myiso.iso, with accompanying files (.srt files in this case)
        # test that the small accompanying files (with same basename) are renamed accordingly to the big ISO
        # Create directory (with a random directory name)
        dirname = os.path.join(SAB_DATA_DIR, "testdir" + str(random.randint(10000, 99999)))
        os.mkdir(dirname)
        # Create a big enough file with a non-useful filename
        isofile = os.path.join(dirname, "myiso.iso")
        create_big_file(isofile)
        assert os.path.isfile(isofile)
        # and a srt file
        srtfile = os.path.join(dirname, "myiso.srt")
        create_small_file(srtfile)
        assert os.path.isfile(srtfile)
        # and a dut.srt file
        dutsrtfile = os.path.join(dirname, "myiso.dut.srt")
        create_small_file(dutsrtfile)
        assert os.path.isfile(dutsrtfile)
        # and a non-related file
        txtfile = os.path.join(dirname, "something.txt")
        create_small_file(txtfile)
        assert os.path.isfile(txtfile)
        # create the filelist, with just the above files
        myfilelist = [isofile, srtfile, dutsrtfile, txtfile]
        # and now unleash the magic on that filelist, with a more useful jobname:
        jobname = "My Important Download 2020"
        deobfuscate_list(myfilelist, jobname)
        # Check original files:
        assert not os.path.isfile(isofile)  # original iso not be there anymore
        assert not os.path.isfile(srtfile)  # ... and accompanying file neither
        assert not os.path.isfile(dutsrtfile)  # ... and this one neither
        assert os.path.isfile(txtfile)  # should still be there: not accompanying, and too small to rename
        # Check the renaming
        assert os.path.isfile(os.path.join(dirname, jobname + ".iso"))  # ... should be renamed to the jobname
        assert os.path.isfile(os.path.join(dirname, jobname + ".srt"))  # ... should be renamed to the jobname
        assert os.path.isfile(os.path.join(dirname, jobname + ".dut.srt"))  # ... should be renamed to the jobname
        # Done. Remove (non-empty) directory
        shutil.rmtree(dirname)

    def test_deobfuscate_collection_with_same_extension(self):
        """A collection of same-extension files is left alone; loners get renamed."""
        # input: a collection of 3+ bigger files with the same extension
        # test that there is no renaming on the collection ... as that's useless on a collection
        # Create directory (with a random directory name)
        dirname = os.path.join(SAB_DATA_DIR, "testdir" + str(random.randint(10000, 99999)))
        os.mkdir(dirname)
        # Create big enough files with a non-useful filenames, all with same extension
        file1 = os.path.join(dirname, "file1.bin")
        create_big_file(file1)
        assert os.path.isfile(file1)
        file2 = os.path.join(dirname, "file2.bin")
        create_big_file(file2)
        assert os.path.isfile(file2)
        file3 = os.path.join(dirname, "file3.bin")
        create_big_file(file3)
        assert os.path.isfile(file3)
        file4 = os.path.join(dirname, "file4.bin")
        create_big_file(file4)
        assert os.path.isfile(file4)
        # other extension ... so this one should get renamed
        otherfile = os.path.join(dirname, "other.iso")
        create_big_file(otherfile)
        assert os.path.isfile(otherfile)
        # create the filelist, with the above files
        myfilelist = [file1, file2, file3, file4, otherfile]
        # and now unleash the magic on that filelist, with a more useful jobname:
        jobname = "My Important Download 2020"
        deobfuscate_list(myfilelist, jobname)
        # Check original files:
        # the collection with same extension should still be there:
        assert os.path.isfile(file1)  # still there
        assert os.path.isfile(file2)  # still there
        assert os.path.isfile(file3)  # still there
        assert os.path.isfile(file4)  # still there
        # but the one separate file with obfuscated name should be renamed:
        assert not os.path.isfile(otherfile)  # should be renamed
        # Check the renaming
        assert os.path.isfile(os.path.join(dirname, jobname + ".iso"))  # ... should be renamed to the jobname
        # Done. Remove (non-empty) directory
        shutil.rmtree(dirname)

    def test_deobfuscate_filelist_nasty_tests(self):
        """Non-existing files and directories must not crash or get renamed."""
        # check no problems occur with nasty use cases
        # non existing file
        myfilelist = ["/bla/bla/notthere.bin"]
        jobname = "My Important Download 2020"
        deobfuscate_list(myfilelist, jobname)
        # Create directory with a directory name that could be renamed, but should not
        dirname = os.path.join(SAB_DATA_DIR, "333c1c9e2bdfb5114044bf25152b7eaa.bin")
        os.mkdir(dirname)
        myfilelist = [dirname]
        jobname = "My Important Download 2020"
        deobfuscate_list(myfilelist, jobname)
        assert os.path.exists(dirname)
        shutil.rmtree(dirname)

    def test_deobfuscate_par2(self):
        """recover_par2_names() renames a file to the name stored in its par2."""
        # Simple test to see if the par2 file is picked up
        test_dir = os.path.join(SAB_DATA_DIR, "deobfuscate_filenames")
        test_input = os.path.join(test_dir, "E0CcYdGDFbeCAsT3LoID")
        test_output = os.path.join(test_dir, "random.bin")
        # Check if it is there
        assert os.path.exists(test_input)
        list_of_files = []
        for (dirpath, dirnames, filenames) in os.walk(test_dir):
            list_of_files += [os.path.join(dirpath, file) for file in filenames]
        # Run deobfuscate
        recover_par2_names(list_of_files)
        # Should now be renamed to the filename in the par2 file
        assert not os.path.exists(test_input)
        assert os.path.exists(test_output)
        # Rename back
        os.rename(test_output, test_input)
        assert os.path.exists(test_input)

    def test_deobfuscate_par2_plus_deobfuscate(self):
        """par2-based renaming first, then jobname-based deobfuscation."""
        # test for first par2 based renaming, then deobfuscate obfuscated names
        work_dir = os.path.join(SAB_DATA_DIR, "testdir" + str(random.randint(10000, 99999)))
        os.mkdir(work_dir)
        source_zip_file = os.path.join(SAB_DATA_DIR, "deobfuscate_par2_based", "20mb_with_par2_package.zip")
        with zipfile.ZipFile(source_zip_file, "r") as zip_ref:
            zip_ref.extractall(work_dir)
        assert os.path.isfile(os.path.join(work_dir, "rename.par2"))  # the par2 that will do renaming
        assert os.path.isfile(os.path.join(work_dir, "aaaaaaaaaaa"))  # a 20MB no-name file ...
        list_of_files = []
        for (dirpath, dirnames, filenames) in os.walk(work_dir):
            list_of_files += [os.path.join(dirpath, file) for file in filenames]
        # deobfuscate will do:
        # first par2 based renaming aaaaaaaaaaa to twentymb.bin,
        # then deobfuscate twentymb.bin to the job name (with same extension)
        list_of_files = recover_par2_names(list_of_files)
        assert os.path.isfile(os.path.join(work_dir, "twentymb.bin"))  # should exist
        deobfuscate_list(list_of_files, "My Great Download")
        assert os.path.isfile(os.path.join(work_dir, "My Great Download.bin"))  # the twentymb.bin should be renamed
        assert not os.path.isfile(os.path.join(work_dir, "twentymb.bin"))  # should now be gone
        shutil.rmtree(work_dir)
|
from mara.storage.dict import DictStore
async def test_flat_store():
    """A flat DictStore serializes its keys to a plain JSON object."""
    instance = DictStore(a=1, foo="bar")
    serialized = await instance.store()
    assert serialized == '{"a": 1, "foo": "bar"}'
async def test_flat_restore():
    """Restoring flat JSON exposes the keys as attributes."""
    restored = await DictStore.restore('{"a": 1, "foo": "bar"}')
    assert restored.a == 1
    assert restored.foo == "bar"
async def test_nested_store():
    """A nested DictStore is serialized with its class name and JSON payload."""
    parent = DictStore(a=1, child=DictStore(b=2))
    serialized = await parent.store()
    expected = '{"a": 1, "child": {"_store_class": "DictStore", "data": "{\\"b\\": 2}"}}'
    assert serialized == expected
async def test_nested_restore():
    """Restoring nested JSON rebuilds the child DictStore."""
    restored = await DictStore.restore(
        '{"a": 1, "child": {"_store_class": "DictStore", "data": "{\\"b\\": 2}"}}'
    )
    assert restored.a == 1
    assert isinstance(restored.child, DictStore)
    assert restored.child.b == 2
|
class medidas:
    """Geometry helpers: hypotenuse/area of right triangles, circle measures.

    Every method returns 0 (and prints the error) when the computation fails.
    """
    # Approximation of pi kept as the original class attribute.
    π = 3.14159265359

    def coordenadas_hipotenusa(self, x1: float, y1: float, x2: float, y2: float) -> float:
        '''Compute both legs from two points and return the hypotenuse,
        i.e. the Euclidean distance between the points.
        Parameters:
            x1 (float): first X position
            y1 (float): first Y position
            x2 (float): second X position
            y2 (float): second Y position
        Returns:
            hypotenuse (float): computed hypotenuse length
        '''
        try:
            cateto_adjacente = x2 - x1
            cateto_oposto = y2 - y1
            d = (cateto_adjacente ** 2) + (cateto_oposto ** 2)
            d = d ** .5
            return d
        except Exception as er:
            print('medidas.coordenadas_hipotenusa:')
            print(er)
            return 0

    def catetos_hipotenusa(self, cateto_adjacente: float, cateto_oposto: float) -> float:
        '''Compute the hypotenuse from the two legs.
        Parameters:
            cateto_adjacente (float): adjacent leg
            cateto_oposto (float): opposite leg
        Returns:
            hypotenuse (float): computed hypotenuse length
        '''
        try:
            d = (cateto_adjacente ** 2) + (cateto_oposto ** 2)
            d = d ** .5
            return d
        except Exception as er:
            print('medidas.catetos_hipotenusa:')
            print(er)
            return 0

    def coordenadas_area(self, x1: float, y1: float, x2: float, y2: float) -> float:
        '''Compute both legs from two points and return the triangle area.
        Parameters:
            x1 (float): first X position
            y1 (float): first Y position
            x2 (float): second X position
            y2 (float): second Y position
        Returns:
            area (float): computed area
        '''
        try:
            cateto_adjacente = x2 - x1
            cateto_oposto = y2 - y1
            a = (cateto_adjacente * cateto_oposto) / 2
            return a
        except Exception as er:
            print('medidas.coordenadas_area:')
            print(er)
            return 0

    def catetos_area(self, cateto_adjacente: float, cateto_oposto: float) -> float:
        '''Compute the right-triangle area from the two legs.
        Parameters:
            cateto_adjacente (float): adjacent leg
            cateto_oposto (float): opposite leg
        Returns:
            area (float): computed area
        '''
        try:
            a = (cateto_adjacente * cateto_oposto) / 2
            return a
        except Exception as er:
            print('medidas.catetos_area:')
            print(er)
            return 0

    def raio_area(self, raio: float) -> float:
        '''Compute the circle area from a radius.
        Parameters:
            raio (float): radius
        Returns:
            area (float): computed area (pi * r^2)
        '''
        try:
            a = (self.π * (raio ** 2))
            return a
        except Exception as er:
            print('medidas.raio_area:')
            print(er)
            return 0

    def raio_circunferencia(self, raio: float) -> float:
        '''Compute the circumference from a radius.
        Parameters:
            raio (float): radius
        Returns:
            circumference (float): computed circumference (2 * pi * r)
        '''
        try:
            c = 2 * self.π * raio
            return c
        except Exception as er:
            print('medidas.raio_circunferencia:')
            print(er)
            return 0

    def area_raio(self, area: float) -> float:
        '''Compute the radius from a circle area.
        Parameters:
            area (float): area
        Returns:
            raio (float): computed radius, sqrt(area / pi)
        '''
        try:
            # Bug fix: was `area / self.π ** .5`, which divides by sqrt(pi)
            # because ** binds tighter than /; the radius is sqrt(area / pi).
            r = (area / self.π) ** .5
            return r
        except Exception as er:
            print('medidas.area_raio:')
            print(er)
            return 0

    def circunferencia_raio(self, circunferencia: float) -> float:
        '''Compute the radius from a circumference.
        Parameters:
            circunferencia (float): circumference
        Returns:
            raio (float): computed radius (c / (2 * pi))
        '''
        try:
            r = circunferencia / (2 * self.π)
            return r
        except Exception as er:
            print('medidas.circunferencia_raio:')
            print(er)
            return 0
|
# Copyright 2021 InterDigital R&D and Télécom Paris.
# Author: Giorgia Cantisani
# License: Apache 2.0
"""Code to generate the dataset and set sources to zero manually.
"""
import os
import argparse
import random
import numpy as np
import librosa
import musdb
import soundfile as sf
from copy import deepcopy as cp
from utils.utils_adaptation import *
def main():
    """Build a MUSDB18 variant where each source is manually silenced.

    For every track in the MUSDB18 test set, the mixture STFT is split
    into as many time segments as there are sources. Each source is
    zeroed on its own segment (going through STFT -> zero -> ISTFT so
    the borders stay smooth) and written to the new dataset folder,
    together with the mixture recomputed from the modified sources.
    """
    source_names = ["drums", "bass", "other", "vocals"]
    random_order = True
    channels = [0, 1]

    path = '/tsi/doctorants/gcantisani/Datasets/MUSDB18/'
    new_path = '/tsi/doctorants/gcantisani/Datasets/MUSDB18_manual_activations/'
    os.makedirs(new_path, exist_ok=True)

    # Iterate over all the tracks in the test set
    test_set = musdb.DB(root=path, subsets=["test"], is_wav=False)
    for idx in range(len(test_set)):
        track = test_set.tracks[idx]
        print('-------------------')
        print(idx, str(track.name))

        # copy the track object and associate the new path
        new_track = cp(track)
        new_track.path = os.path.join(new_path, track.subset, track.name)
        os.makedirs(os.path.join(new_path, track.subset, track.name), exist_ok=True)

        # choose the order in which the sources are silenced
        if random_order:
            sources = random.sample(source_names, 4)
        else:
            # BUGFIX: 'sources' was previously undefined on this path,
            # raising NameError as soon as random_order was disabled.
            sources = list(source_names)
        print(sources)

        # Segment boundaries come from the mixture STFT so every source
        # shares the same frame grid; transforming to STFT and back
        # smooths the segment borders.
        linear_mixture = track.targets['linear_mixture'].audio
        stft_mixture = librosa.stft(linear_mixture[:, 0])
        segment_len = stft_mixture.shape[1] // len(source_names)

        for t, name in enumerate(sources):
            audio = track.targets[name].audio
            audio_new = np.zeros_like(audio)

            # frames silenced for this source; the last source absorbs
            # any remainder frames
            win = slice(t * segment_len, (t + 1) * segment_len)
            if t == len(source_names) - 1:
                win = slice(t * segment_len, stft_mixture.shape[1])

            for ch in channels:
                stft = librosa.stft(audio[:, ch])
                stft[:, win] = 0
                audio_new[:, ch] = librosa.istft(stft)

            new_track.sources[name].audio = audio_new
            sf.write(os.path.join(new_track.path, name + '.wav'), audio_new, track.rate)

        # recompute the mixture from the modified sources
        new_references = np.stack([new_track.sources[name].audio for name in source_names])
        audio_mix = new_references.sum(0)
        sf.write(os.path.join(new_track.path, 'mixture.wav'), audio_mix, track.rate)


if __name__ == "__main__":
    main()
import pymongo
# Create the database and collection — do not modify! (exercise scaffolding)
connection = pymongo.MongoClient("mongodb://localhost:27017/")
db = connection["library"]
col = db["books"]
# Delete the lost book from the database.
# The '^' anchor restricts the regex to titles that START with the phrase.
query = { "title": {"$regex": "^Alice's Adventures in Wonderland"} }
col.delete_many(query)
# Check that the book was deleted by listing what remains — do not modify!
for x in col.find():
    print(x)
# tkinter ships with the standard CPython installers
from tkinter import *
from tkinter import messagebox


def buttonchecker():
    # Pop an informational dialog when the button is clicked.
    messagebox.showinfo("Information","Here is my very own GUI :) ")


# Root window: title and initial geometry.
window = Tk()
window.title("Fun GUI")
window.geometry('350x250')

# Greeting label in the top-left grid cell.
lb = Label(window, text="Hello to my site!! Wanna see something :-> ")
lb.grid(column=0, row=0)

# Button wired to buttonchecker() through the command option.
B = Button(window, text="click here", command=buttonchecker, bg="yellow")
B.grid(row=0, column=2)

# Hand control to the Tk event loop.
window.mainloop()
import os
import tempfile
import unittest
import logging
from pyidf import ValidationLevel
import pyidf
from pyidf.idf import IDF
from pyidf.setpoint_managers import SetpointManagerOutdoorAirReset
log = logging.getLogger(__name__)
class TestSetpointManagerOutdoorAirReset(unittest.TestCase):
    """Round-trip a SetpointManagerOutdoorAirReset object through an IDF file."""

    def setUp(self):
        # scratch file the IDF is saved to and re-read from
        self.fd, self.path = tempfile.mkstemp()

    def tearDown(self):
        os.remove(self.path)

    def test_create_setpointmanageroutdoorairreset(self):
        pyidf.validation_level = ValidationLevel.error

        # (attribute, value, is_real) — is_real selects assertAlmostEqual
        fields = [
            ("name", "Name", False),
            ("control_variable", "Temperature", False),
            ("setpoint_at_outdoor_low_temperature", 3.3, True),
            ("outdoor_low_temperature", 4.4, True),
            ("setpoint_at_outdoor_high_temperature", 5.5, True),
            ("outdoor_high_temperature", 6.6, True),
            ("setpoint_node_or_nodelist_name", "node|Setpoint Node or NodeList Name", False),
            ("schedule_name", "object-list|Schedule Name", False),
            ("setpoint_at_outdoor_low_temperature_2", 9.9, True),
            ("outdoor_low_temperature_2", 10.1, True),
            ("setpoint_at_outdoor_high_temperature_2", 11.11, True),
            ("outdoor_high_temperature_2", 12.12, True),
        ]

        obj = SetpointManagerOutdoorAirReset()
        for attr, value, _ in fields:
            setattr(obj, attr, value)

        idf = IDF()
        idf.add(obj)
        idf.save(self.path, check=False)

        # dump the serialized file for debugging
        with open(self.path, mode='r') as f:
            for line in f:
                log.debug(line.strip())

        # reload and verify every field survived the round trip
        idf2 = IDF(self.path)
        reloaded = idf2.setpointmanageroutdoorairresets[0]
        for attr, value, is_real in fields:
            if is_real:
                self.assertAlmostEqual(getattr(reloaded, attr), value)
            else:
                self.assertEqual(getattr(reloaded, attr), value)
# Further clean occupation data from 1847 census data
# Alice Huang, 7/18/19
import csv
def main():
    """Split matched 1847-census occupation rows into male and female CSVs.

    Reads testdata2.csv, whose columns are:
        0: male occupation, 1: male count,
        2: female occupation, 3: female count,
        4: an occupation name marking a "match" (may be empty).
    Rows whose occupation appears in column 4 are kept; surviving male
    rows go to testdata3.csv and female rows to testdata4.csv.
    """
    male = []
    female = []
    matches = []

    with open('testdata2.csv', newline='') as f:
        reader = csv.reader(f, delimiter=',')
        for row in reader:
            # split each input row into the male and female halves
            male.append([row[0].strip(), row[1].strip()])
            female.append([row[2].strip(), row[3].strip()])
            if row[4] != '':
                matches.append(row[4].strip())

    # Keep only matched occupations. A set gives O(1) membership tests;
    # previously the male side used a nested O(n*m) scan while the female
    # side used 'in' — now both are consistent.
    match_set = set(matches)
    listM = [entry for entry in male if entry[0] in match_set]
    listF = [entry for entry in female if entry[0] in match_set]

    # newline='' stops the csv module emitting blank lines on Windows
    with open('testdata3.csv', 'w', newline='') as f:
        writer = csv.writer(f, delimiter=',')
        writer.writerow(['OCCUPATION', 'MALE'])  # header
        writer.writerows(listM)
    with open('testdata4.csv', 'w', newline='') as f1:
        writer = csv.writer(f1, delimiter=',')
        writer.writerow(['OCCUPATION2', 'FEMALE'])  # header
        writer.writerows(listF)


if __name__ == '__main__':
    # guard so importing this module no longer triggers the file processing
    main()
|
from django import forms
from django.contrib.auth.models import User
from django.contrib.auth.forms import UserCreationForm
class UserRegisterForm(UserCreationForm):
    """Registration form extending UserCreationForm with an email field."""

    email = forms.EmailField()

    class Meta:
        model = User
        fields = ['username', 'email', 'password1', 'password2']

    def __init__(self, *args, **kwargs):
        super(UserRegisterForm, self).__init__(*args, **kwargs)
        # Django's default help texts clutter the signup page; hide them.
        for name in ('username', 'password1', 'password2'):
            self.fields[name].help_text = None
from pgmpy.models import BayesianModel
from pgmpy.sampling import BayesianModelSampling
from pgmpy.factors.discrete import State
import numpy as np
import helpers
#all_slots = ['area', 'customer rating', 'eatType', 'familyFriendly', 'food', 'near', 'priceRange']
def sample_slots(model_info_file, mr_slot_names):
    """Sample a non-empty set of present slots from a trained Bayesian model.

    Parameters:
        model_info_file: pickle containing {'model': BayesianModel,
            'all_slots': list of slot names}.
        mr_slot_names: slots already present in the MR; every other slot
            is clamped to 0 (absent) as evidence.

    Returns:
        list: slot names whose sampled value is 1 (present).
    """
    model_info = helpers.load_from_pickle(model_info_file)
    model = model_info['model']

    # use the missing mr slots as evidence
    all_slots = model_info['all_slots']
    missing_slots = [mr for mr in all_slots if mr not in mr_slot_names]
    evidence = [State(mr, 0) for mr in missing_slots]

    # build the sampler once (it was previously constructed twice)
    inference = BayesianModelSampling(model)

    # don't allow empty samples
    sampled_slots = []
    while sampled_slots == []:
        sample = inference.rejection_sample(evidence=evidence, size=1, return_type='recarray')
        # keep the column names whose sampled value is 1
        sampled_slots = [name for var, name in zip(sample.view('<i8'), sample.dtype.names) if var == 1]
    return sampled_slots
|
from random import randint
from typing import Optional
from sqlalchemy.orm import Session
from app import crud, schemas
from app.constants.state import TaskType, TaskState
from tests.utils.utils import random_lower_string
from tests.utils.datasets import create_dataset_record
def create_task(
    db: Session,
    user_id: int,
    project_id: Optional[int] = None,
    type_: TaskType = TaskType.mining,
    state: TaskState = TaskState.done,
):
    """Create a task plus an associated dataset record (test fixture)."""
    # fall back to a random project id when none is supplied
    project_id = project_id or randint(100, 200)
    payload = {
        "name": random_lower_string(),
        "type": type_,
        "project_id": project_id,
        "parameters": {"dataset_id": randint(100, 200)},
        "state": state,
    }
    task = crud.task.create_task(
        db,
        obj_in=schemas.TaskCreate(**payload),
        task_hash=random_lower_string(),
        user_id=user_id,
    )
    create_dataset_record(db, user_id, project_id, task_id=task.id)
    return task
|
import dolfin as df
import math
class RadialBasisFunction(df.UserExpression):
    """Gaussian bump exp(-|x - r0|^2 / l^2) as a (slow, legacy) UserExpression."""

    def __init__(self, r0, l, **kwargs):
        self.r0 = r0  # bump centre
        self.l = l    # length scale
        super().__init__(**kwargs)

    def eval_cell(self, values, x, ufc_cell):
        raise NotImplementedError

    def eval(self, values, x):
        # squared distance from the centre, then the Gaussian envelope
        dx = x[0] - self.r0[0]
        dy = x[1] - self.r0[1]
        values[0] = math.exp(-(dx ** 2 + dy ** 2) / self.l ** 2)

    def value_shape(self):
        return ()
def FastRadialBasisFunction(element):
    """Compiled (fast) Gaussian bump expression.

    r0 and l are placeholder Constants the caller is expected to update.
    Returns (expression, r0, l).
    """
    r0 = df.Constant((0.5, 0.5))
    l = df.Constant(0.15)
    expression = df.Expression(
        ' exp(-(pow((x[0] - r0[0]),2) + pow((x[1] - r0[1]),2))/ pow(l,2))',
        r0=r0,
        l=l,
        element=element,
    )
    return expression, r0, l
|
import os
from fastapi import File, UploadFile, Depends, HTTPException
from fastapi.encoders import jsonable_encoder
from sqlalchemy.orm import Session
from common.constant import Message
from model.user_profile_model import UserProfile
from router import router
from utils.database import get_db
from utils.jwt_utils import get_current_user_rowid
from utils.save_file import get_filename, save_file
from schema.user_profile_schema import UserProfileSchema
from settings import FILE_MAPPING
# @router.post('/user_profiles/avatar', tags=['User profile'])
# async def uplaod_avater(file: UploadFile = File(...), _: int = Depends(get_current_user_rowid)):
# content = await file.read()
# if not content:
# raise HTTPException(status_code=400, detail=Message.AVATER_NOT_EMPTY)
# file_name, file_ext = file.filename.rsplit('.', 1)
# file_name: str = f"{get_filename(file_name)}.{file_ext}"
# file_name = os.path.join(FILE_MAPPING.get('avater'), file_name)
# _ = await save_file(file_name, content)
# return {"file_path": file_name}
@router.post("/user_profiles/", tags=["User profile"], description="添加用户信息")
async def user_profile(user_profile_schema: UserProfileSchema, user_rowid: int = Depends(get_current_user_rowid), db: Session = Depends(get_db)):
user_profile: UserProfile = UserProfile.get_profile_by_user_rowid(user_rowid, db)
profile_data = user_profile_schema.dict()
if user_profile:
user_profile.update(db, **profile_data)
else:
user_profile = UserProfile(**profile_data, user_id=user_rowid)
user_profile.save(db)
return jsonable_encoder(user_profile_schema)
|
''' Arithmetic operators
+ : addition          ** : power (the 'pow' builtin also works)
- : subtraction        % : remainder of the division
* : multiplication    // : integer (floor) division
/ : division          == : equality (both equal signs are required)
**(1/2) : square root

Precedence order
1st : whatever is inside parentheses ()
2nd : power **
3rd : multiplication, division, remainder, integer division (left to right)
4th : addition and subtraction
'''
a = int(input('Digite um valor:'))
b = int(input('Outro valor:'))
s = a + b
su = a - b
m = a * b
d = a / b
p = a ** b
r = a % b
di = a // b
print('A soma é {}, a subtração é {}, a multiplicação é {} ' .format(s, su, m) , end=' ' ) #", end=' '" keeps the next print on the same line; the trailing space stops the texts running together
print((', a divisão é {:.1f}' .format(d))) #':.1f' limits the value to one digit after the decimal point
print('a potencia é {:=^4},o resto da divisão é {} e divisão inteira é {}' .format(p, r, di)) #'{:=^4}' centers the power in a 4-wide field padded with '='
|
#!/usr/bin/python
#
# Copyright 2018-2020 Polyaxon, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Dict
from polycommon.config_manager import ConfigManager
def set_celery(context, config: ConfigManager, routes: Dict):
    """Populate *context* with Celery settings derived from *config*.

    Args:
        context: mutable settings mapping being built.
        config: accessor for POLYAXON_* configuration values.
        routes: Celery task routing table, stored verbatim.
    """
    context["CELERY_TASK_TRACK_STARTED"] = config.get_boolean(
        "POLYAXON_CELERY_TASK_TRACK_STARTED", is_optional=True, default=True
    )
    context["CELERY_BROKER_POOL_LIMIT"] = config.get_int(
        "POLYAXON_CELERY_BROKER_POOL_LIMIT", is_optional=True, default=100
    )
    context["CELERY_BROKER_BACKEND"] = config.broker_backend

    confirm_publish = config.get_boolean(
        "POLYAXON_CELERY_CONFIRM_PUBLISH", is_optional=True, default=True
    )
    context["CELERY_CONFIRM_PUBLISH"] = confirm_publish
    if config.is_rabbitmq_broker and confirm_publish:
        # see https://github.com/celery/celery/issues/5410 for details
        context["CELERY_BROKER_TRANSPORT_OPTIONS"] = {"confirm_publish": True}

    context["CELERY_BROKER_URL"] = config.get_broker_url()
    context["INTERNAL_EXCHANGE"] = config.get_string(
        "POLYAXON_INTERNAL_EXCHANGE", is_optional=True, default="internal"
    )

    # local renamed from the typo'd 'result_bucked'; only used to decide
    # whether a result backend is configured at all
    result_backend_url = config.get_string(
        "POLYAXON_REDIS_CELERY_RESULT_BACKEND_URL", is_optional=True,
    )
    if result_backend_url:
        context["CELERY_RESULT_BACKEND"] = config.get_redis_url(
            "POLYAXON_REDIS_CELERY_RESULT_BACKEND_URL"
        )

    context["CELERY_WORKER_PREFETCH_MULTIPLIER"] = config.get_int(
        "POLYAXON_CELERY_WORKER_PREFETCH_MULTIPLIER", is_optional=True, default=4
    )

    # eager mode runs tasks synchronously with an in-memory transport
    eager_mode = config.get_boolean("POLYAXON_CELERY_TASK_ALWAYS_EAGER")
    context["CELERY_TASK_ALWAYS_EAGER"] = eager_mode
    if eager_mode:
        context["CELERY_BROKER_TRANSPORT"] = "memory"

    context["CELERY_ACCEPT_CONTENT"] = ["application/json"]
    context["CELERY_TASK_SERIALIZER"] = "json"
    context["CELERY_RESULT_SERIALIZER"] = "json"
    context["CELERY_TASK_IGNORE_RESULT"] = True
    context["CELERY_TIMEZONE"] = config.timezone
    context["CELERY_HARD_TIME_LIMIT_DELAY"] = config.get_int(
        "POLYAXON_CELERY_HARD_TIME_LIMIT_DELAY", is_optional=True, default=180
    )
    context["CELERY_WORKER_MAX_TASKS_PER_CHILD"] = config.get_int(
        "POLYAXON_CELERY_WORKER_MAX_TASKS_PER_CHILD", is_optional=True, default=100
    )
    context["CELERY_WORKER_MAX_MEMORY_PER_CHILD"] = config.get_int(
        "POLYAXON_CELERY_WORKER_MAX_MEMORY_PER_CHILD", is_optional=True, default=400000
    )

    class Intervals:
        """All intervals are in seconds"""

        OPERATIONS_DEFAULT_RETRY_DELAY = config.get_int(
            "POLYAXON_INTERVALS_OPERATIONS_DEFAULT_RETRY_DELAY",
            is_optional=True,
            default=60,
        )
        OPERATIONS_MAX_RETRY_DELAY = config.get_int(
            "POLYAXON_INTERVALS_OPERATIONS_MAX_RETRY_DELAY",
            is_optional=True,
            default=60 * 60,
        )
        RUNS_SCHEDULER = config.get_int(
            "POLYAXON_INTERVALS_RUNS_SCHEDULER", is_optional=True, default=30
        )

    context["Intervals"] = Intervals
    context["CELERY_TASK_ROUTES"] = routes
|
#おまじない
from tkinter import Tk, Button, X, Frame, GROOVE, W, E, Label, Entry, END
import numpy as np
import os
from matplotlib import pyplot as plt
from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg
# Plotting helper
def graph(data):
    """Plot *data* on a 6x3 figure embedded in frame_3 and return the figure."""
    fig = plt.Figure(figsize=(6, 3))
    axis = fig.add_subplot(111)
    # draw the supplied data
    axis.plot(data)
    # render the figure inside the Tk frame
    canvas = FigureCanvasTkAgg(fig, frame_3)
    canvas.draw()
    canvas.get_tk_widget().grid(row=1, column=0)
    return fig
# Read the entry boxes and plot their values
def plot():
    """Collect the three entry-box values as ints and hand them to graph()."""
    values = [int(box.get()) for box in (box1, box2, box3)]
    graph(values)
# Entry point: build the window, wire the button, start the Tk loop
if __name__ == '__main__':
    # root Tk window
    root = Tk()
    # frame holding the "draw" button
    frame_1 = Frame(root, bd=4, relief=GROOVE)
    frame_1.grid(row=0, column=0)
    btn1 = Button(frame_1, text='描画', command=plot, font=("",20))  # runs plot() on click
    btn1.pack(fill=X)
    # frame holding the graph; start with an empty plot
    frame_3 = Frame(root, bd=4, relief=GROOVE)
    frame_3.grid(row=1, column=0)
    canvas = FigureCanvasTkAgg(graph([]), frame_3)
    # entry boxes for the three values to plot
    box1 = Entry(width=3)
    box1.place(x=20, y=5)
    box2 = Entry(width=3)
    box2.place(x=50, y=5)
    box3 = Entry(width=3)
    box3.place(x=80, y=5)
    # enter the Tk event loop
    root.mainloop()
from random import randint
from random import choice
from redbot.core import commands
from redbot.core import Config
from redbot.core.data_manager import cog_data_path
import dateutil.parser
import discord
class Gacha(commands.Cog):
    """
    Plays a game of gachapon.
    """

    def __init__(self, bot):
        self.bot = bot

    def _convertList(self, rollList):
        """Map raw rolls (1..10000) to 'TIER: prize' strings.

        A roll below a tier's threshold lands in that tier; the prize is
        drawn uniformly from the tier's pool.
        """
        # eventually move to outside of this file
        # upper bound (exclusive) of the roll range per rarity tier
        rarityTable = {
            "S": 15,
            "A": 500,
            "B": 2500,
            "C": 10000
        }
        # prize pools (the old dict-of-empty-dicts values were never used,
        # so plain lists preserve behavior, including the choice() order)
        poolS = [
            "gonface", "BlademasterX", "Varun's Mom", "Hoodrat Jordan",
            "Daughter Arta", "T-Time Pat", "GAYM", "Roar", "Sleeping Prim",
            "Kinx, the Avatar of Idol Hell", "Leanne Jin",
        ]
        poolA = [
            "cho", "Eaguru", "Ladleram", "Duvet", "kat", "Varun", "pmon",
            "leonid", "ggsnipes", "Prim", "Atsui", "Arta", "Thai", "Himsef",
            "Wineandbread", "V3NOMG", "Brandon", "Kinx",
        ]
        poolB = [
            "10% Coupon", "Double Stuff Oreo", "Cast Iron Pan", "Killer Fork",
            "Thunder Kids", "Magic Stick", "Scorching Shower",
        ]
        poolC = [
            "Old Boot", "Torn Shirt", "Ripped Glove", "Tattered Scarf",
            "Ripped Jeans", "Paper Knife", "Straw Hat", "Literally Dogshit",
            "Stained Underwear",
        ]

        converted = []
        for roll in rollList:
            if roll < rarityTable["S"]:
                converted.append("S: " + choice(poolS))
            elif roll < rarityTable["A"]:
                converted.append("A: " + choice(poolA))
            elif roll < rarityTable["B"]:
                converted.append("B: " + choice(poolB))
            else:
                converted.append("C: " + choice(poolC))
        return converted

    @commands.command()
    async def gacha(self, ctx, rollNumber : int = 10):
        """
        Plays the gacha game.
        """
        # avoid overrolling / nonsense roll counts
        if rollNumber > 20:
            await ctx.send("Please don't be greedy.")
            return
        if rollNumber <= 0:
            await ctx.send("?")
            return
        # BUGFIX: a trailing 'else' branch here used to send
        # "I don't understand." and return for EVERY valid roll count
        # (1..20, including the default 10), making the game unreachable.

        rollList = [randint(1, 10000) for _ in range(rollNumber)]
        rolls = self._convertList(rollList)

        displayString = ""
        for line in rolls:
            displayString = displayString + line + "\n"
        await ctx.send("```" + displayString + "```")
        # change to embed form, when I learn how

    @commands.command()
    async def gachalist(self, ctx, listTier : str = None):
        """
        Shows the roll list with percentages. If a specific tier is specified, it will show only that tier instead.
        """
        await ctx.send("check github xdd")
# NOTE: Python 2 script (uses raw_input and the print statement).
celsius = int(raw_input('Informe a temperatura em Celsius: '))
# Celsius -> Fahrenheit: F = C * 9/5 + 32 (done via /5.0 to force float math)
farenheit = ((celsius / 5.0) * 9.0) + 32.0
print "A temperatura em Farenheit eh", farenheit
|
# A train script using just pytroch and torchvision
# test to get training loop right
# main detection script for now
### TODO
# add checkpoint saving
# try again on FP16
# validation loop
# model watching for wandb
# argparse?
# different models
import torch
import torchvision
import torchvision.models.detection as models
import models.detection as local_models
import argparse
from data_prep.preproc_coco_detect import CocoDetection, CocoDetectProcessor, coco_remove_images_without_annotations
from data_prep.preproc_coco_detect import Compose, RandomHorizontalFlip, ToTensor
from misc_utils.detection_logger import Logger
import os
#import wandb
#wandb.init(project="object_detection")
### fp16 to save space
try:
from apex.parallel import DistributedDataParallel as DDP
from apex.fp16_utils import *
from apex import amp, optimizers
from apex.multi_tensor_apply import multi_tensor_applier
except ImportError:
raise ImportError("Please install apex from https://www.github.com/nvidia/apex to run this script.")
assert torch.backends.cudnn.enabled, "fp16 mode requires cudnn backend to be enabled."
# Detection models take variable-sized images/targets, so the default
# stacking collate cannot be used. Transpose the batch instead:
# [(img, tgt), ...] -> ((img, ...), (tgt, ...)); the loops below convert
# the tuples back to lists before calling the model.
def collate_fn(batch):
    transposed = zip(*batch)
    return tuple(transposed)
def batch_loop(model, optimizer, data_loader, device, epoch, fp16):
    """Train *model* for one epoch over *data_loader*.

    Based on the torchvision detection engine's train_one_epoch script.

    Args:
        model: detection model; in train mode returns a dict of losses.
        optimizer: optimizer stepping the model parameters.
        data_loader: yields (images, targets) batches (see collate_fn).
        device: device each batch is moved to before the forward pass.
        epoch: epoch index, used for logging only.
        fp16: when True, backprop goes through apex amp loss scaling.
    """
    # based on the train_one_epoch detection engine reference script
    model.train()
    if fp16:
        # fp16 fix? - https://github.com/NVIDIA/apex/issues/122
        # keep BatchNorm modules in eval()+half() to work around the issue
        def fix_bn(m):
            classname = m.__class__.__name__
            if classname.find('BatchNorm') != -1:
                m.eval().half()
        model.apply(fix_bn)
    metric_logger = Logger()
    header = 'Epoch: [{}]'.format(epoch)
    i = 0
    for images, targets in metric_logger.log(data_loader, header):
        # move the whole batch onto the training device
        images_l = list(image.to(device) for image in images)
        target_l = [{k: v.to(device) for k, v in t.items()} for t in targets]
        loss_dict = model(images_l, target_l)
        # the total loss is the sum of the per-head losses
        losses = sum(loss for loss in loss_dict.values())
        optimizer.zero_grad()
        #losses.backward()
        if fp16:
            # scale the loss through amp so fp16 gradients don't underflow
            with amp.scale_loss(losses, optimizer) as scaled_loss:
                scaled_loss.backward()
        else:
            losses.backward()
        optimizer.step()
        # converting tensors to numbers
        for k, v in loss_dict.items():
            if isinstance(v, torch.Tensor):
                v = v.item()
            assert isinstance(v, (float, int))
        results_dict = loss_dict
        results_dict['epoch'] = epoch
        results_dict['batch'] = i
        #wandb.log(results_dict)
        i += 1
def eval_loop(model, optimizer, data_loader, device, epoch, fp16):
    """Run *model* over *data_loader* in eval mode for one epoch.

    In eval mode a torchvision detection model returns per-image
    predictions (boxes/labels/scores), not a loss dict, so only basic
    progress information is collected here.

    Args:
        model: detection model.
        optimizer: unused; kept for signature parity with batch_loop.
        data_loader: yields (images, targets) batches (see collate_fn).
        device: device each batch is moved to.
        epoch: epoch index, used for logging only.
        fp16: unused; kept for signature parity with batch_loop.
    """
    model.eval()
    metric_logger = Logger()
    header = 'Epoch: [{}]'.format(epoch)
    i = 0
    with torch.no_grad():  # inference only — no autograd bookkeeping needed
        for images, targets in metric_logger.log(data_loader, header):
            images_l = list(image.to(device) for image in images)
            target_l = [{k: v.to(device) for k, v in t.items()} for t in targets]
            outputs = model(images_l)
            # BUGFIX: this loop previously iterated an undefined
            # 'loss_dict' (copy-paste from batch_loop) and raised
            # NameError on the first batch.
            results_dict = {'epoch': epoch, 'batch': i,
                            'num_outputs': len(outputs)}
            #wandb.log(results_dict)
            i += 1
def train(model, optimizer, data_loader, test_loader, device, fp16):
    """Run 10 epochs of training with a validation pass after each.

    Args:
        model/optimizer: model under training and its optimizer.
        data_loader: training batches.
        test_loader: validation batches.
        device: device batches are moved to.
        fp16: forwarded to the per-epoch loops.
    """
    for epoch in range(10):
        # train one epoch
        batch_loop(model, optimizer, data_loader, device, epoch, fp16)
        # validate one epoch
        # BUGFIX: validation previously reused the training loader even
        # though test_loader was passed in.
        eval_loop(model, optimizer, test_loader, device, epoch, fp16)
def main(args):
    """Wire up the COCO data pipeline, model and optimizer, then train.

    Expects the argparse namespace built in __main__; also reads the
    module-level model_names / local_model_names lists defined there.
    """
    # distributed training variable
    args.gpu = 0
    args.world_size = 1
    if args.fp16:
        assert torch.backends.cudnn.enabled, "fp16 mode requires cudnn backend to be enabled."
    ### distributed deep learn parameters
    if args.distributed:
        # one process per GPU; NCCL reads rendezvous info from the environment
        args.gpu = args.local_rank % torch.cuda.device_count()
        torch.cuda.set_device(args.gpu)
        torch.distributed.init_process_group(backend='nccl',
                                             init_method='env://')
        args.world_size = torch.distributed.get_world_size()
        args.total_batch_size = args.world_size * args.batch_size
    if args.static_loss_scale != 1.0:
        if not args.fp16:
            print("Warning: if --fp16 is not used, static_loss_scale will be ignored.")
    device = torch.device(args.device)
    log_interval = 20  # NOTE(review): currently unused — confirm before removing
    train_transforms = Compose([CocoDetectProcessor(), ToTensor(), RandomHorizontalFlip(0.5)])
    val_transforms = Compose([CocoDetectProcessor(), ToTensor()])
    ### Coco DataSet Processors
    train_set = CocoDetection(os.path.join(args.data, 'train2017'),
                              os.path.join(args.data, 'annotations', 'instances_train2017.json'),
                              train_transforms)
    val_set = CocoDetection(os.path.join(args.data, 'val2017'),
                            os.path.join(args.data, 'annotations', 'instances_val2017.json'),
                            val_transforms)
    # drop images without boxes: they yield empty targets the loss can't use
    train_set = coco_remove_images_without_annotations(train_set)
    # Coco Dataset Samplers
    train_sampler = torch.utils.data.RandomSampler(train_set)
    test_sampler = torch.utils.data.SequentialSampler(val_set)
    train_batch_sampler = torch.utils.data.BatchSampler(
        train_sampler, args.batch_size, drop_last=True)
    ### pytorch dataloaders
    # cannot increase batch size till we sort the resolutions
    train_loader = torch.utils.data.DataLoader(
        train_set, batch_sampler=train_batch_sampler, num_workers=args.workers,
        collate_fn=collate_fn)
    test_loader = torch.utils.data.DataLoader(
        val_set, batch_size=1,
        sampler=test_sampler, num_workers=args.workers,
        collate_fn=collate_fn)
    # instantiate model: torchvision architectures first, then local ones
    if args.arch in model_names:
        model = models.__dict__[args.arch](pretrained=False)
    elif args.arch in local_model_names:
        model = local_models.__dict__[args.arch](pretrained=False)
    model.to(device)
    ## declare optimiser
    params = [p for p in model.parameters() if p.requires_grad]
    optimizer = torch.optim.SGD(
        params, lr=args.lr, momentum=args.momentum, weight_decay=args.weight_decay)
    if args.fp16:
        model, optimizer = amp.initialize(model, optimizer,
                                          opt_level=args.opt_level,
                                          keep_batchnorm_fp32=args.keep_batchnorm_fp32,
                                          loss_scale="dynamic" if args.dynamic_loss_scale else args.static_loss_scale
                                          )
        # RoI pooling isn't patched by amp by default; force the half wrapper
        model.roi_heads.box_roi_pool.forward = \
            amp.half_function(model.roi_heads.box_roi_pool.forward)
    if args.distributed:
        model = DDP(model)
    #wandb.watch(model)
    # trigger train loop
    for epoch in range(10):
        # train one epoch
        batch_loop(model, optimizer, train_loader, device, epoch, args.fp16)
        # validate one epoch
        # eval_loop(model, optimizer, test_loader, device, epoch, args.fp16)
    #train(model, optimizer, train_loader, test_loader, device, fp_16)
if __name__ == '__main__':
    # torchvision detection architectures: lowercase public callables
    model_names = sorted(name for name in models.__dict__
                         if name.islower() and not name.startswith("__")
                         and callable(models.__dict__[name]))
    # architectures defined in the local models package
    local_model_names = sorted(name for name in local_models.__dict__
                               if name.islower() and not name.startswith("__")
                               and callable(local_models.__dict__[name]))
    valid_models = model_names + local_model_names
    parser = argparse.ArgumentParser(description="PyTorch Detection Model Training")
    parser.add_argument('data', metavar='DIR', default='../external_data/coco',
                        help='paths to dataset')
    parser.add_argument('--arch', '-a', metavar='ARCH',
                        choices=valid_models, default='fasterrcnn_resnet50_fpn',
                        help='model architecture: | {0} (default: fasterrcnn_resnet50_fpn)'.format(valid_models))
    parser.add_argument('--device', default='cuda', help='device')
    parser.add_argument('--epochs', '-e', metavar='N', default=10, type=int,
                        help='default num of epochs (default 10)')
    parser.add_argument('-b', '--batch-size', default=3, type=int,
                        metavar='N', help='mini-batch size (default: 3)')
    parser.add_argument('--lr', '--learning-rate', default=0.0025, type=float,
                        metavar='LR', help='initial learning rate')
    parser.add_argument('--momentum', default=0.9, type=float, metavar='M',
                        help='momentum')
    parser.add_argument('--weight-decay', '--wd', default=1e-4, type=float,
                        metavar='W', help='weight decay (default: 1e-4)')
    parser.add_argument('--print-freq', '-p', default=10, type=int,
                        metavar='N', help='print frequency (default: 10)')
    parser.add_argument('-j', '--workers', default=1, type=int, metavar='N',
                        help='number of data loading workers (default: 1)')
    #fp16 vars
    parser.add_argument('--fp16', action='store_true', help='fp 16 or not')
    parser.add_argument('--static-loss-scale', type=float, default=1,
                        help='Static loss scale, positive power of 2 values can improve fp16 convergence.')
    parser.add_argument('--dynamic-loss-scale', action='store_true',
                        help='Use dynamic loss scaling. If supplied, this argument supersedes ' +
                        '--static-loss-scale.')
    parser.add_argument('--opt-level', type=str, default='O1')
    parser.add_argument('--keep-batchnorm-fp32', type=str, default=None)
    parser.add_argument('--loss-scale', type=str, default=None)
    ## distributed
    parser.add_argument('--local_rank', default=0, type=int,
                        help='Used for multi-process training. Can either be manually set ' +
                        'or automatically set by using \'python -m multiproc\'.')
    args = parser.parse_args()
    # distributed mode is inferred from the launcher-provided WORLD_SIZE
    args.distributed = False
    if 'WORLD_SIZE' in os.environ:
        args.distributed = int(os.environ['WORLD_SIZE']) > 1
    main(args)
import json
import config
import time
from datetime import date, timedelta, datetime
from utils import setup_client, save_homework
from discord_webhook import DiscordWebhook
def log(message):
    """Print *message* prefixed with a '[HH:MM dd/mm/YYYY]' timestamp."""
    stamp = datetime.fromtimestamp(time.time()).strftime('%H:%M %d/%m/%Y')
    print("[" + stamp + "]" + " " + message)
def send_webhook(content):
    """Best-effort POST of *content* to the configured Discord webhook.

    Failures are logged and swallowed so a flaky network cannot crash
    the checker loop.
    """
    webhook = DiscordWebhook(url=config.webhook_url, content=content)
    try:
        webhook.execute()
    except Exception:
        # narrowed from a bare 'except:' so KeyboardInterrupt/SystemExit
        # still propagate; webhook errors remain deliberately non-fatal
        log("Error while sending webhook occurred!")
def send_homework(homework):
    """Format *homework* for Discord and push it through the webhook.

    The deadline's ISO 'YYYY-MM-DD' prefix is reordered to 'DD.MM.YYYY'.
    """
    year, month, day = str(homework.deadline).split()[0].split("-")
    date_parsed = day + "." + month + "." + year
    text = "@here\n**Przedmiot:** %s\n**Termin:** %s\n**Treść:**```%s```" % (
        homework.subject.name, date_parsed, homework.content
    )
    send_webhook(text)
async def check_homework(client, homework_cache):
    """Fetch homework assigned since yesterday and announce unseen items.

    Args:
        client: API client whose data.get_homework(date) returns an async
            iterable of homework objects.
        homework_cache: list of already-announced homework ids; appended to
            here and persisted via save_homework at the end.
    """
    log("Checking homework...")
    homework_raw = await client.data.get_homework(date.today() - timedelta(days=1))
    async for homework in homework_raw:
        if not homework:
            # NOTE(review): this still reads homework.id on a falsy object —
            # confirm the API yields falsy-but-valid entries, otherwise this
            # line can raise AttributeError.
            homework_cache.append(homework.id)
            log("Invalid homework, skipping...")
            continue
        if homework.id in homework_cache:
            log("Homework found in cache, skipping...")
            continue
        else:
            homework_cache.append(homework.id)
            send_homework(homework)
    # persist the cache so restarts don't re-announce old homework
    save_homework(homework_cache)
async def main():
    """App entry point: check homework once, retrying on failure.

    The previous implementation nested ``while True`` inside
    ``for i in range(100)`` with an unreachable ``break``, so failures
    were actually retried forever; attempts are now capped at 100.
    """
    global homework_cache
    log("Starting app...")
    # Check Homework.
    for _attempt in range(100):
        try:
            client, homework_cache = await setup_client()
            await check_homework(client, homework_cache)
            await client.close()
            log("Stopping app...")
            return
        except Exception:
            # narrowed from a bare 'except:' so Ctrl-C still stops the app
            log("Error occurred when checking homework, retrying...")
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Responsible file to start the application.
"""
from os.path import join, dirname
from tornado.ioloop import IOLoop
from tornado.httpserver import HTTPServer
from tornado.web import Application, StaticFileHandler
from tornado.options import define, options, parse_command_line
from settings import *
from settings.accounts import __COOKIE_SECRET__
from controllers.main import __LIST_BASEHANDLER_SUBCLASSES__
from models import *
# define all arguments to pass in command line
# Define which IP will be used
define("address", default=IP_APP, help="run on the given IP", type=str)
# Define which port will be used
define("port", default=PORT_APP, help="run on the given port", type=int)
# Debug mode will detail the information on console and use the test DB
define("debug", default=DEBUG_MODE, help="run in debug mode", type=bool)
# Define if the layers will be published in geoserver
define("publish_layers_in_geoserver", default=PUBLISH_LAYERS_IN_GEOSERVER, help="publish or not the layers in geoserver", type=bool)
# to see the list of arguments, use:
# $ python main.py --help
class HttpServerApplication(Application):
    """Tornado Application that wires up every BaseHandler subclass, the
    static file handler and the application settings."""

    def __init__(self):
        """Build the routing table and settings, then initialise Tornado."""
        # Every class in this list extends BaseHandler, which exposes the
        # static `urls` attribute used to build the routing table.
        handler_classes = [entry["class_instance"] for entry in __LIST_BASEHANDLER_SUBCLASSES__]
        # One (url, handler) tuple per URL exposed by each handler class.
        routes = [(url, handler) for handler in handler_classes for url in handler.urls]
        # Serve the "static" folder through Tornado's StaticFileHandler.
        static_path = join(dirname(__file__), "static")
        routes.append((r"/static/(.*)", StaticFileHandler, {"path": static_path}))
        # Application settings; these are also accessible in the templates.
        app_settings = dict(
            # blog_title=TITLE_APP,
            blog_title="",
            template_path=join(dirname(__file__), "templates"),
            # xsrf_cookies=True,
            xsrf_cookies=False,
            # how to generate: https://gist.github.com/didip/823887
            cookie_secret=__COOKIE_SECRET__,
            # login_url="/auth/login/",
            login_url=LOGIN_URL,
            debug=options.debug,
            current_year=CURRENT_YEAR,
            author=AUTHOR,
        )
        # Flags shared with the rest of the application.
        self.DEBUG_MODE = options.debug
        self.PUBLISH_LAYERS_IN_GEOSERVER = options.publish_layers_in_geoserver
        # Singleton database connection, configured by the flags above.
        self.PGSQLConn = PGSQLConnection.get_instance(self.DEBUG_MODE, self.PUBLISH_LAYERS_IN_GEOSERVER)
        # Hand the routes and settings to the Tornado Application base class.
        Application.__init__(self, routes, **app_settings)
def start_application():
    """Parse CLI options, bind the HTTP server and start the IOLoop (blocks)."""
    parse_command_line()
    # http_server = HTTPServer(HttpServerApplication(), max_buffer_size=10485760000) # 10G
    server = HTTPServer(HttpServerApplication())
    server.listen(options.port, address=options.address)
    # Startup banner.
    print("\nRunning Tornado on " + URL_APP)
    print("Is debug mode? ", options.debug)
    print("Is publishing the layers in Geoserver? ", options.publish_layers_in_geoserver)
    print("Version of service: ", VERSION, "\n")
    IOLoop.current().start()
def stop_application():
    """Stop the Tornado IOLoop (closing the DB connection is currently disabled)."""
    # Singleton DB connection, kept for the disabled close() call below.
    conn = PGSQLConnection.get_instance()
    # conn.close()
    print("Closing the web service! \n\n")
    IOLoop.instance().stop()
def main():
    """Run the web service until interrupted, then shut it down cleanly."""
    try:
        start_application()
    except KeyboardInterrupt:
        # CTRL+C on Linux / CTRL+BREAK on Windows.
        stop_application()
# Run the application only when executed as a script (not when imported as a module).
if __name__ == "__main__":
    main()
|
import collections
import collections.abc

from germanium.impl._load_script import load_script

from .DeferredLocator import DeferredLocator
class StaticElementLocator(DeferredLocator):
    """A DeferredLocator that wraps an already-resolved element or element list."""

    def __init__(self, germanium, element):
        """Just holds a static reference to the elements.

        A single element is wrapped into a one-item list so the rest of the
        locator always works with a list.
        """
        super(StaticElementLocator, self).__init__(germanium)
        # Bug fix: `collections.Iterable` (a deprecated alias) was removed in
        # Python 3.10; the ABC lives in collections.abc since Python 3.3.
        if not isinstance(element, collections.abc.Iterable):
            self._element = [element]
        else:
            self._element = element

    def _find_element(self):
        """Return the first stored element, or None when the list is empty."""
        element_list = self._find_element_list()
        if not element_list:
            return None
        return element_list[0]

    def _find_element_list(self):
        """Return the stored elements, filtered to descendants of the root
        element when one is set (via the inside-filter JS helper)."""
        if not self._root_element:
            return self._element
        # Arguments consumed by inside-filter.min.js, in the order it expects.
        js_arguments = []
        code = load_script(__name__, 'inside-filter.min.js')
        js_arguments.append(code)
        js_arguments.append(0)  # ignore without_children
        js_arguments.append(1)
        js_arguments.append(self._root_element)
        js_arguments.append(0)  # no containing_elements
        js_arguments.append(0)  # no outside_elements
        js_arguments.append(0)  # no containing_all selectors
        js_arguments.append(0)  # no containing_all element/groupIds pairs
        js_arguments.append(len(self._element))
        for element in self._element:
            js_arguments.append(element)
        result_elements = self._germanium.js(*js_arguments)
        return result_elements
|
# James Clarke
# 25/09/2019
from . import serialize
from . import protocol
from threading import Thread
import socket, logging, os
DEFAULT_PORT = 3141  # TCP port used by both NetServer and NetClient
USE_MULTITHREADING = True  # handle each server connection in its own thread
BLOCK_SIZE = 128  # recv() chunk size in bytes
SIZE_INFO = 10  # width of the fixed-size length header prepended to each message
logging.basicConfig(level=os.environ.get("LOGLEVEL", "NOTSET"))
# Format a message for sending by prefixing size header information.
def format_msg(serialize, data):
    """Prepend a SIZE_INFO-wide length header to `serialize` + `data`.

    Note: the first parameter name shadows the `serialize` module; it is kept
    for API compatibility with existing callers.
    """
    header = f"{len(serialize) + len(data):<{SIZE_INFO}}".encode('utf-8')
    return header + serialize + data
# Listen for data in chunks
def listen_for_data(sock):
    """Receive one length-prefixed message from `sock` and deserialize it.

    Returns:
        (serialize_type, payload): the one-byte serialization type marker and
        the deserialized message body.

    Raises:
        ConnectionError: if the peer closes the connection mid-message.
        The original code looped forever (or crashed on int(b'')) in that
        case, because recv() returns b'' once the peer has closed.
    """
    data = b''
    new_msg = True
    while True:
        msg = sock.recv(BLOCK_SIZE)
        if not msg:
            raise ConnectionError("connection closed before a full message was received")
        if new_msg:
            # get the length of the message in bytes by checking the request header
            # NOTE(review): assumes the whole SIZE_INFO header arrives in the
            # first chunk -- confirm for very small BLOCK_SIZE values.
            msglen = int(msg[:SIZE_INFO])
            new_msg = False
        data += msg
        if len(data) - SIZE_INFO == msglen:
            serialize_type = data[SIZE_INFO:SIZE_INFO + 1]  # Get the serialization type
            # deserialize the data in the correct format
            return serialize_type, serialize.deserialize(serialize_type, data[SIZE_INFO + 1:].decode('utf-8'))
# The TCP/IP server that the service runs. Data is read in blocks of up to
# BLOCK_SIZE bytes; the fixed-width length header written by format_msg()
# tells the receiver when a complete message has arrived.
class NetServer(Thread):
    """Threaded TCP server that dispatches incoming requests to `service` routes."""
    def __init__(self, service, addr):
        Thread.__init__(self)
        self.service = service
        self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self.address = (addr, DEFAULT_PORT)
        self.socket.bind(self.address)
        self.logger = logging.getLogger("SERVER")
    def run(self):
        """Accept loop: one connection_handler per client (threaded when enabled)."""
        self.socket.listen(1)
        while True:
            self.logger.info("waiting for a connection")
            connection, client_address = self.socket.accept()
            if USE_MULTITHREADING:
                Thread(target=self.connection_handler, args=(connection, client_address)).start()
            else:
                self.connection_handler(connection, client_address)
    def connection_handler(self, connection, client_address):
        """Handle one client: run callbacks, check white/blacklists, dispatch the
        request, send the reply and always close the connection."""
        try:
            payload = None
            code = 0
            self.logger.info("client connected: {}".format(client_address))
            # Fire the per-IP connection callback if one is registered.
            if str(client_address[0]) in self.service.conn_list:
                self.service.conn_list[client_address[0]]()
            if self.service.use_whitelist and client_address[0] not in self.service.whitelist:
                # NOTE(review): this rejection payload is assigned but never
                # sent -- only the accepted path below writes to the socket.
                # Confirm whether rejected clients should receive a reply.
                payload = "Your ip is not whitelisted!"
                code = 1
            elif client_address[0] in self.service.blacklist:
                payload = "Your ip is blacklisted!"
                code = 2
            else:
                # Get the serialization type & the data received
                serialize_type, data = listen_for_data(connection)
                # Dispatch to the service route; returns the payload and status code
                payload, code = self.service.visit_route(client_address, data)
                # Serialize the reply with the same type the client sent with
                headder = serialize.serialize(serialize_type, protocol.parse_routepayload(code, payload)).encode('utf-8')
                # Prepend the length header so the client can frame the reply
                headder = format_msg(serialize_type, headder)
                connection.sendall(headder)
                self.logger.info("done sending data")
        finally:
            connection.close()
            self.logger.info("closing connection")
# This is for testing; users are encouraged to develop their own client code.
# NOTE: it is important to adhere to the chunk size, otherwise syncing errors
# may occur.
class NetClient(Thread):
    """Simple blocking test client for NetServer.

    NOTE(review): send() calls connect() on the same socket object every
    time, so an instance can only perform a single request -- a second
    send() will raise. Confirm whether reuse is intended.
    """
    def __init__(self, ip, serialize_type=serialize.SER_JSON):
        Thread.__init__(self)
        self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self.address = (ip, DEFAULT_PORT)
        self.serialize_type = serialize_type
        self.logger = logging.getLogger("CLIENT")
    def send(self, path, args=(), addr=None, serialize_type=None):
        """Send a request for `path` with `args` and block until the reply arrives."""
        if addr is None:
            addr = self.address
        if serialize_type is None:
            serialize_type = self.serialize_type
        self.socket.connect(addr)
        self.logger.info("connected to {}".format(addr))
        # Serialize the parsed request with the correct type
        headder = serialize.serialize(serialize_type, protocol.parse_routereq(path, args)).encode('utf-8')
        # Prepend the length header so the server can frame the message
        headder = format_msg(serialize_type, headder)
        self.socket.sendall(headder)
        self.logger.info("sent request data")
        # wait for response
        serialize_type, data = listen_for_data(self.socket)
        self.logger.info("recieved data & closing socket")
        return data
from LAUG.util.dataloader.dataset_dataloader import *
from LAUG.util.dataloader.module_dataloader import *
|
import sys
import os.path
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__),
os.path.pardir)))
from htmlfactory.TagFactory import TagFactory
from htmlfactory.SingletonTag import SingletonTag
def setup_function(function):
    """pytest hook: runs before each test in this module."""
    print("Setting up", function)
def test_basic_tag():
    # A plain tag spec renders as an empty element.
    test_tag = TagFactory("div")
    assert(str(test_tag) == '<div></div>')
def test_attributes():
    # Keyword arguments become HTML attributes, in the given order.
    test_tag = TagFactory("div", 'I have an action and method attribute.',
                          action="/action_page.php", method="get")
    assert(str(test_tag) == '''<div action='/action_page.php' method='get'>'''
           + 'I have an action and method attribute.</div>')
def test_for_attribute():
    # NOTE(review): the library appears to translate the keyword `four` into
    # the reserved attribute name `for` -- confirm against TagFactory docs.
    test_tag = TagFactory("div.my-class", "inside the div", four="my-form")
    assert(str(test_tag) == '''<div class='my-class\' '''
           + '''for='my-form'>inside the div</div>''')
def test_multiple_classes():
    # Dot-separated classes in the spec all land in the class attribute.
    test_tag = TagFactory("div.col-10.col-lg-9.d-inline-block", '')
    assert(str(test_tag) == '''<div class='col-10 col-lg-9 '''
           + '''d-inline-block'></div>''')
def test_single_tagfactory_child():
    # A TagFactory instance can be passed directly as inner HTML.
    test_tag = TagFactory('div', TagFactory('div-2', ''))
    assert(str(test_tag) == '''<div><div-2></div-2></div>''')
def test_inner_html_list():
    # Inner HTML may be a list of child tags.
    assert(str(TagFactory("div.my-class",
                          [TagFactory("div", "child tag")]))
           == '''<div class='my-class'><div>child tag</div></div>''')
def test_inner_html_tuple():
    # Bug fix: the original passed (TagFactory(...)), which is just a
    # parenthesised expression, not a tuple -- the test never actually
    # exercised tuple support. The trailing comma makes a real 1-tuple.
    assert(str(TagFactory("div.my-class",
                          (TagFactory("div", "child tag"),)))
           == '''<div class='my-class'><div>child tag</div></div>''')
def test_pretty_str():
    # pretty_str() renders nested tags indented, one per line.
    test_tag = TagFactory('div', TagFactory('div-2', ''))
    assert(test_tag.pretty_str() == '''<div>\n <div-2>\n </div-2>\n</div>''')
def test_pretty_str_with_html_tags():
    # add_html_tags wraps the output in an html/head/body skeleton document.
    test_tag = TagFactory('div', TagFactory('div-2', ''))
    assert(test_tag.pretty_str(add_html_tags=True) ==
           '<html>\n <head>\n </head>\n <body>\n <div>\n'
           + ' <div-2>\n </div-2>\n </div>\n </body>\n</html>')
def test_omitted_dash():
    # Python keywords can't contain '-', so `ariadescribedby` is rendered as
    # the `aria-describedby` attribute.
    test_tag = TagFactory("div", '', role="application",
                          ariadescribedby="info")
    assert(str(test_tag) == '''<div role='application' '''
           + '''aria-describedby='info'></div>''')
def test_add_child_element():
    test_tag = TagFactory("footer.footer")
    test_tag.add_child_element((TagFactory("div.container")))
    assert(str(test_tag) == '''<footer class='footer'>'''
           + '''<div class='container'></div></footer>''')
def test_add_child_element_list():
    # The same child instance may be appended several times.
    test_tag = TagFactory("test_tag")
    child_tag = TagFactory("div")
    child_list = []
    for x in range(3):
        child_list.append(child_tag)
    test_tag.add_child_element(child_list)
    assert(str(test_tag) == '<test_tag><div></div><div>'
           + '</div><div></div></test_tag>')
def test_add_child_element_with_child_element():
    test_tag = TagFactory("test_tag")
    test_tag.add_child_element(TagFactory("div.container", TagFactory("div1")))
    assert(str(test_tag) == '''<test_tag><div class='container'>'''
           + '<div1></div1></div></test_tag>')
def test_add_child_element_with_multiple_child_tags():
    # Deeply nested children render inside-out.
    test_tag = TagFactory("test_tag")
    test_tag.add_child_element([
        TagFactory("div.container",
                   TagFactory("div1",
                              TagFactory("div2",
                                         TagFactory("div3",
                                                    TagFactory("div4")))))
    ])
    assert(str(test_tag) == '''<test_tag><div class='container'><div1><div2>'''
           + '<div3><div4></div4></div3></div2>'
           + '</div1></div></test_tag>')
def test_add_child_element_with_existing_child_element():
    test_tag = TagFactory("test_tag", TagFactory("div"))
    test_tag.add_child_element(TagFactory("child"))
    assert(str(test_tag) == '<test_tag><div></div><child></child></test_tag>')
def test_set_str_as_child_element_after_setting_child_tag():
    # NOTE(review): a string child appears to REPLACE existing child tags
    # rather than append -- confirm this is TagFactory's intended contract.
    test_tag = TagFactory("test_tag", TagFactory("div"))
    test_tag.add_child_element("This is a test string.")
    assert(str(test_tag) == '<test_tag>This is a test string.</test_tag>')
def test_basic_singleton_tag():
    # Singleton (void) tags render without a closing tag.
    test_tag = SingletonTag("div")
    assert(str(test_tag) == '<div>')
def test_link_tag():
    test_tag = SingletonTag('link', rel="stylesheet",
                            href="https://stackpath.bootstrapcdn"
                            + ".com/bootstrap/4.3.1/css/bootstrap.min.css",
                            integrity="sha384-ggOyR0iXCbMQv3Xipma"
                            + "34MD+dH/1fQ784/j6cY/iJTQUOhcWr7x9JvoRxT2MZw1T",
                            crossorigin="anonymous")
    assert(str(test_tag) == "<link rel='stylesheet\'"
           + " href=\'https://stackpath.bootstrapcdn.com"
           + "/bootstrap/4.3.1/css/bootstrap.min.css\'"
           + " integrity=\'sha384-ggOyR0iXCbMQv3Xipma3"
           + "4MD+dH/1fQ784/j6cY/iJTQUOhcWr7x9JvoRxT2MZw1T\'"
           + " crossorigin=\'anonymous\'>")
def test_img_tag():
    test_tag = SingletonTag("img", border="0", alt="TestTag",
                            src="logo_w3s.gif", width="100",
                            height="100")
    assert(str(test_tag) == """<img border='0' alt='TestTag'"""
           + """ src='logo_w3s.gif' width='100'"""
           + """ height='100'>""")
def test_singleton_tag_as_child_element():
    # A SingletonTag can be nested inside a normal tag.
    a_tag = TagFactory("a", SingletonTag("img", src="logo_w3s.gif"),
                       href="www.google.com")
    assert(str(a_tag) == """<a href='www.google.com'>"""
           + """<img src='logo_w3s.gif'></a>""")
def test_singleton_tag_with_add_child_element_function():
    img_tag = SingletonTag("img", src="logo_w3s.gif")
    a_tag = TagFactory("a", href="www.google.com")
    a_tag.add_child_element(img_tag)
    assert(str(a_tag) == """<a href='www.google.com'>"""
           + """<img src='logo_w3s.gif'></a>""")
def test_singleton_tag_add_with_child_element_list():
    # add_child_element also accepts several children as positional args.
    body = TagFactory("body")
    body.add_child_element(
        SingletonTag("img"),
        SingletonTag("img1")
    )
    assert(str(body) == "<body><img><img1></body>")
|
import torch
import torch.nn as nn
import torch.nn.functional as F
from collections import abc
from .basic import *
__all__ = ["IIC"]  # public API of this module
class IIC(BaseModule):
    """Invariant Information Clustering model: a shared encoder followed by
    one or more softmax clustering heads plus "over-clustering" heads with a
    larger class count (auxiliary task), trained by maximising mutual
    information between paired views."""
    def __init__(self, in_channels: int = 3, num_classes: int = 10, num_classes_over: int = 100, z_dim=512, num_heads=10):
        super().__init__()
        # With num_heads > 1 each head is an independent classifier over the
        # same latent code; outputs are stacked along the last dimension.
        self.use_multi_heads = num_heads > 1
        self.num_heads = num_heads
        self.encoder = Encoder(in_channels, z_dim)
        if self.use_multi_heads:
            self.classifier = nn.ModuleList([self.gen_classifier(z_dim, num_classes) for _ in range(self.num_heads)])
        else:
            self.classifier = self.gen_classifier(z_dim, num_classes)
        if self.use_multi_heads:
            self.over_classifier = nn.ModuleList([self.gen_classifier(z_dim, num_classes_over) for _ in range(self.num_heads)])
        else:
            self.over_classifier = self.gen_classifier(z_dim, num_classes_over)
        self.weight_init()
    def gen_classifier(self, in_dim, out_dim):
        """Build one classifier head: Linear -> BatchNorm -> ReLU -> Linear."""
        return nn.Sequential(
            nn.Linear(in_dim, in_dim),
            nn.BatchNorm1d(in_dim),
            nn.ReLU(inplace=True),
            nn.Linear(in_dim, out_dim),
        )
    def forward(self, x, *args, lam=1.0, z_detach=False, reduction="mean"):
        """Return (mi, mi_over): mutual-information losses between x and each
        transformed view in *args, averaged over the views.

        NOTE(review): calling this without any view in *args divides by
        n == 0 -- confirm callers always pass at least one view.
        """
        _, z_x, _ = self.encoder(x)
        if z_detach:
            # Optionally stop gradients into the encoder.
            z_x = z_x.detach()
        w_x = self.clustering(z_x)
        w_x_over = self.over_clustering(z_x)
        mi, mi_over, n = 0, 0, 0
        for y in args:
            _, z_y, _ = self.encoder(y)
            if z_detach:
                z_y = z_y.detach()
            w_y = self.clustering(z_y)
            w_y_over = self.over_clustering(z_y)
            mi += self.mutual_info(w_x, w_y, lam=lam, reduction=reduction)
            mi_over += self.mutual_info(w_x_over, w_y_over, lam=lam, reduction=reduction)
            n += 1
        mi, mi_over = mi / n, mi_over / n
        return mi, mi_over
    def params(self, x: torch.Tensor):
        """Inference helper: return cluster probabilities, over-cluster
        probabilities and the latent code for x (eval mode only)."""
        assert not self.training
        _, z_x, _ = self.encoder(x)
        w_x = self.clustering(z_x)
        w_x_over = self.over_clustering(z_x)
        return w_x, w_x_over, z_x
    def clustering(self, x):
        """Softmax cluster assignments, shaped (batch, num_classes, heads)."""
        if self.use_multi_heads:
            tmp = []
            for classifier in self.classifier:
                w = F.softmax(classifier(x), dim=-1)
                tmp.append(w)
            return torch.stack(tmp, dim=-1)
        else:
            # Single head: keep a trailing head dimension for a uniform shape.
            w = F.softmax(self.classifier(x), dim=-1).unsqueeze(-1)
            return w
    def over_clustering(self, x):
        """Same as clustering() but using the over-clustering heads."""
        if self.use_multi_heads:
            tmp = []
            for classifier in self.over_classifier:
                w = F.softmax(classifier(x), dim=-1)
                tmp.append(w)
            return torch.stack(tmp, dim=-1)
        else:
            w = F.softmax(self.over_classifier(x), dim=-1).unsqueeze(-1)
            return w
    def mutual_info(self, x, y, lam=1.0, eps=1e-8, reduction="mean"):
        """IIC mutual-information objective between two soft assignments of
        shape (batch, k, heads). `lam` weights the marginal entropy terms;
        `eps` floors the joint distribution to avoid log(0)."""
        # Per-head joint distribution over cluster pairs, symmetrised.
        # NOTE(review): p.sum() normalises across all heads at once rather
        # than per head -- confirm this is intended.
        p = (x.unsqueeze(2) * y.unsqueeze(1)).sum(0)
        p = ((p + p.permute(1, 0, 2)) / 2) / p.sum()
        # In-place floor of tiny probabilities for numerical stability.
        p[(p < eps).data] = eps
        _, k, m = x.shape
        # Marginals broadcast back to (k, k, heads), raised to the power lam.
        pi = p.sum(dim=1).view(k, -1).expand(k, k, m).pow(lam)
        pj = p.sum(dim=0).view(k, -1).expand(k, k, m).pow(lam)
        if reduction == "mean":
            return (p * (torch.log(pi) + torch.log(pj) - torch.log(p))).sum() / m
        elif reduction == "sum":
            return (p * (torch.log(pi) + torch.log(pj) - torch.log(p))).sum()
        else:
            return (p * (torch.log(pi) + torch.log(pj) - torch.log(p))).sum([0, 1])
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
import os
import math
from PIL import Image, ImageFont, ImageDraw, ImageEnhance, ImageChops
def add_mark(imagePath, mark, out, quality):
    '''Apply the watermark function `mark` to the image and save it to `out`.'''
    source = Image.open(imagePath)
    marked = mark(source)
    name = os.path.basename(imagePath)
    if not marked:
        print(name + " Failed.")
        return
    # JPEG and friends cannot store an alpha channel, so convert unless the
    # target is a PNG.
    if os.path.splitext(out)[1] != '.png':
        marked = marked.convert('RGB')
    marked.save(out, quality=quality)
    print(name + " Success.")
def set_opacity(im, opacity):
    '''Scale the alpha channel of an RGBA image by `opacity` (0..1), in place.'''
    assert 0 <= opacity <= 1
    # Darkening the alpha band is equivalent to scaling the opacity.
    faded_alpha = ImageEnhance.Brightness(im.split()[3]).enhance(opacity)
    im.putalpha(faded_alpha)
    return im
def crop_image(im):
    '''Trim fully transparent borders from the image.'''
    # Diff against an empty canvas: non-blank pixels define the bounding box.
    blank = Image.new(mode='RGBA', size=im.size)
    diff = ImageChops.difference(im, blank)
    del blank
    box = diff.getbbox()
    return im.crop(box) if box else im
def gen_mark(font_height_crop, mark, size, color, font_family, opacity, space, angle):
    '''
    Render the watermark text `mark` once, then return a function that tiles
    the rotated watermark over a given image.
    '''
    # Width/height of the rendered watermark text.
    is_height_crop_float = '.' in font_height_crop  # not good but work
    width = len(mark) * size
    if is_height_crop_float:
        height = round(size * float(font_height_crop))
    else:
        height = int(font_height_crop)
    # Create the (transparent) watermark canvas.
    mark_ = Image.new(mode='RGBA', size=(width, height))
    # Draw the watermark text.
    draw_table = ImageDraw.Draw(im=mark_)
    draw_table.text(xy=(0, 0),
                    text=mark,
                    fill=color,
                    font=ImageFont.truetype(font_family, size=size))
    del draw_table
    # Trim surrounding blank space.
    mark_ = crop_image(mark_)
    # Apply transparency (set_opacity mutates mark_ in place).
    set_opacity(mark_, opacity)
    def mark_im(im):
        '''Stamp the watermark over `im` (an opened PIL image) and return it.'''
        # Length of the image diagonal.
        c = int(math.sqrt(im.size[0] * im.size[0] + im.size[1] * im.size[1]))
        # A diagonal-sized square canvas stays large enough to cover the
        # whole image even after rotation.
        mark2 = Image.new(mode='RGBA', size=(c, c))
        # Tile the watermark over the big canvas.
        y, idx = 0, 0
        while y < c:
            # Offset every other row for a brick-like layout.
            x = -int((mark_.size[0] + space) * 0.5 * idx)
            idx = (idx + 1) % 2
            while x < c:
                # Paste one watermark tile at this position.
                mark2.paste(mark_, (x, y))
                x = x + mark_.size[0] + space
            y = y + mark_.size[1] + space
        # Rotate the tiled canvas by the requested angle.
        mark2 = mark2.rotate(angle)
        # Composite the big watermark canvas onto the original image.
        if im.mode != 'RGBA':
            im = im.convert('RGBA')
        im.paste(mark2,  # watermark canvas
                 (int((im.size[0] - c) / 2), int((im.size[1] - c) / 2)),  # centred
                 mask=mark2.split()[3])
        del mark2
        return im
    return mark_im
def marker(file, mark, out,
           color="#dddddd",
           space=200,
           angle=30,
           font_family="arial.ttf",
           font_height_crop="1.2",
           size=50,
           opacity=0.05,
           quality=80):
    """Watermark `file` with the text `mark` and save the result to `out`."""
    mark_fn = gen_mark(font_height_crop, mark, size, color, font_family, opacity, space, angle)
    add_mark(file, mark_fn, out, quality)
# Example usage when run as a script.
if __name__ == '__main__':
    marker('simple.jpg','QTechCode','test.jpg')
import os
import sys
import shutil
# Template inserted at the top of each swigified header; the %% escapes so
# the final text is a literal "%include <file>" guarded for the Pivy build.
PIVY_HEADER = """\
#ifdef __PIVY__
%%include %s
#endif
"""
def copy_and_swigify_headers(includedir, dirname, files):
    """Copy the header files to the local include directories. Add an
    #include line at the beginning for the SWIG interface files...

    Fixes vs. the original:
      * the copied header is now closed via a context manager -- the original
        ended with `fd.close` (no parentheses), so the file was never closed;
      * the loop variable no longer shadows the `file` builtin.
    """
    for name in files:
        path = os.path.join(dirname, name)
        if not os.path.isfile(path):
            continue
        if name[-2:] == ".i":
            # Derive the matching header path, dropping any ".." components.
            parts = [p for p in path.split(os.path.sep) if p != ".."]
            rel_header = os.path.join(*parts)[:-2] + ".h"
            from_file = os.path.join(includedir, rel_header)
            to_file = os.path.abspath(path[:-2] + ".h")
            if os.path.exists(from_file):
                shutil.copyfile(from_file, to_file)
                sys.stdout.write('create swigified header: ' + to_file + '\n')
                with open(to_file, 'r+') as fd:
                    contents = fd.readlines()
                    # Insert the PIVY_HEADER block before the first #include.
                    ins_line_nr = -1
                    for line in contents:
                        ins_line_nr += 1
                        if line.find("#include ") != -1:
                            break
                    if ins_line_nr != -1:
                        contents.insert(ins_line_nr, PIVY_HEADER % (rel_header))
                        fd.seek(0)
                        fd.writelines(contents)
                    else:
                        # Empty header file: nothing to swigify.
                        print("[failed]")
                        sys.exit(1)
        # fixes for SWIG 1.3.21 and upwards
        # (mostly workarounding swig's preprocessor "function like macros"
        # preprocessor bug when no parameters are provided which then results
        # in no constructors being created in the wrapper)
        elif name[-4:] == ".fix":
            sys.stdout.write(' ' + path[:-4])
            shutil.copyfile(path, path[:-4])
        # had to introduce this because windows is a piece of crap
        elif sys.platform == "win32" and name[-6:] == ".win32":
            sys.stdout.write(' ' + path[:-6])
            shutil.copyfile(path, path[:-6])
def swigify(interface_dir, include_dir):
    """Walk <interface_dir>/Inventor and swigify every file found there."""
    root = os.path.relpath(os.path.join(interface_dir, "Inventor"))
    for current_dir, _subdirs, filenames in os.walk(root):
        copy_and_swigify_headers(include_dir, current_dir, filenames)
# Copyright (c) 2020 Graphcore Ltd. All rights reserved.
import os
import numpy as np
import collections
from ipu_sparse_ops import host_utils
import tensorflow.compat.v1 as tf
from tensorflow.python import ipu
from logging import getLogger
tf.disable_eager_execution()
tf.disable_v2_behavior()
def get_lib_path(lib_name):
    """Return the absolute path of lib<lib_name>.so next to this file."""
    here = os.path.realpath(os.path.dirname(__file__))
    return os.path.join(here, "lib" + lib_name + ".so")
# Static description of one sparse matmul: sizes, group count, dtype and top-k.
MatmulSpec = collections.namedtuple('MatmulSpec', 'max_non_zeros num_groups batch_size input_size output_size data_type topk')
logger = getLogger(os.path.basename(__file__))
class SparseRepresentation:
    """Host-side container for a sparse tensor: metainfo plus non-zero values."""

    def __init__(self, metainfo, nz):
        self.metainfo_state = metainfo
        self.nz_values = nz
        # XLA only accepts floating point tensors for custom ops, so keep an
        # fp16 view of the metainfo buffer ready for feeding.
        self.metainfo_state_fp16 = self.metainfo_state.view(dtype=np.float16)

    def makePlaceHolders(self, data_type):
        """Return (metainfo, nz-values) placeholders matching this representation."""
        metainfo_ph = tf.placeholder(tf.float16, self.metaInfoShape())
        nz_ph = tf.placeholder(data_type, self.valuesShape())
        return metainfo_ph, nz_ph

    def metaInfoShape(self):
        """Flat shape of the metainfo buffer."""
        return [self.metainfo_state.size]

    def valuesShape(self):
        """Flat shape of the non-zero values buffer."""
        return [self.nz_values.size]

    def metaInfoFeed(self):
        # XLA requires us to pass only floating point tensors to custom ops:
        return self.metainfo_state_fp16

    def valuesFeed(self):
        return self.nz_values

    def __str__(self):
        return f"metainfo: {self.metainfo_state} values:{self.nz_values}"
def get_or_create_args(spec: MatmulSpec):
    """Return a dummy variable whose SHAPE smuggles compile-time args to the op."""
    # Compile time args have to be passed in the tensor shape:
    shape_args = [spec.output_size, spec.max_non_zeros, spec.num_groups]
    with tf.variable_scope("dummy", reuse=tf.AUTO_REUSE, use_resource=True):
        return tf.get_variable(
            name="args_hidden_in_shape",
            dtype=tf.float32,
            shape=shape_args,
            trainable=False,
            initializer=tf.zeros_initializer())
def get_or_create_nz_values(data_type, shape=None):
    # Trainable variable holding the non-zero values of the sparse weights.
    with tf.variable_scope("sparse_weights", reuse=tf.AUTO_REUSE, use_resource=True):
        return tf.get_variable("values", dtype=data_type, shape=shape)
def get_or_create_metainfo(data_type, shape=None):
    # Non-trainable variable holding the sparsity metainfo buffer.
    with tf.variable_scope("sparse_weights", reuse=tf.AUTO_REUSE, use_resource=True):
        return tf.get_variable("metainfo", dtype=data_type, shape=shape)
def get_or_create_dense_grad_w(spec: MatmulSpec):
    # We need a dummy input that allows us to retrieve the dense gradient:
    with tf.variable_scope("sparse_weights", reuse=tf.AUTO_REUSE, use_resource=True):
        return tf.get_variable("dense_gradW", shape=[spec.input_size, spec.output_size],
                               dtype=tf.float32, trainable=False, initializer=tf.zeros_initializer())
def allocate_matmul_inputs(lhs, spec: MatmulSpec):
    """Run the fc_allocate custom op to obtain IPU-friendly layouts for the
    metainfo, non-zero values and dense LHS tensors."""
    metainfo_size, nz_max_size = get_sparse_tensor_sizes(spec)
    arg_dummy = get_or_create_args(spec)
    outputs = {
        "output_types": [tf.float16, tf.float32, tf.float32],
        "output_shapes": [metainfo_size, nz_max_size, lhs.shape],
    }
    lib_path = get_lib_path("fc_allocate")
    return ipu.custom_ops.precompiled_user_op([arg_dummy, lhs],
                                              lib_path,
                                              outs=outputs,
                                              inputs_with_gradients=[])
def matmul(spec: MatmulSpec, lhs, return_dense_grad):
    """Sparse fully-connected matmul via the sparse_matmul custom op.

    Returns the op's output list: one (batch_size, output_size) fp32 result.
    """
    metainfo, nz_values, dense_lhs = allocate_matmul_inputs(lhs, spec)
    # NOTE(review): this overwrites the allocated dense_lhs with a plain
    # identity of lhs -- confirm the allocator's lhs layout is intentionally
    # discarded here.
    dense_lhs = tf.identity(lhs)
    result_shape = tf.TensorShape([spec.batch_size, spec.output_size])
    # Make vars for the representation so we can update it from the host
    # and the weights are trainable:
    trainable_nz = get_or_create_nz_values(nz_values.dtype, nz_values.shape)
    metainfo = get_or_create_metainfo(metainfo.dtype, metainfo.shape)
    dense_grad_w = get_or_create_dense_grad_w(spec)
    outputs = {
        "output_types": [tf.float32],
        "output_shapes": [result_shape],
    }
    arg_dummy = get_or_create_args(spec)
    inputs = [dense_lhs, metainfo, trainable_nz, arg_dummy, return_dense_grad, dense_grad_w]
    lib_path = get_lib_path("sparse_matmul")
    with_grads = [0, 2, 5]  # No grads for metainfo and dummy arg
    result = ipu.custom_ops.precompiled_user_op(inputs,
                                                lib_path,
                                                outs=outputs,
                                                inputs_with_gradients=with_grads)
    return result
def update_metainfo_op(metainfo_ph, nz_ph):
    """Build an op that overwrites the stored sparsity pattern from placeholders."""
    values_var = get_or_create_nz_values(nz_ph.dtype)
    metainfo_var = get_or_create_metainfo(metainfo_ph.dtype)
    assignments = [values_var.assign(nz_ph), metainfo_var.assign(metainfo_ph)]
    # The returned no-op forces both assignments to run when executed.
    with tf.control_dependencies(assignments):
        update_op = tf.no_op()
    return update_op
def representation_from_triplets(spec: MatmulSpec, row_indices, col_indices, values):
    """Build a SparseRepresentation from COO triplets (sorted by row first)."""
    # TODO: why is it necessary to sort by rows - popsparse claims it is not?
    order = np.argsort(row_indices)
    metainfo, nzvalues = host_utils.representation_from_triplets(
        spec.max_non_zeros, spec.num_groups, spec.batch_size, spec.input_size, spec.output_size,
        row_indices[order], col_indices[order], values[order])
    return SparseRepresentation(metainfo, nzvalues)
def triplets_from_representation(spec: MatmulSpec, sparse_data: SparseRepresentation):
    """Convert a SparseRepresentation back into COO (rows, cols, values) triplets."""
    row_indices, col_indices, values = host_utils.triplets_from_representation(
        spec.max_non_zeros, spec.num_groups, spec.batch_size, spec.input_size, spec.output_size,
        sparse_data.metainfo_state, sparse_data.nz_values)
    return row_indices, col_indices, values
def get_sparse_tensor_sizes(spec: MatmulSpec):
    """Return the (metainfo size, max non-zero count) for tensors matching `spec`."""
    return host_utils.get_sparse_tensor_sizes(spec.max_non_zeros, spec.num_groups, spec.batch_size, spec.input_size, spec.output_size)
def triplets_from_dense(matrix: np.array):
    """Return (row_indices, col_indices, values) for the non-zeros of a matrix."""
    nz = np.nonzero(matrix)
    return nz[0], nz[1], matrix[nz]
def dense_from_triplets(spec: MatmulSpec, row_indices, col_indices, values):
    """Scatter COO triplets into a dense (input_size x output_size) matrix."""
    # Input is multiplied on the left in popsparse, hence this orientation:
    out = np.zeros(shape=[spec.input_size, spec.output_size])
    out[(row_indices, col_indices)] = values
    return out
def mask_from_triplets(spec: MatmulSpec, row_indices, col_indices, values):
    """Return a 0/1 mask with ones at the triplet positions (values ignored)."""
    # Input is multiplied on the left in popsparse, hence this orientation:
    mask = np.zeros(shape=[spec.input_size, spec.output_size])
    mask[(row_indices, col_indices)] = 1
    return mask
def values_at_indices(row_indices, col_indices, matrix: np.array):
    """Gather the matrix values at the given (row, col) positions."""
    return matrix[(row_indices, col_indices)]
def random_triplets(spec: MatmulSpec, seed: int, value_generator, excluded_flat_indices=None, count=None):
    """Draw non-repeating random (row, col, value) triplets for a matrix of
    shape (input_size, output_size), avoiding `excluded_flat_indices`.

    `count` overrides spec.max_non_zeros when given; values come from
    `value_generator(size=...)`. Deterministic for a fixed seed.
    """
    rng = np.random.default_rng(seed=seed)
    # Input is multiplied on the left in popsparse:
    rows, cols = spec.input_size, spec.output_size
    number = spec.max_non_zeros if count is None else count
    total_indices = rows * cols
    if total_indices < number:
        raise ValueError(f"Not enough indices (Attempting to draw {number} from set of {total_indices})")
    # Sample non-repeating flat indices, then convert them to (row, col):
    if excluded_flat_indices is None:
        flat_indices = rng.choice(total_indices, size=number, replace=False)
    else:
        # NOTE: Forming the total index list is a poor algorithm for very
        # large matrices:
        candidates = np.delete(np.arange(total_indices), excluded_flat_indices)
        flat_indices = rng.choice(candidates, size=number, replace=False)
    row_indices, col_indices = np.unravel_index(flat_indices, (rows, cols))
    return row_indices, col_indices, value_generator(size=len(flat_indices))
|
from pyopteryx.factories.usage_action_factories.abstract_usage_action_factory import AbstractUsageActionFactory
from pyopteryx.utils.utils import string_int_to_string_float
from pyopteryx.utils.builder_utils import add_activity_to_task, add_synch_call_to_activity
class LoopUsageActionFactory(AbstractUsageActionFactory):
    """Builds LQN activities and synch calls for a PCM usage-scenario loop action."""

    def __init__(self, action, xml_cache, input_data, processor):
        super().__init__(action=action, xml_cache=xml_cache, input_data=input_data, processor=processor)

    def add_action(self):
        """Add a hidden activity calling the loop entry once per mean iteration."""
        loop_id = self.action.get("id")
        hidden_activity = add_activity_to_task(task_activities=self.task_activities,
                                               activity_name=self.activity_name,
                                               host_demand_mean="0.0",
                                               hide_activity=True)
        # Mean number of calls equals the loop's iteration specification.
        iterations = string_int_to_string_float(self.action.find("loopIteration_Loop").get("specification"))
        add_synch_call_to_activity(activity=hidden_activity,
                                   synch_call_dest='UsageScenario_Loop_{}_Entry'.format(loop_id),
                                   calls_mean=iterations)
        self._add_precedences(action=self.action,
                              action_activity_name=self.activity_name)

    def add_loop_config(self):
        """Add the loop activity and wire it to the loop entry via a synch call."""
        hidden_activity = add_activity_to_task(task_activities=self.task_activities,
                                               activity_name=self.activity_name,
                                               host_demand_mean="0.0",
                                               hide_activity=True)
        self._add_synch_call_to_loop_activity(activity=hidden_activity,
                                              usage=self.action)
        self._add_precedences(action=self.action,
                              action_activity_name=self.activity_name)

    @staticmethod
    def _add_synch_call_to_loop_activity(activity, usage):
        """Attach a synch call from `activity` to the usage loop's entry."""
        destination = 'UsageScenario_Loop_{}_Entry'.format(usage.get('id'))
        mean_calls = string_int_to_string_float(
            usage.find('.//loopIteration_Loop').get('specification'))
        add_synch_call_to_activity(activity, destination, calls_mean=mean_calls)
|
import os
# Regenerate gui.py from the Qt Designer .ui file using pyuic5.
file_path = os.path.join(os.path.dirname(__file__), 'GUI_QT.ui')
dest_path = os.path.join(os.path.dirname(__file__), 'gui.py')
# os.system returns the command's exit status; non-zero means failure.
out = os.system(f'pyuic5 -x {file_path} -o {dest_path}')
if not out:
    print('Its ok.')
else:
    print('Error:', out)
    exit(1)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.