"""
Support for Zwave roller shutter components.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/rollershutter.zwave/
"""
# Because we do not compile openzwave on CI
# pylint: disable=import-error
import logging
from homeassistant.components.rollershutter import DOMAIN
from homeassistant.components.zwave import ZWaveDeviceEntity
from homeassistant.components import zwave
from homeassistant.components.rollershutter import RollershutterDevice
COMMAND_CLASS_SWITCH_MULTILEVEL = 0x26 # 38
COMMAND_CLASS_SWITCH_BINARY = 0x25 # 37
_LOGGER = logging.getLogger(__name__)
def setup_platform(hass, config, add_devices, discovery_info=None):
"""Find and return Z-Wave roller shutters."""
if discovery_info is None or zwave.NETWORK is None:
return
node = zwave.NETWORK.nodes[discovery_info[zwave.ATTR_NODE_ID]]
value = node.values[discovery_info[zwave.ATTR_VALUE_ID]]
if value.command_class != zwave.COMMAND_CLASS_SWITCH_MULTILEVEL:
return
if value.index != 0:
return
value.set_change_verified(False)
add_devices([ZwaveRollershutter(value)])
class ZwaveRollershutter(zwave.ZWaveDeviceEntity, RollershutterDevice):
"""Representation of an Zwave roller shutter."""
def __init__(self, value):
"""Initialize the zwave rollershutter."""
from openzwave.network import ZWaveNetwork
from pydispatch import dispatcher
ZWaveDeviceEntity.__init__(self, value, DOMAIN)
self._node = value.node
dispatcher.connect(
self.value_changed, ZWaveNetwork.SIGNAL_VALUE_CHANGED)
def value_changed(self, value):
"""Called when a value has changed on the network."""
if self._value.value_id == value.value_id or \
self._value.node == value.node:
self.update_ha_state()
_LOGGER.debug("Value changed on network %s", value)
@property
def current_position(self):
"""Return the current position of Zwave roller shutter."""
if self._value.data <= 5:
return 100
elif self._value.data >= 95:
return 0
else:
return 100 - self._value.data
def move_up(self, **kwargs):
"""Move the roller shutter up."""
self._node.set_dimmer(self._value.value_id, 100)
def move_down(self, **kwargs):
"""Move the roller shutter down."""
self._node.set_dimmer(self._value.value_id, 0)
def stop(self, **kwargs):
"""Stop the roller shutter."""
for value in self._node.get_values(
class_id=COMMAND_CLASS_SWITCH_BINARY).values():
# Rollershutter will toggle between UP (True), DOWN (False).
# It also stops the shutter if the same value is sent while moving.
value.data = value.data
break
|
import sys
import os
sys.path.insert(1, os.path.join(sys.path[0], '..'))
import numpy as np
from base.SO3 import SO3
from base.simple_filter import LowPassFilter, AverageFilter
from estimation.magnetometer_calibrator import MagnetometerCalibrator
from estimation.kalman_filter import KalmanFilterSO3
class Engine:
GYRO_NO_MOTION_THRESHOLD = 0.1
ACCEL_NO_MOTION_THRESHOLD = 10.0 # FIXME we may need a bigger value
LOWPASS_GAIN = 0.9
STATIC_CAL_SAMPLE_COUNT = 200
SENSOR_COVAR_AMPLIFIER = 2.0 # covar obtained after static calibration would be amplified for better stability
INITIAL_POSE_COVAR = 1e1 # diagonal
STATE_INIT = 0
STATE_CALIBRATE_MOVING = 1 # for mag bias
STATE_CALIBRATE_STATIC = 2 # for gyro bias, mag ref and accel ref
STATE_RUNNING = 3
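    # State machine: INIT --(motion detected)--> CALIBRATE_MOVING
    #   --(mag bias found and device stationary)--> CALIBRATE_STATIC
    #   --(enough stationary samples collected)--> RUNNING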
def __init__(self):
self._filter = KalmanFilterSO3() # estimates the transform from current chip to initial chip
self._gyro_lp = LowPassFilter(Engine.LOWPASS_GAIN)
self._accel_lp = LowPassFilter(Engine.LOWPASS_GAIN)
self._gyro_avg = AverageFilter()
self._accel_avg = AverageFilter()
self._mag_avg = AverageFilter()
self._gyro_bias = None
self._mag_calibrator = MagnetometerCalibrator(np.zeros(3))
self._state = Engine.STATE_INIT
self._last_update_time = 0.0
def set_mag_param(self, mag_bias):
'''
update the mag parameters.
Could be used as a hacky way to advance the internal state machine,
but only in the simulation.
'''
if (self._state < Engine.STATE_CALIBRATE_STATIC):
self._state = Engine.STATE_CALIBRATE_STATIC
self._mag_bias = mag_bias
def update(self, t, gyro, accel, mag):
"""
"""
t *= 1.0
        gyro = np.array(gyro, dtype=float)
        accel = np.array(accel, dtype=float)
        mag = np.array(mag, dtype=float)
# update low pass filters
self._gyro_lp.update(gyro)
self._accel_lp.update(accel)
no_motion = self._check_no_motion(gyro, accel)
if (self._state == Engine.STATE_INIT):
# wait until starts to move
if (not no_motion):
print("[EngineState] transit from INIT to CALIBRATE_MOVING")
self._state = Engine.STATE_CALIBRATE_MOVING
elif (self._state == Engine.STATE_CALIBRATE_MOVING):
self._mag_calibrator.update(mag)
self._mag_bias = self._mag_calibrator.bias
# wait until found bias, and stopped moving
if ((self._mag_bias is not None) and \
(no_motion)):
print("[EngineState] transit from CALIBRATE_MOVING to CALIBRATE_STATIC")
print("mag bias is {}".format(self._mag_bias))
self._state = Engine.STATE_CALIBRATE_STATIC
elif (self._state == Engine.STATE_CALIBRATE_STATIC):
if (no_motion): # only update when device is stationary
done = self._update_static_calibration(gyro, accel, mag)
if (done):
# NOTE: acceleration is in the opposite direction of the corresponding inertial force
gravity_in_body = self._accel_avg.value
                    gravity_in_world = np.array([0, 0, 1], dtype=float) * np.linalg.norm(gravity_in_body)
R_from_body_to_world = SO3.from_two_directions(gravity_in_body, gravity_in_world)
initial_pose_covar = np.eye(3) * Engine.INITIAL_POSE_COVAR
self._gyro_bias = self._gyro_avg.value
gyro_covar = self._gyro_avg.covar * Engine.SENSOR_COVAR_AMPLIFIER
accel_covar = self._accel_avg.covar * Engine.SENSOR_COVAR_AMPLIFIER
mag_ref = R_from_body_to_world.inverse() * self._mag_avg.value
mag_covar = self._mag_avg.covar * Engine.SENSOR_COVAR_AMPLIFIER
# initialize the kalman filter here.
self._filter.set_initial_pose(R_from_body_to_world, initial_pose_covar)
self._filter.set_sensor_covar(gyro_covar, accel_covar, mag_covar)
self._filter.set_references(gravity_in_world, mag_ref)
self._state = Engine.STATE_RUNNING
print("[EngineState] transit from CALIBRATE_STATIC to RUNNING")
print("initial orientation = {}\nroll = {}, pitch = {}, yaw = {}".format(
R_from_body_to_world.ln(), R_from_body_to_world.get_roll(),
R_from_body_to_world.get_pitch(), R_from_body_to_world.get_yaw()))
print("gravity in world = {}".format(gravity_in_world))
print("gyro bias = {}".format(self._gyro_bias))
print("gyro covar = \n{}".format(gyro_covar))
print("accel covar = \n{}".format(accel_covar))
print("mag ref = {}".format(mag_ref))
print("mag covar = \n{}".format(mag_covar))
elif (self._state == Engine.STATE_RUNNING):
dt = t - self._last_update_time
# always do gyro update
gyro_calibrated = gyro - self._gyro_bias
self._filter.process_update(gyro_calibrated, dt)
# do accel update iff gravity is dominant
if (np.linalg.norm(accel) < Engine.ACCEL_NO_MOTION_THRESHOLD):
self._filter.acc_update(accel)
else:
print("[ACC] rejected")
            # do mag update iff mag reading matches mag param
mag_calibrated = self._mag_calibrator.calibrate_measurement(mag)
if (mag_calibrated is not None):
self._filter.mag_update(mag_calibrated)
else:
print("[MAG] rejected")
else:
# invalid state -- should not happen
assert(False)
self._last_update_time = t
def get_orientation_in_world(self):
'''
:return transform from current chip to world.
'''
if (self._state < Engine.STATE_RUNNING):
return None
return self._filter.get_estimate_mean().inverse()
def get_orientation_covar(self):
if (self._state < Engine.STATE_RUNNING):
return None
return self._filter.get_estimate_covar()
def get_state_string(self):
"""
:return a string representing the internal state.
"""
if (self._state == Engine.STATE_INIT):
return "Init"
elif (self._state == Engine.STATE_CALIBRATE_MOVING):
return "Moving calibration"
elif (self._state == Engine.STATE_CALIBRATE_STATIC):
return "Static calibration"
elif (self._state == Engine.STATE_RUNNING):
return "Running"
else:
raise RuntimeError("Invalid state: {}".format(self._state))
def _check_no_motion(self, gyro, accel):
"""
        :return True if the device is barely moving
"""
tg = Engine.GYRO_NO_MOTION_THRESHOLD
ta = Engine.ACCEL_NO_MOTION_THRESHOLD
# trivial motion both instantaneously and recently
return ((np.linalg.norm(gyro) < tg) and \
(np.linalg.norm(self._gyro_lp.value) < tg) and \
(np.linalg.norm(accel) < ta) and \
(np.linalg.norm(self._accel_lp.value) < ta))
def _update_static_calibration(self, gyro, accel, mag):
"""
estimate gyro offset, mag ref and accel ref
:return True if finished.
"""
self._gyro_avg.update(gyro)
self._accel_avg.update(accel)
self._mag_avg.update(mag - self._mag_bias)
return ((self._gyro_avg.count > Engine.STATIC_CAL_SAMPLE_COUNT) and \
(self._accel_avg.count > Engine.STATIC_CAL_SAMPLE_COUNT) and \
(self._mag_avg.count > Engine.STATIC_CAL_SAMPLE_COUNT))
if (__name__ == '__main__'):
pass
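    # Minimal usage sketch (assumption: synthetic, motion-free samples at
    # 100 Hz with made-up sensor values; real data is required for the
    # calibration states to complete). It only shows the intended call pattern.
    engine = Engine()
    for k in range(100):
        engine.update(k * 0.01, gyro=[0.0, 0.0, 0.0],
                      accel=[0.0, 0.0, 9.81], mag=[30.0, 0.0, -20.0])
    print(engine.get_state_string())  # stays "Init" until motion is detected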
|
import matplotlib.pyplot as plt
import shapefile
import pandas as pd
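# Read every shape record from the gravity-survey shapefile and store its point
# coordinates as one DataFrame row of (x-list, y-list); the commented-out lines
# below were used for plotting/exporting the result.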
shpFilePath = r"C:\Users\tq220\Documents\Tits things\2018-2019\Data Science\Final-project-data\Data\Utah_FORGE_gravity.shp"
line_dict={}
df=pd.DataFrame(columns=['x','y'])
test = shapefile.Reader(shpFilePath)
inds=[]
listx=[]
listy=[]
for ind,sr in enumerate(test.shapeRecords()):
inds.append(ind)
listx=[]
listy=[]
# print(sr.shape.points)
for xNew,yNew in sr.shape.points:
# print(xNew)
listx.append(xNew)
listy.append(yNew)
df.loc['{0}'.format(ind)]=[listx,listy]
# plt.plot(listx,listy)
# line_dict['line{0}x'.format(ind)]=listx
# line_dict['line{0}y'.format(ind)]=listy
#df.to_csv('surface_fractures_group_by_shape.csv')
#plt.scatter(listx,listy,markersize=1)
#plt.show()
|
import os
import sys
import shutil
import asyncio
from azure_functions_worker import protos
from azure_functions_worker import testutils
async def vertify_nested_namespace_import():
test_env = {}
request = protos.FunctionEnvironmentReloadRequest(
environment_variables=test_env)
request_msg = protos.StreamingMessage(
request_id='0',
function_environment_reload_request=request)
disp = testutils.create_dummy_dispatcher()
    # Mock interpreter starts in placeholder mode
import azure.module_a as mod_a # noqa: F401
# Mock function specialization, load customer's libraries and functionapps
ns_root = os.path.join(
testutils.UNIT_TESTS_ROOT,
'azure_namespace_import',
'namespace_location_b')
test_path = os.path.join(ns_root, 'azure', 'namespace_b', 'module_b')
test_mod_path = os.path.join(test_path, 'test_module.py')
os.makedirs(test_path)
with open(test_mod_path, 'w') as f:
f.write('MESSAGE = "module_b is imported"')
try:
# Mock a customer uses test_module
if sys.argv[1].lower() == 'true':
await disp._handle__function_environment_reload_request(
request_msg)
from azure.namespace_b.module_b import test_module
print(test_module.MESSAGE)
except ModuleNotFoundError:
print('module_b fails to import')
finally:
# Cleanup
shutil.rmtree(ns_root)
if __name__ == '__main__':
loop = asyncio.get_event_loop()
loop.run_until_complete(vertify_nested_namespace_import())
loop.close()
|
# ---------------------------------------------------------------
# This script shows you how to use openvino to run inference on a model exported by PPQ.
# Note that openvino can also execute all kinds of quantization schemes; you can even
# use a tensorRT-style policy. In general, though, openvino expects asymmetrically
# quantized activations and symmetrically quantized weights.
# ---------------------------------------------------------------
# For this onnx inference test, all test data is randomly picked.
# If you want to use real data, just rewrite the definition of SAMPLES.
import time
import numpy as np
import openvino
import torch
import torchvision
import torchvision.models
from torch.utils.data import DataLoader
from tqdm import tqdm
from ppq import *
from ppq.api import *
QUANT_PLATFROM = TargetPlatform.PPL_CUDA_INT8
BATCHSIZE = 16
MODELS = {
'resnet50': torchvision.models.resnet50,
'mobilenet_v2': torchvision.models.mobilenet.mobilenet_v2,
'mnas': torchvision.models.mnasnet0_5,
'shufflenet': torchvision.models.shufflenet_v2_x1_0}
DEVICE = 'cuda'
SAMPLES = [torch.rand(size=[BATCHSIZE, 3, 224, 224]) for _ in range(256)]
for mname, model_builder in MODELS.items():
    print(f'Ready to run quantization with {mname}')
model = model_builder(pretrained = True).to(DEVICE)
quantized = quantize_torch_model(
model=model, calib_dataloader=SAMPLES, collate_fn=lambda x: x.to(DEVICE),
calib_steps=32, input_shape=[BATCHSIZE, 3, 224, 224],
setting=QuantizationSettingFactory.default_setting(),
platform=QUANT_PLATFROM,
onnx_export_file='model_fp32.onnx')
executor = TorchExecutor(graph=quantized)
ref_results = []
    for sample in tqdm(SAMPLES, desc='PPQ GENERATING REFERENCES', total=len(SAMPLES)):
result = executor.forward(inputs=sample.to(DEVICE))[0]
result = result.cpu().reshape([BATCHSIZE, 1000])
ref_results.append(result)
fp32_input_names = [name for name, _ in quantized.inputs.items()]
fp32_output_names = [name for name, _ in quantized.outputs.items()]
graphwise_error_analyse(graph=quantized, running_device='cuda',
dataloader=SAMPLES, collate_fn=lambda x: x.cuda(), steps=32)
export_ppq_graph(graph=quantized, platform=TargetPlatform.OPENVINO_INT8,
graph_save_to='model_int8.onnx')
int8_input_names = [name for name, _ in quantized.inputs.items()]
int8_output_names = [name for name, _ in quantized.outputs.items()]
# run with openvino.
# do not use Tensorrt provider to run quantized model.
# TensorRT provider needs another qdq format.
import openvino.runtime
openvino_executor = openvino.runtime.Core()
model = openvino_executor.compile_model(
model = openvino_executor.read_model(model="model_int8.onnx"), device_name="CPU")
openvino_results = []
    for sample in tqdm(SAMPLES, desc='OPENVINO GENERATING OUTPUTS', total=len(SAMPLES)):
result = model([convert_any_to_numpy(sample)])
for key, value in result.items():
result = convert_any_to_torch_tensor(value).reshape([BATCHSIZE, 1000])
openvino_results.append(result)
# compute simulating error
error = []
for ref, real in zip(ref_results, openvino_results):
error.append(torch_snr_error(ref, real))
error = sum(error) / len(error) * 100
print(f'PPQ INT8 Simulating Error: {error: .3f} %')
# benchmark with openvino int8
print(f'Start Benchmark with openvino (Batchsize = {BATCHSIZE})')
benchmark_samples = [np.zeros(shape=[BATCHSIZE, 3, 224, 224], dtype=np.float32) for _ in range(512)]
model = openvino_executor.compile_model(
model = openvino_executor.read_model(model="model_fp32.onnx"), device_name="CPU")
tick = time.time()
for sample in tqdm(benchmark_samples, desc='FP32 benchmark...'):
result = model([convert_any_to_numpy(sample)])
tok = time.time()
print(f'Time span (FP32 MODE): {tok - tick : .4f} sec')
model = openvino_executor.compile_model(
model = openvino_executor.read_model(model="model_int8.onnx"), device_name="CPU")
tick = time.time()
for sample in tqdm(benchmark_samples, desc='INT8 benchmark...'):
result = model([convert_any_to_numpy(sample)])
tok = time.time()
print(f'Time span (INT8 MODE): {tok - tick : .4f} sec')
|
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import argparse
import torch
import torch.optim as optim
import torch.nn as nn
from torch.autograd import Variable
import os
import random
import pickle
import esm
from torch.utils.data import TensorDataset, DataLoader
import emb_classifier
torch.cuda.empty_cache()
def get_args():
parser = argparse.ArgumentParser(
formatter_class=argparse.RawTextHelpFormatter)
parser.add_argument('-indir', type=str, required=False, default=None)
parser.add_argument('-outdir', type=str, required=False, default=None)
parser.add_argument('-data_dir', type=str, required=False, default='data/')
args = parser.parse_args()
return args
def suffle_n_batch(data, batch_size):
batched = []
random.shuffle(data)
for i in range(len(data)//batch_size+1):
batched.append(data[i*batch_size:i*batch_size+batch_size])
if len(batched[-1])==0:
return batched[:-1]
else:
return batched
def get_emb_esm1b(seq, LM_model, average=True):
B, L = seq.shape
L = L - 2 # remove start and end token
#LM_model.eval()
with torch.no_grad():
output = LM_model(seq, repr_layers=[33], return_contacts=True) # get the output from the language model
embedding = output['representations'][33][:,1:-1,:] # embedding size (1, L, 1280)
attention_map = output['attentions'][:,:,:,1:-1,1:-1] # attention map size (1, 33, 20, L, L)
attention_map = attention_map.reshape(B, 33*20, L, L).permute(0,2,3,1) # (1, L, L, 660)
# if you wanna average the embeddings along the sequence dimension -- i think this could be really cool too
if (average):
embedding = embedding.mean(1)
return embedding,attention_map
def load_data(args):
with open(os.path.join(args.data_dir,'train_tuple_data.pkl'), 'rb') as f:
train = pickle.load(f)
with open(os.path.join(args.data_dir,'valid_tuple_data.pkl'), 'rb') as f:
valid = pickle.load(f)
return train, valid
#get arguments
args = get_args()
indir = args.indir
outdir = args.outdir
print('Args got')
print(f'indir {indir}')
print(f'outdir {outdir}')
# Loading and processing the data:
train, valid = load_data(args)
print('Data loaded')
#Preprocess data into tensors
LM_model, alphabet = esm.pretrained.esm1b_t33_650M_UR50S()
batch_converter = alphabet.get_batch_converter()
print('ESM1b loaded')
#Convert data into format that esm1b will like
y_train, _, x_train = batch_converter(train)
y_val, _, x_val = batch_converter(valid)
y_train = torch.tensor(y_train)
y_val = torch.tensor(y_val)
# Instantiate the network
classifier = emb_classifier.Net()
# Load model from previous state if indir arg is specified
if indir is not None:
if os.path.exists(indir):
classifier.load_state_dict(torch.load(indir))
print(f'loaded model from {indir}')
# Instantiate the cross-entropy loss
criterion = nn.CrossEntropyLoss()
# Instantiate the Adam optimizer
optimizer = optim.Adam(classifier.parameters(),lr=3e-4)
print('Classifier, optimizer, and criterion compiled')
# Moving tensors over to gpu if available
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
print(f'Using device {device}')
#x_train = x_train.to(device)
#y_train = y_train.to(device)
#x_val = x_val.to(device)
#y_val = y_val.to(device)
classifier = classifier.to(device)
LM_model = LM_model.to(device)
LM_model.eval()
print(f'Moved tensors to {device}')
trainset = TensorDataset(x_train, y_train)
valset = TensorDataset(x_val, y_val)
train_loader = DataLoader(trainset, shuffle=True, batch_size=1)
valid_loader = DataLoader(valset, shuffle=True, batch_size=1)
print('Dataloaders built')
num_parameters = sum(p.numel() for p in classifier.parameters() if p.requires_grad)
print('Number of parameters classifier: ', num_parameters)
num_parameters = sum(p.numel() for p in LM_model.parameters() if p.requires_grad)
print('Number of parameters esm1b: ', num_parameters)
grad_accum = 256
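# Gradient accumulation: with the batch_size=1 loaders above, the loss is divided
# by grad_accum in the training loop and the optimizer only steps every 256th
# sample, giving an effective batch size of 256.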
# CNN model training
count = 0
loss_list = []
iteration_list = []
accuracy_list = []
num_epochs = 100
output_dict = {}
print('Now beginning training')
torch.cuda.empty_cache()
for epoch in range(num_epochs):
for i, data in enumerate(train_loader):
seq, labels = data
seq = seq.to(device)
labels = labels.to(device)
x, _ = get_emb_esm1b(seq, LM_model=LM_model, average=True)
# Clear gradients
optimizer.zero_grad()
# Forward propagation
outputs = classifier(x)
# Calculate relu and cross entropy loss
loss = criterion(outputs, labels)/grad_accum
        print(f'outputs {outputs.tolist()} labels {labels.tolist()}')
# Calculating gradients
loss.backward()
if (i+1) % grad_accum == 0:
total_norm = torch.nn.utils.clip_grad_norm_(classifier.parameters(),1.0)
            if not (total_norm == total_norm):  # NaN != NaN, so this detects a NaN gradient norm
                print('Gradients are NaN')
optimizer.zero_grad()
continue
optimizer.step()
print('Train - epoch: '+str(epoch)+' batch: '+str(int((i+1)/grad_accum))+' loss: '+str(float(loss.data)*grad_accum))
count += 1
correct = 0
total = 0
valid_loss = 0
for j, val_data in enumerate(valid_loader):
with torch.no_grad():
val_seq, val_labels = val_data
val_seq = val_seq.to(device)
val_labels = val_labels.to(device)
val_x, _ = get_emb_esm1b(val_seq, LM_model=LM_model, average=True)
outputs = classifier(val_x)
loss_valid = criterion(outputs, val_labels)
# Get predictions from the maximum value
predicted = torch.max(outputs.data, 1)[1]
# Total number of labels
total += len(val_labels)
correct += (predicted == val_labels).sum()
valid_loss += float(loss_valid.data)
# print('valid_loss: ', valid_loss)
accuracy = 100 * correct / float(total)
    print('Valid - epoch: '+str(epoch) +
          ' loss: '+str(float(valid_loss/(j+1)))+' accuracy: '+str(float(accuracy)))
path = os.path.join(outdir,'save_model/model_'+str(epoch)+'.pt')
torch.save(classifier.state_dict(), path)
print('Model '+str(epoch)+' was saved.')
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Apr 18 11:45:16 2018
@author: nemec
"""
# =============================================================================
# importing all the packages
import numpy as np
import glob
import re
# =============================================================================
#defining all the constants
norm = 90*90*4/np.pi**2*np.pi #we need to normalize the areas on the sphere
area = 0.1**2. # I don't know if I ever use this....
conv = np.pi/180.
# =============================================================================
#set position of observer and Bsat
#x_c is the central longitude of the observer
#y_c is the central latitude of the observer
# 90 is the north pole, -90 the south pole
x_c = 0
y_c = 0
#we might need to experiment with these two values more, but for now, we just
#take those two as fixed
B_sat = 484
B_spot = 1500
# =============================================================================
#set up the grid for the mu calculations
#I think it's probably better if Sasha explains to you at some point how SATIRE works
mu_grid = [1.0000, 0.9000, 0.8000, 0.7000, 0.6000, 0.5000, 0.4000, 0.3000 ,0.2000, 0.1000, 0.0500]
mu_low = [0.95,0.85,0.75, 0.65, 0.55, 0.45, 0.35, 0.25, 0.15, 0.075, 0]
mu_up = [1,0.95,0.85,0.75,0.65,0.55,0.45,0.35,0.25,0.15,0.075]
# =============================================================================
#reading in the files
#the filenames are saved into "files"
files = sorted(glob.glob("CalcMagnetogram.2000.*"))
spotx = np.loadtxt("pos_x.txt")
spoty = np.loadtxt("pos_y.txt")
start = 100749
g = open("fac_old"+str(B_sat)+".txt","w")
for files in files:
name = re.findall("2000.(\d+)",files)
visibility = []
#I'm setting up all kinds of lists that are needed for the next steps
a = []
b = []
c = []
mu1 = []
mu2 = []
mu3 = []
mu4 = []
mu5 = []
mu6 = []
mu7 = []
mu8 = []
mu9 = []
mu10 = []
mu11 = []
factor = []
data = np.loadtxt(files) #this array now contains the information of the flux in each pixel
#of the magnetogram, so the array has 360X181 entries
time= int(name[0])
ff = np.zeros((180,360))
spot_x= spotx[np.where(int(name[0])==spotx[:,2]),0]
spot_y= spoty[np.where(int(name[0])==spoty[:,2]),0]
#if np.shape(spot_x) == np.shape(spot_y):
for i in range(0,180): #for latitudes
for j in range(0,360): #for longitudes
#setting up the boundaries of the pixels
x_min = j
x_max = j+1
y_min = i
y_max = i+1
#look for spot-minipixels in the 1x1 deg pixels
spot = (spot_x[np.where((x_min <= spot_x) & (spot_x < x_max) & (y_min<= spot_y) & (spot_y < y_max))])
B = abs(data[i][j])
#here we have two methods to calculate the faculae filling factors, maybe stick to that one for now
if np.shape(spot) !=(0,):
# ff[i,j] =(1 - len(spot)*0.1*0.1)
helper =B- B_spot*len(spot)*0.1*0.1
if helper < 0:
ff[i,j] = 0
else:
ff[i,j] = helper/B_sat
# a.append(1-len(spot)*0.1*0.1)
if np.shape(spot) == (0,) and B < B_sat:
ff[i,j] =abs(data[i][j])/B_sat
if np.shape(spot) == (0,) and B >= B_sat:
ff[i,j]=1
            #here I am rotating the grid and calculating the mu-positions
x_rot = []
conv = np.pi/180.
x_rot=(j+13.28*(int(time)-start))%359
x_pos = 180-x_rot
y_pos = 90-i
delta_lambda = abs(x_pos-x_c)
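            # central angle to the observer from the spherical law of cosines;
            # vis = cos(distance) is the mu value of this pixel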
distance = np.arccos((np.sin((y_c)*conv)*np.sin((y_pos)*conv)+np.cos((y_c)*conv) \
*np.cos((y_pos)*conv)*np.cos(delta_lambda*conv)))/conv
vis = np.cos(distance*conv)
#print(distance)
#if distance <=90:
# visibility.append(ff[i,j]*np.cos(distance*conv)*np.cos(y_pos*conv))
#if vis <=1 and vis >= 0 and np.shape(spot) !=(0,): #I don't remember what these two
#lines were for....
# a.append(1-len(spot)*0.1*0.1)
            # bin this pixel's contribution into the matching mu ring
            for k, ring in enumerate((mu1, mu2, mu3, mu4, mu5, mu6,
                                      mu7, mu8, mu9, mu10, mu11)):
                if mu_low[k] < vis <= mu_up[k]:
                    ring.append(ff[i,j]*vis*np.cos(y_pos*conv))
                    break
#the following one is just for testing, if the mu-ring distribution gives the same
#result, as if we would just use the full "visible" disc
# if distance <=90:
# visibility.append(ff[i,j]*np.cos(distance*conv)*np.cos(y_pos*conv))
r1=sum(mu1)/norm
r2=sum(mu2)/norm
r3= sum(mu3)/norm
r4=sum(mu4)/norm
r5=sum(mu5)/norm
r6=sum(mu6)/norm
r7=sum(mu7)/norm
r8=sum(mu8)/norm
r9=sum(mu9)/norm
r10=sum(mu10)/norm
r11=sum(mu11)/norm
total = r1+r2+r3+r4+r5+r6+r7+r8+r9+r10+r11
#print(a)
g.write("%f \t %f \t %f \t %f \t %f \t %f \t %f \t %f \t %f \t %f \t %f \t %f \t %f \n" %(r1,r2,r3,r4,r5,r6,r7,r8,r9,r10,r11,time,total))
# f.write("%f \t \n" %(sum(a)))
#f.close()
g.close()
|
"""
Scraping Manga from komikid.com
Author: Irfan Chahyadi
Source: github.com/irfanchahyadi/Scraping-Manga
"""
# IMPORT REQUIRED PACKAGE
import os, requests, bs4
# GET LIST OF ALL CHAPTER
def get_all_chapter():
res = requests.get('http://www.komikid.com/manga/one-piece')
soup = bs4.BeautifulSoup(res.content, 'html.parser')
chapters = soup.select('h5.chapter-title-rtl > a')
chapters = [(c['href'], c.get_text()) for c in chapters]
for h, t in chapters:
download(h, t)
# DOWNLOAD EACH CHAPTER
def download(html, text):
res = requests.get(html)
soup = bs4.BeautifulSoup(res.content, 'html.parser')
pages = soup.select('#all > img')
# Create Chapter folder
folder_dir = os.path.join(directory, text)
if not os.path.isdir(folder_dir):
os.mkdir(folder_dir)
# Download each Page
for i, p in enumerate(pages):
href = p['data-src'].strip()
filename = os.path.join(folder_dir, str(i+1) + href[-4:])
if not os.path.isfile(filename):
print('downloading {} page {}'.format(text, i+1))
try:
img = requests.get(href)
with open(filename, 'wb') as f:
for chunk in img.iter_content(100000):
f.write(chunk)
except Exception as e:
print('{} at {} page {}'.format(e, text, i+1))
else:
print('already have {} page {}'.format(text, i+1))
if __name__ == '__main__':
directory = os.path.join(os.getcwd(), 'One Piece')
# Create Manga folder if not exists
if not os.path.isdir(directory):
os.mkdir(directory)
get_all_chapter()
|
n = int(input())
m = int(input())
mat = [[int(j) for j in input().split()] for i in range(n)]
def flood_fill(i, j):
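    # Depth-first flood fill over the 8 neighbouring cells: marks every visited
    # cell with -1 and returns the size of the connected region of 1s. For large
    # grids the recursion depth may need raising via sys.setrecursionlimit.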
if i >= n or j >= m or i < 0 or j < 0 or mat[i][j] == 0 or mat[i][j] == -1:
return 0
else:
mat[i][j] = -1
return 1 + flood_fill(i-1, j-1) + flood_fill(i-1, j) + flood_fill(i-1, j+1) + flood_fill(i, j-1) + flood_fill(i, j+1) + flood_fill(i+1, j-1) + flood_fill(i+1, j) + flood_fill(i+1, j+1)
print (max(flood_fill(i, j) for i in range(n) for j in range(m)))
|
from . import blocks
from . import losses
from . import normalization
from . import regularizers
|
"""
Launch point
"""
from components.button import ToggleButton
from components.camera import Camera
from components.microphone import Microphone
button = ToggleButton()
camera = Camera()
mic = Microphone()
def main():
try:
while True:
if button.is_on():
camera.start_recording()
mic.start_recording()
else:
camera.stop_recording()
mic.stop_recording()
finally:
camera.close_and_clean()
mic.close_and_clean()
if __name__ == "__main__":
main()
|
import time
from model.contact import Contact
import re
from selenium.webdriver.support.ui import Select
import allure
class ContactHelper:
def __init__(self, app):
self.app = app
def open_add_contact_page(self):
wd = self.app.wd
if not wd.current_url.endswith("/edit.php"):
wd.find_element_by_link_text("add new").click()
@allure.step('I add a contact: "{contact}" to the list')
def create(self, contact):
wd = self.app.wd
self.open_add_contact_page()
self.enter_value(contact)
wd.find_element_by_xpath("(//input[@name='submit'])[2]").click()
self.return_to_home_page()
self.contact_cache = None
def return_to_home_page(self):
wd = self.app.wd
wd.find_element_by_link_text("home page").click()
def open_home_page(self):
wd = self.app.wd
if not (wd.current_url.endswith("/index.php") and len(
wd.find_elements_by_xpath("//input[@value='Delete']")) > 0):
wd.find_element_by_link_text("home").click()
@allure.step('I delete a first contact')
def delete_first(self):
self.delete_some(0)
@allure.step('I delete a contact with id: "{id}"')
def delete_some_contact_by_id(self, id):
wd = self.app.wd
self.open_home_page()
self.select_some_by_id(id)
wd.find_element_by_xpath("//input[@value='Delete']").click()
wd.switch_to_alert().accept()
time.sleep(1)
self.contact_cache = None
@allure.step('I delete a contact with index: "{index}"')
def delete_some_contact_by_index(self, index):
wd = self.app.wd
self.open_home_page()
self.select_some_by_index(index)
wd.find_element_by_xpath("//input[@value='Delete']").click()
wd.switch_to_alert().accept()
time.sleep(1)
self.contact_cache = None
@allure.step('I modify a contact: "{contact}"')
def modify_first(self, contact):
wd = self.app.wd
self.open_home_page()
self.select_first()
wd.find_element_by_xpath("//table[@id='maintable']/tbody/tr[2]/td[8]/a/img").click()
self.enter_value(contact)
wd.find_element_by_xpath("(//input[@name='update'])[2]").click()
self.return_to_home_page()
self.contact_cache = None
@allure.step('I modify a contact with index: "{index}"')
def modify_some_by_index(self, contact, index):
wd = self.app.wd
self.open_home_page()
self.select_some_by_index(index)
xpath = "//table[@id='maintable']/tbody/tr[" + str(index + 2) + "]/td[8]/a/img"
wd.find_element_by_xpath(xpath).click()
self.enter_value(contact)
wd.find_element_by_xpath("(//input[@name='update'])[2]").click()
self.return_to_home_page()
self.contact_cache = None
@allure.step('I modify a contact with id: "{id}"')
def modify_some_by_id(self, contact, id):
wd = self.app.wd
self.open_home_page()
row = wd.find_element_by_xpath("//input[@id='%s']/../.." % id)
row.find_elements_by_tag_name("td")[7].click()
self.enter_value(contact)
wd.find_element_by_xpath("(//input[@name='update'])[2]").click()
self.return_to_home_page()
self.contact_cache = None
def select_first(self):
wd = self.app.wd
wd.find_element_by_name("selected[]").click()
@allure.step('I select a contact with index: "{index}"')
def select_some_by_index(self, index):
wd = self.app.wd
wd.find_elements_by_name("selected[]")[index].click()
@allure.step('I select a contact with id: "{id}"')
def select_some_by_id(self, id):
wd = self.app.wd
wd.find_element_by_css_selector("input[id='%s']" % id).click()
@allure.step('I get a contact with id: "{id}"')
def get_some_by_id(self, id):
wd = self.app.wd
wd.find_element_by_css_selector("input[id='%s']" % id)
self.open_home_page()
row = wd.find_element_by_xpath("//input[@id='%s']/../.." % id)
cells = row.find_elements_by_tag_name("td")
firstname = cells[2].text
lastname = cells[1].text
id = cells[0].find_element_by_tag_name("input").get_attribute("value")
all_phones = cells[5].text
all_emails = cells[4].text
contact = Contact(lastname=lastname, firstname=firstname, id=id,
all_phone_from_home_page=all_phones,
all_emails_from_home_page=all_emails)
return contact
@allure.step('I enter a contact: "{contact}"')
def enter_value(self, contact):
self.change_field_value("firstname", contact.firstname)
self.change_field_value("middlename", contact.middlename)
self.change_field_value("lastname", contact.lastname)
self.change_field_value("company", contact.company)
self.change_field_value("address", contact.address)
self.change_field_value("home", contact.home_phone)
self.change_field_value("mobile", contact.mobile_phone)
self.change_field_value("work", contact.work_phone)
self.change_field_value("fax", contact.fax)
self.change_field_value("email", contact.email)
self.change_field_value("email2", contact.email2)
self.change_field_value("email3", contact.email3)
self.change_field_value("phone2", contact.phone2)
def change_field_value(self, field_name, text):
wd = self.app.wd
if text is not None:
wd.find_element_by_name(field_name).click()
wd.find_element_by_name(field_name).clear()
wd.find_element_by_name(field_name).send_keys(text)
@allure.step('I calculate contact count')
def count(self):
wd = self.app.wd
self.open_home_page()
return len(wd.find_elements_by_name("selected[]"))
contact_cache = None
@allure.step('I get a contact list')
def get_contact_list(self):
if self.contact_cache is None:
wd = self.app.wd
self.open_home_page()
self.contact_cache = []
for row in wd.find_elements_by_name("entry"):
cells = row.find_elements_by_tag_name("td")
firstname = cells[2].text
lastname = cells[1].text
id = cells[0].find_element_by_tag_name("input").get_attribute("value")
all_phones = cells[5].text
all_emails = cells[4].text
self.contact_cache.append(Contact(lastname=lastname, firstname=firstname, id=id,
all_phone_from_home_page=all_phones, all_emails_from_home_page=all_emails))
# below is my_way
# count = len(wd.find_elements_by_name("selected[]"))
# for i in range(2, count+2):
# xpath = "//table[@id='maintable']/tbody/tr[" + str(i) + "]/td[2]"
# lastname = wd.find_element_by_xpath(xpath).text
# xpath = "//table[@id='maintable']/tbody/tr[" + str(i) + "]/td[3]"
# firstname = wd.find_element_by_xpath(xpath).text
# xpath = "//table[@id='maintable']/tbody/tr[" + str(i) + "]/td[1]/input"
# id = wd.find_element_by_xpath(xpath).get_attribute("value")
# self.contact_cache.append(Contact(lastname=lastname, firstname=firstname, id=id))
return list(self.contact_cache)
@allure.step('I open a contact to edit by index: "{index}"')
def open_contact_to_edit_by_index(self, index):
wd = self.app.wd
self.app.open_home_page()
row = wd.find_elements_by_name("entry")[index]
cell = row.find_elements_by_tag_name("td")[7]
cell.find_element_by_tag_name("a").click()
@allure.step('I open a contact to view by index: "{index}"')
def open_contact_view_by_index(self, index):
wd = self.app.wd
self.app.open_home_page()
row = wd.find_elements_by_name("entry")[index]
cell = row.find_elements_by_tag_name("td")[6]
cell.find_element_by_tag_name("a").click()
@allure.step('I get a contact info from edit page by index: "{index}"')
def get_contact_info_from_edit_page(self, index):
wd = self.app.wd
self.open_contact_to_edit_by_index(index)
first_name = wd.find_element_by_name("firstname").get_attribute("value")
last_name = wd.find_element_by_name("lastname").get_attribute("value")
id = wd.find_element_by_name("id").get_attribute("value")
home_phone = wd.find_element_by_name("home").get_attribute("value")
work_phone = wd.find_element_by_name("work").get_attribute("value")
mobile_phone = wd.find_element_by_name("mobile").get_attribute("value")
phone2 = wd.find_element_by_name("phone2").get_attribute("value")
email = wd.find_element_by_name("email").get_attribute("value")
email2 = wd.find_element_by_name("email2").get_attribute("value")
email3 = wd.find_element_by_name("email3").get_attribute("value")
return Contact(firstname=first_name, lastname=last_name, id=id, home_phone=home_phone, work_phone=work_phone,
mobile_phone=mobile_phone, email=email, email2=email2, email3=email3, phone2=phone2)
@allure.step('I get a contact info from view page by index: "{index}"')
def get_contact_from_view_page(self, index):
wd = self.app.wd
self.open_contact_view_by_index(index)
text = wd.find_element_by_id("content").text
if re.search("H: (.*)", text) is None:
home_phone = None
else:
home_phone = re.search("H: (.*)", text).group(1)
if re.search("W: (.*)", text) is None:
work_phone = None
else:
work_phone = re.search("W: (.*)", text).group(1)
if re.search("M: (.*)", text) is None:
mobile_phone = None
else:
mobile_phone = re.search("M: (.*)", text).group(1)
if re.search("P: (.*)", text) is None:
phone2 = None
else:
phone2 = re.search("P: (.*)", text).group(1)
return Contact(home_phone=home_phone, work_phone=work_phone, mobile_phone=mobile_phone, phone2=phone2)
@allure.step('I add a contact: "{contact}" to a group: "{group}"')
def add_in_group(self, contact, group):
wd = self.app.wd
self.app.open_home_page()
self.select_some_by_id(contact.id)
Select(wd.find_element_by_name("to_group")).select_by_visible_text(group.name)
wd.find_element_by_name("add").click()
self.open_home_page()
@allure.step('I del a contact: "{contact}" from a group: "{group}"')
def del_from_group(self, contact, group):
wd = self.app.wd
self.app.open_home_page()
Select(wd.find_element_by_name("group")).select_by_visible_text(group.name)
self.select_some_by_id(contact.id)
wd.find_element_by_name("remove").click()
self.open_home_page()
|
from .http import MPXApi
from .api_base import get_guid_based_id
__version__ = "0.1.16"
__title__ = "mpxapi"
|
# ====================================================================
# CODE AUTHOR: RAUL ESPINOSA
# This is the code for the actual cart functionality. A small note:
# I can't actually make the cart "work" until the models of the things
# that are supposed to go in the cart (i.e. the books) exist and can
# be imported/referenced. That being the case, I'm going to use some
# placeholder code to model the functionality needed. These can be
# refactored to the genuine book model code when it's available
# Note that I used the following resources to learn
# about django session functions and such:
# https://docs.djangoproject.com/en/2.0/topics/http/sessions/
# https://developer.mozilla.org/en-US/docs/Learn/Server-side/Django/Sessions
# ====================================================================
# This data type simplifies floating-point arithmetic and makes it
# look more like what calculators make it look like in real life.
# Also allows for rounding rules to be established,
# which will help with price calculations
from decimal import Decimal
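# (e.g. Decimal('0.1') + Decimal('0.2') == Decimal('0.3') exactly, while
#  0.1 + 0.2 yields 0.30000000000000004 with binary floats)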
from django.conf import settings
# Importing my Book class from the bookDetails package I made
from bookDetails.models import Book, Purchase
# This is the cart class.
class Cart(object):
# Constructor method for the class - includes a request parameter
def __init__(self, request):
# Start by creating a session for the new cart
self.session = request.session
# This structure is better than the try/except I had before.
# Just try to get the cart from the current session
userCart = self.session.get(settings.CART_SESSION_ID)
if not userCart:
# If we can't, just make a blank cart
userCart = self.session[settings.CART_SESSION_ID] = {}
# Assign the newly created and ID'd userCart to this cart model
self.userCart = userCart
# Our cart's status has to be modifiable, i.e. we need to be able
# to change the state of the cart dynamically. For this, we need
# a save function
def save(self):
self.session.modified = True
# The function that will be used to add items to the cart.
def add(self, book, amount=1, change_amount=False):
# Get the book's ID (its Primary Key)
book_id = str(book.id)
#purchse = Purchase.objects.create(book=book.id, user=request.user, has_purchased=True)
# If the book isn't in the cart, add it (and all the requisite parameters
# the cart has to show), as well as a parameter indicating whether
# an item is saved for later (SFL) or not, which defaults to false
if book_id not in self.userCart:
self.userCart[book_id] = {'amount': 0,
'author': book.book_author,
'author_bio': book.author_bio,
'description': book.book_description,
'genre': book.book_genre,
'publishing_info': book.publishing_info,
'avg_rating': str(book.avg_rating),
'price': str(book.price),
'SFL': False}
# If change_amount is True, we change the number of the specified
# book in the cart to the specified amount
if change_amount:
self.userCart[book_id]['amount'] = amount
# Otherwise, we just add the specified amount of the specified book
# to the cart
else:
self.userCart[book_id]['amount'] += amount
# Save the state of the cart, cementing our changes
self.save()
# Function for removing books from the cart
def remove(self, book):
# Same idea as in the add() function - now uses
# book's ID!
book_id = str(book.id)
# If the book is in the cart, remove it,
# then save the state of the cart
if book_id in self.userCart:
del self.userCart[book_id]
self.save()
# Function for setting a book's status as Saved For Later (SFL)
def addSFL(self, book):
book_id = str(book.id)
# If the book is in the cart, set its SFL status
# to True, indicating the book is in the saved for later
# list
if book_id in self.userCart:
self.userCart[book_id]['SFL'] = True
self.save()
# Function for moving a book back to the cart from the
# SFL list
def removeSFL(self, book):
book_id = str(book.id)
# If the book is in the cart, set its
# SFL status to False so that it will show up
# in the cart again
if book_id in self.userCart:
self.userCart[book_id]['SFL'] = False
self.save()
# This iterator will be used to iterate (of course) through the
# books in the cart, i.e. the Book models (instances)
def __iter__(self):
# Get the keys corresponding to all the books
# in the database - now uses Book model's hidden ID
# parameter!
book_ids = self.userCart.keys()
# Use the keys to get the actual Book objects
# and add them to the cart
books = Book.objects.filter(id__in=book_ids)
# Create a copy of the current cart
cart = self.userCart.copy()
# Add the books to the copy of the cart
for book in books:
cart[str(book.id)]['book'] = book
# Iterate over the books in the copied cart,
# adding a price and total price attribute to each
# item, then returning the book instance
for book in cart.values():
# We made the price attribute of the book a string
# in the add method so we could serialize it (needed
# for use w/ Sessions); now we have to convert back to
# a Decimal value so we can perform arithmetic on it
book['price'] = Decimal(book['price'])
# Calculate the subtotal price for the copies of each book
book['total_price'] = book['price'] * book['amount']
# NOTE 2/23/19:
# Maybe we need to add the book's other attributes here, such
# as author name, etc...? Not sure if that's needed, since
# we pass the book instance later
yield book
# Returns the total number of items in a user's cart
def __len__(self):
return sum(book['amount'] for book in self.userCart.values())
# Calculates the total cost of all the books in the cart that aren't
# listed as being saved for later
def get_total_price(self):
return sum((book['price'] * book['amount']) for book in self.userCart.values() if not book['SFL'])
# Delete the cart from the session - this is functionally the same
# as "emptying" it, since a new empty cart will be created the next time the
# user adds a book to their cart
def clear(self):
del self.session[settings.CART_SESSION_ID]
self.save()
|
# -*- coding: utf-8 -*-
import urwid
__author__ = 'Sumin Byeon'
__version__ = '0.1.3'
__all__ = ['StackedWidget']
class StackedWidget(urwid.Widget):
"""A widget container that presents one child widget at a time."""
#: A list containing all widgets
widgets = []
#: An index of the current widget
current = 0
def __init__(self):
self.widgets = []
self.current = 0
def push_widget(self, widget):
"""Appends a widget at the end of the list."""
self.widgets.append(widget)
def insert_widget(self, index, widget):
"""Inserts a widget at a given index."""
self.widgets.insert(index, widget)
def pop_widget(self):
"""Retrieves and removes the last widget (with the maximum index)."""
n = len(self.widgets)
assert n > 0
widget = self.widgets.pop()
if self.current == n - 1:
self.current -= 1
self._invalidate()
return widget
def show_widget(self, index):
assert 0 <= index < len(self.widgets)
self.current = index
self._invalidate()
def show_next_widget(self):
n = self.widget_count
self.show_widget((self.current + 1) % n)
def show_previous_widget(self):
n = self.widget_count
self.show_widget((self.current - 1 + n) % n)
@property
def widget_count(self):
"""The function name is pretty much self-explanatory."""
return len(self.widgets)
@property
def current_widget(self):
"""Returns a widget that is currently being rendered. If the widget
list is empty, it returns None."""
if self.widget_count > 0:
return self.widgets[self.current]
else:
return None
def selectable(self):
"""It appears ``selectable()`` must return ``True`` in order to get any
key input."""
return True
def render(self, size, focus=False):
assert self.current_widget is not None
return self.current_widget.render(size, focus)
def keypress(self, size, key):
"""Passes key inputs to the current widget. If the current widget is
``None`` then it returns the given key input so that
``unhandled_input`` function can handle it."""
if self.current_widget is not None:
return self.current_widget.keypress(size, key)
else:
return key
def mouse_event(self, size, event, button, col, row, focus):
if self.current_widget is not None:
return self.current_widget.mouse_event(
size, event, button, col, row, focus)
else:
return False
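if __name__ == '__main__':
    # Minimal usage sketch (illustration only): build a stack of two pages and
    # cycle through them. A real application would hand `stack` to
    # urwid.MainLoop as its top-level widget and switch pages from key handlers.
    stack = StackedWidget()
    stack.push_widget(urwid.Filler(urwid.Text(u"page one")))
    stack.push_widget(urwid.Filler(urwid.Text(u"page two")))
    stack.show_next_widget()                  # now showing index 1
    assert stack.current_widget is stack.widgets[1]
    stack.show_previous_widget()              # back to index 0
    print(stack.widget_count, stack.current)  # -> 2 0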
|
#!/usr/bin/env python3
# We are given a list of numbers in a 'short-hand' range notation where only
# the significant part of the next number is written because we know the
# numbers are always increasing
# ex. '1,3,7,2,4,1' represents [1, 3, 7, 12, 14, 21]).
#
# Some people use different separators for their ranges
# ex. '1-3,1-2', '1:3,1:2', '1..3,1..2'
# represent the same numbers [1, 2, 3, 11, 12])
# and they sometimes specify a third digit for the range step
# ex. '1:5:2' represents [1, 3, 5]
#
# NOTE: For this challenge range limits are always inclusive.
# Our job is to return a list of the complete numbers.
# The possible separators are: ['-', ':', '..']
#
# You'll be given strings in the 'short-hand' range notation
#
# '1,3,7,2,4,1'
# '1-3,1-2'
# '1:5:2'
# '104-2'
# '104..02'
# '545,64:11'
#
# You should output a string of all the numbers separated by a space
#
# '1 3 7 12 14 21'
# '1 2 3 11 12'
# '1 3 5'
# '104 105 106 107 108 109 110 111 112'
# '104 105 106...200 201 202' # truncated for simplicity
# '545 564 565 566...609 610 611' # truncated for simplicity
separators = ['-', ':', '..']
inputs = [
'1,3,7,2,4,1',
'1-3,1-2',
'1:5:2',
'104-2',
'104..02',
'545,64:11',
]
def complete(prev, digits):
    """Smallest number greater than prev whose decimal form ends in digits."""
    base = 10 ** len(digits)
    candidate = (prev // base) * base + int(digits)
    while candidate <= prev:
        candidate += base
    return candidate
for line in inputs:
    output = []
    prev = 0
    for token in line.split(','):
        # split on whichever separator this token uses (if any)
        parts = [token]
        for sep in separators:
            if sep in token:
                parts = token.split(sep)
                break
        # expand the short-hand start (and, for ranges, the end and optional step)
        start = complete(prev, parts[0])
        if len(parts) == 1:
            numbers = [start]
        else:
            end = complete(start, parts[1])
            step = int(parts[2]) if len(parts) == 3 else 1
            numbers = list(range(start, end + 1, step))
        output.extend(numbers)
        prev = numbers[-1]
    print(' '.join(str(n) for n in output))
|
from HABApp.core.events import ValueChangeEventFilter, ValueUpdateEventFilter
from . import ItemStateChangedEvent, ItemStateEvent
class ItemStateEventFilter(ValueUpdateEventFilter):
_EVENT_TYPE = ItemStateEvent
class ItemStateChangedEventFilter(ValueChangeEventFilter):
_EVENT_TYPE = ItemStateChangedEvent
|
#!/bin/python
import math
import os
import random
import re
import sys
if __name__ == '__main__':
cases = int(input())
for caseNo in range(cases):
        s = input()
#reverse string
rs = s[::-1]
n = len(s)
#take the difference of ascii values
for i in range(1, n):
d1 = abs(ord(s[i]) - ord(s[i - 1]))
d2 = abs(ord(rs[i]) - ord(rs[i - 1]))
#if the difference at any point isn't equal string is not funny
if d1 != d2:
print("Not Funny")
break
else:
print("Funny")
|
"""
Campaign tools
"""
import yaml
import arcade
from .ship import Ship
from .utils import Position
from .player import Player
from .render import RenderEngine
class Map(object):
"""
A single map within a campaign
"""
def __init__(self):
pass # TODO
class Campaign(object):
"""
A campaign holds all of the story information required
    to play through our tales of adventure!
"""
def __init__(self):
pass # TODO
def basic_start(self, player: Player, window):
"""
When using the dev mode, we often just want to jump into a
map or environment to work on mechanics and other game elements
:param player: Our current Player
:return: None
"""
player.set_ship(
Ship.new_ship("Skalk", position=Position(250, 230))
)
player.ship.add_to_scene()
# Bit of a performance drag... need to know why
from ..draw.starfield import Starfield
self._background = Starfield(window)
RenderEngine().add_object(self._background)
class CampaignLoader(object):
"""
Yaml parser for reading and understanding a campaign
"""
def __init__(self):
self._found_campaigns = {}
def dev_campaign(self):
"""
:return: A default Campaign for testing
"""
return Campaign()
|
_base_ = [
'../_base_/models/universenet50.py',
'../_base_/datasets/waymo_open_2d_detection_f0_mstrain_640_1280.py',
'../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
model = dict(bbox_head=dict(num_classes=3))
data = dict(samples_per_gpu=2)
optimizer = dict(type='SGD', lr=0.001, momentum=0.9, weight_decay=0.0001)
fp16 = dict(loss_scale=512.)
load_from = 'https://github.com/shinya7y/UniverseNet/releases/download/20.06/universenet50_fp16_8x2_mstrain_480_960_2x_coco_20200523_epoch_24-726c5c93.pth' # noqa
|
"""Diagnostics support for Open-Meteo."""
from __future__ import annotations
import json
from typing import Any
from open_meteo import Forecast
from homeassistant.components.diagnostics import async_redact_data
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import CONF_LATITUDE, CONF_LONGITUDE
from homeassistant.core import HomeAssistant
from homeassistant.helpers.update_coordinator import DataUpdateCoordinator
from .const import DOMAIN
TO_REDACT = {
CONF_LATITUDE,
CONF_LONGITUDE,
}
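# async_redact_data replaces the values of these keys with "**REDACTED**" in the
# returned diagnostics payload.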
async def async_get_config_entry_diagnostics(
hass: HomeAssistant, entry: ConfigEntry
) -> dict[str, Any]:
"""Return diagnostics for a config entry."""
coordinator: DataUpdateCoordinator[Forecast] = hass.data[DOMAIN][entry.entry_id]
# Round-trip via JSON to trigger serialization
data: dict[str, Any] = json.loads(coordinator.data.json())
return async_redact_data(data, TO_REDACT)
|
from django.contrib import admin
from .models import Atracao
admin.site.register(Atracao)
|
from .. import app
from .. import queue
from .. import hooks
from ..models import Post
from bs4 import BeautifulSoup
import urllib
import re
import requests
def register():
hooks.register('post-saved', send_webmentions)
def send_webmentions(post, args):
if args.get('action') in ('save_draft', 'publish_quietly'):
app.logger.debug('skipping webmentions for {}'.format(post.id))
return
try:
app.logger.debug("queueing webmentions for {}".format(post.id))
queue.enqueue(do_send_webmentions, post.id)
return True, 'Success'
except Exception as e:
app.logger.exception('sending webmentions')
return False, "Exception while sending webmention: {}"\
.format(e)
def do_send_webmentions(post_id):
app.logger.debug("sending mentions for {}".format(post_id))
post = Post.load_by_id(post_id)
return handle_new_or_edit(post)
def get_source_url(post):
return post.permalink
def get_target_urls(post):
target_urls = []
# send mentions to 'in_reply_to' as well as all linked urls
target_urls += post.in_reply_to
target_urls += post.repost_of
target_urls += post.like_of
app.logger.debug("search post content {}".format(post.content_html))
soup = BeautifulSoup(post.content_html)
for link in soup.find_all('a'):
link_target = link.get('href')
if link_target:
app.logger.debug("found link {} with href {}"
.format(link, link_target))
target_urls.append(link_target.strip())
return target_urls
def get_response(url):
if url in get_response.cached_responses:
return get_response.cached_responses[url]
response = requests.get(url)
get_response.cached_responses[url] = response
return response
get_response.cached_responses = {}
def handle_new_or_edit(post):
target_urls = get_target_urls(post)
app.logger.debug("Sending webmentions to these urls {}"
.format(" ; ".join(target_urls)))
results = []
for target_url in target_urls:
results.append(send_mention(post, target_url))
return results
def send_mention(post, target_url):
app.logger.debug("Looking for webmention endpoint on %s",
target_url)
success, explanation = check_content_type_and_size(target_url)
if success:
if supports_webmention(target_url):
app.logger.debug("Site supports webmention")
success, explanation = send_webmention(post, target_url)
elif supports_pingback(target_url):
app.logger.debug("Site supports pingback")
success, explanation = send_pingback(post, target_url)
app.logger.debug("Sending pingback successful: %s", success)
else:
app.logger.debug("Site does not support mentions")
success = False
explanation = 'Site does not support webmentions or pingbacks'
return {'target': target_url,
'success': success,
'explanation': explanation}
def check_content_type_and_size(target_url):
request = urllib.request.Request(
target_url, headers={'User-Agent': 'kylewm.com'})
metadata = urllib.request.urlopen(request).info()
if not metadata:
return False, "Could not retrieve metadata for url {}".format(
target_url)
content_type = metadata.get_content_maintype()
content_length = metadata.get('Content-Length')
if content_type and content_type != 'text':
return False, "Target content type '{}' is not 'text'".format(
content_type)
if content_length and int(content_length) > 2097152:
return False, "Target content length {} is too large".format(
content_length)
return True, None
def supports_webmention(target_url):
return find_webmention_endpoint(target_url) is not None
def find_webmention_endpoint(target_url):
app.logger.debug("looking for webmention endpoint in %s", target_url)
response = get_response(target_url)
app.logger.debug("looking for webmention endpoint in headers and body")
endpoint = (find_webmention_endpoint_in_headers(response.headers)
or find_webmention_endpoint_in_html(response.text))
app.logger.debug("webmention endpoint %s %s", response.url, endpoint)
return endpoint and urllib.parse.urljoin(response.url, endpoint)
def find_webmention_endpoint_in_headers(headers):
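    # A typical endpoint advertisement looks like:
    #   Link: <https://example.com/webmention>; rel="webmention"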
if 'link' in headers:
m = re.search('<(https?://[^>]+)>; rel="webmention"',
headers.get('link')) or \
re.search('<(https?://[^>]+)>; rel="http://webmention.org/?"',
headers.get('link'))
if m:
return m.group(1)
def find_webmention_endpoint_in_html(body):
soup = BeautifulSoup(body)
link = (soup.find('link', attrs={'rel': 'webmention'})
or soup.find('link', attrs={'rel': 'http://webmention.org/'})
or soup.find('a', attrs={'rel': 'webmention'}))
return link and link.get('href')
def send_webmention(post, target_url):
app.logger.debug(
"Sending webmention from %s to %s",
get_source_url(post), target_url)
try:
endpoint = find_webmention_endpoint(target_url)
if not endpoint:
return False, "No webmention endpoint for {}".format(
target_url)
payload = {'source': get_source_url(post),
'target': target_url}
headers = {'content-type': 'application/x-www-form-urlencoded',
'accept': 'application/json'}
response = requests.post(endpoint, data=payload, headers=headers)
#from https://github.com/vrypan/webmention-tools/blob/master/
# webmentiontools/send.py
if response.status_code // 100 != 2:
app.logger.warn(
"Failed to send webmention for %s. "
"Response status code: %s, %s",
target_url, response.status_code, response.text)
return False, "Status code: {}, Response: {}".format(
response.status_code, response.text)
else:
app.logger.debug(
"Sent webmention successfully to %s. Sender response: %s:",
target_url, response.text)
return True, "Successful"
except Exception as e:
return False, "Exception while sending webmention {}".format(e)
def supports_pingback(target_url):
return find_pingback_endpoint(target_url) is not None
def find_pingback_endpoint(target_url):
response = get_response(target_url)
endpoint = response.headers.get('x-pingback')
if not endpoint:
soup = BeautifulSoup(response.text)
link = soup.find('link', attrs={'rel': 'pingback'})
endpoint = link and link.get('href')
return endpoint
def send_pingback(post, target_url):
try:
endpoint = find_pingback_endpoint(target_url)
source_url = get_source_url(post)
payload = (
"""<?xml version="1.0" encoding="iso-8859-1"?><methodCall>"""
"""<methodName>pingback.ping</methodName><params><param>"""
"""<value><string>{}</string></value></param><param><value>"""
"""<string>{}</string></value></param></params></methodCall>"""
.format(source_url, target_url))
headers = {'content-type': 'application/xml'}
response = requests.post(endpoint, data=payload, headers=headers)
app.logger.debug(
"Pingback to %s response status code %s. Message %s",
target_url, response.status_code, response.text)
return True, "Sent pingback successfully"
except Exception as e:
return False, "Exception while sending pingback: {}".format(e)
|
from nose.tools import eq_
from elasticutils import S, DefaultMappingType, NoModelError, MappingType
from elasticutils.tests import ElasticTestCase
model_cache = []
def reset_model_cache():
del model_cache[0:]
class Meta(object):
def __init__(self, db_table):
self.db_table = db_table
class Manager(object):
def filter(self, id__in=None):
return [m for m in model_cache if m.id in id__in]
class FakeModel(object):
_meta = Meta('fake')
objects = Manager()
def __init__(self, **kw):
for key in kw:
setattr(self, key, kw[key])
model_cache.append(self)
class FakeMappingType(MappingType):
def get_model(self):
return FakeModel
class TestResultsWithData(ElasticTestCase):
@classmethod
def setup_class(cls):
super(TestResultsWithData, cls).setup_class()
if cls.skip_tests:
return
cls.create_index()
cls.index_data([
{'id': 1, 'foo': 'bar', 'tag': 'awesome', 'width': '2'},
{'id': 2, 'foo': 'bart', 'tag': 'boring', 'width': '7'},
{'id': 3, 'foo': 'car', 'tag': 'awesome', 'width': '5'},
{'id': 4, 'foo': 'duck', 'tag': 'boat', 'width': '11'},
{'id': 5, 'foo': 'train car', 'tag': 'awesome', 'width': '7'}
])
@classmethod
def teardown_class(cls):
super(TestResultsWithData, cls).teardown_class()
reset_model_cache()
def test_default_results_are_default_mapping_type(self):
"""With untyped S, return dicts."""
# Note: get_s with no args should return an untyped S
searcher = list(self.get_s().query(foo='bar'))
assert isinstance(searcher[0], DefaultMappingType)
def test_typed_s_returns_type(self):
"""With typed S, return objects of type."""
searcher = list(self.get_s(FakeMappingType).query(foo='bar'))
assert isinstance(searcher[0], FakeMappingType)
def test_values_dict_results(self):
"""With values_dict, return list of dicts."""
searcher = list(self.get_s().query(foo='bar').values_dict())
assert isinstance(searcher[0], dict)
def test_values_list_no_fields(self):
"""Specifying no fields with values_list defaults to ['id']."""
searcher = list(self.get_s().query(foo='bar').values_list())
assert isinstance(searcher[0], tuple)
# We sort the result and expected result here so that the
# order is stable and comparable.
eq_(sorted(searcher[0]), sorted((u'2', u'bar', u'awesome', 1)))
def test_values_list_results(self):
"""With values_list fields, returns list of tuples."""
searcher = list(self.get_s().query(foo='bar')
.values_list('foo', 'width'))
assert isinstance(searcher[0], tuple)
def test_default_results_form_has_metadata(self):
"""Test default results form has metadata."""
searcher = list(self.get_s().query(foo='bar'))
assert hasattr(searcher[0], '_id')
assert hasattr(searcher[0], '_score')
assert hasattr(searcher[0], '_source')
assert hasattr(searcher[0], '_type')
assert hasattr(searcher[0], '_explanation')
assert hasattr(searcher[0], '_highlight')
def test_values_list_form_has_metadata(self):
"""Test default results form has metadata."""
searcher = list(self.get_s().query(foo='bar').values_list('id'))
assert hasattr(searcher[0], '_id')
assert hasattr(searcher[0], '_score')
assert hasattr(searcher[0], '_source')
assert hasattr(searcher[0], '_type')
assert hasattr(searcher[0], '_explanation')
assert hasattr(searcher[0], '_highlight')
def test_values_dict_form_has_metadata(self):
"""Test default results form has metadata."""
searcher = list(self.get_s().query(foo='bar').values_dict())
assert hasattr(searcher[0], '_id')
assert hasattr(searcher[0], '_score')
assert hasattr(searcher[0], '_source')
assert hasattr(searcher[0], '_type')
assert hasattr(searcher[0], '_explanation')
assert hasattr(searcher[0], '_highlight')
def test_values_dict_no_args(self):
"""Calling values_dict() with no args fetches all fields."""
eq_(S().query(fld1=2)
.values_dict()
._build_query(),
{"query": {"term": {"fld1": 2}}})
def test_values_list_no_args(self):
"""Calling values() with no args fetches only id."""
eq_(S().query(fld1=2)
.values_list()
._build_query(),
{'query': {"term": {"fld1": 2}}})
class TestMappingType(ElasticTestCase):
def tearDown(self):
super(TestMappingType, self).tearDown()
self.__class__.cleanup_index()
def test_default_mapping_type(self):
data = [
{'id': 1, 'name': 'Alice'}
]
self.__class__.index_data(data, create_index=True)
s = self.get_s(DefaultMappingType)
result = list(s)[0]
assert isinstance(result, DefaultMappingType)
eq_(result.id, 1)
self.assertRaises(NoModelError, lambda: result.object)
def test_mapping_type_attribute_override(self):
data = [
{'id': 1, '_object': 'foo'}
]
self.__class__.index_data(data, create_index=True)
s = self.get_s(DefaultMappingType)
result = list(s)[0]
# Instance attribute (which starts out as None) takes precedence.
eq_(result._object, None)
# _object ES result field is available through __getitem__
eq_(result['_object'], 'foo') # key/val from ES
# Get the ES result field id
eq_(result.id, 1)
# Set id to something else
result.id = 'foo'
# Now it returns the instance attribute
eq_(result.id, 'foo')
# id still available through __getitem__
eq_(result['id'], 1)
# If it doesn't exist, throw AttributeError
self.assertRaises(AttributeError, lambda: result.doesnt_exist)
# If it doesn't exist, throw KeyError
self.assertRaises(KeyError, lambda: result['doesnt_exist'])
|
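# Appears to count reduced proper fractions n/d that lie strictly between 1/3
# and 1/2 for denominators d <= 12000 (cf. Project Euler problem 73).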
import math
num = 1
den = 1
count = 0
total = 0
while den <= 12000:
    count = 0
    min_num = int((den + 0.0) / 3 + 1)
    max_num = den // 2
    if den % 2 == 0:
        max_num -= 1
    while min_num <= max_num:
        if math.gcd(min_num, den) == 1:
            count += 1
        min_num += 1
    total += count
    den += 1
print(total)
|
import concurrent.futures as cf
import json
from datetime import datetime
import math
import moodle.models as models
from frontend.models import Submission, GradingFile, Assignment, Course
from moodle.exceptions import AccessDenied, InvalidResponse
from persistence.worktree import WorkTree
from util import interaction
MAX_WORKERS = 10
class MoodleFrontend:
def __init__(self, worktree=None):
# todo, read course from worktree config.
from moodle.communication import MoodleSession
self.worktree = worktree or WorkTree()
self.config = WorkTree.get_global_config_values()
self.session = MoodleSession(moodle_url=self.config.url, token=self.config.token)
@property
def course_ids(self):
return self.worktree.courses.keys()
@property
def assignment_ids(self):
return self.worktree.assignments.keys()
def sync_assignments(self):
response = self.session.mod_assign_get_assignments(self.course_ids)
wrapped = models.CourseAssignmentResponse(response)
result = self.worktree.assignments.update(wrapped.raw)
output = ['{}: {:d}'.format(k, v) for k, v in result.items()]
return output
def sync_users(self):
        # Limit the collected information to the relevant fields; this is faster and can work around some Moodle bugs.
sync_fields = ['fullname', 'groups', 'id']
options = {'userfields': ','.join(sync_fields)}
users = {}
output = ""
for cid in self.course_ids:
try:
response = self.session.core_enrol_get_enrolled_users(course_id=cid, options=options)
users[int(cid)] = response
output += f'{cid:5d}:got {len(response):4d}\n'
except AccessDenied as denied:
message = f'{cid:d} denied access to users: {denied}\n'
output += message
except InvalidResponse as e:
message = f'Moodle encountered an error: msg:{e.message} \n debug:{e.debug_message}\n'
output += message
self.worktree.users = users
return output
def sync_submissions(self):
now = math.floor(datetime.now().timestamp())
response = self.session.mod_assign_get_submissions(self.assignment_ids,
since=self.worktree.submissions.last_sync)
result = self.worktree.submissions.update(response, now)
output = ['{}: {:d}'.format(k, v) for k, v in result.items()]
return output
def sync_grades(self):
now = math.floor(datetime.now().timestamp())
response = self.session.mod_assign_get_grades(self.assignment_ids, since=self.worktree.grades.last_sync)
result = self.worktree.grades.update(response, now)
output = ['{}: {:d}'.format(k, v) for k, v in result.items()]
return output
def get_course_list(self):
wrapped = models.CourseListResponse(self.session.core_enrol_get_users_courses(self.config.user_id))
return wrapped
def sync_file_meta_data(self):
files = []
for as_id, submissions in self.worktree.submissions.items():
for submission in submissions:
files += Submission(submission).files
with cf.ThreadPoolExecutor(max_workers=MAX_WORKERS) as tpe:
try:
future_to_file = {tpe.submit(self.session.core_files_get_files, **file.meta_data_params): file for file in files}
for future in cf.as_completed(future_to_file):
file = future_to_file[future]
response = models.FileMetaDataResponse(future.result())
for file in response.files:
print(file)
except KeyboardInterrupt:
print('stopping…')
tpe.shutdown()
raise
# for file in files:
# wrapped = models.FileMetaDataResponse(self.session.core_files_get_files(**file.meta_data_params))
# print(str(wrapped.raw))
# for received in wrapped.files:
# print(received.author)
# reply = moodle.get_submissions_for_assignments(wt.assignments.keys())
# data = json.loads(strip_mlang(reply.text))
# result = wt.submissions.update(data)
# output = ['{}: {:d}'.format(k, v) for k, v in result.items()]
# print('finished. ' + ' '.join(output))
def download_files(self, assignment_ids=None):
courses = self.worktree.data
assignments = []
if assignment_ids is None or 0 == len(assignment_ids):
for c in courses:
assignments += c.assignments.values()
else:
for c in courses:
assignments += c.get_assignments(assignment_ids)
files = self.worktree.prepare_download(assignments)
file_count = len(files)
counter = 0
# todo, error handling
if file_count > 0:
interaction.print_progress(counter, file_count)
with cf.ThreadPoolExecutor(max_workers=MAX_WORKERS) as tpe:
try:
future_to_file = {tpe.submit(self.session.download_file, file.url): file for file in files}
for future in cf.as_completed(future_to_file):
file = future_to_file[future]
response = future.result()
counter += 1
interaction.print_progress(counter, file_count, suffix=file.path)
self.worktree.write_submission_file(file, response.content)
except KeyboardInterrupt:
print('stopping…')
tpe.shutdown()
raise
for a in assignments:
self.worktree.write_grading_and_html_file(a)
def upload_grades(self, upload_data):
def argument_list(upload_data):
for grades in upload_data:
as_id = grades.assignment_id
team = grades.team_submission
args = []
for values in grades.grades:
args.append({
'assignment_id': as_id,
'user_id': values.id,
'grade': values.grade,
'feedback_text': values.feedback,
'team_submission': team
})
return args
args_list = argument_list(upload_data)
grade_count = len(args_list)
counter = 0
if grade_count > 0:
interaction.print_progress(counter, grade_count)
with cf.ThreadPoolExecutor(max_workers=MAX_WORKERS) as tpe:
try:
future_to_grade = {tpe.submit(self.session.mod_assign_save_grade, **args): args for args in args_list}
for future in cf.as_completed(future_to_grade):
args = future_to_grade[future]
response = future.result()
counter += 1
interaction.print_progress(counter, grade_count)
except KeyboardInterrupt:
print('stopping…')
tpe.shutdown()
raise
def upload_files(self, files):
# TODO, Wrap and return it, don't print. do print in wstools.upload. also modify submit
response = self.session.upload_files(files)
return models.FileUploadResponse(response)
def search_courses_by_keywords(self, keyword_list):
# TODO: wrap and return to wstools.enrol
response = self.session.core_course_search_courses(' '.join(keyword_list))
return response
def get_course_enrolment_methods(self, course_id):
# TODO: wrap and return to wstools.enrol
response = self.session.core_enrol_get_course_enrolment_methods(course_id)
return response
def enrol_in_course(self, course_id, instance_id, password=''):
# TODO: wrap and return to wstools.enrol
response = self.session.enrol_self_enrol_user(course_id, instance_id=instance_id, password=password)
return response
def save_submission(self, assignment_id, text='', text_format=0, text_file_id=0, files_id=0):
# TODO: wrap and return to wstools.submit
response = self.session.mod_assign_save_submission(assignment_id, text, text_format, text_file_id, files_id)
return response
@classmethod
def get_token(cls, url, user, password, service):
# TODO: wrap and return to wstools.auth
from moodle.communication import MoodleSession
session = MoodleSession(moodle_url=url)
token = session.get_token(user_name=user, password=password, service=service)
return token
def get_user_id(self):
# TODO: wrap and return to wstools.auth
from moodle.fieldnames import JsonFieldNames as Jn
data = self.session.core_webservice_get_site_info()
return data[Jn.user_id]
def parse_grade_files(self, fd_list):
"""
this mostly rewrites the values read from the grading file.
since most of this can be done, when creating the grading file
it should be done there.
Namely, adding a team_submission field and instead of
setting the submission.id in the file, use the user.id instead
:param fd_list:
:return:
"""
upload_data = []
print('this will upload the following grades:')
grade_format = ' {:>20}:{:6d} {:5.1f} > {}'
invalid_values = []
for file in fd_list:
# cls needs to be set, for the strict flag to be registered.
wrapped = GradingFile(json.load(file, cls=json.JSONDecoder, strict=False))
assignment = Assignment(self.worktree.assignments[wrapped.assignment_id])
assignment.course = Course(self.worktree.courses[assignment.course_id])
assignment.course.users = self.worktree.users[str(assignment.course_id)]
assignment.submissions = self.worktree.submissions[assignment.id]
wrapped.team_submission = assignment.is_team_submission
print(f' assignment {assignment.id:5d}, team_submission: {assignment.is_team_submission}')
for grade in wrapped.grades:
submission = assignment.submissions[grade.id]
if assignment.is_team_submission:
group = assignment.course.groups[submission.group_id]
user = group.members[0]
grade.id = user.id
else:
grade.id = submission.user_id
if assignment.max_points < grade.grade:
invalid_values.append(grade)
print(grade_format.format(grade.name, grade.id, grade.grade, grade.feedback[:40]))
upload_data.append(wrapped)
if len(invalid_values) > 0:
for grade in invalid_values:
print(
"WARNING: the grade value is larger than the max achievable grade")
print(grade_format.format(grade.name, grade.id, grade.grade, grade.feedback[:40]))
raise SystemExit(1)
answer = input('does this look good? [Y/n]: ')
if 'n' == answer:
print('do it right, then')
raise SystemExit(0)
elif not ('y' == answer.lower() or '' == answer):
print('wat')
raise SystemExit(1)
return upload_data
def get_course_content(self):
for course_id in self.course_ids:
response = self.session.core_course_get_contents(course_id)
wrapped = models.CourseContentResponse(response)
for section in wrapped:
for module in section.modules:
"""
known modnames and how to dump:
forum: via mod_forum_get_forum_discussions_paginated, to get discussion id list
then use mod_forum_get_forum_discussion_posts, to dump the posts.
posts can have attachments, download maybe?
assign: is easy, already implemented.
folder: can contain multiple files, the urls may contain the ?forcedownload parameter
which might need to get stripped, not quite sure
resource: same as folder, both have 'contents' field, containing a fileurl, should check type.
label: is just an annotation to an activity, so just dump description.
uncertain:
choice: has externallib, should be dumpable.
page: presents html, has no externallib, cant be dumped via WS.
but page contains an url, can be downloaded, maybe.
quiz: has externallib, but is not accessible to me.
lesson,
ratingallocate,
label,
wiki: no clue
undumpable:
lti: linked external learning tool, can't do anything about that.
choicegroup: no externallib, https://github.com/ndunand/moodle-mod_choicegroup
"""
# print(module.modname)
known_dumpable = ['forum', 'assign', 'folder', 'resource', 'label']
uncertain = ['choice', 'lesson', 'quiz', 'wiki', 'page', 'ratingallocate', 'publication']
known_undumpable = ['lti', 'choicegroup']
unchecked = ['url', 'organizer', 'checklist', 'glossary', 'feedback', 'book', 'attendance']
if module.modname not in known_dumpable+uncertain+known_undumpable+unchecked:
print(module.modname)
#print(json.dumps(module.raw, indent=2, ensure_ascii=False))
if module.modname == 'organizer':
print(json.dumps(module.raw, indent=2, ensure_ascii=False))
|
from marshmallow import Schema, fields, pprint
from flask import Flask
from flask_restplus import reqparse
from flask_restplus import Resource, Api
from scraper import scrape
import util
def analyze_func(x):
return x
api = Api()
app = Flask(__name__)
api.init_app(app)
""" Serialization Schemas """
class SubmissionResponseSchema(Schema):
"""
    SubmissionResponseSchema
:param str submission: permalink
"""
submission = fields.Str()
@api.route('/submission')
class SubmissionResource(Resource):
submission_parser = reqparse.RequestParser()
submission_parser.add_argument('submission', type=str, help='Submission URL.')
def post(self):
submission_args = self.submission_parser.parse_args()
submission_url = submission_args.get('submission', None)
print(submission_url)
if submission_url is None:
return {
"error": "URL not found."
}, 400
try:
submission_scraper = scrape.RedditSubmissionScraper(url=submission_url)
submission_scraper.extract_data()
        except Exception:
return {
"error": "Submission not found."
}, 404
predicted_flair = util.predict_flair(submission_scraper.scraped)
submission_scraper.scraped['comments'] = submission_scraper.scraped['comments'][:10]
submission_scraper.scraped['prediction'] = predicted_flair[0]
return submission_scraper.scraped, 200
if __name__ == "__main__":
app.run(debug=True)
|
from argparse import ArgumentParser
from pathlib import Path
from pfm import PotentialFieldMethod, Space
DIR = Path(__file__).parent
def main() -> None:
parser = ArgumentParser(
prog="python main.py",
description="Potential field method runner"
)
parser.add_argument(
"--space",
dest="space",
help=(
"Path to the JSON file describing the configuration space, "
"default: %(default)r"
),
default=DIR / "data" / "normally.json",
)
parser.add_argument(
"--solution",
dest="solution",
help=(
"The path to the file where the solution will be written, "
"default: %(default)r"
),
default=DIR / "solution.json",
)
args = parser.parse_args()
input_path = Path(args.space).resolve()
output_path = Path(args.solution).resolve()
space = Space.form_file(input_path)
algorithm = PotentialFieldMethod()
plan = algorithm.solve(space)
plan.dump(output_path)
parser.exit()
if __name__ == "__main__":
main()
|
# -*- coding: utf-8 -*-
"""INFO
Set the package information for `mkstuff'
:Author: Martin Kilbinger, <martin.kilbinger@cea.fr>
:Date: 18/04/2019
"""
# Release version
version_info = (1, 2, 2)
__version__ = '.'.join(str(c) for c in version_info)
# Package details
__author__ = 'Martin Kilbinger'
__email__ = 'martin.kilbinger@cea.fr'
__year__ = 2019
__url__ = 'https://github.com/martinkilbinger/mkstuff'
__description__ = 'Useful but random scientific python modules and scripts'
# Dependencies
__requires__ = ['numpy', 'scipy', 'astropy']
# Default package properties
__license__ = 'MIT'
__about__ = ('{} \n\n Author: {} \n Email: {} \n Year: {} \n {} \n\n'
''.format(__name__, __author__, __email__, __year__,
__description__))
__setup_requires__ = ['pytest-runner', ]
__tests_require__ = ['pytest', 'pytest-cov', 'pytest-pep8']
|
from db.models import Country, Notice, session
from sqlalchemy import desc, func
import datetime
class Stats:
def __init__(self, db_session):
self.session = db_session
def notice_ordered_per_country(self):
all_countries = self.session.query(Country)
ordered_list = all_countries.order_by(desc(Country.total_notices)).all()
data = {}
for country in ordered_list:
data[country.name] = country.total_notices
return data
@property
def total_notices(self):
total = self.session.query(func.sum(Country.total_notices)).one()
return int(total[0])
    def notice_per_million_people(self):
data = {}
for country in self.session.query(Country).all():
try:
data[country.code] = round((int(country.total_notices) * 1_000_000) / int(country.population), 2)
except TypeError:
data[country.code] = -1
data = dict(sorted(data.items(), key=lambda item: item[1], reverse=True))
return data
def notices_percentage_per_country(self):
data = {}
for country, notices in self.notice_ordered_per_country().items():
            data[country] = f"{round(int(notices) / self.total_notices * 100, 2)} %"
return data
def gender_ratio(self, country=None):
if not country:
male = self.session.query(Notice.sex).filter(Notice.sex == 'M').count()
female = self.session.query(Notice.sex).filter(Notice.sex == 'F').count()
else:
male = self.session.query(Notice.sex).filter(Notice.sex == 'M', Notice.country == country).count()
female = self.session.query(Notice.sex).filter(Notice.sex == 'F', Notice.country == country).count()
total = male + female
male_ratio = "{} %".format(round((male / total) * 100, 2))
female_ratio = "{} %".format(round((female / total) * 100, 2))
return {
"Male": male_ratio,
"Female": female_ratio
}
@staticmethod
def calculate_age(birth_date):
today = datetime.date.today()
age = today.year - birth_date.year - ((today.month, today.day) < (birth_date.month, birth_date.day))
return age
def average_age(self, query=None):
if not query:
            dates = self.session.query(Notice.date_of_birth).all()
else:
dates = [(country.date_of_birth,) for country in query]
ages = []
for date in dates:
if isinstance(date[0], datetime.date):
ages.append(self.calculate_age(date[0]))
avg = round(sum(ages) / len(ages))
return avg
def average_age_by_gender(self, gender):
query = self.session.query(Notice.date_of_birth).filter(Notice.sex == gender).all()
return self.average_age(query)
def stats_by_country(self, country_code: str):
country_code = country_code.upper()
country = self.session.query(Country).filter(Country.code == country_code).first()
query = self.session.query(Notice).filter(Notice.country == country_code).all()
average_age = self.average_age(query)
gender_ratio = self.gender_ratio(country_code)
        per_million_people = self.notice_per_million_people().get(country_code)
percentage = self.notices_percentage_per_country().get(country.name)
return {
"country": country.name,
"percentage": percentage,
"total_notices": country.total_notices,
"average_age": average_age,
"gender_ratio": gender_ratio,
"per_million_people": per_million_people,
}
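# Minimal usage sketch (assumes a configured SQLAlchemy session from db.models;
# the country code is only an example):
#
#   stats = Stats(session)
#   stats.total_notices            # total notices across all countries
#   stats.gender_ratio()           # {'Male': '.. %', 'Female': '.. %'}
#   stats.stats_by_country('us')   # per-country summary dict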
|
import numpy as np
import matplotlib.pyplot as plt
import matplotlib as mpl
import matplotlib.colors as colors
from scipy.integrate import cumtrapz, quad
from scipy.interpolate import interp1d
from scipy.stats import chi2
import PlottingTools as PT
import argparse
import os
#---------------
# MATPLOTLIB settings
mpl.rcParams.update({'font.size': 18,'font.family':'serif'})
mpl.rcParams['xtick.major.size'] = 7
mpl.rcParams['xtick.major.width'] = 1
mpl.rcParams['xtick.minor.size'] = 3
mpl.rcParams['xtick.minor.width'] = 1
mpl.rcParams['ytick.major.size'] = 7
mpl.rcParams['ytick.major.width'] = 1
mpl.rcParams['ytick.minor.size'] = 3
mpl.rcParams['ytick.minor.width'] = 1
mpl.rcParams['xtick.direction'] = 'in'
mpl.rcParams['ytick.direction'] = 'in'
mpl.rcParams['xtick.top'] = True
mpl.rcParams['ytick.right'] = True
mpl.rcParams['font.family'] = 'serif'
mpl.rc('text', usetex=True)
mpl.rcParams['legend.edgecolor'] = 'inherit'
#---------------
parser = argparse.ArgumentParser(description='...')
parser.add_argument('-m_x', '--m_x', help='DM mass in GeV', type=float, default = 0.2)
parser.add_argument('-hemisphere','--hemisphere', help='Hemisphere of the experiment (N or S)', type=str, default="N")
parser.add_argument('-sigtext', '--sigtext', type=str, default="")
args = parser.parse_args()
hemisphere = args.hemisphere
m_x = args.m_x #DM mass in GeV
m_str = str(int(m_x*1000)) #String of DM mass in MeV
if (hemisphere == "N"):
lat_text = "Northern Hemisphere ($46^\circ$N)"
elif (hemisphere == "S"):
lat_text = "Southern Hemisphere ($37^\circ$S)"
fig = plt.figure(figsize=(7,5))
ax = fig.add_subplot(111)
axes = []
axes.append(fig.add_subplot(131))
axes.append(fig.add_subplot(132))
axes.append(fig.add_subplot(133))
#axes.append(fig.add_subplot(144))
ax.spines['top'].set_color('none')
ax.spines['bottom'].set_color('none')
ax.spines['left'].set_color('none')
ax.spines['right'].set_color('none')
ax.tick_params(labelcolor='w', top=False, bottom=False, left=False, right=False)
file_labels = ["A", "B", "C"]#, "D"]
#sig_labels = [r"5 \times 10^{-35}", r"8 \times 10^{-35}", r"1 \times 10^{-34}", r"2 \times 10^{-34}"]
ratio_labels = [r"0.3 ", r"", r"3.1"]
m_list = np.geomspace(0.0581, 0.5, 1000)
rho_list = np.linspace(0.01, 1.0, 2000)
L_clip = -20
for i in range(3):
m, rho, L = np.loadtxt("../results/example_mx100_" + args.sigtext + "_" + hemisphere + "_" + file_labels[i] + ".txt", unpack=True, usecols=(0, 1,2))
L_grid = L.reshape(1000, 2000).T
cont = axes[i].contourf(m_list, rho_list, np.clip(L_grid, L_clip, 0.1), levels=np.linspace(L_clip, 0, 11))
irho,im = np.unravel_index(np.argmax(L_grid), (2000, 1000))
axes[i].plot([m_x, m_x], [0.01, 1.0], linestyle='--',color='w')
axes[i].axhline(0.4, linestyle='--',color='w')
axes[i].plot(m_list[im], rho_list[irho],"r^", mew=2, ms=4)
#axes[i].set_xscale('log')
if (i > 0):
axes[i].get_yaxis().set_ticklabels([])
axes[i].set_xlim(0.06, 0.160)
#axes[i].set_xticks([0.06, 0.07, 0.08, 0.09, 0.1, 0.2])#, 0.3, 0.4, 0.5])
axes[i].set_xticks(np.arange(0.06, 0.165, 0.01), minor=True)
axes[i].set_xticks([0.100, 0.150])
axes[i].set_xticklabels(["100", "150"])
#axes[i].set_xticklabels([" ","60", " ", " ", " ", "100", " ", " ", " ", " ", "150", " "])#, "", "0.4", " "])
axes[i].tick_params(axis='x', colors='white', labelsize=16, which='both')
axes[i].tick_params(axis='y', colors='white')
axes[i].tick_params(labelcolor='k')
axes[i].spines['top'].set_color('white')
axes[i].spines['bottom'].set_color('white')
axes[i].spines['left'].set_color('white')
axes[i].spines['right'].set_color('white')
#axes[i].text(0.3, 0.07, r'$' + sig_labels[i] + '\, \mathrm{cm}^2$', color='w', ha='center', va='center', fontsize=12)
#0.3, 0.07
axes[i].text(0.71, 0.85, r"$\sigma_{p}^{\mathrm{SI}} = " + ratio_labels[i] + " \,\sigma_{p}^{\mathrm{SI}}{}'$",color='w', ha='center', va='center', fontsize=12,transform=axes[i].transAxes)
axes[i].text(0.71, 0.93, file_labels[i], color='w', ha='center', va='center', fontsize=18, transform=axes[i].transAxes)
axes[1].text(0.05, 1.02,lat_text,fontsize=14)
cb_ax = fig.add_axes([0.94, 0.09, 0.02, 0.8])
cbar = fig.colorbar(cont, cax=cb_ax, label=r'$\Delta \log \mathcal{L}$')
ax.set_xlabel(r'$m_\chi$ [MeV]')
axes[0].set_ylabel(r'$\rho_\chi$ [GeV/cm$^3$]')
#plt.tight_layout()
plt.subplots_adjust(wspace=0.1)
plt.savefig("../plots/Likelihood_examples_mx100_" + args.sigtext + "_" + hemisphere + ".pdf", bbox_inches='tight')
plt.show()
|
import matplotlib.pyplot as plt
import numpy as np
import json
'''
This script plots multiple frame-by-frame results for PSNR and SSIM for testing video sequences.
The raw results files are generated from eval_video.m using MATLAB.
'''
### Three sets of results ###
# Loading result text files into JSON format
path1 = "test/vid4/alt_only_cur_downsize_20200916_ff_0.txt"
f1 = open(path1, 'r')
frameData1 = json.load(f1)
path2 = "test/vid4/alt_only_cur_downsize_20200916_obj2_HR_10_20201008.txt"
f2 = open(path2, 'r')
frameData2 = json.load(f2)
path3 = "test/vid4/alt_only_cur_downsize_20200916_info_recycle_ff_0_20201002.txt"
f3 = open(path3, 'r')
frameData3 = json.load(f3)
# Iterate through each video sequence
for (vid1, vid2, vid3) in zip(frameData1, frameData2, frameData3):
# Initialise result arrays
psnr_arr1 = []
ssim_arr1 = []
psnr_arr2 = []
ssim_arr2 = []
psnr_arr3 = []
ssim_arr3 = []
# Do not plot the final average of average result from the test since it is not a video sequence
if vid1 == 'average of average' or vid2 == 'average of average' or vid3 == 'average of average':
continue
#iterate through each frame
for (frames1, frames2, frames3) in zip(frameData1[vid1]['frame'][0],frameData2[vid2]['frame'][0], frameData3[vid3]['frame'][0]):
psnr1 = frameData1[vid1]['frame'][0][frames1][0]
ssim1 = frameData1[vid1]['frame'][0][frames1][1]
psnr_arr1.append(psnr1)
ssim_arr1.append(ssim1)
psnr2 = frameData2[vid2]['frame'][0][frames2][0]
ssim2 = frameData2[vid2]['frame'][0][frames2][1]
psnr_arr2.append(psnr2)
ssim_arr2.append(ssim2)
psnr3 = frameData3[vid3]['frame'][0][frames3][0]
ssim3 = frameData3[vid3]['frame'][0][frames3][1]
psnr_arr3.append(psnr3)
ssim_arr3.append(ssim3)
psnr_arr1 = np.array(psnr_arr1)
ssim_arr1 = np.array(ssim_arr1)
psnr_arr2 = np.array(psnr_arr2)
ssim_arr2 = np.array(ssim_arr2)
psnr_arr3 = np.array(psnr_arr3)
ssim_arr3 = np.array(ssim_arr3)
plt.figure(figsize=(14, 7))
plt.subplot(121)
plt.plot(range(0, len(psnr_arr1)), psnr_arr1, label='No Information Recycling')
plt.plot(range(0, len(psnr_arr2)), psnr_arr2, label='Periodic HR')
plt.plot(range(0, len(psnr_arr3)), psnr_arr3, label='Information Recycling')
plt.legend(loc='lower right')
plt.title('{}: PSNR Across Frames'.format(vid1))
plt.xlabel('Frame')
plt.ylabel('PSNR')
plt.subplot(122)
plt.plot(range(0, len(ssim_arr1)), ssim_arr1, label='No Information Recycling')
plt.plot(range(0, len(ssim_arr2)), ssim_arr2, label='Periodic HR')
plt.plot(range(0, len(ssim_arr3)), ssim_arr3, label='Information Recycling')
plt.title('{}: SSIM Across Frames'.format(vid1))
plt.legend(loc='lower right')
plt.xlabel('Frame')
plt.ylabel('SSIM')
plt.show()
|
"""Create category table.
Revision ID: 0c3924deb2b7
Revises: f68e6dafb977
Create Date: 2020-04-15 17:01:38.788610
"""
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
from modist.models.common import CategoryType
from alembic import op
# revision identifiers, used by Alembic.
revision = "0c3924deb2b7"
down_revision = "f68e6dafb977"
branch_labels = None
depends_on = None
def upgrade():
"""Pushes changes into the database."""
op.create_table(
"category",
sa.Column(
"created_at",
sa.DateTime(timezone=True),
server_default=sa.text("now()"),
nullable=False,
),
sa.Column(
"updated_at",
sa.DateTime(timezone=True),
server_default=sa.text("now()"),
nullable=False,
),
sa.Column("is_active", sa.Boolean(), server_default="true", nullable=False),
sa.Column(
"id",
postgresql.UUID(as_uuid=True),
server_default=sa.text("uuid_generate_v4()"),
nullable=False,
),
sa.Column("parent_id", postgresql.UUID(as_uuid=True), nullable=True),
sa.Column("type", sa.Enum(CategoryType), nullable=False),
sa.Column("name", sa.String(length=64), nullable=False),
sa.Column("description", sa.Text(), nullable=True),
sa.Column("depth", sa.Integer(), server_default="0", nullable=False),
sa.Column(
"lineage",
postgresql.ARRAY(postgresql.UUID(as_uuid=True)),
server_default="{}",
nullable=False,
),
sa.ForeignKeyConstraint(["parent_id"], ["category.id"], ondelete="cascade"),
sa.PrimaryKeyConstraint("id"),
sa.UniqueConstraint("parent_id", "name", "type"),
)
op.create_refresh_updated_at_trigger("category")
op.create_refresh_depth_and_lineage_trigger("category")
def downgrade():
"""Reverts changes performed by upgrade()."""
op.drop_refresh_depth_and_lineage_trigger("category")
op.drop_refresh_updated_at_trigger("category")
op.drop_table("category")
sa.Enum(CategoryType).drop(bind=op.get_bind())
|
import scrapy
from tobber.items import Torrent
from tobber.spiders.indexer import Indexer
class Rarbg(Indexer):
name = "rarbg"
def start_requests(self):
        print('Rarbg is scraping...')
self.site = "https://rarbg.is"
urls = []
search = self.site + "/torrents.php?search="
for title in self.title:
urls.append(search + title.replace('%20','+'))
for url in urls:
yield scrapy.Request(url=url, callback=self.parse)
def parse(self, response):
#xPath rules
table = response.xpath('//table[@class="lista2t"]')
row = table.xpath('./tr[@class="lista2"]')
for i in range(len(row)):
content = row[i].xpath('./td[@class="lista"]')
title = content[1].xpath('./a/@title')
href = content[1].xpath('./a/@href')
size = content[3].xpath('./text()')
seeders = content[4].xpath('./font/text()')
leechers = content[5].xpath('./text()')
yield Torrent(
title = self.extract_data(title),
size = self.extract_data(size),
seeders = self.extract_data(seeders),
leechers = self.extract_data(leechers),
href = self.site + self.extract_data(href),
site = self.name,
counter = i
)
|
#!/usr/bin/env python
import os
import math
import numpy as np
import cst.sord
# FIXME: prestress not correct
dx = 100.0
dt = dx / 12500.0
nx = int(16500.0 / dx + 21.5)
ny = int(16500.0 / dx + 21.5)
nz = int(12000.0 / dx + 120.5)
nt = int(8.0 / dt + 1.5)
alpha = math.sin(math.pi / 3.0)
prm = {
'shape': [nx, ny, nz, nt],
'delta': [dx, dx, dx, dt],
'nproc3': [1, 1, 2],
'bc1': ['-node', 'free', 'free'],
'bc2': ['pml', 'pml', 'free'],
'n1expand': [0, 0, 50],
'n2expand': [0, 0, 50],
'affine': [
[1.0, 0.0, 0.0],
[0.0, alpha, 0.0],
[0.0, 0.5, 1.0],
],
'hourglass': [1.0, 2.0],
'rho': [2700.0],
'vp': [5716.0],
'vs': [3300.0],
'faultnormal': '+z',
'co': [200000.0],
'dc': [0.5],
'mud': [0.1],
'sxx': [([0, []], '=>', 'sxx.bin')],
'syy': [([0, []], '=>', 'syy.bin')],
'szz': [([0, []], '=>', 'szz.bin')],
}
# hypocenter
y = 12000.0 / dx
z = nz // 2 - 0.5
prm['hypocenter'] = hypo = [0.0, y, z]
# near-fault volume
i = int(15000.0 / dx + 0.5)
l0 = int(z - 3000.0 / dx + 0.5)
l1 = int(z + 3000.0 / dx + 0.5)
prm['gam'] = [0.2, ([[i], [i], [l0, l1]], '=', 0.02)]
prm['mus'] = [10000.0, ([[i+1], [i+1]], '=', 0.7)]
prm['trup'] = [([[i+1], [i+1], -1], '=>', 'trup.bin')]
# nucleation
k = int(hypo[1])
m = int(1500.0 / dx + 0.5)
n = int(1500.0 / dx + 1.5)
prm['mus'] += [
([[n], [k-n, k+n+1]], '=', 0.66),
([[n], [k-m, k+m+1]], '=', 0.62),
([[m], [k-n, k+n+1]], '=', 0.62),
([[m], [k-m, k+m+1]], '=', 0.54),
]
# slip, slip velocity, and shear traction time histories
for j, k in [
[0, 0],
[45, 0],
[120, 0],
[0, 15],
[0, 30],
[0, 45],
[0, 75],
[45, 75],
[120, 75],
[0, 120],
]:
x = j * 100.0 / dx
y = k * 100.0 / dx
for f in (
'sux', 'suy', 'suz',
'svx', 'svy', 'svz',
'tsx', 'tsy', 'tsz', 'tnm'
):
s = 'faultst%03ddp%03d-%s.bin' % (j, k, f)
if f not in prm:
prm[f] = []
prm[f] += [([x, y, []], '.>', s)]
# displacement and velocity time histories
for j, k, l in [
[0, 0, -30],
[0, 0, -20],
[0, 0, -10],
[0, 0, 10],
[0, 0, 20],
[0, 0, 30],
[0, 3, -10],
[0, 3, -5],
[0, 3, 5],
[0, 3, 10],
[120, 0, -30],
[120, 0, 30],
]:
x = j * 100.0 / dx
y = k * 100.0 / dx / alpha
z = l * 100.0 / dx + hypo[2]
for f in 'ux', 'uy', 'uz', 'vx', 'vy', 'vz':
s = 'body%03dst%03ddp%03d-%s.bin' % (j, k, l, f)
s = s.replace('body-', 'body-0')
if f not in prm:
prm[f] = []
prm[f] += [([x, y, z, []], '.>', s)]
# pre-stress
d = np.arange(ny) * alpha * dx
x = d * 9.8 * -1147.16
y = d * 9.8 * -1700.0
z = d * 9.8 * -594.32
k = int(13800.0 / dx + 1.5)
x[k:] = y[k:]
z[k:] = y[k:]
d = 'repo/TPV12'
os.mkdir(d)
os.chdir(d)
x.astype('f').tofile('sxx.bin')
y.astype('f').tofile('syy.bin')
z.astype('f').tofile('szz.bin')
cst.sord.run(prm)
|
def plug_element_count(plug):
lbracket = plug.rfind("[")
if lbracket != -1:
rbracket = plug.rfind("]")
if rbracket != -1 and lbracket < rbracket:
slicestr = plug[lbracket + 1:rbracket]
bounds = slicestr.split(":")
if len(bounds) > 1:
return int(bounds[1]) - int(bounds[0]) + 1
return 1
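# Illustrative behaviour (plug strings below are hypothetical):
#   plug_element_count("node.attr[0:3]")  -> 4   (slice bounds are inclusive)
#   plug_element_count("node.attr[2]")    -> 1   (single index, no ':' slice)
#   plug_element_count("node.attr")       -> 1   (no brackets at all)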
class MayaParserBase(object):
def on_requires_maya(self, version):
pass
def on_requires_plugin(self, plugin, version):
pass
def on_file_info(self, key, value):
pass
def on_current_unit(self, angle, linear, time):
pass
def on_file_reference(self, path):
pass
def on_create_node(self, nodetype, name, parent):
pass
def on_select(self, name):
pass
def on_add_attr(self, node, name):
pass
def on_set_attr(self, name, value, type):
pass
def on_set_attr_flags(self, plug, keyable=None, channelbox=None, lock=None):
pass
def on_connect_attr(self, src_plug, dst_plug):
pass
|
import json
import csv
import pymysql
import re
import os.path
import cloudscraper
import http.client
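# Relax http.client's header-name validation so the HTTP/2-style pseudo-headers
# (":authority", ":method", ":scheme") used below are not rejected.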
http.client._is_legal_header_name = re.compile(rb'[^\s][^:\r\n]*').fullmatch
scraper = cloudscraper.create_scraper()
def main():
print("========== Starting ==========")
cfg = read_config()
    db = pymysql.connect(host=cfg["SQL Server IP"], user=cfg["SQL username"], password=cfg["SQL Password"], database="curse_analytics")
cursor = db.cursor()
r = get(cfg, "https://authors.curseforge.com/dashboard/projects?filter-games=&filter-project-status=4")
text = r.text
for group in re.findall("<a.*href=\"/dashboard/project/(.*)\"><span>Download</span></a>", text):
print(group)
got = get(cfg, "https://authors.curseforge.com/dashboard/project/" + group)
for row in csv.DictReader(got.text.splitlines(), delimiter=','):
cursor.execute("select statDate, projectId from stats where statDate=STR_TO_DATE(\"{}\", '%Y-%m-%d') and projectId={}".format(row["Date"], row["Project ID"]))
if cursor.rowcount == 0:
cursor.execute("insert into stats values(STR_TO_DATE(\"{}\", '%Y-%m-%d'), {}, \"{}\",{},{},{},{},{},{})".format(row["Date"], row["Project ID"], row["Name"], row["Points"], row["Historical Download"], row["Daily Download"], row["Daily Unique Download"], row["Daily Twitch App Download"], row["Daily Curse Forge Download"]))
db.commit()
print("============ Done ============")
def read_config():
if not os.path.isfile("config.json"):
print("=== Generating Config File ===")
with open('config.json', 'w') as f:
json.dump({"SQL Server IP": "127.0.0.1", "SQL username": "username", "SQL Password": "password", "Cobalt Session": "session"}, f)
print(" Please edit config and retry ")
exit(0)
with open('config.json') as config_file:
return json.load(config_file)
def get(cfg, url):
return scraper.get(url, headers={
"referer": "https://authors.curseforge.com/store/transactions",
"user-agent": 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) '
'twitch-desktop-electron-platform/1.0.0 Chrome/66.0.3359.181 Twitch/3.0.16 Safari/537.36 '
'desklight/8.42.2',
"accept-language": "en-GB,en;q=0.9,en-US;q=0.8,de;q=0.7",
"accept-encoding": "gzip, deflate, br",
"accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3",
":authority": "authors.curseforge.com",
":method": "GET",
":scheme": "https",
}, cookies={
"CobaltSession": cfg["Cobalt Session"],
})
if __name__ == '__main__':
main()
|
# Generated by Django 3.0.9 on 2020-10-06 13:24
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('articles', '0003_auto_20201005_1613'),
]
operations = [
migrations.AddField(
model_name='article',
name='image',
field=models.ImageField(blank=True, null=True, upload_to='article_images/'),
),
]
|
import pytest
from flask import Flask
from flask import jsonify
from flask_jwt_extended import create_access_token
from flask_jwt_extended import create_refresh_token
from flask_jwt_extended import jwt_required
from flask_jwt_extended import JWTManager
from tests.utils import get_jwt_manager
from tests.utils import make_headers
@pytest.fixture(scope="function")
def app():
app = Flask(__name__)
app.config["JWT_SECRET_KEY"] = "foobarbaz"
JWTManager(app)
@app.route("/protected", methods=["GET"])
@jwt_required()
def access_protected():
return jsonify(foo="bar")
@app.route("/refresh_protected", methods=["GET"])
@jwt_required(refresh=True)
def refresh_protected():
return jsonify(foo="bar")
return app
@pytest.mark.parametrize("blocklist_type", [["access"], ["refresh", "access"]])
def test_non_blocklisted_access_token(app, blocklist_type):
jwt = get_jwt_manager(app)
@jwt.token_in_blocklist_loader
def check_blocklisted(jwt_header, jwt_data):
assert jwt_header["alg"] == "HS256"
assert jwt_data["sub"] == "username"
return False
with app.test_request_context():
access_token = create_access_token("username")
test_client = app.test_client()
response = test_client.get("/protected", headers=make_headers(access_token))
assert response.get_json() == {"foo": "bar"}
assert response.status_code == 200
@pytest.mark.parametrize("blocklist_type", [["access"], ["refresh", "access"]])
def test_blocklisted_access_token(app, blocklist_type):
jwt = get_jwt_manager(app)
@jwt.token_in_blocklist_loader
def check_blocklisted(jwt_header, jwt_data):
return True
with app.test_request_context():
access_token = create_access_token("username")
test_client = app.test_client()
response = test_client.get("/protected", headers=make_headers(access_token))
assert response.get_json() == {"msg": "Token has been revoked"}
assert response.status_code == 401
@pytest.mark.parametrize("blocklist_type", [["refresh"], ["refresh", "access"]])
def test_non_blocklisted_refresh_token(app, blocklist_type):
jwt = get_jwt_manager(app)
@jwt.token_in_blocklist_loader
def check_blocklisted(jwt_header, jwt_data):
return False
with app.test_request_context():
refresh_token = create_refresh_token("username")
test_client = app.test_client()
response = test_client.get(
"/refresh_protected", headers=make_headers(refresh_token)
)
assert response.get_json() == {"foo": "bar"}
assert response.status_code == 200
@pytest.mark.parametrize("blocklist_type", [["refresh"], ["refresh", "access"]])
def test_blocklisted_refresh_token(app, blocklist_type):
jwt = get_jwt_manager(app)
@jwt.token_in_blocklist_loader
def check_blocklisted(jwt_header, jwt_data):
return True
with app.test_request_context():
refresh_token = create_refresh_token("username")
test_client = app.test_client()
response = test_client.get(
"/refresh_protected", headers=make_headers(refresh_token)
)
assert response.get_json() == {"msg": "Token has been revoked"}
assert response.status_code == 401
def test_custom_blocklisted_message(app):
jwt = get_jwt_manager(app)
@jwt.token_in_blocklist_loader
def check_blocklisted(jwt_header, jwt_data):
return True
@jwt.revoked_token_loader
def custom_error(jwt_header, jwt_data):
assert jwt_header["alg"] == "HS256"
assert jwt_data["sub"] == "username"
return jsonify(baz="foo"), 404
with app.test_request_context():
access_token = create_access_token("username")
test_client = app.test_client()
response = test_client.get("/protected", headers=make_headers(access_token))
assert response.get_json() == {"baz": "foo"}
assert response.status_code == 404
|
import xlrd
from string import lower
ddr_fpga_list = ['FPGA00', 'FPGA01', 'FPGA02', 'FPGA03', 'FPGA30', 'FPGA31', 'FPGA32', 'FPGA33']
class lpfrow(object):
def __init__(self, schematic, port, ball, bank):
self.schematic = schematic
self.port = port
self.ball = ball
self.bank = bank
self.commented = True
def render(self):
if self.commented:
precomment = "# "
else:
precomment = ""
return precomment + "LOCATE COMP \"" + str(self.port) + "\" SITE \"" + str(self.ball) + "\"; # " + str(self.schematic)
def port_prefix(self):
cut = self.port.index("[")
return self.port[:cut]
# returns a unique number for hs ls
def hs_enum(self):
if 'hs' in lower(self.port):
return 1
if 'ls' in lower(self.port):
return 2
# returns a unique number for n,e,s,w
def dir_enum(self):
# trim off hs, ls to avoid detection of south below
trunc = self.port
if trunc.startswith('HS_'):
trunc = trunc[3:]
if trunc.startswith('LS_'):
trunc = trunc[3:]
# check long names first so that E doesn't trigger in WEST
if 'NORTH' in trunc:
return 3
if 'EAST' in trunc:
return 4
if 'SOUTH' in trunc:
return 5
if 'WEST' in trunc:
return 6
# check singles if we didn't match fulls above
if 'N' in trunc:
return 3
if 'E' in trunc:
return 4
if 'S' in trunc:
return 5
if 'W' in trunc:
return 6
# def unique_enum(self):
# return self.hs_enum() + self.dir_enum()*10
class ddrrow(lpfrow):
def __init__(self):
lpfrow.__init__(self, None, None, None, None)
self.sdrampin = None
self.iotype = None
self.slewrate = None
self.termination = None
self.vref = None
self.diffresistor = None
def render_location(self):
finalcomment = str(self.sdrampin) + ", " + self.iotype + ", " + self.slewrate + ", " + str(self.termination) + ", " + str(self.diffresistor)
if self.commented:
precomment = "# "
else:
precomment = ""
return precomment + "LOCATE COMP \"" + self.port + "\" SITE \"" + self.ball + "\"; # " + finalcomment
class mibrow(ddrrow):
def __init__(self):
ddrrow.__init__(self)
def parse_mib_sheet(sheetname, workbook):
sheet = workbook.sheet_by_name(sheetname)
maxrows = len(sheet.col(0))
mibpins = []
ddr_fpga_enabled = []
all_configs = []
for lookrow, lookcol in [(0,4),(0,5),(0,6)]:
cell = sheet.cell(lookrow, lookcol).value
this_config = {}
this_config['name'] = cell
this_config['col'] = lookcol
if 'CONFIG' in cell:
this_config['parsed_name'] = ['CONFIG']
else:
this_config['parsed_name'] = cell.split(', ')
# this_config['balls'] = []
#
# for lookdatarow in range(lookrow+1, maxrows):
# datacell = sheet.cell(lookdatarow, lookcol).value
# if xlrd.empty_cell.value == datacell:
# break
# this_config['balls'].append(datacell)
# when all done
all_configs.append(this_config)
o2 = {}
keymap = {}
keymap[0] = 'schematic'
keymap[1] = 'port'
keymap[2] = 'iotype'
keymap[3] = 'slewrate'
for cfg in all_configs:
for fpga in cfg['parsed_name']:
# return all_configs
mibpins = []
for row in range(1, 31):
# a = sheet.cell(row, 0).value
mpin = mibrow()
                # Set the col in which to read the .ball from
ballcell = sheet.cell(row, cfg['col']).value
mpin.ball = ballcell
anyfound = False
for k in keymap:
v = keymap[k]
cellval = sheet.cell(row, k).value
if xlrd.empty_cell.value == cellval:
continue
else:
anyfound = True
mpin.__setattr__(v, cellval)
if anyfound:
if mpin.port is None:
mpin.commented = True
mibpins.append(mpin)
o2[fpga] = mibpins
return all_configs, o2
def parse_ddr_sheet(sheetname, workbook):
sheet = workbook.sheet_by_name(sheetname)
maxrows = len(sheet.col(0))
ddrpins = []
keymap = {}
keymap[0] = 'port'
keymap[1] = 'ball'
keymap[2] = 'bank'
keymap[3] = 'sdrampin'
keymap[4] = 'iotype'
keymap[5] = 'slewrate'
keymap[6] = 'termination'
keymap[7] = 'vref'
keymap[8] = 'diffresistor'
ddr_fpga_enabled = []
for row in range(0, maxrows):
a = sheet.cell(row, 0).value
# b = sheet.cell(row, 1).value
if 'fpga00' in lower(a):
ddr_fpga_enabled = a.split(', ')
if 'connection between' in lower(a) or 'fpga00' in lower(a) or 'fpga top level' in lower(a):
continue
ddrpin = ddrrow()
anyfound = False
for k in keymap:
v = keymap[k]
cellval = sheet.cell(row, k).value
if xlrd.empty_cell.value == cellval:
continue
else:
anyfound = True
ddrpin.__setattr__(v, cellval)
if anyfound:
# push ddrpin object
ddrpins.append(ddrpin)
return ddrpins, ddr_fpga_enabled
#
#
#
#
# if xlrd.empty_cell.value in [sheet.cell(row, 0).value, sheet.cell(row, 1).value, sheet.cell(row, 2).value, sheet.cell(row, 3).value]:
# continue
# open file
def parse_fpga(sheetname, workbook):
sheet = workbook.sheet_by_name(sheetname)
maxrows = len(sheet.col(0))
pins = []
for row in range(0, maxrows):
if xlrd.empty_cell.value in [sheet.cell(row, 0).value, sheet.cell(row, 1).value, sheet.cell(row, 2).value, sheet.cell(row, 3).value]:
continue
# pull 4 potential columns
a = sheet.cell(row, 0).value
b = sheet.cell(row, 1).value
c = sheet.cell(row, 2).value
d = sheet.cell(row, 3).value
rowgood = True
if 'connection between' in lower(a) or 'schematic' in lower(a):
rowgood = False
print row
if rowgood:
pins.append(lpfrow(a, b, c, d))
return pins
def write_fpga(sheetname, pins, all_mib_pins, ddrpins=None):
# sheet = workbook.sheet_by_name(sheetname)
if sheetname.startswith("FPGA"):
fnameout = "outputs/" + "cs" + sheetname.lstrip("FPGA") + "_top.lpf"
else:
fnameout = "outputs/" + sheetname + "_output.lpf"
prev_pin_enum = -1
group_first_pin = []
group_names = []
group_name_suffix = "_GROUP"
ls_type = "LVCMOS15"
hs_type = "SSTL15_I"
skip_ddr = True
# write file
with open(fnameout, 'w') as f:
# write out header
with open('common_header_snip.txt', 'r') as content_file:
f.write(content_file.read())
f.write("######################## CS" + sheetname.lstrip("FPGA") + " configuration only ########################")
f.write('\n')
# write out Groups for Normal Pins
for p in pins:
# print p.hs_enum(), p.dir_enum()
# print p.port_prefix()
if p.port_prefix() not in group_names:
if skip_ddr and 'ddr' in lower(p.port_prefix()):
continue
group_names.append(p.port_prefix())
group_first_pin.append(p)
for i, name in enumerate(group_names):
firstpin = group_first_pin[i]
full_group_name = name + group_name_suffix
f.write('DEFINE PORT GROUP "' + full_group_name + '" "' + name + '[*]";\n')
if firstpin.hs_enum() == 1:
speedtype = hs_type
else:
speedtype = ls_type
f.write('IOBUF GROUP "' + full_group_name + '" IO_TYPE=' + speedtype + ';\n')
# write out normal pins
for p in pins:
if skip_ddr and 'ddr' in lower(p.port_prefix()):
continue
f.write(p.render() + "\n")
# mandatory write out MIB
assert sheetname in all_mib_pins
mibpins = all_mib_pins[sheetname]
f.write('\n\n######################## MIB ########################\n')
with open('mib_groups_snip.txt', 'r') as content_file:
f.write(content_file.read())
f.write('\n')
for p in mibpins:
f.write(p.render() + "\n")
# optional write out ddr pins
if ddrpins is not None:
f.write('\n\n######################## DDR ########################\n')
with open('ddr_groups_snip.txt', 'r') as content_file:
f.write(content_file.read())
f.write('\n')
for p in ddrpins:
f.write(p.render_location() + "\n")
def main():
workbook = xlrd.open_workbook('rev2_array_fpga_pin_mappings_new.xls')
ddr, ddr_enabled = parse_ddr_sheet('DDR SDRAM', workbook)
junk, all_mib_pins = parse_mib_sheet('MIB BUS', workbook)
fpgas = {}
fpgas['FPGA00'] = None
fpgas['FPGA01'] = None
fpgas['FPGA02'] = None
fpgas['FPGA03'] = None
fpgas['FPGA10'] = None
fpgas['FPGA11'] = None
fpgas['FPGA12'] = None
fpgas['FPGA13'] = None
fpgas['FPGA20'] = None
fpgas['FPGA21'] = None
fpgas['FPGA22'] = None
fpgas['FPGA23'] = None
fpgas['FPGA30'] = None
fpgas['FPGA31'] = None
fpgas['FPGA32'] = None
fpgas['FPGA33'] = None
for key in fpgas:
pins = parse_fpga(key, workbook)
enabled_for_us = key in ddr_enabled
if enabled_for_us:
# write it with ddr
write_fpga(key, pins, all_mib_pins, ddr)
else:
# write it solo
write_fpga(key, pins, all_mib_pins)
# save it
fpgas[key] = pins
if __name__ == '__main__':
main()
|
# -*- coding: UTF-8 -*-
# Classifier with a built-in Naive Bayes model; files used: stopwords.txt, clasificador.pickle, politicos-historico.txt, medios-historico.txt, politicos-historico-recuperados.json, medios-historico-recuperados.json
# Argument 1: list of retrieved users
# Argument 2: list of all users
import json
from nltk.stem import SnowballStemmer
import os
import pickle
import re
import sys
from textblob.classifiers import NaiveBayesClassifier
# ============ UTILITIES ================
# Set up the Spanish stemmer
stemmer = SnowballStemmer("spanish")
# Build a stopword dictionary from a file in the util directory
stopwords = {}
archivoStop = open('util/stopwords.txt', 'r')
for stopw in archivoStop:
    stopwords[stopw.strip()] = ''
# For replacing accented characters with their unaccented equivalents
acentos = {'á': 'a', 'é': 'e', 'í': 'i', 'ó': 'o', 'ú': 'u', 'à': 'a', 'è': 'e', 'ì': 'i', 'ò': 'o', 'ù': 'u'}
# =======================================
# ============ NORMALIZATION ============
# =======================================
def textNormalization(dictUsuarios):
    # Container for the normalized entries
    normalizados = {}
    # Walk the dictionary of user descriptions
    for recListado in dictUsuarios:
        transform = dictUsuarios[recListado].strip()  # Drop leading and trailing spaces
        transform = transform.lower()  # Convert to lowercase
        # Remove URLs
        URLless_string = re.sub(r'\w+:\/{2}[\d\w-]+(\.[\d\w-]+)*(?:(?:\/[^\s/]*))*', '', transform)
        transform = URLless_string
        # Replace separators (arbitrary choice)
        transform = transform.replace('/', ' ').replace('-', ' ')
        # Replace accented characters
        for acento in acentos:
            transform = transform.replace(acento, acentos[acento])
        # Remove non-alphabetic characters
        for caracter in transform:
            if not caracter.isalpha() and not caracter == ' ':
                transform = transform.replace(caracter, '')
        # Split the string so we can walk it and drop stopwords
        transform = transform.split()
        for palabra in transform:
            if palabra in stopwords:
                transform[transform.index(palabra)] = ''  # If the word is in the stopword dictionary, drop it
        # Stemming: the SnowballStemmer reduces each word to its root, removing plurals and other inflections:
        for palabra in transform:
            transform[transform.index(palabra)] = stemmer.stem(palabra)
        transform = list(set(transform))  # Remove duplicate words from the list
        transform = ' '.join(transform)  # Re-join the string
        transform = ' '.join(transform.split())  # Split and re-join to remove extra spaces
        if transform != '':
            normalizados[recListado] = transform
    return normalizados
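# Illustrative example (hypothetical input; the exact output depends on the
# stopword list and the stemmer, and word order varies because duplicates are
# removed via set()):
#   textNormalization({'user1': 'Diputado de la Región. Más info: http://x.io'})
#   might return something like {'user1': 'region diput info'}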
if os.path.isfile('util/clasificador.pickle'):
    print('Classifier already trained, loading...')
    f = open('util/clasificador.pickle', 'rb')
    clasificador = pickle.load(f)
    f.close()
    print('Classifier loaded :D')
else:
    # ================================================
    # ============ LOADING DESCRIPTIONS ==============
    # ================================================
    print('Classifier not trained yet, training (may take a while)...\nLoading required files...')
    polRuta = 'util/politicos-historico-recuperados.json'  # Politicians file (descriptions)
    medRuta = 'util/medios-historico-recuperados.json'  # Media outlets file (descriptions)
    polArchivo = open(polRuta, 'r')
    politJson = json.load(polArchivo)
    medArchivo = open(medRuta, 'r')
    mediosJson = json.load(medArchivo)
    # Build a dictionary with user names as keys and descriptions as values
    polDescripciones = {}
    for linea in politJson:
        polDescripciones[linea['name']] = linea['description'].encode('UTF-8')
    medDescripciones = {}
    for linea in mediosJson:
        medDescripciones[linea['name']] = linea['description'].encode('UTF-8')
    print('Normalizing descriptions...')
    # Build dictionaries of user = normalized_description
    polNormalizados = textNormalization(polDescripciones)
    medNormalizados = textNormalization(medDescripciones)
    # ciuNormalizados = textNormalization(ciuDescripciones)
    print('Descriptions normalized.\nTraining classifier...')
    # ============ Training ============
    training = []
    for recNormalizados in polNormalizados:
        training.append((polNormalizados[recNormalizados], 'politico'))
    for recNormalizados in medNormalizados:
        training.append((medNormalizados[recNormalizados], 'medio'))
    # for recNormalizados in ciuNormalizados:
    #     training.append((ciuNormalizados[recNormalizados], 'ciudadano'))
    clasificador = NaiveBayesClassifier(training)
    f = open('util/clasificador.pickle', 'wb')
    pickle.dump(clasificador, f, -1)
    f.close()
    print('Classifier trained :D\n')
print('\nNormalizing the input text...')
porClasificar = {}
f = open(sys.argv[1], 'r')
classifyJson = json.load(f)
for item in classifyJson:
porClasificar[item['name']] = item['description'].encode('UTF-8')
clasificaEsto = textNormalization(porClasificar)
print('Text normalized.')
historicos = {}
fPolH = open('util/politicos-historico.txt', 'r')
for item in fPolH:
historicos[item.strip()] = 'politico'
fMedH = open('util/medios-historico.txt', 'r')
for item in fMedH:
historicos[item.strip()] = 'medio'
print('\nClassifying:')
clasifSalida = {}
for item in clasificaEsto:
if item in historicos:
clasifSalida[item] = historicos[item]
else:
prob_dist = clasificador.prob_classify(clasificaEsto[item])
if round(prob_dist.prob(prob_dist.max()), 3) == 1:
clasifSalida[item] = prob_dist.max()
else:
clasifSalida[item] = 'ciudadano'
print('Reading the full list of users...')
fUserList = open(sys.argv[2], 'r')
for item in fUserList:
item = item.strip()
if not item in clasifSalida:
if item in historicos:
clasifSalida[item] = historicos[item]
else:
clasifSalida[item] = 'ciudadano'
print('Printing output...')
clasifExport = []
for item in clasifSalida:
exportThis = {}
exportThis['name'] = item
exportThis['category'] = clasifSalida[item]
clasifExport.append(exportThis)
# Write the output file once execution finishes
archivoRuta = sys.argv[2].split('.')
archivoRuta = archivoRuta[0] + '_clasifsalida.json'
with open(archivoRuta, 'w') as f:
json.dump(clasifExport, f, indent=4, ensure_ascii=False)
|
# -*- coding: utf-8 -*-
# Copyright 2020 The PsiZ Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Weighted Minkowski distance layer.
Classes:
WeightedMinkowski: A TensorFlow layer for computing weighted
minkowski distance.
"""
import tensorflow as tf
from tensorflow.python.keras import backend as K
import psiz.keras.constraints as pk_constraints
from psiz.keras.layers.ops.core import wpnorm
@tf.keras.utils.register_keras_serializable(
package='psiz.keras.layers', name='WeightedMinkowski'
)
class WeightedMinkowski(tf.keras.layers.Layer):
"""Weighted Minkowski distance."""
def __init__(self, rho_initializer=None, **kwargs):
"""Initialize.
Arguments:
rho_initializer (optional): Initializer for rho.
"""
super(WeightedMinkowski, self).__init__(**kwargs)
if rho_initializer is None:
rho_initializer = tf.random_uniform_initializer(1.01, 3.)
self.rho_initializer = tf.keras.initializers.get(rho_initializer)
self.rho = self.add_weight(
shape=[], initializer=self.rho_initializer,
trainable=self.trainable, name="rho", dtype=K.floatx(),
constraint=pk_constraints.GreaterThan(min_value=1.0)
)
self.theta = {'rho': self.rho}
def call(self, inputs):
"""Call.
Arguments:
inputs:
z_0: A tf.Tensor denoting a set of vectors.
shape = (batch_size, [n, m, ...] n_dim)
z_1: A tf.Tensor denoting a set of vectors.
shape = (batch_size, [n, m, ...] n_dim)
w: The weights allocated to each dimension
in a weighted minkowski metric.
shape = (batch_size, [n, m, ...] n_dim)
Returns:
shape = (batch_size, [n, m, ...])
"""
z_0 = inputs[0] # Query.
z_1 = inputs[1] # References.
w = inputs[2] # Dimension weights.
        # Expand rho to shape=(batch_size, [n, m, ...]).
rho = self.rho * tf.ones(tf.shape(z_0)[0:-1])
# Weighted Minkowski distance.
x = z_0 - z_1
d_qr = wpnorm(x, w, rho)
d_qr = tf.squeeze(d_qr, [-1])
return d_qr
def get_config(self):
"""Return layer configuration."""
config = super().get_config()
config.update({
'rho_initializer': tf.keras.initializers.serialize(
self.rho_initializer
)
})
return config
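# --- Minimal usage sketch (not part of the original module) -------------------
# Assumes psiz is installed and follows the shapes documented in `call` above:
# z_0, z_1 and w all share the shape (batch_size, n_dim).
if __name__ == "__main__":
    _layer = WeightedMinkowski()
    _z0 = tf.random.uniform([4, 3])
    _z1 = tf.random.uniform([4, 3])
    _w = tf.ones([4, 3])
    _d = _layer([_z0, _z1, _w])  # expected shape: (4,)
    print(_d.shape)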
|
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers, Model
def dped_resnet(input_shape, instance_norm = True):
inputs = keras.Input(shape=input_shape)
conv_0 = layers.Conv2D(64, 9, padding="same", activation=tf.nn.leaky_relu, \
kernel_initializer=tf.keras.initializers.TruncatedNormal(stddev=0.01, seed=1), \
bias_initializer=tf.keras.initializers.constant(0.01))(inputs)
conv_b1 = _dped_resnet_block(conv_0, instance_norm)
conv_b2 = _dped_resnet_block(conv_b1, instance_norm)
conv_b3 = _dped_resnet_block(conv_b2, instance_norm)
conv_b4 = _dped_resnet_block(conv_b3, instance_norm)
conv_5 = layers.Conv2D(64, 9, padding="same", activation=tf.nn.leaky_relu, \
kernel_initializer=tf.keras.initializers.TruncatedNormal(stddev=0.01, seed=1), \
bias_initializer=tf.keras.initializers.constant(0.01))(conv_b4)
conv_6 = layers.Conv2D(64, 9, padding="same", activation=tf.nn.leaky_relu, \
kernel_initializer=tf.keras.initializers.TruncatedNormal(stddev=0.01, seed=1), \
bias_initializer=tf.keras.initializers.constant(0.01))(conv_5)
tconv_7 = layers.Conv2DTranspose(64, 3, (2,2), padding="same", activation=tf.nn.leaky_relu, \
kernel_initializer=tf.keras.initializers.TruncatedNormal(stddev=0.01, seed=1), \
bias_initializer=tf.keras.initializers.constant(0.01))(conv_6)
conv_8 = layers.Conv2D(3, 9, padding="same", activation=tf.nn.tanh, \
kernel_initializer=tf.keras.initializers.TruncatedNormal(stddev=0.01, seed=1), \
bias_initializer=tf.keras.initializers.constant(0.01))(tconv_7) * 0.58 + 0.5
return Model(inputs=inputs, outputs=conv_8, name="dped_resnet")
def _dped_resnet_block(input, instance_norm):
x = layers.Conv2D(64, 3, padding="same", activation=tf.nn.leaky_relu, \
kernel_initializer=tf.keras.initializers.TruncatedNormal(stddev=0.01, seed=1), \
bias_initializer=tf.keras.initializers.constant(0.01))(input)
if instance_norm:
x = layers.BatchNormalization()(x)
x = layers.Conv2D(64, 3, padding="same", activation=tf.nn.leaky_relu, \
kernel_initializer=tf.keras.initializers.TruncatedNormal(stddev=0.01, seed=1), \
bias_initializer=tf.keras.initializers.constant(0.01))(x)
if instance_norm:
x = layers.BatchNormalization()(x)
return layers.add([input, x])
def dped_adversarial(input_shape):
inputs = keras.Input(shape=input_shape)
conv1 = keras.layers.Conv2D(48, 11, (4,4), padding="same", activation=tf.nn.leaky_relu, \
kernel_initializer=tf.keras.initializers.TruncatedNormal(stddev=0.01, seed=1), \
bias_initializer=tf.keras.initializers.constant(0.01))(inputs)
conv2 = keras.layers.Conv2D(128, 5, (2,2), padding="same", activation=tf.nn.leaky_relu, \
kernel_initializer=tf.keras.initializers.TruncatedNormal(stddev=0.01, seed=1), \
bias_initializer=tf.keras.initializers.constant(0.01))(conv1)
conv2 = layers.BatchNormalization()(conv2)
conv3 = keras.layers.Conv2D(192, 3, (1,1), padding="same", activation=tf.nn.leaky_relu, \
kernel_initializer=tf.keras.initializers.TruncatedNormal(stddev=0.01, seed=1), \
bias_initializer=tf.keras.initializers.constant(0.01))(conv2)
conv3 = layers.BatchNormalization()(conv3)
conv4 = keras.layers.Conv2D(192, 3, (1,1), padding="same", activation=tf.nn.leaky_relu, \
kernel_initializer=tf.keras.initializers.TruncatedNormal(stddev=0.01, seed=1), \
bias_initializer=tf.keras.initializers.constant(0.01))(conv3)
conv4 = layers.BatchNormalization()(conv4)
conv5 = keras.layers.Conv2D(192, 3, (1,1), padding="same", activation=tf.nn.leaky_relu, \
kernel_initializer=tf.keras.initializers.TruncatedNormal(stddev=0.01, seed=1), \
bias_initializer=tf.keras.initializers.constant(0.01))(conv4)
conv5 = layers.BatchNormalization()(conv5)
conv5 = layers.Flatten()(conv5)
fc6 = layers.Dense(1024, activation=tf.nn.leaky_relu, \
kernel_initializer=tf.keras.initializers.TruncatedNormal(stddev=0.01, seed=1), \
bias_initializer=tf.keras.initializers.constant(0.01))(conv5)
fc7 = layers.Dense(2, activation=tf.nn.softmax, \
kernel_initializer=tf.keras.initializers.TruncatedNormal(stddev=0.01, seed=1), \
bias_initializer=tf.keras.initializers.constant(0.01))(fc6)
return Model(inputs=inputs, outputs=fc7, name="dped_adversarial")
def fourier_adversarial(input_shape):
inputs = keras.Input(shape=input_shape)
finputs = layers.Flatten()(inputs)
fc1 = layers.Dense(1024, activation=tf.nn.leaky_relu, \
kernel_initializer=tf.keras.initializers.TruncatedNormal(stddev=0.01, seed=1), \
bias_initializer=tf.keras.initializers.constant(0.01))(finputs)
fc2 = layers.Dense(1024, activation=tf.nn.leaky_relu, \
kernel_initializer=tf.keras.initializers.TruncatedNormal(stddev=0.01, seed=1), \
bias_initializer=tf.keras.initializers.constant(0.01))(fc1)
fc3 = layers.Dense(1024, activation=tf.nn.leaky_relu, \
kernel_initializer=tf.keras.initializers.TruncatedNormal(stddev=0.01, seed=1), \
bias_initializer=tf.keras.initializers.constant(0.01))(fc2)
fc4 = layers.Dense(1024, activation=tf.nn.leaky_relu, \
kernel_initializer=tf.keras.initializers.TruncatedNormal(stddev=0.01, seed=1), \
bias_initializer=tf.keras.initializers.constant(0.01))(fc3)
fc5 = layers.Dense(2, activation=tf.nn.softmax, \
kernel_initializer=tf.keras.initializers.TruncatedNormal(stddev=0.01, seed=1), \
bias_initializer=tf.keras.initializers.constant(0.01))(fc4)
return Model(inputs=inputs, outputs=fc5, name="fourier_adversarial")
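# --- Build sketch (hypothetical 100x100 RGB patches; not from the original code) ---
# Only checks that the graphs assemble under TF 2.x; no training logic is implied.
if __name__ == "__main__":
    generator = dped_resnet((100, 100, 3))
    discriminator = dped_adversarial((100, 100, 3))
    generator.summary()
    discriminator.summary()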
|
from ..EnumLogLevel import EnumLogLevel
class Converter_prettyJSON_to_raw(object):
################################################################################################################################
## Constructor
################################################################################################################################
################################################################################################################################
## Public Properties
################################################################################################################################
################################################################################################################################
## Helper Methods
################################################################################################################################
def __json_to_timeStamp(self, jTimeStamp:dict) -> float:
assert isinstance(jTimeStamp, dict)
return jTimeStamp["t"]
#
def __json_to_stackTraceElement(self, jStackTraceElement:dict) -> tuple:
assert isinstance(jStackTraceElement, dict)
return (
jStackTraceElement["file"],
jStackTraceElement["line"],
jStackTraceElement["module"],
jStackTraceElement["code"],
)
#
################################################################################################################################
## Public Methods
################################################################################################################################
def json_to_logEntry(self, jLogEntry:dict) -> list:
assert isinstance(jLogEntry, dict)
sType = jLogEntry["type"]
rawLogEntry = [
sType,
0, # jLogEntry["id"],
jLogEntry["indent"],
self.__json_to_timeStamp(jLogEntry["timeStamp"]),
EnumLogLevel.parse(jLogEntry["logLevel"][0]),
]
if sType == "txt":
rawLogEntry.append(jLogEntry["text"])
assert len(rawLogEntry) == 7
elif sType == "ex":
rawLogEntry.append(jLogEntry["exception"])
rawLogEntry.append(jLogEntry["text"])
stackTraceList = None
if "stacktrace" in jLogEntry:
stackTraceList = [
self.__json_to_stackTraceElement(x) for x in jLogEntry["stacktrace"]
]
rawLogEntry.append(stackTraceList)
assert len(rawLogEntry) == 9
elif sType == "desc":
rawLogEntry.append(jLogEntry["text"])
children = None
if "children" in jLogEntry:
children = [
self.json_to_logEntry(x) for x in jLogEntry["children"]
]
rawLogEntry.append(children)
assert len(rawLogEntry) == 8
else:
raise Exception("Implementation Error!")
return rawLogEntry
#
################################################################################################################################
## Static Methods
################################################################################################################################
#
|
# Generated by Django 2.2.1 on 2019-05-06 10:28
from django.db import migrations, models
import Core.models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='ProjectModel',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('tag', models.CharField(
choices=[('default', 'default'), ('home', 'home'), ('abroad', 'abroad'), ('other', 'other')],
default='other', max_length=50)),
('name', models.CharField(blank=True, max_length=100, null=True)),
('desc', models.TextField(blank=True, null=True)),
],
),
migrations.CreateModel(
name='SettingModel',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('activating', models.BooleanField(default=False)),
('tag', models.CharField(
choices=[('debug', 'debug'), ('home', 'home'), ('abroad', 'abroad'), ('other', 'other')],
default='other', max_length=50)),
('kind', models.CharField(choices=[('redis', 'redis'), ('nginx_file_server', 'nginx_file_server')],
default='other', max_length=50)),
('setting', Core.models.DiyDictField(default={})),
],
),
migrations.CreateModel(
name='TaskModel',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('pid', models.IntegerField(default=0)),
('kind', models.CharField(blank=True, default=None, max_length=100, null=True)),
('kwargs', Core.models.DiyDictField(default={})),
('task_id', models.CharField(blank=True, default=None, max_length=150, null=True)),
('status', models.CharField(blank=True, choices=[('PENDING', 'PENDING'), ('STARTED', 'STARTED'),
('SUCCESS', 'SUCCESS'), ('FAILURE', 'FAILURE'),
('RETRY', 'RETRY'), ('REVOKED', 'REVOKED'),
('PROGRESS', 'PROGRESS'), ('STORED', 'STORED'),
('STOREFAIL', 'STOREFAIL')], default='PENDING',
max_length=50, null=True)),
('start_time', models.IntegerField(default=0)),
('end_time', models.IntegerField(default=0)),
],
),
]
|
import mysql.connector
from mysql.connector import errorcode
from mysql.connector.connection import MySQLConnection
class DatabaseHelper(MySQLConnection):
def __init__(self, svr_ctx=None, *args, **kwargs):
super(DatabaseHelper, self).__init__(*args, **kwargs)
self.svr_ctx = svr_ctx
def _create_database(self, db_name):
cursor = self.cursor()
try:
cursor.execute(
"CREATE DATABASE {} DEFAULT CHARACTER SET 'utf8'".format(db_name))
except mysql.connector.Error as err:
self.svr_ctx.logger.fatal("Failed creating database: {}".format(err))
def create_database(self, db_name):
cursor = self.cursor()
try:
cursor.execute("USE {}".format(db_name))
except mysql.connector.Error as err:
self.svr_ctx.logger.info("Database {} does not exists.".format(db_name))
if err.errno == errorcode.ER_BAD_DB_ERROR:
self._create_database(cursor)
self.svr_ctx.logger.info("Database {} created successfully.".format(db_name))
self.database = db_name
else:
self.svr_ctx.logger.fatal(err)
def create_table(self, table_name, table_description):
cursor = self.cursor()
try:
self.svr_ctx.logger.info("Creating table {}: ".format(table_name), end='')
cursor.execute(table_description)
except mysql.connector.Error as err:
if err.errno == errorcode.ER_TABLE_EXISTS_ERROR:
self.svr_ctx.logger.warn("already exists.")
else:
self.svr_ctx.logger.error(err.msg)
else:
self.svr_ctx.logger.info("OK")
|
import os
import torch
class CheckPoint(object):
def __init__(self, root, save_fn, load_fn, record_filename="checkpoint.txt", num_record=-1, clean=False):
self.root = root
self.save_fn = save_fn
self.load_fn = load_fn
self.filename = record_filename
self.num_record = num_record
if not os.path.exists(root):
os.makedirs(root)
# clean the record file
record_filename = os.path.join(self.root, self.filename)
if clean and os.path.exists(record_filename):
with open(record_filename, "w") as f:
f.write('')
def read_record_file(self):
record_filename = os.path.join(self.root, self.filename)
if not os.path.exists(record_filename):
return []
with open(record_filename, "r") as f:
records = f.read().split()
records = [i for i in records if i.strip()]
return records
def write_record_file(self, records):
record_filename = os.path.join(self.root, self.filename)
with open(record_filename, "w") as f:
for record in records:
f.write("{}\n".format(record))
def save(self, filename, *args):
if not os.path.isabs(filename):
filename = os.path.join(self.root, filename)
self.save_fn(filename, *args)
records = self.read_record_file()
records.append(filename)
if 0 < self.num_record < len(records):
while self.num_record < len(records):
record = records.pop(0)
try:
os.remove(record)
except Exception:
pass
self.write_record_file(records)
def load(self, filename):
self.load_fn(filename)
def try_load_last(self):
record_filename = os.path.join(self.root, self.filename)
if not os.path.exists(record_filename):
return
with open(record_filename, "r") as f:
records = f.read().split()
records = [i for i in records if i.strip()]
if len(records) > 0:
self.load_fn(records[-1])
def search_checkpoint_records(save_dir):
checkpoint_files = []
if os.path.exists(save_dir):
checkpoint_record = os.path.join(save_dir, "checkpoint.txt")
if os.path.exists(checkpoint_record):
for line in open(checkpoint_record):
if line.strip() != "":
# print(line.strip())
checkpoint_files.append(line.strip())
while len(checkpoint_files) > 5:
try:
print("remove {}".format(checkpoint_files[0]))
os.remove(os.path.join(save_dir, checkpoint_files.pop(0)))
except Exception:
pass
return checkpoint_files
def save_checkpoint_records(checkpoint_files, save_dir):
with open(os.path.join(save_dir, "checkpoint.txt"), "w") as f:
for checkpoint_file in checkpoint_files:
f.write("{}\n".format(checkpoint_file))
def save_checkpoint(model, optimizer, scheduler, epoch, path):
save_dir = os.path.dirname(path)
checkpoint_files = search_checkpoint_records(save_dir)
if not os.path.exists(save_dir):
os.makedirs(save_dir)
if len(checkpoint_files) == 5:
try:
os.remove(os.path.join(save_dir, checkpoint_files.pop(0)))
except Exception:
pass
checkpoint_files.append(os.path.basename(path))
torch.save({
"epoch": epoch,
"model_state_dict": model.state_dict(),
"optimizer_state_dict": optimizer.state_dict(),
"scheduler_state_dict": scheduler.state_dict(),
}, path)
save_checkpoint_records(checkpoint_files, save_dir)
def load_checkpoint(save_path):
checkpoint_path = save_path
if os.path.isdir(save_path) and os.path.exists(save_path):
checkpoint_records = search_checkpoint_records(save_path)
if len(checkpoint_records) == 0:
raise FileNotFoundError(os.path.join(save_path, "checkpoint.txt"))
checkpoint_path = os.path.join(save_path, checkpoint_records[-1])
print("restore checkpoint from {}".format(checkpoint_path))
if os.path.exists(checkpoint_path):
return torch.load(checkpoint_path, map_location="cpu")
else:
raise FileNotFoundError(checkpoint_path)
def log_loss(losses, save_dir, epoch, mode="train"):
assert isinstance(losses, list)
if not os.path.exists(save_dir):
os.makedirs(save_dir)
with open(os.path.join(save_dir, "{}.txt".format(mode)), "a") as f:
f.write("{} ".format(epoch))
for loss in losses:
f.write("{} ".format(loss))
f.write("\n")
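# --- Usage sketch for CheckPoint (hypothetical save/load callables, not part of the module) ---
if __name__ == "__main__":
    import torch.nn as nn

    demo_model = nn.Linear(4, 2)

    def _save(filename, *args):
        torch.save(demo_model.state_dict(), filename)

    def _load(filename):
        demo_model.load_state_dict(torch.load(filename, map_location="cpu"))

    ckpt = CheckPoint("./ckpt_demo", _save, _load, num_record=3)
    ckpt.save("model_0.pth")   # writes the file and appends it to checkpoint.txt
    ckpt.try_load_last()       # reloads the most recent record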
|
#!/usr/bin/env python
import kubeexport
import sys
from setuptools import setup, find_packages
from setuptools.command.test import test as TestCommand
requires = ['docopt==0.6.2']
test_requires = ['pytest==3.7.1', 'pytest-cov==2.5.1']
if sys.version_info[0] < 3:
test_requires.append('SystemIO>=1.1')
class PyTestCommand(TestCommand):
"""Run py.test unit tests"""
def finalize_options(self):
TestCommand.finalize_options(self)
self.test_args = ['--verbose', '--cov', 'kubeexport']
self.test_suite = True
def run(self):
import pytest
rcode = pytest.main(self.test_args)
sys.exit(rcode)
setup_options = dict(
name='kube-export',
version=kubeexport.__version__,
description='Kubernetes export tool. Export Kubernetes resources in structured yaml or json files.',
long_description=open('README.md').read(),
author='Shiwaforce.com',
url='https://www.shiwaforce.com',
packages=find_packages(exclude=['tests*']),
install_requires=requires,
tests_require=test_requires,
cmdclass={'test': PyTestCommand},
entry_points={
'console_scripts': ['kube-export=kubeexport.kubeexport:main'],
},
license="MIT",
classifiers=(
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'Intended Audience :: System Administrators',
'Natural Language :: English',
'License :: OSI Approved :: Apache Software License',
'Programming Language :: Python',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
),
)
setup(**setup_options)
|
from .domain import Domain, DomainCreate, DomainInDB, DomainUpdate
from .msg import Msg
from .token import Token, TokenPayload
from .user import User, UserCreate, UserInDB, UserUpdate
from .event import EventCreate, EventCreated
from .billing import StripeLink
|
import pytest
import trio
from alexandria.payloads import (
Advertise, Ack,
FindNodes, FoundNodes,
Locate, Locations,
Ping, Pong,
Retrieve, Chunk,
)
@pytest.mark.trio
async def test_client_send_ping(alice_and_bob_clients):
alice, bob = alice_and_bob_clients
async with bob.message_dispatcher.subscribe(Ping) as subscription:
request_id = await alice.send_ping(bob.local_node)
with trio.fail_after(1):
message = await subscription.receive()
assert message.node == alice.local_node
payload = message.payload
assert isinstance(payload, Ping)
assert payload.request_id == request_id
@pytest.mark.trio
async def test_client_send_pong(alice_and_bob_clients):
alice, bob = alice_and_bob_clients
async with bob.message_dispatcher.subscribe(Pong) as subscription:
await alice.send_pong(bob.local_node, request_id=1234)
with trio.fail_after(1):
message = await subscription.receive()
assert message.node == alice.local_node
payload = message.payload
assert isinstance(payload, Pong)
@pytest.mark.trio
async def test_client_send_find_nodes(alice_and_bob_clients):
alice, bob = alice_and_bob_clients
async with bob.message_dispatcher.subscribe(FindNodes) as subscription:
request_id = await alice.send_find_nodes(bob.local_node, distance=1)
with trio.fail_after(1):
message = await subscription.receive()
assert message.node == alice.local_node
payload = message.payload
assert isinstance(payload, FindNodes)
assert payload.request_id == request_id
@pytest.mark.trio
async def test_client_send_found_nodes(alice_and_bob_clients):
alice, bob = alice_and_bob_clients
async with bob.message_dispatcher.subscribe(FoundNodes) as subscription:
total_messages = await alice.send_found_nodes(
bob.local_node,
request_id=1234,
found_nodes=(),
)
with trio.fail_after(1):
message = await subscription.receive()
assert message.node == alice.local_node
payload = message.payload
assert isinstance(payload, FoundNodes)
assert payload.total == total_messages
@pytest.mark.trio
async def test_client_send_advertise(alice_and_bob_clients):
alice, bob = alice_and_bob_clients
async with bob.message_dispatcher.subscribe(Advertise) as subscription:
request_id = await alice.send_advertise(
bob.local_node,
key=b'key',
who=bob.local_node,
)
with trio.fail_after(1):
message = await subscription.receive()
assert message.node == alice.local_node
payload = message.payload
assert isinstance(payload, Advertise)
assert payload.request_id == request_id
assert payload.key == b'key'
assert payload.node == (bob.local_node_id, bob.listen_on.ip_address.packed, bob.listen_on.port) # noqa: E501
@pytest.mark.trio
async def test_client_send_ack(alice_and_bob_clients):
alice, bob = alice_and_bob_clients
async with bob.message_dispatcher.subscribe(Ack) as subscription:
await alice.send_ack(bob.local_node, request_id=1234)
with trio.fail_after(1):
message = await subscription.receive()
assert message.node == alice.local_node
payload = message.payload
assert isinstance(payload, Ack)
assert payload.request_id == 1234
@pytest.mark.trio
async def test_client_send_locate(alice_and_bob_clients):
alice, bob = alice_and_bob_clients
async with bob.message_dispatcher.subscribe(Locate) as subscription:
request_id = await alice.send_locate(bob.local_node, key=b'key')
with trio.fail_after(1):
message = await subscription.receive()
assert message.node == alice.local_node
payload = message.payload
assert isinstance(payload, Locate)
assert payload.request_id == request_id
assert payload.key == b'key'
@pytest.mark.trio
async def test_client_send_locations(alice_and_bob_clients):
alice, bob = alice_and_bob_clients
async with bob.message_dispatcher.subscribe(Locations) as subscription:
total_messages = await alice.send_locations(bob.local_node, request_id=1234, locations=())
with trio.fail_after(1):
message = await subscription.receive()
assert message.node == alice.local_node
payload = message.payload
assert isinstance(payload, Locations)
assert payload.request_id == 1234
assert payload.total == total_messages
# This is an odd Hashable type...
assert tuple(payload.nodes) == ()
@pytest.mark.trio
async def test_client_send_retrieve(alice_and_bob_clients):
alice, bob = alice_and_bob_clients
async with bob.message_dispatcher.subscribe(Retrieve) as subscription:
request_id = await alice.send_retrieve(bob.local_node, key=b'key')
with trio.fail_after(1):
message = await subscription.receive()
assert message.node == alice.local_node
payload = message.payload
assert isinstance(payload, Retrieve)
assert payload.request_id == request_id
assert payload.key == b'key'
@pytest.mark.trio
async def test_client_send_chunks(alice_and_bob_clients):
alice, bob = alice_and_bob_clients
async with bob.message_dispatcher.subscribe(Chunk) as subscription:
total_chunks = await alice.send_chunks(bob.local_node, request_id=1234, data=b'key')
with trio.fail_after(1):
message = await subscription.receive()
assert message.node == alice.local_node
payload = message.payload
assert isinstance(payload, Chunk)
assert payload.request_id == 1234
assert payload.total == total_chunks
assert payload.index == 0
assert payload.data == b'key'
|
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
# Copyright 2014 Konrad Podloucky
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Setuptools script for building pytractor.
"""
from setuptools import setup, find_packages
from codecs import open # To use a consistent encoding
from os import path
here = path.abspath(path.dirname(__file__))
# Get the long description from the relevant file
with open(path.join(here, 'DESCRIPTION.rst'), encoding='utf-8') as f:
long_description = f.read()
setup(
name='pytractor',
version='0.2.2.dev1',
description='Selenium testing for Angular.js apps',
long_description=long_description,
url='https://github.com/kpodl/pytractor',
author='Konrad Podloucky',
author_email='konrad+pytractor@crunchy-frog.org',
license='Apache 2.0',
# See https://pypi.python.org/pypi?%3Aaction=list_classifiers
classifiers=[
'Development Status :: 3 - Alpha',
'Intended Audience :: Developers',
'Topic :: Software Development :: Testing',
'License :: OSI Approved :: Apache Software License',
'Programming Language :: Python :: 2.7',
],
keywords='selenium angular.js testing',
package_dir={'': 'src'},
packages=find_packages('src'),
# For package data see MANIFEST.in
include_package_data=True,
install_requires=[
'selenium>=2.48.0',
'future>=0.15.2'
],
tests_require=[
'nose>=1.3.7',
'mock>=1.3.0',
],
test_suite='nose.collector',
use_2to3=True
)
|
def aumentar(n=0, tx=0, formato=False):
aum = n + (n * tx / 100)
if formato:
return moeda(aum)
else:
return aum
def diminuir(n=0, tx=0, formato=False):
dim = n - (n * tx / 100)
return dim if formato is False else moeda(dim)
def metade(n=0, formato=False):
met = n / 2
return met if formato is False else moeda(met)
def dobro(n=0, formato=False):
dob = n * 2
return dob if formato is False else moeda(dob)
def moeda(n=0, m='R$ '):
return f'{m}{n:.2f}'.replace('.', ',')
def titulo(txt):
t = len(txt)
print('~' * t)
print(f'{txt}')
print('~' * t)
def resumo(n=0, tx1=0, tx2=0):
a = aumentar(n, tx1)
d = diminuir(n, tx2)
m = metade(n)
db = dobro(n)
titulo(f'{"DADOS DO VALOR"}'.center(30))
print(f'Preço analisado: \t{moeda(n)}')
print(f'Dobro do preço: \t{moeda(db)}')
print(f'Metade do preço: \t{moeda(m)}')
print(f'{tx1}% de aumento: \t{moeda(a)}')
print(f'{tx2}% de desconto: \t{moeda(d)}')
titulo(f'{"FIM"}'.center(30))
|
"""Canonical ISO date format YYYY-MM-DDTHH:mm:SS+HH:mm
This parser is _extremely_ strict, and the dates that match it,
though really easy to work with for the computer, are not particularly
readable. See the iso_date_loose module for a slightly relaxed
definition which allows the "T" character to be replaced by a
" " character, and allows a space before the timezone offset, as well
as allowing the integer values to use non-0-padded integers.
ISO_date -- YYYY-MM-DD format, with a month and date optional
ISO_time -- HH:mm:SS format, with minutes and seconds optional
ISO_date_time -- YYYY-MM-DD HH:mm:SS+HH:mm format,
with time optional and TimeZone offset optional
Interpreter:
MxInterpreter
Interprets the parse tree as mx.DateTime values
ISO_date and ISO_time
returns DateTime objects
Time only
returns RelativeDateTime object which, when
added to a DateTime gives you the given time
within that day
"""
try:
from mx import DateTime
haveMX = 1
except ImportError:
haveMX = 0
from simpleparse.parser import Parser
from simpleparse import common, objectgenerator
from simpleparse.common import chartypes, numbers
from simpleparse.dispatchprocessor import *
c = {}
declaration ="""
year := digit,digit,digit,digit
month := digit,digit
day := digit,digit
hour := digit,digit
minute := digit,digit
second := digit,digit
offset_sign := [-+]
offset := offset_sign, hour, time_separator?, minute
<date_separator> := '-'
<time_separator> := ':'
ISO_date := year, (date_separator, month, (date_separator, day)?)?
ISO_time := hour, (time_separator, minute, (time_separator, second)?)?
ISO_date_time := ISO_date, ([T], ISO_time)?, offset?
"""
_p = Parser( declaration )
for name in ["ISO_time","ISO_date", "ISO_date_time"]:
c[ name ] = objectgenerator.LibraryElement(
generator = _p._generator,
production = name,
)
common.share( c )
if haveMX:
class MxInterpreter(DispatchProcessor):
"""Interpret a parsed ISO_date_time_loose in GMT/UTC time or localtime
"""
def __init__(
self,
inputLocal = 1,
returnLocal = 1,
):
self.inputLocal = inputLocal
self.returnLocal = returnLocal
dateName = 'ISO_date'
timeName = 'ISO_time'
def ISO_date_time( self, (tag, left, right, sublist), buffer):
"""Interpret the loose ISO date + time format"""
set = singleMap( sublist, self, buffer )
base, time, offset = (
set.get(self.dateName),
set.get(self.timeName) or DateTime.RelativeDateTime(hour=0,minute=0,second=0),
set.get( "offset" ),
)
base = base + time
offset = set.get( "offset" )
if offset is not None:
# an explicit timezone was entered, convert to gmt and return as appropriate...
gmt = base - offset
if self.returnLocal:
return gmt.localtime()
else:
return gmt
# was in the default input locale (either gmt or local)
if self.inputLocal and self.returnLocal:
return base
elif not self.inputLocal and not self.returnLocal:
return base
elif self.inputLocal and not self.returnLocal:
# return gmt from local...
return base.gmtime()
else:
return base.localtime()
def ISO_date( self, (tag, left, right, sublist), buffer):
"""Interpret the ISO date format"""
set = {}
for item in sublist:
set[ item[0] ] = dispatch( self, item, buffer)
return DateTime.DateTime(
set.get("year") or now().year,
set.get("month") or 1,
set.get("day") or 1,
)
def ISO_time( self, (tag, left, right, sublist), buffer):
"""Interpret the ISO time format"""
set = {}
for item in sublist:
set[ item[0] ] = dispatch( self, item, buffer)
return DateTime.RelativeDateTime(
hour = set.get("hour") or 0,
minute = set.get("minute") or 0,
second = set.get("second") or 0,
)
integer = numbers.IntInterpreter()
        second = offset_minute = offset_hour = year = month = day = hour = minute = integer
def offset( self, (tag, left, right, sublist), buffer):
"""Calculate the time zone offset as a date-time delta"""
set = singleMap( sublist, self, buffer )
direction = set.get('offset_sign',1)
hour = set.get( "hour", 0)
minute = set.get( "minute", 0)
delta = DateTime.DateTimeDelta( 0, hour*direction, minute*direction)
return delta
def offset_sign( self , (tag, left, right, sublist), buffer):
"""Interpret the offset sign as a multiplier"""
v = buffer [left: right]
if v in ' +':
return 1
else:
return -1
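# --- Parsing sketch (assumes a Python 2 environment where this module imports;
# mx.DateTime is not required for the raw parse) --------------------------------
if __name__ == "__main__":
    _parser = Parser(declaration, "ISO_date_time")
    # parse() returns (success, children, next_character_index)
    print(_parser.parse("2023-04-01T12:30:05+02:00"))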
|
import encodedecode as ed
import registers as registers
import execution as execution
import shareddata as globl
def jmpf(params):
distance = [params[1], params[2], params]
distance = ed.decode(distance)
if globl.mode == globl.modes.bootloader:
globl.bootl += distance
elif globl.mode == globl.modes.rom:
globl.roml += distance
def jmpb(params):
distance = [params[1], params[2], params]
distance = ed.decode(distance)
if globl.mode == globl.modes.bootloader:
globl.bootl -= distance
elif globl.mode == globl.modes.rom:
globl.roml -= distance
def jmp(params):
if params[1] == "00000011" and len(params) >= 2 and ed.decode([params[2], params[3], params]) == 0:
globl.mode = globl.modes.rom
execution.run(globl.rom)
|
###############################
#
# Created by Patrik Valkovic
# 3/9/2021
#
###############################
import unittest
import ffeat
class NormalizedPipeTest(unittest.TestCase):
def test_valid(self):
n = ffeat.NormalizedPipe()
param = (1,3,8), {"something": True}
result = n(param)
self.assertEqual(result, param)
def test_onlyargs(self):
n = ffeat.NormalizedPipe()
result = n((1,3,8))
self.assertEqual(result, ((1,3,8), {}))
def test_only_one_arg(self):
n = ffeat.NormalizedPipe()
result = n(13)
self.assertEqual(result, ((13,), {}))
def test_none(self):
n = ffeat.NormalizedPipe()
result = n(None)
self.assertEqual(result, (tuple(), {}))
if __name__ == '__main__':
unittest.main()
|
import json
from rest_framework.views import APIView
from rest_framework.response import Response
from rest_framework import status
from multapp.utility.notification import SendMailNotifications
class NotificationServicesRest(APIView):
name = 'notification_service'
'''
{
"mail_list": ["mail@domain.com", "other_mail@domail.com"],
"subject": "Notification Fake",
"content_html": "<h1>Content Mail</h1>"
}
'''
def post(self, request, **kwargs):
body_unicode = request.body.decode('utf-8')
body = json.loads(body_unicode)
''' message = '<h2>Multa de Transito N0: ' + str(numquote) + \
'</h2></br><p>Estimado proveedor se ha creado una nueva cotizacion para el cliente ' + client_name + \
'</p></br><p>Ingrese a la plataforma o cree su Oferta a traves de los servicios disponibles</p>' + \
'<br><h3>MERCA DIGITAL SA</h3>'
subject = 'Multa de Transito - Simit Services'
'''
if 'mail_list' in body and 'subject' in body and 'content_html' in body:
try:
sendmailnotification = SendMailNotifications()
sendmailnotification.send_mail(
list_emails=body['mail_list'],
subject=body['subject'],
content_xml=body['content_html']
)
return Response(
{'data': {'mail': 'OK'}},
status=status.HTTP_200_OK
)
            except Exception:
                return Response(
                    {'data': {'error': 'Message not sent'}},
status=status.HTTP_500_INTERNAL_SERVER_ERROR
)
else:
return Response(
{'message': 'invalid message'},
status=status.HTTP_400_BAD_REQUEST
)
|
from torch import nn
import torchvision.models as models
from typeguard import typechecked
@typechecked
class ResNet(nn.Module):
def __init__(self, encoder_name: str, pretrained: bool = False):
"""
:param encoder_name:
:param pretrained:
"""
super(ResNet, self).__init__()
self.base_layers = get_encoder(encoder_name, pretrained)
self.layer0 = nn.Sequential(
nn.Conv2d(
3, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False
),
nn.BatchNorm2d(
64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True
),
nn.ReLU(inplace=True),
)
self.layer1 = nn.Sequential(*self.base_layers[0:2])
self.layer2 = self.base_layers[2]
self.layer3 = self.base_layers[3]
self.layer4 = self.base_layers[4]
def forward(self, x):
"""
Forward pass through feature extraction network
:param x: Input image
:return: Returns feature outputs at different stages of the networks
resnet18
Layer 0: torch.Size([1, 64, 112, 112]); Latent size multiple: 8.0
Layer 1: torch.Size([1, 64, 56, 56]); Latent size multiple: 8.0
Layer 2: torch.Size([1, 128, 28, 28]); Latent size multiple: 4.0
Layer 3: torch.Size([1, 256, 14, 14]); Latent size multiple: 2.0
Layer 4: torch.Size([1, 512, 7, 7]); Latent size multiple: 1.0 (4, 4 for 128x128 image)
resnet50
Layer 0: torch.Size([1, 64, 112, 112]); Latent size multiple: 32.0
Layer 1: torch.Size([1, 256, 56, 56]); Latent size multiple: 8.0
Layer 2: torch.Size([1, 512, 28, 28]); Latent size multiple: 4.0
Layer 3: torch.Size([1, 1024, 14, 14]); Latent size multiple: 2.0
Layer 4: torch.Size([1, 2048, 7, 7]); Latent size multiple: 1.0
"""
layer0 = self.layer0(x)
layer1 = self.layer1(layer0)
layer2 = self.layer2(layer1)
layer3 = self.layer3(layer2)
layer4 = self.layer4(layer3)
return layer0, layer1, layer2, layer3, layer4
def get_encoder(encoder_name, pretrained):
if encoder_name == "resnet18":
base_model = models.resnet18(pretrained=pretrained)
elif encoder_name == "resnet50":
base_model = models.resnet50(pretrained=pretrained)
elif encoder_name == "resnet101":
base_model = models.resnet101(pretrained=pretrained)
else:
raise Exception("Unspecified model name: {}".format(encoder_name))
return list(base_model.children())[3:8]
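# --- Shape-check sketch (hypothetical input size; no pretrained weights needed) ---
if __name__ == "__main__":
    import torch

    net = ResNet("resnet18", pretrained=False)
    with torch.no_grad():
        feats = net(torch.randn(1, 3, 224, 224))
    for i, f in enumerate(feats):
        print("Layer {}: {}".format(i, tuple(f.shape)))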
|
"""
Base da API
"""
from flask import Flask, jsonify
from flask_cors import CORS
from sqlalchemy import create_engine
import pandas as pd
app = Flask(__name__)
CORS(app)
engine = create_engine('postgresql://equipe215:ZXF1aXBlMjE1QHNlcnBybw@bd.inova.serpro.gov.br:5432/equipe215')
with engine.connect() as connection:
columns = connection.execute("SELECT column_name FROM information_schema.columns WHERE table_name='imoveis';")
columns = columns.fetchall()
columns = [column[0] for column in columns]
@app.route("/bairros")
def get_bairros():
"""
Retorna os bairros com valores disponíveis
:return:
"""
with engine.connect() as connection:
response = connection.execute("SELECT DISTINCT x.\"bairro\" FROM imoveis x ORDER BY x.\"bairro\";")
connection.close()
return jsonify([line[0] for line in response])
@app.route("/all")
def get_all():
"""
Retorna todos
:return:
"""
with engine.connect() as connection:
response = connection.execute(f"SELECT * FROM imoveis;").fetchall()
response = [{columns[i]: line[i] for i in range(len(line))} for line in response]
return jsonify(response)
@app.route("/bairro/<string:bairro>")
def getBy_bairro(bairro):
"""
Retorna os imóveis disponíveis em determinado municipio
:param str bairro:
:return:
"""
with engine.connect() as connection:
response = connection.execute(f"SELECT x.* FROM imoveis x WHERE x.\"bairro\" = '{bairro}';").fetchall()
response = [{columns[i]: line[i] for i in range(len(line))} for line in response]
return jsonify(response)
@app.route("/valor/<int:valor>")
def getBy_valor(valor):
"""
Retorna os imóveis disponíveis de acordo com o valor
:param int valor:
:return:
"""
limite_inferior = valor * 0.2
limite_superior = limite_inferior + .2
with engine.connect() as connection:
response = connection.execute(f"SELECT x.* FROM imoveis x WHERE x.\"normal_value_terreno\" <= {limite_superior} AND x.\"normal_value_terreno\" > {limite_inferior};").fetchall()
response = [{columns[i]: line[i] for i in range(len(line))} for line in response]
return jsonify(response)
@app.route("/index/<int:index>")
def getBy_index(index):
"""
Retorna linha de indice tal
:param int index:
:return:
"""
with engine.connect() as connection:
response = connection.execute(f"SELECT x.* FROM imoveis x WHERE x.\"index\" =100;").fetchall()
response = [{columns[i]: line[i] for i in range(len(line))} for line in response]
return jsonify(response[0])
if __name__ == "__main__":
app.run(host = 'localhost', port = 5000, debug = False)
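# --- Example requests against the routes above (hypothetical neighborhood name) ---
# curl http://localhost:5000/bairros
# curl http://localhost:5000/bairro/Centro
# curl http://localhost:5000/valor/3
# curl http://localhost:5000/index/100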
|
import requests
SMMS_URL = "https://sm.ms/api/upload"
def upload_smms(path, upload_name):
smfile = {'smfile': open(path, 'rb')}
r = requests.post(SMMS_URL, files=smfile)
result = r.json()
if result['code'] == 'success':
url = result['data']['url']
delete = result['data']['delete']
return url, delete
else:
return False, False
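# --- Usage sketch (hypothetical file name; requires network access to sm.ms;
# note the `upload_name` argument is not used by the function above) ------------
# if __name__ == "__main__":
#     url, delete_link = upload_smms("example.png", "example.png")
#     print(url, delete_link)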
|
import numpy as np
import pandas as pd
from patsy import dmatrices
import warnings
def sigmoid(x):
'''SIGMOID FUNCTION FOR X'''
return 1/(1+np.exp(-x))
## Algorithm settings
np.random.seed(0) # set the seed
tol=1e-8 # convergence tolerance
lam = None # l2 - regularization
max_iter = 20 # maximum allowed iterations
r = 0.95 # covariance between x and z
n = 1000 # number of observations (size of dataset to generate)
sigma = 1 # variance of noise - how spread out is the data?
## Model settings
beta_x, beta_z, beta_v = -4, .9, 1 # true beta coefficients
var_x, var_z, var_v = 1, 1, 4 # variances of inputs
## the model specification i want to fit
formula = 'y ~ x + z + v + np.exp(x) + I(v**2 + z)'
# keeping x and z closely related (height and weight)
x, z = np.random.multivariate_normal([0,0], [[var_x, r], [r, var_z]], n).T
# blood pressure
v = np.random.normal(0, var_v, n)**3
# create a pandas dataframe
A = pd.DataFrame({'x': x, 'z': z, 'v': v})
# pass the linear predictor for our 3 independent variables through the
# sigmoid to get probabilities (stored in the 'log_odds' column)
A['log_odds'] = sigmoid(A[['x', 'z', 'v']].dot([beta_x, beta_z, beta_v]) + sigma * np.random.normal(0, 1, n))
# sample y from a binomial distribution using these probabilities
A['y'] = [np.random.binomial(1,p) for p in A.log_odds]
# create a dataframe that encompasses our input data, model formula, and outputs
y, X = dmatrices(formula, A, return_type = 'dataframe')
X.head()
def catch_singularity(f):
def silencer(*args, **kwargs):
try:
return f(*args, **kwargs)
except np.linalg.LinAlgError:
            warnings.warn('Algorithm terminated - singular Hessian')
return args[0]
return silencer
def newton_step(curr, X, lam=None):
p = np.array(sigmoid(X.dot(curr[:, 0])), ndmin=2).T
w = np.diag((p * (1 - p))[:, 0])
hessian = X.T.dot(w).dot(X)
grad = X.T.dot(y - p)
if lam:
step, *_ = np.linalg.lstsq(hessian + lam * np.eye(curr.shape[0]), grad)
else:
step, *_ = np.linalg.lstsq(hessian, grad)
beta = curr + step
return beta
def alt_newton_step(curr, X, lam=None):
p = np.array(sigmoid(X.dot(curr[:, 0])), ndmin=2).T
w = np.diag((p * (1 - p))[:, 0])
hessian = X.T.dot(w).dot(X)
grad = X.T.dot(y - p)
if lam:
        step = np.dot(np.linalg.inv(hessian + lam * np.eye(curr.shape[0])), grad)
else:
step = np.dot(np.linalg.inv(hessian), grad)
beta = curr + step
return beta
def check_coefs_convergence(beta_old, beta_new, tol, iters):
coef_change = np.abs(beta_old - beta_new)
return not (np.any(coef_change > tol) & (iters < max_iter))
## initial conditions
# initial coefficients (weight values), 2 copies, we'll update one
beta_old, beta = np.ones((len(X.columns), 1)), np.zeros((len(X.columns), 1))
iter_count = 0
coefs_converged = False
while not coefs_converged:
beta_old = beta
beta = newton_step(beta, X, lam=lam)
iter_count += 1
coefs_converged = check_coefs_convergence(beta_old, beta, tol, iter_count)
print('Iterations: {}'.format(iter_count))
print('Beta: {}'.format(beta))
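# The loop above applies the (undamped) Newton update that newton_step implements:
#     beta_new = beta_old + (X^T W X)^(-1) X^T (y - p)
# with p = sigmoid(X beta_old) and W = diag(p * (1 - p)).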
|
from typing import Callable, Dict, Optional, List
import logging
import torch
import matplotlib as mpl
from .subscriber import LoggerSubscriber
def get_type(value):
if isinstance(value, torch.nn.Module):
return LoggerObserver.TORCH_MODULE
if isinstance(value, mpl.figure.Figure):
return LoggerObserver.FIGURE
if isinstance(value, str):
return LoggerObserver.TEXT
return LoggerObserver.SCALAR
class LoggerObserver(object):
"""Logger Oberserver Degisn Pattern
notifies every subscribers when .log() is called
"""
SCALAR = 'scalar'
FIGURE = 'figure'
TORCH_MODULE = 'torch_module'
TEXT = 'text'
WARN = logging.WARN
ERROR = logging.ERROR
DEBUG = logging.DEBUG
INFO = logging.INFO
CRITICAL = logging.CRITICAL
instances = {}
def __new__(cls, name, *args, **kwargs):
if name in LoggerObserver.instances.keys():
return LoggerObserver.instances[name]
else:
return object.__new__(cls, *args, **kwargs)
def __init__(self, name) -> None:
self.subscriber = []
self.name = name
LoggerObserver.instances[name] = self
@classmethod
def getLogger(cls, name):
if name in LoggerObserver.instances.keys():
return LoggerObserver.instances[name]
else:
return cls(name)
def subscribe(self, subscriber: LoggerSubscriber):
self.subscriber.append(subscriber)
def log(self, logs: List[Dict]):
for subscriber in self.subscriber:
for log in logs:
tag = log['tag']
value = log['value']
type = log['type'] if 'type' in log.keys() else get_type(value)
kwargs = log['kwargs'] if 'kwargs' in log.keys() else {}
if type == LoggerObserver.SCALAR:
subscriber.log_scalar(
tag=tag,
value=value,
**kwargs
)
if type == LoggerObserver.FIGURE:
subscriber.log_figure(
tag=tag,
value=value,
**kwargs
)
if type == LoggerObserver.TORCH_MODULE:
subscriber.log_torch_module(
tag=tag,
value=value,
**kwargs
)
if type == LoggerObserver.TEXT:
subscriber.log_text(
tag=tag,
value=value,
**kwargs
)
def text(self, value, level):
self.log([{
'tag': 'stdout',
'value': value,
'type': LoggerObserver.TEXT,
'kwargs': {
'level': level
}
}])
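# --- Usage sketch (assumes a concrete LoggerSubscriber implementation, e.g. `my_subscriber`) ---
# logger = LoggerObserver.getLogger("main")
# logger.subscribe(my_subscriber)
# logger.log([{"tag": "loss", "value": 0.42, "type": LoggerObserver.SCALAR}])
# logger.text("training started", level=LoggerObserver.INFO)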
|
from pathlib import Path
import pytest
@pytest.fixture()
def html_file():
return Path(__file__).parent / "assets" / "index.html"
|
"""
출처: https://www.hackerrank.com/challenges/coin-change/problem
"""
def getWay(n, c):
n_perms = [1] + [0] * n
for coin in c:
print("coin", coin)
for i in range(coin, n + 1):
print("i", i)
n_perms[i] += n_perms[i - coin]
return n_perms
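# n_perms[i] holds the number of coin combinations that sum to i.
# Worked example: getWay(4, [1, 4, 3]) returns [1, 1, 1, 2, 3], so there are
# 3 ways to make 4: {1+1+1+1}, {4} and {1+3}.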
if __name__ == '__main__':
print("========answer========")
print(getWay(4, [1, 4, 3]))
|
import random
letter = ['a','b','c','d','e','f','g','h','i','j','k','l','m','n','o','p','q','r','s','t','u','v','w','x','y','z','A','B','C','D','E','F','G','H','I','J','K','L','M','N','O','P','Q','R','S','T','U','V','W','X','Y','Z']
number = ['0','1','2','3','4','5','6','7','8','9']
symbol = ['!','@','#','$','%','^','&','*','(',')','_']
print("Welcome to the PyPassword Generator!")
letters = int(input("How many letters would you like in your password: \n"))
symbols = int(input("How many symbols would you like in your password: \n"))
numbers = int(input("How many numbers would you like in your password: \n"))
password = ""
for n in range(1,letters+1):
password += random.choice(letter)
for x in range(1,symbols+1):
password += random.choice(symbol)
for y in range(1,numbers+1):
password += random.choice(number)
temp = list(password)
random.shuffle(temp)
password = ''.join(temp)
print(password)
|
from .process_tables import *
|
from pydantic import BaseModel
from typing import List
from app.api.schemas.pagination import PaginatedModel
class GroupUserBase(BaseModel):
group_id: int
class GroupUserCreate(GroupUserBase):
user_id: int
class GroupUserAddOrRemove(BaseModel):
user_id: int
class GroupUser(GroupUserCreate):
id: int
class Config: orm_mode = True
class GroupUsersPaginated(PaginatedModel):
"""
Group user list with pagination.
"""
items: List[GroupUser]
class Meta: orm_model_class = GroupUser
|
from fabric.api import *
from fabric.contrib.files import *
# Custom Code Enigma modules
import common.ConfigFile
import common.Services
import common.Utils
import common.PHP
import AdjustConfiguration
import Drupal
import DrupalUtils
# Override the shell env variable in Fabric, so that we don't see
# pesky 'stdin is not a tty' messages when using sudo
env.shell = '/bin/bash -c'
global config
# Here, an editorial site is a copy of the production site, which hooks into
# the same database and files directory as the production site, but is
# hosted on a different server.
#
# As a result, this script assumes an editorial site already exists. It does
# not handle creating a new editorial site.
#
# This script simply sets up some variables from config.ini, clones down the
# repository, adjusts the settings.php and files symlinks, then adjust the live
# symlink, all on the editorial server. Lastly, it clears opcache and purges
# Varnish cache.
@task
def main(repo, repourl, build, branch, buildtype, keepbuilds=10, config_filename='config.ini'):
# Read the config.ini file from repo, if it exists
config = common.ConfigFile.buildtype_config_file(buildtype, config_filename)
# We don't need to define a host, as that should be defined in the Jenkins job (or whatever CI is being used)
# Define server roles (if applicable)
common.Utils.define_roles(config, False, None)
user = "jenkins"
application_directory = "www"
www_root = "/var/www"
site_root = www_root + '/%s_%s_%s' % (repo, branch, build)
site_link = www_root + '/live.%s.%s' % (repo, branch)
# Set our host_string based on user@host
env.host_string = '%s@%s' % (user, env.host)
ssh_key = common.ConfigFile.return_config_item(config, "Build", "ssh_key")
# Can be set in the config.ini [Drupal] section
### @TODO: deprecated, can be removed later
drupal_version = common.ConfigFile.return_config_item(config, "Version", "drupal_version", "string", None, True, True, replacement_section="Drupal")
# This is the correct location for 'drupal_version' - note, respect the deprecated value as default
drupal_version = common.ConfigFile.return_config_item(config, "Drupal", "drupal_version", "string", drupal_version)
# Can be set in the config.ini [Composer] section
composer = common.ConfigFile.return_config_item(config, "Composer", "composer", "boolean", True)
composer_lock = common.ConfigFile.return_config_item(config, "Composer", "composer_lock", "boolean", True)
no_dev = common.ConfigFile.return_config_item(config, "Composer", "no_dev", "boolean", True)
# Set SSH key if needed
# @TODO: this needs to be moved to config.ini for Code Enigma GitHub projects
if "git@github.com" in repourl:
ssh_key = "/var/lib/jenkins/.ssh/id_rsa_github"
# Run the tasks.
# --------------
execute(common.Utils.clone_repo, repo, repourl, branch, build, None, ssh_key, hosts=env.roledefs['app_all'])
# Gitflow workflow means '/' in branch names, need to clean up.
branch = common.Utils.generate_branch_name(branch)
print "===> Branch is %s" % branch
drupal_version = int(DrupalUtils.determine_drupal_version(drupal_version, repo, branch, build, config))
print "===> the drupal_version variable is set to %s" % drupal_version
if drupal_version > 7 and composer is True:
# Sometimes people use the Drupal Composer project which puts Drupal 8's composer.json file in repo root.
with settings(warn_only=True):
if run("find %s/composer.json" % site_root).return_code == 0:
path = site_root
else:
path = site_root + "/" + application_directory
execute(common.PHP.composer_command, path, "install", None, no_dev, composer_lock)
# Compile a site mapping, which is needed if this is a multisite build
# Just sets to 'default' if it is not
mapping = {}
mapping = Drupal.configure_site_mapping(repo, mapping, config)
for alias,site in mapping.iteritems():
execute(AdjustConfiguration.adjust_settings_php, repo, branch, build, buildtype, alias, site, www_root, application_directory)
execute(AdjustConfiguration.adjust_drushrc_php, repo, branch, build, site, www_root, application_directory)
execute(AdjustConfiguration.adjust_files_symlink, repo, branch, build, alias, site, www_root, application_directory)
execute(common.Utils.adjust_live_symlink, repo, branch, build, hosts=env.roledefs['app_all'])
# Final clean up and run tests, if applicable
execute(common.Services.clear_php_cache, hosts=env.roledefs['app_all'])
execute(common.Services.clear_varnish_cache, hosts=env.roledefs['app_all'])
execute(common.Utils.remove_old_builds, repo, branch, keepbuilds, hosts=env.roledefs['app_all'])
|
import math
import torch
import torch.nn as nn
from torch.nn.init import _calculate_fan_in_and_fan_out, calculate_gain
def MSRInitializer(Alpha=0, WeightScale=1):
def Initializer(Tensor):
_, fan_out = _calculate_fan_in_and_fan_out(Tensor)
gain = calculate_gain('leaky_relu', Alpha)
std = gain / math.sqrt(fan_out)
bound = math.sqrt(3.0) * std * WeightScale
with torch.no_grad():
if WeightScale != 0:
return Tensor.uniform_(-bound, bound)
else:
return Tensor.zero_()
return Initializer
def XavierInitializer(Tensor):
_, fan_out = _calculate_fan_in_and_fan_out(Tensor)
gain = calculate_gain('sigmoid')
std = gain * math.sqrt(1.0 / fan_out)
a = math.sqrt(3.0) * std # Calculate uniform bounds from standard deviation
with torch.no_grad():
return Tensor.uniform_(-a, a)
def RawConvolutionalLayer(InputChannels, OutputChannels, ReceptiveField=3, Strides=1, KernelInitializer=None, UseBias=True, Groups=1):
KernelInitializer = MSRInitializer() if KernelInitializer is None else KernelInitializer
ConvolutionalLayer = nn.Conv2d(InputChannels, OutputChannels, kernel_size=ReceptiveField, stride=Strides, padding=(ReceptiveField - 1) // 2, bias=UseBias, groups=Groups)
KernelInitializer(ConvolutionalLayer.weight)
if UseBias:
ConvolutionalLayer.bias.data.fill_(0)
return ConvolutionalLayer
def RawFullyConnectedLayer(InputFeatures, OutputFeatures):
FullyConnectedLayer = nn.Linear(InputFeatures, OutputFeatures, bias=True)
XavierInitializer(FullyConnectedLayer.weight)
FullyConnectedLayer.bias.data.fill_(0)
return FullyConnectedLayer
def BatchNorm(Channels):
RawBatchNorm = nn.BatchNorm2d(Channels)
RawBatchNorm.weight.data.fill_(1)
RawBatchNorm.bias.data.fill_(0)
return RawBatchNorm
class ConvolutionalLayer(nn.Module):
def __init__(self, InputChannels, OutputChannels, ReceptiveField=3, Strides=1, Activation=True, BatchNormalization=True, Junction=False, WeightScale=0, Groups=1):
super(ConvolutionalLayer, self).__init__()
WeightScale = WeightScale if Junction else 1
Alpha = 0 if Activation else 1
self.LinearLayer = RawConvolutionalLayer(InputChannels, OutputChannels, ReceptiveField, Strides, MSRInitializer(Alpha, WeightScale), not BatchNormalization, Groups)
self.BatchNormalizationLayer = BatchNorm(OutputChannels) if BatchNormalization else None
self.ActivationLayer = nn.ReLU(inplace=True if BatchNormalization else False) if Activation else None
def forward(self, x, Shortcut=None):
RawOutputFlag = self.BatchNormalizationLayer is None and self.ActivationLayer is None
x = self.LinearLayer(x)
x = x + Shortcut if Shortcut is not None else x
y = self.BatchNormalizationLayer(x) if self.BatchNormalizationLayer is not None else x
y = self.ActivationLayer(y) if self.ActivationLayer is not None else y
return [x, y] if RawOutputFlag == False else x
class BottleneckConvolutionalLayer(nn.Module):
def __init__(self, InputChannels, OutputChannels, ReceptiveField=3, Strides=1, Activation=True, BatchNormalization=True, Junction=False, WeightScale=0):
super(BottleneckConvolutionalLayer, self).__init__()
self.BottleneckLayer = ConvolutionalLayer(InputChannels, OutputChannels, 1, 1, BatchNormalization=BatchNormalization)
self.TransformationLayer = ConvolutionalLayer(OutputChannels, OutputChannels, ReceptiveField, Strides, Activation, BatchNormalization, Junction, WeightScale)
def forward(self, x, Shortcut=None):
_, x = self.BottleneckLayer(x)
return self.TransformationLayer(x, Shortcut)
class SequentialConvolutionalLayer(nn.Module):
def __init__(self, InputChannels, OutputChannels, ReceptiveField=3, Windowed=True, BatchNormalization=True, Junction=False, WeightScale=0, k=8, DropRate=0.1):
super(SequentialConvolutionalLayer, self).__init__()
Filters = []
self.Windowed = Windowed
for x in range(OutputChannels // k):
Filters += [BottleneckConvolutionalLayer(InputChannels if Windowed else InputChannels + x * k, k, ReceptiveField=ReceptiveField, Strides=1, Activation=True, BatchNormalization=BatchNormalization, Junction=Junction, WeightScale=WeightScale)]
self.Filters = nn.ModuleList(Filters)
self.Dropout = nn.Dropout2d(p=DropRate) if DropRate > 0 else None
self.k = k
def forward(self, x, Shortcut=None):
SequenceView = x
RawSequence = []
ActivatedSequence = []
for y in range(len(self.Filters)):
CurrentShortcutSlice = Shortcut[:, y * self.k : (y + 1) * self.k, :, :] if Shortcut is not None else None
DropoutSequenceView = self.Dropout(SequenceView) if self.Dropout is not None else SequenceView
RawFeature, ActivatedFeature = self.Filters[y](DropoutSequenceView, CurrentShortcutSlice)
RawSequence += [RawFeature]
ActivatedSequence += [ActivatedFeature]
SequenceView = SequenceView[:, self.k:, :, :] if self.Windowed else SequenceView
SequenceView = torch.cat([SequenceView, ActivatedFeature], 1)
return torch.cat(RawSequence, 1), torch.cat(ActivatedSequence, 1)
class ResidualBlock(nn.Module):
def __init__(self, InputChannels, BatchNormalization=True, WeightScale=0, k=8):
super(ResidualBlock, self).__init__()
self.LayerA = SequentialConvolutionalLayer(InputChannels, InputChannels, BatchNormalization=BatchNormalization, k=k)
self.LayerB = SequentialConvolutionalLayer(InputChannels, InputChannels, BatchNormalization=BatchNormalization, Junction=True, WeightScale=WeightScale, k=k)
def forward(self, x, Shortcut):
_, x = self.LayerA(x)
return self.LayerB(x, Shortcut)
class DownsampleLayer(nn.Module):
def __init__(self, InputChannels, OutputChannels, BatchNormalization=True, k=8):
super(DownsampleLayer, self).__init__()
self.ExtensionLayer = SequentialConvolutionalLayer(InputChannels, OutputChannels - InputChannels, BatchNormalization=BatchNormalization, k=k)
self.ShrinkingLayer = ConvolutionalLayer(OutputChannels, OutputChannels, 3, 2, BatchNormalization=BatchNormalization, Groups=OutputChannels // k)
def forward(self, x):
_, ActivatedFeatures = self.ExtensionLayer(x)
x = torch.cat([x, ActivatedFeatures], 1)
return self.ShrinkingLayer(x)
class ResNetCIFAR(nn.Module):
def __init__(self, Classes=10, BlocksPerStage=[3, 3, 3], PyramidFactor=[1, 2, 4], Widening=8, Granularity=8, BatchNormalization=True, DropRate=0.1, WeightScale=0):
super(ResNetCIFAR, self).__init__()
Settings = dict(BatchNormalization=BatchNormalization, k=Granularity)
Stage2Width = 16 * PyramidFactor[1] / PyramidFactor[0]
Stage3Width = 16 * PyramidFactor[2] / PyramidFactor[0]
self.Init = ConvolutionalLayer(3, 16, BatchNormalization=BatchNormalization)
self.Head = SequentialConvolutionalLayer(16, 16 * Widening, Windowed=False, DropRate=0, **Settings)
self.Stage1 = nn.ModuleList([ResidualBlock(16 * Widening, WeightScale=WeightScale, **Settings) for _ in range(BlocksPerStage[0])])
self.Downsample1 = DownsampleLayer(16 * Widening, int(Stage2Width * Widening), **Settings)
self.Stage2 = nn.ModuleList([ResidualBlock(int(Stage2Width * Widening), WeightScale=WeightScale, **Settings) for _ in range(BlocksPerStage[1])])
self.Downsample2 = DownsampleLayer(int(Stage2Width * Widening), int(Stage3Width * Widening), **Settings)
self.Stage3 = nn.ModuleList([ResidualBlock(int(Stage3Width * Widening), WeightScale=WeightScale, **Settings) for _ in range(BlocksPerStage[2])])
self.Dropout = nn.Dropout2d(p=DropRate) if DropRate > 0 else None
self.Blender = ConvolutionalLayer(int(Stage3Width * Widening), int(Stage3Width * Widening), 1, 1, BatchNormalization=BatchNormalization)
self.Compress = nn.AdaptiveAvgPool2d((1, 1))
self.Classifier = RawFullyConnectedLayer(int(Stage3Width * Widening), Classes)
def forward(self, x):
def Refine(ActivatedFeatures, RawFeatures, PoolOfBlocks):
for Block in PoolOfBlocks:
RawFeatures, ActivatedFeatures = Block(ActivatedFeatures, RawFeatures)
return RawFeatures, ActivatedFeatures
_, ActivatedFeatures = self.Init(x)
RawFeatures, ActivatedFeatures = self.Head(ActivatedFeatures)
RawFeatures, ActivatedFeatures = Refine(ActivatedFeatures, RawFeatures, self.Stage1)
RawFeatures, ActivatedFeatures = self.Downsample1(ActivatedFeatures)
RawFeatures, ActivatedFeatures = Refine(ActivatedFeatures, RawFeatures, self.Stage2)
RawFeatures, ActivatedFeatures = self.Downsample2(ActivatedFeatures)
RawFeatures, ActivatedFeatures = Refine(ActivatedFeatures, RawFeatures, self.Stage3)
ActivatedFeatures = self.Dropout(ActivatedFeatures) if self.Dropout is not None else ActivatedFeatures
RawFeatures, ActivatedFeatures = self.Blender(ActivatedFeatures)
x = self.Compress(ActivatedFeatures)
x = x.view(x.size(0), -1)
x = self.Classifier(x)
return x
from utils import summary
summary(ResNetCIFAR(Classes=10, BlocksPerStage=[1, 1, 1], PyramidFactor=[1, 2, 3], Widening=7, Granularity=16, BatchNormalization=True, WeightScale=0), (3, 32, 32))
|
import music21
from mirdata.annotations import KeyData, ChordData
from mirdata.datasets import haydn_op20
from tests.test_utils import run_track_tests
import numpy as np
def test_track():
default_trackid = "0"
data_home = "tests/resources/mir_datasets/haydn_op20"
dataset = haydn_op20.Dataset(data_home)
track = dataset.track(default_trackid)
expected_attributes = {
"humdrum_annotated_path": "tests/resources/mir_datasets/haydn_op20/op20n1-01.hrm",
"title": "op20n1-01",
"track_id": "0",
}
expected_property_types = {
"duration": int,
"chords": ChordData,
"chords_music21": list,
"roman_numerals": list,
"keys": KeyData,
"keys_music21": list,
"score": music21.stream.Score,
"midi_path": str,
}
run_track_tests(track, expected_attributes, expected_property_types)
def test_to_jam():
data_home = "tests/resources/mir_datasets/haydn_op20"
dataset = haydn_op20.Dataset(data_home)
track = dataset.track("0")
jam = track.to_jams()
assert jam["file_metadata"]["title"] == "op20n1-01", "title does not match expected"
assert jam["file_metadata"]["duration"] == 644, "duration does not match expected"
    assert (
        jam["sandbox"]["humdrum_annotated_path"]
        == "tests/resources/mir_datasets/haydn_op20/op20n1-01.hrm"
    ), "humdrum_annotated_path does not match expected"
    assert (
        jam["sandbox"]["midi_path"]
        == "tests/resources/mir_datasets/haydn_op20/op20n1-01.midi"
    ), "midi_path does not match expected"
assert isinstance(jam["sandbox"]["chords_music21"], list)
assert jam["sandbox"]["chords_music21"][0]["time"] == 0
assert jam["sandbox"]["chords_music21"][0]["chord"] == "Eb-major triad"
assert isinstance(jam["sandbox"]["keys_music21"], list)
assert jam["sandbox"]["keys_music21"][0]["time"] == 0
assert isinstance(jam["sandbox"]["keys_music21"][0]["key"], music21.key.Key)
assert isinstance(jam["sandbox"]["roman_numerals"], list)
assert jam["sandbox"]["roman_numerals"][0]["time"] == 0
assert jam["sandbox"]["roman_numerals"][0]["roman_numeral"] == "I"
chord_data = jam["sandbox"]["chord"]
assert type(chord_data) == ChordData
assert type(chord_data.intervals) == np.ndarray
assert type(chord_data.labels) == list
assert np.array_equal(
chord_data.intervals[:, 0], np.array([0.0, 364.0, 392.0, 644.0])
)
assert np.array_equal(
chord_data.intervals[:, 1], np.array([363.0, 391.0, 643.0, 644.0])
)
assert np.array_equal(
chord_data.labels,
np.array(
[
"Eb-major triad",
"Bb-dominant seventh chord",
"Eb-major triad",
"F-dominant seventh chord",
]
),
)
assert haydn_op20.load_chords(None) is None
key_data = jam["sandbox"]["key"]
assert type(key_data) == KeyData
assert type(key_data.intervals) == np.ndarray
assert np.array_equal(key_data.intervals[:, 0], np.array([0.0, 644.0]))
assert np.array_equal(key_data.intervals[:, 1], np.array([643.0, 644.0]))
assert np.array_equal(key_data.keys, ["Eb:major", "Bb:major"])
assert haydn_op20.load_key(None) is None
def test_load_score():
path = "tests/resources/mir_datasets/haydn_op20/op20n1-01.hrm"
score = haydn_op20.load_score(path)
assert isinstance(score, music21.stream.Score)
assert len(score.parts) == 4
def test_load_key():
path = "tests/resources/mir_datasets/haydn_op20/op20n1-01.hrm"
key_data = haydn_op20.load_key(path)
assert type(key_data) == KeyData
assert type(key_data.intervals) == np.ndarray
assert np.array_equal(key_data.intervals[:, 0], np.array([0.0, 644.0]))
assert np.array_equal(key_data.intervals[:, 1], np.array([643.0, 644.0]))
assert np.array_equal(key_data.keys, ["Eb:major", "Bb:major"])
assert haydn_op20.load_key(None) is None
key_music21 = haydn_op20.load_key_music21(path)
assert isinstance(key_music21, list)
assert len(key_music21) == 4
assert key_music21[0]["time"] == 0
assert key_music21[-1]["time"] == 644
assert isinstance(key_music21[0]["key"], music21.key.Key)
def test_load_chords():
path = "tests/resources/mir_datasets/haydn_op20/op20n1-01.hrm"
chord_data = haydn_op20.load_chords(path)
assert type(chord_data) == ChordData
assert type(chord_data.intervals) == np.ndarray
assert type(chord_data.labels) == list
assert np.array_equal(
chord_data.intervals[:, 0], np.array([0.0, 364.0, 392.0, 644.0])
)
assert np.array_equal(
chord_data.intervals[:, 1], np.array([363.0, 391.0, 643.0, 644.0])
)
assert np.array_equal(
chord_data.labels,
np.array(
[
"Eb-major triad",
"Bb-dominant seventh chord",
"Eb-major triad",
"F-dominant seventh chord",
]
),
)
assert haydn_op20.load_chords(None) is None
chords = haydn_op20.load_chords_music21(path)
assert isinstance(chords, list)
assert len(chords) == 4
assert chords[0]["time"] == 0
assert chords[-1]["time"] == 644
assert chords[0]["chord"] == "Eb-major triad"
assert chords[-1]["chord"] == "F-dominant seventh chord"
def test_load_roman_numerals():
path = "tests/resources/mir_datasets/haydn_op20/op20n1-01.hrm"
roman_numerals = haydn_op20.load_roman_numerals(path)
assert isinstance(roman_numerals, list)
assert len(roman_numerals) == 4
assert roman_numerals[0]["time"] == 0
assert roman_numerals[-1]["time"] == 644
assert roman_numerals[0]["roman_numeral"] == "I"
assert roman_numerals[-1]["roman_numeral"] == "V43/V"
def test_load_midi_path():
path = "tests/resources/mir_datasets/haydn_op20/op20n1-01.hrm"
midi_path = haydn_op20.convert_and_save_to_midi(path)
assert isinstance(midi_path, str)
assert midi_path == "tests/resources/mir_datasets/haydn_op20/op20n1-01.midi"
|
import json
class TestQuestionnaireResourceEndpoints:
    def test_valid_questionnaire_submission(
        self, client, logged_in_user_token, setup_plans
    ):
        """ Test for questionnaire submission with valid data """
response = client.post(
"/api/v1/questionnaire/process",
data=json.dumps(
{
"first_name": "Jesse",
"address": "No 8, Heaven Gates Drive, Santorini, Greece",
"occupation": "Employed",
"has_children": False,
"email": "bigmanjesse@gmail.com",
"children_count": 2,
}
),
headers={"authorization": f"Bearer {logged_in_user_token}"},
)
resp = response.get_json()
assert response.status_code == 200
assert resp["message"] == "Here is your recommendation"
assert "highly_recommended" in resp["data"].keys()
assert "least_recommended" in resp["data"].keys()
assert "recommended" in resp["data"].keys()
|
#!/usr/bin/env python
import rospy
import actionlib
from move_base_msgs.msg import MoveBaseAction, MoveBaseGoal
from sensor_msgs.msg import Joy
from geometry_msgs.msg import Twist
from ar_track_alvar_msgs.msg import AlvarMarkers
from nav_msgs.msg import Odometry
from ros_numpy import numpify
from tf.transformations import decompose_matrix, euler_from_quaternion
import numpy as np
def odom_callback(msg):
# global cur_pos, cur_heading
pose_msg = msg.pose.pose
pose = numpify(pose_msg)
__, __, angles, translate, __ = decompose_matrix(pose)
cur_pos = translate[0:2]
cur_heading = angles[2]
# print "odom: " + str(cur_heading)
def ar_pose_marker_callback(msg):
if len(msg.markers) == 0:
return
pose_msg = msg.markers[0].pose.pose
pose = numpify(pose_msg)
__, __, angles, translate, __ = decompose_matrix(pose)
cur_pos = translate[0:2]
cur_heading = angles[2]
print "ar: " + str(angles)
# print msg.markers[0].pose
# print "______________________________________"
rospy.init_node('range_ahead')
odom_sub = rospy.Subscriber('odom', Odometry, odom_callback)
ar_sub = rospy.Subscriber('ar_pose_marker', AlvarMarkers, ar_pose_marker_callback)
rospy.spin()
|
import matplotlib.pyplot as plt
import numpy as np
import seaborn as sns
# Set random seed for reproducibility
np.random.seed(1000)
Nsamples = 10000
def X1_sample():
return np.random.normal(0.1, 2.0)
def X2_sample(x1):
return np.random.normal(x1, 0.5 + np.sqrt(np.abs(x1)))
if __name__ == '__main__':
X = np.zeros((Nsamples,))
Y = np.zeros((Nsamples,))
    for i in range(Nsamples):
x1 = X1_sample()
x2 = X2_sample(x1)
X[i] = x1
Y[i] = x2
# Show the density estimation
sns.set()
fig, ax = plt.subplots(figsize=(10, 8))
sns.kdeplot(X, Y, shade=True, shade_lowest=True, kernel="gau", ax=ax)
ax.set_xlabel(r"$x_1$", fontsize=18)
ax.set_ylabel(r"$x_2$", fontsize=18)
ax.set_xlim([-5, 5])
ax.set_ylim([-6, 6])
plt.show()
|
"""
## Twitter Celebrity Matcher
This is a standalone script that scrapes tweets based on a given Twitter handle
Author: [Ahmed Shahriar Sakib](https://www.linkedin.com/in/ahmedshahriar)
Source: [Github](https://github.com/ahmedshahriar/TwitterCelebrityMatcher)
"""
import tweepy
import pandas as pd
import schedule
import time
import os
import glob
from dotenv import load_dotenv
load_dotenv()
access_key = os.environ.get("ACCESS_KEY")
access_secret = os.environ.get("ACCESS_SECRET")
consumer_key = os.environ.get("CONSUMER_KEY")
consumer_secret = os.environ.get("CONSUMER_SECRET")
# screen_name="apotofvestiges"
# https://gist.github.com/mbejda/9c3353780270e7298763
handler_df = pd.read_csv("../celebrity-listing/Top-1000-Celebrity-Twitter-Accounts.csv", header=0)
# list of accounts
# screen_names = handler_df.twitter.unique().tolist()
screen_names_all = handler_df.twitter.unique().tolist()
screen_names_parsed = [os.path.splitext(os.path.basename(x))[0].strip() for x in glob.glob(
    "../twitter-celebrity-tweets-data/*.csv")]
# missing Twitter accounts to be parsed
screen_names = list(set(screen_names_all) - set(screen_names_parsed))
i = 0
def parser():
try:
print("\n\nstarting...")
global screen_names
global i
print(i)
name = screen_names[i]
print(name)
auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
auth.set_access_token(access_key, access_secret)
api = tweepy.API(auth)
alltweets = []
new_tweets = api.user_timeline(screen_name=name, count=200, tweet_mode="extended")
alltweets.extend(new_tweets)
oldest = alltweets[-1].id - 1
while len(new_tweets) > 0:
print("getting tweets before %s" % (oldest))
new_tweets = api.user_timeline(screen_name=name, count=200, max_id=oldest, tweet_mode="extended")
alltweets.extend(new_tweets)
oldest = alltweets[-1].id - 1
print("...%s tweets downloaded so far" % (len(alltweets)))
out_tweets = [[tweet.id_str, tweet.created_at, tweet.full_text.encode("utf-8")] for tweet in alltweets]
df = pd.DataFrame(out_tweets, columns=['twitter_id', 'date', 'tweet'])
# export tweets to CSV under dataset directory
df.to_csv("../twitter-celebrity-tweets-data/%s.csv" % name, index=False)
i = i + 1
print(i)
except Exception as e:
print(e)
i = i + 1
pass
# Cron Job
parser()
# configure schedule job
schedule.every(5).seconds.do(parser)
# schedule.every(1).minutes.do(parser)
# schedule.every().hour.do(parser)
# schedule.every().day.at("23:19").do(parser)
# schedule.every().day.at("23:24").do(parser)
# schedule.every().day.at("23:30").do(parser)
# schedule.every().day.at("23:40").do(parser)
# schedule.every(5).to(10).minutes.do(parser)
# schedule.every().monday.do(parser)
# schedule.every().wednesday.at("13:15").do(parser)
# schedule.every().minute.at(":17").do(parser)
while len(screen_names) > i:
schedule.run_pending()
time.sleep(1)
|
text = "Lorem ipsum dolor sit amet, calis de tabarnak d'osti de criss de sacréfils de siboire de calaille d'incompétent de calis."
with open("createdFiles/Lorem ipsum 2.0.txt", "w") as saveFile:
    saveFile.write(text)
|
#!/usr/bin/env python
import os
import sys
from optparse import OptionParser
INDENTCOUNT = 4
INDENTCHAR = ' '
parser = OptionParser()
parser.add_option('-a', '--app', dest='app', help='The app which contains the model.')
parser.add_option('-m', '--model', dest='model', help='The model to produce the Form for.')
parser.add_option('-p', '--path', dest='path', help='The path to look for the files, directories separated by space.')
parser.add_option('-w', '--write', dest='file', help='The output file to append the form to, without this argument the output is printed.')
options, args = parser.parse_args()
if not(options.model and options.app):
parser.print_help()
sys.exit()
if options.path:
sys.path += options.path.split()
if options.file:
sys.stdout = file(options.file, 'a')
try:
if 'DJANGO_SETTINGS_MODULE' in os.environ:
settings = __import__(os.environ['DJANGO_SETTINGS_MODULE'])
else:
import settings
except ImportError:
print 'Settings file not found. Place this file in the same dir as manage.py or use the path argument.'
sys.exit()
project_directory = os.path.dirname(settings.__file__)
project_name = os.path.basename(project_directory)
sys.path.append(os.path.join(project_directory, '..'))
project_module = __import__(project_name)
os.environ['DJANGO_SETTINGS_MODULE'] = '%s.settings' % project_name
from django.newforms import form_for_model
models = __import__('%s.%s.models' % (project_name, options.app,), '', '', [options.model])
model = getattr(models, options.model)
fields = model._meta.fields + model._meta.many_to_many
print 'class %sForm(forms.Form):' % (options.model)
for field in fields:
formfield = field.formfield()
if formfield:
fieldtype = str(formfield).split()[0].split('.')[-1]
arguments = {}
arguments['verbose_name'] = '\'%s\'' % field.verbose_name
arguments['help_text'] = '\'%s\'' % field.help_text
arguments['required'] = not field.blank
print '%s%s = forms.%s(%s)' % (INDENTCOUNT * INDENTCHAR, field.name, fieldtype, ', '.join(['%s=%s' % (k, v) for k, v in arguments.iteritems()]))
|
# -*- coding: utf-8 -*-
from .fields import ImportPathField
__all__ = ['ImportPathField']
|
#
# This file is part of pysnmp software.
#
# Copyright (c) 2005-2018, Ilya Etingof <etingof@gmail.com>
# License: http://snmplabs.com/pysnmp/license.html
#
# PySNMP MIB module PYSNMP-MIB (http://snmplabs.com/pysnmp)
# ASN.1 source http://mibs.snmplabs.com:80/asn1/PYSNMP-MIB
# Produced by pysmi-0.1.3 at Mon Apr 17 11:46:02 2017
# On host grommit.local platform Darwin version 16.4.0 by user ilya
# Using Python version 3.4.2 (v3.4.2:ab2c023a9432, Oct 5 2014, 20:42:22)
#
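# Note: `mibBuilder` is intentionally not imported here - compiled PYSNMP MIB modules
# such as this one are executed by pysnmp's MIB loader (MibBuilder), which supplies
# `mibBuilder` in the module namespace at load time.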
Integer, OctetString, ObjectIdentifier = mibBuilder.importSymbols("ASN1", "Integer", "OctetString", "ObjectIdentifier")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
SingleValueConstraint, ValueRangeConstraint, ConstraintsIntersection, ValueSizeConstraint, ConstraintsUnion = mibBuilder.importSymbols("ASN1-REFINEMENT", "SingleValueConstraint", "ValueRangeConstraint", "ConstraintsIntersection", "ValueSizeConstraint", "ConstraintsUnion")
NotificationGroup, ModuleCompliance = mibBuilder.importSymbols("SNMPv2-CONF", "NotificationGroup", "ModuleCompliance")
ModuleIdentity, iso, MibScalar, MibTable, MibTableRow, MibTableColumn, Gauge32, NotificationType, IpAddress, MibIdentifier, Unsigned32, Counter32, ObjectIdentity, Counter64, Bits, Integer32, enterprises, TimeTicks = mibBuilder.importSymbols("SNMPv2-SMI", "ModuleIdentity", "iso", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "Gauge32", "NotificationType", "IpAddress", "MibIdentifier", "Unsigned32", "Counter32", "ObjectIdentity", "Counter64", "Bits", "Integer32", "enterprises", "TimeTicks")
TextualConvention, DisplayString = mibBuilder.importSymbols("SNMPv2-TC", "TextualConvention", "DisplayString")
pysnmp = ModuleIdentity((1, 3, 6, 1, 4, 1, 20408))
if mibBuilder.loadTexts: pysnmp.setRevisions(('2017-04-14 00:00', '2005-05-14 00:00',))
if mibBuilder.loadTexts: pysnmp.setLastUpdated('201704140000Z')
if mibBuilder.loadTexts: pysnmp.setOrganization('The PySNMP Project')
if mibBuilder.loadTexts: pysnmp.setContactInfo('E-mail: Ilya Etingof <etingof@gmail.com> GitHub: https://github.com/etingof/pysnmp')
if mibBuilder.loadTexts: pysnmp.setDescription('PySNMP top-level MIB tree infrastructure')
pysnmpObjects = MibIdentifier((1, 3, 6, 1, 4, 1, 20408, 1))
pysnmpExamples = MibIdentifier((1, 3, 6, 1, 4, 1, 20408, 2))
pysnmpEnumerations = MibIdentifier((1, 3, 6, 1, 4, 1, 20408, 3))
pysnmpModuleIDs = MibIdentifier((1, 3, 6, 1, 4, 1, 20408, 3, 1))
pysnmpAgentOIDs = MibIdentifier((1, 3, 6, 1, 4, 1, 20408, 3, 2))
pysnmpDomains = MibIdentifier((1, 3, 6, 1, 4, 1, 20408, 3, 3))
pysnmpExperimental = MibIdentifier((1, 3, 6, 1, 4, 1, 20408, 9999))
pysnmpNotificationPrefix = MibIdentifier((1, 3, 6, 1, 4, 1, 20408, 4))
pysnmpNotifications = MibIdentifier((1, 3, 6, 1, 4, 1, 20408, 4, 0))
pysnmpNotificationObjects = MibIdentifier((1, 3, 6, 1, 4, 1, 20408, 4, 1))
pysnmpConformance = MibIdentifier((1, 3, 6, 1, 4, 1, 20408, 5))
pysnmpCompliances = MibIdentifier((1, 3, 6, 1, 4, 1, 20408, 5, 1))
pysnmpGroups = MibIdentifier((1, 3, 6, 1, 4, 1, 20408, 5, 2))
mibBuilder.exportSymbols("PYSNMP-MIB", pysnmpCompliances=pysnmpCompliances, pysnmpObjects=pysnmpObjects, pysnmpNotificationPrefix=pysnmpNotificationPrefix, pysnmpModuleIDs=pysnmpModuleIDs, pysnmpGroups=pysnmpGroups, pysnmpNotificationObjects=pysnmpNotificationObjects, pysnmp=pysnmp, pysnmpExperimental=pysnmpExperimental, pysnmpNotifications=pysnmpNotifications, PYSNMP_MODULE_ID=pysnmp, pysnmpEnumerations=pysnmpEnumerations, pysnmpDomains=pysnmpDomains, pysnmpAgentOIDs=pysnmpAgentOIDs, pysnmpConformance=pysnmpConformance, pysnmpExamples=pysnmpExamples)
|
from typing import Optional
from aioros_action import ActionClient
from aioros_action import create_client
from aioros import NodeHandle
from aioros_tf2.abc import BufferInterface
from aioros_tf2.exceptions import ConnectivityException
from aioros_tf2.exceptions import ExtrapolationException
from aioros_tf2.exceptions import InvalidArgumentException
from aioros_tf2.exceptions import LookupException
from aioros_tf2.exceptions import TimeoutException
from aioros_tf2.exceptions import TransformException
from genpy import Duration
from genpy import Time
from geometry_msgs.msg import TransformStamped
from tf2_msgs.msg import LookupTransformAction
from tf2_msgs.msg import LookupTransformGoal
from tf2_msgs.msg import TF2Error
class BufferActionClient(BufferInterface):
def __init__(self, ns: str) -> None:
self._ns = ns
self._action_client: Optional[ActionClient] = None
async def init(
self,
node_handle: NodeHandle
) -> None:
self._action_client = await create_client(
node_handle,
self._ns,
LookupTransformAction)
async def close(self) -> None:
if self._action_client:
await self._action_client.close()
self._action_client = None
async def wait_for_server(self) -> None:
await self._action_client.wait_for_server()
async def lookup_transform(
self,
target_frame: str,
source_frame: str,
time: Time,
timeout: Optional[Duration] = None
) -> TransformStamped:
return await self._call_action(
LookupTransformGoal(
target_frame=target_frame,
source_frame=source_frame,
source_time=time,
timeout=timeout or Duration(),
advanced=False))
async def lookup_transform_full(
self,
target_frame: str,
target_time: Time,
source_frame: str,
source_time: Time,
fixed_frame: str,
timeout: Optional[Duration] = None
) -> TransformStamped:
return await self._call_action(
LookupTransformGoal(
target_frame=target_frame,
source_frame=source_frame,
source_time=source_time,
timeout=timeout or Duration(),
target_time=target_time,
fixed_frame=fixed_frame,
advanced=True))
async def can_transform(
self,
target_frame: str,
source_frame: str,
time: Time,
timeout: Optional[Duration] = None
) -> bool:
try:
            await self.lookup_transform(
target_frame,
source_frame,
time,
timeout)
return True
except TransformException:
return False
async def can_transform_full(
self,
target_frame: str,
target_time: Time,
source_frame: str,
source_time: Time,
fixed_frame: str,
timeout: Optional[Duration] = None
) -> bool:
try:
            await self.lookup_transform_full(
target_frame,
target_time,
source_frame,
source_time,
fixed_frame,
timeout)
return True
except TransformException:
return False
async def _call_action(
self,
goal: LookupTransformGoal
) -> TransformStamped:
goal_handle = self._action_client.send_goal(goal)
result = await goal_handle.wait_for_result()
if result.error.error != TF2Error.NO_ERROR:
if result.error.error == TF2Error.LOOKUP_ERROR:
raise LookupException(result.error.error_string)
elif result.error.error == TF2Error.CONNECTIVITY_ERROR:
raise ConnectivityException(result.error.error_string)
elif result.error.error == TF2Error.EXTRAPOLATION_ERROR:
raise ExtrapolationException(result.error.error_string)
elif result.error.error == TF2Error.INVALID_ARGUMENT_ERROR:
raise InvalidArgumentException(result.error.error_string)
elif result.error.error == TF2Error.TIMEOUT_ERROR:
raise TimeoutException(result.error.error_string)
else:
raise TransformException(result.error.error_string)
return result.transform
async def create_buffer_action_client(
node_handle: NodeHandle,
ns: str,
) -> BufferActionClient:
buffer_action_client = BufferActionClient(ns)
await buffer_action_client.init(node_handle)
return buffer_action_client
|
import logging
import numpy as np
import matplotlib
matplotlib.use('Agg')
from matplotlib import pyplot as plt
from matplotlib.patches import Rectangle
logger = logging.getLogger()
from activeClassifier.visualisation.base import Visualiser, visualisation_level
from activeClassifier.tools.utility import softmax
# annoying UserWarning from plt.imshow in _glimpse_patches_until_t()
import warnings
warnings.filterwarnings(
action='ignore',
category=UserWarning,
module=r'.*matplotlib'
)
class Visualization_predRSSM(Visualiser):
def __init__(self, model, FLAGS):
super().__init__(model, FLAGS)
self.num_policies = model.n_policies
self.size_z = FLAGS.size_z
self.planner = FLAGS.planner
self.use_pixel_obs_FE = FLAGS.use_pixel_obs_FE
self.rnd_first_glimpse = FLAGS.rnd_first_glimpse
self.rnn_cell = FLAGS.rnn_cell
@visualisation_level(1)
def visualise(self, d, suffix='', nr_obs_overview=8, nr_obs_reconstr=5):
nr_obs_overview = min(nr_obs_overview, self.batch_size_eff) # batch_size_eff is set in _eval_feed() -> has to come before
nr_obs_reconstr = min(nr_obs_reconstr, self.batch_size_eff)
nr_obs_FE = min(3, self.batch_size_eff)
self.plot_overview(d, nr_obs_overview, suffix)
self.plot_reconstr(d, nr_obs_reconstr, suffix)
self.plot_reconstr_patches(d, nr_obs_reconstr, suffix)
# moved to batch-wise:
# self.plot_stateBelieves(d, suffix)
# self.plot_fb(d, prefix)
if (self.planner == 'ActInf') & (d['epoch'] >= self.pre_train_epochs):
# self.plot_planning(d, nr_examples=nr_obs_reconstr)
self.plot_planning_patches(d, nr_examples=nr_obs_reconstr)
self.plot_FE(d, nr_obs_FE, suffix)
@visualisation_level(1)
def intermed_plots(self, d, nr_examples, suffix='', folder_name='rnd_loc_eval'):
self.plot_planning_patches(d, nr_examples, suffix, folder_name)
@visualisation_level(1)
def plot_reconstr(self, d, nr_examples, suffix='', folder_name='reconstr'):
def get_title_color(post_believes, hyp):
if post_believes[hyp] == post_believes.max():
color = 'magenta'
elif post_believes[hyp] > 0.1:
color = 'blue'
else:
color = 'black'
return color
nax = 2 + self.num_classes_kn
gl = self._glimpse_reshp(d['glimpse']) # [T, B, scale[0], scales*scale[0]]
gl_post = self._glimpse_reshp(d['reconstr_posterior']) # [T, B, scale[0], scales*scale[0]]
gl_preds = self._glimpse_reshp(d['reconstr_prior']) # [T, B, hyp, scale[0], scales*scale[0]]
idx_examples = self._get_idx_examples(d['y'], nr_examples, replace=False)
for i in idx_examples:
f, axes = plt.subplots(self.num_glimpses + 1, nax, figsize=(4 * self.num_scales * nax, 4 * (self.num_glimpses + 1)))
axes = axes.reshape([self.num_glimpses + 1, nax])
self._plot_img_plus_locs(axes[0, 0], d['x'][i], d['y'][i], d['clf'][i], d['locs'][:, i, :], d['decisions'][:, i])
for t in range(self.num_glimpses):
# true glimpse
axes[t+1, 0].imshow(gl[t, i], **self.im_show_kwargs)
title = 'Label: {}, clf: {}'.format(self.lbl_map[d['y'][i]], self.lbl_map[d['clf'][i]])
if self.uk_label is not None:
title += ', p(uk) post: {:.2f}'.format(d['uk_belief'][t + 1, i])
axes[t+1, 0].set_title(title)
# posterior
axes[t+1, 1].imshow(gl_post[t, i], **self.im_show_kwargs)
axes[t+1, 1].set_title('Posterior, nll: {:.2f}'.format(d['nll_posterior'][t, i]))
# prior for all classes
ranked_losses = np.argsort(d['KLdivs'][t, i, :])
ps = softmax(-d['KLdivs'][t, i, :])
for j, hyp in enumerate(ranked_losses):
axes[t+1, j+2].imshow(gl_preds[t, i, hyp], **self.im_show_kwargs)
if d['decisions'][t, i] != -1:
axes[t+1, j + 2].set_title('Decision: {}'.format(d['decisions'][t, i]))
else:
c = get_title_color(d['state_believes'][t+1, i, :], hyp)
axes[t+1, j + 2].set_title('{}, p: {:.2f}, KL: {:.2f}, post-c: {:.2f}'.format(self.lbl_map[hyp], ps[hyp], d['KLdivs'][t, i, hyp], d['state_believes'][t+1, i, hyp]), color=c)
[ax.set_axis_off() for ax in axes.ravel()]
self._save_fig(f, folder_name, '{}{}_n{}{isuk}.png'.format(self.prefix, suffix, i,
isuk='_uk' if (d['y'][i] == self.uk_label) else ''))
@visualisation_level(1)
def plot_reconstr_patches(self, d, nr_examples, suffix='', folder_name='reconstr_patches'):
def get_title_color(post_believes, hyp):
if post_believes[hyp] == post_believes.max():
color = 'magenta'
elif post_believes[hyp] > 0.1:
color = 'blue'
else:
color = 'black'
return color
nax = 2 + self.num_classes_kn
gl = self._glimpse_reshp(d['glimpse']) # [T, B, scale[0], scales*scale[0]]
gl_post = self._glimpse_reshp(d['reconstr_posterior']) # [T, B, scale[0], scales*scale[0]]
gl_preds = self._glimpse_reshp(d['reconstr_prior']) # [T, B, hyp, scale[0], scales*scale[0]]
idx_examples = self._get_idx_examples(d['y'], nr_examples, replace=False)
for i in idx_examples:
f, axes = plt.subplots(self.num_glimpses, nax, figsize=(4 * self.num_scales * nax, 4 * (self.num_glimpses + 1)))
axes = axes.reshape([self.num_glimpses, nax])
self._plot_img_plus_locs(axes[0, 0], d['x'][i], d['y'][i], d['clf'][i], d['locs'][:, i, :], d['decisions'][:, i])
# rank hypotheses by final believes
T = np.argmax(d['decisions'][:, i]) # all non-decisions are -1
ranked_hyp = np.argsort(-d['state_believes'][T, i, :])
for t in range(self.num_glimpses - 1):
# true glimpses up until and including t
self._plot_seen(d['x'][i], d['locs'][:, i], until_t=min(t + 1, self.num_glimpses), ax=axes[t + 1, 0])
title = 'Label: {}, clf: {}'.format(self.lbl_map[d['y'][i]], self.lbl_map[d['clf'][i]])
if self.uk_label is not None:
title += ', p(uk) post: {:.2f}'.format(d['uk_belief'][t + 1, i])
axes[t + 1, 0].set_title(title)
# posterior
self._glimpse_patches_until_t(t+1, gl[:, i], gl_post[:, i], d['locs'][:, i], axes[t + 1, 1])
axes[t + 1, 1].set_title('Posterior, nll: {:.2f}'.format(d['nll_posterior'][t, i]))
# prior for all classes
ranks_overall = np.argsort(-d['state_believes'][t, i, :]).tolist()
ranks_kl = np.argsort(d['KLdivs'][t, i, :]).tolist()
ps_kl = softmax(-d['KLdivs'][t, i, :])
for j, hyp in enumerate(ranked_hyp):
self._glimpse_patches_until_t(min(t + 1, self.num_glimpses), gl[:, i], gl_preds[:, i, hyp], d['locs'][:, i], axes[t + 1, j + 2])
if d['decisions'][t, i] != -1:
axes[t + 1, j + 2].set_title('Decision: {}'.format(d['decisions'][t, i]))
else:
c = get_title_color(d['state_believes'][min(t + 1, self.num_glimpses), i, :], hyp)
axes[t + 1, j + 2].set_title('{}: tot. rank pre: {}, kl rank: {}\nsftmx(KL): {:.2f}, KL: {:.2f}, post-c: {:.2f}'.format(
self.lbl_map[hyp], ranks_overall.index(hyp), ranks_kl.index(hyp),
ps_kl[hyp], d['KLdivs'][t, i, hyp], d['state_believes'][t + 1, i, hyp]),
color=c)
[(ax.set_xticks([]), ax.set_yticks([]), ax.set_ylim([self.img_shape[0] - 1, 0]), ax.set_xlim([0, self.img_shape[1] - 1])) for ax in axes.ravel()]
[ax.set_axis_off() for ax in axes[0].ravel()]
self._save_fig(f, folder_name, '{}{}_n{}{isuk}.png'.format(self.prefix, suffix, i,
isuk='_uk' if (d['y'][i] == self.uk_label) else ''))
def _stick_glimpse_onto_canvas(self, glimpse, loc):
img_y, img_x = self.img_shape[:2]
loc_y, loc_x = loc
half_width = self.scale_sizes[0] / 2
assert len(self.scale_sizes) == 1, 'Not adjusted for multiple scales yet'
# Adjust glimpse if going over the edge
y_overlap_left = -int(min(round(loc_y - half_width), 0))
y_overlap_right = int(img_y - round(loc_y + half_width)) if ((round(loc_y + half_width) - img_y) > 0) else None
x_overlap_left = -int(min(round(loc_x - half_width), 0))
x_overlap_right = int(img_x - round(loc_x + half_width)) if ((round(loc_x + half_width) - img_x) > 0) else None
glimpse = glimpse[y_overlap_left : y_overlap_right,
x_overlap_left : x_overlap_right]
# Boundaries of the glimpse
x_boundry_left = int(max(round(loc_x - half_width), 0))
x_boundry_right = int(min(round(loc_x + half_width), img_x))
y_boundry_left = int(max(round(loc_y - half_width), 0))
y_boundry_right = int(min(round(loc_y + half_width), img_y))
# Pad up to canvas size
if self.img_shape[2] == 1:
glimpse_padded = np.pad(glimpse, [(y_boundry_left, img_y - y_boundry_right),
(x_boundry_left, img_x - x_boundry_right)],
mode='constant')
else:
glimpse_padded = np.pad(glimpse, [(y_boundry_left, img_y - y_boundry_right),
(x_boundry_left, img_x - x_boundry_right),
(0, 0)],
mode='constant')
assert glimpse_padded.shape == tuple(self.img_shape_squeezed)
return glimpse_padded
def _glimpse_patches_until_t(self, until_t, true_glimpses, glimpses, locs, ax):
"""Plot the true_glimpses[:until_t - 2] & glimpses[until_t - 1] onto a canvas of shape img_shape, with the latest glimpses overlapping older ones (important for predictions)"""
ix, iy = np.meshgrid(np.arange(self.img_shape[0]), np.arange(self.img_shape[1]))
half_width = self.scale_sizes[0] / 2
        seen = np.zeros(self.img_shape[:2], bool)
glimpse_padded = np.zeros(self.img_shape_squeezed)
for t in range(until_t):
loc = locs[t, :]
y_boundry = [loc[0] - half_width, loc[0] + half_width]
x_boundry = [loc[1] - half_width, loc[1] + half_width]
new = (ix >= round(x_boundry[0])) & (ix < round(x_boundry[1])) & (iy >= round(y_boundry[0])) & (iy < round(y_boundry[1]))
seen[new] = True
            source_glimpses = glimpses if (t == until_t - 1) else true_glimpses
            new_glimpse_padded = self._stick_glimpse_onto_canvas(source_glimpses[t], locs[t])
glimpse_padded = np.where(new, new_glimpse_padded, glimpse_padded)
glimpse_padded_seen = self._mask_unseen(glimpse_padded, seen)
ax.imshow(glimpse_padded_seen, **self.im_show_kwargs)
half_pixel = 0.5 if (self.scale_sizes[0] % 2 == 0) else 0 # glimpses are rounded to pixel values do the same for the rectangle to make it fit nicely
ax.add_patch(Rectangle(np.round(locs[until_t - 1, ::-1] - half_width) - half_pixel, width=self.scale_sizes[0], height=self.scale_sizes[0], edgecolor='green', facecolor='none'))
# @visualisation_level(2)
# def plot_planning(self, d, nr_examples, suffix='', folder_name='planning'):
# # T x [True glimpse, exp_exp_obs, exp_obs...]
# nax_x = self.num_policies
# nax_y = 1 + self.num_glimpses
#
# # exp_exp_obs = self._scale_reshp(d['exp_exp_obs']) # [T, B, n_policies, scale[0], scales*scale[0]]
# # exp_obs = self._scale_reshp(d['exp_obs']) # [T, B, n_policies, num_classes, scale[0], scales*scale[0]]
#
# for i in range(nr_examples):
# f, axes = plt.subplots(nax_y, nax_x, figsize=(4 * self.num_scales * nax_x, 4 * nax_y))
# axes = axes.reshape([nax_y, nax_x])
# self._plot_img_plus_locs(axes[0, 0], d['x'][i], d['y'][i], d['clf'][i], d['locs'][:, i, :], d['decisions'][:, i])
#
# # Note: first action is random, meaning d['potential_actions'][0] will be zero
# for t in range(self.num_glimpses):
# for k in range(self.num_policies):
# # potential location under evaluation
# locs = d['potential_actions'][t, i, k]
# color = 'green' if (locs == d['locs'][t, i, :]).all() else 'cyan'
#
# axes[t, k].imshow(d['x'][i].reshape(self.img_shape_squeezed), **self.im_show_kwargs)
# axes[t, k].scatter(locs[1], locs[0], marker='x', facecolors=color, linewidth=2.5, s=0.25 * (5 * 8 * 24))
# axes[t, k].add_patch(Rectangle(locs[::-1] - self.scale_sizes[0] / 2, width=self.scale_sizes[0], height=self.scale_sizes[0], edgecolor=color, facecolor='none', linewidth=2.5))
# axes[t, k].set_title('G: {:.2f}, H_: {:.2f}, exp_H: {:.2f}, G_dec: {:.2f}'.format(d['G'][t, i, k], d['H_exp_exp_obs'][t, i, k], d['exp_H'][t, i, k], d['G'][t, i, -1]))
#
# # ranked_hyp = np.argsort(d['state_believes'][t, i, :])
# # for j, hyp in enumerate(ranked_hyp[::-1]):
# # # axes[t, j + 2].imshow(exp_obs[t, i, k, hyp], **self.im_show_kwargs)
# # axes[t, j + 2].set_title('Hyp: {}, prob: {:.2f}'.format(hyp, d['state_believes'][t, i, hyp]))
#
# [ax.set_axis_off() for ax in axes.ravel()]
# self._save_fig(f, folder_name, '{}{}_n{}.png'.format(self.prefix, suffix, i))
@visualisation_level(2)
def plot_planning(self, d, nr_examples, suffix='', folder_name='planning'):
# T x [True glimpse, exp_exp_obs, exp_obs...]
nax_x = nr_examples
nax_y = self.num_glimpses
f, axes = plt.subplots(nax_y, nax_x, figsize=(8 * self.num_scales * nax_x, 4 * nax_y), squeeze=False)
for i in range(nr_examples):
# Note: first action is random, meaning d['potential_actions'][0] will be zero
for t in range(self.num_glimpses):
if t == 0: # random action
self._plot_img_plus_locs(axes[0, i], d['x'][i], d['y'][i], d['clf'][i], d['locs'][:, i, :], d['decisions'][:, i])
axes[t, i].set_title('t: {}, random policy, lbl: {}, clf: {}'.format(t, d['y'][i], d['clf'][i]))
else:
if np.sum(d['H_exp_exp_obs'][t, i, :]) == 0.:
axes[t, i].set_title('t: {}, decision - no glimpse'.format(t))
break
axes[t, i].imshow(d['x'][i].reshape(self.img_shape_squeezed), **self.im_show_kwargs)
axes[t, i].set_title('t: {}, selected policy: {}'.format(t, np.argmax(d['G'][t, i, :])))
for k in range(self.num_policies):
# potential location under evaluation
locs = d['potential_actions'][t, i, k]
color = 'C{}'.format(k)
correct = np.all((locs == d['locs'][t, i, :]))
lbl = '{}: G: {:.2f}, H_: {:.2f}, exp_H: {:.2f}, G_dec: {:.2f}'.format(k, d['G'][t, i, k], d['H_exp_exp_obs'][t, i, k], d['exp_H'][t, i, k], d['G'][t, i, -1])
axes[t, i].add_patch(Rectangle(locs[::-1] - self.scale_sizes[0] / 2,
width=self.scale_sizes[0], height=self.scale_sizes[0],
edgecolor=color, facecolor='none', linewidth=1.5, label=lbl))
if correct:
axes[t, i].scatter(locs[1], locs[0], marker='x', facecolors=color, linewidth=1.5, s=0.25 * (5 * 8 * 24))
# add current believes to legend
ranked_believes = np.argsort(- d['state_believes'][t, i, :])
lbl = 'hyp: ' + ', '.join('{} ({:.2f})'.format(j, d['state_believes'][t, i, j]) for j in ranked_believes[:5])
axes[t, i].scatter(0, 0, marker='x', facecolors='k', linewidth=0, s=0, label=lbl)
chartBox = axes[t, i].get_position()
axes[t, i].set_position([chartBox.x0, chartBox.y0, chartBox.width * 0.6, chartBox.height])
axes[t, i].legend(loc='center left', bbox_to_anchor=(1.04, 0.5), borderaxespad=0)
[ax.set_axis_off() for ax in axes.ravel()]
self._save_fig(f, folder_name, '{}{}.png'.format(self.prefix, suffix))
@visualisation_level(2)
def plot_planning_patches(self, d, nr_examples, suffix='', folder_name='planning_patches'):
nax_x = nr_examples
nax_y = self.num_glimpses if self.rnd_first_glimpse else self.num_glimpses + 1
f, axes = plt.subplots(nax_y, nax_x, figsize=(8 * self.num_scales * nax_x, 4 * nax_y), squeeze=False)
frames_cmap = matplotlib.cm.get_cmap('bwr')
frames_color = frames_cmap(np.linspace(1, 0, self.num_policies))
for i in range(nr_examples):
# if first glimpse is random, plot overview in its spot. O/w create an additional plot
self._plot_img_plus_locs(axes[0, i], d['x'][i], d['y'][i], d['clf'][i], d['locs'][:, i, :], d['decisions'][:, i])
if self.rnd_first_glimpse:
start_t = 1
axes[0, i].set_title('t: {}, random policy, lbl: {}, clf: {}'.format(0, d['y'][i], d['clf'][i]))
else:
start_t = 0
axes[0, i].set_title('Lbl: {}, clf: {}'.format(d['y'][i], d['clf'][i]))
for ax, t in enumerate(range(start_t, self.num_glimpses)):
ax += 1
# plot patches seen until now
self._plot_seen(d['x'][i], d['locs'][:, i], until_t=t, ax=axes[ax, i])
# add current believes to legend
ranked_believes = np.argsort(- d['state_believes'][t, i, :])
lbl = 'hyp: ' + ', '.join('{} ({:.2f})'.format(j, d['state_believes'][t, i, j]) for j in ranked_believes[:5])
axes[ax, i].add_patch(Rectangle((0, 0), width=0.1, height=0.1, linewidth=0, color='white', label=lbl))
decided = (d['decisions'][:t+1, i] != -1).any()
if decided:
axes[ax, i].set_title('t: {}, decision - no new glimpse'.format(t))
else:
selected = [j for j, arr in enumerate(d['potential_actions'][t, i, :]) if (arr == d['locs'][t, i]).all()]
axes[ax, i].set_title('t: {}, selected policy: {}'.format(t, selected[0]))
# plot rectangles for evaluated next locations
ranked_policies = np.argsort(- d['G'][t, i, :-1])
for iii, k in enumerate(ranked_policies):
# potential location under evaluation
locs = d['potential_actions'][t, i, k]
correct = np.all((locs == d['locs'][t, i, :]))
lbl = '{}: G: {:.2f}, H(exp): {:.2f}, E(H): {:.2f}, G_dec: {:.2f}'.format(k, d['G'][t, i, k], d['H_exp_exp_obs'][t, i, k], d['exp_H'][t, i, k], d['G'][t, i, -1])
axes[ax, i].add_patch(Rectangle(locs[::-1] - self.scale_sizes[0] / 2,
width=self.scale_sizes[0], height=self.scale_sizes[0],
edgecolor=frames_color[iii], facecolor='none', linewidth=1.5, label=lbl))
if correct:
axes[ax, i].scatter(locs[1], locs[0], marker='x', facecolors=frames_color[iii], linewidth=1.5, s=0.25 * (5 * 8 * 24))
# place legend next to plot
chartBox = axes[ax, i].get_position()
axes[ax, i].set_position([chartBox.x0, chartBox.y0, chartBox.width * 0.6, chartBox.height])
axes[ax, i].legend(loc='center left', bbox_to_anchor=(1.04, 0.5), borderaxespad=0)
if decided: # set all following axes off and stop
[axes[ttt, i].set_axis_off() for ttt in range(ax+1, nax_y)]
break
[(ax.set_xticks([]), ax.set_yticks([]), ax.set_ylim([self.img_shape[0] - 1, 0]), ax.set_xlim([0, self.img_shape[1] - 1])) for ax in axes.ravel()]
self._save_fig(f, folder_name, '{}{}.png'.format(self.prefix, suffix))
@visualisation_level(2)
def plot_FE(self, d, nr_examples, suffix='', folder_name='FE'):
if self.rnn_cell.startswith('Conv') and not self.use_pixel_obs_FE:
logging.debug('Skip FE plots for convLSTM. Shapes for z not defined')
# TODO: adjust size_z to not come from FLAGS but from VAEEncoder.output_shape_flat
return
# T x [True glimpse, posterior, exp_exp_obs, exp_obs...]
nax_x = 3 + self.num_classes_kn
nax_y = self.num_glimpses
gl = self._glimpse_reshp(d['glimpse']) # [T, B, scale[0], scales*scale[0]]
if self.use_pixel_obs_FE:
posterior = self._glimpse_reshp(d['reconstr_posterior'])
exp_exp_obs = self._glimpse_reshp(d['exp_exp_obs'])
exp_obs_prior = self._glimpse_reshp(d['reconstr_prior'])
else:
if self.size_z == 10:
shp = [5, 2]
elif self.size_z == 32:
shp = [8, 4]
elif self.size_z == 128:
shp = [16, 8]
else:
shp = 2 * [int(np.sqrt(self.size_z))]
if np.prod(shp) != self.size_z:
print('Unspecified shape for this size_z and plot_z. Skipping z plots.')
return
posterior = np.reshape(d['z_post'], [self.num_glimpses, self.batch_size_eff] + shp)
exp_exp_obs = np.reshape(d['exp_exp_obs'], [self.num_glimpses, self.batch_size_eff, self.num_policies] + shp)
exp_obs_prior = np.reshape(d['selected_exp_obs_enc'], [self.num_glimpses, self.batch_size_eff, self.num_classes_kn] + shp)
for i in range(nr_examples):
f, axes = plt.subplots(nax_y, nax_x, figsize=(4 * self.num_scales * nax_x, 4 * nax_y), squeeze=False)
for t in range(self.num_glimpses):
if t == 0:
self._plot_img_plus_locs(axes[t, 0], d['x'][i], d['y'][i], d['clf'][i], d['locs'][:, i, :], d['decisions'][:, i])
else:
axes[t, 0].imshow(gl[t, i], **self.im_show_kwargs)
axes[t, 0].set_title('t: {}'.format(t))
axes[t, 1].imshow(posterior[t, i], **self.im_show_kwargs)
axes[t, 1].set_title('posterior')
p = d['selected_action_idx'][t, i]
axes[t, 2].imshow(exp_exp_obs[t, i, p], **self.im_show_kwargs)
axes[t, 2].set_title('H(exp) policy0: {:.2f}'.format(d['H_exp_exp_obs'][t, i, p]))
ranked_believes = np.argsort(- d['state_believes'][t, i, :])
for k in ranked_believes:
axes[t, 3 + k].imshow(exp_obs_prior[t, i, k], **self.im_show_kwargs)
axes[t, 3 + k].set_title('k: {}, p: {:.2f}'.format(k, d['state_believes'][t, i, k]))
[ax.set_axis_off() for ax in axes.ravel()]
self._save_fig(f, folder_name, '{}{}_n{}.png'.format(self.prefix, suffix, i))
@visualisation_level(2)
def plot_fb(self, d, suffix=''):
def fb_hist(fb1, fb2, ax, title, add_legend):
"""fb1, fb2: tuple of (values, legend)"""
ax.hist(fb1[0], bins, alpha=0.5, label=fb1[1])
ax.hist(fb2[0], bins, alpha=0.5, label=fb2[1])
ax.set_title(title)
if add_legend:
ax.legend(loc='upper right')
nax = self.num_classes
ntax = self.num_glimpses - 1
bins = 40
f, axes = plt.subplots(ntax, nax, figsize=(4 * nax, 4 * self.num_glimpses))
if self.uk_label is not None:
is_uk = (d['y'] == self.uk_label)
fb_kn_best = d['fb'][:, ~is_uk, :].min(axis=2) # in-shape: [T, B, hyp]
fb_uk_best = d['fb'][:, is_uk, :].min(axis=2)
else:
fb_kn_best, fb_uk_best = None, None
for t in range(ntax):
for hyp in range(self.num_classes_kn):
is_hyp = (d['y'] == hyp)
if t < self.num_glimpses:
pre = 't{}: '.format(t) if (hyp == 0) else ''
fb_corr = d['fb'][t, is_hyp, hyp]
fb_wrong = d['fb'][t, ~is_hyp, hyp]
else: # last row: sum over time
break
# pre = 'All t: ' if (hyp == 0) else ''
# fb_corr = d['fb'][:, is_hyp, hyp].sum(axis=0)
# fb_wrong = d['fb'][:, ~is_hyp, hyp].sum(axis=0)
fb_hist((fb_corr, 'correct hyp'),
(fb_wrong, 'wrong hyp'),
axes[t, hyp], '{}hyp: {}'.format(pre, self.lbl_map[hyp]), add_legend=(t==0))
if self.uk_label is not None:
# right most: best fb across hyp for kn vs uk
if t < self.num_glimpses:
fb_kn = fb_kn_best[t]
fb_uk = fb_uk_best[t]
else:
fb_kn = fb_kn_best.sum(axis=0)
fb_uk = fb_uk_best.sum(axis=0)
fb_hist((fb_kn, 'known'),
(fb_uk, 'uk'),
axes[t, nax - 1], 'best fb', add_legend=(t==0))
self._save_fig(f, 'fb', '{}{}.png'.format(self.prefix, suffix))
@visualisation_level(2)
def plot_stateBelieves(self, d, suffix):
# TODO: INCLUDE uk_belief and plots differentiating by known/uk
ntax = self.num_glimpses
bins = 40
f, axes = plt.subplots(ntax, 1, figsize=(4, 4 * self.num_glimpses), squeeze=False)
top_believes = d['state_believes'].max(axis=2) # [T+1, B, num_classes] -> [T+1, B]
top_believes_class = d['state_believes'].argmax(axis=2) # [T+1, B, num_classes] -> [T+1, B]
is_corr = (top_believes_class == d['y'][np.newaxis, :])
corr = np.ma.masked_array(top_believes, mask=~is_corr)
wrong = np.ma.masked_array(top_believes, mask=is_corr)
for t in range(ntax):
            if corr[t+1].count():
                axes[t, 0].hist(corr[t+1].compressed(), bins=bins, alpha=0.5, label='corr')
            if wrong[t+1].count():
                axes[t, 0].hist(wrong[t+1].compressed(), bins=bins, alpha=0.5, label='wrong')
axes[t, 0].legend(loc='upper right')
axes[t, 0].set_title('Top believes after glimpse {}'.format(t+1))
axes[t, 0].set_xlim([0, 1])
self._save_fig(f, 'c', '{}{}.png'.format(self.prefix, suffix))
|
import torch
import torch.nn as nn
from core.taming.utils import Normalize, nonlinearity
class ResnetBlock(nn.Module):
def __init__(self, *, in_channels, out_channels=None, conv_shortcut=False,
dropout, temb_channels=512):
super().__init__()
self.in_channels = in_channels
out_channels = in_channels if out_channels is None else out_channels
self.out_channels = out_channels
self.use_conv_shortcut = conv_shortcut
self.norm1 = Normalize(in_channels)
self.conv1 = torch.nn.Conv2d(in_channels, out_channels, kernel_size=3, stride=1, padding=1)
if temb_channels > 0:
self.temb_proj = torch.nn.Linear(temb_channels,
out_channels)
self.norm2 = Normalize(out_channels)
self.dropout = torch.nn.Dropout(dropout)
self.conv2 = torch.nn.Conv2d(out_channels, out_channels, kernel_size=3, stride=1, padding=1)
if self.in_channels != self.out_channels:
if self.use_conv_shortcut:
self.conv_shortcut = torch.nn.Conv2d(
in_channels, out_channels, kernel_size=3, stride=1, padding=1
)
else:
self.nin_shortcut = torch.nn.Conv2d(
in_channels, out_channels, kernel_size=1, stride=1, padding=0
)
def forward(self, x, temb):
h = x
h = self.norm1(h)
h = nonlinearity(h)
h = self.conv1(h)
if temb is not None:
h = h + self.temb_proj(nonlinearity(temb))[:, :, None, None]
h = self.norm2(h)
h = nonlinearity(h)
h = self.dropout(h)
h = self.conv2(h)
if self.in_channels != self.out_channels:
if self.use_conv_shortcut:
x = self.conv_shortcut(x)
else:
x = self.nin_shortcut(x)
return x + h
|
import os
import git
import click
import json
import string
import random
import pickle
from pathlib import Path
import shutil
from subprocess import STDOUT, check_call
import sys
def load_config():
"""
Load configuration class. If not configured before
create a new one and save as pickle in homedir/.settler.
"""
home_dir = os.path.expanduser("~")
settler_dir = os.path.join(home_dir, ".settler")
cfg_filepath = os.path.join(settler_dir, "config.pickle")
# TODO: do this with __new__
# https://stackoverflow.com/questions/43965376/initialize-object-from-the-pickle-in-the-init-function
if os.path.isfile(cfg_filepath):
with open(cfg_filepath, 'rb') as file:
return pickle.load(file)
else:
# click.echo("No settler folder found, initializing new one.", color='green')
return SettlerConfig()
class SettlerConfig:
def __init__(self):
self.home_dir = os.path.expanduser("~")
settler_dir = os.path.join(self.home_dir, ".settler")
self.settler_cfg = os.path.join(settler_dir, "config.pickle")
if not os.path.isdir(settler_dir):
os.mkdir(settler_dir)
self.backpacks = {}
self.backpack_dir = None
self.backpack_data = None
self.backpack_name = None
# self.save_pickle()
def drop_backpack(self):
self.backpack_dir = None
self.backpack_data = None
self.backpack_name = None
def load_backpack(self):
for dir_name in self.backpack_data["folders"]:
load_dir(dir_name, self.backpack_dir, self.home_dir)
for filename in self.backpack_data["files"]:
load_file(filename, self.backpack_dir, self.home_dir)
#for package_name in self.backpack_data["apt-get"]:
# install_apt(package_name)
def unload_backpack(self):
for dirname in self.backpack_data["folders"]:
unload_dir(dirname, self.backpack_dir, self.home_dir)
for filename in self.backpack_data["files"]:
unload_file(filename, self.backpack_dir, self.home_dir)
self.drop_backpack()
def add_backpack(self, backpack_dir):
backpack_dir = os.path.abspath(backpack_dir)
backpack_cfg_path = os.path.join(backpack_dir, "settler.json")
backpack_data = read_cfg(backpack_cfg_path)
if self.backpack_dir is None:
self.backpacks[backpack_data["name"]] = backpack_dir
self.backpack_data = backpack_data
self.backpack_dir = backpack_dir
self.backpack_name = backpack_data["name"]
else:
self.backpacks[backpack_data["name"]] = backpack_dir
click.echo("Backpack " + self.backpack_name + " already loaded", color='red')
def status(self):
if self.backpacks:
rm_list = []
click.echo("Backpacks:")
for name, dirpath in sorted(self.backpacks.items()):
if os.path.isdir(dirpath):
if name == self.backpack_name:
click.echo(click.style("[x] " + name + " " + dirpath, fg='blue'))
else:
click.echo("[ ] " + name + " " + dirpath)
else:
click.echo(click.style("Folder not found , removing @ " + dirpath, fg='blue'))
rm_list.append(name)
for key in rm_list:
self.backpacks.pop(key, None)
def save_pickle(self):
with open(self.settler_cfg, "wb") as raw_file:
pickle.dump(self, raw_file)
def remove_backpack(self, name):
if name in self.backpacks:
self.backpacks.pop(name)
click.echo(click.style("Removed backpack: " + name, fg='green'))
else:
click.echo(click.style("Backpack: " + name + " not registered.", fg='blue'))
def random_generator(size=6, chars=string.ascii_uppercase + string.digits):
return ''.join(random.choice(chars) for x in range(size))
def initialize_folder(foldername):
try:
os.mkdir("./" + foldername)
cfg_filepath = os.path.join(foldername, "settler.json")
with open(cfg_filepath, 'w') as cfg_file:
default_cfg = """
{
"name": \""""+foldername+"""\",
"apt-get" : [
"gimp"
],
"folders": [".fakefolder"],
"files": [".fakerc", ".fakerc2"]
}"""
cfg_file.write(default_cfg)
print(foldername + " created.")
print("Edit settler.json to add files and folders to settler")
except OSError:
print("Folder " + foldername + " already exists.")
def refresh_folder(folderpath):
cfg_path = os.path.join(folderpath, "settler.json")
if os.path.isfile(cfg_path):
print("Check if files need copying")
        cfg_data = read_cfg(cfg_path)
else:
print("Folder is not a backpack.")
def unload_file(filename, backpack_dir, home_dir):
src = os.path.join(home_dir, filename)
dst = os.path.join(backpack_dir, filename)
if os.path.isfile(dst):
os.unlink(src)
shutil.copy(dst, src)
click.echo(click.style("File: " + dst + " -> " + src, fg='green'))
else:
click.echo(click.style("Skipping: File not found @ " + dst, fg='blue'))
def unload_dir(dirname, backpack_dir, home_dir):
"""
"""
src = os.path.join(home_dir, dirname)
dst = os.path.join(backpack_dir, dirname)
if not os.path.isdir(dst):
click.echo(click.style("Skipping: Folder not found @ " + dst, fg='blue'))
return
if os.path.islink(src):
os.unlink(src)
if os.path.isdir(src):
click.echo(click.style("Skipping: Folder already present @ " + src, fg='blue'))
return
shutil.copytree(dst, src)
click.echo(click.style("Dir: " + dst + " -> " + src, fg='green'))
def load_dir(dirname, backpack_dir, home_dir):
"""
If the dirname has not been loaded it will be:
1) Moved to the current active backpack
2) A link pointing to backpack will be placed
"""
src = os.path.join(home_dir, dirname)
dst = os.path.join(backpack_dir, dirname)
if not os.path.isdir(src):
click.echo(click.style("Skipping: Source directory doesnt exist: @ " + src, fg='blue'))
return
if os.path.isdir(dst):
click.echo(click.style("Skipping: Destination directory already exists @ " + dst, fg='blue'))
return
click.echo(click.style("Dir: " + src + " -> " + dst, fg='green'))
shutil.copytree(src, dst)
shutil.rmtree(src)
os.symlink(dst, src)
def load_file(filename, backpack_dir, home_dir):
"""
If the filename has not been loaded it will be:
1) Moved to the current active backpack
2) A link pointing to backpack will be placed
"""
src = os.path.join(home_dir, filename)
dst = os.path.join(backpack_dir, filename)
if os.path.isfile(src):
# shutil.copy(src, dst)
# os.remove(src)
shutil.move(src, dst)
os.symlink(dst, src)
click.echo(click.style("File:" + src + " -> " + dst, fg='green'))
else:
click.echo(click.style("Skipping: File not found @ " + src, fg='blue'))
def install_apt(package_name):
install_text = click.style(' + %s' % package_name, fg='blue')
click.echo(install_text)
check_call(['sudo', 'apt-get', 'install', '-y', package_name],
stdout=open(os.devnull, 'wb'), stderr=STDOUT)
def uninstall_apt(package_name):
pass
def read_cfg(filepath):
with open(filepath, 'r') as raw_file:
cfg_data = json.load(raw_file)
return cfg_data
#
# # Install apt-get packages
# for package_name in cfg_data['apt-get']:
# install_apt(package_name)
#
# for dirname in cfg_data['folders']:
# load_dir(dirname)
#
# for filename in cfg_data['files']:
# load_file(filename)
def read_config(path):
click.echo("Loading config")
with open(path + '/settler.json', 'r') as f_json:
data = json.load(f_json)
click.echo("Installing apt packages:")
# Install apt-get packages
for package_name in data['apt-get']:
install_apt(package_name)
# Copy directories
copy_directories(path, data)
copy_files(path, data)
return data
def copy_files(path, config):
home_path = str(Path.home())
files = {}
for root, dirs, filesx in os.walk(path):
for filename in filesx:
if filename != 'settler.json':
filename = str(filename)
files[filename] = os.path.join(home_path, filename)
break
if 'files' in config:
for filename, filepath in config['files'].items():
if os.path.exists(os.path.join(path, filename)) and filename != '':
if os.path.isabs(filepath):
files[filename] = filepath
else:
files[filename] = os.path.join(home_path, filepath)
for filename, dst in files.items():
click.echo("Saving %s as %s" % (filename, dst))
src = os.path.join(path, filename)
shutil.copyfile(src, dst)
def copy_directories(path, config):
home_path = str(Path.home())
folders = {}
for f in os.listdir(path):
if not os.path.isfile(os.path.join(path, f)) and f != '.git':
folders[f] = os.path.join(home_path, f)
if 'folders' in config:
for foldername, folderpath in config['folders'].items():
if os.path.exists(os.path.join(path, foldername)) and foldername != '':
if os.path.isabs(folderpath):
folders[foldername] = folderpath
else:
folders[foldername] = os.path.join(home_path, folderpath)
for foldername, dst in folders.items():
click.echo("Copying %s to %s" % (foldername, dst))
src = os.path.join(path, foldername)
copydir(src, dst)
print("Folders", folders)
def copydir(source, dest, indent=0):
    """Copy a directory structure overwriting existing files"""
    for root, dirs, files in os.walk(source):
        rel_path = root.replace(source, '').lstrip(os.sep)
        dest_dir = os.path.join(dest, rel_path)
        if not os.path.isdir(dest_dir):
            os.makedirs(dest_dir)
        for each_file in files:
            dest_path = os.path.join(dest_dir, each_file)
            shutil.copyfile(os.path.join(root, each_file), dest_path)
def clone_repo(backpack, branch):
text_repo = click.style('%s' % backpack, fg='blue')
text_branch = click.style('%s' % branch, fg='green')
click.echo("Cloning " + text_repo + ":" + text_branch)
# Clone repository
gh_url = 'https://github.com/' + backpack + '.git'
local_path = '/tmp/' + random_generator()
repo = git.Repo.clone_from(gh_url, local_path, branch=branch)
return local_path
# class Progress(git.remote.RemoteProgress):
# def update(self, op_code, cur_count, max_count=None, message=''):
# print(self._cur_line)
|
from facepy.exceptions import FacepyError
from facepy.graph_api import GraphAPI
from facepy.signed_request import SignedRequest
from facepy.utils import get_application_access_token, get_extended_access_token
__all__ = [
'FacepyError',
'GraphAPI',
'SignedRequest',
'get_application_access_token',
'get_extended_access_token',
]
|
"""Added RandomTable
Revision ID: 183d3f0348eb
Revises: 51f5ccfba190
Create Date: 2017-11-28 11:43:16.511000
"""
# revision identifiers, used by Alembic.
revision = '183d3f0348eb'
down_revision = '2356a38169ea'
from alembic import op
import sqlalchemy as sa
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.create_table('random_table',
sa.Column('id', sa.Text(), nullable=False),
sa.Column('author_id', sa.Integer(), nullable=False),
sa.Column('name', sa.Text(), nullable=True),
sa.Column('description', sa.Text(), nullable=True),
sa.Column('definition', sa.Text(), nullable=True),
sa.Column('min', sa.Integer(), nullable=True),
sa.Column('max', sa.Integer(), nullable=True),
sa.Column('description_html', sa.Text(), nullable=True),
sa.Column('permissions', sa.Integer(), nullable=True),
sa.Column('line_type', sa.Integer(), nullable=True),
sa.Column('timestamp', sa.DateTime(), nullable=True),
sa.ForeignKeyConstraint(['author_id'], ['users.id'], ),
sa.PrimaryKeyConstraint('id', 'author_id')
)
op.create_index(op.f('ix_random_table_permissions'), 'random_table', ['permissions'], unique=False)
op.create_index(op.f('ix_random_table_timestamp'), 'random_table', ['timestamp'], unique=False)
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_index(op.f('ix_random_table_timestamp'), table_name='random_table')
op.drop_index(op.f('ix_random_table_permissions'), table_name='random_table')
op.drop_table('random_table')
# ### end Alembic commands ###
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('lexicon', '0109_nexusexport__exportbeauti'),
]
operations = [
migrations.AddField(
model_name='language',
name='latitude',
field=models.DecimalField(
null=True, max_digits=19, decimal_places=10),
),
migrations.AddField(
model_name='language',
name='longitude',
field=models.DecimalField(
null=True, max_digits=19, decimal_places=10),
),
]
|
"""Atomic append of gzipped data.
The point is - if several gzip streams are concatenated,
they are read back as one whole stream.
"""
import gzip
import io
__all__ = ('gzip_append',)
def gzip_append(filename: str, data: bytes, level: int = 6) -> None:
"""Append a block of data to file with safety checks."""
# compress data
buf = io.BytesIO()
with gzip.GzipFile(fileobj=buf, compresslevel=level, mode="w") as g:
g.write(data)
zdata = buf.getvalue()
# append, safely
with open(filename, "ab+", 0) as f:
f.seek(0, 2)
pos = f.tell()
try:
f.write(zdata)
except Exception as ex:
# rollback on error
f.seek(pos, 0)
f.truncate()
raise ex
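# A minimal usage sketch: append two compressed blocks and read them back as
# one continuous stream ('demo.log.gz' is a throwaway example file).
if __name__ == '__main__':
    gzip_append('demo.log.gz', b'first line\n')
    gzip_append('demo.log.gz', b'second line\n')
    with gzip.open('demo.log.gz', 'rb') as f:
        print(f.read())  # b'first line\nsecond line\n'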
|
"""
Copyright (C) 2019 by
The Salk Institute for Biological Studies and
Pittsburgh Supercomputing Center, Carnegie Mellon University
Use of this source code is governed by an MIT-style
license that can be found in the LICENSE file or at
https://opensource.org/licenses/MIT.
"""
# NOTE: using a git library was considered; however, it was not clear whether
# such libraries really work on Windows and macOS, so simple wrapper functions were created instead
import os
import time
import datetime
import platform
from utils import *
from build_settings import *
BASE_URL = 'https://github.com/mcellteam/'
BASE_URL_HTTPS = 'https://github.com/mcellteam/'
BASE_URL_SSH = 'git@github.com:mcellteam/'
PRIVATE_BASE_URL_SSH = 'git@github.com:mcellteam/'
GIT_SUFFIX = '.git'
BASE_REPOSITORIES = [
REPO_NAME_MCELL, REPO_NAME_LIBBNG, REPO_NAME_CELLBLENDER,
REPO_NAME_MCELL_TESTS, REPO_NAME_MCELL_TOOLS,
REPO_NAME_NEUROPIL_TOOLS, REPO_NAME_MESH_TOOLS,
# REPO_NAME_GAMER1, - TODO
REPO_NAME_VTK
]
FORKED_REPOSITORIES = [REPO_NAME_NFSIM, REPO_NAME_NFSIMCINTERFACE, REPO_NAME_BIONETGEN, REPO_NAME_GAMER]
ALL_REPOSITORIES = BASE_REPOSITORIES + FORKED_REPOSITORIES
REPOSITORIES_ALLOWED_TO_BE_DIRTY = [REPO_NAME_MCELL_TESTS, REPO_NAME_MCELL_TOOLS, REPO_NAME_GAMER, REPO_NAME_BIONETGEN]
FORKED_REPOSITORY_BRANCH_PREFIX = 'mcell_'
MIN_GIT_VERSION = 'git version 1.9'
ORIGIN = 'origin'
GAMER_BASE_URL = 'https://github.com/ctlee/'
GAMER_BRANCH = 'master'
def run_git_w_ascii_output(args, cwd):
cmd = ['git']
cmd += args
return run_with_ascii_output(cmd, cwd)
def run_git_w_ec_check(args, cwd):
cmd = ['git']
cmd += args
#print(str(cmd))
ec = run(cmd, cwd)
check_ec(ec, cmd)
def check_git_version():
out = run_git_w_ascii_output(['--version'], os.getcwd())
    # TODO: just a string compare for now..
if out >= MIN_GIT_VERSION:
log("Checked " + out + " - ok")
else:
fatal_error("Required at least " + MIN_GIT_VERSION)
def clone(name, opts, base_url):
log("Repository '" + name + "' does not exist, cloning it...")
run_git_w_ec_check(['clone', base_url + name + GIT_SUFFIX], opts.top_dir)
def fetch(name, opts):
run_git_w_ec_check(['fetch'], os.path.join(opts.top_dir, name))
def get_default_branch(name, branch):
if branch.startswith(BRANCH_PREFIX_MCELL4):
return DEFAULT_BRANCH_MCELL4
elif name in FORKED_REPOSITORIES:
return FORKED_REPOSITORY_BRANCH_PREFIX + DEFAULT_BRANCH
else:
return DEFAULT_BRANCH
def checkout(name, opts, branch):
log("Checking out branch '" + branch + "'")
repo_dir = os.path.join(opts.top_dir, name)
# first check that the branch exists on remote
branches = run_git_w_ascii_output(['branch', '-r'], repo_dir)
full_name = ORIGIN + '/' + branch
if not full_name in branches: # FIXME: improve check, we are just checking a substring
orig_branch = branch
branch = get_default_branch(name, branch)
warning("Remote branch '" + orig_branch + "' does not exit in repo '" + name + "', defaulting to '" + branch + "'.")
full_name2 = ORIGIN + '/' + branch
if not full_name2 in branches: # FIXME: improve check, we are just checking a substring
orig_branch = branch
branch = DEFAULT_BRANCH
warning("Remote default branch '" + orig_branch + "' does not exit in repo '" + name + "', defaulting to '" + branch + "'.")
# then we need to check that the branch is clean before we switch
status = run_git_w_ascii_output(['status'], repo_dir)
print(status)
if 'working directory clean' not in status and 'working tree clean' not in status:
if not opts.ignore_dirty and name not in REPOSITORIES_ALLOWED_TO_BE_DIRTY:
fatal_error("Repository '" + name + "' is not clean. "
"Either clean it manually or if you are sure that there are "
"no changes that need to be kept run this script with '-c'.")
else:
warning("Repository '" + name + "' is not clean, but this repo is allowed to be dirty.")
# finally we can switch
run_git_w_ec_check(['checkout', branch], repo_dir)
def update(name, opts):
log("Updating repository '" + name + "'.")
run_git_w_ec_check(['pull'], os.path.join(opts.top_dir, name))
def get_or_update_repository(name, opts, base_url, branch):
# does the directory exist?
repo_path = os.path.join(opts.top_dir, name)
if not os.path.exists(repo_path):
clone(name, opts, base_url)
else:
log("Repository '" + name + "' already exists, no need to clone it.")
fetch(name, opts)
# checkout the required branch
checkout(name, opts, branch)
# update
if opts.update:
update(name, opts)
def pull_repository(name, opts, base_url, branch):
run_git_w_ec_check(['pull'], os.path.join(opts.top_dir, name))
def push_repository(name, opts, base_url, branch):
run_git_w_ec_check(['push'], os.path.join(opts.top_dir, name))
def reset_hard_repository(name, opts, base_url, branch):
run_git_w_ec_check(['reset', '--hard'], os.path.join(opts.top_dir, name))
def tag_repository(name, opts, base_url, branch):
run_git_w_ec_check(['tag', branch, '-m', branch], os.path.join(opts.top_dir, name))
def merge_repository(name, opts, base_url, branch):
# not all branches must be present
out = run_git_w_ascii_output(['merge', branch], os.path.join(opts.top_dir, name))
print(out)
def run_on_all_repositories(opts, function):
if opts.ssh:
base_url_w_prefix = BASE_URL_SSH
else:
base_url_w_prefix = BASE_URL_HTTPS
for name in BASE_REPOSITORIES:
log("--- Preparing repository '" + name + "' ---")
function(name, opts, base_url_w_prefix, opts.branch)
for name in FORKED_REPOSITORIES:
log("--- Preparing repository '" + name + "' ---")
if not opts.branch.startswith(BRANCH_PREFIX_MCELL4):
# use mcell_ prefix
branch_name = FORKED_REPOSITORY_BRANCH_PREFIX + opts.branch
else:
branch_name = opts.branch
function(name, opts, base_url_w_prefix, branch_name)
if opts.use_private_repos:
log("--- Preparing repository '" + REPO_NAME_MCELL_TEST_PRIVATE + "' ---")
function(REPO_NAME_MCELL_TEST_PRIVATE, opts, PRIVATE_BASE_URL_SSH, opts.branch)
def get_or_update(opts):
check_git_version()
run_on_all_repositories(opts, get_or_update_repository)
def pull(opts):
check_git_version()
run_on_all_repositories(opts, pull_repository)
def push(opts):
check_git_version()
run_on_all_repositories(opts, push_repository)
def reset_hard(opts):
check_git_version()
run_on_all_repositories(opts, reset_hard_repository)
def tag(opts):
check_git_version()
run_on_all_repositories(opts, tag_repository)
def merge(opts):
check_git_version()
run_on_all_repositories(opts, merge_repository)
def create_version_file(opts):
if not os.path.exists(opts.work_dir):
os.makedirs(opts.work_dir)
version_file = os.path.join(opts.work_dir, RELEASE_INFO_FILE)
with open(version_file, "w", encoding="utf-8") as f:
f.write("CellBlender release: " + opts.release_version + "\n")
now = datetime.datetime.now()
f.write("Built on " + now.strftime("%Y-%m-%d %H:%M") + " " + str(time.tzname) + "\n")
f.write("OS: " + platform.platform() + "\n")
cmd = ['gcc', '--version']
res = run_with_ascii_output(cmd, cwd='.')
        if res:
            f.write("GCC: " + res.split('\n')[0] + "\n")
f.write("\n")
for repo_name in ALL_REPOSITORIES:
branch = run_git_w_ascii_output(['describe', '--all'], cwd=os.path.join(opts.top_dir, repo_name))
commit = run_git_w_ascii_output(['log', '-1', '--pretty="%H"'], cwd=os.path.join(opts.top_dir, repo_name))
f.write(repo_name + ": " + commit + " (" + branch + ")\n")
|
from collections import Counter
import numpy as np
VOWELS = [ord(c) for c in "AEIOU"]
def duplicate_items(l):
return [x for x, count in Counter(l).items() if count > 1]
def duplicate_items_order(l):
total = 0
for i in range(len(l) - 1):
if l[i] == l[i + 1]:
total += i
return total
def alphab(c):
i = c - ord("A")
# If we have non text characters, treat as A
if i > 25:
return 0
return i
def name_features(name):
# 0 letter frequency
# 26 letter order
# 52 length
# 53 vowel percentage
# 54 final character
# 55 final character - 1
# 56 final character - 2
# 57 double letter counts
# 58 double letter order
arr = np.zeros(26 + 26 + 7)
# Letter frequencies and orders
for i, c in enumerate(name):
arr[alphab(c)] += 1
arr[alphab(c) + 26] += i + 1
# Length
arr[52] = len(name)
# Vowel percentage
count_vowels = len([c for c in name if c in VOWELS])
arr[53] = int((count_vowels / len(name)) * 100)
# Last character
arr[54] = name[-1]
# Last but one character
arr[55] = name[-2]
# Last but two character
arr[56] = name[-3]
# Double character counts and order
arr[57] = len(duplicate_items(name))
arr[58] = duplicate_items_order(name)
return arr
name_features_map = np.vectorize(name_features, otypes=[np.ndarray])
def featurize(names):
return np.array(name_features_map(names).tolist())
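# A minimal usage sketch (assumed input encoding): a name is passed as a
# sequence of uppercase character codes, matching alphab() and VOWELS above.
if __name__ == '__main__':
    encoded = [ord(c) for c in "ANNA"]
    feats = name_features(encoded)
    print(feats.shape)  # (59,)
    print(feats[52])    # name length -> 4.0
    print(feats[57])    # number of duplicated letters -> 2.0 ('A' and 'N')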
|
# Copyright 2019 The FastEstimator Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import os
import random
import tarfile
from pathlib import Path
from typing import Dict, List, Optional, Tuple
import pandas as pd
import wget
from fastestimator.dataset.csv_dataset import CSVDataset
from fastestimator.util.wget_util import bar_custom, callback_progress
wget.callback_progress = callback_progress
def _create_csv(images: List[str], label_dict: Dict[str, int], csv_path: str) -> None:
"""A helper function to create and save csv files.
Args:
images: List of image id's.
label_dict: Mapping of class name to label id.
csv_path: Path to save the csv file.
"""
df = pd.DataFrame(images, columns=["image"])
df["label"] = df["image"].apply(lambda x: label_dict[x.split("/")[0]])
df["image"] = "food-101/images/" + df["image"] + ".jpg"
df.to_csv(csv_path, index=False)
return None
def load_data(root_dir: Optional[str] = None) -> Tuple[CSVDataset, CSVDataset]:
"""Load and return the Food-101 dataset.
Food-101 dataset is a collection of images from 101 food categories.
Sourced from http://data.vision.ee.ethz.ch/cvl/food-101.tar.gz
Args:
root_dir: The path to store the downloaded data. When `path` is not provided, the data will be saved into
`fastestimator_data` under the user's home directory.
Returns:
(train_data, test_data)
"""
home = str(Path.home())
if root_dir is None:
root_dir = os.path.join(home, 'fastestimator_data', 'Food_101')
else:
root_dir = os.path.join(os.path.abspath(root_dir), 'Food_101')
os.makedirs(root_dir, exist_ok=True)
image_compressed_path = os.path.join(root_dir, 'food-101.tar.gz')
image_extracted_path = os.path.join(root_dir, 'food-101')
train_csv_path = os.path.join(root_dir, 'train.csv')
test_csv_path = os.path.join(root_dir, 'test.csv')
if not os.path.exists(image_extracted_path):
# download
if not os.path.exists(image_compressed_path):
print("Downloading data to {}".format(root_dir))
wget.download('http://data.vision.ee.ethz.ch/cvl/food-101.tar.gz', root_dir, bar=bar_custom)
# extract
print("\nExtracting files ...")
with tarfile.open(image_compressed_path) as img_tar:
img_tar.extractall(root_dir)
labels = open(os.path.join(root_dir, "food-101/meta/classes.txt"), "r").read().split()
label_dict = {labels[i]: i for i in range(len(labels))}
if not os.path.exists(train_csv_path):
train_images = open(os.path.join(root_dir, "food-101/meta/train.txt"), "r").read().split()
random.shuffle(train_images)
_create_csv(train_images, label_dict, train_csv_path)
if not os.path.exists(test_csv_path):
test_images = open(os.path.join(root_dir, "food-101/meta/test.txt"), "r").read().split()
random.shuffle(test_images)
_create_csv(test_images, label_dict, test_csv_path)
return CSVDataset(train_csv_path), CSVDataset(test_csv_path)
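# A minimal usage sketch: prepare Food-101 (a multi-gigabyte download on the
# first run) and inspect the split sizes; assumes CSVDataset exposes __len__.
if __name__ == '__main__':
    train_data, test_data = load_data()
    print(len(train_data), len(test_data))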
|
from django.core.validators import MinLengthValidator
from django.db import models
class About(models.Model):
first_name = models.CharField(max_length=100, validators=[MinLengthValidator(2)])
last_name = models.CharField(max_length=100, validators=[MinLengthValidator(2)])
place = models.CharField(max_length=100, validators=[MinLengthValidator(2)])
contact = models.EmailField()
country = models.CharField(max_length=100, validators=[MinLengthValidator(2)])
description = models.TextField()
def __str__(self):
return f"{self.first_name} {self.last_name}"
|
import numpy as np
import nltk
from nltk.tokenize import word_tokenize # using sent_tokenize won't work
from nltk.corpus import stopwords
from nltk.stem import PorterStemmer
def preprocess(file_name):
file = open(file_name,"r")
if file.mode == 'r':
content = file.read()
sentences = content.split(".")
words = []
for i,j in enumerate(sentences):
words.append(word_tokenize(j))
stop_words = set(stopwords.words('english'))
ps = PorterStemmer()
for i in range(np.shape(words)[0]):
words[i] = [w for w in words[i] if not w in stop_words and w.isalpha()] # Removes stop_words and punctuations
pos_tagged = np.copy(words)
# ner_tags = np.copy(words)
# POS Tagging
"""
try:
for i,j in enumerate(words):
pos_tagged[i] = []
for p,q in enumerate(nltk.pos_tag(j)):
pos_tagged[i].append(q[1])
except Exception as e:
print(str(e))
"""
# NER Tagging
"""
try:
for i,j in enumerate(words):
tagged = nltk.pos_tag(words[i])
ner_tags[i] = nltk.ne_chunk(tagged, binary=False)
except Exception as e:
print(str(e))
"""
for i in range(np.shape(words)[0]): # Perform stemming only after POS and NER Tagging
for j,a in enumerate(words[i]):
words[i][j] = ps.stem(a)
save_name = file_name.split(".")[0] + "_words." + file_name.split(".")[1]
file_new = open(save_name,"w+")
for i in range(np.shape(words)[0]):
for j in words[i]:
file_new.write(j+" ")
file_new.write("\n")
file_new.close()
file.close()
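# A minimal usage sketch (hypothetical file name): tokenize, filter and stem
# 'sample.txt', writing one stemmed sentence per line to 'sample_words.txt'.
# Requires the NLTK 'punkt' and 'stopwords' corpora to be downloaded.
if __name__ == '__main__':
    preprocess('sample.txt')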
|
from __future__ import unicode_literals
import os
import fcntl
import select
import signal
import errno
from codecs import getincrementaldecoder
from ..terminal.vt100_input import InputStream
from .base import BaseEventLoop
__all__ = (
'PosixEventLoop',
'call_on_sigwinch',
)
class PosixEventLoop(BaseEventLoop):
stdin_decoder_cls = getincrementaldecoder('utf-8')
def __init__(self, input_processor, stdin):
super(PosixEventLoop, self).__init__(input_processor, stdin)
self.inputstream = InputStream(self.input_processor)
# Create a pipe for inter thread communication.
self._schedule_pipe = os.pipe()
fcntl.fcntl(self._schedule_pipe[0], fcntl.F_SETFL, os.O_NONBLOCK)
# Create incremental decoder for decoding stdin.
# We can not just do `os.read(stdin.fileno(), 1024).decode('utf-8')`, because
# it could be that we are in the middle of a utf-8 byte sequence.
self._stdin_decoder = self.stdin_decoder_cls()
def loop(self):
"""
The input 'event loop'.
"""
if self.closed:
raise Exception('Event loop already closed.')
timeout = self.input_timeout
while True:
r, w, x = _select([self.stdin, self._schedule_pipe[0]], [], [], timeout)
# If we got a character, feed it to the input stream. If we got
# none, it means we got a repaint request.
if self.stdin in r:
c = self._read_from_stdin()
if c:
# Feed input text.
self.inputstream.feed(c)
# Immediately flush the input.
self.inputstream.flush()
return
# If we receive something on our "call_from_executor" pipe, process
# these callbacks in a thread safe way.
elif self._schedule_pipe[0] in r:
# Flush all the pipe content.
os.read(self._schedule_pipe[0], 1024)
# Process calls from executor.
calls_from_executor, self._calls_from_executor = self._calls_from_executor, []
for c in calls_from_executor:
c()
else:
# Fire input timeout event.
self.onInputTimeout.fire()
timeout = None
def _read_from_stdin(self):
"""
Read the input and return it.
"""
# Note: the following works better than wrapping `self.stdin` like
# `codecs.getreader('utf-8')(stdin)` and doing `read(1)`.
# Somehow that causes some latency when the escape
        # character is pressed. (Especially in combination with `select`.)
try:
bytes = os.read(self.stdin.fileno(), 1024)
except OSError:
# In case of SIGWINCH
bytes = b''
try:
return self._stdin_decoder.decode(bytes)
except UnicodeDecodeError:
            # When it's not possible to decode these bytes, reset the decoder.
            # The only occurrence of this that I had was when using iTerm2 on OS
# X, with "Option as Meta" checked (You should choose "Option as
# +Esc".)
self._stdin_decoder = self.stdin_decoder_cls()
return ''
def call_from_executor(self, callback):
"""
Call this function in the main event loop.
Similar to Twisted's ``callFromThread``.
"""
self._calls_from_executor.append(callback)
if self._schedule_pipe:
os.write(self._schedule_pipe[1], b'x')
def close(self):
super(PosixEventLoop, self).close()
# Close pipes.
schedule_pipe = self._schedule_pipe
self._schedule_pipe = None
if schedule_pipe:
os.close(schedule_pipe[0])
os.close(schedule_pipe[1])
def _select(*args, **kwargs):
"""
Wrapper around select.select.
When the SIGWINCH signal is handled, other system calls, like select
are aborted in Python. This wrapper will retry the system call.
"""
while True:
try:
return select.select(*args, **kwargs)
except select.error as e:
# Retry select call when EINTR
if e.args and e.args[0] == errno.EINTR:
continue
else:
raise
class call_on_sigwinch(object):
"""
    Context manager which installs a SIGWINCH callback.
(This signal occurs when the terminal size changes.)
"""
def __init__(self, callback):
self.callback = callback
def __enter__(self):
self.previous_callback = signal.signal(signal.SIGWINCH, lambda *a: self.callback())
def __exit__(self, *a, **kw):
signal.signal(signal.SIGWINCH, self.previous_callback)
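# A minimal usage sketch (hypothetical callback): report terminal resizes while
# the block below runs; resize the terminal window during the pause to see it.
if __name__ == '__main__':
    import sys
    import time
    def _on_resize():
        sys.stdout.write('terminal resized\n')
    with call_on_sigwinch(_on_resize):
        time.sleep(10)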
|
from machine import ADC, Pin
from Blocky.Pin import getPin
class WaterSensor:
    def __init__(self, port, sensitivity=4):
        if getPin(port)[2] is None:
            from machine import reset
            reset()
        # Assumes getPin(port)[2] is an ADC-capable pin number for this port.
        self.adc = ADC(Pin(getPin(port)[2]))
        # Map the sensitivity level to the ESP32 ADC attenuation setting.
        if sensitivity == 1:
            self.adc.atten(ADC.ATTN_0DB)
        elif sensitivity == 2:
            self.adc.atten(ADC.ATTN_2_5DB)
        elif sensitivity == 3:
            self.adc.atten(ADC.ATTN_6DB)
        elif sensitivity == 4:
            self.adc.atten(ADC.ATTN_11DB)
    def read(self):
        # Invert the 12-bit reading so that more water yields a larger value.
        return 4095 - self.adc.read()
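# A minimal usage sketch ('P1' is a hypothetical Blocky port label): construct
# the sensor on an ADC-capable port and print one reading.
if __name__ == '__main__':
    sensor = WaterSensor('P1', sensitivity=4)
    print(sensor.read())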
|
import pandas as pd
import numpy as np
import scipy
import scipy.stats as stats
import pylab
import sklearn.feature_selection as sk
from sklearn.linear_model import LogisticRegression, LinearRegression
from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import BaggingClassifier
from sklearn.multiclass import OneVsRestClassifier
from sklearn.svm import LinearSVC
from scipy.stats import probplot
from pandas import DataFrame
import matplotlib.pyplot as plt
#
#
#X = full[[]]
#tup = [2,3,4,5,6,7]
#for k in range(len(full)):
# tup.append(k)
#
#tup = tuple(tup)
#X = full.iloc[:,[3,4,5,6,7,9,-11,-10,-9,-8,-7,-6,-5,-4,-3,-2,-1]]
#print(X)
#
#
##print(full)
#
#X = np.array(X)
#
#SNP15 = pd.read_csv("AIHack-SNP-Matrix-15.csv")
#Clin = pd.read_csv("AIHack-Clinical.csv")
#PC = pd.read_csv("AIHack-PCs.csv")
#Clin = Clin.replace('nan', np.NaN)
#Clin.fillna(Clin.mean(), inplace = True)
#full = pd.concat([Clin, SNP15, PC], axis=1, join_axes=[Clin.index])
#full = full.drop("Unnamed: 0", axis=1)
low = 8
high = 16391
#===========================================================================================
# 1. Data Loading
def load_data_CAD():
SNP15 = pd.read_csv("D:/AI_Hack/GWAS_files/AIHack-SNP-Matrix-15.csv")
Clin = pd.read_csv("D:/AI_Hack/GWAS_files/AIHack-Clinical.csv")
PC = pd.read_csv("D:/AI_Hack/GWAS_files/AIHack-PCs.csv")
Clin = Clin.replace('nan', np.NaN)
Clin.fillna(Clin.mean(), inplace = True)
full = pd.concat([Clin, SNP15, PC], axis=1, join_axes=[Clin.index])
full = full.drop("Unnamed: 0", axis=1)
CAD = full.iloc[:,1]
return CAD
def load_data_HDL():
SNP15 = pd.read_csv("AIHack-SNP-Matrix-15.csv")
Clin = pd.read_csv("AIHack-Clinical.csv")
PC = pd.read_csv("AIHack-PCs.csv")
Clin = Clin.replace('nan', np.NaN)
Clin.fillna(Clin.mean(), inplace = True)
full = pd.concat([Clin, SNP15, PC], axis=1, join_axes=[Clin.index])
full = full.drop("Unnamed: 0", axis=1)
HDL = full.iloc[:,6]
return HDL
def load_data_LDL():
SNP15 = pd.read_csv("AIHack-SNP-Matrix-15.csv")
Clin = pd.read_csv("AIHack-Clinical.csv")
PC = pd.read_csv("AIHack-PCs.csv")
Clin = Clin.replace('nan', np.NaN)
Clin.fillna(Clin.mean(), inplace = True)
full = pd.concat([Clin, SNP15, PC], axis=1, join_axes=[Clin.index])
full = full.drop("Unnamed: 0", axis=1)
LDL = full.iloc[:,7]
return LDL
#===========================================================================================
# 2. Statistical learning for prediction of significant SNPs
# Logistic Regression Model
def calc_CAD(CAD):
SNP_pvalues = []
for i in range(low, high):
X = full.iloc[:,[2,3,4,5,6,7,i,-11,-10,-9,-8,-7,-6,-5,-4,-3,-2]] #X Values for 1 SNP
X = np.array(X) #convert dataframe to array
        #create logistic regression model, and find weights
clf = LogisticRegression(random_state=0, solver='lbfgs',
multi_class='multinomial').fit(X, CAD)
denom = (2.0*(1.0+np.cosh(clf.decision_function(X))))
F_ij = np.dot((X/denom[:,None]).T,X) ## Fisher Information Matrix
Cramer_Rao = np.linalg.inv(F_ij) ## Inverse Information Matrix
sigma_estimates = np.array([np.sqrt(Cramer_Rao[i,i]) for i in range(Cramer_Rao.shape[0])]) # sigma for each coefficient
        z_scores = clf.coef_[0]/sigma_estimates # z-score for each model coefficient
        p_values = [stats.norm.sf(abs(x))*2 for x in z_scores] # two-tailed test for p-values
SNP_pvalues.append(-np.log(p_values[5]))
return SNP_pvalues
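# Note on the statistics above: for logistic regression the Fisher information
# is I(beta) = X^T W X with W_ii = p_i*(1 - p_i); since p_i = sigmoid(f_i) for
# decision value f_i, p_i*(1 - p_i) = 1/(2*(1 + cosh(f_i))), which is what
# `denom` computes. Inverting I(beta) (the Cramer-Rao bound) gives coefficient
# variances, and each Wald z-score beta_j/sigma_j yields a two-tailed p-value.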
# Alternative methods but none gave successful Manhattan Plots
# def svm(CAD):
# SNP_pvalues = []
# for i in range(low, high):
# X = full.iloc[:,[2,3,4,5,6,7,i,-11,-10,-9,-8,-7,-6,-5,-4,-3,-2]] #X Values for 1 SNP
# X = np.array(X) #convert dataframe to array
# #create logisitic regression model, and find weights
# clf = LinearSVC(random_state=0, tol=1e-5).fit(X, CAD)
# denom = (2.0*(1.0+np.cosh(clf.decision_function(X))))
# F_ij = np.dot((X/denom[:,None]).T,X) ## Fisher Information Matrix
# Cramer_Rao = np.linalg.inv(F_ij) ## Inverse Information Matrix
# sigma_estimates = np.array([np.sqrt(Cramer_Rao[i,i]) for i in range(Cramer_Rao.shape[0])]) # sigma for each coefficient
# z_scores = clf.coef_[0]/sigma_estimates # z-score for eaach model coefficient
# p_values = [stats.norm.sf(abs(x))*2 for x in z_scores] ### `two tailed test for p-values
# SNP_pvalues.append(-np.log(p_values[5]))
# return SNP_pvalues
# def forest(CAD):
# SNP_pvalues = []
# for i in range(8,9):
# # range(low, high):
# print (i)
# X = full.iloc[:,[2,3,4,5,6,7,i,-11,-10,-9,-8,-7,-6,-5,-4,-3,-2]] #X Values for 1 SNP
# X = np.array(X) #convert dataframe to array
# #create logisitic regression model, and find weights
# clf = RandomForestClassifier(n_estimators=100, max_depth=2,random_state=0).fit(X, CAD)
# denom = (2.0*(1.0+np.cosh(clf.oob_decision_function_[:,0])))
# # F_ij = np.dot((X/denom[:,None]).T,X) ## Fisher Information Matrix
# # Cramer_Rao = np.linalg.inv(F_ij) ## Inverse Information Matrix
# # sigma_estimates = np.array([np.sqrt(Cramer_Rao[i,i]) for i in range(Cramer_Rao.shape[0])]) # sigma for each coefficient
# # z_scores = clf.coef_[0]/sigma_estimates # z-score for eaach model coefficient
# # p_values = [stats.norm.sf(abs(x))*2 for x in z_scores] ### `two tailed test for p-values
# # SNP_pvalues.append(-np.log(p_values[5]))
# return clf.oob_decision_function_
#===========================================================================================
# Flagging of missing data
def flag_alleles(SNP_pvalues):
lst = []
col = []
for i in range(len(SNP_pvalues)):
if SNP_pvalues[i] > -np.log10((10**(-5)/(10**1))):
col.append(full.columns[i+8])
return col
def calc_HDL(HDL):
SNP_pvalues = []
for i in range(low,high):
X = full.iloc[:,[1,2,3,4,5,7,i,-11,-10,-9,-8,-7,-6,-5,-4,-3,-2]] #X Values for 1 SNP
# X = np.array(X, dtype = float) #convert dataframe to array
# X = X + np.ones(shape = np.shape(X))
fvalue, pvalue = sk.f_regression(X, HDL)
SNP_pvalues.append(-np.log(pvalue[5]))
return SNP_pvalues
def calc_LDL(LDL):
SNP_pvalues = []
for i in range(low,high):
X = full.iloc[:,[2,3,4,5,6,i,-12,-11,-10,-9,-8,-7,-6,-5,-4,-3,-2]] #X Values for 1 SNP
X = np.array(X) #convert dataframe to array
fvalue, pvalue = sk.f_regression(X, LDL)
# for j in range(len(X)):
# scipy.stats.chi2()
SNP_pvalues.append(-np.log(pvalue[5]))
return SNP_pvalues
#===========================================================================================
# 3. Manhattan Plots
def graph_CAD(SNP_pvalues):
df = DataFrame({'gene' : ['gene-%i' % i for i in np.arange(len(SNP_pvalues))],
'minuslog10pvalue' : SNP_pvalues})
df['ind'] = range(len(df))
print (df)
# ax = plt.fig.add_subplot(111)
plt.figure(1)
colors = 'red'
plt.scatter(df['ind'],df['minuslog10pvalue'],s=1)
plt.plot(np.arange(0,17000,1), np.ones(17000)*-np.log10((10**(-5)/(17))),color = 'red', linewidth = '1')
plt.gca()
plt.title("Deviation between control and CAD phenotype allele frequencies")
plt.xlabel(r'SNP site $i$')
plt.ylabel(r'$-log(p_{i})$')
plt.show()
def graph_HDL(SNP_pvalues):
df = DataFrame({'gene' : ['gene-%i' % i for i in np.arange(len(SNP_pvalues))],
'minuslog10pvalue' : SNP_pvalues})
df['ind'] = range(len(df))
plt.figure(2)
    # ax = fig.add_subplot(111)
plt.scatter(df['ind'],df['minuslog10pvalue'],s=1)
plt.plot(np.arange(0,17000,1), np.ones(17000)*-np.log10((10**(-5)/(17))),color = 'red', linewidth = '1')
plt.gca()
plt.title("Deviation between control and HDL phenotype allele frequencies")
plt.xlabel(r'SNP site $i$')
plt.ylabel(r'$-log(p_{i})$')
plt.show()
def graph_LDL(SNP_pvalues):
df = DataFrame({'gene' : ['gene-%i' % i for i in np.arange(len(SNP_pvalues))],
'minuslog10pvalue' : SNP_pvalues})
df['ind'] = range(len(df))
plt.figure(3)
    # ax = fig.add_subplot(111)
colors = 'red'
plt.scatter(df['ind'],df['minuslog10pvalue'],s=1)
plt.plot(np.arange(0,17000,1), np.ones(17000)*-np.log10((10**(-5)/(17))),color = 'red', linewidth = '1')
plt.gca()
plt.title("Deviation between control and LDL phenotype allele frequencies")
plt.xlabel(r'SNP site $i$')
plt.ylabel(r'$-log(p_{i})$')
plt.show()
# def QQ_plots_CAD(x):
# stats.probplot(x, dist = 'norm', plot=pylab)
# pylab.show()
# pass
|
from marshmallow import Schema, validate, validates_schema, ValidationError
from marshmallow.fields import String
from app.users.documents import User
BaseUserSchema = User.schema.as_marshmallow_schema()
class CreateUserSchema(BaseUserSchema):
id = String(
dump_only=True,
description='Unique document identifier of a User.'
)
username = String(
required=True,
allow_none=False,
description='Unique username.',
validate=validate.Length(min=1, error='Field cannot be blank.')
)
password = String(
load_only=True,
required=True,
allow_none=False,
description='User password.',
validate=validate.Length(min=1, error='Field cannot be blank.')
)
confirm_password = String(
load_only=True,
required=True,
allow_none=False,
description='User password confirm.',
validate=validate.Length(min=1, error='Field cannot be blank.')
)
@validates_schema(skip_on_field_errors=True)
def validate_password_confirmation(self, data):
if data['password'] != data['confirm_password']:
raise ValidationError(
                'Confirm password must be equal to the new password.',
field_names=['confirm_password', ]
)
def load(self, data, many=None, partial=None):
result = super(CreateUserSchema, self).load(data, many=many, partial=partial) # NOQA
if not result.errors:
result.data.pop('confirm_password')
return result
class Meta:
fields = (
'id',
'username',
'password',
'confirm_password'
)
class UserProfileSchema(BaseUserSchema):
id = String(
dump_only=True,
description='Unique document identifier of a User.'
)
username = String(
dump_only=True,
description='Unique username.'
)
class Meta:
fields = (
'id',
'username',
)
class UserTokenSchema(Schema):
access_token = String(
required=True,
allow_none=False,
        description='A JSON Web Token for getting access to the resource.',
validate=validate.Length(min=1, error='Field cannot be blank.')
)
class Meta:
fields = (
'access_token',
)
|
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import math
#plt.scatter(x,y,alpha=0.3)
#df.drop(df.columns[[0, 2]], axis='columns')
def borrar(data):
for iterable in range(99,data.shape[0]):
data.drop([iterable], inplace=True)
class regresion_logistica():
def __init__(self):
self.X = []
self.Y = []
self.alsa = 0.07
self.umbral = 0.01
self.Theta = np.random.rand(4)
#data = pd.read_csv(filename)
#data.head()
def h(self,vc):
return sum( [ e[0]*e[1] for e in zip (self.Theta,vc ) ] )
def S(self,vc):
return 1 / (1 + np.exp( -1 * self.h(vc)) )
    def error(self):
        # Draft of the cross-entropy cost, adapted from
        # https://github.com/perborgen/LogisticRegression/blob/master/logistic.py
        """
        m = len(self.X)
        s = 0
        for i in range(m):
            s += self.Y[i]*math.log10(self.S(self.X[i])) + (1 - self.Y[i])*math.log10(1 - self.S(self.X[i]))
        """
matrix = [ [1,2,3,4],[1,2,3,4]]
regresion = regresion_logistica()
regresion.S(matrix)
#print(data)
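# A minimal usage sketch: evaluate the sigmoid hypothesis on a single
# 4-dimensional feature vector (matching the four random weights in Theta).
vc = [1.0, 2.0, 3.0, 4.0]
print(regresion.S(vc))  # a value in (0, 1)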
|
# -*- coding: utf-8 -*-
start_time = "2018-06-01 00:00:00"
end_time = "2018-07-01 00:00:00"
contributors_id = ()
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Common utility functions
Created on Sun May 27 16:37:42 2018
@author: chen
"""
import math
import cv2
import os
from imutils import paths
import numpy as np
import scipy.ndimage
def rotate_cooridinate(cooridinate_og,rotate_angle,rotate_center):
"""
calculate the coordinates after rotation
"""
rotate_angle = rotate_angle*(math.pi/180)
rotated_x = (cooridinate_og[0]-rotate_center[0])*math.cos(rotate_angle)\
-(cooridinate_og[1]-rotate_center[1])*math.sin(rotate_angle)+rotate_center[0]
rotated_y = (cooridinate_og[0]-rotate_center[0])*math.sin(rotate_angle)\
+(cooridinate_og[1]-rotate_center[1])*math.cos(rotate_angle)+rotate_center[1]
rotated_coordinate = np.array([rotated_x,rotated_y])
    rotated_coordinate = np.round(rotated_coordinate).astype(int)
return rotated_coordinate
def mkdir(path):
"""
create new folder automatically
"""
folder = os.path.exists(path)
if not folder:
os.makedirs(path)
def load_data(path):
"""
load data from specified folder
"""
print("[INFO] loading images...")
imgs = []
# grab the image paths and randomly shuffle them
imagePaths = sorted(list(paths.list_images(path)))
for imagePath in imagePaths:
# load the image, pre-process it, and store it in the data list
image = cv2.imread(imagePath,cv2.IMREAD_GRAYSCALE)
imgs.append(image)
return imgs
def sigmoid(x):
return 1 / (1 + math.exp(-x))
def normfun(x,sigma):
"""
function of normal distribution
"""
mu = 45
pdf = np.exp(-((x - mu)**2)/(2*sigma**2)) / (sigma * np.sqrt(2*np.pi))
return pdf
def calc_box(box,x_gap,y_gap,rotate_angle,center):
"""
calculate the size of the required surrounding environment for doorway segmentation
box: four corners' coordinates of doorway
x_gap: remained space in the vertical way
y_gap: remained space in the horizontal way
"""
door_box = np.array([box[0][::-1]+[y_gap,x_gap],box[1][::-1]+[y_gap,-x_gap],
box[2][::-1]-[y_gap,x_gap],box[3][::-1]-[y_gap,-x_gap]])
rotated_box = []
for coordinate in door_box:
box_coordinate = rotate_cooridinate(coordinate,rotate_angle,center)
rotated_box.append(box_coordinate)
rotated_box = np.array(rotated_box)
box = [np.min(rotated_box[:,0]),np.min(rotated_box[:,1]),np.max(rotated_box[:,0]),np.max(rotated_box[:,1])]
return box
def calc_IoU(candidateBound, groundTruthBounds):
"""
calculate the intersection over union
"""
cx1 = candidateBound[0]
cy1 = candidateBound[1]
cx2 = candidateBound[2]
cy2 = candidateBound[3]
gx1 = groundTruthBounds[:,0]
gy1 = groundTruthBounds[:,1]
gx2 = groundTruthBounds[:,2]
gy2 = groundTruthBounds[:,3]
carea = (cx2 - cx1) * (cy2 - cy1)
garea = (gx2 - gx1) * (gy2 - gy1)
x1 = np.maximum(cx1, gx1)
y1 = np.maximum(cy1, gy1)
x2 = np.minimum(cx2, gx2)
y2 = np.minimum(cy2, gy2)
w = np.maximum(0, x2 - x1)
h = np.maximum(0, y2 - y1)
area = w * h
ious = area / (carea + garea - area)
return ious
def overlapp(candidateBound, groundTruthBounds):
"""
calculate the proportion of prediction to groundtruth
"""
cx1 = candidateBound[0]
cy1 = candidateBound[1]
cx2 = candidateBound[2]
cy2 = candidateBound[3]
gx1 = groundTruthBounds[:,0]
gy1 = groundTruthBounds[:,1]
gx2 = groundTruthBounds[:,2]
gy2 = groundTruthBounds[:,3]
garea = (gx2 - gx1) * (gy2 - gy1)
x1 = np.maximum(cx1, gx1)
y1 = np.maximum(cy1, gy1)
x2 = np.minimum(cx2, gx2)
y2 = np.minimum(cy2, gy2)
w = np.maximum(0, x2 - x1)
h = np.maximum(0, y2 - y1)
area = w * h
reious = area / garea
return reious
def calc_corner(door_center,door_size,door_depth,side):
"""
calculate the corners' coordinates from the centroid, size and depth of doorway
door_corners_inside is a list of coordinates of corners close to the corridor
door_corners_outside is a list of coordinates of corners close to the room
"""
    door_corners_inside = [door_center-np.array([int(door_size/2),0]),
                           door_center+np.array([door_size-int(door_size/2),0])]
door_corners_outside = [x-np.array([0,np.power(-1,side)*door_depth[side]])
for x in door_corners_inside]
door_corners_outside = np.array(door_corners_outside)
return door_corners_inside,door_corners_outside
def draw_door(mask,complete_map,door,door_depth,side):
"""
label the doorway on the mask and add some error inside the doorway region
"""
door_size = abs(door[1,0]-door[0,0])
door_area_inside = door+np.array([0,np.power(-1,side)*door_depth[side]])
# label the doorway on the mask
cv2.rectangle(mask,tuple(door[0][::-1]),tuple(door_area_inside[1][::-1]),255,-1)
# add a small point to emulate the error in the doorway region
if door_size>20:
if np.random.randint(4)==0:
if side ==0:
pt_center = [np.random.randint(door[0,0]+4,door[1,0]-3),np.random.randint(door[0,1],door_area_inside[0,1])]
else:
pt_center = [np.random.randint(door[0,0]+3,door[1,0]-2),np.random.randint(door_area_inside[0,1],door[0,1])]
cv2.circle(complete_map,tuple(pt_center[::-1]),np.random.choice([1,2,3]),0,-1)
return door_size
def room_division(room_space,num_room):
"""
assign the lengths of rooms according to the length of corridor and number of rooms
room_space: coordinates of corridor's side
num_room: the number of rooms on one side
rooms: a list of the coordinates belonging to different rooms
rooms_corners: a list of only the top and bottom cooridnates of different rooms
"""
rooms = []
rooms_corners=[]
a = num_room
thickness = np.random.randint(2,5)
length = room_space.shape[0]-(num_room-1)*thickness
start_point = 0
for i in range(num_room-1):
room_size = np.random.randint(length/(a+0.7),length/(a-0.7))
room = room_space[start_point:start_point+room_size,:]
rooms.append(room)
start_point +=room_size+thickness
room = room_space[start_point:,:]
rooms.append(room)
    rooms = [room.astype(int) for room in rooms]
for x in rooms:
rooms_corner = np.concatenate((x[0,:][np.newaxis,:],x[-1,:][np.newaxis,:]),axis = 0)
rooms_corners.append(rooms_corner)
return rooms,rooms_corners
def calc_gradient(gmap):
"""
calculate the gradient of image to find the contour
"""
kernel = np.array([[1,1,1],[1,-8,1],[1,1,1]])
img = gmap.astype(np.int16)
gradient = scipy.ndimage.correlate(img,kernel,mode = 'constant',cval =127)
return gradient
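# A minimal usage sketch: rotate a point 90 degrees about the origin and
# compute the IoU of two axis-aligned boxes with a known 5x5 overlap.
if __name__ == '__main__':
    print(rotate_cooridinate(np.array([1, 0]), 90, np.array([0, 0])))  # [0 1]
    candidate = [0, 0, 10, 10]
    ground_truths = np.array([[5, 5, 15, 15]])
    print(calc_IoU(candidate, ground_truths))  # 25 / (100 + 100 - 25) ~= 0.1429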
|
# CRISP-DM Python template, main
#import logging
import load
#import Model.svm.model as svm
import cDataPreparation.dataCleaning.cleaningFunctions as clean
import cDataPreparation.dataConstruction.constructFunctions as construct
from sklearn.datasets import fetch_20newsgroups
#import DataPreparation.integrating
pathToSelectedData = "./_data/raw/train.json" # Where the initial data can be found
rawDataType = 'json'
label = 'firstLabel'
pathToCleanData = "./data/clean/" # Where different versions of the cleaned data can be found
pathToConstructedData = "./data/constructed/" # Where different versions of the constructed data can be found
cleanDataFunctionToUse = clean.decode
constructDataFunctionToUse = construct.buildTfIdfMatrix
#data = {version:.., tables:.., functionUsed:..., rawDataUsed: ...}
cleanData = load.conditionalLoadCleanData(clean.identity, pathToSelectedData, label, rawDataType)
constructData = load.conditionalLoadConstructData(constructDataFunctionToUse, cleanData)
print(constructData[1:5])
#ABT = integrateData(cleanData, constructData)
#modelingGrid = list()
# TODO load configurations from other files, check if they have been run or not,
# TODO config file that says which configs have been run
#modelingGrid.append({construct: constructDataFunctionToUse, model:modelOptions})
## TODO implement the functions on the recipes problem
#for params in modelingGrid:
# if dataHasChangedSinceLastTask:
# constructData = conditionalLoadConstructData(constructDataFunctionToUse)
# ABT = integrateData(cleanData, constructData)
# trainABT, validationABT = testDesign(ABT, testOptions)
# model = modelType(modelOptions, ABT)
# assessment = modelAssessment(model, validation)
# logging.logAndSave(model, modelOptions, assessment, cleanDataFunctionToUse, constructDataFunctionToUse)
|
from .core import Field
from .core import FieldDescriptor
from .core import FieldGroup
from .core import SimpleField
from .core.enums import ValueStatus, FieldKind, IsbnType, PublicationType
class IsbnField(Field):
def __init__(self,
document,
name):
super(IsbnField, self).__init__(document,
'identifiers.*',
FieldKind.readonly)
self._name = name
self._value = None
@property
def value(self):
if self._status is ValueStatus.none:
raise RuntimeError('Accessing to field which is not set')
else :
return self._value
def hard_set(self,
value):
pass
def update(self,
gjp):
value = self._master_object(gjp)
for fname in ['identifiers', 'isbns']:
if fname in value:
value = value[fname]
else :
value = None
break
if value is not None:
if (self._name in value) and ('ean' in value[self._name]):
self._value = str(value[self._name]['ean'])
else:
self._value = None
self._status = ValueStatus.soft
def gjp(self,
gjp):
pass
class RelatedIsbnsField(SimpleField):
def __init__(self,
document):
super(RelatedIsbnsField, self).__init__(document,
'related_products_isbn',
'related_products_isbn')
def _parse_value(self,
value):
if not value:
return set()
else:
return set(value.split(';'))
def _value_validation(self,
value):
if not isinstance(value, set):
raise TypeError('Expected set, got : {0}'.format(type(value)))
for i in value:
if not isinstance(i, str):
raise ValueError('Expected ISBN, got : {0}'.format(i))
return value
def _serialize_value(self,
value):
return ';'.join({v.replace('-','')for v in value})
class IsbnGroup(FieldGroup):
def __init__(self,
document):
super(IsbnGroup, self).__init__(document)
self._document = document
self._fields['pdf'] = IsbnField(document,
'pdf')
self._fields['epub'] = IsbnField(document,
'epub')
self._fields['mobi'] = IsbnField(document,
'mobi')
self._fields['ibooks'] = IsbnField(document,
'ibooks')
self._fields['audiobook'] = IsbnField(document,
'audiobook')
self._fields['software'] = IsbnField(document,
'software')
self._fields['pod'] = IsbnField(document,
'pod')
self._fields['related'] = RelatedIsbnsField(document)
pdf = FieldDescriptor('pdf')
epub = FieldDescriptor('epub')
mobi = FieldDescriptor('mobi')
ibooks = FieldDescriptor('ibooks')
audiobook = FieldDescriptor('audiobook')
software = FieldDescriptor('software')
pod = FieldDescriptor('pod')
related = FieldDescriptor('related')
def __getitem__(self, key):
if key in PublicationType:
return getattr(self, key.identifier)
elif PublicationType.find(key) is not None:
return getattr(self, key)
def assign(self,
isbn_type,
isbn = None):
if isbn_type not in IsbnType:
raise ValueError('isbn_type should be one of op.isbn, got: {0}'.format(isbn_type))
self._document.context.gjp.assign_isbn(self._document.document_id,
isbn_type.identifier,
isbn)
self._document._fetch([])
|
from django.contrib.auth import models
import factory
from faker import Faker
fake = Faker()
class UserFactory(factory.django.DjangoModelFactory):
    # LazyFunction defers each faker call so every generated user gets fresh
    # values instead of sharing the ones evaluated at class-definition time.
    username = factory.LazyFunction(fake.user_name)
    email = factory.LazyFunction(fake.email)
    first_name = factory.LazyFunction(fake.first_name)
    last_name = factory.LazyFunction(fake.last_name)
    class Meta:
        model = models.User
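# A minimal usage sketch: build() creates an unsaved User instance, so it does
# not touch the database (Django settings must still be configured on import).
user = UserFactory.build()
print(user.username, user.email)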
|