from unittest import TestCase
"""These are not real unit tests, as they rely on the database to be correct,
and the DbCursor class to be correct as well."""
class TestNode(TestCase):
def setUp(self):
from arkdbtools.dbtools import set_connection
set_connection(
host='localhost',
database='ark_mainnet',
user='ark'
)
def tearDown(self):
from arkdbtools.dbtools import set_connection
set_connection()
def test_height(self):
from arkdbtools.dbtools import Node
height = Node.height()
self.assertIsInstance(height, int)
def test_check_node(self):
from arkdbtools.dbtools import Node
# check_node(0) could still return True, because Blockchain.height() is queried before Node.height()
bool_false = Node.check_node(0)
bool_true = Node.check_node(float('inf'))
self.assertIsInstance(bool_false, bool)
self.assertFalse(bool_false)
self.assertIsInstance(bool_true, bool)
self.assertTrue(bool_true)
def test_max_timestamp(self):
from arkdbtools.dbtools import Node
max_timestamp = Node.max_timestamp()
self.assertIsInstance(max_timestamp, int)
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# SPDX-License-Identifier: Apache-2.0
# Copyright 2021 RT Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys, rospy, math
from std_srvs.srv import Trigger, TriggerResponse
from thouzer_msgs.msg import Battery, Commander
from thouzer_msgs.msg import App as StatusApp
from enum import Enum
class ThouzerStatus(Enum):
UNKNOWN = 0
STARTING = 1
EMERGENCY = 2
USER_INPUT = 3
COMMAND = 4
class ThouzerCommander():
def __init__(self):
# if not self.set_power(False): sys.exit(1)  # turn the motor power off (True changed to False)
rospy.on_shutdown(self.set_power)
self.thouzer_status = ThouzerStatus.UNKNOWN
self.battery_voltage = 0.0
self._is_on = False
self._commander_pub = rospy.Publisher('/thouzer/commander', Commander, queue_size=1)
self._battery_sub = rospy.Subscriber('/thouzer/battery', Battery, self._callback_battery, queue_size=1)
self._status_app_sub = rospy.Subscriber('/thouzer/status/app', StatusApp, self._callback_status_app, queue_size=1)
# Wait for MQTT server
self._wait_for_mqtt_server()
self._srv_on = rospy.Service('motor_on', Trigger, self.callback_on)
self._srv_off = rospy.Service('motor_off', Trigger, self.callback_off)
def _wait_for_mqtt_server(self):
i = 0
while self.thouzer_status == ThouzerStatus.UNKNOWN and self.battery_voltage == 0.0:
rospy.sleep(0.01)
i += 1
if i == 499:
rospy.loginfo("thouzer_driver/commander.py: Waiting for MQTT server")
rospy.logerr("thouzer_driver/commander.py: Error connectiong to MQTT server")
i = 0
def _callback_status_app(self, msg):
self.thouzer_status = self._parse_status_message(msg.app)
def _parse_status_message(self, status_message):
if status_message == "#start":
# Starting up
self._is_on = False
return ThouzerStatus.STARTING
elif status_message == "#alert":
# Emergency stop
return ThouzerStatus.EMERGENCY
elif status_message == "#check":
# Unknown
return ThouzerStatus.UNKNOWN
elif "app-userInput" in status_message:
# Accepting user input
self._is_on = False
return ThouzerStatus.USER_INPUT
elif "app-whisperer" in status_message:
# Accepting commands
self._is_on = True
return ThouzerStatus.COMMAND
def _callback_battery(self, msg):
self.battery_voltage = msg.voltage_v
def set_power(self, onoff=False):
try:
power_command = Commander()
if self._is_on != onoff:
if onoff:
power_command.app = 'app-whisperer'
else:
power_command.app = ''
self._is_on = onoff
self._commander_pub.publish(power_command)
return True
except Exception as e:
print(e)
rospy.logerr("cannot send MQTT topic to thouzer")
return False
def onoff_response(self, onoff):
d = TriggerResponse()
d.success = self.set_power(onoff)
d.message = "ON" if self._is_on else "OFF"
return d
def callback_on(self, message):
return self.onoff_response(True)
def callback_off(self, message):
return self.onoff_response(False)
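# A quick manual check of the two services above, assuming the node is running
# (hypothetical shell session; the service names come from the rospy.Service calls):
#   rosservice call /motor_on "{}"    # expected: success: True, message: "ON"
#   rosservice call /motor_off "{}"   # expected: success: True, message: "OFF"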
if __name__ == '__main__':
rospy.init_node('thouzer_commander')
cmd = ThouzerCommander()
rospy.spin()
|
from __future__ import absolute_import, print_function
from sentry.rules.base import RuleBase
class EventAction(RuleBase):
rule_type = "action/event"
def after(self, event, state):
"""
Executed after a Rule matches.
Should yield CallBackFuture instances which will then be passed into
the given callback.
See the notification implementation for example usage.
>>> def after(self, event, state):
>>> yield self.future(self.print_results)
>>>
>>> def print_results(self, event, futures):
>>> print('Got futures for Event {}'.format(event.id))
>>> for future in futures:
>>> print(future)
"""
raise NotImplementedError
|
# -*- coding: utf-8 -*-
"""
Created on Sat Oct 12 13:39:17 2019
@author: abraverm
"""
import numpy as np
import os
import matplotlib.pyplot as plt
import tensorflow as tf
from tensorflow.keras.callbacks import TensorBoard, ModelCheckpoint
from src.enet_wc_keras_model import autoencoder_wc, transfer_weights
from src.camvid_dataset_load import prepare_for_training, create_dataset
from src.camvid_dataset_load import create_coco_dataset, create_coco_test_set
from src.camvid_dataset_load import median_frequency_balancing
from src.enetcfg import EnetCfg
print("Num GPUs Available: ", len(tf.config.experimental.list_physical_devices('GPU')))
print("Num CPUs Available: ", len(tf.config.experimental.list_physical_devices('CPU')))
#tf.debugging.set_log_device_placement(True)
gpus = tf.config.experimental.list_physical_devices('GPU')
if gpus:
try:
# Currently, memory growth needs to be the same across GPUs
tf.config.experimental.set_visible_devices(gpus[0], 'GPU')
for gpu in gpus:
tf.config.experimental.set_memory_growth(gpu, True)
logical_gpus = tf.config.experimental.list_logical_devices('GPU')
print(len(gpus), "Physical GPUs,", len(logical_gpus), "Logical GPUs")
except RuntimeError as e:
# Memory growth must be set before GPUs have been initialized
print(e)
flags = EnetCfg()
flags.default_enet_cfg(flags=flags)
cfg = flags.parse_args()
class EpochModelCheckpoint(tf.keras.callbacks.ModelCheckpoint):
def __init__(self,filepath, monitor='val_loss', verbose=1,
save_best_only=True, save_weights_only=True,
mode='auto', ):
super(EpochModelCheckpoint, self).__init__(filepath=filepath,monitor=monitor,
verbose=verbose,save_best_only=save_best_only,
save_weights_only=save_weights_only, mode=mode)
self.ckpt = tf.train.Checkpoint(completed_epochs=tf.Variable(0,trainable=False,dtype='int32'))
ckpt_dir = f'{os.path.dirname(filepath)}/tf_ckpts'
self.manager = tf.train.CheckpointManager(self.ckpt, ckpt_dir, max_to_keep=3)
def on_epoch_begin(self,epoch,logs=None):
self.ckpt.completed_epochs.assign(epoch)
self.manager.save()
print( f"Epoch checkpoint {self.ckpt.completed_epochs.numpy()} saved to: {self.manager.latest_checkpoint}" )
print(logs)
def callbacks(log_dir, checkpoint_dir, model_name):
tb = TensorBoard(log_dir=log_dir,
histogram_freq=1,
write_graph=True,
write_images=True)
best_model = os.path.join(checkpoint_dir, f'{model_name}_best.hdf5')
save_best = EpochModelCheckpoint( best_model )
checkpoint_file = os.path.join(checkpoint_dir, 'weights.' + model_name + '.{epoch:02d}-{val_loss:.2f}.hdf5')
checkpoints = ModelCheckpoint(
filepath=checkpoint_file,
monitor='val_loss',
verbose=1,
save_best_only=False,
save_weights_only=False,
mode='auto'
#period=1
)
# keras.callbacks.ReduceLROnPlateau(monitor='val_loss', factor=0.1, patience=10, verbose=0, mode='auto', epsilon=0.0001, cooldown=0, min_lr=0)
return [tb, save_best ]
#return [tb, save_best, checkpoints]
#return []
class WeightedCategoricalCrossentropy(tf.keras.losses.CategoricalCrossentropy):
def __init__(self, class_w):
super(WeightedCategoricalCrossentropy, self).__init__(from_logits=True, name='weighted_categorical_crossentropy')
self.c_w = class_w
def __call__(self, y_true, y_pred, sample_weight=None):
# Gather the per-pixel class weight from the one-hot ground truth
weights = y_true * self.c_w
weights = tf.reduce_sum(weights, -1)
return super(WeightedCategoricalCrossentropy, self).__call__(y_true, y_pred, sample_weight) * weights
def get_class_normalization_dic(dataset=cfg.dataset_name):
cw_d = None
if dataset == 'camvid':
# class_weights = median_frequency_balancing()
# cw_d = {}
# for i,c in enumerate(class_weights):
# cw_d[i] = c
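# (For reference: median-frequency balancing, as in the commented-out code
#  above, weights each class by median_freq / freq_c, so rare classes get
#  weights above the median; the dict below is one precomputed result.)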
cw_d = {0: 0.0159531051456976, 1: 0.011580246710544183, 2: 0.22857586995014328, 3: 0.009042348126826805, 4: 0.05747495410789924, 5: 0.025342723815993118, 6: 0.16389458162792303, 7: 0.2807956777529651, 8: 0.0931421249518621186, 9: 0.9930486077110527676, 10: 0.85542331331773912, 11: 0.0001}
if dataset == 'coco':
#cw_d = {0: 0.0, 1: 0.05, 2: 0.1, 3: 0.05, 4: 0.05, 5: 0.1, 6: 0.3, 7: 0.1, 8: 0.1, 9: 0.6, 10: 0.05, 11: 0.05, 12: 0.2 }
cw_d = {0: 0.0, 1: 1.0, 2: 1.0, 3: 1.0, 4: 1.0, 5: 1.0, 6: 1.0, 7: 1.0, 8: 1.0, 9: 1.0, 10: 1.0, 11: 1.0, 12: 1.0 }
#cw_d = None
return cw_d
def get_checkpoint_log_dir():
experiment_dir = os.path.join('models', cfg.dataset_name, cfg.model_name)
log_dir = os.path.join(experiment_dir, 'logs')
checkpoint_dir = os.path.join(experiment_dir, 'weights')
if not os.path.exists(checkpoint_dir):
os.makedirs(checkpoint_dir)
return checkpoint_dir, log_dir
def model_preload(model):
# ------------ Model preload ------------
checkpoint_dir, _ = get_checkpoint_log_dir()
ckpt = tf.train.Checkpoint(completed_epochs=tf.Variable(0,trainable=False,dtype='int32'))
manager = tf.train.CheckpointManager(ckpt, f'{checkpoint_dir}/tf_ckpts', max_to_keep=3)
best_weights = os.path.join(checkpoint_dir, f'{cfg.model_name}_best.hdf5')
#best_weights = os.path.join(checkpoint_dir, f'enet_best.hdf5') #ckpt
#best_weights = 'wc1_preloaded_coco.hdf5'
#best_weights = 'evet_no_wc_preload_from_coco.hdf5'
#best_weights = '/home/ddd/Desktop/univer/project/py_ws/Enet/models/camvid/enet_wc_in_encoder_1/weights/enet_best.hdf5'
#best_weights = '/home/ddd/Desktop/univer/project/py_ws/Enet/models/camvid/enet_no_wc/weights/enet_best.hdf5'
print(f'Trying to load model {best_weights}')
if os.path.exists(best_weights):
print(f'Loading model {best_weights}')
model.load_weights(best_weights)
ckpt.restore(manager.latest_checkpoint)
if manager.latest_checkpoint:
print(f"Restored epoch ckpt from {manager.latest_checkpoint}, value is ",ckpt.completed_epochs.numpy())
else:
print("Initializing from scratch.")
else:
model = transfer_weights(model)
print('Done loading {} model!'.format(cfg.model_name))
# print("TODO Not Trainable !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!")
# for idx in range(10,len(model.layers)):
# model.layers[idx].trainable = False
return model, ckpt
def get_metrics():
new_metrics = [ #tf.keras.metrics.MeanIoU(num_classes=nc),
tf.keras.metrics.Precision(), ]
metrics=['accuracy', 'mean_squared_error']
metrics += new_metrics
return metrics
def get_optimizer():
initial_learning_rate = 5e-4 #0.1
#decay_steps = int(num_epochs_before_decay * num_steps_per_epoch) ~100*100
lr_schedule = tf.keras.optimizers.schedules.ExponentialDecay(
initial_learning_rate,
decay_steps=500, #10*2*(50*10),# (steps_in_s*batch)
decay_rate=1e-1, #0.96,
staircase=True)
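# With staircase decay the rate is lr = 5e-4 * 0.1**(step // 500):
# 5e-4 for steps 0-499, 5e-5 for 500-999, 5e-6 for 1000-1499, and so on.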
optimizer = tf.keras.optimizers.Adam(learning_rate=lr_schedule, epsilon=1e-8)
#optimizer = tf.optimizers.Adadelta(learning_rate=5e-5, rho=1e-1, epsilon=1e-08)
#optimizer='adadelta'
return optimizer
def get_loss():
#loss = WeightedCategoricalCrossentropy(class_w=class_weights)
loss = "categorical_crossentropy"
return loss
def get_model(cfg=cfg):
dw = cfg.image_width
dh = cfg.image_height
optimizer = get_optimizer()
loss = get_loss()
metrics = get_metrics()
model, model_name = autoencoder_wc(nc=cfg.num_classes, input_shape=(dw, dh),
loss=loss,
optimizer=optimizer,
metrics=metrics,
wc_in_encoder=cfg.wc_in_encoder,
wc_in_decoder=cfg.wc_in_decoder)
return model
def get_train_val_data(dataset = cfg.dataset_name):
dw = cfg.image_width
dh = cfg.image_height
nc = cfg.num_classes #12 #get_classes_nmber()
class_weights = get_class_normalization_dic()
if dataset == 'coco':
val_ds = create_coco_dataset(dataDir='../../../cocodataset', dataType='val2017', im_w=dw, im_h=dh)
# 118300 samples
train_ds = create_coco_dataset(dataDir='../../../cocodataset', dataType='train2017', im_w=dw, im_h=dh)
elif dataset == 'camvid':
data_dir = "../dataset/train"
train_ds = create_dataset(data_dir,im_w=dw,im_h=dh, num_classes=nc,reshape=None,class_w=class_weights)
data_dir = "../dataset/val"
val_ds = create_dataset(data_dir,im_w=dw,im_h=dh, num_classes=nc,reshape=None,class_w=class_weights)
if cfg.concat_ds > 0:
train_ds = train_ds.concatenate(val_ds)
if cfg.repeat_train_ds is not None:
if cfg.repeat_train_ds == 0:
print("infinit train dataset !!!!!!!!!!!!!!!!!!!!!")
train_ds = train_ds.repeat()
else:
train_ds = train_ds.repeat(cfg.repeat_train_ds)
if cfg.use_test_as_val_ds is not None:
val_ds = get_test_data(dataset = cfg.dataset_name, with_batch_size = False)
train_ds = prepare_for_training(train_ds, batch_size=cfg.batch_size, cache=None, shuffle_buffer_size=2)
val_ds = prepare_for_training(val_ds, batch_size=cfg.val_batch_size, cache=None, shuffle_buffer_size=2)
return train_ds, val_ds
def get_test_data(dataset = cfg.dataset_name, with_batch_size = True):
dw = cfg.image_width
dh = cfg.image_height
nc = cfg.num_classes #12 #get_classes_nmber()
if dataset == 'coco':
val_ds = create_coco_dataset(dataDir='../../../cocodataset', dataType='val2017', im_w=dw, im_h=dh)
test_ds = val_ds
elif dataset == 'camvid':
data_dir = f"{cfg.dataset_dir}/test"
test_ds = create_dataset(data_dir,im_w=dw,im_h=dh, num_classes=nc,reshape=None,data_transform=None)
if with_batch_size :
test_ds = test_ds.batch(cfg.batch_size)
return test_ds
def train( ):
print(f'Preparing to train on {cfg.dataset_name} data...')
autoenc = get_model()
autoenc, ckpt = model_preload(autoenc)
train_ds, val_ds = get_train_val_data(dataset = cfg.dataset_name)
# Class weight balancing
cw_d = get_class_normalization_dic(dataset=cfg.dataset_name)
checkpoint_dir, log_dir = get_checkpoint_log_dir()
# checkpoint for epoch counter
if cfg.initial_epoch is None:
completed_epochs = ckpt.completed_epochs.numpy()
else :
completed_epochs = cfg.initial_epoch
# ---------- Fit Model - Training-----------------
history = autoenc.fit(
x=train_ds,
epochs=cfg.epochs,
steps_per_epoch=cfg.steps,
class_weight=cw_d,
verbose=1,
callbacks=callbacks(log_dir, checkpoint_dir, cfg.model_name ),
validation_data=val_ds,
validation_steps=cfg.val_steps,
initial_epoch=completed_epochs
)
print('\nhistory dict:', history.history)
return autoenc
def evaluate():
model = get_model()
model, _ = model_preload(model)
#-------- TODO get_dataset -------
#test_ds = create_coco_test_set(im_w=cfg.image_width, im_h=cfg.image_height)
#val_ds = create_coco_dataset(dataType='val2017', im_w=cfg.image_width, im_h=cfg.image_height)
#test_ds = val_ds
#test_ds = model.batch(100)
test_ds = get_test_data()
#--------------------------------
results = model.evaluate(test_ds) #, steps=100)
print(f'\nTEST Evaluate of {cfg.model_name} model:\n [loss, accuracy, mean_squared_error, precision]:\n', results)
i = 0
fid = 0
for ims ,lbls in test_ds:
predictions = model.predict(ims)
for pred,lbl,img in zip(predictions,lbls,ims):
pred = np.argmax(pred,axis=-1)
label = np.argmax(lbl, axis=-1)
f = calculate_fid(label, pred)
if np.all(np.isfinite(f)):
i+=1
fid += f
if i > 100: break
print ("Mean FID :", fid/i)
return model
label_to_colours = {0: [128,128,128],
1: [128,0,0],
2: [192,192,128],
3: [128,64,128],
4: [60,40,222],
5: [128,128,0],
6: [192,128,128],
7: [64,64,128],
8: [64,0,128],
9: [64,128,0], #Pedestrian [64,64,0]
10: [0,128,192], # Bicyclist = [0,128,192]
11: [0,0,0],
12: [120,56,70]}
def onehot_2_rgb(lbl):
shape = lbl.shape
img = np.zeros([shape[0], shape[1],3], dtype=np.uint16)
for i in range(shape[0]):
for j in range(shape[1]):
img[i][j] =label_to_colours[ lbl[i][j] ]
return img
def visualize_prediction(pred, lbl, img):
pred = tf.math.argmax(pred, axis=-1).numpy()
label = tf.math.argmax(lbl, axis=-1).numpy()
#label = np.reshape(label, [256,256,3])
fid = calculate_fid(label, pred)
print('FID : %.3f' % fid)
pred = onehot_2_rgb(pred)
label = onehot_2_rgb(label)
fig, (ax1, ax2, ax3) = plt.subplots(1, 3)
for ax in [ax1, ax2, ax3]: ax.axis('off')
#plt.figure(figsize=(10,10))
ax1.imshow(pred) #,cmap='winter')
ax2.imshow(label)
ax3.imshow(img)
plt.show()
from src.utils import calculate_fid
def predict():
model = get_model()
model, _ = model_preload(model)
model.save(f'{cfg.model_name}')
#test_ds = create_coco_test_set(im_w=cfg.image_width, im_h=cfg.image_height)
#val_ds = create_coco_dataset(dataType='val2017', im_w=cfg.image_width, im_h=cfg.image_height)
#_, val_ds = get_train_val_data()
#test_ds = val_ds
test_ds = get_test_data()
#test_ds = test_ds.batch(10)
for ims ,lbls in test_ds.take(1):
predictions = model.predict(ims)
for pred,lbl,img in zip(predictions,lbls,ims):
visualize_prediction(pred,lbl,img)
tf.keras.utils.plot_model(model, f'{cfg.model_name}_model_with_shape_info.png', show_shapes=True)
return model
if __name__ == '__main__':
# cfg.train_flow = 0
# cfg.predict_flow = 0
#
if cfg.train_flow > 0:
print(f'Training ENet model: {cfg.model_name}, with dataset: {cfg.dataset_name}')
trained_model = train()
if cfg.predict_flow > 0:
prediction_model = predict()
if cfg.eval_flow > 0:
eval_model = evaluate()
if False:  # disabled scratch block: print model summary and layer counts
cfg.image_width = 256
cfg.image_height = 256
cfg.wc_in_encoder = 0
cfg.wc_in_decoder = 1
model = get_model()
print("model.summary()")
model.summary()
print(' ---------- len(model.trainable_variables) ------ ')
print('trainable_variables #:',len(model.trainable_variables))
print('layers #:',len(model.layers))
tf.keras.utils.plot_model(model, f'{cfg.model_name}_model_with_shape_info.png', show_shapes=True)
#model preload
if False:  # disabled scratch block: manual weight transfer between models
cfg.image_width = 256
cfg.image_height = 256
cfg.wc_in_encoder = 0
cfg.wc_in_decoder = 0
# first 7 layers : inp -> conv2d -> maxpool -> concat -> [0:3] init -> conv2d -> norm -> prelu
enet_model = get_model()
best_weights = 'models/coco/enet_no_wc_256x256/weights/enet_no_wc_256x256_best.hdf5'
enet_model.load_weights(best_weights)
cfg.wc_in_encoder = 1
wc_model = get_model()
offset = len(wc_model.layers) - len(enet_model.layers)
for idx in range(7,len(enet_model.layers)):
wc_model.layers[idx+offset].set_weights(enet_model.layers[idx].get_weights())
wc_model.layers[idx+offset].trainable = False
|
# -*- coding: utf-8 -*-
#
from __future__ import division
from mpmath import mp
import numpy
import sympy
from .helpers import cartesian_to_spherical_sympy
from ..helpers import untangle, pm_array0, fsd, pm_array, pm
class McLaren(object):
"""
A.D. McLaren,
Optimal Numerical Integration on a Sphere,
Mathematics of Computation, Vol. 17, No. 84. (Oct., 1963), pp. 361-383,
<https://doi.org/10.1090/S0025-5718-1963-0159418-2>.
"""
def __init__(self, index, symbolic=False):
frac = sympy.Rational if symbolic else lambda x, y: x / y
sqrt = sympy.sqrt if symbolic else numpy.sqrt
roots = mp.polyroots if symbolic else numpy.roots
if index == 1:
self.degree = 3
data = [(frac(1, 12), fsd(3, (sqrt(frac(1, 2)), 2)))]
elif index == 2:
self.degree = 5
# Stroud doesn't mention u=1, but it's implied. (After all, this is
# integration on a sphere.)
u = 1
r = frac(1, 2)
s, t = [(sqrt(5) + pm_) / 4 for pm_ in [+1, -1]]
data = [
(frac(1, 30), fsd(3, (u, 1))),
(frac(1, 30), pm_array([r, s, t])),
(frac(1, 30), pm_array([t, r, s])),
(frac(1, 30), pm_array([s, t, r])),
]
elif index == 3:
self.degree = 7
# the positive roots of
# z^6 - z^4 + 0.2*z^2 - 1/105 = 0,
# i.e., the square roots of the roots of
# z^3 - z^2 + 0.2*z^1 - 1/105 = 0,
r2, s2, t2 = roots([1, -1, frac(1, 5), -frac(1, 105)])
r = sqrt(r2)
s = sqrt(s2)
t = sqrt(t2)
u = numpy.array([+r, -r, +s, -s, +t, -t])
v = numpy.array([+s, +t, +t, +r, +r, +s])
w = numpy.array([+t, +s, +r, +t, +s, +r])
data = [
(frac(1, 24), numpy.column_stack([+u, +v, +w])),
(frac(1, 24), numpy.column_stack([+u, -v, -w])),
(frac(1, 24), numpy.column_stack([+u, +w, -v])),
(frac(1, 24), numpy.column_stack([+u, -w, +v])),
]
elif index == 4:
self.degree = 8
# the positive roots of
# z^6 - z^4 + 5/21 * z^2 - 5/441 = 0,
# i.e., the square roots of the roots of
# z^3 - z^2 + 5/21 * z^1 - 5/441 = 0,
r2, s2, t2 = roots([1, -1, frac(5, 21), -frac(5, 441)])
r = sqrt(r2)
s = sqrt(s2)
t = sqrt(t2)
u = numpy.array([+r, -r, +s, -s, +t, -t])
v = numpy.array([+s, +t, +t, +r, +r, +s])
w = numpy.array([+t, +s, +r, +t, +s, +r])
data = [
(frac(16, 600), fsd(3, (1, 1))),
(frac(21, 600), numpy.column_stack([+u, +v, +w])),
(frac(21, 600), numpy.column_stack([+u, -v, -w])),
(frac(21, 600), numpy.column_stack([+u, +w, -v])),
(frac(21, 600), numpy.column_stack([+u, -w, +v])),
]
elif index == 5:
self.degree = 9
r, s = [sqrt((5 + pm_ * sqrt(5)) / 10) for pm_ in [+1, -1]]
u, v = [sqrt((3 - pm_ * sqrt(5)) / 6) for pm_ in [+1, -1]]
t = sqrt(frac(1, 3))
B1 = frac(25, 840)
B2 = frac(27, 840)
data = [
(B1, pm_array0(3, [r, s], [0, 1])),
(B1, pm_array0(3, [r, s], [1, 2])),
(B1, pm_array0(3, [r, s], [2, 0])),
#
(B2, pm_array0(3, [u, v], [0, 1])),
(B2, pm_array0(3, [u, v], [1, 2])),
(B2, pm_array0(3, [u, v], [2, 0])),
#
(B2, pm(3, t)),
]
elif index == 6:
self.degree = 9
r, s = [sqrt((5 + pm_ * sqrt(5)) / 10) for pm_ in [+1, -1]]
t = 1
u = frac(1, 2)
v, w = [(sqrt(5) + pm_) / 4 for pm_ in [+1, -1]]
B = frac(25, 1260)
C = frac(32, 1260)
data = [
# ERR Stroud is missing +- at the first r.
(B, pm_array0(3, [r, s], [0, 1])),
(B, pm_array0(3, [r, s], [1, 2])),
(B, pm_array0(3, [r, s], [2, 0])),
#
(C, fsd(3, (t, 1))),
#
(C, pm_array([u, v, w])),
(C, pm_array([w, u, v])),
(C, pm_array([v, w, u])),
]
elif index == 7:
self.degree = 9
r, s = [sqrt((3 - pm_ * sqrt(5)) / 6) for pm_ in [+1, -1]]
t = sqrt(frac(1, 3))
# ERR Stroud falsely gives sqrt(0.5)
u = frac(1, 2)
v, w = [(sqrt(5) + pm_) / 4 for pm_ in [+1, -1]]
B = -frac(9, 140)
C = frac(16, 210)
data = [
(B, pm_array0(3, [r, s], [0, 1])),
(B, pm_array0(3, [r, s], [1, 2])),
(B, pm_array0(3, [r, s], [2, 0])),
#
(B, pm(3, t)),
#
(C, fsd(3, (1, 1))),
#
(C, pm_array([u, v, w])),
(C, pm_array([w, u, v])),
(C, pm_array([v, w, u])),
]
elif index == 8:
self.degree = 11
r = 1
s = sqrt(frac(1, 2))
t = sqrt(frac(1, 3))
u = sqrt(frac(1, 11))
v = sqrt(frac(9, 11))
B1 = frac(9216, 725760)
B2 = frac(16384, 725760)
B3 = frac(15309, 725760)
B4 = frac(14641, 725760)
data = [
(B1, fsd(3, (r, 1))),
(B2, fsd(3, (s, 2))),
(B3, pm(3, t)),
(B4, fsd(3, (u, 2), (v, 1))),
]
elif index == 9:
self.degree = 11
sqrt5 = sqrt(5)
p, q = [sqrt((5 + pm_ * sqrt5) / 10) for pm_ in [+1, -1]]
r, s = [sqrt((3 - pm_ * sqrt5) / 6) for pm_ in [+1, -1]]
t = sqrt(frac(1, 3))
u = frac(1, 2)
v, w = [(sqrt(5) + pm_) / 4 for pm_ in [+1, -1]]
B = frac(625, 27720)
C = frac(243, 27720)
D = frac(512, 27720)
data = [
(B, pm_array0(3, [p, q], [0, 1])),
(B, pm_array0(3, [p, q], [1, 2])),
(B, pm_array0(3, [p, q], [2, 0])),
#
(C, pm_array0(3, [r, s], [0, 1])),
(C, pm_array0(3, [r, s], [1, 2])),
(C, pm_array0(3, [r, s], [2, 0])),
#
(C, pm(3, t)),
#
(D, fsd(3, (1, 1))),
#
(D, pm_array([u, v, w])),
(D, pm_array([w, u, v])),
(D, pm_array([v, w, u])),
]
else:
assert index == 10
self.degree = 14
r, s = [sqrt((5 - pm_ * sqrt(5)) / 10) for pm_ in [+1, -1]]
B = frac(125, 10080)
C = frac(143, 10080)
# The roots of
#
# 2556125 y^6 - 5112250 y^5 + 3578575 y^4 - 1043900 y^3
# + 115115 y^2 - 3562 y + 9 = 0
#
# in decreasing order.
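# (Sanity check: numpy.sort(numpy.roots(
#  [2556125, -5112250, 3578575, -1043900, 115115, -3562, 9]))[::-1]
#  should reproduce the values below.)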
y = [
0.8318603575087328951583062165711519728388,
0.5607526046766541293084396308069013490725,
0.4118893592345073860321480490176804941547,
0.1479981814629634692260834719469411619893,
0.04473134613410273910111648293922113227845,
0.002768150983039381173906148718103889666260,
]
z = numpy.sqrt(y)
u = (
numpy.array(
[z[3] - z[2], z[1] - z[4], z[5] - z[1], z[2] - z[5], z[4] - z[3]]
)
/ 2
/ s
)
v = (
numpy.array(
[z[4] + z[5], z[5] + z[3], z[2] + z[4], z[3] + z[1], z[1] + z[2]]
)
/ 2
/ s
)
w = (
numpy.array(
[z[0] + z[1], z[0] + z[2], z[0] + z[3], z[0] + z[4], z[0] + z[5]]
)
/ 2
/ s
)
data = [
(B, pm_array0(3, [r, s], [0, 1])),
(B, pm_array0(3, [r, s], [1, 2])),
(B, pm_array0(3, [r, s], [2, 0])),
#
(C, numpy.column_stack([+u, +v, +w])),
(C, numpy.column_stack([+u, -v, -w])),
(C, numpy.column_stack([-u, -v, +w])),
(C, numpy.column_stack([-u, +v, -w])),
#
(C, numpy.column_stack([+v, +w, +u])),
(C, numpy.column_stack([+v, -w, -u])),
(C, numpy.column_stack([-v, -w, +u])),
(C, numpy.column_stack([-v, +w, -u])),
#
(C, numpy.column_stack([+w, +u, +v])),
(C, numpy.column_stack([+w, -u, -v])),
(C, numpy.column_stack([-w, -u, +v])),
(C, numpy.column_stack([-w, +u, -v])),
]
self.points, self.weights = untangle(data)
self.azimuthal_polar = cartesian_to_spherical_sympy(self.points)
return
|
import os
import threading
import queue
from utils.grid_tools_2d import Point, Vector
from utils.intcode_computer import IntCodeComputer, get_program
def get_input(filepath):
# Read the intcode program text from a file into memory (a list of stripped lines).
with open(filepath) as f:
data = [line.strip() for line in f]
return data
BLACK = 0
WHITE = 1
class PaintRobot:
debug = False
READ_TIMEOUT = 10
WRITE_TIMEOUT = 10
def __init__(self, program, grid, starting_position, starting_facing):
"""
:param List[int] -> program:
:param List[List[int]] -> grid:
:param Point -> starting_position:
:param Vector -> starting_facing:
"""
self.grid = grid
self.position = starting_position
self.direction = starting_facing
self.optical_scanner = queue.Queue(1)
self.move_instructions = queue.Queue(2)
self.brain = IntCodeComputer(
program, input_queue=self.optical_scanner, output_queue=self.move_instructions, name="PainterBrain"
)
self.brain.debug = self.debug
self.running = False
# Track panels visited for Part 1
self.visited_panels = {self.position}
def scan_panel(self):
color = self.grid[self.position.y][self.position.x]
if color == BLACK:
# Send 0
self.optical_scanner.put(0, timeout=self.WRITE_TIMEOUT)
if self.debug:
print("Painter scanner put BLACK", self.optical_scanner.qsize())
elif color == WHITE:
# Send 1
self.optical_scanner.put(1, timeout=self.WRITE_TIMEOUT)
if self.debug:
print("Painter scanner put WHITE", self.optical_scanner.qsize())
else:
raise RuntimeError("Unknown Panel Color '{}'".format(color))
def get_next_instruction(self):
if self.debug:
print("Getting instructions from brain...")
color = self.move_instructions.get(timeout=self.READ_TIMEOUT)
if self.debug:
print("Painter got color code", color)
direction = self.move_instructions.get(timeout=self.READ_TIMEOUT)
if self.debug:
print("Painter got direction code", direction)
return color, direction
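# Protocol recap: each tick the robot feeds the brain one camera reading
# (0 = black, 1 = white) and reads back two values: the color to paint
# (0 = black, 1 = white) and the turn direction (0 = left 90, 1 = right 90).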
def set_direction(self, direction_code):
if direction_code == 0:
# Turn left 90 degrees
self.direction.rotate(-90)
self.direction = self.direction.nearest_integer()
if self.debug:
print("Turned left 90. New direction: {}".format(self.direction))
elif direction_code == 1:
# Turn right 90 degrees
self.direction.rotate(90)
self.direction = self.direction.nearest_integer()
if self.debug:
print("Turned right 90. New direction: {}".format(self.direction))
else:
raise RuntimeError("Unknown Direction Code '{}'".format(direction_code))
def paint_panel(self, color_code):
if color_code == 0:
# Paint it black
self.grid[self.position.y][self.position.x] = BLACK
if self.debug:
print("Painted Panel at {} BLACK".format(self.position))
elif color_code == 1:
# Paint it white
self.grid[self.position.y][self.position.x] = WHITE
if self.debug:
print("Painted Panel at {} WHITE".format(self.position))
else:
raise RuntimeError("Unknown Paint Code '{}'".format(color_code))
def move(self):
self.position += self.direction
if self.debug:
print("Moved to Panel at {}".format(self.position))
self.visited_panels.add(self.position)
def stop(self):
self.running = False
def run(self):
brain_thread = threading.Thread(target=lambda: self.brain.run(memory_allocation_size=10000))
brain_thread.start()
self.running = True
while self.running:
if self.debug:
print('Tick')
if not self.brain.running:
self.stop()
continue
try:
self.scan_panel()
color, direction = self.get_next_instruction()
self.paint_panel(color)
self.set_direction(direction)
self.move()
except BaseException as e:
print("Exception:", e)
self.stop()
# Terminate the brain
if brain_thread.is_alive():
self.brain.running = False
brain_thread.join()
print(len(self.visited_panels))
def tests():
pass
print("Tests Done")
if __name__ == "__main__":
tests()
input_file = os.path.join(os.path.dirname(os.path.abspath(__file__)), "Input")
program = get_program(input_file)
panel_grid = [[BLACK] * 80 for _ in range(30)]
starting_position = Point(10, 10)
panel_grid[starting_position.y][starting_position.x] = WHITE
painter = PaintRobot(program, panel_grid, starting_position, Vector(0, -1))
painter.run()
for row in panel_grid:
output = ""
for panel in row:
if panel == BLACK:
output += "*"
elif panel == WHITE:
output += " "
print(output)
|
from core.advbase import *
class Pecorine(Adv):
def prerun(self):
self.gourmand_gauge = 0
self.gourmand_mode = ModeManager(group="gourmand", fs=True, s1=True, duration=20, pause=("s", "dragon"))
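# Ability 1: the gauge fills from "cp" hit attributes only while gourmand
# mode is inactive; when it reaches exactly 100 the mode turns on and the
# gauge resets.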
def a1_update(self, gauge):
if not self.gourmand_mode.get():
self.gourmand_gauge += gauge
if self.gourmand_gauge == 100:
self.gourmand_mode.on()
self.gourmand_gauge = 0
def hitattr_make(self, name, base, group, aseq, attr, onhit=None, dtype=None):
self.a1_update(attr.get("cp", 0))
super().hitattr_make(name, base, group, aseq, attr, onhit=onhit, dtype=dtype)
variants = {None: Pecorine}
|
# coding:utf-8
from django.shortcuts import render, render_to_response, HttpResponse
from api.function import *
from api.models import *
defaultCount = 40
# Create your views here.
# API documentation
def doc(request):
return render_to_response("doc/doc.html", {})
# Login handler, POST; parameters: uid, pwd (pwd must be MD5-hashed)
def login(request):
if request.method == "POST":
try:
uid = request.POST["uid"]
pwd = request.POST["pwd"]
except:
return error("请求参数不正确")
user = queryUser(uid, pwd)
if user == None:
return error("用户名或密码错误")
context = dict()
context["uid"] = user.uid;
context["name"] = user.name
context["photo"] = user.photo;
context["access_token"] = user.access_token
return render_to_response("login.json", context, content_type = 'application/json')
else:
return error("请求方式不正确,应使用POST")
# Registration handler, POST; parameters: uid, pwd (pwd must be MD5-hashed)
def register(request):
if request.method == "POST":
try:
uid = request.POST["uid"]
pwd = request.POST["pwd"]
if len(pwd) < 6 or len(pwd) > 64:
return error("密码长度不符合要求")
if len(uid) < 6 or len(uid) > 16:
return error("用户名长度不符合要求")
except:
return error("请求参数不正确")
user = registerUser(uid, pwd)
if user == None:
return error("注册失败, 用户名已被注册")
elif user == -1:
return error("注册失败, 请稍后再试")
return render_to_response("register.json", {}, content_type = "application/json")
else:
return error("请求方式不正确,应使用POST")
# Send a chat message, POST.
# Parameters:
#   text (message body: the text for a text message; the image name for a
#         gif sticker message; the resource URL for a voice or image message)
#   type (message type): (0, "text"), (1, "gif sticker"), (2, "image"), (3, "voice")
#   access_token
#   to_user (recipient uid)
def chat_upload(request):
if request.method == "POST":
try:
# TODO: for voice and image messages the parameters (URL) are not strictly validated yet
text = request.POST["text"]
access_token = request.POST["access_token"]
to_user = request.POST["to_user"]
type = request.POST["type"]
type = int(type)
if type < 0 or type > 3:
type = 0
except:
return error("请求参数不正确")
context = insertMessage(text, type, access_token, to_user)
if context == -1:
return error("登录失效, 请重新登录")
elif context == -2:
return error("目的用户不存在")
elif context == None:
return error("服务器发生错误")
return render_to_response("chat/upload.json", context, content_type = "application/json")
else:
return error("请求方式不正确,应使用POST")
# Fetch new messages, POST.
# Parameters: access_token, since_id, count
# Returns, for the user identified by access_token, messages with id > since_id, at most count of them.
def chat_newmessages(request):
if request.method == "POST":
try:
since_id = request.POST["since_id"]
access_token = request.POST["access_token"]
count = defaultCount
if request.POST.has_key("count"):
count = int(request.POST["count"])
if count <= 0:
count = 1
elif count > defaultCount:
count = defaultCount
except:
return error("请求参数不正确")
context = queryNewMessages(since_id, access_token, count)
if context == -1:
return error("登录失效, 请重新登录")
elif context == None:
return error("服务器发生错误")
return render_to_response("chat/messages.json", context, content_type = "application/json")
else:
return error("请求方式不正确,应使用POST")
# Fetch old messages, POST.
# Parameters: access_token, max_id, count
# Returns, for the user identified by access_token, messages with id < max_id, at most count of them.
def chat_oldmessages(request):
if request.method == "POST":
try:
max_id = request.POST["max_id"]
access_token = request.POST["access_token"]
count = defaultCount
if request.POST.has_key("count"):
count = int(request.POST["count"])
if count <= 0:
count = 1
elif count > defaultCount:
count = defaultCount
except:
return error("请求参数不正确")
context = queryOldMessages(max_id, access_token, count)
if context == -1:
return error("登录失效, 请重新登录")
elif context == None:
return error("服务器发生错误")
return render_to_response("chat/messages.json", context, content_type = "application/json")
else:
return error("请求方式不正确,应使用POST")
# Post a status update, POST.
# Parameters: text (status body)
#             pics (image URLs, array)
#             access_token (sender)
def status_upload(request):
if request.method == "POST":
try:
text = request.POST["text"]
access_token = request.POST["access_token"]
pics = list()
# TODO: the parameters (URLs) are not strictly validated yet
if request.POST.has_key("pics[]"):
pics = request.POST.getlist('pics[]')
print pics
if len(pics) > 9:
return error("图片数量不能多于9张")
except:
return error("请求参数不正确")
context = insertStatus(text,access_token, pics)
if context == -1:
return error("登录失效, 请重新登录")
elif context == None:
return error("服务器发生错误")
return render_to_response("status/upload.json", context, content_type = "application/json")
else:
return error("请求方式不正确,应使用POST")
# Fetch new statuses, POST.
# Parameters: access_token, since_id, count
# Returns, for the user identified by access_token, statuses with id > since_id, at most count of them.
def status_newstatuses(request):
if request.method == "POST":
try:
since_id = request.POST["since_id"]
access_token = request.POST["access_token"]
count = defaultCount
if request.POST.has_key("count"):
count = int(request.POST["count"])
if count <= 0:
count = 1
elif count > defaultCount:
count = defaultCount
except:
return error("请求参数不正确")
context = queryNewStatuses(since_id, access_token, count)
if context == -1:
return error("登录失效, 请重新登录")
elif context == None:
return error("服务器发生错误")
return render_to_response("status/statuses.json", context, content_type = "application/json")
else:
return error("请求方式不正确,应使用POST")
# Fetch old statuses, POST.
# Parameters: access_token, max_id, count
# Returns, for the user identified by access_token, statuses with id < max_id, at most count of them.
def status_oldstatuses(request):
if request.method == "POST":
try:
max_id = request.POST["max_id"]
access_token = request.POST["access_token"]
count = defaultCount
if request.POST.has_key("count"):
count = int(request.POST["count"])
if count <= 0:
count = 1
elif count > defaultCount:
count = defaultCount
except:
return error("请求参数不正确")
context = queryOldStatuses(max_id, access_token, count)
if context == -1:
return error("登录失效, 请重新登录")
elif context == None:
return error("服务器发生错误")
return render_to_response("status/statuses.json", context, content_type = "application/json")
else:
return error("请求方式不正确,应使用POST")
# Post a comment, POST.
# Parameters: text (comment body)
#             access_token
#             s_id (status id)
def comment_upload(request):
if request.method == "POST":
try:
text = request.POST["text"]
access_token = request.POST["access_token"]
s_id = request.POST["s_id"]
except:
return error("请求参数不正确")
context = insertComment(text, access_token, s_id)
if context == -1:
return error("登录失效, 请重新登录")
elif context == -2:
return error("该状态不存在")
elif context == None:
return error("服务器发生错误")
return render_to_response("comment/upload.json", context, content_type = "application/json")
else:
return error("请求方式不正确,应使用POST")
# Fetch all comments on a status, POST.
# Parameters: s_id (status id)
def comment_comments(request):
if request.method == "POST":
try:
s_id = request.POST["s_id"]
except:
return error("请求参数不正确")
context = queryComments(s_id)
if context == -2:
return error("该状态不存在")
elif context == None:
return error("服务器发生错误")
return render_to_response("comment/comments.json", context, content_type = "application/json")
else:
return error("请求方式不正确,应使用POST")
# Send a friend request, POST.
# Parameters: to_user (recipient uid)
#             text (request note)
#             access_token
def friend_addfriend(request):
if request.method == "POST":
try:
text = request.POST["text"]
access_token = request.POST["access_token"]
to_user = request.POST["to_user"]
except:
return error("请求参数不正确")
context = addFriend(text, access_token, to_user)
if context == -1:
return error("登录失效, 请重新登录")
elif context == -2:
return error("添加的用户不存在")
elif context == -3:
return error("不能添加自己为好友")
elif context == -4:
return error("对方已经是你好友了")
elif context == -5:
return error("请求已发出无需重复请求")
elif context == -6:
return error("对方已对你发出好友请求,同意其请求即可")
elif context == None:
return error("服务器发生错误")
return render_to_response("friend/addfriend.json", context, content_type = "application/json")
else:
return error("请求方式不正确,应使用POST")
# Handle a friend request, POST.
# Parameters: f_id (friend-request message id)
#             access_token
#             result (outcome): (1, "decline"), (2, "accept")
def friend_dowithrequest(request):
if request.method == "POST":
try:
access_token = request.POST["access_token"]
f_id = request.POST["f_id"]
result = request.POST["result"]
result = int(result)
if result != 1 and result != 2:
result = 1
except:
return error("请求参数不正确")
context = dowithAddFriend(f_id, access_token, result)
if context == -1:
return error("登录失效, 请重新登录")
elif context == -2:
return error("该请求不存在")
elif context == -3:
return error("不能添加自己为好友")
elif context == -4:
return error("对方已经是你好友了")
elif context == -5:
return error("请求已发出无需重复请求")
elif context == -6:
return error("对方已对你发出好友请求,同意其请求即可")
elif context == None:
return error("服务器发生错误")
return render_to_response("friend/addfriend.json", context, content_type = "application/json")
else:
return error("请求方式不正确,应使用POST")
# Delete a friend
def friend_deletefriend(request):
if request.method == "POST":
try:
access_token = request.POST["access_token"]
to_user = request.POST["to_user"]
except:
return error("请求参数不正确")
context = deleteFriend(to_user, access_token)
if context == -1:
return error("登录失效, 请重新登录")
elif context == -2:
return error("欲删除的用户不存在")
elif context == -3:
return error("不能删除自己")
elif context == -4:
return error("对方还不是你好友")
elif context == None:
return error("服务器发生错误")
return render_to_response("friend/addfriend.json", context, content_type = "application/json")
else:
return error("请求方式不正确,应使用POST")
# Fetch all pending friend requests
def friend_newfriends(request):
if request.method == "POST":
try:
access_token = request.POST["access_token"]
except:
return error("请求参数不正确")
context = newFriends(access_token)
if context == -1:
return error("登录失效, 请重新登录")
elif context == None:
return error("服务器发生错误")
return render_to_response("friend/newfriends.json", context, content_type = "application/json")
else:
return error("请求方式不正确,应使用POST")
# Fetch the friend list
def friend_friendlist(request):
if request.method == "POST":
try:
access_token = request.POST["access_token"]
except:
return error("请求参数不正确")
context = queryFriendList(access_token)
if context == -1:
return error("登录失效, 请重新登录")
elif context == None:
return error("服务器发生错误")
return render_to_response("friend/friendlist.json", context, content_type = "application/json")
else:
return error("请求方式不正确,应使用POST")
# Search for users (strangers)
def friend_search(request):
if request.method == "POST":
try:
access_token = request.POST["access_token"]
key = request.POST["key"]
page = request.POST["page"]
page = int(page)
except:
return error("请求参数不正确")
context = querySearch(access_token, key, page)
if context == -1:
return error("登录失效, 请重新登录")
elif context == None:
return error("服务器发生错误")
return render_to_response("friend/users.json", context, content_type = "application/json")
else:
return error("请求方式不正确,应使用POST")
# Fetch user info; parameters: uid, access_token
def user_userinfo(request):
if request.method == "POST":
try:
uid = request.POST["uid"]
access_token = request.POST["access_token"]
except:
return error("请求参数不正确")
context = queryUserInfo(uid, access_token)
if context == -1:
return error("登录失效, 请重新登录")
elif context == -2:
return error("用户不存在")
elif context == None:
return error("服务器发生错误")
return render_to_response("user/userinfo.json", context, content_type = "application/json")
else:
return error("请求方式不正确,应使用POST")
# Update user info; parameter: access_token
def user_updateuserinfo(request):
if request.method == "POST":
try:
access_token = request.POST["access_token"]
name = request.POST["name"]
age = request.POST["age"]
sex = request.POST["sex"]
birthday = request.POST["birthday"]
city = request.POST["city"]
if len(city) <= 0 or len(name) <= 0 or len(age) <= 0 or len(sex) <= 0 or len(birthday) <= 0:
return error("请求参数不正确")
except:
return error("请求参数不正确")
context = updateUserInfo(access_token, name, age, sex, birthday, city)
if context == -1:
return error("用户不存在")
elif context == None:
return error("服务器发生错误")
return render_to_response("user/userinfo.json", context, content_type = "application/json")
else:
return error("请求方式不正确,应使用POST")
# Update user password; parameters: access_token, pwd, oldpwd
def user_updateuserpwd(request):
if request.method == "POST":
try:
access_token = request.POST["access_token"]
pwd = request.POST["pwd"]
oldpwd = request.POST["oldpwd"]
if len(pwd) < 6 or len(pwd) > 64:
return error("密码长度不能小于6")
except:
return error("请求参数不正确")
context = updateUserPwd(access_token, pwd, oldpwd)
if context == -1:
return error("用户不存在")
elif context == -2:
return error("旧密码不符,修改失败")
elif context == None:
return error("服务器发生错误")
return render_to_response("user/userinfo.json", context, content_type = "application/json")
else:
return error("请求方式不正确,应使用POST")
# Update user avatar; parameters: access_token, photo
def user_updateuserphoto(request):
if request.method == "POST":
try:
access_token = request.POST["access_token"]
photo = request.POST["photo"]
except:
return error("请求参数不正确")
context = updateUserPhoto(access_token, photo)
if context == -1:
return error("用户不存在")
elif context == None:
return error("服务器发生错误")
return render_to_response("user/userinfo.json", context, content_type = "application/json")
else:
return error("请求方式不正确,应使用POST")
def qiniu_token(request):
if request.method == "POST":
try:
key = request.POST["fileName"]
except:
return error("请求参数不正确")
context = getQiniu_token(key)
if context == None:
return error("服务器发生错误")
return render_to_response("qiniu/token.json", context, content_type = "application/json")
else:
return error("请求方式不正确,应使用POST")
def error(message):
return render_to_response("error.json", {"message" : message}, content_type = 'application/json')
|
"""
Obtain the statistics of Du et al. dataset
"""
import json
from pathlib import Path
from transformers import BertTokenizerFast
from pprint import pprint as print
from utils.logging import logging
from tqdm import tqdm
class Configs:
txt_data_dir = "../txt_data/preprocessed/"
txt_data_file_fmt = "para_73k_{split}.json"
splits = [
"train", "dev", "test"
]
max_seq_length = 384
if __name__ == "__main__":
# configs
cfgs = Configs
# logging
logger = logging.getLogger()
# check input dir
txt_data_dir = Path(cfgs.txt_data_dir)
assert txt_data_dir.is_dir()
# tokenizer
tokenizer = BertTokenizerFast.from_pretrained('bert-base-uncased')
for s in cfgs.splits:
input_json = txt_data_dir / cfgs.txt_data_file_fmt.format(split=s)
assert input_json.is_file()
with open(input_json, "r", encoding="utf-8") as f:
examples = json.load(f)
logger.info(f"Num of examples in {s}: {len(examples)}")
context_set = set()
num_context_toks = 0
num_question_toks = 0
for e in tqdm(examples, desc=s):
# Context
context = e["context"]
if context not in context_set:
context_toks = tokenizer(context, add_special_tokens=False)["input_ids"]
num_context_toks += len(context_toks)
context_set.add(context)
# Questions
question = e["question"]
question_toks = tokenizer(question, add_special_tokens=False)["input_ids"]
num_question_toks += len(question_toks)
avg_num_context_toks = num_context_toks / len(context_set)
avg_num_question_toks = num_question_toks / len(examples)
logger.info(f"Average number of context tokens in {s}: {avg_num_context_toks:.2f}")
logger.info(f"Average number of question tokens in {s}: {avg_num_question_toks:.2f}")
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
'Practice with the attribute-binding restrictions of __slots__'
__author__ = 'Jacklee'
# Import modules
#import types
# Binding restrictions
class Student(object):
__slots__ = ('name', 'age')
s = Student()
#s.score = 99  # would raise AttributeError: 'Student' object has no attribute 'score'
Student.score = 99  # class attributes are not restricted by __slots__
class A(object):
__slots__ = ()
pass
class B(A):
#__slots__ = ('x', 'y')
pass
class C(B):
__slots__ = ('z',)  # note: ('z') without the comma would be just the string 'z'
pass
a = A()
b = B()
c = C()
b.x = 1
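# b.x = 1 succeeds because B does not define __slots__, so instances of B
# (and of its subclass C) still get a regular __dict__ despite A's empty
# slots; __slots__ only restricts attribute creation when every class in
# the MRO defines it.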
|
from arm.logicnode.arm_nodes import *
class OnContactNode(ArmLogicTreeNode):
"""Activates the output when the rigid body make contact with
another rigid body.
@option Begin: the output is activated on the first frame when the
two objects have contact
@option End: the output is activated on the frame after the last
frame when the two objects had contact
@option Overlap: the output is activated on each frame the object
have contact
"""
bl_idname = 'LNOnContactNode'
bl_label = 'On Contact'
arm_version = 1
property0: EnumProperty(
items = [('Begin', 'Begin', 'Begin'),
('End', 'End', 'End'),
('Overlap', 'Overlap', 'Overlap')],
name='', default='Begin')
def init(self, context):
super(OnContactNode, self).init(context)
self.add_input('ArmNodeSocketObject', 'Rigid Body 1')
self.add_input('ArmNodeSocketObject', 'Rigid Body 2')
self.add_output('ArmNodeSocketAction', 'Out')
def draw_buttons(self, context, layout):
layout.prop(self, 'property0')
add_node(OnContactNode, category=PKG_AS_CATEGORY, section='contact')
|
"""
File watch commercial remover
"""
import os
import subprocess
import logging
import shutil
import sys
import time
from threading import Thread
from queue import Queue
WORK_ROOT = "/config/"
_LOGGER = logging.getLogger(__name__)
logging.basicConfig(filename=WORK_ROOT+'watcher.log', filemode='a', level=logging.DEBUG, format='%(asctime)s - %(levelname)s - %(message)s', datefmt='%m/%d/%Y %I:%M:%S %p')
IN_PROCESS = set()
class CommercialWorker(Thread):
"""Commercial process queue"""
def __init__(self, queue):
Thread.__init__(self)
self.queue = queue
def run(self):
while True:
# Get paths
pid_path, file_path = self.queue.get()
try:
find_commercials(pid_path, file_path)
finally:
self.queue.task_done()
def find_commercials(pid_path, file_path):
"""Call comchap to find commercials"""
# file_path = file_path.rstrip()
# print(pid_path)
# print(file_path)
# return
# Check to make sure file exists first
if os.path.isfile(file_path):
_LOGGER.info("Processing: " + file_path)
name = os.path.splitext(os.path.basename(file_path))[0]
path = os.path.dirname(file_path)
# Make backup of original in case something goes wrong and
# store its size for comparison later
backup = os.path.join(path, name + ".mkv.bak")
shutil.copy(file_path, backup)
backup_size = os.path.getsize(backup)
_LOGGER.info("Backup Created (%s): %s", backup_size, backup)
# Start commercial processing
cmd = ['/opt/comchap/comchap',
'--keep-edl',
'--cuvid',
'--comskip=/opt/Comskip/comskip',
'--comskip-ini=/opt/Comskip/comskip.ini',
file_path]
try:
result = subprocess.run(cmd, stdout=subprocess.DEVNULL, timeout=5400)
_LOGGER.debug("Subprocess finished (code: %s) for: %s", result.returncode, file_path)
except subprocess.TimeoutExpired:
# Timeout expired before we had a result, so there is no return code to log
_LOGGER.debug("1:30hr timeout expired for: %s", file_path)
# If we end up here we need to make sure the backup is restored
shutil.move(backup, file_path)
# Remove working indicator and bail out
os.remove(pid_path)
IN_PROCESS.remove(file_path)
return
if result.returncode == 0:
_LOGGER.info("Commercial chapters inserted into: " + file_path)
# Explicitly set new file permissions
shutil.chown(file_path, 99, 100)
os.chmod(file_path, 0o644)
# Make sure new file exists and is in the size ballpark
if os.path.isfile(file_path):
new_size = os.path.getsize(file_path)
if new_size > (backup_size*.9):
# New is at least 90% of backup, we can move on
# Remove path from process set and delete file
try:
os.remove(pid_path)
IN_PROCESS.remove(file_path)
os.remove(backup)
except OSError as err:
_LOGGER.error("File removal error: " + err)
else:
_LOGGER.error("New file size incorrect (B: %s, N: %s) Restoring Backup.", backup_size, new_size)
# New file size isn't what we expect, restore the backup
shutil.move(backup, file_path)
# Remove working indicators
os.remove(pid_path)
IN_PROCESS.remove(file_path) # Only removing this would allow a retry
else:
_LOGGER.error("New file doesn't exist, restoring backup.")
shutil.move(backup, file_path)
# Remove working indicator from set to try again
IN_PROCESS.remove(file_path)
else:
if result.stderr:
# Something went wrong in commercial processing
_LOGGER.error("Comchap error: %s", result.stderr)
else:
_LOGGER.error("Unknown Comchap error (%s) for file: %s, Restoring backup.", result.returncode, file_path)
# If we end up here we need to make sure the backup is restored
shutil.move(backup, file_path)
# Remove working indicator
os.remove(pid_path)
IN_PROCESS.remove(file_path)
else:
# File doesn't exist, we can't do anything
_LOGGER.info("%s does not exist, nothing to do...", file_path)
# Remove working indicator
os.remove(pid_path)
IN_PROCESS.remove(file_path)
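# Task-file format: a job is queued for every "*.comm" file that shows up in
# the watch folder; its first line holds the path of the recording to process.
# For example (hypothetical paths):
#   echo "/media/tv/Show.S01E01.mkv" > /watch/Show.S01E01.comm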
def main():
"""Main function."""
watch_path = os.fsencode(sys.argv[1])
queue = Queue()
# Start five daemon workers to drain the task queue
for _ in range(5):
worker = CommercialWorker(queue)
worker.daemon = True
worker.start()
_LOGGER.info("Starting Loop...")
while True:
# Check folder for new file tasks
for item in os.scandir(watch_path):
if item.is_file():
pid = item.path.decode('utf-8')
if pid.endswith(".comm"):
# New comm task to process
with open(pid) as fop:
fpath = fop.readline().rstrip()
if fpath not in IN_PROCESS:
IN_PROCESS.add(fpath)
queue.put((pid, fpath))
# Check every 5s to limit I/O
time.sleep(5)
if __name__ == '__main__':
main()
|
import taichi as ti
def archs_support_bitmasked(func):
return ti.archs_excluding(ti.opengl, ti.cc)(func)
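# (ti.archs_excluding runs the decorated test on every backend except the
# listed ones; OpenGL and the C backend do not support bitmasked SNodes.)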
@archs_support_bitmasked
def test_basic():
x = ti.field(ti.i32)
c = ti.field(ti.i32)
s = ti.field(ti.i32)
bm = ti.root.bitmasked(ti.ij, (3, 6)).bitmasked(ti.i, 5)
bm.place(x)
ti.root.place(c, s)
@ti.kernel
def run():
x[5, 1] = 2
x[9, 4] = 20
x[0, 3] = 20
@ti.kernel
def sum():
for i, j in x:
c[None] += ti.is_active(bm, [i, j])
s[None] += x[i, j]
run()
sum()
assert c[None] == 3
assert s[None] == 42
@archs_support_bitmasked
def test_bitmasked_then_dense():
x = ti.field(ti.f32)
s = ti.field(ti.i32)
n = 128
ti.root.bitmasked(ti.i, n).dense(ti.i, n).place(x)
ti.root.place(s)
@ti.kernel
def func():
for i in x:
s[None] += 1
x[0] = 1
x[127] = 1
x[256] = 1
x[257] = 1
func()
assert s[None] == 256
@archs_support_bitmasked
def test_bitmasked_bitmasked():
x = ti.field(ti.f32)
s = ti.field(ti.i32)
n = 128
ti.root.bitmasked(ti.i, n).bitmasked(ti.i, n).place(x)
ti.root.place(s)
@ti.kernel
def func():
for i in x:
s[None] += 1
x[0] = 1
x[127] = 1
x[256] = 1
x[257] = 1
func()
assert s[None] == 4
@archs_support_bitmasked
def test_huge_bitmasked():
# Mainly for testing Metal listgen's grid-stride loop implementation.
x = ti.field(ti.f32)
s = ti.field(ti.i32)
n = 1024
ti.root.bitmasked(ti.i, n).bitmasked(ti.i, 2 * n).place(x)
ti.root.place(s)
@ti.kernel
def func():
for i in range(n * n * 2):
if i % 32 == 0:
x[i] = 1.0
@ti.kernel
def count():
for i in x:
s[None] += 1
func()
count()
assert s[None] == (n * n * 2) // 32
@archs_support_bitmasked
def test_bitmasked_listgen_bounded():
# Mainly for testing Metal's listgen is bounded by the actual number of
# elements possible for that SNode. Note that 1) SNode's size is padded
# to POT, and 2) Metal ListManager's data size is not padded, we need to
# make sure listgen doesn't go beyond ListManager's capacity.
x = ti.field(ti.i32)
c = ti.field(ti.i32)
# A prime that is a bit higher than 65536, which is Metal's maximum number
# of threads for listgen.
n = 80173
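# (The SNode size is padded to the next power of two, 2**17 = 131072 here,
#  so listgen must not visit the 131072 - 80173 = 50899 padded slots.)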
ti.root.dense(ti.i, n).bitmasked(ti.i, 1).place(x)
ti.root.place(c)
@ti.kernel
def func():
for i in range(n):
x[i] = 1
@ti.kernel
def count():
for i in x:
c[None] += 1
func()
count()
assert c[None] == n
@archs_support_bitmasked
def test_deactivate():
# https://github.com/taichi-dev/taichi/issues/778
a = ti.field(ti.i32)
a_a = ti.root.bitmasked(ti.i, 4)
a_b = a_a.dense(ti.i, 4)
a_b.place(a)
c = ti.field(ti.i32)
ti.root.place(c)
@ti.kernel
def run():
a[0] = 123
@ti.kernel
def is_active():
c[None] = ti.is_active(a_a, [0])
@ti.kernel
def deactivate():
ti.deactivate(a_a, [0])
run()
is_active()
assert c[None] == 1
deactivate()
is_active()
assert c[None] == 0
@archs_support_bitmasked
def test_sparsity_changes():
x = ti.field(ti.i32)
c = ti.field(ti.i32)
s = ti.field(ti.i32)
bm = ti.root.bitmasked(ti.i, 5).bitmasked(ti.i, 3)
bm.place(x)
ti.root.place(c, s)
@ti.kernel
def run():
for i in x:
s[None] += x[i]
c[None] += 1
# Only two elements of |x| are activated
x[1] = 2
x[8] = 20
run()
assert c[None] == 2
assert s[None] == 22
c[None] = 0
s[None] = 0
# Four elements are activated now
x[7] = 15
x[14] = 5
run()
assert c[None] == 4
assert s[None] == 42
@archs_support_bitmasked
def test_bitmasked_offset_child():
x = ti.field(ti.i32)
x2 = ti.field(ti.i32)
y = ti.field(ti.i32)
y2 = ti.field(ti.i32)
y3 = ti.field(ti.i32)
z = ti.field(ti.i32)
s = ti.field(ti.i32, shape=())
n = 16
# Offset children:
# * In |bm|'s cell: |bm2| has a non-zero offset
# * In |bm2|'s cell: |z| has a non-zero offset
# * We iterate over |z| to test that listgen handles offsets correctly
bm = ti.root.bitmasked(ti.i, n)
bm.dense(ti.i, 16).place(x, x2)
bm2 = bm.bitmasked(ti.i, 4)
bm2.dense(ti.i, 4).place(y, y2, y3)
bm2.bitmasked(ti.i, 4).place(z)
@ti.kernel
def func():
for _ in z:
s[None] += 1
z[0] = 1
z[7] = 1
z[42] = 1
z[53] = 1
z[88] = 1
z[101] = 1
z[233] = 1
func()
assert s[None] == 7
|
"""skmf.views by Brendan Sweeney, CSS 593, 2015.
Define the views that are rendered by Flask to the end user. This controls what
action to take given a URL path provided by the user. Typically, a path will
result in the display of a Web page which is rendered from a template. Multiple
paths may lead to the same action, but multiple actions cannot directly share a
path. Instead, the action for one path may include a conditional redirect for
another path. Any views that require the user to be authenticated have the
@login_required decorator to automatically redirect unauthenticated users to a
login view. Form validation is handled by WTForms and session management is
handled by Flask-Login, but it is still necessary to properly verify any user
provided data, particularly when it will be passed to an object that interacts
with the SPARQL endpoint.
Functions:
add_conn: Insert one RDF triple through the SPARQL endpoint.
add_tag: Create a new tag to store with the SPARQL endpoint.
add_user: Create a new user to store with the SPARQL endpoint.
load_user: Retrieve a user from the triplestore for login authentication.
login: Authenticate and create a session for a valid user.
logout: Clear the session for a logged in user.
page_not_found: Handle user attempts to access an invalid path.
resources: View and manage resources in the datastore.
show_subject: Display all triples for a single RDF subject.
welcome: Display a basic landing page.
"""
from time import sleep
from flask import render_template, request, redirect, url_for, flash
from flask.ext.bcrypt import Bcrypt
from flask.ext.login import LoginManager, login_required, login_user, \
logout_user, current_user
from skmf import app, forms
from skmf.resource import Query, Subject, User
import skmf.i18n.en_US as uiLabel
bcrypt = Bcrypt(app)
"""Password hash management for secure user sessions and persistence."""
login_manager = LoginManager()
"""User session manager and token handler."""
login_manager.init_app(app)
login_manager.login_view = 'login'
@app.route('/')
@app.route('/index')
def welcome():
"""Display a landing page with pointers on getting started with SKMF."""
return render_template('welcome.html', title=uiLabel.viewWelcomeTitle)
@app.route('/resources', methods=['GET', 'POST'])
def resources():
"""View and manage resources in the datastore.
This function is only meant to provide a basic demonstration of the under-
lying SPARQL functionality. It is barely functional and is a mess of
functionality that needs to be split into multiple views. That said, it
allows a user to: perform queries with one degree of separation from the
target; create resources and connections (properties) in the datastore;
insert triples involving existing resources and connections into the data-
store. Ideally, this functionality will be migrated to a set of views that
are capable of providing the same level of dynamic query building as is
supported by the backend.
Returns:
Rendered page containing forms and query results, if any.
"""
entries = None
print('entering entries')
# Failure to set explicit parameters leads to broken garbage collection
query = Query(labellist = set(), subjectlist = {}, optlist = [])
print('empty query')
rdfs_class = query.get_resources('rdfs:Class')
owl_class = query.get_resources('owl:Class')
owl_obj_prop = query.get_resources('owl:ObjectProperty')
owl_dtype_prop = query.get_resources('owl:DatatypeProperty')
rdf_property = query.get_resources('rdf:Property')
skmf_resource = query.get_resources('skmf:Resource')
print('resources gathered')
query_form = forms.FindEntryForm()
print('empty FindEntryForm')
res_choices = set()
for res in skmf_resource:
if res['resource']['type'] != 'bnode':
res_choices.add((res['resource']['value'],
res['label']['value'] if 'value' in res['label'] else res['resource']['value'].partition('#')[2]))
res_sorted = sorted(list(res_choices), key=lambda x: x[1])
conn_choices = set()
for conn in rdf_property:
if conn['resource']['type'] != 'bnode':
conn_choices.add((conn['resource']['value'],
conn['label']['value'] if 'value' in conn['label'] else conn['resource']['value'].partition('#')[2]))
for conn in owl_obj_prop:
if conn['resource']['type'] != 'bnode':
conn_choices.add((conn['resource']['value'],
conn['label']['value'] if 'value' in conn['label'] else conn['resource']['value'].partition('#')[2]))
for conn in owl_dtype_prop:
if conn['resource']['type'] != 'bnode':
conn_choices.add((conn['resource']['value'],
conn['label']['value'] if 'value' in conn['label'] else conn['resource']['value'].partition('#')[2]))
conn_choices.add(('http://www.w3.org/1999/02/22-rdf-syntax-ns#type', 'A'))
conn_sorted = sorted(list(conn_choices), key=lambda x: x[1])
targ_choices = set()
for targ in rdfs_class:
if targ['resource']['type'] != 'bnode':
targ_choices.add((targ['resource']['value'],
targ['label']['value'] if 'value' in targ['label'] else targ['resource']['value'].partition('#')[2]))
for targ in owl_class:
if targ['resource']['type'] != 'bnode':
targ_choices.add((targ['resource']['value'],
targ['label']['value'] if 'value' in targ['label'] else targ['resource']['value'].partition('#')[2]))
targ_sorted = sorted(list(targ_choices), key=lambda x: x[1])
print('resources sorted')
query_form.resource.choices = res_sorted[:]
query_form.resource.choices.insert(0, (' ', ''))
query_form.resource.choices.insert(0, ('-', '---'))
query_form.resource.choices.insert(0, ('', 'Resource'))
query_form.connection.choices = conn_sorted[:]
query_form.connection.choices.insert(0, (' ', ''))
query_form.connection.choices.insert(0, ('-', '---'))
query_form.connection.choices.insert(0, ('', 'Connection'))
query_form.target.choices = targ_sorted[:]
query_form.target.choices.insert(0, (' ', ''))
query_form.target.choices.insert(0, ('-', '---'))
query_form.target.choices.insert(0, ('', 'Target'))
query_form.resource_2.choices = res_sorted[:]
query_form.resource_2.choices.insert(0, (' ', ''))
query_form.resource_2.choices.insert(0, ('-', '---'))
query_form.resource_2.choices.insert(0, ('', 'Resource'))
query_form.connection_2.choices = conn_sorted[:]
query_form.connection_2.choices.insert(0, (' ', ''))
query_form.connection_2.choices.insert(0, ('-', '---'))
query_form.connection_2.choices.insert(0, ('', 'Connection'))
query_form.target_2.choices = targ_sorted[:]
query_form.target_2.choices.insert(0, (' ', ''))
query_form.target_2.choices.insert(0, ('-', '---'))
query_form.target_2.choices.insert(0, ('', 'Target'))
print('FindEntryForm populated')
insert_form = forms.AddEntryForm()
print('empty AddEntryForm')
update_form = forms.AddConnectionForm()
print('empty AddConnectionForm')
update_form.resource.choices = res_sorted[:]
update_form.resource.choices.insert(0, (' ', ''))
update_form.resource.choices.insert(0, ('-', '---'))
update_form.resource.choices.insert(0, ('', 'Resource'))
update_form.connection.choices = conn_sorted[:]
update_form.connection.choices.insert(0, (' ', ''))
update_form.connection.choices.insert(0, ('-', '---'))
update_form.connection.choices.insert(0, ('', 'Connection'))
update_form.target.choices = targ_sorted[:]
update_form.target.choices.insert(0, (' ', ''))
update_form.target.choices.insert(0, ('-', '---'))
update_form.target.choices.insert(0, ('', 'Target'))
print('AddConnectionForm populated')
if query_form.validate_on_submit():
print('wrong form submitted')
if query_form.target.data:
rdf_object = {}
rdf_object['type'] = 'uri'
rdf_object['value'] = query_form.target.data
else:
rdf_object = {}
rdf_object['type'] = 'label'
rdf_object['value'] = query_form.free_target.data
if query_form.connection.data:
rdf_pred = {}
rdf_pred['type'] = 'uri'
rdf_pred['value'] = query_form.connection.data
else:
rdf_pred = {}
rdf_pred['type'] = 'label'
rdf_pred['value'] = query_form.free_conn.data
if query_form.resource.data:
rdf_subject = {}
rdf_subject['type'] = 'uri'
rdf_subject['value'] = query_form.resource.data
else:
rdf_subject = {}
rdf_subject['type'] = 'label'
rdf_subject['value'] = query_form.free_res.data
triples = []
triple = {}
triple['object'] = rdf_object
triple['predicate'] = rdf_pred
triple['subject'] = rdf_subject
triples.append(triple)
if query_form.target_2.data:
rdf_object_2 = {}
rdf_object_2['type'] = 'uri'
rdf_object_2['value'] = query_form.target_2.data
else:
rdf_object_2 = {}
rdf_object_2['type'] = 'label'
rdf_object_2['value'] = query_form.free_target_2.data
if query_form.connection_2.data:
rdf_pred_2 = {}
rdf_pred_2['type'] = 'uri'
rdf_pred_2['value'] = query_form.connection_2.data
else:
rdf_pred_2 = {}
rdf_pred_2['type'] = 'label'
rdf_pred_2['value'] = query_form.free_conn_2.data
if query_form.resource_2.data:
rdf_subject_2 = {}
rdf_subject_2['type'] = 'uri'
rdf_subject_2['value'] = query_form.resource_2.data
else:
rdf_subject_2 = {}
rdf_subject_2['type'] = 'label'
rdf_subject_2['value'] = query_form.free_res_2.data
triple_2 = {}
triple_2['object'] = rdf_object_2
triple_2['predicate'] = rdf_pred_2
triple_2['subject'] = rdf_subject_2
triples.append(triple_2)
entries = []
temp = query.get_entries(triples)
for entry in temp:
new_entry = {}
for label in entry:
if '_label' not in label:
item = {}
value = entry[label]['value']
item['value'] = value
if entry[label]['type'] == 'uri':
uri = value
item['uri'] = uri
if 'value' in entry['{}_label'.format(label)]:
tag = entry['{}_label'.format(label)]['value']
item['tag'] = tag
new_entry[label] = item
entries.append(new_entry)
# if update_form.validate_on_submit():
# resource = Subject(update_form.resource.data)
# property = update_form.connection.data
# value = update_form.target.data
# object_type = 'uri'
# lang = ''
# if not value:
# value = update_form.free_object.data
# object_type = 'literal'
# lang = uiLabel.ISOCode.lower()
# rdf_object = {}
# rdf_object['value'] = value
# rdf_object['type'] = object_type
# if lang:
# rdf_object['xml:lang'] = lang
# pred_value = {}
# pred_value['type'] = 'uri'
# pred_value['value'] = [rdf_object]
# pred_list = {}
# pred_list[property] = pred_value
# resource.add_data(graphlist={''}, predlist=pred_list)
return render_template('resources.html', title=uiLabel.viewTagTitle,
entries=entries, query_form=query_form,
insert_form=insert_form, update_form=update_form)
@app.route('/add', methods=['POST'])
@login_required
def add_tag():
"""Add a Resource or Connection to the datastore.
A Resource is a special class that will come up as the option for subjects
in query form dropdown lists. A Connection is any rdf:Property, also often
added to query form dropdown lists. A label and description are required to
maintain some consistency in the datastore and to ensure that users are
able to understand what has been returned by a query.
Returns:
Redirect to the resource management Web page.
"""
insert_form = forms.AddEntryForm()
if insert_form.validate_on_submit():
insert_query = Query(labellist = set(), subjectlist = {}, optlist = [])
cat = insert_form.category.data
label = insert_form.label.data
desc = insert_form.description.data
lang = uiLabel.ISOCode.lower()
insert_query.add_resource(cat, label, desc, lang)
return redirect(url_for('resources'))
@app.route('/insert', methods=['POST'])
@login_required
def add_conn():
"""Add a connection to an existing resource in the datastore.
This view is not used, as its functionality was placed directly in the
'resources' view because that view already had the query needed to fill the
options for the dropdown lists. In the future, that functionality should be
returned to this function to clean up the code.
Returns:
Redirect to the resource management Web page.
"""
# Failure to set explicit parameters leads to broken garbage collection
query = Query(labellist = set(), subjectlist = {}, optlist = [])
rdfs_class = query.get_resources('rdfs:Class')
owl_class = query.get_resources('owl:Class')
owl_obj_prop = query.get_resources('owl:ObjectProperty')
owl_dtype_prop = query.get_resources('owl:DatatypeProperty')
rdf_property = query.get_resources('rdf:Property')
skmf_resource = query.get_resources('skmf:Resource')
res_choices = set()
for res in skmf_resource:
if res['resource']['type'] != 'bnode':
res_choices.add((res['resource']['value'],
res['label']['value'] if 'value' in res['label'] else res['resource']['value'].partition('#')[2]))
res_sorted = sorted(list(res_choices), key=lambda x: x[1])
conn_choices = set()
for conn in rdf_property:
if conn['resource']['type'] != 'bnode':
conn_choices.add((conn['resource']['value'],
conn['label']['value'] if 'value' in conn['label'] else conn['resource']['value'].partition('#')[2]))
for conn in owl_obj_prop:
if conn['resource']['type'] != 'bnode':
conn_choices.add((conn['resource']['value'],
conn['label']['value'] if 'value' in conn['label'] else conn['resource']['value'].partition('#')[2]))
for conn in owl_dtype_prop:
if conn['resource']['type'] != 'bnode':
conn_choices.add((conn['resource']['value'],
conn['label']['value'] if 'value' in conn['label'] else conn['resource']['value'].partition('#')[2]))
conn_choices.add(('http://www.w3.org/1999/02/22-rdf-syntax-ns#type', 'A'))
conn_sorted = sorted(list(conn_choices), key=lambda x: x[1])
targ_choices = set()
for targ in rdfs_class:
if targ['resource']['type'] != 'bnode':
targ_choices.add((targ['resource']['value'],
targ['label']['value'] if 'value' in targ['label'] else targ['resource']['value'].partition('#')[2]))
for targ in owl_class:
if targ['resource']['type'] != 'bnode':
targ_choices.add((targ['resource']['value'],
targ['label']['value'] if 'value' in targ['label'] else targ['resource']['value'].partition('#')[2]))
targ_sorted = sorted(list(targ_choices), key=lambda x: x[1])
update_form = forms.AddConnectionForm()
update_form.resource.choices = res_sorted[:]
update_form.resource.choices.insert(0, (' ', ''))
update_form.resource.choices.insert(0, ('-', '---'))
update_form.resource.choices.insert(0, ('', 'Resource'))
update_form.connection.choices = conn_sorted[:]
update_form.connection.choices.insert(0, (' ', ''))
update_form.connection.choices.insert(0, ('-', '---'))
update_form.connection.choices.insert(0, ('', 'Connection'))
update_form.target.choices = targ_sorted[:]
update_form.target.choices.insert(0, (' ', ''))
update_form.target.choices.insert(0, ('-', '---'))
update_form.target.choices.insert(0, ('', 'Target'))
if update_form.validate_on_submit():
print('update_form validated')
resource = Subject(update_form.resource.data)
property = update_form.connection.data
value = update_form.target.data
object_type = 'uri'
lang = ''
if not value:
value = update_form.free_target.data
object_type = 'literal'
lang = uiLabel.ISOCode.lower()
rdf_object = {}
rdf_object['value'] = value
rdf_object['type'] = object_type
if lang:
rdf_object['xml:lang'] = lang
pred_value = {}
pred_value['type'] = 'uri'
pred_value['value'] = [rdf_object]
pred_list = {}
pred_list[property] = pred_value
resource.add_data(graphlist={''}, predlist=pred_list)
return redirect(url_for('resources'))
@app.route('/retrieve')
def show_subject():
"""Query and display all triples pertaining to a singel subject.
If a user is not able to refine a query to retrieve relevant information,
it is always possible to look at all information pertaining to a single
subject. Access to this information may even help the user devise a better
query.
Returns:
Rendered page containing all predicates and objects for one subject.
"""
subject = Subject(request.args.get('subject'))
return render_template('show_subject.html',
title=subject.id, preds=subject.preds)
@app.route('/login', methods=['GET', 'POST'])
def login():
"""Setup a user session.
A user is retrieved based on the user ID entered in the login form. It the
user does not exist, a small time is passed to throw off timing attacks.
Otherwise, the password is collected from the Web form and its BCrypt hash
compared to the hash of the retrieved user. A match results in successful
authentication. All other conditions result in an error.
Returns:
Login page if not authenticated, resource management page otherwise.
"""
error = None
form = forms.LoginForm()
if form.validate_on_submit():
user = User.get(form.username.data)
if not user:
# Make invalid username take same time as wrong password
sleep(1.3)
error = uiLabel.viewLoginInvalid
elif not bcrypt.check_password_hash(user.get_hash(),
form.password.data):
error = uiLabel.viewLoginInvalid
else:
user.authenticated = True
login_user(user)
flash('{0!s} {1!s}'.format(uiLabel.viewLoginWelcome,
user.get_name()))
return redirect(request.args.get('next') or url_for('resources'))
return render_template('login.html', title=uiLabel.viewLoginTitle,
form=form, error=error)
@app.route('/logout')
@login_required
def logout():
"""End a user session.
The current user is set to unauthenticated and session tokens are dropped.
Returns:
Redirect to the resource management Web page.
"""
user = current_user
user.authenticated = False
logout_user()
flash(uiLabel.viewLogoutLoggedout)
return redirect(url_for('resources'))
@app.route('/users', methods=['GET', 'POST'])
@login_required
def add_user():
"""Manage user information in the datastore.
Currently, users may be added and their passwords set. There is no way to
remove a user, set a display name, add permission to the 'users' graph, or
change a password. Also, username is permanent for any user.
Returns:
'Add User' page if current user is 'admin', resource page otherwise.
"""
if current_user.get_id() != 'admin':
return redirect(url_for('resources'))
form = forms.CreateUserForm()
if form.validate_on_submit():
user = User(form.username.data)
if len(user.preds) <= 1:
user.set_hash(bcrypt.generate_password_hash(form.password.data))
user.set_active()
else:
flash('User already exists')
return render_template('users.html', title=uiLabel.viewUserTitle,
form=form)
@app.errorhandler(404)
def page_not_found(error):
"""Handle attempts to access nonexistent pages."""
return render_template('page_not_found.html'), 404
@login_manager.user_loader
def load_user(user_id):
"""Create an instance of the User from the datastore, if id is found."""
return User.get(user_id)
|
import pytest
import random
from automates.program_analysis.CAST2GrFN.model.cast import (
AstNode,
Assignment,
Attribute,
BinaryOp,
BinaryOperator,
Call,
ClassDef,
Dict,
Expr,
FunctionDef,
List,
Loop,
ModelBreak,
ModelContinue,
ModelIf,
ModelReturn,
Module,
Name,
Number,
Set,
String,
Subscript,
Tuple,
UnaryOp,
UnaryOperator,
VarType,
Var,
)
from automates.program_analysis.CAST2GrFN.model.cast_to_air_model import (
C2ATypeError,
)
from automates.program_analysis.CAST2GrFN.cast import CAST
from automates.model_assembly.networks import GroundedFunctionNetwork
from automates.utils.misc import rd
def get_grfn_from_json_file(file):
return GroundedFunctionNetwork.from_json(file)
@pytest.fixture(autouse=True)
def run_around_tests():
# Before each test, set the seed for generating uuids to 0 for consistency
# between tests and expected output
rd.seed(0)
# Run the test function
yield
@pytest.fixture
def basic_function_def_and_assignment_grfn():
json_filepath = "tests/data/program_analysis/CAST2GrFN/basic_function_def_and_assignment_grfn.json"
return get_grfn_from_json_file(json_filepath)
@pytest.fixture
def pid_c_cast_grfn():
return None
@pytest.fixture
def cast_with_all_nodes_grfn():
return None
@pytest.fixture
def basic_function_def_and_assignment_cast():
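    # Builds the CAST for (roughly) this source:
    #     def exampleFunction():
    #         exampleVar = 36.2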
v = Var(val=Name(name="exampleVar"), type="float")
n = Number(number=36.2)
a = Assignment(left=v, right=n)
f = FunctionDef(name="exampleFunction", func_args=[], body=[a])
m = Module(name="ExampleModule", body=[f])
return CAST([m], cast_source_language="")
@pytest.fixture
def cast_with_all_nodes():
class_func_arg_name = Var(val="exampleArg", type="Number")
var = Var(val=Name(name="exampleVar"), type="Number")
number = Number(number=36.2)
class_func_assign_expr = UnaryOp(
op=UnaryOperator.USUB,
value=BinaryOp(op=BinaryOperator.ADD, left=class_func_arg_name, right=number),
)
class_func_assign = Assignment(left=var, right=class_func_assign_expr)
class_func_str = String(string="str")
class_func_return = ModelReturn(value=class_func_str)
class_func_def = FunctionDef(
name="exampleClassFunction",
func_args=[class_func_arg_name],
body=[class_func_assign, class_func_return],
)
class_field_var = Var(val="exampleClassVar", type="String")
class_def = ClassDef(
name="ExampleClass",
bases=[],
funcs=[class_func_def],
fields=[class_field_var],
)
obj_var = Var(val="exampleObject", type="ExampleClass")
    obj_constructor_name = Name(name="ExampleClass")
    obj_constructor_call = Call(func=obj_constructor_name, arguments=[])
obj_assign = Assignment(left=obj_var, right=obj_constructor_call)
continue_node = ModelContinue()
break_node = ModelBreak()
true_expr = BinaryOp(
op=BinaryOperator.EQ, left=Number(number=1), right=Number(number=1)
)
if_node = ModelIf(expr=true_expr, body=[continue_node], orelse=[break_node])
attr = Attribute(value=obj_var, attr=Name(name="exampleClassFunction"))
attr_expr = Expr(expr=attr)
loop = Loop(expr=true_expr, body=[if_node, attr_expr])
set_assign = Assignment(
left=Var(val="exampleSet", type="Set"), right=Set(values=[])
)
list_assign = Assignment(
left=Var(val="exampleList", type="List"), right=List(values=[])
)
dict_var = Var(val="exampleDict", type="Dict")
dict_assign = Assignment(left=dict_var, right=Dict(values=[], keys=[]))
dict_subscript = Expr(Subscript(value=dict_var, slice=String(string="key")))
func_def = FunctionDef(
name="exampleFunction",
func_args=[],
body=[
obj_assign,
loop,
set_assign,
list_assign,
dict_assign,
dict_subscript,
],
)
m = Module(name="ExampleModule", body=[class_def, func_def])
return CAST([m])
@pytest.fixture
def pid_c_cast():
# TODO for a C struct, should we make a default init function?
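    # Models (roughly) a C PID program: a global `struct _pid pid`, PID_init()
    # zeroing the state and setting the Kp/Ki/Kd gains, PID_realize(speed)
    # performing one incremental update, and main() looping 100 times.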
struct_pid_def = ClassDef(
name="struct _pid",
bases=[],
funcs=[],
fields=[
Var(val=Name(name="setSpeed"), type="float"),
Var(val=Name(name="ActualSpeed"), type="float"),
Var(val=Name(name="err"), type="float"),
Var(val=Name(name="err_last"), type="float"),
Var(val=Name(name="voltage"), type="float"),
Var(val=Name(name="integral"), type="float"),
Var(val=Name(name="Kp"), type="float"),
Var(val=Name(name="Ki"), type="float"),
Var(val=Name(name="Kd"), type="float"),
],
)
global_pid_assign = Assignment(
left=Var(val=Name(name="pid")),
right=Call(
func="struct_pid",
arguments=[],
),
)
pid_assignments = [
Assignment(
left=Attribute(value=Name(name="pid"), attr="SetSpeed"),
right=Number(number=0.0),
),
Assignment(
left=Attribute(value=Name(name="pid"), attr="ActualSpeed"),
right=Number(number=0.0),
),
Assignment(
left=Attribute(value=Name(name="pid"), attr="err"),
right=Number(number=0.0),
),
Assignment(
left=Attribute(value=Name(name="pid"), attr="err_last"),
right=Number(number=0.0),
),
Assignment(
left=Attribute(value=Name(name="pid"), attr="voltage"),
right=Number(number=0.0),
),
Assignment(
left=Attribute(value=Name(name="pid"), attr="integral"),
right=Number(number=0.0),
),
Assignment(
left=Attribute(value=Name(name="pid"), attr="Kp"),
right=Number(number=0.2),
),
Assignment(
left=Attribute(value=Name(name="pid"), attr="Ki"),
right=Number(number=0.015),
),
Assignment(
left=Attribute(value=Name(name="pid"), attr="Kd"),
right=Number(number=0.2),
),
]
pid_init_func = FunctionDef(name="PID_init", body=pid_assignments)
pid_realize_body = [
Assignment(
left=Attribute(value=Name(name="pid"), attr="SetSpeed"),
right=Name(name="speed"),
),
Assignment(
left=Attribute(value=Name(name="pid"), attr="err"),
right=BinaryOp(
op=BinaryOperator.SUB,
left=Attribute(value=Name(name="pid"), attr="SetSpeed"),
right=Attribute(value=Name(name="pid"), attr="ActualSpeed"),
),
),
Assignment(
left=Attribute(value=Name(name="pid"), attr="integral"),
right=BinaryOp(
op=BinaryOperator.ADD,
left=Attribute(value=Name(name="pid"), attr="integral"),
right=Attribute(value=Name(name="pid"), attr="err"),
),
),
Assignment(
left=Attribute(value=Name(name="pid"), attr="voltage"),
right=BinaryOp(
op=BinaryOperator.MULT,
left=Attribute(value=Name(name="pid"), attr="Kp"),
right=BinaryOp(
op=BinaryOperator.MULT,
left=BinaryOp(
op=BinaryOperator.ADD,
left=Attribute(value=Name(name="pid"), attr="err"),
right=Attribute(value=Name(name="pid"), attr="Ki"),
),
right=BinaryOp(
op=BinaryOperator.MULT,
left=BinaryOp(
op=BinaryOperator.ADD,
left=Attribute(value=Name(name="pid"), attr="integral"),
right=Attribute(value=Name(name="pid"), attr="Kd"),
),
right=BinaryOp(
op=BinaryOperator.SUB,
left=Attribute(value=Name(name="pid"), attr="err"),
right=Attribute(value=Name(name="pid"), attr="err_last"),
),
),
),
),
),
Assignment(
left=Attribute(value=Name(name="pid"), attr="err_last"),
right=Attribute(value=Name(name="pid"), attr="err"),
),
Assignment(
left=Attribute(value=Name(name="pid"), attr="ActualSpeed"),
right=BinaryOp(
op=BinaryOperator.MULT,
left=Attribute(value=Name(name="pid"), attr="voltage"),
right=Number(number=1.0),
),
),
ModelReturn(value=Attribute(value=Name(name="pid"), attr="ActualSpeed")),
]
pid_realize_func = FunctionDef(
name="PID_realize",
func_args=[Var(val=Name(name="speed"), type="float")],
body=pid_realize_body,
)
main_pid_init_call = Expr(expr=Call(func=Name(name="PID_init")))
main_count_init = Assignment(
left=Var(val=Name(name="count"), type="int"), right=Number(number=0)
)
main_loop_speed_assign = Assignment(
left=Var(val=Name(name="speed")),
right=Call(func=Name(name="PID_init"), arguments=[Number(number=20.0)]),
)
main_loop_count_assign = Assignment(
left=Var(val=Name(name="count")),
right=BinaryOp(
op=BinaryOperator.ADD,
left=Var(val=Name(name="count")),
right=Number(number=1),
),
)
main_loop = Loop(
expr=BinaryOp(
op=BinaryOperator.LT,
left=Name(name="count"),
right=Number(number=100),
),
body=[main_loop_speed_assign, main_loop_count_assign],
)
main_return = ModelReturn(value=Number(number=0))
main_func = FunctionDef(
name="main",
func_args=[],
body=[main_pid_init_call, main_count_init, main_loop, main_return],
)
pid_body = [
struct_pid_def,
global_pid_assign,
pid_init_func,
pid_realize_func,
main_func,
]
pid_module = Module(name="PID", body=pid_body)
return CAST([pid_module])
@pytest.mark.skip("Skipping due to changes in AIR")
def test_basic_function_def_and_assignment(
basic_function_def_and_assignment_grfn,
basic_function_def_and_assignment_cast,
):
generated_grfn = basic_function_def_and_assignment_cast.to_GrFN()
assert generated_grfn == basic_function_def_and_assignment_grfn
@pytest.mark.skip(reason="Need to implement test.")
def test_cast_with_all_nodes(cast_with_all_nodes_grfn, cast_with_all_nodes):
pass
# TODO
# generated_grfn = cast_with_all_nodes.to_GrFN()
# assert generated_grfn == cast_with_all_nodes_grfn
@pytest.mark.skip(reason="Need to implement test.")
def test_pid_c_cast(pid_c_cast_grfn, pid_c_cast):
pass
# TODO
# generated_grfn = pid_c_cast.to_GrFN()
# assert generated_grfn == pid_c_cast_grfn
@pytest.mark.skip(reason="Need to implement test.")
def test_function_call():
pass
@pytest.mark.skip(reason="Need to implement test.")
def test_if_statement():
pass
@pytest.mark.skip(reason="Need to implement test.")
def test_if_else_statement():
pass
@pytest.mark.skip(reason="Need to implement test.")
def test_if_elif_statement():
pass
@pytest.mark.skip(reason="Need to implement test.")
def test_if_elif_else_statement():
pass
@pytest.mark.skip(reason="Need to implement test.")
def test_for_loop():
pass
@pytest.mark.skip(reason="Need to implement test.")
def test_while_loop():
pass
@pytest.mark.skip(reason="Need to implement test.")
def test_nested_loops():
pass
@pytest.mark.skip(reason="Need to implement test.")
def test_global_variable_passing():
pass
@pytest.mark.skip(reason="Need to implement test.")
def test_pack_and_extract():
pass
@pytest.mark.skip(reason="Need to implement test.")
def test_only_pack():
pass
@pytest.mark.skip(reason="Need to implement test.")
def test_only_extract():
pass
def test_unknown_cast_node():
c = CAST([object()], cast_source_language="")
with pytest.raises(C2ATypeError):
c.to_GrFN()
|
from .version import __version__ # noqa
try:
    str = unicode  # Python 2.7: make `str` refer to the text type
except NameError:
    str = str  # Python 3: `str` is already the text type
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
########################################
##
# @author: Amyth
# @email: mail@amythsingh.com
# @website: www.techstricks.com
# @created_date: 22-02-2017
# @last_modify: Wed Feb 22 12:44:32 2017
##
########################################
|
from fractions import gcd
from sys import stdin
def p(n):
    # Count lattice points under the hyperbola a*b <= n with the standard
    # O(sqrt(n)) trick: 2 * sum_{i <= sqrt(n)} floor(n/i) - floor(sqrt(n))^2.
    s = int(n ** .5)
    c = sum(n / i for i in xrange(1, s + 1)) * 2 - s ** 2
    # Reduce c / n^2: the probability that a random pair from [1, n]^2
    # has product at most n.
    g = gcd(c, n ** 2)
    return '{}/{}'.format(c / g, n ** 2 / g)
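# e.g. p(10) == '27/100': 27 of the 100 pairs (a, b) in [1, 10]^2 have a*b <= 10.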
print(
'\n'.join(
p(int(ln))
for i, ln in enumerate(stdin) if i
)
)
|
#!/usr/bin/env python
# Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
#
# Use of this source code is governed by a BSD-style license
# that can be found in the LICENSE file in the root of the source
# tree. An additional intellectual property rights grant can be found
# in the file PATENTS. All contributing project authors may
# be found in the AUTHORS file in the root of the source tree.
"""Downloads prebuilt AppRTC and Go from WebRTC storage and unpacks it.
Requires that depot_tools is installed and in the PATH.
It downloads compressed files in the directory where the script lives.
This is because the precondition is that the script lives in the same
directory of the .sha1 files.
"""
import os
import sys
import utils
SCRIPT_DIR = os.path.dirname(os.path.abspath(__file__))
def _GetGoArchivePathForPlatform():
archive_extension = 'zip' if utils.GetPlatform() == 'win' else 'tar.gz'
return os.path.join(utils.GetPlatform(), 'go.%s' % archive_extension)
def main(argv):
if len(argv) > 2:
return 'Usage: %s [output_dir]' % argv[0]
output_dir = os.path.abspath(argv[1]) if len(argv) > 1 else None
apprtc_zip_path = os.path.join(SCRIPT_DIR, 'prebuilt_apprtc.zip')
if os.path.isfile(apprtc_zip_path + '.sha1'):
utils.DownloadFilesFromGoogleStorage(SCRIPT_DIR, auto_platform=False)
if output_dir is not None:
utils.RemoveDirectory(os.path.join(output_dir, 'apprtc'))
utils.UnpackArchiveTo(apprtc_zip_path, output_dir)
golang_path = os.path.join(SCRIPT_DIR, 'golang')
golang_zip_path = os.path.join(golang_path, _GetGoArchivePathForPlatform())
if os.path.isfile(golang_zip_path + '.sha1'):
utils.DownloadFilesFromGoogleStorage(golang_path)
if output_dir is not None:
utils.RemoveDirectory(os.path.join(output_dir, 'go'))
utils.UnpackArchiveTo(golang_zip_path, output_dir)
if __name__ == '__main__':
sys.exit(main(sys.argv))
|
#!/usr/bin/env python3
import rospy
import time
def call_service(service_name: str, service_class, service_req = None, time_out: float = 5, max_retry: int = 5):
"""
    Create a service proxy for the given service_name and service_class and
    call the service.
    :param str service_name: name of the service
    :param service_class: service type
    :param service_req: service request
    :param float time_out: timeout in seconds
    :param int max_retry: maximum number of times to retry calling the service
    :return: tuple of (response received from the service call, bool success flag)
"""
# wait until the service becomes available
try:
rospy.wait_for_service(service_name, timeout = time_out)
except rospy.ROSException as e:
rospy.logerr('service %s is not available due to %s', service_name, e)
        return None, False
# create callable proxy to the service
service_proxy = rospy.ServiceProxy(service_name, service_class)
is_call_successful = False
counter = 0
response = None
# loop until the counter reached max retry limit or
# until the ros is shutdown or service call is successful
while not is_call_successful and not rospy.is_shutdown():
if counter < max_retry:
try:
# call service
if service_req is None:
response = service_proxy()
else:
response = service_proxy(service_req)
is_call_successful = True
except rospy.ServiceException as e:
                # service call failed; increment the retry counter
counter += 1
else:
# max retry count reached
rospy.logwarn('call to the service %s failed', service_name)
break
return response, is_call_successful
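# Usage sketch (hypothetical service name; requires a running ROS master):
#     from std_srvs.srv import Trigger
#     response, ok = call_service('/my_robot/self_test', Trigger)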
def receive_topic_msg(topic_name: str, topic_class, time_out: float = 5, max_retry: int = 5):
"""
Check whether the topic is operational by
1. subscribing to topic_name
2. receive one topic_class message
3. unsubscribe
    :param str topic_name: name of the topic
    :param topic_class: topic type
    :param float time_out: timeout in seconds
    :param int max_retry: maximum number of times to retry waiting for a message
    :return: the received message, or None if no message was received
"""
counter = 0
response = None
# loop until the ros is shutdown or received successfully message from topic
while response is None and not rospy.is_shutdown():
if counter < max_retry:
try:
# create a new subscription to topic, receive one message and then unsubscribe
response = rospy.wait_for_message(topic_name, topic_class, timeout = time_out)
except rospy.ROSException as e:
counter += 1
else:
# max retry count reached
rospy.logwarn('wait for message from topic %s failed', topic_name)
break
return response
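# Usage sketch (hypothetical topic name; requires a running ROS master):
#     from sensor_msgs.msg import LaserScan
#     scan = receive_topic_msg('/scan', LaserScan, time_out=2)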
def check_publisher_connections(publisher, max_retry: int = 5):
"""
Check whether publisher is operational by checking the number of connections
:param publisher: publisher instance
    :param int max_retry: maximum number of times to retry checking the publisher connections
"""
counter = 0
while publisher.get_num_connections() == 0 and not rospy.is_shutdown():
if counter < max_retry:
time.sleep(0.5)
counter += 1
else:
# max retry count reached
rospy.logwarn('publisher %s is not ready', publisher.name)
break
|
#!/usr/bin/python3
#coding:utf-8
import rospy
import cv2
import os
from predict_image import detectImage
import json
from vision.srv import *
class VisionNode:
def __init__(self):
        rospy.init_node('vision_node', anonymous = True)  # create the node
self.rate = rospy.Rate(20)
        self.cap = cv2.VideoCapture(4)  # temporary, for debugging
        #self.cap.set(3,480)  # adjust the camera frame size; not usable on the host computer
        #self.cap.set(4,640)
self.packagePath = rospy.get_param("/pkg_path/vision")
        self.yolov5Module = detectImage(os.path.join(self.packagePath, 'scripts/v5l-last.pt'))  # load the model
        rospy.Service('/vision_service', VisionDetectService, self.Callback)  # create the service
# rospy.wait_for_service('vision_service')
# self.vision_service = rospy.ServiceProxy('vision_service',VisionService)
def create_vision_detect_service_response(self):
tpm = VisionDetectServiceResponse()
tpm.isFind = 0
tpm.detect_res = 0
tpm.conf = 0
self.vision_detect_service_res = tpm
def Callback(self, data):
self.create_vision_detect_service_response()
_, self.frame = self.cap.read()
        self.vision_detect_service_res = self.yolov5Module.detect(self.frame, self.vision_detect_service_res)  # pass in the image (and the click position in the image coordinate system) and run detection
#data-base
return self.vision_detect_service_res
def MainLoop(self):
while not rospy.is_shutdown():
self.rate.sleep()
if __name__ == '__main__':
visionNode = VisionNode()
visionNode.MainLoop()
|
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Straight-through gradient sampling distribution."""
from distrax._src.distributions import categorical
from distrax._src.distributions import distribution
import jax
def straight_through_wrapper( # pylint: disable=invalid-name
Distribution,
) -> distribution.DistributionLike:
"""Wrap a distribution to use straight-through gradient for samples."""
def sample(self, seed, sample_shape=()): # pylint: disable=g-doc-args
"""Sampling with straight through biased gradient estimator.
Sample a value from the distribution, but backpropagate through the
underlying probability to compute the gradient.
References:
[1] Yoshua Bengio, Nicholas Léonard, Aaron Courville, Estimating or
Propagating Gradients Through Stochastic Neurons for Conditional
Computation, https://arxiv.org/abs/1308.3432
Args:
seed: a random seed.
sample_shape: the shape of the required sample.
Returns:
A sample with straight-through gradient.
"""
# pylint: disable=protected-access
obj = Distribution(probs=self._probs, logits=self._logits)
assert isinstance(obj, categorical.Categorical)
sample = obj.sample(seed=seed, sample_shape=sample_shape)
probs = obj.probs
padded_probs = _pad(probs, sample.shape)
# Keep sample unchanged, but add gradient through probs.
sample += padded_probs - jax.lax.stop_gradient(padded_probs)
return sample
def _pad(probs, shape):
"""Grow probs to have the same number of dimensions as shape."""
while len(probs.shape) < len(shape):
probs = probs[None]
return probs
parent_name = Distribution.__name__
# Return a new object, overriding sample.
  return type('StraightThrough' + parent_name, (Distribution,),
{'sample': sample})
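if __name__ == '__main__':
  # Minimal usage sketch (not part of the original module): wrap Categorical
  # so samples are discrete in the forward pass while gradients flow through
  # the underlying probabilities.
  import jax.numpy as jnp
  StCategorical = straight_through_wrapper(categorical.Categorical)
  dist = StCategorical(logits=jnp.array([0.2, 0.8]))
  print(dist.sample(seed=jax.random.PRNGKey(0)))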
|
import datetime
import docker
import docker.errors
import docker.models.containers
import docker.models.images
import enum
import pathlib as pt
import secrets
import tarfile
import tempfile
import typing
import app.database as db_module
import app.database.user as user_module
import app.database.dodoco.project as ddc_db_project
import app.plugin.ddc_docker as ddc_plugin_docker
DockerClientType = docker.client.DockerClient
DockerContainerType = docker.models.containers.Container
DockerImageType = docker.models.images.Image
db = db_module.db
docker_client = ddc_plugin_docker.docker_client
class DockerPortProtocol(enum.Enum):
# protocol name must be lowercase
tcp = enum.auto()
udp = enum.auto()
stcp = enum.auto()
class Container(db.Model, db_module.DefaultModelMixin):
__tablename__ = 'TB_CONTAINER'
uuid = db.Column(db_module.PrimaryKeyType, db.Sequence('SQ_Container_UUID'), primary_key=True)
name = db.Column(db.String, nullable=False)
description = db.Column(db.String, nullable=True)
information = db.Column(db.String, nullable=True)
start_image_name = db.Column(db.String, nullable=True)
container_id = db.Column(db.String, nullable=True)
container_name = db.Column(db.String, nullable=True, unique=True)
project_id = db.Column(db_module.PrimaryKeyType,
db.ForeignKey('TB_PROJECT.uuid', ondelete='CASCADE'),
nullable=False)
project: ddc_db_project.Project = db.relationship(
ddc_db_project.Project,
primaryjoin=project_id == ddc_db_project.Project.uuid,
backref=db.backref('containers'))
created_by_id = db.Column(db_module.PrimaryKeyType,
db.ForeignKey('TB_USER.uuid', ondelete='CASCADE'),
nullable=True)
created_by: user_module.User = db.relationship(user_module.User, primaryjoin=created_by_id == user_module.User.uuid)
ports: list['ContainerPort'] = None # backref placeholder
def create(self,
image_name: str,
run_kwargs: typing.Optional[dict] = None,
setup_container_function: typing.Optional[typing.Callable] = None,
start_after_create: bool = False,
db_commit: bool = False) -> DockerContainerType:
self.start_image_name = image_name
self.container_name = f'{[z for z in image_name.split(":") if z][0]}_{secrets.token_hex(16)}'
container_run_kwargs_result = {
**(run_kwargs or {}),
'name': self.container_name,
'detach': True, 'stdin_open': True, 'tty': True, # -dit
'network_mode': 'bridge', 'ports': self.get_container_ports(),
}
new_container: DockerContainerType = None
try:
global docker_client
docker_client = ddc_plugin_docker.docker_client
new_container = docker_client.containers.create(image_name, **container_run_kwargs_result)
except (docker.errors.ImageNotFound, docker.errors.APIError):
new_image = docker_client.images.pull(image_name) # noqa
new_container = docker_client.containers.create(new_image.tags[0], **container_run_kwargs_result)
self.container_id = new_container.id
if setup_container_function:
setup_container_function(new_container)
if start_after_create:
new_container.start()
        if db_commit:
            db.session.commit()
        return new_container
def recreate(self, start_after_recreate: bool = False, db_commit: bool = False):
target_image: DockerImageType = None
try:
self.get_container_obj().remove()
except Exception:
pass
try:
target_image = docker_client.images.get(self.container_name)
except docker.errors.ImageNotFound:
target_image = docker_client.images.get(self.start_image_name)
target_container: DockerContainerType = docker_client.containers.create(
image=target_image.tags[0],
name=self.container_name,
detach=True, stdin_open=True, tty=True, # -dit
network_mode='bridge', ports=self.get_container_ports())
self.container_id = target_container.id
if start_after_recreate:
target_container.start()
if db_commit:
db.session.commit()
def get_container_obj(self) -> DockerContainerType:
return docker_client.containers.get(self.container_id)
def start(self):
try:
target_container = self.get_container_obj()
target_container.start()
except docker.errors.NotFound:
self.recreate(start_after_recreate=True, db_commit=True)
def pause(self):
target_container = self.get_container_obj()
target_container.pause()
def stop(self, immediate: bool = False, blocking: bool = True, timeout: int = 10):
target_container = self.get_container_obj()
if immediate:
target_container.kill()
elif blocking:
target_container.wait(timeout=timeout)
else:
target_container.stop(timeout=timeout)
def restart(self, timeout: int = 10):
target_container = self.get_container_obj()
target_container.restart(timeout=timeout)
def destroy(self, force: bool = False, db_commit: bool = False):
self.stop(force, blocking=True)
target_container = self.get_container_obj()
target_container.remove()
# Remove all port records
db.session.query(ContainerPort).filter(ContainerPort.container_id == self.uuid).delete()
# Remove self
db.session.delete(self)
if db_commit:
db.session.commit()
def commit(self, changes: str = None, start_after_commit: bool = False, db_commit: bool = False):
        # if the container is running, then we should stop it first
try:
self.stop(blocking=True)
except Exception:
try:
self.stop(immediate=True)
except Exception:
pass
        # Commit under the same container name (a new image tagged with the
        # container name will be generated), then start the new image.
target_container = self.get_container_obj()
target_image = target_container.image
target_image_tag_split = target_image.tags[0].split(':')
target_image_name = target_image_tag_split[0]
target_image_tag = target_image_tag_split[1]
new_image_name = target_image_name
container_commit_num: int = -1
if self.start_image_name == target_container.image.tags[0]:
container_commit_num = 0
new_image_name = self.container_name
else:
container_commit_num = int(target_image_tag) + 1
# Commit and create new image
target_container = self.get_container_obj()
target_container.commit(
repository=new_image_name,
changes=changes,
tag=container_commit_num)
# Remove old container and create new
target_container.remove()
self.recreate()
if start_after_commit:
self.start()
if db_commit:
db.session.commit()
def push_local_file(self, local_file_path: pt.Path, dest_path: str):
target_container = self.get_container_obj()
with tempfile.NamedTemporaryFile('wb', suffix='.tar', delete=False) as f:
with tarfile.open(fileobj=f, mode='w') as tar:
try:
tar.add(local_file_path)
print(tar.list())
finally:
tar.close()
with pt.Path(f.name).open('rb') as fp:
target_container.put_archive(dest_path, fp.read())
def execute_cmd(self, cmdline: str, stream: bool = False, demux: bool = True):
target_container = self.get_container_obj()
return target_container.exec_run(cmdline, stream, demux)
def add_port_mapping(self,
container_port: int,
exposed_port: int,
protocol: DockerPortProtocol,
start_after_add: bool = False,
db_commit: bool = False):
new_port = ContainerPort()
new_port.container_id = self.uuid
new_port.container_port = container_port
new_port.exposed_port = exposed_port
new_port.protocol = protocol
db.session.add(new_port)
if db_commit:
db.session.commit()
if self.container_id:
            self.commit(start_after_commit=start_after_add, db_commit=db_commit)
def check_existance(self, db_commit=False) -> typing.Optional['Container']:
try:
# Check if docker container alive
global docker_client
docker_client = ddc_plugin_docker.docker_client
docker_client.containers.get(self.container_id)
return self
except docker.errors.NotFound:
if db_commit:
db.session.query(ContainerPort).filter(ContainerPort.container_id == self.uuid).delete()
db.session.delete(self)
db.session.commit()
return None
except Exception as err:
raise err
def get_container_ports(self) -> dict[str, int]:
ports: dict[str, int] = dict()
container_port_records = db.session.query(ContainerPort)\
.filter(ContainerPort.container_id == self.uuid).all()
for container_port_record in container_port_records:
ports.update(container_port_record.to_docker_port_def())
return ports
def to_dict(self):
result = {
'resource': 'container',
'uuid': self.uuid,
'name': self.name,
'start_image_name': self.start_image_name,
'container_id': self.container_id,
'container_name': self.container_name,
'created_by_id': self.created_by_id,
'created_by': self.created_by.to_dict(),
'created_at': self.created_at,
'modified_at': self.modified_at,
'modified': self.created_at != self.modified_at,
'created_at_int': int(self.created_at.replace(tzinfo=datetime.timezone.utc).timestamp()),
'modified_at_int': int(self.modified_at.replace(tzinfo=datetime.timezone.utc).timestamp()),
'commit_id': self.commit_id,
}
if self.project:
result['project'] = self.project.to_dict(show_container=False)
if self.ports:
result['ports'] = [port.to_dict() for port in self.ports]
return result
class ContainerPort(db.Model): # Container's exposed port management
__tablename__ = 'TB_CONTAINER_PORT'
uuid = db.Column(db_module.PrimaryKeyType, db.Sequence('SQ_ContainerPort_UUID'), primary_key=True)
container_id = db.Column(db_module.PrimaryKeyType,
db.ForeignKey('TB_CONTAINER.uuid', ondelete='CASCADE'),
nullable=False)
container: Container = db.relationship(
Container,
primaryjoin=container_id == Container.uuid,
backref=db.backref('ports'))
container_port = db.Column(db.Integer, nullable=False)
protocol = db.Column(db.Enum(DockerPortProtocol), nullable=False, default=DockerPortProtocol.tcp)
    # The exposed_port column must not be unique, because a single container
    # port may be exposed over multiple protocols.
exposed_port = db.Column(db.Integer, nullable=False)
def to_dict(self):
return {
'resource': 'container_port',
'protocol': self.protocol.name, # tcp, udp, stcp
'container_port': self.container_port,
'exposed_port': self.exposed_port,
}
def to_docker_port_def(self) -> dict[str, int]:
return {f'{self.container_port}/{self.protocol.name}': self.exposed_port, }
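# Usage sketch (not part of the original module; requires an application
# context with a configured DB session and a reachable Docker daemon):
#     container = Container(name='demo', project_id=project.uuid)
#     db.session.add(container)
#     db.session.flush()  # assign container.uuid before mapping ports
#     container.add_port_mapping(8080, 18080, DockerPortProtocol.tcp)
#     container.create('python:3.9-slim', start_after_create=True, db_commit=True)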
|
import discord
from discord.app.commands import slash_command
from discord.ext import commands
from extra import utils
from typing import Union, List
from extra.language.centish import Centish
from PIL import ImageDraw, ImageFont, Image
import aiohttp
import os
guild_ids: List[int] = [int(os.getenv('SERVER_ID'))]
dnk_id = int(os.getenv('DNK_ID'))
cent_id = int(os.getenv('CENT_ID'))
class Social(commands.Cog):
""" Category for social related commands. """
def __init__(self, client: commands.Bot) -> None:
self.client = client
self.session = aiohttp.ClientSession()
@commands.Cog.listener()
async def on_ready(self) -> None:
""" Tells when the cog is ready to go. """
print("Social cog is online!")
@commands.command(aliases=['si', 'server'])
async def serverinfo(self, ctx) -> None:
""" Shows information about the server. """
guild = ctx.guild
em = discord.Embed(description=guild.description, color=ctx.author.color)
online = len({m.id for m in guild.members if m.status is not discord.Status.offline})
em.add_field(name="Server ID", value=guild.id, inline=True)
em.add_field(name="Server Owner", value=guild.owner.mention, inline=False)
em.add_field(name="Conlang Creators", value=f"<@{dnk_id}> & <@{cent_id}> 💞", inline=False)
em.add_field(name="Members", value=f"🟢 {online} members ⚫ {len(guild.members)} members", inline=True)
em.add_field(name="Channels",
value=f"⌨️ {len(guild.text_channels)} | 🔈 {len(guild.voice_channels)} | 📻 {len(guild.stage_channels)} | 📁 {len(guild.categories)} | **=** {len(guild.channels)}",
inline=False)
em.add_field(name="Roles", value=len(guild.roles), inline=True)
em.add_field(name="Emojis", value=len(guild.emojis), inline=True)
em.add_field(name="🌐 Region", value=str(guild.region).title() if guild.region else None, inline=True)
em.add_field(name="🌟 Boosts", value=f"{guild.premium_subscription_count} (Level {guild.premium_tier})", inline=True)
features = ', '.join(list(map(lambda f: f.replace('_', ' ').capitalize(), guild.features)))
em.add_field(name="Server Features", value=features if features else None, inline=False)
em.set_thumbnail(url=guild.icon.url)
if guild.banner:
em.set_image(url=guild.banner.url)
em.set_author(name=guild.name, icon_url=guild.icon.url)
created_at = await utils.sort_time(guild.created_at)
em.set_footer(text=f"Created: {guild.created_at.strftime('%d/%m/%y')} ({created_at})")
await ctx.send(embed=em)
@commands.command(aliases=['user', 'whois', 'who_is'])
async def userinfo(self, ctx, member: Union[discord.Member, discord.User] = None):
""" Shows all the information about a member.
:param member: The member to show the info.
:return: An embedded message with the user's information. """
member = ctx.author if not member else member
embed = discord.Embed(colour=member.color, timestamp=ctx.message.created_at)
embed.set_author(name=f"User Info: {member}")
embed.set_thumbnail(url=member.avatar.url)
embed.set_footer(text=f"Requested by {ctx.author}", icon_url=ctx.author.avatar.url)
embed.add_field(name="ID:", value=member.id, inline=False)
if hasattr(member, 'guild'):
embed.add_field(name="Guild name:", value=member.display_name, inline=False)
sorted_time_create = f"<t:{int(member.created_at.timestamp())}:R>"
sorted_time_join = f"<t:{int(member.joined_at.timestamp())}:R>"
embed.add_field(name="Created at:", value=f"{member.created_at.strftime('%d/%m/%y')} ({sorted_time_create}) **GMT**",
inline=False)
embed.add_field(name="Joined at:", value=f"{member.joined_at.strftime('%d/%m/%y')} ({sorted_time_join}) **GMT**", inline=False)
embed.add_field(name="Top role:", value=member.top_role.mention, inline=False)
embed.add_field(name="Bot?", value=member.bot)
await ctx.send(embed=embed)
@commands.command(name="kingdom_image", aliases=["kingdom", "castle"])
@commands.cooldown(1, 10, commands.BucketType.user)
async def _kingdom_image_command(self, ctx) -> None:
""" Makes the Centish kingdom image. """
await self._kingdom_image_callback(ctx)
@slash_command(name="kingdom_image", guild_ids=guild_ids)
@commands.cooldown(1, 10, commands.BucketType.user)
async def _kingdom_image_slash(self, ctx) -> None:
""" Makes the Centish kingdom image. """
await self._kingdom_image_callback(ctx)
async def _kingdom_image_callback(self, ctx) -> None:
""" Makes the Centish kingdom image. """
member = ctx.author
answer: discord.PartialMessageable = None
if isinstance(ctx, commands.Context):
answer = ctx.send
else:
await ctx.defer()
answer = ctx.respond
# Get conlang creators
cent: discord.User = await self.client.fetch_user(int(os.getenv('CENT_ID')))
dnk: discord.User = await self.client.fetch_user(int(os.getenv('DNK_ID')))
# Images
img_root: str = 'media/images/'
background: Image.Image = Image.open(f"{img_root}dark_bg.jpg").resize((400, 400)).convert('RGBA')
kingdom: Image.Image = Image.open(f"{img_root}kingdom_castle.png").resize((400, 400)).convert('RGBA')
cent_pfp: Image.Image = await utils.get_user_pfp(cent)
dnk_pfp: Image.Image = await utils.get_user_pfp(dnk)
moon: Image.Image = Image.open(f"{img_root}crescent_moon.png").resize((50, 50))
symbol: Image.Image = Image.open(f"{img_root}white_cent_symbol.png").resize((50, 50))
hearts: Image.Image = Image.open(f"{img_root}hearts.png").resize((35, 35))
# Paste images
background.paste(kingdom, (0, 0), kingdom)
background.paste(cent_pfp, (90, 70), cent_pfp)
background.paste(dnk_pfp, (250, 70), dnk_pfp)
background.paste(moon, (33, 230), moon)
background.paste(moon, (319, 230), moon)
background.paste(symbol, (176, 90), symbol)
background.paste(hearts, (185, 165), hearts)
# Gets font and writes text
font_path: str = "media/fonts/built titling sb.ttf"
micro = ImageFont.truetype(font_path, 25)
tiny = ImageFont.truetype(font_path, 30)
small = ImageFont.truetype(font_path, 45)
# General info
draw = ImageDraw.Draw(background)
draw.text((98, 142), "cent", (255, 255, 255), font=tiny)
draw.text((262, 142), "DNK", (255, 255, 255), font=tiny)
draw.text((124, 245), "Cajdklaje", (255, 255, 255), font=small)
# Word counter
draw.text((35, 285), "Words", (255, 255, 255), font=micro)
words = await Centish.get_words()
draw.text((35, 325), str(len(words['words'])), (255, 255, 255), font=micro)
# Creation date
draw.text((295, 285), "Creation", (255, 255, 255), font=micro)
draw.text((295, 325), "08/17/21", (255, 255, 255), font=micro)
# Saves the final image
file_path: str = f'media/images/temp/result_{member.id}.png'
background.save(file_path, 'png', quality=90)
current_time = await utils.get_time_now('Europe/Rome')
# Makes the embed
embed = discord.Embed(
title="__Cajdklaje Rezom__",
description="Tuzim informazza sout cajdklaje rezom.",
# color=int("000001", 16),
color=int("d070da", 16),
timestamp=current_time
)
embed.set_thumbnail(url="https://images.emojiterra.com/twitter/v13.0/512px/262a.png")
embed.set_image(url="attachment://kingdom.png")
embed.set_author(name=self.client.user, icon_url=self.client.user.display_avatar)
embed.set_footer(text=f"Requested by {member}", icon_url=member.display_avatar)
try:
await answer(embed=embed, file=discord.File(file_path, filename="kingdom.png"))
        except Exception:
pass
finally:
os.remove(file_path)
def setup(client) -> None:
""" Cog's setup function. """
client.add_cog(Social(client))
|
import bcrypt
from django.contrib import messages
from django.db.models import Count, Q
from django.shortcuts import redirect, render
from django.views.decorators.http import require_GET, require_POST
from .models import User, UserRole, Loan
from django.http import HttpResponse
def get_logged_user(request):
"""
    Get the currently logged-in user.
Args:
- request (HttpRequest): the request
Returns:
        The currently logged-in user if any, None otherwise
"""
if 'userid' in request.session:
user = User.objects.get(id=request.session['userid'])
return user
return None
def redirects_by_role(user):
"""
    Redirect to the landing page appropriate for the user's role.
Args:
- user (User): the logged in user
Returns:
        A redirect to the borrower page if the user's role is borrower, to the lender page if it is lender
"""
if user.user_role == UserRole.BORROWER:
return redirect('borrower', id=user.id)
elif user.user_role == UserRole.LENDER:
return redirect('lender', id=user.id)
@require_GET
def index(request):
"""
Root view. Currently it redirects to the register view.
"""
return redirect('/register')
@require_GET
def register(request):
"""
Register view.
It displays the registration webpage.
Args:
- request (HttpRequest): the request
    Returns:
        The rendered registration page.
"""
context = {
'borrower_role': UserRole.BORROWER,
'lender_role': UserRole.LENDER,
'initial': request.session.get('form_data', {})
}
return render(request, 'register.html', context)
@require_GET
def login(request):
"""
Login view.
It displays the login webpage.
Args:
- request (HttpRequest): the request
    Returns:
        The rendered login page.
"""
return render(request, 'login.html')
@require_GET
def logout(request):
"""
Logout view.
It clears the session and redirects to the root view.
"""
request.session.clear()
return redirect('/')
@require_POST
def user_create(request):
"""
User creation view.
    It receives the data from the registration webpage, processes and
    validates it, and creates a user if everything is OK.
Args:
- request (HttpRequest): the request
    Returns:
        Redirect to the role-specific landing page on success, or back to the
        registration page with error messages.
"""
errors = User.objects.basic_validator(request.POST)
if len(errors) > 0:
request.session['form_data'] = {k:v for k,v in request.POST.items()}
print('RS', request.session['form_data'])
for key, value in errors.items():
messages.error(request, value)
del request.session['form_data'][key]
return redirect('register')
else:
first_name = request.POST.get('first_name_input', '')
last_name = request.POST.get('last_name_input', '')
email = request.POST.get('email_input', '')
password = request.POST.get('password_input', '')
money = request.POST.get('money_input', 0)
need_money_for = request.POST.get('need_money_for_input', '')
description = request.POST.get('description_input', '')
user_role = int(request.POST.get('user_role_input', UserRole.GUEST))
pw_hash = bcrypt.hashpw(password.encode(), bcrypt.gensalt()).decode()
new_user = User.objects.create(
first_name=first_name,
last_name=last_name,
email=email,
password=pw_hash,
money=money,
need_money_for=need_money_for,
description=description,
user_role=user_role
)
request.session['userid'] = new_user.id
return redirects_by_role(new_user)
@require_POST
def user_check(request):
"""
User login view.
    It receives the data from the login webpage, processes and validates it,
    and redirects the user if everything is OK.
Args:
- request (HttpRequest): the request
    Returns:
        Redirect to the role-specific landing page on success, or back to the
        login page with an error message.
"""
email = request.POST.get('email_input', '')
password = request.POST.get('password_input', '')
user = User.objects.filter(email=email)
if user and len(user) == 1:
logged_user = user[0]
if bcrypt.checkpw(password.encode(), logged_user.password.encode()):
request.session['userid'] = logged_user.id
return redirects_by_role(logged_user)
messages.error(request, 'Invalid username or password')
return redirect('login')
@require_GET
def borrower(request, id):
"""
Borrower view.
    It displays the borrower view, containing the profile and the list of people who have lent money to her/him
Args:
- request (HttpRequest): the request
- id (int): the user id
    Returns:
        The rendered borrower page, or a redirect to the root view if the session is invalid.
"""
logged_user = get_logged_user(request)
if logged_user is not None and logged_user.id == id and logged_user.user_role == UserRole.BORROWER:
consolidated_debts = {}
for debt in logged_user.debts.all():
d = dict(debt.lender.__dict__)
d['amount'] = 0
consolidated_debt = consolidated_debts.get(debt.lender.email, d)
consolidated_debt['amount'] += debt.amount
consolidated_debts[debt.lender.email] = consolidated_debt
context = {
'logged_user': logged_user,
'debts': consolidated_debts.values()
}
return render(request, 'borrower.html', context)
else:
request.session.clear()
return redirect('/')
@require_GET
def lender(request, id):
"""
Lender view.
    It displays the lender view, containing the profile, the list of people
    in need (if the balance is positive), and the list of people who owe money to her/him
Args:
- request (HttpRequest): the request
- id (int): the user id
    Returns:
        The rendered lender page, or a redirect to the root view if the session is invalid.
"""
logged_user = get_logged_user(request)
if logged_user is not None and logged_user.id == id and logged_user.user_role == UserRole.LENDER:
borrowers = User.objects.filter(user_role=UserRole.BORROWER)
open_requests = []
for borrower in borrowers:
if borrower.borrowed_amount < borrower.money:
d = dict(borrower.__dict__)
d['borrowed_amount'] = borrower.borrowed_amount
open_requests.append(d)
consolidated_loans = {}
for loan in logged_user.loans.all():
d = dict(loan.borrower.__dict__)
d['borrowed_amount'] = loan.borrower.borrowed_amount
d['amount'] = 0
consolidated_loan = consolidated_loans.get(loan.borrower.email, d)
consolidated_loan['amount'] += loan.amount
consolidated_loans[loan.borrower.email] = consolidated_loan
context = {
'logged_user': logged_user,
'balance': logged_user.money - logged_user.lent_amount,
'loans': consolidated_loans.values(),
'open_requests': open_requests
}
return render(request, 'lender.html', context)
else:
request.session.clear()
return redirect('/')
@require_POST
def lend(request):
"""
Lend view.
    It receives the data from the lend form, processes and validates it,
    and reloads the page if everything is OK.
Args:
- request (HttpRequest): the request
    Returns:
        Redirect to the lender page on success, or to the root view if the session is invalid.
"""
logged_user = get_logged_user(request)
if logged_user is not None and logged_user.user_role == UserRole.LENDER:
d = dict(request.POST)
d['lender_input'] = logged_user.id
errors = Loan.objects.basic_validator(d)
if len(errors) > 0:
for key, value in errors.items():
messages.error(request, value)
else:
            borrower_id = request.POST.get('borrower_input', 0)
            amount = request.POST.get('amount_input', 0)
            Loan.objects.create(
                borrower=User.objects.get(id=borrower_id),
                lender=logged_user,
                amount=int(amount)
            )
messages.info(request, 'Loan executed successfully')
return redirect('lender', id=logged_user.id)
else:
request.session.clear()
return redirect('/')
|
import json
import sys
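# Converts a single-document graph JSON file into JSON-lines files suitable for
# ArangoDB-style bulk import. Expected input shape, inferred from the loops
# below (sample values are hypothetical):
#   {
#     "nodes": {"a": {"label": "A"}, "b": {"label": "B"}},
#     "edges": [["a", "b"]]
#   }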
def main():
# Pull in data.
datafile = sys.argv[1]
with open(datafile) as f:
data = json.loads(f.read())
# Pull node list together.
nodes = []
for key, node in data["nodes"].items():
node["_key"] = key
nodes.append(node)
# Pull edge list together.
edges = []
for start, end in data["edges"]:
edges.append({
"_from": f"nodes/{start}",
"_to": f"nodes/{end}",
})
# Write out nodes.
with open("nodes.json", "w") as f:
f.writelines(json.dumps(row) + "\n" for row in nodes)
# Write out edges.
with open("edges.json", "w") as f:
f.writelines(json.dumps(row) + "\n" for row in edges)
return 0
if __name__ == "__main__":
sys.exit(main())
|
from django.shortcuts import render
from django.http import HttpRequest, HttpResponse
# Create your views here.
def index(request: HttpRequest):
return HttpResponse('This is DRF index')
|
import os
import gzip
import random
import time
name = ['james', 'stefan', 'steve', 'frank', 'paul', 'jamey', 'stephan', 'paul']
for y in range(1000):
    destfile = "people-data_" + str(random.randint(1, 10000)) + "-" + str(int(time.time()))
    with open(destfile, "a") as fo:
        for x in range(10000):
            fo.write("%s, %s\n" % (random.randint(1, 10000000), random.choice(name)))
    # gzip the generated file, then remove the plain-text original
    with open(destfile, "rb") as f_in, gzip.open('./' + destfile + '.gz', 'wb') as f_out:
        f_out.writelines(f_in)
    os.remove(destfile)
|
# -*- coding: utf-8 -*-
"""Punctuation.
---
layout: post
source: Garner's Modern American Usage
source_url: http://bit.ly/1T4alrY
title: dates
date: 2014-06-10 12:31:19
categories: writing
---
Dates.
"""
from proselint.tools import existence_check, memoize
@memoize
def check_et_al(text):
"""Check the text."""
err = "garner.punctuation"
msg = u"Misplaced punctuation. It's 'et al.'"
    items = [
        "et. al",
        "et. al."
    ]
    return existence_check(text, items, err, msg, join=True)
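# Example (hypothetical input): check_et_al("See Smith et. al for details.")
# flags the misplaced period in "et. al"; the correct form is "et al."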
|
# ######################################################################################################################
# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. #
# #
# Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance #
# with the License. You may obtain a copy of the License at #
# #
# http://www.apache.org/licenses/LICENSE-2.0 #
# #
# Unless required by applicable law or agreed to in writing, software distributed under the License is distributed #
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for #
# the specific language governing permissions and limitations under the License. #
# ######################################################################################################################
from __future__ import annotations
import json
from dataclasses import dataclass, field
from typing import List, Dict, Callable, Optional
from aws_solutions.core import get_aws_partition, get_aws_region, get_aws_account
from shared.personalize_service import Personalize, logger
from shared.resource import DatasetGroup, Resource, Filter
from shared.resource import (
EventTracker,
Dataset,
Schema,
Solution,
Campaign,
BatchInferenceJob,
)
@dataclass(eq=True, frozen=True)
class ResourceElement:
resource: Resource = field(repr=False, compare=True)
arn: str = field(repr=True, compare=True)
@dataclass
class ResourceTree:
    resources: Dict = field(default_factory=dict, init=False, repr=False)
_resource_elements: Dict = field(default_factory=dict, init=False, repr=False)
_resource_parentage: Dict = field(default_factory=dict, init=False, repr=False)
def add(self, parent: ResourceElement, child: ResourceElement):
if child not in self._resource_parentage.keys():
self._resource_parentage[child] = parent
self._resource_elements.setdefault(parent, []).append(child)
else:
raise ValueError("element already exists")
def children(
self, of: ResourceElement, where: Callable = lambda _: True
) -> List[ResourceElement]:
return [elem for elem in self._resource_elements[of] if where(elem)]
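# Example (sketch; the ARNs below are made up):
#   tree = ResourceTree()
#   dsg = ResourceElement(DatasetGroup(), "arn:aws:personalize:us-east-1:111122223333:dataset-group/dg")
#   sol = ResourceElement(Solution(), "arn:aws:personalize:us-east-1:111122223333:solution/s")
#   tree.add(dsg, sol)
#   tree.children(dsg, where=lambda e: e.resource == Solution())  # -> [sol]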
class ServiceModel:
"""Lists all resources in Amazon Personalize for lookup against the dataset group ARN"""
def __init__(self, cli: Personalize, dataset_group_name=None):
self.cli = cli
self._arn_ownership = {}
self._resource_tree = ResourceTree()
if dataset_group_name:
dsgs = [DatasetGroup().arn(dataset_group_name)]
else:
dsgs = self._arns(self.cli.list(DatasetGroup()))
for dsg in dsgs:
logger.debug(f"listing children of {dsg}")
self._list_children(DatasetGroup(), dsg, dsg)
def owned_by(self, resource_arn, dataset_group_owner: str) -> bool:
"""
        Check whether a resource ARN is owned by (managed under) the given dataset group
:param resource_arn: the resource ARN to check
:param dataset_group_owner: the dataset group owner expected
:return: True if the resource is managed by the dataset group, otherwise False
"""
if not dataset_group_owner.startswith("arn:"):
dataset_group_owner = f"arn:{get_aws_partition()}:personalize:{get_aws_region()}:{get_aws_account()}:dataset-group/{dataset_group_owner}"
return dataset_group_owner == self._arn_ownership.get(resource_arn, False)
def available(self, resource_arn: str) -> bool:
"""
Check if the requested ARN is available
:param resource_arn: requested ARN
:return: True if the ARN is available, otherwise False
"""
all_arns = set(self._arn_ownership.keys()).union(
set(self._arn_ownership.values())
)
return resource_arn not in all_arns
def _list_children(self, parent: Resource, parent_arn, dsg: str) -> None:
"""
Recursively list the children of a resource
:param parent: the parent Resource
:param parent_arn: the parent Resource ARN
:param dsg: the parent dataset group ARN
:return: None
"""
for c in parent.children:
child_arns = self._arns(
self.cli.list(c, filters={f"{parent.name.camel}Arn": parent_arn})
)
for arn in child_arns:
logger.debug(f"listing children of {arn}")
self._resource_tree.add(
parent=ResourceElement(parent, parent_arn),
child=ResourceElement(c, arn),
)
self._arn_ownership[arn] = dsg
self._list_children(c, arn, dsg)
    def _arns(self, resources: List[Dict]) -> List[str]:
        """
        Lists the first ARN found for each resource in a list of resources
        :param resources: the list of resources
        :return: the list of ARNs
        """
        return [
            [v for k, v in resource.items() if k.endswith("Arn")][0]
            for resource in resources
        ]
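    # e.g. (hypothetical input): _arns([{"name": "s1", "solutionArn": "arn:aws:...:solution/s1"}])
    # returns ["arn:aws:...:solution/s1"]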
def _filter(self, result: Dict) -> Dict:
resource_key = next(iter(k for k in result.keys() if k != "ResponseMetadata"))
result = result[resource_key]
result = {
k: v for k, v in result.items() if k == "recipeArn" or not k.endswith("Arn")
}
# common
result.pop("status", None)
result.pop("creationDateTime", None)
result.pop("lastUpdatedDateTime", None)
# event tracker
result.pop("accountId", None)
result.pop("trackingId", None)
        # dataset
result.pop("datasetType", None)
# schema
if resource_key == "schema":
result["schema"] = json.loads(result["schema"])
# solution
result.pop("latestSolutionVersion", None)
# campaign
result.pop("latestCampaignUpdate", None)
# batch job
for item in {
"failureReason",
"jobInput",
"jobOutput",
"jobName",
"roleArn",
"solutionVersionArn",
}:
result.pop(item, None)
return result
def get_config(self, dataset_group_name, schedules: Optional[Dict]) -> Dict:
dataset_group_arn = DatasetGroup().arn(dataset_group_name)
dataset_group = ResourceElement(DatasetGroup(), dataset_group_arn)
config = {
"datasetGroup": {
"serviceConfig": self._filter(
self.cli.describe(DatasetGroup(), name=dataset_group_name)
)
}
}
self._add_filter_config(config, dataset_group)
self._add_event_tracker_config(config, dataset_group)
self._add_datasets(config, dataset_group)
self._add_solutions(config, dataset_group)
self._add_schedules(config, schedules)
return config
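    # Resulting shape (sketch; each key appears only when the matching resources exist):
    #   {
    #     "datasetGroup": {"serviceConfig": {...}, "workflowConfig": {...}},
    #     "filters": [...],
    #     "eventTracker": {...},
    #     "datasets": {"items": {...}, "interactions": {...}, "users": {...}},
    #     "solutions": [{"serviceConfig": {...}, "campaigns": [...], "batchInferenceJobs": [...]}]
    #   }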
def _add_schedules(self, config: Dict, schedules: Optional[Dict]) -> None:
"""
Modify config in place to add schedules
:param config: the config dictionary
:param schedules: the schedules to add
:return: None
"""
if not schedules:
return
if schedules.get("import"):
config["datasetGroup"]["workflowConfig"] = {
"schedules": {"import": schedules.get("import")}
}
solution_schedules = schedules.get("solutions", {})
for idx, solution in enumerate(config.get("solutions", [])):
name = solution.get("serviceConfig", {}).get("name")
schedules = solution_schedules.get(name)
if schedules:
config["solutions"][idx]["workflowConfig"] = {"schedules": schedules}
def _add_solutions(self, config, of: ResourceElement) -> None:
"""
Modify the config in place to add solutions, campaigns, and batch inference jobs
:param config: the config dictionary
:param of: the solution ResourceElement
:return: None
"""
solutions = self._resource_tree.children(
of, where=lambda x: x.resource == Solution()
)
if not solutions:
return
config.setdefault("solutions", [])
for solution in solutions:
_solution = self.cli.describe_by_arn(Solution(), solution.arn)
_solution_config = {"serviceConfig": self._filter(_solution)}
campaigns = self._resource_tree.children(
of=solution, where=lambda x: x.resource == Campaign()
)
for campaign in campaigns:
_campaign = self.cli.describe_by_arn(Campaign(), campaign.arn)
_solution_config.setdefault("campaigns", []).append(
{"serviceConfig": self._filter(_campaign)}
)
batch_jobs = self._resource_tree.children(
of=solution, where=lambda x: x.resource == BatchInferenceJob()
)
for batch_job in batch_jobs:
_batch_job = self.cli.describe_by_arn(
BatchInferenceJob(), batch_job.arn
)
_solution_config.setdefault("batchInferenceJobs", []).append(
{"serviceConfig": self._filter(_batch_job)}
)
config["solutions"].append(_solution_config)
def _add_filter_config(self, config: Dict, of: ResourceElement) -> None:
"""
Modify the config in place to add filters
:param config: the config dictionary
:param of: the DatasetGroup ResourceElement
:return: None
"""
filters = self._resource_tree.children(
of, where=lambda x: x.resource == Filter()
)
if not filters:
return
config["filters"] = [
{
"serviceConfig": self._filter(
self.cli.describe_by_arn(filter.resource, filter.arn)
)
}
for filter in filters
]
def _add_event_tracker_config(self, config: Dict, of: ResourceElement) -> None:
"""
Modify the config in place to add an event tracker
:param config: the config dictionary
:param of: the DatasetGroup ResourceElement
:return: None
"""
event_tracker = next(
iter(
self._resource_tree.children(
of, where=lambda x: x.resource == EventTracker()
)
),
None,
)
if not event_tracker:
return
config["eventTracker"] = {
"serviceConfig": self._filter(
self.cli.describe_by_arn(event_tracker.resource, event_tracker.arn)
)
}
def _add_datasets(self, config, of: ResourceElement) -> None:
"""
Modify the config in place to add all datasets
:param config: the config dictionary
:param of: the DatasetGroup ResourceElement
:return: None
"""
for dataset_type in Dataset().allowed_types:
self._add_dataset(config, dataset_type, of)
def _add_dataset(
self, config: Dict, dataset_type: str, of: ResourceElement
) -> None:
"""
Modify the config in place to add a dataset and schema
:param config: the config dictionary
:param dataset_type: the dataset type (must be ITEMS, INTERACTIONS, or USERS)
:param of: the DatasetGroup ResourceElement
:return: None
"""
if dataset_type not in Dataset().allowed_types:
raise ValueError(
f"dataset type {dataset_type} must be one of {Dataset().allowed_types}"
)
dataset = next(
iter(
self._resource_tree.children(
of,
where=lambda x: x.resource == Dataset()
and x.arn.endswith(dataset_type),
)
),
None,
)
if not dataset:
return
dataset = self.cli.describe_by_arn(Dataset(), dataset.arn)
config.setdefault("datasets", {})
config["datasets"].setdefault(dataset_type.lower(), {})
config["datasets"][dataset_type.lower()].setdefault(
"dataset", {"serviceConfig": self._filter(dataset)}
)
config["datasets"][dataset_type.lower()].setdefault(
"schema",
{
"serviceConfig": self._filter(
self.cli.describe_by_arn(
Schema(), arn=dataset["dataset"]["schemaArn"]
)
)
},
)
|
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from __future__ import absolute_import
import os
import sys
_CHROME_SRC = os.path.join(
os.path.abspath(os.path.dirname(__file__)), '..', '..', '..')
_PEXPECT_PATH = os.path.join(_CHROME_SRC, 'third_party', 'pexpect')
if _PEXPECT_PATH not in sys.path:
sys.path.append(_PEXPECT_PATH)
# pexpect is not available on all platforms. We allow this file to be imported
# on platforms without pexpect and only fail when pexpect is actually used.
try:
from pexpect import * # pylint: disable=W0401,W0614
except ImportError:
pass
|
#!/usr/bin/python
#-*-coding:utf-8-*-
from PyQt4.QtGui import *
from PyQt4.Qt import *
from PyQt4.QtCore import *
from PyQt4 import QtCore, QtGui
class SettingMenu(QMenu):
def __init__(self,parent = None):
super(SettingMenu,self).__init__()
# self.setStyleSheet("background-image:url(\":/images/setting_bg.jpg\");")
self.setStyleSheet("QMenu{background: rgb(255, 255, 255)}"
# "QMenu::item{color:gray}"
"QMenu::item:selected:enabled{background-color:gray;}"
)
self.createActions()
self.translateActions()
def createActions(self):
        # create the menu actions
self.action_refresh_data = QAction(self)
self.action_switch_account = QAction(self)
self.action_setcustom_config = QAction(self)
self.action_refresh_data.setIcon(QIcon(":/images/RefreshData.png"))
font = QtGui.QFont()
font.setPointSize(11)
self.action_refresh_data.setFont(font)
self.action_switch_account.setIcon(QIcon(":/images/RefreshData.png"))
font = QtGui.QFont()
font.setPointSize(11)
self.action_switch_account.setFont(font)
self.action_setcustom_config.setIcon(QIcon(":/images/RefreshData.png"))
font = QtGui.QFont()
font.setPointSize(11)
self.action_setcustom_config.setFont(font)
        # add the actions to the menu
self.addAction(self.action_refresh_data)
self.addSeparator()
self.addAction(self.action_switch_account)
self.addSeparator()
self.addAction(self.action_setcustom_config)
# self.addSeparator()
        # wire up the signal connections
self.connect(self.action_refresh_data, SIGNAL("triggered()"), SIGNAL("refreshData()"))
self.connect(self.action_switch_account, SIGNAL("triggered()"), SIGNAL("switchAccount()"))
self.connect(self.action_setcustom_config, SIGNAL("triggered()"), SIGNAL("setCustomConfig()"))
def translateActions(self):
self.action_refresh_data.setText(u" 数据刷新")
self.action_switch_account.setText(u" 切换帐号")
self.action_setcustom_config.setText(u" 自定义配置")
|
import bilby
import numpy as np
class PTABilbyLikelihood(bilby.Likelihood):
"""
The class that wraps Enterprise likelihood in Bilby likelihood.
Parameters
----------
pta: enterprise.signals.signal_base.PTA
Enterprise PTA object that contains pulsar data and noise models
parameters: list
A list of signal parameter names
"""
def __init__(self, pta, parameters):
self.pta = pta
self.parameters = parameters
self._marginalized_parameters = []
def log_likelihood(self):
return self.pta.get_lnlikelihood(self.parameters)
    def get_one_sample(self):
        return {par.name: par.sample() for par in self.pta.params}
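# Sketch of intended use (assumes an Enterprise PTA object named `pta` is in scope):
#   priors = get_bilby_prior_dict(pta)
#   likelihood = PTABilbyLikelihood(pta, list(priors.keys()))
#   bilby.run_sampler(likelihood=likelihood, priors=priors, sampler="dynesty")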
class LinearExp(bilby.core.prior.Prior):
"""
"""
def __init__(self, minimum, maximum, name=None, latex_label=None,
unit=None, boundary=None):
"""Uniform prior with bounds
Parameters
----------
minimum: float
See superclass
maximum: float
See superclass
name: str
See superclass
latex_label: str
See superclass
unit: str
See superclass
boundary: str
See superclass
"""
super(LinearExp, self).__init__(name=name, latex_label=latex_label,
minimum=minimum, maximum=maximum, unit=unit,
boundary=boundary)
def rescale(self, val):
"""
'Rescale' a sample from the unit line element to the LinearExp prior.
Parameters
----------
val: Union[float, int, array_like]
Uniform probability
Returns
-------
        Union[float, array_like]: Rescaled sample drawn from the prior
        """
        self.test_valid_for_rescaling(val)
        return np.log10(10 ** self.minimum + val * (10 ** self.maximum - 10 ** self.minimum))
def prob(self, val):
"""Return the prior probability of val
Parameters
----------
val: Union[float, int, array_like]
Returns
-------
float: Prior probability of val
"""
return ((val >= self.minimum) & (val <= self.maximum)) * np.log(10) * 10 ** val / (10 ** self.maximum - 10 ** self.minimum)
def ln_prob(self, val):
"""Return the log prior probability of val
Parameters
----------
val: Union[float, int, array_like]
Returns
-------
float: log probability of val
"""
return ((val >= self.minimum) & (val <= self.maximum)) * np.log(np.log(10) * 10 ** val / (10 ** self.maximum - 10 ** self.minimum))
def cdf(self, val):
_cdf = (10**val - 10**self.minimum) / (10**self.maximum - 10**self.minimum)
_cdf = np.minimum(_cdf, 1)
_cdf = np.maximum(_cdf, 0)
return _cdf
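    # Quick sanity check (sketch): prob integrates to ~1 over [minimum, maximum]:
    #   p = LinearExp(minimum=-18, maximum=-12)
    #   xs = np.linspace(-18, -12, 100001)
    #   np.trapz(p.prob(xs), xs)  # ~1.0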
def get_bilby_prior_dict(pta):
"""
Get Bilby parameter dict from Enterprise PTA object.
Currently only works with uniform priors.
Parameters
----------
pta: enterprise.signals.signal_base.PTA
Enterprise PTA object that contains pulsar data and noise models
"""
priors = dict()
for param in pta.params:
        if param.size is None:
            if param.type == 'uniform':
                priors[param.name] = bilby.core.prior.Uniform(
                    param.prior._defaults['pmin'],
                    param.prior._defaults['pmax'],
                    param.name)
            elif param.type == 'normal':
                priors[param.name] = bilby.core.prior.Normal(
                    param.prior._defaults['mu'],
                    param.prior._defaults['sigma'],
                    param.name)
            elif param.type == 'linearexp':
                priors[param.name] = LinearExp(
                    param.prior._defaults['pmin'],
                    param.prior._defaults['pmax'],
                    name=param.name)
else:
if param.name=='jup_orb_elements' and param.type=='uniform':
for ii in range(param.size):
priors[param.name+'_'+str(ii)] = bilby.core.prior.Uniform( \
-0.05, 0.05, param.name+'_'+str(ii))
# Consistency check
for key, val in priors.items():
if key not in pta.param_names:
            print('[!] Warning: Bilby\'s', key, 'is not in PTA params:',
                  pta.param_names)
return priors
|
import pytest
from yfs.statistics import (
parse_valuation_table,
parse_financial_highlights_table,
parse_trading_information_table,
)
from pytest_regressions import data_regression # noqa: F401
from .common_fixtures import statistics_page_data_fixture
def test_parse_valuation_table(data_regression, statistics_page_data_fixture):
result = parse_valuation_table(statistics_page_data_fixture)
data_regression.check(result.json())
def test_financial_highlights_table(data_regression, statistics_page_data_fixture):
result = parse_financial_highlights_table(statistics_page_data_fixture)
data_regression.check(result.json())
def test_parse_trading_information_table(data_regression, statistics_page_data_fixture):
result = parse_trading_information_table(statistics_page_data_fixture)
data_regression.check(result.json())
|
import os
cromwell_metadata_path = os.path.join(
os.path.dirname(__file__), "cromwell_metadata.json"
)
ANALYSIS_PROCESS_INPUT = {
"input_uuid": "6f911b71-158e-4f50-b8e5-395f386a343b",
"pipeline_type": "Optimus",
"workspace_version": "2021-05-24T12:00:00.000000Z",
"references": [
"gs://hca-dcp-sc-pipelines-test-data/alignmentReferences/optimusGencodeV27/GRCh38.primary_assembly.genome.fa"
],
"input_file": cromwell_metadata_path,
"project_level": False,
}
|
"""
This problem was asked by Facebook.
Given a function that generates perfectly random numbers between 1 and k (inclusive),
where k is an input, write a function that shuffles a deck of cards represented as an array using only swaps.
It should run in O(N) time.
Hint: Make sure each one of the 52! permutations of the deck is equally likely.
"""
from random import randint
def generate_random_num(k):
return randint(1, k)
def shuffle_deck():
    cards = [i for i in range(52)]
    # Fisher-Yates shuffle: position i is swapped with a uniformly chosen
    # position in [i, 51], which makes all 52! permutations equally likely
    for old_card_pos in range(52):
        # generate_random_num(52 - old_card_pos) is uniform on [1, 52 - old_card_pos],
        # so new_card_position is uniform on [old_card_pos, 51]
        new_card_position = old_card_pos + generate_random_num(52 - old_card_pos) - 1
        # swap
        cards[old_card_pos], cards[new_card_position] = cards[new_card_position], cards[old_card_pos]
    return cards
if __name__ == '__main__':
# cards = shuffle_deck()
# print(cards)
# print(len(cards))
    # sanity check: card x is present in a freshly shuffled deck, for each x in 0..51
assert all(x in shuffle_deck() for x in range(52))
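    # A quick uniformity sanity check on a 3-card deck (sketch): with swaps drawn
    # uniformly from [i, n-1], each of the 3! = 6 permutations should show up
    # roughly 1/6 of the time.
    from collections import Counter
    def shuffle_small(n=3):
        cards = list(range(n))
        for i in range(n):
            j = i + randint(1, n - i) - 1  # uniform over [i, n-1]
            cards[i], cards[j] = cards[j], cards[i]
        return tuple(cards)
    counts = Counter(shuffle_small() for _ in range(60000))
    print(counts)  # expect each of the 6 permutations to appear close to 10000 times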
|
import re
import os
import jk_logging
import jk_utils
import jk_prettyprintobj
from .GitWrapper import GitWrapper
from .GitCommitHistory import GitCommitHistory
#from .GitConfigFile import GitConfigFile # not needed
class GitServerRepository(jk_prettyprintobj.DumpMixin):
################################################################################################################################
## Constructors
################################################################################################################################
def __init__(self, rootDir:str, log:jk_logging.AbstractLogger):
self.__gitWrapper = GitWrapper(log)
bIsGitRoot = GitServerRepository.__isRootDir(os.path.abspath(rootDir))
if bIsGitRoot:
self.__gitRootDir = rootDir
else:
raise Exception("Can't find git root directory: " + rootDir)
self.__gitHeadsDirPath = os.path.join(rootDir, "refs", "heads")
self.__gitTagsDirPath = os.path.join(rootDir, "refs", "tags")
#self.__gitCfgFile = GitConfigFile(os.path.join(rootDir, "config")) # not needed
self.__volatileValue_getSize = jk_utils.VolatileValue(self.__getSizeInBytes, 15) # 15 seconds caching time
self.__volatileValue_getHeadName = jk_utils.VolatileValue(self.__getHeadName, 15) # 15 seconds caching time
self.__volatileValue_getHeads = jk_utils.VolatileValue(self.__getHeads, 15) # 15 seconds caching time
self.__volatileValue_getTags = jk_utils.VolatileValue(self.__getTags, 15) # 15 seconds caching time
#
################################################################################################################################
## Public Properties
################################################################################################################################
@property
def rootDir(self) -> str:
return self.__gitRootDir
#
#
# The name of the head revision
#
@property
def headName(self) -> str:
return self.__volatileValue_getHeadName.value
#
#
# The size of the repository folder in bytes
#
@property
def sizeInBytes(self) -> int:
return self.__volatileValue_getSize.value
#
@property
def isEmpty(self) -> bool:
return not self.__volatileValue_getHeads.value # return True if we have no heads
#
@property
def heads(self) -> list:
return self.__volatileValue_getHeads.value
#
@property
def tags(self) -> list:
return self.__volatileValue_getTags.value
#
################################################################################################################################
## Protected Methods
################################################################################################################################
def _dumpVarNames(self):
return [
"rootDir",
"headName",
"sizeInBytes",
"isEmpty",
"heads",
"tags",
]
#
################################################################################################################################
## Helper Methods
################################################################################################################################
def __getTags(self) -> list:
ret = []
for fe in os.scandir(self.__gitTagsDirPath):
			if fe.is_file():
ret.append(fe.name)
return sorted(ret)
#
def __getHeads(self) -> list:
ret = []
for fe in os.scandir(self.__gitHeadsDirPath):
			if fe.is_file():
ret.append(fe.name)
return sorted(ret)
#
#
# Load the name of the head revision
#
def __getHeadName(self) -> str:
p = os.path.join(self.__gitRootDir, "HEAD")
if not os.path.isfile(p):
raise Exception(repr(p))
with open(p, "r") as f:
lines = f.read().split("\n")
line = lines[0]
		m = re.match(r"^ref:\s+refs/heads/(\w+)$", line)
if not m:
raise Exception(repr(line))
return m.group(1)
#
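	# Example: a typical bare-repository HEAD file contains a single line such as
	#   ref: refs/heads/master
	# from which __getHeadName() extracts and returns "master".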
#
# Get the size of the repository folder in bytes
#
def __getSizeInBytes(self) -> int:
return jk_utils.fsutils.getFolderSize(self.__gitRootDir)
#
################################################################################################################################
## Public Methods
################################################################################################################################
#
# Retrieve the commit history.
#
def getCommitHistory(self) -> GitCommitHistory:
return GitCommitHistory.create(self.__gitRootDir, self.__gitWrapper)
#
################################################################################################################################
## Static Helper Methods
################################################################################################################################
################################################################################################################################
## Static Methods
################################################################################################################################
@staticmethod
def hasRepository(dirPath:str) -> bool:
assert isinstance(dirPath, str)
assert os.path.isdir(dirPath)
dirPath = os.path.abspath(dirPath)
return GitServerRepository.__isRootDir(dirPath)
#
@staticmethod
def __isRootDir(rootDir:str) -> bool:
if os.path.isfile(os.path.join(rootDir, "config")) \
and os.path.isfile(os.path.join(rootDir, "description")) \
and os.path.isfile(os.path.join(rootDir, "HEAD")) \
and os.path.isdir(os.path.join(rootDir, "branches")) \
and os.path.isdir(os.path.join(rootDir, "hooks")) \
and os.path.isdir(os.path.join(rootDir, "objects")) \
and os.path.isdir(os.path.join(rootDir, "refs")) \
:
return True
else:
return False
#
#
|
#!/usr/local/bin/python
# Code Fights Is Power Problem
def isPower(n):
if n == 1:
return True
    # the largest possible exponent for any base >= 2 is log2(n), i.e. at most n.bit_length()
    for exp in range(2, n.bit_length() + 1):
for base in range(2, n):
tmp = base ** exp
if tmp == n:
return True
elif tmp > n:
break
return False
def main():
tests = [
[125, True],
[72, False],
[100, True],
[11, False],
[324, True],
[256, True],
[119, False],
[400, True],
[350, False],
[361, True]
]
for t in tests:
res = isPower(t[0])
ans = t[1]
if ans == res:
print("PASSED: isPower({}) returned {}"
.format(t[0], res))
else:
print("FAILED: isPower({}) returned {}, answer: {}"
.format(t[0], res, ans))
if __name__ == '__main__':
main()
|
import unittest
from card.components import BaseOracle
class OracleTest(unittest.TestCase):
def setUp(self):
self.oracle = self.get_oracle()
def get_oracle(self) -> BaseOracle:
raise NotImplementedError()
def e(self, abbrv: str, strings: list):
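        # asserts that the abbreviation itself and every alias string map back to `abbrv`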
for s in [abbrv] + strings:
self.assertEqual(abbrv, self.oracle.get_abbreviation(s), s)
|
"""
Generate docs
"""
from jinja2 import Template
import subprocess
def generate(host, port, database):
# read config template
with open("bin/schemaspy-template.properties", "r") as inpfile:
template = Template(inpfile.read())
rendered_config = template.render(
host=host, port=port, database=database)
    # write the rendered config out to file
with open("bin/schemaspy.properties", "w") as outfile:
outfile.write(rendered_config)
with open("schemaspy.log", "w") as outfile:
subprocess.call(["java", "-jar", "bin/schemaspy-6.0.0.jar",
"-configFile", "bin/schemaspy.properties"], stdout=outfile)
|
import os, pendulum
class Config:
QUERY = os.environ.get('QUERY', '*')
CHECK_DURATION = int(os.environ.get('CHECK_DURATION', 1))
USERNAME = os.environ['USERNAME']
    PASSWORD = os.environ['PASSWORD']
CLIENT_ID = os.environ.get('CLIENT_ID')
CLIENT_SECRET = os.environ.get('CLIENT_SECRET')
TENANT_ID = os.environ.get('TENANT_ID')
STORAGE_PATH = os.environ.get('STORAGE_PATH', '/mailtrail/data')
|
import os
import sys
import argparse
import matplotlib.pyplot as plt
from keras.models import load_model
from keras import backend as K
from termcolor import colored,cprint
import numpy as np
from utils import *
import pandas as pd
from keras.utils import np_utils
from sklearn import preprocessing
# Saliency map
# https://github.com/experiencor/deep-viz-keras/blob/master/saliency.py
from keras.layers import Input, Conv2DTranspose
from keras.models import Model
from keras.initializers import Ones, Zeros
def normalize(x):
# utility function to normalize a tensor by its L2 norm
return x / (K.sqrt(K.mean(K.square(x))) + 1e-5)
def normal(X):
    X = X.astype('float32')
    X /= 255
    X = X.reshape(len(X), 48, 48, 1)
    return X
def OneHotEncode(y):
    # convert labels to one-hot encoding
y = np_utils.to_categorical(y)
#y = pd.get_dummies(y).values
return y
class SaliencyMask(object):
def __init__(self, model, output_index=0):
pass
def get_mask(self, input_image):
pass
def get_smoothed_mask(self, input_image, stdev_spread=.2, nsamples=50):
stdev = stdev_spread * (np.max(input_image) - np.min(input_image))
total_gradients = np.zeros_like(input_image, dtype = np.float64)
for i in range(nsamples):
noise = np.random.normal(0, stdev, input_image.shape)
x_value_plus_noise = input_image + noise
total_gradients += self.get_mask(x_value_plus_noise)
return total_gradients / nsamples
class GradientSaliency(SaliencyMask):
def __init__(self, model, output_index = 0):
# Define the function to compute the gradient
input_tensors = [model.input]
print(model.output[0][0])
print(model.total_loss)
gradients = model.optimizer.get_gradients(model.output[0][output_index], model.input)
self.compute_gradients = K.function(inputs = input_tensors, outputs = gradients)
def get_mask(self, input_image):
# Execute the function to compute the gradient
x_value = np.expand_dims(input_image, axis=0)
gradients = self.compute_gradients([x_value])[0][0]
return gradients
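# Sketch of intended use (model / img / class_idx are placeholders, not defined here):
#   saliency = GradientSaliency(model, output_index=class_idx)
#   mask = saliency.get_mask(img)             # vanilla gradient, same shape as img
#   smooth = saliency.get_smoothed_mask(img)  # SmoothGrad: average over noisy copies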
# https://github.com/experiencor/deep-viz-keras/blob/master/visual_backprop.py
class VisualBackprop(SaliencyMask):
def __init__(self, model, output_index = 0):
inps = [model.input] # input placeholder
outs = [layer.output for layer in model.layers] # all layer outputs
self.forward_pass = K.function(inps, outs) # evaluation function
self.model = model
def get_mask(self, input_image):
x_value = np.expand_dims(input_image, axis=0)
visual_bpr = None
layer_outs = self.forward_pass([x_value, 0])
for i in range(len(self.model.layers) - 1, -1, -1):
if 'Conv2D' in str(type(self.model.layers[i])):
layer = np.mean(layer_outs[i], axis = 3, keepdims = True)
layer = layer - np.min(layer)
layer = layer / (np.max(layer) - np.min(layer) + 1e-6)
if visual_bpr is not None:
if visual_bpr.shape != layer.shape:
visual_bpr = self._deconv(visual_bpr)
visual_bpr = visual_bpr * layer
else:
visual_bpr = layer
return visual_bpr[0]
def _deconv(self, feature_map):
x = Input(shape = (None, None, 1))
y = Conv2DTranspose(filters = 1,
kernel_size = (3, 3),
strides = (2, 2),
padding = 'same',
kernel_initializer = Ones(),
bias_initializer = Zeros())(x)
deconv_model = Model(inputs=[x], outputs=[y])
inps = [deconv_model.input] # input placeholder
outs = [deconv_model.layers[-1].output] # output placeholder
deconv_func = K.function(inps, outs) # evaluation function
return deconv_func([feature_map, 0])[0]
def deprocess_image(x):
# normalize tensor: center on 0., ensure std is 0.1
x -= x.mean()
x /= (x.std() + 1e-5)
x *= 0.1
# clip to [0, 1]
x += 0.5
x = np.clip(x, 0, 1)
# convert to RGB array
x *= 255
#x = x.transpose((1, 2, 0))
x = np.clip(x, 0, 255).astype('uint8')
return x
def grad_ascent(input_image_data,iter_func):
# step size for gradient ascent
step = 5
#img_asc = np.array(img)
img_asc = np.random.random((1, 48, 48, 1))
img_asc = (img_asc - 0.5) * 20 + 48
# run gradient ascent for 20 steps
for i in range(20):
loss_value, grads_value = iter_func([img_asc])
img_asc += grads_value * step
img_asc = img_asc[0]
#img_ascs.append(deprocess_image(img_asc).reshape((48, 48)))
return img_asc
def vis_img_in_filter(img,layer_dict,model,
layer_name = 'conv2d_2'):
layer_output = layer_dict[layer_name].output
img_ascs = list()
for filter_index in range(layer_output.shape[3]):
# build a loss function that maximizes the activation
# of the nth filter of the layer considered
loss = K.mean(layer_output[:, :, :, filter_index])
# compute the gradient of the input picture wrt this loss
grads = K.gradients(loss, model.input)[0]
# normalization trick: we normalize the gradient
grads /= (K.sqrt(K.mean(K.square(grads))) + 1e-5)
# this function returns the loss and grads given the input picture
iterate = K.function([model.input], [loss, grads])
# step size for gradient ascent
step = 5.
#img_asc = np.array(img)
img_asc = np.random.random((1, 48, 48, 1))
img_asc = (img_asc - 0.5) * 20 + 48
# run gradient ascent for 20 steps
for i in range(20):
loss_value, grads_value = iterate([img_asc])
img_asc += grads_value * step
img_asc = img_asc[0]
img_ascs.append(deprocess_image(img_asc).reshape((48, 48)))
if layer_output.shape[3] >= 35:
plot_x, plot_y = 6, 6
elif layer_output.shape[3] >= 23:
plot_x, plot_y = 4, 6
elif layer_output.shape[3] >= 11:
plot_x, plot_y = 2, 6
else:
plot_x, plot_y = 1, 2
fig, ax = plt.subplots(plot_x, plot_y, figsize = (12, 12))
ax[0, 0].imshow(img.reshape((48, 48)), cmap = 'gray')
ax[0, 0].set_title('Input image')
fig.suptitle('Input image and %s filters' % (layer_name,))
fig.tight_layout(pad = 0.3, rect = [0, 0, 0.9, 0.9])
for (x, y) in [(i, j) for i in range(plot_x) for j in range(plot_y)]:
if x == 0 and y == 0:
continue
ax[x, y].imshow(img_ascs[x * plot_y + y - 1], cmap = 'gray')
ax[x, y].set_title('filter %d' % (x * plot_y + y - 1))
#fig.savefig('./result/image{}.png'.format(1), dpi=100)
return img_ascs
def main():
X_train = np.load('./feature/X_train.npy')
Y_train = np.load('./feature/y_train.npy')
X_train = normal(X_train)
lb = preprocessing.LabelBinarizer()
lb.fit(Y_train)
Y_train = lb.inverse_transform(Y_train)
#print(Y_train.shape)
K.set_learning_phase(1)
n_classes = 7
model_name = "model-75.hdf5"
model_path = "model-75.hdf5"
emotion_classifier = load_model(model_path)
layer_dict = dict([layer.name, layer] for layer in emotion_classifier.layers[1:])
input_img = emotion_classifier.input
#print(layer_dict)
#print(Y_train[0].shape)
fig, ax = plt.subplots(7, 5, figsize = (16, 16))
fig.suptitle('vanilla gradient')
for i in range(n_classes):
img = np.array(X_train[i+8])
#Y_train[i] = np.reshape(Y_train[i],(7,1))
vanilla = GradientSaliency(emotion_classifier, Y_train[i])
mask = vanilla.get_mask(img)
filter_mask = (mask > 0.0).reshape((48, 48))
smooth_mask = vanilla.get_smoothed_mask(img)
filter_smoothed_mask = (smooth_mask > 0.0).reshape((48, 48))
ax[i, 0].imshow(img.reshape((48, 48 )), cmap = 'gray')
cax = ax[i, 1].imshow(mask.reshape((48, 48)), cmap = 'jet')
fig.colorbar(cax, ax = ax[i, 1])
ax[i, 2].imshow(mask.reshape((48, 48)) * filter_mask, cmap = 'gray')
cax = ax[i, 3].imshow(mask.reshape((48, 48)), cmap = 'jet')
fig.colorbar(cax, ax = ax[i, 3])
ax[i, 4].imshow(smooth_mask.reshape((48, 48)) * filter_smoothed_mask, cmap = 'gray')
fig.savefig('image_Heatmap{}.png'.format(8), dpi=100)
if __name__ == "__main__":
main()
|
t = int(input())
for _ in range(t):
    l = []
    n = int(input())
    for _ in range(n):
        d = list(map(int, input().split()))
        l.append(d)
    # for each row (a, b, c) the score is (b // (a + 1)) * c
    p = [(row[1] // (row[0] + 1)) * row[2] for row in l]
    print(max(p))
|
from unittest import TestCase
from app import app
from i18n.i18n import I18n
class MockApp(object):
def add_template_filter(self, fn):
pass
class IntegrationTestBase(TestCase):
def setUp(self):
I18n(app)
app.testing = True
self.app = app.test_client()
def _assertStatusCode(self, code, response):
self.assertEqual(code, response.status_code)
def assertSuccess(self, response):
self._assertStatusCode(200, response)
def assertCreated(self, response):
self._assertStatusCode(201, response)
def assertRedirect(self, response):
self._assertStatusCode(302, response)
def assertNotFound(self, response):
self._assertStatusCode(404, response)
def assertConflict(self, response):
self._assertStatusCode(409, response)
|
"""
Dada um array numérico "nums", retorne true se um dos 4 elementos iniciais for um "9".
O "length" do array pode ser menor que 4.
"""
def array_front9(nums):
return nums[:4].count(9) >= 1
# ANOTHER WAY, MORE EXPLICIT AND STEP BY STEP
def array_front9_2(nums):
# First figure the end for the loop
end = len(nums)
if end > 4:
end = 4
for i in range(end): # loop over index [0, 1, 2, 3]
if nums[i] == 9:
return True
return False
print(array_front9([0, 1, 2, 9, 9]))
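# a couple more sanity checks
print(array_front9([1, 2, 9]))  # True: a 9 sits within the first 4 elements
print(array_front9([1, 2, 3]))  # False: no 9 at all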
|
import csv
from random import shuffle
newNames = []
persons = []
newPlaces = []
oldPlaces = []
oldRelations = []
newRelations = [
'Academic Work',
'Friendship',
'Politics',
'Colleges',
'Food'
]
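# Strategy: every distinct person/place/relation seen in marieboucher.csv is
# mapped, by first-seen index, to a shuffled scientist name / capital city /
# relation label, so the rewritten rows keep the original network structure.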
# Extract new names
with open('scientists.csv', newline='') as csvfile:
    csvreader = csv.reader(csvfile, delimiter=',', quotechar='|')
    for row in csvreader:
        newNames.append(row[0])
shuffle(newNames)
# Extract new places
with open('capitals.csv', newline='') as csvfile:
    csvreader = csv.reader(csvfile, delimiter=',', quotechar='"')
    for row in csvreader:
        newPlaces.append(row[1])
shuffle(newPlaces)
# Extract old names, places and relations, and build the renamed rows
newlines = []
with open('marieboucher.csv', newline='') as csvfile:
    csvreader = csv.reader(csvfile, delimiter=',', quotechar='"')
    for row in csvreader:
        p1 = row[0]
        p2 = row[3]
        if p1 not in persons:
            persons.append(p1)
        if p2 not in persons:
            persons.append(p2)
        l1 = row[1]
        l2 = row[4]
        if l1 not in oldPlaces:
            oldPlaces.append(l1)
        if l2 not in oldPlaces:
            oldPlaces.append(l2)
        r = row[2].strip()
        if r not in oldRelations:
            oldRelations.append(r)
        newline = [
            newNames[persons.index(p1)],
            newPlaces[oldPlaces.index(l1)],
            newRelations[oldRelations.index(r)],
            newNames[persons.index(p2)],
            newPlaces[oldPlaces.index(l2)],
            row[9]
        ]
        newlines.append(newline)
with open('timmysmith.csv', 'w', newline='') as csvfile:
    spamwriter = csv.writer(csvfile, delimiter=',',
                            quotechar='"', quoting=csv.QUOTE_MINIMAL)
    spamwriter.writerow(['Name 1', 'Location 1', 'Relation Type', 'Name 2', 'Location 2', 'Date'])
    for line in newlines:
        spamwriter.writerow(line)
|
from .version import version as __version__
__all__ = [
"__version__",
]
|
from torch.distributions import Categorical
from torch.autograd import Variable
from models.dropout import ConcreteDropout, Standout
import torch.nn as nn
import torch.nn.functional as F
from torchvision import datasets, transforms
import torch
scale = transforms.Normalize(mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5))
transform = transforms.Compose([scale])
t_normalize = lambda x, z: z.mean() * (x - x.mean()) / (x.max() - x.min())
t_ratio = lambda x, z: x / z
def mix_dist(x, z, mixture='linear', p=None, disc_rep=False, eps=0.01):
if mixture in ['exp', 'linear', 'sigmoid', 'static']:
if p is None:
            raise Exception('p argument is None; it should be provided when using a {} schedule'.format(mixture))
x_n, z_n = random_mix(x, z, p, disc_rep=disc_rep, eps=eps)
    elif mixture in ['gumbel', 'logits', 'standout']:
x_n, z_n = 1, 1
# print("{:.2f} change in x at rate {}".format(float((x == x_n).sum())/x.numel(), p))
return x_n, z_n
def random_mix(x, z, p, disc_rep=False, eps=0, scale=True):
mask = Variable(torch.bernoulli(x.data.new(x.data.size()).fill_(p))).type(torch.ByteTensor)
# https://discuss.pytorch.org/t/use-torch-nonzero-as-index/33218 for having to use split()
mask_inds = (mask == 1).nonzero().split(1, dim=1)
if eps != 0: u = torch.randn(z.data[mask_inds].size()) * eps
if disc_rep:
x[mask],z[mask] = z[mask], x[mask]
# assert int(torch.all(torch.eq(x, z))) == 0
if eps != 0:
x.data[mask_inds] += u
z.data[mask_inds] += u
else:
z.data[mask_inds] = t_normalize(x.data[mask_inds], z.data[mask_inds])
if eps != 0:
z.data[mask_inds] += u
return x, z
def test_random_mix():
x = torch.randn((2, 3))
z = torch.randn((2, 3))
x_new, z_new = random_mix(x, z, p=0.5, disc_rep=True)
#assert int(torch.all(torch.eq(x, x_new))) == 0
class MixConcrete(nn.Module):
def __init__(self, layer, input_shape, w_reg=1e-6, d_reg=1e-5):
super(MixConcrete, self).__init__()
self.conc = ConcreteDropout(layer, input_shape, weight_regularizer=w_reg, locked = True,
dropout_regularizer=d_reg, init_min=0.1, init_max=0.1)
def forward(self, x, z):
        x_c = self.conc(x)
        # note: z is currently unused here; return the concrete-dropout output
        return x_c
class MixStandout(nn.Module):
def __init__(self, l_shape, alpha, beta=1):
super(MixStandout, self).__init__()
        self.linear = nn.Linear(*l_shape)  # assumes l_shape is (in_features, out_features)
self.standout = Standout(self.linear, alpha, beta)
def forward(self, x, z, deterministic = False):
x_sig = F.relu(self.linear(x))
# this would be (x, x_sig)
z_tilde = self.standout(x_sig, z)
return z_tilde
class InfoMix(nn.Module):
def __init__(self, lam=1e-3):
super(InfoMix, self).__init__()
self.lam = lam
def ent(self, output):
return - torch.mean(output * torch.log(output + 1e-6))
def forward(self, x, z, l = 0.5):
"""Minimizes the Shannon-divergence between """
o12 = F.kl_div(F.log_softmax(x), z)
o21 = F.kl_div(F.log_softmax(z), x)
out = self.lam * torch.mean((o12 * l + o21 * (1-l)))
return out
def mix_z(x, z, p):
":params x: data input, z: random input, p: mix prob vector for each column "
print(p)
print(Categorical(p).sample())
x.view()
if __name__ == "__main__":
test_random_mix()
def ex():
x = torch.randn((2, 3, 4, 4))
sf = 0.5
# print(x)
x_mean = x.view(x.size(0), x.size(1), -1).mean(2)
x_std = x.view(x.size(0), x.size(1), -1).std(2)
print(x_mean.size())
print(x_std.size())
x = (x.view(x.size(0), x.size(1), -1) / x_mean) * sf
print(x)
|
# -*- coding: utf-8 -*-
"""
Created on Fri Oct 1 22:07:22 2021
@author: pcjean
"""
import PySimpleGUI as sg
import sys
class Vcard:
def __init__(self,numer,prop,val, listprop):
self.numero = numer
self.ident = prop
self.valeur = val
self.listprop = listprop
#
# Subroutines for encoding/decoding vcards
#
def str2list(chaine): # returns a list of strings from a string wrapped in []
l = []
ch1 = chaine.strip("[]")
ch2 = ch1.replace("'","")
ch = ch2.replace(" ","")
l = ch.split(",")
return l
def tascii(chaine): # tests whether a string is pure ASCII
return all(ord(c) < 128 for c in chaine)
def str_to_utf(strencod): # quoted-printable (UTF-8) encoding of a string
strdecod1 = ";"
# print("donnees à encoder: " + strencod)
lisenc = strencod.split(";")
for j in range(len(lisenc)):
strdecod = ""
str1 = lisenc[j].encode()
bstr1 = bytearray(str1)
strdec = bstr1.hex()
if len(strdec) != 0:
for k in range(int(len(strdec)/2)):
strint = strdec[2*k]+ strdec[2*k+1]
# if strint == "20" : strint= "0A"
strdecod = strdecod+ "=" + strint
else: strdecod =""
if len(strdec) !=0: strdecod1 = strdecod1 + ";" + strdecod
# print(strdecod1)
return strdecod1
def utf_to_str(strdecod): # decodes quoted-printable (UTF-8) encoded values
strutf1 = ''
strutf2 = ''
strhex = ''
for i in range(len(strdecod)):
if strdecod[i] == "=": # début de byte
strhex = strhex + ' '
elif strdecod[i] == ";": # fin de séquence
strhex = strhex.replace('0A','20')
byte_array = bytearray.fromhex(strhex)
strutf1 = byte_array.decode('utf-8','replace')
strutf2 = strutf2 + strutf1 + ','
strutf1 = ''
strhex = ''
elif strdecod[i] == '\r':
strdecod.replace('\r',' ')
elif strdecod[i] == '\n':
strdecod.replace('\n',' ')
else:
strhex = strhex + strdecod[i]
strhex = strhex.replace('0A','20')
byte_array = bytearray.fromhex(strhex)
strutf1 = byte_array.decode('utf-8','replace')
strutf2 = strutf2 + strutf1
return strutf2
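# Round-trip example (traced through the two functions above):
#   str_to_utf("é")      -> ";;=c3=a9"  (each byte emitted as "=<hex>")
#   utf_to_str("=c3=a9") -> "é"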
def encodDon(ligne): # builds one line of the vcf file
# print("encodon")
# print(ligne)
ligdon = ""
delim1 = ':'
delim2 = ';'
nonascii = False
ligdon = str(ligne[1])
nbpara = len(ligne[2])
valeurs = str(ligne[3])
if nbpara == 0:
delim = delim1
else:
delim = delim2
for j in range(len(ligne[2])):
delim = delim + str(ligne[2][j]) + delim2
        # if UTF-8 encoding is needed
if (str(ligne[2][j]) == 'ENCODING=QUOTED-PRINTABLE'):
nonascii = True
valeurs = str_to_utf(str(ligne[3]))
    # if the values contain new non-ASCII characters
    # the following line is only valid from Python 3.7 on
# if (ligne[4].isascii() == False) and (nonascii == False):
if (tascii(ligne[3]) == False) and (nonascii == False):
delim = delim + 'ENCODING=QUOTED-PRINTABLE'+ delim2
valeurs = str_to_utf(str(ligne[3]))
# print(delim)
delim = delim[:len(delim)-1]+delim1
ligdon = ligdon + delim + valeurs
return ligdon
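# Example (traced): encodDon([1, "TEL", ["CELL"], "0601020304"])
# -> "TEL;CELL:0601020304"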
def decode_card(adecod, long):
    # extract the property, parameters and values from each line
    # and decode the data
propert = ''
param = ''
valeurc = ''
res = []
listpar= []
deb1 = adecod.find(':')
deb2 = adecod.find(';')
if ((deb1 == -1)): deb1 = 999
if ((deb2 == -1)): deb2 = 999
debmotc = min(deb1,deb2)
if (debmotc != 999):
propert = adecod[:debmotc]
param = adecod[(debmotc+1):(deb1)]
valeur = adecod[(deb1+1):long]
if (param.find('ENCODING=QUOTED-PRINTABLE') != -1):
            # print('to decode ' + valeur)
lisdec = valeur.split(";")
for j in range(len(lisdec)):
valeurc = valeurc + ";" + utf_to_str(lisdec[j])
else:
valeurc = valeur
if (param != ''): res = [0]
res.extend([j for j in range(len(param)) if param.startswith(";", j)])
nbrpar = len(res)
    param = param.replace(';',' ', 5) # at most 5 parameters!!!
for i in range(nbrpar):
if (i != nbrpar-1):
if (i == 0): listpar.append(param[res[i]:res[i+1]])
else: listpar.append(param[res[i]+1:res[i+1]])
else:
if (nbrpar != 1): listpar.append(param[res[i]+1:])
else: listpar.append(param[res[i]:])
# print(listpar)
return propert, listpar, valeurc , res
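# Example (traced): decode_card("TEL;CELL:0601020304", 19)
# -> ("TEL", ["CELL"], "0601020304", [0])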
#
def litVcard(fichdonn, fichres):
#
    # read the vcard file, decode it and fill the working list
#
initvcard = []
nbcard = 0
bdecod =''
longueur = 0
resu = []
listcard = []
listot = []
listi = []
listvac = []
photo = 0
carte = []
list_prop = []
num = 0
idepar =[]
ideval = ''
try:
        with open(fichdonn,'r') as fich: # open the input file
            with open(fichres,'w') as fichr: # open the result file
initvcard = fich.readlines()
for i in range(len(initvcard)):
                    if (initvcard[i].find('BEGIN') != -1): # start of a card
# if nbcard != 0: carte.append(Vcard(num,idepar,ideval,list_prop))
list_prop = []
nbcard = nbcard + 1
bdecod = ''
debut = 1
listi = []
firstcard = 1
                    elif (initvcard[i].find('END') != -1): # end of a card
prop, para, val, resu = decode_card(bdecod, longueur)
listcard = [nbcard,prop,para,val]
list_prop.append(listcard)
num = list_prop[0][0]
idepar = list_prop[0][1]
ideval = list_prop[0][3]
"""# pour test
print("debli")
print(num)
print(idepar)
print(ideval)
print(list_prop)
"""# fin pour test
carte.append(Vcard(num,idepar,ideval,list_prop))
list_prop = []
fichr.write(str(listcard)+ '\n')
listot.append(listcard)
listi.append(listcard)
listvac.append(listi)
photo = 0
elif (initvcard[i].find('VERSION') != -1): # Version
pass
                    elif (initvcard[i].find('PHOTO') != -1): # the photo property, skipped for now
photo = 1
                    elif initvcard[i][0] == 'X': # phone-specific properties
                        pass
                    elif ((photo == 1) and (initvcard[i][0] == ' ')): # continuation of photo data
                        pass
                    elif (initvcard[i][0] == '\r') or (initvcard[i][0] == '\n'): # continuation data
pass
else:
photo = 0
                        if (initvcard[i][0] == '='): # continuation of a valid property
bdecod = bdecod + initvcard[i]
longueur = longueur + len(initvcard[i])
prop, para, val , resu = decode_card(bdecod, longueur)
elif (i != 0):
                            if (debut != 1): # property after BEGIN and VERSION
prop, para, val, resu = decode_card(bdecod, longueur)
if (prop != ' '):
listcard = [nbcard, prop,para,val]
                                    if firstcard == 1: # first property
num = nbcard
idepar = prop
ideval = val
firstcard = 0
list_prop.append(listcard)
fichr.write(str(listcard)+ '\n')
listot.append(listcard)
listi.append(listcard)
bdecod = initvcard[i]
longueur = len(initvcard[i])-1
debut = 0
print(nbcard)
"""# pour test
print(" fin lit")
for i in range(len(carte)):
print(carte[i].numero)
print(carte[i].ident)
print(carte[i].valeur)
print(carte[i].listprop)
"""# fin pour test
return nbcard, carte
except Exception as e:
print(e, file=sys.stderr)
pass
return
#
#
def ficRes(data, fichdat):
#
    # rebuild a vcf file from the modified list
#
try:
with open(fichdat,'w') as fid:
inid = 0
nbcard = 0
            for i in range(len(data)-1): # all records except the last
                if data[i][0] != inid: # a different card id = a new vcard
inid = data[i][0]
nbcard = nbcard + 1
                    if i != 0: fid.write("END:VCARD\n") # write the end delimiter first
                    fid.write("BEGIN:VCARD\n") # start a new vcard
fid.write("VERSION:2.1\n")
sortie = encodDon(data[i])
fid.write(sortie + "\n")
else:
sortie = encodDon(data[i])
fid.write(sortie + "\n")
            ncard = len(data) # the last record
sortie = encodDon(data[ncard-1])
fid.write(sortie + "\n")
fid.write("END:VCARD")
print(nbcard)
except Exception as e:
print("erreur transcription "+ str(data[i]))
print(e, file = sys.stderr)
pass
return
def ficRes2(donnees, fichdat):
data = []
for i in range(len(donnees)):
# for j in range(len(donnees[i])):
for j in range(len(donnees[i].listprop)):
data.append(donnees[i].listprop[j])
# print(data)
ficRes(data, fichdat)
return
def Fenmodv(win_active, cardmod):
#
    # display the window for modifying a vcard
    # the vcard's properties are displayed as a table
#
# win3_active = False
listm = []
    suppr = False # flag set when every property has been deleted
head2 = ['numéro','property','paramètre','valeur']
layout2 = [
[sg.Text('Données')],
[sg.Table(values=listm, headings = head2,
auto_size_columns=False,
key = 'Tab2',enable_click_events = True,
)],
[sg.Submit(button_text='Modifier', key = 'modif'),
sg.Submit(button_text='Insérer', key = 'inser'),
sg.Submit(button_text='Supprimer', key = 'suppr'),
sg.Cancel(button_text = 'Terminer', key = 'termin')]
]
window2 = sg.Window("Properties de la vcard", layout2)
for i in range(len(cardmod.listprop)):
listm.append(cardmod.listprop[i] )
if win_active:
while True:
ev2, vals2 = window2.read()
print("event2: " + str(ev2) + " values2: "+ str(vals2))
if ev2 == sg.WIN_CLOSED or ev2 == 'Exit' or ev2 == 'termin':
win_active = False
window2.close()
break
if ev2 == 'modif':
win3_active = True
print("appel modif prop")
                # property modification function
                if vals2['Tab2'] != []:
                    win3_active = Fenmodprop(win3_active, cardmod, vals2['Tab2'])
                # display the changes
listm = []
for i in range(len(cardmod.listprop)):
listm.append(cardmod.listprop[i] )
win_active = False
window2['Tab2'].update(listm)
if ev2 == 'inser':
win3_active = True
print("appel insertion prop")
                # property insertion function
                win3_active = Fenmodprop(win3_active, cardmod, [-1])
                # display after the changes
listm = []
for i in range(len(cardmod.listprop)):
listm.append(cardmod.listprop[i] )
win_active = False
window2['Tab2'].update(listm)
if ev2 == 'suppr':
win3_active = True
if vals2['Tab2'] != []:
print("appel suppression prop" + str(int(vals2['Tab2'][0])))
                    # delete a property
                    ligd = int(vals2['Tab2'][0])
                    cardmod.listprop.pop(ligd)
                    # deleting every property is equivalent to deleting the vcard
if cardmod.listprop == []:
print("suppression de toutes les properties")
suppr = True
                # display after the changes
listm = []
for i in range(len(cardmod.listprop)):
listm.append(cardmod.listprop[i] )
win_active = False
window2['Tab2'].update(listm)
    # in case the first line was modified
if cardmod.listprop != []:
cardmod.numero = cardmod.listprop[0][0]
cardmod.ident = cardmod.listprop[0][1]
cardmod.valeur = cardmod.listprop[0][3]
return win_active, suppr
def Fenmodprop(win_active, donnee, lign):
#
    # display a window for modifying / inserting a property
#
ligne = int(lign[0])
# valinit1 = str(donnee.listprop[ligne][0])
valinit1 = donnee.listprop[ligne][0]
    if ligne != -1: # modifying an existing property
valinit2 = str(donnee.listprop[ligne][1])
valinit4 = str(donnee.listprop[ligne][2])
valinit5 = str(donnee.listprop[ligne][3])
    else: # inserting a new property
valinit2 = ""
valinit4 = ""
valinit5 = ""
layout3 = [
[sg.Text('Données')],
[sg.Text('property', size =(15, 1)), sg.Input(default_text = valinit2,
enable_events=False, key='INPROP')],
[sg.Text('paramètre', size =(15, 1)), sg.Input(default_text = valinit4,
enable_events=False, key='INPAR')],
[sg.Text('valeur', size =(15, 1)), sg.Input(default_text = valinit5,
enable_events=False, key='INVAL')],
[sg.Submit(button_text='Modifier', key = 'modprop'),
sg.Cancel(button_text = 'Annuler', key = 'annulp')]
]
window3 = sg.Window("Données", layout3)
if win_active:
while True:
ev3, vals3 = window3.read()
print("event3: " + str(ev3) + " values3: "+ str(vals3))
if ev3 == sg.WIN_CLOSED or ev3 == 'Exit' or ev3 == 'annulp':
win_active = False
window3.close()
break
if ev3 == 'modprop':
                newnum = valinit1 # keep the card number unchanged
newprop = vals3['INPROP']
newparam = []
if vals3['INPAR'] != '':
newparam = str2list(vals3['INPAR'])
newvalue = vals3['INVAL']
newdata = [newnum, newprop,newparam,newvalue]
                # insert the new line
donnee.listprop.insert(ligne,newdata)
                if ligne != -1: # when modifying, remove the old line
donnee.listprop.pop(ligne+1)
win_active = False
window3.close()
return win_active
def Fenins(win_active, ligd, listp):
#
    # vcard property-insertion function
    # ligd is the number of the vcard where the insertion happens
    # listp is the list of the vcard's properties
#
fin = False
# ligne = int(ligd[0])
ligne = ligd
layout4 = [
[sg.Text('Property à insérer')],
[sg.Text('property', size =(15, 1)), sg.Input(default_text = "",
enable_events=False, key='INPROP')],
[sg.Text('paramètre', size =(15, 1)), sg.Input(default_text = "",
enable_events=False, key='INPAR')],
[sg.Text('valeur', size =(15, 1)), sg.Input(default_text = "",
enable_events=False, key='INVAL')],
[sg.Submit(button_text='Insérer', key = 'insprop'),
sg.Submit(button_text='Fin insertion', key = 'fins'),
sg.Cancel(button_text = 'Annuler', key = 'annuli')]
]
window4 = sg.Window("Property à insérer", layout4)
if win_active:
while True:
ev4, vals4 = window4.read()
print("event4: " + str(ev4) + " values4: "+ str(vals4))
if ev4 == sg.WIN_CLOSED or ev4 == 'Exit' or ev4 == 'annuli':
win_active = False
window4.close()
break
if ev4 == 'fins':
newprop = vals4['INPROP']
newparam = []
if vals4['INPAR'] != '':
newparam = str2list(vals4['INPAR'])
newvalue = vals4['INVAL']
newdata = [ligne, newprop,newparam,newvalue]
listp.append(newdata)
win_active = False
window4.close()
fin = True
if ev4 == 'insprop':
newprop = vals4['INPROP']
newparam = []
if vals4['INPAR'] != '':
newparam = str2list(vals4['INPAR'])
newvalue = vals4['INVAL']
newdata = [ligne, newprop,newparam,newvalue]
listp.append(newdata)
win_active = False
window4.close()
return win_active, fin
|
"""This module contains tests for edgecases.
"""
import copy
import pytest
import sphinx.errors
from sphinx.transforms.post_transforms import ReferencesResolver
def test_not_json_compliant(autodocument):
actual = autodocument(
documenter='pydantic_model',
object_path='target.edgecases.NotJsonCompliant',
options_doc={"model-show-json": True},
deactivate_all=True)
assert actual == [
'',
'.. py:pydantic_model:: NotJsonCompliant',
' :module: target.edgecases',
'',
'',
' .. raw:: html',
'',
' <p><details class="autodoc_pydantic_collapsable_json">',
' <summary>Show JSON schema</summary>',
'',
' .. code-block:: json',
'',
' {',
' "title": "NotJsonCompliant",',
' "type": "object",',
' "properties": {',
' "field": {',
' "title": "Field"',
' }',
' }',
' }',
'',
' .. raw:: html',
'',
' </details></p>',
'',
''
]
def test_current_module_model(parse_rst):
"""Ensure that using current module does not break any features.
This relates to issue #12.
"""
input_rst = ['.. py:currentmodule:: target.example_model',
'',
'.. autopydantic_model:: ExampleModel',
' :model-show-json: True',
' :model-show-config-member: False',
' :model-show-config-summary: True',
' :model-show-validator-members: False',
' :model-show-validator-summary: False',
' :model-hide-paramlist: True',
' :undoc-members: True',
' :members: True',
' :member-order: alphabetical',
' :model-signature-prefix: pydantic_model',
' :field-list-validators: True',
' :field-doc-policy: both',
' :field-show-constraints: True',
' :field-show-alias: True',
' :field-show-default: True',
' :field-signature-prefix: field',
' :validator-signature-prefix: validator',
' :validator-replace-signature: True',
' :validator-list-fields: True',
' :config-signature-prefix: config',
'']
parse_rst(input_rst,
conf={"extensions": ["sphinxcontrib.autodoc_pydantic"]})
def test_current_module_settings(parse_rst):
"""Ensure that using current module does not break any features.
This relates to issue #12.
"""
input_rst = ['.. py:currentmodule:: target.example_setting',
'',
'.. autopydantic_settings:: ExampleSettings',
' :settings-show-json: True',
' :settings-show-config-member: False',
' :settings-show-config-summary: True',
' :settings-show-validator-members: False',
' :settings-show-validator-summary: False',
' :settings-hide-paramlist: True',
' :undoc-members: True',
' :members: True',
' :member-order: alphabetical',
' :settings-signature-prefix: pydantic_settings',
' :field-list-validators: True',
' :field-doc-policy: both',
' :field-show-constraints: True',
' :field-show-alias: True',
' :field-show-default: True',
' :field-signature-prefix: field',
' :validator-signature-prefix: validator',
' :validator-replace-signature: True',
' :validator-list-fields: True',
' :config-signature-prefix: config',
'']
parse_rst(input_rst,
conf={"extensions": ["sphinxcontrib.autodoc_pydantic"]})
def test_any_reference(test_app, monkeypatch):
"""Ensure that `:any:` reference does also work with directives provided
by autodoc_pydantic.
This relates to #3.
"""
failed_targets = set()
func = copy.deepcopy(ReferencesResolver.warn_missing_reference)
def mock(self, refdoc, typ, target, node, domain):
failed_targets.add(target)
return func(self, refdoc, typ, target, node, domain)
with monkeypatch.context() as ctx:
ctx.setattr(ReferencesResolver, "warn_missing_reference", mock)
app = test_app("edgecase-any-reference")
app.build()
assert "does.not.exist" in failed_targets
assert "target.example_setting.ExampleSettings" not in failed_targets
def test_autodoc_member_order(autodocument):
"""Ensure that member order does not change when pydantic models are used.
This relates to #21.
"""
actual = autodocument(
documenter='module',
object_path='target.edgecase_member_order',
options_app={"autodoc_member_order": "bysource"},
options_doc={"members": None},
deactivate_all=True)
assert actual == [
'',
'.. py:module:: target.edgecase_member_order',
'',
'Module doc string.',
'',
'',
'.. py:pydantic_model:: C',
' :module: target.edgecase_member_order',
'',
' Class C',
'',
'',
'.. py:class:: D()',
' :module: target.edgecase_member_order',
'',
' Class D',
'',
'',
'.. py:pydantic_model:: A',
' :module: target.edgecase_member_order',
'',
' Class A',
'',
'',
'.. py:class:: B()',
' :module: target.edgecase_member_order',
'',
' Class B',
'']
def test_typed_field_reference(test_app, monkeypatch):
"""Ensure that typed fields within doc strings successfully reference
pydantic models/settings.
This relates to #27.
"""
failed_targets = set()
func = copy.deepcopy(ReferencesResolver.warn_missing_reference)
def mock(self, refdoc, typ, target, node, domain):
failed_targets.add(target)
return func(self, refdoc, typ, target, node, domain)
with monkeypatch.context() as ctx:
ctx.setattr(ReferencesResolver, "warn_missing_reference", mock)
app = test_app("edgecase-typed-field-reference")
app.build()
def test_json_error_strategy_raise(test_app):
"""Confirm that a non serializable field raises an exception if strategy
is to raise.
This relates to #28.
"""
with pytest.raises(sphinx.errors.ExtensionError):
conf = {"autodoc_pydantic_model_show_json_error_strategy": "raise"}
app = test_app("json-error-strategy", conf=conf)
app.build()
def test_json_error_strategy_warn(test_app, log_capturer):
"""Confirm that a non serializable field triggers a warning during build
process.
This relates to #28.
"""
conf = {"autodoc_pydantic_model_show_json_error_strategy": "warn"}
with log_capturer() as logs:
app = test_app("json-error-strategy", conf=conf)
app.build()
assert logs[0].msg == (
"JSON schema can't be generated for 'example.NonSerializable' "
"because the following pydantic fields can't be serialized properly: "
"['field']."
)
def test_json_error_strategy_coerce(test_app, log_capturer):
"""Confirm that a non serializable field triggers no warning during build
process.
This relates to #28.
"""
conf = {"autodoc_pydantic_model_show_json_error_strategy": "coerce"}
with log_capturer() as logs:
app = test_app("json-error-strategy", conf=conf)
app.build()
assert len(logs) == 0
def test_autodoc_pydantic_model_show_field_summary_not_inherited(autodocument):
"""Ensure that autodoc pydantic respects `:inherited-members:` option when
listing fields in model/settings. More concretely, fields from base classes
    should not be listed by default.
This relates to #32.
"""
result = [
'',
'.. py:pydantic_model:: ModelShowFieldSummaryInherited',
' :module: target.configuration',
'',
' ModelShowFieldSummaryInherited.',
'',
' :Fields:',
' - :py:obj:`field3 (int) <target.configuration.ModelShowFieldSummaryInherited.field3>`',
''
]
actual = autodocument(
documenter='pydantic_model',
object_path='target.configuration.ModelShowFieldSummaryInherited',
options_app={"autodoc_pydantic_model_show_field_summary": True},
deactivate_all=True)
assert result == actual
def test_autodoc_pydantic_model_show_field_summary_inherited(autodocument):
"""Ensure that autodoc pydantic respects `:inherited-members:` option when
listing fields in model/settings. More concretely, fields from base classes
should be listed if `:inherited-members:` is given.
This relates to #32.
"""
result = [
'',
'.. py:pydantic_model:: ModelShowFieldSummaryInherited',
' :module: target.configuration',
'',
' ModelShowFieldSummaryInherited.',
'',
' :Fields:',
' - :py:obj:`field1 (int) <target.configuration.ModelShowFieldSummaryInherited.field1>`',
' - :py:obj:`field2 (str) <target.configuration.ModelShowFieldSummaryInherited.field2>`',
' - :py:obj:`field3 (int) <target.configuration.ModelShowFieldSummaryInherited.field3>`',
''
]
actual = autodocument(
documenter='pydantic_model',
object_path='target.configuration.ModelShowFieldSummaryInherited',
options_app={"autodoc_pydantic_model_show_field_summary": True,
"autodoc_pydantic_model_members": True},
options_doc={"inherited-members": "BaseModel"},
deactivate_all=True)
assert result == actual
def test_autodoc_pydantic_model_show_validator_summary_inherited(autodocument):
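    """Ensure that the validator summary includes validators from base
    classes when `:inherited-members:` is given.
    """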
result = [
'',
'.. py:pydantic_model:: ModelShowValidatorsSummaryInherited',
' :module: target.configuration',
'',
' ModelShowValidatorsSummaryInherited.',
'',
' :Validators:',
' - :py:obj:`check <target.configuration.ModelShowValidatorsSummaryInherited.check>` » :py:obj:`field <target.configuration.ModelShowValidatorsSummaryInherited.field>`',
' - :py:obj:`check_inherited <target.configuration.ModelShowValidatorsSummaryInherited.check_inherited>` » :py:obj:`field <target.configuration.ModelShowValidatorsSummaryInherited.field>`',
''
]
actual = autodocument(
documenter='pydantic_model',
object_path='target.configuration.ModelShowValidatorsSummaryInherited',
options_app={"autodoc_pydantic_model_show_validator_summary": True,
"autodoc_pydantic_model_members": True},
options_doc={"inherited-members": "BaseModel"},
deactivate_all=True)
assert result == actual
def test_autodoc_pydantic_model_show_validator_summary_not_inherited(autodocument):
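    """Ensure that the validator summary omits validators from base classes
    by default.
    """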
result = [
'',
'.. py:pydantic_model:: ModelShowValidatorsSummaryInherited',
' :module: target.configuration',
'',
' ModelShowValidatorsSummaryInherited.',
'',
' :Validators:',
' - :py:obj:`check_inherited <target.configuration.ModelShowValidatorsSummaryInherited.check_inherited>` » :py:obj:`field <target.configuration.ModelShowValidatorsSummaryInherited.field>`',
''
]
actual = autodocument(
documenter='pydantic_model',
object_path='target.configuration.ModelShowValidatorsSummaryInherited',
options_app={"autodoc_pydantic_model_show_validator_summary": True,
"autodoc_pydantic_model_members": True},
deactivate_all=True)
assert result == actual
|
import FWCore.ParameterSet.Config as cms
# AlCaReco for track based alignment using ZMuMu events (including the tracks from the PV)
OutALCARECOTkAlDiMuonAndVertex_noDrop = cms.PSet(
SelectEvents = cms.untracked.PSet(
SelectEvents = cms.vstring('pathALCARECOTkAlDiMuonAndVertex')
),
outputCommands = cms.untracked.vstring(
'keep *_ALCARECOTkAlDiMuon_*_*',
'keep *_ALCARECOTkAlDiMuonVertexTracks_*_*',
'keep L1AcceptBunchCrossings_*_*_*',
'keep L1GlobalTriggerReadoutRecord_gtDigis_*_*',
'keep *_TriggerResults_*_*',
'keep DcsStatuss_scalersRawToDigi_*_*',
'keep *_offlinePrimaryVertices_*_*')
)
OutALCARECOTkAlDiMuonAndVertex = OutALCARECOTkAlDiMuonAndVertex_noDrop.clone()
OutALCARECOTkAlDiMuonAndVertex.outputCommands.insert(0, "drop *")
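# Inserting "drop *" at the front turns the keep statements above into a
# whitelist: in CMSSW outputCommands, later commands take precedence over
# earlier ones.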
|
from urllib.request import Request, urlopen
import json
# Errors
class Error(Exception):
pass
class ApiError(Error):
pass
class GetOnlinePlayers():
def __init__(self, players, count, max):
self.players = players
self.count = count
self.max = max
def __repr__(self):
return f"<GetOnlinePlayers max={self.max}, count={self.count}, players='{self.players}'>"
class GetLastChatMessages():
def __init__(self, messages):
self.messages = messages
def __repr__(self):
return f"<GetLastChatMessages messages='{self.messages}'>"
class GetServerTime():
def __init__(self, timeOfDay, ticks, formated):
self.timeOfDay = timeOfDay
self.ticks = ticks
self.formated = formated
def __repr__(self):
return f"<GetServerTime timeOfDay='{self.timeOfDay}', formated='{self.formated}', ticks={self.ticks}>"
class GetServerWeather():
def __init__(self, weather):
self.weather = weather
def __repr__(self):
return f"<GetServerWeather weather='{self.weather}'>"
# Main class
class SpApi():
def __init__(self, server):
self.server = server
    def __repr__(self):
        return f"<SpApi server={self.server}>"
def parsing(self, fetch):
req = Request(f"https://sp-api.ru/{self.server}/{fetch}", headers={'User-Agent': 'Mozilla/5.0'})
webpage = urlopen(req).read().decode()
        try:
            api = json.loads(webpage)
        except Exception as e:
            raise ApiError(str(e))
        if api.get('error'):
            raise ApiError("When requesting the API, the 'error' key returned True")
        return api
# Public API methods
def getLastChatMessages(self, limit=50):
return GetLastChatMessages(self.parsing("chat")['messages'][:limit])
def getOnlinePlayers(self):
parse = self.parsing("online")
return GetOnlinePlayers(parse['players'], parse['count'], parse['max'])
    def getServerTime(self):
        parse = self.parsing("time")
        # Convert game ticks to a wall-clock string: 1000 ticks = 1 hour and
        # tick 0 corresponds to 06:00. divmod carries overflowing minutes into
        # the hour, avoiding results like "6:60" or an hour rounded up.
        total_minutes = (round(parse['ticks'] * 0.06) + 6 * 60) % (24 * 60)
        hours, minutes = divmod(total_minutes, 60)
        formated = f"{hours}:{minutes:02d}"
        return GetServerTime(parse['time'], parse['ticks'], formated)
def getServerWeather(self):
return GetServerWeather(self.parsing("weather")['weather'])
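# Usage sketch (the server name 'sp' below is a placeholder, not necessarily a
# real sp-api.ru server):
if __name__ == '__main__':
    api = SpApi('sp')
    print(api.getOnlinePlayers())
    print(api.getServerTime())
    print(api.getServerWeather())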
|
#from compas_fab.robots import Robot
from compas.geometry import Frame, Transformation, Vector
from compas.geometry.transformations.translation import Translation
from compas.robots import Joint
# from compas.datastructures import Mesh
from compas_fab.robots import Configuration
from integral_timber_joints.tools.gripper import Gripper
from integral_timber_joints.tools.gripper import Gripper
class Clamp (Gripper):
""" Clamp object represents a robotic clamp that has both
gripper jaw and clamp jaw. This is a subclass of Gripper.
The object keep track of the RobotModel and the kinematic configuration.
"""
def __init__(self, name,
type_name="Clamp",
tool_coordinate_frame=None,
tool_pick_up_frame=None,
tool_storage_frame=None,
gripper_jaw_position_min=0,
gripper_jaw_position_max=100,
clamp_jaw_position_min=0,
clamp_jaw_position_max=100,
approach_vector=None):
# Call Tool init
if tool_coordinate_frame is None:
tool_coordinate_frame = Frame.worldXY()
super(Clamp, self).__init__(
name,
type_name,
tool_coordinate_frame,
tool_pick_up_frame,
tool_storage_frame,
gripper_jaw_position_min,
gripper_jaw_position_max,
approach_vector
)
# --------------------------------------------------------
        # Extrinsic properties / state (Gripper Specific)
# --------------------------------------------------------
self.clamp_jaw_position = clamp_jaw_position_min # type: float
# --------------------------------------------------------
        # Intrinsic properties (Gripper Specific)
# --------------------------------------------------------
self.clamp_jaw_limits = (clamp_jaw_position_min, clamp_jaw_position_max) # type: tuple[float, float]
# --------------------------------------------------------------
# Functions to get and set attributes from attributes dictionary.
# --------------------------------------------------------------
@property
def clamp_jaw_position(self):
return self.attributes.get('clamp_jaw_position', 0)
@clamp_jaw_position.setter
def clamp_jaw_position(self, v):
self.attributes['clamp_jaw_position'] = v
@property
def clamp_jaw_limits(self):
return self.attributes.get('clamp_jaw_limits', (0, 100))
@clamp_jaw_limits.setter
def clamp_jaw_limits(self, v):
self.attributes['clamp_jaw_limits'] = v
@property
def jaw_block_vectors(self):
""" List of Vectors for computing jaw_approach directions.
Vector direction is the direction where the beam cannot move towards. """
return self.attributes.get('jaw_block_vectors', [])
@jaw_block_vectors.setter
def jaw_block_vectors(self, v):
self.attributes['jaw_block_vectors'] = v
@property
def jaw_clearance_vector(self):
""" A directional vector describing how much distance and direction
the beam-in-jaw has to move to clear the jaw if moved out.
"""
return self.attributes.get('jaw_clearance_vector', Vector(0, 0, 0))
@jaw_clearance_vector.setter
def jaw_clearance_vector(self, v):
self.attributes['jaw_clearance_vector'] = v
@property
def detachretract1_vector(self):
""" A directional vector describing how much distance and direction
the beam-in-jaw has to move to clear the jaw if moved out.
"""
return self.attributes.get('detachretract1_vector', Vector(0, 0, 0))
@detachretract1_vector.setter
def detachretract1_vector(self, v):
self.attributes['detachretract1_vector'] = v
@property
def detachretract2_vector(self):
""" A directional vector describing how much distance and direction
the beam-in-jaw has to move to clear the jaw if moved out.
"""
return self.attributes.get('detachretract2_vector', Vector(0, 0, 0))
@detachretract2_vector.setter
def detachretract2_vector(self, v):
self.attributes['detachretract2_vector'] = v
@property
def approach1_vector(self):
""" A directional vector describing how much distance and direction
the beam-in-jaw has to move to clear the jaw if moved out.
"""
return self.attributes.get('approach1_vector', Vector(0, 0, 0))
@approach1_vector.setter
def approach1_vector(self, v):
self.attributes['approach1_vector'] = v
@property
def approach2_vector(self):
""" A directional vector describing how much distance and direction
the beam-in-jaw has to move to clear the jaw if moved out.
"""
return self.attributes.get('approach2_vector', Vector(0, 0, 0))
@approach2_vector.setter
def approach2_vector(self, v):
        self.attributes['approach2_vector'] = v
# ----------------------------------
    # Functions for computing the complex approach and retract frames
# ----------------------------------
    @property
def tool_storage_approach_frame2(self):
# type: () -> Frame
"""Compute the approach frame in wcf.
Part of PlaceClampToStorageAction
"""
approach2_vector_wcf = self.tool_storage_frame.to_world_coordinates(self.approach2_vector)
return self.tool_storage_frame.transformed(Translation.from_vector(approach2_vector_wcf.scaled(-1)))
    @property
def tool_storage_approach_frame1(self):
# type: () -> Frame
"""Compute the approach frame in wcf.
Part of PlaceClampToStorageAction
"""
approach1_vector_wcf = self.tool_storage_frame.to_world_coordinates(self.approach1_vector)
return self.tool_storage_approach_frame2.transformed(Translation.from_vector(approach1_vector_wcf.scaled(-1)))
    @property
def tool_storage_retract_frame1(self):
# type: () -> Frame
""" Compute the retract frame in wcf
Part of PickClampFromStorageAction
"""
detachretract1_vector_wcf = self.tool_storage_frame.to_world_coordinates(self.detachretract1_vector)
return self.tool_storage_frame.transformed(Translation.from_vector(detachretract1_vector_wcf))
    @property
def tool_storage_retract_frame2(self):
# type: () -> Frame
""" Compute the retract frame in wcf
Part of PickClampFromStorageAction
"""
detachretract2_vector_wcf = self.tool_storage_frame.to_world_coordinates(self.detachretract2_vector)
return self.tool_storage_retract_frame1.transformed(Translation.from_vector(detachretract2_vector_wcf))
# ----------------------------------
# Functions for kinematic state
# ----------------------------------
# --------------------------------------------------------
# State Setting Functions
# --------------------------------------------------------
def _set_kinematic_state(self, state_dict):
self.clamp_jaw_position = state_dict['clamp_jaw_position']
super(Clamp, self)._set_kinematic_state(state_dict)
def _get_kinematic_state(self):
state_dict = super(Clamp, self)._get_kinematic_state()
state_dict.update({'clamp_jaw_position': self.clamp_jaw_position})
return state_dict
@property
def current_configuration(self):
"""Gets the current Configuration of the joints in the underlying RobotModel
This can be used to update the artist or update the robot.
"""
values, types, joint_names = [], [], []
for joint in self.get_configurable_joints():
if joint.name.startswith('joint_gripper_'):
values.append(self.gripper_jaw_position)
types.append(Joint.PRISMATIC)
joint_names.append(joint.name)
if joint.name.startswith('joint_clamp_'):
values.append(self.clamp_jaw_position)
types.append(Joint.PRISMATIC)
joint_names.append(joint.name)
return Configuration(values, types, joint_names)
# --------------------------------------------------------
    # Convenience Functions
# --------------------------------------------------------
@property
def jaw_blocking_vectors_in_wcf(self):
T = Transformation.from_frame(self.current_frame)
return [vec.transformed(T) for vec in self.jaw_block_vectors]
@property
def jaw_clearance_vectors_in_wcf(self):
T = Transformation.from_frame(self.current_frame)
return self.jaw_clearance_vector.transformed(T)
def open_gripper(self):
self.gripper_jaw_position = self.gripper_jaw_limits[1]
def close_gripper(self):
self.gripper_jaw_position = self.gripper_jaw_limits[0]
def open_clamp(self):
self.clamp_jaw_position = self.clamp_jaw_limits[1]
def close_clamp(self):
self.clamp_jaw_position = self.clamp_jaw_limits[0]
# --------------------------------------------------------
# Factory to construct Clamp
# --------------------------------------------------------
def Lap90ClampFactory(
name,
type_name,
gripper_jaw_position_min,
gripper_jaw_position_max,
clamp_jaw_position_min,
clamp_jaw_position_max,
tool_coordinate_frame, # Ref to T0CF (TCF ~= TCP)
tool_pick_up_frame, # Ref to T0CF
tool_storage_frame, # Ref to WCF
mesh_gripper_base,
mesh_gripper_jaw_l,
mesh_gripper_jaw_r,
mesh_clamp_jaw_l,
mesh_clamp_jaw_r,
approach_vector,
detachretract1_vector,
detachretract2_vector,
):
""" A Parallel gripper will have a base and two gripper jaw.
Modelling guide
---------------
The left jaws opens towards -Y direction.
The right jaw opens towards +Y direction.
The clamp jaw opens towards +Z direction.
The clamp jaw closes (clamping) towards -Z direction.
The clamp jaw opening faces +X direction.
"""
robot_model = Clamp(name, type_name)
robot_model.gripper_jaw_limits = (gripper_jaw_position_min, gripper_jaw_position_max)
robot_model.clamp_jaw_limits = (clamp_jaw_position_min, clamp_jaw_position_max)
robot_model.tool_coordinate_frame = tool_coordinate_frame
robot_model.tool_pick_up_frame = tool_pick_up_frame
robot_model.tool_storage_frame = tool_storage_frame
robot_model.approach_vector = approach_vector # This vector is ref to t0cf
robot_model.detachretract1_vector = detachretract1_vector # This vector is ref to t0cf
robot_model.detachretract2_vector = detachretract2_vector # This vector is ref to t0cf
#world_link = robot_model.add_link('world')
gripper_base = robot_model.add_link('gripper_base', visual_meshes=mesh_gripper_base, collision_meshes=mesh_gripper_base)
gripper_jaw_l = robot_model.add_link('gripper_jaw_l', visual_meshes=mesh_gripper_jaw_l, collision_meshes=mesh_gripper_jaw_l)
gripper_jaw_r = robot_model.add_link('gripper_jaw_r', visual_meshes=mesh_gripper_jaw_r, collision_meshes=mesh_gripper_jaw_r)
clamp_jaw_l = robot_model.add_link('clamp_jaw_l', visual_meshes=mesh_clamp_jaw_l, collision_meshes=mesh_clamp_jaw_l)
clamp_jaw_r = robot_model.add_link('clamp_jaw_r', visual_meshes=mesh_clamp_jaw_r, collision_meshes=mesh_clamp_jaw_r)
#robot_model.add_joint('world_base_fixed_joint', Joint.FIXED, world_link, base_link)
robot_model.add_joint('joint_gripper_jaw_l', Joint.PRISMATIC, gripper_base, gripper_jaw_l, axis=[0, -1, 0], limit=robot_model.gripper_jaw_limits)
robot_model.add_joint('joint_gripper_jaw_r', Joint.PRISMATIC, gripper_base, gripper_jaw_r, axis=[0, 1, 0], limit=robot_model.gripper_jaw_limits)
robot_model.add_joint('joint_clamp_jaw_l', Joint.PRISMATIC, gripper_jaw_l, clamp_jaw_l, axis=[0, 0, 1], limit=robot_model.clamp_jaw_limits)
robot_model.add_joint('joint_clamp_jaw_r', Joint.PRISMATIC, gripper_jaw_r, clamp_jaw_r, axis=[0, 0, 1], limit=robot_model.clamp_jaw_limits)
# A constant list of vectors (ref t0cf) where the beam-in-jaw is blocked by the jaw
robot_model.jaw_block_vectors = [Vector(0, 0, 1.0), Vector(0, 0, -1.0), Vector(-1.0, 0, 0)]
# A directional vector describing how much distance the beam-in-jaw has to move to clear the jaw if moved out.
robot_model.jaw_clearance_vector = Vector(110, 0, 0)
return robot_model
def CL3Factory(
name,
type_name,
gripper_jaw_position_min,
gripper_jaw_position_max,
clamp_jaw_position_min,
clamp_jaw_position_max,
tool_coordinate_frame, # Ref to T0CF (TCF ~= TCP)
tool_pick_up_frame, # Ref to T0CF
tool_storage_frame, # Ref to WCF
base_mesh,
gripper_jaw_mesh,
clamp_jaw_mesh,
approach1_vector,
approach2_vector,
detachretract1_vector,
detachretract2_vector,
gripper_drill_lines,
gripper_drill_diameter
):
""" A Parallel gripper will have a base and two gripper jaw.
Modelling guide
---------------
The gripper jaws opens towards +Z direction of tool_coordinate_frame.
The clamp jaws opens towards +Z direction of tool_coordinate_frame.
The joints name should start with 'joint_gripper_' or 'joint_clamp_'
"""
robot_model = Clamp(name, type_name)
robot_model.gripper_jaw_limits = (gripper_jaw_position_min, gripper_jaw_position_max)
robot_model.clamp_jaw_limits = (clamp_jaw_position_min, clamp_jaw_position_max)
robot_model.tool_coordinate_frame = tool_coordinate_frame
robot_model.tool_pick_up_frame = tool_pick_up_frame
robot_model.tool_storage_frame = tool_storage_frame
robot_model.approach1_vector = approach1_vector # This vector is ref to t0cf
robot_model.approach2_vector = approach2_vector # This vector is ref to t0cf
robot_model.detachretract1_vector = detachretract1_vector # This vector is ref to t0cf
robot_model.detachretract2_vector = detachretract2_vector # This vector is ref to t0cf
robot_model.gripper_drill_lines = gripper_drill_lines
robot_model.gripper_drill_diameter = gripper_drill_diameter
#world_link = robot_model.add_link('world')
gripper_base = robot_model.add_link('gripper_base', visual_meshes=base_mesh, collision_meshes=base_mesh)
gripper_jaw = robot_model.add_link('gripper_jaw', visual_meshes=gripper_jaw_mesh, collision_meshes=gripper_jaw_mesh)
clamp_jaw = robot_model.add_link('clamp_jaw', visual_meshes=clamp_jaw_mesh, collision_meshes=clamp_jaw_mesh)
#robot_model.add_joint('world_base_fixed_joint', Joint.FIXED, world_link, base_link)
robot_model.add_joint('joint_gripper_jaw', Joint.PRISMATIC, gripper_base, gripper_jaw, axis=tool_coordinate_frame.zaxis, limit=robot_model.gripper_jaw_limits)
robot_model.add_joint('joint_clamp_jaw', Joint.PRISMATIC, gripper_base, clamp_jaw, axis=tool_coordinate_frame.zaxis, limit=robot_model.clamp_jaw_limits)
# A constant list of vectors (ref t0cf) where the beam-in-jaw is blocked by the jaw.
# Vector direction is the contact surface normal from the in-jaw beam side
# (alternatively) Vector direction is the direction where the beam cannot move towards.
robot_model.jaw_block_vectors = [tool_coordinate_frame.zaxis.copy(), tool_coordinate_frame.zaxis.scaled(-1), Vector(0, 0, -1)]
# A directional vector describing how much distance the beam-in-jaw has to move to clear the jaw if moved out.
robot_model.jaw_clearance_vector = tool_coordinate_frame.yaxis.scaled(100)
return robot_model
if __name__ == "__main__":
c = Clamp('c1')
print(c)
pass
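    # A small state sketch: the open_/close_ helpers drive the jaw positions
    # from the configured limits (the limit values below are illustrative).
    c2 = Clamp('c2', clamp_jaw_position_min=0, clamp_jaw_position_max=80)
    c2.open_clamp()
    assert c2.clamp_jaw_position == 80
    c2.close_clamp()
    assert c2.clamp_jaw_position == 0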
|
#!/usr/bin/python
# -*- coding: UTF-8 -*-
import xlwings as xw
import sys, os
def isPointinPolygon(point, rangelist):
    # Bounding-box pre-check: if the point lies outside the polygon's bounding rectangle, return False immediately
lnglist = []
latlist = []
for i in range(len(rangelist)-1):
lnglist.append(rangelist[i][0])
latlist.append(rangelist[i][1])
# print(lnglist, latlist)
maxlng = max(lnglist)
minlng = min(lnglist)
maxlat = max(latlist)
minlat = min(latlist)
# print(maxlng, minlng, maxlat, minlat)
if (point[0] > maxlng or point[0] < minlng or
point[1] > maxlat or point[1] < minlat):
return False
count = 0
point1 = rangelist[0]
for i in range(1, len(rangelist)):
point2 = rangelist[i]
        # The point coincides with a polygon vertex
        if (point[0] == point1[0] and point[1] == point1[1]) or (point[0] == point2[0] and point[1] == point2[1]):
            print("on a polygon vertex")
            return False
        # Check whether the edge's endpoints straddle the horizontal ray (-inf, lat) -> (lng, lat); if not, no intersection is possible
if (point1[1] < point[1] and point2[1] >= point[1]) or (point1[1] >= point[1] and point2[1] < point[1]):
            # Longitude where the edge crosses the ray, compared with the point's longitude
            point12lng = point2[0] - (point2[1] - point[1]) * (point2[0] - point1[0])/(point2[1] - point1[1])
            # print(point12lng)
            # The point lies on a polygon edge
            if (point12lng == point[0]):
                print("point on a polygon edge")
                return False
            if (point12lng < point[0]):
                count += 1
point1 = point2
# print(count)
    # An odd crossing count means the point is inside the polygon
    return count % 2 == 1
def readfloats(rows):
arr = []
for x in rows:
if x.value is None:
break
else:
arr.append(float(x.value))
return arr
def readValues(rows):
arr = []
for x in rows:
if x.value is None:
break
else:
arr.append(x.value)
return arr
def startMatch(xiaoquPath, jingqingPath):
apps = xw.apps
app = None
needClose = False
if apps.count == 0:
app = apps.add()
needClose = True
else:
app = apps.active
jqb = xw.Book(jingqingPath)
xqb = xw.Book(xiaoquPath)
jqsht = jqb.sheets[0]
lngs = readfloats(jqsht.cells.expand().columns[14][1:])
lats = readfloats(jqsht.cells.expand().columns[15][1:])
xqsht = xqb.sheets[0]
ids = readValues(xqsht.cells.expand().columns[0][1:])
names = readValues(xqsht.cells.expand().columns[1][1:])
tmpPolygons = readValues(xqsht.cells.expand().columns[2][1:])
polygons = []
for p in tmpPolygons:
tmpLatlngs = p.split(',')
latlngs = []
for s in tmpLatlngs:
x = s.split(' ')
latlngs.append([float(x[0]), float(x[1])])
polygons.append(latlngs)
for i in range(len(lngs)):
lng = lngs[i]
lat = lats[i]
for j in range(len(polygons)):
polygon = polygons[j]
if isPointinPolygon([float(lat), float(lng)], polygon):
print(i)
jqsht.range('R%d' % (i+2)).value = ids[j]
jqsht.range('S%d' % (i+2)).value = names[j]
jqb.save()
jqb.close()
xqb.close()
if needClose:
app.quit()
if __name__ == '__main__':
# polygon = [[119.15113, 36.70581],[119.150309, 36.72011],[119.178904, 36.721079],[119.204211, 36.722055],[119.20412, 36.71377],[119.19584, 36.71408],[119.19584, 36.71011],[119.19091, 36.70979],[119.19043, 36.70708],[119.16784, 36.70645],[119.16736, 36.71058],[119.16132, 36.71011],[119.16084, 36.70565],[119.15113, 36.70581],[119.150309, 36.72011],[119.178904, 36.721079],[119.204211, 36.722055],[119.20412, 36.71377],[119.19584, 36.71408],[119.19584, 36.71011],[119.19091, 36.70979],[119.19043, 36.70708],[119.16784, 36.70645],[119.16736, 36.71058],[119.16132, 36.71011],[119.16084, 36.70565],[119.15113, 36.70581],[119.150309, 36.72011],[119.178904, 36.721079],[119.204211, 36.722055],[119.20412, 36.71377],[119.19584, 36.71408],[119.19584, 36.71011],[119.19091, 36.70979],[119.19043, 36.70708],[119.16784, 36.70645],[119.16736, 36.71058],[119.16132, 36.71011],[119.16084, 36.70565],[119.15113, 36.70581],[119.150309, 36.72011],[119.178904, 36.721079],[119.204211, 36.722055],[119.20412, 36.71377],[119.19584, 36.71408],[119.19584, 36.71011],[119.19091, 36.70979],[119.19043, 36.70708],[119.16784, 36.70645],[119.16736, 36.71058],[119.16132, 36.71011],[119.16084, 36.70565],[119.15113, 36.70581]]
# print(polygon)
# print(isPointinPolygon([119.150308, 36.72011], polygon))
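    # Quick sanity check for isPointinPolygon (a closed square; the last
    # vertex repeats the first, as the function expects):
    square = [[0, 0], [10, 0], [10, 10], [0, 10], [0, 0]]
    print(isPointinPolygon([5, 5], square))   # True
    print(isPointinPolygon([15, 5], square))  # False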
if len(sys.argv) < 3:
        print(u'Please provide the residential-area and police-incident file paths')
exit(-1)
xiaoqupath = sys.argv[1].decode("utf-8")
jingqingpath = sys.argv[2].decode("utf-8")
startMatch(xiaoqupath, jingqingpath)
|
#!/usr/bin/env python
"""
A ROS node to detect objects via TensorFlow Object Detection API.
Author:
Cagatay Odabasi -- cagatay.odabasi@ipa.fraunhofer.de
"""
# ROS
import rospy
from sensor_msgs.msg import Image
from cv_bridge import CvBridge, CvBridgeError
from detector import Detector
from tf_server.srv import TF_detect
class Tensorflow_detect_node(object):
def __init__(self):
super(Tensorflow_detect_node, self).__init__()
# init the node
rospy.init_node('tensorflow_detect_node', anonymous=False)
(model_name, num_of_classes, label_file, camera_namespace, video_name,
num_workers) \
= self.get_parameters()
# Create Detector
self._detector = Detector(model_name, num_of_classes, label_file,
num_workers)
self._bridge = CvBridge()
self.sub_rgb = rospy.Subscriber('usb_cam/image_raw',Image, self.rgb_callback, queue_size=1, buff_size=2**24)
self.pub_detections_image = rospy.Publisher('/result_ripe', Image, queue_size=1)
# spin
rospy.spin()
def get_parameters(self):
"""
Gets the necessary parameters from parameter server
Args:
Returns:
(tuple) (model name, num_of_classes, label_file)
"""
model_name = rospy.get_param("~model_name")
num_of_classes = rospy.get_param("~num_of_classes")
label_file = rospy.get_param("~label_file")
camera_namespace = 'usb_cam/image_raw'
# camera_namespace = rospy.get_param("~camera_namespace")
video_name = rospy.get_param("~video_name")
num_workers = rospy.get_param("~num_workers")
return (model_name, num_of_classes, label_file, \
camera_namespace, video_name, num_workers)
def rgb_callback(self, data):
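        # Forward the raw image to the external 'tf_detect_request' service,
        # then run the local detector on the same frame and publish the
        # annotated image on /result_ripe.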
rospy.wait_for_service('tf_detect_request')
try:
tensorflow_detect = rospy.ServiceProxy('tf_detect_request', TF_detect)
resp1 = tensorflow_detect(data)
print 'resp1.res '+str(resp1.res)
cv_image = self._bridge.imgmsg_to_cv2(data, "bgr8")
(output_dict, category_index) = self._detector.detect(cv_image)
image_np = self._detector.visualize(cv_image, output_dict)
self.pub_detections_image.publish(self._bridge.cv2_to_imgmsg(image_np, "bgr8"))
except rospy.ServiceException, e:
print "Service call failed: %s" % e
def main():
""" main function
"""
node = Tensorflow_detect_node()
if __name__ == '__main__':
main()
|
""" Module containing serializers.
These functions are provided as definitions of the basic interface
that all serializers should implement.
This abstraction is intended to allow easily switching from one form
of serialization to another.
"""
import json
from message import Message
class BaseSerializer(object):
    @staticmethod
    def unpack(dump: bytes) -> Message:  # pylint: disable=unused-argument
        """ Deserialize to Message.
        """
        raise NotImplementedError(
            "Unpack method in the serializer module is not implemented. "
            "Use the methods contained in a submodule of serializer, "
            "such as json_serializer.")
    @staticmethod
    def pack(msg: Message) -> bytes:  # pylint: disable=unused-argument
        """ Serialize to bytes.
        """
        raise NotImplementedError(
            "Pack method in the serializer module is not implemented. "
            "Use the methods contained in a submodule of serializer, "
            "such as json_serializer.")
class JSONSerializer(BaseSerializer):
""" Serializer using json as i/o format.
"""
    @staticmethod
    def unpack(dump: bytes) -> Message:
        """ Deserialize from a json string to Message, if it looks like a Message.
        Returns a dictionary otherwise.
        """
        def as_message(dct):
            return Message(dct)
        return json.loads(dump, object_hook=as_message)
    @staticmethod
    def pack(msg: Message) -> bytes:
        """ Serialize from Message to a json string, or from a dictionary to a json string.
        """
        return msg.as_json()
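# A minimal round-trip sketch (assumes message.Message accepts a dict and
# provides an as_json() method, as used above):
if __name__ == '__main__':
    msg = JSONSerializer.unpack(b'{"type": "ping"}')
    print(JSONSerializer.pack(msg))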
|
class Vector:
    def __init__(self, vals):
        """
        Constructor
        self: a reference to the object we are creating
        vals: a list of integers which are the contents of our vector
        """
        self.vals = vals  # Using self to create a field/property on the object
        print("Assigned values ", vals, " to vector.")
    def __str__(self):
        """
        String function
        Converts the object to a string in a readable format for programmers
        """
        return str(self.vals)  # Returns the contents of the vector
vec = Vector([2, 3, 2])
print(str(vec)) # [2, 3, 2]
|
import json
import random
import string
import traceback
from tests.cephfs.cephfs_utilsV1 import FsUtils
from utility.log import Log
log = Log(__name__)
def run(ceph_cluster, **kw):
"""
Test Cases Covered:
CEPH-83573867 - Create 4-5 Filesystem randomly on different MDS daemons
Pre-requisites :
    1. We need at least one client node to execute this test case
    Test Case Flow:
    1. Create 4 file systems with default values
    2. Validate the mds counts and the file systems counts
    3. Mount all the file systems using fuse mount
4. Run IOs on the FSs
"""
try:
fs_util = FsUtils(ceph_cluster)
config = kw.get("config")
clients = ceph_cluster.get_ceph_objects("client")
build = config.get("build", config.get("rhbuild"))
fs_util.prepare_clients(clients, build)
fs_util.auth_list(clients)
log.info("checking Pre-requisites")
if not clients:
            log.info(
                f"This test requires at least 1 client node. This run has only {len(clients)} clients"
            )
return 1
client1 = clients[0]
mounting_dir = "".join(
random.choice(string.ascii_lowercase + string.digits)
for _ in list(range(10))
)
for i in range(1, 5):
out, rc = client1.exec_command(
sudo=True, cmd="ceph orch ps --daemon_type mds -f json"
)
daemon_ls_before = json.loads(out.read().decode())
daemon_count_before = len(daemon_ls_before)
client1.exec_command(
sudo=True,
cmd=f"ceph fs volume create cephfs_{i}",
check_ec=False,
)
fs_util.wait_for_mds_process(client1, f"cephfs_{i}")
out_after, rc = client1.exec_command(
sudo=True, cmd="ceph orch ps --daemon_type mds -f json"
)
daemon_ls_after = json.loads(out_after.read().decode())
daemon_count_after = len(daemon_ls_after)
            assert daemon_count_after > daemon_count_before, (
                f"Daemon count did not increase after creating the FS. Count before: {daemon_count_before}; "
                f"after: {daemon_count_after}. "
                "Expected more MDS daemons after the new file system was created."
            )
fuse_mounting_dir = f"/mnt/cephfs_fuse{mounting_dir}_{i}/"
fs_util.fuse_mount(
[clients[0]], fuse_mounting_dir, extra_params=f"--client_fs cephfs_{i}"
)
client1.exec_command(
sudo=True,
cmd=f"python3 /home/cephuser/smallfile/smallfile_cli.py --operation create --threads 10 --file-size 400"
f" --files 100 --files-per-dir 10 --dirs-per-dir 1 --top "
f"{fuse_mounting_dir}",
long_running=True,
)
return 0
except Exception as e:
log.info(e)
log.info(traceback.format_exc())
return 1
finally:
for i in range(1, 5):
fs_util.client_clean_up(
"umount",
fuse_clients=[clients[0]],
mounting_dir=f"/mnt/cephfs_fuse{mounting_dir}_{i}/",
)
client1.exec_command(
sudo=True, cmd="ceph config set mon mon_allow_pool_delete true"
)
[fs_util.remove_fs(client1, f"cephfs_{i}") for i in range(1, 5)]
|
class TemplateError(Exception):
"""
General template error
"""
__msg = ""
def __init__(self, what):
super(TemplateError, self).__init__()
self.__msg = what
def __str__(self):
return str(self.__msg)
|
"""
@Time: 2020/9/22 10:41
@Author: Zhirui(Alex) Yang
@E-mail: 1076830028@qq.com
@Program:
"""
import os
import logging
from datetime import datetime
from utils.config import LOG_LEVEL, DATA_DIR
logger = logging.getLogger("TAROT LOG")
level = logging.getLevelName(LOG_LEVEL)
logger.setLevel(level)
fmt = "TAROT LOG: %(asctime)s [%(levelname)s] %(message)s"
date_fmt = "%Y-%m-%d %H:%M:%S"
logger_path = os.path.join(DATA_DIR, 'log')
# create the log directory (and parents) if needed
os.makedirs(logger_path, exist_ok=True)
# create logger file with timestamp
cur_datetime = datetime.now().strftime('%Y-%m-%d %H-%M-%S')
logger_file = os.path.join(logger_path, f"{cur_datetime}.log")
logging.basicConfig(format=fmt, datefmt=date_fmt,
filename=logger_file)
if __name__ == '__main__':
logger.info("Log configured successfully!")
|
# -*- coding: utf-8 -*-
def note_generate():
    return {'title': 'Notification',
            'time': '11.30.2021',
            'content': 'Hello, I am the developer of this system. The site has '
                       'now been tested and implements the core financial-distress '
                       'prediction features; you can use the Demo to try the company '
                       'data we provide. We deployed with uWSGI+Nginx, '
                       'wrote the project development documentation, '
                       'and published the source code on GitHub. '
                       'Due to time constraints, I am moving on to another project. '
                       'Best wishes! ——Sylvan Ding'}
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('flisol_event', '0004_auto_20141229_2309'),
]
operations = [
migrations.AddField(
model_name='flisolattendance',
name='comment',
field=models.TextField(help_text='let us know how you can help best', verbose_name='comment', blank=True),
preserve_default=True,
),
migrations.AddField(
model_name='flisolmachine',
name='comment',
field=models.TextField(verbose_name='comment post installation', blank=True),
preserve_default=True,
),
migrations.AddField(
model_name='flisolmachine',
name='flisol_instance',
field=models.ForeignKey(related_name='machines', default=1, verbose_name='instance', to='flisol_event.FlisolInstance'),
preserve_default=False,
),
migrations.AlterField(
model_name='flisolattendance',
name='flisol_instance',
field=models.ForeignKey(related_name='attendants', verbose_name='instance', to='flisol_event.FlisolInstance'),
preserve_default=True,
),
]
|
#! python2
#coding:utf8
from Core.Thread import *
if __name__=='__main__':
th = MainThread()
th.start()
|
import pytest
from flask_security.utils import hash_password
from cafebabel import create_app
from cafebabel.commands import auth_fixtures, drop_collections
from cafebabel.articles.models import Article
from cafebabel.articles.tags.models import Tag
from cafebabel.articles.translations.models import Translation
from cafebabel.users.models import User, UserProfile
test_app = create_app('config.TestingConfig')
def pytest_runtest_setup():
auth_fixtures(test_app)
ds = test_app.user_datastore
with test_app.app_context():
user = ds.create_user(email='user@example.com',
profile=UserProfile(name='Testy Tester'),
password=hash_password('password'))
ds.activate_user(user)
user2 = ds.create_user(email='user2@example.com',
password=hash_password('password'))
ds.activate_user(user2)
ds.commit()
def pytest_runtest_teardown():
drop_collections()
@pytest.fixture(scope='session')
def app(request):
"""Session-wide test `Flask` application."""
ctx = test_app.app_context()
ctx.push()
def tearDown():
ctx.pop()
request.addfinalizer(tearDown)
return test_app
@pytest.fixture
def user():
return User.objects.get(email='user@example.com')
@pytest.fixture
def user2():
return User.objects.get(email='user2@example.com')
@pytest.fixture
def editor():
return User.objects.get(email=test_app.config['EDITOR_EMAILS']['en'])
@pytest.fixture
def tag():
return Tag.objects.create(
name='Wonderful',
summary='summary text',
language=test_app.config['LANGUAGES'][0][0])
@pytest.fixture
def article(user):
return Article.objects.create(
title='article title',
summary='summary text',
authors=[user],
language=test_app.config['LANGUAGES'][0][0],
body='body text')
@pytest.fixture
def published_article(user):
return Article.objects.create(
title='article title',
summary='summary text',
authors=[user],
language=test_app.config['LANGUAGES'][0][0],
body='body text',
status='published')
@pytest.fixture
def translation(user, article):
language = test_app.config['LANGUAGES'][1][0]
return Translation.objects.create(
title='title',
summary='summary text',
language=language,
body='body text',
authors=[user],
translators=[user.id],
original_article=article.id)
@pytest.fixture
def published_translation(user, published_article):
language = test_app.config['LANGUAGES'][1][0]
return Translation.objects.create(
title='title',
summary='summary text',
language=language,
body='body text',
authors=[user],
translators=[user.id],
original_article=published_article.id,
status='published')
|
"""
Init file for the SpatialCluster library.
"""
version_info = (0, 0, 46)
__version__ = ".".join([str(x) for x in version_info])
__all__ = [
    "methods",
    "preprocess",
    "visualization",
    "utils",
]
|
import unittest, re, logging
import tbot.testbase
from tbot.utils import link
class Test(unittest.TestCase):
def test_link(self):
self.assertEqual(link.find_links('not a link'), [])
self.assertEqual(link.find_links('3.1'), [])
self.assertEqual(link.find_links('3.'), [])
self.assertEqual(link.find_links('test.'), [])
self.assertEqual(link.find_links('test 3.1a'), [])
self.assertEqual(link.find_links('127,123.4'), [])
self.assertEqual(link.find_links('this.that'), [])
tests = [
('127.0.0.1', ('127.0.0.1', '', '127.0.0.1', '', '')),
('127.0.0.1:27015', ('127.0.0.1:27015', '', '127.0.0.1', '', '')),
('test.com', ('test.com', '', 'test.com', '', '')),
('test.net/test.html', ('test.net', '', 'test.net', '/test.html', '')),
('cs.money', ('cs.money', '', 'cs.money', '', '')),
('test.abbott', ('test.abbott', '', 'test.abbott', '', '')),
('test.abogado', ('test.abogado', '', 'test.abogado', '', '')),
('test.ac', ('test.ac', '', 'test.ac', '', '')),
('test.academy', ('test.academy', '', 'test.academy', '', '')),
('test.accountant', ('test.accountant', '', 'test.accountant', '', '')),
('test.accountants', ('test.accountants', '', 'test.accountants', '', '')),
('test.active', ('test.active', '', 'test.active', '', '')),
('test.actor', ('test.actor', '', 'test.actor', '', '')),
('test.ad', ('test.ad', '', 'test.ad', '', '')),
('test.ads', ('test.ads', '', 'test.ads', '', '')),
('test.adult', ('test.adult', '', 'test.adult', '', '')),
('test.ae', ('test.ae', '', 'test.ae', '', '')),
('test.aero', ('test.aero', '', 'test.aero', '', '')),
('test.af', ('test.af', '', 'test.af', '', '')),
('test.afl', ('test.afl', '', 'test.afl', '', '')),
('test.ag', ('test.ag', '', 'test.ag', '', '')),
('test.agency', ('test.agency', '', 'test.agency', '', '')),
('test.ai', ('test.ai', '', 'test.ai', '', '')),
('test.airforce', ('test.airforce', '', 'test.airforce', '', '')),
('test.al', ('test.al', '', 'test.al', '', '')),
('test.allfinanz', ('test.allfinanz', '', 'test.allfinanz', '', '')),
('test.alsace', ('test.alsace', '', 'test.alsace', '', '')),
('test.am', ('test.am', '', 'test.am', '', '')),
('test.amsterdam', ('test.amsterdam', '', 'test.amsterdam', '', '')),
('test.android', ('test.android', '', 'test.android', '', '')),
('test.ao', ('test.ao', '', 'test.ao', '', '')),
('test.apartments', ('test.apartments', '', 'test.apartments', '', '')),
('test.aq', ('test.aq', '', 'test.aq', '', '')),
('test.aquarelle', ('test.aquarelle', '', 'test.aquarelle', '', '')),
('test.ar', ('test.ar', '', 'test.ar', '', '')),
('test.archi', ('test.archi', '', 'test.archi', '', '')),
('test.army', ('test.army', '', 'test.army', '', '')),
('test.arpa', ('test.arpa', '', 'test.arpa', '', '')),
('test.as', ('test.as', '', 'test.as', '', '')),
('test.asia', ('test.asia', '', 'test.asia', '', '')),
('test.associates', ('test.associates', '', 'test.associates', '', '')),
('test.at', ('test.at', '', 'test.at', '', '')),
('test.attorney', ('test.attorney', '', 'test.attorney', '', '')),
('test.au', ('test.au', '', 'test.au', '', '')),
('test.auction', ('test.auction', '', 'test.auction', '', '')),
('test.audio', ('test.audio', '', 'test.audio', '', '')),
('test.autos', ('test.autos', '', 'test.autos', '', '')),
('test.aw', ('test.aw', '', 'test.aw', '', '')),
('test.ax', ('test.ax', '', 'test.ax', '', '')),
('test.axa', ('test.axa', '', 'test.axa', '', '')),
('test.az', ('test.az', '', 'test.az', '', '')),
('test.ba', ('test.ba', '', 'test.ba', '', '')),
('test.band', ('test.band', '', 'test.band', '', '')),
('test.bank', ('test.bank', '', 'test.bank', '', '')),
('test.bar', ('test.bar', '', 'test.bar', '', '')),
('test.barclaycard', ('test.barclaycard', '', 'test.barclaycard', '', '')),
('test.barclays', ('test.barclays', '', 'test.barclays', '', '')),
('test.bargains', ('test.bargains', '', 'test.bargains', '', '')),
('test.bauhaus', ('test.bauhaus', '', 'test.bauhaus', '', '')),
('test.bayern', ('test.bayern', '', 'test.bayern', '', '')),
('test.bb', ('test.bb', '', 'test.bb', '', '')),
('test.bbc', ('test.bbc', '', 'test.bbc', '', '')),
('test.bd', ('test.bd', '', 'test.bd', '', '')),
('test.be', ('test.be', '', 'test.be', '', '')),
('test.beer', ('test.beer', '', 'test.beer', '', '')),
('test.berlin', ('test.berlin', '', 'test.berlin', '', '')),
('test.best', ('test.best', '', 'test.best', '', '')),
('test.bf', ('test.bf', '', 'test.bf', '', '')),
('test.bg', ('test.bg', '', 'test.bg', '', '')),
('test.bh', ('test.bh', '', 'test.bh', '', '')),
('test.bi', ('test.bi', '', 'test.bi', '', '')),
('test.bid', ('test.bid', '', 'test.bid', '', '')),
('test.bike', ('test.bike', '', 'test.bike', '', '')),
('test.bingo', ('test.bingo', '', 'test.bingo', '', '')),
('test.bio', ('test.bio', '', 'test.bio', '', '')),
('test.biz', ('test.biz', '', 'test.biz', '', '')),
('test.bj', ('test.bj', '', 'test.bj', '', '')),
('test.black', ('test.black', '', 'test.black', '', '')),
('test.blackfriday', ('test.blackfriday', '', 'test.blackfriday', '', '')),
('test.bloomberg', ('test.bloomberg', '', 'test.bloomberg', '', '')),
('test.blue', ('test.blue', '', 'test.blue', '', '')),
('test.bm', ('test.bm', '', 'test.bm', '', '')),
('test.bmw', ('test.bmw', '', 'test.bmw', '', '')),
('test.bn', ('test.bn', '', 'test.bn', '', '')),
('test.bnpparibas', ('test.bnpparibas', '', 'test.bnpparibas', '', '')),
('test.bo', ('test.bo', '', 'test.bo', '', '')),
('test.boats', ('test.boats', '', 'test.boats', '', '')),
('test.bond', ('test.bond', '', 'test.bond', '', '')),
('test.boo', ('test.boo', '', 'test.boo', '', '')),
('test.boutique', ('test.boutique', '', 'test.boutique', '', '')),
('test.br', ('test.br', '', 'test.br', '', '')),
('test.brussels', ('test.brussels', '', 'test.brussels', '', '')),
('test.bs', ('test.bs', '', 'test.bs', '', '')),
('test.bt', ('test.bt', '', 'test.bt', '', '')),
('test.budapest', ('test.budapest', '', 'test.budapest', '', '')),
('test.build', ('test.build', '', 'test.build', '', '')),
('test.builders', ('test.builders', '', 'test.builders', '', '')),
('test.business', ('test.business', '', 'test.business', '', '')),
('test.buzz', ('test.buzz', '', 'test.buzz', '', '')),
('test.bv', ('test.bv', '', 'test.bv', '', '')),
('test.bw', ('test.bw', '', 'test.bw', '', '')),
('test.by', ('test.by', '', 'test.by', '', '')),
('test.bz', ('test.bz', '', 'test.bz', '', '')),
('test.bzh', ('test.bzh', '', 'test.bzh', '', '')),
('test.ca', ('test.ca', '', 'test.ca', '', '')),
('test.cab', ('test.cab', '', 'test.cab', '', '')),
('test.cafe', ('test.cafe', '', 'test.cafe', '', '')),
('test.cal', ('test.cal', '', 'test.cal', '', '')),
('test.camera', ('test.camera', '', 'test.camera', '', '')),
('test.camp', ('test.camp', '', 'test.camp', '', '')),
('test.cancerresearch', ('test.cancerresearch', '', 'test.cancerresearch', '', '')),
('test.canon', ('test.canon', '', 'test.canon', '', '')),
('test.capetown', ('test.capetown', '', 'test.capetown', '', '')),
('test.capital', ('test.capital', '', 'test.capital', '', '')),
('test.caravan', ('test.caravan', '', 'test.caravan', '', '')),
('test.cards', ('test.cards', '', 'test.cards', '', '')),
('test.care', ('test.care', '', 'test.care', '', '')),
('test.career', ('test.career', '', 'test.career', '', '')),
('test.careers', ('test.careers', '', 'test.careers', '', '')),
('test.cartier', ('test.cartier', '', 'test.cartier', '', '')),
('test.casa', ('test.casa', '', 'test.casa', '', '')),
('test.cash', ('test.cash', '', 'test.cash', '', '')),
('test.casino', ('test.casino', '', 'test.casino', '', '')),
('test.cat', ('test.cat', '', 'test.cat', '', '')),
('test.catering', ('test.catering', '', 'test.catering', '', '')),
('test.cbn', ('test.cbn', '', 'test.cbn', '', '')),
('test.cc', ('test.cc', '', 'test.cc', '', '')),
('test.cd', ('test.cd', '', 'test.cd', '', '')),
('test.center', ('test.center', '', 'test.center', '', '')),
('test.ceo', ('test.ceo', '', 'test.ceo', '', '')),
('test.cern', ('test.cern', '', 'test.cern', '', '')),
('test.cf', ('test.cf', '', 'test.cf', '', '')),
('test.cfd', ('test.cfd', '', 'test.cfd', '', '')),
('test.cg', ('test.cg', '', 'test.cg', '', '')),
('test.ch', ('test.ch', '', 'test.ch', '', '')),
('test.channel', ('test.channel', '', 'test.channel', '', '')),
('test.chat', ('test.chat', '', 'test.chat', '', '')),
('test.cheap', ('test.cheap', '', 'test.cheap', '', '')),
('test.christmas', ('test.christmas', '', 'test.christmas', '', '')),
('test.chrome', ('test.chrome', '', 'test.chrome', '', '')),
('test.church', ('test.church', '', 'test.church', '', '')),
('test.ci', ('test.ci', '', 'test.ci', '', '')),
('test.citic', ('test.citic', '', 'test.citic', '', '')),
('test.city', ('test.city', '', 'test.city', '', '')),
('test.ck', ('test.ck', '', 'test.ck', '', '')),
('test.cl', ('test.cl', '', 'test.cl', '', '')),
('test.claims', ('test.claims', '', 'test.claims', '', '')),
('test.cleaning', ('test.cleaning', '', 'test.cleaning', '', '')),
('test.click', ('test.click', '', 'test.click', '', '')),
('test.clinic', ('test.clinic', '', 'test.clinic', '', '')),
('test.clothing', ('test.clothing', '', 'test.clothing', '', '')),
('test.club', ('test.club', '', 'test.club', '', '')),
('test.cm', ('test.cm', '', 'test.cm', '', '')),
('test.cn', ('test.cn', '', 'test.cn', '', '')),
('test.co', ('test.co', '', 'test.co', '', '')),
('test.coach', ('test.coach', '', 'test.coach', '', '')),
('test.codes', ('test.codes', '', 'test.codes', '', '')),
('test.coffee', ('test.coffee', '', 'test.coffee', '', '')),
('test.college', ('test.college', '', 'test.college', '', '')),
('test.cologne', ('test.cologne', '', 'test.cologne', '', '')),
('test.com', ('test.com', '', 'test.com', '', '')),
('test.community', ('test.community', '', 'test.community', '', '')),
('test.company', ('test.company', '', 'test.company', '', '')),
('test.computer', ('test.computer', '', 'test.computer', '', '')),
('test.condos', ('test.condos', '', 'test.condos', '', '')),
('test.construction', ('test.construction', '', 'test.construction', '', '')),
('test.consulting', ('test.consulting', '', 'test.consulting', '', '')),
('test.contractors', ('test.contractors', '', 'test.contractors', '', '')),
('test.cooking', ('test.cooking', '', 'test.cooking', '', '')),
('test.cool', ('test.cool', '', 'test.cool', '', '')),
('test.coop', ('test.coop', '', 'test.coop', '', '')),
('test.country', ('test.country', '', 'test.country', '', '')),
('test.courses', ('test.courses', '', 'test.courses', '', '')),
('test.cr', ('test.cr', '', 'test.cr', '', '')),
('test.credit', ('test.credit', '', 'test.credit', '', '')),
('test.creditcard', ('test.creditcard', '', 'test.creditcard', '', '')),
('test.cricket', ('test.cricket', '', 'test.cricket', '', '')),
('test.crs', ('test.crs', '', 'test.crs', '', '')),
('test.cruises', ('test.cruises', '', 'test.cruises', '', '')),
('test.cu', ('test.cu', '', 'test.cu', '', '')),
('test.cuisinella', ('test.cuisinella', '', 'test.cuisinella', '', '')),
('test.cv', ('test.cv', '', 'test.cv', '', '')),
('test.cw', ('test.cw', '', 'test.cw', '', '')),
('test.cx', ('test.cx', '', 'test.cx', '', '')),
('test.cy', ('test.cy', '', 'test.cy', '', '')),
('test.cymru', ('test.cymru', '', 'test.cymru', '', '')),
('test.cyou', ('test.cyou', '', 'test.cyou', '', '')),
('test.cz', ('test.cz', '', 'test.cz', '', '')),
('test.dabur', ('test.dabur', '', 'test.dabur', '', '')),
('test.dad', ('test.dad', '', 'test.dad', '', '')),
('test.dance', ('test.dance', '', 'test.dance', '', '')),
('test.date', ('test.date', '', 'test.date', '', '')),
('test.dating', ('test.dating', '', 'test.dating', '', '')),
('test.datsun', ('test.datsun', '', 'test.datsun', '', '')),
('test.day', ('test.day', '', 'test.day', '', '')),
('test.dclk', ('test.dclk', '', 'test.dclk', '', '')),
('test.de', ('test.de', '', 'test.de', '', '')),
('test.deals', ('test.deals', '', 'test.deals', '', '')),
('test.degree', ('test.degree', '', 'test.degree', '', '')),
('test.delivery', ('test.delivery', '', 'test.delivery', '', '')),
('test.democrat', ('test.democrat', '', 'test.democrat', '', '')),
('test.dental', ('test.dental', '', 'test.dental', '', '')),
('test.dentist', ('test.dentist', '', 'test.dentist', '', '')),
('test.desi', ('test.desi', '', 'test.desi', '', '')),
('test.design', ('test.design', '', 'test.design', '', '')),
('test.dev', ('test.dev', '', 'test.dev', '', '')),
('test.diamonds', ('test.diamonds', '', 'test.diamonds', '', '')),
('test.diet', ('test.diet', '', 'test.diet', '', '')),
('test.digital', ('test.digital', '', 'test.digital', '', '')),
('test.direct', ('test.direct', '', 'test.direct', '', '')),
('test.directory', ('test.directory', '', 'test.directory', '', '')),
('test.discount', ('test.discount', '', 'test.discount', '', '')),
('test.dj', ('test.dj', '', 'test.dj', '', '')),
('test.dk', ('test.dk', '', 'test.dk', '', '')),
('test.dm', ('test.dm', '', 'test.dm', '', '')),
('test.dnp', ('test.dnp', '', 'test.dnp', '', '')),
('test.do', ('test.do', '', 'test.do', '', '')),
('test.docs', ('test.docs', '', 'test.docs', '', '')),
('test.doha', ('test.doha', '', 'test.doha', '', '')),
('test.domains', ('test.domains', '', 'test.domains', '', '')),
('test.download', ('test.download', '', 'test.download', '', '')),
('test.durban', ('test.durban', '', 'test.durban', '', '')),
('test.dvag', ('test.dvag', '', 'test.dvag', '', '')),
('test.dz', ('test.dz', '', 'test.dz', '', '')),
('test.eat', ('test.eat', '', 'test.eat', '', '')),
('test.ec', ('test.ec', '', 'test.ec', '', '')),
('test.edu', ('test.edu', '', 'test.edu', '', '')),
('test.education', ('test.education', '', 'test.education', '', '')),
('test.ee', ('test.ee', '', 'test.ee', '', '')),
('test.eg', ('test.eg', '', 'test.eg', '', '')),
('test.email', ('test.email', '', 'test.email', '', '')),
('test.emerck', ('test.emerck', '', 'test.emerck', '', '')),
('test.energy', ('test.energy', '', 'test.energy', '', '')),
('test.engineer', ('test.engineer', '', 'test.engineer', '', '')),
('test.engineering', ('test.engineering', '', 'test.engineering', '', '')),
('test.enterprises', ('test.enterprises', '', 'test.enterprises', '', '')),
('test.epson', ('test.epson', '', 'test.epson', '', '')),
('test.equipment', ('test.equipment', '', 'test.equipment', '', '')),
('test.er', ('test.er', '', 'test.er', '', '')),
('test.erni', ('test.erni', '', 'test.erni', '', '')),
('test.es', ('test.es', '', 'test.es', '', '')),
('test.esq', ('test.esq', '', 'test.esq', '', '')),
('test.estate', ('test.estate', '', 'test.estate', '', '')),
('test.et', ('test.et', '', 'test.et', '', '')),
('test.eu', ('test.eu', '', 'test.eu', '', '')),
('test.eurovision', ('test.eurovision', '', 'test.eurovision', '', '')),
('test.eus', ('test.eus', '', 'test.eus', '', '')),
('test.events', ('test.events', '', 'test.events', '', '')),
('test.everbank', ('test.everbank', '', 'test.everbank', '', '')),
('test.exchange', ('test.exchange', '', 'test.exchange', '', '')),
('test.expert', ('test.expert', '', 'test.expert', '', '')),
('test.exposed', ('test.exposed', '', 'test.exposed', '', '')),
('test.express', ('test.express', '', 'test.express', '', '')),
('test.fail', ('test.fail', '', 'test.fail', '', '')),
('test.faith', ('test.faith', '', 'test.faith', '', '')),
('test.fan', ('test.fan', '', 'test.fan', '', '')),
('test.fans', ('test.fans', '', 'test.fans', '', '')),
('test.farm', ('test.farm', '', 'test.farm', '', '')),
('test.fashion', ('test.fashion', '', 'test.fashion', '', '')),
('test.feedback', ('test.feedback', '', 'test.feedback', '', '')),
('test.fi', ('test.fi', '', 'test.fi', '', '')),
('test.film', ('test.film', '', 'test.film', '', '')),
('test.finance', ('test.finance', '', 'test.finance', '', '')),
('test.financial', ('test.financial', '', 'test.financial', '', '')),
('test.firmdale', ('test.firmdale', '', 'test.firmdale', '', '')),
('test.fish', ('test.fish', '', 'test.fish', '', '')),
('test.fishing', ('test.fishing', '', 'test.fishing', '', '')),
('test.fit', ('test.fit', '', 'test.fit', '', '')),
('test.fitness', ('test.fitness', '', 'test.fitness', '', '')),
('test.fj', ('test.fj', '', 'test.fj', '', '')),
('test.fk', ('test.fk', '', 'test.fk', '', '')),
('test.flights', ('test.flights', '', 'test.flights', '', '')),
('test.florist', ('test.florist', '', 'test.florist', '', '')),
('test.flowers', ('test.flowers', '', 'test.flowers', '', '')),
('test.fly', ('test.fly', '', 'test.fly', '', '')),
('test.fm', ('test.fm', '', 'test.fm', '', '')),
('test.fo', ('test.fo', '', 'test.fo', '', '')),
('test.foo', ('test.foo', '', 'test.foo', '', '')),
('test.football', ('test.football', '', 'test.football', '', '')),
('test.forex', ('test.forex', '', 'test.forex', '', '')),
('test.forsale', ('test.forsale', '', 'test.forsale', '', '')),
('test.foundation', ('test.foundation', '', 'test.foundation', '', '')),
('test.fr', ('test.fr', '', 'test.fr', '', '')),
('test.frl', ('test.frl', '', 'test.frl', '', '')),
('test.frogans', ('test.frogans', '', 'test.frogans', '', '')),
('test.fund', ('test.fund', '', 'test.fund', '', '')),
('test.furniture', ('test.furniture', '', 'test.furniture', '', '')),
('test.futbol', ('test.futbol', '', 'test.futbol', '', '')),
('test.ga', ('test.ga', '', 'test.ga', '', '')),
('test.gal', ('test.gal', '', 'test.gal', '', '')),
('test.gallery', ('test.gallery', '', 'test.gallery', '', '')),
('test.garden', ('test.garden', '', 'test.garden', '', '')),
('test.gb', ('test.gb', '', 'test.gb', '', '')),
('test.gbiz', ('test.gbiz', '', 'test.gbiz', '', '')),
('test.gd', ('test.gd', '', 'test.gd', '', '')),
('test.gdn', ('test.gdn', '', 'test.gdn', '', '')),
('test.ge', ('test.ge', '', 'test.ge', '', '')),
('test.gent', ('test.gent', '', 'test.gent', '', '')),
('test.gf', ('test.gf', '', 'test.gf', '', '')),
('test.gg', ('test.gg', '', 'test.gg', '', '')),
('test.ggee', ('test.ggee', '', 'test.ggee', '', '')),
('test.gh', ('test.gh', '', 'test.gh', '', '')),
('test.gi', ('test.gi', '', 'test.gi', '', '')),
('test.gift', ('test.gift', '', 'test.gift', '', '')),
('test.gifts', ('test.gifts', '', 'test.gifts', '', '')),
('test.gives', ('test.gives', '', 'test.gives', '', '')),
('test.gl', ('test.gl', '', 'test.gl', '', '')),
('test.glass', ('test.glass', '', 'test.glass', '', '')),
('test.gle', ('test.gle', '', 'test.gle', '', '')),
('test.global', ('test.global', '', 'test.global', '', '')),
('test.globo', ('test.globo', '', 'test.globo', '', '')),
('test.gm', ('test.gm', '', 'test.gm', '', '')),
('test.gmail', ('test.gmail', '', 'test.gmail', '', '')),
('test.gmo', ('test.gmo', '', 'test.gmo', '', '')),
('test.gmx', ('test.gmx', '', 'test.gmx', '', '')),
('test.gn', ('test.gn', '', 'test.gn', '', '')),
('test.gold', ('test.gold', '', 'test.gold', '', '')),
('test.goldpoint', ('test.goldpoint', '', 'test.goldpoint', '', '')),
('test.golf', ('test.golf', '', 'test.golf', '', '')),
('test.goo', ('test.goo', '', 'test.goo', '', '')),
('test.goog', ('test.goog', '', 'test.goog', '', '')),
('test.google', ('test.google', '', 'test.google', '', '')),
('test.gop', ('test.gop', '', 'test.gop', '', '')),
('test.gov', ('test.gov', '', 'test.gov', '', '')),
('test.gp', ('test.gp', '', 'test.gp', '', '')),
('test.gq', ('test.gq', '', 'test.gq', '', '')),
('test.gr', ('test.gr', '', 'test.gr', '', '')),
('test.graphics', ('test.graphics', '', 'test.graphics', '', '')),
('test.gratis', ('test.gratis', '', 'test.gratis', '', '')),
('test.green', ('test.green', '', 'test.green', '', '')),
('test.gripe', ('test.gripe', '', 'test.gripe', '', '')),
('test.gs', ('test.gs', '', 'test.gs', '', '')),
('test.gt', ('test.gt', '', 'test.gt', '', '')),
('test.gu', ('test.gu', '', 'test.gu', '', '')),
('test.guge', ('test.guge', '', 'test.guge', '', '')),
('test.guide', ('test.guide', '', 'test.guide', '', '')),
('test.guitars', ('test.guitars', '', 'test.guitars', '', '')),
('test.guru', ('test.guru', '', 'test.guru', '', '')),
('test.gw', ('test.gw', '', 'test.gw', '', '')),
('test.gy', ('test.gy', '', 'test.gy', '', '')),
('test.hamburg', ('test.hamburg', '', 'test.hamburg', '', '')),
('test.hangout', ('test.hangout', '', 'test.hangout', '', '')),
('test.haus', ('test.haus', '', 'test.haus', '', '')),
('test.healthcare', ('test.healthcare', '', 'test.healthcare', '', '')),
('test.help', ('test.help', '', 'test.help', '', '')),
('test.here', ('test.here', '', 'test.here', '', '')),
('test.hermes', ('test.hermes', '', 'test.hermes', '', '')),
('test.hiphop', ('test.hiphop', '', 'test.hiphop', '', '')),
('test.hiv', ('test.hiv', '', 'test.hiv', '', '')),
('test.hk', ('test.hk', '', 'test.hk', '', '')),
('test.hm', ('test.hm', '', 'test.hm', '', '')),
('test.hn', ('test.hn', '', 'test.hn', '', '')),
('test.holdings', ('test.holdings', '', 'test.holdings', '', '')),
('test.holiday', ('test.holiday', '', 'test.holiday', '', '')),
('test.homes', ('test.homes', '', 'test.homes', '', '')),
('test.horse', ('test.horse', '', 'test.horse', '', '')),
('test.host', ('test.host', '', 'test.host', '', '')),
('test.hosting', ('test.hosting', '', 'test.hosting', '', '')),
('test.house', ('test.house', '', 'test.house', '', '')),
('test.how', ('test.how', '', 'test.how', '', '')),
('test.hr', ('test.hr', '', 'test.hr', '', '')),
('test.ht', ('test.ht', '', 'test.ht', '', '')),
('test.hu', ('test.hu', '', 'test.hu', '', '')),
('test.ibm', ('test.ibm', '', 'test.ibm', '', '')),
('test.id', ('test.id', '', 'test.id', '', '')),
('test.ie', ('test.ie', '', 'test.ie', '', '')),
('test.ifm', ('test.ifm', '', 'test.ifm', '', '')),
('test.il', ('test.il', '', 'test.il', '', '')),
('test.im', ('test.im', '', 'test.im', '', '')),
('test.immo', ('test.immo', '', 'test.immo', '', '')),
('test.immobilien', ('test.immobilien', '', 'test.immobilien', '', '')),
('test.in', ('test.in', '', 'test.in', '', '')),
('test.industries', ('test.industries', '', 'test.industries', '', '')),
('test.infiniti', ('test.infiniti', '', 'test.infiniti', '', '')),
('test.info', ('test.info', '', 'test.info', '', '')),
('test.ing', ('test.ing', '', 'test.ing', '', '')),
('test.ink', ('test.ink', '', 'test.ink', '', '')),
('test.institute', ('test.institute', '', 'test.institute', '', '')),
('test.insure', ('test.insure', '', 'test.insure', '', '')),
('test.int', ('test.int', '', 'test.int', '', '')),
('test.international', ('test.international', '', 'test.international', '', '')),
('test.investments', ('test.investments', '', 'test.investments', '', '')),
('test.io', ('test.io', '', 'test.io', '', '')),
('test.iq', ('test.iq', '', 'test.iq', '', '')),
('test.ir', ('test.ir', '', 'test.ir', '', '')),
('test.irish', ('test.irish', '', 'test.irish', '', '')),
('test.is', ('test.is', '', 'test.is', '', '')),
('test.it', ('test.it', '', 'test.it', '', '')),
('test.java', ('test.java', '', 'test.java', '', '')),
('test.jcb', ('test.jcb', '', 'test.jcb', '', '')),
('test.je', ('test.je', '', 'test.je', '', '')),
('test.jetzt', ('test.jetzt', '', 'test.jetzt', '', '')),
('test.jm', ('test.jm', '', 'test.jm', '', '')),
('test.jo', ('test.jo', '', 'test.jo', '', '')),
('test.jobs', ('test.jobs', '', 'test.jobs', '', '')),
('test.joburg', ('test.joburg', '', 'test.joburg', '', '')),
('test.jp', ('test.jp', '', 'test.jp', '', '')),
('test.juegos', ('test.juegos', '', 'test.juegos', '', '')),
('test.kaufen', ('test.kaufen', '', 'test.kaufen', '', '')),
('test.kddi', ('test.kddi', '', 'test.kddi', '', '')),
('test.ke', ('test.ke', '', 'test.ke', '', '')),
('test.kg', ('test.kg', '', 'test.kg', '', '')),
('test.kh', ('test.kh', '', 'test.kh', '', '')),
('test.ki', ('test.ki', '', 'test.ki', '', '')),
('test.kim', ('test.kim', '', 'test.kim', '', '')),
('test.kitchen', ('test.kitchen', '', 'test.kitchen', '', '')),
('test.kiwi', ('test.kiwi', '', 'test.kiwi', '', '')),
('test.km', ('test.km', '', 'test.km', '', '')),
('test.kn', ('test.kn', '', 'test.kn', '', '')),
('test.koeln', ('test.koeln', '', 'test.koeln', '', '')),
('test.komatsu', ('test.komatsu', '', 'test.komatsu', '', '')),
('test.kp', ('test.kp', '', 'test.kp', '', '')),
('test.kr', ('test.kr', '', 'test.kr', '', '')),
('test.krd', ('test.krd', '', 'test.krd', '', '')),
('test.kred', ('test.kred', '', 'test.kred', '', '')),
('test.kw', ('test.kw', '', 'test.kw', '', '')),
('test.ky', ('test.ky', '', 'test.ky', '', '')),
('test.kyoto', ('test.kyoto', '', 'test.kyoto', '', '')),
('test.kz', ('test.kz', '', 'test.kz', '', '')),
('test.la', ('test.la', '', 'test.la', '', '')),
('test.lacaixa', ('test.lacaixa', '', 'test.lacaixa', '', '')),
('test.land', ('test.land', '', 'test.land', '', '')),
('test.lat', ('test.lat', '', 'test.lat', '', '')),
('test.latrobe', ('test.latrobe', '', 'test.latrobe', '', '')),
('test.lawyer', ('test.lawyer', '', 'test.lawyer', '', '')),
('test.lb', ('test.lb', '', 'test.lb', '', '')),
('test.lc', ('test.lc', '', 'test.lc', '', '')),
('test.lds', ('test.lds', '', 'test.lds', '', '')),
('test.lease', ('test.lease', '', 'test.lease', '', '')),
('test.leclerc', ('test.leclerc', '', 'test.leclerc', '', '')),
('test.legal', ('test.legal', '', 'test.legal', '', '')),
('test.lgbt', ('test.lgbt', '', 'test.lgbt', '', '')),
('test.li', ('test.li', '', 'test.li', '', '')),
('test.lidl', ('test.lidl', '', 'test.lidl', '', '')),
('test.life', ('test.life', '', 'test.life', '', '')),
('test.lighting', ('test.lighting', '', 'test.lighting', '', '')),
('test.limited', ('test.limited', '', 'test.limited', '', '')),
('test.limo', ('test.limo', '', 'test.limo', '', '')),
('test.link', ('test.link', '', 'test.link', '', '')),
('test.lk', ('test.lk', '', 'test.lk', '', '')),
('test.loan', ('test.loan', '', 'test.loan', '', '')),
('test.loans', ('test.loans', '', 'test.loans', '', '')),
('test.london', ('test.london', '', 'test.london', '', '')),
('test.lotte', ('test.lotte', '', 'test.lotte', '', '')),
('test.lotto', ('test.lotto', '', 'test.lotto', '', '')),
('test.love', ('test.love', '', 'test.love', '', '')),
('test.lr', ('test.lr', '', 'test.lr', '', '')),
('test.ls', ('test.ls', '', 'test.ls', '', '')),
('test.lt', ('test.lt', '', 'test.lt', '', '')),
('test.ltda', ('test.ltda', '', 'test.ltda', '', '')),
('test.lu', ('test.lu', '', 'test.lu', '', '')),
('test.luxe', ('test.luxe', '', 'test.luxe', '', '')),
('test.luxury', ('test.luxury', '', 'test.luxury', '', '')),
('test.lv', ('test.lv', '', 'test.lv', '', '')),
('test.ly', ('test.ly', '', 'test.ly', '', '')),
('test.ma', ('test.ma', '', 'test.ma', '', '')),
('test.madrid', ('test.madrid', '', 'test.madrid', '', '')),
('test.maif', ('test.maif', '', 'test.maif', '', '')),
('test.maison', ('test.maison', '', 'test.maison', '', '')),
('test.management', ('test.management', '', 'test.management', '', '')),
('test.mango', ('test.mango', '', 'test.mango', '', '')),
('test.market', ('test.market', '', 'test.market', '', '')),
('test.marketing', ('test.marketing', '', 'test.marketing', '', '')),
('test.markets', ('test.markets', '', 'test.markets', '', '')),
('test.marriott', ('test.marriott', '', 'test.marriott', '', '')),
('test.mc', ('test.mc', '', 'test.mc', '', '')),
('test.md', ('test.md', '', 'test.md', '', '')),
('test.me', ('test.me', '', 'test.me', '', '')),
('test.media', ('test.media', '', 'test.media', '', '')),
('test.meet', ('test.meet', '', 'test.meet', '', '')),
('test.melbourne', ('test.melbourne', '', 'test.melbourne', '', '')),
('test.meme', ('test.meme', '', 'test.meme', '', '')),
('test.memorial', ('test.memorial', '', 'test.memorial', '', '')),
('test.menu', ('test.menu', '', 'test.menu', '', '')),
('test.mg', ('test.mg', '', 'test.mg', '', '')),
('test.mh', ('test.mh', '', 'test.mh', '', '')),
('test.miami', ('test.miami', '', 'test.miami', '', '')),
('test.mil', ('test.mil', '', 'test.mil', '', '')),
('test.mini', ('test.mini', '', 'test.mini', '', '')),
('test.mk', ('test.mk', '', 'test.mk', '', '')),
('test.ml', ('test.ml', '', 'test.ml', '', '')),
('test.mm', ('test.mm', '', 'test.mm', '', '')),
('test.mma', ('test.mma', '', 'test.mma', '', '')),
('test.mn', ('test.mn', '', 'test.mn', '', '')),
('test.mo', ('test.mo', '', 'test.mo', '', '')),
('test.mobi', ('test.mobi', '', 'test.mobi', '', '')),
('test.moda', ('test.moda', '', 'test.moda', '', '')),
('test.moe', ('test.moe', '', 'test.moe', '', '')),
('test.monash', ('test.monash', '', 'test.monash', '', '')),
('test.money', ('test.money', '', 'test.money', '', '')),
('test.mormon', ('test.mormon', '', 'test.mormon', '', '')),
('test.mortgage', ('test.mortgage', '', 'test.mortgage', '', '')),
('test.moscow', ('test.moscow', '', 'test.moscow', '', '')),
('test.motorcycles', ('test.motorcycles', '', 'test.motorcycles', '', '')),
('test.mov', ('test.mov', '', 'test.mov', '', '')),
('test.movie', ('test.movie', '', 'test.movie', '', '')),
('test.mp', ('test.mp', '', 'test.mp', '', '')),
('test.mq', ('test.mq', '', 'test.mq', '', '')),
('test.mr', ('test.mr', '', 'test.mr', '', '')),
('test.ms', ('test.ms', '', 'test.ms', '', '')),
('test.mt', ('test.mt', '', 'test.mt', '', '')),
('test.mtn', ('test.mtn', '', 'test.mtn', '', '')),
('test.mu', ('test.mu', '', 'test.mu', '', '')),
('test.museum', ('test.museum', '', 'test.museum', '', '')),
('test.mv', ('test.mv', '', 'test.mv', '', '')),
('test.mw', ('test.mw', '', 'test.mw', '', '')),
('test.mx', ('test.mx', '', 'test.mx', '', '')),
('test.my', ('test.my', '', 'test.my', '', '')),
('test.mz', ('test.mz', '', 'test.mz', '', '')),
('test.na', ('test.na', '', 'test.na', '', '')),
('test.nagoya', ('test.nagoya', '', 'test.nagoya', '', '')),
('test.name', ('test.name', '', 'test.name', '', '')),
('test.navy', ('test.navy', '', 'test.navy', '', '')),
('test.nc', ('test.nc', '', 'test.nc', '', '')),
('test.ne', ('test.ne', '', 'test.ne', '', '')),
('test.net', ('test.net', '', 'test.net', '', '')),
('test.network', ('test.network', '', 'test.network', '', '')),
('test.neustar', ('test.neustar', '', 'test.neustar', '', '')),
('test.new', ('test.new', '', 'test.new', '', '')),
('test.news', ('test.news', '', 'test.news', '', '')),
('test.nexus', ('test.nexus', '', 'test.nexus', '', '')),
('test.nf', ('test.nf', '', 'test.nf', '', '')),
('test.ng', ('test.ng', '', 'test.ng', '', '')),
('test.ngo', ('test.ngo', '', 'test.ngo', '', '')),
('test.nhk', ('test.nhk', '', 'test.nhk', '', '')),
('test.ni', ('test.ni', '', 'test.ni', '', '')),
('test.nico', ('test.nico', '', 'test.nico', '', '')),
('test.ninja', ('test.ninja', '', 'test.ninja', '', '')),
('test.nissan', ('test.nissan', '', 'test.nissan', '', '')),
('test.nl', ('test.nl', '', 'test.nl', '', '')),
('test.no', ('test.no', '', 'test.no', '', '')),
('test.np', ('test.np', '', 'test.np', '', '')),
('test.nr', ('test.nr', '', 'test.nr', '', '')),
('test.nra', ('test.nra', '', 'test.nra', '', '')),
('test.nrw', ('test.nrw', '', 'test.nrw', '', '')),
('test.ntt', ('test.ntt', '', 'test.ntt', '', '')),
('test.nu', ('test.nu', '', 'test.nu', '', '')),
('test.nyc', ('test.nyc', '', 'test.nyc', '', '')),
('test.nz', ('test.nz', '', 'test.nz', '', '')),
('test.okinawa', ('test.okinawa', '', 'test.okinawa', '', '')),
('test.om', ('test.om', '', 'test.om', '', '')),
('test.one', ('test.one', '', 'test.one', '', '')),
('test.ong', ('test.ong', '', 'test.ong', '', '')),
('test.onl', ('test.onl', '', 'test.onl', '', '')),
('test.online', ('test.online', '', 'test.online', '', '')),
('test.ooo', ('test.ooo', '', 'test.ooo', '', '')),
('test.org', ('test.org', '', 'test.org', '', '')),
('test.organic', ('test.organic', '', 'test.organic', '', '')),
('test.osaka', ('test.osaka', '', 'test.osaka', '', '')),
('test.otsuka', ('test.otsuka', '', 'test.otsuka', '', '')),
('test.ovh', ('test.ovh', '', 'test.ovh', '', '')),
('test.pa', ('test.pa', '', 'test.pa', '', '')),
('test.page', ('test.page', '', 'test.page', '', '')),
('test.paris', ('test.paris', '', 'test.paris', '', '')),
('test.partners', ('test.partners', '', 'test.partners', '', '')),
('test.parts', ('test.parts', '', 'test.parts', '', '')),
('test.party', ('test.party', '', 'test.party', '', '')),
('test.pe', ('test.pe', '', 'test.pe', '', '')),
('test.pf', ('test.pf', '', 'test.pf', '', '')),
('test.pg', ('test.pg', '', 'test.pg', '', '')),
('test.ph', ('test.ph', '', 'test.ph', '', '')),
('test.pharmacy', ('test.pharmacy', '', 'test.pharmacy', '', '')),
('test.photo', ('test.photo', '', 'test.photo', '', '')),
('test.photography', ('test.photography', '', 'test.photography', '', '')),
('test.photos', ('test.photos', '', 'test.photos', '', '')),
('test.physio', ('test.physio', '', 'test.physio', '', '')),
('test.piaget', ('test.piaget', '', 'test.piaget', '', '')),
('test.pics', ('test.pics', '', 'test.pics', '', '')),
('test.pictet', ('test.pictet', '', 'test.pictet', '', '')),
('test.pictures', ('test.pictures', '', 'test.pictures', '', '')),
('test.pink', ('test.pink', '', 'test.pink', '', '')),
('test.pizza', ('test.pizza', '', 'test.pizza', '', '')),
('test.pk', ('test.pk', '', 'test.pk', '', '')),
('test.pl', ('test.pl', '', 'test.pl', '', '')),
('test.place', ('test.place', '', 'test.place', '', '')),
('test.plumbing', ('test.plumbing', '', 'test.plumbing', '', '')),
('test.plus', ('test.plus', '', 'test.plus', '', '')),
('test.pm', ('test.pm', '', 'test.pm', '', '')),
('test.pn', ('test.pn', '', 'test.pn', '', '')),
('test.pohl', ('test.pohl', '', 'test.pohl', '', '')),
('test.poker', ('test.poker', '', 'test.poker', '', '')),
('test.porn', ('test.porn', '', 'test.porn', '', '')),
('test.post', ('test.post', '', 'test.post', '', '')),
('test.pr', ('test.pr', '', 'test.pr', '', '')),
('test.praxi', ('test.praxi', '', 'test.praxi', '', '')),
('test.press', ('test.press', '', 'test.press', '', '')),
('test.pro', ('test.pro', '', 'test.pro', '', '')),
('test.prod', ('test.prod', '', 'test.prod', '', '')),
('test.productions', ('test.productions', '', 'test.productions', '', '')),
('test.prof', ('test.prof', '', 'test.prof', '', '')),
('test.properties', ('test.properties', '', 'test.properties', '', '')),
('test.property', ('test.property', '', 'test.property', '', '')),
('test.ps', ('test.ps', '', 'test.ps', '', '')),
('test.pt', ('test.pt', '', 'test.pt', '', '')),
('test.pub', ('test.pub', '', 'test.pub', '', '')),
('test.pw', ('test.pw', '', 'test.pw', '', '')),
('test.py', ('test.py', '', 'test.py', '', '')),
('test.qa', ('test.qa', '', 'test.qa', '', '')),
('test.qpon', ('test.qpon', '', 'test.qpon', '', '')),
('test.quebec', ('test.quebec', '', 'test.quebec', '', '')),
('test.racing', ('test.racing', '', 'test.racing', '', '')),
('test.re', ('test.re', '', 'test.re', '', '')),
('test.realtor', ('test.realtor', '', 'test.realtor', '', '')),
('test.recipes', ('test.recipes', '', 'test.recipes', '', '')),
('test.red', ('test.red', '', 'test.red', '', '')),
('test.redstone', ('test.redstone', '', 'test.redstone', '', '')),
('test.rehab', ('test.rehab', '', 'test.rehab', '', '')),
('test.reise', ('test.reise', '', 'test.reise', '', '')),
('test.reisen', ('test.reisen', '', 'test.reisen', '', '')),
('test.reit', ('test.reit', '', 'test.reit', '', '')),
('test.ren', ('test.ren', '', 'test.ren', '', '')),
('test.rentals', ('test.rentals', '', 'test.rentals', '', '')),
('test.repair', ('test.repair', '', 'test.repair', '', '')),
('test.report', ('test.report', '', 'test.report', '', '')),
('test.republican', ('test.republican', '', 'test.republican', '', '')),
('test.rest', ('test.rest', '', 'test.rest', '', '')),
('test.restaurant', ('test.restaurant', '', 'test.restaurant', '', '')),
('test.review', ('test.review', '', 'test.review', '', '')),
('test.reviews', ('test.reviews', '', 'test.reviews', '', '')),
('test.rich', ('test.rich', '', 'test.rich', '', '')),
('test.rio', ('test.rio', '', 'test.rio', '', '')),
('test.rip', ('test.rip', '', 'test.rip', '', '')),
('test.ro', ('test.ro', '', 'test.ro', '', '')),
('test.rocks', ('test.rocks', '', 'test.rocks', '', '')),
('test.rodeo', ('test.rodeo', '', 'test.rodeo', '', '')),
('test.rs', ('test.rs', '', 'test.rs', '', '')),
('test.rsvp', ('test.rsvp', '', 'test.rsvp', '', '')),
('test.ru', ('test.ru', '', 'test.ru', '', '')),
('test.ruhr', ('test.ruhr', '', 'test.ruhr', '', '')),
('test.rw', ('test.rw', '', 'test.rw', '', '')),
('test.ryukyu', ('test.ryukyu', '', 'test.ryukyu', '', '')),
('test.sa', ('test.sa', '', 'test.sa', '', '')),
('test.saarland', ('test.saarland', '', 'test.saarland', '', '')),
('test.sale', ('test.sale', '', 'test.sale', '', '')),
('test.samsung', ('test.samsung', '', 'test.samsung', '', '')),
('test.sap', ('test.sap', '', 'test.sap', '', '')),
('test.sarl', ('test.sarl', '', 'test.sarl', '', '')),
('test.saxo', ('test.saxo', '', 'test.saxo', '', '')),
('test.sb', ('test.sb', '', 'test.sb', '', '')),
('test.sc', ('test.sc', '', 'test.sc', '', '')),
('test.sca', ('test.sca', '', 'test.sca', '', '')),
('test.scb', ('test.scb', '', 'test.scb', '', '')),
('test.schmidt', ('test.schmidt', '', 'test.schmidt', '', '')),
('test.scholarships', ('test.scholarships', '', 'test.scholarships', '', '')),
('test.school', ('test.school', '', 'test.school', '', '')),
('test.schule', ('test.schule', '', 'test.schule', '', '')),
('test.schwarz', ('test.schwarz', '', 'test.schwarz', '', '')),
('test.science', ('test.science', '', 'test.science', '', '')),
('test.scot', ('test.scot', '', 'test.scot', '', '')),
('test.sd', ('test.sd', '', 'test.sd', '', '')),
('test.se', ('test.se', '', 'test.se', '', '')),
('test.services', ('test.services', '', 'test.services', '', '')),
('test.sew', ('test.sew', '', 'test.sew', '', '')),
('test.sexy', ('test.sexy', '', 'test.sexy', '', '')),
('test.sg', ('test.sg', '', 'test.sg', '', '')),
('test.sh', ('test.sh', '', 'test.sh', '', '')),
('test.shiksha', ('test.shiksha', '', 'test.shiksha', '', '')),
('test.shoes', ('test.shoes', '', 'test.shoes', '', '')),
('test.shriram', ('test.shriram', '', 'test.shriram', '', '')),
('test.si', ('test.si', '', 'test.si', '', '')),
('test.singles', ('test.singles', '', 'test.singles', '', '')),
('test.site', ('test.site', '', 'test.site', '', '')),
('test.sj', ('test.sj', '', 'test.sj', '', '')),
('test.sk', ('test.sk', '', 'test.sk', '', '')),
('test.sky', ('test.sky', '', 'test.sky', '', '')),
('test.sl', ('test.sl', '', 'test.sl', '', '')),
('test.sm', ('test.sm', '', 'test.sm', '', '')),
('test.sn', ('test.sn', '', 'test.sn', '', '')),
('test.so', ('test.so', '', 'test.so', '', '')),
('test.social', ('test.social', '', 'test.social', '', '')),
('test.software', ('test.software', '', 'test.software', '', '')),
('test.sohu', ('test.sohu', '', 'test.sohu', '', '')),
('test.solar', ('test.solar', '', 'test.solar', '', '')),
('test.solutions', ('test.solutions', '', 'test.solutions', '', '')),
('test.soy', ('test.soy', '', 'test.soy', '', '')),
('test.space', ('test.space', '', 'test.space', '', '')),
('test.spreadbetting', ('test.spreadbetting', '', 'test.spreadbetting', '', '')),
('test.sr', ('test.sr', '', 'test.sr', '', '')),
('test.ss', ('test.ss', '', 'test.ss', '', '')),
('test.st', ('test.st', '', 'test.st', '', '')),
('test.study', ('test.study', '', 'test.study', '', '')),
('test.style', ('test.style', '', 'test.style', '', '')),
('test.su', ('test.su', '', 'test.su', '', '')),
('test.sucks', ('test.sucks', '', 'test.sucks', '', '')),
('test.supplies', ('test.supplies', '', 'test.supplies', '', '')),
('test.supply', ('test.supply', '', 'test.supply', '', '')),
('test.support', ('test.support', '', 'test.support', '', '')),
('test.surf', ('test.surf', '', 'test.surf', '', '')),
('test.surgery', ('test.surgery', '', 'test.surgery', '', '')),
('test.suzuki', ('test.suzuki', '', 'test.suzuki', '', '')),
('test.sv', ('test.sv', '', 'test.sv', '', '')),
('test.sx', ('test.sx', '', 'test.sx', '', '')),
('test.sy', ('test.sy', '', 'test.sy', '', '')),
('test.sydney', ('test.sydney', '', 'test.sydney', '', '')),
('test.systems', ('test.systems', '', 'test.systems', '', '')),
('test.sz', ('test.sz', '', 'test.sz', '', '')),
('test.taipei', ('test.taipei', '', 'test.taipei', '', '')),
('test.tatar', ('test.tatar', '', 'test.tatar', '', '')),
('test.tattoo', ('test.tattoo', '', 'test.tattoo', '', '')),
('test.tax', ('test.tax', '', 'test.tax', '', '')),
('test.tc', ('test.tc', '', 'test.tc', '', '')),
('test.td', ('test.td', '', 'test.td', '', '')),
('test.tech', ('test.tech', '', 'test.tech', '', '')),
('test.technology', ('test.technology', '', 'test.technology', '', '')),
('test.tel', ('test.tel', '', 'test.tel', '', '')),
('test.temasek', ('test.temasek', '', 'test.temasek', '', '')),
('test.tennis', ('test.tennis', '', 'test.tennis', '', '')),
('test.tf', ('test.tf', '', 'test.tf', '', '')),
('test.tg', ('test.tg', '', 'test.tg', '', '')),
('test.th', ('test.th', '', 'test.th', '', '')),
('test.tickets', ('test.tickets', '', 'test.tickets', '', '')),
('test.tienda', ('test.tienda', '', 'test.tienda', '', '')),
('test.tips', ('test.tips', '', 'test.tips', '', '')),
('test.tires', ('test.tires', '', 'test.tires', '', '')),
('test.tirol', ('test.tirol', '', 'test.tirol', '', '')),
('test.tj', ('test.tj', '', 'test.tj', '', '')),
('test.tk', ('test.tk', '', 'test.tk', '', '')),
('test.tl', ('test.tl', '', 'test.tl', '', '')),
('test.tm', ('test.tm', '', 'test.tm', '', '')),
('test.tn', ('test.tn', '', 'test.tn', '', '')),
('test.to', ('test.to', '', 'test.to', '', '')),
('test.today', ('test.today', '', 'test.today', '', '')),
('test.tokyo', ('test.tokyo', '', 'test.tokyo', '', '')),
('test.tools', ('test.tools', '', 'test.tools', '', '')),
('test.top', ('test.top', '', 'test.top', '', '')),
('test.toshiba', ('test.toshiba', '', 'test.toshiba', '', '')),
('test.tours', ('test.tours', '', 'test.tours', '', '')),
('test.town', ('test.town', '', 'test.town', '', '')),
('test.toys', ('test.toys', '', 'test.toys', '', '')),
('test.tr', ('test.tr', '', 'test.tr', '', '')),
('test.trade', ('test.trade', '', 'test.trade', '', '')),
('test.trading', ('test.trading', '', 'test.trading', '', '')),
('test.training', ('test.training', '', 'test.training', '', '')),
('test.travel', ('test.travel', '', 'test.travel', '', '')),
('test.trust', ('test.trust', '', 'test.trust', '', '')),
('test.tt', ('test.tt', '', 'test.tt', '', '')),
('test.tui', ('test.tui', '', 'test.tui', '', '')),
('test.tv', ('test.tv', '', 'test.tv', '', '')),
('test.tw', ('test.tw', '', 'test.tw', '', '')),
('test.tz', ('test.tz', '', 'test.tz', '', '')),
('test.ua', ('test.ua', '', 'test.ua', '', '')),
('test.ug', ('test.ug', '', 'test.ug', '', '')),
('test.uk', ('test.uk', '', 'test.uk', '', '')),
('test.university', ('test.university', '', 'test.university', '', '')),
('test.uno', ('test.uno', '', 'test.uno', '', '')),
('test.uol', ('test.uol', '', 'test.uol', '', '')),
('test.us', ('test.us', '', 'test.us', '', '')),
('test.uy', ('test.uy', '', 'test.uy', '', '')),
('test.uz', ('test.uz', '', 'test.uz', '', '')),
('test.va', ('test.va', '', 'test.va', '', '')),
('test.vacations', ('test.vacations', '', 'test.vacations', '', '')),
('test.vc', ('test.vc', '', 'test.vc', '', '')),
('test.ve', ('test.ve', '', 'test.ve', '', '')),
('test.vegas', ('test.vegas', '', 'test.vegas', '', '')),
('test.ventures', ('test.ventures', '', 'test.ventures', '', '')),
('test.versicherung', ('test.versicherung', '', 'test.versicherung', '', '')),
('test.vet', ('test.vet', '', 'test.vet', '', '')),
('test.vg', ('test.vg', '', 'test.vg', '', '')),
('test.vi', ('test.vi', '', 'test.vi', '', '')),
('test.viajes', ('test.viajes', '', 'test.viajes', '', '')),
('test.video', ('test.video', '', 'test.video', '', '')),
('test.villas', ('test.villas', '', 'test.villas', '', '')),
('test.vision', ('test.vision', '', 'test.vision', '', '')),
('test.vlaanderen', ('test.vlaanderen', '', 'test.vlaanderen', '', '')),
('test.vn', ('test.vn', '', 'test.vn', '', '')),
('test.vodka', ('test.vodka', '', 'test.vodka', '', '')),
('test.vote', ('test.vote', '', 'test.vote', '', '')),
('test.voting', ('test.voting', '', 'test.voting', '', '')),
('test.voto', ('test.voto', '', 'test.voto', '', '')),
('test.voyage', ('test.voyage', '', 'test.voyage', '', '')),
('test.vu', ('test.vu', '', 'test.vu', '', '')),
('test.wales', ('test.wales', '', 'test.wales', '', '')),
('test.wang', ('test.wang', '', 'test.wang', '', '')),
('test.watch', ('test.watch', '', 'test.watch', '', '')),
('test.webcam', ('test.webcam', '', 'test.webcam', '', '')),
('test.website', ('test.website', '', 'test.website', '', '')),
('test.wed', ('test.wed', '', 'test.wed', '', '')),
('test.wedding', ('test.wedding', '', 'test.wedding', '', '')),
('test.wf', ('test.wf', '', 'test.wf', '', '')),
('test.whoswho', ('test.whoswho', '', 'test.whoswho', '', '')),
('test.wien', ('test.wien', '', 'test.wien', '', '')),
('test.wiki', ('test.wiki', '', 'test.wiki', '', '')),
('test.williamhill', ('test.williamhill', '', 'test.williamhill', '', '')),
('test.win', ('test.win', '', 'test.win', '', '')),
('test.wme', ('test.wme', '', 'test.wme', '', '')),
('test.work', ('test.work', '', 'test.work', '', '')),
('test.works', ('test.works', '', 'test.works', '', '')),
('test.world', ('test.world', '', 'test.world', '', '')),
('test.ws', ('test.ws', '', 'test.ws', '', '')),
('test.wtc', ('test.wtc', '', 'test.wtc', '', '')),
('test.wtf', ('test.wtf', '', 'test.wtf', '', '')),
('test.xin', ('test.xin', '', 'test.xin', '', '')),
('test.xxx', ('test.xxx', '', 'test.xxx', '', '')),
('test.xyz', ('test.xyz', '', 'test.xyz', '', '')),
('test.yachts', ('test.yachts', '', 'test.yachts', '', '')),
('test.yandex', ('test.yandex', '', 'test.yandex', '', '')),
('test.ye', ('test.ye', '', 'test.ye', '', '')),
('test.yodobashi', ('test.yodobashi', '', 'test.yodobashi', '', '')),
('test.yoga', ('test.yoga', '', 'test.yoga', '', '')),
('test.yokohama', ('test.yokohama', '', 'test.yokohama', '', '')),
('test.youtube', ('test.youtube', '', 'test.youtube', '', '')),
('test.yt', ('test.yt', '', 'test.yt', '', '')),
('test.za', ('test.za', '', 'test.za', '', '')),
('test.zip', ('test.zip', '', 'test.zip', '', '')),
('test.zm', ('test.zm', '', 'test.zm', '', '')),
('test.zone', ('test.zone', '', 'test.zone', '', '')),
('test.zuerich', ('test.zuerich', '', 'test.zuerich', '', '')),
('test.zw', ('test.zw', '', 'test.zw', '', '')),
]
for t in tests:
logging.info(t[0])
self.assertEqual(
link.find_links(t[0])[0],
t[1],
)
if __name__ == '__main__':
tbot.testbase.run_file(__file__)
|
from datetime import date
from onegov.ballot import Election
from onegov.election_day.collections import ArchivedResultCollection
from tests.onegov.election_day.common import login
from tests.onegov.election_day.common import upload_majorz_election
from tests.onegov.election_day.common import upload_proporz_election
from time import sleep
from unittest.mock import patch
from webtest import TestApp as Client
from webtest.forms import Upload
def test_upload_election_year_unavailable(election_day_app_gr):
client = Client(election_day_app_gr)
client.get('/locale/de_CH').follow()
login(client)
new = client.get('/manage/elections/new-election')
new.form['election_de'] = 'Election'
new.form['date'] = date(1990, 1, 1)
new.form['mandates'] = 1
new.form['election_type'] = 'proporz'
new.form['domain'] = 'federation'
new.form.submit()
csv = (
'election_absolute_majority,'
'election_status,'
'entity_id,'
'entity_counted,'
'entity_eligible_voters,'
'entity_received_ballots,'
'entity_blank_ballots,'
'entity_invalid_ballots,'
'entity_blank_votes,'
'entity_invalid_votes,'
'candidate_family_name,'
'candidate_first_name,'
'candidate_id,'
'candidate_elected,'
'candidate_party,'
'candidate_votes\n'
).encode('utf-8')
upload = client.get('/election/election/upload').follow()
upload.form['file_format'] = 'internal'
upload.form['results'] = Upload('data.csv', csv, 'text/plain')
upload = upload.form.submit()
assert "Das Jahr 1990 wird noch nicht unterstützt" in upload
def test_upload_election_invalidate_cache(election_day_app_gr):
client = Client(election_day_app_gr)
client.get('/locale/de_CH').follow()
login(client)
upload_majorz_election(client)
upload_proporz_election(client)
anonymous = Client(election_day_app_gr)
anonymous.get('/locale/de_CH').follow()
assert ">56<" in anonymous.get('/election/majorz-election').follow()
assert ">56<" in anonymous.get('/election/proporz-election').follow()
for slug in ('majorz', 'proporz'):
csv = anonymous.get(f'/election/{slug}-election/data-csv').text
csv = csv.replace('56', '58').encode('utf-8')
upload = client.get(f'/election/{slug}-election/upload').follow()
upload.form['file_format'] = 'internal'
upload.form['results'] = Upload('data.csv', csv, 'text/plain')
upload = upload.form.submit()
assert "Ihre Resultate wurden erfolgreich hochgeladen" in upload
assert ">56<" not in anonymous.get('/election/majorz-election').follow()
assert ">56<" not in anonymous.get('/election/proporz-election').follow()
assert ">58<" in anonymous.get('/election/majorz-election').follow()
assert ">58<" in anonymous.get('/election/proporz-election').follow()
def test_upload_election_temporary_results_majorz(election_day_app):
archive = ArchivedResultCollection(election_day_app.session())
client = Client(election_day_app)
client.get('/locale/de_CH').follow()
login(client)
new = client.get('/manage/elections/new-election')
new.form['election_de'] = 'election'
new.form['date'] = date(2015, 1, 1)
new.form['mandates'] = 1
new.form['election_type'] = 'majorz'
new.form['domain'] = 'federation'
new.form.submit()
assert archive.query().one().progress == (0, 0)
# Wabsti: form value + (optionally) missing lines
csv = '\n'.join((
(
'AnzMandate,'
'BFS,'
'StimmBer,'
'StimmAbgegeben,'
'StimmLeer,'
'StimmUngueltig,'
'StimmGueltig,'
'KandID_1,'
'KandName_1,'
'KandVorname_1,'
'Stimmen_1,'
'KandResultArt_1,'
'KandID_2,'
'KandName_2,'
'KandVorname_2,'
'Stimmen_2,'
'KandResultArt_2,'
'KandID_3,'
'KandName_3,'
'KandVorname_3,'
'Stimmen_3,'
'KandResultArt_3,'
'KandID_4,'
'KandName_4,'
'KandVorname_4,'
'Stimmen_4,'
'KandResultArt_4'
),
(
'7,1701,13567,40,0,0,40,1,Hegglin,Peter,36,1,2,'
'Hürlimann,Urs,25,1,1000,Leere Zeilen,,18,9,1001,'
'Ungültige Stimmen,,0,9'
),
(
'7,1702,9620,41,0,1,40,1,Hegglin,Peter,34,2,2,'
'Hürlimann,Urs,28,2,1000,Leere Zeilen,,6,9,1001,'
'Ungültige Stimmen,,0,9'
)
)).encode('utf-8')
upload = client.get('/election/election/upload').follow()
upload.form['file_format'] = 'wabsti'
upload.form['results'] = Upload('data.csv', csv, 'text/plain')
assert 'erfolgreich hochgeladen' in upload.form.submit()
assert election_day_app.session().query(Election).one().status == 'interim'
assert archive.query().one().progress == (2, 11)
result_wabsti = client.get('/election/election/data-csv').text
assert '1701,True,13567' in result_wabsti
assert '1702,True,9620' in result_wabsti
assert '1711' not in result_wabsti
upload.form['complete'] = True
assert 'erfolgreich hochgeladen' in upload.form.submit()
assert election_day_app.session().query(Election).one().status == 'final'
assert archive.query().one().progress == (2, 11)
result_wabsti = client.get('/election/election/data-csv').text
assert 'Baar,1701,True' in result_wabsti
    # Onegov internal: missing or uncounted entities
csv = '\n'.join((
(
'election_absolute_majority,'
'election_status,'
'entity_id,'
'entity_counted,'
'entity_eligible_voters,'
'entity_received_ballots,'
'entity_blank_ballots,'
'entity_invalid_ballots,'
'entity_blank_votes,'
'entity_invalid_votes,'
'candidate_family_name,'
'candidate_first_name,'
'candidate_id,'
'candidate_elected,'
'candidate_party,'
'candidate_votes'
),
',,1701,True,13567,40,0,0,18,0,Hegglin,Peter,1,False,,36',
',,1701,True,13567,40,0,0,18,0,Hürlimann,Urs,2,False,,25',
',,1702,True,9620,41,0,1,6,0,Hegglin,Peter,1,False,,34',
',,1702,True,9620,41,0,1,6,0,Hürlimann,Urs,2,False,,28',
',,1703,False,1000,0,0,0,0,0,Hegglin,Peter,1,False,,0',
',,1703,False,1000,0,0,0,0,0,Hürlimann,Urs,2,False,,0',
)).encode('utf-8')
upload = client.get('/election/election/upload').follow()
upload.form['file_format'] = 'internal'
upload.form['results'] = Upload('data.csv', csv, 'text/plain')
assert 'erfolgreich hochgeladen' in upload.form.submit()
assert archive.query().one().progress == (2, 11)
result_onegov = client.get('/election/election/data-csv').text
assert result_wabsti.replace('final', 'unknown') in result_onegov
def test_upload_election_temporary_results_proporz(election_day_app):
archive = ArchivedResultCollection(election_day_app.session())
client = Client(election_day_app)
client.get('/locale/de_CH').follow()
login(client)
new = client.get('/manage/elections/new-election')
new.form['election_de'] = 'election'
new.form['date'] = date(2015, 1, 1)
new.form['mandates'] = 1
new.form['election_type'] = 'proporz'
new.form['domain'] = 'federation'
new.form.submit()
assert archive.query().one().progress == (0, 0)
# Wabsti: form value + (optionally) missing lines
csv = '\n'.join((
(
'Einheit_BFS,'
'Kand_Nachname,'
'Kand_Vorname,'
'Liste_KandID,'
'Liste_ID,'
'Liste_Code,'
'Kand_StimmenTotal,'
'Liste_ParteistimmenTotal'
),
'1701,Lustenberger,Andreas,101,1,ALG,948,1435',
'1701,Schriber-Neiger,Hanni,102,1,ALG,208,1435',
'1702,Lustenberger,Andreas,101,1,ALG,290,533',
'1702,Schriber-Neiger,Hanni,102,1,ALG,105,533',
)).encode('utf-8')
csv_stat = '\n'.join((
(
'Einheit_BFS,'
'Einheit_Name,'
'StimBerTotal,'
'WZEingegangen,'
'WZLeer,'
'WZUngueltig,'
'StmWZVeraendertLeerAmtlLeer'
),
'1701,Baar,14119,7462,77,196,122',
'1702,Cham,9926,4863,0,161,50',
)).encode('utf-8')
upload = client.get('/election/election/upload').follow()
upload.form['file_format'] = 'wabsti'
upload.form['results'] = Upload('data.csv', csv, 'text/plain')
upload.form['statistics'] = Upload('data.csv', csv_stat, 'text/plain')
assert 'erfolgreich hochgeladen' in upload.form.submit()
assert election_day_app.session().query(Election).one().status == 'interim'
assert archive.query().one().progress == (2, 11)
result_wabsti = client.get('/election/election/data-csv').text
assert '1701,True,14119' in result_wabsti
assert '1702,True,9926' in result_wabsti
assert '1711' not in result_wabsti
upload.form['complete'] = True
assert 'erfolgreich hochgeladen' in upload.form.submit()
assert election_day_app.session().query(Election).one().status == 'final'
assert archive.query().one().progress == (2, 11)
result_wabsti = client.get('/election/election/data-csv').text
assert 'Baar,1701,True' in result_wabsti
    # Onegov internal: missing entities and number of municipalities
csv = '\n'.join((
(
'election_status,'
'entity_id,'
'entity_counted,'
'entity_eligible_voters,'
'entity_received_ballots,'
'entity_blank_ballots,'
'entity_invalid_ballots,'
'entity_blank_votes,'
'entity_invalid_votes,'
'list_name,'
'list_id,'
'list_number_of_mandates,'
'list_votes,'
'list_connection,'
'list_connection_parent,'
'candidate_family_name,'
'candidate_first_name,'
'candidate_id,'
'candidate_elected,'
'candidate_party,'
'candidate_votes'
),
(
',1701,True,14119,7462,77,196,122,0,'
'ALG,1,0,1435,,,Lustenberger,Andreas,101,False,,948'
),
(
',1701,True,14119,7462,77,196,122,0,'
'ALG,1,0,1435,,,Schriber-Neiger,Hanni,102,False,,208'
),
(
',1702,True,9926,4863,0,161,50,0,'
'ALG,1,0,533,,,Lustenberger,Andreas,101,False,,290'
),
(
',1702,True,9926,4863,0,161,50,0,'
'ALG,1,0,533,,,Schriber-Neiger,Hanni,102,False,,105'
),
(
',1703,False,1000,0,0,0,0,0,'
'ALG,1,0,533,,,Lustenberger,Andreas,101,False,,290'
),
(
',1703,False,1000,0,0,0,0,0,'
'ALG,1,0,533,,,Schriber-Neiger,Hanni,102,False,,105'
),
)).encode('utf-8')
upload = client.get('/election/election/upload').follow()
upload.form['file_format'] = 'internal'
upload.form['results'] = Upload('data.csv', csv, 'text/plain')
assert 'erfolgreich hochgeladen' in upload.form.submit()
assert archive.query().one().progress == (2, 11)
result_onegov = client.get('/election/election/data-csv').text
assert result_wabsti.replace('final', 'unknown') in result_onegov
def test_upload_election_available_formats_canton(election_day_app):
client = Client(election_day_app)
client.get('/locale/de_CH').follow()
login(client)
new = client.get('/manage/elections/new-election')
new.form['election_de'] = 'federal-majorz-election'
new.form['date'] = date(2015, 1, 1)
new.form['mandates'] = 1
new.form['election_type'] = 'majorz'
new.form['domain'] = 'federation'
new.form.submit()
upload = client.get('/election/federal-majorz-election/upload').follow()
assert sorted([o[0] for o in upload.form['file_format'].options]) == [
'internal', 'wabsti'
]
new = client.get('/manage/elections/new-election')
new.form['election_de'] = 'federal-proporz-election'
new.form['date'] = date(2015, 1, 1)
new.form['mandates'] = 1
new.form['election_type'] = 'proporz'
new.form['domain'] = 'federation'
new.form.submit()
upload = client.get('/election/federal-proporz-election/upload').follow()
assert sorted([o[0] for o in upload.form['file_format'].options]) == [
'internal', 'wabsti'
]
new = client.get('/manage/elections/new-election')
new.form['election_de'] = 'cantonal-majorz-election'
new.form['date'] = date(2015, 1, 1)
new.form['mandates'] = 1
new.form['election_type'] = 'majorz'
new.form['domain'] = 'canton'
new.form.submit()
upload = client.get('/election/cantonal-majorz-election/upload').follow()
assert sorted([o[0] for o in upload.form['file_format'].options]) == [
'internal', 'wabsti'
]
new = client.get('/manage/elections/new-election')
new.form['election_de'] = 'cantonal-proporz-election'
new.form['date'] = date(2015, 1, 1)
new.form['mandates'] = 1
new.form['election_type'] = 'proporz'
new.form['domain'] = 'canton'
new.form.submit()
upload = client.get('/election/cantonal-proporz-election/upload').follow()
assert sorted([o[0] for o in upload.form['file_format'].options]) == [
'internal', 'wabsti'
]
def test_upload_election_available_formats_municipality(election_day_app_bern):
client = Client(election_day_app_bern)
client.get('/locale/de_CH').follow()
login(client)
new = client.get('/manage/elections/new-election')
new.form['election_de'] = 'federal-majorz-election'
new.form['date'] = date(2015, 1, 1)
new.form['mandates'] = 1
new.form['election_type'] = 'majorz'
new.form['domain'] = 'federation'
new.form.submit()
upload = client.get('/election/federal-majorz-election/upload').follow()
assert [o[0] for o in upload.form['file_format'].options] == [
'internal', 'wabsti_m'
]
new = client.get('/manage/elections/new-election')
new.form['election_de'] = 'federal-proporz-election'
new.form['date'] = date(2015, 1, 1)
new.form['mandates'] = 1
new.form['election_type'] = 'proporz'
new.form['domain'] = 'federation'
new.form.submit()
upload = client.get('/election/federal-proporz-election/upload').follow()
assert [o[0] for o in upload.form['file_format'].options] == ['internal']
new = client.get('/manage/elections/new-election')
new.form['election_de'] = 'cantonal-majorz-election'
new.form['date'] = date(2015, 1, 1)
new.form['mandates'] = 1
new.form['election_type'] = 'majorz'
new.form['domain'] = 'canton'
new.form.submit()
upload = client.get('/election/cantonal-majorz-election/upload').follow()
assert [o[0] for o in upload.form['file_format'].options] == [
'internal', 'wabsti_m'
]
new = client.get('/manage/elections/new-election')
new.form['election_de'] = 'cantonal-proporz-election'
new.form['date'] = date(2015, 1, 1)
new.form['mandates'] = 1
new.form['election_type'] = 'proporz'
new.form['domain'] = 'canton'
new.form.submit()
upload = client.get('/election/cantonal-proporz-election/upload').follow()
assert [o[0] for o in upload.form['file_format'].options] == ['internal']
new = client.get('/manage/elections/new-election')
new.form['election_de'] = 'communal-majorz-election'
new.form['date'] = date(2015, 1, 1)
new.form['mandates'] = 1
new.form['election_type'] = 'majorz'
new.form['domain'] = 'municipality'
new.form.submit()
upload = client.get('/election/communal-majorz-election/upload').follow()
assert [o[0] for o in upload.form['file_format'].options] == [
'internal', 'wabsti_m'
]
new = client.get('/manage/elections/new-election')
new.form['election_de'] = 'communal-proporz-election'
new.form['date'] = date(2015, 1, 1)
new.form['mandates'] = 1
new.form['election_type'] = 'proporz'
new.form['domain'] = 'municipality'
new.form.submit()
upload = client.get('/election/communal-proporz-election/upload').follow()
assert [o[0] for o in upload.form['file_format'].options] == ['internal']
def test_upload_election_notify_zulip(election_day_app):
client = Client(election_day_app)
client.get('/locale/de_CH').follow()
login(client)
with patch('urllib.request.urlopen') as urlopen:
# No settings
upload_majorz_election(client, canton='zg')
sleep(5)
assert not urlopen.called
election_day_app.zulip_url = 'https://xx.zulipchat.com/api/v1/messages'
election_day_app.zulip_stream = 'WAB'
election_day_app.zulip_user = 'wab-bot@seantis.zulipchat.com'
election_day_app.zulip_key = 'aabbcc'
upload_majorz_election(client, canton='zg')
sleep(5)
assert urlopen.called
assert 'xx.zulipchat.com' in urlopen.call_args[0][0].get_full_url()
def test_upload_election_submit(election_day_app):
client = Client(election_day_app)
client.get('/locale/de_CH').follow()
login(client)
new = client.get('/manage/elections/new-election')
new.form['election_de'] = 'majorz'
new.form['date'] = date(2015, 1, 1)
new.form['mandates'] = 1
new.form['election_type'] = 'majorz'
new.form['domain'] = 'federation'
new.form.submit()
new = client.get('/manage/elections/new-election')
new.form['election_de'] = 'proporz'
new.form['date'] = date(2015, 1, 1)
new.form['mandates'] = 1
new.form['election_type'] = 'proporz'
new.form['domain'] = 'federation'
new.form.submit()
# Internal Majorz
with patch(
'onegov.election_day.views.upload.election.'
'import_election_internal_majorz'
) as import_:
import_.return_value = []
csv = 'csv'.encode('utf-8')
upload = client.get('/election/majorz/upload').follow()
upload.form['file_format'] = 'internal'
upload.form['results'] = Upload('data.csv', csv, 'text/plain')
upload = upload.form.submit()
assert import_.called
# Internal Proporz
with patch(
'onegov.election_day.views.upload.election.'
'import_election_internal_proporz'
) as import_:
import_.return_value = []
csv = 'csv'.encode('utf-8')
upload = client.get('/election/proporz/upload').follow()
upload.form['file_format'] = 'internal'
upload.form['results'] = Upload('data.csv', csv, 'text/plain')
upload = upload.form.submit()
assert import_.called
# Wabsti Majorz
with patch(
'onegov.election_day.views.upload.election.'
'import_election_wabsti_majorz'
) as import_:
import_.return_value = []
csv = 'csv'.encode('utf-8')
upload = client.get('/election/majorz/upload').follow()
upload.form['file_format'] = 'wabsti'
upload.form['majority'] = '5000'
upload.form['results'] = Upload('data.csv', csv, 'text/plain')
upload = upload.form.submit()
assert import_.called
data = client.get('/election/majorz/json').json
assert data['absolute_majority'] == 5000
assert data['completed'] is False
# Wabsti Proporz
with patch(
'onegov.election_day.views.upload.election.'
'import_election_wabsti_proporz'
) as import_:
import_.return_value = []
csv = 'csv'.encode('utf-8')
upload = client.get('/election/proporz/upload').follow()
upload.form['file_format'] = 'wabsti'
upload.form['results'] = Upload('data.csv', csv, 'text/plain')
upload = upload.form.submit()
assert import_.called
# Wabsti Municipality Majorz
principal = election_day_app.principal
principal.domain = 'municipality'
principal.municipality = '351'
election_day_app.cache.set('principal', principal)
with patch(
'onegov.election_day.views.upload.election.'
'import_election_wabsti_majorz'
) as import_:
import_.return_value = []
csv = 'csv'.encode('utf-8')
upload = client.get('/election/majorz/upload').follow()
upload.form['file_format'] = 'wabsti_m'
upload.form['results'] = Upload('data.csv', csv, 'text/plain')
upload = upload.form.submit()
assert import_.called
|
import asyncio
import logging
import pytest
import kopf
from kopf._cogs.structs.ephemera import Memo
from kopf._core.actions.execution import PermanentError, TemporaryError
from kopf._core.engines.indexing import OperatorIndexers
from kopf._core.intents.causes import HANDLER_REASONS, Reason
from kopf._core.reactor.inventory import ResourceMemories
from kopf._core.reactor.processing import process_resource_event
# The extrahandlers are needed to prevent the cycle from ending and the status from being purged.
@pytest.mark.parametrize('cause_type', HANDLER_REASONS)
async def test_fatal_error_stops_handler(
registry, settings, handlers, extrahandlers, resource, cause_mock, cause_type,
caplog, assert_logs, k8s_mocked):
caplog.set_level(logging.DEBUG)
name1 = f'{cause_type}_fn'
event_type = None if cause_type == Reason.RESUME else 'irrelevant'
cause_mock.reason = cause_type
handlers.create_mock.side_effect = PermanentError("oops")
handlers.update_mock.side_effect = PermanentError("oops")
handlers.delete_mock.side_effect = PermanentError("oops")
handlers.resume_mock.side_effect = PermanentError("oops")
await process_resource_event(
lifecycle=kopf.lifecycles.one_by_one,
registry=registry,
settings=settings,
resource=resource,
indexers=OperatorIndexers(),
memories=ResourceMemories(),
memobase=Memo(),
raw_event={'type': event_type, 'object': {}},
event_queue=asyncio.Queue(),
)
assert handlers.create_mock.call_count == (1 if cause_type == Reason.CREATE else 0)
assert handlers.update_mock.call_count == (1 if cause_type == Reason.UPDATE else 0)
assert handlers.delete_mock.call_count == (1 if cause_type == Reason.DELETE else 0)
assert handlers.resume_mock.call_count == (1 if cause_type == Reason.RESUME else 0)
assert not k8s_mocked.sleep.called
assert k8s_mocked.patch.called
patch = k8s_mocked.patch.call_args_list[0][1]['payload']
assert patch['status']['kopf']['progress'] is not None
assert patch['status']['kopf']['progress'][name1]['failure'] is True
assert patch['status']['kopf']['progress'][name1]['message'] == 'oops'
assert_logs([
"Handler .+ failed permanently: oops",
])
# The extrahandlers are needed to prevent the cycle from ending and the status from being purged.
@pytest.mark.parametrize('cause_type', HANDLER_REASONS)
async def test_retry_error_delays_handler(
registry, settings, handlers, extrahandlers, resource, cause_mock, cause_type,
caplog, assert_logs, k8s_mocked):
caplog.set_level(logging.DEBUG)
name1 = f'{cause_type}_fn'
event_type = None if cause_type == Reason.RESUME else 'irrelevant'
cause_mock.reason = cause_type
handlers.create_mock.side_effect = TemporaryError("oops")
handlers.update_mock.side_effect = TemporaryError("oops")
handlers.delete_mock.side_effect = TemporaryError("oops")
handlers.resume_mock.side_effect = TemporaryError("oops")
await process_resource_event(
lifecycle=kopf.lifecycles.one_by_one,
registry=registry,
settings=settings,
resource=resource,
indexers=OperatorIndexers(),
memories=ResourceMemories(),
memobase=Memo(),
raw_event={'type': event_type, 'object': {}},
event_queue=asyncio.Queue(),
)
assert handlers.create_mock.call_count == (1 if cause_type == Reason.CREATE else 0)
assert handlers.update_mock.call_count == (1 if cause_type == Reason.UPDATE else 0)
assert handlers.delete_mock.call_count == (1 if cause_type == Reason.DELETE else 0)
assert handlers.resume_mock.call_count == (1 if cause_type == Reason.RESUME else 0)
assert not k8s_mocked.sleep.called
assert k8s_mocked.patch.called
patch = k8s_mocked.patch.call_args_list[0][1]['payload']
assert patch['status']['kopf']['progress'] is not None
assert patch['status']['kopf']['progress'][name1]['failure'] is False
assert patch['status']['kopf']['progress'][name1]['success'] is False
assert patch['status']['kopf']['progress'][name1]['delayed']
assert_logs([
"Handler .+ failed temporarily: oops",
])
# The extrahandlers are needed to prevent the cycle from ending and the status from being purged.
@pytest.mark.parametrize('cause_type', HANDLER_REASONS)
async def test_arbitrary_error_delays_handler(
registry, settings, handlers, extrahandlers, resource, cause_mock, cause_type,
caplog, assert_logs, k8s_mocked):
caplog.set_level(logging.DEBUG)
name1 = f'{cause_type}_fn'
event_type = None if cause_type == Reason.RESUME else 'irrelevant'
cause_mock.reason = cause_type
handlers.create_mock.side_effect = Exception("oops")
handlers.update_mock.side_effect = Exception("oops")
handlers.delete_mock.side_effect = Exception("oops")
handlers.resume_mock.side_effect = Exception("oops")
await process_resource_event(
lifecycle=kopf.lifecycles.one_by_one,
registry=registry,
settings=settings,
resource=resource,
indexers=OperatorIndexers(),
memories=ResourceMemories(),
memobase=Memo(),
raw_event={'type': event_type, 'object': {}},
event_queue=asyncio.Queue(),
)
assert handlers.create_mock.call_count == (1 if cause_type == Reason.CREATE else 0)
assert handlers.update_mock.call_count == (1 if cause_type == Reason.UPDATE else 0)
assert handlers.delete_mock.call_count == (1 if cause_type == Reason.DELETE else 0)
assert handlers.resume_mock.call_count == (1 if cause_type == Reason.RESUME else 0)
assert not k8s_mocked.sleep.called
assert k8s_mocked.patch.called
patch = k8s_mocked.patch.call_args_list[0][1]['payload']
assert patch['status']['kopf']['progress'] is not None
assert patch['status']['kopf']['progress'][name1]['failure'] is False
assert patch['status']['kopf']['progress'][name1]['success'] is False
assert patch['status']['kopf']['progress'][name1]['delayed']
assert_logs([
"Handler .+ failed with an exception. Will retry.",
])
|
#!/usr/bin/env python
import scipy.stats as stats
import pandas as pd
import numpy as np
def contingency_chi2(categorical_1, categorical_2):
    '''
    contingency_chi2(categorical_1, categorical_2)
    Computes the chi-squared statistic for the contingency table of the two
    categorical series; returns the statistic and the crosstab it was
    computed on.
    '''
crosstab = pd.crosstab(categorical_1, categorical_2)
chi2, _, _, _ = stats.chi2_contingency(crosstab)
return chi2, crosstab
def check_nulls(df):
"""
check_nulls(df)
Checks a dataframe for null values.
Parameters
----------
df : Pandas dataframe
Specifies dataframe to check
"""
nulls = df.isna().sum()
return nulls
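# Illustrative usage (a minimal sketch, not part of the original module):
# exercises both helpers on a small hand-made dataframe.
if __name__ == '__main__':
    df = pd.DataFrame({
        'color': ['red', 'blue', 'red', 'blue', 'red'],
        'size': ['S', 'S', 'L', 'L', np.nan],
    })
    chi2, crosstab = contingency_chi2(df['color'], df['size'])
    print(chi2)             # chi-squared statistic of the color/size table
    print(check_nulls(df))  # expects one null, in the 'size' column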
|
def create():
    # Start a fresh OBJ file with a comment header and an object name.
    with open("out.obj", 'w') as f:
        f.write("# OBJ file\n")
        f.write("o Point_Cloud.001\n")
def pointswrite(x, y, z):
    # Append a single vertex record ("v x y z") to the OBJ file.
    with open("out.obj", 'a+') as f:
        f.write("v " + str(x) + " " + str(y) + " " + str(z) + "\n")
def finish(point):
    # Emit every (x, y, z) triple in `point` as a vertex line; pointswrite
    # reopens the file itself, so no extra handle is needed here.
    for n in point:
        pointswrite(n[0], n[1], n[2])
#create()
#a = [[0,0,0],[1,1,1],[2,2,2],[3,3,3],[4,4,4],[5,5,5],[6,6,6]]
#finish(a)
|
import scrapy
from locations.items import GeojsonPointItem
from locations.hours import OpeningHours
import json
import re
class IHOPSpider(scrapy.Spider):
name = "ihop"
item_attributes = {'brand': "IHOP", 'brand_wikidata': "Q1185675"}
allowed_domains = ["restaurants.ihop.com"]
start_urls = (
'https://restaurants.ihop.com/en-us',
)
def parse_opening_hours(self, hours):
opening_hours = OpeningHours()
for item in hours:
day = item.xpath('.//span[@class="daypart"]/@data-daypart').extract_first()
open_time = item.xpath('.//span[@class="time-open"]/text()').extract_first()
close_time = item.xpath('.//span[@class="time-close"]/text()').extract_first()
            if not open_time or not close_time:
if item.xpath('.//span[@class="time"]/span/text()').extract_first() == 'Open 24 Hours':
open_time = '12:00am'
close_time = '11:59pm'
else:
continue
opening_hours.add_range(day=day[:2],
open_time=open_time.upper(),
close_time=close_time.upper(),
time_format='%I:%M%p')
return opening_hours.as_opening_hours()
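    # Worked example (illustrative): a row whose daypart is "Monday" with
    # time-open "7:00am" and time-close "10:00pm" yields the range
    # "Mo 07:00-22:00" in the string returned above.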
def parse(self, response):
state_urls = response.xpath('//div[@class="browse-container"]//li//a/@href').extract()
for state_url in state_urls:
yield scrapy.Request(url=state_url, callback=self.parse_state)
def parse_state(self, response):
city_urls = response.xpath('//ul[@class="map-list"]/li//a/@href').extract()
for city_url in city_urls:
yield scrapy.Request(url=city_url, callback=self.parse_city)
def parse_city(self, response):
location_urls = response.xpath('//div[@class="map-list-item-header"]/a/@href').extract()
for location_url in location_urls:
if location_url != '#':
yield scrapy.Request(url=location_url, callback=self.parse_location)
def parse_location(self, response):
info_json = response.xpath("//script[@type='application/ld+json' and contains(text(), 'geo')]/text()").extract_first()
basic_info = json.loads(info_json)[0]
point = {
"lat": basic_info["geo"]["latitude"],
"lon": basic_info["geo"]["longitude"],
"name": basic_info["name"],
"addr_full": basic_info["address"]["streetAddress"],
"city": basic_info["address"]["addressLocality"],
"state": basic_info["address"]["addressRegion"],
"postcode": basic_info["address"]["postalCode"],
"phone": basic_info["address"]["telephone"],
"website": basic_info["url"],
"ref": "_".join(re.search(r".+/(.+?)/(.+?)/(.+?)/?(?:\.html|$)", response.url).groups())
}
hours = self.parse_opening_hours(response.xpath('//div[contains(@class, "hide-mobile")]//div[@class="day-hour-row"]'))
if hours:
point['opening_hours'] = hours
return GeojsonPointItem(**point)
|
from torch.utils.tensorboard import SummaryWriter
class dl_logger:
def log(self,tag,message,idx):
pass
def close(self):
pass
class print_logger(dl_logger):
def log(self,tag,message,idx=-1):
print("{}: {}".format(tag,message))
class tensorboard_logger(dl_logger):
    def __init__(self, current_analysis_name):
        super().__init__()
        self.writer = SummaryWriter(current_analysis_name)
def log(self,tag,message,idx=-1):
self.writer.add_scalar(tag,message,int(idx))
def close(self):
self.writer.close()
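# Minimal usage sketch (illustrative, not part of the original module):
# both concrete loggers share the dl_logger interface, so the console
# logger can be swapped for the TensorBoard one without changing call sites.
# The run directory name 'runs/demo' is a hypothetical example.
if __name__ == '__main__':
    logger = print_logger()  # or: tensorboard_logger('runs/demo')
    for step in range(3):
        logger.log('loss', 1.0 / (step + 1), step)
    logger.close()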
|
import sys
sys.path.append("../")
import unittest
from python_dict_wrapper import wrap
from models import get_dataset, preprocess_data, get_feature_model, get_aggregator
from modelzoo import slowfast_wrapper
import torch
class TestSlowFast(unittest.TestCase):
@unittest.skip("Slow")
def test_each(self):
"""Smoke tests"""
for features in ["I3D", "Slow", "SlowFast"]:
args = wrap(
{
"features": features,
"slowfast_root": "../../slowfast/",
"ckpt_root": "../pretrained",
}
)
model = slowfast_wrapper.SlowFast(args)
X = torch.randn(1, 3, 80, 224, 224, device="cuda")
X_forwarded = model.forward(X)
self.assertEqual(X_forwarded.shape[0], 1)
def test_end_to_end(self):
"""Smoke tests"""
for features in ["SlowFast", "Slow", "I3D"]:
args = wrap(
{
"features": features,
"slowfast_root": "../../slowfast/",
"ckpt_root": "../pretrained",
"aggregator": "average",
"data_root": "../data",
"dataset": "vim2",
"batch_size": 1,
"layer": 0,
"subject": "s1",
"width": 224,
"ntau": 80,
"nt": 1,
}
)
feature_model, activations, metadata = get_feature_model(args)
aggregator = get_aggregator(metadata, args)
X = torch.randn(1, 3, 80, 224, 224, device="cuda")
_ = feature_model(X)
for k, v in activations.items():
aggregator(v)
@unittest.skip("Broken")
def test_X3DM(self):
"""Smoke tests"""
for features in ["X3DM"]:
args = wrap(
{
"features": features,
"slowfast_root": "../../slowfast/",
"ckpt_root": "../pretrained",
}
)
model = slowfast_wrapper.SlowFast(args)
X = torch.randn(1, 3, 80, 224, 224, device="cuda")
X_forwarded = model.forward(X)
self.assertEqual(X_forwarded.shape[0], 1)
if __name__ == "__main__":
unittest.main()
|
import pkg_resources
try:
__version__ = pkg_resources.get_distribution("nbb").version
except Exception:
__version__ = "unknown"
|
# Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Flags for the compute instance groups managed commands."""
from googlecloudsdk.calliope import arg_parsers
from googlecloudsdk.calliope import base
DEFAULT_LIST_FORMAT = """\
table(
name,
location():label=LOCATION,
location_scope():label=SCOPE,
baseInstanceName,
size,
targetSize,
instanceTemplate.basename(),
autoscaled
)
"""
def AddTypeArg(parser):
parser.add_argument(
'--type',
choices={
'opportunistic': 'Do not proactively replace instances. Create new '
'instances and delete old on resizes of the group.',
'proactive': 'Replace instances proactively.',
},
default='proactive',
category=base.COMMONLY_USED_FLAGS,
help='Desired update type.')
def AddMaxSurgeArg(parser):
parser.add_argument(
'--max-surge',
type=str,
      help=('Maximum additional number of instances that '
            'can be created during the update process. '
            'This can be a fixed number (e.g. 5) or '
            'a percentage of the size of the managed '
            'instance group (e.g. 10%)'))
def AddMaxUnavailableArg(parser):
parser.add_argument(
'--max-unavailable',
type=str,
      help=('Maximum number of instances that can be '
            'unavailable during the update process. '
            'This can be a fixed number (e.g. 5) or '
            'a percentage of the size of the managed '
            'instance group (e.g. 10%)'))
def AddMinReadyArg(parser):
parser.add_argument(
'--min-ready',
type=arg_parsers.Duration(lower_bound='0s'),
help=('Minimum time for which a newly created instance '
'should be ready to be considered available. For example `10s` '
'for 10 seconds. See $ gcloud topic datetimes for information '
'on duration formats.'))
def AddForceArg(parser):
parser.add_argument(
'--force',
action='store_true',
help=('If set, accepts any original or new version '
'configurations without validation.'))
|
import re
from docutils.core import publish_cmdline, default_description
from docutils.nodes import NodeVisitor
from docutils.writers import Writer
class WikidotTranslator(NodeVisitor):
"""Write output in Wikidot format.
Based on http://www.wikidot.com/doc:wiki-syntax
"""
def __init__(self, document):
NodeVisitor.__init__(self, document)
self.section_level = 1
self.first_paragraph = True
self.inside_literal_block = False
self.lists = []
self.block_input = False
self._content = []
def get_text(self):
return ''.join(self._content)
def _add(self, string):
if not self.block_input:
self._content.append(string)
def _nop(self, node):
pass
def _newline_if_not_first(self):
if not self.first_paragraph:
self._add("\n")
visit_document = _nop
depart_document = _nop
def visit_section(self, node):
self.section_level += 1
def depart_section(self, node):
self.section_level -= 1
def visit_title(self, node):
self._newline_if_not_first()
self._add("+" * self.section_level + " ")
def depart_title(self, node):
self._add("\n")
self.first_paragraph = False
def visit_Text(self, node):
string = node.astext()
if not self.inside_literal_block:
string = string.replace('\n', ' ')
self._add(string)
depart_Text = _nop
def visit_paragraph(self, node):
self._newline_if_not_first()
def depart_paragraph(self, node):
self._add("\n")
self.first_paragraph = False
def visit_strong(self, node):
self._add("**")
depart_strong = visit_strong
def visit_reference(self, node):
        if 'name' in node:
self._add("[%s " % node['refuri'])
def depart_reference(self, node):
        if 'name' in node:
self._add("]")
visit_target = _nop
depart_target = _nop
def visit_literal_block(self, node):
if re.search(r'(class )|(def )|(import )', node.astext()):
self._add("\n[[code type=\"Python\"]]\n")
else:
self._add("\n[[code]]\n")
self.inside_literal_block = True
def depart_literal_block(self, node):
self._add("\n[[/code]]\n")
self.inside_literal_block = False
def visit_topic(self, node):
if 'contents' in node['classes']:
self._add("[[toc]]\n")
self.block_input = True
def depart_topic(self, node):
self.block_input = False
def visit_bullet_list(self, node):
self.lists.append('bullet')
def depart_bullet_list(self, node):
self.lists.pop()
def visit_enumerated_list(self, node):
self.lists.append('enumerated')
def depart_enumerated_list(self, node):
self.lists.pop()
def visit_list_item(self, node):
self._add(" " * (len(self.lists) - 1) * 2)
        if self.lists[-1] == 'enumerated':
self._add("# ")
else:
self._add("* ")
self.first_paragraph = True
depart_list_item = _nop
class WikidotWriter(Writer):
def translate(self):
visitor = WikidotTranslator(self.document)
self.document.walkabout(visitor)
self.output = visitor.get_text()
if __name__ == '__main__':
description = ('Generates documents in Wikidot format from standalone '
'reStructuredText sources. ' + default_description)
publish_cmdline(writer=WikidotWriter(), description=description)
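# A minimal programmatic usage sketch (added for illustration, not part of the
# original script): docutils' standard publish_string API drives the same
# writer without the command line.
def _example_convert(rst_source):
    from docutils.core import publish_string
    return publish_string(source=rst_source, writer=WikidotWriter())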
|
# -*- coding: utf-8 -*-
"""
Microsoft-Windows-Heap-Snapshot
GUID : 901d2afa-4ff6-46d7-8d0e-53645e1a47f5
"""
from construct import Int8sl, Int8ul, Int16ul, Int16sl, Int32sl, Int32ul, Int64sl, Int64ul, Bytes, Double, Float32l, Struct
from etl.utils import WString, CString, SystemTime, Guid
from etl.dtyp import Sid
from etl.parsers.etw.core import Etw, declare, guid
@declare(guid=guid("901d2afa-4ff6-46d7-8d0e-53645e1a47f5"), event_id=100, version=1)
class Microsoft_Windows_Heap_Snapshot_100_1(Etw):
pattern = Struct(
"HeapSnapshotInstance" / Int32ul,
"HeapSnapshotSequence" / Int32ul,
"HeapSnapshotBufferLen" / Int32ul,
"HeapSnapshotBuffer" / Bytes(lambda this: this.HeapSnapshotBufferLen)
)
@declare(guid=guid("901d2afa-4ff6-46d7-8d0e-53645e1a47f5"), event_id=200, version=1)
class Microsoft_Windows_Heap_Snapshot_200_1(Etw):
pattern = Struct(
"HeapSnapshotInstance" / Int32ul,
"TotalData" / Int32ul
)
|
from keras.preprocessing.image import ImageDataGenerator
from sklearn.metrics import confusion_matrix
# The snippet below calculates various performance metrics for the trained model.
tpr=[]
fpr=[]
total_no_fp=0
total_no_tp=0
total_no_fn=0
total_no_tn=0
total=[]
precision=[]
f1=[]
acc=[]
sp=[]
validation_path = "<Enter_testing_imagefolder_path>"  # placeholder: set to the test image folder path
for i in range(5):
valid_datagen = ImageDataGenerator(rescale=1./255)
valid_dir = valid_datagen.flow_from_directory(validation_path+str(i),
target_size=(224, 224),
batch_size=16,
class_mode='binary',shuffle=False)
Y_pred = model.predict_generator(valid_dir, steps= len(valid_dir))
print("\n Y_pred ","\n", Y_pred)
XtrueX = valid_dir.classes
y_pred = []
for it1 in range(len(Y_pred)):
if Y_pred[it1]>0.8:
y_pred.append(1)
if XtrueX[it1]==1:
total_no_tp = total_no_tp + 1
else:
total_no_fp = total_no_fp + 1
else:
y_pred.append(0)
if XtrueX[it1] == 1:
total_no_fn = total_no_fn + 1
else:
total_no_tn = total_no_tn + 1
print("case == ", i)
print("valid_dir.classes=",valid_dir.classes)
print(" y_pred =",y_pred)
Conf_Matrix = confusion_matrix(valid_dir.classes, y_pred)#.ravel()
tpr.append( Conf_Matrix[1][1] /(Conf_Matrix[1][1]+Conf_Matrix[1][0]) ) #tpr=tp/(tp+fn)
fpr.append( Conf_Matrix[0][1] /(Conf_Matrix[0][0]+Conf_Matrix[0][1]) ) #fpr=fp/(tn+fp)
current=len(fpr)-1
for it in range(len(fpr)):
if len(fpr)==1:
break
if fpr[len(fpr)-it-2] <= fpr[current]:
break
else:
            # proper swap so the (fpr, tpr) pairs stay sorted by fpr
            temp = fpr[current]
            fpr[current] = fpr[len(fpr)-it-2]
            fpr[len(fpr)-it-2] = temp
            temp = tpr[current]
            tpr[current] = tpr[len(tpr)-it-2]
            tpr[len(tpr)-it-2] = temp
current=len(tpr)-it-2
pr= Conf_Matrix[1][1] /(Conf_Matrix[1][1]+Conf_Matrix[0][1])
#pr=tp/(tp+fp)
precision.append(pr)
f1.append( (2*Conf_Matrix[1][1]) / (2*Conf_Matrix[1][1] + Conf_Matrix[0][1] + Conf_Matrix[1][0]) )
#f1 = 2*tp / (2*tp+fp+fn)
acc.append( (Conf_Matrix[1][1]+Conf_Matrix[0][0])/(Conf_Matrix[0][0]+Conf_Matrix[0][1]+Conf_Matrix[1][0]+Conf_Matrix[1][1]) )
#acc = (tp+tn) / (tp+tn+fp+fn)
sp.append( Conf_Matrix[0][0] /(Conf_Matrix[0][0]+Conf_Matrix[0][1]) )
#sp = tn / (tn+fp)
print("total false positive = ",total_no_fp,
"\ntotal true positive = ",total_no_tp,
"\ntotal false negative = ",total_no_fn,
"\ntotal true negative = ",total_no_tn)
accuracy = (total_no_tp+total_no_tn)/(total_no_tp+total_no_tn+total_no_fn+total_no_fp)
precision = (total_no_tp)/(total_no_tp+total_no_fp)
recall = (total_no_tp)/(total_no_tp+total_no_fn)
f1_score = (2.0*precision*recall)/(precision+recall)
iou = (total_no_tp)/(total_no_tp+total_no_fp+total_no_fn)
print("\nAccuracy = ",accuracy)
print("\nPrecision = ",precision)
print("\nRecall = ",recall)
print("\nF1_score = ",f1_score)
print("\nIoU = ", iou)
|
#
# Show customizing the circular scrolling animation of a label with `LV_LABEL_LONG_SCROLL_CIRCULAR` long mode.
#
label1 = lv.label(lv.scr_act())
label1.set_long_mode(lv.label.LONG.SCROLL_CIRCULAR) # Circular scroll
label1.set_width(150)
label1.set_text("It is a circularly scrolling text. ")
label1.align(lv.ALIGN.CENTER, 0, 40)
|
import unittest
from wonk import create_app
from wonk.models.users import db
app = create_app('testing')
class NeuralTestCase(unittest.TestCase):
def setUp(self):
self.test_client = app.test_client()
with app.app_context():
db.create_all()
self.populate_db()
def tearDown(self):
with app.app_context():
db.session.remove()
db.drop_all()
def populate_db(self):
pass
|
# Generated by Django 3.0.2 on 2020-03-28 21:47
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('tutor', '0002_auto_20200328_1722'),
]
operations = [
migrations.AddField(
model_name='job',
name='isConfirmed',
field=models.BooleanField(default=True),
),
]
|
# coding: utf-8
from multiprocessing import Lock
from .timer_task_entry import TimerTaskEntry
class TimerTaskList(object):
"""
延时任务列表,为双向链表结构
"""
__lock = Lock()
    # Expiration time
_expiration = -1
# @property
# def expiration(self):
# with self.__lock:
# return self._expiration
#
# @expiration.setter
# def expiration(self, value):
# with self.__lock:
# self._expiration = value
    # Task counter
_task_counter = 0
@property
def task_counter(self):
with self.__lock:
return self._task_counter
def __init__(self, expiration=0):
"""
初始化延时任务列表
:param expiration:
"""
'''
链表root为哨兵,不包含任务,便于处理边界
'''
self.root = TimerTaskEntry(expiration=None, task=None, persist=False)
self.root.prev = self.root
self.root.next = self.root
self._expiration = expiration
def set_expiration(self, expiration):
"""
设置expiration值,并新值与原值是否相等。
如果不相等,返回True,反之返回False。
:param expiration:
:return:
"""
with self.__lock:
prev, self._expiration = self._expiration, expiration
return prev != expiration
def get_expiration(self):
"""
获取过期时间
:return:
"""
with self.__lock:
return self._expiration
def add(self, timer_task_entry):
"""
添加任务
:param timer_task_entry:
:return:
"""
with self.__lock:
tail = self.root.prev
timer_task_entry.next = self.root
timer_task_entry.prev = tail
tail.next = timer_task_entry
self.root.prev = timer_task_entry
self._task_counter += 1
def remove(self, timer_task_entry):
"""
删除任务
:param timer_task_entry:
:return:
"""
with self.__lock:
timer_task_entry.next.prev = timer_task_entry.prev
timer_task_entry.prev.next = timer_task_entry.next
timer_task_entry.next = None
timer_task_entry.prev = None
self._task_counter -= 1
def flush(self, func):
"""
刷新任务列表,将所有任务删除,然后依次调用参数func来处理任务
:param func: 任务处理函数
:return:
"""
head = self.root.next
self.root.next = self.root
self.root.prev = self.root
while head is not self.root:
_next = head.next
self.remove(head)
func(head)
head = _next
        # Reset the list's expiration time
self._expiration = 0
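# A minimal usage sketch (added for illustration; assumes TimerTaskEntry
# accepts the expiration/task/persist keywords used for the sentinel above and
# exposes an `expiration` attribute).
def _example_usage():
    task_list = TimerTaskList(expiration=1000)
    entry = TimerTaskEntry(expiration=1000, task=lambda: print('fired'), persist=False)
    task_list.add(entry)
    # flush hands every detached entry to the callback
    task_list.flush(lambda e: print('flushed entry expiring at', e.expiration))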
|
import unittest
import cogent
from cogent.tests import TestCase
from cogent import settings
settings.HTML_TEST_REPORT_FILENAME = "my_test_report.html"
class TestClassOne(TestCase):
def test1(self):
# Pass Test Case
expected_number = 90
actual_number = 90
        print('Test output for test case 1')
self.assertEqual(expected_number, actual_number)
def test2(self):
# Pass Test Case
expected = True
actual = True
        print('Test output for test case 2')
self.assertEqual(expected, actual)
class TestClassTwo(TestCase):
def test1(self):
# Pass Test Case
expected_number = 90
actual_number = 90
        print('Test output for test case 1')
self.assertEqual(expected_number, actual_number)
def test2(self):
# Fail Test Case
expected = True
actual = False
        print('Test output for test case 2')
print('This test is for testing multiple messages')
self.assertEqual(expected, actual)
def test3(self):
# Error Test Case
        print('Test output for test case 3')
raise ValueError('flowid not matches')
@unittest.skip('skipped')
def test4(self):
# Skip Test Case
pass
if __name__ == "__main__":
cogent.main.settings = settings
cogent.main()
|
print ("boot.py loaded")
from ___blocks.___main_block import MainBlock
MainBlock.run()
|
"""Pipeline parallel on a single device. This is only used for debugging."""
from typing import Sequence, Any, Dict
import jax
from jax import linear_util as lu
from jax.core import Var, ClosedJaxpr, Literal, gensym
from jax.interpreters import partial_eval as pe
from jax.interpreters.xla import DeviceArray
from alpa.pipeline_parallel.computation import (
PipelineComputation, XlaPipelineComputation,
slice_closed_jaxpr_by_full_pipeline_marks,
mark_missing_vars_in_backward_computation_pipeline_marks)
class LocalPipelineRunner:
"""Single-device local pipeline runner."""
def __init__(self, name: str, global_invals: Sequence[DeviceArray]):
self.name = name
self.env = {}
self.global_invals = global_invals
def run_stage(self, stage: PipelineComputation, invals: Dict[Var, Any]):
"""
Run a pipeline stage.
Args:
stage (PipelineComputation): The pipeline stage to run.
invals (Dict[Var, Any], optional): Input value dict.
"""
runnable = stage.get_runnable()
invals_list = []
for var in stage.invars:
invals_list.append(invals[var])
outvals_list = runnable(*invals_list)
outvals = dict(zip(stage.outvars, outvals_list))
self.env.update(outvals)
def get_val(self, var):
"""Get the value of a variable from the env."""
return self.env[var]
def del_var(self, var):
"""Delete a variable from the env."""
del self.env[var]
class LocalPipelineExecutable:
"""A pipeline parallel executable running on a single local device.
Args:
stages (Sequence[PipelineComputation]): the pipeline stages to be
executed.
global_invars (Sequence[Var]): Global input variables.
global_outvars (Sequence[Var]): Global output variables.
"""
def __init__(self,
*,
stages: Sequence[PipelineComputation],
global_invars: Sequence[Var],
global_outvars: Sequence[Var]):
self.stages = stages
self.global_invars = global_invars
self.global_outvars = global_outvars
def launch_on_driver(self, *args):
"""Run function."""
global_invals = dict(zip(self.global_invars, args))
runners = {}
var_stage_mapping = {}
var_reference_count = {}
# Create variable dependency mapping.
for stage in self.stages:
for var in stage.invars:
if var not in global_invals:
assert var in var_stage_mapping, f"referred to an unknown var {var}"
var_reference_count[var] = var_reference_count.get(var,
0) + 1
for var in stage.outvars:
var_stage_mapping[var] = stage.name
for var in self.global_outvars:
if not isinstance(var, Literal):
assert var in var_stage_mapping, f"referred to an unknown var {var}"
var_reference_count[var] = var_reference_count.get(var, 0) + 1
for stage in self.stages:
stage_invals = {}
for var in stage.invars:
if var in global_invals:
stage_invals[var] = global_invals[var]
else:
assert var in var_stage_mapping, f"referred to an unknown var {var}"
sender_runner = runners[var_stage_mapping[var]]
stage_invals[var] = sender_runner.get_val(var)
var_reference_count[var] -= 1
if var_reference_count[var] == 0:
sender_runner.del_var(var)
if stage.name not in runners:
runners[stage.name] = LocalPipelineRunner(
stage.name, global_invals)
runners[stage.name].run_stage(stage, stage_invals)
global_outvals_list = []
for var in self.global_outvars:
if isinstance(var, Literal):
global_outvals_list.append(var.val)
else:
assert var in var_stage_mapping, f"referred to an unknown var {var}"
sender_runner = runners[var_stage_mapping[var]]
global_outvals_list.append(sender_runner.get_val(var))
var_reference_count[var] -= 1
if var_reference_count[var] == 0:
sender_runner.del_var(var)
return global_outvals_list
def compile_local_pipeline_executable(fun: lu.WrappedFun, *avals):
"""Compile a local pipeline executable that only runs on a singel device."""
with jax.disable_jit():
jaxpr, _, consts = pe.trace_to_jaxpr_final(fun, avals)
closed_jaxpr = ClosedJaxpr(jaxpr, consts)
global_invars = closed_jaxpr.jaxpr.invars
global_outvars = closed_jaxpr.jaxpr.outvars
gensym_func = gensym([closed_jaxpr.jaxpr])
jax_pipeline_stages = slice_closed_jaxpr_by_full_pipeline_marks(
closed_jaxpr)
jax_pipeline_stages = mark_missing_vars_in_backward_computation_pipeline_marks(
jax_pipeline_stages, global_invars, global_outvars, gensym_func)
xla_pipeline_stages = [
XlaPipelineComputation.from_jax_pipeline_computation(stage)
for stage in jax_pipeline_stages
]
return LocalPipelineExecutable(
stages=xla_pipeline_stages,
global_invars=global_invars,
global_outvars=global_outvars)
|
from tornado import gen
from engine.social.interface import SocialInterface
from engine.social.web import WebSocialInterfaceMixin
__author__ = 'kollad'
class BackdoorSocialInterface(SocialInterface, WebSocialInterfaceMixin):
def authenticate(self, handler):
user_id = handler.get_argument('user_id')
response = {
'user_id': 'backdoor.{}'.format(user_id)
}
return response
@gen.coroutine
def get_social_data(self, social_data):
user_id = social_data['user_id']
response = {
'first_name': 'Backdoor User #{}'.format(user_id),
'last_name': '',
'avatar': '',
'social_id': user_id,
}
return response
def get_profile_fields(self, social_data):
return {
'name': u'{first_name} {last_name}'.format(**social_data),
'avatar': social_data['avatar'],
'social_id': social_data['social_id']
}
|
# Import Tcl Tkinter Combobox to use it and modify default
from tkinter.ttk import Combobox as ttkCombobox
class Combobox(ttkCombobox):
    # This Combobox can be created once a root Tk window (or any parent
    # widget) exists; `root` is the only required argument.
    # The remaining parameters customize the design and all have defaults,
    # so the widget works out of the box while staying fully customizable.
    # The instance wraps the base ttk Combobox, adding modified styles,
    # grid placement, and a default value.
    # __init__ collects all positional and keyword arguments and forwards
    # any unrecognized ones to the underlying ttk Combobox, so errors raised
    # here most likely originate from ttk.Combobox itself; check that every
    # argument is supported by either this Combobox or ttk.Combobox.
def __init__(self,
root, # root is a master window to place this combobox into it
class_="TCombobox", # combobox class which can be inherited
cursor="arrow", # cursor style when mouse over combobox
exportselection=1, # copy selected text when selection appeared in combobox
height=24, # height of value list
justify="left", # justify combobox text left right or center
postcommand="", # combobox postcommand when selected item
style="TCombobox", # combobox style object
takefocus=1, # set take focus to 0 if you don't want to focusing effect
textvariable=False, # combobox text variable, to get and set value dynamically
validate=None, # validate
validatecommand=False, # validate command
values=['No more item'], # combo values to set as a list
width=24, # combobox width
xscrollcommand=False, # combobox left right scrolling
font=("Calibri", 10), # combobox font style
row=0, # row position
column=0, # column position
padx=0, # padding for left and right
pady=0, # padding for top and bottom
ipady=2, # internal padding for top and bottom
sticky='w', # combobox sticky position w, e, s, n, we, ne, se etc
text="-----", # default text in combobox widget
default=0, # default text index from value list
*args, **kwargs # extra arguments and keyword arguments
):
        # Merge the default text with the supplied values list.
values = [text] + list(values)
        # Forward all arguments and keyword arguments to the parent class.
super(Combobox, self).__init__(root,
class_=class_,
cursor=cursor,
exportselection=exportselection,
height=height,
justify=justify,
postcommand=postcommand,
validate=validate,
validatecommand=validatecommand,
style=style,
takefocus=takefocus,
textvariable=textvariable,
values=values,
width=width,
xscrollcommand=xscrollcommand,
font=font,
*args, **kwargs
)
        # Place the widget in the root window via grid.
self.grid(
row=row, # grid row position
column=column, # grid column position
padx=padx, # grid padding left and right
pady=pady, # grid padding top and bottom
ipady=ipady, # grid internal padding top and bottom
sticky=sticky # grid sticky position
)
        # Set the default value shown in the combobox: the `default` keyword
        # is an index into the merged values list. If the requested index is
        # out of range, an IndexError will be raised.
self.set(values[default])
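# A minimal usage sketch (added for illustration, not part of the original
# module): the widget places itself via grid() in __init__, so creating it is
# enough to display it.
if __name__ == "__main__":
    import tkinter as tk
    root = tk.Tk()
    combo = Combobox(root, values=["One", "Two", "Three"], text="Pick one")
    root.mainloop()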
|
"""
-*- test-case-name: PyHouse.src.Modules.Web.test.test_web_house -*-
@name: PyHouse/src/Modules/Web/web_house.py
@author: D. Brian Kimmel
@contact: D.BrianKimmel@gmail.com
@copyright: (c) 2013-2015 by D. Brian Kimmel
@license: MIT License
@note: Created on Jun 3, 2013
@summary: Web interface to house info for the selected house.
"""
# Import system type stuff
import os
from nevow import athena
from nevow import loaders
# Import PyMh files and modules.
from Modules.Web.web_utils import JsonUnicode, GetJSONHouseInfo
from Modules.Computer import logging_pyh as Logger
LOG = Logger.getLogger('PyHouse.webHouse ')
# Handy helper for finding external resources nearby.
webpath = os.path.join(os.path.split(__file__)[0])
templatepath = os.path.join(webpath, 'template')
class HouseElement(athena.LiveElement):
""" a 'live' house element.
"""
docFactory = loaders.xmlfile(os.path.join(templatepath, 'houseElement.html'))
jsClass = u'house.HouseWidget'
def __init__(self, p_workspace_obj):
self.m_workspace_obj = p_workspace_obj
self.m_pyhouse_obj = p_workspace_obj.m_pyhouse_obj
@athena.expose
def getHouseData(self):
l_house = GetJSONHouseInfo(self.m_pyhouse_obj)
return l_house
@athena.expose
def saveHouseData(self, p_json):
"""House data has been returned, so update the house info.
"""
l_json = JsonUnicode().decode_json(p_json)
LOG.info('Update House info - {}'.format(l_json))
l_delete = l_json['Delete']
if l_delete:
try:
del self.m_pyhouse_obj.House
except AttributeError:
LOG.error("Failed to delete - JSON: {}".format(l_json))
return
self.m_pyhouse_obj.House.Name = l_json['Name']
self.m_pyhouse_obj.House.Key = int(l_json['Key'])
self.m_pyhouse_obj.House.Active = True
self.m_pyhouse_obj.House.UUID
l_obj = self.m_pyhouse_obj.House.Location
l_obj.Street = l_json['Location']['Street']
l_obj.City = l_json['Location']['City']
l_obj.State = l_json['Location']['State']
l_obj.ZipCode = l_json['Location']['ZipCode']
l_obj.Region = l_json['Location']['Region']
l_obj.Phone = l_json['Location']['Phone']
l_obj.Latitude = l_json['Location']['Latitude']
l_obj.Longitude = l_json['Location']['Longitude']
l_obj.Elevation = l_json['Location']['Elevation']
l_obj.TimeZoneName = l_json['Location']['TimeZoneName']
self.m_pyhouse_obj.House.Location = l_obj
# ## END DBK
|
import glob
import json
import tempfile
import os
END_DELIMETER = "*END_OF_FILE*"
def download_files(my_socket):
print("[+] Downloading files")
tmp = tempfile.gettempdir()
file_name = my_socket.receive_data()
path_dir, actual_file_name = os.path.split(file_name)
temp_path = os.path.join(tmp, actual_file_name)
my_socket.receive_file(temp_path)
def upload_files(my_socket):
print("[+] Uploading files")
print("[+] Sending file")
# first find the list of files to be uploaded
files = glob.glob("*")
    files_dict = {}
    for index, file in enumerate(files):
        files_dict[index] = file
    dict_bytes = json.dumps(files_dict)
# send list of files to the hacker for him to select files
bytes_with_delimeter = dict_bytes + END_DELIMETER
my_socket.socket.send(bytes_with_delimeter.encode())
# receive the file name to download
file2download = my_socket.receive_data()
my_socket.send_file(file2download)
|
import mappyfile
def test():
mapfile = mappyfile.open("./docs/examples/raster.map")
# START OF API EXAMPLE
# update the map name
mapfile["name"] = "MyNewMap"
# update a layer name
layers = mapfile["layers"]
layer = layers[0]
layer["name"] = "MyLayer"
# update the error file path in the map config section
# note key names can be lower or upper case
mapfile["config"]["ms_errorfile"] = "/ms4w/tmp/ms_error.txt"
# update the web metadata settings
mapfile["web"]["metadata"]["wms_format"] = "image/png"
print(mappyfile.dumps(mapfile["web"])) # print out just the WEB section
# alternatively we can parse the Mapfile syntax and load it directly
s = """
METADATA
'wms_enable_request' '*'
'wms_feature_info_mime_type' 'text/html'
'wms_format' 'image/jpg'
END"""
metadata = mappyfile.loads(s)
mapfile["web"]["metadata"] = metadata
print(mappyfile.dumps(mapfile))
# END OF API EXAMPLE
assert(layer["name"] == "MyLayer")
assert(mapfile["web"]["metadata"]["wms_format"] == "image/jpg")
if __name__ == "__main__":
test()
|
#!/usr/bin/python
import RPi.GPIO as GPIO
import time
channel = 7
data = []
j = 0
GPIO.setmode(GPIO.BOARD)
time.sleep(1)
GPIO.setup(channel, GPIO.OUT)
GPIO.output(channel, GPIO.LOW)
time.sleep(0.02)
GPIO.output(channel, GPIO.HIGH)
GPIO.setup(channel, GPIO.IN)
while GPIO.input(channel) == GPIO.LOW:
continue
while GPIO.input(channel) == GPIO.HIGH:
continue
while j < 40:
k = 0
while GPIO.input(channel) == GPIO.LOW:
continue
while GPIO.input(channel) == GPIO.HIGH:
k += 1
if k > 100:
break
if k < 8:
data.append(0)
else:
data.append(1)
j += 1
print "sensor is working."
#print data
humidity_bit = data[0:8]
humidity_point_bit = data[8:16]
temperature_bit = data[16:24]
temperature_point_bit = data[24:32]
check_bit = data[32:40]
humidity = 0
humidity_point = 0
temperature = 0
temperature_point = 0
check = 0
for i in range(8):
humidity += humidity_bit[i] * 2 ** (7-i)
humidity_point += humidity_point_bit[i] * 2 ** (7-i)
temperature += temperature_bit[i] * 2 ** (7-i)
temperature_point += temperature_point_bit[i] * 2 ** (7-i)
check += check_bit[i] * 2 ** (7-i)
tmp = humidity + humidity_point + temperature + temperature_point
if check == tmp:
print "temperature :", temperature, "*C, humidity :", humidity, "%"
else:
print "wrong"
print "temperature :", temperature, "*C, humidity :", humidity, "% check :", check, ", tmp :", tmp
GPIO.cleanup()
|
#!/usr/bin/env python
from math import *
import rospy
from gravl.msg import Hemisphere
import message_filters
from sensor_msgs.msg import NavSatFix
# TODO Ensure that this still works with the custom hemisphere messages
from ackermann_msgs.msg import AckermannDrive
class GPSNavigationNode:
"""
    ROS Node for GPS navigation using an RTK GPS and a Hemisphere vector GPS
for an Ackermann-steered vehicle. Must be instantiated with a specific
GPS waypoint list for the vehicle to drive towards in sequence.
"""
def __init__(self, waypoint_array=[]):
"""
Initializes a GPS Navigation Node.
waypoint_array : An array of waypoints, in the format of
[[lat, long], [lat, long], [lat, long]]
"""
# Initial setup that can be reused for any number of waypoints.
        self.fix = message_filters.Subscriber('fix', NavSatFix)
        self.dir = message_filters.Subscriber('direction', Hemisphere)
        self.nav_data = \
            message_filters.TimeSynchronizer([self.fix, self.dir], 10)
self.pub = rospy.Publisher('cmds', AckermannDrive, queue_size = 10)
        # Current longitude of the tractor (coordinate).
self.current_longitude = None
        # Current latitude of the tractor (coordinate).
self.current_latitude = None
# Angle the car is currently facing (degrees).
self.current_angle = None
# The target longitude of the tractor (coordinate)
self.waypoint_longitude = None
        # The target latitude of the tractor (coordinate).
self.waypoint_latitude = None
        # The maximum angle the tractor can turn at once (degrees).
        self.max_turn_angle = 45
        # The minimum angle the tractor can turn at once (degrees).
        self.min_turn_angle = -self.max_turn_angle
        # The maximum forward speed of the tractor. This attribute was used
        # below but never defined; the value here is an assumed placeholder.
        self.max_forward_speed = 1.0
#Whether or not the waypoint has been reached.
self.waypoint_reached = False
# After synchronizing the data streams, we can register one callback.
self.nav_data.registerCallback(self.update)
# Set up for the PID controller.
# Proportional constant for steering angle calculation
self.kp1 = 0.1
# Proportional constant for steering velocity calculation.
self.kp2 = 0.1
# Proportional constant for forwards velocity calculation.
self.kp3 = 0.1
# Iterate through each waypoint set.
for waypoint in waypoint_array:
while not self.waypoint_reached:
                self.waypoint_latitude = waypoint[0]
self.waypoint_longitude = waypoint[1]
rospy.spin()
# At this point, waypoint_reached will be set to true by the
# update function, so we'll need to set it to false to ensure
# that it actually navigates to the next point.
self.waypoint_reached = False
    def update(self, fix_msg, dir_msg):
        """
        Processes synchronized GPS data and the current waypoint, then
        publishes a new steering angle.
        """
        # Double check there is data from fix and from the Hemisphere.
        if fix_msg.longitude is None or dir_msg.direction is None:
            return
        # Update class variables.
        self.current_longitude = fix_msg.longitude
        self.current_latitude = fix_msg.latitude
        self.current_angle = dir_msg.direction
        # Keep steering towards the waypoint while it has not been reached.
        if not (isclose(self.current_latitude, self.waypoint_latitude) and
                isclose(self.current_longitude, self.waypoint_longitude)):
            desired_angle = self.deg_calculate_desired_angle(
                self.current_latitude, self.current_longitude,
                self.waypoint_latitude, self.waypoint_longitude)
            current_error = self.deg_calculate_steering_error(
                self.current_angle, desired_angle)
            new_steering_angle = self.deg_calculate_steering_angle(
                current_error, self.kp1)
            new_velocity = self.deg_calculate_forward_velocity(
                new_steering_angle, self.kp3)
            # Create and publish a drive message. Note AckermannDrive has no
            # header; use AckermannDriveStamped if a timestamp is needed.
            drive_msg = AckermannDrive()
            drive_msg.steering_angle = new_steering_angle
            drive_msg.speed = abs(new_velocity)
            self.pub.publish(drive_msg)
        # Otherwise, we're ready to move on to the next waypoint.
        else:
            self.waypoint_reached = True
            return
#########################CORE FUNCTIONS#########################
    # TODO Decide if there's any reason to make this block static.
    def deg_calculate_desired_angle(self, clat, clong, wlat, wlong):
"""
Calculates the desired angle (in degrees) for the car based on the
angle necessary to drive towards the waypoint.
clat : Current latitude coordinate of the vehicle.
clong : Current longitude coordinate of the vehicle.
        wlat : Latitude coordinate of the target waypoint.
        wlong : Longitude coordinate of the target waypoint.
Returns the desired angle in degrees.
"""
longitude_difference = wlong - clong
latitude_difference = wlat - clat
desired_angle_rad = atan2(longitude_difference, latitude_difference)
desired_angle_deg = desired_angle_rad * (180 / pi)
return desired_angle_deg
    def deg_calculate_steering_error(self, current_angle, desired_angle):
"""
Calculates the difference between the current vehicle steering angle
and the desired value.
current_angle : The angle of the vehicle (likely based on sensor
readings).
desired_angle : The ideal angle of the vehicle (likely based on
calculation by means of a GPS waypoint).
Returns the number of degrees to turn to face the desired angle.
"""
return desired_angle - current_angle
    def deg_calculate_steering_angle(self, error, kp1):
"""
Calculate the new steering angle for the vehicle.
error : The difference between the current and desired angle.
kp1 : Proportional constant for PID controller. Should be tuned
empirically.
Returns angle (in degrees) that the vehicle should turn in.
"""
steering_angle = None
        if abs(error) > self.max_turn_angle:
steering_angle = self.max_turn_angle
else:
steering_angle = kp1 * -error
return steering_angle
    def deg_calculate_forward_velocity(self, angle, kp3):
"""
Calculate the new forward velocity for the car based on the turn angle.
angle : The new steering angle for the vehicle.
kp3 : Proportional constant for the PID controller. Should be
        tuned empirically.
Returns the new forward velocity for the vehicle.
"""
# TODO Confirm whether or not this math checks out.
forward_velocity = kp3 * \
(self.max_forward_speed/self.max_turn_angle) * angle
if forward_velocity >= self.max_forward_speed:
forward_velocity = self.max_forward_speed
return forward_velocity
def run():
rospy.init_node("GPSNavigationNode")
try:
# TODO Go through data set, pick waypoints (might be somewhat
# arbitrary but it would be good to test actual navigation.
waypoint_array = []
gps_nav_node = GPSNavigationNode(waypoint_array)
except rospy.ROSInterruptException:
pass
if __name__=='__main__':
run()
|
from seamless.silk import Silk
StructureState = Silk(schema=structurestate_schema)
struc = StructureState()
struc.data = struc_data
for lineno0, line in enumerate(visualization.splitlines()):
lineno = lineno0 + 1
pound = line.find("#")
if pound > -1:
line = line[:pound]
if not len(line.strip()):
continue
terms = [t.strip() for t in line.split(",")]
try:
firstargs = terms[0].split()
command = firstargs[0]
args = []
if len(firstargs) > 1:
args.append(" ".join(firstargs[1:]))
args += terms[1:]
    except Exception:
raise SyntaxError("Line %d: %s" % (lineno, line))
try:
func = getattr(struc, command)
except AttributeError:
raise SyntaxError("Line %d, unknown command '%s'" % (lineno, command))
func(*args)
result = {
"mask": struc.get_selection(format="mask"),
"table": struc.get_selection(format="pandas").to_html(),
"ngl_representations": struc.ngl_representations(),
}
|
# -*- coding: utf-8 -*-
# @Time : 2021/8/6 15:01
# @Author : zc
# @Desc : Request entity for partner (business contact) query conditions
from chanjet_openapi_python_sdk.chanjet_content import ChanjetContent
# Request entity for partner query conditions
class QueryPartnerContent(ChanjetContent):
def __init__(self, code, name, made_record_date):
self.param = self.Params(code, name, made_record_date)
class Params:
def __init__(self, code, name, made_record_date):
"""
往来单位查询条件实体初始化方法
:param code: 往来单位编码
:type code: str
:param name: 往来单位名称
:type name: str
:param made_record_date: 建档日期
:type made_record_date: str
"""
self.Code = code
self.Name = name
self.MadeRecordDate = made_record_date
self.SelectFields = 'Code,Name,ShortHand,PartnerClass.Code,PartnerClass.Name,Disabled'
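# A minimal usage sketch (added for illustration; the field values below are
# hypothetical examples).
if __name__ == '__main__':
    content = QueryPartnerContent(code='0001', name='Example Partner',
                                  made_record_date='2021-08-06')
    print(content.param.SelectFields)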
|
import numpy as np
from scipy.stats import norm as norm
from scipy.optimize import fmin_bfgs
from copy import deepcopy
class GridDistribution:
def __init__(self, x, y):
self.x = x
self.y = y
def pdf(self, data):
# Find the closest bins
rhs = np.searchsorted(self.x, data)
lhs = (rhs - 1).clip(0)
rhs = rhs.clip(0, len(self.x) - 1)
# Linear approximation (trapezoid rule)
rhs_dist = np.abs(self.x[rhs] - data)
lhs_dist = np.abs(self.x[lhs] - data)
denom = rhs_dist + lhs_dist
denom[denom == 0] = 1. # handle the zero-distance edge-case
rhs_weight = 1.0 - rhs_dist / denom
lhs_weight = 1.0 - rhs_weight
return lhs_weight * self.y[lhs] + rhs_weight * self.y[rhs]
def trapezoid(x, y):
return np.sum((x[1:] - x[0:-1]) * (y[1:] + y[0:-1]) / 2.)
def generate_sweeps(num_sweeps, num_samples):
results = []
for sweep in range(num_sweeps):
a = np.arange(num_samples)
np.random.shuffle(a)
results.extend(a)
return np.array(results)
def predictive_recursion(z, num_sweeps, grid_x, mu0=0., sig0=1.,
nullprob=1.0, decay=-0.67):
sweeporder = generate_sweeps(num_sweeps, len(z))
theta_guess = np.ones(len(grid_x)) / float(len(grid_x))
return predictive_recursion_fdr(z, sweeporder, grid_x, theta_guess,
mu0, sig0, nullprob, decay)
def predictive_recursion_fdr(z, sweeporder, grid_x, theta_guess, mu0 = 0.,
sig0 = 1.0, nullprob = 1.0, decay = -0.67):
gridsize = grid_x.shape[0]
theta_subdens = deepcopy(theta_guess)
pi0 = nullprob
joint1 = np.zeros(gridsize)
ftheta1 = np.zeros(gridsize)
# Begin sweep through the data
for i, k in enumerate(sweeporder):
cc = (3. + i)**decay
joint1 = norm.pdf(grid_x, loc=z[k] - mu0, scale=sig0) * theta_subdens
m0 = pi0 * norm.pdf(z[k] - mu0, 0., sig0)
m1 = trapezoid(grid_x, joint1)
mmix = m0 + m1
pi0 = (1. - cc) * pi0 + cc * (m0 / mmix)
ftheta1 = joint1 / mmix
theta_subdens = (1. - cc) * theta_subdens + cc * ftheta1
# Now calculate marginal distribution along the grid points
y_mix = np.zeros(gridsize)
y_signal = np.zeros(gridsize)
for i, x in enumerate(grid_x):
joint1 = norm.pdf(grid_x, x - mu0, sig0) * theta_subdens
m0 = pi0 * norm.pdf(x, mu0, sig0)
m1 = trapezoid(grid_x, joint1)
        y_mix[i] = m0 + m1
y_signal[i] = m1 / (1. - pi0)
return {'grid_x': grid_x,
'sweeporder': sweeporder,
'theta_subdens': theta_subdens,
'pi0': pi0,
'y_mix': y_mix,
'y_signal': y_signal}
def empirical_null(z, nmids=150, pct=-0.01, pct0=0.25, df=4, verbose=0):
'''Estimate f(z) and f_0(z) using a polynomial approximation to Efron (2004)'s method.'''
N = len(z)
med = np.median(z)
lb = med + (1 - pct) * (z.min() - med)
ub = med + (1 - pct) * (z.max() - med)
breaks = np.linspace(lb, ub, nmids+1)
zcounts = np.histogram(z, bins=breaks)[0]
mids = (breaks[:-1] + breaks[1:])/2
### Truncated Polynomial
# Truncate to [-3, 3]
selected = np.logical_and(mids >= -3, mids <= 3)
zcounts = zcounts[selected]
mids = mids[selected]
# Form a polynomial basis and multiply by z-counts
X = np.array([mids ** i for i in range(df+1)]).T
beta0 = np.zeros(df+1)
loglambda_loss = lambda beta, X, y: -((X * y[:,np.newaxis]).dot(beta) - np.exp(X.dot(beta).clip(-20,20))).sum() + 1e-6*np.sqrt((beta ** 2).sum())
results = fmin_bfgs(loglambda_loss, beta0, args=(X, zcounts), disp=verbose)
a = np.linspace(-3,3,1000)
B = np.array([a ** i for i in range(df+1)]).T
beta_hat = results
# Back out the mean and variance from the Taylor terms
x_max = mids[np.argmax(X.dot(beta_hat))]
loglambda_deriv1_atmode = np.array([i * beta_hat[i] * x_max**(i-1) for i in range(1,df+1)]).sum()
loglambda_deriv2_atmode = np.array([i * (i-1) * beta_hat[i] * x_max**(i-2) for i in range(2,df+1)]).sum()
# Handle the edge cases that arise with numerical precision issues
sigma_enull = np.sqrt(-1.0/loglambda_deriv2_atmode) if loglambda_deriv2_atmode < 0 else 1
mu_enull = (x_max - loglambda_deriv1_atmode/loglambda_deriv2_atmode) if loglambda_deriv2_atmode != 0 else 0
return (mu_enull, sigma_enull)
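# A minimal usage sketch (added for illustration): draw z-scores from a
# spike-and-slab mixture, estimate the empirical null, then run the
# predictive recursion over a grid.
if __name__ == '__main__':
    np.random.seed(0)
    z = np.concatenate([np.random.normal(0., 1., 900),   # null z-scores
                        np.random.normal(3., 1., 100)])  # signal z-scores
    mu0, sig0 = empirical_null(z)
    grid_x = np.linspace(-6, 6, 200)
    res = predictive_recursion(z, num_sweeps=5, grid_x=grid_x,
                               mu0=mu0, sig0=sig0)
    print('Estimated null proportion:', res['pi0'])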
|
from flask import Flask, session, redirect, url_for, escape, request
app = Flask(__name__, template_folder='template')
app.secret_key = 'admin'
@app.route('/')
def index():
if 'username' in session:
username = session['username']
return 'Logged in as ' + username + '<br>' + "<b><a href = '/logout'>click here to log out</a></b>"
return "You are not logged in <br><a href = '/login'>" + "click here to log in</a>"
@app.route('/login', methods = ['GET', 'POST'])
def login():
if request.method == 'POST':
session['username'] = request.form['username']
return redirect(url_for('index'))
return '''
<form action = "" method = "post">
<p><input type = text name = username/></p>
            <p><input type = text name = username/></p>
</form>
'''
@app.route('/logout')
def logout():
# remove the username from the session if it is there
session.pop('username', None)
return redirect(url_for('index'))
|
from django import forms
from decks.models import Deck, Card, Category
class SearchDeckForm(forms.ModelForm):
name = forms.CharField(
label="Name",
max_length=20,
required=False,
widget=forms.TextInput(
attrs= {'placeholder': 'Search by deck name'}
)
)
category = forms.ModelChoiceField(
queryset=Category.objects.all(),
label="Category",
empty_label="All",
required=False,
)
class Meta:
model = Deck
fields = ['name', 'category']
class DeckForm(forms.ModelForm):
    name = forms.CharField(
        label="Deck Name",
        max_length=20,
    )
    description = forms.CharField(
        label="Deck Description",
        max_length=2000,
        widget=forms.Textarea(),
    )
categories = forms.ModelMultipleChoiceField(
queryset=Category.objects.all(),
label="Category",
required=False,
help_text="Select as many categories you find necessary."
)
PUBLISH_STATUS = ( ("x", "Private"), ("f", "Followers Only"), ("o", "Everyone"), )
publish_status = forms.ChoiceField(
choices=PUBLISH_STATUS,
label="Visibility",
required=False,
help_text="Sets who can see this deck."
)
class Meta:
model = Deck
fields = ['name', 'description', 'categories', 'publish_status']
class CardForm(forms.ModelForm):
front_text = forms.CharField(
label="Front Card Text",
max_length=30,
widget=forms.Textarea(),
)
back_text = forms.CharField(
label="Back Card Text",
max_length=30,
widget=forms.Textarea(),
)
class Meta:
model = Card
fields = ['front_text', 'back_text']
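# A minimal usage sketch (added for illustration): a hypothetical view function
# filtering decks with SearchDeckForm. Assumes Deck exposes the name and
# category fields referenced by the form.
def _example_search(request):
    form = SearchDeckForm(request.GET or None)
    decks = Deck.objects.all()
    if form.is_valid():
        name = form.cleaned_data.get('name')
        category = form.cleaned_data.get('category')
        if name:
            decks = decks.filter(name__icontains=name)
        if category:
            decks = decks.filter(category=category)
    return decks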
|
from utils import device_create
from utils.history_item import HistoryItem
from datetime import datetime
db = device_create.get_creator("202.193.57.131", "bitkyTest")
LampStatusHistory = db.LampStatusHistory
LampStatusHistory.drop()
device_list = []
employees = db.Employee.find()
print('Total iterations required: ' + str(employees.count()))
print('Starting...')
i = 1
for employee in employees:
if i % 100 == 0:
        print('Processing iteration ' + str(i))
i += 1
hisItem = {'_id': employee['_id']}
hisInit = HistoryItem(datetime(2015, 4, 19), 100)
hisItem['ChargeStatus'] = hisInit.ChargeStatus
hisItem['WorkStatus'] = hisInit.WorkStatus
device_list.append(hisItem)
print('Writing data to the database...')
result = LampStatusHistory.insert_many(device_list)
print('Write complete')
pass
|
from django.test import TestCase
from .models import Profile,Posts,Comments,Following
# Testing the 'Following' and 'Comments'
class FollowingTestClass(TestCase):
def setUp(self):
        self.mykey = Following(username='mykey', followed='marabel')
def test_instance(self):
self.assertTrue(isinstance(self.mykey,Following))
class CommentTestClass(TestCase):
def setUp(self):
        self.first = Comments(post=1,
username='mykey',
comment='What a great throw-back',
date='Jan 07, 2020, 08:50 a.m.',
count=0)
def test_instance(self):
        self.assertTrue(isinstance(self.first, Comments))
|
from colossalai.registry import HOOKS
from torch import Tensor
from colossalai.trainer.hooks import BaseHook
from colossalai.utils.memory_tracer import AsyncMemoryMonitor
from ._metric_hook import LearningRateMetric, MetricHook
@HOOKS.register_module
class MemTraceHook(BaseHook):
"""Save memory stats and pass it to states
This hook is used to record memory usage info, and pass to trainer.states
You can use it as other trainer hook and fetch data from trainer.states['metrics][mode]
"""
def __init__(
self,
priority: int = 0,
) -> None:
super().__init__(priority=priority)
self._memory_monitor = AsyncMemoryMonitor()
def after_hook_is_attached(self, trainer):
# Initialize the data
trainer.states['metrics']['train'] = self._memory_monitor.state_dict
trainer.states['metrics']['test'] = self._memory_monitor.state_dict
def before_train_iter(self, trainer):
self._memory_monitor.start()
return super().before_train_iter(trainer)
def after_train_iter(self, trainer, output: Tensor, label: Tensor, loss: Tensor):
self._memory_monitor.finish()
trainer.states['metrics']['train'] = self._memory_monitor.state_dict
trainer.states['metrics']['test'] = self._memory_monitor.state_dict
return super().after_train_iter(trainer, output, label, loss)
def before_test_iter(self, trainer):
self._memory_monitor.start()
        return super().before_test_iter(trainer)
def after_test_iter(self, trainer, output: Tensor, label: Tensor, loss: Tensor):
self._memory_monitor.finish()
trainer.states['metrics']['train'] = self._memory_monitor.state_dict
trainer.states['metrics']['test'] = self._memory_monitor.state_dict
return super().after_test_iter(trainer, output, label, loss)
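# A minimal usage sketch (added for illustration): register the hook like any
# other trainer hook. The fit() call below is an assumption about the
# colossalai Trainer API, not taken from this file, so it is left commented.
# hooks = [MemTraceHook(priority=0)]
# trainer.fit(train_dataloader=train_dataloader, epochs=num_epochs, hooks=hooks)
# Afterwards, memory stats are available via trainer.states['metrics']['train'].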
|