content stringlengths 5 1.05M |
|---|
import numpy as np
import matplotlib.pyplot as plt
import pickle
def parse_kpts(buf):
    """Remap 16 MPII-ordered keypoints into a 17-joint (17, 3) float32 array.

    Rows 7 and 9 are synthesized as midpoints of two source joints, and their
    confidence (column 2) is the smaller of the two source confidences.
    """
    # Destination row -> source row in `buf`; None rows are synthesized below.
    remap = (6, 2, 1, 0, 3, 4, 5, None, 7, None, 9, 13, 14, 15, 12, 11, 10)
    res = np.zeros((17, 3), dtype=np.float32)
    for dst, src in enumerate(remap):
        if src is not None:
            res[dst] = buf[src]
    # Synthesized joints: midpoints of neighbouring source joints.
    res[7] = 0.5 * (buf[7] + buf[6])
    res[9] = 0.5 * (buf[9] + buf[8])
    # Confidence of a synthesized joint is the weaker of its two sources.
    res[7, 2] = min(buf[7, 2], buf[6, 2])
    res[9, 2] = min(buf[8, 2], buf[9, 2])
    return res
# Convert the raw MPII keypoint dump into the 17-joint layout.
# Fix: the original opened both files without closing them; use context
# managers so the handles are released deterministically.
with open('MPII_kpts.pkl', 'rb') as f:
    data = pickle.load(f)
# Each entry is (identifier, raw 16x3 keypoint array).
results = [[entry[0], parse_kpts(entry[1])] for entry in data]
with open('mpii.pkl', 'wb') as f:
    pickle.dump(results, f)
|
# NOT FOR RELEASE BUILDS!
# Useful for debugging scripts in Tampermonkey.
# To set breakpoints switch to Web Developer Tools' Debugger/Sources tab
# and find related script in Tampermonkey's list of `userscript.html?id=<guid>`
from pluginwrapper import start, setup

# JavaScript epilogue appended after the wrapped userscript body: it closes
# the `wrapper` function and immediately invokes it, passing Tampermonkey's
# GM_info script metadata (version/name/description) when available.
end = """
} // wrapper end
var info = {};
if (typeof GM_info !== 'undefined' && GM_info && GM_info.script) info.script = { version: GM_info.script.version, name: GM_info.script.name, description: GM_info.script.description };
wrapper(info); // call wrapper function directly (Tampermonkey only!)
"""
|
import json
import pprint

# Load the scraped movie/cast records produced by the scraper script.
# NOTE(review): assumed shape is a list of movie dicts, each with a 'cast'
# list of {'imdb_id', 'name', ...} dicts — confirm against the scraper output.
with open('scrape_movie_cast_details.json','r') as new_data:
    data=json.load(new_data)
# pprint.pprint(data)
def analyse_actors(movies_list):
    """Count how many movies each actor appears in.

    Builds a mapping of imdb_id -> {'name': ..., 'num_movies': ...}, pretty-
    prints it, and returns it (the original returned None; returning the
    mapping is backward compatible and makes the function testable).

    Fix: the original rescanned cast_list and all_id_list once per unique id,
    which is quadratic; a single pass over the cast members is O(n).
    """
    big_dict = {}
    for movie in movies_list:
        for member in movie['cast']:
            entry = big_dict.setdefault(
                member['imdb_id'], {'name': member['name'], 'num_movies': 0})
            # Last occurrence of a name wins, matching the original behaviour.
            entry['name'] = member['name']
            entry['num_movies'] += 1
    pprint.pprint(big_dict)
    return big_dict
analyse_actors(data) |
from django.http.response import HttpResponseBadRequest
from imfp.core.http_helpers import HttpJsonResponse
from imfp.events.forms import CreateEventForm, SubscribeToEventForm, UnsubscribeFromEventForm, DeleteEventForm
from imfp.events.models import Event
from imfp.subscriptions.models import Subscription
def create_event(request):
    """Create an event from POSTed form data.

    Returns a JSON success/error payload for POST requests and a 400
    response for any other method.
    """
    if request.method == 'POST':
        form = CreateEventForm(request.POST)
        if form.is_valid():
            data = form.cleaned_data
            # TODO figure out how i auth the user
            # NOTE(review): data['type'] is passed twice (4th and 8th
            # positional argument); one of them is probably meant to be a
            # different field — confirm against create_event's signature.
            event = Event.objects.create_event(
                data['user_id'],
                data['description'],
                data['creation_time'],
                data['type'],
                data['address'],
                data['location'],
                data['zone'],
                data['type'],
                data['seats']
            )
            # NOTE(review): manager methods named create_* usually persist
            # already; verify whether this extra save() is needed.
            event.save()
            return HttpJsonResponse({'success': True})
        else:
            return HttpJsonResponse({'success': False, 'error': 'Invalid form data'})
    else:
        return HttpResponseBadRequest()
def subscribe_to_event(request, event_id):
    """Subscribe the posted user to the given event; JSON result, 400 on non-POST."""
    if request.method != 'POST':
        return HttpResponseBadRequest()
    form = SubscribeToEventForm(request.POST)
    if not form.is_valid():
        return HttpJsonResponse({'success': False, 'error': 'Invalid form data'})
    # TODO need to check if the event is full
    user_id = form.cleaned_data['user_id']
    subscription = Subscription.objects.create_subscription(user_id, event_id)
    if subscription:
        return HttpJsonResponse({'success': True})
    return HttpJsonResponse({'success': False, 'error': 'Subscribing to the event failed.'})
def unsubscribe_from_event(request, event_id):
    """Remove the posted user's subscription to the event; JSON result, 400 on non-POST."""
    if request.method != 'POST':
        return HttpResponseBadRequest()
    form = UnsubscribeFromEventForm(request.POST)
    if not form.is_valid():
        return HttpJsonResponse({'success': False, 'error': 'Invalid form data'})
    user_id = form.cleaned_data['user_id']
    if Subscription.objects.remove_subscription(user_id, event_id):
        return HttpJsonResponse({'success': True})
    return HttpJsonResponse({'success': False, 'error': 'Unsubscribing from event failed.'})
def delete_event(request, event_id):
    """Delete the event on behalf of the posted user; JSON result, 400 on non-POST."""
    if request.method != 'POST':
        return HttpResponseBadRequest()
    form = DeleteEventForm(request.POST)
    if not form.is_valid():
        return HttpJsonResponse({'success': False, 'error': 'Invalid form data'})
    user_id = form.cleaned_data['user_id']
    if Event.objects.delete_event(user_id, event_id):
        return HttpJsonResponse({'success': True})
    return HttpJsonResponse({'success': False, 'error': 'Deleting event failed. You are most likely not the owner. Nice try, though.'})
|
# -*- coding: utf-8 -*-
import sys
"""
tip
type
min()
input
29
100
948
377
-24
0
-388
9999
output
-388
"""
# Read integers from stdin until a sentinel (>= 9999) appears, then print
# the minimum of everything read (sentinel included, as before).
data = []
for line in sys.stdin:
    value = int(line)
    data.append(value)
    if value >= 9999:
        print(min(data))
        break
# Copyright (c) 2021 NVIDIA CORPORATION & AFFILIATES.
# All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import math
import pytest
import os
from itertools import product
import torch
from kaolin.ops.spc.uint8 import bits_to_uint8, uint8_bits_sum, uint8_to_bits
from kaolin.ops.random import random_spc_octrees
from kaolin.rep import Spc
from kaolin.ops import spc
from kaolin.utils.testing import FLOAT_TYPES, with_seed, check_tensor
os.environ['NVIDIA_TF32_OVERRIDE'] = '0'
# Shared parametrization: every test method in the class runs for each
# combination of grid size, channel counts, kernel geometry and bias flag.
@pytest.mark.parametrize('batch_size', [1, 3])
@pytest.mark.parametrize('height,width,depth,threshold',
                         [(27, 37, 37, 0.7), (64, 64, 64, 0.)])
@pytest.mark.parametrize('in_channels', [1, 5])
@pytest.mark.parametrize('out_channels', [1, 7])
@pytest.mark.parametrize('kernel_size,kernel_offset', [(1, 0), (2, 0), (3, 0), (3, 1), (4, 0), (5, 0), (5, 2)])
@pytest.mark.parametrize('with_bias', [False, True])
class TestConv3D:
    """Tests kaolin SPC conv3d / conv_transpose3d against dense torch convs.

    The autouse fixtures build a random sparse voxel grid, convert it to the
    SPC representation, and derive the octree bookkeeping tensors that the
    SPC convolution API requires.
    """

    @pytest.fixture(autouse=True)
    def sparsity_masks(self, batch_size, height, width, depth, threshold):
        # Random boolean occupancy grid; larger threshold -> sparser grid.
        return torch.rand(batch_size, height, width, depth,
                          device='cuda') > threshold

    @pytest.fixture(autouse=True)
    def feature_grids(self, sparsity_masks, batch_size, in_channels, height, width, depth):
        # Dense features, zeroed wherever the occupancy mask is False.
        return torch.rand(batch_size, in_channels, height, width, depth,
                          device='cuda') * sparsity_masks.unsqueeze(1)

    @pytest.fixture(autouse=True)
    def kernel_vectors(self, kernel_size, kernel_offset):
        # All integer offsets of a kernel_size^3 window, shifted by the offset.
        return torch.tensor(
            list(product(range(-kernel_offset, kernel_size - kernel_offset), repeat=3)),
            dtype=torch.int16, device='cuda')

    @pytest.fixture(autouse=True)
    def dense_weight(self, in_channels, out_channels, kernel_size):
        # Weight in dense torch conv3d layout: (out, in, k, k, k).
        return torch.rand(out_channels, in_channels,
                          kernel_size, kernel_size, kernel_size,
                          device='cuda')

    @pytest.fixture(autouse=True)
    def spc_weight(self, dense_weight, in_channels, out_channels):
        # Same weight in SPC layout: (kernel_vector, in, out).
        return dense_weight.reshape(out_channels, in_channels, -1).permute(2, 1, 0)

    @pytest.fixture(autouse=True)
    def bias(self, with_bias, out_channels):
        if with_bias:
            return torch.rand(out_channels, device='cuda')
        else:
            return None

    @pytest.fixture(autouse=True)
    def octrees_lengths_features(self, feature_grids, sparsity_masks):
        # Converts the dense grid to SPC: (octrees, lengths, coalescent features).
        return spc.feature_grids_to_spc(feature_grids, sparsity_masks)

    @pytest.fixture(autouse=True)
    def octrees(self, octrees_lengths_features):
        return octrees_lengths_features[0]

    @pytest.fixture(autouse=True)
    def lengths(self, octrees_lengths_features):
        return octrees_lengths_features[1]

    @pytest.fixture(autouse=True)
    def coalescent_features(self, octrees_lengths_features):
        return octrees_lengths_features[2]

    @pytest.fixture(autouse=True)
    def max_level_pyramids_exsum(self, octrees, lengths):
        # Octree scan: (max_level, per-level pyramids, exclusive prefix sums).
        return spc.scan_octrees(octrees, lengths)

    @pytest.fixture(autouse=True)
    def max_level(self, max_level_pyramids_exsum):
        return max_level_pyramids_exsum[0]

    @pytest.fixture(autouse=True)
    def pyramids(self, max_level_pyramids_exsum):
        return max_level_pyramids_exsum[1]

    @pytest.fixture(autouse=True)
    def exsum(self, max_level_pyramids_exsum):
        return max_level_pyramids_exsum[2]

    @pytest.fixture(autouse=True)
    def point_hierarchies(self, octrees, pyramids, exsum):
        return spc.generate_points(octrees, pyramids, exsum)

    @pytest.mark.parametrize('with_spc_to_dict', [False, True])
    @pytest.mark.parametrize('jump', [0, 1, 2])
    def test_conv3d(self, height, width, depth, in_channels, out_channels, kernel_size,
                    feature_grids, sparsity_masks, dense_weight, bias,
                    octrees, lengths, coalescent_features, max_level,
                    pyramids, exsum, point_hierarchies,
                    kernel_vectors, kernel_offset, spc_weight, jump, with_spc_to_dict):
        """Forward and backward of spc.conv3d must match dense torch conv3d."""
        stride = 2 ** jump
        # Detach+require grad so gradients are tracked from the SPC inputs.
        coalescent_features = coalescent_features.detach()
        coalescent_features.requires_grad = True
        spc_weight = spc_weight.detach()
        spc_weight.requires_grad = True
        # Two equivalent call styles: Spc object dict-expansion vs positional.
        if with_spc_to_dict:
            input_spc = Spc(octrees, lengths)
            output_features, output_level = spc.conv3d(
                **input_spc.to_dict(), level=input_spc.max_level, input=coalescent_features,
                weight=spc_weight, kernel_vectors=kernel_vectors, jump=jump, bias=bias)
            output = spc.to_dense(**input_spc.to_dict(), input=output_features,
                                  level=output_level)
            output_sparsity_masks = spc.to_dense(
                **input_spc.to_dict(),
                input=torch.ones_like(output_features, requires_grad=False),
                level=output_level)
        else:
            output_features, output_level = spc.conv3d(
                octrees, point_hierarchies, max_level, pyramids, exsum, coalescent_features,
                spc_weight, kernel_vectors, jump=jump, bias=bias)
            output = spc.to_dense(point_hierarchies, pyramids, output_features, output_level)
            output_sparsity_masks = spc.to_dense(
                point_hierarchies, pyramids, torch.ones_like(output_features, requires_grad=False),
                output_level)
        # Dense reference: pad so the kernel offset lines up with the SPC result.
        feature_grids = feature_grids.detach()
        feature_grids.requires_grad = True
        dense_weight = dense_weight.detach()
        dense_weight.requires_grad = True
        padded_input = torch.nn.functional.pad(feature_grids,
                                               (kernel_offset, kernel_size - 1 - kernel_offset,
                                                kernel_offset, kernel_size - 1 - kernel_offset,
                                                kernel_offset, kernel_size - 1 - kernel_offset))
        expected_output = torch.nn.functional.conv3d(padded_input, dense_weight, stride=stride, bias=bias)
        expected_height, expected_width, expected_depth = expected_output.shape[2:]
        # Only compare where the SPC output is defined (occupied voxels).
        expected_output *= output_sparsity_masks[:, :, :expected_height, :expected_width, :expected_depth]
        assert torch.allclose(output[:, :, :expected_height, :expected_width, :expected_depth],
                              expected_output, atol=1e-5, rtol=1e-5)
        # Backward: gradients w.r.t. features and weights must match too.
        grad_output = torch.rand_like(output)
        output.backward(grad_output)
        expected_output.backward(grad_output[:, :, :expected_height, :expected_width, :expected_depth])
        _, _, sparsified_grad = spc.feature_grids_to_spc(feature_grids.grad, sparsity_masks)
        assert torch.allclose(coalescent_features.grad, sparsified_grad)
        assert torch.allclose(spc_weight.grad,
                              dense_weight.grad.reshape(out_channels, in_channels, -1).permute(2, 1, 0),
                              rtol=1e-3, atol=1e-3)

    @pytest.mark.parametrize('with_spc_to_dict', [False, True])
    @pytest.mark.parametrize('jump', [0, 1, 2])
    def test_conv_transpose3d(self, height, width, depth, in_channels, out_channels,
                              sparsity_masks, dense_weight, bias,
                              octrees, lengths, max_level, pyramids, exsum, point_hierarchies,
                              kernel_vectors, kernel_size, kernel_offset, spc_weight, jump,
                              with_spc_to_dict):
        """Forward/backward of spc.conv_transpose3d vs dense conv_transpose3d."""
        stride = 2 ** jump
        if stride > kernel_size:
            pytest.skip('stride higher than kernel_size is not tested')
        out_sparsity_masks = sparsity_masks
        # The transposed conv upsamples from a coarser level back to max_level.
        in_level = max_level - jump
        in_num_nodes = torch.sum(pyramids[:, 0, -(2 + jump)])
        coalescent_features = torch.rand((in_num_nodes, in_channels), device='cuda',
                                         requires_grad=True)
        dense_weight = dense_weight.detach()
        dense_weight.requires_grad = True
        spc_weight = spc_weight.detach()
        spc_weight.requires_grad = True
        if with_spc_to_dict:
            input_spc = Spc(octrees, lengths)
            feature_grids = spc.to_dense(**input_spc.to_dict(), input=coalescent_features,
                                         level=in_level)
        else:
            feature_grids = spc.to_dense(point_hierarchies, pyramids, coalescent_features, in_level)
        # Crop the densified input to the coarse-level resolution.
        feature_grids = feature_grids[:, :, :math.ceil(height / stride),
                                      :math.ceil(width / stride), :math.ceil(depth / stride)]
        feature_grids = feature_grids.detach()
        feature_grids.requires_grad = True
        if with_spc_to_dict:
            sparsity_masks = spc.to_dense(
                **input_spc.to_dict(), input=torch.ones_like(coalescent_features),
                level=in_level).bool()
        else:
            sparsity_masks = spc.to_dense(point_hierarchies, pyramids,
                                          torch.ones_like(coalescent_features),
                                          in_level).bool()
        sparsity_masks = sparsity_masks[:, 0, :math.ceil(height / stride),
                                        :math.ceil(width / stride), :math.ceil(depth / stride)]
        # test forward
        if with_spc_to_dict:
            output_features, output_level = spc.conv_transpose3d(
                **input_spc.to_dict(), level=in_level, input=coalescent_features,
                weight=spc_weight, kernel_vectors=kernel_vectors, jump=jump, bias=bias)
            output = spc.to_dense(**input_spc.to_dict(), input=output_features, level=output_level)
        else:
            output_features, output_level = spc.conv_transpose3d(
                octrees, point_hierarchies, in_level, pyramids, exsum,
                coalescent_features,
                spc_weight, kernel_vectors, jump=jump, bias=bias)
            output = spc.to_dense(point_hierarchies, pyramids, output_features, output_level)
        output = output[:, :, :height, :width, :depth]
        # Dense reference; note the (in, out) axis swap for transposed conv.
        expected_output = torch.nn.functional.conv_transpose3d(
            feature_grids, dense_weight.permute(1, 0, 2, 3, 4),
            stride=stride, bias=bias,
            output_padding=stride - 1)[:, :,
                                       kernel_offset:height + kernel_offset,
                                       kernel_offset:width + kernel_offset,
                                       kernel_offset:depth + kernel_offset]
        expected_output *= out_sparsity_masks.unsqueeze(1)
        assert output_level == max_level
        assert torch.allclose(output, expected_output, rtol=1e-5, atol=1e-5)
        # test backward
        grad_out = torch.rand_like(expected_output)
        expected_output.backward(grad_out)
        output.backward(grad_out)
        _, _, sparsified_grad = spc.feature_grids_to_spc(feature_grids.grad, sparsity_masks)
        assert torch.allclose(coalescent_features.grad, sparsified_grad,
                              rtol=1e-5, atol=1e-5)
        assert torch.allclose(spc_weight.grad,
                              dense_weight.grad.reshape(out_channels, in_channels, -1).permute(2, 1, 0),
                              rtol=1e-3, atol=1e-3)

    @pytest.mark.parametrize('with_spc_to_dict', [False, True])
    @pytest.mark.parametrize('jump', [0, 1, 2])
    def test_module_conv3d(self, height, width, depth, in_channels, out_channels, with_bias,
                           octrees, lengths, coalescent_features, max_level, pyramids, exsum,
                           point_hierarchies, kernel_vectors, jump, with_spc_to_dict):
        """spc.Conv3d module: parameter/buffer shapes, repr, and parity with the functional API."""
        conv = spc.Conv3d(in_channels, out_channels, kernel_vectors,
                          jump, bias=with_bias).cuda()
        params = dict(conv.named_parameters())
        weight = params['weight']
        check_tensor(weight, shape=(kernel_vectors.shape[0],
                                    in_channels, out_channels),
                     dtype=torch.float, device='cuda')
        if with_bias:
            assert len(params) == 2
            bias = params['bias']
            check_tensor(bias, shape=(out_channels,), dtype=torch.float,
                         device='cuda')
        else:
            assert len(params) == 1
            bias = None
        buffers = dict(conv.named_buffers())
        assert len(buffers) == 1
        assert torch.equal(buffers['kernel_vectors'], kernel_vectors)
        assert repr(conv) == f'Conv3d(in={in_channels}, out={out_channels}, ' \
                             f'kernel_vector_size={kernel_vectors.shape[0]})'
        if with_spc_to_dict:
            input_spc = Spc(octrees, lengths)
            output, output_level = conv(**input_spc.to_dict(), level=max_level,
                                        input=coalescent_features)
        else:
            output, output_level = conv(
                octrees, point_hierarchies, max_level, pyramids, exsum,
                coalescent_features)
        # The module must match the functional API using the same parameters.
        expected_output, expected_output_level = spc.conv3d(
            octrees, point_hierarchies, max_level, pyramids, exsum, coalescent_features,
            weight, kernel_vectors, jump=jump, bias=bias)
        assert torch.equal(output, expected_output)
        assert output_level == expected_output_level

    @pytest.mark.parametrize('with_spc_to_dict', [False, True])
    @pytest.mark.parametrize('jump', [0, 1, 2])
    def test_module_conv_transpose3d(self, height, width, depth, in_channels, out_channels, with_bias,
                                     octrees, lengths, max_level, pyramids, exsum, point_hierarchies,
                                     kernel_size, kernel_vectors, jump, with_spc_to_dict):
        """spc.ConvTranspose3d module: shapes, repr, and parity with the functional API."""
        stride = 2 ** jump
        if stride > kernel_size:
            pytest.skip('stride higher than kernel_size is not tested')
        in_level = max_level - jump
        in_num_nodes = torch.sum(pyramids[:, 0, -(2 + jump)])
        coalescent_features = torch.rand((in_num_nodes, in_channels), device='cuda',
                                         requires_grad=True)
        conv = spc.ConvTranspose3d(in_channels, out_channels, kernel_vectors,
                                   jump, bias=with_bias).cuda()
        params = dict(conv.named_parameters())
        weight = params['weight']
        check_tensor(weight, shape=(kernel_vectors.shape[0],
                                    in_channels, out_channels),
                     dtype=torch.float, device='cuda')
        if with_bias:
            assert len(params) == 2
            bias = params['bias']
            check_tensor(bias, shape=(out_channels,), dtype=torch.float,
                         device='cuda')
        else:
            assert len(params) == 1
            bias = None
        buffers = dict(conv.named_buffers())
        assert len(buffers) == 1
        assert torch.equal(buffers['kernel_vectors'], kernel_vectors)
        assert repr(conv) == f'ConvTranspose3d(in={in_channels}, ' \
                             f'out={out_channels}, ' \
                             f'kernel_vector_size={kernel_vectors.shape[0]})'
        if with_spc_to_dict:
            input_spc = Spc(octrees, lengths)
            output, output_level = conv(**input_spc.to_dict(), level=in_level,
                                        input=coalescent_features)
        else:
            output, output_level = conv(
                octrees, point_hierarchies, in_level, pyramids, exsum,
                coalescent_features)
        expected_output, expected_output_level = spc.conv_transpose3d(
            octrees, point_hierarchies, in_level, pyramids, exsum, coalescent_features,
            weight, kernel_vectors, jump=jump, bias=bias)
        assert torch.equal(output, expected_output)
        assert output_level == expected_output_level
|
import pytest
import datetime
import json
from application.system.job import Job, JobStatus
from application.system.user import User
from application.system.user_role import UserRole
from application.handlers.response_builder import ResponseBuilder
from exceptions.user_not_found_exception import UserNotFoundException
# Shared ResponseBuilder instance exercised by every test in this module.
rb = ResponseBuilder()
def test_build_create_job():
    """build_create_job serializes the job id as a JSON object."""
    expected_answer = '{"job_id": 1}'
    # Fix: dropped the useless trailing `return` the original ended with.
    assert rb.build_create_job(1) == expected_answer
def test_build_log():
    """build_log serializes finished jobs with all their bookkeeping fields."""
    # Reuse the shared test user if a previous run already created it.
    try:
        user = User.get_user_by_username("rbuser")
    except UserNotFoundException:
        user = User("rbuser", "token")
    job1 = Job('storageA', 'storageB', '~/.data/', [True, True, True], user)
    job1_id = job1.get_job_id()
    job1.set_status(JobStatus.DONE)
    timestamp = datetime.datetime.now()
    job1.set_enqueue_time(timestamp)
    job1.set_end_time(timestamp)
    job2 = Job('storageA', 'storageB', '~/.data/', [True, True, True], user)
    job2_id = job2.get_job_id()
    job2.set_status(JobStatus.DONE)
    timestamp = datetime.datetime.now()
    job2.set_enqueue_time(timestamp)
    job2.set_end_time(timestamp)
    # Round-trip through storage so the asserted values come from persistence,
    # not from the in-memory objects.
    job1 = Job.get_job_by_id(job1_id)
    job2 = Job.get_job_by_id(job2_id)
    answer = json.loads(rb.build_log([job1, job2]))
    job1 = Job.get_job_by_id(job1_id)
    assert answer[0]["job_id"] == job1.get_job_id()
    assert answer[0]["source"] == job1.get_source_alias() + ":" + job1.get_source_relative_path()
    assert answer[0]["target"] == job1.get_target_alias()
    assert answer[0]["enqueue_time"] == job1.get_enqueue_time().isoformat()
    assert answer[0]["end_time"] == job1.get_end_time().isoformat()
    assert answer[0]["creator"] == job1.get_user().get_username()
    assert answer[0]["status"] == job1.get_status().name
    assert answer[0]["error"] == job1.get_error()
    job2 = Job.get_job_by_id(job2_id)
    assert answer[1]["job_id"] == job2.get_job_id()
    assert answer[1]["source"] == job2.get_source_alias() + ":" + job2.get_source_relative_path()
    assert answer[1]["target"] == job2.get_target_alias()
    assert answer[1]["enqueue_time"] == job2.get_enqueue_time().isoformat()
    assert answer[1]["end_time"] == job2.get_end_time().isoformat()
    assert answer[1]["creator"] == job2.get_user().get_username()
    assert answer[1]["status"] == job2.get_status().name
    assert answer[1]["error"] == job2.get_error()
def test_build_running():
    """build_active serializes jobs without end-time/error fields."""
    try:
        user = User.get_user_by_username("rbuser1")
    except UserNotFoundException:
        user = User("rbuser1", "token")
    job1 = Job('storageA', 'storageB', '~/.data/', [True, True, True], user)
    job1_id = job1.get_job_id()
    job1.set_status(JobStatus.DONE)
    timestamp = datetime.datetime.now()
    job1.set_enqueue_time(timestamp)
    job1.set_end_time(timestamp)
    job2 = Job('storageA', 'storageB', '~/.data/', [True, True, True], user)
    job2_id = job2.get_job_id()
    job2.set_status(JobStatus.DONE)
    timestamp = datetime.datetime.now()
    job2.set_enqueue_time(timestamp)
    job2.set_end_time(timestamp)
    # Round-trip through storage so asserted values come from persistence.
    job1 = Job.get_job_by_id(job1_id)
    job2 = Job.get_job_by_id(job2_id)
    answer = json.loads(rb.build_active([job1, job2]))
    job1 = Job.get_job_by_id(job1_id)
    assert answer[0]["job_id"] == job1.get_job_id()
    assert answer[0]["source"] == job1.get_source_alias() + ":" + job1.get_source_relative_path()
    assert answer[0]["target"] == job1.get_target_alias()
    assert answer[0]["enqueue_time"] == job1.get_enqueue_time().isoformat()
    assert answer[0]["creator"] == job1.get_user().get_username()
    assert answer[0]["status"] == job1.get_status().name
    job2 = Job.get_job_by_id(job2_id)
    assert answer[1]["job_id"] == job2.get_job_id()
    assert answer[1]["source"] == job2.get_source_alias() + ":" + job2.get_source_relative_path()
    assert answer[1]["target"] == job2.get_target_alias()
    assert answer[1]["enqueue_time"] == job2.get_enqueue_time().isoformat()
    assert answer[1]["creator"] == job2.get_user().get_username()
    assert answer[1]["status"] == job2.get_status().name
def test_build_directory_list():
    # TODO(review): no assertions yet — build_directory_list is untested.
    pass
def test_build_target_list():
    """build_target_list serializes the storage names as a JSON object."""
    expected_answer = '{"targets": ["storage1", "storage2", "storage3"]}'
    # Fix: dropped the useless trailing `return` the original ended with.
    assert rb.build_target_list(["storage1", "storage2", "storage3"]) == expected_answer
def test_build_job():
    """build_job serializes a single queued job."""
    # Reuse the shared test user if a previous run already created it.
    try:
        user = User.get_user_by_username("rbuser")
    except UserNotFoundException:
        user = User("rbuser", "token")
    job = Job('storageA', 'storageB', '~/.data/', [True, True, True], user)
    job.set_status(JobStatus.QUEUED)
    timestamp = datetime.datetime.now()
    job.set_enqueue_time(timestamp)
    answer = json.loads(rb.build_job(job))
    assert answer["job_id"] == job.get_job_id()
    assert answer["source"] == job.get_source_alias() + ":" + job.get_source_relative_path()
    assert answer["target"] == job.get_target_alias()
    assert answer["enqueue_time"] == job.get_enqueue_time().isoformat()
    assert answer["creator"] == job.get_user().get_username()
    assert answer["status"] == job.get_status().name
    return
def test_build_team_list():
    # TODO(review): no assertions yet — build_team_list is untested.
    pass
def test_build_queue():
    """build_queue serializes queued jobs including their priority."""
    try:
        user = User.get_user_by_username("rbuser2")
    except UserNotFoundException:
        user = User("rbuser2", "token")
    job1 = Job('storageA', 'storageB', '~/.data/', [True, True, True], user)
    job1_id = job1.get_job_id()
    job1.set_status(JobStatus.DONE)
    timestamp = datetime.datetime.now()
    job1.set_enqueue_time(timestamp)
    job1.set_end_time(timestamp)
    job2 = Job('storageA', 'storageB', '~/.data/', [True, True, True], user)
    job2_id = job2.get_job_id()
    job2.set_status(JobStatus.DONE)
    timestamp = datetime.datetime.now()
    job2.set_enqueue_time(timestamp)
    job2.set_end_time(timestamp)
    # Round-trip through storage so asserted values come from persistence.
    job1 = Job.get_job_by_id(job1_id)
    job2 = Job.get_job_by_id(job2_id)
    answer = json.loads(rb.build_queue([job1, job2]))
    job1 = Job.get_job_by_id(job1_id)
    assert answer[0]["job_id"] == job1.get_job_id()
    assert answer[0]["source"] == job1.get_source_alias() + ":" + job1.get_source_relative_path()
    assert answer[0]["target"] == job1.get_target_alias()
    assert answer[0]["enqueue_time"] == job1.get_enqueue_time().isoformat()
    assert answer[0]["creator"] == job1.get_user().get_username()
    assert answer[0]["priority"] == job1.get_priority()
    assert answer[0]["status"] == job1.get_status().name
    job2 = Job.get_job_by_id(job2_id)
    assert answer[1]["job_id"] == job2.get_job_id()
    assert answer[1]["source"] == job2.get_source_alias() + ":" + job2.get_source_relative_path()
    assert answer[1]["target"] == job2.get_target_alias()
    assert answer[1]["enqueue_time"] == job2.get_enqueue_time().isoformat()
    assert answer[1]["creator"] == job2.get_user().get_username()
    assert answer[1]["priority"] == job2.get_priority()
    assert answer[1]["status"] == job2.get_status().name
def test_build_workers():
    # TODO(review): no assertions yet — build_workers is untested.
    pass
def test_build_user_role():
    """build_user_role serializes each role enum to a {"role": name} object."""
    # Fix: collapsed the copy-pasted triples into one data-driven loop and
    # dropped the useless trailing `return`.
    for role, name in ((UserRole.Administrator, 'Administrator'),
                       (UserRole.ProjectManager, 'ProjectManager'),
                       (UserRole.User, 'User')):
        assert rb.build_user_role(role) == json.dumps({'role': name})
def test_build_exception():
    # TODO(review): no assertions yet — exception serialization is untested.
    pass
|
import sys
from reminder.argument.argument import MyParser
def execute():
    """Entry point: parse and dispatch the command-line arguments."""
    parser = MyParser()
    parser.parse_arguments()


if __name__ == '__main__':
    sys.exit(execute())
|
import sys
import pygame
import pygame.locals
# Initialize all imported pygame modules.
pygame.init()
# Create the 400x300 display surface for the game.
DISPLAY_SURFACE = pygame.display.set_mode((400,300))
# Set the window title.
pygame.display.set_caption("Hello World!")
# Main game loop: process events, then repaint.
while True:
    # Drain all pending events from the event queue.
    for event in pygame.event.get():
        if event.type == pygame.locals.QUIT:
            # Window close requested: shut pygame down and exit the process.
            pygame.quit()
            sys.exit()
    # Flush the drawing buffer to the screen.
    pygame.display.update()
|
"""
byceps.blueprints.admin.ticketing.checkin.views
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
:Copyright: 2006-2021 Jochen Kupperschmidt
:License: Revised BSD (see `LICENSE` file for details)
"""
from datetime import date
from flask import abort, g, request, url_for
from flask_babel import gettext
from .....permissions.ticketing import TicketingPermission
from .....services.party import service as party_service
from .....services.ticketing import (
exceptions as ticket_exceptions,
ticket_service,
ticket_user_checkin_service,
)
from .....services.user import service as user_service
from .....signals import ticketing as ticketing_signals
from .....util.framework.blueprint import create_blueprint
from .....util.framework.flash import flash_error, flash_notice, flash_success
from .....util.framework.templating import templated
from .....util.views import permission_required, respond_no_content
blueprint = create_blueprint('ticketing_checkin_admin', __name__)
MINIMUM_AGE_IN_YEARS = 18
@blueprint.get('/for_party/<party_id>')
@permission_required(TicketingPermission.checkin)
@templated
def index(party_id):
    """Provide form to find tickets, then check them in."""
    party = _get_party_or_404(party_id)
    search_term = request.args.get('search_term', default='').strip()
    limit = 10
    latest_dob_for_checkin = None
    tickets = None
    users = None
    if search_term:
        latest_dob_for_checkin = _get_latest_date_of_birth_for_checkin()
        users = _search_users(search_term, limit)
        # Merge directly matching tickets with tickets of matching users.
        found = _search_tickets(party.id, search_term, limit)
        found += list(_get_tickets_for_users(party.id, users))
        tickets = found
    return {
        'party': party,
        'latest_dob_for_checkin': latest_dob_for_checkin,
        'search_term': search_term,
        'tickets': tickets,
        'users': users,
    }
def _get_latest_date_of_birth_for_checkin():
    """Return the latest birth date a person may have to be of minimum age today.

    Fix: `date.replace` raises ValueError when today is Feb 29 and the target
    year is not a leap year; fall back to Feb 28 in that case.
    """
    today = date.today()
    target_year = today.year - MINIMUM_AGE_IN_YEARS
    try:
        return today.replace(year=target_year)
    except ValueError:
        # Only reachable for Feb 29 -> non-leap year; use Feb 28 instead.
        return today.replace(year=target_year, day=today.day - 1)
def _search_tickets(party_id, search_term, limit):
    """Return the first page (up to `limit`) of tickets matching the term."""
    pagination = ticket_service.get_tickets_with_details_for_party_paginated(
        party_id, 1, limit, search_term=search_term
    )
    return pagination.items
def _search_users(search_term, limit):
    """Return the first page (up to `limit`) of non-deleted users matching the term."""
    pagination = user_service.get_users_paginated(
        1, limit, search_term=search_term
    )
    # Exclude deleted users.
    pagination.items = [u for u in pagination.items if not u.deleted]
    return pagination.items
def _get_tickets_for_users(party_id, users):
    """Yield every ticket related to any of the given users for the party."""
    for user in users:
        tickets = ticket_service.find_tickets_related_to_user_for_party(
            user.id, party_id
        )
        for ticket in tickets:
            yield ticket
@blueprint.post('/for_party/<party_id>/tickets/<uuid:ticket_id>/check_in_user')
@permission_required(TicketingPermission.checkin)
@respond_no_content
def check_in_user(party_id, ticket_id):
    """Check the user in."""
    # NOTE(review): `party` is looked up only for its 404 side effect; the
    # service call below receives the raw party_id.
    party = _get_party_or_404(party_id)
    ticket = _get_ticket_or_404(ticket_id)
    initiator_id = g.user.id
    try:
        event = ticket_user_checkin_service.check_in_user(
            party_id, ticket.id, initiator_id
        )
    except ticket_exceptions.UserAccountDeleted:
        flash_error(
            gettext(
                'The user account assigned to this ticket has been deleted. Check-in denied.'
            )
        )
        return
    except ticket_exceptions.UserAccountSuspended:
        flash_error(
            gettext(
                'The user account assigned to this ticket has been suspended. Check-in denied.'
            )
        )
        return
    # Notify listeners only after a successful check-in.
    ticketing_signals.ticket_checked_in.send(None, event=event)
    ticket_url = url_for('ticketing_admin.view_ticket', ticket_id=ticket.id)
    flash_success(
        gettext(
            'User <em>%(screen_name)s</em> has been checked in with ticket <a href="%(ticket_url)s">%(ticket_code)s</a>.',
            screen_name=ticket.used_by.screen_name,
            ticket_url=ticket_url,
            ticket_code=ticket.code,
        ),
        text_is_safe=True,
    )
    # Warn the operator when the ticket has no seat assigned.
    occupies_seat = ticket.occupied_seat_id is not None
    if not occupies_seat:
        flash_notice(
            gettext(
                'Ticket <a href="%(ticket_url)s">%(ticket_code)s</a> does not occupy a seat.',
                ticket_url=ticket_url,
                ticket_code=ticket.code,
            ),
            icon='warning',
            text_is_safe=True,
        )
@blueprint.post('/tickets/<uuid:ticket_id>/revert_user_check_in')
@permission_required(TicketingPermission.checkin)
@respond_no_content
def revert_user_check_in(ticket_id):
    """Revert the user check-in state."""
    ticket = _get_ticket_or_404(ticket_id)
    ticket_user_checkin_service.revert_user_check_in(ticket.id, g.user.id)
    flash_success(gettext('Check-in has been reverted.'))
def _get_party_or_404(party_id):
    """Return the party, or abort with 404 when the id is unknown."""
    party = party_service.find_party(party_id)
    if party is not None:
        return party
    abort(404)
def _get_ticket_or_404(ticket_id):
    """Return the ticket, or abort with 404 when the id is unknown."""
    ticket = ticket_service.find_ticket(ticket_id)
    if ticket is not None:
        return ticket
    abort(404)
|
import uuid
import pytest
from selenium import webdriver
@pytest.fixture
def test_password():
    # Shared plain-text password used by the user-factory fixtures below.
    return 'strong-test-pass'
@pytest.fixture
def test_with_specific_settings(settings):
    # Disable basic auth for tests that depend on this fixture.
    settings.BASICAUTH_DISABLE = True
@pytest.fixture
def create_user(db, django_user_model, test_password,
                test_with_specific_settings):
    """Factory fixture: create a user with the shared test password.

    A random username is generated unless one is supplied.
    """
    def make_user(**kwargs):
        kwargs['password'] = test_password
        kwargs.setdefault('username', str(uuid.uuid4()))
        return django_user_model.objects.create_user(**kwargs)
    return make_user
@pytest.fixture
def auto_login_user(db, client, create_user, test_password,
                    test_with_specific_settings):
    """Factory fixture: log a (possibly freshly created) user into the test client."""
    def make_auto_login(user=None):
        logged_in = user if user is not None else create_user()
        client.login(username=logged_in.username, password=test_password)
        return client, logged_in
    return make_auto_login
# fixture for selenium tests
# fixture for selenium tests
@pytest.fixture(scope='session')
def driver(request):
    """Session-scoped Selenium driver chosen by the --driver CLI option."""
    name = request.config.getoption('--driver', default='chrome')
    if name in ('firefox', 'ff'):
        browser = webdriver.Firefox()
    elif name == 'chrome':
        browser = webdriver.Chrome()
    else:
        raise ValueError('invalid driver name: ' + name)
    browser.set_window_size(1200, 800)
    # Base URL of the application under test, settable via --url.
    browser.base_url = \
        request.config.getoption('--url', default='http://localhost:8000')
    yield browser
    browser.quit()
|
from draw2d import Viewer, Rectangle, Frame, Circle, Line, Point
import math, time
# Demo: open a draw2d viewer and render a single point each second.
# Fix: removed the no-op `if True:` wrapper around the pyglet.gl import and
# the dead commented-out geometry experiments.
try:
    import pyglet
except ImportError as e:
    raise ImportError('''
    Cannot import pyglet.
    HINT: you can install pyglet directly via 'pip install pyglet'.
    But if you really just want to install all Gym dependencies and not have to think about it,
    'pip install -e .[all]' or 'pip install gym[all]' will do it.
    ''')

try:
    from pyglet.gl import *
except ImportError as e:
    raise ImportError('''
    Error occurred while running `from pyglet.gl import *`
    HINT: make sure you have OpenGL install. On Ubuntu, you can run 'apt-get install python-opengl'.
    If you're running on a server, you may need a virtual frame buffer; something like this should work:
    'xvfb-run -s \"-screen 0 1400x900x24\" python <your_script.py>'
    ''')

viewer = Viewer(800, 800)

# One white point managed by the viewer at (30, 30).
p = Point().color(1, 1, 1).move_to(30, 30)
viewer.add_geom(p)

while True:
    # Drive the render loop by hand (instead of viewer.render()) so an extra
    # raw-GL point can be drawn alongside the viewer's own geometry.
    viewer.window.switch_to()
    viewer.window.dispatch_events()
    viewer.window.clear()
    glClearColor(*viewer.clear_color)
    glColor4f(1, 1, 1, 1)
    glBegin(GL_POINTS)  # draw a second point directly with raw GL
    glVertex3f(30.0, 20.0, 0.0)
    glEnd()
    viewer.window.flip()
    time.sleep(1)
|
from direct.distributed.AstronInternalRepository import AstronInternalRepository
from otp.distributed.OtpDoGlobals import *
from toontown.distributed.ToontownNetMessengerAI import ToontownNetMessengerAI
from direct.distributed.PyDatagram import PyDatagram
import traceback
import sys
import urlparse
class ToontownInternalRepository(AstronInternalRepository):
GameGlobalsId = OTP_DO_ID_TOONTOWN
dbId = 4003
def __init__(self, baseChannel, serverId=None, dcFileNames=None,
dcSuffix='AI', connectMethod=None, threadedNet=None):
AstronInternalRepository.__init__(
self, baseChannel, serverId=serverId, dcFileNames=dcFileNames,
dcSuffix=dcSuffix, connectMethod=connectMethod, threadedNet=threadedNet)
self.wantMongo = config.GetBool('want-mongo', False)
def handleConnected(self):
self.__messenger = ToontownNetMessengerAI(self)
if self.wantMongo:
import pymongo
mongourl = config.GetString('mongodb-url', 'mongodb://localhost')
replicaset = config.GetString('mongodb-replicaset', '')
db = (urlparse.urlparse(mongourl).path or '/Astron_Dev')[1:]
if replicaset:
self.dbConn = pymongo.MongoClient(mongourl, replicaset=replicaset)
else:
self.dbConn = pymongo.MongoClient(mongourl)
self.database = self.dbConn[db]
self.dbGlobalCursor = self.database.toontownstride
else:
self.dbConn = None
self.database = None
self.dbGlobalCursor = None
def sendNetEvent(self, message, sentArgs=[]):
self.__messenger.send(message, sentArgs)
def addExitEvent(self, message):
dg = self.__messenger.prepare(message)
self.addPostRemove(dg)
def handleDatagram(self, di):
msgType = self.getMsgType()
if msgType == self.__messenger.msgType:
self.__messenger.handle(msgType, di)
return
AstronInternalRepository.handleDatagram(self, di)
def getAvatarIdFromSender(self):
return int(self.getMsgSender() & 0xFFFFFFFF)
def getAccountIdFromSender(self):
return int((self.getMsgSender()>>32) & 0xFFFFFFFF)
def _isValidPlayerLocation(self, parentId, zoneId):
if zoneId < 1000 and zoneId != 1:
return False
return True
def readerPollOnce(self):
try:
return AstronInternalRepository.readerPollOnce(self)
except SystemExit, KeyboardInterrupt:
raise
except Exception as e:
if self.getAvatarIdFromSender() > 100000000:
dg = PyDatagram()
dg.addServerHeader(self.getMsgSender(), self.ourChannel, CLIENTAGENT_EJECT)
dg.addUint16(166)
dg.addString('You were disconnected to prevent a district reset.')
self.send(dg)
self.writeServerEvent('INTERNAL-EXCEPTION', self.getAvatarIdFromSender(), self.getAccountIdFromSender(), repr(e), traceback.format_exc())
self.notify.warning('INTERNAL-EXCEPTION: %s (%s)' % (repr(e), self.getAvatarIdFromSender()))
print traceback.format_exc()
sys.exc_clear()
return 1
|
def convert_to_celsius(fahrenheit: float) -> float:
    """Convert a temperature from degrees Fahrenheit to degrees Celsius.

    >>> convert_to_celsius(75)
    23.88888888888889
    """
    offset = fahrenheit - 32.0
    return offset * 5.0 / 9.0
def above_freezing(celsius: float) -> bool:
    """Report whether *celsius* is strictly above the freezing point of water.

    >>> above_freezing(5.2)
    True
    >>> above_freezing(-2)
    False
    """
    freezing_point = 0
    return celsius > freezing_point
|
# Demo: compare globals() and locals() inside and outside a function.
FOO = 1
def keys(x):
    # Public (non-underscore) names of a namespace mapping.
    return [k for k in x.keys() if not k.startswith('_')]
def foo(bar):
    baz = 3
    # Inside a function, locals() holds only parameters and local variables.
    print('foo', keys(globals()), keys(locals()), sep='\n')
# At module level, globals() and locals() are the same mapping.
print('TOP', keys(globals()), keys(locals()), sep='\n')
foo(12)
|
from django.conf.urls import url
from . import views
from remoteCMD.views import remote_cmd, file_trans
from showdata.views import show_data, save_data, get_data
# Routes: auth + host management from this app's views, plus remote-command
# and data-display endpoints imported from sibling apps.  Order matters for
# URL resolution; do not reorder.
urlpatterns = [
    url(r'^login/$',views.login, name='login'),
    url(r'^register/$',views.register, name='register'),
    url(r'^index/$', views.index, name='index'),
    url(r'^logout/$', views.logout, name='logout'),
    url(r'^hostlist/$', views.hostlist, name='hostList'),
    url(r'^add_host/$', views.add_host, name='add_host'),
    url(r'^del_host/$', views.del_host, name='del_host'),
    url(r'^remote_cmd/$', remote_cmd, name='remote_cmd'),
    url(r'^file_trans/$', file_trans, name='file_trans'),
    url(r'^show_data/$', show_data, name='show_data'),
    url(r'^save_data/$', save_data, name='save_data'),
    url(r'^get_data/$', get_data, name='get_data'),
]
] |
from nbtlib import nbt
# Overrides for block names whose legacy id differs from the modern name.
# Empty means every name maps to itself (see block_name_to_id).
BLOCK_NAME_TO_ID_MAP = {
}
# Legacy data-value lookup tables, keyed by block-state property values.
DV_FACING = {'north': 3, 'south': 2, 'west': 1, 'east': 0}
DV_CHEST_FACING = {'north': 2, 'south': 3, 'west': 4, 'east': 5}
DV_HALF = {'bottom': 0, 'top': 1}
DV_DIRT_VARIANT = {'dirt': 0, 'coarse_dirt': 1, 'podzol': 2}
DV_TALLGRASS_TYPE = {'dead_bush': 0, 'tall_grass': 1, 'fern': 2}
DV_STONE_VARIANT = {
    'stone': 0, 'granite': 1, 'smooth_granite': 2, 'diorite': 3, 'smooth_diorite': 4, 'andesite': 5,
    'smooth_andesite': 6}
DV_STONEBRICK_VARIANT = {'stonebrick': 0, 'mossy_stonebrick': 1, 'cracked_stonebrick': 2, 'chiseled_stonebrick': 3}
# Per-block handlers that turn a Properties compound into a legacy data value.
PROPERTIES_TO_DV_HANDLER_MAP = {
    'minecraft:tallgrass': lambda p: DV_TALLGRASS_TYPE[p['type']],
    'minecraft:dirt': lambda p: DV_DIRT_VARIANT[p['variant']],
    'minecraft:stone': lambda p: DV_STONE_VARIANT[p['variant']],
    'minecraft:stonebrick': lambda p: DV_STONEBRICK_VARIANT[p['variant']],
    'minecraft:stone_brick_stairs': lambda p: DV_FACING[p['facing']] + 4 * DV_HALF[p['half']],
    'minecraft:chest': lambda p: DV_CHEST_FACING[p['facing']]
}
def block_name_to_id(block_name: str):
    """Map a (possibly namespace-prefixed) block name to its upper-cased id.

    The namespace before the first ':' is stripped, then the override table
    is consulted; unknown names map to themselves.
    """
    if ':' in block_name:
        block_name = block_name.split(':', 1)[1]
    return BLOCK_NAME_TO_ID_MAP.get(block_name, block_name).upper()
def block_properties_to_data(block_name, block_properties):
    """Return the legacy data value for *block_name*, or None if no handler."""
    try:
        handler = PROPERTIES_TO_DV_HANDLER_MAP[block_name]
    except KeyError:
        return None
    return handler(block_properties)
def structure_to_bo3_blocks(structure_nbt):
    """Yield one BO3 ``B(x,y,z,ID[:data][,'nbt'])`` line per structure block.

    Blocks carrying their own NBT payload are collected and emitted after
    all plain blocks; a final comment line records the structure size.
    """
    later_lines = []
    for block_nbt in structure_nbt.root['blocks']:
        x, y, z = block_nbt['pos']
        block_tag = block_nbt.get('nbt')
        # Resolve this block's palette entry to a name, id and data value.
        p = block_nbt['state']
        palette_nbt = structure_nbt.root['palette'][p]
        block_name = palette_nbt['Name']
        block_id = block_name_to_id(block_name)
        block_properties = palette_nbt.get('Properties')
        block_data = block_properties_to_data(block_name, block_properties) if block_properties else None
        # NOTE(review): a data value of 0 is falsy and is omitted from the
        # line -- presumably intended since 0 is the legacy default; confirm.
        line = ''.join(
            (f'B({x},{y},{z},{block_id}',
             f':{block_data}' if block_data else '',
             f',\'{str(block_tag)}\'' if block_tag else '',
             ')')
        )
        if block_tag:
            later_lines.append(line)
        else:
            yield line
    yield from later_lines
    dx, dy, dz = structure_nbt.root['size']
    yield f'# Size (X*Y*Z): {dx}*{dy}*{dz}'
def structure_file_to_bo3_blocks(structure_file, output_file):
    """Convert a structure NBT file into BO3 block lines in *output_file*."""
    structure = nbt.load(structure_file)
    with open(output_file, 'w') as out:
        out.write('\n'.join(structure_to_bo3_blocks(structure)))
|
#!/usr/bin/python
import subprocess
import sys, os, shutil
import string,re
# Patterns matching the header lines of each performance-test block in the
# results file (see parse_results).
perf_test_pat = re.compile("CudaDMA Sequential Performance Test")
alignment_pat = re.compile("\s+ALIGNMENT - (?P<align>[0-9]+)")
elmt_size_pat = re.compile("\s+ELEMENT SIZE - (?P<size>[0-9]+)")
true_spec_pat = re.compile("\s+WARP SPECIALIZED - true")
false_spec_pat= re.compile("\s+WARP SPECIALIZED - false")
buffering_pat = re.compile("\s+BUFFERING - (?P<buff>\w+)")
dma_warps_pat = re.compile("\s+DMA WARPS - (?P<warps>[0-9]+)")
cta_per_sm_pat= re.compile("\s+CTAs/SM - (?P<persm>[0-9]+)")
loop_iter_pat = re.compile("\s+LOOP ITERATIONS - (?P<loops>[0-9]+)")
total_cta_pat = re.compile("\s+Total CTAS - (?P<ctas>[0-9]+)")
total_mem_pat = re.compile("\s+Total memory - (?P<mem>[0-9]+)")
total_perf_pat= re.compile("Performance - (?P<perf>[0-9\.]+)")
# Patterns for the interactive query language (see the __main__ loop).
list_pat = re.compile("list (?P<modifier>\w+) (?P<count>[0-9]+) where(?P<constraints>(\s+\w+[><=]+\w+)*)")
total_pat= re.compile("total where(?P<constraints>(\s+\w+[><=]+\w+)*)")
aver_pat = re.compile("average where(?P<constraints>(\s+\w+[><=]+\w+)*)")
dev_pat  = re.compile("deviation where(?P<constraints>(\s+\w+[><=]+\w+)*)")
cons_pat = re.compile("(?P<left>\w+)(?P<op>[><=]+)(?P<right>\w+)")
pop_pat  = re.compile("pop")
clear_pat= re.compile("clear")
quit_pat = re.compile("quit")
class Experiment(object):
    """One parsed performance-test record (Python 2 codebase).

    Fields are filled in incrementally by parse_results as matching header
    lines are encountered.
    """
    def __init__(self):
        self.alignment = None
        self.elmt_size = None
        self.specialized = None
        self.buffering = None
        self.total_warps = 0
        self.ctas_per_sm = None
        self.loop_iters = None
        self.total_ctas = None
        self.total_mem = None
        self.perf = None
    def complete(self):
        # A record is complete once every mandatory field was parsed;
        # 'buffering' is only reported for warp-specialized runs.
        if self.alignment <> None and self.elmt_size <> None and \
            self.specialized <> None and self.ctas_per_sm <> None and \
            self.loop_iters <> None and self.total_ctas <> None and \
            self.total_mem <> None and self.perf <> None:
            if self.specialized:
                if self.buffering <> None:
                    return True
                return False
            return True
        return False
    def meets(self,constraints):
        # True iff every constraint predicate accepts this experiment.
        for c in constraints:
            if not c.evaluate(self):
                return False
        return True
    def print_experiment(self,indent):
        # Pretty-print all fields, each line prefixed by the indent string.
        print indent+"Experiment:"
        print indent+"  Alignment - "+str(self.alignment)
        print indent+"  Element Size - "+str(self.elmt_size)
        print indent+"  Specialized - "+str(self.specialized)
        if self.specialized:
            print indent+"  Buffering - "+str(self.buffering)
        print indent+"  Total Warps - "+str(self.total_warps)
        print indent+"  CTAs/SM - "+str(self.ctas_per_sm)
        print indent+"  Loop Iterations - "+str(self.loop_iters)
        print indent+"  Total CTAs - "+str(self.total_ctas)
        print indent+"  Total Memory - "+str(self.total_mem)
        print indent+"  Performance - "+str(self.perf)+" GB/s"
        #print ""
class Constraint(object):
    """One 'field OP value' predicate evaluated against Experiment objects.

    The value string is coerced to int, float or bool where possible; the
    field name is translated from the query language to the Experiment
    attribute name.  A constraint with any unparsed part is invalid.
    """

    # Query-language field aliases -> Experiment attribute names.
    _FIELDS = {
        "alignment": "alignment",
        "size": "elmt_size",
        "elmt_size": "elmt_size",
        "warps": "total_warps",
        "buffering": "buffering",
        "specialized": "specialized",
        "CTAperSM": "ctas_per_sm",
        "loops": "loop_iters",
        "iters": "loop_iters",
        "CTAs": "total_ctas",
        "Mem": "total_mem",
        "Memory": "total_mem",
        "Perf": "perf",
    }
    # Comparison operator spellings -> binary predicates.
    _OPS = {
        "<": lambda x, y: x < y,
        "<=": lambda x, y: x <= y,
        "=": lambda x, y: x == y,
        "==": lambda x, y: x == y,
        ">": lambda x, y: x > y,
        ">=": lambda x, y: x >= y,
    }

    def __init__(self, field, op, value):
        self.value = self._coerce(value)
        self.field = self.parse_field_name(field)
        self.op = self.parse_op_name(op)

    @staticmethod
    def _coerce(value):
        # int first, then float, then booleans; otherwise keep the string.
        for cast in (int, float):
            try:
                return cast(value)
            except ValueError:
                pass
        if value == "true" or value == "True":
            return True
        if value == "false" or value == "False":
            return False
        return value

    def is_valid(self):
        # Usable only when all three parts parsed successfully.
        return not (self.field == None or self.op == None or self.value == None)

    def parse_field_name(self, name):
        # Translate a query-language name; None marks an unknown field.
        return self._FIELDS.get(name)

    def parse_op_name(self, op):
        # Translate an operator spelling; None marks an unknown operator.
        return self._OPS.get(op)

    def evaluate(self, experiment):
        # Apply the predicate to the experiment's field value.
        return self.op(getattr(experiment, self.field), self.value)
def find_best(result_set):
best_ex = None
best_perf = 0
for ex in result_set:
if ex.perf > best_perf:
best_ex = ex
best_perf = ex.perf
assert best_ex <> None
print "Best performance:"
best_ex.print_experiment()
def parse_results(result_set,file_name):
    """Parse a CudaDMA results file, adding one Experiment per test block.

    A 'CudaDMA Sequential Performance Test' banner starts a new record;
    subsequent header lines fill in its fields.  The record in progress is
    committed when the next banner (or end of file) is reached.
    """
    cur_obj = None
    f = open(file_name,'r')
    for line in f:
        m = perf_test_pat.match(line)
        if m <> None:
            # New banner: commit the previous record (if any), start fresh.
            if cur_obj <> None:
                #assert cur_obj.complete()
                if not cur_obj.complete():
                    # NOTE(review): print_experiment requires an indent
                    # argument -- this debug path would raise TypeError.
                    cur_obj.print_experiment()
                    assert False
                result_set.add(cur_obj)
            cur_obj = Experiment()
            continue
        # Each header line fills exactly one field of the current record.
        m = alignment_pat.match(line)
        if m <> None:
            cur_obj.alignment = int(m.group('align'))
            continue
        m = elmt_size_pat.match(line)
        if m <> None:
            cur_obj.elmt_size = int(m.group('size'))
            continue
        m = true_spec_pat.match(line)
        if m <> None:
            cur_obj.specialized = True
            continue
        m = false_spec_pat.match(line)
        if m <> None:
            cur_obj.specialized = False
            continue
        m = buffering_pat.match(line)
        if m <> None:
            cur_obj.buffering = m.group('buff')
            continue
        m = dma_warps_pat.match(line)
        if m <> None:
            cur_obj.total_warps = int(m.group('warps'))
            continue
        m = cta_per_sm_pat.match(line)
        if m <> None:
            cur_obj.ctas_per_sm = int(m.group('persm'))
            continue
        m = loop_iter_pat.match(line)
        if m <> None:
            cur_obj.loop_iters = int(m.group('loops'))
            continue
        m = total_cta_pat.match(line)
        if m <> None:
            cur_obj.total_ctas = int(m.group('ctas'))
            continue
        m = total_mem_pat.match(line)
        if m <> None:
            cur_obj.total_mem = int(m.group('mem'))
            continue
        m = total_perf_pat.match(line)
        if m <> None:
            cur_obj.perf = float(m.group('perf'))
            continue
    # Commit the final record if the file ended mid-block.
    if cur_obj <> None and cur_obj.complete():
        result_set.add(cur_obj)
    f.close()
def parse_constraints(constraint_strings):
    """Parse whitespace-separated 'field<op>value' tokens into a Constraint set.

    Invalid constraints are reported and dropped rather than aborting.
    """
    #print "Parsing constraints "+str(constraint_strings)
    result = set()
    for c in str.split(constraint_strings):
        m = cons_pat.match(c)
        assert m <> None
        constraint = Constraint(m.group('left'),m.group('op'),m.group('right'))
        if constraint.is_valid():
            result.add(constraint)
        else:
            print "Invalid constraint: "+str(c)
    #print "Total constraints "+str(len(result))
    return result
def find_experiments(experiments, constraints):
    """Return the subset of *experiments* satisfying every constraint."""
    return set(ex for ex in experiments if ex.meets(constraints))
def execute_list_command(command, count, constraint_strings, experiments):
    """Print the top-'count' matching experiments sorted by performance.

    Returns True on success so the caller keeps the command in its history.
    Only the 'top' modifier is implemented.
    """
    constraints = parse_constraints(constraint_strings)
    if constraints == None:
        return False
    matches = find_experiments(experiments,constraints)
    if command == "top":
        total_printed = 0
        for ex in sorted(matches,key=lambda x: x.perf,reverse=True):
            total_printed = total_printed + 1
            print "Experiment "+str(total_printed)
            ex.print_experiment('  ')
            print ""
            if total_printed == count:
                break
        if total_printed < count:
            print "Total results "+str(len(matches))+" less than "+str(count)
            return False
        return True
    else:
        print "Unimplemented command "+str(command)
        return False
def execute_total_command(constraint_strings,experiments):
    """Print how many experiments satisfy the constraints."""
    constraints = parse_constraints(constraint_strings)
    if constraints == None:
        return False
    matches = find_experiments(experiments,constraints)
    print "Total: "+str(len(matches))+" matching experiments"
    return True
def execute_average_command(constraint_strings,experiments):
    """Print the mean performance over all experiments matching the constraints."""
    constraints = parse_constraints(constraint_strings)
    if constraints == None:
        return False
    matches = find_experiments(experiments,constraints)
    if len(matches) == 0:
        print "There were no results that satisfied the constraints"
        return False
    total_perf = 0.0
    for ex in matches:
        total_perf = total_perf + ex.perf
    avg_perf = total_perf/len(matches)
    print "Average perf for "+str(len(matches))+" results was "+str(avg_perf)+" GB/s"
    return True
def execute_deviation_command(constraint_strings,experiments):
    """Placeholder for a standard-deviation report (not implemented)."""
    constraints = parse_constraints(constraint_strings)
    if constraints == None:
        return False
    print "Not implemented"
    return False
if __name__ == "__main__":
    # Interactive query shell over a parsed results file (argv[1]).
    assert len(sys.argv) > 1
    results = set()
    parse_results(results,sys.argv[1])
    # prev_cmd is a sticky prefix: successful commands are appended and
    # re-shown at the prompt; 'pop' removes the last part, 'clear' resets.
    prev_cmd = list()
    while True:
        cmd = ""
        for pc in prev_cmd:
            cmd = cmd + pc + ' '
        temp_cmd = raw_input('Enter a command >: '+cmd)
        cmd = cmd + temp_cmd
        # These have to go first
        m = pop_pat.match(temp_cmd)
        if m <> None:
            prev_cmd.pop()
            continue
        m = clear_pat.match(temp_cmd)
        if m <> None:
            prev_cmd = list()
            continue
        m = quit_pat.match(temp_cmd)
        if m <> None:
            break
        # Query commands are matched against the full (prefix + new) string.
        m = list_pat.match(cmd)
        if m <> None:
            if execute_list_command(m.group('modifier'),int(m.group('count')),m.group('constraints'),results):
                prev_cmd.append(temp_cmd)
            continue
        m = total_pat.match(cmd)
        if m <> None:
            if execute_total_command(m.group('constraints'),results):
                prev_cmd.append(temp_cmd)
            continue
        m = aver_pat.match(cmd)
        if m <> None:
            if execute_average_command(m.group('constraints'),results):
                prev_cmd.append(temp_cmd)
            continue
        m = dev_pat.match(cmd)
        if m <> None:
            if execute_deviation_command(m.group('constraints'),results):
                prev_cmd.append(temp_cmd)
            continue
        print "Illegal command! "+str(cmd)+" Type 'quit' to quit"
|
#!/usr/bin/env python
#
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
import json
import logging
import azure.functions as func
from onefuzztypes.models import HeartbeatEntry
from ..onefuzzlib.dashboard import get_event
from ..onefuzzlib.heartbeat import Heartbeat
def main(msg: func.QueueMessage, dashboard: func.Out[str]) -> None:
    """Record a heartbeat queue message and forward any dashboard event."""
    payload = msg.get_body()
    logging.info("heartbeat: %s", payload)
    Heartbeat.add(HeartbeatEntry.parse_obj(json.loads(payload)))
    event = get_event()
    if event:
        dashboard.set(event)
|
#!/usr/bin/env python
from __future__ import print_function
import os
import matplotlib
matplotlib.use('Agg')
import pylab as plt
import numpy as np
from numpy.fft import fftn, ifftn
import scipy.integrate as integrate
from scipy import stats
import PlotScripts
import ReadScripts
import AllVars
def calculate_HI_frac(XHII, density):
    """Return the mass-weighted neutral-hydrogen fraction of a grid.

    Parameters
    ----------
    XHII : 3D array of floats
        Fraction of ionized hydrogen in each cell (unitless).
    density : 3D array of floats
        Dark-matter overdensity (rho/<rho>) in each cell (unitless).

    Returns
    -------
    HI : float
        Mass-averaged HI fraction (unitless).
    """
    total_mass = np.sum(density)
    HI = 1.0 - np.sum(XHII * density / total_mass)
    print("")
    print("Mass averaged HI fraction is {0:.4f}".format(HI))
    return HI
def determine_close_idx(fname_HII, fname_density, SnapList, GridSize,
                        precision, target_XHI_fraction, model_tags):
    """For each model, find the snapshot whose HI fraction is closest to
    each target value; returns one index list per model.

    NOTE(review): despite its name, XHII_fraction stores the *HI* fraction
    returned by calculate_HI_frac.
    """
    XHII_fraction = np.zeros_like(SnapList, dtype=np.float32)
    for model_number in range(len(fname_HII)):
        for snapnum in range(len(SnapList[model_number])):
            # Ionization grid for this snapshot.
            HII_fname = "{0}_{1:03d}".format(fname_HII[model_number],
                                             SnapList[model_number][snapnum])
            HII = ReadScripts.read_binary_grid(HII_fname,
                                               GridSize[model_number],
                                               precision[model_number])
            # Matching dark-matter overdensity grid.
            density_fname = "{0}{1:03d}.dens.dat".format(fname_density[model_number],
                                                         SnapList[model_number][snapnum])
            density = ReadScripts.read_binary_grid(density_fname,
                                                   GridSize[model_number],
                                                   precision[model_number])
            HI_frac = calculate_HI_frac(HII, density)
            XHII_fraction[model_number][snapnum] = HI_frac
    # Rebuild SnapList as, per model, the snapshot index nearest each target.
    SnapList = []
    for model_number in range(len(fname_HII)):
        SnapList.append([])
        print("Model {0}".format(model_tags[model_number]))
        for val in target_XHI_fraction:
            idx = (np.abs(XHII_fraction[model_number] - val)).argmin()
            print("HI Fract {0}: Nearest Idx {1} with value {2}".format(val,
                                                                        idx,
                                                                        XHII_fraction[model_number][idx]))
            SnapList[model_number].append(idx)
    return SnapList
def determine_MH_fesc_constants(MH_low, MH_high, fesc_low, fesc_high):
    """Solve fesc = A * MH**B through (MH_low, fesc_low) and (MH_high, fesc_high).

    Returns the (A, B) power-law constants.
    """
    # Work in log10 space: log fesc = log_A + B * log MH at both anchors.
    log_ml, log_mh = np.log10(MH_low), np.log10(MH_high)
    log_fl, log_fh = np.log10(fesc_low), np.log10(fesc_high)
    log_A = (log_fh - (log_fl * log_mh / log_ml)) / (1 - (log_mh / log_ml))
    B = (log_fl - log_A) / log_ml
    A = 10 ** log_A
    return A, B
def plot_Anne_MH(MH_low, MH_high, fesc_low, fesc_high, pos_scaling, ax1):
    """Plot Anne's fesc(Mhalo) prescription on ax1 and return ax1.

    pos_scaling selects the rising (fesc_low -> fesc_high) or falling form;
    in both cases the curve is clamped to the two asymptotic values.
    """
    halomass = np.arange(7.0, 18.0, 0.01)  # log10(Mhalo) sampling grid
    halomass = pow(10, halomass)
    if pos_scaling:
        fesc = 1.0 - pow((1.0 - fesc_low) * (1.0-fesc_low)/(1.0-fesc_high), -np.log10(halomass/MH_low)/np.log10(MH_high/MH_low))
        fesc[fesc < fesc_low] = fesc_low
        fesc[fesc > fesc_high] = fesc_high
    else:
        fesc = pow(fesc_low * fesc_low/fesc_high, -np.log10(halomass/MH_low)/np.log10(MH_high/MH_low))
        fesc[fesc > fesc_low] = fesc_low
        fesc[fesc < fesc_high] = fesc_high
    ax1.plot(np.log10(halomass),
             fesc, ls = '-', color = 'k',
             label = "Anne")
    ax1.set_xlabel("Halo Mass [Msun]")
    ax1.set_ylabel("fesc")
    ax1.set_ylim([0.0, 1.1])
    return ax1
def plot_my_MH(MH_low, MH_high, fesc_low, fesc_high, ax1):
    """Plot the power-law fesc(Mhalo) curve on *ax1* and return it.

    Constants are solved so the curve passes through (MH_low, fesc_low)
    and (MH_high, fesc_high), then saturated at those two values.
    """
    halomass = pow(10, np.arange(7.0, 18.0, 0.01))
    alpha, beta = determine_MH_fesc_constants(MH_low, MH_high,
                                              fesc_low, fesc_high)
    print("Alpha = {0} Beta = {1}".format(alpha, beta))
    fesc = alpha * pow(halomass, beta)
    # Clamp the curve to its asymptotic values on both ends.
    if fesc_low > fesc_high:
        fesc = np.clip(fesc, fesc_high, fesc_low)
    else:
        fesc = np.clip(fesc, fesc_low, fesc_high)
    ax1.plot(np.log10(halomass),
             fesc, ls='--', color='r',
             label="Mine")
    ax1.set_xlabel("Halo Mass [Msun]")
    ax1.set_ylabel("fesc")
    ax1.set_ylim([0.0, 1.1])
    return ax1
def plot_MHs(MH_low, MH_high, fesc_low, fesc_high):
    """Compare Anne's and my fesc(Mhalo) prescriptions in one saved figure."""
    fig1 = plt.figure(figsize=(8, 8))
    ax1 = fig1.add_subplot(111)
    # Rising or falling prescription, decided by the endpoint ordering.
    pos_scaling = 1 if fesc_high > fesc_low else 0
    ax1 = plot_Anne_MH(MH_low, MH_high, fesc_low, fesc_high, pos_scaling, ax1)
    ax1 = plot_my_MH(MH_low, MH_high, fesc_low, fesc_high, ax1)
    leg = ax1.legend(loc='upper right', numpoints=1, labelspacing=0.1)
    leg.draw_frame(False)  # no box around the legend
    for t in leg.get_texts():  # shrink the legend text
        t.set_fontsize(10)
    outputFile1 = "./fescMH_{0:.2e}_{1:.2e}_{2}_{3}.png".format(MH_low,
                                                                MH_high,
                                                                fesc_low,
                                                                fesc_high)
    fig1.savefig(outputFile1, bbox_inches='tight')  # Save the figure
    print('Saved file to {0}'.format(outputFile1))
    plt.close(fig1)
def plot_SFR_fesc(alpha, beta, delta):
    """Plot logistic fesc(SFR) curves, one per (alpha, beta, delta) triple."""
    fig1 = plt.figure(figsize=(8, 8))
    ax1 = fig1.add_subplot(111)
    SFR = np.arange(-5, 2, 0.01)  # log10(SFR) grid
    for alpha_val, beta_val, delta_val in zip(alpha, beta, delta):
        # Logistic curve: amplitude delta, steepness alpha, midpoint beta.
        fesc = delta_val / (1.0 + np.exp(-alpha_val*(SFR-beta_val)))
        label = r"$\alpha = " + str(alpha_val) + r", \beta = " + str(beta_val) +\
                r", \delta = " + str(delta_val) + "$"
        print(label)
        ax1.plot(SFR, fesc, label=label)
    leg = ax1.legend(loc='upper left', numpoints=1, labelspacing=0.1)
    leg.draw_frame(False)  # no box around the legend
    for t in leg.get_texts():  # shrink the legend text
        t.set_fontsize(10)
    outputFile1 = "./fesc_SFR.png"
    fig1.savefig(outputFile1, bbox_inches='tight')  # Save the figure
    print('Saved file to {0}'.format(outputFile1))
    plt.close(fig1)
if __name__ == "__main__":
    # fesc(Mhalo) sweep parameters (the sweep itself is currently disabled).
    MH_low = 1.0e8
    MH_high = 1.0e12
    fesc_high = 0.05
    #for fesc_low in [0.95]:
    #    plot_MHs(MH_low, MH_high, fesc_low, fesc_high)
    # Logistic fesc(SFR) parameter sets: one curve per (alpha, beta, delta).
    alpha = [0.2, 0.3, 0.63, 1.0, 4.50]
    beta = [4.5, 2.3, 1.5, 1.0, 0.5]
    delta = [1.0, 1.0, 1.0, 1.0, 1.0]
    plot_SFR_fesc(alpha, beta, delta)
|
from django.conf.urls import url, include
from . search_views import \
    by_identifier, \
    by_unique_sale_user
# Sale search endpoints (no trailing '$' anchors, so these match prefixes).
urlpatterns = [
    url(r'^by-identifier', by_identifier, name='search-sale-by-identifier'),
    url(r'^by-unique-sale-user', by_unique_sale_user, name='search-sale-by-unique-sale-user'),
]
import numpy as np
from RL_model.Attention_FCN import PPO
import torch
# Port pretrained weights from a .npz checkpoint into the PyTorch PPO model
# and save the result as a state dict.
# NOTE(review): arrays are copied without transposition -- assumes the source
# weights are already in PyTorch (out_ch, in_ch, kH, kW) layout; confirm.
d = np.load('./pretrained_50.npz')
print(d.files)
print(d['diconv6_pi/diconv/W'].shape)
model = PPO(9, 1)
model_dict = model.state_dict()
# Shared convolutional trunk.
model_dict['conv.0.weight'] = torch.FloatTensor(d['conv1/W'])
model_dict['conv.0.bias'] = torch.FloatTensor(d['conv1/b'])
model_dict['conv.2.weight'] = torch.FloatTensor(d['diconv2/diconv/W'])
model_dict['conv.2.bias'] = torch.FloatTensor(d['diconv2/diconv/b'])
model_dict['conv.4.weight'] = torch.FloatTensor(d['diconv3/diconv/W'])
model_dict['conv.4.bias'] = torch.FloatTensor(d['diconv3/diconv/b'])
model_dict['conv.6.weight'] = torch.FloatTensor(d['diconv4/diconv/W'])
model_dict['conv.6.bias'] = torch.FloatTensor(d['diconv4/diconv/b'])
# Policy head.
model_dict['diconv1_p.weight'] = torch.FloatTensor(d['diconv5_pi/diconv/W'])
model_dict['diconv1_p.bias'] = torch.FloatTensor(d['diconv5_pi/diconv/b'])
model_dict['diconv2_p.weight'] = torch.FloatTensor(d['diconv6_pi/diconv/W'])
model_dict['diconv2_p.bias'] = torch.FloatTensor(d['diconv6_pi/diconv/b'])
model_dict['policy.weight'] = torch.FloatTensor(d['conv8_pi/model/W'])
model_dict['policy.bias'] = torch.FloatTensor(d['conv8_pi/model/b'])
# Value head.
model_dict['diconv1_v.weight'] = torch.FloatTensor(d['diconv5_V/diconv/W'])
model_dict['diconv1_v.bias'] = torch.FloatTensor(d['diconv5_V/diconv/b'])
model_dict['diconv2_v.weight'] = torch.FloatTensor(d['diconv6_V/diconv/W'])
model_dict['diconv2_v.bias'] = torch.FloatTensor(d['diconv6_V/diconv/b'])
model_dict['value.weight'] = torch.FloatTensor(d['conv7_V/W'])
model_dict['value.bias'] = torch.FloatTensor(d['conv7_V/b'])
# Conv-GRU gate weights (update z, reset r, candidate).
model_dict['conv7_Wz.weight'] = torch.FloatTensor(d['conv7_Wz/W'])
model_dict['conv7_Uz.weight'] = torch.FloatTensor(d['conv7_Uz/W'])
model_dict['conv7_Wr.weight'] = torch.FloatTensor(d['conv7_Wr/W'])
model_dict['conv7_Ur.weight'] = torch.FloatTensor(d['conv7_Ur/W'])
model_dict['conv7_W.weight'] = torch.FloatTensor(d['conv7_W/W'])
model_dict['conv7_U.weight'] = torch.FloatTensor(d['conv7_U/W'])
model.load_state_dict(model_dict)
torch.save(model.state_dict(), "pixel_sig50_gray.pth")
|
# Generated by Django 3.2.7 on 2021-10-26 18:32
from django.db import migrations, models
class Migration(migrations.Migration):
    # Initial schema: Productos, Proveedores and Stock tables.
    initial = True
    dependencies = [
    ]
    operations = [
        migrations.CreateModel(
            name='Productos',
            fields=[
                ('id', models.AutoField(primary_key=True, serialize=False)),
                ('nombres', models.CharField(max_length=30)),
                ('estado', models.BooleanField(verbose_name='true')),
            ],
        ),
        migrations.CreateModel(
            name='Proveedores',
            fields=[
                ('codigo', models.CharField(max_length=10)) if False else ('id', models.AutoField(primary_key=True, serialize=False)),
                ('codigo', models.CharField(max_length=10)),
                ('nombre', models.CharField(max_length=30)),
                ('direccion', models.CharField(max_length=20)),
                # NOTE(review): max_length is not a valid IntegerField option
                # (Django flags it in system checks); consider a follow-up
                # migration dropping the kwarg or switching to CharField.
                ('telefono', models.IntegerField(max_length=22)),
                ('estado', models.BooleanField(verbose_name='true')),
            ],
        ),
        migrations.CreateModel(
            name='Stock',
            fields=[
                ('id', models.AutoField(primary_key=True, serialize=False)),
                ('codigo', models.CharField(max_length=10)),
                ('nombres', models.CharField(max_length=30)),
                ('apellidos', models.CharField(max_length=30)),
                ('cedula', models.CharField(max_length=15)),
                ('direccion', models.CharField(max_length=50)),
            ],
        ),
    ]
|
from setuptools import setup, find_packages
# Packaging metadata for the alerta-pinger daemon (single module: pinger.py,
# exposed as the 'alerta-pinger' console script).
setup(
    name="alerta-pinger",
    version='3.3.0',
    description="Alerta Pinger daemon",
    license="MIT",
    author="Nick Satterly",
    author_email="nick.satterly@theguardian.com",
    url="http://github.com/alerta/alerta-contrib",
    py_modules=['pinger'],
    install_requires=[
        'alerta',
        'PyYaml'
    ],
    entry_points={
        'console_scripts': [
            'alerta-pinger = pinger:main'
        ]
    },
    keywords="alerta ping daemon",
    classifiers=[
        'Development Status :: 5 - Production/Stable',
        'License :: OSI Approved :: MIT License',
        'Intended Audience :: System Administrators',
        'Programming Language :: Python :: 2.6',
        'Programming Language :: Python :: 2.7',
        'Topic :: System :: Monitoring',
    ]
)
|
# coding:utf-8
import json
import os
import certifi
import urllib3
class getForPixiv:
    """Scraper for Pixiv's ranking JSON API: downloads every listed image."""

    def __init__(self):
        # Crawler headers -- Pixiv requires a browser-like UA and a referer.
        # BUG FIX: the key must be the real HTTP header name "user-agent";
        # the previous "user_agent" key was sent verbatim and ignored by the
        # server, so requests went out without a User-Agent.
        self.headers = {
            "user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/85.0.4183.102 Safari/537.36",
            "accept-language": "zh-CN,zh;q=0.9",
            "referer": "https://www.pixiv.net/",
        }
        # TLS-verified connection pool.
        self.http = urllib3.PoolManager(
            cert_reqs='CERT_REQUIRED',
            ca_certs=certifi.where()
        )

    def saveImg(self, imgUrl, id):
        """Download *imgUrl* into <root>/img/<id>/ and return the local path."""
        # os.path.split yields (directory, filename); only the filename is used.
        _, filename = os.path.split(imgUrl)
        systemPath = '/www/wwwroot/python/spider'
        dirs = '{systemPath}/img/{id}/'.format(systemPath=systemPath, id=id)
        if not os.path.exists(dirs):
            os.makedirs(dirs)
        path = '{systemPath}/img/{id}/{ext}'.format(systemPath=systemPath, id=id, ext=filename)
        r = self.http.request('GET', imgUrl, headers=self.headers)
        with open(path, 'wb') as f:
            f.write(r.data)
        r.release_conn()
        print("\033[32m图片{name}已保存到{path}中.\033[0m".format(name=filename, path=path))
        return path

    def getImgList(self, pid):
        """Fetch all size variants for illustration *pid* and save each one."""
        # e.g. https://www.pixiv.net/ajax/illust/87011701/pages?lang=zh
        url = 'https://www.pixiv.net/ajax/illust/{pid}/pages?lang=zh'.format(pid=pid)
        r = self.http.request('GET', url, headers=self.headers)
        jData = json.loads(r.data.decode('utf-8'))
        body = jData['body']
        imgList = []
        for val in body:
            imgList.append({
                "original": self.saveImg(val['urls']['original'], pid),
                "regular": self.saveImg(val['urls']['regular'], pid),
                "small": self.saveImg(val['urls']['small'], pid),
                "thumb_mini": self.saveImg(val['urls']['thumb_mini'], pid),
            })
        return imgList

    def Run(self, url):
        """Scrape a ranking-JSON *url*; return metadata + saved image paths."""
        r = self.http.request('GET', url, headers=self.headers)
        jData = json.loads(r.data.decode('utf-8'))
        contents = jData['contents']
        dataList = []
        for val in contents:
            dataList.append({
                'title': val['title'],
                'illust_id': val['illust_id'],
                'tags': val['tags'],
                'profile_img': val['profile_img'],
                'user_name': val['user_name'],
                'user_id': val['user_id'],
                'imgList': self.getImgList(val['illust_id'])
            })
        return dataList
# Scrape page 2 of the daily illustration ranking and print each title.
data = getForPixiv().Run('https://www.pixiv.net/ranking.php?mode=daily&content=illust&p=2&format=json')
for val in data:
    print(val['title'])
import time
# time module: epoch seconds, struct_time, and formatted local time.
print(time.time())
print(time.localtime())
print(time.strftime('%Y-%m-%d %H:%M:%S'))
import datetime
# datetime module: current timestamp and timedelta arithmetic.
print(datetime.datetime.now())
newtime = datetime.timedelta(minutes=10)
print(datetime.datetime.now() + newtime)
one_day = datetime.datetime(2008, 3, 16)
new_date = datetime.timedelta(days=10)
print(one_day + new_date)
class Vector3(object):
    """A simple mutable 3-component float vector.

    Components default to 0.0 when not supplied; read access goes through
    the x/y/z properties and mutation through setX/setY/setZ.
    """
    def __init__(self, newX=None, newY=None, newZ=None):
        # BUG FIX: the original used `0.0 or float(newX)`; since 0.0 is
        # falsy, that always evaluated float(newX) and raised TypeError
        # whenever an argument was omitted (None).  Default to 0.0 instead.
        self.__x = float(newX) if newX is not None else 0.0
        self.__y = float(newY) if newY is not None else 0.0
        self.__z = float(newZ) if newZ is not None else 0.0
    @property
    def x(self):
        """X component (read-only property; use setX to modify)."""
        return self.__x
    @property
    def y(self):
        """Y component (read-only property; use setY to modify)."""
        return self.__y
    @property
    def z(self):
        """Z component (read-only property; use setZ to modify)."""
        return self.__z
    def setX(self, newX):
        self.__x = float(newX)
    def setY(self, newY):
        self.__y = float(newY)
    def setZ(self, newZ):
        self.__z = float(newZ)
from django.db import models
from django.conf import settings
from django.db.models.signals import post_save
from django.shortcuts import reverse
from branches.models import Branch
from cargotracker.UTILS import validate_required_kwargs_are_not_empty
from cargotracker.UTILS.tasks import send_async_email
User = settings.AUTH_USER_MODEL
Q = models.Q
class CargoQuerySet(models.QuerySet):
    """
    Queryset for reusable queries of the Cargo object.
    """
    def all_cargo_for_user(self, user=None):
        """
        Return all the Cargo involving a user, whether they sent it or received it.
        """
        return self.filter(Q(sender=user) | Q(recepient=user))
    def cargo_sent_by_user(self, sender=None):
        """Cargo dispatched by the given sender."""
        return self.filter(sender=sender)
    def cargo_received_by_user(self, recepient=None):
        """Cargo addressed to the given recipient."""
        return self.filter(recepient=recepient)
    def cargo_booked_by_agent(self, booking_agent=None):
        """Cargo whose booking was handled by the given agent."""
        return self.filter(booking_agent=booking_agent)
    def cargo_handled_by_agent(self, agent=None):
        """
        All cargo either booked or cleared by the provided agent.
        """
        return self.filter(Q(booking_agent=agent) | Q(clearing_agent=agent))
    def cargo_by_tracking_id(self, tracking_id=None):
        """
        Return the single cargo with this tracking id, or None.

        FIX: QuerySet.first() already returns None for an empty queryset,
        so the previous exists()+first() pair issued two database queries
        where one suffices.
        NOTE(review): no tracking_id field is visible on the Cargo model in
        this file -- presumably defined elsewhere; confirm.
        """
        return self.filter(tracking_id=str(tracking_id)).first()
class CargoManager(models.Manager):
    """
    Manager class to help with managing the Cargo instance.
    """
    def get_queryset(self):
        # Route all manager queries through the custom CargoQuerySet.
        return CargoQuerySet(self.model, using=self.db)
    def create_cargo(self, **kwargs):
        """
        Create cargo before an order can be generated.
        :args:
            sender - User instance representing the sender of cargo
            title - Name of the cargo
            recepient - User instance representing the recepient of cargo
            destination - Instance representing the destination branch of cargo
            weight - Weight of cargo
            booking_agent - agent that handled booking
        :return: cargo instance
        :raises TypeError: for self-shipping or booking at a foreign station
        """
        KWARGS_LIST = (
            "sender",
            "title",
            "recepient",
            "destination",
            "weight",
            "booking_station",
            "booking_agent",
        )
        validate_required_kwargs_are_not_empty(KWARGS_LIST, kwargs)
        # Business rules: no self-shipping; agents book only at their station.
        if kwargs.get("sender").id == kwargs.get("recepient").id:
            raise TypeError("Users cannot send themselves parcels.")
        if (
            kwargs.get("booking_agent").email
            != kwargs.get("booking_station").branch_agent.email
        ):
            raise TypeError("You can only book cargo for your station.")
        # NOTE(review): create() already saves; the extra save() issues a
        # second UPDATE query.
        cargo = self.model.objects.create(**kwargs)
        cargo.save()
        return cargo
    def get_cargo(self, **kwargs):
        """
        Return the first cargo that matches the specified params.

        NOTE(review): returns False (not None) when nothing matches, which
        is inconsistent with CargoQuerySet.cargo_by_tracking_id; the
        exists()+first() pair also runs two queries.
        """
        qs = Cargo.objects.filter(**kwargs)
        return qs.first() if qs.exists() else False
class Cargo(models.Model):
    """
    Model that defines the Cargo.
    """
    # Lifecycle states.  NOTE(review): no visible field uses these choices --
    # presumably intended for a status field; confirm.
    STATUS_CHOICES = [
        ("P", "pending"),
        ("T", "in transit"),
        ("D", "delivered"),
    ]
    title = models.CharField(max_length=100)
    # Parties involved: who ships and who receives.
    sender = models.ForeignKey(
        User, on_delete=models.CASCADE, related_name="sent_cargo"
    )
    recepient = models.ForeignKey(
        User, on_delete=models.CASCADE, related_name="received_cargo"
    )
    destination = models.ForeignKey(
        Branch, on_delete=models.CASCADE, related_name="cargo_received"
    )
    # Staff handling the cargo at each end.
    booking_agent = models.ForeignKey(
        User, on_delete=models.CASCADE, related_name="booked_cargo"
    )
    booking_station = models.ForeignKey(
        Branch, on_delete=models.CASCADE, related_name="sent_cargo"
    )
    # NOTE(review): clearing_agent has no null=True, so it must be supplied
    # at creation even though clearing happens later -- confirm intended.
    clearing_agent = models.ForeignKey(
        User, on_delete=models.CASCADE, related_name="cleared_cargo"
    )
    current_location = models.CharField(max_length=50, default="pending")
    weight = models.DecimalField(max_digits=5, decimal_places=2)
    objects = CargoManager()
    def __str__(self):
        """
        Return a helpful string representation.
        """
        return f"{self.title} for {self.recepient} in {self.destination}."
    def get_absolute_url(self):
        """
        Return url for each instance.
        """
        return reverse("cargo:cargo-detail", args=[self.id])
def post_save_cargo_created_receiver(sender, instance, created, *args, **kwargs):
    """
    This signal is fired whenever a new cargo is created.

    Sends a notification email to the booking station's branch agent so
    they can record the freshly booked order; plain updates are ignored.
    """
    if not created:
        return
    branch_agent = instance.booking_station.branch_agent
    email_body = f"Hello. A new order was made at the CargoTracker branch in {instance.booking_station.city}. As the admin of the branch, please proceed and record the order for it to be sent to its destination."
    send_async_email(
        subject="Book new order.",
        message=email_body,
        sender=instance.sender.email,
        recepients=[branch_agent.email],
    )


post_save.connect(post_save_cargo_created_receiver, sender=Cargo)
|
from spaceone.inventory.manager.ecs.scaling_group_manager import ScalingGroupManager
from spaceone.inventory.manager.ecs.load_balancer_manager import LoadBalancerManager
from spaceone.inventory.manager.ecs.vpc_manager import VPCManager
from spaceone.inventory.manager.ecs.ecs_instance_manager import ECSInstanceManager
from spaceone.inventory.manager.ecs.nic_manager import NICManager
from spaceone.inventory.manager.ecs.disk_manager import DiskManager
from spaceone.inventory.manager.ecs.security_group_manager import SecurityGroupManager
from spaceone.inventory.manager.metadata.metadata_manager import MetadataManager
# from spaceone.inventory.manager.ecs.cloudwatch_manager import CloudWatchManager
|
#!/usr/bin/env python3
"""Usage examples."""
import pprint

import sqlalchemy as sa

import treedb

# Quiet mode: do not echo generated SQL statements.
treedb.configure(log_sql=False)

# Peek at a single languoid record before (re)building the database.
pprint.pprint(dict(treedb.iterlanguoids(limit=1)))

# Build (or reuse) the database and verify it.
engine = treedb.load()

treedb.check()  # run sanity checks

# Show the first five languoids ordered by id.
treedb.print_rows(sa.select(treedb.Languoid).order_by('id').limit(5))

query = treedb.get_example_query()  # big example query containing 'everything'

# The pd_read_* helpers are guarded with `is not None` below — presumably
# they return None when pandas is unavailable; confirm in treedb docs.
qf = treedb.pd_read_sql(query, index_col='id')
if qf is not None:
    qf.info(memory_usage='deep')

lf = treedb.pd_read_languoids()
if lf is not None:
    lf.info(memory_usage='deep')

#treedb.write_csv()
#treedb.write_languoids()
#treedb.write_files()

import treedb.raw

# Peek at the first raw record.
pprint.pprint(dict([next(treedb.raw.fetch_records())]))

#treedb.raw.write_files()
#treedb.languoids.files.roundtrip()
|
# Sample data: sort ascending, then report the list and its size.
nums = [3, 5, -4, 8, 11, 1, -1, 6]
nums = sorted(nums)
print(nums)
print(len(nums))
import os
import time
import threading
# import subprocess
import socket
# import re
# Ansible <v2 has a security vulnerability and v2 has a different API
# Disabling Cloudera manager as don't believe it's used by anyone any more.
# from ansible.runner import Runner
# from ansible.inventory import Inventory
from cm_api.api_client import ApiResource # Cloudera Manager API
from cm_api.api_client import ApiException
# from cm_api.endpoints.clusters import ApiCluster
# from cm_api.endpoints.clusters import create_cluster
# from cm_api.endpoints.parcels import ApiParcel
from cm_api.endpoints.parcels import get_parcel
# from cm_api.endpoints.cms import ClouderaManager
from cm_api.endpoints.services import ApiServiceSetupInfo
# from cm_api.endpoints.services import ApiService, create_service
# from cm_api.endpoints.types import ApiCommand, ApiRoleConfigGroupRef
# from cm_api.endpoints.role_config_groups import get_role_config_group
# from cm_api.endpoints.role_config_groups import ApiRoleConfigGroup
# from cm_api.endpoints.roles import ApiRole
from time import sleep
from cm.util import misc
import cm.util.paths as paths
from cm.services import ServiceRole
from cm.services import service_states
from cm.services.apps import ApplicationService
import logging
# Module-level logger shared by this service module.
log = logging.getLogger('cloudman')

NUM_START_ATTEMPTS = 2  # Number of times we attempt to auto-restart the service
class ClouderaManagerService(ApplicationService):
    """
    CloudMan service that installs, configures and supervises a Cloudera
    Manager (CM) server and a CDH cluster on the master instance.
    """

    def __init__(self, app):
        super(ClouderaManagerService, self).__init__(app)
        self.svc_roles = [ServiceRole.CLOUDERA_MANAGER]
        self.name = ServiceRole.to_string(ServiceRole.CLOUDERA_MANAGER)
        self.dependencies = []
        # Auto-restart budget, decremented on each start attempt.
        self.remaining_start_attempts = NUM_START_ATTEMPTS
        # Random password used for the CM-related PostgreSQL roles.
        self.db_pwd = misc.random_string_generator()
        # Indicate if the web server has been configured and started
        self.started = False
        self.cm_port = 7180
        # Default cluster configuration
        # TODO - read local cloud host name!
        # self.cm_host = socket.gethostname()
        self.cm_host = self.app.cloud_interface.get_local_hostname()
        self.host_list = []
        self.cluster_name = "Cluster 1"
        self.cdh_version = "CDH5"
        self.cdh_version_number = "5"
        # Initial CM web UI credentials; replaced by set_default_user().
        self.cm_username = "admin"
        self.cm_password = "admin"
        self.mgmt_service_name = "ManagementService"
        # System account used for host installs and the replacement CM user.
        self.host_username = "ubuntu"
        self.host_password = self.app.config.get('password')
        self.cm_repo_url = None
        # Cluster services to create, keyed by CM service type.
        self.service_types_and_names = {
            "HDFS": "HDFS",
            "YARN": "YARN"
        }
@property
def cm_api_resource(self):
    """
    Build and return an authenticated ``ApiResource`` for the local CM
    server, or ``None`` if no connection could be established.

    On a 401 the default credentials are swapped for the host account
    and the connection is retried once.
    """
    ar = None
    try:
        ar = ApiResource(self.cm_host, self.cm_port,
                         self.cm_username, self.cm_password)
        ar.echo('Authenticated')  # Issue a sample request to test the conn
    except ApiException, aexc:
        if aexc.code == 401:
            # NOTE(review): this logs the *old* username (reassigned just
            # below), so the message shows 'admin' rather than the new name.
            log.debug("Changing default API username to {0}".format(self.cm_username))
            self.cm_username = self.host_username
            self.cm_password = self.host_password
            ar = ApiResource(self.cm_host, self.cm_port,
                             self.cm_username, self.cm_password)
        else:
            log.error("Api Exception connecting to ClouderaManager: {0}".format(aexc))
    except Exception, exc:
        log.debug("Exception connecting to ClouderaManager: {0}".format(exc))
    return ar
@property
def cm_manager(self):
    """
    Return a Cloudera Manager handle obtained from the API resource, or
    ``None`` when no API connection is available.
    """
    # Evaluate the cm_api_resource property exactly once: each access
    # constructs (and test-connects) a fresh ApiResource, so the previous
    # double access paid for two connections per call.
    ar = self.cm_api_resource
    if ar:
        return ar.get_cloudera_manager()
    log.debug("No cm_api_resource; cannot get cm_manager")
    return None
def start(self):
    """
    Start Cloudera Manager web server.
    """
    log.debug("Starting Cloudera Manager service")
    self.state = service_states.STARTING
    misc.run('/sbin/sysctl vm.swappiness=0')  # Recommended by Cloudera
    # The remaining startup (DB + web server) is slow, so run it in a
    # background thread and return immediately.
    threading.Thread(target=self.__start).start()
def __start(self):
    """
    Start all the service components.

    Intended to be called in a dedicated thread.
    """
    try:
        self.configure_db()
        self.start_webserver()
        self.set_default_user()
        # self.create_cluster()
        # self.setup_cluster()
        # One start attempt consumed (see status() for the restart logic).
        self.remaining_start_attempts -= 1
    except Exception, exc:
        log.error("Exception creating a cluster: {0}".format(exc))
def remove(self, synchronous=False):
    """
    Stop the Cloudera Manager web server.

    :param synchronous: passed through to the base class removal logic.
    """
    log.info("Stopping Cloudera Manager service")
    super(ClouderaManagerService, self).remove(synchronous)
    self.state = service_states.SHUTTING_DOWN
    try:
        # Best effort: stop the CDH cluster first, if one exists.
        if self.cm_api_resource:
            cluster = self.cm_api_resource.get_cluster(self.cluster_name)
            cluster.stop()
    except Exception, exc:
        log.error("Exception stopping cluster {0}: {1}".format(self.cluster_name, exc))
    if misc.run("service cloudera-scm-server stop"):
        self.state = service_states.SHUT_DOWN
def configure_db(self):
    """
    Add the necessary tables to the default PostgreSQL server running on the
    host and prepare the necessary roles and databases.

    NOTE: the Ansible-based steps are currently disabled (see the
    warnings below); only the psql restart, SQL tweaks, and the CM
    scm_prepare_database.sh call actually run.
    """
    # Update psql settings
    pg_conf = paths.P_PG_CONF
    lif = ["listen_addresses = '*'",
           "shared_buffers = 256MB",
           "wal_buffers = 8MB",
           "checkpoint_segments = 16",
           "checkpoint_completion_target = 0.9"]
    for l in lif:
        log.debug("Updating PostgreSQL conf file {0} setting: {1}".format(pg_conf, l))
        # First two words of the setting form the match key; only used by
        # the disabled Ansible lineinfile call below.
        regexp = ' '.join(l.split(' ')[:2])
        log.warning("(1) Configuring DB has been disabled!")
        # Requires upgrade to Ansible v2
        # try:
        #     Runner(inventory=Inventory(['localhost']),
        #            transport='local',
        #            become=True,
        #            become_user='postgres',
        #            module_name="lineinfile",
        #            module_args=('dest={0} backup=yes line="{1}" owner=postgres regexp="{2}"'
        #                         .format(pg_conf, l, regexp))
        #            ).run()
        # except Exception, e:
        #     log.error("Exception updating psql conf {0}: {1}".format(l, e))
    # Restart psql
    misc.run("service postgresql restart")
    # Add required roles to the main Postgres server
    roles = ['scm', 'amon', 'rman', 'hive']
    for role in roles:
        log.debug("Adding PostgreSQL role {0} (with pwd: {1})".format(role,
                                                                      self.db_pwd))
        log.warning("(2) Configuring DB has been disabled!")
        # Requires upgrade to Ansible v2
        # try:
        #     Runner(inventory=Inventory(['localhost']),
        #            transport='local',
        #            become=True,
        #            become_user='postgres',
        #            module_name="postgresql_user",
        #            module_args=("name={0} role_attr_flags=LOGIN password={1}"
        #                         .format(role, self.db_pwd))
        #            ).run()
        # except Exception, e:
        #     log.error("Exception creating psql role {0}: {1}".format(role, e))
    # Create required databases
    databases = ['scm', 'amon', 'rman', 'metastore']
    for db in databases:
        owner = db
        if db == 'metastore':
            # The Hive metastore DB is owned by the 'hive' role.
            owner = 'hive'
        log.debug("Creating database {0} with owner {1}".format(db, owner))
        log.warning("(3) Configuring DB has been disabled!")
        # Requires upgrade to Ansible v2
        # try:
        #     r = Runner(inventory=Inventory(['localhost']),
        #                transport='local',
        #                become=True,
        #                become_user='postgres',
        #                module_name="postgresql_db",
        #                module_args=("name={0} owner={1} encoding='UTF-8'"
        #                             .format(db, owner))
        #                ).run()
        #     if r.get('contacted', {}).get('localhost', {}).get('failed'):
        #         msg = r.get('contacted', {}).get('localhost', {}).get('msg', 'N/A')
        #         log.error("Creating the database filed: {0}".format(msg))
        # except Exception, e:
        #     log.error("Exception creating database {0}: {1}".format(db, e))
    # Alter one of the DBs
    sql_cmds = [
        "ALTER DATABASE metastore SET standard_conforming_strings = off"
    ]
    for sql_cmd in sql_cmds:
        misc.run_psql_command(sql_cmd, 'postgres', self.app.path_resolver.psql_cmd, 5432)
    # Prepare the scm database
    cmd = ("/usr/share/cmf/schema/scm_prepare_database.sh -h localhost postgresql scm scm {0}"
           .format(self.db_pwd))
    misc.run(cmd)
    # Make sure we have a clean DB env
    f = '/etc/cloudera-scm-server/db.mgmt.properties'
    if os.path.exists(f):
        log.debug("Deleting file {0}".format(f))
        os.remove(f)
def start_webserver(self):
    """
    Start the Cloudera Manager web server (defaults to port 7180)
    """
    def _disable_referer_check():
        # Poll the CM API until the config update is accepted; the CM
        # server can take a while to come up after the service starts.
        log.debug("Disabling refered check")
        config = {u'REFERER_CHECK': u'false',
                  u'REMOTE_PARCEL_REPO_URLS': u'http://archive.cloudera.com/cdh5/parcels/5.4.1/'}
        done = False
        self.state = service_states.CONFIGURING
        while not done:
            try:
                self.cm_manager.update_config(config)
                log.debug("Succesfully disabled referer check")
                done = True
                # The web server is now reachable and configured.
                self.started = True
            except Exception:
                # Expected while the CM server is still booting.
                log.debug("Still have not disabled referer check... ")
                time.sleep(15)
                # Bail out if the service was torn down meanwhile.
                if self.state in [service_states.SHUTTING_DOWN,
                                  service_states.SHUT_DOWN,
                                  service_states.ERROR]:
                    log.debug("Service state {0}; not configuring ClouderaManager."
                              .format(self.state))
                    done = True

    if misc.run("service cloudera-scm-server start"):
        _disable_referer_check()
def set_default_user(self):
    """
    Replace the default 'admin' user with a default system one (generally
    ``ubuntu``) and it's password.
    """
    host_username_exists = default_username_exists = False
    existing_users = self.cm_api_resource.get_all_users().to_json_dict().get('items', [])
    for existing_user in existing_users:
        if existing_user.get('name', None) == self.host_username:
            host_username_exists = True
        if existing_user.get('name', None) == 'admin':
            default_username_exists = True
    if not host_username_exists:
        log.debug("Setting default user to {0}".format(self.host_username))
        # Create new admin user (use 'ubuntu' and password provided at cloudman startup)
        self.cm_api_resource.create_user(self.host_username, self.host_password, ['ROLE_ADMIN'])
    else:
        log.debug("Admin user {0} exists.".format(self.host_username))
    if default_username_exists:
        # Delete the default 'admin' user. Switch our own credentials
        # first so subsequent API calls authenticate as the new account
        # rather than the one being deleted.
        old_admin = self.cm_username
        self.cm_username = self.host_username
        self.cm_password = self.host_password
        log.debug("Deleting the old default user 'admin'...")
        self.cm_api_resource.delete_user(old_admin)
def init_cluster(self, api):
    """
    Create a new cluster and add hosts to it.

    :param api: ``ApiResource`` connected to the CM server.
    :return: the newly created cluster object.
    """
    # BUG FIX: ``self`` was previously declared as the *second* parameter
    # (def init_cluster(api, self)), so the bound call
    # ``self.init_cluster(ar)`` assigned the instance to ``api`` and the
    # resource to ``self``; ``self`` now comes first, as it must.
    cluster = api.create_cluster(self.cluster_name, self.cdh_version_number)
    # Add the CM host to the list of hosts to add in the cluster so it can
    # run the management services.
    all_hosts = list(self.host_list)
    all_hosts.append(self.cm_host)
    cluster.add_hosts(all_hosts)
    return cluster
def deploy_management(self, manager):
    """
    Create, configure and start the Cloudera Manager management service.

    :param manager: ClouderaManager handle (e.g. ``self.cm_manager``).
    :return: the started management service object.
    """
    # BUG FIX: ``self`` was previously declared as the *second* parameter,
    # which mis-assigns arguments on any bound call; it now comes first.
    MGMT_SERVICE_CONFIG = {
        'zookeeper_datadir_autocreate': 'true',
    }
    MGMT_ROLE_CONFIG = {
        'quorumPort': 2888,
    }
    AMON_ROLENAME = "ACTIVITYMONITOR"
    AMON_ROLE_CONFIG = {
        'firehose_database_host': self.cm_host + ":7432",
        'firehose_database_user': 'amon',
        'firehose_database_password': self.db_pwd,
        'firehose_database_type': 'postgresql',
        'firehose_database_name': 'amon',
        'firehose_heapsize': '215964392',
    }
    APUB_ROLENAME = "ALERTPUBLISHER"
    APUB_ROLE_CONFIG = {}
    ESERV_ROLENAME = "EVENTSERVER"
    ESERV_ROLE_CONFIG = {
        'event_server_heapsize': '215964392'
    }
    HMON_ROLENAME = "HOSTMONITOR"
    HMON_ROLE_CONFIG = {}
    SMON_ROLENAME = "SERVICEMONITOR"
    SMON_ROLE_CONFIG = {}
    # The NAVIGATOR/REPORTMANAGER definitions are retained for the
    # commented-out role creations below; they are not created by default.
    NAV_ROLENAME = "NAVIGATOR"
    NAV_ROLE_CONFIG = {
        'navigator_database_host': self.cm_host + ":7432",
        'navigator_database_user': 'nav',
        'navigator_database_password': self.db_pwd,
        'navigator_database_type': 'postgresql',
        'navigator_database_name': 'nav',
        'navigator_heapsize': '215964392',
    }
    NAVMS_ROLENAME = "NAVIGATORMETADATASERVER"
    NAVMS_ROLE_CONFIG = {}
    RMAN_ROLENAME = "REPORTMANAGER"
    RMAN_ROLE_CONFIG = {
        'headlamp_database_host': self.cm_host + ":7432",
        'headlamp_database_user': 'rman',
        'headlamp_database_password': self.db_pwd,
        'headlamp_database_type': 'postgresql',
        'headlamp_database_name': 'rman',
        'headlamp_heapsize': '215964392',
    }
    mgmt = manager.create_mgmt_service(ApiServiceSetupInfo())
    # Create roles on the CM host. Note that host id may be different from
    # host name (especially in CM 5); look it up in /api/v5/hosts.
    # BUG FIX: these calls previously referenced undefined lower-case
    # names (amon_role_name, CM_HOST, ...); they now use the constants
    # defined above and the instance's CM host.
    mgmt.create_role(AMON_ROLENAME + "-1", "ACTIVITYMONITOR", self.cm_host)
    mgmt.create_role(APUB_ROLENAME + "-1", "ALERTPUBLISHER", self.cm_host)
    mgmt.create_role(ESERV_ROLENAME + "-1", "EVENTSERVER", self.cm_host)
    mgmt.create_role(HMON_ROLENAME + "-1", "HOSTMONITOR", self.cm_host)
    mgmt.create_role(SMON_ROLENAME + "-1", "SERVICEMONITOR", self.cm_host)
    # mgmt.create_role(NAV_ROLENAME + "-1", "NAVIGATOR", self.cm_host)
    # mgmt.create_role(NAVMS_ROLENAME + "-1", "NAVIGATORMETADATASERVER", self.cm_host)
    # mgmt.create_role(RMAN_ROLENAME + "-1", "REPORTSMANAGER", self.cm_host)
    # Now configure each created role from its matching config dict.
    for group in mgmt.get_all_role_config_groups():
        if group.roleType == "ACTIVITYMONITOR":
            group.update_config(AMON_ROLE_CONFIG)
        elif group.roleType == "ALERTPUBLISHER":
            group.update_config(APUB_ROLE_CONFIG)
        elif group.roleType == "EVENTSERVER":
            group.update_config(ESERV_ROLE_CONFIG)
        elif group.roleType == "HOSTMONITOR":
            group.update_config(HMON_ROLE_CONFIG)
        elif group.roleType == "SERVICEMONITOR":
            group.update_config(SMON_ROLE_CONFIG)
        # elif group.roleType == "NAVIGATOR":
        #     group.update_config(NAV_ROLE_CONFIG)
        # elif group.roleType == "NAVIGATORMETADATASERVER":
        #     group.update_config(NAVMS_ROLE_CONFIG)
        # elif group.roleType == "REPORTSMANAGER":
        #     group.update_config(RMAN_ROLE_CONFIG)
    # Now start the management service and wait for it to come up.
    mgmt.start().wait()
    return mgmt
def create_cluster(self):
    """
    Create a cluster and Cloudera Manager Service on master host

    Connects to the local CM server, initializes a cluster, deploys the
    management service, installs hosts, then downloads, distributes and
    activates the matching CDH parcel.
    """
    log.info("Creating Cloudera cluster: '{0}'. Please wait...".format(self.cluster_name))
    ### CM Definitions ###
    CM_CONFIG = {
        'TSQUERY_STREAMS_LIMIT' : 1000,
    }
    ### Create and deploy new cluster ##
    ar = self.cm_api_resource
    manager = self.cm_manager
    manager.update_config(CM_CONFIG)
    log.info("Connected to CM host on " + self.cm_host + " and updated CM configuration")
    ## Initialize a cluster ##
    # NOTE(review): verify init_cluster's parameter order matches this
    # bound call — it is declared with the resource first.
    cluster = self.init_cluster(ar)
    log.info("Initialized cluster " + self.cluster_name + " which uses CDH version " + self.cdh_version_number)
    ## Deploy management service ##
    # NOTE(review): deploy_management is a method of this class, so this
    # bare-name call raises NameError; presumably
    # self.deploy_management(manager) was intended — confirm.
    deploy_management(manager)
    log.info("Deployed CM management service " + self.mgmt_service_name + " to run on " + self.cm_host)
    # install hosts on this CM instance
    cmd = self.cm_manager.host_install(self.host_username, self.host_list,
                                       password=self.host_password,
                                       cm_repo_url=self.cm_repo_url)
    log.debug("Installing hosts. This might take a while...")
    while cmd.success is None:
        sleep(5)
        cmd = cmd.fetch()
    if cmd.success is not True:
        log.error("Adding hosts to Cloudera Manager failed: {0}".format(cmd.resultMessage))
    log.debug("Host added to Cloudera Manager")
    # first auto-assign roles and auto-configure the CM service
    self.cm_manager.auto_assign_roles()
    self.cm_manager.auto_configure()
    # create a cluster on that instance
    # NOTE(review): a cluster with this name was already created via
    # init_cluster above; creating it again likely fails on the CM side —
    # confirm which path is intended.
    cluster = self.cm_api_resource.create_cluster(self.cluster_name, self.cdh_version)
    log.debug("Cloudera cluster: {0} created".format(self.cluster_name))
    # add all hosts on the cluster
    cluster.add_hosts(self.host_list)
    cluster = self.cm_api_resource.get_cluster(self.cluster_name)
    # get and list all available parcels
    parcels_list = []
    log.debug("Installing parcels...")
    for p in cluster.get_all_parcels():
        print '\t' + p.product + ' ' + p.version
        if p.version.startswith(self.cdh_version_number) and p.product == "CDH":
            parcels_list.append(p)
    if len(parcels_list) == 0:
        # NOTE(review): execution continues after this error, so the
        # parcels_list[0] access below would raise IndexError — confirm
        # whether an early return is needed here.
        log.error("No {0} parcel found!".format(self.cdh_version))
    # Pick the newest matching parcel version.
    cdh_parcel = parcels_list[0]
    for p in parcels_list:
        if p.version > cdh_parcel.version:
            cdh_parcel = p
    # download the parcel
    log.debug("Starting parcel downloading...")
    cmd = cdh_parcel.start_download()
    if cmd.success is not True:
        log.error("Parcel download failed!")
    # make sure the download finishes
    while cdh_parcel.stage != 'DOWNLOADED':
        sleep(5)
        cdh_parcel = get_parcel(self.cm_api_resource, cdh_parcel.product, cdh_parcel.version, self.cluster_name)
    log.debug("Parcel: {0} {1} downloaded".format(cdh_parcel.product, cdh_parcel.version))
    # distribute the parcel
    log.debug("Distributing parcels...")
    cmd = cdh_parcel.start_distribution()
    if cmd.success is not True:
        log.error("Parcel distribution failed!")
    # make sure the distribution finishes
    while cdh_parcel.stage != "DISTRIBUTED":
        sleep(5)
        cdh_parcel = get_parcel(self.cm_api_resource, cdh_parcel.product, cdh_parcel.version, self.cluster_name)
    log.debug("Parcel: {0} {1} distributed".format(cdh_parcel.product, cdh_parcel.version))
    # activate the parcel
    log.debug("Activating parcels...")
    cmd = cdh_parcel.activate()
    if cmd.success is not True:
        log.error("Parcel activation failed!")
    # make sure the activation finishes
    # NOTE(review): unlike the loops above, this one does not sleep
    # between polls — confirm the tight loop is intentional.
    while cdh_parcel.stage != "ACTIVATED":
        cdh_parcel = get_parcel(self.cm_api_resource, cdh_parcel.product, cdh_parcel.version, self.cluster_name)
    log.debug("Parcel: {0} {1} activated".format(cdh_parcel.product, cdh_parcel.version))
    # inspect hosts and print the result
    log.debug("Inspecting hosts. This might take a few minutes")
    cmd = self.cm_manager.inspect_hosts()
    while cmd.success is None:
        sleep(5)
        cmd = cmd.fetch()
    if cmd.success is not True:
        log.error("Host inpsection failed!")
    # NOTE(review): the format string below has no {} placeholder, so
    # resultMessage is silently dropped from the log line — confirm.
    log.debug("Hosts successfully inspected:\n".format(cmd.resultMessage))
    log.info("Cluster '{0}' installed".format(self.cluster_name))
def setup_cluster(self):
    """
    Setup the default cluster and start basic services (HDFS, YARN and ZOOKEEPER)
    """
    log.info("Setting up cluster services...")
    # get the cluster
    cluster = self.cm_api_resource.get_cluster(self.cluster_name)
    # Create all the services we want to add; only one instance of each.
    for service_type, service_name in self.service_types_and_names.items():
        cluster.create_service(service_name, service_type)
        log.debug("Service: {0} added".format(service_name))
    # auto-assign roles and auto-configure the new services
    cluster.auto_assign_roles()
    cluster.auto_configure()
    # start the management service
    cm_service = self.cm_manager.get_service()
    # create_CM_roles(master_node, cm_service)
    cm_service.start().wait()
    # Execute the first run command (initial format/start of everything).
    log.debug("Executing first run command. This might take a while...")
    cmd = cluster.first_run()
    while cmd.success is None:
        sleep(5)
        cmd = cmd.fetch()
    if cmd.success is not True:
        # BUG FIX: resultMessage is an attribute (see its other uses in
        # this class, e.g. host_install handling); invoking it as a method
        # raised TypeError while formatting this error message.
        log.error("The first run command failed: {0}".format(cmd.resultMessage))
    log.info("First run successfully executed. Your cluster has been set up!")
def status(self):
    """
    Check and update the status of the service.

    Polls the cloudera-scm-server init script: failure markers trigger a
    reset (or ERROR once the restart budget is spent); a running server
    marks the service RUNNING once it has been configured.
    """
    # Transitional/terminal states are left untouched.
    if self.state in (service_states.UNSTARTED,
                      service_states.STARTING,
                      service_states.SHUTTING_DOWN,
                      service_states.SHUT_DOWN,
                      service_states.WAITING_FOR_USER_ACTION):
        return
    # Capture possible status messages from /etc/init.d/cloudera-scm-server
    failure_markers = ['is dead and pid file exists',
                       'is dead and lock file exists',
                       'is not running',
                       'status is unknown']
    svc_status = misc.getoutput('service cloudera-scm-server status', quiet=True)
    for marker in failure_markers:
        if marker not in svc_status:
            continue
        log.warning("Cloudera server not running: {0}.".format(marker))
        if self.remaining_start_attempts > 0:
            log.debug("Resetting ClouderaManager service")
            self.state = service_states.UNSTARTED
        else:
            log.error("Exceeded number of restart attempts; "
                      "ClouderaManager service in ERROR.")
            self.state = service_states.ERROR
    if self.started and 'is running' in svc_status:
        self.state = service_states.RUNNING
        # Once the service gets running, reset the number of start attempts
        self.remaining_start_attempts = NUM_START_ATTEMPTS
|
# To add a new cell, type '# %%'
# To add a new markdown cell, type '# %% [markdown]'
# %%
from IPython import get_ipython
# %%
'''This was initially designed for visuallizations but then used for exporting data.
export_cli moved under the script dir, this is only visualization
'''
# %%
import pandas as pd
import datetime
import matplotlib.pylab as pl
from nna import visutils
import numpy as np
from pathlib import Path
from pathlib import Path
# %%
# CONFIGS
class pathMap():
    """Container for all input/output filesystem paths used below."""

    def __init__(self) -> None:
        # Base directories: scratch disk for data, home for results.
        scratch = '/scratch/enis/data/nna/'
        home = '/home/enis/projects/nna/'
        self.data_folder = scratch + 'database/'
        # self.exp_dir = '/home/enis/projects/nna/src/nna/exp/megan/run-3/'
        # Pre-computed clipping info for all data.
        self.clipping_results_path = Path(scratch +
                                          'clipping_info/all_data_2021-02-10/')
        self.output_dir = scratch + 'real/'
        # Pickled pandas DataFrame of audio-file properties.
        self.file_properties_df_path = self.data_folder + '/allFields_dataV5.pkl'
        # weather_cols=[]
        self.results_folder = home + 'results/'
        self.vis_output_path = self.results_folder + 'vis/182tahb6-V1/'
def setup_configs():
    """Build the path map and the shared visualization config.

    Returns a (pathMap, config) pair; config carries the tag-id to
    human-readable-name mapping plus input/output sampling frequencies.
    """
    pathmap = pathMap()
    # Human-readable labels for each model/tag id.
    id2name = {
        'multi9-V1-1-0-0': 'biophony',
        'multi9-V1-1-1-0': 'bird',
        'multi9-V1-1-1-10': 'songbirds',
        'multi9-V1-1-1-7': 'duck-goose-swan',
        'multi9-V1-0-0-0': 'anthrophony',
        'multi9-V1-1-3-0': 'insect',
        'multi9-V1-1-1-8': 'grouse-ptarmigan',
        'multi9-V1-0-2-0': 'aircraft',
        'multi9-V1-3-0-0': 'silence',
    }
    config = {
        'id2name': id2name,
        'input_data_freq': '10S',
        # FREQS to reduce results
        'output_data_freq': '270min',
    }
    return pathmap, config
def setup(args, pathmap, region_location):
    """
    Load the file-properties table and resolve which (region, location)
    pairs to process.

    ``args`` is unused here — presumably kept for interface symmetry
    with the caller; confirm before removing.
    """
    file_properties_df = pd.read_pickle(pathmap.file_properties_df_path)
    # important to keep them in order
    file_properties_df.sort_values(by=['timestamp'], inplace=True)
    # delete older than 2016
    fromtime = datetime.datetime(2016, 1, 1, 0)
    file_properties_df = file_properties_df[
        file_properties_df.timestamp >= fromtime]
    if not region_location:
        # No explicit pair given: process every distinct pair in the data.
        # region_location = [('anwr','49'),('prudhoe','11'),('prudhoe','26')]
        region_location = tuple(
            sorted(
                set(
                    zip(file_properties_df.region.values,
                        file_properties_df.locationId.values))))
    return region_location, file_properties_df
# %%
def sigmoid(data):
    """Logistic function: map values elementwise into (0, 1)."""
    exp_neg = np.exp(-data)
    return 1 / (1 + exp_neg)
# %%
def vis_preds_with_clipping(region_location, config, file_properties_df,
                            pathmap):
    """
    Generate clipping-aware prediction visualizations for every
    (region, location_id) pair.

    :return: dict mapping (region, location_id) to the paths that
        produced no results.
    """
    cmap = pl.cm.tab10
    aCmap = cmap
    # Per-tag color maps with a normal-distribution alpha ramp.
    my_cmaps = visutils.add_normal_dist_alpha(aCmap)
    no_result = {}
    for region, location_id in region_location:
        # print(region, all_regions.index(region),'location_id',location_id)
        # Restrict to non-empty recordings from this region/location.
        filtered_files = file_properties_df[file_properties_df.region == region]
        filtered_files = filtered_files[filtered_files.locationId ==
                                        location_id]
        filtered_files = filtered_files[filtered_files.durationSec > 0]
        no_result_paths = visutils.vis_preds_with_clipping(
            region,
            location_id,
            filtered_files,
            config['input_data_freq'],
            config['output_data_freq'],
            config['id2name'].keys(),
            my_cmaps,
            pathmap.output_dir,
            pathmap.clipping_results_path,
            pathmap.vis_output_path,
            config['id2name'],
            clipping_threshold=1.0,
            # Raw model outputs are logits; squash them before plotting.
            pre_process_func=sigmoid)
        if no_result_paths:
            no_result[(region, location_id)] = no_result_paths
    return no_result
def main(args):
    """Entry point: visualize one (region, location) pair, or all pairs.

    Both CLI flags must be non-empty to select a single pair; otherwise
    every pair found in the data is processed.
    """
    pathmap, config = setup_configs()
    if args.region != '' and args.location != '':
        region_location = [(args.region, args.location)]
    else:
        print('Region and location are not given, we will do all of them.')
        region_location = None
    region_location, file_properties_df = setup(args, pathmap, region_location)
    vis_preds_with_clipping(region_location, config, file_properties_df,
                            pathmap)
if __name__ == '__main__':
    import argparse

    # CLI: both flags default to '' which means "process everything".
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '--region',
        help='region such as anwr or stinchcomb etc',
        required=False, default='')
    parser.add_argument(
        '--location',
        help='location_id such as 11 or 14-Rocky etc',
        required=False, default='')
    args = parser.parse_args()
    main(args)
# %%
# # %%
# output_folder_path = '/home/enis/projects/nna/results/ExternalProject/megan/export_raw_v7'
# merge_folder = '/home/enis/projects/nna/results/ExternalProject/megan/merge_folder_v7'
# # %%
# def export_raw_results_2_csv(region_location, config, file_properties_df,
# pathmap):
# for region, location_id in region_location:
# print(region, location_id)
# filtered_files = file_properties_df[file_properties_df.region == region]
# filtered_files = filtered_files[filtered_files.locationId ==
# location_id]
# filtered_files = filtered_files[filtered_files.durationSec > 0]
# csv_files_written, no_result_paths = visutils.export_raw_results_2_csv(
# output_folder_path,
# config['id2name'].keys(),
# filtered_files,
# input_data_freq='10S',
# output_data_freq='10S',
# channel=1,
# result_files_folder=result_path,
# prob2binary_flag=False)
# print(len(no_result_paths))
# %%
# # %%
# # FAulty files
# # /tank/data/nna/real/anwr/41/2019/S4A10273_20190705_105723.flac
# # %%
# # !zip -r export_raw.zip export_raw/
# # %%
# get_ipython().system(
# 'cd /home/enis/projects/nna/results/ExternalProject/megan/')
# # %%
# merge_folder = '/home/enis/projects/nna/results/ExternalProject/megan/merge_folder_v7'
# # %%
# Path(merge_folder).mkdir(exist_ok=True)
# # %%
# import csv
# import glob
# for i in region_location:
# # location_csv_files=glob.glob(f'export_raw_v6/{i[1]}*')
# location_csv_files = glob.glob(f'{output_folder_path}/{i[1]}*')
# locationCsv_dict = {}
# timeLines = set()
# Lines = {}
# lineDicts = {}
# for csvf in location_csv_files:
# with open(csvf, 'r') as csvf_handle:
# rr = csv.reader(csvf_handle)
# lines = list(rr)
# headers, lines = lines[0], lines[1:]
# # locationCsv_dict[csvf] = (headers[1],lines,)
# # timeLine = [i[0] for i in lines]
# # valueLine = [i[1] for i in lines]
# lineDict = dict(lines)
# lineDicts[headers[1]] = lineDict
# # for i,n in sorted(list(zip(timeLines,location_csv_files))):
# # print(len(i),n)
# # print('***END**')
# print(i)
# # 'CABLE', 'RUNNINGWATER', 'INSECT', 'RAIN', 'WATERBIRD', 'WIND', 'SONGBIRD',
# # 'AIRCRAFT'
# aa = pd.DataFrame(data=lineDicts, columns=lineDicts.keys())
# aa.index.name = 'TIMESTAMP'
# aa.to_csv(f'{merge_folder}/{'_'.join(i)}.csv')
# del aa
# # %%
# get_ipython().system('pwd')
# # %%
# get_ipython().system(
# 'zip -r /home/enis/projects/nna/results/ExternalProject/megan/csv_version_merged.zip merge_folder_v7/'
# )
# # %%
# get_ipython().system(
# 'rclone copy csv_version_merged.zip 1Drive:/Data_sharing/NNA/ExternalProject/megan/'
# )
# # %%
# get_ipython().system('pwd')
# # %%
# # %%
# # %%
# # !ls -alh /home/enis/projects/nna/src/scripts/clipping_output/
# # %%
# # no_result_paths[0]
# # %%
# get_ipython().system('ls /home/enis/projects/nna/results/vis/testtestV3')
# # %%
# def load_clipping_2dict(clippingResultsPath,
# selected_areas,
# selected_tag_name,
# threshold: float = 1.0):
# gathered_results_perTag = {selected_tag_name: {}}
# gathered_results = {}
# selected_areas_files = {}
# gathered_results_perTag[selected_tag_name].update(resultsDict)
# return gathered_results_perTag
# # %%
# from pathlib import Path
# file_properties_df = pd.read_pickle(
# '../../../data/prudhoeAndAnwr4photoExp_dataV1.pkl')
# # %%
# import numpy as np
# clippingResultsPath = '/home/enis/projects/nna/src/scripts/clipping_output/'
# selected_areas = list(range(11, 51))
# clipping_threshold_str = '1,0'
# results = {}
# for i, area in enumerate(selected_areas):
# print(i)
# fileName = (clippingResultsPath + str(area) +
# f'_{clipping_threshold_str}.pkl')
# resultsDict = np.load(fileName, allow_pickle=True)
# resultsDict = resultsDict[()]
# results[str(area)] = resultsDict.copy()
# # %%
# import datetime
# from nna import fileUtils
# # %%
# for location_id in results.keys():
# csv_output = [['file_name', 'timestamp', 'channel-1', 'channel-2']]
# file_names = list(results[location_id].keys())
# timestamps = []
# for file_name in file_names:
# row = file_properties_df.loc[Path(file_name)]
# timestamps.append(row.timestamp)
# name_timestamp = list(zip(timestamps, file_names))
# name_timestamp.sort()
# for timestamp, file_name in name_timestamp:
# res_array = results[location_id][file_name]
# for i, (c1, c2) in enumerate(res_array):
# time_str = ((timestamp + datetime.timedelta(seconds=10 * i)
# ).strftime('%Y-%m-%d_%H:%M:%S'))
# line = [file_name, time_str, f'{c1:.4f}', f'{c2:.4f}']
# csv_output.append(line)
# fileUtils.save_to_csv(
# './csv_version/' + location_id + '_clipping_frequency.csv', csv_output)
# # %%
# get_ipython().system(
# 'rm -r /home/enis/projects/nna/notebooks/Visualizations/booksrc/csv_version/.ipynb_checkpoints/ '
# )
# # %%
# get_ipython().system('mkdir csv_version')
# # %%
# get_ipython().system('zip -r csv_version3.zip csv_version/')
# # %%
# # !rm /home/enis/projects/nna/notebooks/Visualizations/booksrc/csv_version/*
# # %%
# # !head /home/enis/projects/nna/notebooks/Visualizations/booksrc/csv_version/11_clipping_frequency.csv
# # %%
# get_ipython().system(
# 'du -h /home/enis/projects/nna/notebooks/Visualizations/booksrc/')
# # %%
|
"""Bytecode Interpreter operations for Python 3.7
"""
from __future__ import print_function, division
from xpython.byteop.byteop36 import ByteOp36
# Gone in 3.7: remove the 3.6-only opcode handler inherited from ByteOp36
# so this interpreter does not claim to support a removed instruction.
del ByteOp36.STORE_ANNOTATION
# del ByteOp36.WITH_CLEANUP_START
# del ByteOp36.WITH_CLEANUP_FINISH
# del ByteOp36.END_FINALLY
# del ByteOp36.POP_EXCEPT
# del ByteOp36.SETUP_WITH
# del ByteOp36.SETUP_ASYNC_WITH
class ByteOp37(ByteOp36):
    """Bytecode operations for Python 3.7: inherits 3.6 semantics and adds
    the LOAD_METHOD/CALL_METHOD fast-path pair introduced in 3.7."""

    def __init__(self, vm):
        super(ByteOp37, self).__init__(vm)

    # Changed in 3.7

    # WITH_CLEANUP_START
    # WITH_CLEANUP_FINISH
    # END_FINALLY
    # POP_EXCEPT
    # SETUP_WITH
    # SETUP_ASYNC_WITH

    # New in 3.7

    ##############################################################################
    # Order of function here is the same as in:
    # https://docs.python.org/3.7/library/dis.htmls#python-bytecode-instructions
    #
    # A note about parameter names. Generally they are the same as
    # what is described above, however there are some slight changes:
    #
    # * when a parameter name is `namei` (an int), it appears as
    #   `name` (a str) below because the lookup on co_names[namei] has
    #   already been performed in parse_byte_and_args().
    ##############################################################################

    def LOAD_METHOD(self, name):
        """Loads a method named co_names[namei] from the TOS object. TOS is
        popped. This bytecode distinguishes two cases: if TOS has a
        method with the correct name, the bytecode pushes the unbound
        method and TOS. TOS will be used as the first argument (self)
        by CALL_METHOD when calling the unbound method. Otherwise,
        NULL and the object return by the attribute lookup are pushed.

        rocky: In our implementation in Python we don't have NULL: all
        stack entries have *some* value. So instead we'll push another
        item: the status. Also, instead of pushing the unbound method
        and self, we will pass the bound method, since that is what we
        have here. So TOS (self) is not pushed back onto the stack.
        """
        TOS = self.vm.pop()
        if hasattr(TOS, name):
            # FIXME: check that gettr(TO, name) is a method
            # Push the bound method, then a truthy status marker that
            # CALL_METHOD pops to pick the success path.
            self.vm.push(getattr(TOS, name))
            self.vm.push("LOAD_METHOD lookup success")
        else:
            # Failure path: marker string plus a None placeholder
            # (stands in for CPython's NULL).
            self.vm.push("fill in attribute method lookup")
            self.vm.push(None)

    def CALL_METHOD(self, count):
        """Calls a method. argc is the number of positional
        arguments. Keyword arguments are not supported. This opcode is
        designed to be used with LOAD_METHOD. Positional arguments are
        on top of the stack. Below them, the two items described in
        LOAD_METHOD are on the stack (either self and an unbound
        method object or NULL and an arbitrary callable). All of them
        are popped and the return value is pushed.

        rocky: In our setting, before "self" we have an additional
        item which is the status of the LOAD_METHOD. There is no way
        in Python to represent a value outside of a Python value which
        you can do in C, and is in effect what NULL is.
        """
        # Stack layout (top to bottom): posargs..., status, bound method.
        posargs = self.vm.popn(count)
        is_success = self.vm.pop()
        if is_success:
            func = self.vm.pop()
            self.call_function_with_args_resolved(func, posargs, {})
        else:
            # FIXME: do something else
            raise self.vm.PyVMError("CALL_METHOD not implemented yet")
|
"""
This file is part of nucypher.
nucypher is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
nucypher is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with nucypher. If not, see <https://www.gnu.org/licenses/>.
"""
def test_testerchain_creation(testerchain):
    """Sanity-check the testerchain fixture: right network, chain has blocks."""
    interface = testerchain.interface
    # Ensure we are testing on the correct network...
    assert 'tester' in interface.provider_uri
    # ... and that there are already some blocks mined
    assert interface.w3.eth.blockNumber >= 0
|
from ripple.runners import base_runner
from ripple.runners.base_runner import BaseRunner
from ripple.runners.shell.shell_runner import ShellRunner |
from .abstract import TestWithSampleIncidents
from pprint import pprint
from dataproxy.routes.statistics import GetStatistics
import json
class TestGetStatistics(TestWithSampleIncidents):
    """Exercises GetStatistics against the sample-incident fixture storage."""

    def test_get_statistics_json(self):
        payload = GetStatistics().get_statistics(self.storage)
        events = json.loads(payload)["events"]
        self.assertEqual(len(events), 3)

    def test_get_statistics_string(self):
        text = GetStatistics().get_statistics_string(self.storage)
        expected_slugs = (
            "2019-02-24t194500z__soccer__laliga__levante__real-madrid",
            "2019-02-24t173000z__ice-hockey__nhl-regular-season__washington-capitals__new-york-rangers",
            "2019-01-25t010000z__basketball__nba-regular-season__washington-wizards__golden-state-warriors",
        )
        for slug in expected_slugs:
            self.assertIn(slug, text)
|
__author__ = "John Kirkham <kirkhamj@janelia.hhmi.org>"
__date__ = "$May 18, 2015 16:46:39 EDT$"

# Versioneer boilerplate: resolve the package version at import time, then
# drop the helper so it does not leak into the public module namespace.
from ._version import get_versions
__version__ = get_versions()['version']
del get_versions
|
def includeme(config):
    """Register the reef application's routes on the configurator.

    :param config: Pyramid Configuration instance
    :type config: :class:`pyramid.config.Configurator`
    """
    routes = (
        ('reef.index', '/'),
        ('reef.home', '/home'),
    )
    for route_name, pattern in routes:
        config.add_route(route_name, pattern)
|
# -*- coding: utf-8 -*-
from collections import defaultdict
import numpy as np
class TrieNode():
    """One trie node: on-demand child map, edge character, terminal count."""

    def __init__(self):
        self.children = defaultdict(TrieNode)  # char -> child, created lazily
        self.char = ''                         # character on the edge into this node
        self.count = 0                         # how many inserted words end here


class Trie():
    '''
    Prefix tree over strings. Features:
    1. total_count: running total of all insertions
    2. get_freq: frequency of a previously inserted word
    3. get_children_chars: (char, count) for every child under a prefix
    4. get_all_words: iterator over all inserted words with their counts
    '''

    def __init__(self):
        self.root = TrieNode()
        self.total_count = 0

    def insert(self, text):
        current = self.root
        for ch in text:
            current = current.children[ch]
            current.char = ch
        # Only the terminal node records the word occurrence.
        current.count += 1
        self.total_count += 1

    def get_all_words(self):
        # Breadth-first traversal; yields (word, count) in level order.
        pending = [('', self.root)]
        while pending:
            prefix, current = pending.pop(0)
            for child in current.children.values():
                word = prefix + child.char
                if child.count:
                    yield word, child.count
                pending.append((word, child))

    def _find(self, text):
        # Walk to the node spelling *text*; None if the path is absent.
        # Uses membership tests so the defaultdict is never grown by lookups.
        current = self.root
        for ch in text:
            if ch not in current.children:
                return None
            current = current.children[ch]
        return current

    def get_freq(self, text):
        node = self._find(text)
        return node.count if node is not None else 0

    def get_children_chars(self, text):
        node = self._find(text)
        if node is None:
            return []
        return [(child.char, child.count) for child in node.children.values()]
if __name__ == '__main__':
    # Demo: index every 1- to 3-gram of a tongue twister; a second trie of
    # reversed words turns left-context queries into child lookups.
    corpus = Trie()
    corpus_inverse = Trie()
    text = '吃葡萄不吐葡萄皮不吃葡萄倒吐葡萄皮'
    words = []
    for i in range(1, 4):
        words += [text[j:j+i] for j in range(len(text)-i+1)]
    print(words)
    for word in words:
        corpus.insert(word)
        # Reversed trie stores each word back-to-front.
        corpus_inverse.insert(word[::-1])
    print(f"Freq of 葡萄 is {corpus.get_freq('葡萄')}")
    print(f"Rights chars of 葡萄 is {corpus.get_children_chars('葡萄')}")
    # Query the reversed trie with the reversed word to get left neighbours.
    print(f"Left chars of 葡萄 is {corpus_inverse.get_children_chars('萄葡')}")
# https://cses.fi/problemset/task/1083
# Read n and n-1 distinct numbers from 1..n; print the one that is missing.
n = int(input())
seen = [False] * n
for token in input().split(' '):
    seen[int(token) - 1] = True
for index, was_seen in enumerate(seen):
    if not was_seen:
        print(index + 1)
        exit()
|
from .module import AsyncioModule
|
import re
from six.moves.urllib.parse import urljoin
from w3lib.html import remove_tags, replace_entities, replace_escape_chars, get_base_url
from scrapy.link import Link
from .sgml import SgmlLinkExtractor
# Pre-compiled matcher for <a ... href=...>text</a>; DOTALL lets anchor text
# span lines, IGNORECASE accepts mixed-case tags. NOTE(review): regex-based
# HTML parsing is approximate by design — kept for speed.
linkre = re.compile(
    "<a\s.*?href=(\"[.#]+?\"|\'[.#]+?\'|[^\s]+?)(>|\s.*?>)(.*?)<[/ ]?a>",
    re.DOTALL | re.IGNORECASE)
def clean_link(link_text):
    """Remove leading and trailing whitespace and punctuation"""
    junk_chars = "\t\r\n '\"\x0c"
    return link_text.strip(junk_chars)
class RegexLinkExtractor(SgmlLinkExtractor):
    """High performant link extractor"""

    def _extract_links(self, response_text, response_url, response_encoding, base_url=None):
        if base_url is None:
            base_url = get_base_url(response_text, response_url, response_encoding)

        def clean_text(text):
            # Drop tags and escape sequences from the anchor's inner text.
            return replace_escape_chars(remove_tags(text.decode(response_encoding))).strip()

        def clean_url(url):
            # Absolutize the href against base_url; unparseable URLs become ''.
            try:
                return urljoin(base_url, replace_entities(clean_link(url.decode(response_encoding))))
            except ValueError:
                return ''

        return [
            Link(clean_url(url).encode(response_encoding), clean_text(text))
            for url, _, text in linkre.findall(response_text)
        ]
|
#!/usr/bin/env python3
#
# Copyright (c) 2022 Project CHIP Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import codecs
import sys
from intelhex import IntelHex
import argparse
import json
import logging as log
import cbor2 as cbor
# JSON string values beginning with this prefix are hex-decoded to raw bytes.
HEX_PREFIX = "hex:"
class PartitionCreator:
    """
    Class to create nrfconnect partition containing FactoryData

    :param offset: This is a partition offset where data will be stored in device's flash memory
    :param length: This is a maximum partition size
    :param input: This is a path to input JSON file
    :param output: This is a path to output directory
    """

    def __init__(self, offset: int, length: int, input: str, output: str) -> None:
        self._ih = IntelHex()
        self._length = length
        self._offset = offset
        self._data_ready = False  # becomes True after a successful create_hex()
        self._output = output
        self._input = input
        try:
            self.__data_to_save = self._convert_to_dict(self._load_json())
        except IOError:
            # _load_json already logged the failure; abort the whole tool.
            sys.exit(-1)

    def generate_cbor(self):
        """
        Serialize the factory data to CBOR with the cbor2 library.

        Writes <output>/output.cbor and returns the raw CBOR bytes, or None
        when there is no data to save.
        NOTE(review): the original docstring mentioned CBORTag 55799, but no
        tag is attached here — confirm what the consumer actually expects.
        """
        if self.__data_to_save:
            # prepare raw data from Json
            cbor_data = cbor.dumps(self.__data_to_save)
            with open(self._output + "/output.cbor", "w+b") as cbor_output:
                # Decode/re-encode round trip; file content equals cbor_data.
                cbor.dump(cbor.loads(cbor_data), cbor_output)
            return cbor_data

    def create_hex(self, data: bytes):
        """
        Creates .hex file from CBOR.
        This file can be write directly to device.

        :raises ValueError: if *data* does not fit the declared partition size.
        """
        if len(data) > self._length:
            raise ValueError("generated CBOR file exceeds declared maximum partition size! {} > {}".format(len(data), self._length))
        self._ih.putsz(self._offset, data)
        self._ih.write_hex_file(self._output + "/output.hex", True)
        self._data_ready = True
        return True

    def create_bin(self):
        """
        Creates raw binary data of created previously .hex file

        Returns False (and logs) if create_hex() has not run successfully yet.
        """
        if not self._data_ready:
            log.error("Please create hex file first!")
            return False
        self._ih.tobinfile(self._output + "/output.bin")
        return True

    @staticmethod
    def _convert_to_dict(data):
        """
        Converts a list containing tuples ("key_name", "key_value") to a dictionary

        If "key_value" of data entry is a string-type variable and contains a HEX_PREFIX algorithm decodes it
        to hex format to be sure that a cbor file will contain proper bytes.
        If "key_value" of data entry is a dictionary, algorithm appends it to the created dictionary.
        """
        output_dict = dict()
        for entry in data:
            if not isinstance(entry, dict):
                log.debug("Processing entry {}".format(entry))
                if isinstance(data[entry], str) and data[entry].startswith(HEX_PREFIX):
                    # Strip the "hex:" marker and decode the remainder to bytes.
                    output_dict[entry] = codecs.decode(data[entry][len(HEX_PREFIX):], "hex")
                elif isinstance(data[entry], str):
                    output_dict[entry] = data[entry].encode("utf-8")
                else:
                    output_dict[entry] = data[entry]
            else:
                # NOTE(review): this uses the entry as both key and value,
                # and dicts are unhashable as keys — looks unreachable for
                # dict input (keys are strings); confirm the intent.
                output_dict[entry] = entry
        return output_dict

    def _load_json(self):
        """
        Loads file containing a JSON data and converts it to JSON format

        :raises IOError: if provided JSON file can not be read out.
        """
        try:
            with open(self._input, "rb") as json_file:
                return json.loads(json_file.read())
        except IOError as e:
            log.error("Can not read Json file {}".format(self._input))
            raise e
def print_flashing_help():
    """Print the nrfjprog commands that flash the generated factory-data hex."""
    divider = "-------------------------------------------------------------------------------"
    print("\nTo flash the generated hex containing factory data, run the following command:")
    for soc_family in ("nrf52", "nrf53"):
        print("For {}:".format(soc_family))
        print(divider)
        print("nrfjprog -f {} --program HEXFILE_PATH --sectorerase".format(soc_family))
        print(divider)
def main():
    """Parse CLI arguments, then generate .cbor/.hex/.bin factory-data files."""
    def any_base_int(text):
        # Accept decimal as well as 0x/0o/0b-prefixed integer literals.
        return int(text, 0)

    parser = argparse.ArgumentParser(description="NrfConnect Factory Data NVS partition generator tool")
    parser.add_argument("-i", "--input", type=str, required=True,
                        help="Path to input .json file")
    parser.add_argument("-o", "--output", type=str, required=True,
                        help="Path to DIRECTORY, where .hex, .cbor and .bin files will be stored")
    parser.add_argument("--offset", type=any_base_int, required=True,
                        help="Partiton offset - a place in device's flash memory, where factory data will be stored")
    parser.add_argument("--size", type=any_base_int, required=True,
                        help="The maximum partition size")
    parser.add_argument("-v", "--verbose", action="store_true",
                        help="Run this script with DEBUG logging level")
    args = parser.parse_args()

    if args.verbose:
        log.basicConfig(format='[%(asctime)s][%(levelname)s] %(message)s', level=log.DEBUG)
    else:
        log.basicConfig(format='[%(asctime)s] %(message)s', level=log.INFO)

    creator = PartitionCreator(args.offset, args.size, args.input, args.output)
    cbor_payload = creator.generate_cbor()
    try:
        if creator.create_hex(cbor_payload) and creator.create_bin():
            print_flashing_help()
    except ValueError as err:
        log.error(err)
        sys.exit(-1)
if __name__ == "__main__":
main()
|
from dataclasses import fields
from rest_framework import serializers
from projects.models import UserProfile,Project
from django.contrib.auth.models import User
class ProjectSerializer(serializers.ModelSerializer):
    """Serializes Project rows (title, description, url, image, created_at)."""
    class Meta:
        model=Project
        fields=['title','description','url','image','created_at']
class UserSerializer(serializers.ModelSerializer):
    """Serializes a User together with their nested projects."""
    # NOTE(review): the nested field name 'project' must match a related
    # accessor on User (e.g. a related_name) — confirm against Project's FK.
    project=ProjectSerializer(many=True)
    class Meta:
        model=User
        fields=['username','email','project']
class UserProfileSerializer(serializers.ModelSerializer):
    """Serializes a UserProfile with its owning user embedded read-style."""
    user=UserSerializer(many=False)
    class Meta:
        model=UserProfile
        fields=('user','name','bio','profile_photo',)
import setuptools
from distutils.core import setup

# read the contents of README.md
from os import path

this_directory = path.abspath(path.dirname(__file__))
with open(path.join(this_directory, "README.md"), encoding="utf-8") as f:
    long_description = f.read()
# BUG FIX: str.replace returns a new string (strings are immutable); the
# original discarded the result, so PyPI kept the relative image path.
long_description = long_description.replace(
    "basic-example.gif",
    "https://raw.githubusercontent.com/pyreiz/pyreiz/master/basic-example.gif",
)

from os import environ

print(environ)
if environ.get("READTHEDOCS", False):
    # On ReadTheDocs the lsl binary wheel is unavailable, so keep pylsl out
    # of install_requires and try to pull it in via conda instead.
    install_requires = [
        "pyglet >= 1.5",
    ]
    import os
    # NOTE(review): "pylyl" looks like a typo for "pylsl" (the tstenner
    # channel ships pylsl) — confirm before changing this runtime command.
    os.system("conda install pylyl -c tstenner")
    print("Running on RTD")
else:
    install_requires = [
        "pyglet >= 1.5",
        "pylsl >= 1.13",
    ]

setup(
    name="Reiz",
    version="v3.5.1",
    description="A Python toolbox for visual and auditory stimulation based on pyglet and pylsl.",
    long_description=long_description,
    long_description_content_type="text/markdown",
    author="Robert Guggenberger",
    author_email="robert.guggenberger@uni-tuebingen.de",
    url="https://github.com/pyreiz/pyreiz",
    download_url="https://github.com/pyreiz/pyreiz.git",
    license="MIT",
    include_package_data=True,
    package_data={"reiz": ["data/*.*"]},
    packages=["reiz", "reiz._visual", "reiz._audio", "reiz.examples", "reiz._marker"],
    install_requires=install_requires,
    extras_require={"tts": ["pyttsx3 >= 2.7"]},
    entry_points={"console_scripts": ["reiz-marker=reiz._marker.__main__:main"]},
    classifiers=[
        "Development Status :: 5 - Production/Stable",
        "Environment :: Console",
        "Intended Audience :: Developers",
        "Intended Audience :: Science/Research",
        "License :: OSI Approved :: MIT License",
        "Operating System :: OS Independent",
        "Programming Language :: Python :: 3.6",
        "Programming Language :: Python :: 3.7",
        "Programming Language :: Python :: 3.8",
        "Topic :: Multimedia :: Graphics :: Presentation",
        "Topic :: Multimedia :: Sound/Audio :: Players",
        "Topic :: Scientific/Engineering :: Human Machine Interfaces",
        "Topic :: Scientific/Engineering :: Medical Science Apps.",
    ],
)
|
from django.contrib import admin
from .models import Post, Comment
@admin.register(Post)
class PostAdmin(admin.ModelAdmin):
    """Admin for blog posts, with bulk publish/withdraw actions."""
    list_display = ('status', 'author', 'title', 'created_date', 'tag')
    list_filter = ('status', 'published_date', 'tag')
    search_fields = ('content', 'title', 'tag')
    actions = ['publish_post', 'draw_post']

    def publish_post(self, request, queryset):
        # Bulk action: mark the selected posts as status 'p'.
        queryset.update(status='p')

    def draw_post(self, request, queryset):
        # Bulk action: mark the selected posts as status 'x'.
        # NOTE(review): 'x' presumably means withdrawn — confirm against the
        # Post.status choices.
        queryset.update(status='x')
@admin.register(Comment)
class CommentAdmin(admin.ModelAdmin):
    """Admin for comments, with bulk approve/suspend actions."""
    list_display = (
        'status',
        'owner',
        'content',
        'post',
        'created_date'
    )
    list_filter = ('status', 'created_date', 'owner')
    search_fields = ('content',)
    actions = ['approve_comments', 'suspend_comments']

    def approve_comments(self, request, queryset):
        # Bulk action: mark the selected comments as status 'p'.
        queryset.update(status='p')

    def suspend_comments(self, request, queryset):
        # Bulk action: mark the selected comments as status 's'.
        queryset.update(status='s')
|
"""Setup for the fastai splunk package."""
import setuptools
with open('README.md') as f:
README = f.read()
setuptools.setup(
author="Nate Argroves",
author_email="nargroves@gmail.com",
name='fastai-splunk',
license="Apache Software License 2.0",
description='fastai-splunk allows you to import Splunk data using fastai',
version='v0.0.1',
long_description=README,
url='https://github.com/nargroves/fastai-splunk',
packages=setuptools.find_packages(),
python_requires=">=3.6",
install_requires=['fastai','splunk-sdk'],
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'License :: OSI Approved :: Apache Software License',
'Natural Language :: English',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
],
)
|
from ami.flowchart.library.DisplayWidgets import ImageWidget, WaveformWidget, PixelDetWidget, \
ScatterWidget
from ami.flowchart.library.common import CtrlNode
from amitypes import Array2d, Array1d
import ami.graph_nodes as gn
import numpy as np
import pyqtgraph as pg
from pyqtgraph import functions as fn
class Roi2D(CtrlNode):
    """
    Region of Interest of image.

    Shows an adjustable rectangle over the incoming 2d array and emits the
    cropped sub-image plus the ROI coordinates (ox, ex, oy, ey).
    """
    nodeName = "Roi2D"
    uiTemplate = [('origin x', 'intSpin', {'value': 0, 'min': 0}),
                  ('origin y', 'intSpin', {'value': 0, 'min': 0}),
                  ('extent x', 'intSpin', {'value': 10, 'min': 1}),
                  ('extent y', 'intSpin', {'value': 10, 'min': 1})]

    def __init__(self, name):
        super().__init__(name,
                         terminals={'In': {'io': 'in', 'ttype': Array2d},
                                    'Out': {'io': 'out', 'ttype': Array2d},
                                    'Roi_Coordinates': {'io': 'out', 'ttype': Array1d}},
                         viewable=True)

    def isChanged(self, restore_ctrl, restore_widget):
        # Only ctrl (spinbox) restores require regenerating the operation.
        return restore_ctrl

    def display(self, topics, terms, addr, win, **kwargs):
        super().display(topics, terms, addr, win, ImageWidget, **kwargs)
        if self.widget:
            # Interactive rectangle mirroring the current ctrl values.
            self.roi = pg.RectROI([self.values['origin x'], self.values['origin y']],
                                  [self.values['extent x'], self.values['extent y']])
            self.roi.sigRegionChangeFinished.connect(self.set_values)
            self.widget.view.addItem(self.roi)
        return self.widget

    def set_values(self, *args, **kwargs):
        # need to block signals to the stateGroup otherwise stateGroup.sigChanged
        # will be emmitted by setValue causing update to be called
        self.stateGroup.blockSignals(True)
        roi = args[0]
        extent, _, origin = roi.getAffineSliceParams(self.widget.imageItem.image, self.widget.imageItem)
        self.values['origin x'] = int(origin[0])
        self.values['origin y'] = int(origin[1])
        self.values['extent x'] = int(extent[0])
        self.values['extent y'] = int(extent[1])
        self.ctrls['origin x'].setValue(self.values['origin x'])
        self.ctrls['extent x'].setValue(self.values['extent x'])
        self.ctrls['origin y'].setValue(self.values['origin y'])
        self.ctrls['extent y'].setValue(self.values['extent y'])
        self.stateGroup.blockSignals(False)
        self.sigStateChanged.emit(self)

    def update(self, *args, **kwargs):
        super().update(*args, **kwargs)
        if self.widget:
            # finish=False avoids re-triggering sigRegionChangeFinished.
            self.roi.setPos(self.values['origin x'], y=self.values['origin y'], finish=False)
            self.roi.setSize((self.values['extent x'], self.values['extent y']), finish=False)

    def to_operation(self, **kwargs):
        ox = self.values['origin x']
        ex = self.values['extent x']
        oy = self.values['origin y']
        ey = self.values['extent y']

        def func(img):
            # NOTE(review): x indexes axis 0 and y axis 1 — assumes images
            # arrive as [x, y]; confirm orientation against ImageWidget.
            return img[slice(ox, ox+ex), slice(oy, oy+ey)], (ox, ex, oy, ey)
        return gn.Map(name=self.name()+"_operation", **kwargs, func=func)
class Roi1D(CtrlNode):
    """
    Region of Interest of 1d array.

    Shows a linear region selector over the waveform and emits the slice
    between origin and extent.
    """
    nodeName = "Roi1D"
    uiTemplate = [('origin', 'intSpin', {'value': 0, 'min': 0}),
                  ('extent', 'intSpin', {'value': 10, 'min': 1})]

    def __init__(self, name):
        super().__init__(name, terminals={"In": {"io": "in", "ttype": Array1d},
                                          "Out": {"io": "out", "ttype": Array1d}},
                         viewable=True)

    def isChanged(self, restore_ctrl, restore_widget):
        # Only ctrl (spinbox) restores require regenerating the operation.
        return restore_ctrl

    def display(self, topics, terms, addr, win, **kwargs):
        super().display(topics, terms, addr, win, WaveformWidget, **kwargs)
        if self.widget:
            self.roi = pg.LinearRegionItem((self.values['origin'], self.values['extent']),
                                           brush=fn.mkBrush(0, 255, 0, 100), swapMode='None')
            # Indices cannot be negative.
            self.roi.setBounds((0, None))
            self.widget.plot_view.addItem(self.roi)
            self.roi.sigRegionChangeFinished.connect(self.set_values)
        return self.widget

    def set_values(self, *args, **kwargs):
        # need to block signals to the stateGroup otherwise stateGroup.sigChanged
        # will be emmitted by setValue causing update to be called
        self.stateGroup.blockSignals(True)
        roi = args[0]
        origin, extent = roi.getRegion()
        self.values['origin'] = int(origin)
        self.values['extent'] = int(extent)
        self.ctrls['origin'].setValue(self.values['origin'])
        self.ctrls['extent'].setValue(self.values['extent'])
        self.stateGroup.blockSignals(False)
        self.sigStateChanged.emit(self)

    def update(self, *args, **kwargs):
        super().update(*args, **kwargs)
        if self.widget:
            self.roi.setRegion((self.values['origin'], self.values['extent']))

    def to_operation(self, **kwargs):
        origin = self.values['origin']
        extent = self.values['extent']
        # Sort so the slice is valid even if origin > extent.
        size = list(sorted([origin, extent]))

        def func(arr):
            return arr[slice(*size)]
        return gn.Map(name=self.name()+"_operation", **kwargs, func=func)
class ScatterRoi(CtrlNode):
    """
    Region of Interest of 1d array.

    Accumulates (X, Y) points via PickN, then keeps only points whose X lies
    strictly inside (origin, extent); also feeds a scatter display.
    """
    nodeName = "ScatterRoi"
    uiTemplate = [('origin', 'intSpin', {'value': 0, 'min': 0}),
                  ('extent', 'intSpin', {'value': 10, 'min': 1}),
                  ('Num Points', 'intSpin', {'value': 100, 'min': 1})]

    def __init__(self, name):
        super().__init__(name, terminals={"X": {"io": "in", "ttype": float},
                                          "Y": {"io": "in", "ttype": float},
                                          "Out.X": {"io": "out", "ttype": Array1d},
                                          "Out.Y": {"io": "out", "ttype": Array1d}},
                         buffered=True,
                         global_op=True)

    def isChanged(self, restore_ctrl, restore_widget):
        # Only ctrl (spinbox) restores require regenerating the operation.
        return restore_ctrl

    def display(self, topics, terms, addr, win, **kwargs):
        super().display(topics, terms, addr, win, ScatterWidget, **kwargs)
        if self.widget:
            self.roi = pg.LinearRegionItem((self.values['origin'], self.values['extent']), swapMode='sort',
                                           brush=fn.mkBrush(0, 255, 0, 100))
            self.widget.plot_view.addItem(self.roi)
            self.roi.sigRegionChangeFinished.connect(self.set_values)
        return self.widget

    def set_values(self, *args, **kwargs):
        # need to block signals to the stateGroup otherwise stateGroup.sigChanged
        # will be emmitted by setValue causing update to be called
        self.stateGroup.blockSignals(True)
        roi = args[0]
        origin, extent = roi.getRegion()
        self.values['origin'] = int(origin)
        self.values['extent'] = int(extent)
        self.ctrls['origin'].setValue(self.values['origin'])
        self.ctrls['extent'].setValue(self.values['extent'])
        self.stateGroup.blockSignals(False)
        self.sigStateChanged.emit(self)

    def update(self, *args, **kwargs):
        super().update(*args, **kwargs)
        if self.widget:
            self.roi.setRegion((self.values['origin'], self.values['extent']))

    def buffered_topics(self):
        # Route the display outputs back to the widget under the input terms.
        terms = self.input_vars()
        return {terms["X"]: self.name()+"_displayX", terms["Y"]: self.name()+"_displayY"}

    def buffered_terms(self):
        terms = self.input_vars()
        return {"X": terms["X"], "Y": terms["Y"]}

    def to_operation(self, inputs, outputs, **kwargs):
        pickn_outputs = [self.name()+"_picked"]
        display_outputs = [self.name()+"_displayX", self.name()+"_displayY"]

        def display_func(arr):
            # NOTE(review): zip(*arr) raises on an empty pick buffer —
            # confirm PickN never emits an empty list here.
            x, y = zip(*arr)
            return np.array(x), np.array(y)
        origin = self.values['origin']
        extent = self.values['extent']

        def func(arr):
            # Keep points with X strictly inside (origin, extent).
            arr = np.array(arr)
            roi = arr[(origin < arr[:, 0]) & (arr[:, 0] < extent)]
            if roi.size > 0:
                return roi[:, 0], roi[:, 1]
            else:
                return np.array([]), np.array([])
        nodes = [gn.PickN(name=self.name()+"_pickN",
                          inputs=inputs, outputs=pickn_outputs, **kwargs,
                          N=self.values['Num Points']),
                 gn.Map(name=self.name()+"_operation", inputs=pickn_outputs, outputs=outputs, func=func,
                        **kwargs),
                 gn.Map(name=self.name()+"_display", inputs=pickn_outputs, outputs=display_outputs,
                        **kwargs, func=display_func)]
        return nodes
class Roi0D(CtrlNode):
    """
    Selects single pixel from image.
    """
    nodeName = "Roi0D"
    uiTemplate = [('x', 'intSpin', {'value': 0, 'min': 0}),
                  ('y', 'intSpin', {'value': 0, 'min': 0})]

    def __init__(self, name):
        super().__init__(name,
                         terminals={'In': {'io': 'in', 'ttype': Array2d},
                                    'Out': {'io': 'out', 'ttype': float}},
                         viewable=True)

    def isChanged(self, restore_ctrl, restore_widget):
        # Only ctrl (spinbox) restores require regenerating the operation.
        return restore_ctrl

    def display(self, topics, terms, addr, win, **kwargs):
        super().display(topics, terms, addr, win, PixelDetWidget, **kwargs)
        if self.widget:
            # Clicking the widget selects the pixel; keep the cursor synced.
            self.widget.sigClicked.connect(self.set_values)
            self.widget.update_cursor(**self.values)
        return self.widget

    def set_values(self, *args, **kwargs):
        # need to block signals to the stateGroup otherwise stateGroup.sigChanged
        # will be emmitted by setValue causing update to be called
        self.stateGroup.blockSignals(True)
        self.values['x'], self.values['y'] = args
        self.ctrls['x'].setValue(self.values['x'])
        self.ctrls['y'].setValue(self.values['y'])
        self.stateGroup.blockSignals(False)
        self.sigStateChanged.emit(self)

    def update(self, *args, **kwargs):
        super().update(*args, **kwargs)
        if self.widget:
            self.widget.update_cursor(**self.values)

    def to_operation(self, **kwargs):
        x = self.values['x']
        y = self.values['y']

        def func(img):
            # NOTE(review): assumes images are indexed [x, y] — confirm
            # orientation against PixelDetWidget.
            return img[x, y]
        return gn.Map(name=self.name()+"_operation", **kwargs, func=func)
|
from .sproto import SprotoRpc
|
def permute(seq):
    """Lazily yield every permutation of a sliceable sequence *seq*."""
    if not seq:
        yield seq
        return
    for idx in range(len(seq)):
        remainder = seq[:idx] + seq[idx + 1:]
        for tail in permute(remainder):
            yield seq[idx:idx + 1] + tail
def permute1(seq):
    """Return a list of every permutation of a sliceable sequence *seq*.

    Eager counterpart of the generator ``permute``.
    """
    if not seq:
        return [seq]
    res = []
    for i in range(len(seq)):
        rest = seq[:i] + seq[i + 1:]
        # FIX: recurse into permute1 itself; the original called the sibling
        # generator permute, making this function non-self-contained.
        for x in permute1(rest):
            res.append(seq[i:i + 1] + x)
    return res
# Demo: print all 5! = 120 permutations of "maths".
for i in permute('maths'):
    print(i)
|
import discord
from discord.ext import commands
import aiohttp
import random
import textwrap
class Reddit(commands.Cog):
    """Commands that fetch and embed random image posts from Reddit."""

    def __init__(self, bot):
        self.bot = bot

    @commands.command(name="reddit")
    async def _reddit(self, ctx, subreddit):
        """Embed a random non-stickied i.redd.it image from *subreddit*.

        NSFW posts are only eligible in NSFW channels. Retries a bounded
        number of times to find a suitable post.
        """
        with ctx.channel.typing():
            try:
                if subreddit.startswith("r/"):
                    subreddit = subreddit[2:]
                async with aiohttp.ClientSession() as con:
                    async with con.get(f"https://www.reddit.com/r/{subreddit}/.json") as r:
                        data = await r.json()
                        if data["data"]["children"]:
                            if ctx.channel.is_nsfw():
                                # for-else: post stays None only if no draw
                                # ever matched and broke out of the loop.
                                for _ in range(1, 30):
                                    post = random.choice(data["data"]["children"])
                                    if post["data"]["domain"] == "i.redd.it" and not post["data"]["stickied"]:
                                        break
                                else:
                                    post = None
                            else:
                                for _ in range(1, 30):
                                    post = random.choice(data["data"]["children"])
                                    if post["data"]["domain"] in ["i.redd.it"] and not post["data"]["stickied"] and not post["data"]["over_18"]:
                                        break
                                else:
                                    post = None
                            if post:
                                embed = discord.Embed(title=textwrap.fill(
                                    post["data"]["title"], width=35), url=f"https://www.reddit.com{post['data']['permalink']}", colour=discord.Colour.teal())
                                embed.set_image(url=post["data"]["url"])
                                embed.add_field(
                                    name="Upvotes", value=f"```py\n{int(post['data']['ups']) - int(post['data']['downs'])}```", inline=True)
                                embed.set_footer(
                                    text=f"Uploaded by u/{post['data']['author']}", icon_url=ctx.author.avatar_url)
                                await ctx.send(embed=embed)
                            else:
                                embed = discord.Embed(
                                    title="Error!", description=f"```diff\n- Failed getting a post from {subreddit}! (This may be because the post was a video, which is unsupported)```", colour=discord.Colour.red()).set_footer(text="This could be because the subreddit doesn't exist, or is private", icon_url=ctx.author.avatar_url)
                                await ctx.send(embed=embed)
                        else:
                            embed = discord.Embed(
                                title="Error!", description=f"```diff\n- Couldn't find anything matching {subreddit}!```", colour=discord.Colour.red()).set_footer(text="This could be because the subreddit doesn't exist, or is private", icon_url=ctx.author.avatar_url)
                            await ctx.send(embed=embed)
            # FIX: targeted except clauses instead of catching Exception and
            # dispatching on isinstance(e, KeyError).
            except KeyError:
                embed = discord.Embed(
                    title="Error!", description=f"```diff\n- Couldn't find anything matching {subreddit}!```", colour=discord.Colour.red()).set_footer(text="This could be because the subreddit doesn't exist, or is private", icon_url=ctx.author.avatar_url)
                await ctx.send(embed=embed)
            except Exception:
                embed = discord.Embed(
                    title="Error!", description="```diff\n- There was an error, please try again later```", colour=discord.Colour.red())
                await ctx.send(embed=embed)

    @commands.command(name="cat", aliases=["cats"])
    async def _cat(self, ctx):
        """Shortcut: random post from r/cutecats."""
        await self._reddit(ctx, "cutecats")

    @commands.command(name="dog", aliases=["dogs", "doggo", "doggos"])
    async def _dog(self, ctx):
        """Shortcut: random post from r/doggos."""
        await self._reddit(ctx, "doggos")

    @commands.command(name="meme", aliases=["memes"])
    async def _meme(self, ctx):
        """Shortcut: random post from r/memes."""
        await self._reddit(ctx, "memes")
def setup(bot):
    # discord.py extension entry point: register the Reddit cog on *bot*.
    bot.add_cog(Reddit(bot))
|
"""
Example of a module that implements implementations but states that nothing is
exported by setting the helper variable to ``None``.
"""
from smqtk.tests.utils.test_plugin_get import DummyInterface
class ImplSkipModule (DummyInterface):
    """A usable DummyInterface implementation; the module still exports
    nothing because the helper variable TEST_PLUGIN_CLASS is None."""

    @classmethod
    def is_usable(cls):
        # Always reports itself as available for discovery.
        return True

    def inst_method(self, val):
        return "skipModule" + str(val)
# Helper variable read by the plugin loader: None means this module
# deliberately exports no plugin classes.
TEST_PLUGIN_CLASS = None
|
"""https://open.kattis.com/problems/prva"""
def addWords(words=None, array=None):
    """Split each line of *array* on '#', append the pieces to *words*
    (mutated in place), drop every word shorter than 2 characters, and
    return *words*.

    FIX: None defaults replace the original mutable default arguments,
    which were shared across calls and accumulated state.
    """
    if words is None:
        words = []
    if array is None:
        array = []
    for line in array:
        # add words to list
        words.extend(line.split("#"))
    # remove elements with length < 2 — filtered via slice assignment so
    # callers that passed a list observe the same in-place mutation the
    # original remove() loop performed (and without its O(n^2) cost).
    words[:] = [word for word in words if len(word) >= 2]
    return words
# Read the R x C crossword grid, collect all horizontal and vertical words
# of length >= 2, and print the lexicographically smallest one.
R, C = map(int, input().split())
horizontal = []
vertical = [[[] for _ in range(R)] for _ in range(C)]
for i in range(R):
    # input lines
    horizontal.append(input())
    for j in range(C):
        # NOTE(review): the [] placeholders are overwritten with single
        # characters here; initializing with '' would be clearer.
        vertical[j][i] = horizontal[i][j]
for i, each in enumerate(vertical):
    # Collapse each column's characters into one string.
    vertical[i] = "".join(vertical[i])
words = addWords(addWords([], horizontal), vertical)
print(sorted(words)[0])
from django.contrib.auth.models import (
AbstractUser, UserManager
)
from django.db import models
class User(AbstractUser):
    """Site account based on Django's AbstractUser.

    ``delete`` is a soft delete (deactivation); ``_delete`` performs the
    real row deletion.
    """
    skku_id = models.CharField(verbose_name='SKKU ID',
                               max_length=255,
                               null=True, blank=True)
    birthday = models.DateField(verbose_name='생년월일',
                                null=True)
    phone_num = models.CharField(verbose_name='전화번호',
                                 max_length=255,
                                 null=True, blank=True)
    nickname = models.CharField(verbose_name='닉네임',
                                max_length=255,
                                blank=True)
    objects = UserManager()
    USERNAME_FIELD = 'username'

    class Meta:
        app_label = 'account'
        ordering = ('-date_joined', )
        verbose_name = '계정'
        verbose_name_plural = '계정들'

    def delete(self, *args, **kwargs):
        # Fake deletion: deactivate so related data is preserved.
        self.is_active = False
        self.save()

    def _delete(self, *args, **kwargs):
        """Irreversibly delete the row (bypasses the soft-delete override)."""
        # FIX: the original passed the args tuple as a single positional
        # argument (``super().delete(args, **kwargs)``); unpack it.
        super(User, self).delete(*args, **kwargs)
|
# Original script by drdaxxy
# https://gist.github.com/drdaxxy/1e43b3aee3e08a5898f61a45b96e4cb4
import sys
import os
import requests
import errno
from PIL import Image
from googletrans import Translator
import re
# Translator used to prefix the Japanese title with an English rendering.
translator = Translator(service_urls=['translate.googleapis.com'])

if len(sys.argv) != 3:
    print("usage: shonenripperjson.py <url> <destination folder>")
    sys.exit(1)

destination = sys.argv[2]
# Browser-like User-Agent sent with every request.
# NOTE(review): presumably required because the endpoint rejects default
# python-requests agents — confirm.
headers = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:83.0) Gecko/20100101 Firefox/83.0'}
def make_destination(destination):
    """Create *destination* (including parents) if it does not exist yet.

    ``exist_ok=True`` replaces the original manual EEXIST check and is
    race-free; any other OSError still propagates, as before.
    """
    os.makedirs(destination, exist_ok=True)
def dlImage(url, outFilename, drm):
    """Download one page image to *outFilename* (+.jpg/.png by content-type);
    if *drm* is true, unscramble the 4x4 tile obfuscation in place."""
    r = requests.get(url, stream=True, headers=headers)
    if not r.ok:
        # Non-2xx: log the response and skip this page.
        print(r)
        return
    content_type = r.headers.get('content-type')
    if content_type == "image/jpeg":
        outFilename = outFilename + ".jpg"
    elif content_type == "image/png":
        outFilename = outFilename + ".png"
    else:
        print("content type not recognized!")
        print(r)
        return
    # Stream the body to disk in 1 KiB chunks.
    with open(outFilename, 'wb') as file:
        for block in r.iter_content(1024):
            if not block:
                break
            file.write(block)
    if drm == True:
        # Descramble: the image is divided into a 4x4 grid of tiles (sizes
        # rounded down to multiples of 8); tile (row, col) is moved to
        # (col, row) — a transpose of the tile grid.
        source = Image.open(outFilename)
        dest = Image.new(source.mode, source.size)

        def draw_subimage(sx, sy, sWidth, sHeight, dx, dy):
            rect = source.crop((sx, sy, sx+sWidth, sy+sHeight))
            dest.paste(rect, (dx, dy, dx+sWidth, dy+sHeight))
        DIVIDE_NUM = 4
        MULTIPLE = 8
        cell_width = (source.width // (DIVIDE_NUM * MULTIPLE)) * MULTIPLE
        cell_height = (source.height // (DIVIDE_NUM * MULTIPLE)) * MULTIPLE
        for e in range(0, DIVIDE_NUM * DIVIDE_NUM):
            t = e // DIVIDE_NUM * cell_height
            n = e % DIVIDE_NUM * cell_width
            r = e // DIVIDE_NUM
            i_ = e % DIVIDE_NUM
            u = i_ * DIVIDE_NUM + r
            s = u % DIVIDE_NUM * cell_width
            c = (u // DIVIDE_NUM) * cell_height
            draw_subimage(n, t, cell_width, cell_height, s, c)
        dest.save(outFilename)
url = sys.argv[1]


def downloader():
    """Download one readableProduct's pages, then recurse into the next one.

    Mutates the module-level *url* and *destination* as it walks the chain.
    """
    global url
    global destination
    if not url.endswith('.json'):
        url = url + ".json"
    print("Getting from url: "+url)
    r = requests.get(url=url, headers=headers)
    data = r.json()
    title = data['readableProduct']['title']
    # Prefix the title with an English machine translation.
    title = fr"{translator.translate(title, dest='en', src='ja').text} - {title}"
    title = title.replace(r'"', r'⸤')
    title = title.replace("/", "")
    # NOTE(review): this pattern starts with an empty alternative ('|'), so
    # the alternation always matches the empty string and removes nothing —
    # the leading branch was probably meant to be an escaped char. Confirm.
    title = re.sub('|\|\/|:|\*|\?|"|<|>|\|', '', title)
    print(title)
    # input()
    # Place the volume folder next to the user-supplied destination.
    parent = os.path.abspath(os.path.join(destination, os.pardir))
    destination = os.path.join(parent, title)
    make_destination(destination)

    def download_next():
        # Follow the chapter chain by rebinding the module-level url.
        global destination
        global url
        # new_dir_name = input("New dir: ")
        url = nextReadableProductUri
        # make_destination(destination)
        downloader()
    if 'readableProduct' in data:
        readableProduct = data['readableProduct']
        nextReadableProductUri = None
        if 'nextReadableProductUri' in readableProduct:
            nextReadableProductUri = readableProduct['nextReadableProductUri']
        if 'pageStructure' in readableProduct:
            pageStructure = readableProduct['pageStructure']
            if pageStructure == None:
                # Non-public volume: skip it but keep walking the chain.
                print('Could not download pages. Most likely this volume is not public.')
                if nextReadableProductUri is not None:
                    download_next()
                else:
                    print(url, "was the last to be downloaded")
                return
            # "usagi" marks unscrambled pages; anything else needs the DRM
            # tile descramble in dlImage.
            choJuGiga = pageStructure['choJuGiga'] if 'choJuGiga' in pageStructure else ''
            print('choJuGiga: ', choJuGiga)
            drm = choJuGiga != "usagi"
            pages = pageStructure['pages'] if 'pages' in pageStructure else []
            if len(pages) == 0:
                print("No pages found")
                sys.exit(1)
            pageIndex = 0
            for page in pages:
                if 'src' in page:
                    src = page['src']
                    print(src)
                    pageIndex += 1
                    # Zero-padded page number keeps files sortable.
                    outFile = os.path.join(destination, f"{pageIndex:04d}")
                    dlImage(src, outFile, drm)
        else:
            print('could not find pageStructure from json response')
            sys.exit(1)
        if nextReadableProductUri != None:
            print("Next URI: ", nextReadableProductUri)
            download_next()
    else:
        print('could not find readableProduct from json response')


downloader()
#coding: latin1
from algoritmia.datastructures.maps.hashmap import HashMap
import unittest
from algoritmia.datastructures.maps import IntKeyMap, LeftLeaningRedBlackTreeMap
class TestIntKeyMapping(unittest.TestCase):
    """Unit tests for IntKeyMap: construction, capacity handling, item access.

    FIX: replaced the long-deprecated ``assertEquals`` alias (removed in
    Python 3.12) with ``assertEqual`` and dropped one duplicated assertion.
    """

    def setUp(self):
        # Fixture map: {0: 1, 1: 2, ..., 9: 10}
        self.a = IntKeyMap(((i, i + 1) for i in range(10)))

    def test_ctor_withoutArgs_returnsEmptyMap(self):
        b = IntKeyMap()
        self.assertEqual(len(b), 0)

    def test_ctor_fromOtherMap_returnsIdenticalMap(self):
        b = IntKeyMap(self.a)
        self.assertEqual(len(self.a), len(b))
        for k in self.a:
            self.assertTrue(k in b)
            self.assertEqual(self.a[k], b[k])

    def test_ctor_fromIterableOfKeyValuePairs_returnsMapWithAllKeyValuePairs(self):
        b = IntKeyMap(self.a.items())
        self.assertEqual(len(self.a), len(b))
        for k in self.a:
            self.assertTrue(k in b)
            self.assertEqual(self.a[k], b[k])

    def test_ctor_withCapacity_returnsValidMap(self):
        b = IntKeyMap(capacity=10)
        b[9] = 1
        self.assertEqual(1, len(b))
        # Keys outside [0, capacity) must be rejected.
        self.assertRaises(KeyError, b.__setitem__, 10, 0)
        self.assertRaises(KeyError, b.__setitem__, -1, 0)

    def test_capacity(self):
        # Growing the capacity preserves existing entries and widens the key range.
        self.a.capacity = 100
        self.assertEqual(self.a[0], 1)
        self.assertEqual(self.a[9], 10)
        self.a[99] = 1
        self.assertEqual(self.a[99], 1)
        self.assertRaises(KeyError, self.a.__setitem__, 100, 0)
        self.assertRaises(KeyError, self.a.__setitem__, -1, 0)
        # Shrinking narrows the key range but keeps in-range entries.
        self.a.capacity = 3
        self.assertRaises(KeyError, self.a.__setitem__, 3, 0)
        self.assertRaises(KeyError, self.a.__setitem__, -1, 0)
        self.assertEqual(self.a[0], 1)
        self.a[2] = 2
        self.assertEqual(self.a[2], 2)

    def test_setitemAndGetitem_onMap_recoversData(self):
        a = IntKeyMap(capacity=100)
        for i in range(0, 100, 2):
            a[i] = "*" * i
        for i in range(0, 100, 2):
            self.assertEqual(a[i], "*" * i)

    def test_setitem_onIntKeyMapOutOfRange_raisesException(self):
        self.assertRaises(KeyError, self.a.__setitem__, 100, 0)
        self.assertRaises(KeyError, self.a.__setitem__, -1, 0)

    def test_getitem_onIntKeyMapOutOfRange_raisesException(self):
        self.assertRaises(KeyError, self.a.__getitem__, 100)
        self.assertRaises(KeyError, self.a.__getitem__, -1)

    def test_getitem_nonExistingKeyOnMap_raisesException(self):
        self.assertRaises(KeyError, self.a.__getitem__, 55)

    def test_delitem_onMap_shrinksMap(self):
        n = len(self.a)
        del self.a[0]
        self.assertEqual(len(self.a), n - 1)

    def test_delitem_onMap_raisesException(self):
        del self.a[0]
        self.assertRaises(KeyError, self.a.__getitem__, 0)

    def test_delitem_nonExistingKey_raisesException(self):
        del self.a[0]
        self.assertRaises(KeyError, self.a.__delitem__, 0)

    def test_contains_existingKeys_returnsTrue(self):
        for i in range(10):
            self.assertTrue(i in self.a)

    def test_contains_nonExistingKeys_returnsFalse(self):
        self.assertFalse(10 in self.a)
        self.assertFalse(-1 in self.a)

    def test_iter_onIntKeyMap_returnsAllKeys(self):
        for i, k in enumerate(sorted(self.a)):
            self.assertEqual(i, k)

    def test_len_onIntKeyMap_returnSize(self):
        self.assertEqual(len(self.a), 10)

    def test_len_onEmptyIntKeyMap_returnZero(self):
        self.assertEqual(len(IntKeyMap()), 0)

    def test_repr_onIntKeyMap_returnsEvaluableString(self):
        self.assertEqual(dict(self.a), dict(eval(repr(self.a))))
class TestHashMap(unittest.TestCase):
    """Unit tests for HashMap: construction, item access, deletion, iteration.

    FIX: ``test_len`` asserted ``len(IntKeyMap()) == 0`` — a copy-paste slip
    from the IntKeyMap suite above; this suite should exercise ``HashMap``.
    """

    def setUp(self):
        # Fixture map: {0: 1, 1: 2, ..., 9: 10}
        self.a = HashMap(((i, i + 1) for i in range(10)))

    def test_ctor(self):
        b = HashMap()
        self.assertEqual(len(b), 0)
        b = HashMap(self.a)
        self.assertEqual(len(self.a), len(b))
        b = HashMap(self.a.items())
        self.assertEqual(len(self.a), len(b))
        b = HashMap(capacity=10)
        b[9] = 1
        self.assertEqual(1, len(b))

    def test_set_and_getitem(self):
        a = HashMap(capacity=100)
        for i in range(0, 100, 2):
            a[i] = "*" * i
        for i in range(0, 100, 2):
            self.assertEqual(a[i], "*" * i)
        self.assertRaises(KeyError, self.a.__getitem__, 100)
        self.assertRaises(KeyError, self.a.__getitem__, -1)
        self.assertRaises(KeyError, self.a.__getitem__, 55)

    def test_delitem(self):
        n = len(self.a)
        del self.a[0]
        self.assertEqual(len(self.a), n - 1)
        self.assertRaises(KeyError, self.a.__getitem__, 0)
        self.assertRaises(KeyError, self.a.__delitem__, 0)

    def test_contains(self):
        for i in range(10):
            self.assertTrue(i in self.a)
        self.assertFalse(10 in self.a)
        self.assertFalse(-1 in self.a)

    def test_iter(self):
        # NOTE(review): assumes HashMap yields the sequential-int keys of this
        # fixture in order 0..9 — implementation-dependent; confirm.
        for i, k in enumerate(self.a):
            self.assertEqual(i, k)

    def test_len(self):
        self.assertEqual(len(self.a), 10)
        self.assertEqual(len(HashMap()), 0)

    def test_repr(self):
        self.assertEqual(dict(self.a), dict(eval(repr(self.a))))
class TestLeftLeaningRedBlackTree(unittest.TestCase):
    """Tests for LeftLeaningRedBlackTreeMap: sorted iteration, access, deletion."""
    def setUp(self):
        self.t = LeftLeaningRedBlackTreeMap()
        self.things = dict([("a",1), ("b",10),("c",3),("d",2),("e",11),("f",2),("g",0)])
        for k, v in self.things.items():
            self.t[k] = v
        # Sanity check done in setUp: the tree iterates its keys in sorted order.
        self.assertEqual(list(self.t), list(sorted(self.things)))
    def test_len(self):
        self.assertEqual(len(self.t), 7)
        t = LeftLeaningRedBlackTreeMap()
        self.assertEqual(len(t), 0)
    def test_getitem(self):
        for k, v in self.things.items():
            self.assertEqual(self.t[k], v)
        self.assertRaises(KeyError, self.t.__getitem__, "xx")
    def test_delitem(self):
        # Delete the first two inserted keys, then verify the rest are intact.
        x = list(self.things.items())
        del self.t[x[0][0]]
        self.assertEqual(len(self.t), 6)
        del self.t[x[1][0]]
        self.assertEqual(len(self.t), 5)
        self.assertRaises(KeyError, self.t.__getitem__, x[0][0])
        self.assertRaises(KeyError, self.t.__getitem__, x[1][0])
        for k, v in x[2:]:
            self.assertEqual(self.t[k], v)
    def test_keys(self):
        self.assertEqual(list(self.t.keys()), list(sorted(self.things)))
    def test_items(self):
        self.assertEqual(list(self.t.items()), list(sorted(self.things.items())))
    def test_values(self):
        # Values iterate in key order, so compare both sides sorted.
        self.assertEqual(sorted(list(self.t.values())), list(sorted(self.things.values())))
if __name__ == "__main__":
    #import sys;sys.argv = ['', 'Test.testName']
    unittest.main()  # discover and run every TestCase defined in this module
# Copyright (c) 2019 Eric Steinberger
class _PokerEnvArgs:
    """Shared configuration bundle for poker environments.

    Holds the table size, per-seat starting stacks, stack randomization
    bounds, and observation/reward flags. Subclasses add an action-space
    description on top of this.
    """

    def __init__(self,
                 n_seats,
                 starting_stack_sizes_list=None,
                 stack_randomization_range=(0, 0),
                 scale_rewards=True,
                 use_simplified_headsup_obs=True,
                 return_pre_transition_state_in_info=False,
                 *args,
                 **kwargs,
                 ):
        """
        Args:
            n_seats (int): Number of players at the table.
            starting_stack_sizes_list (list): One starting stack size per seat;
                None yields a list of ``n_seats`` Nones (env default per seat).
            stack_randomization_range (tuple): (min_delta, max_delta) random
                stack offsets applied each episode in evaluation mode; pass
                (0, 0) to disable.
            scale_rewards (bool): Whether rewards are scaled.
            use_simplified_headsup_obs (bool): In 1v1 games, strip observation
                fields only relevant with 3+ players (e.g. side pots).
            return_pre_transition_state_in_info (bool): Whether extra
                pre-transition state information is returned.
        """
        self.n_seats = n_seats
        self.starting_stack_sizes_list = (
            [None] * n_seats
            if starting_stack_sizes_list is None
            else starting_stack_sizes_list
        )
        self.stack_randomization_range = stack_randomization_range
        self.scale_rewards = scale_rewards
        self.use_simplified_headsup_obs = use_simplified_headsup_obs
        self.RETURN_PRE_TRANSITION_STATE_IN_INFO = return_pre_transition_state_in_info
class NoLimitPokerEnvArgs(_PokerEnvArgs):
    """Argument bundle for environments that inherit from NoLimitPokerEnv.

    See ``_PokerEnvArgs`` for the meaning of every constructor parameter.
    """

    def __init__(self,
                 n_seats,
                 starting_stack_sizes_list=None,
                 stack_randomization_range=(0, 0),
                 scale_rewards=True,
                 use_simplified_headsup_obs=True,
                 return_pre_transition_state_in_info=False,
                 *args,
                 **kwargs):
        super().__init__(n_seats,
                         starting_stack_sizes_list,
                         stack_randomization_range,
                         scale_rewards,
                         use_simplified_headsup_obs,
                         return_pre_transition_state_in_info,
                         *args, **kwargs)
        # Fixed-size action head; presumably FOLD / CHECK-CALL / RAISE with
        # the amount supplied separately — confirm against the env code.
        self.N_ACTIONS = 3
class DiscretizedPokerEnvArgs(_PokerEnvArgs):
    """Argument bundle for environments that inherit from DiscretizedPokerEnv.

    On top of the shared options this adds the discrete bet-size menu and the
    derived action-space size.
    """

    def __init__(self,
                 n_seats,
                 bet_sizes_list_as_frac_of_pot,
                 other_bet_sizes_list_as_frac_of_pot=None,
                 starting_stack_sizes_list=None,
                 stack_randomization_range=(0, 0),
                 uniform_action_interpolation=False,
                 use_simplified_headsup_obs=True,
                 scale_rewards=True,
                 return_pre_transition_state_in_info=False,
                 *args, **kwargs):
        """
        Args:
            bet_sizes_list_as_frac_of_pot (list): Allowed bet sizes as
                fractions of the current pot, e.g. [0.1, 0.3, 0.7, 1, 1.5].
            other_bet_sizes_list_as_frac_of_pot (list): Optional alternative
                bet-size menu; per the original (translated) note, presumably
                used for every action except the first of each betting
                round — confirm against the env implementation.
            uniform_action_interpolation (bool): When True, the actual bet is
                uniformly sampled between the chosen discrete size and its
                neighbouring sizes instead of being the exact amount.
            For all remaining arguments see ``_PokerEnvArgs``.
        """
        super().__init__(n_seats,
                         starting_stack_sizes_list,
                         stack_randomization_range,
                         scale_rewards,
                         use_simplified_headsup_obs,
                         return_pre_transition_state_in_info,
                         *args, **kwargs)
        self.bet_sizes_list_as_frac_of_pot = bet_sizes_list_as_frac_of_pot
        self.other_bet_sizes_list_as_frac_of_pot = other_bet_sizes_list_as_frac_of_pot
        self.uniform_action_interpolation = uniform_action_interpolation
        # +2 accounts for the two non-bet actions: FOLD and CHECK/CALL.
        self.N_ACTIONS = len(self.bet_sizes_list_as_frac_of_pot) + 2
        if other_bet_sizes_list_as_frac_of_pot is not None:
            self.other_n_actions = len(self.other_bet_sizes_list_as_frac_of_pot) + 2
class LimitPokerEnvArgs(_PokerEnvArgs):
    """Argument bundle for environments that inherit from LimitPokerEnv.

    See ``_PokerEnvArgs`` for the meaning of every constructor parameter.
    """

    def __init__(self,
                 n_seats,
                 starting_stack_sizes_list=None,
                 stack_randomization_range=(0, 0),
                 use_simplified_headsup_obs=True,
                 scale_rewards=True,
                 return_pre_transition_state_in_info=False,
                 *args, **kwargs):
        super().__init__(n_seats,
                         starting_stack_sizes_list,
                         stack_randomization_range,
                         scale_rewards,
                         use_simplified_headsup_obs,
                         return_pre_transition_state_in_info,
                         *args, **kwargs)
        # Fixed-size action head; in limit games bet sizes are fixed, so
        # presumably FOLD / CHECK-CALL / BET-RAISE — confirm against the env.
        self.N_ACTIONS = 3
|
import numpy as np
import tectosaur.nearfield.triangle_rules as triangle_rules
import tectosaur.nearfield.nearfield_op as nearfield_op
import tectosaur.ops.dense_integral_op as dense_integral_op
import tectosaur.ops.sparse_integral_op as sparse_integral_op
from tectosaur.ops.sparse_farfield_op import TriToTriDirectFarfieldOp, \
FMMFarfieldOp
import tectosaur.ops.mass_op as mass_op
import tectosaur.util.quadrature as quad
import tectosaur.mesh.mesh_gen as mesh_gen
import tectosaur.mesh.modify as modify
from tectosaur.interior import interior_integral
from tectosaur.util.test_decorators import slow, golden_master, kernel
from tectosaur.util.timer import Timer
import tectosaur as tct
from laplace import laplace
import logging
logger = logging.getLogger(__name__)  # module-level logger, stdlib convention
float_type = np.float32  # scalar precision handed to every operator built in these tests
def build_subset_mesh():
    """Build a rectangular test mesh plus two overlapping triangle subsets.

    Returns:
        (m, obs_subset, src_subset, obs_range, src_range): the mesh, the
        observation/source triangle index arrays (overlapping by half the
        mesh), and the matching index ranges into the assembled matrix.
        The factor 9 converts a triangle index to a matrix offset —
        presumably 3 basis nodes x 3 vector components per triangle; confirm
        against the operator layout.
    """
    n = 10
    m = mesh_gen.make_rect(n, n, [[-1, 0, 1], [-1, 0, -1], [1, 0, -1], [1, 0, 1]])
    n_tris = m[1].shape[0]
    overlap = n_tris // 2
    obs_subset = np.arange(n_tris // 2)
    src_subset = np.arange(n_tris // 2 - overlap, n_tris)
    obs_range = [0, (obs_subset[-1] + 1) * 9]
    src_range = [src_subset[0] * 9, (src_subset[-1] + 1) * 9]
    # import matplotlib.pyplot as plt
    # plt.figure()
    # plt.triplot(m[0][:,0], m[0][:,2], m[1], 'k-')
    # plt.figure()
    # plt.triplot(m[0][:,0], m[0][:,2], m[1][obs_subset], 'b-')
    # plt.triplot(m[0][:,0], m[0][:,2], m[1][src_subset], 'r-')
    # plt.show()
    return m, obs_subset, src_subset, obs_range, src_range
def test_op_subset_dense():
    """Dense op assembled on a subset must equal the matching sub-matrix of the full op."""
    m, obs_subset, src_subset, obs_range, src_range = build_subset_mesh()
    k = 'elasticH3'
    params = [1.0, 0.25]
    subset_op = dense_integral_op.DenseIntegralOp(
        7, 4, 3, 2.0, k, params, m[0], m[1], float_type,
        obs_subset = obs_subset,
        src_subset = src_subset,
    ).mat
    full_op = dense_integral_op.DenseIntegralOp(
        7, 4, 3, 2.0, k, params, m[0], m[1], float_type,
    ).mat
    # Slice the full matrix down to the subset's row/column ranges and compare.
    subfull = full_op[obs_range[0]:obs_range[1],src_range[0]:src_range[1]]
    np.testing.assert_almost_equal(subfull, subset_op)
def test_op_subset_sparse():
    """Subset sparse-op matvec must match the corresponding slice of the full op.

    BUG FIX: this test referenced ``PtToPtDirectFarfieldOp``, which is neither
    defined nor imported anywhere in this module (NameError at runtime). The
    direct farfield operator actually imported at the top of the file is
    ``TriToTriDirectFarfieldOp``, so that is used instead.
    """
    m, obs_subset, src_subset, obs_range, src_range = build_subset_mesh()
    k = 'elasticH3'
    params = [1.0, 0.25]
    subset_op = sparse_integral_op.SparseIntegralOp(
        7, 4, 3, 2.0, k, params, m[0], m[1], float_type,
        farfield_op_type = TriToTriDirectFarfieldOp,
        obs_subset = obs_subset,
        src_subset = src_subset,
    )
    y2 = subset_op.dot(np.ones(subset_op.shape[1]))
    full_op = sparse_integral_op.SparseIntegralOp(
        7, 4, 3, 2.0, k, params, m[0], m[1], float_type,
        farfield_op_type = TriToTriDirectFarfieldOp
    )
    y1 = full_op.dot(np.ones(full_op.shape[1]))
    # Compare only the rows belonging to the observation subset.
    np.testing.assert_almost_equal(y1[obs_range[0]:obs_range[1]], y2)
@golden_master()
def test_farfield_two_tris(request):
    """Golden master: farfield interaction between two well-separated triangles.

    FIX: ``np.int`` was a deprecated alias for the builtin ``int`` and was
    removed in NumPy 1.24 (AttributeError on current NumPy); use ``int``.
    """
    pts = np.array(
        [[1, 0, 0], [2, 0, 0], [1, 1, 0],
        [5, 0, 0], [6, 0, 0], [5, 1, 0]]
    )
    obs_tris = np.array([[0, 1, 2]], dtype = int)
    src_tris = np.array([[3, 4, 5]], dtype = int)
    params = [1.0, 0.25]
    out = dense_integral_op.farfield_tris(
        'elasticH3', params, pts, obs_tris, src_tris, 3, float_type
    )
    return out
@golden_master()
def test_gpu_vert_adjacent(request):
    """Golden master: GPU vertex-adjacent integral for two triangles sharing vertex 1."""
    pts = np.array([[0,0,0],[1,0,0],[0,1,0],[1,-1,0],[2,0,0]]).astype(np.float32)
    tris = np.array([[1,2,0],[1,3,4]]).astype(np.int32)
    params = [1.0, 0.25]
    pairs_int = nearfield_op.PairsIntegrator('elasticH3', params, np.float32, 1, 1, pts, tris)
    # Pair spec [obs_tri, src_tri, obs_vert_slot, src_vert_slot] — presumably;
    # confirm against PairsIntegrator.vert_adj.
    out = pairs_int.vert_adj(3, np.array([[0,1,0,0]]))
    return out
def test_vert_adj_separate_bases():
    """Vertex-adjacent integral of one pair must equal the sum over its split
    sub-triangles once each piece is re-expressed in the original basis.

    FIX: the kernel name constant ``K`` was defined but never used — the
    literal 'elasticH3' was repeated at both call sites. Pass ``K`` so the
    two stay in sync.
    """
    K = 'elasticH3'
    params = [1.0, 0.25]
    nq = 6
    full_tris = np.array([[0,1,2], [0,4,3]])
    pts = np.array([[0,0,0],[1,0,0],[0,1,0],[-0.5,0,0],[0,0,-2],[0.5,0.5,0]])
    pairs_int = nearfield_op.PairsIntegrator(K, params, np.float32, 1, 1, pts, full_tris)
    I = pairs_int.vert_adj(nq, np.array([[0,1,0,0]]))
    # Reference-triangle coordinates of each sub-triangle inside the original.
    obs_basis_tris = np.array([
        [[0,0],[0.5,0.5],[0,1]], [[0,0],[1,0],[0.5,0.5]]
    ])
    src_basis_tris = np.array([
        [[0,0],[1,0],[0,1]], [[0,0],[1,0],[0,1]]
    ])
    sep_tris = np.array([[0,5,2], [0,1,5], [0,4,3], [0,4,3]])
    pairs_int = nearfield_op.PairsIntegrator(K, params, np.float32, 1, 1, pts, sep_tris)
    I0 = pairs_int.vert_adj(nq, np.array([[0,2,0,0],[1,3,0,0]]))
    from tectosaur.nearfield._table_lookup import sub_basis
    I1 = np.array([sub_basis(
        I0[i].flatten().tolist(), obs_basis_tris[i].tolist(), src_basis_tris[i].tolist()
    ) for i in range(2)]).reshape((2,3,3,3,3))
    np.testing.assert_almost_equal(I[0], I1[0] + I1[1], 6)
def full_integral_op_tester(k, use_fmm, n = 5):
    """Compare dense vs. sparse assembly of kernel *k* on two meshes and return
    the stacked sparse results for golden-master comparison.

    BUG FIX: referenced undefined names ``PtToPtFMMFarfieldOp`` and
    ``PtToPtDirectFarfieldOp`` (NameError at runtime). The farfield operators
    this module imports are ``FMMFarfieldOp`` and ``TriToTriDirectFarfieldOp``;
    the constructor arguments are kept as written — confirm they match
    ``FMMFarfieldOp``'s signature.
    """
    pts = np.array([[0,0,0], [1,1,0], [0, 1, 1], [0,0,2]])
    tris = np.array([[0, 1, 2], [2, 1, 3]])
    rect_mesh = mesh_gen.make_rect(n, n, [[-1, 0, 1], [-1, 0, -1], [1, 0, -1], [1, 0, 1]])
    out = np.zeros(1)
    params = [1.0, 0.25]
    for m in [(pts, tris), rect_mesh]:
        dense_op = dense_integral_op.DenseIntegralOp(
            5, 3, 3, 2.0, k, params, m[0], m[1], float_type
        )
        x = np.ones(dense_op.shape[1])
        dense_res = dense_op.dot(x)
        if use_fmm:
            farfield_op_type = FMMFarfieldOp(100, 3.0, 300)
        else:
            farfield_op_type = TriToTriDirectFarfieldOp
        sparse_op = sparse_integral_op.SparseIntegralOp(
            5, 3, 3, 2.0, k, params, m[0], m[1],
            float_type, farfield_op_type
        )
        sparse_res = sparse_op.dot(x)
        # Relative agreement between the dense and sparse assemblies.
        assert(np.max(np.abs(sparse_res - dense_res)) / np.mean(np.abs(dense_res)) < 5e-4)
        out = np.hstack((out, sparse_res))
    return out
@slow
@golden_master(digits = 5)
def test_full_integral_op_nofmm(request, kernel):
    """Golden master: dense-vs-sparse comparison without FMM, per kernel fixture."""
    return full_integral_op_tester(kernel, False)
@slow
@golden_master(digits = 7)
def test_full_integral_op_fmm(request):
    """Golden master: dense-vs-sparse comparison with the FMM farfield on a larger mesh."""
    return full_integral_op_tester('elasticU3', True, n = 30)
@golden_master(digits = 7)
def test_full_integral_op_nofmm_fast(request):
    """Golden master: small dense 'elasticU3' operator matrix (quick, not marked slow)."""
    m = mesh_gen.make_rect(5, 5, [[-1, 0, 1], [-1, 0, -1], [1, 0, -1], [1, 0, 1]])
    dense_op = dense_integral_op.DenseIntegralOp(
        5, 3, 3, 2.0, 'elasticU3', [1.0, 0.25], m[0], m[1], float_type
    )
    return dense_op.mat
def test_mass_op():
    """Mass-matrix entries must match high-order quadrature of the P1 basis products."""
    m = mesh_gen.make_rect(2, 2, [[0,0,0],[1,0,0],[1,1,0],[0,1,0]])
    op = mass_op.MassOp(3, m[0], m[1])
    # Reference values: 10-point Gauss quadrature of phi0*phi0 and phi0*phi1
    # over the unit reference triangle.
    exact00 = quad.quadrature(
        lambda x: (1 - x[:,0] - x[:,1]) * (1 - x[:,0] - x[:,1]),
        quad.gauss2d_tri(10)
    )
    exact03 = quad.quadrature(
        lambda x: (1 - x[:,0] - x[:,1]) * x[:,0],
        quad.gauss2d_tri(10)
    )
    np.testing.assert_almost_equal(op.mat[0,0], exact00)
    np.testing.assert_almost_equal(op.mat[0,3], exact03)
def test_mass_tensor_dim():
    """A tensor_dim=1 mass op must act like one component of the default (3D) op."""
    m = mesh_gen.make_rect(2, 2, [[0,0,0],[1,0,0],[1,1,0],[0,1,0]])
    op1 = mass_op.MassOp(3, m[0], m[1], tensor_dim = 1)
    op3 = mass_op.MassOp(3, m[0], m[1])
    # Zero out components 1 and 2 so only component 0 carries signal.
    x = np.random.rand(op3.shape[1]).reshape((-1,3,3))
    x[:,:,1] = 0
    x[:,:,2] = 0
    y3 = op3.dot(x.flatten())
    y1 = op1.dot(x[:,:,0].flatten())
    np.testing.assert_almost_equal(y1, y3.reshape((-1,3,3))[:,:,0].flatten())
@golden_master()
def test_interior(request):
    """Golden master: interior evaluation at points offset above a small mesh,
    with random (seeded) unit observation normals."""
    np.random.seed(10)
    corners = [[-1, -1, 0], [1, -1, 0], [1, 1, 0], [-1, 1, 0]]
    pts, tris = mesh_gen.make_rect(3, 3, corners)
    obs_pts = pts.copy()
    obs_pts[:,2] += 1.0
    obs_ns = np.random.rand(*obs_pts.shape)
    obs_ns /= np.linalg.norm(obs_ns, axis = 1)[:,np.newaxis]
    input = np.ones(tris.shape[0] * 9)
    K = 'elasticH3'
    params = [1.0, 0.25]
    op = tct.InteriorOp(
        obs_pts, obs_ns, (pts, tris), K, 4, params, float_type
    )
    return op.dot(input)
@golden_master()
def test_interior_nearfield(request):
    """Golden master: interior evaluation on a grid slicing through the source
    mesh plane, exercising the nearfield path."""
    np.random.seed(10)
    corners = [[-1, -1, 0], [1, -1, 0], [1, 1, 0], [-1, 1, 0]]
    src_mesh = mesh_gen.make_rect(30, 30, corners)
    # Observation points: a 50x50 grid in the y=0 plane, from -3 to 3.
    xs = np.linspace(-3, 3, 50)
    X, Z = np.meshgrid(xs, xs)
    Y = np.ones_like(X) * 0.0
    obs_pts = np.array([e.flatten() for e in [X, Y, Z]]).T.copy()
    obs_ns = np.zeros(obs_pts.shape)
    obs_ns[:,2] = 1.0
    # Unit input on component 0 only.
    input = np.zeros(src_mesh[1].shape[0] * 9)
    input.reshape((-1,3))[:,0] = 1.0
    K = 'elasticT3'
    params = [1.0, 0.25]
    op = tct.InteriorOp(
        obs_pts, obs_ns, src_mesh, K, 4, params, float_type
    )
    out = op.dot(input)
    # import matplotlib.pyplot as plt
    # for d in range(3):
    #     plt.subplot(1, 3, d + 1)
    #     plt.contourf(X, Z, out.reshape((-1,3))[:,d].reshape(X.shape))
    #     plt.colorbar()
    # plt.show()
    return out
@profile
def benchmark_nearfield_construction():
    """Benchmark NearfieldIntegralOp assembly on an 80x80 rect mesh.

    NOTE(review): ``@profile`` is only injected into builtins when this file
    is run under kernprof (line_profiler); importing it with plain python
    raises NameError at this decorator.
    """
    corners = [[-1, -1, 0], [1, -1, 0], [1, 1, 0], [-1, 1, 0]]
    near_threshold = 1.5  # NOTE(review): unused
    n = 80
    pts, tris = mesh_gen.make_rect(n, n, corners)
    n = nearfield_op.NearfieldIntegralOp(1, 1, 1, 2.0, 'elasticU3', [1.0, 0.25], pts, tris)  # NOTE(review): rebinds n
@profile
def benchmark_vert_adj():
    """Benchmark the vertex-adjacent pairs integrator on a ~2^L-sided mesh."""
    from tectosaur.util.timer import Timer
    import tectosaur.mesh.find_near_adj as find_near_adj
    from tectosaur.nearfield.pairs_integrator import PairsIntegrator
    kernel = 'elasticH3'
    params = [1.0, 0.25]
    float_type = np.float32
    L = 5
    nq_vert_adjacent = 7
    # Side length chosen so the triangle count scales like 2^L.
    nx = ny = int(2 ** L / np.sqrt(2))
    t = Timer()
    pts, tris = mesh_gen.make_rect(nx, ny, [[-1, -1, 0], [-1, 1, 0], [1, 1, 0], [1, -1, 0]])
    logger.debug('n_tris: ' + str(tris.shape[0]))
    t.report('make rect')
    close_or_touch_pairs = find_near_adj.find_close_or_touching(pts, tris, 1.25)
    nearfield_pairs, va, ea = find_near_adj.split_adjacent_close(close_or_touch_pairs, tris)
    t.report('find near')
    pairs_int = PairsIntegrator(kernel, params, float_type, 1, 1, pts, tris)
    t.report('setup integrator')
    va_mat_rot = pairs_int.vert_adj(nq_vert_adjacent, va)
    t.report('vert adj')
if __name__ == "__main__":
    benchmark_vert_adj()
|
from .maa import MatchActionAcceleration
|
import argparse
from WebScraper import *
from CSV import *
from Model.prediction import *
# CLI: -u/--url is required; -m/--model picks the classifier (1=GBC default, 2=RFC, 3=XGB).
parser = argparse.ArgumentParser(description='Scrape information from a website/list of websites to determine if it is a phishing site.',
                                 formatter_class=argparse.RawTextHelpFormatter)
parser.add_argument('-u', '--url', help='The URL to scan, inclusive of HTTP/HTTPS. (Required)', metavar='[URL]',required=True)
parser.add_argument('-m', '--model', help='The model to be used for predicting the outcome.\n1 - GBC\n2 - RFC\n3 - XGB', metavar='[MODEL NUM]', default=1)
def runPredict(url, model):
    """Scrape *url*, build the feature vector, run the selected model, and save
    the annotated result to CSV.

    Args:
        url (str): Full URL (including scheme) to analyse.
        model (int-like): Model selector (1=GBC, 2=RFC, 3=XGB).

    Returns:
        int: 1 on success, -1 on any error.

    Fixes vs. the original:
      * ``getAge``, ``web_traffic`` and ``checkSSL`` were each called more
        than once (separate network requests that could even disagree);
        each is now called once and reused.
      * The report rewrite hard-coded ``data["LongURL"] = (0, len(url))``,
        discarding the -1/0/1 verdict computed earlier; it now follows the
        same ``(original_value, details)`` pattern as every other feature.
    """
    try:
        # All features default to 1 ("legitimate") and are overwritten below.
        data = {"LongURL": 1, "ShortURL": 1, "Redirecting": 1, "PrefixSuffix": 1,
                "SubDomains": 1, "HTTPS": 1, "RequestURL": 1, "AnchorURL": 1, "ServerFormHandler": 1, "StatusBarCust": 1,
                "AgeofDomain": 1, "WebsiteTraffic": 1}
        domain = getDomain(url, 1)
        subdoms = (getDomain(url, 3) + '.' + domain).split('.')
        # All labels except the last, dot-joined (equivalent to the original
        # append-then-strip loop).
        subdom = '.'.join(subdoms[:-1]).strip('.')
        age = 0
        # Single WHOIS lookup, reused for both the feature and the report.
        whois_record = getAge(url)['WhoisRecord']
        whois_ok = "dataError" not in whois_record
        # AgeofDomain: domains younger than ~6 months are flagged; a failed
        # WHOIS lookup is also treated as suspicious.
        if whois_ok:
            cdate = whois_record['createdDate'][:10:].split('-')
            createdate = datetime.date(int(cdate[0]), int(cdate[1]), int(cdate[2]))
            age = (datetime.date.today() - createdate).days
            if age < 180:
                data["AgeofDomain"] = -1
        else:
            data["AgeofDomain"] = -1
        # LongURL: >75 chars suspicious (-1), 54-75 chars borderline (0).
        if len(url) > 75:
            data["LongURL"] = -1
        elif len(url) > 53:
            data["LongURL"] = 0
        # ShortURL: known URL-shortener domains.
        if domain == "tinyurl.com" or domain == "bit.ly":
            data["ShortURL"] = -1
        # Features derived from the page HTML.
        HTML = getHTML(url)
        if HTML:
            data["ServerFormHandler"] = HTML["SFH"][0]
            data["RequestURL"] = HTML["RequestURL"][0]
            data["AnchorURL"] = HTML["URL_of_Anchor"][0]
            data["StatusBarCust"] = HTML["StatusBarCust"][0]
        data["PrefixSuffix"] = getPreffixSuffix(url)
        traffic = web_traffic(url)    # hoisted: reused for the report below
        data["WebsiteTraffic"] = traffic[0]
        data["SubDomains"] = getSubDomain(subdom)
        data["Redirecting"] = checkRedirect(getDomain(url, 4))
        ssl_info = checkSSL(url)      # hoisted: reused for the report below
        data["HTTPS"] = ssl_info[0]
        # Generate prediction based off data
        data["Prediction"] = predict(int(model), [data])[0]
        # Rewrite features as (value, details) tuples for report generation.
        data["LongURL"] = (data["LongURL"], len(url))
        if data["Redirecting"] == -1:
            data["Redirecting"] = (-1, url.find("//"))
        if data["PrefixSuffix"] == -1:
            data["PrefixSuffix"] = (-1, [i for i, ch in enumerate(url) if ch == "-"])
        if data["SubDomains"] != 1:
            dot_positions = [i for i, ch in enumerate(subdom) if ch == "."]
            data["SubDomains"] = (data["SubDomains"], subdom.count('.'), dot_positions)
        if data["HTTPS"] != 1:
            data["HTTPS"] = ssl_info
        if data["ServerFormHandler"] != 1:
            data["ServerFormHandler"] = HTML["SFH"]
        if data["RequestURL"] != 1:
            data["RequestURL"] = HTML["RequestURL"]
        if data["AnchorURL"] != 1:
            data["AnchorURL"] = HTML["URL_of_Anchor"]
        if data["StatusBarCust"] != 1:
            data["StatusBarCust"] = HTML["StatusBarCust"]
        if data["AgeofDomain"] != 1:
            if whois_ok:
                data["AgeofDomain"] = (data["AgeofDomain"], age)
            else:
                data["AgeofDomain"] = (data["AgeofDomain"], "Unable to retrieve domain age.")
        if data["WebsiteTraffic"] != 1:
            data["WebsiteTraffic"] = traffic
        data["URL"] = url
        data["Model"] = int(model)
        savecsv([data])
        print("Prediction complete.")
        return 1
    except Exception as e:
        # Best-effort CLI tool: report the failure instead of crashing.
        print("Error occured:\n", e)
        return -1
def checkPrediction(url, model):
    """Return 1 when a fresh prediction is needed for *url*, 0 when one is cached.

    Exits the process with status -1 when *model* is outside the supported
    range 1-3. A cached entry is detected via ``loadcsv``.
    """
    if not 1 <= model <= 3:
        exit(-1)
    if loadcsv(url):
        print("Existing prediction for URL.")
        return 0
    return 1
if __name__ == "__main__":
    args = parser.parse_args()
    if args.url:
        url = args.url
        model = int(args.model)
        if not checkPrediction(url, model):
            # A cached result exists: ask whether to re-run or show it.
            # BUG FIX: the original loop condition `rerun != "y" or rerun != "n"`
            # is a tautology (always true for any value); the loop only worked
            # because of the breaks. `while True` states the intent directly:
            # re-prompt until the user answers y or n.
            while True:
                rerun = input("Would you like to run a new prediction on the URL? [y/n]\n")
                if rerun == "y":
                    runPredict(url, model)
                    break
                elif rerun == "n":
                    print("Existing data is:\n")
                    print(loadcsv(url))
                    break
        else:
            runPredict(url, model)
    else:
        print("No URL")
"""Class to manage the DB Operations."""
import os.path
import secrets
import os
import datetime
from dotenv import load_dotenv
import sqlite3
import mariadb
import logging
from .constants import Role
load_dotenv()
# The SQLITE env var selects the SQLite backend; default to SQLite when the
# variable is unset or empty.
# BUG FIX: the original called `.lower()` directly on os.getenv("SQLITE"),
# which raised AttributeError when SQLITE was not defined; the `None` entry
# in its membership list could never match a lowered string anyway.
is_sqlite = (os.getenv("SQLITE") or "").lower() in ('true', '1', 'y', '')
sqlite_file = os.getenv("SQLITE_DB")
class Connector:
    """Manage the database connection (SQLite or MariaDB, chosen by env vars)."""
    def __init__(self, is_testing=False, test_sqlite=None):
        """Open the database connection.

        Args:
            is_testing (bool): When True, connect to *test_sqlite* instead of
                the configured database.
            test_sqlite (str): Path of the SQLite file used in testing mode.

        NOTE(review): if the mariadb.connect call fails, the error is logged
        but ``self.conn`` is never assigned, so any later method call raises
        AttributeError instead of a clear error.
        """
        if is_testing:
            self.conn = sqlite3.connect(
                test_sqlite, check_same_thread=False
            )
        elif is_sqlite is True:
            self.conn = sqlite3.connect(
                sqlite_file, check_same_thread=False)
        else:
            try:
                self.conn = mariadb.connect(
                    user=os.getenv("MARIADB_USERNAME"),
                    password=os.getenv("MARIADB_PASSWORD"),
                    host=os.getenv("MARIADB_HOST"),
                    port=int(os.getenv("MARIADB_PORT", 3306)),
                    database=os.getenv("MARIADB_DATABASE")
                )
            except mariadb.Error as e:
                logging.error('Error_Connector: {}'.format(e))
    def __to_dict(self, cursor, values):
        """Turn a tuple of values into a dictionary keyed by column name.

        NOTE(review): not referenced by any method visible in this class;
        possibly kept for future use.

        Args:
            cursor: database connection cursor, use right after required query
            values(tuple): a SINGLE TUPLE filled with desired values
        Returns:
            d(dict): a dictionary of key-value pairs (None for empty input)
        """
        d = {}
        if not values:
            return None
        # cursor.description[i][0] is the i-th column name.
        for i, v in zip(cursor.description, values):
            d[i[0]] = v
        return d
    def add_user(self, email, password_hash, role, address, phone):
        """Add a user to the Participant table and return its new id.

        Args:
            email (string): email address of the user
            password_hash (string): hashed password
            role (Role): role enum; its ``.value`` is stored
            address (string): postal address
            phone (string): phone number
        Returns:
            int: the id of the inserted row, or None on error.
        """
        # TODO: Handle address and phone with the new table structure
        cur = self.conn.cursor()
        try:
            cur.execute(
                'INSERT INTO Participant(email, password_hash, role, address, phone) VALUES (?, ?, ?, ?, ?)',
                (email, password_hash, role.value, address, phone)
            )
            self.conn.commit()
            # Read back the generated id via the (unique) email.
            cur.execute(
                'SELECT id FROM Participant WHERE email = ?',
                (email,)
            )
            _id = cur.fetchall()[0][0]
            return _id
        except (sqlite3.Error, mariadb.Error) as e:
            logging.error('Error_Connector_add_user: {}'.format(e))
            return None
        finally:
            cur.close()
    def get_user(self, email, password_hash):
        """Look up one user by email + password hash.

        Returns:
            dict: {'id': ..., 'role': ...} for the matching user, or None when
            no row matches or an error occurs.
        """
        cur = self.conn.cursor()
        try:
            cur.execute(
                'SELECT id, role FROM Participant WHERE email = ? AND password_hash = ?',
                (email, password_hash,)
            )
            res = cur.fetchall()
            if len(res) < 1:
                return None
            return {
                'id': res[0][0],
                'role': res[0][1]
            }
        except (sqlite3.Error, mariadb.Error) as e:
            logging.error(
                'Error_Connector_get_users: {}'.format(e)
            )
            return None
        finally:
            cur.close()
    def add_user_session(self, user_id):
        """Add session for user authentication.

        Args:
            user_id (int): the id of the user
        Returns:
            session_id (string): session token, or None on error
        """
        cur = self.conn.cursor()
        try:
            # Cryptographically secure random token (secrets, not random).
            token = secrets.token_urlsafe()
            cur.execute(
                'INSERT INTO UserSession (token, user_id) VALUES (?, ?)',
                (token, user_id)
            )
            self.conn.commit()
        except (sqlite3.Error, mariadb.Error) as e:
            logging.error('Error_Connector_add_user_session: {}'.format(e))
            return None
        finally:
            cur.close()
        return token
    def check_session_validity(self, token, role=None):
        """Check the session validity.

        Sessions expire after 30 minutes of inactivity; a successful check
        refreshes the session's creation_time (sliding expiry).

        Args:
            token (string): session token
            role (Role): optional role the session's user must have
        Returns:
            user_id (int): the user id associated with the session,
                           None if session id is expired or invalid
        Raises:
            TypeError: when *role* is not None/FACULTY_MEMBER/EXTERNAL_GUEST.
        """
        cur = self.conn.cursor()
        try:
            if role not in [None, Role.FACULTY_MEMBER, Role.EXTERNAL_GUEST]:
                raise TypeError("expected None, Role.FACULTY_MEMBER, Role.EXTERNAL_GUEST")
            date_30_min_ago = datetime.datetime.utcnow() - datetime.timedelta(minutes=30)
            if role:
                # Join against Participant so the role is checked too.
                cur.execute(
                    "SELECT s.user_id FROM UserSession s \
                    INNER JOIN Participant u on u.role = ? AND u.id = s.user_id WHERE token = ? AND\
                    s.creation_time >= ? ",
                    (role.value, token, date_30_min_ago)
                )
            else:
                cur.execute(
                    "SELECT user_id FROM UserSession WHERE token = ? AND\
                    creation_time >= ?",
                    (token, date_30_min_ago)
                )
            res = cur.fetchall()
            if len(res) < 1:
                return None
            # Valid session: bump its timestamp to extend the 30-minute window.
            cur.execute(
                "UPDATE UserSession SET creation_time=CURRENT_TIMESTAMP WHERE token = ?",
                (token,)
            )
            self.conn.commit()
            return res[0][0]
        except (sqlite3.Error, mariadb.Error) as e:
            print(e)
            logging.error(
                'Error_Connector_check_session_validity: {}'.format(e)
            )
            return None
        finally:
            cur.close()
    def get_users(self):
        """Return (id, email) tuples for all users, or None on error."""
        cur = self.conn.cursor()
        try:
            cur.execute(
                'SELECT id, email FROM Participant'
            )
            res = cur.fetchall()
            return res
        except (sqlite3.Error, mariadb.Error) as e:
            logging.error(
                'Error_Connector_get_users: {}'.format(e)
            )
            return None
        finally:
            cur.close()
    # maintaining clarity
    def log_user(self, user_id, barcode_id, checkin_time):
        """Log users barcode scanning.

        Args:
            user_id (string): Id of the user.
            barcode_id (string): Barcode scanned
            checkin_time: timestamp of the scan
        Returns:
            boolean: If the scan was succcessful
        """
        # TODO: implement log user
        cur = self.conn.cursor()
        try:
            cur.execute(
                'INSERT INTO Checkin(barcodeId, checkinTime, pId) VALUES (?, ?, ?)',
                (barcode_id, checkin_time, user_id)
            )
            self.conn.commit()
        except (sqlite3.Error, mariadb.Error) as e:
            logging.error('Error_log: {}'.format(e))
            return False
        finally:
            cur.close()
        return True
    def get_history(self, user_id):
        """Gets the check-in history of the user.

        Args:
            user_id (string): Id of the user.
        Returns:
            history (array): list of {barcodeId, checkinTime} dicts, or None
            on error.

        NOTE(review): the cursor is created inside the try block; if
        ``cursor()`` itself raised, ``cur.close()`` in the finally clause
        would fail with NameError.
        """
        try:
            cur = self.conn.cursor()
            cur.execute(
                'SELECT barcodeId, checkinTime FROM Checkin WHERE pId = ?',
                (user_id,)
            )
            res = cur.fetchall()
            history = []
            for i in res:
                his = {
                    "barcodeId" : i[0],
                    "checkinTime" : i[1]
                }
                history.append(his)
            return history
        except (sqlite3.Error, mariadb.Error) as e:
            logging.error('Error_get_history: {}'.format(e))
            return None
        finally:
            cur.close()
connector = Connector()
|
# Medium
from random import randint
class Solution():
    """Experimental grid-to-graph scaffolding (named after LeetCode's
    "Rotting Oranges", but it only relabels cells and builds an adjacency
    structure — it does not yet compute the rotting time).

    Fixes vs. the original: removed the no-op ``and True`` in the rejection
    loop, the no-op ``if ...: False`` statement, and the unused chained
    assignment names; behavior (including the sequence of random draws) is
    unchanged.
    """

    def gen_random_value(self):
        """Return a random int in [0, 50], used as a cell label."""
        return randint(0, 50)

    def orangesRotting(self, grid: list[list[int]]) -> int:
        # NOTE(review): annotated -> int but no value is ever returned
        # (implicitly None); the function currently only prints its results.
        graph_structure = []
        graph_dict = {}
        stacks = []
        # Relabel every cell with a distinct random value, rejection-sampling
        # until an unused label is drawn. NOTE(review): with more than 51
        # cells this loop cannot terminate (only 51 distinct labels exist).
        for row in grid:
            for j in range(len(row)):
                random_value = self.gen_random_value()
                while random_value in stacks:
                    random_value = self.gen_random_value()
                row[j] = random_value
                stacks.append(random_value)
        # Connect each cell to its column/diagonal/row neighbour in the next
        # row. NOTE(review): the inner loop runs over len(grid)-1, i.e. it
        # assumes a square grid.
        for i in range(0, len(grid) - 1):
            out_one, out_two = grid[i], grid[i + 1]
            for j in range(0, len(grid) - 1):
                # same column
                if out_one[j] and out_two[j] or out_one[j] == 0 or out_two[j] == 0:
                    graph_structure.append([out_one[j], out_two[j]])
                # same diagonal
                if out_one[j] and out_two[j + 1] or out_one[j] == 0 or out_two[j + 1] == 0:
                    graph_structure.append([out_one[j], out_two[j + 1]])
                # same row
                if out_one[j] and out_one[j + 1] or out_one[j] == 0 or out_one[j + 1] == 0:
                    graph_structure.append([out_one[j], out_one[j + 1]])
        # Build the adjacency-list dict from the edge list.
        for edge in graph_structure:
            graph_dict.setdefault(edge[0], []).append(edge[1])
        print(graph_structure)
        print(graph_dict)
print(graph_dict)
# Demo run on a sample 3x3 grid (module-level side effect: prints the graph).
grid = [[2,1,1],[1,1,0],[0,1,1]]
sol = Solution()
sol.orangesRotting(grid)
|
import pytest
import re
# example values for the respective types
# example values for the respective types
ATTRIBUTE_TYPE_EXAMPLE_VALUES = \
    {
        'int': [23, 42, -8],
        'bool': ['true', 'false'],
        'string': ['foo', 'baz', 'bar'],
        'color': ['#ff00ff', '#9fbc00'],  # FIXME: include named colors
        'uint': [23, 42]
    }
ATTRIBUTE_TYPES = list(ATTRIBUTE_TYPE_EXAMPLE_VALUES.keys())  # the type names themselves
def test_attr_cmd(hlwm):
    """The ``attr`` command lists objects/attributes and rejects abbreviated paths."""
    assert hlwm.get_attr('monitors.focus.name') == ''
    hlwm.call('attr')
    hlwm.call('attr tags')
    hlwm.call('attr tags.')
    hlwm.call('attr tags.count')
    assert hlwm.call('attr tags.count').stdout == hlwm.get_attr('tags.count')
    hlwm.call_xfail('attr tags.co')  # truncated attribute names must fail
@pytest.mark.parametrize('object_path', ['', 'clients', 'theme', 'monitors'])
def test_object_completion(hlwm, object_path):
assert hlwm.list_children(object_path) \
== hlwm.list_children_via_attr(object_path)
def test_object_tree(hlwm):
t1 = hlwm.call('object_tree').stdout.splitlines()
t2 = hlwm.call('object_tree theme.').stdout.splitlines()
t3 = hlwm.call('object_tree theme.tiling.').stdout.splitlines()
assert len(t1) > len(t2)
assert len(t2) > len(t3)
def test_substitute(hlwm):
expected_output = hlwm.get_attr('tags.count') + '\n'
call = hlwm.call('substitute X tags.count echo X')
assert call.stdout == expected_output
@pytest.mark.parametrize('prefix', ['set_attr settings.',
                                    'attr settings.',
                                    'cycle_value ',
                                    'set '])
def test_set_attr_completion(hlwm, prefix):
    """Boolean settings complete to the usual on/off/toggle words."""
    assert hlwm.complete(prefix + "swap_monitors_to_get_tag") \
        == 'false off on toggle true'.split(' ')


def test_set_attr_only_writable(hlwm):
    # attr completes read-only attributes
    assert hlwm.complete('attr monitors.c', position=1, partial=True) \
        == ['monitors.count ']
    # but set_attr does not
    assert hlwm.complete('set_attr monitors.c', position=1, partial=True) \
        == []


def test_attr_only_second_argument_if_writable(hlwm):
    # attr does not complete values for read-only attributes
    assert hlwm.call_xfail_no_output('complete 2 attr monitors.count') \
        .returncode == 7


def test_set_attr_can_not_set_writable(hlwm):
    """set_attr on a read-only attribute fails (exit code 3)."""
    assert hlwm.call_xfail('set_attr tags.count 5') \
        .returncode == 3


def test_substitute_missing_attribute__command_treated_as_attribute(hlwm):
    # with one argument missing, the command name is consumed as the
    # attribute path and reported as unknown
    call = hlwm.call_xfail('substitute X echo X')
    assert call.stderr == 'The root object has no attribute "echo"\n'


def test_substitute_command_missing(hlwm):
    call = hlwm.call_xfail('substitute X tags.count')
    assert call.stderr == 'substitute: not enough arguments\n'
def test_sprintf(hlwm):
    """sprintf fills %s placeholders with attribute values, in order."""
    expected_count = hlwm.get_attr('tags.count')
    expected_wmname = hlwm.get_attr('settings.wmname')
    expected_output = expected_count + '/' + expected_wmname + '\n'
    call = hlwm.call('sprintf X %s/%s tags.count settings.wmname echo X')
    assert call.stdout == expected_output


def test_sprintf_s_vs_c(hlwm):
    """%c with a substituted value behaves like %s with an attribute path."""
    p1 = hlwm.call('substitute X tags.count sprintf Y "number=%c" X echo Y')
    p2 = hlwm.call('sprintf Y "number=%s" tags.count echo Y')
    assert p1.stdout == p2.stdout


def test_sprintf_c_placeholder(hlwm):
    """%c consumes a literal argument instead of an attribute path."""
    proc = hlwm.call('sprintf X "%c %s tags" "there are" tags.count echo X')
    assert proc.stdout == "there are 1 tags\n"


def test_sprintf_nested(hlwm):
    """sprintf calls can be nested; each level doubles the string."""
    cmd = 'substitute A tags.count'
    cmd += ' sprintf B "%c%c" A A'
    cmd += ' sprintf C "%c%c" B B'
    cmd += ' sprintf D "%c%c" C C echo D'
    assert hlwm.call(cmd).stdout == '11111111\n'


def test_sprintf_too_few_attributes__command_treated_as_attribute(hlwm):
    # with one path missing, the command name is consumed as attribute path
    call = hlwm.call_xfail('sprintf X %s/%s tags.count echo X')
    assert call.stderr == 'The root object has no attribute "echo"\n'


def test_sprintf_too_few_attributes_in_total(hlwm):
    call = hlwm.call_xfail('sprintf X %s/%s tags.count')
    assert call.stderr == 'sprintf: not enough arguments\n'


def test_sprintf_command_missing(hlwm):
    call = hlwm.call_xfail('sprintf X %s tags.count')
    assert call.stderr == 'sprintf: not enough arguments\n'


def test_sprintf_double_percentage_escapes(hlwm):
    """%% escapes a literal percent sign."""
    call = hlwm.call('sprintf X %% echo X')
    assert call.stdout == '%\n'


def test_sprintf_completion_1_placeholder(hlwm):
    # one unresolved %s: attribute paths are completed, as for get_attr
    assert hlwm.complete('sprintf T %s', partial=True) \
        == sorted(hlwm.complete('get_attr', partial=True))


def test_sprintf_completion_s_after_c_placeholder(hlwm):
    # the %c argument must be supplied before attribute paths are completed
    assert hlwm.complete('sprintf T %c%s', partial=True) == []
    assert hlwm.complete('sprintf T %c%s myconst', partial=True) \
        == sorted(hlwm.complete('get_attr', partial=True))


def test_sprintf_completion_0_placeholders(hlwm):
    # no placeholders left: the identifier and command names are completed
    assert hlwm.complete('sprintf T %%') \
        == sorted(['T'] + hlwm.call('list_commands').stdout.splitlines())
def test_disjoin_rects(hlwm):
    # test the example from the manpage
    expected = '\n'.join((
        '300x150+300+250',
        '600x250+0+0',
        '300x150+0+250',
        '300x150+600+250',
        '600x250+300+400',
        ''))  # trailing newline
    response = hlwm.call('disjoin_rects 600x400+0+0 600x400+300+250').stdout
    assert response == expected


def test_attribute_completion(hlwm):
    """Attribute-path completion: child objects get a trailing '.',
    attributes a trailing space, non-matching prefixes complete to nothing."""
    def complete(partialPath):
        # helper: complete a partial path in get_attr's first argument
        return hlwm.complete('get_attr ' + partialPath,
                             partial=True, position=1)
    assert complete('monitors.') == ['monitors.0.',
                                     'monitors.by-name.',
                                     'monitors.count ',
                                     'monitors.focus.']
    assert complete('monitors.fo') == ['monitors.focus.']
    assert complete('monitors.count') == ['monitors.count ']
    assert complete('monitors.focus') == ['monitors.focus.']
    assert complete('monitors.fooob') == []
    assert complete('monitors.fooo.bar') == []
    assert len(complete('monitors.focus.')) >= 8
    assert complete('t') == ['tags.', 'theme.', 'tmp.']
    assert complete('') == [child + '.' for child in hlwm.list_children_via_attr('')]
@pytest.mark.parametrize('attrtype', ATTRIBUTE_TYPES)
@pytest.mark.parametrize('name', ['my_test', 'my_foo'])
@pytest.mark.parametrize('object_path', ['', 'clients', 'theme.tiling.active'])
def test_new_attr_without_removal(hlwm, attrtype, name, object_path):
    """new_attr creates a readable user attribute on various objects."""
    path = (object_path + '.' + name).lstrip('.')
    hlwm.call(['new_attr', attrtype, path])
    hlwm.get_attr(path)


@pytest.mark.parametrize('attrtype', ATTRIBUTE_TYPES)
def test_new_attr_existing_builtin_attribute(hlwm, attrtype):
    """Built-in attribute names cannot be shadowed by user attributes."""
    hlwm.get_attr('monitors.count')
    hlwm.call_xfail(['new_attr', attrtype, 'monitors.count']) \
        .expect_stderr('attribute name must start with "my_"')


@pytest.mark.parametrize('attrtype', ATTRIBUTE_TYPES)
def test_new_attr_existing_user_attribute(hlwm, attrtype):
    """Creating the same user attribute twice is rejected."""
    path = 'theme.my_user_attr'
    hlwm.call(['new_attr', attrtype, path])
    hlwm.get_attr(path)
    hlwm.call_xfail(['new_attr', attrtype, path]) \
        .expect_stderr('already has an attribute')


@pytest.mark.parametrize('attrtype', ATTRIBUTE_TYPES)
@pytest.mark.parametrize('path', ['foo', 'monitors.bar'])
def test_new_attr_missing_prefix(hlwm, attrtype, path):
    """User attribute names must carry the my_ prefix."""
    hlwm.call_xfail(['new_attr', attrtype, path]) \
        .expect_stderr('must start with "my_"')


@pytest.mark.parametrize('attrtypevalues', ATTRIBUTE_TYPE_EXAMPLE_VALUES.items())
@pytest.mark.parametrize('path', ['my_foo', 'monitors.my_bar'])
def test_new_attr_is_writable(hlwm, attrtypevalues, path):
    """Every example value of a type round-trips through set_attr/get_attr."""
    (attrtype, values) = attrtypevalues
    hlwm.call(['new_attr', attrtype, path])
    for v in values:
        hlwm.call(['set_attr', path, v])
        assert hlwm.get_attr(path) == str(v)


@pytest.mark.parametrize('attrtype', ATTRIBUTE_TYPES)
def test_new_attr_has_right_type(hlwm, attrtype):
    # the 'attr' listing shows the matching type letter for the new attribute
    path = 'my_user_attr'
    hlwm.call(['new_attr', attrtype, path])
    m = re.search('(.) . . ' + path, hlwm.call(['attr', '']).stdout)
    assert m.group(1)[0] == attrtype[0]


def test_new_attr_initial_value(hlwm):
    hlwm.call('new_attr string clients.my_foo bar')
    assert hlwm.get_attr('clients.my_foo') == 'bar'


def test_new_attr_initial_value_invalid(hlwm):
    hlwm.call_xfail('new_attr int clients.my_foo bar') \
        .expect_stderr('"bar" is an invalid value for clients.my_foo')
    # the attribute still exists and the default value remains
    assert hlwm.get_attr('clients.my_foo') == '0'


def test_new_attr_complete(hlwm):
    """new_attr completes types and suggests the my_ prefix on objects."""
    assert 'bool' in hlwm.complete('new_attr')
    assert 'my_' in hlwm.complete('new_attr int', partial=True)
    assert 'tags.my_' in hlwm.complete('new_attr int tags.', partial=True, position=2)
    assert 'tags.my_' in hlwm.complete('new_attr int tags.m', partial=True, position=2)
    assert 'settings.my_' in hlwm.complete('new_attr int settings.m', partial=True, position=2)
def test_remove_attr_invalid_attribute(hlwm):
    hlwm.call_xfail('remove_attr tags.invalid') \
        .expect_stderr('Object "tags" has no attribute "invalid".')


def test_remove_attr_invalid_child(hlwm):
    hlwm.call_xfail('remove_attr clients.foo.bar') \
        .expect_stderr('Object "clients." has no child named "foo"')


def test_remove_attr_non_user_path(hlwm):
    hlwm.call_xfail('remove_attr monitors.count') \
        .expect_stderr('Cannot remove built-in attribute "monitors.count"')


def test_remove_attr_user_attribute(hlwm):
    """A removed user attribute is gone and its name becomes reusable."""
    path = 'my_user_attr'
    hlwm.call(['new_attr', 'string', path])
    hlwm.call(['remove_attr', path])
    hlwm.call_xfail(['get_attr', path]).expect_stderr('has no attribute')  # attribute does not exist
    hlwm.call(['new_attr', 'string', path])  # and is free again


def test_getenv_completion(hlwm):
    """getenv completes names of existing environment variables."""
    prefix = 'some_uniq_prefix_'
    name = prefix + 'envname'
    hlwm.call(['setenv', name, 'myvalue'])
    assert [name] == hlwm.complete('getenv ' + prefix, position=1)


def test_export_completion(hlwm):
    """export completes NAME= for existing environment variables."""
    prefix = 'some_uniq_prefix_'
    name = prefix + 'envname'
    hlwm.call(['setenv', name, 'myvalue'])
    assert [name + '='] == hlwm.complete('export ' + prefix, position=1, partial=True)


def test_compare_invalid_operator(hlwm):
    hlwm.call_xfail('compare monitors.count -= 1') \
        .expect_stderr('unknown operator')
def test_try_command(hlwm):
    """'try' swallows the failure exit code but keeps the output."""
    proc = hlwm.unchecked_call('try chain , echo foo , false')
    assert proc.returncode == 0
    assert proc.stdout == 'foo\n'


def test_silent_command(hlwm):
    """'silent' keeps the exit code but discards the output."""
    proc = hlwm.unchecked_call('silent chain , echo foo , false')
    assert proc.returncode == 1
    assert proc.stdout == ''


def test_chain_command(hlwm):
    """chain runs every command regardless of intermediate failures."""
    assert hlwm.call('chain , echo foo').stdout == 'foo\n'
    assert hlwm.call('chain , false , echo f').stdout == 'f\n'
    assert hlwm.call('chain : echo g : echo f').stdout == 'g\nf\n'


def test_chain_command_empty(hlwm):
    """Empty chain segments are allowed and simply skipped."""
    assert hlwm.call('chain / / echo g / echo f').stdout == 'g\nf\n'
    assert hlwm.call('chain / echo g / echo f / ').stdout == 'g\nf\n'
    assert hlwm.call('chain / / echo g / / echo f / ').stdout == 'g\nf\n'
    assert hlwm.call('chain /').stdout == ''
    assert hlwm.call('chain / /').stdout == ''
    assert hlwm.call('chain / / /').stdout == ''


def test_chain_return_code(hlwm):
    """chain reports the last command's exit code, keeping earlier output."""
    p1 = hlwm.unchecked_call('get_attr')
    p2 = hlwm.unchecked_call('chain X echo line X get_attr')
    assert p1.returncode > 1
    assert p1.returncode == p2.returncode
    assert p2.stderr[0:5] == 'line\n'


def test_chain_nested(hlwm):
    assert hlwm.call('chain X chain Y echo a Y echo b X echo c').stdout \
        == 'a\nb\nc\n'
    # the inner 'chain Y' must not see the other Y
    assert hlwm.call('chain X chain Y echo a X echo b Y echo c').stdout \
        == 'a\nb Y echo c\n'


def test_chain_and_1(hlwm):
    """'and' stops at the first failing command."""
    proc = hlwm.unchecked_call('and , echo foo , false , echo bar')
    assert proc.returncode == 1
    assert proc.stderr == 'foo\n'


def test_chain_and_2(hlwm):
    proc = hlwm.unchecked_call('and , echo foo , true , echo bar , false , echo baz')
    assert proc.returncode == 1
    assert proc.stderr == 'foo\nbar\n'


def test_chain_or(hlwm):
    """'or' runs commands until the first one that succeeds."""
    proc = hlwm.unchecked_call(
        'or , chain : echo a : false , \
            , chain : echo b : false , \
            , chain : echo c : true , \
            , chain : echo d : false , \
        ')
    assert proc.returncode == 0
    assert proc.stdout == 'a\nb\nc\n'
    assert proc.stderr == ''


def test_chain_complete_cmd(hlwm):
    # right after a separator, all command names are completed
    assert hlwm.complete('chain X true X false X') == \
        sorted(hlwm.call('list_commands').stdout.splitlines())


def test_chain_complete_sep_only(hlwm):
    assert hlwm.complete('chain X true') == ['X']


def test_chain_complete_sep_and_args(hlwm):
    res = hlwm.complete('chain X focus')
    assert 'X' in res
    assert 'left' in res


def test_chain_complete_cmd_arg(hlwm):
    assert hlwm.complete('chain X chain Y true Y false X false X !') == \
        sorted(['X'] + hlwm.call('list_commands').stdout.splitlines())
@pytest.mark.parametrize('args', [[], ['abc'], ['foo', 'bar']])
def test_echo_command(hlwm, args):
    """echo joins its arguments with spaces and appends a newline."""
    assert hlwm.call(['echo'] + args).stdout == ' '.join(args) + '\n'


def test_echo_completion(hlwm):
    # check that the exit code is right
    assert hlwm.complete('echo foo') == []


@pytest.mark.parametrize('value', ['', 'bar'])
def test_setenv_command(hlwm, value):
    hlwm.call(['setenv', 'FOO', value])
    assert hlwm.call('getenv FOO').stdout == value + '\n'


@pytest.mark.parametrize('value', ['', 'bar'])
def test_export_command(hlwm, value):
    """export NAME=VALUE behaves like setenv NAME VALUE."""
    hlwm.call(['export', 'FOO=' + value])
    assert hlwm.call('getenv FOO').stdout == value + '\n'


def test_setenv_and_spawn(hlwm, hlwm_process):
    """Variables set via setenv are visible to spawned child processes."""
    hlwm.call(['setenv', 'FOO', 'bar'])
    hlwm_process.read_and_echo_output()
    hlwm.unchecked_call(['spawn', 'sh', '-c', 'echo FOO is $FOO .'],
                        read_hlwm_output=False)
    hlwm_process.read_and_echo_output(until_stdout='FOO is bar .')


def test_setenv_completion_existing_var(hlwm):
    hlwm.call('setenv FOO bar')
    assert 'FOO' in hlwm.complete('setenv')


def test_setenv_completion_unset_var(hlwm):
    hlwm.call('unsetenv FOO')
    assert 'FOO' not in hlwm.complete('setenv')


def test_unsetenv_command(hlwm):
    """getenv on an unset variable fails with exit code 8."""
    hlwm.call('setenv FOO bar')
    hlwm.call('unsetenv FOO')
    proc = hlwm.unchecked_call('getenv foo')
    assert proc.returncode == 8


def test_mktemp_distinct(hlwm):
    """Nested mktemp calls yield two distinct tmp. attribute paths."""
    lines = hlwm.call('mktemp int X mktemp int Y \
                chain , echo X , echo Y').stdout.splitlines()
    assert lines[0][0:4] == 'tmp.'
    assert lines[1][0:4] == 'tmp.'
    assert lines[0] != lines[1]


def test_mktemp_right_type(hlwm):
    """The temporary attribute enforces its declared type."""
    hlwm.call('mktemp int X set_attr X 23')
    hlwm.call_xfail('mktemp int X set_attr X sdflkj') \
        .expect_stderr('not a valid value')


def test_mktemp_complete(hlwm):
    assert 'int' in hlwm.complete('mktemp')
    assert 'X' in hlwm.complete('mktemp string X echo')
    completions = hlwm.complete('mktemp string X mktemp string Y echo')
    assert 'X' in completions and 'Y' in completions
    compl2 = hlwm.complete('mktemp string X')
    assert 'X' in compl2 and 'echo' in compl2
def test_negate_command(hlwm):
    """'!' inverts the exit code of the wrapped command."""
    assert hlwm.call('! false').stdout == ''
    assert hlwm.call('! ! echo f').stdout == 'f\n'
    hlwm.call_xfail('! echo test') \
        .expect_stderr('test')


def test_negate_complete_cmd(hlwm):
    assert hlwm.complete('!') \
        == sorted(hlwm.call('list_commands').stdout.splitlines())


def test_negate_complete_arg(hlwm):
    assert 'left' in hlwm.complete('! focus')
    assert [] == hlwm.complete('! true')


def test_integer_out_of_range(hlwm):
    """Values outside an integer attribute type's range are rejected."""
    type2outOfRange = {
        'uint': ['-18446744073709551616', '-1', '18446744073709551616'],
        'int': ['-18446744073709551616', '18446744073709551616'],
    }
    for typeName, values in type2outOfRange.items():
        attribute = 'my_' + typeName + '_attr'
        hlwm.call(f'new_attr {typeName} {attribute}')
        for v in values:
            hlwm.call_xfail(['set_attr', attribute, v]) \
                .expect_stderr('out of range')
def test_tag_status_invalid_monitor(hlwm):
    hlwm.call_xfail('tag_status foobar') \
        .expect_stderr('Monitor "foobar" not found!')


def test_tag_status(hlwm, x11):
    """tag_status flags the focused (#), empty (.), occupied (:) and
    urgent (!) tags, per the expected string below."""
    hlwm.call('add foobar')
    hlwm.call('add baz')
    hlwm.call('add qux')
    hlwm.create_client()
    hlwm.call('move baz')
    winid, _ = hlwm.create_client()
    hlwm.call('move qux')
    x11.make_window_urgent(x11.window(winid))
    assert hlwm.call('tag_status').stdout == "\t#default\t.foobar\t:baz\t!qux\t"


def test_jumpto_invalid_client(hlwm):
    hlwm.call_xfail('jumpto foobar') \
        .expect_stderr('Could not find client "foobar".')


def test_raise_winid_missing(hlwm):
    hlwm.call_xfail('raise') \
        .expect_stderr('raise: not enough arguments\n')


def test_raise_invalid_winid(hlwm):
    hlwm.call_xfail('raise foobar') \
        .expect_stderr('Could not find client "foobar".')


def test_argparse_too_few_range(hlwm):
    hlwm.call_xfail('split') \
        .expect_stderr('Expected between 1 and 3 arguments, but got only 0')


def test_argparse_expected_1(hlwm):
    hlwm.call_xfail('set_layout') \
        .expect_stderr('Expected one argument, but got only 0')


def test_argparse_expected_2_got_1(hlwm):
    hlwm.call_xfail('mousebind B1') \
        .expect_stderr('Expected 2 arguments, but got only 1')
def test_foreach_clients(hlwm):
    """foreach visits every child of the clients object."""
    hlwm.create_client()
    hlwm.create_client()
    children = hlwm.list_children_via_attr('clients')
    expected_out = ''.join([f'clients.{c}\n' for c in children])
    assert expected_out == hlwm.call('foreach C clients echo C').stdout


def test_foreach_tag_add(hlwm):
    hlwm.call('add anothertag')
    # adding another tag does not confuse the output:
    expected = ['tags.by-name.' + n for n in hlwm.list_children_via_attr('tags.by-name')]
    proc = hlwm.call('foreach T tags.by-name chain , add yetanothertag , echo T')
    assert proc.stdout.splitlines() == expected


def test_foreach_tag_merge(hlwm):
    # removing a tag while iterating over the tags does not break anything
    hlwm.call('add othertag')
    # removing this tag in the first loop iteration does not prevent
    # the second loop iteration for 'othertag'
    expected = [
        'tags.by-name.default',
        'merge_tag: Tag "othertag" not found',
        'tags.by-name.othertag'
    ]
    proc = hlwm.call('foreach T tags.by-name chain , merge_tag othertag , echo T')
    assert proc.stdout.splitlines() == expected


def test_foreach_exit_code_success(hlwm):
    """foreach succeeds if the command succeeds in some iteration."""
    # create two clients for multiple loop iterations
    hlwm.create_client()
    hlwm.create_client()
    # we do the following multiple times: create a new tag and assert
    # that there are at least 3 tags. This fails in the first iteration but
    # succeeds later:
    cmd = 'foreach _ clients.'
    cmd += ' chain , sprintf TAGNAME "tag%s" tags.count add TAGNAME'
    cmd += ' , compare tags.count ge 3'
    proc = hlwm.call(cmd)
    assert proc.stdout == ''


def test_foreach_exit_code_failure(hlwm):
    # create two clients for multiple loop iterations
    hlwm.create_client()
    hlwm.create_client()
    # create a new attribute: it succeeds in the first iteration
    # but fails in later iterations
    hlwm.call_xfail('foreach _ clients. new_attr int my_int') \
        .expect_stderr('already has an attribute named "my_int"')


def test_foreach_exit_code_no_iteration(hlwm):
    # iterating over an object without content never calls the command
    proc = hlwm.call('foreach S settings chain , echo output , quit , false')
    assert proc.stdout == ''


def test_foreach_invalid_object(hlwm):
    hlwm.call_xfail('foreach C clients.foobar quit') \
        .expect_stderr('"clients." has no child named "foobar"')


def test_foreach_object_completion(hlwm):
    completions = hlwm.complete(['foreach', 'X', 'tags.'], position=2, partial=True)
    # objects are completed
    assert 'tags.by-name.' in completions
    # attributes are not completed
    assert 'tags.count' not in completions


def test_foreach_identfier_completion(hlwm):
    # the identifier isn't completed in the object parameter
    assert 'X ' not in hlwm.complete(['foreach', 'X', ], partial=True)
    # but the identifier is completed in the command parameter
    assert 'X ' in hlwm.complete(['foreach', 'X', 'tags.'], partial=True)
    assert 'X ' in hlwm.complete(['foreach', 'X', 'tags.', 'echo'], partial=True)
|
# -*- coding: UTF-8 -*-
import logging
import re
import traceback
import MySQLdb
from common.config import SysConfig
from sql.utils.sql_utils import get_syntax_type
from . import EngineBase
from .models import ResultSet, ReviewSet, ReviewResult
logger = logging.getLogger('default')
class GoInceptionEngine(EngineBase):
    """Engine backend that reviews and executes SQL through a goInception
    server (an Inception-compatible MySQL-protocol proxy)."""

    def get_connection(self, db_name=None):
        """Return a cached or freshly created MySQLdb connection.

        When this engine is bound to an instance, connect to that instance
        directly; otherwise connect to the globally configured goInception
        service.
        """
        if self.conn:
            return self.conn
        if hasattr(self, 'instance'):
            self.conn = MySQLdb.connect(host=self.host, port=self.port, charset=self.instance.charset or 'utf8mb4',
                                        connect_timeout=10)
            return self.conn
        archer_config = SysConfig()
        go_inception_host = archer_config.get('go_inception_host')
        go_inception_port = int(archer_config.get('go_inception_port', 4000))
        self.conn = MySQLdb.connect(host=go_inception_host, port=go_inception_port, charset='utf8mb4',
                                    connect_timeout=10)
        return self.conn

    def execute_check(self, instance=None, db_name=None, sql=''):
        """Check (lint) the given SQL via goInception and return a ReviewSet."""
        check_result = ReviewSet(full_sql=sql)
        # run the statements through goInception in check-only mode (--check=1)
        check_result.rows = []
        inception_sql = f"""/*--user={instance.user};--password={instance.raw_password};--host={instance.host};--port={instance.port};--check=1;*/
            inception_magic_start;
            use `{db_name}`;
            {sql.rstrip(';')};
            inception_magic_commit;"""
        inception_result = self.query(sql=inception_sql)
        check_result.syntax_type = 2  # TODO: workflow type: 0 = other, 1 = DDL, 2 = DML; MySQL-only, to be adjusted
        for r in inception_result.rows:
            check_result.rows += [ReviewResult(inception_result=r)]
            if r[2] == 1:  # warning
                check_result.warning_count += 1
            elif r[2] == 2:  # error
                check_result.error_count += 1
            # keep checking only while no DDL statement has been found yet
            if check_result.syntax_type == 2:
                if get_syntax_type(r[5], parser=False, db_type='mysql') == 'DDL':
                    check_result.syntax_type = 1
        check_result.column_list = inception_result.column_list
        check_result.checked = True
        check_result.error = inception_result.error
        check_result.warning = inception_result.warning
        return check_result

    def execute(self, workflow=None):
        """Execute an approved SQL workflow (deployment ticket) via goInception."""
        instance = workflow.instance
        execute_result = ReviewSet(full_sql=workflow.sqlworkflowcontent.sql_content)
        if workflow.is_backup:
            str_backup = "--backup=1"
        else:
            str_backup = "--backup=0"
        # submit to goInception for execution
        sql_execute = f"""/*--user={instance.user};--password={instance.raw_password};--host={instance.host};--port={instance.port};--execute=1;--ignore-warnings=1;{str_backup};*/
            inception_magic_start;
            use `{workflow.db_name}`;
            {workflow.sqlworkflowcontent.sql_content.rstrip(';')};
            inception_magic_commit;"""
        inception_result = self.query(sql=sql_execute)
        # convert the raw result rows into a ReviewSet
        for r in inception_result.rows:
            execute_result.rows += [ReviewResult(inception_result=r)]
        # if any row has errlevel 1 or 2 and its status column does not contain
        # "Execute Successfully", the overall execution is considered failed
        for r in execute_result.rows:
            if r.errlevel in (1, 2) and not re.search(r"Execute Successfully", r.stagestatus):
                execute_result.error = "Line {0} has error/warning: {1}".format(r.id, r.errormessage)
                break
        return execute_result

    def query(self, db_name=None, sql='', limit_num=0, close_conn=True):
        """Run *sql* against goInception and wrap the outcome in a ResultSet.

        Errors are logged and stored on the ResultSet instead of being raised.
        """
        result_set = ResultSet(full_sql=sql)
        conn = self.get_connection()
        try:
            cursor = conn.cursor()
            effect_row = cursor.execute(sql)
            if int(limit_num) > 0:
                rows = cursor.fetchmany(size=int(limit_num))
            else:
                rows = cursor.fetchall()
            fields = cursor.description
            result_set.column_list = [i[0] for i in fields] if fields else []
            result_set.rows = rows
            result_set.affected_rows = effect_row
        except Exception as e:
            logger.error(f'goInception语句执行报错,语句:{sql},错误信息{traceback.format_exc()}')
            result_set.error = str(e)
        if close_conn:
            self.close()
        return result_set

    def get_variables(self, variables=None):
        """Fetch goInception variables, optionally filtered by name."""
        if variables:
            sql = f"inception get variables like '{variables[0]}';"
        else:
            sql = "inception get variables;"
        return self.query(sql=sql)

    def set_variable(self, variable_name, variable_value):
        """Set a goInception variable to the given value."""
        sql = f"""inception set {variable_name}={variable_value};"""
        return self.query(sql=sql)

    def osc_control(self, **kwargs):
        """Control an OSC (online schema change) job: get progress, or
        kill/pause/resume it, depending on kwargs['command']."""
        sqlsha1 = kwargs.get('sqlsha1')
        command = kwargs.get('command')
        if command == 'get':
            sql = f"inception get osc_percent '{sqlsha1}';"
        else:
            sql = f"inception {command} osc '{sqlsha1}';"
        return self.query(sql=sql)

    def close(self):
        """Close and drop the cached connection, if any."""
        if self.conn:
            self.conn.close()
            self.conn = None
|
#!/usr/bin/env python
# coding: utf-8
# In[10]:
# BAGINDA (130-)
# IoT IF-41
# Assignment 1 - Moving Average Filter
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np

# use dataset 15; read_csv already returns a DataFrame,
# so the former pd.DataFrame(data) re-wrap was redundant
data = pd.read_csv("15.csv")
df = data

# column D, "z acceleration": moving averages at three window sizes
df1 = df.iloc[:, 3].rolling(window=200).mean()
df2 = df.iloc[:, 3].rolling(window=500).mean()
df3 = df.iloc[:, 3].rolling(window=1000).mean()

plt.figure()
plt.plot(df1, label='window 200', color='orange')
plt.plot(df2, label='window 500', color='blue')
plt.plot(df3, label='window 1000', color='green')
plt.legend(loc=1)  # legend in the upper-right corner
plt.show()
# In[11]:
# sumber: https://towardsdatascience.com/implementing-moving-averages-in-python-1ad28e636f9d
|
import cplex
import numpy as np
from cplex.exceptions import CplexSolverError
import sys
from class_definitions import NetworkScenario
class OptDCsTwoStep:
    def __init__(self):
        """No per-instance state; everything is passed into opt_dcs."""
        pass
# Number of time slots are determined by shape of demand vector
def opt_dcs(self, scenario, time_slot_index, prev_x_sj_value):
num_time_slots = 1
total_compute_entities = scenario.num_DCs + 1
bigM_1 = scenario.num_BSs
bigM_2 = scenario.num_apps # TODO: check with num_BSs
# Create a new (empty) model and populate it below.
model = cplex.Cplex()
# Create one continuous variable w for each base station i, application s, DC j
# index j=0 refers to the cloud
for i in range(scenario.num_BSs):
for s in range(scenario.num_apps):
for j in range(total_compute_entities):
for t in range(num_time_slots):
if j == 0:
objective_factor = [scenario.cloud_cost[s]]
else:
objective_factor = [
scenario.op_cost[j - 1] * scenario.compute_factor[s] * scenario.demand[i][s][time_slot_index
] / scenario.compute_capacities[j - 1]]
model.variables.add(obj=objective_factor,
lb=[0],
ub=[1.0],
types=["C"],
names=["w_{}_{}_{}_{}".format(i, s, j, t)]) # w_i,s,j,t
# Create one binary variable for each DC/application pair. The variables
# model whether a DC j has application s running on it: x[s][j]
# objective function has higher cost for higher power level
for s in range(scenario.num_apps):
for j in range(scenario.num_DCs):
model.variables.add(obj=[15] * num_time_slots,
lb=[0] * num_time_slots,
ub=[1] * num_time_slots,
types=["B"] * num_time_slots,
names=["x_{}_{}_{}".format(s, j, t) for t in range(num_time_slots)]) # x_s,j,t
# Create one binary variable for each cell. The variables model
# whether each DC z is switched on or not
# type B = model.variables.type.binary
for j in range(scenario.num_DCs):
for t in range(num_time_slots):
model.variables.add(obj=[scenario.fixed_cost[j]],
lb=[0],
ub=[1],
types=["B"],
names=["z_{}_{}".format(j, t)]) # z_j,t
# Create one binary variable for each DC/application pair. The variables
# model whether a DC j has to load application s in the current time slot
for s in range(scenario.num_apps):
for j in range(scenario.num_DCs):
model.variables.add(obj=[30] * num_time_slots,
lb=[0] * num_time_slots,
ub=[1] * num_time_slots,
types=["B"] * num_time_slots,
names=["on_{}_{}_{}".format(s, j, t) for t in range(num_time_slots)]) # ON_s,j,t
# Create one continuous variable for each BS/DC/application. The variable
# model whether the demand from BS i for application s is handled by
# an application that is loaded in DC j in this time slot (based on ON_s,j)
for i in range(scenario.num_BSs):
for s in range(scenario.num_apps):
for j in range(scenario.num_DCs):
model.variables.add(obj=[0] * num_time_slots,
lb=[0] * num_time_slots,
ub=[1] * num_time_slots,
types=["C"] * num_time_slots,
names=["delta_{}_{}_{}_{}".format(i, s, j, t) for t in
range(num_time_slots)]) # delta_isjt
# Create one continuous variable for each application. The variable
# model whether the latency of serving requests for application s
# can be exceeded by a certain amount
for s in range(scenario.num_apps):
model.variables.add(obj=[scenario.slack_cost[s]] * num_time_slots,
lb=[0] * num_time_slots,
ub=[100] * num_time_slots,
types=["C"] * num_time_slots,
names=["gamma_{}_{}".format(s, t) for t in range(num_time_slots)]) # gamma_s,t
index = 0
# Create w indices for later use
w = []
for i in range(scenario.num_BSs):
w.append([])
for s in range(scenario.num_apps):
w[i].append([])
for j in range(total_compute_entities):
w[i][s].append([])
for t in range(num_time_slots):
w[i][s][j].append(index)
index += 1
# print("W indices = {}".format(w))
# Create x indices for later use
x = []
for s in range(scenario.num_apps):
x.append([])
for j in range(scenario.num_DCs):
x[s].append([])
for t in range(num_time_slots):
x[s][j].append(index)
index += 1
# print("x indices = {}".format(x))
# Create z indices for later use
z = []
for j in range(scenario.num_DCs):
z.append([])
for t in range(num_time_slots):
z[j].append(index)
index += 1
# print("z indices = {}".format(z))
# Create ON indices for later use
on = []
for s in range(scenario.num_apps):
on.append([])
for j in range(scenario.num_DCs):
on[s].append([])
for t in range(num_time_slots):
on[s][j].append(index)
index += 1
# print("on indices = {}".format(on))
# Create delta indices for later use
delta = []
for i in range(scenario.num_BSs):
delta.append([])
for s in range(scenario.num_apps):
delta[i].append([])
for j in range(scenario.num_DCs):
delta[i][s].append([])
for t in range(num_time_slots):
delta[i][s][j].append(index)
index += 1
# print("Delta indices = {}".format(delta))
# Create gamma indices for later use
gamma = []
for s in range(scenario.num_apps):
gamma.append([])
for t in range(num_time_slots):
gamma[s].append(index)
index += 1
# print("Gamma indices = {}".format(gamma))
# Set ON variable if model s is loaded in DC j in current time slot
for s in range(scenario.num_apps):
for j in range(scenario.num_DCs):
if np.isclose(prev_x_sj_value[s][j], 1):
index = [on[s][j][0]]
value = [1]
assign_on_constraint = cplex.SparsePair(ind=index, val=value)
model.linear_constraints.add(lin_expr=[assign_on_constraint],
senses=["E"],
rhs=[0.0])
else:
index = [x[s][j][0], on[s][j][0]]
value = [-1, 1]
assign_on_constraint = cplex.SparsePair(ind=index, val=value)
model.linear_constraints.add(lin_expr=[assign_on_constraint],
senses=["E"],
rhs=[0.0])
# Latency constraint
latency_names = ["latency{}".format(latency_index) for latency_index in
range(num_time_slots * scenario.num_apps)]
constraint_index = 0
for t in range(num_time_slots):
for s in range(scenario.num_apps):
total_demand = sum([scenario.demand[i][s][time_slot_index] for i in range(scenario.num_BSs)]) # in this time slot
index = []
value = []
if total_demand > 0:
for i in range(scenario.num_BSs):
index.append(w[i][s][0][t])
value.append(scenario.latency_cloud[i] * scenario.demand[i][s][time_slot_index] / total_demand)
index.extend([w[i][s][j + 1][t] for j in range(scenario.num_DCs)
])
value.extend([scenario.latency_DC[i][j] * scenario.demand[i][s][time_slot_index] / total_demand for j in range(scenario.num_DCs)])
index.extend([delta[i][s][j][t] for j in range(scenario.num_DCs)])
value.extend(
[scenario.model_loading_latency[s] * scenario.demand[i][s][time_slot_index] / total_demand / scenario.time_slot_in_seconds for j in
range(scenario.num_DCs)])
index.append(gamma[s][t])
value.append(-1.0)
latency_constraint = cplex.SparsePair(ind=index, val=value)
model.linear_constraints.add(lin_expr=[latency_constraint],
senses=["L"],
rhs=[scenario.max_latency[s]],
names=[latency_names[constraint_index]])
constraint_index = constraint_index + 1
# Resilience constraint
for t in range(num_time_slots):
for s in range(scenario.num_apps):
index = [x[s][j][t] for j in range(scenario.num_DCs)]
value = [1.0] * scenario.num_DCs
resilience_constraint = cplex.SparsePair(ind=index, val=value)
model.linear_constraints.add(lin_expr=[resilience_constraint],
senses=["G"],
rhs=[scenario.resilience[s]])
# Capacity constraint for computation
for t in range(num_time_slots):
for j in range(1, total_compute_entities): # only for DCs
index = []
value = []
for s in range(scenario.num_apps):
index.extend([w[i][s][j][t] for i in range(scenario.num_BSs)])
value.extend([scenario.demand[i][s][time_slot_index] * scenario.compute_factor[s] for i in range(scenario.num_BSs)])
capacity_constraint = cplex.SparsePair(ind=index, val=value)
model.linear_constraints.add(lin_expr=[capacity_constraint],
senses=["L"],
rhs=[0.7 * scenario.compute_capacities[j - 1]])
# Capacity constraint for loading models
mem_load_names = ["mem_load{}".format(mem_load_index) for mem_load_index in
range(num_time_slots * scenario.num_DCs)]
constraint_index = 0
for t in range(num_time_slots):
for j in range(scenario.num_DCs):
index = [x[s][j][t] for s in range(scenario.num_apps)]
value = [scenario.mem_factor[s] for s in range(scenario.num_apps)]
mem_load_constraint = cplex.SparsePair(ind=index, val=value)
model.linear_constraints.add(lin_expr=[mem_load_constraint],
senses=["L"],
rhs=[0.7 * scenario.mem_capacities[j]],
names=[mem_load_names[constraint_index]])
constraint_index = constraint_index + 1
# Capacity constraint for memory
mem_names = ["mem{}".format(mem_load_index) for mem_load_index in
range(num_time_slots * scenario.num_DCs)]
constraint_index = 0
for t in range(num_time_slots):
for j in range(scenario.num_DCs):
index = [x[s][j][t] for s in range(scenario.num_apps)]
value = [scenario.mem_factor[s] for s in range(scenario.num_apps)]
for s in range(scenario.num_apps):
index.extend([w[i][s][j + 1][t] for i in range(scenario.num_BSs)])
value.extend([scenario.demand[i][s][time_slot_index] * scenario.input_size[s] / scenario.time_slot_in_seconds for i in range(scenario.num_BSs)])
mem_capacity_constraint = cplex.SparsePair(ind=index, val=value)
model.linear_constraints.add(lin_expr=[mem_capacity_constraint],
senses=["L"],
rhs=[0.95 * scenario.mem_capacities[j]],
names=[mem_names[constraint_index]])
constraint_index = constraint_index + 1
# Demand is assigned to at least one location
for t in range(num_time_slots):
for s in range(scenario.num_apps):
for i in range(scenario.num_BSs):
index = [w[i][s][j][t] for j in range(total_compute_entities)]
value = [scenario.demand[i][s][time_slot_index]] * total_compute_entities
demand_constraint = cplex.SparsePair(ind=index, val=value)
model.linear_constraints.add(lin_expr=[demand_constraint],
senses=["E"],
rhs=[1.0 * scenario.demand[i][s][time_slot_index]])
# Whether application s is running on DC j
for t in range(num_time_slots):
for s in range(scenario.num_apps):
for j in range(scenario.num_DCs):
index = [w[i][s][j + 1][t] for i in range(scenario.num_BSs)]
value = [1.0] * scenario.num_BSs
index.append(x[s][j][t])
value.append(-bigM_1)
assign_binary_constraint_x = cplex.SparsePair(ind=index, val=value)
model.linear_constraints.add(lin_expr=[assign_binary_constraint_x],
senses=["L"],
rhs=[0.0])
# Whether DC j is on or off
for t in range(num_time_slots):
for j in range(scenario.num_DCs):
index = [x[s][j][t] for s in range(scenario.num_apps)]
value = [1.0] * scenario.num_apps
index.append(z[j][t])
value.append(-bigM_2)
assign_binary_constraint_z = cplex.SparsePair(ind=index, val=value)
model.linear_constraints.add(lin_expr=[assign_binary_constraint_z],
senses=["L"],
rhs=[0.0])
# Set delta[i][s][j] to w[i][s][j] when ON[s][j]=1
for t in range(num_time_slots):
for i in range(scenario.num_BSs):
for s in range(scenario.num_apps):
for j in range(scenario.num_DCs):
index = [delta[i][s][j][t], w[i][s][j + 1][t]]
value = [1.0, -1.0]
delta_1_constraint = cplex.SparsePair(ind=index, val=value)
model.linear_constraints.add(lin_expr=[delta_1_constraint],
senses=["L"],
rhs=[0.0])
index = [delta[i][s][j][t], w[i][s][j + 1][t], on[s][j][t]]
value = [-1.0, 1.0, 1.0]
delta_2_constraint = cplex.SparsePair(ind=index, val=value)
model.linear_constraints.add(lin_expr=[delta_2_constraint],
senses=["L"],
rhs=[1.0])
index = [delta[i][s][j][t], on[s][j][t]]
value = [1.0, -1.0]
delta_3_constraint = cplex.SparsePair(ind=index, val=value)
model.linear_constraints.add(lin_expr=[delta_3_constraint],
senses=["L"],
rhs=[0.0])
# Our objective is to minimize cost. Costs have been set when variables were created.
model.objective.set_sense(model.objective.sense.minimize)
# # Solve
try:
model.parameters.timelimit.set(3600) # one hour
model.parameters.emphasis.numerical.set(1)
model.solve()
except CplexSolverError as e:
print("Exception raised during solve: {}".format(e))
return
solution = model.solution
# Get the best bound.
dualbound = solution.MIP.get_best_objective()
# Get the objective function value.
primalbound = solution.get_objective_value()
print("Best bound: {0}".format(dualbound))
print("Best integer: {0}".format(primalbound))
print("Solution status with code {} = {}".format(solution.get_status(), solution.status[solution.get_status()]))
if solution.get_status() == 103:
model.conflict.refine(model.conflict.all_constraints())
model.conflict.write("conflict.clp")
return
w_tisj_solution = [[[0 for j in range(total_compute_entities)]
for s in range(scenario.num_apps)] for i in range(scenario.num_BSs)]
for i in range(scenario.num_BSs):
for s in range(scenario.num_apps):
for j in range(total_compute_entities):
if solution.get_values(w[i][s][j][0]) > model.parameters.mip.tolerances.integrality.get():
w_tisj_solution[i][s][j] = solution.get_values(w[i][s][j][0])
on_tsj_solution = [[0 for j in range(scenario.num_DCs)] for s in range(scenario.num_apps)]
for s in range(scenario.num_apps):
for j in range(scenario.num_DCs):
if solution.get_values(on[s][j][0]) > model.parameters.mip.tolerances.integrality.get():
on_tsj_solution[s][j] = solution.get_values(on[s][j][0])
num_DCs_open = 0
z_tj_solution = [0 for j in range(scenario.num_DCs)]
for j in range(scenario.num_DCs):
z_tj_solution[j] = solution.get_values(z[j][0])
if z_tj_solution[j] > model.parameters.mip.tolerances.integrality.get():
num_DCs_open += 1
x_tsj_solution = [[0 for j in range(scenario.num_DCs)] for s in range(scenario.num_apps)]
for s in range(scenario.num_apps):
for j in range(scenario.num_DCs):
if solution.get_values(x[s][j][0]) > model.parameters.mip.tolerances.integrality.get():
x_tsj_solution[s][j] = solution.get_values(x[s][j][0])
slack_ts_solution = [0 for s in range(scenario.num_apps)]
for s in range(scenario.num_apps):
if solution.get_values(gamma[s][0]) > model.parameters.mip.tolerances.integrality.get():
print(
"Latency exceeded for app {} by {} in time slot {}".format(s, solution.get_values(gamma[s][0]),
time_slot_index))
slack_ts_solution[s] = solution.get_values(gamma[s][0])
return solution.get_objective_value(), num_DCs_open, w_tisj_solution, \
z_tj_solution, x_tsj_solution, on_tsj_solution, slack_ts_solution
|
import random
import ntpath
import string
import re
from abc import ABC
from html.parser import HTMLParser
##
# Strip HTML Tags
# --------------------------
# strip_tags(html)
class MLStripper(HTMLParser, ABC):
    """HTML parser that throws away markup and keeps only the text nodes."""

    def __init__(self):
        super().__init__()
        self.reset()
        self.strict = False
        self.convert_charrefs = True
        # Accumulates every piece of raw text seen between tags.
        self.fed = []

    def handle_data(self, text):
        # Called by HTMLParser for each text node; collect it.
        self.fed.append(text)

    def get_data(self):
        """Return all collected text stitched back into one string."""
        return "".join(self.fed)
def strip_tags(html):
    """Return *html* with all markup removed and surrounding whitespace trimmed."""
    stripper = MLStripper()
    stripper.feed(html)
    return stripper.get_data().strip()
def clean_result(result, text_to_remove=None, default='-', single_line=False):
    """Normalise a scraped HTML fragment into plain text.

    Strips markup, removes each substring in *text_to_remove* (if given),
    optionally collapses the text onto a single line, and falls back to
    *default* when the input is None or the cleaned result is empty.
    """
    if result is None:
        return default
    result = strip_tags(result)
    if text_to_remove is not None:
        for snippet in text_to_remove:
            result = result.replace(snippet, '')
    result = result.strip()
    if single_line:
        # Collapse line breaks, then squeeze runs of spaces down to one.
        result = result.replace('\n', ' ').replace('\r', ' ')
        result = re.sub(' +', ' ', result)
    if result == '':
        # FIX: original used `result is ''` — identity, not equality, which
        # only works by accident of CPython string interning.
        return default
    return result
def safe_split(txt, separator, index_needed):
    """Split *txt* on *separator* and return element *index_needed*, or '-'.

    Negative indices keep their usual Python meaning (e.g. -1 = last part).
    FIX: the original guard (`index_needed > len(t) + 1`) was off by two,
    so indices equal to len(t) or len(t)+1 raised IndexError instead of
    returning the '-' placeholder.
    """
    parts = txt.split(separator)
    if -len(parts) <= index_needed < len(parts):
        return parts[index_needed]
    return '-'
##
# Random Generator
# --------------------------
# id_generator()
# >>> 'G5G74W'
#
# id_generator(3, "6793YUIO")
# >>>'Y3U'
##
def id_generator(size=10, chars=string.ascii_uppercase + string.digits):
    """Return a random identifier of *size* characters drawn from *chars*."""
    picks = [random.choice(chars) for _ in range(size)]
    return "".join(picks)
# #
# Extract Images From Text
# Changes Name in main
def extract_link_from_text(text_with_image, web_safe_topic, new_name):
    """Rewrite every /images/... link in *text_with_image* to a renamed copy.

    Returns the rewritten text plus a '|'-terminated "old:new" mapping string.
    Links are matched per extension (JPG, jpg, PNG, png) in that order, and
    that order also determines the numeric suffix each renamed file receives.
    """
    patterns = (r"\/images\/.*?JPG", r"\/images\/.*?jpg",
                r"\/images\/.*?PNG", r"\/images\/.*?png")
    found = []
    for pattern in patterns:
        found.extend(re.findall(pattern, text_with_image, re.MULTILINE))
    topic_prefix = web_safe_topic + "/"
    mapping = []
    for number, link in enumerate(found, start=1):
        target_dir = ntpath.dirname(link) + "/"
        target_dir = target_dir.replace("solution-image/", "")
        target_dir = target_dir.replace(topic_prefix, "")
        # new_dir_name = new_dir_name.replace(
        #     config.IMAGE_LINK_OLD, config.IMAGE_LINK_NEW)
        renamed = "{0}-{1}.png".format(topic_prefix + new_name, number)
        new_path = target_dir + renamed
        text_with_image = text_with_image.replace(link, new_path)
        mapping.append(link + ":" + new_path)
    image_string = ("|".join(mapping) + "|") if mapping else ""
    return text_with_image, image_string
def count_lines(file_path):
    """Return the number of lines in *file_path* (0 for an empty file).

    FIX: the original never bound its loop variable for an empty file and
    raised UnboundLocalError at `return i + 1`.
    """
    count = 0
    with open(file_path) as f:
        for count, _line in enumerate(f, start=1):
            pass
    return count
|
import re
import abc
from PyQt5.QtWidgets import QApplication, QDialog, QVBoxLayout, QFormLayout, QLabel, QLineEdit, QDateTimeEdit, QDialogButtonBox, QComboBox
from PyQt5.QtCore import QDateTime, pyqtSignal, pyqtSlot
from ..Records import SurveyRecord
from ..AppResources import SurveyTableResources as R
from .SurveyTableWidget import Column
# todo switch to kwargs
class SurveyInfoDialog(QDialog):
    """Base dialog with a form for viewing/editing one survey record.

    Subclasses implement setInitialValues() to prefill the form fields.
    Emits audioIndexSignal(str) whenever the selected audio file changes.
    """

    audioIndexSignal = pyqtSignal(str)

    def __init__(self, parent, **kwargs):
        super().__init__(parent)
        self.surveyTableModel = parent.surveyTableModel
        self.mainLayout = QVBoxLayout()
        self.setLayout(self.mainLayout)
        self.kwargs = kwargs
        # form widgets
        self.surveyDatetimeField = QDateTimeEdit()
        self.recorderIDField = QComboBox()
        self.recordingDatetimeField = QDateTimeEdit()
        self.recordingDurationField = QLabel()
        self.audioField = QComboBox()
        self.urlField = QLineEdit()
        self.noteField = QLineEdit()
        self.initForm()
        # OK/Cancel buttons
        self.initButtons()

    # TODO make a property?
    @pyqtSlot(int)
    def setDuration(self, duration):
        """Display *duration* in the read-only duration label."""
        self.recordingDurationField.setText(str(duration))

    def initForm(self):
        """Build the form layout and wire up the input widgets."""
        formLayout = QFormLayout()
        surveyDatetimeLabel = QLabel("Survey Datetime")
        recorderIDLabel = QLabel("Recorder ID")
        recordingDatetimeLabel = QLabel("Recording Datetime")
        recordingDurationLabel = QLabel("Recording Duration")
        audioLabel = QLabel("Audio File Path")
        urlLabel = QLabel("Audio URL")
        noteLabel = QLabel("Notes")
        # datetime format
        self.surveyDatetimeField.setDisplayFormat(R.datetimeFormat)
        self.recordingDatetimeField.setDisplayFormat(R.datetimeFormat)
        self.surveyDatetimeField.setEnabled(False)  # primary key should not be edited
        # combo boxes
        self.recorderIDField.setEditable(True)
        self.audioField.setEditable(False)
        self.recorderIDField.addItems(self.kwargs["recorderIds"])
        self.audioField.addItems(self.kwargs["audioFiles"])
        self.audioField.currentTextChanged.connect(self.onFileIndexChange)
        # populate fields (subclass hook)
        self.setInitialValues(**self.kwargs)
        # assemble form
        formLayout.addRow(surveyDatetimeLabel, self.surveyDatetimeField)
        formLayout.addRow(recorderIDLabel, self.recorderIDField)
        formLayout.addRow(recordingDatetimeLabel, self.recordingDatetimeField)
        formLayout.addRow(recordingDurationLabel, self.recordingDurationField)
        formLayout.addRow(audioLabel, self.audioField)
        formLayout.addRow(urlLabel, self.urlField)
        formLayout.addRow(noteLabel, self.noteField)
        self.mainLayout.addLayout(formLayout)

    @pyqtSlot(str)
    def onFileIndexChange(self, file):
        """Relay an audio-file selection change and force a repaint."""
        print(file)
        self.audioIndexSignal.emit(file)
        QApplication.processEvents()
        self.repaint()

    @abc.abstractmethod
    def setInitialValues(self, **kwargs):
        """Subclass hook: prefill the form fields from kwargs/selection."""
        pass

    def initButtons(self):
        """Add the standard OK/Cancel button box."""
        buttonBox = QDialogButtonBox(QDialogButtonBox.Ok | QDialogButtonBox.Cancel)
        buttonBox.accepted.connect(self.accept)
        buttonBox.rejected.connect(self.reject)
        self.mainLayout.addWidget(buttonBox)

    # helper for loadForm()
    def getQDateTime(self, string):
        """Parse 'Y-M-D h:m:s[.ms]'-style text into a QDateTime.

        FIX: the original left its result variable unbound (UnboundLocalError)
        for any string that did not split into exactly 6 or 7 numeric fields,
        shadowed the `id` builtin, and used a non-raw regex string; a
        malformed input now raises ValueError instead.
        """
        parts = re.split(r"[:\s.\-]", string)
        parts = list(map(int, parts))
        if len(parts) == 6:
            return QDateTime(parts[0], parts[1], parts[2], parts[3], parts[4], parts[5])
        if len(parts) == 7:
            return QDateTime(parts[0], parts[1], parts[2], parts[3], parts[4], parts[5], parts[6])
        raise ValueError("Unrecognised datetime string: {!r}".format(string))

    def getDateTime(self):
        """Return the QDateTime currently shown in the survey datetime field."""
        return self.surveyDatetimeField.dateTime()

    def createRecord(self):
        """Assemble a SurveyRecord from the current form contents."""
        return SurveyRecord(surveyDatetime=self.surveyDatetimeField.dateTime(),
                            recorderID=self.recorderIDField.currentText(),
                            recordingDatetime=self.recordingDatetimeField.dateTime(),
                            recordingDuration=self.recordingDurationField.text(),
                            audioFilePath=self.audioField.currentText(),
                            audioURL=self.urlField.text(),
                            note=self.noteField.text())
class EditSurveyDialog(SurveyInfoDialog):
    """Survey dialog pre-populated from the first selected table row."""

    def __init__(self, parent, **kwargs):
        super().__init__(parent, **kwargs)

    def setInitialValues(self, **kwargs):
        """Copy the first selected row's columns into the form fields.

        Column order assumed: 0=survey datetime, 1=recorder id, 2=recording
        datetime, 3=duration, 4=audio path, 5=URL, 6=notes -- TODO confirm
        against SurveyTableWidget.Column.
        """
        field0 = self.getQDateTime(self.parent().getSelectedData(0, Column(0)))
        field1 = self.parent().getSelectedData(0, Column(1))
        field2 = self.getQDateTime(self.parent().getSelectedData(0, Column(2)))
        field3 = self.parent().getSelectedData(0, Column(3))
        field4 = self.parent().getSelectedData(0, Column(4))
        field5 = self.parent().getSelectedData(0, Column(5))
        field6 = self.parent().getSelectedData(0, Column(6))
        self.surveyDatetimeField.setDateTime(field0)
        self.recorderIDField.setCurrentIndex(self.recorderIDField.findText(field1))
        self.recordingDatetimeField.setDateTime(field2)
        self.recordingDurationField.setText(field3)
        if self.audioField.findText(field4) > -1:  # -1 = not present in combo box
            print(self.audioField.findText(field4))
            self.audioField.setCurrentIndex(self.audioField.findText(field4))
        else:
            print(self.audioField.findText(field4))
            # Audio path missing from the scanned list: add it so it can show.
            self.audioField.addItem(field4)
            self.audioField.setCurrentIndex(self.audioField.findText(field4))
        self.urlField.setText(field5)
        self.noteField.setText(field6)
class CombineSurveyDialog(SurveyInfoDialog):
    """Dialog that merges selected rows into one survey, defaulting most
    fields from the first selected row and the datetime to "now"."""

    def __init__(self, parent, **kwargs):
        super().__init__(parent, **kwargs)

    def setInitialValues(self, **kwargs):
        """Prefill from the first selected row; prefer the user-picked audio
        file metadata when one is selected (kwargs['audioSelected'])."""
        self.surveyDatetimeField.setDateTime(QDateTime.currentDateTime())
        field1 = self.parent().getSelectedData(0, Column(1))
        field2 = self.getQDateTime(self.parent().getSelectedData(0, Column(2)))
        field3 = self.parent().getSelectedData(0, Column(3))
        field4 = self.parent().getSelectedData(0, Column(4))
        field5 = self.parent().getSelectedData(0, Column(5))
        self.recorderIDField.setCurrentIndex(self.recorderIDField.findText(field1))
        self.recordingDatetimeField.setDateTime(field2)
        self.recordingDurationField.setText(field3)
        if self.audioField.findText(field4) > -1:  # -1 = not present in combo box
            print(self.audioField.findText(field4))
            self.audioField.setCurrentIndex(self.audioField.findText(field4))
        else:
            print(self.audioField.findText(field4))
            # Audio path missing from the scanned list: add it so it can show.
            self.audioField.addItem(field4)
            self.audioField.setCurrentIndex(self.audioField.findText(field4))
        self.urlField.setText(field5)
        if kwargs["audioSelected"]:
            self.recordingDurationField.setText(kwargs["audioDuration"])
            self.audioField.setCurrentIndex(self.audioField.findText(kwargs["audioFile"]))
class AddSurveyDialog(SurveyInfoDialog):
    """Dialog for creating a brand-new survey entry."""

    def __init__(self, parent, **kwargs):
        super().__init__(parent, **kwargs)

    def setInitialValues(self, **kwargs):
        """Prefill with the current time; copy audio metadata when selected."""
        self.surveyDatetimeField.setDateTime(QDateTime.currentDateTime())
        if not kwargs["audioSelected"]:
            return
        self.recordingDurationField.setText(kwargs["audioDuration"])
        self.audioField.setCurrentIndex(self.audioField.findText(kwargs["audioFile"]))
# TODO: add drop down etc
import pytest
import uuid
import random
import asyncpg
from korm import AsyncModel, ExecutionFailure, PoolManager, \
ConnectionManager, AsyncContextManagerABC, AsyncModelABC
pytestmark = pytest.mark.asyncio
async def test_AsyncModelABC(User):
    """ABC registration: User satisfies AsyncModelABC; plain objects do not."""
    assert issubclass(User, AsyncModelABC)
    assert isinstance(User(), AsyncModelABC)
    assert not issubclass(object, AsyncModelABC)
    assert not isinstance(object(), AsyncModelABC)
async def test_AsyncContextManagerABC(User):
    """Both manager types satisfy AsyncContextManagerABC; plain objects do not."""
    assert issubclass(ConnectionManager, AsyncContextManagerABC)
    assert issubclass(PoolManager, AsyncContextManagerABC)
    assert isinstance(User.connection(), AsyncContextManagerABC)
    assert not issubclass(object, AsyncContextManagerABC)
    assert not isinstance(object(), AsyncContextManagerABC)
async def test_Column__repr__(User):
    """Column.__repr__ echoes the key, default factory and primary-key flag."""
    # The default is the uuid.uuid4 *function*; its repr is interpolated here.
    expected = "Column(key='_id', default={}, primary_key=True)".format(
        uuid.uuid4)
    assert repr(User.id) == expected
async def test_AsyncModel_connection(User):
    """connection() yields one of the two concrete async context managers."""
    connection = User.connection()
    assert isinstance(connection, AsyncContextManagerABC)
    assert type(connection) in (ConnectionManager, PoolManager)
async def test_AsyncModel__init_subclass__():
    """Subclassing with an invalid `connection` kwarg must raise RuntimeError."""
    with pytest.raises(RuntimeError):
        class Fail(AsyncModel, connection=object()):
            pass
async def test_save(User):
    """save() inserts a row that can be fetched back by primary key."""
    user = User(name='jim', email='jim@foo.com')
    await user.save()
    # get_one returns a raw record by default; '_id' is the primary-key column.
    res = await User.get_one(id=user.id)
    assert res['_id'] == user.id
    assert res['name'] == user.name
    assert res['email'] == user.email
async def test_save_as_update(User):
    """save() on an already-persisted model performs an UPDATE."""
    await User.populate(1)
    # record=False returns a model instance rather than a raw record.
    user = await User.get_one(record=False)
    user.name = 'foo-bar'
    await user.save()
    res = await User.get_one(record=False, id=user.id)
    assert res.name == 'foo-bar'
async def test_save_fails(User):
    """Saving a record that violates a NOT NULL constraint surfaces the DB error.

    FIX: removed leftover debug statements (a print and an `assert 0`) that
    sat unreachable inside the pytest.raises block after the raising call.
    """
    with pytest.raises(asyncpg.exceptions.NotNullViolationError):
        # 'email' is omitted, so the INSERT violates its NOT NULL constraint.
        user = User(name='fail')
        await user.save()
async def test_get(User):
    """get() returns model instances with records=False, raw records otherwise."""
    expected = 20
    await User.populate(expected)
    res = await User.get(records=False)
    assert len(res) >= expected
    for u in res:
        assert isinstance(u, AsyncModel)
    # test with **kwargs (filtering by column value returns raw records)
    user = random.choice(res)
    name = getattr(user, 'name')
    res = await User.get(name=name)
    # NOTE(review): assumes populate() generates unique names — confirm.
    assert len(res) == 1
    assert not isinstance(res[0], AsyncModel)
    assert res[0]['name'] == name
async def test_delete(User):
    """delete() removes saved rows; deleting an unsaved model raises."""
    await User.populate(20)
    res = await User.get(records=False)
    assert len(res) > 0
    for u in res:
        await u.delete()
    res = await User.get()
    assert len(res) == 0
    with pytest.raises(ExecutionFailure):
        # user has not actually been saved to the database
        # so it won't be deleted.
        u = User()
        await u.delete()
|
import pytest
from unittest.mock import ANY, Mock
from notifications_utils.statsd_decorators import statsd
class AnyStringWith(str):
    """A substring matcher: compares equal to any string that contains it."""

    def __eq__(self, candidate):
        # Equality holds when this text occurs anywhere inside `candidate`.
        return self in candidate
@pytest.fixture
def test_app(app):
    """App configured for statsd in the test environment, with a mocked client."""
    settings = {
        'NOTIFY_ENVIRONMENT': "test",
        'NOTIFY_APP_NAME': "api",
        'STATSD_HOST': "localhost",
        'STATSD_PORT': "8000",
        'STATSD_PREFIX': "prefix",
    }
    app.config.update(settings)
    # Mocked so tests can assert on incr/timing calls without a real server.
    app.statsd_client = Mock()
    return app
def test_should_call_statsd(test_app, mocker):
    """@statsd must log the elapsed time and emit call/success/timing metrics."""
    mock_logger = mocker.patch.object(test_app.logger, 'debug')
    @statsd(namespace="test")
    def test_function():
        return True
    assert test_function()
    # Decorator logs a debug line that embeds the measured duration.
    mock_logger.assert_called_once_with(AnyStringWith("test call test_function took "))
    test_app.statsd_client.incr.assert_any_call("test.test_function")
    test_app.statsd_client.incr.assert_any_call("test.test_function.success")
    test_app.statsd_client.timing.assert_called_once_with("test.test_function.success.elapsed_time", ANY)
def test_should_call_statsd_on_exception(test_app):
    """@statsd must still emit call/exception counters when the wrapped function raises."""
    @statsd(namespace="test")
    def test_function():
        raise Exception()
    with pytest.raises(Exception):
        test_function()
    test_app.statsd_client.incr.assert_any_call("test.test_function")
    test_app.statsd_client.incr.assert_any_call("test.test_function.exception")
|
"""SCons.Executor
A module for executing actions with specific lists of target and source
Nodes.
"""
#
# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "src/engine/SCons/Executor.py 3603 2008/10/10 05:46:45 scons"
import string
from SCons.Debug import logInstanceCreation
import SCons.Errors
import SCons.Memoize
class Executor:
    """A class for controlling instances of executing an action.

    This largely exists to hold a single association of an action,
    environment, list of environment override dictionaries, targets
    and sources for later processing as needed.

    NOTE: this is Python 2 code (old-style raise, apply(), string.join(),
    dict.has_key()); keep edits compatible with the SCons 1.x toolchain.
    """
    if SCons.Memoize.use_memoizer:
        __metaclass__ = SCons.Memoize.Memoized_Metaclass
    # Counters consumed by the SCons memoization framework (SCons.Memoize).
    memoizer_counters = []
    # NOTE(review): mutable default arguments ([{}], [], {}) are shared
    # across calls; long-standing SCons convention — sources is copied below,
    # the others must not be mutated in place.
    def __init__(self, action, env=None, overridelist=[{}],
                 targets=[], sources=[], builder_kw={}):
        if __debug__: logInstanceCreation(self, 'Executor.Executor')
        self.set_action_list(action)
        self.pre_actions = []
        self.post_actions = []
        self.env = env
        self.overridelist = overridelist
        self.targets = targets
        self.sources = sources[:]
        self.sources_need_sorting = False
        self.builder_kw = builder_kw
        # Per-instance memoization cache, cleared by cleanup().
        self._memo = {}
    def set_action_list(self, action):
        """Normalize *action* to a list; a falsy action is a user error."""
        import SCons.Util
        if not SCons.Util.is_List(action):
            if not action:
                import SCons.Errors
                raise SCons.Errors.UserError, "Executor must have an action."
            action = [action]
        self.action_list = action
    def get_action_list(self):
        # Pre- and post-actions bracket the main action list.
        return self.pre_actions + self.action_list + self.post_actions
    memoizer_counters.append(SCons.Memoize.CountValue('get_build_env'))
    def get_build_env(self):
        """Fetch or create the appropriate build Environment
        for this Executor.
        """
        try:
            return self._memo['get_build_env']
        except KeyError:
            pass
        # Create the build environment instance with appropriate
        # overrides.  These get evaluated against the current
        # environment's construction variables so that users can
        # add to existing values by referencing the variable in
        # the expansion.
        overrides = {}
        for odict in self.overridelist:
            overrides.update(odict)
        import SCons.Defaults
        env = self.env or SCons.Defaults.DefaultEnvironment()
        build_env = env.Override(overrides)
        self._memo['get_build_env'] = build_env
        return build_env
    def get_build_scanner_path(self, scanner):
        """Fetch the scanner path for this executor's targets and sources.
        """
        env = self.get_build_env()
        try:
            cwd = self.targets[0].cwd
        except (IndexError, AttributeError):
            cwd = None
        return scanner.path(env, cwd, self.targets, self.get_sources())
    def get_kw(self, kw={}):
        # Merge per-call keywords over the builder's stored keywords.
        result = self.builder_kw.copy()
        result.update(kw)
        return result
    def do_nothing(self, target, kw):
        # Stand-in for do_execute after nullify(); see __call__.
        return 0
    def do_execute(self, target, kw):
        """Actually execute the action list."""
        env = self.get_build_env()
        kw = self.get_kw(kw)
        status = 0
        for act in self.get_action_list():
            status = apply(act, (self.targets, self.get_sources(), env), kw)
            if isinstance(status, SCons.Errors.BuildError):
                status.executor = self
                raise status
            elif status:
                # Any non-zero/non-BuildError status is wrapped and raised.
                msg = "Error %s" % status
                raise SCons.Errors.BuildError(errstr=msg, executor=self, action=act)
        return status
    # use extra indirection because with new-style objects (Python 2.2
    # and above) we can't override special methods, and nullify() needs
    # to be able to do this.
    def __call__(self, target, **kw):
        return self.do_execute(target, kw)
    def cleanup(self):
        # Drop all memoized results (build env, contents, source lists).
        self._memo = {}
    def add_sources(self, sources):
        """Add source files to this Executor's list.  This is necessary
        for "multi" Builders that can be called repeatedly to build up
        a source file list for a given target."""
        self.sources.extend(sources)
        self.sources_need_sorting = True
    def get_sources(self):
        # Deduplicate lazily, only after add_sources() has been called.
        if self.sources_need_sorting:
            self.sources = SCons.Util.uniquer_hashables(self.sources)
            self.sources_need_sorting = False
        return self.sources
    def prepare(self):
        """
        Preparatory checks for whether this Executor can go ahead
        and (try to) build its targets.
        """
        for s in self.get_sources():
            if s.missing():
                msg = "Source `%s' not found, needed by target `%s'."
                raise SCons.Errors.StopError, msg % (s, self.targets[0])
    def add_pre_action(self, action):
        self.pre_actions.append(action)
    def add_post_action(self, action):
        self.post_actions.append(action)
    # another extra indirection for new-style objects and nullify...
    def my_str(self):
        # Join the genstring of every action; nullify() replaces this method.
        env = self.get_build_env()
        get = lambda action, t=self.targets, s=self.get_sources(), e=env: \
                     action.genstring(t, s, e)
        return string.join(map(get, self.get_action_list()), "\n")
    def __str__(self):
        return self.my_str()
    def nullify(self):
        # Turn this executor into a no-op: no execution, empty string form.
        self.cleanup()
        self.do_execute = self.do_nothing
        self.my_str = lambda S=self: ''
    memoizer_counters.append(SCons.Memoize.CountValue('get_contents'))
    def get_contents(self):
        """Fetch the signature contents.  This is the main reason this
        class exists, so we can compute this once and cache it regardless
        of how many target or source Nodes there are.
        """
        try:
            return self._memo['get_contents']
        except KeyError:
            pass
        env = self.get_build_env()
        get = lambda action, t=self.targets, s=self.get_sources(), e=env: \
                     action.get_contents(t, s, e)
        result = string.join(map(get, self.get_action_list()), "")
        self._memo['get_contents'] = result
        return result
    def get_timestamp(self):
        """Fetch a time stamp for this Executor.  We don't have one, of
        course (only files do), but this is the interface used by the
        timestamp module.
        """
        return 0
    def scan_targets(self, scanner):
        self.scan(scanner, self.targets)
    def scan_sources(self, scanner):
        if self.sources:
            self.scan(scanner, self.get_sources())
    def scan(self, scanner, node_list):
        """Scan a list of this Executor's files (targets or sources) for
        implicit dependencies and update all of the targets with them.
        This essentially short-circuits an N*M scan of the sources for
        each individual target, which is a hell of a lot more efficient.
        """
        env = self.get_build_env()
        deps = []
        if scanner:
            # Explicit scanner supplied: let it pick per-node sub-scanners.
            for node in node_list:
                node.disambiguate()
                s = scanner.select(node)
                if not s:
                    continue
                path = self.get_build_scanner_path(s)
                deps.extend(node.get_implicit_deps(env, s, path))
        else:
            # No scanner supplied: fall back to each node's env scanner.
            kw = self.get_kw()
            for node in node_list:
                node.disambiguate()
                scanner = node.get_env_scanner(env, kw)
                if not scanner:
                    continue
                scanner = scanner.select(node)
                if not scanner:
                    continue
                path = self.get_build_scanner_path(scanner)
                deps.extend(node.get_implicit_deps(env, scanner, path))
        deps.extend(self.get_implicit_deps())
        for tgt in self.targets:
            tgt.add_to_implicit(deps)
    def _get_unignored_sources_key(self, ignore=()):
        # Memoization key for get_unignored_sources().
        return tuple(ignore)
    memoizer_counters.append(SCons.Memoize.CountDict('get_unignored_sources', _get_unignored_sources_key))
    def get_unignored_sources(self, ignore=()):
        """Return the source list minus any nodes in *ignore* (memoized)."""
        ignore = tuple(ignore)
        try:
            memo_dict = self._memo['get_unignored_sources']
        except KeyError:
            memo_dict = {}
            self._memo['get_unignored_sources'] = memo_dict
        else:
            try:
                return memo_dict[ignore]
            except KeyError:
                pass
        sourcelist = self.get_sources()
        if ignore:
            idict = {}
            for i in ignore:
                idict[i] = 1
            sourcelist = filter(lambda s, i=idict: not i.has_key(s), sourcelist)
        memo_dict[ignore] = sourcelist
        return sourcelist
    def _process_sources_key(self, func, ignore=()):
        # Memoization key for process_sources().
        return (func, tuple(ignore))
    memoizer_counters.append(SCons.Memoize.CountDict('process_sources', _process_sources_key))
    def process_sources(self, func, ignore=()):
        """Map *func* over the unignored sources (memoized per (func, ignore))."""
        memo_key = (func, tuple(ignore))
        try:
            memo_dict = self._memo['process_sources']
        except KeyError:
            memo_dict = {}
            self._memo['process_sources'] = memo_dict
        else:
            try:
                return memo_dict[memo_key]
            except KeyError:
                pass
        result = map(func, self.get_unignored_sources(ignore))
        memo_dict[memo_key] = result
        return result
    def get_implicit_deps(self):
        """Return the executor's implicit dependencies, i.e. the nodes of
        the commands to be executed."""
        result = []
        build_env = self.get_build_env()
        for act in self.get_action_list():
            result.extend(act.get_implicit_deps(self.targets, self.get_sources(), build_env))
        return result
# Module-level cache for the Null Environment singleton.
nullenv = None
def get_NullEnvironment():
    """Use singleton pattern for Null Environments."""
    global nullenv
    import SCons.Util
    # The class is (re)created on every call, but only the first instance is
    # ever stored in nullenv, so callers always see the same object.
    class NullEnvironment(SCons.Util.Null):
        import SCons.CacheDir
        _CacheDir_path = None
        _CacheDir = SCons.CacheDir.CacheDir(None)
        def get_CacheDir(self):
            return self._CacheDir
    if not nullenv:
        nullenv = NullEnvironment()
    return nullenv
class Null:
    """A null Executor, with a null build Environment, that does
    nothing when the rest of the methods call it.

    This might be able to disappear when we refactor things to
    disassociate Builders from Nodes entirely, so we're not
    going to worry about unit tests for this--at least for now.
    """
    def __init__(self, *args, **kw):
        if __debug__: logInstanceCreation(self, 'Executor.Null')
        # 'targets' is a required keyword; kept so _morph() can rebuild.
        self.targets = kw['targets']
    def get_build_env(self):
        return get_NullEnvironment()
    def get_build_scanner_path(self):
        return None
    def cleanup(self):
        pass
    def prepare(self):
        pass
    def get_unignored_sources(self, *args, **kw):
        return tuple(())
    def get_action_list(self):
        return []
    def __call__(self, *args, **kw):
        return 0
    def get_contents(self):
        return ''
    def _morph(self):
        """Morph this Null executor to a real Executor object."""
        # In-place class swap: reassign __class__, then re-run Executor's
        # __init__ with an empty action list and the preserved targets.
        self.__class__ = Executor
        self.__init__([], targets=self.targets)
    # The following methods require morphing this Null Executor to a
    # real Executor object.
    def add_pre_action(self, action):
        self._morph()
        self.add_pre_action(action)
    def add_post_action(self, action):
        self._morph()
        self.add_post_action(action)
    def set_action_list(self, action):
        self._morph()
        self.set_action_list(action)
|
## Copyright 2017 Knossos authors, see NOTICE file
##
## Licensed under the Apache License, Version 2.0 (the "License");
## you may not use this file except in compliance with the License.
## You may obtain a copy of the License at
##
## http://www.apache.org/licenses/LICENSE-2.0
##
## Unless required by applicable law or agreed to in writing, software
## distributed under the License is distributed on an "AS IS" BASIS,
## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
## See the License for the specific language governing permissions and
## limitations under the License.
from __future__ import absolute_import, print_function
import sys
import os.path
import logging
import ctypes.util
import threading
from . import uhf
uhf(__name__)
from . import center
if sys.platform == 'win32':
ENCODING = 'latin1'
else:
ENCODING = 'utf8'
sdl = None
alc = None
sdl_init_lock = threading.Lock()
alc_init_lock = threading.Lock()
class SDL_Rect(ctypes.Structure):
    """ctypes mirror of an SDL rectangle: position (x, y) plus size (w, h).

    NOTE(review): uses 16-bit fields (SDL 1.2-style layout) — confirm this
    matches the SDL build actually loaded before passing it to SDL2 calls.
    """
    _fields_ = [
        ('x', ctypes.c_int16),
        ('y', ctypes.c_int16),
        ('w', ctypes.c_uint16),
        ('h', ctypes.c_uint16)
    ]
class SDL_DisplayMode(ctypes.Structure):
    """ctypes mirror of SDL2's SDL_DisplayMode struct."""
    _fields_ = [
        ('format', ctypes.c_int32),      # pixel format enum value
        ('w', ctypes.c_int),             # width in pixels
        ('h', ctypes.c_int),             # height in pixels
        ('refresh_rate', ctypes.c_int),  # Hz, or 0 when unspecified
        ('driverdata', ctypes.c_void_p)  # opaque driver-specific payload
    ]
class SDL_JoystickGUID(ctypes.Structure):
    """ctypes mirror of SDL_JoystickGUID: 16 opaque bytes (split 8+8 here)."""
    _fields_ = [
        ('data1', ctypes.c_uint8 * 8),
        ('data2', ctypes.c_uint8 * 8)
    ]
class c_any_pointer(object):
    """ctypes argtype placeholder that accepts any argument unchanged."""

    @classmethod
    def from_param(cls, value):
        # ctypes calls from_param() to convert arguments; pass through as-is.
        return value
def load_lib(*names):
    """Return the first shared library from *names* that ctypes can load.

    Candidates without a dot are first resolved through
    ctypes.util.find_library().  Raises Exception naming the first candidate
    (and the first load error seen) when nothing loads.
    """
    first_error = None
    for candidate in names:
        if '.' not in candidate:
            resolved = ctypes.util.find_library(candidate)
            if resolved is not None:
                candidate = resolved
        try:
            return ctypes.cdll.LoadLibrary(candidate)
        except OSError as err:
            # Remember only the first failure for the final error message.
            if first_error is None:
                first_error = err
    if first_error:
        reason = str(first_error)
    else:
        reason = 'Unknown'
    raise Exception(names[0] + ' could not be found! (%s)' % reason)
def double_zero_string(val):
    """Split a double-NUL-terminated string list into a list of Python strings.

    Accepts either a ctypes char pointer (indexing yields 1-byte bytes, as
    returned by OpenAL's alcGetString for device lists) or a plain bytes
    object (indexing yields ints).  FIX: the original only worked for the
    pointer case — with bytes, `val[end] != b'\\x00'` compares an int against
    bytes, never matches the terminator, and runs off the end (IndexError).
    """
    def _is_nul(ch):
        # POINTER(c_char) indexing gives b'\x00'; bytes indexing gives 0.
        return ch == b'\x00' or ch == 0

    data = []
    off = 0
    while val and not _is_nul(val[off]):
        end = off + 1
        while not _is_nul(val[end]):
            end += 1
        # Slicing works for both bytes and ctypes char pointers.
        data.append(val[off:end].decode(ENCODING, 'replace'))
        off = end + 1
    return data
def init_sdl():
global sdl, get_modes, list_joysticks, list_guid_joysticks, get_config_path
with sdl_init_lock:
if sdl:
return
if center.settings['sdl2_path']:
try:
sdl = load_lib(center.settings['sdl2_path'])
except Exception:
logging.exception('Failed to load user-supplied SDL2!')
if not sdl:
# Load SDL
if sys.platform == 'darwin' and hasattr(sys, 'frozen'):
try:
sdl = load_lib('../Frameworks/SDL2.framework/SDL2')
except Exception:
logging.exception('Failed to load bundled SDL2!')
if not sdl:
sdl = load_lib('libSDL2-2.0.so.0', 'SDL2', 'SDL2.dll', 'libSDL2.dylib')
# SDL constants
SDL_INIT_VIDEO = 0x00000020
SDL_INIT_JOYSTICK = 0x00000200
# SDL.h
sdl.SDL_SetMainReady.argtypes = []
sdl.SDL_SetMainReady.restype = None
sdl.SDL_Init.argtypes = [ctypes.c_uint32]
sdl.SDL_Init.restype = ctypes.c_int
sdl.SDL_InitSubSystem.argtypes = [ctypes.c_uint32]
sdl.SDL_InitSubSystem.restype = ctypes.c_int
sdl.SDL_QuitSubSystem.argtypes = [ctypes.c_uint32]
sdl.SDL_QuitSubSystem.restype = None
# SDL_error.h
sdl.SDL_GetError.argtypes = []
sdl.SDL_GetError.restype = ctypes.c_char_p
# SDL_video.h
sdl.SDL_VideoInit.argtypes = [ctypes.c_char_p]
sdl.SDL_VideoInit.restype = ctypes.c_int
sdl.SDL_VideoQuit.argtypes = []
sdl.SDL_VideoQuit.restype = None
sdl.SDL_GetNumVideoDisplays.argtypes = []
sdl.SDL_GetNumVideoDisplays.restype = ctypes.c_int
sdl.SDL_GetNumDisplayModes.argtypes = [ctypes.c_int]
sdl.SDL_GetNumDisplayModes.restype = ctypes.c_int
sdl.SDL_GetDisplayMode.argtypes = [ctypes.c_int, ctypes.c_int, ctypes.POINTER(SDL_DisplayMode)]
sdl.SDL_GetDisplayMode.restype = ctypes.c_int
sdl.SDL_GetCurrentDisplayMode.argtypes = [ctypes.c_int, ctypes.POINTER(SDL_DisplayMode)]
sdl.SDL_GetCurrentDisplayMode.restype = ctypes.c_int
# SDL_joystick.h
sdl.SDL_NumJoysticks.argtypes = []
sdl.SDL_NumJoysticks.restype = ctypes.c_int
sdl.SDL_JoystickNameForIndex.argtypes = [ctypes.c_int]
sdl.SDL_JoystickNameForIndex.restype = ctypes.c_char_p
sdl.SDL_JoystickGetDeviceGUID.argtypes = [ctypes.c_int]
sdl.SDL_JoystickGetDeviceGUID.restype = SDL_JoystickGUID
sdl.SDL_JoystickGetGUIDString.argtypes = [SDL_JoystickGUID, ctypes.c_char_p, ctypes.c_int]
sdl.SDL_JoystickGetGUIDString.restype = None
# SDL_filesystem.h
sdl.SDL_GetPrefPath.argtypes = [ctypes.c_char_p, ctypes.c_char_p]
sdl.SDL_GetPrefPath.restype = ctypes.c_char_p
sdl.SDL_SetMainReady()
if sdl.SDL_Init(0) != 0:
logging.error('Failed to init SDL!')
logging.error(sdl.SDL_GetError())
def get_modes():
    """Return the unique (width, height) tuples of all display modes.

    Temporarily initializes SDL's video subsystem; returns [] on failure.
    """
    if sdl.SDL_InitSubSystem(SDL_INIT_VIDEO) < 0 or sdl.SDL_VideoInit(None) < 0:
        logging.error('Failed to init SDL\'s video subsystem!')
        logging.error(sdl.SDL_GetError())
        return []
    resolutions = []
    mode = SDL_DisplayMode()
    for display in range(sdl.SDL_GetNumVideoDisplays()):
        for mode_idx in range(sdl.SDL_GetNumDisplayModes(display)):
            # The struct is reused; byref() lets SDL fill it in place.
            sdl.SDL_GetDisplayMode(display, mode_idx, ctypes.byref(mode))
            size = (mode.w, mode.h)
            if size not in resolutions:
                resolutions.append(size)
    sdl.SDL_VideoQuit()
    sdl.SDL_QuitSubSystem(SDL_INIT_VIDEO)
    return resolutions
def list_joysticks():
    """Return the decoded names of all attached joysticks ([] on failure)."""
    if sdl.SDL_InitSubSystem(SDL_INIT_JOYSTICK) < 0:
        logging.error('Failed to init SDL\'s joystick subsystem!')
        logging.error(sdl.SDL_GetError())
        return []
    names = [
        sdl.SDL_JoystickNameForIndex(idx).decode(ENCODING)
        for idx in range(sdl.SDL_NumJoysticks())
    ]
    sdl.SDL_QuitSubSystem(SDL_INIT_JOYSTICK)
    return names
def list_guid_joysticks():
    """Return (guid_string, index, name) for every attached joystick."""
    if sdl.SDL_InitSubSystem(SDL_INIT_JOYSTICK) < 0:
        logging.error('Failed to init SDL\'s joystick subsystem!')
        logging.error(sdl.SDL_GetError())
        return []
    results = []
    # 32 hex characters plus the terminating NUL.
    guid_buf = ctypes.create_string_buffer(33)
    for idx in range(sdl.SDL_NumJoysticks()):
        device_guid = sdl.SDL_JoystickGetDeviceGUID(idx)
        sdl.SDL_JoystickGetGUIDString(device_guid, guid_buf, 33)
        guid_text = guid_buf.raw.decode(ENCODING).strip('\x00')
        joy_name = sdl.SDL_JoystickNameForIndex(idx).decode(ENCODING)
        results.append((guid_text, idx, joy_name))
    sdl.SDL_QuitSubSystem(SDL_INIT_JOYSTICK)
    return results
def get_config_path():
    """Return FSO's per-user preferences directory as reported by SDL."""
    # See https://github.com/scp-fs2open/fs2open.github.com/blob/master/code/osapi/osapi.cpp
    raw_path = sdl.SDL_GetPrefPath(b'HardLightProductions', b'FreeSpaceOpen')
    return raw_path.decode('utf8')
# OpenAL constants (ALC enumeration-extension tokens from alc.h)
ALC_DEFAULT_DEVICE_SPECIFIER = 0x1004
ALC_DEVICE_SPECIFIER = 0x1005
ALC_ALL_DEVICES_SPECIFIER = 0x1013
ALC_CAPTURE_DEVICE_SPECIFIER = 0x310
ALC_CAPTURE_DEFAULT_DEVICE_SPECIFIER = 0x311
def init_openal():
    """Load the OpenAL library once and declare its ctypes prototypes.

    Tries the user-configured path first, then common system names.
    Returns True when OpenAL is available and False when loading failed.
    NOTE(review): the already-initialized early exit returns None rather
    than True -- treat the result as truthy/falsy only.
    """
    global alc, dev, ctx
    # Serialize initialization; only the first caller does the work.
    with alc_init_lock:
        if alc:
            return
        # Load OpenAL
        if center.settings['openal_path']:
            try:
                alc = load_lib(center.settings['openal_path'])
            except Exception:
                logging.exception('Failed to load user-supplied OpenAL!')
        if not alc:
            try:
                alc = load_lib('libopenal.so.1.15.1', 'openal', 'OpenAL', 'OpenAL32.dll')
            except Exception:
                logging.exception('Failed to load OpenAL!')
        if alc:
            # Declare argument/return types for the ALC entry points used below.
            alc.alcOpenDevice.argtypes = [ctypes.c_char_p]
            alc.alcOpenDevice.restype = ctypes.c_void_p
            alc.alcCreateContext.argtypes = [ctypes.c_void_p, ctypes.c_void_p]
            alc.alcCreateContext.restype = ctypes.c_void_p
            alc.alcMakeContextCurrent.argtypes = [ctypes.c_void_p]
            alc.alcMakeContextCurrent.restype = ctypes.c_bool
            alc.alcDestroyContext.argtypes = [ctypes.c_void_p]
            alc.alcDestroyContext.restype = None
            alc.alcCloseDevice.argtypes = [ctypes.c_void_p]
            alc.alcCloseDevice.restype = None
            alc.alcGetError.argtypes = [ctypes.c_void_p]
            alc.alcGetError.restype = ctypes.c_char_p
            alc.alcIsExtensionPresent.argtypes = [ctypes.c_void_p, ctypes.c_char_p]
            alc.alcIsExtensionPresent.restype = ctypes.c_bool
            alc.alcGetString.argtypes = [ctypes.c_void_p, ctypes.c_int]
            # POINTER(c_char) (not c_char_p) so double-zero terminated
            # device lists can be walked manually by double_zero_string().
            alc.alcGetString.restype = ctypes.POINTER(ctypes.c_char)
            return True
        else:
            return False
# GTK handles; remain None when GTK is unavailable or fails to load.
gtk = None
gobject = None
def init_gtk():
    """Load GTK2 + GObject once and initialize GTK.

    Returns True on success, False otherwise.  GTK is optional: load
    failures are logged and both globals are reset to None.
    NOTE(review): when gtk_init_check() itself fails, gtk stays non-None;
    confirm whether callers should see it reset in that case too.
    """
    global gtk, gobject
    if gtk:
        return True
    # Load GTK2
    try:
        gtk = load_lib('libgtk-x11-2.0.so.0', 'gtk-x11-2.0')
        gobject = load_lib('libgobject-2.0.so.0', 'gobject-2.0')
    except Exception:
        logging.exception('Failed to load GTK!')
        # Maybe GTK isn't used.
        gtk = None
        gobject = None
    else:
        # Declare prototypes for the handful of calls used by this module.
        gtk.gtk_init_check.argtypes = [ctypes.POINTER(ctypes.c_int), ctypes.POINTER(ctypes.POINTER(ctypes.c_char_p))]
        gtk.gtk_init_check.restype = ctypes.c_bool
        gtk.gtk_settings_get_default.argtypes = []
        gtk.gtk_settings_get_default.restype = ctypes.c_void_p
        gobject.g_object_get.argtypes = [ctypes.c_void_p, ctypes.c_char_p, c_any_pointer, ctypes.c_void_p]
        gobject.g_object_get.restype = None
        gobject.g_free.argtypes = [ctypes.c_void_p]
        gobject.g_free.restype = None
        gobject.g_object_unref.argtypes = [ctypes.c_void_p]
        gobject.g_object_unref.restype = None
        # gtk_init_check() reports failure instead of aborting like gtk_init().
        if not gtk.gtk_init_check(None, None):
            logging.error('Failed to initialize GTK!')
        else:
            return True
    return False
def can_detect_audio():
    """Return True if OpenAL is loaded and supports device enumeration.

    Fix: the original dereferenced `alc` unconditionally and raised
    AttributeError when OpenAL had failed to load; guard on `alc` first,
    matching list_audio_devs().
    """
    if not alc:
        return False
    return alc.alcIsExtensionPresent(None, b'ALC_ENUMERATION_EXT')
def list_audio_devs():
    """Enumerate OpenAL devices.

    Returns (playback_devices, default_playback, capture_devices,
    default_capture); empty values when OpenAL is unavailable.
    """
    if not alc:
        return [], '', [], ''
    # ALC_ENUMERATE_ALL_EXT exposes the complete device list when supported.
    if alc.alcIsExtensionPresent(None, b'ALC_ENUMERATE_ALL_EXT'):
        spec = ALC_ALL_DEVICES_SPECIFIER
    else:
        spec = ALC_DEVICE_SPECIFIER
    # List specifiers yield double-zero terminated string lists, split by
    # double_zero_string(); defaults are ordinary zero-terminated strings.
    devs = double_zero_string(alc.alcGetString(None, spec))
    default = alc.alcGetString(None, ALC_DEFAULT_DEVICE_SPECIFIER)
    captures = double_zero_string(alc.alcGetString(None, ALC_CAPTURE_DEVICE_SPECIFIER))
    default_capture = alc.alcGetString(None, ALC_CAPTURE_DEFAULT_DEVICE_SPECIFIER)
    default = ctypes.cast(default, ctypes.c_char_p).value.decode(ENCODING, 'replace')
    default_capture = ctypes.cast(default_capture, ctypes.c_char_p).value.decode(ENCODING, 'replace')
    return devs, default, captures, default_capture
def g_object_get_string(obj, prop):
    """Read string property *prop* from GObject *obj* and return it decoded.

    Frees the GLib-allocated copy before returning.
    """
    prop = ctypes.c_char_p(prop.encode('utf8'))
    value = ctypes.c_char_p()
    # g_object_get(obj, name, &out, NULL): trailing 0 ends the vararg list.
    gobject.g_object_get(obj, prop, ctypes.byref(value), 0)
    py_value = value.value
    if py_value:
        py_value = py_value.decode('utf8', 'replace')
    # The returned string is a copy owned by us; release it with g_free().
    gobject.g_free(value)
    return py_value
def get_gtk_theme():
    """Return the GTK theme name, '' when GTK is unavailable, None when
    no default settings object exists."""
    global gtk, gobject
    if not gtk:
        return ''
    settings = gtk.gtk_settings_get_default()
    if not settings:
        return None
    return g_object_get_string(settings, 'gtk-theme-name')
def list_voices():
    """Return SAPI voice descriptions (Windows only; [] elsewhere/on error)."""
    if sys.platform != 'win32':
        return []
    try:
        import win32com.client as cc
        sapi = cc.Dispatch('SAPI.SpVoice')
        return [entry.GetDescription() for entry in sapi.GetVoices()]
    except Exception:
        logging.exception('Failed to retrieve voices!')
        return []
def speak(voice, volume, text):
    """Speak *text* with SAPI voice index *voice* at *volume* (Windows only).

    Returns True on success, False on other platforms or on failure.
    """
    if sys.platform != 'win32':
        return False
    try:
        import win32com.client as cc
        sapi = cc.Dispatch('SAPI.SpVoice')
        # We always seem to receive an AttributeError when we try to access
        # SetVoice the first time. It works the second time for whatever reason... >_>
        try:
            sapi.SetVoice
        except AttributeError:
            pass
        sapi.SetVoice(sapi.GetVoices()[voice])
        sapi.Volume = volume
        # NOTE(review): flag value 19 -- presumably async/purge SpeakFlags;
        # confirm against the SAPI documentation.
        sapi.Speak(text, 19)
        sapi.WaitUntilDone(10000)
        return True
    except Exception:
        logging.exception('Failed to speak!')
        return False
|
import os
def SizeAndBig(file='Weird.txt'):
    """Print the size of *file* in bytes, then its contents upper-cased.

    Fix: the original leaked the file handle from a bare open(); a
    context manager now guarantees it is closed.
    """
    print(os.stat(file).st_size)
    with open(file, 'r') as handle:
        print(handle.read().upper())

if __name__ == '__main__':
    # Guarded so importing this module does not require Weird.txt to exist.
    SizeAndBig()
|
from io import BytesIO
import cv2
import time
import keras
import numpy as np
import playsound
from PIL import Image
import tensorflow as tf
from mongodb_tools.mongo_upload import make_violator_entry_in_db
def load_model(model_path):
    """Load the model from *model_path*.

    Returns: Keras SavedModel object.
    """
    return keras.models.load_model(model_path)
def makePrediction(frame, model):
    """Run *model* on *frame* and return the scalar prediction score.

    Returns: float-like value after squeezing singleton dimensions.
    """
    raw_output = model.predict(frame)
    return np.squeeze(raw_output)
def videoCapture(model_path="facemask-model", camera="webcam"):
    """
    Captures video from webcam using OpenCV; if a person is assessed as not
    wearing a mask for enough sampled frames, they are treated as a violator
    and their image is uploaded to MongoDB via make_violator_entry_in_db().
    Returns: None
    """
    if camera == "webcam":
        cam = cv2.VideoCapture(0)
    # NOTE(review): 'cam' is unbound for any other *camera* value -- confirm
    # only "webcam" is ever passed.
    cv2.namedWindow("Maks ~ safety, simplified.")
    # Loading model
    model = load_model(model_path)
    # Initializing helper variables
    count = 0                # frames seen; inference runs every 100th frame
    previousViolations = 0   # consecutive "no mask" inferences
    previousCorrects = 0     # consecutive "mask on" inferences
    while True:
        ret, frame = cam.read()
        count += 1
        if not ret:
            # Debugging convenience
            # print("Failed to grab frame! Restart Maks!")
            break
        # Flipping the video
        frame = cv2.flip(frame, 1)
        # Adding rectangle to guide face placement
        frame = cv2.rectangle(frame, (450, 100), (850, 600), (187, 9, 232), 3)
        cv2.putText(frame, "Position face inside the rectangle!", (520, 92), cv2.FONT_HERSHEY_DUPLEX, 0.6, (0, 0, 0), 1, cv2.LINE_AA)
        cv2.imshow("Maks ~ safety, simplified.", frame)
        if count % 100 == 0:
            # Preprocessing frame before passing to model.
            # NOTE(review): this rebinds 'frame' to the RGB version, which is
            # later saved to the DB and by the SPACE handler -- confirm intended.
            frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
            img_height = img_width = 256
            img = cv2.resize(frame, (img_height, img_width))
            img = tf.expand_dims(img, 0)
            # Running inference on clicked frame
            prediction = makePrediction(img, model)
            if prediction < 0.5:
                result = "Wearing mask"
                if previousCorrects == 4:
                    # Play sound
                    playsound.playsound("dependencies/non_violator.wav")
                    previousCorrects = 0
                previousCorrects += 1
            elif prediction >= 0.5:
                result = "Not wearing mask!"
                if previousViolations == 4:
                    # Play sound
                    playsound.playsound("dependencies/violator.wav")
                    # Make db-uploading calls
                    img_buffer = BytesIO()  # initializing an image buffer
                    frameImage = Image.fromarray(frame)
                    frameImage.save(img_buffer, format='png')  # saving image as the image buffer
                    img_buffer.seek(0)  # seeking to the start
                    # passing the confidence value and the image buffer to the mongodb utility
                    make_violator_entry_in_db(float(prediction), img_buffer)
                    # Reset violations to 0 after sending violator's info to DB
                    previousViolations = 0
                previousViolations += 1
            print(result)
        k = cv2.waitKey(1)
        if k % 256 == 27:
            # ESC pressed
            print("Escape hit, closing Maks...")
            break
        elif k % 256 == 32:
            # SPACE pressed
            # NOTE(review): "img_name" is a literal filename without an
            # extension -- presumably meant to be a generated name; confirm.
            cv2.imwrite("img_name", frame)
    cam.release()
    cv2.destroyAllWindows()

if __name__ == "__main__":
    videoCapture()
"""Computes p-values for paired statistical tests over input vectors"""
import numpy as np
from numpy import asarray, compress, sqrt
from scipy.stats import find_repeats, rankdata, norm, ttest_rel
from anamod.core import constants, utils
def compute_empirical_p_value(baseline_loss, perturbed_loss, statistic):
    """Compute Monte Carlo estimate of empirical permutation-based p-value.

    Args:
        baseline_loss: per-instance losses for the unperturbed model.
        perturbed_loss: (num_instances, num_permutations) array of losses.
        statistic: an anamod constants name selecting the summary statistic.

    Returns:
        Add-one-smoothed fraction of permutations whose statistic does not
        exceed the baseline statistic (small p => perturbation hurts).
    """
    num_instances, num_permutations = perturbed_loss.shape
    if statistic == constants.MEAN_LOSS:
        baseline_statistic = np.mean(baseline_loss)
        perturbed_statistic = np.mean(perturbed_loss, axis=0)
    elif statistic == constants.MEAN_LOG_LOSS:
        baseline_statistic = np.mean(np.log(baseline_loss))
        perturbed_statistic = np.mean(np.log(perturbed_loss), axis=0)
    elif statistic == constants.MEDIAN_LOSS:
        baseline_statistic = np.median(baseline_loss)
        perturbed_statistic = np.median(perturbed_loss, axis=0)
    elif statistic == constants.RELATIVE_MEAN_LOSS:
        # Mean of per-instance loss ratios; the baseline ratio is 1 by definition.
        perturbed_statistic = np.zeros(num_permutations)
        for kidx in range(num_permutations):
            normalized_loss = np.divide(perturbed_loss[:, kidx], baseline_loss)
            perturbed_statistic[kidx] = np.mean(normalized_loss)
        baseline_statistic = 1
    elif statistic == constants.SIGN_LOSS:
        # Sign statistic: +1/0/-1 depending on whether a majority of
        # instances got worse (1e-10 guards against float equality noise).
        threshold = num_instances // 2
        perturbed_statistic = np.zeros(num_permutations)
        for kidx in range(num_permutations):
            count = sum(perturbed_loss[:, kidx] > baseline_loss + 1e-10)
            if count > threshold:
                perturbed_statistic[kidx] = 1
            elif count == threshold:
                perturbed_statistic[kidx] = 0
            else:
                perturbed_statistic[kidx] = -1
        baseline_statistic = 0
    else:
        raise ValueError(f"Unknown statistic {statistic}")
    # Baseline statistic should be smaller to reject null
    # Add-one smoothing keeps the estimate strictly inside (0, 1].
    return (1 + sum(perturbed_statistic <= baseline_statistic + 1e-10)) / (1 + num_permutations)
def compute_p_value(baseline, perturbed, test=constants.PAIRED_TTEST, alternative=constants.TWOSIDED):
    """Compute p-value using paired difference test on input numpy arrays.

    test: constants.PAIRED_TTEST (always two-sided) or
    constants.WILCOXON_TEST (tail chosen by *alternative*).
    """
    # TODO: Implement one-sided t-tests
    # Rounding suppresses spurious tiny differences from float noise.
    baseline = utils.round_value(baseline, decimals=15)
    perturbed = utils.round_value(perturbed, decimals=15)
    # Perform statistical test
    valid_tests = [constants.PAIRED_TTEST, constants.WILCOXON_TEST]
    assert test in valid_tests, f"Invalid test name {test}"
    if test == constants.PAIRED_TTEST:
        # Two-tailed paired t-test
        pvalue = ttest_rel(baseline, perturbed).pvalue
        if np.isnan(pvalue):
            # Identical vectors yield nan; report "no evidence" instead.
            pvalue = 1.0
        return pvalue
    # One-tailed Wilcoxon signed-rank test
    return wilcoxon_test(baseline, perturbed, alternative=alternative)
def wilcoxon_test(x, y, alternative):
    """
    One-sided Wilcoxon signed-rank test derived from Scipy's two-sided test
    e.g. for alternative == constants.LESS, rejecting the null means that median difference x - y < 0
    Returns p-value (normal approximation with tie and continuity corrections)
    """
    # TODO: add unit tests to verify results identical to R's Wilcoxon test for a host of input values
    # pylint: disable = invalid-name, too-many-locals
    x, y = map(asarray, (x, y))
    d = x - y
    # Drop zero differences ("wilcox" zero handling).
    d = compress(np.not_equal(d, 0), d, axis=-1)
    count = len(d)
    r = rankdata(abs(d))
    # T: sum of ranks of the positive differences.
    T = np.sum((d > 0) * r, axis=0)
    # Null mean of T and (pre-normalization) variance numerator.
    mn = count * (count + 1.) * 0.25
    se = count * (count + 1.) * (2. * count + 1.)
    if se < 1e-20:
        return 1.  # Degenerate case
    _, repnum = find_repeats(r)
    if repnum.size != 0:
        # Correction for repeated elements.
        se -= 0.5 * (repnum * (repnum * repnum - 1)).sum()
    se = sqrt(se / 24)
    # Continuity correction, signed according to the requested tail.
    if alternative == constants.LESS:
        correction = -0.5
    elif alternative == constants.GREATER:
        correction = 0.5
    else:
        correction = 0.5 * np.sign(T - mn)  # two-sided
    z = (T - mn - correction) / se
    if alternative == constants.LESS:
        return norm.cdf(z)
    if alternative == constants.GREATER:
        return norm.sf(z)
    return 2 * min(norm.cdf(z), norm.sf(z))  # two-sided
def bh_procedure(pvalues, significance_level):
    """Benjamini-Hochberg procedure for FDR control.

    Returns (adjusted_pvalues, rejected_hypotheses), both tuples ordered
    like the input *pvalues*.
    """
    # pylint: disable = invalid-name
    m = len(pvalues)
    # Original indices, ordered by ascending p-value (stable for ties).
    order = sorted(range(m), key=lambda pos: pvalues[pos])
    adjusted = np.ones(m)
    rejected = [False] * m
    num_rejected = 0
    for rank0, orig_idx in enumerate(order):
        rank = rank0 + 1
        pvalue = pvalues[orig_idx]
        adjusted[rank0] = m / rank * pvalue
        # Largest rank whose p-value clears its BH critical constant.
        if pvalue < rank * significance_level / m:
            num_rejected = rank
    for rank0 in range(num_rejected):
        rejected[rank0] = True
    # Enforce monotonicity of adjusted p-values, walking from the largest.
    # See http://www.biostathandbook.com/multiplecomparisons.html
    for rank0 in reversed(range(m - 1)):
        adjusted[rank0] = min(adjusted[rank0], adjusted[rank0 + 1])
    # Restore the caller's original ordering.
    restored = sorted(zip(order, adjusted, rejected), key=lambda item: item[0])
    _, adjusted_pvalues, rejected_hypotheses = zip(*restored)
    return adjusted_pvalues, rejected_hypotheses
|
from django.http import HttpResponse
from django.views.decorators.csrf import csrf_exempt
from oauth2_provider.decorators import protected_resource
from sqlshare_rest.models import FileUpload
from sqlshare_rest.views import get_oauth_user, get403, get404, get400
from sqlshare_rest.parser import Parser
from sqlshare_rest.dao.user import get_user
from sqlshare_rest.logger import getLogger
from sqlshare_rest.parser import open_encoded
import chardet
import json
logger = getLogger(__name__)
@csrf_exempt
@protected_resource()
def parser(request, id):
    """REST endpoint for a file upload's CSV-parser settings.

    PUT stores the user-chosen delimiter/header settings and re-parses
    the file; GET returns the current parser state as JSON.  Responds
    404 for unknown uploads and 403 when the requester is not the owner.
    NOTE(review): a PUT request falls through without an explicit
    HttpResponse (the view returns None) -- confirm whether PUT is meant
    to return the updated parser JSON as well.
    """
    get_oauth_user(request)
    try:
        upload = FileUpload.objects.get(pk=id)
    except FileUpload.DoesNotExist:
        return get404()
    user = get_user(request)
    # Only the upload's owner may read or change parser settings.
    if upload.owner.username != user.username:
        return get403()
    if request.META["REQUEST_METHOD"] == "PUT":
        p = Parser()
        values = json.loads(request.body.decode("utf-8"))
        delimiter = values["parser"]["delimiter"]
        has_column_header = values["parser"]["has_column_header"]
        p.delimiter(delimiter)
        p.has_header_row(has_column_header)
        logger.info("File upload, PUT parser; ID: %s; delimiter: %s; "
                    "has_column_header: %s" % (upload.pk,
                                               delimiter,
                                               has_column_header),
                    request)
        _update_from_parser(upload, p)
    # First GET after upload: guess settings from the file contents.
    if not upload.has_parser_values:
        try:
            p = Parser()
            file_path = upload.user_file.path
            handle = open_encoded(file_path, "U")
            p.guess(handle.read())
            handle.close()
            _update_from_parser(upload, p)
        except Exception as ex:
            return get400("Error parsing file: %s" % ex)
    if request.META["REQUEST_METHOD"] == "GET":
        logger.info("File upload, GET parser; ID: %s" % (upload.pk), request)
        return HttpResponse(json.dumps(upload.parser_json_data()))
def _update_from_parser(upload, parser):
    """Apply *parser*'s settings to *upload* and store a data preview.

    Persists header flag, delimiter, column list and up to
    MAX_PARSER_PREVIEW sample rows, then marks has_parser_values.
    NOTE(review): parse() is called twice on the same handle and only
    upload.user_file is seek(0)'d between the calls -- confirm Parser
    rewinds or buffers internally, otherwise the second parse reads EOF.
    """
    file_path = upload.user_file.path
    handle = open_encoded(file_path, "U")
    parser.parse(handle)
    upload.has_column_header = parser.has_header_row()
    upload.delimiter = parser.delimiter()
    upload.column_list = json.dumps(parser.column_names())
    upload.user_file.seek(0)
    parser.parse(handle)
    preview = []
    for row in parser:
        preview.append(row)
    upload.sample_data = json.dumps(preview[:FileUpload.MAX_PARSER_PREVIEW])
    upload.has_parser_values = True
    upload.save()
|
# from .data_formater import print_experiment
from .monitor import log_value
from .experiment import Experiment
from .multistage import MultiStageExperiment
|
# User-facing Telegram messages (HTML parse mode).
# Fix: removed a stray trailing double quote after the chromedriver command.
BROWSER_ERROR_MSG = """\
Browser setup error : {error}
Seems you don't have installed the requirements packages, if you need image output run the following commands on your server.
<b>Meanwhile You can use text version of Telminal, type any command!</b>
`sudo apt-get install chromium-chromedriver`
`sudo apt install -y gconf-service libasound2 libatk1.0-0 libc6 libcairo2 libcups2 libdbus-1-3 libexpat1 libfontconfig1 libgcc1 libgconf-2-4 libgdk-pixbuf2.0-0 libglib2.0-0 libgtk-3-0 libnspr4 libpango-1.0-0 libpangocairo-1.0-0 libstdc++6 libx11-6 libx11-xcb1 libxcb1 libxcomposite1 libxcursor1 libxdamage1 libxext6 libxfixes3 libxi6 libxrandr2 libxrender1 libxss1 libxtst6 ca-certificates fonts-liberation libappindicator1 libnss3 lsb-release xdg-utils wget`
Finally, send `!setup_browser` on this chat again.
"""
ACTIVE_TASKS_MSG = (
    "This is list of your active tasks\nyou can <b>cancel</b> each one by tapping"
)
EMPTY_TASKS_MSG = """\
Tasks list is empty
Create new watcher same as below examples:
1️⃣ get `telminal.log` every 50 seconds
👉 <b>!watch 50s telminal.log</b>
2️⃣ get `temp.png` every 5 minutes
👉 <b>!watch 5m home/temp.png</b>
3️⃣ get `sql.dump` every 24 hours
👉 <b>!watch 24h /backups/sql.dump</b>
Be respectfull to telegram API limitations please:
https://core.telegram.org/bots/faq#my-bot-is-hitting-limits-how-do-i-avoid-this
"""
# Filled positionally: pid, status, start time, last update, run time.
PROCESS_INFO_MSG = """\
PID : {}
Status : {}
Start time : {}
Last update : {}
Run time: {}
"""
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
app.admin.forms
~~~~~~~~~~~~~~~~~~~~
The module provides the forms for admin
:copyright: (c) 2016 by zifeiyu.
:license: MIT, see LICENSE for more details.
"""
from flask_wtf import Form
from wtforms import StringField, PasswordField, BooleanField, TextField, SelectField, TextAreaField
from wtforms.validators import DataRequired, Email
from zifeiyu.constants import POST_STATUS
from wtforms.widgets import Select, TextArea, TextInput, HiddenInput
from zifeiyu.models import Column
class LoginForm(Form):
    """Admin login form: email + password, with a remember-me flag."""
    email = StringField(('邮箱'), validators=[
        DataRequired(message=("邮箱必填")),
        Email(message=("无效的邮箱"))
    ])
    password = PasswordField(("密码"), validators=[
        DataRequired(message=("密码必填"))]
    )
    remember_me = BooleanField(("记住我"), default=False)
class PostForm(Form):
    """Form for creating/editing a Post: title, status, column, tags,
    abstract and content."""
    title = TextField((u'标题'), validators=[
        DataRequired(message=(u"标题必填"))
    ])
    status = SelectField(u'状态', choices=POST_STATUS, option_widget=Select)
    column = SelectField(u'栏目', option_widget=Select)
    tags = TextField((u'标签'))
    abstract = TextAreaField((u'摘要'),
                             validators=[DataRequired(message=(u"摘要必填"))]
                             )
    content = TextAreaField((u'内容'),
                            validators=[DataRequired(message=(u"内容必填"))]
                            )
    def generateColumnOption(self, options):
        """Populate the column select's choices at request time (the
        available columns live in the database)."""
        # print options
        self.column.choices = options
class AddColumnForm(Form):
    """Form for adding a Column (category): a single required name field."""
    label = TextField(('栏目名称'), validators=[
        DataRequired(message=("栏目名称必填"))
    ])
class AddTagForm(Form):
    """Form for adding a Tag.

    NOTE(review): label/message still read '栏目名称' (column name),
    apparently copy-pasted from AddColumnForm -- confirm whether the
    user-facing text should say tag name before changing it.
    """
    label = TextField(('栏目名称'), validators=[
        DataRequired(message=("栏目名称必填"))
    ])
#!/usr/bin/env python
##
## See COPYING file distributed along with the ncanda-data-integration package
## for the copyright and license terms
##
"""
ncanda_quality_control_script
======================
This script checks the quality of the data for the NCANDA Project on REDCap.
Call script on command line.
Example Usage:
python ncanda_quality_control_script.py -v "baseline_visit_arm_1"
"""
import os
import sys
import json
import datetime
import csv
import redcap
import math
import pandas as pd
import sibis
# REDCap fields exported for QC: per-visit flags, dates and raw scores.
fields = ['study_id', 'redcap_event_name', 'exclude', 'visit_ignore',
          'visit_date', 'dob', 'cnp_test_sessions_dob', 'saliva_missing',
          'saliva_1_collected', 'saliva_1_date', 'saliva_2_collected', 'saliva_2_date',
          'saliva_3_collected', 'saliva_3_date', 'saliva_4_collected',
          'saliva_4_date', 'youthreport1_missing', 'youthreport1_date',
          'youthreport1b_missing', 'youthreport1b_date', 'youthreport2_missing',
          'youthreport2_date', 'youthreport2_yid2', 'youthreport1_yid2',
          'parentreport_missing', 'parentreport_date', 'ssage_youth_missing',
          'ssage_youth_date', 'lssaga1_youth_missing', 'lssaga1_youth_date',
          'lssaga1_parent_missing', 'lssaga1_parent_date', 'bio_np_missing',
          'bio_np_date', 'dd1000_missing', 'dd1000_date', 'dd100_missing',
          'dd100_date', 'np_wrat4_missing', 'np_wrat4_wr_raw', 'np_gpeg_missing',
          'np_gpeg_exclusion', 'np_gpeg_dh_time', 'np_gpeg_ndh_time',
          'np_reyo_missing', 'np_reyo_copy_time', 'np_reyo_qc(completed)',
          'np_atax_missing', 'np_atax_sht_trial1', 'np_wais4_missing',
          'np_wais4_rawscore', 'np_wais4_rawscore_computed',
          'np_wais4_rawscore_diff(correct)', 'pasat_missing', 'pasat_date',
          'cnp_missing', 'cnp_test_sessions_dotest', 'stroop_missing',
          'stroop_date', 'mrireport_missing', 'mrireport_date',
          'mr_session_report_complete']
# (missing-flag, value/date) field pairs checked by missing_form().
form_fields = [['youthreport1_missing', 'youthreport1_date'],
               ['youthreport1b_missing', 'youthreport1b_date'],
               ['youthreport2_missing', 'youthreport2_date'],
               ['parentreport_missing', 'parentreport_date'],
               ['ssage_youth_missing', 'ssage_youth_date'],
               ['lssaga1_youth_missing', 'lssaga1_youth_date'],
               ['lssaga1_parent_missing', 'lssaga1_parent_date'],
               ['bio_np_missing', 'bio_np_date'],
               ['dd1000_missing', 'dd1000_date'],
               ['dd100_missing', 'dd100_date'],
               ['np_wrat4_missing', 'np_wrat4_wr_raw'],
               ['np_reyo_missing', 'np_reyo_copy_time'],
               ['np_atax_missing', 'np_atax_sht_trial1'],
               ['np_wais4_missing', 'np_wais4_rawscore'],
               ['pasat_missing', 'pasat_date'],
               ['cnp_missing', 'cnp_test_sessions_dotest'],
               ['stroop_missing', 'stroop_date']]
# Grooved Pegboard: (hand exclusion checkbox, hand completion time) pairs.
np_gpeg_fields = [['np_gpeg_exclusion___dh', 'np_gpeg_dh_time'],
                  ['np_gpeg_exclusion___ndh', 'np_gpeg_ndh_time']]
# Saliva samples: (collected flag, collection date) pairs.
saliva_fields = [['saliva_1_collected', 'saliva_1_date'],
                 ['saliva_2_collected', 'saliva_2_date'], ['saliva_3_collected',
                 'saliva_3_date'], ['saliva_4_collected', 'saliva_4_date']]
# Youth reports: (missing flag, reported-sex field) pairs.
fields_sex = [['youthreport1_missing', 'youthreport1_yid2'],
              ['youthreport2_missing', 'youthreport2_yid2']]
def get_project_entry(args=None):
    """Connect to the NCANDA REDCap API and return the project handle.

    Reads the API token from ~/.server_config/redcap-dataentry-token.
    Fix: the token file handle was never closed; a context manager now
    guarantees it is.
    """
    # Get API key.
    token_path = os.path.join(os.path.expanduser("~"),
                              '.server_config',
                              'redcap-dataentry-token')
    with open(token_path, 'r') as summary_key_file:
        summary_api_key = summary_key_file.read().strip()
    # Connect to API.
    project_entry = redcap.Project('https://ncanda.sri.com/redcap/api/',
                                   summary_api_key, verify_ssl=False)
    return project_entry
def data_entry_fields(fields, project, arm):
    """Export *fields* for REDCap events *arm* from *project* as a DataFrame."""
    return project.export_records(fields=fields, format='df', events=arm)
def check(check, error):
    """Append a truthy error record (non-empty dict) to the *error* list."""
    if not check:
        return
    error.append(check)
def missing_form(idx, row, field_missing, field_value):
    """Report a form whose value field was never entered on REDCap.

    idx is a (subject_site_id, event_name) pair; field_missing/field_value
    are paired REDCap field names.  Returns an error dict, or {} when the
    visit is excluded/ignored, the form is flagged missing, or the value
    is present.
    """
    no_error = dict()
    # A non-NaN 'exclude' means the subject is excluded from QC.
    if not math.isnan(row.get('exclude')):
        return no_error
    if row.get('visit_ignore___yes') == 1:
        return no_error
    if row.get(field_missing) == 1:
        return no_error
    value = row.get(field_value)
    # Dates are stored as strings; a float here can only be NaN (blank cell).
    if type(value) != float or not math.isnan(value):
        return no_error
    return dict(subject_site_id=idx[0],
                visit_date=row.get('visit_date'),
                form_missing=field_missing,
                event_name=idx[1],
                error='ERROR: Form is missing')
def np_groove_check(idx, row, field_missing, field_excluded, field_value):
    """Report Grooved Pegboard (np_gpeg) trials with no recorded time.

    field_missing: overall np_gpeg missing flag; field_excluded: per-hand
    exclusion checkbox; field_value: that hand's completion time.
    Returns an error dict, or {} when nothing is wrong.
    """
    # visit_ignore____yes with value 0 is not ignored
    error = dict()
    if math.isnan(row.get('exclude')):
        if row.get('visit_ignore___yes') == 0:
            # This hand is not excluded when the exclusion checkbox is 0.
            if row.get(field_excluded) == 0:
                # NP is not flagged missing when the flag is 0 or blank (NaN).
                if row.get(field_missing) == 0 or math.isnan(row.get(field_missing)):
                    # for np_date, date is stored as a string
                    if type(row.get(field_value)) == float:
                        # If field is left blank, a NaN is put in it's place
                        if math.isnan(row.get(field_value)):
                            error = dict(subject_site_id = idx[0],
                                         visit_date = row.get('visit_date'),
                                         np_missing = field_missing,
                                         event_name = idx[1],
                                         error = 'ERROR: NP is missing.'
                                         )
    return error
def fourteen_days_mri_report(idx, row):
    """Report MRI reports that still have no data 14 days after their date.

    Fixes vs. the original (which could never fire):
    - the string-type check inspected 'mrireport_missing' instead of
      'mrireport_date', so the date comparison was unreachable;
    - strptime() yields a datetime, which never compares equal to a
      datetime.date -- compare calendar dates instead.
    """
    error = dict()
    # exclude with a value of 1 is excluded
    if math.isnan(row.get('exclude')):
        # visit_ignore____yes with value 1 is ignored
        if row.get('visit_ignore___yes') != 1:
            if row.get('mrireport_missing') != 1:
                report_date = row.get('mrireport_date')
                # Dates are stored as strings; anything else means blank.
                if type(report_date) == str:
                    entered = datetime.datetime.strptime(report_date, '%Y-%m-%d').date()
                    if entered == datetime.date.today() - datetime.timedelta(days=14):
                        error = dict(subject_site_id=idx[0],
                                     visit_date=row.get('visit_date'),
                                     event_name=idx[1],
                                     error='ERROR: No MRI data after 14 days')
    return error
def cnp_dob(idx, row):
    """Report visits where 'dob' and 'cnp_test_sessions_dob' disagree.

    Fix: the original raised the error when the two DOBs were EQUAL --
    the opposite of its stated purpose and of the error message -- so the
    comparison is now !=.
    """
    # visit_ignore____yes with value 0 is not ignored
    error = dict()
    if math.isnan(row.get('exclude')):
        if row.get('visit_ignore___yes') == 0:
            if row.get('dob') != row.get('cnp_test_sessions_dob'):
                error = dict(subject_site_id=idx[0],
                             visit_date=row.get('visit_date'),
                             event_name=idx[1],
                             error='ERROR: DOB and CNP_TEST_SESSIONS_DOB do not match.'
                             )
    return error
def missing_mri_stroop(idx, row):
    """Report MRI Stroop sessions (SRI/UCSD sites only) with no date entered.

    NOTE(review): 'mri_missing', 'redcap_data_access_group',
    'mri_stroop_missing', 'mri_stroop_date' and 'mri_xnat_sid' are not in
    the exported `fields` list above, so row.get() would return None for
    them -- confirm the export actually includes these columns.
    """
    # visit_ignore____yes with value 0 is not ignored
    error = dict()
    if math.isnan(row.get('exclude')):
        if row.get('visit_ignore___yes') != 1:
            # MRI Report is not missing if form_missing if value nan or zero
            if row.get('mri_missing') != 1:
                # Stroop is only administered at the two scanner sites.
                if row.get('redcap_data_access_group') == 'SRI' or row.get('redcap_data_access_group') == 'UCSD':
                    if row.get('mri_stroop_missing') == 0:
                        # for mri_stroop_date, date is stored as a string, if blank, defaults to NaN
                        if type(row.get('mri_stroop_date')) == float:
                            error = dict(subject_site_id = idx[0],
                                         xnat_sid = row.get('mri_xnat_sid'),
                                         visit_date = row.get('visit_date'),
                                         event_name = idx[1],
                                         error = 'ERROR: MRI Stroop is missing'
                                         )
    return error
def missing_saliva_sample(idx, row, saliva_collected, saliva_date):
    """Report saliva samples marked collected but lacking a collection date.

    saliva_collected/saliva_date: paired field names for one of the four
    samples.  Returns an error dict, or {} when nothing is wrong.
    NOTE(review): 'visit_notes' is not in the exported `fields` list --
    confirm the export includes it.
    """
    # visit_ignore____yes with value 0 is not ignored
    error = dict()
    if math.isnan(row.get('exclude')):
        if row.get('visit_ignore___yes') != 1:
            # Skip visits where saliva collection as a whole is flagged missing.
            if row.get('saliva_missing') != 1:
                if row.get(saliva_collected) == 1:
                    # Dates are strings; a float here can only be NaN (blank).
                    if type(row.get(saliva_date)) == float:
                        error = dict(subject_site_id = idx[0],
                                     visit_date = row.get('visit_date'),
                                     event_name = idx[1],
                                     sample_missing = saliva_collected,
                                     visit_notes = row.get('visit_notes'),
                                     error = 'ERROR: Saliva Sample is missing'
                                     )
    return error
def visit_data_missing(idx, row):
    """Report visits whose visit date was never entered.

    Returns an error dict, or {} when the visit is excluded/ignored or the
    date (a string when present) exists.
    """
    if row.get('exclude') == 1:
        return dict()
    if row.get('visit_ignore___yes') == 1:
        return dict()
    if type(row.get('visit_date')) == str:
        return dict()
    return dict(subject_site_id=idx[0],
                event_name=idx[1],
                error='ERROR: Visit date missing.')
def wais_score_verification(idx, row):
    """Flag WAIS-IV records whose raw score failed verification.

    NOTE(review): the error fires when the computed score EQUALS the
    'diff(correct)' field and that field is nonzero.  The intent is
    presumably "computed differs from entered" -- confirm the REDCap
    semantics of np_wais4_rawscore_diff(correct) before changing.
    """
    # visit_ignore____yes with value 0 is not ignored
    error = dict()
    if math.isnan(row.get('exclude')):
        if row.get('visit_ignore___yes') != 1:
            # form is not missing if form_missing if value nan or zero
            if row.get('np_wais4_missing') != 1:
                if row.get('np_wais4_rawscore_computed') == row.get('np_wais4_rawscore_diff(correct)'):
                    if row.get('np_wais4_rawscore_diff(correct)') != 0:
                        error = dict(subject_site_id = idx[0],
                                     visit_date = row.get('visit_date'),
                                     event_name = idx[1],
                                     error = 'ERROR: WAIS score is not verified'
                                     )
    return error
def youth_report_sex(idx, row, field_missing, field_sex):
    """Report youth-report forms whose recorded sex differs from 'sex'.

    field_missing: the form's missing flag; field_sex: the sex field on
    that form.  Returns an error dict, or {} when nothing is wrong.
    """
    # A non-NaN 'exclude' means the subject is excluded from QC.
    if not math.isnan(row.get('exclude')):
        return dict()
    if row.get('visit_ignore___yes') == 1:
        return dict()
    if row.get(field_missing) == 1:
        return dict()
    if row.get('sex') == row.get(field_sex):
        return dict()
    return dict(subject_site_id=idx[0],
                visit_date=row.get('visit_date'),
                event_name=idx[1],
                field=field_sex,
                error='ERROR: SEX and SEX in YOUTHREPORT do not match.')
def main(args):
    """Run all QC checks over the selected visit(s) and report errors.

    Errors go to a CSV file when --csvdir is given, otherwise each error
    is sent to sibis.logging().
    """
    project_entry = get_project_entry()
    project_df = data_entry_fields(fields, project_entry, args.visit)
    error = []
    # Each checker returns {} (no problem) or an error dict; check()
    # appends only truthy results.
    for idx, row in project_df.iterrows():
        for f in form_fields:
            check(missing_form(idx, row, f[0], f[1]), error)
        for np in np_gpeg_fields:
            check(np_groove_check(idx, row, 'np_gpeg_missing', np[0], np[1]), error)
        check(fourteen_days_mri_report(idx, row), error)
        check(cnp_dob(idx, row), error)
        check(missing_mri_stroop(idx, row), error)
        for s in saliva_fields:
            check(missing_saliva_sample(idx, row, s[0], s[1]), error)
        check(visit_data_missing(idx, row), error)
        check(wais_score_verification(idx, row), error)
        for f in fields_sex:
            check(youth_report_sex(idx, row, f[0], f[1]), error)
    if args.csvdir:
        # NOTE(review): entries are dicts, never the string 'null', so this
        # filter looks ineffective; confirm what it was meant to drop.
        for e in error:
            if e == 'null':
                error.remove(e)
        # 'wb+' with csv.writer is Python 2 usage; Python 3 would need
        # open(..., 'w', newline='') -- this script appears to target py2.
        with open(args.csvdir, 'wb+') as fi:
            f = csv.writer(fi)
            f.writerow(["subject_site_id", "visit_date", "event_name", "error"])
            for x in error:
                f.writerow([x["subject_site_id"],
                            x["visit_date"],
                            x["event_name"],
                            x["error"]])
    else:
        for e in error:
            if e != 'null':
                #print json.dumps(e, sort_keys=True)
                #print "{}-{}".format(e['subject_site_id'], e['visit_date']), e['error'],e
                sibis.logging("{}-{}".format(e['subject_site_id'], e['visit_date']), e['error'],e_dictionary=e)
if __name__ == '__main__':
    import argparse
    parser = argparse.ArgumentParser()
    # NOTE(review): the default is a list of events, but a user-supplied
    # -v yields a single string -- confirm the REDCap export accepts both.
    parser.add_argument('-v','--visit', default=['baseline_visit_arm_1', '1y_visit_arm_1'],
                        help='Select which visit the QC script runs on',)
    parser.add_argument( "-c","--csvdir", action="store",
                        help="Directory where CSV will be stored.")
    argv = parser.parse_args()
    sys.exit(main(args=argv))
|
#!/usr/bin/env python
# encoding: utf-8
def acoustics(solver_type='classic', iplot=True, htmlplot=False, outdir='./_output', problem='figure 9.4'):
    """
    This example solves the 1-dimensional variable-coefficient acoustics
    equations in a medium with a single interface.

    solver_type: 'classic' or 'sharpclaw'.
    problem: 'figure 9.4' or 'figure 9.5', selecting the densities and
    sound speeds on each side of the interface.  Fix: an unrecognized
    *problem* now raises immediately instead of falling through and
    crashing later with NameError on rhol.
    """
    from numpy import sqrt, abs
    import pyclaw

    if solver_type == 'classic':
        solver = pyclaw.ClawSolver1D()
    elif solver_type == 'sharpclaw':
        solver = pyclaw.SharpClawSolver1D()
    else:
        raise Exception('Unrecognized value of solver_type.')

    solver.mwaves = 2
    solver.limiters = pyclaw.limiters.tvd.MC
    solver.bc_lower[0] = pyclaw.BC.outflow
    solver.bc_upper[0] = pyclaw.BC.outflow
    solver.aux_bc_lower[0] = pyclaw.BC.outflow
    solver.aux_bc_upper[0] = pyclaw.BC.outflow

    x = pyclaw.Dimension('x', -5.0, 5.0, 500)
    grid = pyclaw.Grid(x)
    meqn = 2  # pressure, velocity
    maux = 2  # impedance, sound speed
    state = pyclaw.State(grid, meqn, maux)

    # Material parameters on each side of the interface at x = 0.
    if problem == 'figure 9.4':
        rhol = 1.0
        cl = 1.0
        rhor = 2.0
        cr = 0.5
    elif problem == 'figure 9.5':
        rhol = 1.0
        cl = 1.0
        rhor = 4.0
        cr = 0.5
    else:
        raise Exception('Unrecognized value of problem.')
    zl = rhol * cl
    zr = rhor * cr
    xc = grid.x.center
    state.aux[0, :] = (xc <= 0) * zl + (xc > 0) * zr  # Impedance
    state.aux[1, :] = (xc <= 0) * cl + (xc > 0) * cr  # Sound speed

    # initial condition: half-ellipse
    state.q[0, :] = sqrt(abs(1. - (xc + 3.) ** 2)) * (xc > -4.) * (xc < -2.)
    state.q[1, :] = state.q[0, :] + 0.

    claw = pyclaw.Controller()
    claw.solution = pyclaw.Solution(state)
    claw.solver = solver
    claw.tfinal = 5.0
    claw.nout = 10

    # Solve
    status = claw.run()

    # Plot results
    if htmlplot: pyclaw.plot.html_plot(outdir=outdir)
    if iplot: pyclaw.plot.interactive_plot(outdir=outdir)

if __name__ == "__main__":
    from pyclaw.util import run_app_from_main
    output = run_app_from_main(acoustics)
|
# import pandas as pd
# from libalignmentrs.record import Record
# from alignmentrs.aln.mixins import serde
# from alignmentrs.aln.mixins.tests.mocks import MockData
# class MockClass(serde.RecordsSerdeMixin):
# def __init__(self, records, name=None, index=None, comments=None, row_metadata=None, column_metadata=None, store_history=True, **kwargs):
# self.name = name
# self.index = index
# self.comments = comments
# self.row_metadata = row_metadata
# self.column_metadata = column_metadata
# self.store_history = store_history
# self.kwargs = kwargs
# class TestRecordsSerdeMixin:
# def setup(self):
# self.records = [
# Record('test1', 'description1', 'ATGCAT'),
# Record('test2', 'description2', 'ATGGGT'),
# Record('test3', 'description3', 'ATGAAT'),
# ]
# self.name = 'mock_aln'
# self.index = [0,1,2,3,4,5]
# self.comments = {'test_comment': 'testing'}
# self.comments_empty = {}
# self.comments_none = None
# self.row_metadata = pd.DataFrame(
# {
# 'description': ['description1', 'description2', 'description3'],
# },
# index=['test1', 'test2', 'test3']
# )
# self.row_metadata_empty = pd.DataFrame({})
# self.row_metadata_none = None
# self.column_metadata = pd.DataFrame(
# {
# 'a': [0,1,2,3,4,5],
# 'b': [10,11,12,13,14,15],
# },
# index=self.index
# )
# self.column_metdata_empty = pd.DataFrame({})
# self.column_metdata_none = None
# # self.kwargs = kwargs
# def teardown(self):
# pass
# def test_from_records_with_data(self):
# test_class = MockClass.from_records(
# self.records, name=self.name,
# index=self.index,
# comments=self.comments,
# row_metadata=self.row_metadata,
# column_metadata=self.column_metadata,
# store_history=True,
# )
# exp_class = MockClass(
# self.records, name=self.name,
# index=self.index,
# comments=self.comments,
# row_metadata=self.row_metadata,
# column_metadata=self.column_metadata,
# store_history=True,
# )
# assert type(exp_class) == type(test_class), \
# "expected and test classes are not the same: {} != {}".format(
# exp_class.__class__.__name__,
# test_class.__class__.__name__,
# )
# assert exp_class.__dict__ == test_class.__dict__, \
# "expected and test class dictionaries are not the same: {} != {}".format(
# exp_class.__dict__,
# test_class.__dict__,
# )
# assert exp_class.__dir__() == test_class.__dir__(), \
# "expected and test class dir are not the same: {} != {}".format(
# exp_class.__dir__(),
# test_class.__dir__(),
# )
# def test_from_records_empty(self):
# test_class = MockClass.from_records(self.records)
# exp_class = MockClass(self.records)
# assert type(exp_class) == type(test_class), \
# "expected and test classes are not the same: {} != {}".format(
# exp_class.__class__.__name__,
# test_class.__class__.__name__,
# )
# assert exp_class.__dict__ == test_class.__dict__, \
# "expected and test class dictionaries are not the same: {} != {}".format(
# exp_class.__dict__,
# test_class.__dict__,
# )
# assert exp_class.__dir__() == test_class.__dir__(), \
# "expected and test class dir are not the same: {} != {}".format(
# exp_class.__dir__(),
# test_class.__dir__(),
# )
# def test_to_records(self):
# test_class = MockClass(
# self.records, name=self.name,
# index=self.index,
# comments=self.comments,
# row_metadata=self.row_metadata,
# column_metadata=self.column_metadata,
# store_history=True,
# )
# test_class.data = MockData()
# test_records = test_class.to_records()
# assert str(self.records) == str(test_records), \
# "expected and test records are not the same: {} != {}".format(
# self.records, test_records
# )
|
class Foo(object):
    """Iterator yielding (key, value) pairs from an interleaved flat list.

    ``self.values`` stores keys and values alternating, so each iteration
    step pops two items.  Exhausting the iterator empties the list.
    """

    def __init__(self):
        self.values = [1, "one", 2, "two", 3, "three"]

    def __iter__(self):
        return self

    def __next__(self):
        # Python 3 iterator protocol.  The original defined only the
        # Python-2-style ``next`` method (and used print statements), so
        # ``dict(f)`` / ``for`` loops failed on Python 3.
        if self.values:
            key = self.values.pop(0)
            value = self.values.pop(0)
            print(key, value)
            return key, value
        else:
            raise StopIteration()

    next = __next__  # keep the old Python 2 entry point for existing callers
# Demo: consuming the iterator builds a dict from its (key, value) pairs.
# The original used Python 2 print statements, which are syntax errors on
# Python 3; these print() calls behave the same on both versions.
f = Foo()
d = dict(f)
print(d)
print(sorted(d.items()))
print(len(d))
|
import logging
import sys
# Configure root logging once for the whole process: INFO and above,
# written to stdout (not the default stderr), as "<tab>LEVEL| message".
logging.basicConfig(level=logging.INFO, stream=sys.stdout,
                    format='\t%(levelname)s| %(message)s')
|
import os
from setuptools import setup, find_packages

# Runtime dependencies: umysql supplies the fast C protocol implementation,
# pymysql supplies the DB-API layer being wrapped.
install_requires = [
    'umysql>=2.61',
    'pymysql>=0.6',
]

# Read the long description with a context manager so the file handle is
# closed deterministically (the original leaked an open file object).
with open(os.path.join(os.path.dirname(__file__), 'README.rst')) as readme:
    long_description = readme.read()

setup(
    name = 'umysqldb',
    description = "MySQLdb compatible wrapper for ultramysql",
    long_description = long_description,
    version = '1.0.4dev2',
    packages = find_packages(exclude=['examples', 'tests']),
    install_requires = install_requires,
    author = "Qiangning Hong",
    author_email = "hongqn@douban.com",
    license="BSD License",
    platforms=['any'],
    url="https://github.com/hongqn/umysqldb",
    classifiers=["Intended Audience :: Developers",
                 "License :: OSI Approved :: BSD License",
                 "Programming Language :: Python",
                 "Topic :: Database",
                 "Topic :: Software Development :: Libraries :: Python Modules",
                 ],
    test_suite = 'nose.collector',
    tests_require = ['nose', 'mock'],
)
|
import logging
from nequip.data import AtomicDataDict
from nequip.nn import (
GraphModuleMixin,
SequentialGraphNetwork,
AtomwiseLinear,
AtomwiseReduce,
ForceOutput,
PerSpeciesScaleShift,
ConvNetLayer,
)
from nequip.nn.embedding import (
OneHotAtomEncoding,
RadialBasisEdgeEncoding,
SphericalHarmonicEdgeAttrs,
)
def EnergyModel(**shared_params) -> SequentialGraphNetwork:
    """Base default energy model architecture.
    For minimal and full configuration option listings, see ``minimal.yaml`` and ``example.yaml``.

    Note: ``shared_params`` is mutated here (the ``pop`` calls below) before
    the remainder is forwarded to ``SequentialGraphNetwork.from_parameters``.
    The ``layers`` dict's insertion order defines the network's layer order.
    """
    logging.debug("Start building the network model")
    num_layers = shared_params.pop("num_layers", 3)
    add_per_species_shift = shared_params.pop("PerSpeciesScaleShift_enable", False)
    layers = {
        # -- Encode --
        "one_hot": OneHotAtomEncoding,
        "spharm_edges": SphericalHarmonicEdgeAttrs,
        "radial_basis": RadialBasisEdgeEncoding,
        # -- Embed features --
        "chemical_embedding": AtomwiseLinear,
    }
    # add convnet layers
    # insertion preserves order
    for layer_i in range(num_layers):
        layers[f"layer{layer_i}_convnet"] = ConvNetLayer
    # .update also maintains insertion order
    layers.update(
        {
            # TODO: the next linear throws out all L > 0, don't create them in the last layer of convnet
            # -- output block --
            "conv_to_output_hidden": AtomwiseLinear,
            "output_hidden_to_scalar": (
                AtomwiseLinear,
                dict(irreps_out="1x0e", out_field=AtomicDataDict.PER_ATOM_ENERGY_KEY),
            ),
        }
    )
    # Optional learned per-species scale/shift applied to per-atom energies.
    if add_per_species_shift:
        layers["per_species_scale_shift"] = (
            PerSpeciesScaleShift,
            dict(
                field=AtomicDataDict.PER_ATOM_ENERGY_KEY,
                out_field=AtomicDataDict.PER_ATOM_ENERGY_KEY,
            ),
        )
    # Sum per-atom energies into the total-energy prediction.
    layers["total_energy_sum"] = (
        AtomwiseReduce,
        dict(
            reduce="sum",
            field=AtomicDataDict.PER_ATOM_ENERGY_KEY,
            out_field=AtomicDataDict.TOTAL_ENERGY_KEY,
        ),
    )
    return SequentialGraphNetwork.from_parameters(
        shared_params=shared_params,
        layers=layers,
    )
def ForceModel(**shared_params) -> GraphModuleMixin:
    """Build the base default energy-and-force model.

    For minimal and full configuration option listings, see ``minimal.yaml``
    and ``example.yaml``.  Convenience wrapper: builds the default
    ``EnergyModel`` from ``shared_params`` and hands it to
    ``nequip.nn.ForceOutput``, which obtains forces from the energy.
    """
    return ForceOutput(energy_model=EnergyModel(**shared_params))
|
import requests
import os
class Image:
    """Downloads Telegram photo files and stores them as ``<chat_id>.jpg``."""

    def __init__(self, token, path = 'images'):
        # exist_ok makes this robust to pre-existing/nested paths and races.
        # The original `path not in os.listdir()` check only worked for a
        # bare directory name directly under the current working directory.
        os.makedirs(path, exist_ok=True)
        self.token = token
        self.path = path

    def imgpath(self, chat_id):
        """Return the local file path used to store this chat's image."""
        return self.path + '/' + str(chat_id) + '.jpg'

    def save(self, chat_id, file_info):
        """Download the file described by ``file_info`` from the Telegram
        Bot API and write it to ``imgpath(chat_id)``.
        """
        url = 'https://api.telegram.org/file/bot{0}/{1}'
        response = requests.get(url.format(self.token, file_info.file_path), stream=True)
        # Fail loudly instead of silently saving an HTTP error page.
        response.raise_for_status()
        with open(self.imgpath(chat_id), 'wb') as nf:
            # iter_content yields raw byte chunks; iterating the response
            # object directly splits on newlines, which is wrong for JPEGs.
            for chunk in response.iter_content(chunk_size=8192):
                nf.write(chunk)
|
# coding=utf-8
"""Views."""
# Django
from django.contrib import messages
from django.core.exceptions import PermissionDenied
from django.urls import reverse
from django.views.generic import CreateView, DeleteView, DetailView, ListView, UpdateView
# Current django project
from gymnasiums.models import Gymnasium
class GymnasiumListView(ListView):
    """View that returns the paginated list of Gymnasiums."""
    model = Gymnasium
    paginate_by = 10  # gymnasiums shown per page
class GymnasiumDetailView(DetailView):
    """Show the details of a single Gymnasium."""
    model = Gymnasium
class GymnasiumCreateView(CreateView):
    """Create a Gymnasium (staff or superuser only)."""

    model = Gymnasium
    fields = '__all__'

    def get(self, request, *args, **kwargs):
        """Display the creation form; only staff/superusers may access it."""
        if not (request.user.is_superuser or request.user.is_staff):
            raise PermissionDenied
        # Fix: unpack *args/**kwargs -- the original passed the args tuple
        # and kwargs dict as two positional arguments to super().
        return super().get(request, *args, **kwargs)

    def post(self, request, *args, **kwargs):
        """Handle the submitted form; only staff/superusers may create."""
        if not (request.user.is_superuser or request.user.is_staff):
            raise PermissionDenied
        return super().post(request, *args, **kwargs)

    def get_success_url(self):
        """Flash a success message and redirect to the new gymnasium's detail page."""
        messages.success(self.request, "Gymnasium '{}' added successfully".format(self.object.name))
        return reverse('gymnasiums:detail', kwargs={'pk': self.object.id})
class GymnasiumUpdateView(UpdateView):
    """Update a Gymnasium (staff or superuser only)."""

    model = Gymnasium
    fields = '__all__'

    def get(self, request, *args, **kwargs):
        """Display the edit form; only staff/superusers may access it."""
        if not (request.user.is_superuser or request.user.is_staff):
            raise PermissionDenied
        # Fix: unpack *args/**kwargs -- the original passed the args tuple
        # and kwargs dict as two positional arguments to super().
        return super().get(request, *args, **kwargs)

    def post(self, request, *args, **kwargs):
        """Handle the submitted form; only staff/superusers may update."""
        if not (request.user.is_superuser or request.user.is_staff):
            raise PermissionDenied
        return super().post(request, *args, **kwargs)

    def get_success_url(self):
        """Flash a success message and redirect to the gymnasium's detail page."""
        messages.success(self.request, "Gymnasium '{}' updated successfully".format(self.object.name))
        return reverse('gymnasiums:detail', kwargs={'pk': self.object.id})
class GymnasiumDeleteView(DeleteView):
    """Delete a Gymnasium (staff or superuser only)."""

    model = Gymnasium

    def get(self, request, *args, **kwargs):
        """Show the delete-confirmation page; only staff/superusers may access it."""
        if not (request.user.is_superuser or request.user.is_staff):
            raise PermissionDenied
        # Fix: unpack *args/**kwargs -- the original passed the args tuple
        # and kwargs dict as two positional arguments to super().
        return super().get(request, *args, **kwargs)

    def post(self, request, *args, **kwargs):
        """Perform the deletion; only staff/superusers may delete."""
        if not (request.user.is_superuser or request.user.is_staff):
            raise PermissionDenied
        return super().post(request, *args, **kwargs)

    def get_success_url(self, **kwargs):
        """Flash a success message and redirect to the gymnasium list."""
        messages.success(self.request, "Gymnasium '{}' deleted successfully".format(self.object.name))
        return reverse('gymnasiums:list')
|
# -*- coding: utf-8 -*-
"""
Created on Thu Mar 3 09:03:43 2022
@author: florian.baumgartner
"""
import numpy as np
# import matplotlib.pyplot as plt
SAMPLE_DEPTH = 2**16
SAMPLE_FREQ = 100.0e6/16 # Sigma-Delta-Frequency in Hz
CARRIER_FREQ = 40.0e3 # Carrier Frequency in Hz
SAMPLE_COUNT = int(SAMPLE_FREQ / CARRIER_FREQ)
MAX_VAL = int((SAMPLE_DEPTH / 2) - 1)
MIN_VAL = int(-MAX_VAL)
sinTable = np.sin(2 * np.pi * np.arange(SAMPLE_COUNT) / SAMPLE_COUNT) * MAX_VAL
sinTable = np.clip(sinTable, MIN_VAL, MAX_VAL)
cosTable = np.cos(2 * np.pi * np.arange(SAMPLE_COUNT) / SAMPLE_COUNT) * MAX_VAL
cosTable = np.clip(cosTable, MIN_VAL, MAX_VAL)
cosFullTable = cosTable
cosFullTable = cosFullTable.astype(int)
sinTable /= np.sqrt(2.0)
sinTable = sinTable.astype(int)
cosTable /= np.sqrt(2.0)
cosTable = cosTable.astype(int)
def tohex(val, nbits):
    """Return ``val`` as zero-padded uppercase two's-complement hex.

    ``val`` is interpreted as a signed ``nbits``-bit integer; negative
    values wrap to their two's-complement representation.  The field width
    follows ``nbits`` (one hex digit per 4 bits); the original hard-coded
    a 4-digit field, which was only correct for ``nbits == 16``.
    """
    width = max(1, nbits // 4)
    return f"{(val + (1 << nbits)) % (1 << nbits):0{width}X}"
# Emit the HDL include files: the sample count constant, then one 16-bit
# hex word per line for each ROM table.
with open("cnt.list", "w") as out:
    out.write(f"localparam rom_cnt = {SAMPLE_COUNT};")
for list_name, table in (("sin.list", sinTable),
                         ("cos.list", cosTable),
                         ("cos_full.list", cosFullTable)):
    with open(list_name, "w") as out:
        out.writelines(tohex(int(entry), 16) + "\n" for entry in table)
# Console summary of the generated tables and their value ranges.
print(f"SAMPLE_COUNT = {SAMPLE_COUNT}")
print(f"max range: {MIN_VAL} ... {MAX_VAL}")
print(f"sin: {np.min(sinTable)} ... {np.max(sinTable)}")
print(f"cos: {np.min(cosTable)} ... {np.max(cosTable)}")
print(f"cos_full: {np.min(cosFullTable)} ... {np.max(cosFullTable)}")
|
import pytest
from io import StringIO
from pathlib import Path
import pandas as pd
import mock
from shutil import rmtree
from wetterdienst.dwd.util import coerce_field_types
from wetterdienst.dwd.metadata.parameter import Parameter
from wetterdienst import TimeResolution
from wetterdienst.dwd.metadata.period_type import PeriodType
from wetterdienst.dwd.observations.store import (
store_climate_observations,
restore_climate_observations,
_build_local_store_key,
)
# Module-level fixtures shared by the tests below.
HERE = Path(__file__).parent
# Setting parameters for tests
station_id = 1
parameter = Parameter.CLIMATE_SUMMARY
time_resolution = TimeResolution.DAILY
period_type = PeriodType.HISTORICAL
parallel_download = False
create_new_file_index = False
# Set filename for mock
filename = "tageswerte_KL_00001_19370101_19860630_hist.zip"
# Loading test data
file = pd.read_json(HERE / "FIXED_STATIONDATA.JSON")
# Coerce columns to the dtypes the library expects for daily data.
file = coerce_field_types(file, time_resolution)
# Prepare csv for regular "downloading" test
csv_file = StringIO()
file.to_csv(csv_file, sep=";")
# Rewind so consumers can read the buffer from the start.
csv_file.seek(0)
def test_build_local_store_key():
    """ Tests for function _build_local_store_key """
    # Integer station id is formatted directly into the key.
    assert (
        _build_local_store_key(
            station_id=1,
            parameter=Parameter.CLIMATE_SUMMARY,
            time_resolution=TimeResolution.DAILY,
            period_type=PeriodType.HISTORICAL,
        )
        == "kl/daily/historical/station_id_1"
    )
    # String station ids are normalized: leading zeros are stripped.
    assert (
        _build_local_store_key(
            station_id="00001",
            parameter=Parameter.CLIMATE_SUMMARY,
            time_resolution=TimeResolution.DAILY,
            period_type=PeriodType.HISTORICAL,
        )
        == "kl/daily/historical/station_id_1"
    )
    # Non-numeric station ids are rejected.
    with pytest.raises(ValueError):
        _build_local_store_key(
            station_id="abc",
            parameter=Parameter.CLIMATE_SUMMARY,
            time_resolution=TimeResolution.DAILY,
            period_type=PeriodType.HISTORICAL,
        )
    # Parameter.NO_REAL_PARAMETER itself raises AttributeError (the enum
    # has no such member), before the function body even runs.
    with pytest.raises(AttributeError):
        _build_local_store_key(
            station_id=1,
            parameter=Parameter.NO_REAL_PARAMETER,
            time_resolution=TimeResolution.DAILY,
            period_type=PeriodType.HISTORICAL,
        )
@mock.patch("pandas.read_hdf", mock.MagicMock(return_value=file))
def test_store_dwd_data():
    """Round-trip test: storing then restoring climate observations should
    reproduce the original DataFrame exactly.

    ``pandas.read_hdf`` is mocked so the restore step returns the fixture
    regardless of what was written to disk.

    Fixes over the original: the docstring appeared *after* executable
    statements (making it a no-op string), the scratch folder was wrapped in
    a redundant ``Path(Path(...))``, and the folder leaked when an assertion
    failed (no try/finally).
    """
    # Create a scratch folder for the storage test.
    test_folder = Path(__file__).parent.absolute() / "dwd_data"
    test_folder.mkdir(parents=True, exist_ok=True)
    try:
        store_climate_observations(
            station_data=file,
            station_id=station_id,
            parameter=parameter,
            time_resolution=time_resolution,
            period_type=period_type,
            folder=test_folder,
        )
        station_data = restore_climate_observations(
            station_id=station_id,
            parameter=parameter,
            time_resolution=time_resolution,
            period_type=period_type,
            folder=test_folder,
        )
        assert station_data.equals(file)
    finally:
        # Always remove the scratch folder, even if the assertions fail.
        rmtree(test_folder)
|
from prometheus_client import start_http_server, Summary, Gauge
import random
import time
# Summary metric recording how long each request takes.
REQUEST_TIME = Summary('request_processing_seconds', 'Time spent processing request')
# Gauge tracking how many requests are currently being processed.
GAUGE = Gauge('my_inprogress_requests', 'Number of requests in progress')
# NOTE(review): this sets the gauge to the current Unix timestamp once at
# startup, which looks unrelated to "requests in progress" -- confirm intent.
GAUGE.set_to_current_time()
@GAUGE.track_inprogress()
@REQUEST_TIME.time()
def process_request(t):
    # Simulate a request taking ``t`` seconds; both decorators record it.
    time.sleep(t)
if __name__ == '__main__':
    # Expose the /metrics endpoint on port 9000 and generate load forever.
    start_http_server(9000)
    print("Server started. Waiting for requests...")
    while True:
        process_request(random.random())
|
r"""
Descriptions of procedural changes in litigation.
Does not specify whether they are mandatory or universal,
or specify the :class:`.Enactment`\s that might require them.
"""
from __future__ import annotations
from copy import deepcopy
from itertools import chain
import operator
from typing import Any, ClassVar, Dict, Iterable, Iterator
from typing import List, Optional, Sequence, Tuple, Union
from pydantic import BaseModel, validator
from nettlesome.terms import (
Comparable,
ContextRegister,
Explanation,
new_context_helper,
Term,
TermSequence,
)
from nettlesome.factors import Factor
from nettlesome.groups import FactorGroup
from nettlesome.formatting import indented
from authorityspoke.facts import Fact, Allegation, Pleading, Exhibit, Evidence
from authorityspoke.facts import RawFactor
RawProcedure = Dict[str, Sequence[RawFactor]]
class Procedure(Comparable, BaseModel):
r"""
A (potential) rule for courts to use in resolving litigation.
Described in terms of inputs and outputs, and also potentially
"despite" :class:`.Factor`\s, which occur when a :class:`Rule`
could be said to apply "even if" some "despite" factor is true.
Users generally should not need to interact with this class
directly, under the current design. Instead, they should interact
with the class :class:`.Rule`.
:param outputs:
an outcome that a court may accept based on the presence
of the ``inputs``
:param inputs:
supporting :class:`.Factor`\s in favor of the ``output``.
The ``input`` :class:`.Factor`\s are not treated as
potential undercutters.
:param despite:
:class:`.Factor`\s that do not prevent the court from
imposing the ``output``. These could be considered
"failed undercutters" in defeasible logic. If a :class:`.Factor`
is relevant both as support for the output and as
a potential undercutter, include it in both ``inputs``
and ``despite``.
:param name:
An identifier that can be used to reference and
incorporate an existing procedure in a new
:class:`.Factor`, instead of constructing a new
copy of the :class:`.Procedure`.
:param absent:
Whether the absence, rather than presence, of this
:class:`.Procedure` is being referenced. The usefulness
of this is unclear, but I'm not prepared to eliminate it
before the :class:`.Argument` class has been implemented
and tested.
:param generic:
Whether the this :class:`Procedure` is being referenced
as a generic example, which could be replaced by any
other :class:`Procedure`.
"""
outputs: List[Union[Fact, Allegation, Pleading, Exhibit, Evidence]]
inputs: List[Union[Fact, Allegation, Pleading, Exhibit, Evidence]] = []
despite: List[Union[Fact, Allegation, Pleading, Exhibit, Evidence]] = []
name: str = ""
absent: ClassVar[bool] = False
generic: ClassVar[bool] = False
context_factor_names: ClassVar[Tuple[str, ...]] = ("outputs", "inputs", "despite")
    @property
    def outputs_group(self) -> FactorGroup:
        """Get output Factors as a FactorGroup."""
        return FactorGroup(self.outputs)
    @property
    def inputs_group(self) -> FactorGroup:
        """Get input Factors as a FactorGroup (an empty group if there are none)."""
        return FactorGroup(self.inputs) if self.inputs else FactorGroup()
    @property
    def despite_group(self) -> FactorGroup:
        """Get despite Factors as a FactorGroup (an empty group if there are none)."""
        return FactorGroup(self.despite) if self.despite else FactorGroup()
    @property
    def groups(self) -> List[FactorGroup]:
        """Get output, input, and despite Factors as FactorGroups, in that order."""
        return [self.outputs_group, self.inputs_group, self.despite_group]
    @validator("outputs", pre=True)
    def _validate_outputs(cls, v: Union[Factor, Sequence[Factor]]) -> List[Factor]:
        """Coerce ``outputs`` to a non-empty list of Factors.

        A single Factor (or its dict representation) is wrapped in a list;
        a bare string is rejected so it is not iterated character-by-character.
        """
        if not v:
            raise ValueError("Procedure must have at least one output")
        if isinstance(v, (Term, Dict)):
            return [v]
        if isinstance(v, str):
            raise TypeError("outputs of Procedure cannot be type str")
        return list(v)
    @validator("inputs", "despite", pre=True)
    def _validate_factor_groups(
        cls, v: Union[Factor, Sequence[Factor]]
    ) -> List[Factor]:
        """Coerce ``inputs``/``despite`` to a list of Factors (possibly empty).

        A single Factor (or its dict representation) is wrapped in a list,
        ``None`` becomes an empty list, and bare strings are rejected.
        """
        if isinstance(v, (Term, Dict)):
            return [v]
        if isinstance(v, str):
            raise TypeError(
                "inputs and despite factors of Procedure cannot be type str"
            )
        elif v is None:
            return []
        return list(v)
    def add(
        self,
        other: Comparable,
        context: Optional[Union[ContextRegister, Explanation]] = None,
    ) -> Optional[Procedure]:
        """Show how first Procedure triggers the second if not both are universal.

        :returns: the combined Procedure, or ``None`` if no explanation of
            how ``self`` triggers ``other`` can be found.
        """
        # Anything that isn't a Procedure is treated as an extra input Factor.
        if not isinstance(other, self.__class__):
            return self.with_factor(other)
        # Try each explanation of how self can trigger other
        # (triggers_next_procedure is defined elsewhere in this class).
        for explanation in self.triggers_next_procedure(other, context=context):
            added = self._trigger_addition(other, explanation)
            if added:
                return added
        return None
    def __add__(self, other: Procedure) -> Optional[Procedure]:
        """Show how first Procedure triggers the second if not both are universal."""
        # Operator form of :meth:`add`.
        return self.add(other)
    def _add_if_universal(
        self, other: Procedure, explanation: Explanation
    ) -> Optional[Procedure]:
        """Show how first Procedure triggers the second if both are universal."""
        self_output_or_input = FactorGroup((*self.outputs_group, *self.inputs_group))
        other_input = list(other.inputs)
        implied_inputs = []
        not_implied = []
        # Partition other's inputs into those implied by self's
        # outputs/inputs and those self cannot establish.
        while other_input:
            current = other_input.pop()
            implying_explanation = self_output_or_input.explain_implication(
                current, context=explanation
            )
            if implying_explanation is not None:
                # Carry the enriched context forward into later checks.
                explanation = implying_explanation
                implied_inputs.append(current)
            else:
                not_implied.append(current)
        # self must establish at least one of other's inputs to trigger it.
        if not any(implied_inputs):
            return None
        # Combine with a version of other that only requires what self
        # could not already establish.
        to_combine = Procedure(
            inputs=not_implied, outputs=other.outputs, despite=other.despite
        )
        return self.union(to_combine, context=explanation)
    def _trigger_addition(
        self, other: Procedure, explanation: Explanation
    ) -> Procedure:
        """Add two Procedures, given that they have already been found to be addable."""
        # Translate other's terms into self's context before combining.
        triggered_procedure = other.new_context(explanation.context.reversed())
        new_outputs = [*self.outputs, *triggered_procedure.outputs]
        # Deduplicate by string representation while preserving order.
        unique_new_outputs = {}
        for key in new_outputs:
            unique_new_outputs[str(key)] = key
        result = deepcopy(self)
        result.set_outputs(list(unique_new_outputs.values()))
        return result
    def _explanations_union_partial(
        self, other: Procedure, context: Optional[ContextRegister] = None
    ) -> Iterable[ContextRegister]:
        """Yield as much of the context as seems likely correct based on this Procedure."""
        # Delegates to likely_contexts (defined elsewhere in this class).
        yield from self.likely_contexts(other, context)
    def explanations_union(
        self,
        other: Procedure,
        context: Optional[Union[ContextRegister, Explanation]] = None,
    ) -> Iterator[ContextRegister]:
        """Yield explanations of contexts that allow combining these Procedures."""
        if not isinstance(context, Explanation):
            context = Explanation.from_context(context)
        # Start from likely partial contexts, then test each completion of
        # every partial context by attempting the actual union.
        for partial in self._explanations_union_partial(other, context.context):
            for guess in self.possible_contexts(other, partial):
                answer = self._union_from_explanation(other, guess)
                if answer:
                    yield guess
    def _union_from_explanation(
        self, other: Procedure, context: ContextRegister
    ) -> Optional[Procedure]:
        r"""
        Combine two :class:`Procedure`\s into one.

        The new :class:`Procedure` will have all of the ``inputs``, ``outputs``,
        and ``despite`` :class:`.Factor`\s of both ``self`` and ``other``.
        All of the context :class:`.Factor`\s of ``self`` will
        remain the same.

        :returns: the combined Procedure, or ``None`` if any group fails
            to combine under ``context``.
        """
        new_inputs = self.inputs_group._union_from_explanation(
            other.inputs_group, context
        )
        new_outputs = self.outputs_group._union_from_explanation(
            other.outputs_group, context
        )
        # "despite" factors may contradict one another without blocking the union.
        new_despite = self.despite_group._union_from_explanation_allow_contradiction(
            other.despite_group, context
        )
        if any(group is None for group in (new_outputs, new_inputs, new_despite)):
            return None
        return Procedure(outputs=new_outputs, inputs=new_inputs, despite=new_despite)
    def __len__(self):
        r"""
        Get number of generic :class:`.Factor`\s specified for ``self``.

        :returns:
            the number of generic :class:`.Factor`\s that need to be
            specified for this :class:`Procedure`.
        """
        # Only generic terms count: they are the slots open for substitution.
        return len(self.generic_terms())
def __str__(self):
text = "RESULT:"
for f in self.outputs:
text += "\n" + indented(f.wrapped_string)
if self.inputs:
text += "\nGIVEN:"
for f in self.inputs:
text += "\n" + indented(f.wrapped_string)
if self.despite:
text += "\nDESPITE:"
for f in self.despite:
text += "\n" + indented(f.wrapped_string)
return text
    @property
    def factors_all(self) -> List[Factor]:
        r"""
        Get :class:`.Factor`\s in ``inputs``, ``outputs``, and ``despite``.

        :returns:
            a :class:`list` of all :class:`.Factor`\s.
        """
        # Guard against None as well as empty lists.
        inputs = self.inputs or ()
        despite = self.despite or ()
        return [*self.outputs, *inputs, *despite]
    @property
    def recursive_terms(self) -> Dict[str, Term]:
        r"""
        Collect `self`'s :attr:`terms`, and their :attr:`terms`, recursively.

        :returns:
            a :class:`dict` (instead of a :class:`set`,
            to preserve order) of :class:`Term`\s.
        """
        answers: Dict[str, Term] = {}
        # Merge each group's recursive terms; later duplicates overwrite
        # earlier ones keyed by the same name, preserving first-seen order.
        for context in self.groups:
            if context:
                answers.update(context.recursive_terms)
        return answers
@property
def terms(self) -> TermSequence:
r"""
Get :class:`Factor`\s used in comparisons with other :class:`Factor`\s.
:returns:
a tuple of attributes that are designated as the ``terms``
for whichever subclass of :class:`Factor` calls this method. These
can be used for comparing objects using :meth:`consistent_with`
"""
result: List[Term] = []
result.extend(self.outputs)
result.extend(self.inputs)
result.extend(self.despite)
return TermSequence(result)
    def generic_terms_by_str(self) -> Dict[str, Term]:
        r"""
        :class:`.Factor`\s that can be replaced without changing ``self``\s meaning.

        self.generic can't be True for :class:`.Procedure`.

        :returns:
            ``self``'s generic :class:`.Factor`\s,
            which must be matched to other generic :class:`.Factor`\s to
            perform equality or implication tests between :class:`.Factor`\s
            with :meth:`.Factor.means` or :meth:`.Factor.__ge__`.
        """
        # Keyed by str() so equivalent generics collapse to a single entry,
        # preserving first-seen order.
        generic_dict = {
            str(generic): generic
            for factor in self.factors_all
            for generic in factor.generic_terms()
        }
        return generic_dict
    def add_factor(self, incoming: Factor) -> None:
        """
        Add an input :class:`.Factor`, mutating ``self`` in place.

        :param incoming:
            the new :class:`.Factor` to be added to input

        :returns:
            None

        :raises:
            whatever ``FactorGroup.add_or_raise_error`` raises when the
            factor cannot be added (defined elsewhere; not visible here).
        """
        new_factors = self.inputs_group.add_or_raise_error(incoming)
        self.set_inputs(new_factors)
    def with_factor(self, incoming: Factor) -> Optional[Procedure]:
        """
        Create new Procedure with added input :class:`.Factor`.

        Unlike :meth:`add_factor`, this does not mutate ``self``.

        :param incoming:
            the new :class:`.Factor` to be added to input

        :returns:
            a new version of ``self`` with the specified change, or ``None``
            when the group addition signals failure by returning ``None``.
        """
        new_factors = self.inputs_group + incoming
        if new_factors is None:
            return None
        result = deepcopy(self)
        result.set_inputs(new_factors)
        return result
def contradicts(self, other, context: Optional[ContextRegister] = None) -> bool:
r"""
Find if ``self`` applying in some cases implies ``other`` cannot apply in some.
Raises an error because, by analogy with :meth:`.Procedure.implies`\,
users might expect this method to return ``True`` only when
:class:`Rule` with ``universal=False`` and Procedure ``self``
would contradict another :class:`Rule` with ``universal=False``
and Procedure ``other``. But that would never happen.
"""
raise NotImplementedError(
"Procedures do not contradict one another unless one of them ",
"applies in 'ALL' cases. Consider using ",
"'Procedure.contradicts_some_to_all' or 'Rule.contradicts'.",
)
    def contradicts_some_to_all(
        self, other: Procedure, context: Optional[ContextRegister] = None
    ) -> bool:
        r"""
        Find if ``self`` applying in some cases implies ``other`` cannot apply in all.

        :returns:
            whether the assertion that ``self`` applies in
            **some** cases contradicts that ``other`` applies in **all**
            cases, where at least one of the :class:`.Rule`\s is ``mandatory``.
        """
        if not isinstance(other, self.__class__):
            return False
        # True as soon as one explanation of the contradiction exists.
        return any(
            context is not None
            for context in self.explain_contradiction_some_to_all(other, context)
        )
    def _has_input_or_despite_factors_implied_by_all_inputs_of(
        self,
        other: Procedure,
        context: Explanation,
    ) -> Iterator[Explanation]:
        """Check if every input of other implies some input or despite factor of self."""
        # Pool despite and input factors: either kind may absorb other's inputs.
        self_despite_or_input = FactorGroup((*self.despite, *self.inputs))
        yield from self_despite_or_input._explanations_implied_by(
            other.inputs_group, explanation=context
        )
    def _has_input_or_despite_factors_implying_all_inputs_of(
        self,
        other: Procedure,
        context: Explanation,
    ) -> Iterator[Explanation]:
        """Check if every input of other is implied by some input or despite factor of self."""
        # Pool despite and input factors: either kind may imply other's inputs.
        self_despite_or_input = FactorGroup((*self.despite, *self.inputs))
        yield from self_despite_or_input._explanations_implication(
            other.inputs_group, explanation=context
        )
    def explain_contradiction_some_to_all(
        self,
        other: Procedure,
        context: Optional[Union[ContextRegister, Explanation]] = None,
    ) -> Iterator[Explanation]:
        """Explain why ``other`` can't apply in all cases if ``self`` applies in some."""
        if not isinstance(context, Explanation):
            context = Explanation.from_context(context)
        # For self to contradict other, either every input of other
        # must imply some input or despite factor of self...
        implied_contexts = self._has_input_or_despite_factors_implied_by_all_inputs_of(
            other, context
        )
        # or every input of other must be implied by
        # some input or despite factor of self.
        implying_contexts = self._has_input_or_despite_factors_implying_all_inputs_of(
            other, context
        )
        # For self to contradict other, some output of other
        # must be contradicted by some output of self.
        # Deduplicate explanations that share the same context mapping so
        # each candidate context is only tested once for contradiction.
        seen_contexts = []
        for m in chain(implying_contexts, implied_contexts):
            if m.context not in seen_contexts:
                seen_contexts.append(m.context)
                yield from self.outputs_group._explanations_contradiction(
                    other.outputs_group, m
                )
    def _explain_implication_all_to_all_of_procedure(
        self, other: Procedure, context: ContextRegister
    ) -> Iterator[ContextRegister]:
        """Yield contexts where universal ``self`` implies universal ``other``."""
        # Identical meaning is sufficient for implication.
        yield from self.explanations_same_meaning(other, context)
        def other_outputs_implied(context: Optional[ContextRegister]):
            # Contexts in which self's outputs imply other's outputs.
            for explanation in self.outputs_group.explanations_implication(
                other.outputs_group, context=context
            ):
                yield explanation
        def self_inputs_implied(explanations: Iterable[ContextRegister]):
            # Direction reversed here: other's inputs must imply self's inputs.
            for explanation in explanations:
                for result in other.inputs_group.explanations_implication(
                    self.inputs_group, context=explanation
                ):
                    yield result
        # Finally, self's inputs must be consistent with other's despite factors.
        for explanation in self_inputs_implied(other_outputs_implied(context)):
            for result in self.inputs_group.explanations_consistent_with(
                other=other.despite_group, context=explanation
            ):
                yield result
    def explain_implication_all_to_all(
        self, other: Factor, context: Optional[ContextRegister] = None
    ) -> Iterator[ContextRegister]:
        """Yield contexts establishing that if self is always valid, other is always valid."""
        context = context or ContextRegister()
        # Yields nothing when other is not a Procedure.
        if isinstance(other, self.__class__):
            yield from self._explain_implication_all_to_all_of_procedure(other, context)
    def implies_all_to_all(
        self, other: Procedure, context: Optional[ContextRegister] = None
    ) -> bool:
        """
        Find if ``self`` applying in all cases implies ``other`` applies in all.

        ``self`` does not imply ``other`` if any output of ``other``
        is not equal to or implied by some output of ``self``.

        For ``self`` to imply ``other``, every input of ``self``
        must be implied by some input of ``other``.

        ``self`` does not imply ``other`` if any despite of ``other``
        :meth:`~.Factor.contradicts` an input of ``self``.

        :returns:
            whether the assertion that ``self`` applies in **all** cases
            implies that ``other`` applies in **all** cases.
        """
        # True as soon as one explanation of the implication exists.
        return any(
            context is not None
            for context in self.explain_implication_all_to_all(other, context)
        )
    def _explain_implication_of_procedure_all_to_some(
        self, other: Procedure, context: ContextRegister
    ) -> Iterator[ContextRegister]:
        """Yield contexts where universal ``self`` implies non-universal ``other``."""
        # All-to-all implication is sufficient for all-to-some.
        yield from self.explain_implication_all_to_all(other, context)
        other_despite_or_input = FactorGroup((*other.despite, *other.inputs))
        self_despite_or_input = FactorGroup((*self.despite, *self.inputs))
        def other_outputs_implied(context: ContextRegister):
            # Contexts in which self's outputs imply other's outputs.
            for explanation in self.outputs_group.explanations_implication(
                other.outputs_group, context=context
            ):
                yield explanation
        def other_despite_implied(explanations: Iterator[ContextRegister]):
            # Narrow to contexts where self's despite/input factors imply
            # other's despite factors.
            for explanation in explanations:
                for result in self_despite_or_input.explanations_implication(
                    other.despite_group, context=explanation
                ):
                    yield result
        # Finally, self's inputs must not contradict other's despite/input factors.
        for explanation in other_despite_implied(other_outputs_implied(context)):
            if self.inputs_group.consistent_with(
                other_despite_or_input, context=explanation.context
            ):
                yield explanation
    def explain_implication_all_to_some(
        self,
        other: Factor,
        context: Optional[Union[ContextRegister, Explanation]] = None,
    ) -> Iterator[Explanation]:
        """Yield contexts establishing that if self is always valid, other is sometimes valid."""
        if not isinstance(context, Explanation):
            context = Explanation.from_context(context)
        # Yields nothing when other is not a Procedure.
        if isinstance(other, self.__class__):
            yield from self._explain_implication_of_procedure_all_to_some(
                other=other, context=context
            )
    def implies_all_to_some(
        self, other: Procedure, context: Optional[ContextRegister] = None
    ) -> bool:
        r"""
        Find if ``self`` applying in all cases implies ``other`` applies in some.

        For ``self`` to imply ``other``, every output of ``other``
        must be equal to or implied by some output of ``self``.

        For ``self`` to imply ``other``, every input of ``self`` must not be
        contradicted by any input or despite of ``other``.

        ``self`` does not imply ``other`` if any "despite" :class:`.Factor`\s
        of ``other`` are not implied by inputs of ``self``.

        :returns:
            whether the assertion that ``self`` applies in **all** cases
            implies that ``other`` applies in **some** cases (that is, if
            the list of ``self``'s inputs is not considered an exhaustive list
            of the circumstances needed to invoke the procedure).
        """
        # True as soon as one explanation of the implication exists.
        return any(
            context is not None
            for context in self.explain_implication_all_to_some(other, context)
        )
def _implies_procedure_if_present(
    self, other: Procedure, context: Explanation
) -> Iterator[Explanation]:
    """Yield explanations that self's outputs, inputs, and despite factors imply other's."""
    # Factors that may satisfy other's "despite" group: self's despite plus inputs.
    despite_or_input = FactorGroup((*self.despite, *self.inputs))
    for outputs_explanation in self.outputs_group.explanations_implication(
        other.outputs_group, context=context
    ):
        for inputs_explanation in self.inputs_group.explanations_implication(
            other.inputs_group, context=outputs_explanation
        ):
            yield from despite_or_input.explanations_implication(
                other.despite_group,
                context=inputs_explanation,
            )
def _implies_if_present(
    self, other: Factor, context: Optional[ContextRegister] = None
) -> Iterator[ContextRegister]:
    r"""
    Find if ``self`` would imply ``other`` if they aren't absent.

    When ``self`` and ``other`` are included in
    :class:`Rule`\s that both apply in **some** cases:

    ``self`` does not imply ``other`` if any output of ``other``
    is not equal to or implied by some output of self, if any input
    of ``other`` is not equal to or implied by some input of ``self``,
    or if any despite of ``other`` is not equal to or implied by some
    despite or input of ``self``.

    :returns:
        whether the assertion that ``self`` applies in some cases
        implies that the :class:`.Procedure` ``other`` applies
        in some cases.
    """
    # Guard clause: only another Procedure can be implied by this one.
    if not isinstance(other, self.__class__):
        return
    yield from self._implies_procedure_if_present(other=other, context=context)
def _explanations_same_meaning_as_procedure(
    self, other: Procedure, context: Explanation
):
    """Yield explanations that self's outputs, inputs, and despite factors all match other's."""
    # Chain the three same-meaning checks, threading each partial
    # explanation into the next comparison.
    for outputs_match in self.outputs_group.explanations_same_meaning(
        other=other.outputs_group, context=context
    ):
        for inputs_match in self.inputs_group.explanations_same_meaning(
            other=other.inputs_group, context=outputs_match
        ):
            yield from self.despite_group.explanations_same_meaning(
                other=other.despite_group, context=inputs_match
            )
def explanations_same_meaning(
    self,
    other: Comparable,
    context: Optional[Union[ContextRegister, Explanation]] = None,
) -> Iterator[Explanation]:
    """Yield contexts that could cause self to have the same meaning as other."""
    # Normalize the context argument to an Explanation before delegating.
    explanation = (
        context
        if isinstance(context, Explanation)
        else Explanation.from_context(context)
    )
    if isinstance(other, self.__class__):
        yield from self._explanations_same_meaning_as_procedure(other, explanation)
def means(
    self, other: Comparable, context: Optional[ContextRegister] = None
) -> bool:
    r"""
    Determine whether ``other`` has the same meaning as ``self``.

    :returns:
        whether the two :class:`.Procedure`\s have all the same
        :class:`.Factor`\s with the same context factors in the
        same roles.
    """
    explanations = self.explanations_same_meaning(other, context)
    return any(explanation is not None for explanation in explanations)
@new_context_helper
def new_context(self, changes: ContextRegister) -> Procedure:
    r"""
    Create new :class:`Procedure`, replacing keys of ``changes`` with values.

    :param changes:
        a :class:`dict` of :class:`.Factor`\s to replace
        matched to the :class:`.Factor`\s that should replace them

    :returns:
        new :class:`.Procedure` object, replacing keys of
        ``changes`` with their values.
    """
    # Copy every attribute, then rebuild each context-factor tuple with
    # every member run through its own new_context substitution.
    new_dict = self.__dict__.copy()
    for name in self.context_factor_names:
        new_dict[name] = tuple(
            factor.new_context(changes) for factor in new_dict[name]
        )
    return self.__class__(**new_dict)
def set_inputs(self, factors: Sequence[Factor]) -> None:
    """Set factors required to invoke this Procedure."""
    # Normalize through FactorGroup so inputs is stored in its sequence form.
    self.inputs = FactorGroup(factors).sequence
def set_despite(self, factors: Sequence[Factor]) -> None:
    """Set factors that do not preclude application of this Procedure."""
    # Normalize through FactorGroup so despite is stored in its sequence form.
    self.despite = FactorGroup(factors).sequence
def set_outputs(self, factors: Sequence[Factor]) -> None:
    """Set the outputs of this Procedure."""
    # Normalize through FactorGroup so outputs is stored in its sequence form.
    self.outputs = FactorGroup(factors).sequence
def triggers_next_procedure(
    self,
    other: Procedure,
    context: Optional[Union[ContextRegister, Explanation]] = None,
) -> Iterator[Explanation]:
    r"""
    Test if :class:`.Factor`\s from firing ``self`` would trigger ``other``.

    .. Note::
        To be confident that ``self`` actually can trigger ``other``,
        we would have to assume that self or other has ``universal: True``
        since otherwise there could be a mismatch between what is provided
        and what is needed to trigger ``other``.

    :param other:
        another :class:`Procedure` to test to see whether it can
        be triggered by triggering ``self``

    :returns:
        whether the set of :class:`Factor`\s that exist after ``self``
        is fired could trigger ``other``
    """
    if not isinstance(context, Explanation):
        context = Explanation.from_context(context)
    # Everything available after firing self: its outputs plus its inputs.
    available_factors = FactorGroup((*self.outputs, *self.inputs))
    # Factors that may satisfy other's "despite" requirements.
    non_blocking_factors = FactorGroup((*self.despite, *self.inputs))
    for inputs_explanation in available_factors.explanations_implication(
        other.inputs_group, context=context
    ):
        yield from non_blocking_factors.explanations_implication(
            other.despite_group, context=inputs_explanation
        )
def __or__(self, other: Comparable) -> Optional[Comparable]:
    """Delegate the ``|`` operator to :meth:`union`."""
    return self.union(other)
def union(
    self,
    other: Comparable,
    context: Optional[Union[ContextRegister, Explanation]] = None,
) -> Optional[Comparable]:
    """Get a procedure with all the inputs and outputs of self and other.

    Returns ``None`` when no explanation for a union can be found.
    """
    if not isinstance(context, Explanation):
        context = Explanation.from_context(context)
    # The first explanation found is enough to build the union.
    for explanation in self.explanations_union(other, context):
        return self._union_from_explanation(other, explanation)
    return None
def valid_for_exclusive_tag(self) -> None:
    """Raise ValueError unless this Procedure may be labeled "exclusive".

    An "exclusive" Procedure must have exactly one output, that output
    must not be an "absent" Factor, and there must be at least one input.
    """
    if len(self.outputs) != 1:
        raise ValueError(
            "The 'exclusive' attribute is not allowed for Holdings "
            "with more than one 'output' Factor. If the set of Factors "
            "in 'inputs' is really the only way to reach any of the "
            "'outputs', consider making a separate 'exclusive' Rule "
            "for each output."
        )
    if self.outputs[0].absent:
        raise ValueError(
            "The 'exclusive' attribute is not allowed for Holdings "
            "with an 'absent' 'output' Factor. This would indicate "
            "that the output can or must be present in every litigation "
            "unless specified inputs are present, which is unlikely."
        )
    if not self.inputs:
        raise ValueError(
            "The 'exclusive' attribute is not allowed for Holdings "
            "with no 'input' Factors, since the 'exclusive' attribute "
            "asserts that the inputs are the only way to reach the output."
        )
    return None
|
from django.urls import path
from notifications.views import ShowNotifications, DeleteNotification

# URL routes for the notifications app.
urlpatterns = [
    # Notification list for the current user.
    path('', ShowNotifications, name='show-notifications'),
    # Delete one notification; noti_id is captured as a string path segment.
    path('<noti_id>/delete', DeleteNotification, name='delete-notification'),
] |
load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_archive")
# BUILD file content injected into the downloaded bats-shellmock archive.
# It exposes the library itself, a test helper (with its bats-* deps),
# and the upstream test sources as Bazel targets.
BATS_SHELLMOCK_BUILD="""
sh_library(
    name = "shellmock",
    srcs = ["load.bash"] + glob(["src/*"]),
    visibility = ["//visibility:public"],
)
sh_library(
    name = "test_helper",
    srcs = ["test/test_helper.bash"],
    visibility = ["//visibility:public"],
    data = [
        "@bazel_shellmock_deps//bats-support",
        "@bazel_shellmock_deps//bats-assert",
        "@bazel_shellmock_deps//bats-file",
    ],
)
sh_library(
    name = "test_lib",
    srcs = ["test/shellmock.bats"],
    visibility = ["//visibility:public"],
    deps = [
        ":test_helper",
    ],
)
exports_files(glob(["test/**"]))
"""
def bazel_shellmock_dependencies(
        version = "4c009bf2461147d9522781d99c4cf2d520e712c2",
        sha256 = "ca212f2705b683f820134bc737ce0d398c7e9f37be198e15f64d24c72bf4bbc5",
    ):
    """Declares the @bats_shellmock repository pinned to a known commit.

    Args:
        version: git commit (or tag) of duanemay/bats-shellmock to fetch.
        sha256: checksum of the release archive for that version.
    """
    # "Maybe"-style load: only declare the repository when the caller has
    # not already done so, which lets users override the pinned version by
    # declaring @bats_shellmock before calling this macro. (The original
    # comment promised this but the rule was declared unconditionally.)
    if "bats_shellmock" not in native.existing_rules():
        http_archive(
            name = "bats_shellmock",
            build_file_content = BATS_SHELLMOCK_BUILD,
            urls = [
                "https://github.com/duanemay/bats-shellmock/archive/%s.tar.gz" % version,
            ],
            strip_prefix = "bats-shellmock-%s" % version,
            sha256 = sha256,
        )
|
import chainer
import chainer.functions as F
import numpy as np
from dataset import Coordination
from models.common import get_pair_value
from models.teranishi17 import Teranishi17
from parsers.common import Parser
class Teranishi17Parser(Parser):
    """Decode coordination structures from a trained Teranishi17 model.

    For every coordinating conjunction (cc) the model scores a
    "no coordination" option plus candidate conjunct pairs; this parser
    turns those scores into :class:`Coordination` objects.
    """

    def __init__(self, model, comma_id, decoding=True):
        # model: trained Teranishi17 network producing ckey/pair scores.
        # comma_id: vocabulary id of the comma token, used to locate
        #   conjunct separators.
        # decoding: if True, solve all coordinations in a sentence jointly;
        #   otherwise pick the best pair for each cc independently.
        if not isinstance(model, Teranishi17):
            raise ValueError("Unsupported model: {}"
                             .format(model.__class__.__name__))
        self.model = model
        self.comma_id = comma_id
        self.decoding = decoding

    def parse(self, words, postags, chars, cc_indices, sep_indices,
              cont_embeds=None, n_best=1, use_cache=False):
        """Parse a batch of sentences; returns one result list per sentence."""
        if n_best != 1:
            raise ValueError("Only supported 1 best parsing")
        coords_batch = []
        # Run the network unless a cached forward pass can be reused.
        if not use_cache or not self.model.has_cache():
            self.model.forward(words, postags, chars,
                               cc_indices, sep_indices, cont_embeds)
        parsed = self.model.cache
        pairs = parsed['pairs']
        ckey_scores = chainer.cuda.to_cpu(parsed['ckey_scores'].data)
        pair_scores = chainer.cuda.to_cpu(parsed['pair_scores'].data)
        # Scores for the batch are flat; split them back per cc
        # using the recorded offsets.
        ckey_scores = F.split_axis(
            ckey_scores, parsed['ckey_offsets'][:-1], axis=0)
        pair_scores = F.split_axis(
            pair_scores, parsed['pair_offsets'][:-1], axis=0)
        coord_index = 0
        for batch_index in range(len(words)):
            n_ccs = len(cc_indices[batch_index])
            if n_ccs == 0:
                # No coordinating conjunction: nothing to decode.
                coords_batch.append([])
                continue
            results = self._parse_each(words[batch_index],
                                       cc_indices[batch_index],
                                       pairs[batch_index],
                                       ckey_scores[batch_index],
                                       pair_scores[coord_index:
                                                   coord_index + n_ccs],
                                       n_best)
            coords_batch.append(results)
            coord_index += n_ccs
        return coords_batch

    def _parse_each(self, words, cc_indices, pairs,
                    cc_scores, pair_scores, n_best):
        """Decode the coordinations of one sentence into a {cc: Coordination} dict."""
        # Expand each candidate-index tensor into concrete (begin, end) tuples.
        pairs = [[(idx1, idx2) for idx1, idx2 in zip(*get_pair_value(idx))]
                 if idx is not None else None for idx in pairs]
        if self.decoding:
            # Joint search over all ccs, rejecting incompatible pair choices.
            coords, score = _solve_coords(
                words, cc_indices, pairs,
                cc_scores.data, pair_scores, self.comma_id)
        else:
            # Greedy per-cc argmax with no interaction between coordinations.
            coords, score = _solve_coords_independently(
                words, cc_indices, pairs, cc_scores.data, pair_scores,
                self.comma_id)
        coord_dict = {}
        if coords is None:
            coords = [None] * len(cc_indices)
        for conjuncts, cc in zip(coords, cc_indices):
            if conjuncts is not None:
                coord = Coordination(cc, conjuncts)
            else:
                # None means this cc was judged not to head a coordination.
                coord = None
            coord_dict[cc] = coord
        return [(coord_dict, score)]
def _solve_coords_independently(words, cc_indices, pairs_all,
                                cc_scores, pair_scores_all, comma_id):
    """Pick the best conjunct pair for each cc independently (no joint search).

    Returns (conjunct spans per cc, summed score); a span list entry is
    None when "no coordination" scores highest for that cc.
    """
    coords_conjuncts = []
    score = 0.0
    # Positions of separator (comma) tokens in the sentence.
    seps = [i for i, word in enumerate(words) if word == comma_id]
    for i, cc in enumerate(cc_indices):
        # Option 0 is the "no coordination" score; options 1.. map to pairs.
        scores = F.concat((cc_scores[i], pair_scores_all[i]), axis=0).data
        idx = F.argmax(scores, axis=0).data
        if idx > 0:
            pair = pairs_all[i][idx - 1]
            conjuncts = _split_coord(pair, cc, seps)
        else:
            conjuncts = None
        coords_conjuncts.append(conjuncts)
        score += scores[idx]
    return (coords_conjuncts, score)
def _solve_coords(words, cc_indices, pairs_all, cc_scores, pair_scores_all,
                  comma_id, filter_nested_in_conj=False):
    """Jointly choose one option (a pair or None) per cc via beam search.

    Builds combinations over ccs, keeping only mutually compatible
    (disjoint or nested) pairs, and returns the conjunct spans of the
    best-scoring valid combination together with its score, or
    (None, -inf) when no valid combination survives.
    """
    # Beam width over partial combinations of per-cc choices.
    MAX_AGENDA_SIZE = 1024
    agenda = []
    seps = [i for i, word in enumerate(words) if word == comma_id]
    for cc, pairs, cc_score, pair_scores in \
            zip(cc_indices, pairs_all, cc_scores, pair_scores_all):
        if isinstance(cc_score, chainer.Variable):
            cc_score = cc_score.data
        if isinstance(pair_scores, chainer.Variable):
            pair_scores = pair_scores.data
        if pairs is not None:
            # Option 0 is "no coordination" (None); then the real pairs.
            pairs = [None] + pairs
            scores = np.concatenate((cc_score, pair_scores), axis=0)
        else:
            pairs = [None]
            scores = cc_score
        assert len(pairs) == scores.shape[0]
        if len(agenda) == 0:
            # First cc: seed the agenda with every single-choice state.
            agenda = [[[pair], score] for pair, score in zip(pairs, scores)]
            agenda.sort(key=lambda x: x[1], reverse=True)
            del agenda[MAX_AGENDA_SIZE:]
            continue
        new_agenda = []
        for candidate_pair, candidate_score in zip(pairs, scores):
            for pair_comb, score_comb in agenda:
                # Keep only extensions compatible with every chosen pair.
                is_valid = all(is_valid_coords(pair, candidate_pair)
                               for pair in pair_comb)
                if is_valid:
                    new_agenda.append([pair_comb + [candidate_pair],
                                       score_comb + candidate_score])
        agenda = new_agenda
        agenda.sort(key=lambda x: x[1], reverse=True)
        del agenda[MAX_AGENDA_SIZE:]
    for pairs, score in agenda:
        coords_conjuncts = [_split_coord(pair, cc, seps)
                            if pair is not None else None
                            for pair, cc in zip(pairs, cc_indices)]
        is_valid = True
        if filter_nested_in_conj:
            # NOTE(review): this reassignment is redundant — is_valid is
            # already True at this point.
            is_valid = True
            for i, next_conjuncts in enumerate(coords_conjuncts):
                is_valid = all(is_valid_conjuncts(conjuncts, next_conjuncts)
                               for conjuncts in coords_conjuncts[:i])
                if not is_valid:
                    break
        if is_valid:
            # The agenda is sorted, so the first valid combination is best.
            return (coords_conjuncts, score)
    return (None, -np.inf)
def _split_coord(coord, cc, seps):
spans = []
buf = []
for i in range(coord[0], cc):
if i not in seps:
buf.append(i)
elif len(buf) > 0:
spans.append((buf[0], buf[-1]))
buf = []
if len(buf) > 0:
spans.append((buf[0], buf[-1]))
if len(spans) == 0:
spans.append((coord[0], cc - 1))
right_begin = (cc + 1 if cc + 1 not in seps else cc + 2)
spans.append((right_begin, coord[1]))
assert len(spans) >= 2
return spans
def is_valid_coords(a, b):
    """Return True when spans a and b are compatible: either may be None,
    they may be disjoint, or one may be nested inside the other."""
    if a is None or b is None:
        return True
    disjoint = a[1] < b[0] or b[1] < a[0]
    a_contains_b = a[0] <= b[0] and b[1] <= a[1]
    b_contains_a = b[0] <= a[0] and a[1] <= b[1]
    return disjoint or a_contains_b or b_contains_a
def is_valid_conjuncts(a, b):
    """Return True when two conjunct-span lists are compatible.

    Two coordinations are compatible when either is absent (None), when
    they are disjoint, or when the inner one fits entirely inside a single
    conjunct of the outer one.

    Bug fix: the None guard previously returned None (falsy), so a
    "no coordination" choice wrongly invalidated any combination checked
    with all(); is_valid_coords treats None as valid, and this now agrees.
    """
    if a is None or b is None:
        # An absent coordination cannot conflict with anything.
        return True
    # Disjoint coordinations are always compatible.
    if a[-1][1] < b[0][0] or b[-1][1] < a[0][0]:
        return True
    # b nested in a: valid only if b fits inside one conjunct of a.
    if a[0][0] <= b[0][0] and b[-1][1] <= a[-1][1]:
        b_coord = (b[0][0], b[-1][1])
        return any(a_span[0] <= b_coord[0] and b_coord[1] <= a_span[1]
                   for a_span in a)
    # a nested in b: valid only if a fits inside one conjunct of b.
    if b[0][0] <= a[0][0] and a[-1][1] <= b[-1][1]:
        a_coord = (a[0][0], a[-1][1])
        return any(b_span[0] <= a_coord[0] and a_coord[1] <= b_span[1]
                   for b_span in b)
    return False
|
# Generated by Django 2.1.4 on 2019-01-01 10:43
from django.db import migrations
class Migration(migrations.Migration):
    """Drop the obsolete ``course`` field from the ``user`` model."""

    dependencies = [
        ('users', '0004_auto_20181228_1547'),
    ]

    operations = [
        migrations.RemoveField(
            model_name='user',
            name='course',
        ),
    ]
|
"""
Faça um Programa que peça a temperatura em graus Fahrenheit,
transforme e mostre a temperatura em graus Celsius.
C = 5 * ((F-32) / 9).
"""
farenheit = float(input('Informe a temperatura em Farenheit: '))
celsius = 5 * (farenheit - 32) / 9.0
print(f'A temperatura em Celsius é: {celsius}')
|
import py
from rpython.jit.metainterp.test import test_string
from rpython.jit.backend.ppc.test.support import JitPPCMixin
class TestString(JitPPCMixin, test_string.TestLLtype):
    """Run the shared lltype string tests against the PPC JIT backend."""
    # for the individual tests see
    # ====> ../../../metainterp/test/test_string.py
    pass
class TestUnicode(JitPPCMixin, test_string.TestLLtypeUnicode):
    """Run the shared lltype unicode-string tests against the PPC JIT backend."""
    # for the individual tests see
    # ====> ../../../metainterp/test/test_string.py
    pass
|
from typing import List
import logging
from pathlib import Path
import itertools
import parsy
from elm_doc import elm_parser
from elm_doc.elm_parser import Chunk
logger = logging.getLogger(__name__)
def strip_ports_from_file(elm_file: Path) -> None:
    """Rewrite ``elm_file`` in place with its port declarations stubbed out."""
    original = elm_file.read_text()
    elm_file.write_text(strip_ports_from_string(original, str(elm_file)))
def strip_ports_from_string(source: str, source_path: str = '<unknown>') -> str:
    """Return Elm source with port declarations replaced by dummy code.

    ``source_path`` is used only to label error messages.
    """
    output = []
    for chunk in elm_parser.iter_line_chunks(source):
        if chunk.is_port_function():
            output.extend(_rewrite_port_function(chunk, source_path))
        elif chunk.is_port_module():
            output.extend(_rewrite_port_module(chunk))
        else:
            # Non-port chunks pass through unchanged.
            output.extend(chunk.raw_lines())
    return ''.join(output)
def _rewrite_port_module(chunk: Chunk) -> List[str]:
    """Drop the ``port `` keyword from a ``port module`` declaration line."""
    return [
        line.raw[len('port '):] if line.raw.startswith('port module') else line.raw
        for line in chunk.lines
    ]
def _rewrite_port_function(chunk: Chunk, source_path: str) -> List[str]:
    """Replace a port declaration with a non-port dummy implementation.

    Strips the ``port `` keyword from the declaration lines and appends a
    generated no-op body so the module compiles without ports. Raises
    parsy.ParseError if the declaration cannot be parsed.
    """
    output = []
    # Trailing comment/blank lines of the chunk; emitted after the
    # generated dummy implementation.
    trailing_non_sources = list(reversed(list(itertools.takewhile(
        lambda l: not l.is_source(), reversed(chunk.lines)))))
    declaration_lines = chunk.lines[:-len(trailing_non_sources)] if trailing_non_sources else chunk.lines
    for line in declaration_lines:
        if line.raw.startswith('port '):
            output.append(line.raw[len('port '):])
        else:
            # These lines may contain comments which, semantically
            # speaking, belong to the next function: the output may
            # not make sense to human reader. Since our audience is
            # the Elm compiler, we don't care that much.
            output.append(line.raw)
    # If this chunk is the very last lines of a file, we may need to add a newline.
    if (not trailing_non_sources) and (not chunk.lines[-1].raw.endswith('\n')):
        output.append('\n')
    try:
        one_liner = ''.join([raw.strip() for raw in chunk.non_comment_raw_lines()])
        port_info = elm_parser.parse_port_declaration(one_liner)
        output.append(_make_dummy_port_implementation(port_info) + '\n')
    except parsy.ParseError:
        line_num = chunk.lines[0].number
        logger.error('''{}:{}: failed to parse a port declaration. We parsed it as:
    {}
If this is a valid port declaration, please submit a bug report'''.format(
            source_path, line_num, one_liner))
        raise
    for trailing_line in trailing_non_sources:
        output.append(trailing_line.raw)
    return output
def _make_dummy_port_implementation(port_info: elm_parser.PortInfo) -> str:
    """Render a no-op Elm implementation for the parsed port declaration."""
    # One placeholder argument name per declared port argument: a0, a1, ...
    arg_names = ['a{}'.format(i) for i in range(len(port_info.args))]
    # Body is Cmd.none or Sub.none depending on the port's type.
    body = '{}.none'.format(port_info.port_type.value)
    return ' '.join([port_info.name] + arg_names + ['=', body])
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.