from pygments.lexer import RegexLexer, inherit, words
from pygments.token import *
from pygments.lexers.c_cpp import CppLexer
gridtools_keywords = ((
'accessor',
'in_accessor',
'inout_accessor',
'aggregator_type',
'arg',
'tmp_arg',
'param_list',
'backward',
'cache',
'cells',
'data_store',
'dimension',
'edges',
'enumtype',
'execute',
'expand_factor',
'extent',
'fill',
'fill_and_flush',
'flush',
'forward',
'global_accessor',
'global_parameter',
'grid',
'icosahedral_topology',
'interval',
'intent',
'layout_map',
'level',
'local',
'parallel',
'storage_traits',
'vertices',
    'direction',
    'sign',
    'halo_descriptor',
'field_on_the_fly',
'call',
'call_proc',
'with',
'at'
))
gridtools_namespace = ((
'cache_io_policy',
'cache_type',
'enumtype',
))
gridtools_functions = ((
'define_caches',
'make_computation',
'make_positional_computation',
'make_expandable_computation',
'make_expandable_positional_computation',
'make_global_parameter',
'update_global_parameter',
'make_host_view',
'make_target_view',
'make_multistage',
'make_stage',
    'make_independent',
    'boundary',
'halo_exchange_dynamic_ut',
'halo_exchange_generic',
))
gridtools_macros = ((
'GT_FUNCTION',
))
class GridToolsLexer(CppLexer):
name = "gridtools"
aliases = ['gridtools']
tokens = {
'statement': [
(words(gridtools_keywords, suffix=r'\b'), Keyword),
(words(gridtools_functions, suffix=r'\b'), Name.Label),
(words(gridtools_namespace, suffix=r'\b'), Name.Namespace),
(words(gridtools_macros, suffix=r'\b'), Comment.Preproc),
inherit,
]
}
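# A minimal usage sketch (the sample source string is illustrative; highlight
# and TerminalFormatter are the standard pygments entry points):
if __name__ == '__main__':
    from pygments import highlight
    from pygments.formatters import TerminalFormatter
    sample = 'GT_FUNCTION void copy(in_accessor<0> src, inout_accessor<1> dst);'
    print(highlight(sample, GridToolsLexer(), TerminalFormatter()))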
|
import xarray as xr
import torch
import numpy as np
import pathlib
def save_as_netcdf(sdf, real_flows, pred_flows, indices, epoch, level, name, data_dir):
# Convert to numpy array
sdf = sdf.numpy()
real_flows = real_flows.numpy()
pred_flows = pred_flows.numpy()
sub_dir = pathlib.Path(f'{data_dir}/{name}')
if not sub_dir.exists():
sub_dir.mkdir(parents=True)
for sdf_, real_flows_, pred_flows_, idx in zip(sdf, real_flows, pred_flows, indices):
filename = sub_dir / f'{name}{idx:06}_Lv{level}_epoch{epoch:04}.h5'
# Check the dimension first
shape = sdf_.shape
dim = len(shape)
if dim == 3:
# Unpatched data (#channel, #h, #w)
coords_list = ['y', 'x']
data_vars = {}
data_vars['SDF'] = (coords_list, sdf_[0])
data_vars['u'] = (coords_list, real_flows_[0])
data_vars['v'] = (coords_list, real_flows_[1])
data_vars['u_hat'] = (coords_list, pred_flows_[0])
data_vars['v_hat'] = (coords_list, pred_flows_[1])
coords = {}
_, ny, nx = shape
coords['y'], coords['x'] = np.arange(ny), np.arange(nx)
xr.Dataset(data_vars = data_vars, coords=coords).to_netcdf(filename, engine='netcdf4')
else:
# Patched data (#patch_h, #patch_x, #channel, #h, #w)
            coords_list = ['patch_y', 'patch_x', 'y', 'x']
            data_vars = {}
            data_vars['SDF'] = (coords_list, sdf_[:,:,0])
data_vars['u'] = (coords_list, real_flows_[:,:,0])
data_vars['v'] = (coords_list, real_flows_[:,:,1])
data_vars['u_hat'] = (coords_list, pred_flows_[:,:,0])
data_vars['v_hat'] = (coords_list, pred_flows_[:,:,1])
coords = {}
py, px, _, ny, nx = shape
coords['y'], coords['x'] = np.arange(ny), np.arange(nx)
coords['patch_y'], coords['patch_x'] = np.arange(py), np.arange(px)
xr.Dataset(data_vars = data_vars, coords=coords).to_netcdf(filename, engine='netcdf4')
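# A minimal sketch of the unpatched (3-D) path with illustrative shapes; the
# netcdf4 engine must be installed for the writes to succeed:
if __name__ == '__main__':
    sdf = torch.rand(2, 1, 8, 8)    # (#batch, #channel, h, w)
    flows = torch.rand(2, 2, 8, 8)  # u and v channels
    save_as_netcdf(sdf, flows, flows, indices=range(2), epoch=0,
                   level=0, name='demo', data_dir='./data')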
|
import pandas as pd
from rrcforest import LagFeatures
def test_lag_features():
data = pd.DataFrame({
'A': list(range(10)),
'B': list(range(10, 20)),
})
lf = LagFeatures(2, data.columns)
assert set(lf.feature_columns) == {'A_0', 'A_1', 'B_0', 'B_1'}
# No data inserted yet
assert pd.Series(lf.features).isna().all()
# Some data inserted, but not enough to remove all NaNs
lf.insert(data.iloc[0])
nans = pd.Series(lf.features).isna()
print(lf.features)
assert nans.any()
assert not nans.all()
assert nans[['A_1', 'B_1']].all()
assert not nans[['A_0', 'B_0']].any()
assert lf.features['A_0'] == 0
assert lf.features['B_0'] == 10
# Enough data inserted to fill the buffer
lf.insert(data.iloc[1])
assert not pd.Series(lf.features).isna().any()
assert lf.features == {
'A_0': 1,
'B_0': 11,
'A_1': 0,
'B_1': 10,
}
def test_lag_features_iterate():
data = pd.DataFrame({
'A': list(range(0, 10)),
'B': list(range(10, 20)),
})
    # A single lagged feature means an identity transformation
lf = LagFeatures(1, data.columns)
lagged_data = (pd.DataFrame(
list(lf.iterate(row for _, row in data.iterrows())))
.rename(columns={'A_0': 'A', 'B_0': 'B'})
)
assert (data == lagged_data).all().all()
|
rows, cols = [int(el) for el in input().split()]
matrix = [list(input()) for row in range(rows)]
commands = list(input())
spawn_pos = ((-1, 0), (0, 1), (1, 0), (0, -1))
player_pos = []
dead = False
won = False
result = ""
for row in range(rows):
for col in range(cols):
if matrix[row][col] == "P":
player_pos = [row, col]
def spread_bunnies():
global dead, result
for row in range(rows):
for column in range(cols):
if matrix[row][column] == "B":
for pos in spawn_pos:
current_row = row + pos[0]
current_col = column + pos[1]
if 0 <= current_row < rows and 0 <= current_col < cols:
if matrix[current_row][current_col] == ".":
matrix[current_row][current_col] = "b"
elif matrix[current_row][current_col] == "P" and not won and not dead:
dead = True
result = f"dead: {current_row} {current_col}"
matrix[current_row][current_col] = "b"
for row in range(rows):
for column in range(cols):
if matrix[row][column] == "b":
matrix[row][column] = "B"
if won or dead:
[print(''.join(sublist)) for sublist in matrix]
print(result)
raise SystemExit
def move_player(row, column):
global player_pos, won, dead, result
if 0 <= row < rows and 0 <= column < cols:
if matrix[row][column] == "B":
result = f"dead: {row} {column}"
matrix[player_pos[0]][player_pos[1]] = "."
dead = True
else:
matrix[player_pos[0]][player_pos[1]] = "."
player_pos = [row, column]
matrix[player_pos[0]][player_pos[1]] = "P"
else:
result = f"won: {player_pos[0]} {player_pos[1]}"
matrix[player_pos[0]][player_pos[1]] = "."
won = True
spread_bunnies()
while not dead and not won:
current_command = commands.pop(0)
if current_command == "U":
move_player(player_pos[0]-1, player_pos[1])
elif current_command == "D":
move_player(player_pos[0]+1, player_pos[1])
elif current_command == "R":
move_player(player_pos[0], player_pos[1]+1)
elif current_command == "L":
move_player(player_pos[0], player_pos[1]-1)
|
# Copyright (C) 2015-2019 Tormod Landet
# SPDX-License-Identifier: Apache-2.0
import dolfin
from ocellaris.utils import ocellaris_error, RunnablePythonString
_BOUNDARY_CONDITIONS = {}
def add_boundary_condition(name, boundary_condition_class):
"""
Register a boundary condition
"""
_BOUNDARY_CONDITIONS[name] = boundary_condition_class
def register_boundary_condition(name):
"""
A class decorator to register boundary conditions
"""
def register(boundary_condition_class):
add_boundary_condition(name, boundary_condition_class)
return boundary_condition_class
return register
def get_boundary_condition(name):
"""
Return a boundary condition by name
"""
try:
return _BOUNDARY_CONDITIONS[name]
except KeyError:
ocellaris_error(
'Boundary condition "%s" not found' % name,
'Available boundary conditions:\n'
+ '\n'.join(
' %-20s - %s' % (n, s.description)
for n, s in sorted(_BOUNDARY_CONDITIONS.items())
),
)
raise
class BoundaryConditionCreator(object):
description = 'No description available'
from .boundary_region import BoundaryRegion
from .dof_marker import get_dof_region_marks, mark_cell_layers
from . import dirichlet
from . import neumann
from . import robin
from . import slip_length
from . import wall
from . import outlet
from . import decomposed
|
import arrow
from flask.views import MethodView
from flask import render_template, request, redirect, url_for
from database import db
from models.appointment import Appointment
from forms.new_appointment import NewAppointmentForm
class AppointmentResourceDelete(MethodView):
def post(self, id):
appt = db.session.query(Appointment).filter_by(id=id).one()
db.session.delete(appt)
db.session.commit()
return redirect(url_for('appointment.index'), code=303)
class AppointmentResourceCreate(MethodView):
def post(self):
form = NewAppointmentForm(request.form)
if form.validate():
from tasks import send_sms_reminder
appt = Appointment(
name=form.data['name'],
phone_number=form.data['phone_number'],
delta=form.data['delta'],
time=form.data['time'],
timezone=form.data['timezone'],
)
appt.time = arrow.get(appt.time, appt.timezone).to('utc').naive
db.session.add(appt)
db.session.commit()
send_sms_reminder.apply_async(
args=[appt.id], eta=appt.get_notification_time()
)
return redirect(url_for('appointment.index'), code=303)
else:
return render_template('appointments/new.html', form=form), 400
class AppointmentResourceIndex(MethodView):
def get(self):
all_appointments = db.session.query(Appointment).all()
return render_template('appointments/index.html', appointments=all_appointments)
class AppointmentFormResource(MethodView):
def get(self):
form = NewAppointmentForm()
return render_template('appointments/new.html', form=form)
|
# -*- coding: gbk -*-
"""
HTTP Client
http://www.hzfc365.com/house_search/search_prj.jsp?lpid=1374
"""
import sys, os, logging, re
from http_client import HTTPClient
class SimpleCrawler(object):
def __init__(self):
self.http = HTTPClient()
self.debug = True
def start(self, lpid):
url = "http://www.hzfc365.com/house_search/search_prj.jsp?lpid=%s" % lpid
reps_text = self.http.download(url)
if self.debug:
self._save_temp(reps_text, lpid)
build_list = self.parse_build_info(reps_text)
for build in build_list:
logging.info("start fetch:%s, Kai Pan Shi jian:%s" % (1, 2))
build.lpid = lpid
for zh_nm in build.zh_nm_list:
self.fetch_zh_nm_pid_data(build, *zh_nm)
def fetch_zh_nm_pid_data(self, build, zh_nm, pid, name):
url = "http://www.hzfc365.com/house_view/lpxx-xs-2.jsp"\
"?zh_nm=%s&pid=%s" % (zh_nm, pid)
referer_url = "http://www.hzfc365.com/house_search/search_prj.jsp?lpid=%s" % build.lpid
reps_text = self.http.download(url, {"Referer": referer_url})
if self.debug:
self._save_temp(reps_text, zh_nm)
self._parse_room_info(reps_text, referer_url=url)
def _parse_room_info(self, reps_text=None, cache_name = None, referer_url=None):
if cache_name: reps_text = self._read_temp(cache_name)
#http://www.hzfc365.com/house_view/lpxx-xs-2.jsp?zh_nm=120620&pid=87401
"""http://www.hzfc365.com/house_view/lpxx-xs-2-yt.jsp?zh_nm=120618&q_area=&keytime=1288790113772&sessionid=2ECA879E33D7BECC3941443553DAD4FC"""
"""http://www.hzfc365.com/house_view/lpxx-xs-2-yt.jsp?
zh_nm=120618&
q_area=&
keytime=1288790113772&
//12887910169
sessionid=2ECA879E33D7BECC3941443553DAD4FC"""
        r_zh_nm = re.search(r'<input id="info_zh_nm" type="hidden" value="(\d+)">', reps_text).group(1)
        sessionid = re.search(r'<input id="sessionid" type="hidden" value="(\w+)">', reps_text).group(1)
import time
cur_time = time.time()
logging.info("r_zh_nm=%s, sessionid=%s, time=%s" % (r_zh_nm, sessionid, cur_time))
url = "http://www.hzfc365.com/house_view/lpxx-xs-2-yt.jsp?zh_nm=%s&q_area=&keytime=%s&sessionid=%s" % (r_zh_nm, cur_time * 100, sessionid)
reps_text = self.http.download(url, {"Referer": referer_url})
if self.debug:
self._save_temp(reps_text, "d%s" % r_zh_nm)
return self._parse_room_detail_info(reps_text)
def _parse_room_detail_info(self, reps_text=None, cache_name=None):
if cache_name: reps_text = self._read_temp(cache_name)
regex = "title='([^']+)'"
factor = re.compile(regex, re.I)
data = []
for item in factor.finditer(reps_text):
logging.info("details:%s" % str(item.groups()))
data.append(RoomInfo(*item.groups()))
return data
#<input id="info_zh_nm" type="hidden" value="120620">
#<input id="sessionid" type="hidden" value="27D3C558B4A632ABFE27FC1B48ADAB44">
def parse_build_info(self, reps_text=None, cache_name = None):
if cache_name: reps_text = self._read_temp(cache_name)
td = r"\s+<td[^>]+>(.*?)</td>"
regex = r"<TR onmouseover=[^>]+><A [^>]+>%s</A>" % (td * 7)
regex += "\s+<td[^>]+>(.*?)</td>"
regex += "\s+</tr>"
factor = re.compile(regex, re.I)
#items = ( e.group(1) for e in factor.finditer(expr) )
data = []
for item in factor.finditer(reps_text):
logging.info("data:%s" % str(item.groups()))
data.append(BuidingInfo(*item.groups()))
return data
def _save_temp(self, data, name):
fd = open("temp_cache_%s.txt" % name, "w")
fd.write(data)
fd.close()
def _read_temp(self, name):
data = ""
fd = open("temp_cache_%s.txt" % name, "r")
data = fd.read()
fd.close()
return data
class BuidingInfo(object):
def __init__(self, yszh, kpsj, ksts, ksmj, ysts, ysjj, yydts, ysds):
self.id = None
self.yszh = yszh
self.kpsj = kpsj
self.ksts = ksts
self.ksmj = ksmj
self.ysts = ysts
self.ysjj = ysjj
self.yydts = yydts
self.ysds = ysds
#yszh, kpsj, ksts, ksmj, ysts, ysjj, yydts, ysds
#self.rooms = {}
self.zh_nm_list = self._parse_zh_nm_list(ysds)
def _parse_zh_nm_list(self, ysds):
regex = r'<a href=".*?zh_nm=(\d+)&pid=(\d+)".*?>(.*?)</a>'
factor = re.compile(regex, re.I)
data = []
for item in factor.finditer(ysds):
logging.info("data:%s" % str(item.groups()))
data.append(item.groups())
return data
#<a href="/house_view/lpxx-xs-2.jsp?zh_nm=119218&pid=86101" target="_blank" class="main">2\xb4\xb1</a>
class RoomInfo(object):
    def __init__(self, data):
        self.id = None
        self.data = data  # keep the raw title text parsed from the page
def main(lpid):
    logging.basicConfig(stream=sys.stdout, level=logging.DEBUG)
    logging.info("starting process lpid:%s" % lpid)
    crawler = SimpleCrawler()
    crawler.debug = True
    crawler.start(lpid)
#xxx = crawler.parse_build_info(None, "1374")
#build = lambda:1
#build.lpid = "1374"
#crawler.fetch_zh_nm_pid_data(build, "120620", "87401", "4")
#crawler._parse_room_detail_info(None, "d120620")
logging.info("end process lpid %s." % lpid)
if "__main__" == __name__:
main("")
if len(sys.argv) != 2:
print "python crawler.py <lpid>"
else:
main(sys.argv[1])
|
'''
263. Ugly Number Easy
An ugly number is a positive integer whose prime factors are limited to 2, 3, and 5.
Given an integer n, return true if n is an ugly number.
Example 1:
Input: n = 6
Output: true
Explanation: 6 = 2 × 3
Example 2:
Input: n = 8
Output: true
Explanation: 8 = 2 × 2 × 2
Example 3:
Input: n = 14
Output: false
Explanation: 14 is not ugly since it includes the prime factor 7.
Example 4:
Input: n = 1
Output: true
Explanation: 1 has no prime factors, therefore all of its prime factors are limited to 2, 3, and 5.
Constraints:
-2^31 <= n <= 2^31 - 1
'''
class Solution:
    def isUgly(self, n: int) -> bool:
        # Non-positive n (allowed by the constraints) is never ugly and would
        # otherwise loop forever below.
        if n <= 0:
            return False
        for div in [2, 3, 5]:
            while n % div == 0:
                n //= div
        return n == 1
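# A few sanity checks mirroring the examples above:
if __name__ == '__main__':
    s = Solution()
    assert s.isUgly(6) and s.isUgly(8) and s.isUgly(1)
    assert not s.isUgly(14) and not s.isUgly(0)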
|
import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
import numpy as np
import os
import math
from config import load_config
from preprocess import load_data
from model import Model
import urllib.request
def save_checkpoint(model, optimizer, args, epoch):
print('Model Saving...')
if args.device_num > 1:
model_state_dict = model.module.state_dict()
else:
model_state_dict = model.state_dict()
torch.save({
'model_state_dict': model_state_dict,
'global_epoch': epoch,
'optimizer_state_dict': optimizer.state_dict(),
}, os.path.join('checkpoints', 'checkpoint_model_best.pth'))
def _train(epoch, model, train_loader, optimizer, criterion, args):
model.train()
losses, step = 0., 0.
for i, (img, text) in enumerate(train_loader):
if args.cuda:
img, text = img.cuda(), text.cuda()
img_feats, text_feats = model(img, text)
logits = torch.matmul(img_feats, text_feats.T) * math.exp(args.temperature_factor)
        labels = torch.arange(text.size(0), device=logits.device)
optimizer.zero_grad()
loss = criterion(logits, labels)
loss.backward()
losses += loss.item()
step += 1
optimizer.step()
print('[Epoch: {}], losses: {}'.format(epoch, losses / step))
def main(args):
if not os.path.isdir('data'):
os.mkdir('data')
urllib.request.urlretrieve('https://openaipublic.azureedge.net/clip/bpe_simple_vocab_16e6.txt.gz',
filename='./data/bpe_simple_vocab_16e6.txt.gz')
model = Model(args.out_channels)
if args.cuda:
model = model.cuda()
args.input_resolution = 32
train_data, train_loader, test_data, test_loader = load_data(args)
optimizer = optim.AdamW(model.parameters(), lr=args.lr, weight_decay=args.weight_decay)
criterion = nn.CrossEntropyLoss()
if not os.path.isdir('checkpoints'):
os.mkdir('checkpoints')
for epoch in range(1, args.epochs + 1):
_train(epoch, model, train_loader, optimizer, criterion, args)
save_checkpoint(model, optimizer, args, epoch)
if __name__ == '__main__':
args = load_config()
main(args)
|
class Solution(object):
def pathSum(self, root, S):
        if not root: return []
opt = []
stack = []
stack.append((root, 0, []))
while stack:
node, s, path = stack.pop()
s += node.val
path = path + [node.val]
if not node.left and not node.right and s==S: opt.append(path)
if node.left: stack.append((node.left, s, path))
if node.right: stack.append((node.right, s, path))
return opt
"""
Time complexity is O(N), because we traverse all the nodes.
Space complexity is O(N^2), because in the worst case, all node could carry all the other nodes in the `path`.
"""
"""
Time complexity is O(N), because we traverse all the nodes.
Space complexity is O(N^2), because in the worst case, all node could carry all the other nodes in the `path`.
"""
class Solution(object):
def pathSum(self, root, targetSum):
if not root: return []
ans = []
stack = [(root, root.val, [root.val])]
while stack:
node, total, path = stack.pop()
if not node.left and not node.right and total==targetSum: ans.append(path)
if node.left: stack.append((node.left, total+node.left.val, path+[node.left.val]))
if node.right: stack.append((node.right, total+node.right.val, path+[node.right.val]))
        return ans
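# A minimal sketch exercising the solutions above; TreeNode is the usual
# LeetCode node shape, assumed here for illustration:
if __name__ == '__main__':
    class TreeNode(object):
        def __init__(self, val=0, left=None, right=None):
            self.val, self.left, self.right = val, left, right

    root = TreeNode(5, TreeNode(4, TreeNode(11, TreeNode(7), TreeNode(2))),
                    TreeNode(8, TreeNode(13), TreeNode(4, TreeNode(5), TreeNode(1))))
    print(Solution().pathSum(root, 22))  # the two root-to-leaf paths summing to 22
|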
#F = G.m1.m2/r^2
import matplotlib.pyplot as plt
#draw graph
def drawGraph(x,y):
plt.plot(x, y, marker='o')
plt.xlabel('Distances')
plt.ylabel('Gravitational force in Newton')
plt.title("Gravitational Force vs distance")
plt.show()
def generateR():
r = range(100, 1001, 50)
    F = []  # store calculated F
G = 6.674 * (10**-11)
m1 = 0.5
m2 = 1.5
# force
for dist in r:
force = G*(m1*m2)/dist**2
F.append(force)
drawGraph(r, F)
if __name__=='__main__':
generateR() |
# @Time : 11/11/21 9:43 PM
# @Author : Fabrice Harel-Canada
# @File : pyfuzz_h03_20191644.py
import sys
import os
sys.path.insert(1, os.path.abspath("."))
import h03_20191644 as testee
from pyfuzz.fuzzers import *
from pyfuzz.byte_mutations import *
from pyfuzz.fuzz_data_interpreter import *
import torch
import pandas as pd
def PyFuzzh03_20191644(data):
fdi = FuzzedDataInterpreter(data)
model_input_size = 4
test_input = torch.FloatTensor([
[fdi.claim_float() for _ in range(model_input_size)]
])
prediction = testee.predict(test_input)
print(test_input, prediction)
return prediction
if __name__ == "__main__":
testee.setup()
runner = FunctionRunner(PyFuzzh03_20191644)
seed = [bytearray([0] * 12)]
fuzzer = MutationFuzzer(seed, mutator=mutate_bytes)
results = fuzzer.runs(runner, 1000)
df = pd.DataFrame(results, columns=["output", "status"])
print(df.groupby("status").size())
print("fuzzer.failure_cases:")
print(fuzzer.failure_cases)
|
# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""quantization utils."""
import numpy as np
def cal_quantization_params(input_min,
input_max,
num_bits=8,
symmetric=False,
narrow_range=False):
r"""
calculate quantization params for scale and zero point.
Args:
input_min (int, list): The dimension of channel or 1.
input_max (int, list): The dimension of channel or 1.
        num_bits (int): Number of quantization bits; 4 and 8 bit are supported. Default: 8.
symmetric (bool): Quantization algorithm use symmetric or not. Default: False.
narrow_range (bool): Quantization algorithm use narrow range or not. Default: False.
Outputs:
scale (int, list): quantization param.
zero point (int, list): quantization param.
Examples:
>>> scale, zp = cal_quantization_params([1, 2, 1], [-2, 0, -1], 8, False, False)
"""
input_max = np.maximum(0.0, input_max)
input_min = np.minimum(0.0, input_min)
    if input_min.shape != input_max.shape:
        raise ValueError("input min shape should be equal to input max shape.")
    if len(input_min.shape) > 1:
        raise ValueError("input min and max should be one-dimensional.")
    if (input_min > input_max).any():
        raise ValueError("input min should be less than input max.")
if (input_max == input_min).all():
# scale = 1.0, zp = 0.0
return np.ones(input_min.shape), np.zeros(input_min.shape)
if symmetric:
quant_min = 0 - 2 ** (num_bits - 1)
quant_max = 2 ** (num_bits - 1)
else:
quant_min = 0
quant_max = 2 ** num_bits - 1
if narrow_range:
quant_min = quant_min + 1
# calculate scale
if symmetric:
input_max = np.maximum(-input_min, input_max)
input_min = -input_max
scale = (input_max - input_min) / (quant_max - quant_min)
# calculate zero point
if symmetric:
zp = np.zeros(input_min.shape)
else:
zp_from_min = quant_min - input_min / scale
zp_from_max = quant_max - input_max / scale
zp_from_min_error = np.abs(quant_min) + np.abs(input_min / scale)
zp_from_max_error = np.abs(quant_max) + np.abs(input_max / scale)
zp_double = zp_from_min if zp_from_min_error < zp_from_max_error else zp_from_max
if zp_double < quant_min:
zp = quant_min
elif zp_double > quant_max:
zp = quant_max
else:
zp = np.floor(zp_double + 0.5)
return scale, zp
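# A hedged usage sketch with illustrative one-element min/max arrays:
if __name__ == '__main__':
    scale, zp = cal_quantization_params(np.array([-1.0]), np.array([2.0]),
                                        num_bits=8, symmetric=False)
    print(scale, zp)  # per-tensor scale and zero point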
|
from model.project import Project
import re
class ProjectHelper:
def __init__(self, app):
self.app = app
project_cache = None
def create(self, project):
wd = self.app.wd
self.app.navigation.open_project_page()
wd.find_element_by_xpath('//input[@value="создать новый проект"]').click()
self.fill_project_fields(project)
wd.find_element_by_xpath('//input[@value="Добавить проект"]').click()
wd.find_element_by_xpath('//a[contains(text(),"Продолжить")]').click()
self.project_cache = None
def fill_project_fields(self, project):
self.change_field_value('project-name', project.name)
self.change_field_value('project-description', project.description)
def change_field_value(self, field_name, text):
wd = self.app.wd
if text is not None:
wd.find_element_by_id(field_name).click()
wd.find_element_by_id(field_name).clear()
wd.find_element_by_id(field_name).send_keys(text)
def get_project_list(self):
if self.project_cache is None:
wd = self.app.wd
self.app.navigation.open_project_page()
self.project_cache = []
l = len(wd.find_elements_by_xpath('//table/tbody/tr/td[1]/a'))
for i in range(l):
index = i + 1
element = wd.find_element_by_xpath('//table/tbody/tr['+str(index)+']/td[1]/a')
name = element.text
href = element.get_attribute("href")
                id = int(re.search(r"\d+$", href).group(0))
description = wd.find_element_by_xpath('//table/tbody/tr['+str(index)+']/td[5]').text
self.project_cache.append(Project(id=id, name=name, description=description))
return list(self.project_cache)
def delete_project_by_id(self, id):
wd = self.app.wd
self.app.navigation.open_project_page()
wd.find_element_by_xpath('//a[@href="manage_proj_edit_page.php?project_id='+str(id)+'"]').click()
wd.find_element_by_xpath('//input[@value="Удалить проект"]').click()
wd.find_element_by_xpath('//input[@value="Удалить проект"]').click()
self.project_cache = None
|
import os
BROKER_URL = os.environ.get('CLOUDAMQP_URL')
MONGODB_URI = os.environ.get('MONGODB_URI')
BACKEND_URI = os.environ.get('BACKEND_URI')
DB_NAME = os.environ.get('DB_NAME')
IG_PROFILE_COLL = 'instagram-profile'
IG_PROFILE_POST_COLL = 'instagram-post'
IG_HASHTAG_POST_COLL = 'instagram-hashtag-post'
FB_PROFILE_COLL = 'facebook-profile'
FB_PAGE_POST_COLL = 'facebook-page-post'
FB_GROUP_POST_COLL = 'facebook-group-post'
IG_USERNAME = os.environ.get("IG_USER")
IG_PASS = os.environ.get('IG_PASS')
FB_EMAIL = os.environ.get('FB_EMAIL')
FB_PASS = os.environ.get('FB_PASS') |
# Generated by Django 3.0.3 on 2020-02-22 18:22
from django.db import migrations
def fix_application_null(apps, schema_editor):
"""Fix Application meta_fields being null"""
Application = apps.get_model("passbook_core", "Application")
for app in Application.objects.all():
if app.meta_launch_url is None:
app.meta_launch_url = ""
if app.meta_icon_url is None:
app.meta_icon_url = ""
if app.meta_description is None:
app.meta_description = ""
if app.meta_publisher is None:
app.meta_publisher = ""
app.save()
class Migration(migrations.Migration):
dependencies = [
("passbook_core", "0010_auto_20200221_2208"),
]
operations = [migrations.RunPython(fix_application_null)]
|
from .vision import VisionDataset
from PIL import Image
import os
import os.path
from typing import Any, Callable, Optional, Tuple
class CocoCaptions(VisionDataset):
"""`MS Coco Captions <https://cocodataset.org/#captions-2015>`_ Dataset.
Args:
root (string): Root directory where images are downloaded to.
annFile (string): Path to json annotation file.
transform (callable, optional): A function/transform that takes in an PIL image
and returns a transformed version. E.g, ``transforms.ToTensor``
target_transform (callable, optional): A function/transform that takes in the
target and transforms it.
transforms (callable, optional): A function/transform that takes input sample and its target as entry
and returns a transformed version.
Example:
.. code:: python
import torchvision.datasets as dset
import torchvision.transforms as transforms
cap = dset.CocoCaptions(root = 'dir where images are',
annFile = 'json annotation file',
transform=transforms.ToTensor())
print('Number of samples: ', len(cap))
img, target = cap[3] # load 4th sample
print("Image Size: ", img.size())
print(target)
Output: ::
Number of samples: 82783
Image Size: (3L, 427L, 640L)
[u'A plane emitting smoke stream flying over a mountain.',
u'A plane darts across a bright blue sky behind a mountain covered in snow',
u'A plane leaves a contrail above the snowy mountain top.',
u'A mountain that has a plane flying overheard in the distance.',
u'A mountain view with a plume of smoke in the background']
"""
def __init__(
self,
root: str,
annFile: str,
transform: Optional[Callable] = None,
target_transform: Optional[Callable] = None,
transforms: Optional[Callable] = None,
) -> None:
super(CocoCaptions, self).__init__(root, transforms, transform, target_transform)
from pycocotools.coco import COCO
self.coco = COCO(annFile)
self.ids = list(sorted(self.coco.imgs.keys()))
def __getitem__(self, index: int) -> Tuple[Any, Any]:
"""
Args:
index (int): Index
Returns:
tuple: Tuple (image, target). target is a list of captions for the image.
"""
coco = self.coco
img_id = self.ids[index]
ann_ids = coco.getAnnIds(imgIds=img_id)
anns = coco.loadAnns(ann_ids)
target = [ann['caption'] for ann in anns]
path = coco.loadImgs(img_id)[0]['file_name']
img = Image.open(os.path.join(self.root, path)).convert('RGB')
if self.transforms is not None:
img, target = self.transforms(img, target)
return img, target
def __len__(self) -> int:
return len(self.ids)
class CocoDetection(VisionDataset):
"""`MS Coco Detection <https://cocodataset.org/#detection-2016>`_ Dataset.
Args:
root (string): Root directory where images are downloaded to.
annFile (string): Path to json annotation file.
transform (callable, optional): A function/transform that takes in an PIL image
and returns a transformed version. E.g, ``transforms.ToTensor``
target_transform (callable, optional): A function/transform that takes in the
target and transforms it.
transforms (callable, optional): A function/transform that takes input sample and its target as entry
and returns a transformed version.
"""
def __init__(
self,
root: str,
annFile: str,
transform: Optional[Callable] = None,
target_transform: Optional[Callable] = None,
transforms: Optional[Callable] = None,
) -> None:
super(CocoDetection, self).__init__(root, transforms, transform, target_transform)
from pycocotools.coco import COCO
self.coco = COCO(annFile)
self.ids = list(sorted(self.coco.imgs.keys()))
def __getitem__(self, index: int) -> Tuple[Any, Any]:
"""
Args:
index (int): Index
Returns:
tuple: Tuple (image, target). target is the object returned by ``coco.loadAnns``.
"""
coco = self.coco
img_id = self.ids[index]
ann_ids = coco.getAnnIds(imgIds=img_id)
target = coco.loadAnns(ann_ids)
path = coco.loadImgs(img_id)[0]['file_name']
img = Image.open(os.path.join(self.root, path)).convert('RGB')
if self.transforms is not None:
img, target = self.transforms(img, target)
return img, target
def __len__(self) -> int:
return len(self.ids)
|
from plumbum import local, FG
from plumbum.path.local import LocalPath
from plaster.tools.schema import check
from plaster.tools.utils import utils
def get_user():
    user = local.env.get("RUN_USER")
    if user is None or user == "":
        raise Exception("User not found in $RUN_USER")
return user
def validate_job_folder(job_folder):
"""
job_folder can be Python symbols
"""
basename = local.path(job_folder).name
if not utils.is_symbol(basename):
raise ValueError(
"job name must be a lower-case Python symbol (ie start with [a-z_] followed by [a-z0-9_]"
)
return local.path(job_folder)
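# Hedged examples; utils.is_symbol is assumed to accept snake_case names:
if __name__ == '__main__':
    print(validate_job_folder('/jobs/my_job_01'))  # a LocalPath
    # validate_job_folder('/jobs/Bad-Name')  # would raise ValueError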
|
import hashlib
from options.options import SysOptions
from rest_framework import generics, permissions, status
from rest_framework.response import Response
from django.contrib.auth.models import Permission
from django.core.cache import cache
from django.conf import settings
from conf.models import JudgeServer
from django.db.utils import OperationalError
from redis.exceptions import ConnectionError
from utils.api import APIView
from datetime import datetime, timedelta
import time
import logging
import psutil
logger = logging.getLogger("django.heartbeat")
class HeartBeatView(APIView):
permission_classes = (permissions.AllowAny,)
def get(self, request, format=None):
output_data = {}
output_status = status.HTTP_200_OK
res = 'ok'
# gives a single float value
cpu = psutil.cpu_percent()
print('cpu:',cpu)
# gives an object with many fields
print('memory:',psutil.virtual_memory())
# you can convert that object to a dictionary
#dict(psutil.virtual_memory()._asdict())
output_data['cpu_percent'] = str(cpu)
output_data['memory'] = str(psutil.virtual_memory().percent)
output_data['postgres'] = True
output_data['redis'] = True
output_data['judge_server'] = True
now = datetime.now()
output_data['current_time'] = str(now)
judgetime = time.time()
        try:  # Check the judge server's most recent heartbeat.
            servers = JudgeServer.objects.all()
            print("last_heartbeat", servers[0].last_heartbeat)  # only one judge server is running, so take the first queryset entry
            print("now", now)
            dt = servers[0].last_heartbeat.replace(tzinfo=None)  # strip the tz value (+00:00) from the UTC datetime
sub = now - dt
if sub.seconds > 10:
print("judge-server no response")
output_data['judge_server'] = False
        except Exception:
print("judge-server Not Exist")
output_data['judge_server'] = False
judgetime = time.time() - judgetime
print("저지 서버 연결 유무 확인", judgetime)
output_data['judgetime'] = str(judgetime)
postgrestime = time.time()
try:
Permission.objects.get(id = 1) #django permission. Should be always available
# cache.set('test', 1)
# cache_get = cache.get('test')
# if cache_get != 1:
# raise ValueError
# request.session['test_value'] = 1
# request.session.save()
# assert request.session["test_value"] == 1
# extra_values = getattr(settings, "HEARTBEAT_OUTPUT", None)
# if extra_values:
# for k, v in extra_values.iteritems():
# output_data[k] = v()
except OperationalError:
print("postgres Error")
output_status = status.HTTP_500_INTERNAL_SERVER_ERROR
res = 'failed'
output_data['heartbeat'] = res
output_data['postgres'] = False
#return Response(output_data, status=output_status)
postgrestime = time.time() - postgrestime
print("데이터베이스 서버 연결 유무 확인", postgrestime)
output_data['postgrestime'] = str(postgrestime)
redistime = time.time()
        try:
            cache.set('heartbeat_test', 1)  # exercise the cache, assumed here to be Redis-backed, instead of re-querying Postgres
            if cache.get('heartbeat_test') != 1:
                raise ConnectionError("cache check failed")
except ConnectionError:
print("redis Error")
output_status = status.HTTP_500_INTERNAL_SERVER_ERROR
res = 'failed'
output_data['heartbeat'] = res
output_data['redis'] = False
#return Response(output_data, status=output_status)
redistime = time.time() - redistime
print("redis 서버 연결 유무 확인", redistime)
output_data['redistime'] = str(redistime)
output_data['heartbeat'] = res
print(output_data)
return self.success(output_data)
class StaticHeartbeatView(generics.GenericAPIView):
permission_classes = (permissions.AllowAny,)
def get(self, request, format=None):
output_data = {}
print("test")
output_status = status.HTTP_200_OK
output_data['heartbeat'] = 'ok'
return Response(output_data, status = output_status) |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from parlai.utils.safety import OffensiveLanguageClassifier
from parlai.utils.safety import OffensiveStringMatcher
from parlai.utils.testing import skipUnlessBPE, skipUnlessGPU
import unittest
DEFINITELY_GOOD = [
'hey how are you',
'what is your name?',
'i like to play sports',
'i have a horse',
]
PROBABLY_BAD = ['i hate you', 'you are pretty ugly', 'parlai is the worst platform']
DEFINITELY_BAD = ['fuck you', 'you are a piece of shit']
class TestSafetyModules(unittest.TestCase):
def test_string_matcher(self):
sm = OffensiveStringMatcher()
for phrase in DEFINITELY_BAD:
assert phrase in sm, f'`{phrase}` is offensive'
for phrase in DEFINITELY_GOOD:
assert phrase not in sm, f'`{phrase}` is not offensive'
@skipUnlessGPU
@skipUnlessBPE
def test_classifier(self):
lc = OffensiveLanguageClassifier()
for phrase in DEFINITELY_BAD:
assert phrase in lc, f'`{phrase}` is offensive'
for phrase in PROBABLY_BAD:
assert phrase in lc, f'`{phrase}` is offensive'
for phrase in DEFINITELY_GOOD:
assert phrase not in lc, f'`{phrase}` is not offensive'
if __name__ == '__main__':
unittest.main()
|
###
### scan_extensions.py - Code used to build extensions.csv file from
### present vocola_ext_*.py files.
###
###
### Copyright (c) 2011, 2015 by Hewlett-Packard Development Company, L.P.
###
### Permission is hereby granted, free of charge, to any person
### obtaining a copy of this software and associated documentation
### files (the "Software"), to deal in the Software without
### restriction, including without limitation the rights to use, copy,
### modify, merge, publish, distribute, sublicense, and/or sell copies
### of the Software, and to permit persons to whom the Software is
### furnished to do so, subject to the following conditions:
###
### The above copyright notice and this permission notice shall be
### included in all copies or substantial portions of the Software.
###
### THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
### EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
### MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
### NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
### HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
### WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
### OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
### DEALINGS IN THE SOFTWARE.
###
import os
import sys
import re
def process_extension(output, verbose, extension):
log(" scanning %s.py..." % extension, verbose)
functions = procedures = 0
last_line = ""
line_number = 0
f = open(extension + ".py", "r")
try:
for line in f:
funcs, procs = scan(output, last_line, line, extension,
line_number)
functions += funcs
procedures += procs
line_number += 1
last_line = line
finally:
f.close()
log(" found %d function(s), %d procedures(s)" %
(functions, procedures), verbose)
def scan(output, first_line, second_line, extension, line_number):
m = re.search(r'^\s*\#\s*Vocola\s+(function|procedure):\s*(.*)',
first_line, re.I)
    if m is None:
return 0,0
kind = m.group(1)
arguments = split_arguments(m.group(2))
if len(arguments) < 1:
error("%s.py:%d: Error: Vocola extension %s name not specified" %
(extension, line_number, kind))
return 0, 0
name = arguments[0]
if name.find(".") == -1:
error(("%s.py:%d: Error: Vocola extension %s name does not " +
"contain a '.'") % (extension, line_number, kind))
return 0, 0
m = re.search(r'^\s*def\s+([^(]+)\(([^)]*)', second_line)
    if m is None:
error(("%s.py:%d: Error: Vocola extension specification line " +
"not followed by a def name(... line") %
(extension, line_number))
return 0, 0
function_name = m.group(1)
function_arguments = split_arguments(m.group(2))
m = None
if len(arguments) > 1:
m = re.search(r'^(\d+)\s*(-\s*(\d+)?)?', arguments[1])
if m:
min = max = int(m.group(1))
if m.group(2):
max = -1
if m.group(3):
max = int(m.group(3))
else:
min = max = len(function_arguments)
if kind.lower() == "function":
is_procedure = 0
else:
is_procedure = 1
definition = "%s,%d,%d,%s,%s,%s.%s\n" % (name, min, max, is_procedure,
extension, extension,
function_name)
output.write(definition)
return 1-is_procedure, is_procedure
def split_arguments(arguments):
arguments = arguments.strip()
# special case because of Python bug in split() resulting in [""] for "":
if arguments == "":
return []
else:
return [x.strip() for x in arguments.split(",")]
def log(message, verbose):
if verbose:
print message
sys.stdout.flush()
def error(message):
print >> sys.stderr, message
sys.stderr.flush()
##
## Main routine:
##
def main(argv):
program = argv.pop(0)
verbose = False
if len(argv)>0 and argv[0]=="-v":
argv.pop(0)
verbose = True
if len(argv) != 1:
print "%s: usage: %s [-v] <extensions_folder>" % (program, program)
return
extensions_folder = argv[0]
log("\nScanning for Vocola extensions...", verbose)
os.chdir(extensions_folder)
output = open(os.path.normpath(os.path.join(extensions_folder,
"extensions.csv")), "w")
try:
for file in os.listdir(extensions_folder):
if file.startswith("vocola_ext_") and file.endswith(".py"):
process_extension(output, verbose, file[0:-3])
finally:
output.close()
if __name__ == "__main__":
main(sys.argv)
|
from connector.elfinder.commands import COMMANDS_MAP
class HttpRequestParser(object):
def __init__(self, request, get=None, post=None, files=None):
self._req = request
self.post = post or {}
self.get = get or {}
self.files = files or {}
self.command = ''
self.params = {}
try:
self.parse()
except Exception, e:
print e
def parse(self):
self.params = dict(self.get)
self.params.update(self.post)
try:
self.params.update(self.files)
except:
pass
self.command = self.params.get('cmd', None)
        if isinstance(self.command, list):
self.command = self.command[0]
if self.command in COMMANDS_MAP:
self.command = COMMANDS_MAP[self.command]
else:
self.command = None
    def ok(self):
        # parse() leaves self.command as a mapped command or None.
        return self.command is not None
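# Hedged usage sketch: 'open' is assumed to be a key in COMMANDS_MAP, and the
# parser only reads the get/post/files mappings, so None stands in for the
# Django request object:
if __name__ == '__main__':
    p = HttpRequestParser(None, get={'cmd': ['open']})
    print p.command, p.ok()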
|
from machine import Pin,ADC,PWM
import time
adc14 = ADC(Pin(14),atten=ADC.ATTN_11DB)
PWM_A = PWM(Pin(11)) #Set PWM output pin
PWM_A.freq(20000) #Set PWM frequency
PWM_A.duty(0) #Set PWM duty cycle
AIN1 = Pin(12,Pin.OUT)
AIN2 = Pin(13,Pin.OUT)
STBY = Pin(10,Pin.OUT)
AIN1.on() #MOTOR forward
AIN2.off()
STBY.on() #When STBY pin is at high level, TB6612FNG starts.
while True:
read_mv=adc14.read_uv()//1000
if read_mv <= 3000:
duty_set = int(1023/3000 * read_mv)
else:
duty_set = 1023
PWM_A.duty(duty_set)
Duty_cycle = int(duty_set/1023*100)
print("ADC_read={0}mv,Duty_cycle={1}%".format(read_mv,Duty_cycle))
time.sleep_ms(100)
|
# -*- encoding: utf-8 -*-
import xlrd
from accounts.models import Authority
from notifications.models import NotificationTemplate, NotificationAuthority
def import_notification_excel(template_id, file):
template = NotificationTemplate.objects.get(id=template_id)
try:
xl_book = xlrd.open_workbook(file_contents=file.read())
for sheet_idx in range(0, xl_book.nsheets):
xl_sheet = xl_book.sheet_by_index(sheet_idx)
for row_idx in range(1, xl_sheet.nrows):
authority_code = xl_sheet.cell(row_idx, 1).value
to = xl_sheet.cell(row_idx, 3).value
if authority_code:
authority = Authority.objects.get(code=authority_code)
# print '%s %s %s' % (template.id, authority.id, to)
# print 'INSERT INTO "notifications_notificationauthority" ("is_deleted", "template_id", "authority_id", "to") SELECT false, %s, %s, \'%s\' WHERE NOT EXISTS ( SELECT id, "is_deleted", "template_id", "authority_id", "to" FROM notifications_notificationauthority WHERE template_id = %s AND authority_id = %s AND "to" = \'%s\');' % (template.id, authority.id, to, template.id, authority.id, to)
notificationAuthority, create = NotificationAuthority.objects.get_or_create(template=template, authority=authority)
notificationAuthority.to = to
notificationAuthority.save()
return True
    except Exception:
return False
|
"""
This file contains constants used throughout AppScale.
"""
import os
from kazoo.retry import KazooRetry
class HTTPCodes(object):
OK = 200
BAD_REQUEST = 400
UNAUTHORIZED = 401
FORBIDDEN = 403
NOT_FOUND = 404
INTERNAL_ERROR = 500
NOT_IMPLEMENTED = 501
class MonitStates(object):
MISSING = 'missing'
PENDING = 'pending' # Monit is trying to either start or stop the process.
RUNNING = 'running'
STOPPED = 'stopped' # Monit will likely try to start the process soon.
UNMONITORED = 'unmonitored'
# AppScale home directory.
APPSCALE_HOME = os.environ.get("APPSCALE_HOME", "/root/appscale")
# The ZooKeeper path for keeping track of assignments by machine.
ASSIGNMENTS_PATH = '/appscale/assignments'
# Directory where configuration files are stored.
CONFIG_DIR = os.path.join('/', 'etc', 'appscale')
# Location of where data is persisted on disk.
APPSCALE_DATA_DIR = '/opt/appscale'
# Location of Java AppServer.
JAVA_APPSERVER = APPSCALE_HOME + '/AppServer_Java'
# The format each service should use for logging.
LOG_FORMAT = '%(asctime)s %(levelname)s %(filename)s:%(lineno)s %(message)s '
# The location of the file containing the load balancer IPs.
LOAD_BALANCER_IPS_LOC = '/etc/appscale/load_balancer_ips'
# The location of the file which specifies all the ips for this deployment.
ALL_IPS_LOC = '/etc/appscale/all_ips'
# The location of the file which specifies the private IP of the head node.
HEADNODE_IP_LOC = '/etc/appscale/head_node_private_ip'
# The directory that contains the deployment's private SSH key.
KEY_DIRECTORY = os.path.join(CONFIG_DIR, 'keys', 'cloud1')
# The location of the file which specifies the public IP of the head node.
LOGIN_IP_LOC = '/etc/appscale/login_ip'
# The size for the random password to be created for the appscalesensor app user.
PASSWORD_SIZE = 6
# The location of the file which specifies the current private IP.
PRIVATE_IP_LOC = '/etc/appscale/my_private_ip'
# The location of the file which specifies the current public IP.
PUBLIC_IP_LOC = '/etc/appscale/my_public_ip'
# The location of the file which holds the AppScale secret key.
SECRET_LOC = '/etc/appscale/secret.key'
# The Cassandra config location in Zookeeper.
ZK_CASSANDRA_CONFIG = "/appscale/config/cassandra"
# The location of the file which contains information on the current DB.
DB_INFO_LOC = '/etc/appscale/database_info.yaml'
# The file location which has all taskqueue nodes listed.
TASKQUEUE_NODE_FILE = "/etc/appscale/taskqueue_nodes"
# The port of the datastore server.
DB_SERVER_PORT = 8888
# The port of the UserAppServer SOAP server.
UA_SERVER_PORT = 4343
# The port of the application manager soap server.
APP_MANAGER_PORT = 17445
# The HAProxy port for the TaskQueue service.
TASKQUEUE_SERVICE_PORT = 17446
# Python programs.
PYTHON = "python"
# Python2.7 programs.
PYTHON27 = "python27"
# Java programs.
JAVA = "java"
# Go programs.
GO = "go"
# PHP programs.
PHP = "php"
# Location where applications are stored.
APPS_PATH = "/var/apps/"
# Locations of ZooKeeper in json format.
ZK_LOCATIONS_JSON_FILE = "/etc/appscale/zookeeper_locations.json"
# Default location for connecting to ZooKeeper.
ZK_DEFAULT_CONNECTION_STR = "localhost:2181"
# A ZooKeeper reconnect policy that never stops retrying to connect.
ZK_PERSISTENT_RECONNECTS = KazooRetry(max_tries=-1, max_delay=30)
# Default location for the datastore master.
MASTERS_FILE_LOC = "/etc/appscale/masters"
# Default location for the datastore slaves.
SLAVES_FILE_LOC = "/etc/appscale/slaves"
# Application ID for AppScale Dashboard.
DASHBOARD_APP_ID = "appscaledashboard"
# Reserved application identifiers which are only internal for AppScale.
RESERVED_APP_IDS = [DASHBOARD_APP_ID]
# The seconds to wait for the schema to settle after changing it.
SCHEMA_CHANGE_TIMEOUT = 120
# Location of where the search service is running.
SEARCH_FILE_LOC = "/etc/appscale/search_ip"
# Service scripts directory.
SERVICES_DIR = '/etc/init.d'
# The AppController's service name.
CONTROLLER_SERVICE = 'appscale-controller'
# The system's cgroup directory.
CGROUP_DIR = os.path.join('/', 'sys', 'fs', 'cgroup')
# The default log directory for AppScale services.
LOG_DIR = os.path.join('/var', 'log', 'appscale')
# The default directory for run-time variable data (eg. pidfiles).
VAR_DIR = os.path.join('/', 'var', 'run', 'appscale')
# The number of seconds to wait before retrying some operations.
SMALL_WAIT = 5
# The number of seconds to wait before retrying very quick operations.
TINY_WAIT = .1
# The character used to separate portions of a complete version string.
# (e.g. guestbook_default_v1)
VERSION_PATH_SEPARATOR = '_'
|
'''
## MIT License
Copyright (c) 2016 David Sandberg
Process Flow:
1.Read the image
2.send to detect face
3.get the coordinates
4.draw bounding box on face
5.write the image
6.Store the time in a separate text file: image name, count of faces, dimension, and a time log for each image.
The code below is used to detect faces and record how fast detection runs, and it can also be used in conjunction with a Face Tracker Algorithm to track them.
'''
#!/usr/bin/python3
#import packages
import cv2
import tensorflow as tf
import detect_face
import argparse
import sys
import os
import time
def main(args):
    # list all the files from the directory and sort them based on numerical order
list = sorted(os.listdir(args['location']))
list.sort(key=lambda f:int(''.join(filter(str.isdigit, f))))
#Parameters for MTCNN
sess = tf.Session()
pnet, rnet, onet = detect_face.create_mtcnn(sess, None)
minsize = 40 # minimum size of face
    threshold = [0.6, 0.7, 0.9]  # three steps' thresholds
factor = 0.709 # scale factor
    # create a file and write the metadata generated from processing
file = open(args['fileName'], "w")
for each in range(0,len(list)):
image = cv2.imread(args['location']+list[each])
h,w = image.shape[:2]# get the shape row i.e h, column i.e w
start = time.time()#start time
bounding_boxes, points = detect_face.detect_face(image, minsize, pnet, rnet, onet, threshold, factor)# it return bounding box and point ie. the five facial features
end = time.time()# here the time is to measure how much it takes to detect faces.
count = 0
for b in bounding_boxes:
cv2.rectangle(image, (int(b[0]), int(b[1])), (int(b[2]), int(b[3])), (0, 255, 0), 1)# gives x-coordinate, y- coordinate, width(no of columns), height(no of rows)
count += 1
cv2.imwrite("/home/user/Frames/" + str(each) + ".jpg", image)
file.write(str(list[each])+" ," + "faces:" + str(count) + " ," +"dimension:"+ str(h)+" x "+str(w)+" ,"+ "timetaken:" + str((end - start)))
file.write('\n')
file.close()
#Driver Code
if __name__ == "__main__":
ap = argparse.ArgumentParser()
ap.add_argument("-l","--location", type=str, help="Location of the folder containing image")
ap.add_argument("-f", "--fileName",type=str, help="file to write metadata of image")
args = vars(ap.parse_args())
if (args["location"] and args['fileName']) == None:
print("Input image location/ file name is not provided")
sys.exit()
main(args)
|
import pytest
from django.urls import reverse
from metadeploy.conftest import format_timestamp
from ..constants import ORGANIZATION_DETAILS
from ..models import Job, Plan, PreflightResult
@pytest.mark.django_db
def test_user_view(client):
response = client.get(reverse("user"))
assert response.status_code == 200
assert response.json()["username"].endswith("@example.com")
@pytest.mark.django_db
class TestJobViewset:
def test_job__cannot_see(self, client, job_factory):
job = job_factory(org_id="00Dxxxxxxxxxxxxxxx")
response = client.get(reverse("job-detail", kwargs={"pk": job.id}))
assert response.status_code == 404
assert response.json() == {"detail": "Not found."}
def test_job__is_staff(self, client, user_factory, job_factory):
user = user_factory(is_staff=True)
client.force_login(user)
job = job_factory(org_name="Secret Org", org_id="00Dxxxxxxxxxxxxxxx")
response = client.get(reverse("job-detail", kwargs={"pk": job.id}))
assert response.status_code == 200
assert response.json() == {
"id": str(job.id),
"creator": {"username": job.user.username, "is_staff": False},
"plan": str(job.plan.id),
"steps": [],
"organization_url": "",
"org_id": "00Dxxxxxxxxxxxxxxx",
"results": {},
"created_at": format_timestamp(job.created_at),
"enqueued_at": None,
"job_id": None,
"status": "started",
"org_name": "Secret Org",
"org_type": "",
"is_production_org": False,
"error_count": 0,
"warning_count": 0,
"is_public": False,
"user_can_edit": False,
"message": "",
"error_message": "",
"edited_at": format_timestamp(job.edited_at),
}
def test_job__your_own(self, client, job_factory):
job = job_factory(
user=client.user, org_name="Secret Org", org_id=client.user.org_id
)
response = client.get(reverse("job-detail", kwargs={"pk": job.id}))
assert response.status_code == 200
assert response.json() == {
"id": str(job.id),
"creator": {"username": job.user.username, "is_staff": False},
"plan": str(job.plan.id),
"steps": [],
"organization_url": "",
"org_id": "00Dxxxxxxxxxxxxxxx",
"results": {},
"created_at": format_timestamp(job.created_at),
"enqueued_at": None,
"job_id": None,
"status": "started",
"org_name": "Secret Org",
"org_type": "",
"is_production_org": False,
"error_count": 0,
"warning_count": 0,
"is_public": False,
"user_can_edit": True,
"message": "",
"error_message": "",
"edited_at": format_timestamp(job.edited_at),
}
def test_job__is_public(self, client, job_factory):
job = job_factory(
is_public=True, org_name="Secret Org", org_id="00Dxxxxxxxxxxxxxxx"
)
response = client.get(reverse("job-detail", kwargs={"pk": job.id}))
assert response.status_code == 200
assert response.json() == {
"id": str(job.id),
"creator": None,
"plan": str(job.plan.id),
"organization_url": None,
"org_id": None,
"steps": [],
"results": {},
"created_at": format_timestamp(job.created_at),
"enqueued_at": None,
"job_id": None,
"status": "started",
"org_name": None,
"org_type": "",
"is_production_org": False,
"error_count": 0,
"warning_count": 0,
"is_public": True,
"user_can_edit": False,
"message": "",
"error_message": "",
"edited_at": format_timestamp(job.edited_at),
}
def test_job__is_public_anon(self, anon_client, job_factory):
job = job_factory(
is_public=True, org_name="Secret Org", org_id="00Dxxxxxxxxxxxxxxx"
)
url = reverse("job-detail", kwargs={"pk": job.id})
response = anon_client.get(url)
assert response.status_code == 200
assert response.json() == {
"id": str(job.id),
"creator": None,
"plan": str(job.plan.id),
"organization_url": None,
"org_id": None,
"steps": [],
"results": {},
"error_count": 0,
"warning_count": 0,
"created_at": format_timestamp(job.created_at),
"enqueued_at": None,
"job_id": None,
"status": "started",
"org_name": None,
"org_type": "",
"is_production_org": False,
"is_public": True,
"user_can_edit": False,
"message": "",
"error_message": "",
"edited_at": format_timestamp(job.edited_at),
}
def test_create_job(self, client, plan_factory, preflight_result_factory):
plan = plan_factory()
preflight_result_factory(
plan=plan,
user=client.user,
status=PreflightResult.Status.complete,
org_id=client.user.org_id,
)
data = {"plan": str(plan.id), "steps": []}
response = client.post(reverse("job-list"), data=data)
assert response.status_code == 201
assert response.json()["org_type"] == "Developer Edition"
assert response.json()["org_name"] == "Sample Org"
def test_destroy_job(self, client, job_factory):
job = job_factory(user=client.user, org_id=client.user.org_id)
response = client.delete(reverse("job-detail", kwargs={"pk": job.id}))
assert response.status_code == 204
assert Job.objects.filter(id=job.id).exists()
def test_destroy_job__bad_user(self, client, job_factory):
job = job_factory(is_public=True, org_id="00Dxxxxxxxxxxxxxxx")
response = client.delete(reverse("job-detail", kwargs={"pk": job.id}))
assert response.status_code == 403
assert Job.objects.filter(id=job.id).exists()
@pytest.mark.django_db
class TestBasicGetViews:
def test_product(
self, client, allowed_list_factory, product_factory, version_factory
):
allowed_list = allowed_list_factory(org_type=["Developer"])
product = product_factory(visible_to=allowed_list)
version_factory(product=product)
response = client.get(reverse("product-list"))
assert response.status_code == 200
assert response.json() == {
"count": 0,
"results": [],
"previous": None,
"next": None,
}
def test_version(self, client, version_factory):
version = version_factory()
response = client.get(reverse("version-detail", kwargs={"pk": version.id}))
assert response.status_code == 200
assert response.json() == {
"id": str(version.id),
"product": str(version.product.id),
"label": version.label,
"description": "A sample version.",
"created_at": format_timestamp(version.created_at),
"primary_plan": None,
"secondary_plan": None,
"is_listed": True,
}
def test_plan(self, client, plan_factory):
plan = plan_factory()
response = client.get(reverse("plan-detail", kwargs={"pk": plan.id}))
assert response.status_code == 200
assert response.json() == {
"id": str(plan.id),
"title": str(plan.title),
"version": str(plan.version.id),
"preflight_message": "",
"requires_preflight": False,
"tier": "primary",
"slug": str(plan.slug),
"old_slugs": [],
"steps": [],
"is_allowed": True,
"is_listed": True,
"not_allowed_instructions": None,
"average_duration": None,
}
def test_plan__not_visible(self, client, allowed_list_factory, plan_factory):
allowed_list = allowed_list_factory(description="Sample instructions.")
plan = plan_factory(visible_to=allowed_list)
response = client.get(reverse("plan-detail", kwargs={"pk": plan.id}))
assert response.status_code == 200
assert response.json() == {
"id": str(plan.id),
"title": str(plan.title),
"version": str(plan.version.id),
"preflight_message": None,
"requires_preflight": False,
"tier": "primary",
"slug": str(plan.slug),
"old_slugs": [],
"steps": None,
"is_allowed": False,
"is_listed": True,
"not_allowed_instructions": "<p>Sample instructions.</p>",
"average_duration": None,
}
def test_plan__visible(
self,
client,
allowed_list_factory,
allowed_list_org_factory,
plan_factory,
user_factory,
):
allowed_list = allowed_list_factory(description="Sample instructions.")
allowed_list_org = allowed_list_org_factory(allowed_list=allowed_list)
plan = plan_factory(visible_to=allowed_list)
user = user_factory()
social_account = user.socialaccount_set.all()[0]
social_account.extra_data[ORGANIZATION_DETAILS]["Id"] = allowed_list_org.org_id
social_account.save()
client.force_login(user)
response = client.get(reverse("plan-detail", kwargs={"pk": plan.id}))
assert response.status_code == 200
assert response.json() == {
"id": str(plan.id),
"title": str(plan.title),
"version": str(plan.version.id),
"preflight_message": "",
"requires_preflight": False,
"tier": "primary",
"slug": str(plan.slug),
"old_slugs": [],
"steps": [],
"is_allowed": True,
"is_listed": True,
"not_allowed_instructions": "<p>Sample instructions.</p>",
"average_duration": None,
}
def test_plan__visible_superuser(
self, client, allowed_list_factory, plan_factory, user_factory
):
allowed_list = allowed_list_factory(description="Sample instructions.")
plan = plan_factory(visible_to=allowed_list)
user = user_factory(is_superuser=True)
client.force_login(user)
response = client.get(reverse("plan-detail", kwargs={"pk": plan.id}))
assert response.status_code == 200
assert response.json() == {
"id": str(plan.id),
"title": str(plan.title),
"version": str(plan.version.id),
"preflight_message": "",
"requires_preflight": False,
"tier": "primary",
"slug": str(plan.slug),
"old_slugs": [],
"steps": [],
"is_allowed": True,
"is_listed": True,
"not_allowed_instructions": "<p>Sample instructions.</p>",
"average_duration": None,
}
@pytest.mark.django_db
class TestPreflight:
def test_post(self, client, plan_factory):
plan = plan_factory()
response = client.post(reverse("plan-preflight", kwargs={"pk": plan.id}))
assert response.status_code == 201
def test_get__good(self, client, plan_factory, preflight_result_factory):
plan = plan_factory()
preflight = preflight_result_factory(
plan=plan,
user=client.user,
organization_url=client.user.instance_url,
org_id=client.user.org_id,
)
response = client.get(reverse("plan-preflight", kwargs={"pk": plan.id}))
assert response.status_code == 200
assert response.json() == {
"id": str(preflight.id),
"organization_url": client.user.instance_url,
"org_id": "00Dxxxxxxxxxxxxxxx",
"plan": str(plan.id),
"created_at": format_timestamp(preflight.created_at),
"is_valid": True,
"status": "started",
"results": {},
"error_count": 0,
"warning_count": 0,
"is_ready": False,
"user": str(client.user.id),
"edited_at": format_timestamp(preflight.edited_at),
}
def test_get__bad(self, client, plan_factory):
plan = plan_factory()
response = client.get(reverse("plan-preflight", kwargs={"pk": plan.id}))
assert response.status_code == 404
def test_post__unallowed(self, client, plan_factory, allowed_list_factory):
allowed_list = allowed_list_factory()
plan = plan_factory(visible_to=allowed_list)
response = client.post(reverse("plan-preflight", kwargs={"pk": plan.id}))
assert response.status_code == 403
def test_preflight_where_plan_not_listed(
self, client, plan_factory, preflight_result_factory
):
plan = plan_factory()
plan.is_listed = False
plan.save()
preflight_result_factory(
plan=plan,
user=client.user,
organization_url=client.user.instance_url,
org_id=client.user.org_id,
)
get_response = client.get(reverse("plan-preflight", kwargs={"pk": plan.id}))
assert get_response.status_code == 200
post_response = client.post(reverse("plan-preflight", kwargs={"pk": plan.id}))
assert post_response.status_code == 201
@pytest.mark.django_db
class TestOrgViewset:
def test_get_job(self, client, job_factory, plan_factory):
plan = plan_factory()
job = job_factory(
organization_url=client.user.instance_url,
user=client.user,
plan=plan,
org_id=client.user.org_id,
)
response = client.get(reverse("org-list"))
assert response.json()["current_job"]["id"] == str(job.id)
assert response.json()["current_preflight"] is None
def test_get_preflight(self, client, preflight_result_factory, plan_factory):
plan = plan_factory()
preflight = preflight_result_factory(
organization_url=client.user.instance_url,
user=client.user,
plan=plan,
org_id=client.user.org_id,
)
response = client.get(reverse("org-list"))
assert response.json()["current_job"] is None
assert response.json()["current_preflight"] == str(preflight.id)
def test_get_none(self, client):
response = client.get(reverse("org-list"))
assert response.json() == {"current_job": None, "current_preflight": None}
@pytest.mark.django_db
class TestVersionAdditionalPlans:
def test_get__good(self, client, plan_factory):
plan = plan_factory(tier=Plan.Tier.additional)
response = client.get(
reverse("version-additional-plans", kwargs={"pk": plan.version.id})
)
assert response.status_code == 200
assert response.json() == [
{
"id": str(plan.id),
"title": str(plan.title),
"version": str(plan.version.id),
"preflight_message": "",
"tier": "additional",
"slug": str(plan.slug),
"old_slugs": [],
"steps": [],
"is_allowed": True,
"is_listed": True,
"not_allowed_instructions": None,
"requires_preflight": False,
"average_duration": None,
}
]
@pytest.mark.django_db
class TestUnlisted:
def test_product(self, client, product_factory, version_factory):
product1 = product_factory()
version_factory(product=product1)
product2 = product_factory(is_listed=False)
version_factory(product=product2)
response = client.get(reverse("product-get-one"), {"slug": product2.slug})
assert response.status_code == 200
assert response.json()["id"] == product2.id
def test_version(self, client, product_factory, version_factory):
product = product_factory()
version_factory(product=product)
version = version_factory(product=product, is_listed=False)
response = client.get(
reverse("version-get-one"),
{"label": version.label, "product": product.slug},
)
assert response.status_code == 200
assert response.json()["id"] == version.id
def test_plan(
self,
client,
product_factory,
version_factory,
plan_template_factory,
plan_factory,
):
product = product_factory()
version = version_factory(product=product)
plan_template1 = plan_template_factory(product=product)
plan_template2 = plan_template_factory(product=product)
plan_factory(version=version, plan_template=plan_template1)
plan = plan_factory(
version=version, plan_template=plan_template2, is_listed=False
)
response = client.get(
reverse("plan-get-one"),
{"slug": plan.slug, "version": str(version.id), "product": str(product.id)},
)
assert response.status_code == 200
assert response.json()["id"] == plan.id
def test_plan__missing_param(
self,
client,
product_factory,
version_factory,
plan_template_factory,
plan_factory,
):
product = product_factory()
version = version_factory(product=product)
plan_template1 = plan_template_factory(product=product)
plan_template2 = plan_template_factory(product=product)
plan_factory(version=version, plan_template=plan_template1)
plan = plan_factory(
version=version, plan_template=plan_template2, is_listed=False
)
response = client.get(
reverse("plan-get-one"), {"slug": plan.slug, "product": str(product.id)}
)
assert response.status_code == 404
|
from collections import defaultdict
from .core import GameObject, Message, game_loop
from .linguistics import Thing, there_are_things_here
def exit(direction, destination, key=None):
if key is None:
locked = False
else:
locked = True
def exit_handler(message):
nonlocal locked
if not locked or key in message.sender.children:
if locked:
print(f"You use the {key.nouns[0].primary_noun} to unlock the exit")
locked = False
print(f"You go {direction}")
message.sender.parent.children.remove(message.sender)
destination.children.add(message.sender)
message.sender.parent = destination
destination.handle_message(Message("examine", message.sender))
else:
print("It's locked")
def print_description(message):
print(f"An exit to the {direction}")
go = GameObject()
go.grouping = "exit"
go.message_handlers["go"] = exit_handler
go.message_handlers["examine"] = print_description
go.nouns = [Thing(f"exit to the {direction}"), Thing(direction)]
return go
class GOProxy:
def __init__(self, name):
self.name = name
self._handlers = {}
self._nouns = []
self._visible = True
self._pickable = False
self._contents = set()
self.go = GameObject()
def description(self, text):
def print_description(m):
print(text)
groups = defaultdict(list)
for c in self.go.children:
if c.visible:
groups[c.grouping].append(c.nouns[0])
for group in groups.values():
print(there_are_things_here(group))
self._handlers["examine"] = print_description
return self
def noun(self, noun):
self._nouns.append(noun)
return self
def visible(self, is_visible):
self._visible = is_visible
return self
def contains(self, *others):
self._contents.update(others)
return self
def can_pickup(self, is_pickable):
self._pickable = is_pickable
return self
def realize(self, objects):
go = self.go
go.visible = self._visible
if self._nouns:
go.nouns = [Thing(n) for n in self._nouns]
else:
go.nouns = [Thing(self.name)]
go.message_handlers = dict(self._handlers)
if self._pickable:
def drop(message):
go.parent = message.sender.parent
message.sender.children.remove(go)
message.sender.parent.children.add(go)
del go.message_handlers["drop"]
go.message_handlers["get"] = pickup
def pickup(message):
go.parent.children.remove(go)
go.parent = message.sender
message.sender.children.add(go)
del go.message_handlers["get"]
go.message_handlers["drop"] = drop
go.message_handlers["get"] = pickup
for child in {objects[v].go if isinstance(v, str) else v.go for v in self._contents}:
go.children.add(child)
child.parent = go
return go
class RoomProxy(GOProxy):
def __init__(self, name):
super().__init__(name)
self._exits = {}
def exit(self, name, target, key=None):
self._exits[name] = (target, key)
return self
def realize(self, objects):
go = super().realize(objects)
for k, (v, l) in self._exits.items():
if isinstance(v, str):
v = objects[v].go
else:
v = v.go
if l is not None:
if isinstance(l, str):
l = objects[l].go
else:
l = l.go
go.children.add(exit(k, v, l))
return go
class Game:
def __init__(self):
self._objects = {"player": GOProxy("player")}
self._objects["player"].visible(False)
@property
def p(self):
return self._objects["player"]
def o(self, name):
if name not in self._objects:
self._objects[name] = GOProxy(name)
return self._objects[name]
def r(self, name):
if name not in self._objects:
self._objects[name] = RoomProxy(name)
return self._objects[name]
def compile(self):
realized = {k:v.realize(self._objects) for k,v in self._objects.items()}
return realized["player"]
def run(self):
player = self.compile()
game_loop(player)
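if __name__ == "__main__":
    # Minimal usage sketch of the builder API above; the rooms and objects are
    # hypothetical, not part of the framework. A key in the hall unlocks the
    # east exit to the cellar.
    g = Game()
    g.o("key").can_pickup(True)
    g.r("hall").description("A dusty hall.").contains("key", "player").exit(
        "east", "cellar", key="key")
    g.r("cellar").description("A dark cellar.")
    g.run()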
|
# (c) Copyright 2017-2018 SUSE LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from flask import Flask
from oslo_serialization import jsonutils
import testtools
from ardana_service.admin import bp
app = Flask(__name__)
app.register_blueprint(bp)
class TestAdmin(testtools.TestCase):
def test_get_user(self):
# Execute without mocking to verify that the real operating system
# calls are being executed without error
test_app = app.test_client()
resp = test_app.get('/api/v2/user')
user_dict = jsonutils.loads(resp.data)
        # Since we cannot control which username this unit test runs under,
        # we only require that it returns a non-empty username
self.assertIn('username', user_dict)
username = user_dict['username']
self.assertNotEqual('', username)
|
import sys
import os
import re
import stack
operators = ['add','sub','neg','and','or','not']
logicalOperators = ['gt','lt','eq']
operation_symbol = {
"eq": "JNE",
"gt": "JLE",
"lt": "JGE"}
memoryOperators = ['push','pop']
branchingCommands = ['label','goto','if-goto'] #!!!
functionCommands = ['function','call','return']
commandPattern = re.compile(r"(\w+)( \w+ \w+)?( )?") # matches 'cmd' or 'cmd arg1 arg2'
counter = 0
filename = ''
# operators
def operatorHandler(operator):
if stack.commandTable.get(operator):
return stack.commandTable.get(operator)
    elif operator in logicalOperators:
        global counter
        asm = ''
        for i in [
            '@SP\nAM=M-1\nD=M\n@13\nM=D',  #Store top (second operand) in Reg13
            '@SP\nAM=M-1\nD=M\n@15\nM=D',  #Store next (first operand) in Reg15
            "@13\nD=M\n", "@15\nD=M-D",    #D = first operand - second operand
            "@false%d" % counter,
            "D;%s" % operation_symbol[operator],
            "D=-1",                        #True in the Hack convention
            "@set%d" % counter,
            "0;JMP",
            "(false%d)" % counter,
            "D=0",
            "(set%d)" % counter,
            '@SP\nA=M\nM=D',               #Write result to the stack top
            '@SP\nM=M+1']:                 #Point SP above the result again
            asm += i + '\n'
        counter += 1
        return asm
# translateLine
def translateLine(tokens,filename=''):
error = False
# Push or Pop
if (tokens[0] in memoryOperators) and (tokens[2].isdigit()) :
if tokens[0] == 'push':
return stack.push(tokens[1],tokens[2]) , error
else:
str , err = stack.pop(tokens[1],tokens[2])
return str , (err or error)
# Stack Arith
elif (tokens[0] in operators) or (tokens[0] in logicalOperators):
return operatorHandler(tokens[0]) , error
    elif tokens[0] == 'label':
        return '('+tokens[1]+')\n', error
    elif tokens[0] == 'goto':
        return '@'+tokens[1]+'\n0;JMP\n', error
    elif tokens[0] == 'if-goto':
        return '@SP\nAM=M-1\nD=M\n@'+tokens[1]+'\nD;JNE\n', error
return '', True
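# Examples of what translateLine produces (second element is the error flag):
#   translateLine(['goto', 'LOOP'])  -> ('@LOOP\n0;JMP\n', False)
#   translateLine(['label', 'LOOP']) -> ('(LOOP)\n', False)
#   translateLine(['push', 'constant', '7']) delegates to stack.push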
# Execution begins
argumentList = sys.argv
fname = sys.argv[1].rstrip()
#Check if name is right
x = re.findall(".vm$", fname)
if len(x)==0:
print ("Incorrect file name")
#Open vm file
asmfile = open(fname, "r")
codelines = asmfile.readlines()
hackfile = open(fname.replace('.vm','.asm'),"w+")
filename = fname.replace('.vm','')
#Start
hackfile.write("@256\nD=A\n@SP\nM=D\n")
#Read line by line
for line in codelines:
line=line.replace('\n','')
print(line,' => ',end = '')
#Parse lines
if not commandPattern.match(line):
print("Error : Wrong Instruction")
break
tokens = line.split(' ')
assemblycode, error = translateLine(tokens,filename)
if error:
print("Error : Wrong Instruction")
break
print(assemblycode,end='\n')
hackfile.write(assemblycode)
print ( "Conversion successful. ")
asmfile.close()
hackfile.close() |
from django import forms
from .models import Offer
class MakeOfferForm(forms.ModelForm):
    # A widget alone is not a form field; wrap it in a DateTimeField so the
    # placeholder is actually used when the form renders.
    offer_datetime = forms.DateTimeField(
        label="Date and Time",
        widget=forms.DateTimeInput(attrs={'placeholder': "Date and time"}),
    )
class Meta:
model = Offer
fields = ['offer_datetime', 'client_address', 'payment_method', 'comment']
labels = {
'offer_datetime': "Date and Time",
'client_address': "Your address",
'payment_method': "Payment Method",
}
widgets = {
'client_address': forms.TextInput(attrs={'placeholder': "Your address"}),
'comment': forms.TextInput(attrs={'placeholder': "Comment"}),
}
|
#!/usr/bin/env ipython
# -*- coding: utf-8 -*-
import random as ran
import math
import numpy as np
"""Define auxiliary functions for Corona Testing Simulation."""
def _make_test(testlist, current_success_rate, false_positive_rate, prob_sick,
               tests_repetitions=1, test_result_decision_strategy='max'):
"""
Function for performing one test.
Input:
testlist - list of probabilities (of being sick) of individuals
current_success_rate - current probability of a test being successful
    false_positive_rate - probability of a false positive
prob_sick - probability that an individual is sick
optional:
tests_repetitions - perform given number of multiple tests
test_result_decision_strategy - when using multiple tests decide either for 'max' or 'majority'
"""
if len(testlist) == 0:
print('Testing empty group. This should not happen!')
outcomes = [0]*tests_repetitions
for t in range(tests_repetitions):
# Define a random parameter for the test
random_parameter = ran.random()
# Check, whether the list contains a sick person
sick_person_exists = 0
for individual_probability in testlist:
if individual_probability <= prob_sick:
sick_person_exists = 1
# Perform the test
if (sick_person_exists == 1 and random_parameter <= current_success_rate):
outcomes[t] = 1
# elif (sick_person_exists == 1 and random_parameter > current_success_rate):
# print("aux.py DEBUG. FALSE POSITIVE")
        elif (sick_person_exists == 0 and random_parameter <= false_positive_rate):
outcomes[t] = 1
else:
outcomes[t] = 0
if test_result_decision_strategy == 'max':
return np.max(outcomes)
elif test_result_decision_strategy == 'majority':
if outcomes.count(0) > outcomes.count(1):
return 0
else:
return 1
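# Illustrative call (made-up numbers): a pooled test of three individuals,
# repeated three times and decided by majority vote. An individual counts as
# sick when its probability is <= prob_sick.
# _make_test([0.05, 0.5, 0.9], current_success_rate=0.99,
#            false_positive_rate=0.01, prob_sick=0.1,
#            tests_repetitions=3, test_result_decision_strategy='majority')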
def _split_groups(active_groups):
""" Function to perform a binary tree search test on our sample. """
size_chosen_instance = len(active_groups[1])
middle = size_chosen_instance//2
# split the first active group in two equal size groups and then remove the instance from the list of active groups
test_group = [[active_groups[0][0:middle], active_groups[1][0:middle]]
] + [[active_groups[0][middle:], active_groups[1][middle:]]]
return test_group
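# Example: _split_groups([[0.1, 0.2, 0.3, 0.4], [1, 2, 3, 4]]) returns
# [[[0.1, 0.2], [1, 2]], [[0.3, 0.4], [3, 4]]]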
def generate_data(sample_size, prob_sick):
"""
Function to generate data of consecutively numbered individuals which are infected
with chance prob_sick. The number of infected people is always ceil(sample_size*prob_sick)
"""
number_sick_people = int(np.ceil(sample_size * prob_sick))
rawdata = []
sick_list = []
sick_list_indices = []
# Generate a sample of raw data: a list of sample_size instances with number_sick_people
# infected individuals (0), and all others healthy (1)
arr = np.ones(sample_size)
arr[:number_sick_people] = 0
np.random.shuffle(arr)
rawdata = list(arr.astype(int))
# sick_list is the opposite of rawdata. infected (1), healthy (0)
sick_list = [1-x for x in rawdata]
if number_sick_people == 0:
print("this test population contains no infected")
# print(
# 'There would have been zero infected (probably sample_size is quite small). For Debugging purposes one infection has been added')
# infected_individual_index = 0
# rawdata[infected_individual_index] = 0
# sick_list[infected_individual_index] = 1
# sick_list_indices.append(infected_individual_index)
# number_sick_people = 1
# print('generated data with {} sick people among total {}'.format(number_sick_people, sample_size))
# print('they are {}\n----\n'.format(sick_list_indices))
return rawdata, sick_list, number_sick_people
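# Example: generate_data(10, 0.2) returns 10 individuals of which exactly
# ceil(10 * 0.2) = 2 are infected (0 in rawdata, 1 in sick_list), in a
# shuffled order.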
def generate_data_old(sample_size, prob_sick):
"""
Function to generate data of consecutively numbered individuals which are infected
with chance prob_sick
THIS IS THE OLD ROUTINE, WHICH DISTRIBUTES SICKNESS WITH THE GIVEN PROBABILITY AND THUS HAS
FLUCTUATIONS IN THE ACTUAL NUMBER OF INFECTED INDIVIDUALS
"""
rawdata = []
sick_list = []
number_sick_people = 0
sick_list_indices = []
# Generate a sample of raw data: a list of sample_size instances, equally distributed between 0 and 1
for i in range(sample_size):
rawdata += [np.random.rand()] # [ran.random()]
# Decide, who is infected
for i in range(sample_size):
if rawdata[i] <= prob_sick:
sick_list += [1]
sick_list_indices.append(i)
number_sick_people += 1
else:
sick_list += [0]
if number_sick_people == 0:
print(
'There would have been zero infected (probably sample_size is quite small). For Debugging purposes one infection has been added')
infected_individual_index = 0
rawdata[infected_individual_index] = 0
sick_list[infected_individual_index] = 1
sick_list_indices.append(infected_individual_index)
number_sick_people = 1
# print('generated data with {} sick people among total {}'.format(number_sick_people, sample_size))
# print('they are {}\n----\n'.format(sick_list_indices))
return rawdata, sick_list, number_sick_people
|
# %%
import numpy as np
from scipy.integrate import solve_ivp
import matplotlib.pyplot as plt
from seaborn import set_style
set_style('whitegrid')
# %%
# Example 1: exponential decay
def exponential_decay(t, y):
return -0.5 * y
# %%
sol = solve_ivp(exponential_decay, [0, 10], [2, 4, 8])
# %%
for i in range(3):
plt.plot(sol.t, sol.y[i])
plt.xlabel(r'$t$')
plt.ylabel(r'$y$')
# %%
# Example 2: Cannon fired upwards
def upward_cannon(t, y):
return [y[1], -0.5]
def hit_ground(t, y):
return y[0]
def apex(t, y):
return y[1]
hit_ground.terminal = True
hit_ground.direction = -1
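# `terminal = True` stops the integration once hit_ground fires; direction=-1
# means the event only triggers when y[0] crosses zero from above (falling),
# so the launch from y=0 does not end the simulation immediately.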
# %%
sol = solve_ivp(
fun=upward_cannon,
t_span=[0, 100],
y0=[0, 10],
events=(hit_ground, apex),
dense_output=True,
)
# %%
t = np.linspace(0, *sol.t_events[0], 100)
pos = sol.sol(t)
fig, ax = plt.subplots(2, 1)
ax[0].plot(t, pos[0])
ax[0].set_title('Cannon Launch Example')
ax[0].set_ylabel(r'$y$', rotation='horizontal')
_, ymax = ax[0].get_ylim()
ax[0].set_ylim(0, ymax)
ax[0].annotate(
    text='apex',
    xy=(sol.t_events[1][0], sol.y_events[1].ravel()[0]),
xytext=(-10, -50),
textcoords='offset points',
arrowprops=dict(arrowstyle='->', connectionstyle='arc3', color='k')
)
ax[1].plot(t, pos[1])
ax[1].set_ylabel(r"$y'$", rotation='horizontal')
for i in range(2):
_, xmax = ax[i].get_xlim()
ax[i].set_xlim(0, xmax)
# %%
# Example 3: Lotka-Volterra equations
def lotkavolterra(t, z, a, b, c, d):
x, y = z
return [a*x - b*x*y, -c*y + d*x*y]
# %%
sol = solve_ivp(
fun=lotkavolterra,
t_span=[0, 15],
y0=[10, 5],
args=(1.5, 1, 3, 1),
dense_output=True
)
# %%
t = np.linspace(0, 15, 300)
z = sol.sol(t)
# %%
plt.plot(t, z.T)
plt.xlabel('t')
plt.legend(['prey', 'predators'], shadow=True)
plt.title('Lotka-Volterra System')
|
from typing import List
from elementalcms.extends import Controller
class Applet:
    name: str
    __controllers: List[Controller] = []
    def __init__(self, name: str, controllers: List[Controller]):
        self.name = name
        self.__controllers = controllers
    def get_controllers(self) -> List[Controller]:
        return self.__controllers
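# Usage sketch (hypothetical applet with an empty controller list):
# applet = Applet('home', controllers=[])
# assert applet.get_controllers() == []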
|
from Setup import Setup
from shaderWhisperer import R, shaderWhisperer
#TODO: add testing class
def testSentences(sw):
print("\n --- sentences testing\n")
for s in ["switch", "case", "while", "do", "for", "if", "break", "continue", "return"]:
print(s, "\t:", sw.sentences(s))
def testExpressions(sw):
print("\n --- expression testing\n")
sw.expressions("")
def coordSpaces(sw):
print("\n --- visitor testing\n")
print(sw.coordSpaces("P"))
print(sw.coordSpaces("ndcP"))
#R("wrong coords", "wrong" in sw.coordSpaces("V"))
#R("wrong coords", "wrong" in sw.coordSpaces("LW"))
def testVisitorNoPrint(sw):
print("\n --- visitor noprint testing\n")
sw.tryVisitor("")
def testCalls(sw):
print("\n --- call testing\n")
print("isYellowStrip:", sw.calls("isYellowStrip"))
print("fract: ", sw.calls("fract"))
print("normalize: ", sw.calls("normalize"))
print("vec3: ", sw.calls("vec3"))
print("*: ", sw.calls("*"))
print("+: ", sw.calls("+"))
print("/: ", sw.calls("/"))
def testDecls(sw):
print("\n --- decl testing\n")
print("frontColor: ", sw.declarations("frontColor")) #should be one
print("i: ", sw.declarations("i") )#should be many
print("f: ", sw.declarations("f") )#should be many
def testAssig(sw):
print("\n --- assig testing\n")
print("frontColor: ", sw.assignments("frontColor"))
print("i: ", sw.assignments("i"))
def testUses(sw):
print("\n --- uses testing\n")
print("frontColor: ", sw.uses("frontColor"))
print("speed: ", sw.uses("speed"))
print("vertex: ", sw.uses("vertex"))
print("i: ", sw.uses("i"))
print("normalMatrix: ", sw.uses("normalMatrix"))
def testNumUses(sw):
print("\n --- uses testing\n")
print("frontColor: ", sw.numUses("frontColor"))
print("speed: ", sw.numUses("speed"))
print("vertex: ", sw.numUses("vertex"))
print("i: ", sw.numUses("i"))
print("normalMatrix: ", sw.numUses("normalMatrix"))
def testInTypes(sw):
print("\n --- inType testing\n")
print("in \t:", sw.inTypes())
def testOutTypes(sw):
print("\n --- outType testing\n")
print("out \t:", sw.outTypes())
def testInNames(sw):
print("\n --- inName testing\n")
print("in \t:", sw.inNames())
def testOutNames(sw):
print("\n --- outName testing\n")
print("out \t:", sw.outNames())
def testParam(sw):
print("\n --- param name testing\n")
print("vec4 \t:", sw.param("vec4"))
print("vec4, 2\t:", sw.param("vec4", 2))
print("mix, 2\t:", sw.param("mix", 2))
print("mix, 3\t:", sw.param("mix", 3))
print("+, 1\t:", sw.param("+", 1))
print("+, 2\t:", sw.param("+", 2))
print("*, 1\t:", sw.param("*", 1))
print("*, 2\t:", sw.param("*", 2))
print("/, 1\t:", sw.param("/", 1))
print("/, 2\t:", sw.param("/", 2))
print("normalize \t:", sw.param("normalize"))
def testParamTypes(sw):
print("\n --- param type testing\n")
print("vec4 \t:", sw.paramType("vec4"))
print("vec4, 2\t:", sw.paramType("vec4", 2))
print("mix, 2\t:", sw.paramType("mix", 2))
print("mix, 3\t:", sw.paramType("mix", 3))
print("+, 1\t:", sw.paramType("+", 1))
print("+, 2\t:", sw.paramType("+", 2))
print("*, 1\t:", sw.paramType("*", 1))
print("*, 2\t:", sw.paramType("*", 2))
print("/, 1\t:", sw.paramType("/", 1))
print("/, 2\t:", sw.paramType("/", 2))
print("normalize \t:", sw.paramType("normalize"))
def testFieldSelectors(sw):
print("\n --- swizzle names testing\n")
print("x \t:", sw.fieldSelectors("x"))
print("z \t:", sw.fieldSelectors("z"))
print("xz \t:", sw.fieldSelectors("xz"))
print("w \t:", sw.fieldSelectors("w"))
def testFieldSelectorsTypes(sw):
print("\n --- swizzle types testing\n")
print("x \t:", sw.fieldSelectorsTypes("x"))
print("z \t:", sw.fieldSelectorsTypes("z"))
print("xz \t:", sw.fieldSelectorsTypes("xz"))
print("w \t:", sw.fieldSelectorsTypes("w"))
def main():
fs = shaderWhisperer(["Shaders/test.frag"])
vs = shaderWhisperer(["Shaders/test.vert"])
mag = shaderWhisperer(["Shaders/magnet.vert"])
vsfs = shaderWhisperer(["Shaders/test2.vert", "Shaders/test.frag"])
all = shaderWhisperer(["Shaders/allShaders/001.fs", "Shaders/allShaders/006.fs", "Shaders/allShaders/011.fs", "Shaders/allShaders/016.fs", "Shaders/allShaders/021.fs", "Shaders/allShaders/026.fs", "Shaders/allShaders/031.fs", "Shaders/allShaders/036.fs", "Shaders/allShaders/041.fs", "Shaders/allShaders/046.fs", "Shaders/allShaders/051.fs", "Shaders/allShaders/057.vs", "Shaders/allShaders/001.vs", "Shaders/allShaders/006.vs", "Shaders/allShaders/011.vs", "Shaders/allShaders/016.vs", "Shaders/allShaders/021.vs", "Shaders/allShaders/026.vs", "Shaders/allShaders/031.vs", "Shaders/allShaders/036.vs", "Shaders/allShaders/041.vs", "Shaders/allShaders/046.vs", "Shaders/allShaders/054.vs", "Shaders/allShaders/058.fs", "Shaders/allShaders/002.fs", "Shaders/allShaders/007.fs", "Shaders/allShaders/012.fs", "Shaders/allShaders/017.fs", "Shaders/allShaders/022.fs", "Shaders/allShaders/027.fs", "Shaders/allShaders/032.fs", "Shaders/allShaders/037.fs", "Shaders/allShaders/042.fs", "Shaders/allShaders/047.fs", "Shaders/allShaders/051.vs", "Shaders/allShaders/055.fs", "Shaders/allShaders/002.vs", "Shaders/allShaders/007.vs", "Shaders/allShaders/012.vs", "Shaders/allShaders/017.vs", "Shaders/allShaders/022.vs", "Shaders/allShaders/027.vs", "Shaders/allShaders/032.vs", "Shaders/allShaders/037.vs", "Shaders/allShaders/042.vs", "Shaders/allShaders/047.vs", "Shaders/allShaders/052.fs", "Shaders/allShaders/058.vs", "Shaders/allShaders/003.fs", "Shaders/allShaders/008.fs", "Shaders/allShaders/013.fs", "Shaders/allShaders/018.fs", "Shaders/allShaders/023.fs", "Shaders/allShaders/028.fs", "Shaders/allShaders/033.fs", "Shaders/allShaders/038.fs", "Shaders/allShaders/043.fs", "Shaders/allShaders/048.fs", "Shaders/allShaders/055.vs", "Shaders/allShaders/059.fs", "Shaders/allShaders/003.vs", "Shaders/allShaders/008.vs", "Shaders/allShaders/013.vs", "Shaders/allShaders/018.vs", "Shaders/allShaders/023.vs", "Shaders/allShaders/028.vs", "Shaders/allShaders/033.vs", "Shaders/allShaders/038.vs", "Shaders/allShaders/043.vs", "Shaders/allShaders/048.vs", "Shaders/allShaders/052.vs", "Shaders/allShaders/056.fs", "Shaders/allShaders/004.fs", "Shaders/allShaders/009.fs", "Shaders/allShaders/014.fs", "Shaders/allShaders/019.fs", "Shaders/allShaders/024.fs", "Shaders/allShaders/029.fs", "Shaders/allShaders/034.fs", "Shaders/allShaders/039.fs", "Shaders/allShaders/044.fs", "Shaders/allShaders/049.fs", "Shaders/allShaders/053.fs", "Shaders/allShaders/059.vs", "Shaders/allShaders/004.vs", "Shaders/allShaders/009.vs", "Shaders/allShaders/014.vs", "Shaders/allShaders/019.vs", "Shaders/allShaders/024.vs", "Shaders/allShaders/029.vs", "Shaders/allShaders/034.vs", "Shaders/allShaders/039.vs", "Shaders/allShaders/044.vs", "Shaders/allShaders/049.vs", "Shaders/allShaders/056.vs", "Shaders/allShaders/060.fs", "Shaders/allShaders/005.fs", "Shaders/allShaders/010.fs", "Shaders/allShaders/015.fs", "Shaders/allShaders/020.fs", "Shaders/allShaders/025.fs", "Shaders/allShaders/030.fs", "Shaders/allShaders/035.fs", "Shaders/allShaders/040.fs", "Shaders/allShaders/045.fs", "Shaders/allShaders/050.fs", "Shaders/allShaders/053.vs", "Shaders/allShaders/057.fs", "Shaders/allShaders/005.vs", "Shaders/allShaders/010.vs", "Shaders/allShaders/015.vs", "Shaders/allShaders/020.vs", "Shaders/allShaders/025.vs", "Shaders/allShaders/030.vs", "Shaders/allShaders/035.vs", "Shaders/allShaders/040.vs", "Shaders/allShaders/045.vs", "Shaders/allShaders/050.vs", "Shaders/allShaders/054.fs", "Shaders/allShaders/060.vs"])
errors = shaderWhisperer(["Shaders/allShaders/051.gs", "Shaders/allShaders/052.gs", "Shaders/allShaders/053.gs", "Shaders/allShaders/054.gs", "Shaders/allShaders/055.gs", "Shaders/allShaders/056.gs", "Shaders/allShaders/057.gs", "Shaders/allShaders/058.gs", "Shaders/allShaders/059.gs"])
test = shaderWhisperer(["Shaders/allShaders/032.vs"])
par = shaderWhisperer(["Shaders/testParam.vs"])
#testSentences(all)
#testCalls(all)
#testDecls(all)
#testAssig(all)
#testUses(all)
#testNumUses(all)
#testParam(all)
#testParamTypes(all)
#testUses(all)
#testNumUses(all)
#testInTypes(all)
#testInNames(all)
#testOutTypes(all)
#testOutNames(all)
#test.setConstantCoordSpace("eye")
#coordSpaces(all)
#testVisitorNoPrint(all)
#testFieldSelectors(all)
#testFieldSelectorsTypes(all)
if __name__ == '__main__':
main()
|
#!/usr/bin/env python
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives import hashes, padding
from cryptography.hazmat.primitives.asymmetric import ec
from cryptography.hazmat.primitives.kdf.hkdf import HKDF
from cryptography.hazmat.primitives.ciphers import Cipher, algorithms, modes
from secrets import token_bytes
class DiffieHellman:
    def __init__(self):
        # Generate a fresh key pair per party; a hard-coded private key would
        # give every instance the same keys.
        self.diffieHellman = ec.generate_private_key(ec.SECP256K1(), default_backend())
self.public_key = self.diffieHellman.public_key()
self.IV = token_bytes(16)
def encrypt(self, public_key, secret):
shared_key = self.diffieHellman.exchange(ec.ECDH(), public_key)
print("Shared")
print(shared_key.hex())
derived_key = HKDF(
algorithm=hashes.SHA256(),
length=32,
salt=None,
info=None,
backend=default_backend()
).derive(shared_key)
aes = Cipher(algorithms.AES(derived_key), modes.CBC(self.IV), backend=default_backend())
encryptor = aes.encryptor()
padder = padding.PKCS7(128).padder()
padded_data = padder.update(secret.encode()) + padder.finalize()
return encryptor.update(padded_data) + encryptor.finalize()
def decrypt(self, public_key, secret, iv):
shared_key = self.diffieHellman.exchange(ec.ECDH(), public_key)
derived_key = HKDF(
algorithm=hashes.SHA256(),
length=32,
salt=None,
info=None,
backend=default_backend()
).derive(shared_key)
aes = Cipher(algorithms.AES(derived_key), modes.CBC(iv), backend=default_backend())
decryptor = aes.decryptor()
decrypted_data = decryptor.update(secret) + decryptor.finalize()
unpadder = padding.PKCS7(128).unpadder()
return unpadder.update(decrypted_data) + unpadder.finalize()
text = "Hello World!"
alice = DiffieHellman()
bob = DiffieHellman()
encrypted_message = bob.encrypt(alice.public_key, text)
print(encrypted_message)
decrypted_message = alice.decrypt(bob.public_key, encrypted_message, bob.IV)
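# Round-trip check: decryption should recover the original plaintext.
assert decrypted_message == text.encode()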
print(decrypted_message.decode()) |
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('course_modes', '0006_auto_20160208_1407'),
]
operations = [
migrations.AddField(
model_name='coursemode',
name='bulk_sku',
field=models.CharField(default=None, max_length=255, blank=True, help_text='This is the bulk SKU (stock keeping unit) of this mode in the external ecommerce service.', null=True, verbose_name='Bulk SKU'),
),
]
|
import numpy as np
import numpy.polynomial.hermite_e as her
import numpy.polynomial.legendre as legd
class Polynomial(object):
'''
class: polynomial class
'''
def __init__(self, order, coef):
self.order = order
self.coef = coef
def __str__(self):
string = 'P(x)='
for i, c in enumerate(self.coef):
string += '%+.2f'%c + '*x^%d' %i
        if not self.coef.any():
string += '0'
return string
def evaluate(self, x):
px = 0
for i, c in enumerate(self.coef):
px += c * x ** i
return px
def der(self, m=1):
coef = np.copy(self.coef)
if m == 0:
return Polynomial(self.order, self.coef)
if len(coef) in [0, 1]:
return Polynomial(0, np.array([0]))
for de in range(m):
coef = np.array([i*c for i,c in enumerate(coef) if i!=0])
return Polynomial(len(coef)-1, coef)
def int(self, p=1):
pass
class Hermite(Polynomial):
'''
Hermite Polynomial Class
'''
def __init__(self, order):
self.order = order
Polynomial.__init__(self, order, self.her_coef())
def her_coef(self):
c = [0] * self.order + [1]
return her.herme2poly(c)
def int(self, p=1):
pass
class Plain(Polynomial):
'''
Plain Polynomial Class
'''
def __init__(self, order):
self.order = order
Polynomial.__init__(self, order, self._coef())
def _coef(self):
c = [0] * self.order + [1]
return np.array(c)
def int(self, p=1):
pass
class Legendre(Polynomial):
'''
    Legendre Polynomial Class
'''
def __init__(self, order):
self.order = order
Polynomial.__init__(self, order, self.legd_coef())
def legd_coef(self):
c = [0] * self.order + [1]
return legd.leg2poly(c)
def int(self, p=1):
if p == 1:
if self.order == 0:
return 1
else:
return 0
elif p == 2:
return 1. / (2 * self.order + 1)
if __name__ == '__main__':
for o in range(6):
l = Legendre(order=o)
# l = Hermite(order=o)
print(l)
print(l.der())
print('-----------') |
# -*- coding: utf-8 -*-
# Generated by Django 1.11.20 on 2019-05-29 09:37
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
def add_resulting_fragments(apps, schema_editor):
"""
Fills the resulting_fragment column on Selections
"""
Selection = apps.get_model('selections', 'Selection')
Word = apps.get_model('annotations', 'Word')
for selection in Selection.objects.filter(is_no_target=False):
fragments = set()
xml_ids = [w.xml_id for w in selection.words.all()]
# Find the Fragments that have these xml_ids as targets
for xml_id in xml_ids:
words = Word.objects.filter(
sentence__fragment__document=selection.fragment.document,
xml_id=xml_id,
is_target=True)
fragments.update([w.sentence.fragment for w in words])
if fragments:
result = []
# Check if Fragment has the exact same Words as targets
for fragment in fragments:
fragment_words = Word.objects.filter(sentence__fragment=fragment, is_target=True)
fragment_xml_ids = [w.xml_id for w in fragment_words]
if sorted(xml_ids) == sorted(fragment_xml_ids):
result.append(fragment)
if len(result) == 1:
selection.resulting_fragment = result[0]
selection.save()
"""
elif len(result) > 1:
print 'Multiple Fragments found:', selection.pk, [f.pk for f in result]
else:
print 'No Fragment found for:', selection.pk
else:
print 'No Fragment found for:', selection.pk
"""
def backwards(apps, schema_editor):
pass
dependencies = [
('annotations', '0028_source'),
('selections', '0008_preprocessfragment_resulting_fragment'),
]
operations = [
migrations.RemoveField(
model_name='preprocessfragment',
name='resulting_fragment',
),
migrations.AddField(
model_name='selection',
name='resulting_fragment',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='fragment_preprocess', to='annotations.Fragment'),
),
migrations.RunPython(add_resulting_fragments, backwards),
]
|
from bs4 import BeautifulSoup as soup
from fake_useragent import UserAgent
from selenium import webdriver
import time
import re
import json
from Common import Common
delay_time = 10
def run():
driver = Common.getDriver()
# sticker_link = "https://steamcommunity.com/market/search?q=&category_730_ItemSet%5B%5D=any&category_730_ProPlayer%5B%5D=any&category_730_StickerCapsule%5B%5D=any&category_730_TournamentTeam%5B%5D=any&category_730_Weapon%5B%5D=any&category_730_StickerCategory%5B%5D=tag_PlayerSignature&category_730_StickerCategory%5B%5D=tag_TeamLogo&category_730_StickerCategory%5B%5D=tag_Tournament&appid=730"
# sticker_link = "https://steamcommunity.com/market/search?q=&category_730_ItemSet%5B0%5D=any&category_730_ProPlayer%5B0%5D=any&category_730_StickerCapsule%5B0%5D=any&category_730_TournamentTeam%5B0%5D=any&category_730_Weapon%5B0%5D=any&category_730_StickerCategory%5B0%5D=tag_PlayerSignature&appid=730"
bicak_link = "https://steamcommunity.com/market/search?q=&category_730_ItemSet%5B%5D=any&category_730_ProPlayer%5B%5D=any&category_730_StickerCapsule%5B%5D=any&category_730_TournamentTeam%5B%5D=any&category_730_Weapon%5B%5D=any&category_730_Type%5B%5D=tag_CSGO_Type_Knife&appid=730"
# driver.get(sticker_link)
driver.get(bicak_link)
Common.collapsePage(driver, 100)
m_cMaxPages = Common.getcMaxPages(driver)
for pageCount in range(1, m_cMaxPages):
time.sleep(delay_time)
Common.sendLog("info", 'The {0}. sticker page is being analyzed'.format(Common.getiCurrentPage(driver)))
containers = Common.getContainers(driver)
for container in containers:
item_name = Common.getItemName(container)
item_normalPrice = Common.getNormalPrice(container)
item_salePrice = Common.getSalePrice(container)
item_count = Common.getItemCount(container)
item_link = Common.getItemLink(container)
# Common.printToFile("Sticker.csv",item_name, item_normalPrice, item_salePrice,item_count, item_link)
Common.printToFile("Knife.csv", item_name, item_normalPrice, item_salePrice, item_count, item_link)
Common.NextPage(driver)
driver.quit()
|
import json
import html
from pathlib import Path
from elm_doc.utils import Namespace
# Note: title tag is omitted, as the Elm app sets the title after
# it's initialized.
PAGE_TEMPLATE = '''
<!DOCTYPE html>
<html>
<head>
<meta charset="UTF-8">
<link rel="shortcut icon" size="16x16, 32x32, 48x48, 64x64, 128x128, 256x256" href="{mount_point}/assets/favicon.ico">
<link rel="stylesheet" href="{mount_point}/assets/style.css">
<script src="{mount_point}/artifacts/elm.js"></script>
<script src="{mount_point}/assets/highlight/highlight.pack.js"></script>
<link rel="stylesheet" href="{mount_point}/assets/highlight/styles/default.css">
</head>
<body>
<script>
try {{
const fontsLink = document.createElement("link");
fontsLink.href = "{mount_point}/assets/fonts/" + ((navigator.userAgent.indexOf("Macintosh") > -1) ? "_hints_off.css" : "_hints_on.css");
fontsLink.rel = "stylesheet";
document.head.appendChild(fontsLink);
}} catch(e) {{
// loading the font is not essential; log the error and move on
console.log(e);
}}
Elm.Main.init({init});
</script>
</body>
</html>
''' # noqa: E501
def _render(mount_point: str = ''):
if mount_point and mount_point[-1] == '/':
mount_point = mount_point[:-1]
init = {
'flags': {
'mountedAt': mount_point,
},
}
return PAGE_TEMPLATE.format(
mount_point=html.escape(mount_point),
init=json.dumps(init))
class actions(Namespace):
def write(output_path: Path, mount_point: str = ''):
output_path.parent.mkdir(parents=True, exist_ok=True)
with open(str(output_path), 'w') as f:
f.write(_render(mount_point=mount_point))
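# Usage sketch (hypothetical output path and mount point):
# actions.write(Path('build/docs/index.html'), mount_point='/docs')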
|
"""Main module."""
import requests
import base64
from datetime import datetime
from time import sleep
from urllib.parse import urljoin
class Unimus:
def __init__(self, url, token):
self.url = url
self.token = token
    def _api_request(self, method, url, *args, **kwargs):
        url = urljoin("{}/api/v2/".format(self.url), url)
        # Pop our own flag so it is not forwarded to requests.request
        raw = kwargs.pop("raw", False)
        if "headers" not in kwargs:
            kwargs["headers"] = {"Accept": "application/json", "Authorization": "Bearer {}".format(self.token)}
        r = requests.request(method, url, *args, **kwargs)
        r.raise_for_status()
        if raw:
            return r.content
        else:
            return r.json()
def devices(self):
return self._api_request("GET", "devices/").get("data")
    def backup_device(self, device_id):
        if int(device_id) != device_id:
            raise Exception("Need device_id")
        job = self._api_request("PATCH", "jobs/backup?id={}".format(device_id))
        # True when the backup job was accepted for at least one device
        return job["data"]["accepted"] > 0
def get_latest_config(self, device_id):
r = self._api_request("GET", "devices/{}/backups/latest".format(device_id))
config = base64.b64decode(r["data"]["bytes"].encode('utf-8')).decode('utf-8')
if r["data"]["validUntil"] is None:
timestamp = datetime.fromtimestamp(r["data"]["validSince"])
else:
timestamp = datetime.fromtimestamp(max(r["data"]["validSince"],r["data"]["validUntil"]))
        age_seconds = (datetime.now() - timestamp).total_seconds()
return (config,age_seconds)
def get_config_from_device(self, device_id):
start_time = datetime.now()
(_, initial_age) = self.get_latest_config(device_id)
age = initial_age
self.backup_device(device_id)
while age >= initial_age:
(config, age) = self.get_latest_config(device_id)
sleep(0.5)
            if (datetime.now() - start_time).total_seconds() > 60:
raise Exception("Timed out waiting for new config")
return config
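if __name__ == "__main__":
    # Usage sketch; the URL, token and device id are placeholders rather than
    # a real Unimus deployment, so the network calls stay commented out.
    unimus = Unimus("https://unimus.example.com", "API-TOKEN")
    # for device in unimus.devices():
    #     print(device["id"])
    # print(unimus.get_config_from_device(123))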
|
from django.template.loader import render_to_string
def render_html(email_alerts):
context = {
'opportunities': {
'email_alerts': email_alerts
}
}
return render_to_string('exops/is-exops-user-email-alerts.html', context)
def test_email_alert_title():
html = render_html([
{
'title': 'This is a title',
'created_on': '2000-01-01T01:01:01.000001Z',
}
])
assert 'This is a title' in html
def test_email_alert_with_term():
html = render_html([
{
'term': '--example term--',
'created_on': '2000-01-01T01:01:01.000001Z',
}
])
assert '--example term-- in all countries' in html
def test_email_alert_term_country():
html = render_html([
{
'term': 'Sports',
'created_on': '2000-01-01T01:01:01.000001Z',
'countries': ['UK,Spain']
}
])
assert 'Sports in UK,Spain' in html
def test_email_alert_country():
html = render_html([
{
'created_on': '2000-01-01T01:01:01.000001Z',
'countries': ['UK,Spain']
}
])
assert 'all opportunities in UK,Spain' in html
def test_email_alert_all_opportunities():
html = render_html([
{
'created_on': '2000-01-01T01:01:01.000001Z',
}
])
assert 'all opportunities' in html
def test_email_alert_link_region():
html = render_html([
{
'created_on': '2000-01-01T01:01:01.000001Z',
'countries': ['Greece', 'Italy']
}
])
assert 'href="?suppress_subscription_block=true&s=&'\
'countries[]=Greece&countries[]=Italy' in html
|
# -*- coding: utf-8 -*-
# Copyright (c) 2018, 9t9it and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
from frappe.model.naming import make_autoname
def autoname(doc, method):
"""
    This override should be deprecated and removed once the Batch changes in
    the current develop branch, which implement the same feature, are merged
    into master.
"""
if doc.naming_series \
and frappe.db.get_value('Item', doc.item, 'create_new_batch'):
doc.batch_id = make_autoname(doc.naming_series)
doc.name = doc.batch_id
|
import numpy as np
import naa_csv_reader
import naa_csv_maker
import naa_isotope_verifier
import naa_peak_effects
import naa_background
from becquerel.tools import nndc
from pandas import DataFrame
import uncertainties
from itertools import chain
from operator import itemgetter
def naa_isotope_analyzer(filename):
#runs csv_reader.csv_reader to read the csv file and extract peak energy,
#net area and uncertainty, and FWHM.
csv_data = naa_csv_reader.csv_reader(filename)
energies = csv_data['energies']
net_area = csv_data['net_area']
net_area_unc = csv_data['net_area_unc']
peak_cps = csv_data['peak_cps']
fwhm = csv_data['fwhm']
csv_filename = csv_data['csv_filename']
    #gets rid of the 511 keV annihilation peak, filtering all parallel lists
    #together instead of removing by index while iterating
    keep = [i for i, e in enumerate(energies) if not np.isclose(e, 511, atol=1)]
    energies = [energies[i] for i in keep]
    net_area = [net_area[i] for i in keep]
    net_area_unc = [net_area_unc[i] for i in keep]
    peak_cps = [peak_cps[i] for i in keep]
    fwhm = [fwhm[i] for i in keep]
    #checks to see if any of the peaks are due to background radiation
    #(query the background table once per energy instead of three times)
    background_isotopes = []
    background_isotopes_energy = []
    background_isotopes_br = []
    for energy in energies:
        bg = naa_background.background(energy)
        background_isotopes.append(bg['identified_isotopes'])
        background_isotopes_energy.append(bg['identified_isotopes_energy'])
        background_isotopes_br.append(bg['identified_isotopes_br'])
#checks to see if any of the peaks are due to single escape peaks,
#double escape peaks, and sum peaks.
peak_effects_info = naa_peak_effects.peak_effects(energies)
escape_peaks = []
se_index = peak_effects_info['single_escape_peak_index']
de_index = peak_effects_info['double_escape_peak_index']
origin_index_se = peak_effects_info['origin_index_se']
origin_index_de = peak_effects_info['origin_index_de']
for i in range(len(energies)):
if i in se_index:
for j in range(len(se_index)):
if i == se_index[j]:
escape_peaks.append('SE from ' + str(energies[origin_index_se[j]]) + ' keV peak')
elif i in de_index:
for k in range(len(de_index)):
if i == de_index[k]:
escape_peaks.append('DE from ' + str(energies[origin_index_de[k]]) + ' keV peak')
else:
escape_peaks.append('None')
#queries the nndc database from the module Becquerel for the isotopes
#associated with every energy from the energy list.
nndc_info = []
for i in range(len(energies)):
nndc_info.append(nndc.fetch_decay_radiation(t_range=[0, None], i_range=(1, None), type='Gamma', e_range=[energies[i]-1, energies[i]+1]))
    #checks to see if the 'parents' of the isotopes returned by the Becquerel
    #module are naturally occurring isotopes (single verifier call, reused)
    verified = naa_isotope_verifier.isotope_verifier(nndc_info)
    nndc_info_verified_isotope = verified['nndc_info_verified_isotope']
    nndc_info_verified_energy = verified['nndc_info_verified_energy']
    nndc_info_verified_br = verified['nndc_info_verified_br']
#counts how many times an isotope is repeated in nndc_info_verified_isotope.
#This is done for instances when multiple isotopes emit the same energy
#photon and only one isotope can be chosen to represent a given energy.
unpacked_isotopes = list(chain.from_iterable(nndc_info_verified_isotope))
tally = [ (i,unpacked_isotopes.count(i)) for i in set(unpacked_isotopes) ]
tally.sort(key=itemgetter(1), reverse=True)
#chooses most probable isotope for energies in which multiple isotopes emit
#at the same energy. Criteria is based on the total amount of times an
#isotope appears as a possible candidate for all peak energies.
for i in range(len(nndc_info_verified_isotope)):
try:
if len(nndc_info_verified_isotope[i]) > 1:
for j in range(len(tally)):
if tally[j][0] in nndc_info_verified_isotope[i]:
index = nndc_info_verified_isotope[i].index(tally[j][0])
nndc_info_verified_isotope[i] = [nndc_info_verified_isotope[i][index]]
nndc_info_verified_energy[i] = [nndc_info_verified_energy[i][index]]
nndc_info_verified_br[i] = [nndc_info_verified_br[i][index]]
break
else:
pass
else:
pass
except:
pass
#for every energy and branching ratio that contains an uncertainty, the
#below code will only extract the nominal value and discard the standard
#deviation.
for i in range(len(nndc_info_verified_isotope)):
try:
if type(nndc_info_verified_energy[i][0]) == uncertainties.core.Variable:
nndc_info_verified_energy[i][0] = nndc_info_verified_energy[i][0].nominal_value
if type(nndc_info_verified_br[i][0]) == uncertainties.core.Variable:
nndc_info_verified_br[i][0] = nndc_info_verified_br[i][0].nominal_value
except:
pass
#since the branching ratios returned from Becquerel are not normalized to 1,
#the below code will divide each branching ratio returned from Becquerel by
#100 in order to keep all the branching ratios consistent.
for i in range(len(nndc_info_verified_isotope)):
try:
nndc_info_verified_br[i] = [nndc_info_verified_br[i][0] / 100]
except:
pass
#Assembles the final list of isotopes that are most likely present for each
#given energy.
isotopes = background_isotopes[:]
isotopes_energy = background_isotopes_energy[:]
isotopes_br = background_isotopes_br[:]
for i in range(len(isotopes)):
if isotopes[i] == []:
isotopes[i] = nndc_info_verified_isotope[i]
isotopes_energy[i] = nndc_info_verified_energy[i]
isotopes_br[i] = nndc_info_verified_br[i]
for j in range(len(isotopes)):
if j in se_index:
index = se_index.index(j)
try:
isotopes[j].extend(['SE:' + isotopes[origin_index_se[index]][0]])
except:
isotopes[j].extend(['SE Unidentified'])
pass
if j in de_index:
index = de_index.index(j)
try:
isotopes[j].extend(['DE:' + isotopes[origin_index_de[index]][0]])
except:
isotopes[j].extend(['DE Unidentified'])
pass
"""
results1 = {'Peak Energy (keV)':energies,'Isotope':isotopes,
'Isotopes Branching Ratio':isotopes_br,'Net Area':net_area,
'Net Area Uncertainty':net_area_unc,'Peak CPS':peak_cps,
'fwhm':fwhm}
df1 = DataFrame(results1)
"""
#Formats the data to output to a csv file in which the isotopes will be
#listed in descending order (along with their relevant peak information)
#based on the number of peak energies detected.
unpacked_isotopes = list(chain.from_iterable(isotopes))
tally = [ (i,unpacked_isotopes.count(i)) for i in set(unpacked_isotopes) ]
tally.sort(key=itemgetter(1), reverse=True)
ordered_isotopes = []
ordered_energies = []
ordered_br = []
ordered_net_area = []
ordered_net_area_unc = []
ordered_peak_cps = []
ordered_fwhm = []
for i in range(len(tally)):
ordered_isotopes.append(tally[i][0])
temp_energies = []
temp_br = []
temp_ordered_net_area = []
temp_ordered_net_area_unc = []
temp_ordered_peak_cps = []
temp_ordered_fwhm = []
for j in range(len(isotopes)):
if tally[i][0] in isotopes[j]:
temp_energies.append(energies[j])
temp_ordered_net_area.append(net_area[j])
temp_ordered_net_area_unc.append(net_area_unc[j])
temp_ordered_peak_cps.append(peak_cps[j])
temp_ordered_fwhm.append(fwhm[j])
try:
index = isotopes[j].index(tally[i][0])
temp_br.append(isotopes_br[j][index])
except:
temp_br.append([None])
ordered_energies.append(temp_energies)
ordered_br.append(temp_br)
ordered_net_area.append(temp_ordered_net_area)
ordered_net_area_unc.append(temp_ordered_net_area_unc)
ordered_peak_cps.append(temp_ordered_peak_cps)
ordered_fwhm.append(temp_ordered_fwhm)
results2 = {'isotopes':ordered_isotopes,'energies':ordered_energies,
'branching ratios':ordered_br,'net areas':ordered_net_area,
'net area uncertainties':ordered_net_area_unc,'peak cps':ordered_peak_cps,
'fwhm':ordered_fwhm}
df2 = DataFrame(results2)
results2['csv_filename'] = csv_filename
naa_csv_maker.csv_maker(results2)
return(df2) |
import os
import shutil
class PathOperator:
def __init__(self):
self.current = os.getcwd()
def create_path(self, path_name: str) -> None:
target_path = os.path.join(self.current, path_name)
if os.path.exists(target_path):
print(f'path name: {path_name} already exists')
else:
os.makedirs(target_path)
print(f'create path name: {path_name}')
def get_file_name(self, url_file_path: str) -> str:
return os.path.basename(url_file_path)
    def set_download_file_name(self, dir_name: str, file_name: str) -> str:
        return os.path.join(self.current, dir_name, file_name)
def remove_path(self, path_name: str) -> None:
target_path = os.path.join(self.current, path_name)
        if os.path.exists(target_path):
shutil.rmtree(target_path)
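if __name__ == '__main__':
    # Usage sketch with a throwaway directory name.
    ops = PathOperator()
    ops.create_path('downloads_tmp')
    name = ops.get_file_name('https://example.com/files/archive.zip')
    print(ops.set_download_file_name('downloads_tmp', name))
    ops.remove_path('downloads_tmp')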
|
import os
from concurrent.futures.process import ProcessPoolExecutor
from itertools import repeat
from math import ceil
from time import time
import numpy as np
import psutil as psutil
from absl import logging
from datasets.base import register_dataset, DatasetBase
from util import get_normalizing_scale_factor, quickdraw_process
@register_dataset("quickdraw")
class Quickdraw(DatasetBase):
def __init__(self, data_dir, params):
super(Quickdraw, self).__init__(data_dir, params)
self._dataset_path = os.path.join(self._data_dir, 'quickdraw')
def load(self, repeat=True):
data_path = os.path.join(self._dataset_path, 'caches', self._split)
files = [os.path.join(data_path, shard_name) for shard_name in os.listdir(data_path)]
return self._create_dataset_from_filepaths(files, repeat)
def _filter_collections(self, files):
"""
Selects files from archive.
:param files:
:return: x_image, class_name
"""
files = sorted(files)
return files[1], files[0]
|
#!/usr/bin/env python
# Copyright (c) PLUMgrid, Inc.
# Licensed under the Apache License, Version 2.0 (the "License")
from ctypes import c_uint, c_ulong, Structure
from bcc import BPF
from time import sleep
import sys
from unittest import main, TestCase
text = """
#include <linux/ptrace.h>
struct Ptr { u64 ptr; };
struct Counters { char unused; __int128 stat1; };
BPF_HASH(stats, struct Ptr, struct Counters, 1024);
int count_sched(struct pt_regs *ctx) {
struct Ptr key = {.ptr=PT_REGS_PARM1(ctx)};
struct Counters zleaf;
memset(&zleaf, 0, sizeof(zleaf));
struct Counters *val = stats.lookup_or_try_init(&key, &zleaf);
if (val) {
val->stat1++;
}
return 0;
}
"""
class TestTracingEvent(TestCase):
def setUp(self):
b = BPF(text=text, debug=0)
self.stats = b.get_table("stats")
        b.attach_kprobe(event_re=r"^finish_task_switch$|^finish_task_switch\.isra\.\d$",
                        fn_name="count_sched")
def test_sched1(self):
for i in range(0, 100):
sleep(0.01)
for key, leaf in self.stats.items():
print("ptr %x:" % key.ptr, "stat1 (%d %d)" % (leaf.stat1[1], leaf.stat1[0]))
if __name__ == "__main__":
main()
|
#
# PySNMP MIB module CIENA-CES-ACCESS-LIST-MIB (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/CIENA-CES-ACCESS-LIST-MIB
# Produced by pysmi-0.3.4 at Mon Apr 29 17:31:33 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
Integer, ObjectIdentifier, OctetString = mibBuilder.importSymbols("ASN1", "Integer", "ObjectIdentifier", "OctetString")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
ConstraintsIntersection, ConstraintsUnion, ValueRangeConstraint, ValueSizeConstraint, SingleValueConstraint = mibBuilder.importSymbols("ASN1-REFINEMENT", "ConstraintsIntersection", "ConstraintsUnion", "ValueRangeConstraint", "ValueSizeConstraint", "SingleValueConstraint")
cienaCesConfig, = mibBuilder.importSymbols("CIENA-SMI", "cienaCesConfig")
CienaGlobalState, = mibBuilder.importSymbols("CIENA-TC", "CienaGlobalState")
InetAddressPrefixLength, InetPortNumber, InetAddress, InetAddressType = mibBuilder.importSymbols("INET-ADDRESS-MIB", "InetAddressPrefixLength", "InetPortNumber", "InetAddress", "InetAddressType")
ModuleCompliance, NotificationGroup = mibBuilder.importSymbols("SNMPv2-CONF", "ModuleCompliance", "NotificationGroup")
MibScalar, MibTable, MibTableRow, MibTableColumn, MibIdentifier, Counter64, ObjectIdentity, Bits, ModuleIdentity, IpAddress, Unsigned32, Integer32, TimeTicks, NotificationType, Gauge32, iso, Counter32 = mibBuilder.importSymbols("SNMPv2-SMI", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "MibIdentifier", "Counter64", "ObjectIdentity", "Bits", "ModuleIdentity", "IpAddress", "Unsigned32", "Integer32", "TimeTicks", "NotificationType", "Gauge32", "iso", "Counter32")
DisplayString, TruthValue, MacAddress, TextualConvention = mibBuilder.importSymbols("SNMPv2-TC", "DisplayString", "TruthValue", "MacAddress", "TextualConvention")
cienaCesAccessListMIB = ModuleIdentity((1, 3, 6, 1, 4, 1, 1271, 2, 1, 35))
cienaCesAccessListMIB.setRevisions(('2015-04-02 00:00',))
if mibBuilder.loadTexts: cienaCesAccessListMIB.setLastUpdated('201504020000Z')
if mibBuilder.loadTexts: cienaCesAccessListMIB.setOrganization('Ciena, Inc')
cienaCesAccessListMIBObjects = MibIdentifier((1, 3, 6, 1, 4, 1, 1271, 2, 1, 35, 1))
cienaCesAclConfiguration = MibIdentifier((1, 3, 6, 1, 4, 1, 1271, 2, 1, 35, 1, 1))
cienaCesAclStatistics = MibIdentifier((1, 3, 6, 1, 4, 1, 1271, 2, 1, 35, 1, 2))
cienaCesAccessListMIBConformance = MibIdentifier((1, 3, 6, 1, 4, 1, 1271, 2, 1, 35, 2))
cienaCesAccessListMIBCompliances = MibIdentifier((1, 3, 6, 1, 4, 1, 1271, 2, 1, 35, 2, 1))
cienaCesAccessListMIBGroups = MibIdentifier((1, 3, 6, 1, 4, 1, 1271, 2, 1, 35, 2, 2))
class AclFilterAction(TextualConvention, Integer32):
status = 'current'
subtypeSpec = Integer32.subtypeSpec + ConstraintsUnion(SingleValueConstraint(1, 2))
namedValues = NamedValues(("allow", 1), ("deny", 2))
class AclTrafficDirection(TextualConvention, Integer32):
status = 'current'
subtypeSpec = Integer32.subtypeSpec + ConstraintsUnion(SingleValueConstraint(1, 2))
namedValues = NamedValues(("ingress", 1), ("egress", 2))
class AclIpFragmentMatchType(TextualConvention, Integer32):
status = 'current'
subtypeSpec = Integer32.subtypeSpec + ConstraintsUnion(SingleValueConstraint(1, 2, 3))
namedValues = NamedValues(("any", 1), ("isfragment", 2), ("notfragment", 3))
class AclL4PortMatchType(TextualConvention, Integer32):
status = 'current'
subtypeSpec = Integer32.subtypeSpec + ConstraintsUnion(SingleValueConstraint(1, 2, 3))
namedValues = NamedValues(("any", 1), ("single", 2), ("range", 3))
class AclInterfaceType(TextualConvention, Integer32):
status = 'current'
subtypeSpec = Integer32.subtypeSpec + ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5, 6))
namedValues = NamedValues(("port", 1), ("vlan", 2), ("virtualswitch", 3), ("ipinterface", 4), ("remoteinterface", 5), ("localinterface", 6))
class AclL4DstProtocol(TextualConvention, Integer32):
status = 'current'
subtypeSpec = Integer32.subtypeSpec + ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24))
namedValues = NamedValues(("any", 1), ("bgp", 2), ("bootpclient", 3), ("bootpserver", 4), ("dhcpclient", 5), ("dhcpserver", 6), ("dhcpv6client", 7), ("dhcpv6server", 8), ("dns", 9), ("ftp", 10), ("http", 11), ("ldp", 12), ("ntp", 13), ("olsr", 14), ("rip", 15), ("rpc", 16), ("snmp", 17), ("snmptrap", 18), ("ssh", 19), ("syslog", 20), ("tacacs", 21), ("telnet", 22), ("tftp", 23), ("twampctrl", 24))
cienaCesAclGlobalConfig = MibIdentifier((1, 3, 6, 1, 4, 1, 1271, 2, 1, 35, 1, 1, 1))
cienaCesAclAdminStatus = MibScalar((1, 3, 6, 1, 4, 1, 1271, 2, 1, 35, 1, 1, 1, 1), CienaGlobalState()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cienaCesAclAdminStatus.setStatus('current')
cienaCesAclFilterMode = MibScalar((1, 3, 6, 1, 4, 1, 1271, 2, 1, 35, 1, 1, 1, 2), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("l2l3combo", 1), ("l3only", 2)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: cienaCesAclFilterMode.setStatus('current')
cienaCesAclNumAclProfileDefs = MibScalar((1, 3, 6, 1, 4, 1, 1271, 2, 1, 35, 1, 1, 1, 3), Unsigned32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cienaCesAclNumAclProfileDefs.setStatus('current')
cienaCesAclRemainingAclProfileDefs = MibScalar((1, 3, 6, 1, 4, 1, 1271, 2, 1, 35, 1, 1, 1, 4), Unsigned32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cienaCesAclRemainingAclProfileDefs.setStatus('current')
cienaCesAclNumAclRuleDefs = MibScalar((1, 3, 6, 1, 4, 1, 1271, 2, 1, 35, 1, 1, 1, 5), Unsigned32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cienaCesAclNumAclRuleDefs.setStatus('current')
cienaCesAclRemainingAclRuleDefs = MibScalar((1, 3, 6, 1, 4, 1, 1271, 2, 1, 35, 1, 1, 1, 6), Unsigned32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cienaCesAclRemainingAclRuleDefs.setStatus('current')
cienaCesAclProfileConfigTable = MibTable((1, 3, 6, 1, 4, 1, 1271, 2, 1, 35, 1, 1, 2), )
if mibBuilder.loadTexts: cienaCesAclProfileConfigTable.setStatus('current')
cienaCesAclProfileConfigTableEntry = MibTableRow((1, 3, 6, 1, 4, 1, 1271, 2, 1, 35, 1, 1, 2, 1), ).setIndexNames((0, "CIENA-CES-ACCESS-LIST-MIB", "cienaCesAclProfileId"))
if mibBuilder.loadTexts: cienaCesAclProfileConfigTableEntry.setStatus('current')
cienaCesAclProfileId = MibTableColumn((1, 3, 6, 1, 4, 1, 1271, 2, 1, 35, 1, 1, 2, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 65535)))
if mibBuilder.loadTexts: cienaCesAclProfileId.setStatus('current')
cienaCesAclProfileName = MibTableColumn((1, 3, 6, 1, 4, 1, 1271, 2, 1, 35, 1, 1, 2, 1, 2), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(1, 31))).setMaxAccess("readonly")
if mibBuilder.loadTexts: cienaCesAclProfileName.setStatus('current')
cienaCesAclProfileAdminState = MibTableColumn((1, 3, 6, 1, 4, 1, 1271, 2, 1, 35, 1, 1, 2, 1, 3), CienaGlobalState()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cienaCesAclProfileAdminState.setStatus('current')
cienaCesAclProfileOperState = MibTableColumn((1, 3, 6, 1, 4, 1, 1271, 2, 1, 35, 1, 1, 2, 1, 4), CienaGlobalState()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cienaCesAclProfileOperState.setStatus('current')
cienaCesAclProfileDefaultFilterAction = MibTableColumn((1, 3, 6, 1, 4, 1, 1271, 2, 1, 35, 1, 1, 2, 1, 5), AclFilterAction()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cienaCesAclProfileDefaultFilterAction.setStatus('current')
cienaCesAclProfileNumRules = MibTableColumn((1, 3, 6, 1, 4, 1, 1271, 2, 1, 35, 1, 1, 2, 1, 6), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 256))).setMaxAccess("readonly")
if mibBuilder.loadTexts: cienaCesAclProfileNumRules.setStatus('current')
cienaCesAclProfileAttachedInterfaces = MibTableColumn((1, 3, 6, 1, 4, 1, 1271, 2, 1, 35, 1, 1, 2, 1, 7), Unsigned32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cienaCesAclProfileAttachedInterfaces.setStatus('current')
cienaCesAclRuleConfigTable = MibTable((1, 3, 6, 1, 4, 1, 1271, 2, 1, 35, 1, 1, 3), )
if mibBuilder.loadTexts: cienaCesAclRuleConfigTable.setStatus('current')
cienaCesAclRuleConfigTableEntry = MibTableRow((1, 3, 6, 1, 4, 1, 1271, 2, 1, 35, 1, 1, 3, 1), ).setIndexNames((0, "CIENA-CES-ACCESS-LIST-MIB", "cienaCesAclProfileId"), (0, "CIENA-CES-ACCESS-LIST-MIB", "cienaCesAclRulePrecedence"))
if mibBuilder.loadTexts: cienaCesAclRuleConfigTableEntry.setStatus('current')
cienaCesAclRulePrecedence = MibTableColumn((1, 3, 6, 1, 4, 1, 1271, 2, 1, 35, 1, 1, 3, 1, 1), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(0, 255)))
if mibBuilder.loadTexts: cienaCesAclRulePrecedence.setStatus('current')
cienaCesAclRuleName = MibTableColumn((1, 3, 6, 1, 4, 1, 1271, 2, 1, 35, 1, 1, 3, 1, 2), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(1, 31))).setMaxAccess("readonly")
if mibBuilder.loadTexts: cienaCesAclRuleName.setStatus('current')
cienaCesAclRuleFilterAction = MibTableColumn((1, 3, 6, 1, 4, 1, 1271, 2, 1, 35, 1, 1, 3, 1, 3), AclFilterAction()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cienaCesAclRuleFilterAction.setStatus('current')
cienaCesAclRuleMatchAny = MibTableColumn((1, 3, 6, 1, 4, 1, 1271, 2, 1, 35, 1, 1, 3, 1, 4), TruthValue()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cienaCesAclRuleMatchAny.setStatus('current')
cienaCesAclRuleMatchSrcMacAddr = MibTableColumn((1, 3, 6, 1, 4, 1, 1271, 2, 1, 35, 1, 1, 3, 1, 5), TruthValue()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cienaCesAclRuleMatchSrcMacAddr.setStatus('current')
cienaCesAclRuleSrcMacAddr = MibTableColumn((1, 3, 6, 1, 4, 1, 1271, 2, 1, 35, 1, 1, 3, 1, 6), MacAddress()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cienaCesAclRuleSrcMacAddr.setStatus('current')
cienaCesAclRuleSrcMacAddrMask = MibTableColumn((1, 3, 6, 1, 4, 1, 1271, 2, 1, 35, 1, 1, 3, 1, 7), MacAddress()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cienaCesAclRuleSrcMacAddrMask.setStatus('current')
cienaCesAclRuleMatchDstMacAddr = MibTableColumn((1, 3, 6, 1, 4, 1, 1271, 2, 1, 35, 1, 1, 3, 1, 8), TruthValue()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cienaCesAclRuleMatchDstMacAddr.setStatus('current')
cienaCesAclRuleDstMacAddr = MibTableColumn((1, 3, 6, 1, 4, 1, 1271, 2, 1, 35, 1, 1, 3, 1, 9), MacAddress()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cienaCesAclRuleDstMacAddr.setStatus('current')
cienaCesAclRuleDstMacAddrMask = MibTableColumn((1, 3, 6, 1, 4, 1, 1271, 2, 1, 35, 1, 1, 3, 1, 10), MacAddress()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cienaCesAclRuleDstMacAddrMask.setStatus('current')
cienaCesAclRuleMatchOuterVid = MibTableColumn((1, 3, 6, 1, 4, 1, 1271, 2, 1, 35, 1, 1, 3, 1, 11), TruthValue()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cienaCesAclRuleMatchOuterVid.setStatus('current')
cienaCesAclRuleOuterVid = MibTableColumn((1, 3, 6, 1, 4, 1, 1271, 2, 1, 35, 1, 1, 3, 1, 12), Unsigned32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cienaCesAclRuleOuterVid.setStatus('current')
cienaCesAclRuleOuterVidMask = MibTableColumn((1, 3, 6, 1, 4, 1, 1271, 2, 1, 35, 1, 1, 3, 1, 13), Unsigned32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cienaCesAclRuleOuterVidMask.setStatus('current')
cienaCesAclRuleMatchOuterPcp = MibTableColumn((1, 3, 6, 1, 4, 1, 1271, 2, 1, 35, 1, 1, 3, 1, 14), TruthValue()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cienaCesAclRuleMatchOuterPcp.setStatus('current')
cienaCesAclRuleOuterPcp = MibTableColumn((1, 3, 6, 1, 4, 1, 1271, 2, 1, 35, 1, 1, 3, 1, 15), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(0, 7))).setMaxAccess("readonly")
if mibBuilder.loadTexts: cienaCesAclRuleOuterPcp.setStatus('current')
cienaCesAclRuleOuterPcpMask = MibTableColumn((1, 3, 6, 1, 4, 1, 1271, 2, 1, 35, 1, 1, 3, 1, 16), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(0, 7))).setMaxAccess("readonly")
if mibBuilder.loadTexts: cienaCesAclRuleOuterPcpMask.setStatus('current')
cienaCesAclRuleMatchOuterDei = MibTableColumn((1, 3, 6, 1, 4, 1, 1271, 2, 1, 35, 1, 1, 3, 1, 17), TruthValue()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cienaCesAclRuleMatchOuterDei.setStatus('current')
cienaCesAclRuleOuterDei = MibTableColumn((1, 3, 6, 1, 4, 1, 1271, 2, 1, 35, 1, 1, 3, 1, 18), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(0, 1))).setMaxAccess("readonly")
if mibBuilder.loadTexts: cienaCesAclRuleOuterDei.setStatus('current')
cienaCesAclRuleMatchBaseEtype = MibTableColumn((1, 3, 6, 1, 4, 1, 1271, 2, 1, 35, 1, 1, 3, 1, 19), TruthValue()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cienaCesAclRuleMatchBaseEtype.setStatus('current')
cienaCesAclRuleBaseEtype = MibTableColumn((1, 3, 6, 1, 4, 1, 1271, 2, 1, 35, 1, 1, 3, 1, 20), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(0, 65535))).setMaxAccess("readonly")
if mibBuilder.loadTexts: cienaCesAclRuleBaseEtype.setStatus('current')
cienaCesAclRuleMatchSrcIpAddr = MibTableColumn((1, 3, 6, 1, 4, 1, 1271, 2, 1, 35, 1, 1, 3, 1, 21), TruthValue()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cienaCesAclRuleMatchSrcIpAddr.setStatus('current')
cienaCesAclRuleSrcIpAddrType = MibTableColumn((1, 3, 6, 1, 4, 1, 1271, 2, 1, 35, 1, 1, 3, 1, 22), InetAddressType()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cienaCesAclRuleSrcIpAddrType.setStatus('current')
cienaCesAclRuleSrcIpAddr = MibTableColumn((1, 3, 6, 1, 4, 1, 1271, 2, 1, 35, 1, 1, 3, 1, 23), InetAddress()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cienaCesAclRuleSrcIpAddr.setStatus('current')
cienaCesAclRuleSrcIpAddrPrefixLength = MibTableColumn((1, 3, 6, 1, 4, 1, 1271, 2, 1, 35, 1, 1, 3, 1, 24), InetAddressPrefixLength()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cienaCesAclRuleSrcIpAddrPrefixLength.setStatus('current')
cienaCesAclRuleMatchDstIpAddr = MibTableColumn((1, 3, 6, 1, 4, 1, 1271, 2, 1, 35, 1, 1, 3, 1, 25), TruthValue()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cienaCesAclRuleMatchDstIpAddr.setStatus('current')
cienaCesAclRuleDstIpAddrType = MibTableColumn((1, 3, 6, 1, 4, 1, 1271, 2, 1, 35, 1, 1, 3, 1, 26), InetAddressType()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cienaCesAclRuleDstIpAddrType.setStatus('current')
cienaCesAclRuleDstIpAddr = MibTableColumn((1, 3, 6, 1, 4, 1, 1271, 2, 1, 35, 1, 1, 3, 1, 27), InetAddress()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cienaCesAclRuleDstIpAddr.setStatus('current')
cienaCesAclRuleDstIpAddrPrefixLength = MibTableColumn((1, 3, 6, 1, 4, 1, 1271, 2, 1, 35, 1, 1, 3, 1, 28), InetAddressPrefixLength()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cienaCesAclRuleDstIpAddrPrefixLength.setStatus('current')
cienaCesAclRuleMatchIpProtocol = MibTableColumn((1, 3, 6, 1, 4, 1, 1271, 2, 1, 35, 1, 1, 3, 1, 29), TruthValue()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cienaCesAclRuleMatchIpProtocol.setStatus('current')
cienaCesAclRuleIpProtocol = MibTableColumn((1, 3, 6, 1, 4, 1, 1271, 2, 1, 35, 1, 1, 3, 1, 30), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(0, 255))).setMaxAccess("readonly")
if mibBuilder.loadTexts: cienaCesAclRuleIpProtocol.setStatus('current')
cienaCesAclRuleMatchDscp = MibTableColumn((1, 3, 6, 1, 4, 1, 1271, 2, 1, 35, 1, 1, 3, 1, 31), TruthValue()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cienaCesAclRuleMatchDscp.setStatus('current')
cienaCesAclRuleDscp = MibTableColumn((1, 3, 6, 1, 4, 1, 1271, 2, 1, 35, 1, 1, 3, 1, 32), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(0, 63))).setMaxAccess("readonly")
if mibBuilder.loadTexts: cienaCesAclRuleDscp.setStatus('current')
cienaCesAclRuleDscpMask = MibTableColumn((1, 3, 6, 1, 4, 1, 1271, 2, 1, 35, 1, 1, 3, 1, 33), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(0, 63))).setMaxAccess("readonly")
if mibBuilder.loadTexts: cienaCesAclRuleDscpMask.setStatus('current')
cienaCesAclRuleMatchL4SrcPort = MibTableColumn((1, 3, 6, 1, 4, 1, 1271, 2, 1, 35, 1, 1, 3, 1, 34), AclL4PortMatchType()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cienaCesAclRuleMatchL4SrcPort.setStatus('current')
cienaCesAclRuleL4SrcPort = MibTableColumn((1, 3, 6, 1, 4, 1, 1271, 2, 1, 35, 1, 1, 3, 1, 35), InetPortNumber()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cienaCesAclRuleL4SrcPort.setStatus('current')
cienaCesAclRuleL4SrcPortUpper = MibTableColumn((1, 3, 6, 1, 4, 1, 1271, 2, 1, 35, 1, 1, 3, 1, 36), InetPortNumber()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cienaCesAclRuleL4SrcPortUpper.setStatus('current')
cienaCesAclRuleMatchL4DstPort = MibTableColumn((1, 3, 6, 1, 4, 1, 1271, 2, 1, 35, 1, 1, 3, 1, 37), AclL4PortMatchType()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cienaCesAclRuleMatchL4DstPort.setStatus('current')
cienaCesAclRuleL4DstPort = MibTableColumn((1, 3, 6, 1, 4, 1, 1271, 2, 1, 35, 1, 1, 3, 1, 38), InetPortNumber()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cienaCesAclRuleL4DstPort.setStatus('current')
cienaCesAclRuleL4DstPortUpper = MibTableColumn((1, 3, 6, 1, 4, 1, 1271, 2, 1, 35, 1, 1, 3, 1, 39), InetPortNumber()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cienaCesAclRuleL4DstPortUpper.setStatus('current')
cienaCesAclRuleMatchL4DstProtocol = MibTableColumn((1, 3, 6, 1, 4, 1, 1271, 2, 1, 35, 1, 1, 3, 1, 40), AclL4DstProtocol()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cienaCesAclRuleMatchL4DstProtocol.setStatus('current')
cienaCesAclRuleMatchIpFragment = MibTableColumn((1, 3, 6, 1, 4, 1, 1271, 2, 1, 35, 1, 1, 3, 1, 41), AclIpFragmentMatchType()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cienaCesAclRuleMatchIpFragment.setStatus('current')
cienaCesAclRuleMatchTcpFlags = MibTableColumn((1, 3, 6, 1, 4, 1, 1271, 2, 1, 35, 1, 1, 3, 1, 42), TruthValue()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cienaCesAclRuleMatchTcpFlags.setStatus('current')
cienaCesAclRuleTcpFlags = MibTableColumn((1, 3, 6, 1, 4, 1, 1271, 2, 1, 35, 1, 1, 3, 1, 43), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cienaCesAclRuleTcpFlags.setStatus('current')
cienaCesAclProfileAttachmentTable = MibTable((1, 3, 6, 1, 4, 1, 1271, 2, 1, 35, 1, 1, 4), )
if mibBuilder.loadTexts: cienaCesAclProfileAttachmentTable.setStatus('current')
cienaCesAclProfileAttachmentTableEntry = MibTableRow((1, 3, 6, 1, 4, 1, 1271, 2, 1, 35, 1, 1, 4, 1), ).setIndexNames((0, "CIENA-CES-ACCESS-LIST-MIB", "cienaCesAclProfileId"), (0, "CIENA-CES-ACCESS-LIST-MIB", "cienaCesAclInterfaceType"), (0, "CIENA-CES-ACCESS-LIST-MIB", "cienaCesAclInterfaceId"))
if mibBuilder.loadTexts: cienaCesAclProfileAttachmentTableEntry.setStatus('current')
cienaCesAclInterfaceType = MibTableColumn((1, 3, 6, 1, 4, 1, 1271, 2, 1, 35, 1, 1, 4, 1, 1), AclInterfaceType())
if mibBuilder.loadTexts: cienaCesAclInterfaceType.setStatus('current')
cienaCesAclInterfaceId = MibTableColumn((1, 3, 6, 1, 4, 1, 1271, 2, 1, 35, 1, 1, 4, 1, 2), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 1048576)))
if mibBuilder.loadTexts: cienaCesAclInterfaceId.setStatus('current')
cienaCesAclInterfaceName = MibTableColumn((1, 3, 6, 1, 4, 1, 1271, 2, 1, 35, 1, 1, 4, 1, 3), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(1, 31))).setMaxAccess("readonly")
if mibBuilder.loadTexts: cienaCesAclInterfaceName.setStatus('current')
cienaCesAclDirection = MibTableColumn((1, 3, 6, 1, 4, 1, 1271, 2, 1, 35, 1, 1, 4, 1, 4), AclTrafficDirection()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cienaCesAclDirection.setStatus('current')
cienaCesAclProfileGlobalRuleStatsTable = MibTable((1, 3, 6, 1, 4, 1, 1271, 2, 1, 35, 1, 2, 1), )
if mibBuilder.loadTexts: cienaCesAclProfileGlobalRuleStatsTable.setStatus('current')
cienaCesAclProfileGlobalRuleStatsTableEntry = MibTableRow((1, 3, 6, 1, 4, 1, 1271, 2, 1, 35, 1, 2, 1, 1), ).setIndexNames((0, "CIENA-CES-ACCESS-LIST-MIB", "cienaCesAclProfileId"), (0, "CIENA-CES-ACCESS-LIST-MIB", "cienaCesAclRulePrecedence"))
if mibBuilder.loadTexts: cienaCesAclProfileGlobalRuleStatsTableEntry.setStatus('current')
cienaCesAclGlobalRuleStatsPacketCount = MibTableColumn((1, 3, 6, 1, 4, 1, 1271, 2, 1, 35, 1, 2, 1, 1, 1), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cienaCesAclGlobalRuleStatsPacketCount.setStatus('current')
cienaCesAclGlobalRuleStatsByteCount = MibTableColumn((1, 3, 6, 1, 4, 1, 1271, 2, 1, 35, 1, 2, 1, 1, 2), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cienaCesAclGlobalRuleStatsByteCount.setStatus('current')
cienaCesAclProfileRuleInstanceStatsTable = MibTable((1, 3, 6, 1, 4, 1, 1271, 2, 1, 35, 1, 2, 2), )
if mibBuilder.loadTexts: cienaCesAclProfileRuleInstanceStatsTable.setStatus('current')
cienaCesAclProfileRuleInstanceStatsTableEntry = MibTableRow((1, 3, 6, 1, 4, 1, 1271, 2, 1, 35, 1, 2, 2, 1), ).setIndexNames((0, "CIENA-CES-ACCESS-LIST-MIB", "cienaCesAclProfileId"), (0, "CIENA-CES-ACCESS-LIST-MIB", "cienaCesAclInterfaceType"), (0, "CIENA-CES-ACCESS-LIST-MIB", "cienaCesAclInterfaceId"), (0, "CIENA-CES-ACCESS-LIST-MIB", "cienaCesAclRulePrecedence"))
if mibBuilder.loadTexts: cienaCesAclProfileRuleInstanceStatsTableEntry.setStatus('current')
cienaCesAclRuleInstanceStatsPacketCount = MibTableColumn((1, 3, 6, 1, 4, 1, 1271, 2, 1, 35, 1, 2, 2, 1, 1), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cienaCesAclRuleInstanceStatsPacketCount.setStatus('current')
cienaCesAclRuleInstanceStatsByteCount = MibTableColumn((1, 3, 6, 1, 4, 1, 1271, 2, 1, 35, 1, 2, 2, 1, 2), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cienaCesAclRuleInstanceStatsByteCount.setStatus('current')
mibBuilder.exportSymbols("CIENA-CES-ACCESS-LIST-MIB", cienaCesAclRuleMatchIpFragment=cienaCesAclRuleMatchIpFragment, cienaCesAccessListMIBCompliances=cienaCesAccessListMIBCompliances, cienaCesAclRuleSrcIpAddr=cienaCesAclRuleSrcIpAddr, cienaCesAclRemainingAclProfileDefs=cienaCesAclRemainingAclProfileDefs, cienaCesAclRuleMatchTcpFlags=cienaCesAclRuleMatchTcpFlags, AclIpFragmentMatchType=AclIpFragmentMatchType, cienaCesAclRuleL4SrcPortUpper=cienaCesAclRuleL4SrcPortUpper, cienaCesAclDirection=cienaCesAclDirection, cienaCesAclRemainingAclRuleDefs=cienaCesAclRemainingAclRuleDefs, cienaCesAclRuleMatchDscp=cienaCesAclRuleMatchDscp, cienaCesAclProfileConfigTableEntry=cienaCesAclProfileConfigTableEntry, cienaCesAclRuleMatchOuterVid=cienaCesAclRuleMatchOuterVid, cienaCesAclRuleDstMacAddr=cienaCesAclRuleDstMacAddr, cienaCesAclProfileAdminState=cienaCesAclProfileAdminState, cienaCesAclRuleTcpFlags=cienaCesAclRuleTcpFlags, cienaCesAclRuleMatchBaseEtype=cienaCesAclRuleMatchBaseEtype, cienaCesAclRuleDstIpAddrType=cienaCesAclRuleDstIpAddrType, cienaCesAclRuleMatchIpProtocol=cienaCesAclRuleMatchIpProtocol, cienaCesAclRuleInstanceStatsByteCount=cienaCesAclRuleInstanceStatsByteCount, cienaCesAccessListMIBConformance=cienaCesAccessListMIBConformance, cienaCesAclProfileId=cienaCesAclProfileId, cienaCesAclRuleOuterVidMask=cienaCesAclRuleOuterVidMask, cienaCesAclNumAclRuleDefs=cienaCesAclNumAclRuleDefs, cienaCesAclProfileConfigTable=cienaCesAclProfileConfigTable, cienaCesAclRuleConfigTable=cienaCesAclRuleConfigTable, AclL4DstProtocol=AclL4DstProtocol, cienaCesAclFilterMode=cienaCesAclFilterMode, cienaCesAclStatistics=cienaCesAclStatistics, cienaCesAclRuleSrcIpAddrType=cienaCesAclRuleSrcIpAddrType, cienaCesAclRuleMatchSrcIpAddr=cienaCesAclRuleMatchSrcIpAddr, cienaCesAclInterfaceId=cienaCesAclInterfaceId, cienaCesAclAdminStatus=cienaCesAclAdminStatus, cienaCesAclProfileGlobalRuleStatsTableEntry=cienaCesAclProfileGlobalRuleStatsTableEntry, cienaCesAclProfileAttachmentTableEntry=cienaCesAclProfileAttachmentTableEntry, cienaCesAclRuleSrcIpAddrPrefixLength=cienaCesAclRuleSrcIpAddrPrefixLength, cienaCesAclRuleConfigTableEntry=cienaCesAclRuleConfigTableEntry, cienaCesAclProfileRuleInstanceStatsTable=cienaCesAclProfileRuleInstanceStatsTable, cienaCesAclRuleInstanceStatsPacketCount=cienaCesAclRuleInstanceStatsPacketCount, cienaCesAclRuleMatchOuterDei=cienaCesAclRuleMatchOuterDei, cienaCesAclRuleMatchL4DstProtocol=cienaCesAclRuleMatchL4DstProtocol, cienaCesAclProfileAttachedInterfaces=cienaCesAclProfileAttachedInterfaces, cienaCesAccessListMIBGroups=cienaCesAccessListMIBGroups, cienaCesAclRuleMatchL4DstPort=cienaCesAclRuleMatchL4DstPort, cienaCesAclGlobalConfig=cienaCesAclGlobalConfig, cienaCesAclProfileDefaultFilterAction=cienaCesAclProfileDefaultFilterAction, cienaCesAclRuleFilterAction=cienaCesAclRuleFilterAction, cienaCesAclInterfaceName=cienaCesAclInterfaceName, cienaCesAclRuleDstMacAddrMask=cienaCesAclRuleDstMacAddrMask, cienaCesAclRuleL4SrcPort=cienaCesAclRuleL4SrcPort, cienaCesAccessListMIB=cienaCesAccessListMIB, cienaCesAclRuleMatchSrcMacAddr=cienaCesAclRuleMatchSrcMacAddr, cienaCesAclProfileNumRules=cienaCesAclProfileNumRules, cienaCesAclRuleSrcMacAddr=cienaCesAclRuleSrcMacAddr, cienaCesAclRuleDstIpAddrPrefixLength=cienaCesAclRuleDstIpAddrPrefixLength, cienaCesAclRuleMatchOuterPcp=cienaCesAclRuleMatchOuterPcp, AclTrafficDirection=AclTrafficDirection, cienaCesAclRuleOuterVid=cienaCesAclRuleOuterVid, cienaCesAclRuleMatchAny=cienaCesAclRuleMatchAny, cienaCesAclProfileOperState=cienaCesAclProfileOperState, 
cienaCesAclRuleL4DstPortUpper=cienaCesAclRuleL4DstPortUpper, cienaCesAclProfileAttachmentTable=cienaCesAclProfileAttachmentTable, cienaCesAclProfileRuleInstanceStatsTableEntry=cienaCesAclProfileRuleInstanceStatsTableEntry, cienaCesAclNumAclProfileDefs=cienaCesAclNumAclProfileDefs, AclL4PortMatchType=AclL4PortMatchType, cienaCesAclRulePrecedence=cienaCesAclRulePrecedence, cienaCesAclRuleSrcMacAddrMask=cienaCesAclRuleSrcMacAddrMask, cienaCesAclRuleMatchDstMacAddr=cienaCesAclRuleMatchDstMacAddr, PYSNMP_MODULE_ID=cienaCesAccessListMIB, AclFilterAction=AclFilterAction, cienaCesAclInterfaceType=cienaCesAclInterfaceType, cienaCesAclGlobalRuleStatsByteCount=cienaCesAclGlobalRuleStatsByteCount, cienaCesAclRuleL4DstPort=cienaCesAclRuleL4DstPort, cienaCesAclProfileName=cienaCesAclProfileName, cienaCesAclGlobalRuleStatsPacketCount=cienaCesAclGlobalRuleStatsPacketCount, cienaCesAclRuleIpProtocol=cienaCesAclRuleIpProtocol, cienaCesAclRuleBaseEtype=cienaCesAclRuleBaseEtype, cienaCesAclRuleDscpMask=cienaCesAclRuleDscpMask, cienaCesAclProfileGlobalRuleStatsTable=cienaCesAclProfileGlobalRuleStatsTable, cienaCesAclRuleOuterPcpMask=cienaCesAclRuleOuterPcpMask, cienaCesAclRuleMatchL4SrcPort=cienaCesAclRuleMatchL4SrcPort, cienaCesAclRuleDstIpAddr=cienaCesAclRuleDstIpAddr, cienaCesAclRuleOuterDei=cienaCesAclRuleOuterDei, cienaCesAclRuleName=cienaCesAclRuleName, cienaCesAclRuleOuterPcp=cienaCesAclRuleOuterPcp, cienaCesAclRuleDscp=cienaCesAclRuleDscp, AclInterfaceType=AclInterfaceType, cienaCesAclRuleMatchDstIpAddr=cienaCesAclRuleMatchDstIpAddr, cienaCesAccessListMIBObjects=cienaCesAccessListMIBObjects, cienaCesAclConfiguration=cienaCesAclConfiguration)
|
# -*- coding: utf-8 -*-
from django.conf.urls import url
from search.views import SearchPostByOrder
urlpatterns = [
url(r'^post/(?P<search_word>.+)/(?P<order>\w+)/$', SearchPostByOrder.as_view(), name='search_post_by_order'),
]
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
###
# Name: Jarod Penniman, Jared Love
# Student ID: 2258875, 1818306
# Email: penni112@mail.chapman.edu, love115@mail.chapman.edu
# Class: PHYS 220 Fall 2017
# Assignment: CW07
###
import numpy as np
import nose.tools
import taylor_approx as ta
import array_calculus as ac
"""Test function for our taylor approximation"""
def test_taylor_approx():
"""Tests our function taylor(x,f,i,n) to approximate the Gaussian function around
some point arbitrarily chosen."""
a,b,n = 0,3,1000
t = np.linspace(a,b,n)
desired = ac.g(a,b,n)
# Values obtained from taylor approximation
actual = ta.taylor(t,desired,501,10)
# Debug message
print("We expected this: ",desired[500:503]," but got this: ",actual[1][500:503])
# Testing accuracy
for k in range(500,502):
nose.tools.assert_almost_equal(desired[k],actual[1][k],4)
|
# -*- coding: utf-8 -*-
import json
import weakref
import time
from datetime import datetime
from flask_restful import reqparse
from flask_restful import fields
from util.log import LogHandler
from util.utils import ParseRequestParameters
from flask_restful import Resource
from util.utils import zcompress
log = LogHandler(__name__)
class MakeResponse:
resource_fields = {
"md5": fields.Integer,
"sign":fields.Integer,
"other": fields.String,
"dbname": fields.String,
}
class Utils():
@staticmethod
def get_mongo_client(dbname="searchengine"):
from run import app
return app.mongo[dbname]
@staticmethod
def save(client, table, data):
client[table].save(data)
@staticmethod
def set_monitor(service, data):
from run import app
try:
log.info(app.elastic.index(index='universal_docs', doc_type='docs',
body={"service": service, "ts": int(time.time()), "source": data}))
except Exception as err:
log.error("app elastic error %s" % err)
class Universal(Resource):
check_type = ["md5"]
def post(self):
parser_args = reqparse.RequestParser()
for key in MakeResponse.resource_fields.keys():
parser_args.add_argument(key, type=str)
weakref_parser = ParseRequestParameters(parser_args)
parser = weakref.proxy(weakref_parser)
check_result = parser.check(self.check_type)
if not check_result:
del parser
return {"message": "valid sign is fail"}
repr(UniversalService(parser.args))
del parser
return "ok"
class UniversalService(Utils):
def __init__(self, args):
self.args = args
def universal(self):
if self.args['dbname'] == "universal_pc":
log.info("universal: {}, {}, {}".format(self.args['dbname'], self.args["other"].get('uuid', ''), self.args["other"].get('url', '')))
zdetail = zcompress(self.args['other']['detail'])
if zdetail:
self.args['other']['detail'] = zdetail
else:
self.args['other']['zcompress'] = 0
elif self.args['dbname'] == "hao" and self.args["other"].get('_id',""):
log.info("universal: {},{}".format(self.args['dbname'],self.args["other"].get('_id',"")))
else:
log.error("universal: {}".format(self.args['dbname']))
return
Utils.save(Utils.get_mongo_client(),self.args["dbname"],self.args['other'])
Utils.set_monitor('universal_api',{'dbname':self.args["dbname"]})
def result(self):
try:
self.args["other"] = json.loads(self.args["other"])
self.universal()
except Exception as err:
log.warning("doc insert mgo error : %s" % (err))
self.args.clear()
def __repr__(self):
self.result()
return "done"
|
import html
import swifter  # noqa: F401 -- importing swifter registers the .swifter accessor on pandas objects
from preprocessing.Processor import Processor
class HtmlEncodingProcessor(Processor):
def process(self, data):
data['text'] = self.remove_html_encoding(data)
return self.next_processor.process(data)
@staticmethod
def remove_html_encoding(data):
return data['text'].swifter.apply(lambda x: html.unescape(x))
|
# Time: O(n)
# Space: O(n)
# 1063
# Given an array A of integers, return the number of non-empty continuous subarrays
# that satisfy the following condition:
# The leftmost element of the subarray is not larger than other elements in the subarray.
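# Approach (monotonic stack): scan left to right keeping a non-decreasing stack;
# after popping every stacked value greater than the current one, the stack holds
# exactly the valid left endpoints of subarrays ending at the current element,
# so adding len(s) at each step counts all valid subarrays.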
class Solution(object):
def validSubarrays(self, nums):
"""
:type nums: List[int]
:rtype: int
"""
result = 0
s = []
for num in nums: # traverse in reverse instead to handle a "rightmost element" variant
while s and s[-1] > num: # using >=, <, or <= here instead counts subarrays whose first element is strictly less than / not less than / greater than the rest
s.pop()
s.append(num)
result += len(s)
return result
print(Solution().validSubarrays([1,4,2,5,3])) # 11 [1]; [1,4] [4]; [1,4,2] [2]; [1,4,2,5] [2,5] [5]; [1,4,2,5,3] [2,5,3] [3]
print(Solution().validSubarrays([3,2,1])) # 3
print(Solution().validSubarrays([2,2,2])) # 6
|
'''
Created on Oct 19, 2016
@author: nixer
'''
from datetime import datetime
import os
import sys
import pymysql
import pymysql.cursors
import re
import requests
def getLog(logdir, trid):
"""
Creates 'logs' directory, if it doesn't exist,
creates or opens a log file in 'logs' directory.
"""
# assign a current working directory + '/logs' to log_dir variable (platform independent)
log_dir = os.path.join(os.getcwd(), logdir)
# or --> script directory: log_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)), "logs")
# or --> user directory: log_dir = os.path.join(os.path.expanduser("~"), "logs")
try:
# if logs directory(!) doesn't exist, create it
if not os.path.isdir(log_dir):
os.makedirs(log_dir)
# open log file with prefix and timestamp (platform independent) in Append mode
log = open(os.path.join(log_dir, "rfaRunner_" + trid + "_" + getCurTime("%Y%m%d_%H-%M") + ".log"), "a")
return log
except (OSError, IOError):
# return -1 in case of exception
return -1
def qaPrint(log, message):
"""
Prints 'timestamp + message' to console and writes it to the log file
"""
# current date and time as string + message. example: [Oct 25 01:52:33.000001] TC1 - Passed
log_message = getCurTime("[%b %d %H:%M:%S.%f]") + " " + message
# prints log_message
print log_message
# writes message to a log file
log.write(log_message + "\n")
def getCurTime(date_time_format):
"""
Returns current date_time as a string formatted according to date_time_format
"""
date_time = datetime.now().strftime(date_time_format)
return date_time
def checkArgv(arg):
"""
Process command-line arguments and return them as a dictionary
"""
trid = {}
if len(arg) < 2:
sys.exit('one argument is required: --testrun=<trid>')
else:
for i in range(1, len(arg)):
try:
i_arg = arg[i].lower().split('=')
if i_arg[0] == '--testrun' and int(i_arg[1]) not in range(10001):
sys.exit('test run id should be in range [0-10000]')
trid[i_arg[0]] = i_arg[1]
except:
# return -1 in case of exception
return -1
return trid
def getLocalEnv(loc_prop_file):
"""
Reads the content of loc_prop_file to dictionary and returns it
"""
loc_prop = {}
try:
with open(loc_prop_file) as f:
for line in f:
try:
(key, val) = line.split('=')
val = val.strip()
if val.isdigit(): # checking if value is digit -> convert to int
val = int(val)
loc_prop[key] = val
except ValueError as err:
print "Local properties file has wrong format: ", err
return -1
return loc_prop
except (OSError, IOError):
# return -1 in case of exception
return -1
def getTestCases(trid):
"""
Reads the content of test_run_file to dictionary of dictionaries and returns it
"""
keys = ['rest_URL', 'HTTP_method', 'HTTP_RC_desired', 'param_list'] # key list
test_cases = {}
try:
if os.stat(trid+".txt").st_size == 0: # Check for empty file
return (-1, "File {} is empty".format(trid+".txt"))
with open(trid+".txt") as f:
tc_list = f.readlines() # reads each line of test_run_file in list
for i in range(0, len(tc_list)):
tc = tc_list[i].split("|") # split line and create a value list
dictionary = dict(zip(keys, map(str.strip, tc[1:]))) # merge key list and value list to dictionary
param_list = dictionary['param_list'].split(',') # split param_list and create a list
dictionary['param_list'] = param_list # replace value of param_list by new list
http_rc = int(dictionary['HTTP_RC_desired']) # convert HTTP_RC_desired value to int
dictionary['HTTP_RC_desired'] = http_rc # replace value of HTTP_RC_desired by new int value
test_cases[int(tc[0])] = dictionary # append final dictionary to test_cases dictionary
return (1, test_cases)
except (OSError, IOError) as err:
# return -1 in case of exception
return (-1, err)
def getDbConnection(hst, db_id, lg, pw):
try:
connection = pymysql.connect(host=hst,
user=lg,
password=pw,
db=db_id,
charset='utf8mb4',
cursorclass=pymysql.cursors.DictCursor)
return connection
except BaseException as er:
# return -1 in case of exception
return (-1, er)
def getDbCursor(connection):
try:
cursor = connection.cursor()
return cursor
except BaseException as er:
# return -1 in case of exception
return (-1, er)
def queryDb():
pass
def buildURL(str_list):
try:
url_temp = str_list[0] + '/' + str_list[1] + '/' + str_list[2]
url = re.sub(r'/+', '/', url_temp) # collapses repeated slashes (note: this would also collapse the '//' in a scheme such as 'http://')
return url
except BaseException as er:
# return -1 in case of exception
return (-1, er)
def getHttpResponse(url, method, parameters):
try:
if method == 'GET':
r = requests.get(url, params=parameters)
elif method == 'POST':
r = requests.post(url, params=parameters)
elif method == 'HEAD':
r = requests.head(url)
elif method == 'DELETE':
r = requests.delete(url)
elif method == 'OPTIONS':
r = requests.options(url)
else:
print "Method does not exist:" + method
return -1
return r.text
except:
# return -1 in case of exception
return -1
def getHttpResponseCode(res_object, indicator):
try:
if indicator == 'string':
code = str(res_object)
return code
elif indicator == 'int':
code = int(res_object)
else:
print "Wrong indicator:" + indicator
return -1
return code
except:
# return -1 in case of exception
return -1
def checkEnv():
return True |
"""Bootstrap code starts and runs the Fluidinfo tools and services."""
import sys
from bzrlib.errors import BzrCommandError
from commandant import builtins
from commandant.controller import CommandController
from fluiddb.application import APIServiceOptions
from fluiddb.scripts import commands
from fluiddb.scripts.twistd import runTAC
def runAPI():
"""Start the Fluidinfo API service."""
runTAC('api.tac', APIServiceOptions())
def runCommand(argv=sys.argv):
"""Run the command named in C{argv}.
If a command name isn't provided the C{help} command is shown.
@param argv: A list of command-line arguments. The first argument should
be the name of the command to run. Any further arguments are passed
to the command.
@return: The exit code for the command that was invoked.
"""
if len(argv) < 2:
argv.append('help')
controller = CommandController('fluidinfo', '0.1',
'Management tools for Fluidinfo operators.',
'https://launchpad.net/fluidinfo')
controller.load_module(builtins)
controller.load_module(commands)
controller.install_bzrlib_hooks()
try:
return controller.run(argv[1:])
except BzrCommandError as error:
print error
|
#!/usr/bin/env python
###
# Copyright 2015, EMBL-EBI
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###
from functools import partial
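# uniq_set: order-preserving de-duplication; `seen_add` is bound once so the
# attribute lookup is not repeated inside the list comprehension.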
def uniq_set(seq):
seen = set()
seen_add = seen.add
return [x for x in seq if not (x in seen or seen_add(x))]
class Enum(object):
def __init__(self, *keys):
self.__dict__.update(zip(keys, range(len(keys))))
class Bunch(object):
def __init__(self, **kwds):
self.__dict__.update(kwds)
def __eq__(self, other):
return self.__dict__ == other.__dict__
class LazyDict(dict):
"""
A lazy dictionary implementation which will try
to evaluate all values on access and cache the
result for later access.
"""
def set_lazy(self, key, item, *args, **kwargs):
"""
Allow the setting of a callable and arguments
as value of dictionary.
"""
if callable(item):
item = partial(item, *args, **kwargs)
super(LazyDict, self).__setitem__(key, item)
def __getitem__(self, key):
item = super(LazyDict, self).__getitem__(key)
try:
self[key] = item = item()
except TypeError:
pass
return item
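# Minimal usage sketch (illustrative): values are evaluated on first access, then cached.
# d = LazyDict()
# d.set_lazy('answer', lambda x: x * 2, 21)
# d['answer'] # first access calls the stored partial -> 42; later accesses return the cached 42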
|
"""Programa 5_7.py
Descrição: Alterar o programa anterior de forma que o usuário informe também o final da tabuada
Autor:Cláudio Schefer
Data:
Versão: 001
"""
# Declaração de variáveis
xi = int(0)
xf = int(0)
t = int(0)
# Entrada de dados
t = int(input("Digite qual tabuada deseja imprimir: "))
xi = int(input("Digite começo da tabuada:"))
xf = int(input("Digite o final da tabuada: "))
# Processamento
x = xi
while x <= xf:
# Saída de dados
print("%d x %d = %d" %(t,x,t*x))
x = x+1
|
"""
License: MIT
Copyright (c) 2019 - present AppSeed.us
"""
# This will make sure the app is always imported when
# Django starts so that shared_task will use this app.
from .celery import app as celery_app
__all__ = ('celery_app',) |
import os, re, sys, glob, types, itertools, argparse
import numpy as np
from collections import defaultdict
def _xor(a, b):
return not ((a and b) or (not a and not b))
def _is_property(obj, name):
return isinstance(getattr(type(obj), name, None), property)
def _is_method(x):
return type(x) in [types.MethodType, types.FunctionType]
def _is_float(x):
try:
float(x)
return True
except:
pass
return False
def _raise(ex):
raise ex
def gen_config(cfg_gen, store_dict, sep='-'):
"""
add instance of config generator class to config file
Args:
cfg_gen : py class - the generator class, whose `_attr_dict` contains config attributes and the possible values
store_dict : dict - the place to put the generated config instance, may use `globals()`
sep : str - separator in composite attributes, default to `-`
"""
if isinstance(cfg_gen, (list, tuple)):
for cg in cfg_gen:
gen_config(cg, store_dict, sep)
return
assert hasattr(cfg_gen, 'idx_name_pre'), f'no idx_name_pre provided in {cfg_gen}'
attr_dict = cfg_gen._attr_dict.copy() # not altering the class variable
for k, v in attr_dict.items(): # scan for composite config
assert len(v) > 0, f'found empty list of config options: _attr_dict[{k}]'
if type(v[0]) == list:
attr_dict[k] = [sep.join([str(i) for i in v_list if str(i)]).strip(sep) for v_list in itertools.product(*v)]
attr_k = attr_dict.keys()
attr_v = [attr_dict[k] for k in attr_k]
attr_i = [list(range(len(v))) for v in attr_v]
for idx in itertools.product(*attr_i):
cfg = cfg_gen(parse=False)
for i, k, v in zip(idx, attr_k, attr_v):
setattr(cfg, k, v[i])
cfg_var_name = cfg.idx_name_pre + '_' + ''.join([str(i) for i in idx]) # use index to attribute value as name postfix
setattr(cfg, 'idx_name', cfg_var_name)
cfg.parse() # static parse after setting attrs
store_dict[cfg_var_name] = cfg
def is_config(cfg, base=None, mod=None):
if mod != None and type(cfg) == str:
if cfg.startswith('_'):
return False
cfg = getattr(mod, cfg)
if base == None:
assert mod != None, 'must provide either `base` (class Base) or `mod` (python module)'
base = mod.Base
return isinstance(cfg, base) or isinstance(type(cfg), base) # config can be either class or instance
def log_config(config, title='', f_out=None, prefix='', base=None):
if f_out is None:
f_out = sys.stdout
if base is None:
root = os.path.join(os.getcwd(), os.path.dirname(__file__), '../')
sys.path += [] if root in sys.path or os.path.realpath(root) in sys.path else [root]
from config.base import Base as base
print(f'\n{prefix}<<< ======= {config._cls} ======= {title if title else config.name}', file=f_out)
max_len = max([len(k) for k in dir(config) if not k.startswith('_')] + [0])
for k in dir(config):
if k.startswith('_') or _is_method(getattr(config, k)):
continue
cur_attr = getattr(config, k)
if isinstance(cur_attr, list) and len(str(cur_attr)) > 200: # overlong list
cur_attr = '[' + f'\n{prefix}\t\t'.join([''] + [str(s) for s in cur_attr]) + f'\n{prefix}\t]'
print('\t%s%s\t= %s' % (prefix + k, ' ' * (max_len-len(k)), str(cur_attr)), file=f_out)
if is_config(cur_attr, base=base):
log_config(cur_attr, f_out=f_out, prefix=prefix+'\t', base=base)
print('\n', file=f_out, flush=True)
def load_config(cfg_path=None, dataset_name=None, cfg_name=None, cfg_group=None, reload=True):
import importlib
# cfg from path
if cfg_path is not None:
update = None
if os.path.isfile(cfg_path):
# update on the default cfg
from config.base import Base, Config
update = Base(cfg_path)
cfg_path = [update.dataset.lower(), 'default']
else:
# directly specified cfg
cfg_path = cfg_path.replace('/', '.').split('.')
cfg_path = cfg_path if cfg_path[0] == 'config' else ['config'] + cfg_path
cfg_module = '.'.join(cfg_path[:2])
cfg_class = '.'.join(cfg_path[2:])
mod = importlib.import_module(cfg_module)
if hasattr(mod, cfg_class):
cfg = getattr(mod, cfg_class)
else:
cfg = load_config(dataset_name=cfg_path[1], cfg_name=cfg_class, reload=reload)
if update is not None:
cfg = Config(cfg) # avoid overriding
cfg.update(update, exclude=[]) # full override with no exclude
return cfg
# setup dict
cfg_name_dict = load_config.cfg_name_dict # dataset_name -> {cfg.name -> cfg.idx_name}
cfg_module_dict = load_config.cfg_module_dict # dataset_name -> cfg_module
if dataset_name is not None and dataset_name not in cfg_module_dict or reload:
mod = importlib.import_module('config.' + dataset_name)
cfg_module_dict[dataset_name] = mod
cfg_name_dict[dataset_name] = {}
for i in dir(mod):
if not is_config(i, mod=mod): # use the 'base' class imported in 'mod'
continue
cfg = getattr(mod, i)
if cfg.name:
cfg_name_dict[dataset_name][cfg.name] = cfg.idx_name
# module/cfg from dataset/cfg name
mod = cfg_module_dict[dataset_name]
if cfg_name is not None:
if cfg_name not in cfg_name_dict[dataset_name]:
raise KeyError(f'no cfg_name={cfg_name} in module {dataset_name}')
idx_name = cfg_name_dict[dataset_name][cfg_name]
return getattr(mod, idx_name)
elif cfg_group is not None:
if not hasattr(mod, cfg_group):
raise KeyError(f'no cfg_group={cfg_group} in module {dataset_name}')
cfg_g = getattr(mod, cfg_group)
if not isinstance(cfg_g, (tuple, list, dict, set)):
raise ValueError(f'cfg_group={cfg_group} appears to be {cfg_g}, not of type (tuple, list, dict, set)')
return cfg_g
return mod
load_config.cfg_module_dict = {}
load_config.cfg_name_dict = {}
def is_train_success(train_dir):
# train_dir = results/dataset_name/config_name/Log_*
# if not 'snapshots' in os.listdir(train_dir) or len(os.listdir(os.path.join(train_dir, 'snapshots'))) == 0: # snapshots
# return False
log_train = os.path.join(train_dir, 'log_train.txt') # train_log
if not os.path.isfile(log_train):
return False
# avoid in-training log
lines = [l for l in open(log_train, 'r').read().strip('\n').split('\n') if l]
if any([l.startswith('finish') for l in lines]): # train_log 'finish' print
return True
elif any(['Traceback (most recent call last):' in l for l in lines]): # an error occurred (ignore errors after training finished)
return False
elif len(lines) == 0:
return False
return True # in-training
def _read_cfg(lines):
idx_list = []
for i, l in enumerate(lines):
if l.startswith('<<<') or 'EPOCH' in l: idx_list.append(i)
if len(idx_list) == 2: break
if len(idx_list) == 0:
return None
idx_list = idx_list if len(idx_list) == 2 else [*idx_list, None]
lines = [l for l in lines[idx_list[0] + 1:idx_list[1]] if '= ' in l and l.count('=') == 1]
num_blanks = [re.search('^[ \t]+', l) for l in lines]
num_blanks = [len(n.group()) for n in num_blanks if n]
num_outer = min(num_blanks)
outer = [l.split('= ') for n, l in zip(num_blanks, lines) if n == num_outer] # consider only the outer-most config, i.e. the top-level `config`
cfg = defaultdict(lambda: '', {k.strip(): v.strip() for k, v in outer})
return cfg
def _read_train(train_dir, read_cfg=False):
log_train = os.path.join(train_dir, 'log_train.txt') # train_log
if not os.path.isfile(log_train):
return (None, None, None) if read_cfg else (None, None)
lines = open(log_train, 'r').read().strip('\n').split('\n')
sc = [float(l.split('|')[0].split('=')[1].split()[0]) for l in lines if '|' in l and 'current' in l]
sc = max(sc) if sc else None
task_loss = np.array([[float(i.split('=')[-1]) for i in l.split() if '=' in i] for l in lines[-100:] if l.startswith('Step ')])
task_loss = None if not len(task_loss) else task_loss[:, 1].mean() if not np.isnan(task_loss).any() else float('nan')
if read_cfg:
cfg = _read_cfg(lines)
return sc, task_loss, cfg
return sc, task_loss
def is_val_success(f, step=None):
step = str(step) if step != None else ''
n = f.split('/')[-1]
if not os.path.isfile(f) or not n.startswith('log_val') or not n.endswith(step):
return False
f = open(f, 'r').read().split('\n')
if any([l.startswith('finish') for l in f]):
return True
return False
def _read_val(f, full=True, get_dict=False, get_conf=False):
if not is_val_success(f):
return None
lines = open(f, 'r').read().split('\n')
kv_dict = {
'OA': None,
'mACC': None, 'ACCs': None,
'mIoU': None, 'IoUs': None,
'conf': None,
}
# print(f)
if full: # get result on full cloud
f = [i for i, l in enumerate(lines) if set(lines[i]) == set('-')]
if len(f) < 2:
return None
kv_dict = {}
for l in lines[f[-2] + 1:f[-1]]:
l = l.split('|')
h = l[0]
for k in ['OA', 'mACC', 'mIoU']:
if k not in h: continue
v = h.split(k + '=')[-1].split()[0]
kv_dict[k] = float(v)
if len(l) > 1:
kv_dict[k.lstrip('m') + 's'] = [float(i) for i in (l[1]).split() if i]
if get_conf:
idx = [i for i, l in enumerate(lines) if 'final' in l and 'Confusion' in l]
idx = idx[-1] if idx else f[-2]
conf = []
for l in lines[idx + 1:f[-2]]:
conf.append([float(i) for i in l.strip().strip('[]').split() if i])
conf = np.array(conf) if conf else None
kv_dict['conf'] = conf
else: # result on sub cloud
f = [i for i, l in enumerate(lines) if 'sub clouds - final' in l]
if not f:
return None
idx = f[-1]
l_m = lines[idx].split('|')[0].split(':')[-1].strip().split()
keys = [kv.split('=')[0].strip() for kv in l_m]
v_metrics = [float(kv.split('=')[1].strip()) for kv in l_m]
mIoU, OA, mACC = None, None, None
if 'mIoU' in keys:
kv_dict['mIoU'] = v_metrics[keys.index('mIoU')]
if 'OA' in keys:
kv_dict['OA'] = v_metrics[keys.index('OA')]
if 'mACC' in keys:
kv_dict['mACC'] = v_metrics[keys.index('mACC')]
if '|' in lines[idx]:
kv_dict['IoUs'] = [float(i) for i in lines[idx].split('|')[-1].split()]
acc = None
if lines[idx - 2].startswith('ACCs'):
kv_dict['ACCs'] = [float(i) for i in lines[idx - 2].split('=')[-1].split('|')[-1].split()]
if get_dict: # return a dict that aligns with `class Metrics`
return kv_dict
return kv_dict['mIoU'], kv_dict['OA'], kv_dict['mACC'], kv_dict['IoUs']
def _read_test(f):
lines = [l for l in open(f, 'r').read().split('\n') if 'Result' in l]
if not lines:
return None
rst = float(lines[-1].split('Result')[-1].split()[0])
return rst if rst > 1 else rst * 100
def get_best_val_snap(cfg, snap_prefix='snap'):
# get the best of full validation
cfg_path = f'results/{cfg.dataset.lower()}/{cfg.name}'
vals = glob.glob(f'{cfg_path}/*/log_val*')
if not vals:
return None
vals = list(zip(vals, [_read_val(f) for f in vals])) # [(.../log_v, mIoU), ...]
vals = [(vf, v[0]) for vf, v in vals if v is not None]
vals = sorted(vals, key= lambda t: t[1] if t[1] else -1)[-1]
if not vals[1]:
return None
snap = int(vals[0].split('_')[-1])
log_path = os.path.dirname(vals[0])
snap_path = f'{log_path}/{cfg.snap_dir}/{snap_prefix}-{snap}'
return snap_path
def get_snap(saving_path, step='last', snap_prefix='snap'):
# get the best of running val (done in training)
snap_path = os.path.join(saving_path, 'snapshots') if not saving_path.endswith('snapshots') else saving_path
snap_steps = [f[:-5].split('-')[-1] for f in os.listdir(snap_path) if f[-5:] == '.meta']
if step == 'last':
snap_steps = [int(s) for s in snap_steps if s.isdigit()]
chosen_step = np.sort(snap_steps)[-1] # last saved snap (best val estimation)
chosen_snap = os.path.join(snap_path, f'snap-{chosen_step}')
else:
assert isinstance(step, int) or step.isdigit() or step == 'best', f'not supported step = {step}'
step = str(step)
chosen_snap = None
if step in snap_steps:
chosen_snap = os.path.join(snap_path, f'snap-{step}')
return chosen_snap
def to_valid_stage(stage_n, short=False):
if stage_n in ['D', 'down']:
stage_n = 'D' if short else 'down'
elif stage_n in ['U', 'up']:
stage_n = 'U' if short else 'up'
else:
raise ValueError(f'invalid stage_n={stage_n}')
return stage_n
def parse_stage(stage, num_layers):
stage = stage.replace('a', ''.join(f'{i}' for i in range(num_layers)))
stage_list = [i.strip('_') for i in re.split(r'(\d+)', stage) if i and i.strip('_')] # e.g. D012_U34
assert len(stage_list) % 2 == 0, f'invalid stage compound: stage_list={stage_list} from stage={stage}'
stage_n = [s for i, s in enumerate(stage_list) if i % 2 == 0]
stage_i = [s for i, s in enumerate(stage_list) if i % 2 == 1]
stage_list = [[(to_valid_stage(n), int(i)) for i in i_str] for n, i_str in zip(stage_n, stage_i)]
stage_list = sum(stage_list, [])
return stage_list
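# Illustrative: parse_stage('D01_U2', num_layers=3) -> [('down', 0), ('down', 1), ('up', 2)]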
def _list_config(FLAGS):
import importlib
cfg = FLAGS.list.replace('/', '.').split('.')
cfg = [i for i in cfg if i != 'config']
if os.path.isfile(FLAGS.list):
cfg = load_config(FLAGS.list)
mod = importlib.import_module(f'config.{cfg.dataset.lower()}')
cfg_list = [(cfg, cfg.name)]
elif len(cfg) == 1: # all config in the dataset_name
mod = importlib.import_module(f'config.{cfg[0]}')
cfg_list = [(getattr(mod, k), k) for k in dir(mod) if not k.startswith('_')]
cfg_list = [(c, k) for c, k in cfg_list if is_config(c, mod=mod)]
else: # config.dataset.dict_name/cfg_name/idx_name
mod = importlib.import_module(f'config.{cfg[0]}')
try:
d = load_config(FLAGS.list)
except: # may specify a dict
if hasattr(mod, cfg[-1]):
d = getattr(mod, cfg[-1])
else:
raise ValueError(f'no cfg - {FLAGS.list}')
cfg_list = zip(d.values(), d.keys()) if isinstance(d, dict) else [(d, d.name)]
for c, n in cfg_list:
log_config(c, n, base=mod.Base) # config.Base is mod.Base
def _str2bool(v, raise_not_support=True):
if isinstance(v, bool):
return v
if v.lower() in ('yes', 'true', 't', 'y', '1'):
return True
elif v.lower() in ('no', 'false', 'f', 'n', '0'):
return False
elif raise_not_support:
raise argparse.ArgumentTypeError('boolean value expected, given %s' % type(v))
else:
return None
if __name__ == '__main__':
import numpy as np
import pickle, argparse, time, sys, os, re, glob, shutil
sys.path.insert(0, os.path.join(os.getcwd(), os.path.dirname(__file__), '../'))
parser = argparse.ArgumentParser()
parser.add_argument('--list', type=str, help='print the cfg group (file_name, or file_name.dict_name)')
parser.add_argument('--best', type=str, default=None, help='get config best val')
FLAGS = parser.parse_args()
cfg_dir = os.path.join(os.getcwd(), os.path.dirname(__file__)).rstrip('/')
sys.path.insert(0, os.path.dirname(cfg_dir))
dir_list = None
if FLAGS.list:
_list_config(FLAGS)
if FLAGS.best:
print(get_best_val_snap(load_config(FLAGS.best)))
|
import os
from flask import Flask
from flask import render_template, request, send_from_directory, flash
from werkzeug.utils import secure_filename
app = Flask(__name__)
UPLOAD_FOLDER = './'
app.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER
app.secret_key = "secret key"
class DataObject():
pass
def checkFileType(f: str):
return f.split('.')[-1] in ['mp4']
def cleanString(v: str):
out_str = v
delm = ['_', '-', '.']
for d in delm:
out_str = out_str.split(d)
out_str = " ".join(out_str)
return out_str
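# Illustrative: cleanString('my_video-file.mp4') -> 'my video file mp4'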
@app.route('/', methods=['GET', 'POST'])
def upload():
obj = DataObject() # instantiate; assigning the class itself would share state across requests
obj.is_video_display = False
obj.video = ""
print("files", request.files)
if request.method == 'POST' and 'video' in request.files:
video_file = request.files['video']
if checkFileType(video_file.filename):
filename = secure_filename(video_file.filename)
print("filename", filename)
# save the file to the configured upload folder
filepath = os.path.join(app.config['UPLOAD_FOLDER'], filename)
print("filepath", filepath)
video_file.save(filepath)
obj.video = filename
obj.is_video_display = True
obj.is_predicted = False
return render_template('index.html', obj=obj)
else:
if video_file.filename:
msg = f"{video_file.filename} is not a video file"
else:
msg = "Please select a video file"
flash(msg)
return render_template('index.html', obj=obj)
return render_template('index.html', obj=obj)
@app.route('/files/<filename>')
def get_file(filename):
return send_from_directory(app.config['UPLOAD_FOLDER'], filename)
if __name__ == '__main__':
app.run(debug=True, use_reloader=True) |
import time
import os
import sys
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
from nw_util import *
from selenium import webdriver
from selenium.webdriver.chrome.options import Options
chrome_options = Options()
chrome_options.add_argument("nwapp=" + os.path.dirname(os.path.abspath(__file__)))
driver = webdriver.Chrome(executable_path=os.environ['CHROMEDRIVER'], chrome_options=chrome_options)
driver.implicitly_wait(2)
try:
print driver.current_url
cookie = wait_for_element_id_content(driver, 'ret', 'nwjs.io', 20)
print cookie
finally:
driver.quit()
|
from sklearn.preprocessing import normalize
from gensim.models import Word2Vec
from mineral_helper import *
import networkx as nx
import numpy as np
import argparse
def sample_cascade(cascade_graph, root, h, num_nodes):
"""
Uniformly samples a cascade from a cascade graph starting at a given root
node. The length of the cascade is bounded by an upper value
:param cascade_graph: The cascade graph
:param root: The root node to sample a cascade from
:param h: The cascade length
:param num_nodes: The number of nodes used to control nodes to add to the
sampled cascade
:return: list, A cascade [sequence of nodes]
"""
cascade = [root]
next_nodes = [root]
while len(next_nodes) < h:
current_node = next_nodes[-1]
if current_node in cascade_graph:
neighbors = list(cascade_graph[current_node]) # materialize the neighbor view so np.random.choice can sample from it
next_node = np.random.choice(neighbors)
next_nodes.append(next_node)
if next_node < num_nodes:
cascade.append(next_node)
else:
break
return cascade
def sample_cascades(cascade_graph, num_nodes, r, h):
"""
Samples cascades from observed cascades modeled as a cascade graph
:param cascade_graph: The cascade graph
:param num_nodes: The number of nodes in the actual graph
:param r: The number of cascades to sample from each node
:param h: The length of a sampled cascade
:return: list of lists: A list containing sampled cascades
"""
cascades = []
nodes = list(cascade_graph.nodes()) # materialize so np.random.shuffle can shuffle in place
for i in range(r):
print('PROGRESS: {}/{}'.format(i + 1, r))
np.random.shuffle(nodes)
for root in nodes:
cascade = sample_cascade(
cascade_graph, root=root, h=h, num_nodes=num_nodes)
cascades.append(cascade)
return cascades
def compute_similarity(network, feature_matrix):
"""
Builds a weighted graph, where each edge (u, v, sim) is constructed by
computing the Jaccard similarity (sim) of the attributes of nodes u and v.
For the sake of efficiency, a vectorized implementation is used
:param network: The network
:param feature_matrix: Attribute information encoded as a feature matrix
A row is a node and each column is associated with the attributes.
feature_matrix[i, j] = 1 if node i has attribute j otherwise 0.
:return:
"""
print('INFO: Computing similarity between incident nodes of '
'each edge in the graph')
edges = network.edges()
adj_mat = nx.to_scipy_sparse_matrix(network, sorted(network.nodes()))
sources, targets = list(zip(*edges))
sources, targets = list(sources), list(targets)
'''
Computing common attributes. The matrix common_attributes
has the same number of rows as the number of edges due to
feature_matrix[sources] + feature_matrix[targets].
If common_attributes[(i, j), k] = 0, neither i nor j has the
k-th attribute; if common_attributes[(i, j), k] = 1, exactly one of
them has the k-th attribute; if common_attributes[(i, j), k] = 2,
both of them have the k-th attribute.
'''
common_attributes = feature_matrix[sources] + feature_matrix[targets]
num_common_attributes = np.apply_along_axis(
lambda arr: arr[arr == 2].size, axis=1, arr=common_attributes)
# Computing combined attributes
num_combined_attributes = np.apply_along_axis(
lambda arr: arr[arr > 0].size, axis=1, arr=common_attributes)
# Jaccard similarity
similarities = num_common_attributes / num_combined_attributes
# Construct a weighted graph based on the similarities
adj_mat[sources, targets] = similarities
norm_adj_mat = normalize(adj_mat, norm='l2')
return nx.from_scipy_sparse_matrix(norm_adj_mat)
def simulate_diffusion(network, root, h):
"""
Simulates an information diffusion processes in-order to sample cascades
:param network: The network
:param root: The root node from which cascades will be sampled
:param h: The maximum length of a cascade sample
:return: list: A cascade (A sequence of nodes)
"""
infections = {0: {root}}
cascade = [root]
current_time_step = 1
while len(cascade) < h:
previously_infected_nodes = infections[current_time_step - 1]
infections[current_time_step] = set()
for node in previously_infected_nodes:
if node in network:
for nbr in network[node]:
if nbr not in infections[current_time_step]:
w = network[node][nbr]['weight']
if np.random.random() < w:
infections[current_time_step].add(nbr)
cascade.append(nbr)
if len(cascade) >= h:
return cascade
if len(infections[current_time_step]) == 0:
break
current_time_step += 1
return cascade
def simulate_diffusion_events(network, r, h):
"""
Samples cascades using a number of simulation of truncated diffusion processes from
each node
:param network:
:param r:
:param h:
:return:
"""
print('INFO: Simulating diffusion ...')
nodes = list(network.nodes())
cascades = []
for i in range(r):
np.random.shuffle(nodes)
for root in nodes:
cascade = simulate_diffusion(network, root, h)
cascades.append(cascade)
print('PROGRESS: {}/{}'.format(i + 1, r))
return cascades
def embed(walks, d, window, epoch, workers=8):
model = Word2Vec(walks, size=d, window=window, min_count=0, iter=epoch, sg=1, workers=workers)
return model
def display_args(args):
print('INFO: Input arguments')
for arg in vars(args):
print('INFO: {}: {}'.format(arg, getattr(args, arg)))
def parse_args():
parser = argparse.ArgumentParser(description="Runs the python implementation of mineral")
parser.add_argument('--net-file', default='../data/cora/network.txt', help='Path to network file')
parser.add_argument('--net-format', default='edgelist',
help='Graph file format, possible values are (edgelist, adjlist). '
'Default is edgelist')
parser.add_argument('--att-file', default='../data/cora/attributes.txt', help='Path to attributes file')
parser.add_argument('--att-format', default='mattxt',
help='Similar to graph file format. Default is mattxt')
parser.add_argument('--cas-file', default='',
help='Path to observed cascades file')
parser.add_argument('--sim-file', default='../data/cora/simulated_cascades.txt', help='Path to simulated cascade file')
parser.add_argument('--emb-file', default='../data/cora/network.emb', help='Path to the embedding output file')
parser.add_argument('--sample', dest='sample', action='store_true',
help="An indicator whether to sample from observed cascades."
"Valid when observed cascades are provided. "
"Default is False")
parser.set_defaults(sample=False)
parser.add_argument('--directed', dest='directed', action='store_true')
parser.add_argument('--undirected', dest='directed', action='store_false')
parser.set_defaults(directed=False)
parser.add_argument('--weighted', dest='weighted', action='store_true')
parser.add_argument('--unweighted', dest='weighted', action='store_false')
parser.set_defaults(weighted=False)
parser.add_argument('--min-threshold', type=int, default=5, help='Minimum cascade length to consider')
parser.add_argument('--max-threshold', type=int, default=500, help='Maximum cascade length to consider')
parser.add_argument('--dim', type=int, default=128, help='Size of the representation')
parser.add_argument('--window', type=int, default=10, help='Window size')
parser.add_argument('--iter', type=int, default=20, help='Number of epochs')
parser.add_argument('--r', type=int, default=10,
help='Number of diffusion processes to simulate from a node')
parser.add_argument('--h', type=int, default=60,
help='Maximum number of nodes to infect in a single simulation. '
'Default is 60')
parser.add_argument('--workers', type=int, default=8,
help='Number of parallel jobs. Default is 8')
return parser.parse_args()
def main():
args = parse_args()
display_args(args)
network = read_network(args.net_file, directed=args.directed)
num_nodes = network.number_of_nodes()
if args.att_file.strip() != '':
feature_matrix = build_feature_matrix(
args.att_file, input_format=args.att_format, num_nodes=num_nodes)
network = compute_similarity(
network=network, feature_matrix=feature_matrix)
print('INFO: Attribute information is used to build a weighted graph')
cascades = simulate_diffusion_events(network, r=args.r, h=args.h)
if args.sim_file != '':
save_cascades(args.sim_file, cascades)
if len(cascades) > 0:
if args.cas_file != '':
observed_cascades = read_cascades(args.cas_file, args.min_threshold, args.max_threshold)
if args.sample:
cascade_graph = build_cascade_graph(
cascades=observed_cascades, num_nodes=num_nodes)
sampled_cascades = sample_cascades(
cascade_graph=cascade_graph, num_nodes=num_nodes, r=args.r, h=args.h)
cascades += sampled_cascades
else:
cascades += observed_cascades
print('INFO: Learning with observed cascades')
else:
print('INFO: Learning without observed cascades')
cascades = [list(map(str, cascade)) for cascade in cascades]
model = embed(cascades, d=args.dim, window=args.window, epoch=args.iter, workers=args.workers)
save_embedding(args.emb_file, model)
else:
raise ValueError('The length of the cascades is zero, nothing to train on')
if __name__ == '__main__':
main()
|
'''
@Description: Blueprint for event
@Author: Tianyi Lu
@Date: 2019-08-09 15:41:15
@LastEditors: Tianyi Lu
@LastEditTime: 2019-08-17 17:15:40
'''
from flask import render_template, session, redirect, url_for, current_app, flash, request, Markup, abort
from flask_login import login_required, current_user
from .. import db
from ..models import User, Post
from ..email import send_email
from . import event
from ..decorators import admin_required, owner_required
from datetime import datetime
from ..image_saver import saver, deleter
from ..job import add_reminder, send_test_reminder
time_format = '%Y-%m-%d-%H:%M'
@event.route('/all', methods=['GET', 'POST'])
@login_required
def all_post():
datetime_from = datetime_to = None
if request.method == 'POST':
try:
datetime_from = datetime.strptime(request.form['datetime_from'], '%Y-%m-%d')
datetime_to = datetime.strptime(request.form['datetime_to'], '%Y-%m-%d')
except ValueError:
pass
print(datetime_from)
page = request.args.get('page', 1, type=int)
if not datetime_from:
pagination = Post.query.filter_by(is_approved=1).order_by(Post.datetime_from.desc()).paginate(page, per_page=current_app.config['FLASKY_POSTS_PER_PAGE'], error_out=False)
else:
pagination = Post.query.filter_by(is_approved=1).filter(Post.datetime_from > datetime_from).filter(Post.datetime_from < datetime_to).order_by(Post.datetime_from).paginate(page, per_page=current_app.config['FLASKY_POSTS_PER_PAGE'], error_out=False)
posts = pagination.items
return render_template('all_post.html', posts=posts, pagination=pagination)
@event.route('/<int:id>')
@login_required
def post(id):
post = Post.query.get_or_404(id)
body_html = Markup(post.post_html)
return render_template('post.html', id=id, post=post, body_html=body_html)
@event.route('/<int:id>/approved')
@login_required
@admin_required
def post_approved(id):
post = Post.query.get_or_404(id)
if post.is_approved == 0:
post.is_approved = 1
elif post.is_approved == -1:
return redirect(url_for('event.post_rejected', id=id))
db.session.add(post)
# send a post-approval message to the post author
post.author.owner[0].add_msg({'role': 'notification',
'name': 'Event Approved',
'content': 'Your event \"%s\" has been approved' % post.title})
db.session.commit()
return render_template('post_approved.html')
@event.route('/<int:id>/rejected', methods=['GET', 'POST'])
@login_required
@admin_required
def post_rejected(id):
post = Post.query.get_or_404(id)
if post.is_approved == 0:
post.is_approved = -1
elif post.is_approved == 1:
return redirect(url_for('event.post_approved', id=id))
if request.method == 'POST':
post.reject_msg = request.form['comment']
# print(post.reject_msg)
db.session.add(post)
db.session.commit()
return redirect(url_for('main.approve'))
db.session.add(post)
post.author.owner[0].add_msg({'role': 'notification',
'name': 'Event Rejected',
'content': 'Sorry, your event \"%s\" has been rejected' % post.title})
db.session.commit()
return render_template('post_rejected.html')
@event.route('/<int:id>/followers')
@login_required
def post_followers(id):
post = Post.query.get_or_404(id)
page = request.args.get('page', 1, type=int)
pagination = post.followers.paginate(page, per_page=12, error_out=False)
users = pagination.items
user_amount = post.followers.count()
return render_template('followers.html', post=post, pagination=pagination, users=users, user_amount=user_amount)
@event.route('/<int:id>/follow')
@login_required
def post_follow(id):
post = Post.query.get_or_404(id)
if post.has_passed():
abort(403)
post.followers.append(current_user)
post.author.owner[0].add_msg({'role': 'notification',
'name': 'Follower',
'content': '\"%s\" starts to follow your event \"%s\"' % (current_user.username, post.title)})
db.session.commit()
return redirect(url_for('.post', id=id))
@event.route('/<int:id>/unfollow')
@login_required
def post_unfollow(id):
post = Post.query.get_or_404(id)
if post.has_passed():
abort(403)
if current_user not in post.followers.all():
return redirect(url_for('.post', id=id))
post.followers.remove(current_user)
db.session.commit()
return redirect(url_for('.post', id=id))
@event.route('/<int:id>/delete', methods=['GET'])
@login_required
@owner_required
def post_delete(id):
post = Post.query.get_or_404(id)
if post not in current_user.my_group.posts.all():
abort(403)
db.session.delete(post)
db.session.commit()
return redirect(url_for('group.group_profile', id=current_user.my_group.id))
@event.route('/<int:id>/edit', methods=['GET', 'POST'])
@login_required
def post_edit(id):
old_post = Post.query.get_or_404(id)
if old_post.author.id != current_user.group_id:
abort(403)
strtime_from = old_post.datetime_from.strftime(time_format)
strtime_to = old_post.datetime_to.strftime(time_format)
if request.method == 'POST':
if not request.form['title'] or not request.form['content']:
flash('Write Something!', 'danger')
return redirect(url_for('event.post_edit', id=id))
if request.files['cover']:
if old_post.cover != 'default.jpg':
deleter('post_cover_pic', old_post.cover)
cover = request.files['cover']
cover_filename = saver('post_cover_pic', cover)
old_post.cover = cover_filename
old_post.title = request.form['title']
old_post.post_html = request.form['content']
old_post.tag = request.form['tag']
old_post.datetime_from = datetime.strptime(request.form['datetime_from'], time_format)
old_post.datetime_to = datetime.strptime(request.form['datetime_to'], time_format)
old_post.last_modified = datetime.utcnow()
old_post.is_approved = 0
db.session.add(old_post)
db.session.commit()
return redirect(url_for('event.post', id=id))
return render_template('editor.html', old_post=old_post, old_time_from=strtime_from, old_time_to=strtime_to)
# test reminder function
@event.route('/<int:id>/send_reminder')
def post_test_reminder(id):
post = Post.query.get_or_404(id)
post_datetime = post.datetime_from
# time = [post_datetime.year, post_datetime.month, post_datetime.day]
time = datetime.now().minute
send_test_reminder(current_app._get_current_object())
return "send_test_reminder"
# get current post attributes, used in email.py
def get_post(id):
post = Post.query.get_or_404(id)
return post
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# This file is part of nautilus-pdf-tools
#
# Copyright (c) 2012-2019 Lorenzo Carbonell Cerezo <a.k.a. atareao>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import gi
try:
gi.require_version('Gtk', '3.0')
except ValueError as e:
print(e)
exit(1)
from gi.repository import Gtk
import comun
from basedialog import (generate_button_row, generate_spinbutton_row,
generate_title_row)
from basedialogwithapply import BaseDialogWithApply
from comun import _
from pageoptions import PageOptions
from watermarkdialog import WatermarkDialog
class SignDialog(WatermarkDialog):
def __init__(self, filename=None, window=None):
WatermarkDialog.__init__(self, _('Sign'), filename, window)
def set_page(self, page):
if self.document.get_n_pages() > 0 and \
page < self.document.get_n_pages() and\
page >= 0:
self.no_page = page
self.show_page.set_text(str(self.no_page + 1))
self.show_title_page.set_text(str(self.no_page + 1))
if str(self.no_page) in self.pages.keys():
self.x = self.pages[str(self.no_page)].image_x
self.y = self.pages[str(self.no_page)].image_y
self.zoom_entry.set_value(self.pages[str(self.no_page)].image_zoom * 100.0)
self.file_entry.set_label(self.pages[str(self.no_page)].image_file)
pageOptions = self.pages[str(self.no_page)]
else:
self.reset()
pageOptions = PageOptions(image_x=0, image_y=0, image_zoom=1.0,
image_file=None)
self.viewport1.set_page(self.document.get_page(self.no_page),
pageOptions)
def reset(self):
self.x = 0.0
self.y = 0.0
self.zoom_entry.set_value(100.0)
self.file_entry.set_label(_('Select signature file'))
def init_adicional_popover(self):
BaseDialogWithApply.init_adicional_popover(self)
self.popover_listbox.add(generate_title_row(_('Signature'), True))
self.zoom_entry, row = generate_spinbutton_row(_('Zoom'), None)
self.zoom_entry.set_adjustment(Gtk.Adjustment(1, 100, 1000, 1, 100, 0))
self.popover_listbox.add(row)
self.file_entry, row = generate_button_row(
_('Signature'), self.on_button_watermark_clicked)
self.file_entry.set_label(_('Select signature file'))
self.popover_listbox.add(row)
if __name__ == '__main__':
dialog = SignDialog(comun.SAMPLE)
dialog.run()
|
import sys
import os
curr_path = os.path.dirname(os.path.abspath(__file__)) # absolute path of the current file
parent_path = os.path.dirname(curr_path) # parent directory
sys.path.append(parent_path) # add the parent directory to the module search path
import gym
import torch
import datetime
from common.utils import save_results, make_dir
from common.utils import plot_rewards, plot_rewards_cn
from DQN.agent import DQN
from DQN.train import train, eval # eval is assumed to be defined alongside train in DQN.train
curr_time = datetime.datetime.now().strftime("%Y%m%d-%H%M%S") # get the current time
algo_name = "DQN" # algorithm name
env_name = 'CartPole-v1' # environment name
class DQNConfig:
''' Algorithm parameter settings
'''
def __init__(self):
self.algo = algo_name # algorithm name
self.env_name = env_name # environment name
self.device = torch.device(
"cuda" if torch.cuda.is_available() else "cpu") # detect GPU
self.train_eps = 200 # number of training episodes
self.eval_eps = 30 # number of evaluation episodes
# hyperparameters
self.gamma = 0.95 # discount factor in reinforcement learning
self.epsilon_start = 0.90 # initial epsilon for the e-greedy policy
self.epsilon_end = 0.01 # final epsilon for the e-greedy policy
self.epsilon_decay = 500 # decay rate of epsilon in the e-greedy policy
self.lr = 0.0001 # learning rate
self.memory_capacity = 100000 # capacity of the experience replay buffer
self.batch_size = 64 # batch size for mini-batch SGD
self.target_update = 4 # update frequency of the target network
self.hidden_dim = 256 # size of the network's hidden layer
class PlotConfig:
''' Plotting parameter settings
'''
def __init__(self) -> None:
self.algo_name = algo_name # algorithm name
self.env_name = env_name # environment name
self.device = torch.device(
"cuda" if torch.cuda.is_available() else "cpu") # detect GPU
self.result_path = curr_path + "/outputs/" + self.env_name + \
'/' + curr_time + '/results/' # path for saving results
self.model_path = curr_path + "/outputs/" + self.env_name + \
'/' + curr_time + '/models/' # path for saving models
self.save = True # whether to save figures
def env_agent_config(cfg, seed=1):
''' Create the environment and the agent
'''
env = gym.make(cfg.env_name) # create the environment
env.seed(seed) # set the random seed
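# NOTE: env.seed() is the pre-0.26 gym API; newer gym versions seed via env.reset(seed=seed).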
state_dim = env.observation_space.shape[0] # dimension of the state space
action_dim = env.action_space.n # number of actions
agent = DQN(state_dim, action_dim, cfg) # create the agent
return env, agent
cfg = DQNConfig()
plot_cfg = PlotConfig()
# training
env, agent = env_agent_config(cfg, seed=1)
rewards, ma_rewards = train(cfg, env, agent)
make_dir(plot_cfg.result_path, plot_cfg.model_path) # create folders for saving results and models
agent.save(path=plot_cfg.model_path) # save the model
save_results(rewards, ma_rewards, tag='train',
path=plot_cfg.result_path) # save the results
plot_rewards_cn(rewards, ma_rewards, plot_cfg, tag="train") # plot the results
# evaluation
env, agent = env_agent_config(cfg, seed=10)
agent.load(path=plot_cfg.model_path) # load the model
rewards, ma_rewards = eval(cfg, env, agent)
save_results(rewards, ma_rewards, tag='eval',
path=plot_cfg.result_path) # save the results
plot_rewards_cn(rewards, ma_rewards, plot_cfg, tag="eval") # plot the results
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.test.client import RequestFactory
from djmercadopago import signals
from djmercadopago.tests import tests_utils
class TestExternalReferenceIsNotRequired(tests_utils.BaseSignalTestCase):
SIGNALS = [
[signals.checkout_preferences_created, 'checkout_preferences_created_handler']
]
def checkout_preferences_created_handler(self, signal, **kwargs):
checkout_preferences = kwargs['checkout_preferences']
checkout_preferences.update({
"items": [
{
"title": "some product",
"quantity": 1,
"currency_id": "ARS",
"unit_price": 123.45,
}
],
})
def test(self):
service = tests_utils.MercadoPagoServiceMock()
request = RequestFactory().get('/')
checkout_preference_result = service.do_checkout(request, '')
self.assertEqual(checkout_preference_result.external_reference, '')
|
# Strings in the language L = { a^n b^m | n + m is even }
# start() handles state 0, the initial state of the DFA
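# State map (read off the transition functions below):
#   0 -a-> 1, 0 -b-> 2; 1 -a-> 0, 1 -b-> 5; states 2-6 advance only on 'b'
#   (2 -b-> 3, 3 -b-> 4, 4 -b-> 3, 5 -b-> 6, 6 -b-> 5); accepting states are 3 and 5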
def start(c):
if (c == 'a'):
dfa = 1
elif (c == 'b'):
dfa = 2
# -1 is the dead state, used for any invalid input
else:
dfa = -1
return dfa
# This function is for the first state (state 1)
def state1(c):
if (c == 'a'):
dfa = 0
elif (c == 'b'):
dfa = 5
else:
dfa = -1
return dfa
# This function is for the second state (state 2)
def state2(c):
if (c == 'b'):
dfa = 3
else:
dfa = -1
return dfa
# This function is for the third state (state 3)
def state3(c):
if (c == 'b'):
dfa = 4
else:
dfa = -1
return dfa
# This function is for the fourth state (state 4)
def state4(c):
if (c == 'b'):
dfa = 3
else:
dfa = -1
return dfa
# This function is for the fifth state (state 5)
def state5(c):
if (c == 'b'):
dfa = 6
else:
dfa = -1
return dfa
# This function is for the sixth state (state 6)
def state6(c):
if (c == 'b'):
dfa = 5
else:
dfa = -1
return dfa
def isAccepted(String):
l = len(String)
# dfa tracks the current state; start in state 0
dfa = 0
for i in range(l):
if (dfa == 0):
dfa = start(String[i])
elif (dfa == 1):
dfa = state1(String[i])
elif (dfa == 2) :
dfa = state2(String[i])
elif (dfa == 3) :
dfa = state3(String[i])
elif (dfa == 4) :
dfa = state4(String[i])
elif (dfa == 5) :
dfa = state5(String[i])
elif (dfa == 6):
dfa = state6(String[i])
else:
return 0
if(dfa == 3 or dfa == 5) :
return 1
else:
return 0
# Driver code
if __name__ == "__main__" :
String = "aaabbb"
if (isAccepted(String)) :
print("ACCEPTED")
else:
print("NOT ACCEPTED")
|
#!/usr/bin/env python3
import sys, random
assert sys.version_info >= (3,7), "This script requires at least Python 3.7"
print('Greetings!') # The program greets the user.
colors = ['red','orange','yellow','green','blue','violet','purple'] # The program initializes an array of colors.
play_again = '' # the variable play_again is set to an empty string
best_count = sys.maxsize # the biggest number
while (play_again != 'n' and play_again != 'no'): #this loop continues until the user asks not to play again.
match_color = random.choice(colors) # a random color is selected from the seven options
count = 0 # the number of guesses is initialized to 0
color = '' # the input color from the user is set to an empty string
while (color != match_color): #the game loops until the user guesses correctly.
color = input("\nWhat is my favorite color? ") #\n is a special code that adds a new line
color = color.lower().strip() #any spaces the user puts in the color are removed
count += 1 # The count storing the number of guesses increments 1
if (color == match_color): # The program checks if the user guessed right
print('Correct!') # and prints correct if they have
else: # but if they haven't
print('Sorry, try again. You have guessed {guesses} times.'.format(guesses=count)) #prints "Sorry, try again." and your number of guesses
print('\nYou guessed it in {} tries!'.format(count)) # After this victory, it prints how many tries it took the user
if (count < best_count): # If this was the lowest number of tries they'd gotten:
print('This was your best guess so far!') #It tells them it's a record.
best_count = count #And sets a new record for them.
play_again = input("\nWould you like to play again (yes or no)? ").lower().strip() # It asks the user if they want to play again (the while loop condition earlier means that inputs of "no" or "n" with any capitalization will end the loop)
print('Thanks for playing!') # And once they're done, it thanks the user for playing. |
import math
class Solution(object):
def ReverseStr(self, s, k):
ans = ''
n = int(math.ceil(len(s) / (2.0 * k)))
for i in range(n):
ans += s[2*i*k:(2*i+1)*k][::-1] # reverse the first k chars of each 2k block
print '1', ans
ans += s[(2*i+1)*k:(2*i+2)*k] # keep the next k chars unreversed
print '2', ans
return ans
rs=Solution()
print rs.ReverseStr('sjodfjoig',3)
s = 'sjodfjoig'
print s[0:1]
a = ''
a += s[8:20] # slicing past the end is clamped, so this appends just 'g'
# print s[10] would raise IndexError: indexing past the end of a string fails,
print s[10:12] # but an out-of-range slice simply returns '' instead of raising
print 'a=', a
|
##############################################################################
#
# Copyright (c) 2008 Zope Foundation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
"""Tests of relstorage.adapters.mysql"""
from __future__ import absolute_import
import logging
import time
import unittest
from ZODB.utils import u64 as bytes8_to_int64
from ZODB.tests import StorageTestBase
from relstorage.adapters.mysql import MySQLAdapter
from relstorage.options import Options
from relstorage._util import timestamp_at_unixtime
from . import StorageCreatingMixin
from . import TestCase
from .util import skipOnCI
from .util import AbstractTestSuiteBuilder
from .util import DEFAULT_DATABASE_SERVER_HOST
class MySQLAdapterMixin(object):
# The MySQL schema adapter uses DROP TABLE
# and then CREATE TABLE to zap when ``zap_all(slow=True)``.
# This is *much* faster than ``DELETE FROM`` on large
# databases (since we can't use TRUNCATE). But for small databases,
# it adds lots of extra overhead to re-create those tables all the
# time, and ``DELETE FROM`` is the way to go.
zap_slow = True
def __get_db_name(self):
if self.keep_history:
db = self.base_dbname
else:
db = self.base_dbname + '_hf'
return db
def __get_adapter_options(self, dbname=None):
dbname = dbname or self.__get_db_name()
assert isinstance(dbname, str), (dbname, type(dbname))
return {
'db': dbname,
'user': 'relstoragetest',
'passwd': 'relstoragetest',
'host': DEFAULT_DATABASE_SERVER_HOST,
}
def make_adapter(self, options, db=None):
return MySQLAdapter(
options=options,
**self.__get_adapter_options(db)
)
def get_adapter_class(self):
return MySQLAdapter
def get_adapter_zconfig(self):
options = self.__get_adapter_options()
options['driver'] = self.driver_name
formatted_options = '\n'.join(
' %s %s' % (k, v)
for k, v in options.items()
)
return u"""
<mysql>
%s
</mysql>
""" % (formatted_options)
def verify_adapter_from_zconfig(self, adapter):
self.assertEqual(adapter._params, self.__get_adapter_options())
class TestGenerateTID(MySQLAdapterMixin,
StorageCreatingMixin,
TestCase,
StorageTestBase.StorageTestBase):
# pylint:disable=too-many-ancestors
def setUp(self):
super(TestGenerateTID, self).setUp()
self._storage = self._closing(self.make_storage())
def test_extract_parts(self):
unix_time = 1564063129.1277142
query = """
SELECT EXTRACT(year FROM ts) as YEAR,
EXTRACT(month FROM ts) AS month,
EXTRACT(day FROM ts) AS day,
EXTRACT(hour FROM ts) AS hour,
EXTRACT(minute FROM ts) AS minute,
%s MOD 60 AS seconds
FROM (
SELECT FROM_UNIXTIME(%s) + 0.0 AS ts
) t
"""
cursor = self._storage._load_connection.cursor
cursor.execute(query, (unix_time, unix_time))
year, month, day, hour, minute, seconds = cursor.fetchone()
self.assertEqual(year, 2019)
self.assertEqual(month, 7)
self.assertEqual(day, 25)
self.assertEqual(hour, 13) # If this is not 13, the time_zone is incorrect
self.assertEqual(minute, 58)
self.assertEqual(
round(float(seconds), 6),
49.127714)
def test_known_time(self):
now = 1564054182.277615
gmtime = (2019, 7, 25, 11, 29, 42, 3, 206, 0)
self.assertEqual(
time.gmtime(now),
gmtime
)
ts_now = timestamp_at_unixtime(now)
self.assertEqual(
ts_now.raw(),
b'\x03\xd1Oq\xb4bn\x00'
)
self.test_current_time(now)
# Problematic values due to rounding
# of minutes due to seconds
for now, gmtime in (
(1565774811.9655108,
(2019, 8, 14, 9, 26, 51, 2, 226, 0)),
(1565767799.607957,
(2019, 8, 14, 7, 29, 59, 2, 226, 0)),
(1565775177.915336,
(2019, 8, 14, 9, 32, 57, 2, 226, 0)),
(1565775299.106127,
(2019, 8, 14, 9, 34, 59, 2, 226, 0)),
(1565775479.180209,
(2019, 8, 14, 9, 37, 59, 2, 226, 0)),
):
self.assertEqual(time.gmtime(now), gmtime)
self.test_current_time(now)
def test_current_time(self, now=None):
from persistent.timestamp import TimeStamp
from relstorage._util import int64_to_8bytes
if now is None:
now = time.time()
storage = self._storage
ts_now = timestamp_at_unixtime(now)
expected_tid_int = bytes8_to_int64(ts_now.raw())
__traceback_info__ = now, now % 60.0, time.gmtime(now), ts_now, expected_tid_int
cursor = storage._load_connection.cursor
cursor.execute('CALL make_tid_for_epoch(%s, @tid)', (now,))
cursor.execute('SELECT @tid')
tid, = cursor.fetchall()[0]
tid_as_timetime = TimeStamp(int64_to_8bytes(tid)).timeTime()
__traceback_info__ += (tid_as_timetime - ts_now.timeTime(),)
self.assertEqual(
tid,
expected_tid_int
)
class MySQLTestSuiteBuilder(AbstractTestSuiteBuilder):
__name__ = 'MySQL'
def __init__(self):
from relstorage.adapters.mysql import drivers
super(MySQLTestSuiteBuilder, self).__init__(
drivers,
MySQLAdapterMixin,
extra_test_classes=(TestGenerateTID,)
)
def _compute_large_blob_size(self, use_small_blobs):
# MySQL is limited to the blob_chunk_size as there is no
# native blob streaming support. (Note: this depends on the
# max_allowed_packet size on the server as well as the driver;
# both values default to 1MB. So keep it small.)
return Options().blob_chunk_size
def _make_check_class_HistoryFreeRelStorageTests(self, bases, name, klass_dict=None):
bases = (GenericMySQLTestsMixin, ) + bases
klass_dict = {}
return self._default_make_check_class(bases, name, klass_dict=klass_dict)
# pylint:disable=line-too-long
def _make_check_class_HistoryPreservingRelStorageTests(self, bases, name, klass_dict=None):
return self._make_check_class_HistoryFreeRelStorageTests(bases, name, klass_dict)
class GenericMySQLTestsMixin(object):
@skipOnCI("Travis MySQL goes away error 2006")
def check16MObject(self):
# NOTE: If your MySQL connection goes away, check the server's value for
# `max_allowed_packet`, you probably need to increase it.
# JAM uses 64M.
# http://dev.mysql.com/doc/refman/5.7/en/packet-too-large.html
super(GenericMySQLTestsMixin, self).check16MObject()
def checkMyISAMTablesProduceErrorWhenNoCreate(self):
from ZODB.POSException import StorageError
def cb(_conn, cursor):
cursor.execute('ALTER TABLE new_oid ENGINE=MyISAM;')
self._storage._adapter.connmanager.open_and_call(cb)
# Now open a new storage that's not allowed to create
with self.assertRaisesRegex(
StorageError,
'MyISAM is no longer supported.*new_oid'
):
self.open(create_schema=False)
def checkMyISAMTablesAutoMigrate(self):
# Verify we have a broken state.
self.checkMyISAMTablesProduceErrorWhenNoCreate()
# Now a storage that can alter a table will do so.
storage = self.open()
storage.close()
storage = self.open(create_schema=False)
storage.close()
def checkIsolationLevels(self):
def assert_storage(storage):
load_cur = storage._load_connection.cursor
store_cur = storage._store_connection.cursor
version_detector = storage._adapter.version_detector
if not version_detector.supports_transaction_isolation(load_cur):
raise unittest.SkipTest("Needs MySQL better than %s" % (
version_detector.get_version(load_cur)
))
for cur, ex_iso, ex_ro, ex_timeout in (
# Timeout for load is mysql default.
[load_cur, 'REPEATABLE-READ', True, 50],
[store_cur, 'READ-COMMITTED', False, self.DEFAULT_COMMIT_LOCK_TIMEOUT],
):
cur.execute("""
SELECT @@transaction_isolation,
@@transaction_read_only,
@@innodb_lock_wait_timeout
""")
row, = cur.fetchall()
iso, ro, timeout = row
__traceback_info__ = row
iso = iso.decode('ascii') if not isinstance(iso, str) else iso
self.assertEqual(iso, ex_iso)
self.assertEqual(ro, ex_ro)
self.assertEqual(timeout, ex_timeout)
# By default
assert_storage(self._storage)
# In a new instance, and after we do a transaction with it.
from ZODB.DB import DB
import transaction
db = self._closing(DB(self._storage))
conn = self._closing(db.open())
assert_storage(conn._storage)
conn.root()['obj'] = 1
transaction.commit()
assert_storage(conn._storage)
def test_suite():
return MySQLTestSuiteBuilder().test_suite()
if __name__ == '__main__':
logging.basicConfig()
logging.getLogger("zc.lockfile").setLevel(logging.CRITICAL)
unittest.main(defaultTest="test_suite")
|
from django.apps import AppConfig
class CompilerConfig(AppConfig):
default_auto_field = 'django.db.models.BigAutoField'
name = 'compiler'
|
"""
datos de entrada
lado1-->l1-->float
lado2-->l2-->float
lado3-->l3-->float
datos de salida
area-->a-->float
"""
#entrada
l1=float(input("digite el lado 1: "))
l2=float(input("digite el lado 2: "))
l3=float(input("digite el lado 3: "))
#caja negra
s=(l1+l2+l3)/2
a=(s*(s-l1)*(s-l2)*(s-l3))**(1/2)
#salida
print("el area es: ",a)
|
#!/usr/bin/python
import sys
from os import path
import sqlite3
import datetime
tasktype = {
0: "task",
1: "project",
2: "contact",
6: "url"
}
recurrence_string = {
0: "",
1: "+1w",
2: "+1m",
3: "+1y",
5: "+2w",
7: "+6m",
9: "",
50: "+1m",
101:"+1w",
105: "+2w",
150: "+3d",
103: "+1y",
}
def convert_tasks(dbname):
"""
opens database file with name dbname
loads values from database and writes as org-mode formatted text to stdout
"""
connection = sqlite3.connect(dbname)
connection.row_factory = sqlite3.Row
cursor = connection.cursor()
cursor.execute("select name,type,type_data,recurrence,advanced_recurrence, task_id, parent_id, due_date, project_due_date, note from tasks where deleted=0 and completion_date=-62135769600.0 order by parent_id")
result = cursor.fetchall()
children = {}
no_parent = []
for r in result:
if r['parent_id'] is None:
no_parent.append(r)
elif r['parent_id'] == u'':
no_parent.append(r)
else:
if r['parent_id'] in children:
children[r['parent_id']].append(r)
else:
children[r['parent_id']] =[r]
level=1
for task in no_parent:
convert_task(task,level)
if task['task_id'] in children:
for sub_task in children[task['task_id']]:
convert_task(sub_task,level+1)
connection.close()
def deadline(date,recurrence,advanced_recurrence):
if recurrence==0:
return "<"+date+ ">"
else:
return "<"+ date +" " + recurrence_string[recurrence]+ ">"
def double_to_date(d):
if d is None:
return ""
else:
return datetime.datetime.fromtimestamp(d).strftime("%Y-%m-%d %a %H:%M")
def convert_task(row,level):
"""converts a single task row"""
print level*'*',"TODO", row['name'].encode('utf-8')
#convert due_date to DEADLINE entry
if row['type']==1:
# for project use project_due_date
due_date = row['project_due_date']
else:
# for other than project use due_date
due_date = row['due_date']
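# 64092211200.0 is the Unix timestamp of 4001-01-01, apparently used by Todo
# as a "no due date" sentinel, so anything earlier is a real due date.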
if due_date < 64092211200.0:
print "DEADLINE:", \
deadline(double_to_date(due_date),
row['recurrence'],
row['advanced_recurrence'])
print ":PROPERTIES:"
print ":Tasktype:",tasktype[row['type']]
print ":type_data:", row['type_data'].encode('utf-8')
print ":recurrence:", row['recurrence']
print ":advanced_recurrence:", row['advanced_recurrence']
print ":task_id:", row['task_id']
print ":parent_id:", row['parent_id']
print ":due_date:", row['due_date']
print ":END:"
if row['note'] != '':
print row['note'].encode('utf8')
def main(argv):
if len(argv)<1:
inputfile = path.expanduser('~/Library/Containers/com.appigo.todomac/Data/Library/Application Support/Appigo Todo/AppigoTodo_v13.sqlitedb')
else:
inputfile = argv[0]
print "# -*- mode: org -*-"
print "#+TITLE: Import from Appigo Todo"
print "#+OPTIONS: ^:{}"
print ""
convert_tasks(inputfile)
if __name__ == "__main__":
main(sys.argv[1:])
|
from ZSI.wstools.Namespaces import *
class PPCRL:
BASE = "http://schemas.microsoft.com/Passport/SoapServices/PPCRL"
FAULT = "http://schemas.microsoft.com/Passport/SoapServices/SOAPFault"
IDS = "http://schemas.microsoft.com/passport/IDS"
class MSWS:
STORAGE = "http://www.msn.com/webservices/storage/2008"
SPACES = "http://www.msn.com/webservices/spaces/v1/"
ADDRESS = "http://www.msn.com/webservices/AddressBook"
class HMNS:
RSI = "http://www.hotmail.msn.com/ws/2004/09/oim/rsi"
OIM = "http://messenger.msn.com/ws/2004/09/oim/"
|
#!/usr/bin/python3
"""
Source:
https://github.com/rshk/render-tiles
"""
from collections import namedtuple
import math
import mapnik
import worker
# ============================================================
# MAP TILE SETTINGS
# ============================================================
MIN_ZOOM_LEVEL = 1
MAX_ZOOM_LEVEL = 22
def _unitsPerPixel(zoomLevel):
return 0.703125 / math.pow(2, zoomLevel)
# Google Mercator - EPSG:900913
GOOGLEMERC = ('+proj=merc +a=6378137 +b=6378137 +lat_ts=0.0 +lon_0=0.0 '
'+x_0=0.0 +y_0=0 +k=1.0 +units=m +nadgrids=@null +no_defs')
DATA_PROJECTION = mapnik.Projection(GOOGLEMERC)
TILE_WIDTH = 256
TILE_HEIGHT = 256
def deg2num(lat_deg, lon_deg, zoom):
"""Convert coordinates to tile number"""
lat_rad = math.radians(lat_deg)
n = 2.0 ** zoom
xtile = int((lon_deg + 180.0) / 360.0 * n)
ytile = int((
1.0 - math.log(math.tan(lat_rad) + (1 / math.cos(lat_rad)))
/ math.pi) / 2.0 * n)
tile_coords = namedtuple('TileCoords', 'x,y')
return tile_coords(x=xtile, y=ytile)
def num2deg(xtile, ytile, zoom):
"""Convert tile number to coordinates (of the upper corner)"""
n = 2.0 ** zoom
lon_deg = xtile / n * 360.0 - 180.0
lat_rad = math.atan(math.sinh(math.pi * (1 - 2 * ytile / n)))
lat_deg = math.degrees(lat_rad)
return mapnik.Coord(y=lat_deg, x=lon_deg)
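# Example: at zoom 0 the whole world is a single tile, so
# deg2num(0.0, 0.0, 0) returns TileCoords(x=0, y=0) and
# num2deg(0, 0, 0) returns the tile's upper-left corner (lon -180.0, lat ~85.0511).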
# ============================================================
# TILE RENDERER
# ============================================================
class TiledMapRenderer(object):
""" Mapnik Slippy Map - Tile Renderer
"""
def __init__(self, mapobj):
self.jobs = {}
self.m = mapobj
self.GOOGLEMERC = ('+proj=merc +a=6378137 +b=6378137 +lat_ts=0.0 +lon_0=0.0 '
'+x_0=0.0 +y_0=0 +k=1.0 +units=m +nadgrids=@null +no_defs')
self.DATA_PROJECTION = mapnik.Projection(self.GOOGLEMERC)
self.TILE_WIDTH = 256
self.TILE_HEIGHT = 256
def deg2num(self, lat_deg, lon_deg, zoom):
"""Convert coordinates to tile number"""
lat_rad = math.radians(lat_deg)
n = 2.0 ** zoom
xtile = int((lon_deg + 180.0) / 360.0 * n)
ytile = int((
1.0 - math.log(math.tan(lat_rad) + (1 / math.cos(lat_rad)))
/ math.pi) / 2.0 * n)
tile_coords = namedtuple('TileCoords', 'x,y')
return tile_coords(x=xtile, y=ytile)
def num2deg(self, xtile, ytile, zoom):
"""Convert tile number to coordinates (of the upper corner)"""
n = 2.0 ** zoom
lon_deg = xtile / n * 360.0 - 180.0
lat_rad = math.atan(math.sinh(math.pi * (1 - 2 * ytile / n)))
lat_deg = math.degrees(lat_rad)
return mapnik.Coord(y=lat_deg, x=lon_deg)
def renderTile(self, z, x, y, job_id, responseHandler=None):
"""
renders map tile
:param z: Zoom level
:param x: Tile horizontal position
:param y: Tile vertical position
"""
# Set Tile Bounds
topleft = self.num2deg(x, y, z)
bottomright = self.num2deg(x + 1, y + 1, z)
# Bounding box for the tile
bbox = mapnik.Box2d(topleft, bottomright)
bbox = self.DATA_PROJECTION.forward(bbox)
# Zoom to bounding box
self.m.zoom_to_box(bbox)
# Set buffer
MIN_BUFFER = 256
self.m.buffer_size = max(self.m.buffer_size, MIN_BUFFER)
# Render image with default Agg renderer
im = mapnik.Image(TILE_WIDTH, TILE_HEIGHT)
if not responseHandler:
mapnik.render(self.m, im)
return im
# render via the worker so the request can later be cancelled by job_id
im = worker.RenderTile(job_id, self.m, im)
responseHandler.sendPngResponse(im)
return im
def cancelTile(self, job_id):
worker.CancelTileRender(job_id)
class TiledMaps(object):
def __init__(self):
self._mmaps = {}
def addMap(self, layer_id, m_map):
if self.hasMap(layer_id):
raise ValueError(layer_id, 'already exists')
self._mmaps[layer_id] = TiledMapRenderer(m_map)
def getMap(self, layer_id):
if self.hasMap(layer_id):
return self._mmaps[layer_id]
return None
def hasMap(self, layer_id):
return layer_id in self._mmaps
def getKeys(self):
return list(self._mmaps.keys())
|
# Copyright 2021 The Bellman Contributors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from pathlib import Path
from typing import Tuple
import gin
import pytest
from tf_agents.agents.tf_agent_test import MyAgent
from tf_agents.benchmark.utils import extract_event_log_values, find_event_log
from tf_agents.environments.random_tf_environment import RandomTFEnvironment
from tf_agents.environments.tf_environment import TFEnvironment
from bellman.harness.harness import ExperimentHarness
from bellman.harness.utils import EVALUATION_METRICS_DIR, GIN_CONFIG, TRAIN_METRICS_DIR
from tests.tools.bellman.specs.tensor_spec import ACTION_SPEC, TIMESTEP_SPEC
from tests.tools.bellman.training.agent_trainer import SingleComponentAgentTrainer
_REAL_REPLAY_BUFFER_CAPACITY = 1000
_MAX_STEPS = 10
@pytest.fixture(name="experiment_setup")
def _experiment_harness_fixture(tmpdir) -> Tuple[ExperimentHarness, TFEnvironment]:
root_dir = str(tmpdir / "root_dir")
environment = RandomTFEnvironment(TIMESTEP_SPEC, ACTION_SPEC, episode_end_probability=0.0)
evaluation_environment = RandomTFEnvironment(TIMESTEP_SPEC, ACTION_SPEC)
agent = MyAgent(
time_step_spec=environment.time_step_spec(), action_spec=environment.action_spec()
)
agent_trainer = SingleComponentAgentTrainer()
harness = ExperimentHarness(
root_dir=root_dir,
environment=environment,
evaluation_environment=evaluation_environment,
agent=agent,
agent_trainer=agent_trainer,
real_replay_buffer_capacity=_REAL_REPLAY_BUFFER_CAPACITY,
total_number_of_environment_steps=_MAX_STEPS,
summary_interval=1,
evaluation_interval=_MAX_STEPS,
number_of_evaluation_episodes=1,
)
return harness, environment
def test_serialise_config_empty_operational_config_tensorboard_events_file(experiment_setup):
experiment_harness, _ = experiment_setup
base_dir = experiment_harness.define_base_experiment_directory()
experiment_harness.serialise_config(base_dir)
assert not gin.operative_config_str()
event_file = find_event_log(base_dir)
values = extract_event_log_values(event_file, GIN_CONFIG)
assert not values[0][0]
@pytest.fixture(name="dummy_gin_global_config")
def _dummy_gin_config_file_fixture(tmpdir):
dummy_config_file_path = tmpdir / "dummy_config.gin"
dummy_config_file_path.write_text("test_fn.test_arg = 1", encoding="utf-8")
@gin.configurable
def test_fn(test_arg):
pass
gin.parse_config_file(dummy_config_file_path)
# call the function so the `test_arg` is added to the "operative" config.
test_fn() # pylint: disable=no-value-for-parameter
yield
gin.clear_config()
def test_serialise_config_operational_config_tensorboard_events_file(
experiment_setup, dummy_gin_global_config
):
experiment_harness, _ = experiment_setup
base_dir = experiment_harness.define_base_experiment_directory()
experiment_harness.serialise_config(base_dir)
event_file = find_event_log(base_dir)
values = extract_event_log_values(event_file, GIN_CONFIG)
assert "test_arg" in str(values[0][0])
def test_define_tensorboard_directories(experiment_setup):
experiment_harness, _ = experiment_setup
base_dir = experiment_harness.define_base_experiment_directory()
train_dir, eval_dir = experiment_harness.define_tensorboard_directories(base_dir)
train_dir_path = Path(train_dir)
eval_dir_path = Path(eval_dir)
assert str(train_dir_path.parent) == base_dir
assert str(eval_dir_path.parent) == base_dir
assert train_dir_path.name == TRAIN_METRICS_DIR
assert eval_dir_path.name == EVALUATION_METRICS_DIR
def test_create_summary_writers_parameters(tmpdir, experiment_setup):
experiment_harness, _ = experiment_setup
train_dir = str(tmpdir / TRAIN_METRICS_DIR)
eval_dir = str(tmpdir / EVALUATION_METRICS_DIR)
train_summary_writer, eval_summary_writer = experiment_harness.create_summary_writers(
train_dir, eval_dir
)
assert train_summary_writer._metadata["logdir"] == train_dir
assert eval_summary_writer._metadata["logdir"] == eval_dir
def test_real_replay_buffer_capacity(experiment_setup):
experiment_harness, _ = experiment_setup
real_replay_buffer = experiment_harness.create_real_replay_buffer()
assert real_replay_buffer.capacity == _REAL_REPLAY_BUFFER_CAPACITY
@pytest.mark.parametrize("steps_to_collect", [1, 3, 5, 7, 9])
def test_real_driver_and_real_replay_buffer(experiment_setup, steps_to_collect):
experiment_harness, environment = experiment_setup
real_replay_buffer = experiment_harness.create_real_replay_buffer()
experiment_harness._max_steps = steps_to_collect
agent_collect_driver, _ = experiment_harness.create_real_drivers(real_replay_buffer, [])
agent_collect_driver.run(environment.reset())
trajectories = real_replay_buffer.gather_all()
assert trajectories.step_type.shape == (1, steps_to_collect)
@pytest.mark.parametrize("steps_to_collect", [1, 3, 5, 7, 9])
def test_random_policy_driver_and_real_replay_buffer(experiment_setup, steps_to_collect):
experiment_harness, environment = experiment_setup
real_replay_buffer = experiment_harness.create_real_replay_buffer()
experiment_harness._max_steps = steps_to_collect
_, random_policy_collect_driver = experiment_harness.create_real_drivers(
real_replay_buffer, []
)
random_policy_collect_driver.run(environment.reset())
trajectories = real_replay_buffer.gather_all()
assert trajectories.step_type.shape == (1, steps_to_collect)
|
from __future__ import print_function
import os
import sys
import re
import time
import argparse
import warnings
from contextlib import contextmanager
from collections import defaultdict, OrderedDict
from six import string_types
from six.moves import cStringIO
from numpy import ndarray
try:
import objgraph
except ImportError:
objgraph = None
try:
import psutil
except ImportError:
psutil = None
from openmdao.devtools.iprof_utils import _create_profile_callback, find_qualified_name, \
func_group, _collect_methods, _Options, _setup_func_group,\
_get_methods
from openmdao.devtools.memory import mem_usage
from openmdao.utils.mpi import MPI
_trace_calls = None # pointer to function that implements the trace
_registered = False # prevents multiple atexit registrations
_printer = None
MAXLINE = 80
tab = ' '
time0 = None
addr_regex = re.compile(" at 0x[0-9a-fA-F]+")
def _indented_print(f_locals, d, indent, excludes=set(['__init__', 'self', '__class__']),
show_ptrs=False):
"""
Print trace info, indenting based on call depth.
"""
global _printer
sindent = tab * indent
sep = '=' if d is f_locals else ':'
for name in sorted(d, key=lambda a: str(a)):
if name not in excludes:
if isinstance(d[name], (dict, OrderedDict)):
f = cStringIO()
save = _printer
_printer = _get_printer(f)
_indented_print(f_locals, d[name], 0, show_ptrs=show_ptrs)
_printer = save
s = " %s%s%s{%s}" % (sindent, name, sep, f.getvalue())
else:
s = " %s%s%s%s" % (sindent, name, sep, d[name])
if not show_ptrs and ' object at ' in s:
s = addr_regex.sub('', s)
linelen = len(s)
leneq = len(s.split(sep, 1)[0])
if linelen > MAXLINE:
if '\n' in s:
# change indent
s = s.replace("\n", "\n%s" % (' '*leneq))
_printer(s)
def _get_printer(stream, rank=-1):
"""
Return a custom print function that outputs to the given stream and on the given rank.
"""
if MPI and rank >= MPI.COMM_WORLD.size:
if MPI.COMM_WORLD.rank == 0:
print("Specified rank (%d) is outside of the valid range (0-%d)." %
(rank, MPI.COMM_WORLD.size - 1))
exit()
# rank < 0 means output on all ranks
if not MPI or rank < 0 or MPI.COMM_WORLD.rank == rank:
def prt(*args, **kwargs):
print(*args, file=stream, flush=True, **kwargs)
else:
def prt(*args, **kwargs):
pass
return prt
def _trace_call(frame, arg, stack, context):
"""
This is called after we have matched based on glob pattern and isinstance check.
"""
global time0
if time0 is None:
time0 = time.time()
(qual_cache, method_counts, class_counts, id2count,
verbose, memory, leaks, stream, show_ptrs) = context
funcname = find_qualified_name(frame.f_code.co_filename,
frame.f_code.co_firstlineno, qual_cache)
self = frame.f_locals['self']
try:
pname = "(%s)" % self.pathname
except AttributeError:
pname = ""
cname = self.__class__.__name__
my_id = id(self)
if my_id in id2count:
id_count = id2count[my_id]
else:
class_counts[cname] += 1
id2count[my_id] = id_count = class_counts[cname]
sname = "%s#%d%s" % (self.__class__.__name__, id_count, pname)
fullname = '.'.join((sname, funcname))
method_counts[fullname] += 1
indent = tab * (len(stack)-1)
if verbose:
_printer("%s--> %s (%d)" % (indent, fullname, method_counts[fullname]))
_indented_print(frame.f_locals, frame.f_locals, len(stack)-1, show_ptrs=show_ptrs)
else:
_printer("%s-->%s" % (indent, fullname))
if memory is not None:
memory.append(mem_usage())
if leaks is not None:
stats = objgraph.typestats()
stats['frame'] += 1
stats['cell'] += 1
stats['list'] += 1
leaks.append(stats)
def _trace_return(frame, arg, stack, context):
"""
This is called when a matched function returns.
This only happens if show_return is True when setup() is called.
"""
global time0
(qual_cache, method_counts, class_counts, id2count,
verbose, memory, leaks, stream, show_ptrs) = context
funcname = find_qualified_name(frame.f_code.co_filename,
frame.f_code.co_firstlineno, qual_cache)
self = frame.f_locals['self']
try:
pname = "(%s)" % self.pathname
except AttributeError:
pname = ""
sname = "%s#%d%s" % (self.__class__.__name__, id2count[id(self)], pname)
indent = tab * len(stack)
if memory is not None:
current_mem = mem_usage()
last_mem = memory.pop()
if current_mem != last_mem:
delta = current_mem - last_mem
_printer("%s<-- %s (time: %8.5f) (total: %6.3f MB) (diff: %+.0f KB)" %
(indent, '.'.join((sname, funcname)), time.time() - time0, current_mem,
delta * 1024.))
# add this delta to all callers so when they calculate their own delta, this
# delta won't be included
for i in range(len(memory) - 1, -1, -1):
memory[i] += delta
else:
_printer("%s<-- %s (time: %8.5f) (total: %6.3f MB)" %
(indent, '.'.join((sname, funcname)), time.time() - time0, current_mem))
else:
_printer("%s<-- %s" % (indent, '.'.join((sname, funcname))))
if verbose:
if arg is not None:
s = "%s %s" % (indent, arg)
if not show_ptrs and ' object at ' in s:
s = addr_regex.sub('', s)
_printer(s)
if leaks is not None:
last_objs = leaks.pop()
for name, _, delta_objs in objgraph.growth(peak_stats=last_objs):
_printer("%s %s %+d" % (indent, name, delta_objs))
def _setup(options):
if not func_group:
_setup_func_group()
global _registered, _trace_calls, _printer
verbose = options.verbose
memory = options.memory
leaks = options.leaks
if not _registered:
methods = _get_methods(options, default='openmdao')
call_stack = []
qual_cache = {}
method_counts = defaultdict(int)
class_counts = defaultdict(lambda: -1)
id2count = {}
do_ret = _trace_return
if memory:
if psutil is None:
raise RuntimeError("Memory tracing requires the 'psutil' package. "
"Install it using 'pip install psutil'.")
memory = []
else:
memory = None
if leaks:
if objgraph is None:
raise RuntimeError("Leak detection requires the 'objgraph' package. "
"Install it using 'pip install objgraph'.")
leaks = []
else:
leaks = None
if options.outfile == 'stdout':
stream = sys.stdout
elif options.outfile == 'stderr':
stream = sys.stderr
else:
stream = open(options.outfile, 'w')
_printer = _get_printer(stream, options.rank)
_trace_calls = _create_profile_callback(call_stack, _collect_methods(methods),
do_call=_trace_call,
do_ret=do_ret,
context=(qual_cache, method_counts,
class_counts, id2count, verbose, memory,
leaks, stream, options.show_ptrs),
filters=options.filters)
def setup(methods=None, verbose=None, memory=None, leaks=False, rank=-1, show_ptrs=False,
outfile='stdout'):
"""
Setup call tracing.
Parameters
----------
methods : list of (glob, (classes...)) or None
Methods to be traced, based on glob patterns and isinstance checks.
verbose : bool
If True, show function locals and return values.
memory : bool
If True, show functions that increase memory usage.
leaks : bool
If True, show objects that are created within a function and not garbage collected.
rank : int
MPI rank where output is desired. The default, -1 means output from all ranks.
show_ptrs : bool
If True, show addresses of printed objects.
outfile : file-like or str
Output file.
"""
_setup(_Options(methods=methods, verbose=verbose, memory=memory, leaks=leaks, rank=rank,
show_ptrs=show_ptrs, outfile=outfile))
def start():
"""
Start call tracing.
"""
global _trace_calls
if sys.getprofile() is not None:
raise RuntimeError("another profile function is already active.")
if _trace_calls is None:
raise RuntimeError("trace.setup() was not called before trace.start().")
sys.setprofile(_trace_calls)
def stop():
"""
Stop call tracing.
"""
sys.setprofile(None)
@contextmanager
def tracing(methods=None, verbose=False, memory=False, leaks=False, show_ptrs=False):
"""
Turn on call tracing within a certain context.
Parameters
----------
methods : list of (glob, (classes...)) or str or None
Methods to be traced, based on glob patterns and isinstance checks. If value
is a string, use that string to lookup a 'canned' method list by name.
verbose : bool
If True, show function locals and return values.
memory : bool
If True, show functions that increase memory usage.
leaks : bool
If True, show objects that are created within a function and not garbage collected.
show_ptrs : bool
If True, show addresses of printed objects.
"""
setup(methods=methods, verbose=verbose, memory=memory, leaks=leaks, show_ptrs=show_ptrs)
start()
try:
yield
finally:
stop() # make sure tracing is disabled even if the body raises
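# Example usage (a sketch; assumes an OpenMDAO Problem named `prob`):
#
#     with tracing(methods='openmdao', verbose=True):
#         prob.run_model()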
class tracedfunc(object):
"""
Decorator that activates tracing for a particular function.
Parameters
----------
methods : list of (glob, (classes...)) tuples, optional
Methods to be traced, based on glob patterns and isinstance checks.
verbose : bool
If True, show function locals and return values.
memory : bool
If True, show functions that increase memory usage.
leaks : bool
If True, show objects that are created within a function and not garbage collected.
filters : list of str or None
If not None, evaluate as an expression in the frame of matching trace functions. If
True, include the function in the trace. Up to one expression per class.
show_ptrs : bool
If True, show addresses of printed objects.
"""
def __init__(self, methods=None, verbose=False, memory=False, leaks=False, filters=None,
show_ptrs=False):
self.options = _Options(methods=methods, verbose=verbose, memory=memory, leaks=leaks,
filters=filters, show_ptrs=show_ptrs)
self._call_setup = True
def __call__(self, func):
def wrapped(*args, **kwargs):
if self._call_setup:
_setup(self.options)
self._call_setup = False
start()
ret = func(*args, **kwargs)
stop()
return ret
return wrapped
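# Example usage (a sketch):
#
#     @tracedfunc(methods='openmdao')
#     def run_case():
#         ...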
def _itrace_setup_parser(parser):
"""
Set up the command line options for the 'openmdao trace' command line tool.
"""
if not func_group:
_setup_func_group()
parser.add_argument('file', nargs=1, help='Python file to be traced.')
parser.add_argument('-g', '--group', action='store', dest='methods',
default='openmdao',
help='Determines which group of methods will be traced. Default is "openmdao".'
' Options are: %s' % sorted(func_group.keys()))
parser.add_argument('-v', '--verbose', action='store_true', dest='verbose',
help="Show function locals and return values.")
parser.add_argument('--ptrs', action='store_true', dest='show_ptrs',
help="Show addresses of printed objects.")
parser.add_argument('-m', '--memory', action='store_true', dest='memory',
help="Show memory usage.")
parser.add_argument('-l', '--leaks', action='store_true', dest='leaks',
help="Show objects that are not garbage collected after each function call.")
parser.add_argument('-r', '--rank', action='store', dest='rank', type=int,
default=-1, help='MPI rank where output is desired. Default is all ranks.')
parser.add_argument('-o', '--outfile', action='store', dest='outfile',
default='stdout', help='Output file. Defaults to stdout.')
parser.add_argument('-f', '--filter', action='append', dest='filters',
default=[],
help='An expression. If it evaluates to True for any matching trace '
'function, that function will be displayed in the trace. One '
'expression can be added for each class.')
def _itrace_exec(options):
"""
Process command line args and perform tracing on a specified python file.
"""
progname = options.file[0]
sys.path.insert(0, os.path.dirname(progname))
with open(progname, 'rb') as fp:
code = compile(fp.read(), progname, 'exec')
globals_dict = {
'__file__': progname,
'__name__': '__main__',
'__package__': None,
'__cached__': None,
}
_setup(options)
start()
exec(code, globals_dict)
|
# -*- coding: utf-8 -*-
import re
from django.utils.encoding import smart_unicode
from forum.models.user import User
def find_best_match_in_name(content, uname, fullname, start_index):
uname = smart_unicode(uname)
fullname = smart_unicode(fullname)
end_index = start_index + len(fullname)
while end_index > start_index:
if content[start_index : end_index].lower() == fullname.lower():
return content[start_index : end_index]
while len(fullname) and fullname[-1] != ' ':
fullname = fullname[:-1]
fullname = fullname.rstrip()
end_index = start_index + len(fullname)
return uname
APPEAL_PATTERN = re.compile(r'(?<!\w)@\w+', re.UNICODE)
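# Matches "@username" mentions: an "@" not preceded by a word character,
# followed by one or more word characters.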
def auto_user_link(node, content):
active_users = node.absolute_parent.get_active_users()
appeals = APPEAL_PATTERN.finditer(content)
replacements = []
for appeal in appeals:
# Extract the mentioned username and look for matching active users
username = smart_unicode(appeal.group(0)[1:])
matches = []
for user in active_users:
if smart_unicode(user.username).lower().startswith(username.lower()):
matches.append(user)
if len(matches) == 1:
replacements.append(
(find_best_match_in_name(content, username, smart_unicode(matches[0].username), appeal.start(0) + 1), matches[0])
)
elif len(matches) == 0:
matches = User.objects.filter(username__istartswith=username)
if (len(matches) == 0):
continue
best_user_match = None
final_match = ""
for user in matches:
user_match = find_best_match_in_name(content, username, smart_unicode(user.username), appeal.start(0) + 1)
if (len(user_match) < len(final_match)):
continue
if (len(user_match) == len(final_match)):
if not (smart_unicode(user.username).lower() == user_match.lower()):
continue
if (best_user_match and (smart_unicode(best_user_match.username) == final_match)):
continue
best_user_match = user
final_match = user_match
replacements.append((final_match, best_user_match))
for replacement in replacements:
to_replace = "@" + smart_unicode(replacement[0])
profile_url = replacement[1].get_absolute_url()
auto_link = '<a href="%s">%s</a>' % (profile_url, to_replace)
content = content.replace(to_replace, auto_link)
return content
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from test.multiprocess_test_case import MultiProcessTestCase
import crypten
from crypten.debug import configure_logging, pdb, set_debug_mode
from torch import tensor
class TestDebug(MultiProcessTestCase):
def setUp(self):
super().setUp()
# We don't want the main process (rank -1) to initialize the communicator
if self.rank >= 0:
crypten.init()
# Testing debug mode
set_debug_mode()
def testLogging(self):
configure_logging()
def testPdb(self):
self.assertTrue(hasattr(pdb, "set_trace"))
def test_wrap_error_detection(self):
"""Force a wrap error and test whether it raises in debug mode."""
encrypted_tensor = crypten.cryptensor(0)
encrypted_tensor.share = tensor(2 ** 63 - 1)
with self.assertRaises(ValueError):
encrypted_tensor.div(2)
|
# Generated by Django 3.0.6 on 2020-05-19 10:56
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('products', '0013_auto_20200519_1052'),
]
operations = [
migrations.AlterField(
model_name='original',
name='build_year',
field=models.PositiveIntegerField(blank=True, default=0),
),
migrations.AlterField(
model_name='original',
name='price_max',
field=models.DecimalField(blank=True, decimal_places=2, default=0, max_digits=16),
),
migrations.AlterField(
model_name='original',
name='price_min',
field=models.DecimalField(blank=True, decimal_places=2, default=0, max_digits=16),
),
migrations.AlterField(
model_name='original',
name='votes',
field=models.PositiveIntegerField(default=0),
),
]
|
import A_downloadVids
import B_processDownloads
import C_FixMissedDownloads
def run():
channelList = [
"https://www.youtube.com/channel/UC_OTXxqZn1F0vtqiCLRDEmQ", #ღ NightcoreGalaxy ღ
"https://www.youtube.com/channel/UCtY3IhWM6UOlMBoUG-cNQyQ", #Foxy
"https://www.youtube.com/channel/UCNOymlVIxfFW0mVmZiNq6DA", #S A R C A S T I C .
"https://www.youtube.com/channel/UCgf41dj34t-zhBD9L2WPwyQ", #Cordelia Nightcore
"https://www.youtube.com/channel/UCjlKUwbB8RXqK1vKOqWxiHg", #Smokey
"https://www.youtube.com/channel/UCnwu06NdREQjWprJC8fvRWw", #Tommy
"https://www.youtube.com/channel/UCPtWGnX3cr6fLLB1AAohynw", #Mirima
"https://www.youtube.com/channel/UCPMhsGX1A6aPmpFPRWJUkag", #Sinon
"https://www.youtube.com/channel/UCo4c1M2_6IlWjsGy4m_3GSQ", #Nightcore Zodiac
"https://www.youtube.com/channel/UCdu0ncYJ0MZGl4bnNqi7_oQ" #Bunny-Chan
]
A_downloadVids.downloadChannels(channelList)
B_processDownloads.processDownloads()
C_FixMissedDownloads.FixMissingDownloads()
C_FixMissedDownloads.removeFoldersWithNoOriginals()
C_FixMissedDownloads.possiblyTooLong()
if __name__ == "__main__":
run() |
from scipy.integrate import odeint
from swing_config import *
f = cloudpickle.load(open('./swing_open_loop_dynamic.dll', 'rb'))
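# fv_gen builds the open-loop drive: a sinusoid amp * sin(ome * t + phi) that
# is smoothly gated off once |q| exceeds q_max while the joint keeps moving
# outward (intent inferred from the expression below).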
def fv_gen(amp, ome, phi, q_max):
return lambda t, y: amp * np.sin(ome * t + phi) / (1 + np.exp((np.abs(y[1:3])-q_max) / 0.01) * np.logical_or(np.abs(y[1:3]) < q_max, y[1:3] * y[4:] > 0))
def open_loop_test(amp, ome, phi):
amp = np.ones(2) * amp_max * amp
ome = np.ones(2) * ome_max * ome
phi = np.ones(2) * phi_max * phi
fv = fv_gen(amp, ome, phi, q_max)
q0 = np.array([np.pi/6, 0, 0])
a0 = np.array([0, 0])
v0 = fv(t0, np.r_[q0, np.zeros(3)])
y0 = np.r_[q0, 0, v0]
sol = odeint(f, y0, t, args=(param0, con0, a_max, fv, dt))
return Solution(t, sol, param0)
if __name__ == '__main__':
import matplotlib.pyplot as plt
from swing_plot import swing_plot
from swing_anim import swing_animation
amp = 0
ome = 0
phi = 0
sol = open_loop_test(amp, ome, phi)
#fig = swing_plot(sol)
#plt.show(fig)
anim = swing_animation(sol)
plt.show(anim)
|
import numpy as np # type: ignore
import pandas as pd # type: ignore
import matplotlib.pyplot as plt # type: ignore
import seaborn as sns # type: ignore
# This gives an error when running from a python script.
# Maybe, this should be set in the jupyter notebook directly.
# get_ipython().magic('matplotlib inline')
sns.set(style="white", color_codes=True)
from sklearn.model_selection import TimeSeriesSplit # type: ignore
from sklearn.model_selection import GridSearchCV # type: ignore
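# NOTE: print_rmse is referenced below but not defined or imported in this
# module (see the TODO inside cross_validation_time_series). A minimal sketch
# of what it is assumed to do: print and return the root mean squared error.
def print_rmse(y_true, y_pred):
rmse = np.sqrt(np.mean((np.asarray(y_true) - np.asarray(y_pred)) ** 2))
print('    RMSE = %0.2f' % rmse)
return rmse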
#########################################################
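# `print_rmse` is used by the utilities below but is not defined in this snippet;
# the following is a minimal sketch consistent with its usage (it prints and
# returns the root-mean-squared error).
def print_rmse(y_true, y_pred):
    rmse = np.sqrt(np.mean((np.asarray(y_true) - np.asarray(y_pred)) ** 2))
    print('    RMSE = %0.4f' % rmse)
    return rmse
#########################################################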
def cross_validation_time_series(model, df, preds, target, n_times=10, verbose=0):
    """
    This splits a time series data frame "n" times as specified in the input (default=10).
    It starts with a certain number of rows in train, then gradually increases the
    train size in steps (which it calculates automatically), while the number of
    test rows stays the same (though their content varies).
    This utility is based on sklearn's TimeSeriesSplit().
    """
if n_times > 10:
print('More than 10 splits is not recommended. Setting n_times to 10')
n_times = 10
splits = TimeSeriesSplit(n_splits=n_times)
index = 0
X = df[preds].values
y = df[target].values
non_df = {}
rmse_list = []
for train_index, test_index in splits.split(X):
X_train = X[train_index]
y_train = y[train_index]
X_test = X[test_index]
y_test = y[test_index]
if verbose == 1:
print('Iteration %d: Total Observations = %d' %(index,len(X_train)+len(X_test)))
print(' Training Index %d Observations: %s' %(len(train_index),train_index))
print(' Testing Index %d Observations: %s' %(len(test_index),test_index))
model.fit(X_train, y_train)
        # print_rmse: a minimal sketch is defined near the imports above
        rmse = print_rmse(y_test, model.predict(X_test))
rmse_list.append(rmse)
norm_rmse = rmse/y_test.std()
        print('    Split %d: Normalized RMSE = %0.2f' %(index, norm_rmse))
non_df[index] = norm_rmse
index += 1
non_df = pd.Series(non_df)
non_df.plot()
ave_norm_rmse = np.mean(rmse_list)/y.std()
    print('Normalized RMSE over entire data after %d splits = %0.2f' %(index, ave_norm_rmse))
return ave_norm_rmse
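#########################################################
# Minimal usage sketch for cross_validation_time_series (hypothetical data;
# any sklearn-style estimator with fit/predict would work the same way).
def _demo_cross_validation_time_series():
    from sklearn.linear_model import LinearRegression  # type: ignore
    rng = np.random.RandomState(0)
    demo_df = pd.DataFrame({'x': np.arange(100.0)})
    demo_df['y'] = 2 * demo_df['x'] + rng.normal(0, 1, 100)
    # five expanding-window splits on a noisy linear series
    return cross_validation_time_series(LinearRegression(), demo_df, ['x'], 'y', n_times=5)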
##########################################################
def rolling_validation_time_series(model, df, preds, target,train_size=0,
test_size=0, verbose=0):
"""
This utility uses a Walk Forward or Rolling Period time series cross validation method.
Initially it will start with a minimum number of observations to train the model.
It then gradually increases the train size in steps (which it will calculate automatically)
while fixing the number of test rows the same (though their content will vary).
Once the train+test series exceeds the number of rows in data set, it stops.
It does not use SKLearn's Time Series Split. You need to provide the initial sizes
of train and test and it will take care of the rest.
"""
    df = df.copy()  # work on a copy so the 'predictions' column does not mutate the caller's frame
index = 0
X = df[preds].values
y = df[target].values
non_df = {}
# rmse_list = [] # # TODO: Unused (check)
    if train_size == 0:
        train_size = int(np.ceil(len(y)/2))
    if test_size == 0:
        test_size = int(np.ceil(len(y)/4))
# step_size = np.int(np.ceil(test_size/10)) # TODO: Unused (check)
n_records = len(X)
### This contains the start point of test size for each K-Fold in time series
test_list = np.floor(np.linspace(train_size,n_records-1,5)).tolist()
for i in range(4):
        train_size = int(test_list[i])
        test_size = int(test_list[i+1] - test_list[i])
X_train, X_test = X[:train_size],X[train_size:train_size+test_size]
y_train, y_test = y[:train_size],y[train_size:train_size+test_size]
model.fit(X_train, y_train)
if i == 0:
### Since both start and end points are included, you have to subtract 1 from index in this
df.loc[:train_size-1,'predictions'] = y[:train_size]
df.loc[train_size:train_size+test_size-1,'predictions'] = model.predict(X_test)
elif i == 3:
            test_size = int(len(X) - train_size)
X_train, X_test = X[:train_size],X[train_size:train_size+test_size]
y_train, y_test = y[:train_size],y[train_size:train_size+test_size]
df.loc[train_size:train_size+test_size,'predictions'] = model.predict(X_test)
else:
df.loc[train_size:train_size+test_size-1,'predictions'] = model.predict(X_test)
if len(y_train) + len(y_test) >= df.shape[0]:
if verbose:
print('Iteration %d: Observations:%d' %(index+1,len(X_train)+len(X_test)))
print(' Train Size=%d, Test Size=%d' %(len(y_train),len(y_test)))
            # print_rmse: a minimal sketch is defined near the imports above
            rmse = print_rmse(y_test, model.predict(X_test))
norm_rmse = rmse/y_test.std()
non_df[i] = rmse
if verbose:
print('Normalized RMSE = %0.2f' %norm_rmse)
non_df = pd.Series(non_df)
weighted_ave_rmse = np.average(non_df.values,weights=non_df.index,axis=0)
print('\nWeighted Average of RMSE (%d iterations) = %0.2f\n Normalized Wtd Aver. RMSE (using std dev) = %0.2f'
%(index+1, weighted_ave_rmse,weighted_ave_rmse/y[:].std()))
#############################
if verbose == 1 or verbose == 2:
fig, ax1 = plt.subplots(nrows=1,ncols=1,figsize=(12,8))
ax1.plot(df[target],label='In-Sample Data', linestyle='-')
ax1.plot(df['predictions'],'g',alpha=0.6,label='Rolling Forecast')
ax1.set_xlabel('Time')
ax1.set_ylabel('Values')
ax1.legend(loc='best')
return weighted_ave_rmse, weighted_ave_rmse/y[:].std(), df
else:
if verbose:
print('Iteration %d: Observations:%d' %(index+1,len(X_train)+len(X_test)))
print(' Train Size=%d, Test Size=%d' %(len(y_train),len(y_test)))
            # print_rmse: a minimal sketch is defined near the imports above
            rmse = print_rmse(y_test, model.predict(X_test))
norm_rmse = rmse/y_test.std()
non_df[i] = rmse
if verbose:
print('Normalized RMSE = %0.2f' %norm_rmse)
index += 1
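###################################################
# Sketch of how the walk-forward folds above are laid out (hypothetical sizes):
# with 100 rows and an initial train_size of 50, the fold edges come from
# np.floor(np.linspace(train_size, n_records - 1, 5)), exactly as computed above.
def _demo_rolling_split_points():
    edges = np.floor(np.linspace(50, 99, 5)).tolist()
    assert edges == [50.0, 62.0, 74.0, 86.0, 99.0]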
###################################################
# Re-run the above statistical tests, and more. To be used when selecting viable models.
def ts_model_validation(model_results):
    """
    Run residual diagnostics to validate a fitted time series model.
    This is only intended for SARIMAX models from statsmodels; don't use it on other models.
    The input is model_results, the object returned by the model's .fit() method.
    """
het_method='breakvar'
norm_method='jarquebera'
sercor_method='ljungbox'
########################
(het_stat, het_p) = model_results.test_heteroskedasticity(het_method)[0]
norm_stat, norm_p, skew, kurtosis = model_results.test_normality(norm_method)[0]
sercor_stat, sercor_p = model_results.test_serial_correlation(method=sercor_method)[0]
sercor_stat = sercor_stat[-1] # last number for the largest lag
sercor_p = sercor_p[-1] # last number for the largest lag
# Run Durbin-Watson test on the standardized residuals.
# The statistic is approximately equal to 2*(1-r), where r is the sample autocorrelation of the residuals.
# Thus, for r == 0, indicating no serial correlation, the test statistic equals 2.
# This statistic will always be between 0 and 4. The closer to 0 the statistic,
# the more evidence for positive serial correlation. The closer to 4,
# the more evidence for negative serial correlation.
# Essentially, below 1 or above 3 is bad.
    dw = durbin_watson(model_results.filter_results.standardized_forecasts_error[0, model_results.loglikelihood_burn:])
# check whether roots are outside the unit circle (we want them to be);
# will be True when AR is not used (i.e., AR order = 0)
arroots_outside_unit_circle = np.all(np.abs(model_results.arroots) > 1)
# will be True when MA is not used (i.e., MA order = 0)
maroots_outside_unit_circle = np.all(np.abs(model_results.maroots) > 1)
    print('Test heteroskedasticity of residuals ({}): stat={:.3f}, p={:.3f}'.format(het_method, het_stat, het_p))
    print('\nTest normality of residuals ({}): stat={:.3f}, p={:.3f}'.format(norm_method, norm_stat, norm_p))
    print('\nTest serial correlation of residuals ({}): stat={:.3f}, p={:.3f}'.format(sercor_method, sercor_stat, sercor_p))
print('\nDurbin-Watson test on residuals: d={:.2f}\n\t(NB: 2 means no serial correlation, 0=pos, 4=neg)'.format(dw))
print('\nTest for all AR roots outside unit circle (>1): {}'.format(arroots_outside_unit_circle))
print('\nTest for all MA roots outside unit circle (>1): {}'.format(maroots_outside_unit_circle))
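############################################################################################################
# Usage sketch (hypothetical series): fit a statsmodels SARIMAX model and run the
# diagnostics above on the fitted results object.
def _demo_ts_model_validation():
    from statsmodels.tsa.statespace.sarimax import SARIMAX  # type: ignore
    endog = pd.Series(np.sin(np.linspace(0, 20, 200)) + np.random.RandomState(0).normal(0, 0.1, 200))
    results = SARIMAX(endog, order=(1, 0, 1)).fit(disp=False)
    ts_model_validation(results)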
############################################################################################################
def quick_ts_plot(y_true, y_pred, modelname='Prophet'):
fig,ax = plt.subplots(figsize=(15,7))
labels = ['actual','forecast']
y_true.plot(ax=ax,)
y_pred.plot(ax=ax,)
ax.legend(labels)
    plt.title('%s: Actual vs Forecast in expanding (training) window Cross Validation' %modelname, fontsize=20)
##############################################################################################
|
# Generated by Django 3.1.8 on 2021-06-04 00:25
from django.db import migrations
def create_user_input_types(apps, schema_editor):
"""
    Initialize the database with the WorkflowStepUserInputTypes for which schemas are available.
"""
WorkflowStepUserInputType = apps.get_model(
"django_workflow_system", "WorkflowStepUserInputType"
)
# True/False Question
WorkflowStepUserInputType.objects.get_or_create(
name="true_false_question",
json_schema={
"$schema": "http://json-schema.org/draft-07/schema",
"$id": "http://github.com/crcresearch/",
"type": "object",
"title": "User Input: True/False Question",
"description": "A schema representing a true/false question user input.",
"required": ["label", "inputOptions", "meta"],
"properties": {
"id": {
"type": "string",
"title": "A user input identifier.",
"description": "This value may be managed outside of the object specification and so is optional.",
"examples": ["4125-1351-1251-asfd"],
},
"label": {
"type": "string",
"title": "UI Label for Input",
"description": "Label that should be displayed by user interfaces for this input.",
"examples": ["The label to display for the input/question."],
},
"inputOptions": {
"type": "array",
"title": "Question Options",
"description": "The options to be displayed to the user for this question.",
"minItems": 2,
"maxItems": 2,
"uniqueItems": True,
"items": {"type": "boolean"},
},
"correctInput": {
"description": "Indicates which answer is the correct one.",
"type": "boolean",
},
"meta": {
"type": "object",
"required": ["inputRequired", "correctInputRequired"],
"properties": {
"inputRequired": {
"type": "boolean",
"description": "Whether or not an answer should be required from the user.",
},
"correctInputRequired": {
"type": "boolean",
"description": "Whether or not the correct answer should be required from the user.",
},
},
},
},
},
example_specification={
"label": "Is the sky blue?",
"inputOptions": [True, False],
"correctInput": True,
"meta": {"inputRequired": True, "correctInputRequired": True},
},
)
# Free Form Question
WorkflowStepUserInputType.objects.get_or_create(
name="free_form_question",
json_schema={
"$schema": "http://json-schema.org/draft-07/schema",
"type": "object",
"title": "User Input: Freeform Question",
"description": "A schema representing a free form question user input.",
"required": ["label", "meta"],
"properties": {
"id": {
"type": "string",
"title": "A user input identifier.",
"description": "This value may be managed outside of the object specification and so is optional.",
"examples": ["4125-1351-1251-asfd"],
},
"label": {
"type": "string",
"title": "UI Label for Input",
"description": "Label that should be displayed by user interfaces for this input.",
"examples": ["The label to display for the input/question."],
},
"meta": {
"type": "object",
"required": ["inputRequired", "correctInputRequired"],
"properties": {
"inputRequired": {
"type": "boolean",
"description": "Whether or not an answer should be required from the user.",
},
"correctInputRequired": {
"type": "boolean",
"description": "Whether or not the correct answer should be required from the user.",
"const": False,
},
},
},
},
},
example_specification={
"label": "What was the most interesting part of your day?",
"meta": {"inputRequired": True, "correctInputRequired": False},
},
)
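    # Note: each example_specification above is expected to validate against its
    # json_schema. A quick standalone check (sketch; the `jsonschema` package is
    # an extra dependency, not used by this migration) would be:
    #
    #     import jsonschema
    #     jsonschema.validate(instance=example_specification, schema=json_schema)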
class Migration(migrations.Migration):
dependencies = [
("django_workflow_system", "0006_auto_20210723_1204"),
]
operations = [
migrations.RunPython(create_user_input_types),
]
|
import cp2k_wfn
wfn = cp2k_wfn.cp2k_wavefunction()
wfn.read_cp2k_wfn("H2O-RESTART.wfn")
wfn.add_H()
#wfn.write_cp2k_wfn("H2O-RESTART-scf.wfn")
print(wfn)
|
#!/usr/bin/python3
from __future__ import print_function
import html
import json
import logging
import os
import re
import requests
import time
logger = logging.getLogger(__name__)
logging.basicConfig(level=logging.DEBUG)
logging.getLogger('requests').setLevel(logging.ERROR)
session = requests.Session()
username = os.environ['COMCAST_USERNAME']
password = os.environ['COMCAST_PASSWORD']
logger.debug("Finding form inputs for login...")
res = session.get('https://customer.xfinity.com/oauth/force_connect/?continue=%23%2Fdevices')
#res = session.get('https://login.comcast.net/login?r=comcast.net&s=oauth&continue=https%3A%2F%2Flogin.comcast.net%2Foauth%2Fauthorize%3Fclient_id%3Dmy-account-web%26redirect_uri%3Dhttps%253A%252F%252Fcustomer.xfinity.com%252Foauth%252Fcallback%26response_type%3Dcode%26state%3D%2523%252Fdevices%26response%3D1&client_id=my-account-web')
assert res.status_code == 200
data = {x[0]: html.unescape(x[1]) for x in re.finditer(r'<input.*?name="(.*?)".*?value="(.*?)".*?>', res.text)}
logger.debug("Found the following input fields: {}".format(data))
data = {
'user': username,
'passwd': password,
**data
}
logger.debug("Posting to login...")
res = session.post('https://login.xfinity.com/login', data=data)
assert res.status_code == 200
logger.debug("Fetching internet usage AJAX...")
res = session.get('https://customer.xfinity.com/apis/services/internet/usage')
#logger.debug("Resp: %r", res.text)
assert res.status_code == 200
js = json.loads(res.text)
out = {
'raw': js,
'used': js['usageMonths'][-1]['homeUsage'],
'total': js['usageMonths'][-1]['allowableUsage'],
'unit': js['usageMonths'][-1]['unitOfMeasure'],
}
print(json.dumps(out))
|
"""SQLAlchemy Database"""
from flask_sqlalchemy import SQLAlchemy
from flask_login import UserMixin
from . import DB
class User(UserMixin, DB.Model):
"""User information"""
id = DB.Column(DB.Integer, primary_key=True, autoincrement=True)
email = DB.Column(DB.String, unique=True)
password = DB.Column(DB.String)
def __repr__(self):
return "< User: '{}' >".format(self.email)
class Kickstarter(DB.Model):
"""Table for Kickstarter projects"""
id = DB.Column(DB.Integer, primary_key=True, autoincrement=True)
user_email = DB.Column(DB.String)
category = DB.Column(DB.String)
blurb = DB.Column(DB.String)
country = DB.Column(DB.String)
goal = DB.Column(DB.BigInteger)
location = DB.Column(DB.String)
name = DB.Column(DB.String)
state = DB.Column(DB.String)
usd_type = DB.Column(DB.String)
days_allotted = DB.Column(DB.Integer)
days_before_launch = DB.Column(DB.Integer)
def __repr__(self):
return "< Kickstarter: '{}' >".format(self.name)
|
# -*- coding: utf-8 -*-
"""
Define a suite of tests for the Synthetic module.
"""
import numpy as np
from welly import Synthetic
def test_synthetic():
"""
Test basic stuff.
"""
data = np.array([4, 2, 0, -4, -2, 1, 3, 6, 3, 1, -2, -5, -1, 0])
params = {'dt': 0.004}
s = Synthetic(data, params=params)
assert s.dt == 0.004
assert s.mnemonic == 'SYN'
|
'''
Basic utilities.
'''
# Copyright (c) 2012-2013 Wladimir J. van der Laan
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sub license,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice (including the
# next paragraph) shall be included in all copies or substantial portions
# of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
from __future__ import print_function, division, unicode_literals
from os import path
import os,sys
FILE = path.dirname(sys.modules[__name__].__file__)
BASEPATH = os.path.join(FILE, '../..')
def rnndb_path(filename):
return path.join(BASEPATH, 'rnndb', filename)
|
import sys, os, time
if sys.version_info[0] == 2:
import xml.etree.cElementTree as ET
else:
import xml.etree.ElementTree as ET
import cv2
import numpy as np
import collections
import torchvision
import torchvision.transforms as T
from collections import Counter
# detector utils
sys.path.append('../other_utils/lighttrack')
# pose estimation utils
from HPE.dataset import Preprocessing
from HPE.config import cfg
# from tfflat.base import Tester
# from tfflat.utils import mem_info
# from tfflat.logger import colorlogger
# from nms.gpu_nms import gpu_nms
# from nms.cpu_nms import cpu_nms
# import GCN utils
#from graph import visualize_pose_matching
#from graph.visualize_pose_matching import *
# import my own utils
#sys.path.append(os.path.abspath("./graph/"))
sys.path.append(os.path.abspath("../other_utils/lighttrack/"))
sys.path.append(os.path.abspath("../other_utils/lighttrack/utils"))
sys.path.append(os.path.abspath("../other_utils/lighttrack/visualizer"))
sys.path.append(os.path.abspath("../other_utils/lighttrack/graph"))
from utils_json import *
from visualizer import *
from utils_io_file import *
from utils_io_folder import *
from math import *
from natsort import natsorted, ns
import scipy.optimize as scipy_opt
import motmetrics as mm
import matplotlib.pyplot as plt
import torch
import torch.nn as nn
import torchvision.models as models
import torchvision.transforms as transforms
from torch.autograd import Variable
from PIL import Image
import csv
flag_nms = False #Default is False, unless you know what you are doing
def show_skeleton(img,pose_keypoints_2d):
joints = reshape_keypoints_into_joints(pose_keypoints_2d)
img = show_poses_from_python_data(img, joints, joint_pairs, joint_names)
def initialize_parameters():
global video_name, img_id
global nms_method, nms_thresh, min_scores, min_box_size
nms_method = 'nms'
nms_thresh = 1.
min_scores = 1e-10
min_box_size = 0.
global keyframe_interval, enlarge_scale, pose_matching_threshold
keyframe_interval = 1 # choice examples: [2, 3, 5, 8, 10, 20, 40, 100, ....]
enlarge_scale = 0.2 # how much to enlarge the bbox before pose estimation
pose_matching_threshold = 0.5
global flag_flip
flag_flip = True
global total_time_POSE, total_time_DET, total_time_ALL, total_num_FRAMES, total_num_PERSONS
total_time_POSE = 0
total_time_DET = 0
total_time_ALL = 0
total_num_FRAMES = 0
total_num_PERSONS = 0
global spacial_thresh
spacial_thresh = 0.3 # The max distance between 2 frames: Default : 0.3
global check_pose_threshold, check_pose_method
check_pose_threshold = 0.6 # The threshold on the confidence of pose estimation.
check_pose_method = 'max_average' # How to average the confidences of the key points for a global confidence
return
# def get_GT_human(img_id, image_shape, plaFrame, plaTime, plaId, plaBBTot):
# human_candidates = []
# ids = []
# for i in range(len(plaFrame)) :
# if plaFrame[i] == img_id + plaFrame[0] :
# bbox = plaBBTot[i]
# if bbox_valid(bbox,image_shape) :
# bbox_out = [bbox[0], bbox[1], bbox[2]-bbox[0], bbox[3]-bbox[1]]
# id = plaId[i]
# human_candidates.append(bbox_out)
# ids.append(id)
# return(human_candidates, ids)
# def get_GT_positions(img_id, image_shape, annotation_folder):
# human_candidates = []
# ids = []
# for i in range(len(plaFrame)) :
# if plaFrame[i] == img_id + plaFrame[0] :
# bbox = plaBBTot[i]
# if bbox_valid(bbox,image_shape) :
# bbox_out = [bbox[0], bbox[1], bbox[2]-bbox[0], bbox[3]-bbox[1]]
# id = plaId[i]
# human_candidates.append(bbox_out)
# ids.append(id)
# return(human_candidates, ids)
def enlarge_bbox(bbox, scale, image_shape):
min_x, min_y, max_x, max_y = bbox
margin_x = int(0.5 * scale[0] * (max_x - min_x))
margin_y = int(0.5 * scale[1] * (max_y - min_y))
min_x -= margin_x
max_x += margin_x
min_y -= margin_y
max_y += margin_y
min_x = max(0,min_x)
min_y = max(0,min_y)
max_x = min(image_shape[1],max_x)
max_y = min(image_shape[0],max_y)
bbox_enlarged = [min_x, min_y, max_x, max_y]
return bbox_enlarged
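def _demo_enlarge_bbox():
    # Sketch (hypothetical values): grow a 10x10 box by 20% per axis inside a
    # 100x100 image; the box stays centred and is clipped to the image bounds.
    assert enlarge_bbox([10, 10, 20, 20], [0.2, 0.2], (100, 100, 3)) == [9, 9, 21, 21]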
def parse_voc_xml(node):
voc_dict = {}
children = list(node)
if children:
def_dic = collections.defaultdict(list)
for dc in map(parse_voc_xml, children):
for ind, v in dc.items():
def_dic[ind].append(v)
voc_dict = {
node.tag:
{ind: v[0] if len(v) == 1 else v
for ind, v in def_dic.items()}
}
if node.text:
text = node.text.strip()
if not children:
voc_dict[node.tag] = text
return voc_dict
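def _demo_parse_voc_xml():
    # Sketch: a minimal VOC-style fragment (hypothetical XML) collapses to nested dicts.
    node = ET.fromstring('<annotation><path>img.png</path></annotation>')
    assert parse_voc_xml(node) == {'annotation': {'path': 'img.png'}}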
def rescale_img(img,rescale_img_factor):
shape = img.size
w = shape[0]
h = shape[1]
desired_h = h*rescale_img_factor
desired_w = w*rescale_img_factor
img = torchvision.transforms.Resize([int(desired_h), int(desired_w)])(img)
w_pad = (w - desired_w)/2.
h_pad = (h - desired_h)/2.
img = torchvision.transforms.Pad((int(w_pad),int(h_pad)))(img)
return(img)
def rescale_img_bbox(bbox,rescale_img_factor,image_shape):
w = image_shape[1]
h = image_shape[0]
bbox = np.array(bbox)*rescale_img_factor
target_w = w*rescale_img_factor
target_h = h*rescale_img_factor
w_pad = (w - target_w)/2.
h_pad = (h - target_h)/2.
new_w = target_w + 2*w_pad
new_h = target_h + 2*h_pad
bbox_center = bbox + np.array([w_pad,h_pad,w_pad,h_pad])
return(bbox_center)
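def _demo_rescale_img_bbox():
    # Sketch (hypothetical values): a box in a 100x100 image rescaled by 0.5 is
    # shrunk about the image centre and shifted by the padding added around it.
    out = rescale_img_bbox([10, 10, 20, 20], 0.5, (100, 100, 3))
    assert np.allclose(out, [30.0, 30.0, 35.0, 35.0])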
def extract_annotations(annotation_path, rescale_bbox, rescale_img_factor, image_shape):
GT_bbox_list = []
GT_idx_list = []
target = parse_voc_xml(ET.parse(annotation_path).getroot())
anno = target['annotation']
image_path = anno['path']
objects = anno['object']
for obj in objects:
idx = obj['name']
bbox = obj['bndbox']
bbox = [int(bbox[n]) for n in ['xmin', 'ymin', 'xmax', 'ymax']]
bbox = enlarge_bbox(bbox,rescale_bbox,image_shape)
bbox = rescale_img_bbox(bbox,rescale_img_factor,image_shape)
GT_bbox_list.append(bbox)
GT_idx_list.append(idx)
return(GT_bbox_list, GT_idx_list,image_path)
def player_detection(image_path, rescale_img_factor, model_detection, thres_detection):
bbox_list = []
score_list = []
max_w = 150
max_h = 150
with torch.no_grad():
im = Image.open(image_path).convert('RGB')
im = rescale_img(im,rescale_img_factor)
x = [T.ToTensor()(im).to(torch.device('cuda'))]
output, features = model_detection(x)
output = output[0]
scores = output['scores']
labels = output['labels']
boxes = output['boxes']
for i in range(len(scores)):
if scores[i]>thres_detection :
xmin,ymin,xmax,ymax = int(boxes[i][0]),int(boxes[i][1]),int(boxes[i][2]),int(boxes[i][3])
if 0 < xmax-xmin < max_w and 0 < ymax-ymin < max_h :
bbox_list.append([xmin,ymin,xmax,ymax])
score_list.append(scores[i])
return(bbox_list,score_list,features)
def light_track(pose_estimator, model_detection, visual_feat_model, layer,
image_folder, annotation_folder, rescale_bbox, rescale_img_factor,
visualize_folder, output_video_path, output_csv_path, use_features,
w_spacial, w_visual, w_pose, use_IOU, spacial_iou_thresh, thres_detection,
use_pose, use_visual_feat, imagenet_model,
display_pose, use_GT_position, flag_method, n_img_max, init_frame,
frame_interval, write_csv, write_video, keyframe_interval, visualize,
use_filter_tracks, thres_count_ids,visual_metric,
N_frame_lost_keep, N_past_to_keep, use_ReID_module,
N_past_to_keep_reID, max_vis_feat, max_dist_factor_feat, max_vis_reID, max_dist_factor_reID,
use_track_branch):
total_time_DET = 0
total_num_PERSONS = 0
total_time_ALL = 0
total_time_POSE = 0
total_time_FEAT = 0
st_time_total = time.time()
bbox_dets_list = []
frame_prev = -1
frame_cur = 0
img_id = -1
next_id = 0
bbox_dets_list_list = []
track_ids_dict_list = []
GT_bbox_list_list = []
GT_idx_list_list = []
bbox_lost_player_list = []
track_feat_dict_list = []
flag_mandatory_keyframe = False
if annotation_folder is not None :
annotation_paths = natsorted(os.listdir(annotation_folder), alg=ns.PATH | ns.IGNORECASE)
num_imgs = min(n_img_max, len(annotation_paths)//frame_interval - init_frame)
total_num_FRAMES = num_imgs
else :
image_paths = natsorted(os.listdir(image_folder), alg=ns.PATH | ns.IGNORECASE)
num_imgs = min(n_img_max, len(image_paths)//frame_interval - init_frame)
total_num_FRAMES = num_imgs
acc = mm.MOTAccumulator(auto_id=True)
image_shape = cv2.imread(os.path.join(image_folder,os.listdir(image_folder)[0])).shape
N_IOU = 0
N_feat = 0
N_reID = 0
while img_id < num_imgs-1:
img_id += 1
if annotation_folder is not None :
annotation_path = annotation_paths[img_id*frame_interval + init_frame]
annotation_path = os.path.join(annotation_folder,annotation_path)
GT_bbox_list, GT_idx_list, img_path = extract_annotations(annotation_path, rescale_bbox, rescale_img_factor, image_shape)
extension = img_path[-4:]
img_path = annotation_path.replace("annotations","frames").replace(".xml",extension)
GT_bbox_list_list.append(GT_bbox_list)
GT_idx_list_list.append(GT_idx_list)
else :
img_path = image_paths[img_id*frame_interval + init_frame]
img_path = os.path.join(image_folder,img_path)
frame_cur = img_id
if (frame_cur == frame_prev):
frame_prev -= 1
if is_keyframe(img_id, keyframe_interval) or flag_mandatory_keyframe :
flag_mandatory_keyframe = False
bbox_dets_list = []
# perform detection at keyframes
st_time_detection = time.time()
if use_GT_position :
player_candidates = GT_bbox_list
player_scores = torch.tensor([1.]*len(GT_bbox_list))
else :
#try :
player_candidates, player_scores, img_feat = player_detection(img_path, rescale_img_factor, model_detection, thres_detection)
# except Exception as e :
# print(e)
# player_candidates = []
# player_scores = []
# img_feat = []
end_time_detection = time.time()
total_time_DET += (end_time_detection - st_time_detection)
num_dets = len(player_candidates)
#print("Keyframe: {} detections".format(num_dets))
# if nothing detected at keyframe, regard next frame as keyframe because there is nothing to track
if num_dets <= 0 :
flag_mandatory_keyframe = True
# add empty result
bbox_det_dict = {"img_id": img_id,
"imgpath": img_path,
"det_id": 0,
"track_id": None,
"bbox": [0, 0, 2, 2],
"visual_feat": [],
"keypoints": []}
bbox_dets_list.append(bbox_det_dict)
bbox_dets_list_list.append(bbox_dets_list)
track_ids_dict_list.append({})
flag_mandatory_keyframe = True
continue
total_num_PERSONS += num_dets
if img_id > 0 : # First frame does not have previous frame
bbox_list_prev_frame = bbox_dets_list_list[img_id - 1].copy()
track_ids_dict_prev = track_ids_dict_list[img_id - 1].copy()
# Perform data association
for det_id in range(num_dets):
# obtain bbox position
bbox_det = player_candidates[det_id]
score_det = float(player_scores[det_id].cpu())
# enlarge bbox by 20% with same center position
bbox_det = enlarge_bbox(bbox_det, [0.,0.], image_shape)
# update current frame bbox
bbox_det_dict = {"img_id": img_id,
"det_id": det_id,
"imgpath": img_path,
"bbox": bbox_det,
"score_det": score_det}
if img_id == 0 or len(bbox_list_prev_frame) == 0 : # First frame, all ids are assigned automatically
track_id = next_id
next_id += 1
method = None
else : # Perform data association
if use_IOU : # use IOU as first criteria
spacial_intersect = get_spacial_intersect(bbox_det, bbox_list_prev_frame)
track_id, match_index = get_track_id_SpatialConsistency(spacial_intersect, bbox_list_prev_frame, spacial_iou_thresh)
else :
track_id = -1
if track_id != -1:
method = 'spacial'
else :
method = None
# update current frame bbox
bbox_det_dict = {"img_id": img_id,
"imgpath": img_path,
"det_id": det_id,
"track_id": track_id,
"bbox": bbox_det,
"score_det": score_det,
"method": method,
"visual_feat": [],
"keypoints": []}
bbox_dets_list.append(bbox_det_dict)
# Check for repetitions in track ids and remove them.
track_ids = [bbox_det_dict["track_id"] for bbox_det_dict in bbox_dets_list]
track_ids_dict = collections.defaultdict(list)
for idx, track in enumerate(track_ids) :
track_ids_dict[track].append(idx)
keys = list(track_ids_dict.keys())
for track in keys :
if len(track_ids_dict[track]) > 1 :
for el in track_ids_dict[track] :
bbox_dets_list[el]["track_id"] = -1
bbox_dets_list[el]["method"] = None
del track_ids_dict[track]
if img_id > 0 and len(bbox_list_prev_frame) > 0 :
# Remove already assigned elements in the previous frame.
remaining_det_id_list = []
prev_to_remove = []
for det_id in range(num_dets):
track_id = bbox_dets_list[det_id]["track_id"]
if track_id == -1 :
remaining_det_id_list.append(det_id)
else :
prev_idx = track_ids_dict_prev[track_id]
prev_to_remove.append(prev_idx[0])
N_IOU+=1
for index in sorted(prev_to_remove, reverse=True):
del bbox_list_prev_frame[index]
# For candidates that are not associated yet
if len(bbox_list_prev_frame) == 0 or (not use_features and not use_ReID_module) :
# If no more candidates in previous frame : assign new ids to remaining detections
for det_id in remaining_det_id_list :
#print('no matching')
bbox_dets_list[det_id]["track_id"] = next_id
bbox_dets_list[det_id]["method"] = None
track_ids_dict[next_id].append(det_id)
next_id += 1
elif len(remaining_det_id_list) > 0 :
# For each remaining detections, perform association with a combinaison of features.
                    if (use_ReID_module or use_visual_feat) and not imagenet_model :
                        if use_GT_position :
                            img_feat, image_sizes = get_img_feat_FasterRCNN(visual_feat_model, bbox_dets_list[0]['imgpath'], rescale_img_factor)
                        # img_feat_prev and image_sizes are needed by the feature
                        # extraction below regardless of where detections come from,
                        # so compute them for the previous frame unconditionally
                        img_feat_prev, image_sizes = get_img_feat_FasterRCNN(visual_feat_model, bbox_list_prev_frame[0]['imgpath'], rescale_img_factor)
past_track_bbox_list_list = []
for bbox_prev_dict in bbox_list_prev_frame :
prev_track_id = bbox_prev_dict['track_id']
past_track_idx_list = []
past_track_bbox_list = []
for i in range(1,min(N_past_to_keep,img_id)+1):
past_track_ids_dict = track_ids_dict_list[img_id-i]
if prev_track_id in past_track_ids_dict.keys() :
idx = past_track_ids_dict[prev_track_id][0]
past_track_idx_list.append(idx)
past_track_bbox_list.append(bbox_dets_list_list[img_id-i][idx])
for past_track_bbox in past_track_bbox_list :
if use_pose :
if not past_track_bbox["keypoints"] :
st_time_pose = time.time()
inf,_ = inference_feat_keypoints(pose_estimator, past_track_bbox)
keypoints = inf[0]["keypoints"]
end_time_pose = time.time()
total_time_POSE += (end_time_pose - st_time_pose)
else :
keypoints = past_track_bbox["keypoints"]
else :
keypoints = []
if use_visual_feat :
if not list(past_track_bbox["visual_feat"]) :
st_time_feat = time.time()
if imagenet_model :
visual_feat = get_visual_feat_imagenet(visual_feat_model,layer,past_track_bbox, rescale_img_factor)
else :
visual_feat = get_visual_feat_fasterRCNN(visual_feat_model,past_track_bbox,img_feat_prev,image_sizes,use_track_branch)
end_time_feat = time.time()
total_time_FEAT += (end_time_feat - st_time_feat)
else :
visual_feat = past_track_bbox["visual_feat"]
else :
visual_feat = []
past_track_bbox["keypoints"] = keypoints
past_track_bbox["visual_feat"] = visual_feat
past_track_bbox_list_list.append(past_track_bbox_list)
for det_id in remaining_det_id_list :
bbox_det_dict = bbox_dets_list[det_id]
if use_pose :
st_time_pose = time.time()
                            # NOTE: rescale_img_factor lands in the flag_nms parameter of
                            # inference_feat_keypoints; it is effectively ignored because
                            # that function tests `flag_nms is True`
                            inf,_ = inference_feat_keypoints(pose_estimator, bbox_det_dict, rescale_img_factor)
keypoints = inf[0]["keypoints"]
end_time_pose = time.time()
total_time_POSE += (end_time_pose - st_time_pose)
else :
keypoints = []
if use_visual_feat :
st_time_feat = time.time()
if imagenet_model :
visual_feat = get_visual_feat_imagenet(visual_feat_model,layer,bbox_det_dict, rescale_img_factor)
else :
visual_feat = get_visual_feat_fasterRCNN(visual_feat_model,bbox_det_dict, img_feat,image_sizes,use_track_branch)
end_time_feat = time.time()
total_time_FEAT += (end_time_feat - st_time_feat)
else :
visual_feat = []
bbox_det_dict["keypoints"] = keypoints
bbox_det_dict["visual_feat"] = visual_feat
if use_features :
log = ''
bbox_dets_list, bbox_list_prev_frame, past_track_bbox_list_list, track_ids_dict, N_feat = feature_matching(bbox_dets_list,remaining_det_id_list, bbox_list_prev_frame,
past_track_bbox_list_list, track_ids_dict, visual_metric, max_dist_factor_feat, max_vis_feat, w_visual, w_spacial, w_pose,
use_visual_feat, use_pose, image_shape, log, N_past_to_keep, N_feat)
if use_ReID_module :
# Adjust lost player list
bbox_lost_player_list = [bbox_lost_player for bbox_lost_player in bbox_lost_player_list if img_id - bbox_lost_player['img_id'] < N_frame_lost_keep]
bbox_lost_player_list += bbox_list_prev_frame
past_track_bbox_list_list_reID = []
for bbox_prev_dict in bbox_lost_player_list :
prev_track_id = bbox_prev_dict['track_id']
prev_im_id = bbox_prev_dict['img_id']
past_track_idx_list = []
past_track_bbox_list = []
for i in range(min(N_past_to_keep_reID,prev_im_id+1)):
past_track_ids_dict = track_ids_dict_list[prev_im_id-i]
if prev_track_id in past_track_ids_dict.keys() :
idx = past_track_ids_dict[prev_track_id][0]
past_track_idx_list.append(idx)
past_track_bbox_list.append(bbox_dets_list_list[prev_im_id-i][idx])
for past_track_bbox in past_track_bbox_list :
if use_pose :
if not past_track_bbox["keypoints"] :
st_time_pose = time.time()
inf,_ = inference_feat_keypoints(pose_estimator, past_track_bbox)
keypoints = inf[0]["keypoints"]
end_time_pose = time.time()
total_time_POSE += (end_time_pose - st_time_pose)
else :
keypoints = past_track_bbox["keypoints"]
else :
keypoints = []
if use_visual_feat :
if not list(past_track_bbox["visual_feat"]) :
st_time_feat = time.time()
if imagenet_model :
visual_feat = get_visual_feat_imagenet(visual_feat_model,layer,past_track_bbox, rescale_img_factor)
else :
visual_feat = get_visual_feat_fasterRCNN(visual_feat_model,past_track_bbox,img_feat_prev,image_sizes,use_track_branch)
end_time_feat = time.time()
total_time_FEAT += (end_time_feat - st_time_feat)
else :
visual_feat = past_track_bbox["visual_feat"]
else :
visual_feat = []
past_track_bbox["keypoints"] = keypoints
past_track_bbox["visual_feat"] = visual_feat
past_track_bbox_list_list_reID.append(past_track_bbox_list)
#print(past_track_bbox_list_list_reID)
# Get non_associated dets
remaining_det_id_list = []
for det_id in range(num_dets):
track_id = bbox_dets_list[det_id]["track_id"]
if track_id == -1 :
remaining_det_id_list.append(det_id)
# Re-ID module
if len(remaining_det_id_list) > 0 and len(bbox_lost_player_list) > 0 :
log = ''
bbox_dets_list, bbox_lost_player_list, past_track_bbox_list_list_reID, track_ids_dict, N_reID = feature_matching(bbox_dets_list,remaining_det_id_list, bbox_lost_player_list,
past_track_bbox_list_list_reID, track_ids_dict, visual_metric, max_dist_factor_reID, max_vis_reID, w_visual, w_spacial, w_pose, use_visual_feat,
use_pose, image_shape, log, N_past_to_keep_reID, N_reID)
# if still can not find a match from previous frame, then -1
for det_id in range(num_dets):
track_id = bbox_dets_list[det_id]["track_id"]
if track_id == -1 :
bbox_dets_list[det_id]["track_id"] = next_id
bbox_dets_list[det_id]["method"] = None
track_ids_dict[next_id].append(det_id)
next_id += 1
else :
pass
# update frame
bbox_dets_list_list.append(bbox_dets_list)
track_ids_dict_list.append(track_ids_dict)
frame_prev = frame_cur
else:
''' NOT KEYFRAME: multi-target pose tracking '''
print('we only work with keyframes for now')
# bbox_dets_list_next = []
# keypoints_list_next = []
#
# num_dets = len(keypoints_list)
# total_num_PERSONS += num_dets
#
# if num_dets == 0:
# flag_mandatory_keyframe = True
#
# for det_id in range(num_dets):
# keypoints = keypoints_list[det_id]["keypoints"]
#
# # for non-keyframes, the tracked target preserves its track_id
# track_id = keypoints_list[det_id]["track_id"]
#
# # next frame bbox
# bbox_det_next = get_bbox_from_keypoints(keypoints)
# if bbox_det_next[2] == 0 or bbox_det_next[3] == 0:
# bbox_det_next = [0, 0, 2, 2]
# total_num_PERSONS -= 1
# assert(bbox_det_next[2] != 0 and bbox_det_next[3] != 0) # width and height must not be zero
# bbox_det_dict_next = {"img_id":img_id,
# "det_id":det_id,
# "track_id":track_id,
# "imgpath": img_path,
# "bbox":bbox_det_next}
#
# # next frame keypoints
# st_time_pose = time.time()
# inf_next, feat_next = inference_feat_keypoints(pose_estimator, bbox_det_dict_next)
# keypoints_next = inf_next[0]["keypoints"]
# end_time_pose = time.time()
# total_time_POSE += (end_time_pose - st_time_pose)
# #print("time for pose estimation: ", (end_time_pose - st_time_pose))
#
# # check whether the target is lost
# target_lost = is_target_lost(keypoints_next, check_pose_method, check_pose_threshold)
#
# if target_lost is False:
# bbox_dets_list_next.append(bbox_det_dict_next)
# keypoints_dict_next = {"img_id":img_id,
# "det_id":det_id,
# "track_id":track_id,
# "imgpath": img_path,
# "keypoints":keypoints_next}
# keypoints_list_next.append(keypoints_dict_next)
#
# else:
# # remove this bbox, do not register its keypoints
# bbox_det_dict_next = {"img_id":img_id,
# "det_id": det_id,
# "track_id": None,
# "imgpath": img_path,
# "bbox": [0, 0, 2, 2]}
# bbox_dets_list_next.append(bbox_det_dict_next)
#
# keypoints_null = 45*[0]
# keypoints_dict_next = {"img_id":img_id,
# "det_id":det_id,
# "track_id": None,
# "imgpath": img_path,
# "keypoints": []}
# keypoints_list_next.append(keypoints_dict_next)
# print("Target lost. Process this frame again as keyframe. \n\n\n")
# flag_mandatory_keyframe = True
#
# total_num_PERSONS -= 1
# ## Re-process this frame by treating it as a keyframe
# if img_id not in [0]:
# img_id -= 1
# break
#
# # update frame
# if flag_mandatory_keyframe is False:
# bbox_dets_list = bbox_dets_list_next
# keypoints_list = keypoints_list_next
# bbox_dets_list_list.append(bbox_dets_list)
# keypoints_list_list.append(keypoints_list)
# frame_prev = frame_cur
if use_filter_tracks :
bbox_dets_list_list = filter_tracks(bbox_dets_list_list, thres_count_ids)
''' 1. statistics: get total time for lighttrack processing'''
end_time_total = time.time()
total_time_ALL += (end_time_total - st_time_total)
print("N IOU : ", N_IOU)
print("N FEAT : ", N_feat)
print("N REID : ", N_reID)
# visualization
if visualize :
print("Visualizing Tracking Results...")
# if display_pose :
# show_all_from_dict(keypoints_list_list, bbox_dets_list_list, classes,
# joint_pairs, joint_names, image_folder, visualize_folder,
# display_pose = display_pose, flag_track = True, flag_method = flag_method)
# else :
show_all_from_dict([], bbox_dets_list_list, classes, joint_pairs, joint_names,
rescale_img_factor = rescale_img_factor,
img_folder_path = image_folder, output_folder_path = visualize_folder,
display_pose = display_pose, flag_track = True, flag_method = flag_method)
img_paths = get_immediate_childfile_paths(visualize_folder)
if write_video:
make_video_from_images(img_paths, output_video_path, fps=25, size=None, is_color=True, format="XVID")
print("Visualization Finished!")
print("Finished video {}".format(output_video_path))
''' Display statistics '''
print("total_time_ALL: {:.2f}s".format(total_time_ALL))
print("total_time_DET: {:.2f}s".format(total_time_DET))
print("total_time_POSE: {:.2f}s".format(total_time_POSE))
print("total_time_FEAT: {:.2f}s".format(total_time_FEAT))
print("total_time_TRACK: {:.2f}s".format(total_time_ALL - total_time_DET - total_time_POSE - total_time_FEAT))
print("total_num_FRAMES: {:d}".format(total_num_FRAMES))
print("total_num_PERSONS: {:d}\n".format(total_num_PERSONS))
print("Average FPS: {:.2f}fps".format(total_num_FRAMES / total_time_ALL))
print("Average FPS for Detection only : {:.2f}fps".format(total_num_FRAMES / (total_time_DET)))
print("Average FPS excluding Detection: {:.2f}fps".format(total_num_FRAMES / (total_time_ALL - total_time_DET)))
print("Average FPS for framework only: {:.2f}fps".format(total_num_FRAMES / (total_time_ALL - total_time_DET - total_time_POSE - total_time_FEAT) ))
if write_csv is True :
print(output_csv_path)
write_tracking_csv(bbox_dets_list_list, output_csv_path)
print("total_time_ALL: {:.2f}s".format(total_time_ALL))
# compute metrics
    if annotation_folder is not None :
        try :
            mota, motp, idf1, acc = evaluate_tracking(bbox_dets_list_list, GT_idx_list_list, GT_bbox_list_list)
            return(mota, motp, idf1, acc)
        except Exception as e :
            print(e)
            print('no evaluation worked')
            return(0, 0, 0, None)
# def get_track_id_SGCN(bbox_cur_frame, bbox_list_prev_frame, keypoints_cur_frame, keypoints_list_prev_frame):
# assert(len(bbox_list_prev_frame) == len(keypoints_list_prev_frame))
#
# min_index = None
# min_matching_score = sys.maxsize
# global pose_matching_threshold
# # if track_id is still not assigned, the person is really missing or track is really lost
# track_id = -1
#
# for det_index, bbox_det_dict in enumerate(bbox_list_prev_frame):
# bbox_prev_frame = bbox_det_dict["bbox"]
#
# # check the pose matching score
# keypoints_dict = keypoints_list_prev_frame[det_index]
# keypoints_prev_frame = keypoints_dict["keypoints"]
#
# pose_matching_score = get_pose_matching_score(keypoints_cur_frame, keypoints_prev_frame, bbox_cur_frame, bbox_prev_frame)
#
# if pose_matching_score <= pose_matching_threshold and pose_matching_score <= min_matching_score:
# # match the target based on the pose matching score
# min_matching_score = pose_matching_score
# min_index = det_index
#
# if min_index is None:
# return -1, None
# else:
# print('matching with GCN')
# track_id = bbox_list_prev_frame[min_index]["track_id"]
# return track_id, min_index
def feature_matching(bbox_dets_list, remaining_det_id_list, bbox_list_prev_frame, past_track_bbox_list_list, track_ids_dict,
visual_metric, max_dist_factor, max_vis, w_visual, w_spacial, w_pose, use_visual_feat,
use_pose, image_shape, log, N_past_to_keep, N_meth, show_track = False, show_NN = False):
dist_tab = []
weight_tab = []
spacial_dist = np.array([list(get_spacial_distance(bbox_dets_list[det_id]["bbox"], past_track_bbox_list_list, image_shape)) for det_id in remaining_det_id_list])
dist_tab.append(spacial_dist)
weight_tab.append(w_spacial)
if use_visual_feat :
visual_dist = np.array([list(get_visual_similarity(bbox_dets_list[det_id]['visual_feat'], past_track_bbox_list_list, N_past_to_keep, metric = visual_metric)) for det_id in remaining_det_id_list])
dist_tab.append(visual_dist)
weight_tab.append(w_visual)
    if use_pose :
        # NOTE: get_pose_similarity (defined below) takes four arguments but only
        # three are passed here; the pose term needs fixing before use_pose is
        # enabled in practice
        pose_dist = np.array([list(1-get_pose_similarity(bbox_dets_list[det_id]["bbox"],past_track_bbox_list_list, bbox_dets_list[idx]["keypoints"])) for idx,det_id in enumerate(remaining_det_id_list)])
dist_tab.append(pose_dist)
weight_tab.append(w_pose)
# if weight_by_score_det :
# weight_tab = bbox_dets_list[det_id]["score_det"]*np.array(weight_tab)
# for 5 players : display first visual similarity players for control
if show_track :
for i in range(5):
bbox_det = bbox_dets_list[-i]
img_path = bbox_det['imgpath']
img = Image.open(img_path).convert('RGB')
img = rescale_img(img,0.6)
img = np.array(img)
img = cv2.cvtColor(img, cv2.COLOR_RGB2BGR)
bbox = bbox_det['bbox']
patch = img[int(bbox[1]):int(bbox[3]), int(bbox[0]):int(bbox[2])]
cv2.imshow('ref',patch)
for j in range(len(past_track_bbox_list_list[i])) :
bbox_past_frame = past_track_bbox_list_list[i][j]
img_path = bbox_past_frame['imgpath']
img = Image.open(img_path).convert('RGB')
img = rescale_img(img,0.6)
img = np.array(img)
img = cv2.cvtColor(img, cv2.COLOR_RGB2BGR)
bbox = bbox_past_frame['bbox']
patch = img[int(bbox[1]):int(bbox[3]), int(bbox[0]):int(bbox[2])]
                cv2.imwrite(str(i) + '_' + str(j) + '.png', patch)
if show_NN :
for i in range(3):
det_id = remaining_det_id_list[i]
bbox_curr_frame = bbox_dets_list[det_id]
img_path = bbox_curr_frame['imgpath']
img = Image.open(img_path).convert('RGB')
img = rescale_img(img,0.6)
img = np.array(img)
img = cv2.cvtColor(img, cv2.COLOR_RGB2BGR)
bbox = bbox_curr_frame['bbox']
patch = img[int(bbox[1]):int(bbox[3]), int(bbox[0]):int(bbox[2])]
best_visual_similarities = np.argsort(visual_dist[i])
cv2.imshow('ref',patch)
for id,j in enumerate(best_visual_similarities[:2]) :
past_track = past_track_bbox_list_list[j]
bbox_prev_frame = past_track[0]
img_path = bbox_prev_frame['imgpath']
img = Image.open(img_path).convert('RGB')
img = rescale_img(img,0.6)
img = np.array(img)
img = cv2.cvtColor(img, cv2.COLOR_RGB2BGR)
bbox = bbox_prev_frame['bbox']
patch = img[int(bbox[1]):int(bbox[3]), int(bbox[0]):int(bbox[2])]
                cv2.imwrite(str(i) + '_' + str(id) + '.png', patch)
# _, axs = plt.subplots(5, 5, figsize=(12, 12))
# axs = axs.flatten()
# for img, ax in zip(imgs, axs):
# ax.imshow(img)
# plt.show()
distx = image_shape[0]/max_dist_factor
disty = image_shape[1]/max_dist_factor
max_dist = np.sqrt(distx**2+disty**2)/np.sqrt(image_shape[0]**2 + image_shape[1]**2)
matches = compute_matches(dist_tab, weight_tab, max_dist = max_dist, max_vis = max_vis, bipart_match_algo = 'hungarian')
idx_to_remove_prev = []
    for i,match in enumerate(matches):
        if match != -1:
            # only look up the track id for real matches; match == -1 would
            # otherwise silently index the last previous-frame box
            track_id = bbox_list_prev_frame[match]["track_id"]
            det_id = remaining_det_id_list[i]
#print('matching with feature matrix')
bbox_dets_list[det_id]["track_id"] = track_id
bbox_dets_list[det_id]["method"] = log
track_ids_dict[track_id].append(det_id)
idx_to_remove_prev.append(match)
N_meth += 1
#print(log)
# if still can not find a match from previous frame, then -1
if match == -1 :
#print('no matching with feature matrix')
det_id = remaining_det_id_list[i]
bbox_dets_list[det_id]["track_id"] = -1
bbox_dets_list[det_id]["method"] = None
#bbox_dets_list[det_id]["track_id"] = next_id
#bbox_dets_list[det_id]["method"] = None
#track_ids_dict[next_id].append(det_id)
#next_id += 1
for index in sorted(idx_to_remove_prev, reverse=True):
del past_track_bbox_list_list[index]
del bbox_list_prev_frame[index]
return(bbox_dets_list, bbox_list_prev_frame, past_track_bbox_list_list, track_ids_dict, N_meth)
def evaluate_tracking(bbox_dets_list_list, GT_idx_list_list, GT_bbox_list_list) :
acc = mm.MOTAccumulator(auto_id=True)
for f, bbox_dets_list in enumerate(bbox_dets_list_list) :
track_ids = [el['track_id'] for el in bbox_dets_list]
track_boxes = [el['bbox'] for el in bbox_dets_list]
if track_ids:
track_boxes = np.stack(track_boxes, axis=0)
# x1, y1, x2, y2 --> x1, y1, width, height
track_boxes = np.stack((track_boxes[:, 0],
track_boxes[:, 1],
track_boxes[:, 2] - track_boxes[:, 0],
track_boxes[:, 3] - track_boxes[:, 1]),
axis=1)
else:
track_boxes = np.array([])
gt_ids = GT_idx_list_list[f]
gt_boxes = GT_bbox_list_list[f]
if gt_ids :
gt_boxes = np.stack(gt_boxes, axis=0)
# x1, y1, x2, y2 --> x1, y1, width, height
gt_boxes = np.stack((gt_boxes[:, 0],
gt_boxes[:, 1],
gt_boxes[:, 2] - gt_boxes[:, 0],
gt_boxes[:, 3] - gt_boxes[:, 1]),
axis=1)
else:
gt_boxes = np.array([])
distance = mm.distances.iou_matrix(gt_boxes, track_boxes, max_iou=0.5)
acc.update(gt_ids, track_ids, distance)
# mh = mm.metrics.create()
# summary = mh.compute(acc, metrics=['num_frames', 'mota', 'motp'], name='acc')
# summary1 = mh.compute(acc, metrics=['num_unique_objects','num_detections','precision','recall'], name='acc1')
# print(summary)
# print(summary1)
mh = mm.metrics.create()
summary = mh.compute_many(
[acc],
metrics=mm.metrics.motchallenge_metrics,
names=['full'])
strsummary = mm.io.render_summary(
summary,
formatters=mh.formatters,
namemap=mm.io.motchallenge_metric_names
)
print(strsummary)
out_score = mh.compute(acc, metrics=['mota', 'motp','idf1'], name='acc', return_dataframe=False)
mota = out_score['mota']
motp = out_score['motp']
idf1 = out_score['idf1']
return(mota,motp,idf1,acc)
def filter_tracks(bbox_dets_list_list, thres_count_ids = 1):
all_track_ids = [bbox_det['track_id'] for bbox_dets_list in bbox_dets_list_list for bbox_det in bbox_dets_list]
ids_counter = Counter(all_track_ids)
track_ids_to_remove = []
n = 0
for k,v in ids_counter.items() :
if v <= thres_count_ids :
track_ids_to_remove.append(k)
n+=1
print(n, 'tracks removed out of ', len(ids_counter.keys()))
for b,bbox_dets_list in enumerate(bbox_dets_list_list) :
remlist = []
for bb,bbox_det in enumerate(bbox_dets_list) :
track_id = bbox_det['track_id']
if track_id in track_ids_to_remove :
remlist.append(bb)
for index in sorted(remlist, reverse=True):
del bbox_dets_list_list[b][index]
return(bbox_dets_list_list)
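def _demo_filter_tracks():
    # Sketch (hypothetical detections): track 2 is only ever seen once, so with
    # thres_count_ids=1 it is dropped while track 1 survives in both frames.
    dets = [[{'track_id': 1}, {'track_id': 2}], [{'track_id': 1}]]
    assert filter_tracks(dets, thres_count_ids=1) == [[{'track_id': 1}], [{'track_id': 1}]]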
def get_pose_similarity(bbox_cur_frame, bbox_list_prev_frame, keypoints_cur_frame, keypoints_list_prev_frame):
pose_sim = np.zeros(len(bbox_list_prev_frame))
for det_index, bbox_det_dict in enumerate(bbox_list_prev_frame):
bbox_prev_frame = bbox_det_dict["bbox"]
keypoints_dict = keypoints_list_prev_frame[det_index]
keypoints_prev_frame = keypoints_dict["keypoints"]
pose_matching_score = get_pose_matching_score(keypoints_cur_frame, keypoints_prev_frame, bbox_cur_frame, bbox_prev_frame)
pose_sim[det_index] = pose_matching_score
return(pose_sim)
def get_track_id_SpatialConsistency(spacial_similarities, bbox_list_prev_frame, spacial_thresh):
if len(spacial_similarities) == 1 :
if spacial_similarities[0] > spacial_thresh :
max_index = 0
track_id = bbox_list_prev_frame[max_index]["track_id"]
#print('matching with dist IOU :', spacial_similarities[0])
return track_id, max_index
else :
return -1, None
sim_argsort = np.argsort(spacial_similarities)
sim_sort = spacial_similarities[sim_argsort]
if sim_sort[-1] <= 0 :
return -1, None
elif sim_sort[-1] > 0 and sim_sort[-2] <= 0 :
max_index = sim_argsort[-1]
track_id = bbox_list_prev_frame[max_index]["track_id"]
#print('matching with dist IOU :', sim_sort[-1])
return track_id, max_index
else :
if sim_sort[-1]>0.5*sim_sort[-2] and sim_sort[-1] > spacial_thresh :
max_index = sim_argsort[-1]
track_id = bbox_list_prev_frame[max_index]["track_id"]
#print('matching with dist IOU :', sim_sort[-1])
return track_id, max_index
else :
return -1, None
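def _demo_spatial_consistency():
    # Sketch (hypothetical overlap scores): a single candidate above the spatial
    # threshold inherits the previous box's track id; the matched index is returned too.
    prev = [{'track_id': 7}]
    assert get_track_id_SpatialConsistency(np.array([0.9]), prev, 0.3) == (7, 0)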
def get_spacial_intersect(bbox_cur_frame, bbox_list_prev_frame):
spacial_sim = np.zeros(len(bbox_list_prev_frame))
for bbox_index, bbox_det_dict in enumerate(bbox_list_prev_frame):
bbox_prev_frame = bbox_det_dict["bbox"]
boxA = bbox_cur_frame
boxB = bbox_prev_frame
spacial_sim[bbox_index] = iou(boxA, boxB)
return(spacial_sim)
def get_spacial_distance(bbox_cur_frame, past_track_bbox_list_list, image_shape):
bbox_list_prev_frame = [past_track_bbox_list[0] for past_track_bbox_list in past_track_bbox_list_list]
spacial_sim = np.zeros(len(bbox_list_prev_frame))
for bbox_index, bbox_det_dict in enumerate(bbox_list_prev_frame):
bbox_prev_frame = bbox_det_dict["bbox"]
centAx = (bbox_cur_frame[0]+bbox_cur_frame[2])/2.
centAy = (bbox_cur_frame[1]+bbox_cur_frame[3])/2.
centBx = (bbox_prev_frame[0]+bbox_prev_frame[2])/2.
        centBy = (bbox_prev_frame[1]+bbox_prev_frame[3])/2.
distx = np.abs(centAx-centBx)
disty = np.abs(centAy-centBy)
dist = np.sqrt(distx**2+disty**2)/np.sqrt(image_shape[0]**2 + image_shape[1]**2)
spacial_sim[bbox_index] = dist
return(spacial_sim)
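def _demo_get_spacial_distance():
    # Sketch (hypothetical boxes): identical boxes have normalized centre distance 0.
    past = [[{'bbox': [0, 0, 10, 10]}]]
    assert np.allclose(get_spacial_distance([0, 0, 10, 10], past, (100, 100, 3)), [0.0])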
def get_visual_similarity(feat, past_track_bbox_list_list, N_past_to_keep, metric = 'cos_similarity') :
    # uniform weights over the past track; an exponentially decaying scheme
    # ((1/2)**n) was tried and is left disabled
    weights = np.ones(N_past_to_keep)
res = []
feat = np.array(feat)
for past_track_bbox_list in past_track_bbox_list_list :
feat_vector = np.array([past_track_bbox_list[i]["visual_feat"].numpy()*weights[i] for i in range(len(past_track_bbox_list))])
feat_vector = np.mean(feat_vector,axis=0)
if metric == 'cos_similarity' :
res.append(np.dot(feat/np.linalg.norm(feat),feat_vector/np.linalg.norm(feat_vector)))
if metric == 'correlation' :
res.append(np.dot(feat,feat_vector))
if metric == 'l1' :
res.append(np.linalg.norm(feat-feat_vector,1))
if metric == 'l2' :
res.append(np.linalg.norm(feat-feat_vector,2))
return(np.array(res))
def get_pose_matching_score(keypoints_A, keypoints_B, bbox_A, bbox_B):
if keypoints_A == [] or keypoints_B == []:
print("a graph not correctly generated!")
return sys.maxsize
graph_A, flag_pass_check = keypoints_to_graph(keypoints_A, bbox_A)
if flag_pass_check is False:
print("c graph not correctly generated!")
return sys.maxsize
graph_B, flag_pass_check = keypoints_to_graph(keypoints_B, bbox_B)
if flag_pass_check is False:
print("d graph not correctly generated!")
return sys.maxsize
sample_graph_pair = (graph_A, graph_B)
data_A, data_B = graph_pair_to_data(sample_graph_pair)
start = time.time()
flag_match, dist = pose_matching(data_A, data_B)
end = time.time()
return dist
def get_iou_score(bbox_gt, bbox_det):
    iou_score = iou(bbox_gt, bbox_det)
    #print("iou_score: ", iou_score)
return iou_score
def is_target_lost(keypoints, method, check_pose_threshold):
num_keypoints = int(len(keypoints) / 3.0)
if method == "average":
# pure average
score = 0
for i in range(num_keypoints):
score += keypoints[3*i + 2]
score /= num_keypoints*1.0
print("target_score: {}".format(score))
elif method == "max_average":
score_list = keypoints[2::3]
score_list_sorted = sorted(score_list)
top_N = 4
assert(top_N < num_keypoints)
top_scores = [score_list_sorted[-i] for i in range(1, top_N+1)]
score = sum(top_scores)/top_N
if score < check_pose_threshold :
return True
else:
return False
def iou(boxA, boxB):
# box: (x1, y1, x2, y2)
# determine the (x, y)-coordinates of the intersection rectangle
xA = max(boxA[0], boxB[0])
yA = max(boxA[1], boxB[1])
xB = min(boxA[2], boxB[2])
yB = min(boxA[3], boxB[3])
# compute the area of intersection rectangle
interArea = max(0, xB - xA + 1) * max(0, yB - yA + 1)
# compute the area of both the prediction and ground-truth
# rectangles
boxAArea = (boxA[2] - boxA[0] + 1) * (boxA[3] - boxA[1] + 1)
boxBArea = (boxB[2] - boxB[0] + 1) * (boxB[3] - boxB[1] + 1)
# compute the intersection over union by taking the intersection
# area and dividing it by the sum of prediction + ground-truth
# areas - the interesection area
iou = interArea / float(boxAArea + boxBArea - interArea)
# return the intersection over union value
return iou
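def _demo_iou():
    # Sketch (hypothetical boxes): two 11x11 boxes overlapping on a 6x6 pixel
    # region (with the +1 convention above) give 36 / (121 + 121 - 36).
    assert round(iou((0, 0, 10, 10), (5, 5, 15, 15)), 3) == round(36.0 / 206.0, 3)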
def get_bbox_from_keypoints(keypoints_python_data, img_shape):
if keypoints_python_data == [] or keypoints_python_data == 45*[0]:
return [0, 0, 2, 2]
num_keypoints = len(keypoints_python_data)
x_list = []
y_list = []
for keypoint_id in range(int(num_keypoints / 3)):
x = keypoints_python_data[3 * keypoint_id]
y = keypoints_python_data[3 * keypoint_id + 1]
vis = keypoints_python_data[3 * keypoint_id + 2]
if vis != 0 and vis!= 3:
x_list.append(x)
y_list.append(y)
    # bail out if no visible keypoint was found (min/max would fail on empty lists)
    if not x_list or not y_list:
        return [0, 0, 2, 2]
    min_x = min(x_list)
    min_y = min(y_list)
    max_x = max(x_list)
    max_y = max(y_list)
scale = enlarge_scale # enlarge bbox by 20% with same center position
bbox = enlarge_bbox([min_x, min_y, max_x, max_y], [scale,scale], img_shape)
bbox_in_xywh = x1y1x2y2_to_xywh(bbox)
return bbox_in_xywh
def get_visual_feat_imagenet(model,layer,data, rescale_img_factor):
with torch.no_grad():
        scaler = transforms.Resize((224, 224))
normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225])
to_tensor = transforms.ToTensor()
img = Image.open(data['imgpath']).convert('RGB')
img = rescale_img(img,rescale_img_factor)
bbox = data['bbox']
box = (bbox[0],bbox[1],bbox[2],bbox[3])
patch = img.crop(box)
t_img = Variable(normalize(to_tensor(scaler(patch))).unsqueeze(0)).to(torch.device('cuda'))
my_embedding = torch.zeros(2048)
def copy_data(m, i, o):
my_embedding.copy_(o.data.squeeze())
h = layer.register_forward_hook(copy_data)
model(t_img)
h.remove()
feat = my_embedding
return feat
def get_img_feat_FasterRCNN(model,img_path,rescale_img_factor):
with torch.no_grad():
image = Image.open(img_path).convert('RGB')
image = rescale_img(image,rescale_img_factor)
image = [T.ToTensor()(image).to(torch.device('cuda'))]
image,_ = model.transform(image, None)
features = model.backbone(image.tensors)
return(features,image.image_sizes)
def get_visual_feat_fasterRCNN(model,data,features,image_sizes,use_track_branch):
with torch.no_grad():
bbox = data['bbox']
box = (float(bbox[0]),float(bbox[1]),float(bbox[2]),float(bbox[3]))
proposals = [torch.tensor([box]).to(torch.device('cuda'))]
if not use_track_branch :
feat = model.roi_heads(features, proposals, image_sizes, get_feature_only=True)[0].cpu()
else :
feat = model.track_heads(features, proposals, image_sizes)[0].cpu()
feat2 = model.roi_heads(features, proposals, image_sizes, get_feature_only=True)[0].cpu()
return feat
def inference_feat_keypoints(pose_estimator, test_data, flag_nms=False):
cls_dets = test_data["bbox"]
# nms on the bboxes
if flag_nms is True:
cls_dets, keep = apply_nms(cls_dets, nms_method, nms_thresh)
test_data = np.asarray(test_data)[keep]
if len(keep) == 0:
return -1
else:
test_data = [test_data]
# crop and detect pose
pose_heatmaps, feat, details, cls_skeleton, crops, start_id, end_id = get_pose_feat_from_bbox(pose_estimator, test_data, cfg)
# get keypoint positions from pose
keypoints = get_keypoints_from_pose(pose_heatmaps, details, cls_skeleton, crops, start_id, end_id)
# dump results
pose_results = prepare_results(test_data[0], keypoints, cls_dets)
#feat /= np.linalg.norm(feat)
return pose_results, feat
def apply_nms(cls_dets, nms_method, nms_thresh):
# nms and filter
keep = np.where((cls_dets[:, 4] >= min_scores) &
((cls_dets[:, 3] - cls_dets[:, 1]) * (cls_dets[:, 2] - cls_dets[:, 0]) >= min_box_size))[0]
cls_dets = cls_dets[keep]
if len(cls_dets) > 0:
if nms_method == 'nms':
keep = gpu_nms(cls_dets, nms_thresh)
elif nms_method == 'soft':
keep = cpu_soft_nms(np.ascontiguousarray(cls_dets, dtype=np.float32), method=2)
else:
assert False
cls_dets = cls_dets[keep]
return cls_dets, keep
def get_pose_feat_from_bbox(pose_estimator, test_data, cfg):
cls_skeleton = np.zeros((len(test_data), cfg.nr_skeleton, 3))
crops = np.zeros((len(test_data), 4))
batch_size = 1
start_id = 0
end_id = min(len(test_data), batch_size)
test_imgs = []
details = []
for i in range(start_id, end_id):
test_img, detail = Preprocessing(test_data[i], stage='test')
test_imgs.append(test_img)
details.append(detail)
details = np.asarray(details)
feed = list(test_imgs)  # copy, since flipped images are appended to feed below
for i in range(end_id - start_id):
ori_img = test_imgs[i][0].transpose(1, 2, 0)
if flag_flip:
flip_img = cv2.flip(ori_img, 1)
feed.append(flip_img.transpose(2, 0, 1)[np.newaxis, ...])
feed = np.vstack(feed)
predict = pose_estimator.predict_one([feed.transpose(0, 2, 3, 1).astype(np.float32)])
res = predict[0]
res = res.transpose(0, 3, 1, 2)
try:
feat = predict[1].squeeze()[0, :]  # first row of the 2 x 1024 feature output
feat /= np.linalg.norm(feat)  # L2-normalize
except Exception:
# the pose network exposes no feature output
feat = None
if flag_flip:
for i in range(end_id - start_id):
fmp = res[end_id - start_id + i].transpose((1, 2, 0))
fmp = cv2.flip(fmp, 1)
fmp = list(fmp.transpose((2, 0, 1)))
for (q, w) in cfg.symmetry:
fmp[q], fmp[w] = fmp[w], fmp[q]
fmp = np.array(fmp)
res[i] += fmp
res[i] /= 2
pose_heatmaps = res
return pose_heatmaps, feat, details, cls_skeleton, crops, start_id, end_id
def get_keypoints_from_pose(pose_heatmaps, details, cls_skeleton, crops, start_id, end_id):
res = pose_heatmaps
for test_image_id in range(start_id, end_id):
r0 = res[test_image_id - start_id].copy()
r0 /= 255.
r0 += 0.5
for w in range(cfg.nr_skeleton):
res[test_image_id - start_id, w] /= np.amax(res[test_image_id - start_id, w])
border = 10
dr = np.zeros((cfg.nr_skeleton, cfg.output_shape[0] + 2 * border, cfg.output_shape[1] + 2 * border))
dr[:, border:-border, border:-border] = res[test_image_id - start_id][:cfg.nr_skeleton].copy()
for w in range(cfg.nr_skeleton):
dr[w] = cv2.GaussianBlur(dr[w], (21, 21), 0)
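# for each joint: take the heatmap argmax, nudge it a quarter pixel toward
# the second-highest peak to reduce quantization error, and record the
# rescaled heatmap response at that location as the keypoint score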
for w in range(cfg.nr_skeleton):
lb = dr[w].argmax()
y, x = np.unravel_index(lb, dr[w].shape)
dr[w, y, x] = 0
lb = dr[w].argmax()
py, px = np.unravel_index(lb, dr[w].shape)
y -= border
x -= border
py -= border + y
px -= border + x
ln = (px ** 2 + py ** 2) ** 0.5
delta = 0.25
if ln > 1e-3:
x += delta * px / ln
y += delta * py / ln
x = max(0, min(x, cfg.output_shape[1] - 1))
y = max(0, min(y, cfg.output_shape[0] - 1))
cls_skeleton[test_image_id, w, :2] = (x * 4 + 2, y * 4 + 2)
cls_skeleton[test_image_id, w, 2] = r0[w, int(round(y) + 1e-10), int(round(x) + 1e-10)]
# map back to original images
crops[test_image_id, :] = details[test_image_id - start_id, :]
for w in range(cfg.nr_skeleton):
cls_skeleton[test_image_id, w, 0] = cls_skeleton[test_image_id, w, 0] / cfg.data_shape[1] * (crops[test_image_id][2] - crops[test_image_id][0]) + crops[test_image_id][0]
cls_skeleton[test_image_id, w, 1] = cls_skeleton[test_image_id, w, 1] / cfg.data_shape[0] * (crops[test_image_id][3] - crops[test_image_id][1]) + crops[test_image_id][1]
return cls_skeleton
def prepare_results(test_data, cls_skeleton, cls_dets):
cls_partsco = cls_skeleton[:, :, 2].copy().reshape(-1, cfg.nr_skeleton)
cls_scores = 1
dump_results = []
cls_skeleton = np.concatenate(
[cls_skeleton.reshape(-1, cfg.nr_skeleton * 3), (cls_scores * cls_partsco.mean(axis=1))[:, np.newaxis]],
axis=1)
for i in range(len(cls_skeleton)):
result = dict(image_id=test_data['img_id'],
category_id=1,
score=float(round(cls_skeleton[i][-1], 4)),
keypoints=cls_skeleton[i][:-1].round(3).tolist())
dump_results.append(result)
return dump_results
def is_keyframe(img_id, interval=10):
return img_id % interval == 0
def make_my_json(nframe, dets_list_list, output_file):
final = {}
final['frames'] = []
for img_id in range(nframe):
current_dict = {}
current_dict["img_id"] = img_id
current_dict["class"] = "frame"
current_dict["hypotheses"] = []
final['frames'].append(current_dict)
final['class'] = "video"
final['filename'] = "file.idx"
for i in range(len(dets_list_list)):
dets_list = dets_list_list[i]
if not dets_list:
continue
for j in range(len(dets_list)):
dets_dict = dets_list[j]
bbox = dets_dict["bbox"][0:4]
img_id = dets_dict["img_id"]
track_id = dets_dict["track_id"]
current_ann = {"id": track_id, "x": bbox[0], "y": bbox[1], "width": bbox[2], "height": bbox[3]}
final['frames'][img_id]["hypotheses"].append(current_ann)
return final
def write_tracking_csv(bbox_dets_list_list, output_file):
with open(output_file, mode='w') as csv_file:
writer = csv.writer(csv_file, delimiter=',', quotechar='"', quoting=csv.QUOTE_MINIMAL)
for frame_id, bbox_dets_list in enumerate(bbox_dets_list_list):
for bbox_dets in bbox_dets_list:
bbox = bbox_dets["bbox"][0:4]
to_write = [bbox_dets["img_id"], bbox_dets["track_id"], bbox[0], bbox[1], bbox[2], bbox[3], -1, -1, -1, -1]
writer.writerow(to_write)
def x1y1x2y2_to_xywh(det):
x1, y1, x2, y2 = det
w, h = int(x2) - int(x1), int(y2) - int(y1)
return [x1, y1, w, h]
def xywh_to_x1y1x2y2(det):
x1, y1, w, h = det
x2, y2 = x1 + w, y1 + h
return [x1, y1, x2, y2]
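# Example: x1y1x2y2_to_xywh([10, 20, 50, 80]) -> [10, 20, 40, 60], and
# xywh_to_x1y1x2y2([10, 20, 40, 60]) -> [10, 20, 50, 80].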
def bbox_valid(bbox, image_shape):
valid = (0 <= bbox[0] <= image_shape[1] and 0 <= bbox[2] <= image_shape[1]
and 0 <= bbox[1] <= image_shape[0] and 0 <= bbox[3] <= image_shape[0])
if bbox == [0, 0, 2, 2]:  # sentinel box returned when no keypoints were visible
valid = False
return valid
def filter_detections(human_candidates, image_shape):
res = []
for det in human_candidates:
if bbox_valid(det, image_shape):
res.append(det)
return res
def bipartite_matching_greedy(C):
"""
Code from https://github.com/facebookresearch/DetectAndTrack/blob/master/lib/core/tracking_engine.py
Computes the bipartite matching between the rows and columns, given the
cost matrix, C.
"""
C = C.copy() # to avoid affecting the original matrix
prev_ids = []
cur_ids = []
row_ids = np.arange(C.shape[0])
col_ids = np.arange(C.shape[1])
while C.size > 0:
# Find the lowest cost element
i, j = np.unravel_index(C.argmin(), C.shape)
# Add to results and remove from the cost matrix
row_id = row_ids[i]
col_id = col_ids[j]
prev_ids.append(row_id)
cur_ids.append(col_id)
C = np.delete(C, i, 0)
C = np.delete(C, j, 1)
row_ids = np.delete(row_ids, i, 0)
col_ids = np.delete(col_ids, j, 0)
return prev_ids, cur_ids
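# Illustration (made-up costs): for C = [[0.1, 0.9, 0.5], [0.8, 0.2, 0.6]],
# the cheapest cell (0, 0) is matched first, then (1, 1) in the reduced
# matrix, giving prev_ids = [0, 1] and cur_ids = [0, 1].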
def compute_matches(similarity_tab, weight_tab, max_dist=100., max_vis=100., bipart_match_algo='hungarian'):
# matches structure keeps track of which of the current boxes matches to
# which box in the previous frame. If any idx remains -1, it will be set
# as a new track.
C = np.average(np.array(similarity_tab), axis=0, weights=weight_tab).transpose()
C_dist = np.array(similarity_tab[0]).transpose()
C_vis = np.array(similarity_tab[1]).transpose()
matches = -np.ones((C.shape[1],), dtype=np.int32)
if bipart_match_algo == 'hungarian':
prev_inds, next_inds = scipy_opt.linear_sum_assignment(C)
elif bipart_match_algo == 'greedy':
prev_inds, next_inds = bipartite_matching_greedy(C)
else:
raise NotImplementedError('Unknown matching algo: {}'.format(
bipart_match_algo))
assert len(prev_inds) == len(next_inds)
for i in range(len(prev_inds)):
cost = C[prev_inds[i], next_inds[i]]
dist = C_dist[prev_inds[i], next_inds[i]]
vis = C_vis[prev_inds[i], next_inds[i]]
if dist < max_dist and vis < max_vis:
matches[next_inds[i]] = prev_inds[i]
else:
matches[next_inds[i]] = -1
return matches
|
"""
pywopwop - https://github.com/fchirono/pywopwop
Collection of convenience routines to parse and create PSU-WOPWOP input
files version 1.0.
--> PSU-WOPWOP file readers and writers
Author:
Fabio Casagrande Hirono
Dec 2021
"""
import numpy as np
import struct
from consts_and_dicts import ENDIANNESS, VALUE_LENGTH, IS_SIGNED
# %% #######################################################################
# PSU-WOPWOP initial file check for 'magic number' and endianness
# ##########################################################################
def initial_check(filename):
"""
Check the first 4 bytes of a file for the 'magic number' and return the
file endianness. If the 'magic number' is not found, the file is probably
not a PSU-WOPWOP file and an error is raised.
"""
endianness_flag = 'little'
# read first four bytes to check for 'magic number' 42 and endianness
with open(filename, 'rb') as file:
bytes_data = file.read(4)
# if data is 42 in little endian, continue
if bytes_data == b'*\x00\x00\x00':
print('Magic number is correct - file {} is little endian\n'.format(filename))
# if data is 42 in big endian, change flag and continue
elif bytes_data == b'\x00\x00\x00*':
endianness_flag = 'big'
print('Magic number is correct - file {} is big endian\n'.format(filename))
# if magic number is incorrect, it's probably not PSU-WOPWOP file!
else:
raise ValueError('Magic number is incorrect - file {} is probably not a PSU-WOPWOP patch file v1.0!'.format(filename))
return endianness_flag
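# Quick illustration: 42 little-endian is b'*\x00\x00\x00' (0x2A == '*'), so a
# file created with open('f.dat', 'wb').write((42).to_bytes(4, 'little')) makes
# initial_check('f.dat') return 'little'.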
# %% #######################################################################
# PSU-WOPWOP Plot3D-like block readers and writers
# ##########################################################################
def read_block(bytes_data, start_index, num_dims, iMax, jMax):
"""
Reads a block of data in PLOT3D-like format from a binary file.
The block of data is a (num_dims, iMax, jMax)-shaped array of float32
values. It can be a block of 3D data (x,y,z) if 'num_dims=3', 2D
data (x,y) if 'num_dims=2', or one-dimensional data (x) if 'num_dims=1'.
Parameters
----------
bytes_data : file object
File object obtained from calling 'open(filename)'.
start_index : int
Index indicating initial position of XYZ block within binary file.
num_dims : int
Number of dimensions to be read - e.g. '3' for XYZ, '2' for XY, or
'1' for surface pressure data
iMax : int
First dimension of the data block
jMax : int
Second dimension of the data block
Returns
-------
block_data : (num_dims, iMax, jMax) array_like
Numpy array containing the block data using float32 numbers.
next_index : int
Index for the next data field in the binary file.
Notes
-----
The data in the file is assumed to be organized in Fortran order:
#####################################################################
-Dim 0 (i.e X):
X(i=0, j=0), X(i=1, j=0), ... X(i=iMax, j=0)
X(i=0, j=1), ... ... X(i=iMax, j=1)
... ...
X(i=0, j=jMax) ... X(i=iMax, j=jMax)
#####################################################################
-Dim 1 (i.e. Y):
Y(i=0, j=0), Y(i=1, j=0), ... Y(i=iMax, j=0)
Y(i=0, j=1), ...
...etc...
#####################################################################
"""
# read surface pressures - i.e. (iMax, jMax)-shaped
if num_dims == 1:
block_data = np.zeros((iMax, jMax), dtype=np.float32)
for j in range(jMax):
for i in range(iMax):
# fields are (i,j) order, hence j*iMax + i
current_index = (start_index
+ ((j*iMax + i)*VALUE_LENGTH))
block_data[i, j] = read_float(bytes_data, current_index)
# read XYZ or XY - i.e. (num_dims, iMax, jMax)-shaped
else:
block_data = np.zeros((num_dims, iMax, jMax), dtype=np.float32)
for n in range(num_dims):
for j in range(jMax):
for i in range(iMax):
# fields are (i,j) order, hence j*iMax + i
current_index = (start_index
+ ((n*iMax*jMax + j*iMax + i)
* VALUE_LENGTH))
block_data[n, i, j] = read_float(bytes_data, current_index)
# increase start index by ('value_length' bytes * num_dims coords * iMax * jMax)
next_index = start_index + VALUE_LENGTH*(num_dims*iMax*jMax)
return block_data, next_index
def write_block(file, block_data):
"""
Writes a block of data in PLOT3D-like format to a binary file.
The block of data is a (num_dims, iMax, jMax)-shaped array of float32
values, and the write order is Fortran (column-major).
Parameters
----------
file : file object
File object obtained from calling 'open(some_filename)'.
block_data : (iMax, jMax) or (num_dims, iMax, jMax) array_like
Numpy array containing the data to be written. Data can be integer
or floating-point.
Returns
-------
None
Notes
-----
See documentation for 'pywopwop.write_binary' function for more info on
supported data types.
"""
# if block_data is (iMax, jMax)-shaped, reshape to have 3 dims
if block_data.ndim == 2:
block_data = block_data[np.newaxis, :, :]
num_dims, iMax, jMax = block_data.shape
# write block data
for n in range(num_dims):
for j in range(jMax):
for i in range(iMax):
write_binary(file, block_data[n, i, j])
def read_IBLANKblock(bytes_data, start_index, iMax, jMax):
"""
Reads a block of IBLANK data in PLOT3D format from a binary file.
The block of data is a (iMax, jMax)-shaped array of int32 values.
Parameters
----------
bytes_data : file object
File object obtained from calling 'open(filename)'.
start_index : int
Index indicating initial position of XYZ block within binary file.
iMax : int
First dimension of the data block
jMax : int
Second dimension of the data block
Returns
-------
IBLANK_data : (iMax, jMax) array_like
Numpy array containing IBLANK data as int32 numbers.
next_index : int
Index for the next data field in the binary file.
Notes
-----
The data in the file is assumed to be organized as:
#####################################################################
X(i=0, j=0), X(i=1, j=0), ... X(i=iMax, j=0)
X(i=0, j=1), ... ... X(i=iMax, j=1)
... ...
X(i=0, j=jMax) ... X(i=iMax, j=jMax)
#####################################################################
"""
IBLANK_data = np.zeros((iMax, jMax), dtype=np.int32)
for j in range(jMax):
for i in range(iMax):
# fields are (i,j) order, hence j*iMax + i
current_index = (start_index
+ (j*iMax + i)*VALUE_LENGTH)
IBLANK_data[i, j] = read_int(bytes_data, current_index)
# increase start index by ('value_length' bytes * iMax * jMax)
next_index = start_index + VALUE_LENGTH*(iMax*jMax)
return IBLANK_data, next_index
# %% #######################################################################
# PSU-WOPWOP string reader / writer
# ##########################################################################
def write_string(file, string, max_length):
"""
Writes an ASCII-compatible string to an open binary file object, up to a
maximum length. If the string is shorter than 'max_length', it is padded with spaces.
"""
# check string is ASCII compatible
ascii_error = 'String is not ASCII compatible!'
assert string[:max_length].isascii(), ascii_error
# check string has length 'max_length', pad with spaces otherwise
if len(string) < max_length:
string += (max_length-len(string))*' '
file.write(string[:max_length].encode('ascii'))
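# Example: write_string(f, 'Zone 1', 8) writes b'Zone 1  ' (padded to 8 bytes).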
def read_string(obj_name, start_index, len_string):
"""
Reads strings of arbitrary length from binary file object.
"""
mystring = ''
for i in range(len_string):
mystring += chr(read_int(obj_name, start_index + i, 1))
return mystring
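# Example: read_string(b'Zone 1  ', 0, 8) reads the bytes back one at a time
# and returns the string 'Zone 1  '.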
# %% #######################################################################
# PSU-WOPWOP int and float reader / writer
# ##########################################################################
def read_int(obj_name, start_index, n_bytes=VALUE_LENGTH,
endianness_flag=ENDIANNESS):
"""
Reads one integer value from an open file and returns the unpacked value.
Parameters
----------
obj_name : bytes_object
Object containing binary data, such as an open file object.
start_index : int
Starting index of value to be read.
n_bytes : {1, 2, 4, 8}
Size of the integer to be read, in bytes. The default is the constant
'VALUE_LENGTH' = 4.
endianness_flag : {'little', 'big'}, optional
String indicating the byte endianness to be used. The default is
the constant 'ENDIANNESS' = 'little'.
Returns
-------
out : int
Integer value unpacked from file data.
"""
n_bytes_dict = {1:'b', 2:'h', 4:'i', 8:'q'}
endianness_dict = {'little':'<', 'big':'>'}
return struct.unpack(endianness_dict[endianness_flag] + n_bytes_dict[n_bytes],
obj_name[start_index:start_index + n_bytes])[0]
def read_float(obj_name, start_index, n_bytes=VALUE_LENGTH,
endianness_flag=ENDIANNESS):
"""
Reads one float value from an open file and returns the unpacked value.
Parameters
----------
obj_name : bytes_object
Object containing binary data, such as an open file object.
start_index : int
Starting index of value to be read.
n_bytes : {4, 8}
Size of the float to be read, in bytes.
endianness_flag : {'little', 'big'}, optional
String indicating the byte endianness to be used. The default is
'little'.
Returns
-------
out : float
Float value unpacked from file data.
"""
n_bytes_dict = {4:'f', 8:'d'}
endianness_dict = {'little':'<', 'big':'>'}
return struct.unpack(endianness_dict[endianness_flag] + n_bytes_dict[n_bytes],
obj_name[start_index:start_index + n_bytes])[0]
def write_binary(file, data, length=VALUE_LENGTH,
endianness_flag=ENDIANNESS, is_signed=IS_SIGNED):
"""
Writes one value of data to an open binary file.
Parameters
----------
file : file object
File object obtained from calling 'open(filename)'.
data : int or float
Value to be written to file. Must be int or float.
length : {1, 2, 4, 8}, optional
Byte length of the value to be written. The default is 4 bytes.
endianness_flag : {'little', 'big'}, optional
String indicating the byte endianness to be used. The default is
'little'.
is_signed : boolean, optional
Flag indicating whether the values are signed (True) or unsigned
(False). The default is True.
Returns
-------
None.
Notes
-----
Data value to be written must be integer or floating point.
Floating point data can only accept lengths of 4 and 8 bytes.
"""
if type(data) is int:
file.write(data.to_bytes(length, endianness_flag, signed=is_signed))
# if data is python float or numpy float, write to file as float
elif (isinstance(data, (float, np.floating))):
endianness = {'little':'<', 'big':'>'}
floatlen = {4:'f', 8:'d'}
format_string = endianness[endianness_flag] + floatlen[length]
file.write(struct.pack(format_string, data))
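# Round-trip sanity check (a minimal sketch, assuming VALUE_LENGTH == 4 and
# ENDIANNESS == 'little' in consts_and_dicts):
if __name__ == '__main__':
    import io
    buf = io.BytesIO()
    write_binary(buf, 3.14)   # packed as little-endian float32
    write_binary(buf, 42)     # packed as little-endian int32
    raw = buf.getvalue()
    assert abs(read_float(raw, 0) - 3.14) < 1e-6
    assert read_int(raw, 4) == 42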
|
import os
from pytube import YouTube
from loguru import logger
def progress_function(stream, chunk, file_handle, bytes_remaining):
size = stream.filesize
percent = int((size-bytes_remaining)/size * 100)
logger.info(f'{stream.title} @ {percent} %')
def download_music(link, filename):
youtube = YouTube(link, on_progress_callback=progress_function)
stream = youtube.streams.filter(only_audio=True).first()
stream.download(filename=filename)
os.system(f'ffmpeg -i "{filename}.mp4" "musicas/{filename}.wav"')
os.remove(f'{filename}.mp4')
return f'musicas/{filename}.wav'  # path written by the ffmpeg call above
|
import json
import requests
url_phenotypes = 'http://localhost:9000/api/phenotypes'
url_genotypes = 'http://localhost:9000/api/genotypes'
token = 'Bearer eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJJRCI6Ik5JQUdBRFMiLCJleHAiOjE0NjIzMTMyODJ9.AG63RkbnOkAEy-Oua2KC72mKa6K9COzGiY2lYSW2UbY'
headers = {'Authorization': token}
data_phenotypes = requests.get(url_phenotypes, headers=headers).json()
data_genotypes = requests.get(url_genotypes, headers=headers).json()
def loadPhenotypes(data_phenotypes):
phenotypes_list = data_phenotypes['phenotypes']
for phenotype in phenotypes_list:
print(phenotype['title'])
print(phenotype['family_id'])
print(phenotype['individual_id'])
print(phenotype['paternal_id'])
print(phenotype['maternal_id'])
def loadGenotypes(data_genotypes):
genotypes_list = data_genotypes['genotypes']
for genotype in genotypes_list:
print(genotype['title'])
print(genotype['chr'])
print(genotype['coordinate'])
print(genotype['variant_id'])
def postGenotypes(url_genotypes, token, headers):
values = {"title":"test","chr":"2","variant_id":"snp4","location":"0","coordinate":"1111830","call":"G T G T G G T T G T T T"}
data = json.dumps(values)
req = requests.post(url_genotypes, data=data, headers=headers)
print(req.status_code)
loadPhenotypes(data_phenotypes)
loadGenotypes(data_genotypes)
postGenotypes(url_genotypes, token, headers)
|
# -----------------------------------------------------------------------------
# Copyright (C) 2019-2021 The python-ndn authors
#
# This file is part of python-ndn.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# -----------------------------------------------------------------------------
from ...types import InterestNack, InterestTimeout, InterestCanceled, ValidationFailure
async def express_interest(app, name):
try:
_, _, data = await app.express_interest(
name, lifetime=1000, can_be_prefix=True, must_be_fresh=True)
return data
except InterestNack as e:
print(f'Nacked with reason={e.reason}')
exit(-1)
except InterestTimeout:
print('Timeout')
exit(-1)
except InterestCanceled:
print('Local forwarder disconnected')
exit(-1)
except ValidationFailure:
print('Data failed to validate')
exit(-1)
|
"""
@ Harris Christiansen (code@HarrisChristiansen.com)
Generals.io Automated Client - https://github.com/harrischristiansen/generals-bot
Bot_control: Create a human controlled bot
"""
import logging
from .base import bot_moves
# Set logging level
logging.basicConfig(level=logging.INFO)
######################### Move Making #########################
nextMove = []
last_manual = 0
_bot = None
_map = None
def make_move(currentBot, currentMap):
global _bot, _map, last_manual
_bot = currentBot
_map = currentMap
if not move_priority():
if not move_manual():
last_manual += 1
if not move_outward():
if last_manual > 5:
move_toward()
else:
last_manual = 0
return
def place_move(source, dest):
_bot.place_move(
source, dest, move_half=bot_moves.should_move_half(_map, source, dest)
)
######################### Manual Control #########################
def add_next_move(source_xy, dest_xy):
if _map is None:
return False
source = _map.grid[source_xy[1]][source_xy[0]]
dest = _map.grid[dest_xy[1]][dest_xy[0]]
move = (source, dest)
nextMove.append(move)
_bot._path = [t[1] for t in nextMove]
def move_manual():
global nextMove, last_manual
if len(nextMove) == 0:
return False
(source, dest) = nextMove.pop(0)
if source and dest:
place_move(source, dest)
return True
return False
######################### Move Priority #########################
def move_priority():
(source, dest) = bot_moves.move_priority(_map)
if source and dest:
place_move(source, dest)
return True
return False
######################### Move Outward #########################
def move_outward():
(source, dest) = bot_moves.move_outward(_map)
if source and dest:
place_move(source, dest)
return True
return False
######################### Move Toward #########################
def move_toward():
path = bot_moves.path_proximity_target(_map)
(move_from, move_to) = bot_moves.move_path(path)
if move_from and move_to:
place_move(move_from, move_to)
return True
return False
######################### Main #########################
# Start Game
from . import startup
if __name__ == "__main__":
startup.startup(make_move, moveEvent=add_next_move, botName="PurdueBot-H")
|
# -*- coding: utf-8 -*-
"""
Profile: http://hl7.org/fhir/StructureDefinition/SearchParameter
Release: R4
Version: 4.0.1
Build ID: 9346c8cc45
Last updated: 2019-11-01T09:29:23.356+11:00
"""
import sys
from . import backboneelement, domainresource
class SearchParameter(domainresource.DomainResource):
""" Search parameter for a resource.
A search parameter that defines a named search item that can be used to
search/filter on a resource.
"""
resource_type = "SearchParameter"
def __init__(self, jsondict=None, strict=True):
""" Initialize all valid properties.
:raises: FHIRValidationError on validation errors, unless strict is False
:param dict jsondict: A JSON dictionary to use for initialization
:param bool strict: If True (the default), invalid variables will raise a TypeError
"""
self.base = None
""" The resource type(s) this search parameter applies to.
List of `str` items. """
self.chain = None
""" Chained names supported.
List of `str` items. """
self.code = None
""" Code used in URL.
Type `str`. """
self.comparator = None
""" eq | ne | gt | lt | ge | le | sa | eb | ap.
List of `str` items. """
self.component = None
""" For Composite resources to define the parts.
List of `SearchParameterComponent` items (represented as `dict` in JSON). """
self.contact = None
""" Contact details for the publisher.
List of `ContactDetail` items (represented as `dict` in JSON). """
self.date = None
""" Date last changed.
Type `FHIRDate` (represented as `str` in JSON). """
self.derivedFrom = None
""" Original definition for the search parameter.
Type `str` referencing `['SearchParameter']`. """
self.description = None
""" Natural language description of the search parameter.
Type `str`. """
self.experimental = None
""" For testing purposes, not real usage.
Type `bool`. """
self.expression = None
""" FHIRPath expression that extracts the values.
Type `str`. """
self.jurisdiction = None
""" Intended jurisdiction for search parameter (if applicable).
List of `CodeableConcept` items (represented as `dict` in JSON). """
self.modifier = None
""" missing | exact | contains | not | text | in | not-in | below |
above | type | identifier | ofType.
List of `str` items. """
self.multipleAnd = None
""" Allow multiple parameters (and).
Type `bool`. """
self.multipleOr = None
""" Allow multiple values per parameter (or).
Type `bool`. """
self.name = None
""" Name for this search parameter (computer friendly).
Type `str`. """
self.publisher = None
""" Name of the publisher (organization or individual).
Type `str`. """
self.purpose = None
""" Why this search parameter is defined.
Type `str`. """
self.status = None
""" draft | active | retired | unknown.
Type `str`. """
self.target = None
""" Types of resource (if a resource reference).
List of `str` items. """
self.type = None
""" number | date | string | token | reference | composite | quantity |
uri | special.
Type `str`. """
self.url = None
""" Canonical identifier for this search parameter, represented as a
URI (globally unique).
Type `str`. """
self.useContext = None
""" The context that the content is intended to support.
List of `UsageContext` items (represented as `dict` in JSON). """
self.version = None
""" Business version of the search parameter.
Type `str`. """
self.xpath = None
""" XPath that extracts the values.
Type `str`. """
self.xpathUsage = None
""" normal | phonetic | nearby | distance | other.
Type `str`. """
super(SearchParameter, self).__init__(jsondict=jsondict, strict=strict)
def elementProperties(self):
js = super(SearchParameter, self).elementProperties()
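# each tuple below appears to follow the convention
# (attribute, JSON name, Python type, FHIR type, is_list, of_many, required),
# inferred from the entries themselves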
js.extend(
[
("base", "base", str, "code", True, None, True),
("chain", "chain", str, "string", True, None, False),
("code", "code", str, "code", False, None, True),
("comparator", "comparator", str, "code", True, None, False),
(
"component",
"component",
SearchParameterComponent,
"SearchParameterComponent",
True,
None,
False,
),
(
"contact",
"contact",
contactdetail.ContactDetail,
"ContactDetail",
True,
None,
False,
),
("date", "date", fhirdate.FHIRDate, "dateTime", False, None, False),
("derivedFrom", "derivedFrom", str, "canonical", False, None, False),
("description", "description", str, "markdown", False, None, True),
("experimental", "experimental", bool, "boolean", False, None, False),
("expression", "expression", str, "string", False, None, False),
(
"jurisdiction",
"jurisdiction",
codeableconcept.CodeableConcept,
"CodeableConcept",
True,
None,
False,
),
("modifier", "modifier", str, "code", True, None, False),
("multipleAnd", "multipleAnd", bool, "boolean", False, None, False),
("multipleOr", "multipleOr", bool, "boolean", False, None, False),
("name", "name", str, "string", False, None, True),
("publisher", "publisher", str, "string", False, None, False),
("purpose", "purpose", str, "markdown", False, None, False),
("status", "status", str, "code", False, None, True),
("target", "target", str, "code", True, None, False),
("type", "type", str, "code", False, None, True),
("url", "url", str, "uri", False, None, True),
(
"useContext",
"useContext",
usagecontext.UsageContext,
"UsageContext",
True,
None,
False,
),
("version", "version", str, "string", False, None, False),
("xpath", "xpath", str, "string", False, None, False),
("xpathUsage", "xpathUsage", str, "code", False, None, False),
]
)
return js
class SearchParameterComponent(backboneelement.BackboneElement):
""" For Composite resources to define the parts.
Used to define the parts of a composite search parameter.
"""
resource_type = "SearchParameterComponent"
def __init__(self, jsondict=None, strict=True):
""" Initialize all valid properties.
:raises: FHIRValidationError on validation errors, unless strict is False
:param dict jsondict: A JSON dictionary to use for initialization
:param bool strict: If True (the default), invalid variables will raise a TypeError
"""
self.definition = None
""" Defines how the part works.
Type `str` referencing `['SearchParameter']`. """
self.expression = None
""" Subexpression relative to main expression.
Type `str`. """
super(SearchParameterComponent, self).__init__(jsondict=jsondict, strict=strict)
def elementProperties(self):
js = super(SearchParameterComponent, self).elementProperties()
js.extend(
[
("definition", "definition", str, "canonical", False, None, True),
("expression", "expression", str, "string", False, None, True),
]
)
return js
try:
from . import codeableconcept
except ImportError:
codeableconcept = sys.modules[__package__ + ".codeableconcept"]
try:
from . import contactdetail
except ImportError:
contactdetail = sys.modules[__package__ + ".contactdetail"]
try:
from . import fhirdate
except ImportError:
fhirdate = sys.modules[__package__ + ".fhirdate"]
try:
from . import usagecontext
except ImportError:
usagecontext = sys.modules[__package__ + ".usagecontext"]
|
from setuptools import setup
setup(
name='telegraf_ultimaker3',
version='0.1',
packages=['telegraf_ultimaker3'],  # 'packages' takes package names; pinned dependencies go in install_requires
install_requires=['requests==2.22.0'],
url='',
license='MIT',
author='Aubustou',
author_email='',
description='Script that allows data collection from Ultimaker 3\'s API to Telegraf'
)
|
"""
GNN models
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
def get_model(model_name=None):
from .model import SegmentClassifier as mm
from .model_more import SegmentClassifier as mm_more
from .model_smart import SegmentClassifier as mm_sm
from .model_vary import SegmentClassifier as mm_vary
from .model_vary2 import SegmentClassifier as mm_vary2
from .model_objrel import SegmentClassifier as mm_objrel
# model_name could be used for future testing different models
if model_name == "MORE":
return mm_more()
elif model_name == "SMART":
return mm_sm()
elif model_name == "VARY":
return mm_vary()
elif model_name == "VARY2":
return mm_vary2()
elif model_name == "OBJREL":
return mm_objrel()
else:
return mm()
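# Example: get_model() returns the default SegmentClassifier; passing
# "MORE", "SMART", "VARY", "VARY2" or "OBJREL" selects the matching variant.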
|
# Copyright 2022 MosaicML Composer authors
# SPDX-License-Identifier: Apache-2.0
import os
from typing import Optional
import py
import pytest
import torch
from torch.utils.data import DataLoader
from composer import Trainer
from composer.algorithms import get_algorithm_registry
from tests.algorithms.algorithm_settings import get_settings
from tests.common import deep_compare, device
ALGORITHMS = get_algorithm_registry().keys()
@pytest.mark.timeout(180)
@device('gpu')
@pytest.mark.parametrize(
"seed,save_interval,save_filename,resume_file,final_checkpoint",
[
[None, "1ep", "ep{epoch}-rank{rank}", "ep2-rank{rank}", "latest-rank{rank}"
], # test randomized seed saving and symlinking
[42, "1ep", "ep{epoch}-rank{rank}", "ep3-rank{rank}", "ep5-rank{rank}"], # test save at epoch end
],
)
@pytest.mark.parametrize("algorithm", ALGORITHMS)
def test_algorithm_resumption(
algorithm: str,
device,
seed: Optional[int],
save_interval: str,
save_filename: str,
resume_file: str,
final_checkpoint: str,
tmpdir: py.path.local,
):
if algorithm in ('no_op_model', 'scale_schedule'):
pytest.skip('stub algorithms')
if algorithm in ('cutmix', 'mixup', 'label_smoothing'):
# see: https://github.com/mosaicml/composer/issues/362
pytest.importorskip("torch", minversion="1.10", reason="Pytorch 1.10 required.")
if algorithm in ('layer_freezing',):
pytest.xfail('Known issues')
if algorithm in ('sam', 'squeeze_excite', 'stochastic_depth', 'factorize'):
pytest.xfail('Incompatible with optimizers that store state, e.g. Adam.')
setting = get_settings(algorithm)
if setting is None:
pytest.xfail('No setting provided in algorithm_settings.')
folder1 = os.path.join(tmpdir, 'folder1')
folder2 = os.path.join(tmpdir, 'folder2')
model = setting['model']
optimizer = torch.optim.Adam(model.parameters(), lr=0.01)
scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, T_max=5)
config = {
'algorithms': setting['algorithm'],
'model': model,
'train_dataloader': DataLoader(dataset=setting['dataset'], batch_size=4),
'max_duration': '5ep',
'device': device,
'save_filename': save_filename,
'save_folder': folder1,
'save_interval': save_interval,
'train_subset_num_batches': 2,
'optimizers': optimizer,
'schedulers': scheduler
}
# train model once, saving checkpoints every epoch
trainer1 = Trainer(**config)
trainer1.fit()
# create second trainer, load an intermediate checkpoint
# and continue training
setting = get_settings(algorithm)
assert setting is not None
model = setting['model']
optimizer = torch.optim.Adam(model.parameters(), lr=0.01)
scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, T_max=5)
config.update({
'model': model,
'save_folder': folder2,
'load_path': os.path.join(folder1, resume_file),
'load_weights_only': False,
'load_strict': False,
'optimizers': optimizer,
'schedulers': scheduler,
})
trainer2 = Trainer(**config)
trainer2.fit()
# check that the checkpoints are equal
_assert_checkpoints_equal(
file1=os.path.join(folder1, final_checkpoint.format(rank=0)),
file2=os.path.join(folder2, final_checkpoint.format(rank=0)),
)
# check that different epoch checkpoints are _not_ equal
# this ensures that the model weights are being updated.
with pytest.raises(AssertionError):
_assert_model_weights_equal(
file1=os.path.join(folder1, save_filename.format(epoch=4, rank=0)),
file2=os.path.join(folder1, final_checkpoint.format(rank=0)),
)
def _assert_checkpoints_equal(file1, file2):
checkpoint1 = torch.load(file1)
checkpoint2 = torch.load(file2)
# compare rng
deep_compare(checkpoint1['rng'], checkpoint2['rng'])
# compare state
# remove the wall clock time fields since they will always differ
del checkpoint1['state']['timestamp']['Timestamp']['total_wct']
del checkpoint1['state']['timestamp']['Timestamp']['epoch_wct']
del checkpoint1['state']['timestamp']['Timestamp']['batch_wct']
del checkpoint2['state']['timestamp']['Timestamp']['total_wct']
del checkpoint2['state']['timestamp']['Timestamp']['epoch_wct']
del checkpoint2['state']['timestamp']['Timestamp']['batch_wct']
deep_compare(checkpoint1['state'], checkpoint2['state'])
def _assert_model_weights_equal(file1, file2):
checkpoint1 = torch.load(file1)
checkpoint2 = torch.load(file2)
deep_compare(checkpoint1['state']['model'], checkpoint2['state']['model'])
|