import unittest
from katas.kyu_6.ipv4_to_int32 import ip_to_int32
class IPToInt32TestCase(unittest.TestCase):
def test_equals(self):
self.assertEqual(ip_to_int32('128.114.17.104'), 2154959208)
def test_equals_2(self):
self.assertEqual(ip_to_int32('0.0.0.0'), 0)
def test_equals_3(self):
self.assertEqual(ip_to_int32('128.32.10.1'), 2149583361)
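# For reference, a minimal sketch of the conversion these tests exercise. This is a
# hypothetical implementation; the real solution is imported from
# katas.kyu_6.ipv4_to_int32 above and may differ.
def _reference_ip_to_int32(ip):
    result = 0
    for octet in ip.split('.'):
        result = (result << 8) | int(octet)
    return result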
|
#!/usr/bin/env python
import os
import sys
os.environ['DJANGO_SETTINGS_MODULE'] = 'tests.settings'
from django.test.utils import get_runner
from django.conf import settings
def runtests():
TestRunner = get_runner(settings)
test_runner = TestRunner(verbosity=1, interactive=True, failfast=False)
failures = test_runner.run_tests([])
sys.exit(failures)
if __name__ == '__main__':
runtests()
|
# coding: utf-8
# geling: comments revised 2018-04-24
import gym
import matplotlib
import numpy as np
import sys
from collections import defaultdict
if "../" not in sys.path:
sys.path.append("../")
from lib.envs.halften import HalftenEnv
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from matplotlib.ticker import MultipleLocator, FormatStrFormatter
env = HalftenEnv()
# Fix for garbled Chinese characters in the generated plots
matplotlib.rcParams['font.sans-serif'] = ['SimHei']
matplotlib.rcParams['font.family'] = 'sans-serif'
matplotlib.rcParams['axes.unicode_minus'] = False
xmajorLocator = MultipleLocator(0.5)  # place x ticks at multiples of 0.5
xmajorFormatter = FormatStrFormatter('%1.1f')  # format of the x-axis tick labels
ymajorLocator = MultipleLocator(1)
ymajorFormatter = FormatStrFormatter('%d')  # format of the y-axis tick labels
zmajorLocator = MultipleLocator(1)
zmajorFormatter = FormatStrFormatter('%d')  # format of the z-axis tick labels
# Plotting helper
figureIndex = 0
def prettyPrint(data, title, zlabel='Return'):
    global figureIndex
    fig = plt.figure(figureIndex)
    figureIndex += 1
    fig.suptitle(title)
    fig.set_size_inches(18.5, 10.5)  # enlarge the figure; with the default size the fine-grained ticks overlap and become unreadable
    ax = fig.add_subplot(111, projection='3d')
axisX = []
axisY = []
axisZ = []
    ax.set_xlim(0.5, 10.5)  # x-axis range
    ax.set_ylim(1, 5)  # y-axis range
    ax.set_zlim(0, 1)  # z-axis range
ax.xaxis.set_major_locator(xmajorLocator)
ax.xaxis.set_major_formatter(xmajorFormatter)
ax.yaxis.set_major_locator(ymajorLocator)
ax.yaxis.set_major_formatter(ymajorFormatter)
ax.zaxis.set_major_locator(zmajorLocator)
ax.zaxis.set_major_formatter(zmajorFormatter)
for i in data:
axisX.append(i['x'])
axisY.append(i['y'])
axisZ.append(i['z'])
ax.scatter(axisX, axisY, axisZ)
    ax.set_xlabel('Player hand total')
    ax.set_ylabel('Number of cards in hand')
ax.set_zlabel(zlabel)
def create_random_policy(nA):
"""
创建一个随机策略函数
参数:
nA: 这个环境中的行为数量
返回:
一个函数,参数为当前的状态,返回为一个可能的行为向量
"""
A = np.ones(nA, dtype=float) / nA
def policy_fn(observation):
return A
return policy_fn
def create_greedy_policy(Q):
"""
基于Q值生成一个贪心策略
参数:
Q: 一个字典,键为状态,值为动作
Returns:
一个函数,参数为当前的状态,返回为一个可能的行为向量
"""
def policy_fn(state):
A = np.zeros_like(Q[state], dtype=float)
best_action = np.argmax(Q[state])
A[best_action] = 1.0
return A
return policy_fn
def mc_control_importance_sampling(env, num_episodes, behavior_policy, discount_factor=1.0):
"""
使用重采样的蒙特卡罗离线策略控制
找到最优的贪心策略
参数:
env: 十点半环境
num_episodes: 对样本的自行次数
behavior_policy: 行为策略
discount_factor: 折扣因子
Returns:
A 元组 (Q, policy).
Q 一个字典,键为状态,值为动作
policy 一个函数,参数为当前的状态,返回为一个可能的行为向量
这是一个最优的贪心策略
"""
# 一个字典,键为状态, 值为动作
Q = defaultdict(lambda: np.zeros(env.action_space.n))
# 加权重要性抽样公式的累积分母(通过所有的episodes)
C = defaultdict(lambda: np.zeros(env.action_space.n))
# 遵循的策略
target_policy = create_greedy_policy(Q)
# 玩的场次
for i_episode in range(1, num_episodes + 1):
        # Progress reporting (print an update to the console every 1000 episodes)
        if i_episode % 1000 == 0:
            print("\rEpisode {}/{}.".format(i_episode, num_episodes), end="")
            sys.stdout.flush()
        # List used to store the episode as (state, action, reward) tuples
episode = []
state = env._reset()
for t in range(100):
            # Get the action probability vector for the current state from the behavior policy
            probs = behavior_policy(state)
            # Sample an action according to the returned probabilities
            action = np.random.choice(np.arange(len(probs)), p=probs)
            # Take the action and observe the next state, the reward, and whether the episode ended
            next_state, reward, done, _ = env._step(action)
            # Append the transition to the episode
episode.append((state, action, reward))
if done:
break
state = next_state
episode_len = len(episode)
if episode_len < 4:
continue
        # Cumulative return
        G = 0.0
        # Importance sampling weight
        W = 1.0
        print("Episode number: %d" % (i_episode))
        print("Trajectory of this episode", end=":")
        print(episode)
        # Walk through the episode backwards, step by step
for t in range(len(episode))[::-1]:
print("第%d次计算"%t)
state, action, reward = episode[t]
print(episode[t])
# 从当前步更新总回报
G = discount_factor * G + reward
print("G更新",end=":")
print(G)
# 更新加权重要性采样公式分母
C[state][action] += W
print("C更新",end=":")
print(C[state][action])
# 使用增量更新公式更新动作值函数
# 这也改善了我们提到Q的目标政策
Q[state][action] += (W / C[state][action]) * G - Q[state][action]
print("Q更新",end=":")
print(Q[state][action])
# 如果行为策略采取的行动不是目标策略采取的行动,那么概率将为0,我们可以打破
if action != np.argmax(target_policy(state)):
print("行为策略采取的行动与初始1策略不符,终止执行")
print("*" * 50)
break
W = W * 1./behavior_policy(state)[action]
print("W更新:",end=":")
print(W)
print("*"*50)
return Q, target_policy
random_policy = create_random_policy(env.action_space.n)
Q, policy = mc_control_importance_sampling(env, num_episodes=500, behavior_policy=random_policy)
# policy_content = ["Stand", "Hit"]
#
# # Optimal policy in each case when the hand holds x face cards
# action_0_pcard = []
# action_1_pcard = []
# action_2_pcard = []
# action_3_pcard = []
# action_4_pcard = []
#
# for state, actions in Q.items():
# # Optimal action-value for this state
# action_value = np.max(actions)
# # Optimal action for this state
# best_action = np.argmax(actions)
#
# score, card_num, p_num = state
#
# item_0 = {"x": score, "y": int(card_num), "z": best_action}
#
# if p_num == 0:
# action_0_pcard.append(item_0)
# elif p_num == 1:
# action_1_pcard.append(item_0)
# elif p_num == 2:
# action_2_pcard.append(item_0)
# elif p_num == 3:
# action_3_pcard.append(item_0)
# elif p_num == 4:
# action_4_pcard.append(item_0)
#
# print("当前手牌数之和为:%.1f,当前手牌数为:%d时,当前人牌数为%d,最优策略为:%s"%(score,card_num,p_num,policy_content[best_action]))
#
# prettyPrint(action_0_pcard, "Optimal policy with no face cards", "Chosen action")
# prettyPrint(action_1_pcard, "Optimal policy with one face card", "Chosen action")
# prettyPrint(action_2_pcard, "Optimal policy with two face cards", "Chosen action")
# prettyPrint(action_3_pcard, "Optimal policy with three face cards", "Chosen action")
# prettyPrint(action_4_pcard, "Optimal policy with four face cards", "Chosen action")
# plt.show()
|
import serial
import sys
class ArgsParse:
def __init__(self):
self.argMap = {}
self.flags = [];
offset = 0;
for i in sys.argv[1:]:
offset = i.find("=");
if offset > -1:
self.argMap[i[:offset].lower()] = i[offset+1:];
else:
self.flags += [i.lower()];
def hasFlag(self, flagName):
for i in self.flags:
if i == flagName.lower():
return True;
return False;
def getValue(self, name, defaultValue):
if( name.lower() in self.argMap):
return self.argMap[name.lower()]
return defaultValue;
class PrinterProtocol:
def __init__(self, port='/dev/ttyACM0', baud=115200, timeout=0.5):
self.mPort = port;
self.mBaud = baud;
self.mTimeout = timeout;
self.ser = None;
self.errors = [];
def open(self):
self.ser = serial.Serial(self.mPort, self.mBaud, timeout=self.mTimeout);
self.printResponse();
def close(self):
if self.ser != None:
self.ser.close();
def printResponse(self):
while True:
s = self.ser.readline();
if s == '':
break;
print("From Printer: " + s[:-1]);
def readUntilOkOrError(self):
while True:
s = self.ser.readline();
if s.find(":") > -1:
tag = s[:s.find(":")];
else:
tag = "";
if s.lower() == "ok\n":
return True;
if(s != ''):
print("From Printer: " + s[:-1]);
if tag.lower() == "error":
return False;
def sendCmd(self, cmd):
print ("App: Sending Command: " + cmd);
self.ser.write(cmd + "\n");
return self.readUntilOkOrError();
    def emergencyStop(self):
        self.sendCmd("M18");     # disable stepper motors
        self.sendCmd("M140 S0"); # bed heater off
        self.sendCmd("M104 S0"); # hotend heater off
        self.sendCmd("M81");     # power supply off
class GcodeCommandBuffer:
def __init__(self, filePath):
self.mLines = [];
self.mNextCommand = 0;
f = open(filePath);
l = f.readline();
lineCounter = 1;
while l != '':
commentPos = l.find(";");
comment = None
if(commentPos > -1):
comment = l[commentPos+1:].strip();
l = l[:commentPos];
l = l.strip();
if(len(l) > 0 or comment != None):
self.mLines += [{"cmd":l, "comment":comment, "line":lineCounter}];
lineCounter += 1
l = f.readline();
f.close();
def nextCommand(self):
        if(self.mNextCommand < len(self.mLines)):
temp = self.mLines[self.mNextCommand];
self.mNextCommand += 1;
return temp;
return {"cmd": "", "comment":"End of commands!!!", "line":-1};
def percentComplete(self):
return (1.0*self.mNextCommand) / len(self.mLines);
def numCommandsLeft(self):
return len(self.mLines) - self.mNextCommand;
args = ArgsParse();
baud = args.getValue("baud", 115200);
port = args.getValue("port", '/dev/ttyACM0');
print("Params: port=" + port + ", baud=" + str(baud));
p = PrinterProtocol(port=port, baud=baud, timeout=1.0);
p.open();
trueish = ['yes', 'true', '1', 't', 'y', 'on'];
falseish = ['no', 'false', '0', 'f', 'n', 'off'];
running = True;
while running:
cmdRaw = raw_input("> ")
cmd = cmdRaw.split(" ")[0].lower();
if cmd == "":
print("Nop");
elif cmd == "help" or cmd == "?":
print("""
Custom Commands:
help / ? - shows this
quit / exit / q - stops console
relative <true/false> - use Relative Movements
pos - show current position
info - show printer info
endstops - show endstop status
motors <true/false> - turn motors on?
home - home the printer
status - print available info to screen
stop - Emergency stop of printer
* Note Any Non-Custom Commands will be treated as GCODE and be sent un-altered to printer
""")
elif cmd == "exit" or cmd == "quit" or cmd == "q":
running = False;
elif cmd == "relative":
v = cmdRaw.split(" ");
if len(v) > 1:
temp = v[1].lower();
if temp in trueish:
p.sendCmd("G91")
if temp in falseish:
p.sendCmd("G90")
elif cmd == "pos":
p.sendCmd("M114");
elif cmd == "motors":
v = cmdRaw.split(" ");
if len(v) > 1:
temp = v[1].lower();
if temp in trueish:
p.sendCmd("M17")
if temp in falseish:
p.sendCmd("M18")
elif cmd == "info":
p.sendCmd("M115");
elif cmd == "endstops":
p.sendCmd("M119");
elif cmd == "status":
p.sendCmd("M115"); #Capabilities string
p.sendCmd("M114"); #Position
p.sendCmd("M119"); #End stops
p.sendCmd("M105"); # Current Temp
elif cmd == "stop":
p.sendCmd("M18"); #stop Motors
p.sendCmd("M190 S0"); #Set bed temp to 0
p.sendCmd("M104 S0"); #Set Nozzle temp to 0
elif cmd == "home":
p.sendCmd("G28")
else:
p.sendCmd(cmdRaw);
p.close();
|
#!/usr/bin/env python
# Copyright (c) 2011 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Verifies simplest-possible build of a "Hello, world!" program
using the default build target.
"""
import TestGyp
test = TestGyp.TestGyp(workdir='workarea_default', formats=['msvs'])
# Run from down in foo.
test.run_gyp('a.gyp', chdir='foo/a')
sln = test.workpath('foo/a/a.sln')
sln_data = open(sln, 'rb').read()
vcproj = sln_data.count('b.vcproj')
vcxproj = sln_data.count('b.vcxproj')
if (vcproj, vcxproj) not in [(1, 0), (0, 1)]:
test.fail_test()
test.pass_test()
|
import parallel_utils3 as par
import argparse
import gc
import random
import re
from collections import namedtuple
from functools import partial
from pathlib import Path
from posixpath import join
from sys import argv
from time import time
import dask.dataframe as dd
import pandas as pd
from joblib import Parallel, delayed
from multiprocess import Pool
import utils as u
filename_rx = re.compile(
r"(.*)_(\d+)\.([WAGL+FTR]+)\.treefile$")
# qCF: Fraction of concordant sites supporting quartet Seq1,Seq2|Seq3,Seq4 (=qCF_N/qN)
# qCF_N: Number of concordant sites supporting quartet Seq1,Seq2|Seq3,Seq4
# qDF1: Fraction of discordant sites supporting quartet Seq1,Seq3|Seq2,Seq4 (=qDF1_N/qN)
# qDF1_N: Number of discordant sites supporting quartet Seq1,Seq3|Seq2,Seq4
# qDF2: Fraction of discordant sites supporting quartet Seq1,Seq4|Seq2,Seq3 (=qDF2_N/qN)
# qDF2_N: Number of discordant sites supporting quartet Seq1,Seq4|Seq2,Seq3
# qN: Number of decisive sites with four taxa Seq1,Seq2,Seq3,Seq4 (=qCF_N+qDF1_N+qDF2_N)
def value_mapper(d):
def f(X):
try:
return [d[x] for x in X]
except:
return d[X]
return f
Params = namedtuple(
"Params", ("filename", "prefix", "gene", "imodel")
)
def parse_filename(fn: Path) -> Params:
s = fn.name
m = filename_rx.search(s)
try:
prefix, gene, imodel, = m.groups()
except AttributeError as e:
print(fn, filename_rx)
raise e
return Params(
filename=fn,
prefix=prefix,
gene=gene,
imodel=imodel
)
# top2tid = {'(4,(1,(2,3)));': 3, '(4,(2,(1,3)));': 2, '(4,(3,(1,2)));': 1}
def tree2pdist(tree: u.Tree, q) -> pd.DataFrame:
tree = tree.copy('deepcopy')
tree.prune(q)
for l in tree.get_leaves():
l.name = clade_mapper[l.name.split('_')[0]]
tree.set_outgroup('4')
d = u.summarize(tree)
d['ix'] = q
return d
def filter_quartet_list(quartet: list) -> bool:
"""Assumes that Outgroups have been merged, and the list doesn't contain more than 4 taxa"""
found = 0
for clade in clade_mapper.keys():
found += any(s.find(clade) > -1 for s in quartet )
return found >= 4
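# The keys below appear to be the concatenated clade codes of a quartet's index
# order (via clade_mapper); each value remaps the qCF/qDF column names so that
# qCF consistently refers to the clade1,clade2 | clade3,clade4 split (see
# reorder_scf below).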
splits = {
'2143': {'qDF1': 'qDF1', 'qCF': 'qCF', 'qDF2': 'qDF2', 'qN': 'qN'},
'1234': {'qDF1': 'qDF1', 'qCF': 'qCF', 'qDF2': 'qDF2', 'qN': 'qN'},
'2413': {'qCF': 'qDF1', 'qDF1': 'qCF', 'qDF2': 'qDF2', 'qN': 'qN'},
'4123': {'qCF': 'qDF2', 'qDF1': 'qCF', 'qDF2': 'qDF1', 'qN': 'qN'},
'2314': {'qCF': 'qDF2', 'qDF1': 'qCF', 'qDF2': 'qDF1', 'qN': 'qN'}
}
class SCF:
def __init__(self, filepath):
# TODO : make sure scf has expected labeling
self.scf = pd.read_csv(
filepath,
delim_whitespace=True,
usecols=['qCF', 'qDF1', 'qDF2', 'qN',
'Seq1', 'Seq2', 'Seq3', 'Seq4'],
index_col=['Seq1', 'Seq2', 'Seq3', 'Seq4'],
).dropna()
def reorder_scf(self):
rows = []
for idx, row in self.scf.iterrows():
topology = ''.join(clade_mapper[t.split('_')[0]] for t in idx)
row.index = row.index.map(splits[topology])
rows.append(row)
self.scf = pd.DataFrame(rows)
return self
@staticmethod
def filter_quartet(quartet: list) -> bool:
"""filters for those quartets that span the internal branch"""
found = 0
for clade in clade_mapper:
found += sum(s.startswith(clade) for s in quartet) == 1
return found == 4
@staticmethod
def clade_order(s: str):
'''TODO: handle NA clade names'''
for c in clade_mapper:
if s.startswith(c):
return clade_mapper[c]
return '0'
def map_index(self, ix2name):
def name_mapper(tup):
"""sort must respect s1,s2|s3,s4 so we know which topos qCF, DF, and DF2 map to"""
tup = tuple(ix2name[i] for i in tup)
def s(t):
return sorted(t, key=self.clade_order)
left, right = sorted((s(tup[:2]), s(tup[2:])))
return (*left, *right)
self.scf.index = self.scf.index.map(name_mapper)
return self
def sort_index(self):
"""This should be called after reorder scf"""
def s(t):
return tuple(sorted(t, key=self.clade_order))
self.scf.index = self.scf.index.map(s)
self.scf.index.names = [s.split('_')[0] for s in self.scf.index[0]]
def filter(self):
scf = self.scf
scf = scf[scf.index.map(self.filter_quartet)]
if not scf.empty:
scf.index.names = [s.split('_')[0] for s in scf.index[0]]
self.scf = scf
return self
def percent(self):
# convert frac to percent for consistency
if u.np.allclose(self.scf.drop(columns='qN').sum(1), 1):
self.scf.loc[:, ['qCF', 'qDF1', 'qDF2']
] = self.scf[['qCF', 'qDF1', 'qDF2']]*100
@property
def empty(self):
return self.scf.empty
@property
def index(self):
return self.scf.index
def to_dataframe(self):
return self.scf
def write_quartets(filename: Path, threads: int = 4):
import numpy as np
from Bio import AlignIO
# TODO: must first translate species name -> clade name to reorder index columns of scf.
# Then select using array_agg.
_, prefix, gene, imodel = parse_filename(filename)
dirname = filename.parent
tree = u.read_trees(filename)[0]
for l in tree:
l.name = clade_name_mapper(l.name)
a = AlignIO.read(dirname/f'{prefix}_{gene}.nex', format='nexus')
ix2name = {
ix: clade_name_mapper(seq.name.replace(
'@', '_')) for ix, seq in enumerate(a, 1)
}
scf = (
SCF(filepath=dirname/f'{prefix}_{gene}.scf')
.map_index(ix2name)
.filter()
)
if scf.empty:
# print(filename, 'no quartets found')
return False
scf.reorder_scf()
scf.sort_index()
try:
scf.percent()
except:
print(scf, filename)
raise
summarize_tree = partial(tree2pdist, tree)
quartet_records = Parallel(threads)(
delayed(summarize_tree)(q) for q in scf.index)
d = (pd
.DataFrame
.from_records(quartet_records, index='ix')
)
d.index = pd.MultiIndex.from_tuples(d.index, names=scf.index.names)
d = scf.to_dataframe().join(d)
d["infer_model"] = imodel
d["seq_length"] = a.get_alignment_length()
d["infer_engine"] = "iqtree"
d["seq_type"] = "AA"
d["matrix_name"] = filename.parent.name
d.to_pickle(filename.with_suffix('.quartets.pd.gz'))
return True
def write_hdf(s: Path, procs=4):
from summarize_meta import summarize_dataset
summary_stats = summarize_dataset(s, by='taxa')
summary_stats.to_hdf(s.parent/'summary_stats.hdf5', key=s.stem)
return summary_stats
def main(args):
global clade_mapper
    clade_mapper = dict(zip(args.clades, '1234'))
csize = int(5000 / args.procs)
# TODO check file size, keep recmap in memory
directories = list(args.seqdir.glob("*.genes"))
random.shuffle(directories)
if args.procs == 1:
for dirname in directories:
start_time = time()
done = 0
for filename in dirname.glob('*.treefile'):
done += write_quartets(filename, threads=args.threads)
print(
f'dir: {dirname}\ttime: {time()-start_time}\twrote: {done}')
write_hdf(dirname)
else:
with Pool(args.procs) as p:
for dirname in directories:
start_time = time()
res = p.imap_unordered(
partial(write_quartets, threads=args.threads),
dirname.glob('*.treefile'))
print(
f'dir: {dirname}\ttime: {time()-start_time}\twrote: {sum(res)}')
            # imap_unordered is lazy; wrap in list() so the summary writes actually execute
            list(p.imap_unordered(write_hdf, directories))
print("finished updating inferred gene trees")
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Process some data.")
parser.add_argument(
"--buffsize",
type=int,
default=100,
help="""size of buffer for sql writes."""
)
parser.add_argument(
"--csize",
type=int,
default=50,
help="""size of chunks to pass to each proc."""
)
parser.add_argument(
"--procs",
type=int,
default=4,
help="""course-grained parallelism.""")
parser.add_argument(
"--threads",
type=int,
default=4,
help="""fine-grained parallelism.""")
parser.add_argument(
"--seqtype",
type=str,
default='PROT',
help="""alignment type (DNA or PROT).""")
parser.add_argument(
"--engine",
type=str,
default="iqtree",
help="inference engine (fasttree/raxml). Not implemented.",
)
parser.add_argument(
"--clades",
type=str,
nargs=4,
        default=['ParaHoxozoa', 'Ctenophora', 'Porifera', 'Outgroup'],
        help="The 4 clade names. The clade in the final position will be considered the outgroup.",
)
parser.add_argument(
"--verbose",
action="store_true",
help="debug (verbose) mode.")
parser.add_argument(
"--ignore_errors",
action="store_true",
help="debug (verbose) mode."
)
parser.add_argument(
"--seqdir",
type=Path,
help="input folder containing directories containing trees and scf files",
required=True)
parser.add_argument(
"--overwrite",
action="store_true",
help="overwrite existing tables."
)
args = parser.parse_args()
print(args)
main(args)
|
# -*- coding:utf-8 -*-
from config import application, environment, database
import view
app = application.app
environment.configure()
database.configure()
view.register()
if __name__ == '__main__':
app.run()
|
class KEY(object):
SERVICE_TIME = "service_time"
SERVICE_TYPE = 'serv_type'
DEPART_TIME = 'depart_time'
ROUTE_ID = 'route_id'
TRIP_ID = 'trip_id'
HEADSIGN = 'headsign'
DIRECTION = 'direction'
STOP_ID = 'stop_id'
EST_WAIT_SEC = 'est_wait_sec'
DISTANCE = 'dist'
POPULATION = 'pop'
WEIGHT = 'weight'
DAILY_DEPARTURES = 'daily_departures'
DEPARTURES = 'departures'
BAD_ID = 'bad_id'
POINT = 'point'
NAME = 'name'
NOTES = 'notes'
STOP_SET = 'stop_set'
BUFFER_METHOD = 'buffer_method'
SCORE_METHOD = 'score_method'
STOP_DEMAND = 'stop_demand'
SCORE_NEAREST_ONLY = 'score_nearest_only'
DECAY_METHOD = 'decay_method'
DECAY_FACTOR = 'decay_factor'
DISTANCE_METHOD = 'distance_method'
NORMALIZE_VALUE = 'normalize_value'
WAIT_BANDPASS = 'wait_bandpass'
STOPS_ADDED = 'stops_added'
STOPS_REMOVED = 'stops_removed'
LAT = 'lat'
LON = 'lng'
LNG = 'lng'
|
s,v=map(int,input().split())
l=list(map(int,input().split()))
l1=[]
for x in range(0,len(l)):
if(l[x]%2!=0):
l1.append(l[x])
print(l1[v-1])
|
s=[]
for i in range(8):
s.append(int(input()))
print(max(s))
|
import pandas as pd
import streamlit as st
import joblib
import numpy as np
st.title('Sales Forecasting')
st.write('We forecast sales')
data = pd.read_csv('/Users/alyssa/Desktop/ftw-webapp-deployment/data/advertising_regression.csv')
data
st.sidebar.subheader('Advertising Costs')
TV = st.sidebar.slider('TV Advertising Cost', 0, 300, 150)
radio = st.sidebar.slider('Radio Advertising Cost', 0, 300, 150)
newspaper = st.sidebar.slider('Newspaper Advertising Cost', 0, 300, 150)
hist_values = np.histogram(data.radio, bins=300, range=(0,300))[0]
st.bar_chart(hist_values)
hist_values = np.histogram(data.newspaper, bins=300, range=(0,300))[0]
st.bar_chart(hist_values)
hist_values = np.histogram(data.TV, bins=300, range=(0,300))[0]
st.bar_chart(hist_values)
saved_model = joblib.load('advertising_model.sav')
predicted_sales = saved_model.predict([[TV, radio, newspaper]])
st.write(f'Predicted sales is {predicted_sales} dollars')
|
import random
class Character(object):
"main character"
def __init__(self):
self.stats = {}
self.setStats(str=15,dex=15,end=15, name="Nock")
self.equipped = []
self.stats["damage"], self.stats["AC"] = 0, 0
self.level = 1
self.experience = []
# Health = d8 + end/2-5
# [curent/max]
        maxHealth = random.randint(1, 8) + ((self.stats["end"] // 2) - 5)
self.health = [maxHealth, maxHealth]
self.equipment = []
def __repr__(self):
return self.name
def setStats(self, str=0, dex=0, end=0, name=""):
if str != 0:
self.stats["str"] = str
if dex != 0:
self.stats["dex"] = dex
if end != 0:
self.stats["end"] = end
if name != "":
self.name = name
    def printStats(self):
        print(self.name)
        print("Str: " + str(self.stats["str"]))
        print("Dex: " + str(self.stats["dex"]))
        print("End: " + str(self.stats["end"]))
        print("Damage: " + str(self.stats["damage"]))
        print("AC: " + str(self.stats["AC"]))
def equip(self, equipment):
if equipment.isEquipped == False:
self.equipped.append(equipment)
self.applyEquipStatChanges(equipment)
print "Equipped " + equipment.name
self.equipment.append(equipment)
equipment.isEquipped = True
elif equipment.isEquipped == True:
print "Already Equipped"
def unequip(self, equipment):
if equipment.isEquipped == True:
self.equipped.remove(equipment)
self.unapplyEquipStatChanges(equipment)
print "Unequipped " + equipment.name
self.equipment.remove(equipment)
equipment.isEquipped = False
def applyEquipStatChanges(self, equipment):
type = equipment.type
if type == "weapon":
self.stats["damage"] += equipment.damage
if type == "armour":
self.stats["AC"] += equipment.armour
def unapplyEquipStatChanges(self, equipment):
type = equipment.type
if type == "weapon":
self.stats["damage"] -= equipment.damage
if type == "armour":
self.stats["AC"] -= equipment.armour
|
import web_utility
def convert(amount, home_currency_code, location_currency_code): #converts the home currency to foreign currency and returns it
url_string = "https://www.google.com/finance/converter?a={}&from={}&to={}".format(amount, home_currency_code,
location_currency_code)
result = web_utility.load_page(url_string)
if home_currency_code == location_currency_code:
return -1
if not home_currency_code + " = <span class=bld>" in result:
return -1
else:
output_google = result[result.index('ld>'):result.index('</span>')]
money = float(''.join(ele for ele in output_google if ele.isdigit() or ele == '.'))
return money
def get_details(country_name): #returns the details of a given country
global splitted_line
empty = ()
with open("currency_details.txt", encoding='utf8') as currency_info:
for line in currency_info:
splitted_line = line.split(",")
if splitted_line[0] == country_name:
details = (splitted_line[0], splitted_line[1], splitted_line[2].strip())
return details
if splitted_line[0] != country_name:
return empty
def get_all_details():
country_dictionary = {}
currency_txtfile = open("currency_details.txt", "r", encoding="utf8")
for line in currency_txtfile:
country_dictionary[line.strip().split(",")[0]] = tuple(line.strip().split(","))
return country_dictionary
if __name__ == "__main__": #executed or imported
def convert_testing(amount, home_currency_code, location_currency_code):
convert_amount = convert(amount, home_currency_code, location_currency_code)
if convert_amount < 0:
return "{} {} {}->{} {}"\
.format("invalid conversion", amount, home_currency_code, location_currency_code, convert_amount)
else:
return "{0} {1} {2}->{3} {4} \n {5} {4} {3}->{2} {1}"\
.format("valid conversion", amount, home_currency_code, location_currency_code, convert_amount,
"valid conversion reverse")
def details(country_name):
details_tuple = get_details(country_name)
if details_tuple:
valid_or_not = "valid details"
else:
valid_or_not = "invalid details"
return "{} {} {}".format(valid_or_not, country_name, str(details_tuple))
print(convert_testing(1.00, "AUD", "AUD"))
print(convert_testing(1.00, "JPY", "ABC"))
print(convert_testing(1.00, "ABC", "USD"))
print(convert_testing(10.95, "AUD", "JPY"))
print(convert_testing(10.95, "AUD", "BGN"))
print(convert_testing(200.15, "BGN", "JPY"))
print(convert_testing(100, "JPY", "USD"))
print(convert_testing(19.99, "USD", "BGN"))
print(convert_testing(19.99, "USD", "AUD"))
print("")
print(details("Unknown"))
print(details("Japanese"))
print(details(""))
print(details("Australia"))
print(details("Japan"))
print(details("Hong Kong"))
|
import os
import time
import numpy as np
import argparse
import importlib
from tqdm import tqdm
import matplotlib.pyplot as plt
import tensorflow as tf
from keras import backend as K
from keras.models import load_model
from keras_contrib.layers.normalization import InstanceNormalization
import read_tools
import gc
resolution = 64
batch_size = 5
###############################################################
config={}
config['train_names'] = ['chair']
for name in config['train_names']:
config['X_train_'+name] = '../Data/'+name+'/train_25d/'
config['Y_train_'+name] = '../Data/'+name+'/train_3d/'
config['test_names']=['chair']
for name in config['test_names']:
config['X_test_'+name] = '../Data/'+name+'/test_25d/'
config['Y_test_'+name] = '../Data/'+name+'/test_3d/'
config['resolution'] = resolution
config['batch_size'] = batch_size
################################################################
# make chair40 training data dir 8x5
def make_training_data_dir(data):
for _ in range(8):
x_train_batch, Y_train_batch = data.load_X_Y_voxel_grids_train_next_batch()
for name in data.batch_name:
if not os.path.exists(name[24:-18]+'/'+name[24:-13]):
os.makedirs(name[24:-18]+'/'+name[24:-13])
# load data config
data = read_tools.Data(config)
make_training_data_dir(data)
# load network to generate sparse2dense 3D models
data = read_tools.Data(config)
autoencoder = load_model('./saved_model/g_AB_chair.h5')
for _ in range(len(data.X_train_files) // batch_size):
X_train_batch, Y_train_batch = data.load_X_Y_voxel_grids_train_next_batch()
g = autoencoder.predict(X_train_batch)
for j in range(batch_size):
name = data.batch_name[j][24:-13]
dir_name = name[:-5] + '/'+name
        print(name[:-5] + '/' + name + '/fake_' + name)
data.output_Voxels(dir_name + '/fake_' + name, g[j])
data.plotFromVoxels(dir_name + '/fake_' + name, g[j])
#data.plotFromVoxels(dir_name + '/'+name + '/sparse_' + name, X_train_batch[j])
#data.plotFromVoxels(dir_name + '/'+name + '/truth_' + name,Y_train_batch[j])
|
# Generated by Django 3.2.3 on 2021-06-12 15:46
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('pizza_app', '0013_auto_20210612_1542'),
]
operations = [
migrations.RemoveField(
model_name='ingredientsize',
name='multiple_option',
),
migrations.AddField(
model_name='ingredient',
name='multiple_option',
field=models.BooleanField(default=False),
),
]
|
# -*- coding: utf-8 -*-
"""
Created on Mon Feb 24 18:39:17 2020
@author: shaun
"""
import numpy as np
from gaussxw import gaussxw
import matplotlib.pyplot as plt
N=100
#grab key points and weights from Legendre polynomials
x,w=gaussxw(N)
#define the integrand with input x,y,z
def integrand(x,y,z):
f=1/(((x**2)+(y**2)+(z**2))**(3.0/2))
return f
#define the function z
def function(z):
density=(10.0*1000)/(100)
G=6.674*(10**(-11))
F=G*density*z*Gmulti(-5,5,-5,5,integrand,z)
return F
#take the triple integral
def Gmulti(a,b,c,d,f,z):
global N
global x
global w
#rescale x and weights to the domain
xp=0.5*(b-a)*x + 0.5*(b+a)
wpx=0.5*(b-a)*w
yp=0.5*(d-c)*x + 0.5*(d+c)
wpy=0.5*(d-c)*w
s=0
for ym in range(0,N):
#find the value of the function at every y and multiply it by the weights to get the sum
for xm in range(0,N):
#find the value of the function at every x and multiply it by the weights to get the sum
s+=wpy[ym]*wpx[xm]*f(xp[xm],yp[ym],z)
return s
#plot results
Z=np.linspace(0,10,100)
F=function(Z)
figure=plt.figure()
ax=figure.add_subplot()
ax.plot(Z,F)
ax.set_xlabel("Z")
ax.set_ylabel(r"$Force_{z}$")
figure.suptitle("Force in Z direction")
plt.show()
|
from flask import Flask
from backend.config.database import init_db,db_session
from backend.routes.Pekerjaan import pekerjaan_routes
from backend.routes.Pekerja import pekerja_routes
from backend.routes.Pekerja_pekerjaan import pekerja_pekerjaan_routes
from backend.routes.Aset import Aset_routes
from backend.routes.Status import Status_routes
app = Flask("dashboard")
|
import numpy as np
from tensorflow.keras.backend import int_shape
from tensorflow.keras.layers import Input, Cropping1D, add, Conv1D, GlobalAvgPool1D, Dense, Flatten
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.models import Model
from chrombpnet.training.utils.losses import multinomial_nll
import tensorflow as tf
import random as rn
import os
os.environ['PYTHONHASHSEED'] = '0'
def getModelGivenModelOptionsAndWeightInits(args, model_params):
#default params (can be overwritten by providing model_params file as input to the training function)
conv1_kernel_size=21
profile_kernel_size=75
num_tasks=1 # not using multi tasking
filters=int(model_params['filters'])
n_dil_layers=int(model_params['n_dil_layers'])
counts_loss_weight=float(model_params['counts_loss_weight'])
sequence_len=int(model_params["inputlen"])
out_pred_len=int(model_params["outputlen"])
print("params:")
print("filters:"+str(filters))
print("n_dil_layers:"+str(n_dil_layers))
print("conv1_kernel_size:"+str(conv1_kernel_size))
print("profile_kernel_size:"+str(profile_kernel_size))
print("counts_loss_weight:"+str(counts_loss_weight))
#read in arguments
seed=args.seed
np.random.seed(seed)
tf.random.set_seed(seed)
rn.seed(seed)
#define inputs
inp = Input(shape=(sequence_len, 4),name='sequence')
# first convolution without dilation
x = Conv1D(filters,
kernel_size=conv1_kernel_size,
padding='valid',
activation='relu',
name='bpnet_1st_conv')(inp)
layer_names = [str(i) for i in range(1,n_dil_layers+1)]
for i in range(1, n_dil_layers + 1):
# dilated convolution
conv_layer_name = 'bpnet_{}conv'.format(layer_names[i-1])
conv_x = Conv1D(filters,
kernel_size=3,
padding='valid',
activation='relu',
dilation_rate=2**i,
name=conv_layer_name)(x)
x_len = int_shape(x)[1]
conv_x_len = int_shape(conv_x)[1]
assert((x_len - conv_x_len) % 2 == 0) # Necessary for symmetric cropping
x = Cropping1D((x_len - conv_x_len) // 2, name="bpnet_{}crop".format(layer_names[i-1]))(x)
x = add([conv_x, x])
# Branch 1. Profile prediction
# Step 1.1 - 1D convolution with a very large kernel
prof_out_precrop = Conv1D(filters=num_tasks,
kernel_size=profile_kernel_size,
padding='valid',
name='prof_out_precrop')(x)
# Step 1.2 - Crop to match size of the required output size
cropsize = int(int_shape(prof_out_precrop)[1]/2)-int(out_pred_len/2)
assert cropsize>=0
assert (int_shape(prof_out_precrop)[1] % 2 == 0) # Necessary for symmetric cropping
prof = Cropping1D(cropsize,
name='logits_profile_predictions_preflatten')(prof_out_precrop)
    # Branch 1 output: flatten the cropped profile logits
    profile_out = Flatten(name="logits_profile_predictions")(prof)
    # Branch 2. Counts prediction
    # Step 2.1 - Global average pooling along the "length"; the result
    # size is the same as the "filters" parameter to the BPNet function
    gap_combined_conv = GlobalAvgPool1D(name='gap')(x) # acronym - gapcc
# Step 2.3 Dense layer to predict final counts
count_out = Dense(num_tasks, name="logcount_predictions")(gap_combined_conv)
# instantiate keras Model with inputs and outputs
model=Model(inputs=[inp],outputs=[profile_out, count_out])
model.compile(optimizer=Adam(learning_rate=args.learning_rate),
loss=[multinomial_nll,'mse'],
loss_weights=[1,counts_loss_weight])
return model
def save_model_without_bias(model, output_prefix):
    # nothing to do
    # all model architectures define this function
    # it is defined here to safeguard against the user accidentally passing the
    # save_model_without_bias argument when training a bias model
return
|
import gym
import math
import random
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
from collections import deque
from itertools import count
from PIL import Image
import tensorflow.compat.v1 as tf
tf.disable_v2_behavior() # testing on tensorflow 1
import time
class ReplayExp():
def __init__(self, N):
self.buffer = deque(maxlen = N)
def add(self, experience):
self.buffer.append(experience)
# take a random sample of k tuples of experience
def sample_exp(self, batch_size):
sample = random.choices(self.buffer, k = min(len(self.buffer), batch_size))
return map(list, zip(*sample)) # return as a tuple of list
#Deep Q-Network
class DQN():
# @params state_dim: dimension of each state --> NN input
# @params action_size: dimension of each action --> NN output
def __init__(self, state_dim, action_size):
#input current state
self.state_in = tf.placeholder(tf.float32, shape = [None, *state_dim]) #None represents the batch size
# current action a
self.action_in = tf.placeholder(tf.int32, shape = [None]) #batch size
# current estimate of Q-target
self.q_target_in = tf.placeholder(tf.float32, shape = [None]) #batch size
# encode actions in one-hot vector
action_one_hot = tf.one_hot(self.action_in, depth = action_size)
# hidden layer
self.hidden1 = tf.layers.dense(self.state_in, 150, activation = tf.nn.relu)
self.hidden2 = tf.layers.dense(self.hidden1, 120, activation = tf.nn.relu)
# output Q_hat
self.qhat = tf.layers.dense(self.hidden2, action_size, activation = None)
# Q values of states and their corresponding actions a for each state
# discard all non-taken actions
self.qhat_s_a = tf.reduce_sum(tf.multiply(self.qhat, action_one_hot), axis = 1)
# optimization objective
self.loss = tf.reduce_mean(tf.square(self.q_target_in - self.qhat_s_a)) #mean of batch square error
# We choose Adaptive Momentum as our optimization gradient descent
self.optimizer = tf.train.AdamOptimizer(learning_rate = 0.001).minimize(self.loss)
# update NN so that it estimate Q(s,a) closer to the target
def update_nn(self, session, state, action, q_target):
feed_info = {self.state_in: state, self.action_in: action, self.q_target_in: q_target}
session.run(self.optimizer, feed_dict = feed_info)
def get_qhat(self, session, state):
return session.run(self.qhat, feed_dict = {self.state_in: state}) # fill the placeholder
#The learning AI agent
class agent():
def __init__(self, env):
self.state_dim = env.observation_space.shape #state dimension
self.action_size = env.action_space.n # discrete action space (left or right)
# the agent's "brain", tell the agent what to do
self.brain = DQN(self.state_dim, self.action_size)
self.epsilon = 1.0 # exploring prob to avoid local optima
self.gamma = 0.99
self.replay_exp = ReplayExp(N = 1000000)
self.sess = tf.Session()
self.sess.run(tf.global_variables_initializer())
def end_session(self):
self.sess.close()
def get_action(self, state):
        qhat = self.brain.get_qhat(self.sess, [state]) # to match the placeholder dimension [None, *state_dim]
prob = np.random.uniform(0.0, 1.0)
if(prob < self.epsilon): # exploration
action = np.random.randint(self.action_size)
else: # exploitation
action = np.argmax(qhat)
return action
def train(self, state, action, next_state, reward, done):
# add exp to replay exp
self.replay_exp.add((state, action, next_state, reward, done))
states, actions, next_states, rewards, dones = self.replay_exp.sample_exp(batch_size = 80)
# Q(s', _) --> Q-values for next state
qhats_next = self.brain.get_qhat(self.sess, next_states)
# set all value actions of terminal state to 0
qhats_next[dones] = np.zeros((self.action_size))
q_targets = rewards + self.gamma * np.max(qhats_next, axis=1) # update greedily
self.brain.update_nn(self.sess, states, actions, q_targets)
if done:
self.epsilon = max(0.1, 0.98 * self.epsilon) #decaying exploration factor after each episode
env = gym.make("LunarLander-v2")
agent = agent(env)
num_episodes = 1000 #number of games
for episode in range(num_episodes):
state = env.reset() # reset starting state for each new episode
done = False
reward_total = 0
while not done:
action = agent.get_action(state)
next_state, reward, done, info = env.step(action)
env.render() # display the environment after each action
#base on the feedback from the environment, learn to update parameters for Q-value
agent.train(state, action, next_state, reward, done)
reward_total += reward
state = next_state
print("Episode number: ", episode, ", total reward:", reward_total)
time.sleep(1)
# test after training
agent.epsilon = 0.0 # stop exploring
for episode in range(100):
state = env.reset() # reset starting state for each new episode
done = False
while not done:
action = agent.get_action(state)
next_state, reward, done, info = env.step(action)
env.render()
state = next_state
time.sleep(1)
|
from django.conf.urls import patterns, include, url
from django.views.decorators.csrf import csrf_protect
from django.views.decorators.http import require_POST
from .urls import urlpatterns
from pikapika.common.decorators import serialize_as_json, param_from_post
def generic_ajax_func(func):
return require_POST(
csrf_protect(
serialize_as_json(
param_from_post(
func
)
)
)
)
def _get_prefix_for_module_name(module_name):
# module_name looks like: pikapika.ajax_services.module
# __name__ looks like: pikapika.ajax_services.decorators
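    # e.g. a function defined in pikapika.ajax_services.novel (hypothetical module
    # name) maps to the URL prefix "novel"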
parts = module_name.split(".")
common_prefix = __name__.split(".")[:-1]
while common_prefix:
assert parts[0] == common_prefix[0]
parts.pop(0)
common_prefix.pop(0)
assert parts
return "/".join(parts)
def register_service(func):
global urlpatterns
urlpatterns += patterns(
'',
url(
r"^{}/{}$".format(
_get_prefix_for_module_name(func.__module__),
func.__name__,
),
func,
),
)
return func
|
#!/usr/bin/env python
# coding: utf-8
"""
Utility functions and classes
@version 1.0
@author Remzi Celebi
"""
import datetime
import pandas as pd
from rdflib import Graph, URIRef, Literal, RDF, ConjunctiveGraph, Namespace
DC = Namespace("http://purl.org/dc/terms/")
DCAT = Namespace("http://www.w3.org/ns/dcat#")
RDFS = Namespace("http://www.w3.org/2000/01/rdf-schema#")
PAV = Namespace("http://purl.org/pav/")
FOAF = Namespace("http://xmlns.com/foaf/0.1/")
class DataResource:
def __init__(self, qname, graph= None):
if graph != None:
self.setRDFGraph(graph)
else:
self.setQName(qname)
self.graph = ConjunctiveGraph(identifier = self.getQName())
self.description = None
self.homepage = None
self.rights = None
self.themes = None
self.sources = None
self.uri = None
self.title = None
self.qname = None
self.create_date = None
self.issued_date = None
self.creator = None
self.download_url = None
self.retrieved_date = None
self.publisher = None
def setRDFGraph(self, graph): self.graph = graph
def getRDFGraph(self): return self.graph
def setURI(self, uri): self.uri = URIRef(uri)
def getURI(self): return self.uri
def setQName(self, qname): self.qname = qname
def getQName(self): return self.qname
def setTitle(self, title): self.title = title
def getTitle(self): return self.title
def setDescription(self, description): self.description = description
def getDescription(self): return self.description
def setPublisher(self, publisher): self.publisher = publisher
def getPublisher(self): return self.publisher
def setHomepage(self, homepage): self.homepage = homepage
def getHomepage(self): return self.homepage
def setSources(self, sources): self.sources = sources
def getSources(self): return self.sources
def addSource(self, source):
if self.sources == None: self.sources =[]
self.sources.append(source)
def setCreator(self, creator): self.creator = creator
def getCreator(self): return self.creator
def setCreateDate(self, create_date): self.create_date = create_date
def getCreateDate(self): return self.create_date
def setRetrievedDate(self, retrieved_date): self.retrieved_date = retrieved_date
def getRetrievedDate(self): return self.retrieved_date
def setIssuedDate(self, issued_date): self.issued_date = issued_date
def getIssuedDate(self): return self.issued_date
def setVersion(self, version): self.version = version
def getVersion(self): return self.version
def setFormat(self, format): self.format = format
def getFormat(self): return self.format
def setMediaType(self, media_type): self.media_type = media_type
def getMediaType(self): return self.media_type
def setLicense(self, license): self.license = license
def getLicense(self): return self.license
def setRights(self, rights): self.rights = rights
def getRights(self): return self.rights
def addRight(self, right):
if self.rights == None: self.rights =[]
self.rights.append(right)
def setLocation(self, location): self.location = location
def getLocation(self): return self.location
def setDataset(self, dataset): self.dataset = dataset
def getDataset(self): return self.dataset
def setDownloadURL(self, download_url): self.download_url = download_url
def getDownloadURL(self): return self.download_url
def toRDF(self):
label = ''
if self.getTitle() != None and self.getTitle() != '':
label = self.getTitle()
if self.getCreateDate():
label +=" generated at "+self.getCreateDate()
dataset_uri = self.getURI()
graph = self.getRDFGraph()
graph.add((dataset_uri, RDF['type'], URIRef('http://www.w3.org/ns/dcat#Distribution')))
graph.add((dataset_uri, RDFS['label'], Literal(label)))
if self.getTitle() != None:
graph.add((dataset_uri, DC['title'], Literal( self.getTitle() )))
if self.getDescription() != None:
graph.add((dataset_uri, DC['description'], Literal( self.getDescription() )))
if self.getCreateDate() != None:
graph.add((dataset_uri, DC['created'], Literal( self.getCreateDate() )))
if self.getIssuedDate() != None:
graph.add((dataset_uri, DC['issued'], Literal( self.getIssuedDate() )))
if self.getRetrievedDate() != None:
graph.add((dataset_uri, PAV['retrievedOn'], Literal( self.getRetrievedDate() )))
if self.getSources() != None:
for source in self.getSources() :
if source != None :
graph.add((dataset_uri, DC['source'], URIRef( source )))
if self.getDataset() != None:
graph.add(( URIRef( self.getDataset() ), DCAT['distribution'], dataset_uri ))
if self.getCreator() != None:
graph.add((dataset_uri, DC['creator'], URIRef( self.getCreator() )))
if self.getPublisher() != None:
graph.add((dataset_uri, DC['publisher'], URIRef( self.getPublisher() )))
if self.getHomepage() != None:
graph.add((dataset_uri, FOAF['page'], URIRef( self.getHomepage() )))
if self.getDownloadURL() != None:
graph.add((dataset_uri, DCAT['downloadURL'], URIRef( self.getDownloadURL() )))
if self.getVersion() != None:
graph.add((dataset_uri, DC['hasVersion'], Literal( self.getVersion() )))
if self.getMediaType() != None:
graph.add((dataset_uri, DCAT['mediaType'], Literal( self.getMediaType() )))
if self.getFormat() != None:
graph.add((dataset_uri, DC['format'], Literal( self.getFormat() )))
if self.getDataset() != None:
graph.add((dataset_uri, DC['source'], URIRef( self.getDataset() )))
if self.getLicense() != None:
graph.add((dataset_uri, DC['license'], URIRef( self.getLicense() )))
if self.getRights() != None:
for right in self.getRights() :
if right != None :
graph.add((dataset_uri, DC['rights'], Literal( right )))
return graph
class Dataset:
def __init__(self, qname, graph= None):
self.rights = []
self.themes = []
self.homepage = None
self.description = None
self.version = None
self.license = None
self.download_url = None
if graph != None:
self.setRDFGraph(graph)
else:
self.setQName(qname)
self.setRDFGraph( ConjunctiveGraph(identifier = self.getQName()) )
def setQName(self, qname): self.qname = qname
def getQName(self): return self.qname
def setRDFGraph(self, graph): self.graph = graph
def getRDFGraph(self): return self.graph
def setURI(self, uri): self.uri = URIRef(uri)
def getURI(self): return self.uri
def setVersion(self, version): self.version = version
def getVersion(self): return self.version
def setThemes(self, themes): self.themes = themes
def getThemes(self): return self.themes
def addTheme(self, theme):
if self.themes == None: self.themes =[]
self.themes.append(theme)
def setRights(self, rights): self.rights = rights
def getRights(self): return self.rights
def addRight(self, right):
if self.rights == None: self.rights =[]
self.rights.append(right)
def setLicense(self, license): self.license = license
def getLicense(self): return self.license
def setCatalog(self, catalog): self.catalog = catalog
def getCatalog(self): return self.catalog
def setTitle(self, title): self.title = title
def getTitle(self): return self.title
def setDescription(self, description): self.description = description
def getDescription(self): return self.description
def setPublisher(self, publisher): self.publisher = publisher
def getPublisher(self): return self.publisher
def setPublisherName(self, publisher_name): self.publisher_name = publisher_name
def getPublisherName(self): return self.publisher_name
def setHomepage(self, homepage): self.homepage = homepage
def getHomepage(self): return self.homepage
def setDownloadURL(self, download_url): self.download_url = download_url
def getDownloadURL(self): return self.download_url
def setRDFFile(self, rdf_file): self.rdf_file = rdf_file
def getRDFFile(self): return self.rdf_file
def toRDF(self):
dataset_uri = self.getURI()
graph = self.getRDFGraph()
graph.add((dataset_uri, RDF['type'], DC['Dataset'] ))
if self.getTitle() != None:
graph.add((dataset_uri, DC['title'], Literal( self.getTitle() )))
if self.getDescription() != None:
graph.add((dataset_uri, DC['description'], Literal( self.getDescription() )))
if self.getDownloadURL() != None:
graph.add((dataset_uri, DCAT['downloadURL'], URIRef( self.getDownloadURL() )))
if self.getVersion() != None:
graph.add((dataset_uri, DC['hasVersion'], Literal( self.getVersion() )))
if self.getPublisher() != None:
publisher_uri = URIRef( self.getPublisher() )
graph.add((dataset_uri, DC['publisher'], publisher_uri))
if self.getPublisherName() != None:
graph.add((publisher_uri, RDF.type, FOAF['Organization'] ))
graph.add((publisher_uri, FOAF['name'], Literal( self.getPublisherName() )))
if self.getHomepage() != None:
graph.add((dataset_uri, FOAF['page'], URIRef( self.getHomepage() )))
if self.getLicense() != None:
graph.add((dataset_uri, DC['license'], URIRef( self.getLicense() )))
for right in self.getRights() :
if right != None :
graph.add((dataset_uri, DC['rights'], Literal( right )))
for theme in self.getThemes() :
if theme != None :
graph.add((dataset_uri, DCAT['theme'], URIRef( theme )))
return graph
def to_rdf(g, df, column_types, row_uri):
"""
Parameters
----------
g : input rdflib.Graph
df: DataFrame to be converted into RDF Graph
    column_types: dictionary of column and its type, type can be URI or Literal
row_uri: rdf:type value for row index, should be URI
Returns
-------
g: rdflib.Graph generated from DataFrame object
"""
for (index, series) in df.iterrows():
g.add((URIRef(index), RDF.type, URIRef(row_uri)) )
for (column, value) in series.iteritems():
if column_types[column] == 'URI':
g.add((URIRef(index), URIRef(column), URIRef(value)))
else:
g.add((URIRef(index), URIRef(column), Literal(value)))
return g
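# A minimal usage sketch for to_rdf; the column URI, row type, and cell values below
# are purely illustrative:
def _to_rdf_example():
    df = pd.DataFrame(
        {'http://example.org/vocab/interactsWith': ['http://example.org/gene/B']},
        index=['http://example.org/gene/A'])
    g = Graph()
    return to_rdf(g, df,
                  column_types={'http://example.org/vocab/interactsWith': 'URI'},
                  row_uri='http://example.org/vocab/Gene')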
def test():
#generate dataset
graphURI ='http://fairworkflows.org/openpredict_resource:fairworkflows.dataset.openpredict.interactome.R1'
data_source = Dataset(qname=graphURI)
data_source.setURI(graphURI)
data_source.setTitle('The Human Interactome Dataset')
data_source.setDescription('Human Interactome data used in "Uncovering Disease-Disease Relationships Through The Human Interactome" study')
data_source.setPublisher('https://science.sciencemag.org/')
data_source.setPublisherName('American Association for the Advancement of Science')
data_source.addRight('no-commercial')
data_source.addRight('use')
data_source.addTheme('http://www.wikidata.org/entity/Q896177')
data_source.addTheme('http://www.wikidata.org/entity/Q25113323')
data_source.setLicense('https://www.sciencemag.org/about/terms-service')
data_source.setHomepage('https://dx.doi.org/10.1126%2Fscience.1257601')
data_source.setVersion('1.0')
#generate dataset distribution
data_dist = DataResource(qname=graphURI, graph = data_source.toRDF())
data_dist.setURI('https://media.nature.com/full/nature-assets/srep/2016/161017/srep35241/extref/srep35241-s3.txt')
data_dist.setTitle('The Human Interactome Dataset (srep35241-s3.txt)')
data_dist.setDescription('This file contains the Human Interactome used in "Uncovering Disease-Disease Relationships Through The Human Interactome" study')
data_dist.setLicense('https://www.sciencemag.org/about/terms-service')
data_dist.setVersion('1.0')
data_dist.setFormat('text/tab-separated-value')
data_dist.setMediaType('text/tab-separated-value')
data_dist.setPublisher('https://science.sciencemag.org/')
data_dist.addRight('no-commercial')
data_dist.addRight('use')
data_dist.setRetrievedDate(datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S"))
data_dist.setDataset(data_source.getURI())
    #generate RDF data distribution
rdf_dist = DataResource(qname=graphURI, graph = data_dist.toRDF() )
rdf_dist.setURI('https://github.com/fair-workflows/openpredict/blob/master/data/rdf/human_interactome.nq.gz')
rdf_dist.setTitle('RDF Version of the Human Interactome')
rdf_dist.setDescription('This file contains the Human Interactome used in "Uncovering Disease-Disease Relationships Through The Human Interactome" study')
rdf_dist.setLicense('https://www.sciencemag.org/about/terms-service')
rdf_dist.setVersion('1.0')
rdf_dist.setFormat('application/n-quads')
rdf_dist.setMediaType('application/n-quads')
rdf_dist.addRight('use-share-modify')
rdf_dist.addRight('by-attribution')
rdf_dist.addRight('restricted-by-source-license')
rdf_dist.setCreateDate(datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S"))
rdf_dist.setCreator('https://github.com/fair-workflows/openpredict/src/HumanInteractome.py')
rdf_dist.setDownloadURL('https://github.com/fair-workflows/openpredict/blob/master/data/rdf/human_interactome.nq.gz')
rdf_dist.setDataset(data_dist.getURI())
g = rdf_dist.toRDF()
outfile ='../data/rdf/human_interactome.nq'
g.serialize(outfile, format='nquads')
print('RDF is generated at '+outfile)
#test()
|
# AUTO GENERATED FILE - DO NOT EDIT
from dash.development.base_component import Component, _explicitize_args
class SyntaxHighlighter(Component):
"""A SyntaxHighlighter component.
A component for pretty printing code.
Keyword arguments:
- children (string | list; optional): The text to display and highlight
- id (string; optional)
- language (string; optional): the language to highlight code in.
- theme (a value equal to: 'light', 'dark'; optional): theme: light or dark
- customStyle (dict; optional): prop that will be combined with the top level style on the pre tag, styles here will overwrite earlier styles.
- codeTagProps (dict; optional): props that will be spread into the <code> tag that is the direct parent of the highlighted code elements. Useful for styling/assigning classNames.
- useInlineStyles (boolean; optional): if this prop is passed in as false, react syntax highlighter will not add style objects to elements, and will instead append classNames. You can then style the code block by using one of the CSS files provided by highlight.js.
- showLineNumbers (boolean; optional): if this is enabled line numbers will be shown next to the code block.
- startingLineNumber (number; optional): if showLineNumbers is enabled the line numbering will start from here.
- lineNumberContainerStyle (dict; optional): the line numbers container default to appearing to the left with 10px of right padding. You can use this to override those styles.
- lineNumberStyle (dict; optional): inline style to be passed to the span wrapping each number. Can be either an object or a function that recieves current line number as argument and returns style object.
- wrapLines (boolean; optional): a boolean value that determines whether or not each line of code should be wrapped in a parent element. defaults to false, when false one can not take action on an element on the line level. You can see an example of what this enables here
- lineStyle (dict; optional): inline style to be passed to the span wrapping each line if wrapLines is true. Can be either an object or a function that recieves current line number as argument and returns style object."""
@_explicitize_args
def __init__(self, children=None, id=Component.UNDEFINED, language=Component.UNDEFINED, theme=Component.UNDEFINED, customStyle=Component.UNDEFINED, codeTagProps=Component.UNDEFINED, useInlineStyles=Component.UNDEFINED, showLineNumbers=Component.UNDEFINED, startingLineNumber=Component.UNDEFINED, lineNumberContainerStyle=Component.UNDEFINED, lineNumberStyle=Component.UNDEFINED, wrapLines=Component.UNDEFINED, lineStyle=Component.UNDEFINED, **kwargs):
self._prop_names = ['children', 'id', 'language', 'theme', 'customStyle', 'codeTagProps', 'useInlineStyles', 'showLineNumbers', 'startingLineNumber', 'lineNumberContainerStyle', 'lineNumberStyle', 'wrapLines', 'lineStyle']
self._type = 'SyntaxHighlighter'
self._namespace = 'dash_core_components'
self._valid_wildcard_attributes = []
self.available_properties = ['children', 'id', 'language', 'theme', 'customStyle', 'codeTagProps', 'useInlineStyles', 'showLineNumbers', 'startingLineNumber', 'lineNumberContainerStyle', 'lineNumberStyle', 'wrapLines', 'lineStyle']
self.available_wildcard_properties = []
_explicit_args = kwargs.pop('_explicit_args')
_locals = locals()
_locals.update(kwargs) # For wildcard attrs
args = {k: _locals[k] for k in _explicit_args if k != 'children'}
for k in []:
if k not in args:
raise TypeError(
'Required argument `' + k + '` was not specified.')
super(SyntaxHighlighter, self).__init__(children=children, **args)
def __repr__(self):
if(any(getattr(self, c, None) is not None
for c in self._prop_names
if c is not self._prop_names[0])
or any(getattr(self, c, None) is not None
for c in self.__dict__.keys()
if any(c.startswith(wc_attr)
for wc_attr in self._valid_wildcard_attributes))):
props_string = ', '.join([c+'='+repr(getattr(self, c, None))
for c in self._prop_names
if getattr(self, c, None) is not None])
wilds_string = ', '.join([c+'='+repr(getattr(self, c, None))
for c in self.__dict__.keys()
if any([c.startswith(wc_attr)
for wc_attr in
self._valid_wildcard_attributes])])
return ('SyntaxHighlighter(' + props_string +
(', ' + wilds_string if wilds_string != '' else '') + ')')
else:
return (
'SyntaxHighlighter(' +
repr(getattr(self, self._prop_names[0], None)) + ')')
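# Illustrative usage in a Dash app (hypothetical app code, shown only as a comment
# because this file is auto-generated; SyntaxHighlighter shipped with older
# versions of dash_core_components):
# import dash
# import dash_core_components as dcc
# app = dash.Dash(__name__)
# app.layout = dcc.SyntaxHighlighter(
#     'print("hello, world")',
#     language='python',
#     theme='dark',
#     showLineNumbers=True,
# )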
|
#encoding=utf-8
'''
Created on 2014-05-22
@author: yangluo
'''
import win32api
import win32con
from win32api import GetSystemMetrics
from ctypes import windll
from win32gui import GetCursorPos
screenMetrics = (GetSystemMetrics(0),GetSystemMetrics(1))
bottomOfScreen = screenMetrics
preDevicePos = [0,0]
# jumpButtonPos = (100,screenMetrics[1]-50)
# restartButtonPos = (150,screenMetrics[1]-50)
# fullscreenButtonPos = (300,screenMetrics[1]-50)
# escapeButtonPos = (200,screenMetrics[1]-50)
# startButtonPos = (250,screenMetrics[1]-50)
keymap = { 'fullscreen': 116,
'last': 38,
'next': 32,
'escape': 27,
}
def execute(keytype="move",key='next',keypos=bottomOfScreen):
if keytype=="move":
if preDevicePos[0]==0 and preDevicePos[1]==0:
preDevicePos[0] = keypos[0]
preDevicePos[1] = keypos[1]
print "******************first************"
return
        _newpos = [keypos[0]-preDevicePos[0], keypos[1]-preDevicePos[1]]
        print("_newpos is " + str(_newpos))
        currpos = GetCursorPos()
        newpos = myVector(currpos, _newpos)
        print('new pos is ' + str(newpos))
setCursorPos(newpos)
preDevicePos[0] = keypos[0]
preDevicePos[1] = keypos[1]
# mouseclick(keypos)
elif keytype=="commonkey":
_key = keymap.get(key)
if _key == None:
return False
keyboardevent(key)
else:
print "no this keytype: %s"%keytype
return False
return True
def mouseclick(targetCor):
win32api.mouse_event(win32con.MOUSEEVENTF_LEFTDOWN,targetCor[0],targetCor[1])
win32api.Sleep(50)
win32api.mouse_event(win32con.MOUSEEVENTF_LEFTUP,targetCor[0],targetCor[1])
def setCursorPos(mousepos):
windll.user32.SetCursorPos(mousepos[0],mousepos[1])
return True
def keyboardevent(key, delay=45):
win32api.keybd_event(key,0,0,0)
win32api.Sleep(delay)
win32api.keybd_event(key,0,win32con.KEYEVENTF_KEYUP,0)
def myVector(currentPos, newPos):
resultPos = (currentPos[0]+newPos[0], currentPos[1]+newPos[1])
return resultPos
def Istolong(newpos):
sub = (newpos[0]-preDevicePos[0],newpos[1]-preDevicePos[1])
if -3<sub[0]<3 and -3<sub[1]<3:
return newpos
_newpos = (newpos[0]+sub[0],newpos[1]+sub[1])
return _newpos
|
from contextlib import contextmanager
from sqlalchemy import create_engine, text
from sqlalchemy.engine.url import URL
from sqlalchemy.orm import sessionmaker
from config.db import (
db_teradata_prod
)
DB_URL = 'teradata://'+ db_teradata_prod['username'] +':' + db_teradata_prod['password'] + '@'+ db_teradata_prod['system'] + '/'
def db_connect():
print(DB_URL)
engine = create_engine(DB_URL, pool_recycle=900)
engine.dialect.supports_sane_rowcount = False
return engine
engine = db_connect()
Session = sessionmaker(bind=engine)
@contextmanager
def session_scope():
"""Provide a transactional scope around a series of operations."""
session = Session()
try:
yield session
session.commit()
except:
session.rollback()
raise
finally:
session.close()
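# A minimal usage sketch (illustrative; the query below is an assumption, not part of this module):
#
# with session_scope() as session:
#     rows = session.execute(text("SELECT 1")).fetchall()
#     print(rows)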
|
import click
import feedparser
import os.path
# import time
import json
@click.command()
@click.option('--address', '-a', multiple=True, help="Add address of site to store, can be multiple sites \n"
"Example: -a google.com -a bing.com")
@click.option('--list', '-l', is_flag=True, default=False, help="Lists current addresses stored")
@click.option('--remove', '-r', help="Remove address from store")
#@click.option('--live', '-L', is_flag=True, default=False, help="Keeps alive, waiting for new " # i give up, no one uses etag/modified
# "feed items and writing them")
@click.option('--match', '-m', default=None, help="Get all news matching keyword in title")
@click.option('--print', '-p', is_flag=True, default=False, help="Prints news to screen")
@click.option('--clear', '-c', is_flag=True, default=False, help="Removes all saved addresses")
# main function calling other functions
def rss(address, list, remove, match, print, clear):
if clear:
if os.path.isfile('address.txt'):
os.remove('address.txt')
click.echo('Addresses cleared')
else:
click.echo('Error attempting to clear addresses, no previous addresses found to clear.')
if remove:
# check that remove function returns value
#write_address(remove_item(remove))
r = remove_item(remove)
if r:
# write value to file
write_address(r)
else:
os.remove('address.txt')
#click.echo('Error attempting to write addresses after removal; addresses not found')
if list:
print_list(open_address())
if address:
add_address(address)
if print:
news = parse(open_address())
print_news(news, match)
'''
this whole feature is on hold until i can come up with a
good way to get only the newest items from feed
# pre-parse news for later comparison in while loop
if live:
click.echo('Live is {}'.format(live)) # debug
news = parse(open_address())
i = 0
while live:
news = parse(open_address())
for item in news:
check_update(item, item.href)
# click.echo('Going live!') # debug
# check if new news have been added
#new_news = parse(open_address())
# diff = list(set(new_news) - set(news))
#diff = set(new_news) - set(news)
#diff = get_difference(new_news, news)
#diff = set(new_news).difference(set(news))
# click.echo(diff)
if i == 0:
old_news = print_news(news, match)
if diff:
click.echo('Diff is true') # debug
print_news(diff, match)
else:
click.echo('Diff is false') # debug
# click.echo(type(diff))
click.echo(i)
if i > 0:
click.echo('I is greater than 1!')
diff = list(set(news) - set(new_news))
list(set(news) - set(new_news))
click.echo('Diff: {}'.format(diff)) # debug
if diff:
click.echo('Diff is true') # debug
print_news(diff, match)
else:
click.echo('Diff is false') # debug
#news = parse(open_address())
click.echo('Sleeping 5 minutes') # debug
time.sleep(300)
i += 1
'''
def get_difference(list1, list2):
s = set(list2)
list3 = [x for x in list1 if x not in s]
return list(list3)
def check_update(old_feed, address):
# store the etag and modified
last_etag = old_feed.etag
last_modified = old_feed.modified
# check if new version exists
feed_update = feedparser.parse(address, etag=last_etag, modified=last_modified)
# return based on result
if feed_update.status == 304:
return False
else:
return True
def remove_item(remove):
address = open_address()
if address:
try:
address.remove(remove)
click.echo('Removed {}'.format(remove))
return address
except ValueError:
click.echo('Address to be removed not found')
else:
click.echo('Error attempting to delete address; no addresses found')
def print_list(address):
if address:
for line in address:
click.echo(line)
else:
click.echo('Error attempting to list addresses; no addresses found')
def print_news(news, match):
click.echo('\n\n')
result = []
if match:
# search for matching word in list of lists of news
for site in news:
for item in site.entries:
if match in item.title:
click.secho("\n{} \n {}".format(item.title, item.link), fg='yellow')
# add to results
result.append(item.title)
result.append(item.link)
else:
for site in news:
for item in site.entries:
click.secho("\n{}".format(item.title))
click.secho("{}".format(item.link), fg='cyan')
# add to results
result.append(item.title)
result.append(item.link)
click.echo('\n\n')
return result
# writes list of addresses to address.list file
def add_address(address):
# convert address to list
address = list(address)
# check if values are stored in file from before
if open_address():
# get old values, add new ones
# click.echo('Address found in add address, appending and writing to file') # debug
old = open_address()
new_address = old + address
#new_address = [(open_address()).append(address)]
# write old + new to file
with open('address.txt', 'w') as handle:
json.dump(new_address, handle)
# pickle.dump(new_address, handle, protocol=pickle.HIGHEST_PROTOCOL)
# write values directly to file
else:
# click.echo('No Address found in write address, writing to file') # debug
write_address(address)
def write_address(address):
# convert address to list to prevent
# problems with tuples when adding new data
if address:
# click.echo('Address found in write address, writing to file') # debug
new_address = list(address)
with open('address.txt', 'w') as handle:
json.dump(new_address, handle)
# pickle.dump(new_address, handle, protocol=pickle.HIGHEST_PROTOCOL)
else:
click.echo('Error attempting to write address to file; no address found')
# used to open address.list file
def open_address():
# check if file exist
if os.path.isfile('address.txt'):
# click.echo('File found, opening') # debug
# if it does, open it
with open('address.txt', 'r') as handle:
return json.load(handle)
# return list(pickle.load(handle))
# no file exists, can't open file
else:
# click.echo('No file found, opening file failed') # debug
return None
def parse(urls):
if urls:
parsed = []
for url in urls:
parsed.append(feedparser.parse(url))
return parsed
else:
click.echo('Error attempting to parse URLs, no URLs found')
if __name__ == '__main__':
rss()
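# Example invocations (illustrative; the script name and feed URL are assumptions):
#   python rss.py -a https://example.com/feed.xml   # store a feed address
#   python rss.py -l                                 # list stored addresses
#   python rss.py -p -m python                       # print news whose titles contain "python"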
|
# Python Standard Libraries
# N/A
# Third-Party Libraries
from django.conf.urls import url
from rest_framework.routers import SimpleRouter
from rest_framework.schemas import get_schema_view
from rest_framework_jwt.views import obtain_jwt_token
from rest_framework_jwt.views import refresh_jwt_token
from rest_framework_jwt.views import verify_jwt_token
# Custom Libraries
from .versions import v1
app_name = "api"
urlpatterns = []
urlpatterns += [
url(r'^v1/$', get_schema_view()),
url(r"^v1/login/", v1.log_in),
url(r"^v1/ping/", v1.ping),
url(r"^v1/signup/", v1.sign_up),
url(r"^v1/dig/", v1.dig),
# Diggers
url(r"^v1/digger/$",
v1.DiggerViewSet.as_view({"get": "list", }),
name="diggers"),
url(r"^v1/digger/(?P<pk>[0-9]+)/$",
v1.DiggerViewSet.as_view({"get": "retrieve", }),
name="digger"),
# Item Shop Items
url(r"^v1/shop/item/$",
v1.ItemViewSet.as_view({"get": "list",
"post": "create", }),
name="items"),
url(r"^v1/shop/item/(?P<pk>[0-9]+)/",
v1.ItemViewSet.as_view({"get": "retrieve",
"delete": "destroy", }),
name="item"),
# Item Shop Purchases
url(r"^v1/shop/purchase/$",
v1.PurchaseViewSet.as_view({"get": "list",
"post": "create", }),
name="purchases"),
url(r"^v1/shop/purchase/(?P<pk>[0-9]+)/",
v1.PurchaseViewSet.as_view({"get": "retrieve",
"delete": "destroy", }),
name="purchase"),
# Gifts (Items given)
url(r"^v1/gift/$",
v1.GiftViewSet.as_view({"get": "list", }),
name="gifts"),
# Presents (Items received)
url(r"^v1/present/$",
v1.PresentViewSet.as_view({"get": "list", }),
name="presents"),
]
|
import numpy as np
from OpenGL.GL import *
from OpenGL.GLU import *
from glumpy import app, gloo, gl, glm
from glumpy.graphics.text import FontManager
from glumpy.graphics.collections import GlyphCollection
from glumpy.transforms import Position, OrthographicProjection
import glumpy
import time
import random
from math import *
import Mesh
myMesh = Mesh.Mesh()
print(myMesh.nV)
print(myMesh.nF)
myMesh.load('truck.txt', ply=True, normalize = True)
print(myMesh.nV)
print(myMesh.nF)
asp = 1.0
angle = 0.0
cur_time = -1.0
window = app.Window(700, 600, "hello")
def cameraLensSet(ratio = 1.0):
    # set up the camera lens
    glMatrixMode(GL_PROJECTION)
    glLoadIdentity() # reset to the default lens
    gluPerspective(60, ratio, 0.01, 100) # y field of view, aspect ratio, near plane, far plane
glClearColor(1.0, 0.0, 0.0, 1.0) # RGBA
@window.event
def on_draw(dt):
global angle, cur_time
old_time = cur_time
cur_time = time.time()
dt = cur_time - old_time
    glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT) # when using the z-buffer, clear it here as well
    # set the camera position
glMatrixMode(GL_MODELVIEW)
glLoadIdentity()
angle += 0.01
gluLookAt(cos(angle)*20.0, 1.5, sin(3.0*angle)*3.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0)
#glutWireCube(1.0)
#myMesh.drawShadedFace()
#glDepthFunc(GL_LEQUAL)
#myMesh.drawWireFast()
#glDepthFunc(GL_LESS)
random.seed(1)
for i in range(1000):
glPushMatrix()
glTranslatef(i%30 - 12 + 0.5*random.random(), -1.0, i/30 - 12 + 0.5*random.random()) # random.random()*40-20, 0.0, random.random()*40-20)
glRotatef(random.random()*360-180, 0, 1, 0)
glRotatef(-90, 1.0, 0.0, 0.0)
glScale(1.0, 1.0, 1.0)
glColor3f(random.random()*0.5, 1.0, random.random()*0.5)
myMesh.drawShadedFace(i)
glPopMatrix()
glFlush()
@window.event
def on_resize(width, height):
global asp
asp = float(width) / height
cameraLensSet(asp)
glViewport(0, 0, width, height)
print('reshape', asp)
@window.event
def on_init():
glLineWidth(2)
    glEnable(GL_DEPTH_TEST) # "enable" depth testing using the z-buffer
cameraLensSet(asp)
glEnable(GL_LIGHTING)
glEnable(GL_LIGHT0)
glEnable(GL_COLOR_MATERIAL)
app.run()
|
import numpy as np
import itertools as it
class SpectralLearn :
problemfile = ""
order = 0
p21 = None
p31 = []
sym_cnt = 0
U = None
V = None
Sig = None
Bx = []
Sig_inv = None
b0 = None
binf = None
p1 = None
def __init__(self, probfile, ord):
self.problemfile = probfile
self.order = ord
def initialize(self, line):
line = line.strip(" ").split(" ")
self.sym_cnt = int(line[1])
self.sym_cnt +=1
self.p1 = np.zeros((1,self.sym_cnt))
self.p21 = np.zeros((self.sym_cnt,self.sym_cnt))
for i in range(0,self.sym_cnt) :
self.p31.append(np.zeros((self.sym_cnt,self.sym_cnt)))
def learn(self):
f = open(self.problemfile, "r")
for i,line in enumerate(f):
if(i==0):
self.initialize(line)
continue
obs = [int(j) for j in line.strip(" ").split(" ")]
obs.append(self.sym_cnt -1)
self.addToP1(obs[1:])
self.addToP21(obs[1:])
self.addToP31(obs[1:])
self.normalize()
self.U, self.Sig, self.V = self.svdP21()
sig = np.zeros((len(self.Sig),len(self.Sig)))
for i in range(0,len(self.Sig)):
sig[i][i] = self.Sig[i]
self.Sig = sig
self.Sig_inv = np.linalg.inv(self.Sig)
self.computeBx()
self.computeB0_Binf()
def addToP1(self, obs):
for i in obs:
self.p1[0][i] += 1
def addToP21(self, obs):
for first, second in it.izip(obs, obs[1:]):
self.p21[first][second] += 1
def addToP31(self,obs):
for first, second, third in it.izip(obs, obs[1:], obs[2:]):
self.p31[second][first][third] += 1
def normalize(self):
if(1):
self.p21 += 0.0000000001
self.p1 = self.p1 / np.sum(self.p1)
self.p21 = self.p21 / np.sum(self.p21)
for i,m in enumerate(self.p31):
sum_m = np.sum(m)
if(sum_m == 0.0):
continue
self.p31[i] = m / sum_m
def svdP21(self):
return np.linalg.svd(self.p21)
def computeBx(self):
for i,p31 in enumerate(self.p31):
a = np.dot(np.dot(np.dot(self.U , p31) , self.V) , self.Sig_inv)
self.Bx.append(a)
def computeB0_Binf(self):
self.b0 = np.dot(self.U , self.p1[0].T)
np.linalg.inv(np.dot(self.p21.T , self.U ))
self.binf = np.dot(np.linalg.inv(np.dot(self.p21.T , self.U )) , self.p1.T)
def predictRanks(self, t_obs):
rank = []
temp_lik = self.b0
for o in t_obs:
temp_lik = np.dot(temp_lik , self.Bx[o])
for i in range(self.sym_cnt):
            t = np.dot(np.dot(temp_lik , self.Bx[i]) , self.binf)
print(t)
rank.append(t )
print(rank)
r = np.argsort(rank)[::-1]
r[r == self.sym_cnt -1] = -1
return r,rank
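# A minimal usage sketch (illustrative; the file name and observation sequence are assumptions):
#
# learner = SpectralLearn("sequences.txt", 3)
# learner.learn()
# ranks, scores = learner.predictRanks([2, 5, 1])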
|
import math
import time
start= time.clock()
def isprime(n):
if n<2:
return False
elif n==2:
return True
else:
for x in xrange(2, int(math.ceil(math.sqrt(n)))+1):
if n%x==0:
return False
return True
def prime(n):
list=[];
i=0;
while len(list)<n:
if isprime(i):
list.append(i)
i+=1
return list[n-1]
s=0
n=1
while n<2000000:
if isprime(n):
s+=n
n+=1
print s
end= time.clock()
elapsed = end - start
print elapsed
|
from backbone import *
import re
'''
This is to check that SAN disk connectivity is done correctly.
This Script will check the following on a system of datanodes :
1. Check SAN interface
2. Check tps fs configs on datanode
3. Check SAN memory available
'''
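# Sketch of the 'show tps fs' output the parsing below assumes (illustrative, not captured from a real node):
#   Enabled: yes
#   Mount point: /data/tps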
nodes = get_nodes_by_type('datanode')
# Checking tps fs configs
for node in nodes :
output = node.cliCmd('show tps fs')
if not 'Enabled: yes' in output or 'Mount point: /dev/INVALID' in output:
directory = ['/data/']
        logger.error('%s : DN does not have a SAN attached or enabled.' % node.getIp())
else :
directory = filter(lambda x : 'Mount point:' in x , output.split('\n'))
try :
directory = map(lambda x : re.search('Mount point:(.*)',x).group(1),directory)
except Exception :
            logger.error('%s : Error parsing output of "show tps fs" for mount points. Please check.' % node.getIp() )
for dir in directory :
output = node.shellCmd('df -klh | grep %s' % dir)
out = filter(lambda x : '%' in x, output.split(' ') )
if not len(out) == 1 :
logger.error('%s : Error in parsing command df -klh. debug logs stdout : %s , after filter for percentage extraction passed. out = %s ' % ( node.getIp(),output,out) )
else :
if int(out[0][:-1]) > 95 :
logger.error("%s : SAN disk is full: %s" % ( node.getIp(),out[0] ) )
report.fail("%s : SAN disk is full: %s" % ( node.getIp(),out[0] ) )
else :
logger.info("%s : SAN disk %s full" % ( node.getIp(),out[0] ) )
|
from django.contrib import admin
from .models import Dprofile
# Register your models here.
admin.site.register(Dprofile)
|
from typing import List
from leetcode import test
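# The function below uses a rolling one-dimensional DP: dp[j] holds the number of paths to
# cell (i, j) of the current row, updating the previous row's values in place (O(n) space).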
def unique_paths_with_obstacles(grid: List[List[int]]) -> int:
if not (grid and grid[0]):
return 0
m, n = len(grid), len(grid[0])
if grid[0][0] == 1 or grid[m - 1][n - 1] == 1:
return 0
dp = [0] * n
for j in range(n):
if grid[0][j] == 0:
dp[j] = 1
else:
break
for i in range(1, m):
dp[0] = 1 if grid[i][0] == 0 and dp[0] == 1 else 0
for j in range(1, n):
if grid[i][j] == 1:
dp[j] = 0
else:
up = dp[j] if grid[i - 1][j] == 0 else 0
left = dp[j - 1] if grid[i][j - 1] == 0 else 0
dp[j] = up + left
return dp[n - 1]
# def unique_paths_with_obstacles(grid: List[List[int]]) -> int:
# if not (grid and grid[0]):
# return 0
#
# m, n = len(grid), len(grid[0])
# if grid[0][0] == 1 or grid[m - 1][n - 1] == 1:
# return 0
#
# dp = [[0] * n for _ in range(m)]
# dp[0][0] = 1
# for i in range(1, m):
# if grid[i][0] == 0:
# dp[i][0] = 1
# else:
# break
# for j in range(1, n):
# if grid[0][j] == 0:
# dp[0][j] = 1
# else:
# break
#
# for i in range(1, m):
# for j in range(1, n):
# if grid[i][j] == 1:
# continue
# left = dp[i][j - 1] if grid[i][j - 1] == 0 else 0
# up = dp[i - 1][j] if grid[i - 1][j] == 0 else 0
# dp[i][j] = left + up
# return dp[m - 1][n - 1]
test(unique_paths_with_obstacles, [([[0, 0, 0], [0, 1, 0], [0, 0, 0]], 2)])
|
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import tensorflow as tf
import data.climate.window_generator as wg
# https://www.tensorflow.org/tutorials/structured_data/time_series
train_df = pd.read_csv("jena_climate_2009_2016_train.csv")
val_df = pd.read_csv("jena_climate_2009_2016_val.csv")
test_df = pd.read_csv("jena_climate_2009_2016_test.csv")
w2 = wg.WindowGenerator(input_width=6, label_width=1, shift=1, train_df=train_df, val_df=val_df, test_df=test_df,
label_columns=['T (degC)'])
print(w2)
example_window = tf.stack([np.array(train_df[:w2.total_window_size]),
np.array(train_df[100:100+w2.total_window_size]),
np.array(train_df[200:200+w2.total_window_size])])
example_inputs, example_labels = w2.split_window(example_window)
print('All shapes are: (batch, time, features)')
print(f'Window shape: {example_window.shape}')
print(f'Inputs shape: {example_inputs.shape}')
print(f'labels shape: {example_labels.shape}')
# The code above took a batch of 3 windows of 7 time steps each, with 19 features at every time step.
# It split them into a batch of 6-time-step, 19-feature inputs and a 1-time-step, 1-feature label.
# The label has only one feature because the WindowGenerator was initialized with label_columns=['T (degC)'].
w2.example = example_inputs, example_labels
w2.plot()
plt.show()
w2.plot(plot_col='p (mbar)')
plt.show()
|
# -*- coding: utf-8 -*-
class MyHashSet:
def __init__(self):
self.data = [False] * (2**20)
def add(self, key):
self.data[key] = True
def contains(self, key):
return self.data[key]
def remove(self, key):
self.data[key] = False
if __name__ == "__main__":
obj = MyHashSet()
obj.add(1)
obj.add(2)
assert obj.contains(1)
assert not obj.contains(3)
obj.add(2)
assert obj.contains(2)
obj.remove(2)
assert not obj.contains(2)
|
from django.contrib import admin
# Register your models here.
from .models import Teacher, Teamim, Class, TalmudSponsor, TalmudStudy
from .models import create_transcoder_job
class ClassAdmin(admin.ModelAdmin):
search_fields = ['division', 'segment', 'section', 'unit', 'part', 'series']
list_display = ['__str__', 'division', 'segment', 'section', 'unit', 'part', 'series', 'date']
def save_model(self, request, obj, form, change):
super().save_model(request, obj, form, change)
update_fields = []
if change:
for key in form.initial:
if form.initial[key] != form.cleaned_data[key]:
update_fields.append(key)
if 'audio' in update_fields:
print('creating encoder job for', str(obj))
create_transcoder_job(obj.audio)
class TalmudStudyAdmin(admin.ModelAdmin):
search_fields=['seder', 'masechet']
def save_model(self, request, obj, form, change):
super().save_model(request, obj, form, change)
update_fields = []
if change:
for key in form.initial:
if form.initial[key] != form.cleaned_data[key]:
update_fields.append(key)
if 'audio' in update_fields:
print('creating encoder job for', str(obj))
create_transcoder_job(obj.audio)
class TeamimAdmin(admin.ModelAdmin):
search_fields = ['post', 'reader']
raw_id_fields = ('post', 'reader',)
list_display = ('post', 'reader',)
def save_model(self, request, obj, form, change):
super().save_model(request, obj, form, change)
if 'audio' in form.changed_data and obj.audio:
if change:
print('creating encoder job for existing object', str(obj))
else:
print('creating encoder job for new object', str(obj))
create_transcoder_job(obj.audio)
admin.site.register(Teacher)
admin.site.register(Teamim, TeamimAdmin)
admin.site.register(Class, ClassAdmin)
admin.site.register(TalmudSponsor)
admin.site.register(TalmudStudy, TalmudStudyAdmin)
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('basketball', '0004_auto_20150629_2040'),
]
operations = [
migrations.RemoveField(
model_name='game',
name='statlines_created',
),
migrations.AlterField(
model_name='playbyplay',
name='assist',
field=models.CharField(blank=True, max_length=30, choices=[('pot', 'POT'), ('ast', 'AST')]),
),
migrations.AlterField(
model_name='playbyplay',
name='assist_player',
field=models.ForeignKey(on_delete=models.CASCADE, blank=True, to='basketball.Player', null=True, related_name='+'),
),
migrations.AlterField(
model_name='playbyplay',
name='secondary_player',
field=models.ForeignKey(on_delete=models.CASCADE, blank=True, to='basketball.Player', null=True, related_name='secondary_plays'),
),
]
|
# -*- coding: UTF-8 -*-
import collections
import os
import sys
import pickle
import codecs
import pandas as pd
import numpy as np
from sklearn.model_selection import KFold
from sklearn.model_selection import train_test_split
# pathDir = os.path.dirname(__file__)
# curPath = os.path.abspath(pathDir)
# rootPath = os.path.split(curPath)[0]
# sys.path.append(os.path.split(rootPath)[0])
# sys.path.append(rootPath)
from preprocess_data import clear_and_format_data2
class PreProcess(object):
def __init__(self):
self.max_len = 60
def flatten(self, list_args):
result = []
for el in list_args:
            # collections.Iterable is the abstract type for iterables
            # isinstance(x1, type) checks whether x1 is an instance of type
            if isinstance(el, collections.Iterable) and not isinstance(el, str):
result.extend(self.flatten(el))
else:
result.append(el)
return result
def data2pkl(self, path, save_path):
max_len = self.max_len
        datas = list()  # samples
        labels = list()  # sample labels
# linedata = list()
# linelabel = list()
tags = set()
input_data = codecs.open(os.path.join(path, 'spu_name_wordtagsplit.txt'), 'r', 'utf-8')
# input_data = codecs.open('./data_t/spu_name_wordtagsplit.txt', 'r', 'utf-8')
# input_data = codecs.open('./word_small.txt', 'r', 'utf-8')
# input_data = codecs.open('./wordtagsplit.txt', 'r', 'utf-8')
        # read the file line by line and process each sample line
for line in input_data.readlines():
            # split() defaults to splitting on whitespace
line = line.split()
linedata = []
linelabel = []
num_noto = 0
for word in line:
word = word.split('/')
linedata.append(word[0])
linelabel.append(word[1])
tags.add(word[1])
if word[1] != 'O':
num_noto += 1
if num_noto != 0:
datas.append(linedata)
labels.append(linelabel)
input_data.close()
# print(len(datas), tags)
# print(len(labels))
all_words = self.flatten(datas)
        # convert all characters into a pandas Series, formatted like:
        """
        0    牛
        1    肉
        2    丸
        ...
        """
sr_allwords = pd.Series(all_words)
# print(sr_allwords)
        # count how many times each character occurs; returns a Series of per-character counts
        sr_allwords = sr_allwords.value_counts()
        # print(sr_allwords)
        # get the set of unique characters
set_words = sr_allwords.index
# print(set_words)
set_ids = range(1, len(set_words)+1)
# print(set_ids)
        # convert the set elements into a list
tags = [i for i in tags]
tag_ids = range(len(tags))
# print(tag_ids)
        # build the mapping Series
word2id = pd.Series(set_ids, index=set_words)
# print(word2id)
id2word = pd.Series(set_words, index=set_ids)
# print(id2word)
tag2id = pd.Series(tag_ids, index=tags)
id2tag = pd.Series(tags, index=tag_ids)
        # add an "unknow" entry whose index is len(word2id)+1
        word2id["unknow"] = len(word2id)+1
        # print(word2id)
        # fix the sentence length to max_len (60)
def x_padding(words):
            # look up the ids of all the characters at once by indexing with a list
            ids = list(word2id[words])
            # if the sentence is too long, keep only the first max_len characters
            if len(ids) >= max_len:
                return ids[:max_len]
            # otherwise pad with 0 up to max_len characters
ids.extend([0]*(max_len-len(ids)))
return ids
def y_padding(tags1):
ids = list(tag2id[tags1])
if len(ids) >= max_len:
return ids[:max_len]
ids.extend([0]*(max_len-len(ids)))
return ids
#
df_data = pd.DataFrame({'words': datas, 'tags': labels}, index=range(len(datas)))
df_data['x'] = df_data['words'].apply(x_padding)
df_data['y'] = df_data['tags'].apply(y_padding)
x = np.asarray(list(df_data['x'].values))
y = np.asarray(list(df_data['y'].values))
path_save = os.path.join(save_path, "data0\\data.pkl")
# kf = KFold(n_splits=5)
# i = 1
# for train_index, test_index in kf.split(x):
# path_save = os.path.join(save_path, "data{}\\data.pkl".format(i), )
# i += 1
# print(path_save)
# train_x, train_y = x[train_index], y[train_index]
# x_test, y_test = x[test_index], y[test_index]
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.01, random_state=43)
x_train, x_valid, y_train, y_valid = train_test_split(x_train, y_train, test_size=0.01, random_state=43)
with open(path_save, 'wb') as outp:
# with open('../Bosondata1.pkl', 'wb') as outp:
pickle.dump(word2id, outp)
pickle.dump(id2word, outp)
pickle.dump(tag2id, outp)
pickle.dump(id2tag, outp)
pickle.dump(x_train, outp)
pickle.dump(y_train, outp)
pickle.dump(x_test, outp)
pickle.dump(y_test, outp)
pickle.dump(x_valid, outp)
pickle.dump(y_valid, outp)
print('** Finished saving the data.')
@staticmethod
def origin2tag(path):
input_data = codecs.open(os.path.join(path, 'spu_name_origindata.txt'), 'r', 'utf-8')
output_data = codecs.open(os.path.join(path, 'spu_name_wordtag.txt'), 'w', 'utf-8')
# input_data = codecs.open('./spu_name_origindata.txt', 'r', 'utf-8')
# output_data = codecs.open('./spu_name_wordtag.txt', 'w', 'utf-8')
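        # The parser below assumes annotations of the form {{label:text}} embedded in each line
        # (inferred from the index arithmetic, e.g. "xx{{brand:牛肉丸}}yy"); each annotated span is
        # tagged B_/M_/E_<label> per character and every other character is tagged O.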
for line in input_data.readlines():
line = line.strip()
i = 0
while i < len(line):
if line[i] == '{':
i += 2
temp = ""
while line[i] != '}':
temp += line[i]
i += 1
i += 2
word = temp.split(':')
sen = word[1]
try:
output_data.write(sen[0]+"/B_"+word[0]+" ")
except Exception as e:
pass
# print('sen:', sen)
# print('line:', line)
# print('word:', word)
for j in sen[1:len(sen)-1]:
output_data.write(j+"/M_"+word[0]+" ")
output_data.write(sen[-1]+"/E_"+word[0]+" ")
else:
output_data.write(line[i]+"/O ")
i += 1
output_data.write('\n')
input_data.close()
output_data.close()
@staticmethod
def tagsplit(path):
# with open('./data_t/xwj_wordtag.txt', 'rb') as inp:
inp = open(os.path.join(path, 'spu_name_wordtag.txt'), 'r', encoding='utf-8')
# inp = open('./spu_name_wordtag.txt', 'r', encoding='utf-8')
        # read the whole txt file in one go
        # texts = inp.read().decode('utf-8')
        sentences = inp.readlines()
        # print(texts)
        # why split here?
# sentences = re.split("[,。!?、‘’“”()]/[O]", texts)
# sentences = re.split('[,。!?、‘’“”()]/[O]'.decode('utf-8'), texts)
# output_data = codecs.open('./data_t/spu_name_wordtagsplit.txt', 'w', 'utf-8')
output_data = codecs.open(os.path.join(path, 'spu_name_wordtagsplit.txt'), 'w', 'utf-8')
for sentence in sentences:
if sentence != " ":
output_data.write(sentence.strip()+'\n')
output_data.close()
def get_predata(data_name, path_save):
"""
    data_name: path + name of the raw data file
    path_save: directory where the processed data is saved
"""
# org_path = 'E:\\Document\\project\\keyword_extraction\\ChineseNER\\data\\boson'
# path_root = 'E:\\Document\\project\\keyword_extraction\\ChineseNER\\data\\boson\\data_t'
# path_save = 'E:\\Document\\project\\keyword_extraction\\ChineseNER\\data'
    clear_and_format_data2(data_name, path_save)  # clean and format the data; writes spu_name_origindata.txt under path_save
    print('---------------- data cleaning finished -------------')
    preprocess = PreProcess()
    preprocess.origin2tag(path_save)  # writes spu_name_wordtag.txt under path_save
    preprocess.tagsplit(path_save)  # writes spu_name_wordtagsplit.txt under path_save
    preprocess.data2pkl(path_save, save_path=path_save)  # writes data.pkl under path_save
    print('---------------- data processing finished -------------')
if __name__ == '__main__':
path_root = 'E:\\Document\\project\\keyword_extraction\\ChineseNER\\data_\\data_original_all.txt'
path_sa = 'E:\\Document\\project\\keyword_extraction\\ChineseNER\\data_\\fold_cross_validation'
get_predata(path_root, path_sa)
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Oct 28 12:41:12 2020
@author: kondrate
"""
import matplotlib.pyplot as plt
import numpy as np
def draw_plots(readings, point, fig_size = None, caption = 'Plot', decision = np.empty((3,1))):
# Readings are pandas frame
r = 0
if len(decision) == 3:
fig, ax = plt.subplots(14,1, figsize=fig_size)
fig.tight_layout(h_pad=2, rect=[0, 0.03, 1, 0.95])
fig.suptitle(caption)
for key in readings:
ax[r].plot(readings[key])
ax[r].grid(True)
ax[r].set_title(key)
ax[r].axvline(point, color='red', ls='dashed')
r+=1
plt.draw()
for i in range(14):
locs = list(np.arange(-50,350,50))
locs += [point]
labels = [str(w) for w in locs]
ax[i].set_xticks(locs[1:])
ax[i].set_xticklabels(labels[1:])
else:
fig, ax = plt.subplots(14,2, figsize=fig_size)
fig.tight_layout(h_pad=2, rect=[0, 0.03, 1, 0.95])
        fig.suptitle(caption)
for key in readings:
ax[r,0].plot(readings[key])
ax[r,0].grid(True)
ax[r,0].set_title(key)
ax[r,0].axvline(point, color='red', ls='dashed')
#
ax[r,1].plot(decision[r,:])
ax[r,1].grid(True)
ax[r,1].set_title(key)
ax[r,1].axvline(point, color='red', ls='dashed')
r+=1
plt.draw()
for i in range(14):
locs = list(np.arange(-50,350,50))
locs += [point]
labels = [str(w) for w in locs]
ax[i,0].set_xticks(locs[1:])
ax[i,0].set_xticklabels(labels[1:])
ax[i,1].set_xticks(locs[1:])
ax[i,1].set_xticklabels(labels[1:])
|
from functools import wraps
def onlyonce(fn):
"""Wraps a function to run once and return the same result thereafter."""
result = []
@wraps(fn)
def doit(*a, **k):
if not result:
result.append(fn(*a, **k))
return result[0]
return doit
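# A minimal usage sketch (illustrative, not part of the original module):
#
# @onlyonce
# def expensive_setup():
#     print("running setup")
#     return 42
#
# expensive_setup()  # prints "running setup" and returns 42
# expensive_setup()  # returns the cached 42 without running the body again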
|
from collections import defaultdict
# adjacency list: each vertex maps to the set of its neighbours
n, m = map(int, input().split())
d = defaultdict(set)
for t in range(m):
    u, v = map(int, input().split())
    d[u].add(v)
    d[v].add(u)
def dfs(i):
    visited[i] = 1
    for j in d[i]:
        if visited[j] == 0:
            dfs(j)
# count connected components: start a DFS from every unvisited vertex
c = 0
visited = [0] * (n + 1)
for i in range(1, n + 1):
    if visited[i] == 0:
        dfs(i)
        c += 1
print(c)
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
from setuptools import setup
from setuptools import find_packages
def get_long_description() -> str:
with open("README.md", "r") as file:
return file.read()
setup(
name="hushboard",
version="0.0.1",
description="Mute your mic while you're typing.",
long_description=get_long_description(),
long_description_content_type="text/markdown",
author="Stuart Langridge",
author_email="sil@kryogenix.org",
license="MIT",
url="https://github.com/stuartlangridge/hushboard",
packages=find_packages(),
install_requires=[],
python_requires=">=3.6",
entry_points={
"console_scripts": ["hushboard=hushboard.__main__:main"],
},
include_package_data=True,
classifiers=[
"Development Status :: 5 - Production/Stable",
"Intended Audience :: End Users/Desktop",
"License :: OSI Approved :: MIT License",
"Programming Language :: Python",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Programming Language :: Python :: 3.9",
"Operating System :: POSIX :: Linux",
"Operating System :: Unix",
],
extras_require={},
)
|
# Generated by Django 2.1.2 on 2018-10-06 10:58
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('app', '0005_project'),
]
operations = [
migrations.AddField(
model_name='project',
name='reject_reason',
field=models.CharField(blank=True, max_length=500, null=True),
),
]
|
# -*- coding: utf-8 -*-
# pylint: disable=invalid-name,missing-docstring
# Copyright 2017 IBM RESEARCH. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
"""Quick program to test json backend
"""
import unittest
from qiskit import qasm, unroll, QuantumProgram
from .common import QiskitTestCase, Path
class TestJsonOutput(QiskitTestCase):
"""Test Json output.
This is mostly covered in test_quantumprogram.py but will leave
here for convenience.
"""
def setUp(self):
self.QASM_FILE_PATH = self._get_resource_path(
'qasm/entangled_registers.qasm', Path.EXAMPLES)
def test_json_output(self):
seed = 88
qp = QuantumProgram()
qp.load_qasm_file(self.QASM_FILE_PATH, name="example")
basis_gates = [] # unroll to base gates, change to test
unroller = unroll.Unroller(qasm.Qasm(data=qp.get_qasm("example")).parse(),
unroll.JsonBackend(basis_gates))
circuit = unroller.execute()
        self.log.info('test_json_output: {0}'.format(circuit))
if __name__ == '__main__':
unittest.main()
|
"""calculavirus URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.0/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.urls import include, path
from rest_framework import routers
from calculavirus.insumos import views as insumo_views
from calculavirus.checklist import views as checklist_views
from calculavirus.customeUsers import views as customUsers_views
from django.contrib import admin
from django.conf import settings
from django.views.static import serve
from django.conf.urls import url
router = routers.DefaultRouter()
router.register(r'users', customUsers_views.CustomUsersViewSet)
router.register(r'groups', insumo_views.GroupViewSet)
router.register(r'insumos', insumo_views.InsumoViewSet)
router.register(r'lugares', insumo_views.LugarCompraViewSet)
router.register(r'checklist',checklist_views.ChecklistViewSet)
router.register(r'checklistinsumo',checklist_views.ChecklistInsumoViewSet)
# Wire up our API using automatic URL routing.
# Additionally, we include login URLs for the browsable API.
urlpatterns = [
path('', include(router.urls)),
path('api-auth/', include('rest_framework.urls', namespace='rest_framework')),
path('admin/', admin.site.urls),
# path('lugares/', insumo_views.LugarCompraViewSet.as_view(), name="lugares"),
]
if settings.DEBUG:
urlpatterns += [
url(r'^media/(?P<path>.*)$', serve, {
'document_root': settings.MEDIA_ROOT,
}),
]
|
#!/usr/bin/python
##James Parks
import sys
import json
import matplotlib.pyplot as pyplot
import numpy as np
from optparse import OptionParser
def autolabel(rects, textOffset):
# attach some text labels
for rect in rects:
height = rect.get_height()
print height + textOffset
#pyplot.text(rect.get_x() + rect.get_width() / 2., 1.05 * height, str(height)[:5], ha='center', va='bottom', fontsize=6)
pyplot.text(rect.get_x() + rect.get_width() / 2., height + textOffset, str(height)[:5], ha='center', va='bottom', fontsize=6)
def plotIt(allArgs):
parser = OptionParser(version="%prog 1.0")
#parser.add_option("-x", "--xValues", dest="xValues", help="The x axis values to plot", metavar="XVAL")
#parser.add_option("-y", "--yValues", dest="yValues", help="The y axis values to plot", metavar="YVAL")
parser.add_option("-d", "--data", dest="data", help="The json file that contains the data being compared", metavar="DATA", default="Unknown")
parser.add_option("-g", "--type", dest="graphType", help="Type of graph to make; Line, Bar, or Pie", metavar="GRAPHTYPE", default="Line")
parser.add_option("-t", "--title", dest="title", help="The title of the graph", metavar="TITLE", default="Default")
parser.add_option("-p", "--path", dest="path", help="The path of the graph you want to output", metavar="PATH")
parser.add_option("-l", "--legend", action="store_true", dest="legend", help="Add a legend to the graph")
parser.add_option("-x", "--xkcd", action="store_true", dest="xkcd", help="Use the XKCD style")
options, args = parser.parse_args(allArgs)
##Open up the JSON data file and turn it back into a dictionary
dataFilePath = options.data
dataFile = open(dataFilePath, "r")
dataDict = json.load(dataFile)
dataFile.close()
##Set up a couple of variables that will be used in all types of graphs
dataKeys = dataDict.keys()
colorCycle = ["r", "g", "b", "c", "m", "y", "k"]
#for thisKey in dataKeys:
# versions = dataDict[thisKey].keys()
# versions.sort()
versions = dataDict[dataKeys[0]].keys()
versions.sort()
#print versions
textOffset = .25
smallFontSize = 6
mediumFontSize = 10
largeFontSize = 16
##Set the resolution in a roundabout way
## you can only specify the image size in inches and the dpi
## so I made the dpi 100 for easy multiplication
myFig = pyplot.figure(figsize=[5, 4], dpi=100)
print myFig
if options.graphType:
print options.graphType
##########################################################
##I'm makin' me a line graph, jus like dem fancy injuneers
##########################################################
if options.graphType == "Line":
pyplot.grid(b=True)
myPlots = []
for thisKey in dataKeys:
##Sanitize and Synchronize the versions and values
saniVersions = []
saniValues = []
for thisVersion in versions:
##Sanitize the versions into ints
strVersion = str(thisVersion)
if strVersion[0] == "v":
saniVer = str(strVersion[1:])
else:
saniVer = str(strVersion)
saniVersions.append(int(saniVer))
##Sanitize the values into floats
value = dataDict[thisKey][thisVersion]
if type(value) == type(""):
saniVal = float(value)
else:
saniVal = value
saniValues.append(value)
if options.xkcd:
with pyplot.xkcd():
tempPlot = pyplot.plot(saniVersions, saniValues, ".-", linewidth=1, label=thisKey)
else:
tempPlot = pyplot.plot(saniVersions, saniValues, ".-", linewidth=1, label=thisKey)
myPlots.append(tempPlot[0])
##Write the value above the dot
## Find out the yLength
yLen = pyplot.ylim()[1] - pyplot.ylim()[0]
textOffset = (yLen / 100) * 2
colorChoice = 0
for thisPlot in myPlots:
xyData = thisPlot.get_xydata()
for entry in xyData:
if options.xkcd:
with pyplot.xkcd():
pyplot.text(s=str(entry[1])[:5], x=entry[0], y=entry[1] + textOffset, fontsize=smallFontSize)
else:
pyplot.text(s=str(entry[1])[:5], x=entry[0], y=entry[1] + textOffset, fontsize=smallFontSize)
colorChoice = colorChoice + 1
pyplot.margins(.05, .05)
pyplot.minorticks_off()
pyplot.box(on=True)
#pyplot.xticks(fontsize=6)
pyplot.tick_params(top=False, right=False, labelsize=smallFontSize)
#xTicks = range(int(saniX[0]), int(saniX[-1]) + 1, 1)
#pyplot.xticks(xTicks)
pyplot.xlabel("Version", fontsize=smallFontSize)
########################################################
##Stone walls do not a prison make, nor iron bars a cage
########################################################
elif options.graphType == "Bar":
#fig, ax = pyplot.subplots()
#pyplot.figure(figsize=[5, 4], dpi=100)
##Figure out the bar info
numOfBars = len(versions)
barSubGroups = len(dataKeys)
barWidth = 0.75 / barSubGroups
##It's much easier to make this a numpy arange instead of a default python range
xPos = np.arange(numOfBars)
curBarSubGroup = 0
colorChoice = 0
myRects = []
for thisKey in dataKeys:
keyValues = []
#thisPos = p[]
#for pos in xPos:
# xPos[pos] = pos + (barwidth * curBarSubGroup)
for thisVersion in versions:
thisValue = dataDict[thisKey][thisVersion]
keyValues.append(thisValue)
if options.xkcd:
with pyplot.xkcd():
#thisRects = ax.bar(xPos + (barWidth * curBarSubGroup), keyValues, barWidth, label=thisKey, color=colorCycle[colorChoice])
thisRect = pyplot.bar(xPos + (barWidth * curBarSubGroup), keyValues, barWidth, label=thisKey, color=colorCycle[colorChoice])
else:
thisRect = pyplot.bar(xPos + (barWidth * curBarSubGroup), keyValues, barWidth, label=thisKey, color=colorCycle[colorChoice])
myRects.extend(thisRect)
curBarSubGroup = curBarSubGroup + 1
if colorChoice == len(colorCycle) - 1:
colorChoice = 0
else:
colorChoice = colorChoice + 1
##Write the value above the dot
## Find out the yLength
yLen = pyplot.ylim()[1] - pyplot.ylim()[0]
textOffset = (yLen / 100)
for thisRect in myRects:
height = thisRect.get_height()
if options.xkcd:
with pyplot.xkcd():
pyplot.text(thisRect.get_x() + thisRect.get_width() / 2., height + textOffset, str(height)[:5], ha='center', va='bottom', fontsize=6)
else:
pyplot.text(thisRect.get_x() + thisRect.get_width() / 2., height + textOffset, str(height)[:5], ha='center', va='bottom', fontsize=6)
#autolabel(thisRects, textOffset)
pyplot.margins(.05, .05)
pyplot.xticks(range(len(versions)), versions, fontsize=smallFontSize)
#pyplot.minorticks_off()
pyplot.yticks(fontsize=smallFontSize)
#pyplot.box(on=True)
pyplot.tick_params(top=False, right=False)
####################
##Mmmmm,... half-Tau
####################
elif options.graphType == "Pie":
##Determine how many subplots there are going to be
## This is specific to pie charts because it's silly
## to plot multiple versions into the same pie chart
numOfPlots = len(versions)
numOfCols = 5
if numOfPlots <= 16:
numOfCols = 4
if numOfPlots <= 9:
numOfCols = 3
if numOfPlots <= 4:
numOfCols = 2
if numOfPlots == 1:
numOfCols = 1
#elif 4 < numOfPlots < 6:
#else:
# numOfCols = numOfPlots
numOfRows = numOfPlots / numOfCols
if numOfPlots % 4:
numOfRows = numOfRows + 1
plotNum = 1
##Make a pie for each version
for thisVersion in versions:
totalValue = 0.0
allValues = []
for thisKey in dataKeys:
thisValue = dataDict[thisKey][thisVersion]
allValues.append(thisValue)
totalValue = totalValue + thisValue
#print thisVersion + " :: " + str(totalValue)
percentages = []
for thisValue in allValues:
percentages.append((thisValue / totalValue) * 100)
if numOfPlots > 1:
pyplot.subplot(numOfRows, numOfCols, plotNum)
autoLabelPie = '%1.1f%%'
pieLabels = None
if options.legend:
pieLabels = dataKeys
if options.xkcd:
with pyplot.xkcd():
if options.legend:
patches, texts, autotexts = pyplot.pie(percentages, labels=pieLabels, autopct=autoLabelPie, shadow=False, startangle=90)
else:
patches, texts, autotexts = pyplot.pie(percentages, labels=pieLabels, autopct=autoLabelPie, shadow=False, startangle=90)
else:
if options.legend:
patches, texts, autotexts = pyplot.pie(percentages, labels=pieLabels, autopct=autoLabelPie, shadow=False, startangle=90)
else:
patches, texts, autotexts = pyplot.pie(percentages, labels=pieLabels, autopct=autoLabelPie, shadow=False, startangle=90)
texts.extend(autotexts)
for thisText in texts:
thisText.set_fontsize(smallFontSize)
pyplot.axis('equal')
pyplot.xlabel(thisVersion, fontsize=smallFontSize)
plotNum = plotNum + 1
#pyplot.margins(.1, .1)
else:
print "I don't know what kind of graph to make"
##Let's optionally add some stuff to every type of graph
#0=Best, 1=upperRight, 2=upperLeft, 3=lowerLeft, 4=lowerRight, 5=right, 6=centerLeft, 7=centerRight, 8=lowerCenter, 9=upperCenter, 10=center
if options.legend and options.graphType != "Pie":
pyplot.legend(loc=0, fontsize=mediumFontSize)
##Give it a title
if options.title:
#if options.graphType != "Pie":
#myFig.title(options.title.title(), fontsize=mediumFontSize)
pyplot.title(options.title.title(), fontsize=mediumFontSize)
##If there is a path specified, write to that path, otherwise show the image
if options.path:
pyplot.savefig(options.path)
else:
pyplot.show()
if __name__ == '__main__':
print sys.argv[1:]
plotIt(sys.argv[1:])
|
#coding:gb2312
# comparing numbers
num=18
print(num==18) # check whether the two numbers are equal
print(num<14) # returns False
print(num<=14) # returns False
print(num>14) # returns True
print(num>=18) # returns True
# checking multiple conditions
# e.g. check whether both people are 18 or younger; with 'and', both conditions must hold for the expression to be True
age_0= 15
age_1= 19
print((age_0<=18) and (age_1<=18)) # 'and' tests whether both people are 18 or younger
age_1=17 # change age_1 to 17
print((age_0<=18) and (age_1<=18)) # returns True
# check whether at least one person is 18 or younger; with 'or', a single satisfied condition makes the expression True
age_0= 15
age_1= 190
print((age_0<=18) or (age_1<=18))
|
import os
from pathlib import Path
import json
import random
import numpy as np
import pickle
import tables
import os
def pickle_dump(item, out_file):
with open(out_file, "wb") as opened_file:
pickle.dump(item, opened_file)
# def delete_existing_files():
# os.remove(r"/data/home/Shai/UNET_3D_NEW/debug_split/validation_ids.pkl")
# os.remove(r"/data/home/Shai/UNET_3D_NEW/debug_split/training_ids.pkl")
# os.remove(r"/data/home/Shai/UNET_3D_NEW/debug_split/test_ids.pkl")
def create_files(cur_exp_name, training_list, validation_list, test_list):
exp_folder = os.path.join(r"/datadrive/configs", cur_exp_name)
Path(os.path.join(exp_folder, "debug_split")).mkdir(parents=True, exist_ok=True)
pickle_dump(training_list, os.path.join(exp_folder, "debug_split", "training_ids.pkl"))
pickle_dump(validation_list, os.path.join(exp_folder, "debug_split", "validation_ids.pkl"))
pickle_dump(test_list, os.path.join(exp_folder, "debug_split", "test_ids.pkl"))
def set_new_config(conf_to_imitate, cur_exp_name):
with open(os.path.join(r"/datadrive/configs", conf_to_imitate, 'config.json')) as f:
config = json.load(f)
config["overwrite"] = False
config["base_dir"] = os.path.join(r"/datadrive/configs", cur_exp_name)
config["split_dir"] = os.path.join(config["base_dir"], "debug_split")
config["training_file"] = os.path.join(config["split_dir"], "training_ids.pkl")
config["validation_file"] = os.path.join(config["split_dir"], "validation_ids.pkl")
config["test_file"] = os.path.join(config["split_dir"], "test_ids.pkl")
#config["data_file"] = os.path.join(config["base_dir"], "fetal_data.h5")
config["model_file"] = os.path.join(config["base_dir"], "fetal_net_model")
with open(os.path.join(config["base_dir"], 'config.json'), mode='w') as f:
json.dump(config, f, indent=2)
def run_cross_val_training(existing_data_file_path, exp_names_prefix, conf_to_imitate=None):
data_file = tables.open_file(existing_data_file_path, "r")
all_list = list(range(len(data_file.root.subject_ids)))
data_file.close()
print('# of subjects: {}'.format(len(all_list)))
random.shuffle(all_list)
all_list_temp = all_list
all_experiement_names = []
n_test = 3
n_iters = int(np.ceil(float(len(all_list_temp)) / n_test))
for i in range(n_iters):
print("In round {} out of {}".format(i+1, n_iters))
test_list = all_list_temp[:n_test] # [5, 6, 23]
validation_list = all_list_temp[n_test:2*n_test]
training_list = all_list_temp[2*n_test:]
cur_exp_name = '{}_cross_val_train_{}'.format(exp_names_prefix, i+1)
create_files(cur_exp_name, training_list, validation_list, test_list)
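        # rotate the shuffled subject list by n_test so each round uses a different test fold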
all_list_temp = all_list_temp[n_test:] + all_list_temp[:n_test]
print("Created all files")
for i in range(n_iters):
cur_exp_name = '{}_cross_val_train_{}'.format(exp_names_prefix, i + 1)
if conf_to_imitate:
set_new_config(conf_to_imitate, cur_exp_name)
print('Now training {}'.format(cur_exp_name))
cmd = "python3 train_fetal.py --experiment_name='{}'".format(cur_exp_name)
print(cmd)
os.system(cmd)
print("Finished training, now running on test")
conf_dir = '../../../../../datadrive/configs/' + '{}'.format(cur_exp_name)
cmd = "python3 predict.py --split='test' --config='{}'".format(conf_dir)
print(cmd)
os.system(cmd)
print('Finished forward')
all_experiement_names = all_experiement_names + ['{}'.format(cur_exp_name)]
return all_experiement_names
#conf_to_imitate = r"20200906_single_res_new_data"
#conf_to_imitate = r"20200826_reconstruct_2d_base_single_res"
conf_to_imitate = r"20200911_single_res_new_data_cross_val_train_1"
#existing_data_fpath = r"/datadrive/configs/20200906_single_res_new_data/fetal_data.h5"
#existing_data_fpath = r"/datadrive/configs/20200826_reconstruct_2d_base_single_res/fetal_data.h5"
existing_data_fpath = r"/datadrive/datafiles/fetal_data_single_res_scans_202009.h5"
exp_names_prefix = r"20201007_2d_single_res_unet_depth_3_ntest_3"
run_cross_val_training(existing_data_file_path=existing_data_fpath, exp_names_prefix=exp_names_prefix,
conf_to_imitate=conf_to_imitate)
# import glob
# import nibabel as nib
# data_pref = r"/data/home/Shai/placenta_data"
# exps = glob.glob('/datadrive/configs/64_64_5_cross_val_*')
#
# for e in exps:
# cur_p = os.path.join(e, 'predictions', 'test')
# subs = os.listdir(cur_p)
# for s_id in subs:
# print("Adding prediction to subject {}".format(s_id))
# if not os.path.exists(os.path.join(data_pref, s_id, 'prediction.nii')):
# a_tmp = nib.load(os.path.join(cur_p, s_id, 'prediction.nii.gz'))
# nib.save(a_tmp, os.path.join(data_pref, s_id, 'prediction.nii'))
|
from keras.models import Sequential
from keras import layers
import pandas as pd
from sklearn.model_selection import train_test_split
import numpy as np
from keras.preprocessing.sequence import pad_sequences
from keras.preprocessing.text import Tokenizer
from sklearn.preprocessing import OneHotEncoder
from sklearn.preprocessing import LabelEncoder
from keras.utils import np_utils
from sklearn.feature_extraction.text import CountVectorizer
import gensim
from gensim.utils import simple_preprocess
from gensim.parsing.preprocessing import STOPWORDS
from nltk.stem import WordNetLemmatizer, SnowballStemmer
from nltk.stem.porter import *
import nltk
np.random.seed(2018)
nltk.download('wordnet')
ammountCat ={}
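# checkKey maps a category name to a numeric id (assigning a new id the first time it is seen)
# and keeps a running count of rows per category in ammountCat.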
def checkKey(dict, key):
if key in dict.keys():
ammountCat[dict[key]] = ammountCat[dict[key]] + 1
return dict[key]
else:
dict[key] = len(dict) + 1
ammountCat[dict[key]]= 1
return dict[key]
import csv
f = open('purchase-order-data-2012-2015-.csv', 'r',encoding='UTF8')
reader = csv.reader(f)
headers = next(reader, None)
column = {}
columnIndex = {}
count = 0
for h in headers:
column[h] = []
columnIndex[h]=count
count = count + 1
count = 0
limit = 1000000
classDict = {}
columnNames = ['Item Name', 'Item Description', 'Class']
for row in reader:
if row[ columnIndex['Class']] == "" or row[ columnIndex['Item Description']] == "" or row[ columnIndex['Item Name']] == "":
continue
column['Class'].append(checkKey(classDict, row[ columnIndex['Class']]))
column['Item Description'].append(row[ columnIndex['Item Description']] +" " + row[ columnIndex['Item Name']])
count = count + 1
if count > limit:
break
cutoutThreshold = 10
for key, value in ammountCat.items():
if value < cutoutThreshold:
print(key,":",value)
found = True
while found == True:
broke = False
for i in range(len(column['Class'])):
#print("i:",column['Class'][i],key,i)
if column['Class'][i] == key:
#print(key,column['Class'][i])
del column['Class'][i]
del column['Item Description'][i]
broke = True
break
if broke == False:
found = False
total =0
categories =0
for key, value in ammountCat.items():
if value >= cutoutThreshold:
categories = categories +1
print("-",key,":",value)
total = value + total
print(len(column['Class']),len(column['Item Description']),total)
d = {'label': column['Class'], 'sentence': column['Item Description']}
column=""
df_yelp =pd.DataFrame(data=d)
data_text = df_yelp[['sentence']]
data_text['index'] = df_yelp.index
documents = data_text
stemmer = SnowballStemmer("english")
def lemmatize_stemming(text):
return stemmer.stem(WordNetLemmatizer().lemmatize(text, pos='v'))
def preprocess(text):
result = []
for token in gensim.utils.simple_preprocess(text):
if token not in gensim.parsing.preprocessing.STOPWORDS and len(token) > 3:
result.append(lemmatize_stemming(token))
return result
print(documents)
doc_sample = documents[documents['index'] == 39].values[0][0]
print('original document: ')
words = []
for word in doc_sample.split(' '):
words.append(word)
print(words)
print('\n\n tokenized and lemmatized document: ')
print(preprocess(doc_sample))
processed_docs = documents['sentence'].map(preprocess)
print(processed_docs[:10])
dictionary = gensim.corpora.Dictionary(processed_docs)
count = 0
for k, v in dictionary.iteritems():
print(k, v)
count += 1
if count > 10:
break
dictionary.filter_extremes(no_below=20, no_above=0.5, keep_n=1000000)
print(len(dictionary),len(documents))
bow_corpus = [dictionary.doc2bow(doc) for doc in processed_docs]
bow_doc_4310 = bow_corpus[90]
for i in range(len(bow_doc_4310)):
print("Word {} (\"{}\") appears {} time.".format(bow_doc_4310[i][0],
dictionary[bow_doc_4310[i][0]],
bow_doc_4310[i][1]))
from gensim import corpora, models
tfidf = models.TfidfModel(bow_corpus)
corpus_tfidf = tfidf[bow_corpus]
from pprint import pprint
for doc in corpus_tfidf:
pprint(doc)
break
print(categories)
lda_model = gensim.models.LdaModel(bow_corpus, num_topics=100, id2word=dictionary)
for idx, topic in lda_model.print_topics(-1):
print('Topic: {} \nWords: {}'.format(idx, topic))
print(bow_corpus[4310],documents['sentence'][4310])
for index, score in sorted(lda_model[bow_corpus[4310]], key=lambda tup: -1*tup[1]):
print("\nScore: {}\t \nTopic: {}".format(score, lda_model.print_topic(index, 10)))
|
#!/usr/bin/env python3
from ctypes import CDLL
from os import listdir, path, times
from signal import SIGHUP, SIGINT, SIGKILL, SIGQUIT, SIGTERM, signal
from sys import argv, exit, stderr, stdout
from time import monotonic, process_time, sleep
def errprint(*text):
"""
"""
print(*text, file=stderr, flush=True)
def readf(pathname):
"""
"""
with open(pathname, 'rb', buffering=0) as f:
return f.read().decode()
def write(pathname, string):
"""
"""
with open(pathname, 'w') as f:
f.write(string)
def check_controllers(cg):
"""
"""
try:
c = readf('/sys/fs/cgroup/' + cg + '/cgroup.controllers')
except FileNotFoundError as e:
print('I: systemd and unified cgroup hierarchy are required')
errprint(e)
exit(1)
except PermissionError as e:
print('I: systemd and unified cgroup hierarchy are required')
errprint(e)
exit(1)
if 'memory' not in c:
errprint('E: memory controller is not enabled in ' + cg)
exit(1)
def check_write(cg):
"""
"""
pathname = '/sys/fs/cgroup/' + cg + '/memory.high'
try:
string = readf(pathname)
except FileNotFoundError as e:
errprint(e)
exit(1)
except PermissionError as e:
errprint(e)
exit(1)
try:
write(pathname, string)
except PermissionError as e:
print('I: memory.high must be writable')
errprint(e)
exit(1)
except FileNotFoundError as e:
errprint(e)
exit(1)
def format_time(t):
"""
"""
total_s = int(t)
if total_s < 60:
return '{}s'.format(round(t, 1))
if total_s < 3600:
total_m = total_s // 60
mod_s = total_s % 60
return '{}min {}s'.format(total_m, mod_s)
if total_s < 86400:
total_m = total_s // 60
mod_s = total_s % 60
total_h = total_m // 60
mod_m = total_m % 60
return '{}h {}min {}s'.format(total_h, mod_m, mod_s)
total_m = total_s // 60
mod_s = total_s % 60
total_h = total_m // 60
mod_m = total_m % 60
total_d = total_h // 24
mod_h = total_h % 24
return '{}d {}h {}min {}s'.format(total_d, mod_h, mod_m, mod_s)
def rline(pathname):
"""
"""
with open(pathname, 'rb', buffering=0) as f:
try:
return int(f.read())
except ValueError:
return None
def get_swappiness():
"""
"""
return rline('/proc/sys/vm/swappiness')
def mlockall():
"""
"""
MCL_CURRENT = 1
MCL_FUTURE = 2
MCL_ONFAULT = 4
libc = CDLL('libc.so.6', use_errno=True)
result = libc.mlockall(MCL_CURRENT | MCL_FUTURE | MCL_ONFAULT)
if result != 0:
result = libc.mlockall(MCL_CURRENT | MCL_FUTURE)
if result != 0:
errprint('ERROR: cannot lock process memory: [Errno {}]'.format(
result))
exit(1)
else:
if debug_correction:
errprint('Process memory locked with MCL_CURRENT | MCL_FUTURE')
else:
if debug_correction:
errprint('Process memory locked with MCL_CURRENT | MCL_FUTURE | '
'MCL_ONFAULT')
def debug_cg():
"""
"""
print('CGroups state:')
for cg in cg_dict:
cur_path = cg_dict[cg]['cur_path']
high_path = cg_dict[cg]['high_path']
try:
c = rline(cur_path)
h = rline(high_path)
except FileNotFoundError as e:
print(' cgroup: {}, {}'.format(cg, e))
continue
c_mib = round(c / 1024 / 1024, 1)
c_pc = round(c / 1024 / mem_total * 100, 1)
if h is None:
hui = ' max'
else:
h_mib = round(h / 1024 / 1024, 1)
h_pc = round(h / 1024 / mem_total * 100, 1)
hui = ' {}M ({}%)'.format(h_mib, h_pc)
print(' cgroup: {}, memory.current: {}M ({}%), memory.high:{}'.format(
cg, c_mib, c_pc, hui))
stdout.flush()
def string_to_float_convert_test(string):
"""
    Try to interpret a string value as a float.
"""
try:
return float(string)
except ValueError:
return None
def string_to_int_convert_test(string):
"""Try to interpret string values as integers."""
try:
return int(string)
except ValueError:
return None
def check_mem_and_swap():
"""
"""
fd['mi'].seek(0)
m_list = fd['mi'].read().decode().split(' kB\n')
ma = int(m_list[mem_available_index].split(':')[1])
st = int(m_list[swap_total_index].split(':')[1])
sf = int(m_list[swap_free_index].split(':')[1])
return ma, st, sf
def percent(num):
"""Interprete num as percentage."""
return round(num * 100, 1)
def sleep_after_check_mem(ma):
"""
"""
if stable_interval:
sleep(min_interval)
return None
t = ma / mem_fill_rate
if t > max_interval:
t = max_interval
elif t < min_interval:
t = min_interval
else:
pass
if debug_sleep:
print('Sleep', round(t, 2))
sleep(t)
def unlim():
"""
"""
for cg in cg_dict:
try:
if debug_correction:
print('Set memory.high for {}: max'.format(cg))
write(cg_dict[cg]['high_path'], 'max\n')
except (FileNotFoundError, PermissionError) as e:
if debug_correction:
print(e)
continue
def soft_exit(num):
"""
"""
unlim()
if len(fd) > 0:
for f in fd:
fd[f].close()
m = monotonic() - start_time
user_time, system_time = times()[0:2]
p_time = user_time + system_time
p_percent = p_time / m * 100
print('Uptime {}, CPU time {}s (user {}s, sys {}s), avg {}%; exit.'.format(
format_time(m),
round(p_time, 2),
user_time,
system_time,
round(p_percent, 2)
))
exit(num)
def signal_handler(signum, frame):
"""
"""
def signal_handler_inner(signum, frame):
"""
"""
pass
for i in sig_list:
signal(i, signal_handler_inner)
print('Got the {} signal '.format(
sig_dict[signum]))
soft_exit(0)
def get_uid_set():
"""
"""
uid_set = set()
d = '/sys/fs/cgroup/user.slice/'
f_list = listdir(d)
for f in f_list:
if (f.startswith('user-') and
f.endswith('.slice') and
path.isdir(d + f) and
len(f) >= 12):
uid = f[5:-6]
try:
i_uid = int(uid)
if i_uid >= min_uid:
uid_set.add(uid)
print(uid)
except ValueError:
continue
return uid_set
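# e.g. a directory named 'user-1000.slice' yields the uid string '1000' here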
def update_cg_dict(cg_dict):
"""
"""
b = uid_d[0]
if b == listdir(d):
return cg_dict
elif b is None:
uid_d[0] = listdir(d)
else:
uid_d[0] = listdir(d)
uid_set = get_uid_set()
lus = len(uid_set)
if lus == 0:
return basic_cg_dict
real_uid_cg_dict = dict()
for cg in temp_uid_cg_dict:
temp_key = temp_uid_cg_dict[cg]
print(temp_key)
t_fraction = temp_key['fraction']
print(t_fraction, 'F')
r_fraction = t_fraction / lus
for uid in uid_set:
real_cg = cg.replace('$UID', uid)
if real_cg in basic_cg_dict:
continue
print(uid)
new_key = dict()
new_key.update(temp_key)
c = new_key['cur_path'].replace('$UID', uid)
new_key['cur_path'] = c
h = new_key['high_path'].replace('$UID', uid)
new_key['high_path'] = h
new_key['fraction'] = r_fraction
real_uid_cg_dict[real_cg] = new_key
cg_dict = basic_cg_dict.copy()
cg_dict.update(real_uid_cg_dict)
return cg_dict
def correction(cg_dict):
"""
"""
print(cg_dict)
if debug_correction:
print('Enter in correction')
enter_time = monotonic()
k0 = process_time()
cg_dict = update_cg_dict(cg_dict)
k1 = process_time()
k = (k1 - k0) * 1000
print(k, 'ms - UPD UID')
while True:
# print('=============== CHECK ===============')
ma, st, sf = check_mem_and_swap()
        # 1 CANCEL LIMITS
if sf < min_swap_free:
# print('# CANCEL LIMITS')
if debug_correction:
print('MemAvailable: {}M ({}%), SwapFree: {}M'.format(
round(ma / 1024, 1),
percent(ma / mem_total),
round(sf / 1024, 1)))
debug_cg()
print('SwapFree < $CANCEL_LIMITS_BELOW_SWAP_FREE_MIB; extend'
'ing limits')
unlim()
if debug_sleep:
print('Sleep', round(max_interval, 2))
sleep(max_interval)
if debug_correction:
print('Exit from correction')
break
        # 2 CORRECTION
if ma < keep_memavail_min:
k0 = process_time()
cg_dict = update_cg_dict(cg_dict)
k1 = process_time()
k = (k1 - k0) * 1000
print(k, 'ms - UPD UID')
print(cg_dict)
# print('# CORRECTION')
if debug_correction:
print('MemAvailable: {}M ({}%), SwapFree: {}M'.format(
round(ma / 1024, 1),
percent(ma / mem_total),
round(sf / 1024, 1)))
debug_cg()
enter_time = monotonic()
c_list = []
cur_sum = 0
for cg in cg_dict:
try:
cur = rline(cg_dict[cg]['cur_path'])
except FileNotFoundError as e:
if debug_correction:
print(e)
continue
cg_dict[cg]['real_cur'] = cur
cur_sum += cur
for cg in cg_dict:
try:
frac_cur = cg_dict[cg]['real_cur'] / cur_sum
except KeyError:
continue
cg_dict[cg]['frac_cur'] = frac_cur
frac = cg_dict[cg]['fraction']
delta = frac_cur - frac
cg_dict[cg]['delta'] = delta
c_list.append((cg, delta))
if len(c_list) == 0:
if debug_correction:
print('Nothing to limit, no cgroup found') # !
print('Exit from correction')
if debug_sleep:
print('Sleep', round(max_interval, 2))
sleep(max_interval)
break
c_list.sort(key=lambda i: i[1], reverse=True)
if debug_correction:
print('Queue:')
for i in c_list:
print(' ', i)
delta = keep_memavail_mid - ma
if delta > max_step:
delta = max_step
delta_b = delta * 1024
if debug_correction:
print('Correction step: {}M'.format(round(delta / 1024, 1)))
corr_ok = False
pt0 = process_time()
for cg, _ in c_list:
cur = cg_dict[cg]['real_cur']
lim_min = cg_dict[cg]['lim_min']
if cur - delta_b < lim_min:
continue
new_max = cur - delta_b
if debug_correction:
print('Set memory.high for {}: {}M'.format(
cg, round(new_max / 1024 / 1024, 1)
))
try:
write(cg_dict[cg]['high_path'], str(new_max) + '\n')
corr_ok = True
except (FileNotFoundError, PermissionError) as e:
if debug_correction:
print(e)
continue
break
if debug_correction:
if not corr_ok:
print(' Nothing to limit')
sleep_after_check_mem(ma)
pt1 = process_time()
pt = pt1 - pt0
print('Consumed {}ms CPU time during last correction'.format(
round(pt * 1000)))
if debug_sleep:
print('Sleep', round(correction_interval, 3))
sleep(correction_interval)
        # 3 WAITING
elif ma < keep_memavail_max:
# print('# WAITING')
enter_time = monotonic()
if debug_mem:
print('MemAvailable: {}M ({}%), SwapFree: {}M'.format(
round(ma / 1024, 1),
percent(ma / mem_total),
round(sf / 1024, 1)))
debug_cg()
sleep_after_check_mem(ma)
        # 4 WAITING AND CANCELLING LIMITS
else:
# print('# WAITING OR CANCEL LIMITS')
if debug_mem:
print('MemAvailable: {}M ({}%), SwapFree: {}M'.format(
round(ma / 1024, 1),
percent(ma / mem_total),
round(sf / 1024, 1)))
debug_cg()
if monotonic() - enter_time > correction_exit_time:
if debug_correction:
print('$CANCEL_LIMITS_IN_TIME_SEC expired; '
'extending limits')
unlim()
if debug_correction:
print('Exit from correction')
break
sleep_after_check_mem(ma)
###############################################################################
start_time = monotonic()
a = argv[1:]
la = len(a)
if la == 0:
errprint('invalid input: missing CLI options')
exit(1)
elif la == 2:
if a[0] == '-c':
config = a[1]
config = path.abspath(config)
else:
errprint('invalid input')
exit(1)
else:
errprint('invalid input')
exit(1)
with open('/proc/meminfo') as f:
mem_list = f.readlines()
mem_list_names = []
for s in mem_list:
mem_list_names.append(s.split(':')[0])
try:
mem_available_index = mem_list_names.index('MemAvailable')
except ValueError:
    print('ERROR: your Linux kernel is too old, Linux 3.14+ required')
    exit(1)
swap_total_index = mem_list_names.index('SwapTotal')
swap_free_index = mem_list_names.index('SwapFree')
mem_total = mt = int(mem_list[0].split(':')[1][:-4])
mt_b = mt * 1024
config_dict = dict()
basic_cg_dict = dict()
temp_uid_cg_dict = dict()
try:
with open(config) as f:
for line in f:
if line[0] == '$' and '=' in line:
key, _, value = line.partition('=')
key = key.rstrip()
value = value.strip()
if key in config_dict:
errprint('config key {} duplication'.format(key))
exit(1)
config_dict[key] = value
if line[0] == '@' and '=' in line:
if line.startswith('@LIMIT '):
a_list = line.partition('@LIMIT ')[2:][0].split()
lal = len(a_list)
if lal != 3:
errprint('invalid conf')
exit(1)
a_dict = dict()
for pair in a_list:
key, _, value = pair.partition('=')
a_dict[key] = value
cg = a_dict['CGROUP'].strip('/')
if cg in basic_cg_dict:
errprint('err, invalid conf, cgroup ({}) dupli'
'cation'.format(cg))
exit(1)
cur_path = '/sys/fs/cgroup/{}/memory.current'.format(cg)
high_path = '/sys/fs/cgroup/{}/memory.high'.format(cg)
fraction = float(a_dict['RELATIVE_SHARE'])
min_percent = a_dict['MIN_MEM_HIGH_PERCENT']
m_min = int(mem_total * 1024 / 100 * float(min_percent))
key = {
'cur_path': cur_path,
'high_path': high_path,
'fraction': fraction,
'lim_min': m_min}
if '$UID' in cg:
temp_uid_cg_dict[cg] = key
else:
basic_cg_dict[cg] = key
else:
print('invalid conf')
except (PermissionError, UnicodeDecodeError, IsADirectoryError,
IndexError, FileNotFoundError) as e:
errprint('Invalid config: {}. Exit.'.format(e))
exit(1)
if len(basic_cg_dict) == 0:
    print('ERROR: invalid config, no cgroup is set to be limited')
exit(1)
union_cg_dict = dict()
union_cg_dict.update(basic_cg_dict)
union_cg_dict.update(temp_uid_cg_dict)
frac_sum = 0
for cg in union_cg_dict:
frac_sum += union_cg_dict[cg]['fraction']
for cg in union_cg_dict:
union_cg_dict[cg]['fraction'] = round(
union_cg_dict[cg]['fraction'] / frac_sum, 4)
if '$MIN_UID' in config_dict:
string = config_dict['$MIN_UID']
min_uid = string_to_int_convert_test(string)
if min_uid is None or min_uid < 0:
errprint('invalid $MIN_UID value')
exit(1)
else:
errprint('missing $MIN_UID key')
exit(1)
if '$CANCEL_LIMITS_BELOW_SWAP_FREE_MIB' in config_dict:
string = config_dict['$CANCEL_LIMITS_BELOW_SWAP_FREE_MIB']
min_swap_free_mib = string_to_float_convert_test(string)
if min_swap_free_mib is None:
errprint('invalid $CANCEL_LIMITS_BELOW_SWAP_FREE_MIB value')
exit(1)
min_swap_free = int(min_swap_free_mib * 1024)
else:
errprint('missing $CANCEL_LIMITS_BELOW_SWAP_FREE_MIB key')
exit(1)
if '$MAX_CORRECTION_STEP_MIB' in config_dict:
string = config_dict['$MAX_CORRECTION_STEP_MIB']
max_step_mib = string_to_float_convert_test(string)
if max_step_mib is None:
errprint('invalid $MAX_CORRECTION_STEP_MIB value')
exit(1)
max_step = int(max_step_mib * 1024)
else:
errprint('missing $MAX_CORRECTION_STEP_MIB key')
exit(1)
if '$CANCEL_LIMITS_IN_TIME_SEC' in config_dict:
string = config_dict['$CANCEL_LIMITS_IN_TIME_SEC']
correction_exit_time = string_to_float_convert_test(string)
if correction_exit_time is None:
errprint('invalid $CANCEL_LIMITS_IN_TIME_SEC value')
exit(1)
else:
errprint('missing $CANCEL_LIMITS_IN_TIME_SEC key')
exit(1)
if '$MEM_FILL_RATE_MIB_IN_SEC' in config_dict:
string = config_dict['$MEM_FILL_RATE_MIB_IN_SEC']
mem_fill_rate = string_to_float_convert_test(string)
if mem_fill_rate is None:
errprint('invalid $MEM_FILL_RATE_MIB_IN_SEC value')
exit(1)
mem_fill_rate = 1024 * mem_fill_rate
else:
errprint('missing $MEM_FILL_RATE_MIB_IN_SEC key')
exit(1)
if '$MIN_INTERVAL_SEC' in config_dict:
string = config_dict['$MIN_INTERVAL_SEC']
min_interval = string_to_float_convert_test(string)
if min_interval is None:
errprint('invalid $MIN_INTERVAL_SEC value')
exit(1)
else:
errprint('missing $MIN_INTERVAL_SEC key')
exit(1)
if '$MAX_INTERVAL_SEC' in config_dict:
string = config_dict['$MAX_INTERVAL_SEC']
max_interval = string_to_float_convert_test(string)
if max_interval is None:
errprint('invalid $MAX_INTERVAL_SEC value')
exit(1)
else:
errprint('missing $MAX_INTERVAL_SEC key')
exit(1)
if min_interval == max_interval:
stable_interval = True
else:
stable_interval = False
if '$CORRECTION_INTERVAL_SEC' in config_dict:
string = config_dict['$CORRECTION_INTERVAL_SEC']
correction_interval = string_to_float_convert_test(string)
if correction_interval is None:
errprint('invalid $CORRECTION_INTERVAL_SEC value')
exit(1)
else:
errprint('missing $CORRECTION_INTERVAL_SEC key')
exit(1)
if '$MIN_MEM_AVAILABLE' in config_dict:
string = config_dict['$MIN_MEM_AVAILABLE']
if string.endswith('%'):
keep_memavail_min_percent = string[:-1].rstrip()
keep_memavail_min_percent = string_to_float_convert_test(
keep_memavail_min_percent)
if keep_memavail_min_percent is None:
errprint('invalid $MIN_MEM_AVAILABLE value')
exit(1)
keep_memavail_min = int(mem_total / 100 * keep_memavail_min_percent)
elif string.endswith('M'):
keep_memavail_min_mib = string[:-1].rstrip()
keep_memavail_min_mib = string_to_float_convert_test(
keep_memavail_min_mib)
if keep_memavail_min_mib is None:
errprint('invalid $MIN_MEM_AVAILABLE value')
exit(1)
keep_memavail_min = int(keep_memavail_min_mib * 1024)
else:
errprint('invalid $MIN_MEM_AVAILABLE value')
exit(1)
else:
errprint('missing $MIN_MEM_AVAILABLE key')
exit(1)
if '$TARGET_MEM_AVAILABLE' in config_dict:
string = config_dict['$TARGET_MEM_AVAILABLE']
if string.endswith('%'):
keep_memavail_mid_percent = string[:-1].rstrip()
keep_memavail_mid_percent = string_to_float_convert_test(
keep_memavail_mid_percent)
if keep_memavail_mid_percent is None:
errprint('invalid $TARGET_MEM_AVAILABLE value')
exit(1)
keep_memavail_mid = int(mem_total / 100 * keep_memavail_mid_percent)
elif string.endswith('M'):
keep_memavail_mid_mib = string[:-1].rstrip()
keep_memavail_mid_mib = string_to_float_convert_test(
keep_memavail_mid_mib)
if keep_memavail_mid_mib is None:
errprint('invalid $TARGET_MEM_AVAILABLE value')
exit(1)
keep_memavail_mid = int(keep_memavail_mid_mib * 1024)
else:
errprint('invalid $TARGET_MEM_AVAILABLE value')
exit(1)
else:
errprint('missing $TARGET_MEM_AVAILABLE key')
exit(1)
if '$CANCEL_LIMITS_ABOVE_MEM_AVAILABLE' in config_dict:
string = config_dict['$CANCEL_LIMITS_ABOVE_MEM_AVAILABLE']
if string.endswith('%'):
keep_memavail_max_percent = string[:-1].rstrip()
keep_memavail_max_percent = string_to_float_convert_test(
keep_memavail_max_percent)
if keep_memavail_max_percent is None:
errprint('invalid $CANCEL_LIMITS_ABOVE_MEM_AVAILABLE value')
exit(1)
keep_memavail_max = int(mem_total / 100 * keep_memavail_max_percent)
elif string.endswith('M'):
keep_memavail_max_mib = string[:-1].rstrip()
keep_memavail_max_mib = string_to_float_convert_test(
keep_memavail_max_mib)
if keep_memavail_max_mib is None:
errprint('invalid $CANCEL_LIMITS_ABOVE_MEM_AVAILABLE value')
exit(1)
keep_memavail_max = int(keep_memavail_max_mib * 1024)
else:
errprint('invalid $CANCEL_LIMITS_ABOVE_MEM_AVAILABLE value')
exit(1)
else:
errprint('missing $CANCEL_LIMITS_ABOVE_MEM_AVAILABLE key')
exit(1)
debug_correction = False
debug_mem = False
debug_sleep = False
if '$VERBOSITY' in config_dict:
verbosity = config_dict['$VERBOSITY']
if verbosity == '0':
pass
elif verbosity == '1':
debug_correction = True
elif verbosity == '2':
debug_correction = True
debug_mem = True
elif verbosity == '3':
debug_correction = True
debug_mem = True
debug_sleep = True
else:
errprint('invalid $VERBOSITY value')
exit(1)
else:
errprint('missing $VERBOSITY key')
exit(1)
temp_uid_cg_set = set(temp_uid_cg_dict)
if len(temp_uid_cg_set) > 0:
    check_uid = True
else:
    check_uid = False
if debug_correction:
print('keep_memavail_min:', keep_memavail_min)
print('keep_memavail_mid:', keep_memavail_mid)
print('keep_memavail_max:', keep_memavail_max)
print('max_step:', max_step)
print('min_swap_free:', min_swap_free)
print('correction_exit_time:', correction_exit_time)
print('mem_fill_rate:', mem_fill_rate)
print('min_interval:', min_interval)
print('max_interval:', max_interval)
print('verbosity:', verbosity)
for cg in basic_cg_dict:
print('cgroup: {} {}'.format(cg, '{'))
x = basic_cg_dict[cg]
for i in x:
print(' {}: {}'.format(i, x[i]))
print('}')
if check_uid:
for cg in temp_uid_cg_dict:
print('[template] cgroup: {} {}'.format(cg, '{'))
x = temp_uid_cg_dict[cg]
for i in x:
print(' {}: {}'.format(i, x[i]))
print('}')
cg_dict = basic_cg_dict
d = '/sys/fs/cgroup/user.slice/'
uid_d = dict()
uid_d[0] = None
###############################################################################
check_controllers('user.slice')
check_controllers('system.slice')
check_write('user.slice')
check_write('system.slice')
mlockall()
fd = dict()
fd['mi'] = open('/proc/meminfo', 'rb', buffering=0)
sig_dict = {
SIGKILL: 'SIGKILL',
SIGINT: 'SIGINT',
SIGQUIT: 'SIGQUIT',
SIGHUP: 'SIGHUP',
SIGTERM: 'SIGTERM'
}
sig_list = [SIGTERM, SIGINT, SIGQUIT, SIGHUP]
for i in sig_list:
signal(i, signal_handler)
##########################################################################
k0 = process_time()
cg_dict = update_cg_dict(cg_dict)
k1 = process_time()
k = (k1 - k0) * 1000
print(k, 'ms - UPD UID')
print(cg_dict)
for cg in cg_dict:
try:
if debug_correction:
print('Set memory.high for {}: max'.format(cg))
write(cg_dict[cg]['high_path'], 'max\n')
except (FileNotFoundError, PermissionError) as e:
print(e)
continue
swappiness = get_swappiness()
if debug_correction:
print('vm.swappiness:', swappiness)
if swappiness == 0:
print('WARNING: vm.swappiness=0')
print('Monitoring has started!')
while True:
ma, st, sf = check_mem_and_swap()
if debug_mem:
print('MemAvailable: {}M ({}%), SwapFree: {}M'.format(
round(ma / 1024, 1),
percent(ma / mem_total),
round(sf / 1024, 1)))
debug_cg()
if sf < min_swap_free:
sleep(max_interval)
continue
if ma < keep_memavail_min:
correction(cg_dict)
else:
sleep_after_check_mem(ma)
|
from django.db import models
from django.db.models.signals import post_delete
from applications.libro.models import Libro
from .managers import PrestamoManager
class Lector(models.Model):
nombre = models.CharField(max_length=50)
apellidos = models.CharField(max_length=50)
nacionalidad = models.CharField(max_length=50)
edad = models.PositiveIntegerField(default=0)
def __str__(self):
return self.nombre + ' ' + self.apellidos
class Prestamo(models.Model):
libro = models.ForeignKey(
Libro, on_delete=models.CASCADE, related_name='libro_prestamo')
lector = models.ForeignKey(Lector, on_delete=models.CASCADE)
prestamo = models.DateField('fecha_prestamo')
devolucion = models.DateField(
'fecha_devolucion', blank=True, null=True)
devuelto = models.BooleanField(default=False)
objects = PrestamoManager()
def save(self, *args, **kwargs):
self.libro.stock = self.libro.stock - 1
self.libro.save()
super(Prestamo, self).save(*args, **kwargs)
def __str__(self):
return self.libro.titulo
def update_libro_stock(sender, instance, **kwargs):
    # update the book's stock when a Prestamo (loan) is deleted
instance.libro.stock = instance.libro.stock + 1
instance.libro.save()
post_delete.connect(update_libro_stock, sender=Prestamo)
|
default_app_config = 'namelist.apps.NamelistConfig'
|
class TMI:
message_limit = 90
whispers_message_limit_second = 2
whispers_message_limit_minute = 90
@staticmethod
def promote_to_verified():
TMI.message_limit = 7000
TMI.whispers_message_limit_second = 15
TMI.whispers_message_limit_minute = 1150
|
from django.conf.urls import url, include
from django.urls.conf import path
from rest_framework.routers import DefaultRouter
from .views import AdditionalMaterialSet, StructuralUnitSet, UserStructuralUnitSet, CompetencesSet, \
ChangeSemesterInEvaluationsCorrect
router = DefaultRouter()
router.register(r'api/general_ch/additional_material_in_topic_of_rpd',
AdditionalMaterialSet, basename='additional_material_in_topic_of_rpd')
router.register(r'api/structural_unit_set',
StructuralUnitSet, basename='structural_unit')
router.register(r'api/user_structural_unit_set',
UserStructuralUnitSet, basename='user_structural_unit_set')
router.register(r'api/competences_set',
CompetencesSet, basename='competences_set')
urlpatterns = [
url(r'^', include(router.urls)),
path('api/workprogram/make_evaluation_tools_correct', ChangeSemesterInEvaluationsCorrect),
]
|
import re
import numpy as np
import itertools as it
from random import random
from sympy import Symbol, poly
class FitData(dict):
def __init__(self, fname, path="output/"):
self.__data = {}
self.__name = fname
self.__file = path + fname.replace(' ', '_')
def __setitem__(self, key, value):
if key is not "FitMatrix":
key = str(key)
value = str(value)
try:
file = open(self.__file, "r")
file.close()
except FileNotFoundError:
print("Create file: " + self.__file)
file = open(self.__file, "w")
file.close()
if key in self.keys():
keyexists = True
else:
keyexists = False
if not keyexists:
with open(self.__file, "a") as file:
file.write(key + "=" + value + "\n")
else:
linenumber = 9999
with open(self.__file, "r") as file:
data = file.readlines()
for num, line in enumerate(data, 1):
if line.startswith(key):
linenumber = num
break
data[linenumber-1] = key + "=" + value + "\n"
with open(self.__file, "w") as file:
file.writelines(data)
def __getitem__(self, item):
with open(self.__file, "r") as file:
for line in file:
if line.startswith(item):
return line.split('=')[1].strip("\n")
raise KeyError(str(item) + " is not a key in the dictionary")
def __str__(self):
return str(self.getDict())
def keys(self):
with open(self.__file, "r") as file:
lines = file.readlines()
keys = map(lambda x: x.split('=')[0], lines)
return list(keys)
def getDict(self):
for key in self.keys():
self.__data.update({key : self.__getitem__(key)})
return self.__data
def items(self):
return self.getDict().items()
def getCoeffs(self, pat = r'c\d{4}'):
coeffs = []
regex = re.compile(pat)
for key in self.keys():
if regex.search(key):
coeffs.append(key)
return coeffs
def getFitResult(self, clean=True, pat=r'\w\d{1,4}'):
if clean:
matrixkey = "FitMatrixClean"
else:
matrixkey = "FitMatrix"
mat = list(self.__getitem__(matrixkey))
mat = ''.join(mat)
d = self.getDict()
# for key, val in d.items():
# print(str(key) + " : " + str(val))
def fun(xx):
# print(xx.group(0))
return d[xx.group(0)]
mat = re.sub(pat, fun, mat)
mat = mat.split('],[')
for i, line in enumerate(mat):
mat[i] = line.replace('[[','').replace(']]','')
mat[i] = mat[i].split(',')
for i, row in enumerate(mat):
for j, col in enumerate(row):
mat[i][j] = eval(mat[i][j])
mat = np.asarray(mat)
return mat
def getBaseMatrix(self):
coeffs = list(self.keys())
notcoeffs = ["FitMatrix", "FitMatrixClean"]
coeffs = [elem for elem in coeffs if elem not in notcoeffs]
coeffgroups = [(coeffs[0][0], [coeffs[0]])]
for elem in coeffs:
foolist = [elem[0] == group[0] for group in coeffgroups]
if not any(foolist):
coeffgroups.append((elem[0], [elem]))
else:
for group in coeffgroups:
if elem[0] == group[0] and elem not in group[1]:
group[1].append(elem)
reslist = list()
for group in coeffgroups:
p1 = float(self.__getitem__(str(group[1][0])))
p2 = float(self.__getitem__(str(group[1][1])))
p3 = float(self.__getitem__(str(group[1][2])))
p4 = float(self.__getitem__(str(group[1][3])))
p5 = float(self.__getitem__(str(group[1][4])))
mat = np.array([
[p1, p2, p3, 0, 0, 0],
[p2, p1, p3, 0, 0, 0],
[p3, p3, p4, 0, 0, 0],
[0, 0, 0, 2 * p5, 0, 0],
[0, 0, 0, 0, 2 * p5, 0],
[0, 0, 0, 0, 0, p1 - p2]
])
reslist.append((group[1], mat))
return reslist
def getName(self):
return self.__name
|
import socket
import random
import struct
from helpers.file_io_helper import save_snapshot_channel
from helpers.file_io_helper import save_snapshot_state
from helpers.trading_helper import unpack_list_data
from helpers.trading_helper import update_logical_timestamp
from helpers.trading_helper import update_vector_timestamp
import time
def trader_process(port_mapping, n_processes, id, asset, num_snapshots):
print("port mapping: ", port_mapping)
rand = random.Random()
rand.seed(id)
sending_probability = rand.uniform(0.4, 0.8)
sockets = []
backlog = 10
types = {
'send_money': 0,
'send_widget': 1,
'marker': 2,
}
inv_types = {v:k for k, v in types.items()}
logical_timestamp = 0
vector_timestamp = [0] * n_processes
marker_received = [False] * num_snapshots
channels = [[{'data':[], 'is_recording': False} for i in range(n_processes)] for j in range(num_snapshots)]
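    # The marker messages below implement a Chandy-Lamport style snapshot:
    # the first marker seen for a snapshot_id saves the local state and starts
    # recording every other incoming channel; a later marker on a recorded
    # channel stops recording and saves that channel's contents.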
# send a message to process dest_pid
def send_int_list(dest_pid, type, int_list):
try:
sock = sockets[dest_pid]
message = struct.pack('!i', type) + struct.pack('!i', len(int_list))
for item in int_list:
message = message + struct.pack('!i', item)
sock.sendall(message)
except ConnectionAbortedError:
pass
except ConnectionResetError:
pass
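    # wire format used by send_int_list: a 4-byte big-endian type, a 4-byte
    # big-endian element count, then that many 4-byte big-endian integers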
# initialize sockets
for i in range(id):
server_sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
# print("port_mapping: %s", port_mapping[(i, id)])
server_sock.bind(('localhost', port_mapping[(i, id)][1]))
server_sock.listen(backlog)
client_sock, (host, client_port) = server_sock.accept()
sockets.append(client_sock)
sockets.append(None)
time.sleep(0.5)
for i in range(id + 1, n_processes):
client_sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
addr = 'localhost'
(source_port, destination_port) = port_mapping[(id, i)]
print("client: ", addr, source_port, destination_port)
client_sock.bind((addr, source_port))
client_sock.connect((addr, destination_port))
sockets.append(client_sock)
# set timeouts for sockets
for i in range(n_processes):
if i == id:
continue
sockets[i].settimeout(0.01)
# main logic loop
counter = 0
snapshot_id = 0
num_channels_recorded = 0
while True:
# receiving invariants
for i in range(n_processes):
if i == id:
continue
try:
data = sockets[i].recv(4)
if len(data) == 0:
continue
type = struct.unpack('!i', data)[0]
if inv_types[type] == 'send_money':
num_items = struct.unpack('!i', sockets[i].recv(4))[0]
int_list = unpack_list_data(sockets[i].recv(num_items * 4))
money_received = int_list[0]
logical_timestamp_received = int_list[1]
vector_timestamp_received = int_list[2:]
# check if we are recording incoming channels, and record the contents of incoming channels
for j in range(len(channels)):
if channels[j][i]['is_recording']:
channels[j][i]['data'].append([type] + int_list)
# update timestamps
logical_timestamp = update_logical_timestamp(logical_timestamp, logical_timestamp_received)
vector_timestamp[id] = vector_timestamp[id] + 1
vector_timestamp = update_vector_timestamp(vector_timestamp, vector_timestamp_received)
# update money
asset[1] = asset[1] + money_received
elif inv_types[type] == 'send_widget':
num_items = struct.unpack('!i', sockets[i].recv(4))[0]
int_list = unpack_list_data(sockets[i].recv(num_items * 4))
widgets_received = int_list[0]
logical_timestamp_received = int_list[1]
vector_timestamp_received = int_list[2:]
# check if we are recording incoming channels, and record the contents of incoming channels
for j in range(len(channels)):
if channels[j][i]['is_recording']:
channels[j][i]['data'].append([type] + int_list)
# update timestamps
logical_timestamp = update_logical_timestamp(logical_timestamp, logical_timestamp_received)
vector_timestamp[id] = vector_timestamp[id] + 1
vector_timestamp = update_vector_timestamp(vector_timestamp, vector_timestamp_received)
# update widgets
asset[0] = asset[0] + widgets_received
elif inv_types[type] == 'marker':
num_items = struct.unpack('!i', sockets[i].recv(4))[0]
snapshot_id_received = unpack_list_data(sockets[i].recv(num_items * 4))[0]
if marker_received[snapshot_id_received] == False:
marker_received[snapshot_id_received] = True
save_snapshot_state(id, snapshot_id_received, (logical_timestamp, vector_timestamp, asset))
for j in range(n_processes):
if j != id:
channels[snapshot_id_received][j]['is_recording'] = True
send_int_list(j, types['marker'], [snapshot_id_received])
else:
if channels[snapshot_id_received][i]['is_recording']:
channels[snapshot_id_received][i]['is_recording'] = False
save_snapshot_channel(id, snapshot_id_received, channels[snapshot_id_received][i], i)
if snapshot_id_received == num_snapshots - 1:
num_channels_recorded = num_channels_recorded + 1
if (id == 0 and num_channels_recorded == n_processes - 1) or (id != 0 and num_channels_recorded == n_processes - 2):
return
else:
print("Unknown type error")
raise Exception("Unknown type error")
except socket.timeout:
pass
except BlockingIOError:
pass
except ConnectionAbortedError:
pass
except ConnectionResetError:
pass
# sending invariants
buying_attempt = rand.uniform(0, 1)
if buying_attempt <= sending_probability:
seller = rand.randint(0, n_processes - 2)
if seller >= id:
seller = seller + 1
if rand.randint(0, 1) == 0:
# send money
current_money = asset[1]
if current_money <= 0:
pass
else:
buying_amount = rand.randint(1, int(current_money/3)+1)
asset[1] = asset[1] - buying_amount
logical_timestamp = logical_timestamp + 1
vector_timestamp[id] = vector_timestamp[id] + 1
send_int_list(seller, types['send_money'], [buying_amount, logical_timestamp] + vector_timestamp)
else:
# send widget
current_widget = asset[0]
if current_widget <= 0:
pass
else:
buying_amount = rand.randint(1, int(current_widget/3)+1)
asset[0] = asset[0] - buying_amount
logical_timestamp = logical_timestamp + 1
vector_timestamp[id] = vector_timestamp[id] + 1
send_int_list(seller, types['send_widget'], [buying_amount, logical_timestamp] + vector_timestamp)
if id == 0 and counter == 49:
marker_received[snapshot_id] = True
save_snapshot_state(id, snapshot_id, (logical_timestamp, vector_timestamp, asset))
for i in range(1, n_processes):
channels[snapshot_id][i]['is_recording'] = True
send_int_list(i, types['marker'], [snapshot_id])
snapshot_id = snapshot_id + 1
counter = (counter + 1) % 50
|
from rest_framework import serializers
from resume.apps.pages.models import Page
class PageSerializer(serializers.ModelSerializer):
class Meta:
model = Page
fields = ('title', 'content')
|
import datetime
from pyspark.sql import SparkSession
from pyspark.sql.functions import col, lit, avg, date_format, concat
from pyspark.sql.types import DoubleType, TimestampType, DateType
sparkSession = SparkSession.builder \
.config("spark.driver.maxResultSize", "2000m") \
.config("spark.sql.shuffle.partitions", 4) \
.getOrCreate()
delta_path="hdfs://192.168.1.10:9820/delta/stream/search_response"
pg_data_url = "jdbc:postgresql://192.168.10.10:5432/dw"
pg_data_user = "report"
pg_data_password = "789456123"
pg_driver = 'org.postgresql.Driver'
result_dataset = "flight_search_count"
def flight_search_count():
search_waiting_time = sparkSession.read.format("delta").load(delta_path) \
.select(col("searched_at").cast(DateType()).alias("date"),
concat(col("origin"),'-',col("destination")).alias("route"),
col("channel"),
col("departure_date"))\
.groupBy('date','route','channel')\
.count()
search_waiting_time.write.format("jdbc") \
.option("driver", pg_driver) \
.option("url", pg_data_url) \
.option("user", pg_data_user) \
.option("password", pg_data_password) \
.option("dbtable", result_dataset) \
.option("batchsize", 50000) \
.option("isolationLevel", "NONE") \
.mode("overwrite") \
.save()
def main():
flight_search_count()
if __name__ == '__main__':
t1 = datetime.datetime.now()
print('started at :', t1)
main()
t2 = datetime.datetime.now()
dist = t2 - t1
print('finished at:', t2, ' | elapsed time (s):', dist.seconds)
|
# ============LICENSE_START=======================================================
# Copyright (c) 2018-2022 AT&T Intellectual Property. All rights reserved.
# ================================================================================
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============LICENSE_END=========================================================
"""
module for snmpv3 support
- loads various USM values for engineID/users
"""
__docformat__ = "restructuredtext"
import json
import os
import sys
import string
import time
import traceback
import collections
import pprint
from pysnmp.entity import engine, config
from pysnmp.carrier.asyncore.dgram import udp
from pysnmp.entity.rfc3413 import ntfrcv
from pysnmp.proto.api import v2c
import trapd_settings as tds
from trapd_exit import cleanup_and_exit
from trapd_io import stdout_logger, ecomp_logger
prog_name = os.path.basename(__file__)
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# module: load_snmpv3_credentials
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
def load_snmpv3_credentials(_py_config, _snmp_engine, _cbs_config):
"""
Add V3 credentials from CBS config to receiver config
    so traps will be received from specified engines/users
    :Parameters:
      _py_config: trapd python config, _snmp_engine: snmp engine,
      _cbs_config: CBS-provided json config with snmpv3_config/usm_users
:Exceptions:
"""
# add V3 credentials from CBS json structure to running config
try:
v3_users = _cbs_config["snmpv3_config"]["usm_users"]
except Exception as e:
msg = "No V3 users defined"
ecomp_logger(tds.LOG_TYPE_DEBUG, tds.SEV_INFO, tds.CODE_GENERAL, msg)
return _py_config, _snmp_engine
for v3_user in v3_users:
# engineId
try:
ctx_engine_id = v3_user["engineId"]
except Exception as e:
ctx_engine_id = None
# user
try:
userName = v3_user["user"]
except Exception as e:
userName = None
# authorization
# find options at -> site-packages/pysnmp/entity/config.py
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# usmHMACMD5AuthProtocol
try:
authKey = v3_user["usmHMACMD5AuthProtocol"]
authProtocol = config.usmHMACMD5AuthProtocol
except Exception as e:
try:
authKey = v3_user["usmHMACSHAAuthProtocol"]
authProtocol = config.usmHMACSHAAuthProtocol
except Exception as e:
try:
authKey = v3_user["usmHMAC128SHA224AuthProtocol"]
authProtocol = config.usmHMAC128SHA224AuthProtocol
except Exception as e:
try:
authKey = v3_user["usmHMAC192SHA256AuthProtocol"]
authProtocol = config.usmHMAC192SHA256AuthProtocol
except Exception as e:
try:
authKey = v3_user["usmHMAC256SHA384AuthProtocol"]
authProtocol = config.usmHMAC256SHA384AuthProtocol
except Exception as e:
try:
authKey = v3_user["usmHMAC384SHA512AuthProtocol"]
authProtocol = config.usmHMAC384SHA512AuthProtocol
except Exception as e:
try:
authKey = v3_user["usmNoAuthProtocol"]
authProtocol = config.usmNoAuthProtocol
except Exception as e:
# FMDL: default to NoAuth, or error/skip entry?
msg = "No auth specified for user %s ?" % (userName)
authKey = None
authProtocol = config.usmNoAuthProtocol
ecomp_logger(tds.LOG_TYPE_DEBUG, tds.SEV_INFO, tds.CODE_GENERAL, msg)
# privacy
# find options at -> site-packages/pysnmp/entity/config.py
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# usm3DESEDEPriv
try:
privKey = v3_user["usm3DESEDEPrivProtocol"]
privProtocol = config.usm3DESEDEPrivProtocol
except Exception as e:
# usmAesCfb128Protocol
try:
privKey = v3_user["usmAesCfb128Protocol"]
privProtocol = config.usmAesCfb128Protocol
except Exception as e:
# usmAesCfb192Protocol
try:
privKey = v3_user["usmAesCfb192Protocol"]
privProtocol = config.usmAesCfb192Protocol
except Exception as e:
# usmAesBlumenthalCfb192Protocol
try:
privKey = v3_user["usmAesBlumenthalCfb192Protocol"]
privProtocol = config.usmAesBlumenthalCfb192Protocol
except Exception as e:
# usmAesCfb256Protocol
try:
privKey = v3_user["usmAesCfb256Protocol"]
privProtocol = config.usmAesCfb256Protocol
except Exception as e:
# usmAesBlumenthalCfb256Protocol
try:
privKey = v3_user["usmAesBlumenthalCfb256Protocol"]
privProtocol = config.usmAesBlumenthalCfb256Protocol
except Exception as e:
# usmDESPrivProtocol
try:
privKey = v3_user["usmDESPrivProtocol"]
privProtocol = config.usmDESPrivProtocol
except Exception as e:
# usmNoPrivProtocol
try:
privKey = v3_user["usmNoPrivProtocol"]
privProtocol = config.usmNoPrivProtocol
except Exception as e:
# FMDL: default to NoPriv, or error/skip entry?
msg = "No priv specified for user %s" % (userName)
ecomp_logger(tds.LOG_TYPE_DEBUG, tds.SEV_INFO, tds.CODE_GENERAL, msg)
privKey = None
privProtocol = config.usmNoPrivProtocol
# break
# msg = ("userName: %s authKey: %s authProtocol: %s privKey: %s privProtocol: %s engineId: %s % (userName, authKey, authProtocol, privKey, privProtocol, ctx_engine_id))
msg = "userName: %s authKey: **** authProtocol: %s privKey: **** privProtocol: %s engineId: ****" % (
userName,
authProtocol,
privProtocol,
)
ecomp_logger(tds.LOG_TYPE_DEBUG, tds.SEV_INFO, tds.CODE_GENERAL, msg)
# user: usr-md5-des, auth: MD5, priv DES, contextEngineId: 8000000001020304
# this USM entry is used for TRAP receiving purposes
# help(addV3User) returns ->
# addV3User(snmpEngine, userName, authProtocol=(1, 3, 6, 1, 6, 3, 10, 1, 1, 1), authKey=None, privProtocol=(1, 3, 6, 1, 6, 3, 10, 1, 2, 1), priv Key=None, securityEngineId=None, securityName=None, contextEngineId=None)
if ctx_engine_id is not None:
config.addV3User(
_snmp_engine,
userName,
authProtocol,
authKey,
privProtocol,
privKey,
contextEngineId=v2c.OctetString(hexValue=ctx_engine_id),
)
else:
config.addV3User(_snmp_engine, userName, authProtocol, authKey, privProtocol, privKey)
return _py_config, _snmp_engine
|
from django.contrib import admin
from sherlock.models import Profile, About, Relative, Image
admin.site.register([Profile, About, Relative, Image])
|
from main import Handler
class Information(Handler):
def get(self):
if not self.user:
self.redirect('/login')
else:
            error = 'You have reached this page in error'
link_src = '/'
link_name = 'Home'
self.render(
'information.html',
error=error,
link_src=link_src,
link_name=link_name
)
|
from django.contrib import admin
from .models import MostRecent,Feedback
admin.site.register(MostRecent)
admin.site.register(Feedback)
|
import pandas as pd
import numpy as np
from lorenz_gan.submodels import SubModelGAN, AR1RandomUpdater
from sklearn.neighbors import KernelDensity
from sklearn.mixture import GaussianMixture
from glob import glob
from os.path import join
import tensorflow as tf
import tensorflow.compat.v1.keras.backend as K
import gc
def load_test_data(filename,
input_columns=("X_t", "Ux_t"),
output_columns=("Ux_t+1",),
meta_columns=("x_index", "time", "step")):
all_columns = np.concatenate([list(meta_columns), list(input_columns), list(output_columns)], axis=0)
data = pd.read_csv(filename, usecols=all_columns)
return data
def hellinger_bad(a, b):
"""
Calculate hellinger distance on 2 discrete PDFs a and b.
Args:
        a: first discrete PDF, evaluated on a common set of bins
        b: second discrete PDF, evaluated on the same bins
    Returns:
        The Hellinger distance between a and b.
"""
return np.sqrt(np.sum((np.sqrt(a) - np.sqrt(b)) ** 2)) / np.sqrt(2)
def hellinger(x, pdf_p, pdf_q):
pdf_distances = (np.sqrt(pdf_p) - np.sqrt(pdf_q)) ** 2
return np.trapz(pdf_distances, x) / 2
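# Note: hellinger_bad returns the Hellinger distance H on discrete PDFs, while
# hellinger returns the squared distance, H^2 = 1/2 * trapz((sqrt(p) - sqrt(q))^2, x),
# computed by trapezoidal integration over x.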
def offline_gan_predictions(gan_index, data,
gan_path, seed=12421, batch_size=1024):
rs = np.random.RandomState(seed)
gen_files = sorted(glob(join(gan_path, "gan_generator_{0:04d}_*.h5".format(gan_index))))
gen_filenames = [gf.split("/")[-1] for gf in gen_files]
if gan_index < 300:
rand_size = 1
elif gan_index >= 700 and gan_index < 800:
rand_size = 35
elif gan_index >= 800:
rand_size = 34
else:
rand_size = 17
random_values = rs.normal(size=(data.shape[0], rand_size))
all_zeros = np.zeros((data.shape[0], rand_size), dtype=np.float32)
#corr_noise = np.zeros((data.shape[0], rand_size), dtype=np.float32)
gen_preds = dict()
for pred_type in ["det", "rand"]:
#gen_preds[pred_type] = pd.DataFrame(0.0, index=data.index, columns=gen_filenames)
gen_preds[pred_type] = np.zeros((data.shape[0], len(gen_filenames)), dtype="float32")
gen_noise = pd.DataFrame(0.0, dtype=np.float32, index=gen_filenames, columns=["corr", "noise_sd"])
for g, gen_file in enumerate(gen_files):
gen_f = gen_filenames[g]
gen_preds["det"][:, g], gen_preds["rand"][:, g], gen_noise.loc[gen_f] = single_gan_predictions(gen_file,
data,
all_zeros,
random_values,
seed,
batch_size)
gc.collect()
gen_preds_out = {}
del all_zeros
del random_values
gc.collect()
for k in list(gen_preds.keys()):
gen_preds_out[k] = pd.DataFrame(gen_preds[k], index=data.index, columns=gen_filenames)
del gen_preds[k]
gc.collect()
return gen_preds_out, gen_noise
def single_gan_predictions(gen_file, data, all_zeros, random_values, seed, batch_size):
sess_config = tf.compat.v1.ConfigProto(intra_op_parallelism_threads=1,
inter_op_parallelism_threads=1,
                                           gpu_options=tf.compat.v1.GPUOptions(allow_growth=True))
sess = tf.compat.v1.Session(config=sess_config)
K.set_session(sess)
    tf.compat.v1.set_random_seed(seed)
print("Predicting " + gen_file)
gen_model = SubModelGAN(gen_file)
if gen_model.x_scaling_values.shape[0] == 1:
input_cols = ["X_t"]
else:
input_cols = ["X_t", "Ux_t"]
print(gen_file, "Det preds")
det_preds = gen_model.predict_batch(data[input_cols], all_zeros, batch_size=batch_size, stochastic=0)
print(gen_file, "Rand preds")
rand_preds = gen_model.predict_batch(data[input_cols],
random_values,
batch_size=batch_size,
stochastic=1)
print(gen_file, "Random updater")
ar1 = AR1RandomUpdater()
x_indices = data["x_index"] == 0
ar1.fit(data.loc[x_indices, "Ux_t+1"].values - det_preds[x_indices].ravel())
print(gen_file, ar1.corr, ar1.noise_sd)
#gen_noise.loc[gen_filenames[g]] = [ar1.corr, ar1.noise_sd]
return det_preds, rand_preds, np.array([ar1.corr, ar1.noise_sd])
def calc_pdf_kde(x, x_bins, bandwidth=0.5, algorithm="kd_tree", leaf_size=100):
kde = KernelDensity(bandwidth=bandwidth, algorithm=algorithm, leaf_size=leaf_size)
kde.fit(x.reshape(-1, 1))
pdf = np.exp(kde.score_samples(x_bins.reshape(-1, 1)))
return pdf
def calc_pdf_hist(x, x_bins):
return np.histogram(x, x_bins, density=True)[0]
def calc_pdf_gmm(x, x_bins, n_components=4):
gmm = GaussianMixture(n_components=n_components)
gmm.fit(x.reshape(-1, 1))
pdf = np.exp(gmm.score_samples(x_bins.reshape(-1, 1)))
return pdf
def time_correlations(data, time_lags):
data_series = pd.Series(data)
gen_time_corr = np.zeros(time_lags.size, dtype=np.float32)
for t, time_lag in enumerate(time_lags):
gen_time_corr[t] = data_series.autocorr(lag=time_lag)
return gen_time_corr
|
from collections import defaultdict
from math import asin, cos, radians, sin, sqrt
from random import sample
import csv
from operator import itemgetter
import matplotlib as mpl
import matplotlib.pyplot as plt
class BaseAlgorithm():
#def __init__(self):
# self.update_data()
def update_data(self):
filename = "data3.csv"
self.cities = []
#self.size = len(self.cities)
self.coords = []
with open(filename, 'r') as csvfile:
csvreader = csv.reader(csvfile)
for row in csvreader:
self.coords.append([float(row[0]),float(row[1])])
self.cities = range(0,len(self.coords))
self.size = len(self.cities)
self.distances = self.compute_distances()
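    # note: despite its name, haversine_distance below returns the plain
    # Euclidean distance between the two coordinate pairs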
def haversine_distance(self, cityA, cityB):
coord1 = self.coords[cityA]
coord2= self.coords[cityB]
a = (coord1[0]-coord2[0])**2+(coord1[1]-coord2[1])**2
c = sqrt(a)
return c
def compute_distances(self):
self.distances = defaultdict(dict)
for cityA in self.cities:
for cityB in self.cities:
if cityB not in self.distances[cityA]:
distance = self.haversine_distance(cityA, cityB)
self.distances[cityA][cityB] = distance
self.distances[cityB][cityA] = distance
return self.distances
# add node k between node i and node j
def add(self, i, j, k):
return self.distances[i][k] + self.distances[k][j] - self.distances[i][j]
class TourConstructionHeuristics(BaseAlgorithm):
# find the neighbor k closest to the tour, i.e such that
# cik + ckj - cij is minimized with (i, j) an edge of the tour
# add k between the edge (i, j), resulting in a tour with subtour (i, k, j)
# used for the cheapest insertion algorithm
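    # e.g. inserting k between consecutive tour nodes i and j changes the tour
    # length by d(i,k) + d(k,j) - d(i,j), which is exactly what add() computes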
def __init__(self):
self.update_data()
#print self.cities
def closest_neighbor(self, tour, node, in_tour=False, farthest=False):
neighbors = self.distances[node]
#print node
#print neighbors.items()
#print tour
current_dist = [(c, d) for c, d in neighbors.items()
if (c in tour)]
return sorted(current_dist, key=itemgetter(1))[-farthest]
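    # in closest_neighbor, sorted(...)[-farthest] picks index 0 (the closest
    # neighbour) when farthest is False and index -1 (the farthest) when True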
def add_closest_to_tour(self, tours,tourslength,unass,veh):
best_dist, new_tour = float('inf'), None
tour_index = 0
city1 = 0
vehi=vehindex=None
for city in unass:
#print city
for tour in tours:
#print tour
for index in range(len(tour) - 1):
dist = self.add(tour[index], tour[index + 1], city)
#print dist
if dist < best_dist and (tourslength[tours.index(tour)]+dist)<150:
best_dist = dist
new_tour = tour[:index + 1] + [city] + tour[index + 1:]
tour_index = tours.index(tour)
city1 = city
for p in range(0,3):
if tours[tour_index] in veh[p]:
vehi=p
vehindex = veh[p].index(tours[tour_index])
#print city1
return best_dist, new_tour, tour_index,city1,vehi,vehindex
def nearest_insertion(self, farthest=True):
a = len(self.coords)-1
tour = [0,a]
tours = [tour]
tourslength = [0]
cantass=[[]]
unass = self.cities[1:a]
#print unass
while len(unass) != 0:
best, best_len = None, 0 if farthest else float('inf')
# (selection step) given a sub-tour,we find node r not in the
# sub-tour closest to any node j in the sub-tour,
# i.e. with minimal c_rj
for tour in tours:
for city in unass:
print cantass
print tour
if city in cantass[tours.index(tour)]:
continue
# we consider only the distances to nodes already in the tour
print tour,city
_, length = self.closest_neighbor(tour, city, True)
if (length > best_len if farthest else length < best_len):
city1, best_len,tour1 = city, length,tour
#print city1
# (insertion step) we find the arc (i, j) in the sub-tour which
# minimizes cir + crj - cij, and we insert r between i and j
best_dist, new_tour = float('inf'), None
for index in range(len(tour1) - 1):
dist = self.add(tour1[index], tour1[index + 1], city1)
#print dist
if dist < best_dist and (tourslength[tours.index(tour1)]+dist)<150:
best_dist = dist
new_tour = tour1[:index + 1] + [city1] + tour1[index + 1:]
tour_index = tours.index(tour1)
if best_dist == float('inf'):
cantass[tours.index(tour1)].append(city1)
                if tour_index == (len(tours)-1):
tour2=[0,a]
tours.append(tour2)
cantass.append([])
tourslength.append(0)
#print tours
if city1 != 0 and new_tour != None:
unass.remove(city1)
print unass
tourslength[tour_index] += best_dist
tours[tour_index]=new_tour
#print unass
#print self.cities
#return tours,tourslength
def cheapest_insertion(self):
denum=5
v=0
a= len(self.coords)-denum
#tour = [0,a]
tours = []
tourslength = []
for i in range(0,denum):
for j in range(0,denum):
if ([i,j]in tours or [j,i]in tours):
continue
else:
tours.append([i,j])
tourslength.append(self.distances[i][j])
# we find the closest node R to the first node
unass = self.cities[denum:a]
veh = [[],[],[]]
while len(unass) != 0:
length, tour, index, city1,vehi,vehindex = self.add_closest_to_tour(tours,tourslength,unass,veh)
#print tour
#print veh
#print vehi
#print vehindex
if vehi!=None:
veh[vehi][vehindex]=tour
if city1 != 0 and tour != 0:
unass.remove(city1)
tourslength[index] += length
tours[index]=tour
                if len(tours[index]) == 3:
v+=1
if v<3:
tour1=[tour[0],tour[2]]
tours.append(tour1)
tourslength.append(self.distances[tour[0]][tour[2]])
veh[v-1].append(tour)
#print veh
if v==3:
veh[v-1].append(tour)
#print veh
#print veh[0]
#print veh[0][0]
a=tourslength
#print tours
i=0
while i<len(tours):
tour2=tours[i]
#print tour2
i+=1
if (len(tour2)==2):
#print "S"
tourslength.remove(a[tours.index(tour2)])
tours.remove(tour2)
i-=1
#print "Y"
e=tours
#print e
for l in range(0,3):
tour3=tours[l]
b=tour3[-1]
#print "w"
for i in range(0,denum):
tours.append([b,i])
tourslength.append(self.distances[b][i])
#print"x"
if v>3:
c=tour[0]
d=tour[-1]
for i in range(0,denum):
if [c,i] in tours:
tourslength.remove(tourslength[tours.index([c,i])])
tours.remove([c,i])
tours.append([d,i])
tourslength.append(self.distances[d][i])
for i in range(0,3):
#print veh[i][-1][-1]
if veh[i][-1][-1]==tour[0]:
veh[i].append(tour)
break
#print tours
#print tours
vehlength=[0,0,0]
for i in range(0,3):
for p in range(0,len(veh[i])):
#print veh[i],veh[i][p],tours.index(veh[i][p])
#print tourslength[tours.index(veh[i][p])]
vehlength[i]+=tourslength[tours.index(veh[i][p])]
j=0
while(j<len(tours)):
if(len(tours[j]))==2:
tours.remove(tours[j])
tourslength.remove(tourslength[j])
else:
j+=1
return tours, tourslength,veh,vehlength
"""def samedis(self,tours,tourslength):
c=0.5
d=0.5
for tour1 in tours:
i=1
while (i<len(tour1)-1):
#print(len(tour1))
#print("!@#!")
best_dist = self.add(tour1[i-1], tour1[i+1], tour1[i])
#print("!!!!")
best_ratio = c*best_dist + d*(tourslength[tours.index(tour1)])
for tour in tours:
#print("******")
if tour != tour1 and len(tour)!=2 :
for index in range(len(tour) - 1):
dist = self.add(tour[index], tour[index + 1], tour1[i])
#print dist
ratio = c*dist + d*(tourslength[tours.index(tour)]+tour1[i])
if ratio < best_ratio and (tourslength[tours.index(tour)]+dist)<150:
best_dist = dist
new_tour = tour[:index + 1] + [tour1[i]] + tour[index + 1:]
tour_index = tours.index(tour)
best_ratio = c*best_dist + d*(tourslength[tours.index(tour)])
if best_ratio != c*best_dist + d*(tourslength[tours.index(tour1)]):
tours[tour_index]=new_tour
tourslength[tour_index]+= best_dist
tourslength[tours.index(tour1)]-=self.add(tour1[i-1], tour1[i + 1], tour1[i])
tour1.remove(tour1[i])
else:
i+=1
#print self.distances #print(i)
return tours, tourslength
"""
def samedis(self,tours,tourslength,veh,vehlength):
c=0.5
d=0.5
for tour1 in tours:
if len(tour1)!=2:
i=1
while (i<len(tour1)-1):
#print(len(tour1))
#print("!@#!")
for j in range(0,3):
if tour1 in veh[j]:
o=j
p=veh[j].index(tour1)
b=vehlength[j]
#print veh #print b,"s"
best_dist = self.add(tour1[i-1], tour1[i+1], tour1[i])
#print("!!!!")
best_ratio = c*best_dist + d*(b)
for tour in tours:
for j in range(0,3):
if tour in veh[j]:
a=vehlength[j]
w=j
s=veh[j].index(tour)
#print("******")
if tour != tour1 and len(tour)!=2 :
#print a
for index in range(len(tour) - 1):
#print tour
#print index
dist = self.add(tour[index], tour[index + 1], tour1[i])
#print dist
ratio = c*dist + d*(a+dist)
if ratio < best_ratio and (tourslength[tours.index(tour)]+dist)<150:
best_dist = dist
w1=w
s1=s
new_tour = tour[:index + 1] + [tour1[i]] + tour[index + 1:]
tour_index = tours.index(tour)
best_ratio = c*best_dist + d*(a+dist)
if best_ratio != c*best_dist + d*(b):
#print veh
tours[tour_index]=new_tour
tourslength[tour_index]+= best_dist
veh[w1][s1]=new_tour
vehlength[w1]+=best_dist
#print veh
tourslength[tours.index(tour1)]-=self.add(tour1[i-1], tour1[i + 1], tour1[i])
#print o,i
#print vehlength[o]
vehlength[o]-=self.add(tour1[i-1], tour1[i + 1], tour1[i])
print veh
print tour1
veh[o][p].remove(tour1[i])
print tour1
print veh
if (len(tour1)==2):
vehlength[o]-=self.distances[tour1[0]][tour1[1]]
#tour1.remove(tour1[i])
print veh
else:
i+=1
#print self.distances #print(i)
return tours, tourslength,veh,vehlength
def plot (self,tours,color):
b = ['r','b','g','b']
j=0
for tour in tours:
if len(tour)!=2:
for i in range (0,len(tour)-1):
if i != len(self.coords)-1:
plt.plot([self.coords[tour[i]][0], self.coords[tour[i+1]][0]],[self.coords[tour[i]][1],self.coords[tour[i+1]][1]], color)
#plt.show(block=False)
if j<3:
j+=1
else:
j=0
x=[]
y=[]
c=['bs','rs','gs','cs','ms']
for i in range(0,5):
x.append(self.coords[i][0])
y.append(self.coords[i][1])
plt.plot(self.coords[i][0],self.coords[i][1],c[i])
#plt.show()
#r= BaseAlgorithm()
x= TourConstructionHeuristics()
tours, lengths,veh,vehlength = x.cheapest_insertion()
print veh
print tours
#print lengths
#print vehlength
#x.plot(tours)
tours1, lengths1,veh1,vehlength1 = x.samedis(tours,lengths,veh,vehlength)
print tours1
print lengths1
print veh1
print vehlength1
b = ['r','b','g','b']
for i in range(0,3):
x.plot(veh[i],b[i])
plt.show()
x.plot(tours1, 'b')
|
'''
Created on Mar 5, 2013
@author: pvicente
'''
from src.data import Node, City, CityMap
from src.data_exceptions import NotValid2DPointFormat, WrongLink, NotCities, \
NotLinks, NotValidCitiesFormat, NotValidLinksFormat
import unittest
class TestNode(unittest.TestCase):
def setUp(self):
pass
def test_str(self):
'''
        Test that calling str() on a Node runs without raising
'''
n = Node(point2D=(0,0), name='TestNode')
other = Node(point2D=(1,3), name='OtherTestNode')
n.addLink(other)
_ = str(n)
def test_format_exceptions(self):
'''
Test raising NotValid2DPointFormat exceptions
'''
self.assertRaises(NotValid2DPointFormat, Node, point2D=1)
self.assertRaises(NotValid2DPointFormat, Node, point2D='a')
self.assertRaises(NotValid2DPointFormat, Node, point2D={})
self.assertRaises(NotValid2DPointFormat, Node, point2D=['a', 2, 3])
self.assertRaises(NotValid2DPointFormat, Node, point2D=(1,'2'))
def test_links_exception(self):
'''
Test raising WrongLink
'''
n = Node((0,0),'test')
self.assertRaises(WrongLink, n.addLink, n)
self.assertRaises(WrongLink,n.addLink, '')
def test_instance(self):
'''
Test checking construct of correct instance
'''
self.assertIsInstance(Node((1,2)), Node)
self.assertIsInstance(Node((1.0, 3.5)), Node)
self.assertIsInstance(Node((1, 2.10)), Node)
def test_equal(self):
'''
Tests over equal operator and checking equality over name attribute
'''
self.assertNotEqual(Node((0,0)), None)
self.assertNotEqual(Node((0,0)), 1)
self.assertEqual(Node((0,0)), Node((1,1)))
self.assertEqual(Node((0,0),'test'), Node((1,1),'test'))
self.assertNotEqual(Node((0,0), 'test1'), Node((0,0), 'test2'))
def test_cmp(self):
'''
        Tests over cmp operator and checking comparisons over the name attribute
'''
self.assertEqual(cmp(Node((0,0)), 1), -1)
self.assertEqual(cmp(Node((0,0)), None), -1)
self.assertEqual(cmp(Node((0,0), 'test'), Node((0,1),'test')), 0)
self.assertEqual(cmp(Node((0,0), 'test1'), Node((0,1),'test')), cmp('test1', 'test'))
def test_hash(self):
'''
Tests over hash attribute checking it over name attribute
'''
self.assertEqual(hash(Node((0,0))), hash(''))
self.assertEqual(hash(Node((0,0), 'test')), hash('test'))
self.assertNotEqual(hash(Node((0,0))), hash('a'))
self.assertNotEqual(hash(Node((0,0), 'test')), hash('test2'))
def test_distance(self):
'''
        Test checking the Euclidean distance between 2D points
'''
origin_points = [(1,1), (1,2), (0,0), (0,0), (0,0), (10,10)]
end_points = [(1,1), (1,3), (-1,-1), (10,10), (3,4), (3,4) ]
distance = [0 , 1 , 1.4142 , 14.1421, 5 , 9.2195 ]
for i in xrange(len(origin_points)):
origin = Node(origin_points[i])
end = Node(end_points[i])
self.assertAlmostEqual(origin.distance(end), distance[i], places=4)
def test_links(self):
'''
Test links are created inside node
'''
nodes = {'m': Node((0,0), 'm'), 'b': Node((10,10), 'b'), 'a': Node((4,3), 'a')}
links = [('m','b'), ('a','m'),('b','m')]
notlinks = [('b', 'a'), ('m', 'a')]
for origin, end in links:
nodes[origin].addLink(nodes[end])
for origin, end in links:
self.assertIn(nodes[end],nodes[origin].links)
for origin, end in notlinks:
self.assertNotIn(nodes[end], nodes[origin].links)
for node in nodes.values():
            self.assertNotEqual(node.links , set(), msg='%s has no links with other nodes'%(node.name))
class TestCity(unittest.TestCase):
def setUp(self):
pass
def test_links(self):
'''
        Test links are created in both ways: (origin, end) and (end, origin)
'''
cities={'Madrid': City('Madrid', (0,0)), 'Barcelona': City('Barcelona', (10,10)), 'Albacete': City('Albacete', (3,4))}
links = [('Madrid', 'Barcelona'), ('Albacete', 'Madrid')]
notlinks = [('Barcelona', 'Albacete'), ('Albacete', 'Barcelona')]
for origin, end in links:
cities[origin].addLink(cities[end])
for origin, end in links:
self.assertIn(cities[end], cities[origin].links)
self.assertIn(cities[origin], cities[end].links)
for origin, end in notlinks:
self.assertNotIn(cities[origin], cities[end].links)
for city in cities.values():
            self.assertNotEqual(city.links , set(), msg='%s has no links with other cities'%(city.name))
class TestCityMap(unittest.TestCase):
def setUp(self):
'''
Preparing input data to perform some tests
'''
import json, tempfile
self.data = {u'cities': {u'Madrid': [0,0], u'Barcelona': [10,10], u'Albacete': [3,4]}, u'links': [[u'Madrid', u'Barcelona'], [u'Albacete', u'Barcelona']]}
self.json_data = json.dumps(self.data)
tmp_input = tempfile.mkstemp()
input_file = open(tmp_input[1], 'w')
self.input_filename = tmp_input[1]
json.dump(self.data, input_file)
input_file.close()
self.output_filename = tempfile.mkstemp()[1]
def tearDown(self):
'''
Removing unused files
'''
import os
os.remove(self.input_filename)
os.remove(self.output_filename)
def test_load_exceptions(self):
'''
Testing raising exceptions due to not valid data format
'''
self.assertRaises(NotCities, CityMap.load, {})
self.assertRaises(NotLinks, CityMap.load, {'cities': []})
self.assertRaises(NotValidCitiesFormat, CityMap.load, {'cities': [], 'links': []})
self.assertRaises(NotValidCitiesFormat, CityMap.load, {'cities': {1: [1,2]}, 'links': []})
self.assertRaises(NotValid2DPointFormat, CityMap.load, {'cities': {'name': 'wrongpoint'}, 'links': []})
self.assertRaises(NotValidLinksFormat, CityMap.load, {'cities': {'name': [1,2]}, 'links': 'wronglink'})
self.assertRaises(NotValidLinksFormat, CityMap.load, {'cities': {'Madrid': [0,0], 'Barcelona': [10,10]}, 'links': [1,2]})
self.assertRaises(WrongLink, CityMap.load, {'cities': {'Madrid': [0,0], 'Barcelona': [10,10]}, 'links': [[1,2]]})
def test_load_instance(self):
'''
Testing right construction with a valid input format
'''
self.assertIsInstance(CityMap.load({'cities': {'Madrid': [0,0], 'Barcelona': [10,10.0]}, 'links': [['Madrid','Barcelona']]}), CityMap)
def check_load_data(self, citymap):
'''
Check function to check that input data is being represented correctly
'''
for cityname in self.data['cities']:
city = citymap[cityname]
self.assertNotEqual(city, None)
x,y = self.data['cities'][cityname]
self.assertEqual(city.x, x)
self.assertEqual(city.y, y)
related_links = [link for link in self.data['links'] if link[0] == cityname or link[1] == cityname]
for link in related_links:
origin, end = link
if origin != cityname:
end = origin
self.assertIn(end, [c.name for c in city.links])
def test_load_data(self):
'''
Test input data
'''
citymap = CityMap.load(self.data)
self.check_load_data(citymap)
def test_load_string(self):
'''
Test loading data from string format.
'''
citymap = CityMap.loadFromString(self.json_data)
self.check_load_data(citymap)
def test_load_filename(self):
'''
Test loading data from filename
'''
citymap = CityMap.loadFromFile(self.input_filename)
self.check_load_data(citymap)
def test_load_bad_filename(self):
'''
Test exception launched due to bad filename
'''
import tempfile, os
tmpfile = tempfile.mkstemp()
os.remove(tmpfile[1])
self.assertRaises(IOError, CityMap.loadFromFile, tmpfile[1])
def test_not_data_in_file(self):
'''
Test exception launched due to not json data in file
'''
import tempfile, os
tmpfile = tempfile.mkstemp()
self.assertRaises(ValueError, CityMap.loadFromFile, tmpfile[1])
os.remove(tmpfile[1])
def test_not_data_in_string(self):
'''
Test exception launched due to not json data in string
'''
self.assertRaises(ValueError, CityMap.loadFromString, '')
def test_save(self):
'''
Test check that load and save processes don't modify input/output format
'''
import tempfile, os, json
citymap = CityMap.loadFromString(self.json_data)
tmpfile = tempfile.mkstemp()
citymap.save(tmpfile[1])
self.assertEqual(str(self.data), str(json.load(open(tmpfile[1]))))
os.remove(tmpfile[1])
if __name__ == '__main__':
unittest.main(verbosity=2)
|
# first attempt
import cv2
from PIL import Image
import pytesseract
import numpy as np
#print pytesseract.image_to_string(Image.open('roemisch2.PNG'))
#path = './bilder/roemisch2.PNG'
path = 'randomzahlen.jpg'
img = Image.open(path)
print (pytesseract.image_to_string(img))
image = cv2.imread(path)
cv2.imshow('hallo', image)
cv2.waitKey(0)
|
#MatthewMascoloCH7P1.py
#I pledge my honor that I have abided
#by the Stevens Honor System. Matthew Mascolo
#
#This program calculates BMI and determines
#whether it is in a healthy range.
def main():
weight = eval(input("Enter your weight in pounds: "))
height = eval(input("Enter your height in inches: "))
BMI = (weight * 720) / (pow(height, 2))
BMI = round(BMI, 1)
if BMI > 25:
print("Your BMI is:", BMI)
print("This is ABOVE what is considered a healthy range.")
else:
if BMI < 19:
print("Your BMI is:", BMI)
print("This is BELOW what is considered a healthy range.")
else:
print("Your BMI is:", BMI)
print("This is within a healthy range.")
main()
|
# -*- coding: utf-8 -*-
# @Time : 2020/5/23 19:49
# @Author : J
# @File : 视频入门.py
# @Software: PyCharm
# Open the camera
import numpy as np
import cv2 as cv
cap = cv.VideoCapture(0)
if not cap.isOpened():
print("cannot not open camera")
exit()
while True:
    ret,frame = cap.read() # ret is True/False depending on whether a frame was read; frame is the captured image
    frame = cv.flip(frame,1) # mirror the image
if not ret:
print("Can't receive frame (stream end?). Exiting ...")
break
    gray = cv.cvtColor(frame,cv.COLOR_BGR2GRAY) # convert to grayscale
    cv.imshow("camera", frame)
if cv.waitKey(1) == ord("q"):
break
cap.release()
cv.destroyAllWindows()
# Play a video file
cap = cv.VideoCapture("wing.mp4")
while cap.isOpened():
ret,frame = cap.read()
if not ret:
print("Can't receive frame (stream end?). Exiting ...")
break
gray = cv.cvtColor(frame,cv.COLOR_BGR2GRAY)
cv.imshow("shame",gray)
# cv.waitKey(30);
if cv.waitKey(1) == ord("q"):
break
cap.release()
cv.destroyAllWindows()
# Save video from the camera
cap = cv.VideoCapture(0)
# FourCC is a 4-byte code used to specify the video codec. On Fedora: DIVX, XVID, MJPG, X264, WMV1, WMV2.
# XVID is usually the best choice; MJPG produces large files, while X264 produces very small files.
fourcc = cv.VideoWriter_fourcc(*"XVID")
out = cv.VideoWriter("output.avi",fourcc,20.0,(640,480)) # output file, codec, frame rate, frame size
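# Note: the frame size passed to VideoWriter must match the size of the frames
# written with out.write(); if they differ, the output file may be empty or unplayable.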
while cap.isOpened():
ret,frame = cap.read()
if not ret:
print("Can't receive frame (stream end?). Exiting ...")
break
frame = cv.flip(frame,1)
out.write(frame)
cv.imshow("frame",frame)
if cv.waitKey(1) == ord("q"):
break
cap.release()
out.release()
cv.destroyAllWindows()
|
#!/usr/bin/env /data/mta/Script/Python3.8/envs/ska3-shiny/bin/python
#########################################################################################
# #
# update_html_page.py: udate html pages #
# #
# author: t. isobe (tisobe@cfa.harvard.edu) #
# #
# last update: Mar 09, 2021 #
# #
#########################################################################################
import os
import sys
import re
import string
import math
import numpy
import time
import Chandra.Time
#
#--- reading directory list
#
path = '/data/mta/Script/IRU/Scripts/house_keeping/dir_list'
with open(path, 'r') as f:
data = [line.strip() for line in f.readlines()]
for ent in data:
atemp = re.split(':', ent)
var = atemp[1].strip()
line = atemp[0].strip()
exec("%s = %s" %(var, line))
#
#--- append paths to private folders to the python path
#
sys.path.append(bin_dir)
sys.path.append(mta_dir)
#
#--- import function
#
import mta_common_functions as mcf
#
#--- temp writing file name
#
rtail = int(time.time())
zspace = '/tmp/zspace' + str(rtail)
#
#--- some data
#
mon_list = ['jan', 'feb', 'mar', 'apr', 'may', 'jun', 'jul', 'aug', 'sep', 'oct', 'nov', 'dec']
#----------------------------------------------------------------------------------
#-- update_html_page: update the html page --
#----------------------------------------------------------------------------------
def update_html_page(syear=''):
"""
update the html page
input: syear --- starting year. if it is blank, syear is this year
output: <web_dir>/iru_bias_trend_year<year>.html
"""
#
#--- find today's date
#
date = time.strftime("%Y:%m:%d", time.gmtime())
atemp = re.split(':', date)
tyear = int(atemp[0])
tmon = int(atemp[1])
tday = int(atemp[2])
if syear == '':
syear = tyear
#
#--- read template
#
ifile = house_keeping + 'iru_template'
with open(ifile, 'r') as f:
template = f.read()
for year in range(1999, tyear+1):
mon_b = 1
mon_e = 12
if year == 1999:
mon_b = 9
elif year == tyear:
mon_e = tmon
update_page_for_year(year, tyear, mon_b, mon_e, template)
#
#--- make a symbolic link to the latest year to the main page
#
mpage = web_dir + 'iru_bias_trend.html'
cmd = 'rm -rf ' + mpage
os.system(cmd)
tpage = web_dir + 'iru_bias_trend_year' + str(tyear) + '.html'
cmd = 'ln -s ' + tpage + ' ' + mpage
os.system(cmd)
#----------------------------------------------------------------------------------
#-- update_page_for_year: create html page for the given year --
#----------------------------------------------------------------------------------
def update_page_for_year(myear, tyear, mon_b, mon_e, template):
"""
create html page for the given year
    input:  myear --- year for which the page is created
tyear --- this year
mon_b --- starting month, usually 1 (Jan)
            mon_e --- ending month, usually 12 (Dec)
template --- template of the page
output: <web_dir>/iru_bias_trend_year<year>.html
"""
#
#--- monthly bias/hist popup plots
#
plink = '<tr>\n'
if mon_b > 1:
plink = plink + fill_blank_plot_link(1, mon_b)
for mon in range(mon_b, mon_e+1):
plink = plink + create_plot_link(myear, mon)
if mon_e < 12:
plink = plink + fill_blank_plot_link(mon_e+1, 13)
plink = plink + '</tr>\n'
#
#--- table link to other years
#
tlink = '<tr>\n'
m = 0
for year in range(1999, tyear+1):
if year == myear:
tlink = tlink + '<th style="color:green;"><b>' + str(year) + '</b></th>\n'
else:
tlink = tlink + create_table_link(year)
m += 1
if m % 10 == 0:
tlink = tlink + '</tr>\n<tr>\n'
m = 0
if m != 0:
for k in range(m, 10):
tlink = tlink + '<th> </th>\n'
tlink = tlink + '</tr>\n'
#
#--- link direction buttons
#
direct = create_direct_button(myear, tyear)
#
#--- replace the table entry to the template
#
template = template.replace('#YEAR#', str(myear))
template = template.replace('#DIRECT#', direct)
template = template.replace('#PLOTS#', plink)
template = template.replace('#YTABLE#', tlink)
#
#--- create the table
#
outfile = web_dir + 'iru_bias_trend_year' + str(myear) + '.html'
with open(outfile, 'w') as fo:
fo.write(template)
#----------------------------------------------------------------------------------
#-- create_plot_link: create table entry for a monthly plot link --
#----------------------------------------------------------------------------------
def create_plot_link(year, mon):
"""
create table entry for a monthly plot link
input: year --- year
mon --- month
output: line --- table entry
"""
tyear = str(year)
syr = tyear[2] + tyear[3]
line = "<th><a href=\"javascript:WindowOpener("
line = line + "'" + str(year) + "/" + mon_list[mon-1] + syr + "_bias.png',"
line = line + "'" + str(year) + "/" + mon_list[mon-1] + syr + "_hist.png')\">"
line = line + mon_list[mon-1].capitalize() + "</a></th>\n"
return line
#----------------------------------------------------------------------------------
#-- fill_blank_plot_link: create table entry for a blank plot link --
#----------------------------------------------------------------------------------
def fill_blank_plot_link(start, stop):
"""
create table entry for a blank plot link
input: start --- starting month
stop --- stopping month
output: line --- table entry
"""
line = ''
for mon in range(start, stop):
line = line + '<th>' + mon_list[mon-1].capitalize() + '</th>\n'
return line
#----------------------------------------------------------------------------------
#-- create_table_link: create table entry to year table --
#----------------------------------------------------------------------------------
def create_table_link(year):
"""
create table entry to year table
input: year --- year
output: line --- table entry
"""
line = '<th><a href="./iru_bias_trend_year' + str(year) + '.html">'
line = line + str(year) + '</a></th>\n'
return line
#----------------------------------------------------------------------------------
#-- create_direct_button: create directional button for the page --
#----------------------------------------------------------------------------------
def create_direct_button(myear, tyear):
"""
create directional button for the page
    input:  myear --- year for which the page is created
tyear --- this year
output: line --- the html code with the button
"""
if myear == 1999:
line = 'Go to: <a href="./iru_bias_trend_year2000.html"><em>Next Year</em></a>'
elif myear == tyear:
line = 'Go to: <a href="./iru_bias_trend_year'+ str(tyear-1) + '.html"><em>Prev Year</em></a>'
else:
line = 'Go to: <a href="./iru_bias_trend_year'+ str(myear-1) + '.html"><em>Prev Year</em></a>'
line = line + ' / '
line = line + '<a href="./iru_bias_trend_year'+ str(myear+1) + '.html"><em>Next Year</em></a>'
return line
#----------------------------------------------------------------------------------
if __name__ == '__main__':
if len(sys.argv) > 1:
year = int(float(sys.argv[1]))
else:
year = ''
update_html_page(year)
|
#!/usr/bin/env python
#-*- coding:UTF-8 -*-
import os
path='/home/tla001/myworks/zrworks/mykk/resource/iceskater1'
f=open("images1.txt",'w')
files = list()
for pic in os.listdir(path):
if(pic.find('.jpg')!=-1):
files.append(pic)
files.sort()
for pic in files:
f.write(os.path.join(path,pic)+'\n')
f.close()
|
import requests
from bs4 import BeautifulSoup
from PyQt5 import QtWidgets
from vindue import Ui_MainWindow
import sys
class MyWindow(QtWidgets.QMainWindow):
def __init__(self):
super(MyWindow, self).__init__()
self.ui = Ui_MainWindow()
self.ui.setupUi(self)
self.ui.pushButton.clicked.connect(self.onsLottoClick)
self.ui.pushButton_2.clicked.connect(self.lorLottoClick)
def onsLottoClick(self):
vinder_tal = []
kupon = []
rigtige = ""
antal_rigtige = 0
antal_tal = 6
page = requests.get('https://vindertal.com/onsdags-lotto.aspx')
if page.status_code == 200:
soup = BeautifulSoup(page.content, 'html.parser')
for i in range(antal_tal):
vinder_tal.append(soup.find(id="ContentPlaceHolderDefault_CphMain_AllNumbers_5_LvWinnerNumbers_LblNumber_"+str(i)).contents)
else:
            self.ui.label_3.setText(f'Kunne ikke hente vindertal. Prøv igen senere. Status kode: {page.status_code}')
            return
vinder_tal = [item for sublist in vinder_tal for item in sublist]
kupon.append(self.ui.lineEdit.text())
kupon.append(self.ui.lineEdit_2.text())
kupon.append(self.ui.lineEdit_3.text())
kupon.append(self.ui.lineEdit_4.text())
kupon.append(self.ui.lineEdit_5.text())
kupon.append(self.ui.lineEdit_6.text())
for i in vinder_tal:
if i in kupon:
antal_rigtige += 1
rigtige += i+" "
if antal_rigtige == 0:
self.ui.label_3.setText('Ingen rigtige.')
elif antal_rigtige == 1:
self.ui.label_3.setText(f'{antal_rigtige} rigtig: {rigtige}')
else:
self.ui.label_3.setText(f'{antal_rigtige} rigtige: {rigtige}')
def lorLottoClick(self):
vinder_tal = []
kupon = []
rigtige = ""
antal_rigtige = 0
antal_tal = 7
page = requests.get('https://vindertal.com/loerdags-lotto.aspx')
if page.status_code == 200:
soup = BeautifulSoup(page.content, 'html.parser')
for i in range(antal_tal):
vinder_tal.append(soup.find(id="ContentPlaceHolderDefault_CphMain_AllNumbers_5_LvWinnerNumbers_LblNumber_"+str(i)).contents)
else:
            self.ui.label_4.setText(f'Kunne ikke hente vindertal. Prøv igen senere. Status kode: {page.status_code}')
            return
vinder_tal = [item for sublist in vinder_tal for item in sublist]
kupon.append(self.ui.lineEdit_7.text())
kupon.append(self.ui.lineEdit_8.text())
kupon.append(self.ui.lineEdit_9.text())
kupon.append(self.ui.lineEdit_10.text())
kupon.append(self.ui.lineEdit_11.text())
kupon.append(self.ui.lineEdit_12.text())
kupon.append(self.ui.lineEdit_13.text())
for i in vinder_tal:
if i in kupon:
antal_rigtige += 1
rigtige += i+" "
if antal_rigtige == 0:
self.ui.label_4.setText('Ingen rigtige.')
elif antal_rigtige == 1:
self.ui.label_4.setText(f'{antal_rigtige} rigtig: {rigtige}')
else:
self.ui.label_4.setText(f'{antal_rigtige} rigtige: {rigtige}')
app = QtWidgets.QApplication([])
application = MyWindow()
application.show()
sys.exit(app.exec())
|
class Empleado:
def __init__(self, nombre, edad, legajo, sueldo):
self.nombre = nombre
self.edad = edad
self.legajo = legajo
self.sueldo = sueldo
def calcular_sueldo(self, descuento, bonos):
return self.sueldo-descuento+bonos
class AgentesVentas(Empleado):
def __init__(self, nombre, edad, legajo, sueldo, mostrador):
self.numeroMostrador = mostrador
super().__init__(nombre, edad, legajo, sueldo)
class Tripulante(Empleado):
def mostrar_Renovacion(self):
if self.edad >= 58:
return "La licencia tiene que ser renovada cada un año"
else:
return "La licencia tiene que ser renovada cada 6 meses"
cercio = AgentesVentas("cercio", 24, "m618", 6000, 4)
print(cercio.nombre)
print(cercio.edad)
print(cercio.calcular_sueldo(300, 3000))
t = Tripulante("viloria", 45, "m789", 4000)
print(t.mostrar_Renovacion())
|
# Generated by Django 3.2.7 on 2021-10-06 05:32
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('products', '0006_bill_employee'),
]
operations = [
migrations.RemoveField(
model_name='employeebank',
name='employee_id',
),
migrations.AddField(
model_name='company',
name='bank_account_no',
field=models.CharField(max_length=255, null=True),
),
migrations.AddField(
model_name='employee',
name='bank_account_no',
field=models.CharField(max_length=255, null=True),
),
migrations.DeleteModel(
name='CompanyBank',
),
migrations.DeleteModel(
name='EmployeeBank',
),
]
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Mar 8 16:44:00 2018
@author: ppxee
"""
### Import required libraries ###
import matplotlib.pyplot as plt #for plotting
from astropy.io import fits #for handling fits
from astropy.table import Table #for handling tables
import numpy as np #for handling arrays
from astropy.stats import median_absolute_deviation
import vari_funcs #my module to help run code neatly
plt.close('all') #close any open plots
from matplotlib.offsetbox import OffsetImage, AnnotationBbox
font = {'family' : 'DejaVu Sans',
'weight' : 'normal',
'size' : 14}
plt.rc('font', **font)
def month_avg_lightcurve(avgflux, avgfluxerr):
months = ['sep05','oct05','nov05','dec05', 'jan06', 'dec06', 'jan07',
'aug07', 'sep07', 'oct07', 'sep08', 'oct08', 'nov08', 'jul09',
'aug09', 'sep09', 'oct09', 'nov09', 'dec09', 'jan10', 'feb10',
'aug10', 'sep10', 'oct10', 'nov10', 'dec10', 'jan11', 'feb11',
'aug11', 'sep11', 'oct11', 'nov11', 'dec11', 'jan12', 'feb12',
'jul12', 'aug12', 'sep12', 'oct12', 'nov12']
#set up time variable for plot
nums = fits.open('monthly_numbers.fits')[1].data
t = np.linspace(1, len(nums), num=len(nums))
tdataind = np.isin(nums['Month'], months)
tdata = t[tdataind]
ticks = nums['Month']
mask = np.zeros(len(t))
inds = [0,4,14,16,23,26,36,38,46,53,59,65,71,77,82,86]
mask[inds] = 1
mask = mask.astype(bool)
ticks[~mask] = ''
#Plot graph in new figure
plt.figure(figsize=[19,7])
plt.xticks(t, ticks, rotation='vertical')
plt.errorbar(tdata, avgflux, yerr=avgfluxerr, fmt = 'bo')
plt.xlabel('Month')
plt.ylabel('K-band flux of object')
plt.title('Average Lightcurve')
plt.tight_layout()
return
tbdata = fits.open('mag_flux_tables/month_mag_flux_table_best.fits')[1].data
varys = fits.open('variable_tables/no06_variables_chi40.fits')[1].data
obnum = 62242
#mask = fitsdata['NUMBER_1'] == ob
obdata = tbdata[tbdata['NUMBER'] == obnum]
obvarys = varys[varys['NUMBER_05B'] == 62243]
months = ['sep05','oct05','nov05','dec05', 'jan06', 'dec06', 'jan07',
'aug07', 'sep07', 'oct07', 'sep08', 'oct08', 'nov08', 'jul09',
'aug09', 'sep09', 'oct09', 'nov09', 'dec09', 'jan10', 'feb10',
'aug10', 'sep10', 'oct10', 'nov10', 'dec10', 'jan11', 'feb11',
'aug11', 'sep11', 'oct11', 'nov11', 'dec11', 'jan12', 'feb12',
'jul12', 'aug12', 'sep12', 'oct12', 'nov12']
for month in months:
# if month == 'sep05':
# flux = obdata['MAG_APER_'+month][:,4]
# fluxerr = obdata['MAGERR_APER_'+month][:,4]
# else:
# flux = np.append(flux, obdata['MAG_APER_'+month][:,4])
# fluxerr = np.append(fluxerr, obdata['MAGERR_APER_'+month][:,4])
if month == 'sep05':
flux = obdata['FLUX_APER_'+month][:,4]
fluxerr = obdata['FLUXERR_APER_'+month][:,4]
else:
flux = np.append(flux, obdata['FLUX_APER_'+month][:,4])
fluxerr = np.append(fluxerr, obdata['FLUXERR_APER_'+month][:,4])
#mask = flux == 99
mask = flux <= 0
flux[mask] = np.nan
fluxerr[mask] = np.nan
month_avg_lightcurve(flux, fluxerr)
plt.title('Light curve for DR11 ID 62253')#%i' % obnum)
### Get little image ###
semesters = ['05B', '08B', '12B']#['05B', '07B', '08B', '09B', '10B', '11B', '12B']#'06B',
xcoords = [15, 50, 80]
ycoords = [20000,20000,20000]
artists = []
for n, sem in enumerate(semesters):
print(sem)
if sem == '10B':
imdata = fits.getdata('cleaned_UDS_'+sem+'_K.fits')
else:
imdata = fits.getdata('extra_clean_no06_UDS_'+sem+'_K.fits')
### Find coordinates of objects ###
x = obvarys['X_IMAGE_'+sem]
x = x.astype(int)#int(x)
y = obvarys['Y_IMAGE_'+sem]
y = y.astype(int)#int(x)
size = 30 # size of half side of square
newim = imdata[y[0]-size:y[0]+size,x[0]-size:x[0]+size]
del imdata
# print(newim[size,size])
imuppthresh = 200
newim[newim>imuppthresh] = imuppthresh
imlowthresh = 0
newim[newim<imlowthresh] = imlowthresh
### code from stack exchange
ax = plt.gca()
im = OffsetImage(newim, zoom=2.5)
ab = AnnotationBbox(im, (xcoords[n], ycoords[n]), xycoords='data', frameon=False)
artists.append(ax.add_artist(ab))
ax.update_datalim(np.column_stack([xcoords, ycoords]))
ax.autoscale()
### put on arrows ###
plt.arrow(12,19000,-7,-3000, head_width=1,
head_length=500, color='k') # for first snapshot
plt.arrow(45.5,21000,-5,0, head_width=300,
          head_length=1, color='k') # for second snapshot
plt.arrow(80,19000,+3,-3500, head_width=1,
          head_length=500, color='k') # for third snapshot
|
def min(values):
smallest = None
for value in values:
if smallest is None or value < smallest:
smallest = value
return smallest
print(min([3, 4, 82, 71]))
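# A couple of extra checks, following directly from the loop above (note that this
# definition shadows the built-in min): an empty list returns None, and a
# single-element list returns that element.
print(min([]))    # None
print(min([7]))   # 7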
|
from main import PKT_DIR_INCOMING, PKT_DIR_OUTGOING
# TODO: Feel free to import any Python standard modules as necessary.
import struct
import socket
import time
import pickle
from firewall import *
from helpers import *
# length of header in bytes
# hard coded constants
data = pickle.load(open('testpacket.p', 'rb'))
packets = data['packets']
rules = data['rules']
geos = data['geos']
for packet in packets:
prot = get_protocol(packet[0])
if prot == UDP_PROTOCOL and is_dns(packet[1],packet[0]):
pkt = packet[0]
pkt_dir = packet[1]
# print 'testing ip checksum'
# print 'calculated: '+str(ip_checksum(pkt))
# print 'actual: '+str(struct.unpack('!H', pkt[10:12])[0])
# print 'testing tcp checksum'
# print 'calculated: '+str(tcp_checksum(pkt))
# start = get_ip_header_length(pkt) + 16
# print 'actual: '+str(struct.unpack('!H', pkt[start:start+2])[0])
        # print 'testing udp checksum'
# print 'calculated: '+str(udp_checksum(pkt))
# start = get_ip_header_length(pkt)+6
# print 'actual: '+str(struct.unpack('!H', pkt[start:start+2])[0])
def set_string(a,b,i1,i2):
resp = a[:i1] + b + a[i2:]
return resp
def make_tcp_response(pkt):
    '''Placeholder: building a TCP response is not implemented yet.'''
    return None
resp = make_dns_response(pkt)
|
'''
Project: AirBnB Clone
File: test/user.py
By: Mackenzie Adams, Gloria Bwandungi
In this file we create our first test for the root of our Rest API.
'''
import unittest
import json
from app import app
import logging
from app.models.base import db
from app.models.user import User
class FlaskrTestCase_User(unittest.TestCase):
def setUp(self):
#creating a test client
self.app = app.test_client()
#disable logging
logging.disable(logging.CRITICAL)
#create User table
db.create_tables([User], safe=True)
def tearDown(self):
#delete User table
db.drop_tables([User], safe=True)
def test_create(self):
#send a POST request to app
response = self.app.post('/users', data = dict(first_name = 'Jon',
last_name = 'Snow',
email = 'jon@snow.com',
password = 'first'))
#send a POST request to app
response = self.app.post('/users', data = dict(first_name = 'Jon',
last_name = 'Snow',
email = 'jon@snow.com',
password = 'first'))
def test_list(self):
pass
def test_get(self):
pass
def test_delete(self):
pass
def test_update(self):
pass
|
from django import forms
from django.db import models
from django.forms import ModelForm
|
"""
Author: Rokon Rahman
File: training a CNN architecture on the CIFAR-10 dataset
"""
import keras
import numpy as np
# project modules
from .. import config
from . import my_model, preprocess
model = my_model.get_model()
model.summary()
#loading data
#X_train, Y_train = preprocess.load_train_data()
X_train, Y_train = preprocess.load_train_data()
print("train data shape: ", X_train.shape)
print("train data label: ", Y_train.shape)
#compile the model
model.compile(keras.optimizers.Adam(config.lr),
keras.losses.categorical_crossentropy,
metrics = ['accuracy'])
#checkpoints
model_cp = my_model.save_model_checkpoint()
early_stopping = my_model.set_early_stopping()
#for training model
model.fit(X_train, Y_train,
batch_size = config.batch_size,
epochs = config.nb_epochs,
verbose = 2,
shuffle = True,
callbacks = [early_stopping, model_cp],
validation_split = 0.2)
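# A possible follow-up step (sketch only): evaluate the trained model. This assumes
# the project's preprocess module also provides a load_test_data() helper, which is
# not shown here.
#X_test, Y_test = preprocess.load_test_data()
#score = model.evaluate(X_test, Y_test, verbose = 0)
#print("test loss, test acc:", score)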
|
"""
CPSC-51100, SUMMER 2019
NAME: JASON HUGGY, JOHN KUAGBENU, COREY PAINTER
PROGRAMMING ASSIGNMENT #6
"""
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
df = pd.read_csv('ss13hil.csv')
# Replaces each value with the first number in the range of amounts from
# PUMS documentation
def replace(TAXP):
if TAXP == 1:
return 0
elif (TAXP >= 2) and (TAXP <= 21):
return (TAXP * 50 - 100)
elif (TAXP >=22) and (TAXP <= 62):
return ((TAXP-22)*100) + 1000
elif TAXP == 63:
return 5500
elif TAXP >= 64:
return ((TAXP-64)*1000) + 6000
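# Spot checks of the mapping, read straight off the branches above:
# replace(1) -> 0, replace(2) -> 0, replace(21) -> 950,
# replace(22) -> 1000, replace(62) -> 5000, replace(63) -> 5500, replace(64) -> 6000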
df['TAXP'] = df.TAXP.apply(replace)
# Plots a figure with four subplots, 2 by 2
fig, axs = plt.subplots(2, 2, figsize=(15, 8))
fig.suptitle('Sample Output', fontsize=20, y = 1)
# Creates a pie chart of HHL with legend
HHLcolor=['Blue','DarkOrange','Green','Red','Purple'] # Colors for chart
HHL=['English only', 'Spanish', 'Other Indo-European', 'Asian and Pacific Island languages', 'Other']
label= list(HHL)
axs[0, 0].pie(df.HHL.value_counts(), colors= HHLcolor, startangle= 242, radius= 1.25, counterclock= True, center= (0.5, 0.5))
axs[0, 0].set_title('Household Languages', fontsize= 12)
axs[0, 0].legend(labels = label, loc=2, fontsize= '10', bbox_to_anchor= (-.72, 0.80, .1, .2), markerscale= .05)
axs[0, 0].set_ylabel('HHL', labelpad = 140)
# Creates a histogram of HINCP with superimposed KDE plot
hincp = df.HINCP.dropna()
hincp = hincp[hincp > 10]
logbins = np.logspace(np.log10(10), np.log10(max(hincp)),85) #logspaces bins of the x axis
axs[0, 1].hist(hincp, bins = logbins, density=True, color = 'Green', alpha= 0.5)
axs[0, 1].set_xscale('log') # logspaces the x axis ticks
axs[0, 1].set_xlim(5, 10**7.2)
hincp.plot.kde(linestyle='--', linewidth=2, color='Black', ax = axs[0, 1])
axs[0, 1].set_yticks([0.000000, 0.000005, 0.000010, 0.000015, 0.000020])
axs[0, 1].set_title('Distribution of Household Income', fontsize=12)
axs[0, 1].set_xlabel('Household Income ($) - Log Scaled')
axs[0, 1].set_ylabel('Density')
# Creates a bar chart of thousands of households for each vehicle value
veh_x= df.VEH.unique()
veh_y= df.VEH.repeat(df.WGTP).value_counts()
veh_y/=1000
x_label= [1,2,3,0,4,5,6]
axs[1, 0].set_xlabel("# of Vehicles")
axs[1, 0].set_ylabel("Thousands of Households")
axs[1, 0].bar(veh_x[~np.isnan(veh_x)], veh_y, width=.85, bottom= 0.0, align='center', color= 'red', tick_label= x_label)
axs[1, 0].set_title('Vehicles Available in Households', fontsize= 12)
# Creates a scatter plot of property taxes vs property value
# Uses WGTP for size of each dot, and first mortgage payment is represented by the color of the dot
# Uses a colorbar for reference of MRGP amount
scatter = df[df.VALP <= 2000000]
i = axs[1, 1].scatter(scatter.VALP, scatter.TAXP, c=scatter.MRGP, s = scatter.WGTP, cmap='seismic', alpha= 0.1)
cbar = plt.colorbar(i, ax=axs[1, 1], ticks= [1250, 2500, 3750, 5000])
cbar.set_label('First Mortgage Payment (Monthly $)')
axs[1, 1].set_xlim(0, 1200000)
axs[1, 1].set_ylim(ymin=0)
axs[1, 1].set_title('Property Taxes vs. Property Values', fontsize=12)
axs[1, 1].set_xlabel('Property Value ($)')
axs[1, 1].set_ylabel('Taxes ($)')
# Adjusts the space in between subplots
plt.subplots_adjust(wspace=0.22, hspace=0.4)
# Saves figure to default directory as a png file.
plt.savefig('pums.png')
|
# Generated by Django 2.2.4 on 2019-10-01 17:31
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('job', '0022_auto_20190929_0352'),
]
operations = [
migrations.AlterField(
model_name='sharejob',
name='content',
field=models.TextField(blank=True, null=True),
),
]
|
def uppercase_letters(s):
for c in s:
yield c.upper()
print(list(uppercase_letters('abcdefgh')))
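# Illustration: the generator is lazy, so values are produced only as they are requested.
gen = uppercase_letters('xyz')
print(next(gen))  # X
print(next(gen))  # Y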
|
"""
These are subscription related models.
"""
from dataclasses import dataclass, field
from typing import List, Optional
from .base import BaseModel
from .common import BaseApiResponse, BaseResource, ResourceId, Thumbnails
from .mixins import DatetimeTimeMixin
@dataclass
class SubscriptionSnippet(BaseModel, DatetimeTimeMixin):
"""
A class representing the subscription snippet info.
Refer: https://developers.google.com/youtube/v3/docs/subscriptions#snippet
"""
publishedAt: Optional[str] = field(default=None, repr=False)
channelTitle: Optional[str] = field(default=None, repr=False)
title: Optional[str] = field(default=None)
description: Optional[str] = field(default=None)
resourceId: Optional[ResourceId] = field(default=None, repr=False)
channelId: Optional[str] = field(default=None, repr=False)
thumbnails: Optional[Thumbnails] = field(default=None, repr=False)
@dataclass
class SubscriptionContentDetails(BaseModel):
"""
A class representing the subscription contentDetails info.
Refer: https://developers.google.com/youtube/v3/docs/subscriptions#contentDetails
"""
totalItemCount: Optional[int] = field(default=None)
newItemCount: Optional[int] = field(default=None)
activityType: Optional[str] = field(default=None, repr=False)
@dataclass
class SubscriptionSubscriberSnippet(BaseModel):
"""
A class representing the subscription subscriberSnippet info.
Refer: https://developers.google.com/youtube/v3/docs/subscriptions#subscriberSnippet
"""
title: Optional[str] = field(default=None)
description: Optional[str] = field(default=None)
channelId: Optional[str] = field(default=None, repr=False)
thumbnails: Optional[Thumbnails] = field(default=None, repr=False)
@dataclass
class Subscription(BaseResource):
"""
A class representing the subscription info.
Refer: https://developers.google.com/youtube/v3/docs/subscriptions
"""
snippet: Optional[SubscriptionSnippet] = field(default=None)
contentDetails: Optional[SubscriptionContentDetails] = field(
default=None, repr=False
)
subscriberSnippet: Optional[SubscriptionSubscriberSnippet] = field(
default=None, repr=False
)
@dataclass
class SubscriptionListResponse(BaseApiResponse):
"""
A class representing the subscription's retrieve response info.
Refer: https://developers.google.com/youtube/v3/docs/subscriptions/list#response_1
"""
items: Optional[List[Subscription]] = field(default=None, repr=False)
|
def findMax(arr):
maxVal = arr[0]
for i in arr[1:]:
if i > maxVal:
maxVal = i
return maxVal
a = [1,3,4,5,0,-6,8]
b = findMax(a)
print(b)
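# Note: findMax assumes a non-empty list (arr[0] raises IndexError for []).
# A quick check with only negative values:
print(findMax([-5, -2, -9]))  # -2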
|
# Create an empty tuple
a = ()
# Create a tuple with five elements
b = (1, 2.0, "3", [4], (5))
# Print element b[1], which is 2.0
print(b[1])
# Print element b[3], which is [4]
print(b[3])
# Filename: typedemo05.py
# Author: Kaiching Chang
# Date: July, 2014
|
import unittest
from katas.beta.geometric_progression import geometric_sequence_elements
class GeometricSequenceElementsTestCase(unittest.TestCase):
def test_equals(self):
self.assertEqual(geometric_sequence_elements(2, 3, 5),
'2, 6, 18, 54, 162')
def test_equals_2(self):
self.assertEqual(geometric_sequence_elements(2, 2, 10),
'2, 4, 8, 16, 32, 64, 128, 256, 512, 1024')
def test_equals_3(self):
self.assertEqual(geometric_sequence_elements(1, -2, 10),
'1, -2, 4, -8, 16, -32, 64, -128, 256, -512')
|
import json
import logging
from typing import Optional
import requests
from core.env import Environment
from services.chat_token_service import ChatTokenService
class MessageService:
def __init__(self, env: Environment, token_service: ChatTokenService) -> None:
self._env = env
self._token_service = token_service
def admin(self, message: str, parse_mode: str, notification: bool = False):
self.telegram(self._env.telegram_admin_id, message, parse_mode, notification)
def discord(self, message: str):
try:
requests.post(f'{self._env.discord_listener}/message/raw', json=message, headers={
'content-type': 'application/json'
})
except Exception as e:
            logging.error(f'Unknown error occurred while sending message to discord: {e}')
def telegram(self, chat_id: int, message: str, parse_mode: str, notification: bool = False):
try:
            requests.post(f'{self._env.telegram_listener}/chat/{chat_id}/message?parse_mode={parse_mode}&notification={int(notification)}', json=message, headers={
'Content-Type': 'application/json'
})
except Exception as e:
            logging.error(f'Unknown error occurred while sending message to telegram: {e}')
|
import os
import subprocess
from gtts import gTTS
from espeak_bot.utils import generate_random_string
def get_text_to_speech_file(text):
tmp_file_path = '/tmp/{path}.mp3'.format( path = generate_random_string(20))
tts = gTTS(text=text, lang='es')
tts.save(tmp_file_path)
return tmp_file_path
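# Example usage (sketch only; gTTS needs network access, and the sample text below
# is just an illustration):
# mp3_path = get_text_to_speech_file('hola mundo')
# print(mp3_path)  # something like /tmp/<20 random characters>.mp3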
|
import pandas as pd
from plotnine import *
import scipy
snp_regions=pd.read_csv("variant_scores.tsv",header=0,sep='\t')
snp_regions[["logratio", "sig"]] = snp_regions["META_DATA"].str.split(',', expand=True).astype(float)
print(scipy.stats.pearsonr(snp_regions['logratio'],snp_regions['log_probs_diff_abs_sum']))
p = (ggplot(snp_regions, aes('logratio', 'log_probs_diff_abs_sum', color='factor(sig)'))
+ geom_point(alpha=1.0,size=1.0)
+ xlab("logratio bQTL")
+ ylab("Log(Counts(Alt)) -\nLog(Counts(Ref)))")
+ geom_smooth(aes(group=1),colour="black",method="lm")
+ scale_color_manual(values=['#e41a1c','#377eb8'],name="Significant bQTL?")
+ theme_bw(20))
print()
p.save("scatter_top.png")
|
# Changed news to community in this file
import graphene
from graphene_django.types import DjangoObjectType
# from bootcamp.news.models import News
from bootcamp.community.models import Community
from bootcamp.helpers import paginate_data
class CommunityType(DjangoObjectType): # Changed news to community
"""DjangoObjectType to acces the Community model.""" # Changed news to community
count_thread = graphene.Int()
count_likers = graphene.Int()
class Meta:
# model = News
model = Community
def resolve_count_thread(self, info, **kwargs):
return self.get_thread().count()
def resolve_count_likers(self, info, **kwargs):
return self.get_likers().count()
def resolve_count_attendees(self, info, **kwargs):
return self.get_attendees().count()
def resolve_get_thread(self, info, **kwargs):
return self.get_thread()
def resolve_get_likers(self, info, **kwargs):
return self.get_likers()
def resolve_get_attendees(self, info, **kwargs):
return self.get_attendees()
class CommunityPaginatedType(graphene.ObjectType): # Changed news to Community
"""A paginated type generic object to provide pagination to the Community
graph.""" # Changed news to Community
page = graphene.Int()
pages = graphene.Int()
has_next = graphene.Boolean()
has_prev = graphene.Boolean()
# objects = graphene.List(NewsType)
objects = graphene.List(CommunityType)
class CommunityQuery(object): # Changed news to community
# all_news = graphene.List(NewsType)
all_community = graphene.List(CommunityType)
# paginated_news = graphene.Field(NewsPaginatedType, page=graphene.Int())
paginated_community = graphene.Field(CommunityPaginatedType, page=graphene.Int())
# news = graphene.Field(NewsType, uuid_id=graphene.String())
community = graphene.Field(CommunityType, uuid_id=graphene.String())
def resolve_all_community(self, info, **kwargs):
# return News.objects.filter(reply=False)
return Community.objects.filter(reply=False)
def resolve_paginated_community(self, info, page): # Change news to community
"""Resolver functions to query the objects and turn the queryset into
the PaginatedType using the helper function"""
page_size = 30
# qs = News.objects.filter(reply=False)
qs = Community.objects.filter(reply=False)
# return paginate_data(qs, page_size, page, NewsPaginatedType)
return paginate_data(qs, page_size, page, CommunityPaginatedType)
def resolve_community(self, info, **kwargs): # Changed news to community
uuid_id = kwargs.get("uuid_id")
print("uuid_id" + uuid_id)
if uuid_id is not None:
# return News.objects.get(uuid_id=uuid_id)
print("uuid_id" + uuid_id)
return Community.objects.get(uuid_id=uuid_id)
return None
class CommunityMutation(graphene.Mutation): # Changed news to community
"""Mutation to create community objects on a efective way.""" # Changed news to community
class Arguments:
content = graphene.String()
user = graphene.ID()
parent = graphene.ID()
content = graphene.String()
user = graphene.ID()
parent = graphene.ID()
# news = graphene.Field(lambda: News)
community = graphene.Field(lambda: Community)
def mutate(self, **kwargs):
print(kwargs)
|
import os
from distutils.sysconfig import get_config_var
# taken from https://github.com/pypa/setuptools/blob/master/setuptools/command/bdist_egg.py
NATIVE_EXTENSIONS = dict.fromkeys('.dll .so .dylib .pyd'.split())
def sorted_walk(dir):
"""Do os.walk in a reproducible way,
independent of indeterministic filesystem readdir order
"""
for base, dirs, files in os.walk(dir):
dirs.sort()
files.sort()
yield base, dirs, files
def only_files(headers, allowed):
nheaders = []
for h in headers:
is_ok = False
for allw in allowed:
if h.endswith(allw):
is_ok = True
break
if is_ok:
nheaders.append(h)
return nheaders
def get_ext_outputs(bdist_dir=None):
"""Get a list of relative paths to C extensions in the output distro"""
all_outputs = []
if bdist_dir is None:
bdist_dir = os.path.abspath(os.path.dirname(__file__))
paths = {bdist_dir: ''}
for base, dirs, files in sorted_walk(bdist_dir):
for filename in files:
if os.path.splitext(filename)[1].lower() in NATIVE_EXTENSIONS:
all_outputs.append(paths[base] + filename)
for filename in dirs:
paths[os.path.join(base, filename)] = (paths[base] + filename + '/')
return all_outputs, bdist_dir
def get_mod_suffix():
return get_config_var('EXT_SUFFIX') # e.g., '.cpython-36m-darwin.so'
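# Example usage (illustration): list any compiled extension modules found under
# this file's directory and show the interpreter's extension-module suffix.
if __name__ == '__main__':
    outputs, base_dir = get_ext_outputs()
    print(base_dir, outputs)
    print(get_mod_suffix())  # e.g. '.cpython-38-x86_64-linux-gnu.so'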
|
import base64
import pickle
import cv2
import os
import mediapipe as mp # Import mediapipe
mp_drawing = mp.solutions.drawing_utils
mp_holistic = mp.solutions.holistic
import csv
import pandas as pd
from sklearn.ensemble import RandomForestClassifier, GradientBoostingClassifier
from sklearn.linear_model import LogisticRegression, RidgeClassifier
from sklearn.preprocessing import StandardScaler
from sklearn.pipeline import make_pipeline
from sklearn.metrics import accuracy_score # Accuracy metrics
from flask_socketio import SocketIO, emit, send
from flask_cors import CORS
from flask import Flask, request, jsonify, render_template, send_from_directory
import simplejpeg
import numpy as np
import eventlet
eventlet.monkey_patch()
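# note: eventlet recommends calling monkey_patch() as early as possible, ideally
# before other modules are imported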
from utils import decode_image_base64
app = Flask(__name__)
app.config['SECRET_KEY'] = 'somethingsoamazingomg!!!'
app.config["DEBUG"] = False
app.config["environment"] = "production"
socketio = SocketIO(app, message_queue='redis://redis:6379')
with open('signs4.pkl', 'rb') as f:
model = pickle.load(f)
def isInitialized(hand):
try:
if hand.IsInitialized() == True:
return True
except:
return False
@app.route('/')
def home():
return render_template('camera.html')
@socketio.on('upload')
def predict(image_data):
img_binary_str = decode_image_base64(image_data)
image = simplejpeg.decode_jpeg(img_binary_str)
# Make Detections
with mp_holistic.Holistic(min_detection_confidence=0.5, min_tracking_confidence=0.5) as holistic:
results = holistic.process(image)
# print(results.face_landmarks)
# face_landmarks, pose_landmarks, left_hand_landmarks, right_hand_landmarks
# 1. Draw face landmarks
mp_drawing.draw_landmarks(image, results.face_landmarks, mp_holistic.FACE_CONNECTIONS,
mp_drawing.DrawingSpec(
color=(80, 110, 10), thickness=1, circle_radius=1),
mp_drawing.DrawingSpec(
color=(80, 256, 121), thickness=1, circle_radius=1)
)
# 2. Right hand
mp_drawing.draw_landmarks(image, results.right_hand_landmarks, mp_holistic.HAND_CONNECTIONS,
mp_drawing.DrawingSpec(
color=(80, 22, 10), thickness=2, circle_radius=4),
mp_drawing.DrawingSpec(
color=(80, 44, 121), thickness=2, circle_radius=2)
)
# 3. Left Hand
mp_drawing.draw_landmarks(image, results.left_hand_landmarks, mp_holistic.HAND_CONNECTIONS,
mp_drawing.DrawingSpec(
color=(121, 22, 76), thickness=2, circle_radius=4),
mp_drawing.DrawingSpec(
color=(121, 44, 250), thickness=2, circle_radius=2)
)
# 4. Pose Detections
mp_drawing.draw_landmarks(image, results.pose_landmarks, mp_holistic.POSE_CONNECTIONS,
mp_drawing.DrawingSpec(
color=(245, 117, 66), thickness=2, circle_radius=4),
mp_drawing.DrawingSpec(
color=(245, 66, 230), thickness=2, circle_radius=2)
)
# Extract Pose landmarks
if not isInitialized(results.pose_landmarks):
emit("speak", 'move back to appear')
# Extract left_hand landmarks
elif not isInitialized(results.left_hand_landmarks):
emit("speak", "move back one step to detect left hand")
        # Extract right_hand landmarks
elif not isInitialized(results.right_hand_landmarks):
emit("speak", "move back one step to detect right hand")
# Concate rows
elif isInitialized(results.pose_landmarks) and isInitialized(results.right_hand_landmarks) and isInitialized(results.left_hand_landmarks):
# pose detection
rowRLH = []
pose = results.pose_landmarks.landmark
for landmark in pose:
rowRLH.append(landmark.x)
rowRLH.append(landmark.y)
rowRLH.append(landmark.z)
rowRLH.append(landmark.visibility)
# left hand coordinates
left_hand = results.left_hand_landmarks.landmark
for landmark in left_hand:
rowRLH.append(landmark.x)
rowRLH.append(landmark.y)
rowRLH.append(landmark.z)
rowRLH.append(landmark.visibility)
# right hand coordinates
right_hand = results.right_hand_landmarks.landmark
for landmark in right_hand:
rowRLH.append(landmark.x)
rowRLH.append(landmark.y)
rowRLH.append(landmark.z)
rowRLH.append(landmark.visibility)
# Make Detections
X = pd.DataFrame([rowRLH])
body_language_class = model.predict(X)[0]
body_language_prob = model.predict_proba(X)[0]
emit("speak", body_language_class)
# Grab ear coords
coords = tuple(np.multiply(
np.array(
(results.pose_landmarks.landmark[mp_holistic.PoseLandmark.LEFT_EAR].x,
results.pose_landmarks.landmark[mp_holistic.PoseLandmark.LEFT_EAR].y)), [640, 480]).astype(int))
cv2.rectangle(image,
(coords[0], coords[1]+5),
(coords[0]+len(body_language_class)
* 20, coords[1]-30),
(245, 117, 16), -1)
cv2.putText(image, body_language_class, coords,
cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 2, cv2.LINE_AA)
# Get status box
cv2.rectangle(image, (0, 0), (250, 60), (245, 117, 16), -1)
# Display Class
cv2.putText(image, 'CLASS', (95, 12),
cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 0), 1, cv2.LINE_AA)
cv2.putText(image, body_language_class.split(' ')[
0], (90, 40), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 2, cv2.LINE_AA)
# Display Probability
cv2.putText(image, 'PROB', (15, 12),
cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 0), 1, cv2.LINE_AA)
cv2.putText(image, str(round(body_language_prob[np.argmax(body_language_prob)], 2)), (
10, 40), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 2, cv2.LINE_AA)
image = simplejpeg.encode_jpeg(image)
base64_image = base64.b64encode(image).decode('utf-8')
base64_src = f"data:image/jpg;base64,{base64_image}"
emit('prediction', base64_src)
if __name__ == "__main__":
CORS(app)
socketio.run(app, ssl_context="adhoc")
|
from tkinter import *
import os
def delete2():
screen3.destroy()
def delete3():
screen4.destroy()
def delete4():
screen5.destroy()
def session():
screen8 = Toplevel(screen)
screen8.title("dashboard")
screen8.geometry("400x400")
Label(screen8, text="Welcome to Dashboard").pack()
Button(screen8, text = "Create Note").pack()
Button(screen8, text = "View Note").pack()
Button(screen8, text = "Delete Note").pack()
def login_sucess():
global screen3
screen3 = Toplevel(screen)
screen3.title("Sucess")
screen3.geometry("150x100")
Label(screen3, text = "Login Sucess").pack()
Button(screen3, text ="OK", command = delete2).pack()
def password_not_recognized():
global screen4
screen4 = Toplevel(screen)
screen4.title("Sucess")
screen4.geometry("150x100")
Label(screen4, text = "Wrong Password").pack()
Button(screen4, text ="OK", command = delete3).pack()
def User_not_found():
global screen5
screen5 = Toplevel(screen)
screen5.title("Sucess")
screen5.geometry("150x100")
Label(screen5, text = "No user found").pack()
Button(screen5, text ="OK", command = delete4).pack()
#new function for the register button
def register_user():
username_info = username.get() #to get username details
password_info = password.get() #to get password details
#to enter values in text files
file=open(username_info, "w")
file.write(username_info+"\n")
file.write(password_info)
file.close()
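    #note: credentials are kept as a plain text file named after the username,
    #with the username on line 1 and the password on line 2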
    #to clear the fields once the user is registered successfully
username_entry.delete(0, END)
password_entry.delete(0, END)
    #to tell the user that registration was successful
Label(screen1, text = "Successfully Registered",fg = "Green",font = ("Times new roman",11)).pack()
def login_verify():
username1 = username_verify.get()
password1 = password_verify.get()
username_entry1.delete(0, END)
password_entry1.delete(0,END)
list_of_files = os.listdir()
if username1 in list_of_files:
file = open(username1, "r")
        verify = file.read().splitlines() #read all lines of the file without newline characters
if password1 in verify:
login_sucess()
else:
password_not_recognized()
else:
User_not_found()
def register():
global screen1
screen1 = Toplevel(screen)
screen1.title("Register")
screen1.geometry("300x250")
    #to use these variables outside the function we declare the entries as global
global username
global password
global username_entry
global password_entry
username = StringVar()
password = StringVar()
Label(screen1, text = "Please enter details below").pack()
Label(screen1, text = "").pack()
Label(screen1, text = "Username * ").pack()
global username_entry
global password_entry
username_entry = Entry(screen1, textvariable = username) #to store values in stringvariable
username_entry.pack()
Label(screen1, text = "Password * ").pack()
password_entry = Entry(screen1, textvariable = password)
password_entry.pack()
Label(screen1, text = "").pack() #to give space between lines
Button(screen1, text = "Register", width = 10, height = 1, command = register_user).pack()
def Login():
global screen2
screen2 = Toplevel(screen)
screen2.title("Login")
screen2.geometry("300x250")
Label(screen2, text = "Please enter details below to login").pack()
Label(screen2, text = "").pack()
global username_verify
global password_verify
username_verify = StringVar()
password_verify = StringVar()
global username_entry1
global password_entry1
Label(screen2, text = "Username * ").pack()
username_entry1 = Entry(screen2, textvariable = username_verify)
username_entry1.pack()
Label(screen2, text = "").pack()
Label(screen2, text = "Password * ").pack()
password_entry1 = Entry(screen2, textvariable = password_verify)
password_entry1.pack()
Label(screen2, text = "").pack()
Button(screen2, text = "Login", width = 10, height=1,command=login_verify).pack()
def main_screen():
global screen
screen = Tk()
screen.geometry("300x250")
screen.title("Library Management System")
Label(text = "Library Management System", bg = "white",fg="#427bff",width="300",height="2", font = ("Times New Roman",43, "bold")).pack()
Label(text = "").pack()
Button(text = "Login",height = "2", width = "30", command = Login).pack()
Label(text = "").pack()
Button(text = "Register", height = "2", width = "30", command = register).pack()
screen.mainloop()
main_screen()
|
# -*- coding: utf-8 -*-
class TreeNode:
def __init__(self, x):
self.val = x
self.left = None
self.right = None
class Solution:
def pathSum(self, root, sum):
result, _ = self._pathSum(root, sum)
return result
def _pathSum(self, root, sum):
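        # Returns a pair: the number of downward paths in this subtree that add up
        # to `sum`, and the list of sums of every downward path starting at `root`
        # (used by the parent call to extend its own paths).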
if root is None:
return 0, []
leftResult, leftPathSums = self._pathSum(root.left, sum)
rightResult, rightPathSums = self._pathSum(root.right, sum)
result = leftResult + rightResult + (1 if root.val == sum else 0)
pathSums = [root.val]
for leftPathSum in leftPathSums:
pathSums.append(leftPathSum + root.val)
if leftPathSum + root.val == sum:
result += 1
for rightPathSum in rightPathSums:
pathSums.append(root.val + rightPathSum)
if root.val + rightPathSum == sum:
result += 1
return result, pathSums
if __name__ == "__main__":
solution = Solution()
t0_0 = TreeNode(10)
t0_1 = TreeNode(5)
t0_2 = TreeNode(-3)
t0_3 = TreeNode(3)
t0_4 = TreeNode(2)
t0_5 = TreeNode(11)
t0_6 = TreeNode(3)
t0_7 = TreeNode(-2)
t0_8 = TreeNode(1)
t0_4.right = t0_8
t0_3.right = t0_7
t0_3.left = t0_6
t0_2.right = t0_5
t0_1.right = t0_4
t0_1.left = t0_3
t0_0.right = t0_2
t0_0.left = t0_1
assert 3 == solution.pathSum(t0_0, 8)
|
app = None
queue = None
|
# Generated by Django 2.2.20 on 2021-09-28 15:24
from django.db import migrations
from django.db.models import DateField, ExpressionWrapper, F
from django.utils.timezone import timedelta
def add_created_date(apps, schema_editor):
Election = apps.get_model("elections", "Election")
delta = timedelta(weeks=8)
expression = ExpressionWrapper(
F("poll_open_date") - delta, output_field=DateField
)
Election.private_objects.update(created=expression)
class Migration(migrations.Migration):
dependencies = [
("elections", "0061_auto_20210928_1509"),
]
operations = [
migrations.RunPython(
code=add_created_date, reverse_code=migrations.RunPython.noop
)
]
|
import datetime
import unittest
from zoomus import components, util
import responses
def suite():
"""Define all the tests of the module."""
suite = unittest.TestSuite()
suite.addTest(unittest.makeSuite(ListV1TestCase))
suite.addTest(unittest.makeSuite(ListV2TestCase))
return suite
class ListV1TestCase(unittest.TestCase):
def setUp(self):
self.component = components.meeting.MeetingComponent(
base_uri="http://foo.com",
config={
"api_key": "KEY",
"api_secret": "SECRET",
"version": util.API_VERSION_1,
},
)
@responses.activate
def test_can_list(self):
responses.add(
responses.POST,
"http://foo.com/meeting/list?host_id=ID&api_key=KEY&api_secret=SECRET",
)
self.component.list(host_id="ID")
def test_requires_host_id(self):
with self.assertRaisesRegexp(ValueError, "'host_id' must be set"):
self.component.list()
@responses.activate
def test_does_convert_startime_to_str_if_datetime(self):
responses.add(
responses.POST,
"http://foo.com/meeting/list?host_id=ID&topic=TOPIC&type=TYPE&start_time=2020-01-01T01%3A01%3A00Z"
"&api_key=KEY&api_secret=SECRET",
)
start_time = datetime.datetime(2020, 1, 1, 1, 1)
self.component.list(
host_id="ID", topic="TOPIC", type="TYPE", start_time=start_time
)
class ListV2TestCase(unittest.TestCase):
def setUp(self):
self.component = components.meeting.MeetingComponentV2(
base_uri="http://foo.com",
config={
"api_key": "KEY",
"api_secret": "SECRET",
"version": util.API_VERSION_2,
},
)
@responses.activate
def test_can_list(self):
responses.add(responses.GET, "http://foo.com/users/ID/meetings?user_id=ID")
self.component.list(user_id="ID")
def test_requires_user_id(self):
with self.assertRaisesRegexp(ValueError, "'user_id' must be set"):
self.component.list()
if __name__ == "__main__":
unittest.main()
|
''' Defines the function that starts the gateway.
.. Reviewed 11 November 2018.
'''
import traceback
import threading
import logging
import mqttgateway.mqtt_client as mqtt
import mqttgateway.mqtt_map as mqtt_map
from mqttgateway.app_properties import AppProperties
from mqttgateway import __version__
LOG = logging.getLogger(__name__)
def startgateway(gateway_interface):
''' Entry point.'''
    AppProperties(app_path=__file__, app_name='mqttgateway') # does nothing if it has already been called
try:
_startgateway(gateway_interface)
except:
LOG.error(''.join(('Fatal error: ', traceback.format_exc())))
raise
def _startgateway(gateway_interface):
'''
Initialisation of the application and main loop.
Initialises the configuration and the log, starts the interface,
starts the MQTT communication then starts the main loop.
The loop can start in mono or multi threading mode.
If the ``loop`` method is defined in the ``gateway_interface`` class, then
the loop will operate in a single thread, and this function will actually *loop*
forever, calling every time the ``loop`` method of the interface, as well
as the ``loop`` method of the MQTT library.
If the ``loop`` method is not defined in the ``gateway_interface`` class, then
it is assumed that the ``loop_start`` method is defined and it will be launched in a
separate thread.
The priority given to the mono thread option is for backward compatibility.
The data files are:
- the configuration file (compulsory), which is necessary at least to define
the MQTT broker; a path to it can be provided as first argument of the command line,
or the default path will be used;
- the map file (optional), if the mapping option is enabled.
The rules for providing paths of files are available in the configuration file
template as a comment.
The same rules apply to the command line argument and to the paths provided in the
configuration file.
Args:
gateway_interface (class): the interface class (not an instance of it!)
'''
# Load the configuration ======================================================================
cfg = AppProperties().get_config()
# Initialise the logger handlers ==============================================================
logfilename = cfg.get('LOG', 'logfilename')
if not logfilename: logfilepath = None
else: logfilepath = AppProperties().get_path(logfilename, extension='.log')
# create the dictionary of log configuration data for the initlogger
log_data = {
'console':
{'level': cfg.get('LOG', 'consolelevel')},
'file':
{'level': cfg.get('LOG', 'filelevel'),
'path': logfilepath,
'number': cfg.get('LOG', 'filenum'),
'size': cfg.get('LOG', 'filesize')},
'email':
{'host': cfg.get('LOG', 'emailhost'),
'port': cfg.get('LOG', 'emailport'),
'address': cfg.get('LOG', 'emailaddress'),
'subject': ''.join(('Error message from application ',
AppProperties().get_name(), '.'))}}
log_handler_msg = AppProperties().init_log_handlers(log_data)
# Log the configuration used ==================================================================
LOG.info('=== APPLICATION STARTED ===')
LOG.info(''.join(('mqttgateway version: <', __version__, '>.')))
LOG.info('Configuration options used:')
for section in cfg.sections():
for option in cfg.options(section):
LOG.info(''.join((' [', section, '].', option, ' : <',
str(cfg.get(section, option)), '>.')))
# Exit in case of error processing the configuration file.
if cfg.has_section('CONFIG') and cfg.has_option('CONFIG', 'error'):
LOG.critical(''.join(('Error while processing the configuration file:\n\t',
cfg.get('CONFIG', 'error'))))
raise SystemExit
# log configuration of the logger handlers
LOG.info(log_handler_msg)
# Instantiate the gateway interface ===========================================================
# Create the dictionary of the parameters for the interface from the configuration file
interfaceparams = {} # Collect the interface parameters from the configuration, if any
for option in cfg.options('INTERFACE'):
interfaceparams[option] = str(cfg.get('INTERFACE', option))
# Create 2 message lists, one incoming, the other outgoing
msglist_in = mqtt_map.MsgList()
msglist_out = mqtt_map.MsgList()
gatewayinterface = gateway_interface(interfaceparams, msglist_in, msglist_out)
# Load the map data ===========================================================================
mapping_flag = cfg.getboolean('MQTT', 'mapping')
mapfilename = cfg.get('MQTT', 'mapfilename')
if mapping_flag and mapfilename:
try:
map_data = AppProperties().get_jsonfile(mapfilename, extension='.map')
except (OSError, IOError) as err:
            LOG.critical(''.join(('Error loading map file:\n\t', str(err))))
raise SystemExit
except ValueError as err:
            LOG.critical(''.join(('Error reading JSON file:\n\t', str(err))))
raise SystemExit
else: # use default map - take root and topics from configuration file
mqtt_map.NO_MAP['root'] = cfg.get('MQTT', 'root')
mqtt_map.NO_MAP['topics'] = [topic.strip() for topic in cfg.get('MQTT', 'topics').split(',')]
map_data = None
try:
messagemap = mqtt_map.msgMap(map_data) # will raise ValueErrors if problems
except ValueError as err:
        LOG.critical(''.join(('Error processing map file:\n\t', str(err))))
        raise SystemExit
# Initialise the MQTT client and connect ======================================================
def process_mqttmsg(mqtt_msg):
''' Converts a MQTT message into an internal message and pushes it on the message list.
This function will be called by the on_message MQTT call-back.
Placing it here avoids passing the ``messagemap`` and the ``msglist_in`` instances
        but does not remove the risk of race conditions in a multi-threaded application.
        As long as the messagemap is not changed during the application's life (and for
        now that is not a feature), this should be fine.
Args:
mqtt_msg (:class:`mqtt.Message`): incoming MQTT message.
'''
# TODO: Make sure this works in various cases during multi-threading.
try: internal_msg = messagemap.mqtt2internal(mqtt_msg)
except ValueError as err:
LOG.info(str(err))
return
# eliminate echo
if internal_msg.sender != messagemap.sender():
msglist_in.push(internal_msg)
return
timeout = cfg.getfloat('MQTT', 'timeout') # for the MQTT loop() method
client_id = cfg.get('MQTT', 'clientid')
if not client_id: client_id = AppProperties().get_name()
mqttclient = mqtt.mgClient(host=cfg.get('MQTT', 'host'),
port=cfg.getint('MQTT', 'port'),
keepalive=cfg.getint('MQTT', 'keepalive'),
client_id=client_id,
on_msg_func=process_mqttmsg,
topics=messagemap.topics)
mqttclient.mg_connect()
def publish_msglist(block=False, timeout=None):
''' Publishes all messages in the outgoing message list.'''
while True: # Publish the messages returned, if any.
internal_msg = msglist_out.pull(block, timeout)
if internal_msg is None: break # should never happen in blocking mode
try: mqtt_msg = messagemap.internal2mqtt(internal_msg)
except ValueError as err:
LOG.info(str(err))
continue
published = mqttclient.publish(mqtt_msg.topic, mqtt_msg.payload, qos=0, retain=False)
LOG.debug(''.join(('MQTT message published with (rc, mid): ', str(published),
'\n\t', mqtt.mqttmsg_str(mqtt_msg))))
return
# 2018-10-19 Add code to provide multi threading support
if hasattr(gatewayinterface, 'loop') and callable(gatewayinterface.loop):
LOG.info('Mono Thread Loop')
while True:
mqttclient.loop_with_reconnect(timeout) # Call the MQTT loop.
gatewayinterface.loop() # Call the interface loop.
publish_msglist(block=False, timeout=None)
else: # assume 'loop_start' is defined and use multi-threading
LOG.info('Multi Thread Loop')
publisher = threading.Thread(target=publish_msglist, name='Publisher',
kwargs={'block': True, 'timeout': None})
publisher.start()
mqttclient.loop_start() # Call the MQTT loop.
gatewayinterface.loop_start() # Call the interface loop.
block = threading.Event()
block.wait() # wait forever - TODO: implement graceful termination
|
import SimpleHTTPServer
import SocketServer
Handler = SimpleHTTPServer.SimpleHTTPRequestHandler
httpd = SocketServer.TCPServer(('localhost', 80), Handler)
httpd.serve_forever()
|
from django.db.models.signals import post_save
from django.contrib.auth.models import User
from django.dispatch import receiver
from .models import ShinyUserHash
from secrets import token_hex
@receiver(post_save, sender=User)
def create_hash(sender, instance, created, **kwargs):
if created:
hash = token_hex(16)
ShinyUserHash.objects.create(user=instance, user_hash=hash)
@receiver(post_save, sender=User)
def save_hash(sender, instance, created, **kwargs):
instance.shinyuserhash.save()
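# Usage note (illustrative only): with the receivers above, creating a User also
# creates its ShinyUserHash row, e.g.
#   user = User.objects.create_user(username='alice', password='secret')
#   user.shinyuserhash.user_hash  # a 32-character hex token from token_hex(16)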
|
from panda3d.core import Point3, NodePath, BitMask32, RenderState, ColorAttrib, Vec4, LightAttrib, FogAttrib, LineSegs
from panda3d.core import Vec3, LPlane, GeomNode
from PyQt5 import QtWidgets, QtCore
from .BaseTool import BaseTool
from .ToolOptions import ToolOptions
from bsp.leveleditor.geometry.Box import Box
from bsp.leveleditor.geometry.GeomView import GeomView
from bsp.leveleditor.grid.GridSettings import GridSettings
from bsp.leveleditor.mapobject.Entity import Entity
from bsp.leveleditor import LEUtils, LEGlobals
from bsp.leveleditor.actions.Create import Create
from bsp.leveleditor.actions.Select import Deselect
from bsp.leveleditor.actions.ChangeSelectionMode import ChangeSelectionMode
from bsp.leveleditor.selection.SelectionType import SelectionType
from bsp.leveleditor.actions.Select import Select
from bsp.leveleditor.actions.ActionGroup import ActionGroup
from bsp.leveleditor.menu.KeyBind import KeyBind
from bsp.leveleditor import LEConfig
import random
VisState = RenderState.make(
ColorAttrib.makeFlat(Vec4(0, 1, 0, 1)),
LightAttrib.makeAllOff(),
FogAttrib.makeOff()
)
class EntityToolOptions(ToolOptions):
GlobalPtr = None
@staticmethod
def getGlobalPtr():
self = EntityToolOptions
if not self.GlobalPtr:
self.GlobalPtr = EntityToolOptions()
return self.GlobalPtr
def __init__(self):
ToolOptions.__init__(self)
lbl = QtWidgets.QLabel("Entity class")
self.layout().addWidget(lbl)
combo = QtWidgets.QComboBox()
self.layout().addWidget(combo)
check = QtWidgets.QCheckBox("Random yaw")
check.stateChanged.connect(self.__randomYawChecked)
self.layout().addWidget(check)
self.randomYawCheck = check
self.combo = combo
self.combo.currentTextChanged.connect(self.__handleClassChanged)
self.combo.setEditable(True)
self.updateEntityClasses()
def setTool(self, tool):
ToolOptions.setTool(self, tool)
self.combo.setCurrentText(self.tool.classname)
self.randomYawCheck.setChecked(self.tool.applyRandomYaw)
def __handleClassChanged(self, classname):
self.tool.classname = classname
def __randomYawChecked(self, state):
self.tool.applyRandomYaw = (state == QtCore.Qt.Checked)
def updateEntityClasses(self):
self.combo.clear()
names = []
for ent in base.fgd.entities:
if ent.class_type == 'PointClass':
names.append(ent.name)
names.sort()
completer = QtWidgets.QCompleter(names)
completer.setCompletionMode(QtWidgets.QCompleter.InlineCompletion)
completer.setCaseSensitivity(QtCore.Qt.CaseInsensitive)
self.combo.setCompleter(completer)
for name in names:
self.combo.addItem(name)
# Tool used to place an entity in the level.
class EntityTool(BaseTool):
Name = "Entity"
ToolTip = "Entity Tool"
KeyBind = KeyBind.EntityTool
Icon = "resources/icons/editor-entity.png"
def __init__(self, mgr):
BaseTool.__init__(self, mgr)
self.classname = LEConfig.default_point_entity.getValue()
self.applyRandomYaw = False
self.pos = Point3(0, 0, 0)
self.mouseIsDown = False
self.hasPlaced = False
# Maintain a constant visual scale for the box in 2D,
# but a constant physical scale in 3D.
self.size2D = 4
self.size3D = 32
self.boxSize = 0.5
# Setup the visualization of where our entity will be placed
# if we use the 2D viewport.
self.visRoot = NodePath("entityToolVis")
self.visRoot.setColor(Vec4(0, 1, 0, 1), 1)
self.visRoot.setLightOff(1)
self.visRoot.setFogOff(1)
self.box = Box()
for vp in self.doc.viewportMgr.viewports:
view = self.box.addView(GeomView.Lines, vp.getViewportMask())
if vp.is2D():
view.np.setBin("fixed", LEGlobals.BoxSort)
view.np.setDepthWrite(False)
view.np.setDepthTest(False)
else:
view.np.setScale(self.size3D)
view.viewport = vp
self.box.setMinMax(Point3(-self.boxSize), Point3(self.boxSize))
self.box.np.reparentTo(self.visRoot)
self.box.generateGeometry()
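        # Three long axis-aligned line segments through the origin form a 3D
        # crosshair that marks the placement point in every viewport.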
lines = LineSegs()
lines.moveTo(Point3(-10000, 0, 0))
lines.drawTo(Point3(10000, 0, 0))
lines.moveTo(Point3(0, -10000, 0))
lines.drawTo(Point3(0, 10000, 0))
lines.moveTo(Point3(0, 0, -10000))
lines.drawTo(Point3(0, 0, 10000))
self.lines = self.visRoot.attachNewNode(lines.create())
self.options = EntityToolOptions.getGlobalPtr()
def cleanup(self):
self.classname = None
self.pos = None
self.mouseIsDown = None
self.hasPlaced = None
self.size2D = None
self.size3D = None
self.boxSize = None
self.box.cleanup()
self.box = None
self.lines.removeNode()
self.lines = None
self.visRoot.removeNode()
self.visRoot = None
self.applyRandomYaw = None
BaseTool.cleanup(self)
def enable(self):
BaseTool.enable(self)
self.reset()
def activate(self):
BaseTool.activate(self)
self.accept('mouse1', self.mouseDown)
self.accept('mouse1-up', self.mouseUp)
self.accept('mouseMoved', self.mouseMoved)
self.accept('enter', self.confirm)
self.accept('escape', self.reset)
self.accept('arrow_up', self.moveUp)
self.accept('arrow_down', self.moveDown)
self.accept('arrow_left', self.moveLeft)
self.accept('arrow_right', self.moveRight)
def disable(self):
BaseTool.disable(self)
self.reset()
def reset(self):
self.hideVis()
self.mouseIsDown = False
self.hasPlaced = False
self.pos = Point3(0, 0, 0)
def updatePosFromViewport(self, vp):
mouse = vp.getMouse()
pos = base.snapToGrid(vp.viewportToWorld(mouse, flatten = False))
# Only update the axes used by the viewport
for axis in vp.spec.flattenIndices:
self.pos[axis] = pos[axis]
self.visRoot.setPos(self.pos)
self.doc.updateAllViews()
def updatePos(self, pos):
self.pos = pos
self.visRoot.setPos(pos)
self.doc.updateAllViews()
def hideVis(self):
self.visRoot.reparentTo(NodePath())
self.doc.updateAllViews()
def showVis(self):
self.visRoot.reparentTo(self.doc.render)
self.doc.updateAllViews()
def mouseDown(self):
vp = base.viewportMgr.activeViewport
if not vp:
return
if vp.is3D():
# If we clicked in the 3D viewport, try to intersect with an existing MapObject
# and immediately place the entity at the intersection point. If we didn't click on any
# MapObject, place the entity on the grid where we clicked.
entries = vp.click(GeomNode.getDefaultCollideMask())
            if entries:
                for entry in entries:
# Don't backface cull if there is a billboard effect on or above this node
if not LEUtils.hasNetBillboard(entry.getIntoNodePath()):
surfNorm = entry.getSurfaceNormal(vp.cam).normalized()
rayDir = entry.getFrom().getDirection().normalized()
if surfNorm.dot(rayDir) >= 0:
# Backface cull
continue
# We clicked on an object, use the contact point as the
# location of our new entity.
self.pos = entry.getSurfacePoint(self.doc.render)
self.hasPlaced = True
# Create it!
self.confirm()
break
else:
# Didn't click on an object, intersect our mouse ray with the grid plane.
plane = LPlane(0, 0, 1, 0)
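                # The grid plane is Z = 0 with its normal pointing up.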
worldMouse = vp.viewportToWorld(vp.getMouse())
theCamera = vp.cam.getPos(render)
# Ensure that the camera and mouse positions are on opposite
# sides of the plane, or else the entity would place behind us.
sign1 = plane.distToPlane(worldMouse) >= 0
sign2 = plane.distToPlane(theCamera) >= 0
if sign1 != sign2:
pointOnPlane = Point3()
ret = plane.intersectsLine(pointOnPlane, theCamera, worldMouse)
if ret:
# Our mouse intersected the grid plane. Place an entity at the plane intersection point.
self.pos = pointOnPlane
self.hasPlaced = True
self.confirm()
return
# The user clicked in the 2D viewport, draw the visualization where they clicked.
self.showVis()
self.updatePosFromViewport(vp)
self.mouseIsDown = True
self.hasPlaced = True
def mouseMoved(self, vp):
if not vp:
return
if vp.is2D() and self.mouseIsDown:
# If the mouse moved in the 2D viewport and the mouse is
# currently pressed, update the visualization at the new position
self.updatePosFromViewport(vp)
def mouseUp(self):
self.mouseIsDown = False
def confirm(self):
if not self.hasPlaced:
return
if self.applyRandomYaw:
yaw = random.uniform(0, 360)
else:
yaw = 0
ent = Entity(base.document.getNextID())
ent.setClassname(self.classname)
ent.np.setPos(self.pos)
ent.np.setH(yaw)
# Select the entity right away so we can conveniently move it around and
# whatever without having to manually select it.
base.actionMgr.performAction("Create entity",
ActionGroup([
Deselect(all = True),
Create(base.document.world.id, ent),
ChangeSelectionMode(SelectionType.Groups),
Select([ent], False)
])
)
self.reset()
def getMoveDelta(self, localDelta, vp):
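        # Rotate a viewport-local direction (up/down/left/right) into world
        # space and scale it by the default grid step.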
return vp.rotate(localDelta) * GridSettings.DefaultStep
def moveUp(self):
vp = base.viewportMgr.activeViewport
if not vp or not vp.is2D():
return
self.updatePos(self.pos + self.getMoveDelta(Vec3.up(), vp))
def moveDown(self):
vp = base.viewportMgr.activeViewport
if not vp or not vp.is2D():
return
self.updatePos(self.pos + self.getMoveDelta(Vec3.down(), vp))
def moveLeft(self):
vp = base.viewportMgr.activeViewport
if not vp or not vp.is2D():
return
self.updatePos(self.pos + self.getMoveDelta(Vec3.left(), vp))
def moveRight(self):
vp = base.viewportMgr.activeViewport
if not vp or not vp.is2D():
return
self.updatePos(self.pos + self.getMoveDelta(Vec3.right(), vp))
def update(self):
# Maintain a constant size for the 2D views
for view in self.box.views:
if view.viewport.is2D():
view.np.setScale(self.size2D / view.viewport.zoom)
|