'''
Created on September 28, 2017
A rule-based business dialog simulator.
user:query bot:current bot:accumulate bot:response
ask[category] | slot:[category] | slot:[category] | api_call request property1
provide[property1] | slot:[property1] | slot:[category,property1] | api_call request property2
provide[property2] | slot:[property2] | slot:[category,property1,property2] | api_call request property3
.
.
.
'''
import os
import sys
grandfatherdir = os.path.dirname(os.path.dirname(
os.path.dirname(os.path.abspath(__file__))))
from collections import OrderedDict
import numpy as np
from simulator_utils import Tv, Phone, Ac, name_map, template
class BusinessSimulator(object):
def __init__(self, obj):
self.category = name_map[obj.name]
self.obj = obj
self.necessary_property = obj.get_property()['necessary'].keys()
self.other_property = obj.get_property()['other'].keys()
def gen_normal_dialog(self):
def one_turn(proper=None):
if proper: # not turn 1
chosen_necessary_property = [proper]
necessary.remove(proper)
chosen_other_property = list()
else:
chosen_necessary_property = list()
picked_necessary_num = np.random.randint(0, 2)
for _ in range(picked_necessary_num + 1):
proper = np.random.choice(necessary)
chosen_necessary_property.append(proper)
necessary.remove(proper)
chosen_other_property = list()
picked_other_num = np.random.randint(1, 3)
for _ in range(picked_other_num + 1):
proper = np.random.choice(other)
chosen_other_property.append(proper)
other.remove(proper)
properties = chosen_necessary_property + chosen_other_property
queries.append(properties)
current_slots.append(properties)
before_property = accumulate_slots[-1] if len(
accumulate_slots) else accumulate_slots
accumulate_slots.append(before_property + properties)
request_property = np.random.choice(
necessary) if len(necessary) else 'END'
responses.append(request_property)
return request_property
necessary = list(self.necessary_property)
other = list(self.other_property)
queries = list()
current_slots = list()
accumulate_slots = list()
responses = list()
proper = None
while len(necessary):
proper = one_turn(proper)
# print('queries:', queries)
# print('current_slots', current_slots)
# print('accumulate_slots:', accumulate_slots)
# print('responses:', responses)
dialogs = list()
for _ in range(5000):
dialog = self.fill(queries, current_slots,
accumulate_slots, responses)
dialogs.append(dialog)
# print(dialog)
self.write_dialog(dialogs)
def fill(self, queries, current_slots, accumulate_slots, responses):
def get_property_value(proper, dic):
value_necess = np.random.choice(
dic['necessary'].get(proper, ['ERROR']))
value_other = np.random.choice(dic['other'].get(proper, ['ERROR']))
return value_necess if value_necess != 'ERROR' else value_other
property_map = self.obj.get_property()
property_value = {}
for p in accumulate_slots[-1]:
property_value[p] = get_property_value(p, property_map)
# print(property_value)
# responses
responses_filled = ['api_call_request ' + res for res in responses]
# queries
queries_filled = list()
for i, query in enumerate(queries):
query_filled = ''.join(
[property_value.get(p, 'ERROR') for p in query])
if i == 0:
query_filled = template.replace('[p]', query_filled)
query_filled = query_filled.replace('[c]', self.category)
# else:
# query_filled += 'provide'
queries_filled.append(query_filled)
# current
current_slots_filled = list()
for current_slot in current_slots:
current_slot_filled = 'slot:' + ','.join(
property_value.get(p, 'ERROR') for p in current_slot)
current_slots_filled.append(current_slot_filled)
# accumulate
accumulate_slots_filled = list()
for accumulate_slot in accumulate_slots:
accumulate_slot_filled = 'slot:' + ','.join(
property_value.get(p, 'ERROR') for p in accumulate_slot)
accumulate_slots_filled.append(accumulate_slot_filled)
return (queries_filled, current_slots_filled, accumulate_slots_filled, responses_filled)
def write_dialog(self, dialogs):
path = os.path.join(
grandfatherdir, 'data/memn2n/normal_business_dialog.txt')
dia = dialogs[0]
rows = len(dia[0])
cols = len(dia)
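        # Each output line is one dialog turn: the four columns (query, current
        # slot, accumulated slots, response) are written in order, each followed
        # by '#'; dialogs are separated by a blank line.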
with open(path, 'a', encoding='utf-8') as f:
for dialog in dialogs:
for i in range(rows):
for j in range(cols):
f.write(dialog[j][i] + '#')
f.write('\n')
f.write('\n')
def main():
tv = Tv()
ac = Ac()
phone = Phone()
ll = [tv, ac, phone]
for l in ll:
print(l.name)
bs = BusinessSimulator(l)
bs.gen_normal_dialog()
if __name__ == '__main__':
main()
|
# -*- coding: utf-8 -*-
from odoo import fields, models, api, _
import logging
import rpc_functions as rpc
from odoo.exceptions import UserError
_logger = logging.getLogger(__name__)
class DataUploading(models.TransientModel):
_name = 'rpc.data.uploading'
    _description = u"Data upload - upload"
    REPEATTYPE = [
        ('00', u'Ignore'),
        ('01', u'Delete'),
        ('02', u'Replace'),
    ]
    # repeat_type = fields.Selection(string=u'Duplicate handling method', selection=REPEATTYPE, default='02', required=True)
    rpc_data = fields.Many2one(comodel_name='rpc.base.data', string=u'Data table to upload', required=True)
@api.multi
def start_uploading(self):
"""开始上传操作"""
logging.info(u"开始上传操作")
rpc_data = self.rpc_data
# 上传配置项内容
rpc_url, rpc_db, username, password = rpc.get_rpc_condif()
logging.info(u"上传的参数为url:{},db:{},username:{},pwd:{}".format(rpc_url, rpc_db, username, password))
# 开始连接rpc
model, uid = rpc.connection_rpc(rpc_url, rpc_db, username, password)
# 组装需要上传的字段和模型名
model_name = rpc_data.model_id.model
fields_dict = self.pack_model_field_to_array(rpc_data)
logging.info(u"需要上传的模型:{}".format(fields_dict))
domain = list()
for d in rpc_data.domain_ids:
domain.append(rpc.string_transfer_list(d.name))
logging.info(u"数据表过滤表达式:{}".format(domain))
rpc.search_create(model, rpc_db, uid, password, model_name, domain, fields_dict)
logging.info(u"数据上传完成")
@api.model
def pack_model_field_to_array(self, model):
"""获取上传的字段"""
field_arr = []
for field in model.field_ids:
field_arr.append(field.name)
return field_arr
|
from help import Help
from music import Music
import os
import discord
from discord.ext import commands
import json
from dotenv import load_dotenv
load_dotenv()
with open("config.json", "r") as config_file:
config = json.load(config_file)
prefix = config["PREFIX"]
intents = discord.Intents.default()
intents.members = True
bot = commands.Bot(command_prefix=prefix, intents=intents)
bot.remove_command("help")
bot.add_cog(Music(bot))
bot.add_cog(Help(bot))
bot.run(os.getenv("TOKEN"))
|
# -*- coding: utf-8 -*-
"""
Basic Unittests
#####################
"""
from __future__ import absolute_import
import os
import unittest
import tempfile
from datetime import datetime
from collections import deque
from main import Task, App
class TestTask(unittest.TestCase):
"""Test unit operations of the Task class."""
def setUp(self):
self.task = Task("id0", 2, "%Y-%m-%d %H:%M:%S")
header = "ip,date,time,zone,cik,accession,extention,code,size,idx,norefer,noagent,find,crawler,browser".split(",")
line = "101.81.133.jja,2017-06-30,00:00:00,0.0,1608552.0,0001047469-17-004337,-index.htm,200.0,80251.0,1.0,0.0,0.0,9.0,0.0,".split(",")
self.message = dict(zip(header, line))
def test_clean(self):
"""Test :func:`Task.clean`."""
self.assertDictEqual(self.task.clean(self.message),
{'accession': '0001047469-17-004337',
'browser': '',
'cik': '1608552.0',
'code': '200.0',
'crawler': '0.0',
'date': '2017-06-30',
'datetime': datetime(2017, 6, 30, 0, 0),
'extention': '-index.htm',
'find': '9.0',
'idx': '1.0',
'ip': '101.81.133.jja',
'noagent': '0.0',
'norefer': '0.0',
'size': '80251.0',
'time': '00:00:00',
'zone': '0.0'})
def test_add(self):
"""Test :func:`Task.add`."""
self.assertEqual(len(self.task), 0)
self.task.add(self.message)
self.assertEqual(len(self.task), 1)
def test_flush(self):
"""Test :func:`Task.flush`."""
self.task.messages = deque()
self.task.add(self.message)
self.assertEqual(len(self.task), 1)
entry = self.task.flush(datetime.max)
self.assertIsInstance(entry, deque)
self.assertEqual(len(self.task), 0)
logfile = """ip,date,time,zone,cik,accession,extention,code,size,idx,norefer,noagent,find,crawler,browser
101.81.133.jja,2017-06-30,00:00:00,0.0,1608552.0,0001047469-17-004337,-index.htm,200.0,80251.0,1.0,0.0,0.0,9.0,0.0,
107.23.85.jfd,2017-06-30,00:00:00,0.0,1027281.0,0000898430-02-001167,-index.htm,200.0,2825.0,1.0,0.0,0.0,10.0,0.0,
107.23.85.jfd,2017-06-30,00:00:00,0.0,1136894.0,0000905148-07-003827,-index.htm,200.0,3021.0,1.0,0.0,0.0,10.0,0.0,
107.23.85.jfd,2017-06-30,00:00:01,0.0,841535.0,0000841535-98-000002,-index.html,200.0,2699.0,1.0,0.0,0.0,10.0,0.0,
108.91.91.hbc,2017-06-30,00:00:01,0.0,1295391.0,0001209784-17-000052,.txt,200.0,19884.0,0.0,0.0,0.0,10.0,0.0,
106.120.173.jie,2017-06-30,00:00:02,0.0,1470683.0,0001144204-14-046448,v385454_20fa.htm,301.0,663.0,0.0,0.0,0.0,10.0,0.0,
107.178.195.aag,2017-06-30,00:00:02,0.0,1068124.0,0000350001-15-000854,-xbrl.zip,404.0,784.0,0.0,0.0,0.0,10.0,1.0,
107.23.85.jfd,2017-06-30,00:00:03,0.0,842814.0,0000842814-98-000001,-index.html,200.0,2690.0,1.0,0.0,0.0,10.0,0.0,
107.178.195.aag,2017-06-30,00:00:04,0.0,1068124.0,0000350001-15-000731,-xbrl.zip,404.0,784.0,0.0,0.0,0.0,10.0,1.0,
108.91.91.hbc,2017-06-30,00:00:04,0.0,1618174.0,0001140361-17-026711,.txt,301.0,674.0,0.0,0.0,0.0,10.0,0.0,"""
output = """101.81.133.jja,2017-06-30 00:00:00,2017-06-30 00:00:00,1,1
108.91.91.hbc,2017-06-30 00:00:01,2017-06-30 00:00:01,1,1
107.23.85.jfd,2017-06-30 00:00:00,2017-06-30 00:00:03,4,4
106.120.173.jie,2017-06-30 00:00:02,2017-06-30 00:00:02,1,1
107.178.195.aag,2017-06-30 00:00:02,2017-06-30 00:00:04,3,2
108.91.91.hbc,2017-06-30 00:00:04,2017-06-30 00:00:04,1,1"""
class TestApp(unittest.TestCase):
"""Test full operation of the App class."""
def setUp(self):
"""Set up app redirecting output to a pipe."""
self.tempdir = tempfile.mkdtemp()
self.log = os.path.join(self.tempdir(), "log.csv")
with open(self.log, "w") as f:
f.write(logfile)
self.out = os.path.join(self.tempdir(), "out")
self.app = App(self.log, 2, self.out)
def tearDown(self):
"""Remove temporary files and directories."""
os.remove(self.log)
os.remove(self.out)
os.rmdir(self.tempdir)
    def test_run(self):
        """Run the app and check results."""
        self.app.run()
        self.assertEqual(len(self.app.tasks), 5)
        self.assertTrue(all(len(task.messages) == 0 for task in self.app.tasks.values()))
        self.assertListEqual(list(self.app.tasks.keys()), ['101.81.133.jja', '107.23.85.jfd',
                                                           '108.91.91.hbc', '106.120.173.jie',
                                                           '107.178.195.aag'])
        self.assertEqual(self.app.output_queue.qsize(), 0)
with open(self.out) as f:
check = f.read()
self.assertEqual(output, check)
if __name__ == '__main__':
unittest.main()
|
#!/usr/bin/env python
"""
cubic spline planner
Author: Atsushi Sakai
"""
import math
import numpy as np
import bisect
from scipy.spatial import distance
class Spline:
"""
Cubic Spline class
"""
def __init__(self, x, y):
self.b, self.c, self.d, self.w = [], [], [], []
self.x = x
self.y = y
self.nx = len(x) # dimension of x
h = np.diff(x)
        # calc coefficient a
self.a = [iy for iy in y]
# calc coefficient c
A = self.__calc_A(h)
B = self.__calc_B(h)
self.c = np.linalg.solve(A, B)
# print(self.c1)
# calc spline coefficient b and d
for i in range(self.nx - 1):
self.d.append((self.c[i + 1] - self.c[i]) / (3.0 * h[i]))
tb = (self.a[i + 1] - self.a[i]) / h[i] - h[i] * \
(self.c[i + 1] + 2.0 * self.c[i]) / 3.0
self.b.append(tb)
def calc(self, t):
"""
Calc position
if t is outside of the input x, return None
"""
if t < self.x[0]:
return None
elif t > self.x[-1]:
return None
i = self.__search_index(t)
dx = t - self.x[i]
result = self.a[i] + self.b[i] * dx + \
self.c[i] * dx ** 2.0 + self.d[i] * dx ** 3.0
return result
def calcd(self, t):
"""
Calc first derivative
if t is outside of the input x, return None
"""
if t < self.x[0]:
return None
elif t > self.x[-1]:
return None
i = self.__search_index(t)
dx = t - self.x[i]
result = self.b[i] + 2.0 * self.c[i] * dx + 3.0 * self.d[i] * dx ** 2.0
return result
def calcdd(self, t):
"""
Calc second derivative
"""
if t < self.x[0]:
return None
elif t > self.x[-1]:
return None
i = self.__search_index(t)
dx = t - self.x[i]
result = 2.0 * self.c[i] + 6.0 * self.d[i] * dx
return result
def calcddd(self, t):
if t < self.x[0]:
return None
elif t > self.x[-1]:
return None
i = self.__search_index(t)
result = 6.0 * self.d[i]
return result
def __search_index(self, x):
"""
search data segment index
"""
return bisect.bisect(self.x, x) - 1
def __calc_A(self, h):
"""
calc matrix A for spline coefficient c
"""
A = np.zeros((self.nx, self.nx))
A[0, 0] = 1.0
for i in range(self.nx - 1):
if i != (self.nx - 2):
A[i + 1, i + 1] = 2.0 * (h[i] + h[i + 1])
A[i + 1, i] = h[i]
A[i, i + 1] = h[i]
A[0, 1] = 0.0
A[self.nx - 1, self.nx - 2] = 0.0
A[self.nx - 1, self.nx - 1] = 1.0
# print(A)
return A
def __calc_B(self, h):
"""
calc matrix B for spline coefficient c
"""
B = np.zeros(self.nx)
for i in range(self.nx - 2):
B[i + 1] = 3.0 * (self.a[i + 2] - self.a[i + 1]) / \
h[i + 1] - 3.0 * (self.a[i + 1] - self.a[i]) / h[i]
# print(B)
return B
class Spline2D:
"""
2D Cubic Spline class
"""
def __init__(self, x, y):
self.s = self.__calc_s(x, y)
self.sx = Spline(self.s, x)
self.sy = Spline(self.s, y)
def __calc_s(self, x, y):
dx = np.diff(x)
dy = np.diff(y)
self.ds = [math.sqrt(idx ** 2 + idy ** 2)
for (idx, idy) in zip(dx, dy)]
s = [0]
s.extend(np.cumsum(self.ds))
return s
def calc_position(self, s):
"""
calc position
"""
x = self.sx.calc(s)
y = self.sy.calc(s)
return x, y
def calc_curvature(self, s):
"""
calc curvature
"""
dx = self.sx.calcd(s)
ddx = self.sx.calcdd(s)
dy = self.sy.calcd(s)
ddy = self.sy.calcdd(s)
k = (ddy * dx - ddx * dy) / (dx ** 2 + dy ** 2)
return k
def calc_d_curvature(self, s):
"""
calc d_curvature which is derivative of curvature by s
"""
dx = self.sx.calcd(s)
ddx = self.sx.calcdd(s)
dddx = self.sx.calcddd(s)
dy = self.sy.calcd(s)
ddy = self.sy.calcdd(s)
dddy = self.sy.calcddd(s)
squareterm = dx * dx + dy * dy
        dk = ((dddy * dx - dddx * dy) * squareterm - 3 * (ddy * dx - ddx * dy) * (dx * ddx + dy * ddy)) / (squareterm * squareterm)
return dk
def calc_yaw(self, s):
"""
calc yaw
"""
dx = self.sx.calcd(s)
dy = self.sy.calcd(s)
yaw = math.atan2(dy, dx)
return yaw
def calc_spline_course(x, y, ds=0.1):
sp = Spline2D(x, y)
s = list(np.arange(0, sp.s[-1], ds))
rx, ry, ryaw, rk, rdk = [], [], [], [], []
for i_s in s:
ix, iy = sp.calc_position(i_s)
rx.append(ix)
ry.append(iy)
ryaw.append(sp.calc_yaw(i_s))
rk.append(sp.calc_curvature(i_s))
rdk.append(sp.calc_d_curvature(i_s))
return rx, ry, ryaw, rk, rdk, s
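# Minimal usage sketch (synthetic waypoints, not from the original script):
#   rx, ry, ryaw, rk, rdk, s = calc_spline_course([0.0, 1.0, 2.5, 4.0],
#                                                 [0.0, 2.0, 1.0, 3.0], ds=0.5)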
def main():
print("Spline 2D test")
import matplotlib.pyplot as plt
import numpy as np
manhae1 = np.load(file='/home/menguiin/catkin_ws/src/macaron_2/path/K-CITY-garage-1m.npy')
x = manhae1[0:manhae1.shape[0]-1, 0]
y = manhae1[0:manhae1.shape[0]-1, 1]
rx, ry, ryaw, rk, rdk, s = calc_spline_course(x, y)
s = np.array(s)
flg, ax = plt.subplots(1)
plt.plot(range(-s.shape[0],s.shape[0],2),s, "s", label="s-value")
plt.grid(True)
plt.axis("equal")
plt.xlabel("index")
plt.ylabel("sval")
plt.legend()
flg, ax = plt.subplots(1)
plt.plot(x, y, "xb", label="input")
plt.plot(rx, ry, "-r", label="spline")
plt.grid(True)
plt.axis("equal")
plt.xlabel("x[m]")
plt.ylabel("y[m]")
plt.legend()
flg, ax = plt.subplots(1)
plt.plot(s, [math.degrees(iyaw) for iyaw in ryaw], "or", label="yaw")
plt.grid(True)
plt.legend()
plt.xlabel("line length[m]")
plt.ylabel("yaw angle[deg]")
flg, ax = plt.subplots(1)
plt.plot(s, rk, "-r", label="curvature")
plt.grid(True)
plt.legend()
plt.xlabel("line length[m]")
plt.ylabel("curvature [1/m]")
plt.show()
if __name__ == '__main__':
main()
|
"""
The "lf"-record
===============
Offset Size Contents
0x0000 Word ID: ASCII-"lf" = 0x666C
0x0002 Word number of keys
0x0004 ???? Hash-Records
"""
import io
from aiowinreg.filestruct.hashrecord import NTRegistryHR
class NTRegistryRI:
def __init__(self):
self.magic = b'ri'
self.keys_cnt = None
self.hash_records = []
@staticmethod
def from_bytes(data):
return NTRegistryRI.from_buffer(io.BytesIO(data))
@staticmethod
def from_buffer(buff):
lf = NTRegistryRI()
lf.magic = buff.read(2)
lf.keys_cnt = int.from_bytes(buff.read(2), 'little', signed = False)
for _ in range(lf.keys_cnt):
hr = NTRegistryHR.from_buffer(buff)
lf.hash_records.append(hr)
return lf
def __str__(self):
t = '== NT Registry RI Record ==\r\n'
for k in self.__dict__:
if isinstance(self.__dict__[k], list):
for i, item in enumerate(self.__dict__[k]):
t += ' %s: %s: %s' % (k, i, str(item))
else:
t += '%s: %s \r\n' % (k, str(self.__dict__[k]))
return t
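# Typical usage (with `data` holding the raw record bytes, hypothetical here):
#   record = NTRegistryRI.from_bytes(data)
#   print(record)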
|
#!/usr/bin/env python
# This file is part of Androguard.
#
# Copyright (c) 2012 Geoffroy Gueguen <geoffroy.gueguen@gmail.com>
# All Rights Reserved.
#
# Androguard is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Androguard is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Androguard. If not, see <http://www.gnu.org/licenses/>.
import sys
sys.path.append('./')
from androguard.core.bytecodes import apk, dvm
from androguard.core.analysis.analysis import uVMAnalysis
from androguard.decompiler.dad.decompile import DvMethod
from androguard.decompiler.dad.instruction import Constant, BinaryCompExpression
class PrintVisitor(object):
def __init__(self, graph):
self.graph = graph
self.visited_nodes = set()
self.loop_follow = [None]
self.latch_node = [None]
self.if_follow = [None]
self.switch_follow = [None]
self.next_case = None
def visit_ins(self, ins):
return ins.visit(self)
def visit_node(self, node):
if node in (self.if_follow[-1], self.switch_follow[-1],
self.loop_follow[-1], self.latch_node[-1]):
return
if node in self.visited_nodes:
return
self.visited_nodes.add(node)
node.visit(self)
def visit_loop_node(self, loop):
print '- Loop node', loop.num
follow = loop.get_loop_follow()
if follow is None and not loop.looptype.endless():
exit('Loop has no follow !', 'error')
if loop.looptype.pretest():
if loop.true is follow:
loop.neg()
loop.true, loop.false = loop.false, loop.true
cnd = loop.visit_cond(self)
print 'while(%s) {' % cnd
elif loop.looptype.posttest():
print 'do {'
self.latch_node.append(loop.latch)
elif loop.looptype.endless():
print 'while(true) {'
pass
self.loop_follow.append(follow)
if loop.looptype.pretest():
self.visit_node(loop.true)
else:
self.visit_node(loop.cond)
self.loop_follow.pop()
if loop.looptype.pretest():
print '}'
elif loop.looptype.posttest():
print '} while(',
self.latch_node.pop()
loop.latch.visit_cond(self)
print ')'
else:
self.visit_node(loop.latch)
if follow is not None:
self.visit_node(follow)
def visit_cond_node(self, cond):
print '- Cond node', cond.num
follow = cond.get_if_follow()
if cond.false is self.loop_follow[-1]:
cond.neg()
cond.true, cond.false = cond.false, cond.true
cond.visit_cond(self)
self.visit_node(cond.false)
elif follow is not None:
is_else = not (follow in (cond.true, cond.false))
if (cond.true in (follow, self.next_case)
or cond.num > cond.true.num):
cond.neg()
cond.true, cond.false = cond.false, cond.true
self.if_follow.append(follow)
if not cond.true in self.visited_nodes:
cnd = cond.visit_cond(self)
print 'if (%s) {' % cnd
self.visit_node(cond.true)
if is_else and not cond.false in self.visited_nodes:
print '} else {'
self.visit_node(cond.false)
print '}'
self.if_follow.pop()
self.visit_node(follow)
else:
cond.visit_cond(self)
self.visit_node(cond.true)
self.visit_node(cond.false)
def visit_short_circuit_condition(self, nnot, aand, cond1, cond2):
if nnot:
cond1.neg()
cond1.visit_cond(self)
cond2.visit_cond(self)
def visit_switch_node(self, switch):
lins = switch.get_ins()
for ins in lins[:-1]:
self.visit_ins(ins)
switch_ins = switch.get_ins()[-1]
self.visit_ins(switch_ins)
follow = switch.switch_follow
cases = switch.cases
self.switch_follow.append(follow)
default = switch.default
for i, node in enumerate(cases):
if node in self.visited_nodes:
continue
for case in switch.node_to_case[node]:
pass
if i + 1 < len(cases):
self.next_case = cases[i + 1]
else:
self.next_case = None
if node is default:
default = None
self.visit_node(node)
if default not in (None, follow):
self.visit_node(default)
self.switch_follow.pop()
self.visit_node(follow)
def visit_statement_node(self, stmt):
print '- Statement node', stmt.num
sucs = self.graph.sucs(stmt)
for ins in stmt.get_ins():
self.visit_ins(ins)
if len(sucs) == 0:
return
follow = sucs[0]
self.visit_node(follow)
def visit_return_node(self, ret):
print '- Return node', ret.num
for ins in ret.get_ins():
self.visit_ins(ins)
def visit_throw_node(self, throw):
for ins in throw.get_ins():
self.visit_ins(ins)
def visit_constant(self, cst):
return cst
def visit_base_class(self, cls):
return cls
def visit_variable(self, var):
return 'v%s' % var
def visit_param(self, param):
return 'p%s' % param
def visit_this(self):
return 'this'
def visit_assign(self, lhs, rhs):
if lhs is None:
rhs.visit(self)
return
l = lhs.visit(self)
r = rhs.visit(self)
print '%s = %s;' % (l, r)
def visit_move_result(self, lhs, rhs):
l = lhs.visit(self)
r = rhs.visit(self)
print '%s = %s;' % (l, r)
def visit_move(self, lhs, rhs):
if lhs is rhs:
return
l = lhs.visit(self)
r = rhs.visit(self)
print '%s = %s;' % (l, r)
def visit_astore(self, array, index, rhs):
arr = array.visit(self)
if isinstance(index, Constant):
idx = index.visit(self, 'I')
else:
idx = index.visit(self)
r = rhs.visit(self)
print '%s[%s] = %s' % (arr, idx, r)
def visit_put_static(self, cls, name, rhs):
r = rhs.visit(self)
return '%s.%s = %s' % (cls, name, r)
def visit_put_instance(self, lhs, name, rhs):
l = lhs.visit(self)
r = rhs.visit(self)
return '%s.%s = %s' % (l, name, r)
def visit_new(self, atype):
pass
def visit_invoke(self, name, base, args):
base.visit(self)
for arg in args:
arg.visit(self)
def visit_return_void(self):
print 'return;'
def visit_return(self, arg):
a = arg.visit(self)
print 'return %s;' % a
def visit_nop(self):
pass
def visit_switch(self, arg):
arg.visit(self)
def visit_check_cast(self, arg, atype):
arg.visit(self)
def visit_aload(self, array, index):
arr = array.visit(self)
idx = index.visit(self)
return '%s[%s]' % (arr, idx)
def visit_alength(self, array):
res = array.visit(self)
return '%s.length' % res
def visit_new_array(self, atype, size):
size.visit(self)
def visit_filled_new_array(self, atype, size, args):
atype.visit(self)
size.visit(self)
for arg in args:
arg.visit(self)
def visit_fill_array(self, array, value):
array.visit(self)
def visit_monitor_enter(self, ref):
ref.visit(self)
def visit_monitor_exit(self, ref):
pass
def visit_throw(self, ref):
ref.visit(self)
def visit_binary_expression(self, op, arg1, arg2):
val1 = arg1.visit(self)
val2 = arg2.visit(self)
return '%s %s %s' % (val1, op, val2)
def visit_unary_expression(self, op, arg):
arg.visit(self)
def visit_cast(self, op, arg):
a = arg.visit(self)
return '(%s %s)' % (op, a)
def visit_cond_expression(self, op, arg1, arg2):
val1 = arg1.visit(self)
val2 = arg2.visit(self)
return '%s %s %s' % (val1, op, val2)
def visit_condz_expression(self, op, arg):
if isinstance(arg, BinaryCompExpression):
arg.op = op
arg.visit(self)
else:
arg.visit(self)
def visit_get_instance(self, arg, name):
arg.visit(self)
def visit_get_static(self, cls, name):
return '%s.%s' % (cls, name)
TEST = '../DroidDream/magicspiral.apk'
vm = dvm.DalvikVMFormat(apk.APK(TEST).get_dex())
vma = uVMAnalysis(vm)
method = vm.get_method('crypt')[0]
method.show()
amethod = vma.get_method(method)
dvmethod = DvMethod(amethod)
dvmethod.process() # build IR Form / control flow...
graph = dvmethod.graph
print 'Entry block : %s\n' % graph.get_entry()
for block in graph: # graph.get_rpo() to iterate in reverse post order
print 'Block : %s' % block
for ins in block.get_ins():
print ' - %s' % ins
print
visitor = PrintVisitor(graph)
graph.get_entry().visit(visitor)
|
"""
Utility files for configs
1. Take the diff between two configs.
"""
import os
import pickle
import ml_collections
def load_config(config_fpath):
if os.path.isdir(config_fpath):
config_fpath = os.path.join(config_fpath, 'config.pkl')
else:
assert config_fpath[-4:] == '.pkl', f'{config_fpath} is not a pickle file. Aborting'
with open(config_fpath, 'rb') as f:
config = pickle.load(f)
return get_updated_config(config)
def get_updated_config(config):
"""
It makes sure that older versions of the config also run with current settings.
"""
frozen_dict = isinstance(config, ml_collections.FrozenConfigDict)
if frozen_dict:
config = ml_collections.ConfigDict(config)
with config.unlocked():
if 'cl_latent_weight' not in config.loss:
config.loss.cl_latent_weight = 0.5
if 'lr_scheduler_patience' not in config.training:
config.training.lr_scheduler_patience = 10
if 'kl_annealing' not in config.loss:
config.loss.kl_annealing = False
if 'kl_annealtime' not in config.loss:
config.loss.kl_annealtime = 10
if 'kl_start' not in config.loss:
config.loss.kl_start = -1
if 'noise_head' in config.model:
config.model.noise_head_fc = config.model.noise_head
if 'noise_head_cnn' not in config.model:
config.model.noise_head_cnn = [[] for _ in config.loss.cl_channels]
if 'noise_head_cnn_kernel_size' not in config.model:
config.model.noise_head_cnn_kernel_size = 3
if 'img_dsample' not in config.data:
config.data.img_dsample = 2
if frozen_dict:
return ml_collections.FrozenConfigDict(config)
else:
return config
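
# The "diff between two configs" mentioned in the module docstring is not
# implemented in this file; a minimal sketch (get_config_diff is a hypothetical
# name, and both configs are assumed to provide ml_collections' .to_dict()):
def get_config_diff(config1, config2):
    """Return {dotted_key: (value_in_config1, value_in_config2)} for entries that differ."""
    def _diff(d1, d2, prefix):
        out = {}
        for key in sorted(set(d1) | set(d2)):
            v1, v2 = d1.get(key), d2.get(key)
            if isinstance(v1, dict) and isinstance(v2, dict):
                out.update(_diff(v1, v2, prefix + key + '.'))
            elif v1 != v2:
                out[prefix + key] = (v1, v2)
        return out
    return _diff(config1.to_dict(), config2.to_dict(), '')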
|
from setuptools import setup, find_packages
setup(
name="minesweeper",
version="0.1.0",
description="Inefficient Minesweeper Solver",
long_description="A very inefficient minesweeper solver.",
author="rhdzmota",
packages=find_packages(where="src"),
package_dir={
"": "src"
},
include_package_data=True,
scripts=[
"bin/minesweeper"
],
python_requires=">3.5, <4"
)
|
#############################################################
#
# Multi-Dimensional Robust Synthetic Control Tests
# (based on SVD)
#
# Generates two metrics and compares the RMSE of forecasts
# for each metric using RSC against mRSC.
#
# Tests are based on random data so it is advised to run
# several times. Also note that in this setting RSC is
# also expected to do well. mRSC is expected to help but
# cannot be guaranteed to always be better.
#
# You need to ensure that this script is called from
# the tslib/ parent directory or tslib/tests/ directory:
#
# 1. python tests/testScriptMultiSynthControlSVD.py
# 2. python testScriptMultiSynthControlSVD.py
#
#############################################################
import sys, os
sys.path.append("../..")
sys.path.append("..")
sys.path.append(os.getcwd())
from matplotlib import pyplot as plt
import numpy as np
import pandas as pd
import copy
from tslib.src import tsUtils
from tslib.src.synthcontrol.syntheticControl import RobustSyntheticControl
from tslib.src.synthcontrol.multisyntheticControl import MultiRobustSyntheticControl
def simpleFunctionOne(theta, rho):
alpha = 0.7
exp_term = np.exp((-1.0 *theta) - rho - (alpha * theta * rho))
exp_term2 = np.exp(-1.0 *alpha * theta * rho)
p = 10.0 * float(1.0 / (1.0 + exp_term)) + 10.0/exp_term2
return p
def simpleFunctionTwo(theta, rho):
alpha = 0.5
exp_term = np.exp((-1.0 *theta) - rho - (alpha * theta * rho))
p = 10.0 * float(1.0 / (1.0 + exp_term))
return p
def generateDataMatrix(N, T, rowRank, colRank, genFunction, rowParams, colParams):
matrix = np.zeros([N, T])
for i in range(0, N):
for j in range(0, T):
matrix[i, j] = genFunction(rowParams[i], colParams[j])
return matrix
def generateFirstRow(matrix, weights):
(N, T) = np.shape(matrix)
assert(len(weights) == N)
weights = weights.reshape([N, 1])
weights = weights/np.sum(weights)
return np.dot(weights.T, matrix)
def generateOneMetricMatrix(N, T, TrainingEnd, rowRank, colRank, genFunction, trueWeights, rowParams, colParams):
matrix = generateDataMatrix(N, T, rowRank, colRank, genFunction, rowParams, colParams)
firstRow = generateFirstRow(matrix, trueWeights)
meanMatrix = np.zeros([N+1, T]) #np.concatenate([firstRow, matrix], axis=0)
meanMatrix[0, :] = firstRow
meanMatrix[1:, :] = matrix
#print(np.linalg.matrix_rank(meanMatrix))
noiseMatrix = np.random.normal(0.0, 1.0, [N+1, T])
#print(np.linalg.matrix_rank(noiseMatrix))
observationMatrix = meanMatrix + noiseMatrix
#print(np.linalg.matrix_rank(observationMatrix))
# convert to dataframes
trainingDict = {}
testDict = {}
meanTrainingDict = {}
meanTestDict = {}
for i in range(0, N+1):
trainingDict.update({str(i): observationMatrix[i, 0:TrainingEnd]})
meanTrainingDict.update({str(i): meanMatrix[i, 0:TrainingEnd]})
testDict.update({str(i): observationMatrix[i, TrainingEnd:]})
meanTestDict.update({str(i): meanMatrix[i, TrainingEnd:]})
trainDF = pd.DataFrame(data=trainingDict)
testDF = pd.DataFrame(data=testDict)
meanTrainDF = pd.DataFrame(data=meanTrainingDict)
meanTestDF = pd.DataFrame(data=meanTestDict)
#print(np.shape(trainDF), np.shape(testDF))
#print(np.shape(meanTrainDF), np.shape(meanTestDF))
return (observationMatrix, meanMatrix, trainDF, testDF, meanTrainingDict, meanTestDict)
def rankComparison():
N = 100
T = 120
TrainingEnd = 100
rowRank = 200
colRank = 200
# generate metric matrices
genFunctionOne = simpleFunctionOne
genFunctionTwo = simpleFunctionTwo
trueWeights = np.random.uniform(0.0, 1.0, N)
trueWeights = trueWeights/np.sum(trueWeights)
thetaArrayParams = np.random.uniform(0.0, 1.0, rowRank)
rhoArrayParams = np.random.uniform(0.0, 1.0, colRank)
rowParams = np.random.choice(thetaArrayParams, N)
colParams = np.random.choice(rhoArrayParams, T)
# metric 1
(observationMatrix, meanMatrix, trainDF, testDF, meanTrainingDict, meanTestDict) = generateOneMetricMatrix(N, T, TrainingEnd, rowRank, colRank, genFunctionOne, trueWeights, rowParams, colParams)
# metric 2
(observationMatrix2, meanMatrix2, trainDF2, testDF2, meanTrainingDict2, meanTestDict2) = generateOneMetricMatrix(N, T, TrainingEnd, rowRank, colRank, genFunctionTwo, trueWeights, rowParams, colParams)
thetaArrayParams = np.random.uniform(0.0, 1.0, rowRank)
rhoArrayParams = np.random.uniform(0.0, 1.0, colRank)
rowParams = np.random.choice(thetaArrayParams, N)
colParams = np.random.choice(rhoArrayParams, T)
# metric 3
(observationMatrix3, meanMatrix3, trainDF3, testDF3, meanTrainingDict3, meanTestDict3) = generateOneMetricMatrix(N, T, TrainingEnd, rowRank, colRank, genFunctionTwo, trueWeights, rowParams, colParams)
# concatenation
matrixA = np.zeros([N+1, 2*T])
matrixA[:, 0:T] = meanMatrix
matrixA[:, T: ] = meanMatrix2
u, s, v = np.linalg.svd(meanMatrix, full_matrices=False)
u, s_, v = np.linalg.svd(meanMatrix2, full_matrices=False)
u, sA, v = np.linalg.svd(matrixA, full_matrices=False)
# print(np.linalg.matrix_rank(meanMatrix))
# print(np.linalg.matrix_rank(meanMatrix2))
# print(np.linalg.matrix_rank(meanMatrix3))
# print(np.linalg.matrix_rank(matrixA))
k = 20
plt.plot(range(0, k), s[0:k], color='magenta', label='metric1')
plt.plot(range(0, k), s_[0:k], color='black', label='metric2')
plt.plot(range(0, k), sA[0:k], '-x', color='red', label='combined')
plt.xlabel('Singular Value Index (largest to smallest)')
plt.ylabel('Singular Value')
plt.title('Diagnostic: Rank Preservation Property')
legend = plt.legend(loc='lower right', shadow=True)
plt.show()
def runAnalysis(N, T, TrainingEnd, rowRank, colRank):
# generate metric matrices
genFunctionOne = simpleFunctionOne
genFunctionTwo = simpleFunctionTwo
trueWeights = np.random.uniform(0.0, 1.0, N)
trueWeights = trueWeights/np.sum(trueWeights)
thetaArrayParams = np.random.uniform(0.0, 1.0, rowRank)
rhoArrayParams = np.random.uniform(0.0, 1.0, colRank)
rowParams = np.random.choice(thetaArrayParams, N)
colParams = np.random.choice(rhoArrayParams, T)
# metric 1
(observationMatrix1, meanMatrix1, trainDF1, testDF1, meanTrainingDict1, meanTestDict1) = generateOneMetricMatrix(N, T, TrainingEnd, rowRank, colRank, genFunctionOne, trueWeights, rowParams, colParams)
# metric 2
(observationMatrix2, meanMatrix2, trainDF2, testDF2, meanTrainingDict2, meanTestDict2) = generateOneMetricMatrix(N, T, TrainingEnd, rowRank, colRank, genFunctionTwo, trueWeights, rowParams, colParams)
keySeriesLabel = '0'
otherSeriesLabels = []
for ind in range(1, N+1):
otherSeriesLabels.append(str(ind))
# RSC analysis
singvals = 8
############################
#### RSC for metric 1
rscmodel1 = RobustSyntheticControl(keySeriesLabel, singvals, len(trainDF1), probObservation=1.0, svdMethod='numpy', otherSeriesKeysArray=otherSeriesLabels)
# fit the model
rscmodel1.fit(trainDF1)
predictionsRSC1 = rscmodel1.predict(testDF1)
rscRMSE1 = np.sqrt(np.mean((predictionsRSC1 - meanTestDict1[keySeriesLabel])**2))
#print("\n\n *** RSC rmse1:")
#print(rscRMSE1)
############################
##### RSC for metric 2
rscmodel2 = RobustSyntheticControl(keySeriesLabel, singvals, len(trainDF2), probObservation=1.0, svdMethod='numpy', otherSeriesKeysArray=otherSeriesLabels)
# fit the model
rscmodel2.fit(trainDF2)
predictionsRSC2 = rscmodel2.predict(testDF2)
rscRMSE2 = np.sqrt(np.mean((predictionsRSC2 - meanTestDict2[keySeriesLabel])**2))
#print("\n\n *** RSC rmse2:")
#print(rscRMSE2)
############################
#### multi RSC model (combined) --
relative_weights = [1.0, 1.0]
# instantiate the model
mrscmodel = MultiRobustSyntheticControl(2, relative_weights, keySeriesLabel, singvals, len(trainDF1), probObservation=1.0, svdMethod='numpy', otherSeriesKeysArray=otherSeriesLabels)
# fit
mrscmodel.fit([trainDF1, trainDF2])
# predict
combinedPredictionsArray = mrscmodel.predict([testDF1[otherSeriesLabels], testDF2[otherSeriesLabels]])
# split the predictions for the metrics
predictionsmRSC_1 = combinedPredictionsArray[0]
predictionsmRSC_2 = combinedPredictionsArray[1]
# compute RMSE
mrscRMSE1 = np.sqrt(np.mean((predictionsmRSC_1 - meanTestDict1[keySeriesLabel])**2))
mrscRMSE2 = np.sqrt(np.mean((predictionsmRSC_2 - meanTestDict2[keySeriesLabel])**2))
#print("\n\n *** mRSC rmse1:")
#print(mrscRMSE1)
#print("\n\n *** mRSC rmse2:")
#print(mrscRMSE1)
return ({"rsc1": rscRMSE1,
"rsc2": rscRMSE2,
"mrsc1": mrscRMSE1,
"mrsc2": mrscRMSE2})
def main():
# diagnostic test for rank preservation (see paper referenced)
rankComparison()
rowRank = 10
colRank = 10
rsc1 = []
rsc1A = []
rsc2 = []
rsc2A = []
mrsc1 = []
mrsc1A = []
mrsc2 = []
mrsc2A = []
# simulating many random tests and varying matrix sizes
N_array = [50, 100, 150, 200, 250, 300]
for N in N_array:
print("**********************************************************")
print(N)
print("**********************************************************")
for T in [30]:
TrainingEnd = int(0.75*T)
rsc1_array = []
rsc1A_array = []
rsc2_array = []
rsc2A_array = []
mrsc1_array = []
mrsc1A_array = []
mrsc2_array = []
mrsc2A_array = []
for iter in range(0, 20):
resDict = runAnalysis(N, T, TrainingEnd, rowRank, colRank)
rsc1_array.append(resDict['rsc1'])
rsc2_array.append(resDict['rsc2'])
mrsc1_array.append(resDict['mrsc1'])
mrsc2_array.append(resDict['mrsc2'])
rsc1.append(np.mean(rsc1_array))
rsc2.append(np.mean(rsc2_array))
mrsc1.append(np.mean(mrsc1_array))
mrsc2.append(np.mean(mrsc2_array))
print("====================================")
print("====================================")
print("Metric # 1:")
print("mRSC - RSC:")
for i in range(0, len(N_array)):
print(i, mrsc1[i] - rsc1[i])
print("Metric # 2:")
print("mRSC - RSC:")
for i in range(0, len(N_array)):
print(i, mrsc2[i] - rsc2[i])
print("====================================")
print("====================================")
print("====================================")
print("====================================")
print("Metric # 1:")
print("mRSC, RSC:")
for i in range(0, len(N_array)):
print(i, mrsc1[i], rsc1[i])
print("Metric # 2:")
print("mRSC, RSC,:")
for i in range(0, len(N_array)):
print(i, mrsc2[i], rsc2[i])
print("====================================")
print("====================================")
# plotting
plt.plot(N_array, mrsc1, color='r', label='mRSC (metricA)')
plt.plot(N_array, mrsc2, color='orange', label='mRSC (metricB)')
plt.plot(N_array, rsc1, '-.', color='blue', label='RSC (metricA)')
plt.plot(N_array, rsc2, '--x',color='magenta', label='RSC (metricB)')
plt.xlabel('N')
plt.ylabel('RMSE')
plt.title('mRSC vs RSC for metricA and metricB')
legend = plt.legend(loc='upper right', shadow=True)
plt.show()
if __name__ == "__main__":
main()
|
#!/usr/bin/env python3
# The infinite monkey theorem : The theorem states that a monkey hitting keys at random on a
# typewriter keyboard for an infinite amount of time will almost surely type a given text,
# such as the complete works of William Shakespeare. Well, suppose we replace a monkey with
# a Python function. How long do you think it would take for a Python function to generate
# just one sentence of Shakespeare?
# The sentence we’ll shoot for is: “methinks it is like a weasel”
# The Program returns the number of iterations spent to hit the target/goal string.
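# Back-of-the-envelope note (not in the original comment): with 27 equally
# likely characters and a 28-character target, a single random string matches
# with probability 27**-28, so the expected number of whole-string attempts is
# 27**28 (roughly 1.2e40) -- far beyond what this brute-force loop will reach.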
import random
def genRandomString(strlen) :
letter = "abcdefghijklmnopqrstuvwxyz "; result = ""
for i in range(strlen) :
result = result + letter[random.randrange(27)]
return result
def score(goal, test_string) :
match_char = 0
for i in range(len(goal)) :
if goal[i] == test_string[i] :
match_char = match_char + 1
return (match_char / len(goal))
def getLoops(goalstr) :
loop_count = 0; random_string = genRandomString(len(goalstr))
if goalstr == "exit" :
print ("\n Ohh, I Just saw the dreadful \"Exit\". \n" ,
"I feel I should wish you a Good Bye then, See ya later :)")
exit()
else :
while ( score(goalstr, random_string) < 1.0 ) :
print (random_string)
loop_count = loop_count + 1
random_string = genRandomString(len(goalstr))
return loop_count
def main():
print (" \n Python program to implement Infinite monkey theorem \n")
print ("Loops : ", getLoops(input("Enter string you want to find match for : ")))
main()
|
from .settings import *
REMOTE_TEST_RESET_URL = '/_new_test'
MIDDLEWARE_CLASSES = (
'api_test.middleware.api_test.ApiTestMiddleware', # must be first
) + MIDDLEWARE_CLASSES
INSTALLED_APPS += (
'api_test',
)
|
#! /usr/bin/env python
import glob
import os
from optparse import OptionParser
import pprint
import re
import sys
f_trial_re = re.compile(r"parallel trial.* \((\d+)\) failed")
s_trial_re = re.compile(r"parallel trial.* \((\d+)\) solved")
ttime_re = re.compile(r"Total time = ([+|-]?(0|[1-9]\d*)(\.\d*)?([eE][+|-]?\d+)?)")
be_separator_re = re.compile(r"=== parallel trial.* \((\d+)\) (\S+) ===")
def analyze(output):
bname = os.path.basename(output)
b = os.path.splitext(bname)[0]
record = {}
record["benchmark"] = b
is_parallel = False
with open(output, 'r') as f:
f_trial = -1
s_trial = -1
ttime = -1
f_times = []
s_times = []
lines = []
for line in f:
## information from front-end
m = re.search(f_trial_re, line)
if m:
_f_trial = int(m.group(1))
f_trial = max(f_trial, _f_trial)
else:
m = re.search(s_trial_re, line)
if m:
_s_trial = int(m.group(1))
if s_trial < 0: s_trial = _s_trial
else: s_trial = min(s_trial, _s_trial)
m = re.search(ttime_re, line)
if m:
# to filter out the final Sketch run for CFG retrieval
ttime = max(ttime, int(float(m.group(1))))
## information from back-end
m = re.search(be_separator_re, line)
if m:
is_parallel = True
if m.group(2) in ["failed", "solved"]:
record = be_analyze_lines(lines, b)
if record["succeed"] == "Succeed":
s_times.append(record["ttime"])
else: # "Failed"
f_times.append(record["ttime"])
lines = []
else:
lines.append(line)
# for plain Sketch, the whole message is from back-end
if not is_parallel:
record = be_analyze_lines(lines, b)
if record["succeed"] == "Succeed":
s_times.append(ttime)
else: # "Failed"
f_times.append(ttime)
trial = len(f_times) + len(s_times)
record["trial"] = max(trial, s_trial)
s_succeed = "Succeed" if any(s_times) else "Failed"
record["succeed"] = s_succeed
f_time_sum = sum(f_times)
s_time_sum = sum(s_times)
record["ttime"] = ttime
record["stime"] = float(s_time_sum) / len(s_times) if s_times else 0
record["ftime"] = float(f_time_sum) / len(f_times) if f_times else 0
record["ctime"] = f_time_sum + s_time_sum
return record
exit_re = re.compile(r"Solver exit value: ([-]?\d+)")
be_tout_re = re.compile(r"timed out: (\d+)")
be_etime_re = re.compile(r"elapsed time \(s\) .* ([+|-]?(0|[1-9]\d*)(\.\d*)?([eE][+|-]?\d+)?)")
be_stime_re = re.compile(r"Total elapsed time \(ms\):\s*([+|-]?(0|[1-9]\d*)(\.\d*)?([eE][+|-]?\d+)?)")
be_ttime_re = re.compile(r"TOTAL TIME ([+|-]?(0|[1-9]\d*)(\.\d*)?([eE][+|-]?\d+)?)")
def be_analyze_lines(lines, b):
record = {}
record["benchmark"] = b
exit_code = -1
etime = 0
ttime = 0
timeout = None
succeed = False
propagation = -1
for line in reversed(lines):
m = re.search(exit_re, line)
if m:
exit_code = int(m.group(1))
succeed |= exit_code == 0
if "ALL CORRECT" in line:
succeed |= True
if "[SKETCH] DONE" in line:
succeed |= True
m = re.search(be_ttime_re, line)
if m:
ttime = ttime + int(float(m.group(1)))
m = re.search(be_stime_re, line)
if m:
etime = float(m.group(1))
m = re.search(be_tout_re, line)
if m:
timeout = int(m.group(1))
for line in lines:
m = re.search(be_etime_re, line)
if m:
etime = int(float(m.group(1)) * 1000)
break
s_succeed = "Succeed" if succeed else "Failed"
record["succeed"] = s_succeed
if timeout: _time = timeout
elif etime: _time = etime
else: _time = ttime
record["ttime"] = _time
return record
def main():
parser = OptionParser(usage="usage: %prog [options]")
parser.add_option("-b", "--benchmark",
action="append", dest="benchmarks", default=[],
help="benchmark(s) under analysis")
parser.add_option("-d", "--dir",
action="store", dest="out_dir", default="output",
help="output folder")
parser.add_option("--rm-failed",
action="store_true", dest="rm_failed", default=False,
help="remove failed output")
(opt, args) = parser.parse_args()
outputs = glob.glob(os.path.join(opt.out_dir, "*.txt"))
# filter out erroneous cases (due to broken pipes, etc.)
outputs = filter(lambda f: os.path.getsize(f) > 0, outputs)
for output in outputs:
bname = os.path.basename(output)
if any(opt.benchmarks):
found = False
for b in opt.benchmarks:
found |= bname.startswith(b)
if found: break
if not found: continue
record = analyze(output)
pprint.pprint(record)
if not opt.rm_failed: continue
if record["succeed"] != "Succeed":
print "deleting {}".format(output)
os.remove(output)
if __name__ == "__main__":
sys.exit(main())
|
#!/usr/bin/env python
from __future__ import print_function
from builtins import *
from past.utils import old_div
import cli
import re
import argparse
from csr_aws_guestshell import cag
csr = cag()
def print_cmd_output(command, output, print_output):
if print_output:
col_space = old_div((80 - (len(command))), 2)
print("\n%s %s %s" % ('=' * col_space, command, '=' * col_space))
print("%s \n%s" % (output, '=' * 80))
def execute_command(command, print_output):
cmd_output = cli.execute(command)
while len(cmd_output) == 0:
print("CMD FAILED, retrying")
cmd_output = cli.execute(command)
print_cmd_output(command, cmd_output, print_output)
return cmd_output
def get_stat_drop(print_output):
cmd_output = execute_command(
"show platform hardware qfp active statistics drop clear", print_output)
if "all zero" in cmd_output:
csr.send_metric("TailDrop", int(0), "Statistics drops")
return
if "TailDrop" not in cmd_output:
csr.send_metric("TailDrop", int(0), "Statistics drops")
for line in cmd_output.splitlines():
if ("-" in line) or ("Global Drop Stats" in line):
continue
entries = line.split()
if print_output:
print("%s --> %s/%s" % (entries[0], entries[1], entries[2]))
csr.send_metric(entries[0], (entries[1]), "Statistics drops")
def get_datapath_util(print_output):
cmd_output = execute_command(
"show platform hardware qfp active datapath utilization", print_output)
row_names = [
"input_priority_pps",
"input_priority_bps",
"input_non_priority_pps",
"input_non_priority_bps",
"input_total_pps",
"input_total_bps",
"output_priority_pps",
"output_priority_bps",
"output_non_priority_pps",
"output_non_priority_bps",
"output_total_pps",
"output_total_bps",
"processing_load_pct"]
i = 0
for line in cmd_output.splitlines():
if i >= len(row_names):
break
m = re.search(
r'.*\s+(?P<fivesecs>\d+)\s+(?P<onemin>\d+)\s+(?P<fivemin>\d+)\s+(?P<onehour>\d+)', line)
if m:
# print "%s --> %s %s %s %s" % (row_names[i],
# m.group('fivesecs'),m.group('onemin'),m.group('fivemin'),m.group('onehour'))
csr.send_metric(row_names[i] + '_fivesecs', m.group(
'fivesecs'), "datapath utilization")
csr.send_metric(row_names[i] + '_onemin', m.group(
'onemin'), "datapath utilization")
csr.send_metric(row_names[i] + '_fivemin', m.group(
'fivemin'), "datapath utilization")
csr.send_metric(row_names[i] + '_onehour', m.group(
'onehour'), "datapath utilization")
i = i + 1
def show_gig_interface_summary(print_output):
cmd_output = execute_command("show interfaces summary", print_output)
total_txbps = 0
total_rxbps = 0
for line in cmd_output.splitlines():
if "Giga" in line:
total_txbps += int(line.split()[-3])
total_rxbps += int(line.split()[-5])
csr.send_metric("output_gig_interface_summary_bps", total_txbps, "aggregate gig interfaces bps")
csr.send_metric("input_gig_interface_summary_bps", total_rxbps, "aggregate gig interfaces bps")
def show_interface(print_output):
cmd_output = execute_command("show interface summary", print_output)
table_start = 0
for line in cmd_output.splitlines():
if 'Interface' in line:
continue
if "-" in line:
table_start = 1
continue
if table_start == 0:
continue
entries = line.lstrip('*').split()
cmd = "show interface %s" % (entries[0])
interface_output = execute_command(cmd, print_output)
m = re.search(
r'.*\s+(?P<packets_input>\d+) packets input.*\s+(?P<bytes_input>\d+) bytes.*', interface_output)
if m:
# print "match! %s %s" %
# (m.group('packets_input'),m.group('bytes_input'))
csr.send_metric("packets_input_" +
entries[0], m.group('packets_input'), cmd)
csr.send_metric("bytes_input_" +
entries[0], m.group('bytes_input'), cmd)
m = re.search(
r'.*\s+(?P<packets_output>\d+) packets output.*\s+(?P<bytes_output>\d+) bytes.*', interface_output)
if m:
# print "match! %s %s" %
# (m.group('packets_output'),m.group('bytes_output'))
csr.send_metric("packets_output_" +
entries[0], m.group('packets_output'), cmd)
csr.send_metric("bytes_output_" +
entries[0], m.group('bytes_output'), cmd)
m = re.search(
r'.*\s+(?P<unknown_drops>\d+) unknown protocol drops.*', interface_output)
if m:
# print "match! %s" % (m.group('unknown_drops'))
csr.send_metric("unknown_drops_" +
entries[0], m.group('unknown_drops'), cmd)
m = re.search(
r'.*Total output drops:\s+(?P<output_drops>\d+)\s+.*', interface_output)
if m:
# print "match! %s" % (m.group('output_drops'))
csr.send_metric("output_drops_" +
entries[0], m.group('output_drops'), cmd)
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Upload Stats to custom metrics")
parser.add_argument('--display', help='Show Output', action='store_true')
    parser.add_argument('--category', help='Category of stats to send', default="all")
args = parser.parse_args()
if args.category in ["all", "drops"]:
get_stat_drop(args.display)
if args.category in ["all", "util"]:
get_datapath_util(args.display)
if args.category in ["all", "interface"]:
show_interface(args.display)
if args.category in ["all", "interface_summary"]:
show_gig_interface_summary(args.display)
|
# link: https://leetcode.com/problems/partition-equal-subset-sum/
# solution explanation: https://leetcode.com/problems/partition-equal-subset-sum/discuss/462699/Whiteboard-Editorial.-All-Approaches-explained.
class Solution(object):
def __init__(self):
self.st = {}
def canPartition(self, nums):
"""
:type nums: List[int]
:rtype: bool
"""
nums = sorted(nums, reverse=True)
s = sum(nums)
if s%2 != 0:
return False
print(nums)
target = s//2
print(target)
return self.backtracking(nums, target)
def backtracking(self, nums, curr):
if curr<0:
return False
if curr == 0:
return True
for i in range(len(nums)):
# for TLE check if the current number is greater than the remaining sum
if nums[i] > curr:
return False
return self.backtracking(nums[i+1:], curr-nums[i]) or self.backtracking(nums[i+1:],curr)
return False
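
# A standard subset-sum alternative (not part of the original submission), kept
# here as a sketch for comparison: track every subset sum reachable with the
# numbers seen so far and check whether half of the total becomes reachable.
def can_partition_dp(nums):
    total = sum(nums)
    if total % 2:
        return False
    target = total // 2
    reachable = {0}
    for n in nums:
        # extend each previously reachable sum by n, capped at the target
        reachable |= {r + n for r in reachable if r + n <= target}
        if target in reachable:
            return True
    return target in reachable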
|
# coding: utf-8
# Copyright (C) zhongjie luo <l.zhjie@qq.com>
import pandas as pd
import numpy as np
import os
import json
def check_duplicate(df, col_name):
temp = df.reset_index()
result = temp.duplicated(subset=col_name, keep=False)
index = np.argwhere(result).reshape(-1)
return df.iloc[index]
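# Example: check_duplicate(df, "ip") returns the rows whose "ip" value occurs
# more than once; it is used below to validate the checkpoint and device tables.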
class Conf:
def device(self):
""":return dataframe columns: name,ip,price"""
return self._device.copy()
def checkpoint(self):
""":return dataframe columns: name,ip,device,alarmdelay"""
return self._checkpoint.copy()
def cost_lambda_str(self):
"""func params: device, measure, checkpoint"""
return self._cost_lambda_str
def interval(self):
return self._interval
def acktimeout(self):
return self._acktimeout
def httpserver(self):
return tuple(self._httpserver)
def charset(self):
return self._charset
def __init__(self, filename):
self._httpserver = ["127.0.0.1", 8787]
with open(filename, "r") as fp:
conf = json.load(fp)
conf_dir = os.path.dirname(filename)
data_dir = conf.get("datadir", None)
if data_dir is None or len(data_dir.strip()) == 0:
data_dir = conf_dir
        # required fields: name,ip,device,alarmdelay
self._charset = conf.get("charset", "utf-8")
checkpoint = conf["checkpoint"]
df = pd.read_csv(os.path.join(data_dir, checkpoint["file"]), encoding=self._charset)
if len(df) == 0:
print("checkpoint not fount")
exit(100)
dst, src = zip(*[(x.split(":") * 2)[:2] for x in checkpoint["field"].split(",")])
df = df[list(src)]
df.columns = dst
temp = check_duplicate(df, "ip")
if len(temp):
print(temp)
raise RuntimeError("checkpoint, duplicated ip")
alarm_delay = max(conf["alarmdelay"], 0.001)
df["alarmdelay"] = df["alarmdelay"].astype(np.float32).fillna(alarm_delay)
self._checkpoint = df
        # required fields: name,ip,price
device = conf["device"]
df = pd.read_csv(os.path.join(data_dir, device["file"]), encoding=self._charset)
if len(df) == 0:
print("device not fount")
exit(100)
dst, src = zip(*[(x.split(":") * 2)[:2] for x in device["field"].split(",")])
df = df[list(src)]
df.columns = dst
temp = check_duplicate(df, "name")
if len(temp):
print(temp)
raise RuntimeError("device, duplicated name")
temp = check_duplicate(df, "ip")
if len(temp):
print(temp)
raise RuntimeError("device, duplicated ip")
self._device = df
self._cost = None
cost = conf.get("costfunc", None)
if cost is not None:
self._cost_lambda_str = "lambda m:%s" % (cost["cost"])
self._interval = max(conf["interval"], 1)
self._acktimeout = max(conf["acktimeout"], 1)
http = conf.get("httpserver")
if http is not None:
self._httpserver[0] = str(http.get("bindip", self._httpserver[0]))
self._httpserver[1] = int(http.get("port", self._httpserver[1]))
|
from common.checks import APTPackageChecksBase
from common.plugins.storage import (
CephChecksBase,
CEPH_PKGS_CORE,
CEPH_SERVICES_EXPRS,
StorageChecksBase,
)
YAML_PRIORITY = 0
class CephPackageChecks(StorageChecksBase, APTPackageChecksBase):
@property
def output(self):
if self._output:
return {"ceph": self._output}
def __call__(self):
# require at least one core package to be installed to include
# this in the report.
if self.core:
self._output["dpkg"] = self.all
class CephServiceChecks(CephChecksBase):
def get_running_services_info(self):
"""Get string info for running services."""
if self.services:
self._output["services"] = self.get_service_info_str()
def __call__(self):
self.get_running_services_info()
def get_service_checker():
# Do this way to make it easier to write unit tests.
return CephServiceChecks(CEPH_SERVICES_EXPRS)
def get_pkg_checker():
return CephPackageChecks(CEPH_PKGS_CORE)
|
def main() -> None:
N = int(input())
points = [tuple(map(int, input().split())) for _ in range(N)]
assert 4 <= N <= 100
assert all(len(point) == 2 for point in points)
assert all(points[i] != points[j] for i in range(N) for j in range(i + 1, N))
assert all(0 <= point[0] <= 10**9 and 0 <= point[1] <= 10**9 for point in points)
if __name__ == '__main__':
main()
|
import easygui
easygui.msgbox('Select the source file', 'Source')
origfile = easygui.fileopenbox()
arq = open(origfile, 'r')
arqdest = open(origfile + '.changed', 'w')
substituiparam = 0
achouparam = 0
log = 1 # 1=on 0=off
def findparam(x):
global achouparam
global substituiparam
if (x.find("get:") != -1):
achouparam = 1
if log: print(x)
if (achouparam == 1 and x.find('tags:') > 0):
substituiparam = 1
if log: print(x)
if (linha.find("post:") != -1):
achouparam = 2
if log: print(x)
if (achouparam == 2 and linha.find('tags:') > 0):
substituiparam = 2
if log: print(x)
if (linha.find("delete:") != -1):
achouparam = 3
if log: print(x)
if (achouparam == 3 and linha.find('tags:') > 0):
substituiparam = 3
if log: print(x)
if (linha.find("put:") != -1):
achouparam = 4
if log: print(x)
if (achouparam == 4 and linha.find('tags:') > 0):
substituiparam = 4
if log: print(x)
for linha in arq:
if (substituiparam == 1):
if log: print("Deletado: " + linha)
linha = ' - "RETRIEVAL Operations"\n'
if log: print("Alterado: " + linha)
substituiparam = 0
achouparam = 0
if (substituiparam == 2):
if log: print("Deletado: " + linha)
linha = ' - "CREATION Operations"\n'
if log: print("Alterado: " + linha)
substituiparam = 0
achouparam = 0
if (substituiparam == 3):
if log: print("Deletado: " + linha)
linha = ' - "DELETION Operations"\n'
if log: print("Alterado: " + linha)
substituiparam = 0
achouparam = 0
if (substituiparam == 4):
if log: print("Deletado: " + linha)
linha = ' - "MODIFICATION Operations"\n'
if log: print("Alterado: " + linha)
substituiparam = 0
achouparam = 0
findparam(linha)
arqdest.write(linha)
arq.close()
arqdest.close()
|
f1 = 'general'
f2 = 'perfectionism_certainty'
f3 = 'responsibility_and_threat_estimation'
f4 = 'importance_and_control_of_thought'
f5 = 'complete_performance'
option_numbers = 7
factors_names = ('raw',f1,f2,f3,f4,f5)
factors = {
1 :(f1,)
, 2 :(f1,)
, 3 :(f2,)
, 4 :(f2,)
, 5 :(f3,)
, 6 :(f3,)
, 7:(f4,)
, 8 :(f3,)
, 9 :(f2,)
, 10 :(f2,)
, 11 :(f2,)
, 12 :(f5,)
, 13 :(f5,)
, 14 :(f2,)
, 15 :(f5,)
, 16 :(f3,)
, 17 :(f3,)
, 18 :(f1,)
, 19 :(f3,)
, 20 :(f2,)
, 21 :(f4,)
, 22 :(f1,)
, 23 :(f3,)
, 24 :(f4,)
, 25 :(f5,)
, 26 :(f2,)
, 27 :(f4,)
, 28:(f4,)
, 29 :(f1,)
, 30 :(f1,)
, 31 :(f2,)
, 32 :(f1,)
, 33 :(f1,)
, 34 :(f1,)
, 35 :(f1,)
, 36 :(f1,)
, 37:(f2,)
, 38 :(f1,)
, 39 :(f5,)
, 40 :(f1,)
, 41 :(f1,)
, 42 :(f4,)
, 43 :(f1,)
, 44 :(f1,)
}
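
# Hypothetical usage sketch (not part of the original file): aggregate raw
# answers -- a dict of question number -> chosen option (1..option_numbers) --
# into per-factor totals using the mapping above.
def score_answers(answers):
    totals = {name: 0 for name in factors_names if name != 'raw'}
    for question, value in answers.items():
        for factor in factors.get(question, ()):
            totals[factor] += value
    return totals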
|
from flask import Blueprint, Response, request
import logging
import jinja2
import jinja2.utils
from mr_provisioner.models import Machine, MachineEvent
from mr_provisioner import db
from sqlalchemy.exc import DatabaseError
from collections import namedtuple
mod = Blueprint('preseed', __name__, template_folder='templates')
logger = logging.getLogger('preseed')
PInterface = namedtuple('object', ['name', 'static_ipv4', 'prefix', 'netmask', 'mac'])
PImage = namedtuple('object', ['filename', 'description', 'known_good'])
def make_reporting_undefined(machine, request, base=jinja2.Undefined):
def report(undef):
if undef._undefined_hint is None:
if undef._undefined_obj is jinja2.utils.missing:
hint = '%s is undefined' % undef._undefined_name
else:
hint = '%s has no attribute %s' % (
jinja2.utils.object_type_repr(undef._undefined_obj),
undef._undefined_name)
else:
hint = undef._undefined_hint
MachineEvent.preseed_error(machine.id, None, request.remote_addr, hint)
class ReportingUndefined(base):
def __str__(self):
report(self)
return base.__str__(self)
def __iter__(self):
report(self)
return base.__iter__(self)
def __bool__(self):
report(self)
return base.__bool__(self)
def __len__(self):
report(self)
return base.__len__(self)
return ReportingUndefined
@mod.route('/<machine_id>', methods=['GET'])
def get_preseed(machine_id):
machine = Machine.query.get(machine_id)
if not machine:
return "", 404
preseed = machine.preseed
if not preseed:
return "", 404
MachineEvent.preseed_accessed(machine.id, None, request.remote_addr)
assignees = machine.assignees
ssh_key = assignees[0].ssh_key if len(assignees) > 0 else ''
ssh_keys = [u.ssh_key for u in assignees]
kernel_opts = machine.kernel_opts
interfaces = [PInterface(name=i.identifier,
static_ipv4=i.static_ipv4,
prefix=i.network.prefix,
netmask=i.network.netmask,
mac=i.mac) for i in machine.interfaces if i.static_ipv4]
kernel = None
if machine.kernel:
kernel = PImage(filename=machine.kernel.filename,
description=machine.kernel.description,
known_good=machine.kernel.known_good)
initrd = None
if machine.initrd:
initrd = PImage(filename=machine.initrd.filename,
description=machine.initrd.description,
known_good=machine.initrd.known_good)
try:
template = jinja2.Template(preseed.file_content,
undefined=make_reporting_undefined(machine, request))
return Response(
template.render(ssh_key=ssh_key, ssh_keys=ssh_keys,
hostname=machine.hostname, interfaces=interfaces,
kernel=kernel, initrd=initrd,
kernel_options=kernel_opts),
mimetype='text/plain')
except jinja2.TemplateSyntaxError as e:
MachineEvent.preseed_error(machine.id, None, request.remote_addr, e.message, e.lineno)
return Response(
"Syntax error on preseed template: {} - line: {}".format(e.message, e.lineno), status=400)
    except Exception as e:
        MachineEvent.preseed_error(machine.id, None, request.remote_addr, str(e))
        return Response(
            "Exception raised while rendering preseed template: {}".format(e), status=400)
@mod.errorhandler(DatabaseError)
def handle_db_error(error):
db.session.rollback()
raise
|
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See LICENSE in the project root
# for license information.
import sys
from _pydevd_bundle import pydevd_comm
from ptvsd.socket import Address
from ptvsd.daemon import Daemon, DaemonStoppedError, DaemonClosedError
from ptvsd._util import debug, new_hidden_thread
def start_server(daemon, host, port, **kwargs):
"""Return a socket to a (new) local pydevd-handling daemon.
The daemon supports the pydevd client wire protocol, sending
requests and handling responses (and events).
This is a replacement for _pydevd_bundle.pydevd_comm.start_server.
"""
sock, next_session = daemon.start_server((host, port))
def handle_next():
try:
session = next_session(**kwargs)
debug('done waiting')
return session
except (DaemonClosedError, DaemonStoppedError):
# Typically won't happen.
debug('stopped')
raise
except Exception as exc:
# TODO: log this?
debug('failed:', exc, tb=True)
return None
def serve_forever():
debug('waiting on initial connection')
handle_next()
while True:
debug('waiting on next connection')
try:
handle_next()
except (DaemonClosedError, DaemonStoppedError):
break
debug('done')
t = new_hidden_thread(
target=serve_forever,
name='sessions',
)
t.start()
return sock
def start_client(daemon, host, port, **kwargs):
"""Return a socket to an existing "remote" pydevd-handling daemon.
The daemon supports the pydevd client wire protocol, sending
requests and handling responses (and events).
This is a replacement for _pydevd_bundle.pydevd_comm.start_client.
"""
sock, start_session = daemon.start_client((host, port))
start_session(**kwargs)
return sock
def install(pydevd, address,
start_server=start_server, start_client=start_client,
**kwargs):
"""Configure pydevd to use our wrapper.
This is a bit of a hack to allow us to run our VSC debug adapter
in the same process as pydevd. Note that, as with most hacks,
this is somewhat fragile (since the monkeypatching sites may
change).
"""
addr = Address.from_raw(address)
daemon = Daemon(**kwargs)
_start_server = (lambda p: start_server(daemon, addr.host, p))
_start_server.orig = start_server
_start_client = (lambda h, p: start_client(daemon, h, p))
_start_client.orig = start_client
# These are the functions pydevd invokes to get a socket to the client.
pydevd_comm.start_server = _start_server
pydevd_comm.start_client = _start_client
# Ensure that pydevd is using our functions.
pydevd.start_server = _start_server
pydevd.start_client = _start_client
__main__ = sys.modules['__main__']
if __main__ is not pydevd:
if getattr(__main__, '__file__', None) == pydevd.__file__:
__main__.start_server = _start_server
__main__.start_client = _start_client
return daemon
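
# Hedged usage sketch (not taken from this module's callers; `pydevd` and the
# address below are illustrative assumptions):
#
#     import pydevd
#     daemon = install(pydevd, ('localhost', 5678))
#     # pydevd's start_server/start_client now route through the daemon.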
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Define the universal set of vowels
word = input("Enter a word: ")
vowels = set("aeiouy")
word_set = set(word.lower())
print('Vowels: {}'.format(len(word_set.intersection(vowels))))
|
# Copyright 2013-2021 The Meson development team
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .baseobjects import InterpreterObject, MesonInterpreterObject, ObjectHolder, TYPE_var, HoldableTypes
from .exceptions import InvalidArguments
from ..mesonlib import HoldableObject, MesonBugException
def _unholder(obj: InterpreterObject) -> TYPE_var:
if isinstance(obj, ObjectHolder):
assert isinstance(obj.held_object, HoldableTypes)
return obj.held_object
elif isinstance(obj, MesonInterpreterObject):
return obj
elif isinstance(obj, HoldableObject):
raise MesonBugException(f'Argument {obj} of type {type(obj).__name__} is not held by an ObjectHolder.')
elif isinstance(obj, InterpreterObject):
raise InvalidArguments(f'Argument {obj} of type {type(obj).__name__} cannot be passed to a method or function')
raise MesonBugException(f'Unknown object {obj} of type {type(obj).__name__} in the parameters.')
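
# Added summary: _unholder is the bridge between interpreter-level wrappers and
# plain values. ObjectHolders are unwrapped to their held object, Meson
# interpreter objects pass through unchanged, and anything else is rejected as
# either a Meson bug or an invalid user-supplied argument.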
|
# Generated by Django 3.2.7 on 2021-10-27 18:37
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
import uuid
class Migration(migrations.Migration):
dependencies = [
('tracks', '0006_auto_20211025_1631'),
]
operations = [
migrations.CreateModel(
name='TrackRequest',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('requestId', models.UUIDField(default=uuid.uuid4, verbose_name='Request id')),
('method', models.CharField(default='GET', max_length=20, verbose_name='Method')),
('requestUrl', models.CharField(max_length=500, verbose_name='Request url')),
('requestHeaders', models.JSONField(blank=True, default=list, null=True, verbose_name='Request headers')),
('requestBody', models.TextField(blank=True, null=True, verbose_name='Request body')),
('statusCode', models.IntegerField(verbose_name='Response status code')),
('responseBody', models.TextField(blank=True, null=True, verbose_name='Response body')),
('responseHeaders', models.JSONField(blank=True, default=dict, verbose_name='Response headers')),
('duration', models.FloatField(blank=True, default=0.0, null=True, verbose_name='Duration in seconds')),
('created', models.DateTimeField(default=django.utils.timezone.now, verbose_name='Created')),
('service', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='tracks.service', verbose_name='Service')),
],
),
]
|
#!/usr/bin/python3
import socket
import sys
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
server_addr = ('', 8080)
sock.bind(server_addr)
while True:
    data, addr = sock.recvfrom(4096)
    print(data, file=sys.stderr)
|
from django.shortcuts import render, get_object_or_404
from db.models import Category, Post
from .forms import ContactForm
from django.core.mail import send_mail
# from django.http import HttpResponse
# from django.template.loader import get_template
# from django.core.mail import EmailMessage
# from django.template import Context
# # from sendgrid.message import SendGridEmailMessage
# # import sendgrid
thanks_message = """Thank You, We appreciate that you’ve taken the time to write us.
We’ll get back to you very soon.\n \n \n
------------------------------------\n \n
This is your message\n \n
------------------------------------\n \n
"""
def home(request):
latest_project = Post.get_lastest_visible('project')
latest_blog = Post.get_lastest_visible('blog')
latest_arabic = Post.get_lastest_visible('arabic')
return render(request, 'home.html', {
'latest_project': latest_project,
'latest_blog': latest_blog,
'latest_arabic':latest_arabic,
})
def cv(request):
return render(request, 'cv.html',)
def smedia(request):
return render(request, 'smedia.html',)
def contact(request):
# new logic!
if request.method == 'POST':
form = ContactForm(data=request.POST)
if form.is_valid():
contact_email = form.cleaned_data.get('contact_email')
subject = form.cleaned_data.get('subject')
message = form.cleaned_data.get('message')
send_mail(
subject,
message,
contact_email,
['m@mohd.im'],
fail_silently=False,
)
message = thanks_message + message
subject = "RE:" + subject
send_mail(
subject,
message,
'm@mohd.im',
[contact_email],
fail_silently=False,
)
return render(request, 'thanks-email.html',)
else:
form = ContactForm()
return render(request, 'contact.html', {
'form': form,
        # 'errors': errors,
# 'subject_val': subject_val,
# 'contact_email_val': contact_email_val,
# 'message_val': message_val,
})
def robots(request):
return render(request, 'robots.txt',)
# def contact(request):
# form_class = ContactForm
#
# # new logic!
# if request.method == 'POST':
# form = form_class(data=request.POST)
#
# contact_email = request.POST.get('contact_email', '')
# content = request.POST.get('content', '')
#
# sg = sendgrid.SendGridClient('SENDGRID_USERNAME', 'SENDGRID_PASSWORD')
#
# message = sendgrid.Mail()
# message.add_to('m@mohd.im')
# message.set_subject('via CuntactMe')
# message.set_text(content)
# message.set_from(contact_email)
# status, msg = sg.send(message)
# return redirect('thank-u-email.html')
#
# return render(request, 'contact.html', {
# 'form': form_class,
# })
# def contact(request):
# from django.core.mail import send_mail
#
# send_mail(
# 'Subject here',
# 'Here is the message.',
# 'from@example.com',
# ['mohd.muthanna+contact.mohd.im@gmail.com'],
# fail_silently=False,
# )
#
# return render(request, 'contact.html',)
# our view
# def contact(request):
# form_class = ContactForm
#
#
# if request.method == 'POST':
# form = form_class(data=request.POST)
#
# if form.is_valid():
# contact_email = request.POST.get('contact_email', '')
# subject = request.POST.get('subject', '')
# message = request.POST.get('message', '')
#
# send_mail(
# subject,
# message,
# contact_email,
# ['m@mohd.im'],
# fail_silently=False,
# )
# message = """
# Thank You,\n We appreciate that you’ve taken the time to write us.
# We’ll get back to you very soon.\n \n \n
# ------------------------------------
# This is your message
# ------------------------------------
# """ + message
# subject = "RE:" + subject
# send_mail(
# subject,
# message,
# 'm@mohd.im',
# [contact_email],
# fail_silently=False,
# )
#
#
#
# # return redirect('/thank-u-email.html/')
# return render(request, 'thanks-email.html',)
#
# return render(request, 'contact.html', {
# 'form': form_class,
# })
#
#
|
# Django project generator
import os
def info(project_name, mysite_name, app_name, db):
    # project_name = str(input('Enter the project name: '))
    # mysite_name = str(input('Enter the mysite name: '))
    # app_name = str(input('Enter the app name: '))
    # db = str(input('Database / default: 1  MySQL: 2 : '))
    project_path = '/Users/weichenho/Desktop'  # change this path to wherever you want the project
project_file = project_path + '/' + project_name
    os.mkdir(project_file)  # create the project folder
    os.system(f'cd {project_file} && django-admin startproject {mysite_name}')  # create the mysite folder
    os.system(f'cd {project_file}/{mysite_name} && python manage.py startapp {app_name}')  # create the app folder
    os.mkdir(f'{project_file}/{mysite_name}/templates')  # create the templates folder
    os.mkdir(f'{project_file}/{mysite_name}/static')  # create the static folder
    html = open(f'{project_file}/{mysite_name}/templates/index.html','w')  # create the html file
html.close()
return project_file, mysite_name, app_name, db
def settings(project_file, mysite_name, app_name, db):
    # modify the settings.py configuration file
    # path: {project_file}/{mysite_name}/{mysite_name}/settings.py
    file_object1 = open(f'{project_file}/{mysite_name}/{mysite_name}/settings.py','r')  # read the original file generated by django
    l = []  # append the file contents into a list
try:
while True:
line = file_object1.readline()
if line:
l.append(line)
else:
break
finally:
file_object1.close()
new_l = []
for i in l:
if i == 'from pathlib import Path\n':
new_l.append('from pathlib import Path\n')
new_l.append('import os\n')
new_l.append('import pymysql\n')
            new_l.append('pymysql.version_info = (1, 4, 13, "final", 0) ## add this yourself\n')
new_l.append('pymysql.install_as_MySQLdb() #####\n')
elif i == "ALLOWED_HOSTS = []\n":
new_l.append("ALLOWED_HOSTS = ['*']\n")
elif i == " 'django.contrib.staticfiles',\n":
new_l.append(" 'django.contrib.staticfiles',\n")
new_l.append(f" '{app_name}',\n")
elif i == " 'DIRS': [],\n":
new_l.append(" 'DIRS': [os.path.join(BASE_DIR, 'templates').replace('\\\\', '/')],\n")
elif i == " 'ENGINE': 'django.db.backends.sqlite3',\n" and db == '2': #Mysql設定區
new_l.append(" 'ENGINE': 'django.db.backends.mysql',\n")
elif i == " 'NAME': BASE_DIR / 'db.sqlite3',\n" and db == '2':
new_l.append(" 'NAME': '',\n")
new_l.append(" 'USER': 'root',\n")
new_l.append(" 'PASSWORD': 'root',\n")
new_l.append(" 'HOST': '127.0.0.1',\n")
new_l.append(" 'PORT': '3306',\n")
elif i == "LANGUAGE_CODE = 'en-us'\n":
new_l.append("LANGUAGE_CODE = 'zh-Hant'\n")
elif i == "TIME_ZONE = 'UTC'\n":
new_l.append("TIME_ZONE = 'Asia/Taipei'\n")
elif i == 'USE_I18N = True\n':
new_l.append("USE_I18N = True\n")
elif i == 'USE_L10N = True\n':
new_l.append("USE_L10N = True\n")
elif i == 'USE_TZ = True\n':
new_l.append("USE_TZ = False\n")
elif i == "STATIC_URL = '/static/'\n":
new_l.append("STATIC_URL = '/static/'\n")
new_l.append("\n")
new_l.append("STATICFILES_DIRS = [\n")
new_l.append(" os.path.join(BASE_DIR, 'static'),\n")
new_l.append("]\n")
else:
new_l.append(i)
file_object2 = open(f'{project_file}/{mysite_name}/{mysite_name}/settings.py','w')
for i in new_l:
file_object2.writelines(i)
file_object2.close()
#--------------------------- urls.py inside mysite ---------------------------------
def urls(project_file, mysite_name, app_name):
file_object1 = open(f'{project_file}/{mysite_name}/{mysite_name}/urls.py','r')
l = []
try:
while True:
line = file_object1.readline()
if line:
l.append(line)
else:
break
finally:
file_object1.close()
new_l = []
for i in l:
if i == 'from django.urls import path\n':
new_l.append('from django.urls import path\n')
new_l.append('from django.conf.urls import include, url')
new_l.append('\n')
new_l.append('\n')
elif i == " path('admin/', admin.site.urls),\n":
new_l.append(" path('admin/', admin.site.urls),\n")
new_l.append(f" path('', include('{app_name}.urls')),\n")
else:
new_l.append(i)
file_object2 = open(f'{project_file}/{mysite_name}/{mysite_name}/urls.py','w')
for i in new_l:
file_object2.writelines(i)
file_object2.close()
    #----------------------------- urls.py inside the app ---------------------------------
file_object3 = open(f'{project_file}/{mysite_name}/{app_name}/urls.py','w')
l = [
"from django.conf.urls import url\n",
f"from {app_name} import views\n",
"from django.urls import path",
"\n",
"\n",
"\n",
"urlpatterns = [\n",
" path('', views.index),\n",
"]\n"
]
for i in l:
file_object3.writelines(i)
file_object3.close()
#-----------------------------views.py---------------------------------
def views(project_file, mysite_name, app_name):
file_object1 = open(f'{project_file}/{mysite_name}/{app_name}/views.py','w')
l = [
"from django.shortcuts import render,redirect, HttpResponse\n",
"from django.urls import reverse\n",
"from django.http import HttpResponseRedirect\n",
f"#from {app_name}.models import User\n",
"\n",
"\n",
"\n",
"def index(request):\n",
"\n",
" return render(request,'index.html')"
]
for i in l:
file_object1.writelines(i)
file_object1.close()
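
# Hedged usage sketch (not in the original script): run the generation steps in
# order. The project, site and app names and the db choice are hypothetical.
if __name__ == '__main__':
    project_file, mysite_name, app_name, db = info('demo_project', 'mysite', 'app', '1')
    settings(project_file, mysite_name, app_name, db)
    urls(project_file, mysite_name, app_name)
    views(project_file, mysite_name, app_name)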
|
# -*- coding: utf-8 -*-
__author__ = "Mark McClain"
__copyright__ = "Mark McClain"
__license__ = "mit"
from unittest import TestCase
from awslambda import LambdaBaseSsm
from unittest.mock import patch
import boto3
from moto import mock_ssm
class LambdaBaseSsmImpl(LambdaBaseSsm):
def handle(self, event, context) -> dict:
return event.update({})
class TestLambdaBaseSsm(TestCase):
@classmethod
def setUpClass(cls) -> None:
pass
def setUp(self) -> None:
pass
@mock_ssm
def test_parameter_not_found(self) -> None:
with self.assertRaises(KeyError):
lambda_base_ssm_impl = LambdaBaseSsmImpl()
lambda_base_ssm_impl.get_parameter('lambda-base/parameters/param')
@mock_ssm
def test_parameter_default(self) -> None:
lambda_base_ssm_impl = LambdaBaseSsmImpl()
self.assertEqual('value', lambda_base_ssm_impl.get_parameter('lambda-base/parameters/param', 'value'))
@mock_ssm
@patch.dict('os.environ', {'ENV': 'dev', 'CONFIG_PATHS': 'lambda-base'})
    def test_create_with_parameters(self) -> None:
client = boto3.client('ssm')
client.put_parameter(
Name='/dev/lambda-base/parameters',
Description='A test parameter',
Value='{"param1": "param1", "param2": "param2"}',
Type='String')
lambda_base_ssm_impl = LambdaBaseSsmImpl()
self.assertEqual('param1', lambda_base_ssm_impl.get_parameter('/lambda-base/parameters/param1'))
self.assertEqual('param2', lambda_base_ssm_impl.get_parameter('/lambda-base/parameters/param2'))
self.assertIsNotNone(lambda_base_ssm_impl)
|
response.view = 'generic.html' # use a generic view
tables_list = UL([LI(A(table,_href=URL("app","grid",args=table))) for table in db.tables if not table.startswith("auth_")])
pages_dict = {
"myInventoryItems":URL("app","grid",args=["inventoryItem","inventoryItem"],vars={"keywords": 'inventoryItem.idSeller = "%s"' % auth.user_id}),
"publicInventory":URL("app","grid",args=["inventoryItem","inventoryItem"],vars={"keywords": 'inventoryItem.idSeller != "%s"' % auth.user_id}),
}
def pages():
tables_list.insert(0,LI(HR(),_style="list-style-type:none"))
    for pageName, pageUrl in pages_dict.items():
tables_list.insert(0,LI(A(pageName,_href=pageUrl)))
return dict(message=tables_list)
def grid():
tablename = request.args(0)
    if tablename not in db.tables: raise HTTP(403)
grid = SQLFORM.smartgrid(db[tablename], args=[tablename])
return dict(grid=grid)
|
# Copyright (c) 2021 Koichi Sakata
import setuptools
with open("README.md", "r") as fh:
long_description = fh.read()
setuptools.setup(
name="pylib-sakata",
version="0.1.2",
author="Koichi Sakata",
author_email="",
description="Control system design and analysis package",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/Koichi-Sakata/pylib_sakata",
package_dir={"": "src"},
packages=setuptools.find_packages(where="src"),
python_requires=">=3.6",
)
|
"""
Integration tests for the :func:`esmvalcore.preprocessor.regrid.regrid`
function.
"""
import unittest
import iris
import numpy as np
import tests
from esmvalcore.preprocessor import extract_point
from tests.unit.preprocessor._regrid import _make_cube
class Test(tests.Test):
def setUp(self):
"""Prepare tests."""
shape = (3, 4, 4)
data = np.arange(np.prod(shape)).reshape(shape)
self.cube = _make_cube(data, dtype=np.float64)
self.cs = iris.coord_systems.GeogCS(iris.fileformats.pp.EARTH_RADIUS)
def test_extract_point__single_linear(self):
"""Test linear interpolation when extracting a single point"""
point = extract_point(self.cube, 2.1, 2.1, scheme='linear')
self.assertEqual(point.shape, (3,))
np.testing.assert_allclose(point.data, [5.5, 21.5, 37.5])
# Exactly centred between grid points.
point = extract_point(self.cube, 2.5, 2.5, scheme='linear')
self.assertEqual(point.shape, (3,))
np.testing.assert_allclose(point.data, [7.5, 23.5, 39.5])
# On a (edge) grid point.
point = extract_point(self.cube, 4, 4, scheme='linear')
self.assertEqual(point.shape, (3,))
np.testing.assert_allclose(point.data, [15, 31, 47])
# Test two points outside the valid area.
# These should be masked, since we set up the interpolation
# schemes that way.
point = extract_point(self.cube, -1, -1, scheme='linear')
self.assertEqual(point.shape, (3,))
masked = np.ma.array([np.nan] * 3, mask=True)
self.assert_array_equal(point.data, masked)
point = extract_point(self.cube, 30, 30, scheme='linear')
self.assertEqual(point.shape, (3,))
self.assert_array_equal(point.data, masked)
def test_extract_point__single_nearest(self):
"""Test nearest match when extracting a single point"""
point = extract_point(self.cube, 2.1, 2.1, scheme='nearest')
self.assertEqual(point.shape, (3,))
np.testing.assert_allclose(point.data, [5, 21, 37])
point = extract_point(self.cube, 4, 4, scheme='nearest')
self.assertEqual(point.shape, (3,))
np.testing.assert_allclose(point.data, [15, 31, 47])
# Test two points outside the valid area
point = extract_point(self.cube, -1, -1, scheme='nearest')
self.assertEqual(point.shape, (3,))
masked = np.ma.array(np.empty(3, dtype=np.float64), mask=True)
self.assert_array_equal(point.data, masked)
point = extract_point(self.cube, 30, 30, scheme='nearest')
self.assertEqual(point.shape, (3,))
self.assert_array_equal(point.data, masked)
def test_extract_point__multiple_linear(self):
"""Test linear interpolation for an array of one coordinate"""
# Test points on the grid edges, on a grid point, halfway and
# one in between.
coords = self.cube.coords(dim_coords=True)
print([coord.standard_name for coord in coords])
point = extract_point(self.cube, [1, 1.1, 1.5, 2, 4], 2,
scheme='linear')
self.assertEqual(point.shape, (3, 5))
# Longitude is not a dimension coordinate anymore.
self.assertEqual(['air_pressure', 'latitude'], [
coord.standard_name for coord in point.coords(dim_coords=True)])
np.testing.assert_allclose(point.data, [[1, 1.4, 3, 5, 13],
[17, 17.4, 19., 21., 29],
[33, 33.4, 35, 37, 45]])
point = extract_point(self.cube, 4, [1, 1.1, 1.5, 2, 4],
scheme='linear')
self.assertEqual(point.shape, (3, 5))
self.assertEqual(['air_pressure', 'longitude'], [
coord.standard_name for coord in point.coords(dim_coords=True)])
np.testing.assert_allclose(point.data, [[12, 12.1, 12.5, 13, 15],
[28, 28.1, 28.5, 29, 31],
[44, 44.1, 44.5, 45, 47]])
# Test latitude and longitude points outside the grid.
# These should all be masked.
coords = self.cube.coords(dim_coords=True)
point = extract_point(self.cube, [0, 10], 3,
scheme='linear')
self.assertEqual(point.shape, (3, 2))
masked = np.ma.array(np.empty((3, 2), dtype=np.float64), mask=True)
self.assert_array_equal(point.data, masked)
coords = self.cube.coords(dim_coords=True)
point = extract_point(self.cube, 2, [0, 10],
scheme='linear')
coords = point.coords(dim_coords=True)
self.assertEqual(point.shape, (3, 2))
self.assert_array_equal(point.data, masked)
def test_extract_point__multiple_nearest(self):
"""Test nearest match for an array of one coordinate"""
point = extract_point(self.cube, [1, 1.1, 1.5, 1.501, 2, 4], 2,
scheme='nearest')
self.assertEqual(point.shape, (3, 6))
self.assertEqual(['air_pressure', 'latitude'], [
coord.standard_name for coord in point.coords(dim_coords=True)])
np.testing.assert_allclose(point.data, [[1, 1, 1, 5, 5, 13],
[17, 17, 17, 21, 21, 29],
[33, 33, 33, 37, 37, 45]])
point = extract_point(self.cube, 4, [1, 1.1, 1.5, 1.501, 2, 4],
scheme='nearest')
self.assertEqual(point.shape, (3, 6))
self.assertEqual(['air_pressure', 'longitude'], [
coord.standard_name for coord in point.coords(dim_coords=True)])
np.testing.assert_allclose(point.data, [[12, 12, 12, 13, 13, 15],
[28, 28, 28, 29, 29, 31],
[44, 44, 44, 45, 45, 47]])
point = extract_point(self.cube, [0, 10], 3,
scheme='nearest')
masked = np.ma.array(np.empty((3, 2), dtype=np.float64), mask=True)
self.assertEqual(point.shape, (3, 2))
self.assert_array_equal(point.data, masked)
point = extract_point(self.cube, 2, [0, 10],
scheme='nearest')
self.assertEqual(point.shape, (3, 2))
self.assert_array_equal(point.data, masked)
def test_extract_point__multiple_both_linear(self):
"""Test for both latitude and longitude arrays, with
linear interpolation"""
point = extract_point(self.cube, [0, 1.1, 1.5, 1.51, 4, 5],
[0, 1.1, 1.5, 1.51, 4, 5], scheme='linear')
self.assertEqual(point.data.shape, (3, 6, 6))
result = np.ma.array(np.empty((3, 6, 6), dtype=np.float64), mask=True)
result[0, 1, 1:5] = [0.5, 0.9, 0.91, 3.4]
result[0, 2, 1:5] = [2.1, 2.5, 2.51, 5.0]
result[0, 3, 1:5] = [2.14, 2.54, 2.55, 5.04]
result[0, 4, 1:5] = [12.1, 12.5, 12.51, 15.0]
result[1, 1, 1:5] = [16.5, 16.9, 16.91, 19.4]
result[1, 2, 1:5] = [18.10, 18.5, 18.51, 21.0]
result[1, 3, 1:5] = [18.14, 18.54, 18.55, 21.04]
result[1, 4, 1:5] = [28.1, 28.5, 28.51, 31.0]
result[2, 1, 1:5] = [32.5, 32.9, 32.91, 35.4]
result[2, 2, 1:5] = [34.1, 34.5, 34.51, 37]
result[2, 3, 1:5] = [34.14, 34.54, 34.55, 37.04]
result[2, 4, 1:5] = [44.1, 44.5, 44.51, 47]
# Unmask the inner area of the result array.
# The outer edges of the extracted points are outside the cube
# grid, and should thus be masked.
result.mask[:, 1:5, 1:5] = False
np.testing.assert_allclose(point.data, result)
def test_extract_point__multiple_both_nearest(self):
"""Test for both latitude and longitude arrays, with nearest match"""
point = extract_point(self.cube, [0, 1.1, 1.5, 1.51, 4, 5],
[0, 1.1, 1.5, 1.51, 4, 5], scheme='nearest')
self.assertEqual(point.data.shape, (3, 6, 6))
result = np.ma.array(np.empty((3, 6, 6), dtype=np.float64), mask=True)
result[0, 1, 1:5] = [0.0, 0.0, 1.0, 3.0]
result[0, 2, 1:5] = [0.0, 0.0, 1.0, 3.0]
result[0, 3, 1:5] = [4.0, 4.0, 5.0, 7.0]
result[0, 4, 1:5] = [12.0, 12.0, 13.0, 15.0]
result[1, 1, 1:5] = [16.0, 16.0, 17.0, 19.0]
result[1, 2, 1:5] = [16.0, 16.0, 17.0, 19.0]
result[1, 3, 1:5] = [20.0, 20.0, 21.0, 23.0]
result[1, 4, 1:5] = [28.0, 28.0, 29.0, 31.0]
result[2, 1, 1:5] = [32.0, 32.0, 33.0, 35.0]
result[2, 2, 1:5] = [32.0, 32.0, 33.0, 35.0]
result[2, 3, 1:5] = [36.0, 36.0, 37.0, 39.0]
result[2, 4, 1:5] = [44.0, 44.0, 45.0, 47.0]
result.mask[:, 1:5, 1:5] = False
np.testing.assert_allclose(point.data, result)
if __name__ == '__main__':
unittest.main()
|
def superTuple(name, attributes):
"""Creates a Super Tuple class."""
dct = {}
#Create __new__.
nargs = len(attributes)
def _new_(cls, *args):
if len(args) != nargs:
raise TypeError("%s takes %d arguments (%d given)." % (cls.__name__,
nargs,
len(args)))
return tuple.__new__(cls, args)
dct["__new__"] = staticmethod(_new_)
#Create __repr__.
def _repr_(self):
contents = [repr(elem) for elem in self]
return "%s<%s>" % (self.__class__.__name__,
", ".join(contents))
dct["__repr__"] = _repr_
#Create attribute properties.
def getter(i):
return lambda self: self.__getitem__(i)
for index, attribute in enumerate(attributes):
dct[attribute] = property(getter(index))
#Set slots.
dct["__slots__"] = []
#Return class.
return type(name, (tuple,), dct)
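
# Minimal usage sketch (illustrative only; the names below are hypothetical):
if __name__ == "__main__":
    Point = superTuple("Point", ["x", "y"])
    p = Point(3, 4)
    print(p.x, p.y)  # -> 3 4
    print(p)         # -> Point<3, 4>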
|
import sandstone.models.mlp
import sandstone.models.linear
import sandstone.models.rnn
|
from time import sleep
from nltk.tokenize import word_tokenize
from nltk.stem.wordnet import WordNetLemmatizer
from nltk.corpus import sentiwordnet as swn, stopwords, wordnet
from nltk.sentiment.vader import SentimentIntensityAnalyzer
import nltk
import pandas as pd
import re
from datetime import datetime
nltk.download('vader_lexicon')
nltk.download('stopwords')
nltk.download('punkt')
nltk.download('averaged_perceptron_tagger')
nltk.download('wordnet')
nltk.download('sentiwordnet')
class SentimentAnalyzer:
def __init__(self):
self.sentimentAnalyzer = SentimentIntensityAnalyzer()
self.stop_words = set(stopwords.words('english'))
self.lemmatizer = WordNetLemmatizer()
def preprocess_text(self, text):
# Special Character Filtering
text_char_filter = self.specialchar_filtering_text(text)
# To lower case
text_lower = self.to_lowercase_text(text_char_filter)
# Tokenized
tokenized = self.tokenize_text(text_lower)
result = tokenized
return result
def specialchar_filtering_text(self, text):
# print("\n======================== Special Char Filtering =========================")
result = " ".join(re.findall("[a-zA-Z]+", text))
return result
def to_lowercase_text(self, text):
# print("\n======================== Data case folding =========================")
result = text.lower()
return result
def tokenize_text(self, text):
print("[", datetime.now(), "] Tokenizing data....")
result = nltk.pos_tag(word_tokenize(text))
return result
def lemmatize_text(self, text, pos_tag):
result = self.lemmatizer.lemmatize(text, pos_tag)
# print(result)
return result
def get_wordnet_pos_tag(self, tag):
tag_dict = {
"J": wordnet.ADJ,
"N": wordnet.NOUN,
"V": wordnet.VERB,
"R": wordnet.ADV,
}
return tag_dict.get(tag, wordnet.NOUN)
def get_vader(self, text):
return self.sentimentAnalyzer.polarity_scores(text)
def get_wordnet_degree(self, word):
pos_tag = self.get_wordnet_pos_tag(word[1][0])
# print(word[0], " ", word[1][0], " ", pos_tag)
lemmatized = self.lemmatize_text(word[0], pos_tag)
synset = swn.senti_synset('{0}.{1}.03'.format(lemmatized, pos_tag))
return {
'positive': synset.pos_score(),
'negative': synset.neg_score(),
'objective': synset.obj_score()
}
def get_wordnet_aggregation(self, pos, neg, obj):
pos_wordnet = 0
neg_wordnet = 0
result = 0
if pos > neg:
pos_wordnet = pos / (pos + neg)
result = pos_wordnet - (obj * pos_wordnet)
elif pos < neg:
neg_wordnet = neg / (pos + neg) * -1
result = neg_wordnet - (obj * neg_wordnet)
else:
result = 0
return result
def get_sentiwordnet(self, text):
sentences_result = 0
total = 0
text_preprocessed = self.preprocess_text(text)
word_count = 0
for word in text_preprocessed:
if word[0] not in self.stop_words:
try:
degree = self.get_wordnet_degree(word)
result = self.get_wordnet_aggregation(
degree['positive'], degree['negative'], degree['objective'])
# print("Result = ", result)
                    if result is not None:
word_count += 1
sentences_result += result
                except Exception:
continue
print("[", datetime.now(), "] Word count :", word_count)
if word_count > 0:
sentences_result = sentences_result / word_count
        else:
            sentences_result = 0
        print("[", datetime.now(), "] Sentences result :", sentences_result)
        return sentences_result
if __name__ == "__main__":
sentimentAnalyzer = SentimentAnalyzer()
# Good
# score = sentimentAnalyzer.get_sentiwordnet(
# "Great Hampton Inn. Great Location. Great People. Good breakfast. Clean and comfortable . Easy to get to from the airports. Has not shown any wear from the time built. The room was comfortable and clean")
# Bad
# score = sentimentAnalyzer.get_sentiwordnet(
# "I booked this hotel tonight (april 11,2019) under my company reservation for two nights. Once, I arrived your front office staff said no reservation for us (Andi and Ega). They said that no room at all. Your marketing for my company (KPPU) said the same 'No'. They do nothing, do not make an effort for double check. I said that your hotel staff had confirm to Ms.Xenia this noon, but they still refusing us So, we force to search another hotel at 18.38 tonight. What a bad reservation system you had. It is so impossible for me do check in at the hotel without the reservation. And I have no word of apologize at all from your hotel staff Bad.. Very bad indeed.")
# Check
score = sentimentAnalyzer.get_sentiwordnet(
"In the end i think the biggest issue with Cupadak Paradiso is the price. The location is beautiful but when you ask $150USD per night in South East Asia people’s expectations are relatively high. Unfortunately in this instance ours weren’t met. No airconditioning might be an issue for some for that money but was actually fine for us, a fan more than sufficed, but in 2019 to still be charging for wifi felt cheap, surely you just integrate the cost into your room rate calculation. The lodge itself is creaking after 25 years and in need of a bit of refurbishment. The food was ok but to have set times for group meals is quite rigid, although for sure easier for their kitchen. Couple of disclaimers at this point, we did make two mistakes during our stay, firstly asking if it was possible to have some Indonesian food on the daily menu, secondly leaving some empty cans outside one of our rooms. Both of these events then became issues that the owner Dominique felt the need to highlight to the whole group at every subsequent mealtime, either telling everyone that we’d been naive in leaving the cans and she’d had to fight off the monkeys in the morning or apologising to everyone for the local food being served but it had been specially requested. This became uncomfortable, as much as we liked the idea of shared mealtimes it only works when guests feel comfortable in the management’s company. Knowing that we were going to be regularly lambasted for our errors wasn’t enjoyable. Just a last point but the staff seemed to be very relaxed, lounging in the library, restaurant or bar. It wasn’t really clear what they were up to but could be an idea to have a staff room dedicated for them as a slightly seperate area from guests. It is a beautiful spot and great place to relax and watch the world go by but could have been so much more...")
|
import unittest
from huobi.impl.utils import *
from huobi.model.constant import *
from huobi.impl.restapirequestimpl import RestApiRequestImpl
from huobi.impl.utils.timeservice import convert_cst_in_millisecond_to_utc
data = '''
{
"code": 200,
"success": "True",
"data": [
{
"id": 1499184000,
"amount": 123.123,
"open": 0.7794,
"close": 0.779,
"low": 0.769,
"high": 0.7694,
"vol": 456.456
}
]
}
'''
class TestGetETFCandlestick(unittest.TestCase):
def test_request(self):
impl = RestApiRequestImpl("", "")
request = impl.get_etf_candlestick("hb10", CandlestickInterval.YEAR1, 100)
self.assertEqual("GET", request.method)
self.assertTrue(request.url.find("/quotation/market/history/kline") != -1)
self.assertTrue(request.url.find("symbol=hb10"))
self.assertTrue(request.url.find("period=1year"))
self.assertTrue(request.url.find("limit=100"))
def test_result(self):
impl = RestApiRequestImpl("", "")
request = impl.get_etf_candlestick("hb10", CandlestickInterval.YEAR1, 100)
candlestick_list = request.json_parser(parse_json_from_string(data))
self.assertEqual(1, len(candlestick_list))
self.assertEqual(convert_cst_in_millisecond_to_utc(1499184000), candlestick_list[0].timestamp)
self.assertEqual(0.7694, candlestick_list[0].high)
self.assertEqual(0.769, candlestick_list[0].low)
self.assertEqual(0.7794, candlestick_list[0].open)
self.assertEqual(0.779, candlestick_list[0].close)
self.assertEqual(123.123, candlestick_list[0].amount)
self.assertEqual(456.456, candlestick_list[0].volume)
|
"""SniTun reference implementation."""
import asyncio
from contextlib import suppress
from itertools import cycle
import logging
from multiprocessing import cpu_count
import select
import socket
from typing import Awaitable, Iterable, List, Optional, Dict
from threading import Thread
import async_timeout
from .listener_peer import PeerListener
from .listener_sni import SNIProxy
from .peer_manager import PeerManager
from .worker import ServerWorker
from .sni import ParseSNIError, parse_tls_sni
_LOGGER = logging.getLogger(__name__)
WORKER_STALE_MAX = 10
class SniTunServer:
"""SniTunServer helper class for Dual port Asyncio."""
def __init__(
self,
fernet_keys: List[str],
sni_port: Optional[int] = None,
sni_host: Optional[str] = None,
peer_port: Optional[int] = None,
peer_host: Optional[str] = None,
throttling: Optional[int] = None,
):
"""Initialize SniTun Server."""
self._peers: PeerManager = PeerManager(fernet_keys, throttling=throttling)
self._list_sni: SNIProxy = SNIProxy(self._peers, host=sni_host, port=sni_port)
self._list_peer: PeerListener = PeerListener(
self._peers, host=peer_host, port=peer_port
)
@property
def peers(self) -> PeerManager:
"""Return peer manager."""
return self._peers
def start(self) -> Awaitable[None]:
"""Run server.
Return coroutine.
"""
return asyncio.wait([self._list_peer.start(), self._list_sni.start()])
def stop(self) -> Awaitable[None]:
"""Stop server.
Return coroutine.
"""
return asyncio.wait([self._list_peer.stop(), self._list_sni.stop()])
class SniTunServerSingle:
"""SniTunServer helper class for Single port Asnycio."""
def __init__(
self,
fernet_keys: List[str],
host: Optional[str] = None,
port: Optional[int] = None,
throttling: Optional[int] = None,
):
"""Initialize SniTun Server."""
self._loop: asyncio.BaseEventLoop = asyncio.get_event_loop()
self._server: Optional[asyncio.AbstractServer] = None
self._peers: PeerManager = PeerManager(fernet_keys, throttling=throttling)
self._list_sni: SNIProxy = SNIProxy(self._peers)
self._list_peer: PeerListener = PeerListener(self._peers)
self._host: str = host or "0.0.0.0"
self._port: int = port or 443
@property
def peers(self) -> PeerManager:
"""Return peer manager."""
return self._peers
async def start(self) -> None:
"""Run server."""
self._server = await asyncio.start_server(
self._handler, host=self._host, port=self._port
)
async def stop(self) -> None:
"""Stop server."""
self._server.close()
await self._server.wait_closed()
async def _handler(
self, reader: asyncio.StreamReader, writer: asyncio.StreamWriter
) -> None:
"""Handle incoming connection."""
try:
async with async_timeout.timeout(10):
data = await reader.read(2048)
except asyncio.TimeoutError:
_LOGGER.warning("Abort connection initializing")
writer.close()
return
except OSError:
return
        # Connection closed / health check
if not data:
writer.close()
return
# Select the correct handler for process data
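        # Added note: 0x16 is the TLS handshake record type (a ClientHello),
        # while peer connections start with the base64 Fernet-token prefix b"gA".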
if data[0] == 0x16:
self._loop.create_task(
self._list_sni.handle_connection(reader, writer, data=data)
)
elif data.startswith(b"gA"):
self._loop.create_task(
self._list_peer.handle_connection(reader, writer, data=data)
)
else:
_LOGGER.warning("No valid ClientHello found: %s", data)
writer.close()
return
class SniTunServerWorker(Thread):
"""SniTunServer helper class for Worker."""
def __init__(
self,
fernet_keys: List[str],
host: Optional[str] = None,
port: Optional[int] = None,
worker_size: Optional[int] = None,
throttling: Optional[int] = None,
):
"""Initialize SniTun Server."""
super().__init__()
self._host: str = host or "0.0.0.0"
self._port: int = port or 443
self._fernet_keys: List[str] = fernet_keys
self._throttling: Optional[int] = throttling
self._worker_size: int = worker_size or (cpu_count() * 2)
self._workers: List[ServerWorker] = []
self._running: bool = False
# TCP server
self._server: Optional[socket.socket] = None
self._poller: Optional[select.epoll] = None
@property
def peer_counter(self) -> int:
"""Return number of active peer connections."""
return sum(worker.peer_size for worker in self._workers)
def start(self) -> None:
"""Run server."""
        # Initialize all workers first; we don't want the epoll fd inherited by the children
        _LOGGER.info("Run SniTun with %d workers", self._worker_size)
for _ in range(self._worker_size):
worker = ServerWorker(self._fernet_keys, throttling=self._throttling)
worker.start()
self._workers.append(worker)
self._server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self._server.bind((self._host, self._port))
self._server.setblocking(False)
self._server.listen(80 * 1000)
self._running = True
self._poller = select.epoll()
self._poller.register(self._server.fileno(), select.EPOLLIN)
super().start()
def stop(self) -> None:
"""Stop server."""
self._running = False
self.join()
# Shutdown all workers
for worker in self._workers:
worker.shutdown()
worker.close()
self._workers.clear()
self._server.close()
self._poller.close()
def run(self) -> None:
"""Handle incoming connection."""
fd_server = self._server.fileno()
connections: Dict[int, socket.socket] = {}
worker_lb = cycle(self._workers)
stale: Dict[int, int] = {}
while self._running:
events = self._poller.poll(1)
for fileno, event in events:
# New Connection
if fileno == fd_server:
con, _ = self._server.accept()
con.setblocking(False)
self._poller.register(
con.fileno(), select.EPOLLIN | select.EPOLLHUP | select.EPOLLERR
)
connections[con.fileno()] = con
stale[con.fileno()] = 0
# Read hello & forward to worker
elif event & select.EPOLLIN:
self._poller.unregister(fileno)
con = connections.pop(fileno)
self._process(con, worker_lb)
# Close
else:
self._poller.unregister(fileno)
con = connections.pop(fileno)
con.close()
# cleanup stale connection
for fileno in tuple(stale):
if fileno not in connections:
stale.pop(fileno)
elif stale[fileno] >= WORKER_STALE_MAX:
self._poller.unregister(fileno)
con = connections.pop(fileno)
con.close()
else:
stale[fileno] += 1
def _process(self, con: socket.socket, workers_lb: Iterable[ServerWorker]) -> None:
"""Process connection & helo."""
data = b""
try:
data = con.recv(2048)
except OSError as err:
_LOGGER.warning("Receive fails: %s", err)
con.close()
return
# No data received
if not data:
with suppress(OSError):
con.shutdown(socket.SHUT_RDWR)
return
# Peer connection
if data.startswith(b"gA"):
next(workers_lb).handover_connection(con, data)
_LOGGER.debug("Handover new peer connection: %s", data)
return
# TLS/SSL connection
if data[0] != 0x16:
_LOGGER.warning("No valid ClientHello found: %s", data)
with suppress(OSError):
con.shutdown(socket.SHUT_RDWR)
return
try:
hostname = parse_tls_sni(data)
except ParseSNIError:
_LOGGER.warning("Receive invalid ClientHello on public Interface")
else:
for worker in self._workers:
if not worker.is_responsible_peer(hostname):
continue
worker.handover_connection(con, data, sni=hostname)
_LOGGER.info("Handover %s to %s", hostname, worker.name)
return
_LOGGER.warning("No responsible worker for %s", hostname)
with suppress(OSError):
con.shutdown(socket.SHUT_RDWR)
|
import torch
import torch.nn as nn
from torch.distributions import Normal, kl_divergence
from .utils import ModuleTraits
class Baseline(nn.Sequential):
def __init__(self, dim_in=784, dim_out=784):
super().__init__(
nn.Linear(dim_in, 500),
nn.GELU(),
nn.Linear(500, 500),
nn.GELU(),
nn.Linear(500, dim_out),
)
class EncoderTraits(ModuleTraits):
@property
def normal(self) -> Normal:
return self._saved_for_later["normal"]
class Encoder(EncoderTraits, Baseline):
def __init__(self):
super().__init__(dim_out=400)
def forward(self, x, y_=None):
if y_ is not None:
x = x.clone()
x[x == -1] = y_[x == -1]
μ, logσ = super().forward(x).chunk(2, dim=-1)
σ = logσ.exp()
z = μ + σ * torch.randn_like(μ)
self.save_for_later(normal=Normal(μ, σ))
return z
class DumbEncoder(EncoderTraits, nn.Module):
def __init__(self):
super().__init__()
def forward(self, x, _):
μ = torch.zeros(len(x), 200, device=x.device)
normal = Normal(μ, torch.ones_like(μ))
self.save_for_later(normal=normal)
return normal.sample()
class Decoder(Baseline):
def __init__(self, conditioned=False):
super().__init__(dim_in=984 if conditioned else 200)
if __name__ == "__main__":
x = torch.randn(3, 28, 28)
y = torch.randn(3, 28, 28)
baseline = Baseline()
prior = Encoder()
encoder = Encoder()
decoder = Decoder()
x = x.flatten(1)
y = y.flatten(1)
y_ = baseline(x)
z_prior = prior(x, y_)
z_posterior = encoder(x, y)
kl = kl_divergence(encoder.normal, prior.normal)
x = decoder(z_prior)
x = x.view(3, 28, 28)
print(x.shape)
|
from django.db import models
from django.utils.safestring import mark_safe
class BaseModel(models.Model):
model_name = None
table_fields = []
supported_views = ["detailed", "list", "gallery"]
extra_context = {}
def admin_url(self):
name = self.model_name.replace("-", "_").lower()
return "/admin/museum_site/{}/{}/change/".format(name, self.id)
def url(self):
raise NotImplementedError('Subclasses must implement "url" method.')
def preview_url(self):
raise NotImplementedError(
'Subclasses must implement "preview_url" method.'
)
def scrub(self):
raise NotImplementedError(
'Subclasses must implement "scrub" method.'
)
def _init_icons(self):
# Stub
self._minor_icons = []
self._major_icons = []
def get_all_icons(self):
# Returns combined list of both major and minor icons, populating if needed
if not hasattr(self, "_major_icons"):
self._init_icons()
return self._major_icons + self._minor_icons
def get_major_icons(self):
# Returns list of major icons, populating if needed
if not hasattr(self, "_major_icons"):
self._init_icons()
return self._major_icons
def initial_context(self, *args, **kwargs):
context = {
"pk": self.pk,
"model": self.model_name,
"preview": {"url": self.preview_url(), "alt": self.preview_url()},
"url": self.url(),
"icons": self.get_all_icons(),
"major_icons": self.get_major_icons(),
"roles": [],
"debug": False,
"request": None,
"extras": [],
}
request = kwargs.get("request")
context["request"] = request
# Debug mode
if request and request.session.get("DEBUG"):
context["debug"] = True
if hasattr(self, "extra_context"):
context.update(self.extra_context)
return context
def ssv(self, field_name, field_attr="title"):
# Get a string of slash separated values for a many-to-many field
ssv = ""
if hasattr(self, field_name):
entries = list(
getattr(self, field_name).all().values_list(
field_attr, flat=True
)
)
            ssv = "/".join(entries)
return ssv
class Meta:
abstract = True
|
from matplotlib import pyplot as plt
from jax import random
from scipy.spatial.transform import Rotation as R
from optimism.JaxConfig import *
from optimism import EquationSolver as EqSolver
from optimism import Objective
from optimism.test import TestFixture
from optimism.test.MeshFixture import MeshFixture
from optimism.phasefield import PhaseFieldLorentzPlastic as Model
from optimism import SparseMatrixAssembler
from optimism import TensorMath
from optimism import Mesh
plotting=False
class GradOfPlasticPhaseFieldModelFixture(TestFixture.TestFixture):
def setUp(self):
self.E = 100.0
self.nu = 0.321
self.Gc = 40.0
self.psiC = 0.5*self.E
self.l = 1.0
self.Y0 = 0.3*self.E
self.H = 1.0e-2*self.E
props = {'elastic modulus': self.E,
'poisson ratio': self.nu,
'critical energy release rate': self.Gc,
'critical strain energy density': self.psiC,
'regularization length': self.l,
'yield strength': self.Y0,
'hardening model': 'linear',
'hardening modulus': self.H,
'kinematics': 'large deformations'}
self.model = Model.create_material_model_functions(props)
self.flux_func = grad(self.model.compute_energy_density, (0,1,2))
self.internalVariables = self.model.compute_initial_state()
def test_zero_point(self):
dispGrad = np.zeros((3,3))
phase = 0.
phaseGrad = np.zeros(3)
energy = self.model.compute_energy_density(dispGrad, phase, phaseGrad, self.internalVariables)
self.assertNear(energy, 0.0, 12)
stress, phaseForce, phaseGradForce = self.flux_func(dispGrad, phase, phaseGrad, self.internalVariables)
self.assertArrayNear(stress, np.zeros((3,3)), 12)
self.assertNear(phaseForce, 3.0/8.0*self.Gc/self.l, 12)
self.assertArrayNear(phaseGradForce, np.zeros(3), 12)
def test_rotation_invariance(self):
key = random.PRNGKey(0)
dispGrad = random.uniform(key, (3,3))
key, subkey = random.split(key)
phase = random.uniform(subkey)
key,subkey = random.split(key)
phaseGrad = random.uniform(subkey, (3,))
energy = self.model.compute_energy_density(dispGrad, phase, phaseGrad, self.internalVariables)
Q = R.random(random_state=1234).as_matrix()
dispGradStar = Q@(dispGrad + np.identity(3)) - np.identity(3)
phaseStar = phase
phaseGradStar = Q@phaseGrad
internalVariablesStar = self.internalVariables
energyStar = self.model.compute_energy_density(dispGradStar, phaseStar, phaseGradStar, internalVariablesStar)
self.assertNear(energy, energyStar, 12)
def test_elastic_energy(self):
strainBelowYield = 0.5*self.Y0/self.E # engineering strain
dispGrad = np.diag(np.exp(strainBelowYield*np.array([1.0, -self.nu, -self.nu])))-np.identity(3)
phase = 0.0
phaseGrad = np.zeros(3)
energy = self.model.compute_energy_density(dispGrad, phase, phaseGrad, self.internalVariables)
energyExact = 0.5*self.E*strainBelowYield**2
self.assertNear(energy, energyExact, 12)
piolaStress,_,_ = self.flux_func(dispGrad, phase, phaseGrad, self.internalVariables)
mandelStress = piolaStress@(dispGrad + np.identity(3)).T
stressExact = ops.index_update(np.zeros((3,3)),
ops.index[0,0],
self.E*strainBelowYield)
self.assertArrayNear(mandelStress, stressExact, 12)
def test_plastic_stress(self):
strain11 = 1.1*self.Y0/self.E
eqps = (self.E*strain11 - self.Y0)/(self.H + self.E)
elasticStrain11 = strain11 - eqps
lateralStrain = -self.nu*elasticStrain11 - 0.5*eqps
strains = np.array([strain11, lateralStrain, lateralStrain])
dispGrad = np.diag(np.exp(strains)) - np.identity(3)
phase = 0.0
phaseGrad = np.zeros(3)
energyExact = 0.5*self.E*elasticStrain11**2 + self.Y0*eqps + 0.5*self.H*eqps**2
energy = self.model.compute_energy_density(dispGrad, phase, phaseGrad, self.internalVariables)
self.assertNear(energy, energyExact, 12)
stress,_,_ = self.flux_func(dispGrad, phase, phaseGrad, self.internalVariables)
mandelStress = stress@(dispGrad + np.identity(3)).T
mandelStress11Exact = self.E*(strain11 - eqps)
self.assertNear(mandelStress[0,0], mandelStress11Exact, 12)
if __name__ == '__main__':
TestFixture.unittest.main()
|
from importlib.resources import path
import functools
import torch
import torch.nn as nn
class Net(nn.Module):
def __init__(self, NN_ARCHITECTURE):
super(Net, self).__init__()
# Define a fully connected layers model with three inputs (frequency, flux density, duty ratio)
# and one output (power loss).
self.layers = nn.Sequential(
nn.Linear(NN_ARCHITECTURE[0], NN_ARCHITECTURE[1]),
nn.ReLU(),
nn.Linear(NN_ARCHITECTURE[1], NN_ARCHITECTURE[2]),
nn.ReLU(),
nn.Linear(NN_ARCHITECTURE[2], NN_ARCHITECTURE[3]),
nn.ReLU(),
nn.Linear(NN_ARCHITECTURE[3], NN_ARCHITECTURE[4])
)
def forward(self, x):
return self.layers(x)
# Returns number of trainable parameters in a network
def count_parameters(self):
return sum(p.numel() for p in self.parameters() if p.requires_grad)
@functools.lru_cache(maxsize=8)
def model(material, waveform, device='cpu'):
with path('magnet.models', f'Model_{material}_{waveform}.sd') as sd_file:
state_dict = torch.load(sd_file)
if waveform == 'Sinusoidal':
NN_ARCHITECTURE = [2, 24, 24, 24, 1]
neural_network = Net(NN_ARCHITECTURE).double().to(device)
elif waveform == 'Trapezoidal':
NN_ARCHITECTURE = [6, 24, 24, 24, 1]
neural_network = Net(NN_ARCHITECTURE).double().to(device)
neural_network.load_state_dict(state_dict, strict=True)
neural_network.eval()
return neural_network
class Net_LSTM(nn.Module):
def __init__(self):
super(Net_LSTM, self).__init__()
self.lstm = nn.LSTM(1, 32, num_layers=1, batch_first=True, bidirectional=False)
self.fc1 = nn.Sequential(
nn.Linear(32, 16),
nn.LeakyReLU(0.2),
nn.Linear(16, 16),
nn.LeakyReLU(0.2),
nn.Linear(16, 15)
)
self.fc2 = nn.Sequential(
nn.Linear(16, 16),
nn.LeakyReLU(0.2),
nn.Linear(16, 16),
nn.LeakyReLU(0.2),
nn.Linear(16, 1)
)
def forward(self, x, freq):
x, _ = self.lstm(x)
x = x[:, -1, :] # Get last output only (many-to-one)
x = self.fc1(x)
y = self.fc2(torch.cat((x,freq),1))
return y
def count_parameters(self):
# Returns number of trainable parameters in a network
return sum(p.numel() for p in self.parameters() if p.requires_grad)
@functools.lru_cache(maxsize=8)
def model_lstm(material, device='cpu'):
with path('magnet.models', f'Model_{material}_LSTM.sd') as sd_file:
device = torch.device('cpu')
state_dict = torch.load(sd_file, map_location=device)
neural_network = Net_LSTM().double().to(device)
neural_network.load_state_dict(state_dict, strict=True)
neural_network.eval()
return neural_network
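
# Hedged usage sketch (the material name and the input values below are purely
# illustrative, not taken from this module):
#
#     net = model('MaterialA', 'Sinusoidal')
#     loss = net(torch.tensor([[100000.0, 0.1]]).double())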
|
from .Param import VelocityParameter, SlownessParameter, SlothParameter
|
from yoyo import step, transaction
transaction(
step(
"""ALTER TABLE transfer_meter ADD COLUMN month VARCHAR;""",
"""ALTER TABLE transfer_meter DROP COLUMN month;"""),
step("""CREATE UNIQUE INDEX transfer_meter_month_idx ON transfer_meter(month);"""))
|
import tensorflow as tf
from base.base_model import BaseModel
from utils.alad_utils import get_getter
import utils.alad_utils as sn
class SENCEBGAN(BaseModel):
def __init__(self, config):
super(SENCEBGAN, self).__init__(config)
self.build_model()
self.init_saver()
def build_model(self):
############################################################################################
# INIT
############################################################################################
# Kernel initialization for the convolutions
if self.config.trainer.init_type == "normal":
self.init_kernel = tf.random_normal_initializer(mean=0.0, stddev=0.02)
elif self.config.trainer.init_type == "xavier":
self.init_kernel = tf.contrib.layers.xavier_initializer(
uniform=False, seed=None, dtype=tf.float32
)
# Placeholders
self.is_training_gen = tf.placeholder(tf.bool)
self.is_training_dis = tf.placeholder(tf.bool)
self.is_training_enc_g = tf.placeholder(tf.bool)
self.is_training_enc_r = tf.placeholder(tf.bool)
self.feature_match1 = tf.placeholder(tf.float32)
self.feature_match2 = tf.placeholder(tf.float32)
self.image_input = tf.placeholder(
tf.float32, shape=[None] + self.config.trainer.image_dims, name="x"
)
self.noise_tensor = tf.placeholder(
tf.float32, shape=[None, self.config.trainer.noise_dim], name="noise"
)
############################################################################################
# MODEL
############################################################################################
self.logger.info("Building training graph...")
with tf.variable_scope("SENCEBGAN"):
# First training part
# G(z) ==> x'
with tf.variable_scope("Generator_Model"):
self.image_gen = self.generator(self.noise_tensor)
# Discriminator outputs
with tf.variable_scope("Discriminator_Model"):
self.embedding_real, self.decoded_real = self.discriminator(
self.image_input, do_spectral_norm=self.config.trainer.do_spectral_norm
)
self.embedding_fake, self.decoded_fake = self.discriminator(
self.image_gen, do_spectral_norm=self.config.trainer.do_spectral_norm
)
# Second training part
# E(x) ==> z'
with tf.variable_scope("Encoder_G_Model"):
self.image_encoded = self.encoder_g(self.image_input)
# G(z') ==> G(E(x)) ==> x''
with tf.variable_scope("Generator_Model"):
self.image_gen_enc = self.generator(self.image_encoded)
# Discriminator outputs
with tf.variable_scope("Discriminator_Model"):
self.embedding_enc_fake, self.decoded_enc_fake = self.discriminator(
self.image_gen_enc, do_spectral_norm=self.config.trainer.do_spectral_norm
)
self.embedding_enc_real, self.decoded_enc_real = self.discriminator(
self.image_input, do_spectral_norm=self.config.trainer.do_spectral_norm
)
with tf.variable_scope("Discriminator_Model_XX"):
self.im_logit_real, self.im_f_real = self.discriminator_xx(
self.image_input,
self.image_input,
do_spectral_norm=self.config.trainer.do_spectral_norm,
)
self.im_logit_fake, self.im_f_fake = self.discriminator_xx(
self.image_input,
self.image_gen_enc,
do_spectral_norm=self.config.trainer.do_spectral_norm,
)
# Third training part
with tf.variable_scope("Encoder_G_Model"):
self.image_encoded_r = self.encoder_g(self.image_input)
with tf.variable_scope("Generator_Model"):
self.image_gen_enc_r = self.generator(self.image_encoded_r)
with tf.variable_scope("Encoder_R_Model"):
self.image_ege = self.encoder_r(self.image_gen_enc_r)
with tf.variable_scope("Discriminator_Model_ZZ"):
self.z_logit_real, self.z_f_real = self.discriminator_zz(
self.image_encoded_r,
self.image_encoded_r,
do_spectral_norm=self.config.trainer.do_spectral_norm,
)
self.z_logit_fake, self.z_f_fake = self.discriminator_zz(
self.image_encoded_r,
self.image_ege,
do_spectral_norm=self.config.trainer.do_spectral_norm,
)
############################################################################################
# LOSS FUNCTIONS
############################################################################################
with tf.name_scope("Loss_Functions"):
with tf.name_scope("Generator_Discriminator"):
# Discriminator Loss
if self.config.trainer.mse_mode == "norm":
self.disc_loss_real = tf.reduce_mean(
self.mse_loss(
self.decoded_real,
self.image_input,
mode="norm",
order=self.config.trainer.order,
)
)
self.disc_loss_fake = tf.reduce_mean(
self.mse_loss(
self.decoded_fake,
self.image_gen,
mode="norm",
order=self.config.trainer.order,
)
)
elif self.config.trainer.mse_mode == "mse":
self.disc_loss_real = self.mse_loss(
self.decoded_real,
self.image_input,
mode="mse",
order=self.config.trainer.order,
)
self.disc_loss_fake = self.mse_loss(
self.decoded_fake,
self.image_gen,
mode="mse",
order=self.config.trainer.order,
)
self.loss_discriminator = (
tf.math.maximum(self.config.trainer.disc_margin - self.disc_loss_fake, 0)
+ self.disc_loss_real
)
# Generator Loss
pt_loss = 0
if self.config.trainer.pullaway:
pt_loss = self.pullaway_loss(self.embedding_fake)
self.loss_generator = self.disc_loss_fake + self.config.trainer.pt_weight * pt_loss
# New addition to enforce visual similarity
delta_noise = self.embedding_real - self.embedding_fake
delta_flat = tf.layers.Flatten()(delta_noise)
loss_noise_gen = tf.reduce_mean(tf.norm(delta_flat, ord=2, axis=1, keepdims=False))
self.loss_generator += 0.1 * loss_noise_gen
with tf.name_scope("Encoder_G"):
if self.config.trainer.mse_mode == "norm":
self.loss_enc_rec = tf.reduce_mean(
self.mse_loss(
self.image_gen_enc,
self.image_input,
mode="norm",
order=self.config.trainer.order,
)
)
self.loss_enc_f = tf.reduce_mean(
self.mse_loss(
self.decoded_enc_real,
self.decoded_enc_fake,
mode="norm",
order=self.config.trainer.order,
)
)
elif self.config.trainer.mse_mode == "mse":
self.loss_enc_rec = tf.reduce_mean(
self.mse_loss(
self.image_gen_enc,
self.image_input,
mode="mse",
order=self.config.trainer.order,
)
)
self.loss_enc_f = tf.reduce_mean(
self.mse_loss(
self.embedding_enc_real,
self.embedding_enc_fake,
mode="mse",
order=self.config.trainer.order,
)
)
self.loss_encoder_g = (
self.loss_enc_rec + self.config.trainer.encoder_f_factor * self.loss_enc_f
)
if self.config.trainer.enable_disc_xx:
self.enc_xx_real = tf.nn.sigmoid_cross_entropy_with_logits(
logits=self.im_logit_real, labels=tf.zeros_like(self.im_logit_real)
)
self.enc_xx_fake = tf.nn.sigmoid_cross_entropy_with_logits(
logits=self.im_logit_fake, labels=tf.ones_like(self.im_logit_fake)
)
self.enc_loss_xx = tf.reduce_mean(self.enc_xx_real + self.enc_xx_fake)
self.loss_encoder_g += self.enc_loss_xx
with tf.name_scope("Encoder_R"):
if self.config.trainer.mse_mode == "norm":
self.loss_encoder_r = tf.reduce_mean(
self.mse_loss(
self.image_ege,
self.image_encoded_r,
mode="norm",
order=self.config.trainer.order,
)
)
elif self.config.trainer.mse_mode == "mse":
self.loss_encoder_r = tf.reduce_mean(
self.mse_loss(
self.image_ege,
self.image_encoded_r,
mode="mse",
order=self.config.trainer.order,
)
)
if self.config.trainer.enable_disc_zz:
self.enc_zz_real = tf.nn.sigmoid_cross_entropy_with_logits(
logits=self.z_logit_real, labels=tf.zeros_like(self.z_logit_real)
)
self.enc_zz_fake = tf.nn.sigmoid_cross_entropy_with_logits(
logits=self.z_logit_fake, labels=tf.ones_like(self.z_logit_fake)
)
self.enc_loss_zz = tf.reduce_mean(self.enc_zz_real + self.enc_zz_fake)
self.loss_encoder_r += self.enc_loss_zz
if self.config.trainer.enable_disc_xx:
with tf.name_scope("Discriminator_XX"):
self.loss_xx_real = tf.nn.sigmoid_cross_entropy_with_logits(
logits=self.im_logit_real, labels=tf.ones_like(self.im_logit_real)
)
self.loss_xx_fake = tf.nn.sigmoid_cross_entropy_with_logits(
logits=self.im_logit_fake, labels=tf.zeros_like(self.im_logit_fake)
)
self.dis_loss_xx = tf.reduce_mean(self.loss_xx_real + self.loss_xx_fake)
if self.config.trainer.enable_disc_zz:
with tf.name_scope("Discriminator_ZZ"):
self.loss_zz_real = tf.nn.sigmoid_cross_entropy_with_logits(
logits=self.z_logit_real, labels=tf.ones_like(self.z_logit_real)
)
self.loss_zz_fake = tf.nn.sigmoid_cross_entropy_with_logits(
logits=self.z_logit_fake, labels=tf.zeros_like(self.z_logit_fake)
)
self.dis_loss_zz = tf.reduce_mean(self.loss_zz_real + self.loss_zz_fake)
############################################################################################
# OPTIMIZERS
############################################################################################
with tf.name_scope("Optimizers"):
self.generator_optimizer = tf.train.AdamOptimizer(
self.config.trainer.standard_lr_gen,
beta1=self.config.trainer.optimizer_adam_beta1,
beta2=self.config.trainer.optimizer_adam_beta2,
)
self.encoder_g_optimizer = tf.train.AdamOptimizer(
self.config.trainer.standard_lr_enc,
beta1=self.config.trainer.optimizer_adam_beta1,
beta2=self.config.trainer.optimizer_adam_beta2,
)
self.encoder_r_optimizer = tf.train.AdamOptimizer(
self.config.trainer.standard_lr_enc,
beta1=self.config.trainer.optimizer_adam_beta1,
beta2=self.config.trainer.optimizer_adam_beta2,
)
self.discriminator_optimizer = tf.train.AdamOptimizer(
self.config.trainer.standard_lr_dis,
beta1=self.config.trainer.optimizer_adam_beta1,
beta2=self.config.trainer.optimizer_adam_beta2,
)
# Collect all the variables
all_variables = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES)
# Generator Network Variables
self.generator_vars = [
v for v in all_variables if v.name.startswith("SENCEBGAN/Generator_Model")
]
# Discriminator Network Variables
self.discriminator_vars = [
v for v in all_variables if v.name.startswith("SENCEBGAN/Discriminator_Model")
]
            # Encoder Network Variables
self.encoder_g_vars = [
v for v in all_variables if v.name.startswith("SENCEBGAN/Encoder_G_Model")
]
self.encoder_r_vars = [
v for v in all_variables if v.name.startswith("SENCEBGAN/Encoder_R_Model")
]
self.dxxvars = [
v for v in all_variables if v.name.startswith("SENCEBGAN/Discriminator_Model_XX")
]
self.dzzvars = [
v for v in all_variables if v.name.startswith("SENCEBGAN/Discriminator_Model_ZZ")
]
# Generator Network Operations
self.gen_update_ops = tf.get_collection(
tf.GraphKeys.UPDATE_OPS, scope="SENCEBGAN/Generator_Model"
)
# Discriminator Network Operations
self.disc_update_ops = tf.get_collection(
tf.GraphKeys.UPDATE_OPS, scope="SENCEBGAN/Discriminator_Model"
)
self.encg_update_ops = tf.get_collection(
tf.GraphKeys.UPDATE_OPS, scope="SENCEBGAN/Encoder_G_Model"
)
self.encr_update_ops = tf.get_collection(
tf.GraphKeys.UPDATE_OPS, scope="SENCEBGAN/Encoder_R_Model"
)
self.update_ops_dis_xx = tf.get_collection(
tf.GraphKeys.UPDATE_OPS, scope="SENCEBGAN/Discriminator_Model_XX"
)
self.update_ops_dis_zz = tf.get_collection(
tf.GraphKeys.UPDATE_OPS, scope="SENCEBGAN/Discriminator_Model_ZZ"
)
with tf.control_dependencies(self.gen_update_ops):
self.gen_op = self.generator_optimizer.minimize(
self.loss_generator,
var_list=self.generator_vars,
global_step=self.global_step_tensor,
)
with tf.control_dependencies(self.disc_update_ops):
self.disc_op = self.discriminator_optimizer.minimize(
self.loss_discriminator, var_list=self.discriminator_vars
)
with tf.control_dependencies(self.encg_update_ops):
self.encg_op = self.encoder_g_optimizer.minimize(
self.loss_encoder_g,
var_list=self.encoder_g_vars,
global_step=self.global_step_tensor,
)
with tf.control_dependencies(self.encr_update_ops):
self.encr_op = self.encoder_r_optimizer.minimize(
self.loss_encoder_r,
var_list=self.encoder_r_vars,
global_step=self.global_step_tensor,
)
if self.config.trainer.enable_disc_xx:
with tf.control_dependencies(self.update_ops_dis_xx):
self.disc_op_xx = self.discriminator_optimizer.minimize(
self.dis_loss_xx, var_list=self.dxxvars
)
if self.config.trainer.enable_disc_zz:
with tf.control_dependencies(self.update_ops_dis_zz):
self.disc_op_zz = self.discriminator_optimizer.minimize(
self.dis_loss_zz, var_list=self.dzzvars
)
# Exponential Moving Average for Estimation
self.dis_ema = tf.train.ExponentialMovingAverage(decay=self.config.trainer.ema_decay)
maintain_averages_op_dis = self.dis_ema.apply(self.discriminator_vars)
self.gen_ema = tf.train.ExponentialMovingAverage(decay=self.config.trainer.ema_decay)
maintain_averages_op_gen = self.gen_ema.apply(self.generator_vars)
self.encg_ema = tf.train.ExponentialMovingAverage(decay=self.config.trainer.ema_decay)
maintain_averages_op_encg = self.encg_ema.apply(self.encoder_g_vars)
self.encr_ema = tf.train.ExponentialMovingAverage(decay=self.config.trainer.ema_decay)
maintain_averages_op_encr = self.encr_ema.apply(self.encoder_r_vars)
if self.config.trainer.enable_disc_xx:
self.dis_xx_ema = tf.train.ExponentialMovingAverage(
decay=self.config.trainer.ema_decay
)
maintain_averages_op_dis_xx = self.dis_xx_ema.apply(self.dxxvars)
if self.config.trainer.enable_disc_zz:
self.dis_zz_ema = tf.train.ExponentialMovingAverage(
decay=self.config.trainer.ema_decay
)
maintain_averages_op_dis_zz = self.dis_zz_ema.apply(self.dzzvars)
with tf.control_dependencies([self.disc_op]):
self.train_dis_op = tf.group(maintain_averages_op_dis)
with tf.control_dependencies([self.gen_op]):
self.train_gen_op = tf.group(maintain_averages_op_gen)
with tf.control_dependencies([self.encg_op]):
self.train_enc_g_op = tf.group(maintain_averages_op_encg)
with tf.control_dependencies([self.encr_op]):
self.train_enc_r_op = tf.group(maintain_averages_op_encr)
if self.config.trainer.enable_disc_xx:
with tf.control_dependencies([self.disc_op_xx]):
self.train_dis_op_xx = tf.group(maintain_averages_op_dis_xx)
if self.config.trainer.enable_disc_zz:
with tf.control_dependencies([self.disc_op_zz]):
self.train_dis_op_zz = tf.group(maintain_averages_op_dis_zz)
############################################################################################
# TESTING
############################################################################################
self.logger.info("Building Testing Graph...")
with tf.variable_scope("SENCEBGAN"):
with tf.variable_scope("Discriminator_Model"):
self.embedding_q_ema, self.decoded_q_ema = self.discriminator(
self.image_input,
getter=get_getter(self.dis_ema),
do_spectral_norm=self.config.trainer.do_spectral_norm,
)
with tf.variable_scope("Generator_Model"):
self.image_gen_ema = self.generator(
self.embedding_q_ema, getter=get_getter(self.gen_ema)
)
with tf.variable_scope("Discriminator_Model"):
self.embedding_rec_ema, self.decoded_rec_ema = self.discriminator(
self.image_gen_ema,
getter=get_getter(self.dis_ema),
do_spectral_norm=self.config.trainer.do_spectral_norm,
)
# Second Training Part
with tf.variable_scope("Encoder_G_Model"):
self.image_encoded_ema = self.encoder_g(
self.image_input, getter=get_getter(self.encg_ema)
)
with tf.variable_scope("Generator_Model"):
self.image_gen_enc_ema = self.generator(
self.image_encoded_ema, getter=get_getter(self.gen_ema)
)
with tf.variable_scope("Discriminator_Model"):
self.embedding_enc_fake_ema, self.decoded_enc_fake_ema = self.discriminator(
self.image_gen_enc_ema,
getter=get_getter(self.dis_ema),
do_spectral_norm=self.config.trainer.do_spectral_norm,
)
self.embedding_enc_real_ema, self.decoded_enc_real_ema = self.discriminator(
self.image_input,
getter=get_getter(self.dis_ema),
do_spectral_norm=self.config.trainer.do_spectral_norm,
)
if self.config.trainer.enable_disc_xx:
with tf.variable_scope("Discriminator_Model_XX"):
self.im_logit_real_ema, self.im_f_real_ema = self.discriminator_xx(
self.image_input,
self.image_input,
getter=get_getter(self.dis_xx_ema),
do_spectral_norm=self.config.trainer.do_spectral_norm,
)
self.im_logit_fake_ema, self.im_f_fake_ema = self.discriminator_xx(
self.image_input,
self.image_gen_enc_ema,
getter=get_getter(self.dis_xx_ema),
do_spectral_norm=self.config.trainer.do_spectral_norm,
)
# Third training part
with tf.variable_scope("Encoder_G_Model"):
self.image_encoded_r_ema = self.encoder_g(self.image_input)
with tf.variable_scope("Generator_Model"):
self.image_gen_enc_r_ema = self.generator(self.image_encoded_r_ema)
with tf.variable_scope("Encoder_R_Model"):
self.image_ege_ema = self.encoder_r(self.image_gen_enc_r_ema)
with tf.variable_scope("Discriminator_Model"):
self.embedding_encr_fake_ema, self.decoded_encr_fake_ema = self.discriminator(
self.image_gen_enc_r_ema,
getter=get_getter(self.dis_ema),
do_spectral_norm=self.config.trainer.do_spectral_norm,
)
self.embedding_encr_real_ema, self.decoded_encr_real_ema = self.discriminator(
self.image_input,
getter=get_getter(self.dis_ema),
do_spectral_norm=self.config.trainer.do_spectral_norm,
)
if self.config.trainer.enable_disc_zz:
with tf.variable_scope("Discriminator_Model_ZZ"):
self.z_logit_real_ema, self.z_f_real_ema = self.discriminator_zz(
self.image_encoded_r_ema,
self.image_encoded_r_ema,
getter=get_getter(self.dis_zz_ema),
do_spectral_norm=self.config.trainer.do_spectral_norm,
)
self.z_logit_fake_ema, self.z_f_fake_ema = self.discriminator_zz(
self.image_encoded_r_ema,
self.image_ege_ema,
getter=get_getter(self.dis_zz_ema),
do_spectral_norm=self.config.trainer.do_spectral_norm,
)
with tf.name_scope("Testing"):
with tf.name_scope("Image_Based"):
delta = self.image_input - self.image_gen_enc_ema
self.rec_residual = -delta
delta_flat = tf.layers.Flatten()(delta)
img_score_l1 = tf.norm(
delta_flat, ord=2, axis=1, keepdims=False, name="img_loss__1"
)
self.img_score_l1 = tf.squeeze(img_score_l1)
delta = self.decoded_enc_fake_ema - self.decoded_enc_real_ema
delta_flat = tf.layers.Flatten()(delta)
img_score_l2 = tf.norm(
delta_flat, ord=2, axis=1, keepdims=False, name="img_loss__2"
)
self.img_score_l2 = tf.squeeze(img_score_l2)
with tf.name_scope("Noise_Based"):
delta = self.image_encoded_r_ema - self.image_ege_ema
delta_flat = tf.layers.Flatten()(delta)
final_score_1 = tf.norm(
delta_flat, ord=2, axis=1, keepdims=False, name="final_score_1"
)
self.final_score_1 = tf.squeeze(final_score_1)
self.score_comb_im = (
1 * self.img_score_l1
+ self.feature_match1 * self.final_score_1
)
delta = self.image_encoded_r_ema - self.embedding_enc_fake_ema
delta_flat = tf.layers.Flatten()(delta)
final_score_2 = tf.norm(
delta_flat, ord=2, axis=1, keepdims=False, name="final_score_2"
)
self.final_score_2 = tf.squeeze(final_score_2)
delta = self.embedding_encr_real_ema - self.embedding_encr_fake_ema
delta_flat = tf.layers.Flatten()(delta)
final_score_3 = tf.norm(
delta_flat, ord=2, axis=1, keepdims=False, name="final_score_3"
)
self.final_score_3 = tf.squeeze(final_score_3)
# Combo 1
self.score_comb_z = (
(1 - self.feature_match2) * self.final_score_2
+ self.feature_match2 * self.final_score_3
)
# Combo 2
if self.config.trainer.enable_disc_xx:
delta = self.im_f_real_ema - self.im_f_fake_ema
delta_flat = tf.layers.Flatten()(delta)
final_score_4 = tf.norm(
delta_flat, ord=1, axis=1, keepdims=False, name="final_score_4"
)
self.final_score_4 = tf.squeeze(final_score_4)
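                    # NOTE: z_f_real_ema / z_f_fake_ema below are only defined when
                    # config.trainer.enable_disc_zz is also enabled.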
delta = self.z_f_real_ema - self.z_f_fake_ema
delta_flat = tf.layers.Flatten()(delta)
final_score_6 = tf.norm(
delta_flat, ord=1, axis=1, keepdims=False, name="final_score_6"
)
self.final_score_6 = tf.squeeze(final_score_6)
############################################################################################
# TENSORBOARD
############################################################################################
if self.config.log.enable_summary:
with tf.name_scope("train_summary"):
with tf.name_scope("dis_summary"):
tf.summary.scalar("loss_disc", self.loss_discriminator, ["dis"])
tf.summary.scalar("loss_disc_real", self.disc_loss_real, ["dis"])
tf.summary.scalar("loss_disc_fake", self.disc_loss_fake, ["dis"])
if self.config.trainer.enable_disc_xx:
tf.summary.scalar("loss_dis_xx", self.dis_loss_xx, ["enc_g"])
if self.config.trainer.enable_disc_zz:
tf.summary.scalar("loss_dis_zz", self.dis_loss_zz, ["enc_r"])
with tf.name_scope("gen_summary"):
tf.summary.scalar("loss_generator", self.loss_generator, ["gen"])
with tf.name_scope("enc_summary"):
tf.summary.scalar("loss_encoder_g", self.loss_encoder_g, ["enc_g"])
tf.summary.scalar("loss_encoder_r", self.loss_encoder_r, ["enc_r"])
with tf.name_scope("img_summary"):
tf.summary.image("input_image", self.image_input, 1, ["img_1"])
tf.summary.image("reconstructed", self.image_gen, 1, ["img_1"])
# From discriminator in part 1
tf.summary.image("decoded_real", self.decoded_real, 1, ["img_1"])
tf.summary.image("decoded_fake", self.decoded_fake, 1, ["img_1"])
# Second Stage of Training
tf.summary.image("input_enc", self.image_input, 1, ["img_2"])
tf.summary.image("reconstructed", self.image_gen_enc, 1, ["img_2"])
# From discriminator in part 2
tf.summary.image("decoded_enc_real", self.decoded_enc_real, 1, ["img_2"])
tf.summary.image("decoded_enc_fake", self.decoded_enc_fake, 1, ["img_2"])
# Testing
tf.summary.image("input_image", self.image_input, 1, ["test"])
tf.summary.image("reconstructed", self.image_gen_enc_r_ema, 1, ["test"])
tf.summary.image("residual", self.rec_residual, 1, ["test"])
self.sum_op_dis = tf.summary.merge_all("dis")
self.sum_op_gen = tf.summary.merge_all("gen")
self.sum_op_enc_g = tf.summary.merge_all("enc_g")
self.sum_op_enc_r = tf.summary.merge_all("enc_r")
self.sum_op_im_1 = tf.summary.merge_all("img_1")
self.sum_op_im_2 = tf.summary.merge_all("img_2")
self.sum_op_im_test = tf.summary.merge_all("test")
self.sum_op = tf.summary.merge([self.sum_op_dis, self.sum_op_gen])
###############################################################################################
# MODULES
###############################################################################################
def generator(self, noise_input, getter=None):
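        # Maps the latent vector to a 2x2x256 tensor and upsamples it through four
        # stride-2 transposed convolutions to a tanh-activated single-channel image.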
with tf.variable_scope("Generator", custom_getter=getter, reuse=tf.AUTO_REUSE):
net_name = "Layer_1"
with tf.variable_scope(net_name):
x_g = tf.layers.Dense(
units=2 * 2 * 256, kernel_initializer=self.init_kernel, name="fc"
)(noise_input)
x_g = tf.layers.batch_normalization(
x_g,
momentum=self.config.trainer.batch_momentum,
training=self.is_training_gen,
name="batch_normalization",
)
x_g = tf.nn.leaky_relu(
features=x_g, alpha=self.config.trainer.leakyReLU_alpha, name="relu"
)
x_g = tf.reshape(x_g, [-1, 2, 2, 256])
net_name = "Layer_2"
with tf.variable_scope(net_name):
x_g = tf.layers.Conv2DTranspose(
filters=128,
kernel_size=5,
strides=2,
padding="same",
kernel_initializer=self.init_kernel,
name="conv2t",
)(x_g)
x_g = tf.layers.batch_normalization(
x_g,
momentum=self.config.trainer.batch_momentum,
training=self.is_training_gen,
name="batch_normalization",
)
x_g = tf.nn.leaky_relu(
features=x_g, alpha=self.config.trainer.leakyReLU_alpha, name="relu"
)
net_name = "Layer_3"
with tf.variable_scope(net_name):
x_g = tf.layers.Conv2DTranspose(
filters=64,
kernel_size=5,
strides=2,
padding="same",
kernel_initializer=self.init_kernel,
name="conv2t",
)(x_g)
x_g = tf.layers.batch_normalization(
x_g,
momentum=self.config.trainer.batch_momentum,
training=self.is_training_gen,
name="batch_normalization",
)
x_g = tf.nn.leaky_relu(
features=x_g, alpha=self.config.trainer.leakyReLU_alpha, name="relu"
)
net_name = "Layer_4"
with tf.variable_scope(net_name):
x_g = tf.layers.Conv2DTranspose(
filters=32,
kernel_size=5,
strides=2,
padding="same",
kernel_initializer=self.init_kernel,
name="conv2t",
)(x_g)
x_g = tf.layers.batch_normalization(
x_g,
momentum=self.config.trainer.batch_momentum,
training=self.is_training_gen,
name="batch_normalization",
)
x_g = tf.nn.leaky_relu(
features=x_g, alpha=self.config.trainer.leakyReLU_alpha, name="relu"
)
net_name = "Layer_5"
with tf.variable_scope(net_name):
x_g = tf.layers.Conv2DTranspose(
filters=1,
kernel_size=5,
strides=2,
padding="same",
kernel_initializer=self.init_kernel,
name="conv2t",
)(x_g)
x_g = tf.tanh(x_g, name="tanh")
return x_g
def discriminator(self, image_input, getter=None, do_spectral_norm=False):
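        # Autoencoding discriminator: a convolutional encoder maps the image to a
        # noise_dim embedding and a transposed-convolution decoder reconstructs it.
        # Returns (embedding, decoded).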
layers = sn if do_spectral_norm else tf.layers
with tf.variable_scope("Discriminator", custom_getter=getter, reuse=tf.AUTO_REUSE):
with tf.variable_scope("Encoder"):
x_e = tf.reshape(
image_input,
[-1, self.config.data_loader.image_size, self.config.data_loader.image_size, 1],
)
net_name = "Layer_1"
with tf.variable_scope(net_name):
x_e = layers.conv2d(
x_e,
filters=32,
kernel_size=5,
strides=2,
padding="same",
kernel_initializer=self.init_kernel,
name="conv",
)
x_e = tf.nn.leaky_relu(
features=x_e, alpha=self.config.trainer.leakyReLU_alpha, name="leaky_relu"
)
                # 16 x 16 x 32
net_name = "Layer_2"
with tf.variable_scope(net_name):
x_e = layers.conv2d(
x_e,
filters=64,
kernel_size=5,
padding="same",
strides=2,
kernel_initializer=self.init_kernel,
name="conv",
)
x_e = tf.layers.batch_normalization(
x_e,
momentum=self.config.trainer.batch_momentum,
training=self.is_training_dis,
)
x_e = tf.nn.leaky_relu(
features=x_e, alpha=self.config.trainer.leakyReLU_alpha, name="leaky_relu"
)
                # 8 x 8 x 64
net_name = "Layer_3"
with tf.variable_scope(net_name):
x_e = layers.conv2d(
x_e,
filters=128,
kernel_size=5,
padding="same",
strides=2,
kernel_initializer=self.init_kernel,
name="conv",
)
x_e = tf.layers.batch_normalization(
x_e,
momentum=self.config.trainer.batch_momentum,
training=self.is_training_dis,
)
x_e = tf.nn.leaky_relu(
features=x_e, alpha=self.config.trainer.leakyReLU_alpha, name="leaky_relu"
)
                # 4 x 4 x 128
x_e = tf.layers.Flatten()(x_e)
net_name = "Layer_4"
with tf.variable_scope(net_name):
x_e = layers.dense(
x_e,
units=self.config.trainer.noise_dim,
kernel_initializer=self.init_kernel,
name="fc",
)
embedding = x_e
with tf.variable_scope("Decoder"):
net = tf.reshape(embedding, [-1, 1, 1, self.config.trainer.noise_dim])
net_name = "layer_1"
with tf.variable_scope(net_name):
net = tf.layers.Conv2DTranspose(
filters=256,
kernel_size=5,
strides=(2, 2),
padding="same",
kernel_initializer=self.init_kernel,
name="tconv1",
)(net)
net = tf.layers.batch_normalization(
inputs=net,
momentum=self.config.trainer.batch_momentum,
training=self.is_training_dis,
name="tconv1/bn",
)
net = tf.nn.relu(features=net, name="tconv1/relu")
net_name = "layer_2"
with tf.variable_scope(net_name):
net = tf.layers.Conv2DTranspose(
filters=128,
kernel_size=5,
strides=(2, 2),
padding="same",
kernel_initializer=self.init_kernel,
name="tconv2",
)(net)
net = tf.layers.batch_normalization(
inputs=net,
momentum=self.config.trainer.batch_momentum,
training=self.is_training_dis,
name="tconv2/bn",
)
net = tf.nn.relu(features=net, name="tconv2/relu")
net_name = "layer_3"
with tf.variable_scope(net_name):
net = tf.layers.Conv2DTranspose(
filters=64,
kernel_size=5,
strides=(2, 2),
padding="same",
kernel_initializer=self.init_kernel,
name="tconv3",
)(net)
net = tf.layers.batch_normalization(
inputs=net,
momentum=self.config.trainer.batch_momentum,
training=self.is_training_dis,
name="tconv3/bn",
)
net = tf.nn.relu(features=net, name="tconv3/relu")
net_name = "layer_4"
with tf.variable_scope(net_name):
net = tf.layers.Conv2DTranspose(
filters=32,
kernel_size=5,
strides=(2, 2),
padding="same",
kernel_initializer=self.init_kernel,
name="tconv4",
)(net)
net = tf.layers.batch_normalization(
inputs=net,
momentum=self.config.trainer.batch_momentum,
training=self.is_training_dis,
name="tconv4/bn",
)
net = tf.nn.relu(features=net, name="tconv4/relu")
net_name = "layer_5"
with tf.variable_scope(net_name):
net = tf.layers.Conv2DTranspose(
filters=1,
kernel_size=5,
strides=(2, 2),
padding="same",
kernel_initializer=self.init_kernel,
name="tconv5",
)(net)
decoded = tf.nn.tanh(net, name="tconv5/tanh")
return embedding, decoded
def encoder_g(self, image_input, getter=None):
with tf.variable_scope("Encoder_G", custom_getter=getter, reuse=tf.AUTO_REUSE):
x_e = tf.reshape(
image_input,
[-1, self.config.data_loader.image_size, self.config.data_loader.image_size, 1],
)
net_name = "Layer_1"
with tf.variable_scope(net_name):
x_e = tf.layers.Conv2D(
filters=64,
kernel_size=5,
strides=(2, 2),
padding="same",
kernel_initializer=self.init_kernel,
name="conv",
)(x_e)
x_e = tf.layers.batch_normalization(
x_e,
momentum=self.config.trainer.batch_momentum,
training=self.is_training_enc_g,
)
x_e = tf.nn.leaky_relu(
features=x_e, alpha=self.config.trainer.leakyReLU_alpha, name="leaky_relu"
)
net_name = "Layer_2"
with tf.variable_scope(net_name):
x_e = tf.layers.Conv2D(
filters=128,
kernel_size=5,
padding="same",
strides=(2, 2),
kernel_initializer=self.init_kernel,
name="conv",
)(x_e)
x_e = tf.layers.batch_normalization(
x_e,
momentum=self.config.trainer.batch_momentum,
training=self.is_training_enc_g,
)
x_e = tf.nn.leaky_relu(
features=x_e, alpha=self.config.trainer.leakyReLU_alpha, name="leaky_relu"
)
net_name = "Layer_3"
with tf.variable_scope(net_name):
x_e = tf.layers.Conv2D(
filters=256,
kernel_size=5,
padding="same",
strides=(2, 2),
kernel_initializer=self.init_kernel,
name="conv",
)(x_e)
x_e = tf.layers.batch_normalization(
x_e,
momentum=self.config.trainer.batch_momentum,
training=self.is_training_enc_g,
)
x_e = tf.nn.leaky_relu(
features=x_e, alpha=self.config.trainer.leakyReLU_alpha, name="leaky_relu"
)
x_e = tf.layers.Flatten()(x_e)
net_name = "Layer_4"
with tf.variable_scope(net_name):
x_e = tf.layers.Dense(
units=self.config.trainer.noise_dim,
kernel_initializer=self.init_kernel,
name="fc",
)(x_e)
return x_e
def encoder_r(self, image_input, getter=None):
with tf.variable_scope("Encoder_R", custom_getter=getter, reuse=tf.AUTO_REUSE):
x_e = tf.reshape(
image_input,
[-1, self.config.data_loader.image_size, self.config.data_loader.image_size, 1],
)
net_name = "Layer_1"
with tf.variable_scope(net_name):
x_e = tf.layers.Conv2D(
filters=64,
kernel_size=5,
strides=(2, 2),
padding="same",
kernel_initializer=self.init_kernel,
name="conv",
)(x_e)
x_e = tf.layers.batch_normalization(
x_e,
momentum=self.config.trainer.batch_momentum,
training=self.is_training_enc_r,
)
x_e = tf.nn.leaky_relu(
features=x_e, alpha=self.config.trainer.leakyReLU_alpha, name="leaky_relu"
)
net_name = "Layer_2"
with tf.variable_scope(net_name):
x_e = tf.layers.Conv2D(
filters=128,
kernel_size=5,
padding="same",
strides=(2, 2),
kernel_initializer=self.init_kernel,
name="conv",
)(x_e)
x_e = tf.layers.batch_normalization(
x_e,
momentum=self.config.trainer.batch_momentum,
training=self.is_training_enc_r,
)
x_e = tf.nn.leaky_relu(
features=x_e, alpha=self.config.trainer.leakyReLU_alpha, name="leaky_relu"
)
net_name = "Layer_3"
with tf.variable_scope(net_name):
x_e = tf.layers.Conv2D(
filters=256,
kernel_size=5,
padding="same",
strides=(2, 2),
kernel_initializer=self.init_kernel,
name="conv",
)(x_e)
x_e = tf.layers.batch_normalization(
x_e,
momentum=self.config.trainer.batch_momentum,
training=self.is_training_enc_r,
)
x_e = tf.nn.leaky_relu(
features=x_e, alpha=self.config.trainer.leakyReLU_alpha, name="leaky_relu"
)
x_e = tf.layers.Flatten()(x_e)
net_name = "Layer_4"
with tf.variable_scope(net_name):
x_e = tf.layers.Dense(
units=self.config.trainer.noise_dim,
kernel_initializer=self.init_kernel,
name="fc",
)(x_e)
return x_e
# Regularizer discriminator for the Generator Encoder
def discriminator_xx(self, img_tensor, recreated_img, getter=None, do_spectral_norm=False):
""" Discriminator architecture in tensorflow
Discriminates between (x, x) and (x, rec_x)
Args:
img_tensor:
recreated_img:
getter: for exponential moving average during inference
reuse: sharing variables or not
do_spectral_norm:
"""
layers = sn if do_spectral_norm else tf.layers
with tf.variable_scope("Discriminator_xx", reuse=tf.AUTO_REUSE, custom_getter=getter):
net = tf.concat([img_tensor, recreated_img], axis=1)
net_name = "layer_1"
with tf.variable_scope(net_name):
net = layers.conv2d(
net,
filters=64,
kernel_size=4,
strides=2,
padding="same",
kernel_initializer=self.init_kernel,
name="conv1",
)
net = tf.nn.leaky_relu(
features=net, alpha=self.config.trainer.leakyReLU_alpha, name="conv2/leaky_relu"
)
net = tf.layers.dropout(
net,
rate=self.config.trainer.dropout_rate,
training=self.is_training_enc_g,
name="dropout",
)
with tf.variable_scope(net_name, reuse=True):
weights = tf.get_variable("conv1/kernel")
net_name = "layer_2"
with tf.variable_scope(net_name):
net = layers.conv2d(
net,
filters=128,
kernel_size=4,
strides=2,
padding="same",
kernel_initializer=self.init_kernel,
name="conv2",
)
net = tf.nn.leaky_relu(
features=net, alpha=self.config.trainer.leakyReLU_alpha, name="conv2/leaky_relu"
)
net = tf.layers.dropout(
net,
rate=self.config.trainer.dropout_rate,
training=self.is_training_enc_g,
name="dropout",
)
net = tf.layers.Flatten()(net)
intermediate_layer = net
net_name = "layer_3"
with tf.variable_scope(net_name):
net = tf.layers.dense(net, units=1, kernel_initializer=self.init_kernel, name="fc")
logits = tf.squeeze(net)
return logits, intermediate_layer
# Regularizer discriminator for the Reconstruction Encoder
def discriminator_zz(self, noise_tensor, recreated_noise, getter=None, do_spectral_norm=False):
""" Discriminator architecture in tensorflow
Discriminates between (z, z) and (z, rec_z)
Args:
noise_tensor:
recreated_noise:
getter: for exponential moving average during inference
reuse: sharing variables or not
do_spectral_norm:
"""
layers = sn if do_spectral_norm else tf.layers
with tf.variable_scope("Discriminator_zz", reuse=tf.AUTO_REUSE, custom_getter=getter):
y = tf.concat([noise_tensor, recreated_noise], axis=-1)
net_name = "y_layer_1"
with tf.variable_scope(net_name):
y = layers.dense(y, units=64, kernel_initializer=self.init_kernel, name="fc")
y = tf.nn.leaky_relu(features=y, alpha=self.config.trainer.leakyReLU_alpha)
y = tf.layers.dropout(
y,
rate=self.config.trainer.dropout_rate,
training=self.is_training_enc_r,
name="dropout",
)
net_name = "y_layer_2"
with tf.variable_scope(net_name):
y = layers.dense(y, units=32, kernel_initializer=self.init_kernel, name="fc")
y = tf.nn.leaky_relu(features=y, alpha=self.config.trainer.leakyReLU_alpha)
y = tf.layers.dropout(
y,
rate=self.config.trainer.dropout_rate,
training=self.is_training_enc_r,
name="dropout",
)
intermediate_layer = y
net_name = "y_layer_3"
with tf.variable_scope(net_name):
y = layers.dense(y, units=1, kernel_initializer=self.init_kernel, name="fc")
logits = tf.squeeze(y)
return logits, intermediate_layer
###############################################################################################
# CUSTOM LOSSES
###############################################################################################
def mse_loss(self, pred, data, mode="norm", order=2):
if mode == "norm":
delta = pred - data
delta = tf.layers.Flatten()(delta)
loss_val = tf.norm(delta, ord=order, axis=1, keepdims=False)
elif mode == "mse":
loss_val = tf.reduce_mean(tf.squared_difference(pred, data))
return loss_val
def pullaway_loss(self, embeddings):
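        # Pulling-away regularizer: the mean pairwise cosine similarity between the batch
        # embeddings, excluding self-similarity, which penalizes embeddings that cluster together.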
norm = tf.sqrt(tf.reduce_sum(tf.square(embeddings), 1, keepdims=True))
normalized_embeddings = embeddings / norm
similarity = tf.matmul(normalized_embeddings, normalized_embeddings, transpose_b=True)
batch_size = tf.cast(tf.shape(embeddings)[0], tf.float32)
pt_loss = (tf.reduce_sum(similarity) - batch_size) / (batch_size * (batch_size - 1))
return pt_loss
def init_saver(self):
self.saver = tf.train.Saver(max_to_keep=self.config.log.max_to_keep)
|
# Functions
import operator
# Modules
from django.shortcuts import render_to_response
from django.core.context_processors import csrf
from django.template import RequestContext
from django.contrib.auth.models import User
from django.http import HttpResponse
# Database
from ftft.canzoni.models import canzone, licenze
from ftft.gruppi.models import gruppo, influence, famusartist
from ftft.accountsys.models import Playlist, Rate, Topten
from ftft.search.forms import songform
# SEARCH BY TAG -----------------------------------------------------------------------------------------
def search(request):
var = {}
var.update(csrf(request))
# form area
form = songform()
var.update({'form':form })
'''if request.method =='POST':
formset=songform(request.POST)
# searchbytag('nome','mood(id)','licenza(id)','genere','titolo')
selezione = searchbytag(request.POST['gruppo'],request.POST['moood'],request.POST['licenza'],request.POST['genere'],request.POST['titolo'])
var.update({'selezione':selezione})'''
    # extract the influences
influenze = famusartist.objects.order_by('nome')
var.update({'influenze':influenze })
return render_to_response('search.html',var,context_instance=RequestContext(request))
# LINK SEARCH BY TAG
def searchByTag(request):
if request.method =='POST':
#formset=songform(request.POST)
selezione = searchbytag(request.POST['titolo'],request.POST['gruppo'],request.POST['licenza'],request.POST['moood'],request.POST['genere'])
# searchbytag('titolo','gruppo','licenza(id)','mood(id)','genere')
return render_to_response('moduli/searchresult.html',{'selezione':selezione})
# FUNCTION selection by tag-----------------------------------------------------------------------------------------
def searchbytag(titolo,nomegruppo,licenza,mood,genere):
    if not titolo and not nomegruppo and not licenza and not mood and not genere:
        result = "no found"
else:
result = "no found"
        # 0 Open the search arrays
        # 2 Check whether the fields were provided
if titolo:
titolo= canzone.objects.filter(titolo__icontains=titolo)
            # 4 check that the result is not empty
if titolo:
result = titolo
if nomegruppo:
nomegruppo= canzone.objects.filter(gruppo= gruppo.objects.filter(nome__icontains=nomegruppo))
            # 4 check that the result is not empty
if nomegruppo:
                # 5 if so, add it to the result
if result == "no found":
result = nomegruppo
else:
result = [i for i in result if i in nomegruppo]
if licenza:
licenza = canzone.objects.filter(licenza__in = licenze.objects.filter(id=licenza))
            # 4 check that the result is not empty
if licenza:
                # 5 if so, add it to the result
if result == "no found":
result = licenza
else:
result = [i for i in result if i in licenza]
if mood:
mood = canzone.objects.filter(moood=mood)
            # 4 check that the result is not empty
if mood:
                # 5 if so, add it to the result
if result == "no found":
result = mood
else:
result = [i for i in result if i in mood]
if genere:
genere = canzone.objects.filter(gruppo__in = gruppo.objects.filter(genere__icontains=genere))
            # 4 check that the result is not empty
if genere:
                # 5 if so, add it to the result
if result == "no found":
result = genere
else:
result = [i for i in result if i in genere]
return result
# SEARCH BY DRAG & DROP -----------------------------------------------------------------------------------------
# LINK SEARCH BY DRAGDROP
def searchByDragDrop(request):
if request.method =='POST':
        # Extract the data from POST and put it into an associative list
nvariabili= int((len(request.POST)-1)/2)
lista = []
for i in range(nvariabili):
i=str(i)
lista.append((request.POST['artista'+i],request.POST['peso'+i]))
        # Sort the list by weight
        lista = sorted(lista, key=lambda oggetto: oggetto[1], reverse=True)
        listart = [i[0] for i in lista]
        # Check whether the artists are in the database and add their ids to the ranking list
hitlist = []
for artista in listart:
artsel = famusartist.objects.filter(nome__icontains=artista)
if artsel:
for a in artsel:
hitlist.append(a.id)
        # Select the first 10
topten = hitlist[:10]
selezioneord = selectfromtopten(topten)
return render_to_response('moduli/searchresult.html',{'selezione':selezioneord})
#WORK IN PROGRESS
# SEARCH BY TOP TEN -----------------------------------------------------------------------------------------
# LINK SEARCH BY TOP TEN
def searchByTopTen(request):
if request.method =='POST':
topten = []
contatore = len(request.POST)-1
for i in range(contatore):
idg=request.POST['relations'+str(i+1)]
grobj = famusartist.objects.filter(id=idg)
topten.append(grobj)
selezioneord = searchByTopOnBands(topten,"on")
canzoni = frombandtosong(selezioneord)
#return HttpResponse(selezioneord)
return render_to_response('moduli/searchresult.html',{'selezione':canzoni})
def selectfromtopten(topten,usr=None):
sbtb = searchByTopOnBands(topten)
if usr:
sbtu = searchByTopOnUser(topten,usr=usr)
else:
sbtu = searchByTopOnUser(topten)
selezione= sbtb
    # update the weights and remove duplicates
for key,value in sbtu.items():
if key not in selezione:
selezione[key] = value
else:
selezione[key] += value
    # convert to tuples and sort by weight
selezionerisp = [(k,v) for k, v in selezione.items()]
#selezionerisp = sorted(selezionerisp, reverse=True)
selezionerisp = sorted(selezionerisp, key=lambda tup: tup[1], reverse=True)
    selezionerisp = [i[0] for i in selezionerisp] # extract the list of bands
    selezionerisp = frombandtosong(selezionerisp) # extract the recommended songs
return selezionerisp
# FUNCTION From Band List To Song List------------------------------------------------------------------------------
def frombandtosong(listagruppi):
    canzoni = [] # list that will hold the results
for g in listagruppi:
        #listacanz = canzone.objects.filter(gruppo=g)[:1]# uncomment when testing
listacanz = canzone.objects.filter(gruppo=g)
if len(listacanz)>1:
listacanz = listacanz.order_by('?')[1]
'''for c in listacanz:
if c not in canzoni:
canzoni.append(c)'''
canzoni.append(listacanz)
else:
canzoni.append(listacanz)
return canzoni
# FUNCTION From User To TopTen----------------------------------------------------------------------------------------
def fromusertotopten(userid):
topten = Topten.objects.filter(user=userid).order_by('posizione')
return topten
# FUNCTION Pearson Correlation-----------------------------------------------------------------------------------------
def pearson(x,y):
n=len(x)
vals=range(n)
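    # Pearson correlation coefficient:
    # r = (sum(xy) - sum(x)*sum(y)/n) / sqrt((sum(x^2) - sum(x)^2/n) * (sum(y^2) - sum(y)^2/n))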
#regular sums
sumx=sum([float(x[i]) for i in vals])
sumy=sum([float(y[i]) for i in vals])
#sum of the squares
sumxSq=sum([x[i]**2.0 for i in vals])
sumySq=sum([y[i]**2.0 for i in vals])
#sum of the products
pSum=sum([x[i]*y[i] for i in vals])
#do pearson score
num=pSum-(sumx*sumy/n)
den = pow((sumxSq - pow(sumx, 2) / n) * (sumySq - pow(sumy, 2) / n), 0.5)
if den==0:
r = 1
else:
r=num/den
return r
# Edit: return HttpResponse(list)#Check
# search by TOP-TEN ON BANDS-------------------------------------------------------------------------------------------------
# INPUT:1-10 SONGS LIST -> OUTPUT:BANDS dict----------------------------------------------------------------------------------
def searchByTopOnBands(topten,case=None):
startlist=topten
    x=10 #weight counter
    if case=="on":
        # SEARCH for the Search page
        gruppimod=dict() #dictionary of bands and their weights
        for infl in startlist:#For each influence take the related bands
gruppi = gruppo.objects.filter(influenze=infl)
            for g in gruppi: # For each band insert it or update its weight
pesogruppo = influence.objects.get(artist=infl,gruppo=g)
pesogruppo = 10
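                # NOTE: the influence weight looked up above is overridden with a constant 10 here.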
if g in gruppimod:
gruppimod[g] += x * pesogruppo
else:
gruppimod[g] = x * pesogruppo
x = x-1
else:
        # SEARCH for the Profile page
        gruppimod=dict() #dictionary of bands and their weights
        for infl in startlist:#For each influence take the related bands
try:
gruppi = gruppo.objects.filter(influenze=infl.artist)
except:
gruppi = gruppo.objects.filter(influenze=infl)
            for g in gruppi: # For each band insert it or update its weight
try:
pesogruppo = influence.objects.get(artist=infl.artist,gruppo=g)
except:
pesogruppo = influence.objects.get(artist=infl,gruppo=g)
pesogruppo = 11-pesogruppo.posizione
if g in gruppimod:
gruppimod[g] += x * pesogruppo
else:
gruppimod[g] = x * pesogruppo
x = x-1
return gruppimod
# To modify
# SEARCH BY TOP-TEN ON USER--------------------------------------------------------------------------------------------
# INPUT:1-10 SONGS LIST -> OUTPUT:BANDS dict----------------------------------------------------------------------------------
def searchByTopOnUser(topten,usr=None):
    utentimod = dict() # dictionary of users and weights
    x = 10 # weight counter
for famusart in topten:
try:
campitop = Topten.objects.filter(artist=famusart.artist)
except:
campitop = Topten.objects.filter(artist=famusart)
for campo in campitop:
peso = 11 - campo.posizione
if campo.user in utentimod:
utentimod[campo.user] += x * peso
else:
if campo.user != usr:
utentimod[campo.user] = x * peso
x = x-1
    #Extract 1) the best result, 2) the worst, 3) compute an average
if len(utentimod) > 0 :
mass_peso_ut = max([utentimod[i] for i in utentimod])
minm_peso_ut = min([utentimod[i] for i in utentimod])
med_peso = (mass_peso_ut + minm_peso_ut)/5
else :
mass_peso_ut=minm_peso_ut=med_peso=0
    #Check whether the user is logged in, in order to remove duplicate results
    # Sort the results and put the test usernames into a new list
    # resultlist = list(sorted(utentimod, key=utentimod.__getitem__, reverse=True))
    # END OF USER RANKING EXTRACTION
    # Extract the candidate bands and assign/add the user weight to them
    Diz_gruppi_peso = dict() # dict that will hold the results
for ut, peso in utentimod.items():
if peso > med_peso:
            listagruppi = gruppo.objects.filter(rate=Rate.objects.filter(user=ut).filter(valutazione__gt=3))# Filter on ratings > 3 / Remove the 5-filter outside of the test
for c in listagruppi:
if c in Diz_gruppi_peso:
Diz_gruppi_peso[c] += peso
else:
Diz_gruppi_peso[c] = peso
    #Get the maximum weight
if len(Diz_gruppi_peso)>0:
mass_peso_grup = max([Diz_gruppi_peso[i] for i in Diz_gruppi_peso])
else:
mass_peso_grup = 0
    #Normalize by the maximum user weight
Diz_gruppi_peso = dict((grupp, int((peso*mass_peso_ut)/mass_peso_grup)) for (grupp, peso) in Diz_gruppi_peso.items())
'''for grupp, peso in Diz_gruppi_peso.items():
peso = (peso*mass_peso_ut)/mass_peso_grup'''
return Diz_gruppi_peso
# EXTRACTOR PLAYLIST-------------------------------------------------------------------------------------------------
# INPUT:USER -> OUTPUT:PLAYLIST-----------------------------------------------------------------------------------------
def playlistextractor(iduser):
    # Get the user
user=User.objects.get(id=iduser)
playlist=canzone.objects.filter(playlist=Playlist.objects.filter(user=user))
return (playlist)
def fromsongtoband(idsong):
song = canzone.objects.get(id=idsong)
gruppi = []
for art in song.gruppo.all():
gruppi.append(art.id)
return gruppi
# SEARCH BY PLAYLIST-------------------------------------------------------------------------------------------------
# INPUT:USER -> OUTPUT:SONGS-----------------------------------------------------------------------------------------
'''def searchbyplaylist(iduser):
    # Get the user
user=User.objects.get(id=iduser)
playlist = canzone.objects.filter(playlist__user=iduser)
insplaylist = set(playlist)
    # Select users with at least one song in common
    usertest = User.objects.filter(playlist__canzone__in=playlist)# (canzone__in=playlist)?
    # Remove duplicates and the active user
usertest = list(set(usertest))
if user in usertest:
usertest.remove(user)
    # Initialize the playlist dictionary
    songmod = dict()
    # Compare the sets of songs
for test in usertest:
playtest=canzone.objects.filter(playlist=Playlist.objects.filter(user=test))
        # Convert to a set
insplaytest = set(playtest)
intersezione = insplaylist & insplaytest
peso = len(intersezione)
if peso == 1:
peso =1.5
differenza = insplaytest - insplaylist
for song in differenza:
if song in songmod:
songmod[song] += songmod[song] * peso
else:
songmod[song] = peso
resultlist = list(sorted(songmod, key=songmod.__getitem__, reverse=True))
    # FOR THE TEST, COMMENT OUT THE NEXT THREE LINES AND UNCOMMENT THE LAST ONE
#resultlist = resultlist[:30]
#shuffle(resultlist)
#resultlist = resultlist[:10]
resultlist = resultlist[:5]
return (resultlist)
'''
def searchbyplaylist(iduser):
    #initialize the user data
#user_id=request.user.id
user_id=iduser
user_pl_id= []
user_pl_art = []
    #initialize the similar users / playlist matrices
usertest_id = []
usertest_pl_id= []
usertest_pl_art = []
    #initialize the artists whose similarity weights will be computed
    art_sel = []
    #extract the user's playlist
    user_pl=playlistextractor(user_id)
    #extract the artists
for song in user_pl:
user_pl_id.append(song.id)
art = fromsongtoband(song.id)
for a in art:
if a not in user_pl_art:
user_pl_art.append(a)
    #extract the users that share artists in their playlists
usertest = Playlist.objects.filter(canzone__in=user_pl)
for us in usertest:
if us.user.id not in usertest_id and us.user.id != user_id:
usertest_id.append(us.user.id)
    #extract their playlists
    for u in usertest_id:
        #initialize the row
        riga_pl= []
        riga_art=[]
        #extract this user's playlist
        pl=playlistextractor(u)
        #extract the artists
for s in pl:
riga_pl.append(s.id)
art = fromsongtoband(s.id)
for a in art:
riga_art.append(a)
usertest_pl_id.append(riga_pl)
usertest_pl_art.append(riga_art)
    #select the artists for the similarity computation
for a in usertest_pl_art:
for x in a:
if x not in art_sel:
art_sel.append([x])
    #insert user_pl into the playlist matrix
    usertest_pl_art.append(user_pl_art)
    #build matrix 1 and vector 1
mat1=[]
for pl in usertest_pl_art:
riga = []
for s in user_pl_art:
if s in pl:
riga.append(1)
else:
riga.append(0)
mat1.append(riga)
vec_mat1=[]
for i in range(len(mat1)):
for j in range(len(mat1[0])):
if i == 0:
vec_mat1.append(mat1[i][j])
else:
vec_mat1[j]=vec_mat1[j]+mat1[i][j]
    #compute a similarity weight for each artist
    for art in art_sel:
        artist = art[0]
        #presence vector over the test playlists
vec_art = []
for pl in usertest_pl_art:
if artist in pl:
vec_art.append(1)
else:
vec_art.append(0)
        #sum the vector
        ris_test = 0
        for n in vec_art:
            ris_test = ris_test+n
        #matrix 2
mat2=[]
for j in range(len(vec_art)):
riga = []
for i in range(len(mat1[0])):
x=mat1[j][i]*vec_art[j]
riga.append(x)
mat2.append(riga)
        #compute the column sums
        vec_mat2=[]
        for i in range(len(mat2)):
            for j in range(len(mat2[0])):
                if i == 0:
                    vec_mat2.append(mat2[i][j])
                else:
                    vec_mat2[j]=vec_mat2[j]+mat2[i][j]
simset = 0
for i in range(len(vec_mat1)):
sim = float( vec_mat2[i]/ float( (ris_test*vec_mat1[i])**0.5 ) )
simset = simset + sim
        #append the weight to its band
indice = art_sel.index(art)
art_sel[indice].append(simset)
    #----------- Song evaluation
    #select the songs to evaluate
canz_sel=[]
for canz in usertest_pl_id:
for c in canz:
if c not in canz_sel and c not in user_pl_id:
canz_sel.append(c)
lista_ris=[]
for canz in canz_sel:
        #count occurrences
c_song = 0
for pl in usertest_pl_id:
if canz in pl:
c_song += 1
        #get the song's band(s)
        art = fromsongtoband(canz)
        #get the weights and compute their average
pesi = []
for a in art:
for ris in art_sel:
if ris[0] == a:
peso = ris[1]
pesi.append(peso)
cont = 0
for p in pesi:
cont = cont + p
peso_def = cont/len(pesi)
        #compute the weight: c_song * peso_def
        peso_canz= c_song * peso_def
        #append to the list
lista_ris.append([canz,peso_canz])
lista_ris = sorted(lista_ris,key=lambda x: x[1],reverse=True)
    #select the first 5
    lista_ris = lista_ris[:5]
    #get the song objects
resultlist = []
for c in lista_ris:
canz = canzone.objects.get(id=c[0])
resultlist.append(canz)
#return HttpResponse(resultlist)
return (resultlist)
# SEARCH BY RATE-------------------------------------------------------------------------------------------------
# INPUT:USER -> PLAYLIST -> PEARSON/USER -> OUTPUT: BANDS dict------------------------------------------------------------
def searchByRate(userid):
user = User.objects.get(id=userid)
    # 1) Get the ratings the user has given
votiuser= Rate.objects.filter(user=user, valutazione__gte=3).reverse()
'''if votiuser.count() > 5:
        votiuser= list(votiuser[:5])'''# restriction to 5 for the TEST
banduser = dict()
    # Build the user's dictionary of band -> rating
for v in votiuser:
banduser[v.gruppo] = v.valutazione
    # 2) Get users who have rated bands in common (excluding the base user)
usertest=User.objects.filter(rate=Rate.objects.filter(gruppo=gruppo.objects.filter(rate=votiuser))).distinct().exclude(id=user.id)
    # 3) Compute the PEARSON index
classificaindici = dict()
for u in usertest:
bandtest = dict()
for g in votiuser:
if Rate.objects.filter(user=u).filter(gruppo=g.gruppo):
testval=Rate.objects.get(id=Rate.objects.filter(user=u).filter(gruppo=g.gruppo))
bandtest[testval.gruppo] = testval.valutazione
else:
bandtest[g.gruppo] = 0
indicepearson = pearson(list(banduser.values()),list(bandtest.values()))
classificaindici[u] = indicepearson
    #FILTER
    # Keep those higher than 0.75; lower the threshold if there are no results
soglia = 1
numeroris = 0
indicifiltrati = dict()
while numeroris < 2 and soglia != 0:
indicifiltrati = {k: v for k, v in classificaindici.items() if v > soglia}
soglia = soglia - 0.25
numeroris = len(indicifiltrati)
'''indicifiltrati = {k: v for k, v in classificaindici.items() if v == 1}
if len(indicifiltrati) == 0:
indicifiltrati = {k: v for k, v in classificaindici.items() if v > 0}'''
    # Extract an ordered list of users
    utentisuggeriti = sorted(indicifiltrati, key=indicifiltrati.__getitem__, reverse=True)
    # 4) Get and select their bands with rating >= 3
gruppisuggeriti = dict()
for s in utentisuggeriti:
gruppivotati = gruppo.objects.filter(rate=Rate.objects.filter(user=s).filter(valutazione__gte=3))
        # Set of the base user's bands
insut = set (gruppo.objects.filter(rate=votiuser))
instest = set (gruppivotati)
inssugg= instest-insut
for g in inssugg:
rate = Rate.objects.get(id=Rate.objects.filter(user=s).filter(gruppo=g))
if g in gruppisuggeriti:
gruppisuggeriti[g] += gruppisuggeriti[g] + rate.valutazione
else:
gruppisuggeriti[g] = rate.valutazione
return (gruppisuggeriti)
#Filter out the user's own bands and unwanted ones
def negGrup(userid):
filtro = gruppo.objects.filter(referente=User.objects.get(id=userid))
return filtro
def BandUserSuggestion(userid):
    # get the user and their top ten to start the searches
user=userid
topten = fromusertotopten(user)
    # search with the different methods
sbr = searchByRate(user)
sbtb = searchByTopOnBands(topten)
sbtu = searchByTopOnUser(topten)
    # populate the dictionary and weight by the number of hits
gruppi = dict()
for g in sbr:
gruppi[g] = 1
for g in sbtb:
if g in gruppi:
gruppi[g] += (gruppi[g] + 1)
else:
gruppi[g] = 1
for g in sbtu:
if g in gruppi:
gruppi[g] += (gruppi[g] + 1)
else:
gruppi[g] = 1
filtrogruppi = negGrup(userid)
for f in filtrogruppi:
if f in gruppi:
del gruppi[f]
gruppi= sorted(gruppi, key=gruppi.__getitem__,reverse=True)[:20]
    # COMMENT OUT THE NEXT LINE DURING THE TEST
#shuffle(gruppi)
gruppi = (gruppi)[:10]
return gruppi
## USER-BASED CORRELATION ALGORITHMS
# Takes the ids of 2 users (user, song for the band prediction) and computes their Pearson correlation index
def pearsonfromuser(usera,userb):
recorda = Rate.objects.filter(user__id=usera)
votia = []
votib = []
for v in recorda:
votia.append(v.valutazione)
try:
valb = Rate.objects.get(user__id=userb,gruppo=v.gruppo)
votib.append(valb.valutazione)
except:
votib.append(0)
indice = pearson(votia,votib)
return (indice)
#Similarity index (user, song)
def onbandfromuser(user,canz):
    #Extract the song's band
    grup = gruppo.objects.filter(canzone__titolo=canz.titolo)
    #Extract the user's top ten
    topuser = Topten.objects.filter(user=user).order_by('posizione')
    #Extract the band's top ten
    topgruppo = influence.objects.filter(gruppo=grup).order_by('posizione')
    #Compute the score
n = int(11)
punteggio = 0
for i in topuser:
for j in topgruppo:
if i.artist == j.artist:
a = int(n - i.posizione)
b = int(n - j.posizione)
punteggio = int(punteggio + a*b)
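    # 385 = sum of k^2 for k = 1..10, the maximum possible score (identical top tens),
    # so the index below is normalized to [0, 1]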
indice = punteggio / 385
return (indice)
|
#!/usr/bin/env python
import mmap
import os
from os import path
import sys
import time
def parse_adr(adr):
"""
Converts an IP address + port in the form aaaaaaaa:pppp to the form
a.b.c.d:p.
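    For example, "0100007F:0050" parses to "127.0.0.1:80".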
"""
ip_raw, port_raw = adr.split(":")
# Split into a list of the octets.
ip_hexs = [ip_raw[idx: idx + 2] for idx in xrange(0, len(ip_raw), 2)]
# Convert each octet to an int.
ip_ints = [int(hex_str, 16) for hex_str in ip_hexs]
ip_ints.reverse()
return("{}:{}".format(".".join([str(x) for x in ip_ints]), int(port_raw, 16)))
def main():
assert len(sys.argv) == 6, \
("Expected five arguments: logging interval (seconds), logging "
"duration (seconds), local IP address + port (a.b.c.d:p1), remote IP "
"address + port (e.f.g.h:p2), output file")
intv_s, dur_s, lcl_adr_tgt, rem_adr_tgt, out = sys.argv[1:]
dur_s = float(dur_s)
intv_s = float(intv_s)
# Make sure that the output file does not already exist.
if path.exists(out):
print("Output file already exists: {}".format(out))
sys.exit(-1)
else:
# Create the output directory if it does not already exist.
odr = path.dirname(out)
if odr and not path.isdir(odr):
os.makedirs(odr)
# While polling the file, only record the lines to maximize sample rate.
start_s = time.time()
cur_s = start_s
delt_s = 0
tstamp_lines = []
with open("/proc/net/tcp", "r+") as f:
while (delt_s < dur_s):
delt_s = time.time() - start_s
tstamp_lines.append((delt_s, [line for line in f]))
f.seek(0)
time.sleep(intv_s)
    # Do all of the data parsing once we are done.
cwnds = []
for tstamp_s, lines in tstamp_lines:
# Find the lines corresponding to outgoing connections, and extract
# their cwnds. Skip the first line, which is the column headings.
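        # In the kernel's /proc/net/tcp output, whitespace-split column 15 (0-indexed,
        # after the standard socket fields) holds the sending congestion window (snd_cwnd).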
for line in lines[1:]:
splits = line.strip().split()
lcl_adr = parse_adr(splits[1])
rem_adr = parse_adr(splits[2])
if lcl_adr == lcl_adr_tgt and rem_adr == rem_adr_tgt:
cwnds.append((tstamp_s, int(splits[15])))
with open(out, "w") as f:
for tstamp_s, cwnd in cwnds:
f.write("{},{}\n".format(tstamp_s, cwnd))
if __name__ == "__main__":
main()
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.23 on 2019-11-07 00:58
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('website', '0003_load_initial_data'),
]
operations = [
migrations.AddField(
model_name='session',
name='lhs_samples',
field=models.TextField(default='[]'),
),
migrations.AlterField(
model_name='session',
name='tuning_session',
field=models.CharField(choices=[('tuning_session', 'Tuning Session'), ('no_tuning_session', 'No Tuning'), ('randomly_generate', 'Randomly Generate'), ('lhs', 'Run LHS')], default='tuning_session', max_length=64, verbose_name='session type'),
),
]
|
#!/usr/bin/python3
import requests
from bs4 import BeautifulSoup
import pandas as pd
url = "https://cn.investing.com/equities/apple-computer-inc-historical-data"
response = requests.get(url, headers={"User-Agent": "Mozilla/5.0"})
soup = BeautifulSoup(response.text, 'html.parser')
html = soup.find(id = "curr_table").tbody
y = html.text
list1 = y.split("\n\n")
result = {}
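# result maps each date string to a dict with "close", "open", "high", "low", "vol" and "range";
# rows whose first split element is empty are shifted by one column and handled separately below.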
for i in list1[1:-1]:
a = i.split("\n")
if a[0] == "":
v = a[6].split("M")
result.update({a[1]:{"close": a[2], "open": a[3], "high": a[4], "low": a[5], "vol": v[0]+"M", "range":v[1]}})
else:
v = a[5].split("M")
result.update({a[0]:{"close": a[1], "open": a[2], "high": a[3], "low": a[4], "vol": v[0]+"M", "range":v[1]}})
print(result)
# import re
# list1 = html.split("<tr>\n<td class=\"first left bold noWrap\" data-real-value=")
# dict_result = [];day2 = [];prise2 = [];volume2=[];percent2=[];
# for i in range(0,len(list1)):
# if list1[i] != []:
# day = re.findall("\d+年\d+月\d+日",list1[i])
# for j in day:
# if j !=[]:
# day2.append(j)
# prise = re.findall("data-real-value=\"\d\d\d.\d\d\"",list1[i])
# for k in prise:
# if k != []:
# prise2.append(k.replace('data-real-value=', "").replace('"', ""))
# volume = re.findall(">.*M",list1[i])
# for a in volume:
# if a != []:
# volume2.append(a.replace(">",""))
# percent = re.findall(">-*\d+.\d+%",list1[i])
# for b in percent:
# if b != []:
# percent2.append(a.replace(">",""))
# dict_result.append([day,[prise, volume, percent]])
# print(dict_result)
# open_prise = re.findall("\d+年\d+月\d+日",text)
# high = re.findall("\d+年\d+月\d+日",text)
# low = re.findall("\d+年\d+月\d+日",text)
# range_prise = re.findall("\d+年\d+月\d+日",text)
# print(list1)
# df = pd.read_html(str(html), header = 0)[0]
# print(df)
# headers = {
# :authority: cn.investing.com
# :method: GET
# :path: /equities/apple-computer-inc-historical-data
# :scheme: https
# accept: text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9
# accept-encoding: gzip, deflate, br
# accept-language: zh-TW,zh;q=0.9,en-US;q=0.8,en;q=0.7
# cache-control: max-age=0
# cookie: logglytrackingsession=b49ea460-d00e-4b2b-b5e7-1ba8a3b303cc; PHPSESSID=lt5c0u20d1iic8pc7eqt8qar5m; SideBlockUser=a%3A2%3A%7Bs%3A10%3A%22stack_size%22%3Ba%3A1%3A%7Bs%3A11%3A%22last_quotes%22%3Bi%3A8%3B%7Ds%3A6%3A%22stacks%22%3Ba%3A1%3A%7Bs%3A11%3A%22last_quotes%22%3Ba%3A1%3A%7Bi%3A0%3Ba%3A3%3A%7Bs%3A7%3A%22pair_ID%22%3Bs%3A4%3A%226408%22%3Bs%3A10%3A%22pair_title%22%3Bs%3A0%3A%22%22%3Bs%3A9%3A%22pair_link%22%3Bs%3A28%3A%22%2Fequities%2Fapple-computer-inc%22%3B%7D%7D%7D%7D; geoC=TW; adBlockerNewUserDomains=1627291318; StickySession=id.23059254203.989cn.investing.com; udid=dc7448f5c99e96dba1149af65bb0126f; __cflb=0H28uxmf5JNxjDUC6WDvQUEoJyvKUTqwPn61uSCp1dv; __gads=ID=4b4eb919e3648788:T=1627291320:S=ALNI_MYFqYUhnjzax1FSqSIT5TTdE5T-Kw; protectedMedia=2; _ga=GA1.2.1528658918.1627291320; _gid=GA1.2.1008979733.1627291323; G_ENABLED_IDPS=google; adsFreeSalePopUp=3; Hm_lvt_a1e3d50107c2a0e021d734fe76f85914=1627291324,1627291532,1627299698,1627304916; Hm_lpvt_a1e3d50107c2a0e021d734fe76f85914=1627304916; smd=dc7448f5c99e96dba1149af65bb0126f-1627309473; _gat_allSitesTracker=1
# sec-ch-ua: " Not;A Brand";v="99", "Google Chrome";v="91", "Chromium";v="91"
# sec-ch-ua-mobile: ?0
# sec-fetch-dest: document
# sec-fetch-mode: navigate
# sec-fetch-site: cross-site
# sec-fetch-user: ?1
# upgrade-insecure-requests: 1
# user-agent: Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.164 Safari/537.36
# }
|
"""
Demonstration of Earnshaw's theorem
Ahmed Al-kharusi
Please check the simulation yourself. You may find mistakes!
See the references
"""
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
#The moving charge
position_velocity_xi = np.array([0, 0])
position_velocity_yi = np.array([-0.01, 0])
position_velocity_zi = np.array([0.01,0])
#The fixed charges
CHARGE1_POSITION = np.array([1, 1, 1])
CHARGE2_POSITION = np.array([1, 1, -1])
CHARGE3_POSITION = np.array([1, -1, 1])
CHARGE4_POSITION = np.array([-1, 1, 1])
CHARGE5_POSITION = np.array([1, -1, -1])
CHARGE6_POSITION = np.array([-1, -1, 1])
CHARGE7_POSITION = np.array([-1, 1, -1])
CHARGE8_POSITION = np.array([-1, -1, -1])
#Coulomb constant arb. units
k = 1
# set all charges =+1 and masses =1
#consider removing place holders for more efficient codes
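# A free charge is released near the centre of a cube of eight fixed like charges; by
# Earnshaw's theorem that central equilibrium is unstable, so the small initial offset
# set above lets the charge drift away, which the frames saved below illustrate.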
def acceleration(position_velocity, axis, position_velocity_x, position_velocity_y, position_velocity_z):
    # Coulomb acceleration of the free charge along the given axis (0=x, 1=y, 2=z),
    # summed over the eight fixed charges (all charges and masses are 1, k is arbitrary).
    fixed_charges = [CHARGE1_POSITION, CHARGE2_POSITION, CHARGE3_POSITION, CHARGE4_POSITION,
                     CHARGE5_POSITION, CHARGE6_POSITION, CHARGE7_POSITION, CHARGE8_POSITION]
    total = 0.0
    for charge in fixed_charges:
        # Separation component along the requested axis.
        d = position_velocity[0] - charge[axis]
        # Full 3D distance between the free charge and this fixed charge.
        r = np.linalg.norm(np.array([charge[0] - position_velocity_x[0],
                                     charge[1] - position_velocity_y[0],
                                     charge[2] - position_velocity_z[0]]))
        total += d / r**3
    return k * total
def derivatives_x(position_velocity_x, t, x,y,z):
return np.array([position_velocity_x[1], acceleration(position_velocity_x, 0, x,y,z) ])
def derivatives_y(position_velocity_y, t, x,y,z):
return np.array([position_velocity_y[1], acceleration(position_velocity_y, 1, x,y,z)])
#place_holder so that it can be used with rk4
def derivatives_z(position_velocity_z, t, x,y,z):
return np.array([position_velocity_z[1], acceleration(position_velocity_z, 2, x,y,z)])
#taken from... (see references)
def rk4(y,dy,t,h, x,yy,z):
k1=dy(y,t, x,yy,z)
k2=dy(y+h/2*k1,t+h/2, x,yy,z)
k3=dy(y+h/2*k2,t+h/2, x,yy,z)
k4=dy(y+h*k3,t+h, x,yy,z)
y=y+h*(k1+2*k2+2*k3+k4)/6
#t=t+h
return y
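# A minimal, optional sanity check of the rk4 helper above (an added sketch, not part of the
# original simulation): integrating dy/dt = -y from y(0) = 1 over one time unit should give
# roughly exp(-1) ≈ 0.368. The trailing x, yy, z arguments are unused placeholders mirroring
# the derivative-function signature used elsewhere in this script.
def _rk4_self_test():
    def _decay(y, t, x, yy, z):
        return -y
    y_test, t_test, h_test = np.array([1.0]), 0.0, 0.01
    for _ in range(100):
        y_test = rk4(y_test, _decay, t_test, h_test, None, None, None)
        t_test += h_test
    return y_test[0]  # ~0.368
# call _rk4_self_test() manually to verify the integrator; it is not invoked automatically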
def implement_rk4(position_velocity_x, position_velocity_y, position_velocity_z, t, h, steps_no):
global time_arr
time_arr = np.array([t])
data_x = np.array([position_velocity_x])
data_y = np.array([position_velocity_y])
data_z = np.array([position_velocity_z])
for i in range(steps_no):
tempx = position_velocity_x
tempy = position_velocity_y
position_velocity_x = rk4( position_velocity_x, derivatives_x, t, h ,position_velocity_x, position_velocity_y, position_velocity_z)
position_velocity_y = rk4( position_velocity_y, derivatives_y, t,h, tempx, position_velocity_y, position_velocity_z)
position_velocity_z = rk4( position_velocity_z, derivatives_z, t,h, tempx, tempy, position_velocity_z)
t +=h
#time_arr = np.append(time_arr, t)
data_x = np.vstack((data_x, position_velocity_x))
data_y = np.vstack((data_y, position_velocity_y))
data_z = np.vstack((data_z, position_velocity_z))
time_arr = np.append(time_arr, t)
#print (star2_arr_x," ", star1_arr_x)
    [x, vx] = data_x.transpose()
    [y, vy] = data_y.transpose()
    [z, vz] = data_z.transpose()
    return [x, y, z]  # can also return vx, vy, vz for energies and other info
t = 0 # starting time
h = 1/(30) # step size for the RK4 method
steps_no = 40000 # number of steps of the RK4 method
[x, y, z] = implement_rk4(position_velocity_xi, position_velocity_yi, position_velocity_zi, t, h, steps_no)
save_every_n_frames = 30
(ymin, ymax) = (-1.1, 1.1)
(xmin, xmax) = (-1.1, 1.1)
(zmin, zmax) = (-1.1, 1.1)
for j in range(int(len(x)/save_every_n_frames)-1):
i = j*save_every_n_frames
fig = plt.figure(figsize=(10,10))
ax = fig.gca(projection='3d', autoscale_on=False,xlim=(xmin,xmax), ylim=(ymin,ymax), zlim=(zmin,zmax))
ax.scatter(x[0:i], y[0:i], z[0:i], s=2, c='black', alpha=0.9)
ax.scatter(x[i], y[i], z[i], s=200, c='c', label='Free charge')
    ax.set_title("Free charge position:("+str(round(x[i],2))+", "+str(round(y[i],2))+", "+str(round(z[i],2))+")\n Add a small offset",
                 c='blue', fontsize=14, loc='left')
ax.scatter(CHARGE1_POSITION[0], CHARGE1_POSITION[1], CHARGE1_POSITION[2] , s=200, c='r', label="Fixed charges\nat cube vertices")
ax.scatter(CHARGE2_POSITION[0], CHARGE2_POSITION[1], CHARGE2_POSITION[2] , s=200, c='r')
ax.scatter(CHARGE3_POSITION[0], CHARGE3_POSITION[1], CHARGE3_POSITION[2] , s=200, c='r')
ax.scatter(CHARGE4_POSITION[0], CHARGE4_POSITION[1], CHARGE4_POSITION[2] , s=200, c='r')
ax.scatter(CHARGE5_POSITION[0], CHARGE5_POSITION[1], CHARGE5_POSITION[2] , s=200, c='r')
ax.scatter(CHARGE6_POSITION[0], CHARGE6_POSITION[1], CHARGE6_POSITION[2] , s=200, c='r')
ax.scatter(CHARGE7_POSITION[0], CHARGE7_POSITION[1], CHARGE7_POSITION[2] , s=200, c='r')
ax.scatter(CHARGE8_POSITION[0], CHARGE8_POSITION[1], CHARGE8_POSITION[2] , s=200, c='r')
ax.set_xlabel('$x$', fontsize=30)
ax.set_ylabel('$y$', fontsize=30)
ax.set_zlabel('$z$', fontsize=30)
ax.legend(fontsize=18)
plt.tight_layout()
fig.savefig(str(j)+'.png',dpi=110)
plt.show()
plt.close()
"""
References:
#The rk4 function is taken from
https://youtu.be/HPreOWKJOiY
"""
|
from __future__ import absolute_import
import numpy
from targets.marshalling.marshaller import Marshaller
from targets.target_config import FileFormat
class NumpyArrayMarshaller(Marshaller):
type = numpy.ndarray
file_format = FileFormat.numpy
def target_to_value(self, target, **kwargs):
"""
:param obj: object to pickle
:return:
"""
import numpy as np
return np.load(target.path, **kwargs)
def value_to_target(self, value, target, **kwargs):
"""
:param obj: object to pickle
:return:
"""
import numpy as np
target.mkdir_parent()
np.save(target.path, value, **kwargs)
class NumpyArrayPickleMarshaler(NumpyArrayMarshaller):
file_format = FileFormat.pickle
def target_to_value(self, target, **kwargs):
import numpy as np
return np.load(target.path, allow_pickle=True, **kwargs)
def value_to_target(self, value, target, **kwargs):
import numpy as np
np.save(target.path, value, allow_pickle=True, **kwargs)
|
"""/**
* @author [Jai Miles]
* @email [jaimiles23@gmail.com]
* @create date 2020-05-06 15:10:52
* @modify date 2020-06-16 15:11:39
* @desc [
TODO:
- Consider is_instance(data, (tuple, list)) vs has_attr(__iter__)??
##########
# README.md Example.
##########
linear_nlg is a naive natural language generation (NLG) to interact with the user.
The method transforms linearly connected sentence chunks (e.g., clauses, parts of speech, etc.) into speech responses.
Consider the following arbitrary noun phrase:
"The red dog"
This phrase can be parsed into 3 separate chunks:
"The": determiner
"red": colour adjective
"dog": animal noun
In this example, the determiner, adjective, and noun have no effect on the meaning of the response.
We can use naive NLG to create an arbitrary noun phrase. This skill's NLG method would sample from the following three message tuples (MT).
A single item is sampled from each message tuple to create the noun phrase (DET, JJ, NN).
MT_DET = (
"The",
"A",
)
MT_COLOUR_JJ = (
"red",
"blue",
"yellow",
)
MT_ANIMAL_NN = (
"dog",
"cat",
)
This NLG method requires careful consideration of sentence structure and semantics to avoid unnatural responses.
However, successful implementation increases response variety multiplicatively.
The speech construction for the above noun phrase yields 12 response permutations.
Data for each NLG method is located in each subdirectory's data module.
##########
# Test
##########
>>> test = [MT_DET, MT_COLOUR_JJ, MT_ANIMAL_NN]
    >>> linear_nlg(test)
"The red dog"
]*/
"""
##########
# Imports
##########
import random
from logs import logger, log_func_name
from pauser import Pauser
##########
# Create Message from Tuple of Message Clauses
##########
@log_func_name
def linear_nlg(tuple_message_clause: tuple, str_joiner: str = ' ') -> str:
"""Returns message constructed from tuple message clause.
Constructs the message with different methods per data type.
## Data type Method
Tuple/list random.choice()
str append
int Pauser.get_p_level()
"""
def get_clause(tup_data) -> str:
"""Helper func: returns clause from tup_data using recursion."""
        if tup_data is None:
            return ''
        elif isinstance(tup_data, str):
            return tup_data
        elif isinstance(tup_data, (int, float)):
            # check numbers before any len() test: ints/floats have no len()
            return Pauser.get_p_level(tup_data)
        elif isinstance(tup_data, (tuple, list)):
            if len(tup_data) == 0:
                return ''
            if isinstance(tup_data[0], str):
## List of strings, return choice.
return random.choice(tup_data)
else:
# Recursion of tuples in tuple
speech_list = []
for clause_list in tup_data:
clause = get_clause(clause_list)
speech_list.append(clause)
return str_joiner.join(speech_list)
else:
logger.warning(f"get_clause: Unrecognized data type {tup_data}")
logger.debug(tuple_message_clause)
speech_list = []
for tup_data in tuple_message_clause:
clause = get_clause(tup_data)
speech_list.append( clause)
# logger.debug(speech_list)
return str_joiner.join(speech_list)
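# Usage sketch (assumes the illustrative MT_* tuples from the module docstring are defined):
# MT_DET = ("The", "A")
# MT_COLOUR_JJ = ("red", "blue", "yellow")
# MT_ANIMAL_NN = ("dog", "cat")
# linear_nlg((MT_DET, MT_COLOUR_JJ, MT_ANIMAL_NN))  # e.g. "A blue cat"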
|
# @Title: 子集 (Subsets)
# @Author: 18015528893
# @Date: 2021-02-28 12:23:12
# @Runtime: 44 ms
# @Memory: 15.2 MB
class Solution:
def subsets(self, nums: List[int]) -> List[List[int]]:
result = []
def backtrack(path, start):
result.append(list(path))
if len(path) >= len(nums):
return
for i in range(start, len(nums)):
path.append(nums[i])
backtrack(path, i+1)
path.pop()
backtrack([], 0)
return result
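# e.g. Solution().subsets([1, 2, 3]) returns
# [[], [1], [1, 2], [1, 2, 3], [1, 3], [2], [2, 3], [3]]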
|
#!/usr/bin/env python
# a stacked bar plot with errorbars
from pylab import *
N = 5
menMeans = (20, 35, 30, 35, 27)
womenMeans = (25, 32, 34, 20, 25)
menStd = (2, 3, 4, 1, 2)
womenStd = (3, 5, 2, 3, 3)
ind = arange(N) # the x locations for the groups
width = 0.35 # the width of the bars: can also be len(x) sequence
p1 = bar(ind, menMeans, width, color='r', yerr=womenStd)
p2 = bar(ind, womenMeans, width, color='y',
bottom=menMeans, yerr=menStd)
ylabel('Scores')
title('Scores by group and gender')
xticks(ind+width/2., ('G1', 'G2', 'G3', 'G4', 'G5') )
yticks(arange(0,81,10))
legend( (p1[0], p2[0]), ('Men', 'Women') )
show()
|
from typing import Dict, List, Any, Optional
from .featurizing import Featurizing, featurizing
# from ..operation import DatasetOperation, dataset_operation
from typing import Callable, Mapping
from .plugins.summarization.sum_attribute import *
from .plugins.summarization.extractive_methods import _ext_oracle
from .plugins.summarization.extractive_methods import _lead_k
from .plugins.summarization.extractive_methods import _compute_rouge
# store all featurizing class
import os
import sys
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
from operation import DatasetOperation, dataset_operation
from .general import get_features_sample_level as get_features_sample_level_general
class SummarizationFeaturizing(Featurizing, DatasetOperation):
def __init__(self,
name:str = None,
func:Callable[...,Any] = None,
resources: Optional[Mapping[str, Any]] = None,
contributor: str = None,
processed_fields: List = ["text"],
generated_field: str = None,
task = "summarization",
description = None,
):
super().__init__(name = name, func = func, resources = resources,
contributor = contributor,
description= description)
self._type = 'SummarizationFeaturizing'
self.processed_fields = processed_fields
self.generated_field = generated_field
self._data_type = "Dataset"
self.task = task
class summarization_featurizing(featurizing, dataset_operation):
def __init__(self,
name: Optional[str] = None,
resources: Optional[Mapping[str, Any]] = None,
contributor: str = None,
processed_fields:List = None,
generated_field:str = None,
task = "summarization",
description = None,
):
super().__init__(name = name, resources = resources,
contributor = contributor, task = task,
description=description)
self.processed_fields = processed_fields
self.generated_field = generated_field
def __call__(self, *param_arg):
if callable(self.name):
tf_class = SummarizationFeaturizing(name = self.name.__name__, func=self.name)
return tf_class(*param_arg)
else:
f = param_arg[0]
name = self.name or f.__name__
tf_cls = SummarizationFeaturizing(name=name, func = f,
resources = self.resources,
contributor = self.contributor,
processed_fields = self.processed_fields,
generated_field = self.generated_field,
task = self.task, description=self.description)
return tf_cls
@summarization_featurizing(name = "get_density", contributor="datalab",
task = "summarization", description="This function measures to what extent a summary covers the content in the source text.")
def get_density(sample:dict):
summary_attribute = SUMAttribute()
attribute_info = summary_attribute.cal_attributes_each(sample['text'], sample['summary'])
return {"density":attribute_info["attr_density"]}
@summarization_featurizing(name = "get_coverage", contributor="datalab",
task = "summarization", description="This function measures to what extent a summary covers the content in the source text.")
def get_coverage(sample:dict):
summary_attribute = SUMAttribute()
attribute_info = summary_attribute.cal_attributes_each(sample['text'], sample['summary'])
return {"coverage":attribute_info["attr_coverage"]}
@summarization_featurizing(name = "get_compression", contributor="datalab",
task = "summarization", description="This function measures the compression ratio from the source text to the generated summary.")
def get_compression(sample:dict):
summary_attribute = SUMAttribute()
attribute_info = summary_attribute.cal_attributes_each(sample['text'], sample['summary'])
return {"compression":attribute_info["attr_compression"]}
@summarization_featurizing(name = "get_repetition", contributor="datalab",
task = "summarization", description="This function measures the rate of repeated segments in summaries. The segments are instantiated as trigrams.")
def get_repetition(sample:dict):
summary_attribute = SUMAttribute()
attribute_info = summary_attribute.cal_attributes_each(sample['text'], sample['summary'])
return {"repetition":attribute_info["attr_repetition"]}
@summarization_featurizing(name = "get_novelty", contributor="datalab",
task = "summarization", description="This measures the proportion of segments in the summaries that haven’t appeared in source documents. The segments are instantiated as bigrams.")
def get_novelty(sample:dict):
summary_attribute = SUMAttribute()
attribute_info = summary_attribute.cal_attributes_each(sample['text'], sample['summary'])
return {"novelty":attribute_info["attr_novelty"]}
@summarization_featurizing(name = "get_copy_len", contributor="datalab",
task = "summarization", description="Measures the average length of segments in summary copied from source document.")
def get_copy_len(sample:dict):
summary_attribute = SUMAttribute()
attribute_info = summary_attribute.cal_attributes_each(sample['text'], sample['summary'])
return {"copy_len":attribute_info["attr_copy_len"]}
@summarization_featurizing(name = "get_all_features", contributor="datalab",
task = "summarization", description="Calculate all features for summarization datasets (density, coverage, compression, repetition, novelty, copy lenght)")
def get_all_features(sample:dict):
summary_attribute = SUMAttribute()
attribute_info = summary_attribute.cal_attributes_each(sample['text'], sample['summary'])
return {
"density":attribute_info["attr_density"],
"coverage":attribute_info["attr_coverage"],
"compression": attribute_info["attr_compression"],
"repetition": attribute_info["attr_repetition"],
"novelty": attribute_info["attr_novelty"],
"copy_len": attribute_info["attr_copy_len"],
}
@summarization_featurizing(name = "get_oracle_summary", contributor="datalab",
task = "summarization", description="This function extract the oracle summaries for text summarization")
def get_oracle_summary(sample:dict) -> Dict:
"""
Input:
SummarizationDataset: dict
Output:
return {"source":src,
"reference":ref,
"oracle_summary":oracle,
"oracle_labels":labels,
"oracle_score":max_score}
"""
document = sent_tokenize(sample["text"]) # List
summary = sample['summary']
oracle_info = _ext_oracle(document, summary, _compute_rouge, max_sent=3)
return oracle_info
#
#
#
@summarization_featurizing(name = "get_lead_k_summary", contributor="datalab",
task = "summarization", description="This function extract the lead k summary for text summarization datasets")
def get_lead_k_summary(sample:dict) -> Dict:
"""
Input:
SummarizationDataset: dict
Output:
return {"source":src,
"reference":ref,
"lead_k_summary":src,
"lead_k_score":score}
"""
document = sent_tokenize(sample["text"]) # List
summary = sample['summary']
lead_k_info = _lead_k(document, summary, _compute_rouge, k = 3)
return lead_k_info
def get_schema_of_sample_level_features():
return {
"text_length":1,
"text_lexical_richness":0.2,
"text_basic_words":0.2,
"text_gender_bias_word_male":1,
"text_gender_bias_word_female":2,
"text_gender_bias_single_name_male":1,
"text_gender_bias_single_name_female":1,
"summary_length": 1,
"summary_lexical_richness": 0.2,
"summary_basic_words": 0.2,
"summary_gender_bias_word_male": 1,
"summary_gender_bias_word_female": 2,
"summary_gender_bias_single_name_male": 1,
"summary_gender_bias_single_name_female": 1,
"density": 0.1,
"coverage": 0.1,
"compression": 0.1,
"repetition": 0.1,
"novelty": 0.1,
"copy_len": 0.1,
}
@summarization_featurizing(name = "get_features_sample_level", contributor= "datalab", processed_fields= "text",
task="summarization", description="This function is used to calculate the text length")
def get_features_sample_level(sample:dict):
text = sample["text"]
summary = sample["summary"]
res_info_general = get_features_sample_level_general.func(text)
res_info_general_new = {}
for k,v in res_info_general.items():
res_info_general_new["text" + "_" + k] =v
res_info_general = get_features_sample_level_general.func(summary)
for k,v in res_info_general.items():
res_info_general_new["summary" + "_" + k] =v
# get task-dependent features
summary_features = get_all_features.func(sample)
# update the res_info_general_new
res_info_general_new.update(summary_features)
# res_info_general_new.update({"answer_length":answer_length,
# "option1_length":option1_length,
# "option2_length":option2_length,
# # "option_index":int(option_index),
# })
return res_info_general_new
def get_schema_of_sample_level_features_asap():
return {
"text_length":1,
# "text_lexical_richness":0.2,
# "text_basic_words":0.2,
# "text_gender_bias_word_male":1,
# "text_gender_bias_word_female":2,
# "text_gender_bias_single_name_male":1,
# "text_gender_bias_single_name_female":1,
"summary_length": 1,
"summary_lexical_richness": 0.2,
"summary_basic_words": 0.2,
"summary_gender_bias_word_male": 1,
"summary_gender_bias_word_female": 2,
"summary_gender_bias_single_name_male": 1,
"summary_gender_bias_single_name_female": 1,
# "density": 0.1,
# "coverage": 0.1,
# "compression": 0.1,
# "repetition": 0.1,
# "novelty": 0.1,
# "copy_len": 0.1,
"n_aspects":0.0,
}
@summarization_featurizing(name = "get_features_sample_level_asap", contributor= "datalab", processed_fields= "text",
task="summarization", description="This function is used to calculate the text length")
def get_features_sample_level_asap(sample:dict):
text = sample["text"]
summary = sample["review"]
aspects = sample["aspects"]
# res_info_general = get_features_sample_level_general.func(text)
res_info_general_new = {}
# for k,v in res_info_general.items():
# res_info_general_new["text" + "_" + k] =v
res_info_general_new["text" + "_" + "length"] = len(text.split(" "))
res_info_general = get_features_sample_level_general.func(summary)
for k,v in res_info_general.items():
res_info_general_new["summary" + "_" + k] =v
# get task-dependent features
# summary_attribute = SUMAttribute()
# attribute_info = summary_attribute.cal_attributes_each(sample['text'], sample['review'])
# summary_features = {
# "density":attribute_info["attr_density"],
# "coverage":attribute_info["attr_coverage"],
# "compression": attribute_info["attr_compression"],
# "repetition": attribute_info["attr_repetition"],
# "novelty": attribute_info["attr_novelty"],
# "copy_len": attribute_info["attr_copy_len"],
# }
# res_info_general_new.update(summary_features)
# print(aspects)
# exit()
n_aspects = len(aspects)
res_info_general_new.update({"n_aspects":n_aspects})
# update the res_info_general_new
# res_info_general_new.update({"answer_length":answer_length,
# "option1_length":option1_length,
# "option2_length":option2_length,
# # "option_index":int(option_index),
# })
return res_info_general_new
|
#!/usr/bin/python
#
# Watch for changes to amazon/google product reviews.
#
CONFIG=".review-scraper"
VERBOSE=False
AMAZON_REVIEWS_URL="http://www.amazon.com/Bitiotic-Freeform-Backgammon/product-reviews/B00A7KD23K/ref=dp_db_cm_cr_acr_txt"
GOOGLE_PROD_URL="https://play.google.com/store/apps/details?id=com.bitiotic.freeform.android"
import sys
import urllib2
import BeautifulSoup
import md5
import os
import pickle
#_configFileName = os.path.join(os.path.expanduser("~"), CONFIG)
# XXX command line option for this (use explicit dir for running under cron)
_configFileName = os.path.join("/home/pat/", CONFIG)
# Picklable object that represents a review site (and what to scrape out of it)
class Site:
def __init__(self, name, url, tag, attrs):
self.name = name
self.url = url
self.tag = tag
self.attrs = attrs
def __hash__(self):
# .tag and .attrs are not part of the identity
return hash((self.name, self.url))
def __eq__(self, other):
# .tag and .attrs are not part of the identity
return (self.name, self.url) == (other.name, other.url)
# Collect all the state to save in one place. For each site, track checksum of reviews.
class Saved:
PICKLEFMT=0
def __init__(self):
self.version = "review-scraper.py v0.0.5"
self.sites = {}
def addSite(self, site):
self.updateSite(site, "unknown md5sum")
def updateSite(self, site, csum):
self.sites[site] = csum
def save(self):
"""Save this object to the given file. Existing contents will be obliterated."""
with open(_configFileName, "w") as fileHandle:
pickle.dump(self, fileHandle, Saved.PICKLEFMT)
@staticmethod
def load():
"""Load existing saved object from file, or create new object if no file exists."""
if os.path.exists(_configFileName):
with open(_configFileName, "r") as fileHandle:
try:
obj = pickle.load(fileHandle)
except:
obj = "Invalid Save File"
if not isinstance(obj, Saved):
raise RuntimeError, "Saved state corrupt, is not an instance of Saved"
return obj
else:
return Saved()
# Retrieve rough review text plus md5sum of reviews
def watchFor(label, url, tag, attrs):
blurbs = scrapeChunks(label, url, tag, attrs)
if VERBOSE:
if not blurbs:
print "No HTML found for ", tag, "+", attrs
else:
print blurbs
# Compute md5 checksum of review so we can see if anything changed
cx = md5.new()
    cx.update(blurbs or "")  # guard against pages where nothing was scraped
return (blurbs, cx.hexdigest())
def scrapeChunks(label, url, tag, attrs):
req = urllib2.Request(url)
response = urllib2.urlopen(req)
content = BeautifulSoup.BeautifulSoup(response.read())
#fh = open("/tmp/splat.html", "w")
#fh.write(str(content.prettify()))
#fh.close()
chunks = content.body.findAll(tag, attrs=attrs)
if not chunks:
return None
else:
return "\n\n".join([c.text for c in chunks])
def addSite(saveState, name, url, tag, attrs):
saveState.addSite(Site(name, url, tag, attrs))
def updateSites(saveState):
for site, oldcx in saveState.sites.items():
if VERBOSE:
print "Checking", site.name, "..."
        blurbs, newcx = watchFor(site.name, site.url, site.tag, site.attrs)
saveState.updateSite(site, newcx)
if VERBOSE:
if newcx != oldcx:
print " Changed!"
else:
print " No change"
else:
if newcx != oldcx:
print "Reviews have changed (?) at", site.name
def main():
state = Saved.load()
try:
if False: # Set to 'True' to add these sites to the list. Only needs to be done once.
addSite(state, "Google", GOOGLE_PROD_URL, 'div', attrs={ 'class': 'review-body'})
addSite(state, "Amazon", AMAZON_REVIEWS_URL, 'table', attrs={ 'id': 'productReviews' })
updateSites(state)
finally:
state.save()
if __name__ == "__main__":
sys.exit(main())
#eof
|
#!/usr/bin/env python3
import pyasmtools
def fac_iter(arg_n: int) -> int:
res = 1
for cur_n in range(1,arg_n+1):
res *= cur_n
return res
fac_iter = pyasmtools.TraceMe(fac_iter)
print( "fac_iter(7):", fac_iter(7))
|
from PyQt5 import QtWidgets, QtCore, QtGui
from collections import OrderedDict
class ZmqSetupWindow(QtWidgets.QMainWindow):
"""Sub window for zmq settings"""
zmqSetupChanged = QtCore.pyqtSignal(OrderedDict)
def __init__(self, parent=None):
super(ZmqSetupWindow, self).__init__(parent)
self.window_title = '%sMQ setup' % u'\u00D8'
self.default_ports = OrderedDict([('log', 8500), ('data', 8600), ('cmd', 8700), ('stage', 8800)])
self.ports = OrderedDict([(k, self.default_ports[k]) for k in self.default_ports])
self.edits = {}
self._init_ui()
def _init_ui(self):
self.setWindowTitle(self.window_title)
# Make this window blocking parent window
self.setWindowModality(QtCore.Qt.ApplicationModal)
self.screen = QtWidgets.QDesktopWidget().screenGeometry()
self.resize(0.25 * self.screen.width(), 0.25 * self.screen.height())
# Main widget
widget = QtWidgets.QWidget()
layout = QtWidgets.QGridLayout()
layout.setHorizontalSpacing(20)
layout.setVerticalSpacing(10)
layout.setAlignment(QtCore.Qt.AlignTop)
widget.setLayout(layout)
self.setCentralWidget(widget)
layout.addWidget(QtWidgets.QLabel('Ports:'), 0, 0, 1, 1)
for i, port in enumerate(self.default_ports):
label = QtWidgets.QLabel(port.capitalize())
edit = QtWidgets.QLineEdit()
edit.setValidator(QtGui.QIntValidator(1, int(2**16)))
edit.setText(str(self.default_ports[port]))
layout.addWidget(label, i+1, 1, 1, 1)
layout.addWidget(edit, i+1, 2, 1, 1)
self.edits[port] = edit
btn_reset = QtWidgets.QPushButton('Reset')
btn_reset.clicked.connect(lambda _: [self.edits[k].setText(str(self.default_ports[k])) for k in self.default_ports])
layout.addWidget(btn_reset, 6, 0, 1, 1)
btn_ok = QtWidgets.QPushButton('Ok')
btn_ok.clicked.connect(lambda _: self._update_ports())
btn_ok.clicked.connect(self.close)
btn_cancel = QtWidgets.QPushButton('Cancel')
btn_cancel.clicked.connect(self.close)
layout.addWidget(btn_cancel, 6, 3, 1, 1)
layout.addWidget(btn_ok, 6, 4, 1, 1)
def _update_ports(self):
for port in self.edits:
self.ports[port] = int(self.edits[port].text())
self.zmqSetupChanged.emit(self.ports)
|
"""
A robot is located at the top-left corner of a m x n grid (marked 'Start' in the diagram below).
The robot can only move either down or right at any point in time. The robot is trying to reach the bottom-right corner of the grid (marked 'Finish' in the
diagram below).
Now consider if some obstacles are added to the grids. How many unique paths would there be?
An obstacle and empty space is marked as 1 and 0 respectively in the grid.
Note: m and n will be at most 100.
Example 1:
Input:
[
[0,0,0],
[0,1,0],
[0,0,0]
]
Output: 2
Explanation:
There is one obstacle in the middle of the 3x3 grid above.
There are two ways to reach the bottom-right corner:
1. Right -> Right -> Down -> Down
2. Down -> Down -> Right -> Right
"""
class Solution(object):
def uniquePathsWithObstacles(self, obstacleGrid):
"""
:type obstacleGrid: List[List[int]]
:rtype: int
"""
        # store the grid's number of rows and columns
m = len(obstacleGrid)
n = len(obstacleGrid[0])
        # if the top-left cell has an obstacle, return 0 immediately
if obstacleGrid[0][0] == 1:
return 0
        # set the path count of the top-left cell to 1 (its obstacle status was already checked above)
obstacleGrid[0][0] = 1
        # initialize the boundaries
        # top row: if a cell has no obstacle and the cell to its left has a path count of 1 (i.e. no obstacle so far), set its path count to 1
for i in range(1,n):
obstacleGrid[0][i] = int(obstacleGrid[0][i] == 0 and obstacleGrid[0][i - 1] == 1)
        # left column: if a cell has no obstacle and the cell above has a path count of 1 (i.e. no obstacle so far), set its path count to 1
for i in range(1,m):
obstacleGrid[i][0] = int(obstacleGrid[i][0] == 0 and obstacleGrid[i - 1][0] == 1)
        # for the remaining (m-1)*(n-1) sub-grid, each cell's path count is the sum of the counts of the cell to its left and the cell above (if it has no obstacle), otherwise 0.
for i in range(1,m):
for j in range(1,n):
obstacleGrid[i][j] = (obstacleGrid[i][j - 1] + obstacleGrid[i - 1][j]) * (1 - obstacleGrid[i][j])
return obstacleGrid[m - 1][n - 1]
"""
Approach: dynamic programming, with special handling for the boundary initialization. Otherwise the idea is largely the same as before, with an extra obstacle check. Since obstacleGrid is reused in place, no extra space is needed, i.e. the space complexity is O(1).
"""
|
"""
Database Cleanup to be run every day.
Condenses high resolution report data into averages and anomalies.
Stores the averages and anomalies in separate DB table.
Data to Clean
| |
v v
__________________________________________
Previous 48H | Previous 24H | Now |
xxx | xxx | |
xxx | xxx | |
"""
import datetime
import sqlalchemy as sa
import pandas as pd
import numpy as np
from sqlalchemy import and_
from sqlalchemy.orm import Session
from server.db.mappings import HistoricalData, Node, Update, DiskUpdate, SessionUpdate, GPUUpdate, Pool
import server.constants as const
def clean(host: str, port: int, user: str, password: str, dbname: str, pool: int, verbose=False) -> bool:
"""
Reads through all current database entries, and condenses all entries older than 48 hours old.
Condensed data will be entered into the historical database.
:return:
"""
# Create DB Session
print("Connecting to Database...")
engine = sa.create_engine(f"mysql://{user}:{password}@{host}:{port}/"
f"{dbname}", echo=verbose)
session = Session(engine)
pool = session.get(Pool, {'id': pool})
hr_cutoff = datetime.datetime.now() - datetime.timedelta(days=1)
print(f"Condensing all data before {hr_cutoff}")
def _clean(data):
return None if np.isnan(data) else data
for node in pool.nodes:
node: Node
print(f"Condensing {node.pool_id}:{node.id}")
# General Updates
query = session.query(Update).filter(and_(Update.node_id == node.id, Update.timestamp <= hr_cutoff))
updates = pd.read_sql(query.statement, query.session.bind)
# Disk Updates
disk_query = session.query(DiskUpdate).filter(and_(DiskUpdate.update.has(node_id=node.id), DiskUpdate.update.has(Update.timestamp <= hr_cutoff)))
disk_updates = pd.read_sql(disk_query.statement, disk_query.session.bind)
# GPU Updates
gpu_query = session.query(GPUUpdate).filter(and_(GPUUpdate.update.has(node_id=node.id), GPUUpdate.update.has(Update.timestamp <= hr_cutoff)))
gpu_updates = pd.read_sql(gpu_query.statement, gpu_query.session.bind)
if len(updates) > 0:
historical = HistoricalData()
historical.node_id = node.id
historical.pool_id = node.pool_id
historical.time = hr_cutoff
historical.avg_cpu_curr_freq = _clean(updates['cpu_current_frequency'].mean())
historical.avg_cpu_percent_usage = _clean(updates['cpu_percent_usage'].mean())
historical.avg_cpu_load_1 = _clean(updates['cpu_load_1'].mean())
historical.avg_cpu_load_5 = _clean(updates['cpu_load_5'].mean())
historical.avg_cpu_load_15 = _clean(updates['cpu_load_15'].mean())
historical.avg_ram_used_virt = _clean(updates['ram_used_virtual'].mean())
historical.avg_ram_used_swap = _clean(updates['ram_used_swap'].mean())
historical.avg_battery_avail = _clean(updates['battery_available_percent'].mean())
if len(disk_updates) > 0:
disks = disk_updates['partition_id'].unique()
avail_avgs = []
used_avgs = []
for disk in disks:
avail_avgs.append(disk_updates.loc[disk_updates['partition_id'] == disk]['free_storage'].mean())
used_avgs.append(disk_updates.loc[disk_updates['partition_id'] == disk]['used_storage'].mean())
historical.total_disk_avail = _clean(sum(avail_avgs))
historical.total_disk_used = _clean(sum(used_avgs))
if len(gpu_updates) > 0:
gpus = gpu_updates['uuid'].unique()
loads = []
memory_useds = []
for gpu in gpus:
loads.append(gpu_updates.loc[gpu_updates['uuid'] == gpu]['load'].mean())
memory_useds.append(gpu_updates.loc[gpu_updates['uuid'] == gpu]['memory_used'].mean())
historical.avg_gpu_load = _clean(np.average(loads))
historical.avg_gpu_memory_used = _clean(np.average(memory_useds))
session.add(historical)
print(f"Added record for {node.pool_id}:{node.id}.")
else:
print(f"No updates in time range for {node.pool_id}:{node.id}.")
print("Committing changes to Historical DB.")
session.commit()
query = session.query(Update).filter(Update.timestamp <= hr_cutoff)
query.delete()
session.commit()
return True
if __name__ == '__main__':
clean(const.DB_URL, const.DB_PORT, const.DB_USER, const.DB_PASSWORD, const.DB_SCHEMA, const.POOL_ID, False)
|
print("Added python, too much code.")
|
import logging
import socket
from ipaddress import IPv4Address
from ipaddress import ip_network
from saturn import state
from saturn.protocol import TcpClient
from saturn.socks import reply
from .base import SocksRequest
class SocksRequestConnect(SocksRequest):
action_id = 1
async def go(self):
assert not isinstance(self.dispatcher.state, state.Connected)
on_connect = self.dispatcher.loop.create_future()
allowed_to = False
for addr in getattr(self.dispatcher.server.config["Security"], 'allowed_destinations', ["0.0.0.0/0"]):
if self.dst_addr in ip_network(addr):
allowed_to = True
break
if not allowed_to:
return reply.ConnectionNotAllowed()
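        # Example (illustrative values, not from the project config): with
        # allowed_destinations = ["10.0.0.0/8", "192.168.0.0/16"] only destinations inside
        # those networks are accepted; the default "0.0.0.0/0" allows any IPv4 address.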
try:
self.dispatcher.client_transport, self.client_protocol = await self.dispatcher.loop.create_connection(
lambda: TcpClient(self.dispatcher, on_connect),
str(self.dst_addr), self.dst_port)
except OSError as e:
if e.errno == 110:
return reply.NetworkUnreachable()
if e.errno == 111:
return reply.ConnectionRefused()
if e.errno == 113 or e.errno == 101:
return reply.HostUnreachable()
if e.errno == 22:
return reply.AddressTypeNotSupported()
logging.error(f'TCP Client got {e.errno}: {e} while trying to connect to {self.dst_addr}')
return reply.Failure()
self.dispatcher.connected = True
await on_connect
self.dispatcher.state = state.Connected()
return reply.Success(IPv4Address(socket.gethostbyname(socket.gethostname())), 8081)
|
# open the file for writing
f = open("myfile.txt", "w")
print("Enter Text (Type # when you are done)")
s = ''
while s != '#':
    s = input()
    f.write(s + "\n")  # note: the terminating '#' line is written as well
f.close()
|
"""
Funções para uma string
Leia uma string e retorne o maximo de função
"""
x = input('Digite algo: ')
# funcao .is
print(f'Tipo primitivo: {type(x)}')
print('É numero:', x.isnumeric())
print('É alfabético:', x.isalpha())
print('É alfanúmero:', x.isalnum())
print('Está em maiusculo:', x.isupper())
print('Está em minusculo:', x.islower())
|
import importlib
from django.conf import settings
from django.template import loader
from django.utils.text import slugify
def get_action(state_machine_name, trigger, task):
def _add_action_id(action):
action["id"] = slugify(action["title"])
def _add_action_form(action, task):
template = loader.get_template(action["template_filename"])
action["form"] = template.render(
{"task": task, "action": action, **action["template_context"]}
)
builders = importlib.import_module(settings.ACTION_CONF).action_builders
action = builders[state_machine_name][trigger](task)
_add_action_id(action)
_add_action_form(action, task)
return action
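# A hypothetical action_builders layout that get_action above expects (the "shipment"/"approve"
# names are illustrative, not taken from the project): a nested mapping of
# state_machine_name -> trigger -> builder, where each builder returns a dict containing at
# least "title", "template_filename" and "template_context".
#
# action_builders = {
#     "shipment": {
#         "approve": lambda task: {
#             "title": "Approve shipment",
#             "template_filename": "actions/approve.html",
#             "template_context": {},
#         },
#     },
# }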
def build_state_machine(state_machine_name, state_name):
builders = importlib.import_module(
settings.STATE_MACHINE_CONF
).state_machine_builders
return builders[state_machine_name](state_name)
|
"""
mbed CMSIS-DAP debugger
Copyright (c) 2006-2013 ARM Limited
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from ..flash.flash import Flash
from ..core.coresight_target import (SVDFile, CoreSightTarget)
from ..core.memory_map import (FlashRegion, RamRegion, MemoryMap)
flash_algo = {
'load_address' : 0x10000000,
'instructions' : [
0xE00ABE00, 0x062D780D, 0x24084068, 0xD3000040, 0x1E644058, 0x1C49D1FA, 0x2A001E52, 0x4770D1F2,
0x47700a80, 0x21004842, 0x22016301, 0x63416342, 0x6b416342, 0xd0fc07c9, 0x493e6382, 0x70082002,
0x47702000, 0x47702000, 0x4c3bb5f8, 0x25002032, 0x261f444c, 0x493960a6, 0x60206065, 0x4f384449,
0x91004620, 0x696047b8, 0xd10b2800, 0x203460a6, 0x60206065, 0x60e04833, 0x99004620, 0x696047b8,
0xd0002800, 0xbdf82001, 0x4d2bb5f8, 0x444d0a84, 0x492a606c, 0x60ac2032, 0x60284449, 0x460f4e28,
0x47b04628, 0x28006968, 0x606cd10b, 0x60ac2034, 0x48246028, 0x463960e8, 0x47b04628, 0x28006968,
0x2001d000, 0xb5f8bdf8, 0x00054614, 0x6861d10e, 0x68e26820, 0x68a11840, 0x18401889, 0x18406921,
0x18406961, 0x184069a1, 0x61e04240, 0x0aa84e12, 0x2132444e, 0x60316070, 0x60b04910, 0x4f104449,
0x91004630, 0x697047b8, 0xd10e2800, 0x20336075, 0x603060b4, 0x02402001, 0x480a60f0, 0x46306130,
0x47b89900, 0x28006970, 0x2001d000, 0x0000bdf8, 0x40048040, 0x40048000, 0x00000004, 0x00000018,
0x1fff1ff1, 0x00002ee0, 0x00000000,
],
'pc_init' : 0x10000025,
'pc_erase_sector' : 0x10000089,
'pc_program_page' : 0x100000C7,
'pc_eraseAll' : 0x10000049,
# Double buffering is not supported since sector size differs from page size
'static_base' : 0x10000000 + 0x00000020 + 0x00000128,
'begin_data' : 0x10000000 + 0x00000800, # Analyzer uses a max of 128 B data (32 pages * 4 bytes / page)
'begin_stack' : 0x10000800,
'min_program_length' : 1024,
'analyzer_supported' : True,
'analyzer_address' : 0x10001000 # Analyzer 0x10001000..0x10000600
}
class Flash_lpc824(Flash):
def __init__(self, target):
super(Flash_lpc824, self).__init__(target, flash_algo)
# TODO - temporary until flash algo is rebuilt with 1K page program size
def programPage(self, flashPtr, bytes):
write_size = 512
for i in range(0, 2):
data = bytes[i * write_size : (i + 1) * write_size]
Flash.programPage(self, flashPtr + i * write_size, data)
class LPC824(CoreSightTarget):
memoryMap = MemoryMap(
FlashRegion( start=0, length=0x8000, blocksize=0x400, isBootMemory=True),
RamRegion( start=0x10000000, length=0x2000)
)
def __init__(self, link):
super(LPC824, self).__init__(link, self.memoryMap)
def resetStopOnReset(self, software_reset=None, map_to_user=True):
super(LPC824, self).resetStopOnReset(software_reset)
        # Remap to use flash and set SP and PC accordingly
if map_to_user:
self.writeMemory(0x40048000, 0x2, 32)
sp = self.readMemory(0x0)
pc = self.readMemory(0x4)
self.writeCoreRegisterRaw('sp', sp)
self.writeCoreRegisterRaw('pc', pc)
|
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
import argparse
import cv2
import os
from maskrcnn_benchmark.config import cfg
from my_predictor import COCODemo
import time
vis = True
def main():
config_file = '/media/kevin/办公/xizang/1119_outputs/1119_infer_configs.yaml'
opts = ["MODEL.DEVICE", "cuda"]
confidence_threshold = 0.7
min_image_size = 720
# load config from file and command-line arguments
cfg.merge_from_file(config_file)
cfg.merge_from_list(opts)
cfg.freeze()
print(cfg)
# prepare object that handles inference plus adds predictions on top of image
coco_demo = COCODemo(
cfg,
confidence_threshold=confidence_threshold,
min_image_size=min_image_size,
)
img_dir = '/media/kevin/娱乐/xizang_database/testdata/1118/JPEGImages'
img_out_dir = '/media/kevin/办公/xizang/主观结果/1119'
    img_list = os.listdir(img_dir)  # get the paths of all images under the class folder
for i in range(0, 1500, 50):
        if not img_list[i].endswith('jpg'):  # skip files that are not jpg images
continue
# print(i)
start_time = time.time()
img_path = os.path.join(img_dir, img_list[i])
print(img_path)
img = cv2.imread(img_path)
img = cv2.resize(img, (1280, 720))
composite = coco_demo.run_on_opencv_image(img)
print("Time: {:.2f} s / img".format(time.time() - start_time))
img_out_path = os.path.join(img_out_dir, 'det_'+img_list[i])
cv2.imwrite(img_out_path, composite)
print('image write to %s'%img_out_path)
if vis:
cv2.namedWindow("COCO detections",0);
cv2.resizeWindow("COCO detections", 1800, 1100);
cv2.imshow("COCO detections", composite)
if cv2.waitKey(5000) == 27:
exit()
if vis:
cv2.destroyAllWindows()
if __name__ == "__main__":
main()
|
import io
import logging
import os
import sqlite3
from datetime import datetime
from shutil import copyfile
from django.conf import settings
from django.core.management import call_command
from django.db.utils import DatabaseError
logger = logging.getLogger(__name__)
def common_clean(db_name, db_file):
# let's remove the damaged db files
if settings.DATABASES["default"]["ENGINE"] != "django.db.backends.sqlite3":
return
os.remove(db_file)
logger.error("{} is corrupted".format(db_name))
def regenerate_database(connection):
# procedure to create from scratch a sqlite database when using Django ORM
from django.db.migrations.recorder import MigrationRecorder
connection.close()
common_clean(connection.alias, connection.get_connection_params()["database"])
if connection.alias == "notifications_db":
logger.error("Regenerating {}".format(connection.alias))
# delete the db migrations and run them again
connection_migrations = MigrationRecorder(connection).Migration
connection_migrations.objects.filter(app="notifications").delete()
call_command(
"migrate",
interactive=False,
verbosity=False,
app_label="notifications",
database="notifications_db",
)
call_command("migrate", interactive=False, verbosity=False)
def repair_sqlite_db(connection):
from kolibri.core.deviceadmin.utils import KWARGS_IO_WRITE
from kolibri.core.deviceadmin.utils import default_backup_folder
if settings.DATABASES["default"]["ENGINE"] != "django.db.backends.sqlite3":
return
# First let's do a file_backup
dest_folder = default_backup_folder()
if hasattr(connection, "name"):
orm = "sqlalchemy"
conn_name = connection.name
original_path = connection.url.database
else:
orm = "django"
conn_name = connection.alias
original_path = connection.get_connection_params()["database"]
fname = "{con}_{dtm}.dump".format(
con=conn_name, dtm=datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
)
if not os.path.exists(dest_folder):
os.makedirs(dest_folder)
backup_path = os.path.join(dest_folder, fname)
copyfile(original_path, backup_path)
if orm == "sqlalchemy":
# Remove current file, it will be automatically regenerated
common_clean(conn_name, original_path)
logger.error("Regenerating {}".format(connection.name))
return
# now, let's try to repair it, if possible:
# os.remove(original_path)
fixed_db_path = "{}.2".format(original_path)
with io.open(fixed_db_path, **KWARGS_IO_WRITE) as f:
# If the connection hasn't been opened yet, then open it
try:
for line in connection.connection.iterdump():
f.write(line)
connection.close()
copyfile(fixed_db_path, original_path)
# let's check if the tables are there:
cursor = connection.cursor()
cursor.execute("SELECT name FROM sqlite_master WHERE type='table';")
if len(cursor.fetchall()) == 0: # no way, the db has no tables
regenerate_database(connection)
except (DatabaseError, sqlite3.DatabaseError):
# no way, the db is totally broken
regenerate_database(connection)
finally:
os.remove(fixed_db_path)
|
from pygame import mixer
mixer.init()
mixer.music.load('ex021.mp3')
mixer.music.play()
input()
|
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See LICENSE in the project root
# for license information.
__doc__ = """pytest-based ptvsd tests."""
import colorama
import pytest
# This is only imported to ensure that the module is actually installed and the
# timeout setting in pytest.ini is active, since otherwise most timeline-based
# tests will hang indefinitely.
import pytest_timeout # noqa
colorama.init()
pytest.register_assert_rewrite('tests.helpers')
|
def mergesort(arr):
if len(arr)>1:
mid=len(arr)//2
l=arr[:mid]
r=arr[mid:]
mergesort(l)
mergesort(r)
i=j=k=0
while i<len(l) and j <len(r):
if l[i]<r[j]:
arr[k]=l[i]
i+=1
else:
arr[k]=r[j]
j+=1
k+=1
while i<len(l):
arr[k]=l[i]
i+=1
k+=1
while j<len(r):
arr[k]=r[j]
j+=1
k+=1
return arr
data = [4, 2, 2, 8, 3, 3, 1,45,65,67,87,98,34,56,7788]
mergesort(data)
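# mergesort sorts the list in place (and also returns it):
# print(data)  # [1, 2, 2, 3, 3, 4, 8, 34, 45, 56, 65, 67, 87, 98, 7788]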
|
import dataclasses
@dataclasses.dataclass
class NonPublicMetrics():
impression_count: int
url_link_clicks: int
user_profile_clicks: int
|
def API_error(description):
"""Create an API error message."""
return {"status": "error", "error": description}
def API_fatal(description):
"""Create an API fatal error message."""
return {"status": "fatal", "error": description}
def API_response(*args, **kwargs):
"""Create an API response using provided arguments.
Positional arguments: any number of dicts that will be merged into the response.
Keyword arguments: will be merged into the response."""
r = {"status": "ok"}
for a in args:
if type(a) is dict:
r.update(a)
else:
raise Exception("Unsupported arguments")
if kwargs:
r.update(kwargs)
return r
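# Usage sketch (illustrative values):
# API_response({"user": "alice"}, count=3)
#   -> {"status": "ok", "user": "alice", "count": 3}
# API_error("missing parameter")
#   -> {"status": "error", "error": "missing parameter"}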
|
'''Test script for Homework 3, Computational Photonics, SS 2020: FDTD method.
'''
import numpy as np
from finite_difference_time_domain import fdtd_3d, Fdtd3DAnimation
from matplotlib import pyplot as plt
import time
# dark bluered colormap, registers automatically with matplotlib on import
import bluered_dark
plt.rcParams.update({
'figure.figsize': (12/2.54, 9/2.54),
'figure.subplot.bottom': 0.15,
'figure.subplot.left': 0.165,
'figure.subplot.right': 0.90,
'figure.subplot.top': 0.9,
'axes.grid': False,
'image.cmap': 'bluered_dark',
})
plt.close('all')
# %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
# constants
c = 2.99792458e8 # speed of light [m/s]
mu0 = 4*np.pi*1e-7 # vacuum permeability [Vs/(Am)]
eps0 = 1/(mu0*c**2) # vacuum permittivity [As/(Vm)]
Z0 = np.sqrt(mu0/eps0) # vacuum impedance [Ohm]
# simulation parameters
Nx = 199 # number of grid points in x-direction
Ny = 201 # number of grid points in y-direction
Nz = 5 # number of grid points in z-direction
dr = 30e-9 # grid spacing in [m]
time_span = 10e-15 # duration of simulation [s]
# x coordinates
x = np.arange(-int(np.ceil((Nx-1)/2)), int(np.floor((Nx-1)/2)) + 1)*dr
# y coordinates
y = np.arange(-int(np.ceil((Ny-1)/2)), int(np.floor((Ny-1)/2)) + 1)*dr
# source parameters
freq = 500e12 # pulse [Hz]
tau = 1e-15 # pulse width [s]
t0 = 3 * tau
source_width = 2 * dr # width of Gaussian current dist. [grid points]
# grid midpoints
midx = int(np.ceil((Nx-1)/2))
midy = int(np.ceil((Ny-1)/2))
midz = int(np.ceil((Nz-1)/2))
# choose dt small enough
dt = dr / 2 / c
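# for the 3D Yee scheme the Courant stability condition requires c*dt <= dr/sqrt(3) ≈ 0.577*dr,
# so dt = dr/(2*c) is safely below the limit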
# time array
t = np.arange(0, time_span, dt)
# %% create relative permittivity distribution %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
eps_rel = np.ones((Nx, Ny, Nz))
# %% current distributions %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
# jx = jy = np.zeros(...)
# jz : Gaussion distribution in the xy-plane with a width of 2 grid points,
# constant along z
jx = np.zeros((Nx, Ny, Nz), dtype=np.complex128)
jy = np.copy(jx)
jz = np.ones((1, 1, Nz)) * np.exp(- (np.reshape(y, (1, len(y), 1)) ** 2 +
np.reshape(x, (len(x), 1, 1)) ** 2) / source_width ** 2)
# jz = np.exp(- (np.reshape(x, (1, len(x))) ** 2 +
# np.reshape(y, (len(y), 1)) ** 2) / source_width ** 2)
# output parameters
z_ind = midz # z-index of field output
output_step = 4 # time steps between field output
#%% run simulations %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
times = []
for _ in range(1):
a = time.time()
Ex, Ey, Ez, Hx, Hy, Hz, t =\
fdtd_3d(eps_rel, dr, time_span, freq, tau, jx, jy, jz, "ex",
z_ind, output_step)
b = time.time() - a
times.append(b)
print("Time elapsed {:.4f} in seconds".format(np.mean(times)))
print("Time elapsed stdev {:.4f} in seconds".format(np.std(times) / np.sqrt(len(times))))
x = x[1:]
y = y[1:]
#%% movie of Hx %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
F = Hx*Z0*1e6
titlestr = 'x-Component of Magnetic Field'
cb_label = '$\\Re\\{Z_0H_x\\}$ [µV/m]'
rel_color_range = 1/3
fps = 10
ani = Fdtd3DAnimation(x, y, t, F, titlestr, cb_label, rel_color_range, fps)
plt.show()
##%% movie of Ez %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
F = Ez*1e6
titlestr = 'z-Component of Electric Field'
cb_label = '$\\Re\\{E_z\\}$ [µV/m]'
rel_color_range = 1/3
fps = 10
ani = Fdtd3DAnimation(x, y, t, F, titlestr, cb_label, rel_color_range, fps)
plt.show()
|
#!/usr/bin/python
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# Created by: Anderson Brito
# Email: andersonfbrito@gmail.com
# Python version: Python 3
#
# domAnnot.py -> Given a set of peptides annotated with CDS positions,
# this code creates GFF annotations for Pfam domains
# detected in HMMER searchers. As input it requires a
# fasta file with the annotated peptides, and the output
# from hmmscan (domtblout.txt)
#
#
# Usage: domAnnot.py fastaFile domtblout.txt
#
# Release date: 16/11/2019
# Last update: 16/11/2019
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
from Bio import SeqIO
import sys
peptides = sys.argv[1]
domains = open(sys.argv[2]).readlines()
dThreshold = 1e-03
pThreshold = 1e-03
dPos = {}
fasta_sequences = SeqIO.parse(open(peptides),'fasta')
for fasta in fasta_sequences: # iterate over all fasta sequences
id, seq = fasta.description, fasta.seq
orf = id.split(".")[0]
startOrf = id.strip().split(".")[-1].split("-")[0]
endOrf = id.strip().split(".")[-1].split("-")[1]
sense = id.strip().split(".")[1]
dPos[orf] = startOrf + "_" + endOrf + "_" + sense
outFile = open("-".join(peptides.split("-")[:-1]) + '-domAnnot.gff', 'w')
header = "##gff-version 3\n# seqid source type start end score strand frame attributes\n"
print(header)
outFile.write(header)
genome = ''
for line in domains:
if not line.startswith('#'):
cevalue = line.split()[11]
pevalue = line.split()[6]
orf = line.split()[3].split(".")[0]
if genome == '':
genome = orf.split("-")[0]
if orf in dPos.keys():
if float(cevalue) <= dThreshold and float(pevalue) <= pThreshold:
dcode = line.split()[1].split(".")[0] # domain code
dname = line.split()[0] # domain name
sDom = int(line.split()[17]) # domain start position
eDom = int(line.split()[18]) # domain end position
sense = dPos[orf].split("_")[2]
sorf = int(dPos[orf].split("_")[0]) # orf start position
eorf = int(dPos[orf].split("_")[1]) # orf end position
if sense == "forward":
sense = "+"
sanot = sorf + ((sDom * 3) - 2) - 1
eanot = sorf + (eDom * 3) - 1
else:
sense = "-"
sanot = eorf - (eDom * 3) + 1
eanot = eorf - (sDom * 3) + 3
entry = "\t".join([genome, "-", "misc_feature", str(sanot), str(eanot), ".", sense, ".", "Name="+dname+" ["+cevalue+"]; Note="+dcode])
outFile.write(entry + "\n")
print(entry)
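# Worked example of the coordinate mapping above (illustrative numbers): for a forward-strand
# ORF starting at genome position 100, a domain spanning amino acids 1-1 maps to
# sanot = 100 + (1*3 - 2) - 1 = 100 and eanot = 100 + (1*3) - 1 = 102, i.e. the first codon.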
|
import discord
from discord.ext.commands.converter import EmojiConverter as ec
from discord.ext.commands.converter import PartialEmojiConverter
from emoji import UNICODE_EMOJI_ENGLISH
from redbot.core import commands
from redbot.core.bot import Red
old_tick = commands.context.TICK
old_get_context = Red.get_context
class FakeContext(commands.Context):
tick_emoji = None
def __init__(self, **kwargs):
super().__init__(**kwargs)
async def tick(self, *, message: str = None) -> bool:
"""Add a tick reaction to the command message.
Keyword Arguments
-----------------
message : str, optional
The message to send if adding the reaction doesn't succeed.
Returns
-------
bool
:code:`True` if adding the reaction succeeded.
"""
emoji = (
self.tick_emoji if self.channel.permissions_for(self.me).external_emojis else old_tick
)
return await self.react_quietly(emoji, message=message)
class EmojiConverter(ec):
async def convert(self, ctx, argument):
argument = argument.strip()
if not argument in UNICODE_EMOJI_ENGLISH.keys():
return await super().convert(ctx, argument)
return argument
|
"""Deep Pictorial Gaze architecture."""
from typing import Dict
import numpy as np
import scipy
import tensorflow as tf
from core import BaseDataSource, BaseModel
from datasources import UnityEyes
import util.gaze
class DPG(BaseModel):
"""Deep Pictorial Gaze architecture as introduced in [Park et al. ECCV'18]."""
def __init__(self, tensorflow_session=None, first_layer_stride=2, num_modules=3,
num_feature_maps=32, growth_rate=8, extra_tags=[], **kwargs):
"""Specify DPG-specific parameters."""
self._hg_first_layer_stride = first_layer_stride
self._hg_num_modules = num_modules
self._hg_num_feature_maps= num_feature_maps
self._dn_growth_rate = growth_rate
self._extra_tags = extra_tags
# Call parent class constructor
super().__init__(tensorflow_session, **kwargs)
_hg_first_layer_stride = 2
_hg_num_modules = 3
_hg_num_feature_maps = 32
_hg_num_residual_blocks = 1
_hg_num_gazemaps = 2
_dn_growth_rate = 8
_dn_compression_factor = 0.5
_dn_num_layers_per_block = (4, 4, 4, 4)
_dn_num_dense_blocks = len(_dn_num_layers_per_block)
@property
def identifier(self):
"""Identifier for model based on data sources and parameters."""
first_data_source = next(iter(self._train_data.values()))
input_tensors = first_data_source.output_tensors
if self._data_format == 'NHWC':
_, eh, ew, _ = input_tensors['eye'].shape.as_list()
else:
_, _, eh, ew = input_tensors['eye'].shape.as_list()
return 'DPG_i%dx%d_f%dx%d_n%d_m%d_k%d_%s' % (
ew, eh,
int(ew / self._hg_first_layer_stride),
int(eh / self._hg_first_layer_stride),
self._hg_num_feature_maps, self._hg_num_modules,
self._dn_growth_rate,
'-'.join(self._extra_tags) if len(self._extra_tags) > 0 else '',
)
def train_loop_pre(self, current_step):
"""Run this at beginning of training loop."""
# Step learning rate decay
multiplier = np.power(0.1, int(current_step / 10000))
self._tensorflow_session.run(self.assign_learning_rate_multiplier, feed_dict={
self.learning_rate_multiplier_placeholder: multiplier,
})
_column_of_ones = None
_column_of_zeros = None
def _augment_training_images(self, images, mode):
if mode == 'test':
return images
with tf.variable_scope('augment'):
if self._data_format == 'NCHW':
images = tf.transpose(images, perm=[0, 2, 3, 1])
n, h, w, _ = images.shape.as_list()
if self._column_of_ones is None:
self._column_of_ones = tf.ones((n, 1))
self._column_of_zeros = tf.zeros((n, 1))
transforms = tf.concat([
self._column_of_ones,
self._column_of_zeros,
tf.truncated_normal((n, 1), mean=0, stddev=.05*w),
self._column_of_zeros,
self._column_of_ones,
tf.truncated_normal((n, 1), mean=0, stddev=.05*h),
self._column_of_zeros,
self._column_of_zeros,
], axis=1)
images = tf.contrib.image.transform(images, transforms, interpolation='BILINEAR')
if self._data_format == 'NCHW':
images = tf.transpose(images, perm=[0, 3, 1, 2])
return images
def build_model(self, data_sources: Dict[str, BaseDataSource], mode: str):
"""Build model."""
data_source = next(iter(data_sources.values()))
input_tensors = data_source.output_tensors
x = input_tensors['eye']
y1 = input_tensors['gazemaps'] if 'gazemaps' in input_tensors else None
y2 = input_tensors['gaze'] if 'gaze' in input_tensors else None
with tf.variable_scope('input_data'):
# self.summary.feature_maps('eyes', x, data_format=self._data_format_longer)
if y1 is not None:
self.summary.feature_maps('gazemaps', y1, data_format=self._data_format_longer)
outputs = {}
loss_terms = {}
metrics = {}
# Lightly augment training data
x = self._augment_training_images(x, mode)
with tf.variable_scope('hourglass'):
# Prepare for Hourglass by downscaling via conv
with tf.variable_scope('pre'):
n = self._hg_num_feature_maps
x = self._apply_conv(x, num_features=n, kernel_size=7,
stride=self._hg_first_layer_stride)
x = tf.nn.relu(self._apply_bn(x))
x = self._build_residual_block(x, n, 2*n, name='res1')
x = self._build_residual_block(x, 2*n, n, name='res2')
# Hourglass blocks
x_prev = x
gmap = None
for i in range(self._hg_num_modules):
with tf.variable_scope('hg_%d' % (i + 1)):
x = self._build_hourglass(x, steps_to_go=4, num_features=self._hg_num_feature_maps)
x, gmap = self._build_hourglass_after(
x_prev, x, do_merge=(i < (self._hg_num_modules - 1)),
)
x_prev = x
if y1 is not None:
# Cross-entropy loss
metrics['gazemaps_ce'] = -tf.reduce_mean(tf.reduce_sum(
y1 * tf.log(tf.clip_by_value(gmap, 1e-10, 1.0)), # avoid NaN
axis=[1, 2, 3]))
# metrics['gazemaps_ce'] = tf.losses.softmax_cross_entropy(
# tf.reshape(y1, (self._batch_size, -1)),
# tf.reshape(gmap, (self._batch_size, -1)),
# loss_collection=None,
# )
x = gmap
outputs['gazemaps'] = gmap
self.summary.feature_maps('bottleneck', gmap, data_format=self._data_format_longer)
with tf.variable_scope('densenet'):
# DenseNet blocks to regress to gaze
for i in range(self._dn_num_dense_blocks):
with tf.variable_scope('block%d' % (i + 1)):
x = self._apply_dense_block(x,
num_layers=self._dn_num_layers_per_block[i])
if i == self._dn_num_dense_blocks - 1:
break
with tf.variable_scope('trans%d' % (i + 1)):
x = self._apply_transition_layer(x)
# Global average pooling
with tf.variable_scope('post'):
x = self._apply_bn(x)
x = tf.nn.relu(x)
if self._data_format == 'NCHW':
x = tf.reduce_mean(x, axis=[2, 3])
else:
x = tf.reduce_mean(x, axis=[1, 2])
x = tf.contrib.layers.flatten(x)
# Output layer
with tf.variable_scope('output'):
x = self._apply_fc(x, 2)
outputs['gaze'] = x
if y2 is not None:
metrics['gaze_mse'] = tf.reduce_mean(tf.squared_difference(x, y2))
metrics['gaze_ang'] = util.gaze.tensorflow_angular_error_from_pitchyaw(y2, x)
# Combine two loss terms
if y1 is not None and y2 is not None:
loss_terms['combined_loss'] = 1e-5*metrics['gazemaps_ce'] + metrics['gaze_mse']
# Define outputs
return outputs, loss_terms, metrics
def _apply_conv(self, tensor, num_features, kernel_size=3, stride=1):
return tf.layers.conv2d(
tensor,
num_features,
kernel_size=kernel_size,
strides=stride,
padding='SAME',
kernel_initializer=tf.truncated_normal_initializer(mean=0.0, stddev=0.01),
kernel_regularizer=tf.contrib.layers.l2_regularizer(1e-4),
bias_initializer=tf.zeros_initializer(),
data_format=self._data_format_longer,
name='conv',
)
def _apply_fc(self, tensor, num_outputs):
return tf.layers.dense(
tensor,
num_outputs,
use_bias=True,
kernel_initializer=tf.truncated_normal_initializer(mean=0.0, stddev=0.01),
kernel_regularizer=tf.contrib.layers.l2_regularizer(1e-4),
bias_initializer=tf.zeros_initializer(),
name='fc',
)
def _apply_pool(self, tensor, kernel_size=3, stride=2):
tensor = tf.layers.max_pooling2d(
tensor,
pool_size=kernel_size,
strides=stride,
padding='SAME',
data_format=self._data_format_longer,
name='pool',
)
return tensor
def _apply_bn(self, tensor):
return tf.contrib.layers.batch_norm(
tensor,
scale=True,
center=True,
is_training=self.use_batch_statistics,
trainable=True,
data_format=self._data_format,
updates_collections=None,
)
def _build_residual_block(self, x, num_in, num_out, name='res_block'):
with tf.variable_scope(name):
half_num_out = max(int(num_out/2), 1)
c = x
with tf.variable_scope('conv1'):
c = tf.nn.relu(self._apply_bn(c))
c = self._apply_conv(c, num_features=half_num_out, kernel_size=1, stride=1)
with tf.variable_scope('conv2'):
c = tf.nn.relu(self._apply_bn(c))
c = self._apply_conv(c, num_features=half_num_out, kernel_size=3, stride=1)
with tf.variable_scope('conv3'):
c = tf.nn.relu(self._apply_bn(c))
c = self._apply_conv(c, num_features=num_out, kernel_size=1, stride=1)
with tf.variable_scope('skip'):
if num_in == num_out:
s = tf.identity(x)
else:
s = self._apply_conv(x, num_features=num_out, kernel_size=1, stride=1)
x = c + s
return x
def _build_hourglass(self, x, steps_to_go, num_features, depth=1):
with tf.variable_scope('depth%d' % depth):
# Upper branch
up1 = x
for i in range(self._hg_num_residual_blocks):
up1 = self._build_residual_block(up1, num_features, num_features,
name='up1_%d' % (i + 1))
# Lower branch
low1 = self._apply_pool(x, kernel_size=2, stride=2)
for i in range(self._hg_num_residual_blocks):
low1 = self._build_residual_block(low1, num_features, num_features,
name='low1_%d' % (i + 1))
# Recursive
low2 = None
if steps_to_go > 1:
low2 = self._build_hourglass(low1, steps_to_go - 1, num_features, depth=depth+1)
else:
low2 = low1
for i in range(self._hg_num_residual_blocks):
low2 = self._build_residual_block(low2, num_features, num_features,
name='low2_%d' % (i + 1))
# Additional residual blocks
low3 = low2
for i in range(self._hg_num_residual_blocks):
low3 = self._build_residual_block(low3, num_features, num_features,
name='low3_%d' % (i + 1))
# Upsample
if self._data_format == 'NCHW': # convert to NHWC
low3 = tf.transpose(low3, (0, 2, 3, 1))
up2 = tf.image.resize_bilinear(
low3,
up1.shape[1:3] if self._data_format == 'NHWC' else up1.shape[2:4],
align_corners=True,
)
if self._data_format == 'NCHW': # convert back from NHWC
up2 = tf.transpose(up2, (0, 3, 1, 2))
return up1 + up2
def _build_hourglass_after(self, x_prev, x_now, do_merge=True):
with tf.variable_scope('after'):
for j in range(self._hg_num_residual_blocks):
x_now = self._build_residual_block(x_now, self._hg_num_feature_maps,
self._hg_num_feature_maps,
name='after_hg_%d' % (j + 1))
x_now = self._apply_conv(x_now, self._hg_num_feature_maps, kernel_size=1, stride=1)
x_now = self._apply_bn(x_now)
x_now = tf.nn.relu(x_now)
with tf.variable_scope('gmap'):
gmap = self._apply_conv(x_now, self._hg_num_gazemaps, kernel_size=1, stride=1)
x_next = x_now
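# When another hourglass module follows, project the gazemaps back into feature-map space and add them, together with the previous stage's features, to form the input of the next hourglass.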
if do_merge:
with tf.variable_scope('merge'):
with tf.variable_scope('gmap'):
x_gmaps = self._apply_conv(gmap, self._hg_num_feature_maps, kernel_size=1, stride=1)
with tf.variable_scope('x'):
x_now = self._apply_conv(x_now, self._hg_num_feature_maps, kernel_size=1, stride=1)
x_next += x_prev + x_gmaps
# Perform softmax on gazemaps
if self._data_format == 'NCHW':
n, c, h, w = gmap.shape.as_list()
gmap = tf.reshape(gmap, (n, -1))
gmap = tf.nn.softmax(gmap)
gmap = tf.reshape(gmap, (n, c, h, w))
else:
n, h, w, c = gmap.shape.as_list()
gmap = tf.transpose(gmap, perm=[0, 3, 1, 2])
gmap = tf.reshape(gmap, (n, -1))
gmap = tf.nn.softmax(gmap)
gmap = tf.reshape(gmap, (n, c, h, w))
gmap = tf.transpose(gmap, perm=[0, 2, 3, 1])
return x_next, gmap
def _apply_dense_block(self, x, num_layers):
assert isinstance(num_layers, int) and num_layers > 0
c_index = 1 if self._data_format == 'NCHW' else 3
x_prev = x
for i in range(num_layers):
with tf.variable_scope('layer%d' % (i + 1)):
n = x.shape.as_list()[c_index]
with tf.variable_scope('bottleneck'):
x = self._apply_composite_function(x,
num_features=min(n, 4*self._dn_growth_rate),
kernel_size=1)
with tf.variable_scope('composite'):
x = self._apply_composite_function(x, num_features=self._dn_growth_rate,
kernel_size=3)
if self._data_format == 'NCHW':
x = tf.concat([x, x_prev], axis=1)
else:
x = tf.concat([x, x_prev], axis=-1)
x_prev = x
return x
def _apply_transition_layer(self, x):
c_index = 1 if self._data_format == 'NCHW' else 3
x = self._apply_composite_function(
x, num_features=int(self._dn_compression_factor * x.shape.as_list()[c_index]),
kernel_size=1)
x = tf.layers.average_pooling2d(x, pool_size=2, strides=2, padding='valid',
data_format=self._data_format_longer)
return x
def _apply_composite_function(self, x, num_features=_dn_growth_rate, kernel_size=3):
x = self._apply_bn(x)
x = tf.nn.relu(x)
x = self._apply_conv(x, num_features=num_features, kernel_size=kernel_size, stride=1)
return x
|
# Copyright 2018, Michael DeHaan LLC
# License: Apache License Version 2.0
# -------------------------------------------------------------------------
# urls.py - the standard web routes configuration file for Django
# --------------------------------------------------------------------------
from django.conf.urls import url
from django.contrib import admin
from django.contrib.auth.views import LoginView, LogoutView
from django.urls import path
from vespene.views import index, webhook_post
from vespene.views.fileserving import serve_file, serve_dir
from vespene.views.build import BuildView
from vespene.views.pipeline import PipelineView
from vespene.views.project import ProjectView
from vespene.views.service_login import ServiceLoginView
from vespene.views.snippet import SnippetView
from vespene.views.ssh_key import SshKeyView
from vespene.views.variable_set import VariableSetView
from vespene.views.worker_pool import WorkerPoolView
from vespene.views.organization import OrganizationView
from vespene.views.stage import StageView
from vespene.views.user import UserView
from vespene.views.group import GroupView
urlpatterns = [
# Home page
path('', index, name='index'),
path('ui', index, name='index'),
path('ui/', index, name='index'),
# Login/Logout
path('login/', LoginView.as_view(template_name='registration/login.html'), name="login"),
path('logout/', LogoutView.as_view(), name="logout"),
# Admin
path('admin/', admin.site.urls),
# Projects
path('ui/projects', ProjectView.list, name='project_list'),
path('ui/projects/new', ProjectView.new, name='project_new'),
path('ui/projects/<pk>/edit', ProjectView.edit, name='project_edit'),
path('ui/projects/<pk>/delete', ProjectView.delete, name='project_delete'),
path('ui/projects/<pk>/detail', ProjectView.detail, name='project_detail'),
path('ui/projects/<pk>/start', ProjectView.project_start, name='project_start'),
path('ui/projects/<pk>/start_prompt', ProjectView.project_start_prompt, name='project_start_prompt'),
# Builds
path('ui/builds', BuildView.list, name='build_list'),
path('ui/builds/<pk>/detail', BuildView.detail, name='build_detail'),
path('ui/builds/<pk>/stop', BuildView.build_stop, name='build_stop'),
# Snippets
path('ui/snippets', SnippetView.list, name='snippet_list'),
path('ui/snippets/new', SnippetView.new, name='snippet_new'),
path('ui/snippets/<pk>/edit', SnippetView.edit, name='snippet_edit'),
path('ui/snippets/<pk>/delete', SnippetView.delete, name='snippet_delete'),
path('ui/snippets/<pk>/detail', SnippetView.detail, name='snippet_detail'),
# Variable Sets
path('ui/variable_sets', VariableSetView.list, name='variable_set_list'),
path('ui/variable_sets/new', VariableSetView.new, name='variable_set_new'),
path('ui/variable_sets/<pk>/edit', VariableSetView.edit, name='variable_set_edit'),
path('ui/variable_sets/<pk>/delete', VariableSetView.delete, name='variable_set_delete'),
path('ui/variable_sets/<pk>/detail', VariableSetView.detail, name='variable_set_detail'),
# SSH Access
path('ui/ssh_keys', SshKeyView.list, name='ssh_key_list'),
path('ui/ssh_keys/new', SshKeyView.new, name='ssh_key_new'),
path('ui/ssh_keys/<pk>/edit', SshKeyView.edit, name='ssh_key_edit'),
path('ui/ssh_keys/<pk>/delete', SshKeyView.delete, name='ssh_key_delete'),
path('ui/ssh_keys/<pk>/detail', SshKeyView.detail, name='ssh_key_detail'),
# Service Logins
path('ui/service_logins', ServiceLoginView.list, name='service_login_list'),
path('ui/service_logins/new', ServiceLoginView.new, name='service_login_new'),
path('ui/service_logins/<pk>/edit', ServiceLoginView.edit, name='service_login_edit'),
path('ui/service_logins/<pk>/delete', ServiceLoginView.delete, name='service_login_delete'),
path('ui/service_logins/<pk>/detail', ServiceLoginView.detail, name='service_login_detail'),
# Worker Pools
path('ui/worker_pools', WorkerPoolView.list, name='worker_pool_list'),
path('ui/worker_pools/new', WorkerPoolView.new, name='worker_pool_new'),
path('ui/worker_pools/<pk>/edit', WorkerPoolView.edit, name='worker_pool_edit'),
path('ui/worker_pools/<pk>/delete', WorkerPoolView.delete, name='worker_pool_delete'),
path('ui/worker_pools/<pk>/detail', WorkerPoolView.detail, name='worker_pool_detail'),
# Stages
path('ui/stages', StageView.list, name='stage_list'),
path('ui/stages/new', StageView.new, name='stage_new'),
path('ui/stages/<pk>/edit', StageView.edit, name='stage_edit'),
path('ui/stages/<pk>/delete', StageView.delete, name='stage_delete'),
path('ui/stages/<pk>/detail', StageView.detail, name='stage_detail'),
# Users
path('ui/users', UserView.list, name='user_list'),
path('ui/users/new', UserView.new, name='user_new'),
path('ui/users/<pk>/edit', UserView.edit, name='user_edit'),
path('ui/users/<pk>/delete', UserView.delete, name='user_delete'),
path('ui/users/<pk>/detail', UserView.detail, name='user_detail'),
# Groups
path('ui/groups', GroupView.list, name='group_list'),
path('ui/groups/new', GroupView.new, name='group_new'),
path('ui/groups/<pk>/edit', GroupView.edit, name='group_edit'),
path('ui/groups/<pk>/delete', GroupView.delete, name='group_delete'),
path('ui/groups/<pk>/detail', GroupView.detail, name='group_detail'),
# Pipelines
path('ui/pipelines', PipelineView.list, name='pipeline_list'),
path('ui/pipelines/new', PipelineView.new, name='pipeline_new'),
path('ui/pipelines/<pk>/edit', PipelineView.edit, name='pipeline_edit'),
path('ui/pipelines/<pk>/delete', PipelineView.delete, name='pipeline_delete'),
path('ui/pipelines/<pk>/detail', PipelineView.detail, name='pipeline_detail'),
path('ui/pipelines/<pk>/map', PipelineView.map, name='pipeline_map'),
# Organizations
path('ui/organizations', OrganizationView.list, name='organization_list'),
path('ui/organizations/new', OrganizationView.new, name='organization_new'),
path('ui/organizations/<pk>/edit', OrganizationView.edit, name='organization_edit'),
path('ui/organizations/<pk>/delete', OrganizationView.delete, name='organization_delete'),
path('ui/organizations/<pk>/detail', OrganizationView.detail, name='organization_detail'),
# Webhooks
path('webhooks', webhook_post, name='webhook_post'),
path('webhooks/', webhook_post, name='webhook_post'),
# Fileserving
path('srv/<pk>/f/<location>', serve_file, name="serve_file"),
path('srv/<pk>/d', serve_dir, name="serve_dir")
]
|
import unittest
import numpy as np
import chainer
import chainer.functions as F
import chainer.links as L
import onnx
import onnx_chainer
from chainer import testing
from onnx_chainer.testing import test_mxnet
MXNET_OPSET_VERSION = {
'elu': (1, 6),
'hard_sigmoid': (6,),
'leaky_relu': (6,),
'log_softmax': (1,),
'relu': (1, 6),
'sigmoid': (1, 6),
'softmax': (1,),
'softplus': (1,),
'tanh': (1, 6),
}
@testing.parameterize(
{'name': 'elu'},
{'name': 'hard_sigmoid'},
{'name': 'leaky_relu'},
{'name': 'log_softmax'},
{'name': 'relu'},
{'name': 'sigmoid'},
{'name': 'softmax'},
{'name': 'softplus'},
{'name': 'tanh'},
)
class TestActivations(unittest.TestCase):
def setUp(self):
class Model(chainer.Chain):
def __init__(self, ops):
super(Model, self).__init__()
self.ops = ops
def __call__(self, x):
return self.ops(x)
ops = getattr(F, self.name)
self.model = Model(ops)
self.x = np.random.randn(1, 5).astype(np.float32)
self.fn = self.name + '.onnx'
def test_compatibility(self):
if MXNET_OPSET_VERSION[self.name] is not None:
for mxnet_opset_version in MXNET_OPSET_VERSION[self.name]:
test_mxnet.check_compatibility(
self.model, self.x, self.fn, opset_version=mxnet_opset_version)
for opset_version in range(1, onnx.defs.onnx_opset_version() + 1):
onnx_chainer.export(
self.model, self.x, opset_version=opset_version)
@testing.parameterize(
{'opset_version': 6},
{'opset_version': 7},
)
class TestPReLU(unittest.TestCase):
def setUp(self):
class Model(chainer.Chain):
def __init__(self):
super(Model, self).__init__()
with self.init_scope():
self.prelu = L.PReLU()
def __call__(self, x):
return self.prelu(x)
self.model = Model()
self.x = np.zeros((1, 5), dtype=np.float32)
self.fn = 'PReLU.onnx'
def test_compatibility(self):
test_mxnet.check_compatibility(
self.model, self.x, self.fn, opset_version=self.opset_version)
onnx_chainer.export(
self.model, self.x, opset_version=self.opset_version)
|
import json
import os
dir_path = os.path.dirname(os.path.realpath(__file__))
with open(os.path.join(dir_path, 'metafile.schema.json')) as f:
metafile=json.load(f)
|
print('\033[1;93m-=-\033[m' * 15)
print(f'\033[1;31m{"LARGEST AND SMALLEST ON THE LIST":^45}\033[m', )
print('\033[1;93m-=-\033[m' * 15)
numbers = list()
big_position = list()
small_position = list()
numbers.append(int(input(f'Type a number for the position {0}: ')))
smallest = largest = numbers[0]
for i in range(1, 5):
numbers.append(int(input(f'Type a number for the position {i}: ')))
if numbers[i] > largest:
largest = numbers[i]
if numbers[i] < smallest:
smallest = numbers[i]
print('-=-' * 15)
print('You entered the values: ', end='')
for n in numbers:
print(n, end=' ')
for i in range(0, 5):
if numbers[i] == largest:
big_position.append(i)
if numbers[i] == smallest:
small_position.append(i)
print(f'\nThe largest number entered was: {largest} in the positions ', end='')
for n in big_position:
print(f'{n}... ', end='')
print(f'\nThe smallest number entered was: {smallest} in the positions ', end='')
for n in small_position:
print(f'{n}... ', end='')
|
d = {
'0': [" "],
'2': ['a', 'b', 'c'],
'3': ['d', 'e', 'f'],
'4': ['g', 'h', 'i'],
'5': ['j', 'k', 'l'],
'6': ['m', 'n', 'o'],
'7': ['p', 'q', 'r', 's'],
'8': ['t', 'u', 'v'],
'9': ['w', 'x', 'y', 'z']
}
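# Phone-keypad encoding: each letter is emitted as its key digit repeated by its position on that key; a space marks the pause needed when two consecutive letters share the same key.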
n = int(input())
for i in range(n):
t = input()
prev = ''
print("Case #{}: ".format(i + 1), end='')
for j in t:
ok = True
num = ''
x = 0
for key, val in d.items():
if j in val:
num = key
x = val.index(j) + 1
if (num == prev):
print(" ", end='')
print(num * x, end='')
else:
print(num * x, end='')
prev = num
print("")
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.1 on 2018-05-07 01:30
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('standard', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='project',
name='paleocore_appname',
field=models.CharField(choices=[('compressor', 'compressor'), ('taggit', 'taggit'), ('modelcluster', 'modelcluster'), ('wagtail.contrib.wagtailsitemaps', 'wagtail.contrib.wagtailsitemaps'), ('wagtail.contrib.wagtailsearchpromotions', 'wagtail.contrib.wagtailsearchpromotions'), ('wagtail.contrib.settings', 'wagtail.contrib.settings'), ('wagtail.wagtailforms', 'wagtail.wagtailforms'), ('wagtail.wagtailredirects', 'wagtail.wagtailredirects'), ('wagtail.wagtailembeds', 'wagtail.wagtailembeds'), ('wagtail.wagtailsites', 'wagtail.wagtailsites'), ('wagtail.wagtailusers', 'wagtail.wagtailusers'), ('wagtail.wagtailsnippets', 'wagtail.wagtailsnippets'), ('wagtail.wagtaildocs', 'wagtail.wagtaildocs'), ('wagtail.wagtailimages', 'wagtail.wagtailimages'), ('wagtail.wagtailsearch', 'wagtail.wagtailsearch'), ('wagtail.wagtailadmin', 'wagtail.wagtailadmin'), ('wagtail.wagtailcore', 'wagtail.wagtailcore'), ('wagtailfontawesome', 'wagtailfontawesome'), ('wagalytics', 'wagalytics'), ('cachalot', 'cachalot'), ('utils', 'utils'), ('pages', 'pages'), ('blog', 'blog'), ('events', 'events'), ('contact', 'contact'), ('people', 'people'), ('photo_gallery', 'photo_gallery'), ('products', 'products'), ('documents_gallery', 'documents_gallery'), ('account', 'account'), ('foundation_formtags', 'foundation_formtags'), ('wagtail_feeds', 'wagtail_feeds'), ('leaflet', 'leaflet'), ('djgeojson', 'djgeojson'), ('wagtailgeowidget', 'wagtailgeowidget'), ('mapwidgets', 'mapwidgets'), ('projects', 'projects'), ('cc', 'cc'), ('fc', 'fc'), ('gdb', 'gdb'), ('lgrp', 'lgrp'), ('mlp', 'mlp'), ('drp', 'drp'), ('hrp', 'hrp'), ('omo_mursi', 'omo_mursi'), ('origins', 'origins'), ('standard', 'standard')], max_length=200, null=True),
),
]
|
from math import sqrt
import numpy as np
from PIL import Image
def load_image(filepath: str) -> Image:
return Image.open(filepath).convert("L")
def grad_inf(ims: Image) -> Image:
pixs = np.asarray(ims).astype("uint8")
pixd = pixs.copy()
height, width = pixs.shape
for j in range(1, height-1):
for i in range(1, width-1):
acc = max(abs(int(pixs[j-1, i]) - int(pixs[j, i])), abs(int(pixs[j+1, i]) - int(pixs[j, i])), abs(
int(pixs[j, i-1]) - int(pixs[j, i])), abs(int(pixs[j, i+1]) - int(pixs[j, i])))
pixd[j, i] = acc
return Image.fromarray(pixd)
def grad_plus_inf(ims: Image) -> Image:
pixs = np.asarray(ims).astype("uint8")
pixd = pixs.copy()
height, width = pixs.shape
for j in range(1, height-1):
for i in range(1, width-1):
pixd[j, i] = max(int(pixs[j-1, i]) - int(pixs[j, i]), int(pixs[j+1, i]) - int(pixs[j, i]), int(pixs[j, i-1]) - int(pixs[j, i]), int(pixs[j, i+1]) - int(pixs[j, i]), 0)
return Image.fromarray(pixd)
def grad_minus_inf(ims: Image) -> Image:
pixs = np.asarray(ims).astype("uint8")
pixd = pixs.copy()
height, width = pixs.shape
for j in range(1, height-1):
for i in range(1, width-1):
pixd[j, i] = max(int(pixs[j, i]) - int(pixs[j-1, i]), int(pixs[j, i]) - int(pixs[j+1, i]), int(pixs[j, i]) - int(pixs[j, i-1]), int(pixs[j, i]) - int(pixs[j, i+1]), 0)
return Image.fromarray(pixd)
def dilation(ims: Image, n: int) -> Image:
pixs = np.asarray(ims).astype("uint8")
pixd = pixs.copy()
height, width = pixs.shape
for _ in range(n):
tmp = pixd.copy()
for j in range(1, height-1):
for i in range(1, width-1):
tmp[j, i] = max(pixd[j-1, i], pixd[j+1, i],
pixd[j, i-1], pixd[j, i+1], pixd[j, i])
pixd = tmp
return Image.fromarray(pixd)
def erosion(ims: Image, n: int) -> Image:
pixs = np.asarray(ims).astype("uint8")
pixd = pixs.copy()
height, width = pixs.shape
for _ in range(n):
tmp = pixd.copy()
for j in range(1, height-1):
for i in range(1, width-1):
tmp[j, i] = min(pixd[j-1, i], pixd[j+1, i],
pixd[j, i-1], pixd[j, i+1], pixd[j, i])
pixd = tmp
return Image.fromarray(pixd)
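# A minimal sketch (not part of the original script): morphological opening and
# closing can be composed from the erosion/dilation primitives above.
def opening(ims: Image, n: int) -> Image:
    # erosion removes small bright specks, dilation restores the surviving shapes
    return dilation(erosion(ims, n), n)
def closing(ims: Image, n: int) -> Image:
    # dilation fills small dark holes, erosion restores the surviving shapes
    return erosion(dilation(ims, n), n)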
if __name__ == "__main__":
image = load_image("image.jpg")
image_contours = grad_inf(image)
image_contours.show("Contours image test")
image_contours.save("image_grad.png")
image_contours_plus = grad_plus_inf(image)
image_contours_plus.show("Contours image test")
image_contours_plus.save("image_grad_plus.png")
image_contours_minus = grad_minus_inf(image)
image_contours_minus.show("Contours image test")
image_contours_minus.save("image_grad_minus.png")
dilated_image = dilation(image, 10)
dilated_image.show("Dilation image test")
image_contours.save("dilated_image.png")
eroded_image = erosion(image, 10)
eroded_image.show("Erosion image test")
image_contours.save("eroded_image.png")
|
# Copyright (c) 2013, Minda Sai Pvt LTd and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
from frappe import _
import math
from calendar import monthrange
from datetime import datetime,timedelta,date
from frappe.utils import add_days, add_months,get_first_day,get_last_day
from dateutil.rrule import *
import json
def execute(filters=None):
if not filters:
filters = {}
columns = get_columns()
data = []
row = []
employee = employee_details(filters)
for e in employee:
start_date = filters.get("start_date")
month_start = get_first_day(start_date)
from_date = add_months(month_start,-3)
# frappe.errprint(month_start)
# frappe.errprint(from_date)
row = [e.biometric_id,e.employee_name,e.date_of_joining,e.status]
# att = att_details(e.biometric_id)
acc = acc_details(e.biometric_id,from_date)
sac = sac_details(e.biometric_id,from_date)
ae = ae_details(e.biometric_id,from_date)
qe = qe_details(e.biometric_id,from_date)
# itm = itm_details(e.biometric_id)
# st = st_details(e.biometric_id)
# cb = cb_details(e.biometric_id)
# pk = pk_details(e.biometric_id)
#
if e.line is None:
row += ["Pending"]
else:
row += [e.line]
# if e.shift not in ["A","B","C","G"]:
# row += ["Pending"]
# else:
# row += [e.shift]
if acc:
row += ["Completed"]
else:
row += ["Pending"]
if sac == 1:
row += ["Completed"]
else:
row += ["Pending"]
if ae == 1:
row += ["Completed"]
else:
row += ["Pending"]
if qe == 1:
row += ["Completed"]
else:
row += ["Pending"]
# if e.department not in ["HR", "Accounts","Finance & Accounts","Purchase"]:
# if pk == 1:
# row += ["Completed"]
# else:
# row += ["Pending"]
# else:
# row += ["NA"]
# if e.department not in ["HR", "Accounts","Finance & Accounts","Purchase"]:
# if ita == 1:
# row += ["Completed","NA"]
# elif itm == 1:
# row += ["NA","Completed"]
# else:
# row += ["Pending","Pending"]
# else:
# row += ["NA"]
data.append(row)
return columns, data
def get_columns():
columns = [
_("Employee") + ":Link/Employee:100",
_("Employee Name") + ":Data:100",
_("Date of Joining") + ":Date:100",
_("Status") + ":Select:100",
_("Line") + ":Link/Line:100",
# _("Shift") + ":Link/Shift:100",
_("Auto Cutting and Crimping") + ":Link/Auto Cutting and Crimping:200",
_("Semi Auto Crimping") + ":Link/Semi Auto Crimping:200",
_("Assembly Evaluation") + ":Link/Assembly Evaluation:150",
_("Quality Evaluation") + ":Link/Quality Evaluation:150"
# _("Induction Test Machine") + ":Link/Induction Training Machine Area Crimping:100",
]
return columns
def get_conditions(filters):
conditions = ""
# if filters.get("employee"):conditions += "AND att.employee = '%s'" % filters["employee"]
# if filters.get("start_date"): conditions += "and c.start_date >= %(start_date)s"
# if filters.get("to_date"): conditions += " and c.date_of_skill_evaluatation <= %(to_date)s"
return conditions, filters
def employee_details(filters):
employee = frappe.db.sql(
"""select biometric_id,employee_name,shift,status,department,designation,date_of_joining,line from `tabEmployee` where status = 'Active' """,as_dict = 1)
# frappe.errprint(employee)
return employee
def acc_details(emp_id,from_date):
if emp_id:
acc = frappe.db.sql(
"""select name from `tabAuto Cutting and Crimping` where employee_code=%s and date_of_skill_evaluatation >= %s """,(emp_id,from_date))
if acc:
return True
else:
return False
def sac_details(emp_id,from_date):
if emp_id:
sac = frappe.db.sql(
"""select name from `tabSemi Auto Crimping` where employee_code=%s and date_of_skill_evaluatation >= %s """,(emp_id,from_date))
if sac:
return True
else:
return False
def ae_details(emp_id,from_date):
if emp_id:
ae = frappe.db.sql(
"""select name from `tabAssembly Evaluation` where employee_code=%s and date_of_skill_evaluatation >= %s """,(emp_id,from_date))
if ae:
return True
else:
return False
def qe_details(emp_id,from_date):
if emp_id:
qe = frappe.db.sql(
"""select name from `tabQuality Evaluation` where employee_code=%s and date_of_skill_evaluatation >= %s """,(emp_id,from_date))
if qe:
frappe.errprint(qe)
return True
else:
return False
# def itm_details(emp_id):
# if emp_id:
# ita = frappe.db.sql(
# """select * from `tabInduction Training Machine Area Crimping` where employee_code=%s """,(emp_id),as_dict = 1)
# if ita:
# return True
# else:
# return False
# def att_details(emp_id):
# today = date.today()
# yesterday = add_days(today,-1)
# day_before_yesterday = add_days(today,-2)
# if emp_id:
# att = frappe.db.sql(
# """select attendance_date from `tabAttendance` where employee = %s and attendance_date in (%s,%s,%s) """,(emp_id,today,yesterday,day_before_yesterday),as_dict = 1)
# if len(att) > 0:
# return True
# else:
# return False
# def st_details(emp_id):
# if emp_id:
# st = frappe.db.sql(
# """select * from `tabSelection Test` where employee_code=%s """,(emp_id),as_dict = 1)
# if st:
# return True
# else:
# return False
# def cb_details(emp_id):
# if emp_id:
# cb = frappe.db.sql(
# """select * from `tabColor Blindness Test` where employee_code=%s """,(emp_id),as_dict = 1)
# if cb:
# return True
# else:
# return False
# def pk_details(emp_id):
# if emp_id:
# pk = frappe.db.sql(
# """select * from `tabNew Joinees Practical Knowledge Verification` where employee_code=%s """,(emp_id),as_dict = 1)
# if pk:
# return True
# else:
# return False
|
'''
The snippet contains:
How to declare and use a class with attributes and methods
How to declare a class using inheritance
grabbed from
http://glowingpython.blogspot.com/2011/04/how-to-define-class-using-inheritance.html
'''
class Point2D:
""" a point in a 2D space """
name = "A dummy name" # attribute
def __init__(self,x,y): # constructor
self.x = x
self.y = y
def product(self,p): # method
""" product with another point """
return self.x*p.x + self.y*p.y
def print_2D(self):
print("(", self.x, ",", self.y, ")")
class Point3D(Point2D): # Point3D inherit Point2D
def __init__(self,x,y,z):
self.x = x
self.y = y
self.z = z
def print_3D(self):
print("(", self.x, ",", self.y, ",", self.z, ")")
## just test our classes ##
p2 = Point2D(1,2)
p2.print_2D()
print(p2.product(Point2D(2,1)))
p3 = Point3D(5,4,3)
p3.print_2D()
p3.print_3D() # inherited method
print(p3.name) # inherited attribute
print(dir(Point2D))
print(dir(Point3D)) # dir returns a list with the attributes and methods of the class
|
#!/usr/bin/python3
import os
import subprocess
import json
from iot_message.exception import DecryptNotFound
from iot_message.exception import NoDecodersDefined
__author__ = 'Bartosz Kościów'
class Message(object):
"""Class Message"""
protocol = "iot:1"
chip_id = None
node_name = None
encoders = []
decoders = {}
drop_unencrypted = False
def __init__(self):
if self.chip_id is None:
self.chip_id = self._get_id()
if self.node_name is None:
self.node_name = self._get_node_name()
self.data = None
self.encoder = 0
@classmethod
def add_decoder(cls, decoder):
cls.decoders[decoder.name] = decoder
@classmethod
def add_encoder(cls, encoder):
cls.encoders.append(encoder)
def _get_id(self):
""":return string"""
if 'nt' in os.name:
return subprocess.getoutput('wmic csproduct get uuid')
else:
return subprocess.getoutput('cat /var/lib/dbus/machine-id')
def _get_node_name(self):
import socket
return socket.gethostname()
def _initialize_data(self):
self.data = {
'protocol': self.protocol,
'node': self.node_name,
'chip_id': self.chip_id,
'event': '',
'parameters': {},
'response': '',
'targets': [
'ALL'
]
}
def clear(self):
self._initialize_data()
def set(self, data):
if self.data is None:
self._initialize_data()
for k, v in data.items():
self.data[k] = v
def encrypt(self):
if len(self.encoders) > 0:
self.encoders[self.encoder].encrypt(self)
def decrypt(self):
if len(self.data['event']) > 8 and self.data['event'][0:8] == "message.":
if self.data['event'] in self.decoders:
self.decoders[self.data['event']].decrypt(self)
else:
raise DecryptNotFound("Decryptor {} not found".format(self.data['event']))
else:
if self.drop_unencrypted:
if len(self.decoders) > 0:
self.data = None
else:
raise NoDecodersDefined("Encryption required but decoders empty")
def __bytes__(self):
self.encrypt()
return json.dumps(self.data).encode()
def __repr__(self):
return json.dumps(self.data)
def __getitem__(self, item):
return self.data[item]
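# A minimal usage sketch (illustrative only; assumes an encoder object exposing
# encrypt(message) has been registered via Message.add_encoder()):
#   msg = Message()
#   msg.set({'event': 'button.pressed', 'targets': ['node-kitchen']})
#   payload = bytes(msg)  # runs encrypt() if encoders exist, then JSON-encodes self.data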
|
# MicroTosca types
# MicroTOSCA node types
MICROTOSCA_NODES_SERVICE = 'micro.nodes.Service'
MICROTOSCA_NODES_DATABASE = 'micro.nodes.Datastore'
MICROTOSCA_NODES_COMMUNICATIONPATTERN = 'micro.nodes.CommunicationPattern'
MICROTOSCA_NODES_MESSAGE_BROKER = 'micro.nodes.MessageBroker'
MICROTOSCA_NODES_MESSAGE_ROUTER = 'micro.nodes.MessageRouter'
MICROTOSCA_NODES_MESSAGE_ROUTER_KINGRESS = 'micro.nodes.MessageRouter.KIngress'
MICROTOSCA_NODES_MESSAGE_ROUTER_KSERVICE = 'micro.nodes.MessageRouter.KService'
MICROTOSCA_NODES_MESSAGE_ROUTER_KPROXY = 'micro.nodes.MessageRouter.KProxy'
# MicroTOSCA relationship types
MICROTOSCA_RELATIONSHIPS_INTERACT_WITH = 'micro.relationships.InteractsWith'
MICROTOSCA_RELATIONSHIPS_INTERACT_WITH_TIMEOUT_PROPERTY = "timeout"
MICROTOSCA_RELATIONSHIPS_INTERACT_WITH_CIRCUIT_BREAKER_PROPERTY = "circuit_breaker"
MICROTOSCA_RELATIONSHIPS_INTERACT_WITH_DYNAMIC_DISCOVEY_PROPERTY = "dynamic_discovery"
# MicroTOSCA group types
MICROTOSCA_GROUPS_TEAM = 'micro.groups.Team'
MICROTOSCA_GROUPS_EDGE = 'micro.groups.Edge'
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
# vim:fenc=utf-8
#
# Copyright © 2019 Ryan Mackenzie White <ryan.white4@canada.ca>
#
# Distributed under terms of the license.
"""
"""
class NullDataError(ValueError):
"""
"""
class MissingDataError(ValueError):
"""
"""
class ParserWarning(Warning):
"""
"""
class AbstractMethodError(NotImplementedError):
"""
Pandas errors.py
Raise this error instead of NotImplementedError
"""
def __init__(self, class_instance, methodtype="method"):
types = {"method", "classmethod", "staticmethod", "property"}
if methodtype not in types:
msg = "methodtype must be one of {}, got {} instead.".format(
methodtype, types
)
raise ValueError(msg)
self.methodtype = methodtype
self.class_instance = class_instance
def __str__(self):
if self.methodtype == "classmethod":
name = self.class_instance.__name__
else:
name = self.class_instance.__class__.__name__
msg = "This {methodtype} must be defined in the concrete class {name}"
return msg.format(methodtype=self.methodtype, name=name)
|
# -*- coding: utf-8 -*-
import abc
from libs.code import Code
class Query(object):
def __init__(self, meta_param: dict, parsed_query_param: dict):
self.meta_param = meta_param
self.parsed_query_param = parsed_query_param
self.type_ = meta_param["type"] if "type" in meta_param else None
async def create_meta(self):
"""
create meta data according to param
:return:
"""
raise Code.CurrentlyNotSupport
async def create_data(self):
"""
create data according to param
:return:
"""
raise Code.CurrentlyNotSupport
async def search_meta(self):
"""
search meta data
:return:
"""
raise Code.CurrentlyNotSupport
async def search_data(self):
"""
search data
:return:
"""
raise Code.CurrentlyNotSupport
async def update_meta(self):
"""
update meta data
:return:
"""
raise Code.CurrentlyNotSupport
async def update_data(self):
"""
update data
:return:
"""
raise Code.CurrentlyNotSupport
async def delete_meta(self):
"""
delete meta data
:return:
"""
raise Code.CurrentlyNotSupport
async def delete_data(self):
"""
delete data
:return:
"""
raise Code.CurrentlyNotSupport
@abc.abstractmethod
async def list(self):
"""
show cluster info
:return:
"""
pass
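# A minimal sketch (illustrative only) of a concrete Query subclass: a backend
# overrides the operations it supports and leaves the rest raising
# Code.CurrentlyNotSupport.
class EchoQuery(Query):
    async def list(self):
        # simply echo the parsed parameters back as "cluster info"
        return {"meta": self.meta_param, "query": self.parsed_query_param}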
|
import numpy as np
import argparse
import csv
import os
import time
import h5py
import librosa
import multiprocessing
import sys
sys.path.insert(1, os.path.join(sys.path[0], '../..'))
from utils.utilities import (mkdir, float32_to_int16, freq2note, get_filename, get_process_groups, read_lst, write_lst)
from utils.target_process import TargetProcessor
from conf.feature import *
et = 1e-8
def remove_empty_segment(wav, frame_roll, sample_rate):
segments = []
samples_per_frame = sample_rate * 1. / FRAMES_PER_SEC
for i in range(frame_roll.shape[-1]):
if not frame_roll[i] == NOTES_NUM_EXCLUDE_SILENCE:
st = int(i * samples_per_frame)
ed = int((i + 1)* samples_per_frame)
if ed > wav.shape[-1]:
ed = wav.shape[-1]
segments.append(wav[st : ed])
if ed == wav.shape[-1]:
break
return np.concatenate(segments, -1)
def pack_urmp_dataset_to_hdf5(args):
dataset_dir = args.dataset_dir
feature_dir = args.feature_dir
process_num = args.process_num
mkdir(feature_dir)
meta_dict = {}
meta_dict['audio_filename'] = []
audios_num = 0
for folder in os.listdir(dataset_dir):
if str.startswith(folder, "._"):
continue
meta_data = folder.split('_')
if len(meta_data) < 4:
continue
audios_num += 1
id = meta_data[0]
name = meta_data[1]
sources = meta_data[2:]
audio = {}
audio['mix'] = os.path.join(folder, f'AuMix_{folder}.wav')
audio['separated_sources'] = []
audio['note_annotations'] = []
for j, s in enumerate(sources):
audio['separated_sources'] += [os.path.join(folder, f'AuSep_{j + 1}_{s}_{id}_{name}.wav')]
audio['note_annotations'] += [os.path.join(folder, f'Notes_{j + 1}_{s}_{id}_{name}.txt')]
meta_dict['audio_filename'] += [audio]
feature_time = time.time()
print(f"The total number of the mixture audio is {audios_num}")
def process_unit(n):
name = meta_dict['audio_filename'][n]['mix']
print(name)
audio_path = os.path.join(dataset_dir, name)
(audio, _) = librosa.core.load(audio_path, sr=SAMPLE_RATE, mono=True)
packed_hdf5_path = os.path.join(feature_dir, '{}.h5'.format(os.path.splitext(name)[0]))
mkdir(os.path.dirname(packed_hdf5_path))
with h5py.File(packed_hdf5_path, 'w') as hf:
#hf.attrs.create('midi_filename', data=meta_dict['midi_filename'][n].encode(), dtype='S100')
hf.create_dataset(name='waveform', data=float32_to_int16(audio), dtype=np.int16)
for i, name in enumerate(meta_dict['audio_filename'][n]['separated_sources']):
audio_path = os.path.join(dataset_dir, name)
(audio, _) = librosa.core.load(audio_path, sr=SAMPLE_RATE, mono=True)
(hq_audio, _) = librosa.core.load(audio_path, sr=SAMPLE_RATE * 2, mono=True)
note_annotations_path = os.path.join(dataset_dir, meta_dict['audio_filename'][n]['note_annotations'][i])
note_annotations = read_lst(note_annotations_path)
note_annotations = [notes.split('\t\t') for notes in note_annotations]
note_annotations = [[notes[0], float(notes[2]) + float(notes[0]), float(freq2note(notes[1]))] for notes in note_annotations]
note_annotations = np.array(note_annotations, dtype = np.float32)
note_annotations_lst = ['%s\t%s\t%s' % (notes[0], str(notes[1]), str(notes[2])) for notes in note_annotations]
ref_path = os.path.join(feature_dir, '{}_ref.txt'.format(os.path.splitext(name)[0]))
mkdir(os.path.dirname(packed_hdf5_path))
write_lst(ref_path, note_annotations_lst)
duration = (audio.shape[-1] + SAMPLE_RATE - 1) // SAMPLE_RATE
target_processor = TargetProcessor(duration, FRAMES_PER_SEC, BEGIN_NOTE, NOTES_NUM_EXCLUDE_SILENCE)
target_dict = target_processor.process(0, note_annotations)
frame_roll = np.array(target_dict['frame_roll'], dtype=np.int16)
train_packed_hdf5_path = os.path.join(feature_dir, '{}._TRAIN.h5'.format(os.path.splitext(name)[0]))
test_packed_hdf5_path = os.path.join(feature_dir, '{}._TEST.h5'.format(os.path.splitext(name)[0]))
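# Data augmentation: besides the original, pitch-shifted copies are generated; with scale = 9 below, shift_pitch ranges over -4..+4 semitones.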
scale = 9
dense_audio = remove_empty_segment(audio, frame_roll, SAMPLE_RATE)
dense_hq_audio = remove_empty_segment(hq_audio, frame_roll, SAMPLE_RATE * 2)
for i in range(scale):
shift_pitch = i - (scale // 2)
packed_hdf5_path = os.path.join(feature_dir, '{}._TRAIN_shift_pitch_{}.h5'.format(os.path.splitext(name)[0], shift_pitch))
if os.path.exists(packed_hdf5_path):
continue
if shift_pitch == 0:
shift_audio = audio
shift_dense_audio = dense_audio
else:
shift_audio = librosa.effects.pitch_shift(hq_audio, SAMPLE_RATE * 2, n_steps=shift_pitch)
shift_audio = librosa.core.resample(shift_audio, SAMPLE_RATE * 2, SAMPLE_RATE)
shift_dense_audio = librosa.effects.pitch_shift(dense_hq_audio, SAMPLE_RATE * 2, n_steps=shift_pitch)
shift_dense_audio = librosa.core.resample(shift_dense_audio, SAMPLE_RATE * 2, SAMPLE_RATE)
shift_frame_roll = frame_roll.copy() + shift_pitch
shift_frame_roll[shift_frame_roll == NOTES_NUM_EXCLUDE_SILENCE + shift_pitch] = NOTES_NUM_EXCLUDE_SILENCE
shift_frame_roll = np.clip(shift_frame_roll, 0, NOTES_NUM_EXCLUDE_SILENCE)
with h5py.File(packed_hdf5_path, 'w') as hf:
hf.create_dataset(name='shift_waveform', data=float32_to_int16(shift_audio), dtype=np.int16)
hf.create_dataset(name='shift_dense_waveform', data=float32_to_int16(shift_dense_audio), dtype=np.int16)
hf.create_dataset(name='frame_roll', data=shift_frame_roll, dtype=np.int16)
with h5py.File(train_packed_hdf5_path, 'w') as hf:
hf.create_dataset(name='waveform', data=float32_to_int16(audio), dtype=np.int16)
hf.create_dataset(name='frame_roll', data=frame_roll, dtype=np.int16)
with h5py.File(test_packed_hdf5_path, 'w') as hf:
hf.create_dataset(name='waveform', data=float32_to_int16(audio), dtype=np.int16)
hf.create_dataset(name='waveform_path', data=[audio_path.encode()], dtype='S200')
hf.create_dataset(name='note_annotations_txt', data=[ref_path.encode()], dtype='S200')
hf.create_dataset(name='frame_roll', data=frame_roll, dtype=np.int16)
def process_group(st, ed, total_num, pid):
print(f"process {pid + 1} starts")
for n in range(st, ed):
process_unit(n)
print(f"process {pid + 1} : {n + 1}/{total_num} done.")
print(f"process {pid + 1} ends")
audio_groups = get_process_groups(audios_num, process_num)
for pid, (st, ed) in enumerate(audio_groups):
p = multiprocessing.Process(target = process_group, args = (st, ed, audios_num, pid))
p.start()
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='')
parser.add_argument('--dataset_dir', type=str, required=True, help='Directory of dataset.')
parser.add_argument('--feature_dir', type=str, required=True, help='Directory to store generated files.')
parser.add_argument('--process_num', type=int, required=True, help='Number of processes.')
args = parser.parse_args()
pack_urmp_dataset_to_hdf5(args)
|
from __future__ import unicode_literals
import unittest
from rest_framework import status, test, settings
from django.core.urlresolvers import reverse
from nodeconductor.structure.tests import factories as structure_factories
from nodeconductor.support.serializers import CommentSerializer
class JiraTest(test.APITransactionTestCase):
def setUp(self):
self.user = structure_factories.UserFactory()
def get_issues_url(self, key=None):
if key:
return 'http://testserver' + reverse('issue-detail', kwargs={'pk': key})
else:
return 'http://testserver' + reverse('issue-list')
def get_comments_url(self, key):
return 'http://testserver' + reverse('issue-comments-list', kwargs={'pk': key})
def test_list_issues(self):
self.client.force_authenticate(user=self.user)
response = self.client.get(self.get_issues_url())
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertGreaterEqual(len(response.data), 3)
def test_search_issues(self):
self.client.force_authenticate(user=self.user)
response = self.client.get(self.get_issues_url(), data={settings.api_settings.SEARCH_PARAM: '^_^'})
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual('TST-3', response.data[0]['key'])
def test_create_issues(self):
self.client.force_authenticate(user=self.user)
data = {
'summary': 'Just a test',
'description': 'nothing more',
}
response = self.client.post(self.get_issues_url(), data=data)
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
key = response.data['key']
response = self.client.get(self.get_issues_url(key), data=data)
self.assertEqual(response.data['summary'], data['summary'])
def test_list_comments(self):
self.client.force_authenticate(user=self.user)
response = self.client.get(self.get_comments_url('TST-3'))
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertGreaterEqual(len(response.data), 2)
def test_create_comments(self):
self.client.force_authenticate(user=self.user)
comment = 'hi there'
url = self.get_comments_url('TST-1')
response = self.client.post(url, data={'body': comment})
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
self.assertIn(self.user.username, response.data['author']['displayName'])
response = self.client.get(url)
self.assertEqual(response.data[-1]['body'], comment)
class JiraCommentAuthorSerializerTest(unittest.TestCase):
def test_parsing(self):
username = "Walter"
uuid = '1c3323fc4ae44120b57ec40dea1be6e6'
body = "Hello, world!"
comment = {"body": "Comment posted by user {} ({})\n{}".format(username, uuid, body)}
expected = {
'author': {
'displayName': username,
'uuid': uuid
},
'body': body
}
serializer = CommentSerializer(instance=comment)
self.assertEqual(expected, serializer.data)
|
from Aula52.model.endereco_model import EnderecoModel
from Aula52.dao.base_dao import BaseDao
class EnderecoDao(BaseDao):
def __init__(self):
super().__init__("01_MDG_ENDERECO")
def listar_todos(self):
tuplas = super().listar_todos()
lista = []
for e in tuplas:
model = EnderecoModel(e[1], e[2], e[3], e[4], e[5], e[6], e[0])
lista.append(model.__dict__)
return lista
def buscar_por_id(self, id):
tupla = super().buscar_por_id(id)
model = EnderecoModel(tupla[1], tupla[2], tupla[3], tupla[4], tupla[5], tupla[6], tupla[0])
return model.__dict__
def inserir(self, model: EnderecoModel):
self.cursor.execute("""INSERT INTO {}
(
LOGRADOURO,
NUMERO,
COMPLEMENTO,
BAIRRO,
CIDADE,
CEP
)VALUES
(
'{}',
'{}',
'{}',
'{}',
'{}',
'{}'
)
""".format(self.tabela, model.logradouro, model.numero, model.complemento, model.bairro, model.cidade, model.cep ))
self.conexao.commit()
model.id = self.cursor.lastrowid
return model.__dict__
def alterar(self, model: EnderecoModel):
self.cursor.execute(""" UPDATE {}
SET
LOGRADOURO = '{}',
NUMERO = '{}',
COMPLEMENTO = '{}',
BAIRRO = '{}',
CIDADE = '{}',
CEP = '{}'
WHERE ID = {}
""".format(self.tabela, model.logradouro, model.numero, model.complemento, model.bairro, model.cidade, model.cep, model.id ))
self.conexao.commit()
return model.__dict__
def deletar(self, id):
self.cursor.execute(""" DELETE FROM {}
WHERE ID = {}
""".format(self.tabela, id))
self.conexao.commit()
return "Endereco de id {} deletado com sucesso".format(id)
|
"""restart: Restart nginx or flask in the dev container
'dsco restart' will restart both the nginx service and the flask service
running inside the development container. To restart just one of those
use the corresponding flag: --nginx or --flask
"""
import os
from pathlib import Path
from cookiecutter.main import cookiecutter
from dsco.helpers import get_container
import subprocess
cmd_name = Path(__file__).stem
def add_subparser(subparsers):
sub = subparsers.add_parser(cmd_name, help="restart nginx and flask (dev)")
sub.add_argument("--flask", action="store_true", help="restart flask")
sub.add_argument("--nginx", action="store_true", help="restart nginx")
def run_cmd(args, conf):
if conf["proj_root"]:
no_flag = not any([args.flask, args.nginx])
flask_flag = args.flask or no_flag
nginx_flag = args.nginx or no_flag
proj_name = conf["pyproject"]["tool"]["poetry"]["name"]
container_name = get_container(proj_name, "dev")
if nginx_flag:
restart_nginx = 'bash -c "service nginx restart"'
cmd = f"docker exec -it $({container_name}) {restart_nginx}"
print(cmd)
subprocess.run(cmd, shell=True)
if flask_flag:
restart_flask = (
'bash -c "supervisorctl restart uwsgi && supervisorctl status"'
)
cmd = f"docker exec -it $({container_name}) {restart_flask}"
print(cmd)
subprocess.run(cmd, shell=True)
else:
print("No project found.")
def add_dispatch(dispatcher):
dispatcher[cmd_name] = run_cmd
|
#!/usr/bin/env python3
# Goshu IRC Bot
# written by Daniel Oaks <daniel@danieloaks.net>
# licensed under the ISC license
# Quite a lot of this module was taken, with permission, from
# https://github.com/electronicsrules/megahal
# In particular, the regexes and the display layout. Thanks a bunch, bro!
import re
import time
from girc.formatting import escape, unescape
from girc.utils import CaseInsensitiveDict
from gbot.libs.helper import get_url, format_extract, JsonHandler
from gbot.modules import Module
class Cooldown:
def __init__(self, cooldown_seconds=15, multiple=2, max_cooldown=60*60*24):
self.cooldown_seconds = cooldown_seconds
self.end_ts = time.time()
self.default_multiple = multiple
self.multiple = multiple
self.max_cooldown = max_cooldown
def can_do(self):
if time.time() > self.end_ts:
self.multiple = self.default_multiple
self.end_ts = time.time() + self.cooldown_seconds
return True
self.multiple *= self.default_multiple
cooldown = min(self.cooldown_seconds * self.multiple, self.max_cooldown)
self.end_ts = time.time() + cooldown
return False
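# Illustrative behaviour (not part of the original module): with the defaults above,
# the first can_do() call succeeds and arms a 15 s cooldown; every rejected call
# doubles the multiplier (4, 8, 16, ...), extending the cooldown to 60 s, 120 s,
# 240 s, ... up to the 24 h cap, until the window is finally allowed to expire.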
class link(Module):
standard_admin_commands = ['ignore']
def __init__(self, bot):
Module.__init__(self, bot)
self.links = []
self.cooldowns = {}
self.json_handlers.append(JsonHandler(self, self.dynamic_path,
attr='links', ext='lnk', yaml=True,
callback_name='_link_json_callback'))
def _link_json_callback(self, new_json):
for key, info in new_json.items():
for var_name, var_info in info.get('required_values', {}).items():
base_name = info['name'][0]
self.parse_required_value(base_name, var_name, var_info)
def link_listener(self, event):
"""Listens for links for which we can provide info
@listen pubmsg
@listen privmsg
"""
if event['source'].is_me or self.is_ignored(event['from_to']):
return
url_matches = re.search('(?:https?://)(\\S+)', unescape(event['message']))
if not url_matches:
return
for url in url_matches.groups():
response = ''
for provider in self.links:
if provider not in self.cooldowns:
self.cooldowns[provider] = CaseInsensitiveDict()
matches = re.match(self.links[provider]['match'], url)
if matches:
# response = '*** {}: '.format(self.links[provider]['display_name'])
response = ''
complete_dict = {}
complete_dict.update(self.get_required_values(provider))
for key, value in matches.groupdict().items():
complete_dict[key] = escape(value)
# check cooldown
server_name = event['server'].name
if event['from_to'].is_user:
from_to = event['from_to'].host
else:
from_to = event['from_to'].name
if server_name not in self.cooldowns[provider]:
self.cooldowns[provider][server_name] = event['server'].idict()
if from_to not in self.cooldowns[provider][server_name]:
self.cooldowns[provider][server_name][from_to] = Cooldown()
if not self.cooldowns[provider][server_name][from_to].can_do():
continue
# getting the actual file itself
api_url = self.links[provider]['url'].format(**complete_dict)
r = get_url(api_url)
display_name = self.links[provider]['display_name']
if isinstance(r, str):
event['from_to'].msg('*** {}: {}'.format(display_name, r))
return
# parsing
response += format_extract(self.links[provider], r.text,
debug=True,
fail='*** {}: Failed'.format(display_name))
# remove urls from our response
url_matches = re.search('(?:https?://)(\\S+)', response)
if url_matches:
for url in url_matches.groups():
for provider in self.links:
matches = re.match(self.links[provider]['match'], url)
if matches:
response = response.replace(url, '[REDACTED]')
if response:
event['from_to'].msg(response)
return # don't spam us tryna return every title
|
#############################################################################
# Author: Muhammed S. ElRakabawi (elrakabawi.github.io)
# Paper: Yin, C. (2017). Encoding DNA sequences by integer chaos game representation
# License: MIT
#############################################################################
import argparse
import re
class bcolors:
HEADER = '\033[95m'
OKBLUE = '\033[94m'
OKGREEN = '\033[92m'
WARNING = '\033[93m'
FAIL = '\033[91m'
ENDC = '\033[0m'
BOLD = '\033[1m'
UNDERLINE = '\033[4m'
parser = argparse.ArgumentParser(description='Integer Chaos Game Representation of DNA Encoder/Decoder script')
group = parser.add_mutually_exclusive_group(required=True)
group.add_argument('-e', '--encode', type=str, help='Encode sequence from fasta file')
group.add_argument('-d', '--decode', type=str, help='Decode sequence from icgr file')
parser.add_argument('-q', '--quiet', action='store_true', help='Will not print validity checks')
args = parser.parse_args()
################################ FUNCTIONS ###################################
def clean_file(this_file): # Truncating function for cleaning the file
this_file.seek(0) # Point cursor to first line
this_file.truncate() # Truncate file
this_file.seek(0) # Return cursor to first line
return this_file
def encode_icgr(dna_seq): # Integer Chaos Game Representation Encoder
i_value = 0 # base index (position)
alpha_x = 0 # Alpha (i,x)
alpha_y = 0 # Alpha (i,y)
arr = [] # List of lists [(i,Xi,Yi)..(n, X, Yn)]
cgr_coordinate = {'A': (1, 1), 'G': (1, -1), 'T': (-1, 1), 'C': (-1, -1)} # Base mapping to coordinates
for n in dna_seq: # For every nucleotide in sequence
i_value = i_value + 1 # increment i --> n
alpha_x = cgr_coordinate[n][0] # alpha_x = Nucleotide Xi coordinate
alpha_y = cgr_coordinate[n][1] # alpha_y = Nucleotide Yi coordinate
if i_value == 1:
px = (2 ** (i_value - 1)) * alpha_x
py = (2 ** (i_value - 1)) * alpha_y
elif i_value > 1:
px = px + (2 ** (i_value - 1)) * alpha_x
py = py + (2 ** (i_value - 1)) * alpha_y
arr.append([i_value, alpha_x, alpha_y, px, py])
big_x_value = arr[len(arr) - 1][3]
big_y_value = arr[len(arr) - 1][4]
return i_value, big_x_value, big_y_value
def decode_icgr(i_value, big_x, big_y): # Integer Chaos Game Representation Decoder
arr = [] # List of Bases [(i,Xi,Yi)..(n, X, Yn)]
cgr_coordinate = {(1, 1): 'A', (1, -1): 'G', (-1, 1): 'T', (-1, -1): 'C'} # Coordinates mapping back to Bases
alpha_x = 0
alpha_y = 0
for step in range(i_value, 0, -1): # For Every step in range (i --> 0)
if big_x > 0:
alpha_x = 1
elif big_x < 0:
alpha_x = -1
if big_y > 0:
alpha_y = 1
elif big_y < 0:
alpha_y = -1
big_x = (abs(big_x)-(2**(i_value-1)))*alpha_x # [|Pi,x| - 2^(i-1)] * alpha(i,x)
big_y = (abs(big_y) - (2 ** (i_value - 1))) * alpha_y # [|Pi,y| - 2^(i-1)] * alpha(i,y)
arr.append(cgr_coordinate[(alpha_x, alpha_y)])
i_value -= 1
decoded_seq = ''.join(arr[::-1]) # Reverse and join chars to a string
return decoded_seq # Return Decoded Sequence
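# Worked example (illustrative): encode_icgr("AT") walks A -> (1, 1) then T -> (-1, 1),
# giving (i, X, Y) = (2, -1, 3); decode_icgr(2, -1, 3) recovers the signs step by
# step and returns "AT".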
################################ CORE LOGIC ###################################
if args.encode:
fastapath = args.encode
icgrpath = fastapath[:fastapath.find('.')] + '.icgr'
seq_comments = []
fasta_seq = ''
with open(fastapath) as fp:
line = fp.readline()
while line:
if line[0] == '>':
seq_desc = line
elif line[0] == ';':
seq_comments.append([line])
else:
line = line.replace('\n', '')
fasta_seq += line
line = fp.readline()
seq_desc = '>>{}'.format(seq_desc)
encoded_seq = encode_icgr(fasta_seq)
decoded_seq = decode_icgr(encoded_seq[0], encoded_seq[1], encoded_seq[2])
answer_file = open(icgrpath, 'a')
answer_file = clean_file(answer_file)
answer_file.write(seq_desc)
answer_file.write(str(encoded_seq))
if not args.quiet:
print(bcolors.HEADER, 'Script running in Verbose Mode.\n Validity checks will be printed!', bcolors.ENDC)
print(' Fasta file path:', fastapath)
if decoded_seq == fasta_seq:
print(bcolors.OKBLUE, 'Validity check returns True.', bcolors.ENDC)
print(bcolors.OKBLUE, 'ICGR Encoding Done (Trusted) --> ', bcolors.WARNING, icgrpath, bcolors.ENDC, '\nI:',
encoded_seq[0], '\nX:', encoded_seq[1], '\nY:', encoded_seq[2])
else:
print(bcolors.WARNING, 'Validity check returns False.', bcolors.ENDC)
choice = input(' Output file anyway? [y]: ')
if choice == 'y':
print(bcolors.OKBLUE, 'ICGR Encoding Done (Not Trusted) --> ', bcolors.WARNING, icgrpath, bcolors.ENDC, '\n I:',
encoded_seq[0], '\n X:', encoded_seq[1], '\n Y:', encoded_seq[2])
else:
print(bcolors.FAIL, 'iCGR encoding output was not written because the validity check failed', bcolors.ENDC)
elif args.quiet:
print(bcolors.HEADER, 'Script running in Quiet Mode.', bcolors.ENDC)
if decoded_seq == fasta_seq:
print(bcolors.OKBLUE, 'ICGR Encoding Done (Trusted) --> ', bcolors.WARNING, icgrpath, bcolors.ENDC)
else:
print(bcolors.WARNING, 'ICGR Encoding Done (Not Trusted) --> ', icgrpath, bcolors.ENDC)
elif args.decode:
icgrpath = args.decode
fastapath = icgrpath[:icgrpath.find('.')] + '.fasta'
open_file = open(icgrpath, 'r')
file_lines = open_file.readlines()
fasta_desc = file_lines[0].strip()
icgr_data = file_lines[1].strip()  # Remove surrounding whitespace/newline
icgr_data = icgr_data[1:-1]  # Remove the enclosing parentheses
icgr_data = icgr_data.split(',')  # Split the "(i, X, Y)" string into its three values
decoded_seq = decode_icgr(int(icgr_data[0]), int(icgr_data[1]), int(icgr_data[2]))
answer_file = open(fastapath, 'a')
answer_file = clean_file(answer_file)
answer_file.write(fasta_desc[2:] + '\n')
answer_file.write(re.sub("(.{70})", "\\1\n", decoded_seq, 0, re.DOTALL)) # Printing sequence in fasta format
print(bcolors.OKBLUE, 'ICGR Decoding Done Successfully! -->', bcolors.WARNING, fastapath, bcolors.ENDC)
|
#!/usr/bin/env python3
# An example program demonstrating integration with ID-porten and
# APIs from the Norwegian Tax Administration (Skatteetaten).
import base64
import webbrowser
import random
import time
import json
from urllib.parse import urlparse, parse_qs, quote
from base64 import urlsafe_b64encode, urlsafe_b64decode
from hashlib import sha256
from http.server import BaseHTTPRequestHandler, HTTPServer
import xmltodict
import requests
from jose import jwt
CLIENT_ID = '8d7adad7-b497-40d0-8897-9a9d86c95306'
AUTH_DOMAIN = 'oidc-ver2.difi.no/idporten-oidc-provider'
ALGORITHMS = ["RS256"]
API_AUDIENCE = 'https://mp-test.sits.no/api/eksterntapi/formueinntekt/skattemelding/'
# A simple web server that waits for the callback from the browser and
# stores the callback URL.
class BrowserRedirectHandler(BaseHTTPRequestHandler):
timeout = 1000
result = None
def do_GET(self) -> None:
self.send_response(200)
self.send_header('Content-Type', 'text/html')
self.end_headers()
self.wfile.write(b"""
<!DOCTYPE html>
<title>Authentication complete</title>
<body>
<h1>Authentication complete</h1>
<p>You may close this page.
""")
BrowserRedirectHandler.result = self
# Return a random byte array of length n
def random_bytes(n: int) -> bytes:
return bytearray(random.getrandbits(8) for _ in range(n))
def base64_response(s: str, encoding: str) -> str:
base64_bytes = s.encode(encoding)
message_bytes = base64.b64decode(base64_bytes)
return message_bytes.decode(encoding)
def decode_dokument(dokument):
original_content = dokument["content"]
encoding = dokument["encoding"]
dokument["content"] = base64_response(original_content, encoding)
return dokument
def iter_dokumenter(d):
for k, v in d.items():
if k == 'dokument':  # the "valider" response contains a list of documents
for dok in v:
decode_dokument(dok)
elif k == "skattemeldingdokument":
decode_dokument(v)
elif k == "dokumenter":
iter_dokumenter(v)
else:
pass
return d
def base64_decode_response(r: requests.Response):
if not r:  # not a 200 OK response
return r.text
utkast_resp = xmltodict.parse(r.text)
for k, v in utkast_resp.items():
v = iter_dokumenter(v)
utkast_resp[k] = v
return xmltodict.unparse(utkast_resp)
def main_relay(**kwargs) -> dict:
# disabled - idporten fails to register 127.0.0.1 and dynamic port numbers for now
# # Bind to port 0, let the OS find an available port
# server = HTTPServer(('127.0.0.1', 0), BrowserRedirectHandler)
# Get the jwks from idporten (for token verification later)
u = requests.get('https://{}/.well-known/openid-configuration'.format(AUTH_DOMAIN)).json()["jwks_uri"]
jwks = requests.get(u).json()
server = HTTPServer(('127.0.0.1', 12345), BrowserRedirectHandler)
port = server.server_address[1]
assert 0 < port < 65536
client_id = CLIENT_ID
# Public clients need state parameter and PKCE challenge
# https://difi.github.io/felleslosninger/oidc_auth_spa.html
# https://tools.ietf.org/html/draft-ietf-oauth-browser-based-apps-00
state = urlsafe_b64encode(random_bytes(16)).decode().rstrip("=")
pkce_secret = urlsafe_b64encode(random_bytes(32)).decode().rstrip("=").encode()
pkce_challenge = urlsafe_b64encode(sha256(pkce_secret).digest()).decode()
nonce = "{}".format(int(time.time() * 1e6))
u = 'https://{}/authorize'.format(AUTH_DOMAIN) + \
quote(('?scope=skatteetaten:formueinntekt/skattemelding openid'
'&acr_values=Level3'
'&client_id={}'
'&redirect_uri=http://localhost:{}/token'
'&response_type=code'
'&state={}'
'&nonce={}'
'&resource={}'
'&code_challenge={}'
'&code_challenge_method=S256'
'&ui_locales=nb'.format(client_id, port, state, nonce, API_AUDIENCE, pkce_challenge)), safe='?&=_')
print(u)
# Open web browser to get ID-porten authorization token
webbrowser.open(u, new=0, autoraise=True)
# Wait for callback from ID-porten
while not hasattr(BrowserRedirectHandler.result, 'path'):
server.handle_request()
# Free the port, no more callbacks expected
server.server_close()
print("Authorization token received")
# result.path is now something like
# "/token?code=_Acl-x8H83rjhjhdefeefeef_xlbi_6TqauJV1Aiu_Q&state=oxw06LrtiyyWb7uj7umRSQ%3D%3D"
# We must verify that state is identical to what we sent - https://tools.ietf.org/html/rfc7636
qs = parse_qs(urlparse(BrowserRedirectHandler.result.path).query)
print(qs)
assert len(qs['state']) == 1 and qs['state'][0] == state
# Use the authorization code to get access and id token from /token
payload = {'grant_type': 'authorization_code',
'code_verifier': pkce_secret,
'code': qs['code'][0],
'redirect_uri': 'http://localhost:{}/token'.format(port),
'client_id': client_id}
headers = {'Accept': 'application/json'}
r = requests.post('https://{}/token'.format(AUTH_DOMAIN), headers=headers, data=payload)
if not r:
print(r.headers, r.text)
r.raise_for_status()
js = r.json()
assert js['token_type'] == 'Bearer'
print("JS : ")
print(js)
# Validate tokens according to https://tools.ietf.org/html/rfc7519#section-7.2
# A list of 3rd party libraries for various languages on https://jwt.io/
# python possibilites: pyjwt, python-jose, jwcrypto, authlib
# We use python-jose here:
jwt.decode(
js['id_token'],
jwks,
algorithms=ALGORITHMS,
issuer="https://" + AUTH_DOMAIN + "/",
audience=client_id,
access_token=js['access_token']
)
id_encoded = js['id_token'].split(".")[1]
id_token = json.loads(urlsafe_b64decode(id_encoded + "==").decode())
assert id_token['nonce'] == nonce
# Also validate the access token separately, this is what we have to pass
# on to our APIs
jwt.decode(
js['access_token'],
jwks,
algorithms=ALGORITHMS,
issuer="https://" + AUTH_DOMAIN + "/",
audience=API_AUDIENCE
)
at_encoded = js['access_token'].split(".", 3)[1]
access_token = json.loads(urlsafe_b64decode(at_encoded + "==").decode())
assert access_token['client_id'] == client_id
assert access_token['token_type'] == "Bearer"
assert access_token['acr'] == "Level3"
assert access_token['aud'] == API_AUDIENCE
print("The token is good, expires in {} seconds".format(access_token['exp'] - int(time.time())))
print("\nBearer {}".format(js['access_token']))
header = {'Authorization': 'Bearer ' + js['access_token']}
return header
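# --- Usage sketch (added for illustration, not part of the original script) ---
# The returned header can be passed to the protected API with requests. The
# endpoint URL below is a hypothetical placeholder; consult the skattemelding
# API documentation for the real resource URLs.
def _example_protected_call(auth_header, url):
    # Perform a GET against an endpoint protected by the Bearer access token.
    r = requests.get(url, headers=auth_header)
    r.raise_for_status()
    return r.text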
|
import cv2
def heightANDdepth(flame, pixel_to_meters, data, roi):
"""
This function calculates the flame height and thickness
Parameters:
----------
flame: contour of the flame
np.ndarray
pixel_to_meters: ratio of meters per pixel
float
data: needs to be imported. Dictionary that includes all observed data
from each experiment
dict
roi: region of image.
np.ndarray
Returns:
--------
flame_height_top: height of the top of the flame in pixels
float
flame_height_bottom: height of the bottom of the flame in pixels
float
flame_projection: horizontal projection of the tip of the flame in pixels
float
flame_depth_below: depth of flame in pixels 500 mm above the door
float
flame_depth_topdoor = depth of flame in pixels at the top of the door
float
flame_depth_above = depth of flame in pixels 500 mm above the door
float
"""
# define variables
offset = 0.5
# determine heights at which the depth of the flame will be measured
height_offset = int(offset / pixel_to_meters)
height_door = data["top_of_door"][0] - data["min_y_real"]
height_below = int(height_door - height_offset)
height_above = int(height_door + height_offset)
# determine the top and bottom flame heights
top_point = tuple(flame[flame[:, :, 0].argmax()][0])
bottom_point = tuple(flame[flame[:, :, 0].argmin()][0])
height_top = top_point[0]
height_bottom = bottom_point[0]
# determine the horizontal projection of the flame
flame_projection = 0
pt2 = top_point
pt1 = (top_point[0], data["top_of_door"][1])
flame_projection = abs(pt2[1] - pt1[1])
# add a conditional to deal with contours detected on the floor
if height_top < 300:
height_top = 0
height_bottom = 0
flame_projection = 0
else:
# draw on the frame the top and bottom heights as well as the projection
cv2.circle(roi, top_point, 10, (0, 0, 255), 3)
cv2.circle(roi, bottom_point, 10, (0, 0, 255), 3)
        cv2.arrowedLine(roi, pt2, pt1, color=(51, 153, 255), thickness=3)
# calculate the flame depth at three different heights
    for i, height in enumerate([height_below, height_door, height_above]):
        # Reset pt1 and pt2 for each height so the depth is not computed from
        # stale points when no flame point is found at this height.
        pt1 = (0, 0)
        pt2 = (0, 0)
for y_coordinate in range(roi.shape[0]):
# start from the edge of roi and find where the flame ends
inside_flame = cv2.pointPolygonTest(flame, (height, y_coordinate),
False)
# if the point is in the flame
if inside_flame in [0,1]:
pt1 = (height, y_coordinate)
break
for y_coordinate in range(roi.shape[0], 0, -1):
# start from the door and find where the flame starts
inside_flame = cv2.pointPolygonTest(flame, (height, y_coordinate),
False)
# if the point is in the flame
if inside_flame in [0,1]:
pt2 = (height, y_coordinate)
break
# draw the lines that indicate the depth that was measured on the image
        cv2.arrowedLine(roi, pt2, pt1, color=(51, 153, 255), thickness=3)
depth = abs(pt2[1] - pt1[1])
# assign the depth value to the corresponding variable
if i == 0:
depth_below = depth
if i == 1:
depth_door = depth
if i == 2:
depth_above = depth
return (height_top, height_bottom, flame_projection, depth_below,
depth_door, depth_above)
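# --- Usage sketch (illustrative only, not from the original experiments) ---
# Shows how a flame contour could be extracted and passed to heightANDdepth().
# The threshold value, the pixel_to_meters ratio and the example `data` values
# are assumptions for illustration; the OpenCV 4.x return signature of
# findContours is assumed as well.
def _example_usage(frame_bgr, pixel_to_meters=0.005):
    gray = cv2.cvtColor(frame_bgr, cv2.COLOR_BGR2GRAY)
    _, mask = cv2.threshold(gray, 200, 255, cv2.THRESH_BINARY)  # assumed flame threshold
    contours, _ = cv2.findContours(mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    if not contours:
        return None
    flame = max(contours, key=cv2.contourArea)  # take the largest contour as the flame
    data = {"top_of_door": (350, 120), "min_y_real": 50}  # hypothetical calibration values
    roi = frame_bgr.copy()  # draw the annotations on a copy of the frame
    return heightANDdepth(flame, pixel_to_meters, data, roi)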
|
import uuid
from test_helper import get_result
import floto
import floto.api
import floto.decider
def test_07():
domain = 'floto_test'
rs = floto.specs.retry_strategy.InstantRetry(retries=2)
timer_a = floto.specs.task.Timer(id_='TimerA', delay_in_seconds=15)
task_1 = floto.specs.task.ActivityTask(domain=domain, name='activity1', version='v5',
retry_strategy=rs)
timer_b = floto.specs.task.Timer(id_='TimerB', delay_in_seconds=5,
requires=[task_1.id_])
task_2 = floto.specs.task.ActivityTask(domain=domain, name='activity2', version='v4',
requires=[timer_b.id_], retry_strategy=rs)
decider_spec = floto.specs.DeciderSpec(domain=domain,
task_list=str(uuid.uuid4()),
activity_tasks=[timer_a,
timer_b,
task_1,
task_2],
default_activity_task_list='floto_activities',
terminate_decider_after_completion=True)
decider = floto.decider.Decider(decider_spec=decider_spec)
workflow_args = {'domain': decider_spec.domain,
'workflow_type_name': 'my_workflow_type',
'workflow_type_version': 'v1',
'task_list': decider_spec.task_list,
'input': {'foo': 'bar'}}
response = floto.api.Swf().start_workflow_execution(**workflow_args)
print(30 * '-' + ' Running Test 07 ' + 30 * '-')
decider.run()
print(30 * '-' + ' Done Test 07 ' + 30 * '-')
return get_result(decider.domain, response['runId'], 'my_workflow_type_v1')
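# Usage sketch: running the test directly. This assumes the SWF domain
# 'floto_test', the workflow type and activity workers polling the
# 'floto_activities' task list are already registered and running.
if __name__ == '__main__':
    print(test_07())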
|
print('''Write a program that reads a student's name and grade average, also storing the pass/fail status in a dictionary.
At the end, show the contents of the structure on screen.''')
aluno = dict()
aluno['nome'] = str(input('Enter the name: '))
aluno['nota'] = float(input('Enter the grade: '))
if aluno['nota'] < 7:
    aluno['situacao'] = 'failed'  # bug fix: set the status key instead of overwriting the grade
else:
    aluno['situacao'] = 'passed'
print(f'''Student {aluno['nome']} got grade {aluno['nota']} and {aluno['situacao']}.''')
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sun Feb 26 10:07:12 2017
@author: adriana
The greatest common divisor of two positive integers is the largest integer that divides each of them without remainder. For example,
gcd(2, 12) = 2
gcd(6, 12) = 6
gcd(9, 12) = 3
gcd(17, 12) = 1
A clever mathematical trick (due to Euclid) makes it easy to find greatest common divisors. Suppose that a and b are two positive integers:
If b = 0, then the answer is a
Otherwise, gcd(a, b) is the same as gcd(b, a % b)
See this website for an example of Euclid's algorithm being used to find the gcd.
Write a function gcdRecur(a, b) that implements this idea recursively. This function takes in two positive integers and returns one integer.
"""
def gcdRecur(a, b):
if b == 0:
return a
else:
return gcdRecur(b, a % b)
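# The examples from the docstring, runnable as a quick check:
if __name__ == '__main__':
    for a, b in [(2, 12), (6, 12), (9, 12), (17, 12)]:
        print('gcd({}, {}) = {}'.format(a, b, gcdRecur(a, b)))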
|