text stringlengths 8 6.05M |
|---|
import mock
import unittest
import numpy
from six.moves import cPickle
from smqtk.representation.descriptor_element.local_elements import \
DescriptorFileElement
class TestDescriptorFileElement (unittest.TestCase):
    """Tests for DescriptorFileElement: configuration round-trip, on-disk
    vector path generation, pickle serialization and vector get/set."""

    def test_configuration1(self):
        # The default configuration exposes exactly these two keys.
        default_config = DescriptorFileElement.get_default_config()
        self.assertEqual(default_config,
                         {
                             'save_dir': None,
                             'subdir_split': None,
                         })
        default_config['save_dir'] = '/some/path/somewhere'
        default_config['subdir_split'] = 4
        #: :type: DescriptorFileElement
        inst1 = DescriptorFileElement.from_config(default_config,
                                                  'test', 'abcd')
        # from_config -> get_config must be a lossless round trip.
        self.assertEqual(default_config, inst1.get_config())
        self.assertEqual(inst1._save_dir, '/some/path/somewhere')
        self.assertEqual(inst1._subdir_split, 4)
        # vector-based equality
        inst2 = DescriptorFileElement.from_config(inst1.get_config(),
                                                  'test', 'abcd')
        self.assertEqual(inst1, inst2)

    def test_vec_filepath_generation(self):
        # With subdir_split=N, the uuid appears to be split into N segments
        # and all but the last become nested directories (e.g. 'abcd' with
        # split 4 -> a/b/c) -- inferred from the expectations below.
        d = DescriptorFileElement('test', 'abcd', '/base', 4)
        self.assertEqual(d._vec_filepath,
                         '/base/a/b/c/test.abcd.vector.npy')
        d = DescriptorFileElement('test', 'abcd', '/base', 2)
        self.assertEqual(d._vec_filepath,
                         '/base/ab/test.abcd.vector.npy')
        d = DescriptorFileElement('test', 'abcd', '/base', 1)
        self.assertEqual(d._vec_filepath,
                         '/base/test.abcd.vector.npy')
        # split of 0 or omitted: no subdirectories at all.
        d = DescriptorFileElement('test', 'abcd', '/base', 0)
        self.assertEqual(d._vec_filepath,
                         '/base/test.abcd.vector.npy')
        d = DescriptorFileElement('test', 'abcd', '/base')
        self.assertEqual(d._vec_filepath,
                         '/base/test.abcd.vector.npy')

    def test_serialization(self):
        # Test that an instance can be serialized and deserialized via pickle
        # successfully.
        ex_type = 'test'
        ex_uid = 12345
        ex_save_dir = 'some-dir'
        ex_split = 5
        e1 = DescriptorFileElement(ex_type, ex_uid, ex_save_dir, ex_split)
        # pickle dump and load into a new copy
        #: :type: DescriptorFileElement
        e2 = cPickle.loads(cPickle.dumps(e1))
        # Make sure the two have the same attributes, including base descriptor
        # element things.
        self.assertEqual(e1.type(), e2.type())
        self.assertEqual(e1.uuid(), e2.uuid())
        self.assertEqual(e1._save_dir, e2._save_dir)
        self.assertEqual(e1._subdir_split, e2._subdir_split)
        self.assertEqual(e1._vec_filepath, e2._vec_filepath)

    @mock.patch('smqtk.representation.descriptor_element.local_elements'
                '.numpy.save')
    @mock.patch('smqtk.representation.descriptor_element.local_elements'
                '.file_utils.safe_create_dir')
    def test_vector_set(self, mock_scd, mock_save):
        # set_vector must create the split directory, then numpy.save into it.
        d = DescriptorFileElement('test', 1234, '/base', 4)
        self.assertEqual(d._vec_filepath,
                         '/base/1/2/3/test.1234.vector.npy')
        v = numpy.zeros(16)
        d.set_vector(v)
        mock_scd.assert_called_with('/base/1/2/3')
        mock_save.assert_called_with('/base/1/2/3/test.1234.vector.npy', v)

    @mock.patch('smqtk.representation.descriptor_element.local_elements'
                '.numpy.load')
    def test_vector_get(self, mock_load):
        # Before anything is stored there is no vector; afterwards, vector()
        # must return what numpy.load yields.
        d = DescriptorFileElement('test', 1234, '/base', 4)
        self.assertFalse(d.has_vector())
        self.assertIs(d.vector(), None)
        d.has_vector = mock.Mock(return_value=True)
        self.assertTrue(d.has_vector())
        v = numpy.zeros(16)
        mock_load.return_value = v
        numpy.testing.assert_equal(d.vector(), v)
|
import os
import re
import sys
# All regular expressions to look for
# Dotted quad with lookbehinds meant to reject private ranges
# (10/8, 192.168/16, 172.16/12, 100.64/10).
# NOTE(review): lookbehind placement makes this hard to verify by eye;
# confirm against known public and private addresses.
PUB_IP = re.compile(r'(\d+)(?<!10)\.(\d+)(?<!192\.168)(?<!172\.(1[6-9]|2\d|3[0-1]))(?<!100\.64)\.(\d+)\.(\d+)')
# NOTE(review): unescaped '.' matches any character; r'\d+\.\d+\.\d+\.\d+'
# was presumably intended. Currently unused in this file.
LOCAL_IP = re.compile(r'\d+.\d+.\d+.\d+')
# Which folders to ignore
IGNORED_FOLDERS = ['.\.git']
# Accumulates {file path: [match descriptions]} across get_matches() calls.
ALL_RESULTS = {}
def get_parsers():
    """Recursively collect the paths of all files under the current
    directory, skipping top-level folders listed in IGNORED_FOLDERS.

    :return: list of relative file paths
    """
    list_of_files = []
    for folder in [f.path for f in os.scandir() if f.is_dir()]:
        if folder in IGNORED_FOLDERS:
            continue
        # `dirs` avoids shadowing the builtin `dir`; os.walk never yields
        # empty file names, so the original `if file:` check was redundant.
        for root, dirs, files in os.walk(folder):
            for file_name in files:
                list_of_files.append(os.path.join(root, file_name))
    return list_of_files
def get_matches(parser, resultdict):
    """Scan one file for public IP addresses and record the matches.

    Reads *parser* line by line in binary mode and appends one description
    per PUB_IP match; when any are found, stores the list in *resultdict*
    keyed by the file path.

    :param parser: path of the file to scan
    :param resultdict: dict updated in place (also returned for chaining)
    :return: resultdict
    """
    line_results = []
    # Context manager guarantees the handle is closed (original leaked it).
    with open(parser, 'rb') as handle:
        for num, line in enumerate(handle):
            # finditer yields only real matches, so no truthiness test is
            # needed; the original `else: break` branch was unreachable.
            for match in PUB_IP.finditer(str(line)):
                line_results.append(
                    'Public IP {} found on line {}'.format(match.group(),
                                                           num + 1))
    if line_results:
        resultdict[parser] = line_results
    return resultdict
def list_matches(results):
    """Print the first recorded match for each file in *results*.

    Bug fix: the original looked entries up in the global ALL_RESULTS
    instead of the `results` argument it was given.

    :param results: dict mapping file path -> list of match descriptions
    """
    for parser, matches in results.items():
        if matches:
            print('{} in file {}'.format(matches[0], parser))
if __name__ == "__main__":
    # Scan every discovered file, then exit non-zero if any public IP was
    # found (CI-friendly leak gate).
    parsers = get_parsers()
    for parser in parsers:
        get_matches(parser, ALL_RESULTS)
    # get_matches only ever stores non-empty lists, so the dict's own
    # truthiness is the correct "anything found?" signal; the original
    # `any(v is not None ...)` said the same thing in a roundabout way.
    if ALL_RESULTS:
        list_matches(ALL_RESULTS)
        sys.exit(1)
    else:
        sys.exit(0)
from fenics import *
from pandas import DataFrame
import numpy as np
set_log_active(False)
def solve_system(N, degree_V, degree_Q, file_dump=False):
    """Solve a manufactured Stokes problem on the unit square with FEniCS.

    Uses a Taylor-Hood style mixed element (vector Lagrange of degree
    *degree_V* for velocity, scalar Lagrange of degree *degree_Q* for
    pressure) and compares against the known analytical solution.

    :param N: number of mesh cells per side (UnitSquareMesh(N, N))
    :param degree_V: polynomial degree of the velocity space
    :param degree_Q: polynomial degree of the pressure space
    :param file_dump: if True, write velocity/pressure fields to .pvd files
    :return: (H1 velocity error, L2 pressure error, their sum)
    """
    mesh = UnitSquareMesh(N, N)
    # Create mixed element space
    V = VectorElement("Lagrange", mesh.ufl_cell(), degree_V)
    Q = FiniteElement("Lagrange", mesh.ufl_cell(), degree_Q)
    W = FunctionSpace(mesh, V * Q)
    u, p = TrialFunctions(W)
    v, q = TestFunctions(W)
    # Specific problem: body force matching the manufactured solution below.
    f = Expression(
        [
            "pi * pi * sin(pi * x[1]) - 2 * pi * cos(2*pi*x[0])",
            "pi * pi * cos(pi * x[0])"
        ],
        degree=degree_V)
    p_analytical = Expression("sin(2 * pi * x[0])", degree=degree_Q)
    u_analytical = Expression(
        ["sin(pi * x[1])", "cos(pi * x[0])"], degree=degree_V)
    # boundary conditions: velocity on left/top/bottom, pressure on right.
    def u_boundary(x):
        return x[0] < DOLFIN_EPS or x[1] > 1.0 - DOLFIN_EPS or x[
            1] < DOLFIN_EPS
    def p_boundary(x):
        return x[0] > 1.0 - DOLFIN_EPS
    bc_fluid = DirichletBC(W.sub(0), u_analytical, u_boundary)
    bc_press = DirichletBC(W.sub(1), p_analytical, p_boundary)
    # weak formulation of the Stokes system
    a = inner(grad(u), grad(v)) * dx + div(u) * q * dx + div(v) * p * dx
    L = inner(f, v) * dx
    UP = Function(W)
    A, b = assemble_system(a, L, [bc_fluid, bc_press])
    solve(A, UP.vector(), b, 'lu')
    U, P = UP.split()
    if file_dump:
        fluid_file = File(
            'data/fluid_{n}_{dv}_{dq}.pvd'.format(n=N, dv=degree_V, dq=degree_Q))
        pressure_file = File('data/pressure_d{n}_{dv}_{dq}.pvd'.format(
            n=N, dv=degree_V, dq=degree_Q))
        fluid_file << U
        pressure_file << P
    # degree_rise=2: evaluate the error norms in a higher-order space.
    error_u = errornorm(u_analytical, U, 'H1', degree_rise=2)
    error_p = errornorm(p_analytical, P, 'L2', degree_rise=2)
    error_sum = error_u + error_p
    return error_u, error_p, error_sum
def estimate_error(error):
    """Fit a line to log(error) versus log(h) and return [rate, coefficient].

    Assumes error ~ C * h^r with h = 1/N, so the log-log slope is the
    convergence rate r and exp(intercept) is the coefficient C.
    (Removed the `parameters`/`best_fit` locals the original built and
    never used.)

    :param error: pandas DataFrame indexed by N with a 'u + p' column
    :return: [convergence rate, coefficient]
    """
    N = error.index.values
    h_log = [np.log(1.0 / n) for n in N]
    error_log = error.applymap(np.log)
    # Linear fit in log-log space: slope = rate, intercept = log(C).
    error_fit = np.polyfit(h_log, error_log['u + p'], deg=1)
    # exponentiate to regain the multiplicative coefficient
    error_fit[1] = np.exp(error_fit[1])
    return list(error_fit)
if __name__ == "__main__":
    # Sweep element-degree pairs and mesh sizes, fit the convergence rate
    # of the combined error, and print the table as LaTeX.
    element_degrees = [(4, 3), (4, 2), (3, 2), (3, 1)]
    N_values = [8, 16, 32, 64]
    error_table = DataFrame(index=N_values, columns=['u + p'])
    approximation_table = DataFrame(index=element_degrees,
                                    columns=['convergence rate', 'coefficient'])
    for element in element_degrees:
        V_deg, Q_deg = element
        for N in N_values:
            errors = solve_system(N, V_deg, Q_deg, file_dump=True)
            # DataFrame.set_value was removed in pandas 1.0; .at is the
            # supported scalar accessor.
            error_table.at[N, 'u + p'] = errors[2]
        best_fit_table = estimate_error(error_table)
        approximation_table.at[element, 'convergence rate'] = best_fit_table[0]
        approximation_table.at[element, 'coefficient'] = best_fit_table[1]
    print(approximation_table.to_latex())
|
# Generated by Django 3.0.3 on 2020-03-04 17:16
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: creates the `Membre` (member) model with a
    pseudo/nom/prenom identity, a profile image and a URL slug."""

    dependencies = [
        ('main_app', '0002_auto_20200301_1655'),
    ]

    operations = [
        migrations.CreateModel(
            name='Membre',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('pseudo', models.CharField(max_length=50)),
                ('nom', models.CharField(max_length=50)),
                ('prenom', models.CharField(max_length=50)),
                # Uploaded files land under MEDIA_ROOT/imagesDeProfil/.
                ('imageProfil', models.ImageField(upload_to='imagesDeProfil/')),
                # Empty default; presumably populated from `pseudo` at save
                # time elsewhere -- confirm in the model code.
                ('slug_pseudo', models.SlugField(default='')),
            ],
            options={
                'ordering': ['pseudo'],
            },
        ),
    ]
|
import pytest
def fun(x):
    """Validate that *x* is exactly the integer 100.

    :raises TypeError: when *x* is not an int
    :raises ValueError: when *x* is an int other than 100
    """
    if not isinstance(x, int):
        raise TypeError('x不是数字')
    if x != 100:
        raise ValueError('x值不对')
def test_raises():
    """Check that fun() raises ValueError with the expected message for a
    wrong (but integer) value."""
    with pytest.raises(ValueError) as e:
        fun(101)
    # e.value is the caught exception instance; args[0] is its message.
    exec_msg = e.value.args[0]
    print(exec_msg)
    assert exec_msg == 'x值不对'
|
# Print each element of the sequence, one per line.
# Fixes: the original shadowed the builtin `list` and iterated by index
# (`range(len(...))`) instead of over the items directly.
numbers = [1, 3, 5, 7, 9]
for number in numbers:
    print(number)
|
import sys
import urllib2
def Management_Transit(conf,inputs,outputs):
    """ZOO-style WPS service handler (Python 2): query the local transit
    accessibility service and store its response in *outputs*.

    :param conf: service configuration (unused here)
    :param inputs: dict of {name: {"value": str}} request parameters
    :param outputs: dict whose "AccessibilityScore" value gets filled in
    :return: 3 -- NOTE(review): presumably the framework's status code;
        confirm against the hosting framework's conventions.
    """
    start_point = inputs["StartPoint"]["value"]
    start_time = inputs["StartTime"]["value"]
    walking_time_period = inputs["WalkingTimePeriod"]["value"]
    walking_speed = inputs["WalkingSpeed"]["value"]
    bus_waiting_time = inputs["BusWaitingTime"]["value"]
    bus_ride_time = inputs["BusRideTime"]["value"]
    distance_decay_function = inputs["DistanceDecayFunction"]["value"]
    # Raw string concatenation: values are passed through unescaped.
    management_url = "http://127.0.0.1:9363/management?start_point="+start_point+"&start_time="+start_time+"&walking_time_period="+walking_time_period+"&walking_speed="+walking_speed+"&bus_waiting_time="+bus_waiting_time+"&bus_ride_time="+bus_ride_time+"&distance_decay_function="+distance_decay_function;
    try:
        data = urllib2.urlopen(management_url).read()
    except urllib2.HTTPError, e:
        # NOTE(review): on failure `data` is never bound, so the assignment
        # below raises NameError instead of reporting cleanly.
        print "HTTP error: %d" % e.code
    except urllib2.URLError, e:
        print "Network error: %s" % e.reason.args[1]
    outputs["AccessibilityScore"]["value"] = data
    return 3
|
def sieve (n):
    """Return the primes up to and including n (sieve of Eratosthenes).

    Bug fix: the original comprehension started at 1 and therefore reported
    1 as prime; primes start at 2.

    :param n: upper bound (inclusive)
    :return: list of primes <= n
    """
    num_dict = dict([num,True] for num in xrange(1,n+1))
    for i in xrange(2, int(n**.5) + 1 ):
        if num_dict [i]:
            # mark every proper multiple of i as composite
            for j in xrange(i*2,n+1,i):
                num_dict[j] = False
    return [i for i in xrange(2,n+1) if num_dict[i]]
# Interactive driver (Python 2): prompt until the user types "end".
while True:
    num = raw_input ("Enter a number or end to quit: ")
    try:
        int(num)
        print sieve(int(num))
    except ValueError:
        # Non-numeric input is either the quit sentinel or a bad entry.
        if num.lower() == "end":
            break
        else:
            print ("Not a valid number please re-enter")
|
#
# Assignment 4
#
# Student Name : Aausuman Deep
# Student Number : 119220605
#
# Assignment Creation Date : February 22, 2020
import docx
import pyexcel
import os.path
def analyze(docfile):
    """Create an Excel file of word frequencies for a .docx document.

    Words are lower-cased, non-alphanumeric characters act as separators,
    and words with relative frequency below 0.001 are dropped. The output
    is saved as "<docname>_word_stats.xlsx" in the working directory.

    Fixes vs. the original: builds a cleaned local copy of each paragraph
    instead of reassigning paragraph.text once per character (which mutated
    the in-memory document and re-scanned the text O(n^2) times), iterates
    words directly, and drops the unused `row` variable and the shadowing
    of the builtin `file`.

    :param docfile: path to the .docx document
    :return: 0 on success
    """
    doc = docx.Document(docfile)
    my_dict = {}
    # iterating paragraph wise
    for paragraph in doc.paragraphs:
        # replace every non-alphanumeric character with a space, locally
        cleaned = ''.join(c if c.isalnum() else ' ' for c in paragraph.text)
        for word in cleaned.lower().split():
            my_dict[word] = my_dict.get(word, 0) + 1
    count_words = sum(my_dict.values())
    # convert counts to relative frequencies
    for word in my_dict:
        my_dict[word] = float(my_dict[word] / count_words)
    # delete all entries with frequency less than 0.001
    delete = [key for key in my_dict if my_dict[key] < 0.001]
    for key in delete:
        del my_dict[key]
    # write [word, frequency] rows into the appropriately named excel file
    my_list = [[k, v] for k, v in my_dict.items()]
    base_name = os.path.split(docfile)[1]
    filename = base_name.split(".")[0] + "_word_stats.xlsx"
    pyexcel.save_as(array=my_list, dest_file_name=filename,
                    dest_sheet_name='Word Frequency Stats')
    return 0
|
import networkx as nx
import numpy as np
def skelToLength(vertices, edges, res = (1, 1, 1)):
    """
    Returns cable length of connected skeleton vertices in the same
    metric that this volume uses (typically nanometers).

    Fix: the default for `res` is now an immutable tuple instead of a
    mutable list (mutable default argument anti-pattern); behavior is
    unchanged since numpy broadcasts either sequence.

    :param vertices: (N, 3) array of vertex coordinates
    :param edges: (E, 2) array of vertex-index pairs
    :param res: per-axis scale applied to coordinate deltas
    :return: total Euclidean length over all edges (0 if no vertices)
    """
    if vertices.shape[0] == 0:
        return 0
    # Scaled per-edge displacement vectors.
    delta = (vertices[edges[:, 1]] - vertices[edges[:, 0]]) * res
    dist = np.sqrt(np.sum(delta * delta, axis=1))
    return np.sum(dist)
def skelToNetworkX(nodes, edges, seg_list, res):
    """Build a NetworkX graph of ground-truth skeletons for ERL evaluation.

    Bug fix: `[{}] * len(seg_list)` put the SAME dict object in every slot,
    so all segmentations shared one lookup table; each slot now gets an
    independent dict.

    :param nodes: list of per-skeleton (N_k, 3) node coordinate arrays (z, y, x)
    :param edges: list of per-skeleton (E_k, 2) local edge-index arrays
    :param seg_list: list of segmentation volumes indexed by (z, y, x)
    :param res: per-axis resolution multipliers (z, y, x)
    :return: (graph, luts) where luts[i] maps global node id -> segment id
        in seg_list[i]
    """
    gt_graph = nx.Graph()
    node_segment_lut = [{} for _ in seg_list]
    cc = 0  # running global node id across all skeletons
    for k in range(len(nodes)):
        node = nodes[k]
        # shift local edge indices into the global id space
        edge = edges[k] + cc
        for l in range(node.shape[0]):
            gt_graph.add_node(cc, skeleton_id=k,
                              z=node[l, 0] * res[0],
                              y=node[l, 1] * res[1],
                              x=node[l, 2] * res[2])
            for i in range(len(seg_list)):
                node_segment_lut[i][cc] = seg_list[i][node[l, 0], node[l, 1], node[l, 2]]
            cc += 1
        for l in range(edge.shape[0]):
            gt_graph.add_edge(edge[l, 0], edge[l, 1])
    return gt_graph, node_segment_lut
|
from selenium import webdriver
from selenium.webdriver.chrome.options import Options
import csv
import datetime
# Headless Chrome session pointed at Tencent's COVID-19 statistics page.
chrome_options = Options()
chrome_options.add_argument('--headless')
chrome_options.add_argument('--disable-gpu')
driver = webdriver.Chrome(chrome_options=chrome_options)
driver.get("https://news.qq.com/zt2020/page/feiyan.htm")
# Scraped header cells and data rows, filled by getTitle()/getData().
listTitle = [] # ['地区', '现有确诊', '累计确诊', '治愈', '死亡', '疫情']
listData = []
def getTitle():
    """Scrape the header row of the first stats table into listTitle."""
    tableTitle = driver.find_element_by_xpath('//*[@id="listWraper"]/table[1]')
    tableTitle_data = tableTitle.find_elements_by_xpath('./thead/tr/th')
    for title in tableTitle_data:
        listTitle.append(title.text)
def getData():
    """Scrape each area's row of the second stats table into listData.

    Each appended row holds: area name, then the four bold numeric cells,
    plus the fifth column's plain text.
    """
    tableData = driver.find_element_by_xpath('//*[@id="listWraper"]/table[2]')
    tbodyData = tableData.find_elements_by_xpath("./tbody")
    for trData in tbodyData:
        areaBox = trData.find_elements_by_xpath('./tr[@class="areaBox"]')
        for data in areaBox:
            listDataText = [data.find_element_by_xpath('.//span').text,
                            data.find_element_by_xpath('./td[1]/p[@class="bold"]').text,
                            data.find_element_by_xpath('./td[2]/p[@class="bold"]').text,
                            data.find_element_by_xpath('./td[3]/p[@class="bold"]').text,
                            data.find_element_by_xpath('./td[4]/p[@class="bold"]').text,
                            data.find_element_by_xpath('./td[5]/p').text,
                            ]
            listData.append(listDataText)
def saveData():
    """Write the scraped header + rows to a date-stamped UTF-8 CSV file
    under ./EpidemicData/ (the directory must already exist)."""
    with open("./EpidemicData/EpidemicData-{}.csv".format(datetime.datetime.now().strftime("%Y-%m-%d")), "w", newline="", encoding="utf-8") as csv_f:
        writer = csv.writer(csv_f, dialect='excel')
        writer.writerow(listTitle)
        for data in listData:
            writer.writerow(data)
if __name__ == "__main__":
    # Scrape header and rows, write today's CSV, then shut the browser down.
    getTitle()
    getData()
    saveData()
    driver.quit()
|
# -*- coding: utf-8 -*-
# Module author: Official Repo, @GovnoCodules
import logging
from .. import loader, utils
import telethon
import io
from telethon.errors.rpcerrorlist import MessageNotModifiedError
import asyncio
logger = logging.getLogger(__name__)
@loader.tds
class TextEditorMod(loader.Module):
    """Text Editor Module: keyboard-layout fixing, code formatting,
    text<->file conversion, typewriter animation and text reversal."""
    strings = {
        "name": "TextEditor",
        "no_message": "<b>You can't type nothing!</b>",
        "type_char_cfg_doc": "Character for typewriter",
        "delay_typer_cfg_doc": "How long to delay showing the typewriter "
                               "character",
        "delay_text_cfg_doc": "How long to delay showing the text"
    }

    def __init__(self):
        # User-tunable settings: the typewriter placeholder character and
        # the two animation delays (seconds).
        self.config = loader.ModuleConfig("TYPE_CHAR", "▒",
                                          lambda m: self.strings("type_char_cfg_doc"),
                                          "DELAY_TYPER", 0.04,
                                          lambda m: self.strings("delay_typer_cfg_doc"),
                                          "DELAY_TEXT", 0.02,
                                          lambda m: self.strings("delay_text_cfg_doc"))

    async def switchcmd(self, message):
        """Fix text typed in the wrong RU/EN keyboard layout.

        (Translated from Russian:) If you typed text without switching the
        keyboard layout, go back to the start and prepend `.switch` to make
        it readable. If you already sent such a message, reply to it with
        this command and it is edited in place. If your chat partner made
        the mistake, reply to their message and the command message itself
        is edited instead."""
        # Parallel key tables: index i of RuKeys corresponds to index i of
        # EnKeys on a physical keyboard (used to build the translation map).
        RuKeys = """ёйцукенгшщзхъфывапролджэячсмитьбю.Ё"№;%:?ЙЦУКЕНГ
ШЩЗХЪФЫВАПРОЛДЖЭ/ЯЧСМИТЬБЮ, """
        EnKeys = """`qwertyuiop[]asdfghjkl;'zxcvbnm,./~@#$%^&QWERTYUIOP{
}ASDFGHJKL:"|ZXCVBNM<>? """
        if message.is_reply:
            reply = await message.get_reply_message()
            text = reply.raw_text
            if not text:
                await message.edit('Тут текста нету...')
                return
            change = str.maketrans(RuKeys + EnKeys, EnKeys + RuKeys)
            text = str.translate(text, change)
            # Own replied-to message: fix it in place; someone else's
            # message: edit the command message instead.
            if message.sender_id != reply.sender_id:
                await message.edit(text)
            else:
                await message.delete()
                await reply.edit(text)
        else:
            text = utils.get_args_raw(message)
            if not text:
                await message.edit('Тут текста нету...')
                return
            change = str.maketrans(RuKeys + EnKeys, EnKeys + RuKeys)
            text = str.translate(text, change)
            await message.edit(text)

    @loader.ratelimit
    async def codecmd(self, message):
        """.code <text or reply> -- wrap the text in a monospace block."""
        if message.is_reply:
            reply = await message.get_reply_message()
            code = reply.raw_text
            # NOTE(review): this replace is a no-op; HTML escapes
            # (&lt;/&gt;) were presumably lost when this file was copied.
            code = code.replace("<", "<").replace(">", ">")
            await message.edit(f"<code>{code}</code>")
        else:
            code = message.raw_text[5:]
            code = code.replace("<", "<").replace(">", ">")
            try:
                await message.edit(f"<code>{code}</code>")
            except:
                # NOTE(review): "msg_is_emp" is not defined in `strings`
                # above; this fallback would itself fail. Confirm intent.
                await message.edit(self.strings("msg_is_emp", message))

    async def mtfcmd(self, message):
        """.mtf <reply to text> -- turn the replied-to text into a .txt file."""
        reply = await message.get_reply_message()
        if not reply or not reply.message:
            await message.edit("<b>Reply to text!</b>")
            return
        text = bytes(reply.raw_text, "utf8")
        # File name from the args, or a unique id-based default.
        fname = utils.get_args_raw(message) or str(
            message.id + reply.id) + ".txt"
        file = io.BytesIO(text)
        file.name = fname
        file.seek(0)
        await reply.reply(file=file)
        await message.delete()

    async def ftmcmd(self, message):
        """.ftm <reply to file> -- post the replied-to file's contents as text
        (wrapped in <code> when any argument is given)."""
        reply = await message.get_reply_message()
        if not reply or not reply.file:
            await message.edit("<b>Reply to file!</b>")
            return
        text = await reply.download_media(bytes)
        text = str(text, "utf8")
        if utils.get_args(message):
            text = f"<code>{text}</code>"
        await utils.answer(message, utils.escape_html(text))

    @loader.ratelimit
    async def typercmd(self, message):
        """.type <message> -- animate the text with a typewriter effect."""
        a = utils.get_args_raw(message)
        if not a:
            await utils.answer(message, self.strings("no_message", message))
            return
        m = ""
        entities = message.entities or []
        for c in a:
            # Show the placeholder, then replace it with the real character.
            m += self.config["TYPE_CHAR"]
            message = await update_message(message, m, entities)
            # NOTE(review): delays are hard-coded; the DELAY_TYPER and
            # DELAY_TEXT config values appear intended here.
            await asyncio.sleep(0.04)
            m = m[:-1] + c
            message = await update_message(message, m, entities)
            await asyncio.sleep(0.02)

    async def revcmd(self, message):
        """.rev <text or reply> -- reverse the given (or replied-to) text."""
        if message.text:
            text = utils.get_args_raw(message)
            reply = await message.get_reply_message()
            if not text and not reply:
                return await message.edit("Нет текста или реплая.")
            return await message.edit((text or reply.raw_text)[::-1])
        else:
            return await message.edit("Это не текст.")
async def update_message(message, m, entities):
    """Edit *message* to text *m*, reusing the original entities, and return
    the (possibly replaced) message object. A MessageNotModifiedError is
    swallowed -- e.g. when only trailing whitespace changed."""
    try:
        return await utils.answer(message, m,
                                  parse_mode=lambda t: (t, entities))
    except MessageNotModifiedError:
        return message  # unchanged text (space doesnt count)
|
import re
from urllib import request
from argparse import ArgumentParser
from sys import exit
import os
class ThreadDownloader:
    """Downloads every .jpg picture referenced by a 2ch.hk thread page."""

    def __init__(self, lnk, path):
        """Fetch the thread page and prepare the picture list.

        :param lnk: thread URL (…/res/<id>.html)
        :param path: directory where pictures will be saved
        """
        try:
            self.page = request.urlopen(lnk).read().decode("utf-8")
        except request.HTTPError as e:
            print(str(e))
            exit()
        # res/<id>.html -> src/<id>/ is where the full-size files live.
        self.dwn_link = lnk.replace("res", "src").replace(".html", "/")
        self.pics = self.make_list_of_pics()
        self.path = path

    def make_list_of_pics(self):
        """Return the unique .jpg file names mentioned in the page HTML."""
        # Raw string literal: the original "'\d+\.jpg'" relied on invalid
        # escape sequences in a plain string (DeprecationWarning).
        pic_re = re.compile(r'\d+\.jpg')
        return set(re.findall(pic_re, self.page))

    def start(self):
        """Download every discovered picture into self.path."""
        for pic in self.pics:
            print("Downloading: " + self.dwn_link + pic)
            data = request.urlopen(self.dwn_link + pic).read()
            save_path = self.path + '/' + pic
            print("Saving: " + save_path)
            # Context manager guarantees the handle is closed even if the
            # write fails (the original closed it manually).
            with open(save_path, "wb") as out:
                out.write(data)
def mad():
    """Print the usage hint (use only one of the -t / -l options) and abort."""
    print('Используйте только один из ключей -t или -l')
    exit()
if __name__ == "__main__":
    # CLI: exactly one of -t (thread number) or -l (full link) is required;
    # -p selects the existing output directory (default: current dir).
    p = ArgumentParser()
    p.add_argument("-t", "--thread", type=int, help="Num of thread in /b")
    p.add_argument("-l", "--link", type=str, help="Link to thread")
    p.add_argument("-p", "--path", type=str, help="Path to save")
    args = p.parse_args()
    link = "https://2ch.hk/b"
    path = "."
    if args.path:
        path = args.path
        if not os.path.isdir(path):
            print("Неверный путь")
            exit()
    # Both or neither given -> usage error. NOTE(review): a thread number
    # of 0 is falsy and would also trip this check.
    if bool(args.thread) == bool(args.link):
        mad()
    elif args.thread:
        link = "https://2ch.hk/b/res/" + str(args.thread) + ".html"
    elif args.link:
        if args.link.startswith("https://2ch.hk/"):
            link = args.link
        else:
            mad()
    ThreadDownloader(link, path).start()
|
from flask import flash, render_template, redirect, request
from flask_mail import Mail, Message
from app import app
from .forms import RequestForm
import uuid
import bdb_scraper
mail = Mail(app)
@app.route('/', methods=['GET', 'POST'])
def req():
    """Landing page: show the scrape-request form; on a valid POST, run the
    scrape via rec() and render its result page."""
    form = RequestForm()
    if form.validate_on_submit():
        return rec(form)
    return render_template('request.html',
                           title='BDB-Scraper',
                           form=form)
def mail_url(email, url):
    """Email the download link of the finished archive to *email*.

    (Removed the dead `pass` the original left after mail.send.)
    """
    msg = Message('Your BDB archive is ready',
                  sender=app.config['MAIL_USERNAME'],
                  recipients=[email])
    msg.body = "Here are your pictures: " + url
    mail.send(msg)
def rec(form):
    """Run the scraper for a submitted form and render the done page.

    Builds a unique zip name, scrapes the gallery, emails the resulting
    download URL to the requester, and returns the confirmation template.
    (Removed the leftover debug `print(email)`, which wrote a user email
    address to stdout/logs.)
    """
    url = str(form['starturl'].data)
    email = str(form['email'].data)
    username = str(form['username'].data)
    password = str(form['password'].data)
    save_text = form['save_text'].data
    # uuid suffix keeps concurrent requests from clobbering each other.
    name = bdb_scraper.zip_name_from_url(url) + '-' + uuid.uuid4().hex
    zip_path = "static/zips/"
    zip_name = name
    bdb_scraper.scrape(url,
                       username=username, password=password,
                       save_text=save_text,
                       create_zip=True,
                       zip_name='app/' + zip_path + zip_name,
                       zip_base='files/',
                       dest="files/" + name)
    mail_url(email, request.url_root + zip_path + zip_name + '.zip')
    return render_template('done.html',
                           title='BDB-Scraper',
                           path=zip_path,
                           zipname=zip_name + '.zip')
|
from spack import *
import sys,os
sys.path.append(os.path.join(os.path.dirname(__file__), '../../common'))
from scrampackage import write_scram_toolfile
class TauolaToolfile(Package):
    """Spack package that generates SCRAM toolfiles (tauola.xml and
    tauola_headers.xml) pointing at an installed `tauola` dependency."""
    # Placeholder artifact: the real output is the generated toolfiles.
    url = 'file://' + os.path.dirname(__file__) + '/../../common/junk.xml'
    version('1.0', '68841b7dcbd130afd7d236afe8fd5b949f017615', expand=False)
    depends_on('tauola')

    def install(self, spec, prefix):
        """Substitute the dependency's version/prefix into the XML templates
        and write both toolfiles via write_scram_toolfile."""
        values = {}
        values['VER'] = spec['tauola'].version
        values['PFX'] = spec['tauola'].prefix
        # Library toolfile: links pretauola/tauola and exports TAUOLA_BASE.
        fname = 'tauola.xml'
        contents = str("""
<tool name="tauola" version="${VER}">
<lib name="pretauola"/>
<lib name="tauola"/>
<client>
<environment name="TAUOLA_BASE" default="${PFX}"/>
<environment name="LIBDIR" default="$$TAUOLA_BASE/lib"/>
</client>
<use name="f77compiler"/>
<use name="tauola_headers"/>
</tool>
""")
        write_scram_toolfile(contents, values, fname, prefix)
        # Headers toolfile: exposes the include dir to ROOT_INCLUDE_PATH.
        fname = 'tauola_headers.xml'
        contents = str("""
<tool name="tauola_headers" version="${VER}">
<client>
<environment name="TAUOLA_HEADERS_BASE" default="${PFX}"/>
<environment name="INCLUDE" default="$$TAUOLA_HEADERS_BASE/include"/>
</client>
<runtime name="ROOT_INCLUDE_PATH" value="$$INCLUDE" type="path"/>
<use name="root_cxxdefaults"/>
</tool>
""")
        write_scram_toolfile(contents, values, fname, prefix)
|
'''
Created on Apr 8, 2012
@author: bogdan
'''
import FinVol_2D_Conv_Diff
import LidCavity
import numpy
#import the TDMA module
from thomas import *
import linalg
# Create a mesh class that holds a vector of nodes
class SIMPLE(object):
    '''
    classdocs
    The NS equation is solved for Fi = u and Fi = v and Gamma = miu
    Solve the equation:
    d/dx(rho*u*Fi) + d/dy(ro*v*Fi) = d/dx(Gamma* dFi/dx)+d/dy(Gamma* dFi/dy) - dp/dx +Su
    d/dx(rho*u*u) + d/dy(ro*v*u) = d/dx(Gamma* du/dx)+d/dy(Gamma* du/dy) - dp/dx +Su
    d/dx(rho*u*v) + d/dy(ro*v*v) = d/dx(Gamma* dv/dx)+d/dy(Gamma* dv/dy) - dp/dx +Su
    '''

    def __init__(self, lidCav, scheme, debug):
        '''
        Constructor.

        :param lidCav: lid-driven-cavity problem definition (mesh sizes,
            material properties, wall velocities, direction indices)
        :param scheme: convection scheme name ("UD", "QUICK", ...); selects
            the under-relaxation factors below
        :param debug: debug flag carried along for the solver
        '''
        #specific to SIMPLE
        self.itermax = 4000
        self.itmaxSIMPLE = 89 #41
        self.eps = 1e-8 #TDMA
        self.err = 1.73 #1e-3 #SIMPLE
        #inherited from the Cavity problem
        self.lc = lidCav
        self.scheme = scheme
        self.debug = debug
        self.Nx = self.lc.Nx #number of nodes
        self.Ny = self.lc.Ny
        self.Lx = self.lc.Lx #domain dimension
        self.Ly = self.lc.Ly
        self.Gx = self.lc.miuX # diffusive coeff Gamma or Miu
        self.Gy = self.lc.miuY
        self.rho = self.lc.rho # density
        self.deltaX = None
        self.deltaY = None
        # Finite-volume discretizations for u and v momentum (set later).
        self.finvol_u = None
        self.finvol_v = None
        # Direction indices (West/East/North/South/centre) from the problem.
        self.W = self.lc.W
        self.E = self.lc.E
        self.N = self.lc.N
        self.S = self.lc.S
        self.P = self.lc.P
        # Scheme-dependent under-relaxation factors (tuned empirically).
        if scheme == "UD":
            self.urf_p = 0.2 #underrelaxation factor for P GOOD for 50x50=0.1 129x129=0.1 25x25=0.1 self.urf = 0.2
            self.urf_uv = 0.6 #underrelaxation factor for U *V GOOD for 50x50=0.6 129x129=0.6 25x25=0.6 self.urf = 0.2
        elif scheme == 'QUICK':
            self.urf_p = 0.061 #0.061 #underrelaxation factor for P GOOD for 25x25=0.061
            self.urf_uv = 0.9 #underrelaxation factor for U *V GOOD for 25x25=0.9
        else :
            self.urf_p = 0.1 #
            self.urf_uv = 0.6 #
        if self.lc.CPP == True:
            # Optional C++ BLAS backend for the linear algebra.
            self.cls = linalg.CppBlas()
            #self.urf_p = 0.3
            #self.urf_uv = 0.2
        self.urf = 0.2 #underrelaxation factor for TDMA
        #init the nodes for pressure: neighbour coefficients per cell
        self.aW = numpy.zeros((self.lc.Ny, self.lc.Nx))
        self.aE = numpy.zeros((self.lc.Ny, self.lc.Nx))
        self.aN = numpy.zeros((self.lc.Ny, self.lc.Nx))
        self.aS = numpy.zeros((self.lc.Ny, self.lc.Nx))
        self.aP = numpy.zeros((self.lc.Ny, self.lc.Nx))
        # Second-neighbour coefficients (used by higher-order schemes).
        self.aWW = numpy.zeros((self.lc.Ny, self.lc.Nx))
        self.aEE = numpy.zeros((self.lc.Ny, self.lc.Nx))
        self.aNN = numpy.zeros((self.lc.Ny, self.lc.Nx))
        self.aSS = numpy.zeros((self.lc.Ny, self.lc.Nx))
        #init Su sources
        self.Su = numpy.zeros((self.lc.Ny, self.lc.Nx))
        #init Sp sources
        self.SpN = numpy.zeros((self.lc.Ny, self.lc.Nx))
        self.SpS = numpy.zeros((self.lc.Ny, self.lc.Nx))
        self.SpE = numpy.zeros((self.lc.Ny, self.lc.Nx))
        self.SpW = numpy.zeros((self.lc.Ny, self.lc.Nx))
        #Convection terms per direction (and previous-iteration copy)
        self.F = numpy.zeros((4, self.lc.Ny, self.lc.Nx))
        self.FOld = numpy.zeros((4, self.lc.Ny, self.lc.Nx))
        self.V = numpy.zeros(5) #Volume on 4 directions + P
        self.u = numpy.zeros((self.lc.Ny, self.lc.Nx)) # horizontal velocity
        self.ue = numpy.zeros((self.lc.Ny, self.lc.Nx)) # horizontal E face velocity
        self.uw = numpy.zeros((self.lc.Ny, self.lc.Nx)) # horizontal W face velocity
        self.v = numpy.zeros((self.lc.Ny, self.lc.Nx)) # vertical velocity
        self.vn = numpy.zeros((self.lc.Ny, self.lc.Nx)) # vertical N face velocity
        self.vs = numpy.zeros((self.lc.Ny, self.lc.Nx)) # vertical S face velocity
        self.p = numpy.zeros((self.lc.Ny, self.lc.Nx)) # pressure
        self.ustar = numpy.zeros((self.lc.Ny, self.lc.Nx)) # horizontal velocity - initial
        self.vstar = numpy.zeros((self.lc.Ny, self.lc.Nx)) # vertical velocity - initial
        self.pstar = numpy.zeros((self.lc.Ny, self.lc.Nx)) # pressure - initial
        self.pnot = numpy.zeros((self.lc.Ny, self.lc.Nx)) # pressure correction
        self.pnotOld = numpy.zeros((self.lc.Ny, self.lc.Nx)) # pressure correction
        #the TDMA coeficients. (We calculate vertical lines (columns = > allocate the number of horizontal lines)
        self.alp = numpy.zeros(self.Ny)
        self.bet = numpy.zeros(self.Ny)
        self.D = numpy.zeros(self.Ny)
        self.C = numpy.zeros(self.Ny)
        self.calculateDelta()
    #end __init__
    #calculate the sources for each face
    # Momentum source terms at faces (lower case e/w/n/s), at neighbours
    # (upper case E/W/N/S) and at the cell centre (P). All zero for the
    # plain lid-driven cavity; kept as override hooks for problems with
    # body forces.
    def Sxe(self, j, i):
        #return 0.5 * (self.finvol_u.SuP[j, i] + self.finvol_u.SuE[j, i])
        return 0.0
    def Sxw(self, j, i):
        return 0.0
    def Syn(self, j, i):
        return 0.0
    def Sys(self, j, i):
        return 0.0
    def SxE(self, j, i):
        return 0.0
    def SxW(self, j, i):
        return 0.0
    def SyN(self, j, i):
        return 0.0
    def SyS(self, j, i):
        return 0.0
    def SxP(self, j, i):
        return 0.0
    def SyP(self, j, i):
        return 0.0
    # Pressure accessors: neighbour (E/W/N/S), second neighbour (EE/WW/NN/SS)
    # and centre (P) values of field `p` at cell (j, i). At domain edges the
    # missing neighbour is linearly extrapolated; second neighbours return
    # 0.0 on the outermost cells (their coefficients are zero there).
    def pE(self, p, j, i):
        if i == self.Nx - 1:
            return 2 * p[j, i] - p[j, i - 1] #extrapolate from the neighboring values
        return p[j, i + 1]
    def pW(self, p, j, i):
        if i == 0:
            return 2 * p[j, i] - p[j, i + 1] #extrapolate from the neighboring values
        return p[j, i - 1]
    def pEE(self, p, j, i):
        if i == self.Nx - 1:
            return 0.0
        if i == self.Nx - 2:
            return 2 * p[j, i + 1] - p[j, i] #extrapolate from the neighboring values
        return p[j, i + 2]
    def pWW(self, p, j, i):
        if i == 0:
            return 0.0
        if i == 1:
            return 2 * p[j, i - 1] - p[j, i] #extrapolate from the neighboring values
        return p[j, i - 2]
    def pN(self, p, j, i):
        if j == self.Ny - 1:
            return 2 * p[j, i] - p[j - 1, i] #extrapolate from the neighboring values
        return p[j + 1, i]
    def pNN(self, p, j, i):
        if j == self.Ny - 1:
            return 0.0
        if j == self.Ny - 2:
            return 2 * p[j + 1, i] - p[j, i] #extrapolate from the neighboring values
        return p[j + 2, i]
    def pS(self, p, j, i):
        if j == 0:
            return 2 * p[j, i] - p[j + 1, i] #extrapolate from the neighboring values
        return p[j - 1, i]
    def pSS(self, p, j, i):
        if j == 0:
            return 0.0
        if j == 1:
            return 2 * p[j - 1, i] - p[j, i] #extrapolate from the neighboring values
        return p[j - 2, i]
    def pP(self, p, j, i):
        return p[j, i]
    # Face pressures: simple averages of the two adjacent node values.
    def pe(self, p, j, i):
        return 0.5 * (self.pE(p, j, i) + self.pP(p, j, i))
    def pn(self, p, j, i):
        return 0.5 * (self.pN(p, j, i) + self.pP(p, j, i))
    def pw(self, p, j, i):
        return 0.5 * (self.pP(p, j, i) + self.pW(p, j, i))
    def ps(self, p, j, i):
        return 0.5 * (self.pP(p, j, i) + self.pS(p, j, i))
def calculateDelta(self):
'''
calculate preliminary information, deltaX and Y
'''
self.deltaX = self.Lx / self.Nx
self.deltaY = self.Ly / self.Ny
self.alp = numpy.zeros(self.Ny)
self.bet = numpy.zeros(self.Ny)
self.D = numpy.zeros(self.Ny)
self.C = numpy.zeros(self.Ny)
self.V[self.P] = self.deltaX * self.deltaY
self.V[self.E] = self.deltaX * self.deltaY
self.V[self.W] = self.deltaX * self.deltaY
self.V[self.N] = self.deltaX * self.deltaY
self.V[self.S] = self.deltaX * self.deltaY
    def calculate_uv_face(self):
        """Recompute all face velocities (ue/uw/vn/vs) for every cell from
        the current starred fields via the *_f interpolation methods."""
        for i in range(0, self.Nx):
            for j in range(0, self.Ny):
                self.ue[j, i] = self.ue_f(j, i)
                self.uw[j, i] = self.uw_f(j, i)
                self.vn[j, i] = self.vn_f(j, i)
                self.vs[j, i] = self.vs_f(j, i)
    def calculate_F(self, firstiter = False):
        '''
        calculate convection terms for cell faces
        unsteady flow F is NOT constant

        :param firstiter: on the first iteration no face velocities exist
            yet, so the starred node velocities are used instead.
        '''
        if firstiter == True:
            for i in range(0, self.Nx):
                for j in range(0, self.Ny):
                    # mass flux = rho * velocity * face area (V/delta).
                    self.F[self.E, j, i] = self.lc.rho * self.ustar[j, i] * self.V[self.E] / self.deltaY
                    self.F[self.W, j, i] = self.lc.rho * self.ustar[j, i] * self.V[self.W] / self.deltaY
                    self.F[self.N, j, i] = self.lc.rho * self.vstar[j, i] * self.V[self.N] / self.deltaX
                    self.F[self.S, j, i] = self.lc.rho * self.vstar[j, i] * self.V[self.S] / self.deltaX
        else:
            for i in range(0, self.Nx):
                for j in range(0, self.Ny):
                    self.F[self.E, j, i] = self.lc.rho * self.ue[j, i] * self.V[self.E] / self.deltaY
                    self.F[self.W, j, i] = self.lc.rho * self.uw[j, i] * self.V[self.W] / self.deltaY
                    self.F[self.N, j, i] = self.lc.rho * self.vn[j, i] * self.V[self.N] / self.deltaX
                    self.F[self.S, j, i] = self.lc.rho * self.vs[j, i] * self.V[self.S] / self.deltaX
    #end calculate_F
    #node velocities
    #horizontal
    # Neighbour values of the starred u field; at a domain boundary the
    # prescribed wall velocity (lc.FiU) is used instead.
    def uW(self, j, i):
        #FiW
        if i == 0:
            u = self.lc.FiU[self.W]
        else:
            u = self.ustar[j, i - 1]
        return u
    def uE(self, j, i):
        if i == self.Nx - 1:
            u = self.lc.FiU[self.E]
        else:
            u = self.ustar[j, i + 1]
        return u
    def uS(self, j, i):
        if j == 0:
            u = self.lc.FiU[self.S]
        else:
            u = self.ustar[j - 1, i]
        return u
    def uN(self, j, i):
        #FiW
        if j == self.Ny - 1:
            u = self.lc.FiU[self.N]
        else:
            u = self.ustar[j + 1, i]
        return u
    #vertical
    # Neighbour values of the starred v field; at a domain boundary the
    # prescribed wall velocity (lc.FiV) is used instead.
    def vW(self, j, i):
        #FiW
        if i == 0:
            v = self.lc.FiV[self.W]
        else:
            v = self.vstar[j, i - 1]
        return v
    def vE(self, j, i):
        if i == self.Nx - 1:
            v = self.lc.FiV[self.E]
        else:
            v = self.vstar[j, i + 1]
        return v
    def vS(self, j, i):
        if j == 0:
            v = self.lc.FiV[self.S]
        else:
            v = self.vstar[j - 1, i]
        return v
    def vN(self, j, i):
        #FiW
        if j == self.Ny - 1:
            v = self.lc.FiV[self.N]
        else:
            v = self.vstar[j + 1, i]
        return v
#face velocities
#horizontal
def ue_f(self, j, i):
if self.finvol_u.aE[j, i] == 0 or i == self.Nx - 1:
#VEaE = 0
return self.lc.ue_wall[j]
else:
VEaE = self.V[self.E] / self.finvol_u.aE[j, i]
VPaP = self.V[self.P] / self.finvol_u.aP[j, i]
Veae = 0.5 * (VEaE + VPaP)
#Veae = (self.V[self.E] + self.V[self.P]) / (self.finvol_u.aE[j, i] + self.finvol_u.aP[j, i])
dpdxE = (self.pEE(self.pstar, j, i) - self.pP(self.pstar, j, i)) / (2.0 * self.deltaX)
dpdxP = (self.pE(self.pstar, j, i) - self.pW(self.pstar, j, i)) / (2.0 * self.deltaX)
if i == self.Nx - 1:
dpdxe = dpdxE
else:
dpdxe = 0.5 * (dpdxE + dpdxP) #
#dpdxe = (self.pE(self.pstar, j, i) - self.pP(self.pstar, j, i)) / self.deltaX
ue = (self.uE(j, i) + self.ustar[j, i]) / 2.0 + \
self.urf_uv * 0.5 * (VEaE * (dpdxE - self.SxE(j, i)) + VPaP * (dpdxP - self.SxP(j, i))) - Veae * (dpdxe - self.Sxe(j, i))
#altenative formula
#ue2 = (self.uE(j, i) + self.ustar[j, i]) / 2.0 + \
# 0.5 * (self.deltaY / self.finvol_u.aE[j, i] * (self.pEE(self.pstar, j, i) - self.pP(self.pstar, j, i)) / 2.0\
# + self.deltaY / self.finvol_u.aP[j, i] * (self.pE(self.pstar, j, i) - self.pW(self.pstar, j, i)) / 2.0)\
# - self.deltaY * 0.5 * (1.0 / self.finvol_u.aP[j, i] + 1.0 / self.finvol_u.aE[j, i]) * (self.pE(self.pstar, j, i) - self.pP(self.pstar, j, i))
return ue
    def uw_f(self, j, i):
        """Rhie-Chow style momentum interpolation of u on the WEST face of
        cell (j, i); mirror image of ue_f."""
        if self.finvol_u.aW[j, i] == 0 or i == 0 :
            #VWaW = 0
            return self.lc.uw_wall[j]
        else:
            VWaW = self.V[self.W] / self.finvol_u.aW[j, i]
            VPaP = self.V[self.P] / self.finvol_u.aP[j, i]
            Vwaw = 0.5 * (VWaW + VPaP)
            #Vwaw = (self.V[self.W] + self.V[self.P]) / (self.finvol_u.aW[j, i] + self.finvol_u.aP[j, i])
            # Pressure gradients centred at W, at P, and their face average.
            dpdxW = (self.pP(self.pstar, j, i) - self.pWW(self.pstar, j, i)) / (2 * self.deltaX)
            dpdxP = (self.pE(self.pstar, j, i) - self.pW(self.pstar, j, i)) / (2 * self.deltaX)
            if i == 0:
                # Unreachable: the boundary case already returned above.
                dpdxw = dpdxW
            else:
                dpdxw = 0.5 * (dpdxW + dpdxP)
            #dpdxw = (self.pP(self.pstar, j, i) - self.pW(self.pstar, j, i)) / self.deltaX
            uw = (self.uW(j, i) + self.ustar[j, i]) / 2 + \
                self.urf_uv * 0.5 * (VWaW * (dpdxW - self.SxW(j, i)) + VPaP * (dpdxP - self.SxP(j, i))) - Vwaw * (dpdxw - self.Sxw(j, i))
            #or
            #uv2 = (self.uW(j, i) + self.ustar[j, i]) / 2 + Vwaw * (0.5 * (dpdxW + dpdxP) - dpdxw)
            return uw
#vertical
    def vn_f(self, j, i):
        """Rhie-Chow style momentum interpolation of v on the NORTH face of
        cell (j, i).

        NOTE(review): the guard tests finvol_u.aN while the body divides by
        finvol_v.aN — likely a u/v copy-paste slip; confirm.
        """
        if self.finvol_u.aN[j, i] == 0 or j == self.Ny - 1 :
            #VNaN = 0
            return self.lc.vn_wall[i]
        else:
            VNaN = self.V[self.N] / self.finvol_v.aN[j, i]
            VPaP = self.V[self.P] / self.finvol_v.aP[j, i]
            Vnan = 0.5 * (VNaN + VPaP)
            #Vnan = (self.V[self.N] + self.V[self.P]) / (self.finvol_u.aN[j, i] + self.finvol_u.aP[j, i])
            # Vertical pressure gradients centred at N, at P, and the face.
            dpdxN = (self.pNN(self.pstar, j, i) - self.pP(self.pstar, j, i)) / (2 * self.deltaY)
            dpdxP = (self.pN(self.pstar, j, i) - self.pS(self.pstar, j, i)) / (2 * self.deltaY)
            if j == self.Ny - 1:
                # Unreachable: the boundary case already returned above.
                dpdxn = dpdxN
            else:
                dpdxn = 0.5 * (dpdxN + dpdxP)
            #dpdxn = (self.pN(self.pstar, j, i) - self.pP(self.pstar, j, i)) / self.deltaY
            # NOTE(review): SxP appears among the Sy* source terms below —
            # possibly should be SyP; confirm against the derivation.
            vn = (self.vN(j, i) + self.vstar[j, i]) / 2 + \
                self.urf_uv * 0.5 * (VNaN * (dpdxN - self.SyN(j, i)) + VPaP * (dpdxP - self.SxP(j, i))) - Vnan * (dpdxn - self.Syn(j, i))
            return vn
    def vs_f(self, j, i):
        """Rhie-Chow style momentum interpolation of v on the SOUTH face of
        cell (j, i); mirror image of vn_f.

        NOTE(review): the guard tests finvol_u.aS while the body divides by
        finvol_v.aS, and SxP appears among the Sy* terms — confirm both.
        """
        if self.finvol_u.aS[j, i] == 0 or j == 0:
            #VSaS = 0
            return self.lc.vs_wall[i]
        else:
            VSaS = self.V[self.S] / self.finvol_v.aS[j, i]
            VPaP = self.V[self.P] / self.finvol_v.aP[j, i]
            Vsas = 0.5 * (VSaS + VPaP)
            #Vsas = (self.V[self.S] + self.V[self.P]) / (self.finvol_u.aS[j, i] + self.finvol_u.aP[j, i])
            dpdxS = (self.pP(self.pstar, j, i) - self.pSS(self.pstar, j, i)) / (2 * self.deltaY)
            dpdxP = (self.pN(self.pstar, j, i) - self.pS(self.pstar, j, i)) / (2 * self.deltaY)
            if j == 0:
                # Unreachable: the boundary case already returned above.
                dpdxs = dpdxS
            else:
                dpdxs = 0.5 * (dpdxS + dpdxP)
            #dpdxs = (self.pP(self.pstar, j, i) - self.pS(self.pstar, j, i)) / self.deltaY
            vs = (self.vS(j, i) + self.vstar[j, i]) / 2 + \
                self.urf_uv * 0.5 * (VSaS * (dpdxS - self.SyS(j, i)) + VPaP * (dpdxP - self.SxP(j, i))) - Vsas * (dpdxs - self.Sys(j, i))
            return vs
def solve(self):
converged = False
Iter = 0
# 1) Estimate intial values for the ustar, vstar and pstar variables
self.guess_p_u_v()
# Calcuate intitial convection terms
self.calculate_F(Iter == 0)
while converged == False and Iter < self.itmaxSIMPLE:
# 2) solve the momentum equations for the star/initial values
self.finvol_u, self.finvol_v = self.solve_momentum_u_v()
# 3) recalculate interface velocity and new convective flux terms (Fs)
#if Iter == 0 :
self.calculate_uv_face()
self.calculate_F()
# 4) solve aP * p'P = aW * p'W + aE * p'E + aS*p'S + aN* p'N + b
self.solve_pnot(self.finvol_u, self.finvol_v)
# 8) Extrapolate to boundaries
self.extrapolate_p_to_boundaries()
# 5) correct
# pressure p = p* + p'
self.correct_p_from_pnot_pstar()
# corect velocities u = u* + (dP*u')
self.correct_uv()
# 6) correct m not - Convection terms ex: Fe= Fe +( rho *de * deltaY)*(Pp' -Pe')
# => this step seems to make convergence worse
#self.correct_convection_terms()
# 7) optional - solve other transports
#self.solve_other_transports()
# 9) check convergence
converged = self.check_convergence(Iter)
# 10) set the new values
self.set_new_values()
# inrement the Iterator
Iter += 1
if Iter > self.itermax:
print "Solution did not converge in %d interations", Iter
else:
print "Solution converged in %d interations", Iter
return [self.u, self.v]
#end solve
def guess_p_u_v(self):
'''
Estimate intial valuess for the u, v and p variables
'''
for i in range(0, self.Nx):
for j in range(0, self.Ny):
self.ustar[j, i] = 0
self.vstar[j, i] = 0
self.pstar[j, i] = 0 #9.8 * (self.Ny - 1 - j) * self.deltaY * self.rho
#other may follow , Temp, Conc, etc
#end guess_p_u_v
    def solve_momentum_u_v(self):
        """Solve the v- then u-momentum equation with the current starred
        pressure field and return the two discretisation objects so their
        coefficients can be reused by the pressure-correction step."""
        #set boundary conditions for Fi = v , all are 0
        self.finvol_v = FinVol_2D_Conv_Diff.FinVol_2D_Conv_Diff("v", self.lc.FiV, self.pstar, self)
        self.vstar = self.finvol_v.solve().copy()
        #set boundary conditions for Fi = u
        self.finvol_u = FinVol_2D_Conv_Diff.FinVol_2D_Conv_Diff("u", self.lc.FiU, self.pstar, self)
        self.ustar = self.finvol_u.solve().copy()
        return [self.finvol_u, self.finvol_v]
    def calculateTDMACoefficients(self, i):
        '''
        book pag 220
        Apply TDMA S to N sweeping W to E
        The discretization equation is given by
        In the book they have it reversed "j" is for lines and "i" for columns

        Builds the tri-diagonal system (alp, bet, D, C) for column i of the
        pressure-correction field; W/E neighbour contributions go into the
        free term C using the current p' estimate.
        '''
        #calculate on each vertical from S -> N
        for j in range(0, self.Ny):
            #Compute the TDMA coefficients
            self.alp[j] = self.aN[j, i].copy()
            self.bet[j] = self.aS[j, i].copy()
            self.D[j] = self.aP[j, i].copy()
            #the free term
            #Avoid problems at boundaries by calling a function which considers the boundary limitation on index
            #boundary conditions are set through the term C[j]
            self.C[j] = self.aW[j, i] * self.pW(self.pnot, j, i) + self.aE[j, i] * self.pE(self.pnot, j, i) + self.Su[j, i]
        #end for j
    #end calculateTDMACoefficients
    def solve_pnot(self, finvol_u, finvol_v):
        '''
        Assemble and solve the pressure-correction equation
            aP*p'P = aW*p'W + aE*p'E + aS*p'S + aN*p'N + b
        by repeated column-wise TDMA sweeps (W to E) until the correction
        field stops changing (eps) or itermax sweeps are spent.

        NOTE(review): the early exits return None while the final line
        returns [self.pnot]; callers appear to ignore the return value.
        '''
        #calculate the coefficients for pnot
        for j in range(0, self.Ny):
            for i in range(0, self.Nx):
                # d-coefficients are built from the average of the neighbour
                # and central momentum coefficients; boundary faces get a = 0.
                if i == self.Nx - 1:
                    self.aE[j, i] = 0
                else:
                    ape = 0.5 * (self.finvol_u.aE[j, i] + self.finvol_u.aP[j, i])
                    #de = self.finvol_u.A[self.E] / ape
                    de = 1.0 / ape
                    #de = 0.5 * (self.finvol_u.A[self.E] / self.finvol_u.aE[j, i] + self.finvol_u.A[self.E] / self.finvol_u.aP[j, i])
                    self.aE[j, i] = self.rho * de * self.deltaY
                if i == 0:
                    self.aW[j, i] = 0
                else:
                    apw = 0.5 * (self.finvol_u.aW[j, i] + self.finvol_u.aP[j, i])
                    #dw = self.finvol_u.A[self.W] / apw
                    dw = 1.0 / apw
                    #dw = 0.5 * (self.finvol_u.A[self.W] / self.finvol_u.aW[j, i] + self.finvol_u.A[self.W] / self.finvol_u.aP[j, i])
                    self.aW[j, i] = self.rho * dw * self.deltaY
                if j == self.Ny - 1:
                    self.aN[j, i] = 0
                else:
                    apn = 0.5 * (self.finvol_v.aN[j, i] + self.finvol_v.aP[j, i])
                    #dn = self.finvol_v.A[self.N] / apn
                    dn = 1.0 / apn
                    #dn = 0.5 * (self.finvol_v.A[self.N] / self.finvol_v.aN[j, i] + self.finvol_v.A[self.N] / self.finvol_v.aP[j, i])
                    self.aN[j, i] = self.rho * dn * self.deltaX
                if j == 0:
                    self.aS[j, i] = 0
                else:
                    aps = 0.5 * (self.finvol_v.aS[j, i] + self.finvol_v.aP[j, i])
                    #ds = self.finvol_v.A[self.S] / aps
                    ds = 1.0 / aps
                    #ds = 0.5 * (self.finvol_v.A[self.E] / self.finvol_v.aS[j, i] + self.finvol_v.A[self.S] / self.finvol_v.aP[j, i])
                    self.aS[j, i] = self.rho * ds * self.deltaX
                # Source term b: net mass imbalance of the starred face velocities.
                self.Su[j, i] = (self.rho * self.uw[j, i] - self.rho * self.ue[j, i]) * self.deltaY + \
                    (self.rho * self.vs[j, i] - self.rho * self.vn[j, i]) * self.deltaX
                self.aP[j, i] = self.aE[j, i] + self.aW[j, i] + self.aN[j, i] + self.aS[j, i]
        it = 0
        n = self.D.size
        x = numpy.zeros(n)
        while self.itermax > it :
            #copy current values to the old values matrix
            self.pnotOld = self.pnot.copy()
            #Swipe from W to E
            for i in range(0, self.Nx):
                #calculate the TDMA coefficients for column i
                self.calculateTDMACoefficients(i)
                if self.debug == True:
                    print "beta:", self.bet
                    print "D", self.D
                    print "alp", self.alp
                    print "C", self.C
                # Solve one column either with the C++ TDMA binding or the
                # pure-Python Thomas algorithm.
                if self.lc.CPP == True:
                    self.cls.setTDMA(-self.bet[1:], self.D, -self.alp[:-1], self.C, n)
                    d = self.cls.solveTDMA(x, n)
                    self.pnot[:, i] = d["solution"].copy()
                else:
                    x = thomas(n, -self.bet[1:], self.D, -self.alp[:-1], self.C)
                    self.pnot[:, i] = x.copy()
            #end i
            # Under-relaxation of the correction field.
            self.pnot = self.urf * self.pnot.copy() + self.pnotOld.copy() * (1 - self.urf)
            # Accuracy test on column 1 only (cheap proxy for the full field).
            flat = self.pnot[:, 1] - self.pnotOld[:, 1]
            dx = math.sqrt(numpy.dot(flat, flat))
            if it % 600 == 0:
                print "var: %s iter # %d, dx=%1.9f" % ("p", it, dx)
            #print "Fi:", self.Fi
            #Exit if we are satisfied wit the accuracy
            if dx < self.eps :
                print self.pnot
                return
            it += 1
        #end while
        #if we did not converge yet print an error and exit
        if self.itermax <= it:
            print "Max iterations exceeded => did not converge"
            print self.pnot
            return
        return [self.pnot]
    #end solve_pnot
def correct_p_from_pnot_pstar(self):
for i in range(0, self.Nx):
for j in range(0, self.Ny):
self.p[j, i] = self.pstar[j, i].copy() + self.urf_p * self.pnot[j, i]
    def correct_uv(self):
        '''
        correct u /v from u*/v* + p'

        u = u* + urf_uv * (A_e / aP) * (p'w - p'e) and the analogous
        north/south correction for v.
        '''
        for i in range(0, self.Nx):
            for j in range(0, self.Ny):
                self.u[j, i] = self.ustar[j, i].copy() + self.urf_uv * self.finvol_u.A[self.E] / self.finvol_u.aP[j, i] * (self.pw(self.pnot, j, i) - self.pe(self.pnot, j, i))
                self.v[j, i] = self.vstar[j, i].copy() + self.urf_uv * self.finvol_v.A[self.N] / self.finvol_v.aP[j, i] * (self.ps(self.pnot, j, i) - self.pn(self.pnot, j, i))
    #end correct_uv
def update_F(self):
self.FOld = self.F.copy()
for i in range(0, self.Nx):
for j in range(0, self.Ny):
self.F[self.E, j, i] = self.FOld[self.E, j, i].copy() + self.rho * self.ue[j, i] * self.deltaY
self.F[self.W, j, i] = self.FOld[self.W, j, i].copy() + self.rho * self.uw[j, i] * self.deltaY
self.F[self.N, j, i] = self.FOld[self.N, j, i].copy() + self.rho * self.vn[j, i] * self.deltaX
self.F[self.S, j, i] = self.FOld[self.S, j, i].copy() + self.rho * self.vs[j, i] * self.deltaX
    def correct_convection_terms(self):
        '''
        Step 6 of SIMPLE: correct the convective fluxes from p'.

        Deliberately short-circuited: the face velocities and fluxes are
        simply recomputed and the explicit d-coefficient correction below
        the bare `return` is dead code kept for reference (see the note in
        solve() about it hurting convergence).
        '''
        self.calculate_uv_face()
        self.calculate_F()
        return
        # --- everything below is currently unreachable ---
        for i in range(0, self.Nx):
            for j in range(0, self.Ny):
                if i == self.Nx - 1:
                    self.ue[j, i] = 0
                else:
                    de = self.urf_uv * 0.5 * (self.finvol_u.A[self.E] / self.finvol_u.aE[j, i] + self.finvol_u.A[self.E] / self.finvol_u.aP[j, i])
                    #de = self.urf_uv * 0.5* (1.0 / self.finvol_u.aE[j, i] + 1.0 / self.finvol_u.aP[j, i])
                    self.ue[j, i] = self.ue[j, i] + de * (self.pP(self.pnot, j, i) - self.pE(self.pnot, j, i))
                if i == 0:
                    self.uw[j, i] = 0
                else:
                    dw = self.urf_uv * 0.5 * (self.finvol_u.A[self.W] / self.finvol_u.aW[j, i] + self.finvol_u.A[self.W] / self.finvol_u.aP[j, i])
                    #dw = self.urf_uv * 0.5* (1.0 / self.finvol_u.aW[j, i] + 1.0 / self.finvol_u.aP[j, i])
                    self.uw[j, i] = self.uw[j, i] + dw * (self.pW(self.pnot, j, i) - self.pP(self.pnot, j, i))
                if j == self.Ny - 1:
                    self.vn[j, i] = 0
                else:
                    # NOTE(review): mixes finvol_v.A[N]/aN with finvol_v.A[W]/finvol_u.aP — confirm.
                    dn = self.urf_uv * 0.5 * (self.finvol_v.A[self.N] / self.finvol_v.aN[j, i] + self.finvol_v.A[self.W] / self.finvol_u.aP[j, i])
                    #dn = self.urf_uv * 0.5* (1.0 / self.finvol_u.aN[j, i] + 1.0 / self.finvol_u.aP[j, i])
                    self.vn[j, i] = self.vn[j, i] + dn * (self.pP(self.pnot, j, i) - self.pN(self.pnot, j, i))
                if j == 0:
                    self.vs[j, i] = 0
                else:
                    ds = self.urf_uv * 0.5 * (self.finvol_v.A[self.S] / self.finvol_v.aS[j, i] + self.finvol_v.A[self.W] / self.finvol_u.aP[j, i])
                    #ds = self.urf_uv * 0.5* (1.0 / self.finvol_u.aE[j, i] + 1.0 / self.finvol_u.aP[j, i])
                    self.vs[j, i] = self.vs[j, i] + ds * (self.pS(self.pnot, j, i) - self.pP(self.pnot, j, i))
        self.update_F()
    def solve_other_transports(self):
        '''
        Hook for additional scalar transport equations (temperature,
        concentration, etc.). Intentionally a no-op for now.
        '''
        pass
def extrapolate_p_to_boundaries(self):
'''
The pressure values at the boundary conditions can be calculated by linear
interpolation of using the the two near boundary node pressures
'''
# this is done automatically in the pE functions above for p but not for p'
for j in range(0, self.Ny):
for i in range(0, self.Nx):
if i == self.Nx - 1:
self.p[j, i] = self.p[j, i - 1]
self.pnot[j, i] = self.pnot[j, i - 1];
if i == 0:
self.p[j, i] = self.p[j, i + 1]
self.pnot[j, i] = self.pnot[j, i + 1];
if j == self.Ny - 1:
self.p[j, i] = self.p[j - 1, i]
self.pnot[j, i] = self.pnot[j - 1, i];
if j == 0:
self.p[j, i] = self.p[j + 1, i]
self.pnot[j, i] = self.pnot[j + 1, i];
#end extrapolate_p_to_boundaries
def check_convergence(self, Iter):
'''
All 3 variables need to converge
'''
Sum = 0
for j in range(0, self.Ny):
for i in range(0, self.Nx):
Sum += abs(self.F[self.E, j, i] - self.F[self.W, j, i] + self.F[self.N, j, i] - self.F[self.S, j, i])
print "iteration # %d, Sum abs(F) =%1.9f " % (Iter, Sum)
#Exit if we are satisfied wit the accuracy
if Sum <= self.err :
return True
return False
def set_new_values(self):
self.pstar = self.p.copy()
self.ustar = self.u.copy()
self.vstar = self.v.copy()
|
#!/usr/bin/env python
"""
"""
import os
from collections import defaultdict
import ujson as json
from util import liblogger
import math

# Configuration comes from the environment.
# BUG FIX: bool(os.environ[...]) is True for ANY non-empty string, including
# "False" and "0"; parse the flag explicitly instead.
using_cache = os.environ["using_cache"].strip().lower() in ("1", "true", "yes", "y")
cooc_dict_file = os.environ["cooc_dict_file"]
weighted_cooc_dict_file = os.environ["weighted_cooc_dict_file"]
lex_count_file = os.environ["lex_count_file"]

# Log-probability tables, populated by the loaders below.
w_dict = None   # log P(x) per word
c_dict = None   # log P(y) per context
cw_dict = None  # log P(x, y) per word/context pair
cache_dict = dict()
def load_cooc_dict():
    """Build log P(x, y) (cw_dict) and log P(y) (c_dict) from the raw
    co-occurrence counts, caching the normalised tables as JSON.

    BUG FIX: cw_dict used to be keyed by the tuple (w, ctx), while
    calc_pmi() looks up the string "<w> <ctx>" — every lookup silently hit
    the defaultdict default of 0. Tuple keys are also not valid JSON object
    keys, so the cache could not round-trip. Keys are now the same
    "<w> <ctx>" strings calc_pmi builds. A duplicated `count = ...`
    statement was also removed.
    """
    global cw_dict, c_dict
    liblogger.info("load cooc dict")
    pxy_cache_file = cooc_dict_file + ".pxy.cache"
    py_cache_file = cooc_dict_file + ".py.cache"
    if using_cache and os.path.exists(pxy_cache_file) and os.path.exists(py_cache_file):
        cw_dict = json.load(open(pxy_cache_file))
        c_dict = json.load(open(py_cache_file))
        return
    cooc_dict = json.load(open(cooc_dict_file))
    cw_dict = defaultdict(int)
    c_dict = defaultdict(int)
    for w in cooc_dict:
        for ctx in cooc_dict[w]:
            count = cooc_dict[w][ctx]
            cw_dict["{0} {1}".format(w, ctx)] += count
            c_dict[ctx] += count
    liblogger.info("norm cooc dict for P(x, y)")
    cw_sum = float(sum(cw_dict.values()))
    for cw in cw_dict:
        cw_dict[cw] = math.log(cw_dict[cw] / cw_sum)
    json.dump(cw_dict, open(pxy_cache_file, "w"))
    liblogger.info("ctx dict P(y)")
    c_sum = float(sum(c_dict.values()))
    for c in c_dict:
        c_dict[c] = math.log(c_dict[c] / c_sum)
    json.dump(c_dict, open(py_cache_file, "w"))
def load_lex_counts():
    """Populate w_dict with log P(x) for every word, using a JSON cache
    next to lex_count_file when using_cache is enabled."""
    global w_dict
    liblogger.info("load word dict")
    cache_file = lex_count_file + ".cache"
    if using_cache and os.path.exists(cache_file):
        w_dict = json.load(open(cache_file))
        return
    lex_counts = json.load(open(lex_count_file))
    w_sum = float(sum(lex_counts.values()))
    liblogger.info("norm word dict for P(x)")
    w_dict = dict((word, math.log(cnt / w_sum)) for word, cnt in lex_counts.items())
    json.dump(w_dict, open(cache_file, "w"))
def calc_pmi(w, c):
    """Pointwise mutual information in log space:
    PMI(w, c) = log P(w, c) - log P(w) - log P(c)."""
    joint = cw_dict["{0} {1}".format(w, c)]
    return joint - w_dict[w] - c_dict[c]
def main():
    """Load the probability tables, compute PMI for every (word, context)
    pair and write the positive-PMI (PPMI) weights to
    weighted_cooc_dict_file."""
    load_cooc_dict()
    load_lex_counts()
    cooc_dict = json.load(open(cooc_dict_file))
    cooc_pmi_dict = defaultdict(dict)
    liblogger.info("calc pmi")
    for w in cooc_dict:
        #ctxs = cooc_dict[w].keys()
        cooc_pmi_dict[w] = defaultdict(int)
        for ctx in cooc_dict[w]:
            pmi = calc_pmi(w, ctx)
            # keep only positive PMI values (PPMI weighting)
            if pmi > 0:
                cooc_pmi_dict[w][ctx] = pmi
    json.dump(cooc_pmi_dict, open(weighted_cooc_dict_file, "w"))
    liblogger.info("pmi calculated")

if __name__ == "__main__":
    main()
|
#!/usr/bin/env python2
import os
import ConfigParser
import time
import subprocess
import readline
class color:
    """ANSI terminal escape sequences used to colour the menus."""
    HEADER = '\033[95m'
    IMPORTANT = '\33[35m'  # NOTE: octal '\33' is the same ESC byte as '\033'
    NOTICE = '\033[33m'
    OKBLUE = '\033[94m'
    OKGREEN = '\033[92m'
    WARNING = '\033[93m'
    RED = '\033[91m'
    WHITE = '\033[37m'
    END = '\033[0m'  # reset all attributes
    UNDERLINE = '\033[4m'
    LOGGING = '\33[34m'
def clearScr():
    """Clear the terminal (POSIX only — shells out to `clear`)."""
    os.system('clear')
def yesOrNo():
    """Prompt for confirmation; True when the answer is in the configured
    `yes` word list (read from cyanide.cfg at module level).

    BUG FIX: previously used input(), which on Python 2 eval()s the typed
    text — a bare `Y` raised NameError and arbitrary expressions would be
    executed. raw_input() returns the raw string.
    """
    return (raw_input("Continue Y / N: ") in yes)
def return_fw():
    """Hand control back to the surrounding shell framework by sourcing
    cyanide-framework.sh and invoking its `main` function."""
    bashCommand = ". ./cyanide-framework.sh && main"
    subprocess.call(['bash','-c', bashCommand])
def logo():
    """Print the two-tone ASCII-art banner."""
    print(color.RED+"           ..,;:ccccccc:;...")
    print(color.WHITE+"       ..,clllc:;;;;;;:cllc,.")
    print(color.RED+"      .,cllc,..............';;'.")
    print(color.WHITE+"     .;lol;......"+color.WHITE+"_______"+color.RED+"....;lol;.")
    print(color.RED+"    .,lol;......"+color.WHITE+"/ _____/"+color.RED+".....;lol;..   ")
    print(color.WHITE+"    .coo......."+color.WHITE+"/ /"+color.RED+".............coo")
    print(color.RED+".'lol,....."+color.WHITE+"/ /"+color.RED+"............'lol,.")
    print(color.WHITE+".,lol,...."+color.WHITE+"/ /_____"+color.RED+"........,lol,.")
    print(color.RED+".,lol,...."+color.WHITE+"\______/"+color.RED+".......,lol,.")
    print(color.WHITE+" .:ooc'.................:ooc'")
    print(color.RED+"  .'cllc'.............cllc.")
# Resolve install locations relative to this file.
installDir = os.path.dirname(os.path.abspath(__file__)) + '/'
# BUG FIX: installDir already ends with '/', so the old
# `installDir + "/cyanide.cfg"` produced a double slash in the path.
configFile = os.path.join(installDir, "cyanide.cfg")
print(installDir)
config = ConfigParser.RawConfigParser()
config.read(configFile)
toolDir = installDir + config.get('cyanide', 'toolDir') # makes folder
logDir = installDir + config.get('cyanide', 'logDir') # make logs
yes = config.get('cyanide', 'yes').split()  # words accepted as "yes"
color_random=[color.HEADER,color.IMPORTANT,color.NOTICE,color.OKBLUE,color.OKGREEN,color.WARNING,color.RED,color.END,color.UNDERLINE,color.LOGGING]
# random.shuffle(color_random)
continuePrompt = "\nClick [Return] to continue"
alreadyInstalled = "Already Installed"
class Jtr:
    """Interactive John the Ripper menu (install on demand, then prompt).

    Fixes over the original:
    - every Python 2 input() became raw_input() (input() eval()s the typed
      text: a bare Y crashed with NameError and allowed code injection);
    - locals no longer shadow the builtins `format` and `file`;
    - menu re-entry uses self.menu() instead of Jtr.menu(self).
    """
    def __init__(self):
        # Install john on first use, then drop into the menu.
        if not self.installed():
            self.install()
        self.run()
    def installed(self):
        """Heuristic check for an existing john installation."""
        return (os.path.isfile("/usr/sbin/john")) or (os.path.isfile("/usr/bin/john")) or (os.path.isfile("/etc/john"))
    def install(self):
        os.system("sudo apt-get install john -y")
    def run(self):
        clearScr()
        self.menu()
    def menu(self):
        clearScr()
        logo()
        print(color.RED + " EC-Council " + color.WHITE + "Methodology\n")
        print(color.RED + " [ 1 ] Basic Hash Cracking (.txt) file")
        print(color.WHITE + " [ 2 ] From Configuration File ")
        print(color.RED + "\n [ 99 ] Return to Main-Menu \n")
        response = int(raw_input(color.WHITE + " Select an Option : "))
        time.sleep(1.5)
        try:
            if response == 1:
                clearScr()
                logo()
                print(color.RED + "Formats are written as:\n")
                print(color.WHITE + " 1) md5 => raw-md5")
                print(color.WHITE + " 2) md4 => raw-md4")
                print(color.WHITE + " 3) sha1 ==>raw-sha1")
                print(color.WHITE + " 4) sha256 ==>raw-sha256")
                format_known = raw_input("Do you know the hash format[Y/n] : ")
                if (format_known == "Y") or (format_known == "y"):
                    hash_format = raw_input("Please Enter the Format: ")
                    passwd_file_known = raw_input(color.WHITE+"Do you want to use a Custom Wordlist(Y/"+color.RED+("n:"))
                    if (passwd_file_known == "Y") or (passwd_file_known == "y"):
                        passwd_file = raw_input(color.WHITE+"Enter the Custom Dictionary Location: ")
                        hash_file = raw_input(color.WHITE+"Enter the hash file Location: ")
                        print("Performing Hash Cracking")
                        os.system("john --format=%s --wordlist=%s %s"%(hash_format,passwd_file,hash_file))
                    elif passwd_file_known == "n":
                        hash_file = raw_input(color.WHITE + "Enter the hash file Location: ")
                        print("Performing Hash Cracking")
                        os.system("john --format=%s %s" % (hash_format,hash_file))
                    else:
                        print("Wrong Option")
                        self.menu()
                elif format_known == "n":
                    print(color.WHITE+"We can try it normally, but if this does not work, please use HashId in From other Section and Come Back")
                    hid = raw_input(color.RED+"Do you want to use HashID(Y/n):")
                    if hid == "Y":
                        return_fw()
                    elif hid == "n":
                        passwd_file_known = raw_input(color.WHITE + "Do you want to use a Custom Wordlist(Y/" + color.RED + ("n:"))
                        if passwd_file_known == "Y":
                            passwd_file = raw_input(color.WHITE + "Enter the Custom Dictionary Location: ")
                            hash_file = raw_input(color.WHITE + "Enter the hash file Location: ")
                            print("Performing Hash Cracking")
                            os.system("john --wordlist=%s %s" % (passwd_file, hash_file))
                        elif passwd_file_known == "n":
                            hash_file = raw_input(color.WHITE + "Enter the hash file Location: ")
                            print("Performing Hash Cracking")
                            os.system("john %s" % hash_file)
                        else:
                            print("Wrong Option")
                            self.menu()
                    else:
                        print("Wrong Option")
                        self.menu()
            elif response == 2:
                passwd_file = raw_input(color.WHITE+"Do You have /etc/passwd (Y/"+color.RED+"n): ")
                shadow_file = raw_input(color.WHITE+"Do You have /etc/shadow (Y/"+color.RED+"n): ")
                if shadow_file == "Y":
                    if passwd_file == "Y":
                        location = raw_input(color.WHITE+"Location of custom /etc/passwd and /etc/shadow ")
                        os.system("unshadow %s/etc/passwd %s/etc/shadow > crack.txt" %(location,location))
                        os.system("john crack.txt")
                    elif passwd_file == "n":
                        location = raw_input(color.WHITE + "Location of custom /etc/shadow ")
                        os.system("john %s/etc/shadow" % location)
                    else:
                        self.menu()
                if shadow_file == "n":
                    if passwd_file == "Y":
                        location = raw_input(color.WHITE + "Location of custom /etc/passwd: ")
                        os.system("john %s/etc/passwd" % location)
                    else:
                        self.menu()
            elif response == 99:
                main()
            else:
                print(color.RED+"Wrong Option")
                self.menu()
        except KeyboardInterrupt:
            main()
class acrack:
    """Interactive aircrack-ng menu (install on demand, then prompt)."""
    def __init__(self):
        # Install aircrack-ng on first use, then show the menu.
        if not self.installed():
            self.install()
            self.run()
        else:
            self.run()
    def installed(self):
        # Binary in either bin or sbin counts as installed.
        return (os.path.isfile("/usr/bin/aircrack-ng")) or (os.path.isfile("/usr/sbin/aircrack-ng"))
    def install(self):
        os.system("sudo apt-get install aircrack-ng -y")
    def run(self):
        clearScr()
        self.menu()
    def menu(self):
        """Show the cracking menu.

        NOTE(review): int(raw_input(...)) raises ValueError on non-numeric
        input; the try below starts too late to catch it — confirm intended.
        """
        clearScr()
        logo()
        print(color.RED + " EC-Council " + color.WHITE + "Methodology\n")
        print(color.RED + " [ 1 ] Perform Basic Hash Cracking")
        print(color.WHITE + "\n [ 99 ] Return to Main-Menu")
        response = int(raw_input(color.RED+"\n Select the Option : "))
        if response == 1:
            file = raw_input(color.WHITE+"\n Do you have a Custom Dictionary(Y/"+color.RED+"n): ")
            try:
                if (file == "Y") or (file == "y"):
                    lcation = raw_input(color.RED+"\n Enter the Dictionary Path: ")
                    lcation2 = raw_input(color.WHITE+"\n Enter the Path of Hashed File: ")
                    os.system("aircrack-ng -w %s %s" %(lcation, lcation2))
                elif (file == "N") or (file == "n"):
                    lcation2 = raw_input(color.WHITE + "\n Enter the Path of Hashed File: ")
                    os.system("aircrack-ng -w /usr/share/wordlists/rockyou.txt %s" % lcation2)
                else:
                    print(color.RED+"\n Wrong Option")
                    acrack.menu(self)
            except KeyboardInterrupt:
                acrack.menu(self)
        elif response == 99:
            main()
        else:
            print(color.RED+"Wrong Option")
            acrack.menu(self)
class ophcrack:
    """Interactive ophcrack launcher (install on demand, then prompt)."""
    def __init__(self):
        # Install ophcrack on first use, then show the menu.
        if not self.installed():
            self.install()
            self.run()
        else:
            self.run()
    def installed(self):
        return (os.path.isfile("/usr/bin/ophcrack"))
    def install(self):
        os.system("sudo apt-get install ophcrack -y")
    def run(self):
        clearScr()
        self.menu()
    def menu(self):
        # NOTE(review): int(raw_input(...)) can raise ValueError before the
        # try below is entered — confirm intended.
        clearScr()
        logo()
        print(color.RED + " EC-Council " + color.WHITE + "Methodology\n")
        print(color.RED + " [ 1 ] Basic Hash Cracking")
        print(color.WHITE + "\n [ 99 ] Return to Main-Menu")
        response = int(raw_input(color.RED+"\n Select an Option : "))
        try:
            if response == 1:
                time.sleep(2)
                os.system("ophcrack")  # GUI tool; no arguments needed
            elif response == 99:
                main()
            else:
                print(color.RED+"Wrong Option")
                ophcrack.menu(self)
        except KeyboardInterrupt:
            ophcrack.menu(self)
class cupp:
    """Interactive CUPP (wordlist generator) launcher."""
    def __init__(self):
        # Install cupp on first use, then show the menu.
        if not self.installed():
            self.install()
            self.run()
        else:
            self.run()
    def installed(self):
        # Presence of the config file is used as the install marker.
        return (os.path.isfile("/etc/cupp.cfg"))
    def install(self):
        os.system("sudo apt-get install cupp -y")
    def run(self):
        clearScr()
        self.menu()
    def menu(self):
        # NOTE(review): int(raw_input(...)) can raise ValueError before the
        # try below is entered — confirm intended.
        clearScr()
        logo()
        print(color.RED + " EC-Council " + color.WHITE + "Methodology\n")
        print(color.WHITE+" [ 1 ] CUPP Interactive Mode")
        print(color.RED+"\n [ 99 ] Back to Main-Menu")
        response = int(raw_input(color.WHITE+"\n Enter your Response: "))
        try:
            if response == 1:
                os.system("cupp -i")
            elif response == 99:
                main()
            else:
                print(color.RED+" Wrong Option")
                cupp.menu(self)
        except KeyboardInterrupt:
            cupp.menu(self)
class hydra:
    """Interactive Hydra brute-force menu.

    Fixes over the original:
    - install() ran `apt-get install john` instead of hydra (copy-paste bug);
    - the local `dict` no longer shadows the builtin (renamed to wordlist);
    - option 99 now returns to the main menu like every other tool class
      (it previously just redisplayed this menu forever);
    - menu re-entry uses self.menu() instead of hydra.menu(self).
    """
    def __init__(self):
        # Install hydra on first use, then drop into the menu.
        if not self.installed():
            self.install()
        self.run()
    def installed(self):
        return (os.path.isfile("/usr/bin/hydra"))
    def install(self):
        # BUG FIX: previously installed `john` here.
        os.system("sudo apt-get install hydra -y")
    def run(self):
        clearScr()
        self.menu()
    def menu(self):
        clearScr()
        logo()
        print(color.RED + " EC-Council " + color.WHITE + "Methodology\n")
        print(color.WHITE + " Basic Service Brute Forcing")
        print(color.WHITE+" Please select a Service")
        print(color.RED+" [ 1 ] SSH")
        print(color.WHITE+" [ 2 ] TELNET")
        print(color.RED + " [ 3 ] SMTP")
        print(color.WHITE + " [ 4 ] FTP")
        print(color.RED + " [ 5 ] SMB")
        print(color.WHITE + "\n [ 99 ] Back to Main Menu")
        response = int(raw_input(color.RED+"\n Select the Option : "))
        time.sleep(1.5)
        try:
            if response == 1:
                clearScr()
                logo()
                ip = raw_input(color.RED+"Enter the Target IP: ")
                rponse = raw_input(color.WHITE+"\n Do you want add a Custom Dictionary(Y/"+color.RED+"n): ")
                if (rponse == "Y") or (rponse == "y"):
                    wordlist = raw_input(color.WHITE+"\n Enter a Custom Dictionary Location: ")
                    os.system("hydra -L %s -P %s %s ssh" %(wordlist,wordlist,ip))
                elif (rponse == "N") or (rponse == "n"):
                    os.system("hydra -L /usr/share/wordlists/rockyou.txt -P /usr/share/wordlists/rockyou.txt %s ssh" % ip)
                else:
                    print(color.RED+ "\n Wrong Option")
                    self.menu()
            elif response == 2:
                clearScr()
                logo()
                ip = raw_input(color.RED + "\n Enter the Target IP: ")
                rponse = raw_input(color.WHITE + "\n Do you want add a Custom Dictionary(Y/" + color.RED + "n): ")
                if (rponse == "Y") or (rponse == "y"):
                    wordlist = raw_input(color.WHITE + "\n Enter a Custom Dictionary Location: ")
                    os.system("hydra -L %s -P %s %s telnet" % (wordlist, wordlist, ip))
                elif (rponse == "N") or (rponse == "n"):
                    os.system("hydra -L /usr/share/wordlists/rockyou.txt -P /usr/share/wordlists/rockyou.txt %s telnet" % ip)
                else:
                    print(color.RED+"\n Wrong Option")
                    self.menu()
            elif response == 4:
                clearScr()
                logo()
                ip = raw_input(color.RED + "\n Enter the Target IP: ")
                rponse = raw_input(color.WHITE + "\n Do you want add a Custom Dictionary(Y/" + color.RED + "n): ")
                if (rponse == "Y") or (rponse == "y"):
                    wordlist = raw_input(color.WHITE + "\n Enter a Custom Dictionary Location: ")
                    os.system("hydra -L %s -P %s %s FTP" % (wordlist, wordlist, ip))
                elif (rponse == "N") or (rponse == "n"):
                    os.system("hydra -L /usr/share/wordlists/rockyou.txt -P /usr/share/wordlists/rockyou.txt %s FTP" % ip)
                else:
                    print(color.RED+"\n Wrong Option")
                    self.menu()
            elif response == 3:
                clearScr()
                logo()
                ip = raw_input(color.RED + "\n Enter the Target IP: ")
                uname=raw_input(color.WHITE +"\n Enter the email address")
                rponse = raw_input(color.RED + "\n Do you want add a Custom Dictionary(Y/" + color.RED + "n): ")
                if (rponse == "Y") or (rponse == "y"):
                    wordlist = raw_input(color.WHITE + "\n Enter a Custom Dictionary Location: ")
                    os.system("hydra -l %s -P %s %s -e ns -V -s 465 smtp.gmail.com smtp" % (uname, wordlist, ip))
                elif (rponse == "N") or (rponse == "n"):
                    os.system("hydra -l %s -P /usr/share/wordlists/rockyou.txt %s -e ns -V -s 465 smtp.gmail.com smtp" %(uname, ip))
                else:
                    print("Wrong Option")
                    self.menu()
            elif response == 5:
                clearScr()
                logo()
                ip = raw_input(color.RED + "\n Enter the Target IP: ")
                rponse = raw_input(color.WHITE + "\n Do you want add a Custom Dictionary(Y/" + color.RED + "n): ")
                if (rponse == "Y") or (rponse == "y"):
                    wordlist = raw_input(color.RED + "\n Enter a Custom Dictionary Location: ")
                    os.system("hydra -L %s -P %s %s smb" % (wordlist, wordlist, ip))
                elif (rponse == "N") or (rponse == "n"):
                    os.system("hydra -L /usr/share/wordlists/rockyou.txt -P /usr/share/wordlists/rockyou.txt %s smb" % ip)
                else:
                    print(color.WHITE+ "\n Wrong Option")
                    self.menu()
            elif response == 99:
                main()
        except KeyboardInterrupt:
            main()
def main():
    """Top-level tool-selection menu for the password-cracking module."""
    clearScr()
    logo()
    print(color.RED + " [ 1 ] John the Ripper")
    print(color.WHITE + " [ 2 ] Aircrack-ng")
    print(color.RED + " [ 3 ] Ophcrack")
    print(color.WHITE + " [ 4 ] CUPP")
    print(color.RED + " [ 5 ] Hydra")
    print(color.WHITE + "\n [ 99 ] Back To Framework")
    response = raw_input(color.RED +"\n Select your option : ")
    try:
        # NOTE(review): the first two branches are separate `if`s while the
        # rest are `elif`s, so after option 1 finishes the trailing `else`
        # re-enters main(); confirm whether a single elif chain was intended.
        if response == "1":
            clearScr()
            Jtr()
        if response == "2":
            clearScr()
            acrack()
        elif response == "3":
            clearScr()
            ophcrack()
        elif response == "4":
            clearScr()
            cupp()
        elif response == "5":
            clearScr()
            hydra()
        elif response == "99":
            clearScr()
            return_fw()
        else:
            main()
    except KeyboardInterrupt:
        main()

main()
|
import os, shutil
from glob import glob
# Option Values
# Raw strings so backslashes in the Windows paths are never interpreted as
# escape sequences (e.g. a path segment starting with "t" would otherwise
# silently turn "\t" into a tab).
file_path = r"D:\python_test"
new_file_path = r"D:\python_test\test"
file_count_limit = 1  # stop after this many files have been repaired
# Check if a variable is Int type or not
def is_int(var):
    """Return True when `var` can be converted with int(), else False.

    Narrowed the handler: int() raises only ValueError or TypeError for a
    failed conversion; the old bare `except Exception` also swallowed
    unrelated errors.
    """
    try:
        int(var)
        return True
    except (ValueError, TypeError):
        return False
# Go to files directory and read the file
# Repairs ER_802*.txt.bad exports whose free-text message column (index 17)
# was split by embedded commas: the message pieces are re-joined and quoted,
# the trailing numeric columns re-extracted, and each file rewritten in place
# (via a .tmp file moved over the original name in new_file_path).
os.chdir(file_path)
file_count = 0
for filename in glob('ER_802*.txt.bad'):
    # Delete empty files
    if os.path.getsize(filename) == 0:
        os.remove(filename)
        continue # Skip to other file
    new_filename = os.path.join(new_file_path, os.path.basename(filename) + ".tmp")
    print filename
    fp = open(filename, 'r')
    nfp = open(new_filename, "w")
    # Read Line by line of file
    for line in fp.readlines():
        print line.strip()
        words = line.split(",")
        # print(words)
        turn_around_time_ms = processing_time_ms = response_time_ms = result_code = ""
        try:
            message = words[17].strip()
        except IndexError, e:
            raise Exception(filename + " Index Error : " + line)
        print 'Columns : %s' % (len(words))
        # Check the Word from 18th Position onwards i.e. 17th Index and merge it
        if (len(words) > 22):
            for i in range(18, words.__len__() - 3):
                message = message + "," + words[i].strip()
            # Assign the remaining fields if they exists in the file. If not then null
            # NOTE(review): `i` here is the last loop value (len-4), and the
            # `<= len` bounds below are off by one for valid indexing —
            # an IndexError is only avoided by the except; confirm intent.
            try:
                turn_around_time_ms = words[i].strip()
                processing_time_ms = words[i + 1].strip() if (i + 1) <= words.__len__() else ""
                response_time_ms = words[i + 2].strip() if i + 2 <= words.__len__() - 1 else ""
                result_code = words[i + 3].strip() if i + 3 <= words.__len__() - 1 else ""
            except IndexError, e:
                # NOTE(review): raising a plain string is invalid in Python 2.6+.
                raise "Error : " + line
        else:
            # Fewer than 23 columns: extend the message until the next
            # numeric or empty field, then take the numeric tail.
            i = 18
            while (i < words.__len__()):
                if is_int(words[i]) or len(words[i].strip()) == 0:
                    break
                else:
                    message = message + "," + words[i].strip()
                    i += 1
            # Assign the remaining fields if they exists in the file. If not then null
            try:
                turn_around_time_ms = words[i].strip() if i <= words.__len__() else ""
                processing_time_ms = words[i + 1].strip() if (i + 1) <= words.__len__() else ""
                response_time_ms = words[i + 2].strip() if i + 2 <= words.__len__() - 1 else ""
                result_code = words[i + 3].strip() if i + 3 <= words.__len__() - 1 else ""
            except:
                pass
        # Replace any double quote in the string
        message = message.replace('"', '')
        # print(line)
        # Re-emit the fixed 22-column record with the message quoted.
        new_line = "%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,\"%s\",%s,%s,%s,%s\n" % (
            words[0], words[1], words[2], words[3], words[4], words[5], words[6], words[7], words[8], words[9],
            words[10], words[11], words[12], words[13], words[14], words[15], words[16], message, turn_around_time_ms,
            processing_time_ms, response_time_ms, result_code)
        print new_line
        # Open another file to write
        nfp.write(new_line)
        print ''
    fp.close()
    nfp.close()
    # Delete the original file and rename .tmp to the original one and move to build directory
    shutil.move(new_filename, os.path.join(new_file_path, filename))
    os.system("chmod 775 " + os.path.join(new_file_path, filename))
    # os.remove(filename)
    # os.system("rm -f " + filename) # Getting issues with python remove
    file_count += 1
    if file_count_limit == file_count:
        break
|
# Markov chain with an absorbing state (3 states).
# Estimate the mean time to reach the absorbing state both by simulation
# and by calculation. Do the theoretical derivation: solve the system of
# equations for f by the matrix method.
# 19/02/18. System of linear equations.
from random import *
def show_matrix(matrix, name):
    """Print `matrix` row by row; when name == 1 a transition-matrix
    header (in Russian, user-facing) is printed first."""
    if name == 1:
        print("Матрица переходов: ")
    for row in matrix:
        for elem in row:
            print(elem, end=' ')
        print()
# User-facing prompts stay in Russian.
print("Допуск 3\nЗадайте исходные данные: ")
n = 10000  # number of Monte-Carlo experiments
# Transition matrix: row = current state, column = next state.
# State 2 is absorbing (its row is [0, 0, 1]).
p = [[0.1, 0.4, 0.5], # [0->0, 0->1, 0->2]
     [0.2, 0.795, 0.005], # [1->0, 1->1, 1->2]
     [0.0, 0.0, 1.0]] # [2->0, 2->1, 2->2]
show_matrix(p, 1)
print("Кол-во экпериментов: " + str(n))
def experimet():
    """Monte-Carlo estimate of the min/mean/max time to absorption
    (state 2), starting from state 0, over n runs."""
    print("Экспериметнальный расчёт")
    nwas = [0 for i in range(n)] # step at which each run hit the absorbing state
    for i in range(n):
        status = 0 # initial state of the chain for every experiment
        window = 0 # step counter for the current run
        while status != 2:
            # Sample the next state from row p[status] by inverse transform.
            r = random()
            if r <= p[status][0]:
                status = 0
            elif p[status][0] < r <= (p[status][0] + p[status][1]):
                status = 1
            else:
                status = 2
            window += 1
        nwas[i] = window
    # Aggregate min / max / mean absorption times.
    middle_time = 0
    max_time = 0
    min_time = 10000
    for i in range(len(nwas)):
        min_time = min(min_time, nwas[i])
        max_time = max(max_time, nwas[i])
        middle_time += nwas[i]
    middle_time /= n
    # Manual rounding of the mean to the nearest integer.
    difference = middle_time - int(middle_time)
    if (difference >= 0.5):
        middle_time = int(middle_time) + 1
    else:
        middle_time = middle_time - difference
    print("Среднее время работы системы до перехода в поглащающее состояние: " + str(middle_time))
    print("Минимальное время работы системы до перехода в состояние поглощения: " + str(min_time))
    print("Максимальное время работы системы до перехода в состояние поглощения: " + str(max_time))
def theory():
    """Analytic mean first-passage times into absorbing state 2, derived
    from the first-step-analysis linear system (matrix method)."""
    print("Теоретический расчёт")
    dividend = (p[0][0] + p[0][1] + p[0][2] - p[0][0] * p[1][1] - p[0][2] * p[1][1] + p[0][1] * p[1][0] + p[0][1] * p[1][2])
    divider = (1 - p[0][0] - p[1][1] + p[0][0] * p[1][1] - p[0][1] * p[1][0])
    f02 = dividend / divider
    # NOTE(review): for a stochastic row p[1][0]+p[1][1]+p[1][2] == 1;
    # confirm the intended closed form for f12.
    f12 = (p[1][0] + p[1][1] + p[1][2] + p[1][0] * f02) / (1 - p[1][1])
    print("f02(Среднее число переходов из состояния 0 в состояние 2 (поглощающее)) = " + str(f02))
    print("f12(Среднее число переходов из состояния 1 в состояние 2 (поглощающее)) = " + str(f12))

# Main part of the program
experimet()
theory()
# -*- coding: utf-8 -*-
#!/usr/bin/env python
import time
import subprocess
import locale
import codecs
import os
import multiprocessing
# import FileRW
import socket, sys
from adbExtend import adbExtend
import xml.sax
# import xml.dom.minidom
import xml.etree.ElementTree as ET
from xml.etree.ElementTree import parse
# import testAutomator
def activityDump(filename = 'tmp.xml'):
    """Dump the current Android UI hierarchy and pull it to *filename*.

    Runs (over adb, as root) `uiautomator dump` on the device, then pulls
    the resulting XML file to the local path given by *filename*.
    """
    device_dump = '/data/local/tmp/uidump.xml'
    steps = [
        'adb root',
        'adb shell uiautomator dump ' + device_dump,
        'adb pull ' + device_dump + ' ' + filename,
    ]
    adb = adbExtend()
    # Execute all three steps as a single shell command line.
    adb.commands(' ; '.join(steps))
def xmlAnalysis(x, y):
    """Find the smallest UI node in tmp.xml whose bounds contain (x, y).

    Parses the uiautomator dump `tmp.xml`, keeps the node with the smallest
    bounding-box area that contains the point, prints its attributes and
    returns it. Returns None (after printing a notice) when no node matches.
    """
    # Use a context manager: the original leaked the file handle.
    with open('tmp.xml', 'r') as f:
        tree = ET.parse(f)
    root = tree.getroot()
    miniSize = 0
    miniNode = None
    for child in root.iter('node'):
        bounds = child.get('bounds')
        size = boundsMatch(x, y, bounds)
        if size == -1:
            continue
        # First hit initializes; afterwards keep the smallest enclosing node.
        if miniSize == 0 or size < miniSize:
            miniSize = size
            miniNode = child
    if miniNode is None:
        print ('-------------------------')
        print ('--- wight not found ----')
        print ('-------------------------')
        return None
    # (label, attribute) pairs, printed in the same order and with the same
    # labels as the original hand-written print block.
    attributes = (
        ('index', 'index'),
        ('text', 'text'),
        ('resourceid', 'resource-id'),
        ('class', 'class'),
        ('package', 'package'),
        ('content-desc', 'content-desc'),
        ('checkable', 'checkable'),
        ('checked', 'checked'),
        ('clickable', 'clickable'),
        ('enable', 'enabled'),
        ('focusable', 'focusable'),
        ('focused', 'focused'),
        ('scrollable', 'scrollable'),
        ('long-click', 'long-clickable'),
        ('password', 'password'),
        ('selected', 'selected'),
        ('bounds', 'bounds'),
    )
    for label, attr in attributes:
        # NOTE(review): like the original, this raises TypeError if an
        # attribute is missing (get() -> None) — confirm dumps always
        # carry all attributes.
        print (label + ': ' + miniNode.get(attr))
    return miniNode
# # home, back, menu, enter, volume_up, volume_down, volume_mute, camera, power
# def sysKeyCaseWrite(key):
# sysKeys = ['home', 'back', 'menu', 'enter', 'volum_up', 'volum_down', 'volum_mute', 'camera', 'power']
# if key in sysKeys:
# testAutomator.systemKey(key)
# def dragCaseWrite(x0,y0,x1,y1):
#
# print ("Drag Event: x0=%d , y0=%d , x1=%d , y1=%d" % (x0, y0, x1, y1))
# testAutomator.androidEvent('true', 'false', 'false', 'false',
# 'flase', 'flase', 'flase', 'false',
# 'flase', x0, y0, x1, y1)
# def touchCaseWrite(x,y):
#
# print ("Touch Event: x=%d , y=%d" % (x, y))
# miniNode = xmlAnalysis(x, y)
#
# if miniNode is not None:
#
# index = miniNode.get('index')
# text = miniNode.get('text')
# resourceid = miniNode.get('resource-id')
# myclass = miniNode.get('class')
# package = miniNode.get('package')
# contentdesc = miniNode.get('content-desc')
# checkable = miniNode.get('checkable')
# checked = miniNode.get('checked')
# clickable = miniNode.get('clickable')
# enabled = miniNode.get('enabled')
# focusable = miniNode.get('focusable')
# focused = miniNode.get('focused')
# scrollable = miniNode.get('scrollable')
# longclickable = miniNode.get('long-clickable')
# password = miniNode.get('password')
# selected = miniNode.get('selected')
# bounds = miniNode.get('bounds')
#
# testAutomator.androidEvent('false', focusable, longclickable, clickable,
# contentdesc, text, index, resourceid,
# password,0,0,0,0)
# xmlDump()
def boundsMatch(x,y, bounds):
    """Return the rectangle's area if (x, y) lies strictly inside it.

    *bounds* is a uiautomator string like '[x0,y0][x1,y1]'. Returns the
    area (x1-x0)*(y1-y0) when the point is strictly inside, else -1.
    """
    first, _, second = bounds.partition('][')
    left, top = (int(v) for v in first.lstrip('[').split(','))
    right, bottom = (int(v) for v in second.rstrip(']').split(','))
    if left < x < right and top < y < bottom:
        return (right - left) * (bottom - top)
    return -1
# Entry point: dump the current UI hierarchy to tmp1.xml when run directly.
if (__name__ == "__main__"):
    activityDump('tmp1.xml')
# xmlAnalysis() |
# script_version=1
# %%
import sys  # stdlib: imported outside the try so the failure path can exit

try:
    import pykefcontrol as pkf
    import socket
    from rich import print
    from rich.console import Console
    import ipaddress
    import time
    import requests
except Exception as e:
    # Use the builtin print here: if the `rich` import itself failed, rich's
    # print (and its `style=` kwarg) is unavailable — the original call
    # raised TypeError and masked the real import error.
    print("Error:", e)
    print("Please install the required packages with `pip install -r testing_reqs.txt`")
    sys.exit()
# %%
console = Console()  # rich console shared by every test helper below
AUTO_TESTS_OUTPUT = {}  # apparently unused in this script — kept for compatibility
USER_CONFIRMATION = {}  # feature name -> bool, filled in by user_confirmation()
DEBUG = False
MODEL_SELECTED = -1  # index into MODEL_LIST, set by select_model()
MODEL_LIST = ["LSX 2", "LS50 Wireless 2", "LS60"]
def select_model():
    """Prompt the user for their speaker model; sets global MODEL_SELECTED."""
    global MODEL_SELECTED
    newline()
    console.print("[dodger_blue1]Select your speaker model:[/dodger_blue1]")
    console.print("[bold]1[/bold] KEF LSX 2")
    console.print("[bold]2[/bold] KEF LS50 Wireless 2")
    console.print("[bold]3[/bold] KEF LS60")
    try:
        MODEL_SELECTED = (
            int(input("Enter the number of your speaker model (1/2/3): ")) - 1
        )
    except ValueError:  # narrowed from a bare except: only bad input expected
        MODEL_SELECTED = -1
    while MODEL_SELECTED not in [0, 1, 2]:
        console.print("\tPlease enter 1, 2 or 3: ", end="")
        try:
            MODEL_SELECTED = int(input()) - 1
        except ValueError:
            # The original crashed here on non-numeric input; keep re-prompting.
            MODEL_SELECTED = -1
    newline()
def newline():
    """Print a blank separator line."""
    print("\n")
def get_local_ip():
    """Best-effort detection of this machine's non-loopback local IP.

    First tries the IPs resolved for the local hostname; if all are
    loopback, falls back to the UDP-connect trick (no packet is sent).
    """
    candidates = [
        ip
        for ip in socket.gethostbyname_ex(socket.gethostname())[2]
        if not ip.startswith("127.")
    ]
    if candidates:
        return candidates[0]
    # Fallback: "connect" a UDP socket and read the chosen source address.
    probe = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    probe.connect(("8.8.8.8", 53))
    local_ip = probe.getsockname()[0]
    probe.close()
    return local_ip
def validate_ip_address(ip_string):
    """Return the normalized IP string when valid, otherwise -1 (after a hint)."""
    try:
        return str(ipaddress.ip_address(ip_string))
    except ValueError:
        print(
            f"The IP address '{ip_string}' is not valid.\n\
Please enter a valid IP address in the form www.xxx.yyy.zzz"
        )
        return -1
def check_script_version():
    """Compare this script's `# script_version=` tag with the GitHub copy.

    Best effort only: on any network or parsing error it warns, asks the
    user to acknowledge, and lets the run continue.
    """
    try:
        with console.status("Checking if this script is up to date..."):
            with requests.get(
                "https://raw.githubusercontent.com/N0ciple/pykefcontrol/main/testing.py"
            ) as response:
                output = response.text
                # The version tag is embedded as a comment near the top of
                # the published script.
                version = output.split("# script_version=")[1].split("\n")[0]
                if version == "1":
                    console.print(
                        f"[bold green]This script is up to date.[/bold green]",
                    )
                else:
                    console.print(
                        "[bold orange_red1]Testing utility is not up to date.[/bold orange_red1]\n\
Please upgrade with [bold red]`git pull`[/bold red] in the pykefcontrol folder."
                    )
    except Exception as e:
        console.print("Error:", e, style="red")
        console.print(
            "[bold orange_red1]Could not check testing utility version.[/bold orange_red1]"
        )
        console.print(
            "Continuing anyway... but script version might not be the latest!"
        )
        input("Press enter to continue...")
def prompt_continue():
    """Block until the user presses enter."""
    input("Press enter to continue...")
def rule_msg(msg, sep="-"):
    """Print *msg* in cyan, centered in an 80-character rule of *sep*."""
    banner = f"[cyan3]{msg}[/cyan3]".center(80, sep)
    console.print(banner)
def report_github(e):
    """Print the error, ask the user to report it on GitHub, and exit."""
    console.print(f"[bold red]Error: {e}[/bold red]")
    console.print("[orange1]Please report this error on the github repo![/orange1]")
    sys.exit()
def user_confirmation(console, action, msg=None):
    """Ask the user whether *action* succeeded; return {action: bool}."""
    if msg is None:
        prompt = "Do you confirm the change was successful? (y/n) "
    else:
        prompt = f"{msg} (y/n) "
    console.print(prompt, end="")
    answer = input()
    # Keep prompting until we get an unambiguous yes/no.
    while answer.lower() not in ("y", "n"):
        console.print("\tPlease enter y (for yes) or n (for no): ", end="")
        answer = input()
    confirmed = answer.lower() == "y"
    if confirmed:
        console.print("\t[bold green]✅ Sucess![/bold green]")
    else:
        console.print("\t[bold orange_red1]❌ Failure ![/bold orange_red1]")
    return {action: confirmed}
def speaker_info():
    """Ask for the speaker's IP, connect, and print its basic information.

    Returns the connected pkf.KefConnector instance; exits the program when
    the speaker cannot be reached.
    """
    rule_msg("Speaker IP Address")
    console.print("The script will gather a few informations about your speaker.")
    console.print(
        "[orange1]The script needs the [bold]IP address[/bold] of your speaker.[/orange1]"
    )
    prompt_continue()
    print(
        "Please enter the IP address of your KEF speaker in the form www.xxx.yyy.zzz\n(192.168.0.12 for example)"
    )
    spkr_ip = validate_ip_address(input("IP Address: "))
    while spkr_ip == -1:
        spkr_ip = validate_ip_address(input("IP Address: "))
    print("Using speaker IP:", spkr_ip)
    newline()
    rule_msg("Speaker Information")
    spkr = pkf.KefConnector(spkr_ip)
    with console.status(
        "Getting speaker info...",
    ):
        exception = None
        try:
            spkr_name = spkr.speaker_name
        except Exception as e:
            exception = e
        if exception is not None:
            console.print("[orange1]Error while fetching speaker name[/orange1]")
            # BUG FIX: `e` is deleted at the end of the except clause in
            # Python 3, so referencing it here raised NameError — print the
            # saved `exception` instead.
            console.print(f"[bold red]Error: {exception}[/bold red]")
            newline()
            console.print(
                "Verify that your speaker is plugged in 🔌 and connected to the network."
            )
            console.print(
                "Verify that your computer is connected to the same network as your speaker."
            )
            console.print("Verify that the IP address is correct.")
            console.print(
                "[orange1]Otherwise, please report this error on the github repo! [/orange1]"
            )
            sys.exit()
        time.sleep(0.5)
    try:
        spkr_mac_address = spkr.mac_address
    except Exception as e:
        report_github(e)
    print("Speaker Infos:")
    print("\tIP:", spkr_ip)
    print(f'\tName: "{spkr_name}"')
    print("\tMAC Address:", spkr_mac_address)
    print(f"\tModel: [dodger_blue1]{MODEL_LIST[MODEL_SELECTED]}[/dodger_blue1]")
    newline()
    USER_CONFIRMATION.update(
        user_confirmation(console, "speaker_info", msg="Are the information correct?")
    )
    return spkr
def power_check():
    """Interactively test power ON/OFF; records results in USER_CONFIRMATION.

    Depending on the state found, the sequence is on -> off -> on (speaker
    found in standby) or off -> on (speaker found on).
    """
    rule_msg("Testing Power ON/OFF")
    console.print("The script will now test the power ON/OFF feature.")
    with console.status("Detecting status..."):
        try:
            status = spkr.status
        except Exception as e:
            report_github(e)
    if status == "standby":
        console.print("The speaker is currently OFF.")
        console.print("Turning ON the speaker...")
        try:
            spkr.power_on()
        except Exception as e:
            report_github(e)
        with console.status("Waiting for the speaker to turn ON (10s)..."):
            time.sleep(10)
        USER_CONFIRMATION.update(
            user_confirmation(
                console,
                "power_on",
                msg="Did the speaker turn ON successfully? \n[orange1]it should be on but no sources should be selected[/orange1]",
            )
        )
        console.print("Turning OFF the speaker...")
        try:
            spkr.shutdown()
        except Exception as e:
            report_github(e)
        with console.status("Waiting for the speaker to turn OFF..."):
            time.sleep(5)
        USER_CONFIRMATION.update(
            user_confirmation(
                console, "power_off", msg="Did the speaker turn OFF successfully?"
            )
        )
        console.print("Turning ON the speaker again...")
        try:
            spkr.power_on()
        except Exception as e:
            report_github(e)
        with console.status("Waiting for the speaker to turn ON again (10s)..."):
            time.sleep(10)
        # BUG FIX: the original discarded this confirmation's result;
        # record it like every other check so the final verdict uses it.
        USER_CONFIRMATION.update(
            user_confirmation(
                console,
                "power_on",
                msg="Did the speaker turn ON successfully?\n[orange1]it should be on but no sources should be selected[/orange1]",
            )
        )
    else:
        console.print("The speaker is currently ON.")
        console.print("Turning OFF the speaker...")
        try:
            spkr.shutdown()
        except Exception as e:
            report_github(e)
        with console.status("Waiting for the speaker to turn OFF..."):
            time.sleep(5)
        USER_CONFIRMATION.update(
            user_confirmation(
                console, "power_off", msg="Did the speaker turn OFF successfully?"
            )
        )
        console.print("Turning ON the speaker...")
        try:
            spkr.power_on()
        except Exception as e:
            report_github(e)
        with console.status("Waiting for the speaker to turn ON (10s)..."):
            time.sleep(10)
        USER_CONFIRMATION.update(
            user_confirmation(
                console,
                "power_on",
                msg="Did the speaker turn ON successfully?\n[orange1]it should be on but no sources should be selected[/orange1]",
            )
        )
    if USER_CONFIRMATION["power_on"] and USER_CONFIRMATION["power_off"]:
        console.print("[bold green]All power tests passed ! 🎉[/bold green]")
def source_check():
    """Interactively cycle through every input source and record results."""
    rule_msg("Testing Source Selection")
    console.print("The script will now test the source selection feature.")
    console.print(
        "The script will cycle through the channels: [dodger_blue1]wifi, bluetooth, tv, optical, coaxial, and analog[/dodger_blue1]"
    )
    console.print(
        "[orange1]You can check on the speakers LED or on the [bold]KEF Connect[/bold] app (recommended)[/orange1]"
    )
    console.print(
        "[red]You do NOT need to play any sound. Just make sure the speaker changes its input source[/red]"
    )
    prompt_continue()
    for source in ["wifi", "bluetooth", "tv", "optical", "coaxial", "analog"]:
        console.print(f"Selecting source: [dodger_blue1]{source}[/dodger_blue1]")
        try:
            spkr.source = source
        except Exception as e:
            report_github(e)
        with console.status(f"Waiting for the speaker to select {source} (10s)..."):
            time.sleep(1.5)
        USER_CONFIRMATION.update(
            user_confirmation(
                console,
                f"select {source}",
                msg=f"\tDid the speaker select {source} successfully?",
            )
        )
    console.print("Switching back to wifi...")
    try:
        spkr.source = "wifi"
    except Exception as e:
        # BUG FIX: the original had a bare `except:` that then referenced an
        # undefined `e`, raising NameError instead of reporting the error.
        report_github(e)
    # Pass only if every source change was confirmed by the user.
    all_checks = all(
        USER_CONFIRMATION[f"select {source}"]
        for source in ["wifi", "bluetooth", "tv", "optical", "coaxial", "analog"]
    )
    if all_checks:
        console.print("[bold green]All source tests passed ! 🎉[/bold green]")
def sumup():
    """Print the selected model and which features passed/failed confirmation."""
    rule_msg("Sum Up")
    console.print("[bold]Speaker version:[/bold]")
    console.print(f"\t[dodger_blue1]{MODEL_LIST[MODEL_SELECTED]}[/dodger_blue1]")
    console.print("[bold]Working features:[/bold]")
    for feature, passed in USER_CONFIRMATION.items():
        if passed:
            console.print(f"\t[green]✓[/green] {feature}")
    console.print("[bold]Non working features:[/bold]")
    for feature, passed in USER_CONFIRMATION.items():
        if not passed:
            console.print(f"\t[red]✗[/red] {feature}")
def vol_test():
    """Interactively test volume change, mute, and unmute.

    Records each outcome in USER_CONFIRMATION under the keys
    "set volume", "mute" and "unmute".
    """
    rule_msg("Testing Volume Control")
    console.print("The script will now test the volume control feature.")
    prompt_continue()
    try:
        vol = spkr.volume
    except Exception as e:
        report_github(e)
    console.print(f"Current volume: [dodger_blue1]{vol}[/dodger_blue1]")
    # Nudge volume by 5 in whichever direction keeps it audible but safe.
    if vol < 10:
        console.print(f"setting volume to {vol+5}")
        newvol = vol + 5
    else:
        console.print(f"setting volume to {vol-5}")
        newvol = vol - 5
    try:
        spkr.volume = newvol
    except Exception as e:
        report_github(e)
    USER_CONFIRMATION.update(
        user_confirmation(
            console,
            "set volume",
            msg="Did the speaker change its volume successfully?\n[orange1]You can check on the [bold]KEF Connect[/bold] app[/orange1]",
        )
    )
    newline()
    console.print("The script will now test the mute/unmute feature.")
    console.print(
        "[orange1]You can play a song or just check on the KEF Connect app.[/orange1]"
    )
    prompt_continue()
    newline()
    console.print("Muting the speaker...")
    try:
        spkr.mute()
    except Exception as e:
        report_github(e)
    USER_CONFIRMATION.update(
        user_confirmation(
            console,
            "mute",
            msg="Did the speaker was muted successfully?",
        )
    )
    newline()
    console.print("Now testing the unmuting feature.")
    console.print("Unmuting the speaker...")
    try:
        spkr.unmute()
    except Exception as e:
        report_github(e)
    USER_CONFIRMATION.update(
        user_confirmation(
            console,
            "unmute",
            msg="Did the speaker was unmuted successfully?",
        )
    )
    if (
        USER_CONFIRMATION["set volume"]
        and USER_CONFIRMATION["mute"]
        and USER_CONFIRMATION["unmute"]
    ):
        console.print("[bold green]All volume tests passed ! 🎉[/bold green]")
def system_infos():
    """Print the Python version, pykefcontrol version and local IP."""
    # Gather everything first so the side effects (socket probing in
    # get_local_ip) happen before any output, as in the original.
    py_version = sys.version
    lib_version = pkf.__version__
    local_ip = get_local_ip()
    rule_msg("System info")
    print("Python version:", py_version)
    print("Pykefcontrol version:", lib_version)
    print("Computer local IP:", local_ip)
def song_info():
    """Interactively verify that song metadata can be fetched while playing.

    Blocks (polling every 0.5s) until the speaker reports playback, then
    fetches and displays the current song information.
    """
    rule_msg("Song Info")
    console.print("The script will now test the song info fetching.")
    console.print("[bold red]Make sure the speaker is playing a song.[/bold red]")
    console.print(
        "[bold red]the song should be playing via Chormecast, Airplay, Spotify Connect or DLNA.[/bold red]"
    )
    input("Press [ENTER] to continue, when a song is playing...")
    # Poll until playback is actually detected.
    with console.status("Checking if a song is playing"):
        try:
            while not spkr.is_playing:
                time.sleep(0.5)
        except Exception as e:
            report_github(e)
    console.print("A song is playing, fetching song info...")
    try:
        song_info = spkr.get_song_information()
    except Exception as e:
        report_github(e)
    console.print(
        f"Current song informations: [dodger_blue1]{song_info}[/dodger_blue1]"
    )
    USER_CONFIRMATION.update(
        user_confirmation(
            console,
            "get song info",
            msg="Did the script fetch the song info successfully?\n(are they [bold red]roughly[/bold red] correct?",
        )
    )
    if USER_CONFIRMATION["get song info"]:
        console.print("[bold green]All song info tests passed ! 🎉[/bold green]")
def track_control():
    """Interactively test next/previous track and play/pause toggling.

    Records outcomes in USER_CONFIRMATION under "next track",
    "previous track", "pause" and "play".
    """
    rule_msg("Track Control")
    console.print("The script will now test the track control feature.")
    console.print("[bold red]Make sure the speaker is playing a song.[/bold red]")
    console.print(
        "The script will test [dot blue]next track, previous track[/dot blue] and [dot blue]pause/play[/dot blue]."
    )
    input("Press [ENTER] to continue, when a song is playing...")
    # Poll until the speaker reports that playback has started.
    with console.status("Checking if a song is playing"):
        try:
            while not spkr.is_playing:
                time.sleep(0.5)
        except Exception as e:
            report_github(e)
    console.print("A song is playing, testing next track...")
    try:
        spkr.next_track()
    except Exception as e:
        report_github(e)
    USER_CONFIRMATION.update(
        user_confirmation(
            console,
            "next track",
            msg="Did the speaker skip to the next track successfully?",
        )
    )
    newline()
    console.print("Testing previous track...")
    try:
        spkr.previous_track()
    except Exception as e:
        report_github(e)
    USER_CONFIRMATION.update(
        user_confirmation(
            console,
            "previous track",
            msg="Did the speaker skip to the previous track successfully?",
        )
    )
    newline()
    console.print("Testing pause...")
    console.print("[bold red]Make sure the speaker is playing a song.[/bold red]")
    input("Press [ENTER] to continue, when a song is playing...")
    try:
        spkr.toggle_play_pause()
    except Exception as e:
        report_github(e)
    USER_CONFIRMATION.update(
        user_confirmation(
            console,
            "pause",
            msg="Did the speaker pause the song successfully?",
        )
    )
    newline()
    console.print("Testing play...")
    console.print("[bold red]Make sure the speaker is paused.[/bold red]")
    input("Press [ENTER] to continue, when the speaker is paused...")
    try:
        spkr.toggle_play_pause()
    except Exception as e:
        report_github(e)
    USER_CONFIRMATION.update(
        user_confirmation(
            console,
            "play",
            msg="Did the speaker resumed the song successfully?",
        )
    )
    if (
        USER_CONFIRMATION["next track"]
        and USER_CONFIRMATION["previous track"]
        and USER_CONFIRMATION["pause"]
        and USER_CONFIRMATION["play"]
    ):
        console.print("[bold green]All track control tests passed ! 🎉[/bold green]")
if __name__ == "__main__":
# ====== Check testing utility version ======
newline()
rule_msg("Pykefcontrol Library Testing".upper(), sep="=")
rule_msg("This script version")
check_script_version()
rule_msg("Infos")
console.print(
"The aim of this script is to test the pykefcontrol library on\
\nvarious hardware. Namely the KEF LS50W2, KEF LSX2 and KEF LS60."
)
time.sleep(0.5)
select_model()
prompt_continue()
console.print("This script will test the following:")
console.print(
"\t- [bold]Speaker power on/off[/bold]\
\n\t- [bold]Speaker source selection[/bold]\
\n\t- [bold]Speaker volume control[/bold]\
\n\t- [bold]Speaker mute control[/bold]\
\n\t- [bold]Song Info[/bold] (get title/artist/album)\
\n\t- [bold]Track control[/bold] (next/prev/play/pause)"
)
prompt_continue()
system_infos()
prompt_continue()
spkr = speaker_info()
newline()
power_check()
prompt_continue()
newline()
source_check()
prompt_continue()
newline()
vol_test()
newline()
song_info()
newline()
track_control()
newline()
sumup()
rule_msg("End of tests")
console.print("Thanks for using this script !")
console.print(
"Please copy the content of the [dodger_blue1]Sum Up[/dodger_blue1] section"
)
console.print(
"and [bold red]report it to GitHub[/bold red]. Even if all the tests passed ! 👌"
)
console.print(
"[bold red] Report here: https://github.com/N0ciple/pykefcontrol/issues/2[/bold red]"
)
console.print(
"[bold green]Thanks for helping improving Pykefcontrol ! 🤗[/bold green]"
)
#%% section")
# console = Console()
# DEBUG = True
# ALL_TESTS_OUTPUTS = {}
# def newline():
# print("\n")
# def get_local_ip():
# return (
# (
# [
# ip
# for ip in socket.gethostbyname_ex(socket.gethostname())[2]
# if not ip.startswith("127.")
# ]
# or [
# [
# (s.connect(("8.8.8.8", 53)), s.getsockname()[0], s.close())
# for s in [socket.socket(socket.AF_INET, socket.SOCK_DGRAM)]
# ][0][1]
# ]
# )
# + ["no IP found"]
# )[0]
# def check_turn_on(spkr, console):
# counter = 0
# with console.status("Turning on speaker [italic](15s max)[/italic]..."):
# spkr.power_on()
# counter = 0
# while spkr.status == "standby" or counter < 15:
# time.sleep(1)
# counter += 1
# if spkr.status in ["powerOn", "wifi"]:
# console.print(
# f"[bold green]Speaker turned on successfully.[/bold green]",
# )
# elif counter >= 15:
# console.print(
# f"[bold orange_red1]Speaker did not turn on: timeout![/bold orange_red1]",
# f"\n[bold orange_red1]after timeout, status is: {spkr.status}[/bold orange_red1]",
# )
# else:
# console.print(
# f"[bold orange_red1]Unknown speaker status: {spkr.status}[/bold orange_red1]",
# )
# def check_turn_off(spkr, console):
# counter = 0
# with console.status("Turning off speaker [italic](15s max)[/italic]..."):
# spkr.shutdown()
# counter = 0
# while spkr.status != "standby" or counter < 15:
# time.sleep(1)
# counter += 1
# if spkr.status == "standby":
# console.print(
# f"[bold green]Speaker turned off successfully.[/bold green]",
# )
# elif counter >= 15:
# console.print(
# f"[bold orange_red1]Speaker did not turn off: timeout![/bold orange_red1]",
# f"\n[bold orange_red1]after timeout, status is: {spkr.status}[/bold orange_red1]",
# )
# else:
# console.print(
# f"[bold orange_red1]Unknown speaker status: {spkr.status}[/bold orange_red1]",
# )
# def check_source(spkr, console, source):
# counter = 0
# with console.status(f"Setting source to {source} [italic](15s max)[/italic]..."):
# spkr.source = source
# counter = 0
# while spkr.source != source or counter < 15:
# time.sleep(1)
# counter += 1
# if spkr.source == source:
# console.print(
# f"[bold green]Speaker source set to {source} successfully.[/bold green]",
# )
# elif counter >= 15:
# console.print(
# f"[bold orange_red1]Speaker did not set source to {source}: timeout![/bold orange_red1]",
# f"\n[bold orange_red1]after timeout, source is: {spkr.source}[/bold orange_red1]",
# )
# return -1
# else:
# console.print(
# f"[bold orange_red1]Unknown speaker source: {spkr.source}[/bold orange_red1]",
# )
# return -1
# def validate_ip_address(ip_string):
# try:
# ip_object = ipaddress.ip_address(ip_string)
# return str(ip_object)
# except ValueError:
# print(
# f"The IP address '{ip_string}' is not valid.\n\
# Please enter a valid IP address in the form www.xxx.yyy.zzz"
# )
# return -1
# def user_confirmation(console, action, msg=None):
# if DEBUG:
# return {action: True}
# else:
# if msg is None:
# console.print("\tDo you confirm the change was successful? (y/n) ", end="")
# else:
# console.print(f"\t{msg} (y/n) ", end="")
# user_input = input()
# while user_input.lower() not in ["y", "n"]:
# console.print("\tPlease enter y (for yes) or n (for no): ", end="")
# user_input = input()
# if user_input.lower() == "y":
# console.print(
# "\t[bold green]✅ Change sucessful for the user ![/bold green]"
# )
# return {action: True}
# else:
# console.print(
# "\t[bold orange_red1]❌ Unsucessful change for the user ![/bold orange_red1]"
# )
# return {action: False}
# newline()
# console.print("[green3]Testing Utility[/green3]".center(80, "="))
# # ====== System info ======
# python_version = sys.version
# pkf_version = pkf.__version__
# computer_ip = get_local_ip()
# newline()
# print("[cyan3]System info[/cyan3]".center(80, "-"))
# print("Python version:", python_version)
# if pkf_version == "0.5.1":
# end_msg = "(✅ Latest version)"
# else:
# end_msg = "(⚠️ not the latest version, please upgrade with `pip install pykefcontrol --upgrade`)"
# print("Pykefcontrol version:", pkf_version, end_msg)
# print("Computer local IP:", computer_ip)
# # ====== Speaker info ======
# newline()
# print("[cyan3]KEF Speaker IP Address[/cyan3]".center(80, "-"))
# print(
# "Please enter the IP address of your KEF speaker in the form www.xxx.yyy.zzz\n(192.168.0.12 for example)"
# )
# spkr_ip = validate_ip_address(input("IP Address: "))
# while spkr_ip == -1:
# spkr_ip = validate_ip_address(input("IP Address: "))
# print("Using speaker IP:", spkr_ip)
# newline()
# print("[cyan3]Speaker Info[/cyan3]".center(80, "-"))
# spkr = pkf.KefConnector(spkr_ip)
# with console.status(
# "Getting speaker info...",
# ):
# try:
# spkr_name = spkr.speaker_name
# except AttributeError:
# print("Error getting speaker Name. Please check your IP address and try again.")
# sys.exit()
# except:
# print("Error. Please check your IP address and try again.")
# sys.exit()
# time.sleep(0.5)
# spkr_mac_address = spkr.mac_address
# print("Speaker Infos:")
# print("\tIP:", spkr_ip)
# print(f'\tName: "{spkr_name}"')
# print("\tMAC Address:", spkr_mac_address)
# newline()
# out = user_confirmation(
# console, "speaker info", msg="Are the speaker information correct?"
# )
# ALL_TESTS_OUTPUTS.update(out)
# # ====== Power & Source Control ======
# newline()
# print("[cyan3]Power & Source Control[/cyan3]".center(80, "-"))
# print(
# "This section will test if your speaker can be [bold]powered on and off[/bold] by pykefcontrol,\nand if it can [bold]switch between sources.[/bold]"
# )
# input("Press press Enter to continue...")
# with console.status("Getting speaker status..."):
# time.sleep(0.5)
# status = spkr.status
# newline()
# console.print(f"Current speaker status: [dodger_blue1]{status}[/dodger_blue1]")
# newline()
# if status == "standby":
# console.print(
# "The speaker is currently off. The test sequence will be [blue]->on ->off[/blue] and then a normal turn on"
# )
# input("Press press Enter to continue...")
# check_turn_on(spkr, console)
# out = user_confirmation(console, action="turn on")
# ALL_TESTS_OUTPUTS.update(out)
# check_turn_off(spkr, console)
# out = user_confirmation(console, action="turn off")
# ALL_TESTS_OUTPUTS.update(out)
# spkr.power_on()
# elif status in ["powerOn", "wifi", "optical", "aux", "bluetooth"]:
# console.print(
# "The speaker is currently on. The test sequence will be [blue]->off ->on[/blue]"
# )
# input("Press press Enter to continue...")
# check_turn_off(spkr, console)
# out = user_confirmation(console, action="turn off")
# ALL_TESTS_OUTPUTS.update(out)
# check_turn_on(spkr, console)
# user_confirmation(console, action="turn on")
# ALL_TESTS_OUTPUTS.update(out)
# # ====== Source Control ======
# newline()
# console.print("The script will now check that pykefcontrol can switch between sources.")
# console.print(
# "It will switch between [bold]wifi, bluetooth, tv, optical, coaxial and analog[/bold]."
# )
# console.print(
# "[red]Please grab your phone with the application [bold]KEF Connect[/bold] to check that the changes where successful.[/red]"
# )
# input("Press press Enter to continue...")
# issue_with = []
# for source in ["wifi", "bluetooth", "tv", "optical", "coaxial", "analog"]:
# out = check_source(spkr, console, source)
# if out == -1:
# issue_with.append(source)
# else:
# out = user_confirmation(console, action="switch to " + source)
# ALL_TESTS_OUTPUTS.update(out)
# newline()
# console.print(
# "Sources tested: [bold]wifi, bluetooth, tv, optical, coaxial and analog[/bold]"
# )
# if len(issue_with) == 0:
# console.print(
# "[bold green]All sources were switched successfully.[/bold green]",
# )
# else:
# console.print(
# f"[bold orange_red1]The following sources could not be switched to: {issue_with}[/bold orange_red1]",
# )
# # ====== Volume Control ======
# console.print("[cyan3]Volume Control[/cyan3]".center(80, "-"))
# console.print("The script will now check that pykefcontrol can control the volume.")
# console.print(
# "[red]Please grab your phone with the application [bold]KEF Connect[/bold] to check that the changes where successful.[/red]"
# )
# input("Press press Enter to continue...")
# newline()
# console.print("Getting current volume.")
# try:
# vol = spkr.volume
# console.print(f"Current volume: [dodger_blue1]{vol}[/dodger_blue1]")
# except Exception as e:
# console.print(f"[bold orange_red1]Error getting volume: {e}[/bold orange_red1]")
# sys.exit()
# with console.status("Testing changing volume..."):
# if vol < 10:
# spkr.volume = vol + 5
# else:
# spkr.volume = vol - 5
# if vol < 10:
# console.print(
# f"Volume should have been increased by 5.\nCurrent volume: [dodger_blue1]{spkr.volume}[/dodger_blue1]"
# )
# out = user_confirmation(console, action="increase volume")
# ALL_TESTS_OUTPUTS.update(out)
# else:
# console.print(
# f"Volume should have been decreased by 5.\nCurrent volume: [dodger_blue1]{spkr.volume}[/dodger_blue1]"
# )
# out = user_confirmation(console, action="decrease volume")
# ALL_TESTS_OUTPUTS.update(out)
# current_vol = spkr.volume
# # ====== Mute Control ======
# console.print("[cyan3]Mute Control[/cyan3]".center(80, "-"))
# console.print(
# "The script will now check that pykefcontrol can mute and unmute the speaker."
# )
# with console.status("Muting speaker..."):
# spkr.mute()
# console.print("Speaker should be muted now.")
# out = user_confirmation(console, action="mute speaker")
# ALL_TESTS_OUTPUTS.update(out)
# with console.status("Unmuting speaker..."):
# spkr.unmute()
# console.print(f"Speaker should be unmuted now.\nVolume should be {current_vol}.")
# out = user_confirmation(console, action="unmute speaker")
# ALL_TESTS_OUTPUTS.update(out)
# newline()
# # ====== Playback Control ======
# console.print("[cyan3]Playback Control[/cyan3]".center(80, "-"))
# console.print("The script will now check that pykefcontrol can control playback.")
# console.print(
# "[red]Please grab your phone and play music over wifi with [bold]Chromecast, Airplay, Spotify Connect or DLNA[/bold][/red]"
# )
# newline()
# input("Press press Enter to continue...")
# console.print(
# "The script will now try to [detect the current song.\n[bold]Make sure that a song is playing.[/bold]"
# )
# input("Press press Enter to continue...")
# with console.status("Detecting song..."):
# song_info = spkr.get_song_information()
# console.print(f"Current song informations: [dodger_blue1]{song_info}[/dodger_blue1]")
# out = user_confirmation(
# console,
# action="song information",
# msg="Are the informations [bold red] roughly [/bold red] correct?",
# )
# ALL_TESTS_OUTPUTS.update(out)
# console.print("The script will now try to [bold]pause/pause[/bold] the playback.")
# console.print("[bold orange1]Make sure that the track is not paused.[/bold orange1]")
# input("Press press Enter to continue...")
# with console.status("Detecting status..."):
# status = spkr.is_playing
# if status:
# console.print(f"The song is detected as [dodger_blue1]playing[/dodger_blue1]")
# console.print("The script will now try to [bold]pause[/bold] the playback.")
# input("Press press Enter to continue...")
# with console.status("Pausing playback..."):
# spkr.toggle_play_pause()
# console.print("Playback should be paused now.")
# out = user_confirmation(console, action="pause playback")
# ALL_TESTS_OUTPUTS.update(out)
# newline()
# console.print("The script will now try to [bold]resume[/bold] the playback.")
# with console.status("Resuming playback..."):
# spkr.toggle_play_pause()
# console.print("Playback should be resumed now.")
# out = user_confirmation(console, action="resume playback")
# ALL_TESTS_OUTPUTS.update(out)
# newline()
# console.print("The script will now try to [bold]skip to next track[/bold].")
# with console.status("Skipping to next track..."):
# spkr.next_track()
# console.print("The speaker should be playing the next track now.")
# out = user_confirmation(console, action="skip to next track")
# ALL_TESTS_OUTPUTS.update(out)
# newline()
# console.print("The script will now try to [bold]skip to previous track[/bold].")
# with console.status("Skipping to previous track..."):
# spkr.previous_track()
# console.print("The speaker should be playing the previous track now.")
# out = user_confirmation(console, action="skip to previous track")
# ALL_TESTS_OUTPUTS.update(out)
# newline()
# console.print("[cyan3]Sum Up[/cyan3]".center(80, "-"))
# console.print("[bold]Working features:[/bold]")
# for feature in ALL_TESTS_OUTPUTS:
# if ALL_TESTS_OUTPUTS[feature]:
# console.print(f"\t[green]✓[/green] {feature}")
# console.print("[bold]Non working features:[/bold]")
# for feature in ALL_TESTS_OUTPUTS:
# if not ALL_TESTS_OUTPUTS[feature]:
# console.print(f"\t[red]✗[/red] {feature}")
# console.print("[cyan_blue1]Testing Ended[/cyan_blue1]".center(80, "-"))
# console.print("[bold green]Thank you for helping testing Pykefcontrol 🤗[/bold green]")
# console.print(
# "[bold orange1]wether all the tests were successful or not, please repport your results on GitHub[/bold orange1]"
# )
# print("==".center(80, "="))
# newline()
# %%
# NOTE(review): everything below is interactive-notebook scratch (`# %%`
# cells); the unconditional sys.exit() keeps it from running when the file
# is executed or imported as a script.
sys.exit()
console = Console()
spkr = pkf.KefConnector("192.168.124.46")
# %%
console.print("1rst polling")
spkr.poll_speaker(timeout=1)
spkr.volume -= 3
with console.status("Polling speaker..."):
    time.sleep(0.5)
    out = spkr.poll_speaker()
console.print("ok", out, style="bold red")
# %%
|
#encoding:utf-8
import smtplib
import os
from email.mime.text import MIMEText # MIMEText()定义邮件正文
from email.header import Header # Header()定义邮件标题
# Path of the generated HTML test report, next to this script.
report=os.path.join(os.path.dirname(__file__),'result_report.html')
# SMTP server used for sending
smtpserver = 'smtp.exmail.qq.com'
# Sender account / password (used to log in to the mailbox)
user = "mengdebin@shaoziketang.com"
# NOTE(review): credential committed in source -- move to an environment
# variable or secret store and rotate this password.
password = "s6siqg9jNHVCfa4Z"
# "From" address
sender = "mengdebin@shaoziketang.com"
# "To" address
receiver = "1124479307@qq.com"
# Commented-out one-off example kept for reference:
# # mail subject
# subject = 'love'
# # build an HTML-typed mail body (the HTML code is written inline)
# msg = MIMEText('<html><body><a href="">测试报告</a></p></body></html>', report, 'utf-8')
# msg['Subject'] = Header(subject, 'utf-8')
#
# # connect and send the mail (basic smtplib usage pattern)
# smtp = smtplib.SMTP()
# smtp.connect(smtpserver)
# smtp.login(user, password)
# smtp.sendmail(sender, receiver, msg.as_string())
# smtp.quit()
def send_mail(file_new):
    """Send the HTML test report by e-mail, with the report file attached.

    :param file_new: path to the generated HTML report file.

    Uses the module-level ``user``/``password``/``sender``/``receiver``
    credentials and the Tencent Exmail SMTP server.
    """
    # Local import: the module only imported MIMEText at top level.
    from email.mime.multipart import MIMEMultipart

    mail_msg = """
    <p>勺子课堂测试报告...</p>
    <p><a href="www.baidu.com">测试报告地址</a></p>
    """
    # Bug fix: the original built a plain MIMEText message and then called
    # msg.attach() on it, which raises MultipartConversionError. A multipart
    # container is required to hold both the HTML body and the attachment.
    msg = MIMEMultipart()
    msg['From'] = Header("一位小测试", 'utf-8')
    msg['To'] = Header("各位大佬", 'utf-8')
    subject = '勺子课堂测试报告'
    msg['Subject'] = Header(subject, 'utf-8')
    msg.attach(MIMEText(mail_msg, 'html', 'utf-8'))

    # Attach the report itself; 'with' closes the handle (the original left
    # two file objects open, one of them never used at all).
    with open(file_new, 'rb') as f:
        att = MIMEText(f.read(), 'base64', 'utf-8')
    att["Content-Type"] = 'application/octet-stream'
    # Bug fix: the disposition token was misspelled 'attatchment'.
    att["Content-Disposition"] = 'attachment;filename="result_report.html"'
    msg.attach(att)

    smtp = smtplib.SMTP()
    smtp.connect('smtp.exmail.qq.com')  # mail server
    smtp.login(user, password)          # log in with module-level credentials
    smtp.sendmail(sender, receiver, msg.as_string())
    smtp.quit()
    print("邮件已发出!注意查收。")
|
import os
import sys
import curses
import time
class Console:
    """Minimal curses front-end that repeatedly redraws a game board."""

    def __init__(self, board):
        # Board object expected to provide update_board().
        self.__board = board

    def run(self):
        """Enter the draw loop: update the board, refresh, sleep one second.

        Note: this loop never terminates on its own.
        """
        screen = curses.initscr()
        while True:
            self.__board.update_board()
            screen.refresh()
            time.sleep(1)
|
import numpy as np
import os
import nibabel as nib
import re
import warnings
from os.path import join as jph
# --- text-files utils ---
def unique_words_in_string(in_string):
    """Return the first element of the order-preserving de-duplicated word
    list of *in_string* (i.e. effectively its first whitespace token).
    """
    seen = []
    for word in in_string.split():
        if word not in seen:
            seen.append(word)
    return seen[0]
def indians_file_parser(s, sh=None):
    """
    An here-called indians file is a string obtained from a sequence of rows from a Bruker parameter file
    whose shape needs to be changed, in function of its content and according to an optional parameter sh
    that defines the shape of the output.
    This function transform the indian file in a data structure,
    according to the information that can be parsed in the file:
    A - list of vectors transformed into a list
    B - list of numbers, transformed into a np.ndarray, or single number stored as a float.
    B bis - string of 'inf' repeated n times that will be transformed in a numpy array of 'inf'.
    C - list of strings separated by <>.
    D - everything else becomes a string.
    :param s: string indian file
    :param sh: shape related
    :return: parsed indian file of adequate output.
    """
    s = s.strip()  # removes initial and final spaces.
    # A: parenthesised vectors, e.g. "(a, b) (c, d)" -> ["(a, b)", "(c, d)"]
    if ("(" in s) and (")" in s):
        s = s[1:-1]  # removes initial and final ( )
        a = ["(" + v + ")" for v in s.split(") (")]
    # B: purely numeric once sign, decimal point, exponent 'e' and spaces
    # are stripped -> np.ndarray (reshaped when sh is given) or a single float.
    elif (
        s.replace("-", "").replace(".", "").replace(" ", "").replace("e", "").isdigit()
    ):
        if " " in s:
            a = np.array([float(x) for x in s.split()])
            if sh is not None:
                a = a.reshape(sh)
        else:
            a = float(s)
    # B-bis: "inf inf ... inf" -> list of np.inf with the same multiplicity.
    elif "inf" in s:
        if "inf" == unique_words_in_string(s):
            num_occurrences = sum("inf" == word for word in s.split())
            a = [np.inf] * num_occurrences
        else:
            # 'inf' appears mixed with other tokens: keep the raw string.
            a = s[:]
    # C: angle-bracketed strings, e.g. "<x> <y>" -> ["x", "y"]
    elif ("<" in s) and (">" in s):
        s = s[1:-1]  # removes initial and final < >
        a = [v for v in s.split("> <")]
    # D: anything else is kept as a plain string.
    else:
        a = s[:]
    # added to work with ParaVision vers 6.0.1: unwrap singleton lists.
    if isinstance(a, list):
        if len(a) == 1:
            a = a[0]
    return a
def var_name_clean(line_in):
    """
    Removes #, $ and PVM_ from line_in, where line_in is a string from a
    Bruker parameter list file; surrounding whitespace is stripped as well.
    :param line_in: input string
    :return: output string cleaned from #, $ and PVM_
    """
    cleaned = line_in
    for token in ("#", "$", "PVM_"):
        cleaned = cleaned.replace(token, "")
    return cleaned.strip()
def from_dict_to_txt_sorted(dict_input, pfi_output):
    """
    Simple auxiliary to save the information contained in a dictionary into a
    txt file at the specified path to file (pfi), one 'key = value' line per
    entry, keys in sorted order.
    :param dict_input: input structure dictionary
    :param pfi_output: path to file.
    :return:
    """
    with open(pfi_output, "w") as f:
        for key in sorted(dict_input.keys()):
            f.write("{0} = {1} \n".format(key, dict_input[key]))
def bruker_read_files(param_file, data_path, sub_scan_num="1"):
    """
    Reads parameters files of from Bruker raw data imaging format.
    It parses the files 'acqp', 'method', 'reco', 'visu_pars' and 'subject'.
    Even if only 'visu_pars' is relevant for the conversion to nifti, having a more general parser has turned out
    to be useful in many cases (e.g. in PV5.1 to check).
    :param param_file: file parameter, must be a string in the list ['acqp', 'method', 'reco', 'visu_pars', 'subject'].
    :param data_path: path to data.
    :param sub_scan_num: number of the sub-scan folder where usually the 'reco' and 'visu_pars' parameter files
    are stored.
    :return: dict_info dictionary with the parsed information from the input file.
    """
    # --- Locate and open the requested parameter file. Each kind sits at a
    # fixed position in the Bruker folder layout; a missing file is reported
    # on stdout and an empty dict returned (no exception raised).
    if param_file.lower() == "reco":
        if os.path.exists(jph(data_path, "pdata", str(sub_scan_num), "reco")):
            f = open(jph(data_path, "pdata", str(sub_scan_num), "reco"), "r")
        else:
            print(
                "File {} does not exist".format(
                    jph(data_path, "pdata", str(sub_scan_num), "reco")
                )
            )
            return {}
    elif param_file.lower() == "acqp":
        if os.path.exists(jph(data_path, "acqp")):
            f = open(jph(data_path, "acqp"), "r")
        else:
            print("File {} does not exist".format(jph(data_path, "acqp")))
            return {}
    elif param_file.lower() == "method":
        if os.path.exists(jph(data_path, "method")):
            f = open(jph(data_path, "method"), "r")
        else:
            print("File {} does not exist".format(jph(data_path, "method")))
            return {}
    elif param_file.lower() == "visu_pars":
        # 'visu_pars' can live under two different layouts depending on the
        # ParaVision version; try both.
        if os.path.exists(jph(data_path, "pdata", str(sub_scan_num), "visu_pars")):
            f = open(jph(data_path, "pdata", str(sub_scan_num), "visu_pars"), "r")
        elif os.path.exists(
            jph(data_path, str(sub_scan_num), "pdata", "1", "visu_pars")
        ):
            f = open(jph(data_path, str(sub_scan_num), "pdata", "1", "visu_pars"), "r")
        else:
            print(
                "File {} does not exist".format(
                    jph(data_path, "pdata", str(sub_scan_num), "visu_pars")
                )
            )
            return {}
    elif param_file.lower() == "subject":
        if os.path.exists(jph(data_path, "subject")):
            f = open(jph(data_path, "subject"), "r")
        else:
            print("File {} does not exist".format(jph(data_path, "subject")))
            return {}
    else:
        raise IOError(
            "param_file input must be the string 'reco', 'acqp', 'method', 'visu_pars' or 'subject'"
        )
    dict_info = {}
    # NOTE(review): 'f' is never closed; a 'with' block would be safer.
    lines = f.readlines()
    for line_num in range(len(lines)):
        """
        Relevant information are in the lines with '##'.
        For the parameters that have arrays values specified between (), with values in the next line.
        Values in the next line can be parsed in lists or np.ndarray when they contains also characters or numbers.
        """
        line_in = lines[line_num]
        if "##" in line_in:
            if ("$" in line_in) and ("(" in line_in) and ("<" not in line_in):
                # A: '##$NAME=( ... )' -- value continues on following rows.
                splitted_line = line_in.split("=")
                # name of the variable contained in the row, and shape:
                var_name = var_name_clean(splitted_line[0][3:])
                done = False
                indian_file = ""
                pos = line_num
                sh = splitted_line[1]
                # this is not the shape of the vector but the beginning of a full vector.
                if sh.replace(" ", "").endswith(",\n"):
                    sh = sh.replace("(", "").replace(")", "").replace("\n", "").strip()
                    indian_file += sh
                    sh = None
                # this is not the shape of the vector but a full vector.
                elif sh.replace(" ", "").endswith(")\n") and "." in sh:
                    sh = sh.replace("(", "").replace(")", "").replace("\n", "").strip()
                    indian_file += sh
                    sh = None
                # this is finally the shape of the vector that will start in the next line.
                else:
                    sh = sh.replace("(", "").replace(")", "").replace("\n", "").strip()
                    sh = [int(num) for num in sh.split(",")]
                while not done:
                    pos += 1
                    # collect the indian file: info related to the same variables that can appears on multiple rows.
                    line_to_explore = lines[
                        pos
                    ]  # tell seek does not work in the line iterators...
                    if ("##" in line_to_explore) or ("$$" in line_to_explore):
                        # indian file is over
                        done = True
                    else:
                        # we store the rows in the indian file all in the same string.
                        indian_file += line_to_explore.replace("\n", "").strip() + " "
                dict_info[var_name] = indians_file_parser(indian_file, sh)
            elif ("$" in line_in) and ("(" not in line_in):
                # B: '##$NAME=value' on a single row.
                splitted_line = line_in.split("=")
                var_name = var_name_clean(splitted_line[0][3:])
                indian_file = splitted_line[1]
                dict_info[var_name] = indians_file_parser(indian_file)
            elif ("$" not in line_in) and ("(" in line_in):
                # C: '##NAME=( ... )' -- value may continue on following rows.
                splitted_line = line_in.split("=")
                var_name = var_name_clean(splitted_line[0][2:])
                done = False
                indian_file = splitted_line[1].strip() + " "
                pos = line_num
                while not done:
                    pos += 1
                    # collect the indian file: info related to the same variables that can appears on multiple rows.
                    line_to_explore = lines[
                        pos
                    ]  # tell seek does not work in the line iterators...
                    if ("##" in line_to_explore) or ("$$" in line_to_explore):
                        # indian file is over
                        done = True
                    else:
                        # we store the rows in the indian file all in the same string.
                        indian_file += line_to_explore.replace("\n", "").strip() + " "
                dict_info[var_name] = indians_file_parser(indian_file)
            elif ("$" not in line_in) and ("(" not in line_in):
                # D: '##NAME=value' with neither '$' nor parentheses.
                splitted_line = line_in.split("=")
                var_name = var_name_clean(splitted_line[0])
                indian_file = splitted_line[1].replace("=", "").strip()
                dict_info[var_name] = indians_file_parser(indian_file)
            else:
                # General case: take it as a simple string.
                splitted_line = line_in.split("=")
                var_name = var_name_clean(splitted_line[0])
                dict_info[var_name] = (
                    splitted_line[1]
                    .replace("(", "")
                    .replace(")", "")
                    .replace("\n", "")
                    .replace("<", "")
                    .replace(">", "")
                    .replace(",", " ")
                    .strip()
                )
        else:
            # line does not contain any 'assignable' variable, so this information is not included in the info.
            pass
    return dict_info
# --- Slope correction utils ---
def eliminate_consecutive_duplicates(input_list):
    """
    Simple function to eliminate consecutive duplicates in a list of arrays
    or in a list of numbers.
    :param input_list: list with possible consecutive duplicates.
    :return: input_list with no consecutive duplicates; [] for empty input.
    """
    # Robustness fix: the original raised IndexError on an empty list.
    if len(input_list) == 0:
        return []
    output_list = [input_list[0]]
    if isinstance(input_list[0], np.ndarray):
        # Arrays: compare element-wise through list() to obtain a single bool.
        for k in input_list[1:]:
            if not list(k) == list(output_list[-1]):
                output_list.append(k)
    else:
        for item in input_list[1:]:
            if not item == output_list[-1]:
                output_list.append(item)
    return output_list
def data_corrector(
    data, factors, kind="slope", num_initial_dir_to_skip=None, dtype=np.float64
):
    """
    Slope is a float or a vector that needs to be multiplied to the data, to obtain the data as they are acquired.
    To reduce the weight of an image, each slice can be divided by a common float factor, so that at each voxel only the
    integer remaining is stored:
    real_value_acquired[slice_j][x] = data_integer_reminder[slice_j][x] * float_slope[slice_j][x]
    (where = is an almost equal, where the small loss of accuracy is justified by the huge amount of space saved)
    :param data: data as parsed from the data structure.
    :param factors: can be the slope or the offset as parsed from the data structure
    :param kind: is a string that can be 'slope' (multiplicative factor) or 'offset' additive factor.
    :param num_initial_dir_to_skip: in some cases (as some DWI) the number of slices in the image is higher than the
    provided slope/offset length. Usually it is because the initial directions have no weighted and the first element
    in the slope/offset can correct them all. If num_initial_direction_to_skip=j the slope/offset correction starts
    after j slices, and the initial j timepoint are trimmed by j.
    :param dtype: [np.float64] output datatype.
    :return: data after the slope/offset correction.
    ---
    NOTE 1: if used in sequence to correct for slope and offset, correct FIRST slope, then OFFSET.
    NOTE 2: when read 'factor' think slope or offset. The two are embeded in the same method to avoid code repetition.
    """
    if len(data.shape) > 5:
        raise IOError(
            "4d or lower dimensional images allowed. Input data has shape {} ".format(
                data.shape
            )
        )
    assert kind in ("slope", "offset")
    # A factor vector containing inf cannot be applied: warn, return unchanged.
    if hasattr(factors, "__contains__"):
        if np.inf in factors:
            warnings.warn(
                "bruker2nifti - Vector corresponding to {} has some inf values. Can not correct it.".format(
                    kind
                ),
                UserWarning,
            )
            return data
    data = data.astype(dtype)
    # Optionally drop the leading (unweighted) directions from both the
    # factor vector and the data's last axis before correcting.
    if num_initial_dir_to_skip is not None:
        factors = factors[num_initial_dir_to_skip:]
        data = data[..., num_initial_dir_to_skip:]
    # Check compatibility slope and data and if necessarily correct for possible consecutive duplicates
    # (as in some cases, when the size of the slope is larger than any timepoint or spatial point, the problem can
    # be in the fact that there are duplicates in the slope vector. This has been seein only in PV5.1).
    if not (isinstance(factors, int) or isinstance(factors, float)):
        if factors.ndim == 1:
            if (
                not factors.size == data.shape[-1]
                and not factors.size == data.shape[-2]
            ):
                factors = np.array(
                    eliminate_consecutive_duplicates(list(factors)), dtype=np.float64
                )
                if (
                    not factors.size == data.shape[-1]
                    and not factors.size == data.shape[-2]
                ):
                    msg = "Slope shape {0} and data shape {1} appears to be not compatible".format(
                        factors.shape, data.shape
                    )
                    raise IOError(msg)
    # --- Apply the correction; the broadcasting strategy depends on the
    # shapes of 'data' and 'factors'.
    if isinstance(factors, int) or isinstance(factors, float):
        # scalar slope/offset times nd array data
        if kind == "slope":
            data *= factors
        elif kind == "offset":
            data += factors
    elif factors.size == 1:
        # scalar slope/offset embedded in a singleton times nd array data
        if kind == "slope":
            data *= factors[0]
        else:
            data += factors[0]
    elif len(data.shape) == 3 and len(factors.shape) == 1:
        # each slice of the 3d image is multiplied an element of the slope consecutively
        if data.shape[2] == factors.shape[0]:
            for t, fa in enumerate(factors):
                if kind == "slope":
                    data[..., t] = data[..., t] * fa
                elif kind == "offset":
                    data[..., t] = data[..., t] + fa
        else:
            raise IOError(
                "Shape of the 2d image and slope dimensions are not consistent"
            )
    elif (
        len(data.shape) == 4
        and len(factors.shape) == 1
        and factors.shape[0] == data.shape[2]
    ):
        # each slice of the 4d image, taken from the third dim, is multiplied by each element of the slope in sequence.
        if factors.size == data.shape[2]:
            for t in range(data.shape[3]):
                for k in range(factors.size):
                    if kind == "slope":
                        data[..., k, t] = data[..., k, t] * factors[k]
                    elif kind == "offset":
                        data[..., k, t] = data[..., k, t] + factors[k]
        else:
            raise IOError(
                "If you are here, your case cannot be converted. Further investigations required."
            )
    elif (
        len(data.shape) == 5
        and len(factors.shape) == 1
        and factors.shape[0] == data.shape[3]
    ):
        # each slice of the 5d image, taken from the fourth dim, is multiplied by each element of the slope in sequence.
        if factors.size == data.shape[3]:
            for t in range(data.shape[4]):
                for k in range(factors.size):
                    if kind == "slope":
                        data[..., k, t] = data[..., k, t] * factors[k]
                    elif kind == "offset":
                        data[..., k, t] = data[..., k, t] + factors[k]
        else:
            raise IOError(
                "If you are here, your case cannot be converted. Further investigations required."
            )
    else:
        # each slice of the nd image, taken from the last dimension, is multiplied by each element of the slope.
        if factors.size == data.shape[-1]:
            for t in range(data.shape[-1]):
                if kind == "slope":
                    data[..., t] = data[..., t] * factors[t]
                elif kind == "offset":
                    data[..., t] = data[..., t] + factors[t]
        else:
            msg = "Slope shape {0} and data shape {1} appears to be not compatible".format(
                factors.shape, data.shape
            )
            raise IOError(msg)
    return data
# -- nifti affine matrix utils --
def compute_resolution_from_visu_pars(vc_extent, vc_size, vc_frame_thickness):
    """
    Resolution parameter is provided as a vector in the 'reco' parameter file. To extract the information from the
    'visu_pars' only, as some scans can lack the reco file, some computation on its paramteres neesd to be performed.
    :param vc_extent: VisuCoreExtent parameter file from 'visu_pars'.
    :param vc_size: VisuCoreSize parameter file from 'visu_pars'.
    :param vc_frame_thickness: VisuCoreFrameThickness parameter file from 'visu_pars'.
    :return: list of 3 resolution values; a 2d extent is completed with the slice thickness.
    :raises IOError: if extent/size lengths differ or the extent is not 2d or 3d.
    """
    if len(vc_extent) != len(vc_size):
        # Improvement: the original raised a bare IOError with no message.
        raise IOError(
            "VisuCoreExtent and VisuCoreSize must have the same length; "
            "got {0} and {1}".format(len(vc_extent), len(vc_size))
        )
    resolution = [e / float(s) for e, s in zip(vc_extent, vc_size)]
    if isinstance(vc_frame_thickness, np.ndarray) or isinstance(
        vc_frame_thickness, list
    ):
        # Only the first thickness value is used when a vector is provided.
        vc_frame_thickness = vc_frame_thickness[0]
    if len(vc_extent) == 2:
        # 2d acquisition: the third resolution value is the slice thickness.
        resolution += [vc_frame_thickness]
        return resolution
    elif len(vc_extent) == 3:
        return resolution
    else:
        raise IOError(
            "VisuCoreExtent must be 2d or 3d; got length {0}".format(len(vc_extent))
        )
def sanity_check_visu_core_subject_position(vc_subject_position):
    """
    The parameter VisuCoreSubjectPosition can be 'Head_Prone' or 'Head_Supine'. Tertium non datur.
    :param vc_subject_position: VisuCoreSubjectPosition from 'visu_pars'
    :return: Raise error if VisuCoreSubjectPosition is not 'Head_Prone' or 'Head_Supine'
    """
    if vc_subject_position in ("Head_Prone", "Head_Supine"):
        return
    raise IOError(
        "Known cases are 'Head_Prone' or 'Head_Supine' for the parameter 'visu_pars.VisuSubjectPosition."
    )
def filter_orientation(visu_parse_orientation):
    """
    Pre-process the parameter value VisuParseOrientation from the 'visu_pars'
    parameter file: keep the first 9 entries when more are present, reshape to
    3x3 in Fortran (column-major) order and round to 4 decimals.
    :param visu_parse_orientation: VisuParseOrientation from the 'visu_pars' parameter file.
    :return: re-shaped and rounded VisuParseOrientation parameter.
    """
    flat = visu_parse_orientation
    if np.prod(flat.shape) != 9:
        # More than one orientation provided: keep only the first 3x3 block.
        flat = flat.flat[:9]
    return np.around(flat.reshape([3, 3], order="F"), decimals=4)
def pivot(v):
    """
    :param v: vector (np.ndarray) or similar supporting element-wise abs().
    :return: the entry with the largest absolute value, original sign kept.
    Corresponds to the main direction for each column of an orientation matrix.
    """
    magnitudes = abs(v)
    return v[list(magnitudes).index(magnitudes.max())]
def compute_affine_from_visu_pars(
    vc_orientation,
    vc_position,
    vc_subject_position,
    resolution,
    frame_body_as_frame_head=False,
    keep_same_det=True,
    consider_subject_position=False,
):
    """
    How the affine is computed (to the understanding acquired so far):
    0) resolution, orientation and translation are provided in separate arrays, we combine them together in a
    standard 4x4 matrix.
    1) We invert the resulting matrix - according to conventions ParaVision (scanner to image frame)
    and DICOM/Nifti (image to scanner frame).
    2) impose the signs of the first two columns (pivots) to be negative, and the third to be be positive.
    - according to the fact that the provided transformation is DICOM-like (LPS) instead of NIFTI like (RAS)
    (Left/Right, Anterior/Posterior, Inferior/Superior).
    -------- optional changes ----------
    3) frame_body_as_frame_head: Switching the last 2 columns of the rotational part, no matter the value of
    VisuCorePosition - According to the fact we are dealing with quadrupeds and not with humans,
    we need to switch the Anterior-Posterior with the Inferior-Superior direction.
    Set frame_body_as_frame_head=True to set the biped orientation.
    4) consider_subject_position: This can be 'head_prone' or 'head_supine'.
    Reason why sometimes this must be considered for a correct
    orientation and must be considered dis-jointly with frame_body_as_frame_head, is that this parameter is sometimes
    tuned to voluntarily switch from radiological to neurological coordinate systems.
    If the subject is Prone and the technician wants to have the coordinates in neurological he/she can consciously
    set the variable vc_subject_position to 'Head_Supine', even if the subject is not supine.
    5) keep_same_det: Finally, for safety, we can impose the same determinant as the input matrix.
    (If there is any b-vectors list, this is modified accordingly).
    :param vc_orientation: visu core orientation parameter.
    :param vc_position: visu core position parameter. - corresponds to the translational part of the matrix.
    :param vc_subject_position: 'Head_Prone' or 'Head_Supine'. If head supine and if consider_subject_position is True
    it invert the direction of the axis anterior-posterior. - do not confuse subject_position with positon (read this
    last as 'translation').
    :param resolution: resolution of the image, output of compute_resolution_from_visu_pars in the same module.
    :param frame_body_as_frame_head: [False] to parametrise the difference between monkeys [True] and rats [False].
    :param keep_same_det: in case you want the determinant to be the same as the input one. Consider it in particular
    if frame_body_as_frame_head is set to False, and according to the choice of consider_subject_position.
    :param consider_subject_position: [False] The reason why sometimes this must be considered for a correct
    orientation and sometimes must not, is that this parameter is tuned to voluntarily switch from radiological
    to neurological coordinate systems. If the subject is Prone and the technician wants to have the coordinates
    in neurological he/she can consciously set the variable vc_subject_position to 'Head_Supine'.
    :return: final affine (qform) transformation according to the nifti convention
    NOTE: we are assuming that the angles parametrisation is the same for the input and the output.
    We hope this is the case as we do not have any mean to confirm that. The fslreorient2std from FSL
    should be applied afterwards to all the images (after DWI analysis if any).
    """
    sanity_check_visu_core_subject_position(vc_subject_position)
    vc_orientation = filter_orientation(vc_orientation)
    # 0) integrate resolution with the orientation and add the translation in the projective coordinates:
    result = np.eye(4, dtype=np.float32)
    result[0:3, 0:3] = vc_orientation
    # Only the first row of vc_position is used as the translation.
    result[0:3, 3] = vc_position[0,:]
    # 1) Invert the orientation matrix, according to nifti convention and Bruker manual.
    # Round the decimals to avoid precision problems. Check if determinant makes sense.
    result = np.round(np.linalg.inv(result), decimals=4)
    result_det = np.linalg.det(result)
    if result_det == 0:
        raise IOError("Orientation determinant is 0. Cannot grasp this dataset.")
    # 2-3) impose pivot first column negative, second column negative, third column positive
    result_orientation = result[:3, :3]
    # Swap the 2nd and 3rd columns (permutation matrix).
    result_orientation = result_orientation.dot(
        np.array([[1, 0, 0], [0, 0, 1], [0, 1, 0]])
    )
    if frame_body_as_frame_head:  # from SAR to ASL
        result_orientation = result_orientation.dot(
            np.array([[0, -1, 0], [1, 0, 0], [0, 0, 1]])
        )
    # Force the sign of each column's dominant component (see pivot()).
    if pivot(result_orientation[:, 0]) > 0:
        result_orientation[:, 0] = -1 * result_orientation[:, 0]
    if pivot(result_orientation[:, 1]) > 0:
        result_orientation[:, 1] = -1 * result_orientation[:, 1]
    if pivot(result_orientation[:, 2]) < 0:
        result_orientation[:, 2] = -1 * result_orientation[:, 2]
    # Scale the columns by the voxel resolution.
    result_orientation = result_orientation.dot(np.diag(resolution))
    result[:3, :3] = result_orientation
    # 4) - optional
    if consider_subject_position:
        if vc_subject_position == "Head_Prone":
            result[1, :] = -1 * result[1, :]
    # 5) - optional: restore the sign of the determinant if the above flips changed it.
    if keep_same_det:
        if (np.linalg.det(result) < 0 < result_det) or (
            np.linalg.det(result) > 0 > result_det
        ):
            result[0, :3] = -1 * result[0, :3]
    return result
# --- b-vectors utils ---
def obtain_b_vectors_orient_matrix(
    vc_orientation,
    vc_subject_position,
    frame_body_as_frame_head=False,
    keep_same_det=True,
    consider_subject_position=False,
):
    """
    Return the 3x3 rotation used to reorient b-vectors: the rotational part of
    the affine computed with unit resolution and zero translation.
    See _utils.compute_affine_from_visu_pars help for the same input parameters.
    :param vc_orientation: VisuCoreOrientation parameter file
    :param vc_subject_position: VisuCoreSubjectPosition parameter file
    :param frame_body_as_frame_head:
    :param keep_same_det:
    :param consider_subject_position:
    :return:
    """
    unit_resolution = np.array([1, 1, 1])
    zero_translation = np.array([[0, 0, 0]])
    affine = compute_affine_from_visu_pars(
        vc_orientation,
        zero_translation,
        vc_subject_position,
        unit_resolution,
        frame_body_as_frame_head=frame_body_as_frame_head,
        keep_same_det=keep_same_det,
        consider_subject_position=consider_subject_position,
    )
    return np.copy(affine[:3, :3])
def normalise_b_vect(b_vect, remove_nan=True):
    """
    Normalisation of the b_vector matrix (dim : num b-vectors x 3).
    Rows with (near-)zero norm become NaN and are optionally zeroed out.
    :param b_vect: the b_vector matrix (dim : num b-vectors x 3)
    :param remove_nan: remove nan if appears in the b-vector matrix, applying np.nan_to_num.
    :return: normalised b-vectors.
    """
    normalised = np.zeros_like(b_vect)
    norms = np.linalg.norm(b_vect, axis=1)
    for idx, norm in enumerate(norms):
        if norm < 10e-5:
            # Degenerate (b=0) direction: mark as NaN for now.
            normalised[idx, :] = np.nan
        else:
            normalised[idx, :] = (1 / float(norm)) * b_vect[idx, :]
    if remove_nan:
        normalised = np.nan_to_num(normalised)
    return normalised
def apply_reorientation_to_b_vects(reorientation_matrix, row_b_vectors_in_rows):
    """
    Apply the same 3x3 reorientation to every row-major b-vector.
    :param reorientation_matrix: a 3x3 matrix representing a reorientation in the 3D space:
    Typically with det = 1 or -1.
    :param row_b_vectors_in_rows:
    A nx3 matrix where n row-major b-vectors (v1, v2, v3, v4, ...) are aligned in rows.
    :return:
    An nx3 matrix where row k is reorientation_matrix applied to the k-th
    input row; equivalent to (reorientation_matrix @ V.T).T, i.e. the
    original einsum("ij, kj -> ki", R, V).
    """
    return row_b_vectors_in_rows.dot(reorientation_matrix.T)
# -- nibabel-related utils --
def set_new_data(image, new_data, new_dtype=None, remove_nan=True):
    """
    From a nibabel image and a numpy array it creates a new image with
    the same header of the image and the new_data as its data.
    :param image: nibabel image
    :param new_data: numpy array
    :param new_dtype: optional dtype for the output image (defaults to new_data.dtype)
    :param remove_nan: replace NaN values with zeros via np.nan_to_num
    :return: nibabel image
    """
    if remove_nan:
        new_data = np.nan_to_num(new_data)
    # Pick NIfTI-1 vs NIfTI-2 from the header size (348 vs 540 bytes).
    header_size = image.header["sizeof_hdr"]
    if header_size == 348:
        new_image = nib.Nifti1Image(new_data, image.affine, header=image.header)
    elif header_size == 540:
        new_image = nib.Nifti2Image(new_data, image.affine, header=image.header)
    else:
        raise IOError("Input image header problem")
    # update data type:
    new_image.set_data_dtype(new_data.dtype if new_dtype is None else new_dtype)
    return new_image
def path_contains_whitespace(*args):
    """Return True when the path joined from *args* contains any whitespace."""
    return re.search(r"\s+", os.path.join(*args)) is not None
|
# File: p (Python 2.4)
from direct.distributed.DistributedObjectGlobal import DistributedObjectGlobal
class AwardMaker(DistributedObjectGlobal):
    """Client-side stub for the AwardMaker distributed global object.

    No local behaviour is defined; everything is inherited from
    DistributedObjectGlobal.
    """
    pass
|
from flask.ext.socketio import emit
from uuid import uuid4
from .. import socketio
@socketio.on('unsplash')
def unsplash():
    """Socket.IO handler: emit an 'image' event with a random Unsplash URL.

    A random hex token is appended as a query parameter so each request
    yields a fresh (non-cached) image.
    """
    url = 'https://source.unsplash.com/random?t=%s' % uuid4().hex
    emit('image', {'url': url})
|
#-*- coding: utf-8 -*-
from django.contrib import admin
from models import Cliente, Pedido, Produto, Recebimento, Remessa, ItemPedido
# Static JS assets loaded by the admin classes that embed a TinyMCE editor
# (see the Media inner classes below).
tiny_mce_js = [
    '/static/grappelli/tinymce/jscripts/tiny_mce/tiny_mce.js',
    '/static/grappelli/tinymce_setup/tinymce_setup.js',
]
class ProdutoAdmin(admin.ModelAdmin):
    """Admin options for Produto: list and search over name/description/price."""
    list_display = ('nome', 'descricao', 'preco', )
    search_fields = ('nome', 'descricao', 'preco', )
class ClienteAdmin(admin.ModelAdmin):
    """Admin options for Cliente: list and search over contact fields."""
    list_display = ('nome', 'sobrenome', 'email', 'telefone', 'endereco', )
    search_fields = ('nome', 'sobrenome', 'email', 'telefone', 'endereco', )
class PedidoAdmin(admin.ModelAdmin):
    """Admin options for Pedido (order)."""
    list_display = ('cliente', 'valor_total', 'data', )
    # Bug fix: search_fields may not contain a bare ForeignKey name
    # ('cliente') -- Django raises a FieldError when the admin search is
    # used. Follow the related field instead, as RemessaAdmin already does
    # with 'responsavel__username'.
    search_fields = ('cliente__nome', 'valor_total', 'data', )
    # filter_horizontal = ('itens',)
class RemessaAdmin(admin.ModelAdmin):
    """Admin options for Remessa (shipment), with TinyMCE on its text fields."""
    list_display = ('remetente', 'data', 'responsavel', 'observacao', 'pedido',)
    # NOTE(review): 'pedido' is presumably a ForeignKey; a bare FK name in
    # search_fields makes admin search raise FieldError -- confirm against the
    # model and use a related lookup (e.g. 'pedido__cliente__nome') if so.
    search_fields = ('remetente', 'data', 'responsavel__username', 'observacao', 'pedido',)
    class Media:
        # Load the TinyMCE editor assets on the change form.
        js = tiny_mce_js
class RecebimentoAdmin(admin.ModelAdmin):
    """Admin options for Recebimento (receipt), with TinyMCE on its text fields."""
    list_display = ('remessa', 'data', 'receptor', 'observacao', )
    # NOTE(review): 'remessa' is presumably a ForeignKey; a bare FK name in
    # search_fields makes admin search raise FieldError -- verify the model.
    search_fields = ('remessa', 'data', 'receptor', 'observacao', )
    class Media:
        # Load the TinyMCE editor assets on the change form.
        js = tiny_mce_js
class ItemPedidoAdmin(admin.ModelAdmin):
    """Default admin for ItemPedido; only adds the TinyMCE editor assets."""
    class Media:
        js = tiny_mce_js
# Register every model with its customised admin options.
admin.site.register(Produto, ProdutoAdmin)
admin.site.register(Cliente, ClienteAdmin)
admin.site.register(Pedido, PedidoAdmin)
admin.site.register(Remessa, RemessaAdmin)
admin.site.register(Recebimento, RecebimentoAdmin)
admin.site.register(ItemPedido, ItemPedidoAdmin)
|
# Tiny demo: show the runtime types of an int and a str variable.
x = 5
y = "Rajeev"
for value in (x, y):
    print(type(value))
# Bug fix: this file contained an unresolved Git merge conflict
# (<<<<<<< / ======= / >>>>>>> markers), which is a syntax error. Both
# branches held the identical snippet, so the markers were removed and a
# single copy kept.
x = 1
print(x)
x = x + 10
print(x)
exit()
import math, random, util
class RadioNetwork():  # (x,y)(x,y)
    """Genetic-algorithm problem definition for placing radio base stations.

    An individual is a flat list of bit characters ('0'/'1') encoding
    amount_bs (x, y) pairs; each coordinate uses NUM_BITS bits and is shifted
    by limit_area // 2 so positions are centred around the origin.
    """

    NUM_BITS = 5  # bits per coordinate  # 11
    limit_area = 2 ** NUM_BITS  # side of the square deployment area  # 2048
    fitness = 0.2  # target fitness threshold used by is_finished  # 80% ?
    covered_area = 0.8  # fraction of the area to be covered  # 80%
    covered_bs = 2  # coverage radius of a single base station
    # Number of base stations needed to reach covered_area of the square area.
    amount_bs = int(math.ceil(((limit_area**2) * covered_area) / (math.pi * (covered_bs**2))))

    def num_bits(self):
        """Total number of bits in an individual (2 coordinates per station)."""
        return int(self.amount_bs * self.NUM_BITS * 2)

    def new_individual(self):
        """Generate a new random individual, retrying until it is valid."""
        while True:
            individual = [
                str(random.randint(0, 1))
                for _ in range(self.amount_bs * self.NUM_BITS * 2)
            ]
            valid, _ = self.validate_individual(individual)
            if valid:
                return individual

    def get_fitness(self, individual):
        """Return 1 - (net covered area / total area) for the individual.

        Decodes the bits into centred coordinates, sums the gross coverage of
        all stations, then removes the pairwise circle-circle overlaps.
        """
        v = []
        for i in range(self.amount_bs * 2):
            number = int(''.join(individual[i*self.NUM_BITS:(i+1)*self.NUM_BITS]), 2)
            # Floor division keeps the original Python 2 integer semantics.
            v.append(number - self.limit_area // 2)
        area_intersection = 0.0
        area_coverage = self.amount_bs * (math.pi * (self.covered_bs**2))
        r = self.covered_bs
        for i in range(self.amount_bs):
            for j in range(self.amount_bs):
                if i < j:
                    d = math.fabs(math.sqrt(
                        ((v[j*2] - v[i*2])**2) + ((v[j*2+1] - v[i*2+1])**2)
                    ))
                    if d < r * 2:
                        if d > 0:
                            # Lens area of two overlapping equal circles of
                            # radius r: 2*r^2*acos(d/2r) - (d/2)*sqrt(4r^2-d^2).
                            # Bug fix: the original ADDED the triangle term
                            # (part3); the standard formula subtracts it (see
                            # the correct version the author left commented
                            # out in the original source).
                            part1 = r * r * math.acos(d / (2 * r))
                            part2 = r * r * math.acos(d / (2 * r))
                            part3 = 0.5 * math.sqrt((2*r - d) * d * d * (2*r + d))
                            area_intersection += part1 + part2 - part3
                        else:
                            # Coincident stations: the whole disc overlaps.
                            area_intersection += math.pi * (self.covered_bs ** 2)
        area_coverage = area_coverage - area_intersection
        return (1.0 - (area_coverage / (self.limit_area**2)))

    def is_finished(self, individual):
        """True when the individual's fitness reaches the target threshold."""
        # Bug fix: the original called self.get_fitness() without the required
        # 'individual' argument (TypeError at runtime).
        f = self.get_fitness(individual)
        return f <= math.fabs(self.fitness)

    def validate_individual(self, individual):
        """Validation of an individual to find out if it is part of the domain."""
        for i in range(self.amount_bs * 2):
            number = int(''.join(individual[i*self.NUM_BITS:(i+1)*self.NUM_BITS]), 2)
            # Keep every station fully inside the area (coordinate at least
            # covered_bs away from both borders of the encoding range).
            if math.fabs(number) > self.limit_area - self.covered_bs or math.fabs(number) < self.covered_bs:
                return False, individual
        return True, individual

    def show(self, individual):
        """Human-readable '[ x1, y1, x2, y2, ... ]' decoding of an individual."""
        string = "[ "
        total = self.amount_bs * 2
        for i in range(total):
            number = int(''.join(individual[i*self.NUM_BITS:(i+1)*self.NUM_BITS]), 2)
            string += str(number - self.limit_area // 2)
            # Bug fix: the original compared against an undefined global
            # AMOUNT_NUM (NameError); the number of coordinates is total.
            string += ", " if i+1 < total else " "
        return string+"]"
|
from odoo import models, fields, api, _
from odoo.exceptions import ValidationError, UserError, Warning
class ProductTemplateEmployee(models.Model):
    """Per-employee price/commission line attached to a product template."""
    _name = 'product.template.employee'

    # Product this line belongs to (inverse of product_template_employee_ids).
    product_template_id = fields.Many2one('product.template','Product')
    employee_id = fields.Many2one('hr.employee','Employee', required=True)
    company_id = fields.Many2one('res.company','Company', required=True)
    # Sale price for this specific employee/product combination.
    list_price = fields.Float('Sale Price', default=0.0, required=True)
    # How the commission accrues: once per product sold, or per day.
    commision_calculation_method = fields.Selection([('product','Per Product'),('day','Per Day')], 'Calculation Method', default='product', required=True)
    # Fixed amount vs percentage; only the matching field below is relevant.
    commision_type = fields.Selection([('fixed','Fixed'),('percentage','Percentage')],'Commision Type', default='fixed', required=True)
    commision_fixed = fields.Float('Fixed Amount', default=0.0)
    commision_percentage = fields.Float('Percentage', default=0.0)
class ProductTemplate(models.Model):
    """Extend product.template with the per-employee pricing rows defined above."""
    _inherit = 'product.template'

    # One pricing/commission row per employee (model: product.template.employee).
    product_template_employee_ids = fields.One2many('product.template.employee','product_template_id', 'Employees')
from alchemyapi import AlchemyAPI
import json
import numpy as np
import matplotlib.pyplot as plt
from operator import itemgetter
alchemyapi = AlchemyAPI()
import random
#classData = pickle.load(open("reviewClassifier.p", "rb", -1))
#def classifyGame(game):
# myClassifier = Classifier()
# return myClassifier.classify(classData, game)
def chart3(entities):
    """Grouped-bar matplotlib demo with hard-coded data.

    NOTE(review): *entities* is unused -- the chart plots fixed demo series
    ("men"/"women" scores); presumably a scaffold for a real entity chart.
    """
    import numpy as np
    import matplotlib.pyplot as plt
    N = 5
    menMeans = (20, 35, 30, 35, 27)
    menStd = (2, 3, 4, 1, 2)  # NOTE(review): unused (no error bars plotted)
    ind = np.arange(N)  # the x locations for the groups
    width = 0.35  # the width of the bars
    fig, ax = plt.subplots()
    rects1 = ax.bar(ind, menMeans, width, color='r')
    womenMeans = (25, 32, 34, 20, 25)
    rects2 = ax.bar(ind+width, womenMeans, width, color='y')
    # add some text for labels, title and axes ticks
    ax.set_ylabel('Scores')
    ax.set_title('Scores by group and gender')
    ax.set_xticks(ind+width)
    ax.set_xticklabels( ('G1', 'G2', 'G3', 'G4', 'G5') )
    ax.legend( (rects1[0], rects2[0]), ('Men', 'Women') )
    def autolabel(rects):
        # attach some text labels above each bar
        for rect in rects:
            height = rect.get_height()
            ax.text(rect.get_x()+rect.get_width()/2., 1.05*height, '%d'%int(height),
                    ha='center', va='bottom')
    #autolabel(rects1)
    #autolabel(rects2)
    plt.show()
def makeChart2(entities):
    """Grouped-bar chart scaffold over categorized entities.

    Classifies *entities* into people / places / organizations / things, then
    draws the demo bar chart.

    NOTE(review): the category lists are computed but the bars still plot the
    hard-coded demo series; wiring the real counts in is a pending TODO.
    """
    placeTypes = ['City', 'Continent', 'Country', 'GeographicFeature', 'Region', 'StateOrCounty']
    orgTypes = ['Company', 'Organization', 'Facility']
    person = ['Person']
    thingTypes = ['Degree','EntertainmentAward','FinancialMarketIndex','HealthCondition', 'Holiday', 'JobTitle', 'Movie', 'MusicGroup', 'NaturalDisaster',
    'Anniversary', 'Automobile','Degree','EntertainmentAward','FinancialMarketIndex','HealthCondition','Holiday','JobTitle','Movie','MusicGroup','NaturalDisaster',
    'OperatingSystem','PrintMedia','Product','RadioProgram','RadioStation','Sport','SportingEvent','Technology','TelevisionShow','TelevisionStation','EmailAddress',
    'TwitterHandle','Hashtag','IPAddress','Quantity','Money']
    data = [(entity["text"], int(entity["count"])) for entity in entities]
    data = sorted(data, key=itemgetter(1), reverse=True)
    # fix: N was 4, but the demo series and the tick labels both have 5
    # entries, so ax.bar(ind, menMeans, ...) raised on mismatched lengths.
    N = 5
    people = [(entity["text"], int(entity["count"])) for entity in entities if entity["type"] == "Person"]
    orgs = [(entity["text"], int(entity["count"])) for entity in entities if entity["type"] in orgTypes]
    places = [(entity["text"], int(entity["count"])) for entity in entities if entity["type"] in placeTypes]
    things = [(entity["text"], int(entity["count"])) for entity in entities if entity["type"] in thingTypes]
    menMeans = (20, 35, 30, 35, 27)
    ind = np.arange(N)  # the x locations for the groups
    width = 0.35  # the width of the bars
    fig, ax = plt.subplots()
    rects1 = ax.bar(ind, menMeans, width, color='g')
    womenMeans = (25, 32, 34, 20, 25)
    rects2 = ax.bar(ind+width, womenMeans, width, color='b')
    # add some text for labels, title and axes ticks
    ax.set_ylabel('Scores')
    ax.set_title('Scores by group and gender')
    ax.set_xticks(ind+width)
    ax.set_xticklabels( ('People', 'Places', 'Orgs', 'Things', 'G5') )
    ax.legend( (rects1[0], rects2[0]), ('Men', 'Women') )
    def autolabel(rects):
        # attach some text labels above each bar
        for rect in rects:
            height = rect.get_height()
            ax.text(rect.get_x()+rect.get_width()/2., 1.05*height, '%d'%int(height),
                    ha='center', va='bottom')
    autolabel(rects1)
    autolabel(rects2)
    plt.show()
import matplotlib.pyplot as plt; plt.rcdefaults()
import numpy as np
import matplotlib.pyplot as plt
def makeChart(entities):
    """Horizontal bar chart of entity counts, colored by sentiment.

    Green = positive, red = negative, yellow = anything else. Bars are ordered
    by ascending count.
    """
    data = sorted([(entity["text"], int(entity["count"]), entity["sentiment"]["type"]) for entity in entities], key=itemgetter(1))
    # *data* is already sorted; these just partition it by sentiment class.
    posdata = [item for item in data if item[2].lower() == "positive"]
    negdata = [item for item in data if item[2].lower() == "negative"]
    neutraldata = [item for item in data if item[2].lower() == "neutral"]
    # Number of y tick positions. NOTE(review): counts only the three known
    # sentiment classes; if the API returns e.g. "mixed", N < len(data).
    N = len(posdata) + len(negdata) + len(neutraldata)
    labels = np.array([s for (s, num, sent) in data])
    # (fix: removed unused locals y / posx / negx / neutralx from the original)
    index = 0
    for entity, count, sentiment in data:
        if sentiment.lower() == "positive":
            color = "g"
        elif sentiment.lower() == "negative":
            color = "r"
        else:
            color = "y"
        plt.barh(index - 0.5, count, 0.7, color=color)
        index += 1
    y_pos = np.arange(N)
    plt.yticks(y_pos, labels)
    plt.xlabel('Entity Count')
    plt.title('Named Entities')
    plt.show()
def makeChartA(entities):
    """Horizontal bar chart of entity counts, most frequent first.

    Uses random error bars (cosmetic placeholder, as in the original).
    """
    data = [(entity["text"], int(entity["count"])) for entity in entities]
    data = sorted(data, key=itemgetter(1), reverse=True)
    N = len(data)
    x = [num for (s, num) in data]
    labels = np.array([s for (s, num) in data])
    y_pos = np.arange(N)
    error = np.random.rand(N)
    # fix: the original called the undefined name `rand(5)` (NameError) and
    # plotted placeholder values; plot the real counts instead.
    plt.barh(y_pos, x, xerr=error, align='center', alpha=0.4)
    plt.yticks(y_pos, labels)
    plt.xlabel('Count')
    plt.title('Named Entities')
    plt.show()
def makeChartOld(entities):
    """Earlier bar-chart attempt, kept for reference.

    Example data shape:
        data = [("data1", 34), ("data2", 22),
                ("data3", 11), ("data4", 28), ...]

    NOTE(review): plt.bar(x, y, ...) is called with the counts as bar
    *positions* and an index array as heights, and the y ticks are labeled
    from that index array -- the axes look transposed. Superseded by
    makeChart(); confirm before reuse.
    """
    #for entity in entities:
    #    print(entity["text"])
    #    print(int(entity["count"]))
    data = [(entity["text"], int(entity["count"])) for entity in entities]
    data = sorted(data, key=itemgetter(1), reverse=True)
    N = len( data )
    y = np.arange(1, N+1)
    x = [ num for (s, num) in data ]
    fig, ax = plt.subplots()
    def autolabel(rects, labels):
        # attach some text labels above each bar
        for i in range(0, len(rects)):
            rect = rects[i]
            label = labels[i]
            height = rect.get_height()
            ax.text(rect.get_x()+rect.get_width()/2., 1.05*height, label, ha='center', va='bottom')
    labels = [ s for (s, num) in data ]
    width = 1
    bar1 = plt.bar( x, y, width, color="g")
    plt.ylabel( 'Count' )
    #plt.xticks(x + width/2.0, labels )
    plt.yticks(y + width/2.0, labels )
    #autolabel(bar1, labels)
    plt.show()
def getEntityFeatures(doc):
    """Map each named entity in *doc* to its occurrence count.

    Keys are suffixed with "_ex_" to namespace entity features apart from
    keyword features. Returns {} when entity extraction failed (getEntities
    returns None on API error), instead of crashing as the original did.
    """
    entities = getEntities(doc)
    if not entities:
        return {}
    return {entity["text"] + "_ex_": int(entity["count"]) for entity in entities}
def getKeyWordsFeatures(doc):
    """Map each keyword in *doc* to its relevance score.

    Keys are suffixed with "_kw_" to namespace keyword features apart from
    entity features. Returns {} when keyword extraction failed (getKeyWords
    returns None on API error), instead of crashing as the original did.
    """
    keywords = getKeyWords(doc)
    if not keywords:
        return {}
    return {keyword["text"] + "_kw_": float(keyword["relevance"]) for keyword in keywords}
def getKwEntityFeatures(doc):
    """Combined feature dictionary for *doc*: entity counts plus keyword relevances."""
    combined = getEntityFeatures(doc)
    combined.update(getKeyWordsFeatures(doc))
    return combined
def getKeyWords(text):
    """Run AlchemyAPI keyword extraction (with sentiment) on *text*.

    Returns the list of keyword records, or None when the API call failed.
    """
    response = alchemyapi.keywords('text', text, {'sentiment': 1})
    if response['status'] != 'OK':
        print('Error in entity extraction call: ', response['statusInfo'])
        return None
    return [entity for entity in response['keywords']]
def getEntities(text):
    """Run AlchemyAPI named-entity extraction (with sentiment) on *text*.

    Returns the list of entity records, or None when the API call failed.
    """
    response = alchemyapi.entities('text', text, {'sentiment': 1})
    if response['status'] != 'OK':
        print('Error in entity extraction call: ', response['statusInfo'])
        return None
    return [entity for entity in response['entities']]
|
import random
def accountNumberCreate():
    """Generate a random, valid Polish IBAN ("PL" + 2 check digits + 24-digit BBAN).

    The BBAN is the fixed bank/branch prefix (plus its digit-sum control digit)
    followed by a random 16-digit customer number. Check digits follow the
    ISO 13616 mod-97 rule.

    :return: 28-character IBAN string
    """
    minRandom = 1000000000000000
    maxRandom = 9999999999999999
    countryCode = "PL"
    bankNumber = "2500000"
    # Control digit appended to the bank number: digit sum of the prefix.
    CheckSumOfBankNumber = sum(int(d) for d in bankNumber)
    bankNumberFull = bankNumber + str(CheckSumOfBankNumber)
    customerInvoice = str(random.randint(minRandom, maxRandom))
    bban = bankNumberFull + customerInvoice
    # ISO 13616: move "PL00" to the end, substitute P=25, L=21, then
    # check = 98 - (number mod 97).
    # fix: rotation via slicing instead of str.replace (replace hits every
    # occurrence, not just the prefix), and zero-padding with zfill(2) --
    # the original added 10 to single-digit checksums, producing IBANs that
    # fail the mod-97 validation.
    rearranged = bban + "2521" + "00"
    checkSum = str(98 - int(rearranged) % 97).zfill(2)
    return countryCode + checkSum + bban
def accountNumberCheck(IBAN):
    """Validate a Polish IBAN using the ISO 13616 mod-97 rule.

    :param IBAN: "PL" + 2 check digits + digits-only BBAN
    :return: True when valid, False otherwise
        (fix: the original implicitly returned None for invalid input)
    """
    # Rotate the country code + check digits to the end.
    # fix: slicing instead of str.replace, which would also clobber any later
    # occurrence of the same 4-character prefix.
    rearranged = IBAN[4:] + IBAN[:4]
    # Substitute the letters P and L with their numeric values (25, 21).
    numeric = rearranged.replace("PL", "2521")
    return int(numeric) % 97 == 1
|
from utils import *
import math
def min_dist(x0, y0, x1, y1, dimx, dimy):
    """Shortest Euclidean distance from (x0, y0) to (x1, y1) on a toroidal map.

    Considers the 3x3 grid of wrap-around images of the target point and
    returns the smallest distance.

    :param dimx: map width (wrap period along x)
    :param dimy: map height (wrap period along y)
    """
    # fix: replaced the manual accumulator loop (whose local variable shadowed
    # the function name) with min() over the candidate images.
    candidates = [(x1 + dimx*i, y1 + dimy*j) for i in (-1, 0, 1) for j in (-1, 0, 1)]
    return min(math.hypot(x0 - px, y0 - py) for px, py in candidates)
def get_path(x0, y0, x, y, dx, dy, dimx, dimy):
    """Compute a (speed, heading) pair toward the wrap-around-nearest image of (x, y).

    :param x0, y0: current position
    :param x, y: target position
    :param dx, dy: current velocity -- accepted but unused, matching the
        original placeholder implementation (kept for interface compatibility)
    :param dimx, dimy: toroidal map dimensions
    :return: (0, 0) when the target coincides with the current position;
        otherwise (min(1, distance), sign(ddx) * acos(ddx / distance)) where
        ddx is the x-component toward the chosen target image.
    """
    # fix: removed dead locals (velocity, fin_vel, ideal_vec) and commented-out
    # debug lines from the original; the returned values are unchanged.
    candidates = [(x + dimx*i, y + dimy*j) for i in (-1, 0, 1) for j in (-1, 0, 1)]
    # Nearest image by squared distance (first minimum wins, as before).
    tx, ty = min(candidates, key=lambda p: (x0 - p[0])**2 + (y0 - p[1])**2)
    dist = math.sqrt((tx - x0)**2 + (ty - y0)**2)
    if dist == 0:
        return 0, 0
    return min(1, dist), np.sign(tx - x0) * math.acos((tx - x0) / dist)
def move_to(current, mine, config):
    """Steer toward a target using get_path() and issue a move command.

    NOTE(review): this function references the global names `x`, `y`, `user`,
    `password` and `move`, none of which are defined in this module --
    presumably supplied by `from utils import *` or missing; confirm before
    use, as written it raises NameError.
    """
    accel = get_path(float(current['px']),float(current['py']),x,y,float(mine['px']),float(mine['py']),float(config['MAP_WIDTH']),float(config['MAP_HEIGHT']))
    move(user,password,accel[1],accel[0])
# NOTE(review): `exec_move` is not defined in this module -- presumably
# exported by `from utils import *`; confirm, otherwise this raises NameError.
if __name__ == '__main__':
    exec_move('BSOD','Alboucai',6000,6000)
|
from .base import EntityRef
from .exchange_ref import ExchangeRef
class MultipleReferences(Exception):
    """Raised when a process has several reference exchanges and none was specified."""
class NoReference(Exception):
    """Raised when a process has no reference exchange at all."""
class ProcessRef(EntityRef):
    """
    Catalog reference to a process entity.

    Wraps a catalog query so that reference-exchange lookup, inventory,
    exchange-relation, LCI and background queries are all answered through
    self._query without holding the full entity locally.
    """
    _etype = 'process'
    _ref_field = 'referenceExchange'

    @property
    def _addl(self):
        # Extra descriptive text shown alongside the name (geography).
        return self.__getitem__('SpatialScope')

    def __init__(self, external_ref, query, **kwargs):
        self._default_rx = None  # external_ref of the preferred reference flow
        self._lci = dict()  # per-reference-flow cache of LCI results
        super(ProcessRef, self).__init__(external_ref, query, **kwargs)

    @property
    def reference_entity(self):
        """Lazily fetch the list of reference exchanges from the query."""
        if self._reference_entity is None:
            self._reference_entity = self._query.get_reference(self.external_ref)
            # A single reference is unambiguous: adopt it as the default.
            if len(self._reference_entity) == 1:
                self._default_rx = self._reference_entity[0].flow.external_ref
        return self._reference_entity

    def _show_ref(self):
        # Debugging aid: print every reference exchange.
        for i in self.references():
            print('reference: %s' % i)

    @property
    def name(self):
        return self._name

    @property
    def default_rx(self):
        """
        The 'primary' reference exchange of a process CatalogRef. This is an external_ref for a flow.
        This can be set by a user for convenience for multi-reference processes.
        (- which is req. unique among references)
        :return:
        """
        return self._default_rx

    @default_rx.setter
    def default_rx(self, value):
        # Accept a flow external_ref (str/int), a flow-like object, or an exchange.
        if not isinstance(value, str) and not isinstance(value, int):
            if hasattr(value, 'external_ref'):
                value = value.external_ref
            elif hasattr(value, 'entity_type'):
                if value.entity_type == 'exchange':
                    value = value.flow.external_ref
        # Only accept values that actually name one of the reference flows.
        if value in [rx.flow.external_ref for rx in self.references()]:
            self._default_rx = value
        else:
            print('Not a valid reference exchange specification')

    def reference(self, flow=None):
        """
        This used to fallback to regular exchanges; no longer.
        :param flow: a flow, flow external_ref, or exchange; None selects the
            sole reference exchange
        :return: the matching reference exchange
        :raises NoReference: the process has no reference exchanges
        :raises MultipleReferences: flow is None but several references exist
        :raises KeyError: no reference matches *flow*
        """
        if len(self.reference_entity) == 0:
            raise NoReference
        if flow is None:
            if len(self.reference_entity) > 1:
                raise MultipleReferences('%s: You must specify a reference flow' % self.link)
            return self.reference_entity[0]
        if hasattr(flow, 'entity_type'):
            if flow.entity_type == 'exchange':
                flow = flow.flow
        try:
            # First try identity / external_ref equality ...
            return next(x for x in self.reference_entity if x.flow == flow or x.flow.external_ref == flow)
        except StopIteration:
            try:
                # ... then fall back to the flow's own match() predicate.
                return next(x for x in self.reference_entity if x.flow.match(flow))
            except StopIteration:
                print('%s: references:' % self.link)
                self._show_ref()
                raise KeyError(flow)

    def references(self):
        """Generate the process's reference exchanges."""
        for x in self.reference_entity:
            yield x

    '''
    def is_allocated(self, rx):
        """
        For process refs, assume
        :param rx:
        :return:
        """
        for _rx in self.reference_entity:
            if _rx.key == rx.key:
                return _rx.is_alloc
        return False
    '''

    def _use_ref_exch(self, ref_flow):
        """
        returns a string which is the external_ref of a flow; default_rx if none was specified and the process has one.
        :param ref_flow: None, a flow external_ref, a flow, or an exchange
        :return:
        """
        if ref_flow is None:
            if self._default_rx is not None:
                ref_flow = self._default_rx
        elif hasattr(ref_flow, 'entity_type'):
            if ref_flow.entity_type == 'exchange':
                return ref_flow.flow.external_ref
            elif ref_flow.entity_type == 'flow':
                return ref_flow.external_ref
            # Has an entity_type but is neither exchange nor flow: reject.
            raise TypeError('Invalid reference exchange: %s' % ref_flow)
        return ref_flow

    '''
    Inventory queries
    '''
    def exchanges(self, **kwargs):
        """Generate the process's exchanges as ExchangeRefs (values omitted)."""
        for x in self._query.exchanges(self.external_ref, **kwargs):
            yield ExchangeRef(self, self._query.make_ref(x.flow), x.direction, value=None, termination=x.termination,
                              comment=x.comment)

    def exchange_values(self, flow, direction=None, termination=None, reference=None, **kwargs):
        """
        This should get replaced by ev()
        :param flow: a flow, exchange, or flow external_ref
        :param direction:
        :param termination:
        :param reference:
        :param kwargs:
        :return: generator of ExchangeRefs carrying exchange values
        """
        # Normalize flow-like arguments down to an external_ref.
        if hasattr(flow, 'entity_type'):
            if flow.entity_type == 'exchange':
                flow = flow.flow.external_ref
            elif flow.entity_type == 'flow':
                flow = flow.external_ref
        for x in self._query.exchange_values(self.external_ref, flow, direction,
                                             termination=termination, reference=reference, **kwargs):
            yield ExchangeRef(self, self._query.make_ref(x.flow), x.direction, value=x.value, termination=x.termination,
                              comment=x.comment)

    def inventory(self, ref_flow=None, **kwargs):
        """Generate the inventory, references first, then grouped by exchange type."""
        # ref_flow = self._use_ref_exch(ref_flow)  # ref_flow=None returns unallocated inventory
        for x in sorted(self._query.inventory(self.external_ref, ref_flow=ref_flow, **kwargs),
                        key=lambda t: (not t.is_reference, t.type == 'elementary', t.type == 'context', t.type == 'cutoff', t.direction)):
            yield ExchangeRef(self, self._query.make_ref(x.flow), x.direction, value=x.value, termination=x.termination,
                              comment=x.comment, is_reference=x.is_reference)

    def exchange_relation(self, ref_flow, exch_flow, direction, termination=None, **kwargs):
        """Exchange value of *exch_flow* per unit of the reference flow."""
        ref_flow = self._use_ref_exch(ref_flow)
        if hasattr(exch_flow, 'external_ref'):
            exch_flow = exch_flow.external_ref
        return self._query.exchange_relation(self.external_ref, ref_flow,
                                             exch_flow, direction,
                                             termination=termination, **kwargs)

    def fg_lcia(self, lcia_qty, ref_flow=None, **kwargs):
        """Foreground (direct) LCIA of this process for *lcia_qty*."""
        ref_flow = self._use_ref_exch(ref_flow)
        return self._query.lcia(self.external_ref, ref_flow, lcia_qty, **kwargs)

    '''
    support process
    '''
    def reference_value(self, flow=None):
        """Sum of exchange values for *flow* over the reference exchanges."""
        if flow is None:
            flow = self.reference().flow
        return sum(x.value for x in self.exchange_values(flow, reference=True))

    def get_exchange(self, key):
        """Look up a reference exchange by its key; raises KeyError if absent."""
        try:
            return next(x for x in self.reference_entity if x.key == key)
        except StopIteration:
            raise KeyError

    @property
    def alloc_qty(self):
        """
        This is hugely kludgely. What should be the expected behavior of a process ref asked to perform allocation?
        :return:
        """
        return None

    '''
    Background queries
    '''
    def foreground(self, ref_flow=None, **kwargs):
        """Foreground exchanges of this process (background query)."""
        ref_flow = self._use_ref_exch(ref_flow)
        return self._query.foreground(self.external_ref, ref_flow=ref_flow, **kwargs)

    def consumers(self, ref_flow=None, **kwargs):
        """Processes that consume this process's reference flow."""
        ref_flow = self._use_ref_exch(ref_flow)
        return self._query.consumers(self.external_ref, ref_flow=ref_flow, **kwargs)

    def dependencies(self, ref_flow=None, **kwargs):
        """Intermediate (technosphere) dependencies of this process."""
        ref_flow = self._use_ref_exch(ref_flow)
        return self._query.dependencies(self.external_ref, ref_flow=ref_flow, **kwargs)

    def emissions(self, ref_flow=None, **kwargs):
        """Elementary (environmental) exchanges of this process."""
        ref_flow = self._use_ref_exch(ref_flow)
        return self._query.emissions(self.external_ref, ref_flow=ref_flow, **kwargs)

    def cutoffs(self, ref_flow=None, **kwargs):
        """Unterminated (cutoff) exchanges of this process."""
        ref_flow = self._use_ref_exch(ref_flow)
        return self._query.cutoffs(self.external_ref, ref_flow=ref_flow, **kwargs)

    def is_in_background(self, termination=None, ref_flow=None, **kwargs):
        """Whether *termination* (default: this process) is in the background matrix."""
        if termination is None:
            termination = self.external_ref
        ref_flow = self._use_ref_exch(ref_flow)
        return self._query.is_in_background(termination, ref_flow=ref_flow, **kwargs)

    def ad(self, ref_flow=None, **kwargs):
        """Aggregated background dependencies for this process."""
        ref_flow = self._use_ref_exch(ref_flow)
        return self._query.ad(self.external_ref, ref_flow, **kwargs)

    def bf(self, ref_flow=None, **kwargs):
        """Aggregated background emissions for this process."""
        ref_flow = self._use_ref_exch(ref_flow)
        return self._query.bf(self.external_ref, ref_flow, **kwargs)

    def lci(self, ref_flow=None, refresh=False, **kwargs):
        """
        Caches LCI results
        :param ref_flow:
        :param refresh: when True, discard any cached result for ref_flow first
        :param kwargs:
        :return: generator over the (cached) LCI exchanges
        """
        ref_flow = self._use_ref_exch(ref_flow)
        if refresh:
            self._lci.pop(ref_flow, None)
        if ref_flow not in self._lci:
            # Materialize the query result so the cache can be replayed.
            self._lci[ref_flow] = list(self._query.lci(self.external_ref, ref_flow, **kwargs))
        for i in self._lci[ref_flow]:
            yield i

    def unobserved_lci(self, observed, ref_flow=None, **kwargs):
        """
        Performs a sys_lci of the process's unobserved exchanges. derived by excluding observed exchanges from the
        process's inventory and passing the result to sys_lci. Note that terminations are ignored-- if a process
        has an observed Electricity flow, all the process's electricity exchanges are assumed to be accounted for
        by the observation. (flow.external_ref, direction) is the filter.
        :param observed: iterable of exchanges or child flows, having a flow (with external_ref) and direction
        :param ref_flow:
        :param kwargs:
        :return:
        """
        excl = set((k.flow.external_ref, k.direction) for k in observed)
        ref_flow = self._use_ref_exch(ref_flow)
        incl = (k for k in self.inventory(ref_flow) if (k.flow.external_ref, k.direction) not in excl)
        return self._query.sys_lci(self, incl, **kwargs)

    def bg_lcia(self, lcia_qty, ref_flow=None, **kwargs):
        """
        Background LCIA of this process.
        :param lcia_qty: should be a quantity ref (or qty), not an external ID
        :param ref_flow:
        :param kwargs:
        :return:
        """
        ref_flow = self._use_ref_exch(ref_flow)
        return self._query.bg_lcia(self.external_ref, lcia_qty, ref_flow=ref_flow, **kwargs)
|
# ****************************************************************** #
# *********************** <<Byte of Python>> *********************** #
# ****************************************************************** #
########################
# if
########################
# number = 23
# guess = int(input("Enter an integer : "))
# if guess == number:
# print("Congratulations, you guessed it.")
# print("(but you do not win any prizes!)")
# elif guess < number:
# print("No, it is a little higher than that")
# else:
# print("No, it is a little lower than that")
# print("Done")
########################
# while
########################
# number = 23
# running = True
# while running:
# guess = int(input("Enter an integer: "))
# if guess == number:
# print("Congratulations, you guessed it.")
# running = False
# elif guess < number:
# print("No, it is a little higher than that.")
# else:
# print("No, it is a little lower than that.")
# else:
# print("The while loop is over.")
# print("Done")
########################
# for
########################
# for i in range(1, 5):
# print(i)
# else:
# print("The for loop is over")
########################
# break
########################
# while True:
# s = input("Enter something: ")
# if s == "quit":
# break
# print("Length of the string is", len(s))
# else:
# print("quit")
# print("done")
########################
# continue
########################
# while True:
# s = input("Enter something: ")
# if s == "quit":
# break
# if len(s) < 3:
# print("Too small")
# continue
# print("Input is of sufficient length")
# ****************************************************************** #
# ********************<<Programming in python>> ******************** #
# ****************************************************************** #
# for letter in "ABCDEFGHIJKLMNOPQRSTUVWXYZ":
# if letter in "AEIOU":
# print(letter, "is a vowel")
# else:
# print(letter, "is a consonant")
# ****************************************************************** #
# *************************** Demo ********************************* #
# ****************************************************************** #
# ord is short for ordinal
# ord() returns a character's Unicode code point ("ordinal").
# fix: the original accumulated into a variable named `sum`, shadowing the
# builtin; use the builtin sum() with a generator instead.
total = sum(ord(c) for c in "Angelia")
print(total)

# Shift every character of "Consen" up by one code point (Caesar shift of 1).
s = "".join(chr(ord(c) + 1) for c in "Consen")
print(s)
'''
Tensorflow - Neural Network

Two-layer (one hidden layer) network that learns XOR, written with
TensorFlow 1.x graph-mode APIs (tf.placeholder / tf.Session).
NOTE(review): requires TF 1.x, or tf.compat.v1 with v2 behavior disabled.
'''
import tensorflow as tf
import numpy as np

# XOR problem: the four input pairs and their exclusive-or labels.
x_data = np.array([[0, 0], [0, 1], [1, 0], [1, 1]], dtype=np.float32)
y_data = np.array([[0], [1], [1], [0]], dtype=np.float32)

X = tf.placeholder(tf.float32, shape=[None, 2])
Y = tf.placeholder(tf.float32, shape=[None, 1])

# layer1: hidden layer with 2 units and sigmoid activation.
W1 = tf.Variable(tf.random_normal([2, 2]))  # wide
b1 = tf.Variable(tf.random_normal([2]))
layer1 = tf.sigmoid(tf.matmul(X, W1) + b1)  # input : 4 X 2 weight : 2 X 2 ==> 4 X 2

# output layer: single sigmoid unit.
W2 = tf.Variable(tf.random_normal([2, 1]))
b2 = tf.Variable(tf.random_normal([1]))
z = tf.matmul(layer1, W2) + b2  # input : 4 X 2 weight : 2 X 1 ==> 4 X 1
hx = tf.sigmoid(z)

# Logistic (binary cross-entropy) loss, written out element-wise.
cost_i = Y * -tf.log(hx) + (1 - Y) * -tf.log(1 - hx)
cost = tf.reduce_mean(cost_i)
optimizer = tf.train.GradientDescentOptimizer(0.1)
train = optimizer.minimize(cost)

# Threshold the sigmoid output at 0.5 for the class prediction.
predicted = tf.cast(hx > 0.5, dtype=tf.float32)
accuracy = tf.reduce_mean(tf.cast(tf.equal(predicted, Y), dtype=tf.float32))

sess = tf.Session()
init = tf.global_variables_initializer()
sess.run(init)
for i in range(10000):
    _t, _c = sess.run([train, cost], feed_dict={X: x_data, Y: y_data})
    # Log the loss every 100 steps.
    if not i % 100:
        print(i, _c)
print(sess.run(predicted, feed_dict={X: x_data, Y: y_data}))
print(sess.run(accuracy, feed_dict={X: x_data, Y: y_data}))
|
# coding:utf-8
import pandas as pd
import numpy as np
import datetime
from sklearn.preprocessing import StandardScaler
from sklearn.svm import SVR
import ShortTermPredict as STP
import matplotlib.pyplot as plt
import math
from sklearn.metrics import mean_absolute_error,mean_squared_error,r2_score
# Target date whose timetable is being predicted.
test_dt = '20141229'
# Training dates (weekdays in December 2014; some dates skipped).
dts = ['20141201', '20141202', '20141203', '20141204', '20141205'
, '20141208', '20141209', '20141210', '20141211', '20141212'
, '20141215', '20141216', '20141217', '20141218', '20141219'
, '20141223', '20141225', '20141226']
predict_time_table = STP.predict_by_constant_weights(dts, test_dt, 3)
div = 200000  # NOTE(review): unused below -- confirm intent
rs = '0'  # NOTE(review): unused below -- confirm intent
for i in dts:
    real_train_data = pd.read_csv(
        'E:\Pycharm\PythonProjects\Subway\data\TrainData\TrainData_for14_line1_' + i + '.csv')
    real_time_table = real_train_data.loc[4:, 'leav_time'].copy()  # the real timetable
    # Convert from 15-second ticks back to seconds.
    real_time_table *= 15
    # NOTE(review): `test` and `ha` are not defined anywhere in this script;
    # this line raises NameError as written -- confirm what was meant
    # (possibly mean_squared_error(real_time_table, predict_time_table)).
    mean_squared_error(test['count'], ha)
|
class Servo:
    """Serial driver for a three-servo rig (finger, thumb, under).

    Each frame written to the serial port is "<channel>:<pos3>" fields joined
    by commas and terminated with a newline, e.g. "0:090,1:000,2:180\\n".
    """

    def __init__(self, ser):
        # Last commanded position per motor; also fixes the on-wire field order.
        self.last_sent = {"finger": 0, "thumb": 0, "under": 180}
        # Motor name -> servo channel index on the controller.
        self.servo_table = {"finger": 0, "thumb": 1, "under": 2}
        self.ser = ser

    def add_zeros_to_int(self, int_val):
        """Zero-pad *int_val* to the 3-character field the controller expects."""
        # fix: replaced the manual length checks with str.zfill.
        return str(int_val).zfill(3)

    def send(self, motor_hash):
        """Write one complete frame; motors absent from *motor_hash* are
        re-sent their last known position."""
        item_count = 0
        # fix: iteritems() is Python 2 only -> items().
        for motor, pos in self.last_sent.items():
            target = motor_hash.get(motor, pos)
            # fix: the original called add_zeros_to_int without `self.`,
            # which raised NameError at runtime.
            self.ser.write("%s:%s" % (self.servo_table[motor], self.add_zeros_to_int(target)))
            if item_count < 2:
                self.ser.write(",")
            item_count += 1
        self.ser.write("\n")

    def tilt(self, degrees, motor="finger"):
        """Move *motor* to *degrees* (0-180) unless it would fight the opposing servo."""
        if degrees < 0 or degrees > 180:
            print("TOO FAR")
            return
        elif motor == "bottom":
            # fix: the original read the undefined attribute self.last_value.
            # TODO(review): "bottom" is not a known motor name (servo_table has
            # "under"); send() would raise KeyError for it -- confirm intent.
            if abs(self.last_sent["finger"] - degrees) < 180:
                print("NO FIGHTING")
                return
        elif motor == "finger":
            # fix: last_value -> last_sent, and "bottom" is not a key in the
            # position table; "under" is the opposing servo.
            if abs(self.last_sent["under"] - degrees) < 180:
                print("NO FIGHTING")
                return
        else:
            print(degrees)
        self.send({motor: degrees})

    def stop(self, motor="finger"):
        """Command *motor* to the out-of-range value 900.

        NOTE(review): 900 is presumably a stop sentinel understood by the
        firmware -- confirm.
        """
        self.send({motor: 900})
#this one is imported for mobile
from appium import webdriver
from time import sleep
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.by import By
#this one is not used for mobile
import time, requests
from selenium.webdriver.common.action_chains import ActionChains
from selenium.common.exceptions import TimeoutException, NoSuchElementException
from utils.function.general import wait_visible_element, timestamp_print, timestamp_print_verify_url
from utils.lib.user import Environment
from urllib.error import HTTPError
from requests import exceptions
class PageBaseMobile(object):
    """Base page object for Appium mobile tests.

    Bundles common explicit waits, swipe gestures, retry loops and page-load
    timing helpers shared by page classes.

    NOTE(review): get_page_load_time/_click use self.log_result and on_page
    uses self.url; both must be provided by subclasses -- confirm.
    """

    def __init__(self, driver):
        self.driver = driver

    def explicit_wait(self, locator):
        """Wait up to 60 s for the XPath *locator* to be present; return the element."""
        return WebDriverWait(self.driver, 60).until(EC.presence_of_element_located((By.XPATH, locator)))

    def swipeLeft(self):
        self.driver.swipe(100, 835, 900, 835, 150)

    def swipeRight(self):
        self.driver.swipe(900, 835, 100, 835, 150)

    def swipeUp(self):
        self.driver.swipe(250, 150, 250, 800, 300)

    def swipeDown(self, startx=250, starty=800, endx=250, endy=150, dur=300):
        self.driver.swipe(startx, starty, endx, endy, dur)

    def snooze(self, x=2):
        """Sleep for *x* seconds."""
        sleep(x)

    def get_page_load_time(self, url):
        """Open *url*, log how long the load took, retrying up to 3 times on timeout."""
        retry = 0
        set_time_out = 60
        while retry < 3:
            try:
                # fix: time.clock() was removed in Python 3.8;
                # time.perf_counter() is the documented replacement.
                timestart = time.perf_counter()
                self.driver.set_page_load_timeout(set_time_out)
                self.driver.get(url)
                if "www" in url:
                    response = requests.get(url, verify=True)
                # fix: the original `elif "dev" or "nginx" or "beta" in url`
                # was always true because the bare string "dev" is truthy.
                elif any(env in url for env in ("dev", "nginx", "beta")):
                    response = requests.get(url, verify=False)
                timeend = time.perf_counter()
                loadtime = timeend - timestart
                timestamp_print(url, loadtime, self.log_result)
                break
            except TimeoutException:
                print("Load Page takes too long, it's over %s second" % (set_time_out))
                self.driver.refresh()
                retry += 1
            except requests.exceptions.HTTPError:
                if response.status_code == 500:
                    print("Error code 500 : Maaf, saat ini Tokopedia sedang kepenuhan pengunjung. ")
                elif response.status_code == 503:
                    print("Error code 503 : Tokopedia sedang maintenance.")
                elif response.status_code == 502:
                    print("Error code 502 : Bad Gateway.")
                elif response.status_code == 504:
                    print("Error code 504 : Gateway Timeout.")
                # fix: without this the loop spun forever on repeated HTTP errors.
                retry += 1

    def _click(self, loc):
        """Click *loc* and log how long the resulting navigation took."""
        timestart = time.perf_counter()  # fix: time.clock() removed in Py3.8
        loc.click()
        timeend = time.perf_counter()
        loadtime = timeend - timestart
        timestamp_print_verify_url(self.driver.current_url, loadtime, self.log_result)

    def check_visible_element(self, by, element):
        """Wait (up to 3 attempts of 30 s) until the element is visible."""
        retry = 0
        while retry < 3:
            try:
                WebDriverWait(self.driver, 30).until(EC.visibility_of_element_located((by, element)))
                break
            except NoSuchElementException:
                retry += 1
                print("Element not found.. retry attempt %s" % (retry))
            except TimeoutException:
                retry += 1
                print("Load Element is taking too long.. retry attempt %s" % (retry))

    def check_clickable_element(self, by, element):
        """Wait (up to 3 attempts of 30 s) until the element is clickable."""
        retry = 0
        while retry < 3:
            try:
                WebDriverWait(self.driver, 30).until(EC.element_to_be_clickable((by, element)))
                break
            except NoSuchElementException:
                retry += 1
                print("Element not found.. retry attempt %s" % (retry))
            except TimeoutException:
                retry += 1
                print("Load Element is taking too long.. retry attempt %s" % (retry))

    def click_on_javascript(self, target_element):
        """Move to *target_element* and click it via ActionChains."""
        self.mouse = webdriver.ActionChains(self.driver)
        return self.mouse.move_to_element(target_element).click().perform()

    def mouse_hover_to(self, by, element):
        """Hover the mouse over the located element and return that element."""
        target_element = self.find_element(by, element)
        self.check_visible_element(by, element)
        hover_to_target = ActionChains(self.driver).move_to_element(target_element)
        hover_to_target.perform()
        return target_element

    # Find single element
    def find_element(self, *loc):
        return self.driver.find_element(*loc)

    # Find multiple elements
    def find_elements(self, *loc):
        try:
            return self.driver.find_elements(*loc)
        except NoSuchElementException:
            # fix: the original bare `except` swallowed every error (including
            # KeyboardInterrupt), printed a blank line, and returned None.
            return []

    def on_page(self):
        """True when the driver is currently at this page object's URL."""
        # NOTE(review): self.url must be defined by the subclass -- confirm.
        return self.driver.current_url == (self.url)
class switch(object):
    """Poor man's switch statement.

    Calling switch(x) stores x as a class attribute so that subsequent
    case(...) calls can compare candidate values against it; the call
    itself always evaluates to True (so it can sit in an `if`).
    """
    value = None

    def __new__(cls, value):
        cls.value = value
        return True
#Return value based on the value of an input from class object
#Return value based on the value of an input from class object
def case(*args):
    """True when any argument equals the value stored by the last switch() call."""
    target = switch.value
    return any(candidate == target for candidate in args)
|
#Decision Tree Regression
#Regression Template

#Importing the libraries
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd

#Importing the DataSet
dataset = pd.read_csv('Position_Salaries.csv')
X = dataset.iloc[:, 1:2].values  # position level; 1:2 slice keeps X 2-D for sklearn
y = dataset.iloc[:, 2].values  # salary

"""
#Spliting the Datase into The Training set and Test set:
from sklearn.model_selection import train_test_split
X_train, X_test, Y_train, Y_test = train_test_split(X, y, test_size=0.2, random_state = 0)
#Feature Scaling
from sklearn.preprocessing import StandardScaler
sc_X = StandardScaler()
X_train = sc_X.fit_transform(X_train) #primeiro transformar depois fazer o fit
X_test = sc_X.transform(X_test)#aqui nao precisa fazer o fit somente transformar
"""

# Fitting Regression Model to the Training set
# Create your regressor here
from sklearn.tree import DecisionTreeRegressor
regressor = DecisionTreeRegressor(random_state = 0)
regressor.fit(X,y)

# Predicting a new result with Decision Tree Regression
for_pred = np.array(6.5).reshape(1,-1)  # single sample must be reshaped to 2-D
y_pred = regressor.predict(for_pred)

# Visualising the Regression Results
plt.scatter(X, y, color = 'red')
plt.plot(X, regressor.predict(X), color = 'blue')
plt.title('Truth or Bluff (Decision Tree Regression)')
plt.xlabel('Position Level')
plt.ylabel('Salary')
plt.show()

# Visualising the Regression Results (for higher resolution and smoother curve)
# A fine grid reveals the tree's piecewise-constant prediction steps.
X_grid = np.arange(min(X), max(X), 0.01)
X_grid = X_grid.reshape((len(X_grid), 1))
plt.scatter(X, y, color = 'red')
plt.plot(X_grid, regressor.predict(X_grid), color = 'blue')
plt.title('Truth or Bluff (Decision Tree Regression)')
plt.xlabel('Position Level')
plt.ylabel('Salary')
plt.show()
"""
Run the nested cross-validation for the NMTF class, on the Sanger dataset.
Since we want to find co-clusters of significantly higher/lower drug sensitivity
values, we should use the unstandardised Sanger dataset.

NOTE(review): despite the text above, the code below runs plain
MatrixCrossValidation with the NMF class (not NMTF, not nested) -- confirm
which was intended.
"""
import sys
sys.path.append("/home/tab43/Documents/Projects/libraries/")#("/home/thomas/Documenten/PhD/")#
import numpy, random

from BNMTF.code.nmf_np import NMF
from BNMTF.cross_validation.matrix_cross_validation import MatrixCrossValidation
from BNMTF.drug_sensitivity.load_data import load_Sanger

# Settings
standardised = False
train_config = {
    'iterations' : 3000,
    'init_UV' : 'exponential',
    'expo_prior' : 0.1
}
K_range = [7,9,11,13]#range(2,20+1,2)
no_folds = 5
output_file = "./results.txt"
# NOTE(review): files_nested_performances is never used below -- confirm.
files_nested_performances = ["./fold_%s.txt" % fold for fold in range(1,no_folds+1)]

# Construct the parameter search
parameter_search = [{'K':K} for K in K_range]

# Load in the Sanger dataset (only the unstandardised matrix and mask are used).
(_,X_min,M,_,_,_,_) = load_Sanger(standardised=standardised)

# Run the cross-validation framework (seeded for reproducibility).
random.seed(42)
numpy.random.seed(9000)
nested_crossval = MatrixCrossValidation(
    method=NMF,
    X=X_min,
    M=M,
    K=no_folds,  # NOTE(review): K here is the number of folds, not the factorization rank -- confirm naming
    parameter_search=parameter_search,
    train_config=train_config,
    file_performance=output_file
)
nested_crossval.run()

"""
Average performances: {'R^2': 0.7945475836591555, 'MSE': 2.402773897685726, 'Rp': 0.89201588072427906}.
All performances: {'R^2': [0.7971434454089756, 0.7932911883973699, 0.7937366204674767, 0.7912048835312413, 0.7973617804907137], 'MSE': [2.3573186661044465, 2.405759948350799, 2.4571177036234162, 2.4296979090873569, 2.3639752612626115], 'Rp': [0.89358850102890097, 0.89151992775702293, 0.89176931419249772, 0.88990343193851773, 0.89329822870445619]}.
"""
# creating a bot
import discord
from discord.ext import commands
from discord import colour
import youtube_dl
import os

# creating a command
# The bot responds to messages prefixed with '-'.
client = commands.Bot(command_prefix='-')
@client.command(name='version')
async def version(context):
    """Reply in the invoking channel with an embed describing the bot version."""
    # NOTE(review): color=0xfb00 here vs 0xff00 in on_message -- confirm
    # which green value is intended.
    myEmbed = discord.Embed(
        title="Current version", description="The bot is version 1.0", color=0xfb00)
    myEmbed.add_field(name="Version code: ", value="v1.0.0", inline=False)
    myEmbed.add_field(name="Date Released ",
                      value="July 18th, 2021", inline=False)
    myEmbed.set_footer(text="Developed by Spunkey")
    myEmbed.set_author(name="Bash")
    await context.message.channel.send(embed=myEmbed)
@client.event
async def on_ready():
    """Announce startup in the hard-coded general channel once the bot is ready."""
    general_channel = client.get_channel(849549651400065074)
    await general_channel.send('Hello, world! This is bash!')
#passing arguments and returning a string
@client.command()
async def test(ctx, *args):
    """Echo back how many arguments were given and what they were."""
    await ctx.send('{} arguments: {}'.format(len(args), ', '.join(args)))
@client.event
# on_message func takes a parameter message and this function runs when somebody sends a message
async def on_message(message):
    """Respond to a few hard-coded phrases, then dispatch prefixed commands.

    NOTE(review): replies go to the hard-coded channel id rather than
    message.channel, and the 'who' / 'what do you do' checks run *after*
    process_commands -- confirm both are intentional.
    """
    general_channel = client.get_channel(849549651400065074)
    if message.content == 'what is the version':
        myEmbed = discord.Embed(
            title="Current version", description="The bot is version 1.0", color=0xff00)
        myEmbed.add_field(name="Version code: ", value="v1.0.0", inline=False)
        myEmbed.add_field(name="Date Released ",
                          value="July 18th, 2021", inline=False)
        myEmbed.set_footer(text="Developed by Spunkey")
        myEmbed.set_author(name="Bash")
        await general_channel.send(embed=myEmbed)
    # we have to specify this bc we are using this content for commands too
    await client.process_commands(message)
    if message.content == 'who':
        general_channel = client.get_channel(849549651400065074)
        await general_channel.send('I am bash')
    if message.content == 'what do you do':
        general_channel = client.get_channel(849549651400065074)
        await general_channel.send('i\'m a bot')
# Music
@client.command()
async def play(ctx, url : str):
    """Download the audio of `url` as song.mp3 and play it in the 'General'
    voice channel."""
    song_there = os.path.isfile("song.mp3")
    try:
        if song_there:
            os.remove("song.mp3")
    except PermissionError:
        await ctx.send("Wait for the current playing music to end or use the 'stop' command")
        return
    # Fix: reuse an existing voice connection — calling connect() while
    # already connected raises discord.ClientException.
    voice = discord.utils.get(client.voice_clients, guild=ctx.guild)
    if voice is None or not voice.is_connected():
        voiceChannel = discord.utils.get(ctx.guild.voice_channels, name='General')
        await voiceChannel.connect()
        voice = discord.utils.get(client.voice_clients, guild=ctx.guild)
    ydl_opts = {
        'format': 'bestaudio/best',
        'postprocessors': [{
            'key': 'FFmpegExtractAudio',
            'preferredcodec': 'mp3',
            'preferredquality': '192',
        }],
    }
    with youtube_dl.YoutubeDL(ydl_opts) as ydl:
        ydl.download([url])
    # Fix: rename only the first freshly downloaded .mp3 and stop — the
    # original renamed every .mp3 in the working directory onto song.mp3.
    for file in os.listdir("./"):
        if file.endswith(".mp3"):
            os.rename(file, "song.mp3")
            break
    voice.play(discord.FFmpegPCMAudio("song.mp3"))
@client.command()
async def leave(ctx):
    """Disconnect the bot from its voice channel, if connected."""
    voice = discord.utils.get(client.voice_clients, guild=ctx.guild)
    # Fix: `voice` is None when the bot never joined a channel — the
    # original raised AttributeError instead of replying.
    if voice is not None and voice.is_connected():
        await voice.disconnect()
    else:
        await ctx.send("The bot is not connected to a voice channel.")
@client.command()
async def pause(ctx):
    """Pause the currently playing audio."""
    voice = discord.utils.get(client.voice_clients, guild=ctx.guild)
    # Fix: guard against no voice client (AttributeError in the original).
    if voice is not None and voice.is_playing():
        voice.pause()
    else:
        await ctx.send("Currently no audio is playing.")
@client.command()
async def resume(ctx):
    """Resume paused audio."""
    voice = discord.utils.get(client.voice_clients, guild=ctx.guild)
    # Fix: guard against no voice client (AttributeError in the original).
    if voice is not None and voice.is_paused():
        voice.resume()
    else:
        await ctx.send("The audio is not paused.")
@client.command()
async def stop(ctx):
    """Stop playback entirely (play must be used to start again)."""
    voice = discord.utils.get(client.voice_clients, guild=ctx.guild)
    # Fix: the original crashed with AttributeError when the bot was not in
    # a voice channel; stopping while not connected is simply a no-op.
    if voice is not None:
        voice.stop()
# NOTE(review): this bot token is committed in plain text and must be
# treated as leaked — revoke/rotate it and load the replacement from an
# environment variable or config file instead of hard-coding it here.
client.run('ODQ5NTQyNDIwMzc1MTQyNDEw.YLcsCA.HQ-BydQTK8YmptUaSlJmMEGdvmQ')
|
import unittest
from katas.kyu_8.find_the_slope import find_slope
class FindSlopeTestCase(unittest.TestCase):
    """Tests for find_slope([x1, y1, x2, y2]): the slope of the segment
    through two points, returned as a string ('undefined' when vertical)."""

    def test_equals(self):
        # Same y on both points: horizontal segment, slope '0'.
        self.assertEqual(find_slope([19, 3, 20, 3]), '0')

    def test_equals_2(self):
        # Same x on both points: vertical segment has no defined slope.
        self.assertEqual(find_slope([-7, 2, -7, 4]), 'undefined')

    def test_equals_3(self):
        # (150 - 50) / (30 - 10) = 5.
        self.assertEqual(find_slope([10, 50, 30, 150]), '5')

    def test_equals_4(self):
        # (80 - 20) / (20 - 10) = 6.
        self.assertEqual(find_slope([10, 20, 20, 80]), '6')

    def test_equals_5(self):
        # Vertical again, with a negative x.
        self.assertEqual(find_slope([-10, 6, -10, 3]), 'undefined')
|
import unittest, logging, copy
from pyfiles import characterController, playerController
from pyfiles.model import characterClass
from pony.orm import db_session
# Fixture values shared by the character tests below.
TEST_SESSIONID = 12345678
TEST_USERNAME = 'foo'
TEST_CHARNAME = 'Woody'
TEST_CHARCLASS = characterClass.CharacterClass.Fighter
# Attribute scores a brand-new character is expected to start with.
DEFAULT_SCORES = {'Strength': 0, 'Agility': 0, 'Arcana': 0, 'Stealth': 0}
DEFAULT_ATTRIBUTE_SCORES = {
    'scores' : DEFAULT_SCORES
}
# Full JSON document expected from get_json() for a freshly created character.
DEFAULT_CHARACTER_DETAILS = {
    'character' : {
        'charname': TEST_CHARNAME,
        'charclass': 'Fighter',
        'position': {
            'pos_x': 11,
            'pos_y': 11
        },
        'health': 100,
        'attributes': {
            'min_value': 0,
            'max_value': 100,
            'free_points': 5,
            'scores': DEFAULT_SCORES
        }
    }
}
TEST_SCORES = {'Strength': 2, 'Agility': 2, 'Arcana': 2, 'Stealth': 2}
# Slightly adjusted defaults
TEST_CHARACTER_DETAILS = {
    'character' : {
        'charname': TEST_CHARNAME,
        'charclass': TEST_CHARCLASS.value,
        'position': {
            'pos_x': 0,
            'pos_y': 0
        },
        'health': 100,
        'attributes': {
            'min_value': 0,
            'max_value': 100,
            'free_points': 5,
            'scores': TEST_SCORES
        }
    }
}
TEST_SESSION_INFO_JSON = { 'sessionId' : TEST_SESSIONID,
                           'username' : TEST_USERNAME}
# Payload shape used by the character-update API.
TEST_CHARUPDATE_DATA = {
    'character' : TEST_CHARACTER_DETAILS['character'],
    'sessionJson' : TEST_SESSION_INFO_JSON
}
# Populated per-test in TestCharacter.setUp.
TEST_PLAYER = None
class TestCharacter(unittest.TestCase):
    """Tests for characterController, using a temporary player account that
    is created before and removed after every test."""

    exception_response = None

    def setUp(self) -> None:
        # Fix: setUp/tearDown are per-test hooks that unittest calls on the
        # instance; the original decorated them with @classmethod, which only
        # bound `self` to the class (the class-level hooks would be
        # setUpClass/tearDownClass).
        # Fix: without `global`, the original assignment created a local and
        # left the module-level TEST_PLAYER as None.
        global TEST_PLAYER
        TEST_PLAYER = playerController.new_player(TEST_USERNAME, 'test')
        logging.info('Test player created: ' + str(TEST_PLAYER))

    def tearDown(self) -> None:
        # Remove the player created in setUp so each test starts clean.
        playerController.remove_player(TEST_USERNAME)

    def test_get_json(self):
        """A new character serialises to the documented default JSON."""
        self.maxDiff = None
        test_character = characterController.new_character('Woody', TEST_USERNAME)
        self.assertNotEqual(test_character, None, 'Check the character was created.')
        character_json = test_character.get_json()
        self.assertEqual(character_json, DEFAULT_CHARACTER_DETAILS, 'Ensure the character JSON representation is as expected')

    # when I have created a new character
    # their attribute scores should be set to the defaults
    def test_get_json_attribute_scores(self):
        json_scores = None
        with db_session:
            test_character = characterController.new_character('Woody', TEST_USERNAME)
            these_attributes = test_character.get_attributes()
            json_scores = these_attributes.get_json_attribute_scores()
        self.assertEqual(json_scores, DEFAULT_ATTRIBUTE_SCORES)

    def test_update_from_json(self):
        """Updating a character from modified JSON changes its serialised
        form to match."""
        self.maxDiff = None
        # character creation creates a db_session, we must have a top-level session to avoid losing it
        with db_session:
            test_character = characterController.new_character('Woody', TEST_USERNAME)
            self.assertNotEqual(test_character, None, 'Check the character was created.')
            character_json = test_character.get_json()
            self.assertEqual(character_json, DEFAULT_CHARACTER_DETAILS, 'Ensure the character JSON representation is as expected')
            updated_details = copy.deepcopy(TEST_CHARACTER_DETAILS)
            updated_details['character']['charclass'] = 'Spellcaster'
            print('original chardetails ' + str(TEST_CHARACTER_DETAILS))
            self.assertTrue(test_character.update_from_json(updated_details), 'Try to perform character update')
            character_json = test_character.get_json()
            print('Expected: ' + str(updated_details))
            print('Actual' + str(character_json))
            self.assertEqual(character_json, updated_details, 'Ensure the character JSON representation is as expected')
|
import cv2
# Load the frame and its ground-truth annotation as 3-channel BGR images.
image = cv2.imread("./boxing-fisheye/00000.jpg", 1)
imagegt = cv2.imread("./boxing-fisheye/00000.png", 1)
imageinfo = image.shape
imagegtinfo = imagegt.shape
print("imageinfo:", imageinfo)
# Fix: the label said "imageftinfo" while printing imagegtinfo.
print("imagegtinfo:", imagegtinfo)
# shape is (rows, cols, channels) — unpack directly.
height, width, channel = imageinfo
# Downscale to half size; note cv2.resize takes (width, height).
dstheight = int(height * 0.5)
dstwidth = int(width * 0.5)
dst = cv2.resize(image, (dstwidth, dstheight))
cv2.imshow("image", image)
cv2.imshow("GroundTruth", imagegt)
cv2.imshow("resize", dst)
cv2.waitKey(0)
|
'''
Candidate ordering by TSP Optimization
Code adopted from:
https://mlrose.readthedocs.io/en/stable/source/tutorial2.html
'''
import os
import numpy as np
import mlrose
from .df_utils import load, write
def mat2tuples(mat):
    """Collect the nonzero strictly-lower-triangular entries of a dense
    matrix as (row, col, value) tuples."""
    nrows, _ = mat.shape
    return [(i, j, mat[i, j])
            for i in range(nrows)
            for j in range(i)
            if mat[i, j]]
def rank(fnames, save=True):
    """Order candidates by solving a TSP over their pairwise distances.

    Parameters
    ----------
    fnames : dict
        Needs 'dmat' (path of a .npy dense distance matrix) and 'cand'
        (path of the candidate table readable by df_utils.load).
    save : bool
        When True, write the reordered table as '<cand>_ranked<ext>'.

    Returns
    -------
    Path of the ranked file, or None when save is False.
    """
    dist_mat = np.load(fnames['dmat'])
    dist_list = mat2tuples(dist_mat)
    # define fitness function object
    fitness_dists = mlrose.TravellingSales(distances=dist_list)
    # define optimization problem object
    n = dist_mat.shape[0]
    problem_fit = mlrose.TSPOpt(length=n, fitness_fn=fitness_dists,
                                maximize=False)
    # solve problem using the genetic algorithm
    # (fixed random_state keeps the tour deterministic across runs)
    best_state, best_fitness = mlrose.genetic_alg(problem_fit,
                                                  mutation_prob=0.2,
                                                  max_attempts=100,
                                                  random_state=2)
    # retrieve ranked list: best_state is a permutation of row indices
    cand = load(fnames['cand'])
    ranked_cand = cand.loc[best_state]
    # save the output
    fname_ranked = None
    if save:
        fname, ext = os.path.splitext(fnames['cand'])
        fname_ranked = fname + '_ranked' + ext
        write(fname_ranked, ranked_cand)
        print('Ordered candidates saved to {}'.format(fname_ranked))
    return fname_ranked
|
# Day 8: Handhelp Halting
# <ryc> 2021
def inputdata():
    """Read the puzzle input and return its raw lines (trailing newlines
    kept — processing() slices them off when parsing offsets)."""
    # Fix: use a context manager so the handle is closed even on error.
    with open('day_08_2020.input') as stream:
        return list(stream)
def processing(program):
    """Run the boot code until normal exit or a repeated instruction.

    Each line looks like 'acc +3\n'. Executed lines are overwritten in
    place with the sentinel 'rep +0 ', so reaching one again means an
    infinite loop. Returns (accumulator, pointer); a pointer equal to
    len(program) signals normal termination.
    """
    acc = 0
    pc = 0
    while pc < len(program):
        opcode = program[pc][0:3]
        offset = int(program[pc][4:-1])
        if opcode == 'rep':
            # This line already ran once: loop detected, stop here.
            break
        elif opcode == 'acc':
            acc += offset
            program[pc] = 'rep +0 '
            pc += 1
        elif opcode == 'jmp':
            program[pc] = 'rep +0 '
            pc += offset
        elif opcode == 'nop':
            program[pc] = 'rep +0 '
            pc += 1
    return acc, pc
def reparing(program):
    """Repair the boot code by swapping a single nop<->jmp so it terminates.

    Returns the accumulator of the repaired run, or None when no single
    swap fixes the program.

    Bug fix: the original rebuilt the patched line as
    ``change[instruction] + program[focus][4:]``, dropping the character at
    index 3 (the separator), so 'jmp -4\\n' became 'nop-4\\n' and — when a
    nop was turned into a jmp — processing() reparsed the offset from the
    wrong columns and lost the sign (e.g. -4 read as +4). Slicing from
    index 3 keeps the line format intact.
    """
    change = { 'nop':'jmp' , 'jmp':'nop' }
    for focus in range(len(program)):
        instruction = program[focus][0:3]
        # Everything from index 3 on: separator + signed offset + newline.
        rest = program[focus][3:]
        if instruction in change:
            program_copy = program.copy()
            program_copy[focus] = change[instruction] + rest
            accumulator, pointer = processing(program_copy)
            if pointer == len(program):
                return accumulator
if __name__ == '__main__':
    print('\n8: Handhelp Halting')
    # Part 1: accumulator value just before any instruction repeats.
    program = inputdata()
    accumulator,pointer = processing(program)
    print('\nAccumulator =',accumulator)
    # Part 2: re-read the input (processing mutated the first copy) and
    # find the accumulator after the single-swap repair.
    program = inputdata()
    print('\nAccumulator reparing =',reparing(program))
|
# -*- coding: utf-8 -*-
import dgl
import time
import torch as th
import numpy as np
import matplotlib.pyplot as plt
from src.utils import *
from sklearn.metrics import roc_auc_score, roc_curve
from sklearn.metrics import average_precision_score, precision_recall_curve
from sklearn.model_selection import train_test_split, StratifiedKFold
from src.model import GRDTI
def loda_data():
    """Load the heterogeneous drug/protein network matrices from ../data/.

    NOTE(review): the name looks like a typo of load_data; renaming it would
    also require updating the call site in main().

    Returns the tuple (drug_drug, drug_chemical, drug_disease,
    drug_sideeffect, protein_protein, protein_sequence, protein_disease,
    drug_protein) as NumPy arrays.
    """
    network_path = '../data/'
    drug_drug = np.loadtxt(network_path + 'mat_drug_drug.txt')
    # Only the first 708 rows/cols of the similarity matrix are real drugs.
    true_drug = 708
    drug_chemical = np.loadtxt(network_path + 'Similarity_Matrix_Drugs.txt')
    drug_chemical = drug_chemical[:true_drug, :true_drug]
    drug_disease = np.loadtxt(network_path + 'mat_drug_disease.txt')
    drug_sideeffect = np.loadtxt(network_path + 'mat_drug_se.txt')
    protein_protein = np.loadtxt(network_path + 'mat_protein_protein.txt')
    protein_sequence = np.loadtxt(network_path + 'Similarity_Matrix_Proteins.txt')
    protein_disease = np.loadtxt(network_path + 'mat_protein_disease.txt')
    num_drug = len(drug_drug)
    num_protein = len(protein_protein)
    # Removed the self-loop
    drug_chemical = drug_chemical - np.identity(num_drug)
    # Protein similarities come on a 0-100 scale; normalise to [0, 1] first.
    protein_sequence = protein_sequence / 100.
    protein_sequence = protein_sequence - np.identity(num_protein)
    drug_protein = np.loadtxt(network_path + 'mat_drug_protein.txt')
    # Removed DTIs with similar drugs or proteins
    #drug_protein = np.loadtxt(network_path + 'mat_drug_protein_homo_protein_drug.txt')
    print("Load data finished.")
    return drug_drug, drug_chemical, drug_disease, drug_sideeffect, protein_protein, protein_sequence, \
           protein_disease, drug_protein
def _self_loop_edges(count):
    """One (i, i) pair per node index in [0, count)."""
    return [(i, i) for i in range(count)]


def _positive_pairs(matrix, nrows, ncols):
    """Forward (row, col) and reversed (col, row) index pairs for every
    strictly positive entry of `matrix`."""
    forward = []
    reverse = []
    for row in range(nrows):
        for col in range(ncols):
            if matrix[row, col] > 0:
                forward.append((row, col))
                reverse.append((col, row))
    return forward, reverse


def ConstructGraph(drug_drug, drug_chemical, drug_disease, drug_sideeffect, protein_protein, protein_sequence,
                   protein_disease, drug_protein):
    """Build the drug/protein/disease/side-effect heterogeneous DGL graph.

    Interface unchanged from the original; the eight near-identical
    edge-list loops are factored into _positive_pairs/_self_loop_edges.
    drug_chemical and protein_sequence are accepted (for call-site
    compatibility) but, as before, contribute no edges.
    """
    num_drug = len(drug_drug)
    num_protein = len(protein_protein)
    num_disease = len(drug_disease.T)
    num_sideeffect = len(drug_sideeffect.T)
    # Self-loop lists for the 'virtual' relations.
    list_drug = _self_loop_edges(num_drug)
    list_protein = _self_loop_edges(num_protein)
    list_disease = _self_loop_edges(num_disease)
    list_sideeffect = _self_loop_edges(num_sideeffect)
    # Intra-type interactions: only the forward direction is used.
    list_DDI, _ = _positive_pairs(drug_drug, num_drug, num_drug)
    list_PPI, _ = _positive_pairs(protein_protein, num_protein, num_protein)
    # Cross-type relations are added in both directions.
    list_drug_protein, list_protein_drug = _positive_pairs(drug_protein, num_drug, num_protein)
    list_drug_sideeffect, list_sideeffect_drug = _positive_pairs(drug_sideeffect, num_drug, num_sideeffect)
    list_drug_disease, list_disease_drug = _positive_pairs(drug_disease, num_drug, num_disease)
    list_protein_disease, list_disease_protein = _positive_pairs(protein_disease, num_protein, num_disease)
    g_HIN = dgl.heterograph({('disease', 'disease_disease virtual', 'disease'): list_disease,
                             ('drug', 'drug_drug virtual', 'drug'): list_drug,
                             ('protein', 'protein_protein virtual', 'protein'): list_protein,
                             ('sideeffect', 'sideeffect_sideeffect virtual', 'sideeffect'): list_sideeffect,
                             ('drug', 'drug_drug interaction', 'drug'): list_DDI,
                             ('protein', 'protein_protein interaction', 'protein'): list_PPI,
                             ('drug', 'drug_protein interaction', 'protein'): list_drug_protein,
                             ('protein', 'protein_drug interaction', 'drug'): list_protein_drug,
                             ('drug', 'drug_sideeffect association', 'sideeffect'): list_drug_sideeffect,
                             ('sideeffect', 'sideeffect_drug association', 'drug'): list_sideeffect_drug,
                             ('drug', 'drug_disease association', 'disease'): list_drug_disease,
                             ('disease', 'disease_drug association', 'drug'): list_disease_drug,
                             ('protein', 'protein_disease association', 'disease'): list_protein_disease,
                             ('disease', 'disease_protein association', 'protein'): list_disease_protein})
    # Keep only the real relations; the 'virtual' self-loop relations
    # presumably exist just to register every node type in the graph.
    g = g_HIN.edge_type_subgraph(['drug_drug interaction', 'protein_protein interaction',
                                  'drug_protein interaction', 'protein_drug interaction',
                                  'drug_sideeffect association', 'sideeffect_drug association',
                                  'drug_disease association', 'disease_drug association',
                                  'protein_disease association', 'disease_protein association'
                                  ])
    return g
def TrainAndEvaluate(DTItrain, DTIvalid, DTItest, args, drug_drug, drug_chemical, drug_disease,
                     drug_sideeffect, protein_protein, protein_sequence, protein_disease):
    """Train GRDTI on one train/valid/test split and report test metrics.

    DTItrain/DTIvalid/DTItest are arrays of (drug idx, protein idx, label)
    triples. Returns (test_auc, test_aupr, xy_roc_sampling, xy_pr_sampling,
    best_DTI_potential): the xy_* lists are down-sampled ROC/PR points for
    averaging/plotting, and best_DTI_potential is the model's DTI score
    matrix at the best validation AUPR.
    """
    device = th.device(args.device)
    # Numbers of different nodes
    num_disease = len(drug_disease.T)
    num_drug = len(drug_drug)
    num_protein = len(protein_protein)
    num_sideeffect = len(drug_sideeffect.T)
    # Known-interaction matrix and mask rebuilt from the training triples.
    drug_protein = th.zeros((num_drug, num_protein))
    mask = th.zeros((num_drug, num_protein)).to(device)
    for ele in DTItrain:
        drug_protein[ele[0], ele[1]] = ele[2]
        mask[ele[0], ele[1]] = 1
    best_valid_aupr = 0.
    # best_valid_auc = 0
    test_aupr = 0.
    test_auc = 0.
    patience = 0.
    # Positive/negative counts in the test split, used by the manual
    # ROC/PR bookkeeping below.
    pos = np.count_nonzero(DTItest[:, 2])
    neg = np.size(DTItest[:, 2]) - pos
    xy_roc_sampling = []
    xy_pr_sampling = []
    g = ConstructGraph(drug_drug, drug_chemical, drug_disease, drug_sideeffect, protein_protein, protein_sequence,
                       protein_disease, drug_protein)
    drug_drug = th.tensor(drug_drug).to(device)
    drug_chemical = th.tensor(drug_chemical).to(device)
    drug_disease = th.tensor(drug_disease).to(device)
    drug_sideeffect = th.tensor(drug_sideeffect).to(device)
    protein_protein = th.tensor(protein_protein).to(device)
    protein_sequence = th.tensor(protein_sequence).to(device)
    protein_disease = th.tensor(protein_disease).to(device)
    drug_protein = drug_protein.to(device)
    model = GRDTI(g, num_disease, num_drug, num_protein, num_sideeffect, args)
    model.to(device)
    optimizer = th.optim.Adam(model.parameters(), lr=args.lr, weight_decay=args.weight_decay)
    for i in range(args.epochs):
        model.train()
        tloss, dtiloss, l2loss, dp_re, DTI_p = model(drug_drug, drug_chemical, drug_disease, drug_sideeffect,
                                                     protein_protein, protein_sequence, protein_disease,
                                                     drug_protein, mask)
        results = dp_re.detach().cpu()
        optimizer.zero_grad()
        loss = tloss
        loss.backward()
        # Gradient clipping keeps training stable.
        th.nn.utils.clip_grad_norm_(model.parameters(), 1)
        optimizer.step()
        model.eval()
        # Validate (and possibly test) every 25 epochs.
        if i % 25 == 0:
            with th.no_grad():
                print("step", i, ":", "Total_loss & DTIloss & L2_loss:", loss.cpu().data.numpy(), ",", dtiloss.item(),
                      ",", l2loss.item())
                pred_list = []
                ground_truth = []
                for ele in DTIvalid:
                    pred_list.append(results[ele[0], ele[1]])
                    ground_truth.append(ele[2])
                valid_auc = roc_auc_score(ground_truth, pred_list)
                valid_aupr = average_precision_score(ground_truth, pred_list)
                # Early stopping on validation AUPR; test metrics are only
                # recomputed when validation improves.
                if valid_aupr >= best_valid_aupr:
                    best_valid_aupr = valid_aupr
                    # best_valid_auc = valid_auc
                    best_DTI_potential = DTI_p
                    patience = 0
                    # Calculating AUC & AUPR (pos:neg=1:10)
                    db = []
                    xy_roc = []
                    xy_pr = []
                    for ele in DTItest:
                        db.append([results[ele[0], ele[1]], ele[2]])
                    # Sweep thresholds over test pairs sorted by score
                    # (descending) to accumulate ROC/PR points by hand.
                    db = sorted(db, key=lambda x: x[0], reverse=True)
                    tp, fp = 0., 0.
                    for i_db in range(len(db)):
                        if db[i_db][0]:
                            if db[i_db][1]:
                                tp = tp + 1
                            else:
                                fp = fp + 1
                            xy_roc.append([fp / neg, tp / pos])
                            xy_pr.append([tp / pos, tp / (tp + fp)])
                    # Step-wise integration of both curves.
                    test_auc = 0.
                    prev_x = 0.
                    for x, y in xy_roc:
                        if x != prev_x:
                            test_auc += (x - prev_x) * y
                            prev_x = x
                    test_aupr = 0.
                    prev_x = 0.
                    for x, y in xy_pr:
                        if x != prev_x:
                            test_aupr += (x - prev_x) * y
                            prev_x = x
                    # All unknown DTI pairs all treated as negative examples
                    '''pred_list = []
                    ground_truth = []
                    for ele in DTItest:
                        pred_list.append(results[ele[0], ele[1]])
                        ground_truth.append(ele[2])
                    test_auc = roc_auc_score(ground_truth, pred_list)
                    test_aupr = average_precision_score(ground_truth, pred_list)'''
                else:
                    patience += 1
                if patience > args.patience:
                    print("Early Stopping")
                    # sampling (pos:neg=1:10) for averaging and plotting
                    xy_roc_sampling = []
                    xy_pr_sampling = []
                    for i_xy in range(len(xy_roc)):
                        if i_xy % 10 == 0:
                            xy_roc_sampling.append(xy_roc[i_xy])
                            xy_pr_sampling.append(xy_pr[i_xy])
                    # Record data for sampling, averaging and plotting.
                    # All unknown DTI pairs all treated as negative examples
                    '''t1 = time.localtime()
                    time_creat_txt = str(t1.tm_year) + '_' + str(t1.tm_mon) + '_' + str(t1.tm_mday) + '_' + str(
                        t1.tm_hour) + '_' + str(t1.tm_min)
                    fpr, tpr, threshold = roc_curve(ground_truth, pred_list)
                    print("len(fpr):", len(fpr))
                    np.savetxt('fpr_' + time_creat_txt + '.csv', fpr)
                    np.savetxt('tpr_' + time_creat_txt + '.csv', tpr)
                    np.savetxt('ROC_threshold_' + time_creat_txt + '.csv', threshold)
                    precision, recall, threshold = precision_recall_curve(ground_truth, pred_list)
                    print("len(recall):", len(recall))
                    np.savetxt('precision_' + time_creat_txt + '.csv', precision)
                    np.savetxt('recall_' + time_creat_txt + '.csv', recall)
                    np.savetxt('PRC_threshold_' + time_creat_txt + '.csv', threshold)'''
                    break
    print('Valid auc & aupr:', valid_auc, valid_aupr, "; ", 'Test auc & aupr:', test_auc, test_aupr)
    return test_auc, test_aupr, xy_roc_sampling, xy_pr_sampling, best_DTI_potential
def main(args):
    """10-fold (x args.rounds) cross-validation driver for GRDTI.

    Samples negatives at a 1:10 positive:negative ratio, runs
    TrainAndEvaluate per fold, and writes per-round AUC/AUPR plus averaged
    ROC/PR curve samples to files in the working directory.
    """
    drug_d, drug_ch, drug_di, drug_side, protein_p, protein_seq, protein_di, dti_original = loda_data()
    # sampling
    whole_positive_index = []
    whole_negative_index = []
    for i in range(np.shape(dti_original)[0]):
        for j in range(np.shape(dti_original)[1]):
            if int(dti_original[i][j]) == 1:
                whole_positive_index.append([i, j])
            elif int(dti_original[i][j]) == 0:
                whole_negative_index.append([i, j])
    # pos:neg=1:10
    negative_sample_index = np.random.choice(np.arange(len(whole_negative_index)),
                                             size=10 * len(whole_positive_index), replace=False)
    # All unknown DTI pairs all treated as negative examples
    '''negative_sample_index = np.random.choice(np.arange(len(whole_negative_index)),
                                             size=len(whole_negative_index), replace=False)'''
    # Rows of (drug, protein, label): positives first, then sampled negatives.
    data_set = np.zeros((len(negative_sample_index) + len(whole_positive_index), 3), dtype=int)
    count = 0
    for i in whole_positive_index:
        data_set[count][0] = i[0]
        data_set[count][1] = i[1]
        data_set[count][2] = 1
        count += 1
    for i in negative_sample_index:
        data_set[count][0] = whole_negative_index[i][0]
        data_set[count][1] = whole_negative_index[i][1]
        data_set[count][2] = 0
        count += 1
    test_auc_round = []
    test_aupr_round = []
    tpr_mean = []
    fpr = []
    precision_mean = []
    recall = []
    rounds = args.rounds
    for r in range(rounds):
        print("----------------------------------------")
        test_auc_fold = []
        test_aupr_fold = []
        # Stratified so every fold keeps the 1:10 class ratio.
        kf = StratifiedKFold(n_splits=10, random_state=None, shuffle=True)
        k_fold = 0
        for train_index, test_index in kf.split(data_set[:, :2], data_set[:, 2]):
            train = data_set[train_index]
            DTItest = data_set[test_index]
            # 5% of the training split is held out for early stopping.
            DTItrain, DTIvalid = train_test_split(train, test_size=0.05, random_state=None)
            k_fold += 1
            print("--------------------------------------------------------------")
            print("round ", r + 1, " of ", rounds, ":", "KFold ", k_fold, " of 10")
            print("--------------------------------------------------------------")
            time_roundStart = time.time()
            t_auc, t_aupr, xy_roc, xy_pr, DTI_potential = TrainAndEvaluate(DTItrain, DTIvalid, DTItest, args, drug_d,
                                                                           drug_ch, drug_di, drug_side, protein_p,
                                                                           protein_seq, protein_di)
            time_roundEnd = time.time()
            print("Time spent in this fold:", time_roundEnd - time_roundStart)
            test_auc_fold.append(t_auc)
            test_aupr_fold.append(t_aupr)
            # Dump the full predicted-potential matrix and its top-40
            # indices for this round/fold.
            order_txt1 = 'DTI_potential_' + 'r' + str(r + 1) + '_f' + str(k_fold) + '.csv'
            np.savetxt(order_txt1, DTI_potential.detach().cpu().numpy(), fmt='%-.4f', delimiter=',')
            top_values, top_indices = th.topk(DTI_potential, 40)
            order_txt2 = 'top40_' + 'r' + str(r + 1) + '_f' + str(k_fold) + '.csv'
            np.savetxt(order_txt2, top_indices.detach().cpu().numpy(), fmt='%d', delimiter=',')
            # pos:neg=1:10
            # The sampled x-axes are shared across folds; capture them once.
            if not fpr:
                fpr = [_v[0] for _v in xy_roc]
            if not recall:
                recall = [_v[0] for _v in xy_pr]
            temp = [_v[1] for _v in xy_roc]
            tpr_mean.append(temp)
            temp = [_v[1] for _v in xy_pr]
            precision_mean.append(temp)
            print("Training and evaluation is OK.")
        test_auc_round.append(np.mean(test_auc_fold))
        test_aupr_round.append(np.mean(test_aupr_fold))
    t1 = time.localtime()
    time_creat_txt = str(t1.tm_year) + '_' + str(t1.tm_mon) + '_' + str(t1.tm_mday) + '_' + str(t1.tm_hour) + '_' + str(
        t1.tm_min)
    np.savetxt('test_auc_' + time_creat_txt, test_auc_round)
    np.savetxt('test_aupr_' + time_creat_txt, test_aupr_round)
    # pos:neg=1:10
    # Average the sampled curves across all rounds/folds before saving.
    tpr = (np.mean(np.array(tpr_mean), axis=0)).tolist()
    precision = (np.mean(np.array(precision_mean), axis=0)).tolist()
    np.savetxt('fpr.csv', fpr, fmt='%-.4f', delimiter=',')
    np.savetxt('tpr.csv', tpr, fmt='%-.4f', delimiter=',')
    np.savetxt('recall.csv', recall, fmt='%-.4f', delimiter=',')
    np.savetxt('precision.csv', precision, fmt='%-.4f', delimiter=',')
if __name__ == "__main__":
    # parse_args comes from src.utils (imported via the star import above).
    args = parse_args()
    print(args)
    start = time.time()
    main(args)
    end = time.time()
    print("Total time:", end - start)
|
import sys
from src.crawler import Crawler
# Read one URL per line from stdin and crawl them all.
# NOTE(review): lines keep their trailing newlines here, exactly as in the
# original — confirm Crawler strips them.
if __name__ == "__main__":
    Crawler(list(sys.stdin)).crawl()
|
import pandas as pd
import plotly_express as px
line1 = pd.read_csv("data.csv")
# Scatter plot of population vs. internet users, coloured per country and
# with marker size proportional to the Percentage column.
graph = px.scatter(line1, x="Population", y="InternetUsers", color ="Country", size="Percentage", title="Population VS Internet Users")
print("Adios!")
graph.show() |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Apr 27 21:15:04 2018
@author: Iswariya Manivannan
"""
import sys
import os
from collections import deque
from helper import maze_map_to_tree, write_to_file, assign_character_for_nodes
from helper import start_pose, print_maze, clear_screen
import time
def breadth_first_search(maze_map):
    """Run BFS over the maze and animate the visited cells.

    Parameters
    ----------
    maze_map : list of str
        Maze rows as read from a map file.

    Returns
    -------
    The maze map with visited nodes marked (output of
    assign_character_for_nodes), or the unmodified input when nothing was
    expanded.
    """
    start = start_pose(maze_map)
    iterable = maze_map_to_tree(maze_map)
    # Fix: give new_map a default so the function cannot raise
    # UnboundLocalError when the loop body never marks a node.
    new_map = maze_map
    # Fix: use a deque (already imported at module level) — list.pop(0)
    # is O(n) per dequeue.
    fringe = deque([start])
    # Fix: mark the start node visited so it is never re-enqueued.
    visited = [start]
    fringe.extend(iterable[start])
    while fringe:
        parent_node = fringe.popleft()
        for child_node in iterable[parent_node]:
            # NOTE(review): the wall test inspects the *parent* cell; it
            # presumably should test the child cell — confirm against the
            # helper semantics before changing.
            if child_node not in visited and maze_map[parent_node[0]][parent_node[1]] != '=' and maze_map[parent_node[0]][parent_node[1]] != '|':
                new_map = assign_character_for_nodes(maze_map, child_node, parent_node)
                print_maze(new_map)
                fringe.append(child_node)
                visited.append(child_node)
    return new_map
if __name__ == '__main__':
    working_directory = os.getcwd()
    # Optional CLI argument: directory holding the map files (default 'maps').
    if len(sys.argv) > 1:
        map_directory = sys.argv[1]
    else:
        map_directory = 'maps'
    file_path_map1 = os.path.join(working_directory, map_directory + '/map1.txt')
    file_path_map2 = os.path.join(working_directory, map_directory + '/map2.txt')
    file_path_map3 = os.path.join(working_directory, map_directory + '/map3.txt')
    # Read each maze as a list of raw text rows.
    maze_map_map1 = []
    with open(file_path_map1) as f1:
        maze_map_map1 = f1.readlines()
    maze_map_map2 = []
    with open(file_path_map2) as f2:
        maze_map_map2 = f2.readlines()
    maze_map_map3 = []
    with open(file_path_map3) as f3:
        maze_map_map3 = f3.readlines()
    # Currently only map2 is searched; maps 1 and 3 are loaded but unused.
    breadth_first_search(maze_map_map2)
    # CALL THIS FUNCTIONS after filling in the necessary implementations
    # path_map1 = breadth_first_search(maze_map_map1)
    # write_to_file("bdf_map1", path_map1)
    # path_map2 = breadth_first_search(maze_map_map2)
    # write_to_file("bdf_map2", path_map2)
    # path_map3 = breadth_first_search(maze_map_map3)
    # write_to_file("bdf_map3", path_map3)
|
#!/usr/bin/env python3
"""
usage : unzip unFichierZippé.zip
Un exemple simple de scripting en ligne de commande.
Prend en argument le nom d'une archive zippée (fichier.zip).
Extrait les fichiers et les sauvegarde dans des sous-répertoires
par nom d'extension.
"""
# import des modules et fonctions python dont nous avons besoin
from sys import argv as arguments
from os.path import exists, isdir
from os import mkdir
from shutil import rmtree
import zipfile
""" s'il n'y a pas exactement deux elements qui compose la ligne
de commande, on est certain que la commande est incorrecte """
# If the command line does not consist of exactly two elements, the
# invocation is certainly incorrect.
if len(arguments) != 2:
    print("""attention : mauvaise utilisation de la commande.\n
          unzipbytype requiert exactement un argument:\n \
          unzipbytype fichier.zip """)
    # NOTE(review): exit(None) reports success (status 0) for a usage
    # error; a non-zero status would be more conventional — confirm callers.
    exit(None)
ZIPFILENAME = arguments[1]
# No need to print the file name any more.
# print('le fichier à manipuler est : ', ZIPFILENAME)
if not ZIPFILENAME.endswith('.zip'):
    ### Not a zip file. The extension check alone is insufficient,
    ### but as a first pass we settle for it.
    print("le fichier : ", ZIPFILENAME, "n'est pas un fichier zip")
    exit(1)
elif not exists(ZIPFILENAME):
    # The file to process does not exist.
    print(ZIPFILENAME, "n'est pas un fichier existant")
    exit(2)
elif not zipfile.is_zipfile(ZIPFILENAME):
    """ le programme de manipulation des fichiers zip
    a échouer à ouvrir le fichier en argument """
    print("l'archive : ", ZIPFILENAME, "est non reconnue comme intégre")
    exit(3)
else:
    # The archive is valid: we can process it and extract the members.
    DIRECTORY_NAME = ZIPFILENAME[:-4]
    # NOTE(review): everything below — the extraction-by-extension logic the
    # module docstring promises — is commented out, so the script currently
    # only validates the archive name/existence/integrity.
    # with zipfile.ZipFile(ZIPFILENAME, 'r') as ZIP_FILE_OBJECT:
    # Delete the destination directory if it exists,
    # before it can be (re)created.
    # No need to print this any more:
    #print("le repertoire principal : ", DIRECTORY_NAME, " a été créé")
    # LIST_OF_FILES = ZIP_FILE_OBJECT.filelist
    # DICO_EXT ={}
    # for FICHIER in LIST_OF_FILES:
    # The name is the last element of the absolute path.
    # print(FICHIER.filename)
    # FICHIER_NAME = FICHIER.filename.split('/')[-1]
    # No need to print this any more:
    # print(FICHIER.filename)
    # print(FICHIER.filename.split('/'))
    # if FICHIER_NAME == '':
    # Current entry is a directory: move on without doing anything.
    # else:
    # We are dealing with a regular file.
    # The extension is whatever follows the last dot.
    # If the destination directory already exists, no need to create it.
    # The file to create is a binary file.
    # Fetch the current file's content from the archive.
    # Do not forget to close the file.
    # Open the log file.
    # Count the number of files per extension.
    # for extension in DICO_EXT:
    # Display on screen.
    # Write the log file.
    # Close the log file.
    # Function return code: everything went fine.
|
from os import urandom
import hashlib
import hmac
from epqcrypto.persistence import save_data, load_data
def hash_password(password, iterations, algorithm="pbkdf2hmac", sub_algorithm="sha512",
                  salt=None, salt_size=16, output_size=32):
    """Hash `password` with PBKDF2-HMAC and return a self-describing blob
    (header with parameters + salt, then the derived hash) suitable for
    verify_hashed_password.

    Fix: the original unconditionally overwrote `salt` with fresh random
    bytes, so a caller-supplied salt was silently ignored; a random salt is
    now generated only when none is given.

    Raises ValueError for any algorithm other than "pbkdf2hmac".
    """
    if salt is None:
        salt = urandom(salt_size)
    # Parameters are serialised into the header so verification can recover
    # them; the header is also mixed into the derivation input.
    header = save_data(algorithm, sub_algorithm, iterations, salt_size, output_size, salt)
    if algorithm == "pbkdf2hmac":
        return save_data(header, hashlib.pbkdf2_hmac(sub_algorithm, header + password, salt, iterations, output_size))
    else:
        raise ValueError("Unsupported algorithm: '{}'".format(algorithm))
def verify_hashed_password(password, header_salt_hash):
    """Check `password` against a blob produced by hash_password.

    The stored header supplies the algorithm parameters and salt; the
    candidate hash is recomputed and compared in constant time.
    Raises ValueError for an unsupported algorithm.
    """
    header, expected = load_data(header_salt_hash)
    algorithm, sub_algorithm, iterations, salt_size, output_size, salt = load_data(header)
    if algorithm != "pbkdf2hmac":
        raise ValueError("Unsupported algorithm: '{}'".format(algorithm))
    candidate = hashlib.pbkdf2_hmac(sub_algorithm, header + password, salt, iterations, output_size)
    return constant_time_comparison(candidate, expected)
def constant_time_comparison(data1, data2):
    """Timing-attack-resistant equality check (thin hmac.compare_digest wrapper)."""
    matched = hmac.compare_digest(data1, data2)
    return matched
def key_derivation_function(derivation_material, salt=None, output_size=32,
                            algorithm="pbkdf2_hmac", sub_algorithm="sha512",
                            work_factor=100000):
    # Derive an `output_size`-byte key from `derivation_material` via
    # PBKDF2-HMAC, mixing a serialised parameter header into the input.
    # NOTE(review): when `salt` is omitted a random one is generated but is
    # never returned or embedded in the result, so the key cannot be
    # re-derived later — confirm callers always pass an explicit salt.
    # NOTE(review): unlike hash_password, `algorithm` is never validated
    # here and the header omits the salt itself.
    salt = salt if salt is not None else urandom(32)
    header = save_data(algorithm, sub_algorithm, work_factor, len(salt), output_size)
    return hashlib.pbkdf2_hmac(sub_algorithm, header + derivation_material, salt, work_factor, output_size)
def test_hash_password():
    """Round-trip unit test for hash_password / verify_hashed_password,
    plus a smoke call of key_derivation_function.

    Fix: the final status message used the Python 2-only print *statement*
    (a SyntaxError on Python 3); the call form below behaves identically on
    both interpreters.
    """
    password = "password"
    iterations = 100000
    _hash = hash_password(password, iterations)
    assert verify_hashed_password(password, _hash)
    invalid_password = "passwore"
    assert not verify_hashed_password(invalid_password, _hash)
    # Exercise the KDF as well; the derived key itself is not inspected.
    key = key_derivation_function(password)
    print("Passed hash_password/verify_password unit test")
# Run the self-test when executed directly.
if __name__ == "__main__":
    test_hash_password()
|
# Generated by Django 3.0.1 on 2019-12-21 09:39
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated (Django 3.0.1): make CatPhoto.photo nullable."""

    dependencies = [
        ('cat', '0006_auto_20191221_1647'),
    ]

    operations = [
        migrations.AlterField(
            model_name='catphoto',
            name='photo',
            # verbose_name '画像' ("image") is the admin label; null=True
            # permits rows without an uploaded file.
            field=models.FileField(null=True, upload_to='', verbose_name='画像'),
        ),
    ]
|
command script import {YOUR_PATH}/ignore_exception.py
|
import os
import psycopg2
from dotenv import load_dotenv
# Load variables from a local .env file into the process environment.
load_dotenv()
DATABASE_URL = os.getenv('DATABASE_URL')
def get_db_connection():
    """Open a new psycopg2 connection and cursor.

    ENVIRONMENT=LOCAL selects the POSTGRES_CONN_DETAIL DSN; otherwise
    DATABASE_URL is used with sslmode='require' (typical for hosted DBs).

    Returns a (connection, cursor) tuple; the caller is responsible for
    committing and closing.
    """
    if os.getenv('ENVIRONMENT') == 'LOCAL':
        conn = psycopg2.connect(os.getenv('POSTGRES_CONN_DETAIL'))
    else:
        conn = psycopg2.connect(DATABASE_URL, sslmode='require')
    cursor = conn.cursor()
    return conn, cursor
def save_search(query):
    """Persist a search string together with its tsvector form.

    Fix: the original interpolated `query` into the SQL via str.format,
    which is an SQL-injection vector (and breaks on any quote character);
    the values are now passed as psycopg2 query parameters so the driver
    quotes them safely.
    """
    conn, cursor = get_db_connection()
    cursor.execute(
        "INSERT INTO search_query (q_str, q_tsv) VALUES (%s, to_tsvector(%s))",
        (query, query),
    )
    conn.commit()
    print('query saved successfully!')
    conn.close()
def get_recent(query):
    """Return previously saved searches matching `query` via full-text
    search, one per line, or a fallback message when none match.

    Fix: as with save_search, the original built the SQL with str.format —
    an SQL-injection vector — so the query text is now bound as a
    psycopg2 parameter.
    """
    conn, cursor = get_db_connection()
    cursor.execute(
        "SELECT q_str FROM search_query WHERE q_tsv @@ (plainto_tsquery(%s)) = true",
        (query,),
    )
    rows = cursor.fetchall()
    print('got recent queries successfully!')
    conn.close()
    if not rows:
        return 'No recent searches found for it.'
    return '\n'.join([r[0] for r in rows])
|
import unittest
from collections import defaultdict
import torch
import torchvision.transforms as transforms
from sampler import PKSampler
from torch.utils.data import DataLoader
from torchvision.datasets import FakeData
class Tester(unittest.TestCase):
    """Sanity checks for PKSampler's P×K batch construction."""

    def test_pksampler(self):
        # P classes per batch, K samples per class.
        p, k = 16, 4
        # Ensure sampler does not allow p to be greater than num_classes
        dataset = FakeData(size=100, num_classes=10, image_size=(3, 1, 1))
        targets = [target.item() for _, target in dataset]
        self.assertRaises(AssertionError, PKSampler, targets, p, k)
        # Ensure p, k constraints on batch
        trans = transforms.Compose(
            [
                transforms.PILToTensor(),
                transforms.ConvertImageDtype(torch.float),
            ]
        )
        dataset = FakeData(size=1000, num_classes=100, image_size=(3, 1, 1), transform=trans)
        targets = [target.item() for _, target in dataset]
        sampler = PKSampler(targets, p, k)
        loader = DataLoader(dataset, batch_size=p * k, sampler=sampler)
        for _, labels in loader:
            # Count how many samples of each class the batch contains.
            bins = defaultdict(int)
            for label in labels.tolist():
                bins[label] += 1
            # Ensure that each batch has samples from exactly p classes
            self.assertEqual(len(bins), p)
            # Ensure that there are k samples from each class
            for b in bins:
                self.assertEqual(bins[b], k)
# Allow running this test file directly with `python <file>`.
if __name__ == "__main__":
    unittest.main()
|
def evaluationFunction(gameState):
|
from settings import settings
from office365.sharepoint.client_context import ClientContext
# Authenticate against the configured SharePoint site with the stored
# username/password, then print the site URL to confirm connectivity.
creds = settings.get('user_credentials')
ctx = ClientContext(settings["url"]).with_user_credentials(
    creds.get('username'),
    creds.get('password'),
)
web = ctx.web.get().execute_query()
print(web.properties["Url"])
|
#coding:utf-8
import numpy as np
import matplotlib.pyplot as plt
import sys
import tensorflow as tf
N = 100  # samples per class
K = 3    # number of classes (spiral arms)
D = 2    # feature dimensionality
def createData():
    """Generate the classic K-arm spiral toy classification dataset.

    Returns:
        X: float array of shape (N*K, D) with 2-D points along K spiral arms.
        Y: float array of shape (N*K,) with class labels 0.0 .. K-1.
    """
    X = np.zeros((N * K, D), dtype=float)
    Y = np.zeros(N * K, dtype=float)
    for k in range(K):
        idx = range(N * k, N * (k + 1))
        # Radius grows linearly along each arm; the angle sweeps one segment
        # per class, with Gaussian noise so the arms are not perfectly clean.
        r = np.linspace(0.0, 1.0, N)
        t = np.linspace(4 * k, (k + 1) * 4, N) + np.random.randn(N) * 0.2
        X[idx] = np.c_[r * np.sin(t), r * np.cos(t)]
        Y[idx] = float(k)
    # Removed leftover debug print of the label dtypes.
    return X, Y
def train(x,y):
    """Train a one-hidden-layer softmax classifier on (x, y) with TF1 graph mode.

    :param x: input features, shape (num_samples, D).
    :param y: integer class labels, shape (num_samples,).
    :return: learned parameters (W1, B1, W2, B2) as numpy arrays.
    """
    print(x.shape)
    print(y.shape)
    # Labels are not one-hot; convert with tf.one_hot.
    # Note: the result is a tensor, so it cannot be passed via feed_dict directly.
    y_onehot = tf.one_hot(y,depth=3)
    print(y_onehot)
    h1_num = 100
    X = tf.placeholder(tf.float32,shape=x.shape,name='x')
    Y = tf.placeholder(tf.float32,shape=y_onehot.shape,name='y')
    keep_prob = tf.placeholder(tf.float32)
    w1 = tf.Variable(tf.truncated_normal([D,h1_num]),dtype=tf.float32)
    b1 = tf.Variable(tf.zeros([1,h1_num]),dtype=tf.float32)
    w2 = tf.Variable(tf.truncated_normal([h1_num,K]),dtype=tf.float32)
    b2 = tf.Variable(tf.zeros([1,K]),dtype=tf.float32)
    h1 = tf.nn.relu(tf.matmul(X,w1)+b1)  # hidden layer
    h_1_drop = tf.nn.dropout(h1,keep_prob)  # dropout layer
    Y_prev = tf.nn.softmax(tf.matmul(h_1_drop,w2)+b2)
    # Cross-entropy loss. Bug fix: add a tiny epsilon so log(0) cannot
    # produce NaN when the softmax underflows to exactly zero.
    loss = -tf.reduce_sum(Y*tf.log(Y_prev + 1e-10))  # + 1e-6*tf.global_norm([w2,w1])
    train_op = tf.train.GradientDescentOptimizer(0.001).minimize(loss)
    # Bug fix: tf.arg_max is deprecated (removed in newer TF); use tf.argmax.
    correct_prediction = tf.equal(tf.argmax(Y_prev,1),tf.argmax(Y,1))
    accuracy = tf.reduce_mean(tf.cast(correct_prediction,tf.float32))
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        # y_onehot is a tensor, so evaluate it once and feed the numpy result.
        y_r = sess.run(y_onehot)
        for epoch in range(1000):
            sess.run(train_op,feed_dict={X:x,Y:y_r,keep_prob:0.8})
            if epoch % 100 == 0:
                print(epoch,sess.run(accuracy,feed_dict={X:x,Y:y_r,keep_prob:1.0}))
                sys.stdout.flush()
        W1,B1,W2,B2 = sess.run([w1,b1,w2,b2])
        return W1,B1,W2,B2
def show(X, Y, W, b, W2, b2):
    """Plot the decision regions of the trained two-layer network over the data.

    Evaluates the network (ReLU hidden layer, argmax output) on a dense grid
    covering the data, then overlays the training points.
    """
    step = 0.02
    x_lo, x_hi = X[:, 0].min() - 1, X[:, 0].max() + 1
    y_lo, y_hi = X[:, 1].min() - 1, X[:, 1].max() + 1
    xx, yy = np.meshgrid(np.arange(x_lo, x_hi, step),
                         np.arange(y_lo, y_hi, step))
    # Forward pass over every grid point: ReLU hidden layer, then linear scores.
    grid_points = np.c_[xx.ravel(), yy.ravel()]
    hidden = np.maximum(0, np.dot(grid_points, W) + b)
    scores = np.dot(hidden, W2) + b2
    Z = np.argmax(scores, axis=1).reshape(xx.shape)
    fig = plt.figure()
    plt.contourf(xx, yy, Z, cmap=plt.cm.Spectral, alpha=0.8)
    plt.scatter(X[:, 0], X[:, 1], c=Y, s=10, cmap=plt.cm.Spectral)
    plt.xlim(xx.min(), xx.max())
    plt.ylim(yy.min(), yy.max())
    plt.show()
# Build the spiral dataset, train the classifier, then plot decision regions.
X,Y = createData()
W,b,W2,b2 = train(X,Y)
show(X,Y,W,b,W2,b2)
|
import numpy as np
def coin_tosses(n=10, p=0.5):
    """Toss a coin n times and return 1 if every toss is heads, else 0.

    :param n: number of tosses required to all come up heads (outcome 1).
    :param p: probability of tails (outcome 0); heads has probability 1 - p,
              so p=0.0 models a double-headed coin.
    :return: 1 if all n tosses were heads, 0 otherwise.
    """
    # Bug fix: the toss count was hard-coded as size=10 and ignored n.
    total = sum(np.random.choice(np.arange(2), size=n, p=[p, 1 - p]))
    return 1 if total == n else 0
# Monte-Carlo estimate of P(coin is unfair | 10 heads in a row), given a bag
# of 99 fair coins ("F") and 1 double-headed coin ("UF").
bag = ["F" for i in range(99)] + ["UF"]
a = 0 # number of times we see 10 heads in a row and coin was unfair
b = 0 # number of times we see 10 heads in a row and coin was fair
for _ in range(1000000):
    # Draw one coin uniformly at random from the bag.
    pick_coin = bag[np.random.randint(low=0, high=100)]
    if pick_coin == "F":
        b += coin_tosses(n=10, p=0.5)
    else:
        # p=0.0 means every toss comes up 1 ("heads"): the double-headed coin.
        a += coin_tosses(n=10, p=0.0)
# Posterior probability that a run of 10 heads came from the unfair coin.
print(a / (a + b))
|
# Puzzle input: block counts per memory bank (looks like Advent of Code 2017
# day 6 -- memory reallocation).
data = [4, 10, 4, 1, 8, 4, 9, 14, 5, 1, 14, 15, 0, 15, 3, 5]
# Worked example from the puzzle description (not used below).
data1 = [0, 2, 7, 0]
def reallocate(data):
    """Redistribute the largest memory bank's blocks round-robin, in place.

    The first bank holding the maximum wins ties; its blocks are handed out
    one at a time to the following banks, wrapping around. Returns the same
    (mutated) list.
    """
    source = data.index(max(data))
    blocks = data[source]
    data[source] = 0
    size = len(data)
    # Deal one block to each successive bank after the source, wrapping.
    for step in range(1, blocks + 1):
        data[(source + step) % size] += 1
    return data
# Part 1: keep redistributing until a previously seen configuration repeats.
# memoryState maps serialized configurations -> the step they were first seen.
memoryState = {}
stateCounter = 0
a = []
# Loop while (almost) every step produced a new configuration: len() stops
# growing once a duplicate key is inserted, so the second clause lets the
# loop run past the first repeat before the condition fails.
# NOTE(review): this appears to run one extra iteration after the first
# repeated state -- confirm the printed counts match the intended answers.
while stateCounter == len(memoryState) or stateCounter -1 == len(memoryState):
    stateCounter += 1
    data = reallocate(data)
    # Key the seen-set on the stringified bank configuration.
    a = [str(d) for d in data]
    memoryState["".join(a)] = stateCounter
print(data)
print(stateCounter)
# Part 2: restart counting from the current (repeated) configuration to
# measure the length of the cycle.
memoryState = {}
stateCounter = 0
while stateCounter == len(memoryState) or stateCounter -1 == len(memoryState):
    stateCounter += 1
    data = reallocate(data)
    a = [str(d) for d in data]
    memoryState["".join(a)] = stateCounter
print(data)
# NOTE(review): the -1 presumably compensates for the extra loop iteration
# above -- verify against the expected cycle length.
print(stateCounter - 1)
from django.http import Http404
class Post:
    """In-memory stand-in for a blog-post model, backed by a static list."""

    POSTS = [
        {'id':1, 'title': 'First post', 'body':'This is my first post'},
        {'id':2, 'title': 'Second post', 'body':'This is my second post'},
        {'id':3, 'title': 'Third post', 'body':'This is my Third post'},
    ]

    @classmethod
    def all(cls):
        """Return every post."""
        return cls.POSTS

    @classmethod
    def findById(cls, id):
        """Return the post with the given 1-based id, or raise Http404.

        Bug fixes: the original bare ``except`` swallowed every error, and a
        zero or negative id silently returned a post via Python's negative
        indexing instead of raising 404.
        """
        try:
            index = int(id) - 1
            if index < 0:
                raise IndexError(index)
            return cls.POSTS[index]
        except (ValueError, TypeError, IndexError):
            raise Http404('Sorry, post #{} not found'.format(id))
|
import numpy
import pandas
import random
from pymatgen.core.structure import Structure
from sklearn.svm import SVR
from sklearn.preprocessing import scale
import util.crystal_conv as cc
# Build one feature vector per crystal structure: ratio-weighted mean and
# spread of per-atom features, plus volume and two id_target columns.
list_crys = list()
id_target = numpy.array(pandas.read_csv('../data/crystal/nlhm/id_target.csv'))
num_train_ins = int(id_target.shape[0] * 0.8)  # 80/20 train/test split
for i in range(0, id_target.shape[0]):
    crys = Structure.from_file('../data/crystal/nlhm/' + id_target[i, 0] + '.cif')
    atoms = crys.atomic_numbers
    # Formula tokens like "Fe2" -> element symbol plus stoichiometric count.
    ratio = crys.formula.split(' ')
    r_avg = 0
    # Bug fix: numpy.empty() returns uninitialized memory; the += accumulation
    # below requires the accumulators to start at zero.
    atom_feats_mean = numpy.zeros(6)
    atom_feats_std = numpy.zeros(6)
    # Average stoichiometric count across the formula's elements.
    for j in range(0, len(ratio)):
        r_avg += int(''.join([k for k in ratio[j] if not k.isalpha()]))
    r_avg /= len(ratio)
    for j in range(0, len(ratio)):
        r_n = int(''.join([k for k in ratio[j] if not k.isalpha()]))
        # NOTE(review): atoms (crys.atomic_numbers) is per-site while j indexes
        # formula elements -- verify atoms[j] selects the intended element.
        atom_feats = cc.mat_atom_feats[atoms[j]-1, :]
        atom_feats_mean += r_n * atom_feats
        atom_feats_std += numpy.square((r_n - r_avg)) * atom_feats
    atom_feat = numpy.hstack([atom_feats_mean, atom_feats_std, crys.volume, id_target[i, 1], id_target[i, 2]])
    list_crys.append(atom_feat)
# Hyperparameter grid for the SVR: regularization strength C and
# epsilon-tube width.
cs = [0.5, 0.6, 0.7, 0.8, 0.9, 1.0, 1.1, 1.2, 1.3, 1.4, 1.5, 1.6, 1.7, 1.8, 1.9, 2.0]
es = [0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0]
best_mae = 1e+8
best_rmse = 1e+8
# Exhaustive grid search; the best MAE and best RMSE are tracked
# independently, so they may come from different (C, epsilon) pairs.
for c in cs:
    for e in es:
        svr = SVR(C=c, epsilon=e, gamma='auto')
        svr.fit(train_data_x, train_data_y)
        pred = svr.predict(test_data_x)
        mae = numpy.mean(numpy.abs(test_data_y - pred))
        rmse = numpy.sqrt(numpy.mean((test_data_y - pred)**2))
        print(mae, rmse)
        if mae < best_mae:
            best_mae = mae
        if rmse < best_rmse:
            best_rmse = rmse
print(best_mae, best_rmse)
|
"""
Creates a NIR image from .jpg upload by filtering out the blue bands.
Provides a custom bar to help users understand healthiness of plant.
"""
import os
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'django_web_app.settings')
import warnings
warnings.filterwarnings('ignore')
from django.core.files.images import ImageFile
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import numpy as np
import os.path
from matplotlib import colors, ticker
from matplotlib.colors import LinearSegmentedColormap
from PIL import Image as Img
from .models import Post
class NDVI(object):
    """Compute and render an NDVI (vegetation health) map from an RGB image.

    The red channel is used as near-infrared (NIR) and the blue/green
    channels approximate the visible band, so this assumes the upload came
    from a blue-filtered camera -- TODO confirm with the capture pipeline.
    """

    def __init__(self, file_path):
        # Fixed output filename; every conversion overwrites "NDVI".
        self.output_name = "NDVI"
        self.image = plt.imread(file_path)
        # Colormap anchors from unhealthy (gray/red) to healthy (green).
        self.colors = ['gray', 'gray', 'red', 'yellow', 'green']

    def create_colormap(self, *args):
        """Build a linear segmented colormap from the given color anchors."""
        return LinearSegmentedColormap.from_list(name='custom1', colors=args)

    def create_colorbar(self, fig, image):
        """Attach a small horizontal NDVI colorbar (range -1..1) to the figure."""
        position = fig.add_axes([0.125, 0.19, 0.2, 0.05])
        norm = colors.Normalize(vmin=-1., vmax=1.)
        cbar = plt.colorbar(image,
                            cax=position,
                            orientation='horizontal',
                            norm=norm)
        cbar.ax.tick_params(labelsize=6)
        tick_locator = ticker.MaxNLocator(nbins=3)
        cbar.locator = tick_locator
        cbar.update_ticks()
        cbar.set_label("NDVI", fontsize=10, x=0.5, y=0.5, labelpad=-25)

    def convert(self, file_path):
        """Compute the NDVI map, render it, save it, and create a Post record.

        ``file_path`` is unused (the image was loaded in __init__); kept for
        interface compatibility with existing callers.
        """
        NIR = (self.image[:, :, 0]).astype('float')
        blue = (self.image[:, :, 2]).astype('float')
        green = (self.image[:, :, 1]).astype('float')
        # Guard against division by zero where blue == green.
        bottom = (blue - green) ** 2
        bottom[bottom == 0] = 1
        VIS = (blue + green) ** 2 / bottom
        NDVI = (NIR - VIS) / (NIR + VIS)
        fig, ax = plt.subplots()
        image = ax.imshow(NDVI, cmap=self.create_colormap(*self.colors))
        plt.axis('off')
        self.create_colorbar(fig, image)
        # Crop the saved file to the image axes only (no margins).
        extent = ax.get_window_extent().transformed(fig.dpi_scale_trans.inverted())
        # Bug fixes: 'output_type' is not a matplotlib savefig argument (it is
        # a plotly parameter); savefig returns None, so assigning its result to
        # m.temp stored nothing; and the original saved the same file twice.
        fig.savefig(self.output_name, dpi=600, transparent=True,
                    bbox_inches=extent, pad_inches=0)
        m = Post.objects.create()
        m.save()
def main(request):
    """Entry point: run the NDVI conversion for the given upload path."""
    # NOTE(review): `request` is used as an image file path by NDVI -- confirm
    # callers pass a path-like value here.
    NDVI(request).convert(request)
|
#!/usr/bin/python3
#minimalist python pe library
import sys
import argparse
import struct
from Utils import spaces
import DOSHeader
import DOSHeaderDecoder
import PEHeaderDecoder
class PEHeader:
    """Model of the PE (COFF) file header.

    Decoded values live in ``attribute_list`` as ``("FieldName", value)``
    tuples; the Characteristics entry is a 3-tuple whose final element is
    the list of flag names that apply.
    """

    # Machine field value -> [constant name, human-readable description].
    __PEHeaderMachineTypes_dict = {
        0x0   :["IMAGE_FILE_MACHINE_UNKNOWN ","The contents of this field are assumed to be applicable to any machine type "],
        0x1d3 :["IMAGE_FILE_MACHINE_AM33 ","Matsushita AM33 "],
        0x8664:["IMAGE_FILE_MACHINE_AMD64 ","x64 "],
        0x1c0 :["IMAGE_FILE_MACHINE_ARM ","ARM little endian "],
        0xaa64:["IMAGE_FILE_MACHINE_ARM64 ","ARM64 little endian "],
        0x1c4 :["IMAGE_FILE_MACHINE_ARMNT ","ARM Thumb-2 little endian "],
        0xebc :["IMAGE_FILE_MACHINE_EBC ","EFI byte code "],
        0x14c :["IMAGE_FILE_MACHINE_I386 ","Intel 386 or later processors and compatible processors "],
        0x200 :["IMAGE_FILE_MACHINE_IA64 ","Intel Itanium processor family "],
        0x9041:["IMAGE_FILE_MACHINE_M32R ","Mitsubishi M32R little endian "],
        0x266 :["IMAGE_FILE_MACHINE_MIPS16 ","MIPS16 "],
        0x366 :["IMAGE_FILE_MACHINE_MIPSFPU ","MIPS with FPU "],
        0x466 :["IMAGE_FILE_MACHINE_MIPSFPU16 ","MIPS16 with FPU "],
        0x1f0 :["IMAGE_FILE_MACHINE_POWERPC ","Power PC little endian "],
        0x1f1 :["IMAGE_FILE_MACHINE_POWERPCFP","Power PC with floating point support "],
        0x166 :["IMAGE_FILE_MACHINE_R4000 ","MIPS little endian "],
        0x5032:["IMAGE_FILE_MACHINE_RISCV32 ","RISC-V 32-bit address space "],
        0x5064:["IMAGE_FILE_MACHINE_RISCV64 ","RISC-V 64-bit address space "],
        0x5128:["IMAGE_FILE_MACHINE_RISCV128 ","RISC-V 128-bit address space "],
        0x1a2 :["IMAGE_FILE_MACHINE_SH3 ","Hitachi SH3 "],
        0x1a3 :["IMAGE_FILE_MACHINE_SH3DSP ","Hitachi SH3 DSP "],
        0x1a6 :["IMAGE_FILE_MACHINE_SH4 ","Hitachi SH4 "],
        0x1a8 :["IMAGE_FILE_MACHINE_SH5 ","Hitachi SH5 "],
        0x1c2 :["IMAGE_FILE_MACHINE_THUMB ","Thumb "],
        0x169 :["IMAGE_FILE_MACHINE_WCEMIPSV2","MIPS little-endian WCE v2 "]}

    # Characteristics flag name -> hex bit value (as a string).
    __PEHeaderCharacsTypes_dict = {"IMAGE_FILE_RELOCS_STRIPPED":"0x0001",
                                   "IMAGE_FILE_EXECUTABLE_IMAGE":"0x0002",
                                   "IMAGE_FILE_LINE_NUMS_STRIPPED":"0x0004",
                                   "IMAGE_FILE_LOCAL_SYMS_STRIPPED":"0x0008",
                                   "IMAGE_FILE_AGGRESSIVE_WS_TRIM":"0x0010",
                                   "IMAGE_FILE_LARGE_ADDRESS_AWARE":"0x0020",
                                   "RESERVED":"0x0040",
                                   "IMAGE_FILE_BYTES_REVERSED_LO":"0x0080",
                                   "IMAGE_FILE_32BIT_MACHINE":"0x0100",
                                   "IMAGE_FILE_DEBUG_STRIPPED":"0x0200",
                                   "IMAGE_FILE_REMOVABLE_RUN_ FROM_SWAP ":"0x0400",
                                   "IMAGE_FILE_NET_RUN_FROM_SWAP":"0x0800",
                                   "IMAGE_FILE_SYSTEM ":"0x1000",
                                   "IMAGE_FILE_DLL ":"0x2000",
                                   "IMAGE_FILE_UP_SYSTEM_ONLY ":"0x4000",
                                   "IMAGE_FILE_BYTES_REVERSED_HI ":"0x8000"}

    # struct format character for each header field.
    __PEHeader_fmt_dict = {
        "Signature":"I",
        "Machine":"H",
        "NumberOfSections":"H",
        "TimeDateStamp":"I",
        "PointerToSymbolTable":"I",
        "NumberOfSymbols":"I",
        "SizeOfOptionalHeader":"H",
        "Characteristics":"H"}

    # Field order matches the on-disk PE header layout.
    __PEHeader_fields = ["Signature",
                         "Machine",
                         "NumberOfSections",
                         "TimeDateStamp",
                         "PointerToSymbolTable",
                         "NumberOfSymbols",
                         "SizeOfOptionalHeader",
                         "Characteristics"]

    def __init__(self,_DOSHeader=None):
        """Create an empty header model, optionally bound to a DOSHeader.

        _DOSHeader --- parsed DOS header; when given, its e_lfanew value is
        used as the PE header's file offset.
        """
        self.attribute_list = [("Signature",0),
                               ("Machine",0),
                               ("NumberOfSections",0),
                               ("TimeDateStamp",0),
                               ("PointerToSymbolTable",0),
                               ("NumberOfSymbols",0),
                               ("SizeOfOptionalHeader",0),
                               ("Characteristics",0,[])]  # trailing list holds the flag names that apply
        self.dos_header = _DOSHeader
        if (self.dos_header):
            self.set_offset(int(self.dos_header.get_e_lfanew(),16))
        self.header_fields = PEHeader.__PEHeader_fields
        self.header_fmt_dict = PEHeader.__PEHeader_fmt_dict
        self.pe_char_fields = PEHeader.__PEHeaderCharacsTypes_dict
        self.pe_machine_types = PEHeader.__PEHeaderMachineTypes_dict

    def _field(self, _name):
        """Return the attribute_list entry for the named header field."""
        return self.attribute_list[self.header_fields.index(_name)]

    def get_signature(self):
        return self._field("Signature")

    def get_siganture(self):
        # Misspelled name kept for backward compatibility with existing callers.
        return self.get_signature()

    def get_machine(self):
        return self._field("Machine")

    def get_numberofsections(self):
        # Added for consistency: this getter was missing in the original.
        return self._field("NumberOfSections")

    def get_timedatestamp(self):
        return self._field("TimeDateStamp")

    def get_pointertosymboltable(self):
        return self._field("PointerToSymbolTable")

    def get_numberofsymbols(self):
        return self._field("NumberOfSymbols")

    def get_sizeofoptionalheader(self):
        return self._field("SizeOfOptionalHeader")

    def get_characteristics(self):
        return self._field("Characteristics")

    def build_from_binary(self,_filename,_fileperms="rb"):
        """Parse the attribute_list straight from a binary file.

        _filename  --- filename to parse the PE header from
        _fileperms --- file mode to open the file under
        Returns self.attribute_list, a list of ("field name", value) tuples.
        """
        self.filename = _filename
        if (self.dos_header):
            # Bug fix: the original called an undefined module-level
            # build_from_dosheader() (with a kwarg the method does not take);
            # delegate to the bound method instead.
            return self.build_from_dosheader()
        peheader,length = PEHeaderDecoder.Decoder(_filename=_filename,
                                                  _fileperms=_fileperms)
        self.len = length
        # Only the first len(attribute_list) decoded values are header fields.
        for index,value in enumerate(peheader.decode()[:len(self.attribute_list)]):
            self.attribute_list[index] = (self.attribute_list[index][0],value)
        return self.attribute_list

    def get_offset(self):
        return self.offset

    def set_offset(self,_offset):
        self.offset = _offset

    def build_from_dosheader(self):
        """Parse the header from the file referenced by the bound DOSHeader.

        Returns self.attribute_list, or None when no DOSHeader is bound.
        """
        if (not(self.dos_header)):
            return None
        self.filename = self.dos_header.filename
        self.fileperms = self.dos_header.fileperms
        pedecoder = PEHeaderDecoder.Decoder(_filename=self.filename,
                                            _fileperms=self.fileperms)
        # NOTE(review): this slices the decode() result before unpacking to
        # (peheader, length), unlike build_from_binary which unpacks the
        # Decoder constructor -- verify against PEHeaderDecoder's API.
        peheader,length = pedecoder.decode(_start=self.offset)[:len(self.attribute_list)]
        self.len = length
        for index,value in enumerate(peheader):
            if (self.attribute_list[index][0] == "Characteristics"):
                try:
                    # Record every characteristics flag whose bit is set.
                    for char in self.pe_char_fields:
                        char_value = int(self.pe_char_fields[char],16)
                        if (value != 0 and (int(char_value) & value != 0)):
                            if len(self.attribute_list[index]) == 3:
                                self.attribute_list[index][2].append(char)
                            else:
                                self.attribute_list[index] = (self.attribute_list[index][0],
                                                              value,[char])
                except KeyError:
                    pass
            else:
                self.attribute_list[index] = (self.attribute_list[index][0],value)
        return self.attribute_list

    def __repr__(self):
        """Pretty multi-line dump of the parsed header fields."""
        doc_string = "\tPE header '%s'\n" % (self.filename)
        for index,field in enumerate(self.header_fields):
            pred = "\t|- %s =>%s[%s]\n"
            subj = "".join([field,hex(self.attribute_list[index][1])])
            _spaces = spaces(line_length=50,
                             predicate=len(pred),subject=len(subj))
            subj = (field,_spaces,
                    hex(self.attribute_list[index][1]))
            if (self.attribute_list[index][0] == "Machine"):
                field_name = self.attribute_list[index][0]
                machine_type_value = self.attribute_list[index][1]
                # NOTE(review): an unrecognized machine value raises KeyError here.
                machine_type_desc = self.pe_machine_types[machine_type_value][0]
                pred = "\t|- %s =>%s[ %s:'%s' ]\n"
                _spaces = spaces(line_length=50,
                                 predicate=len(pred),subject=len(subj))
                subj = (field_name,_spaces,
                        machine_type_value,machine_type_desc)
                doc_string += pred % (subj[0],subj[1],subj[2],subj[3])
            elif (self.attribute_list[index][0] == "Characteristics"
                  and len(self.attribute_list[index]) == 3):
                doc_string += "\tCharacteristics:\n"
                for charac in self.attribute_list[index][2]:
                    doc_string += "\t\t|-- [%s]\n" % (charac)
            else:
                doc_string += pred % subj
        return doc_string
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Author: Dusan Klinec, ph4r05
import os
import ctypes as ct
from trezor_crypto import trezor_ctypes as tt
from trezor_crypto import mod_base
# Handle to the loaded trezor-crypto shared library (a ctypes CDLL);
# populated by open_lib(), None until then.
CLIB = None
def open_lib(lib_path=None, try_env=False, no_init=False):
    """
    Locates, loads, and (optionally) initializes the trezor-crypto library.

    :param lib_path: explicit path to the shared library, if known
    :param try_env: fall back to $LIBTREZOR_CRYPTO_PATH when detection fails
    :param no_init: skip signature setup and RNG initialization
    :return: the loaded ctypes library (also stored in module-level CLIB)
    """
    global CLIB
    resolved = lib_path
    if resolved is None:
        base_name = 'tcry_ctype'
        mods, basedir = mod_base.get_ext_outputs()
        # Prefer the platform-specific module suffix reported by mod_base.
        preferred = '%s%s' % (base_name, mod_base.get_mod_suffix())
        if preferred in mods:
            resolved = os.path.join(basedir, preferred)
        else:
            # Otherwise accept any known shared-library extension.
            for suffix in ['.so', '.dylib', '.dll', '.pyd']:
                candidate = '%s%s' % (base_name, suffix)
                if candidate in mods:
                    resolved = os.path.join(basedir, candidate)
    if resolved is None and try_env:
        resolved = os.getenv('LIBTREZOR_CRYPTO_PATH', None)
    if resolved is None or not os.path.exists(resolved):
        raise FileNotFoundError('Trezor-Crypto lib not found')
    CLIB = ct.cdll.LoadLibrary(resolved)
    if not no_init:
        setup_lib(CLIB)
        init_lib()
    return CLIB
def cl():
    """
    Returns CLIB, the module-level library handle (None until open_lib() runs).
    :return:
    """
    return CLIB
def init_lib():
    """
    Initializes the Trezor crypto library's RNG via random_init().
    :return: the non-negative result code
    :raises ValueError: when random_init() reports failure (negative code)
    """
    ret = cl().random_init()
    if ret < 0:
        raise ValueError('Library initialization error: %s' % ret)
    return ret
def setup_lib(CLIB):
"""
Setup the CLIB - define fncs
:param CLIB:
:return:
"""
# size_t address_prefix_bytes_len(uint32_t address_type)
CLIB.address_prefix_bytes_len.argtypes = [tt.uint32_t]
CLIB.address_prefix_bytes_len.restype = tt.size_t
# void address_write_prefix_bytes(uint32_t address_type, uint8_t *out)
CLIB.address_write_prefix_bytes.argtypes = [tt.uint32_t, tt.POINTER(tt.uint8_t)]
# bool address_check_prefix(const uint8_t *addr, uint32_t address_type)
CLIB.address_check_prefix.argtypes = [tt.POINTER(tt.uint8_t), tt.uint32_t]
CLIB.address_check_prefix.restype = ct.c_bool
# char *base32_encode(const uint8_t *in, size_t inlen, char *out, size_t outlen, const char *alphabet)
CLIB.base32_encode.argtypes = [tt.POINTER(tt.uint8_t), tt.size_t, tt.POINTER(ct.c_byte), tt.size_t, tt.POINTER(ct.c_byte)]
CLIB.base32_encode.restype = tt.POINTER(ct.c_byte)
# void base32_encode_unsafe(const uint8_t *in, size_t inlen, uint8_t *out)
CLIB.base32_encode_unsafe.argtypes = [tt.POINTER(tt.uint8_t), tt.size_t, tt.POINTER(tt.uint8_t)]
# uint8_t *base32_decode(const char *in, size_t inlen, uint8_t *out, size_t outlen, const char *alphabet)
CLIB.base32_decode.argtypes = [tt.POINTER(ct.c_byte), tt.size_t, tt.POINTER(tt.uint8_t), tt.size_t, tt.POINTER(ct.c_byte)]
CLIB.base32_decode.restype = tt.POINTER(tt.uint8_t)
# bool base32_decode_unsafe(const uint8_t *in, size_t inlen, uint8_t *out, const char *alphabet)
CLIB.base32_decode_unsafe.argtypes = [tt.POINTER(tt.uint8_t), tt.size_t, tt.POINTER(tt.uint8_t), tt.POINTER(ct.c_byte)]
CLIB.base32_decode_unsafe.restype = ct.c_bool
# size_t base32_encoded_length(size_t inlen)
CLIB.base32_encoded_length.argtypes = [tt.size_t]
CLIB.base32_encoded_length.restype = tt.size_t
# size_t base32_decoded_length(size_t inlen)
CLIB.base32_decoded_length.argtypes = [tt.size_t]
CLIB.base32_decoded_length.restype = tt.size_t
# void sha1_Transform(const uint32_t *state_in, const uint32_t *data, uint32_t *state_out)
CLIB.sha1_Transform.argtypes = [tt.POINTER(tt.uint32_t), tt.POINTER(tt.uint32_t), tt.POINTER(tt.uint32_t)]
# void sha1_Init(SHA1_CTX *)
CLIB.sha1_Init.argtypes = [tt.POINTER(tt.SHA1_CTX)]
# void sha1_Update(SHA1_CTX *, const uint8_t *, size_t)
CLIB.sha1_Update.argtypes = [tt.POINTER(tt.SHA1_CTX), tt.POINTER(tt.uint8_t), tt.size_t]
# void sha1_Final(SHA1_CTX *, uint8_t [20])
CLIB.sha1_Final.argtypes = [tt.POINTER(tt.SHA1_CTX), tt.uint8_t * 20]
# char *sha1_End(SHA1_CTX *, char [(20 * 2) + 1])
CLIB.sha1_End.argtypes = [tt.POINTER(tt.SHA1_CTX), ct.c_byte * 41]
CLIB.sha1_End.restype = tt.POINTER(ct.c_byte)
# void sha1_Raw(const uint8_t *, size_t, uint8_t [20])
CLIB.sha1_Raw.argtypes = [tt.POINTER(tt.uint8_t), tt.size_t, tt.uint8_t * 20]
# char *sha1_Data(const uint8_t *, size_t, char [(20 * 2) + 1])
CLIB.sha1_Data.argtypes = [tt.POINTER(tt.uint8_t), tt.size_t, ct.c_byte * 41]
CLIB.sha1_Data.restype = tt.POINTER(ct.c_byte)
# void sha256_Transform(const uint32_t *state_in, const uint32_t *data, uint32_t *state_out)
CLIB.sha256_Transform.argtypes = [tt.POINTER(tt.uint32_t), tt.POINTER(tt.uint32_t), tt.POINTER(tt.uint32_t)]
# void sha256_Init(SHA256_CTX *)
CLIB.sha256_Init.argtypes = [tt.POINTER(tt.SHA256_CTX)]
# void sha256_Update(SHA256_CTX *, const uint8_t *, size_t)
CLIB.sha256_Update.argtypes = [tt.POINTER(tt.SHA256_CTX), tt.POINTER(tt.uint8_t), tt.size_t]
# void sha256_Final(SHA256_CTX *, uint8_t [32])
CLIB.sha256_Final.argtypes = [tt.POINTER(tt.SHA256_CTX), tt.uint8_t * 32]
# char *sha256_End(SHA256_CTX *, char [(32 * 2) + 1])
CLIB.sha256_End.argtypes = [tt.POINTER(tt.SHA256_CTX), ct.c_byte * 65]
CLIB.sha256_End.restype = tt.POINTER(ct.c_byte)
# void sha256_Raw(const uint8_t *, size_t, uint8_t [32])
CLIB.sha256_Raw.argtypes = [tt.POINTER(tt.uint8_t), tt.size_t, tt.uint8_t * 32]
# char *sha256_Data(const uint8_t *, size_t, char [(32 * 2) + 1])
CLIB.sha256_Data.argtypes = [tt.POINTER(tt.uint8_t), tt.size_t, ct.c_byte * 65]
CLIB.sha256_Data.restype = tt.POINTER(ct.c_byte)
# void sha512_Transform(const uint64_t *state_in, const uint64_t *data, uint64_t *state_out)
CLIB.sha512_Transform.argtypes = [tt.POINTER(tt.uint64_t), tt.POINTER(tt.uint64_t), tt.POINTER(tt.uint64_t)]
# void sha512_Init(SHA512_CTX *)
CLIB.sha512_Init.argtypes = [tt.POINTER(tt.SHA512_CTX)]
# void sha512_Update(SHA512_CTX *, const uint8_t *, size_t)
CLIB.sha512_Update.argtypes = [tt.POINTER(tt.SHA512_CTX), tt.POINTER(tt.uint8_t), tt.size_t]
# void sha512_Final(SHA512_CTX *, uint8_t [64])
CLIB.sha512_Final.argtypes = [tt.POINTER(tt.SHA512_CTX), tt.uint8_t * 64]
# char *sha512_End(SHA512_CTX *, char [(64 * 2) + 1])
CLIB.sha512_End.argtypes = [tt.POINTER(tt.SHA512_CTX), ct.c_byte * 129]
CLIB.sha512_End.restype = tt.POINTER(ct.c_byte)
# void sha512_Raw(const uint8_t *, size_t, uint8_t [64])
CLIB.sha512_Raw.argtypes = [tt.POINTER(tt.uint8_t), tt.size_t, tt.uint8_t * 64]
# char *sha512_Data(const uint8_t *, size_t, char [(64 * 2) + 1])
CLIB.sha512_Data.argtypes = [tt.POINTER(tt.uint8_t), tt.size_t, ct.c_byte * 129]
CLIB.sha512_Data.restype = tt.POINTER(ct.c_byte)
# void sha3_224_Init(SHA3_CTX *ctx)
CLIB.sha3_224_Init.argtypes = [tt.POINTER(tt.SHA3_CTX)]
# void sha3_256_Init(SHA3_CTX *ctx)
CLIB.sha3_256_Init.argtypes = [tt.POINTER(tt.SHA3_CTX)]
# void sha3_384_Init(SHA3_CTX *ctx)
CLIB.sha3_384_Init.argtypes = [tt.POINTER(tt.SHA3_CTX)]
# void sha3_512_Init(SHA3_CTX *ctx)
CLIB.sha3_512_Init.argtypes = [tt.POINTER(tt.SHA3_CTX)]
# void sha3_Update(SHA3_CTX *ctx, const char *msg, size_t size)
CLIB.sha3_Update.argtypes = [tt.POINTER(tt.SHA3_CTX), tt.POINTER(ct.c_ubyte), tt.size_t]
# void sha3_Final(SHA3_CTX *ctx, char *result)
CLIB.sha3_Final.argtypes = [tt.POINTER(tt.SHA3_CTX), tt.POINTER(ct.c_ubyte)]
# void keccak_Final(SHA3_CTX *ctx, char *result)
CLIB.keccak_Final.argtypes = [tt.POINTER(tt.SHA3_CTX), tt.POINTER(ct.c_ubyte)]
# void keccak_256(const char *data, size_t len, char *digest)
CLIB.keccak_256.argtypes = [tt.POINTER(ct.c_ubyte), tt.size_t, tt.POINTER(ct.c_ubyte)]
# void keccak_512(const char *data, size_t len, char *digest)
CLIB.keccak_512.argtypes = [tt.POINTER(ct.c_ubyte), tt.size_t, tt.POINTER(ct.c_ubyte)]
# void sha3_256(const char *data, size_t len, char *digest)
CLIB.sha3_256.argtypes = [tt.POINTER(ct.c_ubyte), tt.size_t, tt.POINTER(ct.c_ubyte)]
# void sha3_512(const char *data, size_t len, char *digest)
CLIB.sha3_512.argtypes = [tt.POINTER(ct.c_ubyte), tt.size_t, tt.POINTER(ct.c_ubyte)]
# void blake256_Init(BLAKE256_CTX *)
CLIB.blake256_Init.argtypes = [tt.POINTER(tt.BLAKE256_CTX)]
# void blake256_Update(BLAKE256_CTX *, const uint8_t *, size_t)
CLIB.blake256_Update.argtypes = [tt.POINTER(tt.BLAKE256_CTX), tt.POINTER(tt.uint8_t), tt.size_t]
# void blake256_Final(BLAKE256_CTX *, uint8_t *)
CLIB.blake256_Final.argtypes = [tt.POINTER(tt.BLAKE256_CTX), tt.POINTER(tt.uint8_t)]
# void blake256(const uint8_t *, size_t, uint8_t *)
CLIB.blake256.argtypes = [tt.POINTER(tt.uint8_t), tt.size_t, tt.POINTER(tt.uint8_t)]
# void groestl512_Init(void *cc)
CLIB.groestl512_Init.argtypes = [ct.c_void_p]
# void groestl512_Update(void *cc, const void *data, size_t len)
CLIB.groestl512_Update.argtypes = [ct.c_void_p, ct.c_void_p, tt.size_t]
# void groestl512_Final(void *cc, void *dst)
CLIB.groestl512_Final.argtypes = [ct.c_void_p, ct.c_void_p]
# void groestl512_DoubleTrunc(void *cc, void *dst)
CLIB.groestl512_DoubleTrunc.argtypes = [ct.c_void_p, ct.c_void_p]
# int blake2b_Init(blake2b_state *S, size_t outlen)
CLIB.blake2b_Init.argtypes = [tt.POINTER(tt.blake2b_state), tt.size_t]
CLIB.blake2b_Init.restype = ct.c_int
# int blake2b_InitKey(blake2b_state *S, size_t outlen, const void *key, size_t keylen)
CLIB.blake2b_InitKey.argtypes = [tt.POINTER(tt.blake2b_state), tt.size_t, ct.c_void_p, tt.size_t]
CLIB.blake2b_InitKey.restype = ct.c_int
# int blake2b_InitPersonal(blake2b_state *S, size_t outlen, const void *personal, size_t personal_len)
CLIB.blake2b_InitPersonal.argtypes = [tt.POINTER(tt.blake2b_state), tt.size_t, ct.c_void_p, tt.size_t]
CLIB.blake2b_InitPersonal.restype = ct.c_int
# int blake2b_Update(blake2b_state *S, const void *pin, size_t inlen)
CLIB.blake2b_Update.argtypes = [tt.POINTER(tt.blake2b_state), ct.c_void_p, tt.size_t]
CLIB.blake2b_Update.restype = ct.c_int
# int blake2b_Final(blake2b_state *S, void *out, size_t outlen)
CLIB.blake2b_Final.argtypes = [tt.POINTER(tt.blake2b_state), ct.c_void_p, tt.size_t]
CLIB.blake2b_Final.restype = ct.c_int
# int blake2b(const uint8_t *msg, uint32_t msg_len, void *out, size_t outlen)
CLIB.blake2b.argtypes = [tt.POINTER(tt.uint8_t), tt.uint32_t, ct.c_void_p, tt.size_t]
CLIB.blake2b.restype = ct.c_int
# int blake2b_Key(const uint8_t *msg, uint32_t msg_len, const void *key, size_t keylen, void *out, size_t outlen)
CLIB.blake2b_Key.argtypes = [tt.POINTER(tt.uint8_t), tt.uint32_t, ct.c_void_p, tt.size_t, ct.c_void_p, tt.size_t]
CLIB.blake2b_Key.restype = ct.c_int
# void hasher_Init(Hasher *hasher, HasherType type)
CLIB.hasher_Init.argtypes = [tt.POINTER(tt.Hasher), tt.HasherType]
# void hasher_Reset(Hasher *hasher)
CLIB.hasher_Reset.argtypes = [tt.POINTER(tt.Hasher)]
# void hasher_Update(Hasher *hasher, const uint8_t *data, size_t length)
CLIB.hasher_Update.argtypes = [tt.POINTER(tt.Hasher), tt.POINTER(tt.uint8_t), tt.size_t]
# void hasher_Final(Hasher *hasher, uint8_t hash[32])
CLIB.hasher_Final.argtypes = [tt.POINTER(tt.Hasher), tt.uint8_t * 32]
# void hasher_Raw(HasherType type, const uint8_t *data, size_t length, uint8_t hash[32])
CLIB.hasher_Raw.argtypes = [tt.HasherType, tt.POINTER(tt.uint8_t), tt.size_t, tt.uint8_t * 32]
# int base58_encode_check(const uint8_t *data, int len, HasherType hasher_type, char *str, int strsize)
CLIB.base58_encode_check.argtypes = [tt.POINTER(tt.uint8_t), ct.c_int, tt.HasherType, tt.POINTER(ct.c_byte), ct.c_int]
CLIB.base58_encode_check.restype = ct.c_int
# int base58_decode_check(const char *str, HasherType hasher_type, uint8_t *data, int datalen)
CLIB.base58_decode_check.argtypes = [tt.POINTER(ct.c_byte), tt.HasherType, tt.POINTER(tt.uint8_t), ct.c_int]
CLIB.base58_decode_check.restype = ct.c_int
# bool b58tobin(void *bin, size_t *binszp, const char *b58)
CLIB.b58tobin.argtypes = [ct.c_void_p, tt.POINTER(tt.size_t), tt.POINTER(ct.c_byte)]
CLIB.b58tobin.restype = ct.c_bool
# int b58check(const void *bin, size_t binsz, HasherType hasher_type, const char *base58str)
CLIB.b58check.argtypes = [ct.c_void_p, tt.size_t, tt.HasherType, tt.POINTER(ct.c_byte)]
CLIB.b58check.restype = ct.c_int
# bool b58enc(char *b58, size_t *b58sz, const void *data, size_t binsz)
CLIB.b58enc.argtypes = [tt.POINTER(ct.c_byte), tt.POINTER(tt.size_t), ct.c_void_p, tt.size_t]
CLIB.b58enc.restype = ct.c_bool
# uint32_t random32(void)
CLIB.random32.argtypes = []
CLIB.random32.restype = tt.uint32_t
# void random_buffer(uint8_t *buf, size_t len)
CLIB.random_buffer.argtypes = [tt.POINTER(tt.uint8_t), tt.size_t]
# uint32_t random_uniform(uint32_t n)
CLIB.random_uniform.argtypes = [tt.uint32_t]
CLIB.random_uniform.restype = tt.uint32_t
# void random_permute(char *buf, size_t len)
CLIB.random_permute.argtypes = [tt.POINTER(ct.c_byte), tt.size_t]
# int random_init(void)
CLIB.random_init.argtypes = []
CLIB.random_init.restype = ct.c_int
# void hmac_sha256_Init(HMAC_SHA256_CTX *hctx, const uint8_t *key, const uint32_t keylen)
CLIB.hmac_sha256_Init.argtypes = [tt.POINTER(tt.HMAC_SHA256_CTX), tt.POINTER(tt.uint8_t), tt.uint32_t]
# void hmac_sha256_Update(HMAC_SHA256_CTX *hctx, const uint8_t *msg, const uint32_t msglen)
CLIB.hmac_sha256_Update.argtypes = [tt.POINTER(tt.HMAC_SHA256_CTX), tt.POINTER(tt.uint8_t), tt.uint32_t]
# void hmac_sha256_Final(HMAC_SHA256_CTX *hctx, uint8_t *hmac)
CLIB.hmac_sha256_Final.argtypes = [tt.POINTER(tt.HMAC_SHA256_CTX), tt.POINTER(tt.uint8_t)]
# void hmac_sha256(const uint8_t *key, const uint32_t keylen, const uint8_t *msg, const uint32_t msglen, uint8_t *hmac)
CLIB.hmac_sha256.argtypes = [tt.POINTER(tt.uint8_t), tt.uint32_t, tt.POINTER(tt.uint8_t), tt.uint32_t, tt.POINTER(tt.uint8_t)]
# void hmac_sha256_prepare(const uint8_t *key, const uint32_t keylen, uint32_t *opad_digest, uint32_t *ipad_digest)
CLIB.hmac_sha256_prepare.argtypes = [tt.POINTER(tt.uint8_t), tt.uint32_t, tt.POINTER(tt.uint32_t), tt.POINTER(tt.uint32_t)]
# void hmac_sha512_Init(HMAC_SHA512_CTX *hctx, const uint8_t *key, const uint32_t keylen)
CLIB.hmac_sha512_Init.argtypes = [tt.POINTER(tt.HMAC_SHA512_CTX), tt.POINTER(tt.uint8_t), tt.uint32_t]
# --- ctypes signature declarations (auto-generated bindings to the trezor-crypto CLIB) ---
# Each declaration is preceded by the C prototype it mirrors; setting
# argtypes/restype keeps ctypes from mis-converting arguments or truncating
# return values. Functions with no explicit restype keep the ctypes default.
# void hmac_sha512_Update(HMAC_SHA512_CTX *hctx, const uint8_t *msg, const uint32_t msglen)
CLIB.hmac_sha512_Update.argtypes = [tt.POINTER(tt.HMAC_SHA512_CTX), tt.POINTER(tt.uint8_t), tt.uint32_t]
# void hmac_sha512_Final(HMAC_SHA512_CTX *hctx, uint8_t *hmac)
CLIB.hmac_sha512_Final.argtypes = [tt.POINTER(tt.HMAC_SHA512_CTX), tt.POINTER(tt.uint8_t)]
# void hmac_sha512(const uint8_t *key, const uint32_t keylen, const uint8_t *msg, const uint32_t msglen, uint8_t *hmac)
CLIB.hmac_sha512.argtypes = [tt.POINTER(tt.uint8_t), tt.uint32_t, tt.POINTER(tt.uint8_t), tt.uint32_t, tt.POINTER(tt.uint8_t)]
# void hmac_sha512_prepare(const uint8_t *key, const uint32_t keylen, uint64_t *opad_digest, uint64_t *ipad_digest)
CLIB.hmac_sha512_prepare.argtypes = [tt.POINTER(tt.uint8_t), tt.uint32_t, tt.POINTER(tt.uint64_t), tt.POINTER(tt.uint64_t)]
# void pbkdf2_hmac_sha256_Init(PBKDF2_HMAC_SHA256_CTX *pctx, const uint8_t *pass, int passlen, const uint8_t *salt, int saltlen)
CLIB.pbkdf2_hmac_sha256_Init.argtypes = [tt.POINTER(tt.PBKDF2_HMAC_SHA256_CTX), tt.POINTER(tt.uint8_t), ct.c_int, tt.POINTER(tt.uint8_t), ct.c_int]
# void pbkdf2_hmac_sha256_Update(PBKDF2_HMAC_SHA256_CTX *pctx, uint32_t iterations)
CLIB.pbkdf2_hmac_sha256_Update.argtypes = [tt.POINTER(tt.PBKDF2_HMAC_SHA256_CTX), tt.uint32_t]
# void pbkdf2_hmac_sha256_Final(PBKDF2_HMAC_SHA256_CTX *pctx, uint8_t *key)
CLIB.pbkdf2_hmac_sha256_Final.argtypes = [tt.POINTER(tt.PBKDF2_HMAC_SHA256_CTX), tt.POINTER(tt.uint8_t)]
# void pbkdf2_hmac_sha256(const uint8_t *pass, int passlen, const uint8_t *salt, int saltlen, uint32_t iterations, uint8_t *key)
CLIB.pbkdf2_hmac_sha256.argtypes = [tt.POINTER(tt.uint8_t), ct.c_int, tt.POINTER(tt.uint8_t), ct.c_int, tt.uint32_t, tt.POINTER(tt.uint8_t)]
# void pbkdf2_hmac_sha512_Init(PBKDF2_HMAC_SHA512_CTX *pctx, const uint8_t *pass, int passlen, const uint8_t *salt, int saltlen)
CLIB.pbkdf2_hmac_sha512_Init.argtypes = [tt.POINTER(tt.PBKDF2_HMAC_SHA512_CTX), tt.POINTER(tt.uint8_t), ct.c_int, tt.POINTER(tt.uint8_t), ct.c_int]
# void pbkdf2_hmac_sha512_Update(PBKDF2_HMAC_SHA512_CTX *pctx, uint32_t iterations)
CLIB.pbkdf2_hmac_sha512_Update.argtypes = [tt.POINTER(tt.PBKDF2_HMAC_SHA512_CTX), tt.uint32_t]
# void pbkdf2_hmac_sha512_Final(PBKDF2_HMAC_SHA512_CTX *pctx, uint8_t *key)
CLIB.pbkdf2_hmac_sha512_Final.argtypes = [tt.POINTER(tt.PBKDF2_HMAC_SHA512_CTX), tt.POINTER(tt.uint8_t)]
# void pbkdf2_hmac_sha512(const uint8_t *pass, int passlen, const uint8_t *salt, int saltlen, uint32_t iterations, uint8_t *key)
CLIB.pbkdf2_hmac_sha512.argtypes = [tt.POINTER(tt.uint8_t), ct.c_int, tt.POINTER(tt.uint8_t), ct.c_int, tt.uint32_t, tt.POINTER(tt.uint8_t)]
# --- raw big/little-endian 32-bit load/store helpers ---
# uint32_t read_be(const uint8_t *data)
CLIB.read_be.argtypes = [tt.POINTER(tt.uint8_t)]
CLIB.read_be.restype = tt.uint32_t
# void write_be(uint8_t *data, uint32_t x)
CLIB.write_be.argtypes = [tt.POINTER(tt.uint8_t), tt.uint32_t]
# uint32_t read_le(const uint8_t *data)
CLIB.read_le.argtypes = [tt.POINTER(tt.uint8_t)]
CLIB.read_le.restype = tt.uint32_t
# void write_le(uint8_t *data, uint32_t x)
CLIB.write_le.argtypes = [tt.POINTER(tt.uint8_t), tt.uint32_t]
# --- 256-bit big-number (bignum256) arithmetic ---
# void bn_read_be(const uint8_t *in_number, bignum256 *out_number)
CLIB.bn_read_be.argtypes = [tt.POINTER(tt.uint8_t), tt.POINTER(tt.bignum256)]
# void bn_write_be(const bignum256 *in_number, uint8_t *out_number)
CLIB.bn_write_be.argtypes = [tt.POINTER(tt.bignum256), tt.POINTER(tt.uint8_t)]
# void bn_read_le(const uint8_t *in_number, bignum256 *out_number)
CLIB.bn_read_le.argtypes = [tt.POINTER(tt.uint8_t), tt.POINTER(tt.bignum256)]
# void bn_write_le(const bignum256 *in_number, uint8_t *out_number)
CLIB.bn_write_le.argtypes = [tt.POINTER(tt.bignum256), tt.POINTER(tt.uint8_t)]
# void bn_read_uint32(uint32_t in_number, bignum256 *out_number)
CLIB.bn_read_uint32.argtypes = [tt.uint32_t, tt.POINTER(tt.bignum256)]
# void bn_read_uint64(uint64_t in_number, bignum256 *out_number)
CLIB.bn_read_uint64.argtypes = [tt.uint64_t, tt.POINTER(tt.bignum256)]
# int bn_bitcount(const bignum256 *a)
CLIB.bn_bitcount.argtypes = [tt.POINTER(tt.bignum256)]
CLIB.bn_bitcount.restype = ct.c_int
# int bn_digitcount(const bignum256 *a)
# Fixed: the C prototype returns a plain int, so the restype must be
# ct.c_int (the generated binding used ct.c_uint, inconsistent with
# bn_bitcount and every other int-returning binding in this file).
CLIB.bn_digitcount.argtypes = [tt.POINTER(tt.bignum256)]
CLIB.bn_digitcount.restype = ct.c_int
# void bn_zero(bignum256 *a)
CLIB.bn_zero.argtypes = [tt.POINTER(tt.bignum256)]
# int bn_is_zero(const bignum256 *a)
CLIB.bn_is_zero.argtypes = [tt.POINTER(tt.bignum256)]
CLIB.bn_is_zero.restype = ct.c_int
# void bn_one(bignum256 *a)
CLIB.bn_one.argtypes = [tt.POINTER(tt.bignum256)]
# int bn_is_less(const bignum256 *a, const bignum256 *b)
CLIB.bn_is_less.argtypes = [tt.POINTER(tt.bignum256), tt.POINTER(tt.bignum256)]
CLIB.bn_is_less.restype = ct.c_int
# int bn_is_equal(const bignum256 *a, const bignum256 *b)
CLIB.bn_is_equal.argtypes = [tt.POINTER(tt.bignum256), tt.POINTER(tt.bignum256)]
CLIB.bn_is_equal.restype = ct.c_int
# void bn_cmov(bignum256 *res, int cond, const bignum256 *truecase, const bignum256 *falsecase)
CLIB.bn_cmov.argtypes = [tt.POINTER(tt.bignum256), ct.c_int, tt.POINTER(tt.bignum256), tt.POINTER(tt.bignum256)]
# void bn_lshift(bignum256 *a)
CLIB.bn_lshift.argtypes = [tt.POINTER(tt.bignum256)]
# void bn_rshift(bignum256 *a)
CLIB.bn_rshift.argtypes = [tt.POINTER(tt.bignum256)]
# void bn_setbit(bignum256 *a, uint8_t bit)
CLIB.bn_setbit.argtypes = [tt.POINTER(tt.bignum256), tt.uint8_t]
# void bn_clearbit(bignum256 *a, uint8_t bit)
CLIB.bn_clearbit.argtypes = [tt.POINTER(tt.bignum256), tt.uint8_t]
# uint32_t bn_testbit(bignum256 *a, uint8_t bit)
CLIB.bn_testbit.argtypes = [tt.POINTER(tt.bignum256), tt.uint8_t]
CLIB.bn_testbit.restype = tt.uint32_t
# void bn_xor(bignum256 *a, const bignum256 *b, const bignum256 *c)
CLIB.bn_xor.argtypes = [tt.POINTER(tt.bignum256), tt.POINTER(tt.bignum256), tt.POINTER(tt.bignum256)]
# void bn_mult_half(bignum256 *x, const bignum256 *prime)
CLIB.bn_mult_half.argtypes = [tt.POINTER(tt.bignum256), tt.POINTER(tt.bignum256)]
# void bn_mult_k(bignum256 *x, uint8_t k, const bignum256 *prime)
CLIB.bn_mult_k.argtypes = [tt.POINTER(tt.bignum256), tt.uint8_t, tt.POINTER(tt.bignum256)]
# void bn_mod(bignum256 *x, const bignum256 *prime)
CLIB.bn_mod.argtypes = [tt.POINTER(tt.bignum256), tt.POINTER(tt.bignum256)]
# void bn_multiply(const bignum256 *k, bignum256 *x, const bignum256 *prime)
CLIB.bn_multiply.argtypes = [tt.POINTER(tt.bignum256), tt.POINTER(tt.bignum256), tt.POINTER(tt.bignum256)]
# void bn_fast_mod(bignum256 *x, const bignum256 *prime)
CLIB.bn_fast_mod.argtypes = [tt.POINTER(tt.bignum256), tt.POINTER(tt.bignum256)]
# void bn_sqrt(bignum256 *x, const bignum256 *prime)
CLIB.bn_sqrt.argtypes = [tt.POINTER(tt.bignum256), tt.POINTER(tt.bignum256)]
# void bn_inverse(bignum256 *x, const bignum256 *prime)
CLIB.bn_inverse.argtypes = [tt.POINTER(tt.bignum256), tt.POINTER(tt.bignum256)]
# void bn_normalize(bignum256 *a)
CLIB.bn_normalize.argtypes = [tt.POINTER(tt.bignum256)]
# void bn_add(bignum256 *a, const bignum256 *b)
CLIB.bn_add.argtypes = [tt.POINTER(tt.bignum256), tt.POINTER(tt.bignum256)]
# void bn_addmod(bignum256 *a, const bignum256 *b, const bignum256 *prime)
CLIB.bn_addmod.argtypes = [tt.POINTER(tt.bignum256), tt.POINTER(tt.bignum256), tt.POINTER(tt.bignum256)]
# void bn_addi(bignum256 *a, uint32_t b)
CLIB.bn_addi.argtypes = [tt.POINTER(tt.bignum256), tt.uint32_t]
# void bn_subi(bignum256 *a, uint32_t b, const bignum256 *prime)
CLIB.bn_subi.argtypes = [tt.POINTER(tt.bignum256), tt.uint32_t, tt.POINTER(tt.bignum256)]
# void bn_subtractmod(const bignum256 *a, const bignum256 *b, bignum256 *res, const bignum256 *prime)
CLIB.bn_subtractmod.argtypes = [tt.POINTER(tt.bignum256), tt.POINTER(tt.bignum256), tt.POINTER(tt.bignum256), tt.POINTER(tt.bignum256)]
# void bn_subtract(const bignum256 *a, const bignum256 *b, bignum256 *res)
CLIB.bn_subtract.argtypes = [tt.POINTER(tt.bignum256), tt.POINTER(tt.bignum256), tt.POINTER(tt.bignum256)]
# void bn_divmod58(bignum256 *a, uint32_t *r)
CLIB.bn_divmod58.argtypes = [tt.POINTER(tt.bignum256), tt.POINTER(tt.uint32_t)]
# void bn_divmod1000(bignum256 *a, uint32_t *r)
CLIB.bn_divmod1000.argtypes = [tt.POINTER(tt.bignum256), tt.POINTER(tt.uint32_t)]
# size_t bn_format(const bignum256 *amnt, const char *prefix, const char *suffix, int decimals, int exponent, bool trailing, char *out, size_t outlen)
# NOTE(review): the prototype comment above says `int decimals` but the
# binding passes ct.c_uint for it — confirm against the generated C header
# (upstream trezor-crypto declares `unsigned int decimals`).
CLIB.bn_format.argtypes = [tt.POINTER(tt.bignum256), tt.POINTER(ct.c_byte), tt.POINTER(ct.c_byte), ct.c_uint, ct.c_int, ct.c_bool, tt.POINTER(ct.c_byte), tt.size_t]
CLIB.bn_format.restype = tt.size_t
# --- ChaCha20 (ECRYPT API), Poly1305 and ChaCha20-Poly1305 AEAD ---
# void ECRYPT_init(void)
CLIB.ECRYPT_init.argtypes = []
# void ECRYPT_keysetup(ECRYPT_ctx *ctx, const u8 *key, u32 keysize, u32 ivsize)
CLIB.ECRYPT_keysetup.argtypes = [tt.POINTER(tt.ECRYPT_ctx), tt.POINTER(tt.u8), tt.u32, tt.u32]
# void ECRYPT_ivsetup(ECRYPT_ctx *ctx, const u8 *iv)
CLIB.ECRYPT_ivsetup.argtypes = [tt.POINTER(tt.ECRYPT_ctx), tt.POINTER(tt.u8)]
# void ECRYPT_encrypt_bytes(ECRYPT_ctx *ctx, const u8 *plaintext, u8 *ciphertext, u32 msglen)
CLIB.ECRYPT_encrypt_bytes.argtypes = [tt.POINTER(tt.ECRYPT_ctx), tt.POINTER(tt.u8), tt.POINTER(tt.u8), tt.u32]
# void ECRYPT_decrypt_bytes(ECRYPT_ctx *ctx, const u8 *ciphertext, u8 *plaintext, u32 msglen)
CLIB.ECRYPT_decrypt_bytes.argtypes = [tt.POINTER(tt.ECRYPT_ctx), tt.POINTER(tt.u8), tt.POINTER(tt.u8), tt.u32]
# void ECRYPT_keystream_bytes(ECRYPT_ctx *ctx, u8 *keystream, u32 length)
CLIB.ECRYPT_keystream_bytes.argtypes = [tt.POINTER(tt.ECRYPT_ctx), tt.POINTER(tt.u8), tt.u32]
# void poly1305_init(poly1305_context *ctx, const char key[32])
CLIB.poly1305_init.argtypes = [tt.POINTER(tt.poly1305_context), ct.c_ubyte * 32]
# void poly1305_update(poly1305_context *ctx, const char *m, size_t bytes)
CLIB.poly1305_update.argtypes = [tt.POINTER(tt.poly1305_context), tt.POINTER(ct.c_ubyte), tt.size_t]
# void poly1305_finish(poly1305_context *ctx, char mac[16])
CLIB.poly1305_finish.argtypes = [tt.POINTER(tt.poly1305_context), ct.c_ubyte * 16]
# void poly1305_auth(char mac[16], const char *m, size_t bytes, const char key[32])
CLIB.poly1305_auth.argtypes = [ct.c_ubyte * 16, tt.POINTER(ct.c_ubyte), tt.size_t, ct.c_ubyte * 32]
# int poly1305_verify(const char mac1[16], const char mac2[16])
CLIB.poly1305_verify.argtypes = [ct.c_ubyte * 16, ct.c_ubyte * 16]
CLIB.poly1305_verify.restype = ct.c_int
# int poly1305_power_on_self_test(void)
CLIB.poly1305_power_on_self_test.argtypes = []
CLIB.poly1305_power_on_self_test.restype = ct.c_int
# void xchacha20poly1305_init(chacha20poly1305_ctx *ctx, uint8_t key[32], uint8_t nonce[24])
CLIB.xchacha20poly1305_init.argtypes = [tt.POINTER(tt.chacha20poly1305_ctx), tt.uint8_t * 32, tt.uint8_t * 24]
# void chacha20poly1305_encrypt(chacha20poly1305_ctx *ctx, uint8_t *in, uint8_t *out, size_t n)
CLIB.chacha20poly1305_encrypt.argtypes = [tt.POINTER(tt.chacha20poly1305_ctx), tt.POINTER(tt.uint8_t), tt.POINTER(tt.uint8_t), tt.size_t]
# void chacha20poly1305_decrypt(chacha20poly1305_ctx *ctx, uint8_t *in, uint8_t *out, size_t n)
CLIB.chacha20poly1305_decrypt.argtypes = [tt.POINTER(tt.chacha20poly1305_ctx), tt.POINTER(tt.uint8_t), tt.POINTER(tt.uint8_t), tt.size_t]
# void chacha20poly1305_auth(chacha20poly1305_ctx *ctx, uint8_t *in, size_t n)
CLIB.chacha20poly1305_auth.argtypes = [tt.POINTER(tt.chacha20poly1305_ctx), tt.POINTER(tt.uint8_t), tt.size_t]
# void chacha20poly1305_finish(chacha20poly1305_ctx *ctx, uint8_t mac[16])
CLIB.chacha20poly1305_finish.argtypes = [tt.POINTER(tt.chacha20poly1305_ctx), tt.uint8_t * 16]
# void rfc7539_init(chacha20poly1305_ctx *ctx, uint8_t key[32], uint8_t nonce[12])
CLIB.rfc7539_init.argtypes = [tt.POINTER(tt.chacha20poly1305_ctx), tt.uint8_t * 32, tt.uint8_t * 12]
# void rfc7539_auth(chacha20poly1305_ctx *ctx, uint8_t *in, size_t n)
CLIB.rfc7539_auth.argtypes = [tt.POINTER(tt.chacha20poly1305_ctx), tt.POINTER(tt.uint8_t), tt.size_t]
# void rfc7539_finish(chacha20poly1305_ctx *ctx, int64_t alen, int64_t plen, uint8_t mac[16])
CLIB.rfc7539_finish.argtypes = [tt.POINTER(tt.chacha20poly1305_ctx), tt.int64_t, tt.int64_t, tt.uint8_t * 16]
# --- Monero (xmr) base58 address encoding and varint helpers ---
# int xmr_base58_addr_encode_check(uint64_t tag, const uint8_t *data, size_t binsz, char *b58, size_t b58sz)
CLIB.xmr_base58_addr_encode_check.argtypes = [tt.uint64_t, tt.POINTER(tt.uint8_t), tt.size_t, tt.POINTER(ct.c_byte), tt.size_t]
CLIB.xmr_base58_addr_encode_check.restype = ct.c_int
# int xmr_base58_addr_decode_check(const char *addr, size_t sz, uint64_t *tag, void *data, size_t datalen)
CLIB.xmr_base58_addr_decode_check.argtypes = [tt.POINTER(ct.c_byte), tt.size_t, tt.POINTER(tt.uint64_t), ct.c_void_p, tt.size_t]
CLIB.xmr_base58_addr_decode_check.restype = ct.c_int
# bool xmr_base58_encode(char *b58, size_t *b58sz, const void *data, size_t binsz)
CLIB.xmr_base58_encode.argtypes = [tt.POINTER(ct.c_byte), tt.POINTER(tt.size_t), ct.c_void_p, tt.size_t]
CLIB.xmr_base58_encode.restype = ct.c_bool
# bool xmr_base58_decode(const char *b58, size_t b58sz, void *data, size_t *binsz)
CLIB.xmr_base58_decode.argtypes = [tt.POINTER(ct.c_byte), tt.size_t, ct.c_void_p, tt.POINTER(tt.size_t)]
CLIB.xmr_base58_decode.restype = ct.c_bool
# int xmr_size_varint(uint64_t num)
CLIB.xmr_size_varint.argtypes = [tt.uint64_t]
CLIB.xmr_size_varint.restype = ct.c_int
# int xmr_write_varint(uint8_t *buff, size_t buff_size, uint64_t num)
CLIB.xmr_write_varint.argtypes = [tt.POINTER(tt.uint8_t), tt.size_t, tt.uint64_t]
CLIB.xmr_write_varint.restype = ct.c_int
# int xmr_read_varint(uint8_t *buff, size_t buff_size, uint64_t *val)
CLIB.xmr_read_varint.argtypes = [tt.POINTER(tt.uint8_t), tt.size_t, tt.POINTER(tt.uint64_t)]
CLIB.xmr_read_varint.restype = ct.c_int
# --- curve25519 field arithmetic (bignum25519; C arrays, passed by value of the array type) ---
# void curve25519_copy(bignum25519 out, const bignum25519 in)
CLIB.curve25519_copy.argtypes = [tt.bignum25519, tt.bignum25519]
# void curve25519_add(bignum25519 out, const bignum25519 a, const bignum25519 b)
CLIB.curve25519_add.argtypes = [tt.bignum25519, tt.bignum25519, tt.bignum25519]
# void curve25519_add_after_basic(bignum25519 out, const bignum25519 a, const bignum25519 b)
CLIB.curve25519_add_after_basic.argtypes = [tt.bignum25519, tt.bignum25519, tt.bignum25519]
# void curve25519_add_reduce(bignum25519 out, const bignum25519 a, const bignum25519 b)
CLIB.curve25519_add_reduce.argtypes = [tt.bignum25519, tt.bignum25519, tt.bignum25519]
# void curve25519_sub(bignum25519 out, const bignum25519 a, const bignum25519 b)
CLIB.curve25519_sub.argtypes = [tt.bignum25519, tt.bignum25519, tt.bignum25519]
# void curve25519_scalar_product(bignum25519 out, const bignum25519 in, const uint32_t scalar)
CLIB.curve25519_scalar_product.argtypes = [tt.bignum25519, tt.bignum25519, tt.uint32_t]
# void curve25519_sub_after_basic(bignum25519 out, const bignum25519 a, const bignum25519 b)
CLIB.curve25519_sub_after_basic.argtypes = [tt.bignum25519, tt.bignum25519, tt.bignum25519]
# void curve25519_sub_reduce(bignum25519 out, const bignum25519 a, const bignum25519 b)
CLIB.curve25519_sub_reduce.argtypes = [tt.bignum25519, tt.bignum25519, tt.bignum25519]
# void curve25519_neg(bignum25519 out, const bignum25519 a)
CLIB.curve25519_neg.argtypes = [tt.bignum25519, tt.bignum25519]
# void curve25519_mul(bignum25519 out, const bignum25519 a, const bignum25519 b)
CLIB.curve25519_mul.argtypes = [tt.bignum25519, tt.bignum25519, tt.bignum25519]
# void curve25519_square(bignum25519 out, const bignum25519 in)
CLIB.curve25519_square.argtypes = [tt.bignum25519, tt.bignum25519]
# void curve25519_square_times(bignum25519 out, const bignum25519 in, int count)
CLIB.curve25519_square_times.argtypes = [tt.bignum25519, tt.bignum25519, ct.c_int]
# void curve25519_expand(bignum25519 out, const char in[32])
CLIB.curve25519_expand.argtypes = [tt.bignum25519, ct.c_ubyte * 32]
# void curve25519_contract(char out[32], const bignum25519 in)
CLIB.curve25519_contract.argtypes = [ct.c_ubyte * 32, tt.bignum25519]
# void curve25519_swap_conditional(bignum25519 a, bignum25519 b, uint32_t iswap)
CLIB.curve25519_swap_conditional.argtypes = [tt.bignum25519, tt.bignum25519, tt.uint32_t]
# void curve25519_pow_two5mtwo0_two250mtwo0(bignum25519 b)
CLIB.curve25519_pow_two5mtwo0_two250mtwo0.argtypes = [tt.bignum25519]
# void curve25519_recip(bignum25519 out, const bignum25519 z)
CLIB.curve25519_recip.argtypes = [tt.bignum25519, tt.bignum25519]
# void curve25519_pow_two252m3(bignum25519 two252m3, const bignum25519 z)
CLIB.curve25519_pow_two252m3.argtypes = [tt.bignum25519, tt.bignum25519]
# --- arithmetic modulo the ed25519 group order (bignum256modm) ---
# void reduce256_modm(bignum256modm r)
CLIB.reduce256_modm.argtypes = [tt.bignum256modm]
# void barrett_reduce256_modm(bignum256modm r, const bignum256modm q1, const bignum256modm r1)
CLIB.barrett_reduce256_modm.argtypes = [tt.bignum256modm, tt.bignum256modm, tt.bignum256modm]
# void add256_modm(bignum256modm r, const bignum256modm x, const bignum256modm y)
CLIB.add256_modm.argtypes = [tt.bignum256modm, tt.bignum256modm, tt.bignum256modm]
# void neg256_modm(bignum256modm r, const bignum256modm x)
CLIB.neg256_modm.argtypes = [tt.bignum256modm, tt.bignum256modm]
# void sub256_modm(bignum256modm r, const bignum256modm x, const bignum256modm y)
CLIB.sub256_modm.argtypes = [tt.bignum256modm, tt.bignum256modm, tt.bignum256modm]
# void mul256_modm(bignum256modm r, const bignum256modm x, const bignum256modm y)
CLIB.mul256_modm.argtypes = [tt.bignum256modm, tt.bignum256modm, tt.bignum256modm]
# void expand256_modm(bignum256modm out, const char *in, size_t len)
CLIB.expand256_modm.argtypes = [tt.bignum256modm, tt.POINTER(ct.c_ubyte), tt.size_t]
# void expand_raw256_modm(bignum256modm out, const char in[32])
CLIB.expand_raw256_modm.argtypes = [tt.bignum256modm, ct.c_ubyte * 32]
# int is_reduced256_modm(const bignum256modm in)
CLIB.is_reduced256_modm.argtypes = [tt.bignum256modm]
CLIB.is_reduced256_modm.restype = ct.c_int
# void contract256_modm(char out[32], const bignum256modm in)
CLIB.contract256_modm.argtypes = [ct.c_ubyte * 32, tt.bignum256modm]
# void contract256_window4_modm(char r[64], const bignum256modm in)
CLIB.contract256_window4_modm.argtypes = [ct.c_byte * 64, tt.bignum256modm]
# void contract256_slidingwindow_modm(char r[256], const bignum256modm s, int windowsize)
CLIB.contract256_slidingwindow_modm.argtypes = [ct.c_byte * 256, tt.bignum256modm, ct.c_int]
# void set256_modm(bignum256modm r, uint64_t v)
CLIB.set256_modm.argtypes = [tt.bignum256modm, tt.uint64_t]
# int get256_modm(uint64_t *v, const bignum256modm r)
CLIB.get256_modm.argtypes = [tt.POINTER(tt.uint64_t), tt.bignum256modm]
CLIB.get256_modm.restype = ct.c_int
# int eq256_modm(const bignum256modm x, const bignum256modm y)
CLIB.eq256_modm.argtypes = [tt.bignum256modm, tt.bignum256modm]
CLIB.eq256_modm.restype = ct.c_int
# int cmp256_modm(const bignum256modm x, const bignum256modm y)
CLIB.cmp256_modm.argtypes = [tt.bignum256modm, tt.bignum256modm]
CLIB.cmp256_modm.restype = ct.c_int
# int iszero256_modm(const bignum256modm x)
CLIB.iszero256_modm.argtypes = [tt.bignum256modm]
CLIB.iszero256_modm.restype = ct.c_int
# void copy256_modm(bignum256modm r, const bignum256modm x)
CLIB.copy256_modm.argtypes = [tt.bignum256modm, tt.bignum256modm]
# int check256_modm(const bignum256modm x)
CLIB.check256_modm.argtypes = [tt.bignum256modm]
CLIB.check256_modm.restype = ct.c_int
# void mulsub256_modm(bignum256modm r, const bignum256modm a, const bignum256modm b, const bignum256modm c)
CLIB.mulsub256_modm.argtypes = [tt.bignum256modm, tt.bignum256modm, tt.bignum256modm, tt.bignum256modm]
# void muladd256_modm(bignum256modm r, const bignum256modm a, const bignum256modm b, const bignum256modm c)
CLIB.muladd256_modm.argtypes = [tt.bignum256modm, tt.bignum256modm, tt.bignum256modm, tt.bignum256modm]
# --- ed25519 group element (ge25519) operations ---
# int ed25519_verify(const char *x, const char *y, size_t len)
CLIB.ed25519_verify.argtypes = [tt.POINTER(ct.c_ubyte), tt.POINTER(ct.c_ubyte), tt.size_t]
CLIB.ed25519_verify.restype = ct.c_int
# void ge25519_p1p1_to_partial(ge25519 *r, const ge25519_p1p1 *p)
CLIB.ge25519_p1p1_to_partial.argtypes = [tt.POINTER(tt.ge25519), tt.POINTER(tt.ge25519_p1p1)]
# void ge25519_p1p1_to_full(ge25519 *r, const ge25519_p1p1 *p)
CLIB.ge25519_p1p1_to_full.argtypes = [tt.POINTER(tt.ge25519), tt.POINTER(tt.ge25519_p1p1)]
# void ge25519_full_to_pniels(ge25519_pniels *p, const ge25519 *r)
CLIB.ge25519_full_to_pniels.argtypes = [tt.POINTER(tt.ge25519_pniels), tt.POINTER(tt.ge25519)]
# void ge25519_double_p1p1(ge25519_p1p1 *r, const ge25519 *p)
CLIB.ge25519_double_p1p1.argtypes = [tt.POINTER(tt.ge25519_p1p1), tt.POINTER(tt.ge25519)]
# void ge25519_nielsadd2_p1p1(ge25519_p1p1 *r, const ge25519 *p, const ge25519_niels *q, char signbit)
CLIB.ge25519_nielsadd2_p1p1.argtypes = [tt.POINTER(tt.ge25519_p1p1), tt.POINTER(tt.ge25519), tt.POINTER(tt.ge25519_niels), ct.c_ubyte]
# void ge25519_pnielsadd_p1p1(ge25519_p1p1 *r, const ge25519 *p, const ge25519_pniels *q, char signbit)
CLIB.ge25519_pnielsadd_p1p1.argtypes = [tt.POINTER(tt.ge25519_p1p1), tt.POINTER(tt.ge25519), tt.POINTER(tt.ge25519_pniels), ct.c_ubyte]
# void ge25519_double_partial(ge25519 *r, const ge25519 *p)
CLIB.ge25519_double_partial.argtypes = [tt.POINTER(tt.ge25519), tt.POINTER(tt.ge25519)]
# void ge25519_double(ge25519 *r, const ge25519 *p)
CLIB.ge25519_double.argtypes = [tt.POINTER(tt.ge25519), tt.POINTER(tt.ge25519)]
# void ge25519_nielsadd2(ge25519 *r, const ge25519_niels *q)
CLIB.ge25519_nielsadd2.argtypes = [tt.POINTER(tt.ge25519), tt.POINTER(tt.ge25519_niels)]
# void ge25519_pnielsadd(ge25519_pniels *r, const ge25519 *p, const ge25519_pniels *q)
CLIB.ge25519_pnielsadd.argtypes = [tt.POINTER(tt.ge25519_pniels), tt.POINTER(tt.ge25519), tt.POINTER(tt.ge25519_pniels)]
# void ge25519_pack(char r[32], const ge25519 *p)
CLIB.ge25519_pack.argtypes = [ct.c_ubyte * 32, tt.POINTER(tt.ge25519)]
# int ge25519_unpack_negative_vartime(ge25519 *r, const char p[32])
CLIB.ge25519_unpack_negative_vartime.argtypes = [tt.POINTER(tt.ge25519), ct.c_ubyte * 32]
CLIB.ge25519_unpack_negative_vartime.restype = ct.c_int
# void ge25519_set_neutral(ge25519 *r)
CLIB.ge25519_set_neutral.argtypes = [tt.POINTER(tt.ge25519)]
# void ge25519_double_scalarmult_vartime(ge25519 *r, const ge25519 *p1, const bignum256modm s1, const bignum256modm s2)
CLIB.ge25519_double_scalarmult_vartime.argtypes = [tt.POINTER(tt.ge25519), tt.POINTER(tt.ge25519), tt.bignum256modm, tt.bignum256modm]
# void ge25519_double_scalarmult_vartime2(ge25519 *r, const ge25519 *p1, const bignum256modm s1, const ge25519 *p2, const bignum256modm s2)
CLIB.ge25519_double_scalarmult_vartime2.argtypes = [tt.POINTER(tt.ge25519), tt.POINTER(tt.ge25519), tt.bignum256modm, tt.POINTER(tt.ge25519), tt.bignum256modm]
# void ge25519_scalarmult(ge25519 *r, const ge25519 *p1, const bignum256modm s1)
CLIB.ge25519_scalarmult.argtypes = [tt.POINTER(tt.ge25519), tt.POINTER(tt.ge25519), tt.bignum256modm]
# void curve25519_set(bignum25519 r, uint32_t x)
CLIB.curve25519_set.argtypes = [tt.bignum25519, tt.uint32_t]
# void curve25519_set_d(bignum25519 r)
CLIB.curve25519_set_d.argtypes = [tt.bignum25519]
# void curve25519_set_2d(bignum25519 r)
CLIB.curve25519_set_2d.argtypes = [tt.bignum25519]
# void curve25519_set_sqrtneg1(bignum25519 r)
CLIB.curve25519_set_sqrtneg1.argtypes = [tt.bignum25519]
# int curve25519_isnegative(const bignum25519 f)
CLIB.curve25519_isnegative.argtypes = [tt.bignum25519]
CLIB.curve25519_isnegative.restype = ct.c_int
# int curve25519_isnonzero(const bignum25519 f)
CLIB.curve25519_isnonzero.argtypes = [tt.bignum25519]
CLIB.curve25519_isnonzero.restype = ct.c_int
# void curve25519_reduce(bignum25519 r, const bignum25519 in)
CLIB.curve25519_reduce.argtypes = [tt.bignum25519, tt.bignum25519]
# void curve25519_expand_reduce(bignum25519 out, const char in[32])
CLIB.curve25519_expand_reduce.argtypes = [tt.bignum25519, ct.c_ubyte * 32]
# int ge25519_check(const ge25519 *r)
CLIB.ge25519_check.argtypes = [tt.POINTER(tt.ge25519)]
CLIB.ge25519_check.restype = ct.c_int
# int ge25519_eq(const ge25519 *a, const ge25519 *b)
CLIB.ge25519_eq.argtypes = [tt.POINTER(tt.ge25519), tt.POINTER(tt.ge25519)]
CLIB.ge25519_eq.restype = ct.c_int
# void ge25519_copy(ge25519 *dst, const ge25519 *src)
CLIB.ge25519_copy.argtypes = [tt.POINTER(tt.ge25519), tt.POINTER(tt.ge25519)]
# void ge25519_set_base(ge25519 *r)
CLIB.ge25519_set_base.argtypes = [tt.POINTER(tt.ge25519)]
# void ge25519_mul8(ge25519 *r, const ge25519 *t)
CLIB.ge25519_mul8.argtypes = [tt.POINTER(tt.ge25519), tt.POINTER(tt.ge25519)]
# void ge25519_neg_partial(ge25519 *r)
CLIB.ge25519_neg_partial.argtypes = [tt.POINTER(tt.ge25519)]
# void ge25519_neg_full(ge25519 *r)
CLIB.ge25519_neg_full.argtypes = [tt.POINTER(tt.ge25519)]
# void ge25519_reduce(ge25519 *r, const ge25519 *t)
CLIB.ge25519_reduce.argtypes = [tt.POINTER(tt.ge25519), tt.POINTER(tt.ge25519)]
# void ge25519_norm(ge25519 *r, const ge25519 *t)
CLIB.ge25519_norm.argtypes = [tt.POINTER(tt.ge25519), tt.POINTER(tt.ge25519)]
# void ge25519_add(ge25519 *r, const ge25519 *a, const ge25519 *b, char signbit)
CLIB.ge25519_add.argtypes = [tt.POINTER(tt.ge25519), tt.POINTER(tt.ge25519), tt.POINTER(tt.ge25519), ct.c_ubyte]
# void ge25519_fromfe_frombytes_vartime(ge25519 *r, const char *s)
CLIB.ge25519_fromfe_frombytes_vartime.argtypes = [tt.POINTER(tt.ge25519), tt.POINTER(ct.c_ubyte)]
# int ge25519_unpack_vartime(ge25519 *r, const char *s)
CLIB.ge25519_unpack_vartime.argtypes = [tt.POINTER(tt.ge25519), tt.POINTER(ct.c_ubyte)]
CLIB.ge25519_unpack_vartime.restype = ct.c_int
# void ge25519_scalarmult_base_wrapper(ge25519 *r, const bignum256modm s)
CLIB.ge25519_scalarmult_base_wrapper.argtypes = [tt.POINTER(tt.ge25519), tt.bignum256modm]
# void ge25519_set_xmr_h(ge25519 *r)
CLIB.ge25519_set_xmr_h.argtypes = [tt.POINTER(tt.ge25519)]
# --- Monero-specific crypto helpers (key derivation, range proofs, ...) ---
# void xmr_random_scalar(bignum256modm m)
CLIB.xmr_random_scalar.argtypes = [tt.bignum256modm]
# void xmr_fast_hash(uint8_t *hash, const void *data, size_t length)
CLIB.xmr_fast_hash.argtypes = [tt.POINTER(tt.uint8_t), ct.c_void_p, tt.size_t]
# void xmr_hasher_init(Hasher *hasher)
CLIB.xmr_hasher_init.argtypes = [tt.POINTER(tt.Hasher)]
# void xmr_hasher_update(Hasher *hasher, const void *data, size_t length)
CLIB.xmr_hasher_update.argtypes = [tt.POINTER(tt.Hasher), ct.c_void_p, tt.size_t]
# void xmr_hasher_final(Hasher *hasher, uint8_t *hash)
CLIB.xmr_hasher_final.argtypes = [tt.POINTER(tt.Hasher), tt.POINTER(tt.uint8_t)]
# void xmr_hasher_copy(Hasher *dst, const Hasher *src)
CLIB.xmr_hasher_copy.argtypes = [tt.POINTER(tt.Hasher), tt.POINTER(tt.Hasher)]
# void xmr_hash_to_scalar(bignum256modm r, const void *data, size_t length)
CLIB.xmr_hash_to_scalar.argtypes = [tt.bignum256modm, ct.c_void_p, tt.size_t]
# void xmr_hash_to_ec(ge25519 *P, const void *data, size_t length)
CLIB.xmr_hash_to_ec.argtypes = [tt.POINTER(tt.ge25519), ct.c_void_p, tt.size_t]
# void xmr_derivation_to_scalar(bignum256modm s, const ge25519 *p, uint32_t output_index)
CLIB.xmr_derivation_to_scalar.argtypes = [tt.bignum256modm, tt.POINTER(tt.ge25519), tt.uint32_t]
# void xmr_generate_key_derivation(ge25519 *r, const ge25519 *A, const bignum256modm b)
CLIB.xmr_generate_key_derivation.argtypes = [tt.POINTER(tt.ge25519), tt.POINTER(tt.ge25519), tt.bignum256modm]
# void xmr_derive_private_key(bignum256modm s, const ge25519 *deriv, uint32_t idx, const bignum256modm base)
CLIB.xmr_derive_private_key.argtypes = [tt.bignum256modm, tt.POINTER(tt.ge25519), tt.uint32_t, tt.bignum256modm]
# void xmr_derive_public_key(ge25519 *r, const ge25519 *deriv, uint32_t idx, const ge25519 *base)
CLIB.xmr_derive_public_key.argtypes = [tt.POINTER(tt.ge25519), tt.POINTER(tt.ge25519), tt.uint32_t, tt.POINTER(tt.ge25519)]
# void xmr_add_keys2(ge25519 *r, const bignum256modm a, const bignum256modm b, const ge25519 *B)
CLIB.xmr_add_keys2.argtypes = [tt.POINTER(tt.ge25519), tt.bignum256modm, tt.bignum256modm, tt.POINTER(tt.ge25519)]
# void xmr_add_keys2_vartime(ge25519 *r, const bignum256modm a, const bignum256modm b, const ge25519 *B)
CLIB.xmr_add_keys2_vartime.argtypes = [tt.POINTER(tt.ge25519), tt.bignum256modm, tt.bignum256modm, tt.POINTER(tt.ge25519)]
# void xmr_add_keys3(ge25519 *r, const bignum256modm a, const ge25519 *A, const bignum256modm b, const ge25519 *B)
CLIB.xmr_add_keys3.argtypes = [tt.POINTER(tt.ge25519), tt.bignum256modm, tt.POINTER(tt.ge25519), tt.bignum256modm, tt.POINTER(tt.ge25519)]
# void xmr_add_keys3_vartime(ge25519 *r, const bignum256modm a, const ge25519 *A, const bignum256modm b, const ge25519 *B)
CLIB.xmr_add_keys3_vartime.argtypes = [tt.POINTER(tt.ge25519), tt.bignum256modm, tt.POINTER(tt.ge25519), tt.bignum256modm, tt.POINTER(tt.ge25519)]
# void xmr_get_subaddress_secret_key(bignum256modm r, uint32_t major, uint32_t minor, const bignum256modm m)
CLIB.xmr_get_subaddress_secret_key.argtypes = [tt.bignum256modm, tt.uint32_t, tt.uint32_t, tt.bignum256modm]
# void xmr_gen_c(ge25519 *r, const bignum256modm a, uint64_t amount)
CLIB.xmr_gen_c.argtypes = [tt.POINTER(tt.ge25519), tt.bignum256modm, tt.uint64_t]
# void xmr_gen_range_sig(xmr_range_sig_t *sig, ge25519 *C, bignum256modm mask, xmr_amount amount, bignum256modm *last_mask)
CLIB.xmr_gen_range_sig.argtypes = [tt.POINTER(tt.xmr_range_sig_t), tt.POINTER(tt.ge25519), tt.bignum256modm, tt.xmr_amount, tt.POINTER(tt.bignum256modm)]
# void xmr_gen_range_sig_ex(xmr_range_sig_t *sig, ge25519 *C, bignum256modm mask, xmr_amount amount, bignum256modm *last_mask, bignum256modm ai[64], bignum256modm alpha[64])
CLIB.xmr_gen_range_sig_ex.argtypes = [tt.POINTER(tt.xmr_range_sig_t), tt.POINTER(tt.ge25519), tt.bignum256modm, tt.xmr_amount, tt.POINTER(tt.bignum256modm), tt.bignum256modm * 64, tt.bignum256modm * 64]
#
# Wrappers
#
def address_prefix_bytes_len(address_type):
    """Return the number of prefix bytes used by the given address type."""
    return int(CLIB.address_prefix_bytes_len(address_type))
def address_write_prefix_bytes(address_type, out):
    """Write the address-type prefix bytes into caller-provided buffer *out*."""
    CLIB.address_write_prefix_bytes(address_type, out)
def address_write_prefix_bytes_r(address_type):
    """Write the address-type prefix into a fresh buffer; return it as bytes.

    Fixed two defects in the generated wrapper: it allocated a single
    uint8_t while the C side writes address_prefix_bytes_len(address_type)
    bytes (heap overflow for multi-byte prefixes), and it called int() on a
    ctypes instance, which always raised TypeError (so no caller can depend
    on the old int return).
    """
    n = int(CLIB.address_prefix_bytes_len(address_type))
    out = (tt.uint8_t * n)()
    CLIB.address_write_prefix_bytes(address_type, out)
    return bytes(out)
def address_check_prefix(addr, address_type):
    """Return the C-side check result for *addr* against *address_type*."""
    return int(CLIB.address_check_prefix(addr, address_type))
def base32_encode(in_, inlen, out, outlen, alphabet):
    """Base32-encode *inlen* bytes of *in_* into *out*; return the C result as bytes."""
    return bytes(CLIB.base32_encode(in_, inlen, out, outlen, alphabet))
def base32_encode_unsafe(in_, inlen, out):
    """Base32-encode into caller-provided *out* without bounds checking."""
    CLIB.base32_encode_unsafe(in_, inlen, out)
def base32_encode_unsafe_r(in_, inlen):
    """Base32-encode *inlen* bytes of *in_* into a fresh buffer; return bytes.

    Fixed: the generated wrapper allocated a single uint8_t, so the C side
    wrote the whole encoded output past the buffer. Size the buffer with
    base32_encoded_length, as the library intends.
    """
    out = (tt.uint8_t * int(CLIB.base32_encoded_length(inlen)))()
    CLIB.base32_encode_unsafe(in_, inlen, out)
    return bytes(out)
def base32_decode(in_, inlen, out, outlen, alphabet):
    """Base32-decode *inlen* chars of *in_* into *out*; return the C result as bytes."""
    return bytes(CLIB.base32_decode(in_, inlen, out, outlen, alphabet))
def base32_decode_unsafe(in_, inlen, out, alphabet):
    """Base32-decode without bounds checking; return the C status as int."""
    return int(CLIB.base32_decode_unsafe(in_, inlen, out, alphabet))
def base32_encoded_length(inlen):
    """Return the encoded length for *inlen* input bytes."""
    return int(CLIB.base32_encoded_length(inlen))
def base32_decoded_length(inlen):
    """Return the decoded length for *inlen* encoded characters."""
    return int(CLIB.base32_decoded_length(inlen))
def sha1_Transform(state_in, data, state_out):
    """Run one SHA-1 compression: state_out = transform(state_in, data block)."""
    CLIB.sha1_Transform(ct.byref(state_in), ct.byref(data), ct.byref(state_out))
def sha1_Transform_r(state_in, data):
    """Like sha1_Transform, but allocates and returns the output state.

    Fixed: the SHA-1 chaining state is 5 x 32-bit words (FIPS 180-4); the
    generated wrapper allocated a single uint32_t, which the C transform
    overflowed. A ctypes array converts implicitly to uint32_t*.
    """
    state_out = (tt.uint32_t * 5)()
    CLIB.sha1_Transform(ct.byref(state_in), ct.byref(data), state_out)
    return state_out
def sha1_Init(r):
    """Initialise SHA1_CTX *r*."""
    CLIB.sha1_Init(ct.byref(r))
def sha1_Init_r():
    """Allocate, initialise and return a fresh SHA1_CTX."""
    r = (tt.SHA1_CTX)()
    CLIB.sha1_Init(ct.byref(r))
    return r
def sha1_Update(r, a, b):
    """Absorb *b* bytes of *a* into context *r*."""
    CLIB.sha1_Update(ct.byref(r), a, b)
def sha1_Update_r(a, b):
    # NOTE(review): updates a zero-allocated (not sha1_Init-ed) context —
    # generated pattern; confirm callers expect that.
    r = (tt.SHA1_CTX)()
    CLIB.sha1_Update(ct.byref(r), a, b)
    return r
def sha1_Final(r, a):
    """Finalise context *r*, writing the 20-byte digest into *a*."""
    CLIB.sha1_Final(ct.byref(r), a)
def sha1_Final_r():
    # NOTE(review): finalises a zero-allocated context — generated pattern.
    r = (tt.SHA1_CTX)()
    a = (tt.uint8_t * 20)()
    CLIB.sha1_Final(ct.byref(r), a)
    return r, bytes(a)
def sha1_End(r, a):
    """Finalise *r* and return the C result (hex digest pointer) as bytes."""
    return bytes(CLIB.sha1_End(ct.byref(r), a))
def sha1_End_r():
    r = (tt.SHA1_CTX)()
    a = (ct.c_byte * 41)()  # 40 hex chars + terminating NUL
    _res = CLIB.sha1_End(ct.byref(r), a)
    return bytes(_res), r, bytes(a)
def sha1_Raw(r, a, b):
    """One-shot SHA-1 of buffer *r* (length *a*) into 20-byte buffer *b*."""
    CLIB.sha1_Raw(r, a, b)
def sha1_Raw_r(r, a):
    """One-shot SHA-1 of buffer *r* (length *a*); return the 20-byte digest."""
    b = (tt.uint8_t * 20)()
    CLIB.sha1_Raw(r, a, b)
    return bytes(b)
def sha1_Data(r, a, b):
    """One-shot SHA-1 writing a hex string into *b*; return the C result as bytes."""
    return bytes(CLIB.sha1_Data(r, a, b))
def sha1_Data_r(r, a):
    b = (ct.c_byte * 41)()
    _res = CLIB.sha1_Data(r, a, b)
    return bytes(_res), bytes(b)
def sha256_Transform(state_in, data, state_out):
    """Run one SHA-256 compression: state_out = transform(state_in, data block)."""
    CLIB.sha256_Transform(ct.byref(state_in), ct.byref(data), ct.byref(state_out))
def sha256_Transform_r(state_in, data):
    """Like sha256_Transform, but allocates and returns the output state.

    Fixed: the SHA-256 chaining state is 8 x 32-bit words (FIPS 180-4); the
    generated wrapper allocated a single uint32_t, which the C transform
    overflowed. A ctypes array converts implicitly to uint32_t*.
    """
    state_out = (tt.uint32_t * 8)()
    CLIB.sha256_Transform(ct.byref(state_in), ct.byref(data), state_out)
    return state_out
def sha256_Init(r):
    """Initialise SHA256_CTX *r*."""
    CLIB.sha256_Init(ct.byref(r))
def sha256_Init_r():
    """Allocate, initialise and return a fresh SHA256_CTX."""
    r = (tt.SHA256_CTX)()
    CLIB.sha256_Init(ct.byref(r))
    return r
def sha256_Update(r, a, b):
    """Absorb *b* bytes of *a* into context *r*."""
    CLIB.sha256_Update(ct.byref(r), a, b)
def sha256_Update_r(a, b):
    # NOTE(review): updates a zero-allocated (not sha256_Init-ed) context —
    # generated pattern; confirm callers expect that.
    r = (tt.SHA256_CTX)()
    CLIB.sha256_Update(ct.byref(r), a, b)
    return r
def sha256_Final(r, a):
    """Finalise context *r*, writing the 32-byte digest into *a*."""
    CLIB.sha256_Final(ct.byref(r), a)
def sha256_Final_r():
    # NOTE(review): finalises a zero-allocated context — generated pattern.
    r = (tt.SHA256_CTX)()
    a = (tt.uint8_t * 32)()
    CLIB.sha256_Final(ct.byref(r), a)
    return r, bytes(a)
def sha256_End(r, a):
    """Finalise *r* and return the C result (hex digest pointer) as bytes."""
    return bytes(CLIB.sha256_End(ct.byref(r), a))
def sha256_End_r():
    r = (tt.SHA256_CTX)()
    a = (ct.c_byte * 65)()  # 64 hex chars + terminating NUL
    _res = CLIB.sha256_End(ct.byref(r), a)
    return bytes(_res), r, bytes(a)
def sha256_Raw(r, a, b):
    """One-shot SHA-256 of buffer *r* (length *a*) into 32-byte buffer *b*."""
    CLIB.sha256_Raw(r, a, b)
def sha256_Raw_r(r, a):
    """One-shot SHA-256 of buffer *r* (length *a*); return the 32-byte digest."""
    b = (tt.uint8_t * 32)()
    CLIB.sha256_Raw(r, a, b)
    return bytes(b)
def sha256_Data(r, a, b):
    """One-shot SHA-256 writing a hex string into *b*; return the C result as bytes."""
    return bytes(CLIB.sha256_Data(r, a, b))
def sha256_Data_r(r, a):
    b = (ct.c_byte * 65)()
    _res = CLIB.sha256_Data(r, a, b)
    return bytes(_res), bytes(b)
def sha512_Transform(state_in, data, state_out):
    """Run one SHA-512 compression: state_out = transform(state_in, data block)."""
    CLIB.sha512_Transform(ct.byref(state_in), ct.byref(data), ct.byref(state_out))
def sha512_Transform_r(state_in, data):
    """Like sha512_Transform, but allocates and returns the output state.

    Fixed: the SHA-512 chaining state is 8 x 64-bit words (FIPS 180-4); the
    generated wrapper allocated a single uint64_t, which the C transform
    overflowed. A ctypes array converts implicitly to uint64_t*.
    """
    state_out = (tt.uint64_t * 8)()
    CLIB.sha512_Transform(ct.byref(state_in), ct.byref(data), state_out)
    return state_out
def sha512_Init(r):
    """Initialise SHA512_CTX *r*."""
    CLIB.sha512_Init(ct.byref(r))
def sha512_Init_r():
    """Allocate, initialise and return a fresh SHA512_CTX."""
    r = (tt.SHA512_CTX)()
    CLIB.sha512_Init(ct.byref(r))
    return r
def sha512_Update(r, a, b):
    """Absorb *b* bytes of *a* into context *r*."""
    CLIB.sha512_Update(ct.byref(r), a, b)
def sha512_Update_r(a, b):
    # NOTE(review): updates a zero-allocated (not sha512_Init-ed) context —
    # generated pattern; confirm callers expect that.
    r = (tt.SHA512_CTX)()
    CLIB.sha512_Update(ct.byref(r), a, b)
    return r
def sha512_Final(r, a):
    """Finalise context *r*, writing the 64-byte digest into *a*."""
    CLIB.sha512_Final(ct.byref(r), a)
def sha512_Final_r():
    # NOTE(review): finalises a zero-allocated context — generated pattern.
    r = (tt.SHA512_CTX)()
    a = (tt.uint8_t * 64)()
    CLIB.sha512_Final(ct.byref(r), a)
    return r, bytes(a)
def sha512_End(r, a):
    """Finalise *r* and return the C result (hex digest pointer) as bytes."""
    return bytes(CLIB.sha512_End(ct.byref(r), a))
def sha512_End_r():
    r = (tt.SHA512_CTX)()
    a = (ct.c_byte * 129)()  # 128 hex chars + terminating NUL
    _res = CLIB.sha512_End(ct.byref(r), a)
    return bytes(_res), r, bytes(a)
def sha512_Raw(r, a, b):
    """One-shot SHA-512 of buffer *r* (length *a*) into 64-byte buffer *b*."""
    CLIB.sha512_Raw(r, a, b)
def sha512_Raw_r(r, a):
    """One-shot SHA-512 of buffer *r* (length *a*); return the 64-byte digest."""
    b = (tt.uint8_t * 64)()
    CLIB.sha512_Raw(r, a, b)
    return bytes(b)
def sha512_Data(r, a, b):
    """One-shot SHA-512 writing a hex string into *b*; return the C result as bytes."""
    return bytes(CLIB.sha512_Data(r, a, b))
def sha512_Data_r(r, a):
    b = (ct.c_byte * 129)()
    _res = CLIB.sha512_Data(r, a, b)
    return bytes(_res), bytes(b)
def sha3_224_Init(ctx):
CLIB.sha3_224_Init(ct.byref(ctx))
def sha3_224_Init_r():
ctx = (tt.SHA3_CTX)()
CLIB.sha3_224_Init(ct.byref(ctx))
return ctx
def sha3_256_Init(ctx):
CLIB.sha3_256_Init(ct.byref(ctx))
def sha3_256_Init_r():
ctx = (tt.SHA3_CTX)()
CLIB.sha3_256_Init(ct.byref(ctx))
return ctx
def sha3_384_Init(ctx):
CLIB.sha3_384_Init(ct.byref(ctx))
def sha3_384_Init_r():
ctx = (tt.SHA3_CTX)()
CLIB.sha3_384_Init(ct.byref(ctx))
return ctx
def sha3_512_Init(ctx):
CLIB.sha3_512_Init(ct.byref(ctx))
def sha3_512_Init_r():
ctx = (tt.SHA3_CTX)()
CLIB.sha3_512_Init(ct.byref(ctx))
return ctx
def sha3_Update(ctx, msg, size):
CLIB.sha3_Update(ct.byref(ctx), msg, size)
def sha3_Update_r(msg, size):
ctx = (tt.SHA3_CTX)()
CLIB.sha3_Update(ct.byref(ctx), msg, size)
return ctx
def sha3_Final(ctx, result):
CLIB.sha3_Final(ct.byref(ctx), result)
def sha3_Final_r():
    """Finalize a freshly zeroed SHA3 context; return (ctx, digest bytes).

    NOTE(review): ``result`` is a single ``c_ubyte`` (1 byte), but
    ``sha3_Final`` writes the full digest, whose length depends on which
    ``sha3_*_Init`` variant initialized the context (28/32/48/64 bytes) --
    this looks like a buffer overflow; confirm the expected digest size
    against the C implementation before relying on this helper.
    """
    ctx = (tt.SHA3_CTX)()
    result = (ct.c_ubyte)()
    CLIB.sha3_Final(ct.byref(ctx), result)
    return ctx, bytes(result)
def keccak_Final(ctx, result):
CLIB.keccak_Final(ct.byref(ctx), result)
def keccak_Final_r():
    """Finalize a freshly zeroed Keccak context; return (ctx, digest bytes).

    NOTE(review): ``result`` is a single ``c_ubyte`` (1 byte), but
    ``keccak_Final`` writes the full digest, whose length depends on which
    init variant set up the context -- this looks like a buffer overflow;
    confirm the expected digest size against the C implementation.
    """
    ctx = (tt.SHA3_CTX)()
    result = (ct.c_ubyte)()
    CLIB.keccak_Final(ct.byref(ctx), result)
    return ctx, bytes(result)
def keccak_256(data, len, digest):
CLIB.keccak_256(data, len, digest)
def keccak_256_r(data, len):
    """One-shot Keccak-256 of `len` bytes of `data`; return the 32-byte digest.

    Fix: the generated single-byte buffer ``(ct.c_ubyte)()`` was overflowed
    by the C call, which always writes a 32-byte Keccak-256 digest.
    """
    digest = (ct.c_ubyte * 32)()
    CLIB.keccak_256(data, len, digest)
    return bytes(digest)
def keccak_512(data, len, digest):
CLIB.keccak_512(data, len, digest)
def keccak_512_r(data, len):
    """One-shot Keccak-512 of `len` bytes of `data`; return the 64-byte digest.

    Fix: the generated single-byte buffer was overflowed by the C call,
    which always writes a 64-byte Keccak-512 digest.
    """
    digest = (ct.c_ubyte * 64)()
    CLIB.keccak_512(data, len, digest)
    return bytes(digest)
def sha3_256(data, len, digest):
CLIB.sha3_256(data, len, digest)
def sha3_256_r(data, len):
    """One-shot SHA3-256 of `len` bytes of `data`; return the 32-byte digest.

    Fix: the generated single-byte buffer was overflowed by the C call,
    which always writes a 32-byte SHA3-256 digest.
    """
    digest = (ct.c_ubyte * 32)()
    CLIB.sha3_256(data, len, digest)
    return bytes(digest)
def sha3_512(data, len, digest):
CLIB.sha3_512(data, len, digest)
def sha3_512_r(data, len):
    """One-shot SHA3-512 of `len` bytes of `data`; return the 64-byte digest.

    Fix: the generated single-byte buffer was overflowed by the C call,
    which always writes a 64-byte SHA3-512 digest.
    """
    digest = (ct.c_ubyte * 64)()
    CLIB.sha3_512(data, len, digest)
    return bytes(digest)
def blake256_Init(r):
CLIB.blake256_Init(ct.byref(r))
def blake256_Init_r():
r = (tt.BLAKE256_CTX)()
CLIB.blake256_Init(ct.byref(r))
return r
def blake256_Update(r, a, b):
CLIB.blake256_Update(ct.byref(r), a, b)
def blake256_Update_r(a, b):
r = (tt.BLAKE256_CTX)()
CLIB.blake256_Update(ct.byref(r), a, b)
return r
def blake256_Final(r, a):
CLIB.blake256_Final(ct.byref(r), a)
def blake256_Final_r():
    """Finalize a freshly zeroed BLAKE-256 context; return (ctx, 32-byte digest).

    Fix: BLAKE-256 emits a 32-byte digest; the generated single-element
    ``(tt.uint8_t)()`` buffer was overflowed by the C call.
    """
    r = (tt.BLAKE256_CTX)()
    a = (tt.uint8_t * 32)()
    CLIB.blake256_Final(ct.byref(r), a)
    return r, bytes(a)
def blake256(r, a, b):
CLIB.blake256(r, a, b)
def blake256_r(r, a):
    """One-shot BLAKE-256 over `a` bytes of input `r`; return the 32-byte digest.

    Fix: BLAKE-256 emits a 32-byte digest; the generated single-element
    output buffer was overflowed by the C call.
    """
    b = (tt.uint8_t * 32)()
    CLIB.blake256(r, a, b)
    return bytes(b)
def groestl512_Init(cc):
CLIB.groestl512_Init(ct.byref(cc))
def groestl512_Init_r():
cc = (ct.c_void_p)()
CLIB.groestl512_Init(ct.byref(cc))
return cc
def groestl512_Update(cc, data, len):
CLIB.groestl512_Update(ct.byref(cc), ct.byref(data), len)
def groestl512_Update_r(data, len):
cc = (ct.c_void_p)()
CLIB.groestl512_Update(ct.byref(cc), ct.byref(data), len)
return cc
def groestl512_Final(cc, dst):
CLIB.groestl512_Final(ct.byref(cc), ct.byref(dst))
def groestl512_Final_r():
cc = (ct.c_void_p)()
dst = (ct.c_void_p)()
CLIB.groestl512_Final(ct.byref(cc), ct.byref(dst))
return cc, dst
def groestl512_DoubleTrunc(cc, dst):
CLIB.groestl512_DoubleTrunc(ct.byref(cc), ct.byref(dst))
def groestl512_DoubleTrunc_r():
cc = (ct.c_void_p)()
dst = (ct.c_void_p)()
CLIB.groestl512_DoubleTrunc(ct.byref(cc), ct.byref(dst))
return cc, dst
def blake2b_Init(S, outlen):
return int(CLIB.blake2b_Init(ct.byref(S), outlen))
def blake2b_Init_r(outlen):
S = (tt.blake2b_state)()
_res = CLIB.blake2b_Init(ct.byref(S), outlen)
return int(_res), S
def blake2b_InitKey(S, outlen, key, keylen):
return int(CLIB.blake2b_InitKey(ct.byref(S), outlen, ct.byref(key), keylen))
def blake2b_InitKey_r(outlen, key, keylen):
S = (tt.blake2b_state)()
_res = CLIB.blake2b_InitKey(ct.byref(S), outlen, ct.byref(key), keylen)
return int(_res), S
def blake2b_InitPersonal(S, outlen, personal, personal_len):
return int(CLIB.blake2b_InitPersonal(ct.byref(S), outlen, ct.byref(personal), personal_len))
def blake2b_InitPersonal_r(outlen, personal, personal_len):
S = (tt.blake2b_state)()
_res = CLIB.blake2b_InitPersonal(ct.byref(S), outlen, ct.byref(personal), personal_len)
return int(_res), S
def blake2b_Update(S, pin, inlen):
return int(CLIB.blake2b_Update(ct.byref(S), ct.byref(pin), inlen))
def blake2b_Update_r(pin, inlen):
S = (tt.blake2b_state)()
_res = CLIB.blake2b_Update(ct.byref(S), ct.byref(pin), inlen)
return int(_res), S
def blake2b_Final(S, out, outlen):
return int(CLIB.blake2b_Final(ct.byref(S), ct.byref(out), outlen))
def blake2b_Final_r(outlen):
S = (tt.blake2b_state)()
out = (ct.c_void_p)()
_res = CLIB.blake2b_Final(ct.byref(S), ct.byref(out), outlen)
return int(_res), S, out
def blake2b(msg, msg_len, out, outlen):
return int(CLIB.blake2b(msg, msg_len, ct.byref(out), outlen))
def blake2b_Key(msg, msg_len, key, keylen, out, outlen):
return int(CLIB.blake2b_Key(msg, msg_len, ct.byref(key), keylen, ct.byref(out), outlen))
def hasher_Init(hasher, type):
CLIB.hasher_Init(ct.byref(hasher), type)
def hasher_Init_r():
hasher = (tt.Hasher)()
type = (tt.HasherType)()
CLIB.hasher_Init(ct.byref(hasher), type)
return hasher, type
def hasher_Reset(hasher):
CLIB.hasher_Reset(ct.byref(hasher))
def hasher_Reset_r():
hasher = (tt.Hasher)()
CLIB.hasher_Reset(ct.byref(hasher))
return hasher
def hasher_Update(hasher, data, length):
CLIB.hasher_Update(ct.byref(hasher), data, length)
def hasher_Update_r(data, length):
hasher = (tt.Hasher)()
CLIB.hasher_Update(ct.byref(hasher), data, length)
return hasher
def hasher_Final(hasher, hash):
CLIB.hasher_Final(ct.byref(hasher), hash)
def hasher_Final_r():
hasher = (tt.Hasher)()
hash = (tt.uint8_t * 32)()
CLIB.hasher_Final(ct.byref(hasher), hash)
return hasher, bytes(hash)
def hasher_Raw(type, data, length, hash):
CLIB.hasher_Raw(type, data, length, hash)
def hasher_Raw_r(data, length, hash):
type = (tt.HasherType)()
CLIB.hasher_Raw(type, data, length, hash)
return type
def base58_encode_check(data, len, hasher_type, str, strsize):
return int(CLIB.base58_encode_check(data, len, hasher_type, str, strsize))
def base58_decode_check(str, hasher_type, data, datalen):
return int(CLIB.base58_decode_check(str, hasher_type, data, datalen))
def b58tobin(bin, binszp, b58):
return int(CLIB.b58tobin(ct.byref(bin), ct.byref(binszp), b58))
def b58tobin_r(b58):
bin = (ct.c_void_p)()
binszp = (tt.size_t)()
_res = CLIB.b58tobin(ct.byref(bin), ct.byref(binszp), b58)
return int(_res), bin, binszp
def b58check(bin, binsz, hasher_type, base58str):
return int(CLIB.b58check(ct.byref(bin), binsz, hasher_type, base58str))
def b58enc(b58, b58sz, data, binsz):
return int(CLIB.b58enc(b58, ct.byref(b58sz), ct.byref(data), binsz))
def b58enc_r(data, binsz):
    """Base58-encode `binsz` bytes of `data`; return (status, encoded, size).

    NOTE(review): ``b58`` is a single byte and ``b58sz`` is left at zero;
    ``b58enc`` presumably expects ``*b58sz`` to carry the output buffer
    capacity on entry, so as written this helper cannot return a useful
    encoding -- confirm the capacity contract against the C implementation.
    """
    b58 = (ct.c_byte)()
    b58sz = (tt.size_t)()
    _res = CLIB.b58enc(b58, ct.byref(b58sz), ct.byref(data), binsz)
    return int(_res), bytes(b58), b58sz
def random32():
return int(CLIB.random32())
def random_buffer(buf, len):
CLIB.random_buffer(buf, len)
def random_buffer_r(len):
    """Fill and return `len` random bytes from the library RNG.

    Fix: the generated single-element ``(tt.uint8_t)()`` buffer was
    overflowed whenever ``len > 1``; allocate exactly `len` bytes.
    """
    buf = (tt.uint8_t * len)()
    CLIB.random_buffer(buf, len)
    return bytes(buf)
def random_uniform(n):
return int(CLIB.random_uniform(n))
def random_permute(buf, len):
CLIB.random_permute(buf, len)
def random_permute_r(len):
    """Randomly permute a fresh `len`-byte buffer and return its contents.

    Fix: the generated single-element ``(ct.c_byte)()`` buffer was
    overflowed whenever ``len > 1``; allocate exactly `len` bytes.
    (The buffer starts zeroed, so the permuted content is all zeros --
    this mirrors the generated helper's behavior.)
    """
    buf = (ct.c_byte * len)()
    CLIB.random_permute(buf, len)
    return bytes(buf)
def random_init():
return int(CLIB.random_init())
def hmac_sha256_Init(hctx, key, keylen):
CLIB.hmac_sha256_Init(ct.byref(hctx), key, keylen)
def hmac_sha256_Init_r(key, keylen):
hctx = (tt.HMAC_SHA256_CTX)()
CLIB.hmac_sha256_Init(ct.byref(hctx), key, keylen)
return hctx
def hmac_sha256_Update(hctx, msg, msglen):
CLIB.hmac_sha256_Update(ct.byref(hctx), msg, msglen)
def hmac_sha256_Update_r(msg, msglen):
hctx = (tt.HMAC_SHA256_CTX)()
CLIB.hmac_sha256_Update(ct.byref(hctx), msg, msglen)
return hctx
def hmac_sha256_Final(hctx, hmac):
CLIB.hmac_sha256_Final(ct.byref(hctx), hmac)
def hmac_sha256_Final_r():
    """Finalize a freshly zeroed HMAC-SHA256 context; return (ctx, 32-byte MAC).

    Fix: HMAC-SHA256 emits a 32-byte MAC; the generated single-element
    ``(tt.uint8_t)()`` buffer was overflowed by the C call.
    """
    hctx = (tt.HMAC_SHA256_CTX)()
    hmac = (tt.uint8_t * 32)()
    CLIB.hmac_sha256_Final(ct.byref(hctx), hmac)
    return hctx, bytes(hmac)
def hmac_sha256(key, keylen, msg, msglen, hmac):
CLIB.hmac_sha256(key, keylen, msg, msglen, hmac)
def hmac_sha256_r(key, keylen, msg, msglen):
    """One-shot HMAC-SHA256 of `msg` under `key`; return the 32-byte MAC.

    Fix: HMAC-SHA256 emits a 32-byte MAC; the generated single-element
    output buffer was overflowed by the C call.
    """
    hmac = (tt.uint8_t * 32)()
    CLIB.hmac_sha256(key, keylen, msg, msglen, hmac)
    return bytes(hmac)
def hmac_sha256_prepare(key, keylen, opad_digest, ipad_digest):
CLIB.hmac_sha256_prepare(key, keylen, ct.byref(opad_digest), ct.byref(ipad_digest))
def hmac_sha256_prepare_r(key, keylen):
    """Precompute HMAC-SHA256 opad/ipad digests for `key`.

    Returns the two precomputed SHA-256 states as bytes.

    Fix: a SHA-256 state is 8 x uint32 (32 bytes); the generated
    single-element ``(tt.uint32_t)()`` buffers were overflowed by the C
    call, and ``int()`` was applied to one of them inconsistently.  Both
    states are now returned uniformly as bytes.
    """
    opad_digest = (tt.uint32_t * 8)()
    ipad_digest = (tt.uint32_t * 8)()
    CLIB.hmac_sha256_prepare(key, keylen, ct.byref(opad_digest), ct.byref(ipad_digest))
    return bytes(opad_digest), bytes(ipad_digest)
def hmac_sha512_Init(hctx, key, keylen):
CLIB.hmac_sha512_Init(ct.byref(hctx), key, keylen)
def hmac_sha512_Init_r(key, keylen):
hctx = (tt.HMAC_SHA512_CTX)()
CLIB.hmac_sha512_Init(ct.byref(hctx), key, keylen)
return hctx
def hmac_sha512_Update(hctx, msg, msglen):
CLIB.hmac_sha512_Update(ct.byref(hctx), msg, msglen)
def hmac_sha512_Update_r(msg, msglen):
hctx = (tt.HMAC_SHA512_CTX)()
CLIB.hmac_sha512_Update(ct.byref(hctx), msg, msglen)
return hctx
def hmac_sha512_Final(hctx, hmac):
CLIB.hmac_sha512_Final(ct.byref(hctx), hmac)
def hmac_sha512_Final_r():
    """Finalize a freshly zeroed HMAC-SHA512 context; return (ctx, 64-byte MAC).

    Fix: HMAC-SHA512 emits a 64-byte MAC; the generated single-element
    ``(tt.uint8_t)()`` buffer was overflowed by the C call.
    """
    hctx = (tt.HMAC_SHA512_CTX)()
    hmac = (tt.uint8_t * 64)()
    CLIB.hmac_sha512_Final(ct.byref(hctx), hmac)
    return hctx, bytes(hmac)
def hmac_sha512(key, keylen, msg, msglen, hmac):
CLIB.hmac_sha512(key, keylen, msg, msglen, hmac)
def hmac_sha512_r(key, keylen, msg, msglen):
    """One-shot HMAC-SHA512 of `msg` under `key`; return the 64-byte MAC.

    Fix: HMAC-SHA512 emits a 64-byte MAC; the generated single-element
    output buffer was overflowed by the C call.
    """
    hmac = (tt.uint8_t * 64)()
    CLIB.hmac_sha512(key, keylen, msg, msglen, hmac)
    return bytes(hmac)
def hmac_sha512_prepare(key, keylen, opad_digest, ipad_digest):
CLIB.hmac_sha512_prepare(key, keylen, ct.byref(opad_digest), ct.byref(ipad_digest))
def hmac_sha512_prepare_r(key, keylen):
    """Precompute HMAC-SHA512 opad/ipad digests for `key`.

    Returns the two precomputed SHA-512 states as bytes.

    Fix: a SHA-512 state is 8 x uint64 (64 bytes); the generated
    single-element ``(tt.uint64_t)()`` buffers were overflowed by the C
    call, and ``int()`` was applied to one of them inconsistently.  Both
    states are now returned uniformly as bytes.
    """
    opad_digest = (tt.uint64_t * 8)()
    ipad_digest = (tt.uint64_t * 8)()
    CLIB.hmac_sha512_prepare(key, keylen, ct.byref(opad_digest), ct.byref(ipad_digest))
    return bytes(opad_digest), bytes(ipad_digest)
def pbkdf2_hmac_sha256_Init(pctx, pass_, passlen, salt, saltlen):
CLIB.pbkdf2_hmac_sha256_Init(ct.byref(pctx), pass_, passlen, salt, saltlen)
def pbkdf2_hmac_sha256_Init_r(pass_, passlen, salt, saltlen):
pctx = (tt.PBKDF2_HMAC_SHA256_CTX)()
CLIB.pbkdf2_hmac_sha256_Init(ct.byref(pctx), pass_, passlen, salt, saltlen)
return pctx
def pbkdf2_hmac_sha256_Update(pctx, iterations):
CLIB.pbkdf2_hmac_sha256_Update(ct.byref(pctx), iterations)
def pbkdf2_hmac_sha256_Update_r(iterations):
pctx = (tt.PBKDF2_HMAC_SHA256_CTX)()
CLIB.pbkdf2_hmac_sha256_Update(ct.byref(pctx), iterations)
return pctx
def pbkdf2_hmac_sha256_Final(pctx, key):
CLIB.pbkdf2_hmac_sha256_Final(ct.byref(pctx), key)
def pbkdf2_hmac_sha256_Final_r():
    """Finalize a freshly zeroed PBKDF2-HMAC-SHA256 context; return (ctx, 32-byte key).

    Fix: PBKDF2-HMAC-SHA256 emits a 32-byte key block; the generated
    single-element ``(tt.uint8_t)()`` buffer was overflowed by the C call.
    """
    pctx = (tt.PBKDF2_HMAC_SHA256_CTX)()
    key = (tt.uint8_t * 32)()
    CLIB.pbkdf2_hmac_sha256_Final(ct.byref(pctx), key)
    return pctx, bytes(key)
def pbkdf2_hmac_sha256(pass_, passlen, salt, saltlen, iterations, key):
CLIB.pbkdf2_hmac_sha256(pass_, passlen, salt, saltlen, iterations, key)
def pbkdf2_hmac_sha256_r(pass_, passlen, salt, saltlen, iterations):
    """One-shot PBKDF2-HMAC-SHA256; return the derived 32-byte key.

    Fix: the generated single-element output buffer was overflowed by the
    C call, which writes a 32-byte key block.
    """
    key = (tt.uint8_t * 32)()
    CLIB.pbkdf2_hmac_sha256(pass_, passlen, salt, saltlen, iterations, key)
    return bytes(key)
def pbkdf2_hmac_sha512_Init(pctx, pass_, passlen, salt, saltlen):
CLIB.pbkdf2_hmac_sha512_Init(ct.byref(pctx), pass_, passlen, salt, saltlen)
def pbkdf2_hmac_sha512_Init_r(pass_, passlen, salt, saltlen):
pctx = (tt.PBKDF2_HMAC_SHA512_CTX)()
CLIB.pbkdf2_hmac_sha512_Init(ct.byref(pctx), pass_, passlen, salt, saltlen)
return pctx
def pbkdf2_hmac_sha512_Update(pctx, iterations):
CLIB.pbkdf2_hmac_sha512_Update(ct.byref(pctx), iterations)
def pbkdf2_hmac_sha512_Update_r(iterations):
pctx = (tt.PBKDF2_HMAC_SHA512_CTX)()
CLIB.pbkdf2_hmac_sha512_Update(ct.byref(pctx), iterations)
return pctx
def pbkdf2_hmac_sha512_Final(pctx, key):
CLIB.pbkdf2_hmac_sha512_Final(ct.byref(pctx), key)
def pbkdf2_hmac_sha512_Final_r():
    """Finalize a freshly zeroed PBKDF2-HMAC-SHA512 context; return (ctx, 64-byte key).

    Fix: PBKDF2-HMAC-SHA512 emits a 64-byte key block; the generated
    single-element ``(tt.uint8_t)()`` buffer was overflowed by the C call.
    """
    pctx = (tt.PBKDF2_HMAC_SHA512_CTX)()
    key = (tt.uint8_t * 64)()
    CLIB.pbkdf2_hmac_sha512_Final(ct.byref(pctx), key)
    return pctx, bytes(key)
def pbkdf2_hmac_sha512(pass_, passlen, salt, saltlen, iterations, key):
CLIB.pbkdf2_hmac_sha512(pass_, passlen, salt, saltlen, iterations, key)
def pbkdf2_hmac_sha512_r(pass_, passlen, salt, saltlen, iterations):
    """One-shot PBKDF2-HMAC-SHA512; return the derived 64-byte key.

    Fix: the generated single-element output buffer was overflowed by the
    C call, which writes a 64-byte key block.
    """
    key = (tt.uint8_t * 64)()
    CLIB.pbkdf2_hmac_sha512(pass_, passlen, salt, saltlen, iterations, key)
    return bytes(key)
def read_be(data):
return int(CLIB.read_be(data))
def write_be(data, x):
CLIB.write_be(data, x)
def write_be_r(x):
    """Serialize the 32-bit value `x` big-endian; return the 4 output bytes.

    Fix: a 32-bit write needs a 4-byte buffer; the generated
    single-element ``(tt.uint8_t)()`` was overflowed by the C call.
    """
    data = (tt.uint8_t * 4)()
    CLIB.write_be(data, x)
    return bytes(data)
def read_le(data):
return int(CLIB.read_le(data))
def write_le(data, x):
CLIB.write_le(data, x)
def write_le_r(x):
    """Serialize the 32-bit value `x` little-endian; return the 4 output bytes.

    Fix: a 32-bit write needs a 4-byte buffer; the generated
    single-element ``(tt.uint8_t)()`` was overflowed by the C call.
    """
    data = (tt.uint8_t * 4)()
    CLIB.write_le(data, x)
    return bytes(data)
def bn_read_be(in_number, out_number):
CLIB.bn_read_be(in_number, ct.byref(out_number))
def bn_read_be_r(in_number):
out_number = (tt.bignum256)()
CLIB.bn_read_be(in_number, ct.byref(out_number))
return bytes(out_number)
def bn_write_be(in_number, out_number):
CLIB.bn_write_be(ct.byref(in_number), out_number)
def bn_write_be_r(in_number):
    """Serialize bignum256 `in_number` big-endian; return the 32 output bytes.

    Fix: a 256-bit number serializes to 32 bytes; the generated
    single-element ``(tt.uint8_t)()`` buffer was overflowed by the C call.
    The result is returned as bytes for usability (the 1-byte ctypes
    object the generated code returned was unusable after the overflow).
    """
    out_number = (tt.uint8_t * 32)()
    CLIB.bn_write_be(ct.byref(in_number), out_number)
    return bytes(out_number)
def bn_read_le(in_number, out_number):
CLIB.bn_read_le(in_number, ct.byref(out_number))
def bn_read_le_r(in_number):
out_number = (tt.bignum256)()
CLIB.bn_read_le(in_number, ct.byref(out_number))
return bytes(out_number)
def bn_write_le(in_number, out_number):
CLIB.bn_write_le(ct.byref(in_number), out_number)
def bn_write_le_r(in_number):
    """Serialize bignum256 `in_number` little-endian; return the 32 output bytes.

    Fix: a 256-bit number serializes to 32 bytes; the generated
    single-element ``(tt.uint8_t)()`` buffer was overflowed by the C call.
    The result is returned as bytes for usability (the 1-byte ctypes
    object the generated code returned was unusable after the overflow).
    """
    out_number = (tt.uint8_t * 32)()
    CLIB.bn_write_le(ct.byref(in_number), out_number)
    return bytes(out_number)
def bn_read_uint32(in_number, out_number):
CLIB.bn_read_uint32(in_number, ct.byref(out_number))
def bn_read_uint32_r(in_number):
    """Load the uint32 `in_number` into a fresh bignum256 and return it.

    Fix: ``int(out_number)`` raised ``TypeError`` -- ``out_number`` is a
    ctypes Structure (``tt.bignum256``), not an integer; return the
    populated structure itself, matching the other ``bn_read_*_r`` helpers.
    """
    out_number = (tt.bignum256)()
    CLIB.bn_read_uint32(in_number, ct.byref(out_number))
    return out_number
def bn_read_uint64(in_number, out_number):
CLIB.bn_read_uint64(in_number, ct.byref(out_number))
def bn_read_uint64_r(in_number):
    """Load the uint64 `in_number` into a fresh bignum256 and return it.

    Fix: ``int(out_number)`` raised ``TypeError`` -- ``out_number`` is a
    ctypes Structure (``tt.bignum256``), not an integer; return the
    populated structure itself, matching the other ``bn_read_*_r`` helpers.
    """
    out_number = (tt.bignum256)()
    CLIB.bn_read_uint64(in_number, ct.byref(out_number))
    return out_number
def bn_bitcount(a):
return int(CLIB.bn_bitcount(ct.byref(a)))
def bn_digitcount(a):
return int(CLIB.bn_digitcount(ct.byref(a)))
def bn_zero(a):
CLIB.bn_zero(ct.byref(a))
def bn_zero_r():
a = (tt.bignum256)()
CLIB.bn_zero(ct.byref(a))
return a
def bn_is_zero(a):
return int(CLIB.bn_is_zero(ct.byref(a)))
def bn_one(a):
CLIB.bn_one(ct.byref(a))
def bn_one_r():
a = (tt.bignum256)()
CLIB.bn_one(ct.byref(a))
return a
def bn_is_less(a, b):
return int(CLIB.bn_is_less(ct.byref(a), ct.byref(b)))
def bn_is_equal(a, b):
return int(CLIB.bn_is_equal(ct.byref(a), ct.byref(b)))
def bn_cmov(res, cond, truecase, falsecase):
CLIB.bn_cmov(ct.byref(res), cond, ct.byref(truecase), ct.byref(falsecase))
def bn_cmov_r(cond, truecase, falsecase):
res = (tt.bignum256)()
CLIB.bn_cmov(ct.byref(res), cond, ct.byref(truecase), ct.byref(falsecase))
return res
def bn_lshift(a):
CLIB.bn_lshift(ct.byref(a))
def bn_lshift_r():
a = (tt.bignum256)()
CLIB.bn_lshift(ct.byref(a))
return a
def bn_rshift(a):
CLIB.bn_rshift(ct.byref(a))
def bn_rshift_r():
a = (tt.bignum256)()
CLIB.bn_rshift(ct.byref(a))
return a
def bn_setbit(a, bit):
CLIB.bn_setbit(ct.byref(a), bit)
def bn_setbit_r():
a = (tt.bignum256)()
bit = (tt.uint8_t)()
CLIB.bn_setbit(ct.byref(a), bit)
return a, bit
def bn_clearbit(a, bit):
CLIB.bn_clearbit(ct.byref(a), bit)
def bn_clearbit_r():
a = (tt.bignum256)()
bit = (tt.uint8_t)()
CLIB.bn_clearbit(ct.byref(a), bit)
return a, bit
def bn_testbit(a, bit):
return int(CLIB.bn_testbit(ct.byref(a), bit))
def bn_testbit_r():
a = (tt.bignum256)()
bit = (tt.uint8_t)()
_res = CLIB.bn_testbit(ct.byref(a), bit)
return int(_res), a, bit
def bn_xor(a, b, c):
CLIB.bn_xor(ct.byref(a), ct.byref(b), ct.byref(c))
def bn_xor_r(b, c):
a = (tt.bignum256)()
CLIB.bn_xor(ct.byref(a), ct.byref(b), ct.byref(c))
return a
def bn_mult_half(x, prime):
CLIB.bn_mult_half(ct.byref(x), ct.byref(prime))
def bn_mult_half_r(prime):
x = (tt.bignum256)()
CLIB.bn_mult_half(ct.byref(x), ct.byref(prime))
return x
def bn_mult_k(x, k, prime):
CLIB.bn_mult_k(ct.byref(x), k, ct.byref(prime))
def bn_mult_k_r(prime):
x = (tt.bignum256)()
k = (tt.uint8_t)()
CLIB.bn_mult_k(ct.byref(x), k, ct.byref(prime))
return x, k
def bn_mod(x, prime):
CLIB.bn_mod(ct.byref(x), ct.byref(prime))
def bn_mod_r(prime):
x = (tt.bignum256)()
CLIB.bn_mod(ct.byref(x), ct.byref(prime))
return x
def bn_multiply(k, x, prime):
CLIB.bn_multiply(ct.byref(k), ct.byref(x), ct.byref(prime))
def bn_fast_mod(x, prime):
CLIB.bn_fast_mod(ct.byref(x), ct.byref(prime))
def bn_fast_mod_r(prime):
x = (tt.bignum256)()
CLIB.bn_fast_mod(ct.byref(x), ct.byref(prime))
return x
def bn_sqrt(x, prime):
CLIB.bn_sqrt(ct.byref(x), ct.byref(prime))
def bn_sqrt_r(prime):
x = (tt.bignum256)()
CLIB.bn_sqrt(ct.byref(x), ct.byref(prime))
return x
def bn_inverse(x, prime):
CLIB.bn_inverse(ct.byref(x), ct.byref(prime))
def bn_inverse_r(prime):
x = (tt.bignum256)()
CLIB.bn_inverse(ct.byref(x), ct.byref(prime))
return x
def bn_normalize(a):
CLIB.bn_normalize(ct.byref(a))
def bn_normalize_r():
a = (tt.bignum256)()
CLIB.bn_normalize(ct.byref(a))
return a
def bn_add(a, b):
CLIB.bn_add(ct.byref(a), ct.byref(b))
def bn_add_r(b):
a = (tt.bignum256)()
CLIB.bn_add(ct.byref(a), ct.byref(b))
return a
def bn_addmod(a, b, prime):
CLIB.bn_addmod(ct.byref(a), ct.byref(b), ct.byref(prime))
def bn_addmod_r(b, prime):
a = (tt.bignum256)()
CLIB.bn_addmod(ct.byref(a), ct.byref(b), ct.byref(prime))
return a
def bn_addi(a, b):
CLIB.bn_addi(ct.byref(a), b)
def bn_addi_r(b):
a = (tt.bignum256)()
CLIB.bn_addi(ct.byref(a), b)
return a
def bn_subi(a, b, prime):
CLIB.bn_subi(ct.byref(a), b, ct.byref(prime))
def bn_subi_r(b, prime):
a = (tt.bignum256)()
CLIB.bn_subi(ct.byref(a), b, ct.byref(prime))
return a
def bn_subtractmod(a, b, res, prime):
CLIB.bn_subtractmod(ct.byref(a), ct.byref(b), ct.byref(res), ct.byref(prime))
def bn_subtract(a, b, res):
CLIB.bn_subtract(ct.byref(a), ct.byref(b), ct.byref(res))
def bn_subtract_r(a, b):
res = (tt.bignum256)()
CLIB.bn_subtract(ct.byref(a), ct.byref(b), ct.byref(res))
return res
def bn_divmod58(a, r):
CLIB.bn_divmod58(ct.byref(a), ct.byref(r))
def bn_divmod58_r():
a = (tt.bignum256)()
r = (tt.uint32_t)()
CLIB.bn_divmod58(ct.byref(a), ct.byref(r))
return a, r
def bn_divmod1000(a, r):
CLIB.bn_divmod1000(ct.byref(a), ct.byref(r))
def bn_divmod1000_r():
a = (tt.bignum256)()
r = (tt.uint32_t)()
CLIB.bn_divmod1000(ct.byref(a), ct.byref(r))
return a, r
def bn_format(amnt, prefix, suffix, decimals, exponent, trailing, out, outlen):
return int(CLIB.bn_format(ct.byref(amnt), prefix, suffix, decimals, exponent, trailing, out, outlen))
def ECRYPT_init():
CLIB.ECRYPT_init()
def ECRYPT_keysetup(ctx, key, keysize, ivsize):
CLIB.ECRYPT_keysetup(ct.byref(ctx), ct.byref(key), keysize, ivsize)
def ECRYPT_keysetup_r(key, keysize, ivsize):
ctx = (tt.ECRYPT_ctx)()
CLIB.ECRYPT_keysetup(ct.byref(ctx), ct.byref(key), keysize, ivsize)
return ctx
def ECRYPT_ivsetup(ctx, iv):
CLIB.ECRYPT_ivsetup(ct.byref(ctx), ct.byref(iv))
def ECRYPT_ivsetup_r(iv):
ctx = (tt.ECRYPT_ctx)()
CLIB.ECRYPT_ivsetup(ct.byref(ctx), ct.byref(iv))
return ctx
def ECRYPT_encrypt_bytes(ctx, plaintext, ciphertext, msglen):
CLIB.ECRYPT_encrypt_bytes(ct.byref(ctx), ct.byref(plaintext), ct.byref(ciphertext), msglen)
def ECRYPT_encrypt_bytes_r(plaintext, ciphertext, msglen):
ctx = (tt.ECRYPT_ctx)()
CLIB.ECRYPT_encrypt_bytes(ct.byref(ctx), ct.byref(plaintext), ct.byref(ciphertext), msglen)
return ctx
def ECRYPT_decrypt_bytes(ctx, ciphertext, plaintext, msglen):
CLIB.ECRYPT_decrypt_bytes(ct.byref(ctx), ct.byref(ciphertext), ct.byref(plaintext), msglen)
def ECRYPT_decrypt_bytes_r(ciphertext, plaintext, msglen):
ctx = (tt.ECRYPT_ctx)()
CLIB.ECRYPT_decrypt_bytes(ct.byref(ctx), ct.byref(ciphertext), ct.byref(plaintext), msglen)
return ctx
def ECRYPT_keystream_bytes(ctx, keystream, length):
CLIB.ECRYPT_keystream_bytes(ct.byref(ctx), ct.byref(keystream), length)
def ECRYPT_keystream_bytes_r():
ctx = (tt.ECRYPT_ctx)()
keystream = (tt.u8)()
length = (tt.u32)()
CLIB.ECRYPT_keystream_bytes(ct.byref(ctx), ct.byref(keystream), length)
return ctx, keystream, length
def poly1305_init(ctx, key):
CLIB.poly1305_init(ct.byref(ctx), key)
def poly1305_init_r(key):
ctx = (tt.poly1305_context)()
CLIB.poly1305_init(ct.byref(ctx), key)
return ctx
def poly1305_update(ctx, m, bytes):
CLIB.poly1305_update(ct.byref(ctx), m, bytes)
def poly1305_update_r(m, bytes):
ctx = (tt.poly1305_context)()
CLIB.poly1305_update(ct.byref(ctx), m, bytes)
return ctx
def poly1305_finish(ctx, mac):
CLIB.poly1305_finish(ct.byref(ctx), mac)
def poly1305_finish_r():
ctx = (tt.poly1305_context)()
mac = (ct.c_ubyte * 16)()
CLIB.poly1305_finish(ct.byref(ctx), mac)
return ctx, bytes(mac)
def poly1305_auth(mac, m, bytes, key):
CLIB.poly1305_auth(mac, m, bytes, key)
def poly1305_auth_r(m, bytes, key):
mac = (ct.c_ubyte * 16)()
CLIB.poly1305_auth(mac, m, bytes, key)
return bytes(mac)
def poly1305_verify(mac1, mac2):
return int(CLIB.poly1305_verify(mac1, mac2))
def poly1305_power_on_self_test():
return int(CLIB.poly1305_power_on_self_test())
def xchacha20poly1305_init(ctx, key, nonce):
CLIB.xchacha20poly1305_init(ct.byref(ctx), key, nonce)
def xchacha20poly1305_init_r():
ctx = (tt.chacha20poly1305_ctx)()
key = (tt.uint8_t * 32)()
nonce = (tt.uint8_t * 24)()
CLIB.xchacha20poly1305_init(ct.byref(ctx), key, nonce)
return ctx, bytes(key), bytes(nonce)
def chacha20poly1305_encrypt(ctx, in_, out, n):
CLIB.chacha20poly1305_encrypt(ct.byref(ctx), in_, out, n)
def chacha20poly1305_encrypt_r(n):
    """Encrypt `n` zero bytes with a fresh context; return (ctx, in, out) bytes.

    Fix: the generated single-element ``(tt.uint8_t)()`` buffers were
    overflowed whenever ``n > 1``; allocate `n`-byte input and output
    buffers to match the length passed to the C call.
    """
    ctx = (tt.chacha20poly1305_ctx)()
    in_ = (tt.uint8_t * n)()
    out = (tt.uint8_t * n)()
    CLIB.chacha20poly1305_encrypt(ct.byref(ctx), in_, out, n)
    return ctx, bytes(in_), bytes(out)
def chacha20poly1305_decrypt(ctx, in_, out, n):
CLIB.chacha20poly1305_decrypt(ct.byref(ctx), in_, out, n)
def chacha20poly1305_decrypt_r(n):
    """Decrypt `n` zero bytes with a fresh context; return (ctx, in, out) bytes.

    Fix: the generated single-element ``(tt.uint8_t)()`` buffers were
    overflowed whenever ``n > 1``; allocate `n`-byte input and output
    buffers to match the length passed to the C call.
    """
    ctx = (tt.chacha20poly1305_ctx)()
    in_ = (tt.uint8_t * n)()
    out = (tt.uint8_t * n)()
    CLIB.chacha20poly1305_decrypt(ct.byref(ctx), in_, out, n)
    return ctx, bytes(in_), bytes(out)
def chacha20poly1305_auth(ctx, in_, n):
CLIB.chacha20poly1305_auth(ct.byref(ctx), in_, n)
def chacha20poly1305_auth_r(n):
    """Authenticate `n` zero bytes with a fresh context; return (ctx, in) bytes.

    Fix: the generated single-element ``(tt.uint8_t)()`` buffer was
    overrun whenever ``n > 1``; allocate an `n`-byte input buffer to
    match the length passed to the C call.
    """
    ctx = (tt.chacha20poly1305_ctx)()
    in_ = (tt.uint8_t * n)()
    CLIB.chacha20poly1305_auth(ct.byref(ctx), in_, n)
    return ctx, bytes(in_)
def chacha20poly1305_finish(ctx, mac):
CLIB.chacha20poly1305_finish(ct.byref(ctx), mac)
def chacha20poly1305_finish_r():
ctx = (tt.chacha20poly1305_ctx)()
mac = (tt.uint8_t * 16)()
CLIB.chacha20poly1305_finish(ct.byref(ctx), mac)
return ctx, bytes(mac)
def rfc7539_init(ctx, key, nonce):
CLIB.rfc7539_init(ct.byref(ctx), key, nonce)
def rfc7539_init_r():
ctx = (tt.chacha20poly1305_ctx)()
key = (tt.uint8_t * 32)()
nonce = (tt.uint8_t * 12)()
CLIB.rfc7539_init(ct.byref(ctx), key, nonce)
return ctx, bytes(key), bytes(nonce)
def rfc7539_auth(ctx, in_, n):
CLIB.rfc7539_auth(ct.byref(ctx), in_, n)
def rfc7539_auth_r(n):
    """RFC 7539 authenticate `n` zero bytes with a fresh context; return (ctx, in).

    Fix: the generated single-element ``(tt.uint8_t)()`` buffer was
    overrun whenever ``n > 1``; allocate an `n`-byte input buffer to
    match the length passed to the C call.
    """
    ctx = (tt.chacha20poly1305_ctx)()
    in_ = (tt.uint8_t * n)()
    CLIB.rfc7539_auth(ct.byref(ctx), in_, n)
    return ctx, bytes(in_)
def rfc7539_finish(ctx, alen, plen, mac):
CLIB.rfc7539_finish(ct.byref(ctx), alen, plen, mac)
def rfc7539_finish_r(alen, plen, mac):
ctx = (tt.chacha20poly1305_ctx)()
CLIB.rfc7539_finish(ct.byref(ctx), alen, plen, mac)
return ctx
def xmr_base58_addr_encode_check(tag, data, binsz, b58, b58sz):
return int(CLIB.xmr_base58_addr_encode_check(tag, data, binsz, b58, b58sz))
def xmr_base58_addr_decode_check(addr, sz, tag, data, datalen):
return int(CLIB.xmr_base58_addr_decode_check(addr, sz, ct.byref(tag), ct.byref(data), datalen))
def xmr_base58_encode(b58, b58sz, data, binsz):
return int(CLIB.xmr_base58_encode(b58, ct.byref(b58sz), ct.byref(data), binsz))
def xmr_base58_encode_r(data, binsz):
    """Monero base58-encode `binsz` bytes of `data`; return (status, encoded, size).

    NOTE(review): ``b58`` is a single byte and ``b58sz`` is left at zero;
    ``xmr_base58_encode`` presumably expects ``*b58sz`` to carry the
    output buffer capacity on entry, so as written this helper cannot
    return a useful encoding -- confirm the capacity contract against
    the C implementation.
    """
    b58 = (ct.c_byte)()
    b58sz = (tt.size_t)()
    _res = CLIB.xmr_base58_encode(b58, ct.byref(b58sz), ct.byref(data), binsz)
    return int(_res), bytes(b58), b58sz
def xmr_base58_decode(b58, b58sz, data, binsz):
return int(CLIB.xmr_base58_decode(b58, b58sz, ct.byref(data), ct.byref(binsz)))
def xmr_base58_decode_r(b58, b58sz):
data = (ct.c_void_p)()
binsz = (tt.size_t)()
_res = CLIB.xmr_base58_decode(b58, b58sz, ct.byref(data), ct.byref(binsz))
return int(_res), bytes(data), int(binsz)
def xmr_size_varint(num):
return int(CLIB.xmr_size_varint(num))
def xmr_write_varint(buff, buff_size, num):
return int(CLIB.xmr_write_varint(buff, buff_size, num))
def xmr_write_varint_r(buff_size, num):
    """Write `num` as a varint into a fresh `buff_size`-byte buffer.

    Returns (status, buffer bytes).

    Fix: the generated single-element ``(tt.uint8_t)()`` buffer ignored
    `buff_size` and was overflowed for multi-byte varints; allocate the
    advertised `buff_size` bytes.
    """
    buff = (tt.uint8_t * buff_size)()
    _res = CLIB.xmr_write_varint(buff, buff_size, num)
    return int(_res), bytes(buff)
def xmr_read_varint(buff, buff_size, val):
return int(CLIB.xmr_read_varint(buff, buff_size, ct.byref(val)))
def xmr_read_varint_r(buff_size, val):
    """Read a varint from a fresh `buff_size`-byte (zeroed) buffer into `val`.

    Returns (status, buffer bytes).

    Fix: the generated single-element ``(tt.uint8_t)()`` buffer ignored
    the `buff_size` it advertised to the C call, letting the reader run
    past the 1-byte allocation; allocate the full `buff_size` bytes.
    """
    buff = (tt.uint8_t * buff_size)()
    _res = CLIB.xmr_read_varint(buff, buff_size, ct.byref(val))
    return int(_res), bytes(buff)
def curve25519_copy(out, in_):
CLIB.curve25519_copy(out, in_)
def curve25519_copy_r(in_):
out = (tt.bignum25519)()
CLIB.curve25519_copy(out, in_)
return out
def curve25519_add(out, a, b):
CLIB.curve25519_add(out, a, b)
def curve25519_add_r(a, b):
out = (tt.bignum25519)()
CLIB.curve25519_add(out, a, b)
return out
def curve25519_add_after_basic(out, a, b):
CLIB.curve25519_add_after_basic(out, a, b)
def curve25519_add_after_basic_r(a, b):
out = (tt.bignum25519)()
CLIB.curve25519_add_after_basic(out, a, b)
return out
def curve25519_add_reduce(out, a, b):
CLIB.curve25519_add_reduce(out, a, b)
def curve25519_add_reduce_r(a, b):
out = (tt.bignum25519)()
CLIB.curve25519_add_reduce(out, a, b)
return out
def curve25519_sub(out, a, b):
CLIB.curve25519_sub(out, a, b)
def curve25519_sub_r(a, b):
out = (tt.bignum25519)()
CLIB.curve25519_sub(out, a, b)
return out
def curve25519_scalar_product(out, in_, scalar):
CLIB.curve25519_scalar_product(out, in_, scalar)
def curve25519_scalar_product_r(in_, scalar):
out = (tt.bignum25519)()
CLIB.curve25519_scalar_product(out, in_, scalar)
return out
def curve25519_sub_after_basic(out, a, b):
CLIB.curve25519_sub_after_basic(out, a, b)
def curve25519_sub_after_basic_r(a, b):
out = (tt.bignum25519)()
CLIB.curve25519_sub_after_basic(out, a, b)
return out
def curve25519_sub_reduce(out, a, b):
CLIB.curve25519_sub_reduce(out, a, b)
def curve25519_sub_reduce_r(a, b):
out = (tt.bignum25519)()
CLIB.curve25519_sub_reduce(out, a, b)
return out
def curve25519_neg(out, a):
CLIB.curve25519_neg(out, a)
def curve25519_neg_r(a):
out = (tt.bignum25519)()
CLIB.curve25519_neg(out, a)
return out
def curve25519_mul(out, a, b):
CLIB.curve25519_mul(out, a, b)
def curve25519_mul_r(a, b):
out = (tt.bignum25519)()
CLIB.curve25519_mul(out, a, b)
return out
def curve25519_square(out, in_):
CLIB.curve25519_square(out, in_)
def curve25519_square_r(in_):
out = (tt.bignum25519)()
CLIB.curve25519_square(out, in_)
return out
def curve25519_square_times(out, in_, count):
CLIB.curve25519_square_times(out, in_, count)
def curve25519_square_times_r(in_, count):
out = (tt.bignum25519)()
CLIB.curve25519_square_times(out, in_, count)
return out
def curve25519_expand(out, in_):
CLIB.curve25519_expand(out, in_)
def curve25519_expand_r(in_):
out = (tt.bignum25519)()
CLIB.curve25519_expand(out, in_)
return out
def curve25519_contract(out, in_):
CLIB.curve25519_contract(out, in_)
def curve25519_contract_r(in_):
out = (ct.c_ubyte * 32)()
CLIB.curve25519_contract(out, in_)
return bytes(out)
def curve25519_swap_conditional(a, b, iswap):
CLIB.curve25519_swap_conditional(a, b, iswap)
def curve25519_swap_conditional_r(iswap):
a = (tt.bignum25519)()
b = (tt.bignum25519)()
CLIB.curve25519_swap_conditional(a, b, iswap)
return a, b
def curve25519_pow_two5mtwo0_two250mtwo0(b):
CLIB.curve25519_pow_two5mtwo0_two250mtwo0(b)
def curve25519_pow_two5mtwo0_two250mtwo0_r():
b = (tt.bignum25519)()
CLIB.curve25519_pow_two5mtwo0_two250mtwo0(b)
return b
def curve25519_recip(out, z):
CLIB.curve25519_recip(out, z)
def curve25519_recip_r(z):
out = (tt.bignum25519)()
CLIB.curve25519_recip(out, z)
return out
def curve25519_pow_two252m3(two252m3, z):
CLIB.curve25519_pow_two252m3(two252m3, z)
def curve25519_pow_two252m3_r(z):
two252m3 = (tt.bignum25519)()
CLIB.curve25519_pow_two252m3(two252m3, z)
return two252m3
def reduce256_modm(r):
CLIB.reduce256_modm(r)
def reduce256_modm_r():
r = (tt.bignum256modm)()
CLIB.reduce256_modm(r)
return r
def barrett_reduce256_modm(r, q1, r1):
CLIB.barrett_reduce256_modm(r, q1, r1)
def barrett_reduce256_modm_r(q1, r1):
r = (tt.bignum256modm)()
CLIB.barrett_reduce256_modm(r, q1, r1)
return r
def add256_modm(r, x, y):
CLIB.add256_modm(r, x, y)
def add256_modm_r(x, y):
r = (tt.bignum256modm)()
CLIB.add256_modm(r, x, y)
return r
def neg256_modm(r, x):
CLIB.neg256_modm(r, x)
def neg256_modm_r(x):
r = (tt.bignum256modm)()
CLIB.neg256_modm(r, x)
return r
def sub256_modm(r, x, y):
CLIB.sub256_modm(r, x, y)
def sub256_modm_r(x, y):
r = (tt.bignum256modm)()
CLIB.sub256_modm(r, x, y)
return r
def mul256_modm(r, x, y):
CLIB.mul256_modm(r, x, y)
def mul256_modm_r(x, y):
r = (tt.bignum256modm)()
CLIB.mul256_modm(r, x, y)
return r
def expand_raw256_modm(out, in_):
CLIB.expand_raw256_modm(out, in_)
def expand_raw256_modm_r(in_):
out = (tt.bignum256modm)()
CLIB.expand_raw256_modm(out, in_)
return out
def is_reduced256_modm(in_):
return int(CLIB.is_reduced256_modm(in_))
def contract256_modm(out, in_):
CLIB.contract256_modm(out, in_)
def contract256_modm_r(in_):
out = (ct.c_ubyte * 32)()
CLIB.contract256_modm(out, in_)
return bytes(out)
def contract256_window4_modm(r, in_):
CLIB.contract256_window4_modm(r, in_)
def contract256_window4_modm_r(in_):
r = (ct.c_byte * 64)()
CLIB.contract256_window4_modm(r, in_)
return bytes(r)
def contract256_slidingwindow_modm(r, s, windowsize):
CLIB.contract256_slidingwindow_modm(r, s, windowsize)
def contract256_slidingwindow_modm_r(s, windowsize):
r = (ct.c_byte * 256)()
CLIB.contract256_slidingwindow_modm(r, s, windowsize)
return bytes(r)
def set256_modm(r, v):
CLIB.set256_modm(r, v)
def set256_modm_r(v):
r = (tt.bignum256modm)()
CLIB.set256_modm(r, v)
return r
def get256_modm(v, r):
return int(CLIB.get256_modm(ct.byref(v), r))
def eq256_modm(x, y):
return int(CLIB.eq256_modm(x, y))
def cmp256_modm(x, y):
return int(CLIB.cmp256_modm(x, y))
def iszero256_modm(x):
return int(CLIB.iszero256_modm(x))
def copy256_modm(r, x):
CLIB.copy256_modm(r, x)
def copy256_modm_r(x):
r = (tt.bignum256modm)()
CLIB.copy256_modm(r, x)
return r
def check256_modm(x):
return int(CLIB.check256_modm(x))
def mulsub256_modm(r, a, b, c):
CLIB.mulsub256_modm(r, a, b, c)
def mulsub256_modm_r(a, b, c):
r = (tt.bignum256modm)()
CLIB.mulsub256_modm(r, a, b, c)
return r
def muladd256_modm(r, a, b, c):
CLIB.muladd256_modm(r, a, b, c)
def muladd256_modm_r(a, b, c):
r = (tt.bignum256modm)()
CLIB.muladd256_modm(r, a, b, c)
return r
def ed25519_verify(x, y, len):
return int(CLIB.ed25519_verify(x, y, len))
def ge25519_p1p1_to_partial(r, p):
CLIB.ge25519_p1p1_to_partial(ct.byref(r), ct.byref(p))
def ge25519_p1p1_to_partial_r(p):
r = (tt.ge25519)()
CLIB.ge25519_p1p1_to_partial(ct.byref(r), ct.byref(p))
return r
def ge25519_p1p1_to_full(r, p):
CLIB.ge25519_p1p1_to_full(ct.byref(r), ct.byref(p))
def ge25519_p1p1_to_full_r(p):
r = (tt.ge25519)()
CLIB.ge25519_p1p1_to_full(ct.byref(r), ct.byref(p))
return r
def ge25519_full_to_pniels(p, r):
CLIB.ge25519_full_to_pniels(ct.byref(p), ct.byref(r))
def ge25519_full_to_pniels_r(r):
p = (tt.ge25519_pniels)()
CLIB.ge25519_full_to_pniels(ct.byref(p), ct.byref(r))
return p
def ge25519_double_p1p1(r, p):
CLIB.ge25519_double_p1p1(ct.byref(r), ct.byref(p))
def ge25519_double_p1p1_r(p):
r = (tt.ge25519_p1p1)()
CLIB.ge25519_double_p1p1(ct.byref(r), ct.byref(p))
return r
def ge25519_nielsadd2_p1p1(r, p, q, signbit):
CLIB.ge25519_nielsadd2_p1p1(ct.byref(r), ct.byref(p), ct.byref(q), signbit)
def ge25519_nielsadd2_p1p1_r(p, q, signbit):
r = (tt.ge25519_p1p1)()
CLIB.ge25519_nielsadd2_p1p1(ct.byref(r), ct.byref(p), ct.byref(q), signbit)
return r
def ge25519_pnielsadd_p1p1(r, p, q, signbit):
CLIB.ge25519_pnielsadd_p1p1(ct.byref(r), ct.byref(p), ct.byref(q), signbit)
def ge25519_pnielsadd_p1p1_r(p, q, signbit):
r = (tt.ge25519_p1p1)()
CLIB.ge25519_pnielsadd_p1p1(ct.byref(r), ct.byref(p), ct.byref(q), signbit)
return r
def ge25519_double_partial(r, p):
CLIB.ge25519_double_partial(ct.byref(r), ct.byref(p))
def ge25519_double_partial_r(p):
r = (tt.ge25519)()
CLIB.ge25519_double_partial(ct.byref(r), ct.byref(p))
return r
def ge25519_double(r, p):
CLIB.ge25519_double(ct.byref(r), ct.byref(p))
def ge25519_double_r(p):
r = (tt.ge25519)()
CLIB.ge25519_double(ct.byref(r), ct.byref(p))
return r
def ge25519_nielsadd2(r, q):
CLIB.ge25519_nielsadd2(ct.byref(r), ct.byref(q))
def ge25519_nielsadd2_r(q):
r = (tt.ge25519)()
CLIB.ge25519_nielsadd2(ct.byref(r), ct.byref(q))
return r
def ge25519_pnielsadd(r, p, q):
CLIB.ge25519_pnielsadd(ct.byref(r), ct.byref(p), ct.byref(q))
def ge25519_pnielsadd_r(p, q):
r = (tt.ge25519_pniels)()
CLIB.ge25519_pnielsadd(ct.byref(r), ct.byref(p), ct.byref(q))
return r
def ge25519_pack(r, p):
CLIB.ge25519_pack(r, ct.byref(p))
def ge25519_pack_r(p):
    # Serialize point `p` into a fresh 32-byte buffer and return it as bytes.
    r = (ct.c_ubyte * 32)()
    CLIB.ge25519_pack(r, ct.byref(p))
    return bytes(r)
def ge25519_unpack_negative_vartime(r, p):
return int(CLIB.ge25519_unpack_negative_vartime(ct.byref(r), p))
def ge25519_unpack_negative_vartime_r(p):
r = (tt.ge25519)()
_res = CLIB.ge25519_unpack_negative_vartime(ct.byref(r), p)
return int(_res), r
def ge25519_set_neutral(r):
CLIB.ge25519_set_neutral(ct.byref(r))
def ge25519_set_neutral_r():
r = (tt.ge25519)()
CLIB.ge25519_set_neutral(ct.byref(r))
return r
def ge25519_double_scalarmult_vartime(r, p1, s1, s2):
CLIB.ge25519_double_scalarmult_vartime(ct.byref(r), ct.byref(p1), s1, s2)
def ge25519_double_scalarmult_vartime_r(p1, s1, s2):
r = (tt.ge25519)()
CLIB.ge25519_double_scalarmult_vartime(ct.byref(r), ct.byref(p1), s1, s2)
return r
def ge25519_double_scalarmult_vartime2(r, p1, s1, p2, s2):
CLIB.ge25519_double_scalarmult_vartime2(ct.byref(r), ct.byref(p1), s1, ct.byref(p2), s2)
def ge25519_double_scalarmult_vartime2_r(p1, s1, p2, s2):
r = (tt.ge25519)()
CLIB.ge25519_double_scalarmult_vartime2(ct.byref(r), ct.byref(p1), s1, ct.byref(p2), s2)
return r
def ge25519_scalarmult(r, p1, s1):
CLIB.ge25519_scalarmult(ct.byref(r), ct.byref(p1), s1)
def ge25519_scalarmult_r(p1, s1):
r = (tt.ge25519)()
CLIB.ge25519_scalarmult(ct.byref(r), ct.byref(p1), s1)
return r
def curve25519_set(r, x):
CLIB.curve25519_set(r, x)
def curve25519_set_r(x):
r = (tt.bignum25519)()
CLIB.curve25519_set(r, x)
return r
def curve25519_set_d(r):
CLIB.curve25519_set_d(r)
def curve25519_set_d_r():
r = (tt.bignum25519)()
CLIB.curve25519_set_d(r)
return r
def curve25519_set_2d(r):
CLIB.curve25519_set_2d(r)
def curve25519_set_2d_r():
r = (tt.bignum25519)()
CLIB.curve25519_set_2d(r)
return r
def curve25519_set_sqrtneg1(r):
CLIB.curve25519_set_sqrtneg1(r)
def curve25519_set_sqrtneg1_r():
r = (tt.bignum25519)()
CLIB.curve25519_set_sqrtneg1(r)
return r
def curve25519_isnegative(f):
return int(CLIB.curve25519_isnegative(f))
def curve25519_isnonzero(f):
return int(CLIB.curve25519_isnonzero(f))
def curve25519_reduce(r, in_):
CLIB.curve25519_reduce(r, in_)
def curve25519_reduce_r(in_):
r = (tt.bignum25519)()
CLIB.curve25519_reduce(r, in_)
return r
def curve25519_expand_reduce(out, in_):
CLIB.curve25519_expand_reduce(out, in_)
def curve25519_expand_reduce_r(in_):
out = (tt.bignum25519)()
CLIB.curve25519_expand_reduce(out, in_)
return out
def ge25519_check(r):
return int(CLIB.ge25519_check(ct.byref(r)))
def ge25519_eq(a, b):
return int(CLIB.ge25519_eq(ct.byref(a), ct.byref(b)))
def ge25519_copy(dst, src):
CLIB.ge25519_copy(ct.byref(dst), ct.byref(src))
def ge25519_copy_r(src):
dst = (tt.ge25519)()
CLIB.ge25519_copy(ct.byref(dst), ct.byref(src))
return dst
def ge25519_set_base(r):
CLIB.ge25519_set_base(ct.byref(r))
def ge25519_set_base_r():
r = (tt.ge25519)()
CLIB.ge25519_set_base(ct.byref(r))
return r
def ge25519_mul8(r, t):
CLIB.ge25519_mul8(ct.byref(r), ct.byref(t))
def ge25519_mul8_r(t):
r = (tt.ge25519)()
CLIB.ge25519_mul8(ct.byref(r), ct.byref(t))
return r
def ge25519_neg_partial(r):
CLIB.ge25519_neg_partial(ct.byref(r))
def ge25519_neg_partial_r():
r = (tt.ge25519)()
CLIB.ge25519_neg_partial(ct.byref(r))
return r
def ge25519_neg_full(r):
CLIB.ge25519_neg_full(ct.byref(r))
def ge25519_neg_full_r():
r = (tt.ge25519)()
CLIB.ge25519_neg_full(ct.byref(r))
return r
def ge25519_reduce(r, t):
CLIB.ge25519_reduce(ct.byref(r), ct.byref(t))
def ge25519_reduce_r(t):
r = (tt.ge25519)()
CLIB.ge25519_reduce(ct.byref(r), ct.byref(t))
return r
def ge25519_norm(r, t):
CLIB.ge25519_norm(ct.byref(r), ct.byref(t))
def ge25519_norm_r(t):
r = (tt.ge25519)()
CLIB.ge25519_norm(ct.byref(r), ct.byref(t))
return r
def ge25519_add(r, a, b, signbit):
CLIB.ge25519_add(ct.byref(r), ct.byref(a), ct.byref(b), signbit)
def ge25519_add_r(a, b, signbit):
r = (tt.ge25519)()
CLIB.ge25519_add(ct.byref(r), ct.byref(a), ct.byref(b), signbit)
return r
def ge25519_fromfe_frombytes_vartime(r, s):
CLIB.ge25519_fromfe_frombytes_vartime(ct.byref(r), s)
def ge25519_fromfe_frombytes_vartime_r(s):
r = (tt.ge25519)()
CLIB.ge25519_fromfe_frombytes_vartime(ct.byref(r), s)
return r
def ge25519_scalarmult_base_wrapper(r, s):
CLIB.ge25519_scalarmult_base_wrapper(ct.byref(r), s)
def ge25519_scalarmult_base_wrapper_r(s):
r = (tt.ge25519)()
CLIB.ge25519_scalarmult_base_wrapper(ct.byref(r), s)
return r
def ge25519_set_xmr_h(r):
CLIB.ge25519_set_xmr_h(ct.byref(r))
def ge25519_set_xmr_h_r():
r = (tt.ge25519)()
CLIB.ge25519_set_xmr_h(ct.byref(r))
return r
def xmr_random_scalar(m):
CLIB.xmr_random_scalar(m)
def xmr_random_scalar_r():
m = (tt.bignum256modm)()
CLIB.xmr_random_scalar(m)
return m
def xmr_fast_hash(hash, data, length):
CLIB.xmr_fast_hash(hash, ct.byref(data), length)
def xmr_hasher_init(hasher):
CLIB.xmr_hasher_init(ct.byref(hasher))
def xmr_hasher_init_r():
hasher = (tt.Hasher)()
CLIB.xmr_hasher_init(ct.byref(hasher))
return hasher
def xmr_hasher_final(hasher, hash):
CLIB.xmr_hasher_final(ct.byref(hasher), hash)
def xmr_hasher_final_r(hasher):
hash = tt.KEY_BUFF()
CLIB.xmr_hasher_final(ct.byref(hasher), hash)
return bytes(hash)
def xmr_hasher_copy(dst, src):
CLIB.xmr_hasher_copy(ct.byref(dst), ct.byref(src))
def xmr_hasher_copy_r(src):
dst = (tt.Hasher)()
CLIB.xmr_hasher_copy(ct.byref(dst), ct.byref(src))
return dst
def xmr_derivation_to_scalar(s, p, output_index):
CLIB.xmr_derivation_to_scalar(s, ct.byref(p), output_index)
def xmr_derivation_to_scalar_r(p, output_index):
s = (tt.bignum256modm)()
CLIB.xmr_derivation_to_scalar(s, ct.byref(p), output_index)
return s
def xmr_generate_key_derivation(r, A, b):
CLIB.xmr_generate_key_derivation(ct.byref(r), ct.byref(A), b)
def xmr_generate_key_derivation_r(A, b):
r = (tt.ge25519)()
CLIB.xmr_generate_key_derivation(ct.byref(r), ct.byref(A), b)
return r
def xmr_derive_private_key(s, deriv, idx, base):
CLIB.xmr_derive_private_key(s, ct.byref(deriv), idx, base)
def xmr_derive_private_key_r(deriv, idx, base):
s = (tt.bignum256modm)()
CLIB.xmr_derive_private_key(s, ct.byref(deriv), idx, base)
return s
def xmr_derive_public_key(r, deriv, idx, base):
CLIB.xmr_derive_public_key(ct.byref(r), ct.byref(deriv), idx, ct.byref(base))
def xmr_derive_public_key_r(deriv, idx, base):
r = (tt.ge25519)()
CLIB.xmr_derive_public_key(ct.byref(r), ct.byref(deriv), idx, ct.byref(base))
return r
def xmr_add_keys2(r, a, b, B):
CLIB.xmr_add_keys2(ct.byref(r), a, b, ct.byref(B))
def xmr_add_keys2_r(a, b, B):
r = (tt.ge25519)()
CLIB.xmr_add_keys2(ct.byref(r), a, b, ct.byref(B))
return r
def xmr_add_keys2_vartime(r, a, b, B):
CLIB.xmr_add_keys2_vartime(ct.byref(r), a, b, ct.byref(B))
def xmr_add_keys2_vartime_r(a, b, B):
r = (tt.ge25519)()
CLIB.xmr_add_keys2_vartime(ct.byref(r), a, b, ct.byref(B))
return r
def xmr_add_keys3(r, a, A, b, B):
CLIB.xmr_add_keys3(ct.byref(r), a, ct.byref(A), b, ct.byref(B))
def xmr_add_keys3_r(a, A, b, B):
r = (tt.ge25519)()
CLIB.xmr_add_keys3(ct.byref(r), a, ct.byref(A), b, ct.byref(B))
return r
def xmr_add_keys3_vartime(r, a, A, b, B):
CLIB.xmr_add_keys3_vartime(ct.byref(r), a, ct.byref(A), b, ct.byref(B))
def xmr_add_keys3_vartime_r(a, A, b, B):
r = (tt.ge25519)()
CLIB.xmr_add_keys3_vartime(ct.byref(r), a, ct.byref(A), b, ct.byref(B))
return r
def xmr_get_subaddress_secret_key(r, major, minor, m):
CLIB.xmr_get_subaddress_secret_key(r, major, minor, m)
def xmr_get_subaddress_secret_key_r(major, minor, m):
r = (tt.bignum256modm)()
CLIB.xmr_get_subaddress_secret_key(r, major, minor, m)
return r
def xmr_gen_c(r, a, amount):
CLIB.xmr_gen_c(ct.byref(r), a, amount)
def xmr_gen_c_r(a, amount):
r = (tt.ge25519)()
CLIB.xmr_gen_c(ct.byref(r), a, amount)
return r
def xmr_gen_range_sig(sig, C, mask, amount, last_mask):
CLIB.xmr_gen_range_sig(ct.byref(sig), ct.byref(C), mask, amount, ct.byref(last_mask))
def xmr_gen_range_sig_r(amount, last_mask):
    # Allocating variant: builds the output structs itself and returns
    # (range signature, commitment C, blinding mask) as a tuple.
    sig = (tt.xmr_range_sig_t)()
    C = (tt.ge25519)()
    mask = (tt.bignum256modm)()
    CLIB.xmr_gen_range_sig(ct.byref(sig), ct.byref(C), mask, amount, ct.byref(last_mask))
    return sig, C, mask
def xmr_gen_range_sig_ex(sig, C, mask, amount, last_mask, ai, alpha):
CLIB.xmr_gen_range_sig_ex(ct.byref(sig), ct.byref(C), mask, amount, ct.byref(last_mask), ai, alpha)
|
class Atom:
    """A propositional literal with an optional negation flag.

    Instances are hashable and compared by value; ``neg`` returns a new
    Atom rather than mutating in place.
    """

    def __init__(self, neg, literal):
        # neg: bool -- True when the literal is negated.
        # literal: the underlying proposition (typically a string).
        self.negated = neg
        self.literal = literal

    def neg(self):
        """Return a new Atom with the negation flipped."""
        return Atom(not self.negated, self.literal)

    def contrary(self, atom):
        """Return True if *atom* is exactly the negation of this atom."""
        return self.neg() == atom

    def __hash__(self):
        return hash((self.negated, self.literal))

    def __eq__(self, other):
        return (isinstance(other, type(self))
                and self.negated == other.negated
                and self.literal == other.literal)

    def __repr__(self):
        # BUG FIX: the positive branch previously returned self.literal
        # unconverted, so repr() raised TypeError for non-string literals
        # while the negated branch already used str(). Both branches now
        # convert consistently.
        return ("!" + str(self.literal)) if self.negated else str(self.literal)
|
class Solution:
    def longestPalindrome(self, words: "List[str]") -> int:
        """Return the length of the longest palindrome buildable by
        concatenating a subset of the two-letter *words*.

        Mirror pairs ("ab"/"ba") contribute 4 characters each.  A
        double-letter word ("gg") pairs with an identical word, and at most
        one leftover double-letter word may sit in the middle, adding 2.

        BUG FIX: the annotation is quoted because `List` was never imported
        (`from typing import List` is missing), so evaluating the annotation
        raised NameError when this class was defined.
        """
        from collections import Counter
        counts = Counter(words)
        total = 0
        has_middle = False
        for word, cnt in counts.items():
            rev = word[::-1]
            if word == rev:
                # "xx": every pair adds 4 chars; one odd leftover can be
                # the palindrome's middle (+2, added once at the end).
                total += (cnt // 2) * 4
                if cnt % 2:
                    has_middle = True
            elif word < rev:
                # Count each mirror pair once (word < rev avoids double
                # counting); Counter returns 0 for an absent reverse.
                total += min(cnt, counts[rev]) * 4
        if has_middle:
            total += 2
        return total
"""
Crear un programa que cambie todas las 'A' o 'a' por la strin 'VACA' de una string introducida por el usuario
"""
string_usuario=input("Escribe una frase: ")
mi_string="VACA"
frase_final=""
contador=0
for caracter in string_usuario:
if caracter == "a" or caracter == "A":
frase_final+=mi_string
else:
frase_final+=string_usuario[contador]
contador+=1
print("{}".format(frase_final)) |
import json
import csv
import boto3
iam = boto3.client("iam")
marker = None
field = ['UserName', 'Effect', 'Action', 'NotAction', 'Resource', 'Condition', 'Permission Source']
row = []
paginator = iam.get_paginator('list_users')
response_iterator = paginator.paginate( PaginationConfig={'PageSize': 1000,'StartingToken': marker})
for page in response_iterator:
u = page['Users']
for user in u:
# print(user['UserName'])
print("Fetching IAM permissions for "+user['UserName'])
inline_user_policies=iam.list_user_policies(UserName=user['UserName'])
managed_policies= iam.list_attached_user_policies(UserName=user['UserName'])
groups=iam.list_groups_for_user(UserName=user['UserName'])
if len(groups['Groups']) > 0:
for group in groups['Groups']:
group_inline_policies = iam.list_group_policies(GroupName=group['GroupName'])
group_managed_policies = iam.list_attached_group_policies(GroupName=group['GroupName'])
if len(group_inline_policies['PolicyNames']) > 0:
for policy in group_inline_policies['PolicyNames']:
group_inline_policiy_detail= iam.get_group_policy(GroupName=group['GroupName'],PolicyName=policy)
data=json.dumps(group_inline_policiy_detail['PolicyDocument'])
permissions=json.loads(data)['Statement']
for permission in permissions:
# print(permission)
row.append(permission)
if len(group_managed_policies['AttachedPolicies']) > 0:
for policy in group_managed_policies['AttachedPolicies']:
group_managed_policiy_detail= iam.get_policy(PolicyArn=policy['PolicyArn'])
policy_version = iam.get_policy_version(PolicyArn = policy['PolicyArn'], VersionId = group_managed_policiy_detail['Policy']['DefaultVersionId'])
data=json.dumps(policy_version['PolicyVersion']['Document'])
permissions=json.loads(data)['Statement']
for permission in permissions:
# print(permission)
row.append(permission)
if len(inline_user_policies['PolicyNames']) > 0:
for policy in inline_user_policies['PolicyNames']:
user_inline_policiy_detail= iam.get_user_policy(UserName=user['UserName'],PolicyName=policy)
data=json.dumps(user_inline_policiy_detail['PolicyDocument'])
permissions=json.loads(data)['Statement']
for permission in permissions:
# print(permission)
row.append(permission)
if len(managed_policies['AttachedPolicies']) >0:
for policy in managed_policies['AttachedPolicies']:
user_managed_policiy_detail= iam.get_policy(PolicyArn=policy['PolicyArn'])
policy_version = iam.get_policy_version(PolicyArn = policy['PolicyArn'], VersionId = user_managed_policiy_detail['Policy']['DefaultVersionId'])
data=json.dumps(policy_version['PolicyVersion']['Document'])
permissions=json.loads(data)['Statement']
for permission in permissions:
# print(permission)
row.append(permission)
print(row)
filename = 'iam-user-audit.csv'
# BUG FIX: the original called csv.DictWriter(..., fieldnames=list(fieldnames, rows)),
# which raises TypeError (list() takes a single argument), and it never wrote
# the collected statements -- only the header.  newline='' is required by the
# csv module so rows are not double-spaced on Windows.
with open(filename, 'w', newline='', encoding='utf-8') as outfile:
    # Statements may carry keys outside `field` (e.g. 'Sid'); ignore them
    # instead of raising ValueError.  Missing keys are written as ''.
    writer = csv.DictWriter(outfile, fieldnames=field, extrasaction='ignore')
    writer.writeheader()
    # NOTE(review): the statement dicts never include 'UserName' or
    # 'Permission Source'; those columns stay empty unless populated when the
    # permissions are collected above -- TODO.
    writer.writerows(row)
from chaban.utils import MetaSingleton
class _Helper(metaclass=MetaSingleton):
    # Minimal fixture: MetaSingleton should make every _Helper(...) call
    # return the very first instance created, regardless of arguments.
    def __init__(self, a):
        self.a = a
def test_attrs():
    # The second construction's argument (2) must be ignored: both names
    # refer to the singleton created with a=1.
    x = _Helper(1)
    y = _Helper(2)
    assert x.a == y.a == 1
def test_is():
    # Identity check: the singleton metaclass must return the same object,
    # not merely an equal one.
    x = _Helper(1)
    y = _Helper(2)
    assert x is y
|
f = open("restricted_foods.csv").read()
f = f.lower()
lines = f.split('\n')
header = lines[0]
content = lines[1:]
del f
del lines
headers = [h.strip() for h in header.split(',')]
#print(headers)
c = []
for line in content:
aux = [i.strip() for i in line.split(',')]
assert(len(aux) == 19)
c.append(aux)
'''
is_fruit 1 empty
too_much_fructose 2 empty
we_sell 3 not_empty
gluten 4 empty
is_molluscs 5 empty
glucose 6 empty
celery_allergy 7 empty
egg allergy 8
fish 9 ignore
mustard 10 empty
tree_nut 11 empty
sesame 12 empty
soybeans 13 empty
sulfure_dioxide 14 empty
sulfites 15 empty
coconut allergy 16 empty
part_of_diet 17 ignore
"" 18 ignore
'''
def all_true(x):
    """Return True when every element of *x* is truthy (True for empty *x*).

    Kept for backward compatibility with existing callers; the manual loop
    was a reimplementation of the builtin all().
    """
    return all(x)
ans = []
for ingredient in c:
    name = ingredient[0]
    # Keep an ingredient only when every restriction column is empty
    # (columns 1-15, skipping 3 and 9) and the "we_sell" column (3) is
    # non-empty.  Columns 9 (fish), 17 and 18 are deliberately ignored,
    # per the legend above.
    empty_cols = (1, 2, 4, 5, 6, 7, 8, 10, 11, 12, 13, 14, 15)
    booleans = [ingredient[col] == "" for col in empty_cols]
    booleans.append(ingredient[3] != "")
    if all_true(booleans):
        ans.append(name)
ans = set(ans)
print(ans)
# BUG FIX: the output file was never closed; `with` guarantees it is
# flushed and closed even if printing fails.
with open("food_list_final_final.txt", 'w') as f:
    for i in ans:
        print(i, file=f)
from django.http import HttpResponse
from django.shortcuts import render
import operator
def home(requests):
    # Render the word-count app landing page.
    return render_template_stub if False else render(requests,'wcount/home.html')
def me(requests):
    # Render the static "about me" page.
    return render(requests,'wcount/me.html')
def hobies(requests):
    # Return a raw HTML response (no template).  NOTE(review): name is a
    # typo for "hobbies" but renaming would break any URLconf referencing it.
    return HttpResponse('<h1>Playing badminton, Listening to Music.</h1>')
# Create your views here.
|
from xlwt import Workbook
from tkinter.filedialog import asksaveasfile

# Build a one-column spreadsheet of movie titles and let the user pick
# where to save it.
wb_obj = Workbook()
my_sheet = wb_obj.add_sheet('Imdb')
my_sheet.write(0, 0, 'Title')
my_sheet.write(1, 0, 'Joker')
my_sheet.write(2, 0, 'Interstellar')
my_sheet.write(3, 0, 'Inception')
my_sheet.write(4, 0, 'Avengers Endgame')
# BUG FIX: xlwt writes the legacy binary .xls format, so the save dialog
# must default to '.xls'; the previous '.csv' default produced a binary
# file with a CSV extension that spreadsheet apps refuse to open.
f = asksaveasfile(mode='w', defaultextension='.xls')
if f is not None:
    wb_obj.save(f.name)
    f.close()
# encoding: utf-8
import matplotlib as mpl
import matplotlib.pyplot as plt
import os
from natsort import natsorted, ns
import numpy
import re
import xlsxwriter
def start():
    """Read simulation result files, extract throughput/delay metrics and
    write a summary table to an Excel workbook (tabela.xlsx).
    """
    directory = 'result_exe1/'
    arq = os.listdir(directory)
    # Natural sort so e.g. cenario_2 comes before cenario_10.
    arquivosDiretorio = natsorted(arq, alg=ns.IGNORECASE)
    size = len(arquivosDiretorio)
    # Each filename encodes two integers: (alfa, beta) -- TODO confirm naming scheme.
    cenarios = [re.findall(r'\d+', nome) for nome in arquivosDiretorio]
    alfa, beta = zip(*cenarios)
    throughput_list = []
    queueingDelay_list = []
    signalDelay_list = []
    for i in range(size):
        with open(directory + '/' + arquivosDiretorio[i], 'r') as f:
            try:
                lines = f.read().splitlines()
                # Fixed line/column positions in the result files -- TODO confirm format.
                throughput_list.append(lines[4].split()[2])
                queueingDelay_list.append(lines[5].split()[5])
                signalDelay_list.append(lines[6].split()[4])
            except Exception:
                print('Erro na leitura dos arquivos')
    x = [float(i) for i in signalDelay_list]   # delay in ms
    y = [float(i) for i in throughput_list]    # throughput in Mbps
    # "Power" metric: throughput / delay, with delay converted ms -> s.
    w = [y[i] / (x[i] * 0.001) for i in range(len(x))]
    x_str = ["{:.2f}".format(v) for v in x]
    y_str = ["{:.2f}".format(v) for v in y]
    w_str = ["{:.2f}".format(v) for v in w]
    # BUG FIX: zip() returns an iterator on Python 3, which has no .insert();
    # materialize it as a list before prepending the header row.
    dados = list(zip(alfa, beta, y_str, x_str, w_str))
    head = ('Alfa', 'Beta', 'Throughput (Mbps)', 'Atraso (ms)', 'Potencia')
    dados.insert(0, head)
    # BUG FIX: xlsxwriter emits .xlsx content; name the file accordingly so
    # Excel does not reject it as a mislabeled .xls.
    workbook = xlsxwriter.Workbook('tabela.xlsx')
    worksheet = workbook.add_worksheet('tab')
    for row, item in enumerate(dados):
        worksheet.write_row(row, 0, item)
    workbook.close()
|
# We'll call the data inside a list "elements".
print("===요소 수정===")
a = [1, 2, 3]
a[1] = 22
print(a)
# Slicing and indexing give different results when assigning to a list.
a = [1, 2, 3]
a[1:2] = ["a", "b", "c"]
print(a)
a = [1, 2, 3]
a[1] = ["a", "b", "c"]
print(a)
print("\n===요소 삭제===")
a = [1, 2, 3, 4, 5]
a[1:3] = []
print(a)
a = [1, 2, 3, 4, 5]
a[1] = []
a[2] = []
print(a)
# del removes the data in the given range, whether indexed or sliced.
del a[2:4]
print(a)
# del is a general Python statement, not a list-specific operation.
a = 5
del a
# print(a)  # the name `a` has been deleted, so this would raise NameError.
# Strings are immutable: once created they cannot be modified or have
# characters deleted.
b = "itbank"
# b[3] = " "
# del b[3]
|
"""
This module implements training and evaluation of a multi-layer perceptron in PyTorch.
You should fill in code into indicated sections.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import numpy as np
import os
from mlp_pytorch import MLP
import cifar10_utils
import torch
import torch.nn as nn
# Default constants
DNN_HIDDEN_UNITS_DEFAULT = '100'
LEARNING_RATE_DEFAULT = 1e-4
MAX_STEPS_DEFAULT = 3000
BATCH_SIZE_DEFAULT = 200
EVAL_FREQ_DEFAULT = 100
# Directory in which cifar data is saved
DATA_DIR_DEFAULT = './cifar10/cifar-10-batches-py'
FLAGS = None
def accuracy(predictions, targets):
    """
    Computes the prediction accuracy, i.e. the average of correct predictions
    of the network.
    Args:
      predictions: 2D float torch tensor of size [batch_size, n_classes]
      targets: 2D one-hot array or tensor of size [batch_size, n_classes]
    Returns:
      accuracy: scalar float, the fraction of samples whose argmax
                prediction matches the one-hot target.
    """
    # BUG FIX: the original mixed frameworks -- np.argmax on the targets but
    # torch.eq against a torch tensor -- which fails for plain numpy targets.
    # Convert once, then stay entirely in torch.
    if not torch.is_tensor(targets):
        targets = torch.as_tensor(targets)
    pred = torch.argmax(predictions, dim=1)
    true = torch.argmax(targets, dim=1)
    return (pred == true).float().mean().item()
def plot_loss_accuracy (max_epoch, eval_freq, trainLoss_list, testlLoss_list, trainAccuracy_list, testAccuracy_list):
    """Plot train/test loss (left) and accuracy (right) side by side.

    The x axis samples epochs 1, 1+eval_freq, ... since metrics are only
    recorded every eval_freq steps.
    NOTE(review): metrics are appended at epochs 0, eval_freq, ... while the
    x axis starts at 1 -- presumably a one-off offset; confirm the lists and
    np.arange always have equal length or plot() will raise.
    """
    import matplotlib.pyplot as plt
    fig = plt.figure()
    ax1 = fig.add_subplot(121)
    ax1.plot(np.arange(1,max_epoch+1,eval_freq), trainLoss_list, label='train losses' )
    ax1.plot(np.arange(1,max_epoch+1,eval_freq), testlLoss_list, label='test losses' )
    ax1.legend()
    plt.title('loss')
    plt.xlabel('epoch')
    plt.ylabel('loss')
    ax2 = fig.add_subplot(122)
    ax2.plot(np.arange(1,max_epoch+1,eval_freq) ,trainAccuracy_list, label='train accuracy' )
    ax2.plot(np.arange(1,max_epoch+1,eval_freq) ,testAccuracy_list, label='test accuracies')
    ax2.legend()
    plt.title('accuracy')
    plt.xlabel('epoch')
    plt.ylabel('accuracy')
    plt.show()
    return
def train():
    """Train an MLP on CIFAR-10, evaluating every FLAGS.eval_freq steps.

    Uses the architecture from --dnn_hidden_units and Adam with
    --learning_rate, then plots train/test loss and accuracy curves.
    """
    np.random.seed(42)
    # Parse hidden layer sizes from a comma-separated string such as "100,100".
    if FLAGS.dnn_hidden_units:
        dnn_hidden_units = FLAGS.dnn_hidden_units.split(",")
        dnn_hidden_units = [int(dnn_hidden_unit_) for dnn_hidden_unit_ in dnn_hidden_units]
    else:
        dnn_hidden_units = []
    batch_size = FLAGS.batch_size
    max_epoch = FLAGS.max_steps
    n_inputs = 3072  # 3 * 32 * 32 flattened RGB image
    n_classes = 10
    # BUG FIX: dnn_hidden_units was unconditionally overwritten with
    # [300, 500, 100] here, silently ignoring the --dnn_hidden_units flag.
    model = MLP(n_inputs, dnn_hidden_units, n_classes)
    lossFunc = nn.CrossEntropyLoss()
    # BUG FIX: honor --learning_rate and --data_dir instead of always using
    # the module-level default constants.
    optimizer = torch.optim.Adam(model.parameters(), lr=FLAGS.learning_rate)
    cifar_data = cifar10_utils.get_cifar10(FLAGS.data_dir)
    # NOTE(review): evaluation uses a single fixed test batch, not the whole
    # test set -- confirm this is intended.
    test_img, test_labels = cifar_data['test'].next_batch(batch_size)
    test_img = test_img.reshape([test_img.shape[0], -1])
    test_img = torch.from_numpy(test_img).type(torch.FloatTensor)
    test_labels = torch.from_numpy(test_labels).type(torch.FloatTensor)
    trainLoss_list, testlLoss_list, trainAccuracy_list, testAccuracy_list = [], [], [], []
    for epoch in range(0, max_epoch):
        train_img, train_labels = cifar_data['train'].next_batch(batch_size)
        train_img = train_img.reshape([train_img.shape[0], -1])
        train_img = torch.from_numpy(train_img).type(torch.FloatTensor)
        train_labels = torch.from_numpy(train_labels).type(torch.FloatTensor)
        optimizer.zero_grad()  # clear gradients accumulated by the previous step
        prediction_onTrainset = model.forward(train_img)
        # CrossEntropyLoss expects class indices, hence argmax on one-hot labels.
        loss = lossFunc(prediction_onTrainset, train_labels.argmax(1))
        loss.backward()
        optimizer.step()
        # BUG FIX: removed a stray retain_grad() call after the step; it had
        # no effect (the original author's own comment questioned it).
        if epoch % FLAGS.eval_freq == 0:
            print("epo", epoch)
            # Recomputing the loss is unnecessary: predictions were made
            # before the step, so `loss` already holds the train loss.
            trainLoss_list.append(loss.item())
            prediction_onTestset = model.forward(test_img)
            testLoss = lossFunc(prediction_onTestset, test_labels.argmax(1)).item()
            testlLoss_list.append(testLoss)
            trainAccuracy_list.append(accuracy(prediction_onTrainset, train_labels))
            testAccuracy_list.append(accuracy(prediction_onTestset, test_labels))
    print("trainLoss_list{} \ntestlLoss_list{} \ntrainAccuracy_list{} \ntestAccuracy_list{}".format(
        trainLoss_list, testlLoss_list, trainAccuracy_list, testAccuracy_list))
    plot_loss_accuracy(max_epoch, FLAGS.eval_freq, trainLoss_list, testlLoss_list,
                       trainAccuracy_list, testAccuracy_list)
    return
def print_flags():
    """Print every parsed command-line flag as `name : value`."""
    for name, value in vars(FLAGS).items():
        print("{} : {}".format(name, value))
def main():
    """Entry point: echo the flags, ensure the data directory exists, train."""
    print_flags()
    if not os.path.exists(FLAGS.data_dir):
        os.makedirs(FLAGS.data_dir)
    # Run the training operation
    train()
if __name__ == '__main__':
# Command line arguments
parser = argparse.ArgumentParser()
parser.add_argument('--dnn_hidden_units', type=str, default=DNN_HIDDEN_UNITS_DEFAULT,
help='Comma separated list of number of units in each hidden layer')
parser.add_argument('--learning_rate', type=float, default=LEARNING_RATE_DEFAULT,
help='Learning rate')
parser.add_argument('--max_steps', type=int, default=MAX_STEPS_DEFAULT,
help='Number of steps to run trainer.')
parser.add_argument('--batch_size', type=int, default=BATCH_SIZE_DEFAULT,
help='Batch size to run trainer.')
parser.add_argument('--eval_freq', type=int, default=EVAL_FREQ_DEFAULT,
help='Frequency of evaluation on the test set')
parser.add_argument('--data_dir', type=str, default=DATA_DIR_DEFAULT,
help='Directory for storing input data')
FLAGS, unparsed = parser.parse_known_args()
main()
|
# %% imports
import matplotlib
import matplotlib.pyplot as plt
import matplotlib.animation as animation
plt.ion()
import sys
import time
import pathlib
import numpy as np
import pandas as pd
import scipy.io.wavfile
import libtiff
_code_git_version="11d174e8861127a6b334e9795795573452655401"
_code_repository="https://github.com/plops/cl-py-generator/tree/master/example/29_ondrejs_challenge/source/run_00_start.py"
_code_generation_time="14:01:13 of Saturday, 2020-11-28 (GMT+1)"
fn="supplementary_materials/zdravice.wav"
rate, a=scipy.io.wavfile.read(fn)
# => 11025, array with 242550x2 elements, int16
# Interpret the stereo channels as the real/imaginary parts of one complex signal.
z=((a[:,0])+(((1j)*(a[:,1]))))
# NOTE(review): fftshift is applied to raw time-domain samples before the
# inverse FFT; fftshift normally reorders frequency-domain data, so this looks
# puzzle-specific -- confirm the intent.
zs=np.fft.fftshift(z)
k=np.fft.ifft(zs)
kr=np.real(k)
ki=np.imag(k)
# NOTE(review): kr is float64, so this writes a 64-bit float WAV; convert to
# int16 if a standard PCM file is expected -- TODO confirm.
scipy.io.wavfile.write("/dev/shm/r.wav", rate, kr)
import unittest
from katas.kyu_6.iq_test import iq_test
class IQTestCase(unittest.TestCase):
    """Tests for katas.kyu_6.iq_test.iq_test.

    Per these cases, iq_test takes a space-separated string of ints and
    returns the 1-based position of the number whose parity differs from
    the rest.
    """
    def test_equals(self):
        self.assertEqual(iq_test('2 4 7 8 10'), 3)
    def test_equals_2(self):
        self.assertEqual(iq_test('1 2 1 1'), 2)
    def test_equals_3(self):
        self.assertEqual(iq_test('1 2 2'), 1)
|
from flask import Flask, render_template, request, Markup
import sqlite3 as sql
from flask import flash, redirect, session, abort
from datetime import date, datetime
app = Flask(__name__)
app.secret_key = "super secret key"
@app.route('/')
def home():
if not session.get('logged_in'):
return render_template('login.html')
else:
return render_template('home.html')
@app.route('/login', methods=['POST'])
def do_admin_login():
    """Validate the doctor login form and set the session flag.

    NOTE(review): serious authentication flaws -- this fetches EVERY
    doctor's password and the loop leaves `pswd` holding only the LAST row,
    so the submitted password is compared against one arbitrary account and
    `usr` is never checked at all.  The query should filter by the username
    and passwords should be stored hashed, not in plain text.  If the table
    is empty, `pswd` is unbound and the comparison raises NameError.
    """
    usr = request.form['username']
    pwd = request.form['password']
    conn = sql.connect("admin.db")
    cur = conn.cursor()
    cur.execute("SELECT password FROM doctors")
    rows = cur.fetchall()
    for row in rows:
        pswd=row[0]
        print(pswd)  # NOTE(review): logs a plaintext password to stdout
    conn.close()
    #print(pswd,pwd)
    if pwd==pswd:
        session['logged_in'] = True
    else:
        flash('wrong password!')
    return home()
@app.route("/logout")
def logout():
session['logged_in'] = False
return home()
@app.route('/plogin')
def phome():
if not session.get('logged_in'):
return render_template('login2.html')
else:
return plist()
@app.route('/login2', methods=['POST'])
def do_patient_login():
usr = request.form['username']
pwd = request.form['password']
conn = sql.connect("patient.db")
cur = conn.cursor()
cur.execute("SELECT password FROM patients where pid=?",(usr,))
rows = cur.fetchall()
for row in rows:
pswd=row[0]
print(pswd)
conn.close()
#print(pswd,pwd)
if pwd==pswd:
session['logged_in'] = usr
else:
flash('wrong password!')
return phome()
@app.route('/enternew')
def new_student():
if not session.get('logged_in'):
return render_template('login.html')
return render_template('student.html')
@app.route('/faq')
def faq():
if not session.get('logged_in'):
return render_template('login.html')
return render_template('doctq.html')
@app.route('/findp')
def find_student():
if not session.get('logged_in'):
return render_template('login.html')
return render_template('find.html')
@app.route('/addrec',methods = ['POST', 'GET'])
def addrec():
    """Insert a new patient record from the submitted form.

    Renders result.html with a success or failure message on POST.
    NOTE(review): the password is stored in plain text -- it should be
    hashed (e.g. werkzeug.security.generate_password_hash).
    """
    if not session.get('logged_in'):
        return render_template('login.html')
    if request.method == 'POST':
        try:
            i = request.form['id']
            p = request.form['psswd']
            n = request.form['nme']
            h = request.form['hlth']
            today = date.today()
            me = request.form['medicine']
            v = request.form['val']
            con = sql.connect("patient.db")
            try:
                cur = con.cursor()
                cur.execute(
                    "INSERT INTO patients (pid,password,pname,visit,health,medication,value) VALUES (?,?,?,?,?,?,?)",
                    (i, p, n, today, h, me, v))
                con.commit()
                msg = "Record successfully added"
            except sql.Error:
                con.rollback()
                msg = "error in insert operation"
            finally:
                # BUG FIX: the original con.close() sat AFTER a `return`
                # inside `finally`, so it never ran and the connection leaked.
                con.close()
        except KeyError:
            # A missing form field previously fell into the same bare except.
            msg = "error in insert operation"
        return render_template("result.html", msg=msg)
@app.route('/addque',methods = ['POST', 'GET'])
def addque():
    """Forward a submitted Q&A entry to the remote PHP endpoint.

    Orthopedics/physiotherapy questions go to addhealth.php, everything
    else to addhealthnut.php; the browser is redirected to that URL.
    """
    if not session.get('logged_in'):
        return render_template('login.html')
    from urllib.parse import urlencode  # stdlib; local import, no new top-level dep
    try:
        qu = request.form['ques']
        an = request.form['ans']
        tp = request.form['typ']
        ch = request.form['choose_one']
    except KeyError:
        print("Error")
        # BUG FIX: the original's `finally: return redirect(url, ...)` raised
        # NameError here because `url` was never assigned; fail explicitly.
        abort(400)
    # BUG FIX: parameters were concatenated raw, so spaces, '&' or '#' in a
    # question corrupted the URL; percent-encode them.
    query = urlencode({'questions': qu, 'answers': an, 'category': tp})
    base = 'http://chahalacademyexpenditures.hostingerapp.com/'
    page = 'addhealth.php' if ch == 'Orthopedics/ Physiotherapy' else 'addhealthnut.php'
    url = base + page + '?' + query
    print(url)
    return redirect(url, code=302)
@app.route('/find',methods = ['POST', 'GET'])
def find():
if not session.get('logged_in'):
return render_template('login.html')
nm = request.form['nm']
con = sql.connect("patient.db")
con.row_factory = sql.Row
cur = con.cursor()
cur.execute("select * from patients where pid=?",(nm,))
rows = cur.fetchall();
return render_template("list2.html",rows = rows)
@app.route('/find2',methods = ['POST', 'GET'])
def find2():
if not session.get('logged_in'):
return render_template('login.html')
nm=session.get('logged_in')
con = sql.connect("patient.db")
con.row_factory = sql.Row
cur = con.cursor()
cur.execute("select visit from patients where pid=?",(nm,))
rows = cur.fetchall();
labels=[]
for row in rows:
labels.append(row[0])
cur = con.cursor()
cur.execute("select value from patients where pid=?",(nm,))
rows = cur.fetchall();
values=[]
for row in rows:
values.append(row[0])
print(values)
print(labels)
return render_template("chart.html",values=values, labels=labels)
@app.route('/list')
def list():
if not session.get('logged_in'):
return render_template('login.html')
con = sql.connect("patient.db")
con.row_factory = sql.Row
cur = con.cursor()
cur.execute("select * from patients ")
rows = cur.fetchall();
return render_template("list2.html",rows = rows)
@app.route('/plist')
def plist():
if not session.get('logged_in'):
return render_template('login.html')
m=session.get('logged_in')
con = sql.connect("patient.db")
con.row_factory = sql.Row
cur = con.cursor()
cur.execute("select * from patients where pid=?",(m,))
rows = cur.fetchall();
return render_template("list.html",rows = rows)
if __name__ == '__main__':
app.run(debug = True,host= '0.0.0.0')
|
#!/Users/duffrind/miniconda3/bin/python
from app import app
# Launch the Flask development server.
# NOTE(review): debug=True enables the Werkzeug debugger (arbitrary code
# execution in the browser) -- never use in production.
app.run(debug=True)
#if __name__ == '__main__':
#    app.run(debug=True)
from _typeshed import Incomplete
from collections.abc import Generator
def graph_edit_distance(
G1,
G2,
node_match: Incomplete | None = None,
edge_match: Incomplete | None = None,
node_subst_cost: Incomplete | None = None,
node_del_cost: Incomplete | None = None,
node_ins_cost: Incomplete | None = None,
edge_subst_cost: Incomplete | None = None,
edge_del_cost: Incomplete | None = None,
edge_ins_cost: Incomplete | None = None,
roots: Incomplete | None = None,
upper_bound: Incomplete | None = None,
timeout: Incomplete | None = None,
): ...
def optimal_edit_paths(
G1,
G2,
node_match: Incomplete | None = None,
edge_match: Incomplete | None = None,
node_subst_cost: Incomplete | None = None,
node_del_cost: Incomplete | None = None,
node_ins_cost: Incomplete | None = None,
edge_subst_cost: Incomplete | None = None,
edge_del_cost: Incomplete | None = None,
edge_ins_cost: Incomplete | None = None,
upper_bound: Incomplete | None = None,
): ...
# Type-stub signature only; note the generator return type (yields
# successively better estimates).
def optimize_graph_edit_distance(
    G1,
    G2,
    node_match: Incomplete | None = None,
    edge_match: Incomplete | None = None,
    node_subst_cost: Incomplete | None = None,
    node_del_cost: Incomplete | None = None,
    node_ins_cost: Incomplete | None = None,
    edge_subst_cost: Incomplete | None = None,
    edge_del_cost: Incomplete | None = None,
    edge_ins_cost: Incomplete | None = None,
    upper_bound: Incomplete | None = None,
) -> Generator[Incomplete, None, None]: ...
# Type-stub signature only; generator with a non-None return value
# (Generator[..., ..., Incomplete]).
def optimize_edit_paths(
    G1,
    G2,
    node_match: Incomplete | None = None,
    edge_match: Incomplete | None = None,
    node_subst_cost: Incomplete | None = None,
    node_del_cost: Incomplete | None = None,
    node_ins_cost: Incomplete | None = None,
    edge_subst_cost: Incomplete | None = None,
    edge_del_cost: Incomplete | None = None,
    edge_ins_cost: Incomplete | None = None,
    upper_bound: Incomplete | None = None,
    strictly_decreasing: bool = True,
    roots: Incomplete | None = None,
    timeout: Incomplete | None = None,
) -> Generator[Incomplete, None, Incomplete]: ...
# Type-stub signature only; defaults are declared here for tooling.
def simrank_similarity(
    G,
    source: Incomplete | None = None,
    target: Incomplete | None = None,
    importance_factor: float = 0.9,
    max_iterations: int = 1000,
    tolerance: float = 0.0001,
): ...
# Type-stub signature only; ``source`` is required, the rest are tunables.
def panther_similarity(
    G,
    source,
    k: int = 5,
    path_length: int = 5,
    c: float = 0.5,
    delta: float = 0.1,
    eps: Incomplete | None = None,
): ...
# Type-stub signature only; yields sampled paths lazily.
def generate_random_paths(
    G, sample_size, path_length: int = 5, index_map: Incomplete | None = None
) -> Generator[Incomplete, None, None]: ...
|
import pandas as pd
import numpy as np
import datetime
import lightgbm as lgb
from sklearn.metrics import f1_score
from sklearn.model_selection import train_test_split
from sklearn.model_selection import KFold
from sklearn.model_selection import StratifiedKFold
# Show all columns when printing frames during development.
pd.set_option('display.max_columns', None)
# Pre-built feature tables from an earlier pipeline step.
df_train=pd.read_csv('../output/df_train.csv')
df_test=pd.read_csv('../output/df_test.csv')
# Raw JData tables: users, per-SKU comments, shops.
df_user=pd.read_csv('../data/jdata_user.csv')
df_comment=pd.read_csv('../data/jdata_comment.csv')
df_shop=pd.read_csv('../data/jdata_shop.csv')
# 1) Behaviour log (jdata_action).
jdata_action = pd.read_csv('../data/jdata_action.csv')
# 3) Product catalogue (jdata_product).
jdata_product = pd.read_csv('../data/jdata_product.csv')
# Attach product attributes (cate, shop_id, ...) to every action row.
jdata_data = jdata_action.merge(jdata_product,on=['sku_id'])
# Positive labels: (user, cate, shop) triples with a purchase (type==2)
# in the label week 2018-04-09 .. 2018-04-15.
train_buy = jdata_data[(jdata_data['action_time']>='2018-04-09') \
& (jdata_data['action_time']<'2018-04-16') \
& (jdata_data['type']==2)][['user_id','cate','shop_id']].drop_duplicates()
train_buy['label'] = 1
# Candidate set: triples with any behaviour during 2018-03-19 .. 2018-04-08
# (the weeks preceding the label week); unmatched labels become 0.
train_set = jdata_data[(jdata_data['action_time']>='2018-03-19') \
& (jdata_data['action_time']<'2018-04-09')][['user_id','cate','shop_id']].drop_duplicates()
train_set = train_set.merge(train_buy,on=['user_id','cate','shop_id'],how='left').fillna(0)
train_set = train_set.merge(df_train,on=['user_id','cate','shop_id'],how='left')
def mapper(x):
    """Convert a 'YYYY...' registration-date string to years before 2018.

    Returns None for missing values, matching the original's implicit
    return; the later ``fillna`` imputes these.

    Bug fixed: the original tested ``x is not np.nan`` -- an *identity*
    check that only catches the np.nan singleton.  A float NaN produced by
    parsing (``float('nan')``) passed the check and then crashed on
    ``x[:4]`` with a TypeError.  An explicit isinstance check is robust to
    every non-string missing value.
    """
    if isinstance(x, str):
        return 2018 - int(x[:4])
    return None
df_user['user_reg_tm']=df_user['user_reg_tm'].apply(lambda x:mapper(x))
df_shop['shop_reg_tm']=df_shop['shop_reg_tm'].apply(lambda x:mapper(x))
df_shop['shop_reg_tm']=df_shop['shop_reg_tm'].fillna(df_shop['shop_reg_tm'].mean())
df_user['age']=df_user['age'].fillna(df_user['age'].mean())
df_comment=pd.read_csv('../data/jdata_comment.csv')
df_comment=df_comment.groupby(['sku_id'],as_index=False).sum()
df_product=pd.read_csv('../data/jdata_product.csv')
df_product_comment=pd.merge(df_product,df_comment,on='sku_id',how='left')
df_product_comment=df_product_comment.fillna(0)
df_product_comment=df_product_comment.groupby(['shop_id'],as_index=False).sum()
df_product_comment=df_product_comment.drop(['sku_id','brand','cate'],axis=1)
df_shop_product_comment=pd.merge(df_shop,df_product_comment,how='left',on='shop_id')
train_set=pd.merge(train_set,df_user,how='left',on='user_id')
train_set=pd.merge(train_set,df_shop_product_comment,on='shop_id',how='left')
test_set = jdata_data[(jdata_data['action_time']>='2018-03-26') \
& (jdata_data['action_time']<'2018-04-16')][['user_id','cate','shop_id']].drop_duplicates()
test_set = test_set.merge(df_test,on=['user_id','cate','shop_id'],how='left')
del df_train
del df_test
test_set=pd.merge(test_set,df_user,how='left',on='user_id')
test_set=pd.merge(test_set,df_shop_product_comment,on='shop_id',how='left')
train_set.rename(columns={'cate_x':'cate'}, inplace = True)
test_set.rename(columns={'cate_x':'cate'}, inplace = True)
test_head=test_set[['user_id','cate','shop_id']]
train_head=train_set[['user_id','cate','shop_id']]
test_set=test_set.drop(['user_id','cate','shop_id'],axis=1)
train_set=train_set.drop(['user_id','cate','shop_id'],axis=1)
# 数据准备
X_train = train_set.drop(['label'],axis=1).values
y_train = train_set['label'].values
X_test = test_set.values
del test_set
del train_set
# Model utilities
class SBBTree():
    """Stacking, Bootstrap, Bagging ensemble over LightGBM boosters."""
    def __init__(self, params, stacking_num, bagging_num, bagging_test_size, num_boost_round, early_stopping_rounds):
        """
        Initializes the SBBTree.
        Args:
            params : lgb params.
            stacking_num : number of stacking k-fold splits.
            bagging_num : number of bootstrap/bagging rounds.
            bagging_test_size : bootstrap validation-sample rate.
            num_boost_round : max boosting rounds.
            early_stopping_rounds : early_stopping_rounds.
        """
        self.params = params
        self.stacking_num = stacking_num
        self.bagging_num = bagging_num
        self.bagging_test_size = bagging_test_size
        self.num_boost_round = num_boost_round
        self.early_stopping_rounds = early_stopping_rounds
        # Module handle; the trained boosters live in the two lists below.
        self.model = lgb
        self.stacking_model = []
        self.bagging_model = []
    def fit(self, X, y):
        """Fit the stacking out-of-fold models, then the bagging ensemble.

        When stacking_num > 1, X is augmented with one extra column of
        out-of-fold predictions before bagging; predict() mirrors this.
        """
        if self.stacking_num > 1:
            # Column 0 is unused; column 1 collects out-of-fold predictions.
            layer_train = np.zeros((X.shape[0], 2))
            self.SK = StratifiedKFold(n_splits=self.stacking_num, shuffle=True, random_state=1)
            for k,(train_index, test_index) in enumerate(self.SK.split(X, y)):
                X_train = X[train_index]
                y_train = y[train_index]
                X_test = X[test_index]
                y_test = y[test_index]
                lgb_train = lgb.Dataset(X_train, y_train)
                lgb_eval = lgb.Dataset(X_test, y_test, reference=lgb_train)
                # NOTE(review): early_stopping_rounds/verbose_eval keyword
                # form requires an older lightgbm API -- confirm the pinned
                # version before upgrading.
                gbm = lgb.train(self.params,
                                lgb_train,
                                num_boost_round=self.num_boost_round,
                                valid_sets=lgb_eval,
                                early_stopping_rounds=self.early_stopping_rounds,
                                verbose_eval=300)
                self.stacking_model.append(gbm)
                pred_y = gbm.predict(X_test, num_iteration=gbm.best_iteration)
                layer_train[test_index, 1] = pred_y
            # Append the meta feature built from out-of-fold predictions.
            X = np.hstack((X, layer_train[:,1].reshape((-1,1))))
        else:
            pass
        for bn in range(self.bagging_num):
            # A different random split per bagging round (seeded by bn).
            X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=self.bagging_test_size, random_state=bn)
            lgb_train = lgb.Dataset(X_train, y_train)
            lgb_eval = lgb.Dataset(X_test, y_test, reference=lgb_train)
            gbm = lgb.train(self.params,
                            lgb_train,
                            num_boost_round=10000,
                            valid_sets=lgb_eval,
                            early_stopping_rounds=200,
                            verbose_eval=300)
            self.bagging_model.append(gbm)
    def predict(self, X_pred):
        """Predict by averaging the bagging boosters.

        Re-creates the stacking meta feature by averaging the stacking
        models' predictions.  Assumes bagging_num >= 1 (pred_out would be
        unbound otherwise).
        """
        if self.stacking_num > 1:
            test_pred = np.zeros((X_pred.shape[0], self.stacking_num))
            for sn,gbm in enumerate(self.stacking_model):
                pred = gbm.predict(X_pred, num_iteration=gbm.best_iteration)
                test_pred[:, sn] = pred
            X_pred = np.hstack((X_pred, test_pred.mean(axis=1).reshape((-1,1))))
        else:
            pass
        for bn,gbm in enumerate(self.bagging_model):
            pred = gbm.predict(X_pred, num_iteration=gbm.best_iteration)
            if bn == 0:
                pred_out=pred
            else:
                pred_out+=pred
        return pred_out/self.bagging_num
# Model hyper-parameters (binary objective, AUC metric; scale_pos_weight
# compensates the heavy class imbalance of purchase labels).
params = {
    'boosting_type': 'gbdt',
    'objective': 'binary',
    'metric': 'auc',
    'learning_rate': 0.01,
    'num_leaves': 2 ** 5 - 1,
    'min_child_samples': 100,
    'max_bin': 100,
    'subsample': .7,
    'subsample_freq': 1,
    'colsample_bytree': 0.7,
    'min_child_weight': 0,
    'scale_pos_weight': 25,
    'seed': 2018,
    'nthread': 16,
    'verbose': 0,
}
# Train the stacked/bagged ensemble and score the test candidates.
model = SBBTree(params=params,\
                stacking_num=5,\
                bagging_num=5,\
                bagging_test_size=0.33,\
                num_boost_round=10000,\
                early_stopping_rounds=200)
model.fit(X_train, y_train)
y_predict = model.predict(X_test)
#y_train_predict = model.predict(X_train)
# NOTE(review): test_head is a slice of test_set; assigning a column here
# may raise pandas' SettingWithCopyWarning -- consider .copy() upstream.
test_head['pred_prob'] = y_predict
test_head.to_csv('../output/EDA16-threeWeek_rightTime.csv',index=False)
# Submission: triples predicted with probability >= 0.65.
threeNew = test_head[test_head['pred_prob'] >= 0.65][['user_id', 'cate', 'shop_id']]
threeNew.to_csv('../output/res_threeWeekNew65.csv', index=False)
|
#!/bin/env python
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from nmigen.sim import Delay
from nmigen_cfu import TestBase
from .constants import Constants
from .get import StatusRegister, GetInstruction
class StatusRegisterTest(TestBase):
    """Simulation test for the StatusRegister component."""
    def create_dut(self):
        return StatusRegister()
    def test(self):
        DATA = [
            # (valid, payload), value
            ((0, 0), 0),
        ]
        def process():
            # Drive the sink, let combinational logic settle (Delay),
            # check the registered value, then advance one clock.
            for n, (inputs, expected) in enumerate(DATA):
                with self.subTest(n=n, inputs=inputs, expected=expected):
                    valid, payload = inputs
                    yield self.dut.sink.valid.eq(valid)
                    yield self.dut.sink.payload.eq(payload)
                    yield Delay(0.25)
                    self.assertEqual((yield self.dut.value), expected)
                    yield
        self.run_sim(process, False)
class GetInstructionTest(TestBase):
    """Simulation test for GetInstruction register reads and strobes."""
    def create_dut(self):
        dut = GetInstruction()
        return dut
    def test(self):
        VER = Constants.REG_VERIFY
        INV = Constants.REG_INVALID
        DATA = [
            # (funct7, valid, payload), (output, read_strobe)
            ((INV, 0, 0), (0, 0)),
            ((INV, 1, 22), (0, 0)),
            ((INV, 0, 22), (0, 0)),
            ((VER, 0, 22), (22, 1)),
            ((INV, 1, 44), (0, 0)),
            ((INV, 0, 0), (0, 0)),
            ((INV, 0, 0), (0, 0)),
            ((VER, 0, 0), (44, 1)),
        ]
        def process():
            ver_sink = self.dut.sinks[VER]
            for n, (inputs, expected) in enumerate(DATA):
                with self.subTest(n=n, inputs=inputs, expected=expected):
                    funct7, valid, payload = inputs
                    # Present the instruction and pulse start for one cycle.
                    yield self.dut.funct7.eq(funct7)
                    yield ver_sink.valid.eq(valid)
                    yield ver_sink.payload.eq(payload)
                    yield self.dut.start.eq(1)
                    yield
                    yield ver_sink.valid.eq(0)
                    yield self.dut.start.eq(0)
                    # Busy-wait (in simulated cycles) for completion.
                    while not (yield self.dut.done):
                        yield
                    output, strobe = expected
                    self.assertEqual((yield self.dut.output), output)
                    self.assertEqual((yield self.dut.read_strobes[VER]), strobe)
                    yield
        self.run_sim(process, True)
|
from django.shortcuts import render
def home(request):
    """Render the login page.

    Bug fixed: the original passed ``name='login'`` to render(); Django's
    render() accepts no such keyword, so every request raised a TypeError.
    """
    return render(request, 'accounts/login.html')
def signup(request):
    """Render the signup page.

    Bug fixed: the original passed the invalid keyword ``name='signtup'``
    (also a typo) to render(), raising a TypeError on every request.
    """
    return render(request, 'accounts/signup.html')
import numpy as np
from prettytable import PrettyTable
# Half-width of the uniform interval [-sqrt(3), sqrt(3)] (unit variance).
UNIFORM_FRONT = np.sqrt(3)
# Monte-Carlo repetitions per sample size.
TESTS_NUM = 1000
# Fraction trimmed from each tail by the truncated mean.
TRUNCATION = 0.25
# Rate (lambda) of the Poisson distribution.
POISSON_PARAM = 3
def generate_laplace(x):
    """Draw ``x`` Laplace(0, 1/sqrt(3)) variates as an ndarray."""
    scale = 1 / np.sqrt(3)
    return np.random.laplace(0, scale, x)
def generate_uniform(x):
    """Draw ``x`` Uniform(-UNIFORM_FRONT, UNIFORM_FRONT) variates."""
    return np.random.uniform(low=-UNIFORM_FRONT, high=UNIFORM_FRONT, size=x)
def generate_poisson(x):
    """Draw ``x`` Poisson(POISSON_PARAM) variates."""
    return np.random.poisson(lam=POISSON_PARAM, size=x)
# Distribution name -> sampler; each sampler takes the sample size.
generate_dict = {
    'normal': np.random.standard_normal,
    'laplace': generate_laplace,
    'uniform': generate_uniform,
    'cauchy': np.random.standard_cauchy,
    'poisson': generate_poisson
}
def find_sample_mean(sample):
    """Arithmetic mean of the sample."""
    return np.asarray(sample).mean()
def find_median(sample):
    """Sample median (average of middle pair for even sizes)."""
    return np.median(np.asarray(sample))
def find_half_sum_extreme(sample):
    """Midrange: average of the smallest and largest observations."""
    lo = min(sample)
    hi = max(sample)
    return (lo + hi) / 2
def find_quantile(sample, index):
    """Empirical quantile of ``sample`` at level ``index`` in [0, 1]."""
    return np.quantile(np.asarray(sample), index)
def find_half_sum_quantile(sample):
    """Half-sum of the first and third empirical quartiles."""
    q1 = np.quantile(sample, 0.25)
    q3 = np.quantile(sample, 0.75)
    return (q1 + q3) / 2
def find_truncated_mean(sample, truncation=0.25):
    """Two-sided truncated (trimmed) mean of a sorted sample.

    Drops the ``r = int(truncation * n)`` smallest and largest
    observations and averages the remaining n - 2r values.

    Bugs fixed relative to the original loop:
    * the averaged window was shifted one position right (indices
      r+1 .. n-r instead of r .. n-r-1), so the extreme high value was
      kept and one low value dropped twice;
    * when r == 0 (n < 1/truncation) it indexed sample[n] and raised
      IndexError.

    ``truncation`` is now a parameter (default 0.25, the module-level
    TRUNCATION value) so other trim levels can be used.
    """
    arr = np.asarray(sample)
    n = arr.size
    r = int(truncation * n)
    # With r == 0 the slice covers the whole sample: the plain mean.
    return float(arr[r:n - r].mean())
# Location-characteristic name -> estimator applied to a sorted sample.
characteristic_dict = {
    'sample_mean': find_sample_mean,
    'med': find_median,
    'half_sum_extreme': find_half_sum_extreme,
    'half_sum_quantile': find_half_sum_quantile,
    'truncated_mean': find_truncated_mean,
}
def research(distribution_type, characteristic_type):
    """Monte-Carlo estimates of E(z) and D(z) for sample sizes 20, 60, 100.

    Returns [mean_20, mean_60, mean_100, var_20, var_60, var_100] for the
    chosen distribution and location characteristic.
    """
    sampler = generate_dict[distribution_type]
    estimator = characteristic_dict[characteristic_type]
    values = [0] * 6
    size = 20
    for slot in range(3):
        estimates = []
        for _ in range(TESTS_NUM):
            ordered = np.sort(sampler(size))
            estimates.append(estimator(ordered))
        values[slot] = np.mean(estimates)
        values[slot + 3] = D(estimates)
        size += 40
    return values
def D(sample):
    """Population variance (ddof=0) of the sample."""
    arr = np.asarray(sample)
    return arr.var()
def print_table(distribution_type):
    """Print the E(z)/D(z) table for all five location characteristics.

    Bug fixed: rounding used the ``precision`` variable only for the
    sample-mean and midrange columns, while the median, quartile half-sum
    and truncated-mean columns used a hard-coded 10000 -- so the reduced
    precision intended for the heavy-tailed Cauchy case was applied
    inconsistently.  All columns now use the same precision.
    """
    columns = {
        'sample_mean': research(distribution_type, 'sample_mean'),
        'med': research(distribution_type, 'med'),
        'half_sum_extreme': research(distribution_type, 'half_sum_extreme'),
        'half_sum_quantile': research(distribution_type, 'half_sum_quantile'),
        'truncated_mean': research(distribution_type, 'truncated_mean'),
    }
    # The Cauchy distribution has unbounded moments: print whole numbers.
    precision = 1 if distribution_type == 'cauchy' else 10000
    for vals in columns.values():
        for i in range(len(vals)):
            vals[i] = int(vals[i] * precision) / precision
    s = columns['sample_mean']
    m = columns['med']
    zr = columns['half_sum_extreme']
    zq = columns['half_sum_quantile']
    ztr = columns['truncated_mean']
    x = PrettyTable()
    x.field_names = [distribution_type, "sample_mean", "med_x", "Z_R", "Z_Q", "Z_tr"]
    x.add_row(["E(z) 20", s[0], m[0], zr[0], zq[0], ztr[0]])
    x.add_row(["D(z) 20", s[3], m[3], zr[3], zq[3], ztr[3]])
    x.add_row(["E(z) 60", s[1], m[1], zr[1], zq[1], ztr[1]])
    x.add_row(["D(z) 60", s[4], m[4], zr[4], zq[4], ztr[4]])
    x.add_row(["E(z) 100", s[2], m[2], zr[2], zq[2], ztr[2]])
    x.add_row(["D(z) 100", s[5], m[5], zr[5], zq[5], ztr[5]])
    # Plain-text duplicate of the table for easy copy/paste.
    print(distribution_type, "sample_mean", "med_x", "Z_R", "Z_Q", "Z_tr")
    print("E(z)20", s[0], m[0], zr[0], zq[0], ztr[0])
    print("D(z)20", s[3], m[3], zr[3], zq[3], ztr[3])
    print("E(z)60", s[1], m[1], zr[1], zq[1], ztr[1])
    print("D(z)60", s[4], m[4], zr[4], zq[4], ztr[4])
    print("E(z)100", s[2], m[2], zr[2], zq[2], ztr[2])
    print("D(z)100", s[5], m[5], zr[5], zq[5], ztr[5])
    print(x)
if __name__ == "__main__":
print_table('normal')
print_table('cauchy')
print_table('laplace')
print_table('uniform')
print_table('poisson')
|
#!/usr/bin/env python3
# Copyright 2016 The Dart project authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import argparse
import os
import platform
import subprocess
import sys
import time
import utils
# Host platform/architecture, detected once at import time.
HOST_OS = utils.GuessOS()
HOST_ARCH = utils.GuessArchitecture()
# Paths are anchored on this script's location (DART_ROOT/tools/gn.py).
SCRIPT_DIR = os.path.dirname(sys.argv[0])
DART_ROOT = os.path.realpath(os.path.join(SCRIPT_DIR, '..'))
AVAILABLE_ARCHS = utils.ARCH_FAMILY.keys()
# Environment variables for default settings.
DART_USE_TOOLCHAIN = "DART_USE_TOOLCHAIN" # Use instead of --toolchain-prefix
DART_USE_SYSROOT = "DART_USE_SYSROOT" # Use instead of --target-sysroot
DART_USE_CRASHPAD = "DART_USE_CRASHPAD" # Use instead of --use-crashpad
# use instead of --platform-sdk
DART_MAKE_PLATFORM_SDK = "DART_MAKE_PLATFORM_SDK"
# Extra GN args appended from the environment.
DART_GN_ARGS = "DART_GN_ARGS"
def ToolchainPrefix(args):
    """Cross-toolchain prefix: --toolchain-prefix wins, else the
    DART_USE_TOOLCHAIN environment variable (None if unset)."""
    return args.toolchain_prefix or os.environ.get(DART_USE_TOOLCHAIN)
def TargetSysroot(args):
    """Target sysroot: --target-sysroot wins, else the DART_USE_SYSROOT
    environment variable (None if unset)."""
    return args.target_sysroot or os.environ.get(DART_USE_SYSROOT)
def MakePlatformSDK():
    """True when DART_MAKE_PLATFORM_SDK is present in the environment."""
    return os.environ.get(DART_MAKE_PLATFORM_SDK) is not None
def GetGNArgs(args):
    """Extra GN arguments: the --gn-args list if given, otherwise the
    DART_GN_ARGS environment variable split on whitespace."""
    if args.gn_args is not None:
        return args.gn_args
    env_value = os.environ.get(DART_GN_ARGS) or ""
    return env_value.split()
def GetOutDir(mode, arch, target_os, sanitizer):
    # Thin wrapper: the build-root naming scheme lives in utils.GetBuildRoot.
    return utils.GetBuildRoot(HOST_OS, mode, arch, target_os, sanitizer)
def ToCommandLine(gn_args):
    """Render a GN args dict as a list of 'key=value' strings.

    Booleans become true/false, ints stay unquoted, and everything else is
    double-quoted per GN string syntax.
    """
    def _render(key, value):
        # bool must be tested before int: bool is an int subclass.
        if type(value) is bool:
            return '%s=%s' % (key, 'true' if value else 'false')
        if type(value) is int:
            return '%s=%d' % (key, value)
        return '%s="%s"' % (key, value)
    return [_render(key, value) for key, value in gn_args.items()]
# The C compiler's target under the host toolchain (DART_HOST_ARCH_***).
def HostCpuForArch(arch):
    """Pick the GN host_cpu value used when building for Dart arch ``arch``.

    A trailing '_<hostcpu>' suffix in ``arch`` explicitly selects the host
    toolchain; otherwise the first available candidate wins.
    """
    arch = arch.split("_")[-1]
    # For each target architecture, we prefer in descending order
    # - using the same architecture for the host (supports all architectures)
    # - using a host architecture with the same word size (supports arm and riscv, which have simulators)
    # - using a host architecture with a different word size (supports only AOT and only 32-bit target on 64-bit host)
    if arch in ['ia32']:
        candidates = ['x86']
    elif arch in ['x64', 'x64c', 'simx64', 'simx64c']:
        candidates = ['x64', 'arm64']
    elif arch in ['arm', 'simarm']:
        candidates = ['arm', 'x86', 'riscv32', 'arm64', 'x64', 'riscv64']
    elif arch in ['arm64', 'arm64c', 'simarm64', 'simarm64c']:
        candidates = ['arm64', 'x64', 'riscv64']
    elif arch in ['riscv32', 'simriscv32']:
        candidates = ['riscv32', 'arm', 'x86', 'riscv64', 'arm64', 'x64']
    elif arch in ['riscv64', 'simriscv64']:
        candidates = ['riscv64', 'arm64', 'x64']
    else:
        raise Exception("Unknown Dart architecture: %s" % arch)
    available = utils.HostArchitectures()
    for candidate in candidates:
        if candidate in available:
            return candidate
    raise Exception(
        "Failed to find a C host architecture for %s. Need one of %s but only %s are available."
        % (arch, candidates, available))
# The C compiler's target under the target toolchain (DART_HOST_ARCH_***).
def TargetCpuForArch(arch):
    """Pick the GN target_cpu value for Dart arch ``arch``.

    Real architectures map directly; 'sim*' architectures execute on a host
    CPU, chosen from the available host architectures (or forced by an
    explicit '_<cpu>' suffix).
    """
    # Real target architectures
    if arch.startswith('ia32'):
        return 'x86'
    elif arch.startswith('x64'):
        return 'x64'
    elif arch.startswith('arm64'):
        return 'arm64'
    elif arch.startswith('arm'):
        return 'arm'
    elif arch.startswith('riscv32'):
        return 'riscv32'
    elif arch.startswith('riscv64'):
        return 'riscv64'
    # Simulators
    if arch.endswith('_x64'):
        return 'x64'
    elif arch.endswith('_arm64'):
        return 'arm64'
    elif arch.endswith('_riscv64'):
        return 'riscv64'
    elif arch in ['simarm', 'simriscv32']:
        candidates = ['arm', 'riscv32', 'x86']
    elif arch in ['simx64', 'simx64c', 'simarm64', 'simarm64c', 'simriscv64']:
        candidates = ['arm64', 'riscv64', 'x64']
    else:
        raise Exception("Unknown Dart architecture: %s" % arch)
    available = utils.HostArchitectures()
    for candidate in candidates:
        if candidate in available:
            return candidate
    raise Exception(
        "Failed to find a C target architecture for %s. Need one of %s but only %s are available."
        % (arch, candidates, available))
# The Dart compiler's target (DART_TARGET_ARCH_***)
def DartTargetCpuForArch(arch):
    """Map a build arch name to the Dart compiler's target architecture.

    Strips an explicit '_<hostcpu>' suffix, a 'sim' prefix, and a trailing
    'c' (compressed-pointer) marker: 'simarm64c_x64' -> 'arm64'.
    """
    base = arch.split("_", 1)[0]
    if base.startswith("sim"):
        base = base[len("sim"):]
    if base.endswith("c"):
        base = base[:-1]
    return base
def IsCompressedPointerArch(arch):
    """True for compressed-pointer builds, marked by a '64c' infix."""
    return arch.find("64c") != -1
def HostOsForGn(host_os):
    """Translate the detected host-OS name to GN's naming
    ('macos*' -> 'mac', 'win*' -> 'win', everything else unchanged)."""
    for prefix, gn_name in (('macos', 'mac'), ('win', 'win')):
        if host_os.startswith(prefix):
            return gn_name
    return host_os
# Where string_map is formatted as X1=Y1,X2=Y2 etc.
# If key is X1, returns Y1.
def ParseStringMap(key, string_map):
    """Look up ``key`` in a comma-separated 'k1=v1,k2=v2' mapping string.

    Returns the mapped value, or None when the key is absent.

    Bugs fixed: entries without an '=' no longer raise IndexError when
    they happen to equal ``key``, and values that themselves contain '='
    (e.g. toolchain paths with flags) are no longer truncated, because the
    split is limited to the first '='.
    """
    for entry in string_map.split(','):
        name, sep, value = entry.partition('=')
        if sep and name == key:
            return value
    return None
def UseSysroot(args, gn_args):
    """Whether to build against the bundled Debian sysroot."""
    # Don't try to use a Linux sysroot if we aren't on Linux.
    if gn_args['target_os'] != 'linux' and HOST_OS != 'linux':
        return False
    # Don't use the sysroot if we're given another sysroot.
    if TargetSysroot(args):
        return False
    # Our Debian Jesse sysroot doesn't work with GCC 9
    if not gn_args['is_clang']:
        return False
    # Our Debian Jesse sysroot has incorrect annotations on realloc.
    if gn_args['is_ubsan']:
        return False
    # Our Debian Jesse sysroot doesn't support RISCV
    if gn_args['target_cpu'] in ['riscv32', 'riscv64']:
        return False
    # Otherwise use the sysroot.
    return True
def ToGnArgs(args, mode, arch, target_os, sanitizer, verify_sdk_hash):
    """Compute the GN argument dict for one build configuration.

    Args:
        args: parsed command-line options.
        mode: 'debug', 'release' or 'product'.
        arch: Dart architecture name (may carry sim/compressed/suffix marks).
        target_os: target OS name, or 'host'.
        sanitizer: 'none', 'asan', 'lsan', 'msan', 'tsan' or 'ubsan'.
        verify_sdk_hash: whether the built SDK verifies its hash.
    """
    gn_args = {}
    host_os = HostOsForGn(HOST_OS)
    if target_os == 'host':
        gn_args['target_os'] = host_os
    else:
        gn_args['target_os'] = target_os
    gn_args['host_cpu'] = HostCpuForArch(arch)
    gn_args['target_cpu'] = TargetCpuForArch(arch)
    gn_args['dart_target_arch'] = DartTargetCpuForArch(arch)
    gn_args['dart_use_compressed_pointers'] = IsCompressedPointerArch(arch)
    # Configure Crashpad library if it is used.
    gn_args['dart_use_crashpad'] = ((args.use_crashpad or
                                     DART_USE_CRASHPAD in os.environ) and
                                    gn_args['target_cpu'] in ['x86', 'x64'])
    if gn_args['dart_use_crashpad']:
        # Tell Crashpad's BUILD files which checkout layout to use.
        gn_args['crashpad_dependencies'] = 'dart'
    if DartTargetCpuForArch(arch) != HostCpuForArch(arch):
        # Training an app-jit snapshot under a simulator is slow. Use script
        # snapshots instead.
        gn_args['dart_snapshot_kind'] = 'kernel'
    else:
        gn_args['dart_snapshot_kind'] = 'app-jit'
    # We only want the fallback root certs in the standalone VM on
    # Linux and Windows.
    if gn_args['target_os'] in ['linux', 'win']:
        gn_args['dart_use_fallback_root_certificates'] = True
        gn_args['bssl_use_clang_integrated_as'] = True
    if gn_args['target_os'] == 'linux':
        if gn_args['target_cpu'] == 'arm':
            # Default to -mfloat-abi=hard and -mfpu=neon for arm on Linux as we're
            # specifying a gnueabihf compiler in //build/toolchain/linux/BUILD.gn.
            floatabi = 'hard' if args.arm_float_abi == '' else args.arm_float_abi
            gn_args['arm_version'] = 7
            gn_args['arm_float_abi'] = floatabi
            gn_args['arm_use_neon'] = True
    gn_args['is_debug'] = mode == 'debug'
    gn_args['is_release'] = mode == 'release'
    gn_args['is_product'] = mode == 'product'
    gn_args['dart_debug'] = mode == 'debug'
    # This setting is only meaningful for Flutter. Standalone builds of the VM
    # should leave this set to 'develop', which causes the build to defer to
    # 'is_debug', 'is_release' and 'is_product'.
    if mode == 'product':
        gn_args['dart_runtime_mode'] = 'release'
    else:
        gn_args['dart_runtime_mode'] = 'develop'
    gn_args['exclude_kernel_service'] = args.exclude_kernel_service
    gn_args['is_clang'] = args.clang
    enable_code_coverage = args.code_coverage and gn_args['is_clang']
    gn_args['dart_vm_code_coverage'] = enable_code_coverage
    gn_args['is_asan'] = sanitizer == 'asan'
    gn_args['is_lsan'] = sanitizer == 'lsan'
    gn_args['is_msan'] = sanitizer == 'msan'
    gn_args['is_tsan'] = sanitizer == 'tsan'
    gn_args['is_ubsan'] = sanitizer == 'ubsan'
    gn_args['is_qemu'] = args.use_qemu
    # NOTE(review): dart_platform_sdk is written only when --platform-sdk is
    # NOT set (and then to False) -- this looks inverted; confirm against
    # the upstream script before changing.
    if not args.platform_sdk:
        gn_args['dart_platform_sdk'] = args.platform_sdk
    # We don't support stripping on Windows
    if host_os != 'win':
        gn_args['dart_stripped_binary'] = 'exe.stripped/dart'
        gn_args['dart_precompiled_runtime_stripped_binary'] = (
            'exe.stripped/dart_precompiled_runtime_product')
        gn_args['gen_snapshot_stripped_binary'] = (
            'exe.stripped/gen_snapshot_product')
        gn_args['analyze_snapshot_binary'] = ('exe.stripped/analyze_snapshot')
        gn_args['wasm_opt_stripped_binary'] = 'exe.stripped/wasm-opt'
    # Setup the user-defined sysroot.
    if UseSysroot(args, gn_args):
        gn_args['dart_sysroot'] = 'debian'
    else:
        sysroot = TargetSysroot(args)
        if sysroot:
            gn_args['target_sysroot'] = ParseStringMap(arch, sysroot)
        toolchain = ToolchainPrefix(args)
        if toolchain:
            for arch in ['ia32', 'x64', 'arm', 'arm64', 'riscv32', 'riscv64']:
                prefix = ParseStringMap(arch, toolchain)
                if prefix != None:
                    gn_args[arch + '_toolchain_prefix'] = prefix
    goma_dir = os.environ.get('GOMA_DIR')
    # Search for goma in depot_tools in path
    goma_depot_tools_dir = None
    for path in os.environ.get('PATH', '').split(os.pathsep):
        if os.path.basename(path) == 'depot_tools':
            cipd_bin = os.path.join(path, '.cipd_bin')
            if os.path.isfile(os.path.join(cipd_bin, ExecutableName('gomacc'))):
                goma_depot_tools_dir = cipd_bin
                break
    # Otherwise use goma from home directory.
    # TODO(whesse): Remove support for goma installed in home directory.
    # Goma will only be distributed through depot_tools.
    goma_home_dir = os.path.join(os.getenv('HOME', ''), 'goma')
    if args.goma and goma_dir:
        gn_args['use_goma'] = True
        gn_args['goma_dir'] = goma_dir
    elif args.goma and goma_depot_tools_dir:
        gn_args['use_goma'] = True
        gn_args['goma_dir'] = goma_depot_tools_dir
    elif args.goma and os.path.exists(goma_home_dir):
        gn_args['use_goma'] = True
        gn_args['goma_dir'] = goma_home_dir
    else:
        gn_args['use_goma'] = False
        gn_args['goma_dir'] = None
    if gn_args['target_os'] == 'mac' and gn_args['use_goma']:
        gn_args['mac_use_goma_rbe'] = True
    # Code coverage requires -O0 to be set.
    if enable_code_coverage:
        gn_args['dart_debug_optimization_level'] = 0
        gn_args['debug_optimization_level'] = 0
    elif args.debug_opt_level:
        gn_args['dart_debug_optimization_level'] = args.debug_opt_level
        gn_args['debug_optimization_level'] = args.debug_opt_level
    gn_args['verify_sdk_hash'] = verify_sdk_hash
    return gn_args
def ProcessOsOption(os_name):
    """Resolve the special OS name 'host' to the actual host OS."""
    return HOST_OS if os_name == 'host' else os_name
def ProcessOptions(args):
    """Expand 'all' shorthands, split the comma-separated option strings in
    place, and validate the requested configurations.

    Returns True when the configuration is buildable, False otherwise.
    """
    if args.arch == 'all':
        if platform.system() == 'Darwin':
            # Targeting 32 bits not supported on MacOS.
            # See HostArchitectures in utils.py.
            args.arch = 'x64,simarm64,x64c,simarm64c,simriscv64'
        else:
            args.arch = 'ia32,x64,simarm,simarm64,x64c,simarm64c,simriscv32,simriscv64'
    if args.mode == 'all':
        args.mode = 'debug,release,product'
    if args.os == 'all':
        args.os = 'host,android,fuchsia'
    if args.sanitizer == 'all':
        args.sanitizer = 'none,asan,lsan,msan,tsan,ubsan'
    args.mode = args.mode.split(',')
    args.arch = args.arch.split(',')
    args.os = args.os.split(',')
    args.sanitizer = args.sanitizer.split(',')
    for mode in args.mode:
        if not mode in ['debug', 'release', 'product']:
            print("Unknown mode %s" % mode)
            return False
    for i, arch in enumerate(args.arch):
        args.arch[i] = arch.lower()
    oses = [ProcessOsOption(os_name) for os_name in args.os]
    for os_name in oses:
        if not os_name in [
                'android', 'freebsd', 'linux', 'macos', 'win32', 'fuchsia'
        ]:
            print("Unknown os %s" % os_name)
            return False
        if os_name == 'android':
            if not HOST_OS in ['linux', 'macos']:
                print(
                    "Cross-compilation to %s is not supported on host os %s." %
                    (os_name, HOST_OS))
                return False
            # NOTE(review): 'arch' here is the leftover loop variable from the
            # lower-casing loop above, so only the LAST entry of args.arch is
            # validated -- confirm whether all architectures should be checked.
            if not arch in [
                    'ia32',
                    'x64',
                    'arm',
                    'arm_x64',
                    'arm64',
                    'x64c',
                    'arm64c',
            ]:
                print(
                    "Cross-compilation to %s is not supported for architecture %s."
                    % (os_name, arch))
                return False
        elif os_name == 'fuchsia':
            if not HOST_OS in ['linux', 'macos']:
                print(
                    "Cross-compilation to %s is not supported on host os %s." %
                    (os_name, HOST_OS))
                return False
            if not arch in ['x64', 'arm64', 'x64c', 'arm64c', 'riscv64']:
                print(
                    "Cross-compilation to %s is not supported for architecture %s."
                    % (os_name, arch))
                return False
        elif os_name != HOST_OS:
            print("Unsupported target os %s" % os_name)
            return False
    # Crashpad integration is Windows-only.
    if HOST_OS != 'win' and args.use_crashpad:
        print("Crashpad is only supported on Windows")
        return False
    return True
def os_has_ide(host_os):
    """True when the host OS has an IDE project generator (VS or Xcode)."""
    return host_os.startswith(('win', 'mac'))
def ide_switch(host_os):
    """GN --ide flag for the host OS: VS on Windows, Xcode on macOS,
    JSON project files everywhere else."""
    if host_os.startswith('win'):
        return '--ide=vs'
    if host_os.startswith('mac'):
        return '--ide=xcode'
    return '--ide=json'
def AddCommonGnOptionArgs(parser):
    """Adds arguments that will change the default GN arguments."""
    # Paired --foo/--no-foo flags share a dest; set_defaults supplies the
    # default when neither is given, and the last flag on the line wins.
    parser.add_argument('--goma', help='Use goma', action='store_true')
    parser.add_argument('--no-goma',
                        help='Disable goma',
                        dest='goma',
                        action='store_false')
    parser.set_defaults(goma=True)
    parser.add_argument('--verify-sdk-hash',
                        help='Enable SDK hash checks (default)',
                        dest='verify_sdk_hash',
                        action='store_true')
    parser.add_argument('-nvh',
                        '--no-verify-sdk-hash',
                        help='Disable SDK hash checks',
                        dest='verify_sdk_hash',
                        action='store_false')
    parser.set_defaults(verify_sdk_hash=True)
    parser.add_argument('--clang', help='Use Clang', action='store_true')
    parser.add_argument('--no-clang',
                        help='Disable Clang',
                        dest='clang',
                        action='store_false')
    parser.set_defaults(clang=True)
    parser.add_argument(
        '--platform-sdk',
        help='Directs the create_sdk target to create a smaller "Platform" SDK',
        default=MakePlatformSDK(),
        action='store_true')
    parser.add_argument('--use-crashpad',
                        default=False,
                        dest='use_crashpad',
                        action='store_true')
    parser.add_argument('--use-qemu',
                        default=False,
                        dest='use_qemu',
                        action='store_true')
    parser.add_argument('--exclude-kernel-service',
                        help='Exclude the kernel service.',
                        default=False,
                        dest='exclude_kernel_service',
                        action='store_true')
    parser.add_argument('--arm-float-abi',
                        type=str,
                        help='The ARM float ABI (soft, softfp, hard)',
                        metavar='[soft,softfp,hard]',
                        default='')
    parser.add_argument('--code-coverage',
                        help='Enable code coverage for the standalone VM',
                        default=False,
                        dest="code_coverage",
                        action='store_true')
    parser.add_argument('--debug-opt-level',
                        '-d',
                        help='The optimization level to use for debug builds',
                        type=str)
    parser.add_argument('--gn-args',
                        help='Set extra GN args',
                        dest='gn_args',
                        action='append')
    parser.add_argument(
        '--toolchain-prefix',
        '-t',
        type=str,
        help='Comma-separated list of arch=/path/to/toolchain-prefix mappings')
    parser.add_argument('--ide',
                        help='Generate an IDE file.',
                        default=os_has_ide(HOST_OS),
                        action='store_true')
    parser.add_argument('--export-compile-commands',
                        help='Export compile_commands.json database file.',
                        default=False,
                        action='store_true')
    parser.add_argument(
        '--target-sysroot',
        '-s',
        type=str,
        help='Comma-separated list of arch=/path/to/sysroot mappings')
    parser.add_argument('--use-mallinfo2',
                        help='Use mallinfo2 to collect malloc stats.',
                        default=False,
                        dest='use_mallinfo2',
                        action='store_true')
def AddCommonConfigurationArgs(parser):
    """Adds arguments that influence which configuration will be built."""
    # Defaults target the host architecture/OS with a debug build.
    parser.add_argument("-a",
                        "--arch",
                        type=str,
                        help='Target architectures (comma-separated).',
                        metavar='[all,' + ','.join(AVAILABLE_ARCHS) + ']',
                        default=utils.GuessArchitecture())
    parser.add_argument('--mode',
                        '-m',
                        type=str,
                        help='Build variants (comma-separated).',
                        metavar='[all,debug,release,product]',
                        default='debug')
    parser.add_argument('--os',
                        type=str,
                        help='Target OSs (comma-separated).',
                        metavar='[all,host,android,fuchsia]',
                        default='host')
    parser.add_argument('--sanitizer',
                        type=str,
                        help='Build variants (comma-separated).',
                        metavar='[all,none,asan,lsan,msan,tsan,ubsan]',
                        default='none')
def AddOtherArgs(parser):
    """Add miscellaneous flags (verbosity, self-test) to ``parser``."""
    flags = [
        (('-v', '--verbose'), 'Verbose output.'),
        (('--test',), 'Test this script.'),
    ]
    for names, help_text in flags:
        parser.add_argument(*names,
                            help=help_text,
                            default=False,
                            action='store_true')
def parse_args(args):
    """Parse the command line (argv including the program name).

    Returns the parsed options, or None (after printing help) when the
    requested configuration fails validation in ProcessOptions.
    """
    args = args[1:]
    parser = argparse.ArgumentParser(
        description='A script to run `gn gen`.',
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    config_group = parser.add_argument_group('Configuration Related Arguments')
    AddCommonConfigurationArgs(config_group)
    gn_group = parser.add_argument_group('GN Related Arguments')
    AddCommonGnOptionArgs(gn_group)
    other_group = parser.add_argument_group('Other Arguments')
    AddOtherArgs(other_group)
    options = parser.parse_args(args)
    if not ProcessOptions(options):
        parser.print_help()
        return None
    return options
def ExecutableName(basename):
    """Append '.exe' on Windows; leave the name unchanged elsewhere."""
    suffix = '.exe' if utils.IsWindows() else ''
    return basename + suffix
def BuildGnCommand(args, mode, arch, target_os, sanitizer, out_dir):
    """Assemble the `gn gen` command line for one configuration."""
    if utils.IsWindows():
        gn = os.path.join(DART_ROOT, 'buildtools', 'win', 'gn.exe')
    else:
        gn = os.path.join(DART_ROOT, 'buildtools', 'gn')
    if not os.path.isfile(gn):
        raise Exception("Couldn't find the gn binary at path: " + gn)
    # TODO(infra): Re-enable --check. Many targets fail to use
    # public_deps to re-expose header files to their dependents.
    # See dartbug.com/32364
    command = [gn, 'gen', out_dir]
    gn_args = ToCommandLine(
        ToGnArgs(args, mode, arch, target_os, sanitizer, args.verify_sdk_hash))
    # Extra args from --gn-args / $DART_GN_ARGS are appended last so they
    # can override the computed defaults.
    gn_args += GetGNArgs(args)
    if args.ide:
        command.append(ide_switch(HOST_OS))
    if args.export_compile_commands:
        command.append('--export-compile-commands')
    command.append('--args=%s' % ' '.join(gn_args))
    return command
def RunGnOnConfiguredConfigurations(args):
    """Run `gn gen` for every (os, mode, arch, sanitizer) combination.

    All gn processes are launched in parallel and polled until completion.
    Returns 0 on success, 1 when any command fails to start or exits
    nonzero (all still-running processes are then terminated).

    Bug fixed: the polling loop removed entries from ``active_commands``
    while iterating over that same list; Python's list iterator then skips
    the element following each removal, so finished processes could be
    polled a cycle late.  The loop now iterates over a snapshot.
    """
    commands = []
    for target_os in args.os:
        for mode in args.mode:
            for arch in args.arch:
                for sanitizer in args.sanitizer:
                    out_dir = GetOutDir(mode, arch, target_os, sanitizer)
                    commands.append(
                        BuildGnCommand(args, mode, arch, target_os, sanitizer,
                                       out_dir))
                    if args.verbose:
                        print("gn gen --check in %s" % out_dir)
    active_commands = []

    def cleanup(command):
        # Report the failing command and terminate everything still running.
        print("Command failed: " + ' '.join(command))
        for (_, process) in active_commands:
            process.terminate()

    for command in commands:
        try:
            process = subprocess.Popen(command, cwd=DART_ROOT)
            active_commands.append([command, process])
        except Exception as e:
            print('Error: %s' % e)
            cleanup(command)
            return 1
    while active_commands:
        time.sleep(0.1)
        # Iterate over a snapshot so removals cannot skip entries.
        for active_command in list(active_commands):
            (command, process) = active_command
            if process.poll() is not None:
                active_commands.remove(active_command)
                if process.returncode != 0:
                    cleanup(command)
                    return 1
    return 0
def ExpectEquals(actual, expected):
    """Raise when the self-test sees a value other than the expected one."""
    if actual == expected:
        return
    raise Exception(f"Actual: {actual} Expected: {expected}")
def RunTests():
    """Self-checks for the arch-mapping helpers; raises on first mismatch."""
    host_arch = utils.HostArchitectures()[0]
    host_arch_or_x64 = host_arch
    if 'x64' in utils.HostArchitectures():
        # Rosetta means 'x64' may be built directly.
        host_arch_or_x64 = 'x64'
    for arch, expected in [
        ("arm64", host_arch),
        ("arm64c", host_arch),
        ("simarm64", host_arch),
        ("simarm64_x64", host_arch_or_x64),
        ("simarm64_arm64", host_arch),
        ("simarm64_riscv64", host_arch),
        ("x64", host_arch_or_x64),
        ("simx64", host_arch_or_x64),
        ("simx64_x64", host_arch_or_x64),
        ("simx64_arm64", host_arch),
        ("simx64_riscv64", host_arch),
    ]:
        ExpectEquals(HostCpuForArch(arch), expected)
    for arch, expected in [
        ("arm64", "arm64"),
        ("arm64c", "arm64"),
        ("simarm64", host_arch),
        ("simarm64_x64", "x64"),
        ("simarm64_arm64", "arm64"),
        ("simarm64_riscv64", "riscv64"),
        ("x64", "x64"),
        ("simx64", host_arch),
        ("simx64_x64", "x64"),
        ("simx64_arm64", "arm64"),
        ("simx64_riscv64", "riscv64"),
    ]:
        ExpectEquals(TargetCpuForArch(arch), expected)
    for arch, expected in [
        ("arm64", "arm64"),
        ("arm64c", "arm64"),
        ("simarm64", "arm64"),
        ("simarm64_x64", "arm64"),
        ("simarm64_arm64", "arm64"),
        ("simarm64_riscv64", "arm64"),
        ("x64", "x64"),
        ("simx64", "x64"),
        ("simx64_x64", "x64"),
        ("simx64_arm64", "x64"),
        ("simx64_riscv64", "x64"),
    ]:
        ExpectEquals(DartTargetCpuForArch(arch), expected)
    # 'c' suffix means compressed pointers.
    for arch in ["arm64c", "simarm64c", "simarm64c_x64",
                 "x64c", "simx64c", "simx64c_x64"]:
        ExpectEquals(IsCompressedPointerArch(arch), True)
    for arch in ["arm64", "simarm64", "simarm64_x64",
                 "x64", "simx64", "simx64_x64"]:
        ExpectEquals(IsCompressedPointerArch(arch), False)
    # Our Android bots:
    ExpectEquals(HostCpuForArch("arm64c"), host_arch)
    ExpectEquals(TargetCpuForArch("arm64c"), 'arm64')
    ExpectEquals(DartTargetCpuForArch("arm64c"), 'arm64')
    ExpectEquals(HostCpuForArch("arm_x64"), host_arch_or_x64)
    ExpectEquals(TargetCpuForArch("arm_x64"), 'arm')
    ExpectEquals(DartTargetCpuForArch("arm_x64"), 'arm')
def Main(argv):
    """Entry point: parse arguments, then run self-tests or gn generation."""
    start = time.time()
    args = parse_args(argv)
    if args is None:
        return 1
    if args.test:
        RunTests()
        print("Tests passed.")
        return 0
    exit_code = RunGnOnConfiguredConfigurations(args)
    if args.verbose:
        print("GN Time: %.3f seconds" % (time.time() - start))
    return exit_code
# Script entry point: exit with Main's return code.
if __name__ == '__main__':
    sys.exit(Main(sys.argv))
|
import numpy as np
import cv2
from decimal import *
from matplotlib import pyplot as plt
from sklearn.svm import SVC
from sklearn import cross_validation
from sklearn import datasets, neighbors, linear_model
from sklearn.preprocessing import MinMaxScaler, Normalizer
from sklearn.decomposition import PCA as sklearnPCA, KernelPCA
from sklearn.preprocessing import scale
from sklearn.neighbors import KNeighborsRegressor
# Feature store for the 50x100 digit sheet: one 28-value descriptor per digit
# (7 Hu moments for each of the 4 image quadrants -- see calHuMoments below).
# sample=np.zeros((5000,7),dtype=np.dtype(Decimal)) #matrix of 5000x7 , 5000 are digits samples, 7 is humoments for each digits
sample=np.zeros((50,100,28),dtype=np.dtype(np.float64))  # 50 rows x 100 cols of digits, 28 features each
# print sampleTest.shape
# sample=np.reshape(sample,(5000,7))
# print sample.shape
def calHuMoments(src):
    """Compute a 28-value Hu-moment descriptor for one digit image.

    The grayscale digit is resized to 40x40, Gaussian-blurred, split into its
    four quadrants, and 7 Hu moments are taken from the Canny edges of each
    quadrant.  The 4x7 values are flattened to a (1, 28) float32 row and
    z-score normalised (mean 0, std 1) before being returned.

    Cleanup vs. original: the large mass of commented-out experiments was
    removed, and the py2-only '/' slice arithmetic was made explicit with '//'
    (identical result for these integer operands).

    Parameters:
        src: single-channel (grayscale) digit image.
    Returns:
        numpy array of shape (1, 28), dtype float32.
    """
    # 4 quadrants x 7 Hu moments each.
    moments = np.zeros((4, 7), dtype=np.dtype(np.dtype(Decimal)))
    src = cv2.resize(src, (40, 40))
    m, n = src.shape
    # OPTIMAL 140X140, GAUSSIAN 5,5 AND BLUR, 120,100
    src = cv2.GaussianBlur(src, (5, 5), 0)
    cv2.imshow('res', src)
    seg = 0
    for l in range(0, 2):
        for k in range(0, 2):
            # Canny edges of quadrant (l, k); '//' keeps the original
            # integer-division slicing explicit.
            x = cv2.Canny(src[l*m//2:l*m//2+m//2-1, k*n//2:k*n//2+n//2-1],
                          120, 100)
            moments[seg] = cv2.HuMoments(cv2.moments(x)).flatten()
            seg = seg + 1
    moments = moments[:, :].reshape(-1, 28).astype(np.dtype(np.float32))
    # Z-score normalisation over all 28 features.
    Amean = np.mean(moments)
    Astd = np.std(moments)
    moments[0] = (moments[0] - Amean) / Astd
    return moments
# main program: load the 50x100 digit sheet and extract Hu-moment features
img = cv2.imread('digits.png')
gray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
# gray = cv2.resize(gray, (4000, 2400))
# gray=(255-gray)
# cv2.imshow("da0",gray)
# Now we split the image to 5000 cells, each 20x20 size
row,col= gray.shape #1000x2000
Dx=20
Dy=20#dimension of each digit cell (20x20 pixels)
cells = [np.hsplit(row,100) for row in np.vsplit(gray,50)]#1000/50=2000/100=20x20 pixel image
x = np.array(cells)
# print x.shape
# train = x[:,:50].reshape(-1,400).astype(np.float32)
# print x
count=0
min_max=MinMaxScaler()
for i in range(0,50):#getting humoments for all the sample images and storing it in the sample array
    for j in range(0,100):
        # cv2.imshow('ress',x[i,j,:,:])
        sample[i][j]=calHuMoments(x[i,j,:,:])
        count=count+1
# calHuMoments(x[0,99,:,:],0)
# print sample
# min_max=MinMaxScaler()##feature scaling
train = sample[:,:100].reshape(-1,28).astype(np.dtype(np.float32)) # shape (5000, 28): every digit's descriptor
# print train.shape
# # print "Moments ori ",train[0]
# train=min_max.fit_transform(train)
# # print train.shape
# train=scale(train)
# print train.shape
test = sample[:,50:100].reshape(-1,28).astype(np.dtype(np.float32)) # shape (2500, 28): right half of sheet (NOTE(review): overlaps train)
# test=min_max.fit_transform(test)
# test=scale(test)
# print train[0]
k = np.arange(10)# class labels 0..9
# k = {0,1,2,3,4,5,6,7,8,9,'+','-'}
# print test[0]
train_labels = np.repeat(k,500)[:,np.newaxis]  # 500 samples per digit class -> (5000, 1)
test_labels = np.repeat(k,250)[:,np.newaxis]   # 250 samples per digit class -> (2500, 1)
# Initiate kNN (OpenCV 2.x API), train on the Hu-moment features, then
# evaluate on the test split for k = 1..5.
knn = cv2.KNearest()
#####PCA (disabled experiment)
train=train.astype(np.float32)
test=test.astype(np.float32)
# sklearn_pca = sklearnPCA(n_components=6)
# sklearn_train = sklearn_pca.fit_transform(train)
# sklearn_test = sklearn_pca.fit_transform(test)
########KPCA (disabled experiment)
# kpca = KernelPCA(kernel="rbf", fit_inverse_transform=True, gamma=10)
# sklearn_train = kpca.fit_transform(train)
# sklearn_train = kpca.inverse_transform(sklearn_train)
# sklearn_test = kpca.fit_transform(test)
# sklearn_test = kpca.inverse_transform(sklearn_test)
# print sklearn_train[0]
sklearn_train=train
sklearn_test=test
# print sklearn_train[0]
# print train.shape
# print sklearn_train.shape
knn.train(sklearn_train,train_labels)
#test image (single external image experiment, disabled)
# img4=cv2.imread('digits/5.jpg')
# img4 = cv2.resize(img4, (40, 40))
# gray4 = cv2.cvtColor(img4,cv2.COLOR_BGR2GRAY)
# # gray4 = cv2.GaussianBlur(gray4, (5, 5), 0)
# # Threshold the image
# (thresh, gray4) = cv2.threshold(gray4, 128, 255, cv2.THRESH_BINARY | cv2.THRESH_OTSU)
# gray4=(255-gray4)
# # gray4 = cv2.Canny(gray4,200,100)
# # gray4 = cv2.cvtColor(img4,cv2.COLOR_BGR2GRAY)
# # rotating image
# # rows,cols = gray4.shape
# # M = cv2.getRotationMatrix2D((cols/2,rows/2),90,1)
# # gray4 = cv2.warpAffine(gray4,M,(cols,rows))
# #########
# jay=np.zeros((1,1,7),dtype=np.dtype(Decimal))
# jay[0][0]=calHuMoments(gray4)
# print jay
# jay = jay[0,0].reshape(-1,7).astype(np.dtype(np.float32))
# print jay
# print jay.shape
# print test.shape
# test=jay
# Sweep the neighbour count and report accuracy for each k.
for k in range(1,6):
    print "k=",k
    ret,result,neighbours,dist = knn.find_nearest(sklearn_test,k=k)
    # print ret,"\n",result,"\n", neighbours,"\n", dist
    # print result, neighbours
    matches = result==test_labels
    correct = np.count_nonzero(matches)
    accuracy = correct*100.0/result.size
    print "Accuracy is", accuracy
# cv2.imshow('ress',gray4)
######################################################3
# Repeat the evaluation with scikit-learn classifiers (kNN, then SVM).
p = np.arange(10)
# p = {0,1,2,3,4,5,6,7,8,9,'+','-'}
train_x = np.repeat(p,500)[:,]
test_y = np.repeat(p,250)[:,]
knn = neighbors.KNeighborsClassifier(n_neighbors=5)
print('KNN score:', knn.fit(sklearn_train, train_x).score(sklearn_test, test_y))
# print 'KNN pred label', knn.predict(sklearn_test[1])
# logistic = linear_model.LogisticRegression()
# print('LogisticRegression score: %f'
#       % logistic.fit(train, train_x).score(test, test_y))
############################
#using SVM with the same labels
clf = SVC()
p = np.arange(10)
# p = {0,1,2,3,4,5,6,7,8,9,'+','-'}
train_x = np.repeat(p,500)[:,]
test_y = np.repeat(p,250)[:,]
clf.fit(sklearn_train, train_x)
print 'SVM score', clf.score(sklearn_test, test_y)
# print 'SVM pred label', clf.predict(sklearn_test[1])
###########
# neigh = KNeighborsRegressor(n_neighbors=5)
# neigh.fit(sklearn_train, train_x)
# # KNeighborsRegressor(...)
# print("KNN regression, ",neigh.score(sklearn_test, test_y))
# print neigh.predict(sklearn_test[2499])
# print sklearn_test.shape
#testing: predict the label of one digit picked from the sheet
l=x[45,19,:,:]
rows,cols = l.shape
M = cv2.getRotationMatrix2D((cols/2,rows/2),0,1)  # rotation by 0 degrees (identity warp)
l = cv2.warpAffine(l,M,(cols,rows))
cv2.imshow("shw",l)
mom=np.zeros((1,1,28),dtype=np.dtype(np.float64))
mom[0][0]=calHuMoments(l)
sampleCheck = sample[:,:50].reshape(-1,28).astype(np.dtype(np.float64)) # shape (2500, 28)
momcheck = mom[0][0].reshape(-1,28).astype(np.dtype(np.float64)) # shape (1, 28)
# print "dsaimension",check.shape
# print "moments sample ",check[0]
# momcheck=min_max1.fit_transform(momcheck)
# sampleCheck=min_max1.fit_transform(sampleCheck)
# # print "momn",momcheck
# # print "original moments, ",sampleCheck
# momcheck=scale(momcheck)
# sampleCheck=scale(sampleCheck)
print "momn",momcheck[0]
# print "original moments, ",sampleCheck[0]
print "original train, ",train[0]
print 'KNN pred label', knn.predict(momcheck[0])
print 'SVM pred label', clf.predict(momcheck[0])
# print "train", train[0]
# jay=jay.astype(np.float32)
# print "min max", jay[0]
# print train[0]
# sklearn_pca = sklearnPCA(n_components=6)
# check = sklearn_pca.fit_transform(check)
# print check
cv2.waitKey() |
import warnings
from functools import partial
from typing import Any, List, Optional, Union
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch import Tensor
from torchvision.models import inception as inception_module
from torchvision.models.inception import Inception_V3_Weights, InceptionOutputs
from ...transforms._presets import ImageClassification
from .._api import register_model, Weights, WeightsEnum
from .._meta import _IMAGENET_CATEGORIES
from .._utils import _ovewrite_named_param, handle_legacy_interface
from .utils import _fuse_modules, _replace_relu, quantize_model
__all__ = [
"QuantizableInception3",
"Inception_V3_QuantizedWeights",
"inception_v3",
]
class QuantizableBasicConv2d(inception_module.BasicConv2d):
    """BasicConv2d whose ReLU is a standalone module so conv/bn/relu can be
    fused for quantization."""

    def __init__(self, *args: Any, **kwargs: Any) -> None:
        super().__init__(*args, **kwargs)
        self.relu = nn.ReLU()

    def forward(self, x: Tensor) -> Tensor:
        # conv -> bn -> relu, kept as separate named modules for fusion.
        return self.relu(self.bn(self.conv(x)))

    def fuse_model(self, is_qat: Optional[bool] = None) -> None:
        _fuse_modules(self, ["conv", "bn", "relu"], is_qat, inplace=True)
class QuantizableInceptionA(inception_module.InceptionA):
    # TODO https://github.com/pytorch/vision/pull/4232#pullrequestreview-730461659
    def __init__(self, *args: Any, **kwargs: Any) -> None:
        super().__init__(*args, conv_block=QuantizableBasicConv2d, **kwargs)  # type: ignore[misc]
        # FloatFunctional so the concat can be observed during quantization.
        self.myop = nn.quantized.FloatFunctional()

    def forward(self, x: Tensor) -> Tensor:
        return self.myop.cat(self._forward(x), 1)
class QuantizableInceptionB(inception_module.InceptionB):
    # TODO https://github.com/pytorch/vision/pull/4232#pullrequestreview-730461659
    def __init__(self, *args: Any, **kwargs: Any) -> None:
        super().__init__(*args, conv_block=QuantizableBasicConv2d, **kwargs)  # type: ignore[misc]
        # FloatFunctional so the concat can be observed during quantization.
        self.myop = nn.quantized.FloatFunctional()

    def forward(self, x: Tensor) -> Tensor:
        return self.myop.cat(self._forward(x), 1)
class QuantizableInceptionC(inception_module.InceptionC):
    # TODO https://github.com/pytorch/vision/pull/4232#pullrequestreview-730461659
    def __init__(self, *args: Any, **kwargs: Any) -> None:
        super().__init__(*args, conv_block=QuantizableBasicConv2d, **kwargs)  # type: ignore[misc]
        # FloatFunctional so the concat can be observed during quantization.
        self.myop = nn.quantized.FloatFunctional()

    def forward(self, x: Tensor) -> Tensor:
        return self.myop.cat(self._forward(x), 1)
class QuantizableInceptionD(inception_module.InceptionD):
    # TODO https://github.com/pytorch/vision/pull/4232#pullrequestreview-730461659
    def __init__(self, *args: Any, **kwargs: Any) -> None:
        super().__init__(*args, conv_block=QuantizableBasicConv2d, **kwargs)  # type: ignore[misc]
        # FloatFunctional so the concat can be observed during quantization.
        self.myop = nn.quantized.FloatFunctional()

    def forward(self, x: Tensor) -> Tensor:
        return self.myop.cat(self._forward(x), 1)
class QuantizableInceptionE(inception_module.InceptionE):
    # TODO https://github.com/pytorch/vision/pull/4232#pullrequestreview-730461659
    def __init__(self, *args: Any, **kwargs: Any) -> None:
        super().__init__(*args, conv_block=QuantizableBasicConv2d, **kwargs)  # type: ignore[misc]
        # One FloatFunctional per concat site so each gets its own observer.
        self.myop1 = nn.quantized.FloatFunctional()
        self.myop2 = nn.quantized.FloatFunctional()
        self.myop3 = nn.quantized.FloatFunctional()

    def _forward(self, x: Tensor) -> List[Tensor]:
        branch1x1 = self.branch1x1(x)

        b3 = self.branch3x3_1(x)
        branch3x3 = self.myop1.cat(
            [self.branch3x3_2a(b3), self.branch3x3_2b(b3)], 1)

        b3dbl = self.branch3x3dbl_2(self.branch3x3dbl_1(x))
        branch3x3dbl = self.myop2.cat(
            [self.branch3x3dbl_3a(b3dbl), self.branch3x3dbl_3b(b3dbl)], 1)

        branch_pool = self.branch_pool(
            F.avg_pool2d(x, kernel_size=3, stride=1, padding=1))

        return [branch1x1, branch3x3, branch3x3dbl, branch_pool]

    def forward(self, x: Tensor) -> Tensor:
        return self.myop3.cat(self._forward(x), 1)
class QuantizableInceptionAux(inception_module.InceptionAux):
    # TODO https://github.com/pytorch/vision/pull/4232#pullrequestreview-730461659
    def __init__(self, *args: Any, **kwargs: Any) -> None:
        # Only swaps in the quantizable conv block; forward is inherited.
        super().__init__(*args, conv_block=QuantizableBasicConv2d, **kwargs)  # type: ignore[misc]
class QuantizableInception3(inception_module.Inception3):
    """Inception v3 built from quantizable blocks, with quant/dequant stubs."""

    def __init__(self, *args: Any, **kwargs: Any) -> None:
        super().__init__(  # type: ignore[misc]
            *args,
            # Swap every block for its quantization-aware counterpart.
            inception_blocks=[
                QuantizableBasicConv2d,
                QuantizableInceptionA,
                QuantizableInceptionB,
                QuantizableInceptionC,
                QuantizableInceptionD,
                QuantizableInceptionE,
                QuantizableInceptionAux,
            ],
            **kwargs,
        )
        # Entry/exit points of the quantized region of the graph.
        self.quant = torch.ao.quantization.QuantStub()
        self.dequant = torch.ao.quantization.DeQuantStub()

    def forward(self, x: Tensor) -> InceptionOutputs:
        x = self._transform_input(x)
        x = self.quant(x)
        x, aux = self._forward(x)
        x = self.dequant(x)
        # Aux output is only meaningful when training with aux_logits enabled.
        aux_defined = self.training and self.aux_logits
        if torch.jit.is_scripting():
            # Scripted modules must return a fixed structure.
            if not aux_defined:
                warnings.warn("Scripted QuantizableInception3 always returns QuantizableInception3 Tuple")
            return InceptionOutputs(x, aux)
        else:
            return self.eager_outputs(x, aux)

    def fuse_model(self, is_qat: Optional[bool] = None) -> None:
        r"""Fuse conv/bn/relu modules in inception model

        Fuse conv+bn+relu/ conv+relu/conv+bn modules to prepare for quantization.
        Model is modified in place. Note that this operation does not change numerics
        and the model after modification is in floating point
        """

        for m in self.modules():
            if type(m) is QuantizableBasicConv2d:
                m.fuse_model(is_qat)
class Inception_V3_QuantizedWeights(WeightsEnum):
    """Pretrained post-training-quantized weights (fbgemm backend)."""

    IMAGENET1K_FBGEMM_V1 = Weights(
        url="https://download.pytorch.org/models/quantized/inception_v3_google_fbgemm-a2837893.pth",
        transforms=partial(ImageClassification, crop_size=299, resize_size=342),
        meta={
            "num_params": 27161264,
            "min_size": (75, 75),
            "categories": _IMAGENET_CATEGORIES,
            "backend": "fbgemm",
            "recipe": "https://github.com/pytorch/vision/tree/main/references/classification#post-training-quantized-models",
            "unquantized": Inception_V3_Weights.IMAGENET1K_V1,
            "_metrics": {
                "ImageNet-1K": {
                    "acc@1": 77.176,
                    "acc@5": 93.354,
                }
            },
            "_ops": 5.713,
            "_file_size": 23.146,
            "_docs": """
                These weights were produced by doing Post Training Quantization (eager mode) on top of the unquantized
                weights listed below.
            """,
        },
    )
    DEFAULT = IMAGENET1K_FBGEMM_V1
@register_model(name="quantized_inception_v3")
@handle_legacy_interface(
    weights=(
        "pretrained",
        lambda kwargs: Inception_V3_QuantizedWeights.IMAGENET1K_FBGEMM_V1
        if kwargs.get("quantize", False)
        else Inception_V3_Weights.IMAGENET1K_V1,
    )
)
def inception_v3(
    *,
    weights: Optional[Union[Inception_V3_QuantizedWeights, Inception_V3_Weights]] = None,
    progress: bool = True,
    quantize: bool = False,
    **kwargs: Any,
) -> QuantizableInception3:
    r"""Inception v3 model architecture from
    `Rethinking the Inception Architecture for Computer Vision <http://arxiv.org/abs/1512.00567>`__.

    .. note::
        **Important**: In contrast to the other models the inception_v3 expects tensors with a size of
        N x 3 x 299 x 299, so ensure your images are sized accordingly.

    .. note::
        Note that ``quantize = True`` returns a quantized model with 8 bit
        weights. Quantized models only support inference and run on CPUs.
        GPU inference is not yet supported.

    Args:
        weights (:class:`~torchvision.models.quantization.Inception_V3_QuantizedWeights` or :class:`~torchvision.models.Inception_V3_Weights`, optional): The pretrained
            weights for the model. See
            :class:`~torchvision.models.quantization.Inception_V3_QuantizedWeights` below for
            more details, and possible values. By default, no pre-trained
            weights are used.
        progress (bool, optional): If True, displays a progress bar of the download to stderr.
            Default is True.
        quantize (bool, optional): If True, return a quantized version of the model.
            Default is False.
        **kwargs: parameters passed to the ``torchvision.models.quantization.QuantizableInception3``
            base class. Please refer to the `source code
            <https://github.com/pytorch/vision/blob/main/torchvision/models/quantization/inception.py>`_
            for more details about this class.

    .. autoclass:: torchvision.models.quantization.Inception_V3_QuantizedWeights
        :members:

    .. autoclass:: torchvision.models.Inception_V3_Weights
        :members:
        :noindex:
    """
    weights = (Inception_V3_QuantizedWeights if quantize else Inception_V3_Weights).verify(weights)

    # Remember whether the caller asked for the aux head: pretrained
    # checkpoints need aux_logits=True while loading even if the caller
    # did not request it.
    original_aux_logits = kwargs.get("aux_logits", False)
    if weights is not None:
        if "transform_input" not in kwargs:
            _ovewrite_named_param(kwargs, "transform_input", True)
        _ovewrite_named_param(kwargs, "aux_logits", True)
        _ovewrite_named_param(kwargs, "num_classes", len(weights.meta["categories"]))
        if "backend" in weights.meta:
            _ovewrite_named_param(kwargs, "backend", weights.meta["backend"])
    backend = kwargs.pop("backend", "fbgemm")

    model = QuantizableInception3(**kwargs)
    _replace_relu(model)
    if quantize:
        quantize_model(model, backend)

    if weights is not None:
        if quantize and not original_aux_logits:
            # Quantized checkpoints were stored without the aux head.
            model.aux_logits = False
            model.AuxLogits = None
        model.load_state_dict(weights.get_state_dict(progress=progress, check_hash=True))
        if not quantize and not original_aux_logits:
            # Drop the aux head only after the float checkpoint has loaded.
            model.aux_logits = False
            model.AuxLogits = None

    return model
|
# import necessary libraries
import plotly
import plotly.graph_objs as go
import plotly.io as pio
import numpy as np
import pandas as pd
# define function to calculate time given time and distance
def calculate_time(speed, distance):
    """Return time = distance / speed, or None for invalid input.

    Both parameters are coerced to float.  A message is printed and None is
    returned explicitly when a value is not numeric or speed is zero (the
    original let a zero speed raise ZeroDivisionError and fell off the end
    of the except branch, returning None implicitly).
    """
    try:
        # convert parameter values to float
        s = float(speed)
        d = float(distance)
        time = d/s # calculate time
        print(f'Time: {time}')
        return time # return value of time
    except ValueError:
        # non-numeric input
        print("invalid Number")
        return None
    except ZeroDivisionError:
        # a speed of 0 would otherwise crash the caller
        print("Speed must be non-zero")
        return None
# main function
def main():
    """Compute travel times for fixed speed/distance pairs and plot them.

    Writes the resulting figure to 'q2.png'.  Returns -1 when the input
    arrays have mismatched lengths, otherwise None.
    """
    # initialize variables to store array of values
    speed = [100, 85, 92]
    distance = [100, 100, 100]
    time = []
    # Both arrays must pair up element-wise; check once, before the loop
    # (the original re-evaluated this check on every iteration).
    if (len(speed) != len(distance)):
        print("Speed and Distance arrays need to be the same length")
        return -1 # end program if lengths are not equal
    for i in range(len(speed)):
        # convert values to integer and calculate time
        try:
            s = int(speed[i])
            d = int(distance[i])
            time.append(calculate_time(s, d)) # append to time array
        except ValueError:
            print("An invalid number was entered.")
    # create figure
    fig = go.Figure()
    # plot the values in the time and distance arrays
    for i in range(len(time)):
        if time[i] is None:
            # calculate_time failed for this entry; nothing to plot
            continue
        x1 = [0, time[i]]
        y1 = [0, distance[i]]
        # convert arrays to numpy arrays used for plotting
        arr1 = np.array(x1)
        arr2 = np.array(y1)
        # add line to graph
        fig.add_trace(go.Scatter(
            x = arr1,
            y = arr2,
            text = f'Speed: {speed[i]}',
            mode = 'lines+markers',
            name = f'Speed: {speed[i]}'
        ))
    # update layout to add title and axis labels
    fig.update_layout(
        title = "Time vs Distance",
        xaxis_title = "Time",
        yaxis_title = "Distance"
    )
    # write graph to file
    pio.write_image(fig, 'q2.png')
# Run only when executed as a script.
if __name__ == '__main__':
    main()
|
from google.cloud import bigquery
# Create a "Client" object
client = bigquery.Client()
# construct a reference to the dataset. The project name is "bigquery-public-data", the name of the dataset is "stackoverflow"
dataset_ref = client.dataset("stackoverflow", project="bigquery-public-data")
# API request o get the dataset
dataset = client.get_dataset(dataset_ref)
""" CREATING TABLE 1 = ANSWERS' TABLE """
# Construct a reference to the "posts_answers" table
answers_table_ref = dataset_ref.table("posts_answers")
# API request
answers_table = client.get_table(answers_table_ref)
# First five lines of the "posts_answers" table
client.list_rows(answers_table, max_results=5).to_dataframe()
""" CREATING TABLE 2 = QUESTIONS' TABLE """
# Construct a reference to the "posts_questions" table
questions_table_ref = dataset_ref.table("posts_questions")
# API request
questions_table = client.get_table(questions_table_ref)
# First five lines of the "posts_questions" table
client.list_rows(questions_table, max_results=5).to_dataframe()
def expert_finder(topic, client):
    """Return the Stack Overflow users who answer the most questions on *topic*.

    Inputs:
        topic: A string with the topic of interest, chosen by the user.
        client: A Client object that specifies the connection to the
            Stack Overflow dataset.
    Outputs:
        A DataFrame with columns for user_id and number_of_answers, sorted
        by number_of_answers descending.
    """
    sql = """
            SELECT a.owner_user_id AS user_id, COUNT(1) AS number_of_answers
            FROM `bigquery-public-data.stackoverflow.posts_questions` AS q
            INNER JOIN `bigquery-public-data.stackoverflow.posts_answers` AS a
                ON q.id = a.parent_Id
            WHERE q.tags like @topic_finder
            GROUP BY a.owner_user_id
            ORDER BY number_of_answers DESC
            """
    # Bind %topic% as a query parameter instead of interpolating it into the
    # SQL string (tricky part: handling the %topic% NOT-A-STRING problem).
    params = [bigquery.ScalarQueryParameter("topic_finder", "STRING", '%' + topic + '%')]
    # Cap the bytes billed so a runaway query errors out instead of being slow and costly.
    job_config = bigquery.QueryJobConfig(maximum_bytes_billed=10**11, query_parameters=params)
    job = client.query(sql, job_config=job_config)
    # API request - run the query, and return a pandas DataFrame
    return job.to_dataframe()
""" TIME TO CALL THE BACKEND FUNCTION. CHOOISE YOUR TOPIC """
experts_df= expert_finder("pandas", client) ### <--- USER, CHOOSE YOUR TOPIC HERE (replace "pandas" by "python", "adaboost", etc...)
print(experts_df.head(10)) |
import myLib
import matplotlib.pyplot as plt
import datetime
import time
import csv
import threading
import numpy as np
class HordePlotter:
    """Live matplotlib dashboard for a horde of GVF demons.

    Draws four stacked axes - demon predictions, raw robot observations,
    RUPEE, and UDE - and refreshes them via plot() as new data arrives.

    Fix vs. original: saveFigure evaluated datetime.datetime.now() in its
    default argument, freezing the timestamp at class-definition time so
    every default-named save overwrote the same file; the default name is
    now generated per call.
    """
    horde = None
    gvfList = []
    graphSpan = 100 # width of plot
    graphSpanL = 900 # width of large plot
    currentAngle = 0.0
    currentLoad = 0.0
    currentTemperature = 0.0
    currentVoltage = 0.0
    observationManager = None
    # y-axis limits for the four subplots
    predMinY = -300
    predMaxY = 300
    obsMinY = -200
    obsMaxY = 200
    rupMinY = 0
    rupMaxY = 100
    udeMinY = 0
    udeMaxY = 1

    def __init__(self, horde, obsMan):
        """Prime rolling history buffers for every GVF and observation."""
        self.horde = horde
        self.observationManager = obsMan
        self.gvfList = horde.getHorde()
        self.angle = [0] * self.graphSpan
        self.angle2 = [0] * self.graphSpan
        self.load = [0] * self.graphSpan
        self.temperature = [0] * self.graphSpan
        self.voltage = [0] * self.graphSpan
        self.ude = []
        self.rupee = []
        self.pred = []
        # One rolling history per demon.
        for i in range(0, len(self.gvfList)):
            self.pred.append([0]*self.graphSpan)
            self.rupee.append([0]*self.graphSpanL)
            self.ude.append([0]*self.graphSpanL)

    def initPlot(self):
        """Create the figure, all line artists, legends, labels and titles."""
        plt.ion()
        self.fig, (self.predictionAx, self.obsAx, self.rupeeAx, self.udeAx) = plt.subplots(4)
        self.fig.subplots_adjust(left=0.05, right=.75, hspace=1)
        x = np.arange(0, self.graphSpan)#1 - dimensional x for smaller scale
        xL = np.arange(0, self.graphSpanL)
        self.x2 = []#2 dimensional x
        self.x2L = []#2 dimensional x for larger scale
        for i in range(0, len(self.gvfList)):
            self.x2.append(x)
            self.x2L.append(xL)
        self.predLines = self.predictionAx.plot(self.x2, self.pred)
        # [self.angleLine, self.loadLine, self.temperatureLine, self.voltageLine, self.angle2Line] = self.obsAx.plot(x, self.angle, 'b', x, self.load, 'g', x, self.temperature, 'r', x, self.voltage, 'y', x, self.angle2, 'p')
        [self.angleLine, self.loadLine, self.temperatureLine, self.voltageLine] = self.obsAx.plot(x,
                                                                                                  self.angle,
                                                                                                  'b',
                                                                                                  x,
                                                                                                  self.load,
                                                                                                  'g',
                                                                                                  x,
                                                                                                  self.temperature,
                                                                                                  'r',
                                                                                                  x,
                                                                                                  self.voltage,
                                                                                                  'y',
                                                                                                  )
        self.rupeeLines = self.rupeeAx.plot(self.x2L, self.horde.getRupee())
        self.udeLines = self.udeAx.plot(self.x2L, self.horde.getUDE())
        #Set line labels
        obsNames = ["angle (deg)", "load (dyna.)", "temp. (cels.)", "volt.", 'Pavlov']
        self.angleLine.set_label(obsNames[0])
        self.loadLine.set_label(obsNames[1])
        self.temperatureLine.set_label(obsNames[2])
        self.voltageLine.set_label(obsNames[3])
        # self.angle2Line.set_label(obsNames[4])
        # NOTE(review): assumes exactly 10 demons in the horde -- confirm.
        predNames = ['0', '0.5', '0.75', '0.9', '0.98', '0.99', '0.999', 'State dep.', '1(off-pol)', '0(off-pol)']
        for i in range(0, 10):
            self.predLines[i].set_label(predNames[i])
            self.rupeeLines[i].set_label(predNames[i])
            self.udeLines[i].set_label(predNames[i])
            if i == 7:
                self.predLines[i].set_linestyle('dashed')
                self.rupeeLines[i].set_linestyle('dashed')
                self.udeLines[i].set_linestyle('dashed')
            if i > 7:
                self.predLines[i].set_linewidth(2)
                self.rupeeLines[i].set_linewidth(2)
                self.udeLines[i].set_linewidth(2)
        #Set legend position and y-limits
        self.obsAx.axes.legend(bbox_to_anchor=(1, .5), loc='center left', ncol=2, title='Observations')
        self.obsAx.axes.set_ylim(self.obsMinY, self.obsMaxY)
        self.predictionAx.axes.legend(loc='center left', bbox_to_anchor=(1, 0.5), ncol=2, title='Gamma(On-policy)/Target(Off-policy)')
        self.predictionAx.axes.set_ylim(self.predMinY, self.predMaxY)
        self.rupeeAx.axes.legend(loc='center left', bbox_to_anchor=(1, 0.5), ncol=2, title='Gamma(On-policy)/Target(Off-policy)')
        self.rupeeAx.axes.set_ylim(self.rupMinY, self.rupMaxY)
        self.udeAx.axes.legend(loc='center left', bbox_to_anchor=(1, 0.5), ncol=2, title='Gamma(On-policy)/Target(Off-policy)')
        self.udeAx.axes.set_ylim(self.udeMinY, self.udeMaxY)
        #Set axis label
        self.obsAx.set_xlabel("Timesteps")
        self.predictionAx.set_xlabel("Timesteps")
        self.rupeeAx.set_xlabel("Timesteps")
        self.udeAx.set_xlabel("Timesteps")
        #Set title
        self.obsAx.set_title('Observations From Robot')
        self.predictionAx.set_title("Demon Predictions")
        self.rupeeAx.set_title("RUPEE Measure For Each Demon")
        self.udeAx.set_title("UDE For Each Demon")
        #Dynamic text
        self.numLearnText = plt.text(0,0, "# Learn step: " + str(0.0))
        plt.pause(0.05)

    def plot(self):
        """Append the newest samples and redraw every line artist."""
        #get and set data for ude, rupee and prediction plots
        for i in range(0, len(self.gvfList)):
            curGVF = self.gvfList[i]
            self.pred[i].append(curGVF.prediction)
            self.rupee[i].append(curGVF.rupee)
            self.ude[i].append(curGVF.ude)
        for i in range(0, len(self.pred)):
            currentPrediction = self.pred[i]
            currentRupee = self.rupee[i]
            currentUDE = self.ude[i]
            # Only the trailing window of each (ever-growing) history is shown.
            self.predLines[i].set_xdata(self.x2[i])
            self.predLines[i].set_ydata(currentPrediction[-self.graphSpan:])
            self.rupeeLines[i].set_xdata(self.x2L[i])
            self.rupeeLines[i].set_ydata(currentRupee[-self.graphSpanL:])
            self.udeLines[i].set_xdata(self.x2L[i])
            self.udeLines[i].set_ydata(currentUDE[-self.graphSpanL:])
        #get and set data for observation plot
        self.angle.append(myLib.radToDeg(self.observationManager.currentAngle))
        self.load.append(self.observationManager.currentLoad)
        self.voltage.append(self.observationManager.currentVoltage)
        self.temperature.append(self.observationManager.currentTemperature)
        self.angle2.append(myLib.radToDeg(self.observationManager.currentAngleS2))
        self.angleLine.set_ydata(self.angle[-self.graphSpan:])
        self.loadLine.set_ydata(self.load[-self.graphSpan:])
        self.temperatureLine.set_ydata(self.temperature[-self.graphSpan:])
        self.voltageLine.set_ydata(self.voltage[-self.graphSpan:])
        #self.angle2Line.set_ydata(self.angle2[-self.graphSpan:])
        self.numLearnText.set_text("# Learn Steps: " + str(self.observationManager.obsIndex))
        plt.pause(0.05)

    def saveFigure(self, fileName=None):
        """Save the current figure under figures/.

        The default file name carries the *call-time* timestamp; passing an
        explicit fileName behaves exactly as before.
        """
        if fileName is None:
            fileName = 'HordeFigure_from_%s.png' % datetime.datetime.now()
        plt.savefig('figures/%s' % fileName)
|
#!/usr/bin/env python
"""
::
LV=box abprofile.py
LV=box python2.7 abprofile.py
ip abprofile.py --cat cvd_1_rtx_0_1M --pfx scan-pf-0 --tag 0
OKG4Test run
"""
from __future__ import print_function
import os, sys, logging, numpy as np
log = logging.getLogger(__name__)
from opticks.ana.profile_ import Profile
class ABProfile(object):
    """A pair of Profile results compared via the b-over-a time ratio (boa)."""

    def __init__(self, adir, bdir=None):
        """Load two profiles and compute their time ratio.

        With only *adir* given, assume a bi-simulation directory holding
        both an Opticks (ap) and a Geant4 (bp) profile.  With *bdir* given
        too, each directory is loaded as an independent Opticks profile.
        """
        log.info("adir %s" % adir)
        log.info("bdir %s" % bdir)
        if bdir is None:
            # assume OK vs G4 mode : ie profiles from a bi-simulation
            self.ap = Profile(adir, "ab.pro.ap", g4=False)
            self.bp = Profile(adir, "ab.pro.bp", g4=True)
        else:
            # treat arguments as two profile directories, assumed to be OK (not G4)
            self.ap = Profile(adir, "ab.pro.ap", g4=False)
            self.bp = Profile(bdir, "ab.pro.bp", g4=False)

        if self.ap.valid and self.bp.valid:
            # Guard against a zero a-time; -1 flags an unusable ratio.
            boa = self.bp.tim / self.ap.tim if self.ap.tim > 0 else -1
            log.info("self.bp.tim %s self.ap.tim %s boa %s" % (self.bp.tim, self.ap.tim, boa))
        else:
            boa = -2  # one or both profiles failed to load
        self.boa = boa

    def brief(self):
        """Return a one-line summary of both times and their ratio."""
        return " ap.tim %-10.4f bp.tim %-10.4f bp.tim/ap.tim %-10.4f " % (self.ap.tim, self.bp.tim, self.boa)

    def __repr__(self):
        parts = ["ab.pro", self.brief()]
        parts.extend(self.ap.lines())
        parts.extend(self.bp.lines())
        return "\n".join(parts)
if __name__ == '__main__':
    # Matplotlib and Opticks helpers are imported lazily so the module can
    # be imported without a display/plotting environment.
    from opticks.ana.main import opticks_main
    from opticks.ana.plot import init_rcParams
    import matplotlib.pyplot as plt
    init_rcParams(plt)
    # Parse standard Opticks CLI args (see module docstring for usage).
    ok = opticks_main(doc=__doc__)
    log.info(ok.brief)
    # Load the A/B profile pair from the tag directory and print summary.
    op = ABProfile(ok.tagdir)
    print(op)
    ap = op.ap
    bp = op.bp
    # Scatter both time series: A in one color sequence, B in the other.
    plt.plot( ap.t, ap.v, 'o' )
    plt.plot( bp.t, bp.v, 'o' )
    # Mark each profile's two index times: blue for A, red for B.
    plt.axvline( ap.t[ap.idx[0]], c="b" )
    plt.axvline( ap.t[ap.idx[1]], c="b" )
    plt.axvline( bp.t[bp.idx[0]], c="r" )
    plt.axvline( bp.t[bp.idx[1]], c="r" )
    # Interactive mode keeps the script responsive after show().
    plt.ion()
    plt.show()
    log.info("tagdir: %s " % ok.tagdir)
|
from torchfly_dev.training.checkpointer.advanced_checkpointer import AdavancedCheckpointer
import time
import os
# class Net(nn.Module):
# def __init__(self):
# super().__init__()
# self.model = nn.Sequential( nn.Linear(5000,4000) )
# def forward(self):
# return 0
# Minimal stand-in for a model's parameters.  NOTE: a plain dict has no
# .state_dict() method -- the original script called net.state_dict() and
# crashed with AttributeError before exercising the checkpointer at all --
# so the dict itself is passed as the checkpoint state.
net = {"weights": 0, "bias": 0}
saver = AdavancedCheckpointer(num_checkpoints_to_keep=2, keep_checkpoint_every_num_seconds=5)
saver.save_checkpoint(stamp='0', state=net)
saver.save_checkpoint(stamp='1', state=net)
# At this time there should be two checkpoints (0 and 1) saved.
saved_checkpoints = os.listdir('Checkpoints')
saved_checkpoints.sort()
assert saved_checkpoints[0] == '0_state.pth'
assert saved_checkpoints[1] == '1_state.pth'
saver.save_checkpoint(stamp='2', state=net)
# Only two kept: less than 5 seconds elapsed, so checkpoint 0 is deleted.
saved_checkpoints = os.listdir('Checkpoints')
saved_checkpoints.sort()
assert saved_checkpoints[0] == '1_state.pth'
assert saved_checkpoints[1] == '2_state.pth'
# Wait past keep_checkpoint_every_num_seconds so 1 and 2 enter the
# permanent ("safe") list and are no longer eligible for deletion.
time.sleep(6)
saver.save_checkpoint(stamp='3', state=net)
saver.save_checkpoint(stamp='4', state=net)
# Four checkpoints now: 1 and 2 are safe, 3 and 4 are the rolling pair.
saved_checkpoints = os.listdir('Checkpoints')
saved_checkpoints.sort()
assert saved_checkpoints == ['1_state.pth', '2_state.pth', '3_state.pth', '4_state.pth']
saver.save_checkpoint(stamp='5', state=net)
# 3 is rotated out; 1 and 2 remain because they are in the safe list.
saved_checkpoints = os.listdir('Checkpoints')
saved_checkpoints.sort()
assert saved_checkpoints == ['1_state.pth', '2_state.pth', '4_state.pth', '5_state.pth']
print('test pass !')
# -*- coding: utf-8 -*-
from typing import List
class Solution:
    def sumBase(self, n: int, k: int) -> int:
        """Return the sum of the digits of *n* written in base *k*."""
        return sum(self.toBaseK(n, k))

    def toBaseK(self, n: int, k: int) -> List[int]:
        """Digits of *n* in base *k*, least-significant first ([] for n == 0)."""
        out: List[int] = []
        while n:
            n, remainder = divmod(n, k)
            out.append(remainder)
        return out
if __name__ == "__main__":
    # Quick self-checks: 34 in base 6 is 54 (5+4=9); 10 in base 10 is 10 (1+0=1).
    sol = Solution()
    assert sol.sumBase(34, 6) == 9
    assert sol.sumBase(10, 10) == 1
|
import maya.cmds as cmds
def ChangeSuffix(sufx):
    """optionMenu changeCommand callback: echo the newly selected suffix.

    The original message was missing a space ("Suffix is nowJnt"); the
    single-argument print() call form works under both Python 2 and 3.
    """
    print('Suffix is now ' + sufx)
def windowCreator():
    """Build and show the 'Renaming' tool window.

    Collects a base name, a suffix and a number-padding choice from the
    user, then renames the current selection via Renamer() on button press.
    """
    # Recreate the window from scratch if a previous instance is open.
    if (cmds.window('Renaming', exists=True)): cmds.deleteUI('Renaming')
    Renaming = cmds.window('Renaming')
    colLayout = cmds.columnLayout(parent=Renaming, adjustableColumn=True)
    cmds.text(label='Renamer', parent=colLayout)
    # Base-name field, queried later by Renamer() via its 'NewName' handle.
    cmds.textFieldGrp('NewName', label='Enter new Name Here', parent=colLayout)
    # Suffix drop-down; ChangeSuffix is echoed on each selection change.
    dropDwnM = cmds.optionMenu('Suffix', parent=colLayout, label='Select Suffix', changeCommand=ChangeSuffix)
    cmds.menuItem(parent=dropDwnM, label=' Jnt')
    cmds.menuItem(parent=dropDwnM, label=' Geo')
    cmds.menuItem(parent=dropDwnM, label=' Ctrl')
    cmds.menuItem(parent=dropDwnM, label=' SNRTHIS')
    # Padding drop-down; menuItems with no parent attach to this optionMenu.
    cmds.optionMenu('Padd', label='Select Number Padding')
    cmds.menuItem(label='None')
    cmds.menuItem(label='0')
    cmds.menuItem(label='00')
    # Lambda swallows the button's callback args before invoking Renamer().
    cmds.button(label="Rename the Thing!!!", command=lambda *args: Renamer())
    cmds.showWindow(Renaming)
#For each object in list Rename To:
# Prefix (user input) + Number (that counts) + Suffix (user input or drop down)
def Renamer():
    """Rename every selected object to <name><padded counter><suffix>.

    Name, suffix and padding are read from the 'Renaming' window controls
    built by windowCreator().
    """
    sel = cmds.ls(sl=True)
    # UI values are loop-invariant; query them once instead of per object
    # (the original re-queried all three controls on every iteration).
    name = cmds.textFieldGrp('NewName', q=True, tx=True)
    suffix = cmds.optionMenu('Suffix', q=True, v=True)
    padding = cmds.optionMenu('Padd', q=True, v=True)
    # Map the padding choice to a minimum digit count; an unrecognised
    # choice warns and falls back to no padding, as before.  zfill also
    # generalises the old branch logic: '00' now pads counters >= 100 and
    # '0' pads counters >= 10 correctly instead of warning per object.
    if padding == '00':
        width = 3
    elif padding == '0':
        width = 2
    elif padding == 'None':
        width = 1
    else:
        cmds.warning("No Valid Padd Number! Doing Default No Pad")
        width = 1
    counter = 1
    for objet in sel:
        NewestName = name + str(counter).zfill(width) + suffix
        cmds.rename(objet, NewestName)
        counter += 1
        print(NewestName)
windowCreator() |
from django.urls import path
from . import views
urlpatterns = [
    # App root: list view of Informacione objects.
    path('', views.InformacioneListView.as_view(), name='Naruto'),
]
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.