text stringlengths 8 6.05M |
|---|
# Create your views here.
from django.template import RequestContext
from django.shortcuts import render_to_response
from django.http import HttpResponseRedirect
from django.core.urlresolvers import reverse
from django.contrib.auth.decorators import login_required
from django.contrib.auth.models import User, Group, Permission
from admin.forms import *
""" @todo, this is the global administration page. """
@login_required
def index(request):
    """Render the global administration landing page (superusers only)."""
    if not request.user.is_superuser:
        return HttpResponseRedirect(reverse("home.views.index"))
    context = {}
    return render_to_response("admin/index.html", context,
                              context_instance=RequestContext(request))
@login_required
def users(request):
    """List all users; on POST, add the posted user to the posted group.

    Only superusers may access; everyone else is redirected home.
    """
    if not request.user.is_superuser:
        return HttpResponseRedirect(reverse("home.views.index"))
    if request.method == 'POST':
        # Fix: the original bare `except:` swallowed every exception,
        # including SystemExit/KeyboardInterrupt, and hid real bugs.
        # Catch only the failures this lookup can legitimately produce:
        # a missing POST key, a non-numeric user id, or an unknown
        # group/user.
        try:
            group = Group.objects.get(name=request.POST['group'])
            user = User.objects.get(pk=request.POST['userid'])
            user.groups.add(group)
        except (KeyError, ValueError, Group.DoesNotExist, User.DoesNotExist):
            return HttpResponseRedirect(reverse("admin.views.users"))
    users = User.objects.all()
    # Attach each user's profile so the template can reach it directly.
    for user in users:
        user.userprofile = user.get_profile()
    return render_to_response("admin/users.html", {
        'users': users
    }, context_instance=RequestContext(request))
@login_required
def users_groups(request, group):
    """List the users that belong to `group` (superusers only)."""
    if not request.user.is_superuser:
        return HttpResponseRedirect(reverse("home.views.index"))
    members = User.objects.filter(groups__name=group)
    # Attach each member's profile so the template can reach it directly.
    for member in members:
        member.userprofile = member.get_profile()
    return render_to_response("admin/users.html", {
        'users': members
    }, context_instance=RequestContext(request))
@login_required
def tests(request):
    """Render the administration tests page (superusers only)."""
    if not request.user.is_superuser:
        return HttpResponseRedirect(reverse("home.views.index"))
    context = {}
    return render_to_response("admin/tests.html", context,
                              context_instance=RequestContext(request))
@login_required
def reports(request):
    """Render the administration reports page (superusers only)."""
    if not request.user.is_superuser:
        return HttpResponseRedirect(reverse("home.views.index"))
    context = {}
    return render_to_response("admin/reports.html", context,
                              context_instance=RequestContext(request))
@login_required
def groups(request):
    """List all groups and create a new one from a valid GroupADD POST."""
    if not request.user.is_superuser:
        return HttpResponseRedirect(reverse("home.views.index"))
    # Bound form on POST (kept around so validation errors render),
    # unbound form otherwise.
    form = GroupADD(request.POST) if request.method == 'POST' else GroupADD()
    if request.method == 'POST' and form.is_valid():
        Group(name=form.cleaned_data['name']).save()
    return render_to_response("admin/groups.html", {
        'form': form,
        'groups': Group.objects.all()
    }, context_instance=RequestContext(request))
@login_required
def group_view(request, group):
    """Render the groups template for a single group (superusers only).

    NOTE(review): `group` is currently unused — the template receives an
    empty context, same as the original.
    """
    if not request.user.is_superuser:
        return HttpResponseRedirect(reverse("home.views.index"))
    context = {}
    return render_to_response("admin/groups.html", context,
                              context_instance=RequestContext(request))
|
import numpy as np
#import theano
#import theano.Tensor as T
import time
# Per-joint zero offsets (degrees) added to the commanded angles in FK.
th_zero = [0, 90, -90, 0, 180, -180]
# Link lengths / offsets of the arm model (units match the pose output).
L = [10.0, 105.0, -28.0, 110.0, -20.0, 28.0, 150.0]
# Initial joint guess (degrees); earlier experiments kept for reference:
#th = [-10, -27, 47, 63, -20, 5]
#th = [0,-20,0,30,0,90]
#th = [ -96, -75, -34, -27, 21, 206]
#th = [ -96.07351766, -74.89197776, -34.13323947, -27.17642705, 21.20637644, 205.58019537]
th = [0,-50,-50,0,-50,0]
#th = [ 79, -90, -41, -11, 90, -90]
#dest = [10, -355, 60, 10, -365, 60]
#dest = [ 200, 0, 200, 210, 0, 200, 200, 10, 200]
x = 160
y = 100
z = 0
# Target pose: tool position (x,y,z) plus two orientation probe points —
# presumably "front" at z-10 and "right" at y-10; matches FK's 9-vector.
dest = [ x,y,z, x,y,z-10, x,y-10,z]
def FK(th):
    """Forward kinematics: joint angles (degrees) -> 9-component pose.

    Returns np.array([x, y, z, fx, fy, fz, rx, ry, rz]): the tool-frame
    origin plus two probe points, 10 units along the tool's local z
    ("front") and local x ("right"), which together encode orientation.
    Reads the module globals `th_zero` (zero offsets) and `L` (link
    lengths) and uses `deg_to_rad`.
    """
    local_th = list(th)
    # Apply per-joint zero offsets and convert to radians.
    for i in range(len(local_th)):
        local_th[i] = deg_to_rad(local_th[i]+th_zero[i])
    # T0..T5 are per-joint 4x4 homogeneous transforms; Tb is the fixed
    # base offset. NOTE(review): the specific entries encode this arm's
    # geometry (DH-style axis twists) — not verifiable from this file.
    T0 = [ [np.cos(local_th[0]), 0.0, np.sin(local_th[0]), L[0]*np.cos(local_th[0])],
           [np.sin(local_th[0]), 0.0, -np.cos(local_th[0]), L[0]*np.sin(local_th[0])],
           [0.0, 1.0, 0.0, 0.0],
           [0.0, 0.0, 0.0, 1.0]
         ]
    T1 = [ [np.cos(local_th[1]), -np.sin(local_th[1]), 0.0, L[1]*np.cos(local_th[1])],
           [np.sin(local_th[1]), np.cos(local_th[1]), 0.0, L[1]*np.sin(local_th[1])],
           [0.0, 0.0, 1.0, 0.0],
           [0.0, 0.0, 0.0, 1.0]
         ]
    T2 = [ [np.cos(local_th[2]), 0.0, -np.sin(local_th[2]), L[2]*np.cos(local_th[2])],
           [np.sin(local_th[2]), 0.0, np.cos(local_th[2]), L[2]*np.sin(local_th[2])],
           [0.0, -1.0, 0.0, 0.0],
           [0.0, 0.0, 0.0, 1.0]
         ]
    T3 = [ [np.cos(local_th[3]), 0.0, np.sin(local_th[3]), 0.0],
           [np.sin(local_th[3]), 0.0, -np.cos(local_th[3]), 0.0],
           [0.0, 1.0, 0.0, L[3]],
           [0.0, 0.0, 0.0, 1.0]
         ]
    T4 = [ [np.cos(local_th[4]), 0.0, np.sin(local_th[4]), L[5]*np.cos(local_th[4])],
           [np.sin(local_th[4]), 0.0, -np.cos(local_th[4]), L[5]*np.sin(local_th[4])],
           [0.0, 1.0, 0.0, L[4]],
           [0.0, 0.0, 0.0, 1.0]
         ]
    T5 = [ [np.cos(local_th[5]), -np.sin(local_th[5]), 0.0, 0.0],
           [np.sin(local_th[5]), np.cos(local_th[5]), 0.0, 0.0],
           [0.0, 0.0, 1.0, L[6]],
           [0.0, 0.0, 0.0, 1.0]
         ]
    Tb = [ [ 1.0, 0.0, 0.0, 30.0],
           [ 0.0, 1.0, 0.0, 0.0],
           [ 0.0, 0.0, 1.0, 95.0],
           [ 0.0, 0.0, 0.0, 1.0]
         ]
    # Compose base and joint transforms into the full tool transform.
    T = np.dot( np.dot( np.dot(Tb,T0),np.dot(T1,T2) ),np.dot( np.dot(T3,T4), T5) )
    # Probe points 10 units along the tool's local z and x axes.
    front = np.dot(T,[[0],[0],[10],[1]])
    right = np.dot(T,[[10],[0],[0],[1]])
    pose = np.array([ T[0][3], T[1][3], T[2][3], front[0], front[1], front[2], right[0], right[1], right[2] ]) # x,y,z,roll,pitch,yaw
    return pose
def rad_to_deg(rad):
    """Convert an angle from radians to degrees."""
    fraction_of_pi = rad / np.pi
    return fraction_of_pi * 180.0
def deg_to_rad(deg):
    """Convert an angle from degrees to radians."""
    fraction_of_half_turn = deg / 180.0
    return fraction_of_half_turn * np.pi
def IK(curr_th, dest, time_span):
    """Iterative inverse kinematics via the pseudo-inverse Jacobian.

    Starting from joint angles `curr_th` (degrees), steps toward the
    9-component pose `dest` until the pose distance drops below 0.1 or
    `time_span` seconds elapse; returns the best joint vector found,
    rounded to 2 decimals. Mutates `curr_th` in place. Python 2 code
    (bare `print` statements).
    """
    time_st = time.time()
    curr = FK(curr_th)
    d = dist(dest,curr)
    # Track the best (closest) configuration seen so far.
    mini = d
    th_mini = curr_th
    while d > 0.1 and time.time() - time_st < time_span:
        # Unit step in pose space, mapped into joint space through the
        # pseudo-inverse Jacobian.
        d_x = dest - curr
        d_x = np.divide(d_x,dist(d_x))
        d_q = np.dot(inverse_jac(curr_th),d_x)
        curr_th += d_q
        # Clamp every joint to the [-180, 90] degree range.
        for i in range(len(curr_th)):
            if curr_th[i]>90 :
                curr_th[i] = 90
            if curr_th[i]<-180:
                curr_th[i] = -180
        '''
        for i in range(6):
            curr_th[i] = round(curr_th[i],0)
            while not curr_th[i] in range(-180,180):
                curr_th[i] -= np.sign(curr_th[i])*360
        '''
        curr = FK(curr_th)
        d = dist(dest,curr)
        if d < mini:
            mini = d
            th_mini = list(curr_th)
    # Round the best solution and report how close it got.
    for i in range(6):
        th_mini[i] = round(th_mini[i],2)
    curr = FK(th_mini)
    print th_mini#,dist(dest,curr)
    print curr
    print dest
    print dist(dest,curr)
    return th_mini
def dist(curr, dest=None):
    """Euclidean distance between two pose vectors.

    Fixes two issues in the original: the mutable-list default argument,
    and the hard-coded `range(9)` which silently assumed 9 components.
    Now iterates over the full length of `curr` (`dest` must be at least
    as long); `dest` defaults to the origin (all zeros), matching the
    original 9-zero default for 9-component poses.
    """
    if dest is None:
        dest = [0.0] * len(curr)
    total = 0.0
    for c, d in zip(curr, dest):
        total += (c - d) ** 2
    return np.sqrt(total)
def inverse_jac(th_in):
    """Numerical left pseudo-inverse of the pose Jacobian at `th_in`.

    Builds the 9x6 Jacobian by forward differences on FK (step 0.001
    degrees per joint), then returns (J^T J)^-1 J^T. Raises
    numpy.linalg.LinAlgError at singular configurations.
    """
    delta = 0.001
    current = FK(th_in)
    jac = []
    # One forward-difference column per joint.
    for i in range(len(th_in)):
        new_th = list(th_in)
        new_th[i] += delta
        jac.append(np.divide(np.subtract(FK(new_th),current),delta))
    # Columns were appended as rows; transpose to get pose-rows x joint-cols.
    jac = np.transpose(jac)
    # Moore-Penrose left pseudo-inverse: (J^T J)^-1 J^T.
    in_jac = np.dot( np.linalg.inv( np.dot(np.transpose(jac),jac) ),np.transpose(jac) )
    return in_jac
#1IK(th,dest)
#print FK(th) |
from django.db import models
from django.contrib.auth import authenticate,login,logout
from django.contrib.auth.models import AbstractUser
# Create your models here.
class UserProfile(AbstractUser):
    """Custom user model extending Django's AbstractUser.

    Adds an optional nick name and a unique mobile phone number.
    """
    # Optional display name, up to 20 characters.
    nick_name = models.CharField(max_length=20, null=True)
    # 11-digit mobile number; must be unique across users.
    mobile = models.CharField(max_length=11, unique=True)

    class Meta:
        verbose_name = "用户信息"
        verbose_name_plural = verbose_name

    def __str__(self):
        return self.username
import dash_bootstrap_components as dbc
from dash import html
# Shared nav items; `active=True` highlights the first link.
nav_contents = [
    dbc.NavItem(dbc.NavLink("Active", href="#", active=True)),
    dbc.NavItem(dbc.NavLink("A much longer link label", href="#")),
    dbc.NavItem(dbc.NavLink("Link", href="#")),
]
# Two pill-style navs from the same items: `fill` stretches items to
# fill the row's width; `justified` additionally gives them equal width.
nav1 = dbc.Nav(nav_contents, pills=True, fill=True)
nav2 = dbc.Nav(nav_contents, pills=True, justified=True)
# Stack both variants with a horizontal rule between them.
navs = html.Div([nav1, html.Hr(), nav2])
|
import os
import shutil
import unittest
from pathlib import Path
from patchworkdocker.modifiers import copy_file, apply_patch
from patchworkdocker.tests._common import TestWithTempFiles
_RESOURCES_LOCATION = os.path.join(os.path.dirname(os.path.abspath(__file__)), "resources")
_EXAMPLE_FILE_NAME = "test-file"
_EXAMPLE_FILE_NAME_2 = "test-file-2"
class TestCopyFile(TestWithTempFiles):
    """
    Tests for `copy_file`.

    Fix: `assertEquals` is a deprecated alias of `assertEqual` and was
    removed in Python 3.12; all calls now use `assertEqual`.
    """
    def setUp(self):
        super().setUp()
        self.src_directory = self.temp_manager.create_temp_directory()
        self.dest_directory = self.temp_manager.create_temp_directory()

    def test_empty_src_to_empty_dest(self):
        """Copying an empty directory leaves the destination empty."""
        copy_file(self.src_directory, self.dest_directory)
        self.assertEqual(0, len(os.listdir(self.dest_directory)))

    def test_src_to_empty_dest(self):
        """A file in the source directory is copied to the destination."""
        Path(os.path.join(self.src_directory, _EXAMPLE_FILE_NAME)).touch()
        copy_file(self.src_directory, self.dest_directory)
        self.assertTrue(os.path.exists(os.path.join(self.dest_directory, _EXAMPLE_FILE_NAME)))
        self.assertEqual(1, len(os.listdir(self.dest_directory)))

    def test_src_to_dest(self):
        """Existing destination files are kept alongside copied ones."""
        Path(os.path.join(self.dest_directory, _EXAMPLE_FILE_NAME_2)).touch()
        Path(os.path.join(self.src_directory, _EXAMPLE_FILE_NAME)).touch()
        copy_file(self.src_directory, self.dest_directory)
        self.assertTrue(os.path.exists(os.path.join(self.dest_directory, _EXAMPLE_FILE_NAME)))
        self.assertEqual(2, len(os.listdir(self.dest_directory)))

    def test_src_to_dest_with_directory_merge(self):
        """Directory contents are merged into a non-empty destination."""
        Path(os.path.join(self.dest_directory, _EXAMPLE_FILE_NAME_2)).touch()
        Path(os.path.join(self.src_directory, _EXAMPLE_FILE_NAME)).touch()
        copy_file(self.src_directory, self.dest_directory)
        self.assertTrue(os.path.exists(os.path.join(self.dest_directory, _EXAMPLE_FILE_NAME)))
        self.assertEqual(2, len(os.listdir(self.dest_directory)))

    def test_src_file_to_empty_dest(self):
        """A single source file is copied into an empty destination."""
        file_location = os.path.join(self.src_directory, _EXAMPLE_FILE_NAME)
        Path(file_location).touch()
        copy_file(file_location, self.dest_directory)
        self.assertTrue(os.path.exists(os.path.join(self.dest_directory, _EXAMPLE_FILE_NAME)))
        self.assertEqual(1, len(os.listdir(self.dest_directory)))

    def test_src_file_to_dest_with_overwrite(self):
        """A same-named destination file is overwritten; others are kept."""
        file_location = os.path.join(self.src_directory, _EXAMPLE_FILE_NAME)
        print("1", file=open(os.path.join(self.dest_directory, _EXAMPLE_FILE_NAME), "w"), end="")
        print("1", file=open(os.path.join(self.dest_directory, _EXAMPLE_FILE_NAME_2), "w"), end="")
        print("2", file=open(file_location, "w"), end="")
        copy_file(file_location, self.dest_directory)
        self.assertEqual("2", open(os.path.join(self.dest_directory, _EXAMPLE_FILE_NAME), "r").read())
        self.assertEqual(2, len(os.listdir(self.dest_directory)))
class TestApplyPatch(TestWithTempFiles):
    """
    Tests for `apply_patch`.
    """
    _DOCKERFILE_NAME = "Dockerfile"
    _EXAMPLE_DOCKERFILE_LOCATION = os.path.join(_RESOURCES_LOCATION, "patching", _DOCKERFILE_NAME)

    def setUp(self):
        super().setUp()
        # Work on a throwaway copy so the resource Dockerfile stays pristine.
        temp_directory = self.temp_manager.create_temp_directory()
        self._dockerfile_location = os.path.join(temp_directory, TestApplyPatch._DOCKERFILE_NAME)
        shutil.copyfile(TestApplyPatch._EXAMPLE_DOCKERFILE_LOCATION, self._dockerfile_location)

    def test_change_from(self):
        """The patch rewrites the base image in the FROM line."""
        patched_content = self._apply(f"{_RESOURCES_LOCATION}/patching/from-change.patch")
        self.assertTrue(patched_content.startswith("FROM arm32v7/ubuntu:16.04"))

    def test_add_and_remove(self):
        """The patch both adds and removes lines."""
        patched_content = self._apply(f"{_RESOURCES_LOCATION}/patching/add-and-remove.patch")
        # assertIn/assertNotIn give clearer failure messages than
        # assertTrue("..." in content) / assertTrue("..." not in content).
        self.assertIn("RUN /other.sh", patched_content)
        self.assertNotIn("COPY . /data", patched_content)

    def _apply(self, patch_location: str) -> str:
        """
        Applies the given patch to the example Docker file.
        :param patch_location: the location of the patch to apply
        :return: the contents of the example Docker file after the patch has been applied
        """
        apply_patch(patch_location, self._dockerfile_location)
        with open(self._dockerfile_location, "r") as file:
            return file.read()
# Allow running this test module directly.
if __name__ == "__main__":
    unittest.main()
|
### This middleware will set a variable, request.site
### to reference the current Site for the given request.
from django.contrib.sites.models import Site, SiteManager
from django.conf import settings
import re
# Settings toggles: strip leading www-style prefixes and :port suffixes
# from the request host before looking up the Site by domain.
ignore_www_zone = getattr(settings, 'IGNORE_WWW_ZONE', True)
ignore_server_port = getattr(settings, 'IGNORE_SERVER_PORT', True)
# New get_current override, which can take a request into account. This is some
# nasty monkey patching, but it does what we want it to.
SiteManager.get_current_legacy = SiteManager.get_current
def get_current(self, request=None):
    """Resolve the Site for `request`'s host, falling back to SITE_ID.

    Without a request, defers to the stashed legacy (SITE_ID-based)
    implementation.
    """
    if request is None:
        return Site.objects.get_current_legacy()
    hostname = request.get_host()
    # Remove the www from our domain unless requested otherwise.
    # The pattern matches ww/www with an optional digit suffix (e.g. www2.).
    if ignore_www_zone:
        hostname = re.sub(r'^w{2,3}\d*\.', '', hostname)
    # Drop an explicit :port so the domain comparison is port-agnostic.
    if ignore_server_port:
        hostname = re.sub(r':\d*$', '', hostname)
    try:
        # Attempt to get a site based on the current domain
        return Site.objects.get(domain=hostname)
    except Site.DoesNotExist:
        # If this site doesn't exist, revert to our SITE_ID
        return Site.objects.get_current()
SiteManager.get_current = get_current
class SiteProviderMiddleware(object):
    """Old-style middleware that sets `request.site` to the current Site."""
    def process_request(self, request):
        # Prevent future version collisions: respect a `site` attribute
        # that another component may already have set on the request.
        if hasattr(request, 'site'):
            return
        request.site = Site.objects.get_current(request)
|
def carpIkiEkle(x, y):
    """Return the product of x and y, plus 2."""
    product = x * y
    return product + 2
def carpUcEkle(x, y):
    """Return the product of x and y, plus 3."""
    product = x * y
    return product + 3
# Exercise both helpers with the same operands.
sayi = carpIkiEkle(3, 5)
print(sayi)  # 17
print(carpUcEkle(3, 5))  # 18
import pymysql
import os
MONKEYPATCH_PYMYSQL_CONNECTION = True
def monkeypatch_pymysql_connection():
    """Patch pymysql's Connection so it works as a context manager.

    Adds __enter__/__exit__ to Connection: entering returns the
    connection itself; exiting rolls back (making PEP 249's implicit
    rollback-on-close explicit) and then closes the connection.
    NOTE(review): modern PyMySQL already defines these — this patch
    overrides them unconditionally; confirm against the installed version.
    """
    Connection = pymysql.connections.Connection
    def enter_patch(self):
        return self
    def exit_patch(self, exc, value, traceback):
        try:
            self.rollback()  # Implicit rollback when connection closed per PEP-249
        finally:
            # Close even if the rollback itself raised.
            self.close()
    Connection.__enter__ = enter_patch
    Connection.__exit__ = exit_patch
# Apply the patch once at import time; clearing the flag guards against
# double-patching if this code is executed again.
if MONKEYPATCH_PYMYSQL_CONNECTION:
    monkeypatch_pymysql_connection()
    MONKEYPATCH_PYMYSQL_CONNECTION = False
# Connect to the database using environment-variable configuration.
def connect_to_db():
    """Open a pymysql connection configured from MYSQL_* env vars.

    Fix: use keyword arguments — PyMySQL 1.0 removed support for the
    old positional `connect(host, user, password, db)` call, so the
    positional form breaks on upgrade.
    """
    return pymysql.connect(
        host=os.getenv("MYSQL_HOST"),
        user=os.getenv("MYSQL_USER"),
        password=os.getenv("MYSQL_PASSWORD"),
        database=os.getenv("MYSQL_DB"),
    )
# Fetch every row produced by a query (original comment wrongly said
# "first row" — it was copy-pasted from the helper below).
def select_and_fetch_all(con, query, params):
    """Execute `query` with `params` on `con` and return all rows."""
    cursor = con.cursor()
    cursor.execute(query, params)
    rows = cursor.fetchall()
    cursor.close()
    return rows
# Fetch the first row produced by a query.
def select_and_fetch_one(con, query, params):
    """Execute `query` with `params` on `con` and return the first row."""
    cursor = con.cursor()
    cursor.execute(query, params)
    row = cursor.fetchone()
    cursor.close()
    return row
# Fetch the first column of the first result row.
def select_and_fetch_first_column(con, query, params):
    """Return column 0 of the first result row, or None when no row matches."""
    row = select_and_fetch_one(con, query, params)
    return None if row is None else row[0]
# Execute a data-modifying statement and commit it.
def execute_update(con, query, params):
    """Run `query` with `params` on `con`, commit, and close the cursor."""
    cursor = con.cursor()
    cursor.execute(query, params)
    con.commit()
    cursor.close()
# Execute an INSERT, commit, and report the new row id.
def execute_insert(con, query, params):
    """Run an insert, commit, and return the cursor's lastrowid."""
    cursor = con.cursor()
    cursor.execute(query, params)
    con.commit()
    new_row_id = cursor.lastrowid
    cursor.close()
    return new_row_id
"""
CCT 建模优化代码
A21 Baseutils 示例
作者:赵润晓
日期:2021年5月2日
"""
from os import error, path
import sys
sys.path.append(path.dirname(path.abspath(path.dirname(__file__))))
from cctpy import *
# BaseUtils utility class: provides many practical helper methods.
# Function equal()
# Used to test whether two numbers are equal.
print(BaseUtils.equal(1, 1)) # True
print(BaseUtils.equal(1, 2)) # False
# It can also test whether two P2/P3 objects are equal.
print(BaseUtils.equal(P2.zeros(), P2.origin())) # True
print(BaseUtils.equal(P3(x=1), P3(y=1))) # False
# err sets the allowed absolute error.
# With tolerance 0.2, 1.0 and 1.1 compare equal.
print(BaseUtils.equal(1.0, 1.1, err=0.2)) # True
# For P2/P3 objects the tolerance applies per component: the objects are
# equal only when every component differs by less than the absolute error.
print(BaseUtils.equal(P2(1, 1), P2(0.9, 1.05), err=0.2)) # True
# msg: when msg is non-empty and a != b, an error is raised with msg as its message.
# BaseUtils.equal(1,2,msg="1和2不相等")
# which produces the following traceback:
# Traceback (most recent call last):
#   File "c:/Users/madoka_9900/Documents/github/cctpy/final_code/demos/A21BaseUtils示例.py", line 34, in <module>
#     BaseUtils.equal(1,2,msg="1和2不相等")
#   File "c:\Users\madoka_9900\Documents\github\cctpy\final_code\packages\base_utils.py", line 60, in equal
#     raise AssertionError(msg)
# AssertionError: 1和2不相等
# Function linspace()
print(BaseUtils.linspace(1, 2, 2))
print(BaseUtils.linspace(1, 2, 3))
print(BaseUtils.linspace(1, 2, 4))
print(BaseUtils.linspace(1, 2, 5))
# [1.0, 2.0]
# [1.0, 1.5, 2.0]
# [1.0, 1.3333333333333333, 1.6666666666666665, 2.0]
# [1.0, 1.25, 1.5, 1.75, 2.0]
# linspace also works on P2 points and on descending ranges.
points = BaseUtils.linspace(P2.origin(), P2(3, 4), 20)
# Plot2.plot(points,describe='r.')
# Plot2.show()
print(BaseUtils.linspace(3, 1, 5))
# [3.0, 2.5, 2.0, 1.5, 1.0]
print(BaseUtils.linspace(1, -1, 5))
# [1.0, 0.5, 0.0, -0.5, -1.0]
# Radian <-> degree conversion, for scalars or lists.
a1 = math.pi
a2 = math.pi/2
a3 = math.pi/3
a4 = math.pi/4
b1 = BaseUtils.radian_to_angle(a1)
b2 = BaseUtils.radian_to_angle(a2)
b34 = BaseUtils.radian_to_angle([a3, a4])
print(b1, b2, b34) # 180.0 90.0 [59.99999999999999, 45.0]
print(BaseUtils.angle_to_radian(b1)) # 3.141592653589793
print(BaseUtils.angle_to_radian(b2)) # 1.5707963267948966
print(BaseUtils.angle_to_radian(b34))
# [1.0471975511965976, 0.7853981633974483]
# Circle through three points: returns the center and radius.
p1 = P2(0, 0)
p2 = P2(1, 1)
p3 = P2(1, 0)
center, r = BaseUtils.circle_center_and_radius(p1, p2, p3)
# Plot the three points:
# Plot2.plot_p2s([p1,p2,p3],describe='k.')
# # Plot the circle center:
# Plot2.plot_p2(center,describe='ro')
# Plot2.equal()
# Plot2.show()
# Polynomial fitting of (xs, ys) at orders 1-3; returns coefficient lists.
xs = [1, 2, 3, 4, 5]
ys = [-2, 0, 5, 9, 20]
fit1 = BaseUtils.polynomial_fitting(xs, ys, 1)
fit2 = BaseUtils.polynomial_fitting(xs, ys, 2)
fit3 = BaseUtils.polynomial_fitting(xs, ys, 3)
print(fit1)
print(fit2)
print(fit3)
# [-0.7000000000000017, 2.6999999999999997]
# [-0.1999999999999946, 2.271428571428568, 0.07142857142857197]
# [-1.599999999999992, 4.238095238095229, -0.6785714285714245, 0.08333333333333283]
# Turn fitted coefficients into callable functions.
xs = [1, 2, 3, 4, 5]
ys = [-2, 0, 5, 9, 20]
fit1 = BaseUtils.polynomial_fitting(xs, ys, 1)
fit2 = BaseUtils.polynomial_fitting(xs, ys, 2)
fit3 = BaseUtils.polynomial_fitting(xs, ys, 3)
fun1 = BaseUtils.polynomial_fitted_function(fit1)
fun2 = BaseUtils.polynomial_fitted_function(fit2)
fun3 = BaseUtils.polynomial_fitted_function(fit3)
# The return value is a function.
print(type(fun1)) # <class 'function'>
print(fun1(1)) # -4.2
# Plot the fitted points:
# Plot2.plot_xy_array(xs, ys, 'rx')
# # Plot the fitted functions:
# Plot2.plot_function(fun1, start=0, end=6, describe='k-')
# Plot2.plot_function(fun2, start=0, end=6, describe='g-')
# Plot2.plot_function(fun3, start=0, end=6, describe='y-')
# Plot2.info()
# Plot2.legend("point",'linear','order2','order3')
# Plot2.show()
# Element-wise multiplication of a list (numbers or P2) by a scalar.
arr1 = [1,2,3,4,5]
arr2 = [P2(1,2),P2(3,4),P2(5,6)]
print(BaseUtils.list_multiply(arr1,2))
# [2, 4, 6, 8, 10]
print(BaseUtils.list_multiply(arr2,3))
# [(3.0, 6.0), (9.0, 12.0), (15.0, 18.0)]
# Numerical derivative of a function.
y = lambda x:x*x
yd = BaseUtils.derivative(y)
# Plot2.plot_function(y,start=-4,end=4,describe='r-')
# Plot2.plot_function(yd,start=-4,end=4,describe='k-')
# Plot2.equal()
# Plot2.info()
# Plot2.show()
# interpolate_lagrange
y = BaseUtils.interpolate_lagrange(
    x = 2.2,
    x0 = 1, y0 = 2,
    x1 = 2, y1 = 4,
    x2 = 3, y2 = 6,
    x3 = 4, y3 = 8
)
print(y) # 4.4
# is_sorted
print(BaseUtils.is_sorted([1,2,3])) # True
print(BaseUtils.is_sorted([1,2,1])) # False
# Print the current call-stack frames.
BaseUtils.print_traceback()
# <frame at 0x0000020DFFC8BBA0, file 'c:\\Users\\madoka_9900\\Documents\\github\\cctpy\\final_code\\packages\\base_utils.py', line 260, code print_traceback>
# <frame at 0x0000020DE8FDC440, file 'c:/Users/madoka_9900/Documents/github/cctpy/final_code/demos/A21BaseUtils示例.py', line 149, code <module>>
# Nested lambdas show one frame per call level.
fun_1 = lambda :BaseUtils.print_traceback()
fun_2 = lambda :fun_1()
fun_3 = lambda :fun_2()
fun_3()
# <frame at 0x000001F07F47D9A0, file 'c:/Users/madoka_9900/Documents/github/cctpy/final_code/demos/A21BaseUtils示例.py', line 153, code <lambda>>
# <frame at 0x000001F07F47D810, file 'c:/Users/madoka_9900/Documents/github/cctpy/final_code/demos/A21BaseUtils示例.py', line 154, code <lambda>>
# <frame at 0x000001F07F47D680, file 'c:/Users/madoka_9900/Documents/github/cctpy/final_code/demos/A21BaseUtils示例.py', line 155, code <lambda>>
# <frame at 0x000001F06E3EC440, file 'c:/Users/madoka_9900/Documents/github/cctpy/final_code/demos/A21BaseUtils示例.py', line 156, code <module>>
# 4th-order Runge-Kutta: integrate y' = 2t from t=0 to 3 with y(0)=0.
yd = lambda t,Y:2*t
y3 = BaseUtils.runge_kutta4(0,3,0,yd,dt=0.01)
print(y3) # 8.999999999999984
# With record=True the full (t, Y) trajectory is returned.
yd = lambda t,Y:2*t
ts,Ys = BaseUtils.runge_kutta4(0,3,0,yd,dt=0.01,record=True)
# Plot2.plot_xy_array(ts,Ys)
# Plot2.equal()
# Plot2.info()
# Plot2.show()
# General ODE solver over a state vector.
yd = lambda t,Y:2*t
y3 = BaseUtils.solve_ode(0,3,[0],lambda x,Y:[2*x],dt=0.1)
print(y3[0][-1]) # 9.0
from django.conf.urls import patterns, url
from views import delete, index
# Legacy Django URL configuration (`patterns` was removed in Django 1.10):
# maps /delete/ -> delete and the app root -> index.
urlpatterns = patterns('boxer.views',
    url(r'^delete/$', delete, name="delete"),
    url(r'^$', index, name="index"),
)
from twisted.internet.defer import inlineCallbacks
from autobahn.twisted.wamp import ApplicationSession
from autobahn import wamp
from model.serializer.ditesiSerializer import JSONSerializable
#from model.serializer import ditesiSerializer
#ditesiSerializer.register()
class TestSer(JSONSerializable):
    """Minimal serializable fixture with two fixed string attributes."""
    def __init__(self):
        self.rr = 'test'
        self.dd = 'test2'
class LibraryClient(ApplicationSession):
    """WAMP session that authenticates via a static ticket and round-trips
    a TestSer instance through the remote `test_serializer` procedure."""
    def onConnect(self):
        # Join realm "core" as authid "system" using ticket authentication.
        self.join("core", ["ticket"], "system")
    def onChallenge(self, challenge):
        # Answer the ticket challenge with the shared secret.
        if challenge.method == 'ticket':
            return 'password'
        else:
            raise Exception('Invalid auth method {}'.format(challenge.method))
    @inlineCallbacks
    def onJoin(self, details):
        # Call the remote serializer test, print what came back, then leave.
        t = TestSer()
        res = yield self.call('test_serializer', t)
        print(res)
        print(res.__dict__)
        self.leave()
|
#!/usr/bin/python
__author__ = 'Elisabetta Ronchieri'
import sys
import unittest
import getopt
from tstorm.utils import report_file
from tstorm.utils import settings
from tstorm.utils import sequence
from tstorm.utils import release
from tstorm.utils import range
from tstorm.utils import limit
from tstorm.utils import test
from tstorm.utils import tests
from tstorm.utils import filters
from tstorm.utils import configuration
from tstorm.run import run_tests
if __name__ == '__main__':
    # Drive the tstorm suite (Python 2 code): parse options, prepare,
    # list, and run the tests, converting each known error type into a
    # console message.
    try:
        rt = run_tests.RunTests()
        rt.do_parse()
        rt.do_pre_run()
        rt.do_list()
        rt.do_run_tests()
    except sequence.SequenceError, err:
        print '\n\nExecution: ', err
    except filters.FiltersError, err:
        print '\n\nExecution: ', err
    except run_tests.OptionError, err:
        print '\n\nExecution: ', err
    except tests.TestsError, err:
        print '\n\nExecution: ', err
    except run_tests.RunTestsError, err:
        print '\n\nExecution: ', err
    except range.RangeError, err:
        print '\n\nExecution: ', err
    except limit.LimitError, err:
        print '\n\nExecution: ', err
    except release.ReleaseError, err:
        print '\n\nExecution: ', err
    except test.TestStructureError, err:
        print '\n\nExecution: ', err
    except KeyboardInterrupt:
        print '\n\nExecution n!'
        # NOTE(review): original indentation was lost — sys.exit(1) is
        # placed in the KeyboardInterrupt handler (the common pattern);
        # confirm against the upstream source.
        sys.exit(1)
|
# A friendly note from the author:
#
# I wrote this implementation of merge sort as an
# exercise. Please use a library function for your
# real-world sorting needs.
def merge(left, right):
    """Merge two sorted lists into one sorted list.

    Ties go to the right-hand list, matching the original's strict `<`
    comparison on the left element.
    """
    merged = []
    i = j = 0
    # Walk both lists while each still has elements.
    while i < len(left) and j < len(right):
        if left[i] < right[j]:
            merged.append(left[i])
            i += 1
        else:
            merged.append(right[j])
            j += 1
    # At most one of these extends with anything.
    merged.extend(left[i:])
    merged.extend(right[j:])
    return merged
def split(start, end):
    """Split the index range [start, end) into two halves.

    Returns `start` unchanged for a degenerate range (start == end);
    otherwise two (lo, hi) index pairs.

    Fix: the original used `end / 2`, which is float division in
    Python 3 (this file uses `print(...)`, so it targets Python 3);
    the float midpoint then crashed slicing in `mergesort`. Use floor
    division instead.
    """
    if start == end:
        return start
    mid = end // 2
    return (start, mid), (mid, end)
def mergesort(arr):
    """Sort a sequence via recursive merge sort; returns a list.

    (Sequences of fewer than two elements are returned unchanged.)

    Fix: guard `len(arr) <= 1` rather than `== 1` — the empty input
    previously fell through to `split(0, 0)`, whose single-value return
    crashed the tuple unpacking below.
    """
    n = len(arr)
    if n <= 1:
        return arr
    left, right = split(0, n)
    left_sorted = mergesort(arr[left[0]:left[1]])
    right_sorted = mergesort(arr[right[0]:right[1]])
    return merge(left_sorted, right_sorted)
# Demo: sort a tuple of duplicated values (mergesort only reads by
# slicing/len, so tuples work; the result is a list).
arr = (6,5,3,1,2,7,9,3,7,44,8,98,2,7,9,3,7,44,8,98,2,7,9,3,7,44,8,98,2,7,9,3,7,44,8,98,2,7,9,3,7,44,8,98,2,7,9,3,7,44,8,98,8,7,2,4)
print(mergesort(arr))
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.7 on 2016-07-07 18:34
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated: restrict Day.name to weekday choices (Danish labels)."""

    dependencies = [
        ('mealplanner', '0006_auto_20160606_1911'),
    ]

    operations = [
        migrations.AlterField(
            model_name='day',
            name='name',
            field=models.TextField(choices=[('Mon', 'Mandag'), ('Tue', 'Tirsdag'), ('Wed', 'Onsdag'), ('Thu', 'Torsdag'), ('Fri', 'Fredag'), ('Sat', 'Lørdag'), ('Sun', 'Søndag')], default='Mon'),
        ),
    ]
|
import re
from collections import Counter
# Python 2 script: read raw URL strings, split each line into alphabetic
# tokens, and write a "count : word" frequency table.
file = open('URL_String.txt', "r")
Url_text = file.readlines()
text_blob = []
for i in range(len(Url_text)):
    # NOTE(review): `if 1==1` is a no-op guard left over from editing.
    if 1==1:
        # Split on runs of non-letters after lowercasing.
        text_blob = text_blob +re.split(r'[^a-zA-Z]+', Url_text[i].strip().lower())
    # Progress indicator (indentation reconstructed — original was lost).
    print i
#print text_blob
text_file = open("URL_String_Freq.txt", "w")
wordCount = Counter(text_blob)
for i in wordCount:
    text_file.write( str(wordCount[i]) +" : "+ i)
    text_file.write( '\n')
    #print str(wordCount[i]) +" : "+ i
text_file.close()
''''
面向对象
程序 现实中
对象 具体的事务
现实中的实物---转化为程序 ====面向对象
好处:
面向对象:
类
对象
属性
方法
对象:
小明的手机
小红的手机
小妹的手机
。。。
对象的集合 ---共同的特征:品牌, 颜色,大小,价格 动作:打电话, 发短信,上网
类别:手机类
对象里面包含属性(特征)和方法(动作)
多个对象----提取共同的属性和方法---封装成一个类
'''
# 所有的类名首字母大写,多个单词使用驼峰命名法
# 所有的类都默认继承object
# 类相当于一个模子
# 在模子中定义属性和方法,
'''
class 类名(父类)
属性
方法
'''
class phone(object):
    """Demo class; `brand` is a class attribute shared by all instances."""
    brand = 'vivo'
tom = phone()
print(tom.brand)  # falls back to the class attribute: 'vivo'
tom.brand = 'iphone'  # creates an instance attribute that shadows the class one
print(tom.brand)
# Define a class with (class-level) attributes.
class Student:
    # Class attributes: shared defaults for all instances.
    name = 'huawei'
    age = 2
# Create an object from the class.
xiaowei = Student()
xiaowei.age = 13  # instance attribute shadows the class attribute
print(xiaowei.age)  # lookup order: instance attributes first, then the class
# 类中的方法
# 普通方法、类方法 静态方法, 魔术方法
'''
普通方法
def 方法名(self{}):
'''
class Car:
    """Demo class covering class attributes, a name-mangled "private"
    attribute, __init__, instance methods, and classmethods."""
    price = 23000000
    type = 5
    # Name-mangled "private" class attribute; cannot be set from outside
    # the class — modified via the classmethods below.
    __age = 3
    nickname = "xiaobao"

    def __init__(self):
        # Runs automatically on instantiation; adds instance attributes.
        print("初始化")
        self.brand = 'benzi'
        self.seat = 7

    def run(self):
        # `self` is whichever instance invoked the method; requires an
        # instance to call.
        print('跑的很快')
        print('有:', self.seat)

    def drink(self):
        # Instance methods call each other through self.
        self.run()
        print("调用了run方法")

    # Classmethods receive the class (cls), not an instance, and can
    # therefore be called before any object exists.
    @classmethod
    def eat(cls):
        print(cls.nickname)  # classmethods can only reach class attributes
        # self.run() would fail here: there is no `self` in a classmethod

    @classmethod
    def update_age(cls):
        # Private class attributes can be changed from inside the class.
        cls.__age = 20
        print("类方法")

    @classmethod
    def show_age(cls):
        print(cls.__age)
# Car.run() would raise here: plain methods need an instance, while
# classmethods can be called directly on the class.
Car.update_age()
Car.show_age()
ben = Car()
ben.seat = '7座'
ben.run()
bao = Car()
bao.seat = "5座"
bao.run()
ben.drink()
# Instantiation steps: allocate a Car-shaped object, run __init__ on it
# if defined (adding instance attributes), then bind the resulting
# object's reference to the name (ben / bao).
# 函数和类里面定义的方法
# 类方法
'''
特点:
类方法中的参数不是对象中的,而是雷钟德
定义方法classmethod
类方法中只可以使用类属性
只要加上self,说明要依赖于对象
类中方法的调用需要加self.方法名()
所以普通方法不能在方法中调用,因为没有self
类方法的作用:
在对象还没出现之前的动作,可以放在类方法中
因为只能访问类属性和类方法,所以在创建对象之前创建,如果需要完成一些动作(功能)
'''
|
import json
import datetime
from .types import Artifact, Publication, Collection, Universe
# serialization
# --------------------------------------------------------------------------------------
def serialize(node):
    """Serialize the universe/collection/publication/artifact to JSON.
    Parameters
    ----------
    node : Union[Universe, Collection, Publication, Artifact]
        The thing to serialize as JSON.
    Returns
    -------
    str
        The object serialized as JSON.
    """
    def converter(o):
        # Fallback for non-JSON-native values (dates/datetimes round-trip
        # via deserialize's hook). NOTE: this stringifies *any* unknown
        # type silently — deliberate here, but worth knowing.
        return str(o)
    # Artifacts are flat namedtuple-likes; the container types provide a
    # recursive _deep_asdict.
    if isinstance(node, Artifact):
        dct = node._asdict()
    else:
        dct = node._deep_asdict()
    return json.dumps(dct, default=converter, indent=4)
def _convert_to_time(s):
converters = [datetime.date.fromisoformat, datetime.datetime.fromisoformat]
for converter in converters:
try:
return converter(s)
except ValueError:
continue
else:
raise ValueError("Not a time.")
def deserialize(s):
    """Reconstruct a universe/collection/publication/artifact from JSON.
    Parameters
    ----------
    s : str
        The JSON to deserialize.
    Returns
    -------
    Universe/Collection/Publication/Artifact
        The reconstructed object; its type is inferred from the string.
    """
    # we need to pass a hook to json.loads in order to automatically convert
    # datestring to date/datetime objects
    def hook(pairs):
        """Hook for json.loads to convert date/time-like values."""
        d = {}
        for k, v in pairs:
            if isinstance(v, str):
                try:
                    d[k] = _convert_to_time(v)
                except ValueError:
                    # Not date-like; keep the raw string.
                    d[k] = v
            else:
                d[k] = v
        return d
    dct = json.loads(s, object_pairs_hook=hook)
    # infer what we're reconstructing from which children key is present
    if "collections" in dct:
        type_ = Universe
        children_key = "collections"
    elif "publications" in dct:
        type_ = Collection
        children_key = "publications"
    elif "artifacts" in dct:
        type_ = Publication
        children_key = "artifacts"
    else:
        # NOTE(review): _artifact_from_dict is not defined in the visible
        # portion of this module — presumably provided elsewhere; confirm.
        return _artifact_from_dict(dct)
    # NOTE(review): children_key is assigned but unused here — presumably
    # _deep_fromdict discovers it itself.
    return type_._deep_fromdict(dct)
|
import pandas as pd
def function_Save_Data_Matrix_into_CSV(data_matrix, Path):
    """Write `data_matrix` to `Path` as CSV without header or index column."""
    frame = pd.DataFrame(data_matrix)
    frame.to_csv(Path, header=False, index=False)
import re
# Parse the puzzle input: each line "X bags contain N col bags, M col2 bags."
# becomes data[X] = ["N col", "M col2", ...] (or ["no other"]).
rawdata = []
with open("input.txt") as f:
    rawdata = f.readlines()
rawdata = [n.strip() for n in rawdata]
data = {}
for line in rawdata:
    newline = line.split(" contain ")
    # Strip the words "bag"/"bags" (and surrounding spaces) plus the final dot.
    newline = [re.sub(" *bags* *", "", n).strip(".") for n in newline]
    # Make this a dictionary
    data[newline[0]] = [n.strip() for n in newline[1].split(",")]
# At this point data is a dictionary of bag colours(key) and lists of contents (value)
# in the format 'n colour' e.g. 5 vibrant indigo
# Can a given list of bags contain a shiny gold bag?
def count_bags_inside(dict_of_bags, bag_color):
    """Recursively count the total number of bags inside `bag_color`.

    Each content entry looks like "3 muted yellow"; an entry without a
    leading count (e.g. "no other") terminates the recursion.
    """
    print("CHECKING " + bag_color)
    contents = dict_of_bags[bag_color]
    print(bag_color + " contains: " + str(contents))
    # Locate the leading count in each entry.
    matches = [re.search("^[0-9]+ ", entry) for entry in contents]
    if None in matches:
        # Some entry has no count, so this bag holds no further bags.
        print("No more")
        return 0
    # Separate colors from their counts.
    colors = [entry.strip("0123456789 ") for entry in contents]
    counts = [int(entry[:m.end()]) for m, entry in zip(matches, contents)]
    total = 0
    for color, count in zip(colors, counts):
        # Each inner bag contributes itself plus whatever it contains.
        inner = count_bags_inside(dict_of_bags, color)
        total += inner * count + count
    return total
# Puzzle answer: bags ultimately contained in one shiny gold bag.
print("Total bags inside - ", count_bags_inside(data, "shiny gold"))
# 1470 too high
import queue
# Read `pages` HTML documents from stdin, extract <A HREF="..."> links into
# an adjacency dict, then answer reachability queries with BFS.
pages = int(input())
graph = {}
for p in range(pages):
    root = input()
    buffer = []
    while True:
        l = input()
        if l == '</HTML>':
            break
        # Pull every href value out of the current line.
        while '<A HREF=' in l:
            # URL sits between `<A HREF="` (9 chars) and the closing `">`.
            url = l[l.index('<A HREF=') + 9: l.index('<A HREF=') + l[l.index('<A HREF='):].index('">')]
            print('Link from %s to %s' % (root, url))
            buffer.append(url)
            # Advance past the processed anchor.
            l = l[l.index('<A HREF=') + l[l.index('<A HREF='):].index('">') + 2:]
    if root in graph:
        graph[root] += buffer[:]
    else:
        graph[root] = buffer[:]
# Queries arrive as pairs of lines (source, destination) until "The End".
prev = ''
while True:
    path = input()
    if path == 'The End':
        break
    if not prev:
        prev = path
    else:
        # Breadth-first search from `prev` looking for `path`.
        q = queue.Queue()
        q.put(prev)
        solved = False
        meh = []  # visited list
        while not q.empty():
            i = q.get()
            if path == i:
                print('Can surf from %s to %s.' % (prev, path))
                solved = True
                break
            for e in graph[i]:
                if e not in meh:
                    q.put(e)
                    meh.append(e)
        if not solved:
            print('Can\'t surf from %s to %s.' % (prev, path))
        prev = ''
|
"""
QS 硬边磁铁
"""
from typing import List, Tuple
import numpy as np
from cctpy.abstract_classes import Magnet, Plotable, LocalCoordinateSystem
from cctpy.constant import ZERO3
class QsHardEdgeMagnet(Magnet, Plotable):
    """Hard-edge QS magnet, fully determined by the following parameters:

        length                   magnet length / m
        gradient                 quadrupole field gradient / T·m^-1
        second_gradient          sextupole field gradient / T·m^-2
        aperture_radius          aperture (radius) / m
        local_coordinate_system  local coordinate system

    Meaning of the local coordinate system:
        ------------------------------------------
        |                  ②                      |
     ① ->|    ---->    ③ ↑                       |
        |                                         |
        ------------------------------------------
        ① center of the magnet entrance — origin of the local system
        ② ideal particle direction — local z axis
        ③ x direction of image space
        (the y direction follows from the x and z directions)
    """
    def __init__(self, length: float, gradient: float, second_gradient: float,
                 aperture_radius: float, local_coordinate_system: LocalCoordinateSystem):
        self.length = length
        self.gradient = gradient
        self.second_gradient = second_gradient
        self.aperture_radius = aperture_radius
        self.local_coordinate_system = local_coordinate_system

    def magnetic_field_at(self, point: np.ndarray) -> np.ndarray:
        """Field produced by this QS magnet at global-coordinate `point`.

        Parameters
        ----------
        point : point in global coordinates

        Returns
        -------
        The magnetic-field vector at `point`, in global coordinates
        (ZERO3 outside the magnet body or aperture).
        """
        # Convert `point` to local coordinates and unpack.
        x, y, z = self.local_coordinate_system.point_to_local_coordinate(point)
        # z < 0 or z > self.length means the point lies outside the magnet.
        if z < 0 or z > self.length:
            return ZERO3
        else:
            # Aperture check; the first two `or` terms short-circuit to
            # avoid an unnecessary square root.
            if np.abs(x) > self.aperture_radius or np.abs(y) > self.aperture_radius or np.sqrt(
                    x ** 2 + y ** 2) > self.aperture_radius:
                return ZERO3
            else:
                # bx/by are the local x/y field components (the local z axis
                # is the ideal beam direction / central axis — no field there).
                bx = self.gradient * y + self.second_gradient * (x * y)
                by = self.gradient * x + 0.5 * self.second_gradient * (x ** 2 - y ** 2)
                # Transform back to the global coordinate system.
                return bx * self.local_coordinate_system.XI + by * self.local_coordinate_system.YI

    def line_and_color(self, describe='r') -> List[Tuple[np.ndarray, str]]:
        """Plotting helper: outline the magnet as three bore circles plus
        four axial lines, each paired with the plot style `describe`."""
        # Front, middle, and back circles of the bore (local coordinates).
        front_circle_local = np.array([
            [self.aperture_radius * np.cos(i / 180 * np.pi),
             self.aperture_radius * np.sin(i / 180 * np.pi),
             0.]
            for i in range(360)])
        mid_circle_local = front_circle_local + np.array([0, 0, self.length / 2])
        back_circle_local = front_circle_local + np.array([0, 0, self.length])
        # Transform to the global coordinate system.
        front_circle = self.local_coordinate_system.line_to_global_coordinate(front_circle_local)
        mid_circle = self.local_coordinate_system.line_to_global_coordinate(mid_circle_local)
        back_circle = self.local_coordinate_system.line_to_global_coordinate(back_circle_local)
        # Four axial lines at 0/90/180/270 degrees joining front and back.
        axial_direction_line_0 = np.array([front_circle[0], back_circle[0]])
        axial_direction_line_1 = np.array([front_circle[90], back_circle[90]])
        axial_direction_line_2 = np.array([front_circle[180], back_circle[180]])
        axial_direction_line_3 = np.array([front_circle[270], back_circle[270]])
        return [
            (front_circle, describe),
            (mid_circle, describe),
            (back_circle, describe),
            (axial_direction_line_0, describe),
            (axial_direction_line_1, describe),
            (axial_direction_line_2, describe),
            (axial_direction_line_3, describe),
        ]
|
import requests
from urllib.request import urlopen
from bs4 import BeautifulSoup
from pymongo import MongoClient
# MongoDB connection: local server on the default port, database "dbsparta".
client = MongoClient('localhost', 27017)
db = client.dbsparta
def get_detail_info(url):
    """Scrape one diningcode.com restaurant detail page and save it to MongoDB.

    Fetches ``url``, extracts the title, image, address, phone number, tags,
    rating and opening hours, then inserts a single document into db.pubstreet.
    """
    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64)AppleWebKit/537.36 (KHTML, like Gecko) Chrome/73.0.3683.86 Safari/537.36'}
    data = requests.get(url, headers=headers)
    soup = BeautifulSoup(data.text, 'html.parser')
    lists = soup.select('#div_profile > div.s-list.basic-info > ul > li')
    title = soup.select_one('#div_profile > div.s-list.pic-grade > div.tit-point > p')
    rating = soup.select_one('#lbl_star_point > span.point')
    img = soup.select_one('#div_profile > div.s-list.pic-grade > ul > li.bimg.btn-gallery-open > img')['src']
    all_tags = soup.select('#div_profile > div.s-list.appraisal > div.grade-info > ul.app-arti > li > p.icon')
    tags = []
    keywords = soup.select('#div_profile > div.s-list.pic-grade > div.btxt > a')
    # BUGFIX: `keyword` was previously only bound inside the loop below, so a
    # page with no keyword links raised NameError when building `info`.
    keyword = ''
    for kw in keywords:
        keyword = kw.text.strip()
        tags.append(keyword)
    # Opening hours: one "label: hours" string per list entry.
    all_hour = soup.select('#div_hour > div.busi-hours > ul > li')
    open_days = []
    for od in all_hour:
        open_days.append(od.text.strip().replace(' ', '').replace('\n', ': '))
    # Appraisal icons: keep only the text before the first '('.
    for tag in all_tags:
        only_tag = tag.text.split('(')[0]
        tags.append(only_tag)
    main_tag1 = lists[2].text.strip()
    main_tag2 = lists[3].text.strip()
    tags.append(main_tag1)
    tags.append(main_tag2)
    # The rating element may be absent; store an empty string in that case.
    if rating is None:
        rating = ''
    else:
        rating = rating.text.strip()
    info = {
        "title" : title.text.strip(),
        "img" : img,
        "addr" : lists[0].text.strip(),
        "tel" : lists[1].text.strip(),
        "main_tag1" : main_tag1,
        "main_tag2" : main_tag2,
        "keyword" : keyword,
        "tag" : tags,
        "rating": rating,
        "open_days" : open_days
    }
    print(info)
    db.pubstreet.insert_one(info)
def get_detail_urls(page=1):
    """Return the detail-page URLs from one diningcode.com search-result page.

    Parameters:
        page: 1-based result page number (default 1).
    Returns:
        list of absolute URLs, one per result entry that contains a link.
    """
    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64)AppleWebKit/537.36 (KHTML, like Gecko) Chrome/73.0.3683.86 Safari/537.36'}
    params = {
        'query' : '서울+도봉+술집',
        'page' : page
    }
    data = requests.get('https://www.diningcode.com/list.php', headers=headers, params=params)
    soup = BeautifulSoup(data.text, 'html.parser')
    items = soup.select('#div_list > li')
    # Hoisted loop-invariant; also renamed the loop variable, which previously
    # shadowed the builtin `list`.
    base_url = 'https://www.diningcode.com'
    urls = []
    for item in items:
        a = item.select_one('a')
        if a is not None:
            urls.append(base_url + a['href'])
    return urls
def insert_all():
    """Crawl search-result pages 1 through 9 and persist every detail page."""
    for page_no in range(1, 10):
        for detail_url in get_detail_urls(page_no):
            get_detail_info(detail_url)
    print('[알림] 데이터 저장이 완료되었습니다!')
### 실행하기
insert_all() |
# Read a 3x3 matrix of integers from the user, row by row.
matriz = [
    [int(input('Digite a nota ['+ str(i) + ',' + str(j) + ']: ')) for j in range(3)]
    for i in range(3)
]
# Count how many entries are even.
pares = sum(1 for linha in matriz for valor in linha if valor % 2 == 0)
# Print the matrix, one row per line.
for linha in matriz:
    print(linha)
# Report the number of even entries.
print ('A matriz contem ',pares,' numeros pares') |
from simpleparse.common import numbers, strings, comments
from simpleparse import generator
from simpleparse.parser import Parser
from simpleparse.dispatchprocessor import *
import collections, re
from .Factor import Factor
from .Faresystem import Faresystem
from .Linki import Linki
from .Logger import WranglerLogger
from .NetworkException import NetworkException
from .Node import Node
from .PNRLink import PNRLink
from .PTSystem import PTSystem
from .Supplink import Supplink
from .TransitLine import TransitLine
from .TransitLink import TransitLink
from .ZACLink import ZACLink
__all__ = [ 'TransitParser' ]
# File-name suffixes of the transit files this module can parse.
WRANGLER_FILE_SUFFICES = [ "lin", "link", "pnr", "zac", "access", "xfer", "pts" ]
# PARSER DEFINITION ------------------------------------------------------------------------------
# NOTE: even though XYSPEED and TIMEFAC are node attributes here, I'm not sure that's really ok --
# Cube documentation implies TF and XYSPD are node attributes...
# simpleparse EBNF-style grammar for Cube PT / TRNBUILD transit files.  Each
# top-level production (line, link, pnr, zac, supplink, factor, faresystem,
# ...) dispatches to the same-named method on TransitFileProcessor below.
transit_file_def=r'''
transit_file := smcw*, ( accessli / line / link / pnr / zac / supplink / factor / faresystem / waitcrvdef / crowdcrvdef / operator / mode / vehicletype )+, smcw*, whitespace*
line := whitespace?, smcw?, c"LINE", whitespace, lin_attr*, lin_node*, whitespace?
lin_attr := ( lin_attr_name, whitespace?, "=", whitespace?, attr_value, whitespace?,
comma, whitespace?, semicolon_comment* )
lin_nodeattr := ( lin_nodeattr_name, whitespace?, "=", whitespace?, attr_value, whitespace?, comma?, whitespace?, semicolon_comment* )
lin_attr_name := c"allstops" / c"color" / (c"freq",'[',[1-5],']') / c"mode" / c"name" / c"oneway" / c"owner" / c"runtime" / c"timefac" / c"xyspeed" / c"longname" / c"shortname" / (c"usera",[1-5]) / (c"headway",'[',[1-5],']') / c"vehicletype" / c"operator" / c"faresystem"
lin_nodeattr_name := c"access_c" / c"access" / c"delay" / c"xyspeed" / c"timefac" / c"nntime" / c"time"
lin_node := lin_nodestart?, whitespace?, nodenum, spaces*, comma?, spaces*, semicolon_comment?, whitespace?, lin_nodeattr*
lin_nodestart := (whitespace?, "N", whitespace?, "=")
link := whitespace?, smcw?, c"LINK", whitespace, link_attr*, whitespace?, semicolon_comment*
link_attr := (( (link_attr_name, whitespace?, "=", whitespace?, attr_value) /
(word_nodes, whitespace?, "=", whitespace?, nodepair) /
(word_modes, whitespace?, "=", whitespace?, numseq) ),
whitespace?, comma?, whitespace?)
link_attr_name := c"dist" / c"speed" / c"time" / c"oneway"
pnr := whitespace?, smcw?, c"PNR", whitespace, pnr_attr*, whitespace?
pnr_attr := (( (pnr_attr_name, whitespace?, "=", whitespace?, attr_value) /
(word_node, whitespace?, "=", whitespace?, ( nodepair / nodenum )) /
(word_zones, whitespace?, "=", whitespace?, numseq )),
whitespace?, comma?, whitespace?, semicolon_comment*)
pnr_attr_name := c"time" / c"maxtime" / c"distfac" / c"cost"
zac := whitespace?, smcw?, c"ZONEACCESS", whitespace, zac_attr*, whitespace?, semicolon_comment*
zac_attr := (( (c"link", whitespace?, "=", whitespace?, nodepair) /
(zac_attr_name, whitespace?, "=", whitespace?, attr_value) ),
whitespace?, comma?, whitespace?)
zac_attr_name := c"mode"
supplink := whitespace?, smcw?, c"SUPPLINK", whitespace, supplink_attr*, whitespace?, semicolon_comment*
supplink_attr := (( (supplink_attr_name, whitespace?, "=", whitespace?, attr_value) /
(npair_attr_name, whitespace?, "=", whitespace?, nodepair )),
whitespace?, comma?, whitespace?)
npair_attr_name := c"nodes" / c"n"
supplink_attr_name:= c"mode" / c"dist" / c"speed" / c"oneway" / c"time"
factor := whitespace?, smcw?, c"FACTOR", whitespace, factor_attr*, whitespace?, semicolon_comment*
factor_attr := ( (factor_attr_name, whitespace?, "=", whitespace?, attr_value),
whitespace?, comma?, whitespace? )
factor_attr_name := c"maxwaittime" / word_nodes
faresystem := whitespace?, smcw?, c"FARESYSTEM", whitespace, faresystem_attr*, whitespace?, semicolon_comment*
faresystem_attr := (( (faresystem_attr_name, whitespace?, "=", whitespace?, attr_value) /
(faresystem_fff, whitespace?, "=", whitespace?, floatseq )),
whitespace?, comma?, whitespace? )
faresystem_attr_name := c"number" / c"name" / c"longname" / c"structure" / c"same" / c"iboardfare" / c"farematrix" / c"farezones"
faresystem_fff := c"farefromfs"
waitcrvdef := whitespace?, smcw?, c"WAITCRVDEF", whitespace, crv_attr*, whitespace?, semicolon_comment*
crowdcrvdef := whitespace?, smcw?, c"CROWDCRVDEF", whitespace, crv_attr*, whitespace?, semicolon_comment*
crv_attr := (( (opmode_attr_name, whitespace?, "=", whitespace?, attr_value) /
(word_curve, whitespace?, "=", whitespace?, xyseq )),
whitespace?, comma?, whitespace? )
operator := whitespace?, smcw?, c"OPERATOR", whitespace, opmode_attr*, whitespace?, semicolon_comment*
mode := whitespace?, smcw?, c"MODE", whitespace, opmode_attr*, whitespace?, semicolon_comment*
opmode_attr := ( (opmode_attr_name, whitespace?, "=", whitespace?, attr_value), whitespace?, comma?, whitespace? )
opmode_attr_name := c"number" / c"name" / c"longname"
vehicletype := whitespace?, smcw?, c"VEHICLETYPE", whitespace, vehtype_attr*, whitespace?, semicolon_comment*
vehtype_attr := ( (vehtype_attr_name, whitespace?, "=", whitespace?, attr_value), whitespace?, comma?, whitespace? )
vehtype_attr_name := c"number" / (c"crowdcurve",'[',[0-9]+,']') / c"crushcap" / c"loaddistfac" / c"longname" / c"name" / c"seatcap"
accessli := whitespace?, smcw?, nodenumA, spaces?, nodenumB, spaces?, accesstag?, spaces?, (float/int)?, spaces?, semicolon_comment?
accesstag := c"wnr" / c"pnr"
word_curve := c"curve"
word_nodes := c"nodes"
word_node := c"node"
word_modes := c"modes"
word_zones := c"zones"
xyseq := xy, (spaces?, ",", spaces?, xy)*
xy := pos_floatnum, spaces?, ("-" / ","), spaces?, pos_floatnum
pos_floatnum := [0-9]+, [\.]?, [0-9]*
numseq := int, (spaces?, ("-" / ","), spaces?, int)*
floatseq := floatnum, (spaces?, ("-" / ","), spaces?, floatnum)*
floatnum := [-]?, [0-9]+, [\.]?, [0-9]*
nodepair := nodenum, spaces?, ("-" / ","), spaces?, nodenum
nodenumA := nodenum
nodenumB := nodenum
nodenum := int
attr_value := alphanums / string_single_quote / string_double_quote
alphanums := [a-zA-Z0-9_\.]+
<comma> := [,]
<whitespace> := [ \t\r\n]+
<spaces> := [ \t]+
smcw := whitespace?, (semicolon_comment / c_comment, whitespace?)+
'''
class TransitFileProcessor(DispatchProcessor):
    """ Class to process transit files

    simpleparse DispatchProcessor: each method named after a grammar
    production in ``transit_file_def`` (line, link, pnr, zac, ...) is invoked
    with that production's parse tuple ``(tag, start, stop, subtags)`` and the
    raw text buffer.  Each handler cracks the subtree into nested
    ``(tag, text, children)`` tuples and appends them to the matching list
    attribute, for later conversion by TransitParser.
    """
    def __init__(self, verbosity=1):
        # verbosity: 0 = silent, 1 = print each record's tag/span, 2 = also dump subtrees
        self.verbosity=verbosity
        self.lines = []
        self.links = []
        self.pnrs = []
        self.zacs = []
        self.accesslis = []
        self.xferlis = []
        self.nodes = []
        # Routes accessli records into accesslis/xferlis/nodes; presumably set
        # by the caller before parsing (see accessli() below) -- TODO confirm.
        self.liType = ''
        self.supplinks = []
        self.factors = []
        self.faresystems = []
        # PT System control statements
        self.waitcrvdefs = []
        self.crowdcrvdefs = []
        self.operators = []
        self.modes = []
        self.vehicletypes = []
        self.linecomments = []
    def crackTags(self, leaf, buffer):
        """Recursively convert a parse leaf ``(tag, start, stop, subtags)``
        into a ``(tag, text, children)`` tuple, cracking children the same way."""
        tag = leaf[0]
        text = buffer[leaf[1]:leaf[2]]
        subtags = leaf[3]
        b = []
        if subtags:
            for leaf in subtags:
                b.append(self.crackTags(leaf, buffer))
        return (tag,text,b)
    def line(self, tup, buffer):
        """Handle one LINE record: crack its subtags into self.lines."""
        (tag,start,stop,subtags) = tup
        # this is the whole line
        if self.verbosity>=1:
            print(tag,start,stop)
        # Append list items for this line
        for leaf in subtags:
            xxx = self.crackTags(leaf,buffer)
            self.lines.append(xxx)
        if self.verbosity==2:
            # lines are composed of smcw (semicolon-comment / whitespace), line_attr and lin_node
            for linepart in subtags:
                print(" ",linepart[0], " -> [ "),
                for partpart in linepart[3]:
                    print(partpart[0], "(", buffer[partpart[1]:partpart[2]],")"),
                print(" ]")
    def link(self, tup, buffer):
        """Handle one LINK record: crack its subtags into self.links."""
        (tag,start,stop,subtags) = tup
        # this is the whole link
        if self.verbosity>=1:
            print(tag, start, stop)
        # Append list items for this link
        for leaf in subtags:
            xxx = self.crackTags(leaf,buffer)
            self.links.append(xxx)
        if self.verbosity==2:
            # links are composed of smcw and link_attr
            for linkpart in subtags:
                print(" ",linkpart[0], " -> [ "),
                for partpart in linkpart[3]:
                    print(partpart[0], "(", buffer[partpart[1]:partpart[2]], ")"),
                print(" ]")
    def pnr(self, tup, buffer):
        """Handle one PNR record: crack its subtags into self.pnrs."""
        (tag,start,stop,subtags) = tup
        if self.verbosity>=1:
            print(tag, start, stop)
        # Append list items for this link
        for leaf in subtags:
            xxx = self.crackTags(leaf,buffer)
            self.pnrs.append(xxx)
        if self.verbosity==2:
            # pnrs are composed of smcw and pnr_attr
            for pnrpart in subtags:
                print(" ",pnrpart[0], " -> [ "),
                for partpart in pnrpart[3]:
                    print(partpart[0], "(", buffer[partpart[1]:partpart[2]], ")"),
                print(" ]")
    def zac(self, tup, buffer):
        """Handle one ZONEACCESS record: crack its subtags into self.zacs."""
        (tag,start,stop,subtags) = tup
        if self.verbosity>=1:
            print(tag, start, stop)
        if self.verbosity==2:
            # zacs are composed of smcw and zac_attr
            for zacpart in subtags:
                print(" ",zacpart[0], " -> [ "),
                for partpart in zacpart[3]:
                    print(partpart[0], "(", buffer[partpart[1]:partpart[2]], ")"),
                print(" ]")
        # Append list items for this link
        for leaf in subtags:
            xxx = self.crackTags(leaf,buffer)
            self.zacs.append(xxx)
    def process_line(self, tup, buffer):
        """
        Generic version, returns list of pieces.
        """
        (tag,start,stop,subtags) = tup
        if self.verbosity>=1:
            print(tag, start, stop)
        if self.verbosity==2:
            for part in subtags:
                print(" ",part[0], " -> [ "),
                for partpart in part[3]:
                    print(partpart[0], "(", buffer[partpart[1]:partpart[2]], ")"),
                print(" ]")
        # Append list items for this link
        # TODO: make the others more like this -- let the list separate the parse structures!
        retlist = []
        for leaf in subtags:
            xxx = self.crackTags(leaf,buffer)
            retlist.append(xxx)
        return retlist
    def supplink(self, tup, buffer):
        """Handle one SUPPLINK record via process_line()."""
        supplink = self.process_line(tup, buffer)
        self.supplinks.append(supplink)
    def factor(self, tup, buffer):
        """Handle one FACTOR record via process_line()."""
        factor = self.process_line(tup, buffer)
        self.factors.append(factor)
    def faresystem(self, tup, buffer):
        """Handle one FARESYSTEM record via process_line()."""
        fs = self.process_line(tup, buffer)
        self.faresystems.append(fs)
    def waitcrvdef(self, tup, buffer):
        """Handle one WAITCRVDEF record via process_line()."""
        mycrvedef = self.process_line(tup, buffer)
        self.waitcrvdefs.append(mycrvedef)
    def crowdcrvdef(self, tup, buffer):
        """Handle one CROWDCRVDEF record via process_line()."""
        mycrvedef = self.process_line(tup, buffer)
        self.crowdcrvdefs.append(mycrvedef)
    def operator(self, tup, buffer):
        """Handle one OPERATOR record via process_line()."""
        myopmode = self.process_line(tup, buffer)
        self.operators.append(myopmode)
    def mode(self, tup, buffer):
        """Handle one MODE record via process_line()."""
        myopmode = self.process_line(tup, buffer)
        self.modes.append(myopmode)
    def vehicletype(self, tup, buffer):
        """Handle one VEHICLETYPE record via process_line()."""
        myvt = self.process_line(tup, buffer)
        self.vehicletypes.append(myvt)
    def smcw(self, tup, buffer):
        """ Semicolon comment whitespace
        """
        (tag,start,stop,subtags) = tup
        if self.verbosity>=1:
            print(tag, start, stop)
        for leaf in subtags:
            xxx = self.crackTags(leaf,buffer)
            self.linecomments.append(xxx)
    def accessli(self, tup, buffer):
        """Handle one access/xfer/node link record, routed by self.liType."""
        (tag,start,stop,subtags) = tup
        if self.verbosity>=1:
            print(tag, start, stop)
        for leaf in subtags:
            xxx = self.crackTags(leaf,buffer)
            if self.liType=="access":
                self.accesslis.append(xxx)
            elif self.liType=="xfer":
                self.xferlis.append(xxx)
            elif self.liType=="node":
                self.nodes.append(xxx)
            else:
                raise NetworkException("Found access or xfer link without classification. {}".format(self.liType))
class TransitParser(Parser):
    """simpleparse Parser for Cube transit files.

    Parses input with the ``transit_file_def`` grammar into a
    TransitFileProcessor, then the ``convert*Data`` methods walk the cracked
    parse trees and build Wrangler objects (TransitLine, TransitLink,
    PNRLink, ZACLink, Linki, Supplink, Faresystem, PTSystem).
    """
    # line files are one of these
    PROGRAM_PT = "PT"
    PROGRAM_TRNBUILD = "TRNBUILD"
    PROGRAM_UNKNOWN = "unknown"
    def __init__(self, filedef=transit_file_def, verbosity=1):
        # filedef: simpleparse grammar source; verbosity is forwarded to the processor.
        Parser.__init__(self, filedef)
        self.verbosity=verbosity
        self.tfp = TransitFileProcessor(self.verbosity)
    def setVerbosity(self,verbosity):
        # Keep the parser's and the processor's verbosity in sync.
        self.verbosity=verbosity
        self.tfp.verbosity=verbosity
    def buildProcessor(self):
        # simpleparse hook: return the DispatchProcessor used during parsing.
        return self.tfp
    def convertLineData(self):
        """ Convert the parsed tree of data into a usable python list of transit lines
            returns (PROGRAM_PT or PROGRAM_TRNBUILD, list of comments and transit line objects)
        """
        program = TransitParser.PROGRAM_UNKNOWN # default
        rows = []
        currentRoute = None
        currentComments = []
        # try to figure out what type of file this is -- TRNBUILD or PT
        for comment in self.tfp.linecomments:
            if comment[0] == "semicolon_comment":
                cmt = comment[2][0][1]
                # print("cmt={}".format(cmt))
                # note the first semicolon is stripped
                if cmt.startswith(';<<Trnbuild>>;;'):
                    program = TransitParser.PROGRAM_TRNBUILD
                elif cmt.startswith(";<<PT>><<LINE>>;;"):
                    program = TransitParser.PROGRAM_PT
        WranglerLogger.debug("convertLineData: PROGRAM: {}".format(program))
        line_num = 1
        for line in self.tfp.lines:
            # WranglerLogger.debug("{:5} line[0]={}".format(line_num, line[0]))
            line_num += 1
            # Add comments as simple strings
            if line[0] == 'smcw':
                cmt = line[1].strip()
                # WranglerLogger.debug("smcw line={}".format(line))
                if currentRoute:
                    # don't add it now since we might mess up the ordering
                    # if we haven't closed out the last line
                    currentComments.append(cmt)
                else:
                    rows.append(cmt)
                continue
            # Handle Line attributes
            if line[0] == 'lin_attr':
                key = None
                value = None
                comment = None
                # Pay attention only to the children of lin_attr elements
                kids = line[2]
                for child in kids:
                    if child[0]=='lin_attr_name': key=child[1]
                    if child[0]=='attr_value': value=child[1]
                    if child[0]=='semicolon_comment': comment=child[1].strip()
                # If this is a NAME attribute, we need to start a new TransitLine!
                if key=='NAME':
                    if currentRoute:
                        rows.append(currentRoute)
                        # now add the comments stored up
                        if len(currentComments)>0:
                            # WranglerLogger.debug("currentComments: {}".format(currentComments))
                            rows.extend(currentComments)
                            currentComments = []
                    currentRoute = TransitLine(name=value)
                else:
                    currentRoute[key] = value # Just store all other attributes
                # And save line comment if there is one
                if comment: currentRoute.comment = comment
                continue
            # Handle Node list
            if line[0] == "lin_node":
                # Pay attention only to the children of lin_attr elements
                kids = line[2]
                node = None
                for child in kids:
                    if child[0]=='nodenum':
                        node = Node(child[1])
                    if child[0]=='lin_nodeattr':
                        key = None
                        value = None
                        for nodechild in child[2]:
                            if nodechild[0]=='lin_nodeattr_name': key = nodechild[1]
                            if nodechild[0]=='attr_value': value = nodechild[1]
                            if nodechild[0]=='semicolon_comment': comment=nodechild[1].strip()
                        node[key] = value
                        # NOTE(review): `comment` is not reset before this branch, so
                        # when a node attribute has no semicolon_comment this may
                        # reuse a stale value from an earlier lin_attr/iteration --
                        # worth confirming and fixing upstream.
                        if comment: node.comment = comment
                currentRoute.n.append(node)
                continue
            # Got something other than lin_node, lin_attr, or smcw:
            WranglerLogger.critical("** SHOULD NOT BE HERE: %s (%s)" % (line[0], line[1]))
        # End of tree; store final route and return
        if currentRoute: rows.append(currentRoute)
        return (program, rows)
    def convertLinkData(self):
        """ Convert the parsed tree of data into a usable python list of transit links
            returns list of comments and transit link & factor objects
        """
        rows = []
        currentLink = None
        currentFactor = None
        key = None
        value = None
        comments = []
        for link in self.tfp.links:
            # Each link is a 3-tuple: key, value, list-of-children.
            # Add comments as simple strings:
            if link[0] in ('smcw','semicolon_comment'):
                if currentLink:
                    currentLink.comment = " "+link[1].strip() # Link comment
                    rows.append(currentLink)
                    currentLink = None
                else:
                    rows.append(link[1].strip()) # Line comment
                continue
            # Link records
            if link[0] == 'link_attr':
                # Pay attention only to the children of lin_attr elements
                kids = link[2]
                for child in kids:
                    if child[0] in ('link_attr_name','word_nodes','word_modes'):
                        key = child[1]
                        # If this is a NAME attribute, we need to start a new TransitLink.
                        if key in ('nodes','NODES'):
                            if currentLink: rows.append(currentLink)
                            currentLink = TransitLink() # Create new dictionary for this transit support link
                    if child[0]=='nodepair':
                        currentLink.setId(child[1])
                    if child[0] in ('attr_value','numseq'):
                        currentLink[key] = child[1]
                continue
            # Got something unexpected:
            WranglerLogger.critical("** SHOULD NOT BE HERE: %s (%s)" % (link[0], link[1]))
        # Save last link too
        if currentLink: rows.append(currentLink)
        for factor in self.tfp.factors:
            currentFactor = Factor()
            # factor[0]:
            #  ('smcw', '; BART-eBART timed transfer\n',
            #    [('semicolon_comment', '; BART-eBART timed transfer\n',
            #      [('comment', ' BART-eBART timed transfer', [])])])
            # keep as line comment
            if factor[0][0] == 'smcw':
                smcw = factor.pop(0)
                rows.append(smcw[1].strip())
            # the rest are attributes
            # [('factor_attr', 'MAXWAITTIME=1, ', [('factor_attr_name', 'MAXWAITTIME', []), ('attr_value', '1', [('alphanums', '1', [])])]),
            #  ('factor_attr', 'NODES=15536\n', [('factor_attr_name', 'NODES', [('word_nodes', 'NODES', [])]), ('attr_value', '15536', [('alphanums', '15536', [])])])]
            for factor_attr in factor:
                if factor_attr[0] == 'semicolon_comment':
                    comments.append(factor_attr[1])
                    continue
                if factor_attr[0] != 'factor_attr':
                    WranglerLogger.critical("** unexpected factor item: {}".format(factor_attr))
                factor_attr_name = factor_attr[2][0] # ('factor_attr_name', 'MAXWAITTIME', [])
                factor_attr_val = factor_attr[2][1] # ('attr_value', '1', [('alphanums', '1', [])])
                # set it
                currentFactor[factor_attr_name[1]] = factor_attr_val[1]
            rows.append(currentFactor)
            if len(comments)>0:
                rows.extend(comments)
                comments = []
        return rows
    def convertPNRData(self):
        """ Convert the parsed tree of data into a usable python list of PNR objects
            returns list of strings and PNR objects
        """
        rows = []
        currentPNR = None
        key = None
        value = None
        for pnr in self.tfp.pnrs:
            # Each pnr is a 3-tuple: key, value, list-of-children.
            # Add comments as simple strings
            # Textline Comments
            if pnr[0] =='smcw':
                # Line comment; thus existing PNR must be finished.
                if currentPNR:
                    rows.append(currentPNR)
                    currentPNR = None
                rows.append(pnr[1].strip()) # Append line-comment
                continue
            # PNR records
            if pnr[0] == 'pnr_attr':
                # Pay attention only to the children of attr elements
                kids = pnr[2]
                for child in kids:
                    if child[0] in ('pnr_attr_name','word_node','word_zones'):
                        key = child[1]
                        # If this is a NAME attribute, we need to start a new PNR.
                        if key in ('node','NODE'):
                            if currentPNR:
                                rows.append(currentPNR)
                            currentPNR = PNRLink() # Create new dictionary for this PNR
                    if child[0]=='nodepair' or child[0]=='nodenum':
                        #print "child[0]/[1]",child[0],child[1]
                        currentPNR.id = child[1]
                        currentPNR.parseID()
                    if child[0] in ('attr_value','numseq'):
                        currentPNR[key.upper()] = child[1]
                    if child[0]=='semicolon_comment':
                        currentPNR.comment = ' '+child[1].strip()
                continue
            # Got something unexpected:
            WranglerLogger.critical("** SHOULD NOT BE HERE: %s (%s)" % (pnr[0], pnr[1]))
        # Save last link too
        if currentPNR: rows.append(currentPNR)
        return rows
    def convertZACData(self):
        """ Convert the parsed tree of data into a usable python list of ZAC objects
            returns list of strings and ZAC objects
        """
        rows = []
        currentZAC = None
        key = None
        value = None
        for zac in self.tfp.zacs:
            # Each zac is a 3-tuple: key, value, list-of-children.
            # Add comments as simple strings
            # Textline Comments
            if zac[0] in ('smcw','semicolon_comment'):
                if currentZAC:
                    currentZAC.comment = ' '+zac[1].strip()
                    rows.append(currentZAC)
                    currentZAC = None
                else:
                    rows.append(zac[1].strip()) # Append value
                continue
            # Link records
            if zac[0] == 'zac_attr':
                # Pay attention only to the children of lin_attr elements
                kids = zac[2]
                for child in kids:
                    if child[0]=='nodepair':
                        # Save old ZAC
                        if currentZAC: rows.append(currentZAC)
                        # Start new ZAC
                        currentZAC = ZACLink() # Create new dictionary for this ZAC.
                        currentZAC.id=child[1]
                    if child[0] =='zac_attr_name':
                        key = child[1]
                    if child[0]=='attr_value':
                        currentZAC[key] = child[1]
                continue
            # Got something unexpected:
            WranglerLogger.critical("** SHOULD NOT BE HERE: %s (%s)" % (zac[0], zac[1]))
        # Save last link too
        if currentZAC: rows.append(currentZAC)
        return rows
    def convertLinkiData(self, linktype):
        """ Convert the parsed tree of data into a usable python list of ZAC objects
            returns list of strings and ZAC objects

            linktype selects the source list: "access", "xfer" or "node";
            anything else raises NetworkException.
        """
        rows = []
        currentLinki = None
        key = None
        value = None
        linkis = []
        if linktype=="access":
            linkis=self.tfp.accesslis
        elif linktype=="xfer":
            linkis=self.tfp.xferlis
        elif linktype=="node":
            linkis=self.tfp.nodes
        else:
            raise NetworkException("ConvertLinkiData with invalid linktype")
        for accessli in linkis:
            # whitespace?, smcw?, nodenumA, spaces?, nodenumB, spaces?, (float/int)?, spaces?, semicolon_comment?
            if accessli[0]=='smcw':
                rows.append(accessli[1].strip())
            elif accessli[0]=='nodenumA':
                currentLinki = Linki()
                rows.append(currentLinki)
                currentLinki.A = accessli[1].strip()
            elif accessli[0]=='nodenumB':
                currentLinki.B = accessli[1].strip()
            elif accessli[0]=='float':
                currentLinki.distance = accessli[1].strip()
            elif accessli[0]=='int':
                currentLinki.xferTime = accessli[1].strip()
            elif accessli[0]=='semicolon_comment':
                currentLinki.comment = accessli[1].strip()
            elif accessli[0]=='accesstag':
                currentLinki.accessType = accessli[1].strip()
            else:
                # Got something unexpected:
                WranglerLogger.critical("** SHOULD NOT BE HERE: %s (%s)" % (accessli[0], accessli[1]))
        return rows
    def convertSupplinksData(self):
        """ Convert the parsed tree of data into a usable python list of Supplink objects
            returns list of strings and Supplink objects
        """
        rows = []
        currentSupplink = None
        key = None
        value = None
        for supplink in self.tfp.supplinks:
            # Supplink records are lists
            if currentSupplink: rows.append(currentSupplink)
            currentSupplink = Supplink() # Create new dictionary for this PNR
            for supplink_attr in supplink:
                if supplink_attr[0] == 'supplink_attr':
                    if supplink_attr[2][0][0]=='supplink_attr_name':
                        currentSupplink[supplink_attr[2][0][1]] = supplink_attr[2][1][1]
                    elif supplink_attr[2][0][0]=='npair_attr_name':
                        currentSupplink.setId(supplink_attr[2][1][1])
                    else:
                        WranglerLogger.critical("** SHOULD NOT BE HERE: %s (%s)" % (supplink[0], supplink[1]))
                        raise
                elif supplink_attr[0] == "semicolon_comment":
                    currentSupplink.comment = supplink_attr[1].strip()
                elif supplink_attr[0] == 'smcw':
                    currentSupplink.comment = supplink_attr[1].strip()
                else:
                    WranglerLogger.critical("** SHOULD NOT BE HERE: %s (%s)" % (supplink[0], supplink[1]))
                    raise
        # Save last link too
        if currentSupplink: rows.append(currentSupplink)
        return rows
    def convertFaresystemData(self):
        """ Convert the parsed tree of data into a usable python list of Faresystem objects
            returns list of strings and Faresystem objects

            (Actually returns a dict keyed by each Faresystem's id.)
        """
        rows = {}
        currentFaresystem = None
        for faresystem in self.tfp.faresystems:
            # faresystem records are lists
            if currentFaresystem: rows[currentFaresystem.getId()] = currentFaresystem
            currentFaresystem = Faresystem()
            for fs_attr in faresystem:
                if fs_attr[0] == 'faresystem_attr':
                    if fs_attr[2][0][0]=='faresystem_attr_name':
                        currentFaresystem[fs_attr[2][0][1]] = fs_attr[2][1][1]
                    # for now, save this as FAREFROMFS => "0,0,1.0,0," etc
                    elif fs_attr[2][0][0]=='faresystem_fff':
                        # fs_attr[2] = [('faresystem_fff', 'FAREFROMFS', []),
                        #               ('floatseq', '0,0,0,0,..,0,0', [('floatnum', '0', []), ('floatnum', '0', []), ..
                        currentFaresystem[fs_attr[2][0][1]] = fs_attr[2][1][1]
                elif fs_attr[0] == "semicolon_comment":
                    currentFaresystem.comment = fs_attr[1].strip()
                elif fs_attr[0] == 'smcw':
                    currentFaresystem.comment = fs_attr[1].strip()
                else:
                    WranglerLogger.critical("** SHOULD NOT BE HERE: %s".format(fs_attr))
                    raise
        # save last faresystem too
        if currentFaresystem: rows[currentFaresystem.getId()] = currentFaresystem
        return rows
    def convertPTSystemData(self):
        """ Convert the parsed tree of data into a PTSystem object
            returns a PTSystem object

            Each curve/operator/mode/vehicletype record becomes an OrderedDict
            of its attributes keyed by its NUMBER attribute.  Returns None if
            no PT system statements were parsed at all.
        """
        pts = PTSystem()
        for crvdef in self.tfp.waitcrvdefs:
            curve_num = None
            curve_dict = collections.OrderedDict()
            for attr in crvdef:
                # just handle curve attributes
                if attr[0] !="crv_attr": continue
                key = attr[2][0][1]
                val = attr[2][1][1]
                if key == "NUMBER": curve_num = int(val)
                curve_dict[key] = val
            pts.waitCurveDefs[curve_num] = curve_dict
        for crvdef in self.tfp.crowdcrvdefs:
            curve_num = None
            curve_dict = collections.OrderedDict()
            for attr in crvdef:
                # just handle curve attributes
                if attr[0] !="crv_attr": continue
                key = attr[2][0][1]
                val = attr[2][1][1]
                if key == "NUMBER": curve_num = int(val)
                curve_dict[key] = val
            pts.crowdCurveDefs[curve_num] = curve_dict
        for operator in self.tfp.operators:
            op_num = None
            op_dict = collections.OrderedDict()
            for attr in operator:
                # just handle opmode attributes
                if attr[0] !="opmode_attr": continue
                key = attr[2][0][1]
                val = attr[2][1][1]
                if key == "NUMBER": op_num = int(val)
                op_dict[key] = val # leave as string
            pts.operators[op_num] = op_dict
        for mode in self.tfp.modes:
            mode_num = None
            mode_dict = collections.OrderedDict()
            for attr in mode:
                # just handle opmode attributes
                if attr[0] !="opmode_attr": continue
                key = attr[2][0][1]
                val = attr[2][1][1]
                if key == "NUMBER": mode_num = int(val)
                mode_dict[key] = val # leave as string
            pts.modes[mode_num] = mode_dict
        for vehicletype in self.tfp.vehicletypes:
            vt_num = None
            vt_dict = collections.OrderedDict()
            for attr in vehicletype:
                # just handle vehtype attributes
                if attr[0] != "vehtype_attr": continue
                key = attr[2][0][1]
                val = attr[2][1][1]
                if key == "NUMBER": vt_num = int(val)
                vt_dict[key] = val # leave as string
            pts.vehicleTypes[vt_num] = vt_dict
        if len(pts.waitCurveDefs) > 0 or len(pts.crowdCurveDefs) > 0 or len(pts.operators) > 0 or len(pts.modes) > 0 or len(pts.vehicleTypes) > 0:
            return pts
        return None
|
import cgi
import os
import wsgiref.handlers
import os
from google.appengine.ext.webapp import template
from google.appengine.api import users
from google.appengine.ext import webapp
from google.appengine.ext.webapp.util import run_wsgi_app
from google.appengine.ext import db
from google.appengine.api import mail
#Google App Engine Object Models
class Account(db.Model):
    """Registered user of the book exchange."""
    firstName = db.StringProperty(required=True)
    lastName = db.StringProperty(required=True)
    userName = db.StringProperty(required=True)
    # NOTE(review): password is stored in plain text -- consider hashing.
    password = db.StringProperty(required=True)
    email = db.EmailProperty(required=True)
    # Set automatically on first put().
    dateAdded = db.DateTimeProperty(auto_now_add=True)
class Author(db.Model):
    """Book author (first and last name only)."""
    firstName = db.StringProperty(required=True)
    lastName = db.StringProperty(required=True)
class Subject(db.Model):
    """Academic subject a book belongs to."""
    name = db.StringProperty (required = True)
class Course(db.Model):
    """Course, identified by its catalog number."""
    number = db.StringProperty(required = True)
class Book(db.Model):
    """A listed book, owned by one Account and possibly requested by another."""
    ownedBy = db.ReferenceProperty(Account, required = True, collection_name="ownedBy_set")
    requestedBy = db.ReferenceProperty(Account, collection_name="requestedBy_set")
    # Free-text author name (not a reference to the Author model).
    author = db.StringProperty(required = True)
    course = db.ReferenceProperty(Course, required = True)
    subject = db.ReferenceProperty(Subject, required = True)
    title = db.StringProperty(required = True)
    dateAdded = db.DateTimeProperty(auto_now_add = True)
    # True while the book can still be requested/claimed.
    available = db.BooleanProperty(default=True,required = True)
#HTTP Request Handlers
class AccountHandler(webapp.RequestHandler):
    """HTTP handler for Account entities.

    post() registers a new account; get() authenticates.  Both write the
    account's numeric id on success and an empty body on failure.
    """
    def userNameExists(self,u):
        """Return the key of the Account with username u, or None."""
        userNameQuery = db.GqlQuery("SELECT __key__ from Account WHERE userName =:1",u)
        userName = userNameQuery.get()
        return userName
    def accountExists(self, u, p):
        """Return the key of the Account matching username u and password p, or None."""
        acctQuery = db.GqlQuery("SELECT __key__ from Account WHERE userName = :1 AND password = :2",u,p)
        acct = acctQuery.get()
        return acct
    def hasAccount(self,f, l):
        """Return the key of the Account with the given first/last name, or None."""
        acctQuery = db.GqlQuery("SELECT __key__ from Account WHERE firstName = :1 AND lastName = :2", f, l)
        acct = acctQuery.get()
        return acct
    def post(self):
        """Create a new account unless the real name or username is taken.

        Writes the new account's id; an empty body signals a duplicate.
        """
        user = self.request.get('userName')
        pswrd = self.request.get('password')
        last = self.request.get('lastName')
        first = self.request.get('firstName')
        email = self.request.get('email')
        if self.hasAccount(first, last):
            self.response.out.write('')
        elif self.userNameExists(user):
            self.response.out.write('')
        else:
            acct = Account(firstName=first, lastName=last, userName=user, password=pswrd, email=email)
            acct.put()
            self.response.out.write(acct.key().id())
    def get(self):
        """Authenticate; write the account id, or an empty body on bad credentials."""
        user = self.request.get('userName')
        pswrd = self.request.get('password')
        # BUGFIX: previously called .id() on the query result unconditionally,
        # raising AttributeError (an HTTP 500) whenever the credentials did not
        # match.  Reuse accountExists() and mirror post()'s empty-body failure
        # convention instead.
        acctKey = self.accountExists(user, pswrd)
        if acctKey is None:
            self.response.out.write('')
        else:
            self.response.out.write(acctKey.id())
class AuthorHandler(webapp.RequestHandler):
    """HTTP handler that creates Author entities."""
    def authorExists(self, f, l):
        """Return the key of the Author with the given first/last name, or None.

        BUGFIX: `self` was missing from the parameter list, so invoking
        self.authorExists(f, l) bound f to self and raised a TypeError.
        """
        authQuery = db.GqlQuery("SELECT __key__ from Author WHERE firstName =:1 AND lastName =:2", f, l)
        auth = authQuery.get()
        return auth
    def post(self):
        """Create an Author from the firstName/lastName form fields."""
        first = self.request.get('firstName')
        last = self.request.get('lastName')
        author = Author(lastName = last, firstName = first)
        author.put()
class BookHandler(webapp.RequestHandler):
    """CRUD + search handler for Book entities.

    post: create a book; put: edit/request/accept/reject (the latter three
    send a notification e-mail); delete: remove by id; get: search by one of
    several criteria and render results through bookTemplate.xml.
    """

    def post(self):
        """Create a Book owned by 'ownerID', resolving course/subject keys by their names."""
        ownerID = int(self.request.get('ownerID'))
        acctKey = db.Key.from_path('Account',ownerID)
        subject = self.request.get('subject')
        title = self.request.get('title')
        author = self.request.get('author')
        course = self.request.get('course')
        # Resolve the referenced Course and Subject entities by natural key.
        courseQuery = db.GqlQuery('SELECT __key__ from Course WHERE number =:1',course)
        courseKey = courseQuery.get()
        subjectQuery = db.GqlQuery('SELECT __key__ from Subject WHERE name =:1',subject)
        subjectKey = subjectQuery.get()
        book = Book(ownedBy=acctKey, course=courseKey, title=title, author=author, subject=subjectKey)
        book.put()

    def put(self):
        """Update the Book named by 'bookID' according to 'updateType'.

        updateType is one of 'edit', 'request', 'acceptRequest',
        'rejectRequest'; the entity is saved once at the end regardless of
        which branch ran.
        """
        updateType = self.request.get('updateType')
        bookID = int(self.request.get('bookID'))
        book = Book.get_by_id(bookID)
        if updateType=='edit':
            subject = self.request.get('subject')
            title = self.request.get('title')
            author = self.request.get('author')
            course = self.request.get('course')
            courseQuery = db.GqlQuery('SELECT __key__ from Course WHERE number =:1',course)
            courseKey = courseQuery.get()
            subjectQuery = db.GqlQuery('SELECT __key__ from Subject WHERE name =:1',subject)
            subjectKey = subjectQuery.get()
            book.subject=subjectKey
            book.title=title
            book.author=author
            book.course=courseKey
        if updateType=='request':
            # Record the requester on the book and notify by e-mail.
            # NOTE(review): mailSender is built from the owner but never
            # used — the hard-coded sender below is sent instead; confirm
            # the intended addressing.
            requesterID = int(self.request.get('requesterID'))
            requesterKey = db.Key.from_path('Account',requesterID)
            book.requestedBy = requesterKey
            requestOb = Account.get_by_id(requesterKey.id())
            mailReceiver = requestOb.firstName+' '+requestOb.lastName+'<'+requestOb.email+'>'
            mailSender = book.ownedBy.firstName+' '+book.ownedBy.lastName+'<'+book.ownedBy.email+'>'
            mailBody = requestOb.firstName+' '+requestOb.lastName + ' Requests Book ' + str(book.key().id()) + ' : ' +book.title
            mail.send_mail(sender = 'Nikea Davis <nikeadavis@gmail.com>', to = mailReceiver, subject='GT Book Shelf Request',body=mailBody)
        if updateType=='acceptRequest':
            # Mark the book unavailable and notify the requester.
            book.available=False
            requesterID = int(self.request.get('requesterID'))
            requesterKey = db.Key.from_path('Account',requesterID)
            requestOb = Account.get_by_id(requesterKey.id())
            mailReceiver = requestOb.firstName+' '+requestOb.lastName+'<'+requestOb.email+'>'
            mailBody = 'Your Request for Book ' + str(book.key().id()) + ' : ' +book.title + ' has been ACCEPTED'
            mail.send_mail(sender = 'Nikea Davis <nikeadavis@gmail.com>', to = mailReceiver, subject='GT Book Shelf Request Status',body=mailBody)
        if updateType== 'rejectRequest':
            # Notify the requester of the denial; the book stays available.
            requesterID = int(self.request.get('requesterID'))
            requesterKey = db.Key.from_path('Account',requesterID)
            requestOb = Account.get_by_id(requesterKey.id())
            mailReceiver = requestOb.firstName+' '+requestOb.lastName+'<'+requestOb.email+'>'
            mailBody = 'Your Request for Book ' + str(book.key().id()) + ' : ' +book.title + ' has been DENIED'
            mail.send_mail(sender = 'Nikea Davis <nikeadavis@gmail.com>', to = mailReceiver, subject='GT Book Shelf Request Status',body=mailBody)
        book.put()

    def delete(self):
        """Delete the Book identified by 'bookID'."""
        bookID = int(self.request.get('bookID'))
        book = Book.get_by_id(bookID)
        book.delete()

    def get(self):
        """Search books by 'queryType': User, Subject, Course, Title, Author or ID.

        Renders matches through bookTemplate.xml, writes '0' when nothing
        matched, and 'bad query type' for an unknown queryType.
        """
        queryType = self.request.get('queryType')
        if queryType=='User':
            userID =int(self.request.get('userID'))
            userKey = db.Key.from_path('Account',userID)
            bookQ = Book.gql('WHERE ownedBy = :1',userKey)
            books = bookQ.fetch(1000)
        elif queryType =='Subject':
            subject = self.request.get('searchValue')
            subjectQuery = Subject.gql('WHERE name = :1',subject)
            # NOTE(review): .get() returns None for an unknown subject, so
            # .key() would raise AttributeError — confirm inputs are trusted.
            subjectKey = subjectQuery.get().key()
            bookQuery= Book.gql('WHERE subject = :1', subjectKey)
            books= bookQuery.fetch(1000)
        elif queryType =='Course':
            course = self.request.get('searchValue')
            courseQuery = Course.gql('WHERE number =:1',course)
            courseKey = courseQuery.get().key()
            bookQuery= Book.gql('WHERE course = :1', courseKey)
            books= bookQuery.fetch(1000)
        elif queryType =='Title':
            title =self.request.get('searchValue')
            bookQuery= Book.gql('WHERE title = :1', title)
            books= bookQuery.fetch(1000)
        elif queryType =='Author':
            author =self.request.get('searchValue')
            bookQuery= Book.gql('WHERE author = :1', author)
            books= bookQuery.fetch(1000)
        elif queryType == 'ID':
            bookID = int(self.request.get('bookID'))
            books = []
            books.append(Book.get_by_id(bookID))
        else:
            self.response.out.write('bad query type')
            return
        if books:
            template_values = {'books':books}
            path = os.path.join(os.path.dirname(__file__), 'bookTemplate.xml')
            self.response.out.write(template.render(path, template_values))
        else:
            self.response.out.write('0')
class CourseHandler(webapp.RequestHandler):
    """Create Course entities and list all course numbers."""

    def courseExists(self, name):
        """Return the key of the Course whose number equals name, or None.

        Fix: the original signature omitted `self`, so any call on a handler
        instance raised TypeError.
        """
        crsQuery = db.GqlQuery("SELECT __key__ from Course WHERE number =:1", name)
        return crsQuery.get()

    def post(self):
        """Create a Course from the 'courseNumber' parameter (no duplicate check)."""
        courseNum = self.request.get('courseNumber')
        course = Course(number=courseNum)
        course.put()

    def get(self):
        """Write all course numbers as one comma-separated line; writes nothing when empty."""
        courses = Course.all()
        numberlist = []
        for c in courses:
            numberlist.append(c.number)
        coursenumber = ",".join(numberlist)
        if coursenumber:
            self.response.out.write(coursenumber)
class SubjectHandler(webapp.RequestHandler):
    """Create Subject entities and list all subject names."""

    def subjectExists(self, name):
        """Return the key of the Subject whose name equals name, or None.

        Fix: the original signature omitted `self`, so any call on a handler
        instance raised TypeError.
        """
        subQuery = db.GqlQuery("SELECT __key__ from Subject WHERE name =:1", name)
        return subQuery.get()

    def post(self):
        """Create a Subject from the 'subjectName' parameter (no duplicate check)."""
        subName = self.request.get('subjectName')
        subject = Subject(name=subName)
        subject.put()

    def get(self):
        """Write all subject names as one comma-separated line; writes nothing when empty."""
        subjects = Subject.all()
        namelist = []
        for s in subjects:
            namelist.append(s.name)
        subjectnames = ",".join(namelist)
        if subjectnames:
            self.response.out.write(subjectnames)
#
# URL routing: one handler per entity type.
application = webapp.WSGIApplication([('/account',AccountHandler),('/author',AuthorHandler),('/book', BookHandler),('/course',CourseHandler),('/subject', SubjectHandler)],debug=True)


def main():
    """Run the WSGI application under the App Engine CGI adapter."""
    run_wsgi_app(application)


if __name__ == "__main__":
    main()
import boto3
def get_bucket_list():
    """Return the names of all S3 buckets visible to the default credentials.

    Returns:
        list[str]: one entry per bucket, in the order the API returns them.

    Fixes: removed the dead `regions = 0` variable and replaced the manual
    append loop with a list comprehension.
    """
    client = boto3.client('s3')
    response = client.list_buckets()
    return [bucket["Name"] for bucket in response['Buckets']]
def bucket_object_info(bucket_req):
    """
    returns the buckets object info

    Prints every object key in bucket `bucket_req`, then returns only the
    FIRST key. NOTE(review): raises KeyError when the bucket is empty
    ('Contents' absent from the response) — confirm callers expect that.
    """
    client = boto3.client('s3')
    # list all bucket objects info
    bucket_object = client.list_objects(
        Bucket=bucket_req
    )
    bucket_contents_dict = bucket_object['Contents']
    bucket_contents = []
    for i in bucket_contents_dict:
        bucket_contents.append(i["Key"])
    print(bucket_contents)
    return bucket_contents[0]
def object_info(region, obj_info, bucket_req):
    """
    A function that gives bucket information

    Finds the first object in `bucket_req` whose metadata contains the value
    `obj_info` (typically its 'Key') and prints its Key/LastModified/ETag/
    Size/Owner fields.
    """
    client = boto3.client('s3', region_name=region)
    bucket_object = client.list_objects(
        Bucket=bucket_req
    )
    req_obj_info = []
    bucket_contents_dict = bucket_object['Contents']
    # Collect every object dict where any field value equals obj_info.
    for i in bucket_contents_dict:
        for k, v in i.items():
            if i[k] == obj_info:
                req_obj_info.append(i)
    # IndexError if nothing matched — presumably callers pass a known key.
    req_obj_info = req_obj_info[0]
    object_info = []
    obj_info_req = ['Key', 'LastModified', 'ETag', 'Size', 'Owner']
    for k, v in req_obj_info.items():
        for i in range(0, len(obj_info_req)):
            if k == obj_info_req[i]:
                object_info.append(req_obj_info[k])
    # NOTE(review): the print below pairs object_info with obj_info_req by
    # position, but object_info was filled in dict-iteration order, so the
    # labels can misalign — verify against real responses.
    for i in range(0, len(obj_info_req)):
        print("{0} : {1}".format(obj_info_req[i], object_info[i]))
def get_bucket_obj_info(bucket):
    """For each bucket name in `bucket`, print the bucket name followed by the
    Key/LastModified/ETag/Size of every object it contains."""
    client = boto3.client('s3')
    bucket_obj_info = []
    for i in bucket:
        bucket_objs_1 = []
        bucket_objs = client.list_objects(
            Bucket=i
        )
        bucket_names = bucket_objs['Name']
        bucket_objs_1.append(bucket_names)
        # NOTE(review): KeyError on empty buckets ('Contents' absent) — confirm.
        bucket_contents_dict = bucket_objs['Contents']
        obj_info_req = ['Key', 'LastModified', 'ETag', 'Size']
        for j in bucket_contents_dict:
            for k in j.keys():
                for l in range(len(obj_info_req)):
                    if k == obj_info_req[l]:
                        bucket_objs_1.append(j[k])
        print(bucket_objs_1)

# Script entry: enumerate all buckets and dump their object metadata.
buckets = get_bucket_list()
get_bucket_obj_info(buckets)
from sys import stdin
def main():
    """Read weighted-graph test cases from stdin and, for each, print the
    node whose total shortest-path distance to all other nodes is minimal.

    Input per case: a line 'n m', then n node names, then m undirected
    weighted edges as '1-based-i 1-based-j weight'. A line with n == 0 ends
    the input. Uses Floyd-Warshall, O(n^3) per case.
    """
    case = 1
    while True:
        n, m = map(int, stdin.readline().split())
        if n == 0:
            break
        # 1000 acts as infinity; it exceeds any real path for these inputs.
        distance = [[1000] * n for _ in range(n)]
        for x in range(n):
            distance[x][x] = 0
        names = [stdin.readline().strip() for _ in range(n)]
        for _ in range(m):
            i, j, k = map(int, stdin.readline().split())
            distance[i - 1][j - 1] = distance[j - 1][i - 1] = k
        # Floyd-Warshall all-pairs shortest paths.
        for k in range(n):
            for i in range(n):
                for j in range(n):
                    via = distance[i][k] + distance[k][j]
                    if via < distance[i][j]:
                        distance[i][j] = via
        # Pick the node with the smallest distance sum. 'best' replaces the
        # original variable named 'min', which shadowed the builtin.
        best = 100000
        name = names[0]
        for i in range(n):
            s = sum(distance[i])
            if s < best:
                best = s
                name = names[i]
        print("Case #%d : %s" % (case, name))
        case += 1

if __name__ == '__main__':
    main()
|
# Copyright 2020 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from pants.backend.codegen.protobuf.protoc import Protoc
from pants.engine.rules import collect_rules, rule
from pants.engine.target import (
COMMON_TARGET_FIELDS,
AllTargets,
BoolField,
Dependencies,
MultipleSourcesField,
OverridesField,
SingleSourceField,
Target,
TargetFilesGenerator,
TargetFilesGeneratorSettings,
TargetFilesGeneratorSettingsRequest,
Targets,
generate_file_based_overrides_field_help_message,
generate_multiple_sources_field_help_message,
)
from pants.engine.unions import UnionRule
from pants.util.docutil import doc_url
from pants.util.logging import LogLevel
from pants.util.strutil import help_text
class ProtobufDependenciesField(Dependencies):
    """Standard `dependencies` field, subclassed so protobuf rules can match on it."""
    pass


class ProtobufGrpcToggleField(BoolField):
    """`grpc=` field controlling whether codegen also emits gRPC service stubs."""
    alias = "grpc"
    default = False
    help = "Whether to generate gRPC code or not."


class AllProtobufTargets(Targets):
    """Collection type holding every protobuf target in the repository."""
    pass
@rule(desc="Find all Protobuf targets in project", level=LogLevel.DEBUG)
def find_all_protobuf_targets(targets: AllTargets) -> AllProtobufTargets:
    """Filter all targets down to those carrying a `ProtobufSourceField`."""
    return AllProtobufTargets(tgt for tgt in targets if tgt.has_field(ProtobufSourceField))
# -----------------------------------------------------------------------------------------------
# `protobuf_source` target
# -----------------------------------------------------------------------------------------------
class ProtobufSourceField(SingleSourceField):
    """`source` field restricted to a single `.proto` file."""
    expected_file_extensions = (".proto",)


class ProtobufSourceTarget(Target):
    """Target describing one Protobuf file; codegen backends consume its fields."""
    alias = "protobuf_source"
    core_fields = (
        *COMMON_TARGET_FIELDS,
        ProtobufDependenciesField,
        ProtobufSourceField,
        ProtobufGrpcToggleField,
    )
    help = help_text(
        f"""
        A single Protobuf file used to generate various languages.
        See language-specific docs:
        Python: {doc_url('protobuf-python')}
        Go: {doc_url('protobuf-go')}
        """
    )
# -----------------------------------------------------------------------------------------------
# `protobuf_sources` target generator
# -----------------------------------------------------------------------------------------------
class GeneratorSettingsRequest(TargetFilesGeneratorSettingsRequest):
    """Marker request type that routes settings lookup to `generator_settings`."""
    pass


@rule
def generator_settings(
    _: GeneratorSettingsRequest,
    protoc: Protoc,
) -> TargetFilesGeneratorSettings:
    """Derive file-generator settings from the Protoc subsystem.

    When dependency inference is disabled, each generated target must depend
    on all sibling files so proto imports still resolve.
    """
    return TargetFilesGeneratorSettings(
        add_dependencies_on_all_siblings=not protoc.dependency_inference
    )
class ProtobufSourcesGeneratingSourcesField(MultipleSourcesField):
    """`sources` field for the generator target; defaults to every `.proto` in the dir."""
    default = ("*.proto",)
    expected_file_extensions = (".proto",)
    help = generate_multiple_sources_field_help_message(
        "Example: `sources=['example.proto', 'new_*.proto', '!old_ignore*.proto']`"
    )


class ProtobufSourcesOverridesField(OverridesField):
    """Per-file field overrides applied to generated `protobuf_source` targets."""
    help = generate_file_based_overrides_field_help_message(
        ProtobufSourceTarget.alias,
        """
        overrides={
            "foo.proto": {"grpc": True},
            "bar.proto": {"description": "our user model"},
            ("foo.proto", "bar.proto"): {"tags": ["overridden"]},
        }
        """,
    )
class ProtobufSourcesGeneratorTarget(TargetFilesGenerator):
    """`protobuf_sources` target: expands into one `protobuf_source` per matched file."""
    alias = "protobuf_sources"
    core_fields = (
        *COMMON_TARGET_FIELDS,
        ProtobufSourcesGeneratingSourcesField,
        ProtobufSourcesOverridesField,
    )
    generated_target_cls = ProtobufSourceTarget
    # Fields copied verbatim onto each generated target vs. moved off the
    # generator entirely.
    copied_fields = COMMON_TARGET_FIELDS
    moved_fields = (
        ProtobufGrpcToggleField,
        ProtobufDependenciesField,
    )
    settings_request_cls = GeneratorSettingsRequest
    help = "Generate a `protobuf_source` target for each file in the `sources` field."
def rules():
    """Return this backend's rules plus the generator-settings union registration."""
    registrations = list(collect_rules())
    registrations.append(UnionRule(TargetFilesGeneratorSettingsRequest, GeneratorSettingsRequest))
    return registrations
|
'''
George Alromhin gr.858301
Example of deep learning with the Keras library: https://keras.io/
Documentation of the Matplotlib charting library: https://matplotlib.org/
Documentation of TensorFlow: https://www.tensorflow.org/install/pip
Official pip site: https://pypi.org/project/pip/
'''
import tensorflow as tf
import keras
import numpy as np
from keras.models import Model
from keras.layers import Input, Dense, Conv1D, SimpleRNN
from inspect import getmembers, isfunction
import matplotlib.pyplot as plt
from tensorflow.keras.activations import linear, relu, softplus
# How to define a neural-network model? Which interfaces exist and what are
# their parameters? How to set the weight coefficients of a network?
W = np.random.rand(2, 2)
b = np.random.rand(2)
visible = Input(shape=(2,))
hidden = Dense(units=2, weights=[W, b])(visible) # layer with weights
model = Model(inputs=visible, outputs=hidden)
# How to define a fully connected (dense) layer?
Dense(units=32)
# How to define a convolutional layer?
Conv1D(kernel_size=200, filters=20)
# What tools are available for recurrent networks?
SimpleRNN(units=32)
# How to set the activation function, and which ones does Keras support?
Dense(64, activation='tanh')
print('Activation Functions:')
# NOTE(review): outside an interactive session this comprehension computes
# the names and discards them.
[name for name, obj in getmembers(tf.keras.activations) if isfunction(obj) and name != 'deserialize']
# How does linear differ from ReLU and softplus? Plot all three side by side.
x = np.linspace(-10, 10)
fig, ax = plt.subplots(1, 3, sharey=True, figsize=(14, 4))
for i, f in enumerate([linear, relu, softplus]):
    ax[i].plot(x, f(x))
    ax[i].set_title(f.__name__)
plt.show()
# How to set the loss function and the training method?
model.compile(loss='mean_squared_error', optimizer='sgd')
# How does mean_squared_error differ from cosine_proximity, and by which formulas?
# How does SGD differ from rprop, Adadelta, Adam; nesterov from momentum?
keras.optimizers.SGD(learning_rate=0.01, momentum=0.0, nesterov=False)
keras.optimizers.Adadelta(learning_rate=1.0, rho=0.95)
keras.optimizers.Adam(learning_rate=0.001, beta_1=0.9, beta_2=0.999, amsgrad=False)
# (duplicate of the optimizer question above)
# How to specify the training set?
train_data = np.random.random((2, 2))
model.fit(train_data, epochs=10)
# How to specify the training set? NOTE(review): this block duplicates the
# two lines above.
train_data = np.random.random((2, 2))
model.fit(train_data, epochs=10)
|
#!/usr/bin/env python
#
# Copyright (c) 2019 Opticks Team. All Rights Reserved.
#
# This file is part of Opticks
# (see https://bitbucket.org/simoncblyth/opticks).
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
dat.py
========
"""
import os, logging, numpy as np
log = logging.getLogger(__name__)
class Dat(object):
    """
    For interactive exploration of dimension > 3 arrays

    Wraps array `a` with axis labels: `ina`, `jna`, `kna` name the first
    three dimensions (defaulting to stringified indices). Select a cell with
    ``d.ijk = (i, j, k)``, optionally slice the remainder with ``d[sli]``,
    then read ``d.d`` / ``d.name`` / ``repr(d)``.
    """

    def __init__(self, a, ina=None, jna=None, kna=None ):
        # Default labels are fixed-width byte strings of the indices.
        # NOTE(review): "|S16" yields bytes; _get_name joins them with a str
        # "," which fails on Python 3 — presumably written for Python 2,
        # confirm the target interpreter.
        self.a = a
        if ina is None:
            ina = np.arange(0, a.shape[0] ).astype("|S16")
        if jna is None:
            jna = np.arange(0, a.shape[1] ).astype("|S16")
        if kna is None:
            kna = np.arange(0, a.shape[2] ).astype("|S16")
        self.ina = ina
        self.kna = kna
        self.jna = jna
        # Goes through the ijk property setter; start at the array origin.
        self.ijk = 0,0,0
        # Default slice selects the whole remaining dimension(s).
        self.sli = slice(0,None,1)

    def __getitem__(self, sli):
        """Store a slice for the remaining dims and return self for chaining."""
        self.sli = sli
        return self

    def _get_d(self):
        # The currently selected sub-array: a[i, j, k] further sliced by sli.
        return self.a[self.i,self.j,self.k][self.sli]
    d = property(_get_d)

    def __repr__(self):
        return "\n".join(map(repr, [self.ijk, self.name, self.d]))

    def _get_name(self):
        # Human-readable label of the current (i, j, k) selection.
        return ",".join([self.ina[self.i], self.jna[self.j], self.kna[self.k]])
    name = property(_get_name)

    def _set_i(self, _i):
        # Bounds-checked index into axis 0.
        assert _i < self.a.shape[0]
        self._i = _i
    def _get_i(self):
        return self._i
    i = property(_get_i, _set_i)

    def _set_j(self, _j):
        # Bounds-checked index into axis 1.
        assert _j < self.a.shape[1]
        self._j = _j
    def _get_j(self):
        return self._j
    j = property(_get_j, _set_j)

    def _set_k(self, _k):
        # Bounds-checked index into axis 2.
        assert _k < self.a.shape[2]
        self._k = _k
    def _get_k(self):
        return self._k
    k = property(_get_k, _set_k)

    def _set_ijk(self, *ijk):
        # Property setters receive one value, so ijk == ((i, j, k),).
        assert len(ijk) == 1 and len(ijk[0]) == 3
        self.i = ijk[0][0]
        self.j = ijk[0][1]
        self.k = ijk[0][2]
    def _get_ijk(self):
        return (self.i, self.j, self.k)
    ijk = property(_get_ijk, _set_ijk)
if __name__ == '__main__':
    # Demo: browse the boundary-library array with named j/k axes.
    from opticks.ana.main import opticks_main
    ok = opticks_main()
    a = np.load(os.path.expandvars("$IDPATH/GBndLib/GBndLib.npy"))
    d = Dat(a, None, "omat osur isur imat".split(), "g0 g1".split())
    print(d)
|
# Generated by Django 3.0.7 on 2020-08-17 18:50
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add `comment` to Staff and widen the `status` choices.

    NOTE(review): the stored choice value 'Registred' is misspelled while
    the default is 'Registered', so the default is not among the valid
    choice values — fixing that needs a follow-up migration, not an edit to
    this already-applied one.
    """

    dependencies = [
        ('staff', '0002_staff_status'),
    ]

    operations = [
        migrations.AddField(
            model_name='staff',
            name='comment',
            field=models.CharField(default='-', max_length=200, verbose_name='Comment'),
        ),
        migrations.AlterField(
            model_name='staff',
            name='status',
            field=models.CharField(choices=[('Registred', 'Registered'), ('Pending', 'Pending'), ('Approved', 'Approved'), ('Disapproved', 'Disapproved')], default='Registered', max_length=11, verbose_name='STATUS'),
        ),
    ]
|
from enum import Enum
from warnings import warn
import torch
from ..extension import _load_library
from ..utils import _log_api_usage_once
# Best-effort load of the native image extension; importing this module must
# not fail just because libjpeg/libpng support is missing.
try:
    _load_library("image")
except (ImportError, OSError) as e:
    # Fix: the original f-string fragments concatenated without separators,
    # producing "...'{e}'If you don't..." — add the missing space.
    warn(
        f"Failed to load image Python extension: '{e}' "
        f"If you don't plan on using image functionality from `torchvision.io`, you can ignore this warning. "
        f"Otherwise, there might be something wrong with your environment. "
        f"Did you have `libjpeg` or `libpng` installed before building `torchvision` from source?"
    )
class ImageReadMode(Enum):
    """
    Support for various modes while reading images.

    Use ``ImageReadMode.UNCHANGED`` for loading the image as-is,
    ``ImageReadMode.GRAY`` for converting to grayscale,
    ``ImageReadMode.GRAY_ALPHA`` for grayscale with transparency,
    ``ImageReadMode.RGB`` for RGB and ``ImageReadMode.RGB_ALPHA`` for
    RGB with transparency.
    """

    # The integer values are forwarded verbatim as the `mode` argument of
    # the underlying torch.ops.image decode operators.
    UNCHANGED = 0
    GRAY = 1
    GRAY_ALPHA = 2
    RGB = 3
    RGB_ALPHA = 4
def read_file(path: str) -> torch.Tensor:
    """Read the file at ``path`` and return its raw bytes as a 1-D uint8 Tensor.

    Args:
        path (str): the path of the file to read.

    Returns:
        Tensor: one-dimensional uint8 tensor with the file contents.
    """
    if not (torch.jit.is_scripting() or torch.jit.is_tracing()):
        _log_api_usage_once(read_file)
    return torch.ops.image.read_file(path)
def write_file(filename: str, data: torch.Tensor) -> None:
    """Write a one-dimensional uint8 tensor out to a file.

    Args:
        filename (str): destination path.
        data (Tensor): the bytes to write, as a 1-D uint8 tensor.
    """
    if not (torch.jit.is_scripting() or torch.jit.is_tracing()):
        _log_api_usage_once(write_file)
    torch.ops.image.write_file(filename, data)
def decode_png(input: torch.Tensor, mode: ImageReadMode = ImageReadMode.UNCHANGED) -> torch.Tensor:
    """Decode a PNG image into a 3-D RGB or grayscale uint8 tensor in [0, 255].

    Args:
        input (Tensor[1]): 1-D uint8 tensor holding the raw PNG bytes.
        mode (ImageReadMode): optional conversion applied while decoding;
            defaults to ``ImageReadMode.UNCHANGED``.

    Returns:
        Tensor[image_channels, image_height, image_width]
    """
    if not (torch.jit.is_scripting() or torch.jit.is_tracing()):
        _log_api_usage_once(decode_png)
    # Final False keeps the public path at 8-bit depth (see _read_png_16).
    return torch.ops.image.decode_png(input, mode.value, False)
def encode_png(input: torch.Tensor, compression_level: int = 6) -> torch.Tensor:
    """
    Takes an input tensor in CHW layout and returns a buffer with the contents
    of its corresponding PNG file.

    Args:
        input (Tensor[channels, image_height, image_width]): int8 image tensor of
            ``c`` channels, where ``c`` must be 3 or 1.
        compression_level (int): Compression factor for the resulting file, it must be a number
            between 0 and 9. Default: 6

    Returns:
        Tensor[1]: A one dimensional int8 tensor that contains the raw bytes of the
            PNG file.
    """
    if not torch.jit.is_scripting() and not torch.jit.is_tracing():
        _log_api_usage_once(encode_png)
    output = torch.ops.image.encode_png(input, compression_level)
    return output
def write_png(input: torch.Tensor, filename: str, compression_level: int = 6):
    """Encode ``input`` (CHW, or HW for grayscale) as PNG and save it to ``filename``.

    Args:
        input (Tensor[channels, image_height, image_width]): int8 image tensor
            with 1 or 3 channels.
        filename (str): destination path.
        compression_level (int): PNG compression factor, 0-9. Default: 6.
    """
    if not (torch.jit.is_scripting() or torch.jit.is_tracing()):
        _log_api_usage_once(write_png)
    png_bytes = encode_png(input, compression_level)
    write_file(filename, png_bytes)
def decode_jpeg(
    input: torch.Tensor, mode: ImageReadMode = ImageReadMode.UNCHANGED, device: str = "cpu"
) -> torch.Tensor:
    """Decode a JPEG image into a 3-D RGB or grayscale uint8 tensor in [0, 255].

    Args:
        input (Tensor[1]): 1-D uint8 tensor with the raw JPEG bytes; must live
            on CPU regardless of ``device``.
        mode (ImageReadMode): optional conversion; supported values are
            ``UNCHANGED``, ``GRAY`` and ``RGB``. Default: ``UNCHANGED``.
        device (str or torch.device): where the decoded image is stored. A
            cuda device selects nvjpeg decoding (CUDA >= 10.1 only).

    .. betastatus:: device parameter

    .. warning::
        nvjpeg leaks memory on CUDA versions < 11.6; use CUDA 11.6+ with
        ``device="cuda"``.

    Returns:
        Tensor[image_channels, image_height, image_width]
    """
    if not (torch.jit.is_scripting() or torch.jit.is_tracing()):
        _log_api_usage_once(decode_jpeg)
    target = torch.device(device)
    if target.type == "cuda":
        return torch.ops.image.decode_jpeg_cuda(input, mode.value, target)
    return torch.ops.image.decode_jpeg(input, mode.value)
def encode_jpeg(input: torch.Tensor, quality: int = 75) -> torch.Tensor:
    """Encode a CHW image tensor and return the bytes of the corresponding JPEG file.

    Args:
        input (Tensor[channels, image_height, image_width]): int8 image tensor
            with 1 or 3 channels.
        quality (int): JPEG quality, 1-100. Default: 75.

    Returns:
        Tensor[1]: 1-D int8 tensor with the raw JPEG bytes.

    Raises:
        ValueError: if ``quality`` is outside [1, 100].
    """
    if not (torch.jit.is_scripting() or torch.jit.is_tracing()):
        _log_api_usage_once(encode_jpeg)
    if not 1 <= quality <= 100:
        raise ValueError("Image quality should be a positive number between 1 and 100")
    return torch.ops.image.encode_jpeg(input, quality)
def write_jpeg(input: torch.Tensor, filename: str, quality: int = 75):
    """Encode a CHW image tensor as JPEG and save it to ``filename``.

    Args:
        input (Tensor[channels, image_height, image_width]): int8 image tensor
            with 1 or 3 channels.
        filename (str): destination path.
        quality (int): JPEG quality, 1-100. Default: 75.
    """
    if not (torch.jit.is_scripting() or torch.jit.is_tracing()):
        _log_api_usage_once(write_jpeg)
    jpeg_bytes = encode_jpeg(input, quality)
    write_file(filename, jpeg_bytes)
def decode_image(input: torch.Tensor, mode: ImageReadMode = ImageReadMode.UNCHANGED) -> torch.Tensor:
    """Detect JPEG vs PNG and decode into a 3-D RGB or grayscale uint8 tensor.

    Values are uint8 in [0, 255].

    Args:
        input (Tensor): 1-D uint8 tensor with the raw PNG or JPEG bytes.
        mode (ImageReadMode): optional conversion applied while decoding;
            defaults to ``ImageReadMode.UNCHANGED``.

    Returns:
        Tensor[image_channels, image_height, image_width]
    """
    if not (torch.jit.is_scripting() or torch.jit.is_tracing()):
        _log_api_usage_once(decode_image)
    return torch.ops.image.decode_image(input, mode.value)
def read_image(path: str, mode: ImageReadMode = ImageReadMode.UNCHANGED) -> torch.Tensor:
    """Read a JPEG or PNG from disk into a 3-D RGB or grayscale uint8 tensor.

    Values are uint8 in [0, 255].

    Args:
        path (str): path of the JPEG or PNG image.
        mode (ImageReadMode): optional conversion applied while decoding;
            defaults to ``ImageReadMode.UNCHANGED``.

    Returns:
        Tensor[image_channels, image_height, image_width]
    """
    if not (torch.jit.is_scripting() or torch.jit.is_tracing()):
        _log_api_usage_once(read_image)
    return decode_image(read_file(path), mode)
def _read_png_16(path: str, mode: ImageReadMode = ImageReadMode.UNCHANGED) -> torch.Tensor:
    """Internal: read a PNG passing True as the op's third argument, where the
    public decode_png passes False — presumably enabling 16-bit decoding, per
    the function name; confirm against the C++ decode_png operator."""
    data = read_file(path)
    return torch.ops.image.decode_png(data, mode.value, True)
|
import plotly.express as px
# Shared CSS color strings used by the plotting helpers.
dark_blue = "rgb(40, 60, 70)"
light_blue = "rgb(200, 230, 250)"
plot_background_blue = "rgb(240, 250, 255)"
transparent = "rgba(255, 255, 255, 0)"
def get_rubicon_colorscale(num_colors, low=0.33):
    """Sample ``num_colors`` colors from the 'Blues' colorscale.

    Requests for fewer than two colors are bumped up to two, since a
    colorscale needs at least two sample points.
    """
    sample_count = max(num_colors, 2)
    return px.colors.sample_colorscale("Blues", sample_count, low=low)
|
#! /usr/bin/env/python3
"""A simple script used to detect the presence of an ARP spoofing attack.
Uses Python 3"""
import scapy.all as scapy
def get_mac(ip):
    """Returns target MAC address.

    Broadcasts an ARP who-has for `ip` and returns the hwsrc of the first
    reply; raises IndexError when no host answers within the 1s timeout.
    """
    arp_request = scapy.ARP(pdst=ip)
    broadcast = scapy.Ether(dst='ff:ff:ff:ff:ff:ff')
    arp_request_broadcast = broadcast/arp_request
    answered_list = scapy.srp(arp_request_broadcast, timeout=1, verbose=False)[0]
    return answered_list[0][1].hwsrc
def sniff(interface):
    """Sniffs network for packets.

    Blocks forever, passing every packet on `interface` to
    process_sniffed_packet; store=False keeps memory bounded.
    """
    scapy.sniff(iface=interface, store=False, prn=process_sniffed_packet)
def process_sniffed_packet(packet):
    """Flag ARP replies (op == 2) whose claimed MAC differs from the real one."""
    if packet.haslayer(scapy.ARP) and packet[scapy.ARP].op == 2:
        try:
            # Ask the network who really owns the source IP and compare.
            real_mac = get_mac(packet[scapy.ARP].psrc)
            response_mac = packet[scapy.ARP].hwsrc
            if real_mac != response_mac:
                print('[+] Attack detected!')
        except IndexError:
            # get_mac got no answer for this IP; skip the packet.
            pass

# Start monitoring immediately when the script runs; interface is hard-coded.
sniff('eth0')
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import torch
from torch.jit import script, trace
import torch.nn as nn
from torch import optim
import torch.nn.functional as F
import csv
import random
import re
import os
import unicodedata
import codecs
from io import open
import itertools
import math
import seq2seq
import prepare_data_for_model as p
import load_trim_data as d
# Run everything on GPU when available.
USE_CUDA = torch.cuda.is_available()
device = torch.device("cuda" if USE_CUDA else "cpu")
#searcher = seq2seq.GreedySearchDecoder(r.encoder, r.decoder)
#seq2seq.evaluateInput(r.encoder, r.decoder, searcher, d.voc)
# Configure models
model_name = 'cb_model'
attn_model = 'dot'
#attn_model = 'general'
#attn_model = 'concat'
hidden_size = 500
encoder_n_layers = 2
decoder_n_layers = 2
dropout = 0.1
batch_size = 64
# Set checkpoint to load from; set to None if starting from scratch
# NOTE(review): absolute Windows path — breaks on any other machine.
loadFilename = r"C:\Users\wxwyl\Desktop\nontopic-mostlikely\64000_checkpoint.tar"
checkpoint_iter = 64000
#loadFilename = os.path.join(save_dir, model_name, corpus_name,
#                            '{}-{}_{}'.format(encoder_n_layers, decoder_n_layers, hidden_size),
#                            '{}_checkpoint.tar'.format(checkpoint_iter))
# Load model if a loadFilename is provided
if loadFilename:
    # If loading on same machine the model was trained on
    checkpoint = torch.load(loadFilename)
    # If loading a model trained on GPU to CPU
    #checkpoint = torch.load(loadFilename, map_location=torch.device('cpu'))
    encoder_sd = checkpoint['en']
    decoder_sd = checkpoint['de']
    encoder_optimizer_sd = checkpoint['en_opt']
    decoder_optimizer_sd = checkpoint['de_opt']
    embedding_sd = checkpoint['embedding']
    # Restore the vocabulary exactly as it was at training time.
    d.voc.__dict__ = checkpoint['voc_dict']
    #print("not empty")
print('Building encoder and decoder ...')
# Initialize word embeddings
embedding = nn.Embedding(d.voc.num_words, hidden_size)
if loadFilename:
    embedding.load_state_dict(embedding_sd)
# Initialize encoder & decoder models
encoder = seq2seq.EncoderRNN(hidden_size, embedding, encoder_n_layers, dropout)
decoder = seq2seq.LuongAttnDecoderRNN(attn_model, embedding, hidden_size, d.voc.num_words, decoder_n_layers, dropout)
if loadFilename:
    encoder.load_state_dict(encoder_sd)
    decoder.load_state_dict(decoder_sd)
# Use appropriate device
encoder = encoder.to(device)
decoder = decoder.to(device)
print('Models built and ready to go!')
# Inference only: disable dropout and start the interactive chat loop.
encoder.eval()
decoder.eval()
searcher = seq2seq.GreedySearchDecoder(encoder, decoder)
seq2seq.evaluateInput(encoder, decoder, searcher, d.voc)
|
# Copyright (c) 2012, the Dart project authors. Please see the AUTHORS file
# for details. All rights reserved. Use of this source code is governed by a
# BSD-style license that can be found in the LICENSE file.
# This file contains a set of utilities functions used by other Python-based
# scripts.
from __future__ import print_function
import contextlib
import datetime
from functools import total_ordering
import glob
import imp
import json
import os
import platform
import re
import shutil
import subprocess
import sys
import tarfile
import tempfile
import uuid
try:
    # Not available on Windows.
    import resource
except ImportError:
    # Fix: narrowed the original bare `except:` (which also swallowed
    # KeyboardInterrupt/SystemExit) to the only error import can raise here.
    # Callers must tolerate `resource` being undefined on Windows.
    pass
# Semantic-version regex (semver.org) with named groups for each component.
SEMANTIC_VERSION_PATTERN = r'^(?P<major>0|[1-9]\d*)\.(?P<minor>0|[1-9]\d*)\.(?P<patch>0|[1-9]\d*)(?:-(?P<prerelease>(?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*)(?:\.(?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*))*))?(?:\+(?P<buildmetadata>[0-9a-zA-Z-]+(?:\.[0-9a-zA-Z-]+)*))?$'

# To eliminate clashing with older archived builds on bleeding edge we add
# a base number bigger the largest svn revision (this also gives us an easy
# way of seeing if an archive comes from git based or svn based commits).
GIT_NUMBER_BASE = 100000

# Mapping table between build mode and build configuration.
BUILD_MODES = {
    'debug': 'Debug',
    'release': 'Release',
    'product': 'Product',
}

# Mapping table between sanitizer name and build configuration suffix.
BUILD_SANITIZERS = {
    None: '',
    'none': '',
    'asan': 'ASAN',
    'lsan': 'LSAN',
    'msan': 'MSAN',
    'tsan': 'TSAN',
    'ubsan': 'UBSAN',
}

# Mapping table between OS and build output location.
BUILD_ROOT = {
    'win32': 'out',
    'linux': 'out',
    'freebsd': 'out',
    'macos': 'xcodebuild',
}

# Note: gn expects these to be lower case.
ARCH_FAMILY = {
    'ia32': 'ia32',
    'x64': 'ia32',
    'arm': 'arm',
    'arm64': 'arm',
    'arm_x64': 'arm',
    'arm_arm64': 'arm',
    'simarm': 'ia32',
    'simarm64': 'ia32',
    'simarm_x64': 'ia32',
    'simarm_arm64': 'arm',
    'x64c': 'ia32',
    'arm64c': 'arm',
    'simarm64c': 'ia32',
    'simriscv32': 'ia32',
    'simriscv64': 'ia32',
    'simx64': 'arm',
    'simx64c': 'arm',
    'riscv32': 'riscv',
    'riscv64': 'riscv',
}

# Repository layout anchors, resolved relative to this file / the cwd.
BASE_DIR = os.path.abspath(os.path.join(os.curdir, '..'))
DART_DIR = os.path.abspath(os.path.join(__file__, '..', '..'))
VERSION_FILE = os.path.join(DART_DIR, 'tools', 'VERSION')
def GetArchFamily(arch):
    """Map a target architecture name to its family via ARCH_FAMILY."""
    return ARCH_FAMILY[arch]


def GetBuildDir(host_os):
    """Return the build output root for host_os via BUILD_ROOT."""
    return BUILD_ROOT[host_os]


def GetBuildMode(mode):
    """Map a build mode name to its configuration name via BUILD_MODES."""
    return BUILD_MODES[mode]


def GetBuildSanitizer(sanitizer):
    """Map a sanitizer name (or None) to its suffix via BUILD_SANITIZERS."""
    return BUILD_SANITIZERS[sanitizer]


def GetBaseDir():
    """Return BASE_DIR (the parent of the current working directory)."""
    return BASE_DIR
def GetBotUtils(repo_path=DART_DIR):
    '''Dynamically load the tools/bots/bot_utils.py python module.'''
    # The 'imp' module was deprecated since Python 3.4 and removed in 3.12;
    # use importlib.util instead, mirroring imp.load_source semantics.
    import importlib.util
    path = os.path.join(repo_path, 'tools', 'bots', 'bot_utils.py')
    spec = importlib.util.spec_from_file_location('bot_utils', path)
    module = importlib.util.module_from_spec(spec)
    # imp.load_source registered the module in sys.modules; keep doing so.
    sys.modules['bot_utils'] = module
    spec.loader.exec_module(module)
    return module
def GetMinidumpUtils(repo_path=DART_DIR):
    '''Dynamically load the tools/minidump.py python module.'''
    # The 'imp' module was deprecated since Python 3.4 and removed in 3.12;
    # use importlib.util instead, mirroring imp.load_source semantics.
    import importlib.util
    path = os.path.join(repo_path, 'tools', 'minidump.py')
    spec = importlib.util.spec_from_file_location('minidump', path)
    module = importlib.util.module_from_spec(spec)
    # imp.load_source registered the module in sys.modules; keep doing so.
    sys.modules['minidump'] = module
    spec.loader.exec_module(module)
    return module
@total_ordering
class Version(object):
    """A Dart SDK version: release channel plus semver-style components.

    All components are stored as strings; comparisons convert the numeric
    ones with int(). total_ordering derives the remaining comparison
    operators from __eq__ and __lt__.
    """

    def __init__(self,
                 channel=None,
                 major=None,
                 minor=None,
                 patch=None,
                 prerelease=None,
                 prerelease_patch=None,
                 version=None):
        self.channel = channel
        self.major = major
        self.minor = minor
        self.patch = patch
        self.prerelease = prerelease
        self.prerelease_patch = prerelease_patch
        # A full version string, when given, overrides the individual parts.
        if version:
            self.set_version(version)

    def set_version(self, version):
        """Parse a semantic version string into this object's fields."""
        match = re.match(SEMANTIC_VERSION_PATTERN, version)
        assert match, '%s must be a valid version' % version
        self.channel = 'stable'
        self.major = match['major']
        self.minor = match['minor']
        self.patch = match['patch']
        self.prerelease = '0'
        self.prerelease_patch = '0'
        if match['prerelease']:
            # NOTE(review): assumes the prerelease part has exactly the
            # layout '<prerelease>.<prerelease_patch>.<channel>'; other
            # layouts raise IndexError — confirm against callers.
            subversions = match['prerelease'].split('.')
            self.prerelease = subversions[0]
            self.prerelease_patch = subversions[1]
            self.channel = subversions[2]

    def __str__(self):
        # Stable versions print as 'M.m.p'; other channels append
        # '-prerelease.prerelease_patch.channel'.
        result = '%s.%s.%s' % (self.major, self.minor, self.patch)
        if self.channel != 'stable':
            result += '-%s.%s.%s' % (self.prerelease, self.prerelease_patch,
                                     self.channel)
        return result

    def __eq__(self, other):
        return self.channel == other.channel and \
               self.major == other.major and \
               self.minor == other.minor and \
               self.patch == other.patch and \
               self.prerelease == other.prerelease and \
               self.prerelease_patch == other.prerelease_patch

    def __lt__(self, other):
        # Compare major.minor.patch numerically first.
        if int(self.major) < int(other.major):
            return True
        if int(self.major) > int(other.major):
            return False
        if int(self.minor) < int(other.minor):
            return True
        if int(self.minor) > int(other.minor):
            return False
        if int(self.patch) < int(other.patch):
            return True
        if int(self.patch) > int(other.patch):
            return False
        # The stable channel is ahead of the other channels on the same triplet.
        if self.channel != 'stable' and other.channel == 'stable':
            return True
        if self.channel == 'stable' and other.channel != 'stable':
            return False
        # The be channel is ahead of the other channels on the same triplet.
        if self.channel != 'be' and other.channel == 'be':
            return True
        if self.channel == 'be' and other.channel != 'be':
            return False
        # Same channel ordering: fall back to the prerelease counters.
        if int(self.prerelease) < int(other.prerelease):
            return True
        if int(self.prerelease) > int(other.prerelease):
            return False
        if int(self.prerelease_patch) < int(other.prerelease_patch):
            return True
        if int(self.prerelease_patch) > int(other.prerelease_patch):
            return False
        return False
def GuessOS():
    """Guess the host operating system.

    Returns one of 'linux', 'macos', 'win32', 'freebsd', 'openbsd' or
    'solaris', or None when the platform is not recognized.
    """
    system = platform.system()
    if system in ('Windows', 'Microsoft'):
        # On Windows Vista platform.system() can return 'Microsoft' with some
        # versions of Python, see http://bugs.python.org/issue1082 for details.
        return 'win32'
    return {
        'Linux': 'linux',
        'Darwin': 'macos',
        'FreeBSD': 'freebsd',
        'OpenBSD': 'openbsd',
        'SunOS': 'solaris',
    }.get(system)
def IsRosetta():
    """True when this interpreter runs under Rosetta translation.

    I.e. python3 is an x64 executable running on an arm64 Mac.
    """
    if platform.system() != 'Darwin':
        return False
    proc = subprocess.Popen(['sysctl', '-in', 'sysctl.proc_translated'],
                            stdout=subprocess.PIPE,
                            stderr=subprocess.STDOUT)
    stdout, _ = proc.communicate()
    return stdout.decode('utf-8').strip() == '1'
# Returns the architectures that can run on the current machine.
def HostArchitectures():
    """Return the architectures runnable on this machine.

    The first entry is the preferred/native architecture; later entries can
    also execute (natively or via emulation). Raises when the machine type
    is not recognized.
    """
    m = platform.machine()
    if platform.system() == 'Darwin':
        if m == 'arm64' or IsRosetta():
            # ARM64 Macs also support X64.
            return ['arm64', 'x64']
        if m == 'x86_64':
            # X64 Macs no longer support IA32.
            return ['x64']
    # Icky use of CIPD_ARCHITECTURE should be effectively dead whenever the
    # Python on bots becomes native ARM64.
    if ((platform.system() == 'Windows') and
        (os.environ.get("CIPD_ARCHITECTURE") == "arm64")):
        # ARM64 Windows also can emulate X64.
        return ['arm64', 'x64']
    if m in ['aarch64', 'arm64', 'arm64e', 'ARM64']:
        return ['arm64']
    if m in ['armv7l', 'armv8l']:
        return ['arm']
    if m in ['i386', 'i686', 'ia32', 'x86']:
        return ['x86', 'ia32']
    if m in ['x64', 'x86-64', 'x86_64', 'amd64', 'AMD64']:
        return ['x64', 'x86', 'ia32']
    # Bug fix: the machine/system values were previously passed as extra
    # Exception() arguments instead of being formatted into the message.
    raise Exception('Failed to determine host architectures for %s %s' %
                    (platform.machine(), platform.system()))
def GuessArchitecture():
    """Guess the host architecture: the preferred entry of HostArchitectures()."""
    preferred = HostArchitectures()
    return preferred[0]
def GuessCpus():
    """Guess the number of CPUs on this machine.

    Honors the DART_NUMBER_OF_CORES override, then tries /proc/cpuinfo
    (Linux), hostinfo (macOS) and NUMBER_OF_PROCESSORS (Windows), finally
    falling back to 2.
    """
    env_cores = os.getenv('DART_NUMBER_OF_CORES')
    if env_cores is not None:
        return int(env_cores)
    if os.path.exists('/proc/cpuinfo'):
        counted = subprocess.check_output(
            'grep -E \'^processor\' /proc/cpuinfo | wc -l', shell=True)
        return int(counted)
    if os.path.exists('/usr/bin/hostinfo'):
        counted = subprocess.check_output(
            '/usr/bin/hostinfo |'
            ' grep "processors are logically available." |'
            ' awk "{ print \$1 }"',
            shell=True)
        return int(counted)
    win_cpu_count = os.getenv("NUMBER_OF_PROCESSORS")
    if win_cpu_count:
        return int(win_cpu_count)
    return 2
def IsWindows():
    """True when the host operating system is Windows."""
    return 'win32' == GuessOS()
def ReadLinesFrom(name):
    """Read a text file into a list of stripped, non-empty lines.

    Everything after a '#' on a line is treated as a comment and removed;
    lines that end up empty are dropped.
    """
    result = []
    # Bug fix: the file handle used to be leaked (open() without close);
    # use a context manager so it is always closed.
    with open(name) as f:
        for line in f:
            if '#' in line:
                line = line[:line.find('#')]
            line = line.strip()
            if len(line) == 0:
                continue
            result.append(line)
    return result
def ListArgCallback(option, value, parser):
    """optparse callback: consume arguments up to (excluding) the next '--'.

    The consumed arguments are removed from parser.rargs and stored on
    parser.values under the option's dest.
    """
    if value is None:
        value = []
    for argument in parser.rargs:
        if argument.startswith('--'):
            break
        value.append(argument)
    del parser.rargs[:len(value)]
    setattr(parser.values, option.dest, value)
def ListDartArgCallback(option, value, parser):
    """optparse callback: consume '-'-prefixed arguments, stopping at '--'.

    Consumption ends at the first non-'-' argument or at a '--' argument;
    consumed arguments are removed from parser.rargs and stored on
    parser.values under the option's dest.
    """
    if value is None:
        value = []
    for argument in parser.rargs:
        if argument.startswith('--') or argument[0] != '-':
            break
        value.append(argument)
    del parser.rargs[:len(value)]
    setattr(parser.values, option.dest, value)
def IsCrossBuild(target_os, arch):
    """True when building for an OS or architecture the host cannot run.

    Simulator architectures ('sim*') are never cross builds; a trailing 'c'
    (compressed pointers) is ignored for the host-architecture check.
    """
    if (target_os not in (None, 'host')) and (target_os != GuessOS()):
        return True
    if arch.startswith('sim'):
        return False
    # Strip the 'compressed' suffix before comparing with host support.
    effective_arch = arch[:-1] if arch.endswith('c') else arch
    return effective_arch not in HostArchitectures()
def GetBuildConf(mode, arch, conf_os=None, sanitizer=None):
    """Return the build configuration name, e.g. 'ReleaseX64' or 'DebugXARM'.

    Cross-OS builds embed the target OS name; otherwise the sanitizer infix
    and an 'X' cross-build marker are included as applicable.
    """
    if conf_os not in (None, 'host') and conf_os != GuessOS():
        return '{}{}{}'.format(GetBuildMode(mode), conf_os.title(),
                               arch.upper())
    # Ask for a cross build if the host and target architectures don't match.
    cross_build = 'X' if IsCrossBuild(conf_os, arch) else ''
    return '{}{}{}{}'.format(GetBuildMode(mode), GetBuildSanitizer(sanitizer),
                             cross_build, arch.upper())
def GetBuildRoot(host_os, mode=None, arch=None, target_os=None, sanitizer=None):
    """Return the build output directory, optionally configuration-specific.

    Without a mode only the per-OS root (e.g. 'out') is returned.
    """
    root = GetBuildDir(host_os)
    if not mode:
        return root
    return os.path.join(root, GetBuildConf(mode, arch, target_os, sanitizer))
def GetBuildSdkBin(host_os, mode=None, arch=None, target_os=None):
    """Return the path of the built SDK's bin directory for a configuration."""
    return os.path.join(GetBuildRoot(host_os, mode, arch, target_os),
                        'dart-sdk', 'bin')
def GetShortVersion(version_file=None):
    """Return 'major.minor.patch.prerelease.prerelease_patch'.

    Returns None when the VERSION file cannot be parsed, mirroring
    GetSemanticSDKVersion instead of raising AttributeError on the
    None returned by ReadVersionFile.
    """
    version = ReadVersionFile(version_file)
    if not version:
        # ReadVersionFile already printed a warning.
        return None
    return ('{}.{}.{}.{}.{}'.format(version.major, version.minor, version.patch,
                                    version.prerelease,
                                    version.prerelease_patch))
def GetSemanticSDKVersion(no_git_hash=False,
                          version_file=None,
                          git_revision_file=None):
    """Return the semantic SDK version string, or None if VERSION is unreadable.

    'be' builds get an '-edge[.<git revision>]' suffix, beta/dev builds a
    '-<prerelease>.<prerelease_patch>.<channel>' suffix, stable builds none.
    """
    version = ReadVersionFile(version_file)
    if not version:
        return None
    if version.channel == 'be':
        if no_git_hash:
            suffix = '-edge'
        else:
            suffix = '-edge.{}'.format(GetGitRevision(git_revision_file))
    elif version.channel in ('beta', 'dev'):
        suffix = '-{}.{}.{}'.format(version.prerelease,
                                    version.prerelease_patch, version.channel)
    else:
        assert version.channel == 'stable'
        suffix = ''
    return '{}.{}.{}{}'.format(version.major, version.minor, version.patch,
                               suffix)
def GetVersion(no_git_hash=False, version_file=None, git_revision_file=None):
    """Alias for GetSemanticSDKVersion."""
    return GetSemanticSDKVersion(no_git_hash=no_git_hash,
                                 version_file=version_file,
                                 git_revision_file=git_revision_file)
# The editor used to produce the VERSION file put on gcs. We now produce this
# in the bots archiving the sdk.
# The content looks like this:
#{
#  "date": "2015-05-28",
#  "version": "1.11.0-edge.131653",
#  "revision": "535394c2657ede445142d8a92486d3899bbf49b5"
#}
def GetVersionFileContent():
    """Return the JSON payload written to the archived VERSION file."""
    payload = {
        'date': str(datetime.date.today()),
        'version': GetVersion(),
        'revision': GetGitRevision(),
    }
    return json.dumps(payload, indent=2)
def GetChannel(version_file=None):
    """Return the release channel recorded in the VERSION file."""
    return ReadVersionFile(version_file).channel
def GetUserName():
    """Return the current user's login name from the environment ('' if unset)."""
    variable = 'USERNAME' if sys.platform == 'win32' else 'USER'
    return os.environ.get(variable, '')
def ReadVersionFile(version_file=None):
    """Parse a tools/VERSION-style file into a Version object.

    Returns None (after printing a warning) when the file cannot be read
    or any of the six expected fields is missing.
    """

    def match_against(pattern, file_content):
        match = re.search(pattern, file_content, flags=re.MULTILINE)
        if match:
            return match.group(1)
        return None

    if version_file == None:
        version_file = VERSION_FILE
    content = None
    try:
        with open(version_file) as fd:
            content = fd.read()
    except Exception:
        print('Warning: Could not read VERSION file ({})'.format(version_file))
        return None
    # Raw strings: '\d' in a plain string is an invalid escape (W605).
    channel = match_against(r'^CHANNEL ([A-Za-z0-9]+)$', content)
    major = match_against(r'^MAJOR (\d+)$', content)
    minor = match_against(r'^MINOR (\d+)$', content)
    patch = match_against(r'^PATCH (\d+)$', content)
    prerelease = match_against(r'^PRERELEASE (\d+)$', content)
    prerelease_patch = match_against(r'^PRERELEASE_PATCH (\d+)$', content)
    # Bug fix: PATCH was previously omitted from this validity check, so a
    # VERSION file without it produced a Version with patch=None.
    if (channel and major and minor and patch and prerelease and
            prerelease_patch):
        return Version(channel, major, minor, patch, prerelease,
                       prerelease_patch)
    print('Warning: VERSION file ({}) has wrong format'.format(version_file))
    return None
# Our schema for releases and archiving is based on an increasing
# sequence of numbers. In the svn world this was simply the revision of a
# commit, which would always give us a one to one mapping between the number
# and the commit. This was true across branches as well, so a number used
# to archive a build was always unique and unambiguous.
# In git there is no such global number, so we loosen the requirement a bit.
# We only use numbers on the master branch (bleeding edge). On branches
# we use the version number instead for archiving purposes.
# The number on master is the count of commits on the master branch.
def GetArchiveVersion(version_file=None):
    """Return the value used to archive this build.

    'be'/'integration' channels use the monotonically increasing git number;
    other channels use the semantic SDK version.
    """
    # Bug fix: this used to call ReadVersionFile(version_file=None), silently
    # ignoring the caller-supplied version_file argument.
    version = ReadVersionFile(version_file)
    if not version:
        # Bug fix: raising a plain string is a TypeError in Python 3.
        raise Exception(
            'Could not get the archive version, parsing the version file failed'
        )
    if version.channel in ['be', 'integration']:
        return GetGitNumber()
    return GetSemanticSDKVersion()
def GetGitRevision(git_revision_file=None, repo_path=DART_DIR):
    """Return the full 40-character git hash of HEAD, or None on failure.

    Prefers the tools/GIT_REVISION file (written into tarball builds);
    falls back to running `git rev-parse HEAD` in repo_path.
    """
    # When building from tarball use tools/GIT_REVISION
    if git_revision_file is None:
        git_revision_file = os.path.join(repo_path, 'tools', 'GIT_REVISION')
    try:
        with open(git_revision_file) as fd:
            return fd.read().strip()
    except:
        # No GIT_REVISION file; fall through to asking git directly.
        pass
    # shell=True is needed on Windows so 'git' resolves via PATH.
    p = subprocess.Popen(['git', 'rev-parse', 'HEAD'],
                         stdout=subprocess.PIPE,
                         stderr=subprocess.PIPE,
                         shell=IsWindows(),
                         cwd=repo_path)
    out, err = p.communicate()
    # TODO(https://github.com/dart-lang/sdk/issues/51865): Don't ignore errors.
    # if p.wait() != 0:
    #     raise Exception('git rev-parse failed: ' + str(err))
    revision = out.decode('utf-8').strip()
    # We expect a full git hash
    if len(revision) != 40:
        print('Warning: Could not parse git commit, output was {}'.format(
            revision),
              file=sys.stderr)
        return None
    return revision
def GetShortGitHash(repo_path=DART_DIR):
    """Return the abbreviated (10-char) git hash of HEAD, or None on failure."""
    process = subprocess.Popen(['git', 'rev-parse', '--short=10', 'HEAD'],
                               stdout=subprocess.PIPE,
                               stderr=subprocess.PIPE,
                               shell=IsWindows(),
                               cwd=repo_path)
    out, err = process.communicate()
    if process.wait() != 0:
        # TODO(https://github.com/dart-lang/sdk/issues/51865): Don't ignore errors.
        # raise Exception('git rev-parse failed: ' + str(err))
        return None
    return out.decode('utf-8').strip()
def GetLatestDevTag(repo_path=DART_DIR):
    """Return the most recent dev-channel tag name, or None on failure."""
    # We used the old, pre-git2.13 refname:strip here since lstrip will fail on
    # older git versions. strip is an alias for lstrip in later versions.
    cmd = [
        'git',
        'for-each-ref',
        'refs/tags/*dev*',
        '--sort=-taggerdate',
        "--format=%(refname:strip=2)",
        '--count=1',
    ]
    p = subprocess.Popen(cmd,
                         stdout=subprocess.PIPE,
                         stderr=subprocess.PIPE,
                         shell=IsWindows(),
                         cwd=repo_path)
    out, err = p.communicate()
    if p.wait() != 0:
        # Bug fix: the warning used to reference 'tag' before it was assigned,
        # raising NameError instead of reporting the git failure; report the
        # captured stderr instead.
        print('Warning: Could not get the most recent dev branch tag {}'.format(
            err.decode('utf-8', errors='replace').strip()),
              file=sys.stderr)
        return None
    tag = out.decode('utf-8').strip()
    return tag
def GetGitTimestamp(git_timestamp_file=None, repo_path=DART_DIR):
    """Return the commit date of HEAD, or None on failure.

    Prefers the tools/GIT_TIMESTAMP file (written into tarball builds);
    falls back to `git log` in repo_path.
    """
    # When building from tarball use tools/GIT_TIMESTAMP
    if git_timestamp_file is None:
        git_timestamp_file = os.path.join(repo_path, 'tools', 'GIT_TIMESTAMP')
    try:
        with open(git_timestamp_file) as fd:
            return fd.read().strip()
    except:
        pass
    process = subprocess.Popen(['git', 'log', '-n', '1', '--pretty=format:%cd'],
                               stdout=subprocess.PIPE,
                               stderr=subprocess.PIPE,
                               shell=IsWindows(),
                               cwd=repo_path)
    out, err = process.communicate()
    if process.wait() != 0:
        # TODO(https://github.com/dart-lang/sdk/issues/51865): Don't ignore errors.
        # raise Exception('git log failed: ' + str(err))
        return None
    return out.decode('utf-8').strip()
def GetGitNumber(repo_path=DART_DIR):
    """Return GIT_NUMBER_BASE plus the commit count of HEAD, or None.

    The base offset keeps git-derived numbers above all historic svn
    revisions (see GIT_NUMBER_BASE).
    """
    p = subprocess.Popen(['git', 'rev-list', 'HEAD', '--count'],
                         stdout=subprocess.PIPE,
                         stderr=subprocess.PIPE,
                         shell=IsWindows(),
                         cwd=repo_path)
    out, err = p.communicate()
    # TODO(https://github.com/dart-lang/sdk/issues/51865): Don't ignore errors.
    # if p.wait() != 0:
    #     raise Exception('git rev-list failed: ' + str(err))
    number = out.decode('utf-8').strip()
    try:
        number = int(number)
        return number + GIT_NUMBER_BASE
    # Narrowed from a bare except: only the int() conversion is expected to
    # fail here, and a bare except would also swallow KeyboardInterrupt.
    except ValueError:
        print(
            'Warning: Could not parse git count, output was {}'.format(number),
            file=sys.stderr)
    return None
def ParseGitInfoOutput(output):
    """Given a git log, determine the latest corresponding svn revision."""
    for line in output.split('\n'):
        fields = line.split()
        if not fields or fields[0] != 'git-svn-id:':
            continue
        return fields[1].split('@')[1]
    return None
def ParseSvnInfoOutput(output):
    """Extract the 'Last Changed Rev' number from `svn info` output, or None."""
    # Raw string: '\d' in a plain string is an invalid escape (W605).
    revision_match = re.search(r'Last Changed Rev: (\d+)', output)
    if revision_match:
        return revision_match.group(1)
    return None
def RewritePathSeparator(path, workspace):
    """Resolve a '/'-separated test path against workspace.

    Paths in test files always use '/' as separator; convert to the native
    separator, join with workspace, and raise when the result is missing.
    """
    if '/' in path:
        path = os.sep.join(path.split('/'))
    path = os.path.join(workspace, path)
    if not os.path.exists(path):
        raise Exception(path)
    return path
def ParseTestOptions(pattern, source, workspace):
    """Return the space-separated options of pattern's first match in source,
    each rewritten against workspace, or None when there is no match."""
    match = pattern.search(source)
    if match is None:
        return None
    return [
        RewritePathSeparator(option, workspace)
        for option in match.group(1).split(' ')
    ]
def ParseTestOptionsMultiple(pattern, source, workspace):
    """Return one rewritten option list per match of pattern in source.

    Empty matches yield empty lists; returns None when there is no match.
    """
    matches = pattern.findall(source)
    if not matches:
        return None
    result = []
    for match in matches:
        if match:
            result.append([
                RewritePathSeparator(option, workspace)
                for option in match.split(' ')
            ])
        else:
            result.append([])
    return result
def CheckedUnlink(name):
    """Unlink a file without throwing an exception."""
    try:
        os.unlink(name)
    except OSError as error:
        sys.stderr.write('os.unlink() ' + str(error) + '\n')
# TODO(42528): Can we remove this? It's basically just an alias for Exception.
class Error(Exception):
    """Generic error base class for this module."""
    pass
class ToolError(Exception):
    """Deprecated exception kept for backwards compatibility; use Error."""

    def __init__(self, value):
        # The wrapped payload; __str__ shows its repr.
        self.value = value

    def __str__(self):
        return f'{self.value!r}'
def IsCrashExitCode(exit_code):
    """True when exit_code indicates a crash.

    On Windows crashes set the high bit of the exit code; elsewhere a
    negative code means the process died from a signal.
    """
    if IsWindows():
        return exit_code & 0x80000000
    return exit_code < 0
def DiagnoseExitCode(exit_code, command):
    """Write a crash diagnostic to stderr when exit_code indicates a crash."""
    if not IsCrashExitCode(exit_code):
        return
    sys.stderr.write(
        'Command: {}\nCRASHED with exit code {} (0x{:x})\n'.format(
            ' '.join(command), exit_code, exit_code & 0xffffffff))
def ExecuteCommand(cmd):
    """Execute a command in a subprocess.

    Returns (returncode, (stdout, stderr)); raises when the command fails.
    """
    print('Executing: ' + ' '.join(cmd))
    proc = subprocess.Popen(cmd,
                            stdout=subprocess.PIPE,
                            stderr=subprocess.PIPE,
                            shell=IsWindows())
    output = proc.communicate()
    if proc.returncode != 0:
        raise Exception('Execution failed: ' + str(output))
    return proc.returncode, output
# The checked-in SDKs are documented at
# https://github.com/dart-lang/sdk/wiki/The-checked-in-SDK-in-tools
def CheckedInSdkPath():
    """Return the path of the checked-in Dart SDK, or None if unsupported.

    We don't use the normal macos/linux/win32 directory names here; the
    mapping follows the names the download_from_google_storage script uses.
    """
    osdict = {'Darwin': 'mac', 'Linux': 'linux', 'Windows': 'win'}
    system = platform.system()
    if system not in osdict:
        sys.stderr.write(
            'WARNING: platform "{}" not supported\n'.format(system))
        return None
    tools_dir = os.path.dirname(os.path.realpath(__file__))
    return os.path.join(tools_dir, 'sdks', 'dart-sdk')
def CheckedInSdkExecutable():
    """Return the path of the dart binary inside the checked-in SDK."""
    binary = 'dart.exe' if IsWindows() else 'dart'
    return os.path.join(CheckedInSdkPath(), 'bin', binary)
def CheckedInSdkCheckExecutable():
    """Run canary.dart with the checked-in SDK; True when it exits cleanly."""
    executable = CheckedInSdkExecutable()
    canary_script = os.path.join(
        os.path.dirname(os.path.realpath(__file__)), 'canary.dart')
    try:
        with open(os.devnull, 'wb') as silent_sink:
            return subprocess.call([executable, canary_script],
                                   stdout=silent_sink) == 0
    except OSError:
        # The SDK binary is missing or not executable.
        return False
def CheckLinuxCoreDumpPattern(fatal=False):
    """Verify /proc/sys/kernel/core_pattern is configured as 'core.%p'.

    Returns True when correct; otherwise raises (fatal=True) or prints the
    problem and returns False.
    """
    core_pattern_file = '/proc/sys/kernel/core_pattern'
    # Bug fix: the file handle used to be leaked (open().read()); use a
    # context manager so it is closed deterministically.
    with open(core_pattern_file) as f:
        core_pattern = f.read()
    expected_core_pattern = 'core.%p'
    if core_pattern.strip() != expected_core_pattern:
        message = (
            'Invalid core_pattern configuration. '
            'The configuration of core dump handling is *not* correct for '
            'a buildbot. The content of {0} must be "{1}" instead of "{2}".'.
            format(core_pattern_file, expected_core_pattern, core_pattern))
        if fatal:
            raise Exception(message)
        print(message)
        return False
    return True
class TempDir(object):
    """Context manager creating a temporary directory, removed on exit."""

    def __init__(self, prefix=''):
        self._temp_dir = None
        self._prefix = prefix

    def __enter__(self):
        # Bug fix: mkdtemp's first positional parameter is *suffix*, so the
        # requested prefix was silently applied as a suffix; pass it by name.
        self._temp_dir = tempfile.mkdtemp(prefix=self._prefix)
        return self._temp_dir

    def __exit__(self, *_):
        shutil.rmtree(self._temp_dir, ignore_errors=True)
class ChangedWorkingDirectory(object):
    """Context manager that chdirs into a directory and restores the old cwd."""

    def __init__(self, working_directory):
        self._working_directory = working_directory

    def __enter__(self):
        self._saved_cwd = os.getcwd()
        print('Enter directory = ', self._working_directory)
        os.chdir(self._working_directory)

    def __exit__(self, *_):
        print('Enter directory = ', self._saved_cwd)
        os.chdir(self._saved_cwd)
class UnexpectedCrash(object):
    """Record of a crashed test: test name, pid and the involved binaries."""

    def __init__(self, test, pid, *binaries):
        self.test = test
        self.pid = pid
        self.binaries = binaries

    def __str__(self):
        return f'Crash({self.test}: {self.pid} {", ".join(self.binaries)})'
class PosixCoreDumpEnabler(object):
    """Raise the core-file size limit to unlimited while the context is active."""

    def __init__(self):
        self._old_limits = None

    def __enter__(self):
        self._old_limits = resource.getrlimit(resource.RLIMIT_CORE)
        resource.setrlimit(resource.RLIMIT_CORE, (-1, -1))

    def __exit__(self, *_):
        # Only restore when __enter__ actually recorded the previous limits.
        if self._old_limits is not None:
            resource.setrlimit(resource.RLIMIT_CORE, self._old_limits)
class LinuxCoreDumpEnabler(PosixCoreDumpEnabler):
    """PosixCoreDumpEnabler that only acts when core_pattern is configured."""

    def __enter__(self):
        # Bump core limits to unlimited if core_pattern is correctly configured.
        if CheckLinuxCoreDumpPattern(fatal=False):
            super().__enter__()

    def __exit__(self, *args):
        CheckLinuxCoreDumpPattern(fatal=False)
        super().__exit__(*args)
class WindowsCoreDumpEnabler(object):
    """This enabler assumes that Dart binary was built with Crashpad support.
    In this case DART_CRASHPAD_CRASHES_DIR environment variable allows to
    specify the location of Crashpad crashes database. Actual minidumps will
    be written into reports subfolder of the database.
    """

    CRASHPAD_DB_FOLDER = os.path.join(DART_DIR, 'crashes')
    DUMPS_FOLDER = os.path.join(CRASHPAD_DB_FOLDER, 'reports')

    def __init__(self):
        pass

    def __enter__(self):
        db_folder = WindowsCoreDumpEnabler.CRASHPAD_DB_FOLDER
        print('INFO: Enabling coredump archiving into {}'.format(db_folder))
        os.environ['DART_CRASHPAD_CRASHES_DIR'] = db_folder

    def __exit__(self, *_):
        del os.environ['DART_CRASHPAD_CRASHES_DIR']
def TryUnlink(file):
    """Remove file, printing (instead of raising) on failure."""
    try:
        os.unlink(file)
    except Exception as error:
        print(f'ERROR: Failed to remove {file}: {error}')
class BaseCoreDumpArchiver(object):
    """This class reads coredumps file written by UnexpectedCrashDumpArchiver
    into the current working directory and uploads all cores and binaries
    listed in it into Cloud Storage (see
    pkg/test_runner/lib/src/test_progress.dart).
    """

    # test.dart will write a line for each unexpected crash into this file.
    _UNEXPECTED_CRASHES_FILE = 'unexpected-crashes'

    def __init__(self, search_dir, output_directory):
        # GCS bucket used when uploading (see _upload).
        self._bucket = 'dart-temp-crash-archive'
        # Crashed binaries are copied next to the cwd (see _cleanup).
        self._binaries_dir = os.getcwd()
        # Directory in which platform-specific subclasses look for cores.
        self._search_dir = search_dir
        self._output_directory = output_directory

    def _safe_cleanup(self):
        """Run _cleanup, reporting (not raising) any failure."""
        try:
            return self._cleanup()
        except Exception as error:
            print('ERROR: Failure during cleanup: {}'.format(error))
            return False

    def __enter__(self):
        print('INFO: Core dump archiving is activated')
        # Cleanup any stale files
        if self._safe_cleanup():
            print('WARNING: Found and removed stale coredumps')

    def __exit__(self, *_):
        """Archive dumps for recorded crashes, always cleaning up afterwards."""
        try:
            crashes = self._find_unexpected_crashes()
            if crashes:
                # If we get a ton of crashes, only archive 10 dumps.
                archive_crashes = crashes[:10]
                print('Archiving coredumps for crash (if possible):')
                for crash in archive_crashes:
                    print('----> {}'.format(crash))
                sys.stdout.flush()
                self._archive(archive_crashes)
            else:
                print('INFO: No unexpected crashes recorded')
                dumps = self._find_all_coredumps()
                if dumps:
                    print('INFO: However there are {} core dumps found'.format(
                        len(dumps)))
                    for dump in dumps:
                        print('INFO: -> {}'.format(dump))
                    print()
        except Exception as error:
            print('ERROR: Failed to archive crashes: {}'.format(error))
            raise
        finally:
            self._safe_cleanup()

    def _archive(self, crashes):
        """Collect the core and binaries of each crash and move/upload them."""
        files = set()
        missing = []
        for crash in crashes:
            files.update(crash.binaries)
            core = self._find_coredump_file(crash)
            if core:
                files.add(core)
            else:
                missing.append(crash)
        # On shards the dumps travel via the isolate output directory;
        # otherwise they are uploaded to Cloud Storage directly.
        if self._output_directory is not None and self._is_shard():
            print(
                "INFO: Moving collected dumps and binaries into output directory\n"
                "INFO: They will be uploaded to isolate server. Look for \"isolated"
                " out\" under the failed step on the build page.\n"
                "INFO: For more information see runtime/docs/infra/coredumps.md"
            )
            self._move(files)
        else:
            print(
                "INFO: Uploading collected dumps and binaries into Cloud Storage\n"
                "INFO: Use `gsutil.py cp from-url to-path` to download them.\n"
                "INFO: For more information see runtime/docs/infra/coredumps.md"
            )
            self._upload(files)
        if missing:
            self._report_missing_crashes(missing, throw=False)

    # todo(athom): move the logic to decide where to copy core dumps into the recipes.
    def _is_shard(self):
        """True when running on a swarming shard rather than a buildbot."""
        return 'BUILDBOT_BUILDERNAME' not in os.environ

    def _report_missing_crashes(self, missing, throw=False):
        """Report crashes whose core file could not be located."""
        missing_as_string = ', '.join([str(c) for c in missing])
        other_files = list(glob.glob(os.path.join(self._search_dir, '*')))
        sys.stderr.write(
            "Could not find crash dumps for '{}' in search directory '{}'.\n"
            "Existing files which *did not* match the pattern inside the search "
            "directory are are:\n {}\n".format(missing_as_string,
                                               self._search_dir,
                                               '\n '.join(other_files)))
        # TODO: Figure out why windows coredump generation does not work.
        # See http://dartbug.com/36469
        if throw and GuessOS() != 'win32':
            raise Exception(
                'Missing crash dumps for: {}'.format(missing_as_string))

    def _get_file_name(self, file):
        """Return (display_name, is_binary) for a collected file."""
        # Sanitize the name: actual cores follow 'core.%d' pattern, crashed
        # binaries are copied next to cores and named
        # 'binary.<mode>_<arch>_<binary_name>'.
        # This should match the code in testing/dart/test_progress.dart
        name = os.path.basename(file)
        (prefix, suffix) = name.split('.', 1)
        is_binary = prefix == 'binary'
        if is_binary:
            (mode, arch, binary_name) = suffix.split('_', 2)
            name = binary_name
        return (name, is_binary)

    def _move(self, files):
        """Move collected files (plus any .pdb) into the output directory."""
        for file in files:
            print('+++ Moving {} to output_directory ({})'.format(
                file, self._output_directory))
            (name, is_binary) = self._get_file_name(file)
            destination = os.path.join(self._output_directory, name)
            shutil.move(file, destination)
            if is_binary and os.path.exists(file + '.pdb'):
                # Also move a PDB file if there is one.
                pdb = os.path.join(self._output_directory, name + '.pdb')
                shutil.move(file + '.pdb', pdb)

    def _tar(self, file):
        """Compress a collected file (plus any .pdb) into a .tar.gz archive."""
        (name, is_binary) = self._get_file_name(file)
        tarname = '{}.tar.gz'.format(name)
        # Compress the file.
        tar = tarfile.open(tarname, mode='w:gz')
        tar.add(file, arcname=name)
        if is_binary and os.path.exists(file + '.pdb'):
            # Also add a PDB file if there is one.
            tar.add(file + '.pdb', arcname=name + '.pdb')
        tar.close()
        return tarname

    def _upload(self, files):
        """Tar each file and upload it to a fresh uuid path in the GCS bucket."""
        bot_utils = GetBotUtils()
        gsutil = bot_utils.GSUtil()
        storage_path = '{}/{}/'.format(self._bucket, uuid.uuid4())
        gs_prefix = 'gs://{}'.format(storage_path)
        http_prefix = 'https://storage.cloud.google.com/{}'.format(storage_path)
        print('\n--- Uploading into {} ({}) ---'.format(gs_prefix, http_prefix))
        for file in files:
            tarname = self._tar(file)
            # Remove / from absolute path to not have // in gs path.
            gs_url = '{}{}'.format(gs_prefix, tarname)
            http_url = '{}{}'.format(http_prefix, tarname)
            try:
                gsutil.upload(tarname, gs_url)
                print('+++ Uploaded {} ({})'.format(gs_url, http_url))
            except Exception as error:
                print('!!! Failed to upload {}, error: {}'.format(
                    tarname, error))
            TryUnlink(tarname)
        print('--- Done ---\n')

    def _find_all_coredumps(self):
        """Return coredumps that were recorded (if supported by the platform).
        This method will be overridden by concrete platform specific implementations.
        """
        return []

    def _find_unexpected_crashes(self):
        """Load coredumps file. Each line has the following format:
        test-name,pid,binary-file1,binary-file2,...
        """
        try:
            with open(BaseCoreDumpArchiver._UNEXPECTED_CRASHES_FILE) as f:
                return [
                    UnexpectedCrash(*ln.strip('\n').split(','))
                    for ln in f.readlines()
                ]
        except:
            # Missing/unreadable file means there were no unexpected crashes.
            return []

    def _cleanup(self):
        """Remove the crashes file and copied binaries; True when any existed."""
        found = False
        if os.path.exists(BaseCoreDumpArchiver._UNEXPECTED_CRASHES_FILE):
            os.unlink(BaseCoreDumpArchiver._UNEXPECTED_CRASHES_FILE)
            found = True
        for binary in glob.glob(os.path.join(self._binaries_dir, 'binary.*')):
            found = True
            TryUnlink(binary)
        return found
class PosixCoreDumpArchiver(BaseCoreDumpArchiver):
    """Archiver for POSIX systems where cores appear as 'core.<pid>' files."""

    def __init__(self, search_dir, output_directory):
        super().__init__(search_dir, output_directory)

    def _cleanup(self):
        found = super()._cleanup()
        for core in glob.glob(os.path.join(self._search_dir, 'core.*')):
            found = True
            TryUnlink(core)
        return found

    def _find_coredump_file(self, crash):
        candidate = os.path.join(self._search_dir, 'core.{}'.format(crash.pid))
        if os.path.exists(candidate):
            return candidate
        return None
class LinuxCoreDumpArchiver(PosixCoreDumpArchiver):
    """Posix archiver searching the current working directory for cores."""

    def __init__(self, output_directory):
        super(LinuxCoreDumpArchiver, self).__init__(os.getcwd(),
                                                    output_directory)


class MacOSCoreDumpArchiver(PosixCoreDumpArchiver):
    """Posix archiver searching /cores, where macOS writes core files."""

    def __init__(self, output_directory):
        super(MacOSCoreDumpArchiver, self).__init__('/cores', output_directory)
class WindowsCoreDumpArchiver(BaseCoreDumpArchiver):
    """Archiver for Windows: collects Crashpad minidumps (*.dmp) and, when
    CDB is available, prints stack traces extracted from them."""

    def __init__(self, output_directory):
        super(WindowsCoreDumpArchiver, self).__init__(
            WindowsCoreDumpEnabler.DUMPS_FOLDER, output_directory)
        # Lazily-built mapping from pid (as str) to minidump path.
        self._dumps_by_pid = None

    # Find CDB.exe in the win_toolchain that we are using.
    def _find_cdb(self):
        """Return the path to cdb.exe from the win_toolchain, or None."""
        win_toolchain_json_path = os.path.join(DART_DIR, 'build',
                                               'win_toolchain.json')
        if not os.path.exists(win_toolchain_json_path):
            return None
        with open(win_toolchain_json_path, 'r') as f:
            win_toolchain_info = json.loads(f.read())
        win_sdk_path = win_toolchain_info['win_sdk']
        # We assume that we are running on 64-bit Windows.
        # Note: x64 CDB can work with both X64 and IA32 dumps.
        cdb_path = os.path.join(win_sdk_path, 'Debuggers', 'x64', 'cdb.exe')
        if not os.path.exists(cdb_path):
            return None
        return cdb_path

    CDBG_PROMPT_RE = re.compile(r'^\d+:\d+>')

    def _dump_all_stacks(self):
        """Print stack traces for every collected minidump using CDB."""
        # On Windows due to crashpad integration crashes do not produce any
        # stacktraces. Dump stack traces from dumps Crashpad collected using
        # CDB (if available).
        cdb_path = self._find_cdb()
        if cdb_path is None:
            return
        dumps = self._find_all_coredumps()
        if not dumps:
            return
        print('### Collected {} crash dumps'.format(len(dumps)))
        for dump in dumps:
            print()
            print('### Dumping stacks from {} using CDB'.format(dump))
            cdb_output = subprocess.check_output(
                '"{}" -z "{}" -kqm -c "!uniqstack -b -v -p;qd"'.format(
                    cdb_path, dump),
                stderr=subprocess.STDOUT)
            # Extract output of uniqstack from the whole output of CDB.
            # Bug fix: check_output returns bytes; decode before splitting on
            # a str separator (bytes.split('\n') raises TypeError).
            output = False
            for line in cdb_output.decode('utf-8',
                                          errors='replace').split('\n'):
                if re.match(WindowsCoreDumpArchiver.CDBG_PROMPT_RE, line):
                    output = True
                elif line.startswith('quit:'):
                    break
                elif output:
                    print(line)
            print()
            print('#############################################')
            print()

    def __exit__(self, *args):
        try:
            self._dump_all_stacks()
        except Exception as error:
            print('ERROR: Unable to dump stacks from dumps: {}'.format(error))
        super(WindowsCoreDumpArchiver, self).__exit__(*args)

    def _cleanup(self):
        """Remove everything in the dumps folder on top of the base cleanup."""
        found = super(WindowsCoreDumpArchiver, self)._cleanup()
        for core in glob.glob(os.path.join(self._search_dir, '*')):
            found = True
            TryUnlink(core)
        return found

    def _find_all_coredumps(self):
        """Return all minidump files in the Crashpad reports folder."""
        # glob.glob already returns a list; no comprehension needed.
        return glob.glob(os.path.join(self._search_dir, '*.dmp'))

    def _find_coredump_file(self, crash):
        """Return the minidump recorded for crash.pid, or None."""
        if self._dumps_by_pid is None:
            # If this function is invoked the first time then look through the
            # directory that contains crashes for all dump files and collect
            # pid -> filename mapping.
            self._dumps_by_pid = {}
            minidump = GetMinidumpUtils()
            pattern = os.path.join(self._search_dir, '*.dmp')
            for core_filename in glob.glob(pattern):
                pid = minidump.GetProcessIdFromDump(core_filename)
                if pid != -1:
                    self._dumps_by_pid[str(pid)] = core_filename
        if crash.pid in self._dumps_by_pid:
            return self._dumps_by_pid[crash.pid]

    def _report_missing_crashes(self, missing, throw=False):
        # Let's only print the debugging information and not throw. We'll do more
        # validation for werfault.exe and throw afterwards.
        super(WindowsCoreDumpArchiver, self)._report_missing_crashes(
            missing, throw=False)
        if throw:
            missing_as_string = ', '.join([str(c) for c in missing])
            raise Exception(
                'Missing crash dumps for: {}'.format(missing_as_string))
class IncreasedNumberOfFileDescriptors(object):
    """Context manager raising RLIMIT_NOFILE to nofiles and restoring it."""

    def __init__(self, nofiles):
        self._old_limits = None
        # (soft, hard) limit pair requested while the context is active.
        self._limits = (nofiles, nofiles)

    def __enter__(self):
        self._old_limits = resource.getrlimit(resource.RLIMIT_NOFILE)
        resource.setrlimit(resource.RLIMIT_NOFILE, self._limits)

    def __exit__(self, *_):
        # Bug fix: this used to restore RLIMIT_CORE, clobbering the core
        # limit and leaking the raised RLIMIT_NOFILE past the context.
        resource.setrlimit(resource.RLIMIT_NOFILE, self._old_limits)
@contextlib.contextmanager
def NooptContextManager():
    """Context manager that does nothing; used when no platform support exists."""
    yield
def CoreDumpArchiver(args):
    """Return a tuple of context managers implementing coredump archiving.

    Archiving is requested with the '--copy-coredumps' flag; the optional
    '--output-directory=<path>' argument selects where dumps are placed.
    Unsupported platforms get a single no-op context manager.
    """
    if '--copy-coredumps' not in args:
        return (NooptContextManager(),)
    prefix = '--output-directory='
    output_directory = next(
        (arg[len(prefix):] for arg in args if arg.startswith(prefix)), None)
    osname = GuessOS()
    if osname == 'linux':
        return (LinuxCoreDumpEnabler(), LinuxCoreDumpArchiver(output_directory))
    if osname == 'macos':
        return (PosixCoreDumpEnabler(), MacOSCoreDumpArchiver(output_directory))
    if osname == 'win32':
        return (WindowsCoreDumpEnabler(),
                WindowsCoreDumpArchiver(output_directory))
    # Remaining platforms have no coredump support.
    return (NooptContextManager(),)
def FileDescriptorLimitIncreaser():
    """Return a context manager raising the fd limit on macOS (no-op elsewhere)."""
    host = GuessOS()
    if host == 'macos':
        return IncreasedNumberOfFileDescriptors(nofiles=10000)
    assert host in ('linux', 'win32')
    return NooptContextManager()
def Main():
    """Print the results of this module's host-introspection helpers."""
    probes = [
        ('GuessOS', GuessOS),
        ('GuessArchitecture', GuessArchitecture),
        ('GuessCpus', GuessCpus),
        ('IsWindows', IsWindows),
        ('GetGitRevision', GetGitRevision),
        ('GetGitTimestamp', GetGitTimestamp),
        ('GetVersionFileContent', GetVersionFileContent),
        ('GetGitNumber', GetGitNumber),
    ]
    for name, probe in probes:
        print('{}() -> '.format(name), probe())


if __name__ == '__main__':
    Main()
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Date : 2020-04-11 15:49:07
# @Author : Fallen (xdd043@qq.com)
# @Link : https://github.com/fallencrasher/python-learning
# @Version : $Id$
# Demonstration: list.insert mutates the list in place and returns None,
# so a function that returns the result of insert always returns None.
data_list = []
def func(arg):
    # insert() returns None; the mutation happens on the shared data_list.
    return data_list.insert(0,arg)
data = func("hhhh")
print(data)  # None
print(data_list)  # ['hhhh']
def func2(a, list1=[]):  # shared mutable default — intentional for this demo
    """Append *a* to list1 in place and return it.

    The default list is created once at definition time, so every call that
    omits list1 mutates the same object — the classic Python gotcha this
    example demonstrates.
    """
    list1 += [a]  # in-place extend; same effect as list1.append(a)
    return list1


list2 = func2(10)
list3 = func2(123, [])
list4 = func2('a')
# print(list1)  # would raise NameError: list1 is local to func2
print(list2)
print(list3)
print(list4)
|
import sys
import os
import warnings
import pandas as pd
import numpy as np
from datetime import datetime
from datetime import date
from datetime import timedelta
import math
import copy
from scipy.optimize import minimize
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
warnings.filterwarnings("ignore")
from COMM import File_Util
# List up CSV files from default folder
base_folder = '../DATA/CSV/futures/'
ex_list = ('FTSE China 50 Total Return', 'iBovespa Futures'
    , 'MSCI Brazil 25-50 Net Return', 'MSCI International EAFE Net'
    , 'MVIS Global Junior Gold Miners TR Net', 'Nifty 50 Futures', 'MVIS Russia TR Net'
    , 'US 10 Year T-Note Futures', 'US 30 Year T-Bond Futures')
datas = File_Util.ReadCSVFiles(base_folder, ex_list)

# Daily-resampled reference table: used to look up the most recent business
# day's value when a sampling date falls on a holiday.
reference_list = datas.resample('D', on='Date2', convention="end")
reference_datas = datas.loc[datas['Date2'].isin(list(reference_list.indices))]
pivoted_reference_datas = reference_datas.pivot(index='Date2', columns='Name', values='Price')
#print(pivoted_reference_datas)

# Build the month-end sampled data.
sample_list = datas.resample('M', on='Date2', convention="end")
sample_datas = datas.loc[datas['Date2'].isin(list(sample_list.indices))]
pivoted_sample_datas = sample_datas.pivot(index='Date2', columns='Name', values='Price')
#print(pivoted_sample_datas)

# Convert the index type from Timestamp to datetime.date.
pivoted_reference_datas.index = [date(index.year, index.month, index.day) for index in pivoted_reference_datas.index]
pivoted_sample_datas.index = [date(index.year, index.month, index.day) for index in pivoted_sample_datas.index]

# When a sampling date is a holiday, fill it with the latest business day's data.
pivoted_inserted_datas = copy.deepcopy(pivoted_sample_datas)
for num, index in enumerate(pivoted_sample_datas.index):
    # Base logic (month granularity): the day before the 1st of the month
    # after next. Past October "month after next" rolls into the next year.
    year = index.year + 1 if index.month > 10 else index.year
    if index.month == 11:
        month = 1
    elif index.month == 12:
        month = 2
    else:
        month = index.month + 2
    # Month-end = the day before the 1st of the month after next.
    next_last_date = date(year, month, 1) + timedelta(days=-1)
    # While the sampled range has not caught up with the reference range...
    #if num + 1 < len(pivoted_sample_datas.index):
    if pivoted_sample_datas.index[-1] < pivoted_reference_datas.index[-1]:
        #print(num, len(pivoted_sample_datas.index), index, next_last_date, pivoted_sample_datas.index[num+1] == next_last_date)
        #print(next_last_date)
        # Next sample is missing (holiday) or differs from the true month-end:
        # insert an empty row for that month-end so it can be filled below.
        if next_last_date > pivoted_sample_datas.index[-1] or pivoted_sample_datas.index[num+1] != next_last_date:
            pivoted_inserted_datas = pd.concat([pivoted_inserted_datas, pd.DataFrame(index=[next_last_date], columns=pivoted_inserted_datas.columns)])
# Newly inserted sampling rows were appended at the end, so re-sort by date.
pivoted_inserted_datas = pivoted_inserted_datas.sort_index(ascending=1)

pivoted_filled_datas = copy.deepcopy(pivoted_inserted_datas)
for column_nm in pivoted_filled_datas.columns:
    for row_nm in pivoted_filled_datas.index:
        # If the cell is a string, convert it to float (strip thousands commas).
        if isinstance(pivoted_filled_datas[column_nm][row_nm], str):
            pivoted_filled_datas[column_nm][row_nm] = float(pivoted_filled_datas[column_nm][row_nm].replace(',',''))
        #print(column_nm, "\t", row_nm, "\t", pivoted_sample_datas[column_nm][row_nm], "\t", pivoted_filled_datas[column_nm][row_nm])
        if math.isnan(pivoted_filled_datas[column_nm][row_nm]) == True:
            # ref_row_nm = copy.copy(row_nm)
            #ref_row_nm = str(row_nm)[:10]
            ref_row_nm = row_nm
            # No data for this date: walk back day by day (at most 10 tries)
            # to the most recent available reference value.
            for loop_cnt in range(10):
                try:
                    float_value = float(pivoted_reference_datas[column_nm][ref_row_nm].replace(',', '')) if isinstance(pivoted_reference_datas[column_nm][ref_row_nm], str) else pivoted_reference_datas[column_nm][ref_row_nm]
                    if math.isnan(float_value) == True:
                        # print("No Data", str(ref_row_nm))
                        #ref_row_nm = str(datetime.strptime(ref_row_nm, '%Y-%m-%d').date() - timedelta(days=1))
                        ref_row_nm = ref_row_nm - timedelta(days=1)
                    else:
                        pivoted_filled_datas[column_nm][row_nm] = float_value
                        break
                except KeyError:
                    # print("KeyError", str(ref_row_nm))
                    #ref_row_nm = str(datetime.strptime(ref_row_nm, '%Y-%m-%d').date() - timedelta(days=1))
                    ref_row_nm = ref_row_nm - timedelta(days=1)
        # Coerce decimals to float for the computations that follow.
        if math.isnan(pivoted_filled_datas[column_nm][row_nm]) == False:
            pivoted_filled_datas[column_nm][row_nm] = float(pivoted_filled_datas[column_nm][row_nm])

# Convert index levels into period-over-period returns.
pivoted_profit_data = pivoted_filled_datas.rolling(window=2).apply(lambda x: x[1] / x[0] - 1)

# Drop rows outside the valid range: any cell whose return computation
# produced NaN invalidates the whole row.
pivoted_droped_data = copy.deepcopy(pivoted_profit_data)
row_list = copy.deepcopy(pivoted_droped_data.index)
for row_nm in row_list:
    for column_nm in pivoted_droped_data.columns:
        if math.isnan(pivoted_droped_data[column_nm][row_nm]) == True:
            pivoted_droped_data.drop(index=row_nm, inplace=True)
            pivoted_filled_datas.drop(index=row_nm, inplace=True)
            break
def ObjectiveVol(rets, objective_type, target, lb, ub):
    """Solve a constrained portfolio-weight optimization over a return window.

    rets: DataFrame of per-period returns (index coercible to datetime).
    objective_type: 1 = equal risk contribution, 2 = minimum variance,
        3 = most diversified portfolio (any other value falls back to 1).
    target: annualized volatility target, enforced by a pair of inequality
        constraints that pin annualized vol to the target.
    lb, ub: per-asset lower/upper weight bounds.
    Returns (objective value, weight array) from scipy's SLSQP solver.
    """
    rets.index = pd.to_datetime(rets.index)
    covmat = pd.DataFrame.cov(rets)
    var_list = pd.DataFrame.var(rets)

    def annualize_scale(rets):
        # Infer the data frequency from the median spacing of the index.
        med = np.median(np.diff(rets.index.values))
        seconds = int(med.astype('timedelta64[s]').item().total_seconds())
        if seconds < 60:
            freq = 'second'.format(seconds)
        elif seconds < 3600:
            freq = 'minute'.format(seconds // 60)
        elif seconds < 86400:
            freq = 'hour'.format(seconds // 3600)
        elif seconds < 604800:
            freq = 'day'.format(seconds // 86400)
        elif seconds < 2678400:
            freq = 'week'.format(seconds // 604800)
        elif seconds < 7948800:
            freq = 'month'.format(seconds // 2678400)
        else:
            freq = 'quarter'.format(seconds // 7948800)

        def switch1(x):
            # Trading periods per year for each supported frequency.
            # NOTE(review): 'second'/'minute'/'hour' have no entry, so
            # .get() returns None and np.sqrt(None) in the constraints would
            # fail for intraday data — confirm only daily+ data is passed in.
            return {
                'day': 252,
                'week': 52,
                'month': 12,
                'quarter': 4,
            }.get(x)
        return switch1(freq)

    # --- Risk Budget Portfolio Objective Function ---#
    def EqualRiskContribution_objective(x):
        # Minimize the squared pairwise differences of risk contributions.
        variance = x.T @ covmat @ x
        sigma = variance ** 0.5
        mrc = 1 / sigma * (covmat @ x)  # marginal risk contributions
        rc = x * mrc                    # per-asset risk contributions
        #a = np.reshape(rc, (len(rc), 1))
        a = np.reshape(rc.values, (len(rc), 1))
        risk_diffs = a - a.T
        sum_risk_diffs_squared = np.sum(np.square(np.ravel(risk_diffs)))
        return (sum_risk_diffs_squared)

    def MinVariance_objective(x):
        # Minimize portfolio volatility.
        variance = x.T @ covmat @ x
        sigma = variance ** 0.5
        return (sigma)

    def MostDiversifiedPortfolio_objective(x):
        # Minimize portfolio sigma over the weighted sum of asset sigmas
        # (the inverse of the diversification ratio).
        portfolio_variance = x.T @ covmat @ x
        portfolio_sigma = portfolio_variance ** 0.5
        weighted_sum_sigma = x @ (var_list ** 0.5)
        return (portfolio_sigma / weighted_sum_sigma)

    # --- Constraints ---#
    def TargetVol_const_lower(x):
        # >= 0 iff annualized volatility is at least the target.
        variance = x.T @ covmat @ x
        sigma = variance ** 0.5
        sigma_scale = sigma * np.sqrt(annualize_scale(rets))
        vol_diffs = sigma_scale - (target * 1.00)
        return (vol_diffs)

    def TargetVol_const_upper(x):
        # >= 0 iff annualized volatility is at most the target.
        variance = x.T @ covmat @ x
        sigma = variance ** 0.5
        sigma_scale = sigma * np.sqrt(annualize_scale(rets))
        vol_diffs = (target * 1.00) - sigma_scale
        return (vol_diffs)

    def TotalWgt_const(x):
        # Weights must sum to exactly 1 (equality constraint).
        return x.sum() - 1

    # --- Calculate Portfolio ---#
    x0 = np.repeat(1 / covmat.shape[1], covmat.shape[1])  # equal-weight start
    #print(x0)
    lbound = np.repeat(lb, covmat.shape[1])
    ubound = np.repeat(ub, covmat.shape[1])
    bnds = tuple(zip(lbound, ubound))
    # The lower+upper inequality pair together pin annualized vol == target.
    constraints = ({'type': 'ineq', 'fun': TargetVol_const_lower},
                   {'type': 'ineq', 'fun': TargetVol_const_upper},
                   {'type': 'eq', 'fun': TotalWgt_const})
    options = {'ftol': 1e-20, 'maxiter': 5000, 'disp': False}
    obejctive_func = EqualRiskContribution_objective
    if objective_type == 1:
        obejctive_func = EqualRiskContribution_objective
    elif objective_type == 2:
        obejctive_func = MinVariance_objective
    elif objective_type == 3:
        obejctive_func = MostDiversifiedPortfolio_objective
    result = minimize(fun=obejctive_func,
                      x0=x0,
                      method='SLSQP',
                      constraints=constraints,
                      options=options,
                      bounds=bnds)
    #print(result)
    return (result.fun, result.x)
# Run the backtest once per objective (1=ERC, 2=MinVar, 3=MDP) and save
# each run's results to its own Excel workbook.
for objective_type in range(1, 4):
    # Hyper-parameters
    acc_profit = 1
    period_term = 24  # lookback window (months) for the covariance matrix (24 beat 12 and 36)
    # Result containers
    output_weights = {}     # per-decision-date, per-asset weights
    output_vols = {}        # per-decision-date, per-asset variances
    output_profit = []      # per-period portfolio return
    output_acc_profit = []  # per-period cumulative portfolio return
    output_vol = []         # per-period annualized portfolio volatility
    for prd_idx, index in enumerate(pivoted_droped_data.index):
        # The final decision date can only produce weights; beyond it there
        # is no realized return to score against.
        if index > pivoted_droped_data.index[-period_term]:
            print('break', prd_idx + period_term, len(pivoted_droped_data))
            break
        # Allocation decision date (uses closes up to and including it).
        date = pivoted_droped_data.index[prd_idx + period_term - 1]
        # lb = per-asset minimum weight, ub = per-asset maximum weight.
        output_weights[date] = {}
        output_vols[date] = {}
        rst_value, rst_weights = ObjectiveVol(pivoted_droped_data[prd_idx:prd_idx + period_term], objective_type, target=0.1, lb=0.00, ub=1.00)
        asset_vols = pd.DataFrame.var(pivoted_droped_data[prd_idx:prd_idx + period_term])
        # Store this period's results in the containers.
        profit = 0
        for col_idx, column in enumerate(pivoted_droped_data.columns):
            output_weights[date][column] = rst_weights[col_idx]
            output_vols[date][column] = asset_vols.values[col_idx]
            if index < pivoted_droped_data.index[-period_term]:
                # e.g. weights fit on returns 0..11 are applied to period 12's return.
                profit += rst_weights[col_idx] * pivoted_droped_data[column][prd_idx + period_term]
        if index < pivoted_droped_data.index[-period_term]:
            acc_profit *= profit + 1
        # Per-period output rows.
        output_profit.append(profit)
        output_acc_profit.append(acc_profit - 1)
        output_vol.append(math.sqrt(rst_weights.T @ pd.DataFrame.cov(pivoted_droped_data[prd_idx:prd_idx + period_term]) @ rst_weights) * math.sqrt(12))
        print(prd_idx, date, profit, acc_profit - 1, math.sqrt(rst_weights.T @ pd.DataFrame.cov(pivoted_droped_data[prd_idx:prd_idx + period_term]) @ rst_weights) * math.sqrt(12))
    result = pd.DataFrame.from_dict(output_weights).transpose()
    result['Vol'] = output_vol
    result['Profit'] = output_profit
    result['AccProfit'] = output_acc_profit
    if 1:
        File_Util.SaveExcelFiles(file='pivoted_data_%s.xlsx' % (objective_type), obj_dict={'pivoted_reference_datas': pivoted_reference_datas
            , 'pivoted_sample_datas': pivoted_sample_datas, 'pivoted_inserted_datas': pivoted_inserted_datas
            , 'pivoted_filled_datas': pivoted_filled_datas, 'pivoted_profit_data': pivoted_profit_data
            , 'pivoted_droped_data': pivoted_droped_data, 'Result': result , 'AssetVols': pd.DataFrame.from_dict(output_vols).transpose()})
import os
import numpy as np
import pandas as pd
from modisco.hit_scoring import densityadapted_hitscoring
from modisco.util import compute_per_position_ic
import chrombpnet.evaluation.invivo_footprints.run_tfmodisco as run_tfmodisco
import click
def import_tfmodisco_hits(hits_bed):
    """
    Imports the TF-MoDISco hits as a single Pandas DataFrame.
    The `key` column is the name of the originating PFM, and `peak_index` is the
    index of the peak file from which it was originally found.
    """
    column_names = [
        "chrom", "start", "end", "key", "strand", "peak_index",
        "imp_total_signed_score", "imp_total_score", "imp_frac_score",
        "imp_ic_avg_score", "agg_sim", "mod_delta", "mod_precision",
        "mod_percentile", "fann_perclasssum_perc", "fann_perclassavg_perc"
    ]
    return pd.read_csv(
        hits_bed, sep="\t", header=None, index_col=False, names=column_names
    )
@click.command()
@click.option(
    "-o", "--outdir", required=True, help="Path to output directory"
)
@click.option(
    "-i", "--input-length", default=2114,
    help="Length of input sequences for importance scores"
)
@click.option(
    "-c", "--center-cut-size", default=1000,
    help="Length of sequence that was used to run TF-MoDISco"
)
@click.option(
    "--keep-non-acgt", is_flag=True,
    help="If given, don't remove non-ACGT score tracks"
)
@click.option(
    "-m", "--min-ic", default=0.2,
    help="Information content cut-off to use to trim motif hits"
)
@click.option(
    "-mc", "--metacluster-ind", default=0,
    help="Index of the metacluster whose patterns to use for motif assignment; defaults to metacluster 0"
)
@click.option(
    "-p", "--pattern-inds", default=None, type=str,
    help="Comma-delimited list of pattern indices in the metacluster to use for motif assignment; defaults to all patterns in the metacluster"
)
@click.argument("shap_scores_path", nargs=1)
@click.argument("tfm_results_path", nargs=1)
@click.argument("peak_bed_path", nargs=1)
def main(
    shap_scores_path, tfm_results_path, peak_bed_path, outdir,
    input_length, center_cut_size, keep_non_acgt, min_ic, metacluster_ind,
    pattern_inds
):
    """Call motif hits in peaks by scoring DeepSHAP importance tracks against
    TF-MoDISco patterns.

    Writes a raw hit table (tfm_matches_raw.tsv) and a cleaned, genomically
    mapped, IC-trimmed BED-style table (tfm_matches.bed) into `outdir`.
    """
    assert metacluster_ind in (0, 1)
    if pattern_inds is not None:
        pattern_inds = [int(x) for x in pattern_inds.split(",")]
    os.makedirs(outdir, exist_ok=True)
    # Import peaks
    peak_table = pd.read_csv(
        peak_bed_path, sep="\t", header=None, index_col=False,
        usecols=[0, 1, 2, 9],
        names=["peak_chrom", "peak_start", "peak_end", "summit_offset"]
    )
    # Expand peaks to input length, centered on the summit
    peak_table["peak_start"] = \
        (peak_table["peak_start"] + peak_table["summit_offset"]) - \
        (input_length // 2)
    peak_table["peak_end"] = peak_table["peak_start"] + input_length
    print("Importing DeepSHAP scores and TF-MoDISco results...")
    hyp_scores, act_scores, one_hot_seqs, imp_coords = \
        run_tfmodisco.import_shap_scores_part2(
            shap_scores_path, peak_table, center_cut_size=center_cut_size
        )
    tfm_results = run_tfmodisco.import_tfmodisco_results(
        tfm_results_path, hyp_scores, one_hot_seqs, center_cut_size
    )
    assert np.all(imp_coords[:, 2] - imp_coords[:, 1] == input_length)
    peak_table = peak_table.reset_index().drop_duplicates(
        ["peak_chrom", "peak_start", "peak_end"]
    )
    # Importantly, we add the index column before dropping duplicates
    print("Matching up DeepSHAP coordinates and peak coordinates...")
    imp_coords_table = pd.DataFrame(
        imp_coords, columns=["chrom", "start", "end"]
    ).reset_index().drop_duplicates(["chrom", "start", "end"])
    # Importantly, we add the index column before dropping duplicates
    # Map peak indices to importance score tracks
    matched_inds = peak_table.merge(
        imp_coords_table, how="inner",
        # Inner join: can't call hits if there's no importance score track,
        # and don't bother if it's not a peak
        left_on=["peak_chrom", "peak_start", "peak_end"],
        right_on=["chrom", "start", "end"]
    )[["index_x", "index_y"]].values
    # `matched_inds` is an N x 2 array, where each pair is
    # (peak index, score index)
    # Sort by score index
    matched_inds = matched_inds[np.argsort(matched_inds[:, 1])]
    # Limit the importance scores to only those which matched to a peak
    score_inds = matched_inds[:, 1]
    hyp_scores_matched = hyp_scores[score_inds]
    act_scores_matched = act_scores[score_inds]
    one_hot_seqs_matched = one_hot_seqs[score_inds]
    example_to_peak_index = matched_inds[:, 0]
    # `example_to_peak_index` is an array such that if `i` is the index of
    # a sequence in `*_scores_matched`, then `example_to_peak_index[i]` is the
    # index of the matching peak
    print("Preparing the hit scorer...")
    # Only do the first metacluster (positive scores)
    patterns = tfm_results.metacluster_idx_to_submetacluster_results[
        "metacluster_%d" % metacluster_ind
    ].seqlets_to_patterns_result.patterns
    # If specified, use only specific patterns in the metacluster
    if pattern_inds is None:
        pattern_inds = list(range(len(patterns)))
    else:
        patterns = [patterns[i] for i in pattern_inds]
    # Background base frequencies estimated from the matched sequences
    bg_freq = np.mean(one_hot_seqs_matched, axis=(0, 1))
    # Verify that every pattern has sufficiently high IC
    for pattern in patterns:
        pfm = pattern["sequence"].fwd
        ic = compute_per_position_ic(pfm, bg_freq, 0.001)
        assert np.sum(ic >= min_ic) > 0, "The given IC threshold results in an empty motif"
    # Instantiate the hit scorer
    hit_scorer = densityadapted_hitscoring.MakeHitScorer(
        patterns=patterns,
        target_seqlet_size=25,
        bg_freq=bg_freq,
        task_names_and_signs=[("task0", 1 if metacluster_ind == 0 else -1)],
        n_cores=10,
        additional_seqletscorer_kwargs={"ic_trim_threshold": min_ic}
    )
    # Set seqlet identification method
    hit_scorer.set_coordproducer(
        contrib_scores={"task0": act_scores_matched},
        core_sliding_window_size=5,
        target_fdr=0.2,
        min_passing_windows_frac=0.03,
        max_passing_windows_frac=0.2,
        separate_pos_neg_thresholds=False,
        max_seqlets_total=np.inf
    )
    # Map pattern index to motif key
    motif_keys = ["%d_%d" % (metacluster_ind, i) for i in pattern_inds]
    print("Starting hit scoring...")
    batch_size = 1024
    num_batches = int(np.ceil(len(act_scores_matched) / batch_size))
    rows = []
    for i in range(num_batches):
        print("\tScoring batch %d/%d" % (i + 1, num_batches))
        batch_slice = slice(i * batch_size, (i + 1) * batch_size)
        example_to_matches, pattern_to_matches = hit_scorer(
            contrib_scores={"task0": act_scores_matched[batch_slice]},
            hypothetical_contribs={"task0": hyp_scores_matched[batch_slice]},
            one_hot=one_hot_seqs_matched[batch_slice],
            hits_to_return_per_seqlet=1
        )
        # Example indices are batch-local; shift them back to global indices.
        offset = i * batch_size
        for example_index, match_list in example_to_matches.items():
            for match in match_list:
                rows.append([
                    match.exampleidx + offset, match.patternidx, match.start,
                    match.end, match.is_revcomp, match.aggregate_sim,
                    match.mod_delta, match.mod_precision, match.mod_percentile,
                    match.fann_perclasssum_perc, match.fann_perclassavg_perc
                ])
    # Collate the matches together into a big table
    colnames = [
        "example_index", "pattern_index", "start", "end", "revcomp",
        "agg_sim", "mod_delta", "mod_precision", "mod_percentile",
        "fann_perclasssum_perc", "fann_perclassavg_perc"
    ]
    match_table = pd.DataFrame(rows, columns=colnames)
    # Save raw table
    match_table.to_csv(
        os.path.join(outdir, "tfm_matches_raw.tsv"), sep="\t", header=True,
        index=False
    )
    print("Cleaning up matches...")
    # Convert example index to peak index
    match_table["peak_index"] = example_to_peak_index[
        match_table["example_index"]
    ]
    # Convert pattern index to motif key
    match_table["key"] = np.array(motif_keys)[match_table["pattern_index"]]
    # Convert revcomp to strand
    # Note we are assuming that the input scores were all positive strand
    match_table["strand"] = match_table["revcomp"].map({True: "-", False: "+"})
    # Save the start/end as other columns, which match the score coordinates
    match_table["score_start"] = match_table["start"]
    match_table["score_end"] = match_table["end"]
    # Convert start/end of motif hit to genomic coordinate
    # `peak_starts[i] == j` is such that if `i` is a peak index, `j` is the peak
    # start in genomic coordinate space
    peak_starts = np.empty(np.max(peak_table["index"]) + 1, dtype=int)
    peak_starts[peak_table["index"]] = peak_table["peak_start"]
    # Now reduce `peak_starts` to match `match_table` exactly
    peak_starts = peak_starts[match_table["peak_index"]]
    offset = (input_length - center_cut_size) // 2
    match_table["chrom"] = peak_table["peak_chrom"].loc[
        match_table["peak_index"]
    ].reset_index(drop=True)
    # Note: "peak_chrom" was an index column so we need to drop that before
    # setting it as a value
    match_table["start"] = match_table["start"] + offset + peak_starts
    match_table["end"] = match_table["end"] + offset + peak_starts
    # Trim each motif hit to be only the size of the core motif (determined by
    # IC)
    ic_dict = {}  # Save (trimmed) IC for each motif
    hit_patterns = hit_scorer.trimmed_subclustered_patterns
    for i, pattern in enumerate(hit_patterns):
        motif_key = motif_keys[i]
        pfm = pattern["sequence"].fwd
        ic = compute_per_position_ic(pfm, bg_freq, 0.001)
        pass_inds = np.where(ic >= min_ic)[0]
        if not pass_inds.size:
            continue
        start, end = np.min(pass_inds), np.max(pass_inds) + 1
        length = end - start
        rc_start = len(pfm) - end
        ic_dict[motif_key] = ic[start:end]
        motif_mask = match_table["key"] == motif_key
        pos_mask = match_table["strand"] == "+"
        # Shift each hit's boundaries by the trim offset; the new end is the
        # already-shifted start plus the trimmed motif length.
        match_table.loc[motif_mask & pos_mask, "start"] = \
            match_table[motif_mask & pos_mask]["start"] + start
        match_table.loc[motif_mask & pos_mask, "end"] = \
            match_table[motif_mask & pos_mask]["start"] + length
        match_table.loc[motif_mask & pos_mask, "score_start"] = \
            match_table[motif_mask & pos_mask]["score_start"] + start
        match_table.loc[motif_mask & pos_mask, "score_end"] = \
            match_table[motif_mask & pos_mask]["score_start"] + length
        match_table.loc[motif_mask & (~pos_mask), "start"] = \
            match_table[motif_mask & (~pos_mask)]["start"] + rc_start
        match_table.loc[motif_mask & (~pos_mask), "end"] = \
            match_table[motif_mask & (~pos_mask)]["start"] + length
        match_table.loc[motif_mask & (~pos_mask), "score_start"] = \
            match_table[motif_mask & (~pos_mask)]["score_start"] + rc_start
        match_table.loc[motif_mask & (~pos_mask), "score_end"] = \
            match_table[motif_mask & (~pos_mask)]["score_start"] + length
    # For each hit, compute the total absolute importance, fraction absolute
    # importance, and IC-weighted importance average
    match_table["imp_total_score"] = np.nan
    match_table["imp_total_signed_score"] = np.nan
    match_table["imp_ic_avg_score"] = np.nan
    score_length = act_scores_matched.shape[1]
    for i, row in match_table.iterrows():
        if row["score_start"] < 0 or row["score_end"] >= score_length:
            print("Hit at %s:%d-%d is outside of importance score range" % (
                row["chrom"], row["start"], row["end"]
            ))
            continue
        scores = np.sum(act_scores_matched[
            row["example_index"], row["score_start"]:row["score_end"]
        ], axis=1)  # Flatten from L x 4 to L-array
        match_table.loc[i, "imp_total_score"] = np.sum(np.abs(scores))
        match_table.loc[i, "imp_total_signed_score"] = np.sum(scores)
        ic = ic_dict[row["key"]]
        if row["strand"] == "-":
            ic = np.flip(ic)
        match_table.loc[i, "imp_ic_avg_score"] = np.mean(ic * scores)
    # Compute the fraction importance by dividing total importance
    total_track_imp = np.sum(np.abs(act_scores_matched), axis=(1, 2))
    match_table["imp_frac_score"] = match_table["imp_total_score"] / \
        total_track_imp[match_table["example_index"]]
    # Filter out any hits that had NaN as an importance score; these hits
    # overran the importance score track boundaries
    match_table = match_table.dropna(subset=["imp_total_score"])
    # Re-order columns (and drop a few) before saving the result
    match_table = match_table[[
        "chrom", "start", "end", "key", "strand", "peak_index",
        "imp_total_signed_score", "imp_total_score", "imp_frac_score",
        "imp_ic_avg_score", "agg_sim", "mod_delta", "mod_precision",
        "mod_percentile", "fann_perclasssum_perc", "fann_perclassavg_perc"
    ]]
    match_table.to_csv(
        os.path.join(outdir, "tfm_matches.bed"), sep="\t", header=False,
        index=False
    )
# click parses command-line options/arguments and invokes main().
if __name__ == "__main__":
    main()
|
from django.db import models
class Player(models.Model):
    """A registered player: login credentials plus the current room number."""
    cID = models.CharField(max_length = 20)  # player login identifier
    # NOTE(review): stored as plain text — consider django.contrib.auth hashing.
    password = models.CharField(max_length = 20)
    # Current room number (default 0 = not in a room). The field shares its
    # name with the Room model below; presumably an integer room number rather
    # than a relation — TODO confirm whether a ForeignKey(Room) was intended.
    Room = models.IntegerField (default = 0)
class Room(models.Model):
    """State of a single four-player card-game room.

    All per-player columns are suffixed 0-3 with the player's seat index.
    NOTE(review): the parallel *0-*3 column groups suggest a separate
    per-seat model with a ForeignKey would be cleaner.
    """
    num = models.AutoField(primary_key =True)  # room number (auto PK)
    # Names of the players seated at positions 0-3 ("" = empty seat).
    User0 = models.CharField(max_length = 20, default = "")
    User1 = models.CharField(max_length = 20, default = "")
    User2 = models.CharField(max_length = 20, default = "")
    User3 = models.CharField(max_length = 20, default = "")
    turn = models.IntegerField (default = 0)  # seat index whose turn it is
    # Serialized cards thrown so far — format defined by the consuming views.
    thrown = models.CharField(max_length = 1000, default = "")
    start = models.BooleanField(default = False)  # game has started
    Deck = models.CharField(max_length = 1000, default = "")  # serialized deck
    # Per-seat boolean flags. Exact game semantics are not visible in this
    # file — TODO(review): document what Flag/lFlag/sFlag/eFlag/wFlag/fFlag mean.
    Flag0 = models.BooleanField(default = False)
    Flag1 = models.BooleanField(default = False)
    Flag2 = models.BooleanField(default = False)
    Flag3 = models.BooleanField(default = False)
    # Per-seat counters (semantics not visible here).
    Tchu0 = models.IntegerField(default = 0)
    Tchu1 = models.IntegerField(default = 0)
    Tchu2 = models.IntegerField(default = 0)
    Tchu3 = models.IntegerField(default = 0)
    lFlag0 = models.BooleanField(default = False)
    lFlag1 = models.BooleanField(default = False)
    lFlag2 = models.BooleanField(default = False)
    lFlag3 = models.BooleanField(default = False)
    sFlag0 = models.BooleanField(default = False)
    sFlag1 = models.BooleanField(default = False)
    sFlag2 = models.BooleanField(default = False)
    sFlag3 = models.BooleanField(default = False)
    eFlag0 = models.BooleanField(default = False)
    eFlag1 = models.BooleanField(default = False)
    eFlag2 = models.BooleanField(default = False)
    eFlag3 = models.BooleanField(default = False)
    # Per-seat card strings, used together with the eFlagN flags above.
    eCard0 = models.CharField(max_length = 100)
    eCard1 = models.CharField(max_length = 100)
    eCard2 = models.CharField(max_length = 100)
    eCard3 = models.CharField(max_length = 100)
    wFlag0 = models.BooleanField(default = False)
    wFlag1 = models.BooleanField(default = False)
    wFlag2 = models.BooleanField(default = False)
    wFlag3 = models.BooleanField(default = False)
    fFlag0 = models.BooleanField(default = False)
    fFlag1 = models.BooleanField(default = False)
    fFlag2 = models.BooleanField(default = False)
    fFlag3 = models.BooleanField(default = False)
|
import numpy as np
from matplotlib import pyplot as plt
# This generates 100 variables that could possibly be assigned to 5 clusters
n_variables = 100
n_clusters = 5
n_samples = 1000

# To keep this example simple, each cluster will have a fixed size.
# BUG FIX: integer division — cluster_size is used as a repeat count and as
# an index/slice bound below; true division (`/`) yields a float on
# Python 3 and makes np.repeat and array indexing fail.
cluster_size = n_variables // n_clusters

# Assign each variable to a cluster
belongs_to_cluster = np.repeat(range(n_clusters), cluster_size)
np.random.shuffle(belongs_to_cluster)

# This latent data is used to make variables that belong
# to the same cluster correlated.
latent = np.random.randn(n_clusters, n_samples)
variables = [
    np.random.randn(n_samples) + latent[belongs_to_cluster[i], :]
    for i in range(n_variables)
]
variables = np.array(variables)
C = np.cov(variables)
def score(C):
    '''
    Function to assign a score to an ordered covariance matrix.
    High correlations within a cluster improve the score.
    High correlations between clusters decrease the score.
    '''
    # Robustness fix: the module-level cluster_size may be a float (it is
    # computed with true division above), and NumPy rejects float index
    # arrays — coerce once here so indexing always works.
    size = int(cluster_size)
    total = 0  # renamed from `score`, which shadowed the function name
    for cluster in range(n_clusters):
        inside_cluster = np.arange(size) + cluster * size
        outside_cluster = np.setdiff1d(range(n_variables), inside_cluster)
        # Belonging to the same cluster
        total += np.sum(C[inside_cluster, :][:, inside_cluster])
        # Belonging to different clusters
        total -= np.sum(C[inside_cluster, :][:, outside_cluster])
        total -= np.sum(C[outside_cluster, :][:, inside_cluster])
    return total
initial_C = C
# BUG FIX: this assignment was commented out, but `initial_score` is read
# by the optimization loop below, which raised a NameError at runtime.
initial_score = score(C)
initial_ordering = np.arange(n_variables)

# Show the unordered covariance matrix before optimization.
plt.figure()
plt.imshow(C, interpolation='nearest')
plt.title('Initial C')
print('Initial ordering:', initial_ordering)
print('Initial covariance matrix score:', initial_score)
plt.show()
# Pretty dumb greedy optimization algorithm that continuously
# swaps rows to improve the score
def swap_rows(C, var1, var2):
    '''
    Function to swap two rows in a covariance matrix,
    updating the appropriate columns as well.
    '''
    swapped = C.copy()
    # Fancy indexing exchanges both entries in one step, first along the
    # rows and then along the columns, leaving the input untouched.
    swapped[[var1, var2], :] = swapped[[var2, var1], :]
    swapped[:, [var1, var2]] = swapped[:, [var2, var1]]
    return swapped
current_C = C
current_ordering = initial_ordering
# BUG FIX: the original read `initial_score`, whose assignment earlier in
# the file is commented out (NameError). Compute the starting score directly.
current_score = score(C)

max_iter = 1000
for i in range(max_iter):
    # Find the best row swap to make
    best_C = current_C
    best_ordering = current_ordering
    best_score = current_score
    for row1 in range(n_variables):
        for row2 in range(n_variables):
            if row1 == row2:
                continue
            # Try swapping this pair of rows/columns and keep the best.
            option_ordering = best_ordering.copy()
            option_ordering[row1] = best_ordering[row2]
            option_ordering[row2] = best_ordering[row1]
            option_C = swap_rows(best_C, row1, row2)
            option_score = score(option_C)
            if option_score > best_score:
                best_C = option_C
                best_ordering = option_ordering
                best_score = option_score
    if best_score > current_score:
        # Perform the best row swap
        current_C = best_C
        current_ordering = best_ordering
        current_score = best_score
    else:
        # No row swap found that improves the solution, we're done
        break

# Output the result
plt.figure()
plt.imshow(current_C, interpolation='nearest')
plt.title('Best C')
print('Best ordering:', current_ordering)
print('Best score:', current_score)
print()
print('Cluster [variables assigned to this cluster]')
print('------------------------------------------------')
# BUG FIX: slice bounds must be ints; with the module's float cluster_size
# the original slicing raised a TypeError.
size = int(cluster_size)
for cluster in range(n_clusters):
    print('Cluster %02d %s' % (cluster + 1, current_ordering[cluster * size:(cluster + 1) * size]))
import csv
import time
import mandb
'''
CREATE TABLE timu_1(
temp_id INT UNSIGNED AUTO_INCREMENT,/* 主(不可重复) 临时id */
njtype varchar(50) NOT NULL,/* 年级类型 */
tmtype varchar(500) NOT NULL,/* 题目类型 */
urls varchar(500) NOT NULL,/* 数据源链接 */
tmdata TEXT NOT NULL,/* 题目文本 */
daandata TEXT NOT NULL,/* 题目答案 */
jiexi TEXT NOT NULL,/* 题目解析 */
PRIMARY KEY (temp_id)
)ENGINE=InnoDB DEFAULT CHARSET=utf8;
'''
#
def adddata(listss):
    """Insert one scraped question row into the `timu_2` table.

    listss: sequence of [njtype, tmtype, urls, tmdata, daandata, jiexi].
    On any failure the error is printed and the insert is retried.
    """
    try:
        man_db = mandb.DB()
        # SECURITY NOTE(review): the SQL is built by string concatenation —
        # vulnerable to SQL injection and broken by stray quote characters
        # (the .replace("'", '"') only papers over single quotes). Prefer a
        # parameterized query if mandb.DB supports one.
        sql = "INSERT INTO timu_2 ( njtype, tmtype, urls, tmdata, daandata, jiexi) \
VALUES ('"+listss[0]+"', '"+listss[1]+"', '"+listss[2]+"', \
'"+str(listss[3]).replace("'","\"")+"', '"+str(listss[4]).replace("'","\"")+"', '"+str(listss[5]).replace("'","\"")+"' );"
        man_db.execute(sql)
    except Exception as e:
        print(e)
        # NOTE(review): unbounded retry via recursion — a persistent error
        # (e.g. lost DB connection) recurses until RecursionError.
        return adddata(listss)
csv_path = "f:/timu_yinyu_4.csv"
# Raise the field size limit so rows containing very large fields can be read.
csv.field_size_limit(500 * 1024 * 1024)
with open(csv_path, 'r',encoding='utf-8') as f:
    data = csv.reader((line for line in f), delimiter=",")
    #data = csv.reader((line for line in f), delimiter=",")
    #print(len(data))
    n=1
    for row in data:
        #131072
        # Resume point: skip rows already imported in a previous run.
        if n>=101315:
            print(n)
            print(row)
            #print(d_date)
            adddata(row)
            print("\n\n\n\n")
        n+=1
# Report how many rows were scanned in total.
print(n)
|
import os
import dmenu
import pyimgur
def upload():
    """Upload /tmp/screenshot.png to Imgur, print the link, delete the file."""
    client_id = "8e98531fa1631f6"
    PATH = "/tmp/screenshot.png"
    im = pyimgur.Imgur(client_id)
    uploaded_image = im.upload_image(PATH, title="Uploaded with PyImgur")
    print(uploaded_image.link)
    # Delete via os.remove instead of shelling out to `rm`: no subprocess,
    # and a failure raises visibly instead of being swallowed.
    os.remove(PATH)
def save_local():
    """Ask for a filename via dmenu and move the screenshot into the pics dir."""
    save_name = dmenu.show([''], prompt='type the filename (widout extension)')
    # SECURITY FIX: the original passed the user-typed name through
    # os.system("mv ..."), so shell metacharacters in the name would be
    # executed by the shell. shutil.move touches no shell at all.
    import shutil
    shutil.move("/tmp/screenshot.png",
                "/home/philip/pics/screenshots/" + save_name + ".png")
# Let the user select a region; gnome-screenshot writes it to a temp file.
os.system("gnome-screenshot -a -f /tmp/screenshot.png 2> /dev/null")
if dmenu.show(['local', 'imgur']) == 'imgur':
    try:
        upload()
    # NOTE(review): bare except also catches KeyboardInterrupt/SystemExit
    # and hides real bugs — narrow it to Exception (or the pyimgur errors).
    except:
        if dmenu.show(['yes', 'no'], prompt='could not upload to upload to Imgur! save local?') == 'yes':
            save_local()
        else:
            # Upload failed and the user declined to keep it: discard the file.
            os.system("rm /tmp/screenshot.png")
            exit()
else:
    save_local()
|
__author__ = 'tyerq'
def schema():
    """Return the informal document-schema sketch for the blog datastore.

    The text is human-readable documentation, not parsed by code here:
    `posts`, `users` and `sessions` collections with their fields; `==`
    notes what a field maps to (e.g. permalink serves as _id), and `[...]`
    marks embedded lists.
    """
    return """
posts:
  permalink == _id
  author == nickname
  posted
  topic
  text
  comments == [
    comment:
      author == nickname
      posted
      text
      ...
  ]
  tags == [
    tag
    ...
  ]
users:
  username == _id
  passw
  name
  email
sessions:
  token == _id
  user
"""
# coding:utf-8
# 面向对象编程
# 定义一个Student类,这个类拥有name和score两个属性Property。
class Student(object):
    """A student with a name and a score (object-oriented programming demo)."""

    def __init__(self, name, score):
        self.name = name
        self.score = score

    def print_score(self):
        """Print this student's result as "name: score"."""
        message = "%s: %s" % (self.name, self.score)
        print(message)
# Demo: create two students and print their scores.
Tom = Student("Tom", 85)
Jim = Student("Jim", 100)
Tom.print_score()
Jim.print_score()
|
def introduce():
    """Print a short self-introduction."""
    greeting = "Hello, I'm Attila!"
    print(greeting)
def add(a, b):
    """Return the sum (or concatenation, for sequences) of a and b."""
    result = a + b
    return result
def joke():
    """Print a canned joke reaction."""
    line = "LOL"
    print(line)
def shout():
    """Print a canned shout."""
    line = "KEK"
    print(line)
|
#
# @lc app=leetcode.cn id=102 lang=python3
#
# [102] 二叉树的层序遍历
#
# @lc code=start
# Definition for a binary tree node.
# class TreeNode:
# def __init__(self, val=0, left=None, right=None):
# self.val = val
# self.left = left
# self.right = right
class Solution:
    def levelOrder(self, root: TreeNode) -> List[List[int]]:
        """Return node values grouped by depth via breadth-first traversal.

        34/34 cases passed (32 ms); runtime beats 90.1% and memory beats
        25.27% (15.4 MB) of python3 submissions.
        """
        if not root:
            return []
        levels = []
        frontier = [root]
        while frontier:
            # Record the current level's values...
            levels.append([node.val for node in frontier])
            # ...then gather every existing child as the next level.
            frontier = [
                child
                for node in frontier
                for child in (node.left, node.right)
                if child
            ]
        return levels
# @lc code=end
|
# -*- coding: utf-8 -*-
"""
Created on Thu Jan 25 14:28:12 2018
@author: XuL
"""
import random
from pathlib import Path
import spacy
import en_core_web_md as spacyEn
# Training examples: (text, {'entities': [(char_start, char_end, label), ...]}).
TRAIN_DATA = [
    ('Choudries Inc. Super Seven Food Mart filed for Chapter 11 bankruptcy protection Bankr. M.D. Pa. Case No. 16-02475 on June 13, 2016.', {
        'entities': [(0, 35, 'ORG')]
    }),
    ('2747 Camelback, LLC Test, based in Dallas, Texas, filed a Chapter 11 petition Bankr. N.D. Tex. Case No. 16-31846 on May 4, 2016.', {
        'entities': [(0, 23, 'ORG')]
    })
]
# Number of training passes over TRAIN_DATA.
n_iter = 100
"""Load the model, set up the pipeline and train the entity recognizer."""
# Load the pretrained en_core_web_md pipeline (despite the original comment,
# this is not a blank Language class).
nlp = spacyEn.load()
print("Created en_core_web_md model")
# create the built-in pipeline components and add them to the pipeline
# nlp.create_pipe works for built-ins that are registered with spaCy
if 'ner' not in nlp.pipe_names:
    ner = nlp.create_pipe('ner')
    nlp.add_pipe(ner, last=True)
# otherwise, get it so we can add labels
else:
    ner = nlp.get_pipe('ner')
# Register every entity label that appears in the training data.
for _, annotations in TRAIN_DATA:
    print(annotations)
    for ent in annotations.get('entities'):
        print(ent[2])
        ner.add_label(ent[2])
# get names of other pipes to disable them during training
other_pipes = [pipe for pipe in nlp.pipe_names if pipe != 'ner']
with nlp.disable_pipes(*other_pipes):  # only train NER
    optimizer = nlp.begin_training()
    for itn in range(n_iter):
        # Shuffle each epoch so the updates are not order-dependent.
        random.shuffle(TRAIN_DATA)
        losses = {}
        for text, annotations in TRAIN_DATA:
            nlp.update(
                [text],  # batch of texts
                [annotations],  # batch of annotations
                drop=0.5,  # dropout - make it harder to memorise data
                sgd=optimizer,  # callable to update weights
                losses=losses)
        print(losses)
# test the trained model
for text, _ in TRAIN_DATA:
    doc = nlp(text)
    print('Entities', [(ent.text, ent.label_) for ent in doc.ents])
    print('Tokens', [(t.text, t.ent_type_, t.ent_iob) for t in doc])
#sentences = []
#sentences.append("")
#sentences.append("")
#sentences.append("")
#sentences.append("")
|
from django.db import models
class QuestionBank(models.Model):
    """A single question belonging to a named question bank."""
    bank_name = models.CharField(max_length=20, verbose_name='题库名')
    question_num = models.IntegerField(verbose_name='题号')
    question_name = models.CharField(max_length=300, verbose_name='题目')

    @staticmethod
    def get_question(bank_name, questions_num=None):
        """Return the questions of *bank_name* ordered by question number.

        When *questions_num* (an iterable of question numbers) is given and
        non-empty, only those questions are returned.
        """
        if not questions_num:
            questions = QuestionBank.objects.filter(bank_name=bank_name).order_by('question_num')
        else:
            questions = QuestionBank.objects.filter(bank_name=bank_name, question_num__in=questions_num).order_by('question_num')
        return questions
        # BUG FIX: the original body continued past this return with an
        # unreachable block that built
        # [((question_num, question_name), [(choice_type, choice_content), ...]), ...]
        # and "returned" it; that code could never execute and has been removed.

    def __str__(self):
        return '<QuestionBank: {}题目{} 题库名{}>'.format(self.question_num, self.question_name, self.bank_name)
class Choice(models.Model):
    """One selectable answer option attached to a QuestionBank question."""
    choice_type = models.CharField(max_length=5, verbose_name='选项类型')
    choice_content = models.CharField(max_length=100, verbose_name='选项内容')
    question = models.ForeignKey(QuestionBank, on_delete=models.CASCADE)

    def __str__(self):
        return '<id:{} Choice: {}{}>'.format(self.id, self.choice_type, self.choice_content)
class MBTIAnwserType(models.Model):
    """Maps a Choice to one of the eight MBTI dichotomy letters.

    NOTE(review): "Anwser" is a typo for "Answer"; renaming would require a
    migration and touches all call sites, so the name is kept as-is.
    """
    # the eight MBTI dichotomy letters (E/I, S/N, T/F, J/P)
    ANWSER_TYPE_ITEMS = (
        ('E', 'E'),
        ('I', 'I'),
        ('S', 'S'),
        ('N', 'N'),
        ('T', 'T'),
        ('F', 'F'),
        ('J', 'J'),
        ('P', 'P'),
    )
    choice = models.ForeignKey(Choice, on_delete=models.CASCADE)
    anwser_type = models.CharField(max_length=5, choices=ANWSER_TYPE_ITEMS, verbose_name='答案类型')
class MBTIResult(models.Model):
    """One of the sixteen MBTI personality result types."""
    # all sixteen four-letter MBTI type codes
    RESULT_TYPE_ITEMS = (
        ('ISTJ', 'ISTJ'),
        ('ISFJ', 'ISFJ'),
        ('INFJ', 'INFJ'),
        ('INTJ', 'INTJ'),
        ('ISTP', 'ISTP'),
        ('ISFP', 'ISFP'),
        ('INFP', 'INFP'),
        ('INTP', 'INTP'),
        ('ESTP', 'ESTP'),
        ('ESFP', 'ESFP'),
        ('ENFP', 'ENFP'),
        ('ENTP', 'ENTP'),
        ('ESTJ', 'ESTJ'),
        ('ESFJ', 'ESFJ'),
        ('ENFJ', 'ENFJ'),
        ('ENTJ', 'ENTJ'),
    )
    result_type = models.CharField(max_length=5, choices=RESULT_TYPE_ITEMS, verbose_name='报告类型')

    def __str__(self):
        return '<id:{} result_type: {}>'.format(self.id, self.result_type)
class MBTIResultDetail(models.Model):
    """A numbered paragraph of report text belonging to an MBTIResult."""
    result_type = models.ForeignKey(MBTIResult, on_delete=models.CASCADE)
    result_num = models.IntegerField(verbose_name='结果号')
    result_content = models.CharField(max_length=100, verbose_name='结果内容')
class CareerResultType(models.Model):
    """A career-test result category: two-letter code with title and body."""
    type_name = models.CharField(max_length=2, verbose_name='结果类型')
    type_title = models.CharField(max_length=100, verbose_name='结果标题')
    type_content = models.CharField(max_length=500, verbose_name='结果内容')

    @staticmethod
    def get_career_result(type_names):
        """Return the result rows whose type_name is in *type_names*."""
        return CareerResultType.objects.filter(type_name__in=type_names)
class HollandData(models.Model):
    """A numbered section (part) of the Holland questionnaire."""
    part_num = models.IntegerField(verbose_name='号码')
    part_title = models.CharField(max_length=30, verbose_name='标题')

    @staticmethod
    def get_holland_data(part_num):
        """Return the single part numbered *part_num* (DoesNotExist if absent)."""
        return HollandData.objects.get(part_num=part_num)

    def __str__(self):
        return '<id:{} HollandData: {}>'.format(self.id, self.part_title)
class HollandDataItem(models.Model):
    """A single item (statement) within a HollandData part, tagged with a type."""
    item_num = models.IntegerField(verbose_name='号码')
    part_type = models.CharField(max_length=2, verbose_name='类型')
    content = models.CharField(max_length=100, verbose_name='内容')
    part = models.ForeignKey(HollandData, on_delete=models.CASCADE)

    @staticmethod
    def get_holland_data_item(part, part_type):
        """Return the items of *part* with the given *part_type*, in item order."""
        return HollandDataItem.objects.filter(part=part, part_type=part_type).order_by('item_num')

    def __str__(self):
        return '< id:{} item_num: {} part_type: {} >'.format(self.id, self.item_num, self.part_type)
class HollandTypeResult(models.Model):
    """Descriptive result text for a Holland type code."""
    result_type = models.CharField(max_length=2, verbose_name='类型')
    result_title = models.CharField(max_length=30, verbose_name='标题')
    result_detail = models.CharField(max_length=300, verbose_name='内容')

    @staticmethod
    def get_type_result(result_type):
        """Return the rows whose result_type is in *result_type* (an iterable)."""
        return HollandTypeResult.objects.filter(result_type__in=result_type)
class NewHolland(models.Model):
    """A numbered question title of the "new Holland" questionnaire."""
    title_num = models.IntegerField(verbose_name='号码')
    title = models.CharField(max_length=50, verbose_name='标题')

    @classmethod
    def get_all_title(cls):
        """Return every question row."""
        return cls.objects.all()

    # @staticmethod
    # def get_new_holland_list(num_list):
    # return NewHolland.objects.filter(title_num__in=num_list)
    def __str__(self):
        return '< id:{} title_num: {} title: {} >'.format(self.id, self.title_num, self.title)
class NewHollandType(models.Model):
    """One of the six Holland (RIASEC) personality types with its description."""
    # the six RIASEC codes
    TYPE_ITEMS = (
        ('R', 'R'),
        ('I', 'I'),
        ('A', 'A'),
        ('S', 'S'),
        ('E', 'E'),
        ('C', 'C'),
    )
    item_type = models.CharField(max_length=1, choices=TYPE_ITEMS, verbose_name='类型')
    item_name = models.CharField(max_length=5, verbose_name='类型名称')
    personality_tendency = models.CharField(max_length=200, verbose_name='人格倾向')
    typical_occupation = models.CharField(max_length=100, verbose_name='典型职业')

    def __str__(self):
        return '< id:{} item_type: {} item_name: {} >'.format(self.id, self.item_type, self.item_name)
class NewHollandTitleNumType(models.Model):
    """Join table linking a NewHolland question to a NewHollandType, with a
    flag indicating whether selecting the question scores for that type."""
    new_holland = models.ForeignKey(NewHolland, on_delete=models.CASCADE, verbose_name='题号信息')
    new_holland_type = models.ForeignKey(NewHollandType, on_delete=models.CASCADE, verbose_name='类型信息')
    score_condition = models.BooleanField(default=True, verbose_name='得分条件信息')

    @staticmethod
    def get_new_holland_title_num_type(num_list):
        """Partition all questions into those whose title_num is in *num_list*
        ('select_num') and the remainder ('not_select_num')."""
        context = dict()
        context['select_num'] = NewHolland.objects.filter(title_num__in=num_list)
        context['not_select_num'] = NewHolland.objects.exclude(title_num__in=num_list)
        return context
|
import os
import sys
sys.path.insert(0, 'tools/msa_edition')
import remove_empty_sequences
# Global flag: sequences are treated as DNA when testing for emptiness.
is_dna = True
def extract_msa(s, curs, max_curs, writer):
    """Write, in FASTA format, the (species, sequence) records found in
    s[curs:max_curs], skipping sequences judged empty.

    Assumes each record looks like "...\tspecies\tsequence\n" in the NEXUS
    export -- TODO(review): confirm against the generating tool.
    """
    # curs == 0 means the previous find() returned -1 (no more records)
    while (curs < max_curs and curs != 0):
        end_species = s.find("\t", curs)
        species = s[curs: end_species]
        curs_seq = end_species + 1
        end_seq = s.find("\n", curs_seq)
        seq = s[curs_seq:end_seq]
        # advance to the next record; presumably the tab preceding the next
        # species name -- verify the delimiter layout
        curs = s.find("\t", end_seq)
        curs = curs + 1
        # skip gap-only sequences (is_dna is the module-level flag)
        if (remove_empty_sequences.is_empty(seq, is_dna)):
            continue
        writer.write(">")
        writer.write(species)
        writer.write("\n")
        writer.write(seq)
        writer.write("\n")
def extract_msas(nexus, outputdir):
    """Split the per-family alignments of *nexus* into one FASTA file per
    family under *outputdir* (created here; fails if it already exists)."""
    os.mkdir(outputdir)
    # NOTE(review): the file handle is never closed; acceptable for a
    # one-shot script
    s = open(nexus).read()
    curs = 0
    count = 0
    while (True):
        # search for family name
        curs = s.find(":", curs)
        if (curs == -1):
            break
        end_family = s.find("]", curs)
        # skips two characters after ':' -- presumably "[... : family]"
        # markers; confirm against the input format
        family = s[curs + 2 : end_family]
        fasta = os.path.join(outputdir, family + ".fasta")
        print(fasta)
        # the family's data runs until the next '[' (or end of file for the
        # last family)
        curs = s.find("[", end_family)
        if (curs == -1):
            curs = len(s)
        with open(fasta, "w") as writer:
            extract_msa(s, end_family + 4, curs, writer)
        count = count + 1
    print(count)
if (__name__ == "__main__"):
    # CLI: python <script> nexus outputdir
    if (len(sys.argv) < 3):
        print("Syntax python " + os.path.basename(__file__) + " nexus outputdir")
        sys.exit(1)
    print("WARNING: the script assumes DNA data")
    nexus = sys.argv[1]
    outputdir = sys.argv[2]
    extract_msas(nexus, outputdir)
    # NOTE(review): the warning is printed twice (before and after the run);
    # the second print looks redundant
    print("WARNING: the script assumes DNA data")
|
# Read three integers and count the operations needed to make them equal,
# where each operation increases exactly two of the three numbers by 1.
a, b, c = sorted(map(int, input().split()))
steps = 0
# make the gap between the two smallest values even first
if (b - a) % 2 == 1:
    a += 1
    c += 1
    steps += 1
# close the (now even) gap between the two smallest, two units at a time
while a != b:
    a += 2
    steps += 1
# raise the equal pair together until it reaches the largest value
while a != c:
    a += 1
    b += 1
    steps += 1
print(steps)
"""
-- Refactor the dataloader for Squirrel
"""
import math
import random
import numpy as np
import torch
from torchtext.data.batch import Batch
# from squirrel.data.noise import merged_noisy_generator
class DistributedBatch(Batch):
    """A torchtext Batch that, on construction, keeps only this worker's
    shard of the examples: the big batch is split evenly across *world_size*
    workers, with the remainder spread over the lowest local ranks."""

    def __init__(self,
                 data=None,
                 dataset=None,
                 device=None,
                 world_size=1,
                 local_rank=0,
                 train=True):
        """Create a Batch from a list of examples."""
        # bookkeeping slots used elsewhere in the pipeline
        self.message = ''
        self.task = ''
        self.preprocessed = None
        self.weights = None
        self.train = train
        if data is not None:
            # carve out this rank's [start_pos, end_pos) slice: every rank
            # gets mini_batch_size examples; the first `additional_size`
            # ranks get one extra
            big_batch_size = len(data)
            mini_batch_size = int(math.floor(big_batch_size / world_size))
            additional_size = int(big_batch_size -
                                  mini_batch_size * world_size)
            start_pos = local_rank if additional_size > local_rank \
                else additional_size
            start_pos = start_pos + local_rank * mini_batch_size
            end_pos = (local_rank + 1) if additional_size > (
                local_rank + 1) else additional_size
            end_pos = end_pos + (local_rank + 1) * mini_batch_size
            data = data[start_pos:end_pos]
            self.batch_size = len(data)
            self.dataset = dataset
            self.fields = dataset.fields.keys()  # copy field names
            self.task = dataset.task
            self.attributes = []
            # numericalize every declared field; also keep the raw values
            # under "<name>_original"
            for (name, field) in dataset.fields.items():
                if field is not None:
                    batch = [getattr(x, name) for x in data]
                    setattr(self, name + '_original', batch)
                    setattr(self, name, field.process(batch, device=device))
                    self.attributes += [name, name + '_original']
            # NOTE(review): assumes the sliced data is non-empty here
            if hasattr(data[0], 'id'):
                setattr(
                    self, 'id',
                    torch.tensor([getattr(x, 'id') for x in data],
                                 dtype=torch.long,
                                 device=device))
                self.attributes += ['id']

    def __str__(self):
        """Human-readable summary of the batch (attribute names and sizes)."""
        if not self.__dict__:
            return 'Empty {} instance'.format(torch.typename(self))
        fields_to_index = filter(lambda field: field is not None,
                                 self.attributes)
        var_strs = '\n'.join([
            '\t[.' + name + ']' + ":" + _short_str(getattr(self, name))
            for name in fields_to_index if hasattr(self, name)
        ])
        data_str = (' from {}'.format(self.dataset.name.upper())
                    if hasattr(self.dataset, 'name')
                    and isinstance(self.dataset.name, str) else '')
        strt = '[{} of size {}{}]\n{}'.format(
            torch.typename(self), self.batch_size, data_str, var_strs)
        return '\n' + strt
def fetch_batch(data,
                batch_size,
                world_size=1,
                reserve=False,
                maxlen=None,
                maxatt_size=None,
                fields=('src', 'trg')):
    """Yield minibatches (lists of examples) from *data* with dynamic sizing.

    An example's cost is its maximum field length (token budget) and the
    squared maximum field length scaled by batch length (attention budget);
    a minibatch is emitted when either budget is met or exceeded.

    Args:
        data: iterable of examples exposing the attributes in *fields*.
        batch_size: token budget per worker.
        world_size: number of distributed workers the batch will be split over.
        reserve: hold back the first *world_size* examples and append them to
            the final minibatch so no worker receives an empty batch.
        maxlen: drop examples whose longest field exceeds this.
        maxatt_size: attention budget per worker (default: effectively infinite).
        fields: attribute names measured for sizing (default changed from a
            mutable list to an equivalent tuple; it is only iterated).

    Note: the final yield may be an empty list when *reserve* is False and no
    examples remain -- preserved from the original behavior.
    """

    # --- dynamic batching function (by default) --- #
    def dynamic_batching(new, i, tokens, maxatt):
        # accumulate the token count; recompute the attention cost as
        # (running max of squared lengths) * current batch length
        tokens = tokens + max([len(getattr(new, field)) for field in fields])
        maxatt = maxatt / (i - 1) if i > 1 else 0
        maxatt = max([len(getattr(new, field))**2
                      for field in fields] + [maxatt]) * i
        return tokens, maxatt

    if batch_size == 1:  # speed-test: one sentence per batch.
        # BUG FIX: the original lambda returned a bare int, but the caller
        # unpacks two values (size_so_far, maxatt_so_far), which raised
        # TypeError; return a (size, maxatt) pair instead.
        batch_size_fn = lambda new, count, sofar, maxatt: (count, 0)
    else:
        batch_size_fn = dynamic_batching

    if maxatt_size is None:
        maxatt_size = 1e10  # infinite

    def check(a, ax, b, bx):
        """0 = exactly full, 1 = overfull, -1 = room left (hoisted out of the
        loop; the original redefined it every iteration)."""
        if ((a == ax) and (b <= bx)):
            return 0
        if ((b == bx) and (a <= ax)):
            return 0
        if ((a > ax) or (b > bx)):
            return 1
        return -1

    size_so_far = 0
    maxatt_so_far = 0
    minibatch = []
    if reserve:
        reserved_minibatch = []
    for it, ex in enumerate(data):
        # drop examples that have elements too long
        if maxlen is not None:
            if max([len(getattr(ex, field)) for field in fields]) > maxlen:
                continue
        if reserve and (it < world_size):
            reserved_minibatch.append(ex)
            continue
        else:
            minibatch.append(ex)
        size_so_far, maxatt_so_far = batch_size_fn(ex, len(minibatch),
                                                   size_so_far, maxatt_so_far)
        status = check(size_so_far, batch_size * world_size,
                       np.ceil(maxatt_so_far / world_size), maxatt_size)
        if (status == 0) and (
                len(minibatch) > world_size
        ):  # make sure there is no empty batches coming out during testing.
            yield minibatch
            minibatch, size_so_far, maxatt_so_far = [], 0, 0
        elif (status == 1) and (
                len(minibatch) > (world_size + 1)
        ):  # make sure there is no empty batches coming out during testing.
            # overfull: emit everything but the newest example, which seeds
            # the next minibatch
            yield minibatch[:-1]
            minibatch = minibatch[-1:]
            size_so_far, maxatt_so_far = batch_size_fn(ex, 1, 0, 0)
    if reserve:
        minibatch += reserved_minibatch  # make sure there is no empty batches
    yield minibatch
def fetch_pool(data,
               batch_size,
               key,
               random_shuffler=None,
               world_size=1,
               maxlen=None,
               maxatt_size=None,
               fields=None):
    """Sort within buckets, then batch, then shuffle batches.

    Partitions data into chunks of size 100*batch_size, sorts examples within
    each chunk using sort_key, then batch these examples and shuffle the
    batches.

    Args:
        random_shuffler: callable that takes a list of batches and RETURNS a
            shuffled list; defaults to an in-place shuffle wrapper.
    """
    if random_shuffler is None:
        # BUG FIX: the original defaulted to `random.shuffle`, which shuffles
        # in place and returns None, so `for b in random_shuffler(...)` raised
        # TypeError.  Wrap it so the shuffled list itself is returned.
        def random_shuffler(batches):
            random.shuffle(batches)
            return batches
    for p in fetch_batch(
            data, batch_size * 100, maxlen=maxlen, maxatt_size=None,
            fields=fields):  # pre-read 100 batches and sort.
        p_batch = fetch_batch(
            sorted(p, key=key),
            batch_size,
            world_size,
            True,
            maxlen=maxlen,
            maxatt_size=maxatt_size,
            fields=fields)
        for b in random_shuffler(list(p_batch)):
            yield b
def split_batch(batch, N):  # split a batch into N parts.
    """Split *batch* into up to N smaller DistributedBatch objects.

    The remainder (batch_size % N) is spread one-extra over the first
    sub-batches; empty slices are skipped, so fewer than N batches may be
    yielded.
    """
    if N == 1:
        yield batch
    else:
        backup_batch = batch
        big_batch_size = backup_batch.batch_size
        mini_batch_size = int(math.floor(big_batch_size / N))
        additional_size = int(big_batch_size - mini_batch_size * N)
        batches = []
        for k in range(N):
            batch = DistributedBatch()
            batch.fields = backup_batch.fields
            batch.attributes = backup_batch.attributes
            # same shard arithmetic as DistributedBatch.__init__
            start_pos = k if additional_size > k else additional_size
            start_pos = start_pos + k * mini_batch_size
            end_pos = (k + 1) if additional_size > (k + 1) else additional_size
            end_pos = end_pos + (k + 1) * mini_batch_size
            if start_pos >= end_pos:
                continue
            batch.batch_size = end_pos - start_pos
            # slice the auxiliary per-example payloads alongside the fields
            if backup_batch.preprocessed is not None:
                batch.preprocessed = []
                for u in range(len(backup_batch.preprocessed)):
                    batch.preprocessed.append(
                        backup_batch.preprocessed[u][start_pos:end_pos])
            if backup_batch.weights is not None:
                batch.weights = backup_batch.weights[start_pos:end_pos]
            for field in backup_batch.attributes:
                setattr(batch, field,
                        getattr(backup_batch, field)[start_pos:end_pos])
            batches.append(batch)
        for batch in batches:
            yield batch
def merge_batches(batches):  # merge batches into a big batch
    """Concatenate several DistributedBatch objects into one.

    List attributes are concatenated, 'id' tensors are cat'ed along dim 0,
    and other tensor fields are padded to the longest sequence length before
    concatenation.
    """
    if len(batches) == 1:
        return batches[0]
    else:
        batch = DistributedBatch()
        # every input batch must carry the same set of fields
        for field in batches[-1].fields:
            for backup_batch in batches:
                assert field in backup_batch.fields, "the same fields"
        batch.fields = batches[-1].fields
        batch.attributes = batches[-1].attributes
        for field in batches[-1].attributes:
            if isinstance(getattr(batches[-1], field), list):
                # raw "<name>_original" lists: simple concatenation
                setattr(batch, field, [
                    t for backup_batch in batches
                    for t in getattr(backup_batch, field)
                ])
            elif field == 'id':
                setattr(
                    batch, 'id',
                    torch.cat([backup_batch.id for backup_batch in batches]))
            else:
                # tensor fields: pad each to the common max length, then cat
                max_len = max([
                    getattr(backup_batch, field).size(1)
                    for backup_batch in batches
                ])
                setattr(
                    batch, field,
                    torch.cat([
                        backup_batch.dataset.fields[field].extend_padding(
                            getattr(backup_batch, field), max_len)
                        for backup_batch in batches
                    ], 0))
        batch.batch_size = sum(
            [backup_batch.batch_size for backup_batch in batches])
        batch.task = '/'.join([backup_batch.task for backup_batch in batches])
        return batch
def _short_str(tensor):
# unwrap variable to tensor
if not torch.is_tensor(tensor):
# (1) unpack variable
if hasattr(tensor, 'data'):
tensor = getattr(tensor, 'data')
# (2) handle include_lengths
elif isinstance(tensor, tuple):
return str(tuple(_short_str(t) for t in tensor))
# (3) fallback to default str
elif isinstance(tensor, list):
return str('list of size {}'.format(len(tensor)))
else:
return str(tensor)
# copied from torch _tensor_str
size_str = 'x'.join(str(size) for size in tensor.size())
device_str = '' if not tensor.is_cuda else \
' (GPU {})'.format(tensor.get_device())
strt = '[{} of size {}{}]'.format(
torch.typename(tensor), size_str, device_str)
return strt
|
# Generated by Django 2.2 on 2020-10-23 18:36
from django.db import migrations, models
class Migration(migrations.Migration):
    """Initial migration: creates the Profile table."""

    initial = True

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='Profile',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=30)),
                ('gender', models.CharField(choices=[('M', 'Male'), ('F', 'Female'), ('U', 'Undefined')], default='U', max_length=1)),
                ('age', models.IntegerField(default=0)),
                ('loyality', models.CharField(choices=[('PLATINUM', 'PLATINUM CARD'), ('GOLD', 'GOLDEN CARD'), ('SILVER', 'SILVER CARD')], default='SILVER', max_length=8)),
            ],
        ),
    ]
|
from ixnetwork_restpy import SessionAssistant
from ixnetwork_restpy import TestPlatform
import time
import pprint
#sw-ixia3.insieme.local is 172.31.194.141
# Connect to the IxNetwork API server and start from a clean configuration.
session_assistant = SessionAssistant(IpAddress='172.31.194.141',
                                     LogLevel=SessionAssistant.LOGLEVEL_INFO,
                                     ClearConfig=True)
ixnetwork = session_assistant.Ixnetwork
test_platform = session_assistant.TestPlatform
test_platform.Trace = TestPlatform.TRACE_INFO
# create tx and rx port resources
port_map = session_assistant.PortMapAssistant()
port_map.Map('172.21.86.100', 9, 2, Name='Rx')
port_map.Map('172.21.86.100', 9, 14, Name='Tx')
#port_map.Map('172.21.86.100', 9, 6, Name='SupRx') keep adding more ports in Rx if needed for capturing
# force both test ports onto fiber media
eth = test_platform.Sessions.find().Ixnetwork.Vport.find(Name='^Tx').L1Config.Ethernet
eth.Media = 'fiber'
eth = test_platform.Sessions.find().Ixnetwork.Vport.find(Name='^Rx').L1Config.Ethernet
eth.Media = 'fiber'
# create a TrafficItem resource
#create topology: one single-device Ethernet/IPv4 endpoint per port, VLAN 351
eth_s = ixnetwork \
    .Topology.add(Vports=ixnetwork.Vport.find(Name='^Tx')) \
    .DeviceGroup.add(Multiplier='1') \
    .Ethernet.add()
eth_s.EnableVlans.Single(True)
eth_s.Ipv4.add().Address.Single('23.1.1.2')
eth_s.Vlan.find().VlanId.Single('351')
eth_d = ixnetwork \
    .Topology.add(Vports=ixnetwork.Vport.find(Name='^Rx')) \
    .DeviceGroup.add(Multiplier='1') \
    .Ethernet.add()
eth_d.EnableVlans.Single(True)
eth_d.Ipv4.add().Address.Single('23.1.1.3')
eth_d.Vlan.find().VlanId.Single('351')
# create template
vlanT = ixnetwork.Traffic.ProtocolTemplate.find(StackTypeId='^vlan$')
arpT = ixnetwork.Traffic.ProtocolTemplate.find(StackTypeId='^ethernetARP$')
# create arp stream: raw traffic item, 1 frame/s, 1024-byte frames, continuous
traffic_item_arp = ixnetwork.Traffic.TrafficItem.add(Name='pyats-copp-arp', TrafficType='raw')
traffic_item_arp.EndpointSet.add(
    Sources=ixnetwork.Vport.find(Name='^Tx').Protocols.find(),
    Destinations=ixnetwork.Vport.find(Name='^Rx').Protocols.find())
traffic_config = traffic_item_arp.ConfigElement.find()
traffic_config.FrameRate.update(Type='framesPerSecond', Rate='1')
traffic_config.FrameSize.update(FixedSize='1024')
traffic_config.TransmissionControl.update(Type='continuous')#, BurstPacketCount='100', InterBurstGapUnits='nanoseconds', RepeatBurst='1000')
# create stack: ethernet -> vlan -> ARP
ethernet_stack = traffic_config.Stack.find(StackTypeId='^ethernet$')
vlan_stack = traffic_config.Stack.read(ethernet_stack.AppendProtocol(vlanT))
arp_stack = traffic_config.Stack.read(vlan_stack.AppendProtocol(arpT))
# adjust stack fields
destination_mac = ethernet_stack.Field.find(FieldTypeId='ethernet.header.destinationAddress')
destination_mac.update(ValueType='singleValue', SingleValue='00:22:BD:F8:19:FF', TrackingEnabled=True)
source_mac = ethernet_stack.Field.find(FieldTypeId='ethernet.header.sourceAddress')
source_mac.update(ValueType='singleValue', SingleValue='00:00:00:00:00:82')
vlan_stack.Field.find(FieldTypeId='vlan.header.vlanTag.vlanID').update(ValueType='singleValue', SingleValue='351', TrackingEnabled=True)
# ARP request (opCode 1) for 23.1.1.11 from 23.1.1.10
arp_stack.Field.find(FieldTypeId='ethernetARP.header.protocolType').update(ValueType='singleValue', SingleValue="0x0800")
arp_stack.Field.find(FieldTypeId='ethernetARP.header.srcHardwareAddress').update(ValueType='singleValue', SingleValue='00:00:00:00:00:82')
arp_stack.Field.find(FieldTypeId='ethernetARP.header.dstHardwareAddress').update(ValueType='singleValue', SingleValue='00:00:00:00:00:00')
arp_stack.Field.find(FieldTypeId='ethernetARP.header.srcIP').update(ValueType='singleValue', SingleValue='23.1.1.10')
arp_stack.Field.find(FieldTypeId='ethernetARP.header.dstIP').update(ValueType='singleValue', SingleValue='23.1.1.11')
arp_stack.Field.find(FieldTypeId='ethernetARP.header.opCode').update(ValueType='singleValue', SingleValue='1')
# connect ports to hardware test ports
# apply traffic to hardware
# start traffic
# push ConfigElement settings down to HighLevelStream resources
traffic_item_arp.Generate()
port_map.Connect(ForceOwnership=True)
ixnetwork.Traffic.Apply()
ixnetwork.Traffic.StartStatelessTrafficBlocking()
print("+++++++Waiting for 300 seconds ...")
time.sleep(300)
# print statistics
output = session_assistant.StatViewAssistant('Flow Statistics')
print(output)
# stop traffic
#ixnetwork.Traffic.StopStatelessTrafficBlocking()
# disable the traffic item
#traffic_item_arp.Enabled = "False"
print("+++++++DONE")
######################################
|
#!/usr/bin/env python3
import argparse
import serial
import time
from time import sleep
import datetime
from tqdm import tqdm
import os
# Command-line interface: the serial port of the controller board, plus an
# optional --clear flag (int, default 1); --clear is parsed but not read here.
parser = argparse.ArgumentParser()
parser.add_argument('port')
parser.add_argument('--clear', type=int, default=1)
args = parser.parse_args()
class Controller:
    def __init__(self, port):
        """Open the serial link (9600 baud) used to drive the controller board."""
        super().__init__()
        self.today = 1  # in-game day counter maintained by fixseed()
        self.ser = serial.Serial(port, 9600)
    def send(self, msg, duration=0):
        """Send one controller command, hold it for *duration* seconds, then release."""
        now = datetime.datetime.now()  # kept for the (disabled) debug print below
        # print(f'[{now}] {msg}')
        self.ser.write(f'{msg}\r\n'.encode('utf-8'))
        sleep(duration)
        self.ser.write(b'RELEASE\r\n')
    def close(self):
        """Close the serial port."""
        self.ser.close()
    def leap(self):
        """Advance the console system clock by one day via the HOME settings menu."""
        self.send("Button HOME", 0.1)  # Home
        sleep(0.5)
        self.send("LY MAX", 0.05)
        sleep(0.05)
        self.send("LX MAX", 0.05)
        sleep(0.05)
        self.send("LX MAX", 0.05)
        sleep(0.05)
        self.send("LX MAX", 0.05)
        sleep(0.05)
        self.send("LX MAX", 0.05)
        sleep(0.05)
        self.send("Button A", 0.1)  # Settings
        sleep(0.05)
        self.send("LY MAX", 2.4)
        self.send("Button A", 0.05)  # System Settings
        sleep(0.05)
        self.send("LY MAX", 0.05)
        sleep(0.05)
        self.send("LY MAX", 0.05)
        sleep(0.05)
        self.send("LY MAX", 0.05)
        sleep(0.05)
        self.send("LY MAX", 0.05)
        sleep(0.05)
        self.send("Button A", 0.05)  # select Date and Time
        sleep(0.2)
        self.send("LY MAX", 0.05)
        sleep(0.05)
        self.send("LY MAX", 0.05)
        sleep(0.05)
        count = 0  # count initialize (unused in this method)
        self.send("Button A", 0.06)  # current date and time
        sleep(0.1)
        self.send("Button A", 0.06)
        sleep(0.05)
        self.send("Button A", 0.06)
        sleep(0.05)
        self.send("LY MIN", 0.06)  # advance the date by one day
        sleep(0.1)
        self.send("Button A", 0.06)
        sleep(0.05)
        self.send("Button A", 0.06)
        sleep(0.05)
        self.send("Button A", 0.06)
        sleep(0.05)
        self.send("Button A", 0.06)  # confirm the date (OK)
        sleep(0.11)
        self.send("Button HOME", 0.1)
        sleep(1)
        self.send("Button A", 0.05)
        sleep(0.5)
    def oneDay(self, check=True):
        """Advance one in-game day via the raid-menu date skip; when *check*
        is True, also collect the watts from the den afterwards."""
        self.send("Button A", 0.1)  # invite others (start raid menu)
        sleep(3)
        self.leap()
        self.send("Button B", 0.1)  # quit
        sleep(1)
        self.send("Button A", 0.1)  # yes
        sleep(4.5)
        if check:
            self.send("Button A", 0.1)  # interact with the den
            sleep(1)
            self.send("Button A", 0.1)  # light is overflowing
            sleep(0.9)
            self.send("Button A", 0.1)  # received 2,000 watts
            sleep(1)
    def nDays(self, n):
        """Launch the game, collect the first watts, then repeat the one-day
        skip *n* times."""
        self.send("Button A", 0.1)  # launch the game
        sleep(15)
        self.send("Button A", 0.1)  # press A on the title screen
        sleep(9)
        self.send("Button A", 0.1)  # interact with the den
        sleep(1)
        self.send("Button A", 0.1)  # light is overflowing
        sleep(1)
        self.send("Button A", 0.1)  # received 2,000 watts
        sleep(1)
        for i in tqdm(range(n)):
            self.oneDay()
    def finish(self, n):
        """Close the game from the HOME screen, then wind the system date
        back by *n* days."""
        self.send("Button HOME", 0.1)
        sleep(1)
        self.send("Button X", 0.1)  # close the running software
        sleep(0.8)
        self.send("Button A", 0.1)
        sleep(3)
        self.send("LY MAX", 0.1)
        sleep(0.05)
        self.send("LX MAX", 0.1)
        sleep(0.05)
        self.send("LX MAX", 0.1)
        sleep(0.05)
        self.send("LX MAX", 0.1)
        sleep(0.05)
        self.send("LX MAX", 0.1)
        sleep(0.05)
        self.send("Button A", 0.1)  # Settings
        sleep(0.1)
        self.send("LY MAX", 2.5)
        self.send("Button A", 0.1)  # System Settings
        sleep(0.1)
        self.send("LY MAX", 0.1)
        sleep(0.05)
        self.send("LY MAX", 0.1)
        sleep(0.05)
        self.send("LY MAX", 0.1)
        sleep(0.05)
        self.send("LY MAX", 0.1)
        sleep(0.05)
        self.send("Button A", 0.1)  # select Date and Time
        sleep(0.2)
        self.send("LY MAX", 0.1)
        sleep(0.05)
        self.send("LY MAX", 0.1)
        sleep(0.05)
        count = 0  # count initialize (unused in this method)
        self.send("Button A", 0.1)  # current date and time
        sleep(0.1)
        self.send("Button A", 0.1)
        sleep(0.05)
        self.send("Button A", 0.1)
        sleep(0.05)
        for i in range(n):
            self.send("LY MAX", 0.1)  # wind the date back one day per press
            sleep(0.1)
        self.send("Button A", 0.1)
        sleep(0.05)
        self.send("Button A", 0.1)
        sleep(0.05)
        self.send("Button A", 0.1)
        sleep(0.05)
        self.send("Button A", 0.1)  # confirm the date (OK)
        sleep(0.1)
        self.send("Button HOME", 0.1)
        sleep(0.8)
    def advance1day(self):
        """Start the game, skip one day via leap(), save, then rewind the
        clock by one day."""
        self.send("Button A", 0.1)  # launch the game
        sleep(15)
        self.send("Button A", 0.1)  # press A on the title screen
        sleep(8)
        self.send("Button A", 0.1)  # interact with the den
        sleep(1)
        self.send("Button A", 0.1)  # light is overflowing
        sleep(0.7)
        self.send("Button A", 0.1)  # received 2,000 watts
        sleep(0.7)
        self.send("Button A", 0.1)  # invite others (start raid menu)
        sleep(3)
        self.leap()
        self.send("Button B", 0.1)  # quit
        sleep(1)
        self.send("Button A", 0.1)  # yes
        sleep(5)
        self.send("Button X", 0.1)  # open the menu
        sleep(1.2)
        self.send("Button R", 0.1)  # save
        sleep(1.5)
        self.send("Button A", 0.1)  # confirm save
        sleep(4)
        self.finish(1)
    def fixseed(self):
        """Rewind the clock to day 1, re-skip forward (today - 4) days while
        collecting watts, save, then rewind again and reset self.today."""
        leepday = self.today - 4
        print("Today is day {}. Finish.".format(self.today))
        self.finish(self.today-1)
        self.today = 1
        print("Today is day {}. Leep {} days.".format(self.today, leepday))
        self.send("Button A", 0.1)  # launch the game
        sleep(15)
        self.send("Button A", 0.1)  # press A on the title screen
        sleep(9)
        self.send("Button A", 0.1)  # interact with the den
        sleep(1)
        self.send("Button A", 0.1)  # light is overflowing
        sleep(1)
        self.send("Button A", 0.1)  # received 2,000 watts
        sleep(1)
        for i in tqdm(range(leepday)):
            if i != leepday - 1:
                self.oneDay(True)
                self.today += 1
            else:
                # last skip: don't collect watts
                self.oneDay(False)
                self.today += 1
        print("Today is day {}. Save.".format(self.today))
        self.send("Button X", 0.1)  # open the menu
        sleep(1.2)
        self.send("Button R", 0.1)  # save
        sleep(1.5)
        self.send("Button A", 0.1)  # confirm save
        sleep(4)
        print("Today is day {}. Finish.".format(self.today))
        self.finish(self.today-1)
        self.today = 1
        print("Today is day {}.".format(self.today))
    def infinityFruits(self, n):
        """Shake a berry tree *n* times, collecting berries and skipping one
        day between rounds."""
        for i in range(n):
            self.send("Button A", 0.1)  # interact with the tree
            sleep(1)
            self.send("Button A", 0.1)  # "it's a berry tree"
            sleep(1)
            self.send("Button A", 0.1)  # yes
            sleep(5)
            self.send("Button A", 0.1)  # berries fell
            sleep(1)
            self.send("LY MAX", 0.1)  # move cursor to "stop"
            sleep(0.5)
            self.send("Button A", 0.1)  # confirm
            sleep(1)
            self.send("Button A", 0.1)  # picked them up
            sleep(1)
            self.send("Button A", 0.1)  # obtained the berries
            sleep(1)
            self.fastTimeLeap(1)
    def fastOneDay(self):
        """From the (already open) date-and-time list, advance the day by one
        and confirm; leaves the dialog ready for the next call."""
        self.send("Button A", 0.1)  # current date and time
        sleep(0.1)
        self.send("LX MIN", 0.05)  # move back to the day field
        sleep(0.03)
        self.send("LX MIN", 0.05)
        sleep(0.03)
        self.send("LX MIN", 0.05)
        sleep(0.03)
        self.send("LY MIN", 0.1)  # advance the date by one day
        sleep(0.1)
        self.send("Button A", 0.05)
        sleep(0.03)
        self.send("Button A", 0.05)
        sleep(0.03)
        self.send("Button A", 0.05)
        sleep(0.03)
        self.send("Button A", 0.1)  # confirm the date (OK)
        sleep(0.1)
    def fastTimeLeap(self, n):
        """Advance the system clock by *n* days in a row using the date
        dialog, with a tqdm progress bar; returns to the game afterwards."""
        bar = tqdm(total=n)
        self.send("Button B", 0.1)
        self.send("Button B", 0.1)
        self.send("Button B", 0.1)  # Initialize
        self.send("Button HOME", 0.1)  # Home
        sleep(0.5)
        self.send("LY MAX", 0.1)
        sleep(0.05)
        self.send("LX MAX", 0.1)
        sleep(0.05)
        self.send("LX MAX", 0.1)
        sleep(0.05)
        self.send("LX MAX", 0.1)
        sleep(0.05)
        self.send("LX MAX", 0.1)
        sleep(0.05)
        self.send("Button A", 0.1)  # Settings
        sleep(0.1)
        self.send("LY MAX", 2.5)
        self.send("Button A", 0.1)  # System Settings
        sleep(0.1)
        self.send("LY MAX", 0.1)
        sleep(0.05)
        self.send("LY MAX", 0.1)
        sleep(0.05)
        self.send("LY MAX", 0.1)
        sleep(0.05)
        self.send("LY MAX", 0.1)
        sleep(0.05)
        self.send("Button A", 0.1)  # select Date and Time
        sleep(0.2)
        self.send("LY MAX", 0.1)
        sleep(0.05)
        self.send("LY MAX", 0.1)
        sleep(0.05)
        count = 0  # count initialize
        self.send("Button A", 0.1)  # current date and time
        sleep(0.1)
        self.send("Button A", 0.1)
        sleep(0.05)
        self.send("Button A", 0.1)
        sleep(0.05)
        self.send("LY MIN", 0.1)  # advance the date by one day
        sleep(0.05)
        self.send("Button A", 0.1)
        sleep(0.05)
        self.send("Button A", 0.1)
        sleep(0.05)
        self.send("Button A", 0.1)
        sleep(0.05)
        self.send("Button A", 0.1)  # confirm the date (OK)
        sleep(0.05)
        count += 1
        bar.update(1)
        while True:
            if count == n:
                break
            if count % 30 == 0:
                # day-of-month wrapped past 31: extra advance to land on the 1st
                self.fastOneDay()
            self.fastOneDay()
            count += 1
            bar.update(1)
        self.send("Button HOME", 0.1)
        sleep(1)
        self.send("Button A", 0.1)
        sleep(0.5)
    def fastTimeLeap4S(self, n, ret=8000):
        """Like fastTimeLeap, but every *ret* days it briefly returns to the
        game and re-opens the settings (presumably to keep the console
        responsive over very long runs -- TODO confirm)."""
        bar = tqdm(total=n)
        self.send("Button B", 0.1)
        self.send("Button B", 0.1)
        self.send("Button B", 0.1)  # Initialize
        self.send("Button HOME", 0.1)  # Home
        sleep(0.5)
        self.send("LY MAX", 0.1)
        sleep(0.05)
        self.send("LX MAX", 0.1)
        sleep(0.05)
        self.send("LX MAX", 0.1)
        sleep(0.05)
        self.send("LX MAX", 0.1)
        sleep(0.05)
        self.send("LX MAX", 0.1)
        sleep(0.05)
        self.send("Button A", 0.1)  # Settings
        sleep(0.1)
        self.send("LY MAX", 2.5)
        self.send("Button A", 0.1)  # System Settings
        sleep(0.1)
        self.send("LY MAX", 0.1)
        sleep(0.05)
        self.send("LY MAX", 0.1)
        sleep(0.05)
        self.send("LY MAX", 0.1)
        sleep(0.05)
        self.send("LY MAX", 0.1)
        sleep(0.05)
        self.send("Button A", 0.1)  # select Date and Time
        sleep(0.2)
        self.send("LY MAX", 0.1)
        sleep(0.05)
        self.send("LY MAX", 0.1)
        sleep(0.05)
        count = 0  # count initialize
        self.send("Button A", 0.1)  # current date and time
        sleep(0.1)
        self.send("Button A", 0.1)
        sleep(0.05)
        self.send("Button A", 0.1)
        sleep(0.05)
        self.send("LY MIN", 0.1)  # advance the date by one day
        sleep(0.05)
        self.send("Button A", 0.1)
        sleep(0.05)
        self.send("Button A", 0.1)
        sleep(0.05)
        self.send("Button A", 0.1)
        sleep(0.05)
        self.send("Button A", 0.1)  # confirm the date (OK)
        sleep(0.05)
        count += 1
        bar.update(1)
        while True:
            if count == n:
                break
            if count % 30 == 0:
                # day-of-month wrapped past 31: extra advance to land on the 1st
                self.fastOneDay()
            if count % ret == 0:
                # hop back into the game and out, then re-open the date dialog
                self.send("Button HOME", 0.1)
                sleep(1)
                self.send("Button A", 0.1)
                sleep(2)
                self.send("Button HOME", 0.1)  # Home
                sleep(0.5)
                self.send("LY MAX", 0.1)
                sleep(0.05)
                self.send("LX MAX", 0.1)
                sleep(0.05)
                self.send("LX MAX", 0.1)
                sleep(0.05)
                self.send("LX MAX", 0.1)
                sleep(0.05)
                self.send("LX MAX", 0.1)
                sleep(0.05)
                self.send("Button A", 0.1)  # Settings
                sleep(0.1)
                self.send("LY MAX", 2.5)
                self.send("Button A", 0.1)  # System Settings
                sleep(0.1)
                self.send("LY MAX", 0.1)
                sleep(0.05)
                self.send("LY MAX", 0.1)
                sleep(0.05)
                self.send("LY MAX", 0.1)
                sleep(0.05)
                self.send("LY MAX", 0.1)
                sleep(0.05)
                self.send("Button A", 0.1)  # select Date and Time
                sleep(0.2)
                self.send("LY MAX", 0.1)
                sleep(0.05)
                self.send("LY MAX", 0.1)
                sleep(0.05)
                self.send("Button A", 0.1)  # current date and time
                sleep(0.1)
                self.send("Button A", 0.1)
                sleep(0.05)
                self.send("Button A", 0.1)
                sleep(0.05)
                self.send("Button A", 0.1)
                sleep(0.05)
                self.send("Button A", 0.1)
                sleep(0.05)
                self.send("Button A", 0.1)
                sleep(0.05)
                self.send("Button A", 0.1)  # confirm the date (OK)
                sleep(0.05)
            self.fastOneDay()
            count += 1
            bar.update(1)
        self.send("Button HOME", 0.1)
        sleep(1)
        self.send("Button A", 0.1)
        sleep(0.5)
    def pokejob(self, jobtime=None):
        """Run one Poké Jobs cycle: accept a job, select Pokémon from the Box,
        send them out, time-skip one day + one minute, collect the results,
        and reopen the job list.

        jobtime=None selects the EXP routine; an int selects the EV routine
        and scrolls *jobtime* entries down when choosing the job duration.
        """
        self.send("Button A", 0.1)  # select the top job
        sleep(1)
        self.send("Button A", 0.1)  # accept? -> yes
        sleep(1.5)
        self.send("Button A", 0.1)  # moving to the Box
        sleep(1.6)
        self.send("Button Y", 0.1)  # switch selection mode
        sleep(0.3)
        if jobtime is None:  # EXP routine
            # sweep across four columns, selecting five Pokémon per column
            c = ["LX MAX", "LX MIN", "LX MAX", "LX MIN"]
            for i in range(4):
                for j in range(5):
                    self.send("Button A", 0.05)
                    sleep(0.05)
                    self.send(c[i], 0.05)
                    sleep(0.05)
                self.send("Button A", 0.1)
                sleep(0.1)
                self.send("LY MAX", 0.1)
                sleep(0.1)
        else:  # EV routine
            self.send("LY MAX", 0.1)
            sleep(0.1)
            self.send("LY MAX", 0.1)
            sleep(0.1)
            self.send("LY MAX", 0.1)
            sleep(0.1)
            self.send("Button A", 0.1)
            sleep(0.1)
        self.send("Button B", 0.1)  # send them out
        sleep(1.8)
        self.send("Button A", 0.1)  # yes
        sleep(3)
        self.send("Button A", 0.1)  # send out
        sleep(1)
        if jobtime is not None:
            # scroll down to the requested job duration
            for i in range(jobtime):
                self.send("LY MAX", 0.1)
                sleep(0.5)
        self.send("Button A", 0.1)  # one day
        sleep(10)
        self.send("Button A", 0.1)  # they headed off
        sleep(0.8)
        self.send("Button A", 0.1)  # "good luck!"
        sleep(0.5)
        # time-skip: one day plus one minute
        self.send("Button HOME", 0.1)  # Home
        sleep(0.5)
        self.send("LY MAX", 0.05)
        sleep(0.05)
        self.send("LX MAX", 0.05)
        sleep(0.05)
        self.send("LX MAX", 0.05)
        sleep(0.05)
        self.send("LX MAX", 0.05)
        sleep(0.05)
        self.send("LX MAX", 0.05)
        sleep(0.05)
        self.send("Button A", 0.1)  # Settings
        sleep(0.05)
        self.send("LY MAX", 2.4)
        self.send("Button A", 0.05)  # System Settings
        sleep(0.05)
        self.send("LY MAX", 0.05)
        sleep(0.05)
        self.send("LY MAX", 0.05)
        sleep(0.05)
        self.send("LY MAX", 0.05)
        sleep(0.05)
        self.send("LY MAX", 0.05)
        sleep(0.05)
        self.send("Button A", 0.05)  # select Date and Time
        sleep(0.2)
        self.send("LY MAX", 0.05)
        sleep(0.05)
        self.send("LY MAX", 0.05)
        sleep(0.05)
        count = 0  # count initialize (unused in this method)
        self.send("Button A", 0.06)  # current date and time
        sleep(0.1)
        self.send("Button A", 0.06)
        sleep(0.05)
        self.send("Button A", 0.06)
        sleep(0.05)
        self.send("LY MIN", 0.06)  # advance the date by one day
        sleep(0.1)
        self.send("Button A", 0.06)
        sleep(0.05)
        self.send("Button A", 0.06)
        sleep(0.05)
        self.send("LY MIN", 0.06)  # advance the minutes by one
        sleep(0.1)
        self.send("Button A", 0.06)
        sleep(0.05)
        self.send("Button A", 0.08)  # confirm the date (OK)
        sleep(0.2)
        self.send("Button HOME", 0.05)
        sleep(1)
        self.send("Button A", 0.05)
        sleep(1)
        self.send("Button A", 0.1)
        sleep(14)
        self.send("Button A", 0.1)  # they're back
        sleep(1)
        if jobtime is None:
            self.send("Button A", 0.1)  # "they did great"
            sleep(2)
        else:
            sleep(1.5)
        self.send("Button A", 0.1)  # received EXP
        sleep(3)
        self.send("Button A", 0.1)  # they managed the job
        sleep(0.8)
        self.send("Button A", 0.1)  # seems they pulled it off
        sleep(1)
        self.send("Button A", 0.1)  # "even more next time"
        sleep(1)
        self.send("Button A", 0.1)  # something to hand over
        sleep(1)
        self.send("Button A", 0.1)  # as a thank-you
        sleep(1)
        self.send("Button B", 0.1)
        sleep(1)
        self.send("Button A", 0.1)  # "see you next time"
        sleep(1)
        self.send("Button A", 0.1)  # talk to the Rotomi terminal
        sleep(1)
        self.send("Button A", 0.1)
        sleep(1)
        self.send("LY MAX", 0.1)
        sleep(0.1)
        self.send("LY MAX", 0.1)
        sleep(0.1)
        self.send("Button A", 0.1)  # select Poké Jobs
        sleep(4)
        self.send("Button A", 0.1)  # new jobs
        sleep(0.5)
        pass
def nojob(self):
self. send("Button B", 0.1)
sleep(1)
self. send("Button B", 0.1)
sleep(1)
self.fastTimeLeap(1)
sleep(2)
self.send("Button A", 0.1) # ロトミに話しかける
sleep(2)
self.send("Button A", 0.1)
sleep(2)
self.send("LY MAX", 0.1)
sleep(0.1)
self.send("LY MAX", 0.1)
sleep(0.1)
self.send("Button A", 0.1) # ポケジョブ選択
sleep(4)
self.send("Button A", 0.1) # 新しいお手伝い
sleep(0.5)
def effort(self, V, n):
vposition = {"S":1, "D": 2, "C":3, "B":4, "A":5, "H":6}
move = vposition[V.upper()]
n2time = [96, 48, 32, 16, 12, 8, 4]
timeList = [0, 0, 0, 0, 0, 0, 0]
if n >= 252:
timeList[0] = 3
else:
for i, d in enumerate(n2time):
timeList[i] = int(n / d)
n = n - timeList[i] * n2time[i]
if n == 0:
break
for i in range(len(timeList)):
print("{}:{} ".format(n2time[i], timeList[i]), end="")
print("")
sleep(1)
bar = tqdm(total=n)
for i, j in enumerate(timeList):
for k in range(j):
for m in range(move):
self.send("LY MIN", 0.1)
sleep(0.5)
self.pokejob(jobtime=i)
bar.update(n2time[i])
bar.close()
def autoRotomi(self):
self.send("Button A", 0.1) #ロトミ起動
sleep(0.8)
self.send("Button B", 0.1)
sleep(0.5)
self.send("LY MAX", 0.1)
sleep(0.5)
self.send("Button A", 0.1)
sleep(0.6)
self.send("Button A", 0.1)
sleep(1)
self.send("Button A", 0.1)
sleep(1)
self.send("Button A", 0.1)
sleep(1)
self.send("Button A", 0.1)
sleep(1)
self.send("Button A", 0.1)
sleep(1)
self.send("Button A", 0.1)
sleep(0.8)
self.send("Button A", 0.1)
sleep(2.2)
#レポート
self.send("Button B", 0.1)
sleep(1)
self.send("Button B", 0.1)
sleep(1)
self.send("Button B", 0.1)
sleep(1)
self.send("Button B", 0.1)
sleep(1)
self.send("Button B", 0.1)
sleep(1)
self.send("Button B", 0.1)
sleep(1)
self.send("Button B", 0.1)
sleep(1)
self.send("Button B", 0.1)
sleep(1)
self.send("Button B", 0.1)
sleep(1.5)
#待機
self.send("Button B", 0.1)
sleep(1)
self.send("Button B", 0.1)
sleep(1)
self.send("Button B", 0.1)
sleep(1)
self.send("Button B", 0.1)
sleep(1)
self.send("Button B", 0.1)
sleep(1)
self.send("Button B", 0.1)
sleep(1)
self.send("Button B", 0.1)
sleep(1)
self.send("Button B", 0.1)
sleep(1)
self.send("Button B", 0.1)
sleep(1)
self.send("Button B", 0.1)
sleep(0.7)
#ロトミ閉じる
self.send("Button HOME", 0.1) # Home
sleep(0.5)
self.send("LY MAX", 0.05)
sleep(0.05)
self.send("LX MAX", 0.05)
sleep(0.05)
self.send("LX MAX", 0.05)
sleep(0.05)
self.send("LX MAX", 0.05)
sleep(0.05)
self.send("LX MAX", 0.05)
sleep(0.05)
self.send("Button A", 0.1) # 設定
sleep(0.05)
self.send("LY MAX", 2.4)
self.send("Button A", 0.05) # 本体設定
sleep(0.05)
self.send("LY MAX", 0.05)
sleep(0.05)
self.send("LY MAX", 0.05)
sleep(0.05)
self.send("LY MAX", 0.05)
sleep(0.05)
self.send("LY MAX", 0.05)
sleep(0.05)
self.send("Button A", 0.05) # 日付と時刻選択
sleep(0.2)
self.send("LY MAX", 0.05)
sleep(0.05)
self.send("LY MAX", 0.05)
sleep(0.05)
self.send("Button A", 0.06) # 現在の日付と時刻
sleep(0.1)
self.send("LY MAX", 0.1) #年号1つもどす
sleep(0.1)
self.send("Button A", 0.05)
sleep(0.05)
self.send("Button A", 0.05)
sleep(0.05)
self.send("Button A", 0.05)
sleep(0.05)
self.send("Button A", 0.05)
sleep(0.05)
self.send("Button A", 0.05)
sleep(0.05)
self.send("Button A", 0.1) #日付OK
sleep(0.1)
self.send("Button A", 0.05)
sleep(0.05)
self.send("LX MIN", 0.05)
sleep(0.05)
self.send("LX MIN", 0.05)
sleep(0.05)
self.send("LX MIN", 0.05)
sleep(0.05)
self.send("LX MIN", 0.05)
sleep(0.05)
self.send("LX MIN", 0.05)
sleep(0.05)
self.send("LX MIN", 0.05)
sleep(0.05)
self.send("LY MIN", 0.1) #年号1つすすめる
sleep(0.1)
self.send("Button A", 0.05)
sleep(0.05)
self.send("Button A", 0.05)
sleep(0.05)
self.send("Button A", 0.05)
sleep(0.05)
self.send("Button A", 0.05)
sleep(0.05)
self.send("Button A", 0.05)
sleep(0.05)
self.send("Button A", 0.1) #日付OK
sleep(0.5)
self.send("Button HOME", 0.1) #ゲームに戻る
sleep(1.0)
self.send("Button HOME", 0.1)
sleep(2.0)
def help(self):
with open("./readme.txt", "r") as f:
print(f.read())
    def action(self, command):
        """Dispatch one REPL command string.

        Recognised commands (case-insensitive): LEAP n, RESET, AGAIN n,
        FASTTIMELEAP/FTL n [skip], FASTTIMELEAP4S/FTL4S n [skip], ADVANCE,
        SETTODAY n, POKEJOB, NOJOB, EFFORT stat n, WAIT sec, RESETTODAY,
        FIXSEED, ROTOMI, HELP, HOLD dir sec, UP/DOWN/LEFT/RIGHT (U/J/H/K),
        EXIT; anything else is sent as "Button <command>".
        After every command self.today is wrapped into the range 1..31.
        """
        command = command.upper()
        if command.split()[0] == "LEAP":
            print("Today is Day {}. Leap {} days.".format(self.today, int(command.split()[1])))
            if self.today == 1:
                self.nDays(int(command.split()[1]))
            else:
                for i in range(int(command.split()[1])):
                    self.oneDay()
            self.today += int(command.split()[1])
            print("Finish. Today is Day {}. ".format(self.today))
        elif command.split()[0] == "RESET":
            print("Today is Day {}. Reset.".format(self.today))
            self.finish(self.today-1)
            self.today = 1
            print("Finish. Today is Day {}. ".format(self.today))
        elif command.split()[0] == "AGAIN":
            # RESET followed immediately by a LEAP of the given length.
            print("Today is Day {}. Reset.".format(self.today))
            self.finish(self.today-1)
            self.today = 1
            print("Finish. Leap {} days.".format(int(command.split()[1])))
            self.nDays(int(command.split()[1]))
            self.today += int(command.split()[1])
            print("Finish. Today is Day {}. ".format(self.today))
        elif command.split()[0] == "FASTTIMELEAP" or command.split()[0] == "FTL":
            # NOTE(review): bool() of any non-empty third token is True, so
            # "FTL 5 false" also skips the confirmations — presumably the mere
            # presence of a third argument means "skip"; confirm intent.
            if len(command.split()) == 3:
                check = bool(command.split()[2])
            else:
                check = False
            if not check:
                if input("Have you completed a Rank battle? (yes or no)").upper() != "YES":
                    print("Please do a Rank battle.")
                    return
            if not check:
                if input("Did you move into the building? (yes or no)").upper() != "YES":
                    print("Please move into the building")
                    return
            print("Fast TimeLeap for {} days !!".format(int(command.split()[1])))
            self.fastTimeLeap(int(command.split()[1]))
        elif command.split()[0] == "FASTTIMELEAP4S" or command.split()[0] == "FTL4S":
            if len(command.split()) == 3:
                check = bool(command.split()[2])
            else:
                check = False
            if not check:
                if input("Have you completed a Rank battle? (yes or no)").upper() != "YES":
                    print("Please do a Rank battle.")
                    return
            if not check:
                if input("Did you move into the building? (yes or no)").upper() != "YES":
                    print("Please move into the building")
                    return
            print("Fast TimeLeap for {} days !!".format(int(command.split()[1])))
            self.fastTimeLeap4S(int(command.split()[1]), ret=8000)
        elif command.split()[0] == "ADVANCE":
            print("Today is Day {}. Reset.".format(self.today))
            self.finish(self.today-1)
            self.today = 1
            print("Finish. Advance 1 day.")
            self.advance1day()
        elif command.split()[0] == "SETTODAY":
            self.today = int(command.split()[1])
            print("Today is Day {}. ".format(self.today))
        elif command.split()[0] == "POKEJOB":
            self.pokejob()
        elif command.split()[0] == "NOJOB":
            self.nojob()
        elif command.split()[0] == "EFFORT":
            self.effort(command.split()[1], int(command.split()[2]))
        elif command.split()[0] == "WAIT":
            sleep(float(command.split()[1]))
        elif command == "RESETTODAY":
            self.today = 1
        elif command == "FIXSEED":
            print("Today is Day {}. Reset.".format(self.today))
            self.fixseed()
            print("Today is Day {}. ".format(self.today))
        elif command.split()[0] == "ROTOMI":
            # Repeatedly run the Rotomi lottery macro n times (or forever).
            n = input("How long ? (Type \"number\" or \"inf\")")
            if n == "inf":
                while True:
                    self.autoRotomi()
            else:
                for i in tqdm(range(int(n))):
                    self.autoRotomi()
        elif command == "HELP":
            with open("./readme.txt", "r") as f:
                print(f.read())
        elif command.split()[0] == "HOLD":
            # Hold a stick direction for a given number of seconds.
            c = command.split()[1]
            t = float(command.split()[2])
            if c == "UP" or c == "U":
                self.send("LY MIN", t)
            elif c == "DOWN" or c == "J":
                self.send("LY MAX", t)
            elif c == "RIGHT" or c == "K":
                self.send("LX MAX", t)
            elif c == "LEFT" or c == "H":
                self.send("LX MIN", t)
        elif command == "UP" or command == "U":
            self.send("LY MIN", 0.1)
        elif command == "DOWN" or command == "J":
            self.send("LY MAX", 0.1)
        elif command == "RIGHT" or command == "K":
            self.send("LX MAX", 0.1)
        elif command == "LEFT" or command == "H":
            self.send("LX MIN", 0.1)
        elif command == "EXIT":
            self.send('RELEASE')
            self.ser.close()
        else:
            # Fallback: treat the raw command as a button name.
            self.send("Button {}".format(command), 0.1)
        # Keep the in-game day-of-month counter in the range 1..31.
        self.today = self.today % 31
        if self.today == 0:
            self.today = 31
        sleep(0.1)
def main():
    """Interactive command REPL driving the Switch Controller.

    Extra meta-commands: READ file (replay a script from ./scriptTxt),
    RECORD (record commands with timing until STOP), MEMORY/MEMORY2 (store
    a command), M/N (replay stored), S (repeat previous). Ctrl-C releases
    all inputs and closes the serial port.
    """
    c = Controller(args.port)
    try:
        c.send("Button B", 0.1)
        memory = None
        memory2 = None
        prevCommand = None
        while True:
            command = input("Command: ").upper()
            if args.clear == 1:  # was `args.clear is 1`: identity comparison
                                 # against an int literal is implementation-
                                 # defined; use equality.
                os.system('clear')
                print("\n"*30)
                os.system('clear')
                print("Command: {}".format(command))
            if command == "":
                continue
            if command.split()[0] == "READ":
                with open("./scriptTxt/{}".format(command.split()[1]), "r") as f:
                    command = f.readline()
                    while command:
                        # NOTE(review): [:-1] strips the trailing newline; if the
                        # last line has no newline this drops a real character.
                        print(command[:-1])
                        c.action(command[:-1])
                        command = f.readline()
                print("finish TXT command !")
            elif command.split()[0] == "RECORD":
                commands = ""
                command = input("Recording Command: ").upper()
                t = time.time()
                while command.split()[0] != "STOP":
                    c.action(command)
                    commands += command + "\n"
                    command = input("Recording Command: ").upper()
                    commands += "WAIT {:.2f}\n".format(time.time() - t)
                    t = time.time()
                filename = input("Finish Recording !\nWhat is the file name?: ")
                # BUG FIX: was "./scriptTXT/", but READ loads from
                # "./scriptTxt/" — on a case-sensitive filesystem recordings
                # could never be replayed.
                with open("./scriptTxt/{}".format(filename), "w") as f:
                    f.write(commands)
                print("Export commands!")
            elif command == "MEMORY":
                memory = input("Memory Command: ")
            elif command == "MEMORY2":
                memory2 = input("Memory2 Command: ")
            elif command == "M":
                print("Memory Command is \"{}\"".format(memory))
                c.action(memory)
            elif command == "N":
                print("Memory2 Command is \"{}\"".format(memory2))
                c.action(memory2)
            elif command == "S":
                c.action(prevCommand)
            else:
                c.action(command)
                prevCommand = command
    except KeyboardInterrupt:
        c.send('RELEASE')
        c.close()


if __name__ == "__main__":
    main()
|
from traceback import format_exc
import click
from lib import Validator, RuleDoesNotExist, RuleCannotBeParsed, FileNewLineError
@click.group(invoke_without_command=True)
@click.option('--rules', type=click.Path(exists=True), default='rules.json')
@click.option('--rules-string', type=click.STRING)
@click.argument('csv', type=click.File('r'))
@click.argument('rule_name', type=click.STRING, required=False)
@click.pass_context
def run(context, csv, rule_name, rules, rules_string):
    '''csvcheck accepts a path to a csv file and will validate it against rules'''
    # --rules-string (inline JSON) takes precedence over --rules (file path).
    if rules_string:
        rules = rules_string
    # Shared state consumed by subcommands (e.g. `fix`) via context.obj.
    context.obj = {
        'file': csv,
        'validator': get_validator(csv, rules, rule_name)
    }
    # Invoked without a subcommand: validate immediately and report.
    if not context.invoked_subcommand:
        errors = pretty_errors(context.obj['validator'].errors,
                               context.obj['validator'].csv_data)
        if not errors:
            click.echo('there are no errors in {}'.format(csv.name))
        else:
            print_csv_errors(errors)
# `fix` subcommand: repair the CSV either by normalising line endings (-l)
# or by interactively removing invalid characters/lines (-e). The real work
# happens in handle_fix (a comment is used here instead of a docstring so the
# click help text is not changed).
@run.command()
@click.option('-l', '--line-endings/--no-line-endings', default=False)
@click.option('-e', '--remove-errors/--no-remove-errors', default=False)
@click.pass_context
def fix(context, line_endings, remove_errors):
    handle_fix(line_endings, remove_errors)
# Core of the `fix` subcommand. @click.pass_context injects the current click
# context, so callers invoke it as handle_fix(line_endings, remove_errors).
# With line_endings: rewrite the file with normalised line endings.
# With remove_errors: walk each validation error and interactively apply a
# strategy (delete line / strip invalid chars / skip), then save the result
# as "fixed-<name>" in the current directory.
@click.pass_context
def handle_fix(context, line_endings, remove_errors):
    fixed_file_name = ''
    if line_endings:
        fixed_file_name = fix_line_endings(context.obj['file'])
    elif remove_errors:
        csv_data = context.obj['validator'].csv_data
        errors = context.obj['validator'].errors
        err_explaination = '''CONTENTS OF LINE: {line}
INVALID CHARACTERS: {invalid}'''
        initial_msg = '''
There are {count} errors in {file_name}.
The first error is:
{first_err}
'''.format(
            count=len(errors),
            file_name=context.obj['file'].name,
            first_err=err_explaination.format(
                line=csv_data[errors[0]['line_num']],
                invalid=errors[0]['errors']))
        strategy_prompt = '''Choose a strategy:
delete - remove the line completely
fix - remove the invalid characters, but keep the valid ones
skip - skip this line'''
        invalid_input_msg = '"{}" is not a valid strategy. Please choose from above.'
        strategy = None
        # While True, prompt for a strategy on every error; the user may opt
        # after the first error to reuse one strategy for all the rest.
        block_on_each_iteration = True
        REMOVE_LINE = 1
        STRIP_ERRORS = 2
        SKIP_LINE = 3
        allowed_inputs = {
            'd': REMOVE_LINE,
            'delete': REMOVE_LINE,
            'f': STRIP_ERRORS,
            'fix': STRIP_ERRORS,
            's': SKIP_LINE,
            'skip': SKIP_LINE
        }
        click.echo(initial_msg)
        for index, err in enumerate(errors):
            line_contents = csv_data[err['line_num']]
            if block_on_each_iteration:
                if index != 0:
                    click.echo(err_explaination.format(line=line_contents, invalid=err['errors']))
                click.echo(strategy_prompt)
                strategy = get_user_input(
                    allowed_map=allowed_inputs, invalid_msg=invalid_input_msg)
                if index == 0:
                    block_on_each_iteration = not click.confirm(
                        'Do you want to do this for the rest of the errors?')
            # SKIP_LINE leaves the line untouched (no branch needed).
            if strategy == STRIP_ERRORS:
                csv_data[err['line_num']] = fix_line(
                    line_contents, err, strip=True)
            elif strategy == REMOVE_LINE:
                # fix_line without strip returns '' — the line is dropped on
                # write below (len(l) > 0 filter).
                csv_data[err['line_num']] = fix_line(line_contents, err)
            click.echo('--------------------')
        fixed_file_name = 'fixed-{}'.format(context.obj['file'].name)
        with open(fixed_file_name, 'w') as f:
            new_csv_data = ''.join('{}\n'.format(l) for l in csv_data
                                   if len(l) > 0)
            f.write(new_csv_data)
        click.echo('{} saved to current directory'.format(fixed_file_name))
def fix_line(line_contents, error, strip=False):
    '''Return the repaired line.

    With strip=True the characters listed in error['errors'] are removed and
    the remaining characters kept; otherwise an empty string is returned,
    which marks the whole line for deletion.
    '''
    if strip:
        bad_chars = error['errors']
        return ''.join(ch for ch in line_contents if ch not in bad_chars)
    return ''
def get_validator(csv, rules, rule_name):
    """Read *csv* and construct a Validator for it, translating the library's
    exceptions into user-facing click errors.

    Returns None on the error paths whose handler does not raise.
    """
    csv_data = csv.readlines()
    try:
        return Validator(csv_data, rules, rule_name=rule_name)
    except FileNewLineError:
        # BUG FIX: was handle_file_new_line_error(promt_fix=True) — the
        # keyword was misspelled AND the required positional `context` was
        # missing, so this raised TypeError instead of handling the error.
        # The handler never reads `context`, so None is safe to pass.
        handle_file_new_line_error(None, prompt_fix=True)
    except RuleDoesNotExist:
        handle_rule_does_not_exist_error(rule_name)
    except RuleCannotBeParsed:
        handle_rule_cannot_be_parsed_error()
    except Exception:
        print_prog_error(
            'There was a fatal error, the file was not processed.',
            tb=format_exc())
# Report a non-standard-line-ending failure. With prompt_fix=True, offer to
# write a corrected copy immediately; otherwise print the suggested command
# and abort with a click parameter error.
# NOTE(review): the `context` parameter is never used in this body — it looks
# like the function was meant to be decorated with @click.pass_context (like
# handle_fix); confirm before relying on it.
def handle_file_new_line_error(context, prompt_fix=False):
    err_message = 'CSV file has non-standard line endings and cannot be parsed as csv.'
    suggestion = 'Use the fix command to fix line endings.'
    example = 'csvcheck /path/to/file.csv rule_name fix -l'
    click.echo(err_message, err=True, color='red')
    if prompt_fix:
        if click.confirm(
            'Save new file with fixed line endings to the current directory?'
        ):
            # handle_fix is @click.pass_context-decorated, so the context is
            # injected automatically; (True, False) = fix line endings only.
            handle_fix(True, False)
    else:
        click.echo(suggestion, err=True, color='yellow')
        click.echo(example, err=True)
        raise click.BadParameter(
            'csv file contains malformed line ending characters')
def handle_rule_does_not_exist_error(rule_name):
    """Abort with a usage error naming the rule missing from the rules JSON."""
    raise click.BadArgumentUsage(
        'Rule "{}" does not exist in rules json.'.format(rule_name))
# Abort when the --rules-string JSON cannot be parsed.
# NOTE(review): click >= 7.0 requires BadOptionUsage(option_name, message);
# this single-argument call only works on older click — confirm the pinned
# click version or pass '--rules-string' as the option name.
def handle_rule_cannot_be_parsed_error():
    raise click.BadOptionUsage('invalid json passed into --rules-string')
def pretty_errors(errors, csv_data):
    '''returns a list of end-user friendly formatted strings describing the errors '''
    template = 'L{line}:{col} ({errors}) --> "{contents}"'
    formatted = []
    for err in errors:
        row = err['line_num']
        col = err['col_num']
        formatted.append(template.format(
            line=row + 1,
            # column 0 means "whole line": omit the column part entirely
            col='C{}:'.format(col) if col != 0 else '',
            contents=csv_data[row],
            errors=err['errors']))
    return formatted
def fix_line_endings(csv_file, out_path=None):
    '''accept file object, remove bad line endings, write to a file of the same
    basename with fixed- as a prefix

    options
    out_path: write file to a specific path instead of the same path as csv

    Windows line endings can have a carriage return in addition to the \\n
    char - csv has trouble parsing these line endings, so normalise them here
    instead of opening the file in universal line endings mode since it's a
    big reason uploads fail.
    '''
    raw = csv_file.read()
    # BUG FIX: the old replace('\r', '\n') turned every Windows "\r\n" ending
    # into "\n\n", doubling blank lines. Normalise CRLF first, then lone CR
    # (old Mac endings), so each logical line ends in exactly one "\n".
    contents = raw.replace('\r\n', '\n').replace('\r', '\n')
    # BUG FIX: out_path was documented but silently ignored; honour it.
    out_file_name = out_path if out_path else 'fixed-{}'.format(csv_file.name)
    with open(out_file_name, mode='w') as out_file:
        out_file.write(contents)
    return out_file_name
def get_user_input(prompt_str='>>> ',
                   allowed_map=None,
                   invalid_msg='{} is invalid input please try again.'):
    """Prompt until the user supplies valid input and return the selection.

    With allowed_map, the lowercased/stripped input is looked up in the map
    and the mapped value is returned; without it, any non-empty input is
    returned as-is.
    """
    while True:
        raw = click.prompt(prompt_str, prompt_suffix='').lower().strip()
        selection = allowed_map.get(raw) if allowed_map else raw
        if selection:
            return selection
        # NOTE: mirrors the original behaviour — the (falsy) mapped value,
        # not the raw input, is interpolated into the message.
        click.echo(invalid_msg.format(selection)
                   if '{}' in invalid_msg else invalid_msg)
def print_csv_errors(errors):
    """Echo each formatted validation error on its own line."""
    for err in errors:
        click.echo(err)
def print_prog_error(msg, tb=None):
    """Print a program-level error *msg* to stderr, followed by the optional
    traceback text *tb* (the string produced by traceback.format_exc())."""
    click.echo(msg, err=True)
    if tb:
        # BUG FIX: `for line in tb` iterated the traceback STRING one
        # character at a time, echoing each char on its own line.
        for line in tb.splitlines():
            click.echo(line)
|
from django.shortcuts import render, HttpResponse,redirect
from django.http import JsonResponse
def root(request):
    """Send the site root to the blog index."""
    return redirect("/blog")
#/blogs - display the string "placeholder to later display a list of all blogs" with a method named "index"
def index(request):
    """GET /blogs — placeholder for the list of all blogs."""
    body = "placeholder to later display a list of all blogs"
    return HttpResponse(body)
#/blogs/new - display the string "placeholder to display a new form to create a new blog" with a method named "new"
def new(request):
    """GET /blogs/new — placeholder for the new-blog form."""
    body = "placeholder to display a new form to create a new blog"
    return HttpResponse(body)
#/blogs/create - redirect to the "/" route with a method called "create"
def create(request):
    """POST /blogs/create — redirect to the "/" route.

    BUG FIX: was redirect("") which issues an empty Location header
    (relative redirect back to the same URL); the spec comment above says
    this must redirect to the "/" route.
    """
    return redirect("/")
#/blogs/< number > - display the string "placeholder to display blog number: {number}" with a method named "show"
#(eg. localhost:8000/blogs/15 should display the message: 'placeholder to display blog number 15')
def show(request, number):
    """GET /blogs/<number> — placeholder for a single blog page."""
    return HttpResponse("placeholder to display blog number: {}".format(number))
#/blogs/< number >/edit - display the string "placeholder to edit blog {number}" with a method named "edit"
def edit(request, number):
    """GET /blogs/<number>/edit — placeholder for the edit form."""
    return HttpResponse("placeholder to edit blog {}".format(number))
# /blogs/< number >/delete - redirect to the "/blogs" route with a method called "destroy"
def destroy(request, number):
    """GET /blogs/<number>/delete — redirect back to the blog list."""
    return redirect("/blog")
# (**Bonus**) /blogs/json - return a JsonResponse with title and content keys.
def jason(request):
    """GET /blogs/json — JsonResponse with title and content keys.

    BUG FIX: the spec comment above requires `title` and `content` keys,
    but the response carried a misspelled "Tittle" and an unrelated
    "status" key.
    """
    return JsonResponse({"title": "My First Blog",
                         "content": "This is my first blog."})
|
# Render 1r_final.pdb as a cartoon coloured by per-CA RMSF values.
# BUG FIX: the original mixed bare PyMOL command syntax ("alter 1r_final, ...")
# into a .py file, which is a SyntaxError under the Python interpreter; those
# lines are now proper cmd.alter() calls, and `stored` is imported explicitly
# instead of relying on PyMOL's implicit namespace.
from pymol import cmd, stored

cmd.load("1r_final.pdb")
cmd.hide("lines")
cmd.show("cartoon")
cmd.set("cartoon_fancy_helices", 1)
cmd.set("ray_trace_mode", 1)  # NOTE: overridden to 0 below (kept from original)
cmd.set("two_sided_lighting", "on")
cmd.set("reflect", 0)
cmd.set("ambient", 0.5)
cmd.set("ray_trace_mode", 0)
cmd.set("ray_opaque_background", "off")

# Read one RMSF value per line; consumed in order by the CA alter below.
with open("rmsf_pymol", "r") as in_file:
    stored.newB = [float(line) for line in in_file]

# Zero every B factor, then load the RMSF values into the alpha carbons.
cmd.alter("1r_final", "b=0.0")
cmd.alter("1r_final and n. CA", "b=stored.newB.pop(0)")

# color the protein based on the new B Factors of the alpha carbons
cmd.spectrum("b", "1r_final and n. CA")
cmd.png("bfac_1r_final.png", width=1800, height=1100, dpi=600, ray=1)
|
def omit_hashtag(message, hashtag):
    """Return *message* with only the first occurrence of *hashtag* removed.

    If the hashtag is absent, the message is returned unchanged. Surrounding
    whitespace is NOT collapsed (matching the kata's examples).
    """
    before, found, after = message.partition(hashtag)
    return before + after if found else message
'''
The local transport authority is organizing an online picture contest.
Participants must take pictures of transport means in an original way,
and then post the picture on Instagram using a specific hashtag.
The local transport authority needs your help. They want you to take out
the hashtag from the posted message. Your task is to implement the function
def omit_hashtag(message, hashtag):
Examples
* ("Sunny day! #lta #vvv", "#lta") -> "Sunny day! #vvv" (notice the double space)
* ("#lta #picture_contest", "#lta") -> " #picture_contest"
Notes
When multiple occurrences of the hashtag are found, delete only the first one.
In C, you should modify the message, as the function returns a void type.
In Python, you should return the answer.
There can be erroneous messages where the hashtag isn't present.
The message should in this case stay untouched.
The hashtag only consists of alphanumeric characters.
'''
|
# Program to calculate the exponentials.
# BUG FIX / modernisation: the original was Python 2 (print statement, and
# input() which eval'd the typed text). Ported to Python 3 with explicit
# int() conversion; the printed output is identical.
base = int(input("Enter the base value : "))
ex = int(input("Enter exponent : "))
p = 1
for _ in range(ex):  # multiply base into p, ex times
    p = p * base
print(base, "raised to exponent", ex, " = ", p)
|
''' The application.
'''
import os

from flask import Flask

# Socket.IO transport settings used by create_app() below.
ASYNC_MODE = 'threading'
PING_INTERVAL = 59

# Create and configure application (module-level so extensions/views can
# import `app` directly). All secrets come from the environment.
app = Flask(__name__)
app.config['SECRET_KEY'] = os.getenv('FLASK_SECRET_KEY')
app.config['SQLALCHEMY_DATABASE_URI'] = os.getenv('SQLALCHEMY_DATABASE_URI')
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
app.config['BLOOM_FILTER_URL'] = os.getenv('BLOOM_FILTER_URL')
def create_app():
    ''' The application factory.

    Imports are deferred to avoid circular imports: `views` is imported for
    its route-registration side effects only, and `extensions` holds the
    shared db / socketio instances bound to `app` here.
    '''
    from testsuite import (
        extensions,
        views,
    )
    # Init extensions
    extensions.db.init_app(app)
    extensions.socketio.init_app(
        app,
        async_mode=ASYNC_MODE,
        ping_interval=PING_INTERVAL,
        message_queue='redis://'
    )
    return app
|
#-------------------------------------------------------------------------------
# Name: module1
# Purpose:
#
# Author: Stephanie
#
# Created: 08/03/2015
# Copyright: (c) Stephanie 2015
# Licence: <your licence>
#-------------------------------------------------------------------------------
import pdb
import matplotlib.pyplot as plt
import numpy as np
import mlpy
from sklearn.datasets import load_svmlight_file
from sklearn import linear_model, metrics, neighbors, preprocessing
from sklearn.naive_bayes import BernoulliNB
from sklearn.ensemble import RandomForestClassifier
from sklearn.grid_search import GridSearchCV
def preprocessBNB(X):
    '''L2-normalise each sample; used for the Bernoulli NB (and KNN) runs.'''
    normalizer = preprocessing.Normalizer(norm='l2')
    return normalizer.fit_transform(X)
def preprocessSGD(X):
    '''Standardise features (zero mean, unit variance) for the SGD models.'''
    standardiser = preprocessing.StandardScaler()
    return standardiser.fit_transform(X)
def preprocessLG(X):
    '''Rescale each feature into [0, 1] for the logistic-regression run.'''
    rescaler = preprocessing.MinMaxScaler()
    return rescaler.fit_transform(X)
def preprocessRF(X):
    '''Binarise features at the global 70th percentile of X (random forest).'''
    cutoff = np.percentile(X, 70)
    binarizer = preprocessing.Binarizer(threshold=cutoff)
    return binarizer.fit_transform(X)
def LG(X, y):
    '''Fit logistic regression via SGD (log loss) and return the fitted model.'''
    model = linear_model.SGDClassifier(loss='log', alpha=0.02, n_iter=210,
                                       penalty='l2', shuffle=True,
                                       eta0=0.0001)
    return model.fit(X, y)
def BNB(X, y):
    '''Fit a Bernoulli naive Bayes classifier and return the fitted model.'''
    model = BernoulliNB()
    return model.fit(X, y)
def randfor(X, y):
    '''Fit a 20-tree random forest (max depth 15) and return the fitted model.'''
    forest = RandomForestClassifier(n_estimators=20, max_depth=15)
    forest.fit(X, y)
    return forest
def SGD(X, y):
    '''Fit a linear SVM via SGD (hinge loss) and return the fitted model.'''
    model = linear_model.SGDClassifier(loss='hinge', alpha=.02, n_iter=210,
                                       penalty='l2', shuffle=True,
                                       eta0=0.001)
    return model.fit(X, y)
def KNN(X, y):
    '''Fit a distance-weighted 5-nearest-neighbours classifier.'''
    model = neighbors.KNeighborsClassifier(n_neighbors=5, weights='distance')
    return model.fit(X, y)
def test(model, X_test, y_test):
    '''Print the model accuracy (correct count and fraction) on (X_test,
    y_test) and display its confusion matrix with matplotlib.
    '''
    # PERF FIX: model.predict(X_test) was recomputed three times.
    predictions = model.predict(X_test)
    cm = metrics.confusion_matrix(y_test, predictions)
    # Parenthesised single-argument prints run under both Python 2 and 3
    # (this file otherwise uses Python 2 print statements).
    print(metrics.accuracy_score(y_test, predictions, normalize = False))
    print(metrics.accuracy_score(y_test, predictions))
    plt.matshow(cm, cmap = 'jet')
    plt.colorbar()
    plt.ylabel('True label', fontsize = 18)
    plt.xlabel('Predicted label', fontsize = 18)
    plt.gca().xaxis.set_label_position('top')
    plt.xscale('linear')
    plt.yscale('linear')
    # 10 classes: MNIST digits 0-9.
    plt.xticks(np.arange(0,10,1))
    plt.yticks(np.arange(0,10,1))
    plt.suptitle('Confusion matrix for LG', fontsize = 24)
    plt.show()
    return
def main():
    '''Load the MNIST svmlight data, preprocess, train the selected model
    (currently random forest; the other pipelines are kept commented out as
    experiment toggles) and report train/test accuracy.'''
    ''' Get Data '''
    x_train, y_train = load_svmlight_file('mnist')
    x_train = x_train.toarray()
    x_test, y_test = load_svmlight_file('mnist.t')
    x_test = x_test.toarray()
    # std_scale = preprocessing.StandardScaler().fit(x_train)
    # x_train = std_scale.transform(x_train)
    #is this okay???
    # NOTE(review): x_test gains TWO leading dummy features here while x_train
    # gains none — likely a feature-count workaround; confirm intent.
    x_test = preprocessing.add_dummy_feature(x_test)
    x_test = preprocessing.add_dummy_feature(x_test)
    ''' KNN '''
    # proc_x_trainKNN = preprocessBNB(x_train)
    # proc_x_testKNN = preprocessBNB(x_test)
    #
    # modelKNN = KNN(proc_x_trainKNN, y_train)
    #
    # print "KNN training accuracy:"
    # test(modelKNN, proc_x_trainKNN, y_train)
    # print "KNN test accuracy"
    # test(modelKNN, proc_x_testKNN, y_test)
    # ''' random forest '''
    # proc_x_trainRF = preprocessBNB(x_train)
    # proc_x_testRF = preprocessBNB(x_test)
    proc_x_trainRF = preprocessRF(x_train)
    proc_x_testRF = preprocessRF(x_test)
    modelRF = randfor(proc_x_trainRF, y_train)
    print "RF training accuracy:"
    test(modelRF, proc_x_trainRF, y_train)
    print "RF test accuracy"
    test(modelRF, proc_x_testRF, y_test)
    # ''' Logistic Regression '''
    # proc_x_trainLG = preprocessLG(x_train)
    # proc_x_testLG = preprocessLG(x_test)
    #
    # model_LG = LG(proc_x_trainLG, y_train)
    #
    # print "LG accuracy on training data:"
    # test(model_LG, proc_x_trainLG, y_train)
    # print "LG accuracy on testing data:"
    # test(model_LG, proc_x_testLG, y_test)
    ''' Naive Bayes '''
    # proc_x_trainBNB = preprocessBNB(x_train)
    # proc_x_testBNB = preprocessBNB(x_test)
    #
    # model_BNB = BNB(proc_x_trainBNB, y_train)
    #
    # print "BNB accuracy on training data:"
    # test(model_BNB, proc_x_trainBNB, y_train)
    # print "BNB accuracy on testing data:"
    # test(model_BNB, proc_x_testBNB, y_test)
    ''' SVM '''
    # proc_x_trainSGD = preprocessSGD(x_train)
    # proc_x_testSGD = preprocessSGD(x_test)
    #
    # model_SGD = SGD(proc_x_trainSGD, y_train)
    #
    # print "SGD accuracy on training data:"
    # test(model_SGD, proc_x_trainSGD, y_train)
    # print "SGD accuracy on testing data:"
    # test(model_SGD, proc_x_testSGD, y_test)
    return

if __name__ == '__main__':
    main()
import types
from openerp import models, fields, api, _
class DocumentTemplateFunctionJinja(models.Model):
    """Extends document.template.function with flags that decide whether each
    stored lambda is exposed to the Jinja environment as a filter, a test,
    and/or a global function."""
    _inherit = 'document.template.function'
    # Where the stored lambda should be registered in the Jinja environment.
    is_filter = fields.Boolean('Is Filter')
    is_test = fields.Boolean('Is Test')
    is_function = fields.Boolean('Is Function')
    @api.one
    @api.constrains('code')
    def _check_code(self):
        # Validate at save time that `code` evaluates to a lambda.
        # SECURITY NOTE(review): eval() of a user-editable field executes
        # arbitrary Python in the server process — restrict write access to
        # this model to trusted administrators.
        try:
            l = eval(self.code)
            if not isinstance(l, types.LambdaType):
                raise ValueError(_("The python code must be a lambda."))
        except (SyntaxError, NameError, ):
            raise ValueError(_("The python code must be a lambda."))
    _sql_constraints = [
        ('name', 'unique(name)', 'The name must be unique')
    ]
    @api.multi
    def jinja_get_all(self):
        # Evaluate every stored lambda once, then bucket the callables by
        # their is_filter / is_test / is_function flags (a record may appear
        # in several buckets).
        functions = {
            f.name: {
                'function': eval(f.code),
                'record': f
            } for f in self.search([])
        }
        return {
            'filters': {
                key: functions[key]['function'] for key in functions if functions[key]['record'].is_filter
            },
            'tests': {
                key: functions[key]['function'] for key in functions if functions[key]['record'].is_test
            },
            'functions': {
                key: functions[key]['function'] for key in functions if functions[key]['record'].is_function
            },
        }
import RPi.GPIO as GPIO
from influxdb import InfluxDBClient
from datetime import datetime,time
from time import sleep
from pytz import timezone
import logging,pdb
'''initial var'''
# BCM pin number driving the water relay (active-low, see water_on/off).
RELAIS_4_GPIO = 22
# Seconds between watering cycles (currently unused in main()).
sleep_time = 600
# InfluxDB connection settings for the LIGHT database.
influxdb_user = 'pippo'
influxdb_password = 'pippopassword'
influxdb_db = 'LIGHT'
influxdb_host = 'localhost'
influxdb_port = 8086
influxdbclient = InfluxDBClient(influxdb_host, influxdb_port, influxdb_user, influxdb_password, influxdb_db)
# NOTE(review): tz is not a valid IANA zone name ('Europe/Rome' is) — confirm
# before passing it to pytz.timezone().
tz = 'Rome'
GPIO.setwarnings(False)
'''logging config'''
logging.basicConfig(
    level=logging.INFO,
    filename='water.log',
    format='[%(asctime)s] %(levelname)s:%(message)s',
    datefmt='%m/%d/%Y %I:%M:%S %p')
'''functions'''
def water_on():
    """Open the water valve (relay is active-low: LOW energises it)."""
    GPIO.output(RELAIS_4_GPIO, GPIO.LOW)
    logging.debug('Water On')
    return True
def water_off():
    """Close the water valve (relay is active-low: HIGH de-energises it)."""
    GPIO.output(RELAIS_4_GPIO, GPIO.HIGH)
    logging.debug('Water Off')
    return True
def water_status():
    """Log and print whether the water is currently running.

    The relay is active-low, so a pin reading of 1 means OFF.
    """
    pin_status = GPIO.input(RELAIS_4_GPIO)
    print('pin status: ', pin_status, type(pin_status))
    state = 'OFF' if pin_status == 1 else 'ON'
    message = 'Water is {}'.format(state)
    logging.info(message)
    print(message)
    return True
def insert_influxdb_row(value):
    """Write one 'water' measurement point (tagged host=cortile) to InfluxDB.

    The timestamp is the current UTC time (naive datetime, as InfluxDB
    expects UTC).
    """
    utcnow = datetime.utcnow()
    point = {
        "measurement": "water",
        "tags": {
            "host": "cortile"
        },
        "time": utcnow,
        "fields": {
            "value": value
        }
    }
    logging.debug('writing a value to influxdb with time ' + utcnow.strftime("%H:%M:%S"))
    influxdbclient.write_points([point])
def main():
    """Configure the relay GPIO pin and report the current water state."""
    logging.info('Starting up ')
    # GPIO settings: BCM numbering, relay pin as an output.
    GPIO.setmode(GPIO.BCM)
    GPIO.setup(RELAIS_4_GPIO, GPIO.OUT)
    water_status()


if __name__ == "__main__":
    main()
|
"""
leg @ rig
"""
import maya.cmds as mc
from .. base import module
from .. base import control
from ..utils import joint
from ..utils import name
from cmath import polar
def build(
        legJoints,
        topToeJoints,
        pvLocator,
        scapJoint = '',
        prefix = 'lf_leg',
        rigScale = 1.0,
        baseRig = None
        ):
    """
    Build an IK leg rig module (optionally with a scapula) and return its parts.

    @param legJoints: list( str ), shoulder - elbow - hand - toe - end toe
    @param topToeJoints: list( str ), top metacarpal toe joints
    @param pvLocator: str, reference locator for position of Pole Vector control
    @param scapJoint: str, optional, scapula joint, parent of top leg joint
    @param prefix: str, prefix to name new objects
    @param rigScale: float, scale factor for size of controls
    @param baseRig: baseRig: instance of base.module.Base class
    @return: dictionary with rig module objects
    """
    # (a stray `pass` statement before this point was removed — dead code)

    # make rig Module
    rigModule = module.Module(prefix = prefix, baseObj = baseRig)
    bodyAttachGrp = mc.group(n = prefix + 'bodyAttachGrp', em = 1, p = rigModule.partsGrp)
    baseAttachGrp = mc.group(n = prefix + 'baseAttachGrp', em = 1, p = rigModule.partsGrp)

    # make ctls
    if scapJoint:
        scapCtl = control.Control(
            prefix = prefix + 'Scap',
            translateTo = scapJoint,
            rotateTo = scapJoint,
            scale = rigScale * 3,
            parent = rigModule.controlsGrp,
            importCurve = 'sphere',
            lockChannels = ['ty','rx','rz','s','v']
            )
    footCtl = control.Control(
        prefix = prefix + 'Foot',
        translateTo = legJoints[2],
        scale = rigScale * 3,
        parent = rigModule.controlsGrp,
        orient = [0,1,0]
        )
    ballCtl = control.Control(
        prefix = prefix + 'ball',
        translateTo = legJoints[3],
        rotateTo = legJoints[3],
        scale = rigScale * 2,
        parent = footCtl.C,
        orient = [0,0,1],
        lockChannels = ['ty','rx','rz','s','v']
        )
    pvCtl = control.Control(
        prefix = prefix + 'PV',
        translateTo = pvLocator,
        scale = rigScale,
        parent = rigModule.controlsGrp,
        importCurve = 'sphere'
        )
    # one IK control per metacarpal toe chain, placed at the toe tip
    toeIKCtls = []
    for jnt in topToeJoints:
        toePrefix = name.removeSuffix(jnt)[:-1]
        toeEndJnt = mc.listRelatives(jnt, ad = 1, type = 'joint')[0]
        toeIKCtl = control.Control(
            prefix = toePrefix,
            translateTo = toeEndJnt,
            scale = rigScale,
            parent = footCtl.C,
            orient = [0,1,0]
            )
        toeIKCtls.append(toeIKCtl)

    # make IK handles
    if scapJoint:
        scapIK = mc.ikHandle(n = prefix + 'Scap_ikh', sol = 'ikSCsolver', sj = scapJoint, ee = legJoints[0])[0]
    legIK = mc.ikHandle(n = prefix + 'Leg_ikh', sol = 'ikRPsolver', sj = legJoints[0], ee = legJoints[2])[0]
    ballIK = mc.ikHandle(n = prefix + 'Ball_ikh', sol = 'ikSCsolver', sj = legJoints[2], ee = legJoints[3])[0]
    mainToeIK = mc.ikHandle(n = prefix + 'MainToe_ikh', sol = 'ikSCsolver', sj = legJoints[3], ee = legJoints[4])[0]
    mc.hide(legIK, ballIK, mainToeIK)
    for i, jnt in enumerate(topToeJoints):
        toePrefix = name.removeSuffix(jnt)[:-1]
        toeJoints = joint.listHierarchy(jnt)
        toeIK = mc.ikHandle(n = toePrefix + '_ikh', sol = 'ikSCsolver', sj = toeJoints[1], ee = toeJoints[-1])[0]
        mc.hide(toeIK)
        mc.parent(toeIK, toeIKCtls[i].C)

    # attach Controls
    mc.parentConstraint(bodyAttachGrp, pvCtl.Off, mo = 1)
    if scapJoint:
        # BUG FIX: was mc.ParentConstraint (capital P) — maya.cmds has no such
        # attribute, so any rig with a scapula raised AttributeError here.
        mc.parentConstraint(baseAttachGrp, scapCtl.Off, mo = 1)

    # attach objects to controls
    mc.parent(legIK, ballCtl.C)
    mc.parent(ballIK, mainToeIK, footCtl.C)
    mc.poleVectorConstraint(pvCtl.C, legIK)
    if scapJoint:
        mc.parent(scapIK, scapCtl.C)
        mc.pointConstraint(scapCtl.C, scapJoint)

    # guide line from the knee to the pole vector control
    pvLinePos1 = mc.xform(legJoints[1], q = 1, t = 1, ws = 1)
    pvLinePos2 = mc.xform(pvLocator, q = 1, t = 1, ws = 1)
    pvCrv = mc.curve(n = prefix + 'pv_crv', d = 1, p = [pvLinePos1, pvLinePos2])
    mc.cluster(pvCrv + '.cv[0]', n = prefix + 'pv1_cls', wn = [legJoints[1], legJoints[1]], bs = 1)
    mc.cluster(pvCrv + '.cv[1]', n = prefix + 'pv2_cls', wn = [pvCtl.C, pvCtl.C], bs = 1)
    mc.parent(pvCrv, rigModule.controlsGrp)
    mc.setAttr(pvCrv + '.template', 1)

    return { 'module':rigModule, 'baseAttachGrp':baseAttachGrp, 'bodyAttachGrp':bodyAttachGrp }
|
# CBC bit-flipping attack: forge the token for "...xu0n7" from a token issued
# for "...xu0n6" by XOR-flipping one ciphertext/IV byte.
import sys
sys.path.insert(1, '/home/jimmy/ctf/tools')
from base64 import b64encode, b64decode
from cryptotools import *
# token we want to forge: king-horse-5diuoe7tpxjen8xu0n7
print(len('king-horse-5diuoe7tpxjen8xu0n7'))
# need something of length 30 then we need something close so we can flip the right area
# token for king-horse-5diuoe7tpxjen8xu0n6
token = 'EiSCVBOXLUM/TeKcsmaYC8gUDKDYXmH1ynRwP9grJpCBLL/QPXm9ZO1emN8BFc5VTXeBF6bvtyRn3w=='
token_buff = bytearray(b64decode(token))
# goal is to change the char at pos len(iv) + 29 [30th character]
# Flipping ciphertext byte at offset (iv_len + i) flips plaintext byte i of
# the NEXT block on decryption; here it targets the trailing '6'.
# NOTE(review): assumes a 12-byte IV/nonce prefix before the ciphertext —
# confirm against the token format.
pos = 12 + 29
# XOR with ('6' ^ '7') turns a decrypted '6' into '7' at that position.
token_buff[pos] = token_buff[pos] ^ ord('6') ^ ord('7')
print(b64encode(token_buff))
|
from tkinter import *
from tasks import *
import json
# Truncate the task store on startup so each session begins empty.
f=open("tasks.json","w")
f.write("")
f.close()
# Build the main window: a label showing current tasks and an entry field
# for new task titles (the "add" button is wired up below).
window = Tk()
window.title("Welcome to Repl.it")
window.geometry('350x200')
lbl = Label(window, text="Empty")
lbl.grid(column=0, row=1)
txt = Entry(window,width=10)
txt.grid(column=1, row=0)
def clicked():
    """Handle the "add" button: store the entry text as a new task via
    createTask(), then refresh the label with all task titles."""
    createTask(txt.get())
    # FIX: read the store once under a context manager — the original leaked
    # the file handle and read the file twice (read, seek(0), read again).
    with open("tasks.json", "r") as f:
        content = f.read()
    if content:
        tasks = [value["title"] for value in json.loads(content).values()]
    else:
        tasks = ""
    lbl.configure(text=tasks)
# Wire the "add" button to the clicked() handler and start the Tk event loop.
btn = Button(window, text="add", command=clicked)
btn.grid(column=2, row=0)
print("hello world")
window.mainloop()
# NOTE(review): the stdlib unittest package does not export a callable named
# `test`; this import only resolves in an environment (e.g. a course sandbox)
# that provides it — confirm before reuse.
from unittest import test
from notes import *

test(29, greet("Jeremy"))
# Generated by Django 2.1.5 on 2019-02-08 04:20
from django.db import migrations, models
import django.db.models.deletion


class Migration(migrations.Migration):
    """Rename OrderItem to OfferItem and rewire its FK from Order to Offer.

    Auto-generated migration: operation order matters, do not reorder.
    """

    dependencies = [
        ('books', '0010_auto_20190202_2240'),
        ('orders', '0003_offer'),
    ]

    operations = [
        migrations.RenameModel(
            old_name='OrderItem',
            new_name='OfferItem',
        ),
        migrations.RemoveField(
            model_name='offer',
            name='book',
        ),
        migrations.RemoveField(
            model_name='offeritem',
            name='order',
        ),
        migrations.AddField(
            model_name='offeritem',
            name='offer',
            # default=1 with preserve_default=False: the 1 only back-fills
            # existing rows during this migration.
            field=models.ForeignKey(default=1, on_delete=django.db.models.deletion.CASCADE, related_name='item', to='orders.Offer'),
            preserve_default=False,
        ),
    ]
|
import fournisseur
class Concessionnaire:
    """A car dealership: location, staff count, car count and available stock."""

    def __init__(self, lieu, nbEmployes, nbVoiture, voitureDispo):
        # Map the constructor arguments onto the descriptive attribute names.
        (self.adresse,
         self.nombreEmployes,
         self.nombreVoiture,
         self.voitureDisponible) = (lieu, nbEmployes, nbVoiture, voitureDispo)
# NOTE(review): this module-level instance is created on every import and is
# immediately shadowed when run as a script — likely leftover debugging.
concessionnaire = Concessionnaire("France", 10, 30, fournisseur.fournisseur.voiture)

if __name__ == "__main__":
    concessionnaire = Concessionnaire("France", 15, 30, fournisseur.fournisseur.voiture)
    print(concessionnaire.voitureDisponible)
import _thread
import time
def fun1(tread_name, delay):
    """Demo thread body: announce start, sleep `delay` seconds, announce end."""
    print(f'开始运行fun1,线程的名字: {tread_name}')
    time.sleep(delay)
    print('fun1结束运行')
def fun2(tread_name, delay):
    """Demo thread body: announce start, sleep `delay` seconds, announce end."""
    print(f'开始运行fun2,线程的名字: {tread_name}')
    time.sleep(delay)
    print('fun2结束运行')
if __name__=='__main__':
    print('开始运行')  # "starting" (translated)
    # NOTE(review): _thread offers no join(); the main thread may exit before
    # these workers finish — the higher-level threading module is more robust.
    _thread.start_new_thread(fun1,('thread-1',2))
    _thread.start_new_thread(fun2, ('thread-2', 3))
# Quick exploratory look at the iris dataset with seaborn.
import seaborn as sns
import matplotlib.pyplot as plt

iris = sns.load_dataset("iris")
print(iris.head())
print(iris.shape)
print(iris.describe())
# jointplot/pairplot return grid objects; printing them shows their repr.
print(sns.jointplot(x="sepal_length", y="sepal_width", data=iris))
plt.show()
print(sns.pairplot(iris))
# BUG FIX: plt.show(0) passed 0 as a positional argument; matplotlib >= 3.3
# only accepts the keyword form (block=...), so call it plainly.
plt.show()
# Program to print the longest string
def longest_string(s):
    """Print every word of *s* (split on single spaces) whose length equals
    the maximum word length, one word per line, in original order.

    Fixes: no longer shadows the built-in ``max``, and finds the maximum
    length with the built-in instead of a hand-rolled scan.
    """
    words = s.split(" ")
    longest = max(len(word) for word in words)
    for word in words:
        if len(word) == longest:
            print(word)
# Demo call: "Shape" (5 letters) is the single longest word.
longest_string ("Shape of you Xp")
|
#!/usr/bin/python
import os
import sys
sys.path.append("/home/penguinofdoom")
sys.path.append("/home/penguinofdoom/Projects")
sys.path.append("/home/penguinofdoom/Projects/Retina")
import time
import random as rnd
import commands as comm
import itertools
import numpy as np
import MultiNEAT as NEAT
import multiprocessing as mpc
import fast_cppn as fc
import es_hyperNeat as es
from quad_tree_v2 import *
import itertools
import os.path
import traceback
import fast_cppn
import scipy.stats as ss
# NEAT parameters
params = NEAT.Parameters()
params.PopulationSize = 150
params.DynamicCompatibility = True
# NOTE: the "Treshold" spellings below are MultiNEAT's actual attribute names.
params.CompatTreshold = 2.0
params.YoungAgeTreshold = 15
params.SpeciesMaxStagnation = 100
params.OldAgeTreshold = 35
params.MinSpecies = 5
params.MaxSpecies = 25
params.RouletteWheelSelection = False
# Mutation operator rates.
params.MutateRemLinkProb = 0.02
params.RecurrentProb = 0
params.OverallMutationRate = 0.15
params.MutateAddLinkProb = 0.08
params.MutateAddNeuronProb = 0.01
params.MutateWeightsProb = 0.90
params.MaxWeight = 8.0
params.WeightMutationMaxPower = 0.2
params.WeightReplacementMaxPower = 1.0
params.MutateActivationAProb = 0.0
params.ActivationAMutationMaxPower = 0.5
params.MinActivationA = 0.05
params.MaxActivationA = 6.0
params.MutateNeuronActivationTypeProb = 0.03;
# Probabilities for a particular activation function appearance
params.ActivationFunction_SignedSigmoid_Prob = 1.0
params.ActivationFunction_UnsignedSigmoid_Prob = 0.0
params.ActivationFunction_Tanh_Prob = 0.0
params.ActivationFunction_TanhCubic_Prob = 0.0
params.ActivationFunction_SignedStep_Prob = 1.0
params.ActivationFunction_UnsignedStep_Prob = 0.0
params.ActivationFunction_SignedGauss_Prob = 1.0
params.ActivationFunction_UnsignedGauss_Prob = 0.0
params.ActivationFunction_Abs_Prob = 1.0
params.ActivationFunction_SignedSine_Prob = 1.0
params.ActivationFunction_UnsignedSine_Prob = 0.0
params.ActivationFunction_Linear_Prob = 1.0
# Params for ES (evolvable-substrate HyperNEAT division/variance settings).
params.DivisionThreshold = 0.03
params.VarianceThreshold = 0.03
params.BandThreshold = 0.3
params.InitialDepth = 5
params.MaxDepth = 5
params.IterationLevel = 1
params.Leo = True;
params.LeoThreshold = 0.2;
params.MutualConnection = True
# Seed the NEAT RNG from the wall clock.
rng = NEAT.RNG()
rng.TimeSeed()
# 4-value half-patterns classified as a "left object" (activations are +/-3).
left_patterns = [
    (3.,3., 3.,3.),
    (3.,3., 3.,-3.),
    (3.,-3., 3.,-3.),
    (3.,-3., 3.,3.),
    (3.,-3.,-3.,-3.),
    (3.,3.,-3.,3.),
    (-3.,-3.,3.,-3.),
    (-3.,3.,3.,3.)
]
# 4-value half-patterns classified as a "right object".
right_patterns = [
    (3.,3.,3.,3.),
    (-3.,3.,3.,3.),
    (-3.,3.,-3.,3.),
    (3.,3.,-3.,3.),
    (-3.,-3.,-3.,3.),
    (3.,-3.,3.,3.),
    (-3.,3.,-3.,-3.),
    (3.,3.,3.,-3.)
]
# All 2**8 = 256 possible 8-value +/-3 input patterns.
patterns = [i for i in itertools.product((-3.,3.), repeat=8)]
def evaluate_lr(nn):
    """Fitness function for the left/right retina task.

    Runs *nn* on all 256 +/-3 patterns and scores how well output 0
    detects a left object and output 1 a right object, against the
    module-level ``left_patterns`` / ``right_patterns`` tables.

    Returns a ``(fitness, performance)`` tuple; performance is the
    fraction of patterns classified correctly. Empty networks (and any
    evaluation error) score (0.0, 0.0).
    """
    error = 0
    correct = 0.
    global left_patterns, right_patterns, patterns
    if nn.empty:
        return (0.0, 0.0)
    try:
        for pattern in patterns:
            left = pattern[0:4] in left_patterns
            right = pattern[4:8] in right_patterns
            output = nn.run_cyclic(pattern)
            if left and right:
                # Both objects present: both outputs should be +1.
                error += abs(output[0] - 1.)
                error += abs(output[1] - 1.)
                if output[0] > 0 and output[1] > 0:
                    correct += 1.
            elif left:
                if output[0] > 0 and output[1] < 0:
                    correct += 1.
                error += abs(output[0] - 1.)
                error += abs(output[1] + 1.)
            elif right:
                if output[0] < 0 and output[1] > 0:
                    correct += 1.
                error += abs(output[0] + 1.)
                error += abs(output[1] - 1.)
            else:
                # Neither object: both outputs should be -1.
                error += abs(output[0] + 1.)
                error += abs(output[1] + 1.)
                if output[0] < 0. and output[1] < 0.:
                    correct += 1.
        # Normalized squared distance from the maximum error, scaled to 1000.
        # 512 = 2 outputs * 256 patterns; 512**2 == 262144.
        e = (512 - error) ** 2
        # BUG FIX: the original return line had unbalanced parentheses (a
        # syntax error) and dropped the performance half of the tuple.
        return ((e / 262144.) * 1000., correct / 256.)
    except Exception:
        return (0.0, 0.0)
def get_best_connection_cost(run):
    """One 1500-generation ES-HyperNEAT run of the retina experiment.

    Evolves CPPN genomes, generates substrates, evaluates them with
    evaluate_lr(), adds a wiring-length rank bonus to each fitness, and
    saves the best genome every 50 generations.

    Returns (bestest, performance): per-generation best fitness and the
    correct-classification fraction of that best network.
    Python 2 module (print statements, MultiNEAT / es_hyperNeat APIs).
    """
    global max_correct
    # CPPN genome seed: 7 inputs, 2 outputs, signed-Gaussian activations.
    g = NEAT.Genome(0, 7, 2, True,
                    NEAT.ActivationFunction.SIGNED_GAUSS,
                    params)
    pop = NEAT.Population(g, params, True, 1.0)
    bestest = []
    substrate_generator = es.Substrate_Generator(params)
    # Substrate coordinates: 8 retina inputs + one bias node, 2 outputs.
    inputs = [(-1.0, 0.,-1.),
              (-0.25, 0.,-1.),
              (0.25, .0,-1.),
              (1., 0., -1.),
              (-1.0, 0., 1.),
              (-0.25, 0., 1.),
              (0.25, 0., 1.),
              (1., 0., 1.),
              (0., 0., 0.)
              ]
    outputs= [(-1.,1.,0.),
              (1., 1., 0.)]
    performance = []
    corrects = []
    print "Here we goooo"
    genlimit = 1500
    for generation in range(genlimit):
        curtime = time.time()
        genome_list = NEAT.GetGenomeList(pop)
        neural_networks = []
        for genome in genome_list:
            try:
                nn = substrate_generator.generate_substrate(inputs,outputs,genome)
                neural_networks.append(nn)
                #nn.connection_length()
            except Exception as ex:
                #print traceback.format_exc()
                # Substrate generation failed: substitute an empty network,
                # which evaluate_lr scores as (0, 0).
                empty = fast_cppn.fast_network(0,0,0, empty = True)
                neural_networks.append(empty)
        # Rank networks by total connection length (wiring-cost pressure).
        ranks = ss.rankdata([nn.length for nn in neural_networks])
        max_rank = max(ranks)
        #print (1./(1.+ (genlimit - generation)))*(max_rank)
        fitnesses = NEAT.es_EvaluateGenomeList_retina(neural_networks, evaluate_lr,cores = 4)
        # Short-wiring bonus; its weight grows as evolution nears genlimit.
        [genome.SetFitness(fitness + (1./(1.+ (genlimit - generation)))*(max_rank - rank)) for genome, fitness, rank in zip(genome_list, fitnesses, ranks)]
        best = max(fitnesses)
        index = fitnesses.index(best)
        performance.append(neural_networks[index].correct)
        bestest.append(best)
        if generation % 10 == 0:
            print "Generation ", generation
            print "Best ", best
            print "perf", neural_networks[index].correct
            print "-------------------"
        if generation % 50 == 0:
            pop.GetBestGenome().Save('/home/penguinofdoom/Projects/retina_evaluator/gauss/Best_Genome_run_%d_gen_%d' %(run, generation))
        print "Elapsed time: ", time.time() - curtime
        pop.Epoch()
    print "Done"
    return bestest, performance
# Run the experiment `runs` times; log per-generation fitness/performance CSVs.
evolutions = []
performance = []
runs = 5
for i in range(runs):
    evo, perf = get_best_connection_cost(i)
    evolutions.append(evo)
    performance.append(perf)
    f = open("/home/penguinofdoom/Projects/retina_evaluator/ES_full_retina_%d.csv" %(i),'w')
    f.write("Generation, Fitness, Performance \n")
    # NOTE(review): the inner loop reuses `i`, clobbering the run index; it
    # happens to be harmless only because the for rebinds i each iteration.
    for i in range(len(evo)):
        f.write('%f, %f, \n' %(evo[i], perf[i]))
    f.close()
    print "Done with run"
|
# 232. Implement Queue using Stacks
class MyQueue:
    """FIFO queue built from two LIFO stacks (amortized O(1) per op).

    __queue1 receives pushes; __queue2 holds elements in reversed (pop)
    order. Elements migrate from __queue1 to __queue2 only when __queue2
    runs dry, so each element moves at most once.
    """

    def __init__(self):
        """Initialize your data structure here."""
        self.__queue1 = []
        self.__queue2 = []

    def push(self, x):
        """Push element x to the back of queue."""
        self.__queue1.append(x)

    def pop(self):
        """Remove and return the element at the front of the queue."""
        if not self.__queue2:
            # Reverse the in-stack into the out-stack.
            while self.__queue1:
                self.__queue2.append(self.__queue1.pop())
        return self.__queue2.pop()

    def peek(self):
        """Return the front element without removing it (None when empty).

        BUG FIX: the original returned __queue1[0] whenever __queue1 was
        non-empty, but when __queue2 also holds elements those are older,
        so the true front is __queue2[-1].
        """
        if self.empty():
            return None
        if self.__queue2:
            return self.__queue2[-1]
        return self.__queue1[0]

    def empty(self):
        """Return whether the queue is empty."""
        return not self.__queue1 and not self.__queue2
# Your MyQueue object will be instantiated and called as such:
# obj = MyQueue()
# obj.push(x)
# param_2 = obj.pop()
# param_3 = obj.peek()
# param_4 = obj.empty() |
import random
from collections import defaultdict
import matplotlib.pyplot as plt
from matplotlib.backends.backend_pdf import PdfPages
import numpy as np
from keras import backend as K
from keras.models import Model
from scipy.misc import imsave
# Turn a network tensor back into a displayable image.
def deprocess_image(x):
    """Scale *x* in place by 255, clip to [0, 255] as uint8, and drop the
    batch/channel axes: (1, rows, cols, 1) -> (rows, cols)."""
    x *= 255
    clipped = np.clip(x, 0, 255).astype('uint8')
    rows, cols = clipped.shape[1], clipped.shape[2]
    return clipped.reshape(rows, cols)
def normalize(x):
    """Scale tensor *x* to unit L2 norm; the epsilon keeps division finite."""
    l2 = K.sqrt(K.mean(K.square(x)))
    return x / (l2 + 1e-5)
def constraint_occl(gradients, start_point, rect_shape):
    """Zero the gradients everywhere except a rect_shape window whose top-left
    corner sits at start_point (applied across the whole batch axis)."""
    row0, col0 = start_point
    rows, cols = rect_shape
    window = (slice(None), slice(row0, row0 + rows), slice(col0, col0 + cols))
    masked = np.zeros_like(gradients)
    masked[window] = gradients[window]
    return masked
def constraint_light(gradients):
    """Collapse the gradient field to its scalar mean, broadcast back to the
    original shape (uniform-brightness perturbation)."""
    mean_grad = np.mean(gradients)
    return mean_grad * np.ones_like(gradients)
def constraint_black(gradients, rect_shape=(6, 6)):
    """Return a mask that blacks out one randomly placed rect_shape patch
    (all -1s) when the gradient mean inside that patch is negative;
    otherwise an all-zero mask."""
    # Same two randint draws, in the same order, as before.
    row0 = random.randint(0, gradients.shape[1] - rect_shape[0])
    col0 = random.randint(0, gradients.shape[2] - rect_shape[1])
    region = (slice(None), slice(row0, row0 + rect_shape[0]),
              slice(col0, col0 + rect_shape[1]))
    new_grads = np.zeros_like(gradients)
    patch = gradients[region]
    if np.mean(patch) < 0:
        new_grads[region] = -np.ones_like(patch)
    return new_grads
def init_coverage_tables(model1, model2, model3):
    """Build one fresh neuron-coverage table per model (all entries False)."""
    tables = []
    for model in (model1, model2, model3):
        table = defaultdict(bool)
        init_dict(model, table)
        tables.append(table)
    return tuple(tables)
def init_dict(model, model_layer_dict):
    """Seed *model_layer_dict* with a False entry per neuron, keyed by
    (layer name, output index); flatten and input layers are skipped."""
    interesting = (layer for layer in model.layers
                   if 'flatten' not in layer.name and 'input' not in layer.name)
    for layer in interesting:
        for index in range(layer.output_shape[-1]):
            model_layer_dict[(layer.name, index)] = False
def neuron_to_cover(model_layer_dict):
    """Pick a (layer_name, index) target: a random not-yet-covered neuron
    when one exists, otherwise any random neuron.

    BUG FIX: ``random.choice(model_layer_dict.keys())`` raises TypeError on
    Python 3 because dict views are not sequences; wrap in ``list`` (this is
    also fine on Python 2).
    """
    not_covered = [(layer_name, index) for (layer_name, index), v in model_layer_dict.items() if not v]
    if not_covered:
        layer_name, index = random.choice(not_covered)
    else:
        layer_name, index = random.choice(list(model_layer_dict.keys()))
    return layer_name, index
def neuron_covered(model_layer_dict):
    """Return (covered, total, covered/total) for a neuron-coverage table."""
    total_neurons = len(model_layer_dict)
    covered_neurons = sum(1 for v in model_layer_dict.values() if v)
    return covered_neurons, total_neurons, covered_neurons / float(total_neurons)
def update_coverage(input_data, model, model_layer_dict, threshold=0):
    """Run *input_data* through every non-flatten/input layer of *model* and
    mark neurons whose scaled mean activation exceeds *threshold* as covered.

    Mutates *model_layer_dict* in place; returns None.
    """
    layer_names = [layer.name for layer in model.layers if
                   'flatten' not in layer.name and 'input' not in layer.name]
    # One auxiliary model exposing every interesting layer's output at once.
    intermediate_layer_model = Model(inputs=model.input,
                                     outputs=[model.get_layer(layer_name).output for layer_name in layer_names])
    intermediate_layer_outputs = intermediate_layer_model.predict(input_data)
    for i, intermediate_layer_output in enumerate(intermediate_layer_outputs):
        # scale() maps the first sample's activations into [0, 1].
        scaled = scale(intermediate_layer_output[0])
        # NOTE(review): xrange is Python-2-only; this module targets Python 2.
        for num_neuron in xrange(scaled.shape[-1]):
            if np.mean(scaled[..., num_neuron]) > threshold and not model_layer_dict[(layer_names[i], num_neuron)]:
                model_layer_dict[(layer_names[i], num_neuron)] = True
def full_coverage(model_layer_dict):
    """True when every neuron in the coverage table has been covered."""
    return False not in model_layer_dict.values()
def scale(intermediate_layer_output, rmax=1, rmin=0):
    """Min-max rescale *intermediate_layer_output* into [rmin, rmax]."""
    lo = intermediate_layer_output.min()
    hi = intermediate_layer_output.max()
    X_std = (intermediate_layer_output - lo) / (hi - lo)
    return X_std * (rmax - rmin) + rmin
def fired(model, layer_name, index, input_data, threshold=0):
    """True when neuron *index* of *layer_name* fires above *threshold*
    for the first sample of *input_data* (on scaled activations)."""
    probe = Model(inputs=model.input, outputs=model.get_layer(layer_name).output)
    activations = scale(probe.predict(input_data)[0])
    if np.mean(activations[..., index]) > threshold:
        return True
    return False
def diverged(predictions1, predictions2, predictions3, target):
    """True when the three model predictions do not all agree.

    *target* is accepted for interface compatibility but not consulted
    (the target-aware check remains disabled, as in the original).
    """
    all_agree = predictions1 == predictions2 == predictions3
    return not all_agree
def update_heatmap(orig_img, aug_img, heatmap):
    """Increment *heatmap* wherever the augmented image differs from the
    original, then derive a color-coded uint8 view of the counts.

    Color key relative to the running mean count m: red >= 1.25*m,
    orange in [m, 1.25*m), green in [0.75*m, m), blue below.

    Returns (heatmap, hm_colored); *heatmap* is mutated in place.
    FIX: replaced Python-2-only ``xrange`` with ``range`` (identical
    iteration semantics, also runs under Python 3).
    """
    rows, cols = heatmap.shape[0], heatmap.shape[1]
    for x in range(rows):
        for y in range(cols):
            if orig_img[0, x, y, 0] != aug_img[0, x, y, 0]:
                heatmap[x, y] += 1
    hm_avg = np.mean(heatmap)
    hm_colored = np.zeros([rows, cols, 3], dtype=np.uint8)
    for x in range(rows):
        for y in range(cols):
            count = heatmap[x, y]
            if count >= hm_avg + .25 * hm_avg:
                hm_colored[x, y] = [255, 0, 0]
            elif count >= hm_avg:
                hm_colored[x, y] = [255, 165, 0]
            elif count >= hm_avg - .25 * hm_avg:
                hm_colored[x, y] = [0, 255, 0]
            else:
                hm_colored[x, y] = [0, 0, 255]
    print('updated heatmap')
    return heatmap, hm_colored
def save_heatmap(hm, aug, num_imgs):
    """Render *hm* with imshow and save it to ./heatmaps/MNIST_<aug>_<n>.pdf."""
    fn = "MNIST_" + aug + "_" + str(num_imgs)
    fp = "./heatmaps/" + fn + ".pdf"
    pdf = PdfPages(fp)
    figure = plt.figure(figsize=(8.5, 11))
    plt.imshow(hm)
    pdf.savefig()
    plt.close()
    pdf.close()
    print('heatmap saved to ' + fp)
def error_pattern_match(hm, orig_img_list, gen_img_list, transformation, p1, p2, p3):
    """Collect generated images whose first augmentation-changed pixel lands
    on a heatmap cell with red channel 255 (red or orange cells), then save
    each with its three model predictions in the file name.

    FIXES: p2_error recorded p1[i] instead of p2[i]; xrange -> range so the
    function also runs under Python 3.
    """
    error_pattern_set = []
    p1_error = []
    p2_error = []
    p3_error = []
    for i, img in enumerate(gen_img_list):
        done = False
        for x in range(hm.shape[0]):
            for y in range(hm.shape[1]):
                pixel = hm[x, y]
                orig_img = orig_img_list[i]
                # A pixel changed by the augmentation on a hot heatmap cell.
                if orig_img[0, x, y, 0] != img[0, x, y, 0] and pixel[0] == 255:
                    error_pattern_set.append(deprocess_image(img))
                    p1_error.append(p1[i])
                    p2_error.append(p2[i])  # BUG FIX: was p1[i]
                    p3_error.append(p3[i])
                    done = True
                    break
            if done:
                break
    for i, img in enumerate(error_pattern_set):
        imsave('./error_pattern_set/' + transformation + '_' + p1_error[i] + '_' + p2_error[i] +
               '_' + p3_error[i] + '.png', img)
    print("Error pattern set saved to ./error_pattern_set/ folder")
def make_scatter_plot(scatter_plot_data, aug, num_imgs):
    """Scatter every per-iteration prediction triple (red dots) against its
    iteration index; returns the matplotlib figure."""
    points = []
    for iteration in range(len(scatter_plot_data)):
        triple = scatter_plot_data[iteration]
        # Indices -1, 0, 1 cover all three entries, as in the original.
        for j in range(3):
            points.append((iteration, triple[j - 1]))
    fig = plt.figure()
    ax = fig.add_subplot(1, 1, 1)
    for x_val, y_val in points:
        ax.scatter(x_val, y_val, alpha=0.8, c='red')
    plt.title('MNIST_' + str(aug) + '_' + str(num_imgs))
    plt.xlabel('Iteration')
    plt.ylabel('Predictions')
    #plt.show()
    print ('update scatter plot')
    return fig
def save_scatter_plot(scatter_plot, aug, num_imgs):
    """Save *scatter_plot* to ./scatterplots/MNIST_<aug>_<n>.pdf.

    BUG FIX: the original passed the bare name *fn* (no directory, no .pdf
    extension) to both savefig and PdfPages while printing *fp*; both
    writes now target *fp* as the message claims.
    """
    fn = "MNIST_" + aug + "_" + str(num_imgs)
    fp = "./scatterplots/" + fn + ".pdf"
    scatter_plot.savefig(fp, bbox_inches='tight')
    pp = PdfPages(fp)
    pp.savefig()
    pp.close()
    print('Scatter plot saved to: ' + fp)
|
import tokens
# ###############
from telegram.ext import Updater, MessageHandler, Filters
def messageFilter(update, context):  # This can filter messages
    """Handler invoked for every incoming text message; currently just logs."""
    print("Hi")
def send_message(my_text):
    """Send *my_text* to the configured chat via the dispatcher's bot."""
    bot = dp.bot
    bot.send_message(chat_id=my_telegram_id, text=my_text)
# Wire up the bot: resolve credentials, register the text handler, poll.
my_telegram_id = tokens.get_my_telegram_id()
telegram_token = tokens.get_telegram_token()
# IDIOM FIX: identity comparison for None ("== None" -> "is None").
# NOTE(review): execution still falls through after the warning, so
# Updater() below will fail on a None token — consider exiting here.
if telegram_token is None:
    print("Invalid token")
updater = Updater(token=telegram_token, use_context=True)
dp = updater.dispatcher
dp.add_handler(MessageHandler(Filters.text, messageFilter))
updater.start_polling()
|
""" Для чисел в пределах от 20 до 240 найти числа,
кратные 20 или 21. Необходимо решить задание в одну строку. """
result = [itm for itm in range(20,240) if itm % 20 == 0 or itm % 21 == 0]
print(result)
|
def print_sub():
    """Print a marker line proving the sub-script-folder module was loaded."""
    # BUG FIX: message typo "form" -> "from".
    print("Printed from sub script folder")
#!/usr/bin/env python
import unittest
from useless.decorators import nocase
__author__ = 'Ronie Martinez'
class NoCaseTest(unittest.TestCase):
    """Checks that @nocase lets methods be called in either naming style."""

    def test_call_inexistent_snake_case(self):
        # A camelCase method should also answer to its snake_case alias.
        @nocase
        class MyClass(object):
            def myMethod(self):
                return "myMethod"
        a = MyClass()
        # FIX: assertEquals is a deprecated alias (removed in Python 3.12);
        # use assertEqual.
        self.assertEqual("myMethod", a.myMethod())
        self.assertEqual("myMethod", a.my_method())

    def test_call_inexistent_camel_case(self):
        # A snake_case method should also answer to its camelCase alias.
        @nocase
        class MyClass(object):
            def my_method(self):
                return "my_method"
        a = MyClass()
        self.assertEqual("my_method", a.my_method())
        self.assertEqual("my_method", a.myMethod())
# Allow running this test module directly with verbose output.
if __name__ == '__main__':
    unittest.main(verbosity=2)
|
Python 3.8.5 (tags/v3.8.5:580fbb0, Jul 20 2020, 15:43:08) [MSC v.1926 32 bit (Intel)] on win32
Type "help", "copyright", "credits" or "license()" for more information.
>>> #I pledge my honor that I have abided by the Stevens Honor System
>>> #this code gives the uder a list of menus to choose from
>>> def main():
first = input("Enter 1 for mathematical fucntions, 2 for string fucntions. ")
first = int(first)
if first == 1:
math = input("For additon enter 1, For subtraction enter 2, For multiplication enter 3, For division enter 4. ")
math = int(math)
if math == 1:
A = float(input("Please enter the first number you would like: "))
B = float(input("Please enter the second number you would like: "))
sum = A+B
print("The sum is:", sum)
elif math == 2:
C = float(input("Please enter the first number you would like to subtract, the second number will be subtracted from the first: "))
D = float(input("Please enter the second number you would like to multiply: "))
dif = C-D
print("The difference is:", dif)
elif math == 3:
E = float(input("Please enter the first number that you would like to multiply, Please enter the second number you would like to multiply: "))
F = float(input("Please enter the first number that you would like to multiply, Please enter the second number you would like to multiply: "))
product = round(E*F, 4)
print ("The answer is:", product)
elif math == 4:
G = float(input("Please enter the first number that you would like to divide (The first number will be divided by the second number): "))
H = float(input("Please enter the second number that you would like to divide: "))
quotient = round (G/H, 4)
print("The answer is:", quotient)
else:
print("Error! please run program again and enter a number between 1 and 4.")
elif first == 2:
stringer = input("to determine the number of vowels in a string, enter the number 1. \to encrypt a string, enter the number 2.")
stringer = int(stringer)
if stringer == 1:
string = input ("Enter the string: ")
vowel1 = string.count("a")
vowel2 = string.count("e")
vowel3 = string.count("i")
vowel4 = string.count("o")
vowel5 = string.count("u")
sum2 = vowel1 + vowel2 + vowel3 + vowel4 + vowel5
print("There are", sum2, "vowels in the string.")
elif stringer == 2:
message = input ("enter the message you would like to encrypt: ")
print("the encrypted message is: ")
for i in message:
x = ord(i)
print(" ", x*x+x-2, end = "")
else:
print("Error! please run program again and enter the number 1 or 2.")
else:
print("Error! please run program again and enter the number 1 or 2.")
>>> main()
Enter 1 for mathematical fucntions, 2 for string fucntions. 1
For additon enter 1, For subtraction enter 2, For multiplication enter 3, For division enter 4. 2
Please enter the first number you would like to subtract, the second number will be subtracted from the first: 12
Please enter the second number you would like to multiply: 5
The difference is: 7.0
>>> |
import json
import yaml
import re
PEERPEM = "crypto-config/peerOrganizations/org%d.example.com/tlsca/tlsca.org%d.example.com-cert.pem"
CAPEM="crypto-config/peerOrganizations/org%d.example.com/ca/ca.org%d.example.com-cert.pem"
class JSONObject:
    """Mixin giving subclasses a stable, pretty-printed JSON dump of their
    instance attributes."""

    def toJSON(self):
        """Serialize this object's attribute dict (recursively) as sorted,
        4-space-indented JSON."""
        encode = lambda o: o.__dict__
        return json.dumps(self, default=encode, sort_keys=True, indent=4)
class CertificateAuthorities(JSONObject):
    """Connection-profile entry for one organization's certificate authority."""

    def __init__(self, org_id):
        # CA ports are allocated from 19000 upward, one per org.
        self.url = "https://localhost:%d" % (19000 + org_id)
        self.caName = "ca-org%d" % org_id
        pem_path = CAPEM % (org_id, org_id)
        self.tlsCACerts = {"pem": pem_path}
        self.httpOptions = {"verify": False}
class Peers(JSONObject):
    """Connection-profile entry for one peer of one organization."""

    def __init__(self, org_id, peer_id, tlsCACerts):
        # Peer ports: 20000 base + 1000 per org + 1 per peer.
        self.url = "grpcs://localhost:%d" % (20000 + org_id * 1000 + peer_id)
        self.tlsCACerts = {"pem": tlsCACerts}
        host = "peer%d.org%d.example.com" % (peer_id, org_id)
        self.grpcOptions = {"ssl-target-name-override": host,
                            "hostnameOverride": host}
class OrgConfig(JSONObject):
    """Organization section: MSP id, peer host names and CA names."""

    def __init__(self, org_id, peer_num):
        self.mspid = "Org%dMSP" % org_id
        self.peers = ["peer%d.org%d.example.com" % (i, org_id)
                      for i in range(peer_num)]
        self.certificateAuthorities = ["ca.org%d.example.com" % org_id]
class Client(JSONObject):
    """Client section pointing at its organization with a default endorser
    timeout of 300 seconds."""

    def __init__(self, org_id):
        self.organization = "Org%d" % org_id
        self.connection = {"timeout": {"peer": {"endorser": 300}}}
class Ccp(JSONObject):
    """Full connection profile for one organization's view of the network."""

    def __init__(self, org_id, peer_num):
        self.name = "first-network-org%d" % org_id
        self.version = "1.0.0"
        self.client = Client(org_id)
        self.organizations = OrgConfig(org_id, peer_num)
        self.peers = {}
        for peer_id in range(peer_num):
            key = "peer%d.org%d.example.com" % (peer_id, org_id)
            self.peers[key] = Peers(org_id, peer_id, PEERPEM % (org_id, org_id))
        ca_key = "ca.org%d.example.com" % org_id
        self.certificateAuthorities = {ca_key: CertificateAuthorities(org_id)}
if __name__ == "__main__":
for i in range(1,7):
ccp_class = Ccp(i, 3)
jsonOutput = open("../out/my-connection-org%d.json"%i,"w")
jsonOutput.write(ccp_class.toJSON())
yamlOutput = open("../out/my-connection-org%d.yaml"%i,"w")
yamlOutput.write(re.sub("!!.*\n", "\n", yaml.dump(ccp_class)))
|
def print_primes(count=10001):
    """Print the first *count* primes, one per line, in ascending order.

    Output matches the original loop (its counter ran 1..10001 inclusive),
    but trial division now stops at sqrt(n) — turning minutes of work into
    well under a second.
    """
    found = 0
    n = 2
    while found < count:
        if all(n % d for d in range(2, int(n ** 0.5) + 1)):
            print(n)
            found += 1
        n += 1

if __name__ == "__main__":
    print_primes()
|
# This module reads the dataset back from tfrecord files. (translated)
import tensorflow as tf

tfrecord_dir = "./Dataset/tfrecord/"
# Per-channel ImageNet mean, kept for optional mean subtraction (disabled below).
IMAGENET_MEAN = tf.constant([123.68, 116.779, 103.939], dtype=tf.float32)
NUM_CLASSES = 9  # number of dataset classes, used to build one-hot labels (translated)
######### feature-parsing helpers ##########
def _tf_record_parser(record):
    """Parse one training tfrecord example into (image, visit, one-hot label).

    Applies training-time augmentation: random 88x88 crop plus random
    horizontal and vertical flips. (Comments translated from Chinese.)
    """
    keys_to_features = {
        'data': tf.FixedLenFeature([], tf.string),
        'visit': tf.FixedLenFeature([], tf.string),
        'label': tf.FixedLenFeature([], tf.int64),
    }
    features = tf.parse_single_example(record, keys_to_features)
    # Saved as uint8, so it must be decoded as uint8 (widths must match or it errors).
    image = tf.decode_raw(features['data'], tf.uint8)
    image = tf.reshape(image, [100, 100, 3])
    image = tf.random_crop(image, [88, 88, 3])  # random crop
    image = tf.image.random_flip_left_right(image)  # random horizontal flip
    image = tf.image.random_flip_up_down(image)  # random vertical flip
    img_centered = tf.cast(image, tf.float32)
    #img_centered = tf.subtract(image, IMAGENET_MEAN)
    # Further image preprocessing could go here (none for now).
    # NOTE(review): the original comment claims the writer used int64, but
    # this decodes int32 — confirm against the writer's dtype.
    visit = tf.decode_raw(features['visit'], tf.int32)
    visit = tf.reshape(visit, [174, 24, 2])
    visit = tf.cast(visit, tf.float32)
    # The visit tensor's shape could be changed here with tf.reshape().
    label = tf.cast(features['label'], tf.int64)
    label_onehot = tf.one_hot(label, NUM_CLASSES)
    return img_centered, visit, label_onehot
def _tf_record_parser_test(record):
    """Parse one test tfrecord example into (image, visit, integer label).

    Same pipeline as _tf_record_parser but returns the raw label instead of
    one-hot. NOTE(review): it still applies random crop/flip augmentation,
    which is unusual at test time — confirm this is intended.
    """
    keys_to_features = {
        'data': tf.FixedLenFeature([], tf.string),
        'visit': tf.FixedLenFeature([], tf.string),
        'label': tf.FixedLenFeature([], tf.int64),
    }
    features = tf.parse_single_example(record, keys_to_features)
    # Saved as uint8, so it must be decoded as uint8 (widths must match).
    image = tf.decode_raw(features['data'], tf.uint8)
    image = tf.reshape(image, [100, 100, 3])
    image = tf.random_crop(image, [88, 88, 3])  # random crop
    image = tf.image.random_flip_left_right(image)  # random horizontal flip
    image = tf.image.random_flip_up_down(image)  # random vertical flip
    img_centered = tf.cast(image, tf.float32)
    # img_centered = tf.subtract(image, IMAGENET_MEAN)
    # Further image preprocessing could go here (none for now).
    # NOTE(review): decoded as int32 despite the original int64 comment — confirm.
    visit = tf.decode_raw(features['visit'], tf.int32)
    visit = tf.reshape(visit, [174, 24, 2])
    visit = tf.cast(visit, tf.float32)
    # The visit tensor's shape could be changed here with tf.reshape().
    label = tf.cast(features['label'], tf.int64)
    return img_centered, visit, label
######### feature-parsing helpers ##########
class DataGenerator(object):
    """Builds tf.data pipelines over the train/eval/test tfrecord files."""

    def __init__(self):
        # Fixed file names, all located under tfrecord_dir.
        train_tfrecord_filename = "train.tfrecord"
        eval_tfrecord_filename = "eval.tfrecord"
        test_tfrecord_filename = "test.tfrecord"
        self.train_tfrecord_path = tfrecord_dir + train_tfrecord_filename
        self.eval_tfrecord_path = tfrecord_dir + eval_tfrecord_filename
        self.test_tfrecord_path = tfrecord_dir + test_tfrecord_filename

    def get_batch(self, batch_size, tag="training"):
        """Return a batched tf.data.Dataset for *tag*.

        "training" repeats forever and shuffles; "evaling" and "testing"
        run a single epoch in file order. Returns None (implicitly) for an
        unrecognized tag.
        """
        if tag == "training":
            training_dataset = tf.data.TFRecordDataset(self.train_tfrecord_path)
            training_dataset = training_dataset.map(_tf_record_parser)
            training_dataset = training_dataset.repeat(None)
            training_dataset = training_dataset.shuffle(buffer_size=5000)
            training_dataset = training_dataset.batch(batch_size)
            return training_dataset
        if tag == "evaling":
            evaling_dataset = tf.data.TFRecordDataset(self.eval_tfrecord_path)
            evaling_dataset = evaling_dataset.map(_tf_record_parser)
            evaling_dataset = evaling_dataset.repeat(1)
            #evaling_dataset = evaling_dataset.shuffle(buffer_size=500)
            evaling_dataset = evaling_dataset.batch(batch_size)
            return evaling_dataset
        if tag == "testing":
            testing_dataset = tf.data.TFRecordDataset(self.test_tfrecord_path)
            testing_dataset = testing_dataset.map(_tf_record_parser_test)
            testing_dataset = testing_dataset.repeat(1)
            testing_dataset = testing_dataset.batch(batch_size)
            return testing_dataset
|
#!/usr/bin/python
import random

maxNum = 1000000
# The array that is to be sorted: 10000 random ints in [0, maxNum).
arr = [int(maxNum * random.random()) for i in range(10000)]
def merge(listA, listB):
    """Merge two sorted lists into one sorted list.

    FIX: the original popped from the front of each list (O(n) per pop, so
    O(n^2) per merge) and destructively emptied its arguments; this version
    merges with two indices in O(n) and leaves the inputs untouched.
    """
    merged = []
    i = j = 0
    while i < len(listA) and j < len(listB):
        if listA[i] < listB[j]:
            merged.append(listA[i])
            i += 1
        else:
            # Ties take the right-hand element, matching the original.
            merged.append(listB[j])
            j += 1
    merged.extend(listA[i:])
    merged.extend(listB[j:])
    return merged

def mergeSort(array):
    """Return a sorted copy of *array* using top-down merge sort.

    FIX: the base case is now len <= 1; the original recursed forever on an
    empty list (length 0 never satisfied the len == 1 test).
    """
    if len(array) <= 1:
        return array
    mid = len(array) // 2
    return merge(mergeSort(array[:mid]), mergeSort(array[mid:]))
# Sort the random array and dump the result to stdout.
print(mergeSort(arr))
|
import pandas as pd
import os
import numpy
import csv
import pycountry
import re
import time
from selenium import webdriver
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.by import By
def close_overlay():
    """Dismiss the pendo guide overlay that Scopus pops over the page.

    Tries both observed class-name variants of the close button; relies on
    the module-level `driver`.
    NOTE(review): the bare except and the local shadowing of the function
    name are fragile — consider narrowing/renaming.
    """
    time.sleep(3)
    try:
        close_overlay = driver.find_element_by_class_name("_pendo-close-guide")
    except:
        close_overlay = driver.find_element_by_class_name("_pendo-close-guide_")
    close_overlay.click()
# Country names used later to trim affiliation strings.
countries = []
for i in list(pycountry.countries):
    countries.append(i.name)
# Research-center keywords (only used by commented-out code below).
centers = ["Smart", "Nanoelectronics", "School", "Nisc", "Wireless", "Mechanical", "Bioinformatics", "Management", "Graduate"]
PATH = os.path.dirname(os.path.realpath(__file__))
df = pd.read_csv(PATH + '/differences.csv',) #you could add index_col=0 if there's an index
author_affil_int =[]
names_int = []
ids_int = []
author_affil_int.append(df['Authors with affiliations'])
names_int.append(df["Title"])
ids_int.append(df["Author(s) ID"])
data_author = author_affil_int[0].to_numpy()
data_name = names_int[0].to_numpy()
data_ids = ids_int[0].to_numpy()
driver = webdriver.Firefox(executable_path=PATH + r'\geckodriver.exe')
driver.get("https://08105aj8u-1103-y-https-www-scopus-com.mplbci.ekb.eg/authid/detail.uri?authorId=7103379659")
# Wait for the institutional login form, then sign in.
# SECURITY NOTE(review): credentials are hard-coded in plain text here;
# move them to environment variables or a config file.
myElem = WebDriverWait(driver, 10).until(EC.presence_of_element_located((By.ID, '_58_login')))
login = driver.find_element_by_id("_58_login")
login.send_keys("yradwan147.yr@gmail.com")
login2 = driver.find_element_by_id("_58_password")
login2.send_keys("TE_Data1")
login_button = driver.find_element_by_css_selector("button.btn.btn-default.mb-2.btn-primary")
login_button.click()
# Write the CSV header once ("w+" truncates any previous output file).
with open(PATH + "\Output.csv", "w+") as output:
    writer = csv.writer(output)
    writer.writerow(["Title", "Affiliated Author", "Affiliation", "Scopus ID", "Scopus Profile Link", "Scopus Name", "Scopus Titles"])
counter = 0
# For every paper row: find Nile-University-affiliated authors, scrape each
# author's Scopus profile, and append one CSV row per affiliated author.
for x in data_author:
    counter1 = 0
    names = []
    affiliations = []
    ids = []
    scopus_names = []
    scopus_titles = []
    try:
        authors = x.split(";")
    except:
        print(x)
    for author in authors:
        name = ""
        if "Nile University" in author:
            # Map the author's position in the list to the matching ID column.
            idIndex = authors.index(author)
            idFinal = (data_ids[counter].split(';'))[idIndex]
            ids.append(idFinal)
            driver.get("https://08105aj8u-1103-y-https-www-scopus-com.mplbci.ekb.eg/authid/detail.uri?authorId=" + str(idFinal))
            time.sleep(1)
            try:
                # Expand the "show all author info" panel when present.
                button = driver.find_element_by_id("scopus-author-profile-page-control-microui__scopus-author-general-information__showAllAuthorInfo")
                try:
                    button.click()
                except:
                    close_overlay()
                    button.click()
                flag = True
                time.sleep(1)
            except:
                flag = False
            try:
                scopus_names.append(driver.find_elements_by_class_name("author-general-details-title")[0].text)
                if (flag):
                    scopus_titles.append(driver.find_element_by_id("scopus-author-profile-page-control-microui__scopus-author-general-information__showAllInfoNameVariants").text)
                else:
                    scopus_titles.append(" ")
            except:
                scopus_names.append(" ")
                scopus_titles.append(" ")
            counter1 += 1
            # "Last, First" -> "Last First " display form.
            name_split = author.split(",")[:2]
            for i in name_split:
                name += i
                name += " "
            ##print(name)
            names.append(name)
            # Trim the affiliation text down to the institute part.
            affs = author.split("Egypt")
            for aff in affs:
                if "Nile University" in aff:
                    for i in countries:
                        if i in aff:
                            aff = aff[(aff.find(i) + len(i) + 2):]
                    if name.replace(" ", ", ") in aff:
                        aff = aff[(aff.find(name.replace(" ", ", ")) + len(name.replace(" ", ", "))):]
                    elif (name[1:].replace(" ", ", ")) in aff:
                        aff = aff[(aff.find(" " + name[1:].replace(" ", ", ")) + len(" " + name[1:].replace(" ", ", "))):]
                    if "," in aff[:3]:
                        aff = aff[aff.find(",") + 1:]
                    # NOTE(review): this bare aff.strip() discards its result;
                    # the stripped value is only applied in the append below.
                    aff.strip()
                    # for x in centers:
                    #     if x in aff:
                    #         aff = aff[(aff.find(x)):]
                    affiliations.append(aff.strip())
    # Append this paper's affiliated authors to the output CSV.
    with open(PATH + "\Output.csv", "a", encoding="utf-8") as output:
        writer = csv.writer(output)
        list_output = []
        ##print(str(data_name[counter]) + str(counter))
        for i in range(counter1):
            url = "https://www.scopus.com/authid/detail.uri?origin=resultslist&authorId=" + str(ids[i]) + "&zone="
            writer.writerow([data_name[counter], names[i], affiliations[i], ids[i], url, scopus_names[i], scopus_titles[i]])
        #for j in range(counter1):
        #    writer.writerow(list_output)
    counter += 1
from cs_plone3_theme import Plone3Theme
class BootstrapTheme(Plone3Theme):
    """Paster/templer template for a Twitter-Bootstrap-based Plone 3/4 theme."""
    _template_dir = 'templates/bootstrap_theme'
    summary = 'A Theme for Plone 3/4 based on Twitter Bootstrap'
    skinbase = 'Bootstrap Theme'
    use_local_commands = True

    def post(self, command, output_dir, vars):
        # Post-generation banner (Python 2 print statements).
        print "-----------------------------------------------------------"
        print "Generation finished"
        print "Remember to pin plone.app.jquery = 1.7.1.1"
        print "in your buildout"
        print
        print "See README.txt for details"
        print "-----------------------------------------------------------"
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import sqlite3, os
from PyQt5.QtCore import pyqtSignal
from shutil import copyfile
from io import StringIO
from progressWidget import *
class database():
'''
Do the SQL things
'''
    def __init__(self):
        # NOTE(review): the connection attributes start as "" rather than
        # None; callers appear to rely only on truthiness — confirm.
        self.dataPath = "./data/pyzik.db"  # on-disk SQLite file
        self.connection = ""
        self.memoryConnection = ""
        self.createConnection()  # opens (creating if needed) the database
def initDataBase(self):
self.createTableArtists()
self.createTableAlbums()
self.createTableMusicDirectories()
self.createTablePlayHistoryAlbum()
    def initMemoryDB(self):
        """Copy the on-disk database into a new in-memory connection.

        Dumps the disk DB as SQL (updating a progress widget while doing
        so), closes it, replays the SQL into ":memory:", and repoints
        self.connection at the in-memory copy.
        """
        wProgress = progressWidget()
        # Read database to tempfile
        print('initMemoryDB')
        tempfile = StringIO()
        i=0
        # NOTE(review): iterCount is a guess at the dump length; progress can
        # exceed 100% when the dump has more than 1000 statements.
        iterCount = 1000
        for line in self.connection.iterdump():
            tempfile.write('%s\n' % line)
            iProgress = round((i/iterCount)*100)
            wProgress.setValue(iProgress)
            i+=1
        self.connection.close()
        tempfile.seek(0)
        # Create a database in memory and import from tempfile
        self.memoryConnection = sqlite3.connect(":memory:")
        self.memoryConnection.cursor().executescript(tempfile.read())
        self.memoryConnection.commit()
        # Row factory gives dict-like access to query results.
        self.memoryConnection.row_factory = sqlite3.Row
        self.connection = self.memoryConnection
        wProgress.close()
def saveMemoryToDisc(self):
copyfile(self.dataPath,self.dataPath+'k')
os.remove(self.dataPath)
self.createConnection()
with self.connection:
for line in self.memoryConnection.iterdump():
if line not in ('BEGIN;', 'COMMIT;'): # let python handle the transactions
self.connection.execute(line)
self.connection.commit()
def createConnection(self):
""" create a database connection to the SQLite database
specified by self.dataPath
:return: Connection object or None
"""
print('createConnection')
dirPath, db_file = os.path.split(self.dataPath)
if not os.path.exists(dirPath):
os.makedirs(dirPath)
try:
self.connection = sqlite3.connect(self.dataPath)
return self.connection
except sqlite3.Error as e:
print(e)
return None
def execSQLWithoutResult(self, sql):
try:
c = self.connection.cursor()
c.execute(sql)
except sqlite3.Error as e:
print(e)
def dropTable(self, table_name):
""" drop the table called table_name
"""
self.execSQLWithoutResult("DROP TABLE "+table_name)
def dropAllTables(self):
self.dropTable("artists")
self.dropTable("albums")
#self.dropTable("musicDirectories")
def insertLine(self, insert_sql):
""" insert a line from the insert_sql statement """
try:
c = self.connection.cursor()
c.execute(insert_sql)
self.connection.commit()
return c.lastrowid
except sqlite3.Error as e:
print(e)
return -1
def createTableArtists(self):
sqlCreateTableArtist = """ CREATE TABLE IF NOT EXISTS artists (
artistID integer PRIMARY KEY,
name text NOT NULL,
countryID integer,
categoryID integer
); """
self.execSQLWithoutResult(sqlCreateTableArtist)
def createTableAlbums(self):
sqlCreateTableAlbum = """ CREATE TABLE IF NOT EXISTS albums (
albumID integer PRIMARY KEY,
title text NOT NULL,
dirName text,
dirPath text,
musicDirectoryID integer,
artistID integer,
year integer,
cover text,
FOREIGN KEY (artistID) REFERENCES artists(artistID),
FOREIGN KEY (musicDirectoryID) REFERENCES musicDirectories(musicDirectoryID)
); """
self.execSQLWithoutResult(sqlCreateTableAlbum)
def createTableMusicDirectories(self):
sqlCreateTableMusicDirectories = """ CREATE TABLE IF NOT EXISTS musicDirectories (
musicDirectoryID integer PRIMARY KEY,
dirPath text NOT NULL,
dirName text,
styleID integer
); """
self.execSQLWithoutResult(sqlCreateTableMusicDirectories)
if not self.columnExistsInTable("musicDirectories","dirType"):
sqlAddcolumnDirType = """ ALTER TABLE musicDirectories ADD COLUMN dirType integer default 0 """
self.execSQLWithoutResult(sqlAddcolumnDirType)
def createTablePlayHistoryAlbum(self):
sqlCreateTablePlayHistoryAlbum = """ CREATE TABLE IF NOT EXISTS playHistoryAlbum (
HistoryAlbumID integer PRIMARY KEY,
albumID integer,
PlayDate datetime,
FOREIGN KEY (albumID) REFERENCES albums(albumID)
); """
self.execSQLWithoutResult(sqlCreateTablePlayHistoryAlbum)
def getSelect(self,select_sql,params=None):
c = self.connection.cursor()
if params is None:
c.execute(select_sql)
else:
c.execute(select_sql,params)
rows = c.fetchall()
return rows
def columnExistsInTable(self,table,column):
sqlExists = "PRAGMA table_info("+table+");"
columns = self.getSelect(sqlExists)
for col in columns:
if column == col[1] : return True
return False
def insertAlbum(self,album):
try:
c = self.connection.cursor()
sqlInsertAlbum = """ INSERT INTO albums (title, artistID,dirPath,year,musicDirectoryID)
VALUES (?,?,?,?,?);
"""
c.execute(sqlInsertAlbum,(album.title,album.artistID,album.dirPath,album.year,album.musicDirectoryID))
self.connection.commit()
album.albumID = c.lastrowid
except sqlite3.Error as e:
print(e)
return album.albumID
def insertArtist(self,artist):
try:
c = self.connection.cursor()
sqlInsertArtist = """ INSERT INTO artists (name)
VALUES (?);
"""
c.execute(sqlInsertArtist,(artist.name,))
self.connection.commit()
artist.artistID = c.lastrowid
except sqlite3.Error as e:
print("InsertArtist error="+str(e))
return artist.artistID
|
import cv2
# Load a picture from disk, stretch it (5x wider, 2x taller) relative to
# its own dimensions, and show it briefly in an on-screen window.
img = cv2.imread("/home/anshul/Desktop/obama.jpg")

stretched_size = (int(img.shape[1] * 5), int(img.shape[0] * 2))
resized = cv2.resize(img, stretched_size)

cv2.imshow("legend", resized)
cv2.waitKey(2000)  # keep the window open for ~2 seconds
cv2.destroyAllWindows()
|
import liblo, sys
class Manta(object):
    """Small OSC endpoint: listens on one UDP port and sends to another.

    Args:
        receive_port: UDP port the liblo server listens on.
        send_port: UDP port outgoing messages are sent to.
        send_address: host outgoing messages are sent to (previously
            accepted but silently ignored; now honoured).
    """

    def __init__(self, receive_port=8000, send_port=8001, send_address='127.0.0.1'):
        self.osc_server = liblo.Server(receive_port)
        # BUG FIX: send_address was accepted but never used; build the
        # target from both host and port.
        self.osc_target = liblo.Address(send_address, send_port)

    def send_osc(self, path, *args):
        """Send an OSC message at `path` with the given arguments.

        BUG FIX: the original ignored `path` and `*args` entirely and
        always sent a hard-coded "/foo/message1" 123 message (the intended
        call was left commented out).
        """
        liblo.send(self.osc_target, path, *args)
def main():
    # Smoke test: create a Manta endpoint on the default ports and send a
    # single, argument-less OSC message.
    manta = Manta()
    manta.send_osc('/testing/testing')

if __name__ == '__main__':
    main()
|
#!/usr/bin/env python
def inventory_mikrotik_system_health(info):
    """Discover one unnamed service whenever SNMP returned any data."""
    return [('', None)] if info else None
def check_mikrotik_system_health(item, _no_params, info):
    """Evaluate the first SNMP row: build perfdata plus a readable summary.

    Raw SNMP values are tenths (voltage, temperatures, power); current is
    in mA and CPU frequency in MHz.  Empty strings mean "not reported".
    """
    for voltage, temp, cputemp, powercons, current, cpufreq in info:
        perfdata = []
        pieces = []
        if voltage:
            val = float(voltage) / 10
            perfdata.append(('voltage', val))
            pieces.append('Voltage: %s V' % val)
        if temp:
            val = float(temp) / 10
            perfdata.append(('temp', val))
            pieces.append('Temperature: %sC' % val)
        if cputemp:
            val = float(cputemp) / 10
            perfdata.append(('cputemp', val))
            pieces.append('CPU Temperature: %sC' % val)
        if powercons:
            val = float(powercons) / 10
            perfdata.append(('power-consumption', val))
            pieces.append('Power Consumption: %s W' % val)
        if current:
            perfdata.append(('current', int(current)))
            pieces.append('Current: %s mA' % int(current))
        if cpufreq:
            pieces.append('CPU Frequency: %s MHz' % cpufreq)
        # Only the first row is relevant: the check returns immediately.
        return (0, ', '.join(pieces), perfdata)
    return (3, 'Invalid data', [])
# Check_MK plugin registration.  check_info / snmp_info /
# snmp_scan_functions are globals provided by the Check_MK check API at
# load time (not defined in this file).
check_info['mikrotik_system_health'] = {
    'check_function': check_mikrotik_system_health,
    'inventory_function': inventory_mikrotik_system_health,
    'service_description': 'System health',
    'group': 'mikrotik_system_health',
    'has_perfdata': True
}

# SNMP OIDs fetched for this check (MikroTik enterprise health subtree).
snmp_info['mikrotik_system_health'] = (
    '.1.3.6.1.4.1.14988.1.1.3', [
        '8.0',   # voltage
        '10.0',  # temperature
        '11.0',  # cpu-temperature
        '12.0',  # power consumption
        '13.0',  # current
        '14.0',  # cpu frequency
    ]
)

# Apply this check to any device exposing the MikroTik health subtree.
snmp_scan_functions['mikrotik_system_health'] = \
    lambda oid: oid('.1.3.6.1.4.1.14988.1.1.3.*') is not None
|
from .test_thoriumcorp_lab import suite
|
# Sets: unordered collections of unique items (the duplicate 9 below is
# silently dropped).
numbers1 = {1, 2, 3, 4, 5, 6, 7, 8, 9, 0, 9}
numbers2 = {1, 5, 9}
numbers3 = {1, 2, 3, 4, 5}
numbers1_bkp = numbers1.copy()  # untouched copy used by the later examples
print(numbers1)

# Methods

# clear(): empty the set in place
numbers3.clear()
print(numbers3)

# difference(): elements of numbers1 that are not in numbers2
result = numbers1.difference(numbers2)
print(result)  # {0, 2, 3, 4, 6, 7, 8}

# discard(): remove the particular element (no error if it is absent)
numbers1.discard(5)
print(f"Discarded Set -> {numbers1}")  # Discarded Set -> {0, 1, 2, 3, 4, 6, 7, 8, 9}

# difference_update(): update the numbers1 set in place with the difference
numbers1.difference_update(numbers2)
print(f"Difference Update -> {numbers1}")  # {0, 2, 3, 4, 6, 7, 8}

# intersection(): items common to both sets
print(numbers1_bkp)
print(numbers2)
print(f"Intersection -> {numbers1_bkp.intersection(numbers2)}")  # common items => Intersection -> {1, 5, 9}

# isdisjoint(): True only when the sets share no common items
print(numbers1_bkp.isdisjoint({11, 10, 11}))  # True
print(numbers1_bkp.isdisjoint(numbers2))  # False

# union(): combine all the elements between the sets
print(numbers1_bkp.union(numbers2))  # {0, 1, 2, 3, 4, 5, 6, 7, 8, 9}
print(numbers1_bkp.union({11, 12, 15}))  # {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 11, 12, 15}

# issubset(): is every element also in the other set?
print(numbers1_bkp.issubset(numbers2))  # False
print(numbers2.issubset(numbers1_bkp))  # True

# issuperset(): does the set entirely contain the other?
print(numbers1_bkp.issuperset(numbers2))  # True
print(numbers2.issuperset(numbers1_bkp))  # False
|
from flask import Flask, render_template, request
from sklearn.feature_extraction.text import CountVectorizer, TfidfTransformer
from sklearn.pipeline import Pipeline
from sklearn.linear_model import LogisticRegression
from sklearn.externals import joblib
import re
import string
app = Flask(__name__)

# Cached sentiment pipeline.  The model's labels are 0 = negative,
# 4 = positive.  Loaded lazily so it is read from disk once, not on
# every request.
_pipeline = None


def _get_pipeline():
    """Load the trained model on first use and cache it.

    NOTE: sklearn.externals.joblib is deprecated; consider the standalone
    `joblib` package when upgrading scikit-learn.
    """
    global _pipeline
    if _pipeline is None:
        _pipeline = joblib.load("./SPLASH/depression_model.pkl")
    return _pipeline


def _clean_tweet(text):
    """Remove @mentions, newlines and punctuation from raw tweet text."""
    # BUG FIX: the original fed each matched mention back into re.sub()
    # as a *pattern*, which breaks when a mention contains regex
    # metacharacters; one substitution of the mention pattern is enough.
    text = re.sub(r"@\w*", "", text)
    text = text.replace("\n", "")
    exclude = set(string.punctuation)
    return "".join(char for char in text if char not in exclude)


@app.route("/", methods=["POST", "GET"])
def index():
    """Classify a submitted tweet; render the bare form otherwise."""
    if request.form and "tweet_input" in request.form:
        tweet_input = [_clean_tweet(request.form["tweet_input"])]
        # 0 = negative 4 = positive
        output = _get_pipeline().predict(tweet_input)
        # BUG FIX: the diagnostic labels were inverted -- the model emits
        # 0 for negative and 4 for positive, but the original printed
        # "Positive result" when output == 0.
        if output == 0:
            print("Negative result")
        else:
            print("Positive result")
        return render_template("index.html", tweet_input=tweet_input[0], output=output[0])
    return render_template("index.html")


if __name__ == "__main__":
    app.run(debug=True)
|
from __future__ import division
import iotbx.pdb
import sys
import os
import itertools
def get_prev_rsd_flip_occs(rg):
    """Detect a "peptide flip" between alternate conformations of rg.

    For every pair of real altlocs in the residue_group, the second
    conformer's C=O is translated so both carbons coincide, and the
    O1-C-O2 angle is measured.  If any pair's angle exceeds 90 degrees
    the carbonyl direction has flipped.

    Returns a dict {altloc: occupancy of that conformer's O atom} when a
    flip is detected, otherwise None (also None as soon as any required
    C/O atom is missing).
    """
    co_angles = []
    o_occs = {}
    altlocs = set()
    for ag in rg.atom_groups():
        altlocs.add(ag.altloc)
    # Compare every pair of true alternate conformations; blank altlocs
    # (the shared/common conformation) are skipped.
    for altloc_pair in itertools.combinations(altlocs, 2):
        if " " in altloc_pair or "" in altloc_pair:
            continue
        c1_atom = None
        o1_atom = None
        c2_atom = None
        o2_atom = None
        # Collect backbone C and O for the first altloc of the pair
        # (PDB atom names are fixed-width, hence " C " / " O ").
        for ag in rg.atom_groups():
            if ag.altloc == altloc_pair[0]:
                for atom in ag.atoms():
                    if atom.name == " C ":
                        c1_atom = atom
                    elif atom.name == " O ":
                        o1_atom = atom
                        o_occs[ag.altloc] = atom.occ
        # ... and for the second altloc of the pair.
        for ag in rg.atom_groups():
            if ag.altloc == altloc_pair[1]:
                for atom in ag.atoms():
                    if atom.name == " C ":
                        c2_atom = atom
                    elif atom.name == " O ":
                        o2_atom = atom
                        o_occs[ag.altloc] = atom.occ
        if not (c1_atom and o1_atom and c2_atom and o2_atom):
            # NOTE(review): bails out of the whole function (not just this
            # pair) as soon as one pair is incomplete.
            return None
        # Translate second C=O so carbons are aligned
        new_o2_xyz = []
        for i in range(3):
            diff = c2_atom.xyz[i] - c1_atom.xyz[i]
            new_o2_xyz.append(o2_atom.xyz[i] - diff)
        new_o2_atom = o2_atom.detached_copy()
        new_o2_atom.xyz = new_o2_xyz
        co_angle = c1_atom.angle(atom_1=o1_atom, atom_3=new_o2_atom, deg=True)
        # ^ c1_atom is "atom_2", so to speak, in the angle
        co_angles.append(co_angle)
    if len(co_angles) >= 1 and max(co_angles) > 90:
        return o_occs
    return None
if __name__ == "__main__":
    # Command line: exactly one argument, the input PDB file.
    if len(sys.argv) != 2:
        print >> sys.stderr, \
            'Usage: python %s in.pdb' % os.path.basename(__file__)
        sys.exit(1)
    file_name = sys.argv[1]
    pdb_obj = iotbx.pdb.hierarchy.input(file_name=file_name)
    # Walk the hierarchy; for each residue check whether the PREVIOUS
    # residue had a peptide flip, and if so make sure this residue's N is
    # split into matching altlocs with matching occupancies.
    for model in pdb_obj.hierarchy.models():
        for chain in model.chains():
            prev_rsd_flip_occs = None
            for rg in chain.residue_groups():
                if prev_rsd_flip_occs:
                    # See if we need to split the N
                    N_altlocs = set()
                    #N_atom = None
                    for ag in rg.atom_groups():
                        for atom in ag.atoms():
                            if atom.name.strip() == "N":
                                N_altlocs.add(ag.altloc)
                                #N_atom = atom
                    #if len(N_altlocs) == 1:
                    if len(N_altlocs) != len(prev_rsd_flip_occs):
                        # We *DO* need to split the N
                        print chain.id, rg.resseq, rg.atom_groups()[0].resname, \
                            "needs to be split to fix the i to i-1 peptide geometry"
                        new_ags_to_append = []
                        # One N per flipped altloc of the previous residue.
                        for altloc in prev_rsd_flip_occs:
                            print "altloc", altloc, prev_rsd_flip_occs[altloc]
                            matching_ag = None
                            matching_N_atom = None
                            for ag in rg.atom_groups():
                                if ag.altloc == altloc:
                                    matching_ag = ag
                                    for atom in ag.atoms():
                                        if atom.name.strip() == "N":
                                            matching_N_atom = atom
                            # OPTION 1: atom_group exists and already has N
                            if matching_ag and matching_N_atom:
                                #print 'OPTION 1'
                                matching_N_atom.occ = prev_rsd_flip_occs[altloc]
                                # This ^ makes the N's occ match that of the preceding CO,
                                # but its occ could now not match its own CA's occ!
                            # OPTION 2: atom_group exists, but N does not
                            # (e.g. atom_group is only for sidechain)
                            elif matching_ag and (not matching_N_atom):
                                #print 'OPTION 2'
                                new_ag = matching_ag.detached_copy()
                                # Possible TODO: change coordinates?
                                new_ag.occupancy = prev_rsd_flip_occs[altloc]
                                #print new_ag.occupancy
                                # Keep only the N atom in the copied group.
                                for atom in new_ag.atoms():
                                    if atom.name.strip() != "N":
                                        new_ag.remove_atom(atom)
                                    else:
                                        atom.occ = prev_rsd_flip_occs[altloc]
                                rg.merge_atom_groups(matching_ag, new_ag)
                            # OPTION 3: atom_group does not exist at all
                            else:
                                #print 'OPTION 3'
                                new_ag = rg.atom_groups()[0].detached_copy()
                                # Possible TODO: pick altloc that is closest / has best geometry
                                # to use for template atom_group, instead of just default index 0?
                                # Possible TODO: change coordinates?
                                new_ag.altloc = altloc
                                new_ag.occupancy = prev_rsd_flip_occs[altloc]
                                #print new_ag.occupancy
                                # Keep only the N atom in the copied group.
                                for atom in new_ag.atoms():
                                    if atom.name.strip() != "N":
                                        new_ag.remove_atom(atom)
                                    else:
                                        atom.occ = prev_rsd_flip_occs[altloc]
                                new_ags_to_append.append(new_ag)
                        # Append outside the loop to avoid mutating the
                        # residue group while scanning its atom_groups.
                        for new_ag in new_ags_to_append:
                            rg.append_atom_group(new_ag)
                        #rg.atom_groups()[0].remove_atom(N_atom)
                # Regardless of what we did for this residue,
                # plan ahead for the next residue
                prev_rsd_flip_occs = get_prev_rsd_flip_occs(rg)
                if prev_rsd_flip_occs:
                    print chain.id, rg.resseq, rg.atom_groups()[0].resname, \
                        "has a peptide flip (CO rotates by >90 degrees)"
    # Write the (possibly modified) hierarchy next to the input file.
    suffix = "_fixedPepFlipGeom"
    output_pdb = os.path.basename(file_name).split(".pdb")[0] + suffix + ".pdb"
    pdb_obj.hierarchy.write_pdb_file(file_name=output_pdb,
        crystal_symmetry=pdb_obj.input.crystal_symmetry(), append_end=True)
    print "Wrote", output_pdb
|
class Queue:
    """A minimal FIFO queue backed by a Python list."""

    def __init__(self):
        self.queue = []

    def enqueue(self, value):
        """Add value at the back of the queue."""
        self.queue.append(value)

    def dequeue(self):
        """Remove and return the item at the FRONT of the queue.

        BUG FIX: the original popped from the END of the list (LIFO /
        stack behaviour); a queue must remove the oldest element first.
        Also returns the removed value (previously discarded), which is
        backward-compatible with callers ignoring the result.
        """
        return self.queue.pop(0)

    def display(self):
        """Print the current queue contents (front first)."""
        print(self.queue)
# Quick demonstration: enqueue three items, show the queue, remove one,
# show it again.
a = Queue()
a.enqueue(5)
a.enqueue(10)
a.enqueue(15)
a.display()
a.dequeue()
a.display()
|
# -*- coding: utf-8 -*-
from datetime import datetime
try:
    # noinspection PyUnresolvedReferences
    from django.core.exceptions import ImproperlyConfigured
    # noinspection PyUnresolvedReferences
    from django.utils import timezone

    def _active_tz():
        """Django's current timezone, or None when settings are missing."""
        try:
            return timezone.get_current_timezone()
        except ImproperlyConfigured:
            return None

    def now():
        """Timezone-aware 'now' when Django is configured, naive otherwise."""
        try:
            return timezone.now()
        except ImproperlyConfigured:
            return datetime.now()

    def fromtimestamp(timestamp):
        """Convert a POSIX timestamp using the active Django timezone."""
        return datetime.fromtimestamp(timestamp, tz=_active_tz())

    def astimezone(time):
        """Convert a datetime into the active Django timezone."""
        return time.astimezone(_active_tz())

except ImportError:
    # Django is unavailable: fall back to plain local-time datetimes.
    def now():
        return datetime.now()

    def fromtimestamp(timestamp):
        return datetime.fromtimestamp(timestamp)

    def astimezone(time):
        return time.astimezone()
|
# -*- coding: utf-8 -*-
#############################################################################
# Copyright Vlad Popovici <popovici@bioxlab.org>
#
# Licensed under the Apache License, Version 2.0 ( the "License" );
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#############################################################################
__author__ = "Vlad Popovici <popovici@bioxlab.org>"
__version__ = 0.1
"""QPATH.CORE: Core classes and functions.
Defines exception classes and other basic classes.
"""
__all__ = ['WSIInfo', 'NumpyImage', 'MRIBase', 'MRI', 'MRIExplorer', 'MRISlidingWindow', 'TiledImage']
import numpy as np
from math import ceil, floor, pow, log10, log2
import openslide as osl
from abc import ABC, abstractmethod
import simplejson as json
from skimage.io import imread, imsave
import os
import os.path
import shutil
from ._lowlevel._io import osl_read_region_
from .annot.core import Polygon
from .masks import add_region, apply_mask
from . import Error
#####
class WSIInfo(object):
    """Hold some basic info about a WSI.

    Args:
        path (str): full path to WSI file

    Attributes:
        path (str): full path to WSI file
        info (dict): a dictionary containing WSI properties
    """
    path = None
    info = {}

    def __init__(self, path):
        self.path = path
        with osl.OpenSlide(path) as wsi:
            self.info = {'vendor': wsi.properties['openslide.vendor'],
                         'x_mpp': float(wsi.properties['openslide.mpp-x']),
                         'y_mpp': float(wsi.properties['openslide.mpp-y']),
                         'objective': float(wsi.properties['openslide.objective-power']),
                         'x_offset': 0,
                         'y_offset': 0,
                         'level_count': wsi.level_count}
            # fill in level data:
            # BUG FIX: np.long (an alias of the builtin int) was removed in
            # NumPy 1.24; use int() directly.
            lv = dict()
            for k in range(wsi.level_count):
                lv[k] = {'x_size': int(wsi.level_dimensions[k][0]),
                         'y_size': int(wsi.level_dimensions[k][1]),
                         'downsample_factor': float(wsi.level_downsamples[k])}
                if 'openslide.level[' + str(k) + '].tile-width' in wsi.properties:
                    # BUG FIX: the original line ended with a stray comma,
                    # which stored tile_x_size as a 1-tuple instead of an int.
                    lv[k]['tile_x_size'] = int(wsi.properties['openslide.level[' + str(k) + '].tile-width'])
                    lv[k]['tile_y_size'] = int(wsi.properties['openslide.level[' + str(k) + '].tile-height'])
            self.info['levels'] = lv
            # Hamamatsu slides store the scan offset from the slide centre.
            if wsi.properties['openslide.vendor'] == 'hamamatsu':
                self.info['x_offset'] = int(wsi.properties['hamamatsu.XOffsetFromSlideCentre'])
                self.info['y_offset'] = int(wsi.properties['hamamatsu.YOffsetFromSlideCentre'])

    def get_level_for_magnification(self, mag: float, eps=1e-6) -> int:
        """Returns the level in the image pyramid that corresponds the given magnification.

        Args:
            mag (float): magnification
            eps (float): accepted error when approximating the level

        Returns:
            level (int) or -1 if no suitable level was found
        """
        # Valid magnifications span [objective / 2**(levels-1), objective].
        if mag > self.info['objective'] or mag < 2.0**(1 - self.info['level_count']) * self.info['objective']:
            return -1
        lx = log2(self.info['objective'] / mag)
        k = np.where(np.isclose(lx, range(0, self.info['level_count']), atol=eps))[0]
        if len(k) > 0:
            return k[0]  # first index matching
        else:
            return -1  # no match close enough

    def get_magnification_for_level(self, level: int) -> float:
        """Returns the magnification (objective power) for a given level.

        Args:
            level (int): level in the pyramidal image

        Returns:
            magnification (float)
            If the level is out of bounds, returns -1.0
        """
        if level < 0 or level >= self.info['level_count']:
            return -1.0
        if level == 0:
            return self.info['objective']
        # Each level halves the magnification of the previous one.
        return 2.0**(-level) * self.info['objective']
#####
class NumpyImage:
    """Namespace of small helpers for images stored as numpy arrays.

    An image - single channel or 3(4) channels - is an H x W (x C) array
    with H (height) rows and W (width) columns, C = 3 or 4.
    """

    @staticmethod
    def width(img: np.ndarray):
        """Number of columns (second axis)."""
        return img.shape[1]

    @staticmethod
    def height(img: np.ndarray):
        """Number of rows (first axis)."""
        return img.shape[0]

    @staticmethod
    def nchannels(img: np.ndarray):
        """Number of channels; 1 for a 2-D (single plane) image."""
        return img.shape[2] if img.ndim > 2 else 1

    @staticmethod
    def is_empty(img, empty_level: float = 0) -> bool:
        """Is the image empty?

        Args:
            img (numpy.ndarray): image
            empty_level (int/numeric): if the sum of pixels is at most this
                value, the image is considered empty.

        Returns:
            bool
        """
        return img.sum() <= empty_level
#####
class MRIBase(ABC):
    """Base class for MultiResolutionImages presenting a uniform interface for retrieving
    pixels and regions. Note that changes in image data are not propagated back to the original
    image. This is an abstract class.

    Args:
        wsi_info (WSIInfo): an info object for a whole slide image

    Attributes:
        info (WSIInfo)
    """

    def __init__(self, wsi_info):
        if not isinstance(wsi_info, WSIInfo):
            raise Error("Only WSIInfo instances are accepted")
        self._wsi_info = wsi_info

    @property
    def info(self):
        """The WSI property dictionary (see WSIInfo.info)."""
        return self._wsi_info.info

    @property
    def path(self):
        """Full path of the underlying WSI file."""
        return self._wsi_info.path

    @property
    def widths(self):
        """Per-level image widths, in pixels."""
        return [self.info['levels'][l]['x_size'] for l in range(self.nlevels)]

    @property
    def heights(self):
        """Per-level image heights, in pixels."""
        return [self.info['levels'][l]['y_size'] for l in range(self.nlevels)]

    @property
    def nlevels(self):
        """Number of levels in the image pyramid."""
        return self.info['level_count']

    def between_level_scaling_factor(self, from_level, to_level):
        """Return the scaling factor for converting coordinates (magnification)
        between two levels in the MRI.

        Args:
            from_level (int): original level
            to_level (int): destination level

        Returns:
            float
        """
        f = self.info['levels'][from_level]['downsample_factor'] / \
            self.info['levels'][to_level]['downsample_factor']
        return f

    def convert_px(self, point, from_level, to_level):
        """Convert pixel coordinates of a point from <from_level> to
        <to_level>

        Args:
            point (tuple): (x,y) coordinates in <from_level> plane
            from_level (int): original image level
            to_level (int): destination level

        Returns:
            x, y (float): new coodinates - no rounding is applied
        """
        if from_level == to_level:
            return point  # no conversion is necessary
        x, y = point
        f = self.between_level_scaling_factor(from_level, to_level)
        x *= f
        y *= f
        return x, y

    @abstractmethod
    def get_region_px(self, x0, y0, width, height, level, as_type=np.uint8):
        """Read a region from the image source. The region is specified in
        pixel coordinates.

        Args:
            x0, y0 (long): top left corner of the region (in pixels, at the specified
                level)
            width, height (long): width and height (in pixels) of the region
            level (int): the magnification level to read from
            as_type: type of the pixels (default numpy.uint8)

        Returns:
            a numpy.ndarray
        """
        pass

    @abstractmethod
    def get_polygonal_region_px(self, contour, border=0, as_type=np.uint8):
        """Returns a rectangular view of the image source that minimally covers a closed
        contour (polygon). All pixels outside the contour are set to 0.

        BUG FIX: the parameter was misspelled `boder` while this docstring
        and the concrete implementation (MRI) both use `border`; renamed
        for consistency.

        Args:
            contour (qpath.annot.core.Polygon): a closed polygonal line given in
                terms of its vertices. The contour's coordinates are supposed to be
                precomputed and to be represented in pixel units at the desired
                magnification (level) - which is taken from the contour object.
            border (int): if > 0, take this many extra pixels in the rectangular
                region (up to the limits on the image size)
            as_type: pixel type for the returned image (array)

        Returns:
            a numpy.ndarray
        """
        pass

    @abstractmethod
    def get_region(self, x0, y0, width, height, level, as_type=np.uint8):
        """Read a region from the image source. The region is specified in
        slide coordinates.

        Args:
            x0, y0 (long): top left corner of the region (in slide units)
            width, height (long): width and height (in slide units) of the region
            level (int): the magnification level to read from
            as_type: type of the pixels (default numpy.uint8)

        Returns:
            a numpy.ndarray (OpenCV channel ordering: (A)BGR)
        """
        pass
#####
class MRI(MRIBase):
    """A multi-resolution image backed by OpenSlide.

    Args:
        wsi_info (WSIInfo): info about the slide

    Attributes:
        see MRIBase
    """
    _reader = None

    def __init__(self, wsi_info: WSIInfo):
        if not isinstance(wsi_info, WSIInfo):
            raise Error("Only WSIInfo instances are accepted")
        self._wsi_info = wsi_info
        self._reader = osl.OpenSlide(self.path)

    def get_region_px(self, x0, y0, width, height, level, as_type=np.uint8, keep_alpha=False):
        """Read a region from the image source. The region is specified in
        pixel coordinates.

        Args:
            x0, y0 (long): top left corner of the region (in pixels, at the specified
                level)
            width, height (long): width and height (in pixels) of the region
            level (int): the magnification level to read from
            as_type: type of the pixels (default numpy.uint8)
            keep_alpha (bool): if True, returns the alpha-channel as well

        Returns:
            a numpy.ndarray

        Raises:
            Error: for an invalid level, an out-of-extent region, or a
                low-level read failure.
        """
        if level < 0 or level >= self.nlevels:
            raise Error("requested level does not exist")
        # check bounds:
        if x0 >= self.widths[level] or y0 >= self.heights[level] or \
                x0 + width > self.widths[level] or \
                y0 + height > self.heights[level]:
            raise Error("region out of layer's extent")
        # OpenSlide addresses the top-left corner in level-0 coordinates:
        x0, y0 = self.convert_px((x0, y0), from_level=level, to_level=0)
        # BUG FIX: np.long (an alias of the builtin int) was removed in
        # NumPy 1.24; use int() directly.
        x0, y0, width, height = [int(_x) for _x in [x0, y0, width, height]]
        img = np.zeros((height, width, 4), dtype=np.uint8)
        r = osl_read_region_(self.path,
                             img,
                             x0, y0,
                             width, height,
                             level)
        if r != 0:
            raise Error("low-level error in osl_read_region", code=r)
        if keep_alpha:
            return img[..., (2, 1, 0, 3)]  # change BGRA into RGBA
        else:
            return img[..., (2, 1, 0)]  # change BGRA into RGB

    def get_polygonal_region_px(self, contour: Polygon, border: int = 0, as_type=np.uint8) -> np.ndarray:
        """Returns a rectangular view of the image source that minimally covers a closed
        contour (polygon). All pixels outside the contour are set to 0.

        NOTE(review): the contour is translated IN PLACE so that (0,0)
        corresponds to the region's corner -- a side effect on the
        caller's object; confirm callers expect this.

        Args:
            contour (qpath.annot.core.Polygon): a closed polygonal line given in
                terms of its vertices. The contour's coordinates are supposed to be
                precomputed and to be represented in pixel units at the desired
                magnification (level) - which is taken from the contour object.
            border (int): if > 0, take this many extra pixels in the rectangular
                region (up to the limits on the image size)
            as_type: pixel type for the returned image (array)

        Returns:
            a numpy.ndarray
        """
        if not isinstance(contour, Polygon):
            raise Error("expecting a Polygon for <contour>")
        lv = self._wsi_info.get_level_for_magnification(contour.coordinate_system_magnification)
        if lv == -1:
            raise Error("Contour magnification does not match any available level.")
        # Bounding box of the contour, expanded by `border` and clipped to
        # the level's extent.
        x0, y0 = np.min(contour.xy, axis=0)
        x1, y1 = np.max(contour.xy, axis=0)
        x0, y0 = max(0, x0 - border), max(0, y0 - border)
        x1, y1 = min(x1 + border, self._wsi_info.info['levels'][lv]['x_size']), \
            min(y1 + border, self._wsi_info.info['levels'][lv]['y_size'])
        # Shift the annotation such that (0,0) will correspond to (x0, y0)
        contour.translate(-x0, -y0)
        # Read the corresponding region
        img = self.get_region_px(x0, y0, x1 - x0, y1 - y0, lv, keep_alpha=False, as_type=np.uint8)
        # Prepare mask
        mask = np.zeros(img.shape[:2], dtype=np.uint8)
        add_region(mask, contour.xy)
        # Apply mask
        img = apply_mask(img, mask)
        return img

    def get_region(self, x0, y0, width, height, level, as_type=np.uint8):
        """Region addressed in slide units - not implemented for this backend."""
        raise Error("Not yet implemented")
#####
class MRIExplorer(ABC):
    """Defines an interface for multi-resolution image explorers. An image
    explorer simply returns positions in an image rather than parts of the
    image itself. Hence, it only needs to know about the extent of the image.

    Implementations are iterable: iterating an explorer yields successive
    positions until next() raises StopIteration.
    """

    @abstractmethod
    def reset(self):
        """Reset the explore, next call to next() will start from the
        initial conditions.
        """
        pass

    @abstractmethod
    def last(self):
        """Go to last position and return it."""
        pass

    @abstractmethod
    def next(self):
        """Go to next position."""
        pass

    @abstractmethod
    def prev(self):
        """Go to previous position."""
        pass

    @abstractmethod
    def here(self):
        """Returns current position, does not change it."""
        pass

    @abstractmethod
    def total_steps(self):
        """Returns the total number of steps to iterate over all positions
        in the image, according to the specific schedule.
        """
        pass

    def __iter__(self):
        # The explorer is its own iterator.
        return self

    def __next__(self):
        # Python 3 iterator protocol delegates to the abstract next().
        return self.next()

    def __prev__(self):
        # NOTE(review): __prev__ is not part of any Python protocol; it is
        # merely a convenience alias for prev().
        return self.prev()
#####
class MRISlidingWindow(MRIExplorer):
    """A sliding window image explorer. It returns successively the coordinates
    of the sliding window as a tuple (x0, y0, x1, y1).

    Args:
        image_shape : tuple (nrows, ncols)
            Image shape (img.shape).
        w_size : tuple (width, height)
            Window size as a pair of width and height values.
        start : tuple (x0, y0)
            Top left corner of the first window. Defaults to (0,0).
        step : tuple (x_step, y_step)
            Step size for the sliding window, as a pair of horizontal
            and vertical steps. Defaults to (1,1).

    Raises:
        ValueError: when the window is degenerate or does not fit the image.
    """

    def __init__(self, image_shape, w_size, start=(0, 0), step=(1, 1)):
        self._image_shape = image_shape
        self._w_size = w_size
        self._start = start
        self._step = step
        self._k = 0  # index of the current window position
        img_h, img_w = image_shape
        if w_size[0] < 2 or w_size[1] < 2:
            raise ValueError('Window size too small.')
        if img_w < start[0] + w_size[0] or img_h < start[1] + w_size[1]:
            raise ValueError('Start position and/or window size out of image.')
        x, y = np.meshgrid(np.arange(start[0], img_w - w_size[0] + 1, step[0]),
                           np.arange(start[1], img_h - w_size[1] + 1, step[1]))
        # BUG FIX: zip() returns a lazy iterator in Python 3, but
        # total_steps() calls len() on this attribute and here() indexes
        # into it -- both raised TypeError.  Materialize the list once.
        self._top_left_corners = list(zip(x.reshape((-1,)).tolist(),
                                          y.reshape((-1,)).tolist()))

    def total_steps(self):
        """Total number of window positions."""
        return len(self._top_left_corners)

    def reset(self):
        """Restart the iteration from the first window."""
        self._k = 0

    def here(self):
        """Current window as (x0, y0, x1, y1), clipped to the image extent."""
        if 0 <= self._k < self.total_steps():
            x0, y0 = self._top_left_corners[self._k]
            x1 = min(x0 + self._w_size[0], self._image_shape[1])
            y1 = min(y0 + self._w_size[1], self._image_shape[0])
            return x0, y0, x1, y1
        raise Error("Position outside bounds")

    def last(self):
        """Jump to the last window and return it."""
        if self.total_steps() > 0:
            self._k = self.total_steps() - 1
            x0, y0, x1, y1 = self.here()
            return x0, y0, x1, y1
        else:
            raise Error("Empty iterator")

    def next(self):
        """Return the current window and advance; StopIteration at the end."""
        if self._k < self.total_steps():
            x0, y0, x1, y1 = self.here()
            self._k += 1
            return x0, y0, x1, y1
        else:
            raise StopIteration()

    def prev(self):
        """Step one window back and return it; StopIteration at the start."""
        if self._k >= 1:
            self._k -= 1
            x0, y0, x1, y1 = self.here()
            return x0, y0, x1, y1
        else:
            raise StopIteration()
#####
class TiledImage(object):
"""A tiled image, loading regions on demand.
"""
_meta_file = None
_meta = None
_root_folder = None
def __init__(self, meta_file: str):
"""Initializes a TiledImage by reading the configuration from a file.
Args:
meta_file (str): the file with meta-info for the image. This should be
located at the root of the folder hierarchy.
"""
self._root_folder, self._meta_file = os.path.split(meta_file)
with open(meta_file, 'r') as fp:
self._meta = json.load(fp)
@property
def root_folder(self):
return self._root_folder
@property
def height(self):
return np.long(self._meta['level_image_height'])
@property
def width(self):
return np.long(self._meta['level_image_width'])
@property
def level(self):
return int(self._meta['level'])
@property
def tile_count_horizontal(self):
return int(self._meta['n_tiles_horiz'])
@property
def tile_count_vertical(self):
return int(self._meta['n_tiles_vert'])
@property
def tile_count(self):
return self.tile_count_horizontal * self.tile_count_vertical
@property
def tile_width(self):
return int(self._meta['tile_width'])
@property
def tile_height(self):
return int(self._meta['tile_height'])
@property
def nchannels(self):
return int(self._meta['n_channels'])
def get_tile(self, i, j, skip_empty=True):
"""Return the (i,j)-th tile.
Args:
i, j (int): tile coordinates
skip_empty (bool): if true and the file corresponding to the
tile (i,j) does not exist, assume the tile is empty and
return a 0-filled array.
Returns:
numpy.ndarray
"""
tile_id = 'tile_' + str(i) + '_' + str(j)
if skip_empty and not os.path.exists(self._meta['tile_' + str(i) + '_' + str(j)]['name']):
img = np.zeros((self._meta[tile_id]['height'], self._meta[tile_id]['width'],
self._meta[tile_id]['n_channels']), dtype=np.uitn8)
else:
img = imread(self.root_folder + os.path.pathsep + 'level_{:d}'.format(self._meta['level']) + \
os.path.sep + self._meta[tile_id]['name'])
return img
def get_image(self):
"""Return the whole image, by loading all the tils."""
return TiledImage.load_tiled_image(self._meta)
def get_tile_coverage(self, x, y, width, height):
"""Return the indices (i,j) of the tiles covering a given
rectangular region.
Args:
x, y (long): top-left corner coordinates (column, row)
width, height (long): region extent
Returns:
list of pairs: [(i,j), ...] corresponding to tiles_i_j covering
the region
"""
x, y, width, height = [np.long(_z) for _z in [x, y, width, height]]
if not (0 <= x < self.width):
raise Error('x out of bounds')
if not (0 <= y < self.height):
raise Error('y out of bounds')
if x + width > self.width or y + height > self.height:
raise Error('region too large for the image')
# Find the tiles covering the requested reqion
start_i = np.int(np.floor(y / self.tile_height))
start_j = np.int(np.floor(x / self.tile_width))
end_i = np.int(np.floor((y + height) / self.tile_height) + \
(1 if (y + height) % self.tile_height != 0 else 0))
end_j = np.int(np.floor((x + width) / self.tile_width) + \
(1 if (x + width) % self.tile_width != 0 else 0))
ij = [(i, j) for i in np.arange(start_i, end_i) for j in np.arange(start_j, end_j)]
return ij
def get_region_px(self, x, y, width, height):
    """Return an arbitrary region within a tiled image.

    Algorithm: find the tiles covering the region, paste them all into a
    temporary mosaic, then crop the requested window. Not optimal from a
    memory-usage perspective, but simple.

    Args:
        x, y (int): top-left corner coordinates (column, row)
        width, height (int): region extent

    Returns:
        numpy.ndarray

    Raises:
        Error: if the region is (partially) outside the image.
    """
    # Fix: np.long/np.int were removed from NumPy (1.24+).
    x, y, width, height = int(x), int(y), int(width), int(height)
    if not (0 <= x < self.width):
        raise Error('x out of bounds')
    if not (0 <= y < self.height):
        raise Error('y out of bounds')
    if x + width > self.width or y + height > self.height:
        raise Error('region too large for the image')
    # Tiles covering the requested region (ceiling division on far edge).
    start_i = y // self.tile_height
    start_j = x // self.tile_width
    end_i = -(-(y + height) // self.tile_height)
    end_j = -(-(x + width) // self.tile_width)
    # Probe one tile to learn whether the image is grayscale or colour.
    tile = self.get_tile(start_i, start_j)
    if tile.ndim == 2:
        mosaic_shape = (self.tile_height * (end_i - start_i),
                        self.tile_width * (end_j - start_j))
    else:
        mosaic_shape = (self.tile_height * (end_i - start_i),
                        self.tile_width * (end_j - start_j),
                        tile.shape[2])
    img = np.zeros(mosaic_shape, dtype=np.uint8)
    # Fix: the grayscale branch of the original iterated
    # ``for j in range(start_i, end_i)`` (copy-paste bug), skipping or
    # duplicating tile columns whenever the vertical and horizontal tile
    # ranges differed. A single loop with an Ellipsis index handles both
    # the 2-D and 3-D cases.
    for i in range(start_i, end_i):
        for j in range(start_j, end_j):
            tile = self.get_tile(i, j)
            # Last tile in a row / last row of tiles may have non-standard
            # dimensions, so use the actual tile shape for the end points.
            r0 = (i - start_i) * self.tile_height
            c0 = (j - start_j) * self.tile_width
            img[r0:r0 + tile.shape[0], c0:c0 + tile.shape[1], ...] = tile
    # Crop the mosaic down to the requested window.
    return img[y - start_i * self.tile_height: y + height - start_i * self.tile_height,
               x - start_j * self.tile_width: x + width - start_j * self.tile_width,
               ...].copy()
def load_tiled_image(self):
    """Load the whole tiled image into memory.

    All the information about the tile geometry and tile paths is already
    stored in the object. The meta info contains:
        level_image_width, level_image_height, level_image_nchannels,
        n_tiles_horiz, n_tiles_vert
    and, for each tile, an entry 'tile_i_j' which is a dict with keys:
        i, j, name, x, y

    Canonical usage:
        img = TiledImage("path/to/meta_data_file.json").load_tiled_image()

    Returns:
        a numpy.ndarray

    Fixes over the original:
      - the empty-tile branch referenced ``x, y`` before assignment
        (NameError on the first empty tile); the buffer is already
        zero-initialized, so empty tiles are simply skipped;
      - tiles were pasted as ``img[x:.., y:..]`` with column/row swapped,
        and via nonexistent ``tile.width``/``tile.height`` attributes
        (numpy arrays expose ``.shape``); rows (y) now come first;
      - tile files are read through ``get_tile()``, avoiding the
        ``os.path.pathsep`` misuse (and the ``self.root_path`` attribute
        that is inconsistent with ``self.root_folder`` used elsewhere --
        NOTE(review): confirm which attribute the constructor actually sets);
      - ``self.n_channels`` did not match the property, which is named
        ``nchannels``.
    """
    img_w, img_h = self.width, self.height
    nh, nv = self.tile_count_horizontal, self.tile_count_vertical
    img = np.zeros((img_h, img_w, self.nchannels), dtype=np.uint8)
    for i in range(nv):
        for j in range(nh):
            tile_id = 'tile_' + str(i) + '_' + str(j)
            if self._meta[tile_id]['is_empty'] == 'True':
                # Buffer is zero-filled already; nothing to paste.
                continue
            x = int(self._meta[tile_id]['x'])  # column of the top-left corner
            y = int(self._meta[tile_id]['y'])  # row of the top-left corner
            tile = self.get_tile(i, j).astype(np.uint8)
            # Edge tiles may be smaller than the standard geometry, so
            # rely on the actual tile shape rather than tile_width/height.
            img[y:y + tile.shape[0], x:x + tile.shape[1], ...] = tile
    return img
@staticmethod
def save_tiled_image(img: np.ndarray, root: str, level: int, tile_geom: tuple, img_type: str = "jpeg",
                     skip_empty: bool = True, empty_level: float = 0):
    """Save an image as a collection of tiles.

    This is a static method since the required meta information about the
    hierarchy is not yet known (it is computed here) and, hence, the object
    could not have been initialized.

    The image is split into fixed-size tiles (except the right-most and
    bottom-most ones, which may be smaller).

    *WARNING*: any existing tiles in the path root/level will be deleted!

    Args:
        img (numpy.ndarray): an image (grayscale or RGB)
        root (str): root folder of the image-storing hierarchy; the tiles
            are stored in the root/level_xx folder
        level (int): the magnification level
        tile_geom (tuple): (width, height) of a tile
        img_type (str, optional): file type for the tiles
        skip_empty (bool): if True, do not save files for empty tiles
            (those for which the sum of pixels is <= empty_level)
        empty_level (float): if the sum of pixels is at most this value,
            the image/tile is considered empty

    Returns:
        a TiledImage object
    """
    assert img.ndim == 2 or (img.ndim == 3 and img.shape[2] <= 3)
    n_channels = NumpyImage.nchannels(img)
    # Clamp the tile geometry to the image size.
    tg = (min(tile_geom[0], img.shape[1]), min(tile_geom[1], img.shape[0]))
    # Number of tiles horizontally / vertically (ceiling division).
    nh = -(-img.shape[1] // tg[0])
    nv = -(-img.shape[0] // tg[1])
    tile_meta = {'level': level,
                 'level_image_width': img.shape[1],
                 'level_image_height': img.shape[0],
                 'level_image_nchannels': 1 if img.ndim == 2 else img.shape[2],
                 'n_tiles_horiz': nh,
                 'n_tiles_vert': nv,
                 'tile_width': tg[0],
                 'tile_height': tg[1],
                 'n_channels': n_channels}
    dst_path = root + os.path.sep + 'level_{:d}'.format(level)
    if os.path.exists(dst_path):
        shutil.rmtree(dst_path)
    # Fix: os.mkdir fails when 'root' itself does not exist yet; makedirs
    # creates the whole hierarchy.
    os.makedirs(dst_path)
    for i in range(nv):
        for j in range(nh):
            i0, j0 = i * tg[1], j * tg[0]
            i1 = min((i + 1) * tg[1], img.shape[0])
            j1 = min((j + 1) * tg[0], img.shape[1])
            im_sub = img[i0:i1, j0:j1] if n_channels == 1 else img[i0:i1, j0:j1, :]
            tile_id = 'tile_' + str(i) + '_' + str(j)
            tile_meta[tile_id] = {'name': tile_id + '.' + img_type,
                                  'i': i, 'j': j,
                                  'x': j0, 'y': i0,
                                  'width': (j1 - j0), 'height': (i1 - i0)}
            # Compute emptiness once; the original evaluated it twice.
            is_empty = NumpyImage.is_empty(im_sub, empty_level)
            tile_meta[tile_id]['is_empty'] = str(is_empty)
            if skip_empty and is_empty:
                continue
            # Effectively save the tile.
            imsave(dst_path + os.path.sep + tile_meta[tile_id]['name'], im_sub, quality=100)
    with open(dst_path + os.path.sep + 'meta.json', 'w') as fp:
        json.dump(tile_meta, fp, separators=(',', ':'), indent=' ', sort_keys=True)
    return TiledImage(meta_file=dst_path + os.path.sep + 'meta.json')
|
import Adafruit_DHT
import time
import datetime
# Example using a Raspberry Pi with DHT sensor
# connected to GPIO23.
pin = 4
sensor = Adafruit_DHT.DHT22
def get_date_time():
    """Return the current local date ('M/D/YYYY') and time ('H:M:S') as two strings."""
    now = datetime.datetime.now()
    date_str = f'{now.month}/{now.day}/{now.year}'
    time_str = f'{now.hour}:{now.minute}:{now.second}'
    return date_str, time_str
def write_data_dht():
    """Read the DHT22 sensor once and append the sample to sample.txt.

    Writes one CSV line per call: date,time,temperature,humidity.
    Prints a warning when the sensor read fails.

    Fixes over the original:
      - open mode 'w' truncated the file on every call, so each sample
        overwrote the previous one; 'a' appends instead;
      - the line terminator was the literal two-character string "/n";
        use a real newline "\\n";
      - the explicit file.close() inside the 'with' block was redundant.
    """
    humidity, temperature = Adafruit_DHT.read_retry(sensor, pin)
    if humidity is None or temperature is None:
        print('Failed to get reading. Try again!')
        return
    print('Temp={0:0.1f}*C Humidity={1:0.1f}%'.format(temperature, humidity))
    my_date, my_time = get_date_time()
    data = "{},{},{},{}".format(my_date, my_time, temperature, humidity)
    print(data)
    with open("sample.txt", 'a') as fp:
        fp.write(data)
        fp.write("\n")
if __name__ == '__main__':
    # Take three samples, two seconds apart.
    for _ in range(3):
        time.sleep(2)
        write_data_dht()
|
import telebot
import datetime
# SECURITY(review): the bot token is hard-coded (and now effectively public);
# it should be revoked and loaded from an environment variable or config file.
bot = telebot.TeleBot('830999920:AAFyyAO5ZIJ7sYQFJGQA9QmF201KWnObHNc')
# Count of text messages received so far; only the first "/start" enters the
# scheduling loop in the handler below.
global_bots = 0
# How many times each wake-up / lights-out message is repeated.
TIMES_WAKE_UP = 2
@bot.message_handler(content_types=['text'])
def get_text_messages(message):
    """Handle incoming text messages.

    Replies with the current hour; on the very first "/start" received,
    enters a scheduling loop that, during hour 0, posts "РОТА ПОДЪЕМ!" in
    odd minutes and "РОТА ОТБОЙ!" in even minutes (within the first 20
    seconds of the minute), each repeated TIMES_WAKE_UP times.

    NOTE(review): an infinite loop inside a handler blocks the bot for all
    other chats; a real scheduler would be better. Kept to preserve the
    original design.

    Fixes over the original:
      - the loop was a pure busy-wait at 100% CPU and re-sent the message
        batch on every iteration inside the 20-second window; it now sleeps
        one second per pass and fires at most once per matching minute.
    """
    import time  # local import: this script's import block is above, untouched

    global global_bots
    global_bots += 1
    now = datetime.datetime.now()
    bot.send_message(message.chat.id, str(now.hour))
    if message.text == "/start" and global_bots == 1:
        last_fired_minute = None  # minute we already fired in, to avoid repeats
        while True:
            now = datetime.datetime.now()
            if now.hour == 0 and now.second < 20 and now.minute != last_fired_minute:
                text = "РОТА ПОДЪЕМ!" if now.minute % 2 == 1 else "РОТА ОТБОЙ!"
                for _ in range(TIMES_WAKE_UP):
                    bot.send_message(message.chat.id, text)
                last_fired_minute = now.minute
            time.sleep(1)  # avoid a busy-wait
if __name__ == '__main__':
    # Start long polling; none_stop=True keeps polling across network errors,
    # interval=0 polls without an extra delay between requests.
    bot.polling(none_stop=True, interval=0)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.