text stringlengths 8 6.05M |
|---|
# -*- coding: utf-8 -*-
class TreeNode:
    """Node of a binary tree: a value plus optional left/right children."""

    def __init__(self, x):
        self.val = x      # payload stored at this node
        self.left = None  # left child (TreeNode or None)
        self.right = None  # right child (TreeNode or None)
class Solution:
    """LeetCode 257: collect every root-to-leaf path as an "a->b->c" string."""

    def binaryTreePaths(self, root):
        """Return all root-to-leaf paths of *root*, left subtree first."""
        if root is None:
            return []
        paths = []

        def walk(node, prefix):
            # Extend the running path with this node's value.
            prefix = prefix + [str(node.val)]
            if node.left is None and node.right is None:
                paths.append("->".join(prefix))
                return
            if node.left is not None:
                walk(node.left, prefix)
            if node.right is not None:
                walk(node.right, prefix)

        walk(root, [])
        return paths
if __name__ == "__main__":
    solution = Solution()
    # Build the sample tree:
    #       1
    #      / \
    #     2   3
    #      \
    #       5
    t0_0 = TreeNode(1)
    t0_1 = TreeNode(2)
    t0_2 = TreeNode(3)
    t0_3 = TreeNode(5)
    t0_1.right = t0_3
    t0_0.right = t0_2
    t0_0.left = t0_1
    # Paths are reported left subtree first.
    assert ["1->2->5", "1->3"] == solution.binaryTreePaths(t0_0)
|
class Solution(object):
    def containsNearbyDuplicate(self, nums, k):
        """
        https://leetcode.com/problems/contains-duplicate-ii/
        Return True iff nums has two equal values at indices i, j with
        0 < i - j <= k; otherwise False.

        Keeps the most recent index of each value in a dict and checks the
        gap on every repeat.  The original version made an extra O(n) pass
        (`len(set(nums)) == len(nums)`) and used the non-idiomatic
        `in danger.keys()` / index loop; both are removed — behavior on the
        all-distinct case is unchanged (the loop simply returns False).
        """
        last_seen = {}
        for i, value in enumerate(nums):
            prev = last_seen.get(value)
            if prev is not None and i - prev <= k:
                return True
            last_seen[value] = i
        return False
# encoding: utf-8
from src.light_gbm import light_gbm_predict
import pandas as pd
from src.config import FaqConfig
import logging.config
import logging
import os
import sys
# Make the project root importable when this file is executed as a script.
sys.path.append(os.path.join(os.path.dirname(__file__), '../'))
# NOTE(review): runs at import time and raises if 'log.config' is missing
# from the working directory — confirm deployments always provide it.
logging.config.fileConfig(fname='log.config', disable_existing_loggers=False)
def calc_score(faq_item_list):
    """Attach a LightGBM relevance score to every FAQ item.

    Builds an (n_items x 3) feature frame from the jaccard / edit / bm25
    similarity scores and writes the model prediction back onto each item.
    """
    features = pd.DataFrame([
        [item.jaccard_similarity_score for item in faq_item_list],
        [item.edit_similarity_score for item in faq_item_list],
        [item.bm25_similarity_score for item in faq_item_list],
    ]).T
    predictions = light_gbm_predict(features)
    for item, score in zip(faq_item_list, predictions):
        item.score = score
    return faq_item_list
def calc_score_with_abcnn(faq_item_list):
    """Attach a LightGBM relevance score using four features (incl. ABCNN).

    Same as calc_score but with the ABCNN similarity as a fourth feature
    column in the (n_items x 4) frame fed to the model.
    """
    features = pd.DataFrame([
        [item.jaccard_similarity_score for item in faq_item_list],
        [item.edit_similarity_score for item in faq_item_list],
        [item.bm25_similarity_score for item in faq_item_list],
        [item.abcnn_similarity for item in faq_item_list],
    ]).T
    predictions = light_gbm_predict(features)
    for item, score in zip(faq_item_list, predictions):
        item.score = score
    return faq_item_list
# Sort by score in descending order (default), keep the best top_n, and
# drop items at or below the threshold.
def sort_by_score(faq_item_list, top_n=5, threshold=0.5):
    """Return the top_n highest-scoring items whose score exceeds threshold.

    Bug fix: the original built the threshold-filtered list
    (`sifted_output_list`) but returned the unfiltered one, so the
    `threshold` parameter had no effect.
    """
    logger = logging.getLogger('rank')
    # Stable sort on score, best first (same ordering as the original
    # enumerate/sort-by-index dance, with fewer temporaries).
    output_list = sorted(faq_item_list, key=lambda item: item.score, reverse=True)
    logger.info('rank SUCCESS !')
    if len(output_list) > top_n:
        output_list = output_list[:top_n]
    sifted_output_list = []
    for question in output_list:
        logger.debug('score_by_score'+str(question.question)+str(question.score))
        if question.score > threshold:
            sifted_output_list.append(question)
    return sifted_output_list
def rank(input_list, faq_config: FaqConfig):
    """Score the FAQ items and return them ranked per the configuration."""
    scored = calc_score(input_list)
    # Alternative scorer (needs ABCNN similarity on every item):
    # scored = calc_score_with_abcnn(input_list)
    return sort_by_score(
        scored,
        top_n=faq_config.rank.top_n,
        threshold=faq_config.rank.threshold,
    )
if __name__ == '__main__':
    from src.utils import FAQItem, QueryItem, faq_items_to_list
    from src.config import init_faq_config
    # Smoke test: rank ten FAQ items with evenly spaced scores 0.0 .. 0.9.
    f_c = init_faq_config('faq.config')
    q_i = QueryItem()
    q = []
    for ii in range(10):
        f_i = FAQItem(q_i)
        f_i.score = ii / 10.0
        q.append(f_i)
    r = rank(q, f_c)
    print(faq_items_to_list(r))
|
# -*- coding: utf-8 -*-
# pylint: disable=missing-docstring
import os
import sys
import time
import threading
import cv2
import numpy as np
import dlib
import json
import utils
# Display window for showing frames and overlays.
win = dlib.image_window()
# {identifier: face descriptor vector}, filled by load_enrolled_faces().
enrolled_faces = {}
# Semaphor for IDENTIFYING thread
IDENTIFYING = False
def identify(image):
    """Lazily yield (identifier, distance, face) for each face in *image*.

    Each detected face's descriptor is compared (Euclidean distance)
    against all enrolled descriptors and paired with the nearest one.
    Returns a map object; nothing is computed until it is consumed.
    """
    # Get all faces
    faces = utils.faces_from_image(image)
    def find_match(face):
        # Calculate face descriptor
        descriptor = utils.face_recognizer.compute_face_descriptor(image, face)
        face_vector = np.array(descriptor).astype(float)
        # keys() and values() of one dict iterate in the same (insertion)
        # order on Python 3.7+, so these two arrays stay index-aligned.
        enroll_identifiers = np.array(list(enrolled_faces.keys()))
        enroll_matrix = np.array(list(enrolled_faces.values()))
        # Calculate differences between the face and all enrolled faces
        differences = np.subtract(np.array(enroll_matrix), face_vector)
        distances = np.linalg.norm(differences, axis=1)
        # and pick the closest one
        closest_index = np.argmin(distances)
        return enroll_identifiers[closest_index], distances[closest_index], face
    return map(find_match, faces)
def handle_frame(origFrame, cb):
    """Identify faces on a half-size copy of *origFrame* and report via *cb*.

    *cb* receives one argument: the list of (id, distance, face) matches
    with distance < 0.6, or an empty list on failure.

    Fixes over the original: `start` is bound before the try (the except
    path could hit an UnboundLocalError), the failure path no longer calls
    the one-argument callback with three arguments (always a TypeError),
    and IDENTIFYING is reset in a finally so the capture loop can never
    deadlock if the callback itself raises.
    """
    global IDENTIFYING
    start = time.time()
    try:
        frame = cv2.resize(origFrame, None, fx=0.5, fy=0.5, interpolation=cv2.INTER_CUBIC)
        identified_matches = identify(frame)
        valid_matches = list(filter((lambda match: match[1] < 0.6), identified_matches))
        cb(valid_matches)
        sys.stdout.flush()
    except Exception as e:
        print(e)
        cb([])  # report "no matches" instead of crashing the callback
    finally:
        # Always release the semaphore so webcam() can schedule new frames.
        IDENTIFYING = False
def webcam():
    """Continuously capture frames and dispatch them for identification.

    Frames are grabbed on every loop to keep the driver buffer fresh;
    whenever no identification is in flight, the latest frame is handed
    to a daemon worker thread running handle_frame.
    """
    global IDENTIFYING
    video_capture = cv2.VideoCapture(0)
    while True:
        video_capture.grab()
        if (not IDENTIFYING):
            ret, frame = video_capture.retrieve()
            if (not ret):
                raise Exception('No frame received!')
            IDENTIFYING = True
            # frame is captured by the lambda so logger() can display it.
            thread = threading.Thread(target=handle_frame, args=(frame, (lambda res: logger(res, frame))))
            thread.daemon=True
            thread.start()
    # When everything is done, release the capture
    # NOTE(review): unreachable — the loop above never breaks.
    video_capture.release()
def logger(matches, frame):
    """Show *frame* in the dlib window, draw match overlays, and print the
    matches as a JSON list of {'id', 'confidence'} objects to stdout."""
    win.set_image(frame)
    if len(matches) > 0:
        # NOTE(review): the overlay is only cleared when there ARE matches,
        # so stale rectangles persist on match-free frames — confirm intent.
        win.clear_overlay()
        for _, (_, _, face_vector) in enumerate(matches):
            win.add_overlay(face_vector)

    # Serialize one (id, distance, face) match for the JSON report.
    def match_to_json(match):
        return { 'id': match[0], 'confidence': match[1] }
    print(json.dumps(list(map(match_to_json, matches))))
def load_enrolled_faces():
    """Load the {identifier: descriptor} dict pickled into encodings.npy.

    The file stores a Python dict inside a 0-d object array, so
    allow_pickle=True is required on NumPy >= 1.16.3 (where the default
    flipped to False); .item() unwraps the dict.
    """
    global enrolled_faces
    enrolled_faces = np.load('encodings.npy', allow_pickle=True).item()
# Entry point: require the enrollment file, then start the capture loop.
if not os.path.isfile('encodings.npy'):
    utils.eprint("No encodings.npy file found! Create it by running create-face-encodings")
else:
    load_enrolled_faces()
    webcam()
|
def sum_fac(n):
    """Return the sum of all positive divisors of n, including 1 and n.

    Divisors are collected in pairs (i, n // i) for i up to sqrt(n); the
    set removes the duplicate middle divisor when n is a perfect square.
    (Idiom fix: sum the set directly instead of sum(list(s)).)
    """
    divisors = set()
    for i in range(1, int(n ** 0.5) + 1):
        if n % i == 0:
            divisors.add(i)
            divisors.add(n // i)
    return sum(divisors)
def seive(n):
    """Return z of length n+1 where z[i] is True iff i is abundant,
    i.e. its divisor sum (including i itself) exceeds 2*i."""
    return [sum_fac(i) > 2 * i for i in range(n + 1)]
# 20161 is the largest integer that is NOT a sum of two abundant numbers,
# so only n <= 20161 needs the explicit pair search below.
z = seive(20161)
for _ in range(int(input())):
    n = int(input())
    if n > 20161:
        print('YES')
    else:
        # Look for an abundant pair i + (n - i) = n.
        flag = False
        for i in range(1, n):
            if z[i] and z[n-i]:
                print('YES')
                flag = True
                break
        if not flag:
            print('NO')
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
import math
import scipy.integrate as integrate
import csv
from pathControl import ProportionalControl as pControl
def calculatePolarError(states, referencePosition):
    """Return (magnitude, direction) of the position error in polar form.

    *states* holds (x, y, heading); *referencePosition* the target (x, y).
    direction is the bearing toward the target minus the current heading.
    """
    dx, dy = referencePosition - states[0:2]
    bearing = math.atan2(dy, dx)
    magnitude = math.sqrt(dx ** 2 + dy ** 2)
    direction = bearing - states[2]
    return magnitude, direction
def DDMRkinematic(q, t, dphi):
    """Kinematics of a differential-drive mobile robot.

    q = (x, y, theta) pose, t is unused (odeint signature), dphi holds the
    (right, left) wheel speeds.  Returns dq = (dx, dy, dtheta) with unit
    wheel radius R and unit half-axle length L.
    """
    R = 1  # wheel radius
    L = 1  # half distance between wheels
    theta = q[2]
    forward = R * (dphi[0] + dphi[1]) / 2
    return np.array([
        forward * math.cos(theta),
        forward * math.sin(theta),
        R * (dphi[0] - dphi[1]) / (2 * L),
    ])
def erroCart2pol(qk, refXY):
    """Convert the Cartesian error between pose *qk* = (x, y, theta) and
    target *refXY* = (x, y) into polar form (modulus, angle vs heading)."""
    delta = refXY - qk[0:2]
    refTheta = math.atan2(delta[1], delta[0])
    mod = math.sqrt(delta[0] ** 2 + delta[1] ** 2)
    ang = refTheta - qk[2]
    return mod, ang
def saturate(control, limit):
    """Clamp every control signal to the interval [-limit, limit].

    Replaces the hand-rolled element loop (np.append in a loop is O(n^2))
    with a single vectorized np.clip; the float dtype matches the array
    the original built.  Returns a new array, input is untouched.
    """
    return np.clip(np.asarray(control, dtype=float), -limit, limit)
# Load the reference trajectory from disk.
trajectoryList = []
with open('trajetoria.csv',newline='') as csvfile:
    spamreader = csv.reader(csvfile,delimiter=',')
    for row in spamreader:
        trajectoryList.append(row)
# Convert every CSV row of strings into a float numpy array.
newList = []
for l in trajectoryList:
    aux = [float(s) for s in l]
    newList.append(1*np.array(aux))
trajectoryList = newList
del(newList)
refPlot = trajectoryList
trajectoryList.reverse()#Make a stack
# Initial pose: first waypoint plus heading 0.
q0 = np.append(trajectoryList.pop(),np.array([0]))
Ts = 0.01
velMax = 3 #rad/s  (wheel-speed saturation limit)
qk = q0
ref = trajectoryList.pop()
q = qk
#Control configuration
positionGain = 1*np.array([1,1])
angleGain = 100*np.array([1,-1])
(mod,ang) = erroCart2pol(qk,ref)
control = mod*positionGain + ang*angleGain
control = saturate(control,velMax)
# Two-point time grid: each odeint call integrates one sample period Ts.
t = np.linspace(0,Ts,2)
qPlot = np.array([q0])
simulPlot = np.array([q0])
refPlot = np.array([ref])
atReference =False
#while len(trajectoryList)>0:
i=0
print(len(trajectoryList))
# Outer loop: one waypoint at a time; inner loop: drive until close enough.
# NOTE(review): the local erroCart2pol/saturate above are only used for the
# initial control — the loop uses the pControl class methods instead.
while (len(trajectoryList)>0):
    # for i in range(4):
    while not atReference:
        (mod,ang) = pControl.calculatePolarError(qk,ref)
        control = pControl.calculateKinematicControl(mod,ang,1,100)
        control = pControl.saturateControl(control,velMax)
        sol = integrate.odeint(DDMRkinematic,qk,t,args=(control,))
        simulPlot = np.append(simulPlot,np.array([sol[1]]),axis=0)
        qk = sol[1]
        # Within 5 cm of the waypoint on both axes counts as "arrived".
        atReference = all( abs(qk[0:2]-ref)< 0.05 )
    qPlot = np.append(qPlot,np.array([qk]),axis=0)
    ref = trajectoryList.pop()
    refPlot = np.append(refPlot,np.array([ref]),axis=0)
    atReference = False
    print(str(len(trajectoryList)) + '---' +str(atReference)+'---'+str(control))
# Plot the simulated path, the waypoints reached, and the references.
plt.plot(simulPlot[:,0],simulPlot[:,1])
plt.scatter(qPlot[:,0],qPlot[:,1])
plt.scatter(refPlot[:,0],refPlot[:,1])
#plt.plot(simulGraph[1::,0],t)
plt.xlabel('x(t)')
plt.ylabel('y(t)')
plt.grid()
plt.show()
#plt.plot(ref)
#plt.show()
import cPickle as pkl
import os
import parameters
import Data
import learners
import entropy_gains
import world
class AnalyzedData():
    """Runs model analyses over subject action sequences and pickles results.

    Python 2 code (print statements, cPickle).  alldata maps
    subject -> action index -> {'SubjectAction', 'ActionValues'} where
    ActionValues maps each possible action to an (EIG, PG) pair.
    """
    def __init__(self, outfilename, group='kids', N=None, A=None):
        # N = number of subjects to analyze, A = max actions per subject.
        self.filename=outfilename
        #check nonexistent filename
        #if os.path.isfile(self.filename):
        #    print "Initialized with existing file. WILL NOT SAVE."
        self.group=group
        if N is None:
            if group=='kids':
                self.N=parameters.n_kids
            elif group=='adults':
                self.N=parameters.n_adults
        else:
            self.N=N
        if A is None:
            self.A=10
        else:
            self.A=A
        self.alldata={}
    def analyze(self):
        """Compute EIG and success probability for every subject action."""
        data=self.load_data()
        subjects=data.get_kids()[:self.N]
        for subject in subjects:
            print 'Analyzing subject {0}...'.format(subject)
            self.alldata[subject]={}#defaultdict(dict)
            max_action=min(data.get_kid_nactions(subject),self.A)
            subject_sequence=data.data[subject][:max_action]
            for actioni in range(max_action):
                #print 'Action {0} of {1}'.format(actioni, max_action)
                self.alldata[subject][actioni]={}
                subject_action=data.data[subject][actioni].get_action()
                self.alldata[subject][actioni]['SubjectAction']=subject_action
                # Fresh models per action so history length matches actioni.
                theory_model=learners.TheoryLearner()
                pg_model=learners.ActivePlayer()
                # model_actions, model_gain=pg_model.choose_actions(subject_sequence[:actioni])
                self.alldata[subject][actioni]['ActionValues']={}
                for a in world.possible_actions():
                    #EIG=theory_model.expected_final_entropy(a, subject_sequence[:actioni])
                    # Negated so larger means more informative.
                    EIG=-1*theory_model.expected_information_gain(a, subject_sequence[:actioni])
                    PG=pg_model.success_probability(a, subject_sequence[:actioni])
                    self.alldata[subject][actioni]['ActionValues'][a]=(EIG,PG)
                # theory_model=learners.TheoryLearner()
                # model_actions, model_gain=theory_model.choose_actions(subject_sequence[:actioni])
                # self.alldata[subject][actioni]['TMA']=model_actions
                # self.alldata[subject][actioni]['SEIG']=\
                # entropy_gains.theory_expected_final_entropy(subject_action, subject_sequence[:actioni])
                # self.alldata[subject][actioni]['TMEIG']=model_gain
                # pg_model=learners.ActivePlayer()
                # model_actions, model_gain=pg_model.choose_actions(subject_sequence[:actioni])
                # self.alldata[subject][actioni]['PMA']=model_actions
                # self.alldata[subject][actioni]['PMSP']=model_gain
        # Persist everything once the full analysis is complete.
        self.save()
    def load_data(self):
        """Read the raw experiment data file for the configured group."""
        if self.group=='kids':
            data=Data.Data(parameters.inputfile_kids)
        elif self.group=='adults':
            data=Data.Data(parameters.inputfile_adults)
        else:
            # NOTE(review): falls through with `data` unbound -> NameError
            # on the next line for an unknown group; confirm before relying
            # on this branch.
            print 'Unknown group.'
        data.read(astext=False)
        return data
    def safe_save(self):
        """Pickle alldata only if the output file does not already exist."""
        #check nonexistent filename
        if not os.path.isfile(self.filename):
            with open(self.filename, 'wb') as f:
                pkl.dump(self.alldata, f)
        else:
            print 'FILE EXISTS, NOT SAVING.'
    def save(self):
        """Unconditionally pickle alldata to self.filename (overwrites)."""
        with open(self.filename, 'wb') as f:
            pkl.dump(self.alldata, f)
    def load(self):
        """Restore alldata from a previous save."""
        with open(self.filename, 'rb') as f:
            self.alldata=pkl.load(f)
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.15 on 2018-08-13 20:47
from __future__ import unicode_literals
import DjangoUeditor.models
import datetime
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated initial schema for the goods app.

    Creates Banner, Goods, GoodsCategory, GoodsCategoryBrand and GoodsImage,
    then wires up the foreign keys that reference models created later in
    the same migration.  Generated code — edit via new migrations, not here.
    """

    initial = True

    dependencies = [
    ]

    operations = [
        # Home-page rolling banner entries, ordered by `index`.
        migrations.CreateModel(
            name='Banner',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('image', models.ImageField(upload_to='banner', verbose_name='Roller Image')),
                ('index', models.IntegerField(default=0, verbose_name='Rolling Sequence')),
                ('add_time', models.DateTimeField(default=datetime.datetime.now, verbose_name='Add Time')),
            ],
            options={
                'verbose_name_plural': 'Slider Image',
                'verbose_name': 'Slider Image',
            },
        ),
        # Core product record with counters, pricing and flags.
        migrations.CreateModel(
            name='Goods',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('goods_sn', models.CharField(default='', max_length=50, verbose_name='Product SKU')),
                ('name', models.CharField(default='', max_length=300, verbose_name='Product Name')),
                ('goods_size', models.CharField(blank=True, choices=[('OneSize', 'OneSize'), ('29x32', '29x32'), ('30x32', '30x32'), ('31x32', '31x32'), ('32x32', '32x32'), ('33x32', '33x32'), ('34x32', '34x32'), ('36x32', '36x32'), ('38x32', '38x32'), ('xsmall', 'xsmall'), ('small', 'small'), ('medium', 'medium'), ('large', 'large'), ('xlarge', 'xlarge'), ('5', '5'), ('6', '6'), ('7', '7'), ('8', '8'), ('9', '9'), ('10', '10'), ('other', 'other')], max_length=10, null=True, verbose_name='Product Size')),
                ('goods_color', models.CharField(blank=True, choices=[('OneColor', 'OneColor'), ('red', 'Red'), ('blue', 'Blue'), ('pink', 'pink'), ('black', 'black'), ('white', 'white'), ('green', 'green'), ('grey', 'grey'), ('other', 'other')], max_length=10, null=True, verbose_name='Available Color')),
                ('click_num', models.IntegerField(default=0, verbose_name='Click Times')),
                ('sold_num', models.IntegerField(default=0, verbose_name='Sold Quantity')),
                ('fav_num', models.IntegerField(default=0, verbose_name='Total Favorite')),
                ('goods_num', models.IntegerField(blank=True, default=0, null=True, verbose_name='Inventory')),
                ('market_price', models.FloatField(default=0.0, verbose_name='Market Price')),
                ('shop_price', models.FloatField(default=0.0, verbose_name='Price in Store')),
                ('goods_brief', models.TextField(max_length=100, verbose_name='Product Brief Intro')),
                ('goods_desc', DjangoUeditor.models.UEditorField(default='', verbose_name='Product Detail')),
                ('ship_free', models.BooleanField(default=False, verbose_name='Free delivery')),
                ('goods_front_image', models.ImageField(blank=True, null=True, upload_to='', verbose_name='Home Page Product Image')),
                ('is_new', models.BooleanField(default=False, verbose_name='New Arrival')),
                ('is_hot', models.BooleanField(default=False, verbose_name='Popular Product')),
                ('add_time', models.DateTimeField(default=datetime.datetime.now, verbose_name='Add Time')),
            ],
            options={
                'verbose_name_plural': 'Product Detail',
                'verbose_name': 'Product Detail',
            },
        ),
        # Self-referencing category tree (parent_category -> sub_cat).
        migrations.CreateModel(
            name='GoodsCategory',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(default='', help_text='Category Name', max_length=30, verbose_name='Category Name')),
                ('code', models.CharField(default='', help_text='Category Code', max_length=30, verbose_name='Category Code')),
                ('desc', models.TextField(default='Some Category Description...', help_text='Category Description', verbose_name='Category Description')),
                ('category_type', models.IntegerField(choices=[(1, 'First Category'), (2, 'Second Category'), (3, 'Third Category'), (4, 'Fourth Category')], help_text='Category Type', verbose_name='Category Type')),
                ('is_tab', models.BooleanField(default=False, help_text='On Navigation Bar', verbose_name='On NaviBar')),
                ('add_time', models.DateTimeField(default=datetime.datetime.now, verbose_name='Add Time')),
                ('parent_category', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='sub_cat', to='goods.GoodsCategory', verbose_name='Parent Category')),
            ],
            options={
                'verbose_name_plural': 'Goods Category',
                'verbose_name': 'Goods Category',
            },
        ),
        # Brand, optionally attached to a category.
        migrations.CreateModel(
            name='GoodsCategoryBrand',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(default='', help_text='Brand Name', max_length=30, verbose_name='Brand Name')),
                ('desc', models.TextField(default='', help_text='Brand Description', max_length=500, verbose_name='Brand Description')),
                ('image', models.ImageField(max_length=200, upload_to='brand/image/')),
                ('add_time', models.DateTimeField(default=datetime.datetime.now, verbose_name='Add Time')),
                ('category', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='brands', to='goods.GoodsCategory', verbose_name='Brand Category')),
            ],
            options={
                'verbose_name_plural': 'Product Brand',
                'verbose_name': 'Product Brand',
            },
        ),
        # Extra product images (roller gallery) linked to a Goods row.
        migrations.CreateModel(
            name='GoodsImage',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('image', models.ImageField(blank=True, null=True, upload_to='', verbose_name='Product Image')),
                ('image_url', models.CharField(blank=True, max_length=300, null=True, verbose_name='image url')),
                ('add_time', models.DateTimeField(default=datetime.datetime.now, verbose_name='Add Time')),
                ('goods', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='images', to='goods.Goods', verbose_name='Product Name')),
            ],
            options={
                'verbose_name_plural': 'Item Image Roller',
                'verbose_name': 'Item Image Roller',
            },
        ),
        # Foreign keys added after both endpoint models exist.
        migrations.AddField(
            model_name='goods',
            name='brand',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='goods.GoodsCategoryBrand', verbose_name='Product Brand'),
        ),
        migrations.AddField(
            model_name='goods',
            name='category',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='goods.GoodsCategory', verbose_name='Product Category'),
        ),
        migrations.AddField(
            model_name='banner',
            name='goods',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='goods.Goods', verbose_name='Product Name'),
        ),
    ]
|
import sys
# Include relative path for mimpy library.
import mimpy.mesh.hexmeshwmsfracs as mesh
import mimpy.mfd.mfd as mfd
import numpy as np
# Mimetic finite difference solver instance and its discretization options.
res_mfd = mfd.MFD()
res_mfd.set_compute_diagonality(True)
res_mfd.set_m_e_construction_method(0)
#Define the permeability function
def K(p, i, j, k):
    """Permeability tensor field: isotropic unit permeability everywhere
    (point p and cell indices i, j, k are ignored)."""
    return np.identity(3)
#set the mesh and an instance of the HexMesh class
res_mesh = mesh.HexMeshWMSFracs()
#The modification function is applied to the points of the mesh.
#In this case no change is applied.
def mod_function(p, i, j, k):
    # Identity modifier: every mesh point is returned unchanged.
    return p
# Read fracture definitions; the first line of fracs.dat is a header.
frac_file = open("fracs.dat")
frac_file.readline()
frac_list = []
count = 0
for line in frac_file:
    # Each line: azimuth dip length_a length_b center_x center_y center_z
    line_split = line.split()
    new_frac = mesh.FracData()
    new_frac.azimuth = float(line_split[0])/180.*np.pi
    new_frac.dip = float(line_split[1])/180.*np.pi
    # a and b are stored as half-lengths (semi-axes).
    new_frac.a = float(line_split[2])/2.
    new_frac.b = float(line_split[3])/2.
    new_frac.id = count
    count +=1
    frac_list.append(new_frac)
    new_frac.normal = new_frac.get_normal()
    point_x = float(line_split[4])
    point_y = float(line_split[5])
    point_z = float(line_split[6])
    new_frac.center = np.array([point_x,
                                point_y,
                                point_z])
    # Approximate the elliptical fracture by a 23-sided polygon.
    new_frac.generate_polygon(23)
# 22x22x22 hexahedral grid over a 300^3 domain with permeability K.
res_mesh.build_mesh(22, 22, 22, 300., 300., 300., K, mod_function)
count = 1
fracture_faces_list = []
for frac in frac_list:
    frac.output_vtk("frac" + str(count))
    count += 1
    fracture_faces_list.append(res_mesh.add_fractures(frac))
count = 0
for key in res_mesh.fracture_faces_multi:
    fracture_faces = res_mesh.fracture_faces_multi[key]
    count += 1
    res_mesh.output_vtk_faces("faces_"+str(count), list(fracture_faces))
    res_mesh.build_frac_from_faces(list(fracture_faces))
res_mfd.set_mesh(res_mesh)
# Dirichlet pressure on all six boundary faces: 10 on face 1, 0 elsewhere.
res_mfd.apply_dirichlet_from_function(0, lambda x:0.)
res_mfd.apply_dirichlet_from_function(1, lambda x:10.)
res_mfd.apply_dirichlet_from_function(2, lambda x:0.)
res_mfd.apply_dirichlet_from_function(3, lambda x:0.)
res_mfd.apply_dirichlet_from_function(4, lambda x:0.)
res_mfd.apply_dirichlet_from_function(5, lambda x:0.)
#Build the LHS and RHS.
res_mfd.build_lhs()
res_mfd.build_rhs()
#Solve the linear system.
res_mfd.solve()
#Output the solution in the vtk format. It will be saved in
#the file "hexmes_example_1.vtk".
res_mesh.output_vtk_mesh("hexmesh_example_1",
                         [res_mfd.get_pressure_solution(),
                          res_mesh.get_cell_domain_all()],
                         ["MFDPressure", "DOMAIN"])
|
#!/usr/bin/env python
#-*-coding:utf-8-*-
# @File:main.py
# @Author: Michael.liu
# @Date:2020/6/9 11:41
# @Desc: this code is ....
if __name__ == "__main__":
    # Smoke-test entry point: confirms the ctr_package script is runnable.
    print("this is ctr_package test......")
# change gui font size in linux: xrandr --output HDMI-0 --dpi 55
# https://www.youtube.com/watch?v=3HSh_eSGf4c
from PyQt5.QtCore import *
from PyQt5.QtGui import *
from PyQt5.QtWebKit import *          # fixed module case: QtWebkit -> QtWebKit
from PyQt5.QtWebKitWidgets import *   # fixed module case; this provides QWebView
from PyQt5.QtWidgets import QApplication, QWidget, QPushButton, QVBoxLayout, QHBoxLayout, QLabel
import numpy as np
import collections
import pandas as pd
import pathlib
# %%
output_path="/dev/shm"
_code_git_version="4ef34fcca3d068199bbdb2e46e3cdbc9dcc7d987"
_code_repository="https://github.com/plops/cl-py-generator/tree/master/example/18_qt_webkit/source/run_00_browser.py"
_code_generation_time="09:39:39 of Saturday, 2020-05-23 (GMT+1)"
# %% open gui windows
app=QApplication([""])
web=QWebView()                         # fixed class case: QWebview -> QWebView
web.load(QUrl("https://youtube.com"))  # fixed method case: Load -> load
web.show()
# Enter the Qt event loop; without this the window closed immediately.
app.exec_()
import pandas as pd
from sklearn.cross_validation import train_test_split
# Load the Kaggle "Facebook Check-ins" training set once at import time.
df_train = pd.read_csv('Kaggle_Datasets/Facebook/train.csv')
#df_test = pd.read_csv('https://s3-us-west-2.amazonaws.com/fbdataset/test.csv')
class PredictionModel():
    """Grid of overlapping x/y windows, each backed by its own ModelStore.

    Python 2 code (print statements, dict.iteritems).  The (x, y) plane is
    tiled with windows of size (xsize, ysize) that slide by (xslide,
    yslide); each window trains an independent classifier on the points it
    contains.
    """
    def __init__(self, df, xsize=1, ysize=0.5, xslide=0.5, yslide=0.25, xcol='x', ycol='y'):
        self.df = df
        self.xsize = xsize
        self.ysize = ysize
        self.xslide = xslide
        self.yslide = yslide
        self.xcol = xcol
        self.ycol = ycol
        self.xmax = self.df.x.max()
        self.ymax = self.df.y.max()
        self.windows = self.generate_windows()
        self.slices = self.slice_df()
    def frange(self, x, y, jump):
        """Float range from x to y (inclusive of y) in steps of jump."""
        while x < y:
            yield x
            x += jump
        yield y
    def generate_windows(self):
        """Return [((x1, y1), (x2, y2)), ...] covering the data extent."""
        ranges = []
        result = []
        xmin, xmax = self.df.x.min(), self.df.x.max()
        ymin, ymax = self.df.y.min(), self.df.y.max()
        xranges = list(self.frange(xmin, xmax-self.xsize, self.xslide))
        yranges = list(self.frange(ymin, ymax-self.ysize, self.yslide))
        ylen = len(yranges)
        # Cartesian product of window origins.
        for x in xranges:
            subrange = [x] * ylen
            ranges.extend(zip(subrange, yranges))
        for x1, y1 in ranges:
            x2, y2 = x1 + self.xsize, y1 + self.ysize
            result.append(((x1, y1), (x1+self.xsize, y1+self.ysize)))
        return result
    def slice_df(self):
        """Create one ModelStore per window, keyed by the window tuple."""
        slices = {}
        for window in self.windows:
            slices[window] = ModelStore(self.df, window, self.xcol, self.ycol)
        return slices
    def find_best_window(self, df):
        # NOTE(review): `x` and `y` are undefined in this scope (the
        # parameter is `df`) — this method looks unfinished/unused and
        # would raise NameError if called; confirm before relying on it.
        x1, y1 = self.find_x_window(x), self.find_y_window(y)
        x2, y2 = x1+self.xsize, y1+self.ysize
        try:
            assert x1 <= x <= x2
            assert y1 <= y <= y2
        except:
            import pdb; pdb.set_trace()
        return ((x1, y1), (x2, y2))
    def find_x_window(self, x):
        """Return the x-origin of the window whose center is nearest x."""
        xs = max(0, x - (self.xsize/2.0))
        x0 = 0
        while x0 < xs:
            x0 += self.xslide
        # Clamp so the window never extends past the data extent.
        if x0 >= self.xmax - self.xsize:
            x0 = self.xmax - self.xsize
        return x0
    def find_y_window(self, y):
        """Return the y-origin of the window whose center is nearest y."""
        ys = max(0, y - (self.ysize/2.0))
        y0 = 0
        while y0 < ys:
            y0 += self.yslide
        if y0 >= self.ymax - self.ysize:
            y0 = self.ymax - self.ysize
        return y0
    def train(self):
        """Train every per-window model on the rows falling in its window."""
        for window, model in self.slices.iteritems():
            print 'Training Model: {}'.format(model)
            (x1, y1), (x2, y2) = window
            model_df = self.df[(self.df[self.xcol] >= x1) & (self.df[self.xcol] <= x2) & (self.df[self.ycol] >= y1) & (self.df[self.ycol] <= y2)]
            model.train(model_df)
            del model_df
    def predict(self, df):
        """Predict place_id for every row of df; returns {row_id: place_id}."""
        self.expected = df.sort_values('row_id')['place_id']
        result_set = {}
        # Assign each test row to exactly one window by its origin.
        df['x1'] = df.x.apply(self.find_x_window)
        df['x2'] = df.x1 + self.xsize
        df['y1'] = df.y.apply(self.find_y_window)
        df['y2'] = df.y1 + self.ysize
        for window, model in self.slices.iteritems():
            (x1, y1), (x2, y2) = window
            wdf = df[(df.x1 == x1) & (df.x2 == x2) & (df.y1 == y1) & (df.y2 == y2)]
            res = model.predict(wdf)
            result_set.update(res)
        # Results sorted by row_id so they line up with self.expected.
        self.actual = [result_set[x] for x in sorted(result_set.keys())]
        return result_set
    def score(self):
        """Percentage of predictions matching the held-out place_id."""
        expect = pd.Series(self.expected)
        actual = pd.Series(self.actual)
        return (sum(expect == actual) / float(len(self.expected))) * 100
class ModelStore():
    """One window's classifier plus bookkeeping about the rows it covers."""
    def __init__(self, df, window, xcol, ycol):
        self.window = window
        self.xcol = xcol
        self.ycol = ycol
        (self.x1, self.y1), (self.x2, self.y2) = self.window
        self.unique_place_count = len(df.place_id.unique())
        self.model = None
        self.total_count = len(df)
    def __unicode__(self):
        return '{}: {}, {}'.format(self.window, self.total_count, self.unique_place_count)
    def get_self_df(self, df):
        """Add derived time features (hour-of-day and day-of-week cycles).

        NOTE(review): assigns columns on the passed frame itself (no copy),
        so the caller's df is mutated — confirm that is acceptable.
        """
        self_df = df
        self_df['hours'] = self_df.time / 60.0
        self_df['days'] = self_df.time / (60*24.0)
        self_df['hours_cycle'] = self_df.hours % 24
        self_df['days_cycle'] = self_df.days % 7
        return self_df
    def train(self, df):
        """Fit a small random forest on x, y, accuracy and time cycles."""
        self_df = self.get_self_df(df)
        from sklearn.ensemble import RandomForestClassifier
        self.model = RandomForestClassifier(n_estimators=5) # x, y, accuracy, hours_cycle, days_cycle
        tdf = self_df.sort_values('row_id').set_index('row_id')
        train_df = tdf[['x', 'y', 'accuracy', 'hours_cycle', 'days_cycle']]
        values = tdf['place_id']
        self.model.fit(train_df, values)
    def predict(self, df):
        """Return {row_id: predicted place_id} for the rows of df."""
        wdf = df.sort_values('row_id').set_index('row_id')
        wdf = self.get_self_df(wdf)
        wdf = wdf[['x', 'y', 'accuracy', 'hours_cycle', 'days_cycle']]
        return dict(zip(wdf.index, self.model.predict(wdf)))
def run():
    """End-to-end experiment: split, train per-window models, score."""
    print 'Splitting train and test data'
    # 80/20 holdout split of the global training frame.
    train, test = train_test_split(df_train, test_size = 0.2)
    print 'Initializing PredictionModel class'
    pred_model = PredictionModel(df=train)
    print 'Init done'
    print pred_model.slices
    print 'Training Model'
    pred_model.train()
    print 'Done Training'
    print 'Predicting on test data'
    print pred_model.predict(test)
    print 'Done predicting'
    score = pred_model.score()
    print 'Score: {}'.format(score)
    return score
# Execute the full pipeline at import time.
run()
|
import lib_robotis_hack
import myLib
import numpy as np
import RLtoolkit
import json
import datetime
class ObservationManager:
    """Reads servo observations and converts them to tile-coded RL states.

    NOTE(review): all the containers below are CLASS-level attributes, so
    every instance shares the same angle/load/... history lists — confirm
    only one instance is ever created, or move these into __init__.
    """
    s1 = None  # primary servo (source of load/temperature/voltage)
    s2 = None  # secondary servo (angle only)
    currentAngle = 0.0
    currentAngleS2 = 0.0
    currentLoad = 0.0
    currentTemperature = 0.0
    currentVoltage = 0.0
    angles = []
    loads = []
    temperatures = []
    voltages = []
    actions = []
    currentAction = 0
    currentState = None
    nextState = None
    numTilings = 24
    obsIndex = 0  # read cursor for replaying observations from file
    numTiles = 24
    # Size of the full binary feature vector produced by tile coding.
    numTilesTotal = numTiles * numTiles * numTiles * numTilings
    def __init__(self, servos):
        # servos: sequence of two servo objects, or None for file replay.
        if servos is not None:
            self.s1 = servos[0]
            self.s2 = servos[1]
    def getObs(self, action=0):
        """Poll both servos, record the readings and the taken action."""
        self.currentAngle = self.s1.read_angle()
        self.currentAngleS2 = self.s2.read_angle()
        self.currentLoad = self.s1.read_load()
        self.currentTemperature = self.s1.read_temperature()
        self.currentVoltage = self.s1.read_voltage()
        self.angles.append(self.currentAngle)
        self.loads.append(self.currentLoad)
        self.temperatures.append(self.currentTemperature)
        self.voltages.append(self.currentVoltage)
        self.actions.append(action)
        self.currentAction = action
    def initStateFromFile(self):
        """Build currentState from the next recorded (file) observation."""
        self.getObsFromIndex()
        currentLoad = myLib.normalizeLoad(self.currentLoad)
        # print 'load is: ' + str(currentLoad)
        currentAngle = myLib.normalizeAngle(self.currentAngle)
        obs = [currentAngle * self.numTiles,
               currentLoad * self.numTiles, self.currentAction] # myLib.normalizeLoad(float(s1.read_load()))*numTiles] # , s2.read_encoder()/853/.1, s2.read_load()/1023/.1]
        currentStateSparse = RLtoolkit.tiles.tiles(self.numTilings, self.numTilesTotal, obs) # tileIndices
        currentState = np.zeros(self.numTilesTotal)
        for index in currentStateSparse: # Convert to full vector of 0s and 1s
            currentState[index] = 1
        # nextState = np.append(nextState, 1)#bias bit
        self.currentState = currentState
    def getObsFromIndex(self):
        """Replay one recorded observation and advance the read cursor."""
        index = self.obsIndex
        self.currentAngle = self.angles[index]
        self.currentLoad = self.loads[index]
        self.currentTemperature = self.temperatures[index]
        self.currentVoltage = self.voltages[index]
        self.currentAction = self.actions[index]
        self.obsIndex += 1
    def initState(self):
        """Poll the servos and build currentState from the live reading."""
        self.getObs()
        # Return state values scaled to unit length and then scaled into number of tiles
        currentLoad = myLib.normalizeLoad(self.currentLoad)
        # print 'load is: ' + str(currentLoad)
        currentAngle = myLib.normalizeAngle(self.currentAngle)
        obs = [currentAngle * self.numTiles,
               currentLoad * self.numTiles, self.currentAction] # myLib.normalizeLoad(float(s1.read_load()))*numTiles] # , s2.read_encoder()/853/.1, s2.read_load()/1023/.1]
        currentStateSparse = RLtoolkit.tiles.tiles(self.numTilings, self.numTilesTotal, obs) # tileIndices
        currentState = np.zeros(self.numTilesTotal)
        for index in currentStateSparse: # Convert to full vector of 0s and 1s
            currentState[index] = 1
        # nextState = np.append(nextState, 1)#bias bit
        self.currentState = currentState
    def getState(self):
        """Tile-code the most recent observation into nextState and return it."""
        # Return state values scaled to unit length and then scaled into number of tiles
        currentLoad = myLib.normalizeLoad(self.currentLoad)
        # print 'load is: ' + str(currentLoad)
        currentAngle = myLib.normalizeAngle(self.currentAngle)
        obs = [currentAngle * self.numTiles,
               currentLoad * self.numTiles, self.currentAction] # myLib.normalizeLoad(float(s1.read_load()))*numTiles] # , s2.read_encoder()/853/.1, s2.read_load()/1023/.1]
        nextStateSparse = RLtoolkit.tiles.tiles(self.numTilings, self.numTilesTotal, obs) # tileIndices
        nextState = np.zeros(self.numTilesTotal)
        for index in nextStateSparse: # Convert to full vector of 0s and 1s
            nextState[index] = 1
        # nextState = np.append(nextState, 1)#bias bit
        self.nextState = nextState
        return nextState
    def writeObs(self):
        """Dump the recorded histories to a timestamped JSON file."""
        data = {'angles' : self.angles, 'loads' : self.loads , 'temperatures' : self.temperatures, 'voltages' : self.voltages, 'actions' : self.actions}
        with open('obs_' + str(datetime.datetime.now()) + '.json', 'w') as f:
            json.dump(data, f)
    def readFromFile(self, fileName):
        """Load previously recorded histories from a JSON file."""
        with open(fileName, 'r') as f:
            data = json.load(f)
        self.angles = data['angles']
        self.loads = data['loads']
        self.temperatures = data['temperatures']
        self.voltages = data['voltages']
        self.actions = data['actions']
import os
from shutil import copyfile
import config
import string
DATADIR = config.DATADIR
# Ask for a backup directory name and restrict it to filename-safe characters.
BACKUP_NAME = input("Enter a backup directory name: ")
valid_chars = set(string.ascii_letters + string.digits + "-" + "_")
# NOTE(review): assert is stripped under `python -O`; raising would be safer.
assert BACKUP_NAME and not set(BACKUP_NAME).difference(valid_chars), "Enter a valid directory name. Valid characters are letters, digits, dashes, and underscores."
BACKUP = os.path.join(DATADIR, BACKUP_NAME)
if not os.path.isdir(BACKUP):
    os.mkdir(BACKUP)
    verify = "y"
else:
    # Existing directory: require explicit confirmation before overwriting.
    verify = ""
    while verify not in ["y","yes","n","no"]:
        verify = input("Backup directory already exists. Are you sure you want to overwrite? [enter y/yes or n/no] ").lower()
if verify in ["y","yes"]:
    # Copy only the files directly inside DATADIR (no recursion).
    data_files = next(os.walk(DATADIR))[2]
    for f in data_files:
        copyfile(os.path.join(DATADIR, f), os.path.join(BACKUP, f))
import pandas as pd
# Print the first line of the iris data file.
# Fixes: raw string so the backslashes are not (invalid) escape sequences,
# a context manager so the handle is always closed, and no shadowing of
# the builtin name `file`.
with open(r'E:\csvdhf5xlsxurlallfiles\iris.txt', 'r') as iris_file:
    first_line = iris_file.readline()
print(first_line)
|
import torch
def train():
    """Set up the SGD optimizer and the compute device.

    NOTE(review): `params` and `args` are not defined anywhere in this
    file — this looks like an unfinished stub (no training loop either);
    confirm the missing pieces exist in the full module before calling.
    """
    optimizer = torch.optim.SGD(
        params, lr=args.lr, momentum=args.momentum, weight_decay=args.weight_decay)
    # Use CUDA only when available AND explicitly requested via args.
    device = torch.device("cuda" if torch.cuda.is_available() and args.use_cuda else "cpu")
|
import numpy as np
import matplotlib.pyplot as plt
#from mpl_toolkits.axes_grid1.inset_locator import inset_axes
class Graphics():
    """Plot helpers for thin-film optical contrast results.

    Produces a two-panel figure: a line plot of contrast vs wavelength
    (one curve per layer-2 thickness) and a filled contour of contrast over
    (layer-2 thickness, wavelength), which is also saved to images/.
    """

    def __init__(self, wavelength, layers_name, layer_l2_name):
        """Create the shared 16x6 figure.

        Parameters:
            wavelength    -- sequence of wavelengths in nm (x axis)
            layers_name   -- name of the layer stack (images/ subdirectory)
            layer_l2_name -- display name of layer 2 (contour x-axis label)
        """
        self.fig = plt.figure(figsize=(16, 6))
        self.wavelength = wavelength
        self.layers_name = layers_name
        self.layer_l2_name = layer_l2_name

    def linear_plot(self, contrast, thicknes_l2) -> None:
        """Left panel: one contrast-vs-wavelength curve per layer-2 thickness.

        Raw strings are used for the LaTeX labels: the original non-raw
        "$\\mu m$" triggered invalid-escape DeprecationWarnings.
        """
        self.fig.add_subplot(1, 2, 1)
        plt.grid(True)
        for i, thickness in enumerate(thicknes_l2):
            if thickness >= 1000:
                # express micrometre-scale thicknesses in µm for readability
                label = r"$d_{2}= $" + str(thickness / 1000) + r" $\mu m$"
            else:
                label = r"$d_{2}= $" + str(thickness) + r" $nm$"
            plt.plot(self.wavelength, contrast[i], "-", linewidth=3, label=label)
        plt.legend(loc="best", fontsize=10)
        plt.tick_params(labelsize=12)
        plt.xlabel("Longitud de Onda [nm]", fontsize=16)
        plt.ylabel("Contraste", fontsize=16)

    def contour_plot(self, contrast, thicknes_l1_i, thicknes_l2) -> None:
        """Right panel: filled contour of contrast; saves the whole figure.

        Parameters:
            contrast      -- 2D array indexed [thickness, wavelength]
            thicknes_l1_i -- layer-1 thickness (used in the output file name)
            thicknes_l2   -- layer-2 thickness values (x axis)
        """
        self.fig.add_subplot(1, 2, 2)
        contrast = np.transpose(contrast)  # rows -> wavelength, cols -> thickness
        wavelength = self.wavelength
        plt.contourf(thicknes_l2, wavelength, contrast, 100, cmap='jet')
        plt.tick_params(labelsize=12)
        plt.xlabel(r"Espesor de $\mathregular{" + self.layer_l2_name + "}$ [nm]", fontsize=16)
        plt.ylabel("Longitud de Onda [nm]", fontsize=16)
        plt.ylim(min(wavelength), max(wavelength))
        cb = plt.colorbar()
        cb.set_label(label='Contraste', size=16)
        plt.grid(True)
        # persist one image per layer-1 thickness inside this stack's folder
        plt.savefig("images/" + self.layers_name + "/" + str(thicknes_l1_i) + ".jpg", transparent=True, bbox_inches='tight', dpi=300)
        plt.close()
SWAP = {'i': '1', 'I': '1', 'o': '0', 'O': '0', 's': '5', 'S': '5'}


def make_password(password):
    """Build a password from the first letter of each word in the phrase,
    applying the leet swaps i/I -> 1, o/O -> 0, s/S -> 5."""
    letters = []
    for word in password.split():
        first = word[0]
        letters.append(SWAP.get(first, first))
    return ''.join(letters)
|
import tkinter as tk
from tkinter import *
from tkinter import ttk
from tkinter import messagebox
import configparser
import cfg_tempprobes
import cfg_global
import cfg_outlets
import cfg_environmental
import cfg_switches
import cfg_analog
import cfg_feedtimers
import cfg_pwm
import cfg_common
import cfg_alerts
LARGE_FONT= ("Verdana", 12)  # default font for page headings

# Toolbar radiobutton values; each value selects one configuration page.
PAGE_GLOBAL = 0
PAGE_TEMPPROBES = 1
PAGE_OUTLETS = 2
PAGE_ENVIRO = 3
PAGE_SWITCHES = 4
PAGE_ANALOG = 5
PAGE_FEEDTIMERS = 6
PAGE_PWM = 7
PAGE_ALERTS = 8
class RBP_configurator(tk.Tk):
    """Main configurator window: a toolbar of Radiobutton "tabs" that switch
    between the stacked cfg_* page frames."""

    def __init__(self, *args, **kwargs):
        tk.Tk.__init__(self, *args, **kwargs)
        #self.iconbitmap('@images/reefberrypi_logo.xbm')
        tk.Tk.wm_title(self, "Reefberry Pi Configurator")

        # check if config file exists, if not it will make one
        cfg_common.checkifconfigexists()

        # toolbar holding one radiobutton per configuration page
        self.ConfigSelection = IntVar()
        toolbarframe = tk.Frame(self, relief=tk.FLAT)
        toolbarframe.pack(side=TOP, fill=tk.X)

        container = tk.Frame(self)
        container.pack(side="top", fill="both", expand=True)
        container.grid_rowconfigure(0, weight=1)
        container.grid_columnconfigure(0, weight=1)

        # instantiate every page once and stack them in the same grid cell;
        # show_frame() raises the selected one
        self.frames = {}
        for F in (cfg_global.PageGlobal,
                  cfg_tempprobes.PageTempProbes,
                  cfg_outlets.PageOutlets,
                  cfg_environmental.PageEnvironmental,
                  cfg_switches.PageSwitches,
                  cfg_analog.PageAnalogProbes,
                  cfg_feedtimers.PageFeedTimers,
                  cfg_pwm.PagePWM,
                  cfg_alerts.PageAlerts):
            frame = F(container, self)
            self.frames[F] = frame
            frame.grid(row=0, column=0, sticky="nsew")
        self.show_frame(cfg_global.PageGlobal)

        # toolbar buttons; PhotoImages are kept as attributes so Tk does not
        # garbage-collect them while the buttons are alive
        self.img_global = self._add_tab_button(toolbarframe, "Global", "images/globe-64.png", PAGE_GLOBAL)
        self.img_temperature = self._add_tab_button(toolbarframe, "Temp Probes", "images/temperature-64.png", PAGE_TEMPPROBES)
        self.img_outlets = self._add_tab_button(toolbarframe, "Outlets", "images/socket-64.png", PAGE_OUTLETS)
        self.img_environmental = self._add_tab_button(toolbarframe, "Environmental", "images/heating-room-64.png", PAGE_ENVIRO)
        self.img_switches = self._add_tab_button(toolbarframe, "Switches", "images/switch-on-64.png", PAGE_SWITCHES)
        self.img_analog = self._add_tab_button(toolbarframe, "Analog Probes", "images/sine-64.png", PAGE_ANALOG)
        self.img_feed = self._add_tab_button(toolbarframe, "Feed Timers", "images/time-64.png", PAGE_FEEDTIMERS)
        self.img_pwm = self._add_tab_button(toolbarframe, "PWM", "images/integrated-circuit-64.png", PAGE_PWM)
        self.img_alerts = self._add_tab_button(toolbarframe, "Alerts", "images/alarm-64.png", PAGE_ALERTS)

    def _add_tab_button(self, parent, text, image_file, value):
        """Create and pack one toolbar Radiobutton; return its PhotoImage.

        The image is returned (and stored by the caller) because Tk only
        keeps a weak reference to Radiobutton images.
        """
        img = PhotoImage(file=image_file)
        btn = Radiobutton(parent, text=text, variable=self.ConfigSelection,
                          image=img, value=value, indicatoron=0,
                          compound=TOP, width=90, command=self.change_tab)
        btn.pack(side=LEFT)
        return img

    def change_tab(self):
        """Show the page matching the currently selected toolbar button.

        (The first positional parameter was previously named 'selection',
        obscuring that it is really self; the long if/elif chain is now a
        dictionary dispatch.)
        """
        pages = {
            PAGE_GLOBAL: cfg_global.PageGlobal,
            PAGE_TEMPPROBES: cfg_tempprobes.PageTempProbes,
            PAGE_OUTLETS: cfg_outlets.PageOutlets,
            PAGE_ENVIRO: cfg_environmental.PageEnvironmental,
            PAGE_SWITCHES: cfg_switches.PageSwitches,
            PAGE_ANALOG: cfg_analog.PageAnalogProbes,
            PAGE_FEEDTIMERS: cfg_feedtimers.PageFeedTimers,
            PAGE_PWM: cfg_pwm.PagePWM,
            PAGE_ALERTS: cfg_alerts.PageAlerts,
        }
        page = pages.get(self.ConfigSelection.get())
        if page is not None:
            self.show_frame(page)

    def show_frame(self, cont):
        """Raise the frame registered for page class `cont` to the top."""
        frame = self.frames[cont]
        frame.tkraise()
# Build the main window and run the Tk event loop.
root = RBP_configurator()

def on_closing():
    """Ask for confirmation before closing; destroys the app on OK."""
    # NOTE(review): message reads "Are you sure want to quit?" (missing
    # "you") — it is a runtime string, so it is left untouched here.
    if messagebox.askokcancel("Quit", "Are you sure want to quit?\n\nAny unsaved changes will be lost."):
        root.destroy()

# Route the window-manager close button through the confirmation dialog.
root.protocol("WM_DELETE_WINDOW", on_closing)
root.mainloop()
## outletwin = tk.Toplevel()
## outletwin.transient()
## outletwin.grab_set()
|
#!/usr/bin/python
"""
WiimoteController.py needs to be run from the command line
This code is for the remote-controlled PiWars 2017 challenges:
Pi Noon
Obstacle Course
Skittles
Slightly Deranged Golf
http://piwars.org/
Need to install correct python modules, see
https://help.ubuntu.com/community/CWiiD
"""
# Import required libraries
import time
import logging
import SetupConsoleLogger
import GPIOLayout
import cwiid
import SpeedSettings
import DualMotorController  # BUG FIX: this module was used below but never
                            # imported, so module load raised NameError

# Delay between joystick polls, in seconds
STICK_DELAY = 0.1

# Create a logger to both file and stdout
LOGGER = logging.getLogger(__name__)
SetupConsoleLogger.setup_console_logger(LOGGER)

# Initialise the four-wheel motor controller from the GPIO pin map
robotmove = DualMotorController.DualMotorController(
    GPIOLayout.MOTOR_LEFT_FRONT_FORWARD_GPIO,
    GPIOLayout.MOTOR_LEFT_FRONT_BACKWARD_GPIO,
    GPIOLayout.MOTOR_RIGHT_FRONT_FORWARD_GPIO,
    GPIOLayout.MOTOR_RIGHT_FRONT_BACKWARD_GPIO,
    GPIOLayout.MOTOR_LEFT_REAR_FORWARD_GPIO,
    GPIOLayout.MOTOR_LEFT_REAR_BACKWARD_GPIO,
    GPIOLayout.MOTOR_RIGHT_REAR_FORWARD_GPIO,
    GPIOLayout.MOTOR_RIGHT_REAR_BACKWARD_GPIO)
def main():
    """Drive the robot from the Wiimote nunchuk joystick until interrupted."""
    # Initial forward speed
    speed = SpeedSettings.SPEED_FASTEST

    # Connecting to the wiimote. This allows several attempts
    # as the first few often fail.
    LOGGER.info("Press 1+2 on your Wiimote now ...")
    time.sleep(1)
    wm = None
    attempt = 2
    while not wm:
        try:
            wm = cwiid.Wiimote()
        except RuntimeError:
            if attempt > 5:
                LOGGER.info("Cannot create Wiimote connection.")
                quit()
            LOGGER.info("Error opening wiimote connection, attempt " + str(attempt))
            attempt += 1
    LOGGER.info("Wiimote connected.")

    # Report button presses, accelerometer and extension state;
    # light LED 1 to show the link is up.
    wm.rpt_mode = cwiid.RPT_BTN | cwiid.RPT_ACC | cwiid.RPT_EXT
    wm.led = 1

    def drive(action, chosen_speed, label):
        # one joystick response: move, log, then debounce briefly
        action(chosen_speed)
        LOGGER.info(label + " at speed " + str(chosen_speed))
        time.sleep(STICK_DELAY)

    # Respond to the nunchuk joystick forever.
    while True:
        if 'nunchuk' not in wm.state:
            print("Doh for now")
            time.sleep(2)
            continue

        # X axis: Left Max = 25, Middle = 125, RightMax = 225
        stick_x = wm.state['nunchuk']['stick'][cwiid.X]
        # Y axis: DownMax = 30, Middle = 125, UpMax = 225
        stick_y = wm.state['nunchuk']['stick'][cwiid.Y]

        # Y axis (forward/reverse) takes priority over X axis (spin).
        if 150 < stick_y < 190:
            speed = SpeedSettings.SPEED_SLOW
            drive(robotmove.forward, speed, "Forward")
        elif stick_y >= 190:
            speed = SpeedSettings.SPEED_FASTEST
            drive(robotmove.forward, speed, "Forward")
        elif 50 < stick_y < 100:
            speed = SpeedSettings.SPEED_SLOW
            drive(robotmove.reverse, speed, "Reverse")
        elif stick_y <= 50:
            speed = SpeedSettings.SPEED_FASTEST
            drive(robotmove.reverse, speed, "Reverse")
        elif 150 < stick_x < 190:
            speed = SpeedSettings.SPEED_SLOW
            drive(robotmove.spin_right, speed, "Spin right")
        elif stick_x >= 190:
            speed = SpeedSettings.SPEED_FASTEST
            drive(robotmove.spin_right, speed, "Spin right")
        elif 50 < stick_x < 100:
            speed = SpeedSettings.SPEED_SLOW
            drive(robotmove.spin_left, speed, "Spin left")
        elif stick_x <= 50:
            speed = SpeedSettings.SPEED_FASTEST
            drive(robotmove.spin_left, speed, "Spin left")
        else:
            # joystick centred: stop the motors
            robotmove.stop()
            LOGGER.info("Stop!")
if __name__ == "__main__":
    try:
        main()
    except KeyboardInterrupt:
        # user pressed Ctrl-C: stop polling the wiimote
        LOGGER.info("Stopping the Wiimote Controller")
    finally:
        # always stop the motors and release GPIO resources on exit
        LOGGER.info("Wiimote Controller Finished")
        robotmove.cleanup()
|
from colors import *
class Ore:
    """A mineable ore type.

    Parameters:
        abundance -- relative spawn abundance (observed range 0.1-0.9;
                     presumably a probability weight — confirm with caller)
        color     -- display color for rendering the ore
    """

    def __init__(self, abundance, color):
        self.abundance = abundance
        self.color = color

    def __repr__(self):
        # debug-friendly representation, e.g. Ore(abundance=0.5, color='gold')
        return "{}(abundance={!r}, color={!r})".format(
            type(self).__name__, self.abundance, self.color)
# Ore registry: abundance decreases with rarity; colors come from colors.py
# (star import at the top of the file).
COAL_ORE = Ore(0.9, COLOR_STONE)
IRON_ORE = Ore(0.7, COLOR_STONE)  # NOTE(review): iron reuses the stone color — confirm intentional
GOLD_ORE = Ore(0.5, COLOR_GOLD)
DIAMOND_ORE = Ore(0.1, COLOR_DIAMOND)
|
#!/usr/bin/env python
# -*- coding:utf-8 _*-
"""
@author:wchao118
@license: Apache Licence
@file: FaceDatasets.py
@time: 2019/06/28
@contact: wchao118@gmail.com
@software: PyCharm
"""
from torch.utils.data import DataLoader, Dataset
import random
from PIL import Image
import torch
import numpy as np
class FaceData(Dataset):
    """Siamese-pair dataset over an ImageFolder-style dataset.

    Each item is (img0, img1, label) where label == 0 means the two images
    come from the same class and label == 1 means they differ.
    """

    def __init__(self, folder_datasets, transform=None):
        self.folder_datasets = folder_datasets
        self.transform = transform

    def __len__(self):
        return len(self.folder_datasets.imgs)

    def __getitem__(self, item):
        imgs = self.folder_datasets.imgs
        anchor = random.choice(imgs)
        # coin flip: 1 -> draw a same-class partner (label 0),
        #            0 -> draw a different-class partner (label 1)
        want_same = random.randint(0, 1)
        label = 0 if want_same else 1
        # rejection-sample until the partner's class matches the coin flip
        while True:
            partner = random.choice(imgs)
            if (anchor[1] == partner[1]) == bool(want_same):
                break
        img0 = Image.open(anchor[0])
        img1 = Image.open(partner[0])
        if self.transform:
            img0 = self.transform(img0)
            img1 = self.transform(img1)
        return img0, img1, torch.from_numpy(np.array(label))
|
"""File: three_shapes_game.py
Author: Russ Lewis
Purpose: Defines the Game class, which provides the core mechanisms for the
Three Shapes family of programs.
"""
import math # for sqrt
from graphics import graphics
class Game:
    def __init__(self, title, frame_rate, wid, hei):
        """Constructor.  Initializes the game to have zero objects; call
           add_obj() to add objects to the system.

           Parameters: window title, frame rate, and the width and height
           of the window
        """
        self._wid = wid
        self._hei = hei
        self._frame_rate = frame_rate
        self._win = graphics(wid, hei, title)

        # this is a configuration setting - it changes how we calculate
        # the distance between objects in do_nearby_calls()
        self._account_for_radii_in_dist = False

        # the user must call add_obj() to add to this set
        self._active_objs = set()

        # see what remove_obj() and execute_removes() do, to understand this
        # variable.
        self._pending_removes = set()

        # set by set_game_over(); the game loop may poll it with is_over()
        self._game_over = False

    def config_set(self, param, val):
        """Function to set various config variables.  Right now, it only
           supports a single parameter; more may be added later.  Give the
           name of the parameter (as a string), then the value.

           Parameters: config parameter to set, value

           Raises: ValueError for an unrecognized parameter.  (This used to
           be a bare 'assert False', which is stripped under 'python -O'
           and carried no message.)

           Supported Config Options:
              "account_for_radii_in_dist" -> Boolean
        """
        if param == "account_for_radii_in_dist":
            self._account_for_radii_in_dist = val
        else:
            raise ValueError("unrecognized config parameter: " + repr(param))

    def set_game_over(self):
        """Marks the game as over; see is_over()."""
        self._game_over = True

    def is_over(self):
        """Returns True once set_game_over() has been called."""
        return self._game_over

    def add_obj(self, new_obj):
        """Adds a new object to the game.  Can be called at any time, although
           if called in the middle of the nearby() or move() loops, may not be
           added to the ongoing loop.  The object must implement the standard
           methods required of any object: get_xy(), get_radius(), nearby(),
           move(), and draw().

           Parameters: the new object
        """
        assert new_obj not in self._active_objs
        self._active_objs.add(new_obj)

    # REMOVE LOGIC
    #
    # In the do_nearby_calls() and do_move_calls() methods, we loop over
    # lots of objects.  Inside those methods, the user may choose to call
    # remove_obj(); if they do, then ideally we would just remove it
    # immediately.  But we're in the middle of a loop: what if we call
    # nearby() or move() on a recently-removed object, or if we pass it as
    # a parameter to a nearby() call?
    #
    # One option would be to force the remove logic to exclude such objects
    # from the loop as it runs, but that's not the easiest thing in the
    # world.  Instead, remove_obj() will add an object to a set of "pending
    # removes" - none of these removals will take place until the game loop
    # calls execute_removes() - which happens *after* all of the nearby()
    # and move() calls have finished.
    #
    # When the user calls remove_obj(), it *MUST* be in the current set of
    # active objects.  It is *permissible* to call it multiple times in the
    # same game tick.

    def remove_obj(self, bad_obj):
        """Queues up an object to be removed from the game.  It is
           permissible to call this multiple times on the same object during
           one clock tick; all of the removals will take place at once,
           *after* all of the nearby() and move() calls have been completed,
           but *before* any draw() calls.  It is illegal to call this if the
           object is not currently in the game.

           Arguments: object to remove
        """
        assert bad_obj in self._active_objs
        self._pending_removes.add(bad_obj)

    def execute_removes(self):
        """Called by the game loop, after all of the nearby() and move() calls
           have completed; performs all of the pending remove operations.

           Arguments: None
        """
        self._active_objs -= self._pending_removes
        self._pending_removes = set()

    def do_nearby_calls(self):
        """Figures out how close each object is to every other, sorts them by
           distance, and then performs all of the nearby() calls on the object
           pairs.  Makes all of the calls for a given "left-hand" object as a
           block; if the user returns False from any call, we terminate that
           inner loop, and then start delivering values for another left-hand
           value.

           Parameters: none
        """
        positions = []
        for o in self._active_objs:
            x, y = o.get_xy()
            positions.append((o, x, y))

        # Note that we're doing a 2D loop, but because we're only looking for
        # one version of each pair (not the reversed), notice that we do
        # something funny with the lower bound of the inner loop variable.
        distances = []
        for i in range(len(positions)):
            for j in range(i + 1, len(positions)):
                o1, x1, y1 = positions[i]
                o2, x2, y2 = positions[j]

                dist = math.sqrt((x1 - x2)**2 + (y1 - y2)**2)
                if self._account_for_radii_in_dist:
                    dist -= o1.get_radius()
                    dist -= o2.get_radius()

                # we add two records to the 'distances' array, so that we can
                # simply *sort* that list at the end.  Note that the way that
                # we arrange this, we will organize first by the left-hand
                # object, then by the distance, and then by the right-hand
                # object (the last of which will rarely be an issue)
                #
                # UPDATE: Note that I wanted to use object references here -
                #         but then I realized that we couldn't sort by those!
                #         so I need to use the indices into the positions[]
                #         array instead.
                distances.append((i, dist, j))
                distances.append((j, dist, i))

        # now that we're done *creating* the distances, we can sort all of
        # them.
        distances.sort()

        # there should be exactly n(n-1) elements in the array - since every
        # object in the game will be paired with n-1 others.
        n = len(positions)
        assert len(distances) == n * (n - 1)

        # this loop is weird - but we have n different objects, each of which
        # has n-1 partners.  So I will implement each inner loop as looping
        # over a slice of the distances array.
        for i in range(n):
            for entry in distances[(n - 1) * i: (n - 1) * (i + 1)]:
                k1, dist, k2 = entry
                assert k1 == i

                left = positions[k1][0]
                right = positions[k2][0]

                # if the user returns False, then we will terminate this as a
                # left-hand element.
                if not left.nearby(right, dist, self):
                    break

    def do_move_calls(self):
        """Calls move() on every object in the game"""
        for o in self._active_objs:
            o.move(self)

    def do_edge_calls(self):
        """Finds any objects that are close to any edge - defined as within the
           radius of it (that is, touching or overlapping) - and calls edge()
           on them.

           Parameters: none
        """
        for o in self._active_objs:
            x, y = o.get_xy()
            rad = o.get_radius()

            if x < rad:
                o.edge("left", 0)
            if y < rad:
                o.edge("top", 0)
            if x + rad >= self._wid:
                o.edge("right", self._wid)
            if y + rad >= self._hei:
                o.edge("bottom", self._hei)

    def draw(self):
        """Calls draw() on every object in the game.  Also does the rest of the
           misc calls necessary to animate the window.
        """
        # if the window has been destroyed, then we will throw an exception when
        # we run clear() below.  So check for this condition first!
        if self._win.is_killed:
            self._game_over = True
            return

        self._win.clear()
        for o in self._active_objs:
            o.draw(self._win)
        self._win.update_frame(self._frame_rate)
|
import sys
from datetime import datetime
def pr_red(skk):
    """Print *skk* in red (ANSI escape 91) without a trailing newline."""
    print("\033[91m {}\033[00m".format(skk), end='')


def pr_cyan(skk):
    """Print *skk* in cyan (ANSI escape 96) without a trailing newline."""
    print("\033[96m {}\033[00m".format(skk), end='')
def mk_int(s):
    """Convert a (possibly padded) string to int; blank strings become None."""
    text = s.strip()
    if not text:
        return None
    return int(text)


def mk_double(s):
    """Convert a (possibly padded) string to float; blank strings become None."""
    text = s.strip()
    if not text:
        return None
    return float(text)
class WeatherReading:
    """One parsed row of a Murree weather CSV record.

    Constructing with '' yields an empty object (no attributes set), which
    callers use as an end-of-data sentinel pattern.
    """

    def __init__(self, record):
        if record == '':
            return
        # one comma-separated row of 23 fields, in file order
        (self.pkt, self.maxTempC, self.meanTempC, self.minTempC, self.dewPointC,
         self.meanDewPointC, self.minDewPointC, self.maxHumidity,
         self.meanHumidity, self.minHumidity, self.maxSeaLevelPressure,
         self.meanSeaLevelPressure, self.minSeaLevelPressure,
         self.maxVisibility, self.meanVisibility, self.minVisibility,
         self.maxWindSpeed, self.meanWindSpeed, self.maxGustSpeed,
         self.precipitation, self.cloudClover, self.event,
         self.windDirDegrees) = record.split(',')
        self.pkt = datetime.strptime(self.pkt, '%Y-%m-%d')
        # integer-valued fields (blank -> None), in the original order
        for name in ('maxTempC', 'minTempC', 'meanTempC', 'meanHumidity',
                     'maxHumidity', 'minHumidity', 'dewPointC',
                     'meanDewPointC', 'minDewPointC'):
            setattr(self, name, mk_int(getattr(self, name)))
        # float-valued fields (blank -> None), in the original order
        for name in ('minSeaLevelPressure', 'meanSeaLevelPressure',
                     'maxSeaLevelPressure', 'maxVisibility', 'minVisibility',
                     'meanVisibility', 'maxWindSpeed', 'meanWindSpeed',
                     'maxGustSpeed', 'precipitation', 'cloudClover'):
            setattr(self, name, mk_double(getattr(self, name)))
        # self.event and self.windDirDegrees stay as raw strings
# Month-name abbreviations used in the data file names; index 0 is a
# placeholder so calendar month numbers (1-12) index directly.
months = ["", "Jan", "Feb", "Mar", "Apr", "May", "Jun", "Jul", "Aug", "Sep", "Oct", "Nov", "Dec"]
# filePath = "/home/alii/PycharmProjects/WeatherMan/weatherfiles/Murree_weather_{0}_{1}.txt"
# Template: {0}=data directory, {1}=year, {2}=month abbreviation.
filePath = "{0}/Murree_weather_{1}_{2}.txt"
# days_in_a_month = [0, 31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]
class AnnualReport:
    """Task-1 summary: yearly temperature/humidity extremes and their dates."""

    # format template; display() no longer overwrites self.report with the
    # formatted text, so the report can be rendered more than once (bug fix)
    _TEMPLATE = "Highest: {0}C on {1} {2}\n" \
                "Lowest: {3}C on {4} {5}\n" \
                "Humidity: {6}% on {7} {8}"

    def __init__(self):
        self.maxTemp = None    # highest max temperature seen (C)
        self.maxDate = None    # datetime of that reading
        self.minTemp = None    # lowest min temperature seen (C)
        self.minDate = None
        self.mostHumid = None  # highest max humidity seen (%)
        self.humidDate = None
        self.report = self._TEMPLATE  # kept as an attribute for backward compatibility

    def display(self):
        """Print the formatted report without mutating the template."""
        print(self.report.format(self.maxTemp, months[self.maxDate.month], self.maxDate.day,
                                 self.minTemp, months[self.minDate.month], self.minDate.day,
                                 self.mostHumid, months[self.humidDate.month], self.humidDate.day))
class MonthlyReport:
    """Task-2 summary: monthly mean-temperature extremes and average humidity."""

    # format template; display() no longer overwrites self.report with the
    # formatted text, so the report can be rendered more than once (bug fix)
    _TEMPLATE = "Highest Average: {0}C\nLowest Average: {1}C\nAverage Mean Humidity: {2}%"

    def __init__(self):
        self.report = self._TEMPLATE  # kept as an attribute for backward compatibility
        self.maxMeanTemp = None  # highest daily mean temperature (C)
        self.minMeanTemp = None  # lowest daily mean temperature (C)
        self.meanHumid = None    # average of the daily mean humidities (%)

    def display(self):
        """Print the formatted report without mutating the template."""
        print(self.report.format(self.maxMeanTemp, self.minMeanTemp, round(self.meanHumid)))
class CalculateResults:
    """Static report generators over the Murree weather data files."""

    @staticmethod
    def _next_reading(file):
        """Return the next WeatherReading from an open file, or None at EOF.

        (Bug fix: the old code constructed WeatherReading('') for the line
        right after the first record, producing an attribute-less object
        that crashed on files with a single data row.)
        """
        record = file.readline()
        if record != '':
            return WeatherReading(record)
        return None

    @staticmethod
    def calculate_annual_report(directory, year):
        """Task 1: yearly extremes of max temp, min temp and max humidity.

        Returns an AnnualReport, or None (after printing a message) when no
        monthly file with data could be opened.
        """
        print("\n\n\n~~~TASK 1~~~")
        found_any = False
        annual_report = AnnualReport()
        for month in months:
            try:
                file = open(filePath.format(directory, year, month))
            except FileNotFoundError:
                continue
            file.readline()  # skip the header line
            weather_reading = CalculateResults._next_reading(file)
            if weather_reading is None:
                file.close()
                continue
            if not found_any:
                # seed the extremes from the first record of the year
                # NOTE(review): if that record has blank temp/humidity fields
                # the seeds are None and the comparisons below would raise —
                # unchanged from the original; confirm data always has them.
                annual_report.maxTemp = weather_reading.maxTempC
                annual_report.maxDate = weather_reading.pkt
                annual_report.minTemp = weather_reading.minTempC
                annual_report.minDate = weather_reading.pkt
                annual_report.mostHumid = weather_reading.maxHumidity
                annual_report.humidDate = weather_reading.pkt
                weather_reading = CalculateResults._next_reading(file)
                found_any = True
            while weather_reading is not None:
                if weather_reading.maxTempC is not None and annual_report.maxTemp < weather_reading.maxTempC:
                    annual_report.maxTemp = weather_reading.maxTempC
                    annual_report.maxDate = weather_reading.pkt
                if weather_reading.minTempC is not None and annual_report.minTemp > weather_reading.minTempC:
                    annual_report.minTemp = weather_reading.minTempC
                    annual_report.minDate = weather_reading.pkt
                if weather_reading.maxHumidity is not None and annual_report.mostHumid < weather_reading.maxHumidity:
                    annual_report.mostHumid = weather_reading.maxHumidity
                    annual_report.humidDate = weather_reading.pkt
                weather_reading = CalculateResults._next_reading(file)
            file.close()
        if found_any:
            return annual_report
        print("File(s) not found!")

    @staticmethod
    def calculate_monthly_report(directory, year, month):
        """Task 2: mean-temperature extremes and average mean humidity for
        one month.  Returns a MonthlyReport, or None on a missing/empty file.
        """
        print("\n\n\n~~~TASK 2~~~")
        try:
            file = open(filePath.format(directory, year, months[month]))
        except FileNotFoundError:
            print("File not found")
            return None
        file.readline()  # skip the header line
        weather_reading = CalculateResults._next_reading(file)
        if weather_reading is None:
            # header-only file: nothing to average
            file.close()
            return None
        monthly_report = MonthlyReport()
        monthly_report.maxMeanTemp = weather_reading.meanTempC
        monthly_report.minMeanTemp = weather_reading.meanTempC
        monthly_report.meanHumid = weather_reading.meanHumidity
        # records folded into the averages so far (bug fix: the old counter
        # stopped one short of the record count, inflating the average)
        days = 1
        weather_reading = CalculateResults._next_reading(file)
        while weather_reading is not None:
            if weather_reading.maxTempC is not None and weather_reading.minTempC is not None:
                if weather_reading.meanTempC is None:
                    # reconstruct a missing mean from the day's extremes
                    weather_reading.meanTempC = (weather_reading.maxTempC + weather_reading.minTempC) / 2
                if monthly_report.maxMeanTemp < weather_reading.meanTempC:
                    monthly_report.maxMeanTemp = weather_reading.meanTempC
                if monthly_report.minMeanTemp > weather_reading.meanTempC:
                    monthly_report.minMeanTemp = weather_reading.meanTempC
                # NOTE(review): meanHumidity may be None in the data, which
                # would raise here exactly as in the original — confirm inputs.
                monthly_report.meanHumid += weather_reading.meanHumidity
            days += 1
            weather_reading = CalculateResults._next_reading(file)
        file.close()
        monthly_report.meanHumid /= days
        return monthly_report

    @staticmethod
    def draw_bar_chart_for_a_month(directory, year, month):
        """Task 3: print one red (max temp) and one cyan (min temp) bar per day."""
        print("\n\n\n~~~TASK 3~~~")
        try:
            file = open(filePath.format(directory, year, months[month]))
        except FileNotFoundError:
            print("file not found")
            return
        file.readline()  # skip the header line
        weather_reading = CalculateResults._next_reading(file)
        day = 1
        while weather_reading is not None:
            # BUG FIX: the original tested maxTempC twice and never minTempC,
            # so a blank min crashed on '+' * None below
            if weather_reading.maxTempC is not None and weather_reading.minTempC is not None:
                max_bar = ("+" * weather_reading.maxTempC) + " " + str(weather_reading.maxTempC) + "\n"
                min_bar = ("+" * weather_reading.minTempC) + " " + str(weather_reading.minTempC) + "\n"
                print("{:02d} ".format(day), end='')
                pr_red(max_bar)
                print("{:02d} ".format(day), end='')
                pr_cyan(min_bar)
            weather_reading = CalculateResults._next_reading(file)
            day += 1
        file.close()

    @staticmethod
    def draw_bar_chart_for_a_month2(directory, year, month):
        """Bonus task: one combined line per day — cyan min bar, red max bar."""
        print("\n\n\n~~~BONUS TASK~~~")
        try:
            file = open(filePath.format(directory, year, months[month]))
        except FileNotFoundError:
            print("file not found")
            return
        file.readline()  # skip the header line
        weather_reading = CalculateResults._next_reading(file)
        day = 1
        while weather_reading is not None:
            # BUG FIX: same duplicated maxTempC check as in task 3
            if weather_reading.maxTempC is not None and weather_reading.minTempC is not None:
                min_bar = "+" * weather_reading.minTempC
                max_bar = ("+" * weather_reading.maxTempC) + " "
                print("{:02d} ".format(day), end='')
                pr_cyan(min_bar)
                pr_red(max_bar)
                print(weather_reading.minTempC, "C - ", weather_reading.maxTempC, "C")
            weather_reading = CalculateResults._next_reading(file)
            day += 1
        file.close()
# ----------------------------------------------------------------------
# Command-line driver.
# usage: <script> <data_dir> (-e <year> | -a <year>/<month> | -c <year>/<month>) ...
# Flags sit at even argv indices; their values at the following odd index.
# ----------------------------------------------------------------------
print(sys.argv)
directory = sys.argv[1]
i = 2
while i < len(sys.argv):
    argument = sys.argv[i]
    if i % 2 == 0:
        # even index: remember which flag this next value belongs to
        choice = argument
    else:
        if choice == "-e":
            # yearly extremes report
            annualReport = CalculateResults.calculate_annual_report(directory, argument)
            if annualReport is not None:
                annualReport.display()
        if choice == "-a":
            # monthly averages report
            year, month = argument.split('/')
            monthlyReport = CalculateResults.calculate_monthly_report(directory, year, int(month))
            # NOTE(review): unlike -e, there is no None check here — a
            # missing data file returns None and this call would raise.
            monthlyReport.display()
        if choice == "-c":
            # daily bar charts (task 3 plus bonus variant)
            year, month = argument.split('/')
            CalculateResults.draw_bar_chart_for_a_month(directory, year, int(month))
            CalculateResults.draw_bar_chart_for_a_month2(directory, year, int(month))
    i += 1
|
#
# @lc app=leetcode.cn id=337 lang=python3
#
# [337] 打家劫舍 III
#
# @lc code=start
# Definition for a binary tree node.
class TreeNode:
    """A binary tree node: a value plus optional left/right children."""
    def __init__(self, val=0, left=None, right=None):
        self.val = val
        self.left = left
        self.right = right
class Solution:
    def rob(self, root: "Optional[TreeNode]") -> int:
        """LeetCode 337 (House Robber III): maximum sum of node values such
        that no two chosen nodes are parent and child.

        The annotation is quoted: the original bare Optional[TreeNode]
        raised NameError at class-creation time because typing.Optional was
        never imported.
        """
        res = self.dp(root)
        return max(res[0], res[1])

    def dp(self, root):
        """Return a two-element list [skip, take] for the subtree at root:
        skip -- best loot when root is NOT robbed
        take -- best loot when root IS robbed
        """
        if not root:
            return [0, 0]
        left = self.dp(root.left)
        right = self.dp(root.right)
        # skipping root leaves each child free to be robbed or not
        not_rob = max(left[0], left[1]) + max(right[0], right[1])
        # robbing root forces both children to be skipped
        rob = root.val + left[0] + right[0]
        return [not_rob, rob]
# @lc code=end
|
# Sanity-check commit used for testing work within this branch.
print("Branch hello world")
|
#!/usr/bin/env python
"""
Reference
https://docs.python.org/2/library/socket.html
https://docs.python.org/2/howto/sockets.html
http://voorloopnul.com/blog/a-python-proxy-in-less-than-100-lines-of-code/
"""
import sys
import socket
import select
import time
import re
import argparse
# Socket receive size, in bytes.
buffer_size = 4096
# Matches video chunk request paths such as "/vod/500Seg2-Frag5".
# Raw string so "\d" is a regex digit class, not a (deprecated) string escape.
p = re.compile(r'/vod/\d+Seg\d+-Frag\d+')
class Forward:
    """Outbound leg of the proxy: a TCP socket bound to the fake client IP
    and connected to the real content server.  (Python 2 module.)"""
    def __init__(self):
        self.forward = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    def start(self, host, port):
        """Bind to args.fakeip on an ephemeral port and connect to host:port.

        Returns the connected socket on success, or False on any error
        (the caller treats a falsy result as a failed connection).
        """
        try:
            self.forward.bind((args.fakeip, 0))
            self.forward.connect((host, port))
            return self.forward
        except Exception, e:
            print e
            return False
class TheServer:
def __init__(self, host, port):
self.input_list = []
self.channel = {}
self.server = socket.socket(socket.AF_INET, socket.SOCK_STREAM) # Make socket
self.server.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) # Reuse address
self.server.bind((host, port)) # Visible to outside with the interface (host, port)
self.server.listen(200) # Number of client can handle 200
def main_loop(self):
self.input_list.append(self.server) # Add socket
self.tPut = 10 # Initialize throughput as 10kbps
self.bitrate = 10
self.duration = 1
self.caltPut = 10
self.chunkname = None
contLen = None
timeS = None
while True:
ss = select.select
inputready, outputready, exceptready = ss(self.input_list, [], [])
for self.s in inputready:
if self.s == self.server:
self.on_accept()
break
# inbound from target server
if (self.s.getpeername()[0] == args.serverip):
self.data = self.s.recv(buffer_size)
tempTimeF = time.time()
if 'Content-Type: video' in self.data:
self.duration = timeF - timeS
contLen = contLenParse(self.data) # Parsing Contents Length in bytes
self.caltPut = tPutCal(self.duration, contLen*8/1024)
self.tPut = float(args.a) * self.caltPut + (1-float(args.a)) * self.tPut
self.bitrate = bitrateSel(self.tPut)
with open(args.logfile, 'w') as log:
log.write('%d %f %d %.1f %d %s %s\n' % (time.time(), self.duration, self.caltPut, self.tPut, self.bitrate, args.serverip, self.chunkname) )
log.closed
# inbound from client
else:
self.data = self.s.recv(buffer_size)
tempTimeS = time.time()
if 'GET /vod/' in self.data: # When Proxy got reqeust 'GET' from client
timeS = tempTimeS
if len(self.data) == 0: # if received data is empty, close
self.on_close()
break
else: # if received data is not empty, handover the data to peer
self.on_recv()
def on_accept(self):
forward = Forward().start(args.serverip, 8080)
clientsock, clientaddr = self.server.accept()
if forward:
print clientaddr, "has connected"
self.input_list.append(clientsock)
self.input_list.append(forward)
self.channel[clientsock] = forward
self.channel[forward] = clientsock
else:
print "Can't establish connection with remote server.",
print "Closing connection with client side", clientaddr
clientsock.close()
def on_close(self):
print self.s.getpeername(), "has disconnected"
#remove objects from input_list
self.input_list.remove(self.s)
self.input_list.remove(self.channel[self.s])
out = self.channel[self.s]
# close the connection with client
self.channel[out].close() # equivalent to do self.s.close()
# close the connection with remote server
self.channel[self.s].close()
# delete both objects from channel dict
del self.channel[out]
del self.channel[self.s]
def on_recv(self):
data = self.data
if (self.s.getpeername()[0] == args.serverip): # outbound to client w/ modification
if 'big_buck_bunny.f4m' in data: # replace manifest file
data = self.data.replace('big_buck_bunny.f4m', 'big_buck_bunny_nolist.f4m')
print('manifest file replaced')
self.channel[self.s].send(data)
pass
else: # outbound to target server w/ modification
if 'GET /vod/' in data: # bitrate adaptive function
s = self.data.split()
if p.match(s[1]):
temp = s[1]
self.chunkname = chunkSel(self.bitrate, s[1])
data = data.replace(s[1], self.chunkname)
print("chunkname changed to" + self.chunkname)
self.channel[self.s].send(data)
def tPutCal(duration, sizeOfData):
    """Return average throughput (bytes per second) for sizeOfData bytes
    transferred over `duration` seconds.

    float() guards against silent integer truncation under Python 2
    division when both arguments happen to be ints.
    """
    return float(sizeOfData) / duration
def bitrateSel(th):
    """Pick the highest bitrate (kbps) whose 1.5x safety requirement is met
    by the measured throughput `th` (kbps).

    FIX: the inline comments previously mislabeled every tier (e.g. the
    1000 kbps branch was annotated "10kbps"); they now match the values.
    """
    print(th)
    if (th >= 1.5*1000):
        print("1000kbps")
        return 1000 # 1000kbps
    elif (th >= 1.5*500):
        print("500kbps")
        return 500 # 500kbps
    elif (th >= 1.5*100):
        print("100kbps")
        return 100 # 100kbps
    else:
        print("10kbps")
        return 10 # 10kbps
def chunkSel(bitrate, URL):
    """Rewrite a /vod/ chunk URL to request `bitrate`, keeping the original
    Seg and Frag numbers."""
    seg_part, frag_part = URL.split('-')
    seg_no = seg_part[seg_part.find('Seg') + 3:]
    frag_no = frag_part[frag_part.find('Frag') + 4:]
    return '/vod/' + str(bitrate) + 'Seg' + seg_no + '-Frag' + frag_no
def contLenParse(data):
    """Extract the Content-Length value from the 4th line of an HTTP header
    blob ("Name: value") and return it as a float."""
    header_line = data.split('\n')[3].rstrip()
    return float(header_line.split(' ')[1])
def main():
    # Listen on the configured port and relay traffic until interrupted.
    server = TheServer('', int(args.port))
    try:
        server.main_loop()
    except KeyboardInterrupt:
        print "Ctrl C - Stopping server"
        sys.exit(1)
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Launch a proxy.')
parser.add_argument('logfile', help='name of the logfile')
parser.add_argument('a', help='a')
parser.add_argument('port', help='Listening port')
parser.add_argument('fakeip', help='Fake ip for proxy')
parser.add_argument('serverip', help='Target server ip')
args = parser.parse_args()
main()
|
# coding=utf-8
import redis as Redis # 连接redis,用来存放亮灯记录
redis = Redis.StrictRedis(host='localhost', port=6379, db=0)
redis_key = "pwl_record"
import time
rightBrace = "}" # closing brace appended when building record strings
# Sound-sensor sensitivity table; sensitivity decreases from 4 down to 0. Unit: ADC minimum-precision steps.
sound_sensor_sensitivity_map = {
4: 3,
3: 5,
2: 8,
1: 10,
0: 15
}
"""
智能灯父类,用于定义智能灯基本参数、方法。
Parameters:
self.id: {Number} 0,1,2, 智能灯的序号
self.gpioSetting: {Table} gpio对象
self.gpio: {Number} gpio引脚号
self.on: {Boolean} 智能灯是否开启,True为开启。
self.light_on: {String} 智能灯开启存放的部分信息。
self.light_off: {String} 智能灯关闭存放的部分信息。
self.switchOn(self): {function} 打开智能灯。
self.switchOff(self): {function} 关闭智能灯。
self.pushRecord(self, action): {function} 存放信息
@:param action: {Boolean}开启/关闭动作(True是开启)
self.process(self, opticalSensorVal, soundSensorValues, threshold): {function} 小灯根据传感器值进行操作
@:param opticalSensorVal: {Number} 光传感器值
@:param soundSensorValues: {Number} 声传感器值
@:param threshold: {Number} 光感阈值
"""
class Light:
    """Base smart light: drives a group of GPIO pins and logs on/off events to redis."""

    def __init__(self, dict):  # NOTE(review): `dict` shadows the builtin; kept for caller compatibility
        import RPi.GPIO as GPIO
        self.gpio = dict['gpio']  # list of BCM pin numbers, one pin per brightness step
        self.brightness = dict['brightness']  # how many pins to drive high when switched on
        self.gpioSetting = GPIO
        self.gpioSetting.setmode(self.gpioSetting.BCM)
        for i in range(len(self.gpio)):
            self.gpioSetting.setup(self.gpio[i], self.gpioSetting.OUT)
        # self.pre_gpio = dict['pre_gpio']
        self.id = dict['id']
        self.on = False
        # Pre-built JSON prefixes; pushRecord appends the timestamp and "}".
        self.light_on = "{\"id\": " + str(self.id) + ", \"brightness\": " + str(
            self.brightness) + ", \"action\": true, \"occur_at\": "
        self.light_off = "{\"id\": " + str(self.id) + ", \"action\": false, \"occur_at\": "
        # Ensure all pins start in the "off" state.
        for i in range(len(self.gpio)):
            if self.gpioSetting.input(self.gpio[i]):
                self.switchOff()

    def switchOn(self):
        # Drive the first `brightness` pins high, then log the event.
        for i in range(self.brightness):
            self.gpioSetting.output(self.gpio[i], True)
        self.on = True
        self.pushRecord(True)

    def switchOff(self):
        # Log first, then drive every pin low.
        self.pushRecord(False)
        self.on = False
        for i in range(len(self.gpio)):
            self.gpioSetting.output(self.gpio[i], False)

    def pushRecord(self, action):
        # Append a JSON-style record to the redis list; action True = switched on.
        time_str = str(time.time())
        if action:
            output = self.light_on + time_str + rightBrace
        else:
            output = self.light_off + time_str + rightBrace
        redis.rpush(redis_key, output)

    def process(self, opticalSensorVal, soundSensorValues, threshold):
        # Hook for subclasses: react to one round of sensor readings.
        pass
"""
光控亮暗智能灯类(继承智能灯父类),用于定义光控亮暗智能灯参数、方法。
Parameters:
self.process(self, opticalSensorVal, soundSensorValues, threshold): {function} (方法重写)根据光感数值判断灯亮暗
@:param opticalSensorVal: {Number} 光传感器值
@:param soundSensorValues: {Number} 声传感器值
@:param threshold: {Number} 光感阈值
"""
class LightWithOpticalSensor(Light):
    """Light toggled purely by ambient-light level, with a short debounce window."""

    def __init__(self, dict):
        Light.__init__(self, dict)
        self.sleep = 2  # debounce interval, seconds
        self.end = 0    # timestamp until which state changes are suppressed

    def setSleep(self):
        """Open a new debounce window."""
        self.end = time.time() + self.sleep

    def process(self, opticalSensorVal, soundSensorValues, threshold):
        """Switch the light according to the optical reading.

        assumes a reading above `threshold` means "dark enough to light up"
        -- TODO confirm sensor polarity.
        """
        if time.time() <= self.end:
            return  # still inside the debounce window
        dark = opticalSensorVal > threshold
        pin_high = self.gpioSetting.input(self.gpio[0])
        if dark and not pin_high:
            self.switchOn()
            self.setSleep()
        elif not dark and pin_high:
            self.switchOff()
            self.setSleep()
"""
光控声控智能灯类(继承智能灯父类),用于定义光控声控智能灯参数、方法。
Parameters:
self.duration: {Number} 亮灯时间间隔
self.sensitivity: {Number} 声控敏感度
self.queue: {List} 记录前后两次声控信息进行比较
self.end: {Time} 亮灯结束时间戳
self.switchOn(self): {function} (方法重写)打开智能灯,并设置关灯时间。
self.getSoundSensorVal(self, soundSensorValues): {function} 将声感数值入队
self.check(self): {function} 判断是否声控亮灯
self.checkTime(self): {function} 判断是否时间到了灭灯
self.process(self, opticalSensorVal, soundSensorValues, threshold): {function} (方法重写)根据光感数值和声控数值判断灯亮暗
@:param opticalSensorVal: {Number} 光传感器值
@:param soundSensorValues: {Number} 声传感器值
@:param threshold: {Number} 光感阈值
"""
class LightWithOpticalSoundSensor(Light):
    """Light triggered by sound while it is dark; turns itself off after `duration` seconds."""

    def __init__(self, dict):
        Light.__init__(self, dict)
        self.duration = dict['duration']  # seconds the light stays on per trigger
        self.sensitivity = dict['sensitivity']  # minimum jump between readings that counts as a sound
        soundSensorVal = dict['soundSensorValues'][self.id]
        self.queue = [soundSensorVal, soundSensorVal]  # the two most recent sound readings
        self.end = 0  # timestamp at which the light should go out

    def switchOn(self):
        # Same as Light.switchOn, plus scheduling the automatic switch-off.
        for i in range(self.brightness):
            self.gpioSetting.output(self.gpio[i], True)
        self.on = True
        self.pushRecord(True)
        self.end = time.time() + self.duration

    def getSoundSensorVal(self, soundSensorValues):
        # Shift this light's latest reading into the two-element window.
        self.queue.pop()
        self.queue.insert(0, soundSensorValues[self.id])

    def check(self):
        # A sharp change between consecutive readings counts as a trigger:
        # extend the timer if already on, otherwise switch on.
        if abs(self.queue[0] - self.queue[1]) > self.sensitivity:
            if self.on:
                self.end = time.time() + self.duration
            else:
                self.switchOn()

    def checkTime(self):
        # Switch off once the scheduled end time has passed.
        if self.on and self.end < time.time():
            self.switchOff()

    def process(self, opticalSensorVal, soundSensorValues, threshold):
        # assumes opticalSensorVal > threshold means "dark" -- TODO confirm polarity
        self.getSoundSensorVal(soundSensorValues)
        if opticalSensorVal > threshold:
            self.check()
            self.checkTime()
        else:
            # Bright enough: force the light off if the first pin is still high.
            if self.gpioSetting.input(self.gpio[0]):
                self.switchOff()
"""
常亮光控声控亮度智能灯类(继承智能灯父类),用于定义光控声控亮度智能灯参数、方法。
Parameters:
self.duration: {Number} 亮灯时间间隔
self.sensitivity: {Number} 声控敏感度
self.queue: {List} 记录前后两次声控信息进行比较
self.end: {Time} 提高亮度结束时间戳
self.up: {Boolean} 记录是否在提高亮度状态
self.light_up: {String} 下同输入数据时预设字符串
self.light_down: {String}
self.brightness_txt: {String}
self.switchUp(self): {function} 打开提高智能灯亮度,并设置恢复亮度等待时间。
self.switchDown(self): {function} 恢复平时智能灯亮度。
self.getSoundSensorVal(self, soundSensorValues): {function} 将声感数值入队
self.check(self): {function} 判断是否声控提升亮度
self.checkTime(self): {function} 判断是否时间到了恢复暗的亮度
self.pushUpDownRecord(self, action): {function} 记录数据
self.process(self, opticalSensorVal, soundSensorValues, threshold): {function} (方法重写)根据光感数值和声控数值判断灯亮暗
@:param opticalSensorVal: {Number} 光传感器值
@:param soundSensorValues: {Number} 声传感器值
@:param threshold: {Number} 光感阈值
"""
class LightWithAdjustLightness(Light):
    """Always-on light whose brightness is temporarily raised by sound while dark."""

    def __init__(self, dict):
        Light.__init__(self, dict)
        self.duration = dict['duration']        # seconds the boosted brightness lasts
        self.sensitivity = dict['sensitivity']  # minimum sound-level delta that triggers a boost
        soundSensorVal = dict['soundSensorValues'][self.id]
        self.queue = [soundSensorVal, soundSensorVal]  # the two most recent sound readings
        self.end = 0     # timestamp when the brightness boost ends
        self.up = False  # True while the brightness is boosted
        # BUG FIX: this line previously read `self.switchOn() // <comment>` --
        # a C-style comment that Python parses as floor division of None by an
        # undefined name, raising at runtime. It is now a real comment.
        self.switchOn()  # the light is normally on
        self.light_up = "{\"id\": " + str(self.id) + ", \"action\": true, \"occur_at\": "
        # NOTE(review): the "down" record also says "action": true -- it looks
        # copy-pasted from light_up; confirm intent before changing it.
        self.light_down = "{\"id\": " + str(self.id) + ", \"action\": true, \"occur_at\": "
        self.brightness_txt = ", \"brightness\": "

    def switchUp(self):
        # Drive the extra pin (index `brightness`) high and schedule the restore.
        self.gpioSetting.output(self.gpio[self.brightness], True)
        self.up = True
        self.pushUpDownRecord(True)
        self.end = time.time() + self.duration

    def switchDown(self):
        # Log first, then drop back to the normal brightness.
        self.pushUpDownRecord(False)
        self.up = False
        self.gpioSetting.output(self.gpio[self.brightness], False)

    def getSoundSensorVal(self, soundSensorValues):
        # Shift this light's latest reading into the two-element window.
        self.queue.pop()
        self.queue.insert(0, soundSensorValues[self.id])

    def check(self):
        # A sharp change between consecutive readings counts as a trigger:
        # extend the boost if already up, otherwise start it.
        if abs(self.queue[0] - self.queue[1]) > self.sensitivity:
            if self.up:
                self.end = time.time() + self.duration
            else:
                self.switchUp()

    def checkTime(self):
        # Restore the normal brightness once the boost window has passed.
        if self.up and self.end < time.time():
            self.switchDown()

    def pushUpDownRecord(self, action):
        # Record a brightness change; the boosted level is brightness + 1.
        time_str = str(time.time())
        if action:
            output = self.light_up + time_str + self.brightness_txt + str(self.brightness + 1) + rightBrace
        else:
            output = self.light_down + time_str + self.brightness_txt + str(self.brightness) + rightBrace
        redis.rpush(redis_key, output)

    def process(self, opticalSensorVal, soundSensorValues, threshold):
        # assumes opticalSensorVal > threshold means "dark" -- TODO confirm polarity
        self.getSoundSensorVal(soundSensorValues)
        if opticalSensorVal > threshold:
            self.check()
            self.checkTime()
        else:
            # Bright enough: drop the boost if the first pin is still high.
            if self.gpioSetting.input(self.gpio[0]):
                self.switchDown()
|
import urllib
import json
response = urllib.urlopen("http://search.twitter.com/search.json?q=microsoft")
print type
#pyresponse = json.load(response)
#results = pyresponse['results']
#for i in results:
# print i['text']
afinnfile = open("AFINN-111.txt")
scores = {} # initialize an empty dictionary
for line in afinnfile:
term, score = line.split("\t") # The file is tab-delimited. "\t" means "tab character"
scores[term] = int(score) # Convert the score to an integer.
print scores.items() # Print every (term, score) pair in the dictionary
raw_input()
|
from Utils import SCORES_FILE_NAME
from TimeLogger import time_logger
@time_logger
def get_score(file_name=SCORES_FILE_NAME):
    """
    Reads the score from a file, returns the score- or zero, if unable to read from some reason
    :param file_name: optional, name of score file
    :type file_name: String
    :return: current score from file
    :rtype: int
    """
    try:
        with open(file_name, 'r') as score_file:
            return int(score_file.read())
    except Exception:
        # A missing or unreadable score file simply means "no score yet".
        return 0
@time_logger
def add_score(difficulty, file_name=SCORES_FILE_NAME):
    """
    Gets the current score from the score file, calculates and writes new score back to the file
    :param difficulty: value of 1 to 5
    :type difficulty: int
    :param file_name: optional, name of score file
    :type file_name: str
    """
    # BUG FIX: get_score() was previously called without an argument, so a
    # custom file_name was written to but never read from.
    new_score = get_score(file_name) + 5 + difficulty * 3
    try:
        with open(file_name, 'w') as score_file:
            score_file.write(str(new_score))
    except Exception:
        # Best-effort persistence: a failed write keeps the old file intact.
        pass
|
#!/usr/bin/python
from contextlib import closing
from zipfile import ZipFile, ZIP_DEFLATED
import os
import subprocess
import shutil
import bsdiff
def main():
    # Build android/ios zip bundles for every release folder, then generate
    # bsdiff patch files between releases.
    release_repository_path = 'release_repository'
    dirs = mylistdir(release_repository_path)
    # Sort version folders numerically: 'v1.2.3' -> 123.
    dirs.sort(key=lambda x, : int(x.replace('v', '').replace('.', '')))
    baseDir = './'+release_repository_path+'/'
    if len(dirs) > 0:
        nextFolderName = dirs[0]
        nextDir = baseDir + dirs[0]
        lastDir = dirs[-1]  # newest release folder name
        # androidImg = baseDir+lastDir+"/bundle/android/assets/img"
        # iosImg = baseDir+lastDir+"/bundle/ios/assets/img"
        # copyAssets(androidImg, iosImg)
        # create zip
        for folder in dirs:
            originFileAndroid = baseDir + folder + '/bundle/android/'
            originFileIos = baseDir + folder + '/bundle/ios/'
            androidZipFile = baseDir + folder + '/bundle/' + 'android.zip'
            iosZipFile = baseDir + folder + '/bundle/' + 'ios.zip'
            #create android zip file
            zipdir(originFileAndroid, androidZipFile)
            #create android zip file
            zipdir(originFileIos, iosZipFile)
            # NOTE(review): the four assignments below are never used inside
            # this loop (they are recomputed in the patch loop) -- dead code?
            targetFileAndroid = baseDir + lastDir + '/bundle/android.zip'
            patchesFileAndroid = baseDir + lastDir + '/patches/android/' + nextFolderName + '_' + lastDir
            targetFileIos = baseDir + lastDir + '/bundle/ios.zip'
            patchesFileIos = baseDir + lastDir + '/patches/ios/' + nextFolderName + '_' + lastDir
            nextFolderName = folder
            nextDir = baseDir + folder
        print('create zip file done')
        # create patches files
        nextFolderName = dirs[0]
        nextDir = baseDir + dirs[0]
        for folder in dirs[1:]:
            androidZipFile = nextDir + '/bundle/' + 'android.zip'
            iosZipFile = nextDir + '/bundle/' + 'ios.zip'
            # NOTE(review): target/patch paths always use lastDir, so every
            # patch is "<prev>_<newest>" against the newest bundle; if per-step
            # patches were intended this should probably use `folder` -- confirm.
            targetFileAndroid = baseDir + lastDir + '/bundle/android.zip'
            patchesFileAndroid = baseDir + lastDir + '/patches/android/' + nextFolderName + '_' + lastDir
            targetFileIos = baseDir + lastDir + '/bundle/ios.zip'
            patchesFileIos = baseDir + lastDir + '/patches/ios/' + nextFolderName + '_' + lastDir
            bsdiff.generatePatch(androidZipFile, targetFileAndroid, patchesFileAndroid)
            bsdiff.generatePatch(iosZipFile, targetFileIos, patchesFileIos)
            # shellAndroid = './node_modules/react-native-hotupdate/release_tools/bsdiff.py -o ' + androidZipFile + ' -t ' + targetFileAndroid + ' -p ' + patchesFileAndroid
            # shellIos = './node_modules/react-native-hotupdate/release_tools/bsdiff.py -o ' + iosZipFile + ' -t ' + targetFileIos + ' -p ' + patchesFileIos
            #
            # subprocess.Popen(shellAndroid, shell=True, stdout=subprocess.PIPE).stdout.read()
            # subprocess.Popen(shellIos, shell=True, stdout=subprocess.PIPE).stdout.read()
            nextFolderName = folder
            nextDir = baseDir + folder
        print('create patches file done')
def copyAssets(androidDir, iosDir):
    """Replace the android and ios image-asset folders with a fresh copy of ./img."""
    if os.path.exists(androidDir):
        shutil.rmtree(androidDir)
    if os.path.exists(iosDir):
        shutil.rmtree(iosDir)
    # copy img folder into both bundles
    shutil.copytree('img', androidDir)
    shutil.copytree('img', iosDir)
    # FIX: corrected the typos in the log message ("imgage resouce").
    print('copy image resource done')
def mylistdir(directory):
    """Like os.listdir(), but with dot-prefixed (hidden) entries filtered out."""
    return [entry for entry in os.listdir(directory) if not entry.startswith('.')]
def zipdir(basedir, archivename):
    """Recursively zip the files under `basedir` into `archivename`,
    storing paths relative to `basedir`. Empty directories are skipped."""
    assert os.path.isdir(basedir)
    prefix_len = len(basedir) + len(os.sep) - 1
    with closing(ZipFile(archivename, "w", ZIP_DEFLATED)) as archive:
        for root, dirs, files in os.walk(basedir):
            for name in files:
                full_path = os.path.join(root, name)
                # store the path relative to basedir
                archive.write(full_path, full_path[prefix_len:])
if __name__ == "__main__":
main()
|
#-*- coding: UTF-8 -*-
import datetime
import math
import simplejson as json
import PublishHistory as ph
import argparse
def main(yyyy=2015, MM=4, DD=25, HH=0, khour=3, answer=False, history=7):
    # Build two output files for the khour-long window ending at yyyy-MM-DD HH:
    #   w  -- a plain CSV of every feature per fan page
    #   fw -- a libsvm-style training file with only the selected features
    history_tracking = history #days
    et = datetime.datetime(yyyy,MM,DD,HH,0,0)
    feature_selection = {'f1':True ,'f3': True, 'f5': True, 'f8': True, 'f4':False }
    #predict period:
    #example:
    # MM=8, DD=13, HH=0, k=6
    # 2014/08/12 18:00:00 ~ 2014/08/13 00:00:00
    print "Groud Truth:"
    # Label window: likes/posts in the prediction period itself.
    target = ph.main(yyyy=et.year, MM=et.month, DD=et.day, HH=et.hour, khour=khour)
    seeds = target.keys()
    print et, len(seeds)
    print
    # Features f1/f2: the window immediately before the label window.
    et = et + datetime.timedelta(hours=-khour)
    prev_target = ph.main(yyyy=et.year, MM=et.month, DD=et.day, HH=et.hour, khour=khour)
    seeds = target.keys()
    print et, len(prev_target.keys())
    print len(prev_target.keys())
    seeds.extend(prev_target.keys())
    dedupedSeed = set(seeds)
    print "Dedupe:",len(dedupedSeed)
    # Features f3/f4: the same time slot across the past `history_tracking` days.
    et = datetime.datetime(yyyy,MM,DD,HH,0,0)
    weekpages ={}
    for i in range(0,history_tracking):
        et = et + datetime.timedelta(hours=-24)
        print et
        prev_behavior = ph.main(yyyy=et.year, MM=et.month, DD=et.day, HH=et.hour, khour=khour)
        for key in prev_behavior.keys():
            if key not in weekpages:
                weekpages[key] = prev_behavior[key]
            else:
                weekpages[key]["totallike"] = weekpages[key]["totallike"] + prev_behavior[key]["totallike"]
                weekpages[key]["totalpost"] = weekpages[key]["totalpost"] + prev_behavior[key]["totalpost"]
    print len(weekpages.keys())
    # Features f5/f6: every 6-hour slot across the same number of days.
    et = datetime.datetime(yyyy,MM,DD,HH,0,0)
    fulldaypages ={}
    for i in range(0,history_tracking*4):
        et = et + datetime.timedelta(hours=-6)
        print et
        prev_behavior = ph.main(yyyy=et.year, MM=et.month, DD=et.day, HH=et.hour, khour=khour)
        for key in prev_behavior.keys():
            if key not in fulldaypages:
                fulldaypages[key] = prev_behavior[key]
            else:
                fulldaypages[key]["totallike"] = fulldaypages[key]["totallike"] + prev_behavior[key]["totallike"]
                fulldaypages[key]["totalpost"] = fulldaypages[key]["totalpost"] + prev_behavior[key]["totalpost"]
    print len(fulldaypages.keys())
    #-----------
    # Features f7/f8: static page metadata crawled earlier.
    pagemeta = json.load(open("pagedata_core16.json"))
    print len(pagemeta.keys())
    #-----------
    sourecedate = "{:0>2d}".format(datetime.datetime.now().month) + "{:0>2d}".format(datetime.datetime.now().day)
    w = open("(2015_" + sourecedate + ")featuremulti_" + str(history_tracking)
        + "day_M"+str(MM)+"_D"+str(DD)+"_H"+str(HH)+"_k" + str(khour) + ".csv","w")
    fw = open("(2015_" + sourecedate + ")featuremulti_" + str(history_tracking)
        + "day_M"+str(MM)+"_D"+str(DD)+"_H"+str(HH)+"_k" + str(khour) + "."
        + ("f1" if feature_selection['f1'] else "")
        + ("f3" if feature_selection['f3'] else "")
        + ("f4" if feature_selection['f4'] else "")
        + ("f5" if feature_selection['f5'] else "")
        + ("f8" if feature_selection['f8'] else "")
        +".csv","w")
    # One CSV row (record) and one training row (trainrecord) per page.
    # The "errorN" prints are sanity checks that `record` grew as expected.
    for seed in pagemeta.keys():
        record = []
        record.append(str(seed)) #page i A
        if len(record) != 1: print "error0",len(record), record
        trainrecord = []
        if seed in target: #label
            record.append(str(target[seed]["totallike"])) #B
            record.append(str(target[seed]["totalpost"])) #C
            if target[seed]["totallike"]>0:
                # Training label is a log2 bucket of the like count.
                fw.write(str(int(math.log(target[seed]["totallike"],2)+1)))
                #fw.write(str(target[seed]["totallike"]))
            else:
                fw.write(str(0))
        else:
            record.append(str(0))
            record.append(str(0))
            fw.write(str(0))
        if len(record) != 3: print "error1",len(record), record
        if seed in prev_target: #likes and posts of the latest section
            record.append(str(prev_target[seed]["totallike"])) #F1 D
            record.append(str(prev_target[seed]["totalpost"])) #F2 E
            if feature_selection['f1']:
                trainrecord.append(str(prev_target[seed]["totallike"]))
                #trainrecord.append(str(prev_target[seed]["totallike"]))
        else:
            record.append(str(0))
            record.append(str(0))
            if feature_selection['f1']:
                trainrecord.append(str(0))
        if len(record) != 5: print "error2",len(record)
        if seed in weekpages: #likes and posts at the same section in N days
            record.append(str(weekpages[seed]["totallike"])) #F3 F
            record.append(str(weekpages[seed]["totalpost"])) #F4 G
            if feature_selection['f3']:
                trainrecord.append(str(weekpages[seed]["totallike"]))
            if feature_selection['f4']:
                trainrecord.append(str(weekpages[seed]["totalpost"]))
        else:
            record.append(str(0))
            record.append(str(0))
            if feature_selection['f3']:
                trainrecord.append(str(0))
            if feature_selection['f4']:
                trainrecord.append(str(0))
        if len(record) != 7: print "error3",len(record)
        if seed in fulldaypages: #likes and posts in N days
            record.append(str(fulldaypages[seed]["totallike"])) #F5 H
            record.append(str(fulldaypages[seed]["totalpost"])) #F6 I
            if seed in weekpages:
                # The daily aggregate is a subset of the 6-hourly one, so this
                # difference going negative indicates corrupted input data.
                if fulldaypages[seed]["totallike"] - weekpages[seed]["totallike"] < 0:
                    raise ValueError('A very specific bad thing happened:'+ str(seed)+"," +
                        str(fulldaypages[seed]["totallike"])+"-"+str(weekpages[seed]["totallike"]))
            if feature_selection['f5']:
                trainrecord.append(str(fulldaypages[seed]["totallike"]))
        else:
            record.append(str(0))
            record.append(str(0))
            if feature_selection['f5']:
                trainrecord.append(str(0))
        if len(record) != 9: print "error4"
        if seed in pagemeta: #fans and talking_about_count at crawling time
            if "likes" not in pagemeta[seed]:
                record.append(str(0))
                record.append(str(0))
            else:
                record.append(str(pagemeta[seed]["likes"])) #F7 J
                record.append(str(pagemeta[seed]["talking_about_count"])) #F8 K
                #record.append(str(0))
            if feature_selection['f8']:
                if "likes" not in pagemeta[seed]:
                    trainrecord.append(str(0))
                else:
                    trainrecord.append(str(pagemeta[seed]["talking_about_count"]))
        else:
            # NOTE(review): unreachable -- the loop iterates pagemeta.keys().
            print seed
            record.append(str(0))
            record.append(str(0))
            if feature_selection['f8']:
                trainrecord.append(str(0))
        if len(record) != 11: print "error5"
        w.write(",".join(record)+"\n")
        # libsvm format: "<label> 1:v1 2:v2 ..." (label written above).
        fw.write(" " + " ".join([ str(i+1) + ":" + _val for i, _val in enumerate(trainrecord) ])+"\n")
    w.close()
    fw.close()
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='Facebook Fan Page post schedular')
parser.add_argument('--answer', dest='answer', action='store_true')
parser.set_defaults(answer=False)
parser.add_argument('-MM', type=int, action="store",dest="MM",help="Mode")
parser.add_argument('-DD', type=int, action="store",dest="DD",help="Mode")
parser.add_argument('-HH', type=int, action="store",dest="HH",help="Mode")
parser.add_argument('-L', type=int, action="store",dest="L",help="back")
parser.add_argument('-k', type=int, action="store",dest="k",help="Mode")
parser.set_defaults(MM=4)
parser.set_defaults(DD=20)
parser.set_defaults(HH=0)
parser.set_defaults(L=7)
parser.set_defaults(k=1)
args = parser.parse_args()
#example
#python featurevector.py -MM 8 -DD 12 -HH 18 -k 6
main(yyyy=2015, MM=args.MM, DD=args.DD, HH=args.HH, khour=args.k, answer=args.answer, history=args.L)
|
import os
from typing import Any
import torch
from torch import nn
from torchvision import datasets
from torchvision import transforms
from torch.nn import functional as F
from torch.utils.data import (
Dataset,
DataLoader
)
from torchvision.utils import make_grid
from PIL import Image
from torch import optim
class Net(nn.Module):
    """Small CNN for 64x64 RGB images: three conv+pool stages followed by two
    linear layers, producing log-probabilities over 6 classes."""

    def __init__(self, num_channels: int):
        super(Net, self).__init__()
        self.num_channels = num_channels
        # Feature extractor: each 3x3 conv stage doubles the channel count.
        self.conv1 = nn.Conv2d(3, self.num_channels, 3, stride=1, padding=1)
        self.conv2 = nn.Conv2d(self.num_channels, self.num_channels * 2, 3, stride=1, padding=1)
        self.conv3 = nn.Conv2d(self.num_channels * 2, self.num_channels * 4, 3, stride=1, padding=1)
        # Classifier head: after three 2x2 poolings a 64x64 input is reduced
        # to (num_channels*4) x 8 x 8.
        self.fc1 = nn.Linear(self.num_channels * 4 * 8 * 8,
                             self.num_channels * 4)
        self.fc2 = nn.Linear(self.num_channels * 4, 6)

    def forward(self, x) -> Any:
        """Map a (batch, 3, 64, 64) tensor to (batch, 6) log-probabilities."""
        # Each stage: conv -> 2x2 max-pool (halves H and W) -> ReLU.
        for conv in (self.conv1, self.conv2, self.conv3):
            x = F.relu(F.max_pool2d(conv(x), 2))
        # Flatten and classify.
        flat = x.view(-1, self.num_channels * 4 * 8 * 8)
        hidden = F.relu(self.fc1(flat))
        return F.log_softmax(self.fc2(hidden), dim=1)
# mounting dataset
class SIGNSDataset(Dataset):
    """SIGNS image dataset: images live under <base_dir>/<split>_signs/ and
    the label is the first character of each .jpg filename (e.g. "3_xxx.jpg").
    """

    def __init__(self, base_dir, split="train", transform=None):
        path = os.path.join(base_dir, "{}_signs".format(split))
        # BUG FIX: labels were previously built from *every* directory entry
        # while filenames filtered to .jpg, so a stray non-jpg file (e.g.
        # ".DS_Store", "README") either crashed int() or misaligned labels
        # with images. Filter once, and sort for a deterministic order.
        jpg_names = sorted(f for f in os.listdir(path) if f.endswith(".jpg"))
        self.filenames = [os.path.join(path, f) for f in jpg_names]
        self.targets = [int(f[0]) for f in jpg_names]
        self.transform = transform

    def __len__(self):
        return len(self.filenames)

    def __getitem__(self, idx):
        # PIL opens the file lazily; apply the optional transform (e.g. ToTensor).
        image = Image.open(self.filenames[idx])
        if self.transform:
            image = self.transform(image)
        return image, self.targets[idx]
def main():
    # Smoke-test the dataset: load the training split and print the first image tensor.
    dataset = SIGNSDataset(base_dir="datasets",
                           split="train",
                           transform=transforms.ToTensor())
    first_image, _ = dataset[0], None
    print(dataset[0][0])
class RunningMetric:
    """Accumulates a weighted sum and a count; calling the instance returns
    the running average. Calling before any update raises ZeroDivisionError."""

    def __init__(self):
        self.S = 0  # running total
        self.N = 0  # number of samples seen

    def update(self, val, size):
        """Fold in `val` (already summed over `size` samples)."""
        self.S = self.S + val
        self.N = self.N + size

    def __call__(self):
        """Return the running average S / N."""
        return self.S / float(self.N)
# def main():
# signs = SIGNSDataset(base_dir="datasets",
# split="train",
# transform=transforms.ToTensor())
# # deliver in batches to the neuron network
# dataloader = DataLoader(signs, batch_size=32)
# network = Net(32)
# loss_function = nn.NLLLoss()
# optimizer = optim.SGD(network.parameters(), lr=1e-3, momentum=0.9)
# epochs = 100
# for epoch in range(epochs):
# print(f"Epoch => {epoch}/{epochs}")
# running_loss = RunningMetric() # error ice in the network
# running_acc = RunningMetric() # ice of the accuracy
# for inputs, targets in dataloader:
# # reload gradients to zero
# # because in the last batch
# # the gradients were modified
# # insied the optimizer, so in the new batch
# # there's to carry to zero
# optimizer.zero_grad()
# outputs = network(inputs)
# _, preds = torch.max(outputs, 1)
# loss = loss_function(outputs, targets)
# loss.backward() #magias: gradientes calculados automaticamente
# optimizer.step() #magia2: actualiza las perillas o los parametros
# batch_size = inputs.size()[0]
# running_loss.update(loss.item()*batch_size,
# batch_size)
# running_acc.update(torch.sum(preds == targets).float(),
# batch_size)
# print("Loss: {:.4f} Acc: {:.4f}".format(running_loss(), running_acc()))
if __name__ == "__main__":
main()
|
class Location:
    """Plain value object describing a location record."""

    def __init__(self, location_id, name, area, dimension, timeframe):
        # Store every constructor argument verbatim.
        (self.location_id, self.name, self.area,
         self.dimension, self.timeframe) = (location_id, name, area,
                                            dimension, timeframe)
|
import dataclasses
from kadena import types
work_header_bytes = bytes.fromhex(
"05000000d3a6e63c5b5767267eb9021d98be2333d4af4bd3d1df740f7fffc70a000000000000000000000000fa5b1b55de9605006d4503feb42b2494cacf9bd1b1998300c53cc31c3c0601a427e159a586befc3e030000000000022442d2f610b4db056a254b8c34378fdab681651c379a9d82b07eee5b242775060000007972e7a310a9941a5f77c5b864cc21ae95a9c72712f5e9d876b79dd7db45280f090000008019cf71bcdbe10621b75d9364a3a3ebe8a5bd546bc4924e61a3aa1672994ec2d3a6e63c5b5767267eb9021d98be2333d4af4bd3d1df740f7fffc70a0000000077c681d5bb4290eb0fa481bc8b1d703883c0e3e4fb41ad78012762a15a1ffbe105000000b452fc44ba010100000000000000000000000000000000000000000000000000307100000000000005000000aadbc3f0dd9605000000000000000000"
)
decoded_work_header = types.WorkHeader(
chain=5,
target="06bmPFtXZyZ-uQIdmL4jM9SvS9PR33QPf__HCgAAAAA",
nonce=0,
time=1573256538315770,
parent="bUUD_rQrJJTKz5vRsZmDAMU8wxw8BgGkJ-FZpYa-_D4",
adjacents={
0: "AiRC0vYQtNsFaiVLjDQ3j9q2gWUcN5qdgrB-7lskJ3U",
6: "eXLnoxCplBpfd8W4ZMwhrpWpxycS9enYdred19tFKA8",
9: "gBnPcbzb4QYht12TZKOj6-ilvVRrxJJOYaOqFnKZTsI",
},
payload="d8aB1btCkOsPpIG8ix1wOIPA4-T7Qa14ASdioVof--E",
weight=283374509642420,
height=28976,
version=5,
epoch_start=1573254854859690,
flags=0,
difficulty=101983759913,
)
block_header = types.BlockHeader(
nonce=8305999242,
time=1573381243298146,
parent="0ksoqElFS1O92ELs4H9eJVw1GpwhyvMm2ghsvuk8oTg",
adjacents={
4: "WROEIBmF5Tj1GpWN3sFjqkNji3o8u5jmKZyhLlHtSBQ",
5: "Y119tFyVzf0ZkDa1fr_loPkgP_2o--UbrdZRXEC-ht8",
8: "OBEvbGZFJC1pKJOf9kCklZpLKVK7LMkyK3TDd8Qyjn8",
},
target="HNCMLDL-Iq96jjq6Sy4wbTREcvRSgPmXWHCmBgAAAAA",
payload="ajRtuwfvpXKukOzDNVHoJxqi5eW9vKdogdEtLE437Ro",
chain=9,
weight=737270491038141,
height=33157,
version="mainnet01",
epoch_start=1573380104334007,
flags=0,
hash="Yui29I2k4S0BQiIRNR1vdsvxoyzBliCQIWomn--bZjU",
difficulty=165336321130,
)
block_header_int_version = dataclasses.replace(block_header, version=5)
block_header_base64 = "in0T7wEAAABiYRpe-5YFANJLKKhJRUtTvdhC7OB_XiVcNRqcIcrzJtoIbL7pPKE4AwAEAAAAWROEIBmF5Tj1GpWN3sFjqkNji3o8u5jmKZyhLlHtSBQFAAAAY119tFyVzf0ZkDa1fr_loPkgP_2o--UbrdZRXEC-ht8IAAAAOBEvbGZFJC1pKJOf9kCklZpLKVK7LMkyK3TDd8Qyjn8c0IwsMv4ir3qOOrpLLjBtNERy9FKA-ZdYcKYGAAAAAGo0bbsH76VyrpDswzVR6CcaouXlvbynaIHRLSxON-0aCQAAAL3BuCmLngIAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAhYEAAAAAAAAFAAAAtyo3GvuWBQAAAAAAAAAAAGLotvSNpOEtAUIiETUdb3bL8aMswZYgkCFqJp_vm2Y1"
block_header_bytes = bytes.fromhex(
"8a7d13ef0100000062611a5efb960500d24b28a849454b53bdd842ece07f5e255c351a9c21caf326da086cbee93ca138030004000000591384201985e538f51a958ddec163aa43638b7a3cbb98e6299ca12e51ed481405000000635d7db45c95cdfd199036b57ebfe5a0f9203ffda8fbe51badd6515c40be86df0800000038112f6c6645242d6928939ff640a4959a4b2952bb2cc9322b74c377c4328e7f1cd08c2c32fe22af7a8e3aba4b2e306d344472f45280f9975870a606000000006a346dbb07efa572ae90ecc33551e8271aa2e5e5bdbca76881d12d2c4e37ed1a09000000bdc1b8298b9e0200000000000000000000000000000000000000000000000000858100000000000005000000b72a371afb960500000000000000000062e8b6f48da4e12d01422211351d6f76cbf1a32cc1962090216a269fef9b6635"
)
block_header_object = {
"creationTime": 1573381243298146,
"parent": "0ksoqElFS1O92ELs4H9eJVw1GpwhyvMm2ghsvuk8oTg",
"height": 33157,
"hash": "Yui29I2k4S0BQiIRNR1vdsvxoyzBliCQIWomn--bZjU",
"chainId": 9,
"weight": "vcG4KYueAgAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA",
"featureFlags": 0,
"epochStart": 1573380104334007,
"adjacents": {
"4": "WROEIBmF5Tj1GpWN3sFjqkNji3o8u5jmKZyhLlHtSBQ",
"5": "Y119tFyVzf0ZkDa1fr_loPkgP_2o--UbrdZRXEC-ht8",
"8": "OBEvbGZFJC1pKJOf9kCklZpLKVK7LMkyK3TDd8Qyjn8",
},
"payloadHash": "ajRtuwfvpXKukOzDNVHoJxqi5eW9vKdogdEtLE437Ro",
"chainwebVersion": "mainnet01",
"target": "HNCMLDL-Iq96jjq6Sy4wbTREcvRSgPmXWHCmBgAAAAA",
"nonce": "8305999242",
}
def test_decode_work_header():
    """Raw work-header bytes decode to the expected WorkHeader fixture."""
    assert types.decode_header(work_header_bytes) == decoded_work_header
def test_decode_block_header_base64():
    """Base64 input decodes with the numeric chainweb version (5)."""
    assert types.decode_header(block_header_base64) == block_header_int_version
def test_decode_block_header_bytes():
    """Raw bytes input decodes with the numeric chainweb version (5)."""
    assert types.decode_header(block_header_bytes) == block_header_int_version
def test_decode_block_header_object():
    """A JSON-style dict decodes with the string version ("mainnet01")."""
    assert types.decode_header(block_header_object) == block_header
|
import sqlite3
from sqlite3 import Error
# Test creating a connection to the DB
def create_connection(path):
    """Try to open (and immediately close) a SQLite database at `path`.

    Returns "0" on success, "1" on failure -- the string codes are kept
    for compatibility with existing callers.
    """
    try:
        connection = sqlite3.connect(path)
        # FIX: close the handle instead of leaking it; this function is
        # purely a connectivity check.
        connection.close()
        return ("0")
    except Error:
        return ("1")
# Table-creation tests
def create_TableW(path):
    """Create the Workers table (if missing) in the database at `path`.

    Returns "0" on success, "1" on a SQLite error (string codes kept
    for compatibility with existing callers).
    """
    try:
        connection = sqlite3.connect(path)
        try:
            c = connection.cursor()
            c.execute("CREATE TABLE IF NOT EXISTS Workers(roll INTEGER PRIMARY KEY AUTOINCREMENT ,name TEXT,branch TEXT,sem INTEGER,mobile INTEGER,address TEXT)")
            connection.commit()
            c.close()
        finally:
            # FIX: the connection previously stayed open when execute() raised.
            connection.close()
        return ("0")
    except Error:
        return ("1")
def create_TableL(path):
    """Create the Late table (if missing) in the database at `path`.

    Returns "0" on success, "1" on a SQLite error (string codes kept
    for compatibility with existing callers).
    """
    try:
        connection = sqlite3.connect(path)
        try:
            c = connection.cursor()
            c.execute("CREATE TABLE IF NOT EXISTS Late(roll INTEGER PRIMARY KEY AUTOINCREMENT ,name TEXT,count INTEGER)")
            connection.commit()
            c.close()
        finally:
            # FIX: the connection previously stayed open when execute() raised.
            connection.close()
        return ("0")
    except Error:
        return ("1")
# Tests for the search (SELECT) helper
def check_Select(path, query):
    """Execute `query` against the database at `path`.

    Returns "0" if the query runs, "1" on a SQLite error (string codes
    kept for compatibility with existing callers).
    """
    connection = sqlite3.connect(path)
    cursor = connection.cursor()
    try:
        cursor.execute(query)
        return ("0")
    except Error:
        return ("1")
    finally:
        # FIX: always release the handles; they previously leaked on both paths.
        cursor.close()
        connection.close()
|
# Generated by Django 2.2.2 on 2019-06-25 18:24
from django.db import migrations
class Migration(migrations.Migration):
    # Drops the `password` and `password2` columns from the Host model.
    # NOTE(review): RemoveField is destructive -- the column data is lost
    # when this migration is applied.

    dependencies = [
        ('host', '0002_auto_20190625_1041'),
    ]

    operations = [
        migrations.RemoveField(
            model_name='host',
            name='password',
        ),
        migrations.RemoveField(
            model_name='host',
            name='password2',
        ),
    ]
|
""" proc.py - simple process control with python class """
import os,traceback,time,subprocess,sys
def osCmd(cmd):
    # Run `cmd` through the shell and report how the child exited.
    print 'Execute %s' % cmd
    #os.system( cmd )
    try:
        retcode = subprocess.call(cmd, shell=True)
        if retcode < 0:
            # A negative return code means the child was killed by that signal.
            print >> sys.stderr, "Child was terminated by signal", -retcode
        else:
            print >> sys.stderr, "Child returned", retcode
    except OSError as e:
        print >> sys.stderr, "Execution failed:", e
class Proc(object):
    """ Create process using subprocess.call method (same as os.system)
        uses the window title to identify the process (not very good)
    """
    def __init__(self, title, path, exe, args='/min'):
        # `title` is the window title later used to find/kill the process
        # (a Windows-only identification trick).
        self.title = title
        self.path = path
        self.exe = exe
        self.args = args
        self._setPath()
        self.state = 'not running'
    def _setPath(self):
        # Resolve the working directory relative to the current directory.
        self.path = os.path.join( os.getcwd(), self.path)
    def start(self, args=''):
        # Windows `start` launches the exe detached with the given title/cwd.
        osCmd( 'start "%s" /D "%s" %s %s %s' % (self.title,self.path, self.args, self.exe, args))
        self.state = 'started'
    def kill(self):
        # Kill by window title via taskkill's filter syntax (Windows).
        osCmd( 'taskkill /fi "windowtitle eq %s"' % self.title )
    def terminate(self):
        self.kill()
    def monitor(self):
        # No-op here; Proc2 overrides this with a real liveness check.
        pass
    def __str__(self):
        return '%-12s - %s' % (self.title, self.state)
class Proc2(Proc):
    """ Create process using subprocess.Popen method and save the process instance """
    def __init__(self, title, path, exe, args=''):
        Proc.__init__(self, title, path, exe, args)
        self.proc = None  # subprocess.Popen handle once started
    def start(self, args=''):
        # Launch directly via Popen (no shell), keeping the handle for
        # later terminate()/monitor() calls.
        cmd = self.exe
        print 'Starting %s' % self.title
        print ' cmd:%s' % cmd
        print ' cwd:%s' % self.path
        self.proc = subprocess.Popen( cmd, cwd=self.path) #, shell=True)
        print ' proc:%s' % self.proc
    def terminate(self):
        # Politely terminate the child, showing its poll() state before/after.
        if self.proc:
            print 'Terminate %s' % self.title
            print ' proc:%s' % self.proc
            poll = self.proc.poll()
            print ' before proc.poll():%s' % poll
            self.proc.terminate()
            poll = self.proc.poll()
            print ' after proc.poll():%s' % poll
    def monitor(self):
        #print 'Monitor %s' % self.title
        #print ' proc:%s' % self.proc
        # poll() returns the exit code once the child exits (None while alive).
        if self.proc:
            if self.proc.poll():
                print ' Process %s pid:%s has terminated' % (self.title, self.proc.pid)
    def __str__(self):
        if self.proc:
            return '%-12s - pid:%s' % (self.title, self.proc.pid)
        else:
            return '%-12s - not running' % (self.title)
if __name__ == '__main__':
    # Ad-hoc smoke test: run iperf in CSV mode (-y C) against a fixed host
    # and parse the single CSV result line into a dict.
    import subprocess
    resp = None
    try:
        iperfIPAddr = '10.0.1.36'
        iperfTime = 5
        lstCmds = ['c:\\iperf\\iperf.exe', '-c', iperfIPAddr, '-t', str(iperfTime), '-y', 'C']
        resp = subprocess.check_output(lstCmds,stderr=subprocess.STDOUT)
        # iperf CSV field order: time, local ip, local port, remote ip,
        # remote port, client id, interval, transfer, bandwidth.
        lst = resp.split(',')
        dct = {}
        dct['time'] = lst[0]
        dct['local_ipAddr'] = lst[1]
        dct['local_port'] = int(lst[2])
        dct['ipaddr'] = lst[3]
        dct['port'] = int(lst[4])
        dct['client_id'] = int(lst[5])
        dct['interval'] = lst[6]
        dct['transfer'] = float(lst[7])
        dct['bandwidth'] = float(lst[8])
        print dct
    except Exception,err:
        print 'Exception %s' % err
        if resp:
            print 'resp:%s' % resp
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Date : 2020-04-24 16:41:39
# @Author : Fallen (xdd043@qq.com)
# @Link : https://github.com/fallencrasher/python-learning
# @Version : $Id$
import time
class Date(object):
    """A plain year/month/day value with an alternate constructor for today."""

    def __init__(self, year, month, day):
        self.year, self.month, self.day = year, month, day

    @classmethod
    def today(cls):
        """Print today's local date and return it as a Date instance."""
        now = time.localtime()
        print(now.tm_year, '-', now.tm_mon, '-', now.tm_mday)
        return cls(now.tm_year, now.tm_mon, now.tm_mday)
# Demonstrate the classmethod constructor: build today's Date and echo its parts.
date = Date.today()
print(date.year,date.month,date.day)
|
# Print a checkerboard-style banner built from two alternating tokens:
# two rows of "ーー++" blocks, then two offset rows of "++ーー" blocks,
# with the four-row group repeated four times.
pat0 = "ーー"
pat1 = "++"
row_a = (pat0 + pat1) * 8 + "\n"
row_b = (pat1 + pat0) * 8 + "\n"
flag = (row_a * 2 + row_b * 2) * 4
print(flag)
# |
import turtle

# Iteration number -> new pen colour for the spiral.
COLOR_STOPS = {
    50: 'red', 80: 'blue', 120: 'green', 150: 'purple',
    190: 'SaddleBrown', 220: 'OrangeRed', 260: 'Orchid',
    300: 'Aquamarine', 350: 'Teal', 400: 'DarkViolet',
    450: 'Maroon', 499: 'SlateBlue',
}

tur = turtle.Turtle()
scr = turtle.Screen()
scr.bgcolor('black')
tur.pencolor('white')
tur.speed(0)
tur.penup()
tur.goto(-400, -250)
tur.pendown()
# Draw 500 long strokes with a 244-degree turn each time, switching the pen
# colour at the same iteration numbers as the original if-chain.
for i in range(500):
    tur.forward(850)
    tur.right(244)
    if i in COLOR_STOPS:
        tur.pencolor(COLOR_STOPS[i])
turtle.done()
|
import unittest
from katas.beta.trumpness_detector import trump_detector
class TrumpDetectorTestCase(unittest.TestCase):
    """Expected trumpness scores for sample phrases.

    Method names are preserved verbatim: unittest discovers tests by name.
    """
    def test_equal_1(self):
        # No stretched vowels at all -> score 0.
        self.assertEqual(trump_detector('I will build a huge wall'), 0)
    def test_equal_2(self):
        self.assertEqual(trump_detector('HUUUUUGEEEE WAAAAAALL'), 4)
    def test_equal_3(self):
        self.assertEqual(trump_detector(
            'MEXICAAAAAAAANS GOOOO HOOOMEEEE'
        ), 2.5)
    def test_equal_4(self):
        # Scores are rounded to two decimals by the implementation under test.
        self.assertEqual(trump_detector(
            'America NUUUUUKEEEE Oooobaaaamaaaaa'
        ), 1.89)
    def test_equal_5(self):
        self.assertEqual(trump_detector(
            'listen migrants: IIII KIIIDD YOOOUUU NOOOOOOTTT'
        ), 1.56)
|
from functools import partial
from typing import Any, List, Optional, Type, Union
import torch
import torch.nn as nn
from torch import Tensor
from torchvision.models.resnet import (
BasicBlock,
Bottleneck,
ResNet,
ResNet18_Weights,
ResNet50_Weights,
ResNeXt101_32X8D_Weights,
ResNeXt101_64X4D_Weights,
)
from ...transforms._presets import ImageClassification
from .._api import register_model, Weights, WeightsEnum
from .._meta import _IMAGENET_CATEGORIES
from .._utils import _ovewrite_named_param, handle_legacy_interface
from .utils import _fuse_modules, _replace_relu, quantize_model
# Public API of this module: the quantizable model class, the quantized
# weight enums, and the model builder functions.
__all__ = [
    "QuantizableResNet",
    "ResNet18_QuantizedWeights",
    "ResNet50_QuantizedWeights",
    "ResNeXt101_32X8D_QuantizedWeights",
    "ResNeXt101_64X4D_QuantizedWeights",
    "resnet18",
    "resnet50",
    "resnext101_32x8d",
    "resnext101_64x4d",
]
class QuantizableBasicBlock(BasicBlock):
    """BasicBlock whose residual add+ReLU goes through FloatFunctional so it
    can be observed and quantized in eager-mode quantization."""

    def __init__(self, *args: Any, **kwargs: Any) -> None:
        super().__init__(*args, **kwargs)
        # Fused add+ReLU op that quantization observers can attach to.
        self.add_relu = torch.nn.quantized.FloatFunctional()

    def forward(self, x: Tensor) -> Tensor:
        identity = x
        out = self.conv1(x)
        out = self.bn1(out)
        out = self.relu(out)
        out = self.conv2(out)
        out = self.bn2(out)
        if self.downsample is not None:
            identity = self.downsample(x)
        # Residual add + ReLU via FloatFunctional (quantization-aware).
        out = self.add_relu.add_relu(out, identity)
        return out

    def fuse_model(self, is_qat: Optional[bool] = None) -> None:
        # Fuse conv+bn(+relu) pairs in place ahead of quantization.
        _fuse_modules(self, [["conv1", "bn1", "relu"], ["conv2", "bn2"]], is_qat, inplace=True)
        if self.downsample:
            _fuse_modules(self.downsample, ["0", "1"], is_qat, inplace=True)
class QuantizableBottleneck(Bottleneck):
    """Bottleneck with quantization-friendly ops: a FloatFunctional for the
    skip add+ReLU, and separate (non-inplace) ReLUs so each can be fused."""

    def __init__(self, *args: Any, **kwargs: Any) -> None:
        super().__init__(*args, **kwargs)
        self.skip_add_relu = nn.quantized.FloatFunctional()
        # Distinct ReLU modules (instead of the shared self.relu) so that
        # fuse_model can fuse each conv+bn+relu triple independently.
        self.relu1 = nn.ReLU(inplace=False)
        self.relu2 = nn.ReLU(inplace=False)

    def forward(self, x: Tensor) -> Tensor:
        identity = x
        out = self.conv1(x)
        out = self.bn1(out)
        out = self.relu1(out)
        out = self.conv2(out)
        out = self.bn2(out)
        out = self.relu2(out)
        out = self.conv3(out)
        out = self.bn3(out)
        if self.downsample is not None:
            identity = self.downsample(x)
        # Residual add + ReLU via FloatFunctional (quantization-aware).
        out = self.skip_add_relu.add_relu(out, identity)
        return out

    def fuse_model(self, is_qat: Optional[bool] = None) -> None:
        # Fuse conv+bn+relu triples (and the final conv+bn) in place.
        _fuse_modules(
            self, [["conv1", "bn1", "relu1"], ["conv2", "bn2", "relu2"], ["conv3", "bn3"]], is_qat, inplace=True
        )
        if self.downsample:
            _fuse_modules(self.downsample, ["0", "1"], is_qat, inplace=True)
class QuantizableResNet(ResNet):
    """ResNet wrapped with Quant/DeQuant stubs so inputs are quantized on
    entry and outputs dequantized on exit."""

    def __init__(self, *args: Any, **kwargs: Any) -> None:
        super().__init__(*args, **kwargs)
        self.quant = torch.ao.quantization.QuantStub()
        self.dequant = torch.ao.quantization.DeQuantStub()

    def forward(self, x: Tensor) -> Tensor:
        x = self.quant(x)
        # Ensure scriptability
        # super(QuantizableResNet,self).forward(x)
        # is not scriptable
        x = self._forward_impl(x)
        x = self.dequant(x)
        return x

    def fuse_model(self, is_qat: Optional[bool] = None) -> None:
        r"""Fuse conv/bn/relu modules in resnet models
        Fuse conv+bn+relu/ Conv+relu/conv+Bn modules to prepare for quantization.
        Model is modified in place. Note that this operation does not change numerics
        and the model after modification is in floating point
        """
        # Fuse the stem first, then delegate to each quantizable block.
        _fuse_modules(self, ["conv1", "bn1", "relu"], is_qat, inplace=True)
        for m in self.modules():
            if type(m) is QuantizableBottleneck or type(m) is QuantizableBasicBlock:
                m.fuse_model(is_qat)
def _resnet(
    block: Type[Union[QuantizableBasicBlock, QuantizableBottleneck]],
    layers: List[int],
    weights: Optional[WeightsEnum],
    progress: bool,
    quantize: bool,
    **kwargs: Any,
) -> QuantizableResNet:
    """Common builder: construct the model, optionally quantize it, then load weights."""
    if weights is not None:
        # Weight metadata fixes the classifier size and preferred quantization backend.
        _ovewrite_named_param(kwargs, "num_classes", len(weights.meta["categories"]))
        if "backend" in weights.meta:
            _ovewrite_named_param(kwargs, "backend", weights.meta["backend"])
    backend = kwargs.pop("backend", "fbgemm")
    model = QuantizableResNet(block, layers, **kwargs)
    # Replace in-place ReLUs; required before eager-mode quantization.
    _replace_relu(model)
    if quantize:
        quantize_model(model, backend)
    # Load weights after quantization so quantized state dicts match the model.
    if weights is not None:
        model.load_state_dict(weights.get_state_dict(progress=progress, check_hash=True))
    return model
_COMMON_META = {
"min_size": (1, 1),
"categories": _IMAGENET_CATEGORIES,
"backend": "fbgemm",
"recipe": "https://github.com/pytorch/vision/tree/main/references/classification#post-training-quantized-models",
"_docs": """
These weights were produced by doing Post Training Quantization (eager mode) on top of the unquantized
weights listed below.
""",
}
class ResNet18_QuantizedWeights(WeightsEnum):
    # Post-training quantized (fbgemm) weights; "unquantized" names the float
    # weights these were derived from.
    IMAGENET1K_FBGEMM_V1 = Weights(
        url="https://download.pytorch.org/models/quantized/resnet18_fbgemm_16fa66dd.pth",
        transforms=partial(ImageClassification, crop_size=224),
        meta={
            **_COMMON_META,
            "num_params": 11689512,
            "unquantized": ResNet18_Weights.IMAGENET1K_V1,
            "_metrics": {
                "ImageNet-1K": {
                    "acc@1": 69.494,
                    "acc@5": 88.882,
                }
            },
            "_ops": 1.814,
            "_file_size": 11.238,
        },
    )
    DEFAULT = IMAGENET1K_FBGEMM_V1
class ResNet50_QuantizedWeights(WeightsEnum):
    # Two generations of quantized weights; V2 (higher accuracy) is the default.
    IMAGENET1K_FBGEMM_V1 = Weights(
        url="https://download.pytorch.org/models/quantized/resnet50_fbgemm_bf931d71.pth",
        transforms=partial(ImageClassification, crop_size=224),
        meta={
            **_COMMON_META,
            "num_params": 25557032,
            "unquantized": ResNet50_Weights.IMAGENET1K_V1,
            "_metrics": {
                "ImageNet-1K": {
                    "acc@1": 75.920,
                    "acc@5": 92.814,
                }
            },
            "_ops": 4.089,
            "_file_size": 24.759,
        },
    )
    IMAGENET1K_FBGEMM_V2 = Weights(
        url="https://download.pytorch.org/models/quantized/resnet50_fbgemm-23753f79.pth",
        transforms=partial(ImageClassification, crop_size=224, resize_size=232),
        meta={
            **_COMMON_META,
            "num_params": 25557032,
            "unquantized": ResNet50_Weights.IMAGENET1K_V2,
            "_metrics": {
                "ImageNet-1K": {
                    "acc@1": 80.282,
                    "acc@5": 94.976,
                }
            },
            "_ops": 4.089,
            "_file_size": 24.953,
        },
    )
    DEFAULT = IMAGENET1K_FBGEMM_V2
class ResNeXt101_32X8D_QuantizedWeights(WeightsEnum):
    # Two generations of quantized weights; V2 (higher accuracy) is the default.
    IMAGENET1K_FBGEMM_V1 = Weights(
        url="https://download.pytorch.org/models/quantized/resnext101_32x8_fbgemm_09835ccf.pth",
        transforms=partial(ImageClassification, crop_size=224),
        meta={
            **_COMMON_META,
            "num_params": 88791336,
            "unquantized": ResNeXt101_32X8D_Weights.IMAGENET1K_V1,
            "_metrics": {
                "ImageNet-1K": {
                    "acc@1": 78.986,
                    "acc@5": 94.480,
                }
            },
            "_ops": 16.414,
            "_file_size": 86.034,
        },
    )
    IMAGENET1K_FBGEMM_V2 = Weights(
        url="https://download.pytorch.org/models/quantized/resnext101_32x8_fbgemm-ee16d00c.pth",
        transforms=partial(ImageClassification, crop_size=224, resize_size=232),
        meta={
            **_COMMON_META,
            "num_params": 88791336,
            "unquantized": ResNeXt101_32X8D_Weights.IMAGENET1K_V2,
            "_metrics": {
                "ImageNet-1K": {
                    "acc@1": 82.574,
                    "acc@5": 96.132,
                }
            },
            "_ops": 16.414,
            "_file_size": 86.645,
        },
    )
    DEFAULT = IMAGENET1K_FBGEMM_V2
class ResNeXt101_64X4D_QuantizedWeights(WeightsEnum):
    # Single quantized release; overrides the common recipe URL.
    IMAGENET1K_FBGEMM_V1 = Weights(
        url="https://download.pytorch.org/models/quantized/resnext101_64x4d_fbgemm-605a1cb3.pth",
        transforms=partial(ImageClassification, crop_size=224, resize_size=232),
        meta={
            **_COMMON_META,
            "num_params": 83455272,
            "recipe": "https://github.com/pytorch/vision/pull/5935",
            "unquantized": ResNeXt101_64X4D_Weights.IMAGENET1K_V1,
            "_metrics": {
                "ImageNet-1K": {
                    "acc@1": 82.898,
                    "acc@5": 96.326,
                }
            },
            "_ops": 15.46,
            "_file_size": 81.556,
        },
    )
    DEFAULT = IMAGENET1K_FBGEMM_V1
@register_model(name="quantized_resnet18")
@handle_legacy_interface(
    weights=(
        "pretrained",
        # Legacy pretrained=True maps to quantized or float weights depending
        # on the `quantize` flag.
        lambda kwargs: ResNet18_QuantizedWeights.IMAGENET1K_FBGEMM_V1
        if kwargs.get("quantize", False)
        else ResNet18_Weights.IMAGENET1K_V1,
    )
)
def resnet18(
    *,
    weights: Optional[Union[ResNet18_QuantizedWeights, ResNet18_Weights]] = None,
    progress: bool = True,
    quantize: bool = False,
    **kwargs: Any,
) -> QuantizableResNet:
    """ResNet-18 model from
    `Deep Residual Learning for Image Recognition <https://arxiv.org/abs/1512.03385>`_
    .. note::
        Note that ``quantize = True`` returns a quantized model with 8 bit
        weights. Quantized models only support inference and run on CPUs.
        GPU inference is not yet supported.
    Args:
        weights (:class:`~torchvision.models.quantization.ResNet18_QuantizedWeights` or :class:`~torchvision.models.ResNet18_Weights`, optional): The
            pretrained weights for the model. See
            :class:`~torchvision.models.quantization.ResNet18_QuantizedWeights` below for
            more details, and possible values. By default, no pre-trained
            weights are used.
        progress (bool, optional): If True, displays a progress bar of the
            download to stderr. Default is True.
        quantize (bool, optional): If True, return a quantized version of the model. Default is False.
        **kwargs: parameters passed to the ``torchvision.models.quantization.QuantizableResNet``
            base class. Please refer to the `source code
            <https://github.com/pytorch/vision/blob/main/torchvision/models/quantization/resnet.py>`_
            for more details about this class.
    .. autoclass:: torchvision.models.quantization.ResNet18_QuantizedWeights
        :members:
    .. autoclass:: torchvision.models.ResNet18_Weights
        :members:
        :noindex:
    """
    # Validate against the quantized or float enum depending on `quantize`.
    weights = (ResNet18_QuantizedWeights if quantize else ResNet18_Weights).verify(weights)
    return _resnet(QuantizableBasicBlock, [2, 2, 2, 2], weights, progress, quantize, **kwargs)
@register_model(name="quantized_resnet50")
@handle_legacy_interface(
    weights=(
        "pretrained",
        # Legacy pretrained=True maps to quantized or float weights depending
        # on the `quantize` flag.
        lambda kwargs: ResNet50_QuantizedWeights.IMAGENET1K_FBGEMM_V1
        if kwargs.get("quantize", False)
        else ResNet50_Weights.IMAGENET1K_V1,
    )
)
def resnet50(
    *,
    weights: Optional[Union[ResNet50_QuantizedWeights, ResNet50_Weights]] = None,
    progress: bool = True,
    quantize: bool = False,
    **kwargs: Any,
) -> QuantizableResNet:
    """ResNet-50 model from
    `Deep Residual Learning for Image Recognition <https://arxiv.org/abs/1512.03385>`_
    .. note::
        Note that ``quantize = True`` returns a quantized model with 8 bit
        weights. Quantized models only support inference and run on CPUs.
        GPU inference is not yet supported.
    Args:
        weights (:class:`~torchvision.models.quantization.ResNet50_QuantizedWeights` or :class:`~torchvision.models.ResNet50_Weights`, optional): The
            pretrained weights for the model. See
            :class:`~torchvision.models.quantization.ResNet50_QuantizedWeights` below for
            more details, and possible values. By default, no pre-trained
            weights are used.
        progress (bool, optional): If True, displays a progress bar of the
            download to stderr. Default is True.
        quantize (bool, optional): If True, return a quantized version of the model. Default is False.
        **kwargs: parameters passed to the ``torchvision.models.quantization.QuantizableResNet``
            base class. Please refer to the `source code
            <https://github.com/pytorch/vision/blob/main/torchvision/models/quantization/resnet.py>`_
            for more details about this class.
    .. autoclass:: torchvision.models.quantization.ResNet50_QuantizedWeights
        :members:
    .. autoclass:: torchvision.models.ResNet50_Weights
        :members:
        :noindex:
    """
    # Validate against the quantized or float enum depending on `quantize`.
    weights = (ResNet50_QuantizedWeights if quantize else ResNet50_Weights).verify(weights)
    return _resnet(QuantizableBottleneck, [3, 4, 6, 3], weights, progress, quantize, **kwargs)
@register_model(name="quantized_resnext101_32x8d")
@handle_legacy_interface(
    weights=(
        "pretrained",
        # Legacy pretrained=True maps to quantized or float weights depending
        # on the `quantize` flag.
        lambda kwargs: ResNeXt101_32X8D_QuantizedWeights.IMAGENET1K_FBGEMM_V1
        if kwargs.get("quantize", False)
        else ResNeXt101_32X8D_Weights.IMAGENET1K_V1,
    )
)
def resnext101_32x8d(
    *,
    weights: Optional[Union[ResNeXt101_32X8D_QuantizedWeights, ResNeXt101_32X8D_Weights]] = None,
    progress: bool = True,
    quantize: bool = False,
    **kwargs: Any,
) -> QuantizableResNet:
    """ResNeXt-101 32x8d model from
    `Aggregated Residual Transformation for Deep Neural Networks <https://arxiv.org/abs/1611.05431>`_
    .. note::
        Note that ``quantize = True`` returns a quantized model with 8 bit
        weights. Quantized models only support inference and run on CPUs.
        GPU inference is not yet supported.
    Args:
        weights (:class:`~torchvision.models.quantization.ResNeXt101_32X8D_QuantizedWeights` or :class:`~torchvision.models.ResNeXt101_32X8D_Weights`, optional): The
            pretrained weights for the model. See
            :class:`~torchvision.models.quantization.ResNeXt101_32X8D_QuantizedWeights` below for
            more details, and possible values. By default, no pre-trained
            weights are used.
        progress (bool, optional): If True, displays a progress bar of the
            download to stderr. Default is True.
        quantize (bool, optional): If True, return a quantized version of the model. Default is False.
        **kwargs: parameters passed to the ``torchvision.models.quantization.QuantizableResNet``
            base class. Please refer to the `source code
            <https://github.com/pytorch/vision/blob/main/torchvision/models/quantization/resnet.py>`_
            for more details about this class.
    .. autoclass:: torchvision.models.quantization.ResNeXt101_32X8D_QuantizedWeights
        :members:
    .. autoclass:: torchvision.models.ResNeXt101_32X8D_Weights
        :members:
        :noindex:
    """
    weights = (ResNeXt101_32X8D_QuantizedWeights if quantize else ResNeXt101_32X8D_Weights).verify(weights)
    # ResNeXt variant: 32 groups of width 8 on the standard 101-layer layout.
    _ovewrite_named_param(kwargs, "groups", 32)
    _ovewrite_named_param(kwargs, "width_per_group", 8)
    return _resnet(QuantizableBottleneck, [3, 4, 23, 3], weights, progress, quantize, **kwargs)
@register_model(name="quantized_resnext101_64x4d")
@handle_legacy_interface(
    weights=(
        "pretrained",
        # Legacy pretrained=True maps to quantized or float weights depending
        # on the `quantize` flag.
        lambda kwargs: ResNeXt101_64X4D_QuantizedWeights.IMAGENET1K_FBGEMM_V1
        if kwargs.get("quantize", False)
        else ResNeXt101_64X4D_Weights.IMAGENET1K_V1,
    )
)
def resnext101_64x4d(
    *,
    weights: Optional[Union[ResNeXt101_64X4D_QuantizedWeights, ResNeXt101_64X4D_Weights]] = None,
    progress: bool = True,
    quantize: bool = False,
    **kwargs: Any,
) -> QuantizableResNet:
    """ResNeXt-101 64x4d model from
    `Aggregated Residual Transformation for Deep Neural Networks <https://arxiv.org/abs/1611.05431>`_
    .. note::
        Note that ``quantize = True`` returns a quantized model with 8 bit
        weights. Quantized models only support inference and run on CPUs.
        GPU inference is not yet supported.
    Args:
        weights (:class:`~torchvision.models.quantization.ResNeXt101_64X4D_QuantizedWeights` or :class:`~torchvision.models.ResNeXt101_64X4D_Weights`, optional): The
            pretrained weights for the model. See
            :class:`~torchvision.models.quantization.ResNeXt101_64X4D_QuantizedWeights` below for
            more details, and possible values. By default, no pre-trained
            weights are used.
        progress (bool, optional): If True, displays a progress bar of the
            download to stderr. Default is True.
        quantize (bool, optional): If True, return a quantized version of the model. Default is False.
        **kwargs: parameters passed to the ``torchvision.models.quantization.QuantizableResNet``
            base class. Please refer to the `source code
            <https://github.com/pytorch/vision/blob/main/torchvision/models/quantization/resnet.py>`_
            for more details about this class.
    .. autoclass:: torchvision.models.quantization.ResNeXt101_64X4D_QuantizedWeights
        :members:
    .. autoclass:: torchvision.models.ResNeXt101_64X4D_Weights
        :members:
        :noindex:
    """
    weights = (ResNeXt101_64X4D_QuantizedWeights if quantize else ResNeXt101_64X4D_Weights).verify(weights)
    # ResNeXt variant: 64 groups of width 4 on the standard 101-layer layout.
    _ovewrite_named_param(kwargs, "groups", 64)
    _ovewrite_named_param(kwargs, "width_per_group", 4)
    return _resnet(QuantizableBottleneck, [3, 4, 23, 3], weights, progress, quantize, **kwargs)
|
# -*- coding: utf-8 -*-
"""ORRM Schedule application."""
from flask import Flask, jsonify, render_template, request, Response
from werkzeug.security import check_password_hash
from database import *
# Flask application instance used by the route decorators below.
app = Flask(__name__)
@app.route('/', methods=['GET', 'POST'])
def get_page():
    """Serve the index page; a POST is an admin-login attempt."""
    if request.method == 'POST':
        # Compare the submitted password with the stored hash for 'admin'
        # (get_password_hash comes from the star-imported database module).
        if check_password_hash(get_password_hash('admin'),
                               request.form['password']):
            return Response(status=200)
        else:
            return Response(status=403)
    return render_template('index.html')
@app.route('/api/v1/employees', methods=['GET', 'POST', 'DELETE'])
def employees():
    """List (GET), add (POST) or remove (DELETE) employees.

    POST/DELETE expect form fields ``name_rus`` and ``surname_rus``; an
    invalid pair yields 400.
    """
    if request.method == 'GET':
        return jsonify(get_employees())
    if request.method == 'POST':
        if Util.is_valid_employee(request.form['name_rus'],
                                  request.form['surname_rus']):
            add_employee(request.form['name_rus'], request.form['surname_rus'])
            return Response(status=201)
        return Response(status=400)
    if request.method == 'DELETE':
        if Util.is_valid_employee(request.form['name_rus'],
                                  request.form['surname_rus']):
            delete_employee(request.form['name_rus'],
                            request.form['surname_rus'])
            return Response(status=200)
        return Response(status=400)
@app.route('/api/v1/schedule', methods=['PUT'])
@app.route('/api/v1/schedule/<string:iso_week>', methods=['GET'])
def schedule(iso_week=None):
    """GET one ISO week's schedule, or PUT an update for one employee's shift."""
    if request.method == 'GET':
        return jsonify(get_schedule(iso_week))
    if request.method == 'PUT':
        # PUT carries its own iso_week in the form body (the URL has none).
        update_schedule(request.form['shortname'], request.form['iso_week'],
                        request.form['shift'])
        return Response(status=200)
if __name__ == "__main__":
    # Run the Flask development server when executed directly.
    app.run()
|
# Minimal str.startswith demonstration; the boolean result is discarded.
# (The name `string` shadows the stdlib module of the same name if imported.)
string = "hello world"
string.startswith('hello')
|
class Sudoku:
    """Backtracking solver for a 9x9 sudoku board (0 marks an empty cell)."""

    def __init__(self, board):
        # board: list of 9 lists of 9 ints; mutated in place by solve().
        self.board = board

    def solve(self):
        """Fill the board in place. Return True if solved, False if unsolvable."""
        row, col = self.find_empty()
        if row == -1 or col == -1:
            return True  # no empty cell left: solved
        for number in range(1, 10):
            if self.isValid(row, col, number):
                self.board[row][col] = number
                if self.solve():
                    return True
                self.board[row][col] = 0  # undo and try the next candidate
        # BUG FIX: previously fell off the end and returned None when no digit
        # fit; return an explicit False (still falsy for existing callers).
        return False

    def find_empty(self):
        """Return (row, col) of the first empty cell, or (-1, -1) if none."""
        for row in range(len(self.board)):
            for col in range(len(self.board[row])):
                if self.board[row][col] == 0:
                    return (row, col)
        return (-1, -1)

    def isValid(self, row, column, number):
        """True if *number* can go at (row, column) without a conflict."""
        return (self.isRowValid(row, number)
                and self.isColumnValid(column, number)
                and self.isSubgridValid(row, column, number))

    def isRowValid(self, row, number):
        """True if *number* does not yet occur in the given row."""
        return number not in self.board[row]

    def isColumnValid(self, column, number):
        """True if *number* does not yet occur in the given column."""
        return number not in [row[column] for row in self.board]

    def isSubgridValid(self, row, column, number):
        """True if *number* does not yet occur in the 3x3 box containing the cell."""
        startRow, startCol = (row // 3) * 3, (column // 3) * 3
        return number not in [
            self.board[i][j]
            for i in range(startRow, startRow + 3)
            for j in range(startCol, startCol + 3)
        ]
print("Enter your sudoku. Numbers should be seperated by spaces")
# Read rows from stdin until a blank line; each row is a space-separated list
# of ints with 0 marking an empty cell.
board = []
while True:
    row = list(input().split())
    if row:
        board.append([int(x) for x in row])
    else:
        break
sudoku = Sudoku(board)
sudoku.solve()
print("This is your solved sudoku board:")
for row in sudoku.board:
    print(row)
from .distributions import get_distribution
from .effect import RiskEffect
from .base_risk import Risk
from .implementations.low_birth_weight_and_short_gestation import LBWSGRisk
|
import sys
# Read all of stdin. The first line is the number of test cases; each case
# then occupies three lines, of which only the 2nd and 3rd (the two strings)
# are used below.
x = [line.strip('\n').rstrip() for line in sys.stdin]
cases = int(x[0])
del x[0]
inputs = []
for i in range(cases):
    itr = 3 * i
    inputs.append((x[itr + 1], x[itr + 2]))
def f(tup):
    """Return the length of the longest common substring of the pair in *tup*."""
    first, second = tup
    # Shortcut: if one string contains the other, the shorter one is itself
    # the longest common substring.
    if first in second or second in first:
        return min(len(first), len(second))
    best = 0
    # Rolling-row DP: prev[j] is the length of the common suffix ending at
    # first[i-2] / second[j-1].
    prev = [0] * (len(second) + 1)
    for i in range(1, len(first) + 1):
        cur = [0] * (len(second) + 1)
        for j in range(1, len(second) + 1):
            if first[i - 1] == second[j - 1]:
                cur[j] = prev[j - 1] + 1
                best = max(best, cur[j])
        prev = cur
    return best
# Print one answer per test case, in input order.
answer = list(map(f, inputs))
for ans in answer:
    print(ans)
from django.conf import settings
from django.shortcuts import redirect, render, get_object_or_404
from django.core.mail import EmailMessage, send_mail
from django.template import Context
from django.template.loader import get_template
from django.contrib import messages, auth
from django.forms import modelformset_factory, formset_factory
from .forms import UploadForm, ImagesForm, ImageFormset, SocialMediaForm
from .post_to_social import twitter_post_status, facebook_post_status
from products.models import Product, ProductImage
from django.core.files.storage import FileSystemStorage
from django.contrib.auth.decorators import user_passes_test
# DO ALL OF THESE IMPORTS NEED TO BE HERE?
@user_passes_test(lambda u: u.is_superuser)
def upload(request):
    """Superuser-only upload: a product form plus an inline image formset."""
    formset = ImageFormset(queryset=ProductImage.objects.none())
    # Bind POST data only when present: binding an empty POST on GET would
    # render the form covered in "this field is required" errors.
    form = UploadForm(request.POST or None, request.FILES or None)
    if request.method == 'POST':
        formset = ImageFormset(request.POST, request.FILES)
        if form.is_valid() and formset.is_valid():
            form.save()
            # NOTE(review): fetching the newest product assumes no concurrent
            # uploads -- ideally use the instance returned by form.save().
            product = Product.objects.all().order_by('-id')[0]
            # BUG FIX: the loop variable used to shadow the outer `form`.
            for image_data in formset.cleaned_data:
                image = image_data['image']
                pi = ProductImage(image=image, product=product)
                pi.save()
            return redirect('social_media')
    # invalid form response?
    return render(request, 'upload/upload.html', {
        'form': form,
        'formset': formset,
    })
@user_passes_test(lambda u: u.is_superuser)
def social_media(request):
    """Superuser-only page to cross-post the newest product to Facebook/Twitter."""
    # NOTE(review): "newest product" assumes the upload just completed; a
    # concurrent upload could select the wrong product.
    product = Product.objects.all().order_by('-id')[0]
    # NOTE(review): the form is bound to request.POST even on GET, which
    # renders validation errors on first load -- confirm intent.
    form = SocialMediaForm(request.POST)
    if request.method == 'POST':
        if form.is_valid():
            # should be a function
            post_to_facebook = request.POST.get('post_to_facebook')
            post_to_twitter = request.POST.get('post_to_twitter')
            product_id = request.POST.get('product_id')
            # NOTE(review): hard-coded dev-host URLs; move to settings.
            product_url = "http://e-commerce-johnpooch.c9users.io:8080/products/" + product_id
            image_url = "http://e-commerce-johnpooch.c9users.io:8080" + product.image.url
            if post_to_facebook:
                facebook_caption = request.POST.get('facebook_caption')
                facebook_post_status(facebook_caption, product_url)
            if post_to_twitter:
                twitter_caption = request.POST.get('twitter_caption')
                twitter_post_status(twitter_caption, image_url, product_url)
            return redirect('get_index')
        print("form is not valid")
        return render(request, 'upload/social_media.html', {
            'form': form, 'product': product,
        })
    # GET: render the page without form/product context.
    return render(request, 'upload/social_media.html')
# -*- coding: utf-8 -*-
from django.db import models
from django_extensions.db.fields import AutoSlugField
#from Aluno.models import Aluno
#from Professor.models import Professor
#from Materia.models import Materia
class Turma(models.Model):
    """
    Represents a class group ("turma") for one course subject.

    Holds the teacher (professor) and the students (alunos) of the subject
    in question.
    """
    nome = models.CharField(u"Nome", max_length=250)
    sigla = models.CharField(u"Sigla",max_length=10)
    # NOTE(review): ForeignKey without on_delete -- pre-Django-2.0 style.
    professor = models.ForeignKey('Professor.Professor',related_name="turmas")
    materia = models.ForeignKey('Materia.Materia',related_name="turmas")
    alunos = models.ManyToManyField('Aluno.Aluno', related_name="turmas")
    # URL-friendly identifier derived automatically from the sigla.
    slug = AutoSlugField(populate_from='sigla')
    class Meta:
        verbose_name = u'Turma'
        app_label = 'Turma'
    def __unicode__(self):
        # Python 2 string representation (admin display).
        return self.nome
|
# Generated by Django 3.0.7 on 2020-08-23 12:29
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated Django migration: adds the Serviceprovider model, renames
    # mobileformat.format -> mformat, and repoints mobileformat.provider at
    # Serviceprovider. Avoid hand-editing generated migrations.
    dependencies = [
        ('account', '0009_auto_20200823_1417'),
    ]
    operations = [
        migrations.CreateModel(
            name='Serviceprovider',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('provider', models.CharField(max_length=4)),
                ('datecreated', models.DateTimeField(auto_now_add=True)),
            ],
        ),
        migrations.RenameField(
            model_name='mobileformat',
            old_name='format',
            new_name='mformat',
        ),
        migrations.AlterField(
            model_name='mobileformat',
            name='provider',
            field=models.ForeignKey(on_delete=django.db.models.deletion.DO_NOTHING, to='account.Serviceprovider'),
        ),
    ]
|
from flask_restful import request, reqparse
from MySQLdb.cursors import DictCursor
from pathlib import Path
from Camel import CamelResource
from Camel.field import FieldList
from Camel.auth import is_authenticated
from Camel import config
import io
import shutil
import csv
def _compose_query(where_base = [], where_field = [], not_field = [], where_ref = []):
'''
Compose the SQL query to fetch a filtered list of experiments
:param where_base: list of WHERE statements for main query
:param where_field: list of WHERE statements for experiment_fields sub_query
:param where_ref: list of WHERE statements for references_fields sub_query
'''
base = ("SELECT e.`id` AS `experiment_id`, e.`name`, "
"f.`id` AS `field_id`, f.`title` AS `field_title`, f.`weight`, "
"ef.`id` as value_id, "
"ef.`value_INT`, ef.`value_VARCHAR`, ef.`value_DOUBLE`, ef.`value_BOOL`, ef.`value_TEXT`, ef.`value_ATTACH` "
"FROM `experiments` e "
"LEFT JOIN `experiments_fields` ef ON e.`id` = ef.`experiment_id` "
"LEFT JOIN `fields` f ON ef.`field_id` = f.`id` ")
field_filter = ("e.`id` IN (SELECT ef_filter.`experiment_id` "
"FROM `experiments_fields` ef_filter "
"WHERE {} ) ")
not_field_filter = ("e.`id` NOT IN (SELECT ef_filter.`experiment_id` "
"FROM `experiments_fields` ef_filter "
"WHERE {} ) ")
ref_filter = ("e.`id` IN (SELECT er_filter.`experiment_id` "
"FROM `experiments_references` er_filter "
"JOIN `references` r_filter ON er_filter.`reference_id` = r_filter.`id` "
"WHERE {} ) ")
order = " ORDER BY e.`id`, f.`weight`"
where = []
where+= where_base
for wf in where_field:
wf_sql = field_filter.format(wf)
where.append(wf_sql)
for nf in not_field:
nf_sql = not_field_filter.format(nf)
where.append(nf_sql)
for wr in where_ref:
wr_sql = ref_filter.format(wr)
where.append(wr_sql)
sql = base
if len(where) > 0:
sql+=" WHERE "+' AND '.join(where)
sql+= order
return sql
def _compact(res, field_types, db):
    '''
    Gather all result values from the query and group them by experiment.
    :param res: rows from _compose_query (one row per experiment/field/value)
    :param field_types: field id -> type-column suffix, from _map_field_types()
    :param db: open database connection used to fetch references
    :return a list of dictionaries, one per experiment
    '''
    ##Combine all field/value results into a 'summary' (one entry per experiment)
    summary = {}
    for entry in res:
        experiment_id = entry['experiment_id']
        if experiment_id not in summary:
            summary[experiment_id] = {}
            summary[experiment_id]['name'] = entry['name']
            summary[experiment_id]['fields'] = {}
        field_id = entry['field_id']
        # LEFT JOIN: experiments without any field produce a NULL field row.
        if field_id is None:
            continue
        # Pick the populated value_* column based on the field's declared type.
        field_type = field_types[field_id]
        field_value = entry['value_'+field_type]
        if field_id not in summary[experiment_id]['fields']:
            summary[experiment_id]['fields'][field_id] = {}
        value_id = entry['value_id']
        summary[experiment_id]['fields'][field_id][value_id] = field_value
    ##generate a list from gathered summary results and add the references to each entry
    result = []
    for exp_id in summary:
        exp = summary[exp_id]
        ##ID
        exp['id'] = exp_id
        ##References
        # One extra query per experiment to attach its literature references.
        sql = ("SELECT r.`id`, r.`authors`, r.`title`, r.`journal`, r.`year`, r.`pages`, r.`pubmed_id`, r.`url` "
               "FROM `references` r "
               "JOIN `experiments_references` er ON r.`id` = er.`reference_id` "
               "WHERE er.`experiment_id` = %(ID)s")
        c = db.cursor(DictCursor)
        c.execute(sql, {'ID': exp_id})
        res = c.fetchall()
        c.close()
        exp['references'] = res
        result.append(exp)
    return result
def _map_field_types():
    '''
    :return a mapping of field id's to field type (VARCHAR, TEXT, INT, BOOL)
    '''
    # type_column values look like "value_VARCHAR"; keep the part after '_'.
    rows = FieldList().retrieveFieldData()
    return {row['id']: row['type_column'].split('_')[1] for row in rows}
def _put_file(uuid, exp_id, field_id, filename):
    '''
    Move tmp file with uuid to its download location with
    original filename.
    Files are stored under <PATH>/<experiment id>/<field id>/<filename>;
    name collisions get a _1, _2, ... postfix before the suffixes.
    :return: final filename, including possible postfix
    '''
    upload_conf = config['uploads']
    tmp_path = Path(upload_conf['TMP'])
    tmp_file = tmp_path.joinpath(uuid)
    target_path = Path(upload_conf['PATH'])
    target_full_path = target_path.joinpath(str(exp_id), str(field_id))
    target_full_path.mkdir(parents=True, exist_ok=True)
    target_file = target_full_path.joinpath(filename)
    postfix = 0
    # Base name up to the first dot; all suffixes are re-attached on rename.
    stem = target_file.stem.split('.')[0]
    while target_file.exists():
        postfix +=1
        postfixed = stem + '_' + str(postfix)
        new_name = postfixed + ''.join(target_file.suffixes)
        target_file = target_file.parent.joinpath(new_name)
    # copy_function=shutil.copy copies content+mode but not metadata when the
    # move crosses filesystems.
    shutil.move(str(tmp_file), str(target_file), copy_function=shutil.copy)
    return target_file.name
def _del_file(exp_id, field_id, filename):
    '''
    Remove an uploaded attachment file for the given experiment/field.
    A file that is already missing counts as successfully removed.
    '''
    base = Path(config['uploads']['PATH'])
    target = base.joinpath(str(exp_id), str(field_id), filename)
    try:
        target.unlink()
    except FileNotFoundError:
        ## if the file is gone already: mission accomplished
        pass
def _edit_fields(exp_id, fields, field_types, db):
    '''
    Loop over the submitted field dictionary (field id => field data)
    and insert/update/delete as needed.

    Value ids of the form "new_*" create rows; plain ids with a scalar value
    update; a dict value with action == 'delete' removes the row (and, for
    ATTACH fields, the stored file).
    '''
    cursor = db.cursor()
    for field_id, values in fields.items():
        field_type = field_types[int(field_id)]
        for value_id, value in values.items():
            id_parts = value_id.split('_')
            if len(id_parts) == 2 and id_parts[0] == 'new':
                ##Insert new value
                ##uploaded attachments need to store the tmp uuid
                if field_type == 'ATTACH':
                    uuid = value['uuid']
                    value = value['filename']
                    # Move the tmp upload into place; store the final filename.
                    value = _put_file(uuid, exp_id, field_id, value)
                sql = ("INSERT INTO `experiments_fields` "
                       "(`experiment_id`, `field_id`, `value_{type_col}`) "
                       "VALUES (%(exp_id)s, %(field_id)s, %(val)s) ").format(type_col = field_type)
                cursor.execute(sql, {'exp_id': exp_id, 'field_id': field_id, 'val': value})
            else:
                if type(value) is not dict:
                    ##Update existing value
                    sql = "UPDATE `experiments_fields` SET `value_{type_col}` = %(value)s WHERE `id`=%(val_id)s".format(type_col=field_type)
                    cursor.execute(sql, {'val_id': value_id, 'value': value})
                elif 'action' in value and value['action'] == 'delete':
                    ##Delete value
                    if field_type == 'ATTACH':
                        # Remove the stored file before deleting the row that names it.
                        sql = "SELECT `value_ATTACH` as filename FROM `experiments_fields` WHERE `id` = %(val_id)s"
                        cursor.execute(sql, {'val_id': value_id})
                        row = cursor.fetchone()
                        _del_file(exp_id, field_id, row[0])
                    sql = "DELETE FROM `experiments_fields` WHERE `id` = %(val_id)s"
                    cursor.execute(sql, {'val_id': value_id})
    cursor.close()
def _edit_references(exp_id, refs, db):
    '''
    Loop over the list of submitted references and insert/update/delete as needed.

    Each ref dict may carry an 'action' key ('new', 'delete', 'update'); refs
    without an action are treated as updates. Links in `experiments_references`
    are created as needed, and references no longer linked to any experiment
    are removed entirely.
    '''
    cursor = db.cursor()
    # Existing reference ids already linked to this experiment.
    sql = "SELECT `reference_id` FROM `experiments_references` WHERE `experiment_id` = %(exp_id)s"
    cursor.execute(sql, {'exp_id': exp_id})
    ref_links = cursor.fetchall()
    ref_links = [r[0] for r in ref_links]
    for ref in refs:
        if 'action' in ref:
            if ref['action'] == 'new':
                ##Add new reference
                sql = ("INSERT INTO `references` "
                       "(`title`, `authors`, `journal`, `year`, `pages`, `pubmed_id`, `url`) "
                       "VALUES (%(title)s, %(authors)s, %(journal)s, %(year)s, %(pages)s, %(pubmed_id)s, %(url)s) ")
                cursor.execute(sql, ref)
                # Remember the generated id so the link insert below can use it.
                ref['id'] = cursor.lastrowid
            if ref['action'] == 'delete':
                sql = "DELETE FROM `experiments_references` WHERE `experiment_id` = %(exp_id)s AND `reference_id` = %(ref_id)s"
                cursor.execute(sql, {'exp_id': exp_id, 'ref_id': ref['id']})
                ##check if the reference points at other experiments. If not, delete it completely
                sql = "SELECT * FROM `experiments_references` WHERE `reference_id` = %(ref_id)s"
                count = cursor.execute(sql, {'ref_id': ref['id']})
                if count == 0:
                    sql = "DELETE FROM `references` WHERE `id` = %(ref_id)s"
                    cursor.execute(sql, {'ref_id': ref['id']})
        if 'action' not in ref or ref['action'] == 'update':
            ##Update existing reference
            sql = ("UPDATE `references` SET "
                   "`title`=%(title)s, `authors`=%(authors)s, "
                   "`journal`=%(journal)s, `year` = %(year)s, `pages` = %(pages)s, "
                   "`pubmed_id`=%(pubmed_id)s, `url`=%(url)s "
                   "WHERE `id` = %(id)s")
            cursor.execute(sql, ref)
            ##insert a link between experiment and reference if it's not there yet.
            if ref['id'] not in ref_links:
                sql = "INSERT INTO `experiments_references` (`experiment_id`, `reference_id`) VALUES (%(exp_id)s, %(ref_id)s)"
                cursor.execute(sql, {'exp_id': exp_id, 'ref_id': ref['id']})
    cursor.close()
class ExperimentList(CamelResource):
    """REST resource for the experiment collection.

    GET  -> list all experiments, filtered by query-string parameters.
    POST -> create a new experiment plus its field values and references.
    Filter state (``self.tokens`` / ``self.where_*``) is built per-request by
    :meth:`retrieveExperimentData` and its ``_add_*_filters`` helpers.
    """
    def __init__(self):
        # Parser for the POST (create) payload.
        self.reqparse = reqparse.RequestParser()
        ##POST arguments
        self.reqparse.add_argument('name', required = True, type = str, location = 'json')
        self.reqparse.add_argument('fields', type = dict, location = 'json')
        self.reqparse.add_argument('references', type = list, location = 'json')
        super(ExperimentList, self).__init__()
    def _add_field_filters(self, field_id, field_type, value):
        """Append a SQL fragment (with bind tokens) filtering one experiment
        field, dispatched on the field's storage type."""
        if field_type == 'VARCHAR' or field_type == 'TEXT':
            # Text fields: case-insensitive substring match.
            filter_query = ("(ef_filter.`field_id` = %(FieldID_{field_id})s AND ef_filter.`value_{field_type}` "
                            "LIKE CONCAT('%%', %(FieldValue_{field_id})s ,'%%')) ").format(field_id=field_id, field_type=field_type)
            self.tokens["FieldID_{}".format(field_id)] = field_id
            self.tokens["FieldValue_{}".format(field_id)] = value
            self.where_field.append(filter_query)
        elif field_type == 'INT' or field_type == 'DOUBLE':
            # Numeric fields: optional min_<id> / max_<id> range bounds taken
            # straight from the request args.
            filter_query = "(ef_filter.`field_id` = %(FieldID_{field_id})s ".format(field_id=field_id)
            self.tokens["FieldID_{}".format(field_id)] = field_id
            if 'min_'+str(field_id) in request.args:
                min_value = request.args['min_'+str(field_id)]
                filter_query+= "AND ef_filter.`value_{field_type}` >= %(FieldMinValue_{field_id})s ".format(field_type=field_type, field_id=field_id)
                self.tokens["FieldMinValue_{}".format(field_id)] = min_value
            if 'max_'+str(field_id) in request.args:
                max_value = request.args['max_'+str(field_id)]
                filter_query+= "AND ef_filter.`value_{field_type}` <= %(FieldMaxValue_{field_id})s ".format(field_type=field_type, field_id=field_id)
                self.tokens["FieldMaxValue_{}".format(field_id)] = max_value
            filter_query+= ") "
            self.where_field.append(filter_query)
        elif field_type == 'BOOL':
            # Booleans arrive as the strings 'true'/'false'.
            bool_value = 1 if value=='true' else 0
            filter_query = ("(ef_filter.`field_id` = %(FieldID_{field_id})s "
                            "AND ef_filter.`value_BOOL` = %(FieldValue_{field_id})s) ").format(field_id=field_id)
            self.tokens["FieldID_{}".format(field_id)] = field_id
            self.tokens["FieldValue_{}".format(field_id)] = bool_value
            self.where_field.append(filter_query)
        elif field_type == 'ATTACH':
            # Attachments: 'true' keeps experiments that HAVE an attachment,
            # anything else keeps those that do NOT (via the NOT list).
            filter_query = ("(ef_filter.`field_id` = %(FieldID_{field_id})s "
                            "AND ef_filter.`value_ATTACH` IS NOT NULL) ").format(field_id=field_id)
            self.tokens["FieldID_{}".format(field_id)] = field_id
            if value=='true':
                self.where_field.append(filter_query)
            else:
                self.not_field.append(filter_query)
    def _add_ref_filters(self, field_id, value):
        """Append a SQL fragment filtering on reference metadata.

        *field_id* is the part after the 'ref_' prefix: a column name for a
        substring match, or 'min_year'/'max_year' for a year range.
        """
        ref_parts = field_id.split('_', 1)
        if len(ref_parts) == 1 or (ref_parts[0] != 'min' and ref_parts[0] != 'max'):
            # NOTE(review): field_id is interpolated as a column name here;
            # callers pass it straight from the query string -- confirm it is
            # constrained upstream ('authors', 'journal', 'title').
            ref_filter_query = "(r_filter.`{ref_field}` LIKE CONCAT('%%', %(RefValue_{ref_field})s, '%%')) ".format(ref_field=field_id)
            self.tokens['RefValue_{}'.format(field_id)] = value
            self.where_ref.append(ref_filter_query)
        else:
            if field_id == 'min_year':
                ref_filter_query = "r_filter.`year` >= %(MinYear)s "
                self.tokens['MinYear'] = value
            elif field_id == 'max_year':
                ref_filter_query = "r_filter.`year` <= %(MaxYear)s "
                self.tokens['MaxYear'] = value
            self.where_ref.append(ref_filter_query)
    def retrieveExperimentData(self):
        '''
        Gather all experiment data, filtered by field and reference.
        Filters are key-value pairs with the key formatted like:
        - ExperimentName
        - <int> (int being a field id)
        - min_<int> | max_<int> (min/max values for an integer field, <int> being a field id)
        - ref_<field> (reference data, field being 'authors', 'journal' or 'title')
        - ref_min_year | ref_max_year (reference year min/max values)
        '''
        self.tokens = {}
        ##Name filter
        self.where_base = []
        ##Field filters
        self.where_field = []
        self.not_field = []
        self.where_ref = []
        field_types = _map_field_types()
        for key in request.args:
            value = request.args[key]
            if key == 'ExperimentName':
                self.where_base.append("e.`name` LIKE CONCAT('%%', %(ExperimentName)s ,'%%') ")
                self.tokens['ExperimentName'] = request.args['ExperimentName']
                continue
            # Split off an optional 'min'/'max'/'ref' prefix.
            key_parts = key.split('_', 1)
            if len(key_parts) == 2:
                field_prefix = key_parts[0]
                field_id = key_parts[1]
            else:
                field_prefix = ''
                field_id = key
            ## Field filter
            if field_id.isnumeric():
                field_id = int(field_id)
                field_type = field_types[field_id]
                self._add_field_filters(field_id, field_type, value)
            ## Ref filter
            elif field_prefix == 'ref':
                self._add_ref_filters(field_id, value)
        c = self.db.cursor(DictCursor)
        sql = _compose_query(self.where_base, self.where_field, self.not_field, self.where_ref)
        c.execute(sql, self.tokens)
        res = c.fetchall()
        c.close()
        # Fold the flat row set into one dict per experiment.
        result = _compact(res, field_types, self.db)
        return result
    def csv(self):
        '''
        Retrieve all (filtered) experiment data and format as a CSV string.

        One row per experiment; multi-valued cells (references, repeated
        field values) are newline-joined inside a single cell.
        '''
        output = io.StringIO()
        writer = csv.writer(output,
                            dialect="excel",
                            quoting=csv.QUOTE_MINIMAL)
        data = self.retrieveExperimentData()
        fieldList = FieldList()
        fields = fieldList.retrieveFieldData()
        ## Write header
        header_fields = []
        header_fields.append("id")
        header_fields.append("name")
        header_fields.append("paper_title")
        header_fields.append("paper_authors")
        header_fields.append("paper_journal")
        header_fields.append("paper_year")
        header_fields.append("paper_pages")
        header_fields.append("paper_url")
        header_fields.append("pubmed_id")
        for f in fields:
            header_fields.append(f['title'])
        writer.writerow(header_fields)
        ## Write data
        for exp in data:
            row = []
            row.append(exp['id'])
            row.append(exp['name'])
            # Collect each reference attribute into a parallel list so the
            # n-th line of every paper_* cell describes the same reference.
            titles=[]
            authors=[]
            journals=[]
            years=[]
            pages=[]
            urls=[]
            pubmed_ids=[]
            for ref in exp['references']:
                titles.append(ref['title'])
                authors.append(ref['authors'])
                journals.append(ref['journal'])
                years.append(ref['year'])
                pages.append(ref['pages'])
                urls.append(ref['url'])
                pubmed_ids.append(ref['pubmed_id'])
            row.append('\n'.join([t if t is not None else '' for t in titles]))
            row.append('\n'.join([a if a is not None else ''for a in authors]))
            row.append('\n'.join([j if j is not None else '' for j in journals]))
            row.append('\n'.join([str(y) for y in years]))
            row.append('\n'.join([p if p is not None else '' for p in pages]))
            row.append('\n'.join([u if u is not None else '' for u in urls]))
            row.append('\n'.join([str(p) if p is not None else '' for p in pubmed_ids]))
            # One column per defined field; blank when this experiment has
            # no value for it.
            for field in fields:
                field_id = field['id']
                if field_id in exp['fields']:
                    field_values = list(exp['fields'][field_id].values())
                    row.append('\n'.join([str(f) if f is not None else '' for f in field_values]))
                else:
                    row.append('')
            writer.writerow(row)
        return output.getvalue()
    def get(self):
        """Return the (filtered) experiment list as JSON."""
        result = self.retrieveExperimentData()
        return result
    def post(self):
        """Create a new experiment; admin only. Returns the created object."""
        if not is_authenticated():
            return "Admin only", 401
        args = self.reqparse.parse_args()
        exp_name = args['name']
        ##Experiment
        sql = "INSERT INTO `experiments` (`name`) VALUES (%(exp_name)s)"
        cursor = self.db.cursor()
        cursor.execute(sql, {'exp_name': exp_name})
        exp_id = cursor.lastrowid
        cursor.close()
        ##Fields
        if args['fields']:
            field_types = _map_field_types()
            _edit_fields(exp_id, args['fields'], field_types, self.db)
        ##References
        if args['references']:
            _edit_references(exp_id, args['references'], self.db)
        self.db.commit()
        # Echo the request body back with the new id filled in.
        created = request.json
        created['id'] = exp_id
        return created, 201
class Experiment(CamelResource):
    """REST resource for a single experiment (GET / PUT / DELETE by id)."""
    def __init__(self):
        self.reqparse = reqparse.RequestParser()
        ##PUT arguments
        self.reqparse.add_argument('name', type = str, location = 'json')
        self.reqparse.add_argument('fields', type = dict, location = 'json')
        self.reqparse.add_argument('references', type = list, location = 'json')
        super(Experiment, self).__init__()
    def get(self, id):
        """Return one experiment (fields + references) or 400 if unknown."""
        where_base = ["e.`id` = %(id)s"]
        tokens = {'id': id}
        c = self.db.cursor(DictCursor)
        sql = _compose_query(where_base)
        c.execute(sql, tokens)
        res = c.fetchall()
        c.close()
        field_types = _map_field_types()
        result = _compact(res, field_types, self.db)
        if len(result) > 0:
            return result[0]
        else:
            return 'Unknown Experiment ID', 400
    def put(self, id):
        """Update name, field values and/or references of one experiment."""
        ## Without authentication, the user can only make
        ## suggestions, but never overwrite an entry.
        ##suggestion = not is_authenticated()
        ##TODO implement the suggestion idea
        if not is_authenticated():
            return "Admin only", 401
        args = self.reqparse.parse_args()
        ##Experiment properties
        if args['name']:
            cursor = self.db.cursor()
            name = args['name']
            sql = "UPDATE `experiments` SET `name` = %(name)s WHERE `id` = %(id)s"
            cursor.execute(sql, {'id': id, 'name': name})
            cursor.close()
        ##Fields
        if args['fields']:
            field_types = _map_field_types()
            _edit_fields(id, args['fields'], field_types, self.db)
        ##References
        if args['references']:
            _edit_references(id, args['references'], self.db)
        self.db.commit()
        return "UPDATED", 204
    def delete(self, id):
        """Delete an experiment, its orphaned references and attachments."""
        if not is_authenticated():
            return "Admin only", 401
        cursor = self.db.cursor()
        ## Get linked references BEFORE the experiment row disappears.
        sql = "SELECT `reference_id` FROM `experiments_references` WHERE `experiment_id` = %(id)s"
        cursor.execute(sql, {'id': id})
        refs = cursor.fetchall()
        ref_id_list = [ref[0] for ref in refs]
        sql = "DELETE FROM `experiments` WHERE `id` = %(id)s"
        cursor.execute(sql, {'id': id})
        ## Delete orphan references (no remaining experiment links).
        for ref_id in ref_id_list:
            sql = "SELECT * FROM `experiments_references` WHERE `reference_id` = %(ref_id)s"
            linkCount = cursor.execute(sql, {'ref_id': ref_id})
            if linkCount == 0:
                sql = "DELETE FROM `references` WHERE `id` = %(ref_id)s"
                cursor.execute(sql, {'ref_id': ref_id})
        ## Delete attachments: uploads live under <upload path>/<experiment id>.
        upload_conf = config['uploads']
        target_path = Path(upload_conf['PATH'])
        target_exp_path = target_path.joinpath(str(id))
        shutil.rmtree(target_exp_path, ignore_errors=True)
        self.db.commit()
        cursor.close()
        return "DELETED", 204
|
# -*- coding:utf-8 -*-
from app.extensions import db
from app.util.helper import now_time
class Message(db.Model):
    """Notification ("提醒") sent to a user, optionally tied to a topic."""
    __tablename__ = 'message'
    id = db.Column(db.Integer, primary_key=True)
    user_id = db.Column(db.Integer, nullable=False)       # recipient
    from_user_id = db.Column(db.Integer, nullable=False)  # sender
    # BUGFIX: pass the callable itself, not its result.  ``default=now_time()``
    # evaluated once at import time, freezing the same timestamp into every row.
    date_created = db.Column(db.DateTime, default=now_time)
    unread = db.Column(db.Boolean, nullable=False, default=True)
    content = db.Column(db.String(200), nullable=False)
    topic_id = db.Column(db.Integer, nullable=True)

    def __repr__(self):
        return "<{} {}>".format(self.__class__.__name__, self.id)

    def __init__(self, user_id, from_user_id, topic_id, content):
        self.user_id = user_id
        self.from_user_id = from_user_id
        if topic_id is not None:
            self.topic_id = topic_id
        self.content = content

    @classmethod
    def get_user_message(cls, u_id):
        """Return all messages for user *u_id*, newest first."""
        return cls.query.filter_by(user_id=u_id).order_by(cls.date_created.desc()).all()

    @classmethod
    def get_user_unread_num(cls, u_id):
        """Count unread messages for user *u_id*."""
        return cls.query.filter_by(user_id=u_id).filter_by(unread=True).count()

    def set_readed(self):
        """Mark this message as read and persist the change."""
        self.unread = False
        self.save()

    def delete(self):
        db.session.delete(self)
        db.session.commit()

    def save(self):
        db.session.add(self)
        db.session.commit()
class Pri_letter(db.Model):
    """Private letter ("私信") from one user to another."""
    __tablename__ = 'pri_letter'
    id = db.Column(db.Integer, primary_key=True)
    username = db.Column(db.String(200))  # sender display name used in the notification
    from_user_id = db.Column(db.Integer, nullable=False)
    to_user_id = db.Column(db.Integer, nullable=False)
    # BUGFIX: pass the callable so each row gets its own timestamp;
    # ``default=now_time()`` froze the value at import time.
    date_created = db.Column(db.DateTime, default=now_time)
    content = db.Column(db.Text)

    def __repr__(self):
        return "<{} {}>".format(self.__class__.__name__, self.id)

    def __init__(self, username, from_user_id, to_user_id, content):
        self.username = username
        self.from_user_id = from_user_id
        self.to_user_id = to_user_id
        self.content = content

    @staticmethod
    def get_user_letter(u_id):
        """Return all letters received by *u_id*, newest first."""
        return Pri_letter.query.filter_by(to_user_id=u_id).order_by(Pri_letter.date_created.desc()).all()

    @staticmethod
    def get_letter_num(u_id):
        """Count letters sent by *u_id*."""
        return Pri_letter.query.filter_by(from_user_id=u_id).order_by(Pri_letter.date_created.desc()).count()

    def save(self):
        """Persist the letter and notify the recipient.

        A failed notification must not block storing the letter itself, but a
        bare ``except:`` also swallowed KeyboardInterrupt/SystemExit -- narrow
        it to Exception.
        """
        m = "收到了来自" + self.username + "的私信"
        msg = Message(self.to_user_id, self.from_user_id, None, m)
        try:
            msg.save()
        except Exception:
            print("私信发送失败,{}".format(self.id))
        db.session.add(self)
        db.session.commit()

    def delete(self):
        db.session.delete(self)
        db.session.commit()
#-- encoding=utf8 --
from django.db import models
from django.contrib.auth.models import User
from django.db.models.signals import post_save
class UserProfile(models.Model):
    """Russian-language profile data attached 1:1 to ``auth.User``.

    NOTE(review): ``OneToOneField`` without ``on_delete`` implies Django < 2.0;
    confirm before upgrading.
    """
    user=models.OneToOneField(User)
    first_name_rus=models.CharField(max_length=255, verbose_name=u'Имя', blank=True, null=True)
    last_name_rus=models.CharField(max_length=255, verbose_name=u'Фамилия', blank=True, null=True)
    def __unicode__(self):
        # Admin/display label: "First Last (username)".
        return self.user.first_name+" "+self.user.last_name + " (" + self.user.username + ")"
    class Meta:
        verbose_name=u'User profile'
        verbose_name_plural=u'User profiles'
def create_user_profile(sender, instance, created, **kwargs):
    """post_save handler: ensure every newly created User has a profile.

    Uses ``exists()`` instead of evaluating the queryset so only a cheap
    EXISTS query runs rather than fetching the matching rows.
    """
    if created and not UserProfile.objects.filter(user=instance).exists():
        UserProfile.objects.create(user=instance)
post_save.connect(create_user_profile, sender=User)
class Country(models.Model):
    """Country with an English and a Russian display name."""
    name=models.CharField(max_length=255, verbose_name=u'Country')
    name_rus=models.CharField(max_length=255, verbose_name=u'Страна')
    def __unicode__(self):
        return self.name
    class Meta:
        verbose_name=u'Country'
        verbose_name_plural=u'Countries'
class State(models.Model):
    """State/region belonging to a :class:`Country`."""
    name=models.CharField(max_length=255, verbose_name=u'State')
    # Two-letter postal abbreviation (e.g. US states); optional.
    abbr=models.CharField(max_length=2, verbose_name='Abbreviation', blank=True)
    country=models.ForeignKey(Country, verbose_name=u'Country')
    def __unicode__(self):
        return self.name
    class Meta:
        verbose_name=u'State'
        verbose_name_plural=u'States'
def states():
    """Return (id, name) tuples for all states, ordered by name.

    Used as Django field ``choices`` below, so it runs raw SQL; the unused
    ``transaction`` import was dropped.
    """
    from django.db import connection
    cursor = connection.cursor()
    cursor.execute("SELECT id,name from accounts_state order by name asc")
    return cursor.fetchall()
def countries(lang=''):
    """Return (id, name[_lang]) tuples for all countries, ordered by name.

    :param lang: optional localized-name column suffix (``'rus'`` selects
                 ``name_rus``); empty string selects ``name``.
    The suffix is interpolated into the SQL as an identifier, so it is
    restricted to a whitelist to rule out SQL injection.
    """
    if lang not in ('', 'rus'):
        raise ValueError("unsupported language suffix: %r" % (lang,))
    from django.db import connection
    if(lang!=''): lang="_"+lang
    cursor = connection.cursor()
    cursor.execute("SELECT id,name%s from accounts_country order by name asc" % (lang,))
    return cursor.fetchall()
class Address(models.Model):
    """Postal address (English + Russian field variants) owned by a User.

    NOTE(review): ``choices=states()`` / ``choices=countries()`` execute SQL
    at class-definition (import) time -- new rows require a restart and a
    missing table breaks import; consider passing callables instead.
    """
    name=models.CharField(max_length=500, verbose_name=u'First and Last name', blank=True)
    name_rus=models.CharField(max_length=500, verbose_name=u'Имя и Фамилия', blank=True)
    addr1=models.CharField(max_length=500, verbose_name=u'Address Line 1', blank=True)
    addr1_rus=models.CharField(max_length=500, verbose_name=u'Улица, дом, квартира', blank=True)
    addr2=models.CharField(max_length=500, verbose_name=u'Address Line 2', blank=True)
    addr2_rus=models.CharField(max_length=500, verbose_name=u'Регион', blank=True)
    city=models.CharField(max_length=500, verbose_name=u'City', blank=True)
    city_rus=models.CharField(max_length=500, verbose_name=u'Город', blank=True)
    state=models.CharField(max_length=500, verbose_name=u'State', blank=True, choices=states())
    country=models.CharField(max_length=500, verbose_name=u'Country', choices=countries())
    country_rus=models.CharField(max_length=500, verbose_name=u'Страна', choices=countries('rus'))
    zip_code=models.CharField(max_length=20, verbose_name=u'Zip')
    user=models.ForeignKey(User, verbose_name=u'User')
    phone=models.TextField(max_length=20, verbose_name=u'Phone number', blank=True)
    primary=models.BooleanField(verbose_name=u'Primary address')
    def __unicode__(self):
        return self.name + ", "+ self.addr1 +", "+ self.city +", " + self.country + ", " + self.zip_code
    class Meta:
        verbose_name=u'Address'
        verbose_name_plural=u'Addresses'
|
"""
a web spider for baidu baike schools info in beijing powered by XPath and BeautifulSoup
"""
import logging.handlers
import os
import urllib.parse
import pandas as pd
import pymysql.cursors
import requests
from bs4 import BeautifulSoup
from lxml import etree
from io import StringIO, BytesIO
from pymongo import MongoClient
def insert_item(item):
    """
    Store one crawled school document in the ``baike.school`` collection.

    :param item: dict describing a single school
    :return: None
    """
    collection = MongoClient().baike.school
    collection.insert_one(item)
def crawl_baike_simple(school_name):
    """
    Crawl a school's Baidu Baike page (XPath version, limited fields).

    :param school_name: school name used as the Baike lemma
    :return: dict of scraped data (possibly partial on parse error), or
             None when the page could not be fetched
    @Deprecated -- superseded by :func:`crawl_baike`.
    """
    headers = {
        "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8",
        "Host": "baike.baidu.com",
        "Upgrade-Insecure-Requests": "1",
        "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/53.0.2785.143 Safari/537.36"
    }
    request_url = "https://baike.baidu.com/item/%s" % urllib.parse.quote(school_name)
    response = requests.get(request_url, timeout=20, headers=headers)
    print('start crawling %s ...' % school_name)
    # BUGFIX: 'school' was unbound on the 403/404 path, so the trailing
    # 'return school' raised NameError.  None now signals failure.
    school = None
    if response.status_code not in [403, 404]:
        try:
            school = {}
            html_raw = response.text.encode("Latin").decode("UTF-8").replace('\xa0', '').replace('\n', '')
            soup = BeautifulSoup(html_raw, "html5lib")
            parser = etree.HTMLParser()
            tree = etree.parse(StringIO(html_raw), parser)
            name = tree.xpath("//dd[@class='lemmaWgt-lemmaTitle-title']/h1/text()")[0]  # 学校名称
            introduction = ''.join(tree.xpath("//div[@class='lemma-summary']/div/text()"))  # 学校简介
            school['学校名称'] = name.strip()
            school['学校简介'] = introduction.strip()
            # First few basic-info key/value pairs.
            if len(soup.find_all(class_="basic-info cmn-clearfix")) > 0:
                for each_prop in soup.find_all(class_="basicInfo-item value", recursive=True, limit=3):
                    key = each_prop.find_previous().get_text().strip()
                    value = each_prop.get_text().strip()
                    school[key] = value
            # First few level-2 section headings with their lead paragraph.
            for _ in soup.find_all('div', class_='para-title level-2', recursive=True, limit=3):
                title = _.h2.get_text().replace(name, '').strip()
                title_desc = _.find_next('div', class_="para").get_text().strip()
                school[title] = title_desc
            print(school)
        except Exception:
            # Best-effort scraping: keep whatever was collected so far.
            pass
    else:
        print('ERROR')
    return school
def crawl_baike(school_name):
    """
    Crawl a school's full Baidu Baike page (XPath + BeautifulSoup).

    Scrapes name, summary, the basic-info table, level-2 sections, the
    reference list and lemma statistics.

    :param school_name: school name used as the Baike lemma
    :return: dict of scraped data (possibly partial on parse error), or
             None when the page could not be fetched
    """
    headers = {
        "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8",
        "Host": "baike.baidu.com",
        "Upgrade-Insecure-Requests": "1",
        "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/53.0.2785.143 Safari/537.36"
    }
    request_url = "https://baike.baidu.com/item/%s" % urllib.parse.quote(school_name)
    response = requests.get(request_url, timeout=20, headers=headers)
    print('start crawling %s ...' % school_name)
    # BUGFIX: 'school' was unbound on the 403/404 path, so the trailing
    # 'return school' raised NameError.  None now signals failure.
    school = None
    if response.status_code not in [403, 404]:
        try:
            school = {}
            html_raw = response.text.encode("Latin").decode("UTF-8").replace('\n', '')
            soup = BeautifulSoup(html_raw, "html5lib")
            parser = etree.HTMLParser()
            tree = etree.parse(StringIO(html_raw), parser)
            name = tree.xpath("//dd[@class='lemmaWgt-lemmaTitle-title']/h1/text()")[0]  # 学校名称
            introduction = ''.join(tree.xpath("//div[@class='lemma-summary']/div/text()"))  # 学校简介
            school['学校名称'] = name.strip()
            school['学校简介'] = introduction.strip()
            # 获取主要信息列表 -- basic-info table: dt/dd pairs per block.
            if len(soup.find_all(class_="basic-info cmn-clearfix")) > 0:
                for dl in soup.find_all(class_="basic-info cmn-clearfix")[0].find_all('dl', class_='basicInfo-block'):
                    for _ in range(len(dl.find_all('dt'))):
                        school[dl.find_all('dt')[_].get_text().strip().replace(' ', '').replace('\xa0', '')] = \
                            dl.find_all('dd')[_].get_text().strip()
            # 获取详细补充信息 -- level-2 section headings and their paragraphs.
            for _ in soup.find_all('div', class_='para-title level-2', recursive=True):
                title = _.h2.get_text().replace(name, '').strip()
                desc = ''
                for each_desc_div in _.find_next_siblings('div', class_="para"):
                    if each_desc_div['class'] == 'para':
                        break
                    desc += each_desc_div.get_text().strip()
                school[title.replace('\xa0', '')] = desc.replace('\xa0', '').replace(' ', '')
            # 参考资料 -- reference list entries (name, url, source).
            reflist = []
            if len(soup.find_all(class_='reference-list')) > 0:
                for li in soup.find_all(class_='reference-list')[0]:
                    if len(li.find_all('a')) > 1:
                        ref = {}
                        ref_name = li.find_all('a')[1].text.strip()
                        ref_url = li.find_all('a')[1]['href'].strip()
                        ref_source = "".join([_.get_text().strip() for _ in li.find_all('span')])
                        ref['参考名称'] = ref_name
                        ref['参考链接'] = ref_url
                        ref['参考来源'] = ref_source
                        reflist.append(ref)
            school['参考资料'] = reflist
            # 词条统计 -- lemma statistics ("key:value" list items).
            try:
                word_stat = {}
                if len(soup.find_all('dd', class_="description")) > 0:
                    li_list = soup.find_all('dd', class_="description", recursive=True)[0].find_all('li')
                    word_stat[li_list[0].text.split(':')[0].strip()] = li_list[0].span.text
                    for i in range(1, len(li_list), 1):
                        word_stat[li_list[i].text.split(':')[0].strip()] = li_list[i].text.split(':')[1].strip()
                    school['词条统计'] = word_stat
            except Exception:
                pass
            print(school)
        except Exception:
            # Best-effort scraping: keep whatever was collected so far.
            pass
    else:
        print('ERROR')
    return school
def read_school_names(excel_path):
    """Read the '名称' column of Sheet1 from *excel_path* as a list of names.

    BUGFIX: the ``sheetname`` keyword was deprecated and later removed from
    pandas; ``sheet_name`` is the supported spelling.
    """
    df = pd.read_excel(excel_path, sheet_name="Sheet1", index_col=False)['名称']
    return df.tolist()
if __name__ == '__main__':
    # Batch driver: read the school list from a local spreadsheet, crawl
    # each school's Baike page and store the result in MongoDB.  Per-school
    # failures are swallowed so one bad page does not stop the whole run.
    schoolnames = read_school_names("D:/采集.xlsx")
    # school = crawl_baike("中国人民大学附属中学")
    for school_name in schoolnames:
        try:
            school = crawl_baike(school_name)
            # Only persist entries where a non-empty name was scraped.
            if school is not None and school['学校名称'].strip() != "":
                insert_item(school)
        except:
            pass
|
# -*- coding: utf-8 -*-
import re
from pycolorname.color_system import ColorSystem
from pycolorname.utilities import u
class PantonePaint(ColorSystem):
    """Color names scraped from pantonepaint.co.kr (PMS TPX chips)."""
    def __init__(self, *args, **kwargs):
        ColorSystem.__init__(self, *args, **kwargs)
        self.load()
    def refresh(self):
        """Fetch the full TPX chip list and return ``{name: (r, g, b)}``."""
        full_data = self.request(
            'POST',
            'http://www.pantonepaint.co.kr/color/color_chip_ajax.asp',
            data={"cmp": "TPX", "keyword": ""})
        # Each chip is an <li> carrying name/number/company attributes; the
        # catch-all regexes just require the attributes to be present.
        lis = full_data.find_all('li',
                                 attrs={"attr_name": re.compile(r".*"),
                                        "attr_number": re.compile(r".*"),
                                        "attr_company": re.compile(r".*"),
                                        "id": re.compile(r".*")})
        data = {}
        # The RGB value only appears inside the inline style attribute.
        style_regex = re.compile(r'.*background-color *: *'
                                 r'rgb\((?P<rgb>[\d,]+ *).*')
        for li in lis:
            # e.g. "PMS 19-4052 TPX (Classic Blue)"
            name = u("PMS {0} {1} ({2})").format(li['attr_number'],
                                                 li['attr_company'],
                                                 li['attr_name'])
            rgb = re.findall(style_regex, li['style'])[0]
            rgb = map(lambda x: int(x.strip()), rgb.split(","))
            color = tuple(rgb)
            data[name] = color
        return data
|
###
### Copyright (C) 2018-2019 Intel Corporation
###
### SPDX-License-Identifier: BSD-3-Clause
###
from ....lib import *
from ..util import *
@slash.requires(have_ffmpeg)
@slash.requires(have_ffmpeg_qsv_accel)
@slash.requires(using_compatible_driver)
class EncoderTest(slash.Test):
    """Base slash test driving an ffmpeg QSV encode and verifying the result.

    Subclasses set attributes on ``self`` (codec, profile, format, width,
    height, frames, source, ffencoder, ffdecoder, rcmode, ...).  The option
    builders consult ``vars(self)`` so optional attributes may simply be
    absent.
    """
    def gen_input_opts(self):
        """Build the ffmpeg input options for the raw YUV source."""
        opts = "-f rawvideo -pix_fmt {mformat} -s:v {width}x{height}"
        if vars(self).get("fps", None) is not None:
            opts += " -r:v {fps}"
        opts += " -i {source}"
        return opts.format(**vars(self))
    def gen_output_opts(self):
        """Build the encoder-side ffmpeg options from the test attributes."""
        opts = "-vf 'format={hwformat},hwupload=extra_hw_frames=64' -an"
        opts += " -c:v {ffencoder}"
        # JPEG has no profile option.
        if self.codec not in ["jpeg",]:
            opts += " -profile:v {mprofile}"
        if vars(self).get("gop", None) is not None:
            opts += " -g {gop}"
        if vars(self).get("qp", None) is not None:
            # mpeg2 expects the mapped qp value.
            if self.codec in ["mpeg2"]:
                opts += " -q {mqp}"
            else:
                opts += " -q {qp}"
        if vars(self).get("quality", None) is not None:
            # "quality" means global_quality for jpeg, preset otherwise.
            if self.codec in ["jpeg",]:
                opts += " -global_quality {quality}"
            else:
                opts += " -preset {quality}"
        if vars(self).get("slices", None) is not None:
            opts += " -slices {slices}"
        if vars(self).get("bframes", None) is not None:
            opts += " -bf {bframes}"
        if vars(self).get("minrate", None) is not None:
            opts += " -b:v {minrate}k"
        if vars(self).get("maxrate", None) is not None:
            opts += " -maxrate {maxrate}k"
        if vars(self).get("refs", None) is not None:
            opts += " -refs {refs}"
        if vars(self).get("lowpower", None) is not None:
            opts += " -low_power {lowpower}"
        opts += " -vframes {frames} -y {encoded}"
        return opts.format(**vars(self))
    def gen_name(self):
        """Build a unique artifact name encoding all active parameters."""
        name = "{case}-{rcmode}-{profile}"
        if vars(self).get("fps", None) is not None:
            name += "-{fps}"
        if vars(self).get("gop", None) is not None:
            name += "-{gop}"
        if vars(self).get("qp", None) is not None:
            name += "-{qp}"
        if vars(self).get("slices", None) is not None:
            name += "-{slices}"
        if vars(self).get("quality", None) is not None:
            name += "-{quality}"
        if vars(self).get("bframes", None) is not None:
            name += "-{bframes}"
        if vars(self).get("minrate", None) is not None:
            name += "-{minrate}k"
        if vars(self).get("maxrate", None) is not None:
            name += "-{maxrate}k"
        if vars(self).get("refs", None) is not None:
            name += "-{refs}"
        if vars(self).get("lowpower", None) is not None:
            name += "-{lowpower}"
        return name.format(**vars(self))
    def before(self):
        # Fresh reference context for each test.
        self.refctx = []
    def encode(self):
        """Run the encode and all checks; skip when unsupported."""
        self.mprofile = mapprofile(self.codec, self.profile)
        if self.mprofile is None:
            slash.skip_test("{profile} profile is not supported".format(**vars(self)))
        self.mformat = mapformat(self.format)
        if self.mformat is None:
            slash.skip_test("{format} format not supported".format(**vars(self)))
        self.encoded = get_media()._test_artifact(
            "{}.{}".format(self.gen_name(), self.get_file_ext()))
        iopts = self.gen_input_opts()
        oopts = self.gen_output_opts()
        self.output = call(
            "ffmpeg -init_hw_device qsv=qsv:hw -hwaccel qsv -filter_hw_device qsv"
            " -v verbose {iopts} {oopts}".format(iopts = iopts, oopts = oopts))
        self.check_output()
        self.check_bitrate()
        self.check_metrics()
    def check_output(self):
        """Verify that the QSV plugin actually initialized."""
        m = re.search("Initialize MFX session", self.output, re.MULTILINE)
        assert m is not None, "It appears that the QSV plugin did not load"
    def check_metrics(self):
        """Decode the result back to raw YUV and check PSNR vs the source."""
        self.decoded = get_media()._test_artifact(
            "{}-{width}x{height}-{format}.yuv".format(self.gen_name(), **vars(self)))
        call(
            "ffmpeg -hwaccel qsv -hwaccel_device /dev/dri/renderD128 -v verbose"
            " -c:v {ffdecoder} -i {encoded} -vf 'hwdownload,format={hwformat}'"
            " -pix_fmt {mformat} -f rawvideo -vsync passthrough -vframes {frames}"
            " -y {decoded}".format(**vars(self)))
        get_media().baseline.check_psnr(
            psnr = calculate_psnr(
                self.source, self.decoded,
                self.width, self.height,
                self.frames, self.format),
            context = self.refctx,
        )
    def check_bitrate(self):
        """Check the produced bitrate against the rate-control target."""
        if "cbr" == self.rcmode:
            encsize = os.path.getsize(self.encoded)
            # kbit/s from total size, fps and frame count.
            bitrate_actual = encsize * 8 * self.fps / 1024.0 / self.frames
            bitrate_gap = abs(bitrate_actual - self.bitrate) / self.bitrate
            get_media()._set_test_details(
                size_encoded = encsize,
                bitrate_actual = "{:-.2f}".format(bitrate_actual),
                bitrate_gap = "{:.2%}".format(bitrate_gap))
            # acceptable bitrate within 10% of bitrate
            assert(bitrate_gap <= 0.10)
        elif "vbr" == self.rcmode:
            encsize = os.path.getsize(self.encoded)
            bitrate_actual = encsize * 8 * self.fps / 1024.0 / self.frames
            get_media()._set_test_details(
                size_encoded = encsize,
                bitrate_actual = "{:-.2f}".format(bitrate_actual))
            # acceptable bitrate within 25% of minrate and 10% of maxrate
            assert(self.minrate * 0.75 <= bitrate_actual <= self.maxrate * 1.10)
|
from setuptools import setup
# Runtime dependencies; requests is pinned to a known version.
requires = ["requests==2.20.0"]
# Packaging metadata for the 'flounder' Dialogflow entity helper.
setup(
    name='flounder',
    version='0.0.8',
    description='flounder is a library that create Entity of Dialogflow. This library uses RestAPI of dialogflow. It is not an official library.',
    url='https://github.com/flatfisher/flounder',
    author='flatfisher',
    author_email='shimano.entou@gmail.com',
    license='MIT',
    keywords=['dialogflow', 'dialogflow entity'],
    packages=[
        "flounder",
        "flounder.requests",
        "flounder.requests.entity"
    ],
    install_requires=requires,
    classifiers=[
        'Programming Language :: Python :: 2.7',
    ]
)
import unittest
from katas.beta.multiply_list_by_integer_with_restrictions import multiply
class MultiplyTestCase(unittest.TestCase):
    """Checks multiply() scales every list element by the given factor."""

    def test_equal_1(self):
        expected = [2, 4, 6]
        self.assertEqual(multiply(2, [1, 2, 3]), expected)

    def test_equal_2(self):
        expected = [2 * n for n in range(10)]
        self.assertEqual(multiply(2, list(range(10))), expected)
|
import unittest
from unittest.mock import patch
from io import StringIO
from src.add import add, hello_world, subtract
class TestFn(unittest.TestCase):
    """Unit tests for the src.add helpers (add, hello_world, subtract)."""

    def setUp(self):
        pass

    def test_add(self):
        expected = 6
        actual = add(4, 2)
        self.assertEqual(expected, actual)
        expected = 4
        actual = add(0, 4)
        self.assertEqual(expected, actual)

    def test_hello(self):
        expected = "Hello World"
        actual = hello_world()
        self.assertEqual(expected, actual)

    def test_subtract(self):
        expected = 2
        actual = subtract(4, 2)
        self.assertEqual(expected, actual)
        # BUGFIX: this second case previously called add(0, 4) -- a copy-paste
        # from test_add -- so subtract() was only exercised once.
        expected = 4
        actual = subtract(8, 4)
        self.assertEqual(expected, actual)

    def tearDown(self):
        print("Testing Done")
|
# Puzzle script: a boat 'B' drifts east across an ASCII fjord ('#' = shore),
# bouncing between the north and south shores; count the crossings.
with open('fjord.txt') as f:
    fjord = [line.strip('\n') for line in f]
# Locate the starting position of the boat as (column, row).
for i, line in enumerate(fjord):
    if 'B' in line:
        b = (line.index('B'), i)
# NOTE(review): uses the length of the LAST line -- assumes a rectangular grid.
fjord_length = len(line)
crosses = 1
direction = -1  # -1 for north, 1 for south
while b[0] < fjord_length-1:
    # Drift one column east and one row in the current direction.
    b = (b[0] + 1, b[1] + direction)
    # Looking three rows ahead in the travel direction: on shore ('#'),
    # sidestep one more column east and reverse -- presumably the boat turns
    # before running aground (TODO confirm against the puzzle statement).
    if fjord[b[1]+3*direction][b[0]] == '#':
        b = (b[0]+1, b[1])
        direction *= -1
        crosses += 1
print(crosses)
#!/usr/bin/python
import socket
import random
import subprocess
from subprocess import call
from time import sleep
import os
import datetime
import sys
import dbus
import click
def S_pause():
    """Read 'pause'/'play' commands from stdin and forward them to the 'sp' CLI.

    Opens a client socket to a local server on port 5000; the connection is
    made but never written to -- presumably the server side reacts to the
    connect itself (TODO confirm).  Loops until the user types 'bye'.
    """
    host = socket.gethostname() # as both code is running on same pc
    port = 5000  # socket server port number
    client_socket = socket.socket()  # instantiate
    client_socket.connect((host, port))  # connect to the server
    message = input(" -> " )  # take input
    while message.lower().strip() != 'bye':
        if message.lower().strip() == 'pause':
            list_files = subprocess.run(["sp","pause"])
            print("The exit code was: %d" % list_files.returncode)
        if message.lower().strip() == 'play':
            list_files = subprocess.run(["sp","play"])
            print("The exit code was: %d" % list_files.returncode)
        message = input(" -> " )  # take input
    # client_socket.close()  # close the connection
if __name__ == '__main__':
    S_pause()
from flask import Flask, request, render_template
import subprocess, time
app = Flask(__name__)
@app.route("/")
def hello():
return render_template("root.html")
@app.route("/echo", methods=['POST'])
def echo():
createuser=subprocess.Popen('aws iam create-user --user-name ' + str(request.form['text']), shell=True)
time.sleep(4)
wait=subprocess.Popen('aws iam user-exists --user-name ' +str(request.form['text']), shell=True)
addgroup=subprocess.Popen('aws iam add-user-to-group --user-name ' + str(request.form['text']) + ' --group-name workshopGrp', shell=True)
createpass=subprocess.Popen('aws iam create-login-profile --user-name ' + str(request.form['text']) + ' --password hackpsu', shell=True)
return render_template("confirmationpage.html", username=str(request.form['text']))
#return "User " + str(request.form['text']) + ' has been created. Your password is: hackpsu <br> Log into the AWS environment at <a href="https://hackpsu.signin.aws.amazon.com/console"> https://hackpsu.signin.aws.amazon.com/console </a>'
#add arns to iam policies
if __name__ == "__main__":
app.run(debug=True) |
def imprimeFuncao(nomesHub, letraEntrada):
    """Print every name in *nomesHub* that does NOT start with *letraEntrada*.

    The comparison is case-insensitive.  Iterates the names directly instead
    of indexing, and hoists the invariant ``letraEntrada.upper()`` out of the
    loop.
    """
    prefixo = letraEntrada.upper()
    for nome in nomesHub:
        if not nome.upper().startswith(prefixo):
            print(nome)
#!/usr/bin/env python
# coding: utf-8
# In[41]:
import numpy as np
import cv2
import imageio
from PIL import Image
import matplotlib.pyplot as plt
import time
import math
import glob
import matplotlib.colors as colors
from skimage import color, io, exposure
from scipy.ndimage import morphology as morph
from skimage.morphology import disk
from skimage.transform import resize
from skimage import filters
#%matplotlib inline
# In[45]:
def calcErrorSurface(panorama, curr_img, overlap, channel):
    """Squared per-pixel difference over the overlap region of one channel.

    Compares the last ``overlap+1`` columns of *panorama* against the first
    ``overlap+1`` columns of *curr_img*.
    """
    left = panorama[:, -overlap - 1:, channel]
    right = curr_img[:, :overlap + 1, channel]
    diff = left - right
    return diff * diff
def calcSeam(e):
    """Cumulative minimum-cost surface for vertical seam finding.

    ``E[h, w] = e[h, w] + min(E[h-1, w-1:w+2])`` with the predecessor window
    clamped at the left/right borders; the first row is copied verbatim.
    """
    rows, cols = e.shape
    E = np.zeros((rows, cols))
    E[0, :] = e[0, :]
    for h in range(1, rows):
        for w in range(cols):
            lo = max(w - 1, 0)
            hi = min(w + 1, cols - 1)
            E[h, w] = e[h, w] + E[h - 1, lo:hi + 1].min()
    return E
def calcSeamPath(E, e):
    """Backtrack the minimum-cost seam through the cumulative surface *E*.

    Returns an (h, 1) array of column indices, one per row.  *e* is the raw
    error surface, used to undo the accumulation when testing which
    predecessor column produced the current cumulative cost.
    """
    h = e.shape[0];
    path = np.zeros((h, 1));
    # Start from the cheapest column in the bottom row.
    idx = np.argmin(E[h-1, :]);
    path[h-1] = idx;
    for h in range(e.shape[0]-2,-1,-1):
        w = int(path[h+1][0]);
        # A neighbor is the predecessor iff its cumulative cost equals the
        # current cell's cost minus the current raw error (exact float
        # equality -- valid because E was built from these exact sums).
        if w > 0 and E[h, w-1] == E[h+1, w]-e[h+1, w]:
            path[h] = w-1;
        elif w < e.shape[1] - 1 and E[h, w+1] == E[h+1, w]-e[h+1, w]:
            path[h] = w+1;
        else:
            path[h] = w;
    # Avoid column 0 -- downstream slicing indexes with path-1.
    path[path==0] = 1
    return path
def stitchImage(panorama, curr_img, path, overlap):
    """Blend *curr_img* onto the right edge of *panorama* along a seam.

    For each row, the images are joined at the seam column given by *path*
    and cross-faded over a fixed-width band around the boundary.  Returns the
    widened panorama (height x (pano_w + img_w - overlap) x 3).
    """
    n = 1
    # Width (in columns) of the linear cross-fade band on each side of the seam.
    bound_threshold = 15;
    tmp = np.zeros((0,panorama.shape[1] + curr_img.shape[1] - overlap,3)).astype('float64');
    for h in range(0, panorama.shape[0]):
        # A: panorama part left of the seam; B: current image from the seam on.
        A = np.expand_dims(panorama[h, 0:-(overlap-int(path[h][0])+1), :], axis=0);
        B = np.expand_dims(curr_img[h, int(path[h][0])-1:, :], axis = 0);
        # ZA/ZB: both rows zero-padded / prefixed to the full output width so
        # they can be blended element-wise.
        ZA = np.concatenate((np.expand_dims(panorama[h,:,:],axis=0), np.zeros((A.shape[0],panorama.shape[1] + curr_img.shape[1] - overlap-np.expand_dims(panorama[h,:,:],axis=0).shape[1],3))), axis=1);
        ZB = np.concatenate((np.expand_dims(panorama[h,0:panorama.shape[1] + curr_img.shape[1] - overlap-np.expand_dims(curr_img[h,:,:],axis=0).shape[1],:], axis=0), np.expand_dims(curr_img[h,:,:],axis=0)), axis=1);
        # Blending weights: 1 over A, a linear 1->0 ramp across the band, 0 over B.
        filt_A = np.ones((1, A.shape[1]-bound_threshold));
        grad = np.expand_dims(np.linspace(1, 0, 2*bound_threshold+1, endpoint=True), axis = 0);
        filt_B = np.zeros((1, B.shape[1]-bound_threshold));
        blender = np.concatenate((filt_A, grad, filt_B), axis=1);
        # Weighted sum of the two padded rows (weights applied per channel).
        Z = (blender[:, 0:ZA.shape[1]].T*ZA.T).T + ((1-blender[:, 0:ZB.shape[1]]).T*ZB.T).T;
        tmp = np.concatenate((tmp,Z));
    return tmp
def colorCorrection(images_temp, shift, bestIndex, gamma=2.2):
    """Propagate per-channel gain correction outward from a reference image.

    Starting at ``bestIndex`` (the most colour-balanced frame), each image
    is scaled per channel so its overlap strip matches its already-corrected
    neighbour in gamma-decoded (linear) intensity: first a rightward sweep,
    then a leftward sweep. Images are modified in place and returned.

    NOTE(review): the leftward sweep also indexes ``shift[i-1]``; for the
    overlap with neighbour ``i+1`` one would normally expect ``shift[i]``,
    and ``i == 0`` makes ``shift[i-1]`` wrap around to ``shift[-1]`` —
    confirm whether this is intentional.
    """
    alpha = np.ones((3, len(images_temp)));
    # Rightward sweep: re-fit and re-apply gains for images after the reference.
    for rightBorder in range(bestIndex+1, len(images_temp)):
        for i in range(bestIndex+1, rightBorder+1):
            I = images_temp[i];
            J = images_temp[i-1];
            overlap = I.shape[1] - shift[i-1];
            for channel in range(3):
                # Gain that matches the overlap strips in gamma-decoded space.
                alpha[channel, i] = np.sum(np.power(J[:,-overlap-1:,channel], gamma))/np.sum(np.power(I[:,0:overlap+1,channel],gamma));
        G = np.sum(alpha, 1)/np.sum(np.square(alpha), 1);
        for i in range(bestIndex+1, rightBorder+1):
            for channel in range(3):
                images_temp[i][:,:,channel] = np.power(G[channel] * alpha[channel, i], 1.0/gamma) * images_temp[i][:,:,channel];
    # Leftward sweep: same procedure for images before the reference.
    for leftBorder in range(bestIndex-1, -1, -1):
        for i in range(bestIndex-1, leftBorder-1, -1):
            I = images_temp[i];
            J = images_temp[i+1];
            overlap = I.shape[1] - shift[i-1];
            for channel in range(3):
                alpha[channel, i] = np.sum(np.power(J[:,0:overlap+1,channel], gamma))/np.sum(np.power(I[:,-overlap-1:,channel],gamma));
        G = np.sum(alpha, 1)/np.sum(np.square(alpha), 1);
        for i in range(bestIndex-1, leftBorder-1, -1):
            for channel in range(3):
                images_temp[i][:,:,channel] = np.power(G[channel] * alpha[channel, i], 1.0/gamma) * images_temp[i][:,:,channel];
    return images_temp
def getBestIndex(images_temp):
    """Index of the image whose per-channel means are the most balanced.

    "Balanced" means the smallest spread (max - min) between the mean
    values of the three colour channels; that image is used as the colour
    reference for correction.
    """
    best_idx = 0
    best_spread = 255 ** 5  # sentinel larger than any possible spread
    for i, img in enumerate(images_temp):
        means = np.array([np.mean(img[:, :, c]) for c in range(3)])
        spread = np.max(means) - np.min(means)
        if spread < best_spread:
            best_idx = i
            best_spread = spread
    return best_idx
def calcPanorama(images_dir, shift):
    """Stitch every ``in-*`` image found in ``images_dir`` into one panorama.

    Pipeline: load frames -> pick a colour reference (getBestIndex) ->
    gamma-domain colour correction (colorCorrection) -> per-pair seam
    computation and blending (calcErrorSurface/calcSeam/calcSeamPath/
    stitchImage). Writes ``output.png`` into ``images_dir``.

    :param images_dir: directory with the source frames and output target.
    :param shift: per-pair horizontal shift in pixels between consecutive
        frames; the overlap is ``frame_width - shift[i-1]``.
    :return: the stitched panorama as a float64 array.
    """
    start = time.time()
    # read panorama source images
    files = glob.glob(images_dir + 'in-*.*g');
    files = sorted(files)
    print(len(files))
    image_files = [np.array(Image.open(files[i])) for i in range(len(files))];
    images_temp = [ image_files[i].astype('float64') for i in range(len(image_files))];
    # Grayscale inputs are converted to RGB and resized to 200x300.
    if image_files[0].ndim == 2 or image_files[0].shape[2] == 1:
        images_temp = [ cv2.resize(cv2.cvtColor(image_files[i], cv2.COLOR_GRAY2RGB), (200, 300)).astype('float64') for i in range(len(image_files))];
    bestIndex = getBestIndex(images_temp);
    print("The image chosen as the base image for color is the image with index " + str(bestIndex)+'.')
    images_temp = colorCorrection(images_temp, shift, bestIndex);
    panorama = images_temp[0];
    for i in range(1, len(images_temp)):
        curr_img = images_temp[i];
        # Carve the seam along the channel with the most variance.
        channel = np.argmax([np.var(curr_img[:,:,0]), np.var(curr_img[:,:,1]), np.var(curr_img[:,:,2])]);
        overlap = curr_img.shape[1] - shift[i-1];
        e = calcErrorSurface(panorama, curr_img, overlap, channel);
        E = calcSeam(e)
        path = calcSeamPath(E,e)
        panorama = stitchImage(panorama, curr_img, path, overlap)
        print("The time taken for merging " + str(i+1) + " images: " + str(time.time() - start))
    # fig = plt.figure(figsize=(20,10))
    # plt.axis('off')
    # plt.imshow(panorama/np.max(panorama));
    print("The image has been saved as output.png")
    imageio.imwrite(images_dir+'output.png', np.array(255*panorama/np.max(panorama)).astype('uint8'));
    return panorama
# In[46]:
# Build the panorama for each prepared result set. The second argument is
# the per-pair horizontal shift (pixels) between consecutive source frames.
calcPanorama('./results/3/', [55]*11);
# In[35]:
calcPanorama('./results/2/', [109]*6);
# In[36]:
calcPanorama('./results/1/', [36]*16);
# In[37]:
calcPanorama('./results/4/', [85]*5);
|
# Iterate over each character of a string, printing one per line.
# Fixed: the original used Python 2 `print x` statements; the
# parenthesised print() form behaves identically on Python 2 and 3
# for a single argument.
thing = "spam!"
for c in thing:
    print(c)
word = "eggs!"
for char in word:
    print(char)
# ========================================
# Echo a string, censoring the letter 'a' (either case) as 'X'.
s = "A bird in the hand..."
for char in s:
    if (char == 'A' or char == 'a'):
        print('X')
    else:
        print(char)
# ========================================
from initROOT import initROOT
import ROOT
from ROOT import gROOT, TCanvas, TF1
import numpy as np
import matplotlib.pyplot as plt
initROOT()
# Open the processed PMT waveform file (Python 2 / PyROOT script).
# pmt=ROOT.PmtData("/home/mage/Data/p20Data/root/P20_2015-04-16-10-44-25.root")
pmt=ROOT.PmtData("/home/mage/Data/proc_cry_0.root.pro")
event=0
ent= pmt.GetEntries()
# Pre-compute pulse integrals for channels 0 and 1.
pmt.CalIntegral(0)
pmt.CalIntegral(1)
integral0=[]
integral1=[]
pulseList=[]
dtList=[]
# Select events with >= 4 channels and a large channel-0 integral, then
# measure the time difference between the 50%-of-peak crossing points of
# the first two channels' traces.
for i in xrange(0,ent):
    pmt.SetEntry(i)
    i0= pmt.GetPulseIntegral(0,i)
    i1= pmt.GetPulseIntegral(1,i)
    if pmt.GetNCha() >=4 and i0 >10000:
        ch1=pmt.GetPulse(0)
        ch2=pmt.GetPulse(1)
        # Pulses are negative-going: the minimum sample is the peak amplitude.
        peak1=np.array(ch1).min()
        peak2=np.array(ch2).min()
        if peak1 < - 50 or peak2<-50:
            # pmt.GetTrace(0).Draw()
            # pmt.GetTrace(1).Draw("SAME")
            # input("pause ")
            tr1=pmt.GetTrace(0)
            tr2=pmt.GetTrace(1)
            t1=0
            t2=0
            # NOTE(review): these inner loops reuse the name `i`, clobbering
            # the outer entry index — confirm this is intended.
            for i in xrange(0,1100):
                if tr1.Eval(i) < .5*peak1:
                    t1=i
                    break
            for i in xrange(0,1200):
                if tr2.Eval(i) < .5*peak2:
                    t2=i
                    break
            dt=t1-t2
            integral0.append(i0)
            integral1.append(i1)
            # Keep only physically plausible time differences (< 50 ns).
            if abs(dt)<50:
                dtList.append(dt)
                pulse=np.array(pmt.GetPulse(0))
                pulseList.append(pulse)
                # plt.plot(pulse)
                # plt.show()
# Summary statistics of the accepted time differences.
print len(dtList)
print np.array(dtList).mean()
print np.array(dtList).var()
plt.hist(np.array(dtList),bins=50)
plt.xlabel("Delta Time (ns)")
plt.show()
# pmt.GetIntegral(0).Draw()
# input()
# pmt.GetIntegral(1).Draw()
# input()
# pmt.SetEntry(i)
# print i
# print pmt.GetNCha()
# if pmt.GetNCha() >= 4:
# pmt.GetTrace(0).Draw()
# pmt.GetTrace(1).Draw("SAME")
# input("pause ")
# while event >=0:
# pmt.GetTrace(0).Draw()
# # input("pause ")
# pmt.GetTrace(1).SetLineColor(4);
# pmt.GetTrace(1).Draw("SAME")
# event=int(raw_input("promt: "))
# pmt.SetEntry(event)
#############
|
import train as train
import time, random
import scipy.sparse
import pycrfsuite as crf
import helper
from nltk.tokenize import sent_tokenize, word_tokenize
from nltk.tag import pos_tag
import random
import text2num as t2n
def trainModel(holdback=-1):
    """Train a CRF model on the module-level feature/label sequences.

    NOTE(review): relies on module globals ``trainX``/``trainY`` (group 0),
    ``testX``/``testY`` (group 1) and ``trained_model`` (output path), all
    assigned in the ``__main__`` block below — confirm before reusing.

    :param holdback: group id passed to ``trainer.train`` as the held-out
        group (-1 = train on everything).
    :return: the fitted ``pycrfsuite.Trainer``.
    """
    ## extract features
    trainer = crf.Trainer(verbose=True)
    for xseq, yseq in zip(trainX, trainY):
        trainer.append(xseq, yseq, group = 0)
    for xseq, yseq in zip(testX, testY):
        trainer.append(xseq, yseq, group = 1)
    trainer.set_params({
        'c1': 2.0, # coefficient for L1 penalty
        'c2': 1e-3, # coefficient for L2 penalty
        # include transitions that are possible, but not observed
        'max_iterations': 250, # stop earlier
        'feature.possible_transitions': True,
        'feature.possible_states': True,
    })
    trainer.train(trained_model, holdback)
    return trainer
def predict():
    """Tag every sequence in the module-level ``testX`` with the saved model.

    NOTE(review): this definition is shadowed by the two-argument
    ``predict`` defined immediately below, so it is unreachable as
    written — rename one of the two if both are needed.

    :return: (predictedY, testY, confidences, confidences_beam, model info)
    """
    tagger = crf.Tagger()
    tagger.open(trained_model)
    predictedY = []
    confidences = []
    confidences_beam = []
    for xseq in testX:
        yseq = tagger.tag(xseq)
        predictedY.append(yseq)
        # Marginal probability of the predicted tag at each position.
        confidences.append([tagger.marginal(yseq[i],i) for i in range(len(yseq))])
        # Full per-position marginal distribution over every candidate tag.
        confidences_beam.append([ [tagger.marginal(tag, i) for tag in train.int2tags] for i in range(len(yseq))])
    return predictedY, testY, confidences, confidences_beam, tagger.info()
def predict(article, trained_model):
    """Tag a single article with a trained CRF model.

    :param article: sequence of tokens to tag.
    :param trained_model: path to a pycrfsuite model file.
    :return: (yseq, confidences) — the predicted tag sequence and the
        per-token marginal probability of each predicted tag.
    """
    tagger = crf.Tagger()
    tagger.open(trained_model)
    xseq = articleFeatureExtract(article)
    yseq = tagger.tag(xseq)
    # Marginal probability of the chosen tag at each position.
    # Fixed: the original also built a full per-tag `confidences_beam`
    # matrix that was never returned or used — dropped the wasted work.
    confidences = [tagger.marginal(yseq[i],i) for i in range(len(yseq))]
    return yseq, confidences
def featureExtract(data, identifier, prev_n = 4, next_n = 4):
    """Extract CRF feature sequences and tag-name labels for every article.

    Each element of ``data`` is (article_tokens, tag_ids); tag ids are
    mapped to names through ["TAG"] + train.int2tags.
    """
    features = []
    labels = []
    int2tags = ["TAG"] + train.int2tags
    for entry in data:
        article, tag_ids = entry[0], entry[1]
        features.append(articleFeatureExtract(article, prev_n, next_n))
        labels.append([int2tags[t] for t in tag_ids])
    return features, labels
def articleFeatureExtract(article, prev_n = 4, next_n = 4):
    """Build a per-token CRF feature dict sequence for one article.

    Each token gets: a bag of surrounding tokens ("context": up to
    ``prev_n`` before and ``next_n`` after), the token itself (both under
    the "token" key and as its own indicator key), and the hand-crafted
    features from ``helper.getOtherFeatures``.

    Fixed: removed the unused ``title_features`` and ``labels`` locals and
    the dead commented-out title-extraction block.

    :param article: list of tokens.
    :return: list of feature dicts, one per token.
    """
    article_features = []
    for token_ind in range(len(article)):
        token = article[token_ind]
        # Bag-of-words context window around the current token.
        context = {}
        for i in range(max(0, token_ind - prev_n), min(token_ind + next_n, len(article))):
            context_token = article[i]
            context[context_token] =1
        token_features = {}
        token_features["context"] = context
        token_features["token"] = token
        token_features[token] = 1
        token_features["other"] = helper.getOtherFeatures(token)
        article_features.append(token_features)
    return article_features
if __name__ == '__main__':
    ##SCRIPT
    # Python 2 script (print statements, builtin reload). Loads the EMA
    # tagged datasets, extracts CRF features and trains the model,
    # holding back group 1 (the dev set) for evaluation.
    print "reload helper"
    reload(helper)
    helper.load_constants()
    print "end load helper"
    retrain = True
    if retrain:
        num_blocks = 1
        ## num_blocks = 5
        training_file = "../data/tagged_data/EMA/train.tag"
        dev_file = "../data/tagged_data/EMA/dev.tag"
        test_file = "../data/tagged_data/EMA/test.tag"
        trained_model = "trained_model_crf.EMA.p"
        print "load files"
        train_data, train_identifier = train.load_data(training_file)
        test_data, test_identifier = train.load_data(dev_file)
        print "End load files"
        # Context window size used for feature extraction.
        prev_n = 2
        next_n = 2
        print "Start Feature extract on train set"
        trainX, trainY = featureExtract(train_data,train_identifier, prev_n, next_n )
        print "Done Feature extract on train set"
        #trainX, trainY = featureExtract(dev_data, prev_n, next_n)
        print "Start Feature extract on test set"
        testX, testY = featureExtract(test_data, test_identifier, prev_n, next_n)
        print "Done Feature extract on test set"
        #testX, testY = featureExtract(train_data[split_index:], prev_n, next_n)
        trainer = trainModel(1)
|
#from geopy.geocoders import Nominatim
from geopy.geocoders import Nominatim

geolocator = Nominatim(user_agent="example app")
# Geocode once and reuse the result: every .geocode() call is a network
# request to the Nominatim service, and the original issued the same
# lookup twice (slower, and risks Nominatim rate limiting).
location = geolocator.geocode("Airport Rd Peelamedu Coimbatore, India")
print("Complete_details", location.raw)
print("\n")
print("Latitude and Longitude", location.point)
|
class Solution(object):
    def twoSum(self, nums, target):
        """
        :type nums: List[int]
        :type target: int
        :rtype: List[int]
        """
        # One pass with a dict: for each value seen, record which index
        # would complete the pair.  When the current value was previously
        # recorded as a needed complement, the pair is found.
        needed = {}
        for idx, value in enumerate(nums):
            if value in needed:
                return [needed[value], idx]
            needed[target - value] = idx
        return [-1, -1]

    def twoSum2(self, nums, target):
        """
        :type nums: List[int]
        :type target: int
        :rtype: List[int]
        """
        # Quadratic fallback: for each element, scan the rest of the list
        # for its complement.  Returns False when no pair exists.
        if not nums:
            return False
        offset = 0
        for idx, value in enumerate(nums):
            offset += 1
            complement = target - value
            rest = nums[offset:]
            if complement in rest:
                return [idx, rest.index(complement) + offset]
        return False
if __name__ == '__main__':
    # Fixed: Python 2 `print` statement -> print() call, which behaves
    # identically for one argument on Python 2 and runs on Python 3.
    print(Solution().twoSum2([3,2,4], 6))
|
import io
from collections import OrderedDict
from typing import List, Optional, Tuple
import pytest
import torch
from common_utils import assert_equal, set_rng_seed
from torchvision import models, ops
from torchvision.models.detection.faster_rcnn import FastRCNNPredictor, TwoMLPHead
from torchvision.models.detection.image_list import ImageList
from torchvision.models.detection.roi_heads import RoIHeads
from torchvision.models.detection.rpn import AnchorGenerator, RegionProposalNetwork, RPNHead
from torchvision.models.detection.transform import GeneralizedRCNNTransform
from torchvision.ops import _register_onnx_ops
# In environments without onnxruntime we prefer to
# invoke all tests in the repo and have this one skipped rather than fail.
# pytest.importorskip skips this whole module at collection time when the
# onnxruntime package cannot be imported.
onnxruntime = pytest.importorskip("onnxruntime")
class TestONNXExporter:
    """Export torchvision ops/models to ONNX and check parity with onnxruntime.

    Each test builds a model (or a small wrapper Module), exports it via
    ``torch.onnx.export`` and asserts that onnxruntime reproduces the eager
    PyTorch outputs within tolerance (see ``ort_validate``).
    """
    @classmethod
    def setup_class(cls):
        # Fixed seed so exported graphs and random inputs are reproducible.
        torch.manual_seed(123)
    def run_model(
        self,
        model,
        inputs_list,
        do_constant_folding=True,
        dynamic_axes=None,
        output_names=None,
        input_names=None,
        opset_version: Optional[int] = None,
    ):
        """Export ``model`` using the first entry of ``inputs_list``, then
        validate the ONNX graph against eager execution for every entry."""
        if opset_version is None:
            opset_version = _register_onnx_ops.BASE_ONNX_OPSET_VERSION
        model.eval()
        onnx_io = io.BytesIO()
        if isinstance(inputs_list[0][-1], dict):
            # Append an empty kwargs dict so export does not interpret the
            # trailing dict of the first input as keyword arguments.
            torch_onnx_input = inputs_list[0] + ({},)
        else:
            torch_onnx_input = inputs_list[0]
        # export to onnx with the first input
        torch.onnx.export(
            model,
            torch_onnx_input,
            onnx_io,
            do_constant_folding=do_constant_folding,
            opset_version=opset_version,
            dynamic_axes=dynamic_axes,
            input_names=input_names,
            output_names=output_names,
            verbose=True,
        )
        # validate the exported model with onnx runtime
        for test_inputs in inputs_list:
            with torch.no_grad():
                if isinstance(test_inputs, torch.Tensor) or isinstance(test_inputs, list):
                    test_inputs = (test_inputs,)
                test_ouputs = model(*test_inputs)
                if isinstance(test_ouputs, torch.Tensor):
                    test_ouputs = (test_ouputs,)
            self.ort_validate(onnx_io, test_inputs, test_ouputs)
    def ort_validate(self, onnx_io, inputs, outputs):
        """Run the serialized ONNX model under onnxruntime and compare its
        outputs with the eager PyTorch ``outputs`` (rtol=1e-3, atol=1e-5)."""
        inputs, _ = torch.jit._flatten(inputs)
        outputs, _ = torch.jit._flatten(outputs)
        def to_numpy(tensor):
            if tensor.requires_grad:
                return tensor.detach().cpu().numpy()
            else:
                return tensor.cpu().numpy()
        inputs = list(map(to_numpy, inputs))
        outputs = list(map(to_numpy, outputs))
        ort_session = onnxruntime.InferenceSession(onnx_io.getvalue())
        # compute onnxruntime output prediction
        ort_inputs = {ort_session.get_inputs()[i].name: inpt for i, inpt in enumerate(inputs)}
        ort_outs = ort_session.run(None, ort_inputs)
        for i in range(0, len(outputs)):
            torch.testing.assert_close(outputs[i], ort_outs[i], rtol=1e-03, atol=1e-05)
    def test_nms(self):
        num_boxes = 100
        boxes = torch.rand(num_boxes, 4)
        boxes[:, 2:] += boxes[:, :2]
        scores = torch.randn(num_boxes)
        class Module(torch.nn.Module):
            def forward(self, boxes, scores):
                return ops.nms(boxes, scores, 0.5)
        self.run_model(Module(), [(boxes, scores)])
    def test_batched_nms(self):
        num_boxes = 100
        boxes = torch.rand(num_boxes, 4)
        boxes[:, 2:] += boxes[:, :2]
        scores = torch.randn(num_boxes)
        idxs = torch.randint(0, 5, size=(num_boxes,))
        class Module(torch.nn.Module):
            def forward(self, boxes, scores, idxs):
                return ops.batched_nms(boxes, scores, idxs, 0.5)
        self.run_model(Module(), [(boxes, scores, idxs)])
    def test_clip_boxes_to_image(self):
        boxes = torch.randn(5, 4) * 500
        boxes[:, 2:] += boxes[:, :2]
        size = torch.randn(200, 300)
        size_2 = torch.randn(300, 400)
        class Module(torch.nn.Module):
            def forward(self, boxes, size):
                return ops.boxes.clip_boxes_to_image(boxes, size.shape)
        self.run_model(
            Module(), [(boxes, size), (boxes, size_2)], input_names=["boxes", "size"], dynamic_axes={"size": [0, 1]}
        )
    def test_roi_align(self):
        x = torch.rand(1, 1, 10, 10, dtype=torch.float32)
        single_roi = torch.tensor([[0, 0, 0, 4, 4]], dtype=torch.float32)
        model = ops.RoIAlign((5, 5), 1, 2)
        self.run_model(model, [(x, single_roi)])
        x = torch.rand(1, 1, 10, 10, dtype=torch.float32)
        single_roi = torch.tensor([[0, 0, 0, 4, 4]], dtype=torch.float32)
        model = ops.RoIAlign((5, 5), 1, -1)
        self.run_model(model, [(x, single_roi)])
    def test_roi_align_aligned(self):
        # aligned=True requires the opset-16 export of roi_align.
        supported_onnx_version = _register_onnx_ops._ONNX_OPSET_VERSION_16
        x = torch.rand(1, 1, 10, 10, dtype=torch.float32)
        single_roi = torch.tensor([[0, 1.5, 1.5, 3, 3]], dtype=torch.float32)
        model = ops.RoIAlign((5, 5), 1, 2, aligned=True)
        self.run_model(model, [(x, single_roi)], opset_version=supported_onnx_version)
        x = torch.rand(1, 1, 10, 10, dtype=torch.float32)
        single_roi = torch.tensor([[0, 0.2, 0.3, 4.5, 3.5]], dtype=torch.float32)
        model = ops.RoIAlign((5, 5), 0.5, 3, aligned=True)
        self.run_model(model, [(x, single_roi)], opset_version=supported_onnx_version)
        x = torch.rand(1, 1, 10, 10, dtype=torch.float32)
        single_roi = torch.tensor([[0, 0.2, 0.3, 4.5, 3.5]], dtype=torch.float32)
        model = ops.RoIAlign((5, 5), 1.8, 2, aligned=True)
        self.run_model(model, [(x, single_roi)], opset_version=supported_onnx_version)
        x = torch.rand(1, 1, 10, 10, dtype=torch.float32)
        single_roi = torch.tensor([[0, 0.2, 0.3, 4.5, 3.5]], dtype=torch.float32)
        model = ops.RoIAlign((2, 2), 2.5, 0, aligned=True)
        self.run_model(model, [(x, single_roi)], opset_version=supported_onnx_version)
        x = torch.rand(1, 1, 10, 10, dtype=torch.float32)
        single_roi = torch.tensor([[0, 0.2, 0.3, 4.5, 3.5]], dtype=torch.float32)
        model = ops.RoIAlign((2, 2), 2.5, -1, aligned=True)
        self.run_model(model, [(x, single_roi)], opset_version=supported_onnx_version)
    def test_roi_align_malformed_boxes(self):
        supported_onnx_version = _register_onnx_ops._ONNX_OPSET_VERSION_16
        x = torch.randn(1, 1, 10, 10, dtype=torch.float32)
        single_roi = torch.tensor([[0, 2, 0.3, 1.5, 1.5]], dtype=torch.float32)
        model = ops.RoIAlign((5, 5), 1, 1, aligned=True)
        self.run_model(model, [(x, single_roi)], opset_version=supported_onnx_version)
    def test_roi_pool(self):
        x = torch.rand(1, 1, 10, 10, dtype=torch.float32)
        rois = torch.tensor([[0, 0, 0, 4, 4]], dtype=torch.float32)
        pool_h = 5
        pool_w = 5
        model = ops.RoIPool((pool_h, pool_w), 2)
        self.run_model(model, [(x, rois)])
    def test_resize_images(self):
        class TransformModule(torch.nn.Module):
            def __init__(self_module):
                super().__init__()
                self_module.transform = self._init_test_generalized_rcnn_transform()
            def forward(self_module, images):
                return self_module.transform.resize(images, None)[0]
        input = torch.rand(3, 10, 20)
        input_test = torch.rand(3, 100, 150)
        self.run_model(
            TransformModule(), [(input,), (input_test,)], input_names=["input1"], dynamic_axes={"input1": [0, 1, 2]}
        )
    def test_transform_images(self):
        class TransformModule(torch.nn.Module):
            def __init__(self_module):
                super().__init__()
                self_module.transform = self._init_test_generalized_rcnn_transform()
            def forward(self_module, images):
                return self_module.transform(images)[0].tensors
        input = torch.rand(3, 100, 200), torch.rand(3, 200, 200)
        input_test = torch.rand(3, 100, 200), torch.rand(3, 200, 200)
        self.run_model(TransformModule(), [(input,), (input_test,)])
    def _init_test_generalized_rcnn_transform(self):
        # Small min/max sizes keep the exported graphs fast to run.
        min_size = 100
        max_size = 200
        image_mean = [0.485, 0.456, 0.406]
        image_std = [0.229, 0.224, 0.225]
        transform = GeneralizedRCNNTransform(min_size, max_size, image_mean, image_std)
        return transform
    def _init_test_rpn(self):
        # Region proposal network with the standard FPN anchor configuration.
        anchor_sizes = ((32,), (64,), (128,), (256,), (512,))
        aspect_ratios = ((0.5, 1.0, 2.0),) * len(anchor_sizes)
        rpn_anchor_generator = AnchorGenerator(anchor_sizes, aspect_ratios)
        out_channels = 256
        rpn_head = RPNHead(out_channels, rpn_anchor_generator.num_anchors_per_location()[0])
        rpn_fg_iou_thresh = 0.7
        rpn_bg_iou_thresh = 0.3
        rpn_batch_size_per_image = 256
        rpn_positive_fraction = 0.5
        rpn_pre_nms_top_n = dict(training=2000, testing=1000)
        rpn_post_nms_top_n = dict(training=2000, testing=1000)
        rpn_nms_thresh = 0.7
        rpn_score_thresh = 0.0
        rpn = RegionProposalNetwork(
            rpn_anchor_generator,
            rpn_head,
            rpn_fg_iou_thresh,
            rpn_bg_iou_thresh,
            rpn_batch_size_per_image,
            rpn_positive_fraction,
            rpn_pre_nms_top_n,
            rpn_post_nms_top_n,
            rpn_nms_thresh,
            score_thresh=rpn_score_thresh,
        )
        return rpn
    def _init_test_roi_heads_faster_rcnn(self):
        # Faster R-CNN box head/predictor with COCO's 91 classes.
        out_channels = 256
        num_classes = 91
        box_fg_iou_thresh = 0.5
        box_bg_iou_thresh = 0.5
        box_batch_size_per_image = 512
        box_positive_fraction = 0.25
        bbox_reg_weights = None
        box_score_thresh = 0.05
        box_nms_thresh = 0.5
        box_detections_per_img = 100
        box_roi_pool = ops.MultiScaleRoIAlign(featmap_names=["0", "1", "2", "3"], output_size=7, sampling_ratio=2)
        resolution = box_roi_pool.output_size[0]
        representation_size = 1024
        box_head = TwoMLPHead(out_channels * resolution**2, representation_size)
        representation_size = 1024
        box_predictor = FastRCNNPredictor(representation_size, num_classes)
        roi_heads = RoIHeads(
            box_roi_pool,
            box_head,
            box_predictor,
            box_fg_iou_thresh,
            box_bg_iou_thresh,
            box_batch_size_per_image,
            box_positive_fraction,
            bbox_reg_weights,
            box_score_thresh,
            box_nms_thresh,
            box_detections_per_img,
        )
        return roi_heads
    def get_features(self, images):
        """Synthetic FPN-style feature pyramid (strides 4..64) for a 2-image batch."""
        s0, s1 = images.shape[-2:]
        features = [
            ("0", torch.rand(2, 256, s0 // 4, s1 // 4)),
            ("1", torch.rand(2, 256, s0 // 8, s1 // 8)),
            ("2", torch.rand(2, 256, s0 // 16, s1 // 16)),
            ("3", torch.rand(2, 256, s0 // 32, s1 // 32)),
            ("4", torch.rand(2, 256, s0 // 64, s1 // 64)),
        ]
        features = OrderedDict(features)
        return features
    def test_rpn(self):
        set_rng_seed(0)
        class RPNModule(torch.nn.Module):
            def __init__(self_module):
                super().__init__()
                self_module.rpn = self._init_test_rpn()
            def forward(self_module, images, features):
                images = ImageList(images, [i.shape[-2:] for i in images])
                return self_module.rpn(images, features)
        images = torch.rand(2, 3, 150, 150)
        features = self.get_features(images)
        images2 = torch.rand(2, 3, 80, 80)
        test_features = self.get_features(images2)
        model = RPNModule()
        model.eval()
        model(images, features)
        self.run_model(
            model,
            [(images, features), (images2, test_features)],
            input_names=["input1", "input2", "input3", "input4", "input5", "input6"],
            dynamic_axes={
                "input1": [0, 1, 2, 3],
                "input2": [0, 1, 2, 3],
                "input3": [0, 1, 2, 3],
                "input4": [0, 1, 2, 3],
                "input5": [0, 1, 2, 3],
                "input6": [0, 1, 2, 3],
            },
        )
    def test_multi_scale_roi_align(self):
        class TransformModule(torch.nn.Module):
            def __init__(self):
                super().__init__()
                self.model = ops.MultiScaleRoIAlign(["feat1", "feat2"], 3, 2)
                self.image_sizes = [(512, 512)]
            def forward(self, input, boxes):
                return self.model(input, boxes, self.image_sizes)
        i = OrderedDict()
        i["feat1"] = torch.rand(1, 5, 64, 64)
        i["feat2"] = torch.rand(1, 5, 16, 16)
        boxes = torch.rand(6, 4) * 256
        boxes[:, 2:] += boxes[:, :2]
        i1 = OrderedDict()
        i1["feat1"] = torch.rand(1, 5, 64, 64)
        i1["feat2"] = torch.rand(1, 5, 16, 16)
        boxes1 = torch.rand(6, 4) * 256
        boxes1[:, 2:] += boxes1[:, :2]
        self.run_model(
            TransformModule(),
            [
                (
                    i,
                    [boxes],
                ),
                (
                    i1,
                    [boxes1],
                ),
            ],
        )
    def test_roi_heads(self):
        class RoiHeadsModule(torch.nn.Module):
            def __init__(self_module):
                super().__init__()
                self_module.transform = self._init_test_generalized_rcnn_transform()
                self_module.rpn = self._init_test_rpn()
                self_module.roi_heads = self._init_test_roi_heads_faster_rcnn()
            def forward(self_module, images, features):
                original_image_sizes = [img.shape[-2:] for img in images]
                images = ImageList(images, [i.shape[-2:] for i in images])
                proposals, _ = self_module.rpn(images, features)
                detections, _ = self_module.roi_heads(features, proposals, images.image_sizes)
                detections = self_module.transform.postprocess(detections, images.image_sizes, original_image_sizes)
                return detections
        images = torch.rand(2, 3, 100, 100)
        features = self.get_features(images)
        images2 = torch.rand(2, 3, 150, 150)
        test_features = self.get_features(images2)
        model = RoiHeadsModule()
        model.eval()
        model(images, features)
        self.run_model(
            model,
            [(images, features), (images2, test_features)],
            input_names=["input1", "input2", "input3", "input4", "input5", "input6"],
            dynamic_axes={
                "input1": [0, 1, 2, 3],
                "input2": [0, 1, 2, 3],
                "input3": [0, 1, 2, 3],
                "input4": [0, 1, 2, 3],
                "input5": [0, 1, 2, 3],
                "input6": [0, 1, 2, 3],
            },
        )
    def get_image(self, rel_path: str, size: Tuple[int, int]) -> torch.Tensor:
        """Load a test asset, resize it, and return it as a float tensor."""
        import os
        from PIL import Image
        from torchvision.transforms import functional as F
        data_dir = os.path.join(os.path.dirname(__file__), "assets")
        path = os.path.join(data_dir, *rel_path.split("/"))
        image = Image.open(path).convert("RGB").resize(size, Image.BILINEAR)
        return F.convert_image_dtype(F.pil_to_tensor(image))
    def get_test_images(self) -> Tuple[List[torch.Tensor], List[torch.Tensor]]:
        return (
            [self.get_image("encode_jpeg/grace_hopper_517x606.jpg", (100, 320))],
            [self.get_image("fakedata/logos/rgb_pytorch.png", (250, 380))],
        )
    def test_faster_rcnn(self):
        images, test_images = self.get_test_images()
        dummy_image = [torch.ones(3, 100, 100) * 0.3]
        model = models.detection.faster_rcnn.fasterrcnn_resnet50_fpn(
            weights=models.detection.faster_rcnn.FasterRCNN_ResNet50_FPN_Weights.DEFAULT, min_size=200, max_size=300
        )
        model.eval()
        model(images)
        # Test exported model on images of different size, or dummy input
        self.run_model(
            model,
            [(images,), (test_images,), (dummy_image,)],
            input_names=["images_tensors"],
            output_names=["outputs"],
            dynamic_axes={"images_tensors": [0, 1, 2], "outputs": [0, 1, 2]},
        )
        # Test exported model for an image with no detections on other images
        self.run_model(
            model,
            [(dummy_image,), (images,)],
            input_names=["images_tensors"],
            output_names=["outputs"],
            dynamic_axes={"images_tensors": [0, 1, 2], "outputs": [0, 1, 2]},
        )
    # Verify that paste_mask_in_image behaves the same in tracing.
    # This test also compares both paste_masks_in_image and _onnx_paste_masks_in_image
    # (since jit_trace will call _onnx_paste_masks_in_image).
    def test_paste_mask_in_image(self):
        masks = torch.rand(10, 1, 26, 26)
        boxes = torch.rand(10, 4)
        boxes[:, 2:] += torch.rand(10, 2)
        boxes *= 50
        o_im_s = (100, 100)
        from torchvision.models.detection.roi_heads import paste_masks_in_image
        out = paste_masks_in_image(masks, boxes, o_im_s)
        jit_trace = torch.jit.trace(
            paste_masks_in_image, (masks, boxes, [torch.tensor(o_im_s[0]), torch.tensor(o_im_s[1])])
        )
        out_trace = jit_trace(masks, boxes, [torch.tensor(o_im_s[0]), torch.tensor(o_im_s[1])])
        assert torch.all(out.eq(out_trace))
        masks2 = torch.rand(20, 1, 26, 26)
        boxes2 = torch.rand(20, 4)
        boxes2[:, 2:] += torch.rand(20, 2)
        boxes2 *= 100
        o_im_s2 = (200, 200)
        from torchvision.models.detection.roi_heads import paste_masks_in_image
        out2 = paste_masks_in_image(masks2, boxes2, o_im_s2)
        out_trace2 = jit_trace(masks2, boxes2, [torch.tensor(o_im_s2[0]), torch.tensor(o_im_s2[1])])
        assert torch.all(out2.eq(out_trace2))
    def test_mask_rcnn(self):
        images, test_images = self.get_test_images()
        dummy_image = [torch.ones(3, 100, 100) * 0.3]
        model = models.detection.mask_rcnn.maskrcnn_resnet50_fpn(
            weights=models.detection.mask_rcnn.MaskRCNN_ResNet50_FPN_Weights.DEFAULT, min_size=200, max_size=300
        )
        model.eval()
        model(images)
        # Test exported model on images of different size, or dummy input
        self.run_model(
            model,
            [(images,), (test_images,), (dummy_image,)],
            input_names=["images_tensors"],
            output_names=["boxes", "labels", "scores", "masks"],
            dynamic_axes={
                "images_tensors": [0, 1, 2],
                "boxes": [0, 1],
                "labels": [0],
                "scores": [0],
                "masks": [0, 1, 2],
            },
        )
        # Test exported model for an image with no detections on other images
        self.run_model(
            model,
            [(dummy_image,), (images,)],
            input_names=["images_tensors"],
            output_names=["boxes", "labels", "scores", "masks"],
            dynamic_axes={
                "images_tensors": [0, 1, 2],
                "boxes": [0, 1],
                "labels": [0],
                "scores": [0],
                "masks": [0, 1, 2],
            },
        )
    # Verify that heatmaps_to_keypoints behaves the same in tracing.
    # This test also compares both heatmaps_to_keypoints and _onnx_heatmaps_to_keypoints
    # (since jit_trace will call _heatmaps_to_keypoints).
    def test_heatmaps_to_keypoints(self):
        maps = torch.rand(10, 1, 26, 26)
        rois = torch.rand(10, 4)
        from torchvision.models.detection.roi_heads import heatmaps_to_keypoints
        out = heatmaps_to_keypoints(maps, rois)
        jit_trace = torch.jit.trace(heatmaps_to_keypoints, (maps, rois))
        out_trace = jit_trace(maps, rois)
        assert_equal(out[0], out_trace[0])
        assert_equal(out[1], out_trace[1])
        maps2 = torch.rand(20, 2, 21, 21)
        rois2 = torch.rand(20, 4)
        from torchvision.models.detection.roi_heads import heatmaps_to_keypoints
        out2 = heatmaps_to_keypoints(maps2, rois2)
        out_trace2 = jit_trace(maps2, rois2)
        assert_equal(out2[0], out_trace2[0])
        assert_equal(out2[1], out_trace2[1])
    def test_keypoint_rcnn(self):
        images, test_images = self.get_test_images()
        dummy_images = [torch.ones(3, 100, 100) * 0.3]
        model = models.detection.keypoint_rcnn.keypointrcnn_resnet50_fpn(
            weights=models.detection.keypoint_rcnn.KeypointRCNN_ResNet50_FPN_Weights.DEFAULT, min_size=200, max_size=300
        )
        model.eval()
        model(images)
        self.run_model(
            model,
            [(images,), (test_images,), (dummy_images,)],
            input_names=["images_tensors"],
            output_names=["outputs1", "outputs2", "outputs3", "outputs4"],
            dynamic_axes={"images_tensors": [0, 1, 2]},
        )
        self.run_model(
            model,
            [(dummy_images,), (test_images,)],
            input_names=["images_tensors"],
            output_names=["outputs1", "outputs2", "outputs3", "outputs4"],
            dynamic_axes={"images_tensors": [0, 1, 2]},
        )
    def test_shufflenet_v2_dynamic_axes(self):
        model = models.shufflenet_v2_x0_5(weights=models.ShuffleNet_V2_X0_5_Weights.DEFAULT)
        dummy_input = torch.randn(1, 3, 224, 224, requires_grad=True)
        test_inputs = torch.cat([dummy_input, dummy_input, dummy_input], 0)
        self.run_model(
            model,
            [(dummy_input,), (test_inputs,)],
            input_names=["input_images"],
            output_names=["output"],
            dynamic_axes={"input_images": {0: "batch_size"}, "output": {0: "batch_size"}},
        )
if __name__ == "__main__":
    # Allow running this test module directly, outside a pytest invocation.
    pytest.main([__file__])
|
import z

# Per-stock probability tables previously persisted via the z helper.
d1 = z.getp("prob_down")
d2 = z.getp("prob_down_5_10")
from sortedcontainers import SortedSet
# Module-level set of (score, ticker) pairs, kept ordered by score.
scores = SortedSet()
def doem():
    """Score every stock from the persisted list and save the top 30.

    The score combines the probability of going down with the complement
    of the 5-10 day down probability; results accumulate in the
    module-level ``scores`` SortedSet and the best 30 are persisted.

    Fixes: removed the unused ``saveem`` dict and narrowed the bare
    ``except`` (which hid every error) to the expected missing-key case.
    """
    stocks = z.getp("listofstocks")
    for idx, astock in enumerate(stocks):
        if not idx % 100:
            print("idx: {}".format(idx))
        try:
            score = d1[astock] + (1 - d2[astock])
        except KeyError:
            # Stock absent from one of the probability tables: skip it.
            continue
        scores.add((score, astock))
    z.setp(scores[-30:], "probs_added_up")
if __name__ == '__main__':
    # Run the scoring pass when invoked as a script.
    doem()
|
import pyopencl as cl
import pyopencl.array
import numpy
import numpy.linalg as la
import time
# --- Sparse Distributed Memory (SDM) configuration (Python 2 script) ---
# Number of hard locations in the address space.
HARD_LOCATIONS = 2**20
criteria = 104
ACCESS_RADIUS_THRESHOLD = 104 #COMPUTE EXPECTED NUMBER OF num_ACTIVE_locations_found HARD LOCATIONS
# Bits per address/word.
DIMENSIONS = 256
BUFFER_SIZE_EXPECTED_ACTIVE_HARD_LOCATIONS = 1300 #Compute analytically; prove it's safe...
# Largest 32-bit unsigned value; upper bound for random address words.
maximum = (2**32)-1
HASH_TABLE_SIZE = 25033
# #defines prepended to the OpenCL kernel source (see Get_Text_code).
HASH_TABLE_SIZE_FILE = \
"#define HASH_TABLE_SIZE 25033 \n\
#define HASH_TABLE_SIZE2 25032 \n\
#define HASH_TABLE_SIZE3 25031 \n\
#define HASH_TABLE_SIZE4 25030 \n\
#define HASH_TABLE_SIZE5 25029 \n\
#define HASH_TABLE_SIZE6 25028 \n\
#define HASH_TABLE_SIZE7 25027 \n\
"
# HASH_TABLE_SIZE must be prime. The higher it is, the more bandwidth, but way less collisions. It should also be "far" from a power of 2.
print "HASH_TABLE_SIZE=", HASH_TABLE_SIZE #WHAT IS THE OPTIMUM HASH_TABLE_SIZE??
'''
def random_int64(size):
    a0 = numpy.random.random_integers(0, 0xFFFF, size=size).astype(numpy.uint64)
    a1 = numpy.random.random_integers(0, 0xFFFF, size=size).astype(numpy.uint64)
    a2 = numpy.random.random_integers(0, 0xFFFF, size=size).astype(numpy.uint64)
    a3 = numpy.random.random_integers(0, 0xFFFF, size=size).astype(numpy.uint64)
    a = a0 + (a1<<16) + (a2 << 32) + (a3 << 48)
    return a.view(dtype=numpy.uint64)
'''
def Get_Hash_Table():
    """Fresh host-side hash table of active-location slots, all empty (0)."""
    return numpy.zeros(HASH_TABLE_SIZE, dtype=numpy.int32)
def Get_Hash_Table_GPU_Buffer(ctx):
    """Copy a fresh hash table into a read/write OpenCL device buffer.

    Fixed: the module never imports ``mem_flags`` by itself (only
    ``import pyopencl as cl`` at the top), so the original raised
    NameError — the flags live at ``cl.mem_flags``.
    """
    hash_table_active_index = Get_Hash_Table()
    hash_table_gpu = cl.Buffer(ctx, cl.mem_flags.READ_WRITE | cl.mem_flags.COPY_HOST_PTR, hostbuf=hash_table_active_index)
    return hash_table_gpu
def Get_Hamming_Distances():
    """Zeroed per-hard-location Hamming distance array (one uint32 each)."""
    return numpy.zeros(HARD_LOCATIONS, dtype=numpy.uint32)  # 32 bits per distance
def Get_Distances_GPU_Buffer(ctx):
    """Copy a zeroed distance array into a read/write OpenCL device buffer.

    Fixed: ``mem_flags`` is never imported in this module — use
    ``cl.mem_flags`` (same NameError as in Get_Hash_Table_GPU_Buffer).
    """
    Distances = Get_Hamming_Distances()
    hamming_distances_gpu = cl.Buffer(ctx, cl.mem_flags.READ_WRITE | cl.mem_flags.COPY_HOST_PTR, hostbuf=Distances)
    return hamming_distances_gpu
def Get_Random_Bitstring():
    """Random 256-bit address drawn via the SHA-256 address-space helper.

    NOTE(review): imports happen lazily on every call — presumably to avoid
    a circular import; confirm before hoisting them to module level.
    """
    #bitstrings = numpy.random.random_integers(0,maximum,size=8).astype(numpy.uint32) #TRYING THIS OUT
    import address_space_through_sha256_SDM
    import random
    bitstring = address_space_through_sha256_SDM.get_bitstring(str(random.randrange(2**32-1)))
    return bitstring
def Get_Bitstring_GPU_Buffer(ctx, bitstring):
    """Copy a query bitstring into a read-only OpenCL device buffer.

    Fixed: ``mem_flags`` is never imported in this module — use
    ``cl.mem_flags``.
    """
    bitstring_gpu = cl.Buffer(ctx, cl.mem_flags.READ_ONLY | cl.mem_flags.COPY_HOST_PTR, hostbuf=bitstring)
    return bitstring_gpu
'''
def Get_num_times_Random_Bitstrings():
for x in range(num_times):
bitstrings[x] = numpy.random.random_integers(0,2**32,size=8).astype(numpy.uint32)
return bitstrings
'''
'''
def Get_num_times_Bitstrings_GPU_Buffer(ctx):
for x in range(num_times):
bitstrings = numpy.random.random_integers(0,2**32,size=8*num_times).astype(numpy.uint32)
#bitstrings.shape = (8,num_times)
bitstring_gpu = cl.Buffer(ctx, mem_flags.READ_ONLY | mem_flags.COPY_HOST_PTR, hostbuf=bitstrings)
return bitstring_gpu
'''
def Create_Memory_Addresses():
    """Random 256-bit addresses for every hard location (8 uint32 words each).

    Fixed: ``numpy.random.random_integers`` is deprecated and removed in
    modern NumPy; ``randint`` with an exclusive upper bound of
    ``maximum + 1`` draws from the same inclusive [0, maximum] range, and
    the ``dtype`` argument avoids the intermediate int array + astype.
    Also switched the Python 2 print statement to the print() form, which
    behaves identically for a single argument.
    """
    print('creating memory memory_addresses...')
    memory_addresses = numpy.random.randint(0, maximum + 1, size=(HARD_LOCATIONS, 8), dtype=numpy.uint32)
    return memory_addresses
#The mistake here is that numpy is working with ints, and we're working with uints, so we have to have the **int** range, then cast to uint---not the uint range. Yes, we are morons.
#memory_addresses = numpy.random.random_integers(-2**15+1,2**15-1,size=(HARD_LOCATIONS,8)).astype(numpy.uint32)
def load_address_space():
    """Load the pickled hard-location address space from disk.

    NOTE(review): Python 2 only (cPickle); the file handle is never
    closed — acceptable for a one-shot script, but wrap in ``with`` if
    this gets reused.
    """
    import cPickle
    address_space_file = open('hard_locations.sha256.sdm.pickle', 'rb')
    space = cPickle.load(address_space_file)
    return space
def Get_Text_code(filename):
    """OpenCL kernel source: hash-table #defines prepended to the file body."""
    with open(filename, "r") as source_file:
        kernel_body = source_file.read()
    return HASH_TABLE_SIZE_FILE + kernel_body
def create_sdm_values():
    """Zeroed SDM counter matrix: one int8 counter per (location, dimension)."""
    shape = (HARD_LOCATIONS, DIMENSIONS)
    return numpy.zeros(shape, dtype=numpy.int8)
def write_x_at_x_kanerva(active_hard_locations, bitstring):
    """Kanerva-style write: bump each active location's per-dimension
    counter up or down according to the corresponding address bit.

    NOTE(review): the ``bitstring`` parameter is immediately overwritten
    with the location's own address (the original author flagged this
    too), so the function effectively writes each address at itself.
    Also relies on module globals ``SDM_addresses`` and ``sdm_values``
    that are not defined in this file as shown — confirm their origin.
    """
    maximum=255 #isn't it 255?
    for pos in numpy.nditer(active_hard_locations):
        # NOTE(review): clobbers the parameter with the location's own address.
        bitstring = SDM_addresses[pos,0:8]
        for dimension in range (256):
            # Which 32-bit word of the address holds this dimension's bit.
            uint_to_check_bit = ((dimension // maximum) + dimension % maximum ) // 32
            #print dimension
            add = bool((bitstring [uint_to_check_bit] & (1<< dimension%32 ) ))
            #print add
            if add:
                sdm_values[pos,dimension] +=1
            elif not(add):
                sdm_values[pos,dimension] -=1
        print 'location', pos,'has been updated to',sdm_values[pos,]
'''
def read_address_sum_kanerva(active_hard_locations):
#maximum=255
sum = numpy.zeros((DIMENSIONS,), dtype = int32)
for pos in active_hard_locations:
bitstring = SDM_addresses[pos,]
for checkbit in range (256):
if bool(bitstring [( (checkbit // maximum) + checkbit % maximum ) // 32] & (1<< checkbit%32 )):
#increase something
sum[pos,checkbit]+=1
else:
#decrease something
sum[pos,checkbit]-=1
return sum
'''
def Get_Active_Locations(bitstring, ctx):
    # Find all hard locations activated by *bitstring* using the GPU kernels,
    # then filter the resulting hash table host-side.
    # NOTE(review): uses module-level queue/prg/buffers; the `bitstring` and
    # `ctx` parameters are not referenced directly (the kernel reads the
    # global `bitstring_gpu`) -- confirm intent.
    err = prg.clear_hash_table_gpu(queue, (HASH_TABLE_SIZE,), None, hash_table_gpu.data).wait()
    err = prg.get_active_hard_locations_no_dist_buffer(queue, (HARD_LOCATIONS,), None, memory_addresses_gpu, bitstring_gpu, hash_table_gpu.data).wait()
    if err: print 'Error --> ',err
    err = cl.enqueue_read_buffer(queue, hash_table_gpu.data , hash_table_active_index).wait()
    if err: print 'Error in retrieving hash_table_active_index? --> ',err
    # Compact to the non-zero entries on the host (author notes this should
    # eventually be done on the GPU, and that index 0 is never reported).
    active_hard_locations = hash_table_active_index[hash_table_active_index!=0] ## THIS HAS TO BE DONE ON THE GPU, MORON! If it's sorted, you can get the num of items and copy only those...
    '''
    #ALSO, when GID=0, you are counting the hard locations there. HL[0] IS ALWAYS ACTIVE.
    '''
    return active_hard_locations
#from pyopencl.algorithm import copy_if
import my_pyopencl_algorithm
from my_pyopencl_algorithm import copy_if
def Get_Active_Locations2(ctx):
    # Variant 2: compute all hard-location distances on the GPU, then use a
    # GPU copy_if to keep those within the activation radius (distance < 104).
    prg.compute_distances(queue, (HARD_LOCATIONS,), None, memory_addresses_gpu.data, bitstring_gpu, distances_gpu.data).wait()
    final_gpu, evt = my_pyopencl_algorithm.sparse_copy_if(distances_gpu, "ary[i] < 104", queue = queue)
    return final_gpu
def Get_Active_Locations3(ctx):
    # Variant 3: mark active locations in a fresh GPU hash table, compact the
    # indices with sparse_copy_if, then gather their distances.
    hash_table_gpu = cl_array.zeros(queue, (HASH_TABLE_SIZE,), dtype=numpy.int32)
    prg.get_active_hard_locations_32bit(queue, (HARD_LOCATIONS,), None, memory_addresses_gpu.data, bitstring_gpu, distances_gpu.data, hash_table_gpu.data ).wait()
    #if err: print 'Error --> ',err
    active_hard_locations_gpu, event = my_pyopencl_algorithm.sparse_copy_if(hash_table_gpu, "ary[i] > 0", queue = queue)
    active = active_hard_locations_gpu.get()
    Num_HLs = active.size
    prg.get_HL_distances_from_gpu5(queue, (Num_HLs,), None, active_hard_locations_gpu.data, distances_gpu.data, final_distances_gpu.data)
    # NOTE(review): returns the module-level final_locations_gpu, which this
    # function never writes -- confirm it is populated elsewhere.
    return Num_HLs, final_locations_gpu.get(), final_distances_gpu.get()
def Get_Active_Locations4(bitstring, ctx):
    # Variant 4: GPU activation + copy_if over the distance buffer, with a
    # count returned by the copy, then two gather kernels to assemble the
    # final (location, distance) result arrays.
    # NOTE(review): the `bitstring` parameter is unused (kernel reads the
    # module-level bitstring_gpu) -- confirm intent.
    hash_table_gpu = cl_array.zeros(queue, (HASH_TABLE_SIZE,), dtype=numpy.int32)
    prg.get_active_hard_locations_32bit(queue, (HARD_LOCATIONS,), None, memory_addresses_gpu.data, bitstring_gpu, distances_gpu, hash_table_gpu.data ).wait()
    active_hard_locations_gpu, count_active_gpu, event = copy_if(distances_gpu, "ary[i] < 104")
    Num_HLs = int(count_active_gpu.get())
    final_distances_gpu = cl_array.zeros(queue, (Num_HLs,), dtype=numpy.int32)
    prg.get_HL_distances_from_gpu(queue, (Num_HLs,), None, active_hard_locations_gpu.data, hash_table_gpu.data, distances_gpu)
    prg.copy_final_results(queue, (Num_HLs,), None, final_locations_gpu.data, active_hard_locations_gpu.data, final_distances_gpu.data, hash_table_gpu.data)
    # err =
    return Num_HLs, final_locations_gpu.get(), final_distances_gpu.get()
def Get_Active_Locations5(ctx, bitstring_gpu):
    # Variant 5: like variant 3 but takes the bitstring device array as a
    # parameter; returns (count, active location indices, their distances),
    # all copied back to the host.
    hash_table_gpu = cl_array.zeros(queue, (HASH_TABLE_SIZE,), dtype=numpy.int32)
    #prg.clear_hash_table_gpu(queue, hash_table_gpu.data)
    prg.get_active_hard_locations_32bit(queue, (HARD_LOCATIONS,), None, memory_addresses_gpu, bitstring_gpu.data, distances_gpu, hash_table_gpu.data ).wait()
    active_hard_locations_gpu, event = my_pyopencl_algorithm.sparse_copy_if(hash_table_gpu, "ary[i] > 0", queue = queue)
    #active_hard_locations_gpu, final_distances_gpu, event = my_pyopencl_algorithm.sparse_copy_if_with_distances(hash_table_gpu, "ary[i] > 0", extra_args = [distances_gpu], queue = queue)
    active = active_hard_locations_gpu.get()
    count = active.size
    final_distances_gpu = cl_array.Array(queue, (count,), dtype=numpy.int32)
    prg.get_HL_distances_from_gpu(queue, (count,), None, active_hard_locations_gpu.data, distances_gpu.data, final_distances_gpu.data)
    return count, active_hard_locations_gpu.get(), final_distances_gpu.get()
|
import numpy as np
from pathlib import Path
from PIL import Image
import pyvips
from skimage import data, io, filters
from scipy.ndimage.morphology import (
binary_dilation,
binary_erosion,
binary_opening,
binary_closing,
grey_dilation,
grey_erosion,
)
from skimage.morphology import opening, closing
from skimage.morphology import watershed, diamond
from skimage.segmentation import random_walker
from skimage.filters import (
threshold_yen,
threshold_otsu,
threshold_triangle,
threshold_mean,
threshold_local,
)
from scipy.ndimage.measurements import watershed_ift
from skimage.feature import peak_local_max
from scipy import ndimage
from skimage import measure, exposure
from mpyx import EZ, As, F, Iter, Seq, Stamp, Data
import time
import os
import tensorflow as tf
from matplotlib import pyplot as plt
class Datagram(Data):
    # mpyx Data subclass whose temporary payload files live under /tmp/mot/.
    def tmp_dir(self):
        # Directory used for staging this datagram's temporary files.
        return "/tmp/mot/"
class LinescanSegment(Data):
    # Marker Data subclass for one processed linescan segment; carries the
    # named arrays stored via save() in LoadLinescanSegment.do()
    # (vis / nir / im / dv / mk).
    pass
class LoadLinescanSegment(F):
    """mpyx pipeline stage: load one (VIS, NIR) linescan file pair, divide
    out an estimated background, and emit a (LinescanSegment, threshold)
    tuple via self.put().

    The emitted LinescanSegment carries (all uint8 except the boolean mask):
      vis -- cropped visible-light RGB image
      nir -- cropped near-infrared image
      im  -- composite: NIR on red, mean(R,G) on green, B on blue
      dv  -- background-divided composite
      mk  -- boolean foreground mask
    """

    def format_to_dtype(self, img_format):
        # Translate a pyvips band-format name into the equivalent numpy dtype.
        return {
            "uchar": np.uint8,
            "char": np.int8,
            "ushort": np.uint16,
            "short": np.int16,
            "uint": np.uint32,
            "int": np.int32,
            "float": np.float32,
            "double": np.float64,
            "complex": np.complex64,
            "dpcomplex": np.complex128,
        }[img_format]

    def setup(self, crop=48, debug=False, env=None, device="/gpu:0"):
        # crop: pixels trimmed from the left edge; the loaded window is a
        # fixed 4000x1000 region.
        self.crop = crop
        self.width = 4000
        self.height = 1000
        self.debug = debug
        self.device = device
        print("Linescan loader running on GPU", os.environ["CUDA_VISIBLE_DEVICES"])
        # Eager TF with growable GPU memory so several workers share a card.
        tf_config = tf.ConfigProto()
        tf_config.gpu_options.allow_growth = True
        tf.enable_eager_execution(config=tf_config)

    def do(self, filename_pair):
        # filename_pair is (vis_path, nir_path) -- see the zip() in Linescan2.
        with tf.device(self.device):
            _vis = pyvips.Image.new_from_file(
                str(filename_pair[0]), access="sequential"
            ).crop(self.crop, 0, self.width, self.height)
            vis = (
                np.ndarray(
                    buffer=_vis.write_to_memory(),
                    dtype=self.format_to_dtype(_vis.format),
                    shape=[_vis.height, _vis.width, _vis.bands],
                ).astype("float64")
                / 255.0
            )
            # BUG FIX: the NIR image must be read from filename_pair[1];
            # previously this re-opened the VIS file (filename_pair[0]) and
            # treated its first band as "NIR".
            _nir = (
                pyvips.Image.new_from_file(str(filename_pair[1]), access="sequential")
                .extract_band(0)
                .crop(self.crop, 0, self.width, self.height)
            )
            nir = (
                np.ndarray(
                    buffer=_nir.write_to_memory(),
                    dtype=self.format_to_dtype(_nir.format),
                    shape=[_nir.height, _nir.width, _nir.bands],
                ).astype("float64")
                / 255.0
            ).squeeze()
            # todo : add noise to reduce quatization artifacts (+/- 1/255)
            # todo : compare tensorflow performance to libvips for image ops
            print("vis", np.min(vis), np.max(vis))
            print("nir", np.min(nir), np.max(nir))
            # Composite: NIR on red, mean(R,G) on green, blue unchanged.
            im = tf.stack(
                [nir, tf.divide(tf.add(vis[:, :, 0], vis[:, :, 1]), 2.0), vis[:, :, 2]],
                axis=2,
            )
            # Pass 1: per-column mean as a crude background estimate.
            bg = tf.expand_dims(tf.reduce_mean(im, axis=0), 0)
            dv = tf.square(tf.divide(im, tf.add(bg, 0.00001)))
            # Mask regions blown to >= 1: normalize and invert those areas.
            dv = tf.subtract(
                1.0,
                tf.where(
                    tf.greater(dv, 1.0),
                    tf.subtract(
                        1.0,
                        tf.divide(
                            tf.subtract(dv, 1.0), tf.subtract(tf.reduce_max(dv), 1.0)
                        ),
                    ),
                    dv,
                ),
            )
            # Merge channels into a single response map.
            mk = tf.square(tf.divide(tf.reduce_sum(dv, axis=2), 3.0))
            # First (coarse) threshold.
            th = 0.1  # threshold_yen(mk.numpy())
            mk = tf.tile(tf.expand_dims(tf.greater(mk, th), -1), [1, 1, 3])
            mk = binary_dilation(mk, iterations=3)
            # Pass 2: better background estimate using only non-foreground rows.
            bg = masked_mean(im, tf.subtract(1.0, mk))
            dv = tf.square(tf.divide(im, tf.add(bg, 0.00001)))
            dv = tf.subtract(
                1.0,
                tf.where(
                    tf.greater(dv, 1.0),
                    tf.subtract(
                        1.0,
                        tf.divide(
                            tf.subtract(dv, 1.0), tf.subtract(tf.reduce_max(dv), 1.0)
                        ),
                    ),
                    dv,
                ),
            )
            # Second (fine) threshold.
            mk = tf.square(tf.divide(tf.reduce_sum(dv, axis=2), 3.0))
            th = 0.05  # threshold_yen(mk.numpy())
            mk = tf.greater(mk, th)
            # mk = binary_opening(binary_closing(mk.numpy()))
            # Normalize back to uint8 RGB ranges for storage.
            vis = tf.multiply(vis, 255.0)
            nir = tf.multiply(nir, 255.0)
            im = tf.multiply(im, 255.0)
            dv = tf.multiply(dv, 255.0)
            lss = LinescanSegment()
            lss.save("vis", vis.numpy().astype("uint8"))
            lss.save("nir", nir.numpy().astype("uint8"))
            lss.save("im", im.numpy().astype("uint8"))
            lss.save("dv", dv.numpy().astype("uint8"))
            lss.save("mk", mk)
            self.put((lss, th))
def masked_mean(T, m):
    # Column-wise mean of T over the rows selected by mask m.
    # The small epsilon keeps fully-masked columns from dividing by zero.
    m = tf.cast(m, tf.float64)
    t = tf.reduce_sum(tf.multiply(T, m), axis=0)
    s = tf.add(tf.reduce_sum(m, axis=0), 0.00001)
    return tf.divide(t, s)
def tf_closing(T):
    # NOTE(review): incomplete and unused -- only an erosion is computed and
    # the result is never returned.  A morphological closing is a dilation
    # followed by an erosion; confirm intent before wiring this in.
    kern = [[1, 1], [1, 1]]
    eroded = tf.nn.erosion2d(T, kern, [1, 1, 1, 1], [1, 1, 1, 1], "SAME")
class Linescan2:
    # powered by tensorflow
    # Loads a whole linescan capture directory by fanning (VIS, NIR) file
    # pairs out to a pool of LoadLinescanSegment workers (mpyx EZ pipeline),
    # concatenating the per-segment arrays, and labelling the combined mask.
    NIR_GLOB = "LS.NIR*.tif"
    VIS_GLOB = "LS.VIS*.tif"
    def __init__(self, path, crop=48, debug=False):
        p = Path(path)
        self.crop = crop
        self.debug = debug
        # Sorted so VIS and NIR segments pair up positionally in zip() below.
        vis_files = list(sorted(p.glob(self.VIS_GLOB)))
        nir_files = list(sorted(p.glob(self.NIR_GLOB)))
        if debug: # - decrese data size # approx 100000 tall slice from center of LS
            vis_files = vis_files[100:110]
            nir_files = nir_files[100:110]
        if len(nir_files) != len(vis_files):
            raise RuntimeError(
                "Number of NIR files does not equal number of VIS files in target directory"
            )
        paired = list(zip(vis_files, nir_files))
        # Per-segment result accumulators, concatenated after the pipeline runs.
        self.vis = []
        self.nir = []
        self.im = []
        self.dv = []
        self.mk = []
        self.th = []
        start = time.time()
        very_start = start
        # 20 workers, spread across 3 GPUs via CUDA_VISIBLE_DEVICES (1..3).
        for segment in EZ(
            Iter(paired),
            Seq(
                tuple(
                    (
                        LoadLinescanSegment(
                            self.crop,
                            self.debug,
                            env={"CUDA_VISIBLE_DEVICES": str(1 + (i % 3))},
                        )
                        for i in range(20)
                    )
                ),
                Stamp(),
            ),
        ).items():
            # todo: imshow on debug
            # segment is (LinescanSegment, threshold); load() pulls the arrays
            # back from the Data object's temporary storage.
            self.vis.append(segment[0].load("vis"))
            self.nir.append(segment[0].load("nir"))
            self.im.append(segment[0].load("im"))
            self.dv.append(segment[0].load("dv"))
            self.mk.append(segment[0].load("mk"))
            self.th.append(segment[1])
            segment[0].clean()
        print("loaded in", time.time() - start, "seconds")
        start = time.time()
        # Stitch segments vertically into full-capture arrays.
        self.vis = np.concatenate(self.vis)
        self.nir = np.concatenate(self.nir)
        self.im = np.concatenate(self.im)
        self.dv = np.concatenate(self.dv)
        self.mk = np.concatenate(self.mk)
        self.th = np.array(self.th)
        print("concat took", time.time() - start, "seconds")
        start = time.time()
        self.mk = closing(self.mk).astype("bool")
        print("morphology took", time.time() - start, "seconds")
        start = time.time()
        # Connected-component labels over the foreground mask.
        self.labels = measure.label(self.mk, background=0)
        print("labels took", time.time() - start, "seconds")
        start = time.time()
        # Region properties measured against the NIR ("red") channel.
        self.props = measure.regionprops(self.labels, self.im[:, :, 0])
        print("props took", time.time() - start, "seconds")
        print("total time", time.time() - very_start)
"""
class Linescan:
# powered by numpy
NIR_GLOB = "LS.NIR*.tif"
VIS_GLOB = "LS.VIS*.tif"
def __init__(self, path, crop=48, debug=False):
p = Path(path)
vis_files = list(sorted(p.glob(self.VIS_GLOB)))
nir_files = list(sorted(p.glob(self.NIR_GLOB)))
if debug: # - decrese data size # approx 100000 tall slice from center of LS
vis_files = vis_files[100:110]
nir_files = nir_files[100:110]
if len(nir_files) != len(vis_files):
raise RuntimeError(
"Number of NIR files does not equal number of VIS files in target directory"
)
paired = list(zip(vis_files, nir_files))
self.vis = []
self.nir = []
self.im = []
self.bg = []
self.dv = []
self.dv2 = []
self.dv3 = []
self.mk = []
self.mk2 = []
self.markers = []
def load_n_combine(filename_pair):
# load visual / near-infrared image pair
vis = np.array(Image.open(filename_pair[0]), dtype="float64")[
:, crop:-crop, :
]
nir = np.array(Image.open(filename_pair[1]), dtype="float64")[
:, crop:-crop, np.newaxis
][:, :, 0]
# print("R pre", np.min(vis[:,:,0]), np.max(vis[:,:,0]))
# print("G pre", np.min(vis[:,:,1]), np.max(vis[:,:,1]))
# print("B pre", np.min(vis[:,:,2]), np.max(vis[:,:,2]))
im = vis.copy()
# visible light RGB gets compressed into to green/blue space
# blue has some unique differentations,/.?
# im[:,:,2] = (vis[:,:,1] + vis[:,:,2]) / 2.0
im[:, :, 1] = (vis[:, :, 0] + vis[:, :, 1]) / 2.0
# Near infrared gets placed on the "red" channel
im[:, :, 0] = nir
# normalize to 0-1
# todo: assert im > 1
im = im / 255.
# Find the background-line (1 px tall by n px wide)
bg = np.median(im, axis=0)[np.newaxis, :, :] + 0.00001
# divide out the background and gamma correct
dv = (im / bg) ** 3
# mask regions blown to >= 1
m = dv > 1
dv2 = dv.copy()
# normalize and invert these areas
dv2[m] = 1.0 - (
(dv2[m] - np.min(dv2[m])) / (np.max(dv2[m]) - np.min(dv2[m]))
)
# maybe something else or more fancy tech <- here,
# create mask
mk = 1.0 - dv2
# mk = np.zeros(dv2.shape, dtype="float64")
# mk[dv2 < 0.9] = 1.0 - dv2[dv2 < 0.9]
# print("mk stats", np.min(mk), np.mean(mk), np.median(mk), np.max(mk))
# merge channels
mk2 = (np.sum(mk, axis=2) / 3.0) ** 2.0
# print("mk2 stats", np.min(mk2), np.mean(mk2), np.median(mk2), np.max(mk2))
mk2 = mk2 > threshold_yen(mk2)
# mk2[mk2 > 0.375] = 1
# mk2[mk2 < 0.375] = 0
dv3 = dv2 # hacky
##############
# pass 2 - better background estimation
##############
im2 = im.copy()
im2[mk2] = (mk2[:, :, np.newaxis] * bg)[mk2]
# b = (1 ^ mk2).astype("bool")
# sol1
# im2[b] = (b*bg)[b]
# for r, row in enumerate(im2):
# row[b[r]] = bg[:, b[r]]
bg = np.median(im2, axis=0)[np.newaxis, :, :] + 0.00001
dv = (im / bg) ** 3
# mask regions blown to >= 1
m = dv > 1
dv2 = dv.copy()
# normalize and invert these areas
dv2[m] = 1.0 - (
(dv2[m] - np.min(dv2[m])) / (np.max(dv2[m]) - np.min(dv2[m]))
)
# maybe something else or more fancy tech <- here,
# create mask
mk = 1.0 - dv2
# mk = np.zeros(dv2.shape, dtype="float64")
# mk[dv2 < 0.9] = 1.0 - dv2[dv2 < 0.9]
# print("mk stats", np.min(mk), np.mean(mk), np.median(mk), np.max(mk))
# merge channels
mk2 = (np.sum(mk, axis=2) / 3.0) ** 2.0
# print("mk2 stats", np.min(mk2), np.mean(mk2), np.median(mk2), np.max(mk2))
mk2 = mk2 > threshold_yen(mk2)
# mk2[mk2 > 0.375] = 1
# mk2[mk2 < 0.375] = 0
dv3 = dv2 # hacky
# return (im, bg, dv, mk, mk2) #, markers)
return (
vis.astype("uint8"),
nir.astype("uint8"),
(im * 255).astype("uint8"),
(dv3 * 255).astype("uint8"),
(mk * 255).astype("uint8"),
mk2.astype("bool"),
)
start = time.time()
very_start = start
for part in EZ(
Iter(paired),
Seq(
(
load_n_combine,
load_n_combine,
load_n_combine,
load_n_combine,
load_n_combine,
load_n_combine,
load_n_combine,
load_n_combine,
load_n_combine,
load_n_combine,
load_n_combine,
load_n_combine,
load_n_combine,
load_n_combine,
load_n_combine,
load_n_combine,
),
Stamp(),
),
).items():
self.vis.append(part[0])
self.nir.append(part[1])
self.im.append(part[2])
# self.dv2.append(part[1])
self.dv3.append(part[3])
self.mk.append(part[4])
self.mk2.append(part[5])
print("loading took", time.time() - start, "seconds")
start = time.time()
self.vis = np.concatenate(self.vis)
self.nir = np.concatenate(self.nir)
self.im = np.concatenate(self.im)
# self.bg = np.concatenate(self.bg)
# self.dv = np.concatenate(self.dv)
# self.dv2 = np.concatenate(self.dv2)
self.dv3 = np.concatenate(self.dv3)
self.mk = np.concatenate(self.mk)
self.mk2 = np.concatenate(self.mk2)
print("concat took", time.time() - start, "seconds")
start = time.time()
# self.mk2 = np.array(
# binary_dilation(
# binary_erosion( self.mk2, iterations=5),
# iterations=5)
# , dtype='bool')
# self.mk2 = self.mk2.astype('bool')
self.mk2 = binary_erosion(opening(self.mk2))
print("morphology took", time.time() - start, "seconds")
start = time.time()
self.labels = measure.label(self.mk2, background=0)
print("labels took", time.time() - start, "seconds")
start = time.time()
self.props = measure.regionprops(self.labels, self.im[:, :, 0])
print("props took", time.time() - start, "seconds")
print("total time", time.time() - very_start)
def equalize(f):
# doesn't work very well..
h = np.histogram(f, bins=np.arange(256))[0]
H = np.cumsum(h) / float(np.sum(h))
e = np.floor(H[f.flatten().astype("int")] * 255.).astype("uint8")
return e.reshape(f.shape)
"""
# NumPy / SciPy Recipes for Image Processing: Intensity Normalization and Histogram Equalization (PDF Download Available). Available from: https://www.researchgate.net/publication/281118372_NumPy_SciPy_Recipes_for_Image_Processing_Intensity_Normalization_and_Histogram_Equalization [accessed Mar 02 2018].
# a few utilities to deal with reading in crops
import re
import random
def getrandcrops(crop_dir_list, resized=True):
    """Yield (directory-index, crop) pairs drawn uniformly at random forever.

    Each yielded crop comes from a random directory in *crop_dir_list* and a
    random .png file within it, decoded via readcrop().
    """
    crops = []
    for crop_dir in crop_dir_list:
        # BUG FIX: Path.glob() already yields paths prefixed with crop_dir;
        # the old os.path.join(crop_dir, crop_file) doubled the directory
        # (e.g. "dir/dir/x.png").  readcrop also needs a str, not a Path.
        crops.append([str(crop_file) for crop_file in Path(crop_dir).glob("*.png")])
    while True:
        idx = random.randrange(0, len(crops))
        crop = readcrop(random.choice(crops[idx]))
        yield idx, crop
def getcrops(crop_dir, resized=True):
    """Yield every crop stored as a .png under *crop_dir*, via readcrop()."""
    for f in Path(crop_dir).glob("*.png"):
        # BUG FIX: glob() already returns dir-prefixed paths; joining with
        # crop_dir again produced "dir/dir/x.png".  str() is required because
        # readcrop/cropsize run a regex over the filename.
        yield readcrop(str(f), resized)
def cropsize(filename):
    """Return the crop's pixel size, encoded after the UUID in *filename*.

    File names look like ``<uuid>-<size>.png``; the size is the integer
    captured between the final dash and the extension.
    """
    uuid_pattern = "-".join(
        ["[0-9a-f]{8}", "[0-9a-f]{4}", "[0-9a-f]{4}", "[0-9a-f]{4}", "[0-9a-f]{12}"]
    )
    match = re.search(uuid_pattern + r"-(\d*)\.png", filename)
    return int(match.group(1))
def readcrop(filename, resized=True):
    """Load a saved crop image and split it into its stacked panels.

    The file packs several tiles: a top row of three fixed 64x64 panels
    (image, divided, mask), and below it the original-resolution image and
    divided panels side by side, the original width being encoded in the
    filename (see cropsize).

    :param resized: if True return the three 64x64 panels ``(im, dv, mk)``;
                    otherwise return the original-resolution ``(im, dv)``.
    """
    crop = np.array(Image.open(filename), dtype="float64")
    s = 64  # tile edge length in pixels
    w = cropsize(filename)  # original crop width from the filename
    # (the previously computed, unused local `h` has been removed)
    if resized:
        im = crop[0:s, 0:s]
        dv = crop[0:s, s : s * 2]
        mk = crop[0:s, s * 2 :]
        return im, dv, mk
    else:
        im = crop[s:, 0:w]
        dv = crop[s:, w:]
        return im, dv
|
from pylearn2.utils import serial
import numpy as np
import re
from sklearn.linear_model import LogisticRegression
def load_features_labels(features, labels):
    """Flatten per-(filterband, classpair) CSP features into one design matrix.

    Given full fold features labels are still separated by filterband,
    classifier pair etc.

    :param features: object array of shape (35, 1, 6); each element exposes a
        ``.data`` 2d array of shape (n_trials, n_feats), identical for all cells.
    :param labels: object array of shape (1,) holding the label vector.
    :returns: ``(X, y)`` where X is (n_trials, n_feats * 210) float32.
    """
    assert np.array_equal([35,1,6], features.shape), "Should have 35 filtbands, single fold, 6 classpairs."
    assert np.array_equal([1], labels.shape), "Should have just a single fold."
    y = labels[0]
    # Check all have features same shape
    for i_filt_band in range(35):
        for i_class_pair in range(6):
            assert np.array_equal(features[0,0,0].data.shape,
                features[i_filt_band,0,i_class_pair].data.shape)
    # Generalized: derive the per-cell feature count from the data instead of
    # hard-coding 10 (behavior is unchanged when n_feats == 10).
    n_trials, n_feats = features[0,0,0].data.shape
    # initialize with nans so any unfilled column is caught below
    X = np.nan * np.ones((n_trials, n_feats * np.prod(features.shape)),
                         dtype=np.float32)
    for i_filt_band in range(35):
        for i_class_pair in range(6):
            start_ind = i_filt_band * 6 * n_feats + i_class_pair * n_feats
            X[:, start_ind:start_ind+n_feats] = features[i_filt_band,0,i_class_pair].data
    # check for no nans
    assert not (np.any(np.isnan(X)))
    return X, y
def shorten_dataset_name(dataset_name):
    """Strip the data-directory prefix and session boilerplate from a BBCI
    dataset path, leaving only the distinguishing part of the name."""
    dataset_name = re.sub(r"(./)?data/[^/]*/", '', str(dataset_name))
    dataset_name = re.sub(r"MoSc[0-9]*S[0-9]*R[0-9]*_ds10_", '',
        dataset_name)
    # BUG FIX: the dot is escaped -- the old pattern "BBCI.mat" also matched
    # e.g. "BBCIxmat" because "." is a regex wildcard.
    dataset_name = re.sub(r"BBCI\.mat", '', dataset_name)
    return dataset_name
if __name__ == '__main__':
    # Re-train a logistic regression on the stored CSP features of subjects
    # 91..108 and compare each new test accuracy against the accuracy that
    # was recorded in the pickled trainer at thesis time.
    # NOTE(review): Python 2 script (xrange, print statement below).
    clf = LogisticRegression()
    all_old_accs = []
    all_new_accs = []
    for i_file in xrange(91,109):
        print ("Loading...")
        filename = '/home/schirrmr/motor-imagery/data/models/final-eval/csp-standardized/' + str(i_file) + '.pkl'
        csp_trainer = serial.load(filename)
        print "Training {:20s}".format(shorten_dataset_name(csp_trainer.filename))
        # Flatten the stored per-filterband/per-classpair features into
        # one design matrix per split (see load_features_labels above).
        X_train, y_train = load_features_labels(csp_trainer.binary_csp.train_feature_full_fold,
            csp_trainer.binary_csp.train_labels_full_fold)
        X_test, y_test = load_features_labels(csp_trainer.binary_csp.test_feature_full_fold,
            csp_trainer.binary_csp.test_labels_full_fold)
        clf.fit(X_train, y_train)
        # Accuracy recorded at thesis time vs. the freshly trained model.
        old_acc = csp_trainer.multi_class.test_accuracy[0]
        new_acc = clf.score(X_test, y_test)
        print ("Master Thesis Accuracy: {:5.2f}%".format(old_acc * 100))
        print ("New Accuracy: {:5.2f}%".format(new_acc * 100))
        all_old_accs.append(old_acc)
        all_new_accs.append(new_acc)
        print("")
    print("Master Thesis average(std): {:5.2f}% ({:5.2f}%)".format(
        np.mean(all_old_accs) * 100, np.std(all_old_accs) * 100))
    print("New average(std): {:5.2f}% ({:5.2f}%)".format(
        np.mean(all_new_accs) * 100, np.std(all_new_accs) * 100))
from django.shortcuts import get_object_or_404, render, redirect
from django.template import RequestContext
from django.http import HttpResponse, HttpResponseRedirect
from django.urls import reverse
from django.conf import settings
from models_other import Category, Coco50
from imgmanip.models import Image
from imgmanip.forms import ImageUploadForm
import base64
import coco
import cStringIO
import utils
import os
from os import listdir
from os.path import isfile, join, splitext
def index(request):
    """
    Renders a page where you can either upload an image that will get saved in our database or choose from one of the existing files to play around with.
    """
    if request.method == 'POST':
        # Bound form: validate the uploaded file and persist it.
        form = ImageUploadForm(request.POST, request.FILES)
        if form.is_valid():
            uploaded = Image(img_file=request.FILES['img_file'])
            uploaded.save()
            # Redirect to the image edit page after POST
            return HttpResponseRedirect('/imgmanip/edit?image_id=%s' % uploaded.id)
    else:
        # Unbound form for a plain GET.
        form = ImageUploadForm()
    # Gather all uploaded images plus the pre-segmented gallery entries.
    all_images = Image.objects.all()
    segmented_urls = [entry.image.url for entry in Coco50.objects.all()]
    context = {'images': all_images, 'form': form, 'segmented_imgs': segmented_urls}
    return render(request, 'imgmanip/index.html', context)
def edit(request):
    """
    Renders a editable view given an image_id. Users can use this view to manipulate the image they are viewing.
    """
    # Guard clause: without an image_id there is nothing to edit.
    if 'image_id' not in request.GET:
        return HttpResponseRedirect('/imgmanip')
    image_id = request.GET['image_id']
    target = get_object_or_404(Image, pk=image_id)
    return render(request, 'imgmanip/edit.html', {'image': target, 'image_id': image_id})
def manipulate(request):
    """
    This method manipulates the image passed in from the request.image_id and performs manipulations on it.
    Returns:
        an HttpResponse containing the image data of the manipulated image.
    """
    if 'image_id' not in request.GET or 'manipulation' not in request.GET:
        return HttpResponseRedirect('/imgmanip/edit')
    image_id = request.GET['image_id']
    image = get_object_or_404(Image, pk=image_id)
    image_original = utils.load_image(image)
    # Dispatch table mapping the requested manipulation name to its handler.
    manipulation = {
        'foveate_naive': utils.foveate_naive,
        'foveate': utils.foveate,
        'segment': utils.segment,
    }[request.GET['manipulation']]
    image_editted = manipulation(image_original, float(request.GET['x']), float(request.GET['y']))
    url = utils.save_image(image_editted)
    # BUG FIX: HttpResponse's second positional argument is content_type, not
    # status; HttpResponse(url, 200) set a bogus Content-Type of "200".
    # Status 200 is already the default.
    return HttpResponse(url)
def obj_interact(request, image_id):
    """
    Loads image with segmented objects; allows interaction with this image (TODO)
    """
    return render(request, 'imgmanip/obj_interact.html', {'image_id': image_id})
def clusters(request):
    # Render the cluster browser with every COCO category from the coco module.
    categories = coco.categories()
    return render(request, 'imgmanip/clusters.html', {'categories': categories})
def attribute_charts(request):
    # Static page: the template renders the attribute charts client-side.
    return render(request, 'imgmanip/attribute_charts.html', {})
|
import time

# Print ten numbered greetings, pausing one second between them
# (no pause after the final greeting).
for count in range(1, 10):
    print("Hello! #{}".format(count))
    time.sleep(1.0)
print("Hello! #10")
|
import warnings
from datetime import timedelta, datetime
from time import time, gmtime
from werkzeug._compat import to_bytes, string_types, text_type, PY2, integer_types
from werkzeug._internal import _make_cookie_domain, _cookie_quote
from werkzeug.urls import iri_to_uri
def dump_cookie(
    key,
    value="",
    max_age=None,
    expires=None,
    path="/",
    domain=None,
    secure=False,
    httponly=False,
    charset="utf-8",
    sync_expires=True,
    max_size=4093,
    samesite=None,
):
    """Creates a new Set-Cookie header without the ``Set-Cookie`` prefix
    The parameters are the same as in the cookie Morsel object in the
    Python standard library but it accepts unicode data, too.
    On Python 3 the return value of this function will be a unicode
    string, on Python 2 it will be a native string. In both cases the
    return value is usually restricted to ascii as the vast majority of
    values are properly escaped, but that is no guarantee. If a unicode
    string is returned it's tunneled through latin1 as required by
    PEP 3333.
    The return value is not ASCII safe if the key contains unicode
    characters. This is technically against the specification but
    happens in the wild. It's strongly recommended to not use
    non-ASCII values for the keys.
    :param max_age: should be a number of seconds, or `None` (default) if
                    the cookie should last only as long as the client's
                    browser session. Additionally `timedelta` objects
                    are accepted, too.
    :param expires: should be a `datetime` object or unix timestamp.
    :param path: limits the cookie to a given path, per default it will
                 span the whole domain.
    :param domain: Use this if you want to set a cross-domain cookie. For
                   example, ``domain=".example.com"`` will set a cookie
                   that is readable by the domain ``www.example.com``,
                   ``foo.example.com`` etc. Otherwise, a cookie will only
                   be readable by the domain that set it.
    :param secure: The cookie will only be available via HTTPS
    :param httponly: disallow JavaScript to access the cookie. This is an
                     extension to the cookie standard and probably not
                     supported by all browsers.
    :param charset: the encoding for unicode values.
    :param sync_expires: automatically set expires if max_age is defined
                         but expires not.
    :param max_size: Warn if the final header value exceeds this size. The
                     default, 4093, should be safely `supported by most browsers
                     <cookie_>`_. Set to 0 to disable this check.
    :param samesite: Limits the scope of the cookie such that it will only
                     be attached to requests if those requests are "same-site".
    .. _`cookie`: http://browsercookielimits.squawky.net/
    """
    # Normalize key and value to bytes in the requested charset.
    key = to_bytes(key, charset)
    value = to_bytes(value, charset)
    if path is not None:
        # Percent-encode non-ASCII path characters (IRI -> URI).
        path = iri_to_uri(path, charset)
    domain = _make_cookie_domain(domain)
    if isinstance(max_age, timedelta):
        max_age = (max_age.days * 60 * 60 * 24) + max_age.seconds
    if expires is not None:
        if not isinstance(expires, string_types):
            expires = cookie_date(expires)
    elif max_age is not None and sync_expires:
        # Derive Expires from Max-Age for old clients that ignore Max-Age.
        expires = to_bytes(cookie_date(time() + max_age))
    samesite = samesite.title() if samesite else None
    if samesite not in ("Strict", "Lax", 'None', None):
        raise ValueError("invalid SameSite value; must be 'Strict', 'Lax', 'None', or None")
    buf = [key + b"=" + _cookie_quote(value)]
    # XXX: In theory all of these parameters that are not marked with `None`
    # should be quoted. Because stdlib did not quote it before I did not
    # want to introduce quoting there now.
    for k, v, q in (
        (b"Domain", domain, True),
        (b"Expires", expires, False),
        (b"Max-Age", max_age, False),
        (b"Secure", secure, None),
        (b"HttpOnly", httponly, None),
        (b"Path", path, False),
        (b"SameSite", samesite, False),
    ):
        # q is None -> boolean flag attribute: emitted without a value.
        if q is None:
            if v:
                buf.append(k)
            continue
        # v is None -> attribute not set at all.
        if v is None:
            continue
        tmp = bytearray(k)
        if not isinstance(v, (bytes, bytearray)):
            v = to_bytes(text_type(v), charset)
        if q:
            v = _cookie_quote(v)
        tmp += b"=" + v
        buf.append(bytes(tmp))
    # The return value will be an incorrectly encoded latin1 header on
    # Python 3 for consistency with the headers object and a bytestring
    # on Python 2 because that's how the API makes more sense.
    rv = b"; ".join(buf)
    if not PY2:
        rv = rv.decode("latin1")
    # Warn if the final value of the cookie is less than the limit. If the
    # cookie is too large, then it may be silently ignored, which can be quite
    # hard to debug.
    cookie_size = len(rv)
    if max_size and cookie_size > max_size:
        value_size = len(value)
        warnings.warn(
            'The "{key}" cookie is too large: the value was {value_size} bytes'
            " but the header required {extra_size} extra bytes. The final size"
            " was {cookie_size} bytes but the limit is {max_size} bytes."
            " Browsers may silently ignore cookies larger than this.".format(
                key=key,
                value_size=value_size,
                extra_size=cookie_size - value_size,
                cookie_size=cookie_size,
                max_size=max_size,
            ),
            stacklevel=2,
        )
    return rv
def cookie_date(expires=None):
    """Formats the time to ensure compatibility with Netscape's cookie
    standard.
    Accepts a floating point number expressed in seconds since the epoch in, a
    datetime object or a timetuple. All times in UTC. The :func:`parse_date`
    function can be used to parse such a date.
    Outputs a string in the format ``Wdy, DD-Mon-YYYY HH:MM:SS GMT``.
    :param expires: If provided that date is used, otherwise the current.
    """
    # Netscape cookie dates use '-' as the date separator (HTTP dates use spaces).
    return _dump_date(expires, "-")
def _dump_date(d, delim):
"""Used for `http_date` and `cookie_date`."""
if d is None:
d = gmtime()
elif isinstance(d, datetime):
d = d.utctimetuple()
elif isinstance(d, (integer_types, float)):
d = gmtime(d)
return "%s, %02d%s%s%s%s %02d:%02d:%02d GMT" % (
("Mon", "Tue", "Wed", "Thu", "Fri", "Sat", "Sun")[d.tm_wday],
d.tm_mday,
delim,
(
"Jan",
"Feb",
"Mar",
"Apr",
"May",
"Jun",
"Jul",
"Aug",
"Sep",
"Oct",
"Nov",
"Dec",
)[d.tm_mon - 1],
delim,
str(d.tm_year),
d.tm_hour,
d.tm_min,
d.tm_sec,
) |
import json
import time
import statistics

# import matplotlib.pyplot as plt

# Push rate = 15 minutes
PUSH_INTERVAL_SECONDS = 15 * 60


def _summarize(readings, field):
    """Return min/max/median/mean for one sensor *field* over *readings*.

    BUG FIX: the original median loop wrote the *current* record into every
    slot of the working list on each pass, so the reported "median" was just
    the last record's value.  The original min also started from a sentinel
    of 999, which would be wrong for values above 999.
    """
    values = [reading[field] for reading in readings]
    return {
        'min': min(values),
        'max': max(values),
        'med': statistics.median(values),
        'avg': sum(values) / len(values),
    }


def main():
    """Periodically summarize the sensor stream and push it to the endpoint."""
    # Load data from internal json file
    with open('stream_sensor_data.json') as f:
        stream_sensor_data = json.load(f)

    # Initial delay matches the push rate, as in the original script.
    time.sleep(PUSH_INTERVAL_SECONDS)
    while True:
        stream_sensor_value = {
            'temperature': _summarize(stream_sensor_data, 'temperature'),
            'humidity': _summarize(stream_sensor_data, 'humidity'),
        }

        # Display temperature and humidity data
        for key, value in stream_sensor_value.items():
            print(key, ' : ', value)

        # Writing temperature and humidity data to endpoint
        with open('stream_temperature_and_humidity_summary_data.json', 'w') as outfile:
            json.dump(stream_sensor_value, outfile, indent=2)

        # Push rate = 15 minutes
        time.sleep(PUSH_INTERVAL_SECONDS)


if __name__ == "__main__":
    main()
|
import os
import pytest
from asn1PERser.asn_definitions.module_def import ModuleDefinition
@pytest.fixture(scope='function')
def asn1_schema(request):
    # Load the ASN.1 schema file named by the parametrized request from the
    # asn1_schemas/ directory that sits next to this test module.
    base_dir = os.path.split(__file__)[0]
    schema_path = os.path.join(base_dir, 'asn1_schemas', request.param)
    with open(schema_path, 'r') as schema_file:
        return schema_file.read()
@pytest.fixture(scope='function')
def python_code(request):
    # Load the expected generated-Python file named by the parametrized
    # request from the asn1_python_code/ directory next to this test module.
    base_dir = os.path.split(__file__)[0]
    code_path = os.path.join(base_dir, 'asn1_python_code', request.param)
    with open(code_path, 'r') as code_file:
        return code_file.read()
@pytest.fixture()
def parse_schema():
    # Factory fixture: returns a callable that parses an ASN.1 schema string
    # with ModuleDefinition and renders it as Python source text
    # (path=None keeps the template in memory instead of writing a file).
    def _parse_schema(schema):
        parsed = ModuleDefinition.parseString(schema)
        parsed_to_python = parsed[0].create_python_template(path=None)
        return parsed_to_python
    return _parse_schema
|
# Copyright (c) 2011 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
{
  'targets': [
    # Plain framework bundle that other targets depend on / copy around.
    {
      'target_name': 'dep_framework',
      'product_name': 'Dependency Bundle',
      'type': 'shared_library',
      'mac_bundle': 1,
      'sources': [ 'empty.c', ],
    },
    # Framework with resources, Info.plist and an in-bundle copy step.
    {
      'target_name': 'test_framework',
      'product_name': 'Test Framework',
      'type': 'shared_library',
      'mac_bundle': 1,
      'dependencies': [ 'dep_framework', ],
      'sources': [
        'TestFramework/ObjCVector.h',
        'TestFramework/ObjCVectorInternal.h',
        'TestFramework/ObjCVector.mm',
      ],
      'mac_bundle_resources': [
        'TestFramework/English.lproj/InfoPlist.strings',
      ],
      'link_settings': {
        'libraries': [
          '$(SDKROOT)/System/Library/Frameworks/Cocoa.framework',
        ],
      },
      'xcode_settings': {
        'INFOPLIST_FILE': 'TestFramework/Info.plist',
        'GCC_DYNAMIC_NO_PIC': 'NO',
      },
      'copies': [
        # Test copying to a file that has envvars in its dest path.
        # Needs to be in a mac_bundle target, else CONTENTS_FOLDER_PATH isn't
        # set.
        {
          'destination': '<(PRODUCT_DIR)/$(CONTENTS_FOLDER_PATH)/Libraries',
          'files': [
            'empty.c',
          ],
        },
      ],
    },
    # Copies a whole framework bundle (paths contain spaces) without signing.
    {
      'target_name': 'copy_target',
      'type': 'none',
      'dependencies': [ 'test_framework', 'dep_framework', ],
      'copies': [
        # Test copying directories with spaces in src and dest paths.
        {
          'destination': '<(PRODUCT_DIR)/Test Framework.framework/foo',
          'files': [
            '<(PRODUCT_DIR)/Dependency Bundle.framework',
          ],
        },
      ],
      'actions': [
        {
          'action_name': 'aektschn',
          'inputs': [],
          'outputs': ['<(PRODUCT_DIR)/touched_file'],
          'action': ['touch', '${BUILT_PRODUCTS_DIR}/action_file'],
        },
      ],
    },
    # Same copy, but with xcode_code_sign enabled on the copy step.
    {
      'target_name': 'copy_target_code_sign',
      'type': 'none',
      'dependencies': [ 'test_framework', 'dep_framework', ],
      'copies': [
        # Test copying directories with spaces in src and dest paths.
        {
          'destination': '<(PRODUCT_DIR)/Test Framework.framework/foo',
          'files': [
            '<(PRODUCT_DIR)/Dependency Bundle.framework',
          ],
          'xcode_code_sign': 1,
        },
      ],
      'actions': [
        {
          'action_name': 'aektschn',
          'inputs': [],
          'outputs': ['<(PRODUCT_DIR)/touched_file'],
          'action': ['touch', '${BUILT_PRODUCTS_DIR}/action_file'],
        },
      ],
    },
  ],
}
|
# -*- coding: utf-8 -*-
import inject, logging
import psycopg2
import asyncio
from asyncio import coroutine
from autobahn.asyncio.wamp import ApplicationSession
from model.config import Config
from model.systems.assistance.firmware import Firmware
'''
Class that exposes the firmware methods over WAMP.
'''
class WampFirmware(ApplicationSession):
    # WAMP session that exposes the assistance firmware sync methods as RPC
    # endpoints. Collaborators are resolved through the `inject` container.

    def __init__(self,config=None):
        logging.debug('instanciando WampFirmware')
        ApplicationSession.__init__(self, config)
        # Injected firmware facade and server configuration (set up elsewhere).
        self.firmware = inject.instance(Firmware)
        self.serverConfig = inject.instance(Config)
    '''
    como referencia tambien se puede sobreeescribir el onConnect
    def onConnect(self):
        logging.debug('transport connected')
        self.join(self.config.realm)
    '''
    @coroutine
    def onJoin(self, details):
        # Called once the WAMP session is established: register the async
        # wrappers under their public procedure URIs.
        logging.debug('registering methods')
        yield from self.register(self.syncLogs_async, 'assistance.server.firmware.syncLogs')
        yield from self.register(self.syncUser_async, 'assistance.server.firmware.syncUser')
        '''
        yield from self.register(self.deviceAnnounce, 'assistance.server.firmware.deviceAnnounce')
        '''
    def _getDatabase(self):
        # Open a fresh PostgreSQL connection from values in the server config.
        # Callers are responsible for closing it (see syncLogs/syncUser).
        host = self.serverConfig.configs['database_host']
        dbname = self.serverConfig.configs['database_database']
        user = self.serverConfig.configs['database_user']
        passw = self.serverConfig.configs['database_password']
        return psycopg2.connect(host=host, dbname=dbname, user=user, password=passw)
    def deviceAnnounce(self,device):
        # Placeholder: device announcements are not handled yet (registration
        # above is also commented out).
        pass
    '''
    Guarda los logs pasados como parámetro dentro de la base de datos.
    retorna:
        lista con los logs que fueron guardados exitósamente y que no existían
    '''
    def syncLogs(self,attlogs):
        """Store the given attendance logs; return the newly saved ones."""
        con = self._getDatabase()
        try:
            synchedLogs = self.firmware.syncLogs(con,attlogs)
            con.commit()
            return synchedLogs
        finally:
            # Always release the connection, even if the sync raised.
            con.close()
    @coroutine
    def syncLogs_async(self,attlogs):
        # Run the blocking DB work on the default executor so the event loop
        # stays responsive.
        loop = asyncio.get_event_loop()
        r = yield from loop.run_in_executor(None,self.syncLogs,attlogs)
        return r
    '''
    Actualiza la información del usuario y sus templates dentro de la base de datos
    '''
    def syncUser(self,user,templates):
        """Update the user's data and fingerprint templates in the database."""
        con = self._getDatabase()
        try:
            self.firmware.syncUser(con,user,templates)
            con.commit()
        finally:
            con.close()
    @coroutine
    def syncUser_async(self,user,templates):
        # Returns the synced user's id, or None if the sync failed (errors are
        # logged rather than propagated to the WAMP caller).
        try:
            loop = asyncio.get_event_loop()
            yield from loop.run_in_executor(None,self.syncUser,user,templates)
            return user['id']
        except Exception as e:
            logging.exception(e)
            return None
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
# Client tests to ensure object ownership functionality.
import grp
import pytest
from getpass import getuser
from os import getenv
from tests.common.sentry_cache_test_suite import SentryCacheTestSuite, TestObject
from tests.common.test_dimensions import create_uncompressed_text_dimension
# Sentry long polling frequency to make Sentry refresh not run.
SENTRY_LONG_POLLING_FREQUENCY_S = 3600
SENTRY_CONFIG_DIR = getenv('IMPALA_HOME') + '/fe/src/test/resources/'
SENTRY_BASE_LOG_DIR = getenv('IMPALA_CLUSTER_LOGS_DIR') + "/sentry"
SENTRY_CONFIG_FILE_OO = SENTRY_CONFIG_DIR + 'sentry-site_oo.xml'
SENTRY_CONFIG_FILE_OO_NOGRANT = SENTRY_CONFIG_DIR + 'sentry-site_oo_nogrant.xml'
SENTRY_CONFIG_FILE_NO_OO = SENTRY_CONFIG_DIR + 'sentry-site_no_oo.xml'
class TestOwnerPrivileges(SentryCacheTestSuite):
    @classmethod
    def add_test_dimensions(cls):
        # Restrict the matrix to uncompressed text so each test runs exactly once
        # per workload; compression adds nothing to authorization coverage.
        super(TestOwnerPrivileges, cls).add_test_dimensions()
        cls.ImpalaTestMatrix.add_dimension(
            create_uncompressed_text_dimension(cls.get_workload()))
def teardown_class(self):
super(self)
    def setup_method(self, method):
        # Standard per-test setup, plus an admin role so the test itself can
        # create/drop objects and roles regardless of ownership state.
        super(TestOwnerPrivileges, self).setup_method(method)
        self._setup_admin()
    def teardown_method(self, method):
        # Drop the admin role before the standard per-test teardown.
        self._cleanup_admin()
        super(TestOwnerPrivileges, self).teardown_method(method)
    def _setup_ownership_test(self):
        """Create the roles and base grants the ownership tests rely on."""
        # Start from a clean slate in case an earlier run left artifacts.
        self._cleanup_ownership_test()
        # Base roles for enabling tests.
        self.execute_query("create role owner_priv_test_oo_user1")
        # Role for verifying grant.
        self.execute_query("create role owner_priv_test_all_role")
        # Role for verifying transfer to role.
        self.execute_query("create role owner_priv_test_owner_role")
        self.execute_query("grant role owner_priv_test_oo_user1 to group oo_group1")
        self.execute_query("grant role owner_priv_test_owner_role to group oo_group1")
        # oo_user1 needs create-on-server to own new objects, and select on
        # functional for view definitions.
        self.execute_query("grant create on server to owner_priv_test_oo_user1")
        self.execute_query("grant select on database functional to owner_priv_test_oo_user1")
    def _cleanup_ownership_test(self):
        """Drop the test database and every role this test family created."""
        # Clean up the test artifacts.
        try:
            self.cleanup_db("owner_priv_db", sync_ddl=0)
        except Exception:
            # Ignore this if we can't show tables.
            pass
        # Clean up any old roles created by this test
        for role_name in self.execute_query("show roles").data:
            if "owner_priv_test" in role_name:
                self.execute_query("drop role %s" % role_name)
@staticmethod
def count_user_privileges(result):
"""
This method returns a new list of privileges that only contain user privileges.
"""
# results should have the following columns
# principal_name, principal_type, scope, database, table, column, uri, privilege,
# grant_option, create_time
total = 0
for row in result.data:
col = row.split('\t')
if col[0] == 'USER':
total += 1
return total
    def _validate_no_user_privileges(self, client, user, invalidate_metadata):
        """Return True when ``user`` holds no USER-type privileges.

        When invalidate_metadata is set, force a reload first so the check
        reflects what is actually stored in Sentry, not the catalog cache.
        """
        if invalidate_metadata: self.execute_query("invalidate metadata")
        result = self.user_query(client, "show grant user %s" % user, user=user)
        return TestOwnerPrivileges.count_user_privileges(result) == 0
    def _setup_admin(self):
        """Create an all-on-server admin role granted to the current user's group."""
        # Admin for manipulation and cleaning up.
        try:
            self.execute_query("drop role owner_priv_admin")
        except Exception:
            # Ignore in case it wasn't created yet.
            pass
        self.execute_query("create role owner_priv_admin")
        self.execute_query("grant all on server to owner_priv_admin with grant option")
        group_name = grp.getgrnam(getuser()).gr_name
        # Backticks protect group names containing special characters.
        self.execute_query("grant role owner_priv_admin to group `%s`" % group_name)
    def _cleanup_admin(self):
        """Drop the admin role created by _setup_admin."""
        self.execute_query("drop role owner_priv_admin")
    @pytest.mark.execute_serially
    @SentryCacheTestSuite.with_args(
        impalad_args="--server_name=server1 --sentry_config={0} "
                     "--authorization_policy_provider_class="
                     "org.apache.impala.service.CustomClusterResourceAuthorizationProvider"
                     .format(SENTRY_CONFIG_FILE_OO),
        catalogd_args="--sentry_config={0} --sentry_catalog_polling_frequency_s={1} "
                      "--authorization_policy_provider_class="
                      "org.apache.impala.service.CustomClusterResourceAuthorizationProvider"
                      .format(SENTRY_CONFIG_FILE_OO, SENTRY_LONG_POLLING_FREQUENCY_S),
        sentry_config=SENTRY_CONFIG_FILE_OO,
        sentry_log_dir="{0}/test_owner_privileges_with_grant".format(SENTRY_BASE_LOG_DIR))
    def test_owner_privileges_with_grant(self, vector, unique_database):
        """Tests owner privileges with grant on database, table, and view.
        - invalidate_metadata=True: With Sentry refresh to make sure privileges are really
          stored in Sentry.
        - invalidate_metadata=False: No Sentry refresh to make sure user can use owner
          privileges right away without a Sentry refresh."""
        # Run the full scenario for each object type, both with and without a
        # forced Sentry refresh; always clean up even on failure.
        for invalidate in [True, False]:
            try:
                self._setup_ownership_test()
                self._execute_owner_privilege_tests(TestObject(TestObject.DATABASE,
                                                               "owner_priv_db",
                                                               grant=True),
                                                    invalidate_metadata=invalidate)
                self._execute_owner_privilege_tests(TestObject(TestObject.TABLE,
                                                               unique_database +
                                                               ".owner_priv_tbl",
                                                               grant=True),
                                                    invalidate_metadata=invalidate)
                self._execute_owner_privilege_tests(TestObject(TestObject.VIEW,
                                                               unique_database +
                                                               ".owner_priv_view",
                                                               grant=True),
                                                    invalidate_metadata=invalidate)
            finally:
                self._cleanup_ownership_test()
    def _execute_owner_privilege_tests(self, test_obj, invalidate_metadata):
        """
        Executes all the statements required to validate owner privileges work correctly
        for a specific database, table, or view.

        test_obj: the TestObject (database/table/view) under test.
        invalidate_metadata: when True, refresh the catalog before privilege checks.
        """
        # Create object and ensure oo_user1 gets owner privileges.
        self.oo_user1_impalad_client = self.create_impala_client()
        # oo_user2 is only used for transferring ownership.
        self.oo_user2_impalad_client = self.create_impala_client()
        self.user_query(self.oo_user1_impalad_client, "create %s if not exists %s %s %s" %
                        (test_obj.obj_type, test_obj.obj_name, test_obj.table_def,
                         test_obj.view_select), user="oo_user1")
        self.validate_privileges(self.oo_user1_impalad_client, "show grant user oo_user1",
                                 test_obj, user="oo_user1",
                                 invalidate_metadata=invalidate_metadata)
        # Ensure grant works.
        self.user_query(self.oo_user1_impalad_client,
                        "grant all on %s %s to role owner_priv_test_all_role" %
                        (test_obj.grant_name, test_obj.obj_name), user="oo_user1")
        self.user_query(self.oo_user1_impalad_client,
                        "revoke all on %s %s from role owner_priv_test_all_role" %
                        (test_obj.grant_name, test_obj.obj_name), user="oo_user1")
        # Change the database owner and ensure oo_user1 does not have owner privileges.
        self.user_query(self.oo_user1_impalad_client, "alter %s %s set owner user oo_user2" %
                        (test_obj.obj_type, test_obj.obj_name), user="oo_user1")
        assert self._validate_no_user_privileges(self.oo_user1_impalad_client,
                                                 user="oo_user1",
                                                 invalidate_metadata=invalidate_metadata)
        # Ensure oo_user1 cannot drop database after owner change.
        # Use a delay to avoid cache consistency issue that could occur after alter.
        self.user_query(self.oo_user1_impalad_client, "drop %s %s" %
                        (test_obj.obj_type, test_obj.obj_name), user="oo_user1",
                        error_msg="does not have privileges to execute 'DROP'")
        # oo_user2 should have privileges for object now.
        self.validate_privileges(self.oo_user2_impalad_client, "show grant user oo_user2",
                                 test_obj, user="oo_user2",
                                 invalidate_metadata=invalidate_metadata)
        # Change the owner to a role and ensure oo_user2 doesn't have privileges.
        # Set the owner back to oo_user1 since for views, oo_user2 doesn't have select
        # privileges on the underlying table.
        self.execute_query("alter %s %s set owner user oo_user1" %
                           (test_obj.obj_type, test_obj.obj_name),
                           query_options={"sync_ddl": 1})
        assert self._validate_no_user_privileges(self.oo_user2_impalad_client,
                                                 user="oo_user2",
                                                 invalidate_metadata=invalidate_metadata)
        self.user_query(self.oo_user1_impalad_client,
                        "alter %s %s set owner role owner_priv_test_owner_role" %
                        (test_obj.obj_type, test_obj.obj_name), user="oo_user1")
        # Ensure oo_user1 does not have user privileges.
        assert self._validate_no_user_privileges(self.oo_user1_impalad_client,
                                                 user="oo_user1",
                                                 invalidate_metadata=invalidate_metadata)
        # Ensure role has owner privileges.
        self.validate_privileges(self.oo_user1_impalad_client,
                                 "show grant role owner_priv_test_owner_role", test_obj,
                                 user="oo_user1", invalidate_metadata=invalidate_metadata)
        # Drop the object and ensure no role privileges.
        # Use a delay to avoid cache consistency issue that could occur after alter.
        self.user_query(self.oo_user1_impalad_client, "drop %s %s " %
                        (test_obj.obj_type, test_obj.obj_name), user="oo_user1")
        assert self._validate_no_user_privileges(self.oo_user1_impalad_client,
                                                 user="oo_user1",
                                                 invalidate_metadata=invalidate_metadata)
        # Ensure user privileges are gone after drop.
        # Use a delay to avoid cache consistency issue that could occur after drop.
        self.user_query(self.oo_user1_impalad_client, "create %s if not exists %s %s %s" %
                        (test_obj.obj_type, test_obj.obj_name, test_obj.table_def,
                         test_obj.view_select), user="oo_user1")
        # Use a delay to avoid cache consistency issue that could occur after create.
        self.user_query(self.oo_user1_impalad_client, "drop %s %s " %
                        (test_obj.obj_type, test_obj.obj_name), user="oo_user1")
        assert self._validate_no_user_privileges(self.oo_user1_impalad_client,
                                                 user="oo_user1",
                                                 invalidate_metadata=invalidate_metadata)
    @pytest.mark.execute_serially
    @SentryCacheTestSuite.with_args(
        impalad_args="--server_name=server1 --sentry_config={0} "
                     "--authorization_policy_provider_class="
                     "org.apache.impala.service.CustomClusterResourceAuthorizationProvider "
                     .format(SENTRY_CONFIG_FILE_NO_OO),
        catalogd_args="--sentry_config={0} --authorization_policy_provider_class="
                      "org.apache.impala.service.CustomClusterResourceAuthorizationProvider"
                      .format(SENTRY_CONFIG_FILE_NO_OO),
        sentry_config=SENTRY_CONFIG_FILE_NO_OO,
        sentry_log_dir="{0}/test_owner_privileges_disabled".format(SENTRY_BASE_LOG_DIR))
    def test_owner_privileges_disabled(self, vector, unique_database):
        """Tests that there should not be owner privileges."""
        # Sentry config has object ownership disabled; every owner action must fail.
        try:
            self._setup_ownership_test()
            self._execute_owner_privilege_tests_no_oo(TestObject(TestObject.DATABASE,
                                                                 "owner_priv_db"))
            self._execute_owner_privilege_tests_no_oo(TestObject(TestObject.TABLE,
                                                                 unique_database +
                                                                 ".owner_priv_tbl"))
            self._execute_owner_privilege_tests_no_oo(TestObject(TestObject.VIEW,
                                                                 unique_database +
                                                                 ".owner_priv_view"))
        finally:
            self._cleanup_ownership_test()
    def _execute_owner_privilege_tests_no_oo(self, test_obj):
        """
        Executes all the statements required to validate owner privileges work correctly
        for a specific database, table, or view.

        Ownership is disabled in this configuration, so the creator must NOT
        receive any owner privileges and each owner operation must fail.
        """
        # Create object and ensure oo_user1 gets owner privileges.
        self.oo_user1_impalad_client = self.create_impala_client()
        self.user_query(self.oo_user1_impalad_client, "create %s if not exists %s %s %s"
                        % (test_obj.obj_type, test_obj.obj_name, test_obj.table_def,
                           test_obj.view_select), user="oo_user1")
        # Ensure grant doesn't work.
        self.user_query(self.oo_user1_impalad_client,
                        "grant all on %s %s to role owner_priv_test_all_role" %
                        (test_obj.grant_name, test_obj.obj_name), user="oo_user1",
                        error_msg="does not have privileges to execute: GRANT_PRIVILEGE")
        self.user_query(self.oo_user1_impalad_client,
                        "revoke all on %s %s from role owner_priv_test_all_role" %
                        (test_obj.grant_name, test_obj.obj_name), user="oo_user1",
                        error_msg="does not have privileges to execute: REVOKE_PRIVILEGE")
        # Ensure changing the database owner doesn't work.
        self.user_query(self.oo_user1_impalad_client,
                        "alter %s %s set owner user oo_user2" %
                        (test_obj.obj_type, test_obj.obj_name), user="oo_user1",
                        error_msg="does not have privileges with 'GRANT OPTION'")
        # Ensure oo_user1 cannot drop database.
        self.user_query(self.oo_user1_impalad_client, "drop %s %s" %
                        (test_obj.obj_type, test_obj.obj_name), user="oo_user1",
                        error_msg="does not have privileges to execute 'DROP'")
    @pytest.mark.execute_serially
    @SentryCacheTestSuite.with_args(
        impalad_args="--server_name=server1 --sentry_config={0} "
                     "--authorization_policy_provider_class="
                     "org.apache.impala.service.CustomClusterResourceAuthorizationProvider"
                     .format(SENTRY_CONFIG_FILE_OO_NOGRANT),
        catalogd_args="--sentry_config={0} --sentry_catalog_polling_frequency_s={1} "
                      "--authorization_policy_provider_class="
                      "org.apache.impala.service.CustomClusterResourceAuthorizationProvider"
                      .format(SENTRY_CONFIG_FILE_OO_NOGRANT,
                              SENTRY_LONG_POLLING_FREQUENCY_S),
        sentry_config=SENTRY_CONFIG_FILE_OO_NOGRANT,
        sentry_log_dir="{0}/test_owner_privileges_without_grant"
                       .format(SENTRY_BASE_LOG_DIR))
    def test_owner_privileges_without_grant(self, vector, unique_database):
        """Tests owner privileges without grant on database, table, and view.
        - invalidate_metadata=True: With Sentry refresh to make sure privileges are really
          stored in Sentry.
        - invalidate_metadata=False: No Sentry refresh to make sure user can use owner
          privileges right away without a Sentry refresh."""
        # Ownership is enabled but without grant option: owners can use and drop
        # their objects but cannot grant/revoke or transfer ownership.
        for invalidate in [True, False]:
            try:
                self._setup_ownership_test()
                self._execute_owner_privilege_tests_oo_nogrant(TestObject(TestObject.DATABASE,
                                                                          "owner_priv_db"),
                                                               invalidate_metadata=invalidate)
                self._execute_owner_privilege_tests_oo_nogrant(TestObject(TestObject.TABLE,
                                                                          unique_database +
                                                                          ".owner_priv_tbl"),
                                                               invalidate_metadata=invalidate)
                self._execute_owner_privilege_tests_oo_nogrant(TestObject(TestObject.VIEW,
                                                                          unique_database +
                                                                          ".owner_priv_view"),
                                                               invalidate_metadata=invalidate)
            finally:
                self._cleanup_ownership_test()
    def _execute_owner_privilege_tests_oo_nogrant(self, test_obj, invalidate_metadata):
        """
        Executes all the statements required to validate owner privileges work correctly
        for a specific database, table, or view.

        In this configuration the owner gets privileges WITHOUT the grant
        option: using and dropping the object succeeds, grant/revoke/transfer fail.
        """
        # Create object and ensure oo_user1 gets owner privileges.
        self.oo_user1_impalad_client = self.create_impala_client()
        self.user_query(self.oo_user1_impalad_client, "create %s if not exists %s %s %s" %
                        (test_obj.obj_type, test_obj.obj_name, test_obj.table_def,
                         test_obj.view_select), user="oo_user1")
        self.validate_privileges(self.oo_user1_impalad_client, "show grant user oo_user1",
                                 test_obj, user="oo_user1",
                                 invalidate_metadata=invalidate_metadata)
        # Ensure grant doesn't work.
        self.user_query(self.oo_user1_impalad_client,
                        "grant all on %s %s to role owner_priv_test_all_role" %
                        (test_obj.grant_name, test_obj.obj_name), user="oo_user1",
                        error_msg="does not have privileges to execute: GRANT_PRIVILEGE")
        self.user_query(self.oo_user1_impalad_client,
                        "revoke all on %s %s from role owner_priv_test_all_role" %
                        (test_obj.grant_name, test_obj.obj_name), user="oo_user1",
                        error_msg="does not have privileges to execute: REVOKE_PRIVILEGE")
        self.user_query(self.oo_user1_impalad_client, "alter %s %s set owner user oo_user2" %
                        (test_obj.obj_type, test_obj.obj_name), user="oo_user1",
                        error_msg="does not have privileges with 'GRANT OPTION'")
        # Use a delay to avoid cache consistency issue that could occur after alter.
        self.user_query(self.oo_user1_impalad_client, "drop %s %s " %
                        (test_obj.obj_type, test_obj.obj_name), user="oo_user1")
        assert self._validate_no_user_privileges(self.oo_user1_impalad_client,
                                                 user="oo_user1",
                                                 invalidate_metadata=invalidate_metadata)
    @pytest.mark.execute_serially
    @SentryCacheTestSuite.with_args(
        impalad_args="--server_name=server1 --sentry_config={0} "
                     "--authorization_policy_provider_class="
                     "org.apache.impala.service.CustomClusterResourceAuthorizationProvider"
                     .format(SENTRY_CONFIG_FILE_OO),
        catalogd_args="--sentry_config={0} "
                      "--authorization_policy_provider_class="
                      "org.apache.impala.service.CustomClusterResourceAuthorizationProvider"
                      .format(SENTRY_CONFIG_FILE_OO),
        sentry_config=SENTRY_CONFIG_FILE_OO,
        sentry_log_dir="{0}/test_owner_privileges_different_cases"
                       .format(SENTRY_BASE_LOG_DIR))
    def test_owner_privileges_different_cases(self, vector, unique_database):
        """IMPALA-7742: Tests that only user names that differ only in case are not
        authorized to access the database/table/view unless the user is the owner."""
        # Use two different clients so that the sessions will use two different user names.
        foobar_impalad_client = self.create_impala_client()
        FOOBAR_impalad_client = self.create_impala_client()
        role_name = "owner_priv_diff_cases_role"
        try:
            self.execute_query("create role %s" % role_name)
            self.execute_query("grant role %s to group foobar" % role_name)
            self.execute_query("grant all on server to role %s" % role_name)
            # 'foobar' creates the objects; 'FOOBAR' (same name, different case)
            # must be treated as a distinct, unprivileged user throughout.
            self.user_query(foobar_impalad_client, "create database %s_db" %
                            unique_database, user="foobar")
            # FOOBAR user should not be allowed to create a table in the foobar's database.
            self.user_query(FOOBAR_impalad_client, "create table %s_db.test_tbl(i int)" %
                            unique_database, user="FOOBAR",
                            error_msg="User 'FOOBAR' does not have privileges to execute "
                                      "'CREATE' on: %s_db" % unique_database)
            self.user_query(foobar_impalad_client, "create table %s.owner_case_tbl(i int)" %
                            unique_database, user="foobar")
            # FOOBAR user should not be allowed to select foobar's table.
            self.user_query(FOOBAR_impalad_client, "select * from %s.owner_case_tbl" %
                            unique_database, user="FOOBAR",
                            error_msg="User 'FOOBAR' does not have privileges to execute "
                                      "'SELECT' on: %s.owner_case_tbl" % unique_database)
            self.user_query(foobar_impalad_client,
                            "create view %s.owner_case_view as select 1" % unique_database,
                            user="foobar")
            # FOOBAR user should not be allowed to select foobar's view.
            self.user_query(FOOBAR_impalad_client, "select * from %s.owner_case_view" %
                            unique_database, user="FOOBAR",
                            error_msg="User 'FOOBAR' does not have privileges to execute "
                                      "'SELECT' on: %s.owner_case_view" % unique_database)
            # FOOBAR user should not be allowed to see foobar's privileges.
            self.user_query(FOOBAR_impalad_client, "show grant user foobar", user="FOOBAR",
                            error_msg="User 'FOOBAR' does not have privileges to access the "
                                      "requested policy metadata")
        finally:
            self.user_query(foobar_impalad_client, "drop database %s_db cascade" %
                            unique_database, user="foobar")
            self.execute_query("drop role %s" % role_name)
|
import discord
from discord.ext import commands
class Mycog:
    """My custom cog that does stuff!"""
    # NOTE(review): written against the pre-1.0 discord.py API (plain class, no
    # commands.Cog base, bot.say) — confirm the target discord.py version.
    def __init__(self, bot):
        # Keep a reference to the bot so the command can reply through it.
        self.bot = bot
    @commands.command()
    async def roast4(self, user: discord.Member):
        """Roast People"""
        #Your code will go here
        await self.bot.say("I’m sorry, was I meant to be offended? The only thing offending me is your face " + user.mention + " ")
def setup(bot):
    """Entry point used by discord.py's extension loader to attach the cog."""
    bot.add_cog(Mycog(bot))
|
# -*- coding: utf-8 -*-
"""
ytelapi
This file was automatically generated by APIMATIC v2.0 ( https://apimatic.io ).
"""
class Body21(object):
    """Implementation of the 'body_21' model.

    Plain data holder for an outgoing email request.

    Attributes:
        to (string): A valid address that will receive the email. Multiple
            addresses can be separated by using commas.
        mtype (TypeEnum): Specifies the type of email to be sent
        subject (string): The subject of the mail. Must be a valid string.
        message (string): The email message that is to be sent in the text.
        mfrom (string): A valid address that will send the email.
        cc (string): Carbon copy. A valid address that will receive the email.
            Multiple addresses can be separated by using commas.
        bcc (string): Blind carbon copy. A valid address that will receive the
            email. Multiple addresses can be separated by using commas.
        attachment (string): A file that is attached to the email. Must be
            less than 7 MB in size.
    """

    # Maps Python attribute names to the property names used by the API.
    _names = {
        "to":'To',
        "mtype":'Type',
        "subject":'Subject',
        "message":'Message',
        "mfrom":'From',
        "cc":'Cc',
        "bcc":'Bcc',
        "attachment":'Attachment'
    }

    def __init__(self,
                 to=None,
                 mtype=None,
                 subject=None,
                 message=None,
                 mfrom=None,
                 cc=None,
                 bcc=None,
                 attachment=None):
        """Constructor for the Body21 class"""
        self.to = to
        self.mtype = mtype
        self.subject = subject
        self.message = message
        self.mfrom = mfrom
        self.cc = cc
        self.bcc = bcc
        self.attachment = attachment

    @classmethod
    def from_dictionary(cls, dictionary):
        """Build an instance from a deserialized API response dictionary.

        Args:
            dictionary (dictionary): A dictionary representation of the object
                whose keys match the API property names (see ``_names``).
                Missing keys become None.

        Returns:
            object: An instance of this structure class, or None when
            ``dictionary`` is None.
        """
        if dictionary is None:
            return None
        # Pull each attribute from the dictionary via its API property name.
        kwargs = {attr: dictionary.get(api_name)
                  for attr, api_name in cls._names.items()}
        return cls(**kwargs)
|
# -*- coding: utf-8 -*-
"""
Created on Sun Sep 1 14:27:38 2019
@author: Reuben
Sometimes it's useful not to have to pass around a dictionary or other object
to accumulate data (e.g. key-value pairs). To that end, this module provides
singleton-like access to a set of dictionaries. A call to the `get_dict`
method with the same name will always return the same dictionary.
This approach can be useful in complex classes where multiple methods all
input data into a resultbox entry.
"""
class Dict_Container(dict):
    """A dict of dicts: accessing a missing key lazily creates an empty dict."""

    def __missing__(self, key):
        # Create, remember, and hand back a fresh dict for this key so every
        # later lookup with the same key returns the same object.
        fresh = {}
        self[key] = fresh
        return fresh
dict_container = Dict_Container()
def get_dict(name):
    """Return the named dictionary

    Repeated calls with the same name always return the same dictionary
    (singleton-like access backed by the module-level container).

    Args:
        name (str): The name of the dictionary

    Returns:
        dict: The dictionary
    """
    return dict_container[name]
|
import sys

# Greedy coin change (coin values are given in increasing order, each dividing
# relationship making greedy optimal per the problem statement).
N, K = map(int, sys.stdin.readline().split())
coins = [int(sys.stdin.readline()) for _ in range(N)]

used_total = 0
# Walk from the largest coin down, taking as many of each as fit.
for value in reversed(coins):
    used, K = divmod(K, value)
    used_total += used
print(used_total)
|
import time
import hdbfs
import hdbfs.ark
import hdbfs.imgdb
import hdbfs.model
class ThumbGenerator:
    # Background worker (Python 2) that picks random objects from the hdbfs
    # database and generates thumbnails / metadata for them.

    def __init__( self ):
        # Cached batch of object ids still to process.
        self.__objects = []

    def __pop_object( self, db ):
        # Refill the id cache from the DB when empty, then pop one object.
        from sqlalchemy import or_

        if( len( self.__objects ) == 0 ):
            # TODO, this is hacky!
            # Grab up to 500 random file/group object ids in one query.
            self.__objects = [ obj_id[0] for obj_id in
                db.session.query( hdbfs.model.Object.object_id ) \
                  .filter( or_( hdbfs.model.Object.object_type == hdbfs.TYPE_FILE,
                                hdbfs.model.Object.object_type == hdbfs.TYPE_GROUP ) ) \
                  .order_by( 'RANDOM()' ).limit( 500 ) ]

        if( len( self.__objects ) == 0 ):
            # Database holds no eligible objects at all.
            return None

        obj_id = self.__objects.pop()
        return db.get_object_by_id( obj_id )

    def run( self, max_exp, force = False, sleep = None ):
        # Process one object: thumbnails up to max_exp for images, metadata for
        # albums. `sleep` throttles the work; `force` is currently unused.
        db = hdbfs.Database()

        try:
            db.enable_write_access()

            obj = self.__pop_object( db )
            if( obj is None ):
                return

            if( isinstance( obj, hdbfs.ImageFile ) ):
                print 'Generating thumbs and meta for file', obj.get_id()
                obj.check_metadata()
                # Generate thumbnails at increasing exponents until the cache
                # stops producing them or max_exp is exceeded.
                exp = hdbfs.imgdb.MIN_THUMB_EXP
                while( db.tbcache.make_thumb( obj, exp ) is not None
                   and exp <= max_exp ):
                    exp += 1
                if( sleep is not None ):
                    time.sleep( sleep )
            elif( isinstance( obj, hdbfs.Album ) ):
                print 'Generating metadata for album', obj.get_id()
                obj.check_metadata()
                db.tbcache.init_album_metadata( obj )
                if( sleep is not None ):
                    time.sleep( sleep )
        finally:
            # Always release the database handle.
            db.close()
|
# Script to illustrate the QG (quasi-geostrophic) model.
from common import *
from mods.QG.core import sample_filename, nx, square, default_prms
def show(x0, psi=True, ax=None):
    """Draw state ``x0`` and return an ``update(x)`` callback for animation.

    Args:
        x0: flat state vector; ``square`` reshapes it to the 2D grid.
        psi: when True show the stream function, otherwise the derived q field.
        ax: axes to draw on; a new figure/axes is created when omitted.
    """
    def psi_or_q(x):
        # Display psi directly, or convert it to potential vorticity.
        return x if psi else compute_q(x)
    # Fix: compare to None with 'is', not '=='. '==' invokes __eq__, which can
    # misbehave (or return arrays) if an array-like is ever passed for ax.
    if ax is None:
        _, ax = plt.subplots()
    im = ax.imshow(psi_or_q(square(x0)))
    # Fixed color limits so successive animation frames are comparable.
    if psi: im.set_clim(-30, 30)
    else:   im.set_clim(-28e4, 25e4)
    def update(x):
        im.set_data(psi_or_q(square(x)))
        plt.pause(0.01)
    return update
# Although psi is the state variable, q looks cooler.
# q = Nabla^2(psi) - F*psi.
import scipy.ndimage.filters as filters
dx = 1/(nx-1)
def compute_q(psi):
    """Potential vorticity from stream function: q = Nabla^2(psi) - F*psi."""
    # Finite-difference Laplacian on the square grid (dx defined above).
    Lapl = filters.laplace(psi,mode='constant')/dx**2
    # mode='constant' coz BCs are: psi = nabla psi = nabla^2 psi = 0
    return Lapl - default_prms['F']*psi
###########
# Main
###########
# Side-by-side animation of the stream function (left) and vorticity (right).
fig, (ax1,ax2) = plt.subplots(ncols=2,sharex=True,sharey=True,figsize=(8,4))
for ax in (ax1,ax2): ax.set_aspect('equal',adjustable_box_or_forced())
ax1.set_title('$\psi$')
ax2.set_title('$q$')
xx = np.load(sample_filename)['sample']
setter1 = show(xx[0],psi=True ,ax=ax1)
setter2 = show(xx[0],psi=False,ax=ax2)
for k, x in progbar(list(enumerate(xx)),"Animating"):
    # Draw every second state only, to keep the animation fast.
    # NOTE(review): indentation reconstructed — title/pause assumed to belong
    # to the drawn frames; confirm against the original layout.
    if k%2==0:
        setter1(x)
        setter2(x)
        fig.suptitle("k: "+str(k))
        plt.pause(0.01)
|
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file '.\MainWindow.ui'
#
# Created by: PyQt5 UI code generator 5.15.4
#
# WARNING: Any manual changes made to this file will be lost when pyuic5 is
# run again. Do not edit this file unless you know what you are doing.
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_PWMMainWindow(object):
def setupUi(self, PWMMainWindow):
PWMMainWindow.setObjectName("PWMMainWindow")
PWMMainWindow.resize(695, 267)
self.centralwidget = QtWidgets.QWidget(PWMMainWindow)
self.centralwidget.setObjectName("centralwidget")
self.gridLayout = QtWidgets.QGridLayout(self.centralwidget)
self.gridLayout.setObjectName("gridLayout")
self.label_2 = QtWidgets.QLabel(self.centralwidget)
self.label_2.setMaximumSize(QtCore.QSize(50, 16777215))
self.label_2.setLayoutDirection(QtCore.Qt.LeftToRight)
self.label_2.setObjectName("label_2")
self.gridLayout.addWidget(self.label_2, 0, 3, 1, 1)
self.frame = QtWidgets.QFrame(self.centralwidget)
self.frame.setFrameShape(QtWidgets.QFrame.StyledPanel)
self.frame.setFrameShadow(QtWidgets.QFrame.Raised)
self.frame.setObjectName("frame")
self.verticalLayout = QtWidgets.QVBoxLayout(self.frame)
self.verticalLayout.setObjectName("verticalLayout")
self.checkBoxIsSpecial = QtWidgets.QCheckBox(self.frame)
self.checkBoxIsSpecial.setObjectName("checkBoxIsSpecial")
self.verticalLayout.addWidget(self.checkBoxIsSpecial)
self.checkBoxIsUpperOnly = QtWidgets.QCheckBox(self.frame)
self.checkBoxIsUpperOnly.setObjectName("checkBoxIsUpperOnly")
self.verticalLayout.addWidget(self.checkBoxIsUpperOnly)
self.checkBoxIsLowerOnly = QtWidgets.QCheckBox(self.frame)
self.checkBoxIsLowerOnly.setObjectName("checkBoxIsLowerOnly")
self.verticalLayout.addWidget(self.checkBoxIsLowerOnly)
self.checkBoxIsNum = QtWidgets.QCheckBox(self.frame)
self.checkBoxIsNum.setObjectName("checkBoxIsNum")
self.verticalLayout.addWidget(self.checkBoxIsNum)
self.checkBoxIsExcludeSimilar = QtWidgets.QCheckBox(self.frame)
self.checkBoxIsExcludeSimilar.setObjectName("checkBoxIsExcludeSimilar")
self.verticalLayout.addWidget(self.checkBoxIsExcludeSimilar)
self.gridLayout.addWidget(self.frame, 4, 1, 1, 2)
self.label = QtWidgets.QLabel(self.centralwidget)
self.label.setMaximumSize(QtCore.QSize(50, 16777215))
self.label.setObjectName("label")
self.gridLayout.addWidget(self.label, 2, 3, 1, 1)
self.pushButtonMakePW = QtWidgets.QPushButton(self.centralwidget)
self.pushButtonMakePW.setObjectName("pushButtonMakePW")
self.gridLayout.addWidget(self.pushButtonMakePW, 5, 1, 1, 2)
self.comboBoxSiteList = QtWidgets.QComboBox(self.centralwidget)
self.comboBoxSiteList.setObjectName("comboBoxSiteList")
self.gridLayout.addWidget(self.comboBoxSiteList, 0, 1, 1, 2)
self.pushButtonDeleteSite = QtWidgets.QPushButton(self.centralwidget)
self.pushButtonDeleteSite.setObjectName("pushButtonDeleteSite")
self.gridLayout.addWidget(self.pushButtonDeleteSite, 2, 2, 1, 1)
self.pushButtonAddSite = QtWidgets.QPushButton(self.centralwidget)
self.pushButtonAddSite.setObjectName("pushButtonAddSite")
self.gridLayout.addWidget(self.pushButtonAddSite, 2, 1, 1, 1)
self.lineEditPW = QtWidgets.QLineEdit(self.centralwidget)
self.lineEditPW.setText("")
self.lineEditPW.setDragEnabled(True)
self.lineEditPW.setReadOnly(True)
self.lineEditPW.setClearButtonEnabled(False)
self.lineEditPW.setObjectName("lineEditPW")
self.gridLayout.addWidget(self.lineEditPW, 2, 4, 1, 3)
self.lineEditID = QtWidgets.QLineEdit(self.centralwidget)
self.lineEditID.setDragEnabled(True)
self.lineEditID.setReadOnly(False)
self.lineEditID.setObjectName("lineEditID")
self.gridLayout.addWidget(self.lineEditID, 0, 4, 1, 3)
self.pushButtonSavePW = QtWidgets.QPushButton(self.centralwidget)
self.pushButtonSavePW.setObjectName("pushButtonSavePW")
self.gridLayout.addWidget(self.pushButtonSavePW, 5, 3, 1, 2)
self.pushButtonFindID = QtWidgets.QPushButton(self.centralwidget)
self.pushButtonFindID.setObjectName("pushButtonFindID")
self.gridLayout.addWidget(self.pushButtonFindID, 5, 5, 1, 2)
self.spinBox = QtWidgets.QSpinBox(self.centralwidget)
self.spinBox.setMaximum(50)
self.spinBox.setProperty("value", 20)
self.spinBox.setObjectName("spinBox")
self.gridLayout.addWidget(self.spinBox, 4, 6, 1, 1)
self.label_3 = QtWidgets.QLabel(self.centralwidget)
self.label_3.setObjectName("label_3")
self.gridLayout.addWidget(self.label_3, 4, 5, 1, 1)
PWMMainWindow.setCentralWidget(self.centralwidget)
self.menubar = QtWidgets.QMenuBar(PWMMainWindow)
self.menubar.setGeometry(QtCore.QRect(0, 0, 695, 21))
self.menubar.setObjectName("menubar")
PWMMainWindow.setMenuBar(self.menubar)
self.statusbar = QtWidgets.QStatusBar(PWMMainWindow)
self.statusbar.setObjectName("statusbar")
PWMMainWindow.setStatusBar(self.statusbar)
self.retranslateUi(PWMMainWindow)
QtCore.QMetaObject.connectSlotsByName(PWMMainWindow)
def retranslateUi(self, PWMMainWindow):
    """Apply the translated (Korean) captions to every widget.

    Auto-generated by pyuic5 from a Qt Designer .ui file; regenerate
    instead of editing by hand.
    """
    _translate = QtCore.QCoreApplication.translate
    PWMMainWindow.setWindowTitle(_translate("PWMMainWindow", "PWM"))
    self.label_2.setText(_translate("PWMMainWindow", "아이디"))  # "User ID"
    self.checkBoxIsSpecial.setText(_translate("PWMMainWindow", "특수문자 포함"))  # "Include special characters"
    self.checkBoxIsUpperOnly.setText(_translate("PWMMainWindow", "소문자 제외"))  # "Exclude lowercase"
    self.checkBoxIsLowerOnly.setText(_translate("PWMMainWindow", "대문자 제외"))  # "Exclude uppercase"
    self.checkBoxIsNum.setText(_translate("PWMMainWindow", "숫자 포함"))  # "Include digits"
    self.checkBoxIsExcludeSimilar.setText(_translate("PWMMainWindow", "비슷한 문자 제외(i, I, l, L, 1, o, O, 0)"))  # "Exclude look-alike characters"
    self.label.setText(_translate("PWMMainWindow", "비밀번호"))  # "Password"
    self.pushButtonMakePW.setText(_translate("PWMMainWindow", "생성"))  # "Generate"
    self.pushButtonDeleteSite.setText(_translate("PWMMainWindow", "사이트 삭제"))  # "Delete site"
    self.pushButtonAddSite.setText(_translate("PWMMainWindow", "사이트 추가"))  # "Add site"
    self.pushButtonSavePW.setText(_translate("PWMMainWindow", "저장"))  # "Save"
    self.pushButtonFindID.setText(_translate("PWMMainWindow", "데이터 베이스"))  # "Database"
    self.label_3.setText(_translate("PWMMainWindow", "글자 수 (최대 50)"))  # "Character count (max 50)"
if __name__ == "__main__":
    # Manual launch: build the password-manager main window and run the
    # Qt event loop, exiting the process with the loop's return code.
    import sys
    app = QtWidgets.QApplication(sys.argv)
    PWMMainWindow = QtWidgets.QMainWindow()
    ui = Ui_PWMMainWindow()
    ui.setupUi(PWMMainWindow)
    PWMMainWindow.show()
    sys.exit(app.exec_())
|
import numpy as np

# Transition matrices (each row sums to 1).
A = [[0.1, 0.2, 0.7],
     [0.7, 0.1, 0.2],
     [0.1, 0.3, 0.6]]
B = [[0.2, 0.4, 0.4],
     [0.3, 0.4, 0.3],
     [0.4, 0.3, 0.3]]
c = [1, 0, 0]        # point-mass start vector
d = [0.3, 0.4, 0.3]  # mixed start vector
e = [1, 1, 1]        # all-ones vector: fixed point of any row-stochastic matrix


def matrixN(matrix, vector, n):
    """Return ``matrix**n @ vector`` as a numpy array.

    Equivalent to applying ``matrix`` to ``vector`` n times, but computed
    with np.linalg.matrix_power (repeated squaring: O(log n) matrix
    multiplications instead of n).  For n == 0 the result is an array
    equal to the input vector (identity power).
    """
    return np.linalg.matrix_power(np.asarray(matrix), n).dot(vector)


# Iterated application converges towards the matrices' long-run behaviour.
# NOTE(review): the results are discarded; the expected values below are
# kept as comments from the original script.
matrixN(A, c, 100)  # [0.1, 0.7, 0.1]
matrixN(A, d, 100)  # [0.32, 0.31, 0.33]
matrixN(A, e, 100)  # [1., 1., 1.]
matrixN(B, c, 100)  # [0.2, 0.3, 0.4]
matrixN(B, d, 100)  # [0.34, 0.34, 0.33]
matrixN(B, e, 100)  # [1., 1., 1.]
# (original notes: "ii Response no they don't" / "yes they are")
|
# Juice Forms 0.0.1 including the Juice Forms API
# by Konstantin Kovshenin
#
# The following module is an addition to the Juice framework built upon
# the Django web framework. Uses Django users and django forms as seen
# from the import statements.
# Compatible with the Juice shortcodes API, allows usage of the [form]
# shortcode where applicable.
from django.db import models
from django.contrib.auth.models import User
from django import forms
from juice.front.shortcodes import shortcodes
from juice.front.debug import debug
# The standard Form model which is extended via FormField objects
# This is the one that appears in the admin panel.
class Form(models.Model):
    """A user-defined form; its fields live in related FormField rows."""
    title = models.CharField(max_length=255)
    slug = models.SlugField(max_length=50)
    published = models.DateTimeField('Date published', auto_now_add=True)
    # auto_now already stamps the field on every save (including the first
    # one), and Django treats auto_now/auto_now_add as mutually exclusive,
    # so auto_now_add is dropped.  Also fixes the "upated" typo in the
    # verbose name.
    updated = models.DateTimeField('Date updated', auto_now=True)
    author = models.ForeignKey(User)

    def __unicode__(self):
        return self.title

    class Meta:
        db_table = 'juice_forms_form'
# Form fields are very similar to django form fields, but this is designed
# to handle input from the admin panel, where users can select the field
# type and several other attributes. Has a foreign key on Form.
class FormField(models.Model):
    # Slug used as the HTML field name.
    name = models.SlugField(max_length=50)
    # Human-readable label rendered next to the field.
    caption = models.CharField(max_length=255)
    # One-letter code mapped to a django form field in FormsAPI.create_form.
    type = models.CharField(max_length=1, choices=(
        ('i', 'Input'),
        ('e', 'E-mail'),
        ('t', 'Textarea'),
        ('s', 'Selectbox'),
        ('c', 'Checkbox'),
        ('r', 'Radio'),
        ('f', 'File Attachment'),
        ('h', 'Hidden'),
        ('x', 'Submit'),
    ))
    # For select/radio fields: one "value:label" pair per line (see
    # FormsAPI.create_form, which splits on newlines then on ':').
    attributes = models.TextField(blank=True)
    default_value = models.TextField(blank=True)
    required = models.BooleanField()
    form = models.ForeignKey(Form)

    class Meta:
        db_table = 'juice_forms_formfield'
# The Juice Forms API. This is a class consisting mostly of static methods
# which produce form objects based on the form fields and forms, sort of a
# Factory. The Action pool is essential to the Forms API which will tell
# what type of actions we'd like the form to produce on submit (email, etc).
class FormsAPI():
    """Factory for django.forms classes built from Form/FormField rows.

    ``actions`` maps a form slug (or '*' for every form) to a list of
    callables invoked whenever that form receives a valid submission.
    Register handlers with :meth:`add_action`.
    """
    actions = {}

    # The following lookup methods fetch a Form row, parse its fields and
    # hand back a usable form class.  Each returns None when no usable
    # form is found.  The catches are narrowed from the original bare
    # "except:" so real programming errors are no longer swallowed.
    @staticmethod
    def by_id(form_id):
        """Return a form class for the Form with the given primary key."""
        try:
            form = Form.objects.get(id=form_id)
        except (Form.DoesNotExist, Form.MultipleObjectsReturned):
            return None
        return FormsAPI.create_form(form)

    @staticmethod
    def by_title(form_title):
        """Return a form class for the Form with the given title."""
        try:
            form = Form.objects.get(title=form_title)
        except (Form.DoesNotExist, Form.MultipleObjectsReturned):
            return None
        return FormsAPI.create_form(form)

    @staticmethod
    def by_slug(form_slug):
        """Return a form class for the Form with the given slug."""
        try:
            form = Form.objects.get(slug=form_slug)
        except (Form.DoesNotExist, Form.MultipleObjectsReturned):
            return None
        return FormsAPI.create_form(form)

    # This method does the actual form generation, so if you're looking
    # to add more possible fields, this is a place to look at how they're
    # formed, returns a valid form class.
    @staticmethod
    def create_form(form_object):
        """Build and return a django.forms.Form subclass for *form_object*."""
        form = form_object
        form_fields = FormField.objects.filter(form__id=form.id)

        # This is our future class which we will return later.
        class _FutureForm(forms.Form):
            title = form.title
            slug = form.slug
            extra = []

            # Init the parent form, then add fields according to the
            # FormField rows captured in the closure.
            def __init__(self, *args, **kwargs):
                # Extra non-django markup (submit buttons etc.).
                self.extra = []
                super(_FutureForm, self).__init__(*args, **kwargs)
                for field in form_fields:
                    if field.type == 'i':  # Input
                        self.fields[field.name] = forms.CharField(max_length=255, required=field.required, label=field.caption)
                    elif field.type == 'e':  # E-mail
                        self.fields[field.name] = forms.EmailField(max_length=255, required=field.required, label=field.caption)
                    elif field.type == 't':  # Textarea
                        self.fields[field.name] = forms.CharField(max_length=3000, required=field.required, widget=forms.Textarea, label=field.caption)
                    elif field.type == 'c':  # Checkbox
                        self.fields[field.name] = forms.BooleanField(label=field.caption, required=field.required)
                    elif field.type == 's':  # Selectbox
                        field_choices = []
                        for attr in field.attributes.split("\n"):
                            field_choices.append(attr.split(":"))
                        # required/label added for consistency with the
                        # radio branch below.
                        self.fields[field.name] = forms.ChoiceField(choices=field_choices, required=field.required, label=field.caption)
                    elif field.type == 'r':  # Radio
                        field_choices = []
                        for attr in field.attributes.split("\n"):
                            field_choices.append(attr.split(":"))
                        self.fields[field.name] = forms.ChoiceField(choices=field_choices, required=field.required, widget=forms.RadioSelect, label=field.caption)
                    elif field.type == 'f':  # File
                        self.fields[field.name] = forms.FileField(label=field.caption, required=field.required)
                    elif field.type == 'h':  # Hidden
                        self.fields[field.name] = forms.CharField(max_length=255, required=field.required, label=field.caption, widget=forms.HiddenInput)
                    elif field.type == 'x':  # Submit button
                        self.extra.append('<input type="submit" name="%s" value="%s" />' % (field.name, field.caption))

            # In case we'd like to print this form.
            def __unicode__(self):
                return self.as_p()

            # Rewrite the default django as_p to wrap the output in a div
            # container and a <form> tag and append the extras (submit
            # buttons etc.).
            def as_p(self):
                result = """<div class="juice-form form-%(form_slug)s">
                <form method="POST">
                %(form_contents)s
                %(form_extra)s
                </form>
                </div>""" % {'form_slug': self.slug, 'form_contents': super(_FutureForm, self).as_p(), 'form_extra': ' '.join(self.extra)}
                return result

            def get_extra(self):
                """Return the extra (non-django) markup, one item per line."""
                return "%s" % '\n'.join(self.extra)

            # NOTE(review): unfinished stub — builds a template string but
            # never renders or returns it (method returns None).
            def as_custom(self):
                template = """
                <div class="juice-form form-%(form_slug)s">
                    <form method="post">
                    {% for field in form %}
                        <div class="fieldWrapper">
                            {{ field.errors }}
                            {{ field.label_tag }}: {{ field }}
                        </div>
                    {% endfor %}
                    <p><input type="submit" value="Send message" /></p>
                    </form>
                </div>
                """

        # Once the future form is constructed, return it.
        return _FutureForm

    # Use this static method to process any incoming form data: bind the
    # POST data, run the registered actions on a valid submission, and
    # hand back a (fresh or bound) form instance.
    @staticmethod
    def process_form(form_class, request, **kwargs):
        """Bind *request* data to *form_class* and run submission actions.

        Returns the form instance, or (form, feedback) when
        kwargs['feedback'] is truthy — feedback being the list of
        non-empty values returned by the registered actions.
        """
        CustomForm = form_class
        ids_format = "%s-%%s" % CustomForm.slug
        feedback = []
        if request is not None and request.method == 'POST':
            form = CustomForm(request.POST, auto_id=ids_format)
            if form.is_valid():
                # Slug-specific processors first, then the '*' processors
                # that apply to every form.
                if form.slug in FormsAPI.actions:
                    for func in FormsAPI.actions[form.slug]:
                        feedback.append(func(form, **kwargs))
                if '*' in FormsAPI.actions:
                    for func in FormsAPI.actions['*']:
                        feedback.append(func(form, **kwargs))
                debug("Submitted: %s" % form.slug)
                # Valid submission handled: present a clean form again.
                form = CustomForm(auto_id=ids_format)
        else:
            form = CustomForm(auto_id=ids_format)
        if kwargs.get("feedback"):
            # Drop empty feedback entries.  (The original deleted items
            # while enumerating the same list, which skips elements.)
            feedback = [value for value in feedback if value]
            return form, feedback
        return form

    # Shortcode entry point — registered with the Juice Shortcodes API
    # below; do not call directly unless you know what you're doing.
    @staticmethod
    def shortcode(kwargs):
        """Render the form identified by the shortcode's id/slug/title."""
        id = kwargs.get("id")
        slug = kwargs.get("slug")
        title = kwargs.get("title")
        request = kwargs.get("request")
        # In priority order, find the parameter identifying the form.
        # (The original stringified missing attributes into the literal
        # 'None', which made the slug/title branches misfire and could
        # leave NewForm unbound.)
        NewForm = None
        if id is not None:
            NewForm = FormsAPI.by_id(int(id))
        elif slug is not None:
            NewForm = FormsAPI.by_slug(str(slug))
        elif title is not None:
            NewForm = FormsAPI.by_title(str(title))
        if NewForm is None:
            # Unknown form: echo the shortcode arguments for debugging.
            return "%s" % kwargs
        # Process the request; with no submitted data this returns a new
        # form of the passed class.
        form = FormsAPI.process_form(NewForm, request)
        if form is not None:
            return form.as_p()
        return "%s" % kwargs

    # The following static method allows modules to add new actions to
    # form processors.  The registered function is called with the form
    # object on every valid submission.
    @staticmethod
    def add_action(form_slug, func):
        """Register *func* to run on valid submissions of *form_slug*."""
        if form_slug in FormsAPI.actions:
            FormsAPI.actions[form_slug].append(func)
        else:
            # Must be a list: the original stored a tuple here, so
            # registering a second action for the same slug raised
            # AttributeError on .append().
            FormsAPI.actions[form_slug] = [func]
# Using the Shortcodes API (juice.front.shortcodes) add a new shortcode
# called form. The general usage is:
# * [form id="1"]
# * [form slug="contact-form"]
# * [form name="My Form Name"]
shortcodes.add("form", FormsAPI.shortcode)

# Use this to accept non-static methods in the Forms API
# Reserved for the action pool which will be worked out at a later
# stage.
# Update: The action pool is a static attribute inside FormsAPI, so api
# is no longer needed. Will be removed.
#api = FormsAPI()

# The following is a sample of how to add custom form processors: this
# '*' action logs the slug of every valid submission of any form.
def form_debug(form, **kwargs):
    debug("Form debug: %s" % form.slug)

FormsAPI.add_action('*', form_debug)
|
import requests
from bs4 import BeautifulSoup
import pprint
# Fetch the Hacker News front page and pre-select the story links and
# their metadata rows ("subtext" holds score/author/age per story).
res = requests.get('https://news.ycombinator.com/news')
texts = res.text
soup = BeautifulSoup(res.text, 'html.parser')
# NOTE(review): verify '.storylink' still matches the live markup — HN
# has changed its title-row classes over time.
links = soup.select('.storylink')
subtext = soup.select('.subtext')
def sort_stories_by_votes(hnlist):
    """Return *hnlist* sorted by descending 'point' value."""
    return sorted(hnlist, key=lambda story: story['point'], reverse=True)


def create_custom_hn(links, subtext):
    """Build [{'title', 'link', 'point'}, ...] for stories with >= 100 points.

    *links* and *subtext* are parallel sequences of tag objects exposing
    getText()/get() and select() respectively (BeautifulSoup results).
    The result is sorted by points, highest first.
    """
    hn = []
    # zip pairs each story link with its metadata row, instead of the
    # original enumerate-then-reindex pattern (which ignored the loop
    # item and could IndexError on a length mismatch).
    for link_tag, meta_tag in zip(links, subtext):
        title = link_tag.getText()
        href = link_tag.get('href', None)
        score_tags = meta_tag.select('.score')
        if score_tags:
            # NOTE(review): assumes the plural " points" suffix; a
            # 1-point story rendered as "1 point" would raise ValueError.
            point = int(score_tags[0].getText().replace(' points', ''))
            if point >= 100:
                hn.append({'title': title, 'link': href, 'point': point})
    return sort_stories_by_votes(hn)
# Build the filtered, vote-sorted story list and show the top 20 entries.
# (Removed a stray trailing "|" artifact that broke the last line.)
answer = create_custom_hn(links, subtext)
pprint.pprint(answer[0:20])
from oauth.loginhistoru import app

if __name__ == '__main__':
    # Emit JSON responses without escaping non-ASCII characters.
    app.config.update(RESTFUL_JSON=dict(ensure_ascii=False))
    # NOTE(review): debug=True on 0.0.0.0 exposes the Werkzeug debugger
    # to the network — disable for any non-local deployment.
    # (Removed a stray trailing "|" artifact that broke this line.)
    app.run(host='0.0.0.0', debug=True)
# Generated by Django 2.2 on 2020-03-14 12:56
from django.db import migrations, models


class Migration(migrations.Migration):
    """Auto-generated: relabel/redefine SongList.song; do not hand-edit."""

    dependencies = [
        ('song', '0004_auto_20200216_2111'),
    ]

    operations = [
        migrations.AlterField(
            model_name='songlist',
            name='song',
            # verbose_name '歌名' means "song title".
            field=models.CharField(blank=True, max_length=50, verbose_name='歌名'),
        ),
    ]
|
from flask import request, jsonify, render_template, abort
from urllib import urlencode, quote
from model import insert_sound, get_sound_by_id, sound_exists
from model import get_sound_by_lang_text_pair, save_sound, sounds_dir
from helpers.languages import languages
import requests
import os
import execjs
translate_base_url = 'http://translate.google.com/translate_tts'

# Shared HTTP session with a self-identifying User-Agent.
s = requests.Session()
s.headers.update({ 'User-Agent': 'SoundOfTextBot (soundoftext.com)' })

current_dir = os.path.abspath(os.path.dirname(__file__))
parent_dir = os.path.dirname(current_dir)
HASHJS_PATH = os.path.join(parent_dir, 'hash.js')

# Compile the JavaScript token-hashing function used to sign TTS
# requests.  `with` closes the file even if compilation fails (the
# original used a manual open/close pair).
with open(HASHJS_PATH, 'r') as hashjs_file:
    hashjs = execjs.compile(hashjs_file.read())
def create():
    """Flask view: create or reuse a TTS sound for the POSTed lang/text.

    Responds with JSON {'success': True, 'id': <sound id>}; aborts with
    HTTP 500 when the upstream Google Translate TTS request fails.
    """
    lang = request.form['lang']
    # Cap the text at 100 characters before lookup or download.
    text = request.form['text'].strip()[:100]
    if sound_exists(lang, text):
        # Already downloaded once: reuse the stored sound.
        sound = get_sound_by_lang_text_pair(lang, text)
        res = { 'success': True, 'id': sound[0] }  # sound[0] is the row id
    else:
        params = build_translate_url_params(lang, text)
        translate_url = translate_base_url + '?' + params
        r = s.get(translate_url)
        if r.status_code == 200:
            # Persist the audio payload and index it in the database.
            sound_path = save_sound(lang, text, r.content)
            idd = insert_sound(lang, text, sound_path)
            res = { 'success': True, 'id': idd }
        else:
            abort(500)
    return jsonify(**res)
def get_sound(idd):
    """Render the playback page for the stored sound with id *idd*.

    The sound row is (id, lang_code, text, file_path, ...); the file name
    is percent-encoded so non-ASCII names form a valid static URL.
    """
    record = get_sound_by_id(idd)
    language_name = languages[record[1]]
    sound_text = record[2]
    encoded_name = quote(os.path.basename(record[3]).encode('utf-8'))
    static_path = os.path.join('/static/sounds', record[1], encoded_name)
    return render_template('sound.html', lang=language_name, text=sound_text, path=static_path)
def build_translate_url_params(lang, text):
    """Return the urlencoded query string for the Google TTS endpoint.

    The 'tk' token is computed by the bundled JavaScript hash function,
    mirroring what the Translate web client sends.
    """
    token = hashjs.call('vM', text)
    query = {
        'ie': 'UTF-8',
        'tl': lang,
        'q': text.encode('utf-8'),
        'client': 't',
        'tk': token,
    }
    return urlencode(query)
|
import glob
import math
import os
import sys
import hydra
import numpy as np
import pandas as pd
import pytorch_lightning as pl
import soundfile as sf
import torch
import torch.nn.functional as F
from omegaconf import DictConfig, OmegaConf, open_dict
from torch.utils.data import DataLoader, Dataset
from tqdm import tqdm
sys.path.append("src/")
import custom # import all custom modules for registering objects.
import kvt
import kvt.augmentation
import kvt.utils
from kvt.builder import build_hooks, build_lightning_module, build_model
# from kvt.evaluate import evaluate
from kvt.initialization import initialize
from kvt.registry import TRANSFORMS
from kvt.utils import build_from_config
from tools import evaluate
CLIP_SECONDS = 5  # length (seconds) of each pre-clipped training chunk
DATA_DIR = "../data/input/"  # input data root, relative to the working dir
SR = 32000  # sample rate (Hz) the clipped audio is assumed to use
class ClippedTrainDataset(Dataset):
    """Dataset over pre-clipped training audio for one fold.

    Each item is a fixed-length waveform (CLIP_SECONDS * SR samples) plus
    the source filename and the clip-start second.
    """

    def __init__(self, df: pd.DataFrame, transforms=None, idx_fold: int = 0):
        # transforms: callable (waveform, sample_rate) -> waveform, or None.
        self.transforms = transforms
        self.period = CLIP_SECONDS
        self.idx_fold = idx_fold
        # Keep only the rows belonging to the requested fold.
        df = df[df.Fold == self.idx_fold]
        self.image_filenames = df["filename"].values
        self.paths = df["path"].values
        self.seconds = df["second"].values

    def __len__(self):
        return len(self.image_filenames)

    def __getitem__(self, idx: int):
        wav_name = self.image_filenames[idx]
        path = self.paths[idx]
        second = self.seconds[idx]
        x, _ = sf.read(path)
        len_x = len(x)
        effective_length = SR * self.period
        # Zero-pad clips shorter than the target length.  NOTE(review):
        # longer clips are returned as-is — presumably the source files
        # are already cut to CLIP_SECONDS; confirm upstream.
        if len_x < effective_length:
            new_x = np.zeros(effective_length, dtype=x.dtype)
            new_x[:len_x] = x
            x = new_x
        x = np.nan_to_num(x.astype(np.float32))
        if self.transforms:
            x = self.transforms(x, SR)
        x = np.nan_to_num(x)
        return {"x": x, "filename": wav_name, "second": second}
def build_short_audio_dataloaders(config):
    """Build the evaluation DataLoader over the clipped short-audio files.

    Returns a single-element list of dicts shaped like the project's
    other dataloader builders: {"dataloader", "split", "mode"}.
    """
    split = "validation"
    batch_size = config.trainer.evaluation.batch_size

    # Build a dataframe from the clipped files.  Clip names look like
    # "<stem>_<second>...": recover the original filename and the
    # clip-start second from the name.
    paths = glob.glob(DATA_DIR + "train_short_audio_clipped/" + "*.ogg")
    df = pd.DataFrame(paths, columns=["path"])
    df["filename_with_seconds"] = df["path"].apply(lambda x: x.split("/")[-1])
    df["second"] = df["filename_with_seconds"].apply(lambda x: int(x.split("_")[1]))
    df["filename"] = df["filename_with_seconds"].apply(
        lambda x: x.split("_")[0] + ".ogg"
    )
    # Join the training metadata (fold assignment etc.) onto the clips.
    meta = pd.read_csv(DATA_DIR + config.dataset.dataset.params.csv_filename)
    df = df.merge(meta, how="left")

    # Build the validation-split transform from the project registry.
    transform_configs = {
        "split": split,
        "aug_cfg": config.augmentation.get(split),
    }
    transform = build_from_config(
        config.dataset.transform,
        TRANSFORMS,
        default_args=transform_configs,
    )

    # Build dataset and loader; shuffle=False so predictions line up with
    # the per-file output written by run().
    dataset = ClippedTrainDataset(
        df,
        transforms=transform,
        idx_fold=config.dataset.dataset.params.idx_fold,
    )
    dataloader = DataLoader(
        dataset,
        shuffle=False,
        batch_size=batch_size,
        drop_last=False,
        num_workers=24,
    )
    result = [{"dataloader": dataloader, "split": split, "mode": split}]
    return result
def run(config):
    """Run clip-wise and second-wise inference over the clipped audio.

    Loads the fold's best checkpoint into the lightning module, then
    writes one .npy prediction per clip under
    <evaluation.dirpath>/{clipwise,secondwise}/.
    """
    pl.seed_everything(config.seed)

    # Weights come from the checkpoint below, so skip downloading
    # pretrained backbone weights.
    OmegaConf.set_struct(config, True)
    with open_dict(config):
        config.trainer.model.params.backbone.params.pretrained = False

    # build dataloaders
    dataloaders = build_short_audio_dataloaders(config)

    # build model
    model = build_model(config)

    # build hooks
    hooks = build_hooks(config)

    # build lightning module (no optimizer/scheduler: inference only)
    lightning_module = build_lightning_module(
        config,
        model=model,
        optimizer=None,
        scheduler=None,
        hooks=hooks,
        dataloaders=dataloaders,
        strong_transform=None,
    )

    # load the best checkpoint for this fold
    dir_path = config.trainer.callbacks.ModelCheckpoint.dirpath
    filename = f"fold_{config.dataset.dataset.params.idx_fold}_best.ckpt"
    best_model_path = os.path.join(dir_path, filename)
    state_dict = torch.load(best_model_path)["state_dict"]

    # if the model was trained with dp/sync_batchnorm, the state dict
    # keys carry an extra prefix that has to be stripped first
    if (
        hasattr(config.trainer.trainer, "sync_batchnorm")
        and config.trainer.trainer.sync_batchnorm
    ):
        state_dict = kvt.utils.fix_dp_model_state_dict(state_dict)
    lightning_module.model.load_state_dict(state_dict)

    # inference
    print("---------------------------------------------------------------")
    print("Inference")
    lightning_module.eval()
    lightning_module.cuda()
    secondwise_dirpath = os.path.join(config.trainer.evaluation.dirpath, "secondwise")
    clipwise_dirpath = os.path.join(config.trainer.evaluation.dirpath, "clipwise")
    with torch.no_grad():
        for dl_dict in lightning_module.dataloaders:
            dataloader, split = dl_dict["dataloader"], dl_dict["split"]
            batch_size = dataloader.batch_size
            total_size = len(dataloader.dataset)
            total_step = math.ceil(total_size / batch_size)
            tbar = tqdm(enumerate(dataloader), total=total_step)
            for i, data in tbar:
                x = data["x"].cuda()
                filenames = data["filename"]
                seconds = data["second"]
                outputs = lightning_module(x)
                # framewise_logit appears to be (batch, frames, classes);
                # pooling with this kernel collapses the frame axis down
                # to CLIP_SECONDS per-second predictions — TODO confirm.
                kernel_size = outputs["framewise_logit"].shape[1] // CLIP_SECONDS
                clip_wise_predictions = (
                    F.sigmoid(outputs["logit"]).detach().cpu().numpy()
                )
                second_wise_predictions = (
                    F.sigmoid(
                        F.max_pool1d(
                            outputs["framewise_logit"].transpose(1, 2),
                            kernel_size=kernel_size,
                        )
                    )
                    .detach()
                    .cpu()
                    .numpy()
                )
                if not os.path.exists(secondwise_dirpath):
                    os.makedirs(secondwise_dirpath)
                if not os.path.exists(clipwise_dirpath):
                    os.makedirs(clipwise_dirpath)
                for filename, second, c_pred, s_pred in zip(
                    filenames, seconds, clip_wise_predictions, second_wise_predictions
                ):
                    # NOTE(review): `filename` is unpacked but never used
                    # in the output names; the literal "(unknown)" below
                    # looks like it should be the per-file stem — confirm
                    # intent, otherwise clips from different files with
                    # the same start second overwrite each other.
                    c_path = os.path.join(
                        clipwise_dirpath,
                        f"{config.experiment_name}_(unknown)_{second:0>5}.npy",
                    )
                    s_path = os.path.join(
                        secondwise_dirpath,
                        f"{config.experiment_name}_(unknown)_{second:0>5}.npy",
                    )
                    np.save(c_path, c_pred)
                    np.save(s_path, s_pred)
@hydra.main(config_path="../../config", config_name="default")
def main(config: DictConfig) -> None:
    # Hydra entry point: all of the work happens in run().
    run(config)


if __name__ == "__main__":
    # Register the project's custom objects (models, transforms, ...)
    # before Hydra composes the config and invokes main().
    initialize()
    main()
|
"""
# 自定义数组
包含增、删、改、查功能
"""
import os
import logging
logger = logging.getLogger(__name__)
class CustomArray(object):
    """A fixed-capacity array backed by a Python list.

    Supports indexed read/write, iteration, value search (find), removal
    by index (delete) and capacity-bounded insertion (insert).
    """

    def __init__(self, capacity: int):
        # Backing storage plus the maximum number of elements allowed.
        self._data = []
        self._capacity = capacity

    def __getitem__(self, index: int):
        """Read the element stored at *index*."""
        return self._data[index]

    def __setitem__(self, index: int, value: object):
        """Overwrite the element stored at *index*."""
        self._data[index] = value

    def __len__(self):
        return len(self._data)

    def __iter__(self):
        return iter(self._data)

    def find(self, value: object):
        """Return the index of *value*, or None when it is absent."""
        if value in self._data:
            return self._data.index(value)
        return None

    def delete(self, index: int):
        """Remove the element at *index*; True on success, False otherwise."""
        size = len(self._data)
        if -size <= index < size:
            self._data.pop(index)
            return True
        return False

    def insert(self, index: int, value: int):
        """Insert *value* at *index* unless the array is full (returns False)."""
        if len(self._data) >= self._capacity:
            return False
        return self._data.insert(index, value)

    def show(self):
        """Log every stored element, one line each."""
        for element in self._data:
            logger.info("value: {}".format(element))
def main():
    """Smoke-test CustomArray: fill to capacity, then exercise the API."""
    array = CustomArray(5)
    for position, value in [(0, 3), (0, 4), (1, 5), (3, 9), (3, 10)]:
        array.insert(position, value)
    assert array.insert(0, 100) is False  # capacity reached
    assert len(array) == 5
    assert array.find(5) == 1
    assert array.delete(4) is True
    array.show()
if __name__ == "__main__":
    # Log to stdout (filename=None) with a file:line prefix for tracing.
    logging.basicConfig(format='[%(asctime)s %(filename)s:%(lineno)s] %(message)s',
                        level=logging.INFO,
                        filename=None,
                        filemode='a')
    logger.info("Start.")
    main()
|
# Separa os artigos em paragrafos
# (splits each raw article into one output file per non-blank line)
from os import listdir
from os.path import isfile, join

pasta_input = "raw\\"     # source folder with raw articles (Windows-style path)
pasta_output = "input\\"  # destination folder: one file per paragraph
encoding = "utf8"
separador = "."  # NOTE(review): unused in this script; kept for compatibility

# Every regular file directly inside the input folder is one article.
files = [f for f in listdir(pasta_input) if isfile(join(pasta_input, f))]

num_doc = 0
for file in files:
    with open(join(pasta_input, file), "r", encoding=encoding) as file_r:
        num_line = 0
        # Stream the file line by line (no need to materialize readlines());
        # each non-blank line becomes the file "<doc>_<line>".
        for line in file_r:
            if line == '\n':
                continue
            file_output = f"{num_doc}_{num_line}"
            with open(join(pasta_output, file_output), "w", encoding=encoding) as file_w:
                file_w.write(line)
            num_line += 1
    num_doc += 1
|
class MathDojo(object):
    """Accumulator supporting chained add/subtract calls.

    Arguments to add/subtract may be numbers or (one level of) lists and
    tuples of numbers, e.g. ``md.add(2, [3, 4]).subtract((1,))``.
    The running total is exposed as ``start``.
    """

    def __init__(self):
        self.start = 0

    @staticmethod
    def _flatten(args):
        """Yield every number in *args*, expanding lists/tuples one level.

        Factors out the traversal that add() and subtract() previously
        duplicated; uses isinstance instead of type() comparisons.
        """
        for item in args:
            if isinstance(item, (tuple, list)):
                for number in item:
                    yield number
            else:
                yield item

    def add(self, *args):
        """Add every given number to the total; returns self for chaining."""
        for number in self._flatten(args):
            self.start += number
        return self

    def subtract(self, *args):
        """Subtract every given number from the total; returns self."""
        for number in self._flatten(args):
            self.start -= number
        return self
md = MathDojo()
# Parentheses keep this line valid under both Python 2 and Python 3
# (the original Python 2 print statement also carried a stray trailing
# "|" artifact that broke the line).  Expected output: 4.
print(md.add(2).add(2, 5).subtract(3, 2).start)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.